diff --git a/Godeps/_workspace/src/github.com/coreos/go-oidc/LICENSE b/Godeps/_workspace/src/github.com/coreos/go-oidc/LICENSE new file mode 100644 index 000000000000..e06d2081865a --- /dev/null +++ b/Godeps/_workspace/src/github.com/coreos/go-oidc/LICENSE @@ -0,0 +1,202 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/Godeps/_workspace/src/github.com/coreos/go-oidc/NOTICE b/Godeps/_workspace/src/github.com/coreos/go-oidc/NOTICE new file mode 100644 index 000000000000..b39ddfa5cbde --- /dev/null +++ b/Godeps/_workspace/src/github.com/coreos/go-oidc/NOTICE @@ -0,0 +1,5 @@ +CoreOS Project +Copyright 2014 CoreOS, Inc + +This product includes software developed at CoreOS, Inc. +(http://www.coreos.com/). diff --git a/Godeps/_workspace/src/github.com/coreos/go-oidc/http/client.go b/Godeps/_workspace/src/github.com/coreos/go-oidc/http/client.go new file mode 100644 index 000000000000..fd079b4950f0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/coreos/go-oidc/http/client.go @@ -0,0 +1,7 @@ +package http + +import "net/http" + +type Client interface { + Do(*http.Request) (*http.Response, error) +} diff --git a/Godeps/_workspace/src/github.com/coreos/go-oidc/http/http.go b/Godeps/_workspace/src/github.com/coreos/go-oidc/http/http.go new file mode 100644 index 000000000000..f0d051b5f2b0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/coreos/go-oidc/http/http.go @@ -0,0 +1,159 @@ +package http + +import ( + "encoding/base64" + "encoding/json" + "errors" + "net/http" + "net/url" + "path" + "strconv" + "strings" + "time" + + "github.com/coreos/pkg/capnslog" +) + +var ( + log = capnslog.NewPackageLogger("github.com/coreos/go-oidc", "http") +) + +func WriteError(w http.ResponseWriter, code int, msg string) { + e := struct { + Error string `json:"error"` + }{ + Error: msg, + } + b, err := json.Marshal(e) + if err != nil { + log.Errorf("Failed marshaling %#v to JSON: %v", e, err) + } + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(code) + w.Write(b) +} + +// BasicAuth parses a username and password from the request's +// Authorization header. 
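For example, the header "Authorization: Basic dXNlcjpwYXNz" (base64 of "user:pass") yields ("user", "pass", true).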
This was pulled from golang master: +// https://codereview.appspot.com/76540043 +func BasicAuth(r *http.Request) (username, password string, ok bool) { + auth := r.Header.Get("Authorization") + if auth == "" { + return + } + + if !strings.HasPrefix(auth, "Basic ") { + return + } + c, err := base64.StdEncoding.DecodeString(strings.TrimPrefix(auth, "Basic ")) + if err != nil { + return + } + cs := string(c) + s := strings.IndexByte(cs, ':') + if s < 0 { + return + } + return cs[:s], cs[s+1:], true +} + +func cacheControlMaxAge(hdr string) (time.Duration, bool, error) { + for _, field := range strings.Split(hdr, ",") { + parts := strings.SplitN(strings.TrimSpace(field), "=", 2) + k := strings.ToLower(strings.TrimSpace(parts[0])) + if k != "max-age" { + continue + } + + if len(parts) == 1 { + return 0, false, errors.New("max-age has no value") + } + + v := strings.TrimSpace(parts[1]) + if v == "" { + return 0, false, errors.New("max-age has empty value") + } + + age, err := strconv.Atoi(v) + if err != nil { + return 0, false, err + } + + if age <= 0 { + return 0, false, nil + } + + return time.Duration(age) * time.Second, true, nil + } + + return 0, false, nil +} + +func expires(date, expires string) (time.Duration, bool, error) { + if date == "" || expires == "" { + return 0, false, nil + } + + te, err := time.Parse(time.RFC1123, expires) + if err != nil { + return 0, false, err + } + + td, err := time.Parse(time.RFC1123, date) + if err != nil { + return 0, false, err + } + + ttl := te.Sub(td) + + // headers indicate data already expired, caller should not + // have to care about this case + if ttl <= 0 { + return 0, false, nil + } + + return ttl, true, nil +} + +func Cacheable(hdr http.Header) (time.Duration, bool, error) { + ttl, ok, err := cacheControlMaxAge(hdr.Get("Cache-Control")) + if err != nil || ok { + return ttl, ok, err + } + + return expires(hdr.Get("Date"), hdr.Get("Expires")) +} + +// MergeQuery appends additional query values to an existing URL. +func MergeQuery(u url.URL, q url.Values) url.URL { + uv := u.Query() + for k, vs := range q { + for _, v := range vs { + uv.Add(k, v) + } + } + u.RawQuery = uv.Encode() + return u +} + +// NewResourceLocation appends a resource id to the end of the requested URL path. +func NewResourceLocation(reqURL *url.URL, id string) string { + var u url.URL + u = *reqURL + u.Path = path.Join(u.Path, id) + u.RawQuery = "" + u.Fragment = "" + return u.String() +} + +// CopyRequest returns a clone of the provided *http.Request. +// The returned object is a shallow copy of the struct and a +// deep copy of its Header field. 
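+// A hypothetical use, copying a request so its headers can be mutated safely:
+//
+//	r2 := CopyRequest(r)
+//	r2.Header.Set("X-Request-Id", "abc123") // r.Header is unchanged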
+func CopyRequest(r *http.Request) *http.Request {
+	r2 := *r
+	r2.Header = make(http.Header)
+	for k, s := range r.Header {
+		r2.Header[k] = s
+	}
+	return &r2
+}
diff --git a/Godeps/_workspace/src/github.com/coreos/go-oidc/http/middleware.go b/Godeps/_workspace/src/github.com/coreos/go-oidc/http/middleware.go
new file mode 100644
index 000000000000..270b3bc08541
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/coreos/go-oidc/http/middleware.go
@@ -0,0 +1,14 @@
+package http
+
+import (
+	"net/http"
+)
+
+type LoggingMiddleware struct {
+	Next http.Handler
+}
+
+func (l *LoggingMiddleware) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	log.Infof("HTTP %s %v", r.Method, r.URL)
+	l.Next.ServeHTTP(w, r)
+}
diff --git a/Godeps/_workspace/src/github.com/coreos/go-oidc/http/url.go b/Godeps/_workspace/src/github.com/coreos/go-oidc/http/url.go
new file mode 100644
index 000000000000..df60eb1a6b5a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/coreos/go-oidc/http/url.go
@@ -0,0 +1,29 @@
+package http
+
+import (
+	"errors"
+	"net/url"
+)
+
+// ParseNonEmptyURL checks that a string is a parsable URL which is also not empty,
+// since `url.Parse("")` does not return an error. The URL must contain a scheme and a host.
+func ParseNonEmptyURL(u string) (*url.URL, error) {
+	if u == "" {
+		return nil, errors.New("url is empty")
+	}
+
+	ur, err := url.Parse(u)
+	if err != nil {
+		return nil, err
+	}
+
+	if ur.Scheme == "" {
+		return nil, errors.New("url scheme is empty")
+	}
+
+	if ur.Host == "" {
+		return nil, errors.New("url host is empty")
+	}
+
+	return ur, nil
+}
diff --git a/Godeps/_workspace/src/github.com/coreos/go-oidc/jose/claims.go b/Godeps/_workspace/src/github.com/coreos/go-oidc/jose/claims.go
new file mode 100644
index 000000000000..8b48bfd230b0
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/coreos/go-oidc/jose/claims.go
@@ -0,0 +1,126 @@
+package jose
+
+import (
+	"encoding/json"
+	"fmt"
+	"math"
+	"time"
+)
+
+type Claims map[string]interface{}
+
+func (c Claims) Add(name string, value interface{}) {
+	c[name] = value
+}
+
+func (c Claims) StringClaim(name string) (string, bool, error) {
+	cl, ok := c[name]
+	if !ok {
+		return "", false, nil
+	}
+
+	v, ok := cl.(string)
+	if !ok {
+		return "", false, fmt.Errorf("unable to parse claim as string: %v", name)
+	}
+
+	return v, true, nil
+}
+
+func (c Claims) StringsClaim(name string) ([]string, bool, error) {
+	cl, ok := c[name]
+	if !ok {
+		return nil, false, nil
+	}
+
+	if v, ok := cl.([]string); ok {
+		return v, true, nil
+	}
+
+	// When unmarshaled, []string will become []interface{}.
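+	// For example, the JSON claim {"aud": ["a", "b"]} is decoded by
+	// encoding/json as []interface{}{"a", "b"}, so each element must be
+	// asserted back to string individually.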
+ if v, ok := cl.([]interface{}); ok { + var ret []string + for _, vv := range v { + str, ok := vv.(string) + if !ok { + return nil, false, fmt.Errorf("unable to parse claim as string array: %v", name) + } + ret = append(ret, str) + } + return ret, true, nil + } + + return nil, false, fmt.Errorf("unable to parse claim as string array: %v", name) +} + +func (c Claims) Int64Claim(name string) (int64, bool, error) { + cl, ok := c[name] + if !ok { + return 0, false, nil + } + + v, ok := cl.(int64) + if !ok { + vf, ok := cl.(float64) + if !ok { + return 0, false, fmt.Errorf("unable to parse claim as int64: %v", name) + } + v = int64(vf) + } + + return v, true, nil +} + +func (c Claims) Float64Claim(name string) (float64, bool, error) { + cl, ok := c[name] + if !ok { + return 0, false, nil + } + + v, ok := cl.(float64) + if !ok { + vi, ok := cl.(int64) + if !ok { + return 0, false, fmt.Errorf("unable to parse claim as float64: %v", name) + } + v = float64(vi) + } + + return v, true, nil +} + +func (c Claims) TimeClaim(name string) (time.Time, bool, error) { + v, ok, err := c.Float64Claim(name) + if !ok || err != nil { + return time.Time{}, ok, err + } + + s := math.Trunc(v) + ns := (v - s) * math.Pow(10, 9) + return time.Unix(int64(s), int64(ns)).UTC(), true, nil +} + +func decodeClaims(payload []byte) (Claims, error) { + var c Claims + if err := json.Unmarshal(payload, &c); err != nil { + return nil, fmt.Errorf("malformed JWT claims, unable to decode: %v", err) + } + return c, nil +} + +func marshalClaims(c Claims) ([]byte, error) { + b, err := json.Marshal(c) + if err != nil { + return nil, err + } + return b, nil +} + +func encodeClaims(c Claims) (string, error) { + b, err := marshalClaims(c) + if err != nil { + return "", err + } + + return encodeSegment(b), nil +} diff --git a/Godeps/_workspace/src/github.com/coreos/go-oidc/jose/jose.go b/Godeps/_workspace/src/github.com/coreos/go-oidc/jose/jose.go new file mode 100644 index 000000000000..6209926596c9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/coreos/go-oidc/jose/jose.go @@ -0,0 +1,112 @@ +package jose + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "strings" +) + +const ( + HeaderMediaType = "typ" + HeaderKeyAlgorithm = "alg" + HeaderKeyID = "kid" +) + +const ( + // Encryption Algorithm Header Parameter Values for JWS + // See: https://tools.ietf.org/html/draft-ietf-jose-json-web-algorithms-40#page-6 + AlgHS256 = "HS256" + AlgHS384 = "HS384" + AlgHS512 = "HS512" + AlgRS256 = "RS256" + AlgRS384 = "RS384" + AlgRS512 = "RS512" + AlgES256 = "ES256" + AlgES384 = "ES384" + AlgES512 = "ES512" + AlgPS256 = "PS256" + AlgPS384 = "PS384" + AlgPS512 = "PS512" + AlgNone = "none" +) + +const ( + // Algorithm Header Parameter Values for JWE + // See: https://tools.ietf.org/html/draft-ietf-jose-json-web-algorithms-40#section-4.1 + AlgRSA15 = "RSA1_5" + AlgRSAOAEP = "RSA-OAEP" + AlgRSAOAEP256 = "RSA-OAEP-256" + AlgA128KW = "A128KW" + AlgA192KW = "A192KW" + AlgA256KW = "A256KW" + AlgDir = "dir" + AlgECDHES = "ECDH-ES" + AlgECDHESA128KW = "ECDH-ES+A128KW" + AlgECDHESA192KW = "ECDH-ES+A192KW" + AlgECDHESA256KW = "ECDH-ES+A256KW" + AlgA128GCMKW = "A128GCMKW" + AlgA192GCMKW = "A192GCMKW" + AlgA256GCMKW = "A256GCMKW" + AlgPBES2HS256A128KW = "PBES2-HS256+A128KW" + AlgPBES2HS384A192KW = "PBES2-HS384+A192KW" + AlgPBES2HS512A256KW = "PBES2-HS512+A256KW" +) + +const ( + // Encryption Algorithm Header Parameter Values for JWE + // See: https://tools.ietf.org/html/draft-ietf-jose-json-web-algorithms-40#page-22 + EncA128CBCHS256 = "A128CBC-HS256" + 
EncA128CBCHS384 = "A128CBC-HS384" + EncA256CBCHS512 = "A256CBC-HS512" + EncA128GCM = "A128GCM" + EncA192GCM = "A192GCM" + EncA256GCM = "A256GCM" +) + +type JOSEHeader map[string]string + +func (j JOSEHeader) Validate() error { + if _, exists := j[HeaderKeyAlgorithm]; !exists { + return fmt.Errorf("header missing %q parameter", HeaderKeyAlgorithm) + } + + return nil +} + +func decodeHeader(seg string) (JOSEHeader, error) { + b, err := decodeSegment(seg) + if err != nil { + return nil, err + } + + var h JOSEHeader + err = json.Unmarshal(b, &h) + if err != nil { + return nil, err + } + + return h, nil +} + +func encodeHeader(h JOSEHeader) (string, error) { + b, err := json.Marshal(h) + if err != nil { + return "", err + } + + return encodeSegment(b), nil +} + +// Decode JWT specific base64url encoding with padding stripped +func decodeSegment(seg string) ([]byte, error) { + if l := len(seg) % 4; l != 0 { + seg += strings.Repeat("=", 4-l) + } + return base64.URLEncoding.DecodeString(seg) +} + +// Encode JWT specific base64url encoding with padding stripped +func encodeSegment(seg []byte) string { + return strings.TrimRight(base64.URLEncoding.EncodeToString(seg), "=") +} diff --git a/Godeps/_workspace/src/github.com/coreos/go-oidc/jose/jwk.go b/Godeps/_workspace/src/github.com/coreos/go-oidc/jose/jwk.go new file mode 100644 index 000000000000..b7a8e235583a --- /dev/null +++ b/Godeps/_workspace/src/github.com/coreos/go-oidc/jose/jwk.go @@ -0,0 +1,135 @@ +package jose + +import ( + "bytes" + "encoding/base64" + "encoding/binary" + "encoding/json" + "math/big" + "strings" +) + +// JSON Web Key +// https://tools.ietf.org/html/draft-ietf-jose-json-web-key-36#page-5 +type JWK struct { + ID string + Type string + Alg string + Use string + Exponent int + Modulus *big.Int + Secret []byte +} + +type jwkJSON struct { + ID string `json:"kid"` + Type string `json:"kty"` + Alg string `json:"alg"` + Use string `json:"use"` + Exponent string `json:"e"` + Modulus string `json:"n"` +} + +func (j *JWK) MarshalJSON() ([]byte, error) { + t := jwkJSON{ + ID: j.ID, + Type: j.Type, + Alg: j.Alg, + Use: j.Use, + Exponent: encodeExponent(j.Exponent), + Modulus: encodeModulus(j.Modulus), + } + + return json.Marshal(&t) +} + +func (j *JWK) UnmarshalJSON(data []byte) error { + var t jwkJSON + err := json.Unmarshal(data, &t) + if err != nil { + return err + } + + e, err := decodeExponent(t.Exponent) + if err != nil { + return err + } + + n, err := decodeModulus(t.Modulus) + if err != nil { + return err + } + + j.ID = t.ID + j.Type = t.Type + j.Alg = t.Alg + j.Use = t.Use + j.Exponent = e + j.Modulus = n + + return nil +} + +type JWKSet struct { + Keys []JWK `json:"keys"` +} + +func decodeExponent(e string) (int, error) { + decE, err := decodeBase64URLPaddingOptional(e) + if err != nil { + return 0, err + } + var eBytes []byte + if len(decE) < 8 { + eBytes = make([]byte, 8-len(decE), 8) + eBytes = append(eBytes, decE...) + } else { + eBytes = decE + } + eReader := bytes.NewReader(eBytes) + var E uint64 + err = binary.Read(eReader, binary.BigEndian, &E) + if err != nil { + return 0, err + } + return int(E), nil +} + +func encodeExponent(e int) string { + b := make([]byte, 8) + binary.BigEndian.PutUint64(b, uint64(e)) + var idx int + for ; idx < 8; idx++ { + if b[idx] != 0x0 { + break + } + } + return base64.URLEncoding.EncodeToString(b[idx:]) +} + +// Turns a URL encoded modulus of a key into a big int. 
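+// A sketch of the round trip (names hypothetical):
+//
+//	s := encodeModulus(key.PublicKey.N) // big-endian bytes, base64-encoded
+//	n, err := decodeModulus(s)          // on success, n.Cmp(key.PublicKey.N) == 0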
+func decodeModulus(n string) (*big.Int, error) {
+	decN, err := decodeBase64URLPaddingOptional(n)
+	if err != nil {
+		return nil, err
+	}
+	N := big.NewInt(0)
+	N.SetBytes(decN)
+	return N, nil
+}
+
+func encodeModulus(n *big.Int) string {
+	return base64.URLEncoding.EncodeToString(n.Bytes())
+}
+
+// decodeBase64URLPaddingOptional decodes Base64 whether there is padding or not.
+// The stdlib version currently doesn't handle this.
+// We can get rid of this if this bug:
+// https://github.com/golang/go/issues/4237
+// is ever closed.
+func decodeBase64URLPaddingOptional(e string) ([]byte, error) {
+	if m := len(e) % 4; m != 0 {
+		e += strings.Repeat("=", 4-m)
+	}
+	return base64.URLEncoding.DecodeString(e)
+}
diff --git a/Godeps/_workspace/src/github.com/coreos/go-oidc/jose/jws.go b/Godeps/_workspace/src/github.com/coreos/go-oidc/jose/jws.go
new file mode 100644
index 000000000000..1049ece831c5
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/coreos/go-oidc/jose/jws.go
@@ -0,0 +1,51 @@
+package jose
+
+import (
+	"fmt"
+	"strings"
+)
+
+type JWS struct {
+	RawHeader  string
+	Header     JOSEHeader
+	RawPayload string
+	Payload    []byte
+	Signature  []byte
+}
+
+// ParseJWS parses a raw encoded JWS token and verifies its structure.
+func ParseJWS(raw string) (JWS, error) {
+	parts := strings.Split(raw, ".")
+	if len(parts) != 3 {
+		return JWS{}, fmt.Errorf("malformed JWS, only %d segments", len(parts))
+	}
+
+	rawSig := parts[2]
+	jws := JWS{
+		RawHeader:  parts[0],
+		RawPayload: parts[1],
+	}
+
+	header, err := decodeHeader(jws.RawHeader)
+	if err != nil {
+		return JWS{}, fmt.Errorf("malformed JWS, unable to decode header, %s", err)
+	}
+	if err = header.Validate(); err != nil {
+		return JWS{}, fmt.Errorf("malformed JWS, %s", err)
+	}
+	jws.Header = header
+
+	payload, err := decodeSegment(jws.RawPayload)
+	if err != nil {
+		return JWS{}, fmt.Errorf("malformed JWS, unable to decode payload: %s", err)
+	}
+	jws.Payload = payload
+
+	sig, err := decodeSegment(rawSig)
+	if err != nil {
+		return JWS{}, fmt.Errorf("malformed JWS, unable to decode signature: %s", err)
+	}
+	jws.Signature = sig
+
+	return jws, nil
+}
diff --git a/Godeps/_workspace/src/github.com/coreos/go-oidc/jose/jwt.go b/Godeps/_workspace/src/github.com/coreos/go-oidc/jose/jwt.go
new file mode 100644
index 000000000000..3b3e9634b002
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/coreos/go-oidc/jose/jwt.go
@@ -0,0 +1,82 @@
+package jose
+
+import "strings"
+
+type JWT JWS
+
+func ParseJWT(token string) (jwt JWT, err error) {
+	jws, err := ParseJWS(token)
+	if err != nil {
+		return
+	}
+
+	return JWT(jws), nil
+}
+
+func NewJWT(header JOSEHeader, claims Claims) (jwt JWT, err error) {
+	jwt = JWT{}
+
+	jwt.Header = header
+	jwt.Header[HeaderMediaType] = "JWT"
+
+	claimBytes, err := marshalClaims(claims)
+	if err != nil {
+		return
+	}
+	jwt.Payload = claimBytes
+
+	eh, err := encodeHeader(header)
+	if err != nil {
+		return
+	}
+	jwt.RawHeader = eh
+
+	ec, err := encodeClaims(claims)
+	if err != nil {
+		return
+	}
+	jwt.RawPayload = ec
+
+	return
+}
+
+func (j *JWT) KeyID() (string, bool) {
+	kID, ok := j.Header[HeaderKeyID]
+	return kID, ok
+}
+
+func (j *JWT) Claims() (Claims, error) {
+	return decodeClaims(j.Payload)
+}
+
+// Data returns the encoded data part of the token, which may be signed.
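+// For a serialized token "hhh.ppp.sss" (segments hypothetical), Data returns
+// "hhh.ppp", the exact byte string that Signer.Sign and Verifier.Verify consume.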
+func (j *JWT) Data() string { + return strings.Join([]string{j.RawHeader, j.RawPayload}, ".") +} + +// Full encoded JWT token string in format: header.claims.signature +func (j *JWT) Encode() string { + d := j.Data() + s := encodeSegment(j.Signature) + return strings.Join([]string{d, s}, ".") +} + +func NewSignedJWT(claims Claims, s Signer) (*JWT, error) { + header := JOSEHeader{ + HeaderKeyAlgorithm: s.Alg(), + HeaderKeyID: s.ID(), + } + + jwt, err := NewJWT(header, claims) + if err != nil { + return nil, err + } + + sig, err := s.Sign([]byte(jwt.Data())) + if err != nil { + return nil, err + } + jwt.Signature = sig + + return &jwt, nil +} diff --git a/Godeps/_workspace/src/github.com/coreos/go-oidc/jose/sig.go b/Godeps/_workspace/src/github.com/coreos/go-oidc/jose/sig.go new file mode 100644 index 000000000000..7b2b253cca53 --- /dev/null +++ b/Godeps/_workspace/src/github.com/coreos/go-oidc/jose/sig.go @@ -0,0 +1,24 @@ +package jose + +import ( + "fmt" +) + +type Verifier interface { + ID() string + Alg() string + Verify(sig []byte, data []byte) error +} + +type Signer interface { + Verifier + Sign(data []byte) (sig []byte, err error) +} + +func NewVerifier(jwk JWK) (Verifier, error) { + if jwk.Type != "RSA" { + return nil, fmt.Errorf("unsupported key type %q", jwk.Type) + } + + return NewVerifierRSA(jwk) +} diff --git a/Godeps/_workspace/src/github.com/coreos/go-oidc/jose/sig_hmac.go b/Godeps/_workspace/src/github.com/coreos/go-oidc/jose/sig_hmac.go new file mode 100644 index 000000000000..b3ca3ef3d491 --- /dev/null +++ b/Godeps/_workspace/src/github.com/coreos/go-oidc/jose/sig_hmac.go @@ -0,0 +1,67 @@ +package jose + +import ( + "bytes" + "crypto" + "crypto/hmac" + _ "crypto/sha256" + "errors" + "fmt" +) + +type VerifierHMAC struct { + KeyID string + Hash crypto.Hash + Secret []byte +} + +type SignerHMAC struct { + VerifierHMAC +} + +func NewVerifierHMAC(jwk JWK) (*VerifierHMAC, error) { + if jwk.Alg != "" && jwk.Alg != "HS256" { + return nil, fmt.Errorf("unsupported key algorithm %q", jwk.Alg) + } + + v := VerifierHMAC{ + KeyID: jwk.ID, + Secret: jwk.Secret, + Hash: crypto.SHA256, + } + + return &v, nil +} + +func (v *VerifierHMAC) ID() string { + return v.KeyID +} + +func (v *VerifierHMAC) Alg() string { + return "HS256" +} + +func (v *VerifierHMAC) Verify(sig []byte, data []byte) error { + h := hmac.New(v.Hash.New, v.Secret) + h.Write(data) + if !bytes.Equal(sig, h.Sum(nil)) { + return errors.New("invalid hmac signature") + } + return nil +} + +func NewSignerHMAC(kid string, secret []byte) *SignerHMAC { + return &SignerHMAC{ + VerifierHMAC: VerifierHMAC{ + KeyID: kid, + Secret: secret, + Hash: crypto.SHA256, + }, + } +} + +func (s *SignerHMAC) Sign(data []byte) ([]byte, error) { + h := hmac.New(s.Hash.New, s.Secret) + h.Write(data) + return h.Sum(nil), nil +} diff --git a/Godeps/_workspace/src/github.com/coreos/go-oidc/jose/sig_rsa.go b/Godeps/_workspace/src/github.com/coreos/go-oidc/jose/sig_rsa.go new file mode 100644 index 000000000000..004e45dd835c --- /dev/null +++ b/Godeps/_workspace/src/github.com/coreos/go-oidc/jose/sig_rsa.go @@ -0,0 +1,67 @@ +package jose + +import ( + "crypto" + "crypto/rand" + "crypto/rsa" + "fmt" +) + +type VerifierRSA struct { + KeyID string + Hash crypto.Hash + PublicKey rsa.PublicKey +} + +type SignerRSA struct { + PrivateKey rsa.PrivateKey + VerifierRSA +} + +func NewVerifierRSA(jwk JWK) (*VerifierRSA, error) { + if jwk.Alg != "" && jwk.Alg != "RS256" { + return nil, fmt.Errorf("unsupported key algorithm %q", jwk.Alg) + } + + v := VerifierRSA{ + 
KeyID: jwk.ID, + PublicKey: rsa.PublicKey{ + N: jwk.Modulus, + E: jwk.Exponent, + }, + Hash: crypto.SHA256, + } + + return &v, nil +} + +func NewSignerRSA(kid string, key rsa.PrivateKey) *SignerRSA { + return &SignerRSA{ + PrivateKey: key, + VerifierRSA: VerifierRSA{ + KeyID: kid, + PublicKey: key.PublicKey, + Hash: crypto.SHA256, + }, + } +} + +func (v *VerifierRSA) ID() string { + return v.KeyID +} + +func (v *VerifierRSA) Alg() string { + return "RS256" +} + +func (v *VerifierRSA) Verify(sig []byte, data []byte) error { + h := v.Hash.New() + h.Write(data) + return rsa.VerifyPKCS1v15(&v.PublicKey, v.Hash, h.Sum(nil), sig) +} + +func (s *SignerRSA) Sign(data []byte) ([]byte, error) { + h := s.Hash.New() + h.Write(data) + return rsa.SignPKCS1v15(rand.Reader, &s.PrivateKey, s.Hash, h.Sum(nil)) +} diff --git a/Godeps/_workspace/src/github.com/coreos/go-oidc/key/key.go b/Godeps/_workspace/src/github.com/coreos/go-oidc/key/key.go new file mode 100644 index 000000000000..d0142a9e0e08 --- /dev/null +++ b/Godeps/_workspace/src/github.com/coreos/go-oidc/key/key.go @@ -0,0 +1,153 @@ +package key + +import ( + "crypto/rand" + "crypto/rsa" + "encoding/base64" + "encoding/json" + "math/big" + "time" + + "github.com/coreos/go-oidc/jose" +) + +func NewPublicKey(jwk jose.JWK) *PublicKey { + return &PublicKey{jwk: jwk} +} + +type PublicKey struct { + jwk jose.JWK +} + +func (k *PublicKey) MarshalJSON() ([]byte, error) { + return json.Marshal(&k.jwk) +} + +func (k *PublicKey) UnmarshalJSON(data []byte) error { + var jwk jose.JWK + if err := json.Unmarshal(data, &jwk); err != nil { + return err + } + k.jwk = jwk + return nil +} + +func (k *PublicKey) ID() string { + return k.jwk.ID +} + +func (k *PublicKey) Verifier() (jose.Verifier, error) { + return jose.NewVerifierRSA(k.jwk) +} + +type PrivateKey struct { + KeyID string + PrivateKey *rsa.PrivateKey +} + +func (k *PrivateKey) ID() string { + return k.KeyID +} + +func (k *PrivateKey) Signer() jose.Signer { + return jose.NewSignerRSA(k.ID(), *k.PrivateKey) +} + +func (k *PrivateKey) JWK() jose.JWK { + return jose.JWK{ + ID: k.KeyID, + Type: "RSA", + Alg: "RS256", + Use: "sig", + Exponent: k.PrivateKey.PublicKey.E, + Modulus: k.PrivateKey.PublicKey.N, + } +} + +type KeySet interface { + ExpiresAt() time.Time +} + +type PublicKeySet struct { + keys []PublicKey + index map[string]*PublicKey + expiresAt time.Time +} + +func NewPublicKeySet(jwks []jose.JWK, exp time.Time) *PublicKeySet { + keys := make([]PublicKey, len(jwks)) + index := make(map[string]*PublicKey) + for i, jwk := range jwks { + keys[i] = *NewPublicKey(jwk) + index[keys[i].ID()] = &keys[i] + } + return &PublicKeySet{ + keys: keys, + index: index, + expiresAt: exp, + } +} + +func (s *PublicKeySet) ExpiresAt() time.Time { + return s.expiresAt +} + +func (s *PublicKeySet) Keys() []PublicKey { + return s.keys +} + +func (s *PublicKeySet) Key(id string) *PublicKey { + return s.index[id] +} + +type PrivateKeySet struct { + keys []*PrivateKey + ActiveKeyID string + expiresAt time.Time +} + +func NewPrivateKeySet(keys []*PrivateKey, exp time.Time) *PrivateKeySet { + return &PrivateKeySet{ + keys: keys, + ActiveKeyID: keys[0].ID(), + expiresAt: exp.UTC(), + } +} + +func (s *PrivateKeySet) Keys() []*PrivateKey { + return s.keys +} + +func (s *PrivateKeySet) ExpiresAt() time.Time { + return s.expiresAt +} + +func (s *PrivateKeySet) Active() *PrivateKey { + for i, k := range s.keys { + if k.ID() == s.ActiveKeyID { + return s.keys[i] + } + } + + return nil +} + +type GeneratePrivateKeyFunc func() 
(*PrivateKey, error) + +func GeneratePrivateKey() (*PrivateKey, error) { + pk, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + return nil, err + } + + k := PrivateKey{ + KeyID: base64BigInt(pk.PublicKey.N), + PrivateKey: pk, + } + + return &k, nil +} + +func base64BigInt(b *big.Int) string { + return base64.URLEncoding.EncodeToString(b.Bytes()) +} diff --git a/Godeps/_workspace/src/github.com/coreos/go-oidc/key/manager.go b/Godeps/_workspace/src/github.com/coreos/go-oidc/key/manager.go new file mode 100644 index 000000000000..476ab6a8d2b9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/coreos/go-oidc/key/manager.go @@ -0,0 +1,99 @@ +package key + +import ( + "errors" + "time" + + "github.com/jonboulle/clockwork" + + "github.com/coreos/go-oidc/jose" + "github.com/coreos/pkg/health" +) + +type PrivateKeyManager interface { + ExpiresAt() time.Time + Signer() (jose.Signer, error) + JWKs() ([]jose.JWK, error) + PublicKeys() ([]PublicKey, error) + + WritableKeySetRepo + health.Checkable +} + +func NewPrivateKeyManager() PrivateKeyManager { + return &privateKeyManager{ + clock: clockwork.NewRealClock(), + } +} + +type privateKeyManager struct { + keySet *PrivateKeySet + clock clockwork.Clock +} + +func (m *privateKeyManager) ExpiresAt() time.Time { + if m.keySet == nil { + return m.clock.Now().UTC() + } + + return m.keySet.ExpiresAt() +} + +func (m *privateKeyManager) Signer() (jose.Signer, error) { + if err := m.Healthy(); err != nil { + return nil, err + } + + return m.keySet.Active().Signer(), nil +} + +func (m *privateKeyManager) JWKs() ([]jose.JWK, error) { + if err := m.Healthy(); err != nil { + return nil, err + } + + keys := m.keySet.Keys() + jwks := make([]jose.JWK, len(keys)) + for i, k := range keys { + jwks[i] = k.JWK() + } + return jwks, nil +} + +func (m *privateKeyManager) PublicKeys() ([]PublicKey, error) { + jwks, err := m.JWKs() + if err != nil { + return nil, err + } + keys := make([]PublicKey, len(jwks)) + for i, jwk := range jwks { + keys[i] = *NewPublicKey(jwk) + } + return keys, nil +} + +func (m *privateKeyManager) Healthy() error { + if m.keySet == nil { + return errors.New("private key manager uninitialized") + } + + if len(m.keySet.Keys()) == 0 { + return errors.New("private key manager zero keys") + } + + if m.keySet.ExpiresAt().Before(m.clock.Now().UTC()) { + return errors.New("private key manager keys expired") + } + + return nil +} + +func (m *privateKeyManager) Set(keySet KeySet) error { + privKeySet, ok := keySet.(*PrivateKeySet) + if !ok { + return errors.New("unable to cast to PrivateKeySet") + } + + m.keySet = privKeySet + return nil +} diff --git a/Godeps/_workspace/src/github.com/coreos/go-oidc/key/repo.go b/Godeps/_workspace/src/github.com/coreos/go-oidc/key/repo.go new file mode 100644 index 000000000000..1acdeb3614c5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/coreos/go-oidc/key/repo.go @@ -0,0 +1,55 @@ +package key + +import ( + "errors" + "sync" +) + +var ErrorNoKeys = errors.New("no keys found") + +type WritableKeySetRepo interface { + Set(KeySet) error +} + +type ReadableKeySetRepo interface { + Get() (KeySet, error) +} + +type PrivateKeySetRepo interface { + WritableKeySetRepo + ReadableKeySetRepo +} + +func NewPrivateKeySetRepo() PrivateKeySetRepo { + return &memPrivateKeySetRepo{} +} + +type memPrivateKeySetRepo struct { + mu sync.RWMutex + pks PrivateKeySet +} + +func (r *memPrivateKeySetRepo) Set(ks KeySet) error { + pks, ok := ks.(*PrivateKeySet) + if !ok { + return errors.New("unable to cast to PrivateKeySet") + } 
else if pks == nil {
+		return errors.New("nil KeySet")
+	}
+
+	r.mu.Lock()
+	defer r.mu.Unlock()
+
+	r.pks = *pks
+	return nil
+}
+
+func (r *memPrivateKeySetRepo) Get() (KeySet, error) {
+	r.mu.RLock()
+	defer r.mu.RUnlock()
+
+	if r.pks.keys == nil {
+		return nil, ErrorNoKeys
+	}
+	return KeySet(&r.pks), nil
+}
diff --git a/Godeps/_workspace/src/github.com/coreos/go-oidc/key/rotate.go b/Godeps/_workspace/src/github.com/coreos/go-oidc/key/rotate.go
new file mode 100644
index 000000000000..9c5508bc1f87
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/coreos/go-oidc/key/rotate.go
@@ -0,0 +1,165 @@
+package key
+
+import (
+	"errors"
+	"time"
+
+	"github.com/coreos/pkg/capnslog"
+	ptime "github.com/coreos/pkg/timeutil"
+	"github.com/jonboulle/clockwork"
+)
+
+var (
+	log = capnslog.NewPackageLogger("github.com/coreos/go-oidc", "key")
+
+	ErrorPrivateKeysExpired = errors.New("private keys have expired")
+)
+
+func NewPrivateKeyRotator(repo PrivateKeySetRepo, ttl time.Duration) *PrivateKeyRotator {
+	return &PrivateKeyRotator{
+		repo: repo,
+		ttl:  ttl,
+
+		keep:        2,
+		generateKey: GeneratePrivateKey,
+		clock:       clockwork.NewRealClock(),
+	}
+}
+
+type PrivateKeyRotator struct {
+	repo        PrivateKeySetRepo
+	generateKey GeneratePrivateKeyFunc
+	clock       clockwork.Clock
+	keep        int
+	ttl         time.Duration
+}
+
+func (r *PrivateKeyRotator) expiresAt() time.Time {
+	return r.clock.Now().UTC().Add(r.ttl)
+}
+
+func (r *PrivateKeyRotator) Healthy() error {
+	pks, err := r.privateKeySet()
+	if err != nil {
+		return err
+	}
+
+	if r.clock.Now().After(pks.ExpiresAt()) {
+		return ErrorPrivateKeysExpired
+	}
+
+	return nil
+}
+
+func (r *PrivateKeyRotator) privateKeySet() (*PrivateKeySet, error) {
+	ks, err := r.repo.Get()
+	if err != nil {
+		return nil, err
+	}
+
+	pks, ok := ks.(*PrivateKeySet)
+	if !ok {
+		return nil, errors.New("unable to cast to PrivateKeySet")
+	}
+	return pks, nil
+}
+
+func (r *PrivateKeyRotator) nextRotation() (time.Duration, error) {
+	pks, err := r.privateKeySet()
+	if err == ErrorNoKeys {
+		log.Infof("No keys in private key set; must rotate immediately")
+		return 0, nil
+	}
+	if err != nil {
+		return 0, err
+	}
+
+	now := r.clock.Now()
+
+	// Ideally, we want to rotate after half the TTL has elapsed.
+	idealRotationTime := pks.ExpiresAt().Add(-r.ttl / 2)
+
+	// If we are past the ideal rotation time, rotate immediately.
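+	// (For instance, with ttl=24h a key set expiring at time T has an ideal
+	// rotation time of T-12h; once now passes that point, max returns 0.)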
+ return max(0, idealRotationTime.Sub(now)), nil +} + +func max(a, b time.Duration) time.Duration { + if a > b { + return a + } + return b +} + +func (r *PrivateKeyRotator) Run() chan struct{} { + attempt := func() { + k, err := r.generateKey() + if err != nil { + log.Errorf("Failed generating signing key: %v", err) + return + } + + exp := r.expiresAt() + if err := rotatePrivateKeys(r.repo, k, r.keep, exp); err != nil { + log.Errorf("Failed key rotation: %v", err) + return + } + + log.Infof("Rotated signing keys: id=%s expiresAt=%s", k.ID(), exp) + } + + stop := make(chan struct{}) + go func() { + for { + var nextRotation time.Duration + var sleep time.Duration + var err error + for { + if nextRotation, err = r.nextRotation(); err == nil { + break + } + sleep = ptime.ExpBackoff(sleep, time.Minute) + log.Errorf("error getting nextRotation, retrying in %v: %v", sleep, err) + time.Sleep(sleep) + } + + log.Infof("will rotate keys in %v", nextRotation) + select { + case <-r.clock.After(nextRotation): + attempt() + case <-stop: + return + } + } + }() + + return stop +} + +func rotatePrivateKeys(repo PrivateKeySetRepo, k *PrivateKey, keep int, exp time.Time) error { + ks, err := repo.Get() + if err != nil && err != ErrorNoKeys { + return err + } + + var keys []*PrivateKey + if ks != nil { + pks, ok := ks.(*PrivateKeySet) + if !ok { + return errors.New("unable to cast to PrivateKeySet") + } + keys = pks.Keys() + } + + keys = append([]*PrivateKey{k}, keys...) + if l := len(keys); l > keep { + keys = keys[0:keep] + } + + nks := PrivateKeySet{ + keys: keys, + ActiveKeyID: k.ID(), + expiresAt: exp, + } + + return repo.Set(KeySet(&nks)) +} diff --git a/Godeps/_workspace/src/github.com/coreos/go-oidc/key/sync.go b/Godeps/_workspace/src/github.com/coreos/go-oidc/key/sync.go new file mode 100644 index 000000000000..e8d5d03d881d --- /dev/null +++ b/Godeps/_workspace/src/github.com/coreos/go-oidc/key/sync.go @@ -0,0 +1,91 @@ +package key + +import ( + "errors" + "time" + + "github.com/jonboulle/clockwork" + + "github.com/coreos/pkg/timeutil" +) + +func NewKeySetSyncer(r ReadableKeySetRepo, w WritableKeySetRepo) *KeySetSyncer { + return &KeySetSyncer{ + readable: r, + writable: w, + clock: clockwork.NewRealClock(), + } +} + +type KeySetSyncer struct { + readable ReadableKeySetRepo + writable WritableKeySetRepo + clock clockwork.Clock +} + +func (s *KeySetSyncer) Run() chan struct{} { + stop := make(chan struct{}) + go func() { + var failing bool + var next time.Duration + for { + exp, err := syncKeySet(s.readable, s.writable, s.clock) + if err != nil || exp == 0 { + if !failing { + failing = true + next = time.Second + } else { + next = timeutil.ExpBackoff(next, time.Minute) + } + if exp == 0 { + log.Errorf("Synced to already expired key set, retrying in %v: %v", next, err) + + } else { + log.Errorf("Failed syncing key set, retrying in %v: %v", next, err) + } + } else { + failing = false + next = exp / 2 + log.Infof("Synced key set, checking again in %v", next) + } + + select { + case <-s.clock.After(next): + continue + case <-stop: + return + } + } + }() + + return stop +} + +func Sync(r ReadableKeySetRepo, w WritableKeySetRepo) (time.Duration, error) { + return syncKeySet(r, w, clockwork.NewRealClock()) +} + +// syncKeySet copies the keyset from r to the KeySet at w and returns the duration in which the KeySet will expire. +// If keyset has already expired, returns a zero duration. 
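+// A minimal sketch, assuming remote and cache implement the repo interfaces:
+//
+//	exp, err := syncKeySet(remote, cache, clockwork.NewRealClock())
+//	// Run schedules the next sync roughly exp/2 from now.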
+func syncKeySet(r ReadableKeySetRepo, w WritableKeySetRepo, clock clockwork.Clock) (exp time.Duration, err error) { + var ks KeySet + ks, err = r.Get() + if err != nil { + return + } + + if ks == nil { + err = errors.New("no source KeySet") + return + } + + if err = w.Set(ks); err != nil { + return + } + + now := clock.Now() + if ks.ExpiresAt().After(now) { + exp = ks.ExpiresAt().Sub(now) + } + return +} diff --git a/Godeps/_workspace/src/github.com/coreos/go-oidc/oauth2/error.go b/Godeps/_workspace/src/github.com/coreos/go-oidc/oauth2/error.go new file mode 100644 index 000000000000..50d890949a23 --- /dev/null +++ b/Godeps/_workspace/src/github.com/coreos/go-oidc/oauth2/error.go @@ -0,0 +1,29 @@ +package oauth2 + +const ( + ErrorAccessDenied = "access_denied" + ErrorInvalidClient = "invalid_client" + ErrorInvalidGrant = "invalid_grant" + ErrorInvalidRequest = "invalid_request" + ErrorServerError = "server_error" + ErrorUnauthorizedClient = "unauthorized_client" + ErrorUnsupportedGrantType = "unsupported_grant_type" + ErrorUnsupportedResponseType = "unsupported_response_type" +) + +type Error struct { + Type string `json:"error"` + Description string `json:"error_description,omitempty"` + State string `json:"state,omitempty"` +} + +func (e *Error) Error() string { + if e.Description != "" { + return e.Type + ": " + e.Description + } + return e.Type +} + +func NewError(typ string) *Error { + return &Error{Type: typ} +} diff --git a/Godeps/_workspace/src/github.com/coreos/go-oidc/oauth2/oauth2.go b/Godeps/_workspace/src/github.com/coreos/go-oidc/oauth2/oauth2.go new file mode 100644 index 000000000000..1c68293a0a8c --- /dev/null +++ b/Godeps/_workspace/src/github.com/coreos/go-oidc/oauth2/oauth2.go @@ -0,0 +1,416 @@ +package oauth2 + +import ( + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "mime" + "net/http" + "net/url" + "sort" + "strconv" + "strings" + + phttp "github.com/coreos/go-oidc/http" +) + +// ResponseTypesEqual compares two response_type values. If either +// contains a space, it is treated as an unordered list. For example, +// comparing "code id_token" and "id_token code" would evaluate to true. +func ResponseTypesEqual(r1, r2 string) bool { + if !strings.Contains(r1, " ") || !strings.Contains(r2, " ") { + // fast route, no split needed + return r1 == r2 + } + + // split, sort, and compare + r1Fields := strings.Fields(r1) + r2Fields := strings.Fields(r2) + if len(r1Fields) != len(r2Fields) { + return false + } + sort.Strings(r1Fields) + sort.Strings(r2Fields) + for i, r1Field := range r1Fields { + if r1Field != r2Fields[i] { + return false + } + } + return true +} + +const ( + // OAuth2.0 response types registered by OIDC. 
+ // + // See: https://openid.net/specs/oauth-v2-multiple-response-types-1_0.html#RegistryContents + ResponseTypeCode = "code" + ResponseTypeCodeIDToken = "code id_token" + ResponseTypeCodeIDTokenToken = "code id_token token" + ResponseTypeIDToken = "id_token" + ResponseTypeIDTokenToken = "id_token token" + ResponseTypeToken = "token" + ResponseTypeNone = "none" +) + +const ( + GrantTypeAuthCode = "authorization_code" + GrantTypeClientCreds = "client_credentials" + GrantTypeUserCreds = "password" + GrantTypeImplicit = "implicit" + GrantTypeRefreshToken = "refresh_token" + + AuthMethodClientSecretPost = "client_secret_post" + AuthMethodClientSecretBasic = "client_secret_basic" + AuthMethodClientSecretJWT = "client_secret_jwt" + AuthMethodPrivateKeyJWT = "private_key_jwt" +) + +type Config struct { + Credentials ClientCredentials + Scope []string + RedirectURL string + AuthURL string + TokenURL string + + // Must be one of the AuthMethodXXX methods above. Right now, only + // AuthMethodClientSecretPost and AuthMethodClientSecretBasic are supported. + AuthMethod string +} + +type Client struct { + hc phttp.Client + creds ClientCredentials + scope []string + authURL *url.URL + redirectURL *url.URL + tokenURL *url.URL + authMethod string +} + +type ClientCredentials struct { + ID string + Secret string +} + +func NewClient(hc phttp.Client, cfg Config) (c *Client, err error) { + if len(cfg.Credentials.ID) == 0 { + err = errors.New("missing client id") + return + } + + if len(cfg.Credentials.Secret) == 0 { + err = errors.New("missing client secret") + return + } + + if cfg.AuthMethod == "" { + cfg.AuthMethod = AuthMethodClientSecretBasic + } else if cfg.AuthMethod != AuthMethodClientSecretPost && cfg.AuthMethod != AuthMethodClientSecretBasic { + err = fmt.Errorf("auth method %q is not supported", cfg.AuthMethod) + return + } + + au, err := phttp.ParseNonEmptyURL(cfg.AuthURL) + if err != nil { + return + } + + tu, err := phttp.ParseNonEmptyURL(cfg.TokenURL) + if err != nil { + return + } + + // Allow empty redirect URL in the case where the client + // only needs to verify a given token. + ru, err := url.Parse(cfg.RedirectURL) + if err != nil { + return + } + + c = &Client{ + creds: cfg.Credentials, + scope: cfg.Scope, + redirectURL: ru, + authURL: au, + tokenURL: tu, + hc: hc, + authMethod: cfg.AuthMethod, + } + + return +} + +// Return the embedded HTTP client +func (c *Client) HttpClient() phttp.Client { + return c.hc +} + +// Generate the url for initial redirect to oauth provider. 
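+// A hypothetical call for the standard code flow:
+//
+//	u := c.AuthCodeURL("random-state", "offline", "consent")
+//	// u includes response_type=code, client_id, redirect_uri, scope and state.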
+func (c *Client) AuthCodeURL(state, accessType, prompt string) string { + v := c.commonURLValues() + v.Set("state", state) + if strings.ToLower(accessType) == "offline" { + v.Set("access_type", "offline") + } + + if prompt != "" { + v.Set("prompt", prompt) + } + v.Set("response_type", "code") + + q := v.Encode() + u := *c.authURL + if u.RawQuery == "" { + u.RawQuery = q + } else { + u.RawQuery += "&" + q + } + return u.String() +} + +func (c *Client) commonURLValues() url.Values { + return url.Values{ + "redirect_uri": {c.redirectURL.String()}, + "scope": {strings.Join(c.scope, " ")}, + "client_id": {c.creds.ID}, + } +} + +func (c *Client) newAuthenticatedRequest(urlToken string, values url.Values) (*http.Request, error) { + var req *http.Request + var err error + switch c.authMethod { + case AuthMethodClientSecretPost: + values.Set("client_secret", c.creds.Secret) + req, err = http.NewRequest("POST", urlToken, strings.NewReader(values.Encode())) + if err != nil { + return nil, err + } + case AuthMethodClientSecretBasic: + req, err = http.NewRequest("POST", urlToken, strings.NewReader(values.Encode())) + if err != nil { + return nil, err + } + encodedID := url.QueryEscape(c.creds.ID) + encodedSecret := url.QueryEscape(c.creds.Secret) + req.SetBasicAuth(encodedID, encodedSecret) + default: + panic("misconfigured client: auth method not supported") + } + + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + return req, nil + +} + +// ClientCredsToken posts the client id and secret to obtain a token scoped to the OAuth2 client via the "client_credentials" grant type. +// May not be supported by all OAuth2 servers. +func (c *Client) ClientCredsToken(scope []string) (result TokenResponse, err error) { + v := url.Values{ + "scope": {strings.Join(scope, " ")}, + "grant_type": {GrantTypeClientCreds}, + } + + req, err := c.newAuthenticatedRequest(c.tokenURL.String(), v) + if err != nil { + return + } + + resp, err := c.hc.Do(req) + if err != nil { + return + } + defer resp.Body.Close() + + return parseTokenResponse(resp) +} + +// UserCredsToken posts the username and password to obtain a token scoped to the OAuth2 client via the "password" grant_type +// May not be supported by all OAuth2 servers. +func (c *Client) UserCredsToken(username, password string) (result TokenResponse, err error) { + v := url.Values{ + "scope": {strings.Join(c.scope, " ")}, + "grant_type": {GrantTypeUserCreds}, + "username": {username}, + "password": {password}, + } + + req, err := c.newAuthenticatedRequest(c.tokenURL.String(), v) + if err != nil { + return + } + + resp, err := c.hc.Do(req) + if err != nil { + return + } + defer resp.Body.Close() + + return parseTokenResponse(resp) +} + +// RequestToken requests a token from the Token Endpoint with the specified grantType. +// If 'grantType' == GrantTypeAuthCode, then 'value' should be the authorization code. +// If 'grantType' == GrantTypeRefreshToken, then 'value' should be the refresh token. 
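+// For example (values hypothetical):
+//
+//	tok, err := c.RequestToken(GrantTypeAuthCode, code)
+//	tok, err = c.RequestToken(GrantTypeRefreshToken, tok.RefreshToken)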
+func (c *Client) RequestToken(grantType, value string) (result TokenResponse, err error) { + v := c.commonURLValues() + + v.Set("grant_type", grantType) + v.Set("client_secret", c.creds.Secret) + switch grantType { + case GrantTypeAuthCode: + v.Set("code", value) + case GrantTypeRefreshToken: + v.Set("refresh_token", value) + default: + err = fmt.Errorf("unsupported grant_type: %v", grantType) + return + } + + req, err := c.newAuthenticatedRequest(c.tokenURL.String(), v) + if err != nil { + return + } + + resp, err := c.hc.Do(req) + if err != nil { + return + } + defer resp.Body.Close() + + return parseTokenResponse(resp) +} + +func parseTokenResponse(resp *http.Response) (result TokenResponse, err error) { + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return + } + badStatusCode := resp.StatusCode < 200 || resp.StatusCode > 299 + + contentType, _, err := mime.ParseMediaType(resp.Header.Get("Content-Type")) + if err != nil { + return + } + + result = TokenResponse{ + RawBody: body, + } + + newError := func(typ, desc, state string) error { + if typ == "" { + return fmt.Errorf("unrecognized error %s", body) + } + return &Error{typ, desc, state} + } + + if contentType == "application/x-www-form-urlencoded" || contentType == "text/plain" { + var vals url.Values + vals, err = url.ParseQuery(string(body)) + if err != nil { + return + } + if error := vals.Get("error"); error != "" || badStatusCode { + err = newError(error, vals.Get("error_description"), vals.Get("state")) + return + } + e := vals.Get("expires_in") + if e == "" { + e = vals.Get("expires") + } + if e != "" { + result.Expires, err = strconv.Atoi(e) + if err != nil { + return + } + } + result.AccessToken = vals.Get("access_token") + result.TokenType = vals.Get("token_type") + result.IDToken = vals.Get("id_token") + result.RefreshToken = vals.Get("refresh_token") + result.Scope = vals.Get("scope") + } else { + var r struct { + AccessToken string `json:"access_token"` + TokenType string `json:"token_type"` + IDToken string `json:"id_token"` + RefreshToken string `json:"refresh_token"` + Scope string `json:"scope"` + State string `json:"state"` + ExpiresIn int `json:"expires_in"` + Expires int `json:"expires"` + Error string `json:"error"` + Desc string `json:"error_description"` + } + if err = json.Unmarshal(body, &r); err != nil { + return + } + if r.Error != "" || badStatusCode { + err = newError(r.Error, r.Desc, r.State) + return + } + result.AccessToken = r.AccessToken + result.TokenType = r.TokenType + result.IDToken = r.IDToken + result.RefreshToken = r.RefreshToken + result.Scope = r.Scope + if r.ExpiresIn == 0 { + result.Expires = r.Expires + } else { + result.Expires = r.ExpiresIn + } + } + return +} + +type TokenResponse struct { + AccessToken string + TokenType string + Expires int + IDToken string + RefreshToken string // OPTIONAL. + Scope string // OPTIONAL, if identical to the scope requested by the client, otherwise, REQUIRED. 
+ RawBody []byte // In case callers need some other non-standard info from the token response +} + +type AuthCodeRequest struct { + ResponseType string + ClientID string + RedirectURL *url.URL + Scope []string + State string +} + +func ParseAuthCodeRequest(q url.Values) (AuthCodeRequest, error) { + acr := AuthCodeRequest{ + ResponseType: q.Get("response_type"), + ClientID: q.Get("client_id"), + State: q.Get("state"), + Scope: make([]string, 0), + } + + qs := strings.TrimSpace(q.Get("scope")) + if qs != "" { + acr.Scope = strings.Split(qs, " ") + } + + err := func() error { + if acr.ClientID == "" { + return NewError(ErrorInvalidRequest) + } + + redirectURL := q.Get("redirect_uri") + if redirectURL != "" { + ru, err := url.Parse(redirectURL) + if err != nil { + return NewError(ErrorInvalidRequest) + } + acr.RedirectURL = ru + } + + return nil + }() + + return acr, err +} diff --git a/Godeps/_workspace/src/github.com/coreos/go-oidc/oidc/client.go b/Godeps/_workspace/src/github.com/coreos/go-oidc/oidc/client.go new file mode 100644 index 000000000000..7a3cb40f6450 --- /dev/null +++ b/Godeps/_workspace/src/github.com/coreos/go-oidc/oidc/client.go @@ -0,0 +1,846 @@ +package oidc + +import ( + "encoding/json" + "errors" + "fmt" + "net/http" + "net/mail" + "net/url" + "sync" + "time" + + phttp "github.com/coreos/go-oidc/http" + "github.com/coreos/go-oidc/jose" + "github.com/coreos/go-oidc/key" + "github.com/coreos/go-oidc/oauth2" +) + +const ( + // amount of time that must pass after the last key sync + // completes before another attempt may begin + keySyncWindow = 5 * time.Second +) + +var ( + DefaultScope = []string{"openid", "email", "profile"} + + supportedAuthMethods = map[string]struct{}{ + oauth2.AuthMethodClientSecretBasic: struct{}{}, + oauth2.AuthMethodClientSecretPost: struct{}{}, + } +) + +type ClientCredentials oauth2.ClientCredentials + +type ClientIdentity struct { + Credentials ClientCredentials + Metadata ClientMetadata +} + +type JWAOptions struct { + // SigningAlg specifies an JWA alg for signing JWTs. + // + // Specifying this field implies different actions depending on the context. It may + // require objects be serialized and signed as a JWT instead of plain JSON, or + // require an existing JWT object use the specified alg. + // + // See: http://openid.net/specs/openid-connect-registration-1_0.html#ClientMetadata + SigningAlg string + // EncryptionAlg, if provided, specifies that the returned or sent object be stored + // (or nested) within a JWT object and encrypted with the provided JWA alg. + EncryptionAlg string + // EncryptionEnc specifies the JWA enc algorithm to use with EncryptionAlg. If + // EncryptionAlg is provided and EncryptionEnc is omitted, this field defaults + // to A128CBC-HS256. + // + // If EncryptionEnc is provided EncryptionAlg must also be specified. + EncryptionEnc string +} + +func (opt JWAOptions) valid() error { + if opt.EncryptionEnc != "" && opt.EncryptionAlg == "" { + return errors.New("encryption encoding provided with no encryption algorithm") + } + return nil +} + +func (opt JWAOptions) defaults() JWAOptions { + if opt.EncryptionAlg != "" && opt.EncryptionEnc == "" { + opt.EncryptionEnc = jose.EncA128CBCHS256 + } + return opt +} + +var ( + // Ensure ClientMetadata satisfies these interfaces. + _ json.Marshaler = &ClientMetadata{} + _ json.Unmarshaler = &ClientMetadata{} +) + +// ClientMetadata holds metadata that the authorization server associates +// with a client identifier. 
+// strings such as client name, to items that impact the security of the
+// protocol, such as the list of valid redirect URIs.
+//
+// See http://openid.net/specs/openid-connect-registration-1_0.html#ClientMetadata
+//
+// TODO: support language specific claim representations
+// http://openid.net/specs/openid-connect-registration-1_0.html#LanguagesAndScripts
+type ClientMetadata struct {
+	RedirectURIs []url.URL // Required
+
+	// A list of OAuth 2.0 "response_type" values that the client wishes to restrict
+	// itself to. Either "code", "token", or another registered extension.
+	//
+	// If omitted, only "code" will be used.
+	ResponseTypes []string
+	// A list of OAuth 2.0 grant types the client wishes to restrict itself to.
+	// The grant type values used by OIDC are "authorization_code", "implicit",
+	// and "refresh_token".
+	//
+	// If omitted, only "authorization_code" will be used.
+	GrantTypes []string
+	// "native" or "web". If omitted, "web".
+	ApplicationType string
+
+	// List of email addresses.
+	Contacts []mail.Address
+	// Name of client to be presented to the end-user.
+	ClientName string
+	// URL that references a logo for the Client application.
+	LogoURI *url.URL
+	// URL of the home page of the Client.
+	ClientURI *url.URL
+	// Profile data policies and terms of use to be provided to the end user.
+	PolicyURI         *url.URL
+	TermsOfServiceURI *url.URL
+
+	// URL to or the value of the client's JSON Web Key Set document.
+	JWKSURI *url.URL
+	JWKS    *jose.JWKSet
+
+	// URL referencing a file with a single JSON array of redirect URIs.
+	SectorIdentifierURI *url.URL
+
+	SubjectType string
+
+	// Options to restrict the JWS alg and enc values used for server responses and requests.
+	IDTokenResponseOptions  JWAOptions
+	UserInfoResponseOptions JWAOptions
+	RequestObjectOptions    JWAOptions
+
+	// Client requested authorization method and signing options for the token endpoint.
+	//
+	// Defaults to "client_secret_basic"
+	TokenEndpointAuthMethod     string
+	TokenEndpointAuthSigningAlg string
+
+	// DefaultMaxAge specifies the maximum amount of time in seconds before an authorized
+	// user must reauthorize.
+	//
+	// If 0, no limitation is placed on the maximum.
+	DefaultMaxAge int64
+	// RequireAuthTime specifies if the auth_time claim in the ID token is required.
+	RequireAuthTime bool
+
+	// Default Authentication Context Class Reference values for authentication requests.
+	DefaultACRValues []string
+
+	// URI that a third party can use to initiate a login by the relying party.
+	//
+	// See: http://openid.net/specs/openid-connect-core-1_0.html#ThirdPartyInitiatedLogin
+	InitiateLoginURI *url.URL
+	// Pre-registered request_uri values that may be cached by the server.
+	RequestURIs []url.URL
+}
+
+// Defaults returns a shallow copy of ClientMetadata with default
+// values replacing omitted fields.
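+//
+// A minimal usage sketch (the field values are illustrative, not required;
+// redirectURL is an assumed url.URL variable):
+//
+//	meta := oidc.ClientMetadata{
+//		RedirectURIs: []url.URL{redirectURL},
+//	}
+//	// Fill client metadata with default values for omitted fields.
+//	meta = meta.Defaults()
+//	// meta.ResponseTypes is now []string{"code"}, meta.ApplicationType is "web", etc.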
+func (m ClientMetadata) Defaults() ClientMetadata { + if len(m.ResponseTypes) == 0 { + m.ResponseTypes = []string{oauth2.ResponseTypeCode} + } + if len(m.GrantTypes) == 0 { + m.GrantTypes = []string{oauth2.GrantTypeAuthCode} + } + if m.ApplicationType == "" { + m.ApplicationType = "web" + } + if m.TokenEndpointAuthMethod == "" { + m.TokenEndpointAuthMethod = oauth2.AuthMethodClientSecretBasic + } + m.IDTokenResponseOptions = m.IDTokenResponseOptions.defaults() + m.UserInfoResponseOptions = m.UserInfoResponseOptions.defaults() + m.RequestObjectOptions = m.RequestObjectOptions.defaults() + return m +} + +func (m *ClientMetadata) MarshalJSON() ([]byte, error) { + e := m.toEncodableStruct() + return json.Marshal(&e) +} + +func (m *ClientMetadata) UnmarshalJSON(data []byte) error { + var e encodableClientMetadata + if err := json.Unmarshal(data, &e); err != nil { + return err + } + meta, err := e.toStruct() + if err != nil { + return err + } + if err := meta.Valid(); err != nil { + return err + } + *m = meta + return nil +} + +type encodableClientMetadata struct { + RedirectURIs []string `json:"redirect_uris"` // Required + ResponseTypes []string `json:"response_types,omitempty"` + GrantTypes []string `json:"grant_types,omitempty"` + ApplicationType string `json:"application_type,omitempty"` + Contacts []string `json:"contacts,omitempty"` + ClientName string `json:"client_name,omitempty"` + LogoURI string `json:"logo_uri,omitempty"` + ClientURI string `json:"client_uri,omitempty"` + PolicyURI string `json:"policy_uri,omitempty"` + TermsOfServiceURI string `json:"tos_uri,omitempty"` + JWKSURI string `json:"jwks_uri,omitempty"` + JWKS *jose.JWKSet `json:"jwks,omitempty"` + SectorIdentifierURI string `json:"sector_identifier_uri,omitempty"` + SubjectType string `json:"subject_type,omitempty"` + IDTokenSignedResponseAlg string `json:"id_token_signed_response_alg,omitempty"` + IDTokenEncryptedResponseAlg string `json:"id_token_encrypted_response_alg,omitempty"` + IDTokenEncryptedResponseEnc string `json:"id_token_encrypted_response_enc,omitempty"` + UserInfoSignedResponseAlg string `json:"userinfo_signed_response_alg,omitempty"` + UserInfoEncryptedResponseAlg string `json:"userinfo_encrypted_response_alg,omitempty"` + UserInfoEncryptedResponseEnc string `json:"userinfo_encrypted_response_enc,omitempty"` + RequestObjectSigningAlg string `json:"request_object_signing_alg,omitempty"` + RequestObjectEncryptionAlg string `json:"request_object_encryption_alg,omitempty"` + RequestObjectEncryptionEnc string `json:"request_object_encryption_enc,omitempty"` + TokenEndpointAuthMethod string `json:"token_endpoint_auth_method,omitempty"` + TokenEndpointAuthSigningAlg string `json:"token_endpoint_auth_signing_alg,omitempty"` + DefaultMaxAge int64 `json:"default_max_age,omitempty"` + RequireAuthTime bool `json:"require_auth_time,omitempty"` + DefaultACRValues []string `json:"default_acr_values,omitempty"` + InitiateLoginURI string `json:"initiate_login_uri,omitempty"` + RequestURIs []string `json:"request_uris,omitempty"` +} + +func (c *encodableClientMetadata) toStruct() (ClientMetadata, error) { + p := stickyErrParser{} + m := ClientMetadata{ + RedirectURIs: p.parseURIs(c.RedirectURIs, "redirect_uris"), + ResponseTypes: c.ResponseTypes, + GrantTypes: c.GrantTypes, + ApplicationType: c.ApplicationType, + Contacts: p.parseEmails(c.Contacts, "contacts"), + ClientName: c.ClientName, + LogoURI: p.parseURI(c.LogoURI, "logo_uri"), + ClientURI: p.parseURI(c.ClientURI, "client_uri"), + PolicyURI: p.parseURI(c.PolicyURI, 
"policy_uri"), + TermsOfServiceURI: p.parseURI(c.TermsOfServiceURI, "tos_uri"), + JWKSURI: p.parseURI(c.JWKSURI, "jwks_uri"), + JWKS: c.JWKS, + SectorIdentifierURI: p.parseURI(c.SectorIdentifierURI, "sector_identifier_uri"), + SubjectType: c.SubjectType, + TokenEndpointAuthMethod: c.TokenEndpointAuthMethod, + TokenEndpointAuthSigningAlg: c.TokenEndpointAuthSigningAlg, + DefaultMaxAge: c.DefaultMaxAge, + RequireAuthTime: c.RequireAuthTime, + DefaultACRValues: c.DefaultACRValues, + InitiateLoginURI: p.parseURI(c.InitiateLoginURI, "initiate_login_uri"), + RequestURIs: p.parseURIs(c.RequestURIs, "request_uris"), + IDTokenResponseOptions: JWAOptions{ + c.IDTokenSignedResponseAlg, + c.IDTokenEncryptedResponseAlg, + c.IDTokenEncryptedResponseEnc, + }, + UserInfoResponseOptions: JWAOptions{ + c.UserInfoSignedResponseAlg, + c.UserInfoEncryptedResponseAlg, + c.UserInfoEncryptedResponseEnc, + }, + RequestObjectOptions: JWAOptions{ + c.RequestObjectSigningAlg, + c.RequestObjectEncryptionAlg, + c.RequestObjectEncryptionEnc, + }, + } + if p.firstErr != nil { + return ClientMetadata{}, p.firstErr + } + return m, nil +} + +// stickyErrParser parses URIs and email addresses. Once it encounters +// a parse error, subsequent calls become no-op. +type stickyErrParser struct { + firstErr error +} + +func (p *stickyErrParser) parseURI(s, field string) *url.URL { + if p.firstErr != nil || s == "" { + return nil + } + u, err := url.Parse(s) + if err == nil { + if u.Host == "" { + err = errors.New("no host in URI") + } else if u.Scheme != "http" && u.Scheme != "https" { + err = errors.New("invalid URI scheme") + } + } + if err != nil { + p.firstErr = fmt.Errorf("failed to parse %s: %v", field, err) + return nil + } + return u +} + +func (p *stickyErrParser) parseURIs(s []string, field string) []url.URL { + if p.firstErr != nil || len(s) == 0 { + return nil + } + uris := make([]url.URL, len(s)) + for i, val := range s { + if val == "" { + p.firstErr = fmt.Errorf("invalid URI in field %s", field) + return nil + } + if u := p.parseURI(val, field); u != nil { + uris[i] = *u + } + } + return uris +} + +func (p *stickyErrParser) parseEmails(s []string, field string) []mail.Address { + if p.firstErr != nil || len(s) == 0 { + return nil + } + addrs := make([]mail.Address, len(s)) + for i, addr := range s { + if addr == "" { + p.firstErr = fmt.Errorf("invalid email in field %s", field) + return nil + } + a, err := mail.ParseAddress(addr) + if err != nil { + p.firstErr = fmt.Errorf("invalid email in field %s: %v", field, err) + return nil + } + addrs[i] = *a + } + return addrs +} + +func (m *ClientMetadata) toEncodableStruct() encodableClientMetadata { + return encodableClientMetadata{ + RedirectURIs: urisToStrings(m.RedirectURIs), + ResponseTypes: m.ResponseTypes, + GrantTypes: m.GrantTypes, + ApplicationType: m.ApplicationType, + Contacts: emailsToStrings(m.Contacts), + ClientName: m.ClientName, + LogoURI: uriToString(m.LogoURI), + ClientURI: uriToString(m.ClientURI), + PolicyURI: uriToString(m.PolicyURI), + TermsOfServiceURI: uriToString(m.TermsOfServiceURI), + JWKSURI: uriToString(m.JWKSURI), + JWKS: m.JWKS, + SectorIdentifierURI: uriToString(m.SectorIdentifierURI), + SubjectType: m.SubjectType, + IDTokenSignedResponseAlg: m.IDTokenResponseOptions.SigningAlg, + IDTokenEncryptedResponseAlg: m.IDTokenResponseOptions.EncryptionAlg, + IDTokenEncryptedResponseEnc: m.IDTokenResponseOptions.EncryptionEnc, + UserInfoSignedResponseAlg: m.UserInfoResponseOptions.SigningAlg, + UserInfoEncryptedResponseAlg: 
m.UserInfoResponseOptions.EncryptionAlg, + UserInfoEncryptedResponseEnc: m.UserInfoResponseOptions.EncryptionEnc, + RequestObjectSigningAlg: m.RequestObjectOptions.SigningAlg, + RequestObjectEncryptionAlg: m.RequestObjectOptions.EncryptionAlg, + RequestObjectEncryptionEnc: m.RequestObjectOptions.EncryptionEnc, + TokenEndpointAuthMethod: m.TokenEndpointAuthMethod, + TokenEndpointAuthSigningAlg: m.TokenEndpointAuthSigningAlg, + DefaultMaxAge: m.DefaultMaxAge, + RequireAuthTime: m.RequireAuthTime, + DefaultACRValues: m.DefaultACRValues, + InitiateLoginURI: uriToString(m.InitiateLoginURI), + RequestURIs: urisToStrings(m.RequestURIs), + } +} + +func uriToString(u *url.URL) string { + if u == nil { + return "" + } + return u.String() +} + +func urisToStrings(urls []url.URL) []string { + if len(urls) == 0 { + return nil + } + sli := make([]string, len(urls)) + for i, u := range urls { + sli[i] = u.String() + } + return sli +} + +func emailsToStrings(addrs []mail.Address) []string { + if len(addrs) == 0 { + return nil + } + sli := make([]string, len(addrs)) + for i, addr := range addrs { + sli[i] = addr.String() + } + return sli +} + +// Valid determines if a ClientMetadata conforms with the OIDC specification. +// +// Valid is called by UnmarshalJSON. +// +// NOTE(ericchiang): For development purposes Valid does not mandate 'https' for +// URLs fields where the OIDC spec requires it. This may change in future releases +// of this package. See: https://github.com/coreos/go-oidc/issues/34 +func (m *ClientMetadata) Valid() error { + if len(m.RedirectURIs) == 0 { + return errors.New("zero redirect URLs") + } + + validURI := func(u *url.URL, fieldName string) error { + if u.Host == "" { + return fmt.Errorf("no host for uri field %s", fieldName) + } + if u.Scheme != "http" && u.Scheme != "https" { + return fmt.Errorf("uri field %s scheme is not http or https", fieldName) + } + return nil + } + + uris := []struct { + val *url.URL + name string + }{ + {m.LogoURI, "logo_uri"}, + {m.ClientURI, "client_uri"}, + {m.PolicyURI, "policy_uri"}, + {m.TermsOfServiceURI, "tos_uri"}, + {m.JWKSURI, "jwks_uri"}, + {m.SectorIdentifierURI, "sector_identifier_uri"}, + {m.InitiateLoginURI, "initiate_login_uri"}, + } + + for _, uri := range uris { + if uri.val == nil { + continue + } + if err := validURI(uri.val, uri.name); err != nil { + return err + } + } + + uriLists := []struct { + vals []url.URL + name string + }{ + {m.RedirectURIs, "redirect_uris"}, + {m.RequestURIs, "request_uris"}, + } + for _, list := range uriLists { + for _, uri := range list.vals { + if err := validURI(&uri, list.name); err != nil { + return err + } + } + } + + options := []struct { + option JWAOptions + name string + }{ + {m.IDTokenResponseOptions, "id_token response"}, + {m.UserInfoResponseOptions, "userinfo response"}, + {m.RequestObjectOptions, "request_object"}, + } + for _, option := range options { + if err := option.option.valid(); err != nil { + return fmt.Errorf("invalid JWA values for %s: %v", option.name, err) + } + } + return nil +} + +type ClientRegistrationResponse struct { + ClientID string // Required + ClientSecret string + RegistrationAccessToken string + RegistrationClientURI string + // If IsZero is true, unspecified. + ClientIDIssuedAt time.Time + // Time at which the client_secret will expire. + // If IsZero is true, it will not expire. 
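+	// A caller might guard on it like so (sketch only; resp is a hypothetical
+	// ClientRegistrationResponse value):
+	//
+	//	if !resp.ClientSecretExpiresAt.IsZero() && time.Now().After(resp.ClientSecretExpiresAt) {
+	//		// the registered client_secret has expired; re-register or rotate it
+	//	}
+	//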
+ ClientSecretExpiresAt time.Time + + ClientMetadata +} + +type encodableClientRegistrationResponse struct { + ClientID string `json:"client_id"` // Required + ClientSecret string `json:"client_secret,omitempty"` + RegistrationAccessToken string `json:"registration_access_token,omitempty"` + RegistrationClientURI string `json:"registration_client_uri,omitempty"` + ClientIDIssuedAt int64 `json:"client_id_issued_at,omitempty"` + // Time at which the client_secret will expire, in seconds since the epoch. + // If 0 it will not expire. + ClientSecretExpiresAt int64 `json:"client_secret_expires_at"` // Required + + encodableClientMetadata +} + +func unixToSec(t time.Time) int64 { + if t.IsZero() { + return 0 + } + return t.Unix() +} + +func (c *ClientRegistrationResponse) MarshalJSON() ([]byte, error) { + e := encodableClientRegistrationResponse{ + ClientID: c.ClientID, + ClientSecret: c.ClientSecret, + RegistrationAccessToken: c.RegistrationAccessToken, + RegistrationClientURI: c.RegistrationClientURI, + ClientIDIssuedAt: unixToSec(c.ClientIDIssuedAt), + ClientSecretExpiresAt: unixToSec(c.ClientSecretExpiresAt), + encodableClientMetadata: c.ClientMetadata.toEncodableStruct(), + } + return json.Marshal(&e) +} + +func secToUnix(sec int64) time.Time { + if sec == 0 { + return time.Time{} + } + return time.Unix(sec, 0) +} + +func (c *ClientRegistrationResponse) UnmarshalJSON(data []byte) error { + var e encodableClientRegistrationResponse + if err := json.Unmarshal(data, &e); err != nil { + return err + } + if e.ClientID == "" { + return errors.New("no client_id in client registration response") + } + metadata, err := e.encodableClientMetadata.toStruct() + if err != nil { + return err + } + *c = ClientRegistrationResponse{ + ClientID: e.ClientID, + ClientSecret: e.ClientSecret, + RegistrationAccessToken: e.RegistrationAccessToken, + RegistrationClientURI: e.RegistrationClientURI, + ClientIDIssuedAt: secToUnix(e.ClientIDIssuedAt), + ClientSecretExpiresAt: secToUnix(e.ClientSecretExpiresAt), + ClientMetadata: metadata, + } + return nil +} + +type ClientConfig struct { + HTTPClient phttp.Client + Credentials ClientCredentials + Scope []string + RedirectURL string + ProviderConfig ProviderConfig + KeySet key.PublicKeySet +} + +func NewClient(cfg ClientConfig) (*Client, error) { + // Allow empty redirect URL in the case where the client + // only needs to verify a given token. 
+ ru, err := url.Parse(cfg.RedirectURL) + if err != nil { + return nil, fmt.Errorf("invalid redirect URL: %v", err) + } + + c := Client{ + credentials: cfg.Credentials, + httpClient: cfg.HTTPClient, + scope: cfg.Scope, + redirectURL: ru.String(), + providerConfig: newProviderConfigRepo(cfg.ProviderConfig), + keySet: cfg.KeySet, + } + + if c.httpClient == nil { + c.httpClient = http.DefaultClient + } + + if c.scope == nil { + c.scope = make([]string, len(DefaultScope)) + copy(c.scope, DefaultScope) + } + + return &c, nil +} + +type Client struct { + httpClient phttp.Client + providerConfig *providerConfigRepo + credentials ClientCredentials + redirectURL string + scope []string + keySet key.PublicKeySet + providerSyncer *ProviderConfigSyncer + + keySetSyncMutex sync.RWMutex + lastKeySetSync time.Time +} + +func (c *Client) Healthy() error { + now := time.Now().UTC() + + cfg := c.providerConfig.Get() + + if cfg.Empty() { + return errors.New("oidc client provider config empty") + } + + if !cfg.ExpiresAt.IsZero() && cfg.ExpiresAt.Before(now) { + return errors.New("oidc client provider config expired") + } + + return nil +} + +func (c *Client) OAuthClient() (*oauth2.Client, error) { + cfg := c.providerConfig.Get() + authMethod, err := chooseAuthMethod(cfg) + if err != nil { + return nil, err + } + + ocfg := oauth2.Config{ + Credentials: oauth2.ClientCredentials(c.credentials), + RedirectURL: c.redirectURL, + AuthURL: cfg.AuthEndpoint.String(), + TokenURL: cfg.TokenEndpoint.String(), + Scope: c.scope, + AuthMethod: authMethod, + } + + return oauth2.NewClient(c.httpClient, ocfg) +} + +func chooseAuthMethod(cfg ProviderConfig) (string, error) { + if len(cfg.TokenEndpointAuthMethodsSupported) == 0 { + return oauth2.AuthMethodClientSecretBasic, nil + } + + for _, authMethod := range cfg.TokenEndpointAuthMethodsSupported { + if _, ok := supportedAuthMethods[authMethod]; ok { + return authMethod, nil + } + } + + return "", errors.New("no supported auth methods") +} + +// SyncProviderConfig starts the provider config syncer +func (c *Client) SyncProviderConfig(discoveryURL string) chan struct{} { + r := NewHTTPProviderConfigGetter(c.httpClient, discoveryURL) + s := NewProviderConfigSyncer(r, c.providerConfig) + stop := s.Run() + s.WaitUntilInitialSync() + return stop +} + +func (c *Client) maybeSyncKeys() error { + tooSoon := func() bool { + return time.Now().UTC().Before(c.lastKeySetSync.Add(keySyncWindow)) + } + + // ignore request to sync keys if a sync operation has been + // attempted too recently + if tooSoon() { + return nil + } + + c.keySetSyncMutex.Lock() + defer c.keySetSyncMutex.Unlock() + + // check again, as another goroutine may have been holding + // the lock while updating the keys + if tooSoon() { + return nil + } + + cfg := c.providerConfig.Get() + r := NewRemotePublicKeyRepo(c.httpClient, cfg.KeysEndpoint.String()) + w := &clientKeyRepo{client: c} + _, err := key.Sync(r, w) + c.lastKeySetSync = time.Now().UTC() + + return err +} + +type clientKeyRepo struct { + client *Client +} + +func (r *clientKeyRepo) Set(ks key.KeySet) error { + pks, ok := ks.(*key.PublicKeySet) + if !ok { + return errors.New("unable to cast to PublicKey") + } + r.client.keySet = *pks + return nil +} + +func (c *Client) ClientCredsToken(scope []string) (jose.JWT, error) { + cfg := c.providerConfig.Get() + + if !cfg.SupportsGrantType(oauth2.GrantTypeClientCreds) { + return jose.JWT{}, fmt.Errorf("%v grant type is not supported", oauth2.GrantTypeClientCreds) + } + + oac, err := c.OAuthClient() + if err != nil { + 
return jose.JWT{}, err
+	}
+
+	t, err := oac.ClientCredsToken(scope)
+	if err != nil {
+		return jose.JWT{}, err
+	}
+
+	jwt, err := jose.ParseJWT(t.IDToken)
+	if err != nil {
+		return jose.JWT{}, err
+	}
+
+	return jwt, c.VerifyJWT(jwt)
+}
+
+// ExchangeAuthCode exchanges an OAuth2 auth code for an OIDC JWT ID token.
+func (c *Client) ExchangeAuthCode(code string) (jose.JWT, error) {
+	oac, err := c.OAuthClient()
+	if err != nil {
+		return jose.JWT{}, err
+	}
+
+	t, err := oac.RequestToken(oauth2.GrantTypeAuthCode, code)
+	if err != nil {
+		return jose.JWT{}, err
+	}
+
+	jwt, err := jose.ParseJWT(t.IDToken)
+	if err != nil {
+		return jose.JWT{}, err
+	}
+
+	return jwt, c.VerifyJWT(jwt)
+}
+
+// RefreshToken uses a refresh token to exchange for a new OIDC JWT ID Token.
+func (c *Client) RefreshToken(refreshToken string) (jose.JWT, error) {
+	oac, err := c.OAuthClient()
+	if err != nil {
+		return jose.JWT{}, err
+	}
+
+	t, err := oac.RequestToken(oauth2.GrantTypeRefreshToken, refreshToken)
+	if err != nil {
+		return jose.JWT{}, err
+	}
+
+	jwt, err := jose.ParseJWT(t.IDToken)
+	if err != nil {
+		return jose.JWT{}, err
+	}
+
+	return jwt, c.VerifyJWT(jwt)
+}
+
+func (c *Client) VerifyJWT(jwt jose.JWT) error {
+	var keysFunc func() []key.PublicKey
+	if kID, ok := jwt.KeyID(); ok {
+		keysFunc = c.keysFuncWithID(kID)
+	} else {
+		keysFunc = c.keysFuncAll()
+	}
+
+	v := NewJWTVerifier(
+		c.providerConfig.Get().Issuer.String(),
+		c.credentials.ID,
+		c.maybeSyncKeys, keysFunc)
+
+	return v.Verify(jwt)
+}
+
+// keysFuncWithID returns a function that retrieves at most one unexpired
+// public key from the Client that matches the provided ID.
+func (c *Client) keysFuncWithID(kID string) func() []key.PublicKey {
+	return func() []key.PublicKey {
+		c.keySetSyncMutex.RLock()
+		defer c.keySetSyncMutex.RUnlock()
+
+		if c.keySet.ExpiresAt().Before(time.Now()) {
+			return []key.PublicKey{}
+		}
+
+		k := c.keySet.Key(kID)
+		if k == nil {
+			return []key.PublicKey{}
+		}
+
+		return []key.PublicKey{*k}
+	}
+}
+
+// keysFuncAll returns a function that retrieves all unexpired public
+// keys from the Client.
+func (c *Client) keysFuncAll() func() []key.PublicKey {
+	return func() []key.PublicKey {
+		c.keySetSyncMutex.RLock()
+		defer c.keySetSyncMutex.RUnlock()
+
+		if c.keySet.ExpiresAt().Before(time.Now()) {
+			return []key.PublicKey{}
+		}
+
+		return c.keySet.Keys()
+	}
+}
+
+type providerConfigRepo struct {
+	mu     sync.RWMutex
+	config ProviderConfig // do not access directly, use Get()
+}
+
+func newProviderConfigRepo(pc ProviderConfig) *providerConfigRepo {
+	return &providerConfigRepo{sync.RWMutex{}, pc}
+}
+
+// returns an error to implement ProviderConfigSetter
+func (r *providerConfigRepo) Set(cfg ProviderConfig) error {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	r.config = cfg
+	return nil
+}
+
+func (r *providerConfigRepo) Get() ProviderConfig {
+	r.mu.RLock()
+	defer r.mu.RUnlock()
+	return r.config
+}
diff --git a/Godeps/_workspace/src/github.com/coreos/go-oidc/oidc/identity.go b/Godeps/_workspace/src/github.com/coreos/go-oidc/oidc/identity.go
new file mode 100644
index 000000000000..9bfa8e343994
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/coreos/go-oidc/oidc/identity.go
@@ -0,0 +1,44 @@
+package oidc
+
+import (
+	"errors"
+	"time"
+
+	"github.com/coreos/go-oidc/jose"
+)
+
+type Identity struct {
+	ID        string
+	Name      string
+	Email     string
+	ExpiresAt time.Time
+}
+
+func IdentityFromClaims(claims jose.Claims) (*Identity, error) {
+	if claims == nil {
+		return nil, errors.New("nil claim set")
+	}
+
+	var ident
Identity + var err error + var ok bool + + if ident.ID, ok, err = claims.StringClaim("sub"); err != nil { + return nil, err + } else if !ok { + return nil, errors.New("missing required claim: sub") + } + + if ident.Email, _, err = claims.StringClaim("email"); err != nil { + return nil, err + } + + exp, ok, err := claims.TimeClaim("exp") + if err != nil { + return nil, err + } else if ok { + ident.ExpiresAt = exp + } + + return &ident, nil +} diff --git a/Godeps/_workspace/src/github.com/coreos/go-oidc/oidc/interface.go b/Godeps/_workspace/src/github.com/coreos/go-oidc/oidc/interface.go new file mode 100644 index 000000000000..248cac0b4dff --- /dev/null +++ b/Godeps/_workspace/src/github.com/coreos/go-oidc/oidc/interface.go @@ -0,0 +1,3 @@ +package oidc + +type LoginFunc func(ident Identity, sessionKey string) (redirectURL string, err error) diff --git a/Godeps/_workspace/src/github.com/coreos/go-oidc/oidc/key.go b/Godeps/_workspace/src/github.com/coreos/go-oidc/oidc/key.go new file mode 100644 index 000000000000..82a0f567d577 --- /dev/null +++ b/Godeps/_workspace/src/github.com/coreos/go-oidc/oidc/key.go @@ -0,0 +1,67 @@ +package oidc + +import ( + "encoding/json" + "errors" + "net/http" + "time" + + phttp "github.com/coreos/go-oidc/http" + "github.com/coreos/go-oidc/jose" + "github.com/coreos/go-oidc/key" +) + +// DefaultPublicKeySetTTL is the default TTL set on the PublicKeySet if no +// Cache-Control header is provided by the JWK Set document endpoint. +const DefaultPublicKeySetTTL = 24 * time.Hour + +// NewRemotePublicKeyRepo is responsible for fetching the JWK Set document. +func NewRemotePublicKeyRepo(hc phttp.Client, ep string) *remotePublicKeyRepo { + return &remotePublicKeyRepo{hc: hc, ep: ep} +} + +type remotePublicKeyRepo struct { + hc phttp.Client + ep string +} + +// Get returns a PublicKeySet fetched from the JWK Set document endpoint. A TTL +// is set on the Key Set to avoid it having to be re-retrieved for every +// encryption event. This TTL is typically controlled by the endpoint returning +// a Cache-Control header, but defaults to 24 hours if no Cache-Control header +// is found. 
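+//
+// A rough usage sketch (http.DefaultClient satisfies phttp.Client; the
+// endpoint URL is illustrative):
+//
+//	repo := oidc.NewRemotePublicKeyRepo(http.DefaultClient, "https://issuer.example.com/keys")
+//	ks, err := repo.Get()
+//	if err != nil {
+//		// handle fetch/decode error
+//	}
+//	_ = ks // a *key.PublicKeySet whose expiration reflects the TTL above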
+func (r *remotePublicKeyRepo) Get() (key.KeySet, error) { + req, err := http.NewRequest("GET", r.ep, nil) + if err != nil { + return nil, err + } + + resp, err := r.hc.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var d struct { + Keys []jose.JWK `json:"keys"` + } + if err := json.NewDecoder(resp.Body).Decode(&d); err != nil { + return nil, err + } + + if len(d.Keys) == 0 { + return nil, errors.New("zero keys in response") + } + + ttl, ok, err := phttp.Cacheable(resp.Header) + if err != nil { + return nil, err + } + if !ok { + ttl = DefaultPublicKeySetTTL + } + + exp := time.Now().UTC().Add(ttl) + ks := key.NewPublicKeySet(d.Keys, exp) + return ks, nil +} diff --git a/Godeps/_workspace/src/github.com/coreos/go-oidc/oidc/provider.go b/Godeps/_workspace/src/github.com/coreos/go-oidc/oidc/provider.go new file mode 100644 index 000000000000..1235890c0c2d --- /dev/null +++ b/Godeps/_workspace/src/github.com/coreos/go-oidc/oidc/provider.go @@ -0,0 +1,688 @@ +package oidc + +import ( + "encoding/json" + "errors" + "fmt" + "net/http" + "net/url" + "strings" + "sync" + "time" + + "github.com/coreos/pkg/capnslog" + "github.com/coreos/pkg/timeutil" + "github.com/jonboulle/clockwork" + + phttp "github.com/coreos/go-oidc/http" + "github.com/coreos/go-oidc/oauth2" +) + +var ( + log = capnslog.NewPackageLogger("github.com/coreos/go-oidc", "http") +) + +const ( + // Subject Identifier types defined by the OIDC spec. Specifies if the provider + // should provide the same sub claim value to all clients (public) or a unique + // value for each client (pairwise). + // + // See: http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes + SubjectTypePublic = "public" + SubjectTypePairwise = "pairwise" +) + +var ( + // Default values for omitted provider config fields. + // + // Use ProviderConfig's Defaults method to fill a provider config with these values. + DefaultGrantTypesSupported = []string{oauth2.GrantTypeAuthCode, oauth2.GrantTypeImplicit} + DefaultResponseModesSupported = []string{"query", "fragment"} + DefaultTokenEndpointAuthMethodsSupported = []string{oauth2.AuthMethodClientSecretBasic} + DefaultClaimTypesSupported = []string{"normal"} +) + +const ( + MaximumProviderConfigSyncInterval = 24 * time.Hour + MinimumProviderConfigSyncInterval = time.Minute + + discoveryConfigPath = "/.well-known/openid-configuration" +) + +// internally configurable for tests +var minimumProviderConfigSyncInterval = MinimumProviderConfigSyncInterval + +var ( + // Ensure ProviderConfig satisfies these interfaces. + _ json.Marshaler = &ProviderConfig{} + _ json.Unmarshaler = &ProviderConfig{} +) + +// ProviderConfig represents the OpenID Provider Metadata specifying what +// configurations a provider supports. +// +// See: http://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata +type ProviderConfig struct { + Issuer *url.URL // Required + AuthEndpoint *url.URL // Required + TokenEndpoint *url.URL // Required if grant types other than "implicit" are supported + UserInfoEndpoint *url.URL + KeysEndpoint *url.URL // Required + RegistrationEndpoint *url.URL + + // Servers MAY choose not to advertise some supported scope values even when this + // parameter is used, although those defined in OpenID Core SHOULD be listed, if supported. + ScopesSupported []string + // OAuth2.0 response types supported. + ResponseTypesSupported []string // Required + // OAuth2.0 response modes supported. + // + // If omitted, defaults to DefaultResponseModesSupported. 
+	ResponseModesSupported []string
+	// OAuth2.0 grant types supported.
+	//
+	// If omitted, defaults to DefaultGrantTypesSupported.
+	GrantTypesSupported []string
+	ACRValuesSupported  []string
+	// SubjectTypesSupported specifies strategies for providing values for the sub claim.
+	SubjectTypesSupported []string // Required
+
+	// JWA signing and encryption algorithm values supported for ID tokens.
+	IDTokenSigningAlgValues    []string // Required
+	IDTokenEncryptionAlgValues []string
+	IDTokenEncryptionEncValues []string
+
+	// JWA signing and encryption algorithm values supported for user info responses.
+	UserInfoSigningAlgValues    []string
+	UserInfoEncryptionAlgValues []string
+	UserInfoEncryptionEncValues []string
+
+	// JWA signing and encryption algorithm values supported for request objects.
+	ReqObjSigningAlgValues    []string
+	ReqObjEncryptionAlgValues []string
+	ReqObjEncryptionEncValues []string
+
+	TokenEndpointAuthMethodsSupported          []string
+	TokenEndpointAuthSigningAlgValuesSupported []string
+	DisplayValuesSupported                     []string
+	ClaimTypesSupported                        []string
+	ClaimsSupported                            []string
+	ServiceDocs                                *url.URL
+	ClaimsLocalsSupported                      []string
+	UILocalsSupported                          []string
+	ClaimsParameterSupported                   bool
+	RequestParameterSupported                  bool
+	RequestURIParamaterSupported               bool
+	RequireRequestURIRegistration              bool
+
+	Policy         *url.URL
+	TermsOfService *url.URL
+
+	// Not part of the OpenID Provider Metadata
+	ExpiresAt time.Time
+}
+
+// Defaults returns a shallow copy of ProviderConfig with default
+// values replacing omitted fields.
+//
+//	var cfg oidc.ProviderConfig
+//	// Fill provider config with default values for omitted fields.
+//	cfg = cfg.Defaults()
+//
+func (p ProviderConfig) Defaults() ProviderConfig {
+	setDefault := func(val *[]string, defaultVal []string) {
+		if len(*val) == 0 {
+			*val = defaultVal
+		}
+	}
+	setDefault(&p.GrantTypesSupported, DefaultGrantTypesSupported)
+	setDefault(&p.ResponseModesSupported, DefaultResponseModesSupported)
+	setDefault(&p.TokenEndpointAuthMethodsSupported, DefaultTokenEndpointAuthMethodsSupported)
+	setDefault(&p.ClaimTypesSupported, DefaultClaimTypesSupported)
+	return p
+}
+
+func (p *ProviderConfig) MarshalJSON() ([]byte, error) {
+	e := p.toEncodableStruct()
+	return json.Marshal(&e)
+}
+
+func (p *ProviderConfig) UnmarshalJSON(data []byte) error {
+	var e encodableProviderConfig
+	if err := json.Unmarshal(data, &e); err != nil {
+		return err
+	}
+	conf, err := e.toStruct()
+	if err != nil {
+		return err
+	}
+	if err := conf.Valid(); err != nil {
+		return err
+	}
+	*p = conf
+	return nil
+}
+
+type encodableProviderConfig struct {
+	Issuer               string `json:"issuer"`
+	AuthEndpoint         string `json:"authorization_endpoint"`
+	TokenEndpoint        string `json:"token_endpoint"`
+	UserInfoEndpoint     string `json:"userinfo_endpoint,omitempty"`
+	KeysEndpoint         string `json:"jwks_uri"`
+	RegistrationEndpoint string `json:"registration_endpoint,omitempty"`
+
+	// Use 'omitempty' for all slices as per OIDC spec:
+	// "Claims that return multiple values are represented as JSON arrays.
+	// Claims with zero elements MUST be omitted from the response."
+ // http://openid.net/specs/openid-connect-discovery-1_0.html#ProviderConfigurationResponse + + ScopesSupported []string `json:"scopes_supported,omitempty"` + ResponseTypesSupported []string `json:"response_types_supported,omitempty"` + ResponseModesSupported []string `json:"response_modes_supported,omitempty"` + GrantTypesSupported []string `json:"grant_types_supported,omitempty"` + ACRValuesSupported []string `json:"acr_values_supported,omitempty"` + SubjectTypesSupported []string `json:"subject_types_supported,omitempty"` + + IDTokenSigningAlgValues []string `json:"id_token_signing_alg_values_supported,omitempty"` + IDTokenEncryptionAlgValues []string `json:"id_token_encryption_alg_values_supported,omitempty"` + IDTokenEncryptionEncValues []string `json:"id_token_encryption_enc_values_supported,omitempty"` + UserInfoSigningAlgValues []string `json:"userinfo_signing_alg_values_supported,omitempty"` + UserInfoEncryptionAlgValues []string `json:"userinfo_encryption_alg_values_supported,omitempty"` + UserInfoEncryptionEncValues []string `json:"userinfo_encryption_enc_values_supported,omitempty"` + ReqObjSigningAlgValues []string `json:"request_object_signing_alg_values_supported,omitempty"` + ReqObjEncryptionAlgValues []string `json:"request_object_encryption_alg_values_supported,omitempty"` + ReqObjEncryptionEncValues []string `json:"request_object_encryption_enc_values_supported,omitempty"` + + TokenEndpointAuthMethodsSupported []string `json:"token_endpoint_auth_methods_supported,omitempty"` + TokenEndpointAuthSigningAlgValuesSupported []string `json:"token_endpoint_auth_signing_alg_values_supported,omitempty"` + + DisplayValuesSupported []string `json:"display_values_supported,omitempty"` + ClaimTypesSupported []string `json:"claim_types_supported,omitempty"` + ClaimsSupported []string `json:"claims_supported,omitempty"` + ServiceDocs string `json:"service_documentation,omitempty"` + ClaimsLocalsSupported []string `json:"claims_locales_supported,omitempty"` + UILocalsSupported []string `json:"ui_locales_supported,omitempty"` + ClaimsParameterSupported bool `json:"claims_parameter_supported,omitempty"` + RequestParameterSupported bool `json:"request_parameter_supported,omitempty"` + RequestURIParamaterSupported bool `json:"request_uri_parameter_supported,omitempty"` + RequireRequestURIRegistration bool `json:"require_request_uri_registration,omitempty"` + + Policy string `json:"op_policy_uri,omitempty"` + TermsOfService string `json:"op_tos_uri,omitempty"` +} + +func (cfg ProviderConfig) toEncodableStruct() encodableProviderConfig { + return encodableProviderConfig{ + Issuer: uriToString(cfg.Issuer), + AuthEndpoint: uriToString(cfg.AuthEndpoint), + TokenEndpoint: uriToString(cfg.TokenEndpoint), + UserInfoEndpoint: uriToString(cfg.UserInfoEndpoint), + KeysEndpoint: uriToString(cfg.KeysEndpoint), + RegistrationEndpoint: uriToString(cfg.RegistrationEndpoint), + ScopesSupported: cfg.ScopesSupported, + ResponseTypesSupported: cfg.ResponseTypesSupported, + ResponseModesSupported: cfg.ResponseModesSupported, + GrantTypesSupported: cfg.GrantTypesSupported, + ACRValuesSupported: cfg.ACRValuesSupported, + SubjectTypesSupported: cfg.SubjectTypesSupported, + IDTokenSigningAlgValues: cfg.IDTokenSigningAlgValues, + IDTokenEncryptionAlgValues: cfg.IDTokenEncryptionAlgValues, + IDTokenEncryptionEncValues: cfg.IDTokenEncryptionEncValues, + UserInfoSigningAlgValues: cfg.UserInfoSigningAlgValues, + UserInfoEncryptionAlgValues: cfg.UserInfoEncryptionAlgValues, + UserInfoEncryptionEncValues: 
cfg.UserInfoEncryptionEncValues,
+		ReqObjSigningAlgValues:            cfg.ReqObjSigningAlgValues,
+		ReqObjEncryptionAlgValues:         cfg.ReqObjEncryptionAlgValues,
+		ReqObjEncryptionEncValues:         cfg.ReqObjEncryptionEncValues,
+		TokenEndpointAuthMethodsSupported: cfg.TokenEndpointAuthMethodsSupported,
+		TokenEndpointAuthSigningAlgValuesSupported: cfg.TokenEndpointAuthSigningAlgValuesSupported,
+		DisplayValuesSupported:        cfg.DisplayValuesSupported,
+		ClaimTypesSupported:           cfg.ClaimTypesSupported,
+		ClaimsSupported:               cfg.ClaimsSupported,
+		ServiceDocs:                   uriToString(cfg.ServiceDocs),
+		ClaimsLocalsSupported:         cfg.ClaimsLocalsSupported,
+		UILocalsSupported:             cfg.UILocalsSupported,
+		ClaimsParameterSupported:      cfg.ClaimsParameterSupported,
+		RequestParameterSupported:     cfg.RequestParameterSupported,
+		RequestURIParamaterSupported:  cfg.RequestURIParamaterSupported,
+		RequireRequestURIRegistration: cfg.RequireRequestURIRegistration,
+		Policy:         uriToString(cfg.Policy),
+		TermsOfService: uriToString(cfg.TermsOfService),
+	}
+}
+
+func (e encodableProviderConfig) toStruct() (ProviderConfig, error) {
+	p := stickyErrParser{}
+	conf := ProviderConfig{
+		Issuer:                      p.parseURI(e.Issuer, "issuer"),
+		AuthEndpoint:                p.parseURI(e.AuthEndpoint, "authorization_endpoint"),
+		TokenEndpoint:               p.parseURI(e.TokenEndpoint, "token_endpoint"),
+		UserInfoEndpoint:            p.parseURI(e.UserInfoEndpoint, "userinfo_endpoint"),
+		KeysEndpoint:                p.parseURI(e.KeysEndpoint, "jwks_uri"),
+		RegistrationEndpoint:        p.parseURI(e.RegistrationEndpoint, "registration_endpoint"),
+		ScopesSupported:             e.ScopesSupported,
+		ResponseTypesSupported:      e.ResponseTypesSupported,
+		ResponseModesSupported:      e.ResponseModesSupported,
+		GrantTypesSupported:         e.GrantTypesSupported,
+		ACRValuesSupported:          e.ACRValuesSupported,
+		SubjectTypesSupported:       e.SubjectTypesSupported,
+		IDTokenSigningAlgValues:     e.IDTokenSigningAlgValues,
+		IDTokenEncryptionAlgValues:  e.IDTokenEncryptionAlgValues,
+		IDTokenEncryptionEncValues:  e.IDTokenEncryptionEncValues,
+		UserInfoSigningAlgValues:    e.UserInfoSigningAlgValues,
+		UserInfoEncryptionAlgValues: e.UserInfoEncryptionAlgValues,
+		UserInfoEncryptionEncValues: e.UserInfoEncryptionEncValues,
+		ReqObjSigningAlgValues:      e.ReqObjSigningAlgValues,
+		ReqObjEncryptionAlgValues:   e.ReqObjEncryptionAlgValues,
+		ReqObjEncryptionEncValues:   e.ReqObjEncryptionEncValues,
+		TokenEndpointAuthMethodsSupported: e.TokenEndpointAuthMethodsSupported,
+		TokenEndpointAuthSigningAlgValuesSupported: e.TokenEndpointAuthSigningAlgValuesSupported,
+		DisplayValuesSupported:        e.DisplayValuesSupported,
+		ClaimTypesSupported:           e.ClaimTypesSupported,
+		ClaimsSupported:               e.ClaimsSupported,
+		ServiceDocs:                   p.parseURI(e.ServiceDocs, "service_documentation"),
+		ClaimsLocalsSupported:         e.ClaimsLocalsSupported,
+		UILocalsSupported:             e.UILocalsSupported,
+		ClaimsParameterSupported:      e.ClaimsParameterSupported,
+		RequestParameterSupported:     e.RequestParameterSupported,
+		RequestURIParamaterSupported:  e.RequestURIParamaterSupported,
+		RequireRequestURIRegistration: e.RequireRequestURIRegistration,
+		Policy:         p.parseURI(e.Policy, "op_policy_uri"),
+		TermsOfService: p.parseURI(e.TermsOfService, "op_tos_uri"),
+	}
+	if p.firstErr != nil {
+		return ProviderConfig{}, p.firstErr
+	}
+	return conf, nil
+}
+
+// Empty reports whether a ProviderConfig holds no information.
+//
+// This case generally indicates a ProviderConfigGetter has experienced an error
+// and has nothing to report.
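+//
+// For example (illustrative):
+//
+//	var cfg oidc.ProviderConfig
+//	cfg.Empty() // true: no issuer has been set yet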
+func (p ProviderConfig) Empty() bool {
+	return p.Issuer == nil
+}
+
+func contains(sli []string, ele string) bool {
+	for _, s := range sli {
+		if s == ele {
+			return true
+		}
+	}
+	return false
+}
+
+// Valid determines if a ProviderConfig conforms with the OIDC specification.
+// If Valid returns successfully it guarantees required fields are non-nil and
+// URLs are well formed.
+//
+// Valid is called by UnmarshalJSON.
+//
+// NOTE(ericchiang): For development purposes Valid does not mandate 'https' for
+// URL fields where the OIDC spec requires it. This may change in future releases
+// of this package. See: https://github.com/coreos/go-oidc/issues/34
+func (p ProviderConfig) Valid() error {
+	grantTypes := p.GrantTypesSupported
+	if len(grantTypes) == 0 {
+		grantTypes = DefaultGrantTypesSupported
+	}
+	implicitOnly := true
+	for _, grantType := range grantTypes {
+		if grantType != oauth2.GrantTypeImplicit {
+			implicitOnly = false
+			break
+		}
+	}
+
+	if len(p.SubjectTypesSupported) == 0 {
+		return errors.New("missing required field subject_types_supported")
+	}
+	if len(p.IDTokenSigningAlgValues) == 0 {
+		return errors.New("missing required field id_token_signing_alg_values_supported")
+	}
+
+	if len(p.ScopesSupported) != 0 && !contains(p.ScopesSupported, "openid") {
+		return errors.New("scopes_supported must be unspecified or include 'openid'")
+	}
+
+	if !contains(p.IDTokenSigningAlgValues, "RS256") {
+		return errors.New("id_token_signing_alg_values_supported must include 'RS256'")
+	}
+	if contains(p.TokenEndpointAuthMethodsSupported, "none") {
+		return errors.New("token_endpoint_auth_methods_supported cannot include 'none'")
+	}
+
+	uris := []struct {
+		val      *url.URL
+		name     string
+		required bool
+	}{
+		{p.Issuer, "issuer", true},
+		{p.AuthEndpoint, "authorization_endpoint", true},
+		{p.TokenEndpoint, "token_endpoint", !implicitOnly},
+		{p.UserInfoEndpoint, "userinfo_endpoint", false},
+		{p.KeysEndpoint, "jwks_uri", true},
+		{p.RegistrationEndpoint, "registration_endpoint", false},
+		{p.ServiceDocs, "service_documentation", false},
+		{p.Policy, "op_policy_uri", false},
+		{p.TermsOfService, "op_tos_uri", false},
+	}
+
+	for _, uri := range uris {
+		if uri.val == nil {
+			if !uri.required {
+				continue
+			}
+			return fmt.Errorf("empty value for required uri field %s", uri.name)
+		}
+		if uri.val.Host == "" {
+			return fmt.Errorf("no host for uri field %s", uri.name)
+		}
+		if uri.val.Scheme != "http" && uri.val.Scheme != "https" {
+			return fmt.Errorf("uri field %s scheme is not http or https", uri.name)
+		}
+	}
+	return nil
+}
+
+// Supports determines if a provider supports a client given their respective metadata.
+func (p ProviderConfig) Supports(c ClientMetadata) error {
+	if err := p.Valid(); err != nil {
+		return fmt.Errorf("invalid provider config: %v", err)
+	}
+	if err := c.Valid(); err != nil {
+		return fmt.Errorf("invalid client config: %v", err)
+	}
+
+	// Fill default values for omitted fields.
+	c = c.Defaults()
+	p = p.Defaults()
+
+	// Do the lists of supported values include the requested one?
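+	// For example, a client requesting an id_token signed with "ES256" is only
+	// supported if the provider advertises "ES256" among its
+	// id_token_signing_alg_values_supported (value illustrative).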
+	supports := []struct {
+		supported []string
+		requested string
+		name      string
+	}{
+		{p.IDTokenSigningAlgValues, c.IDTokenResponseOptions.SigningAlg, "id_token_signed_response_alg"},
+		{p.IDTokenEncryptionAlgValues, c.IDTokenResponseOptions.EncryptionAlg, "id_token_encryption_response_alg"},
+		{p.IDTokenEncryptionEncValues, c.IDTokenResponseOptions.EncryptionEnc, "id_token_encryption_response_enc"},
+		{p.UserInfoSigningAlgValues, c.UserInfoResponseOptions.SigningAlg, "userinfo_signed_response_alg"},
+		{p.UserInfoEncryptionAlgValues, c.UserInfoResponseOptions.EncryptionAlg, "userinfo_encryption_response_alg"},
+		{p.UserInfoEncryptionEncValues, c.UserInfoResponseOptions.EncryptionEnc, "userinfo_encryption_response_enc"},
+		{p.ReqObjSigningAlgValues, c.RequestObjectOptions.SigningAlg, "request_object_signing_alg"},
+		{p.ReqObjEncryptionAlgValues, c.RequestObjectOptions.EncryptionAlg, "request_object_encryption_alg"},
+		{p.ReqObjEncryptionEncValues, c.RequestObjectOptions.EncryptionEnc, "request_object_encryption_enc"},
+	}
+	for _, field := range supports {
+		if field.requested == "" {
+			continue
+		}
+		if !contains(field.supported, field.requested) {
+			return fmt.Errorf("provider does not support requested value for field %s", field.name)
+		}
+	}
+
+	stringsEqual := func(s1, s2 string) bool { return s1 == s2 }
+
+	// For lists, is the list of requested values a subset of the supported ones?
+	supportsAll := []struct {
+		supported []string
+		requested []string
+		name      string
+		// OAuth2.0 response_type can be space-separated lists where order doesn't matter.
+		// For example "id_token token" is the same as "token id_token".
+		// Support a custom compare method.
+		comp func(s1, s2 string) bool
+	}{
+		{p.GrantTypesSupported, c.GrantTypes, "grant_types", stringsEqual},
+		{p.ResponseTypesSupported, c.ResponseTypes, "response_type", oauth2.ResponseTypesEqual},
+	}
+	for _, field := range supportsAll {
+	requestLoop:
+		for _, req := range field.requested {
+			for _, sup := range field.supported {
+				if field.comp(req, sup) {
+					continue requestLoop
+				}
+			}
+			return fmt.Errorf("provider does not support requested value for field %s", field.name)
+		}
+	}
+
+	// TODO(ericchiang): Are there more checks we feel comfortable with being strict about?
+ + return nil +} + +func (p ProviderConfig) SupportsGrantType(grantType string) bool { + var supported []string + if len(p.GrantTypesSupported) == 0 { + supported = DefaultGrantTypesSupported + } else { + supported = p.GrantTypesSupported + } + + for _, t := range supported { + if t == grantType { + return true + } + } + return false +} + +type ProviderConfigGetter interface { + Get() (ProviderConfig, error) +} + +type ProviderConfigSetter interface { + Set(ProviderConfig) error +} + +type ProviderConfigSyncer struct { + from ProviderConfigGetter + to ProviderConfigSetter + clock clockwork.Clock + + initialSyncDone bool + initialSyncWait sync.WaitGroup +} + +func NewProviderConfigSyncer(from ProviderConfigGetter, to ProviderConfigSetter) *ProviderConfigSyncer { + return &ProviderConfigSyncer{ + from: from, + to: to, + clock: clockwork.NewRealClock(), + } +} + +func (s *ProviderConfigSyncer) Run() chan struct{} { + stop := make(chan struct{}) + + var next pcsStepper + next = &pcsStepNext{aft: time.Duration(0)} + + s.initialSyncWait.Add(1) + go func() { + for { + select { + case <-s.clock.After(next.after()): + next = next.step(s.sync) + case <-stop: + return + } + } + }() + + return stop +} + +func (s *ProviderConfigSyncer) WaitUntilInitialSync() { + s.initialSyncWait.Wait() +} + +func (s *ProviderConfigSyncer) sync() (time.Duration, error) { + cfg, err := s.from.Get() + if err != nil { + return 0, err + } + + if err = s.to.Set(cfg); err != nil { + return 0, fmt.Errorf("error setting provider config: %v", err) + } + + if !s.initialSyncDone { + s.initialSyncWait.Done() + s.initialSyncDone = true + } + + log.Infof("Updating provider config: config=%#v", cfg) + + return nextSyncAfter(cfg.ExpiresAt, s.clock), nil +} + +type pcsStepFunc func() (time.Duration, error) + +type pcsStepper interface { + after() time.Duration + step(pcsStepFunc) pcsStepper +} + +type pcsStepNext struct { + aft time.Duration +} + +func (n *pcsStepNext) after() time.Duration { + return n.aft +} + +func (n *pcsStepNext) step(fn pcsStepFunc) (next pcsStepper) { + ttl, err := fn() + if err == nil { + next = &pcsStepNext{aft: ttl} + log.Debugf("Synced provider config, next attempt in %v", next.after()) + } else { + next = &pcsStepRetry{aft: time.Second} + log.Errorf("Provider config sync failed, retrying in %v: %v", next.after(), err) + } + return +} + +type pcsStepRetry struct { + aft time.Duration +} + +func (r *pcsStepRetry) after() time.Duration { + return r.aft +} + +func (r *pcsStepRetry) step(fn pcsStepFunc) (next pcsStepper) { + ttl, err := fn() + if err == nil { + next = &pcsStepNext{aft: ttl} + log.Infof("Provider config sync no longer failing") + } else { + next = &pcsStepRetry{aft: timeutil.ExpBackoff(r.aft, time.Minute)} + log.Errorf("Provider config sync still failing, retrying in %v: %v", next.after(), err) + } + return +} + +func nextSyncAfter(exp time.Time, clock clockwork.Clock) time.Duration { + if exp.IsZero() { + return MaximumProviderConfigSyncInterval + } + + t := exp.Sub(clock.Now()) / 2 + if t > MaximumProviderConfigSyncInterval { + t = MaximumProviderConfigSyncInterval + } else if t < minimumProviderConfigSyncInterval { + t = minimumProviderConfigSyncInterval + } + + return t +} + +type httpProviderConfigGetter struct { + hc phttp.Client + issuerURL string + clock clockwork.Clock +} + +func NewHTTPProviderConfigGetter(hc phttp.Client, issuerURL string) *httpProviderConfigGetter { + return &httpProviderConfigGetter{ + hc: hc, + issuerURL: issuerURL, + clock: clockwork.NewRealClock(), + } +} + +func 
(r *httpProviderConfigGetter) Get() (cfg ProviderConfig, err error) { + // If the Issuer value contains a path component, any terminating / MUST be removed before + // appending /.well-known/openid-configuration. + // https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderConfigurationRequest + discoveryURL := strings.TrimSuffix(r.issuerURL, "/") + discoveryConfigPath + req, err := http.NewRequest("GET", discoveryURL, nil) + if err != nil { + return + } + + resp, err := r.hc.Do(req) + if err != nil { + return + } + defer resp.Body.Close() + + if err = json.NewDecoder(resp.Body).Decode(&cfg); err != nil { + return + } + + var ttl time.Duration + var ok bool + ttl, ok, err = phttp.Cacheable(resp.Header) + if err != nil { + return + } else if ok { + cfg.ExpiresAt = r.clock.Now().UTC().Add(ttl) + } + + // The issuer value returned MUST be identical to the Issuer URL that was directly used to retrieve the configuration information. + // http://openid.net/specs/openid-connect-discovery-1_0.html#ProviderConfigurationValidation + if !urlEqual(cfg.Issuer.String(), r.issuerURL) { + err = fmt.Errorf(`"issuer" in config (%v) does not match provided issuer URL (%v)`, cfg.Issuer, r.issuerURL) + return + } + + return +} + +func FetchProviderConfig(hc phttp.Client, issuerURL string) (ProviderConfig, error) { + if hc == nil { + hc = http.DefaultClient + } + + g := NewHTTPProviderConfigGetter(hc, issuerURL) + return g.Get() +} + +func WaitForProviderConfig(hc phttp.Client, issuerURL string) (pcfg ProviderConfig) { + return waitForProviderConfig(hc, issuerURL, clockwork.NewRealClock()) +} + +func waitForProviderConfig(hc phttp.Client, issuerURL string, clock clockwork.Clock) (pcfg ProviderConfig) { + var sleep time.Duration + var err error + for { + pcfg, err = FetchProviderConfig(hc, issuerURL) + if err == nil { + break + } + + sleep = timeutil.ExpBackoff(sleep, time.Minute) + fmt.Printf("Failed fetching provider config, trying again in %v: %v\n", sleep, err) + time.Sleep(sleep) + } + + return +} diff --git a/Godeps/_workspace/src/github.com/coreos/go-oidc/oidc/transport.go b/Godeps/_workspace/src/github.com/coreos/go-oidc/oidc/transport.go new file mode 100644 index 000000000000..61c926d7fe72 --- /dev/null +++ b/Godeps/_workspace/src/github.com/coreos/go-oidc/oidc/transport.go @@ -0,0 +1,88 @@ +package oidc + +import ( + "fmt" + "net/http" + "sync" + + phttp "github.com/coreos/go-oidc/http" + "github.com/coreos/go-oidc/jose" +) + +type TokenRefresher interface { + // Verify checks if the provided token is currently valid or not. + Verify(jose.JWT) error + + // Refresh attempts to authenticate and retrieve a new token. 
+ Refresh() (jose.JWT, error) +} + +type ClientCredsTokenRefresher struct { + Issuer string + OIDCClient *Client +} + +func (c *ClientCredsTokenRefresher) Verify(jwt jose.JWT) (err error) { + _, err = VerifyClientClaims(jwt, c.Issuer) + return +} + +func (c *ClientCredsTokenRefresher) Refresh() (jwt jose.JWT, err error) { + if err = c.OIDCClient.Healthy(); err != nil { + err = fmt.Errorf("unable to authenticate, unhealthy OIDC client: %v", err) + return + } + + jwt, err = c.OIDCClient.ClientCredsToken([]string{"openid"}) + if err != nil { + err = fmt.Errorf("unable to verify auth code with issuer: %v", err) + return + } + + return +} + +type AuthenticatedTransport struct { + TokenRefresher + http.RoundTripper + + mu sync.Mutex + jwt jose.JWT +} + +func (t *AuthenticatedTransport) verifiedJWT() (jose.JWT, error) { + t.mu.Lock() + defer t.mu.Unlock() + + if t.TokenRefresher.Verify(t.jwt) == nil { + return t.jwt, nil + } + + jwt, err := t.TokenRefresher.Refresh() + if err != nil { + return jose.JWT{}, fmt.Errorf("unable to acquire valid JWT: %v", err) + } + + t.jwt = jwt + return t.jwt, nil +} + +// SetJWT sets the JWT held by the Transport. +// This is useful for cases in which you want to set an initial JWT. +func (t *AuthenticatedTransport) SetJWT(jwt jose.JWT) { + t.mu.Lock() + defer t.mu.Unlock() + + t.jwt = jwt +} + +func (t *AuthenticatedTransport) RoundTrip(r *http.Request) (*http.Response, error) { + jwt, err := t.verifiedJWT() + if err != nil { + return nil, err + } + + req := phttp.CopyRequest(r) + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", jwt.Encode())) + return t.RoundTripper.RoundTrip(req) +} diff --git a/Godeps/_workspace/src/github.com/coreos/go-oidc/oidc/util.go b/Godeps/_workspace/src/github.com/coreos/go-oidc/oidc/util.go new file mode 100644 index 000000000000..f2a5a195e4a3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/coreos/go-oidc/oidc/util.go @@ -0,0 +1,109 @@ +package oidc + +import ( + "crypto/rand" + "encoding/base64" + "errors" + "fmt" + "net" + "net/http" + "net/url" + "strings" + "time" + + "github.com/coreos/go-oidc/jose" +) + +// RequestTokenExtractor funcs extract a raw encoded token from a request. +type RequestTokenExtractor func(r *http.Request) (string, error) + +// ExtractBearerToken is a RequestTokenExtractor which extracts a bearer token from a request's +// Authorization header. +func ExtractBearerToken(r *http.Request) (string, error) { + ah := r.Header.Get("Authorization") + if ah == "" { + return "", errors.New("missing Authorization header") + } + + if len(ah) <= 6 || strings.ToUpper(ah[0:6]) != "BEARER" { + return "", errors.New("should be a bearer token") + } + + val := ah[7:] + if len(val) == 0 { + return "", errors.New("bearer token is empty") + } + + return val, nil +} + +// CookieTokenExtractor returns a RequestTokenExtractor which extracts a token from the named cookie in a request. 
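+//
+// For example (the cookie name is illustrative):
+//
+//	extract := oidc.CookieTokenExtractor("session_token")
+//	raw, err := extract(req) // raw is the named cookie's value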
+func CookieTokenExtractor(cookieName string) RequestTokenExtractor { + return func(r *http.Request) (string, error) { + ck, err := r.Cookie(cookieName) + if err != nil { + return "", fmt.Errorf("token cookie not found in request: %v", err) + } + + if ck.Value == "" { + return "", errors.New("token cookie found but is empty") + } + + return ck.Value, nil + } +} + +func NewClaims(iss, sub string, aud interface{}, iat, exp time.Time) jose.Claims { + return jose.Claims{ + // required + "iss": iss, + "sub": sub, + "aud": aud, + "iat": iat.Unix(), + "exp": exp.Unix(), + } +} + +func GenClientID(hostport string) (string, error) { + b, err := randBytes(32) + if err != nil { + return "", err + } + + var host string + if strings.Contains(hostport, ":") { + host, _, err = net.SplitHostPort(hostport) + if err != nil { + return "", err + } + } else { + host = hostport + } + + return fmt.Sprintf("%s@%s", base64.URLEncoding.EncodeToString(b), host), nil +} + +func randBytes(n int) ([]byte, error) { + b := make([]byte, n) + got, err := rand.Read(b) + if err != nil { + return nil, err + } else if n != got { + return nil, errors.New("unable to generate enough random data") + } + return b, nil +} + +// urlEqual checks two urls for equality using only the host and path portions. +func urlEqual(url1, url2 string) bool { + u1, err := url.Parse(url1) + if err != nil { + return false + } + u2, err := url.Parse(url2) + if err != nil { + return false + } + + return strings.ToLower(u1.Host+u1.Path) == strings.ToLower(u2.Host+u2.Path) +} diff --git a/Godeps/_workspace/src/github.com/coreos/go-oidc/oidc/verification.go b/Godeps/_workspace/src/github.com/coreos/go-oidc/oidc/verification.go new file mode 100644 index 000000000000..002413047949 --- /dev/null +++ b/Godeps/_workspace/src/github.com/coreos/go-oidc/oidc/verification.go @@ -0,0 +1,188 @@ +package oidc + +import ( + "errors" + "fmt" + "time" + + "github.com/jonboulle/clockwork" + + "github.com/coreos/go-oidc/jose" + "github.com/coreos/go-oidc/key" +) + +func VerifySignature(jwt jose.JWT, keys []key.PublicKey) (bool, error) { + jwtBytes := []byte(jwt.Data()) + for _, k := range keys { + v, err := k.Verifier() + if err != nil { + return false, err + } + if v.Verify(jwt.Signature, jwtBytes) == nil { + return true, nil + } + } + return false, nil +} + +// containsString returns true if the given string(needle) is found +// in the string array(haystack). +func containsString(needle string, haystack []string) bool { + for _, v := range haystack { + if v == needle { + return true + } + } + return false +} + +// Verify claims in accordance with OIDC spec +// http://openid.net/specs/openid-connect-basic-1_0.html#IDTokenValidation +func VerifyClaims(jwt jose.JWT, issuer, clientID string) error { + now := time.Now().UTC() + + claims, err := jwt.Claims() + if err != nil { + return err + } + + ident, err := IdentityFromClaims(claims) + if err != nil { + return err + } + + if ident.ExpiresAt.Before(now) { + return errors.New("token is expired") + } + + // iss REQUIRED. Issuer Identifier for the Issuer of the response. + // The iss value is a case sensitive URL using the https scheme that contains scheme, + // host, and optionally, port number and path components and no query or fragment components. + if iss, exists := claims["iss"].(string); exists { + if !urlEqual(iss, issuer) { + return fmt.Errorf("invalid claim value: 'iss'. expected=%s, found=%s.", issuer, iss) + } + } else { + return errors.New("missing claim: 'iss'") + } + + // iat REQUIRED. 
Time at which the JWT was issued.
	// Its value is a JSON number representing the number of seconds from 1970-01-01T0:0:0Z
+	// as measured in UTC until the date/time.
+	if _, exists := claims["iat"].(float64); !exists {
+		return errors.New("missing claim: 'iat'")
+	}
+
+	// aud REQUIRED. Audience(s) that this ID Token is intended for.
+	// It MUST contain the OAuth 2.0 client_id of the Relying Party as an audience value.
+	// It MAY also contain identifiers for other audiences. In the general case, the aud
+	// value is an array of case sensitive strings. In the common special case when there
+	// is one audience, the aud value MAY be a single case sensitive string.
+	if aud, ok, err := claims.StringClaim("aud"); err == nil && ok {
+		if aud != clientID {
+			return fmt.Errorf("invalid claims, 'aud' claim and 'client_id' do not match, aud=%s, client_id=%s", aud, clientID)
+		}
+	} else if aud, ok, err := claims.StringsClaim("aud"); err == nil && ok {
+		if !containsString(clientID, aud) {
+			return fmt.Errorf("invalid claims, cannot find 'client_id' in 'aud' claim, aud=%v, client_id=%s", aud, clientID)
+		}
+	} else {
+		return errors.New("invalid claim value: 'aud' is required, and should be either string or string array")
+	}
+
+	return nil
+}
+
+// VerifyClientClaims verifies all the required claims are valid for a "client credentials" JWT.
+// Returns the client ID if valid, or an error if invalid.
+func VerifyClientClaims(jwt jose.JWT, issuer string) (string, error) {
+	claims, err := jwt.Claims()
+	if err != nil {
+		return "", fmt.Errorf("failed to parse JWT claims: %v", err)
+	}
+
+	iss, ok, err := claims.StringClaim("iss")
+	if err != nil {
+		return "", fmt.Errorf("failed to parse 'iss' claim: %v", err)
+	} else if !ok {
+		return "", errors.New("missing required 'iss' claim")
+	} else if !urlEqual(iss, issuer) {
+		return "", fmt.Errorf("'iss' claim does not match expected issuer, iss=%s", iss)
+	}
+
+	sub, ok, err := claims.StringClaim("sub")
+	if err != nil {
+		return "", fmt.Errorf("failed to parse 'sub' claim: %v", err)
+	} else if !ok {
+		return "", errors.New("missing required 'sub' claim")
+	}
+
+	if aud, ok, err := claims.StringClaim("aud"); err == nil && ok {
+		if aud != sub {
+			return "", fmt.Errorf("invalid claims, 'aud' claim and 'sub' claim do not match, aud=%s, sub=%s", aud, sub)
+		}
+	} else if aud, ok, err := claims.StringsClaim("aud"); err == nil && ok {
+		if !containsString(sub, aud) {
+			return "", fmt.Errorf("invalid claims, cannot find 'sub' in 'aud' claim, aud=%v, sub=%s", aud, sub)
+		}
+	} else {
+		return "", errors.New("invalid claim value: 'aud' is required, and should be either string or string array")
+	}
+
+	now := time.Now().UTC()
+	exp, ok, err := claims.TimeClaim("exp")
+	if err != nil {
+		return "", fmt.Errorf("failed to parse 'exp' claim: %v", err)
+	} else if !ok {
+		return "", errors.New("missing required 'exp' claim")
+	} else if exp.Before(now) {
+		return "", fmt.Errorf("token already expired at: %v", exp)
+	}
+
+	return sub, nil
+}
+
+type JWTVerifier struct {
+	issuer   string
+	clientID string
+	syncFunc func() error
+	keysFunc func() []key.PublicKey
+	clock    clockwork.Clock
+}
+
+func NewJWTVerifier(issuer, clientID string, syncFunc func() error, keysFunc func() []key.PublicKey) JWTVerifier {
+	return JWTVerifier{
+		issuer:   issuer,
+		clientID: clientID,
+		syncFunc: syncFunc,
+		keysFunc: keysFunc,
+		clock:    clockwork.NewRealClock(),
+	}
+}
+
+func (v *JWTVerifier) Verify(jwt jose.JWT) error {
+	ok, err := VerifySignature(jwt, v.keysFunc())
+	if ok {
+
goto SignatureVerified + } else if err != nil { + return fmt.Errorf("oidc: JWT signature verification failed: %v", err) + } + + if err = v.syncFunc(); err != nil { + return fmt.Errorf("oidc: failed syncing KeySet: %v", err) + } + + ok, err = VerifySignature(jwt, v.keysFunc()) + if err != nil { + return fmt.Errorf("oidc: JWT signature verification failed: %v", err) + } else if !ok { + return errors.New("oidc: unable to verify JWT signature: no matching keys") + } + +SignatureVerified: + if err := VerifyClaims(jwt, v.issuer, v.clientID); err != nil { + return fmt.Errorf("oidc: JWT claims invalid: %v", err) + } + + return nil +} diff --git a/Godeps/_workspace/src/github.com/coreos/pkg/LICENSE b/Godeps/_workspace/src/github.com/coreos/pkg/LICENSE new file mode 100644 index 000000000000..e06d2081865a --- /dev/null +++ b/Godeps/_workspace/src/github.com/coreos/pkg/LICENSE @@ -0,0 +1,202 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+   To apply the Apache License to your work, attach the following
+   boilerplate notice, with the fields enclosed by brackets "{}"
+   replaced with your own identifying information. (Don't include
+   the brackets!) The text should be enclosed in the appropriate
+   comment syntax for the file format. We also recommend that a
+   file or class name and description of purpose be included on the
+   same "printed page" as the copyright notice for easier
+   identification within third-party archives.
+
+   Copyright {yyyy} {name of copyright owner}
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+
diff --git a/Godeps/_workspace/src/github.com/coreos/pkg/NOTICE b/Godeps/_workspace/src/github.com/coreos/pkg/NOTICE
new file mode 100644
index 000000000000..b39ddfa5cbde
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/coreos/pkg/NOTICE
@@ -0,0 +1,5 @@
+CoreOS Project
+Copyright 2014 CoreOS, Inc
+
+This product includes software developed at CoreOS, Inc.
+(http://www.coreos.com/).
diff --git a/Godeps/_workspace/src/github.com/coreos/pkg/capnslog/README.md b/Godeps/_workspace/src/github.com/coreos/pkg/capnslog/README.md
new file mode 100644
index 000000000000..1053dd001a37
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/coreos/pkg/capnslog/README.md
@@ -0,0 +1,38 @@
+# CoreOS Log
+
+There are far too many logging packages out there, with varying licenses, far too many features (colorization, all sorts of log frameworks), or APIs that are simply a pain to use (no `Fatalln()`?).
+
+## Design Principles
+
+* `package main` is the place where logging gets turned on and routed
+
+A library should not touch log options; it should only generate log entries. Libraries are silent until `main` lets them speak.
+
+* All log options are runtime-configurable.
+
+It is still the job of `main` to expose these configurations. `main` may delegate this to, say, a configuration webhook, but does so explicitly.
+
+* There is one log object per package. It is registered under its repository and package name.
+
+`main` activates logging for its repository and any dependency repositories it would also like to have output in its logstream. `main` also dictates at which level each subpackage logs.
+
+* There is *one* output stream, and it is an `io.Writer` composed with a formatter.
+
+Splitting streams is probably not the job of your program, but rather, your log aggregation framework. If you must split output streams, again, `main` configures this and you can write a very simple two-output struct that satisfies io.Writer.
+
+Fancy colorful formatting and JSON output are beyond the scope of a basic logging framework -- they're application/log-collector dependent. These are, at best, provided as options, but more likely, provided by your application.
+
+* Log objects are an interface
+
+An object knows best how to print itself. Log objects can collect more interesting metadata if they wish; however, because text isn't going away anytime soon, they must all be marshalable to text. The simplest log object is a string, which returns itself. If you wish to do more fancy tricks for printing your log objects, see also JSON output -- introspect and write a formatter which can handle your advanced log interface. Making strings is the only thing guaranteed, as sketched below.
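+
+For instance, any value whose text form is meaningful can be logged directly; a minimal sketch (the `loginEvent` type is hypothetical, not part of capnslog):
+
+```go
+package main
+
+import "fmt"
+
+// loginEvent is a hypothetical log object: any value with a textual form.
+type loginEvent struct {
+	User string
+	IP   string
+}
+
+// String marshals the event to text, which is all capnslog requires;
+// its formatters render entries with fmt.Sprint, which calls String().
+func (e loginEvent) String() string {
+	return fmt.Sprintf("login user=%s ip=%s", e.User, e.IP)
+}
+```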
+
+* Log levels have specific meanings:
+
+  * Critical: Unrecoverable. Must fail.
+  * Error: Data has been lost, a request has failed for a bad reason, or a required resource has been lost.
+  * Warning: (Hopefully) Temporary conditions that may cause errors, but may work fine. A replica disappearing (that may reconnect) is a warning.
+  * Notice: Normal, but important (uncommon) log information.
+  * Info: Normal, working log information; everything is fine, but helpful notices for auditing or common operations.
+  * Debug: Everything is still fine, but even common operations may be logged; expect a greater volume of less individually helpful notices.
+  * Trace: Anything goes, from logging every function call as part of a common operation, to tracing execution of a query.
+
diff --git a/Godeps/_workspace/src/github.com/coreos/pkg/capnslog/formatters.go b/Godeps/_workspace/src/github.com/coreos/pkg/capnslog/formatters.go
new file mode 100644
index 000000000000..edbb351df15d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/coreos/pkg/capnslog/formatters.go
@@ -0,0 +1,63 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package capnslog
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"strings"
+	"time"
+)
+
+type Formatter interface {
+	Format(pkg string, level LogLevel, depth int, entries ...interface{})
+	Flush()
+}
+
+func NewStringFormatter(w io.Writer) *StringFormatter {
+	return &StringFormatter{
+		w: bufio.NewWriter(w),
+	}
+}
+
+type StringFormatter struct {
+	w *bufio.Writer
+}
+
+func (s *StringFormatter) Format(pkg string, l LogLevel, i int, entries ...interface{}) {
+	now := time.Now()
+	y, m, d := now.Date()
+	h, min, sec := now.Clock()
+	s.w.WriteString(fmt.Sprintf("%d/%02d/%02d %02d:%02d:%02d ", y, m, d, h, min, sec))
+	s.writeEntries(pkg, l, i, entries...)
+}
+
+func (s *StringFormatter) writeEntries(pkg string, _ LogLevel, _ int, entries ...interface{}) {
+	if pkg != "" {
+		s.w.WriteString(pkg + ": ")
+	}
+	str := fmt.Sprint(entries...)
+	endsInNL := strings.HasSuffix(str, "\n")
+	s.w.WriteString(str)
+	if !endsInNL {
+		s.w.WriteString("\n")
+	}
+	s.Flush()
+}
+
+func (s *StringFormatter) Flush() {
+	s.w.Flush()
+}
diff --git a/Godeps/_workspace/src/github.com/coreos/pkg/capnslog/glog_formatter.go b/Godeps/_workspace/src/github.com/coreos/pkg/capnslog/glog_formatter.go
new file mode 100644
index 000000000000..cae0749db0f6
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/coreos/pkg/capnslog/glog_formatter.go
@@ -0,0 +1,95 @@
+// Copyright 2015 CoreOS, Inc.
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package capnslog + +import ( + "bufio" + "bytes" + "io" + "os" + "runtime" + "strconv" + "strings" + "time" +) + +var pid = os.Getpid() + +type GlogFormatter struct { + StringFormatter +} + +func NewGlogFormatter(w io.Writer) *GlogFormatter { + g := &GlogFormatter{} + g.w = bufio.NewWriter(w) + return g +} + +func (g GlogFormatter) Format(pkg string, level LogLevel, depth int, entries ...interface{}) { + g.w.Write(GlogHeader(level, depth+1)) + g.StringFormatter.Format(pkg, level, depth+1, entries...) +} + +func GlogHeader(level LogLevel, depth int) []byte { + // Lmmdd hh:mm:ss.uuuuuu threadid file:line] + now := time.Now() + _, file, line, ok := runtime.Caller(depth) // It's always the same number of frames to the user's call. + if !ok { + file = "???" + line = 1 + } else { + slash := strings.LastIndex(file, "/") + if slash >= 0 { + file = file[slash+1:] + } + } + if line < 0 { + line = 0 // not a real line number + } + buf := &bytes.Buffer{} + buf.Grow(30) + _, month, day := now.Date() + hour, minute, second := now.Clock() + buf.WriteString(level.Char()) + twoDigits(buf, int(month)) + twoDigits(buf, day) + buf.WriteByte(' ') + twoDigits(buf, hour) + buf.WriteByte(':') + twoDigits(buf, minute) + buf.WriteByte(':') + twoDigits(buf, second) + buf.WriteByte('.') + buf.WriteString(strconv.Itoa(now.Nanosecond() / 1000)) + buf.WriteByte(' ') + buf.WriteString(strconv.Itoa(pid)) + buf.WriteByte(' ') + buf.WriteString(file) + buf.WriteByte(':') + buf.WriteString(strconv.Itoa(line)) + buf.WriteByte(']') + buf.WriteByte(' ') + return buf.Bytes() +} + +const digits = "0123456789" + +func twoDigits(b *bytes.Buffer, d int) { + c2 := digits[d%10] + d /= 10 + c1 := digits[d%10] + b.WriteByte(c1) + b.WriteByte(c2) +} diff --git a/Godeps/_workspace/src/github.com/coreos/pkg/capnslog/log_hijack.go b/Godeps/_workspace/src/github.com/coreos/pkg/capnslog/log_hijack.go new file mode 100644 index 000000000000..1a7f3dc1fb8b --- /dev/null +++ b/Godeps/_workspace/src/github.com/coreos/pkg/capnslog/log_hijack.go @@ -0,0 +1,39 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
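+//
+// This file hijacks the standard library's global "log" package: its output
+// is redirected to a capnslog PackageLogger (registered under the repo name
+// "log"), so stdlib log calls from dependencies share capnslog's formatter
+// and level filtering.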
+ +package capnslog + +import ( + "log" +) + +func init() { + pkg := NewPackageLogger("log", "") + w := packageWriter{pkg} + log.SetFlags(0) + log.SetPrefix("") + log.SetOutput(w) +} + +type packageWriter struct { + pl *PackageLogger +} + +func (p packageWriter) Write(b []byte) (int, error) { + if p.pl.level < INFO { + return 0, nil + } + p.pl.internalLog(calldepth+2, INFO, string(b)) + return len(b), nil +} diff --git a/Godeps/_workspace/src/github.com/coreos/pkg/capnslog/logmap.go b/Godeps/_workspace/src/github.com/coreos/pkg/capnslog/logmap.go new file mode 100644 index 000000000000..84954488308d --- /dev/null +++ b/Godeps/_workspace/src/github.com/coreos/pkg/capnslog/logmap.go @@ -0,0 +1,240 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package capnslog + +import ( + "errors" + "strings" + "sync" +) + +// LogLevel is the set of all log levels. +type LogLevel int8 + +const ( + // CRITICAL is the lowest log level; only errors which will end the program will be propagated. + CRITICAL LogLevel = iota - 1 + // ERROR is for errors that are not fatal but lead to troubling behavior. + ERROR + // WARNING is for errors which are not fatal and not errors, but are unusual. Often sourced from misconfigurations. + WARNING + // NOTICE is for normal but significant conditions. + NOTICE + // INFO is a log level for common, everyday log updates. + INFO + // DEBUG is the default hidden level for more verbose updates about internal processes. + DEBUG + // TRACE is for (potentially) call by call tracing of programs. + TRACE +) + +// Char returns a single-character representation of the log level. +func (l LogLevel) Char() string { + switch l { + case CRITICAL: + return "C" + case ERROR: + return "E" + case WARNING: + return "W" + case NOTICE: + return "N" + case INFO: + return "I" + case DEBUG: + return "D" + case TRACE: + return "T" + default: + panic("Unhandled loglevel") + } +} + +// String returns a multi-character representation of the log level. +func (l LogLevel) String() string { + switch l { + case CRITICAL: + return "CRITICAL" + case ERROR: + return "ERROR" + case WARNING: + return "WARNING" + case NOTICE: + return "NOTICE" + case INFO: + return "INFO" + case DEBUG: + return "DEBUG" + case TRACE: + return "TRACE" + default: + panic("Unhandled loglevel") + } +} + +// Update using the given string value. Fulfills the flag.Value interface. +func (l *LogLevel) Set(s string) error { + value, err := ParseLevel(s) + if err != nil { + return err + } + + *l = value + return nil +} + +// ParseLevel translates some potential loglevel strings into their corresponding levels. 
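+// Both full names and shorthands are accepted: "WARNING" and "W" both yield
+// WARNING, and the glog-style numeric levels "0" through "5" map to ERROR
+// through TRACE.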
+func ParseLevel(s string) (LogLevel, error) { + switch s { + case "CRITICAL", "C": + return CRITICAL, nil + case "ERROR", "0", "E": + return ERROR, nil + case "WARNING", "1", "W": + return WARNING, nil + case "NOTICE", "2", "N": + return NOTICE, nil + case "INFO", "3", "I": + return INFO, nil + case "DEBUG", "4", "D": + return DEBUG, nil + case "TRACE", "5", "T": + return TRACE, nil + } + return CRITICAL, errors.New("couldn't parse log level " + s) +} + +type RepoLogger map[string]*PackageLogger + +type loggerStruct struct { + sync.Mutex + repoMap map[string]RepoLogger + formatter Formatter +} + +// logger is the global logger +var logger = new(loggerStruct) + +// SetGlobalLogLevel sets the log level for all packages in all repositories +// registered with capnslog. +func SetGlobalLogLevel(l LogLevel) { + logger.Lock() + defer logger.Unlock() + for _, r := range logger.repoMap { + r.setRepoLogLevelInternal(l) + } +} + +// GetRepoLogger may return the handle to the repository's set of packages' loggers. +func GetRepoLogger(repo string) (RepoLogger, error) { + logger.Lock() + defer logger.Unlock() + r, ok := logger.repoMap[repo] + if !ok { + return nil, errors.New("no packages registered for repo " + repo) + } + return r, nil +} + +// MustRepoLogger returns the handle to the repository's packages' loggers. +func MustRepoLogger(repo string) RepoLogger { + r, err := GetRepoLogger(repo) + if err != nil { + panic(err) + } + return r +} + +// SetRepoLogLevel sets the log level for all packages in the repository. +func (r RepoLogger) SetRepoLogLevel(l LogLevel) { + logger.Lock() + defer logger.Unlock() + r.setRepoLogLevelInternal(l) +} + +func (r RepoLogger) setRepoLogLevelInternal(l LogLevel) { + for _, v := range r { + v.level = l + } +} + +// ParseLogLevelConfig parses a comma-separated string of "package=loglevel", in +// order, and returns a map of the results, for use in SetLogLevel. +func (r RepoLogger) ParseLogLevelConfig(conf string) (map[string]LogLevel, error) { + setlist := strings.Split(conf, ",") + out := make(map[string]LogLevel) + for _, setstring := range setlist { + setting := strings.Split(setstring, "=") + if len(setting) != 2 { + return nil, errors.New("oddly structured `pkg=level` option: " + setstring) + } + l, err := ParseLevel(setting[1]) + if err != nil { + return nil, err + } + out[setting[0]] = l + } + return out, nil +} + +// SetLogLevel takes a map of package names within a repository to their desired +// loglevel, and sets the levels appropriately. Unknown packages are ignored. +// "*" is a special package name that corresponds to all packages, and will be +// processed first. +func (r RepoLogger) SetLogLevel(m map[string]LogLevel) { + logger.Lock() + defer logger.Unlock() + if l, ok := m["*"]; ok { + r.setRepoLogLevelInternal(l) + } + for k, v := range m { + l, ok := r[k] + if !ok { + continue + } + l.level = v + } +} + +// SetFormatter sets the formatting function for all logs. +func SetFormatter(f Formatter) { + logger.Lock() + defer logger.Unlock() + logger.formatter = f +} + +// NewPackageLogger creates a package logger object. +// This should be defined as a global var in your package, referencing your repo. 
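+//
+// For example (repository and package names are hypothetical):
+//
+//	var plog = capnslog.NewPackageLogger("github.com/example/project", "server")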
+func NewPackageLogger(repo string, pkg string) (p *PackageLogger) { + logger.Lock() + defer logger.Unlock() + if logger.repoMap == nil { + logger.repoMap = make(map[string]RepoLogger) + } + r, rok := logger.repoMap[repo] + if !rok { + logger.repoMap[repo] = make(RepoLogger) + r = logger.repoMap[repo] + } + p, pok := r[pkg] + if !pok { + r[pkg] = &PackageLogger{ + pkg: pkg, + level: INFO, + } + p = r[pkg] + } + return +} diff --git a/Godeps/_workspace/src/github.com/coreos/pkg/capnslog/pkg_logger.go b/Godeps/_workspace/src/github.com/coreos/pkg/capnslog/pkg_logger.go new file mode 100644 index 000000000000..0aa79d9adf33 --- /dev/null +++ b/Godeps/_workspace/src/github.com/coreos/pkg/capnslog/pkg_logger.go @@ -0,0 +1,158 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package capnslog + +import ( + "fmt" + "os" +) + +type PackageLogger struct { + pkg string + level LogLevel +} + +const calldepth = 3 + +func (p *PackageLogger) internalLog(depth int, inLevel LogLevel, entries ...interface{}) { + if inLevel != CRITICAL && p.level < inLevel { + return + } + logger.Lock() + defer logger.Unlock() + if logger.formatter != nil { + logger.formatter.Format(p.pkg, inLevel, depth+1, entries...) + } +} + +func (p *PackageLogger) LevelAt(l LogLevel) bool { + return p.level >= l +} + +// Log a formatted string at any level between ERROR and TRACE +func (p *PackageLogger) Logf(l LogLevel, format string, args ...interface{}) { + p.internalLog(calldepth, l, fmt.Sprintf(format, args...)) +} + +// Log a message at any level between ERROR and TRACE +func (p *PackageLogger) Log(l LogLevel, args ...interface{}) { + p.internalLog(calldepth, l, fmt.Sprint(args...)) +} + +// log stdlib compatibility + +func (p *PackageLogger) Println(args ...interface{}) { + p.internalLog(calldepth, INFO, fmt.Sprintln(args...)) +} + +func (p *PackageLogger) Printf(format string, args ...interface{}) { + p.internalLog(calldepth, INFO, fmt.Sprintf(format, args...)) +} + +func (p *PackageLogger) Print(args ...interface{}) { + p.internalLog(calldepth, INFO, fmt.Sprint(args...)) +} + +// Panic and fatal + +func (p *PackageLogger) Panicf(format string, args ...interface{}) { + s := fmt.Sprintf(format, args...) + p.internalLog(calldepth, CRITICAL, s) + panic(s) +} + +func (p *PackageLogger) Panic(args ...interface{}) { + s := fmt.Sprint(args...) + p.internalLog(calldepth, CRITICAL, s) + panic(s) +} + +func (p *PackageLogger) Fatalf(format string, args ...interface{}) { + s := fmt.Sprintf(format, args...) + p.internalLog(calldepth, CRITICAL, s) + os.Exit(1) +} + +func (p *PackageLogger) Fatal(args ...interface{}) { + s := fmt.Sprint(args...) + p.internalLog(calldepth, CRITICAL, s) + os.Exit(1) +} + +// Error Functions + +func (p *PackageLogger) Errorf(format string, args ...interface{}) { + p.internalLog(calldepth, ERROR, fmt.Sprintf(format, args...)) +} + +func (p *PackageLogger) Error(entries ...interface{}) { + p.internalLog(calldepth, ERROR, entries...) 
+} + +// Warning Functions + +func (p *PackageLogger) Warningf(format string, args ...interface{}) { + p.internalLog(calldepth, WARNING, fmt.Sprintf(format, args...)) +} + +func (p *PackageLogger) Warning(entries ...interface{}) { + p.internalLog(calldepth, WARNING, entries...) +} + +// Notice Functions + +func (p *PackageLogger) Noticef(format string, args ...interface{}) { + p.internalLog(calldepth, NOTICE, fmt.Sprintf(format, args...)) +} + +func (p *PackageLogger) Notice(entries ...interface{}) { + p.internalLog(calldepth, NOTICE, entries...) +} + +// Info Functions + +func (p *PackageLogger) Infof(format string, args ...interface{}) { + p.internalLog(calldepth, INFO, fmt.Sprintf(format, args...)) +} + +func (p *PackageLogger) Info(entries ...interface{}) { + p.internalLog(calldepth, INFO, entries...) +} + +// Debug Functions + +func (p *PackageLogger) Debugf(format string, args ...interface{}) { + p.internalLog(calldepth, DEBUG, fmt.Sprintf(format, args...)) +} + +func (p *PackageLogger) Debug(entries ...interface{}) { + p.internalLog(calldepth, DEBUG, entries...) +} + +// Trace Functions + +func (p *PackageLogger) Tracef(format string, args ...interface{}) { + p.internalLog(calldepth, TRACE, fmt.Sprintf(format, args...)) +} + +func (p *PackageLogger) Trace(entries ...interface{}) { + p.internalLog(calldepth, TRACE, entries...) +} + +func (p *PackageLogger) Flush() { + logger.Lock() + defer logger.Unlock() + logger.formatter.Flush() +} diff --git a/Godeps/_workspace/src/github.com/coreos/pkg/capnslog/syslog_formatter.go b/Godeps/_workspace/src/github.com/coreos/pkg/capnslog/syslog_formatter.go new file mode 100644 index 000000000000..4be5a1f2de39 --- /dev/null +++ b/Godeps/_workspace/src/github.com/coreos/pkg/capnslog/syslog_formatter.go @@ -0,0 +1,65 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
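+//
+// A syslog-backed formatter below is installed like any other Formatter; a
+// minimal sketch (the tag string is hypothetical):
+//
+//	if f, err := capnslog.NewDefaultSyslogFormatter("myservice"); err == nil {
+//		capnslog.SetFormatter(f)
+//	}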
+//
+// +build !windows
+
+package capnslog
+
+import (
+	"fmt"
+	"log/syslog"
+)
+
+func NewSyslogFormatter(w *syslog.Writer) Formatter {
+	return &syslogFormatter{w}
+}
+
+func NewDefaultSyslogFormatter(tag string) (Formatter, error) {
+	w, err := syslog.New(syslog.LOG_DEBUG, tag)
+	if err != nil {
+		return nil, err
+	}
+	return NewSyslogFormatter(w), nil
+}
+
+type syslogFormatter struct {
+	w *syslog.Writer
+}
+
+func (s *syslogFormatter) Format(pkg string, l LogLevel, _ int, entries ...interface{}) {
+	for _, entry := range entries {
+		str := fmt.Sprint(entry)
+		switch l {
+		case CRITICAL:
+			s.w.Crit(str)
+		case ERROR:
+			s.w.Err(str)
+		case WARNING:
+			s.w.Warning(str)
+		case NOTICE:
+			s.w.Notice(str)
+		case INFO:
+			s.w.Info(str)
+		case DEBUG:
+			s.w.Debug(str)
+		case TRACE:
+			s.w.Debug(str)
+		default:
+			panic("Unhandled loglevel")
+		}
+	}
+}
+
+func (s *syslogFormatter) Flush() {
+}
diff --git a/Godeps/_workspace/src/github.com/coreos/pkg/health/README.md b/Godeps/_workspace/src/github.com/coreos/pkg/health/README.md
new file mode 100644
index 000000000000..5ec34c21e0fd
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/coreos/pkg/health/README.md
@@ -0,0 +1,11 @@
+health
+====
+
+A simple framework for implementing an HTTP health check endpoint on servers.
+
+Users implement their `health.Checkable` types and create a `health.Checker`, which serves as an `http.Handler` via its `ServeHTTP` method.
+
+### Documentation
+
+For more details, visit the docs on [gopkgdoc](http://godoc.org/github.com/coreos/pkg/health)
+
diff --git a/Godeps/_workspace/src/github.com/coreos/pkg/health/health.go b/Godeps/_workspace/src/github.com/coreos/pkg/health/health.go
new file mode 100644
index 000000000000..a1c3610fa5c5
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/coreos/pkg/health/health.go
@@ -0,0 +1,127 @@
+package health
+
+import (
+	"expvar"
+	"fmt"
+	"log"
+	"net/http"
+
+	"github.com/coreos/pkg/httputil"
+)
+
+// Checkables should return nil when the thing they are checking is healthy, and an error otherwise.
+type Checkable interface {
+	Healthy() error
+}
+
+// Checker provides a way to make an endpoint which can be probed for system health.
+type Checker struct {
+	// Checks are the Checkables to be checked when probing.
+	Checks []Checkable
+
+	// UnhealthyHandler is called when one or more of the checks are unhealthy.
+	// If not provided, DefaultUnhealthyHandler is called.
+	UnhealthyHandler UnhealthyHandler
+
+	// HealthyHandler is called when all checks are healthy.
+	// If not provided, DefaultHealthyHandler is called.
+	HealthyHandler http.HandlerFunc
+}
+
+func (c Checker) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	unhealthyHandler := c.UnhealthyHandler
+	if unhealthyHandler == nil {
+		unhealthyHandler = DefaultUnhealthyHandler
+	}
+
+	successHandler := c.HealthyHandler
+	if successHandler == nil {
+		successHandler = DefaultHealthyHandler
+	}
+
+	if r.Method != "GET" {
+		w.Header().Set("Allow", "GET")
+		w.WriteHeader(http.StatusMethodNotAllowed)
+		return
+	}
+
+	if err := Check(c.Checks); err != nil {
+		unhealthyHandler(w, r, err)
+		return
+	}
+
+	successHandler(w, r)
+}
+
+type UnhealthyHandler func(w http.ResponseWriter, r *http.Request, err error)
+
+type StatusResponse struct {
+	Status  string                 `json:"status"`
+	Details *StatusResponseDetails `json:"details,omitempty"`
+}
+
+type StatusResponseDetails struct {
+	Code    int    `json:"code,omitempty"`
+	Message string `json:"message,omitempty"`
+}
+
+func Check(checks []Checkable) (err error) {
+	errs := []error{}
+	for _, c := range checks {
+		if e := c.Healthy(); e != nil {
+			errs = append(errs, e)
+		}
+	}
+
+	switch len(errs) {
+	case 0:
+		err = nil
+	case 1:
+		err = errs[0]
+	default:
+		err = fmt.Errorf("multiple health check failures: %v", errs)
+	}
+
+	return
+}
+
+func DefaultHealthyHandler(w http.ResponseWriter, r *http.Request) {
+	err := httputil.WriteJSONResponse(w, http.StatusOK, StatusResponse{
+		Status: "ok",
+	})
+	if err != nil {
+		// TODO(bobbyrullo): replace with logging from new logging pkg,
+		// once it lands.
+		log.Printf("Failed to write JSON response: %v", err)
+	}
+}
+
+func DefaultUnhealthyHandler(w http.ResponseWriter, r *http.Request, err error) {
+	writeErr := httputil.WriteJSONResponse(w, http.StatusInternalServerError, StatusResponse{
+		Status: "error",
+		Details: &StatusResponseDetails{
+			Code:    http.StatusInternalServerError,
+			Message: err.Error(),
+		},
+	})
+	if writeErr != nil {
+		// TODO(bobbyrullo): replace with logging from new logging pkg,
+		// once it lands.
+		log.Printf("Failed to write JSON response: %v", writeErr)
+	}
+}
+
+// ExpvarHandler is copied from https://golang.org/src/expvar/expvar.go, where it's sadly unexported.
+func ExpvarHandler(w http.ResponseWriter, r *http.Request) {
+	w.Header().Set("Content-Type", "application/json; charset=utf-8")
+	fmt.Fprintf(w, "{\n")
+	first := true
+	expvar.Do(func(kv expvar.KeyValue) {
+		if !first {
+			fmt.Fprintf(w, ",\n")
+		}
+		first = false
+		fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value)
+	})
+	fmt.Fprintf(w, "\n}\n")
+}
diff --git a/Godeps/_workspace/src/github.com/coreos/pkg/httputil/README.md b/Godeps/_workspace/src/github.com/coreos/pkg/httputil/README.md
new file mode 100644
index 000000000000..44fa751c4a0a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/coreos/pkg/httputil/README.md
@@ -0,0 +1,13 @@
+httputil
+====
+
+Common code for dealing with HTTP.
+
+Includes:
+
+* Code for returning JSON responses.
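+
+For example, a handler can use `WriteJSONResponse` to emit a payload with the right content type (a minimal sketch; the payload shape is hypothetical):
+
+```go
+package main
+
+import (
+	"net/http"
+
+	"github.com/coreos/pkg/httputil"
+)
+
+func statusHandler(w http.ResponseWriter, r *http.Request) {
+	// WriteJSONResponse sets Content-Type to application/json, writes the
+	// status code, and then writes the JSON-encoded payload.
+	_ = httputil.WriteJSONResponse(w, http.StatusOK, map[string]string{"status": "ok"})
+}
+```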
+ +### Documentation + +Visit the docs on [gopkgdoc](http://godoc.org/github.com/coreos/pkg/httputil) + diff --git a/Godeps/_workspace/src/github.com/coreos/pkg/httputil/json.go b/Godeps/_workspace/src/github.com/coreos/pkg/httputil/json.go new file mode 100644 index 000000000000..0b09235033ff --- /dev/null +++ b/Godeps/_workspace/src/github.com/coreos/pkg/httputil/json.go @@ -0,0 +1,27 @@ +package httputil + +import ( + "encoding/json" + "net/http" +) + +const ( + JSONContentType = "application/json" +) + +func WriteJSONResponse(w http.ResponseWriter, code int, resp interface{}) error { + enc, err := json.Marshal(resp) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + return err + } + + w.Header().Set("Content-Type", JSONContentType) + w.WriteHeader(code) + + _, err = w.Write(enc) + if err != nil { + return err + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/coreos/pkg/timeutil/backoff.go b/Godeps/_workspace/src/github.com/coreos/pkg/timeutil/backoff.go new file mode 100644 index 000000000000..b34fb49661b6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/coreos/pkg/timeutil/backoff.go @@ -0,0 +1,15 @@ +package timeutil + +import ( + "time" +) + +func ExpBackoff(prev, max time.Duration) time.Duration { + if prev == 0 { + return time.Second + } + if prev > max/2 { + return max + } + return 2 * prev +} diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/LICENSE b/Godeps/_workspace/src/github.com/docker/engine-api/LICENSE new file mode 100644 index 000000000000..c157bff96a05 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/engine-api/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015-2016 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/client/client.go b/Godeps/_workspace/src/github.com/docker/engine-api/client/client.go new file mode 100644 index 000000000000..8c8c6fd18280 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/engine-api/client/client.go @@ -0,0 +1,141 @@ +package client + +import ( + "fmt" + "net/http" + "net/url" + "os" + "path/filepath" + "strings" + + "github.com/docker/engine-api/client/transport" + "github.com/docker/go-connections/tlsconfig" +) + +// Client is the API client that performs all operations +// against a docker server. +type Client struct { + // proto holds the client protocol i.e. unix. + proto string + // addr holds the client address. + addr string + // basePath holds the path to prepend to the requests. + basePath string + // transport is the interface to send request with, it implements transport.Client. + transport transport.Client + // version of the server to talk to. + version string + // custom http headers configured by users. + customHTTPHeaders map[string]string +} + +// NewEnvClient initializes a new API client based on environment variables. +// Use DOCKER_HOST to set the url to the docker server. +// Use DOCKER_API_VERSION to set the version of the API to reach, leave empty for latest. +// Use DOCKER_CERT_PATH to load the tls certificates from. +// Use DOCKER_TLS_VERIFY to enable or disable TLS verification, off by default. 
+func NewEnvClient() (*Client, error) {
+	var client *http.Client
+	if dockerCertPath := os.Getenv("DOCKER_CERT_PATH"); dockerCertPath != "" {
+		options := tlsconfig.Options{
+			CAFile:             filepath.Join(dockerCertPath, "ca.pem"),
+			CertFile:           filepath.Join(dockerCertPath, "cert.pem"),
+			KeyFile:            filepath.Join(dockerCertPath, "key.pem"),
+			InsecureSkipVerify: os.Getenv("DOCKER_TLS_VERIFY") == "",
+		}
+		tlsc, err := tlsconfig.Client(options)
+		if err != nil {
+			return nil, err
+		}
+
+		client = &http.Client{
+			Transport: &http.Transport{
+				TLSClientConfig: tlsc,
+			},
+		}
+	}
+
+	host := os.Getenv("DOCKER_HOST")
+	if host == "" {
+		host = DefaultDockerHost
+	}
+	return NewClient(host, os.Getenv("DOCKER_API_VERSION"), client, nil)
+}
+
+// NewClient initializes a new API client for the given host and API version.
+// It won't send any version information if the version number is empty.
+// It uses the given http client as transport.
+// It also initializes the custom http headers to add to each request.
+func NewClient(host string, version string, client *http.Client, httpHeaders map[string]string) (*Client, error) {
+	proto, addr, basePath, err := ParseHost(host)
+	if err != nil {
+		return nil, err
+	}
+
+	transport, err := transport.NewTransportWithHTTP(proto, addr, client)
+	if err != nil {
+		return nil, err
+	}
+
+	return &Client{
+		proto:             proto,
+		addr:              addr,
+		basePath:          basePath,
+		transport:         transport,
+		version:           version,
+		customHTTPHeaders: httpHeaders,
+	}, nil
+}
+
+// getAPIPath returns the versioned request path to call the api.
+// It appends the query parameters to the path if they are not empty.
+func (cli *Client) getAPIPath(p string, query url.Values) string {
+	var apiPath string
+	if cli.version != "" {
+		v := strings.TrimPrefix(cli.version, "v")
+		apiPath = fmt.Sprintf("%s/v%s%s", cli.basePath, v, p)
+	} else {
+		apiPath = fmt.Sprintf("%s%s", cli.basePath, p)
+	}
+
+	u := &url.URL{
+		Path: apiPath,
+	}
+	if len(query) > 0 {
+		u.RawQuery = query.Encode()
+	}
+	return u.String()
+}
+
+// ClientVersion returns the version string associated with this
+// instance of the Client. Note that this value can be changed
+// via the DOCKER_API_VERSION env var.
+func (cli *Client) ClientVersion() string {
+	return cli.version
+}
+
+// UpdateClientVersion updates the version string associated with this
+// instance of the Client.
+func (cli *Client) UpdateClientVersion(v string) {
+	cli.version = v
+}
+
+// ParseHost verifies that the given host string is valid.
+func ParseHost(host string) (string, string, string, error) {
+	protoAddrParts := strings.SplitN(host, "://", 2)
+	if len(protoAddrParts) == 1 {
+		return "", "", "", fmt.Errorf("unable to parse docker host `%s`", host)
+	}
+
+	var basePath string
+	proto, addr := protoAddrParts[0], protoAddrParts[1]
+	if proto == "tcp" {
+		parsed, err := url.Parse("tcp://" + addr)
+		if err != nil {
+			return "", "", "", err
+		}
+		addr = parsed.Host
+		basePath = parsed.Path
+	}
+	return proto, addr, basePath, nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/client/client_darwin.go b/Godeps/_workspace/src/github.com/docker/engine-api/client/client_darwin.go
new file mode 100644
index 000000000000..4b47a178c48f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/engine-api/client/client_darwin.go
@@ -0,0 +1,4 @@
+package client
+
+// DefaultDockerHost defines os specific default if DOCKER_HOST is unset
+const DefaultDockerHost = "tcp://127.0.0.1:2375"
diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/client/client_unix.go b/Godeps/_workspace/src/github.com/docker/engine-api/client/client_unix.go
new file mode 100644
index 000000000000..572c5f87a78f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/engine-api/client/client_unix.go
@@ -0,0 +1,6 @@
+// +build linux freebsd solaris openbsd
+
+package client
+
+// DefaultDockerHost defines os specific default if DOCKER_HOST is unset
+const DefaultDockerHost = "unix:///var/run/docker.sock"
diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/client/client_windows.go b/Godeps/_workspace/src/github.com/docker/engine-api/client/client_windows.go
new file mode 100644
index 000000000000..07c0c7a77492
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/engine-api/client/client_windows.go
@@ -0,0 +1,4 @@
+package client
+
+// DefaultDockerHost defines os specific default if DOCKER_HOST is unset
+const DefaultDockerHost = "npipe:////./pipe/docker_engine"
diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/client/container_attach.go b/Godeps/_workspace/src/github.com/docker/engine-api/client/container_attach.go
new file mode 100644
index 000000000000..1b616bf03851
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/engine-api/client/container_attach.go
@@ -0,0 +1,34 @@
+package client
+
+import (
+	"net/url"
+
+	"github.com/docker/engine-api/types"
+	"golang.org/x/net/context"
+)
+
+// ContainerAttach attaches a connection to a container in the server.
+// It returns a types.HijackedResponse with the hijacked connection
+// and a reader to get output. It's up to the caller to close
+// the hijacked connection by calling types.HijackedResponse.Close.
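+//
+// A typical flow (sketch): read container output from the response's Reader,
+// write stdin to its Conn, and call Close when finished.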
+func (cli *Client) ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error) { + query := url.Values{} + if options.Stream { + query.Set("stream", "1") + } + if options.Stdin { + query.Set("stdin", "1") + } + if options.Stdout { + query.Set("stdout", "1") + } + if options.Stderr { + query.Set("stderr", "1") + } + if options.DetachKeys != "" { + query.Set("detachKeys", options.DetachKeys) + } + + headers := map[string][]string{"Content-Type": {"text/plain"}} + return cli.postHijacked(ctx, "/containers/"+container+"/attach", query, nil, headers) +} diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/client/container_commit.go b/Godeps/_workspace/src/github.com/docker/engine-api/client/container_commit.go new file mode 100644 index 000000000000..d5c474990664 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/engine-api/client/container_commit.go @@ -0,0 +1,53 @@ +package client + +import ( + "encoding/json" + "errors" + "net/url" + + distreference "github.com/docker/distribution/reference" + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/reference" + "golang.org/x/net/context" +) + +// ContainerCommit applies changes into a container and creates a new tagged image. +func (cli *Client) ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.ContainerCommitResponse, error) { + var repository, tag string + if options.Reference != "" { + distributionRef, err := distreference.ParseNamed(options.Reference) + if err != nil { + return types.ContainerCommitResponse{}, err + } + + if _, isCanonical := distributionRef.(distreference.Canonical); isCanonical { + return types.ContainerCommitResponse{}, errors.New("refusing to create a tag with a digest reference") + } + + tag = reference.GetTagFromNamedRef(distributionRef) + repository = distributionRef.Name() + } + + query := url.Values{} + query.Set("container", container) + query.Set("repo", repository) + query.Set("tag", tag) + query.Set("comment", options.Comment) + query.Set("author", options.Author) + for _, change := range options.Changes { + query.Add("changes", change) + } + if options.Pause != true { + query.Set("pause", "0") + } + + var response types.ContainerCommitResponse + resp, err := cli.post(ctx, "/commit", query, options.Config, nil) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + ensureReaderClosed(resp) + return response, err +} diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/client/container_copy.go b/Godeps/_workspace/src/github.com/docker/engine-api/client/container_copy.go new file mode 100644 index 000000000000..d3dd0b116c0f --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/engine-api/client/container_copy.go @@ -0,0 +1,97 @@ +package client + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "path/filepath" + "strings" + + "golang.org/x/net/context" + + "github.com/docker/engine-api/types" +) + +// ContainerStatPath returns Stat information about a path inside the container filesystem. +func (cli *Client) ContainerStatPath(ctx context.Context, containerID, path string) (types.ContainerPathStat, error) { + query := url.Values{} + query.Set("path", filepath.ToSlash(path)) // Normalize the paths used in the API. 
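+	// The daemon answers a HEAD request on the archive endpoint with the stat
+	// information base64-encoded in the X-Docker-Container-Path-Stat response
+	// header, rather than in a response body.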
+ + urlStr := fmt.Sprintf("/containers/%s/archive", containerID) + response, err := cli.head(ctx, urlStr, query, nil) + if err != nil { + return types.ContainerPathStat{}, err + } + defer ensureReaderClosed(response) + return getContainerPathStatFromHeader(response.header) +} + +// CopyToContainer copies content into the container filesystem. +func (cli *Client) CopyToContainer(ctx context.Context, container, path string, content io.Reader, options types.CopyToContainerOptions) error { + query := url.Values{} + query.Set("path", filepath.ToSlash(path)) // Normalize the paths used in the API. + // Do not allow for an existing directory to be overwritten by a non-directory and vice versa. + if !options.AllowOverwriteDirWithFile { + query.Set("noOverwriteDirNonDir", "true") + } + + apiPath := fmt.Sprintf("/containers/%s/archive", container) + + response, err := cli.putRaw(ctx, apiPath, query, content, nil) + if err != nil { + return err + } + defer ensureReaderClosed(response) + + if response.statusCode != http.StatusOK { + return fmt.Errorf("unexpected status code from daemon: %d", response.statusCode) + } + + return nil +} + +// CopyFromContainer gets the content from the container and returns it as a Reader +// to manipulate it in the host. It's up to the caller to close the reader. +func (cli *Client) CopyFromContainer(ctx context.Context, container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) { + query := make(url.Values, 1) + query.Set("path", filepath.ToSlash(srcPath)) // Normalize the paths used in the API. + + apiPath := fmt.Sprintf("/containers/%s/archive", container) + response, err := cli.get(ctx, apiPath, query, nil) + if err != nil { + return nil, types.ContainerPathStat{}, err + } + + if response.statusCode != http.StatusOK { + return nil, types.ContainerPathStat{}, fmt.Errorf("unexpected status code from daemon: %d", response.statusCode) + } + + // In order to get the copy behavior right, we need to know information + // about both the source and the destination. The response headers include + // stat info about the source that we can use in deciding exactly how to + // copy it locally. Along with the stat info about the local destination, + // we have everything we need to handle the multiple possibilities there + // can be when copying a file/dir from one location to another file/dir. 
+	stat, err := getContainerPathStatFromHeader(response.header)
+	if err != nil {
+		return nil, stat, fmt.Errorf("unable to get resource stat from response: %s", err)
+	}
+	return response.body, stat, err
+}
+
+func getContainerPathStatFromHeader(header http.Header) (types.ContainerPathStat, error) {
+	var stat types.ContainerPathStat
+
+	encodedStat := header.Get("X-Docker-Container-Path-Stat")
+	statDecoder := base64.NewDecoder(base64.StdEncoding, strings.NewReader(encodedStat))
+
+	err := json.NewDecoder(statDecoder).Decode(&stat)
+	if err != nil {
+		err = fmt.Errorf("unable to decode container path stat header: %s", err)
+	}
+
+	return stat, err
+}
diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/client/container_create.go b/Godeps/_workspace/src/github.com/docker/engine-api/client/container_create.go
new file mode 100644
index 000000000000..98935794dad9
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/engine-api/client/container_create.go
@@ -0,0 +1,46 @@
+package client
+
+import (
+	"encoding/json"
+	"net/url"
+	"strings"
+
+	"github.com/docker/engine-api/types"
+	"github.com/docker/engine-api/types/container"
+	"github.com/docker/engine-api/types/network"
+	"golang.org/x/net/context"
+)
+
+type configWrapper struct {
+	*container.Config
+	HostConfig       *container.HostConfig
+	NetworkingConfig *network.NetworkingConfig
+}
+
+// ContainerCreate creates a new container based on the given configuration.
+// It can be associated with a name, but it's not mandatory.
+func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, containerName string) (types.ContainerCreateResponse, error) {
+	var response types.ContainerCreateResponse
+	query := url.Values{}
+	if containerName != "" {
+		query.Set("name", containerName)
+	}
+
+	body := configWrapper{
+		Config:           config,
+		HostConfig:       hostConfig,
+		NetworkingConfig: networkingConfig,
+	}
+
+	serverResp, err := cli.post(ctx, "/containers/create", query, body, nil)
+	if err != nil {
+		if serverResp != nil && serverResp.statusCode == 404 && strings.Contains(err.Error(), "No such image") {
+			return response, imageNotFoundError{config.Image}
+		}
+		return response, err
+	}
+
+	err = json.NewDecoder(serverResp.body).Decode(&response)
+	ensureReaderClosed(serverResp)
+	return response, err
+}
diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/client/container_diff.go b/Godeps/_workspace/src/github.com/docker/engine-api/client/container_diff.go
new file mode 100644
index 000000000000..f4bb3a46b99f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/engine-api/client/container_diff.go
@@ -0,0 +1,23 @@
+package client
+
+import (
+	"encoding/json"
+	"net/url"
+
+	"github.com/docker/engine-api/types"
+	"golang.org/x/net/context"
+)
+
+// ContainerDiff shows differences in a container filesystem since it was started.
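+//
+// Illustrative usage sketch (not part of the upstream file; it assumes an
+// initialized *Client named cli and a hypothetical container name):
+//
+//	changes, err := cli.ContainerDiff(ctx, "my-container")
+//	if err != nil {
+//		return err
+//	}
+//	for _, change := range changes {
+//		fmt.Printf("%d %s\n", change.Kind, change.Path)
+//	}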
+func (cli *Client) ContainerDiff(ctx context.Context, containerID string) ([]types.ContainerChange, error) {
+	var changes []types.ContainerChange
+
+	serverResp, err := cli.get(ctx, "/containers/"+containerID+"/changes", url.Values{}, nil)
+	if err != nil {
+		return changes, err
+	}
+
+	err = json.NewDecoder(serverResp.body).Decode(&changes)
+	ensureReaderClosed(serverResp)
+	return changes, err
+}
diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/client/container_exec.go b/Godeps/_workspace/src/github.com/docker/engine-api/client/container_exec.go
new file mode 100644
index 000000000000..ff7e1a9d0596
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/engine-api/client/container_exec.go
@@ -0,0 +1,49 @@
+package client
+
+import (
+	"encoding/json"
+
+	"github.com/docker/engine-api/types"
+	"golang.org/x/net/context"
+)
+
+// ContainerExecCreate creates a new exec configuration to run an exec process.
+func (cli *Client) ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.ContainerExecCreateResponse, error) {
+	var response types.ContainerExecCreateResponse
+	resp, err := cli.post(ctx, "/containers/"+container+"/exec", nil, config, nil)
+	if err != nil {
+		return response, err
+	}
+	err = json.NewDecoder(resp.body).Decode(&response)
+	ensureReaderClosed(resp)
+	return response, err
+}
+
+// ContainerExecStart starts an exec process already created in the docker host.
+func (cli *Client) ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error {
+	resp, err := cli.post(ctx, "/exec/"+execID+"/start", nil, config, nil)
+	ensureReaderClosed(resp)
+	return err
+}
+
+// ContainerExecAttach attaches a connection to an exec process in the server.
+// It returns a types.HijackedResponse with the hijacked connection
+// and a reader to get output. It's up to the caller to close
+// the hijacked connection by calling types.HijackedResponse.Close.
+func (cli *Client) ContainerExecAttach(ctx context.Context, execID string, config types.ExecConfig) (types.HijackedResponse, error) {
+	headers := map[string][]string{"Content-Type": {"application/json"}}
+	return cli.postHijacked(ctx, "/exec/"+execID+"/start", nil, config, headers)
+}
+
+// ContainerExecInspect returns information about a specific exec process on the docker host.
+func (cli *Client) ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error) {
+	var response types.ContainerExecInspect
+	resp, err := cli.get(ctx, "/exec/"+execID+"/json", nil, nil)
+	if err != nil {
+		return response, err
+	}
+
+	err = json.NewDecoder(resp.body).Decode(&response)
+	ensureReaderClosed(resp)
+	return response, err
+}
diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/client/container_export.go b/Godeps/_workspace/src/github.com/docker/engine-api/client/container_export.go
new file mode 100644
index 000000000000..52194f3d3422
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/engine-api/client/container_export.go
@@ -0,0 +1,20 @@
+package client
+
+import (
+	"io"
+	"net/url"
+
+	"golang.org/x/net/context"
+)
+
+// ContainerExport retrieves the raw contents of a container
+// and returns them as an io.ReadCloser. It's up to the caller
+// to close the stream.
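+//
+// Illustrative usage sketch (assumes an initialized *Client named cli; the
+// container and file names are hypothetical):
+//
+//	rc, err := cli.ContainerExport(ctx, "my-container")
+//	if err != nil {
+//		return err
+//	}
+//	defer rc.Close()
+//	f, err := os.Create("container.tar")
+//	if err != nil {
+//		return err
+//	}
+//	defer f.Close()
+//	_, err = io.Copy(f, rc)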
+func (cli *Client) ContainerExport(ctx context.Context, containerID string) (io.ReadCloser, error) {
+	serverResp, err := cli.get(ctx, "/containers/"+containerID+"/export", url.Values{}, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	return serverResp.body, nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/client/container_inspect.go b/Godeps/_workspace/src/github.com/docker/engine-api/client/container_inspect.go
new file mode 100644
index 000000000000..afd71eefcb01
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/engine-api/client/container_inspect.go
@@ -0,0 +1,65 @@
+package client
+
+import (
+	"bytes"
+	"encoding/json"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+
+	"github.com/docker/engine-api/types"
+	"golang.org/x/net/context"
+)
+
+// ContainerInspect returns the container information.
+func (cli *Client) ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error) {
+	serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", nil, nil)
+	if err != nil {
+		if serverResp.statusCode == http.StatusNotFound {
+			return types.ContainerJSON{}, containerNotFoundError{containerID}
+		}
+		return types.ContainerJSON{}, err
+	}
+
+	var response types.ContainerJSON
+	err = json.NewDecoder(serverResp.body).Decode(&response)
+	ensureReaderClosed(serverResp)
+	return response, err
+}
+
+// ContainerInspectWithRaw returns the container information and its raw representation.
+func (cli *Client) ContainerInspectWithRaw(ctx context.Context, containerID string, getSize bool) (types.ContainerJSON, []byte, error) {
+	query := url.Values{}
+	if getSize {
+		query.Set("size", "1")
+	}
+	serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", query, nil)
+	if err != nil {
+		if serverResp.statusCode == http.StatusNotFound {
+			return types.ContainerJSON{}, nil, containerNotFoundError{containerID}
+		}
+		return types.ContainerJSON{}, nil, err
+	}
+	defer ensureReaderClosed(serverResp)
+
+	body, err := ioutil.ReadAll(serverResp.body)
+	if err != nil {
+		return types.ContainerJSON{}, nil, err
+	}
+
+	var response types.ContainerJSON
+	rdr := bytes.NewReader(body)
+	err = json.NewDecoder(rdr).Decode(&response)
+	return response, body, err
+}
+
+func (cli *Client) containerInspectWithResponse(ctx context.Context, containerID string, query url.Values) (types.ContainerJSON, *serverResponse, error) {
+	serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", query, nil)
+	if err != nil {
+		return types.ContainerJSON{}, serverResp, err
+	}
+
+	var response types.ContainerJSON
+	err = json.NewDecoder(serverResp.body).Decode(&response)
+	return response, serverResp, err
+}
diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/client/container_kill.go b/Godeps/_workspace/src/github.com/docker/engine-api/client/container_kill.go
new file mode 100644
index 000000000000..29f80c73adea
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/engine-api/client/container_kill.go
@@ -0,0 +1,17 @@
+package client
+
+import (
+	"net/url"
+
+	"golang.org/x/net/context"
+)
+
+// ContainerKill terminates the container process but does not remove the container from the docker host.
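+//
+// Illustrative usage sketch (assumes an initialized *Client named cli; the
+// container name is hypothetical):
+//
+//	if err := cli.ContainerKill(ctx, "my-container", "SIGKILL"); err != nil {
+//		return err
+//	}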
+func (cli *Client) ContainerKill(ctx context.Context, containerID, signal string) error { + query := url.Values{} + query.Set("signal", signal) + + resp, err := cli.post(ctx, "/containers/"+containerID+"/kill", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/client/container_list.go b/Godeps/_workspace/src/github.com/docker/engine-api/client/container_list.go new file mode 100644 index 000000000000..87f7333dc7bf --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/engine-api/client/container_list.go @@ -0,0 +1,56 @@ +package client + +import ( + "encoding/json" + "net/url" + "strconv" + + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/filters" + "golang.org/x/net/context" +) + +// ContainerList returns the list of containers in the docker host. +func (cli *Client) ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) { + query := url.Values{} + + if options.All { + query.Set("all", "1") + } + + if options.Limit != -1 { + query.Set("limit", strconv.Itoa(options.Limit)) + } + + if options.Since != "" { + query.Set("since", options.Since) + } + + if options.Before != "" { + query.Set("before", options.Before) + } + + if options.Size { + query.Set("size", "1") + } + + if options.Filter.Len() > 0 { + filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filter) + + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + + resp, err := cli.get(ctx, "/containers/json", query, nil) + if err != nil { + return nil, err + } + + var containers []types.Container + err = json.NewDecoder(resp.body).Decode(&containers) + ensureReaderClosed(resp) + return containers, err +} diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/client/container_logs.go b/Godeps/_workspace/src/github.com/docker/engine-api/client/container_logs.go new file mode 100644 index 000000000000..9699ac7dde90 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/engine-api/client/container_logs.go @@ -0,0 +1,48 @@ +package client + +import ( + "io" + "net/url" + "time" + + "golang.org/x/net/context" + + "github.com/docker/engine-api/types" + timetypes "github.com/docker/engine-api/types/time" +) + +// ContainerLogs returns the logs generated by a container in an io.ReadCloser. +// It's up to the caller to close the stream. 
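+//
+// Illustrative usage sketch (assumes an initialized *Client named cli; the
+// container name is hypothetical):
+//
+//	rc, err := cli.ContainerLogs(ctx, "my-container", types.ContainerLogsOptions{
+//		ShowStdout: true,
+//		ShowStderr: true,
+//		Tail:       "100",
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	defer rc.Close()
+//	io.Copy(os.Stdout, rc)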
+func (cli *Client) ContainerLogs(ctx context.Context, container string, options types.ContainerLogsOptions) (io.ReadCloser, error) { + query := url.Values{} + if options.ShowStdout { + query.Set("stdout", "1") + } + + if options.ShowStderr { + query.Set("stderr", "1") + } + + if options.Since != "" { + ts, err := timetypes.GetTimestamp(options.Since, time.Now()) + if err != nil { + return nil, err + } + query.Set("since", ts) + } + + if options.Timestamps { + query.Set("timestamps", "1") + } + + if options.Follow { + query.Set("follow", "1") + } + query.Set("tail", options.Tail) + + resp, err := cli.get(ctx, "/containers/"+container+"/logs", query, nil) + if err != nil { + return nil, err + } + return resp.body, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/client/container_pause.go b/Godeps/_workspace/src/github.com/docker/engine-api/client/container_pause.go new file mode 100644 index 000000000000..412067a7821f --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/engine-api/client/container_pause.go @@ -0,0 +1,10 @@ +package client + +import "golang.org/x/net/context" + +// ContainerPause pauses the main process of a given container without terminating it. +func (cli *Client) ContainerPause(ctx context.Context, containerID string) error { + resp, err := cli.post(ctx, "/containers/"+containerID+"/pause", nil, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/client/container_remove.go b/Godeps/_workspace/src/github.com/docker/engine-api/client/container_remove.go new file mode 100644 index 000000000000..cef4b8122089 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/engine-api/client/container_remove.go @@ -0,0 +1,27 @@ +package client + +import ( + "net/url" + + "github.com/docker/engine-api/types" + "golang.org/x/net/context" +) + +// ContainerRemove kills and removes a container from the docker host. +func (cli *Client) ContainerRemove(ctx context.Context, containerID string, options types.ContainerRemoveOptions) error { + query := url.Values{} + if options.RemoveVolumes { + query.Set("v", "1") + } + if options.RemoveLinks { + query.Set("link", "1") + } + + if options.Force { + query.Set("force", "1") + } + + resp, err := cli.delete(ctx, "/containers/"+containerID, query, nil) + ensureReaderClosed(resp) + return err +} diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/client/container_rename.go b/Godeps/_workspace/src/github.com/docker/engine-api/client/container_rename.go new file mode 100644 index 000000000000..0e718da7c6ef --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/engine-api/client/container_rename.go @@ -0,0 +1,16 @@ +package client + +import ( + "net/url" + + "golang.org/x/net/context" +) + +// ContainerRename changes the name of a given container. 
+func (cli *Client) ContainerRename(ctx context.Context, containerID, newContainerName string) error {
+	query := url.Values{}
+	query.Set("name", newContainerName)
+	resp, err := cli.post(ctx, "/containers/"+containerID+"/rename", query, nil, nil)
+	ensureReaderClosed(resp)
+	return err
+}
diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/client/container_resize.go b/Godeps/_workspace/src/github.com/docker/engine-api/client/container_resize.go
new file mode 100644
index 000000000000..b95d26b335a6
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/engine-api/client/container_resize.go
@@ -0,0 +1,29 @@
+package client
+
+import (
+	"net/url"
+	"strconv"
+
+	"github.com/docker/engine-api/types"
+	"golang.org/x/net/context"
+)
+
+// ContainerResize changes the size of the tty for a container.
+func (cli *Client) ContainerResize(ctx context.Context, containerID string, options types.ResizeOptions) error {
+	return cli.resize(ctx, "/containers/"+containerID, options.Height, options.Width)
+}
+
+// ContainerExecResize changes the size of the tty for an exec process running inside a container.
+func (cli *Client) ContainerExecResize(ctx context.Context, execID string, options types.ResizeOptions) error {
+	return cli.resize(ctx, "/exec/"+execID, options.Height, options.Width)
+}
+
+func (cli *Client) resize(ctx context.Context, basePath string, height, width int) error {
+	query := url.Values{}
+	query.Set("h", strconv.Itoa(height))
+	query.Set("w", strconv.Itoa(width))
+
+	resp, err := cli.post(ctx, basePath+"/resize", query, nil, nil)
+	ensureReaderClosed(resp)
+	return err
+}
diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/client/container_restart.go b/Godeps/_workspace/src/github.com/docker/engine-api/client/container_restart.go
new file mode 100644
index 000000000000..1c74b18ca515
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/engine-api/client/container_restart.go
@@ -0,0 +1,19 @@
+package client
+
+import (
+	"net/url"
+	"strconv"
+
+	"golang.org/x/net/context"
+)
+
+// ContainerRestart stops and starts a container again.
+// It makes the daemon wait for the container to be up again for
+// a specific amount of time, given the timeout.
+func (cli *Client) ContainerRestart(ctx context.Context, containerID string, timeout int) error {
+	query := url.Values{}
+	query.Set("t", strconv.Itoa(timeout))
+	resp, err := cli.post(ctx, "/containers/"+containerID+"/restart", query, nil, nil)
+	ensureReaderClosed(resp)
+	return err
+}
diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/client/container_start.go b/Godeps/_workspace/src/github.com/docker/engine-api/client/container_start.go
new file mode 100644
index 000000000000..12a979422eff
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/engine-api/client/container_start.go
@@ -0,0 +1,10 @@
+package client
+
+import "golang.org/x/net/context"
+
+// ContainerStart sends a request to the docker daemon to start a container.
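+//
+// Illustrative usage sketch (assumes an initialized *Client named cli; the
+// image and container name are hypothetical):
+//
+//	created, err := cli.ContainerCreate(ctx, &container.Config{Image: "busybox"}, nil, nil, "my-container")
+//	if err != nil {
+//		return err
+//	}
+//	if err := cli.ContainerStart(ctx, created.ID); err != nil {
+//		return err
+//	}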
+func (cli *Client) ContainerStart(ctx context.Context, containerID string) error {
+	resp, err := cli.post(ctx, "/containers/"+containerID+"/start", nil, nil, nil)
+	ensureReaderClosed(resp)
+	return err
+}
diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/client/container_stats.go b/Godeps/_workspace/src/github.com/docker/engine-api/client/container_stats.go
new file mode 100644
index 000000000000..2cc67c3af173
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/engine-api/client/container_stats.go
@@ -0,0 +1,24 @@
+package client
+
+import (
+	"io"
+	"net/url"
+
+	"golang.org/x/net/context"
+)
+
+// ContainerStats returns near-realtime stats for a given container.
+// It's up to the caller to close the io.ReadCloser returned.
+func (cli *Client) ContainerStats(ctx context.Context, containerID string, stream bool) (io.ReadCloser, error) {
+	query := url.Values{}
+	query.Set("stream", "0")
+	if stream {
+		query.Set("stream", "1")
+	}
+
+	resp, err := cli.get(ctx, "/containers/"+containerID+"/stats", query, nil)
+	if err != nil {
+		return nil, err
+	}
+	return resp.body, err
+}
diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/client/container_stop.go b/Godeps/_workspace/src/github.com/docker/engine-api/client/container_stop.go
new file mode 100644
index 000000000000..34d786291d58
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/engine-api/client/container_stop.go
@@ -0,0 +1,18 @@
+package client
+
+import (
+	"net/url"
+	"strconv"
+
+	"golang.org/x/net/context"
+)
+
+// ContainerStop stops a container. The request blocks until the container
+// stops or the timeout expires.
+func (cli *Client) ContainerStop(ctx context.Context, containerID string, timeout int) error {
+	query := url.Values{}
+	query.Set("t", strconv.Itoa(timeout))
+	resp, err := cli.post(ctx, "/containers/"+containerID+"/stop", query, nil, nil)
+	ensureReaderClosed(resp)
+	return err
+}
diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/client/container_top.go b/Godeps/_workspace/src/github.com/docker/engine-api/client/container_top.go
new file mode 100644
index 000000000000..5ad926ae088e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/engine-api/client/container_top.go
@@ -0,0 +1,28 @@
+package client
+
+import (
+	"encoding/json"
+	"net/url"
+	"strings"
+
+	"github.com/docker/engine-api/types"
+	"golang.org/x/net/context"
+)
+
+// ContainerTop shows process information from within a container.
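+//
+// Illustrative usage sketch (assumes an initialized *Client named cli; the ps
+// arguments are passed through to the daemon as-is):
+//
+//	procs, err := cli.ContainerTop(ctx, "my-container", []string{"aux"})
+//	if err != nil {
+//		return err
+//	}
+//	fmt.Println(procs.Titles)
+//	for _, p := range procs.Processes {
+//		fmt.Println(p)
+//	}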
+func (cli *Client) ContainerTop(ctx context.Context, containerID string, arguments []string) (types.ContainerProcessList, error) {
+	var response types.ContainerProcessList
+	query := url.Values{}
+	if len(arguments) > 0 {
+		query.Set("ps_args", strings.Join(arguments, " "))
+	}
+
+	resp, err := cli.get(ctx, "/containers/"+containerID+"/top", query, nil)
+	if err != nil {
+		return response, err
+	}
+
+	err = json.NewDecoder(resp.body).Decode(&response)
+	ensureReaderClosed(resp)
+	return response, err
+}
diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/client/container_unpause.go b/Godeps/_workspace/src/github.com/docker/engine-api/client/container_unpause.go
new file mode 100644
index 000000000000..5c76211256cb
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/engine-api/client/container_unpause.go
@@ -0,0 +1,10 @@
+package client
+
+import "golang.org/x/net/context"
+
+// ContainerUnpause resumes the process execution within a container
+func (cli *Client) ContainerUnpause(ctx context.Context, containerID string) error {
+	resp, err := cli.post(ctx, "/containers/"+containerID+"/unpause", nil, nil, nil)
+	ensureReaderClosed(resp)
+	return err
+}
diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/client/container_update.go b/Godeps/_workspace/src/github.com/docker/engine-api/client/container_update.go
new file mode 100644
index 000000000000..a5a1826dc4ac
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/engine-api/client/container_update.go
@@ -0,0 +1,13 @@
+package client
+
+import (
+	"github.com/docker/engine-api/types/container"
+	"golang.org/x/net/context"
+)
+
+// ContainerUpdate updates resources of a container
+func (cli *Client) ContainerUpdate(ctx context.Context, containerID string, updateConfig container.UpdateConfig) error {
+	resp, err := cli.post(ctx, "/containers/"+containerID+"/update", nil, updateConfig, nil)
+	ensureReaderClosed(resp)
+	return err
+}
diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/client/container_wait.go b/Godeps/_workspace/src/github.com/docker/engine-api/client/container_wait.go
new file mode 100644
index 000000000000..c26ff3f37869
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/engine-api/client/container_wait.go
@@ -0,0 +1,26 @@
+package client
+
+import (
+	"encoding/json"
+
+	"golang.org/x/net/context"
+
+	"github.com/docker/engine-api/types"
+)
+
+// ContainerWait blocks until a container exits.
+// It returns the container's exit status code.
+func (cli *Client) ContainerWait(ctx context.Context, containerID string) (int, error) {
+	resp, err := cli.post(ctx, "/containers/"+containerID+"/wait", nil, nil, nil)
+	if err != nil {
+		return -1, err
+	}
+	defer ensureReaderClosed(resp)
+
+	var res types.ContainerWaitResponse
+	if err := json.NewDecoder(resp.body).Decode(&res); err != nil {
+		return -1, err
+	}
+
+	return res.StatusCode, nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/client/errors.go b/Godeps/_workspace/src/github.com/docker/engine-api/client/errors.go
new file mode 100644
index 000000000000..17828bb7168b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/engine-api/client/errors.go
@@ -0,0 +1,94 @@
+package client
+
+import (
+	"errors"
+	"fmt"
+)
+
+// ErrConnectionFailed is an error raised when the connection between the client and the server failed.
+var ErrConnectionFailed = errors.New("Cannot connect to the Docker daemon. Is the docker daemon running on this host?")
+
+// imageNotFoundError implements an error returned when an image is not in the docker host.
+type imageNotFoundError struct {
+	imageID string
+}
+
+// Error returns a string representation of an imageNotFoundError
+func (i imageNotFoundError) Error() string {
+	return fmt.Sprintf("Error: No such image: %s", i.imageID)
+}
+
+// IsErrImageNotFound returns true if the error is caused
+// when an image is not found in the docker host.
+func IsErrImageNotFound(err error) bool {
+	_, ok := err.(imageNotFoundError)
+	return ok
+}
+
+// containerNotFoundError implements an error returned when a container is not in the docker host.
+type containerNotFoundError struct {
+	containerID string
+}
+
+// Error returns a string representation of a containerNotFoundError
+func (e containerNotFoundError) Error() string {
+	return fmt.Sprintf("Error: No such container: %s", e.containerID)
+}
+
+// IsErrContainerNotFound returns true if the error is caused
+// when a container is not found in the docker host.
+func IsErrContainerNotFound(err error) bool {
+	_, ok := err.(containerNotFoundError)
+	return ok
+}
+
+// networkNotFoundError implements an error returned when a network is not in the docker host.
+type networkNotFoundError struct {
+	networkID string
+}
+
+// Error returns a string representation of a networkNotFoundError
+func (e networkNotFoundError) Error() string {
+	return fmt.Sprintf("Error: No such network: %s", e.networkID)
+}
+
+// IsErrNetworkNotFound returns true if the error is caused
+// when a network is not found in the docker host.
+func IsErrNetworkNotFound(err error) bool {
+	_, ok := err.(networkNotFoundError)
+	return ok
+}
+
+// volumeNotFoundError implements an error returned when a volume is not in the docker host.
+type volumeNotFoundError struct {
+	volumeID string
+}
+
+// Error returns a string representation of a volumeNotFoundError
+func (e volumeNotFoundError) Error() string {
+	return fmt.Sprintf("Error: No such volume: %s", e.volumeID)
+}
+
+// IsErrVolumeNotFound returns true if the error is caused
+// when a volume is not found in the docker host.
+func IsErrVolumeNotFound(err error) bool {
+	_, ok := err.(volumeNotFoundError)
+	return ok
+}
+
+// unauthorizedError represents an authorization error in a remote registry.
+type unauthorizedError struct {
+	cause error
+}
+
+// Error returns a string representation of an unauthorizedError
+func (u unauthorizedError) Error() string {
+	return u.cause.Error()
+}
+
+// IsErrUnauthorized returns true if the error is caused
+// when a remote registry authentication fails
+func IsErrUnauthorized(err error) bool {
+	_, ok := err.(unauthorizedError)
+	return ok
+}
diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/client/events.go b/Godeps/_workspace/src/github.com/docker/engine-api/client/events.go
new file mode 100644
index 000000000000..e379ce0a2945
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/engine-api/client/events.go
@@ -0,0 +1,48 @@
+package client
+
+import (
+	"io"
+	"net/url"
+	"time"
+
+	"golang.org/x/net/context"
+
+	"github.com/docker/engine-api/types"
+	"github.com/docker/engine-api/types/filters"
+	timetypes "github.com/docker/engine-api/types/time"
+)
+
+// Events returns a stream of events in the daemon in a ReadCloser.
+// It's up to the caller to close the stream.
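+//
+// Illustrative usage sketch (assumes an initialized *Client named cli; the
+// filter shown limits the stream to container events):
+//
+//	f := filters.NewArgs()
+//	f.Add("type", "container")
+//	rc, err := cli.Events(ctx, types.EventsOptions{Filters: f})
+//	if err != nil {
+//		return err
+//	}
+//	defer rc.Close()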
+func (cli *Client) Events(ctx context.Context, options types.EventsOptions) (io.ReadCloser, error) { + query := url.Values{} + ref := time.Now() + + if options.Since != "" { + ts, err := timetypes.GetTimestamp(options.Since, ref) + if err != nil { + return nil, err + } + query.Set("since", ts) + } + if options.Until != "" { + ts, err := timetypes.GetTimestamp(options.Until, ref) + if err != nil { + return nil, err + } + query.Set("until", ts) + } + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToParam(options.Filters) + if err != nil { + return nil, err + } + query.Set("filters", filterJSON) + } + + serverResponse, err := cli.get(ctx, "/events", query, nil) + if err != nil { + return nil, err + } + return serverResponse.body, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/client/hijack.go b/Godeps/_workspace/src/github.com/docker/engine-api/client/hijack.go new file mode 100644 index 000000000000..dbd91ef62996 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/engine-api/client/hijack.go @@ -0,0 +1,174 @@ +package client + +import ( + "crypto/tls" + "errors" + "fmt" + "net" + "net/http/httputil" + "net/url" + "strings" + "time" + + "github.com/docker/engine-api/types" + "github.com/docker/go-connections/sockets" + "golang.org/x/net/context" +) + +// tlsClientCon holds tls information and a dialed connection. +type tlsClientCon struct { + *tls.Conn + rawConn net.Conn +} + +func (c *tlsClientCon) CloseWrite() error { + // Go standard tls.Conn doesn't provide the CloseWrite() method so we do it + // on its underlying connection. + if conn, ok := c.rawConn.(types.CloseWriter); ok { + return conn.CloseWrite() + } + return nil +} + +// postHijacked sends a POST request and hijacks the connection. +func (cli *Client) postHijacked(ctx context.Context, path string, query url.Values, body interface{}, headers map[string][]string) (types.HijackedResponse, error) { + bodyEncoded, err := encodeData(body) + if err != nil { + return types.HijackedResponse{}, err + } + + req, err := cli.newRequest("POST", path, query, bodyEncoded, headers) + if err != nil { + return types.HijackedResponse{}, err + } + req.Host = cli.addr + + req.Header.Set("Connection", "Upgrade") + req.Header.Set("Upgrade", "tcp") + + conn, err := dial(cli.proto, cli.addr, cli.transport.TLSConfig()) + if err != nil { + if strings.Contains(err.Error(), "connection refused") { + return types.HijackedResponse{}, fmt.Errorf("Cannot connect to the Docker daemon. Is 'docker daemon' running on this host?") + } + return types.HijackedResponse{}, err + } + + // When we set up a TCP connection for hijack, there could be long periods + // of inactivity (a long running command with no output) that in certain + // network setups may cause ECONNTIMEOUT, leaving the client in an unknown + // state. 
Setting TCP KeepAlive on the socket connection will prevent
+	// ECONNTIMEOUT unless the socket connection truly is broken
+	if tcpConn, ok := conn.(*net.TCPConn); ok {
+		tcpConn.SetKeepAlive(true)
+		tcpConn.SetKeepAlivePeriod(30 * time.Second)
+	}
+
+	clientconn := httputil.NewClientConn(conn, nil)
+	defer clientconn.Close()
+
+	// Server hijacks the connection, error 'connection closed' expected
+	_, err = clientconn.Do(req)
+
+	rwc, br := clientconn.Hijack()
+
+	return types.HijackedResponse{Conn: rwc, Reader: br}, err
+}
+
+func tlsDial(network, addr string, config *tls.Config) (net.Conn, error) {
+	return tlsDialWithDialer(new(net.Dialer), network, addr, config)
+}
+
+// We need to copy Go's implementation of tls.Dial (pkg/crypto/tls/tls.go) in
+// order to return our custom tlsClientCon struct which holds both the tls.Conn
+// object _and_ its underlying raw connection. The rationale for this is that
+// we need to be able to close the write end of the connection when attaching,
+// which tls.Conn does not provide.
+func tlsDialWithDialer(dialer *net.Dialer, network, addr string, config *tls.Config) (net.Conn, error) {
+	// We want the Timeout and Deadline values from dialer to cover the
+	// whole process: TCP connection and TLS handshake. This means that we
+	// also need to start our own timers now.
+	timeout := dialer.Timeout
+
+	if !dialer.Deadline.IsZero() {
+		deadlineTimeout := dialer.Deadline.Sub(time.Now())
+		if timeout == 0 || deadlineTimeout < timeout {
+			timeout = deadlineTimeout
+		}
+	}
+
+	var errChannel chan error
+
+	if timeout != 0 {
+		errChannel = make(chan error, 2)
+		time.AfterFunc(timeout, func() {
+			errChannel <- errors.New("")
+		})
+	}
+
+	proxyDialer, err := sockets.DialerFromEnvironment(dialer)
+	if err != nil {
+		return nil, err
+	}
+
+	rawConn, err := proxyDialer.Dial(network, addr)
+	if err != nil {
+		return nil, err
+	}
+	// When we set up a TCP connection for hijack, there could be long periods
+	// of inactivity (a long running command with no output) that in certain
+	// network setups may cause ECONNTIMEOUT, leaving the client in an unknown
+	// state. Setting TCP KeepAlive on the socket connection will prevent
+	// ECONNTIMEOUT unless the socket connection truly is broken
+	if tcpConn, ok := rawConn.(*net.TCPConn); ok {
+		tcpConn.SetKeepAlive(true)
+		tcpConn.SetKeepAlivePeriod(30 * time.Second)
+	}
+
+	colonPos := strings.LastIndex(addr, ":")
+	if colonPos == -1 {
+		colonPos = len(addr)
+	}
+	hostname := addr[:colonPos]
+
+	// If no ServerName is set, infer the ServerName
+	// from the hostname we're connecting to.
+	if config.ServerName == "" {
+		// Make a copy to avoid polluting argument or default.
+		c := *config
+		c.ServerName = hostname
+		config = &c
+	}
+
+	conn := tls.Client(rawConn, config)
+
+	if timeout == 0 {
+		err = conn.Handshake()
+	} else {
+		go func() {
+			errChannel <- conn.Handshake()
+		}()
+
+		err = <-errChannel
+	}
+
+	if err != nil {
+		rawConn.Close()
+		return nil, err
+	}
+
+	// This is where Docker differs from the standard crypto/tls package: we
+	// return a wrapper which holds both the TLS and raw connections.
+	return &tlsClientCon{conn, rawConn}, nil
+}
+
+func dial(proto, addr string, tlsConfig *tls.Config) (net.Conn, error) {
+	if tlsConfig != nil && proto != "unix" && proto != "npipe" {
+		// Notice this isn't Go's standard tls.Dial function
+		return tlsDial(proto, addr, tlsConfig)
+	}
+	if proto == "npipe" {
+		return sockets.DialPipe(addr, 32*time.Second)
+	}
+	return net.Dial(proto, addr)
+}
diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/client/image_build.go b/Godeps/_workspace/src/github.com/docker/engine-api/client/image_build.go
new file mode 100644
index 000000000000..4165c4e9a843
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/engine-api/client/image_build.go
@@ -0,0 +1,135 @@
+package client
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"io"
+	"net/http"
+	"net/url"
+	"regexp"
+	"strconv"
+	"strings"
+
+	"golang.org/x/net/context"
+
+	"github.com/docker/engine-api/types"
+	"github.com/docker/engine-api/types/container"
+)
+
+var headerRegexp = regexp.MustCompile(`\ADocker/.+\s\((.+)\)\z`)
+
+// ImageBuild sends a request to the daemon to build images.
+// The Body in the response implements an io.ReadCloser and it's up to the caller to
+// close it.
+func (cli *Client) ImageBuild(ctx context.Context, buildContext io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) {
+	query, err := imageBuildOptionsToQuery(options)
+	if err != nil {
+		return types.ImageBuildResponse{}, err
+	}
+
+	headers := http.Header(make(map[string][]string))
+	buf, err := json.Marshal(options.AuthConfigs)
+	if err != nil {
+		return types.ImageBuildResponse{}, err
+	}
+	headers.Add("X-Registry-Config", base64.URLEncoding.EncodeToString(buf))
+	headers.Set("Content-Type", "application/tar")
+
+	serverResp, err := cli.postRaw(ctx, "/build", query, buildContext, headers)
+	if err != nil {
+		return types.ImageBuildResponse{}, err
+	}
+
+	osType := getDockerOS(serverResp.header.Get("Server"))
+
+	return types.ImageBuildResponse{
+		Body:   serverResp.body,
+		OSType: osType,
+	}, nil
+}
+
+func imageBuildOptionsToQuery(options types.ImageBuildOptions) (url.Values, error) {
+	query := url.Values{
+		"t": options.Tags,
+	}
+	if options.SuppressOutput {
+		query.Set("q", "1")
+	}
+	if options.RemoteContext != "" {
+		query.Set("remote", options.RemoteContext)
+	}
+	if options.NoCache {
+		query.Set("nocache", "1")
+	}
+	if options.Remove {
+		query.Set("rm", "1")
+	} else {
+		query.Set("rm", "0")
+	}
+
+	if options.ForceRemove {
+		query.Set("forcerm", "1")
+	}
+
+	if options.PullParent {
+		query.Set("pull", "1")
+	}
+
+	if !container.Isolation.IsDefault(options.Isolation) {
+		query.Set("isolation", string(options.Isolation))
+	}
+
+	query.Set("cpusetcpus", options.CPUSetCPUs)
+	query.Set("cpusetmems", options.CPUSetMems)
+	query.Set("cpushares", strconv.FormatInt(options.CPUShares, 10))
+	query.Set("cpuquota", strconv.FormatInt(options.CPUQuota, 10))
+	query.Set("cpuperiod", strconv.FormatInt(options.CPUPeriod, 10))
+	query.Set("memory", strconv.FormatInt(options.Memory, 10))
+	query.Set("memswap", strconv.FormatInt(options.MemorySwap, 10))
+	query.Set("cgroupparent", options.CgroupParent)
+	query.Set("shmsize", strconv.FormatInt(options.ShmSize, 10))
+	query.Set("dockerfile", options.Dockerfile)
+
+	ulimitsJSON, err := json.Marshal(options.Ulimits)
+	if err != nil {
+		return query, err
+	}
+	query.Set("ulimits", string(ulimitsJSON))
+
+	buildArgsJSON, err := json.Marshal(options.BuildArgs)
+	if err != nil {
+		return query, err
+	}
+	query.Set("buildargs", string(buildArgsJSON))
+
+	labelsJSON, err := json.Marshal(options.Labels)
+	if err != nil {
+		return query, err
+	}
+	query.Set("labels", string(labelsJSON))
+	return query, nil
+}
+
+func getDockerOS(serverHeader string) string {
+	var osType string
+	matches := headerRegexp.FindStringSubmatch(serverHeader)
+	if len(matches) > 0 {
+		osType = matches[1]
+	}
+	return osType
+}
+
+// convertKVStringsToMap converts ["key=value"] to {"key":"value"}
+func convertKVStringsToMap(values []string) map[string]string {
+	result := make(map[string]string, len(values))
+	for _, value := range values {
+		kv := strings.SplitN(value, "=", 2)
+		if len(kv) == 1 {
+			result[kv[0]] = ""
+		} else {
+			result[kv[0]] = kv[1]
+		}
+	}
+
+	return result
+}
diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/client/image_create.go b/Godeps/_workspace/src/github.com/docker/engine-api/client/image_create.go
new file mode 100644
index 000000000000..6dfc0391c00e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/engine-api/client/image_create.go
@@ -0,0 +1,34 @@
+package client
+
+import (
+	"io"
+	"net/url"
+
+	"golang.org/x/net/context"
+
+	"github.com/docker/engine-api/types"
+	"github.com/docker/engine-api/types/reference"
+)
+
+// ImageCreate creates a new image based on the parent options.
+// It returns the JSON content in the response body.
+func (cli *Client) ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error) {
+	repository, tag, err := reference.Parse(parentReference)
+	if err != nil {
+		return nil, err
+	}
+
+	query := url.Values{}
+	query.Set("fromImage", repository)
+	query.Set("tag", tag)
+	resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth)
+	if err != nil {
+		return nil, err
+	}
+	return resp.body, nil
+}
+
+func (cli *Client) tryImageCreate(ctx context.Context, query url.Values, registryAuth string) (*serverResponse, error) {
+	headers := map[string][]string{"X-Registry-Auth": {registryAuth}}
+	return cli.post(ctx, "/images/create", query, nil, headers)
+}
diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/client/image_history.go b/Godeps/_workspace/src/github.com/docker/engine-api/client/image_history.go
new file mode 100644
index 000000000000..b2840b5ed849
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/engine-api/client/image_history.go
@@ -0,0 +1,22 @@
+package client
+
+import (
+	"encoding/json"
+	"net/url"
+
+	"github.com/docker/engine-api/types"
+	"golang.org/x/net/context"
+)
+
+// ImageHistory returns the changes in an image in history format.
+func (cli *Client) ImageHistory(ctx context.Context, imageID string) ([]types.ImageHistory, error) {
+	var history []types.ImageHistory
+	serverResp, err := cli.get(ctx, "/images/"+imageID+"/history", url.Values{}, nil)
+	if err != nil {
+		return history, err
+	}
+
+	err = json.NewDecoder(serverResp.body).Decode(&history)
+	ensureReaderClosed(serverResp)
+	return history, err
+}
diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/client/image_import.go b/Godeps/_workspace/src/github.com/docker/engine-api/client/image_import.go
new file mode 100644
index 000000000000..4e8749a01d5b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/engine-api/client/image_import.go
@@ -0,0 +1,37 @@
+package client
+
+import (
+	"io"
+	"net/url"
+
+	"golang.org/x/net/context"
+
+	"github.com/docker/distribution/reference"
+	"github.com/docker/engine-api/types"
+)
+
+// ImageImport creates a new image based on the source options.
+// It returns the JSON content in the response body. +func (cli *Client) ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) { + if ref != "" { + //Check if the given image name can be resolved + if _, err := reference.ParseNamed(ref); err != nil { + return nil, err + } + } + + query := url.Values{} + query.Set("fromSrc", source.SourceName) + query.Set("repo", ref) + query.Set("tag", options.Tag) + query.Set("message", options.Message) + for _, change := range options.Changes { + query.Add("changes", change) + } + + resp, err := cli.postRaw(ctx, "/images/create", query, source.Source, nil) + if err != nil { + return nil, err + } + return resp.body, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/client/image_inspect.go b/Godeps/_workspace/src/github.com/docker/engine-api/client/image_inspect.go new file mode 100644 index 000000000000..859ba640869f --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/engine-api/client/image_inspect.go @@ -0,0 +1,38 @@ +package client + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + "net/url" + + "github.com/docker/engine-api/types" + "golang.org/x/net/context" +) + +// ImageInspectWithRaw returns the image information and its raw representation. +func (cli *Client) ImageInspectWithRaw(ctx context.Context, imageID string, getSize bool) (types.ImageInspect, []byte, error) { + query := url.Values{} + if getSize { + query.Set("size", "1") + } + serverResp, err := cli.get(ctx, "/images/"+imageID+"/json", query, nil) + if err != nil { + if serverResp.statusCode == http.StatusNotFound { + return types.ImageInspect{}, nil, imageNotFoundError{imageID} + } + return types.ImageInspect{}, nil, err + } + defer ensureReaderClosed(serverResp) + + body, err := ioutil.ReadAll(serverResp.body) + if err != nil { + return types.ImageInspect{}, nil, err + } + + var response types.ImageInspect + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&response) + return response, body, err +} diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/client/image_list.go b/Godeps/_workspace/src/github.com/docker/engine-api/client/image_list.go new file mode 100644 index 000000000000..347810e663d2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/engine-api/client/image_list.go @@ -0,0 +1,40 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/filters" + "golang.org/x/net/context" +) + +// ImageList returns a list of images in the docker host. 
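+//
+// Illustrative usage sketch (assumes an initialized *Client named cli):
+//
+//	images, err := cli.ImageList(ctx, types.ImageListOptions{All: true})
+//	if err != nil {
+//		return err
+//	}
+//	for _, img := range images {
+//		fmt.Println(img.ID, img.RepoTags)
+//	}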
+func (cli *Client) ImageList(ctx context.Context, options types.ImageListOptions) ([]types.Image, error) { + var images []types.Image + query := url.Values{} + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToParam(options.Filters) + if err != nil { + return images, err + } + query.Set("filters", filterJSON) + } + if options.MatchName != "" { + // FIXME rename this parameter, to not be confused with the filters flag + query.Set("filter", options.MatchName) + } + if options.All { + query.Set("all", "1") + } + + serverResp, err := cli.get(ctx, "/images/json", query, nil) + if err != nil { + return images, err + } + + err = json.NewDecoder(serverResp.body).Decode(&images) + ensureReaderClosed(serverResp) + return images, err +} diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/client/image_load.go b/Godeps/_workspace/src/github.com/docker/engine-api/client/image_load.go new file mode 100644 index 000000000000..84ee19c30993 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/engine-api/client/image_load.go @@ -0,0 +1,30 @@ +package client + +import ( + "io" + "net/url" + + "golang.org/x/net/context" + + "github.com/docker/engine-api/types" +) + +// ImageLoad loads an image in the docker host from the client host. +// It's up to the caller to close the io.ReadCloser returned by +// this function. +func (cli *Client) ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error) { + v := url.Values{} + v.Set("quiet", "0") + if quiet { + v.Set("quiet", "1") + } + headers := map[string][]string{"Content-Type": {"application/x-tar"}} + resp, err := cli.postRaw(ctx, "/images/load", v, input, headers) + if err != nil { + return types.ImageLoadResponse{}, err + } + return types.ImageLoadResponse{ + Body: resp.body, + JSON: resp.header.Get("Content-Type") == "application/json", + }, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/client/image_pull.go b/Godeps/_workspace/src/github.com/docker/engine-api/client/image_pull.go new file mode 100644 index 000000000000..0584f00bd40a --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/engine-api/client/image_pull.go @@ -0,0 +1,46 @@ +package client + +import ( + "io" + "net/http" + "net/url" + + "golang.org/x/net/context" + + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/reference" +) + +// ImagePull requests the docker host to pull an image from a remote registry. +// It executes the privileged function if the operation is unauthorized +// and it tries one more time. +// It's up to the caller to handle the io.ReadCloser and close it properly. 
+//
+// FIXME(vdemeester): this is currently used in a few ways in docker/docker
+// - if not in trusted content, ref is used to pass the whole reference, and tag is empty
+// - if in trusted content, ref is used to pass the reference name, and tag for the digest
+func (cli *Client) ImagePull(ctx context.Context, ref string, options types.ImagePullOptions) (io.ReadCloser, error) {
+	repository, tag, err := reference.Parse(ref)
+	if err != nil {
+		return nil, err
+	}
+
+	query := url.Values{}
+	query.Set("fromImage", repository)
+	if tag != "" {
+		query.Set("tag", tag)
+	}
+
+	resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth)
+	if resp.statusCode == http.StatusUnauthorized {
+		newAuthHeader, privilegeErr := options.PrivilegeFunc()
+		if privilegeErr != nil {
+			return nil, privilegeErr
+		}
+		resp, err = cli.tryImageCreate(ctx, query, newAuthHeader)
+	}
+	if err != nil {
+		return nil, err
+	}
+	return resp.body, nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/client/image_push.go b/Godeps/_workspace/src/github.com/docker/engine-api/client/image_push.go
new file mode 100644
index 000000000000..8134f8018cfd
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/engine-api/client/image_push.go
@@ -0,0 +1,52 @@
+package client
+
+import (
+	"errors"
+	"io"
+	"net/http"
+	"net/url"
+
+	"golang.org/x/net/context"
+
+	distreference "github.com/docker/distribution/reference"
+	"github.com/docker/engine-api/types"
+	"github.com/docker/engine-api/types/reference"
+)
+
+// ImagePush requests the docker host to push an image to a remote registry.
+// It executes the privileged function if the operation is unauthorized
+// and it tries one more time.
+// It's up to the caller to handle the io.ReadCloser and close it properly.
+func (cli *Client) ImagePush(ctx context.Context, ref string, options types.ImagePushOptions) (io.ReadCloser, error) {
+	distributionRef, err := distreference.ParseNamed(ref)
+	if err != nil {
+		return nil, err
+	}
+
+	if _, isCanonical := distributionRef.(distreference.Canonical); isCanonical {
+		return nil, errors.New("cannot push a digest reference")
+	}
+
+	tag := reference.GetTagFromNamedRef(distributionRef)
+
+	query := url.Values{}
+	query.Set("tag", tag)
+
+	resp, err := cli.tryImagePush(ctx, distributionRef.Name(), query, options.RegistryAuth)
+	if resp.statusCode == http.StatusUnauthorized {
+		newAuthHeader, privilegeErr := options.PrivilegeFunc()
+		if privilegeErr != nil {
+			return nil, privilegeErr
+		}
+		resp, err = cli.tryImagePush(ctx, distributionRef.Name(), query, newAuthHeader)
+	}
+	if err != nil {
+		return nil, err
+	}
+	return resp.body, nil
+}
+
+func (cli *Client) tryImagePush(ctx context.Context, imageID string, query url.Values, registryAuth string) (*serverResponse, error) {
+	headers := map[string][]string{"X-Registry-Auth": {registryAuth}}
+	return cli.post(ctx, "/images/"+imageID+"/push", query, nil, headers)
+}
diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/client/image_remove.go b/Godeps/_workspace/src/github.com/docker/engine-api/client/image_remove.go
new file mode 100644
index 000000000000..47224326e0c0
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/engine-api/client/image_remove.go
@@ -0,0 +1,31 @@
+package client
+
+import (
+	"encoding/json"
+	"net/url"
+
+	"github.com/docker/engine-api/types"
+	"golang.org/x/net/context"
+)
+
+// ImageRemove removes an image from the docker host.
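+//
+// Illustrative usage sketch (assumes an initialized *Client named cli; the
+// image reference is hypothetical):
+//
+//	dels, err := cli.ImageRemove(ctx, "busybox:latest", types.ImageRemoveOptions{PruneChildren: true})
+//	if err != nil {
+//		return err
+//	}
+//	for _, d := range dels {
+//		fmt.Println(d.Untagged, d.Deleted)
+//	}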
+func (cli *Client) ImageRemove(ctx context.Context, imageID string, options types.ImageRemoveOptions) ([]types.ImageDelete, error) {
+	query := url.Values{}
+
+	if options.Force {
+		query.Set("force", "1")
+	}
+	if !options.PruneChildren {
+		query.Set("noprune", "1")
+	}
+
+	resp, err := cli.delete(ctx, "/images/"+imageID, query, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	var dels []types.ImageDelete
+	err = json.NewDecoder(resp.body).Decode(&dels)
+	ensureReaderClosed(resp)
+	return dels, err
+}
diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/client/image_save.go b/Godeps/_workspace/src/github.com/docker/engine-api/client/image_save.go
new file mode 100644
index 000000000000..ecac880a32cc
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/engine-api/client/image_save.go
@@ -0,0 +1,22 @@
+package client
+
+import (
+	"io"
+	"net/url"
+
+	"golang.org/x/net/context"
+)
+
+// ImageSave retrieves one or more images from the docker host as an io.ReadCloser.
+// It's up to the caller to store the images and close the stream.
+func (cli *Client) ImageSave(ctx context.Context, imageIDs []string) (io.ReadCloser, error) {
+	query := url.Values{
+		"names": imageIDs,
+	}
+
+	resp, err := cli.get(ctx, "/images/get", query, nil)
+	if err != nil {
+		return nil, err
+	}
+	return resp.body, nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/client/image_search.go b/Godeps/_workspace/src/github.com/docker/engine-api/client/image_search.go
new file mode 100644
index 000000000000..3528bda6bd80
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/engine-api/client/image_search.go
@@ -0,0 +1,40 @@
+package client
+
+import (
+	"encoding/json"
+	"net/http"
+	"net/url"
+
+	"github.com/docker/engine-api/types"
+	"github.com/docker/engine-api/types/registry"
+	"golang.org/x/net/context"
+)
+
+// ImageSearch makes the docker host search by a term in a remote registry.
+// The list of results is not sorted in any fashion.
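+//
+// Illustrative usage sketch (assumes an initialized *Client named cli and a
+// registry that does not require authentication):
+//
+//	results, err := cli.ImageSearch(ctx, "nginx", types.ImageSearchOptions{})
+//	if err != nil {
+//		return err
+//	}
+//	for _, r := range results {
+//		fmt.Println(r.Name, r.StarCount)
+//	}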
+func (cli *Client) ImageSearch(ctx context.Context, term string, options types.ImageSearchOptions) ([]registry.SearchResult, error) { + var results []registry.SearchResult + query := url.Values{} + query.Set("term", term) + + resp, err := cli.tryImageSearch(ctx, query, options.RegistryAuth) + if resp.statusCode == http.StatusUnauthorized { + newAuthHeader, privilegeErr := options.PrivilegeFunc() + if privilegeErr != nil { + return results, privilegeErr + } + resp, err = cli.tryImageSearch(ctx, query, newAuthHeader) + } + if err != nil { + return results, err + } + + err = json.NewDecoder(resp.body).Decode(&results) + ensureReaderClosed(resp) + return results, err +} + +func (cli *Client) tryImageSearch(ctx context.Context, query url.Values, registryAuth string) (*serverResponse, error) { + headers := map[string][]string{"X-Registry-Auth": {registryAuth}} + return cli.get(ctx, "/images/search", query, headers) +} diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/client/image_tag.go b/Godeps/_workspace/src/github.com/docker/engine-api/client/image_tag.go new file mode 100644 index 000000000000..490de4e5fea0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/engine-api/client/image_tag.go @@ -0,0 +1,38 @@ +package client + +import ( + "errors" + "fmt" + "net/url" + + "golang.org/x/net/context" + + distreference "github.com/docker/distribution/reference" + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/reference" +) + +// ImageTag tags an image in the docker host +func (cli *Client) ImageTag(ctx context.Context, imageID, ref string, options types.ImageTagOptions) error { + distributionRef, err := distreference.ParseNamed(ref) + if err != nil { + return fmt.Errorf("Error parsing reference: %q is not a valid repository/tag", ref) + } + + if _, isCanonical := distributionRef.(distreference.Canonical); isCanonical { + return errors.New("refusing to create a tag with a digest reference") + } + + tag := reference.GetTagFromNamedRef(distributionRef) + + query := url.Values{} + query.Set("repo", distributionRef.Name()) + query.Set("tag", tag) + if options.Force { + query.Set("force", "1") + } + + resp, err := cli.post(ctx, "/images/"+imageID+"/tag", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/client/info.go b/Godeps/_workspace/src/github.com/docker/engine-api/client/info.go new file mode 100644 index 000000000000..ff0958d65ce8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/engine-api/client/info.go @@ -0,0 +1,26 @@ +package client + +import ( + "encoding/json" + "fmt" + "net/url" + + "github.com/docker/engine-api/types" + "golang.org/x/net/context" +) + +// Info returns information about the docker server. 
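+//
+// Illustrative usage sketch (assumes an initialized *Client named cli):
+//
+//	info, err := cli.Info(ctx)
+//	if err != nil {
+//		return err
+//	}
+//	fmt.Println(info.Name, info.ServerVersion)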
+func (cli *Client) Info(ctx context.Context) (types.Info, error) { + var info types.Info + serverResp, err := cli.get(ctx, "/info", url.Values{}, nil) + if err != nil { + return info, err + } + defer ensureReaderClosed(serverResp) + + if err := json.NewDecoder(serverResp.body).Decode(&info); err != nil { + return info, fmt.Errorf("Error reading remote info: %v", err) + } + + return info, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/client/interface.go b/Godeps/_workspace/src/github.com/docker/engine-api/client/interface.go new file mode 100644 index 000000000000..2c6872f534be --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/engine-api/client/interface.go @@ -0,0 +1,79 @@ +package client + +import ( + "io" + + "golang.org/x/net/context" + + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/container" + "github.com/docker/engine-api/types/filters" + "github.com/docker/engine-api/types/network" + "github.com/docker/engine-api/types/registry" +) + +// APIClient is an interface that clients that talk with a docker server must implement. +type APIClient interface { + ClientVersion() string + ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error) + ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.ContainerCommitResponse, error) + ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, containerName string) (types.ContainerCreateResponse, error) + ContainerDiff(ctx context.Context, container string) ([]types.ContainerChange, error) + ContainerExecAttach(ctx context.Context, execID string, config types.ExecConfig) (types.HijackedResponse, error) + ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.ContainerExecCreateResponse, error) + ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error) + ContainerExecResize(ctx context.Context, execID string, options types.ResizeOptions) error + ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error + ContainerExport(ctx context.Context, container string) (io.ReadCloser, error) + ContainerInspect(ctx context.Context, container string) (types.ContainerJSON, error) + ContainerInspectWithRaw(ctx context.Context, container string, getSize bool) (types.ContainerJSON, []byte, error) + ContainerKill(ctx context.Context, container, signal string) error + ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) + ContainerLogs(ctx context.Context, container string, options types.ContainerLogsOptions) (io.ReadCloser, error) + ContainerPause(ctx context.Context, container string) error + ContainerRemove(ctx context.Context, container string, options types.ContainerRemoveOptions) error + ContainerRename(ctx context.Context, container, newContainerName string) error + ContainerResize(ctx context.Context, container string, options types.ResizeOptions) error + ContainerRestart(ctx context.Context, container string, timeout int) error + ContainerStatPath(ctx context.Context, container, path string) (types.ContainerPathStat, error) + ContainerStats(ctx context.Context, container string, stream bool) (io.ReadCloser, error) + ContainerStart(ctx context.Context, container string) error + ContainerStop(ctx context.Context, container string, timeout int) error + 
ContainerTop(ctx context.Context, container string, arguments []string) (types.ContainerProcessList, error) + ContainerUnpause(ctx context.Context, container string) error + ContainerUpdate(ctx context.Context, container string, updateConfig container.UpdateConfig) error + ContainerWait(ctx context.Context, container string) (int, error) + CopyFromContainer(ctx context.Context, container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) + CopyToContainer(ctx context.Context, container, path string, content io.Reader, options types.CopyToContainerOptions) error + Events(ctx context.Context, options types.EventsOptions) (io.ReadCloser, error) + ImageBuild(ctx context.Context, context io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) + ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error) + ImageHistory(ctx context.Context, image string) ([]types.ImageHistory, error) + ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) + ImageInspectWithRaw(ctx context.Context, image string, getSize bool) (types.ImageInspect, []byte, error) + ImageList(ctx context.Context, options types.ImageListOptions) ([]types.Image, error) + ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error) + ImagePull(ctx context.Context, ref string, options types.ImagePullOptions) (io.ReadCloser, error) + ImagePush(ctx context.Context, ref string, options types.ImagePushOptions) (io.ReadCloser, error) + ImageRemove(ctx context.Context, image string, options types.ImageRemoveOptions) ([]types.ImageDelete, error) + ImageSearch(ctx context.Context, term string, options types.ImageSearchOptions) ([]registry.SearchResult, error) + ImageSave(ctx context.Context, images []string) (io.ReadCloser, error) + ImageTag(ctx context.Context, image, ref string, options types.ImageTagOptions) error + Info(ctx context.Context) (types.Info, error) + NetworkConnect(ctx context.Context, networkID, container string, config *network.EndpointSettings) error + NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error) + NetworkDisconnect(ctx context.Context, networkID, container string, force bool) error + NetworkInspect(ctx context.Context, networkID string) (types.NetworkResource, error) + NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) + NetworkRemove(ctx context.Context, networkID string) error + RegistryLogin(ctx context.Context, auth types.AuthConfig) (types.AuthResponse, error) + ServerVersion(ctx context.Context) (types.Version, error) + UpdateClientVersion(v string) + VolumeCreate(ctx context.Context, options types.VolumeCreateRequest) (types.Volume, error) + VolumeInspect(ctx context.Context, volumeID string) (types.Volume, error) + VolumeList(ctx context.Context, filter filters.Args) (types.VolumesListResponse, error) + VolumeRemove(ctx context.Context, volumeID string) error +} + +// Ensure that Client always implements APIClient. 
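+// The assignment below is the standard Go compile-time assertion: assigning
+// a *Client to a blank APIClient variable makes the build fail if Client
+// ever stops satisfying the interface.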
+var _ APIClient = &Client{}
diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/client/login.go b/Godeps/_workspace/src/github.com/docker/engine-api/client/login.go
new file mode 100644
index 000000000000..482f94789f08
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/engine-api/client/login.go
@@ -0,0 +1,28 @@
+package client
+
+import (
+    "encoding/json"
+    "net/http"
+    "net/url"
+
+    "github.com/docker/engine-api/types"
+    "golang.org/x/net/context"
+)
+
+// RegistryLogin authenticates the docker server with a given docker registry.
+// It returns an unauthorizedError when the authentication fails.
+func (cli *Client) RegistryLogin(ctx context.Context, auth types.AuthConfig) (types.AuthResponse, error) {
+    resp, err := cli.post(ctx, "/auth", url.Values{}, auth, nil)
+
+    if resp != nil && resp.statusCode == http.StatusUnauthorized {
+        return types.AuthResponse{}, unauthorizedError{err}
+    }
+    if err != nil {
+        return types.AuthResponse{}, err
+    }
+
+    var response types.AuthResponse
+    err = json.NewDecoder(resp.body).Decode(&response)
+    ensureReaderClosed(resp)
+    return response, err
+}
diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/client/network_connect.go b/Godeps/_workspace/src/github.com/docker/engine-api/client/network_connect.go
new file mode 100644
index 000000000000..9a402a3e6384
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/engine-api/client/network_connect.go
@@ -0,0 +1,18 @@
+package client
+
+import (
+    "github.com/docker/engine-api/types"
+    "github.com/docker/engine-api/types/network"
+    "golang.org/x/net/context"
+)
+
+// NetworkConnect connects a container to an existing network in the docker host.
+func (cli *Client) NetworkConnect(ctx context.Context, networkID, containerID string, config *network.EndpointSettings) error {
+    nc := types.NetworkConnect{
+        Container:      containerID,
+        EndpointConfig: config,
+    }
+    resp, err := cli.post(ctx, "/networks/"+networkID+"/connect", nil, nc, nil)
+    ensureReaderClosed(resp)
+    return err
+}
diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/client/network_create.go b/Godeps/_workspace/src/github.com/docker/engine-api/client/network_create.go
new file mode 100644
index 000000000000..c9c0b9fde772
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/engine-api/client/network_create.go
@@ -0,0 +1,25 @@
+package client
+
+import (
+    "encoding/json"
+
+    "github.com/docker/engine-api/types"
+    "golang.org/x/net/context"
+)
+
+// NetworkCreate creates a new network in the docker host.
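+// A hypothetical caller sketch (illustrative only; the Driver field on
+// types.NetworkCreate is assumed here):
+//
+//	resp, err := cli.NetworkCreate(ctx, "mynet", types.NetworkCreate{Driver: "bridge"})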
+func (cli *Client) NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error) {
+    networkCreateRequest := types.NetworkCreateRequest{
+        NetworkCreate: options,
+        Name:          name,
+    }
+    var response types.NetworkCreateResponse
+    serverResp, err := cli.post(ctx, "/networks/create", nil, networkCreateRequest, nil)
+    if err != nil {
+        return response, err
+    }
+
+    err = json.NewDecoder(serverResp.body).Decode(&response)
+    ensureReaderClosed(serverResp)
+    return response, err
+}
diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/client/network_disconnect.go b/Godeps/_workspace/src/github.com/docker/engine-api/client/network_disconnect.go
new file mode 100644
index 000000000000..a3e33672fef5
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/engine-api/client/network_disconnect.go
@@ -0,0 +1,14 @@
+package client
+
+import (
+    "github.com/docker/engine-api/types"
+    "golang.org/x/net/context"
+)
+
+// NetworkDisconnect disconnects a container from an existing network in the docker host.
+func (cli *Client) NetworkDisconnect(ctx context.Context, networkID, containerID string, force bool) error {
+    nd := types.NetworkDisconnect{Container: containerID, Force: force}
+    resp, err := cli.post(ctx, "/networks/"+networkID+"/disconnect", nil, nd, nil)
+    ensureReaderClosed(resp)
+    return err
+}
diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/client/network_inspect.go b/Godeps/_workspace/src/github.com/docker/engine-api/client/network_inspect.go
new file mode 100644
index 000000000000..4f81e5ce40f7
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/engine-api/client/network_inspect.go
@@ -0,0 +1,24 @@
+package client
+
+import (
+    "encoding/json"
+    "net/http"
+
+    "github.com/docker/engine-api/types"
+    "golang.org/x/net/context"
+)
+
+// NetworkInspect returns the information for a specific network configured in the docker host.
+func (cli *Client) NetworkInspect(ctx context.Context, networkID string) (types.NetworkResource, error) {
+    var networkResource types.NetworkResource
+    resp, err := cli.get(ctx, "/networks/"+networkID, nil, nil)
+    if err != nil {
+        if resp.statusCode == http.StatusNotFound {
+            return networkResource, networkNotFoundError{networkID}
+        }
+        return networkResource, err
+    }
+    err = json.NewDecoder(resp.body).Decode(&networkResource)
+    ensureReaderClosed(resp)
+    return networkResource, err
+}
diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/client/network_list.go b/Godeps/_workspace/src/github.com/docker/engine-api/client/network_list.go
new file mode 100644
index 000000000000..813109c1802c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/engine-api/client/network_list.go
@@ -0,0 +1,31 @@
+package client
+
+import (
+    "encoding/json"
+    "net/url"
+
+    "github.com/docker/engine-api/types"
+    "github.com/docker/engine-api/types/filters"
+    "golang.org/x/net/context"
+)
+
+// NetworkList returns the list of networks configured in the docker host.
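+// A hypothetical caller sketch, filtering by driver (the "driver" filter
+// name belongs to the daemon and is assumed here for illustration):
+//
+//	args := filters.NewArgs()
+//	args.Add("driver", "bridge")
+//	nets, err := cli.NetworkList(ctx, types.NetworkListOptions{Filters: args})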
+func (cli *Client) NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) {
+    query := url.Values{}
+    if options.Filters.Len() > 0 {
+        filterJSON, err := filters.ToParam(options.Filters)
+        if err != nil {
+            return nil, err
+        }
+
+        query.Set("filters", filterJSON)
+    }
+    var networkResources []types.NetworkResource
+    resp, err := cli.get(ctx, "/networks", query, nil)
+    if err != nil {
+        return networkResources, err
+    }
+    err = json.NewDecoder(resp.body).Decode(&networkResources)
+    ensureReaderClosed(resp)
+    return networkResources, err
+}
diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/client/network_remove.go b/Godeps/_workspace/src/github.com/docker/engine-api/client/network_remove.go
new file mode 100644
index 000000000000..6bd674892420
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/engine-api/client/network_remove.go
@@ -0,0 +1,10 @@
+package client
+
+import "golang.org/x/net/context"
+
+// NetworkRemove removes an existing network from the docker host.
+func (cli *Client) NetworkRemove(ctx context.Context, networkID string) error {
+    resp, err := cli.delete(ctx, "/networks/"+networkID, nil, nil)
+    ensureReaderClosed(resp)
+    return err
+}
diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/client/request.go b/Godeps/_workspace/src/github.com/docker/engine-api/client/request.go
new file mode 100644
index 000000000000..cdbb0975bd28
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/engine-api/client/request.go
@@ -0,0 +1,185 @@
+package client
+
+import (
+    "bytes"
+    "encoding/json"
+    "fmt"
+    "io"
+    "io/ioutil"
+    "net/http"
+    "net/url"
+    "strings"
+
+    "github.com/docker/engine-api/client/transport/cancellable"
+    "golang.org/x/net/context"
+)
+
+// serverResponse is a wrapper for http API responses.
+type serverResponse struct {
+    body       io.ReadCloser
+    header     http.Header
+    statusCode int
+}
+
+// head sends an http request to the docker API using the method HEAD.
+func (cli *Client) head(ctx context.Context, path string, query url.Values, headers map[string][]string) (*serverResponse, error) {
+    return cli.sendRequest(ctx, "HEAD", path, query, nil, headers)
+}
+
+// get sends an http request to the docker API using the method GET with a specific go context.
+func (cli *Client) get(ctx context.Context, path string, query url.Values, headers map[string][]string) (*serverResponse, error) {
+    return cli.sendRequest(ctx, "GET", path, query, nil, headers)
+}
+
+// post sends an http request to the docker API using the method POST with a specific go context.
+func (cli *Client) post(ctx context.Context, path string, query url.Values, obj interface{}, headers map[string][]string) (*serverResponse, error) {
+    return cli.sendRequest(ctx, "POST", path, query, obj, headers)
+}
+
+// postRaw sends an http request to the docker API using the method POST with a raw body.
+func (cli *Client) postRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (*serverResponse, error) {
+    return cli.sendClientRequest(ctx, "POST", path, query, body, headers)
+}
+
+// put sends an http request to the docker API using the method PUT.
+func (cli *Client) put(ctx context.Context, path string, query url.Values, obj interface{}, headers map[string][]string) (*serverResponse, error) {
+    return cli.sendRequest(ctx, "PUT", path, query, obj, headers)
+}
+
+// putRaw sends an http request to the docker API using the method PUT with a raw body.
+func (cli *Client) putRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (*serverResponse, error) {
+    return cli.sendClientRequest(ctx, "PUT", path, query, body, headers)
+}
+
+// delete sends an http request to the docker API using the method DELETE.
+func (cli *Client) delete(ctx context.Context, path string, query url.Values, headers map[string][]string) (*serverResponse, error) {
+    return cli.sendRequest(ctx, "DELETE", path, query, nil, headers)
+}
+
+func (cli *Client) sendRequest(ctx context.Context, method, path string, query url.Values, obj interface{}, headers map[string][]string) (*serverResponse, error) {
+    var body io.Reader
+
+    if obj != nil {
+        var err error
+        body, err = encodeData(obj)
+        if err != nil {
+            return nil, err
+        }
+        if headers == nil {
+            headers = make(map[string][]string)
+        }
+        headers["Content-Type"] = []string{"application/json"}
+    }
+
+    return cli.sendClientRequest(ctx, method, path, query, body, headers)
+}
+
+func (cli *Client) sendClientRequest(ctx context.Context, method, path string, query url.Values, body io.Reader, headers map[string][]string) (*serverResponse, error) {
+    serverResp := &serverResponse{
+        body:       nil,
+        statusCode: -1,
+    }
+
+    expectedPayload := (method == "POST" || method == "PUT")
+    if expectedPayload && body == nil {
+        body = bytes.NewReader([]byte{})
+    }
+
+    req, err := cli.newRequest(method, path, query, body, headers)
+    if err != nil {
+        return serverResp, err
+    }
+    if cli.proto == "unix" || cli.proto == "npipe" {
+        // For local communications, it doesn't matter what the host is. We just
+        // need a valid and meaningful host name. (See #189)
+        req.Host = "docker"
+    }
+    req.URL.Host = cli.addr
+    req.URL.Scheme = cli.transport.Scheme()
+
+    if expectedPayload && req.Header.Get("Content-Type") == "" {
+        req.Header.Set("Content-Type", "text/plain")
+    }
+
+    resp, err := cancellable.Do(ctx, cli.transport, req)
+    if resp != nil {
+        serverResp.statusCode = resp.StatusCode
+    }
+
+    if err != nil {
+        if isTimeout(err) || strings.Contains(err.Error(), "connection refused") || strings.Contains(err.Error(), "dial unix") {
+            return serverResp, ErrConnectionFailed
+        }
+
+        if !cli.transport.Secure() && strings.Contains(err.Error(), "malformed HTTP response") {
+            return serverResp, fmt.Errorf("%v.\n* Are you trying to connect to a TLS-enabled daemon without TLS?", err)
+        }
+        if cli.transport.Secure() && strings.Contains(err.Error(), "remote error: bad certificate") {
+            return serverResp, fmt.Errorf("The server probably has client authentication (--tlsverify) enabled.
Please check your TLS client certificate settings: %v", err)
+        }
+
+        return serverResp, fmt.Errorf("An error occurred trying to connect: %v", err)
+    }
+
+    if serverResp.statusCode < 200 || serverResp.statusCode >= 400 {
+        body, err := ioutil.ReadAll(resp.Body)
+        if err != nil {
+            return serverResp, err
+        }
+        if len(body) == 0 {
+            return serverResp, fmt.Errorf("Error: request returned %s for API route and version %s, check if the server supports the requested API version", http.StatusText(serverResp.statusCode), req.URL)
+        }
+        return serverResp, fmt.Errorf("Error response from daemon: %s", bytes.TrimSpace(body))
+    }
+
+    serverResp.body = resp.Body
+    serverResp.header = resp.Header
+    return serverResp, nil
+}
+
+func (cli *Client) newRequest(method, path string, query url.Values, body io.Reader, headers map[string][]string) (*http.Request, error) {
+    apiPath := cli.getAPIPath(path, query)
+    req, err := http.NewRequest(method, apiPath, body)
+    if err != nil {
+        return nil, err
+    }
+
+    // Add the CLI Config's HTTP headers BEFORE we set the Docker headers, so
+    // that the explicit headers below take precedence and the user cannot
+    // override OUR headers.
+    for k, v := range cli.customHTTPHeaders {
+        req.Header.Set(k, v)
+    }
+
+    if headers != nil {
+        for k, v := range headers {
+            req.Header[k] = v
+        }
+    }
+
+    return req, nil
+}
+
+func encodeData(data interface{}) (*bytes.Buffer, error) {
+    params := bytes.NewBuffer(nil)
+    if data != nil {
+        if err := json.NewEncoder(params).Encode(data); err != nil {
+            return nil, err
+        }
+    }
+    return params, nil
+}
+
+func ensureReaderClosed(response *serverResponse) {
+    if response != nil && response.body != nil {
+        response.body.Close()
+    }
+}
+
+func isTimeout(err error) bool {
+    type timeout interface {
+        Timeout() bool
+    }
+    e := err
+    switch urlErr := err.(type) {
+    case *url.Error:
+        e = urlErr.Err
+    }
+    t, ok := e.(timeout)
+    return ok && t.Timeout()
+}
diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/client/transport/cancellable/LICENSE b/Godeps/_workspace/src/github.com/docker/engine-api/client/transport/cancellable/LICENSE
new file mode 100644
index 000000000000..6a66aea5eafe
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/engine-api/client/transport/cancellable/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/client/transport/cancellable/canceler.go b/Godeps/_workspace/src/github.com/docker/engine-api/client/transport/cancellable/canceler.go new file mode 100644 index 000000000000..11dff60026c7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/engine-api/client/transport/cancellable/canceler.go @@ -0,0 +1,23 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.5 + +package cancellable + +import ( + "net/http" + + "github.com/docker/engine-api/client/transport" +) + +func canceler(client transport.Sender, req *http.Request) func() { + // TODO(djd): Respect any existing value of req.Cancel. + ch := make(chan struct{}) + req.Cancel = ch + + return func() { + close(ch) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/client/transport/cancellable/canceler_go14.go b/Godeps/_workspace/src/github.com/docker/engine-api/client/transport/cancellable/canceler_go14.go new file mode 100644 index 000000000000..8ff2845c28e8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/engine-api/client/transport/cancellable/canceler_go14.go @@ -0,0 +1,27 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.5 + +package cancellable + +import ( + "net/http" + + "github.com/docker/engine-api/client/transport" +) + +type requestCanceler interface { + CancelRequest(*http.Request) +} + +func canceler(client transport.Sender, req *http.Request) func() { + rc, ok := client.(requestCanceler) + if !ok { + return func() {} + } + return func() { + rc.CancelRequest(req) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/client/transport/cancellable/cancellable.go b/Godeps/_workspace/src/github.com/docker/engine-api/client/transport/cancellable/cancellable.go new file mode 100644 index 000000000000..526feb0f456f --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/engine-api/client/transport/cancellable/cancellable.go @@ -0,0 +1,113 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cancellable provides helper function to cancel http requests. +package cancellable + +import ( + "io" + "net/http" + + "github.com/docker/engine-api/client/transport" + + "golang.org/x/net/context" +) + +func nop() {} + +var ( + testHookContextDoneBeforeHeaders = nop + testHookDoReturned = nop + testHookDidBodyClose = nop +) + +// Do sends an HTTP request with the provided transport.Sender and returns an HTTP response. +// If the client is nil, http.DefaultClient is used. +// If the context is canceled or times out, ctx.Err() will be returned. 
+//
+// FORK INFORMATION:
+//
+// This function deviates from the upstream version in golang.org/x/net/context/ctxhttp by
+// taking a Sender interface rather than a *http.Client directly. That allows us to use
+// this function with mocked clients and hijacked connections.
+func Do(ctx context.Context, client transport.Sender, req *http.Request) (*http.Response, error) {
+    if client == nil {
+        client = http.DefaultClient
+    }
+
+    // Request cancellation changed in Go 1.5, see canceler.go and canceler_go14.go.
+    cancel := canceler(client, req)
+
+    type responseAndError struct {
+        resp *http.Response
+        err  error
+    }
+    result := make(chan responseAndError, 1)
+
+    go func() {
+        resp, err := client.Do(req)
+        testHookDoReturned()
+        result <- responseAndError{resp, err}
+    }()
+
+    var resp *http.Response
+
+    select {
+    case <-ctx.Done():
+        testHookContextDoneBeforeHeaders()
+        cancel()
+        // Clean up after the goroutine calling client.Do:
+        go func() {
+            if r := <-result; r.resp != nil && r.resp.Body != nil {
+                testHookDidBodyClose()
+                r.resp.Body.Close()
+            }
+        }()
+        return nil, ctx.Err()
+    case r := <-result:
+        var err error
+        resp, err = r.resp, r.err
+        if err != nil {
+            return resp, err
+        }
+    }
+
+    c := make(chan struct{})
+    go func() {
+        select {
+        case <-ctx.Done():
+            cancel()
+        case <-c:
+            // The response's Body is closed.
+        }
+    }()
+    resp.Body = &notifyingReader{resp.Body, c}
+
+    return resp, nil
+}
+
+// notifyingReader is an io.ReadCloser that closes the notify channel after
+// Close is called or a Read fails on the underlying ReadCloser.
+type notifyingReader struct {
+    io.ReadCloser
+    notify chan<- struct{}
+}
+
+func (r *notifyingReader) Read(p []byte) (int, error) {
+    n, err := r.ReadCloser.Read(p)
+    if err != nil && r.notify != nil {
+        close(r.notify)
+        r.notify = nil
+    }
+    return n, err
+}
+
+func (r *notifyingReader) Close() error {
+    err := r.ReadCloser.Close()
+    if r.notify != nil {
+        close(r.notify)
+        r.notify = nil
+    }
+    return err
+}
diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/client/transport/client.go b/Godeps/_workspace/src/github.com/docker/engine-api/client/transport/client.go
new file mode 100644
index 000000000000..13d4b3ab3de8
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/engine-api/client/transport/client.go
@@ -0,0 +1,47 @@
+package transport
+
+import (
+    "crypto/tls"
+    "net/http"
+)
+
+// Sender is an interface that clients must implement
+// to be able to send requests to a remote connection.
+type Sender interface {
+    // Do sends request to a remote endpoint.
+    Do(*http.Request) (*http.Response, error)
+}
+
+// Client is an interface that abstracts all remote connections.
+type Client interface {
+    Sender
+    // Secure tells whether the connection is secure or not.
+    Secure() bool
+    // Scheme returns the connection protocol the client uses.
+    Scheme() string
+    // TLSConfig returns any TLS configuration the client uses.
+    TLSConfig() *tls.Config
+}
+
+// tlsInfo holds information about the TLS configuration.
+type tlsInfo struct {
+    tlsConfig *tls.Config
+}
+
+// TLSConfig returns the TLS configuration.
+func (t *tlsInfo) TLSConfig() *tls.Config {
+    return t.tlsConfig
+}
+
+// Scheme returns protocol scheme to use.
+func (t *tlsInfo) Scheme() string {
+    if t.tlsConfig != nil {
+        return "https"
+    }
+    return "http"
+}
+
+// Secure returns true if there is a TLS configuration.
+func (t *tlsInfo) Secure() bool {
+    return t.tlsConfig != nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/client/transport/transport.go b/Godeps/_workspace/src/github.com/docker/engine-api/client/transport/transport.go
new file mode 100644
index 000000000000..ff28af1855f6
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/engine-api/client/transport/transport.go
@@ -0,0 +1,57 @@
+// Package transport provides functions to send requests to remote endpoints.
+package transport
+
+import (
+    "fmt"
+    "net/http"
+
+    "github.com/docker/go-connections/sockets"
+)
+
+// apiTransport holds information about the http transport to connect with the API.
+type apiTransport struct {
+    *http.Client
+    *tlsInfo
+    transport *http.Transport
+}
+
+// NewTransportWithHTTP creates a new transport based on the provided proto, address and http client.
+// It uses Docker's default http transport configuration if the client is nil.
+// It does not modify the client's transport if it's not nil.
+func NewTransportWithHTTP(proto, addr string, client *http.Client) (Client, error) {
+    var transport *http.Transport
+
+    if client != nil {
+        tr, ok := client.Transport.(*http.Transport)
+        if !ok {
+            return nil, fmt.Errorf("unable to verify TLS configuration, invalid transport %v", client.Transport)
+        }
+        transport = tr
+    } else {
+        transport = defaultTransport(proto, addr)
+        client = &http.Client{
+            Transport: transport,
+        }
+    }
+
+    return &apiTransport{
+        Client:    client,
+        tlsInfo:   &tlsInfo{transport.TLSClientConfig},
+        transport: transport,
+    }, nil
+}
+
+// CancelRequest stops a request execution.
+func (a *apiTransport) CancelRequest(req *http.Request) {
+    a.transport.CancelRequest(req)
+}
+
+// defaultTransport creates a new http.Transport with Docker's
+// default transport configuration.
+func defaultTransport(proto, addr string) *http.Transport {
+    tr := new(http.Transport)
+    sockets.ConfigureTransport(tr, proto, addr)
+    return tr
+}
+
+var _ Client = &apiTransport{}
diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/client/version.go b/Godeps/_workspace/src/github.com/docker/engine-api/client/version.go
new file mode 100644
index 000000000000..e037551a21b0
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/engine-api/client/version.go
@@ -0,0 +1,21 @@
+package client
+
+import (
+    "encoding/json"
+
+    "github.com/docker/engine-api/types"
+    "golang.org/x/net/context"
+)
+
+// ServerVersion returns version information about the docker server host.
+func (cli *Client) ServerVersion(ctx context.Context) (types.Version, error) {
+    resp, err := cli.get(ctx, "/version", nil, nil)
+    if err != nil {
+        return types.Version{}, err
+    }
+
+    var server types.Version
+    err = json.NewDecoder(resp.body).Decode(&server)
+    ensureReaderClosed(resp)
+    return server, err
+}
diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/client/volume_create.go b/Godeps/_workspace/src/github.com/docker/engine-api/client/volume_create.go
new file mode 100644
index 000000000000..cc1e1c177231
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/engine-api/client/volume_create.go
@@ -0,0 +1,20 @@
+package client
+
+import (
+    "encoding/json"
+
+    "github.com/docker/engine-api/types"
+    "golang.org/x/net/context"
+)
+
+// VolumeCreate creates a volume in the docker host.
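+// A hypothetical caller sketch (illustrative only; the Name and Driver
+// fields on types.VolumeCreateRequest are assumed here):
+//
+//	vol, err := cli.VolumeCreate(ctx, types.VolumeCreateRequest{Name: "data", Driver: "local"})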
+func (cli *Client) VolumeCreate(ctx context.Context, options types.VolumeCreateRequest) (types.Volume, error) { + var volume types.Volume + resp, err := cli.post(ctx, "/volumes/create", nil, options, nil) + if err != nil { + return volume, err + } + err = json.NewDecoder(resp.body).Decode(&volume) + ensureReaderClosed(resp) + return volume, err +} diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/client/volume_inspect.go b/Godeps/_workspace/src/github.com/docker/engine-api/client/volume_inspect.go new file mode 100644 index 000000000000..4bf4a7b084ad --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/engine-api/client/volume_inspect.go @@ -0,0 +1,24 @@ +package client + +import ( + "encoding/json" + "net/http" + + "github.com/docker/engine-api/types" + "golang.org/x/net/context" +) + +// VolumeInspect returns the information about a specific volume in the docker host. +func (cli *Client) VolumeInspect(ctx context.Context, volumeID string) (types.Volume, error) { + var volume types.Volume + resp, err := cli.get(ctx, "/volumes/"+volumeID, nil, nil) + if err != nil { + if resp.statusCode == http.StatusNotFound { + return volume, volumeNotFoundError{volumeID} + } + return volume, err + } + err = json.NewDecoder(resp.body).Decode(&volume) + ensureReaderClosed(resp) + return volume, err +} diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/client/volume_list.go b/Godeps/_workspace/src/github.com/docker/engine-api/client/volume_list.go new file mode 100644 index 000000000000..bb4c40d5f981 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/engine-api/client/volume_list.go @@ -0,0 +1,32 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/filters" + "golang.org/x/net/context" +) + +// VolumeList returns the volumes configured in the docker host. +func (cli *Client) VolumeList(ctx context.Context, filter filters.Args) (types.VolumesListResponse, error) { + var volumes types.VolumesListResponse + query := url.Values{} + + if filter.Len() > 0 { + filterJSON, err := filters.ToParam(filter) + if err != nil { + return volumes, err + } + query.Set("filters", filterJSON) + } + resp, err := cli.get(ctx, "/volumes", query, nil) + if err != nil { + return volumes, err + } + + err = json.NewDecoder(resp.body).Decode(&volumes) + ensureReaderClosed(resp) + return volumes, err +} diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/client/volume_remove.go b/Godeps/_workspace/src/github.com/docker/engine-api/client/volume_remove.go new file mode 100644 index 000000000000..0dce24c79b84 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/engine-api/client/volume_remove.go @@ -0,0 +1,10 @@ +package client + +import "golang.org/x/net/context" + +// VolumeRemove removes a volume from the docker host. 
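+// e.g. (illustrative caller code): err := cli.VolumeRemove(ctx, "data")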
+func (cli *Client) VolumeRemove(ctx context.Context, volumeID string) error { + resp, err := cli.delete(ctx, "/volumes/"+volumeID, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/types/auth.go b/Godeps/_workspace/src/github.com/docker/engine-api/types/auth.go new file mode 100644 index 000000000000..056af6b84259 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/engine-api/types/auth.go @@ -0,0 +1,22 @@ +package types + +// AuthConfig contains authorization information for connecting to a Registry +type AuthConfig struct { + Username string `json:"username,omitempty"` + Password string `json:"password,omitempty"` + Auth string `json:"auth,omitempty"` + + // Email is an optional value associated with the username. + // This field is deprecated and will be removed in a later + // version of docker. + Email string `json:"email,omitempty"` + + ServerAddress string `json:"serveraddress,omitempty"` + + // IdentityToken is used to authenticate the user and get + // an access token for the registry. + IdentityToken string `json:"identitytoken,omitempty"` + + // RegistryToken is a bearer token to be sent to a registry + RegistryToken string `json:"registrytoken,omitempty"` +} diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/types/blkiodev/blkio.go b/Godeps/_workspace/src/github.com/docker/engine-api/types/blkiodev/blkio.go new file mode 100644 index 000000000000..931ae10ab1ef --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/engine-api/types/blkiodev/blkio.go @@ -0,0 +1,23 @@ +package blkiodev + +import "fmt" + +// WeightDevice is a structure that holds device:weight pair +type WeightDevice struct { + Path string + Weight uint16 +} + +func (w *WeightDevice) String() string { + return fmt.Sprintf("%s:%d", w.Path, w.Weight) +} + +// ThrottleDevice is a structure that holds device:rate_per_second pair +type ThrottleDevice struct { + Path string + Rate uint64 +} + +func (t *ThrottleDevice) String() string { + return fmt.Sprintf("%s:%d", t.Path, t.Rate) +} diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/types/client.go b/Godeps/_workspace/src/github.com/docker/engine-api/types/client.go new file mode 100644 index 000000000000..a3453414901f --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/engine-api/types/client.go @@ -0,0 +1,231 @@ +package types + +import ( + "bufio" + "io" + "net" + + "github.com/docker/engine-api/types/container" + "github.com/docker/engine-api/types/filters" + "github.com/docker/go-units" +) + +// ContainerAttachOptions holds parameters to attach to a container. +type ContainerAttachOptions struct { + Stream bool + Stdin bool + Stdout bool + Stderr bool + DetachKeys string +} + +// ContainerCommitOptions holds parameters to commit changes into a container. +type ContainerCommitOptions struct { + Reference string + Comment string + Author string + Changes []string + Pause bool + Config *container.Config +} + +// ContainerExecInspect holds information returned by exec inspect. +type ContainerExecInspect struct { + ExecID string + ContainerID string + Running bool + ExitCode int +} + +// ContainerListOptions holds parameters to list containers with. +type ContainerListOptions struct { + Quiet bool + Size bool + All bool + Latest bool + Since string + Before string + Limit int + Filter filters.Args +} + +// ContainerLogsOptions holds parameters to filter logs with. 
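+// For example, a caller tailing the last 10 stdout lines with timestamps
+// might build (illustrative only):
+//
+//	types.ContainerLogsOptions{ShowStdout: true, Timestamps: true, Tail: "10"}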
+type ContainerLogsOptions struct {
+    ShowStdout bool
+    ShowStderr bool
+    Since      string
+    Timestamps bool
+    Follow     bool
+    Tail       string
+}
+
+// ContainerRemoveOptions holds parameters to remove containers.
+type ContainerRemoveOptions struct {
+    RemoveVolumes bool
+    RemoveLinks   bool
+    Force         bool
+}
+
+// CopyToContainerOptions holds information
+// about files to copy into a container
+type CopyToContainerOptions struct {
+    AllowOverwriteDirWithFile bool
+}
+
+// EventsOptions holds parameters to filter events with.
+type EventsOptions struct {
+    Since   string
+    Until   string
+    Filters filters.Args
+}
+
+// NetworkListOptions holds parameters to filter the list of networks with.
+type NetworkListOptions struct {
+    Filters filters.Args
+}
+
+// HijackedResponse holds connection information for a hijacked request.
+type HijackedResponse struct {
+    Conn   net.Conn
+    Reader *bufio.Reader
+}
+
+// Close closes the hijacked connection and reader.
+func (h *HijackedResponse) Close() {
+    h.Conn.Close()
+}
+
+// CloseWriter is an interface for streams that can close their
+// write side to stop further writes.
+type CloseWriter interface {
+    CloseWrite() error
+}
+
+// CloseWrite closes the write side of the hijacked connection.
+func (h *HijackedResponse) CloseWrite() error {
+    if conn, ok := h.Conn.(CloseWriter); ok {
+        return conn.CloseWrite()
+    }
+    return nil
+}
+
+// ImageBuildOptions holds the information
+// necessary to build images.
+type ImageBuildOptions struct {
+    Tags           []string
+    SuppressOutput bool
+    RemoteContext  string
+    NoCache        bool
+    Remove         bool
+    ForceRemove    bool
+    PullParent     bool
+    Isolation      container.Isolation
+    CPUSetCPUs     string
+    CPUSetMems     string
+    CPUShares      int64
+    CPUQuota       int64
+    CPUPeriod      int64
+    Memory         int64
+    MemorySwap     int64
+    CgroupParent   string
+    ShmSize        int64
+    Dockerfile     string
+    Ulimits        []*units.Ulimit
+    BuildArgs      map[string]string
+    AuthConfigs    map[string]AuthConfig
+    Context        io.Reader
+    Labels         map[string]string
+}
+
+// ImageBuildResponse holds information
+// returned by a server after building
+// an image.
+type ImageBuildResponse struct {
+    Body   io.ReadCloser
+    OSType string
+}
+
+// ImageCreateOptions holds information to create images.
+type ImageCreateOptions struct {
+    RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry
+}
+
+// ImageImportSource holds source information for ImageImport
+type ImageImportSource struct {
+    Source     io.Reader // Source is the data to send to the server to create this image from (mutually exclusive with SourceName)
+    SourceName string    // SourceName is the name of the image to pull (mutually exclusive with Source)
+}
+
+// ImageImportOptions holds information to import images from the client host.
+type ImageImportOptions struct {
+    Tag     string   // Tag is the name to tag this image with. This attribute is deprecated.
+    Message string   // Message is the message to tag the image with
+    Changes []string // Changes are the raw changes to apply to this image
+}
+
+// ImageListOptions holds parameters to filter the list of images with.
+type ImageListOptions struct {
+    MatchName string
+    All       bool
+    Filters   filters.Args
+}
+
+// ImageLoadResponse returns information to the client about a load process.
+type ImageLoadResponse struct {
+    Body io.ReadCloser
+    JSON bool
+}
+
+// ImagePullOptions holds information to pull images.
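+// A hypothetical sketch: PrivilegeFunc is invoked by the client when the
+// registry answers 401, and must return fresh base64 credentials
+// (promptForAuth below is a made-up helper, assumed for illustration):
+//
+//	opts := types.ImagePullOptions{
+//		PrivilegeFunc: func() (string, error) { return promptForAuth() },
+//	}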
+type ImagePullOptions struct { + RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry + PrivilegeFunc RequestPrivilegeFunc +} + +// RequestPrivilegeFunc is a function interface that +// clients can supply to retry operations after +// getting an authorization error. +// This function returns the registry authentication +// header value in base 64 format, or an error +// if the privilege request fails. +type RequestPrivilegeFunc func() (string, error) + +//ImagePushOptions holds information to push images. +type ImagePushOptions ImagePullOptions + +// ImageRemoveOptions holds parameters to remove images. +type ImageRemoveOptions struct { + Force bool + PruneChildren bool +} + +// ImageSearchOptions holds parameters to search images with. +type ImageSearchOptions struct { + RegistryAuth string + PrivilegeFunc RequestPrivilegeFunc +} + +// ImageTagOptions holds parameters to tag an image +type ImageTagOptions struct { + Force bool +} + +// ResizeOptions holds parameters to resize a tty. +// It can be used to resize container ttys and +// exec process ttys too. +type ResizeOptions struct { + Height int + Width int +} + +// VersionResponse holds version information for the client and the server +type VersionResponse struct { + Client *Version + Server *Version +} + +// ServerOK returns true when the client could connect to the docker server +// and parse the information received. It returns false otherwise. +func (v VersionResponse) ServerOK() bool { + return v.Server != nil +} diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/types/configs.go b/Godeps/_workspace/src/github.com/docker/engine-api/types/configs.go new file mode 100644 index 000000000000..7d4fcb343e33 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/engine-api/types/configs.go @@ -0,0 +1,53 @@ +package types + +import ( + "github.com/docker/engine-api/types/container" + "github.com/docker/engine-api/types/network" +) + +// configs holds structs used for internal communication between the +// frontend (such as an http server) and the backend (such as the +// docker daemon). + +// ContainerCreateConfig is the parameter set to ContainerCreate() +type ContainerCreateConfig struct { + Name string + Config *container.Config + HostConfig *container.HostConfig + NetworkingConfig *network.NetworkingConfig + AdjustCPUShares bool +} + +// ContainerRmConfig holds arguments for the container remove +// operation. This struct is used to tell the backend what operations +// to perform. +type ContainerRmConfig struct { + ForceRemove, RemoveVolume, RemoveLink bool +} + +// ContainerCommitConfig contains build configs for commit operation, +// and is used when making a commit with the current state of the container. +type ContainerCommitConfig struct { + Pause bool + Repo string + Tag string + Author string + Comment string + // merge container config into commit config before commit + MergeConfigs bool + Config *container.Config +} + +// ExecConfig is a small subset of the Config struct that holds the configuration +// for the exec feature of docker. +type ExecConfig struct { + User string // User that will run the command + Privileged bool // Is the container in privileged mode + Tty bool // Attach standard streams to a tty. 
+    AttachStdin  bool     // Attach the standard input, making user interaction possible
+    AttachStderr bool     // Attach the standard error
+    AttachStdout bool     // Attach the standard output
+    Detach       bool     // Execute in detach mode
+    DetachKeys   string   // Escape keys for detach
+    Cmd          []string // Execution commands and args
+}
diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/types/container/config.go b/Godeps/_workspace/src/github.com/docker/engine-api/types/container/config.go
new file mode 100644
index 000000000000..1dfc40834800
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/engine-api/types/container/config.go
@@ -0,0 +1,37 @@
+package container
+
+import (
+    "github.com/docker/engine-api/types/strslice"
+    "github.com/docker/go-connections/nat"
+)
+
+// Config contains the configuration data about a container.
+// It should hold only portable information about the container.
+// Here, "portable" means "independent from the host we are running on".
+// Non-portable information *should* appear in HostConfig.
+// All fields added to this struct must be marked `omitempty` to keep getting
+// predictable hashes from the old `v1Compatibility` configuration.
+type Config struct {
+    Hostname        string                // Hostname
+    Domainname      string                // Domainname
+    User            string                // User that will run the command(s) inside the container
+    AttachStdin     bool                  // Attach the standard input, making user interaction possible
+    AttachStdout    bool                  // Attach the standard output
+    AttachStderr    bool                  // Attach the standard error
+    ExposedPorts    map[nat.Port]struct{} `json:",omitempty"` // List of exposed ports
+    Tty             bool                  // Attach standard streams to a tty, including stdin if it is not closed.
+    OpenStdin       bool                  // Open stdin
+    StdinOnce       bool                  // If true, close stdin after the first attached client disconnects.
+    Env             []string              // List of environment variables to set in the container
+    Cmd             strslice.StrSlice     // Command to run when starting the container
+    ArgsEscaped     bool                  `json:",omitempty"` // True if command is already escaped (Windows specific)
+    Image           string                // Name of the image as it was passed by the operator (eg. could be symbolic)
+    Volumes         map[string]struct{}   // List of volumes (mounts) used for the container
+    WorkingDir      string                // Current directory (PWD) in which the command will be launched
+    Entrypoint      strslice.StrSlice     // Entrypoint to run when starting the container
+    NetworkDisabled bool                  `json:",omitempty"` // Is network disabled
+    MacAddress      string                `json:",omitempty"` // Mac Address of the container
+    OnBuild         []string              // ONBUILD metadata that were defined in the image's Dockerfile
+    Labels          map[string]string     // List of labels set on this container
+    StopSignal      string                `json:",omitempty"` // Signal to stop a container
+}
diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/types/container/host_config.go b/Godeps/_workspace/src/github.com/docker/engine-api/types/container/host_config.go
new file mode 100644
index 000000000000..39f6a225169f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/engine-api/types/container/host_config.go
@@ -0,0 +1,301 @@
+package container
+
+import (
+    "strings"
+
+    "github.com/docker/engine-api/types/blkiodev"
+    "github.com/docker/engine-api/types/strslice"
+    "github.com/docker/go-connections/nat"
+    "github.com/docker/go-units"
+)
+
+// NetworkMode represents the container network stack.
+type NetworkMode string
+
+// Isolation represents the isolation technology of a container. The supported
+// values are platform specific.
+type Isolation string
+
+// IsDefault indicates the default isolation technology of a container. On Linux this
+// is the native driver. On Windows, this is a Windows Server Container.
+func (i Isolation) IsDefault() bool {
+    return strings.ToLower(string(i)) == "default" || string(i) == ""
+}
+
+// IpcMode represents the container ipc stack.
+type IpcMode string
+
+// IsPrivate indicates whether the container uses its private ipc stack.
+func (n IpcMode) IsPrivate() bool {
+    return !(n.IsHost() || n.IsContainer())
+}
+
+// IsHost indicates whether the container uses the host's ipc stack.
+func (n IpcMode) IsHost() bool {
+    return n == "host"
+}
+
+// IsContainer indicates whether the container uses a container's ipc stack.
+func (n IpcMode) IsContainer() bool {
+    parts := strings.SplitN(string(n), ":", 2)
+    return len(parts) > 1 && parts[0] == "container"
+}
+
+// Valid indicates whether the ipc stack is valid.
+func (n IpcMode) Valid() bool {
+    parts := strings.Split(string(n), ":")
+    switch mode := parts[0]; mode {
+    case "", "host":
+    case "container":
+        if len(parts) != 2 || parts[1] == "" {
+            return false
+        }
+    default:
+        return false
+    }
+    return true
+}
+
+// Container returns the name of the container whose ipc stack is going to be used.
+func (n IpcMode) Container() string {
+    parts := strings.SplitN(string(n), ":", 2)
+    if len(parts) > 1 {
+        return parts[1]
+    }
+    return ""
+}
+
+// UsernsMode represents userns mode in the container.
+type UsernsMode string
+
+// IsHost indicates whether the container uses the host's userns.
+func (n UsernsMode) IsHost() bool {
+    return n == "host"
+}
+
+// IsPrivate indicates whether the container uses a private userns.
+func (n UsernsMode) IsPrivate() bool {
+    return !(n.IsHost())
+}
+
+// Valid indicates whether the userns is valid.
+func (n UsernsMode) Valid() bool {
+    parts := strings.Split(string(n), ":")
+    switch mode := parts[0]; mode {
+    case "", "host":
+    default:
+        return false
+    }
+    return true
+}
+
+// CgroupSpec represents the cgroup to use for the container.
+type CgroupSpec string
+
+// IsContainer indicates whether the container is using another container's cgroup.
+func (c CgroupSpec) IsContainer() bool {
+    parts := strings.SplitN(string(c), ":", 2)
+    return len(parts) > 1 && parts[0] == "container"
+}
+
+// Valid indicates whether the cgroup spec is valid.
+func (c CgroupSpec) Valid() bool {
+    return c.IsContainer() || c == ""
+}
+
+// Container returns the name of the container whose cgroup will be used.
+func (c CgroupSpec) Container() string {
+    parts := strings.SplitN(string(c), ":", 2)
+    if len(parts) > 1 {
+        return parts[1]
+    }
+    return ""
+}
+
+// UTSMode represents the UTS namespace of the container.
+type UTSMode string
+
+// IsPrivate indicates whether the container uses its private UTS namespace.
+func (n UTSMode) IsPrivate() bool {
+    return !(n.IsHost())
+}
+
+// IsHost indicates whether the container uses the host's UTS namespace.
+func (n UTSMode) IsHost() bool {
+    return n == "host"
+}
+
+// Valid indicates whether the UTS namespace is valid.
+func (n UTSMode) Valid() bool {
+    parts := strings.Split(string(n), ":")
+    switch mode := parts[0]; mode {
+    case "", "host":
+    default:
+        return false
+    }
+    return true
+}
+
+// PidMode represents the pid stack of the container.
+type PidMode string
+
+// IsPrivate indicates whether the container uses its private pid stack.
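+// In this version, "" and "host" are the only values Valid accepts for
+// PidMode: "" keeps a private pid namespace, "host" shares the host's.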
+func (n PidMode) IsPrivate() bool {
+    return !(n.IsHost())
+}
+
+// IsHost indicates whether the container uses the host's pid stack.
+func (n PidMode) IsHost() bool {
+    return n == "host"
+}
+
+// Valid indicates whether the pid stack is valid.
+func (n PidMode) Valid() bool {
+    parts := strings.Split(string(n), ":")
+    switch mode := parts[0]; mode {
+    case "", "host":
+    default:
+        return false
+    }
+    return true
+}
+
+// DeviceMapping represents the device mapping between the host and the container.
+type DeviceMapping struct {
+    PathOnHost        string
+    PathInContainer   string
+    CgroupPermissions string
+}
+
+// RestartPolicy represents the restart policies of the container.
+type RestartPolicy struct {
+    Name              string
+    MaximumRetryCount int
+}
+
+// IsNone indicates whether the container has the "no" restart policy.
+// This means the container will not automatically restart when exiting.
+func (rp *RestartPolicy) IsNone() bool {
+    return rp.Name == "no"
+}
+
+// IsAlways indicates whether the container has the "always" restart policy.
+// This means the container will automatically restart regardless of the exit status.
+func (rp *RestartPolicy) IsAlways() bool {
+    return rp.Name == "always"
+}
+
+// IsOnFailure indicates whether the container has the "on-failure" restart policy.
+// This means the container will automatically restart when it exits with a non-zero exit status.
+func (rp *RestartPolicy) IsOnFailure() bool {
+    return rp.Name == "on-failure"
+}
+
+// IsUnlessStopped indicates whether the container has the
+// "unless-stopped" restart policy. This means the container will
+// automatically restart unless the user has put it into a stopped state.
+func (rp *RestartPolicy) IsUnlessStopped() bool {
+    return rp.Name == "unless-stopped"
+}
+
+// IsSame compares two RestartPolicy values to see if they are the same.
+func (rp *RestartPolicy) IsSame(tp *RestartPolicy) bool {
+    return rp.Name == tp.Name && rp.MaximumRetryCount == tp.MaximumRetryCount
+}
+
+// LogConfig represents the logging configuration of the container.
+type LogConfig struct {
+    Type   string
+    Config map[string]string
+}
+
+// Resources contains container's resources (cgroups config, ulimits...)
+type Resources struct {
+    // Applicable to all platforms
+    CPUShares int64 `json:"CpuShares"` // CPU shares (relative weight vs. other containers)
+    Memory    int64 // Memory limit (in bytes)
+
+    // Applicable to UNIX platforms
+    CgroupParent string // Parent cgroup.
+    BlkioWeight  uint16 // Block IO weight (relative weight vs. other containers)
+    BlkioWeightDevice    []*blkiodev.WeightDevice
+    BlkioDeviceReadBps   []*blkiodev.ThrottleDevice
+    BlkioDeviceWriteBps  []*blkiodev.ThrottleDevice
+    BlkioDeviceReadIOps  []*blkiodev.ThrottleDevice
+    BlkioDeviceWriteIOps []*blkiodev.ThrottleDevice
+    CPUPeriod            int64           `json:"CpuPeriod"` // CPU CFS (Completely Fair Scheduler) period
+    CPUQuota             int64           `json:"CpuQuota"`  // CPU CFS (Completely Fair Scheduler) quota
+    CpusetCpus           string          // CpusetCpus 0-2, 0,1
+    CpusetMems           string          // CpusetMems 0-2, 0,1
+    Devices              []DeviceMapping // List of devices to map inside the container
+    DiskQuota            int64           // Disk limit (in bytes)
+    KernelMemory         int64           // Kernel memory limit (in bytes)
+    MemoryReservation    int64           // Memory soft limit (in bytes)
+    MemorySwap           int64           // Total memory usage (memory + swap); set `-1` to enable unlimited swap
+    MemorySwappiness     *int64          // Tuning container memory swappiness behaviour
+    OomKillDisable       *bool           // Whether to disable OOM Killer or not
+    PidsLimit            int64           // Setting pids limit for a container
+    Ulimits              []*units.Ulimit // List of ulimits to be set in the container
+
+    // Applicable to Windows
+    CPUCount                int64  `json:"CpuCount"`   // CPU count
+    CPUPercent              int64  `json:"CpuPercent"` // CPU percent
+    IOMaximumIOps           uint64 // Maximum IOps for the container system drive
+    IOMaximumBandwidth      uint64 // Maximum IO in bytes per second for the container system drive
+    NetworkMaximumBandwidth uint64 // Maximum bandwidth of the network endpoint in bytes per second
+}
+
+// UpdateConfig holds the mutable attributes of a Container.
+// Those attributes can be updated at runtime.
+type UpdateConfig struct {
+    // Contains container's resources (cgroups, ulimits)
+    Resources
+    RestartPolicy RestartPolicy
+}
+
+// HostConfig is the non-portable Config structure of a container.
+// Here, "non-portable" means "dependent on the host we are running on".
+// Portable information *should* appear in Config.
+type HostConfig struct {
+    // Applicable to all platforms
+    Binds           []string      // List of volume bindings for this container
+    ContainerIDFile string        // File (path) where the containerId is written
+    LogConfig       LogConfig     // Configuration of the logs for this container
+    NetworkMode     NetworkMode   // Network mode to use for the container
+    PortBindings    nat.PortMap   // Port mapping between the exposed port (container) and the host
+    RestartPolicy   RestartPolicy // Restart policy to be used for the container
+    AutoRemove      bool          // Automatically remove container when it exits
+    VolumeDriver    string        // Name of the volume driver used to mount volumes
+    VolumesFrom     []string      // List of volumes to take from other containers
+
+    // Applicable to UNIX platforms
+    CapAdd          strslice.StrSlice // List of kernel capabilities to add to the container
+    CapDrop         strslice.StrSlice // List of kernel capabilities to remove from the container
+    DNS             []string          `json:"Dns"`        // List of DNS servers to lookup
+    DNSOptions      []string          `json:"DnsOptions"` // List of DNSOption to look for
+    DNSSearch       []string          `json:"DnsSearch"`  // List of DNSSearch to look for
+    ExtraHosts      []string          // List of extra hosts
+    GroupAdd        []string          // List of additional groups that the container process will run as
+    IpcMode         IpcMode           // IPC namespace to use for the container
+    Cgroup          CgroupSpec        // Cgroup to use for the container
+    Links           []string          // List of links (in the name:alias form)
+    OomScoreAdj     int               // Container preference for OOM-killing
+    PidMode         PidMode           // PID namespace to use for the container
+    Privileged      bool              // Is the container in privileged mode
+    PublishAllPorts bool              // Should docker publish all exposed ports for the container
+    ReadonlyRootfs  bool              // Is the container root filesystem in read-only mode
+    SecurityOpt     []string          // List of string values to customize labels for MLS systems, such as SELinux.
+    StorageOpt      map[string]string // Storage driver options per container.
+    Tmpfs           map[string]string `json:",omitempty"` // List of tmpfs (mounts) used for the container
+    UTSMode         UTSMode           // UTS namespace to use for the container
+    UsernsMode      UsernsMode        // The user namespace to use for the container
+    ShmSize         int64             // Total shm memory usage
+    Sysctls         map[string]string `json:",omitempty"` // List of Namespaced sysctls used for the container
+
+    // Applicable to Windows
+    ConsoleSize [2]int    // Initial console size
+    Isolation   Isolation // Isolation technology of the container (eg default, hyperv)
+
+    // Contains container's resources (cgroups, ulimits)
+    Resources
+}
diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/types/container/hostconfig_unix.go b/Godeps/_workspace/src/github.com/docker/engine-api/types/container/hostconfig_unix.go
new file mode 100644
index 000000000000..4171059a476a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/engine-api/types/container/hostconfig_unix.go
@@ -0,0 +1,81 @@
+// +build !windows
+
+package container
+
+import "strings"
+
+// IsValid indicates if an isolation technology is valid
+func (i Isolation) IsValid() bool {
+    return i.IsDefault()
+}
+
+// IsPrivate indicates whether container uses its private network stack.
+func (n NetworkMode) IsPrivate() bool {
+    return !(n.IsHost() || n.IsContainer())
+}
+
+// IsDefault indicates whether container uses the default network stack.
+func (n NetworkMode) IsDefault() bool {
+    return n == "default"
+}
+
+// NetworkName returns the name of the network stack.
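+// Illustrative mapping (not exhaustive): NetworkMode("host").NetworkName()
+// yields "host", while a user-defined network such as NetworkMode("mynet")
+// yields "mynet".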
+func (n NetworkMode) NetworkName() string {
+    if n.IsBridge() {
+        return "bridge"
+    } else if n.IsHost() {
+        return "host"
+    } else if n.IsContainer() {
+        return "container"
+    } else if n.IsNone() {
+        return "none"
+    } else if n.IsDefault() {
+        return "default"
+    } else if n.IsUserDefined() {
+        return n.UserDefined()
+    }
+    return ""
+}
+
+// IsBridge indicates whether container uses the bridge network stack
+func (n NetworkMode) IsBridge() bool {
+    return n == "bridge"
+}
+
+// IsHost indicates whether container uses the host network stack.
+func (n NetworkMode) IsHost() bool {
+    return n == "host"
+}
+
+// IsContainer indicates whether container uses a container network stack.
+func (n NetworkMode) IsContainer() bool {
+    parts := strings.SplitN(string(n), ":", 2)
+    return len(parts) > 1 && parts[0] == "container"
+}
+
+// IsNone indicates whether container isn't using a network stack.
+func (n NetworkMode) IsNone() bool {
+    return n == "none"
+}
+
+// ConnectedContainer is the id of the container whose network this container is connected to.
+func (n NetworkMode) ConnectedContainer() string {
+    parts := strings.SplitN(string(n), ":", 2)
+    if len(parts) > 1 {
+        return parts[1]
+    }
+    return ""
+}
+
+// IsUserDefined indicates whether container uses a user-created network.
+func (n NetworkMode) IsUserDefined() bool {
+    return !n.IsDefault() && !n.IsBridge() && !n.IsHost() && !n.IsNone() && !n.IsContainer()
+}
+
+// UserDefined returns the name of the user-created network.
+func (n NetworkMode) UserDefined() string {
+    if n.IsUserDefined() {
+        return string(n)
+    }
+    return ""
+}
diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/types/container/hostconfig_windows.go b/Godeps/_workspace/src/github.com/docker/engine-api/types/container/hostconfig_windows.go
new file mode 100644
index 000000000000..0ee332ba6899
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/engine-api/types/container/hostconfig_windows.go
@@ -0,0 +1,87 @@
+package container
+
+import (
+    "strings"
+)
+
+// IsDefault indicates whether container uses the default network stack.
+func (n NetworkMode) IsDefault() bool {
+    return n == "default"
+}
+
+// IsNone indicates whether container isn't using a network stack.
+func (n NetworkMode) IsNone() bool {
+    return n == "none"
+}
+
+// IsContainer indicates whether container uses a container network stack.
+// Returns false as windows doesn't support this mode.
+func (n NetworkMode) IsContainer() bool {
+    return false
+}
+
+// IsBridge indicates whether container uses the bridge network stack;
+// on windows it is given the name NAT.
+func (n NetworkMode) IsBridge() bool {
+    return n == "nat"
+}
+
+// IsHost indicates whether container uses the host network stack.
+// Returns false as this is not supported by windows.
+func (n NetworkMode) IsHost() bool {
+    return false
+}
+
+// IsPrivate indicates whether container uses its private network stack.
+func (n NetworkMode) IsPrivate() bool {
+    return !(n.IsHost() || n.IsContainer())
+}
+
+// ConnectedContainer is the id of the container whose network this container is connected to.
+// Returns a blank string on Windows +func (n NetworkMode) ConnectedContainer() string { + return "" +} + +// IsUserDefined indicates user-created network +func (n NetworkMode) IsUserDefined() bool { + return !n.IsDefault() && !n.IsNone() && !n.IsBridge() +} + +// IsHyperV indicates the use of a Hyper-V partition for isolation +func (i Isolation) IsHyperV() bool { + return strings.ToLower(string(i)) == "hyperv" +} + +// IsProcess indicates the use of process isolation +func (i Isolation) IsProcess() bool { + return strings.ToLower(string(i)) == "process" +} + +// IsValid indicates if an isolation technology is valid +func (i Isolation) IsValid() bool { + return i.IsDefault() || i.IsHyperV() || i.IsProcess() +} + +// NetworkName returns the name of the network stack. +func (n NetworkMode) NetworkName() string { + if n.IsDefault() { + return "default" + } else if n.IsBridge() { + return "nat" + } else if n.IsNone() { + return "none" + } else if n.IsUserDefined() { + return n.UserDefined() + } + + return "" +} + +// UserDefined returns the name of the user-created network +func (n NetworkMode) UserDefined() string { + if n.IsUserDefined() { + return string(n) + } + return "" +} diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/types/filters/parse.go b/Godeps/_workspace/src/github.com/docker/engine-api/types/filters/parse.go new file mode 100644 index 000000000000..0e0d7e380541 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/engine-api/types/filters/parse.go @@ -0,0 +1,295 @@ +// Package filters provides helper functions to parse and handle command-line +// filters, used for example in docker ps or docker images commands. +package filters + +import ( + "encoding/json" + "errors" + "fmt" + "regexp" + "strings" + + "github.com/docker/engine-api/types/versions" +) + +// Args stores filter arguments as map key:{map key: bool}. +// It contains an aggregation of the map of arguments (which are in the form +// of -f 'key=value') based on the key, and stores values for the same key +// in a map with string keys and boolean values. +// e.g. given -f 'label=label1=1' -f 'label=label2=2' -f 'image.name=ubuntu' +// the args will be {"image.name":{"ubuntu":true},"label":{"label1=1":true,"label2=2":true}} +type Args struct { + fields map[string]map[string]bool +} + +// NewArgs initializes a new Args struct. +func NewArgs() Args { + return Args{fields: map[string]map[string]bool{}} +} + +// ParseFlag parses the argument to the filter flag. Like + +// `docker ps -f 'created=today' -f 'image.name=ubuntu*'` + +// If prev map is provided, then it is appended to, and returned. By default a new +// map is created. +func ParseFlag(arg string, prev Args) (Args, error) { + filters := prev + if len(arg) == 0 { + return filters, nil + } + + if !strings.Contains(arg, "=") { + return filters, ErrBadFormat + } + + f := strings.SplitN(arg, "=", 2) + + name := strings.ToLower(strings.TrimSpace(f[0])) + value := strings.TrimSpace(f[1]) + + filters.Add(name, value) + + return filters, nil +} + +// ErrBadFormat is an error returned in case of bad format for a filter. +var ErrBadFormat = errors.New("bad format of filter (expected name=value)") + +// ToParam packs the Args into a string for easy transport from client to server.
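// For instance (an illustrative sketch), an Args value holding
// {"label": {"foo=bar": true}} serializes to the JSON string
// `{"label":{"foo=bar":true}}`.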
+func ToParam(a Args) (string, error) { + // this way we don't URL encode {}, just empty space + if a.Len() == 0 { + return "", nil + } + + buf, err := json.Marshal(a.fields) + if err != nil { + return "", err + } + return string(buf), nil +} + +// ToParamWithVersion packs the Args into a string for easy transport from client to server. +// The generated string will depend on the specified version (corresponding to the API version). +func ToParamWithVersion(version string, a Args) (string, error) { + // this way we don't URL encode {}, just empty space + if a.Len() == 0 { + return "", nil + } + + // for daemons older than v1.10, filter must be of the form map[string][]string + buf := []byte{} + err := errors.New("") + if version != "" && versions.LessThan(version, "1.22") { + buf, err = json.Marshal(convertArgsToSlice(a.fields)) + } else { + buf, err = json.Marshal(a.fields) + } + if err != nil { + return "", err + } + return string(buf), nil +} + +// FromParam unpacks the filter Args. +func FromParam(p string) (Args, error) { + if len(p) == 0 { + return NewArgs(), nil + } + + r := strings.NewReader(p) + d := json.NewDecoder(r) + + m := map[string]map[string]bool{} + if err := d.Decode(&m); err != nil { + r.Seek(0, 0) + + // Allow parsing old arguments in slice format. + // Because other libraries might be sending them in this format. + deprecated := map[string][]string{} + if deprecatedErr := d.Decode(&deprecated); deprecatedErr == nil { + m = deprecatedArgs(deprecated) + } else { + return NewArgs(), err + } + } + return Args{m}, nil +} + +// Get returns the list of values associates with a field. +// It returns a slice of strings to keep backwards compatibility with old code. +func (filters Args) Get(field string) []string { + values := filters.fields[field] + if values == nil { + return make([]string, 0) + } + slice := make([]string, 0, len(values)) + for key := range values { + slice = append(slice, key) + } + return slice +} + +// Add adds a new value to a filter field. +func (filters Args) Add(name, value string) { + if _, ok := filters.fields[name]; ok { + filters.fields[name][value] = true + } else { + filters.fields[name] = map[string]bool{value: true} + } +} + +// Del removes a value from a filter field. +func (filters Args) Del(name, value string) { + if _, ok := filters.fields[name]; ok { + delete(filters.fields[name], value) + } +} + +// Len returns the number of fields in the arguments. +func (filters Args) Len() int { + return len(filters.fields) +} + +// MatchKVList returns true if the values for the specified field matches the ones +// from the sources. +// e.g. given Args are {'label': {'label1=1','label2=1'}, 'image.name', {'ubuntu'}}, +// field is 'label' and sources are {'label1': '1', 'label2': '2'} +// it returns true. +func (filters Args) MatchKVList(field string, sources map[string]string) bool { + fieldValues := filters.fields[field] + + //do not filter if there is no filter set or cannot determine filter + if len(fieldValues) == 0 { + return true + } + + if sources == nil || len(sources) == 0 { + return false + } + + for name2match := range fieldValues { + testKV := strings.SplitN(name2match, "=", 2) + + v, ok := sources[testKV[0]] + if !ok { + return false + } + if len(testKV) == 2 && testKV[1] != v { + return false + } + } + + return true +} + +// Match returns true if the values for the specified field matches the source string +// e.g. 
given Args are {'label': {'label1=1','label2=1'}, 'image.name', {'ubuntu'}}, +// field is 'image.name' and source is 'ubuntu' +// it returns true. +func (filters Args) Match(field, source string) bool { + if filters.ExactMatch(field, source) { + return true + } + + fieldValues := filters.fields[field] + for name2match := range fieldValues { + match, err := regexp.MatchString(name2match, source) + if err != nil { + continue + } + if match { + return true + } + } + return false +} + +// ExactMatch returns true if the source matches exactly one of the filters. +func (filters Args) ExactMatch(field, source string) bool { + fieldValues, ok := filters.fields[field] + //do not filter if there is no filter set or cannot determine filter + if !ok || len(fieldValues) == 0 { + return true + } + + // try to match full name value to avoid O(N) regular expression matching + if fieldValues[source] { + return true + } + return false +} + +// FuzzyMatch returns true if the source matches exactly one of the filters, +// or the source has one of the filters as a prefix. +func (filters Args) FuzzyMatch(field, source string) bool { + if filters.ExactMatch(field, source) { + return true + } + + fieldValues := filters.fields[field] + for prefix := range fieldValues { + if strings.HasPrefix(source, prefix) { + return true + } + } + return false +} + +// Include returns true if the name of the field to filter is in the filters. +func (filters Args) Include(field string) bool { + _, ok := filters.fields[field] + return ok +} + +// Validate ensures that all the fields in the filter are valid. +// It returns an error as soon as it finds an invalid field. +func (filters Args) Validate(accepted map[string]bool) error { + for name := range filters.fields { + if !accepted[name] { + return fmt.Errorf("Invalid filter '%s'", name) + } + } + return nil +} + +// WalkValues iterates over the list of filtered values for a field. +// It stops the iteration if it finds an error and it returns that error. 
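// A hypothetical caller (sketch only; args and isValidStatus are assumed names):
//
//	err := args.WalkValues("status", func(value string) error {
//	    if !isValidStatus(value) {
//	        return fmt.Errorf("unrecognized status filter: %s", value)
//	    }
//	    return nil
//	})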
+func (filters Args) WalkValues(field string, op func(value string) error) error { + if _, ok := filters.fields[field]; !ok { + return nil + } + for v := range filters.fields[field] { + if err := op(v); err != nil { + return err + } + } + return nil +} + +func deprecatedArgs(d map[string][]string) map[string]map[string]bool { + m := map[string]map[string]bool{} + for k, v := range d { + values := map[string]bool{} + for _, vv := range v { + values[vv] = true + } + m[k] = values + } + return m +} + +func convertArgsToSlice(f map[string]map[string]bool) map[string][]string { + m := map[string][]string{} + for k, v := range f { + values := []string{} + for kk := range v { + if v[kk] { + values = append(values, kk) + } + } + m[k] = values + } + return m +} diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/types/network/network.go b/Godeps/_workspace/src/github.com/docker/engine-api/types/network/network.go new file mode 100644 index 000000000000..bce60f5eec44 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/engine-api/types/network/network.go @@ -0,0 +1,52 @@ +package network + +// Address represents an IP address +type Address struct { + Addr string + PrefixLen int +} + +// IPAM represents IP Address Management +type IPAM struct { + Driver string + Options map[string]string //Per network IPAM driver options + Config []IPAMConfig +} + +// IPAMConfig represents IPAM configurations +type IPAMConfig struct { + Subnet string `json:",omitempty"` + IPRange string `json:",omitempty"` + Gateway string `json:",omitempty"` + AuxAddress map[string]string `json:"AuxiliaryAddresses,omitempty"` +} + +// EndpointIPAMConfig represents IPAM configurations for the endpoint +type EndpointIPAMConfig struct { + IPv4Address string `json:",omitempty"` + IPv6Address string `json:",omitempty"` +} + +// EndpointSettings stores the network endpoint details +type EndpointSettings struct { + // Configurations + IPAMConfig *EndpointIPAMConfig + Links []string + Aliases []string + // Operational data + NetworkID string + EndpointID string + Gateway string + IPAddress string + IPPrefixLen int + IPv6Gateway string + GlobalIPv6Address string + GlobalIPv6PrefixLen int + MacAddress string +} + +// NetworkingConfig represents the container's networking configuration for each of its interfaces +// Carries the networking configs specified in the `docker run` and `docker network connect` commands +type NetworkingConfig struct { + EndpointsConfig map[string]*EndpointSettings // Endpoint configs for each connecting network +} diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/types/reference/image_reference.go b/Godeps/_workspace/src/github.com/docker/engine-api/types/reference/image_reference.go new file mode 100644 index 000000000000..74201582060c --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/engine-api/types/reference/image_reference.go @@ -0,0 +1,32 @@ +package reference + +import ( + distreference "github.com/docker/distribution/reference" +) + +// Parse parses the given references and returns the repository and +// tag (if present) from it. If there is an error during parsing, it will +// return an error. +func Parse(ref string) (string, string, error) { + distributionRef, err := distreference.ParseNamed(ref) + if err != nil { + return "", "", err + } + + tag := GetTagFromNamedRef(distributionRef) + return distributionRef.Name(), tag, nil +} + +// GetTagFromNamedRef returns a tag from the specified reference. 
+// This function is necessary as long as the docker "server" api makes the distinction between repository +// and tags. +func GetTagFromNamedRef(ref distreference.Named) string { + var tag string + switch x := ref.(type) { + case distreference.Digested: + tag = x.Digest().String() + case distreference.NamedTagged: + tag = x.Tag() + } + return tag +} diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/types/registry/registry.go b/Godeps/_workspace/src/github.com/docker/engine-api/types/registry/registry.go new file mode 100644 index 000000000000..8a6fe70ea7d4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/engine-api/types/registry/registry.go @@ -0,0 +1,101 @@ +package registry + +import ( + "encoding/json" + "net" +) + +// ServiceConfig stores daemon registry services configuration. +type ServiceConfig struct { + InsecureRegistryCIDRs []*NetIPNet `json:"InsecureRegistryCIDRs"` + IndexConfigs map[string]*IndexInfo `json:"IndexConfigs"` + Mirrors []string +} + +// NetIPNet is the net.IPNet type, which can be marshalled and +// unmarshalled to JSON +type NetIPNet net.IPNet + +// MarshalJSON returns the JSON representation of the IPNet +func (ipnet *NetIPNet) MarshalJSON() ([]byte, error) { + return json.Marshal((*net.IPNet)(ipnet).String()) +} + +// UnmarshalJSON sets the IPNet from a byte array of JSON +func (ipnet *NetIPNet) UnmarshalJSON(b []byte) (err error) { + var ipnetStr string + if err = json.Unmarshal(b, &ipnetStr); err == nil { + var cidr *net.IPNet + if _, cidr, err = net.ParseCIDR(ipnetStr); err == nil { + *ipnet = NetIPNet(*cidr) + } + } + return +} + +// IndexInfo contains information about a registry +// +// RepositoryInfo Examples: +// { +// "Index" : { +// "Name" : "docker.io", +// "Mirrors" : ["https://registry-2.docker.io/v1/", "https://registry-3.docker.io/v1/"], +// "Secure" : true, +// "Official" : true, +// }, +// "RemoteName" : "library/debian", +// "LocalName" : "debian", +// "CanonicalName" : "docker.io/debian" +// "Official" : true, +// } +// +// { +// "Index" : { +// "Name" : "127.0.0.1:5000", +// "Mirrors" : [], +// "Secure" : false, +// "Official" : false, +// }, +// "RemoteName" : "user/repo", +// "LocalName" : "127.0.0.1:5000/user/repo", +// "CanonicalName" : "127.0.0.1:5000/user/repo", +// "Official" : false, +// } +type IndexInfo struct { + // Name is the name of the registry, such as "docker.io" + Name string + // Mirrors is a list of mirrors, expressed as URIs + Mirrors []string + // Secure is set to false if the registry is part of the list of + // insecure registries. Insecure registries accept HTTP and/or accept + // HTTPS with certificates from unknown CAs. 
+ Secure bool + // Official indicates whether this is an official registry + Official bool +} + +// SearchResult describes a search result returned from a registry +type SearchResult struct { + // StarCount indicates the number of stars this repository has + StarCount int `json:"star_count"` + // IsOfficial indicates whether the result is an official repository or not + IsOfficial bool `json:"is_official"` + // Name is the name of the repository + Name string `json:"name"` + // IsTrusted indicates whether the result is trusted + IsTrusted bool `json:"is_trusted"` + // IsAutomated indicates whether the result is automated + IsAutomated bool `json:"is_automated"` + // Description is a textual description of the repository + Description string `json:"description"` +} + +// SearchResults lists a collection search results returned from a registry +type SearchResults struct { + // Query contains the query string that generated the search results + Query string `json:"query"` + // NumResults indicates the number of results the query returned + NumResults int `json:"num_results"` + // Results is a slice containing the actual results for the search + Results []SearchResult `json:"results"` +} diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/types/seccomp.go b/Godeps/_workspace/src/github.com/docker/engine-api/types/seccomp.go new file mode 100644 index 000000000000..e0305a9e3789 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/engine-api/types/seccomp.go @@ -0,0 +1,68 @@ +package types + +// Seccomp represents the config for a seccomp profile for syscall restriction. +type Seccomp struct { + DefaultAction Action `json:"defaultAction"` + Architectures []Arch `json:"architectures"` + Syscalls []*Syscall `json:"syscalls"` +} + +// Arch used for additional architectures +type Arch string + +// Additional architectures permitted to be used for system calls +// By default only the native architecture of the kernel is permitted +const ( + ArchX86 Arch = "SCMP_ARCH_X86" + ArchX86_64 Arch = "SCMP_ARCH_X86_64" + ArchX32 Arch = "SCMP_ARCH_X32" + ArchARM Arch = "SCMP_ARCH_ARM" + ArchAARCH64 Arch = "SCMP_ARCH_AARCH64" + ArchMIPS Arch = "SCMP_ARCH_MIPS" + ArchMIPS64 Arch = "SCMP_ARCH_MIPS64" + ArchMIPS64N32 Arch = "SCMP_ARCH_MIPS64N32" + ArchMIPSEL Arch = "SCMP_ARCH_MIPSEL" + ArchMIPSEL64 Arch = "SCMP_ARCH_MIPSEL64" + ArchMIPSEL64N32 Arch = "SCMP_ARCH_MIPSEL64N32" +) + +// Action taken upon Seccomp rule match +type Action string + +// Define actions for Seccomp rules +const ( + ActKill Action = "SCMP_ACT_KILL" + ActTrap Action = "SCMP_ACT_TRAP" + ActErrno Action = "SCMP_ACT_ERRNO" + ActTrace Action = "SCMP_ACT_TRACE" + ActAllow Action = "SCMP_ACT_ALLOW" +) + +// Operator used to match syscall arguments in Seccomp +type Operator string + +// Define operators for syscall arguments in Seccomp +const ( + OpNotEqual Operator = "SCMP_CMP_NE" + OpLessThan Operator = "SCMP_CMP_LT" + OpLessEqual Operator = "SCMP_CMP_LE" + OpEqualTo Operator = "SCMP_CMP_EQ" + OpGreaterEqual Operator = "SCMP_CMP_GE" + OpGreaterThan Operator = "SCMP_CMP_GT" + OpMaskedEqual Operator = "SCMP_CMP_MASKED_EQ" +) + +// Arg used for matching specific syscall arguments in Seccomp +type Arg struct { + Index uint `json:"index"` + Value uint64 `json:"value"` + ValueTwo uint64 `json:"valueTwo"` + Op Operator `json:"op"` +} + +// Syscall is used to match a syscall in Seccomp +type Syscall struct { + Name string `json:"name"` + Action Action `json:"action"` + Args []*Arg `json:"args"` +} diff --git 
a/Godeps/_workspace/src/github.com/docker/engine-api/types/stats.go b/Godeps/_workspace/src/github.com/docker/engine-api/types/stats.go new file mode 100644 index 000000000000..b420ebe7f6ab --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/engine-api/types/stats.go @@ -0,0 +1,115 @@ +// Package types is used for API stability in the types and response to the +// consumers of the API stats endpoint. +package types + +import "time" + +// ThrottlingData stores CPU throttling stats of one running container +type ThrottlingData struct { + // Number of periods with throttling active + Periods uint64 `json:"periods"` + // Number of periods when the container hits its throttling limit. + ThrottledPeriods uint64 `json:"throttled_periods"` + // Aggregate time the container was throttled for in nanoseconds. + ThrottledTime uint64 `json:"throttled_time"` +} + +// CPUUsage stores All CPU stats aggregated since container inception. +type CPUUsage struct { + // Total CPU time consumed. + // Units: nanoseconds. + TotalUsage uint64 `json:"total_usage"` + // Total CPU time consumed per core. + // Units: nanoseconds. + PercpuUsage []uint64 `json:"percpu_usage"` + // Time spent by tasks of the cgroup in kernel mode. + // Units: nanoseconds. + UsageInKernelmode uint64 `json:"usage_in_kernelmode"` + // Time spent by tasks of the cgroup in user mode. + // Units: nanoseconds. + UsageInUsermode uint64 `json:"usage_in_usermode"` +} + +// CPUStats aggregates and wraps all CPU related info of container +type CPUStats struct { + CPUUsage CPUUsage `json:"cpu_usage"` + SystemUsage uint64 `json:"system_cpu_usage"` + ThrottlingData ThrottlingData `json:"throttling_data,omitempty"` +} + +// MemoryStats aggregates All memory stats since container inception +type MemoryStats struct { + // current res_counter usage for memory + Usage uint64 `json:"usage"` + // maximum usage ever recorded. + MaxUsage uint64 `json:"max_usage"` + // TODO(vishh): Export these as stronger types. + // all the stats exported via memory.stat. + Stats map[string]uint64 `json:"stats"` + // number of times memory usage hits limits. 
+ Failcnt uint64 `json:"failcnt"` + Limit uint64 `json:"limit"` +} + +// BlkioStatEntry is one small entity to store a piece of Blkio stats +// TODO Windows: This can be factored out +type BlkioStatEntry struct { + Major uint64 `json:"major"` + Minor uint64 `json:"minor"` + Op string `json:"op"` + Value uint64 `json:"value"` +} + +// BlkioStats stores all IO service stats for data read and write +// TODO Windows: This can be factored out +type BlkioStats struct { + // number of bytes transferred to and from the block device + IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive"` + IoServicedRecursive []BlkioStatEntry `json:"io_serviced_recursive"` + IoQueuedRecursive []BlkioStatEntry `json:"io_queue_recursive"` + IoServiceTimeRecursive []BlkioStatEntry `json:"io_service_time_recursive"` + IoWaitTimeRecursive []BlkioStatEntry `json:"io_wait_time_recursive"` + IoMergedRecursive []BlkioStatEntry `json:"io_merged_recursive"` + IoTimeRecursive []BlkioStatEntry `json:"io_time_recursive"` + SectorsRecursive []BlkioStatEntry `json:"sectors_recursive"` +} + +// NetworkStats aggregates all network stats of one container +// TODO Windows: This will require refactoring +type NetworkStats struct { + RxBytes uint64 `json:"rx_bytes"` + RxPackets uint64 `json:"rx_packets"` + RxErrors uint64 `json:"rx_errors"` + RxDropped uint64 `json:"rx_dropped"` + TxBytes uint64 `json:"tx_bytes"` + TxPackets uint64 `json:"tx_packets"` + TxErrors uint64 `json:"tx_errors"` + TxDropped uint64 `json:"tx_dropped"` +} + +// PidsStats contains the stats of a container's pids +type PidsStats struct { + // Current is the number of pids in the cgroup + Current uint64 `json:"current,omitempty"` + // Limit is the hard limit on the number of pids in the cgroup. + // A "Limit" of 0 means that there is no limit. + Limit uint64 `json:"limit,omitempty"` +} + +// Stats is the ultimate struct aggregating all types of stats of one container +type Stats struct { + Read time.Time `json:"read"` + PreCPUStats CPUStats `json:"precpu_stats,omitempty"` + CPUStats CPUStats `json:"cpu_stats,omitempty"` + MemoryStats MemoryStats `json:"memory_stats,omitempty"` + BlkioStats BlkioStats `json:"blkio_stats,omitempty"` + PidsStats PidsStats `json:"pids_stats,omitempty"` +} + +// StatsJSON extends Stats with per-network statistics +type StatsJSON struct { + Stats + + // Networks request version >=1.21 + Networks map[string]NetworkStats `json:"networks,omitempty"` +} diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/types/strslice/strslice.go b/Godeps/_workspace/src/github.com/docker/engine-api/types/strslice/strslice.go new file mode 100644 index 000000000000..bad493fb89fd --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/engine-api/types/strslice/strslice.go @@ -0,0 +1,30 @@ +package strslice + +import "encoding/json" + +// StrSlice represents a string or an array of strings. +// We need to override the json decoder to accept both options. +type StrSlice []string + +// UnmarshalJSON decodes the byte slice whether it's a string or an array of +// strings. This method is needed to implement json.Unmarshaler. +func (e *StrSlice) UnmarshalJSON(b []byte) error { + if len(b) == 0 { + // With no input, we preserve the existing value by returning nil and + // leaving the target alone. This allows defining default values for + // the type.
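// (As a sketch: the JSON inputs `"sh"` and `["sh", "-c"]` both decode
// into a StrSlice; the plain-string form is handled by the fallback below.)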
+ return nil + } + + p := make([]string, 0, 1) + if err := json.Unmarshal(b, &p); err != nil { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + p = append(p, s) + } + + *e = p + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/types/time/timestamp.go b/Godeps/_workspace/src/github.com/docker/engine-api/types/time/timestamp.go new file mode 100644 index 000000000000..d3695ba723b7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/engine-api/types/time/timestamp.go @@ -0,0 +1,124 @@ +package time + +import ( + "fmt" + "math" + "strconv" + "strings" + "time" +) + +// These are additional predefined layouts for use in Time.Format and Time.Parse +// with --since and --until parameters for `docker logs` and `docker events` +const ( + rFC3339Local = "2006-01-02T15:04:05" // RFC3339 with local timezone + rFC3339NanoLocal = "2006-01-02T15:04:05.999999999" // RFC3339Nano with local timezone + dateWithZone = "2006-01-02Z07:00" // RFC3339 with time at 00:00:00 + dateLocal = "2006-01-02" // RFC3339 with local timezone and time at 00:00:00 +) + +// GetTimestamp tries to parse given string as golang duration, +// then RFC3339 time and finally as a Unix timestamp. If +// any of these were successful, it returns a Unix timestamp +// as string otherwise returns the given value back. +// In case of duration input, the returned timestamp is computed +// as the given reference time minus the amount of the duration. +func GetTimestamp(value string, reference time.Time) (string, error) { + if d, err := time.ParseDuration(value); value != "0" && err == nil { + return strconv.FormatInt(reference.Add(-d).Unix(), 10), nil + } + + var format string + var parseInLocation bool + + // if the string has a Z or a + or three dashes use parse otherwise use parseinlocation + parseInLocation = !(strings.ContainsAny(value, "zZ+") || strings.Count(value, "-") == 3) + + if strings.Contains(value, ".") { + if parseInLocation { + format = rFC3339NanoLocal + } else { + format = time.RFC3339Nano + } + } else if strings.Contains(value, "T") { + // we want the number of colons in the T portion of the timestamp + tcolons := strings.Count(value, ":") + // if parseInLocation is off and we have a +/- zone offset (not Z) then + // there will be an extra colon in the input for the tz offset subtract that + // colon from the tcolons count + if !parseInLocation && !strings.ContainsAny(value, "zZ") && tcolons > 0 { + tcolons-- + } + if parseInLocation { + switch tcolons { + case 0: + format = "2006-01-02T15" + case 1: + format = "2006-01-02T15:04" + default: + format = rFC3339Local + } + } else { + switch tcolons { + case 0: + format = "2006-01-02T15Z07:00" + case 1: + format = "2006-01-02T15:04Z07:00" + default: + format = time.RFC3339 + } + } + } else if parseInLocation { + format = dateLocal + } else { + format = dateWithZone + } + + var t time.Time + var err error + + if parseInLocation { + t, err = time.ParseInLocation(format, value, time.FixedZone(reference.Zone())) + } else { + t, err = time.Parse(format, value) + } + + if err != nil { + // if there is a `-` then its an RFC3339 like timestamp otherwise assume unixtimestamp + if strings.Contains(value, "-") { + return "", err // was probably an RFC3339 like timestamp but the parser failed with an error + } + return value, nil // unixtimestamp in and out case (meaning: the value passed at the command line is already in the right format for passing to the server) + } + + return fmt.Sprintf("%d.%09d", t.Unix(), 
int64(t.Nanosecond())), nil +} + +// ParseTimestamps returns seconds and nanoseconds from a timestamp that has the +// format "%d.%09d", time.Unix(), int64(time.Nanosecond())) +// if the incoming nanosecond portion is longer or shorter than 9 digits it is +// converted to nanoseconds. The expectation is that the seconds and +// nanoseconds will be used to create a time variable. For example: +// seconds, nanoseconds, err := ParseTimestamps("1136073600.000000001", 0) +// if err == nil since := time.Unix(seconds, nanoseconds) +// returns seconds as def(aultSeconds) if value == "" +func ParseTimestamps(value string, def int64) (int64, int64, error) { + if value == "" { + return def, 0, nil + } + sa := strings.SplitN(value, ".", 2) + s, err := strconv.ParseInt(sa[0], 10, 64) + if err != nil { + return s, 0, err + } + if len(sa) != 2 { + return s, 0, nil + } + n, err := strconv.ParseInt(sa[1], 10, 64) + if err != nil { + return s, n, err + } + // should already be in nanoseconds but just in case convert n to nanoseconds + n = int64(float64(n) * math.Pow(float64(10), float64(9-len(sa[1])))) + return s, n, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/types/types.go b/Godeps/_workspace/src/github.com/docker/engine-api/types/types.go new file mode 100644 index 000000000000..406b561a6e59 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/engine-api/types/types.go @@ -0,0 +1,472 @@ +package types + +import ( + "os" + "time" + + "github.com/docker/engine-api/types/container" + "github.com/docker/engine-api/types/network" + "github.com/docker/engine-api/types/registry" + "github.com/docker/go-connections/nat" +) + +// ContainerCreateResponse contains the information returned to a client on the +// creation of a new container. +type ContainerCreateResponse struct { + // ID is the ID of the created container. + ID string `json:"Id"` + + // Warnings are any warnings encountered during the creation of the container. + Warnings []string `json:"Warnings"` +} + +// ContainerExecCreateResponse contains response of Remote API: +// POST "/containers/{name:.*}/exec" +type ContainerExecCreateResponse struct { + // ID is the exec ID. + ID string `json:"Id"` +} + +// ContainerUpdateResponse contains response of Remote API: +// POST /containers/{name:.*}/update +type ContainerUpdateResponse struct { + // Warnings are any warnings encountered during the updating of the container. + Warnings []string `json:"Warnings"` +} + +// AuthResponse contains response of Remote API: +// POST "/auth" +type AuthResponse struct { + // Status is the authentication status + Status string `json:"Status"` + + // IdentityToken is an opaque token used for authenticating + // a user after a successful login.
+ IdentityToken string `json:"IdentityToken,omitempty"` +} + +// ContainerWaitResponse contains response of Remote API: +// POST "/containers/"+containerID+"/wait" +type ContainerWaitResponse struct { + // StatusCode is the status code of the wait job + StatusCode int `json:"StatusCode"` +} + +// ContainerCommitResponse contains response of Remote API: +// POST "/commit?container="+containerID +type ContainerCommitResponse struct { + ID string `json:"Id"` +} + +// ContainerChange contains response of Remote API: +// GET "/containers/{name:.*}/changes" +type ContainerChange struct { + Kind int + Path string +} + +// ImageHistory contains response of Remote API: +// GET "/images/{name:.*}/history" +type ImageHistory struct { + ID string `json:"Id"` + Created int64 + CreatedBy string + Tags []string + Size int64 + Comment string +} + +// ImageDelete contains response of Remote API: +// DELETE "/images/{name:.*}" +type ImageDelete struct { + Untagged string `json:",omitempty"` + Deleted string `json:",omitempty"` +} + +// Image contains response of Remote API: +// GET "/images/json" +type Image struct { + ID string `json:"Id"` + ParentID string `json:"ParentId"` + RepoTags []string + RepoDigests []string + Created int64 + Size int64 + VirtualSize int64 + Labels map[string]string +} + +// GraphDriverData returns Image's graph driver config info +// when calling inspect command +type GraphDriverData struct { + Name string + Data map[string]string +} + +// RootFS returns Image's RootFS description including the layer IDs. +type RootFS struct { + Type string + Layers []string `json:",omitempty"` + BaseLayer string `json:",omitempty"` +} + +// ImageInspect contains response of Remote API: +// GET "/images/{name:.*}/json" +type ImageInspect struct { + ID string `json:"Id"` + RepoTags []string + RepoDigests []string + Parent string + Comment string + Created string + Container string + ContainerConfig *container.Config + DockerVersion string + Author string + Config *container.Config + Architecture string + Os string + Size int64 + VirtualSize int64 + GraphDriver GraphDriverData + RootFS RootFS +} + +// Port stores open ports info of container +// e.g. {"PrivatePort": 8080, "PublicPort": 80, "Type": "tcp"} +type Port struct { + IP string `json:",omitempty"` + PrivatePort int + PublicPort int `json:",omitempty"` + Type string +} + +// Container contains response of Remote API: +// GET "/containers/json" +type Container struct { + ID string `json:"Id"` + Names []string + Image string + ImageID string + Command string + Created int64 + Ports []Port + SizeRw int64 `json:",omitempty"` + SizeRootFs int64 `json:",omitempty"` + Labels map[string]string + State string + Status string + HostConfig struct { + NetworkMode string `json:",omitempty"` + } + NetworkSettings *SummaryNetworkSettings + Mounts []MountPoint +} + +// CopyConfig contains request body of Remote API: +// POST "/containers/"+containerID+"/copy" +type CopyConfig struct { + Resource string +} + +// ContainerPathStat is used to encode the header from +// GET "/containers/{name:.*}/archive" +// "Name" is the file or directory name. 
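// A stat header for a regular file might decode from JSON like
// {"name":"app.log","size":1024,"mode":420,"mtime":"2016-01-02T15:04:05Z","linkTarget":""}
// (values illustrative only).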
+type ContainerPathStat struct { + Name string `json:"name"` + Size int64 `json:"size"` + Mode os.FileMode `json:"mode"` + Mtime time.Time `json:"mtime"` + LinkTarget string `json:"linkTarget"` +} + +// ContainerProcessList contains response of Remote API: +// GET "/containers/{name:.*}/top" +type ContainerProcessList struct { + Processes [][]string + Titles []string +} + +// Version contains response of Remote API: +// GET "/version" +type Version struct { + Version string + APIVersion string `json:"ApiVersion"` + GitCommit string + GoVersion string + Os string + Arch string + KernelVersion string `json:",omitempty"` + Experimental bool `json:",omitempty"` + BuildTime string `json:",omitempty"` +} + +// Info contains response of Remote API: +// GET "/info" +type Info struct { + ID string + Containers int + ContainersRunning int + ContainersPaused int + ContainersStopped int + Images int + Driver string + DriverStatus [][2]string + SystemStatus [][2]string + Plugins PluginsInfo + MemoryLimit bool + SwapLimit bool + KernelMemory bool + CPUCfsPeriod bool `json:"CpuCfsPeriod"` + CPUCfsQuota bool `json:"CpuCfsQuota"` + CPUShares bool + CPUSet bool + IPv4Forwarding bool + BridgeNfIptables bool + BridgeNfIP6tables bool `json:"BridgeNfIp6tables"` + Debug bool + NFd int + OomKillDisable bool + NGoroutines int + SystemTime string + ExecutionDriver string + LoggingDriver string + CgroupDriver string + NEventsListener int + KernelVersion string + OperatingSystem string + OSType string + Architecture string + IndexServerAddress string + RegistryConfig *registry.ServiceConfig + NCPU int + MemTotal int64 + DockerRootDir string + HTTPProxy string `json:"HttpProxy"` + HTTPSProxy string `json:"HttpsProxy"` + NoProxy string + Name string + Labels []string + ExperimentalBuild bool + ServerVersion string + ClusterStore string + ClusterAdvertise string + SecurityOptions []string +} + +// PluginsInfo is a temp struct holding Plugins name +// registered with docker daemon. It is used by Info struct +type PluginsInfo struct { + // List of Volume plugins registered + Volume []string + // List of Network plugins registered + Network []string + // List of Authorization plugins registered + Authorization []string +} + +// ExecStartCheck is a temp struct used by execStart +// Config fields is part of ExecConfig in runconfig package +type ExecStartCheck struct { + // ExecStart will first check if it's detached + Detach bool + // Check if there's a tty + Tty bool +} + +// ContainerState stores container's running state +// it's part of ContainerJSONBase and will return by "inspect" command +type ContainerState struct { + Status string + Running bool + Paused bool + Restarting bool + OOMKilled bool + Dead bool + Pid int + ExitCode int + Error string + StartedAt string + FinishedAt string +} + +// ContainerNode stores information about the node that a container +// is running on. 
It's only available in Docker Swarm +type ContainerNode struct { + ID string + IPAddress string `json:"IP"` + Addr string + Name string + Cpus int + Memory int + Labels map[string]string +} + +// ContainerJSONBase contains response of Remote API: +// GET "/containers/{name:.*}/json" +type ContainerJSONBase struct { + ID string `json:"Id"` + Created string + Path string + Args []string + State *ContainerState + Image string + ResolvConfPath string + HostnamePath string + HostsPath string + LogPath string + Node *ContainerNode `json:",omitempty"` + Name string + RestartCount int + Driver string + MountLabel string + ProcessLabel string + AppArmorProfile string + ExecIDs []string + HostConfig *container.HostConfig + GraphDriver GraphDriverData + SizeRw *int64 `json:",omitempty"` + SizeRootFs *int64 `json:",omitempty"` +} + +// ContainerJSON is newly used struct along with MountPoint +type ContainerJSON struct { + *ContainerJSONBase + Mounts []MountPoint + Config *container.Config + NetworkSettings *NetworkSettings +} + +// NetworkSettings exposes the network settings in the api +type NetworkSettings struct { + NetworkSettingsBase + DefaultNetworkSettings + Networks map[string]*network.EndpointSettings +} + +// SummaryNetworkSettings provides a summary of container's networks +// in /containers/json +type SummaryNetworkSettings struct { + Networks map[string]*network.EndpointSettings +} + +// NetworkSettingsBase holds basic information about networks +type NetworkSettingsBase struct { + Bridge string + SandboxID string + HairpinMode bool + LinkLocalIPv6Address string + LinkLocalIPv6PrefixLen int + Ports nat.PortMap + SandboxKey string + SecondaryIPAddresses []network.Address + SecondaryIPv6Addresses []network.Address +} + +// DefaultNetworkSettings holds network information +// during the 2 release deprecation period. +// It will be removed in Docker 1.11. +type DefaultNetworkSettings struct { + EndpointID string + Gateway string + GlobalIPv6Address string + GlobalIPv6PrefixLen int + IPAddress string + IPPrefixLen int + IPv6Gateway string + MacAddress string +} + +// MountPoint represents a mount point configuration inside the container. +type MountPoint struct { + Name string `json:",omitempty"` + Source string + Destination string + Driver string `json:",omitempty"` + Mode string + RW bool + Propagation string +} + +// Volume represents the configuration of a volume for the remote API +type Volume struct { + Name string // Name is the name of the volume + Driver string // Driver is the Driver name used to create the volume + Mountpoint string // Mountpoint is the location on disk of the volume + Status map[string]interface{} `json:",omitempty"` // Status provides low-level status information about the volume + Labels map[string]string // Labels is metadata specific to the volume +} + +// VolumesListResponse contains the response for the remote API: +// GET "/volumes" +type VolumesListResponse struct { + Volumes []*Volume // Volumes is the list of volumes being returned + Warnings []string // Warnings is a list of warnings that occurred when getting the list from the volume drivers +} + +// VolumeCreateRequest contains the response for the remote API: +// POST "/volumes/create" +type VolumeCreateRequest struct { + Name string // Name is the requested name of the volume + Driver string // Driver is the name of the driver that should be used to create the volume + DriverOpts map[string]string // DriverOpts holds the driver specific options to use for when creating the volume. 
+ Labels map[string]string // Labels holds metadata specific to the volume being created. +} + +// NetworkResource is the body of the "get network" http response message +type NetworkResource struct { + Name string + ID string `json:"Id"` + Scope string + Driver string + EnableIPv6 bool + IPAM network.IPAM + Internal bool + Containers map[string]EndpointResource + Options map[string]string + Labels map[string]string +} + +// EndpointResource contains network resources allocated and used for a container in a network +type EndpointResource struct { + Name string + EndpointID string + MacAddress string + IPv4Address string + IPv6Address string +} + +// NetworkCreate is the expected body of the "create network" http request message +type NetworkCreate struct { + CheckDuplicate bool + Driver string + EnableIPv6 bool + IPAM network.IPAM + Internal bool + Options map[string]string + Labels map[string]string +} + +// NetworkCreateRequest is the request message sent to the server for network create call. +type NetworkCreateRequest struct { + NetworkCreate + Name string +} + +// NetworkCreateResponse is the response message sent by the server for network create call +type NetworkCreateResponse struct { + ID string `json:"Id"` + Warning string +} + +// NetworkConnect represents the data to be used to connect a container to the network +type NetworkConnect struct { + Container string + EndpointConfig *network.EndpointSettings `json:",omitempty"` +} + +// NetworkDisconnect represents the data to be used to disconnect a container from the network +type NetworkDisconnect struct { + Container string + Force bool +} diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/types/versions/README.md b/Godeps/_workspace/src/github.com/docker/engine-api/types/versions/README.md new file mode 100644 index 000000000000..76c516e6a317 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/engine-api/types/versions/README.md @@ -0,0 +1,14 @@ +## Legacy API type versions + +This package includes types for legacy API versions. The stable version of the API types lives in `api/types/*.go`. + +Consider moving a type here when you need to keep backwards compatibility in the API. These legacy types are organized by the latest API version they appear in. For instance, types in the `v1p19` package are valid for API versions below or equal to `1.19`. Types in the `v1p20` package are valid for the API version `1.20`, since the versions below that will use the legacy types in `v1p19`. + +### Package name conventions + +The package name convention is to use `v` as a prefix for the version number and `p`(patch) as a separator. We use this nomenclature due to a few restrictions in the Go package name convention: + +1. We cannot use `.` because it's interpreted by the language; think of `v1.20.CallFunction`. +2. We cannot use `_` because golint complains about it. The code would actually be valid, but it arguably looks more awkward: `v1_20.CallFunction`. + +For instance, if you want to modify a type that was available in version `1.21` of the API but will have different fields in version `1.22`, you want to create a new package under `api/types/versions/v1p21`.
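As a sketch of how these version comparisons are used elsewhere in this tree (for example, filters.ToParamWithVersion gates the wire format on the API version), the program below is illustrative only and assumes the vendored import path:

    package main

    import (
        "fmt"

        "github.com/docker/engine-api/types/versions"
    )

    func main() {
        // Daemons older than API 1.22 expect the legacy filter encoding.
        fmt.Println(versions.LessThan("1.21", "1.22"))              // true
        fmt.Println(versions.GreaterThanOrEqualTo("1.22", "1.22")) // true
    }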
diff --git a/Godeps/_workspace/src/github.com/docker/engine-api/types/versions/compare.go b/Godeps/_workspace/src/github.com/docker/engine-api/types/versions/compare.go new file mode 100644 index 000000000000..611d4fed66e5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/engine-api/types/versions/compare.go @@ -0,0 +1,62 @@ +package versions + +import ( + "strconv" + "strings" +) + +// compare compares two version strings +// returns -1 if v1 < v2, 1 if v1 > v2, 0 otherwise. +func compare(v1, v2 string) int { + var ( + currTab = strings.Split(v1, ".") + otherTab = strings.Split(v2, ".") + ) + + max := len(currTab) + if len(otherTab) > max { + max = len(otherTab) + } + for i := 0; i < max; i++ { + var currInt, otherInt int + + if len(currTab) > i { + currInt, _ = strconv.Atoi(currTab[i]) + } + if len(otherTab) > i { + otherInt, _ = strconv.Atoi(otherTab[i]) + } + if currInt > otherInt { + return 1 + } + if otherInt > currInt { + return -1 + } + } + return 0 +} + +// LessThan checks if a version is less than another +func LessThan(v, other string) bool { + return compare(v, other) == -1 +} + +// LessThanOrEqualTo checks if a version is less than or equal to another +func LessThanOrEqualTo(v, other string) bool { + return compare(v, other) <= 0 +} + +// GreaterThan checks if a version is greater than another +func GreaterThan(v, other string) bool { + return compare(v, other) == 1 +} + +// GreaterThanOrEqualTo checks if a version is greater than or equal to another +func GreaterThanOrEqualTo(v, other string) bool { + return compare(v, other) >= 0 +} + +// Equal checks if a version is equal to another +func Equal(v, other string) bool { + return compare(v, other) == 0 +} diff --git a/Godeps/_workspace/src/github.com/docker/go-connections/LICENSE b/Godeps/_workspace/src/github.com/docker/go-connections/LICENSE new file mode 100644 index 000000000000..b55b37bc3162 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/go-connections/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/Godeps/_workspace/src/github.com/docker/go-connections/nat/nat.go b/Godeps/_workspace/src/github.com/docker/go-connections/nat/nat.go new file mode 100644 index 000000000000..3d469165ab52 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/go-connections/nat/nat.go @@ -0,0 +1,223 @@ +// Package nat is a convenience package for manipulation of strings describing network ports. +package nat + +import ( + "fmt" + "net" + "strconv" + "strings" +) + +const ( + // portSpecTemplate is the expected format for port specifications + portSpecTemplate = "ip:hostPort:containerPort" +) + +// PortBinding represents a binding between a Host IP address and a Host Port +type PortBinding struct { + // HostIP is the host IP Address + HostIP string `json:"HostIp"` + // HostPort is the host port number + HostPort string +} + +// PortMap is a collection of PortBinding indexed by Port +type PortMap map[Port][]PortBinding + +// PortSet is a collection of structs indexed by Port +type PortSet map[Port]struct{} + +// Port is a string containing port number and protocol in the format "80/tcp" +type Port string + +// NewPort creates a new instance of a Port given a protocol and port number or port range +func NewPort(proto, port string) (Port, error) { + // Check for parsing issues on "port" now so we can avoid having + // to check it later on. 
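// (For example: NewPort("tcp", "80") would yield Port("80/tcp"), and
// NewPort("udp", "8000-8010") would yield Port("8000-8010/udp").)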
+ + portStartInt, portEndInt, err := ParsePortRangeToInt(port) + if err != nil { + return "", err + } + + if portStartInt == portEndInt { + return Port(fmt.Sprintf("%d/%s", portStartInt, proto)), nil + } + return Port(fmt.Sprintf("%d-%d/%s", portStartInt, portEndInt, proto)), nil +} + +// ParsePort parses the port number string and returns an int +func ParsePort(rawPort string) (int, error) { + if len(rawPort) == 0 { + return 0, nil + } + port, err := strconv.ParseUint(rawPort, 10, 16) + if err != nil { + return 0, err + } + return int(port), nil +} + +// ParsePortRangeToInt parses the port range string and returns start/end ints +func ParsePortRangeToInt(rawPort string) (int, int, error) { + if len(rawPort) == 0 { + return 0, 0, nil + } + start, end, err := ParsePortRange(rawPort) + if err != nil { + return 0, 0, err + } + return int(start), int(end), nil +} + +// Proto returns the protocol of a Port +func (p Port) Proto() string { + proto, _ := SplitProtoPort(string(p)) + return proto +} + +// Port returns the port number of a Port +func (p Port) Port() string { + _, port := SplitProtoPort(string(p)) + return port +} + +// Int returns the port number of a Port as an int +func (p Port) Int() int { + portStr := p.Port() + if len(portStr) == 0 { + return 0 + } + + // We don't need to check for an error because we're going to + // assume that any error would have been found, and reported, in NewPort() + port, _ := strconv.ParseUint(portStr, 10, 16) + return int(port) +} + +// Range returns the start/end port numbers of a Port range as ints +func (p Port) Range() (int, int, error) { + return ParsePortRangeToInt(p.Port()) +} + +// SplitProtoPort splits a port in the format of proto/port +func SplitProtoPort(rawPort string) (string, string) { + parts := strings.Split(rawPort, "/") + l := len(parts) + if len(rawPort) == 0 || l == 0 || len(parts[0]) == 0 { + return "", "" + } + if l == 1 { + return "tcp", rawPort + } + if len(parts[1]) == 0 { + return "tcp", parts[0] + } + return parts[1], parts[0] +} + +func validateProto(proto string) bool { + for _, availableProto := range []string{"tcp", "udp"} { + if availableProto == proto { + return true + } + } + return false +} + +// ParsePortSpecs receives port specs in the format of ip:public:private/proto and parses +// these in to the internal types +func ParsePortSpecs(ports []string) (map[Port]struct{}, map[Port][]PortBinding, error) { + var ( + exposedPorts = make(map[Port]struct{}, len(ports)) + bindings = make(map[Port][]PortBinding) + ) + + for _, rawPort := range ports { + proto := "tcp" + + if i := strings.LastIndex(rawPort, "/"); i != -1 { + proto = rawPort[i+1:] + rawPort = rawPort[:i] + } + if !strings.Contains(rawPort, ":") { + rawPort = fmt.Sprintf("::%s", rawPort) + } else if len(strings.Split(rawPort, ":")) == 2 { + rawPort = fmt.Sprintf(":%s", rawPort) + } + + parts, err := PartParser(portSpecTemplate, rawPort) + if err != nil { + return nil, nil, err + } + + var ( + containerPort = parts["containerPort"] + rawIP = parts["ip"] + hostPort = parts["hostPort"] + ) + + if rawIP != "" && net.ParseIP(rawIP) == nil { + return nil, nil, fmt.Errorf("Invalid ip address: %s", rawIP) + } + if containerPort == "" { + return nil, nil, fmt.Errorf("No port specified: %s", rawPort) + } + + startPort, endPort, err := ParsePortRange(containerPort) + if err != nil { + return nil, nil, fmt.Errorf("Invalid containerPort: %s", containerPort) + } + + var startHostPort, endHostPort uint64 = 0, 0 + if len(hostPort) > 0 { + startHostPort, endHostPort, err = 
ParsePortRange(hostPort) + if err != nil { + return nil, nil, fmt.Errorf("Invalid hostPort: %s", hostPort) + } + } + + if hostPort != "" && (endPort-startPort) != (endHostPort-startHostPort) { + // Allow host port range iff containerPort is not a range. + // In this case, use the host port range as the dynamic + // host port range to allocate into. + if endPort != startPort { + return nil, nil, fmt.Errorf("Invalid ranges specified for container and host Ports: %s and %s", containerPort, hostPort) + } + } + + if !validateProto(strings.ToLower(proto)) { + return nil, nil, fmt.Errorf("Invalid proto: %s", proto) + } + + for i := uint64(0); i <= (endPort - startPort); i++ { + containerPort = strconv.FormatUint(startPort+i, 10) + if len(hostPort) > 0 { + hostPort = strconv.FormatUint(startHostPort+i, 10) + } + // Set hostPort to a range only if there is a single container port + // and a dynamic host port. + if startPort == endPort && startHostPort != endHostPort { + hostPort = fmt.Sprintf("%s-%s", hostPort, strconv.FormatUint(endHostPort, 10)) + } + port, err := NewPort(strings.ToLower(proto), containerPort) + if err != nil { + return nil, nil, err + } + if _, exists := exposedPorts[port]; !exists { + exposedPorts[port] = struct{}{} + } + + binding := PortBinding{ + HostIP: rawIP, + HostPort: hostPort, + } + bslice, exists := bindings[port] + if !exists { + bslice = []PortBinding{} + } + bindings[port] = append(bslice, binding) + } + } + return exposedPorts, bindings, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/go-connections/nat/parse.go b/Godeps/_workspace/src/github.com/docker/go-connections/nat/parse.go new file mode 100644 index 000000000000..872050205f49 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/go-connections/nat/parse.go @@ -0,0 +1,56 @@ +package nat + +import ( + "fmt" + "strconv" + "strings" +) + +// PartParser parses and validates the specified string (data) using the specified template +// e.g. ip:public:private -> 192.168.0.1:80:8000 +func PartParser(template, data string) (map[string]string, error) { + // ip:public:private + var ( + templateParts = strings.Split(template, ":") + parts = strings.Split(data, ":") + out = make(map[string]string, len(templateParts)) + ) + if len(parts) != len(templateParts) { + return nil, fmt.Errorf("Invalid format to parse. 
%s should match template %s", data, template)
+	}
+
+	for i, t := range templateParts {
+		value := ""
+		if len(parts) > i {
+			value = parts[i]
+		}
+		out[t] = value
+	}
+	return out, nil
+}
+
+// ParsePortRange parses and validates the specified string as a port-range (8000-9000)
+func ParsePortRange(ports string) (uint64, uint64, error) {
+	if ports == "" {
+		return 0, 0, fmt.Errorf("Empty string specified for ports.")
+	}
+	if !strings.Contains(ports, "-") {
+		start, err := strconv.ParseUint(ports, 10, 16)
+		end := start
+		return start, end, err
+	}
+
+	parts := strings.Split(ports, "-")
+	start, err := strconv.ParseUint(parts[0], 10, 16)
+	if err != nil {
+		return 0, 0, err
+	}
+	end, err := strconv.ParseUint(parts[1], 10, 16)
+	if err != nil {
+		return 0, 0, err
+	}
+	if end < start {
+		return 0, 0, fmt.Errorf("Invalid range specified for the Port: %s", ports)
+	}
+	return start, end, nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/go-connections/nat/sort.go b/Godeps/_workspace/src/github.com/docker/go-connections/nat/sort.go
new file mode 100644
index 000000000000..ce950171e315
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/go-connections/nat/sort.go
@@ -0,0 +1,96 @@
+package nat
+
+import (
+	"sort"
+	"strings"
+)
+
+type portSorter struct {
+	ports []Port
+	by    func(i, j Port) bool
+}
+
+func (s *portSorter) Len() int {
+	return len(s.ports)
+}
+
+func (s *portSorter) Swap(i, j int) {
+	s.ports[i], s.ports[j] = s.ports[j], s.ports[i]
+}
+
+func (s *portSorter) Less(i, j int) bool {
+	ip := s.ports[i]
+	jp := s.ports[j]
+
+	return s.by(ip, jp)
+}
+
+// Sort sorts a list of ports using the provided predicate.
+// This function should compare `i` and `j`, returning true if `i` is
+// considered to be less than `j`.
+func Sort(ports []Port, predicate func(i, j Port) bool) {
+	s := &portSorter{ports, predicate}
+	sort.Sort(s)
+}
+
+type portMapEntry struct {
+	port    Port
+	binding PortBinding
+}
+
+type portMapSorter []portMapEntry
+
+func (s portMapSorter) Len() int      { return len(s) }
+func (s portMapSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+// sort the ports so that the order is:
+// 1. port with larger specified bindings
+// 2. larger port
+// 3. port with tcp protocol
+func (s portMapSorter) Less(i, j int) bool {
+	pi, pj := s[i].port, s[j].port
+	hpi, hpj := toInt(s[i].binding.HostPort), toInt(s[j].binding.HostPort)
+	return hpi > hpj || pi.Int() > pj.Int() || (pi.Int() == pj.Int() && strings.ToLower(pi.Proto()) == "tcp")
+}
+
+// SortPortMap sorts the list of ports and their respective mapping. The ports
+// with explicit HostPort will be placed first.
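+//
+// For illustration (an editorial sketch, not part of the upstream file), an
+// entry with an explicit HostPort sorts ahead of one without:
+//
+//	ports := []Port{"80/tcp", "22/tcp"}
+//	bindings := PortMap{
+//		"22/tcp": {{HostIP: "", HostPort: "2222"}},
+//		"80/tcp": {{HostIP: "", HostPort: ""}},
+//	}
+//	SortPortMap(ports, bindings)
+//	// ports is now ["22/tcp", "80/tcp"]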
+func SortPortMap(ports []Port, bindings PortMap) { + s := portMapSorter{} + for _, p := range ports { + if binding, ok := bindings[p]; ok { + for _, b := range binding { + s = append(s, portMapEntry{port: p, binding: b}) + } + bindings[p] = []PortBinding{} + } else { + s = append(s, portMapEntry{port: p}) + } + } + + sort.Sort(s) + var ( + i int + pm = make(map[Port]struct{}) + ) + // reorder ports + for _, entry := range s { + if _, ok := pm[entry.port]; !ok { + ports[i] = entry.port + pm[entry.port] = struct{}{} + i++ + } + // reorder bindings for this port + if _, ok := bindings[entry.port]; ok { + bindings[entry.port] = append(bindings[entry.port], entry.binding) + } + } +} + +func toInt(s string) uint64 { + i, _, err := ParsePortRange(s) + if err != nil { + i = 0 + } + return i +} diff --git a/Godeps/_workspace/src/github.com/docker/go-connections/sockets/README.md b/Godeps/_workspace/src/github.com/docker/go-connections/sockets/README.md new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/Godeps/_workspace/src/github.com/docker/go-connections/sockets/inmem_socket.go b/Godeps/_workspace/src/github.com/docker/go-connections/sockets/inmem_socket.go new file mode 100644 index 000000000000..3395e40229df --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/go-connections/sockets/inmem_socket.go @@ -0,0 +1,89 @@ +package sockets + +import ( + "errors" + "net" + "sync" +) + +var errClosed = errors.New("use of closed network connection") + +// InmemSocket implements net.Listener using in-memory only connections. +type InmemSocket struct { + chConn chan net.Conn + chClose chan struct{} + addr string + mu sync.Mutex +} + +// dummyAddr is used to satisfy net.Addr for the in-mem socket +// it is just stored as a string and returns the string for all calls +type dummyAddr string + +// NewInmemSocket creates an in-memory only net.Listener +// The addr argument can be any string, but is used to satisfy the `Addr()` part +// of the net.Listener interface +func NewInmemSocket(addr string, bufSize int) *InmemSocket { + return &InmemSocket{ + chConn: make(chan net.Conn, bufSize), + chClose: make(chan struct{}), + addr: addr, + } +} + +// Addr returns the socket's addr string to satisfy net.Listener +func (s *InmemSocket) Addr() net.Addr { + return dummyAddr(s.addr) +} + +// Accept implements the Accept method in the Listener interface; it waits for the next call and returns a generic Conn. +func (s *InmemSocket) Accept() (net.Conn, error) { + select { + case conn := <-s.chConn: + return conn, nil + case <-s.chClose: + return nil, errClosed + } +} + +// Close closes the listener. It will be unavailable for use once closed. 
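+//
+// For context, a full dial/accept round trip through the listener might look
+// like this (an editorial sketch, not part of the upstream file):
+//
+//	l := NewInmemSocket("inmem", 1)
+//	go func() {
+//		conn, _ := l.Accept()
+//		conn.Write([]byte("hi"))
+//		conn.Close()
+//	}()
+//	c, _ := l.Dial("inmem", "inmem")
+//	buf := make([]byte, 2)
+//	c.Read(buf) // buf now holds "hi"
+//	l.Close()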
+func (s *InmemSocket) Close() error { + s.mu.Lock() + defer s.mu.Unlock() + select { + case <-s.chClose: + default: + close(s.chClose) + } + return nil +} + +// Dial is used to establish a connection with the in-mem server +func (s *InmemSocket) Dial(network, addr string) (net.Conn, error) { + srvConn, clientConn := net.Pipe() + select { + case s.chConn <- srvConn: + case <-s.chClose: + return nil, errClosed + } + + return clientConn, nil +} + +// Network returns the addr string, satisfies net.Addr +func (a dummyAddr) Network() string { + return string(a) +} + +// String returns the string form +func (a dummyAddr) String() string { + return string(a) +} + +// timeoutError is used when there is a timeout with a connection +// this implements the net.Error interface +type timeoutError struct{} + +func (e *timeoutError) Error() string { return "i/o timeout" } +func (e *timeoutError) Timeout() bool { return true } +func (e *timeoutError) Temporary() bool { return true } diff --git a/Godeps/_workspace/src/github.com/docker/go-connections/sockets/proxy.go b/Godeps/_workspace/src/github.com/docker/go-connections/sockets/proxy.go new file mode 100644 index 000000000000..98e9a1dc61b5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/go-connections/sockets/proxy.go @@ -0,0 +1,51 @@ +package sockets + +import ( + "net" + "net/url" + "os" + "strings" + + "golang.org/x/net/proxy" +) + +// GetProxyEnv allows access to the uppercase and the lowercase forms of +// proxy-related variables. See the Go specification for details on these +// variables. https://golang.org/pkg/net/http/ +func GetProxyEnv(key string) string { + proxyValue := os.Getenv(strings.ToUpper(key)) + if proxyValue == "" { + return os.Getenv(strings.ToLower(key)) + } + return proxyValue +} + +// DialerFromEnvironment takes in a "direct" *net.Dialer and returns a +// proxy.Dialer which will route the connections through the proxy using the +// given dialer. +func DialerFromEnvironment(direct *net.Dialer) (proxy.Dialer, error) { + allProxy := GetProxyEnv("all_proxy") + if len(allProxy) == 0 { + return direct, nil + } + + proxyURL, err := url.Parse(allProxy) + if err != nil { + return direct, err + } + + proxyFromURL, err := proxy.FromURL(proxyURL, direct) + if err != nil { + return direct, err + } + + noProxy := GetProxyEnv("no_proxy") + if len(noProxy) == 0 { + return proxyFromURL, nil + } + + perHost := proxy.NewPerHost(proxyFromURL, direct) + perHost.AddFromString(noProxy) + + return perHost, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/go-connections/sockets/sockets.go b/Godeps/_workspace/src/github.com/docker/go-connections/sockets/sockets.go new file mode 100644 index 000000000000..1739cecf2a56 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/go-connections/sockets/sockets.go @@ -0,0 +1,42 @@ +// Package sockets provides helper functions to create and configure Unix or TCP sockets. +package sockets + +import ( + "net" + "net/http" + "time" +) + +// Why 32? See https://github.com/docker/docker/pull/8035. +const defaultTimeout = 32 * time.Second + +// ConfigureTransport configures the specified Transport according to the +// specified proto and addr. +// If the proto is unix (using a unix socket to communicate) or npipe the +// compression is disabled. +func ConfigureTransport(tr *http.Transport, proto, addr string) error { + switch proto { + case "unix": + // No need for compression in local communications. 
+		tr.DisableCompression = true
+		tr.Dial = func(_, _ string) (net.Conn, error) {
+			return net.DialTimeout(proto, addr, defaultTimeout)
+		}
+	case "npipe":
+		// No need for compression in local communications.
+		tr.DisableCompression = true
+		tr.Dial = func(_, _ string) (net.Conn, error) {
+			return DialPipe(addr, defaultTimeout)
+		}
+	default:
+		tr.Proxy = http.ProxyFromEnvironment
+		dialer, err := DialerFromEnvironment(&net.Dialer{
+			Timeout: defaultTimeout,
+		})
+		if err != nil {
+			return err
+		}
+		tr.Dial = dialer.Dial
+	}
+	return nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/go-connections/sockets/sockets_unix.go b/Godeps/_workspace/src/github.com/docker/go-connections/sockets/sockets_unix.go
new file mode 100644
index 000000000000..b255ac9ac7af
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/go-connections/sockets/sockets_unix.go
@@ -0,0 +1,15 @@
+// +build !windows
+
+package sockets
+
+import (
+	"net"
+	"syscall"
+	"time"
+)
+
+// DialPipe connects to a Windows named pipe.
+// This is not supported on other OSes.
+func DialPipe(_ string, _ time.Duration) (net.Conn, error) {
+	return nil, syscall.EAFNOSUPPORT
+}
diff --git a/Godeps/_workspace/src/github.com/docker/go-connections/sockets/sockets_windows.go b/Godeps/_workspace/src/github.com/docker/go-connections/sockets/sockets_windows.go
new file mode 100644
index 000000000000..1f3540b2fe87
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/go-connections/sockets/sockets_windows.go
@@ -0,0 +1,13 @@
+package sockets
+
+import (
+	"net"
+	"time"
+
+	"github.com/Microsoft/go-winio"
+)
+
+// DialPipe connects to a Windows named pipe.
+func DialPipe(addr string, timeout time.Duration) (net.Conn, error) {
+	return winio.DialPipe(addr, &timeout)
+}
diff --git a/Godeps/_workspace/src/github.com/docker/go-connections/sockets/tcp_socket.go b/Godeps/_workspace/src/github.com/docker/go-connections/sockets/tcp_socket.go
new file mode 100644
index 000000000000..8a82727df00a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/go-connections/sockets/tcp_socket.go
@@ -0,0 +1,22 @@
+// Package sockets provides helper functions to create and configure Unix or TCP sockets.
+package sockets
+
+import (
+	"crypto/tls"
+	"net"
+)
+
+// NewTCPSocket creates a TCP socket listener with the specified address and
+// the specified TLS configuration. If tlsConfig is set, it will encapsulate the
+// TCP listener inside a TLS one.
+func NewTCPSocket(addr string, tlsConfig *tls.Config) (net.Listener, error) {
+	l, err := net.Listen("tcp", addr)
+	if err != nil {
+		return nil, err
+	}
+	if tlsConfig != nil {
+		tlsConfig.NextProtos = []string{"http/1.1"}
+		l = tls.NewListener(l, tlsConfig)
+	}
+	return l, nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/go-connections/sockets/unix_socket.go b/Godeps/_workspace/src/github.com/docker/go-connections/sockets/unix_socket.go
new file mode 100644
index 000000000000..d1627349f826
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/go-connections/sockets/unix_socket.go
@@ -0,0 +1,80 @@
+// +build linux freebsd solaris
+
+package sockets
+
+import (
+	"fmt"
+	"net"
+	"os"
+	"strconv"
+	"syscall"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/opencontainers/runc/libcontainer/user"
+)
+
+// NewUnixSocket creates a unix socket with the specified path and group.
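+//
+// For example (an editorial sketch; the path and group are hypothetical):
+//
+//	l, err := NewUnixSocket("/var/run/example.sock", "docker")
+//	if err != nil {
+//		return err
+//	}
+//	defer l.Close()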
+func NewUnixSocket(path, group string) (net.Listener, error) {
+	if err := syscall.Unlink(path); err != nil && !os.IsNotExist(err) {
+		return nil, err
+	}
+	mask := syscall.Umask(0777)
+	defer syscall.Umask(mask)
+	l, err := net.Listen("unix", path)
+	if err != nil {
+		return nil, err
+	}
+	if err := setSocketGroup(path, group); err != nil {
+		l.Close()
+		return nil, err
+	}
+	if err := os.Chmod(path, 0660); err != nil {
+		l.Close()
+		return nil, err
+	}
+	return l, nil
+}
+
+func setSocketGroup(path, group string) error {
+	if group == "" {
+		return nil
+	}
+	if err := changeGroup(path, group); err != nil {
+		if group != "docker" {
+			return err
+		}
+		logrus.Debugf("Warning: could not change group %s to docker: %v", path, err)
+	}
+	return nil
+}
+
+func changeGroup(path string, nameOrGid string) error {
+	gid, err := lookupGidByName(nameOrGid)
+	if err != nil {
+		return err
+	}
+	logrus.Debugf("%s group found. gid: %d", nameOrGid, gid)
+	return os.Chown(path, 0, gid)
+}
+
+func lookupGidByName(nameOrGid string) (int, error) {
+	groupFile, err := user.GetGroupPath()
+	if err != nil {
+		return -1, err
+	}
+	groups, err := user.ParseGroupFileFilter(groupFile, func(g user.Group) bool {
+		return g.Name == nameOrGid || strconv.Itoa(g.Gid) == nameOrGid
+	})
+	if err != nil {
+		return -1, err
+	}
+	if groups != nil && len(groups) > 0 {
+		return groups[0].Gid, nil
+	}
+	gid, err := strconv.Atoi(nameOrGid)
+	if err == nil {
+		logrus.Warnf("Could not find GID %d", gid)
+		return gid, nil
+	}
+	return -1, fmt.Errorf("Group %s not found", nameOrGid)
+}
diff --git a/Godeps/_workspace/src/github.com/docker/go-connections/tlsconfig/config.go b/Godeps/_workspace/src/github.com/docker/go-connections/tlsconfig/config.go
new file mode 100644
index 000000000000..1ba04395e2ad
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/go-connections/tlsconfig/config.go
@@ -0,0 +1,122 @@
+// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers.
+//
+// As a reminder from https://golang.org/pkg/crypto/tls/#Config:
+// A Config structure is used to configure a TLS client or server. After one has been passed to a TLS function it must not be modified.
+// A Config may be reused; the tls package will also not modify it.
+package tlsconfig
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"fmt"
+	"io/ioutil"
+	"os"
+
+	"github.com/Sirupsen/logrus"
+)
+
+// Options represents the information needed to create client and server TLS configurations.
+type Options struct {
+	CAFile string
+
+	// If either CertFile or KeyFile is empty, Client() will not load them,
+	// preventing the client from authenticating to the server.
+	// However, Server() requires them and will error out if they are empty.
+	CertFile string
+	KeyFile  string
+
+	// client-only option
+	InsecureSkipVerify bool
+	// server-only option
+	ClientAuth tls.ClientAuthType
+}
+
+// Extra (server-side) accepted CBC cipher suites - will phase out in the future
+var acceptedCBCCiphers = []uint16{
+	tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
+	tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
+	tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
+	tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
+	tls.TLS_RSA_WITH_AES_256_CBC_SHA,
+	tls.TLS_RSA_WITH_AES_128_CBC_SHA,
+}
+
+// DefaultServerAcceptedCiphers should be used by code which already has a crypto/tls
+// options struct but wants to use a commonly accepted set of TLS cipher suites, with
+// known weak algorithms removed.
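+//
+// As an editorial sketch (not upstream code; the file paths are hypothetical),
+// these defaults are typically consumed indirectly via Server, defined below:
+//
+//	serverConfig, err := Server(Options{
+//		CertFile:   "/etc/example/server.pem",
+//		KeyFile:    "/etc/example/server-key.pem",
+//		CAFile:     "/etc/example/ca.pem",
+//		ClientAuth: tls.RequireAndVerifyClientCert,
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	l = tls.NewListener(l, serverConfig) // wrap an existing net.Listener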
+var DefaultServerAcceptedCiphers = append(clientCipherSuites, acceptedCBCCiphers...) + +// ServerDefault is a secure-enough TLS configuration for the server TLS configuration. +var ServerDefault = tls.Config{ + // Avoid fallback to SSL protocols < TLS1.0 + MinVersion: tls.VersionTLS10, + PreferServerCipherSuites: true, + CipherSuites: DefaultServerAcceptedCiphers, +} + +// ClientDefault is a secure-enough TLS configuration for the client TLS configuration. +var ClientDefault = tls.Config{ + // Prefer TLS1.2 as the client minimum + MinVersion: tls.VersionTLS12, + CipherSuites: clientCipherSuites, +} + +// certPool returns an X.509 certificate pool from `caFile`, the certificate file. +func certPool(caFile string) (*x509.CertPool, error) { + // If we should verify the server, we need to load a trusted ca + certPool := x509.NewCertPool() + pem, err := ioutil.ReadFile(caFile) + if err != nil { + return nil, fmt.Errorf("Could not read CA certificate %q: %v", caFile, err) + } + if !certPool.AppendCertsFromPEM(pem) { + return nil, fmt.Errorf("failed to append certificates from PEM file: %q", caFile) + } + logrus.Debugf("Trusting %d certs", len(certPool.Subjects())) + return certPool, nil +} + +// Client returns a TLS configuration meant to be used by a client. +func Client(options Options) (*tls.Config, error) { + tlsConfig := ClientDefault + tlsConfig.InsecureSkipVerify = options.InsecureSkipVerify + if !options.InsecureSkipVerify && options.CAFile != "" { + CAs, err := certPool(options.CAFile) + if err != nil { + return nil, err + } + tlsConfig.RootCAs = CAs + } + + if options.CertFile != "" || options.KeyFile != "" { + tlsCert, err := tls.LoadX509KeyPair(options.CertFile, options.KeyFile) + if err != nil { + return nil, fmt.Errorf("Could not load X509 key pair: %v. Make sure the key is not encrypted", err) + } + tlsConfig.Certificates = []tls.Certificate{tlsCert} + } + + return &tlsConfig, nil +} + +// Server returns a TLS configuration meant to be used by a server. +func Server(options Options) (*tls.Config, error) { + tlsConfig := ServerDefault + tlsConfig.ClientAuth = options.ClientAuth + tlsCert, err := tls.LoadX509KeyPair(options.CertFile, options.KeyFile) + if err != nil { + if os.IsNotExist(err) { + return nil, fmt.Errorf("Could not load X509 key pair (cert: %q, key: %q): %v", options.CertFile, options.KeyFile, err) + } + return nil, fmt.Errorf("Error reading X509 key pair (cert: %q, key: %q): %v. Make sure the key is not encrypted.", options.CertFile, options.KeyFile, err) + } + tlsConfig.Certificates = []tls.Certificate{tlsCert} + if options.ClientAuth >= tls.VerifyClientCertIfGiven { + CAs, err := certPool(options.CAFile) + if err != nil { + return nil, err + } + tlsConfig.ClientCAs = CAs + } + return &tlsConfig, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go b/Godeps/_workspace/src/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go new file mode 100644 index 000000000000..6b4c6a7c0d06 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go @@ -0,0 +1,17 @@ +// +build go1.5 + +// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers. 
+// +package tlsconfig + +import ( + "crypto/tls" +) + +// Client TLS cipher suites (dropping CBC ciphers for client preferred suite set) +var clientCipherSuites = []uint16{ + tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, +} diff --git a/Godeps/_workspace/src/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go b/Godeps/_workspace/src/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go new file mode 100644 index 000000000000..ee22df47cb29 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go @@ -0,0 +1,15 @@ +// +build !go1.5 + +// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers. +// +package tlsconfig + +import ( + "crypto/tls" +) + +// Client TLS cipher suites (dropping CBC ciphers for client preferred suite set) +var clientCipherSuites = []uint16{ + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/LICENSE b/Godeps/_workspace/src/github.com/gogo/protobuf/LICENSE new file mode 100644 index 000000000000..335e38e19b97 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/LICENSE @@ -0,0 +1,36 @@ +Extensions for Protocol Buffers to create more go like structures. + +Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. +http://github.com/gogo/protobuf/gogoproto + +Go support for Protocol Buffers - Google's data interchange format + +Copyright 2010 The Go Authors. All rights reserved. +https://github.com/golang/protobuf + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/gogoproto/Makefile b/Godeps/_workspace/src/github.com/gogo/protobuf/gogoproto/Makefile new file mode 100644 index 000000000000..557f29493308 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/gogoproto/Makefile @@ -0,0 +1,36 @@ +# Extensions for Protocol Buffers to create more go like structures. +# +# Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. +# http://github.com/gogo/protobuf/gogoproto +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +regenerate: + protoc --gogo_out=Mgoogle/protobuf/descriptor.proto=github.com/gogo/protobuf/protoc-gen-gogo/descriptor:. --proto_path=../../../../:../protobuf/:. *.proto + +restore: + cp gogo.pb.golden gogo.pb.go + +preserve: + cp gogo.pb.go gogo.pb.golden diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/gogoproto/doc.go b/Godeps/_workspace/src/github.com/gogo/protobuf/gogoproto/doc.go new file mode 100644 index 000000000000..f0424d4f8ae6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/gogoproto/doc.go @@ -0,0 +1,168 @@ +// Extensions for Protocol Buffers to create more go like structures. +// +// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. +// http://github.com/gogo/protobuf/gogoproto +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+Package gogoproto provides extensions for protocol buffers to achieve:
+
+ - fast marshalling and unmarshalling.
+ - peace of mind by optionally generating test and benchmark code.
+ - more canonical Go structures.
+ - less typing by optionally generating extra helper code.
+ - goprotobuf compatibility.
+
+More Canonical Go Structures
+
+A lot of time working with a goprotobuf struct will lead you to a place where you create another struct that is easier to work with and then have a function to copy the values between the two structs.
+You might also find that basic structs that started their life as part of an API need to be sent over the wire. With gob, you could just send it. With goprotobuf, you need to make a parallel struct.
+Gogoprotobuf tries to fix these problems with the nullable, embed, customtype and customname field extensions.
+
+ - nullable, if false, a field is generated without a pointer (see warning below).
+ - embed, if true, the field is generated as an embedded field.
+ - customtype, it works with the Marshal and Unmarshal methods to allow you to have your own types in your struct, but marshal to bytes. For example, custom.Uuid or custom.Fixed128.
+ - customname (beta), changes the generated fieldname. This is especially useful when generated methods conflict with fieldnames.
+ - casttype (beta), changes the generated fieldtype. All generated code assumes that this type is castable to the protocol buffer field type. It does not work for structs or enums.
+ - castkey (beta), changes the generated fieldtype for a map key. All generated code assumes that this type is castable to the protocol buffer field type. Only supported on maps.
+ - castvalue (beta), changes the generated fieldtype for a map value. All generated code assumes that this type is castable to the protocol buffer field type. Only supported on maps.
+
+Warning about nullable: According to the Protocol Buffer specification, you should be able to tell whether a field is set or unset. With the option nullable=false this feature is lost, since your non-nullable fields will always be set. It can be seen as a layer on top of Protocol Buffers, where before and after marshalling all non-nullable fields are set and they cannot be unset.
+
+Let us look at:
+
+	github.com/gogo/protobuf/test/example/example.proto
+
+for a quicker overview.
+
+The following message:
+
+	package test;
+
+	import "github.com/gogo/protobuf/gogoproto/gogo.proto";
+
+	message A {
+		optional string Description = 1 [(gogoproto.nullable) = false];
+		optional int64 Number = 2 [(gogoproto.nullable) = false];
+		optional bytes Id = 3 [(gogoproto.customtype) = "github.com/gogo/protobuf/test/custom.Uuid", (gogoproto.nullable) = false];
+	}
+
+Will generate a go struct which looks a lot like this:
+
+	type A struct {
+		Description string
+		Number      int64
+		Id          github_com_gogo_protobuf_test_custom.Uuid
+	}
+
+You will see there are no pointers, since all fields are non-nullable.
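+
+As an editorial illustration (not part of the upstream documentation), such a
+message can be used like any plain Go struct and marshalled with the proto
+package:
+
+	a := A{Description: "hello", Number: 42}
+	data, err := proto.Marshal(&a)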
+You will also see a custom type which marshals to a string.
+Be warned it is your responsibility to test your custom types thoroughly.
+You should think of every possible empty and nil case for your marshaling, unmarshaling and size methods.
+
+Next we will embed the message A in message B.
+
+	message B {
+		optional A A = 1 [(gogoproto.nullable) = false, (gogoproto.embed) = true];
+		repeated bytes G = 2 [(gogoproto.customtype) = "github.com/gogo/protobuf/test/custom.Uint128", (gogoproto.nullable) = false];
+	}
+
+See below that A is embedded in B.
+
+	type B struct {
+		A
+		G []github_com_gogo_protobuf_test_custom.Uint128
+	}
+
+Also see the repeated custom type.
+
+	type Uint128 [2]uint64
+
+Next we will create a custom name for one of our fields.
+
+	message C {
+		optional int64 size = 1 [(gogoproto.customname) = "MySize"];
+	}
+
+See below that the field's name is MySize and not Size.
+
+	type C struct {
+		MySize *int64
+	}
+
+This is useful when a protocol buffer message has a field name which conflicts with a generated method.
+As an example, having a field named size and using the sizer plugin to generate a Size method will cause a go compiler error.
+Using customname you can fix this error without changing the field name.
+This is typically useful when working with a protocol buffer that was designed before these methods and/or the go language were available.
+
+Gogoprotobuf also has some more subtle changes, these could be changed back:
+
+ - the generated package names for imports do not have the extra /filename.pb,
+   but are actually the imports specified in the .proto file.
+
+Gogoprotobuf has also lost some features which should be brought back with time:
+
+ - Marshalling and unmarshalling with reflect and without the unsafe package,
+   this requires work in pointer_reflect.go
+
+Why does nullable break protocol buffer specifications:
+
+The protocol buffer specification states, somewhere, that you should be able to tell whether a
+field is set or unset. With the option nullable=false this feature is lost,
+since your non-nullable fields will always be set. It can be seen as a layer on top of
+protocol buffers, where before and after marshalling all non-nullable fields are set
+and they cannot be unset.
+
+Goprotobuf Compatibility:
+
+Gogoprotobuf is compatible with Goprotobuf, because it is compatible with protocol buffers.
+Gogoprotobuf generates the same code as goprotobuf if no extensions are used.
+The enumprefix, getters and stringer extensions can be used to remove some of the unnecessary code generated by goprotobuf:
+
+ - gogoproto_import, if false, the generated code imports github.com/golang/protobuf/proto instead of github.com/gogo/protobuf/proto.
+ - goproto_enum_prefix, if false, generates the enum constant names without the messagetype prefix.
+ - goproto_enum_stringer (experimental), if false, the enum is generated without the default string method; this is useful if you would rather use enum_stringer, or write your own string method.
+ - goproto_getters, if false, the message is generated without get methods; this is useful when you would rather use face.
+ - goproto_stringer, if false, the message is generated without the default string method; this is useful if you would rather use stringer, or write your own string method.
 - goproto_extensions_map (beta), if false, the extensions field is generated as type []byte instead of type map[int32]proto.Extension.
+ - goproto_unrecognized (beta), if false, XXX_unrecognized field is not generated. This is useful in conjunction with gogoproto.nullable=false, to generate structures completely devoid of pointers and reduce GC pressure at the cost of losing information about unrecognized fields.
+
+Less Typing and Peace of Mind are explained in their specific plugin folders godoc:
+
+	github.com/gogo/protobuf/plugin/
+
+If you do not use any of these extensions the code that is generated
+will be the same as if goprotobuf had generated it.
+
+The most complete way to see examples is to look at
+
+	github.com/gogo/protobuf/test/thetest.proto
+
+Gogoprototest is a separate project,
+because we want to keep gogoprotobuf independent of goprotobuf,
+but we still want to test it thoroughly.
+
+*/
+package gogoproto
diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/gogoproto/gogo.pb.go b/Godeps/_workspace/src/github.com/gogo/protobuf/gogoproto/gogo.pb.go
new file mode 100644
index 000000000000..760e4e61d86f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gogo/protobuf/gogoproto/gogo.pb.go
@@ -0,0 +1,530 @@
+// Code generated by protoc-gen-gogo.
+// source: gogo.proto
+// DO NOT EDIT!
+
+/*
+Package gogoproto is a generated protocol buffer package.
+
+It is generated from these files:
+	gogo.proto
+
+It has these top-level messages:
+*/
+package gogoproto
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+var E_GoprotoEnumPrefix = &proto.ExtensionDesc{
+	ExtendedType:  (*google_protobuf.EnumOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         62001,
+	Name:          "gogoproto.goproto_enum_prefix",
+	Tag:           "varint,62001,opt,name=goproto_enum_prefix",
+}
+
+var E_GoprotoEnumStringer = &proto.ExtensionDesc{
+	ExtendedType:  (*google_protobuf.EnumOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         62021,
+	Name:          "gogoproto.goproto_enum_stringer",
+	Tag:           "varint,62021,opt,name=goproto_enum_stringer",
+}
+
+var E_EnumStringer = &proto.ExtensionDesc{
+	ExtendedType:  (*google_protobuf.EnumOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         62022,
+	Name:          "gogoproto.enum_stringer",
+	Tag:           "varint,62022,opt,name=enum_stringer",
+}
+
+var E_GoprotoGettersAll = &proto.ExtensionDesc{
+	ExtendedType:  (*google_protobuf.FileOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         63001,
+	Name:          "gogoproto.goproto_getters_all",
+	Tag:           "varint,63001,opt,name=goproto_getters_all",
+}
+
+var E_GoprotoEnumPrefixAll = &proto.ExtensionDesc{
+	ExtendedType:  (*google_protobuf.FileOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         63002,
+	Name:          "gogoproto.goproto_enum_prefix_all",
+	Tag:           "varint,63002,opt,name=goproto_enum_prefix_all",
+}
+
+var E_GoprotoStringerAll = &proto.ExtensionDesc{
+	ExtendedType:  (*google_protobuf.FileOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         63003,
+	Name:          "gogoproto.goproto_stringer_all",
+	Tag:           "varint,63003,opt,name=goproto_stringer_all",
+}
+
+var E_VerboseEqualAll = &proto.ExtensionDesc{
+	ExtendedType:  (*google_protobuf.FileOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         63004,
+	Name:          "gogoproto.verbose_equal_all",
+	Tag:           "varint,63004,opt,name=verbose_equal_all",
+}
+
+var E_FaceAll =
&proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63005, + Name: "gogoproto.face_all", + Tag: "varint,63005,opt,name=face_all", +} + +var E_GostringAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63006, + Name: "gogoproto.gostring_all", + Tag: "varint,63006,opt,name=gostring_all", +} + +var E_PopulateAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63007, + Name: "gogoproto.populate_all", + Tag: "varint,63007,opt,name=populate_all", +} + +var E_StringerAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63008, + Name: "gogoproto.stringer_all", + Tag: "varint,63008,opt,name=stringer_all", +} + +var E_OnlyoneAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63009, + Name: "gogoproto.onlyone_all", + Tag: "varint,63009,opt,name=onlyone_all", +} + +var E_EqualAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63013, + Name: "gogoproto.equal_all", + Tag: "varint,63013,opt,name=equal_all", +} + +var E_DescriptionAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63014, + Name: "gogoproto.description_all", + Tag: "varint,63014,opt,name=description_all", +} + +var E_TestgenAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63015, + Name: "gogoproto.testgen_all", + Tag: "varint,63015,opt,name=testgen_all", +} + +var E_BenchgenAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63016, + Name: "gogoproto.benchgen_all", + Tag: "varint,63016,opt,name=benchgen_all", +} + +var E_MarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63017, + Name: "gogoproto.marshaler_all", + Tag: "varint,63017,opt,name=marshaler_all", +} + +var E_UnmarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63018, + Name: "gogoproto.unmarshaler_all", + Tag: "varint,63018,opt,name=unmarshaler_all", +} + +var E_SizerAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63020, + Name: "gogoproto.sizer_all", + Tag: "varint,63020,opt,name=sizer_all", +} + +var E_GoprotoEnumStringerAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63021, + Name: "gogoproto.goproto_enum_stringer_all", + Tag: "varint,63021,opt,name=goproto_enum_stringer_all", +} + +var E_EnumStringerAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63022, + Name: "gogoproto.enum_stringer_all", + Tag: "varint,63022,opt,name=enum_stringer_all", +} + +var E_UnsafeMarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63023, + Name: "gogoproto.unsafe_marshaler_all", + Tag: "varint,63023,opt,name=unsafe_marshaler_all", +} + +var E_UnsafeUnmarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + 
ExtensionType: (*bool)(nil), + Field: 63024, + Name: "gogoproto.unsafe_unmarshaler_all", + Tag: "varint,63024,opt,name=unsafe_unmarshaler_all", +} + +var E_GoprotoExtensionsMapAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63025, + Name: "gogoproto.goproto_extensions_map_all", + Tag: "varint,63025,opt,name=goproto_extensions_map_all", +} + +var E_GoprotoUnrecognizedAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63026, + Name: "gogoproto.goproto_unrecognized_all", + Tag: "varint,63026,opt,name=goproto_unrecognized_all", +} + +var E_GogoprotoImport = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63027, + Name: "gogoproto.gogoproto_import", + Tag: "varint,63027,opt,name=gogoproto_import", +} + +var E_ProtosizerAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63028, + Name: "gogoproto.protosizer_all", + Tag: "varint,63028,opt,name=protosizer_all", +} + +var E_GoprotoGetters = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64001, + Name: "gogoproto.goproto_getters", + Tag: "varint,64001,opt,name=goproto_getters", +} + +var E_GoprotoStringer = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64003, + Name: "gogoproto.goproto_stringer", + Tag: "varint,64003,opt,name=goproto_stringer", +} + +var E_VerboseEqual = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64004, + Name: "gogoproto.verbose_equal", + Tag: "varint,64004,opt,name=verbose_equal", +} + +var E_Face = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64005, + Name: "gogoproto.face", + Tag: "varint,64005,opt,name=face", +} + +var E_Gostring = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64006, + Name: "gogoproto.gostring", + Tag: "varint,64006,opt,name=gostring", +} + +var E_Populate = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64007, + Name: "gogoproto.populate", + Tag: "varint,64007,opt,name=populate", +} + +var E_Stringer = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 67008, + Name: "gogoproto.stringer", + Tag: "varint,67008,opt,name=stringer", +} + +var E_Onlyone = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64009, + Name: "gogoproto.onlyone", + Tag: "varint,64009,opt,name=onlyone", +} + +var E_Equal = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64013, + Name: "gogoproto.equal", + Tag: "varint,64013,opt,name=equal", +} + +var E_Description = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64014, + Name: "gogoproto.description", + Tag: "varint,64014,opt,name=description", +} + +var E_Testgen = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64015, + Name: "gogoproto.testgen", + 
Tag: "varint,64015,opt,name=testgen", +} + +var E_Benchgen = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64016, + Name: "gogoproto.benchgen", + Tag: "varint,64016,opt,name=benchgen", +} + +var E_Marshaler = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64017, + Name: "gogoproto.marshaler", + Tag: "varint,64017,opt,name=marshaler", +} + +var E_Unmarshaler = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64018, + Name: "gogoproto.unmarshaler", + Tag: "varint,64018,opt,name=unmarshaler", +} + +var E_Sizer = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64020, + Name: "gogoproto.sizer", + Tag: "varint,64020,opt,name=sizer", +} + +var E_UnsafeMarshaler = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64023, + Name: "gogoproto.unsafe_marshaler", + Tag: "varint,64023,opt,name=unsafe_marshaler", +} + +var E_UnsafeUnmarshaler = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64024, + Name: "gogoproto.unsafe_unmarshaler", + Tag: "varint,64024,opt,name=unsafe_unmarshaler", +} + +var E_GoprotoExtensionsMap = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64025, + Name: "gogoproto.goproto_extensions_map", + Tag: "varint,64025,opt,name=goproto_extensions_map", +} + +var E_GoprotoUnrecognized = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64026, + Name: "gogoproto.goproto_unrecognized", + Tag: "varint,64026,opt,name=goproto_unrecognized", +} + +var E_Protosizer = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64028, + Name: "gogoproto.protosizer", + Tag: "varint,64028,opt,name=protosizer", +} + +var E_Nullable = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65001, + Name: "gogoproto.nullable", + Tag: "varint,65001,opt,name=nullable", +} + +var E_Embed = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65002, + Name: "gogoproto.embed", + Tag: "varint,65002,opt,name=embed", +} + +var E_Customtype = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65003, + Name: "gogoproto.customtype", + Tag: "bytes,65003,opt,name=customtype", +} + +var E_Customname = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65004, + Name: "gogoproto.customname", + Tag: "bytes,65004,opt,name=customname", +} + +var E_Jsontag = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65005, + Name: "gogoproto.jsontag", + Tag: "bytes,65005,opt,name=jsontag", +} + +var E_Moretags = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65006, + Name: "gogoproto.moretags", + Tag: "bytes,65006,opt,name=moretags", +} + +var E_Casttype = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + 
ExtensionType: (*string)(nil), + Field: 65007, + Name: "gogoproto.casttype", + Tag: "bytes,65007,opt,name=casttype", +} + +var E_Castkey = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65008, + Name: "gogoproto.castkey", + Tag: "bytes,65008,opt,name=castkey", +} + +var E_Castvalue = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65009, + Name: "gogoproto.castvalue", + Tag: "bytes,65009,opt,name=castvalue", +} + +func init() { + proto.RegisterExtension(E_GoprotoEnumPrefix) + proto.RegisterExtension(E_GoprotoEnumStringer) + proto.RegisterExtension(E_EnumStringer) + proto.RegisterExtension(E_GoprotoGettersAll) + proto.RegisterExtension(E_GoprotoEnumPrefixAll) + proto.RegisterExtension(E_GoprotoStringerAll) + proto.RegisterExtension(E_VerboseEqualAll) + proto.RegisterExtension(E_FaceAll) + proto.RegisterExtension(E_GostringAll) + proto.RegisterExtension(E_PopulateAll) + proto.RegisterExtension(E_StringerAll) + proto.RegisterExtension(E_OnlyoneAll) + proto.RegisterExtension(E_EqualAll) + proto.RegisterExtension(E_DescriptionAll) + proto.RegisterExtension(E_TestgenAll) + proto.RegisterExtension(E_BenchgenAll) + proto.RegisterExtension(E_MarshalerAll) + proto.RegisterExtension(E_UnmarshalerAll) + proto.RegisterExtension(E_SizerAll) + proto.RegisterExtension(E_GoprotoEnumStringerAll) + proto.RegisterExtension(E_EnumStringerAll) + proto.RegisterExtension(E_UnsafeMarshalerAll) + proto.RegisterExtension(E_UnsafeUnmarshalerAll) + proto.RegisterExtension(E_GoprotoExtensionsMapAll) + proto.RegisterExtension(E_GoprotoUnrecognizedAll) + proto.RegisterExtension(E_GogoprotoImport) + proto.RegisterExtension(E_ProtosizerAll) + proto.RegisterExtension(E_GoprotoGetters) + proto.RegisterExtension(E_GoprotoStringer) + proto.RegisterExtension(E_VerboseEqual) + proto.RegisterExtension(E_Face) + proto.RegisterExtension(E_Gostring) + proto.RegisterExtension(E_Populate) + proto.RegisterExtension(E_Stringer) + proto.RegisterExtension(E_Onlyone) + proto.RegisterExtension(E_Equal) + proto.RegisterExtension(E_Description) + proto.RegisterExtension(E_Testgen) + proto.RegisterExtension(E_Benchgen) + proto.RegisterExtension(E_Marshaler) + proto.RegisterExtension(E_Unmarshaler) + proto.RegisterExtension(E_Sizer) + proto.RegisterExtension(E_UnsafeMarshaler) + proto.RegisterExtension(E_UnsafeUnmarshaler) + proto.RegisterExtension(E_GoprotoExtensionsMap) + proto.RegisterExtension(E_GoprotoUnrecognized) + proto.RegisterExtension(E_Protosizer) + proto.RegisterExtension(E_Nullable) + proto.RegisterExtension(E_Embed) + proto.RegisterExtension(E_Customtype) + proto.RegisterExtension(E_Customname) + proto.RegisterExtension(E_Jsontag) + proto.RegisterExtension(E_Moretags) + proto.RegisterExtension(E_Casttype) + proto.RegisterExtension(E_Castkey) + proto.RegisterExtension(E_Castvalue) +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/gogoproto/gogo.pb.golden b/Godeps/_workspace/src/github.com/gogo/protobuf/gogoproto/gogo.pb.golden new file mode 100644 index 000000000000..f6502e4b9015 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/gogoproto/gogo.pb.golden @@ -0,0 +1,45 @@ +// Code generated by protoc-gen-go. +// source: gogo.proto +// DO NOT EDIT! 
+ +package gogoproto + +import proto "github.com/gogo/protobuf/proto" +import json "encoding/json" +import math "math" +import google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" + +// Reference proto, json, and math imports to suppress error if they are not otherwise used. +var _ = proto.Marshal +var _ = &json.SyntaxError{} +var _ = math.Inf + +var E_Nullable = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 51235, + Name: "gogoproto.nullable", + Tag: "varint,51235,opt,name=nullable", +} + +var E_Embed = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 51236, + Name: "gogoproto.embed", + Tag: "varint,51236,opt,name=embed", +} + +var E_Customtype = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 51237, + Name: "gogoproto.customtype", + Tag: "bytes,51237,opt,name=customtype", +} + +func init() { + proto.RegisterExtension(E_Nullable) + proto.RegisterExtension(E_Embed) + proto.RegisterExtension(E_Customtype) +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/gogoproto/gogo.proto b/Godeps/_workspace/src/github.com/gogo/protobuf/gogoproto/gogo.proto new file mode 100644 index 000000000000..3373faf31976 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/gogoproto/gogo.proto @@ -0,0 +1,113 @@ +// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. +// http://github.com/gogo/protobuf/gogoproto +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +syntax = "proto2"; +package gogoproto; + +import "google/protobuf/descriptor.proto"; + +option java_package = "com.google.protobuf"; +option java_outer_classname = "GoGoProtos"; + +extend google.protobuf.EnumOptions { + optional bool goproto_enum_prefix = 62001; + optional bool goproto_enum_stringer = 62021; + optional bool enum_stringer = 62022; +} + +extend google.protobuf.FileOptions { + optional bool goproto_getters_all = 63001; + optional bool goproto_enum_prefix_all = 63002; + optional bool goproto_stringer_all = 63003; + optional bool verbose_equal_all = 63004; + optional bool face_all = 63005; + optional bool gostring_all = 63006; + optional bool populate_all = 63007; + optional bool stringer_all = 63008; + optional bool onlyone_all = 63009; + + optional bool equal_all = 63013; + optional bool description_all = 63014; + optional bool testgen_all = 63015; + optional bool benchgen_all = 63016; + optional bool marshaler_all = 63017; + optional bool unmarshaler_all = 63018; + + optional bool sizer_all = 63020; + + optional bool goproto_enum_stringer_all = 63021; + optional bool enum_stringer_all = 63022; + + optional bool unsafe_marshaler_all = 63023; + optional bool unsafe_unmarshaler_all = 63024; + + optional bool goproto_extensions_map_all = 63025; + optional bool goproto_unrecognized_all = 63026; + optional bool gogoproto_import = 63027; + + optional bool protosizer_all = 63028; +} + +extend google.protobuf.MessageOptions { + optional bool goproto_getters = 64001; + optional bool goproto_stringer = 64003; + optional bool verbose_equal = 64004; + optional bool face = 64005; + optional bool gostring = 64006; + optional bool populate = 64007; + optional bool stringer = 67008; + optional bool onlyone = 64009; + + optional bool equal = 64013; + optional bool description = 64014; + optional bool testgen = 64015; + optional bool benchgen = 64016; + optional bool marshaler = 64017; + optional bool unmarshaler = 64018; + + optional bool sizer = 64020; + + optional bool unsafe_marshaler = 64023; + optional bool unsafe_unmarshaler = 64024; + + optional bool goproto_extensions_map = 64025; + optional bool goproto_unrecognized = 64026; + + optional bool protosizer = 64028; +} + +extend google.protobuf.FieldOptions { + optional bool nullable = 65001; + optional bool embed = 65002; + optional string customtype = 65003; + optional string customname = 65004; + optional string jsontag = 65005; + optional string moretags = 65006; + optional string casttype = 65007; + optional string castkey = 65008; + optional string castvalue = 65009; +} + diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/gogoproto/helper.go b/Godeps/_workspace/src/github.com/gogo/protobuf/gogoproto/helper.go new file mode 100644 index 000000000000..b5a18538447a --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/gogoproto/helper.go @@ -0,0 +1,253 @@ +// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package gogoproto + +import google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" +import proto "github.com/gogo/protobuf/proto" + +func IsEmbed(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Embed, false) +} + +func IsNullable(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Nullable, true) +} + +func IsCustomType(field *google_protobuf.FieldDescriptorProto) bool { + typ := GetCustomType(field) + if len(typ) > 0 { + return true + } + return false +} + +func IsCastType(field *google_protobuf.FieldDescriptorProto) bool { + typ := GetCastType(field) + if len(typ) > 0 { + return true + } + return false +} + +func IsCastKey(field *google_protobuf.FieldDescriptorProto) bool { + typ := GetCastKey(field) + if len(typ) > 0 { + return true + } + return false +} + +func IsCastValue(field *google_protobuf.FieldDescriptorProto) bool { + typ := GetCastValue(field) + if len(typ) > 0 { + return true + } + return false +} + +func GetCustomType(field *google_protobuf.FieldDescriptorProto) string { + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Customtype) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetCastType(field *google_protobuf.FieldDescriptorProto) string { + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Casttype) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetCastKey(field *google_protobuf.FieldDescriptorProto) string { + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Castkey) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetCastValue(field *google_protobuf.FieldDescriptorProto) string { + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Castvalue) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func IsCustomName(field *google_protobuf.FieldDescriptorProto) bool { + name := GetCustomName(field) + if len(name) > 0 { + return true + } + return false +} + +func GetCustomName(field *google_protobuf.FieldDescriptorProto) string { + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Customname) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetJsonTag(field *google_protobuf.FieldDescriptorProto) *string { + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Jsontag) + if err == nil && v.(*string) != nil { + return (v.(*string)) + } + } + return nil +} + +func GetMoreTags(field 
*google_protobuf.FieldDescriptorProto) *string { + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Moretags) + if err == nil && v.(*string) != nil { + return (v.(*string)) + } + } + return nil +} + +type EnableFunc func(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool + +func EnabledGoEnumPrefix(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { + return proto.GetBoolExtension(enum.Options, E_GoprotoEnumPrefix, proto.GetBoolExtension(file.Options, E_GoprotoEnumPrefixAll, true)) +} + +func EnabledGoStringer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoStringer, proto.GetBoolExtension(file.Options, E_GoprotoStringerAll, true)) +} + +func HasGoGetters(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoGetters, proto.GetBoolExtension(file.Options, E_GoprotoGettersAll, true)) +} + +func IsUnion(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Onlyone, proto.GetBoolExtension(file.Options, E_OnlyoneAll, false)) +} + +func HasGoString(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Gostring, proto.GetBoolExtension(file.Options, E_GostringAll, false)) +} + +func HasEqual(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Equal, proto.GetBoolExtension(file.Options, E_EqualAll, false)) +} + +func HasVerboseEqual(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_VerboseEqual, proto.GetBoolExtension(file.Options, E_VerboseEqualAll, false)) +} + +func IsStringer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Stringer, proto.GetBoolExtension(file.Options, E_StringerAll, false)) +} + +func IsFace(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Face, proto.GetBoolExtension(file.Options, E_FaceAll, false)) +} + +func HasDescription(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Description, proto.GetBoolExtension(file.Options, E_DescriptionAll, false)) +} + +func HasPopulate(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Populate, proto.GetBoolExtension(file.Options, E_PopulateAll, false)) +} + +func HasTestGen(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Testgen, proto.GetBoolExtension(file.Options, E_TestgenAll, false)) +} + +func HasBenchGen(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Benchgen, proto.GetBoolExtension(file.Options, E_BenchgenAll, false)) +} + +func IsMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) 
bool { + return proto.GetBoolExtension(message.Options, E_Marshaler, proto.GetBoolExtension(file.Options, E_MarshalerAll, false)) +} + +func IsUnmarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Unmarshaler, proto.GetBoolExtension(file.Options, E_UnmarshalerAll, false)) +} + +func IsSizer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Sizer, proto.GetBoolExtension(file.Options, E_SizerAll, false)) +} + +func IsProtoSizer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Protosizer, proto.GetBoolExtension(file.Options, E_ProtosizerAll, false)) +} + +func IsGoEnumStringer(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { + return proto.GetBoolExtension(enum.Options, E_GoprotoEnumStringer, proto.GetBoolExtension(file.Options, E_GoprotoEnumStringerAll, true)) +} + +func IsEnumStringer(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { + return proto.GetBoolExtension(enum.Options, E_EnumStringer, proto.GetBoolExtension(file.Options, E_EnumStringerAll, false)) +} + +func IsUnsafeMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_UnsafeMarshaler, proto.GetBoolExtension(file.Options, E_UnsafeMarshalerAll, false)) +} + +func IsUnsafeUnmarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_UnsafeUnmarshaler, proto.GetBoolExtension(file.Options, E_UnsafeUnmarshalerAll, false)) +} + +func HasExtensionsMap(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoExtensionsMap, proto.GetBoolExtension(file.Options, E_GoprotoExtensionsMapAll, true)) +} + +func HasUnrecognized(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + if IsProto3(file) { + return false + } + return proto.GetBoolExtension(message.Options, E_GoprotoUnrecognized, proto.GetBoolExtension(file.Options, E_GoprotoUnrecognizedAll, true)) +} + +func IsProto3(file *google_protobuf.FileDescriptorProto) bool { + return file.GetSyntax() == "proto3" +} + +func ImportsGoGoProto(file *google_protobuf.FileDescriptorProto) bool { + return proto.GetBoolExtension(file.Options, E_GogoprotoImport, true) +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/defaultcheck/defaultcheck.go b/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/defaultcheck/defaultcheck.go new file mode 100644 index 000000000000..690ad0df3c84 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/defaultcheck/defaultcheck.go @@ -0,0 +1,131 @@ +// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+The defaultcheck plugin is used to check that nullable is not used incorrectly.
+For instance:
+An error is caused if a non-nullable field:
+ - has a default value,
+ - is an enum which does not start at zero,
+ - is used for an extension,
+ - is used for a native proto3 type.
+A warning is given for a repeated non-nullable native type, where nullable=false has no effect.
+
+An error is also caused if a field with a default value is used in a message:
+ - which is a face,
+ - which does not have getters.
+
+It is enabled by the following extensions:
+
+ - nullable
+
+For incorrect usage of nullable with tests see:
+
+ github.com/gogo/protobuf/test/nullableconflict
+
+*/
+package defaultcheck
+
+import (
+	"fmt"
+	"github.com/gogo/protobuf/gogoproto"
+	"github.com/gogo/protobuf/protoc-gen-gogo/generator"
+	"os"
+)
+
+type plugin struct {
+	*generator.Generator
+}
+
+func NewPlugin() *plugin {
+	return &plugin{}
+}
+
+func (p *plugin) Name() string {
+	return "defaultcheck"
+}
+
+func (p *plugin) Init(g *generator.Generator) {
+	p.Generator = g
+}
+
+func (p *plugin) Generate(file *generator.FileDescriptor) {
+	proto3 := gogoproto.IsProto3(file.FileDescriptorProto)
+	for _, msg := range file.Messages() {
+		getters := gogoproto.HasGoGetters(file.FileDescriptorProto, msg.DescriptorProto)
+		face := gogoproto.IsFace(file.FileDescriptorProto, msg.DescriptorProto)
+		for _, field := range msg.GetField() {
+			if len(field.GetDefaultValue()) > 0 {
+				if !getters {
+					fmt.Fprintf(os.Stderr, "ERROR: field %v.%v cannot have a default value and not have a getter method", generator.CamelCase(*msg.Name), generator.CamelCase(*field.Name))
+					os.Exit(1)
+				}
+				if face {
+					fmt.Fprintf(os.Stderr, "ERROR: field %v.%v cannot have a default value and be in a face", generator.CamelCase(*msg.Name), generator.CamelCase(*field.Name))
+					os.Exit(1)
+				}
+			}
+			if gogoproto.IsNullable(field) {
+				continue
+			}
+			if len(field.GetDefaultValue()) > 0 {
+				fmt.Fprintf(os.Stderr, "ERROR: field %v.%v cannot be non-nullable and have a default value", generator.CamelCase(*msg.Name), generator.CamelCase(*field.Name))
+				os.Exit(1)
+			}
+			if !field.IsMessage() && !gogoproto.IsCustomType(field) {
+				if field.IsRepeated() {
+					fmt.Fprintf(os.Stderr, "WARNING: field %v.%v is a repeated non-nullable native type, nullable=false has no effect\n", generator.CamelCase(*msg.Name), generator.CamelCase(*field.Name))
+				} else if proto3 {
+					fmt.Fprintf(os.Stderr, "ERROR: field %v.%v is a native type and in proto3 syntax with nullable=false there exists conflicting implementations when encoding zero values", 
generator.CamelCase(*msg.Name), generator.CamelCase(*field.Name)) + os.Exit(1) + } + if field.IsBytes() { + fmt.Fprintf(os.Stderr, "WARNING: field %v.%v is a non-nullable bytes type, nullable=false has no effect\n", generator.CamelCase(*msg.Name), generator.CamelCase(*field.Name)) + } + } + if !field.IsEnum() { + continue + } + enum := p.ObjectNamed(field.GetTypeName()).(*generator.EnumDescriptor) + if len(enum.Value) == 0 || enum.Value[0].GetNumber() != 0 { + fmt.Fprintf(os.Stderr, "ERROR: field %v.%v cannot be non-nullable and be an enum type %v which does not start with zero", generator.CamelCase(*msg.Name), generator.CamelCase(*field.Name), enum.GetName()) + os.Exit(1) + } + } + } + for _, e := range file.GetExtension() { + if !gogoproto.IsNullable(e) { + fmt.Fprintf(os.Stderr, "ERROR: extended field %v cannot be nullable %v", generator.CamelCase(e.GetName()), generator.CamelCase(*e.Name)) + os.Exit(1) + } + } +} + +func (p *plugin) GenerateImports(*generator.FileDescriptor) {} + +func init() { + generator.RegisterPlugin(NewPlugin()) +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/description/description.go b/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/description/description.go new file mode 100644 index 000000000000..002e2ebc5cf8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/description/description.go @@ -0,0 +1,148 @@ +// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +The description (experimental) plugin generates a Description method for each message. +The Description method returns a populated google_protobuf.FileDescriptorSet struct. +This contains the description of the files used to generate this message. 
+ +It is enabled by the following extensions: + + - description + - description_all + +The description plugin also generates a test given it is enabled using one of the following extensions: + + - testgen + - testgen_all + +Let us look at: + + github.com/gogo/protobuf/test/example/example.proto + +Btw all the output can be seen at: + + github.com/gogo/protobuf/test/example/* + +The following message: + + message B { + option (gogoproto.description) = true; + optional A A = 1 [(gogoproto.nullable) = false, (gogoproto.embed) = true]; + repeated bytes G = 2 [(gogoproto.customtype) = "github.com/gogo/protobuf/test/custom.Uint128", (gogoproto.nullable) = false]; + } + +given to the description plugin, will generate the following code: + + func (this *B) Description() (desc *google_protobuf.FileDescriptorSet) { + return ExampleDescription() + } + +and the following test code: + + func TestDescription(t *testing9.T) { + ExampleDescription() + } + +The hope is to use this struct in some way instead of reflect. +This package is subject to change, since a use has not been figured out yet. + +*/ +package description + +import ( + "fmt" + "github.com/gogo/protobuf/gogoproto" + descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" + "github.com/gogo/protobuf/protoc-gen-gogo/generator" +) + +type plugin struct { + *generator.Generator + used bool +} + +func NewPlugin() *plugin { + return &plugin{} +} + +func (p *plugin) Name() string { + return "description" +} + +func (p *plugin) Init(g *generator.Generator) { + p.Generator = g +} + +func (p *plugin) Generate(file *generator.FileDescriptor) { + p.used = false + localName := generator.FileName(file) + for _, message := range file.Messages() { + if !gogoproto.HasDescription(file.FileDescriptorProto, message.DescriptorProto) { + continue + } + if message.DescriptorProto.GetOptions().GetMapEntry() { + continue + } + p.used = true + ccTypeName := generator.CamelCaseSlice(message.TypeName()) + p.P(`func (this *`, ccTypeName, `) Description() (desc *descriptor.FileDescriptorSet) {`) + p.In() + p.P(`return `, localName, `Description()`) + p.Out() + p.P(`}`) + } + + if p.used { + + p.P(`func `, localName, `Description() (desc *descriptor.FileDescriptorSet) {`) + p.In() + //Don't generate SourceCodeInfo, since it will create too much code. + + ss := make([]*descriptor.SourceCodeInfo, 0) + for _, f := range p.Generator.AllFiles().GetFile() { + ss = append(ss, f.SourceCodeInfo) + f.SourceCodeInfo = nil + } + s := fmt.Sprintf("%#v", p.Generator.AllFiles()) + for i, f := range p.Generator.AllFiles().GetFile() { + f.SourceCodeInfo = ss[i] + } + p.P(`return `, s) + p.Out() + p.P(`}`) + } +} + +func (this *plugin) GenerateImports(file *generator.FileDescriptor) { + if this.used { + this.P(`import "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"`) + } +} + +func init() { + generator.RegisterPlugin(NewPlugin()) +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/description/descriptiontest.go b/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/description/descriptiontest.go new file mode 100644 index 000000000000..7feaf3209177 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/description/descriptiontest.go @@ -0,0 +1,71 @@ +// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. 
+// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package description + +import ( + "github.com/gogo/protobuf/gogoproto" + "github.com/gogo/protobuf/plugin/testgen" + "github.com/gogo/protobuf/protoc-gen-gogo/generator" +) + +type test struct { + *generator.Generator +} + +func NewTest(g *generator.Generator) testgen.TestPlugin { + return &test{g} +} + +func (p *test) Generate(imports generator.PluginImports, file *generator.FileDescriptor) bool { + used := false + testingPkg := imports.NewImport("testing") + for _, message := range file.Messages() { + if !gogoproto.HasDescription(file.FileDescriptorProto, message.DescriptorProto) || + !gogoproto.HasTestGen(file.FileDescriptorProto, message.DescriptorProto) { + continue + } + if message.DescriptorProto.GetOptions().GetMapEntry() { + continue + } + used = true + } + + if used { + localName := generator.FileName(file) + p.P(`func Test`, localName, `Description(t *`, testingPkg.Use(), `.T) {`) + p.In() + p.P(localName, `Description()`) + p.Out() + p.P(`}`) + + } + return used +} + +func init() { + testgen.RegisterTestPlugin(NewTest) +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/embedcheck/embedcheck.go b/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/embedcheck/embedcheck.go new file mode 100644 index 000000000000..af8fd96811b2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/embedcheck/embedcheck.go @@ -0,0 +1,197 @@ +// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. 
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+The embedcheck plugin is used to check that embed is not used incorrectly.
+For instance:
+An embedded message has a generated string method, but it is a member of a message which does not.
+This causes a warning.
+An error is caused by a namespace conflict.
+
+It is enabled by the following extensions:
+
+ - embed
+ - embed_all
+
+For incorrect usage of embed with tests see:
+
+ github.com/gogo/protobuf/test/embedconflict
+
+*/
+package embedcheck
+
+import (
+	"fmt"
+	"github.com/gogo/protobuf/gogoproto"
+	"github.com/gogo/protobuf/protoc-gen-gogo/generator"
+	"os"
+)
+
+type plugin struct {
+	*generator.Generator
+}
+
+func NewPlugin() *plugin {
+	return &plugin{}
+}
+
+func (p *plugin) Name() string {
+	return "embedcheck"
+}
+
+func (p *plugin) Init(g *generator.Generator) {
+	p.Generator = g
+}
+
+var overwriters []map[string]gogoproto.EnableFunc = []map[string]gogoproto.EnableFunc{
+	{
+		"stringer": gogoproto.IsStringer,
+	},
+	{
+		"gostring": gogoproto.HasGoString,
+	},
+	{
+		"equal": gogoproto.HasEqual,
+	},
+	{
+		"verboseequal": gogoproto.HasVerboseEqual,
+	},
+	{
+		"size":       gogoproto.IsSizer,
+		"protosizer": gogoproto.IsProtoSizer,
+	},
+	{
+		"unmarshaler":        gogoproto.IsUnmarshaler,
+		"unsafe_unmarshaler": gogoproto.IsUnsafeUnmarshaler,
+	},
+	{
+		"marshaler":        gogoproto.IsMarshaler,
+		"unsafe_marshaler": gogoproto.IsUnsafeMarshaler,
+	},
+}
+
+func (p *plugin) Generate(file *generator.FileDescriptor) {
+	for _, msg := range file.Messages() {
+		for _, os := range overwriters {
+			possible := true
+			for _, overwriter := range os {
+				if overwriter(file.FileDescriptorProto, msg.DescriptorProto) {
+					possible = false
+				}
+			}
+			if possible {
+				p.checkOverwrite(msg, os)
+			}
+		}
+		p.checkNameSpace(msg)
+		for _, field := range msg.GetField() {
+			if gogoproto.IsEmbed(field) && gogoproto.IsCustomName(field) {
+				fmt.Fprintf(os.Stderr, "ERROR: field %v with custom name %v cannot be embedded", *field.Name, gogoproto.GetCustomName(field))
+				os.Exit(1)
+			}
+		}
+		p.checkRepeated(msg)
+	}
+	for _, e := range file.GetExtension() {
+		if gogoproto.IsEmbed(e) {
+			fmt.Fprintf(os.Stderr, "ERROR: extended field %v cannot be embedded", generator.CamelCase(*e.Name))
+			os.Exit(1)
+		}
+	}
+}
+
+func (p *plugin) checkNameSpace(message *generator.Descriptor) map[string]bool {
+	ccTypeName := generator.CamelCaseSlice(message.TypeName())
+	names := make(map[string]bool)
+	for _, field := range message.Field {
+		fieldname := generator.CamelCase(*field.Name)
+		if field.IsMessage() && gogoproto.IsEmbed(field) {
+			desc := p.ObjectNamed(field.GetTypeName())
+			moreNames := p.checkNameSpace(desc.(*generator.Descriptor))
+			for another := range moreNames {
+				if names[another] {
+					fmt.Fprintf(os.Stderr, "ERROR: duplicate 
embedded fieldname %v in type %v\n", fieldname, ccTypeName) + os.Exit(1) + } + names[another] = true + } + } else { + if names[fieldname] { + fmt.Fprintf(os.Stderr, "ERROR: duplicate embedded fieldname %v in type %v\n", fieldname, ccTypeName) + os.Exit(1) + } + names[fieldname] = true + } + } + return names +} + +func (p *plugin) checkOverwrite(message *generator.Descriptor, enablers map[string]gogoproto.EnableFunc) { + ccTypeName := generator.CamelCaseSlice(message.TypeName()) + names := []string{} + for name := range enablers { + names = append(names, name) + } + for _, field := range message.Field { + if field.IsMessage() && gogoproto.IsEmbed(field) { + fieldname := generator.CamelCase(*field.Name) + desc := p.ObjectNamed(field.GetTypeName()) + msg := desc.(*generator.Descriptor) + for errStr, enabled := range enablers { + if enabled(msg.File(), msg.DescriptorProto) { + fmt.Fprintf(os.Stderr, "WARNING: found non-%v %v with embedded %v %v\n", names, ccTypeName, errStr, fieldname) + } + } + p.checkOverwrite(msg, enablers) + } + } +} + +func (p *plugin) checkRepeated(message *generator.Descriptor) { + ccTypeName := generator.CamelCaseSlice(message.TypeName()) + for _, field := range message.Field { + if !gogoproto.IsEmbed(field) { + continue + } + if field.IsBytes() { + fieldname := generator.CamelCase(*field.Name) + fmt.Fprintf(os.Stderr, "ERROR: found embedded bytes field %s in message %s\n", fieldname, ccTypeName) + os.Exit(1) + } + if !field.IsRepeated() { + continue + } + fieldname := generator.CamelCase(*field.Name) + fmt.Fprintf(os.Stderr, "ERROR: found repeated embedded field %s in message %s\n", fieldname, ccTypeName) + os.Exit(1) + } +} + +func (p *plugin) GenerateImports(*generator.FileDescriptor) {} + +func init() { + generator.RegisterPlugin(NewPlugin()) +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/enumstringer/enumstringer.go b/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/enumstringer/enumstringer.go new file mode 100644 index 000000000000..7feb8be18fab --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/enumstringer/enumstringer.go @@ -0,0 +1,102 @@ +// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +The enumstringer (experimental) plugin generates a String method for each enum. + +It is enabled by the following extensions: + + - enum_stringer + - enum_stringer_all + +This package is subject to change. + +*/ +package enumstringer + +import ( + "github.com/gogo/protobuf/gogoproto" + "github.com/gogo/protobuf/protoc-gen-gogo/generator" +) + +type enumstringer struct { + *generator.Generator + generator.PluginImports + atleastOne bool + localName string +} + +func NewEnumStringer() *enumstringer { + return &enumstringer{} +} + +func (p *enumstringer) Name() string { + return "enumstringer" +} + +func (p *enumstringer) Init(g *generator.Generator) { + p.Generator = g +} + +func (p *enumstringer) Generate(file *generator.FileDescriptor) { + p.PluginImports = generator.NewPluginImports(p.Generator) + p.atleastOne = false + + p.localName = generator.FileName(file) + + strconvPkg := p.NewImport("strconv") + + for _, enum := range file.Enums() { + if !gogoproto.IsEnumStringer(file.FileDescriptorProto, enum.EnumDescriptorProto) { + continue + } + if gogoproto.IsGoEnumStringer(file.FileDescriptorProto, enum.EnumDescriptorProto) { + panic("old enum string method needs to be disabled, please use gogoproto.old_enum_stringer or gogoproto.old_enum_string_all and set it to false") + } + p.atleastOne = true + ccTypeName := generator.CamelCaseSlice(enum.TypeName()) + p.P("func (x ", ccTypeName, ") String() string {") + p.In() + p.P(`s, ok := `, ccTypeName, `_name[int32(x)]`) + p.P(`if ok {`) + p.In() + p.P(`return s`) + p.Out() + p.P(`}`) + p.P(`return `, strconvPkg.Use(), `.Itoa(int(x))`) + p.Out() + p.P(`}`) + } + + if !p.atleastOne { + return + } + +} + +func init() { + generator.RegisterPlugin(NewEnumStringer()) +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/equal/equal.go b/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/equal/equal.go new file mode 100644 index 000000000000..8c7cd6beeb80 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/equal/equal.go @@ -0,0 +1,602 @@ +// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+The equal plugin generates an Equal and a VerboseEqual method for each message.
+These equal methods are quite obvious.
+The only difference is that VerboseEqual returns a non-nil error if the messages are not equal.
+This error contains more detail on exactly which part of the message was not equal to the other message.
+The idea is that this is useful for debugging.
+
+Equal is enabled using the following extensions:
+
+ - equal
+ - equal_all
+
+While VerboseEqual is enabled using the following extensions:
+
+ - verbose_equal
+ - verbose_equal_all
+
+The equal plugin also generates a test given it is enabled using one of the following extensions:
+
+ - testgen
+ - testgen_all
+
+Let us look at:
+
+ github.com/gogo/protobuf/test/example/example.proto
+
+Btw all the output can be seen at:
+
+ github.com/gogo/protobuf/test/example/*
+
+The following message:
+
+ option (gogoproto.equal_all) = true;
+ option (gogoproto.verbose_equal_all) = true;
+
+ message B {
+	optional A A = 1 [(gogoproto.nullable) = false, (gogoproto.embed) = true];
+	repeated bytes G = 2 [(gogoproto.customtype) = "github.com/gogo/protobuf/test/custom.Uint128", (gogoproto.nullable) = false];
+ }
+
+given to the equal plugin, will generate the following code:
+
+	func (this *B) VerboseEqual(that interface{}) error {
+		if that == nil {
+			if this == nil {
+				return nil
+			}
+			return fmt2.Errorf("that == nil && this != nil")
+		}
+
+		that1, ok := that.(*B)
+		if !ok {
+			return fmt2.Errorf("that is not of type *B")
+		}
+		if that1 == nil {
+			if this == nil {
+				return nil
+			}
+			return fmt2.Errorf("that is type *B but is nil && this != nil")
+		} else if this == nil {
+			return fmt2.Errorf("that is type *B but is not nil && this == nil")
+		}
+		if !this.A.Equal(&that1.A) {
+			return fmt2.Errorf("A this(%v) Not Equal that(%v)", this.A, that1.A)
+		}
+		if len(this.G) != len(that1.G) {
+			return fmt2.Errorf("G this(%v) Not Equal that(%v)", len(this.G), len(that1.G))
+		}
+		for i := range this.G {
+			if !this.G[i].Equal(that1.G[i]) {
+				return fmt2.Errorf("G this[%v](%v) Not Equal that[%v](%v)", i, this.G[i], i, that1.G[i])
+			}
+		}
+		if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) {
+			return fmt2.Errorf("XXX_unrecognized this(%v) Not Equal that(%v)", this.XXX_unrecognized, that1.XXX_unrecognized)
+		}
+		return nil
+	}
+
+	func (this *B) Equal(that interface{}) bool {
+		if that == nil {
+			if this == nil {
+				return true
+			}
+			return false
+		}
+
+		that1, ok := that.(*B)
+		if !ok {
+			return false
+		}
+		if that1 == nil {
+			if this == nil {
+				return true
+			}
+			return false
+		} else if this == nil {
+			return false
+		}
+		if !this.A.Equal(&that1.A) {
+			return false
+		}
+		if len(this.G) != len(that1.G) {
+			return false
+		}
+		for i := range this.G {
+			if !this.G[i].Equal(that1.G[i]) {
+				return false
+			}
+		}
+		if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) {
+			return false
+		}
+		return true
+	}
+
+and the following test code:
+
+	func TestBVerboseEqual(t *testing8.T) {
+		popr := 
math_rand8.New(math_rand8.NewSource(time8.Now().UnixNano())) + p := NewPopulatedB(popr, false) + data, err := github_com_gogo_protobuf_proto2.Marshal(p) + if err != nil { + panic(err) + } + msg := &B{} + if err := github_com_gogo_protobuf_proto2.Unmarshal(data, msg); err != nil { + panic(err) + } + if err := p.VerboseEqual(msg); err != nil { + t.Fatalf("%#v !VerboseEqual %#v, since %v", msg, p, err) + } + +*/ +package equal + +import ( + "github.com/gogo/protobuf/gogoproto" + "github.com/gogo/protobuf/proto" + descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" + "github.com/gogo/protobuf/protoc-gen-gogo/generator" + "github.com/gogo/protobuf/vanity" +) + +type plugin struct { + *generator.Generator + generator.PluginImports + fmtPkg generator.Single + bytesPkg generator.Single +} + +func NewPlugin() *plugin { + return &plugin{} +} + +func (p *plugin) Name() string { + return "equal" +} + +func (p *plugin) Init(g *generator.Generator) { + p.Generator = g +} + +func (p *plugin) Generate(file *generator.FileDescriptor) { + p.PluginImports = generator.NewPluginImports(p.Generator) + p.fmtPkg = p.NewImport("fmt") + p.bytesPkg = p.NewImport("bytes") + + for _, msg := range file.Messages() { + if msg.DescriptorProto.GetOptions().GetMapEntry() { + continue + } + if gogoproto.HasVerboseEqual(file.FileDescriptorProto, msg.DescriptorProto) { + p.generateMessage(file, msg, true) + } + if gogoproto.HasEqual(file.FileDescriptorProto, msg.DescriptorProto) { + p.generateMessage(file, msg, false) + } + } +} + +func (p *plugin) generateNullableField(fieldname string, verbose bool) { + p.P(`if this.`, fieldname, ` != nil && that1.`, fieldname, ` != nil {`) + p.In() + p.P(`if *this.`, fieldname, ` != *that1.`, fieldname, `{`) + p.In() + if verbose { + p.P(`return `, p.fmtPkg.Use(), `.Errorf("`, fieldname, ` this(%v) Not Equal that(%v)", *this.`, fieldname, `, *that1.`, fieldname, `)`) + } else { + p.P(`return false`) + } + p.Out() + p.P(`}`) + p.Out() + p.P(`} else if this.`, fieldname, ` != nil {`) + p.In() + if verbose { + p.P(`return `, p.fmtPkg.Use(), `.Errorf("this.`, fieldname, ` == nil && that.`, fieldname, ` != nil")`) + } else { + p.P(`return false`) + } + p.Out() + p.P(`} else if that1.`, fieldname, ` != nil {`) +} + +func (p *plugin) generateMsgNullAndTypeCheck(ccTypeName string, verbose bool) { + p.P(`if that == nil {`) + p.In() + p.P(`if this == nil {`) + p.In() + if verbose { + p.P(`return nil`) + } else { + p.P(`return true`) + } + p.Out() + p.P(`}`) + if verbose { + p.P(`return `, p.fmtPkg.Use(), `.Errorf("that == nil && this != nil")`) + } else { + p.P(`return false`) + } + p.Out() + p.P(`}`) + p.P(``) + p.P(`that1, ok := that.(*`, ccTypeName, `)`) + p.P(`if !ok {`) + p.In() + p.P(`that2, ok := that.(`, ccTypeName, `)`) + p.P(`if ok {`) + p.In() + p.P(`that1 = &that2`) + p.Out() + p.P(`} else {`) + p.In() + if verbose { + p.P(`return `, p.fmtPkg.Use(), `.Errorf("that is not of type *`, ccTypeName, `")`) + } else { + p.P(`return false`) + } + p.Out() + p.P(`}`) + p.Out() + p.P(`}`) + p.P(`if that1 == nil {`) + p.In() + p.P(`if this == nil {`) + p.In() + if verbose { + p.P(`return nil`) + } else { + p.P(`return true`) + } + p.Out() + p.P(`}`) + if verbose { + p.P(`return `, p.fmtPkg.Use(), `.Errorf("that is type *`, ccTypeName, ` but is nil && this != nil")`) + } else { + p.P(`return false`) + } + p.Out() + p.P(`} else if this == nil {`) + p.In() + if verbose { + p.P(`return `, p.fmtPkg.Use(), `.Errorf("that is type *`, ccTypeName, ` but is not nil && this == nil")`) + } else { + 
p.P(`return false`) + } + p.Out() + p.P(`}`) +} + +func (p *plugin) generateField(file *generator.FileDescriptor, message *generator.Descriptor, field *descriptor.FieldDescriptorProto, verbose bool) { + proto3 := gogoproto.IsProto3(file.FileDescriptorProto) + fieldname := p.GetOneOfFieldName(message, field) + repeated := field.IsRepeated() + ctype := gogoproto.IsCustomType(field) + nullable := gogoproto.IsNullable(field) + // oneof := field.OneofIndex != nil + if !repeated { + if ctype { + if nullable { + p.P(`if that1.`, fieldname, ` == nil {`) + p.In() + p.P(`if this.`, fieldname, ` != nil {`) + p.In() + if verbose { + p.P(`return `, p.fmtPkg.Use(), `.Errorf("this.`, fieldname, ` != nil && that1.`, fieldname, ` == nil")`) + } else { + p.P(`return false`) + } + p.Out() + p.P(`}`) + p.Out() + p.P(`} else if !this.`, fieldname, `.Equal(*that1.`, fieldname, `) {`) + } else { + p.P(`if !this.`, fieldname, `.Equal(that1.`, fieldname, `) {`) + } + p.In() + if verbose { + p.P(`return `, p.fmtPkg.Use(), `.Errorf("`, fieldname, ` this(%v) Not Equal that(%v)", this.`, fieldname, `, that1.`, fieldname, `)`) + } else { + p.P(`return false`) + } + p.Out() + p.P(`}`) + } else { + if field.IsMessage() || p.IsGroup(field) { + if nullable { + p.P(`if !this.`, fieldname, `.Equal(that1.`, fieldname, `) {`) + } else { + p.P(`if !this.`, fieldname, `.Equal(&that1.`, fieldname, `) {`) + } + } else if field.IsBytes() { + p.P(`if !`, p.bytesPkg.Use(), `.Equal(this.`, fieldname, `, that1.`, fieldname, `) {`) + } else if field.IsString() { + if nullable && !proto3 { + p.generateNullableField(fieldname, verbose) + } else { + p.P(`if this.`, fieldname, ` != that1.`, fieldname, `{`) + } + } else { + if nullable && !proto3 { + p.generateNullableField(fieldname, verbose) + } else { + p.P(`if this.`, fieldname, ` != that1.`, fieldname, `{`) + } + } + p.In() + if verbose { + p.P(`return `, p.fmtPkg.Use(), `.Errorf("`, fieldname, ` this(%v) Not Equal that(%v)", this.`, fieldname, `, that1.`, fieldname, `)`) + } else { + p.P(`return false`) + } + p.Out() + p.P(`}`) + } + } else { + p.P(`if len(this.`, fieldname, `) != len(that1.`, fieldname, `) {`) + p.In() + if verbose { + p.P(`return `, p.fmtPkg.Use(), `.Errorf("`, fieldname, ` this(%v) Not Equal that(%v)", len(this.`, fieldname, `), len(that1.`, fieldname, `))`) + } else { + p.P(`return false`) + } + p.Out() + p.P(`}`) + p.P(`for i := range this.`, fieldname, ` {`) + p.In() + if ctype { + p.P(`if !this.`, fieldname, `[i].Equal(that1.`, fieldname, `[i]) {`) + } else { + if p.IsMap(field) { + m := p.GoMapType(nil, field) + valuegoTyp, _ := p.GoType(nil, m.ValueField) + valuegoAliasTyp, _ := p.GoType(nil, m.ValueAliasField) + nullable, valuegoTyp, valuegoAliasTyp = generator.GoMapValueTypes(field, m.ValueField, valuegoTyp, valuegoAliasTyp) + + mapValue := m.ValueAliasField + if mapValue.IsMessage() || p.IsGroup(mapValue) { + if nullable && valuegoTyp == valuegoAliasTyp { + p.P(`if !this.`, fieldname, `[i].Equal(that1.`, fieldname, `[i]) {`) + } else { + // Equal() has a pointer receiver, but map value is a value type + a := `this.` + fieldname + `[i]` + b := `that1.` + fieldname + `[i]` + if valuegoTyp != valuegoAliasTyp { + // cast back to the type that has the generated methods on it + a = `(` + valuegoTyp + `)(` + a + `)` + b = `(` + valuegoTyp + `)(` + b + `)` + } + p.P(`a := `, a) + p.P(`b := `, b) + if nullable { + p.P(`if !a.Equal(b) {`) + } else { + p.P(`if !(&a).Equal(&b) {`) + } + } + } else if mapValue.IsBytes() { + p.P(`if !`, p.bytesPkg.Use(), `.Equal(this.`, 
fieldname, `[i], that1.`, fieldname, `[i]) {`) + } else if mapValue.IsString() { + p.P(`if this.`, fieldname, `[i] != that1.`, fieldname, `[i] {`) + } else { + p.P(`if this.`, fieldname, `[i] != that1.`, fieldname, `[i] {`) + } + } else if field.IsMessage() || p.IsGroup(field) { + if nullable { + p.P(`if !this.`, fieldname, `[i].Equal(that1.`, fieldname, `[i]) {`) + } else { + p.P(`if !this.`, fieldname, `[i].Equal(&that1.`, fieldname, `[i]) {`) + } + } else if field.IsBytes() { + p.P(`if !`, p.bytesPkg.Use(), `.Equal(this.`, fieldname, `[i], that1.`, fieldname, `[i]) {`) + } else if field.IsString() { + p.P(`if this.`, fieldname, `[i] != that1.`, fieldname, `[i] {`) + } else { + p.P(`if this.`, fieldname, `[i] != that1.`, fieldname, `[i] {`) + } + } + p.In() + if verbose { + p.P(`return `, p.fmtPkg.Use(), `.Errorf("`, fieldname, ` this[%v](%v) Not Equal that[%v](%v)", i, this.`, fieldname, `[i], i, that1.`, fieldname, `[i])`) + } else { + p.P(`return false`) + } + p.Out() + p.P(`}`) + p.Out() + p.P(`}`) + } +} + +func (p *plugin) generateMessage(file *generator.FileDescriptor, message *generator.Descriptor, verbose bool) { + ccTypeName := generator.CamelCaseSlice(message.TypeName()) + if verbose { + p.P(`func (this *`, ccTypeName, `) VerboseEqual(that interface{}) error {`) + } else { + p.P(`func (this *`, ccTypeName, `) Equal(that interface{}) bool {`) + } + p.In() + p.generateMsgNullAndTypeCheck(ccTypeName, verbose) + oneofs := make(map[string]struct{}) + + for _, field := range message.Field { + oneof := field.OneofIndex != nil + if oneof { + fieldname := p.GetFieldName(message, field) + if _, ok := oneofs[fieldname]; ok { + continue + } else { + oneofs[fieldname] = struct{}{} + } + p.P(`if that1.`, fieldname, ` == nil {`) + p.In() + p.P(`if this.`, fieldname, ` != nil {`) + p.In() + if verbose { + p.P(`return `, p.fmtPkg.Use(), `.Errorf("this.`, fieldname, ` != nil && that1.`, fieldname, ` == nil")`) + } else { + p.P(`return false`) + } + p.Out() + p.P(`}`) + p.Out() + p.P(`} else if this.`, fieldname, ` == nil {`) + p.In() + if verbose { + p.P(`return `, p.fmtPkg.Use(), `.Errorf("this.`, fieldname, ` == nil && that1.`, fieldname, ` != nil")`) + } else { + p.P(`return false`) + } + p.Out() + if verbose { + p.P(`} else if err := this.`, fieldname, `.VerboseEqual(that1.`, fieldname, `); err != nil {`) + } else { + p.P(`} else if !this.`, fieldname, `.Equal(that1.`, fieldname, `) {`) + } + p.In() + if verbose { + p.P(`return err`) + } else { + p.P(`return false`) + } + p.Out() + p.P(`}`) + } else { + p.generateField(file, message, field, verbose) + } + } + if message.DescriptorProto.HasExtension() { + fieldname := "XXX_extensions" + if gogoproto.HasExtensionsMap(file.FileDescriptorProto, message.DescriptorProto) { + p.P(`for k, v := range this.`, fieldname, ` {`) + p.In() + p.P(`if v2, ok := that1.`, fieldname, `[k]; ok {`) + p.In() + p.P(`if !v.Equal(&v2) {`) + p.In() + if verbose { + p.P(`return `, p.fmtPkg.Use(), `.Errorf("`, fieldname, ` this[%v](%v) Not Equal that[%v](%v)", k, this.`, fieldname, `[k], k, that1.`, fieldname, `[k])`) + } else { + p.P(`return false`) + } + p.Out() + p.P(`}`) + p.Out() + p.P(`} else {`) + p.In() + if verbose { + p.P(`return `, p.fmtPkg.Use(), `.Errorf("`, fieldname, `[%v] Not In that", k)`) + } else { + p.P(`return false`) + } + p.Out() + p.P(`}`) + p.Out() + p.P(`}`) + + p.P(`for k, _ := range that1.`, fieldname, ` {`) + p.In() + p.P(`if _, ok := this.`, fieldname, `[k]; !ok {`) + p.In() + if verbose { + p.P(`return `, p.fmtPkg.Use(), `.Errorf("`, 
fieldname, `[%v] Not In this", k)`) + } else { + p.P(`return false`) + } + p.Out() + p.P(`}`) + p.Out() + p.P(`}`) + } else { + p.P(`if !`, p.bytesPkg.Use(), `.Equal(this.`, fieldname, `, that1.`, fieldname, `) {`) + p.In() + if verbose { + p.P(`return `, p.fmtPkg.Use(), `.Errorf("`, fieldname, ` this(%v) Not Equal that(%v)", this.`, fieldname, `, that1.`, fieldname, `)`) + } else { + p.P(`return false`) + } + p.Out() + p.P(`}`) + } + } + if gogoproto.HasUnrecognized(file.FileDescriptorProto, message.DescriptorProto) { + fieldname := "XXX_unrecognized" + p.P(`if !`, p.bytesPkg.Use(), `.Equal(this.`, fieldname, `, that1.`, fieldname, `) {`) + p.In() + if verbose { + p.P(`return `, p.fmtPkg.Use(), `.Errorf("`, fieldname, ` this(%v) Not Equal that(%v)", this.`, fieldname, `, that1.`, fieldname, `)`) + } else { + p.P(`return false`) + } + p.Out() + p.P(`}`) + } + if verbose { + p.P(`return nil`) + } else { + p.P(`return true`) + } + p.Out() + p.P(`}`) + + //Generate Equal methods for oneof fields + m := proto.Clone(message.DescriptorProto).(*descriptor.DescriptorProto) + for _, field := range m.Field { + oneof := field.OneofIndex != nil + if !oneof { + continue + } + ccTypeName := p.OneOfTypeName(message, field) + if verbose { + p.P(`func (this *`, ccTypeName, `) VerboseEqual(that interface{}) error {`) + } else { + p.P(`func (this *`, ccTypeName, `) Equal(that interface{}) bool {`) + } + p.In() + + p.generateMsgNullAndTypeCheck(ccTypeName, verbose) + vanity.TurnOffNullableForNativeTypesWithoutDefaultsOnly(field) + p.generateField(file, message, field, verbose) + + if verbose { + p.P(`return nil`) + } else { + p.P(`return true`) + } + p.Out() + p.P(`}`) + } +} + +func init() { + generator.RegisterPlugin(NewPlugin()) +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/equal/equaltest.go b/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/equal/equaltest.go new file mode 100644 index 000000000000..2fec835515e2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/equal/equaltest.go @@ -0,0 +1,94 @@ +// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
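+
+// A usage sketch of the methods exercised by the generated test below
+// (B is the example message from the equal plugin documentation; any
+// message generated with equal and verbose_equal behaves the same way):
+//
+//	a := &B{}
+//	b := &B{}
+//	if !a.Equal(b) {
+//		err := a.VerboseEqual(b) // reports which field differed
+//		fmt.Println(err)
+//	}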
+ +package equal + +import ( + "github.com/gogo/protobuf/gogoproto" + "github.com/gogo/protobuf/plugin/testgen" + "github.com/gogo/protobuf/protoc-gen-gogo/generator" +) + +type test struct { + *generator.Generator +} + +func NewTest(g *generator.Generator) testgen.TestPlugin { + return &test{g} +} + +func (p *test) Generate(imports generator.PluginImports, file *generator.FileDescriptor) bool { + used := false + randPkg := imports.NewImport("math/rand") + timePkg := imports.NewImport("time") + testingPkg := imports.NewImport("testing") + protoPkg := imports.NewImport("github.com/gogo/protobuf/proto") + if !gogoproto.ImportsGoGoProto(file.FileDescriptorProto) { + protoPkg = imports.NewImport("github.com/golang/protobuf/proto") + } + for _, message := range file.Messages() { + ccTypeName := generator.CamelCaseSlice(message.TypeName()) + if !gogoproto.HasVerboseEqual(file.FileDescriptorProto, message.DescriptorProto) { + continue + } + if message.DescriptorProto.GetOptions().GetMapEntry() { + continue + } + + if gogoproto.HasTestGen(file.FileDescriptorProto, message.DescriptorProto) { + used = true + p.P(`func Test`, ccTypeName, `VerboseEqual(t *`, testingPkg.Use(), `.T) {`) + p.In() + p.P(`popr := `, randPkg.Use(), `.New(`, randPkg.Use(), `.NewSource(`, timePkg.Use(), `.Now().UnixNano()))`) + p.P(`p := NewPopulated`, ccTypeName, `(popr, false)`) + p.P(`data, err := `, protoPkg.Use(), `.Marshal(p)`) + p.P(`if err != nil {`) + p.In() + p.P(`panic(err)`) + p.Out() + p.P(`}`) + p.P(`msg := &`, ccTypeName, `{}`) + p.P(`if err := `, protoPkg.Use(), `.Unmarshal(data, msg); err != nil {`) + p.In() + p.P(`panic(err)`) + p.Out() + p.P(`}`) + p.P(`if err := p.VerboseEqual(msg); err != nil {`) + p.In() + p.P(`t.Fatalf("%#v !VerboseEqual %#v, since %v", msg, p, err)`) + p.Out() + p.P(`}`) + p.Out() + p.P(`}`) + } + + } + return used +} + +func init() { + testgen.RegisterTestPlugin(NewTest) +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/face/face.go b/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/face/face.go new file mode 100644 index 000000000000..06529a30f114 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/face/face.go @@ -0,0 +1,231 @@ +// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+The face plugin generates a function which can convert a structure satisfying an interface (face) to the specified structure.
+This interface contains getters for each of the fields in the struct.
+The specified struct is also generated with these getters.
+This means that the standard getters should be turned off so that they do not conflict with the face getters.
+This allows the struct to satisfy its own face.
+
+It is enabled by the following extensions:
+
+ - face
+ - face_all
+
+Turn off the standard getters by setting the following extensions to false:
+
+ - goproto_getters
+ - goproto_getters_all
+
+The face plugin also generates a test given it is enabled using one of the following extensions:
+
+ - testgen
+ - testgen_all
+
+Let us look at:
+
+ github.com/gogo/protobuf/test/example/example.proto
+
+Btw all the output can be seen at:
+
+ github.com/gogo/protobuf/test/example/*
+
+The following message:
+
+ message A {
+	option (gogoproto.face) = true;
+	option (gogoproto.goproto_getters) = false;
+	optional string Description = 1 [(gogoproto.nullable) = false];
+	optional int64 Number = 2 [(gogoproto.nullable) = false];
+	optional bytes Id = 3 [(gogoproto.customtype) = "github.com/gogo/protobuf/test/custom.Uuid", (gogoproto.nullable) = false];
+ }
+
+given to the face plugin, will generate the following code:
+
+	type AFace interface {
+		Proto() github_com_gogo_protobuf_proto.Message
+		GetDescription() string
+		GetNumber() int64
+		GetId() github_com_gogo_protobuf_test_custom.Uuid
+	}
+
+	func (this *A) Proto() github_com_gogo_protobuf_proto.Message {
+		return this
+	}
+
+	func (this *A) TestProto() github_com_gogo_protobuf_proto.Message {
+		return NewAFromFace(this)
+	}
+
+	func (this *A) GetDescription() string {
+		return this.Description
+	}
+
+	func (this *A) GetNumber() int64 {
+		return this.Number
+	}
+
+	func (this *A) GetId() github_com_gogo_protobuf_test_custom.Uuid {
+		return this.Id
+	}
+
+	func NewAFromFace(that AFace) *A {
+		this := &A{}
+		this.Description = that.GetDescription()
+		this.Number = that.GetNumber()
+		this.Id = that.GetId()
+		return this
+	}
+
+and the following test code:
+
+	func TestAFace(t *testing7.T) {
+		popr := math_rand7.New(math_rand7.NewSource(time7.Now().UnixNano()))
+		p := NewPopulatedA(popr, true)
+		msg := p.TestProto()
+		if !p.Equal(msg) {
+			t.Fatalf("%#v !Face Equal %#v", msg, p)
+		}
+	}
+
+The struct A, representing the message, will also be generated just like always.
+As you can see, A satisfies its own face, AFace.
+
+Creating another struct which satisfies AFace is very easy.
+Simply implement all the methods specified in AFace.
+Implementing the Proto method is done with the helper function NewAFromFace:
+
+	func (this *MyStruct) Proto() proto.Message {
+		return NewAFromFace(this)
+	}
+
+just like the TestProto method, which is used to test the NewAFromFace function.
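+
+A complete hand-written face implementation might then look as follows (a
+sketch; MyStruct and its fields are hypothetical and simply mirror A):
+
+	type MyStruct struct {
+		Description string
+		Number      int64
+		Id          github_com_gogo_protobuf_test_custom.Uuid
+	}
+
+	func (this *MyStruct) GetDescription() string {
+		return this.Description
+	}
+
+	func (this *MyStruct) GetNumber() int64 {
+		return this.Number
+	}
+
+	func (this *MyStruct) GetId() github_com_gogo_protobuf_test_custom.Uuid {
+		return this.Id
+	}
+
+With these getters and the Proto method above, NewAFromFace(&MyStruct{...})
+yields an equivalent *A.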
+ +*/ +package face + +import ( + "github.com/gogo/protobuf/gogoproto" + "github.com/gogo/protobuf/protoc-gen-gogo/generator" +) + +type plugin struct { + *generator.Generator + generator.PluginImports +} + +func NewPlugin() *plugin { + return &plugin{} +} + +func (p *plugin) Name() string { + return "face" +} + +func (p *plugin) Init(g *generator.Generator) { + p.Generator = g +} + +func (p *plugin) Generate(file *generator.FileDescriptor) { + p.PluginImports = generator.NewPluginImports(p.Generator) + protoPkg := p.NewImport("github.com/gogo/protobuf/proto") + if !gogoproto.ImportsGoGoProto(file.FileDescriptorProto) { + protoPkg = p.NewImport("github.com/golang/protobuf/proto") + } + for _, message := range file.Messages() { + if !gogoproto.IsFace(file.FileDescriptorProto, message.DescriptorProto) { + continue + } + if message.DescriptorProto.GetOptions().GetMapEntry() { + continue + } + if message.DescriptorProto.HasExtension() { + panic("face does not support message with extensions") + } + if gogoproto.HasGoGetters(file.FileDescriptorProto, message.DescriptorProto) { + panic("face requires getters to be disabled please use gogoproto.getters or gogoproto.getters_all and set it to false") + } + ccTypeName := generator.CamelCaseSlice(message.TypeName()) + p.P(`type `, ccTypeName, `Face interface{`) + p.In() + p.P(`Proto() `, protoPkg.Use(), `.Message`) + for _, field := range message.Field { + fieldname := p.GetFieldName(message, field) + goTyp, _ := p.GoType(message, field) + if p.IsMap(field) { + m := p.GoMapType(nil, field) + goTyp = m.GoType + } + p.P(`Get`, fieldname, `() `, goTyp) + } + p.Out() + p.P(`}`) + p.P(``) + p.P(`func (this *`, ccTypeName, `) Proto() `, protoPkg.Use(), `.Message {`) + p.In() + p.P(`return this`) + p.Out() + p.P(`}`) + p.P(``) + p.P(`func (this *`, ccTypeName, `) TestProto() `, protoPkg.Use(), `.Message {`) + p.In() + p.P(`return New`, ccTypeName, `FromFace(this)`) + p.Out() + p.P(`}`) + p.P(``) + for _, field := range message.Field { + fieldname := p.GetFieldName(message, field) + goTyp, _ := p.GoType(message, field) + if generator.IsMap(file.FileDescriptorProto, field) { + m := p.GoMapType(nil, field) + goTyp = m.GoType + } + p.P(`func (this *`, ccTypeName, `) Get`, fieldname, `() `, goTyp, `{`) + p.In() + p.P(` return this.`, fieldname) + p.Out() + p.P(`}`) + p.P(``) + } + p.P(``) + p.P(`func New`, ccTypeName, `FromFace(that `, ccTypeName, `Face) *`, ccTypeName, ` {`) + p.In() + p.P(`this := &`, ccTypeName, `{}`) + for _, field := range message.Field { + fieldname := p.GetFieldName(message, field) + p.P(`this.`, fieldname, ` = that.Get`, fieldname, `()`) + } + p.P(`return this`) + p.Out() + p.P(`}`) + p.P(``) + } +} + +func init() { + generator.RegisterPlugin(NewPlugin()) +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/face/facetest.go b/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/face/facetest.go new file mode 100644 index 000000000000..305e092ef753 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/face/facetest.go @@ -0,0 +1,80 @@ +// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package face + +import ( + "github.com/gogo/protobuf/gogoproto" + "github.com/gogo/protobuf/plugin/testgen" + "github.com/gogo/protobuf/protoc-gen-gogo/generator" +) + +type test struct { + *generator.Generator +} + +func NewTest(g *generator.Generator) testgen.TestPlugin { + return &test{g} +} + +func (p *test) Generate(imports generator.PluginImports, file *generator.FileDescriptor) bool { + used := false + randPkg := imports.NewImport("math/rand") + timePkg := imports.NewImport("time") + testingPkg := imports.NewImport("testing") + for _, message := range file.Messages() { + ccTypeName := generator.CamelCaseSlice(message.TypeName()) + if !gogoproto.IsFace(file.FileDescriptorProto, message.DescriptorProto) { + continue + } + if message.DescriptorProto.GetOptions().GetMapEntry() { + continue + } + + if gogoproto.HasTestGen(file.FileDescriptorProto, message.DescriptorProto) { + used = true + + p.P(`func Test`, ccTypeName, `Face(t *`, testingPkg.Use(), `.T) {`) + p.In() + p.P(`popr := `, randPkg.Use(), `.New(`, randPkg.Use(), `.NewSource(`, timePkg.Use(), `.Now().UnixNano()))`) + p.P(`p := NewPopulated`, ccTypeName, `(popr, true)`) + p.P(`msg := p.TestProto()`) + p.P(`if !p.Equal(msg) {`) + p.In() + p.P(`t.Fatalf("%#v !Face Equal %#v", msg, p)`) + p.Out() + p.P(`}`) + p.Out() + p.P(`}`) + } + + } + return used +} + +func init() { + testgen.RegisterTestPlugin(NewTest) +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/gostring/gostring.go b/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/gostring/gostring.go new file mode 100644 index 000000000000..e13870ee7d18 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/gostring/gostring.go @@ -0,0 +1,360 @@ +// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +The gostring plugin generates a GoString method for each message. +The GoString method is called whenever you use a fmt.Printf as such: + + fmt.Printf("%#v", mymessage) + +or whenever you actually call GoString() +The output produced by the GoString method can be copied from the output into code and used to set a variable. +It is totally valid Go Code and is populated exactly as the struct that was printed out. + +It is enabled by the following extensions: + + - gostring + - gostring_all + +The gostring plugin also generates a test given it is enabled using one of the following extensions: + + - testgen + - testgen_all + +Let us look at: + + github.com/gogo/protobuf/test/example/example.proto + +Btw all the output can be seen at: + + github.com/gogo/protobuf/test/example/* + +The following message: + + option (gogoproto.gostring_all) = true; + + message A { + optional string Description = 1 [(gogoproto.nullable) = false]; + optional int64 Number = 2 [(gogoproto.nullable) = false]; + optional bytes Id = 3 [(gogoproto.customtype) = "github.com/gogo/protobuf/test/custom.Uuid", (gogoproto.nullable) = false]; + } + +given to the gostring plugin, will generate the following code: + + func (this *A) GoString() string { + if this == nil { + return "nil" + } + s := strings1.Join([]string{`&test.A{` + `Description:` + fmt1.Sprintf("%#v", this.Description), `Number:` + fmt1.Sprintf("%#v", this.Number), `Id:` + fmt1.Sprintf("%#v", this.Id), `XXX_unrecognized:` + fmt1.Sprintf("%#v", this.XXX_unrecognized) + `}`}, ", ") + return s + } + +and the following test code: + + func TestAGoString(t *testing6.T) { + popr := math_rand6.New(math_rand6.NewSource(time6.Now().UnixNano())) + p := NewPopulatedA(popr, false) + s1 := p.GoString() + s2 := fmt2.Sprintf("%#v", p) + if s1 != s2 { + t.Fatalf("GoString want %v got %v", s1, s2) + } + _, err := go_parser.ParseExpr(s1) + if err != nil { + panic(err) + } + } + +Typically fmt.Printf("%#v") will stop to print when it reaches a pointer and +not print their values, while the generated GoString method will always print all values, recursively. 
+ +*/ +package gostring + +import ( + "github.com/gogo/protobuf/gogoproto" + "github.com/gogo/protobuf/protoc-gen-gogo/generator" + "strconv" + "strings" +) + +type gostring struct { + *generator.Generator + generator.PluginImports + atleastOne bool + localName string +} + +func NewGoString() *gostring { + return &gostring{} +} + +func (p *gostring) Name() string { + return "gostring" +} + +func (p *gostring) Init(g *generator.Generator) { + p.Generator = g +} + +func (p *gostring) Generate(file *generator.FileDescriptor) { + proto3 := gogoproto.IsProto3(file.FileDescriptorProto) + p.PluginImports = generator.NewPluginImports(p.Generator) + p.atleastOne = false + + p.localName = generator.FileName(file) + + fmtPkg := p.NewImport("fmt") + stringsPkg := p.NewImport("strings") + protoPkg := p.NewImport("github.com/gogo/protobuf/proto") + if !gogoproto.ImportsGoGoProto(file.FileDescriptorProto) { + protoPkg = p.NewImport("github.com/golang/protobuf/proto") + } + sortPkg := p.NewImport("sort") + strconvPkg := p.NewImport("strconv") + reflectPkg := p.NewImport("reflect") + sortKeysPkg := p.NewImport("github.com/gogo/protobuf/sortkeys") + + for _, message := range file.Messages() { + if !gogoproto.HasGoString(file.FileDescriptorProto, message.DescriptorProto) { + continue + } + if message.DescriptorProto.GetOptions().GetMapEntry() { + continue + } + p.atleastOne = true + packageName := file.PackageName() + + ccTypeName := generator.CamelCaseSlice(message.TypeName()) + p.P(`func (this *`, ccTypeName, `) GoString() string {`) + p.In() + p.P(`if this == nil {`) + p.In() + p.P(`return "nil"`) + p.Out() + p.P(`}`) + + p.P(`s := make([]string, 0, `, strconv.Itoa(len(message.Field)+4), `)`) + p.P(`s = append(s, "&`, packageName, ".", ccTypeName, `{")`) + + oneofs := make(map[string]struct{}) + for _, field := range message.Field { + nullable := gogoproto.IsNullable(field) + repeated := field.IsRepeated() + fieldname := p.GetFieldName(message, field) + oneof := field.OneofIndex != nil + if oneof { + if _, ok := oneofs[fieldname]; ok { + continue + } else { + oneofs[fieldname] = struct{}{} + } + p.P(`if this.`, fieldname, ` != nil {`) + p.In() + p.P(`s = append(s, "`, fieldname, `: " + `, fmtPkg.Use(), `.Sprintf("%#v", this.`, fieldname, `) + ",\n")`) + p.Out() + p.P(`}`) + } else if generator.IsMap(file.FileDescriptorProto, field) { + m := p.GoMapType(nil, field) + mapgoTyp, keyField, keyAliasField := m.GoType, m.KeyField, m.KeyAliasField + keysName := `keysFor` + fieldname + keygoTyp, _ := p.GoType(nil, keyField) + keygoTyp = strings.Replace(keygoTyp, "*", "", 1) + keygoAliasTyp, _ := p.GoType(nil, keyAliasField) + keygoAliasTyp = strings.Replace(keygoAliasTyp, "*", "", 1) + keyCapTyp := generator.CamelCase(keygoTyp) + p.P(keysName, ` := make([]`, keygoTyp, `, 0, len(this.`, fieldname, `))`) + p.P(`for k, _ := range this.`, fieldname, ` {`) + p.In() + if keygoAliasTyp == keygoTyp { + p.P(keysName, ` = append(`, keysName, `, k)`) + } else { + p.P(keysName, ` = append(`, keysName, `, `, keygoTyp, `(k))`) + } + p.Out() + p.P(`}`) + p.P(sortKeysPkg.Use(), `.`, keyCapTyp, `s(`, keysName, `)`) + mapName := `mapStringFor` + fieldname + p.P(mapName, ` := "`, mapgoTyp, `{"`) + p.P(`for _, k := range `, keysName, ` {`) + p.In() + if keygoAliasTyp == keygoTyp { + p.P(mapName, ` += fmt.Sprintf("%#v: %#v,", k, this.`, fieldname, `[k])`) + } else { + p.P(mapName, ` += fmt.Sprintf("%#v: %#v,", k, this.`, fieldname, `[`, keygoAliasTyp, `(k)])`) + } + p.Out() + p.P(`}`) + p.P(mapName, ` += "}"`) + p.P(`if this.`, 
fieldname, ` != nil {`) + p.In() + p.P(`s = append(s, "`, fieldname, `: " + `, mapName, `+ ",\n")`) + p.Out() + p.P(`}`) + } else if field.IsMessage() || p.IsGroup(field) { + if nullable || repeated { + p.P(`if this.`, fieldname, ` != nil {`) + p.In() + } + if nullable || repeated { + p.P(`s = append(s, "`, fieldname, `: " + `, fmtPkg.Use(), `.Sprintf("%#v", this.`, fieldname, `) + ",\n")`) + } else { + p.P(`s = append(s, "`, fieldname, `: " + `, stringsPkg.Use(), `.Replace(this.`, fieldname, `.GoString()`, ",`&`,``,1)", ` + ",\n")`) + } + if nullable || repeated { + p.Out() + p.P(`}`) + } + } else { + if !proto3 && (nullable || repeated) { + p.P(`if this.`, fieldname, ` != nil {`) + p.In() + } + if field.IsEnum() { + if nullable && !repeated && !proto3 { + goTyp, _ := p.GoType(message, field) + p.P(`s = append(s, "`, fieldname, `: " + valueToGoString`, p.localName, `(this.`, fieldname, `,"`, packageName, ".", generator.GoTypeToName(goTyp), `"`, `) + ",\n")`) + } else { + p.P(`s = append(s, "`, fieldname, `: " + `, fmtPkg.Use(), `.Sprintf("%#v", this.`, fieldname, `) + ",\n")`) + } + } else { + if nullable && !repeated && !proto3 { + goTyp, _ := p.GoType(message, field) + p.P(`s = append(s, "`, fieldname, `: " + valueToGoString`, p.localName, `(this.`, fieldname, `,"`, generator.GoTypeToName(goTyp), `"`, `) + ",\n")`) + } else { + p.P(`s = append(s, "`, fieldname, `: " + `, fmtPkg.Use(), `.Sprintf("%#v", this.`, fieldname, `) + ",\n")`) + } + } + if !proto3 && (nullable || repeated) { + p.Out() + p.P(`}`) + } + } + } + if message.DescriptorProto.HasExtension() { + p.P(`if this.XXX_extensions != nil {`) + p.In() + if gogoproto.HasExtensionsMap(file.FileDescriptorProto, message.DescriptorProto) { + p.P(`s = append(s, "XXX_extensions: " + extensionToGoString`, p.localName, `(this.XXX_extensions) + ",\n")`) + } else { + p.P(`s = append(s, "XXX_extensions: " + `, fmtPkg.Use(), `.Sprintf("%#v", this.XXX_extensions) + ",\n")`) + } + p.Out() + p.P(`}`) + } + if gogoproto.HasUnrecognized(file.FileDescriptorProto, message.DescriptorProto) { + p.P(`if this.XXX_unrecognized != nil {`) + p.In() + p.P(`s = append(s, "XXX_unrecognized:" + `, fmtPkg.Use(), `.Sprintf("%#v", this.XXX_unrecognized) + ",\n")`) + p.Out() + p.P(`}`) + } + + p.P(`s = append(s, "}")`) + //outStr += strings.Join([]string{" + `}`", `}`, `,", "`, ")"}, "") + p.P(`return `, stringsPkg.Use(), `.Join(s, "")`) + p.Out() + p.P(`}`) + + //Generate GoString methods for oneof fields + for _, field := range message.Field { + oneof := field.OneofIndex != nil + if !oneof { + continue + } + ccTypeName := p.OneOfTypeName(message, field) + p.P(`func (this *`, ccTypeName, `) GoString() string {`) + p.In() + p.P(`if this == nil {`) + p.In() + p.P(`return "nil"`) + p.Out() + p.P(`}`) + outFlds := []string{} + fieldname := p.GetOneOfFieldName(message, field) + if field.IsMessage() || p.IsGroup(field) { + tmp := strings.Join([]string{"`", fieldname, ":` + "}, "") + tmp += strings.Join([]string{fmtPkg.Use(), `.Sprintf("%#v", this.`, fieldname, `)`}, "") + outFlds = append(outFlds, tmp) + } else { + tmp := strings.Join([]string{"`", fieldname, ":` + "}, "") + tmp += strings.Join([]string{fmtPkg.Use(), `.Sprintf("%#v", this.`, fieldname, ")"}, "") + outFlds = append(outFlds, tmp) + } + outStr := strings.Join([]string{"s := ", stringsPkg.Use(), ".Join([]string{`&", packageName, ".", ccTypeName, "{` + \n"}, "") + outStr += strings.Join(outFlds, ",\n") + outStr += strings.Join([]string{" + `}`", `}`, `,", "`, ")"}, "") + p.P(outStr) + p.P(`return s`) + 
p.Out() + p.P(`}`) + } + } + + if !p.atleastOne { + return + } + + p.P(`func valueToGoString`, p.localName, `(v interface{}, typ string) string {`) + p.In() + p.P(`rv := `, reflectPkg.Use(), `.ValueOf(v)`) + p.P(`if rv.IsNil() {`) + p.In() + p.P(`return "nil"`) + p.Out() + p.P(`}`) + p.P(`pv := `, reflectPkg.Use(), `.Indirect(rv).Interface()`) + p.P(`return `, fmtPkg.Use(), `.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)`) + p.Out() + p.P(`}`) + + p.P(`func extensionToGoString`, p.localName, `(e map[int32]`, protoPkg.Use(), `.Extension) string {`) + p.In() + p.P(`if e == nil { return "nil" }`) + p.P(`s := "map[int32]proto.Extension{"`) + p.P(`keys := make([]int, 0, len(e))`) + p.P(`for k := range e {`) + p.In() + p.P(`keys = append(keys, int(k))`) + p.Out() + p.P(`}`) + p.P(sortPkg.Use(), `.Ints(keys)`) + p.P(`ss := []string{}`) + p.P(`for _, k := range keys {`) + p.In() + p.P(`ss = append(ss, `, strconvPkg.Use(), `.Itoa(k) + ": " + e[int32(k)].GoString())`) + p.Out() + p.P(`}`) + p.P(`s+=`, stringsPkg.Use(), `.Join(ss, ",") + "}"`) + p.P(`return s`) + p.Out() + p.P(`}`) + +} + +func init() { + generator.RegisterPlugin(NewGoString()) +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/gostring/gostringtest.go b/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/gostring/gostringtest.go new file mode 100644 index 000000000000..539774905657 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/gostring/gostringtest.go @@ -0,0 +1,88 @@ +// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package gostring + +import ( + "github.com/gogo/protobuf/gogoproto" + "github.com/gogo/protobuf/plugin/testgen" + "github.com/gogo/protobuf/protoc-gen-gogo/generator" +) + +type test struct { + *generator.Generator +} + +func NewTest(g *generator.Generator) testgen.TestPlugin { + return &test{g} +} + +func (p *test) Generate(imports generator.PluginImports, file *generator.FileDescriptor) bool { + used := false + randPkg := imports.NewImport("math/rand") + timePkg := imports.NewImport("time") + testingPkg := imports.NewImport("testing") + fmtPkg := imports.NewImport("fmt") + parserPkg := imports.NewImport("go/parser") + for _, message := range file.Messages() { + ccTypeName := generator.CamelCaseSlice(message.TypeName()) + if !gogoproto.HasGoString(file.FileDescriptorProto, message.DescriptorProto) { + continue + } + if message.DescriptorProto.GetOptions().GetMapEntry() { + continue + } + + if gogoproto.HasTestGen(file.FileDescriptorProto, message.DescriptorProto) { + used = true + p.P(`func Test`, ccTypeName, `GoString(t *`, testingPkg.Use(), `.T) {`) + p.In() + p.P(`popr := `, randPkg.Use(), `.New(`, randPkg.Use(), `.NewSource(`, timePkg.Use(), `.Now().UnixNano()))`) + p.P(`p := NewPopulated`, ccTypeName, `(popr, false)`) + p.P(`s1 := p.GoString()`) + p.P(`s2 := `, fmtPkg.Use(), `.Sprintf("%#v", p)`) + p.P(`if s1 != s2 {`) + p.In() + p.P(`t.Fatalf("GoString want %v got %v", s1, s2)`) + p.Out() + p.P(`}`) + p.P(`_, err := `, parserPkg.Use(), `.ParseExpr(s1)`) + p.P(`if err != nil {`) + p.In() + p.P(`panic(err)`) + p.Out() + p.P(`}`) + p.Out() + p.P(`}`) + } + + } + return used +} + +func init() { + testgen.RegisterTestPlugin(NewTest) +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/grpc/grpc.go b/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/grpc/grpc.go new file mode 100644 index 000000000000..d769adc9a699 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/grpc/grpc.go @@ -0,0 +1,439 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2015 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Package grpc outputs gRPC service descriptions in Go code. +// It runs as a plugin for the Go protocol buffer compiler plugin. +// It is linked in to protoc-gen-go. +package grpc + +import ( + "fmt" + "path" + "strconv" + "strings" + + pb "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" + "github.com/gogo/protobuf/protoc-gen-gogo/generator" +) + +// Paths for packages used by code generated in this file, +// relative to the import_prefix of the generator.Generator. +const ( + contextPkgPath = "golang.org/x/net/context" + grpcPkgPath = "google.golang.org/grpc" +) + +func init() { + generator.RegisterPlugin(new(grpc)) +} + +// grpc is an implementation of the Go protocol buffer compiler's +// plugin architecture. It generates bindings for gRPC support. +type grpc struct { + gen *generator.Generator +} + +// Name returns the name of this plugin, "grpc". +func (g *grpc) Name() string { + return "grpc" +} + +// The names for packages imported in the generated code. +// They may vary from the final path component of the import path +// if the name is used by other packages. +var ( + contextPkg string + grpcPkg string +) + +// Init initializes the plugin. +func (g *grpc) Init(gen *generator.Generator) { + g.gen = gen + contextPkg = generator.RegisterUniquePackageName("context", nil) + grpcPkg = generator.RegisterUniquePackageName("grpc", nil) +} + +// Given a type name defined in a .proto, return its object. +// Also record that we're using it, to guarantee the associated import. +func (g *grpc) objectNamed(name string) generator.Object { + g.gen.RecordTypeUse(name) + return g.gen.ObjectNamed(name) +} + +// Given a type name defined in a .proto, return its name as we will print it. +func (g *grpc) typeName(str string) string { + return g.gen.TypeName(g.objectNamed(str)) +} + +// P forwards to g.gen.P. +func (g *grpc) P(args ...interface{}) { g.gen.P(args...) } + +// Generate generates code for the services in the given file. +func (g *grpc) Generate(file *generator.FileDescriptor) { + if len(file.FileDescriptorProto.Service) == 0 { + return + } + g.P("// Reference imports to suppress errors if they are not otherwise used.") + g.P("var _ ", contextPkg, ".Context") + g.P("var _ ", grpcPkg, ".ClientConn") + g.P() + for i, service := range file.FileDescriptorProto.Service { + g.generateService(file, service, i) + } +} + +// GenerateImports generates the import declaration for this file. +func (g *grpc) GenerateImports(file *generator.FileDescriptor) { + if len(file.FileDescriptorProto.Service) == 0 { + return + } + g.P("import (") + g.P(contextPkg, " ", strconv.Quote(path.Join(g.gen.ImportPrefix, contextPkgPath))) + g.P(grpcPkg, " ", strconv.Quote(path.Join(g.gen.ImportPrefix, grpcPkgPath))) + g.P(")") + g.P() +} + +// reservedClientName records whether a client name is reserved on the client side. +var reservedClientName = map[string]bool{ +// TODO: do we need any in gRPC? 
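+	// A name listed here causes generateClientSignature and
+	// generateServerSignature to append an underscore to the generated
+	// method name, avoiding the collision.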
+} + +func unexport(s string) string { return strings.ToLower(s[:1]) + s[1:] } + +// generateService generates all the code for the named service. +func (g *grpc) generateService(file *generator.FileDescriptor, service *pb.ServiceDescriptorProto, index int) { + path := fmt.Sprintf("6,%d", index) // 6 means service. + + origServName := service.GetName() + fullServName := file.GetPackage() + "." + origServName + servName := generator.CamelCase(origServName) + + g.P() + g.P("// Client API for ", servName, " service") + g.P() + + // Client interface. + g.P("type ", servName, "Client interface {") + for i, method := range service.Method { + g.gen.PrintComments(fmt.Sprintf("%s,2,%d", path, i)) // 2 means method in a service. + g.P(g.generateClientSignature(servName, method)) + } + g.P("}") + g.P() + + // Client structure. + g.P("type ", unexport(servName), "Client struct {") + g.P("cc *", grpcPkg, ".ClientConn") + g.P("}") + g.P() + + // NewClient factory. + g.P("func New", servName, "Client (cc *", grpcPkg, ".ClientConn) ", servName, "Client {") + g.P("return &", unexport(servName), "Client{cc}") + g.P("}") + g.P() + + var methodIndex, streamIndex int + serviceDescVar := "_" + servName + "_serviceDesc" + // Client method implementations. + for _, method := range service.Method { + var descExpr string + if !method.GetServerStreaming() && !method.GetClientStreaming() { + // Unary RPC method + descExpr = fmt.Sprintf("&%s.Methods[%d]", serviceDescVar, methodIndex) + methodIndex++ + } else { + // Streaming RPC method + descExpr = fmt.Sprintf("&%s.Streams[%d]", serviceDescVar, streamIndex) + streamIndex++ + } + g.generateClientMethod(servName, fullServName, serviceDescVar, method, descExpr) + } + + g.P("// Server API for ", servName, " service") + g.P() + + // Server interface. + serverType := servName + "Server" + g.P("type ", serverType, " interface {") + for i, method := range service.Method { + g.gen.PrintComments(fmt.Sprintf("%s,2,%d", path, i)) // 2 means method in a service. + g.P(g.generateServerSignature(servName, method)) + } + g.P("}") + g.P() + + // Server registration. + g.P("func Register", servName, "Server(s *", grpcPkg, ".Server, srv ", serverType, ") {") + g.P("s.RegisterService(&", serviceDescVar, `, srv)`) + g.P("}") + g.P() + + // Server handler implementations. + var handlerNames []string + for _, method := range service.Method { + hname := g.generateServerMethod(servName, method) + handlerNames = append(handlerNames, hname) + } + + // Service descriptor. + g.P("var ", serviceDescVar, " = ", grpcPkg, ".ServiceDesc {") + g.P("ServiceName: ", strconv.Quote(fullServName), ",") + g.P("HandlerType: (*", serverType, ")(nil),") + g.P("Methods: []", grpcPkg, ".MethodDesc{") + for i, method := range service.Method { + if method.GetServerStreaming() || method.GetClientStreaming() { + continue + } + g.P("{") + g.P("MethodName: ", strconv.Quote(method.GetName()), ",") + g.P("Handler: ", handlerNames[i], ",") + g.P("},") + } + g.P("},") + g.P("Streams: []", grpcPkg, ".StreamDesc{") + for i, method := range service.Method { + if !method.GetServerStreaming() && !method.GetClientStreaming() { + continue + } + g.P("{") + g.P("StreamName: ", strconv.Quote(method.GetName()), ",") + g.P("Handler: ", handlerNames[i], ",") + if method.GetServerStreaming() { + g.P("ServerStreams: true,") + } + if method.GetClientStreaming() { + g.P("ClientStreams: true,") + } + g.P("},") + } + g.P("},") + g.P("}") + g.P() +} + +// generateClientSignature returns the client-side signature for a method. 
+func (g *grpc) generateClientSignature(servName string, method *pb.MethodDescriptorProto) string { + origMethName := method.GetName() + methName := generator.CamelCase(origMethName) + if reservedClientName[methName] { + methName += "_" + } + reqArg := ", in *" + g.typeName(method.GetInputType()) + if method.GetClientStreaming() { + reqArg = "" + } + respName := "*" + g.typeName(method.GetOutputType()) + if method.GetServerStreaming() || method.GetClientStreaming() { + respName = servName + "_" + generator.CamelCase(origMethName) + "Client" + } + return fmt.Sprintf("%s(ctx %s.Context%s, opts ...%s.CallOption) (%s, error)", methName, contextPkg, reqArg, grpcPkg, respName) +} + +func (g *grpc) generateClientMethod(servName, fullServName, serviceDescVar string, method *pb.MethodDescriptorProto, descExpr string) { + sname := fmt.Sprintf("/%s/%s", fullServName, method.GetName()) + methName := generator.CamelCase(method.GetName()) + inType := g.typeName(method.GetInputType()) + outType := g.typeName(method.GetOutputType()) + + g.P("func (c *", unexport(servName), "Client) ", g.generateClientSignature(servName, method), "{") + if !method.GetServerStreaming() && !method.GetClientStreaming() { + g.P("out := new(", outType, ")") + // TODO: Pass descExpr to Invoke. + g.P("err := ", grpcPkg, `.Invoke(ctx, "`, sname, `", in, out, c.cc, opts...)`) + g.P("if err != nil { return nil, err }") + g.P("return out, nil") + g.P("}") + g.P() + return + } + streamType := unexport(servName) + methName + "Client" + g.P("stream, err := ", grpcPkg, ".NewClientStream(ctx, ", descExpr, `, c.cc, "`, sname, `", opts...)`) + g.P("if err != nil { return nil, err }") + g.P("x := &", streamType, "{stream}") + if !method.GetClientStreaming() { + g.P("if err := x.ClientStream.SendMsg(in); err != nil { return nil, err }") + g.P("if err := x.ClientStream.CloseSend(); err != nil { return nil, err }") + } + g.P("return x, nil") + g.P("}") + g.P() + + genSend := method.GetClientStreaming() + genRecv := method.GetServerStreaming() + genCloseAndRecv := !method.GetServerStreaming() + + // Stream auxiliary types and methods. + g.P("type ", servName, "_", methName, "Client interface {") + if genSend { + g.P("Send(*", inType, ") error") + } + if genRecv { + g.P("Recv() (*", outType, ", error)") + } + if genCloseAndRecv { + g.P("CloseAndRecv() (*", outType, ", error)") + } + g.P(grpcPkg, ".ClientStream") + g.P("}") + g.P() + + g.P("type ", streamType, " struct {") + g.P(grpcPkg, ".ClientStream") + g.P("}") + g.P() + + if genSend { + g.P("func (x *", streamType, ") Send(m *", inType, ") error {") + g.P("return x.ClientStream.SendMsg(m)") + g.P("}") + g.P() + } + if genRecv { + g.P("func (x *", streamType, ") Recv() (*", outType, ", error) {") + g.P("m := new(", outType, ")") + g.P("if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err }") + g.P("return m, nil") + g.P("}") + g.P() + } + if genCloseAndRecv { + g.P("func (x *", streamType, ") CloseAndRecv() (*", outType, ", error) {") + g.P("if err := x.ClientStream.CloseSend(); err != nil { return nil, err }") + g.P("m := new(", outType, ")") + g.P("if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err }") + g.P("return m, nil") + g.P("}") + g.P() + } +} + +// generateServerSignature returns the server-side signature for a method. 
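+// For a unary method named Foo this produces, for example:
+//
+//	Foo(ctx context.Context, in *FooRequest) (*FooResponse, error)
+//
+// Streaming methods return only error and take a <Service>_FooServer stream
+// (server-streaming methods additionally keep the request message parameter).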
+func (g *grpc) generateServerSignature(servName string, method *pb.MethodDescriptorProto) string { + origMethName := method.GetName() + methName := generator.CamelCase(origMethName) + if reservedClientName[methName] { + methName += "_" + } + + var reqArgs []string + ret := "error" + if !method.GetServerStreaming() && !method.GetClientStreaming() { + reqArgs = append(reqArgs, contextPkg+".Context") + ret = "(*" + g.typeName(method.GetOutputType()) + ", error)" + } + if !method.GetClientStreaming() { + reqArgs = append(reqArgs, "*"+g.typeName(method.GetInputType())) + } + if method.GetServerStreaming() || method.GetClientStreaming() { + reqArgs = append(reqArgs, servName+"_"+generator.CamelCase(origMethName)+"Server") + } + + return methName + "(" + strings.Join(reqArgs, ", ") + ") " + ret +} + +func (g *grpc) generateServerMethod(servName string, method *pb.MethodDescriptorProto) string { + methName := generator.CamelCase(method.GetName()) + hname := fmt.Sprintf("_%s_%s_Handler", servName, methName) + inType := g.typeName(method.GetInputType()) + outType := g.typeName(method.GetOutputType()) + + if !method.GetServerStreaming() && !method.GetClientStreaming() { + g.P("func ", hname, "(srv interface{}, ctx ", contextPkg, ".Context, dec func(interface{}) error) (interface{}, error) {") + g.P("in := new(", inType, ")") + g.P("if err := dec(in); err != nil { return nil, err }") + g.P("out, err := srv.(", servName, "Server).", methName, "(ctx, in)") + g.P("if err != nil { return nil, err }") + g.P("return out, nil") + g.P("}") + g.P() + return hname + } + streamType := unexport(servName) + methName + "Server" + g.P("func ", hname, "(srv interface{}, stream ", grpcPkg, ".ServerStream) error {") + if !method.GetClientStreaming() { + g.P("m := new(", inType, ")") + g.P("if err := stream.RecvMsg(m); err != nil { return err }") + g.P("return srv.(", servName, "Server).", methName, "(m, &", streamType, "{stream})") + } else { + g.P("return srv.(", servName, "Server).", methName, "(&", streamType, "{stream})") + } + g.P("}") + g.P() + + genSend := method.GetServerStreaming() + genSendAndClose := !method.GetServerStreaming() + genRecv := method.GetClientStreaming() + + // Stream auxiliary types and methods. 
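+	// For a server-streaming method Foo the emitted interface looks roughly
+	// like:
+	//
+	//	type <Service>_FooServer interface {
+	//		Send(*FooResponse) error
+	//		grpc.ServerStream
+	//	}
+	//
+	// with a small struct wrapping grpc.ServerStream to implement it.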
+ g.P("type ", servName, "_", methName, "Server interface {") + if genSend { + g.P("Send(*", outType, ") error") + } + if genSendAndClose { + g.P("SendAndClose(*", outType, ") error") + } + if genRecv { + g.P("Recv() (*", inType, ", error)") + } + g.P(grpcPkg, ".ServerStream") + g.P("}") + g.P() + + g.P("type ", streamType, " struct {") + g.P(grpcPkg, ".ServerStream") + g.P("}") + g.P() + + if genSend { + g.P("func (x *", streamType, ") Send(m *", outType, ") error {") + g.P("return x.ServerStream.SendMsg(m)") + g.P("}") + g.P() + } + if genSendAndClose { + g.P("func (x *", streamType, ") SendAndClose(m *", outType, ") error {") + g.P("return x.ServerStream.SendMsg(m)") + g.P("}") + g.P() + } + if genRecv { + g.P("func (x *", streamType, ") Recv() (*", inType, ", error) {") + g.P("m := new(", inType, ")") + g.P("if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err }") + g.P("return m, nil") + g.P("}") + g.P() + } + + return hname +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/marshalto/marshalto.go b/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/marshalto/marshalto.go new file mode 100644 index 000000000000..52d36098fe02 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/marshalto/marshalto.go @@ -0,0 +1,1301 @@ +// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +The marshalto plugin generates a Marshal and MarshalTo method for each message. +The `Marshal() ([]byte, error)` method results in the fact that the message +implements the Marshaler interface. +This allows proto.Marshal to be faster by calling the generated Marshal method rather than using reflect to Marshal the struct. + +If is enabled by the following extensions: + + - marshaler + - marshaler_all + +Or the following extensions: + + - unsafe_marshaler + - unsafe_marshaler_all + +That is if you want to use the unsafe package in your generated code. +The speed up using the unsafe package is not very significant. 
+
+The generation of marshalling tests is enabled with one of the following extensions:
+
+  - testgen
+  - testgen_all
+
+Benchmarks are generated if one of the following extensions is enabled:
+
+  - benchgen
+  - benchgen_all
+
+Let us look at:
+
+  github.com/gogo/protobuf/test/example/example.proto
+
+All of the output can be seen at:
+
+  github.com/gogo/protobuf/test/example/*
+
+The following message:
+
+option (gogoproto.marshaler_all) = true;
+
+message B {
+	option (gogoproto.description) = true;
+	optional A A = 1 [(gogoproto.nullable) = false, (gogoproto.embed) = true];
+	repeated bytes G = 2 [(gogoproto.customtype) = "github.com/gogo/protobuf/test/custom.Uint128", (gogoproto.nullable) = false];
+}
+
+given to the marshalto plugin, will generate the following code:
+
+  func (m *B) Marshal() (data []byte, err error) {
+    size := m.Size()
+    data = make([]byte, size)
+    n, err := m.MarshalTo(data)
+    if err != nil {
+      return nil, err
+    }
+    return data[:n], nil
+  }
+
+  func (m *B) MarshalTo(data []byte) (int, error) {
+    var i int
+    _ = i
+    var l int
+    _ = l
+    data[i] = 0xa
+    i++
+    i = encodeVarintExample(data, i, uint64(m.A.Size()))
+    n2, err := m.A.MarshalTo(data[i:])
+    if err != nil {
+      return 0, err
+    }
+    i += n2
+    if len(m.G) > 0 {
+      for _, msg := range m.G {
+        data[i] = 0x12
+        i++
+        i = encodeVarintExample(data, i, uint64(msg.Size()))
+        n, err := msg.MarshalTo(data[i:])
+        if err != nil {
+          return 0, err
+        }
+        i += n
+      }
+    }
+    if m.XXX_unrecognized != nil {
+      i += copy(data[i:], m.XXX_unrecognized)
+    }
+    return i, nil
+  }
+
+As shown above, Marshal calculates the size of the not-yet-marshalled message
+and allocates an appropriately sized buffer.
+It then calls the MarshalTo method, which requires a preallocated buffer.
+The MarshalTo method also allows a user to preallocate a reusable buffer instead.
+
+The Size method is generated by the size plugin and the gogoproto.sizer and gogoproto.sizer_all extensions.
+The user can also use the generated Size method to check that a reusable buffer is still big enough.
+
+The generated tests and benchmarks will keep you safe and show that this is a significant speed improvement.
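+
+For example, a caller that wants to avoid allocating a fresh buffer per message can reuse one across calls. A minimal sketch (buf, msg and send are hypothetical):
+
+  size := msg.Size()
+  if cap(buf) < size {
+    buf = make([]byte, size)
+  }
+  n, err := msg.MarshalTo(buf[:size])
+  if err != nil {
+    return err
+  }
+  send(buf[:n])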
+ +*/ +package marshalto + +import ( + "fmt" + "sort" + "strconv" + "strings" + + "github.com/gogo/protobuf/gogoproto" + "github.com/gogo/protobuf/proto" + descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" + "github.com/gogo/protobuf/protoc-gen-gogo/generator" + "github.com/gogo/protobuf/vanity" +) + +type NumGen interface { + Next() string + Current() string +} + +type numGen struct { + index int +} + +func NewNumGen() NumGen { + return &numGen{0} +} + +func (this *numGen) Next() string { + this.index++ + return this.Current() +} + +func (this *numGen) Current() string { + return strconv.Itoa(this.index) +} + +type marshalto struct { + *generator.Generator + generator.PluginImports + atleastOne bool + unsafePkg generator.Single + errorsPkg generator.Single + protoPkg generator.Single + sortKeysPkg generator.Single + mathPkg generator.Single + localName string + unsafe bool +} + +func NewMarshal() *marshalto { + return &marshalto{} +} + +func NewUnsafeMarshal() *marshalto { + return &marshalto{unsafe: true} +} + +func (p *marshalto) Name() string { + if p.unsafe { + return "unsafemarshaler" + } + return "marshalto" +} + +func (p *marshalto) Init(g *generator.Generator) { + p.Generator = g +} + +func (p *marshalto) callFixed64(varName ...string) { + p.P(`i = encodeFixed64`, p.localName, `(data, i, uint64(`, strings.Join(varName, ""), `))`) +} + +func (p *marshalto) callFixed32(varName ...string) { + p.P(`i = encodeFixed32`, p.localName, `(data, i, uint32(`, strings.Join(varName, ""), `))`) +} + +func (p *marshalto) callVarint(varName ...string) { + p.P(`i = encodeVarint`, p.localName, `(data, i, uint64(`, strings.Join(varName, ""), `))`) +} + +func (p *marshalto) encodeVarint(varName string) { + p.P(`for `, varName, ` >= 1<<7 {`) + p.In() + p.P(`data[i] = uint8(uint64(`, varName, `)&0x7f|0x80)`) + p.P(varName, ` >>= 7`) + p.P(`i++`) + p.Out() + p.P(`}`) + p.P(`data[i] = uint8(`, varName, `)`) + p.P(`i++`) +} + +func (p *marshalto) encodeFixed64(varName string) { + p.P(`data[i] = uint8(`, varName, `)`) + p.P(`i++`) + p.P(`data[i] = uint8(`, varName, ` >> 8)`) + p.P(`i++`) + p.P(`data[i] = uint8(`, varName, ` >> 16)`) + p.P(`i++`) + p.P(`data[i] = uint8(`, varName, ` >> 24)`) + p.P(`i++`) + p.P(`data[i] = uint8(`, varName, ` >> 32)`) + p.P(`i++`) + p.P(`data[i] = uint8(`, varName, ` >> 40)`) + p.P(`i++`) + p.P(`data[i] = uint8(`, varName, ` >> 48)`) + p.P(`i++`) + p.P(`data[i] = uint8(`, varName, ` >> 56)`) + p.P(`i++`) +} + +func (p *marshalto) unsafeFixed64(varName string, someType string) { + p.P(`*(*`, someType, `)(`, p.unsafePkg.Use(), `.Pointer(&data[i])) = `, varName) + p.P(`i+=8`) +} + +func (p *marshalto) encodeFixed32(varName string) { + p.P(`data[i] = uint8(`, varName, `)`) + p.P(`i++`) + p.P(`data[i] = uint8(`, varName, ` >> 8)`) + p.P(`i++`) + p.P(`data[i] = uint8(`, varName, ` >> 16)`) + p.P(`i++`) + p.P(`data[i] = uint8(`, varName, ` >> 24)`) + p.P(`i++`) +} + +func (p *marshalto) unsafeFixed32(varName string, someType string) { + p.P(`*(*`, someType, `)(`, p.unsafePkg.Use(), `.Pointer(&data[i])) = `, varName) + p.P(`i+=4`) +} + +func (p *marshalto) encodeKey(fieldNumber int32, wireType int) { + x := uint32(fieldNumber)<<3 | uint32(wireType) + i := 0 + keybuf := make([]byte, 0) + for i = 0; x > 127; i++ { + keybuf = append(keybuf, 0x80|uint8(x&0x7F)) + x >>= 7 + } + keybuf = append(keybuf, uint8(x)) + for _, b := range keybuf { + p.P(`data[i] = `, fmt.Sprintf("%#v", b)) + p.P(`i++`) + } +} + +func keySize(fieldNumber int32, wireType int) int { + x := 
uint32(fieldNumber)<<3 | uint32(wireType) + size := 0 + for size = 0; x > 127; size++ { + x >>= 7 + } + size++ + return size +} + +func wireToType(wire string) int { + switch wire { + case "fixed64": + return proto.WireFixed64 + case "fixed32": + return proto.WireFixed32 + case "varint": + return proto.WireVarint + case "bytes": + return proto.WireBytes + case "group": + return proto.WireBytes + case "zigzag32": + return proto.WireVarint + case "zigzag64": + return proto.WireVarint + } + panic("unreachable") +} + +func (p *marshalto) mapField(numGen NumGen, fieldTyp descriptor.FieldDescriptorProto_Type, varName string, protoSizer bool) { + switch fieldTyp { + case descriptor.FieldDescriptorProto_TYPE_DOUBLE: + p.callFixed64(p.mathPkg.Use(), `.Float64bits(float64(`, varName, `))`) + case descriptor.FieldDescriptorProto_TYPE_FLOAT: + p.callFixed32(p.mathPkg.Use(), `.Float32bits(float32(`, varName, `))`) + case descriptor.FieldDescriptorProto_TYPE_INT64, + descriptor.FieldDescriptorProto_TYPE_UINT64, + descriptor.FieldDescriptorProto_TYPE_INT32, + descriptor.FieldDescriptorProto_TYPE_UINT32, + descriptor.FieldDescriptorProto_TYPE_ENUM: + p.callVarint(varName) + case descriptor.FieldDescriptorProto_TYPE_FIXED64, + descriptor.FieldDescriptorProto_TYPE_SFIXED64: + p.callFixed64(varName) + case descriptor.FieldDescriptorProto_TYPE_FIXED32, + descriptor.FieldDescriptorProto_TYPE_SFIXED32: + p.callFixed32(varName) + case descriptor.FieldDescriptorProto_TYPE_BOOL: + p.P(`if `, varName, ` {`) + p.In() + p.P(`data[i] = 1`) + p.Out() + p.P(`} else {`) + p.In() + p.P(`data[i] = 0`) + p.Out() + p.P(`}`) + p.P(`i++`) + case descriptor.FieldDescriptorProto_TYPE_STRING, + descriptor.FieldDescriptorProto_TYPE_BYTES: + p.callVarint(`len(`, varName, `)`) + p.P(`i+=copy(data[i:], `, varName, `)`) + case descriptor.FieldDescriptorProto_TYPE_SINT32: + p.callVarint(`(uint32(`, varName, `) << 1) ^ uint32((`, varName, ` >> 31))`) + case descriptor.FieldDescriptorProto_TYPE_SINT64: + p.callVarint(`(uint64(`, varName, `) << 1) ^ uint64((`, varName, ` >> 63))`) + case descriptor.FieldDescriptorProto_TYPE_MESSAGE: + if protoSizer { + p.callVarint(varName, `.ProtoSize()`) + } else { + p.callVarint(varName, `.Size()`) + } + p.P(`n`, numGen.Next(), `, err := `, varName, `.MarshalTo(data[i:])`) + p.P(`if err != nil {`) + p.In() + p.P(`return 0, err`) + p.Out() + p.P(`}`) + p.P(`i+=n`, numGen.Current()) + } +} + +type orderFields []*descriptor.FieldDescriptorProto + +func (this orderFields) Len() int { + return len(this) +} + +func (this orderFields) Less(i, j int) bool { + return this[i].GetNumber() < this[j].GetNumber() +} + +func (this orderFields) Swap(i, j int) { + this[i], this[j] = this[j], this[i] +} + +func (p *marshalto) generateField(proto3 bool, numGen NumGen, file *generator.FileDescriptor, message *generator.Descriptor, field *descriptor.FieldDescriptorProto) { + fieldname := p.GetOneOfFieldName(message, field) + nullable := gogoproto.IsNullable(field) + repeated := field.IsRepeated() + required := field.IsRequired() + + protoSizer := gogoproto.IsProtoSizer(file.FileDescriptorProto, message.DescriptorProto) + if required && nullable { + p.P(`if m.`, fieldname, `== nil {`) + p.In() + if !gogoproto.ImportsGoGoProto(file.FileDescriptorProto) { + p.P(`return 0, new(`, p.protoPkg.Use(), `.RequiredNotSetError)`) + } else { + p.P(`return 0, `, p.protoPkg.Use(), `.NewRequiredNotSetError("`, field.GetName(), `")`) + } + p.Out() + p.P(`} else {`) + } else if repeated { + p.P(`if len(m.`, fieldname, `) > 0 {`) + p.In() + 
} else if ((!proto3 || field.IsMessage()) && nullable) || + (*field.Type == descriptor.FieldDescriptorProto_TYPE_BYTES && !gogoproto.IsCustomType(field)) { + p.P(`if m.`, fieldname, ` != nil {`) + p.In() + } + packed := field.IsPacked() + wireType := field.WireType() + fieldNumber := field.GetNumber() + if packed { + wireType = proto.WireBytes + } + switch *field.Type { + case descriptor.FieldDescriptorProto_TYPE_DOUBLE: + if !p.unsafe || gogoproto.IsCastType(field) { + if packed { + p.encodeKey(fieldNumber, wireType) + p.callVarint(`len(m.`, fieldname, `) * 8`) + p.P(`for _, num := range m.`, fieldname, ` {`) + p.In() + p.P(`f`, numGen.Next(), ` := `, p.mathPkg.Use(), `.Float64bits(float64(num))`) + p.encodeFixed64("f" + numGen.Current()) + p.Out() + p.P(`}`) + } else if repeated { + p.P(`for _, num := range m.`, fieldname, ` {`) + p.In() + p.encodeKey(fieldNumber, wireType) + p.P(`f`, numGen.Next(), ` := `, p.mathPkg.Use(), `.Float64bits(float64(num))`) + p.encodeFixed64("f" + numGen.Current()) + p.Out() + p.P(`}`) + } else if proto3 { + p.P(`if m.`, fieldname, ` != 0 {`) + p.In() + p.encodeKey(fieldNumber, wireType) + p.callFixed64(p.mathPkg.Use(), `.Float64bits(float64(m.`+fieldname, `))`) + p.Out() + p.P(`}`) + } else if !nullable { + p.encodeKey(fieldNumber, wireType) + p.callFixed64(p.mathPkg.Use(), `.Float64bits(float64(m.`+fieldname, `))`) + } else { + p.encodeKey(fieldNumber, wireType) + p.callFixed64(p.mathPkg.Use(), `.Float64bits(float64(*m.`+fieldname, `))`) + } + } else { + if packed { + p.encodeKey(fieldNumber, wireType) + p.callVarint(`len(m.`, fieldname, `) * 8`) + p.P(`for _, num := range m.`, fieldname, ` {`) + p.In() + p.unsafeFixed64("num", "float64") + p.Out() + p.P(`}`) + } else if repeated { + p.P(`for _, num := range m.`, fieldname, ` {`) + p.In() + p.encodeKey(fieldNumber, wireType) + p.unsafeFixed64("num", "float64") + p.Out() + p.P(`}`) + } else if proto3 { + p.P(`if m.`, fieldname, ` != 0 {`) + p.In() + p.encodeKey(fieldNumber, wireType) + p.unsafeFixed64(`m.`+fieldname, "float64") + p.Out() + p.P(`}`) + } else if !nullable { + p.encodeKey(fieldNumber, wireType) + p.unsafeFixed64(`m.`+fieldname, "float64") + } else { + p.encodeKey(fieldNumber, wireType) + p.unsafeFixed64(`*m.`+fieldname, `float64`) + } + } + case descriptor.FieldDescriptorProto_TYPE_FLOAT: + if !p.unsafe || gogoproto.IsCastType(field) { + if packed { + p.encodeKey(fieldNumber, wireType) + p.callVarint(`len(m.`, fieldname, `) * 4`) + p.P(`for _, num := range m.`, fieldname, ` {`) + p.In() + p.P(`f`, numGen.Next(), ` := `, p.mathPkg.Use(), `.Float32bits(float32(num))`) + p.encodeFixed32("f" + numGen.Current()) + p.Out() + p.P(`}`) + } else if repeated { + p.P(`for _, num := range m.`, fieldname, ` {`) + p.In() + p.encodeKey(fieldNumber, wireType) + p.P(`f`, numGen.Next(), ` := `, p.mathPkg.Use(), `.Float32bits(float32(num))`) + p.encodeFixed32("f" + numGen.Current()) + p.Out() + p.P(`}`) + } else if proto3 { + p.P(`if m.`, fieldname, ` != 0 {`) + p.In() + p.encodeKey(fieldNumber, wireType) + p.callFixed32(p.mathPkg.Use(), `.Float32bits(float32(m.`+fieldname, `))`) + p.Out() + p.P(`}`) + } else if !nullable { + p.encodeKey(fieldNumber, wireType) + p.callFixed32(p.mathPkg.Use(), `.Float32bits(float32(m.`+fieldname, `))`) + } else { + p.encodeKey(fieldNumber, wireType) + p.callFixed32(p.mathPkg.Use(), `.Float32bits(float32(*m.`+fieldname, `))`) + } + } else { + if packed { + p.encodeKey(fieldNumber, wireType) + p.callVarint(`len(m.`, fieldname, `) * 4`) + p.P(`for _, num := range m.`, fieldname, ` 
{`) + p.In() + p.unsafeFixed32("num", "float32") + p.Out() + p.P(`}`) + } else if repeated { + p.P(`for _, num := range m.`, fieldname, ` {`) + p.In() + p.encodeKey(fieldNumber, wireType) + p.unsafeFixed32("num", "float32") + p.Out() + p.P(`}`) + } else if proto3 { + p.P(`if m.`, fieldname, ` != 0 {`) + p.In() + p.encodeKey(fieldNumber, wireType) + p.unsafeFixed32(`m.`+fieldname, `float32`) + p.Out() + p.P(`}`) + } else if !nullable { + p.encodeKey(fieldNumber, wireType) + p.unsafeFixed32(`m.`+fieldname, `float32`) + } else { + p.encodeKey(fieldNumber, wireType) + p.unsafeFixed32(`*m.`+fieldname, "float32") + } + } + case descriptor.FieldDescriptorProto_TYPE_INT64, + descriptor.FieldDescriptorProto_TYPE_UINT64, + descriptor.FieldDescriptorProto_TYPE_INT32, + descriptor.FieldDescriptorProto_TYPE_UINT32, + descriptor.FieldDescriptorProto_TYPE_ENUM: + if packed { + jvar := "j" + numGen.Next() + p.P(`data`, numGen.Next(), ` := make([]byte, len(m.`, fieldname, `)*10)`) + p.P(`var `, jvar, ` int`) + if *field.Type == descriptor.FieldDescriptorProto_TYPE_INT64 || + *field.Type == descriptor.FieldDescriptorProto_TYPE_INT32 { + p.P(`for _, num1 := range m.`, fieldname, ` {`) + p.In() + p.P(`num := uint64(num1)`) + } else { + p.P(`for _, num := range m.`, fieldname, ` {`) + p.In() + } + p.P(`for num >= 1<<7 {`) + p.In() + p.P(`data`, numGen.Current(), `[`, jvar, `] = uint8(uint64(num)&0x7f|0x80)`) + p.P(`num >>= 7`) + p.P(jvar, `++`) + p.Out() + p.P(`}`) + p.P(`data`, numGen.Current(), `[`, jvar, `] = uint8(num)`) + p.P(jvar, `++`) + p.Out() + p.P(`}`) + p.encodeKey(fieldNumber, wireType) + p.callVarint(jvar) + p.P(`i += copy(data[i:], data`, numGen.Current(), `[:`, jvar, `])`) + } else if repeated { + p.P(`for _, num := range m.`, fieldname, ` {`) + p.In() + p.encodeKey(fieldNumber, wireType) + p.callVarint("num") + p.Out() + p.P(`}`) + } else if proto3 { + p.P(`if m.`, fieldname, ` != 0 {`) + p.In() + p.encodeKey(fieldNumber, wireType) + p.callVarint(`m.`, fieldname) + p.Out() + p.P(`}`) + } else if !nullable { + p.encodeKey(fieldNumber, wireType) + p.callVarint(`m.`, fieldname) + } else { + p.encodeKey(fieldNumber, wireType) + p.callVarint(`*m.`, fieldname) + } + case descriptor.FieldDescriptorProto_TYPE_FIXED64, + descriptor.FieldDescriptorProto_TYPE_SFIXED64: + if !p.unsafe { + if packed { + p.encodeKey(fieldNumber, wireType) + p.callVarint(`len(m.`, fieldname, `) * 8`) + p.P(`for _, num := range m.`, fieldname, ` {`) + p.In() + p.encodeFixed64("num") + p.Out() + p.P(`}`) + } else if repeated { + p.P(`for _, num := range m.`, fieldname, ` {`) + p.In() + p.encodeKey(fieldNumber, wireType) + p.encodeFixed64("num") + p.Out() + p.P(`}`) + } else if proto3 { + p.P(`if m.`, fieldname, ` != 0 {`) + p.In() + p.encodeKey(fieldNumber, wireType) + p.callFixed64("m." + fieldname) + p.Out() + p.P(`}`) + } else if !nullable { + p.encodeKey(fieldNumber, wireType) + p.callFixed64("m." + fieldname) + } else { + p.encodeKey(fieldNumber, wireType) + p.callFixed64("*m." 
+ fieldname) + } + } else { + typeName := "int64" + if *field.Type == descriptor.FieldDescriptorProto_TYPE_FIXED64 { + typeName = "uint64" + } + if packed { + p.encodeKey(fieldNumber, wireType) + p.callVarint(`len(m.`, fieldname, `) * 8`) + p.P(`for _, num := range m.`, fieldname, ` {`) + p.In() + p.unsafeFixed64("num", typeName) + p.Out() + p.P(`}`) + } else if repeated { + p.P(`for _, num := range m.`, fieldname, ` {`) + p.In() + p.encodeKey(fieldNumber, wireType) + p.unsafeFixed64("num", typeName) + p.Out() + p.P(`}`) + } else if proto3 { + p.P(`if m.`, fieldname, ` != 0 {`) + p.In() + p.encodeKey(fieldNumber, wireType) + p.unsafeFixed64("m."+fieldname, typeName) + p.Out() + p.P(`}`) + } else if !nullable { + p.encodeKey(fieldNumber, wireType) + p.unsafeFixed64("m."+fieldname, typeName) + } else { + p.encodeKey(fieldNumber, wireType) + p.unsafeFixed64("*m."+fieldname, typeName) + } + } + case descriptor.FieldDescriptorProto_TYPE_FIXED32, + descriptor.FieldDescriptorProto_TYPE_SFIXED32: + if !p.unsafe { + if packed { + p.encodeKey(fieldNumber, wireType) + p.callVarint(`len(m.`, fieldname, `) * 4`) + p.P(`for _, num := range m.`, fieldname, ` {`) + p.In() + p.encodeFixed32("num") + p.Out() + p.P(`}`) + } else if repeated { + p.P(`for _, num := range m.`, fieldname, ` {`) + p.In() + p.encodeKey(fieldNumber, wireType) + p.encodeFixed32("num") + p.Out() + p.P(`}`) + } else if proto3 { + p.P(`if m.`, fieldname, ` != 0 {`) + p.In() + p.encodeKey(fieldNumber, wireType) + p.callFixed32("m." + fieldname) + p.Out() + p.P(`}`) + } else if !nullable { + p.encodeKey(fieldNumber, wireType) + p.callFixed32("m." + fieldname) + } else { + p.encodeKey(fieldNumber, wireType) + p.callFixed32("*m." + fieldname) + } + } else { + typeName := "int32" + if *field.Type == descriptor.FieldDescriptorProto_TYPE_FIXED32 { + typeName = "uint32" + } + if packed { + p.encodeKey(fieldNumber, wireType) + p.callVarint(`len(m.`, fieldname, `) * 4`) + p.P(`for _, num := range m.`, fieldname, ` {`) + p.In() + p.unsafeFixed32("num", typeName) + p.Out() + p.P(`}`) + } else if repeated { + p.P(`for _, num := range m.`, fieldname, ` {`) + p.In() + p.encodeKey(fieldNumber, wireType) + p.unsafeFixed32("num", typeName) + p.Out() + p.P(`}`) + } else if proto3 { + p.P(`if m.`, fieldname, ` != 0 {`) + p.In() + p.encodeKey(fieldNumber, wireType) + p.unsafeFixed32("m."+fieldname, typeName) + p.Out() + p.P(`}`) + } else if !nullable { + p.encodeKey(fieldNumber, wireType) + p.unsafeFixed32("m."+fieldname, typeName) + } else { + p.encodeKey(fieldNumber, wireType) + p.unsafeFixed32("*m."+fieldname, typeName) + } + } + case descriptor.FieldDescriptorProto_TYPE_BOOL: + if packed { + p.encodeKey(fieldNumber, wireType) + p.callVarint(`len(m.`, fieldname, `)`) + p.P(`for _, b := range m.`, fieldname, ` {`) + p.In() + p.P(`if b {`) + p.In() + p.P(`data[i] = 1`) + p.Out() + p.P(`} else {`) + p.In() + p.P(`data[i] = 0`) + p.Out() + p.P(`}`) + p.P(`i++`) + p.Out() + p.P(`}`) + } else if repeated { + p.P(`for _, b := range m.`, fieldname, ` {`) + p.In() + p.encodeKey(fieldNumber, wireType) + p.P(`if b {`) + p.In() + p.P(`data[i] = 1`) + p.Out() + p.P(`} else {`) + p.In() + p.P(`data[i] = 0`) + p.Out() + p.P(`}`) + p.P(`i++`) + p.Out() + p.P(`}`) + } else if proto3 { + p.P(`if m.`, fieldname, ` {`) + p.In() + p.encodeKey(fieldNumber, wireType) + p.P(`if m.`, fieldname, ` {`) + p.In() + p.P(`data[i] = 1`) + p.Out() + p.P(`} else {`) + p.In() + p.P(`data[i] = 0`) + p.Out() + p.P(`}`) + p.P(`i++`) + p.Out() + p.P(`}`) + } else if !nullable { + 
p.encodeKey(fieldNumber, wireType) + p.P(`if m.`, fieldname, ` {`) + p.In() + p.P(`data[i] = 1`) + p.Out() + p.P(`} else {`) + p.In() + p.P(`data[i] = 0`) + p.Out() + p.P(`}`) + p.P(`i++`) + } else { + p.encodeKey(fieldNumber, wireType) + p.P(`if *m.`, fieldname, ` {`) + p.In() + p.P(`data[i] = 1`) + p.Out() + p.P(`} else {`) + p.In() + p.P(`data[i] = 0`) + p.Out() + p.P(`}`) + p.P(`i++`) + } + case descriptor.FieldDescriptorProto_TYPE_STRING: + if repeated { + p.P(`for _, s := range m.`, fieldname, ` {`) + p.In() + p.encodeKey(fieldNumber, wireType) + p.P(`l = len(s)`) + p.encodeVarint("l") + p.P(`i+=copy(data[i:], s)`) + p.Out() + p.P(`}`) + } else if proto3 { + p.P(`if len(m.`, fieldname, `) > 0 {`) + p.In() + p.encodeKey(fieldNumber, wireType) + p.callVarint(`len(m.`, fieldname, `)`) + p.P(`i+=copy(data[i:], m.`, fieldname, `)`) + p.Out() + p.P(`}`) + } else if !nullable { + p.encodeKey(fieldNumber, wireType) + p.callVarint(`len(m.`, fieldname, `)`) + p.P(`i+=copy(data[i:], m.`, fieldname, `)`) + } else { + p.encodeKey(fieldNumber, wireType) + p.callVarint(`len(*m.`, fieldname, `)`) + p.P(`i+=copy(data[i:], *m.`, fieldname, `)`) + } + case descriptor.FieldDescriptorProto_TYPE_GROUP: + panic(fmt.Errorf("marshaler does not support group %v", fieldname)) + case descriptor.FieldDescriptorProto_TYPE_MESSAGE: + if generator.IsMap(file.FileDescriptorProto, field) { + m := p.GoMapType(nil, field) + _, keywire := p.GoType(nil, m.KeyField) + valuegoTyp, valuewire := p.GoType(nil, m.ValueField) + valuegoAliasTyp, _ := p.GoType(nil, m.ValueAliasField) + nullable, valuegoTyp, valuegoAliasTyp = generator.GoMapValueTypes(field, m.ValueField, valuegoTyp, valuegoAliasTyp) + keyKeySize := keySize(1, wireToType(keywire)) + valueKeySize := keySize(2, wireToType(valuewire)) + p.P(`for k, _ := range m.`, fieldname, ` {`) + p.In() + p.encodeKey(fieldNumber, wireType) + sum := []string{strconv.Itoa(keyKeySize)} + switch m.KeyField.GetType() { + case descriptor.FieldDescriptorProto_TYPE_DOUBLE, + descriptor.FieldDescriptorProto_TYPE_FIXED64, + descriptor.FieldDescriptorProto_TYPE_SFIXED64: + sum = append(sum, `8`) + case descriptor.FieldDescriptorProto_TYPE_FLOAT, + descriptor.FieldDescriptorProto_TYPE_FIXED32, + descriptor.FieldDescriptorProto_TYPE_SFIXED32: + sum = append(sum, `4`) + case descriptor.FieldDescriptorProto_TYPE_INT64, + descriptor.FieldDescriptorProto_TYPE_UINT64, + descriptor.FieldDescriptorProto_TYPE_UINT32, + descriptor.FieldDescriptorProto_TYPE_ENUM, + descriptor.FieldDescriptorProto_TYPE_INT32: + sum = append(sum, `sov`+p.localName+`(uint64(k))`) + case descriptor.FieldDescriptorProto_TYPE_BOOL: + sum = append(sum, `1`) + case descriptor.FieldDescriptorProto_TYPE_STRING, + descriptor.FieldDescriptorProto_TYPE_BYTES: + sum = append(sum, `len(k)+sov`+p.localName+`(uint64(len(k)))`) + case descriptor.FieldDescriptorProto_TYPE_SINT32, + descriptor.FieldDescriptorProto_TYPE_SINT64: + sum = append(sum, `soz`+p.localName+`(uint64(k))`) + } + p.P(`v := m.`, fieldname, `[k]`) + accessor := `v` + sum = append(sum, strconv.Itoa(valueKeySize)) + switch m.ValueField.GetType() { + case descriptor.FieldDescriptorProto_TYPE_DOUBLE, + descriptor.FieldDescriptorProto_TYPE_FIXED64, + descriptor.FieldDescriptorProto_TYPE_SFIXED64: + sum = append(sum, strconv.Itoa(8)) + case descriptor.FieldDescriptorProto_TYPE_FLOAT, + descriptor.FieldDescriptorProto_TYPE_FIXED32, + descriptor.FieldDescriptorProto_TYPE_SFIXED32: + sum = append(sum, strconv.Itoa(4)) + case descriptor.FieldDescriptorProto_TYPE_INT64, + 
descriptor.FieldDescriptorProto_TYPE_UINT64, + descriptor.FieldDescriptorProto_TYPE_UINT32, + descriptor.FieldDescriptorProto_TYPE_ENUM, + descriptor.FieldDescriptorProto_TYPE_INT32: + sum = append(sum, `sov`+p.localName+`(uint64(v))`) + case descriptor.FieldDescriptorProto_TYPE_BOOL: + sum = append(sum, `1`) + case descriptor.FieldDescriptorProto_TYPE_STRING, + descriptor.FieldDescriptorProto_TYPE_BYTES: + sum = append(sum, `len(v)+sov`+p.localName+`(uint64(len(v)))`) + case descriptor.FieldDescriptorProto_TYPE_SINT32, + descriptor.FieldDescriptorProto_TYPE_SINT64: + sum = append(sum, `soz`+p.localName+`(uint64(v))`) + case descriptor.FieldDescriptorProto_TYPE_MESSAGE: + if nullable { + p.P(`if v == nil {`) + p.In() + p.P(`return 0, `, p.errorsPkg.Use(), `.New("proto: map has nil element")`) + p.Out() + p.P(`}`) + } + if valuegoTyp != valuegoAliasTyp { + if nullable { + // cast back to the type that has the generated methods on it + accessor = `((` + valuegoTyp + `)(` + accessor + `))` + } else { + accessor = `((*` + valuegoTyp + `)(&` + accessor + `))` + } + } else if !nullable { + accessor = `(&v)` + } + if protoSizer { + p.P(`msgSize := `, accessor, `.ProtoSize()`) + } else { + p.P(`msgSize := `, accessor, `.Size()`) + } + sum = append(sum, `msgSize + sov`+p.localName+`(uint64(msgSize))`) + } + p.P(`mapSize := `, strings.Join(sum, " + ")) + p.callVarint("mapSize") + p.encodeKey(1, wireToType(keywire)) + p.mapField(numGen, m.KeyField.GetType(), "k", protoSizer) + p.encodeKey(2, wireToType(valuewire)) + p.mapField(numGen, m.ValueField.GetType(), accessor, protoSizer) + p.Out() + p.P(`}`) + } else if repeated { + p.P(`for _, msg := range m.`, fieldname, ` {`) + p.In() + p.encodeKey(fieldNumber, wireType) + if protoSizer { + p.callVarint("msg.ProtoSize()") + } else { + p.callVarint("msg.Size()") + } + p.P(`n, err := msg.MarshalTo(data[i:])`) + p.P(`if err != nil {`) + p.In() + p.P(`return 0, err`) + p.Out() + p.P(`}`) + p.P(`i+=n`) + p.Out() + p.P(`}`) + } else { + p.encodeKey(fieldNumber, wireType) + if protoSizer { + p.callVarint(`m.`, fieldname, `.ProtoSize()`) + } else { + p.callVarint(`m.`, fieldname, `.Size()`) + } + p.P(`n`, numGen.Next(), `, err := m.`, fieldname, `.MarshalTo(data[i:])`) + p.P(`if err != nil {`) + p.In() + p.P(`return 0, err`) + p.Out() + p.P(`}`) + p.P(`i+=n`, numGen.Current()) + } + case descriptor.FieldDescriptorProto_TYPE_BYTES: + if !gogoproto.IsCustomType(field) { + if repeated { + p.P(`for _, b := range m.`, fieldname, ` {`) + p.In() + p.encodeKey(fieldNumber, wireType) + p.callVarint("len(b)") + p.P(`i+=copy(data[i:], b)`) + p.Out() + p.P(`}`) + } else if proto3 { + p.P(`if len(m.`, fieldname, `) > 0 {`) + p.In() + p.encodeKey(fieldNumber, wireType) + p.callVarint(`len(m.`, fieldname, `)`) + p.P(`i+=copy(data[i:], m.`, fieldname, `)`) + p.Out() + p.P(`}`) + } else { + p.encodeKey(fieldNumber, wireType) + p.callVarint(`len(m.`, fieldname, `)`) + p.P(`i+=copy(data[i:], m.`, fieldname, `)`) + } + } else { + if repeated { + p.P(`for _, msg := range m.`, fieldname, ` {`) + p.In() + p.encodeKey(fieldNumber, wireType) + if protoSizer { + p.callVarint(`msg.ProtoSize()`) + } else { + p.callVarint(`msg.Size()`) + } + p.P(`n, err := msg.MarshalTo(data[i:])`) + p.P(`if err != nil {`) + p.In() + p.P(`return 0, err`) + p.Out() + p.P(`}`) + p.P(`i+=n`) + p.Out() + p.P(`}`) + } else { + p.encodeKey(fieldNumber, wireType) + if protoSizer { + p.callVarint(`m.`, fieldname, `.ProtoSize()`) + } else { + p.callVarint(`m.`, fieldname, `.Size()`) + } + p.P(`n`, numGen.Next(), `, 
err := m.`, fieldname, `.MarshalTo(data[i:])`) + p.P(`if err != nil {`) + p.In() + p.P(`return 0, err`) + p.Out() + p.P(`}`) + p.P(`i+=n`, numGen.Current()) + } + } + case descriptor.FieldDescriptorProto_TYPE_SINT32: + if packed { + datavar := "data" + numGen.Next() + jvar := "j" + numGen.Next() + p.P(datavar, ` := make([]byte, len(m.`, fieldname, ")*5)") + p.P(`var `, jvar, ` int`) + p.P(`for _, num := range m.`, fieldname, ` {`) + p.In() + xvar := "x" + numGen.Next() + p.P(xvar, ` := (uint32(num) << 1) ^ uint32((num >> 31))`) + p.P(`for `, xvar, ` >= 1<<7 {`) + p.In() + p.P(datavar, `[`, jvar, `] = uint8(uint64(`, xvar, `)&0x7f|0x80)`) + p.P(jvar, `++`) + p.P(xvar, ` >>= 7`) + p.Out() + p.P(`}`) + p.P(datavar, `[`, jvar, `] = uint8(`, xvar, `)`) + p.P(jvar, `++`) + p.Out() + p.P(`}`) + p.encodeKey(fieldNumber, wireType) + p.callVarint(jvar) + p.P(`i+=copy(data[i:], `, datavar, `[:`, jvar, `])`) + } else if repeated { + p.P(`for _, num := range m.`, fieldname, ` {`) + p.In() + p.encodeKey(fieldNumber, wireType) + p.P(`x`, numGen.Next(), ` := (uint32(num) << 1) ^ uint32((num >> 31))`) + p.encodeVarint("x" + numGen.Current()) + p.Out() + p.P(`}`) + } else if proto3 { + p.P(`if m.`, fieldname, ` != 0 {`) + p.In() + p.encodeKey(fieldNumber, wireType) + p.callVarint(`(uint32(m.`, fieldname, `) << 1) ^ uint32((m.`, fieldname, ` >> 31))`) + p.Out() + p.P(`}`) + } else if !nullable { + p.encodeKey(fieldNumber, wireType) + p.callVarint(`(uint32(m.`, fieldname, `) << 1) ^ uint32((m.`, fieldname, ` >> 31))`) + } else { + p.encodeKey(fieldNumber, wireType) + p.callVarint(`(uint32(*m.`, fieldname, `) << 1) ^ uint32((*m.`, fieldname, ` >> 31))`) + } + case descriptor.FieldDescriptorProto_TYPE_SINT64: + if packed { + jvar := "j" + numGen.Next() + xvar := "x" + numGen.Next() + datavar := "data" + numGen.Next() + p.P(`var `, jvar, ` int`) + p.P(datavar, ` := make([]byte, len(m.`, fieldname, `)*10)`) + p.P(`for _, num := range m.`, fieldname, ` {`) + p.In() + p.P(xvar, ` := (uint64(num) << 1) ^ uint64((num >> 63))`) + p.P(`for `, xvar, ` >= 1<<7 {`) + p.In() + p.P(datavar, `[`, jvar, `] = uint8(uint64(`, xvar, `)&0x7f|0x80)`) + p.P(jvar, `++`) + p.P(xvar, ` >>= 7`) + p.Out() + p.P(`}`) + p.P(datavar, `[`, jvar, `] = uint8(`, xvar, `)`) + p.P(jvar, `++`) + p.Out() + p.P(`}`) + p.encodeKey(fieldNumber, wireType) + p.callVarint(jvar) + p.P(`i+=copy(data[i:], `, datavar, `[:`, jvar, `])`) + } else if repeated { + p.P(`for _, num := range m.`, fieldname, ` {`) + p.In() + p.encodeKey(fieldNumber, wireType) + p.P(`x`, numGen.Next(), ` := (uint64(num) << 1) ^ uint64((num >> 63))`) + p.encodeVarint("x" + numGen.Current()) + p.Out() + p.P(`}`) + } else if proto3 { + p.P(`if m.`, fieldname, ` != 0 {`) + p.In() + p.encodeKey(fieldNumber, wireType) + p.callVarint(`(uint64(m.`, fieldname, `) << 1) ^ uint64((m.`, fieldname, ` >> 63))`) + p.Out() + p.P(`}`) + } else if !nullable { + p.encodeKey(fieldNumber, wireType) + p.callVarint(`(uint64(m.`, fieldname, `) << 1) ^ uint64((m.`, fieldname, ` >> 63))`) + } else { + p.encodeKey(fieldNumber, wireType) + p.callVarint(`(uint64(*m.`, fieldname, `) << 1) ^ uint64((*m.`, fieldname, ` >> 63))`) + } + default: + panic("not implemented") + } + if (required && nullable) || + ((!proto3 || field.IsMessage()) && nullable) || + repeated || + (*field.Type == descriptor.FieldDescriptorProto_TYPE_BYTES && !gogoproto.IsCustomType(field)) { + p.Out() + p.P(`}`) + } +} + +func (p *marshalto) Generate(file *generator.FileDescriptor) { + numGen := NewNumGen() + p.PluginImports = 
generator.NewPluginImports(p.Generator) + p.atleastOne = false + p.localName = generator.FileName(file) + + p.mathPkg = p.NewImport("math") + p.sortKeysPkg = p.NewImport("github.com/gogo/protobuf/sortkeys") + p.protoPkg = p.NewImport("github.com/gogo/protobuf/proto") + if !gogoproto.ImportsGoGoProto(file.FileDescriptorProto) { + p.protoPkg = p.NewImport("github.com/golang/protobuf/proto") + } + p.unsafePkg = p.NewImport("unsafe") + p.errorsPkg = p.NewImport("errors") + + for _, message := range file.Messages() { + if message.DescriptorProto.GetOptions().GetMapEntry() { + continue + } + ccTypeName := generator.CamelCaseSlice(message.TypeName()) + if p.unsafe { + if !gogoproto.IsUnsafeMarshaler(file.FileDescriptorProto, message.DescriptorProto) { + continue + } + if gogoproto.IsMarshaler(file.FileDescriptorProto, message.DescriptorProto) { + panic(fmt.Sprintf("unsafe_marshaler and marshalto enabled for %v", ccTypeName)) + } + } + if !p.unsafe { + if !gogoproto.IsMarshaler(file.FileDescriptorProto, message.DescriptorProto) { + continue + } + if gogoproto.IsUnsafeMarshaler(file.FileDescriptorProto, message.DescriptorProto) { + panic(fmt.Sprintf("unsafe_marshaler and marshalto enabled for %v", ccTypeName)) + } + } + p.atleastOne = true + + p.P(`func (m *`, ccTypeName, `) Marshal() (data []byte, err error) {`) + p.In() + if gogoproto.IsProtoSizer(file.FileDescriptorProto, message.DescriptorProto) { + p.P(`size := m.ProtoSize()`) + } else { + p.P(`size := m.Size()`) + } + p.P(`data = make([]byte, size)`) + p.P(`n, err := m.MarshalTo(data)`) + p.P(`if err != nil {`) + p.In() + p.P(`return nil, err`) + p.Out() + p.P(`}`) + p.P(`return data[:n], nil`) + p.Out() + p.P(`}`) + p.P(``) + p.P(`func (m *`, ccTypeName, `) MarshalTo(data []byte) (int, error) {`) + p.In() + p.P(`var i int`) + p.P(`_ = i`) + p.P(`var l int`) + p.P(`_ = l`) + fields := orderFields(message.GetField()) + sort.Sort(fields) + oneofs := make(map[string]struct{}) + for _, field := range message.Field { + oneof := field.OneofIndex != nil + if !oneof { + proto3 := gogoproto.IsProto3(file.FileDescriptorProto) + p.generateField(proto3, numGen, file, message, field) + } else { + fieldname := p.GetFieldName(message, field) + if _, ok := oneofs[fieldname]; !ok { + oneofs[fieldname] = struct{}{} + p.P(`if m.`, fieldname, ` != nil {`) + p.In() + p.P(`nn`, numGen.Next(), `, err := m.`, fieldname, `.MarshalTo(data[i:])`) + p.P(`if err != nil {`) + p.In() + p.P(`return 0, err`) + p.Out() + p.P(`}`) + p.P(`i+=nn`, numGen.Current()) + p.Out() + p.P(`}`) + } + } + } + if message.DescriptorProto.HasExtension() { + if gogoproto.HasExtensionsMap(file.FileDescriptorProto, message.DescriptorProto) { + p.P(`if len(m.XXX_extensions) > 0 {`) + p.In() + p.P(`n, err := `, p.protoPkg.Use(), `.EncodeExtensionMap(m.XXX_extensions, data[i:])`) + p.P(`if err != nil {`) + p.In() + p.P(`return 0, err`) + p.Out() + p.P(`}`) + p.P(`i+=n`) + p.Out() + p.P(`}`) + } else { + p.P(`if m.XXX_extensions != nil {`) + p.In() + p.P(`i+=copy(data[i:], m.XXX_extensions)`) + p.Out() + p.P(`}`) + } + } + if gogoproto.HasUnrecognized(file.FileDescriptorProto, message.DescriptorProto) { + p.P(`if m.XXX_unrecognized != nil {`) + p.In() + p.P(`i+=copy(data[i:], m.XXX_unrecognized)`) + p.Out() + p.P(`}`) + } + + p.P(`return i, nil`) + p.Out() + p.P(`}`) + p.P() + + //Generate MarshalTo methods for oneof fields + m := proto.Clone(message.DescriptorProto).(*descriptor.DescriptorProto) + for _, field := range m.Field { + oneof := field.OneofIndex != nil + if !oneof { + continue + } + 
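+			// The field is part of a oneof: generate a MarshalTo method on its wrapper type below.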
ccTypeName := p.OneOfTypeName(message, field) + p.P(`func (m *`, ccTypeName, `) MarshalTo(data []byte) (int, error) {`) + p.In() + p.P(`i := 0`) + vanity.TurnOffNullableForNativeTypesWithoutDefaultsOnly(field) + p.generateField(false, numGen, file, message, field) + p.P(`return i, nil`) + p.Out() + p.P(`}`) + } + } + + if p.atleastOne { + p.P(`func encodeFixed64`, p.localName, `(data []byte, offset int, v uint64) int {`) + p.In() + p.P(`data[offset] = uint8(v)`) + p.P(`data[offset+1] = uint8(v >> 8)`) + p.P(`data[offset+2] = uint8(v >> 16)`) + p.P(`data[offset+3] = uint8(v >> 24)`) + p.P(`data[offset+4] = uint8(v >> 32)`) + p.P(`data[offset+5] = uint8(v >> 40)`) + p.P(`data[offset+6] = uint8(v >> 48)`) + p.P(`data[offset+7] = uint8(v >> 56)`) + p.P(`return offset+8`) + p.Out() + p.P(`}`) + + p.P(`func encodeFixed32`, p.localName, `(data []byte, offset int, v uint32) int {`) + p.In() + p.P(`data[offset] = uint8(v)`) + p.P(`data[offset+1] = uint8(v >> 8)`) + p.P(`data[offset+2] = uint8(v >> 16)`) + p.P(`data[offset+3] = uint8(v >> 24)`) + p.P(`return offset+4`) + p.Out() + p.P(`}`) + + p.P(`func encodeVarint`, p.localName, `(data []byte, offset int, v uint64) int {`) + p.In() + p.P(`for v >= 1<<7 {`) + p.In() + p.P(`data[offset] = uint8(v&0x7f|0x80)`) + p.P(`v >>= 7`) + p.P(`offset++`) + p.Out() + p.P(`}`) + p.P(`data[offset] = uint8(v)`) + p.P(`return offset+1`) + p.Out() + p.P(`}`) + } + +} + +func init() { + generator.RegisterPlugin(NewMarshal()) + generator.RegisterPlugin(NewUnsafeMarshal()) +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/oneofcheck/oneofcheck.go b/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/oneofcheck/oneofcheck.go new file mode 100644 index 000000000000..cd0d19a77c23 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/oneofcheck/oneofcheck.go @@ -0,0 +1,91 @@ +// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +The oneofcheck plugin is used to check whether oneof is not used incorrectly. 
+For instance: +An error is caused if a oneof field: + - is used in a face + - is an embedded field + +*/ +package oneofcheck + +import ( + "fmt" + "github.com/gogo/protobuf/gogoproto" + "github.com/gogo/protobuf/protoc-gen-gogo/generator" + "os" +) + +type plugin struct { + *generator.Generator +} + +func NewPlugin() *plugin { + return &plugin{} +} + +func (p *plugin) Name() string { + return "oneofcheck" +} + +func (p *plugin) Init(g *generator.Generator) { + p.Generator = g +} + +func (p *plugin) Generate(file *generator.FileDescriptor) { + for _, msg := range file.Messages() { + face := gogoproto.IsFace(file.FileDescriptorProto, msg.DescriptorProto) + for _, field := range msg.GetField() { + if field.OneofIndex == nil { + continue + } + if face { + fmt.Fprintf(os.Stderr, "ERROR: field %v.%v cannot be in a face and oneof\n", generator.CamelCase(*msg.Name), generator.CamelCase(*field.Name)) + os.Exit(1) + } + if gogoproto.IsEmbed(field) { + fmt.Fprintf(os.Stderr, "ERROR: field %v.%v cannot be in an oneof and an embedded field\n", generator.CamelCase(*msg.Name), generator.CamelCase(*field.Name)) + os.Exit(1) + } + if !gogoproto.IsNullable(field) { + fmt.Fprintf(os.Stderr, "ERROR: field %v.%v cannot be in an oneof and a non-nullable field\n", generator.CamelCase(*msg.Name), generator.CamelCase(*field.Name)) + os.Exit(1) + } + if gogoproto.IsUnion(file.FileDescriptorProto, msg.DescriptorProto) { + fmt.Fprintf(os.Stderr, "ERROR: field %v.%v cannot be in an oneof and in an union (deprecated)\n", generator.CamelCase(*msg.Name), generator.CamelCase(*field.Name)) + os.Exit(1) + } + } + } +} + +func (p *plugin) GenerateImports(*generator.FileDescriptor) {} + +func init() { + generator.RegisterPlugin(NewPlugin()) +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/populate/populate.go b/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/populate/populate.go new file mode 100644 index 000000000000..6594096f7f73 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/populate/populate.go @@ -0,0 +1,774 @@ +// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+
+/*
+The populate plugin generates a NewPopulated function.
+This function returns a newly populated structure.
+
+It is enabled by the following extensions:
+
+  - populate
+  - populate_all
+
+Let us look at:
+
+  github.com/gogo/protobuf/test/example/example.proto
+
+By the way, all the output can be seen at:
+
+  github.com/gogo/protobuf/test/example/*
+
+The following message:
+
+  option (gogoproto.populate_all) = true;
+
+  message B {
+	optional A A = 1 [(gogoproto.nullable) = false, (gogoproto.embed) = true];
+	repeated bytes G = 2 [(gogoproto.customtype) = "github.com/gogo/protobuf/test/custom.Uint128", (gogoproto.nullable) = false];
+  }
+
+given to the populate plugin, will generate the following code:
+
+  func NewPopulatedB(r randyExample, easy bool) *B {
+	this := &B{}
+	v2 := NewPopulatedA(r, easy)
+	this.A = *v2
+	if r.Intn(10) != 0 {
+		v3 := r.Intn(10)
+		this.G = make([]github_com_gogo_protobuf_test_custom.Uint128, v3)
+		for i := 0; i < v3; i++ {
+			v4 := github_com_gogo_protobuf_test_custom.NewPopulatedUint128(r)
+			this.G[i] = *v4
+		}
+	}
+	if !easy && r.Intn(10) != 0 {
+		this.XXX_unrecognized = randUnrecognizedExample(r, 3)
+	}
+	return this
+  }
+
+This is useful for testing.
+Most of the other plugins' generated test code uses it.
+You will still be able to use the generated test code of other packages
+if you turn off the populate plugin and write your own custom NewPopulated function.
+
+If the easy flag is not set, the XXX_unrecognized and XXX_extensions fields are also populated.
+These have caused problems with JSON marshalling and unmarshalling tests.
+
+*/
+package populate
+
+import (
+	"fmt"
+	"github.com/gogo/protobuf/gogoproto"
+	"github.com/gogo/protobuf/proto"
+	descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
+	"github.com/gogo/protobuf/protoc-gen-gogo/generator"
+	"github.com/gogo/protobuf/vanity"
+	"math"
+	"strconv"
+	"strings"
+)
+
+type VarGen interface {
+	Next() string
+	Current() string
+}
+
+type varGen struct {
+	index int64
+}
+
+func NewVarGen() VarGen {
+	return &varGen{0}
+}
+
+func (this *varGen) Next() string {
+	this.index++
+	return fmt.Sprintf("v%d", this.index)
+}
+
+func (this *varGen) Current() string {
+	return fmt.Sprintf("v%d", this.index)
+}
+
+type plugin struct {
+	*generator.Generator
+	generator.PluginImports
+	varGen     VarGen
+	atleastOne bool
+	localName  string
+}
+
+func NewPlugin() *plugin {
+	return &plugin{}
+}
+
+func (p *plugin) Name() string {
+	return "populate"
+}
+
+func (p *plugin) Init(g *generator.Generator) {
+	p.Generator = g
+}
+
+func value(typeName string, fieldType descriptor.FieldDescriptorProto_Type) string {
+	switch fieldType {
+	case descriptor.FieldDescriptorProto_TYPE_DOUBLE:
+		return typeName + "(r.Float64())"
+	case descriptor.FieldDescriptorProto_TYPE_FLOAT:
+		return typeName + "(r.Float32())"
+	case descriptor.FieldDescriptorProto_TYPE_INT64,
+		descriptor.FieldDescriptorProto_TYPE_SFIXED64,
+		descriptor.FieldDescriptorProto_TYPE_SINT64:
+		return typeName + "(r.Int63())"
+	case descriptor.FieldDescriptorProto_TYPE_UINT64,
+		descriptor.FieldDescriptorProto_TYPE_FIXED64:
+		return typeName + "(uint64(r.Uint32()))"
+	case descriptor.FieldDescriptorProto_TYPE_INT32,
+		descriptor.FieldDescriptorProto_TYPE_SINT32,
+		descriptor.FieldDescriptorProto_TYPE_SFIXED32,
+		descriptor.FieldDescriptorProto_TYPE_ENUM:
+		return typeName + "(r.Int31())"
+	case descriptor.FieldDescriptorProto_TYPE_UINT32,
+		descriptor.FieldDescriptorProto_TYPE_FIXED32:
+		return typeName + "(r.Uint32())"
+	case
descriptor.FieldDescriptorProto_TYPE_BOOL: + return typeName + `(bool(r.Intn(2) == 0))` + case descriptor.FieldDescriptorProto_TYPE_STRING, + descriptor.FieldDescriptorProto_TYPE_GROUP, + descriptor.FieldDescriptorProto_TYPE_MESSAGE, + descriptor.FieldDescriptorProto_TYPE_BYTES: + } + panic(fmt.Errorf("unexpected type %v", typeName)) +} + +func negative(fieldType descriptor.FieldDescriptorProto_Type) bool { + switch fieldType { + case descriptor.FieldDescriptorProto_TYPE_UINT64, + descriptor.FieldDescriptorProto_TYPE_FIXED64, + descriptor.FieldDescriptorProto_TYPE_UINT32, + descriptor.FieldDescriptorProto_TYPE_FIXED32, + descriptor.FieldDescriptorProto_TYPE_BOOL: + return false + } + return true +} + +func getFuncName(goTypName string) string { + funcName := "NewPopulated" + goTypName + goTypNames := strings.Split(goTypName, ".") + if len(goTypNames) == 2 { + funcName = goTypNames[0] + ".NewPopulated" + goTypNames[1] + } else if len(goTypNames) != 1 { + panic(fmt.Errorf("unreachable: too many dots in %v", goTypName)) + } + return funcName +} + +func getFuncCall(goTypName string) string { + funcName := getFuncName(goTypName) + funcCall := funcName + "(r, easy)" + return funcCall +} + +func getCustomFuncCall(goTypName string) string { + funcName := getFuncName(goTypName) + funcCall := funcName + "(r)" + return funcCall +} + +func (p *plugin) getEnumVal(field *descriptor.FieldDescriptorProto, goTyp string) string { + enum := p.ObjectNamed(field.GetTypeName()).(*generator.EnumDescriptor) + l := len(enum.Value) + values := make([]string, l) + for i := range enum.Value { + values[i] = strconv.Itoa(int(*enum.Value[i].Number)) + } + arr := "[]int32{" + strings.Join(values, ",") + "}" + val := strings.Join([]string{generator.GoTypeToName(goTyp), `(`, arr, `[r.Intn(`, fmt.Sprintf("%d", l), `)])`}, "") + return val +} + +func (p *plugin) GenerateField(file *generator.FileDescriptor, message *generator.Descriptor, field *descriptor.FieldDescriptorProto) { + proto3 := gogoproto.IsProto3(file.FileDescriptorProto) + goTyp, _ := p.GoType(message, field) + fieldname := p.GetOneOfFieldName(message, field) + goTypName := generator.GoTypeToName(goTyp) + if p.IsMap(field) { + m := p.GoMapType(nil, field) + keygoTyp, _ := p.GoType(nil, m.KeyField) + keygoTyp = strings.Replace(keygoTyp, "*", "", 1) + keygoAliasTyp, _ := p.GoType(nil, m.KeyAliasField) + keygoAliasTyp = strings.Replace(keygoAliasTyp, "*", "", 1) + + valuegoTyp, _ := p.GoType(nil, m.ValueField) + valuegoAliasTyp, _ := p.GoType(nil, m.ValueAliasField) + keytypName := generator.GoTypeToName(keygoTyp) + keygoAliasTyp = generator.GoTypeToName(keygoAliasTyp) + valuetypAliasName := generator.GoTypeToName(valuegoAliasTyp) + + nullable, valuegoTyp, valuegoAliasTyp := generator.GoMapValueTypes(field, m.ValueField, valuegoTyp, valuegoAliasTyp) + + p.P(p.varGen.Next(), ` := r.Intn(10)`) + p.P(`this.`, fieldname, ` = make(`, m.GoType, `)`) + p.P(`for i := 0; i < `, p.varGen.Current(), `; i++ {`) + p.In() + keyval := "" + if m.KeyField.IsString() { + keyval = fmt.Sprintf("randString%v(r)", p.localName) + } else { + keyval = value(keytypName, m.KeyField.GetType()) + } + if keygoAliasTyp != keygoTyp { + keyval = keygoAliasTyp + `(` + keyval + `)` + } + if m.ValueField.IsMessage() || p.IsGroup(field) { + s := `this.` + fieldname + `[` + keyval + `] = ` + goTypName := generator.GoTypeToName(valuegoTyp) + funcCall := getFuncCall(goTypName) + if !nullable { + funcCall = `*` + funcCall + } + if valuegoTyp != valuegoAliasTyp { + funcCall = `(` + valuegoAliasTyp + `)(` 
+ funcCall + `)` + } + s += funcCall + p.P(s) + } else if m.ValueField.IsEnum() { + s := `this.` + fieldname + `[` + keyval + `]` + ` = ` + p.getEnumVal(m.ValueField, valuegoTyp) + p.P(s) + } else if m.ValueField.IsBytes() { + count := p.varGen.Next() + p.P(count, ` := r.Intn(100)`) + p.P(p.varGen.Next(), ` := `, keyval) + p.P(`this.`, fieldname, `[`, p.varGen.Current(), `] = make(`, valuegoTyp, `, `, count, `)`) + p.P(`for i := 0; i < `, count, `; i++ {`) + p.In() + p.P(`this.`, fieldname, `[`, p.varGen.Current(), `][i] = byte(r.Intn(256))`) + p.Out() + p.P(`}`) + } else if m.ValueField.IsString() { + s := `this.` + fieldname + `[` + keyval + `]` + ` = ` + fmt.Sprintf("randString%v(r)", p.localName) + p.P(s) + } else { + p.P(p.varGen.Next(), ` := `, keyval) + p.P(`this.`, fieldname, `[`, p.varGen.Current(), `] = `, value(valuetypAliasName, m.ValueField.GetType())) + if negative(m.ValueField.GetType()) { + p.P(`if r.Intn(2) == 0 {`) + p.In() + p.P(`this.`, fieldname, `[`, p.varGen.Current(), `] *= -1`) + p.Out() + p.P(`}`) + } + } + p.Out() + p.P(`}`) + } else if field.IsMessage() || p.IsGroup(field) { + funcCall := getFuncCall(goTypName) + if field.IsRepeated() { + p.P(p.varGen.Next(), ` := r.Intn(5)`) + p.P(`this.`, fieldname, ` = make(`, goTyp, `, `, p.varGen.Current(), `)`) + p.P(`for i := 0; i < `, p.varGen.Current(), `; i++ {`) + p.In() + if gogoproto.IsNullable(field) { + p.P(`this.`, fieldname, `[i] = `, funcCall) + } else { + p.P(p.varGen.Next(), `:= `, funcCall) + p.P(`this.`, fieldname, `[i] = *`, p.varGen.Current()) + } + p.Out() + p.P(`}`) + } else { + if gogoproto.IsNullable(field) { + p.P(`this.`, fieldname, ` = `, funcCall) + } else { + p.P(p.varGen.Next(), `:= `, funcCall) + p.P(`this.`, fieldname, ` = *`, p.varGen.Current()) + } + } + } else { + if field.IsEnum() { + val := p.getEnumVal(field, goTyp) + if field.IsRepeated() { + p.P(p.varGen.Next(), ` := r.Intn(10)`) + p.P(`this.`, fieldname, ` = make(`, goTyp, `, `, p.varGen.Current(), `)`) + p.P(`for i := 0; i < `, p.varGen.Current(), `; i++ {`) + p.In() + p.P(`this.`, fieldname, `[i] = `, val) + p.Out() + p.P(`}`) + } else if !gogoproto.IsNullable(field) || proto3 { + p.P(`this.`, fieldname, ` = `, val) + } else { + p.P(p.varGen.Next(), ` := `, val) + p.P(`this.`, fieldname, ` = &`, p.varGen.Current()) + } + } else if gogoproto.IsCustomType(field) { + funcCall := getCustomFuncCall(goTypName) + if field.IsRepeated() { + p.P(p.varGen.Next(), ` := r.Intn(10)`) + p.P(`this.`, fieldname, ` = make(`, goTyp, `, `, p.varGen.Current(), `)`) + p.P(`for i := 0; i < `, p.varGen.Current(), `; i++ {`) + p.In() + p.P(p.varGen.Next(), `:= `, funcCall) + p.P(`this.`, fieldname, `[i] = *`, p.varGen.Current()) + p.Out() + p.P(`}`) + } else if gogoproto.IsNullable(field) { + p.P(`this.`, fieldname, ` = `, funcCall) + } else { + p.P(p.varGen.Next(), `:= `, funcCall) + p.P(`this.`, fieldname, ` = *`, p.varGen.Current()) + } + } else if field.IsBytes() { + if field.IsRepeated() { + p.P(p.varGen.Next(), ` := r.Intn(10)`) + p.P(`this.`, fieldname, ` = make(`, goTyp, `, `, p.varGen.Current(), `)`) + p.P(`for i := 0; i < `, p.varGen.Current(), `; i++ {`) + p.In() + p.P(p.varGen.Next(), ` := r.Intn(100)`) + p.P(`this.`, fieldname, `[i] = make([]byte,`, p.varGen.Current(), `)`) + p.P(`for j := 0; j < `, p.varGen.Current(), `; j++ {`) + p.In() + p.P(`this.`, fieldname, `[i][j] = byte(r.Intn(256))`) + p.Out() + p.P(`}`) + p.Out() + p.P(`}`) + } else { + p.P(p.varGen.Next(), ` := r.Intn(100)`) + p.P(`this.`, fieldname, ` = make(`, goTyp, `, `, 
p.varGen.Current(), `)`) + p.P(`for i := 0; i < `, p.varGen.Current(), `; i++ {`) + p.In() + p.P(`this.`, fieldname, `[i] = byte(r.Intn(256))`) + p.Out() + p.P(`}`) + } + } else if field.IsString() { + val := fmt.Sprintf("randString%v(r)", p.localName) + if field.IsRepeated() { + p.P(p.varGen.Next(), ` := r.Intn(10)`) + p.P(`this.`, fieldname, ` = make(`, goTyp, `, `, p.varGen.Current(), `)`) + p.P(`for i := 0; i < `, p.varGen.Current(), `; i++ {`) + p.In() + p.P(`this.`, fieldname, `[i] = `, val) + p.Out() + p.P(`}`) + } else if !gogoproto.IsNullable(field) || proto3 { + p.P(`this.`, fieldname, ` = `, val) + } else { + p.P(p.varGen.Next(), `:= `, val) + p.P(`this.`, fieldname, ` = &`, p.varGen.Current()) + } + } else { + typName := generator.GoTypeToName(goTyp) + if field.IsRepeated() { + p.P(p.varGen.Next(), ` := r.Intn(10)`) + p.P(`this.`, fieldname, ` = make(`, goTyp, `, `, p.varGen.Current(), `)`) + p.P(`for i := 0; i < `, p.varGen.Current(), `; i++ {`) + p.In() + p.P(`this.`, fieldname, `[i] = `, value(typName, field.GetType())) + if negative(field.GetType()) { + p.P(`if r.Intn(2) == 0 {`) + p.In() + p.P(`this.`, fieldname, `[i] *= -1`) + p.Out() + p.P(`}`) + } + p.Out() + p.P(`}`) + } else if !gogoproto.IsNullable(field) || proto3 { + p.P(`this.`, fieldname, ` = `, value(typName, field.GetType())) + if negative(field.GetType()) { + p.P(`if r.Intn(2) == 0 {`) + p.In() + p.P(`this.`, fieldname, ` *= -1`) + p.Out() + p.P(`}`) + } + } else { + p.P(p.varGen.Next(), ` := `, value(typName, field.GetType())) + if negative(field.GetType()) { + p.P(`if r.Intn(2) == 0 {`) + p.In() + p.P(p.varGen.Current(), ` *= -1`) + p.Out() + p.P(`}`) + } + p.P(`this.`, fieldname, ` = &`, p.varGen.Current()) + } + } + } +} + +func (p *plugin) hasLoop(field *descriptor.FieldDescriptorProto, visited []*generator.Descriptor, excludes []*generator.Descriptor) *generator.Descriptor { + if field.IsMessage() || p.IsGroup(field) || p.IsMap(field) { + var fieldMessage *generator.Descriptor + if p.IsMap(field) { + m := p.GoMapType(nil, field) + if !m.ValueField.IsMessage() { + return nil + } + fieldMessage = p.ObjectNamed(m.ValueField.GetTypeName()).(*generator.Descriptor) + } else { + fieldMessage = p.ObjectNamed(field.GetTypeName()).(*generator.Descriptor) + } + fieldTypeName := generator.CamelCaseSlice(fieldMessage.TypeName()) + for _, message := range visited { + messageTypeName := generator.CamelCaseSlice(message.TypeName()) + if fieldTypeName == messageTypeName { + for _, e := range excludes { + if fieldTypeName == generator.CamelCaseSlice(e.TypeName()) { + return nil + } + } + return fieldMessage + } + } + for _, f := range fieldMessage.Field { + visited = append(visited, fieldMessage) + loopTo := p.hasLoop(f, visited, excludes) + if loopTo != nil { + return loopTo + } + } + } + return nil +} + +func (p *plugin) loops(field *descriptor.FieldDescriptorProto, message *generator.Descriptor) int { + //fmt.Fprintf(os.Stderr, "loops %v %v\n", field.GetTypeName(), generator.CamelCaseSlice(message.TypeName())) + excludes := []*generator.Descriptor{} + loops := 0 + for { + visited := []*generator.Descriptor{} + loopTo := p.hasLoop(field, visited, excludes) + if loopTo == nil { + break + } + //fmt.Fprintf(os.Stderr, "loopTo %v\n", generator.CamelCaseSlice(loopTo.TypeName())) + excludes = append(excludes, loopTo) + loops++ + } + return loops +} + +func (p *plugin) Generate(file *generator.FileDescriptor) { + p.atleastOne = false + p.PluginImports = generator.NewPluginImports(p.Generator) + p.varGen = NewVarGen() + proto3 
:= gogoproto.IsProto3(file.FileDescriptorProto) + + p.localName = generator.FileName(file) + protoPkg := p.NewImport("github.com/gogo/protobuf/proto") + if !gogoproto.ImportsGoGoProto(file.FileDescriptorProto) { + protoPkg = p.NewImport("github.com/golang/protobuf/proto") + } + + for _, message := range file.Messages() { + if !gogoproto.HasPopulate(file.FileDescriptorProto, message.DescriptorProto) { + continue + } + if message.DescriptorProto.GetOptions().GetMapEntry() { + continue + } + p.atleastOne = true + ccTypeName := generator.CamelCaseSlice(message.TypeName()) + loopLevels := make([]int, len(message.Field)) + maxLoopLevel := 0 + for i, field := range message.Field { + loopLevels[i] = p.loops(field, message) + if loopLevels[i] > maxLoopLevel { + maxLoopLevel = loopLevels[i] + } + } + ranTotal := 0 + for i := range loopLevels { + ranTotal += int(math.Pow10(maxLoopLevel - loopLevels[i])) + } + p.P(`func NewPopulated`, ccTypeName, `(r randy`, p.localName, `, easy bool) *`, ccTypeName, ` {`) + p.In() + p.P(`this := &`, ccTypeName, `{}`) + if gogoproto.IsUnion(message.File(), message.DescriptorProto) && len(message.Field) > 0 { + p.P(`fieldNum := r.Intn(`, fmt.Sprintf("%d", ranTotal), `)`) + p.P(`switch fieldNum {`) + k := 0 + for i, field := range message.Field { + is := []string{} + ran := int(math.Pow10(maxLoopLevel - loopLevels[i])) + for j := 0; j < ran; j++ { + is = append(is, fmt.Sprintf("%d", j+k)) + } + k += ran + p.P(`case `, strings.Join(is, ","), `:`) + p.In() + p.GenerateField(file, message, field) + p.Out() + } + p.P(`}`) + } else { + var maxFieldNumber int32 + oneofs := make(map[string]struct{}) + for fieldIndex, field := range message.Field { + if field.GetNumber() > maxFieldNumber { + maxFieldNumber = field.GetNumber() + } + oneof := field.OneofIndex != nil + if !oneof { + if field.IsRequired() || (!gogoproto.IsNullable(field) && !field.IsRepeated()) || (proto3 && !field.IsMessage()) { + p.GenerateField(file, message, field) + } else { + if loopLevels[fieldIndex] > 0 { + p.P(`if r.Intn(10) == 0 {`) + } else { + p.P(`if r.Intn(10) != 0 {`) + } + p.In() + p.GenerateField(file, message, field) + p.Out() + p.P(`}`) + } + } else { + fieldname := p.GetFieldName(message, field) + if _, ok := oneofs[fieldname]; ok { + continue + } else { + oneofs[fieldname] = struct{}{} + } + fieldNumbers := []int32{} + for _, f := range message.Field { + fname := p.GetFieldName(message, f) + if fname == fieldname { + fieldNumbers = append(fieldNumbers, f.GetNumber()) + } + } + + p.P(`oneofNumber_`, fieldname, ` := `, fmt.Sprintf("%#v", fieldNumbers), `[r.Intn(`, strconv.Itoa(len(fieldNumbers)), `)]`) + p.P(`switch oneofNumber_`, fieldname, ` {`) + for _, f := range message.Field { + fname := p.GetFieldName(message, f) + if fname != fieldname { + continue + } + p.P(`case `, strconv.Itoa(int(f.GetNumber())), `:`) + p.In() + ccTypeName := p.OneOfTypeName(message, f) + p.P(`this.`, fname, ` = NewPopulated`, ccTypeName, `(r, easy)`) + p.Out() + } + p.P(`}`) + } + } + if message.DescriptorProto.HasExtension() { + p.P(`if !easy && r.Intn(10) != 0 {`) + p.In() + p.P(`l := r.Intn(5)`) + p.P(`for i := 0; i < l; i++ {`) + p.In() + if len(message.DescriptorProto.GetExtensionRange()) > 1 { + p.P(`eIndex := r.Intn(`, strconv.Itoa(len(message.DescriptorProto.GetExtensionRange())), `)`) + p.P(`fieldNumber := 0`) + p.P(`switch eIndex {`) + for i, e := range message.DescriptorProto.GetExtensionRange() { + p.P(`case `, strconv.Itoa(i), `:`) + p.In() + p.P(`fieldNumber = r.Intn(`, 
strconv.Itoa(int(e.GetEnd()-e.GetStart())), `) + `, strconv.Itoa(int(e.GetStart()))) + p.Out() + if e.GetEnd() > maxFieldNumber { + maxFieldNumber = e.GetEnd() + } + } + p.P(`}`) + } else { + e := message.DescriptorProto.GetExtensionRange()[0] + p.P(`fieldNumber := r.Intn(`, strconv.Itoa(int(e.GetEnd()-e.GetStart())), `) + `, strconv.Itoa(int(e.GetStart()))) + if e.GetEnd() > maxFieldNumber { + maxFieldNumber = e.GetEnd() + } + } + p.P(`wire := r.Intn(4)`) + p.P(`if wire == 3 { wire = 5 }`) + p.P(`data := randField`, p.localName, `(nil, r, fieldNumber, wire)`) + p.P(protoPkg.Use(), `.SetRawExtension(this, int32(fieldNumber), data)`) + p.Out() + p.P(`}`) + p.Out() + p.P(`}`) + } + + if maxFieldNumber < (1 << 10) { + p.P(`if !easy && r.Intn(10) != 0 {`) + p.In() + if gogoproto.HasUnrecognized(file.FileDescriptorProto, message.DescriptorProto) { + p.P(`this.XXX_unrecognized = randUnrecognized`, p.localName, `(r, `, strconv.Itoa(int(maxFieldNumber+1)), `)`) + } + p.Out() + p.P(`}`) + } + } + p.P(`return this`) + p.Out() + p.P(`}`) + p.P(``) + + //Generate NewPopulated functions for oneof fields + m := proto.Clone(message.DescriptorProto).(*descriptor.DescriptorProto) + for _, f := range m.Field { + oneof := f.OneofIndex != nil + if !oneof { + continue + } + ccTypeName := p.OneOfTypeName(message, f) + p.P(`func NewPopulated`, ccTypeName, `(r randy`, p.localName, `, easy bool) *`, ccTypeName, ` {`) + p.In() + p.P(`this := &`, ccTypeName, `{}`) + vanity.TurnOffNullableForNativeTypesWithoutDefaultsOnly(f) + p.GenerateField(file, message, f) + p.P(`return this`) + p.Out() + p.P(`}`) + } + } + + if !p.atleastOne { + return + } + + p.P(`type randy`, p.localName, ` interface {`) + p.In() + p.P(`Float32() float32`) + p.P(`Float64() float64`) + p.P(`Int63() int64`) + p.P(`Int31() int32`) + p.P(`Uint32() uint32`) + p.P(`Intn(n int) int`) + p.Out() + p.P(`}`) + + p.P(`func randUTF8Rune`, p.localName, `(r randy`, p.localName, `) rune {`) + p.In() + p.P(`ru := r.Intn(62)`) + p.P(`if ru < 10 {`) + p.In() + p.P(`return rune(ru+48)`) + p.Out() + p.P(`} else if ru < 36 {`) + p.In() + p.P(`return rune(ru+55)`) + p.Out() + p.P(`}`) + p.P(`return rune(ru+61)`) + p.Out() + p.P(`}`) + + p.P(`func randString`, p.localName, `(r randy`, p.localName, `) string {`) + p.In() + p.P(p.varGen.Next(), ` := r.Intn(100)`) + p.P(`tmps := make([]rune, `, p.varGen.Current(), `)`) + p.P(`for i := 0; i < `, p.varGen.Current(), `; i++ {`) + p.In() + p.P(`tmps[i] = randUTF8Rune`, p.localName, `(r)`) + p.Out() + p.P(`}`) + p.P(`return string(tmps)`) + p.Out() + p.P(`}`) + + p.P(`func randUnrecognized`, p.localName, `(r randy`, p.localName, `, maxFieldNumber int) (data []byte) {`) + p.In() + p.P(`l := r.Intn(5)`) + p.P(`for i := 0; i < l; i++ {`) + p.In() + p.P(`wire := r.Intn(4)`) + p.P(`if wire == 3 { wire = 5 }`) + p.P(`fieldNumber := maxFieldNumber + r.Intn(100)`) + p.P(`data = randField`, p.localName, `(data, r, fieldNumber, wire)`) + p.Out() + p.P(`}`) + p.P(`return data`) + p.Out() + p.P(`}`) + + p.P(`func randField`, p.localName, `(data []byte, r randy`, p.localName, `, fieldNumber int, wire int) []byte {`) + p.In() + p.P(`key := uint32(fieldNumber)<<3 | uint32(wire)`) + p.P(`switch wire {`) + p.P(`case 0:`) + p.In() + p.P(`data = encodeVarintPopulate`, p.localName, `(data, uint64(key))`) + p.P(p.varGen.Next(), ` := r.Int63()`) + p.P(`if r.Intn(2) == 0 {`) + p.In() + p.P(p.varGen.Current(), ` *= -1`) + p.Out() + p.P(`}`) + p.P(`data = encodeVarintPopulate`, p.localName, `(data, uint64(`, p.varGen.Current(), `))`) + p.Out() + 
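+	// Wire type 1 (fixed64), generated next: after the key, eight random bytes are appended.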
p.P(`case 1:`) + p.In() + p.P(`data = encodeVarintPopulate`, p.localName, `(data, uint64(key))`) + p.P(`data = append(data, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)))`) + p.Out() + p.P(`case 2:`) + p.In() + p.P(`data = encodeVarintPopulate`, p.localName, `(data, uint64(key))`) + p.P(`ll := r.Intn(100)`) + p.P(`data = encodeVarintPopulate`, p.localName, `(data, uint64(ll))`) + p.P(`for j := 0; j < ll; j++ {`) + p.In() + p.P(`data = append(data, byte(r.Intn(256)))`) + p.Out() + p.P(`}`) + p.Out() + p.P(`default:`) + p.In() + p.P(`data = encodeVarintPopulate`, p.localName, `(data, uint64(key))`) + p.P(`data = append(data, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)))`) + p.Out() + p.P(`}`) + p.P(`return data`) + p.Out() + p.P(`}`) + + p.P(`func encodeVarintPopulate`, p.localName, `(data []byte, v uint64) []byte {`) + p.In() + p.P(`for v >= 1<<7 {`) + p.In() + p.P(`data = append(data, uint8(uint64(v)&0x7f|0x80))`) + p.P(`v >>= 7`) + p.Out() + p.P(`}`) + p.P(`data = append(data, uint8(v))`) + p.P(`return data`) + p.Out() + p.P(`}`) + +} + +func init() { + generator.RegisterPlugin(NewPlugin()) +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/size/size.go b/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/size/size.go new file mode 100644 index 000000000000..49b1f962cd38 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/size/size.go @@ -0,0 +1,597 @@ +// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +The size plugin generates a Size or ProtoSize method for each message. +This is useful with the MarshalTo method generated by the marshalto plugin and the +gogoproto.marshaler and gogoproto.marshaler_all extensions. 
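+
+A typical Marshal method generated by the marshalto plugin uses the two together, roughly as follows:
+
+	size := m.Size()
+	data = make([]byte, size)
+	n, err := m.MarshalTo(data)
+	if err != nil {
+		return nil, err
+	}
+	return data[:n], nil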
+
+It is enabled by the following extensions:
+
+  - sizer
+  - sizer_all
+  - protosizer
+  - protosizer_all
+
+The size plugin also generates a test, provided it is enabled by one of the following extensions:
+
+  - testgen
+  - testgen_all
+
+It also generates a benchmark, provided it is enabled by one of the following extensions:
+
+  - benchgen
+  - benchgen_all
+
+Let us look at:
+
+  github.com/gogo/protobuf/test/example/example.proto
+
+By the way, all the output can be seen at:
+
+  github.com/gogo/protobuf/test/example/*
+
+The following message:
+
+  option (gogoproto.sizer_all) = true;
+
+  message B {
+	option (gogoproto.description) = true;
+	optional A A = 1 [(gogoproto.nullable) = false, (gogoproto.embed) = true];
+	repeated bytes G = 2 [(gogoproto.customtype) = "github.com/gogo/protobuf/test/custom.Uint128", (gogoproto.nullable) = false];
+  }
+
+given to the size plugin, will generate the following code:
+
+  func (m *B) Size() (n int) {
+	var l int
+	_ = l
+	l = m.A.Size()
+	n += 1 + l + sovExample(uint64(l))
+	if len(m.G) > 0 {
+		for _, e := range m.G {
+			l = e.Size()
+			n += 1 + l + sovExample(uint64(l))
+		}
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+  }
+
+and the following test code:
+
+  func TestBSize(t *testing5.T) {
+	popr := math_rand5.New(math_rand5.NewSource(time5.Now().UnixNano()))
+	p := NewPopulatedB(popr, true)
+	data, err := github_com_gogo_protobuf_proto2.Marshal(p)
+	if err != nil {
+		panic(err)
+	}
+	size := p.Size()
+	if len(data) != size {
+		t.Fatalf("size %v != marshalled size %v", size, len(data))
+	}
+  }
+
+  func BenchmarkBSize(b *testing5.B) {
+	popr := math_rand5.New(math_rand5.NewSource(616))
+	total := 0
+	pops := make([]*B, 1000)
+	for i := 0; i < 1000; i++ {
+		pops[i] = NewPopulatedB(popr, false)
+	}
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		total += pops[i%1000].Size()
+	}
+	b.SetBytes(int64(total / b.N))
+  }
+
+The sovExample function is the size-of-varint helper for the example.pb.go file:
+it returns the number of bytes needed to encode its argument as a varint.
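+Its generated form (emitted by sizeVarint below) is, in essence:
+
+	func sovExample(x uint64) (n int) {
+		for {
+			n++
+			x >>= 7
+			if x == 0 {
+				break
+			}
+		}
+		return n
+	}
+
+For example, sovExample(300) returns 2, since 300 requires two varint bytes.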
+ +*/ +package size + +import ( + "fmt" + "github.com/gogo/protobuf/gogoproto" + "github.com/gogo/protobuf/proto" + descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" + "github.com/gogo/protobuf/protoc-gen-gogo/generator" + "github.com/gogo/protobuf/vanity" + "strconv" + "strings" +) + +type size struct { + *generator.Generator + generator.PluginImports + atleastOne bool + localName string +} + +func NewSize() *size { + return &size{} +} + +func (p *size) Name() string { + return "size" +} + +func (p *size) Init(g *generator.Generator) { + p.Generator = g +} + +func wireToType(wire string) int { + switch wire { + case "fixed64": + return proto.WireFixed64 + case "fixed32": + return proto.WireFixed32 + case "varint": + return proto.WireVarint + case "bytes": + return proto.WireBytes + case "group": + return proto.WireBytes + case "zigzag32": + return proto.WireVarint + case "zigzag64": + return proto.WireVarint + } + panic("unreachable") +} + +func keySize(fieldNumber int32, wireType int) int { + x := uint32(fieldNumber)<<3 | uint32(wireType) + size := 0 + for size = 0; x > 127; size++ { + x >>= 7 + } + size++ + return size +} + +func (p *size) sizeVarint() { + p.P(` + func sov`, p.localName, `(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n + }`) +} + +func (p *size) sizeZigZag() { + p.P(`func soz`, p.localName, `(x uint64) (n int) { + return sov`, p.localName, `(uint64((x << 1) ^ uint64((int64(x) >> 63)))) + }`) +} + +func (p *size) generateField(proto3 bool, file *generator.FileDescriptor, message *generator.Descriptor, field *descriptor.FieldDescriptorProto, sizeName string) { + fieldname := p.GetOneOfFieldName(message, field) + nullable := gogoproto.IsNullable(field) + repeated := field.IsRepeated() + if repeated { + p.P(`if len(m.`, fieldname, `) > 0 {`) + p.In() + } else if ((!proto3 || field.IsMessage()) && nullable) || (!gogoproto.IsCustomType(field) && *field.Type == descriptor.FieldDescriptorProto_TYPE_BYTES) { + p.P(`if m.`, fieldname, ` != nil {`) + p.In() + } + packed := field.IsPacked() + _, wire := p.GoType(message, field) + wireType := wireToType(wire) + fieldNumber := field.GetNumber() + if packed { + wireType = proto.WireBytes + } + key := keySize(fieldNumber, wireType) + switch *field.Type { + case descriptor.FieldDescriptorProto_TYPE_DOUBLE, + descriptor.FieldDescriptorProto_TYPE_FIXED64, + descriptor.FieldDescriptorProto_TYPE_SFIXED64: + if packed { + p.P(`n+=`, strconv.Itoa(key), `+sov`, p.localName, `(uint64(len(m.`, fieldname, `)*8))`, `+len(m.`, fieldname, `)*8`) + } else if repeated { + p.P(`n+=`, strconv.Itoa(key+8), `*len(m.`, fieldname, `)`) + } else if proto3 { + p.P(`if m.`, fieldname, ` != 0 {`) + p.In() + p.P(`n+=`, strconv.Itoa(key+8)) + p.Out() + p.P(`}`) + } else if nullable { + p.P(`n+=`, strconv.Itoa(key+8)) + } else { + p.P(`n+=`, strconv.Itoa(key+8)) + } + case descriptor.FieldDescriptorProto_TYPE_FLOAT, + descriptor.FieldDescriptorProto_TYPE_FIXED32, + descriptor.FieldDescriptorProto_TYPE_SFIXED32: + if packed { + p.P(`n+=`, strconv.Itoa(key), `+sov`, p.localName, `(uint64(len(m.`, fieldname, `)*4))`, `+len(m.`, fieldname, `)*4`) + } else if repeated { + p.P(`n+=`, strconv.Itoa(key+4), `*len(m.`, fieldname, `)`) + } else if proto3 { + p.P(`if m.`, fieldname, ` != 0 {`) + p.In() + p.P(`n+=`, strconv.Itoa(key+4)) + p.Out() + p.P(`}`) + } else if nullable { + p.P(`n+=`, strconv.Itoa(key+4)) + } else { + p.P(`n+=`, strconv.Itoa(key+4)) + } + case descriptor.FieldDescriptorProto_TYPE_INT64, + 
descriptor.FieldDescriptorProto_TYPE_UINT64, + descriptor.FieldDescriptorProto_TYPE_UINT32, + descriptor.FieldDescriptorProto_TYPE_ENUM, + descriptor.FieldDescriptorProto_TYPE_INT32: + if packed { + p.P(`l = 0`) + p.P(`for _, e := range m.`, fieldname, ` {`) + p.In() + p.P(`l+=sov`, p.localName, `(uint64(e))`) + p.Out() + p.P(`}`) + p.P(`n+=`, strconv.Itoa(key), `+sov`, p.localName, `(uint64(l))+l`) + } else if repeated { + p.P(`for _, e := range m.`, fieldname, ` {`) + p.In() + p.P(`n+=`, strconv.Itoa(key), `+sov`, p.localName, `(uint64(e))`) + p.Out() + p.P(`}`) + } else if proto3 { + p.P(`if m.`, fieldname, ` != 0 {`) + p.In() + p.P(`n+=`, strconv.Itoa(key), `+sov`, p.localName, `(uint64(m.`, fieldname, `))`) + p.Out() + p.P(`}`) + } else if nullable { + p.P(`n+=`, strconv.Itoa(key), `+sov`, p.localName, `(uint64(*m.`, fieldname, `))`) + } else { + p.P(`n+=`, strconv.Itoa(key), `+sov`, p.localName, `(uint64(m.`, fieldname, `))`) + } + case descriptor.FieldDescriptorProto_TYPE_BOOL: + if packed { + p.P(`n+=`, strconv.Itoa(key), `+sov`, p.localName, `(uint64(len(m.`, fieldname, `)))`, `+len(m.`, fieldname, `)*1`) + } else if repeated { + p.P(`n+=`, strconv.Itoa(key+1), `*len(m.`, fieldname, `)`) + } else if proto3 { + p.P(`if m.`, fieldname, ` {`) + p.In() + p.P(`n+=`, strconv.Itoa(key+1)) + p.Out() + p.P(`}`) + } else if nullable { + p.P(`n+=`, strconv.Itoa(key+1)) + } else { + p.P(`n+=`, strconv.Itoa(key+1)) + } + case descriptor.FieldDescriptorProto_TYPE_STRING: + if repeated { + p.P(`for _, s := range m.`, fieldname, ` { `) + p.In() + p.P(`l = len(s)`) + p.P(`n+=`, strconv.Itoa(key), `+l+sov`, p.localName, `(uint64(l))`) + p.Out() + p.P(`}`) + } else if proto3 { + p.P(`l=len(m.`, fieldname, `)`) + p.P(`if l > 0 {`) + p.In() + p.P(`n+=`, strconv.Itoa(key), `+l+sov`, p.localName, `(uint64(l))`) + p.Out() + p.P(`}`) + } else if nullable { + p.P(`l=len(*m.`, fieldname, `)`) + p.P(`n+=`, strconv.Itoa(key), `+l+sov`, p.localName, `(uint64(l))`) + } else { + p.P(`l=len(m.`, fieldname, `)`) + p.P(`n+=`, strconv.Itoa(key), `+l+sov`, p.localName, `(uint64(l))`) + } + case descriptor.FieldDescriptorProto_TYPE_GROUP: + panic(fmt.Errorf("size does not support group %v", fieldname)) + case descriptor.FieldDescriptorProto_TYPE_MESSAGE: + if generator.IsMap(file.FileDescriptorProto, field) { + m := p.GoMapType(nil, field) + _, keywire := p.GoType(nil, m.KeyAliasField) + valuegoTyp, _ := p.GoType(nil, m.ValueField) + valuegoAliasTyp, valuewire := p.GoType(nil, m.ValueAliasField) + _, fieldwire := p.GoType(nil, field) + + nullable, valuegoTyp, valuegoAliasTyp = generator.GoMapValueTypes(field, m.ValueField, valuegoTyp, valuegoAliasTyp) + + fieldKeySize := keySize(field.GetNumber(), wireToType(fieldwire)) + keyKeySize := keySize(1, wireToType(keywire)) + valueKeySize := keySize(2, wireToType(valuewire)) + p.P(`for k, v := range m.`, fieldname, ` { `) + p.In() + p.P(`_ = k`) + p.P(`_ = v`) + sum := []string{strconv.Itoa(keyKeySize)} + switch m.KeyField.GetType() { + case descriptor.FieldDescriptorProto_TYPE_DOUBLE, + descriptor.FieldDescriptorProto_TYPE_FIXED64, + descriptor.FieldDescriptorProto_TYPE_SFIXED64: + sum = append(sum, `8`) + case descriptor.FieldDescriptorProto_TYPE_FLOAT, + descriptor.FieldDescriptorProto_TYPE_FIXED32, + descriptor.FieldDescriptorProto_TYPE_SFIXED32: + sum = append(sum, `4`) + case descriptor.FieldDescriptorProto_TYPE_INT64, + descriptor.FieldDescriptorProto_TYPE_UINT64, + descriptor.FieldDescriptorProto_TYPE_UINT32, + descriptor.FieldDescriptorProto_TYPE_ENUM, + 
descriptor.FieldDescriptorProto_TYPE_INT32: + sum = append(sum, `sov`+p.localName+`(uint64(k))`) + case descriptor.FieldDescriptorProto_TYPE_BOOL: + sum = append(sum, `1`) + case descriptor.FieldDescriptorProto_TYPE_STRING, + descriptor.FieldDescriptorProto_TYPE_BYTES: + sum = append(sum, `len(k)+sov`+p.localName+`(uint64(len(k)))`) + case descriptor.FieldDescriptorProto_TYPE_SINT32, + descriptor.FieldDescriptorProto_TYPE_SINT64: + sum = append(sum, `soz`+p.localName+`(uint64(k))`) + } + sum = append(sum, strconv.Itoa(valueKeySize)) + switch m.ValueField.GetType() { + case descriptor.FieldDescriptorProto_TYPE_DOUBLE, + descriptor.FieldDescriptorProto_TYPE_FIXED64, + descriptor.FieldDescriptorProto_TYPE_SFIXED64: + sum = append(sum, strconv.Itoa(8)) + case descriptor.FieldDescriptorProto_TYPE_FLOAT, + descriptor.FieldDescriptorProto_TYPE_FIXED32, + descriptor.FieldDescriptorProto_TYPE_SFIXED32: + sum = append(sum, strconv.Itoa(4)) + case descriptor.FieldDescriptorProto_TYPE_INT64, + descriptor.FieldDescriptorProto_TYPE_UINT64, + descriptor.FieldDescriptorProto_TYPE_UINT32, + descriptor.FieldDescriptorProto_TYPE_ENUM, + descriptor.FieldDescriptorProto_TYPE_INT32: + sum = append(sum, `sov`+p.localName+`(uint64(v))`) + case descriptor.FieldDescriptorProto_TYPE_BOOL: + sum = append(sum, `1`) + case descriptor.FieldDescriptorProto_TYPE_STRING, + descriptor.FieldDescriptorProto_TYPE_BYTES: + sum = append(sum, `len(v)+sov`+p.localName+`(uint64(len(v)))`) + case descriptor.FieldDescriptorProto_TYPE_SINT32, + descriptor.FieldDescriptorProto_TYPE_SINT64: + sum = append(sum, `soz`+p.localName+`(uint64(v))`) + case descriptor.FieldDescriptorProto_TYPE_MESSAGE: + if nullable { + p.P(`l = 0`) + p.P(`if v != nil {`) + p.In() + if valuegoTyp != valuegoAliasTyp { + p.P(`l = ((`, valuegoTyp, `)(v)).`, sizeName, `()`) + } else { + p.P(`l = v.`, sizeName, `()`) + } + p.Out() + p.P(`}`) + } else { + if valuegoTyp != valuegoAliasTyp { + p.P(`l = ((*`, valuegoTyp, `)(&v)).`, sizeName, `()`) + } else { + p.P(`l = v.`, sizeName, `()`) + } + } + sum = append(sum, `l+sov`+p.localName+`(uint64(l))`) + } + p.P(`mapEntrySize := `, strings.Join(sum, "+")) + p.P(`n+=mapEntrySize+`, fieldKeySize, `+sov`, p.localName, `(uint64(mapEntrySize))`) + p.Out() + p.P(`}`) + } else if repeated { + p.P(`for _, e := range m.`, fieldname, ` { `) + p.In() + p.P(`l=e.`, sizeName, `()`) + p.P(`n+=`, strconv.Itoa(key), `+l+sov`, p.localName, `(uint64(l))`) + p.Out() + p.P(`}`) + } else { + p.P(`l=m.`, fieldname, `.`, sizeName, `()`) + p.P(`n+=`, strconv.Itoa(key), `+l+sov`, p.localName, `(uint64(l))`) + } + case descriptor.FieldDescriptorProto_TYPE_BYTES: + if !gogoproto.IsCustomType(field) { + if repeated { + p.P(`for _, b := range m.`, fieldname, ` { `) + p.In() + p.P(`l = len(b)`) + p.P(`n+=`, strconv.Itoa(key), `+l+sov`, p.localName, `(uint64(l))`) + p.Out() + p.P(`}`) + } else if proto3 { + p.P(`l=len(m.`, fieldname, `)`) + p.P(`if l > 0 {`) + p.In() + p.P(`n+=`, strconv.Itoa(key), `+l+sov`, p.localName, `(uint64(l))`) + p.Out() + p.P(`}`) + } else { + p.P(`l=len(m.`, fieldname, `)`) + p.P(`n+=`, strconv.Itoa(key), `+l+sov`, p.localName, `(uint64(l))`) + } + } else { + if repeated { + p.P(`for _, e := range m.`, fieldname, ` { `) + p.In() + p.P(`l=e.`, sizeName, `()`) + p.P(`n+=`, strconv.Itoa(key), `+l+sov`, p.localName, `(uint64(l))`) + p.Out() + p.P(`}`) + } else { + p.P(`l=m.`, fieldname, `.`, sizeName, `()`) + p.P(`n+=`, strconv.Itoa(key), `+l+sov`, p.localName, `(uint64(l))`) + } + } + case 
descriptor.FieldDescriptorProto_TYPE_SINT32, + descriptor.FieldDescriptorProto_TYPE_SINT64: + if packed { + p.P(`l = 0`) + p.P(`for _, e := range m.`, fieldname, ` {`) + p.In() + p.P(`l+=soz`, p.localName, `(uint64(e))`) + p.Out() + p.P(`}`) + p.P(`n+=`, strconv.Itoa(key), `+sov`, p.localName, `(uint64(l))+l`) + } else if repeated { + p.P(`for _, e := range m.`, fieldname, ` {`) + p.In() + p.P(`n+=`, strconv.Itoa(key), `+soz`, p.localName, `(uint64(e))`) + p.Out() + p.P(`}`) + } else if proto3 { + p.P(`if m.`, fieldname, ` != 0 {`) + p.In() + p.P(`n+=`, strconv.Itoa(key), `+soz`, p.localName, `(uint64(m.`, fieldname, `))`) + p.Out() + p.P(`}`) + } else if nullable { + p.P(`n+=`, strconv.Itoa(key), `+soz`, p.localName, `(uint64(*m.`, fieldname, `))`) + } else { + p.P(`n+=`, strconv.Itoa(key), `+soz`, p.localName, `(uint64(m.`, fieldname, `))`) + } + default: + panic("not implemented") + } + if ((!proto3 || field.IsMessage()) && nullable) || repeated || (!gogoproto.IsCustomType(field) && *field.Type == descriptor.FieldDescriptorProto_TYPE_BYTES) { + p.Out() + p.P(`}`) + } +} + +func (p *size) Generate(file *generator.FileDescriptor) { + p.PluginImports = generator.NewPluginImports(p.Generator) + p.atleastOne = false + p.localName = generator.FileName(file) + protoPkg := p.NewImport("github.com/gogo/protobuf/proto") + if !gogoproto.ImportsGoGoProto(file.FileDescriptorProto) { + protoPkg = p.NewImport("github.com/golang/protobuf/proto") + } + for _, message := range file.Messages() { + sizeName := "" + if gogoproto.IsSizer(file.FileDescriptorProto, message.DescriptorProto) { + sizeName = "Size" + } else if gogoproto.IsProtoSizer(file.FileDescriptorProto, message.DescriptorProto) { + sizeName = "ProtoSize" + } else { + continue + } + if message.DescriptorProto.GetOptions().GetMapEntry() { + continue + } + p.atleastOne = true + ccTypeName := generator.CamelCaseSlice(message.TypeName()) + p.P(`func (m *`, ccTypeName, `) `, sizeName, `() (n int) {`) + p.In() + p.P(`var l int`) + p.P(`_ = l`) + oneofs := make(map[string]struct{}) + for _, field := range message.Field { + oneof := field.OneofIndex != nil + if !oneof { + proto3 := gogoproto.IsProto3(file.FileDescriptorProto) + p.generateField(proto3, file, message, field, sizeName) + } else { + fieldname := p.GetFieldName(message, field) + if _, ok := oneofs[fieldname]; ok { + continue + } else { + oneofs[fieldname] = struct{}{} + } + p.P(`if m.`, fieldname, ` != nil {`) + p.In() + p.P(`n+=m.`, fieldname, `.`, sizeName, `()`) + p.Out() + p.P(`}`) + } + } + if message.DescriptorProto.HasExtension() { + p.P(`if m.XXX_extensions != nil {`) + p.In() + if gogoproto.HasExtensionsMap(file.FileDescriptorProto, message.DescriptorProto) { + p.P(`n += `, protoPkg.Use(), `.SizeOfExtensionMap(m.XXX_extensions)`) + } else { + p.P(`n+=len(m.XXX_extensions)`) + } + p.Out() + p.P(`}`) + } + if gogoproto.HasUnrecognized(file.FileDescriptorProto, message.DescriptorProto) { + p.P(`if m.XXX_unrecognized != nil {`) + p.In() + p.P(`n+=len(m.XXX_unrecognized)`) + p.Out() + p.P(`}`) + } + p.P(`return n`) + p.Out() + p.P(`}`) + p.P() + + //Generate Size methods for oneof fields + m := proto.Clone(message.DescriptorProto).(*descriptor.DescriptorProto) + for _, f := range m.Field { + oneof := f.OneofIndex != nil + if !oneof { + continue + } + ccTypeName := p.OneOfTypeName(message, f) + p.P(`func (m *`, ccTypeName, `) `, sizeName, `() (n int) {`) + p.In() + p.P(`var l int`) + p.P(`_ = l`) + vanity.TurnOffNullableForNativeTypesWithoutDefaultsOnly(f) + p.generateField(false, file, 
message, f, sizeName) + p.P(`return n`) + p.Out() + p.P(`}`) + } + } + + if !p.atleastOne { + return + } + + p.sizeVarint() + p.sizeZigZag() + +} + +func init() { + generator.RegisterPlugin(NewSize()) +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/size/sizetest.go b/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/size/sizetest.go new file mode 100644 index 000000000000..4fa946e57e89 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/size/sizetest.go @@ -0,0 +1,132 @@ +// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
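+
+// For reference, the sizeVarint and sizeZigZag calls in the size plugin
+// above emit varint-size helpers of roughly the following shape (a sketch
+// matching the generated sov/soz calls; names here are illustrative, not
+// the vendored output verbatim): sov counts the bytes a uint64 occupies in
+// varint encoding, and soz applies zigzag encoding first.
+//
+//	func sov(x uint64) (n int) {
+//		for {
+//			n++
+//			x >>= 7
+//			if x == 0 {
+//				break
+//			}
+//		}
+//		return n
+//	}
+//
+//	func soz(x uint64) (n int) {
+//		return sov(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+//	}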
+ +package size + +import ( + "github.com/gogo/protobuf/gogoproto" + "github.com/gogo/protobuf/plugin/testgen" + "github.com/gogo/protobuf/protoc-gen-gogo/generator" +) + +type test struct { + *generator.Generator +} + +func NewTest(g *generator.Generator) testgen.TestPlugin { + return &test{g} +} + +func (p *test) Generate(imports generator.PluginImports, file *generator.FileDescriptor) bool { + used := false + randPkg := imports.NewImport("math/rand") + timePkg := imports.NewImport("time") + testingPkg := imports.NewImport("testing") + protoPkg := imports.NewImport("github.com/gogo/protobuf/proto") + if !gogoproto.ImportsGoGoProto(file.FileDescriptorProto) { + protoPkg = imports.NewImport("github.com/golang/protobuf/proto") + } + for _, message := range file.Messages() { + ccTypeName := generator.CamelCaseSlice(message.TypeName()) + sizeName := "" + if gogoproto.IsSizer(file.FileDescriptorProto, message.DescriptorProto) { + sizeName = "Size" + } else if gogoproto.IsProtoSizer(file.FileDescriptorProto, message.DescriptorProto) { + sizeName = "ProtoSize" + } else { + continue + } + if message.DescriptorProto.GetOptions().GetMapEntry() { + continue + } + + if gogoproto.HasTestGen(file.FileDescriptorProto, message.DescriptorProto) { + used = true + p.P(`func Test`, ccTypeName, sizeName, `(t *`, testingPkg.Use(), `.T) {`) + p.In() + p.P(`seed := `, timePkg.Use(), `.Now().UnixNano()`) + p.P(`popr := `, randPkg.Use(), `.New(`, randPkg.Use(), `.NewSource(seed))`) + p.P(`p := NewPopulated`, ccTypeName, `(popr, true)`) + p.P(`size2 := `, protoPkg.Use(), `.Size(p)`) + p.P(`data, err := `, protoPkg.Use(), `.Marshal(p)`) + p.P(`if err != nil {`) + p.In() + p.P(`t.Fatalf("seed = %d, err = %v", seed, err)`) + p.Out() + p.P(`}`) + p.P(`size := p.`, sizeName, `()`) + p.P(`if len(data) != size {`) + p.In() + p.P(`t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(data))`) + p.Out() + p.P(`}`) + p.P(`if size2 != size {`) + p.In() + p.P(`t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2)`) + p.Out() + p.P(`}`) + p.P(`size3 := `, protoPkg.Use(), `.Size(p)`) + p.P(`if size3 != size {`) + p.In() + p.P(`t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3)`) + p.Out() + p.P(`}`) + p.Out() + p.P(`}`) + p.P() + } + + if gogoproto.HasBenchGen(file.FileDescriptorProto, message.DescriptorProto) { + used = true + p.P(`func Benchmark`, ccTypeName, sizeName, `(b *`, testingPkg.Use(), `.B) {`) + p.In() + p.P(`popr := `, randPkg.Use(), `.New(`, randPkg.Use(), `.NewSource(616))`) + p.P(`total := 0`) + p.P(`pops := make([]*`, ccTypeName, `, 1000)`) + p.P(`for i := 0; i < 1000; i++ {`) + p.In() + p.P(`pops[i] = NewPopulated`, ccTypeName, `(popr, false)`) + p.Out() + p.P(`}`) + p.P(`b.ResetTimer()`) + p.P(`for i := 0; i < b.N; i++ {`) + p.In() + p.P(`total += pops[i%1000].`, sizeName, `()`) + p.Out() + p.P(`}`) + p.P(`b.SetBytes(int64(total / b.N))`) + p.Out() + p.P(`}`) + p.P() + } + + } + return used +} + +func init() { + testgen.RegisterTestPlugin(NewTest) +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/stringer/stringer.go b/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/stringer/stringer.go new file mode 100644 index 000000000000..b6360485cafd --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/stringer/stringer.go @@ -0,0 +1,293 @@ +// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. 
+// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +The stringer plugin generates a String method for each message. + +It is enabled by the following extensions: + + - stringer + - stringer_all + +The stringer plugin also generates a test, provided it is enabled using one of the following extensions: + + - testgen + - testgen_all + +Let us look at: + + github.com/gogo/protobuf/test/example/example.proto + +Btw all the output can be seen at: + + github.com/gogo/protobuf/test/example/* + +The following message: + + option (gogoproto.goproto_stringer_all) = false; + option (gogoproto.stringer_all) = true; + + message A { + optional string Description = 1 [(gogoproto.nullable) = false]; + optional int64 Number = 2 [(gogoproto.nullable) = false]; + optional bytes Id = 3 [(gogoproto.customtype) = "github.com/gogo/protobuf/test/custom.Uuid", (gogoproto.nullable) = false]; + } + +given to the stringer plugin, will generate the following code: + + func (this *A) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&A{`, + `Description:` + fmt.Sprintf("%v", this.Description) + `,`, + `Number:` + fmt.Sprintf("%v", this.Number) + `,`, + `Id:` + fmt.Sprintf("%v", this.Id) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s + } + +and the following test code: + + func TestAStringer(t *testing4.T) { + popr := math_rand4.New(math_rand4.NewSource(time4.Now().UnixNano())) + p := NewPopulatedA(popr, false) + s1 := p.String() + s2 := fmt1.Sprintf("%v", p) + if s1 != s2 { + t.Fatalf("String want %v got %v", s1, s2) + } + } + +Typically fmt.Printf("%v") stops printing when it reaches a pointer and +does not print its values, while the generated String method always prints all values, recursively.
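+
+For example, a plain Go value without a String method prints pointer fields
+as addresses under %v, while the generated String method prints the
+pointed-to values. A rough sketch (illustrative Go, not generated output):
+
+	type plain struct{ inner *int }
+	fmt.Printf("%v\n", plain{inner: new(int)}) // prints something like {0xc00001c030}
+	// a message with a nullable (pointer) field and a generated String
+	// method would instead print the nested values, recursively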
+ +*/ +package stringer + +import ( + "github.com/gogo/protobuf/gogoproto" + "github.com/gogo/protobuf/protoc-gen-gogo/generator" + "strings" +) + +type stringer struct { + *generator.Generator + generator.PluginImports + atleastOne bool + localName string +} + +func NewStringer() *stringer { + return &stringer{} +} + +func (p *stringer) Name() string { + return "stringer" +} + +func (p *stringer) Init(g *generator.Generator) { + p.Generator = g +} + +func (p *stringer) Generate(file *generator.FileDescriptor) { + proto3 := gogoproto.IsProto3(file.FileDescriptorProto) + p.PluginImports = generator.NewPluginImports(p.Generator) + p.atleastOne = false + + p.localName = generator.FileName(file) + + fmtPkg := p.NewImport("fmt") + stringsPkg := p.NewImport("strings") + reflectPkg := p.NewImport("reflect") + sortKeysPkg := p.NewImport("github.com/gogo/protobuf/sortkeys") + for _, message := range file.Messages() { + if !gogoproto.IsStringer(file.FileDescriptorProto, message.DescriptorProto) { + continue + } + if gogoproto.EnabledGoStringer(file.FileDescriptorProto, message.DescriptorProto) { + panic("old string method needs to be disabled, please use gogoproto.goproto_stringer or gogoproto.goproto_stringer_all and set it to false") + } + if message.DescriptorProto.GetOptions().GetMapEntry() { + continue + } + p.atleastOne = true + ccTypeName := generator.CamelCaseSlice(message.TypeName()) + p.P(`func (this *`, ccTypeName, `) String() string {`) + p.In() + p.P(`if this == nil {`) + p.In() + p.P(`return "nil"`) + p.Out() + p.P(`}`) + for _, field := range message.Field { + if !generator.IsMap(file.FileDescriptorProto, field) { + continue + } + fieldname := p.GetFieldName(message, field) + + m := p.GoMapType(nil, field) + mapgoTyp, keyField, keyAliasField := m.GoType, m.KeyField, m.KeyAliasField + keysName := `keysFor` + fieldname + keygoTyp, _ := p.GoType(nil, keyField) + keygoTyp = strings.Replace(keygoTyp, "*", "", 1) + keygoAliasTyp, _ := p.GoType(nil, keyAliasField) + keygoAliasTyp = strings.Replace(keygoAliasTyp, "*", "", 1) + keyCapTyp := generator.CamelCase(keygoTyp) + p.P(keysName, ` := make([]`, keygoTyp, `, 0, len(this.`, fieldname, `))`) + p.P(`for k, _ := range this.`, fieldname, ` {`) + p.In() + if keygoAliasTyp == keygoTyp { + p.P(keysName, ` = append(`, keysName, `, k)`) + } else { + p.P(keysName, ` = append(`, keysName, `, `, keygoTyp, `(k))`) + } + p.Out() + p.P(`}`) + p.P(sortKeysPkg.Use(), `.`, keyCapTyp, `s(`, keysName, `)`) + mapName := `mapStringFor` + fieldname + p.P(mapName, ` := "`, mapgoTyp, `{"`) + p.P(`for _, k := range `, keysName, ` {`) + p.In() + if keygoAliasTyp == keygoTyp { + p.P(mapName, ` += fmt.Sprintf("%v: %v,", k, this.`, fieldname, `[k])`) + } else { + p.P(mapName, ` += fmt.Sprintf("%v: %v,", k, this.`, fieldname, `[`, keygoAliasTyp, `(k)])`) + } + p.Out() + p.P(`}`) + p.P(mapName, ` += "}"`) + } + p.P("s := ", stringsPkg.Use(), ".Join([]string{`&", ccTypeName, "{`,") + oneofs := make(map[string]struct{}) + for _, field := range message.Field { + nullable := gogoproto.IsNullable(field) + repeated := field.IsRepeated() + fieldname := p.GetFieldName(message, field) + oneof := field.OneofIndex != nil + if oneof { + if _, ok := oneofs[fieldname]; ok { + continue + } else { + oneofs[fieldname] = struct{}{} + } + p.P("`", fieldname, ":`", ` + `, fmtPkg.Use(), `.Sprintf("%v", this.`, fieldname, ") + `,", "`,") + } else if generator.IsMap(file.FileDescriptorProto, field) { + mapName := `mapStringFor` + fieldname + p.P("`", fieldname, ":`", ` + `, mapName, " + `,", 
"`,") + } else if field.IsMessage() || p.IsGroup(field) { + desc := p.ObjectNamed(field.GetTypeName()) + msgname := p.TypeName(desc) + msgnames := strings.Split(msgname, ".") + typeName := msgnames[len(msgnames)-1] + if nullable { + p.P("`", fieldname, ":`", ` + `, stringsPkg.Use(), `.Replace(`, fmtPkg.Use(), `.Sprintf("%v", this.`, fieldname, `), "`, typeName, `","`, msgname, `"`, ", 1) + `,", "`,") + } else if repeated { + p.P("`", fieldname, ":`", ` + `, stringsPkg.Use(), `.Replace(`, stringsPkg.Use(), `.Replace(`, fmtPkg.Use(), `.Sprintf("%v", this.`, fieldname, `), "`, typeName, `","`, msgname, `"`, ", 1),`&`,``,1) + `,", "`,") + } else { + p.P("`", fieldname, ":`", ` + `, stringsPkg.Use(), `.Replace(`, stringsPkg.Use(), `.Replace(this.`, fieldname, `.String(), "`, typeName, `","`, msgname, `"`, ", 1),`&`,``,1) + `,", "`,") + } + } else { + if nullable && !repeated && !proto3 { + p.P("`", fieldname, ":`", ` + valueToString`, p.localName, `(this.`, fieldname, ") + `,", "`,") + } else { + p.P("`", fieldname, ":`", ` + `, fmtPkg.Use(), `.Sprintf("%v", this.`, fieldname, ") + `,", "`,") + } + } + } + if message.DescriptorProto.HasExtension() { + if gogoproto.HasExtensionsMap(file.FileDescriptorProto, message.DescriptorProto) { + p.P("`XXX_extensions:` + proto.StringFromExtensionsMap(this.XXX_extensions) + `,`,") + } else { + p.P("`XXX_extensions:` + proto.StringFromExtensionsBytes(this.XXX_extensions) + `,`,") + } + } + if gogoproto.HasUnrecognized(file.FileDescriptorProto, message.DescriptorProto) { + p.P("`XXX_unrecognized:` + ", fmtPkg.Use(), `.Sprintf("%v", this.XXX_unrecognized) + `, "`,`,") + } + p.P("`}`,") + p.P(`}`, `,""`, ")") + p.P(`return s`) + p.Out() + p.P(`}`) + + //Generate String methods for oneof fields + for _, field := range message.Field { + oneof := field.OneofIndex != nil + if !oneof { + continue + } + ccTypeName := p.OneOfTypeName(message, field) + p.P(`func (this *`, ccTypeName, `) String() string {`) + p.In() + p.P(`if this == nil {`) + p.In() + p.P(`return "nil"`) + p.Out() + p.P(`}`) + p.P("s := ", stringsPkg.Use(), ".Join([]string{`&", ccTypeName, "{`,") + fieldname := p.GetOneOfFieldName(message, field) + if field.IsMessage() || p.IsGroup(field) { + desc := p.ObjectNamed(field.GetTypeName()) + msgname := p.TypeName(desc) + msgnames := strings.Split(msgname, ".") + typeName := msgnames[len(msgnames)-1] + p.P("`", fieldname, ":`", ` + `, stringsPkg.Use(), `.Replace(`, fmtPkg.Use(), `.Sprintf("%v", this.`, fieldname, `), "`, typeName, `","`, msgname, `"`, ", 1) + `,", "`,") + } else { + p.P("`", fieldname, ":`", ` + `, fmtPkg.Use(), `.Sprintf("%v", this.`, fieldname, ") + `,", "`,") + } + p.P("`}`,") + p.P(`}`, `,""`, ")") + p.P(`return s`) + p.Out() + p.P(`}`) + } + } + + if !p.atleastOne { + return + } + + p.P(`func valueToString`, p.localName, `(v interface{}) string {`) + p.In() + p.P(`rv := `, reflectPkg.Use(), `.ValueOf(v)`) + p.P(`if rv.IsNil() {`) + p.In() + p.P(`return "nil"`) + p.Out() + p.P(`}`) + p.P(`pv := `, reflectPkg.Use(), `.Indirect(rv).Interface()`) + p.P(`return `, fmtPkg.Use(), `.Sprintf("*%v", pv)`) + p.Out() + p.P(`}`) + +} + +func init() { + generator.RegisterPlugin(NewStringer()) +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/stringer/stringertest.go b/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/stringer/stringertest.go new file mode 100644 index 000000000000..df615ba7845b --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/stringer/stringertest.go @@ -0,0 +1,81 @@ +// Copyright (c) 
2013, Vastech SA (PTY) LTD. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package stringer + +import ( + "github.com/gogo/protobuf/gogoproto" + "github.com/gogo/protobuf/plugin/testgen" + "github.com/gogo/protobuf/protoc-gen-gogo/generator" +) + +type test struct { + *generator.Generator +} + +func NewTest(g *generator.Generator) testgen.TestPlugin { + return &test{g} +} + +func (p *test) Generate(imports generator.PluginImports, file *generator.FileDescriptor) bool { + used := false + randPkg := imports.NewImport("math/rand") + timePkg := imports.NewImport("time") + testingPkg := imports.NewImport("testing") + fmtPkg := imports.NewImport("fmt") + for _, message := range file.Messages() { + ccTypeName := generator.CamelCaseSlice(message.TypeName()) + if !gogoproto.IsStringer(file.FileDescriptorProto, message.DescriptorProto) { + continue + } + if message.DescriptorProto.GetOptions().GetMapEntry() { + continue + } + + if gogoproto.HasTestGen(file.FileDescriptorProto, message.DescriptorProto) { + used = true + p.P(`func Test`, ccTypeName, `Stringer(t *`, testingPkg.Use(), `.T) {`) + p.In() + p.P(`popr := `, randPkg.Use(), `.New(`, randPkg.Use(), `.NewSource(`, timePkg.Use(), `.Now().UnixNano()))`) + p.P(`p := NewPopulated`, ccTypeName, `(popr, false)`) + p.P(`s1 := p.String()`) + p.P(`s2 := `, fmtPkg.Use(), `.Sprintf("%v", p)`) + p.P(`if s1 != s2 {`) + p.In() + p.P(`t.Fatalf("String want %v got %v", s1, s2)`) + p.Out() + p.P(`}`) + p.Out() + p.P(`}`) + } + + } + return used +} + +func init() { + testgen.RegisterTestPlugin(NewTest) +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/testgen/testgen.go b/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/testgen/testgen.go new file mode 100644 index 000000000000..a48a1c2ccbc8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/testgen/testgen.go @@ -0,0 +1,606 @@ +// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. 
+// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +The testgen plugin generates Test and Benchmark functions for each message. + +Tests are enabled using the following extensions: + + - testgen + - testgen_all + +Benchmarks are enabled using the following extensions: + + - benchgen + - benchgen_all + +Let us look at: + + github.com/gogo/protobuf/test/example/example.proto + +Btw all the output can be seen at: + + github.com/gogo/protobuf/test/example/* + +The following message: + + option (gogoproto.testgen_all) = true; + option (gogoproto.benchgen_all) = true; + + message A { + optional string Description = 1 [(gogoproto.nullable) = false]; + optional int64 Number = 2 [(gogoproto.nullable) = false]; + optional bytes Id = 3 [(gogoproto.customtype) = "github.com/gogo/protobuf/test/custom.Uuid", (gogoproto.nullable) = false]; + } + +given to the testgen plugin, will generate the following test code: + + func TestAProto(t *testing.T) { + popr := math_rand.New(math_rand.NewSource(time.Now().UnixNano())) + p := NewPopulatedA(popr, false) + data, err := github_com_gogo_protobuf_proto.Marshal(p) + if err != nil { + panic(err) + } + msg := &A{} + if err := github_com_gogo_protobuf_proto.Unmarshal(data, msg); err != nil { + panic(err) + } + for i := range data { + data[i] = byte(popr.Intn(256)) + } + if err := p.VerboseEqual(msg); err != nil { + t.Fatalf("%#v !VerboseProto %#v, since %v", msg, p, err) + } + if !p.Equal(msg) { + t.Fatalf("%#v !Proto %#v", msg, p) + } + } + + func BenchmarkAProtoMarshal(b *testing.B) { + popr := math_rand.New(math_rand.NewSource(616)) + total := 0 + pops := make([]*A, 10000) + for i := 0; i < 10000; i++ { + pops[i] = NewPopulatedA(popr, false) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + data, err := github_com_gogo_protobuf_proto.Marshal(pops[i%10000]) + if err != nil { + panic(err) + } + total += len(data) + } + b.SetBytes(int64(total / b.N)) + } + + func BenchmarkAProtoUnmarshal(b *testing.B) { + popr := math_rand.New(math_rand.NewSource(616)) + total := 0 + datas := make([][]byte, 10000) + for i := 0; i < 10000; i++ { + data, err := github_com_gogo_protobuf_proto.Marshal(NewPopulatedA(popr, false)) + if err != nil { + panic(err) + } + datas[i] = data + } + msg := &A{} 
+ b.ResetTimer() + for i := 0; i < b.N; i++ { + total += len(datas[i%10000]) + if err := github_com_gogo_protobuf_proto.Unmarshal(datas[i%10000], msg); err != nil { + panic(err) + } + } + b.SetBytes(int64(total / b.N)) + } + + + func TestAJSON(t *testing1.T) { + popr := math_rand1.New(math_rand1.NewSource(time1.Now().UnixNano())) + p := NewPopulatedA(popr, true) + jsondata, err := encoding_json.Marshal(p) + if err != nil { + panic(err) + } + msg := &A{} + err = encoding_json.Unmarshal(jsondata, msg) + if err != nil { + panic(err) + } + if err := p.VerboseEqual(msg); err != nil { + t.Fatalf("%#v !VerboseProto %#v, since %v", msg, p, err) + } + if !p.Equal(msg) { + t.Fatalf("%#v !Json Equal %#v", msg, p) + } + } + + func TestAProtoText(t *testing2.T) { + popr := math_rand2.New(math_rand2.NewSource(time2.Now().UnixNano())) + p := NewPopulatedA(popr, true) + data := github_com_gogo_protobuf_proto1.MarshalTextString(p) + msg := &A{} + if err := github_com_gogo_protobuf_proto1.UnmarshalText(data, msg); err != nil { + panic(err) + } + if err := p.VerboseEqual(msg); err != nil { + t.Fatalf("%#v !VerboseProto %#v, since %v", msg, p, err) + } + if !p.Equal(msg) { + t.Fatalf("%#v !Proto %#v", msg, p) + } + } + + func TestAProtoCompactText(t *testing2.T) { + popr := math_rand2.New(math_rand2.NewSource(time2.Now().UnixNano())) + p := NewPopulatedA(popr, true) + data := github_com_gogo_protobuf_proto1.CompactTextString(p) + msg := &A{} + if err := github_com_gogo_protobuf_proto1.UnmarshalText(data, msg); err != nil { + panic(err) + } + if err := p.VerboseEqual(msg); err != nil { + t.Fatalf("%#v !VerboseProto %#v, since %v", msg, p, err) + } + if !p.Equal(msg) { + t.Fatalf("%#v !Proto %#v", msg, p) + } + } + +Other registered tests are also generated. +Tests are registered to this test plugin by calling the following function. + + func RegisterTestPlugin(newFunc NewTestPlugin) + +where NewTestPlugin is: + + type NewTestPlugin func(g *generator.Generator) TestPlugin + +and TestPlugin is an interface: + + type TestPlugin interface { + Generate(imports generator.PluginImports, file *generator.FileDescriptor) (used bool) + } + +Plugins that use this interface include: + + - populate + - gostring + - equal + - union + - and more + +Please look at these plugins as examples of how to create your own. +A good idea is to let each plugin generate its own tests. 
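+
+A minimal sketch of such a test plugin (the type and function names here are
+illustrative; the Generate signature, imports.NewImport and
+RegisterTestPlugin are the ones documented above):
+
+	type myTest struct {
+		*generator.Generator
+	}
+
+	func newMyTest(g *generator.Generator) TestPlugin {
+		return &myTest{g}
+	}
+
+	func (p *myTest) Generate(imports generator.PluginImports, file *generator.FileDescriptor) bool {
+		testingPkg := imports.NewImport("testing")
+		used := false
+		for _, message := range file.Messages() {
+			ccTypeName := generator.CamelCaseSlice(message.TypeName())
+			used = true
+			// emit a trivial test per message
+			p.P(`func Test`, ccTypeName, `Sanity(t *`, testingPkg.Use(), `.T) {`)
+			p.In()
+			p.P(`var msg `, ccTypeName)
+			p.P(`_ = msg`)
+			p.Out()
+			p.P(`}`)
+		}
+		return used
+	}
+
+	func init() {
+		RegisterTestPlugin(newMyTest)
+	}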
+ +*/ +package testgen + +import ( + "github.com/gogo/protobuf/gogoproto" + "github.com/gogo/protobuf/protoc-gen-gogo/generator" +) + +type TestPlugin interface { + Generate(imports generator.PluginImports, file *generator.FileDescriptor) (used bool) +} + +type NewTestPlugin func(g *generator.Generator) TestPlugin + +var testplugins = make([]NewTestPlugin, 0) + +func RegisterTestPlugin(newFunc NewTestPlugin) { + testplugins = append(testplugins, newFunc) +} + +type plugin struct { + *generator.Generator + generator.PluginImports + tests []TestPlugin +} + +func NewPlugin() *plugin { + return &plugin{} +} + +func (p *plugin) Name() string { + return "testgen" +} + +func (p *plugin) Init(g *generator.Generator) { + p.Generator = g + p.tests = make([]TestPlugin, 0, len(testplugins)) + for i := range testplugins { + p.tests = append(p.tests, testplugins[i](g)) + } +} + +func (p *plugin) Generate(file *generator.FileDescriptor) { + p.PluginImports = generator.NewPluginImports(p.Generator) + atLeastOne := false + for i := range p.tests { + used := p.tests[i].Generate(p.PluginImports, file) + if used { + atLeastOne = true + } + } + if atLeastOne { + p.P(`//These tests are generated by github.com/gogo/protobuf/plugin/testgen`) + } +} + +type testProto struct { + *generator.Generator +} + +func newProto(g *generator.Generator) TestPlugin { + return &testProto{g} +} + +func (p *testProto) Generate(imports generator.PluginImports, file *generator.FileDescriptor) bool { + used := false + testingPkg := imports.NewImport("testing") + randPkg := imports.NewImport("math/rand") + timePkg := imports.NewImport("time") + protoPkg := imports.NewImport("github.com/gogo/protobuf/proto") + if !gogoproto.ImportsGoGoProto(file.FileDescriptorProto) { + protoPkg = imports.NewImport("github.com/golang/protobuf/proto") + } + for _, message := range file.Messages() { + ccTypeName := generator.CamelCaseSlice(message.TypeName()) + if message.DescriptorProto.GetOptions().GetMapEntry() { + continue + } + if gogoproto.HasTestGen(file.FileDescriptorProto, message.DescriptorProto) { + used = true + + p.P(`func Test`, ccTypeName, `Proto(t *`, testingPkg.Use(), `.T) {`) + p.In() + p.P(`seed := `, timePkg.Use(), `.Now().UnixNano()`) + p.P(`popr := `, randPkg.Use(), `.New(`, randPkg.Use(), `.NewSource(seed))`) + p.P(`p := NewPopulated`, ccTypeName, `(popr, false)`) + p.P(`data, err := `, protoPkg.Use(), `.Marshal(p)`) + p.P(`if err != nil {`) + p.In() + p.P(`t.Fatalf("seed = %d, err = %v", seed, err)`) + p.Out() + p.P(`}`) + p.P(`msg := &`, ccTypeName, `{}`) + p.P(`if err := `, protoPkg.Use(), `.Unmarshal(data, msg); err != nil {`) + p.In() + p.P(`t.Fatalf("seed = %d, err = %v", seed, err)`) + p.Out() + p.P(`}`) + p.P(`littlefuzz := make([]byte, len(data))`) + p.P(`copy(littlefuzz, data)`) + p.P(`for i := range data {`) + p.In() + p.P(`data[i] = byte(popr.Intn(256))`) + p.Out() + p.P(`}`) + if gogoproto.HasVerboseEqual(file.FileDescriptorProto, message.DescriptorProto) { + p.P(`if err := p.VerboseEqual(msg); err != nil {`) + p.In() + p.P(`t.Fatalf("seed = %d, %#v !VerboseProto %#v, since %v", seed, msg, p, err)`) + p.Out() + p.P(`}`) + } + p.P(`if !p.Equal(msg) {`) + p.In() + p.P(`t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)`) + p.Out() + p.P(`}`) + p.P(`if len(littlefuzz) > 0 {`) + p.In() + p.P(`fuzzamount := 100`) + p.P(`for i := 0; i < fuzzamount; i++ {`) + p.In() + p.P(`littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256))`) + p.P(`littlefuzz = append(littlefuzz, byte(popr.Intn(256)))`) + p.Out() + p.P(`}`) + 
p.P(`// shouldn't panic`) + p.P(`_ = `, protoPkg.Use(), `.Unmarshal(littlefuzz, msg)`) + p.Out() + p.P(`}`) + p.Out() + p.P(`}`) + p.P() + } + + if gogoproto.HasTestGen(file.FileDescriptorProto, message.DescriptorProto) { + if gogoproto.IsMarshaler(file.FileDescriptorProto, message.DescriptorProto) || gogoproto.IsUnsafeMarshaler(file.FileDescriptorProto, message.DescriptorProto) { + p.P(`func Test`, ccTypeName, `MarshalTo(t *`, testingPkg.Use(), `.T) {`) + p.In() + p.P(`seed := `, timePkg.Use(), `.Now().UnixNano()`) + p.P(`popr := `, randPkg.Use(), `.New(`, randPkg.Use(), `.NewSource(seed))`) + p.P(`p := NewPopulated`, ccTypeName, `(popr, false)`) + if gogoproto.IsProtoSizer(file.FileDescriptorProto, message.DescriptorProto) { + p.P(`size := p.ProtoSize()`) + } else { + p.P(`size := p.Size()`) + } + p.P(`data := make([]byte, size)`) + p.P(`for i := range data {`) + p.In() + p.P(`data[i] = byte(popr.Intn(256))`) + p.Out() + p.P(`}`) + p.P(`_, err := p.MarshalTo(data)`) + p.P(`if err != nil {`) + p.In() + p.P(`t.Fatalf("seed = %d, err = %v", seed, err)`) + p.Out() + p.P(`}`) + p.P(`msg := &`, ccTypeName, `{}`) + p.P(`if err := `, protoPkg.Use(), `.Unmarshal(data, msg); err != nil {`) + p.In() + p.P(`t.Fatalf("seed = %d, err = %v", seed, err)`) + p.Out() + p.P(`}`) + p.P(`for i := range data {`) + p.In() + p.P(`data[i] = byte(popr.Intn(256))`) + p.Out() + p.P(`}`) + if gogoproto.HasVerboseEqual(file.FileDescriptorProto, message.DescriptorProto) { + p.P(`if err := p.VerboseEqual(msg); err != nil {`) + p.In() + p.P(`t.Fatalf("seed = %d, %#v !VerboseProto %#v, since %v", seed, msg, p, err)`) + p.Out() + p.P(`}`) + } + p.P(`if !p.Equal(msg) {`) + p.In() + p.P(`t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)`) + p.Out() + p.P(`}`) + p.Out() + p.P(`}`) + p.P() + } + } + + if gogoproto.HasBenchGen(file.FileDescriptorProto, message.DescriptorProto) { + used = true + p.P(`func Benchmark`, ccTypeName, `ProtoMarshal(b *`, testingPkg.Use(), `.B) {`) + p.In() + p.P(`popr := `, randPkg.Use(), `.New(`, randPkg.Use(), `.NewSource(616))`) + p.P(`total := 0`) + p.P(`pops := make([]*`, ccTypeName, `, 10000)`) + p.P(`for i := 0; i < 10000; i++ {`) + p.In() + p.P(`pops[i] = NewPopulated`, ccTypeName, `(popr, false)`) + p.Out() + p.P(`}`) + p.P(`b.ResetTimer()`) + p.P(`for i := 0; i < b.N; i++ {`) + p.In() + p.P(`data, err := `, protoPkg.Use(), `.Marshal(pops[i%10000])`) + p.P(`if err != nil {`) + p.In() + p.P(`panic(err)`) + p.Out() + p.P(`}`) + p.P(`total += len(data)`) + p.Out() + p.P(`}`) + p.P(`b.SetBytes(int64(total / b.N))`) + p.Out() + p.P(`}`) + p.P() + + p.P(`func Benchmark`, ccTypeName, `ProtoUnmarshal(b *`, testingPkg.Use(), `.B) {`) + p.In() + p.P(`popr := `, randPkg.Use(), `.New(`, randPkg.Use(), `.NewSource(616))`) + p.P(`total := 0`) + p.P(`datas := make([][]byte, 10000)`) + p.P(`for i := 0; i < 10000; i++ {`) + p.In() + p.P(`data, err := `, protoPkg.Use(), `.Marshal(NewPopulated`, ccTypeName, `(popr, false))`) + p.P(`if err != nil {`) + p.In() + p.P(`panic(err)`) + p.Out() + p.P(`}`) + p.P(`datas[i] = data`) + p.Out() + p.P(`}`) + p.P(`msg := &`, ccTypeName, `{}`) + p.P(`b.ResetTimer()`) + p.P(`for i := 0; i < b.N; i++ {`) + p.In() + p.P(`total += len(datas[i%10000])`) + p.P(`if err := `, protoPkg.Use(), `.Unmarshal(datas[i%10000], msg); err != nil {`) + p.In() + p.P(`panic(err)`) + p.Out() + p.P(`}`) + p.Out() + p.P(`}`) + p.P(`b.SetBytes(int64(total / b.N))`) + p.Out() + p.P(`}`) + p.P() + } + } + return used +} + +type testJson struct { + *generator.Generator +} + +func newJson(g 
*generator.Generator) TestPlugin { + return &testJson{g} +} + +func (p *testJson) Generate(imports generator.PluginImports, file *generator.FileDescriptor) bool { + used := false + testingPkg := imports.NewImport("testing") + randPkg := imports.NewImport("math/rand") + timePkg := imports.NewImport("time") + jsonPkg := imports.NewImport("github.com/gogo/protobuf/jsonpb") + for _, message := range file.Messages() { + ccTypeName := generator.CamelCaseSlice(message.TypeName()) + if message.DescriptorProto.GetOptions().GetMapEntry() { + continue + } + if gogoproto.HasTestGen(file.FileDescriptorProto, message.DescriptorProto) { + used = true + p.P(`func Test`, ccTypeName, `JSON(t *`, testingPkg.Use(), `.T) {`) + p.In() + p.P(`seed := `, timePkg.Use(), `.Now().UnixNano()`) + p.P(`popr := `, randPkg.Use(), `.New(`, randPkg.Use(), `.NewSource(seed))`) + p.P(`p := NewPopulated`, ccTypeName, `(popr, true)`) + p.P(`marshaler := `, jsonPkg.Use(), `.Marshaler{}`) + p.P(`jsondata, err := marshaler.MarshalToString(p)`) + p.P(`if err != nil {`) + p.In() + p.P(`t.Fatalf("seed = %d, err = %v", seed, err)`) + p.Out() + p.P(`}`) + p.P(`msg := &`, ccTypeName, `{}`) + p.P(`err = `, jsonPkg.Use(), `.UnmarshalString(jsondata, msg)`) + p.P(`if err != nil {`) + p.In() + p.P(`t.Fatalf("seed = %d, err = %v", seed, err)`) + p.Out() + p.P(`}`) + if gogoproto.HasVerboseEqual(file.FileDescriptorProto, message.DescriptorProto) { + p.P(`if err := p.VerboseEqual(msg); err != nil {`) + p.In() + p.P(`t.Fatalf("seed = %d, %#v !VerboseProto %#v, since %v", seed, msg, p, err)`) + p.Out() + p.P(`}`) + } + p.P(`if !p.Equal(msg) {`) + p.In() + p.P(`t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p)`) + p.Out() + p.P(`}`) + p.Out() + p.P(`}`) + } + } + return used +} + +type testText struct { + *generator.Generator +} + +func newText(g *generator.Generator) TestPlugin { + return &testText{g} +} + +func (p *testText) Generate(imports generator.PluginImports, file *generator.FileDescriptor) bool { + used := false + testingPkg := imports.NewImport("testing") + randPkg := imports.NewImport("math/rand") + timePkg := imports.NewImport("time") + protoPkg := imports.NewImport("github.com/gogo/protobuf/proto") + if !gogoproto.ImportsGoGoProto(file.FileDescriptorProto) { + protoPkg = imports.NewImport("github.com/golang/protobuf/proto") + } + //fmtPkg := imports.NewImport("fmt") + for _, message := range file.Messages() { + ccTypeName := generator.CamelCaseSlice(message.TypeName()) + if message.DescriptorProto.GetOptions().GetMapEntry() { + continue + } + if gogoproto.HasTestGen(file.FileDescriptorProto, message.DescriptorProto) { + used = true + + p.P(`func Test`, ccTypeName, `ProtoText(t *`, testingPkg.Use(), `.T) {`) + p.In() + p.P(`seed := `, timePkg.Use(), `.Now().UnixNano()`) + p.P(`popr := `, randPkg.Use(), `.New(`, randPkg.Use(), `.NewSource(seed))`) + p.P(`p := NewPopulated`, ccTypeName, `(popr, true)`) + p.P(`data := `, protoPkg.Use(), `.MarshalTextString(p)`) + p.P(`msg := &`, ccTypeName, `{}`) + p.P(`if err := `, protoPkg.Use(), `.UnmarshalText(data, msg); err != nil {`) + p.In() + p.P(`t.Fatalf("seed = %d, err = %v", seed, err)`) + p.Out() + p.P(`}`) + if gogoproto.HasVerboseEqual(file.FileDescriptorProto, message.DescriptorProto) { + p.P(`if err := p.VerboseEqual(msg); err != nil {`) + p.In() + p.P(`t.Fatalf("seed = %d, %#v !VerboseProto %#v, since %v", seed, msg, p, err)`) + p.Out() + p.P(`}`) + } + p.P(`if !p.Equal(msg) {`) + p.In() + p.P(`t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)`) + p.Out() + p.P(`}`) + 
p.Out() + p.P(`}`) + p.P() + + p.P(`func Test`, ccTypeName, `ProtoCompactText(t *`, testingPkg.Use(), `.T) {`) + p.In() + p.P(`seed := `, timePkg.Use(), `.Now().UnixNano()`) + p.P(`popr := `, randPkg.Use(), `.New(`, randPkg.Use(), `.NewSource(seed))`) + p.P(`p := NewPopulated`, ccTypeName, `(popr, true)`) + p.P(`data := `, protoPkg.Use(), `.CompactTextString(p)`) + p.P(`msg := &`, ccTypeName, `{}`) + p.P(`if err := `, protoPkg.Use(), `.UnmarshalText(data, msg); err != nil {`) + p.In() + p.P(`t.Fatalf("seed = %d, err = %v", seed, err)`) + p.Out() + p.P(`}`) + if gogoproto.HasVerboseEqual(file.FileDescriptorProto, message.DescriptorProto) { + p.P(`if err := p.VerboseEqual(msg); err != nil {`) + p.In() + p.P(`t.Fatalf("seed = %d, %#v !VerboseProto %#v, since %v", seed, msg, p, err)`) + p.Out() + p.P(`}`) + } + p.P(`if !p.Equal(msg) {`) + p.In() + p.P(`t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)`) + p.Out() + p.P(`}`) + p.Out() + p.P(`}`) + p.P() + + } + } + return used +} + +func init() { + RegisterTestPlugin(newProto) + RegisterTestPlugin(newJson) + RegisterTestPlugin(newText) +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/union/union.go b/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/union/union.go new file mode 100644 index 000000000000..684047770eba --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/union/union.go @@ -0,0 +1,207 @@ +// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +The onlyone plugin generates code for the onlyone extension. +All fields must be nullable and only one of the fields may be set, like a union. +Two methods are generated + + GetValue() interface{} + +and + + SetValue(v interface{}) (set bool) + +These provide easier interaction with an onlyone. + +The onlyone extension is not called union as this causes compile errors in the C++ generated code.
+There can only be one ;) + +It is enabled by the following extensions: + + - onlyone + - onlyone_all + +The onlyone plugin also generates a test, provided it is enabled using one of the following extensions: + + - testgen + - testgen_all + +Let us look at: + + github.com/gogo/protobuf/test/example/example.proto + +Btw all the output can be seen at: + + github.com/gogo/protobuf/test/example/* + +The following message: + + message U { + option (gogoproto.onlyone) = true; + optional A A = 1; + optional B B = 2; + } + +given to the onlyone plugin, will generate code which looks a lot like this: + + func (this *U) GetValue() interface{} { + if this.A != nil { + return this.A + } + if this.B != nil { + return this.B + } + return nil + } + + func (this *U) SetValue(value interface{}) bool { + switch vt := value.(type) { + case *A: + this.A = vt + case *B: + this.B = vt + default: + return false + } + return true + } + +and the following test code: + + func TestUUnion(t *testing.T) { + popr := math_rand.New(math_rand.NewSource(time.Now().UnixNano())) + p := NewPopulatedU(popr) + v := p.GetValue() + msg := &U{} + if !msg.SetValue(v) { + t.Fatalf("Union: Could not set Value") + } + if !p.Equal(msg) { + t.Fatalf("%#v !Union Equal %#v", msg, p) + } + } + +*/ +package union + +import ( + "github.com/gogo/protobuf/gogoproto" + "github.com/gogo/protobuf/protoc-gen-gogo/generator" +) + +type union struct { + *generator.Generator + generator.PluginImports +} + +func NewUnion() *union { + return &union{} +} + +func (p *union) Name() string { + return "union" +} + +func (p *union) Init(g *generator.Generator) { + p.Generator = g +} + +func (p *union) Generate(file *generator.FileDescriptor) { + p.PluginImports = generator.NewPluginImports(p.Generator) + + for _, message := range file.Messages() { + if !gogoproto.IsUnion(file.FileDescriptorProto, message.DescriptorProto) { + continue + } + if message.DescriptorProto.HasExtension() { + panic("onlyone does not currently support extensions") + } + if message.DescriptorProto.GetOptions().GetMapEntry() { + continue + } + + ccTypeName := generator.CamelCaseSlice(message.TypeName()) + p.P(`func (this *`, ccTypeName, `) GetValue() interface{} {`) + p.In() + for _, field := range message.Field { + fieldname := p.GetFieldName(message, field) + if fieldname == "Value" { + panic("cannot have a onlyone message " + ccTypeName + " with a field named Value") + } + p.P(`if this.`, fieldname, ` != nil {`) + p.In() + p.P(`return this.`, fieldname) + p.Out() + p.P(`}`) + } + p.P(`return nil`) + p.Out() + p.P(`}`) + p.P(``) + p.P(`func (this *`, ccTypeName, `) SetValue(value interface{}) bool {`) + p.In() + p.P(`switch vt := value.(type) {`) + p.In() + for _, field := range message.Field { + fieldname := p.GetFieldName(message, field) + goTyp, _ := p.GoType(message, field) + p.P(`case `, goTyp, `:`) + p.In() + p.P(`this.`, fieldname, ` = vt`) + p.Out() + } + p.P(`default:`) + p.In() + for _, field := range message.Field { + fieldname := p.GetFieldName(message, field) + if field.IsMessage() { + goTyp, _ := p.GoType(message, field) + obj := p.ObjectNamed(field.GetTypeName()).(*generator.Descriptor) + + if gogoproto.IsUnion(obj.File(), obj.DescriptorProto) { + p.P(`this.`, fieldname, ` = new(`, generator.GoTypeToName(goTyp), `)`) + p.P(`if set := this.`, fieldname, `.SetValue(value); set {`) + p.In() + p.P(`return true`) + p.Out() + p.P(`}`) + p.P(`this.`, fieldname, ` = nil`) + } + } + } + p.P(`return false`) + p.Out() + p.P(`}`) + p.P(`return true`) + p.Out() + p.P(`}`) + } +} + +func 
init() { + generator.RegisterPlugin(NewUnion()) +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/union/uniontest.go b/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/union/uniontest.go new file mode 100644 index 000000000000..75e68ed57f6d --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/union/uniontest.go @@ -0,0 +1,84 @@ +// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package union + +import ( + "github.com/gogo/protobuf/gogoproto" + "github.com/gogo/protobuf/plugin/testgen" + "github.com/gogo/protobuf/protoc-gen-gogo/generator" +) + +type test struct { + *generator.Generator +} + +func NewTest(g *generator.Generator) testgen.TestPlugin { + return &test{g} +} + +func (p *test) Generate(imports generator.PluginImports, file *generator.FileDescriptor) bool { + used := false + randPkg := imports.NewImport("math/rand") + timePkg := imports.NewImport("time") + testingPkg := imports.NewImport("testing") + for _, message := range file.Messages() { + if !gogoproto.IsUnion(file.FileDescriptorProto, message.DescriptorProto) || + !gogoproto.HasTestGen(file.FileDescriptorProto, message.DescriptorProto) { + continue + } + if message.DescriptorProto.GetOptions().GetMapEntry() { + continue + } + used = true + ccTypeName := generator.CamelCaseSlice(message.TypeName()) + + p.P(`func Test`, ccTypeName, `OnlyOne(t *`, testingPkg.Use(), `.T) {`) + p.In() + p.P(`popr := `, randPkg.Use(), `.New(`, randPkg.Use(), `.NewSource(`, timePkg.Use(), `.Now().UnixNano()))`) + p.P(`p := NewPopulated`, ccTypeName, `(popr, true)`) + p.P(`v := p.GetValue()`) + p.P(`msg := &`, ccTypeName, `{}`) + p.P(`if !msg.SetValue(v) {`) + p.In() + p.P(`t.Fatalf("OnlyOne: Could not set Value")`) + p.Out() + p.P(`}`) + p.P(`if !p.Equal(msg) {`) + p.In() + p.P(`t.Fatalf("%#v !OnlyOne Equal %#v", msg, p)`) + p.Out() + p.P(`}`) + p.Out() + p.P(`}`) + + } + return used +} + +func init() { + testgen.RegisterTestPlugin(NewTest) +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/unmarshal/unmarshal.go b/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/unmarshal/unmarshal.go new file mode 100644 index 000000000000..13285960988e --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/plugin/unmarshal/unmarshal.go @@ -0,0 +1,1328 @@ +// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +The unmarshal plugin generates an Unmarshal method for each message. +The `Unmarshal([]byte) error` method makes the message +implement the Unmarshaler interface.
+This allows proto.Unmarshal to be faster by calling the generated Unmarshal method rather than using reflection. + +It is enabled by the following extensions: + + - unmarshaler + - unmarshaler_all + +Or the following extensions: + + - unsafe_unmarshaler + - unsafe_unmarshaler_all + +Use these if you want the generated code to use the unsafe package. +The speed-up from using the unsafe package is not very significant. + +The generation of unmarshalling tests is enabled using one of the following extensions: + + - testgen + - testgen_all + +Benchmarks are enabled using one of the following extensions: + + - benchgen + - benchgen_all + +Let us look at: + + github.com/gogo/protobuf/test/example/example.proto + +Btw all the output can be seen at: + + github.com/gogo/protobuf/test/example/* + +The following message: + + option (gogoproto.unmarshaler_all) = true; + + message B { + option (gogoproto.description) = true; + optional A A = 1 [(gogoproto.nullable) = false, (gogoproto.embed) = true]; + repeated bytes G = 2 [(gogoproto.customtype) = "github.com/gogo/protobuf/test/custom.Uint128", (gogoproto.nullable) = false]; + } + +given to the unmarshal plugin, will generate the following code: + + func (m *B) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + switch fieldNum { + case 1: + if wireType != 2 { + return proto.ErrWrongType + } + var msglen int + for shift := uint(0); ; shift += 7 { + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.A.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return proto.ErrWrongType + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.G = append(m.G, github_com_gogo_protobuf_test_custom.Uint128{}) + if err := m.G[len(m.G)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + var sizeOfWire int + for { + sizeOfWire++ + wire >>= 7 + if wire == 0 { + break + } + } + iNdEx -= sizeOfWire + skippy, err := skip(data[iNdEx:]) + if err != nil { + return err + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + return nil + } + +Remember to call proto.Unmarshal rather than the generated method directly: +proto.Unmarshal calls m.Reset and then invokes the generated Unmarshal method for you. +If you call m.Unmarshal without m.Reset you could be merging protocol buffers.
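+
+For example, typical usage of the generated method goes through the proto
+package (an illustrative sketch using the message B from above; the
+enclosing function returning error is assumed):
+
+	msg := &B{}
+	if err := proto.Unmarshal(data, msg); err != nil { // calls msg.Reset, then msg.Unmarshal
+		return err
+	}
+	// calling msg.Unmarshal(data) again here, without an intervening
+	// msg.Reset(), would merge the decoded fields into msg rather than
+	// replace them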
+ +*/ +package unmarshal + +import ( + "fmt" + "strconv" + "strings" + + "github.com/gogo/protobuf/gogoproto" + "github.com/gogo/protobuf/proto" + descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" + "github.com/gogo/protobuf/protoc-gen-gogo/generator" +) + +type unmarshal struct { + *generator.Generator + unsafe bool + generator.PluginImports + atleastOne bool + ioPkg generator.Single + mathPkg generator.Single + unsafePkg generator.Single + localName string +} + +func NewUnmarshal() *unmarshal { + return &unmarshal{} +} + +func NewUnsafeUnmarshal() *unmarshal { + return &unmarshal{unsafe: true} +} + +func (p *unmarshal) Name() string { + if p.unsafe { + return "unsafeunmarshaler" + } + return "unmarshal" +} + +func (p *unmarshal) Init(g *generator.Generator) { + p.Generator = g +} + +func (p *unmarshal) decodeVarint(varName string, typName string) { + p.P(`for shift := uint(0); ; shift += 7 {`) + p.In() + p.P(`if shift >= 64 {`) + p.In() + p.P(`return ErrIntOverflow` + p.localName) + p.Out() + p.P(`}`) + p.P(`if iNdEx >= l {`) + p.In() + p.P(`return `, p.ioPkg.Use(), `.ErrUnexpectedEOF`) + p.Out() + p.P(`}`) + p.P(`b := data[iNdEx]`) + p.P(`iNdEx++`) + p.P(varName, ` |= (`, typName, `(b) & 0x7F) << shift`) + p.P(`if b < 0x80 {`) + p.In() + p.P(`break`) + p.Out() + p.P(`}`) + p.Out() + p.P(`}`) +} + +func (p *unmarshal) decodeFixed32(varName string, typeName string) { + p.P(`if (iNdEx+4) > l {`) + p.In() + p.P(`return `, p.ioPkg.Use(), `.ErrUnexpectedEOF`) + p.Out() + p.P(`}`) + p.P(`iNdEx += 4`) + p.P(varName, ` = `, typeName, `(data[iNdEx-4])`) + p.P(varName, ` |= `, typeName, `(data[iNdEx-3]) << 8`) + p.P(varName, ` |= `, typeName, `(data[iNdEx-2]) << 16`) + p.P(varName, ` |= `, typeName, `(data[iNdEx-1]) << 24`) +} + +func (p *unmarshal) unsafeFixed32(varName string, typeName string) { + p.P(`if iNdEx + 4 > l {`) + p.In() + p.P(`return `, p.ioPkg.Use(), `.ErrUnexpectedEOF`) + p.Out() + p.P(`}`) + p.P(varName, ` = *(*`, typeName, `)(`, p.unsafePkg.Use(), `.Pointer(&data[iNdEx]))`) + p.P(`iNdEx += 4`) +} + +func (p *unmarshal) decodeFixed64(varName string, typeName string) { + p.P(`if (iNdEx+8) > l {`) + p.In() + p.P(`return `, p.ioPkg.Use(), `.ErrUnexpectedEOF`) + p.Out() + p.P(`}`) + p.P(`iNdEx += 8`) + p.P(varName, ` = `, typeName, `(data[iNdEx-8])`) + p.P(varName, ` |= `, typeName, `(data[iNdEx-7]) << 8`) + p.P(varName, ` |= `, typeName, `(data[iNdEx-6]) << 16`) + p.P(varName, ` |= `, typeName, `(data[iNdEx-5]) << 24`) + p.P(varName, ` |= `, typeName, `(data[iNdEx-4]) << 32`) + p.P(varName, ` |= `, typeName, `(data[iNdEx-3]) << 40`) + p.P(varName, ` |= `, typeName, `(data[iNdEx-2]) << 48`) + p.P(varName, ` |= `, typeName, `(data[iNdEx-1]) << 56`) +} + +func (p *unmarshal) unsafeFixed64(varName string, typeName string) { + p.P(`if iNdEx + 8 > l {`) + p.In() + p.P(`return `, p.ioPkg.Use(), `.ErrUnexpectedEOF`) + p.Out() + p.P(`}`) + p.P(varName, ` = *(*`, typeName, `)(`, p.unsafePkg.Use(), `.Pointer(&data[iNdEx]))`) + p.P(`iNdEx += 8`) +} + +func (p *unmarshal) mapField(varName string, field *descriptor.FieldDescriptorProto) { + switch field.GetType() { + case descriptor.FieldDescriptorProto_TYPE_DOUBLE: + p.P(`var `, varName, `temp uint64`) + p.decodeFixed64(varName+"temp", "uint64") + p.P(varName, ` := `, p.mathPkg.Use(), `.Float64frombits(`, varName, `temp)`) + case descriptor.FieldDescriptorProto_TYPE_FLOAT: + p.P(`var `, varName, `temp uint32`) + p.decodeFixed32(varName+"temp", "uint32") + p.P(varName, ` := `, p.mathPkg.Use(), `.Float32frombits(`, varName, `temp)`) 
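+	// Note: the decodeFixed32/decodeFixed64 calls above emit code that
+	// assembles the little-endian wire bytes into an integer, and for the
+	// floating-point cases the bits are then reinterpreted with
+	// math.Float32frombits/Float64frombits. The emitted double decode is
+	// equivalent to this sketch (assuming encoding/binary were imported):
+	//
+	//	bits := binary.LittleEndian.Uint64(data[iNdEx : iNdEx+8])
+	//	v := math.Float64frombits(bits)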
+	case descriptor.FieldDescriptorProto_TYPE_INT64:
+		p.P(`var `, varName, ` int64`)
+		p.decodeVarint(varName, "int64")
+	case descriptor.FieldDescriptorProto_TYPE_UINT64:
+		p.P(`var `, varName, ` uint64`)
+		p.decodeVarint(varName, "uint64")
+	case descriptor.FieldDescriptorProto_TYPE_INT32:
+		p.P(`var `, varName, ` int32`)
+		p.decodeVarint(varName, "int32")
+	case descriptor.FieldDescriptorProto_TYPE_FIXED64:
+		p.P(`var `, varName, ` uint64`)
+		p.decodeFixed64(varName, "uint64")
+	case descriptor.FieldDescriptorProto_TYPE_FIXED32:
+		p.P(`var `, varName, ` uint32`)
+		p.decodeFixed32(varName, "uint32")
+	case descriptor.FieldDescriptorProto_TYPE_BOOL:
+		p.P(`var `, varName, `temp int`)
+		p.decodeVarint(varName+"temp", "int")
+		p.P(varName, ` := bool(`, varName, `temp != 0)`)
+	case descriptor.FieldDescriptorProto_TYPE_STRING:
+		p.P(`var stringLen`, varName, ` uint64`)
+		p.decodeVarint("stringLen"+varName, "uint64")
+		p.P(`intStringLen`, varName, ` := int(stringLen`, varName, `)`)
+		p.P(`if intStringLen`, varName, ` < 0 {`)
+		p.In()
+		p.P(`return ErrInvalidLength` + p.localName)
+		p.Out()
+		p.P(`}`)
+		p.P(`postStringIndex`, varName, ` := iNdEx + intStringLen`, varName)
+		p.P(`if postStringIndex`, varName, ` > l {`)
+		p.In()
+		p.P(`return `, p.ioPkg.Use(), `.ErrUnexpectedEOF`)
+		p.Out()
+		p.P(`}`)
+		cast, _ := p.GoType(nil, field)
+		cast = strings.Replace(cast, "*", "", 1)
+		p.P(varName, ` := `, cast, `(data[iNdEx:postStringIndex`, varName, `])`)
+		p.P(`iNdEx = postStringIndex`, varName)
+	case descriptor.FieldDescriptorProto_TYPE_MESSAGE:
+		p.P(`var mapmsglen int`)
+		p.decodeVarint("mapmsglen", "int")
+		p.P(`if mapmsglen < 0 {`)
+		p.In()
+		p.P(`return ErrInvalidLength` + p.localName)
+		p.Out()
+		p.P(`}`)
+		p.P(`postmsgIndex := iNdEx + mapmsglen`)
+		p.P(`if postmsgIndex < 0 {`)
+		p.In()
+		p.P(`return ErrInvalidLength` + p.localName)
+		p.Out()
+		p.P(`}`)
+		p.P(`if postmsgIndex > l {`)
+		p.In()
+		p.P(`return `, p.ioPkg.Use(), `.ErrUnexpectedEOF`)
+		p.Out()
+		p.P(`}`)
+		desc := p.ObjectNamed(field.GetTypeName())
+		msgname := p.TypeName(desc)
+		p.P(varName, ` := &`, msgname, `{}`)
+		p.P(`if err := `, varName, `.Unmarshal(data[iNdEx:postmsgIndex]); err != nil {`)
+		p.In()
+		p.P(`return err`)
+		p.Out()
+		p.P(`}`)
+		p.P(`iNdEx = postmsgIndex`)
+	case descriptor.FieldDescriptorProto_TYPE_BYTES:
+		p.P(`var mapbyteLen uint64`)
+		p.decodeVarint("mapbyteLen", "uint64")
+		p.P(`intMapbyteLen := int(mapbyteLen)`)
+		p.P(`if intMapbyteLen < 0 {`)
+		p.In()
+		p.P(`return ErrInvalidLength` + p.localName)
+		p.Out()
+		p.P(`}`)
+		p.P(`postbytesIndex := iNdEx + intMapbyteLen`)
+		p.P(`if postbytesIndex > l {`)
+		p.In()
+		p.P(`return `, p.ioPkg.Use(), `.ErrUnexpectedEOF`)
+		p.Out()
+		p.P(`}`)
+		p.P(varName, ` := make([]byte, mapbyteLen)`)
+		p.P(`copy(`, varName, `, data[iNdEx:postbytesIndex])`)
+		p.P(`iNdEx = postbytesIndex`)
+	case descriptor.FieldDescriptorProto_TYPE_UINT32:
+		p.P(`var `, varName, ` uint32`)
+		p.decodeVarint(varName, "uint32")
+	case descriptor.FieldDescriptorProto_TYPE_ENUM:
+		typName := p.TypeName(p.ObjectNamed(field.GetTypeName()))
+		p.P(`var `, varName, ` `, typName)
+		p.decodeVarint(varName, typName)
+	case descriptor.FieldDescriptorProto_TYPE_SFIXED32:
+		p.P(`var `, varName, ` int32`)
+		p.decodeFixed32(varName, "int32")
+	case descriptor.FieldDescriptorProto_TYPE_SFIXED64:
+		p.P(`var `, varName, ` int64`)
+		p.decodeFixed64(varName, "int64")
+	case descriptor.FieldDescriptorProto_TYPE_SINT32:
+		p.P(`var `, varName, `temp int32`)
+		p.decodeVarint(varName+"temp", "int32")
+		p.P(varName, `temp = 
int32((uint32(`, varName, `temp) >> 1) ^ uint32(((`, varName, `temp&1)<<31)>>31))`) + p.P(varName, ` := int32(`, varName, `temp)`) + case descriptor.FieldDescriptorProto_TYPE_SINT64: + p.P(`var `, varName, `temp uint64`) + p.decodeVarint(varName+"temp", "uint64") + p.P(varName, `temp = (`, varName, `temp >> 1) ^ uint64((int64(`, varName, `temp&1)<<63)>>63)`) + p.P(varName, ` := int64(`, varName, `temp)`) + } +} + +func (p *unmarshal) noStarOrSliceType(msg *generator.Descriptor, field *descriptor.FieldDescriptorProto) string { + typ, _ := p.GoType(msg, field) + if typ[0] == '*' { + return typ[1:] + } + if typ[0] == '[' && typ[1] == ']' { + return typ[2:] + } + return typ +} + +func (p *unmarshal) field(file *generator.FileDescriptor, msg *generator.Descriptor, field *descriptor.FieldDescriptorProto, fieldname string, proto3 bool) { + repeated := field.IsRepeated() + nullable := gogoproto.IsNullable(field) + typ := p.noStarOrSliceType(msg, field) + oneof := field.OneofIndex != nil + switch *field.Type { + case descriptor.FieldDescriptorProto_TYPE_DOUBLE: + if !p.unsafe || gogoproto.IsCastType(field) { + p.P(`var v uint64`) + p.decodeFixed64("v", "uint64") + if oneof { + p.P(`m.`, fieldname, ` = &`, p.OneOfTypeName(msg, field), `{`, typ, "(", p.mathPkg.Use(), `.Float64frombits(v))}`) + } else if repeated { + p.P(`v2 := `, typ, "(", p.mathPkg.Use(), `.Float64frombits(v))`) + p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, v2)`) + } else if proto3 || !nullable { + p.P(`m.`, fieldname, ` = `, typ, "(", p.mathPkg.Use(), `.Float64frombits(v))`) + } else { + p.P(`v2 := `, typ, "(", p.mathPkg.Use(), `.Float64frombits(v))`) + p.P(`m.`, fieldname, ` = &v2`) + } + } else { + if oneof { + p.P(`var v float64`) + p.unsafeFixed64("v", "float64") + p.P(`m.`, fieldname, ` = &`, p.OneOfTypeName(msg, field), `{v}`) + } else if repeated { + p.P(`var v float64`) + p.unsafeFixed64("v", "float64") + p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, v)`) + } else if proto3 || !nullable { + p.unsafeFixed64(`m.`+fieldname, "float64") + } else { + p.P(`var v float64`) + p.unsafeFixed64("v", "float64") + p.P(`m.`, fieldname, ` = &v`) + } + } + case descriptor.FieldDescriptorProto_TYPE_FLOAT: + if !p.unsafe || gogoproto.IsCastType(field) { + p.P(`var v uint32`) + p.decodeFixed32("v", "uint32") + if oneof { + p.P(`m.`, fieldname, ` = &`, p.OneOfTypeName(msg, field), `{`, typ, "(", p.mathPkg.Use(), `.Float32frombits(v))}`) + } else if repeated { + p.P(`v2 := `, typ, "(", p.mathPkg.Use(), `.Float32frombits(v))`) + p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, v2)`) + } else if proto3 || !nullable { + p.P(`m.`, fieldname, ` = `, typ, "(", p.mathPkg.Use(), `.Float32frombits(v))`) + } else { + p.P(`v2 := `, typ, "(", p.mathPkg.Use(), `.Float32frombits(v))`) + p.P(`m.`, fieldname, ` = &v2`) + } + } else { + if oneof { + p.P(`var v float32`) + p.unsafeFixed32("v", "float32") + p.P(`m.`, fieldname, ` = &`, p.OneOfTypeName(msg, field), `{v}`) + } else if repeated { + p.P(`var v float32`) + p.unsafeFixed32("v", "float32") + p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, v)`) + } else if proto3 || !nullable { + p.unsafeFixed32("m."+fieldname, "float32") + } else { + p.P(`var v float32`) + p.unsafeFixed32("v", "float32") + p.P(`m.`, fieldname, ` = &v`) + } + } + case descriptor.FieldDescriptorProto_TYPE_INT64: + if oneof { + p.P(`var v `, typ) + p.decodeVarint("v", typ) + p.P(`m.`, fieldname, ` = &`, p.OneOfTypeName(msg, field), `{v}`) + } else if repeated { + p.P(`var v `, typ) + p.decodeVarint("v", typ) + p.P(`m.`, 
fieldname, ` = append(m.`, fieldname, `, v)`) + } else if proto3 || !nullable { + p.P(`m.`, fieldname, ` = 0`) + p.decodeVarint("m."+fieldname, typ) + } else { + p.P(`var v `, typ) + p.decodeVarint("v", typ) + p.P(`m.`, fieldname, ` = &v`) + } + case descriptor.FieldDescriptorProto_TYPE_UINT64: + if oneof { + p.P(`var v `, typ) + p.decodeVarint("v", typ) + p.P(`m.`, fieldname, ` = &`, p.OneOfTypeName(msg, field), `{v}`) + } else if repeated { + p.P(`var v `, typ) + p.decodeVarint("v", typ) + p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, v)`) + } else if proto3 || !nullable { + p.P(`m.`, fieldname, ` = 0`) + p.decodeVarint("m."+fieldname, typ) + } else { + p.P(`var v `, typ) + p.decodeVarint("v", typ) + p.P(`m.`, fieldname, ` = &v`) + } + case descriptor.FieldDescriptorProto_TYPE_INT32: + if oneof { + p.P(`var v `, typ) + p.decodeVarint("v", typ) + p.P(`m.`, fieldname, ` = &`, p.OneOfTypeName(msg, field), `{v}`) + } else if repeated { + p.P(`var v `, typ) + p.decodeVarint("v", typ) + p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, v)`) + } else if proto3 || !nullable { + p.P(`m.`, fieldname, ` = 0`) + p.decodeVarint("m."+fieldname, typ) + } else { + p.P(`var v `, typ) + p.decodeVarint("v", typ) + p.P(`m.`, fieldname, ` = &v`) + } + case descriptor.FieldDescriptorProto_TYPE_FIXED64: + if !p.unsafe || gogoproto.IsCastType(field) { + if oneof { + p.P(`var v `, typ) + p.decodeFixed64("v", typ) + p.P(`m.`, fieldname, ` = &`, p.OneOfTypeName(msg, field), `{v}`) + } else if repeated { + p.P(`var v `, typ) + p.decodeFixed64("v", typ) + p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, v)`) + } else if proto3 || !nullable { + p.P(`m.`, fieldname, ` = 0`) + p.decodeFixed64("m."+fieldname, typ) + } else { + p.P(`var v `, typ) + p.decodeFixed64("v", typ) + p.P(`m.`, fieldname, ` = &v`) + } + } else { + if oneof { + p.P(`var v uint64`) + p.unsafeFixed64("v", "uint64") + p.P(`m.`, fieldname, ` = &`, p.OneOfTypeName(msg, field), `{v}`) + } else if repeated { + p.P(`var v uint64`) + p.unsafeFixed64("v", "uint64") + p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, v)`) + } else if proto3 || !nullable { + p.unsafeFixed64("m."+fieldname, "uint64") + } else { + p.P(`var v uint64`) + p.unsafeFixed64("v", "uint64") + p.P(`m.`, fieldname, ` = &v`) + } + } + case descriptor.FieldDescriptorProto_TYPE_FIXED32: + if !p.unsafe || gogoproto.IsCastType(field) { + if oneof { + p.P(`var v `, typ) + p.decodeFixed32("v", typ) + p.P(`m.`, fieldname, ` = &`, p.OneOfTypeName(msg, field), `{v}`) + } else if repeated { + p.P(`var v `, typ) + p.decodeFixed32("v", typ) + p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, v)`) + } else if proto3 || !nullable { + p.P(`m.`, fieldname, ` = 0`) + p.decodeFixed32("m."+fieldname, typ) + } else { + p.P(`var v `, typ) + p.decodeFixed32("v", typ) + p.P(`m.`, fieldname, ` = &v`) + } + } else { + if oneof { + p.P(`var v uint32`) + p.unsafeFixed32("v", "uint32") + p.P(`m.`, fieldname, ` = &`, p.OneOfTypeName(msg, field), `{v}`) + } else if repeated { + p.P(`var v uint32`) + p.unsafeFixed32("v", "uint32") + p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, v)`) + } else if proto3 || !nullable { + p.unsafeFixed32("m."+fieldname, "uint32") + } else { + p.P(`var v uint32`) + p.unsafeFixed32("v", "uint32") + p.P(`m.`, fieldname, ` = &v`) + } + } + case descriptor.FieldDescriptorProto_TYPE_BOOL: + p.P(`var v int`) + p.decodeVarint("v", "int") + if oneof { + p.P(`b := `, typ, `(v != 0)`) + p.P(`m.`, fieldname, ` = &`, p.OneOfTypeName(msg, field), `{b}`) + } else if repeated { + p.P(`m.`, 
fieldname, ` = append(m.`, fieldname, `, `, typ, `(v != 0))`) + } else if proto3 || !nullable { + p.P(`m.`, fieldname, ` = `, typ, `(v != 0)`) + } else { + p.P(`b := `, typ, `(v != 0)`) + p.P(`m.`, fieldname, ` = &b`) + } + case descriptor.FieldDescriptorProto_TYPE_STRING: + p.P(`var stringLen uint64`) + p.decodeVarint("stringLen", "uint64") + p.P(`intStringLen := int(stringLen)`) + p.P(`if intStringLen < 0 {`) + p.In() + p.P(`return ErrInvalidLength` + p.localName) + p.Out() + p.P(`}`) + p.P(`postIndex := iNdEx + intStringLen`) + p.P(`if postIndex > l {`) + p.In() + p.P(`return `, p.ioPkg.Use(), `.ErrUnexpectedEOF`) + p.Out() + p.P(`}`) + if oneof { + p.P(`m.`, fieldname, ` = &`, p.OneOfTypeName(msg, field), `{`, typ, `(data[iNdEx:postIndex])}`) + } else if repeated { + p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, `, typ, `(data[iNdEx:postIndex]))`) + } else if proto3 || !nullable { + p.P(`m.`, fieldname, ` = `, typ, `(data[iNdEx:postIndex])`) + } else { + p.P(`s := `, typ, `(data[iNdEx:postIndex])`) + p.P(`m.`, fieldname, ` = &s`) + } + p.P(`iNdEx = postIndex`) + case descriptor.FieldDescriptorProto_TYPE_GROUP: + panic(fmt.Errorf("unmarshaler does not support group %v", fieldname)) + case descriptor.FieldDescriptorProto_TYPE_MESSAGE: + desc := p.ObjectNamed(field.GetTypeName()) + msgname := p.TypeName(desc) + p.P(`var msglen int`) + p.decodeVarint("msglen", "int") + p.P(`if msglen < 0 {`) + p.In() + p.P(`return ErrInvalidLength` + p.localName) + p.Out() + p.P(`}`) + p.P(`postIndex := iNdEx + msglen`) + p.P(`if postIndex > l {`) + p.In() + p.P(`return `, p.ioPkg.Use(), `.ErrUnexpectedEOF`) + p.Out() + p.P(`}`) + if oneof { + p.P(`v := &`, msgname, `{}`) + p.P(`if err := v.Unmarshal(data[iNdEx:postIndex]); err != nil {`) + p.In() + p.P(`return err`) + p.Out() + p.P(`}`) + p.P(`m.`, fieldname, ` = &`, p.OneOfTypeName(msg, field), `{v}`) + } else if generator.IsMap(file.FileDescriptorProto, field) { + m := p.GoMapType(nil, field) + + keygoTyp, _ := p.GoType(nil, m.KeyField) + keygoAliasTyp, _ := p.GoType(nil, m.KeyAliasField) + // keys may not be pointers + keygoTyp = strings.Replace(keygoTyp, "*", "", 1) + keygoAliasTyp = strings.Replace(keygoAliasTyp, "*", "", 1) + + valuegoTyp, _ := p.GoType(nil, m.ValueField) + valuegoAliasTyp, _ := p.GoType(nil, m.ValueAliasField) + + // if the map type is an alias and key or values are aliases (type Foo map[Bar]Baz), + // we need to explicitly record their use here. 
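+		// Sketch of the emitted map-entry decoder (illustrative only;
+		// m.Field stands in for the real field name, <K>/<V> for the
+		// concrete key and value types):
+		//
+		//	var keykey uint64   // tag/wire of the key field
+		//	var mapkey <K>      // decoded key
+		//	var valuekey uint64 // tag/wire of the value field
+		//	var mapvalue <V>    // decoded value
+		//	if m.Field == nil {
+		//		m.Field = make(map[<K>]<V>)
+		//	}
+		//	m.Field[mapkey] = mapvalue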
+ p.RecordTypeUse(m.KeyAliasField.GetTypeName()) + p.RecordTypeUse(m.ValueAliasField.GetTypeName()) + + nullable, valuegoTyp, valuegoAliasTyp = generator.GoMapValueTypes(field, m.ValueField, valuegoTyp, valuegoAliasTyp) + + p.P(`var keykey uint64`) + p.decodeVarint("keykey", "uint64") + p.mapField("mapkey", m.KeyAliasField) + p.P(`var valuekey uint64`) + p.decodeVarint("valuekey", "uint64") + p.mapField("mapvalue", m.ValueAliasField) + p.P(`if m.`, fieldname, ` == nil {`) + p.In() + p.P(`m.`, fieldname, ` = make(`, m.GoType, `)`) + p.Out() + p.P(`}`) + s := `m.` + fieldname + if keygoTyp == keygoAliasTyp { + s += `[mapkey]` + } else { + s += `[` + keygoAliasTyp + `(mapkey)]` + } + v := `mapvalue` + if m.ValueField.IsMessage() && !nullable { + v = `*` + v + } + if valuegoTyp != valuegoAliasTyp { + v = `((` + valuegoAliasTyp + `)(` + v + `))` + } + p.P(s, ` = `, v) + } else if repeated { + if nullable { + p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, &`, msgname, `{})`) + } else { + p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, `, msgname, `{})`) + } + p.P(`if err := m.`, fieldname, `[len(m.`, fieldname, `)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {`) + p.In() + p.P(`return err`) + p.Out() + p.P(`}`) + } else if nullable { + p.P(`if m.`, fieldname, ` == nil {`) + p.In() + p.P(`m.`, fieldname, ` = &`, msgname, `{}`) + p.Out() + p.P(`}`) + p.P(`if err := m.`, fieldname, `.Unmarshal(data[iNdEx:postIndex]); err != nil {`) + p.In() + p.P(`return err`) + p.Out() + p.P(`}`) + } else { + p.P(`if err := m.`, fieldname, `.Unmarshal(data[iNdEx:postIndex]); err != nil {`) + p.In() + p.P(`return err`) + p.Out() + p.P(`}`) + } + p.P(`iNdEx = postIndex`) + case descriptor.FieldDescriptorProto_TYPE_BYTES: + p.P(`var byteLen int`) + p.decodeVarint("byteLen", "int") + p.P(`if byteLen < 0 {`) + p.In() + p.P(`return ErrInvalidLength` + p.localName) + p.Out() + p.P(`}`) + p.P(`postIndex := iNdEx + byteLen`) + p.P(`if postIndex > l {`) + p.In() + p.P(`return `, p.ioPkg.Use(), `.ErrUnexpectedEOF`) + p.Out() + p.P(`}`) + if !gogoproto.IsCustomType(field) { + if oneof { + p.P(`v := make([]byte, postIndex-iNdEx)`) + p.P(`copy(v, data[iNdEx:postIndex])`) + p.P(`m.`, fieldname, ` = &`, p.OneOfTypeName(msg, field), `{v}`) + } else if repeated { + p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, make([]byte, postIndex-iNdEx))`) + p.P(`copy(m.`, fieldname, `[len(m.`, fieldname, `)-1], data[iNdEx:postIndex])`) + } else { + p.P(`m.`, fieldname, ` = append(m.`, fieldname, `[:0] , data[iNdEx:postIndex]...)`) + p.P(`if m.`, fieldname, ` == nil {`) + p.In() + p.P(`m.`, fieldname, ` = []byte{}`) + p.Out() + p.P(`}`) + } + } else { + _, ctyp, err := generator.GetCustomType(field) + if err != nil { + panic(err) + } + if oneof { + p.P(`var vv `, ctyp) + p.P(`v := &vv`) + p.P(`if err := v.Unmarshal(data[iNdEx:postIndex]); err != nil {`) + p.In() + p.P(`return err`) + p.Out() + p.P(`}`) + p.P(`m.`, fieldname, ` = &`, p.OneOfTypeName(msg, field), `{*v}`) + } else if repeated { + p.P(`var v `, ctyp) + p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, v)`) + p.P(`if err := m.`, fieldname, `[len(m.`, fieldname, `)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {`) + p.In() + p.P(`return err`) + p.Out() + p.P(`}`) + } else if nullable { + p.P(`var v `, ctyp) + p.P(`m.`, fieldname, ` = &v`) + p.P(`if err := m.`, fieldname, `.Unmarshal(data[iNdEx:postIndex]); err != nil {`) + p.In() + p.P(`return err`) + p.Out() + p.P(`}`) + } else { + p.P(`if err := m.`, fieldname, `.Unmarshal(data[iNdEx:postIndex]); err != nil {`) + 
p.In() + p.P(`return err`) + p.Out() + p.P(`}`) + } + } + p.P(`iNdEx = postIndex`) + case descriptor.FieldDescriptorProto_TYPE_UINT32: + if oneof { + p.P(`var v `, typ) + p.decodeVarint("v", typ) + p.P(`m.`, fieldname, ` = &`, p.OneOfTypeName(msg, field), `{v}`) + } else if repeated { + p.P(`var v `, typ) + p.decodeVarint("v", typ) + p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, v)`) + } else if proto3 || !nullable { + p.P(`m.`, fieldname, ` = 0`) + p.decodeVarint("m."+fieldname, typ) + } else { + p.P(`var v `, typ) + p.decodeVarint("v", typ) + p.P(`m.`, fieldname, ` = &v`) + } + case descriptor.FieldDescriptorProto_TYPE_ENUM: + typName := p.TypeName(p.ObjectNamed(field.GetTypeName())) + if oneof { + p.P(`var v `, typName) + p.decodeVarint("v", typName) + p.P(`m.`, fieldname, ` = &`, p.OneOfTypeName(msg, field), `{v}`) + } else if repeated { + p.P(`var v `, typName) + p.decodeVarint("v", typName) + p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, v)`) + } else if proto3 || !nullable { + p.P(`m.`, fieldname, ` = 0`) + p.decodeVarint("m."+fieldname, typName) + } else { + p.P(`var v `, typName) + p.decodeVarint("v", typName) + p.P(`m.`, fieldname, ` = &v`) + } + case descriptor.FieldDescriptorProto_TYPE_SFIXED32: + if !p.unsafe || gogoproto.IsCastType(field) { + if oneof { + p.P(`var v `, typ) + p.decodeFixed32("v", typ) + p.P(`m.`, fieldname, ` = &`, p.OneOfTypeName(msg, field), `{v}`) + } else if repeated { + p.P(`var v `, typ) + p.decodeFixed32("v", typ) + p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, v)`) + } else if proto3 || !nullable { + p.P(`m.`, fieldname, ` = 0`) + p.decodeFixed32("m."+fieldname, typ) + } else { + p.P(`var v `, typ) + p.decodeFixed32("v", typ) + p.P(`m.`, fieldname, ` = &v`) + } + } else { + if oneof { + p.P(`var v int32`) + p.unsafeFixed32("v", "int32") + p.P(`m.`, fieldname, ` = &`, p.OneOfTypeName(msg, field), `{v}`) + } else if repeated { + p.P(`var v int32`) + p.unsafeFixed32("v", "int32") + p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, v)`) + } else if proto3 || !nullable { + p.unsafeFixed32("m."+fieldname, "int32") + } else { + p.P(`var v int32`) + p.unsafeFixed32("v", "int32") + p.P(`m.`, fieldname, ` = &v`) + } + } + case descriptor.FieldDescriptorProto_TYPE_SFIXED64: + if !p.unsafe || gogoproto.IsCastType(field) { + if oneof { + p.P(`var v `, typ) + p.decodeFixed64("v", typ) + p.P(`m.`, fieldname, ` = &`, p.OneOfTypeName(msg, field), `{v}`) + } else if repeated { + p.P(`var v `, typ) + p.decodeFixed64("v", typ) + p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, v)`) + } else if proto3 || !nullable { + p.P(`m.`, fieldname, ` = 0`) + p.decodeFixed64("m."+fieldname, typ) + } else { + p.P(`var v `, typ) + p.decodeFixed64("v", typ) + p.P(`m.`, fieldname, ` = &v`) + } + } else { + if oneof { + p.P(`var v int64`) + p.unsafeFixed64("v", "int64") + p.P(`m.`, fieldname, ` = &`, p.OneOfTypeName(msg, field), `{v}`) + } else if repeated { + p.P(`var v int64`) + p.unsafeFixed64("v", "int64") + p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, v)`) + } else if proto3 || !nullable { + p.unsafeFixed64("m."+fieldname, "int64") + } else { + p.P(`var v int64`) + p.unsafeFixed64("v", "int64") + p.P(`m.`, fieldname, ` = &v`) + } + } + case descriptor.FieldDescriptorProto_TYPE_SINT32: + p.P(`var v `, typ) + p.decodeVarint("v", typ) + p.P(`v = `, typ, `((uint32(v) >> 1) ^ uint32(((v&1)<<31)>>31))`) + if oneof { + p.P(`m.`, fieldname, ` = &`, p.OneOfTypeName(msg, field), `{v}`) + } else if repeated { + p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, v)`) + } 
else if proto3 || !nullable { + p.P(`m.`, fieldname, ` = v`) + } else { + p.P(`m.`, fieldname, ` = &v`) + } + case descriptor.FieldDescriptorProto_TYPE_SINT64: + p.P(`var v uint64`) + p.decodeVarint("v", "uint64") + p.P(`v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63)`) + if oneof { + p.P(`m.`, fieldname, ` = &`, p.OneOfTypeName(msg, field), `{`, typ, `(v)}`) + } else if repeated { + p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, `, typ, `(v))`) + } else if proto3 || !nullable { + p.P(`m.`, fieldname, ` = `, typ, `(v)`) + } else { + p.P(`v2 := `, typ, `(v)`) + p.P(`m.`, fieldname, ` = &v2`) + } + default: + panic("not implemented") + } +} + +func (p *unmarshal) Generate(file *generator.FileDescriptor) { + proto3 := gogoproto.IsProto3(file.FileDescriptorProto) + p.PluginImports = generator.NewPluginImports(p.Generator) + p.atleastOne = false + p.localName = generator.FileName(file) + if p.unsafe { + p.localName += "Unsafe" + } + + p.ioPkg = p.NewImport("io") + p.mathPkg = p.NewImport("math") + p.unsafePkg = p.NewImport("unsafe") + fmtPkg := p.NewImport("fmt") + protoPkg := p.NewImport("github.com/gogo/protobuf/proto") + if !gogoproto.ImportsGoGoProto(file.FileDescriptorProto) { + protoPkg = p.NewImport("github.com/golang/protobuf/proto") + } + + for _, message := range file.Messages() { + ccTypeName := generator.CamelCaseSlice(message.TypeName()) + if p.unsafe { + if !gogoproto.IsUnsafeUnmarshaler(file.FileDescriptorProto, message.DescriptorProto) { + continue + } + if gogoproto.IsUnmarshaler(file.FileDescriptorProto, message.DescriptorProto) { + panic(fmt.Sprintf("unsafe_unmarshaler and unmarshaler enabled for %v", ccTypeName)) + } + } + if !p.unsafe { + if !gogoproto.IsUnmarshaler(file.FileDescriptorProto, message.DescriptorProto) { + continue + } + if gogoproto.IsUnsafeUnmarshaler(file.FileDescriptorProto, message.DescriptorProto) { + panic(fmt.Sprintf("unsafe_unmarshaler and unmarshaler enabled for %v", ccTypeName)) + } + } + if message.DescriptorProto.GetOptions().GetMapEntry() { + continue + } + p.atleastOne = true + + // build a map required field_id -> bitmask offset + rfMap := make(map[int32]uint) + rfNextId := uint(0) + for _, field := range message.Field { + if field.IsRequired() { + rfMap[field.GetNumber()] = rfNextId + rfNextId++ + } + } + rfCount := len(rfMap) + + p.P(`func (m *`, ccTypeName, `) Unmarshal(data []byte) error {`) + p.In() + if rfCount > 0 { + p.P(`var hasFields [`, strconv.Itoa(1+(rfCount-1)/64), `]uint64`) + } + p.P(`l := len(data)`) + p.P(`iNdEx := 0`) + p.P(`for iNdEx < l {`) + p.In() + p.P(`preIndex := iNdEx`) + p.P(`var wire uint64`) + p.decodeVarint("wire", "uint64") + p.P(`fieldNum := int32(wire >> 3)`) + if len(message.Field) > 0 || !message.IsGroup() { + p.P(`wireType := int(wire & 0x7)`) + } + if !message.IsGroup() { + p.P(`if wireType == `, strconv.Itoa(proto.WireEndGroup), ` {`) + p.In() + p.P(`return `, fmtPkg.Use(), `.Errorf("proto: `+message.GetName()+`: wiretype end group for non-group")`) + p.Out() + p.P(`}`) + } + p.P(`if fieldNum <= 0 {`) + p.In() + p.P(`return `, fmtPkg.Use(), `.Errorf("proto: `+message.GetName()+`: illegal tag %d (wire type %d)", fieldNum, wire)`) + p.Out() + p.P(`}`) + p.P(`switch fieldNum {`) + p.In() + for _, field := range message.Field { + fieldname := p.GetFieldName(message, field) + errFieldname := fieldname + if field.OneofIndex != nil { + errFieldname = p.GetOneOfFieldName(message, field) + } + packed := field.IsPacked() + p.P(`case `, strconv.Itoa(int(field.GetNumber())), `:`) + p.In() + wireType := 
field.WireType() + if packed { + p.P(`if wireType == `, strconv.Itoa(proto.WireBytes), `{`) + p.In() + p.P(`var packedLen int`) + p.decodeVarint("packedLen", "int") + p.P(`if packedLen < 0 {`) + p.In() + p.P(`return ErrInvalidLength` + p.localName) + p.Out() + p.P(`}`) + p.P(`postIndex := iNdEx + packedLen`) + p.P(`if postIndex > l {`) + p.In() + p.P(`return `, p.ioPkg.Use(), `.ErrUnexpectedEOF`) + p.Out() + p.P(`}`) + p.P(`for iNdEx < postIndex {`) + p.In() + p.field(file, message, field, fieldname, false) + p.Out() + p.P(`}`) + p.Out() + p.P(`} else if wireType == `, strconv.Itoa(wireType), `{`) + p.In() + p.field(file, message, field, fieldname, false) + p.Out() + p.P(`} else {`) + p.In() + p.P(`return ` + fmtPkg.Use() + `.Errorf("proto: wrong wireType = %d for field ` + errFieldname + `", wireType)`) + p.Out() + p.P(`}`) + } else { + p.P(`if wireType != `, strconv.Itoa(wireType), `{`) + p.In() + p.P(`return ` + fmtPkg.Use() + `.Errorf("proto: wrong wireType = %d for field ` + errFieldname + `", wireType)`) + p.Out() + p.P(`}`) + p.field(file, message, field, fieldname, proto3) + } + + if field.IsRequired() { + fieldBit, ok := rfMap[field.GetNumber()] + if !ok { + panic("field is required, but no bit registered") + } + p.P(`hasFields[`, strconv.Itoa(int(fieldBit/64)), `] |= uint64(`, fmt.Sprintf("0x%08x", 1<<(fieldBit%64)), `)`) + } + } + p.Out() + p.P(`default:`) + p.In() + if message.DescriptorProto.HasExtension() { + c := []string{} + for _, erange := range message.GetExtensionRange() { + c = append(c, `((fieldNum >= `+strconv.Itoa(int(erange.GetStart()))+") && (fieldNum<"+strconv.Itoa(int(erange.GetEnd()))+`))`) + } + p.P(`if `, strings.Join(c, "||"), `{`) + p.In() + p.P(`var sizeOfWire int`) + p.P(`for {`) + p.In() + p.P(`sizeOfWire++`) + p.P(`wire >>= 7`) + p.P(`if wire == 0 {`) + p.In() + p.P(`break`) + p.Out() + p.P(`}`) + p.Out() + p.P(`}`) + p.P(`iNdEx-=sizeOfWire`) + p.P(`skippy, err := skip`, p.localName+`(data[iNdEx:])`) + p.P(`if err != nil {`) + p.In() + p.P(`return err`) + p.Out() + p.P(`}`) + p.P(`if skippy < 0 {`) + p.In() + p.P(`return ErrInvalidLength`, p.localName) + p.Out() + p.P(`}`) + p.P(`if (iNdEx + skippy) > l {`) + p.In() + p.P(`return `, p.ioPkg.Use(), `.ErrUnexpectedEOF`) + p.Out() + p.P(`}`) + if gogoproto.HasExtensionsMap(file.FileDescriptorProto, message.DescriptorProto) { + p.P(`if m.XXX_extensions == nil {`) + p.In() + p.P(`m.XXX_extensions = make(map[int32]`, protoPkg.Use(), `.Extension)`) + p.Out() + p.P(`}`) + p.P(`m.XXX_extensions[int32(fieldNum)] = `, protoPkg.Use(), `.NewExtension(data[iNdEx:iNdEx+skippy])`) + } else { + p.P(`m.XXX_extensions = append(m.XXX_extensions, data[iNdEx:iNdEx+skippy]...)`) + } + p.P(`iNdEx += skippy`) + p.Out() + p.P(`} else {`) + p.In() + } + p.P(`iNdEx=preIndex`) + p.P(`skippy, err := skip`, p.localName, `(data[iNdEx:])`) + p.P(`if err != nil {`) + p.In() + p.P(`return err`) + p.Out() + p.P(`}`) + p.P(`if skippy < 0 {`) + p.In() + p.P(`return ErrInvalidLength`, p.localName) + p.Out() + p.P(`}`) + p.P(`if (iNdEx + skippy) > l {`) + p.In() + p.P(`return `, p.ioPkg.Use(), `.ErrUnexpectedEOF`) + p.Out() + p.P(`}`) + if gogoproto.HasUnrecognized(file.FileDescriptorProto, message.DescriptorProto) { + p.P(`m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...)`) + } + p.P(`iNdEx += skippy`) + p.Out() + if message.DescriptorProto.HasExtension() { + p.Out() + p.P(`}`) + } + p.Out() + p.P(`}`) + p.Out() + p.P(`}`) + + for _, field := range message.Field { + if !field.IsRequired() { + continue + } + + 
fieldBit, ok := rfMap[field.GetNumber()] + if !ok { + panic("field is required, but no bit registered") + } + + p.P(`if hasFields[`, strconv.Itoa(int(fieldBit/64)), `] & uint64(`, fmt.Sprintf("0x%08x", 1<<(fieldBit%64)), `) == 0 {`) + p.In() + if !gogoproto.ImportsGoGoProto(file.FileDescriptorProto) { + p.P(`return new(`, protoPkg.Use(), `.RequiredNotSetError)`) + } else { + p.P(`return `, protoPkg.Use(), `.NewRequiredNotSetError("`, field.GetName(), `")`) + } + p.Out() + p.P(`}`) + } + p.P() + p.P(`if iNdEx > l {`) + p.In() + p.P(`return ` + p.ioPkg.Use() + `.ErrUnexpectedEOF`) + p.Out() + p.P(`}`) + p.P(`return nil`) + p.Out() + p.P(`}`) + } + if !p.atleastOne { + return + } + + p.P(`func skip` + p.localName + `(data []byte) (n int, err error) { + l := len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflow` + p.localName + ` + } + if iNdEx >= l { + return 0, ` + p.ioPkg.Use() + `.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflow` + p.localName + ` + } + if iNdEx >= l { + return 0, ` + p.ioPkg.Use() + `.ErrUnexpectedEOF + } + iNdEx++ + if data[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflow` + p.localName + ` + } + if iNdEx >= l { + return 0, ` + p.ioPkg.Use() + `.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLength` + p.localName + ` + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflow` + p.localName + ` + } + if iNdEx >= l { + return 0, ` + p.ioPkg.Use() + `.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skip` + p.localName + `(data[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, ` + fmtPkg.Use() + `.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") + } + + var ( + ErrInvalidLength` + p.localName + ` = ` + fmtPkg.Use() + `.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflow` + p.localName + ` = ` + fmtPkg.Use() + `.Errorf("proto: integer overflow") + ) + `) +} + +func init() { + generator.RegisterPlugin(NewUnmarshal()) + generator.RegisterPlugin(NewUnsafeUnmarshal()) +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/Makefile b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/Makefile new file mode 100644 index 000000000000..23a6b1734408 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/Makefile @@ -0,0 +1,43 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2010 The Go Authors. All rights reserved. 
+# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +install: + go install + +test: install generate-test-pbs + go test + + +generate-test-pbs: + make install + make -C testdata + protoc-min-version --version="3.0.0" --proto_path=.:../../../../ --gogo_out=. proto3_proto/proto3.proto + make diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/clone.go b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/clone.go new file mode 100644 index 000000000000..79edb86119a1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/clone.go @@ -0,0 +1,228 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Protocol buffer deep copy and merge. +// TODO: RawMessage. + +package proto + +import ( + "log" + "reflect" + "strings" +) + +// Clone returns a deep copy of a protocol buffer. +func Clone(pb Message) Message { + in := reflect.ValueOf(pb) + if in.IsNil() { + return pb + } + + out := reflect.New(in.Type().Elem()) + // out is empty so a merge is a deep copy. + mergeStruct(out.Elem(), in.Elem()) + return out.Interface().(Message) +} + +// Merge merges src into dst. +// Required and optional fields that are set in src will be set to that value in dst. +// Elements of repeated fields will be appended. +// Merge panics if src and dst are not the same type, or if dst is nil. +func Merge(dst, src Message) { + in := reflect.ValueOf(src) + out := reflect.ValueOf(dst) + if out.IsNil() { + panic("proto: nil destination") + } + if in.Type() != out.Type() { + // Explicit test prior to mergeStruct so that mistyped nils will fail + panic("proto: type mismatch") + } + if in.IsNil() { + // Merging nil into non-nil is a quiet no-op + return + } + mergeStruct(out.Elem(), in.Elem()) +} + +func mergeStruct(out, in reflect.Value) { + sprop := GetProperties(in.Type()) + for i := 0; i < in.NumField(); i++ { + f := in.Type().Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i]) + } + + if emIn, ok := in.Addr().Interface().(extensionsMap); ok { + emOut := out.Addr().Interface().(extensionsMap) + mergeExtension(emOut.ExtensionMap(), emIn.ExtensionMap()) + } else if emIn, ok := in.Addr().Interface().(extensionsBytes); ok { + emOut := out.Addr().Interface().(extensionsBytes) + bIn := emIn.GetExtensions() + bOut := emOut.GetExtensions() + *bOut = append(*bOut, *bIn...) + } + + uf := in.FieldByName("XXX_unrecognized") + if !uf.IsValid() { + return + } + uin := uf.Bytes() + if len(uin) > 0 { + out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...)) + } +} + +// mergeAny performs a merge between two values of the same type. +// viaPtr indicates whether the values were indirected through a pointer (implying proto2). +// prop is set if this is a struct field (it may be nil). +func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) { + if in.Type() == protoMessageType { + if !in.IsNil() { + if out.IsNil() { + out.Set(reflect.ValueOf(Clone(in.Interface().(Message)))) + } else { + Merge(out.Interface().(Message), in.Interface().(Message)) + } + } + return + } + switch in.Kind() { + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, + reflect.String, reflect.Uint32, reflect.Uint64: + if !viaPtr && isProto3Zero(in) { + return + } + out.Set(in) + case reflect.Interface: + // Probably a oneof field; copy non-nil values. + if in.IsNil() { + return + } + // Allocate destination if it is not set, or set to a different type. + // Otherwise we will merge as normal. 
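+		// For a oneof wrapper (say, a hypothetical *Msg_Name), in.Elem()
+		// is the *Msg_Name held by the interface and in.Elem().Elem() is
+		// the Msg_Name struct itself, so reflect.New below allocates a
+		// fresh wrapper for the destination before merging into it.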
+ if out.IsNil() || out.Elem().Type() != in.Elem().Type() { + out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T) + } + mergeAny(out.Elem(), in.Elem(), false, nil) + case reflect.Map: + if in.Len() == 0 { + return + } + if out.IsNil() { + out.Set(reflect.MakeMap(in.Type())) + } + // For maps with value types of *T or []byte we need to deep copy each value. + elemKind := in.Type().Elem().Kind() + for _, key := range in.MapKeys() { + var val reflect.Value + switch elemKind { + case reflect.Ptr: + val = reflect.New(in.Type().Elem().Elem()) + mergeAny(val, in.MapIndex(key), false, nil) + case reflect.Slice: + val = in.MapIndex(key) + val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) + default: + val = in.MapIndex(key) + } + out.SetMapIndex(key, val) + } + case reflect.Ptr: + if in.IsNil() { + return + } + if out.IsNil() { + out.Set(reflect.New(in.Elem().Type())) + } + mergeAny(out.Elem(), in.Elem(), true, nil) + case reflect.Slice: + if in.IsNil() { + return + } + if in.Type().Elem().Kind() == reflect.Uint8 { + // []byte is a scalar bytes field, not a repeated field. + + // Edge case: if this is in a proto3 message, a zero length + // bytes field is considered the zero value, and should not + // be merged. + if prop != nil && prop.proto3 && in.Len() == 0 { + return + } + + // Make a deep copy. + // Append to []byte{} instead of []byte(nil) so that we never end up + // with a nil result. + out.SetBytes(append([]byte{}, in.Bytes()...)) + return + } + n := in.Len() + if out.IsNil() { + out.Set(reflect.MakeSlice(in.Type(), 0, n)) + } + switch in.Type().Elem().Kind() { + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, + reflect.String, reflect.Uint32, reflect.Uint64: + out.Set(reflect.AppendSlice(out, in)) + default: + for i := 0; i < n; i++ { + x := reflect.Indirect(reflect.New(in.Type().Elem())) + mergeAny(x, in.Index(i), false, nil) + out.Set(reflect.Append(out, x)) + } + } + case reflect.Struct: + mergeStruct(out, in) + default: + // unknown type, so not a protocol buffer + log.Printf("proto: don't know how to copy %v", in) + } +} + +func mergeExtension(out, in map[int32]Extension) { + for extNum, eIn := range in { + eOut := Extension{desc: eIn.desc} + if eIn.value != nil { + v := reflect.New(reflect.TypeOf(eIn.value)).Elem() + mergeAny(v, reflect.ValueOf(eIn.value), false, nil) + eOut.value = v.Interface() + } + if eIn.enc != nil { + eOut.enc = make([]byte, len(eIn.enc)) + copy(eOut.enc, eIn.enc) + } + + out[extNum] = eOut + } +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/decode.go b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/decode.go new file mode 100644 index 000000000000..cb5b213f9b9e --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/decode.go @@ -0,0 +1,872 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. 
+// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for decoding protocol buffer data to construct in-memory representations. + */ + +import ( + "errors" + "fmt" + "io" + "os" + "reflect" +) + +// errOverflow is returned when an integer is too large to be represented. +var errOverflow = errors.New("proto: integer overflow") + +// ErrInternalBadWireType is returned by generated code when an incorrect +// wire type is encountered. It does not get returned to user code. +var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") + +// The fundamental decoders that interpret bytes on the wire. +// Those that take integer types all return uint64 and are +// therefore of type valueDecoder. + +// DecodeVarint reads a varint-encoded integer from the slice. +// It returns the integer and the number of bytes consumed, or +// zero if there is not enough. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func DecodeVarint(buf []byte) (x uint64, n int) { + // x, n already 0 + for shift := uint(0); shift < 64; shift += 7 { + if n >= len(buf) { + return 0, 0 + } + b := uint64(buf[n]) + n++ + x |= (b & 0x7F) << shift + if (b & 0x80) == 0 { + return x, n + } + } + + // The number is too large to represent in a 64-bit value. + return 0, 0 +} + +// DecodeVarint reads a varint-encoded integer from the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func (p *Buffer) DecodeVarint() (x uint64, err error) { + // x, err already 0 + + i := p.index + l := len(p.buf) + + for shift := uint(0); shift < 64; shift += 7 { + if i >= l { + err = io.ErrUnexpectedEOF + return + } + b := p.buf[i] + i++ + x |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + p.index = i + return + } + } + + // The number is too large to represent in a 64-bit value. + err = errOverflow + return +} + +// DecodeFixed64 reads a 64-bit integer from the Buffer. +// This is the format for the +// fixed64, sfixed64, and double protocol buffer types. 
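+// For example, the little-endian bytes 01 00 00 00 00 00 00 00 decode to 1.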
+func (p *Buffer) DecodeFixed64() (x uint64, err error) { + // x, err already 0 + i := p.index + 8 + if i < 0 || i > len(p.buf) { + err = io.ErrUnexpectedEOF + return + } + p.index = i + + x = uint64(p.buf[i-8]) + x |= uint64(p.buf[i-7]) << 8 + x |= uint64(p.buf[i-6]) << 16 + x |= uint64(p.buf[i-5]) << 24 + x |= uint64(p.buf[i-4]) << 32 + x |= uint64(p.buf[i-3]) << 40 + x |= uint64(p.buf[i-2]) << 48 + x |= uint64(p.buf[i-1]) << 56 + return +} + +// DecodeFixed32 reads a 32-bit integer from the Buffer. +// This is the format for the +// fixed32, sfixed32, and float protocol buffer types. +func (p *Buffer) DecodeFixed32() (x uint64, err error) { + // x, err already 0 + i := p.index + 4 + if i < 0 || i > len(p.buf) { + err = io.ErrUnexpectedEOF + return + } + p.index = i + + x = uint64(p.buf[i-4]) + x |= uint64(p.buf[i-3]) << 8 + x |= uint64(p.buf[i-2]) << 16 + x |= uint64(p.buf[i-1]) << 24 + return +} + +// DecodeZigzag64 reads a zigzag-encoded 64-bit integer +// from the Buffer. +// This is the format used for the sint64 protocol buffer type. +func (p *Buffer) DecodeZigzag64() (x uint64, err error) { + x, err = p.DecodeVarint() + if err != nil { + return + } + x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63) + return +} + +// DecodeZigzag32 reads a zigzag-encoded 32-bit integer +// from the Buffer. +// This is the format used for the sint32 protocol buffer type. +func (p *Buffer) DecodeZigzag32() (x uint64, err error) { + x, err = p.DecodeVarint() + if err != nil { + return + } + x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31)) + return +} + +// These are not ValueDecoders: they produce an array of bytes or a string. +// bytes, embedded messages + +// DecodeRawBytes reads a count-delimited byte buffer from the Buffer. +// This is the format used for the bytes protocol buffer +// type and for embedded messages. +func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) { + n, err := p.DecodeVarint() + if err != nil { + return nil, err + } + + nb := int(n) + if nb < 0 { + return nil, fmt.Errorf("proto: bad byte length %d", nb) + } + end := p.index + nb + if end < p.index || end > len(p.buf) { + return nil, io.ErrUnexpectedEOF + } + + if !alloc { + // todo: check if can get more uses of alloc=false + buf = p.buf[p.index:end] + p.index += nb + return + } + + buf = make([]byte, nb) + copy(buf, p.buf[p.index:]) + p.index += nb + return +} + +// DecodeStringBytes reads an encoded string from the Buffer. +// This is the format used for the proto2 string type. +func (p *Buffer) DecodeStringBytes() (s string, err error) { + buf, err := p.DecodeRawBytes(false) + if err != nil { + return + } + return string(buf), nil +} + +// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. +// If the protocol buffer has extensions, and the field matches, add it as an extension. +// Otherwise, if the XXX_unrecognized field exists, append the skipped data there. +func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error { + oi := o.index + + err := o.skip(t, tag, wire) + if err != nil { + return err + } + + if !unrecField.IsValid() { + return nil + } + + ptr := structPointer_Bytes(base, unrecField) + + // Add the skipped field to struct field + obuf := o.buf + + o.buf = *ptr + o.EncodeVarint(uint64(tag<<3 | wire)) + *ptr = append(o.buf, obuf[oi:o.index]...) + + o.buf = obuf + + return nil +} + +// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. 
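+// For example, WireBytes reads the length prefix and skips that many
+// bytes, while WireStartGroup skips nested fields recursively until the
+// matching end-group marker is seen.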
+func (o *Buffer) skip(t reflect.Type, tag, wire int) error { + + var u uint64 + var err error + + switch wire { + case WireVarint: + _, err = o.DecodeVarint() + case WireFixed64: + _, err = o.DecodeFixed64() + case WireBytes: + _, err = o.DecodeRawBytes(false) + case WireFixed32: + _, err = o.DecodeFixed32() + case WireStartGroup: + for { + u, err = o.DecodeVarint() + if err != nil { + break + } + fwire := int(u & 0x7) + if fwire == WireEndGroup { + break + } + ftag := int(u >> 3) + err = o.skip(t, ftag, fwire) + if err != nil { + break + } + } + default: + err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t) + } + return err +} + +// Unmarshaler is the interface representing objects that can +// unmarshal themselves. The method should reset the receiver before +// decoding starts. The argument points to data that may be +// overwritten, so implementations should not keep references to the +// buffer. +type Unmarshaler interface { + Unmarshal([]byte) error +} + +// Unmarshal parses the protocol buffer representation in buf and places the +// decoded result in pb. If the struct underlying pb does not match +// the data in buf, the results can be unpredictable. +// +// Unmarshal resets pb before starting to unmarshal, so any +// existing data in pb is always removed. Use UnmarshalMerge +// to preserve and append to existing data. +func Unmarshal(buf []byte, pb Message) error { + pb.Reset() + return UnmarshalMerge(buf, pb) +} + +// UnmarshalMerge parses the protocol buffer representation in buf and +// writes the decoded result to pb. If the struct underlying pb does not match +// the data in buf, the results can be unpredictable. +// +// UnmarshalMerge merges into existing data in pb. +// Most code should use Unmarshal instead. +func UnmarshalMerge(buf []byte, pb Message) error { + // If the object can unmarshal itself, let it. + if u, ok := pb.(Unmarshaler); ok { + return u.Unmarshal(buf) + } + return NewBuffer(buf).Unmarshal(pb) +} + +// DecodeMessage reads a count-delimited message from the Buffer. +func (p *Buffer) DecodeMessage(pb Message) error { + enc, err := p.DecodeRawBytes(false) + if err != nil { + return err + } + return NewBuffer(enc).Unmarshal(pb) +} + +// DecodeGroup reads a tag-delimited group from the Buffer. +func (p *Buffer) DecodeGroup(pb Message) error { + typ, base, err := getbase(pb) + if err != nil { + return err + } + return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base) +} + +// Unmarshal parses the protocol buffer representation in the +// Buffer and places the decoded result in pb. If the struct +// underlying pb does not match the data in the buffer, the results can be +// unpredictable. +func (p *Buffer) Unmarshal(pb Message) error { + // If the object can unmarshal itself, let it. + if u, ok := pb.(Unmarshaler); ok { + err := u.Unmarshal(p.buf[p.index:]) + p.index = len(p.buf) + return err + } + + typ, base, err := getbase(pb) + if err != nil { + return err + } + + err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base) + + if collectStats { + stats.Decode++ + } + + return err +} + +// unmarshalType does the work of unmarshaling a structure. 
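+// It walks the buffer tag by tag, dispatching each known field number to
+// its registered decoder, routing unknown tags to extensions, oneofs, or
+// XXX_unrecognized, and tracking required fields (tags 1-64 via a bitmask)
+// so that a missing one can be reported as a RequiredNotSetError.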
+func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error { + var state errorState + required, reqFields := prop.reqCount, uint64(0) + + var err error + for err == nil && o.index < len(o.buf) { + oi := o.index + var u uint64 + u, err = o.DecodeVarint() + if err != nil { + break + } + wire := int(u & 0x7) + if wire == WireEndGroup { + if is_group { + return nil // input is satisfied + } + return fmt.Errorf("proto: %s: wiretype end group for non-group", st) + } + tag := int(u >> 3) + if tag <= 0 { + return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire) + } + fieldnum, ok := prop.decoderTags.get(tag) + if !ok { + // Maybe it's an extension? + if prop.extendable { + if e := structPointer_Interface(base, st).(extendableProto); isExtensionField(e, int32(tag)) { + if err = o.skip(st, tag, wire); err == nil { + if ee, eok := e.(extensionsMap); eok { + ext := ee.ExtensionMap()[int32(tag)] // may be missing + ext.enc = append(ext.enc, o.buf[oi:o.index]...) + ee.ExtensionMap()[int32(tag)] = ext + } else if ee, eok := e.(extensionsBytes); eok { + ext := ee.GetExtensions() + *ext = append(*ext, o.buf[oi:o.index]...) + } + } + continue + } + } + // Maybe it's a oneof? + if prop.oneofUnmarshaler != nil { + m := structPointer_Interface(base, st).(Message) + // First return value indicates whether tag is a oneof field. + ok, err = prop.oneofUnmarshaler(m, tag, wire, o) + if err == ErrInternalBadWireType { + // Map the error to something more descriptive. + // Do the formatting here to save generated code space. + err = fmt.Errorf("bad wiretype for oneof field in %T", m) + } + if ok { + continue + } + } + err = o.skipAndSave(st, tag, wire, base, prop.unrecField) + continue + } + p := prop.Prop[fieldnum] + + if p.dec == nil { + fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name) + continue + } + dec := p.dec + if wire != WireStartGroup && wire != p.WireType { + if wire == WireBytes && p.packedDec != nil { + // a packable field + dec = p.packedDec + } else { + err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType) + continue + } + } + decErr := dec(o, p, base) + if decErr != nil && !state.shouldContinue(decErr, p) { + err = decErr + } + if err == nil && p.Required { + // Successfully decoded a required field. + if tag <= 64 { + // use bitmap for fields 1-64 to catch field reuse. + var mask uint64 = 1 << uint64(tag-1) + if reqFields&mask == 0 { + // new required field + reqFields |= mask + required-- + } + } else { + // This is imprecise. It can be fooled by a required field + // with a tag > 64 that is encoded twice; that's very rare. + // A fully correct implementation would require allocating + // a data structure, which we would like to avoid. + required-- + } + } + } + if err == nil { + if is_group { + return io.ErrUnexpectedEOF + } + if state.err != nil { + return state.err + } + if required > 0 { + // Not enough information to determine the exact field. If we use extra + // CPU, we could determine the field only if the missing required field + // has a tag <= 64 and we check reqFields. + return &RequiredNotSetError{"{Unknown}"} + } + } + return err +} + +// Individual type decoders +// For each, +// u is the decoded value, +// v is a pointer to the field (pointer) in the struct + +// Sizes of the pools to allocate inside the Buffer. 
+// The goal is modest amortization and allocation +// on at least 16-byte boundaries. +const ( + boolPoolSize = 16 + uint32PoolSize = 8 + uint64PoolSize = 4 +) + +// Decode a bool. +func (o *Buffer) dec_bool(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + if len(o.bools) == 0 { + o.bools = make([]bool, boolPoolSize) + } + o.bools[0] = u != 0 + *structPointer_Bool(base, p.field) = &o.bools[0] + o.bools = o.bools[1:] + return nil +} + +func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + *structPointer_BoolVal(base, p.field) = u != 0 + return nil +} + +// Decode an int32. +func (o *Buffer) dec_int32(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + word32_Set(structPointer_Word32(base, p.field), o, uint32(u)) + return nil +} + +func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u)) + return nil +} + +// Decode an int64. +func (o *Buffer) dec_int64(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + word64_Set(structPointer_Word64(base, p.field), o, u) + return nil +} + +func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + word64Val_Set(structPointer_Word64Val(base, p.field), o, u) + return nil +} + +// Decode a string. +func (o *Buffer) dec_string(p *Properties, base structPointer) error { + s, err := o.DecodeStringBytes() + if err != nil { + return err + } + *structPointer_String(base, p.field) = &s + return nil +} + +func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error { + s, err := o.DecodeStringBytes() + if err != nil { + return err + } + *structPointer_StringVal(base, p.field) = s + return nil +} + +// Decode a slice of bytes ([]byte). +func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error { + b, err := o.DecodeRawBytes(true) + if err != nil { + return err + } + *structPointer_Bytes(base, p.field) = b + return nil +} + +// Decode a slice of bools ([]bool). +func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + v := structPointer_BoolSlice(base, p.field) + *v = append(*v, u != 0) + return nil +} + +// Decode a slice of bools ([]bool) in packed format. +func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error { + v := structPointer_BoolSlice(base, p.field) + + nn, err := o.DecodeVarint() + if err != nil { + return err + } + nb := int(nn) // number of bytes of encoded bools + fin := o.index + nb + if fin < o.index { + return errOverflow + } + + y := *v + for o.index < fin { + u, err := p.valDec(o) + if err != nil { + return err + } + y = append(y, u != 0) + } + + *v = y + return nil +} + +// Decode a slice of int32s ([]int32). +func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + structPointer_Word32Slice(base, p.field).Append(uint32(u)) + return nil +} + +// Decode a slice of int32s ([]int32) in packed format. 
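+// The packed form is a single length-delimited blob of varints: for
+// example, the values 3 and 270 encode as the length byte 0x03 followed
+// by the payload bytes 0x03 and 0x8e 0x02.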
+func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error { + v := structPointer_Word32Slice(base, p.field) + + nn, err := o.DecodeVarint() + if err != nil { + return err + } + nb := int(nn) // number of bytes of encoded int32s + + fin := o.index + nb + if fin < o.index { + return errOverflow + } + for o.index < fin { + u, err := p.valDec(o) + if err != nil { + return err + } + v.Append(uint32(u)) + } + return nil +} + +// Decode a slice of int64s ([]int64). +func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + + structPointer_Word64Slice(base, p.field).Append(u) + return nil +} + +// Decode a slice of int64s ([]int64) in packed format. +func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error { + v := structPointer_Word64Slice(base, p.field) + + nn, err := o.DecodeVarint() + if err != nil { + return err + } + nb := int(nn) // number of bytes of encoded int64s + + fin := o.index + nb + if fin < o.index { + return errOverflow + } + for o.index < fin { + u, err := p.valDec(o) + if err != nil { + return err + } + v.Append(u) + } + return nil +} + +// Decode a slice of strings ([]string). +func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error { + s, err := o.DecodeStringBytes() + if err != nil { + return err + } + v := structPointer_StringSlice(base, p.field) + *v = append(*v, s) + return nil +} + +// Decode a slice of slice of bytes ([][]byte). +func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error { + b, err := o.DecodeRawBytes(true) + if err != nil { + return err + } + v := structPointer_BytesSlice(base, p.field) + *v = append(*v, b) + return nil +} + +// Decode a map field. +func (o *Buffer) dec_new_map(p *Properties, base structPointer) error { + raw, err := o.DecodeRawBytes(false) + if err != nil { + return err + } + oi := o.index // index at the end of this map entry + o.index -= len(raw) // move buffer back to start of map entry + + mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V + if mptr.Elem().IsNil() { + mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem())) + } + v := mptr.Elem() // map[K]V + + // Prepare addressable doubly-indirect placeholders for the key and value types. + // See enc_new_map for why. + keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K + keybase := toStructPointer(keyptr.Addr()) // **K + + var valbase structPointer + var valptr reflect.Value + switch p.mtype.Elem().Kind() { + case reflect.Slice: + // []byte + var dummy []byte + valptr = reflect.ValueOf(&dummy) // *[]byte + valbase = toStructPointer(valptr) // *[]byte + case reflect.Ptr: + // message; valptr is **Msg; need to allocate the intermediate pointer + valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V + valptr.Set(reflect.New(valptr.Type().Elem())) + valbase = toStructPointer(valptr) + default: + // everything else + valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V + valbase = toStructPointer(valptr.Addr()) // **V + } + + // Decode. + // This parses a restricted wire format, namely the encoding of a message + // with two fields. See enc_new_map for the format. + for o.index < oi { + // tagcode for key and value properties are always a single byte + // because they have tags 1 and 2. 
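+		// For instance, a varint key field is tagged 0x08 ((1<<3)|WireVarint)
+		// and a length-delimited value field 0x12 ((2<<3)|WireBytes), so one
+		// byte is always enough to tell them apart.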
+ tagcode := o.buf[o.index] + o.index++ + switch tagcode { + case p.mkeyprop.tagcode[0]: + if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil { + return err + } + case p.mvalprop.tagcode[0]: + if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil { + return err + } + default: + // TODO: Should we silently skip this instead? + return fmt.Errorf("proto: bad map data tag %d", raw[0]) + } + } + keyelem, valelem := keyptr.Elem(), valptr.Elem() + if !keyelem.IsValid() || !valelem.IsValid() { + // We did not decode the key or the value in the map entry. + // Either way, it's an invalid map entry. + return fmt.Errorf("proto: bad map data: missing key/val") + } + + v.SetMapIndex(keyelem, valelem) + return nil +} + +// Decode a group. +func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error { + bas := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(bas) { + // allocate new nested message + bas = toStructPointer(reflect.New(p.stype)) + structPointer_SetStructPointer(base, p.field, bas) + } + return o.unmarshalType(p.stype, p.sprop, true, bas) +} + +// Decode an embedded message. +func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) { + raw, e := o.DecodeRawBytes(false) + if e != nil { + return e + } + + bas := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(bas) { + // allocate new nested message + bas = toStructPointer(reflect.New(p.stype)) + structPointer_SetStructPointer(base, p.field, bas) + } + + // If the object can unmarshal itself, let it. + if p.isUnmarshaler { + iv := structPointer_Interface(bas, p.stype) + return iv.(Unmarshaler).Unmarshal(raw) + } + + obuf := o.buf + oi := o.index + o.buf = raw + o.index = 0 + + err = o.unmarshalType(p.stype, p.sprop, false, bas) + o.buf = obuf + o.index = oi + + return err +} + +// Decode a slice of embedded messages. +func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error { + return o.dec_slice_struct(p, false, base) +} + +// Decode a slice of embedded groups. +func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error { + return o.dec_slice_struct(p, true, base) +} + +// Decode a slice of structs ([]*struct). +func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error { + v := reflect.New(p.stype) + bas := toStructPointer(v) + structPointer_StructPointerSlice(base, p.field).Append(bas) + + if is_group { + err := o.unmarshalType(p.stype, p.sprop, is_group, bas) + return err + } + + raw, err := o.DecodeRawBytes(false) + if err != nil { + return err + } + + // If the object can unmarshal itself, let it. + if p.isUnmarshaler { + iv := v.Interface() + return iv.(Unmarshaler).Unmarshal(raw) + } + + obuf := o.buf + oi := o.index + o.buf = raw + o.index = 0 + + err = o.unmarshalType(p.stype, p.sprop, is_group, bas) + + o.buf = obuf + o.index = oi + + return err +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/decode_gogo.go b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/decode_gogo.go new file mode 100644 index 000000000000..6a77aad7661a --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/decode_gogo.go @@ -0,0 +1,175 @@ +// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. 
+// http://github.com/gogo/protobuf/gogoproto +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "reflect" +) + +// Decode a reference to a struct pointer. +func (o *Buffer) dec_ref_struct_message(p *Properties, base structPointer) (err error) { + raw, e := o.DecodeRawBytes(false) + if e != nil { + return e + } + + // If the object can unmarshal itself, let it. + if p.isUnmarshaler { + panic("not supported, since this is a pointer receiver") + } + + obuf := o.buf + oi := o.index + o.buf = raw + o.index = 0 + + bas := structPointer_FieldPointer(base, p.field) + + err = o.unmarshalType(p.stype, p.sprop, false, bas) + o.buf = obuf + o.index = oi + + return err +} + +// Decode a slice of references to struct pointers ([]struct). +func (o *Buffer) dec_slice_ref_struct(p *Properties, is_group bool, base structPointer) error { + newBas := appendStructPointer(base, p.field, p.sstype) + + if is_group { + panic("not supported, maybe in future, if requested.") + } + + raw, err := o.DecodeRawBytes(false) + if err != nil { + return err + } + + // If the object can unmarshal itself, let it. + if p.isUnmarshaler { + panic("not supported, since this is not a pointer receiver.") + } + + obuf := o.buf + oi := o.index + o.buf = raw + o.index = 0 + + err = o.unmarshalType(p.stype, p.sprop, is_group, newBas) + + o.buf = obuf + o.index = oi + + return err +} + +// Decode a slice of references to struct pointers. 
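+// Like the _ref_ decoder above, this exists for the non-nullable field
+// shapes ([]T rather than []*T) that gogoproto generates when
+// nullable=false is set on a field.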
+func (o *Buffer) dec_slice_ref_struct_message(p *Properties, base structPointer) error { + return o.dec_slice_ref_struct(p, false, base) +} + +func setPtrCustomType(base structPointer, f field, v interface{}) { + if v == nil { + return + } + structPointer_SetStructPointer(base, f, structPointer(reflect.ValueOf(v).Pointer())) +} + +func setCustomType(base structPointer, f field, value interface{}) { + if value == nil { + return + } + v := reflect.ValueOf(value).Elem() + t := reflect.TypeOf(value).Elem() + kind := t.Kind() + switch kind { + case reflect.Slice: + slice := reflect.MakeSlice(t, v.Len(), v.Cap()) + reflect.Copy(slice, v) + oldHeader := structPointer_GetSliceHeader(base, f) + oldHeader.Data = slice.Pointer() + oldHeader.Len = v.Len() + oldHeader.Cap = v.Cap() + default: + l := 1 + size := reflect.TypeOf(value).Elem().Size() + if kind == reflect.Array { + l = reflect.TypeOf(value).Elem().Len() + size = reflect.TypeOf(value).Size() + } + total := int(size) * l + structPointer_Copy(toStructPointer(reflect.ValueOf(value)), structPointer_Add(base, f), total) + } +} + +func (o *Buffer) dec_custom_bytes(p *Properties, base structPointer) error { + b, err := o.DecodeRawBytes(true) + if err != nil { + return err + } + i := reflect.New(p.ctype.Elem()).Interface() + custom := (i).(Unmarshaler) + if err := custom.Unmarshal(b); err != nil { + return err + } + setPtrCustomType(base, p.field, custom) + return nil +} + +func (o *Buffer) dec_custom_ref_bytes(p *Properties, base structPointer) error { + b, err := o.DecodeRawBytes(true) + if err != nil { + return err + } + i := reflect.New(p.ctype).Interface() + custom := (i).(Unmarshaler) + if err := custom.Unmarshal(b); err != nil { + return err + } + if custom != nil { + setCustomType(base, p.field, custom) + } + return nil +} + +// Decode a slice of bytes ([]byte) into a slice of custom types. +func (o *Buffer) dec_custom_slice_bytes(p *Properties, base structPointer) error { + b, err := o.DecodeRawBytes(true) + if err != nil { + return err + } + i := reflect.New(p.ctype.Elem()).Interface() + custom := (i).(Unmarshaler) + if err := custom.Unmarshal(b); err != nil { + return err + } + newBas := appendStructPointer(base, p.field, p.ctype) + + setCustomType(newBas, 0, custom) + + return nil +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/encode.go b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/encode.go new file mode 100644 index 000000000000..7321e1aae12e --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/encode.go @@ -0,0 +1,1335 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for encoding data into the wire format for protocol buffers. + */ + +import ( + "errors" + "fmt" + "reflect" + "sort" +) + +// RequiredNotSetError is the error returned if Marshal is called with +// a protocol buffer struct whose required fields have not +// all been initialized. It is also the error returned if Unmarshal is +// called with an encoded protocol buffer that does not include all the +// required fields. +// +// When printed, RequiredNotSetError reports the first unset required field in a +// message. If the field cannot be precisely determined, it is reported as +// "{Unknown}". +type RequiredNotSetError struct { + field string +} + +func (e *RequiredNotSetError) Error() string { + return fmt.Sprintf("proto: required field %q not set", e.field) +} + +var ( + // errRepeatedHasNil is the error returned if Marshal is called with + // a struct with a repeated field containing a nil element. + errRepeatedHasNil = errors.New("proto: repeated field has nil element") + + // ErrNil is the error returned if Marshal is called with nil. + ErrNil = errors.New("proto: Marshal called with nil") +) + +// The fundamental encoders that put bytes on the wire. +// Those that take integer types all accept uint64 and are +// therefore of type valueEncoder. + +const maxVarintBytes = 10 // maximum length of a varint + +// EncodeVarint returns the varint encoding of x. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +// Not used by the package itself, but helpful to clients +// wishing to use the same encoding. +func EncodeVarint(x uint64) []byte { + var buf [maxVarintBytes]byte + var n int + for n = 0; x > 127; n++ { + buf[n] = 0x80 | uint8(x&0x7F) + x >>= 7 + } + buf[n] = uint8(x) + n++ + return buf[0:n] +} + +// EncodeVarint writes a varint-encoded integer to the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func (p *Buffer) EncodeVarint(x uint64) error { + for x >= 1<<7 { + p.buf = append(p.buf, uint8(x&0x7f|0x80)) + x >>= 7 + } + p.buf = append(p.buf, uint8(x)) + return nil +} + +func sizeVarint(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} + +// EncodeFixed64 writes a 64-bit integer to the Buffer. +// This is the format for the +// fixed64, sfixed64, and double protocol buffer types. +func (p *Buffer) EncodeFixed64(x uint64) error { + p.buf = append(p.buf, + uint8(x), + uint8(x>>8), + uint8(x>>16), + uint8(x>>24), + uint8(x>>32), + uint8(x>>40), + uint8(x>>48), + uint8(x>>56)) + return nil +} + +func sizeFixed64(x uint64) int { + return 8 +} + +// EncodeFixed32 writes a 32-bit integer to the Buffer. 
+// This is the format for the +// fixed32, sfixed32, and float protocol buffer types. +func (p *Buffer) EncodeFixed32(x uint64) error { + p.buf = append(p.buf, + uint8(x), + uint8(x>>8), + uint8(x>>16), + uint8(x>>24)) + return nil +} + +func sizeFixed32(x uint64) int { + return 4 +} + +// EncodeZigzag64 writes a zigzag-encoded 64-bit integer +// to the Buffer. +// This is the format used for the sint64 protocol buffer type. +func (p *Buffer) EncodeZigzag64(x uint64) error { + // use signed number to get arithmetic right shift. + return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} + +func sizeZigzag64(x uint64) int { + return sizeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} + +// EncodeZigzag32 writes a zigzag-encoded 32-bit integer +// to the Buffer. +// This is the format used for the sint32 protocol buffer type. +func (p *Buffer) EncodeZigzag32(x uint64) error { + // use signed number to get arithmetic right shift. + return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) +} + +func sizeZigzag32(x uint64) int { + return sizeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) +} + +// EncodeRawBytes writes a count-delimited byte buffer to the Buffer. +// This is the format used for the bytes protocol buffer +// type and for embedded messages. +func (p *Buffer) EncodeRawBytes(b []byte) error { + p.EncodeVarint(uint64(len(b))) + p.buf = append(p.buf, b...) + return nil +} + +func sizeRawBytes(b []byte) int { + return sizeVarint(uint64(len(b))) + + len(b) +} + +// EncodeStringBytes writes an encoded string to the Buffer. +// This is the format used for the proto2 string type. +func (p *Buffer) EncodeStringBytes(s string) error { + p.EncodeVarint(uint64(len(s))) + p.buf = append(p.buf, s...) + return nil +} + +func sizeStringBytes(s string) int { + return sizeVarint(uint64(len(s))) + + len(s) +} + +// Marshaler is the interface representing objects that can marshal themselves. +type Marshaler interface { + Marshal() ([]byte, error) +} + +// Marshal takes the protocol buffer +// and encodes it into the wire format, returning the data. +func Marshal(pb Message) ([]byte, error) { + // Can the object marshal itself? + if m, ok := pb.(Marshaler); ok { + return m.Marshal() + } + p := NewBuffer(nil) + err := p.Marshal(pb) + var state errorState + if err != nil && !state.shouldContinue(err, nil) { + return nil, err + } + if p.buf == nil && err == nil { + // Return a non-nil slice on success. + return []byte{}, nil + } + return p.buf, err +} + +// EncodeMessage writes the protocol buffer to the Buffer, +// prefixed by a varint-encoded length. +func (p *Buffer) EncodeMessage(pb Message) error { + t, base, err := getbase(pb) + if structPointer_IsNil(base) { + return ErrNil + } + if err == nil { + var state errorState + err = p.enc_len_struct(GetProperties(t.Elem()), base, &state) + } + return err +} + +// Marshal takes the protocol buffer +// and encodes it into the wire format, writing the result to the +// Buffer. +func (p *Buffer) Marshal(pb Message) error { + // Can the object marshal itself? + if m, ok := pb.(Marshaler); ok { + data, err := m.Marshal() + if err != nil { + return err + } + p.buf = append(p.buf, data...) + return nil + } + + t, base, err := getbase(pb) + if structPointer_IsNil(base) { + return ErrNil + } + if err == nil { + err = p.enc_struct(GetProperties(t.Elem()), base) + } + + if collectStats { + stats.Encode++ + } + + return err +} + +// Size returns the encoded size of a protocol buffer. 
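+// Because it walks the same per-field sizers that Marshal uses, one
+// illustrative use (a sketch, not code from this package) is pre-sizing a
+// destination buffer:
+//
+//	buf := proto.NewBuffer(make([]byte, 0, proto.Size(pb)))
+//	err := buf.Marshal(pb)
+//
+// where pb is any generated Message value.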
+func Size(pb Message) (n int) { + // Can the object marshal itself? If so, Size is slow. + // TODO: add Size to Marshaler, or add a Sizer interface. + if m, ok := pb.(Marshaler); ok { + b, _ := m.Marshal() + return len(b) + } + + t, base, err := getbase(pb) + if structPointer_IsNil(base) { + return 0 + } + if err == nil { + n = size_struct(GetProperties(t.Elem()), base) + } + + if collectStats { + stats.Size++ + } + + return +} + +// Individual type encoders. + +// Encode a bool. +func (o *Buffer) enc_bool(p *Properties, base structPointer) error { + v := *structPointer_Bool(base, p.field) + if v == nil { + return ErrNil + } + x := 0 + if *v { + x = 1 + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func (o *Buffer) enc_proto3_bool(p *Properties, base structPointer) error { + v := *structPointer_BoolVal(base, p.field) + if !v { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, 1) + return nil +} + +func size_bool(p *Properties, base structPointer) int { + v := *structPointer_Bool(base, p.field) + if v == nil { + return 0 + } + return len(p.tagcode) + 1 // each bool takes exactly one byte +} + +func size_proto3_bool(p *Properties, base structPointer) int { + v := *structPointer_BoolVal(base, p.field) + if !v && !p.oneof { + return 0 + } + return len(p.tagcode) + 1 // each bool takes exactly one byte +} + +// Encode an int32. +func (o *Buffer) enc_int32(p *Properties, base structPointer) error { + v := structPointer_Word32(base, p.field) + if word32_IsNil(v) { + return ErrNil + } + x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func (o *Buffer) enc_proto3_int32(p *Properties, base structPointer) error { + v := structPointer_Word32Val(base, p.field) + x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range + if x == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func size_int32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32(base, p.field) + if word32_IsNil(v) { + return 0 + } + x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +func size_proto3_int32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32Val(base, p.field) + x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range + if x == 0 && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +// Encode a uint32. +// Exactly the same as int32, except for no sign extension. +func (o *Buffer) enc_uint32(p *Properties, base structPointer) error { + v := structPointer_Word32(base, p.field) + if word32_IsNil(v) { + return ErrNil + } + x := word32_Get(v) + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func (o *Buffer) enc_proto3_uint32(p *Properties, base structPointer) error { + v := structPointer_Word32Val(base, p.field) + x := word32Val_Get(v) + if x == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) 
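+	// p.tagcode was precomputed when the Properties were built, so only
+	// the varint payload remains to be appended.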
+ p.valEnc(o, uint64(x)) + return nil +} + +func size_uint32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32(base, p.field) + if word32_IsNil(v) { + return 0 + } + x := word32_Get(v) + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +func size_proto3_uint32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32Val(base, p.field) + x := word32Val_Get(v) + if x == 0 && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +// Encode an int64. +func (o *Buffer) enc_int64(p *Properties, base structPointer) error { + v := structPointer_Word64(base, p.field) + if word64_IsNil(v) { + return ErrNil + } + x := word64_Get(v) + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, x) + return nil +} + +func (o *Buffer) enc_proto3_int64(p *Properties, base structPointer) error { + v := structPointer_Word64Val(base, p.field) + x := word64Val_Get(v) + if x == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, x) + return nil +} + +func size_int64(p *Properties, base structPointer) (n int) { + v := structPointer_Word64(base, p.field) + if word64_IsNil(v) { + return 0 + } + x := word64_Get(v) + n += len(p.tagcode) + n += p.valSize(x) + return +} + +func size_proto3_int64(p *Properties, base structPointer) (n int) { + v := structPointer_Word64Val(base, p.field) + x := word64Val_Get(v) + if x == 0 && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += p.valSize(x) + return +} + +// Encode a string. +func (o *Buffer) enc_string(p *Properties, base structPointer) error { + v := *structPointer_String(base, p.field) + if v == nil { + return ErrNil + } + x := *v + o.buf = append(o.buf, p.tagcode...) + o.EncodeStringBytes(x) + return nil +} + +func (o *Buffer) enc_proto3_string(p *Properties, base structPointer) error { + v := *structPointer_StringVal(base, p.field) + if v == "" { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeStringBytes(v) + return nil +} + +func size_string(p *Properties, base structPointer) (n int) { + v := *structPointer_String(base, p.field) + if v == nil { + return 0 + } + x := *v + n += len(p.tagcode) + n += sizeStringBytes(x) + return +} + +func size_proto3_string(p *Properties, base structPointer) (n int) { + v := *structPointer_StringVal(base, p.field) + if v == "" && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += sizeStringBytes(v) + return +} + +// All protocol buffer fields are nillable, but be careful. +func isNil(v reflect.Value) bool { + switch v.Kind() { + case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return v.IsNil() + } + return false +} + +// Encode a message struct. +func (o *Buffer) enc_struct_message(p *Properties, base structPointer) error { + var state errorState + structp := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(structp) { + return ErrNil + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, err := m.Marshal() + if err != nil && !state.shouldContinue(err, nil) { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + return state.err + } + + o.buf = append(o.buf, p.tagcode...) + return o.enc_len_struct(p.sprop, structp, &state) +} + +func size_struct_message(p *Properties, base structPointer) int { + structp := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(structp) { + return 0 + } + + // Can the object marshal itself? 
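+	// If so, its Marshal output alone determines the size: the tag bytes
+	// plus the length-delimited data.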
+ if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, _ := m.Marshal() + n0 := len(p.tagcode) + n1 := sizeRawBytes(data) + return n0 + n1 + } + + n0 := len(p.tagcode) + n1 := size_struct(p.sprop, structp) + n2 := sizeVarint(uint64(n1)) // size of encoded length + return n0 + n1 + n2 +} + +// Encode a group struct. +func (o *Buffer) enc_struct_group(p *Properties, base structPointer) error { + var state errorState + b := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(b) { + return ErrNil + } + + o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) + err := o.enc_struct(p.sprop, b) + if err != nil && !state.shouldContinue(err, nil) { + return err + } + o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) + return state.err +} + +func size_struct_group(p *Properties, base structPointer) (n int) { + b := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(b) { + return 0 + } + + n += sizeVarint(uint64((p.Tag << 3) | WireStartGroup)) + n += size_struct(p.sprop, b) + n += sizeVarint(uint64((p.Tag << 3) | WireEndGroup)) + return +} + +// Encode a slice of bools ([]bool). +func (o *Buffer) enc_slice_bool(p *Properties, base structPointer) error { + s := *structPointer_BoolSlice(base, p.field) + l := len(s) + if l == 0 { + return ErrNil + } + for _, x := range s { + o.buf = append(o.buf, p.tagcode...) + v := uint64(0) + if x { + v = 1 + } + p.valEnc(o, v) + } + return nil +} + +func size_slice_bool(p *Properties, base structPointer) int { + s := *structPointer_BoolSlice(base, p.field) + l := len(s) + if l == 0 { + return 0 + } + return l * (len(p.tagcode) + 1) // each bool takes exactly one byte +} + +// Encode a slice of bools ([]bool) in packed format. +func (o *Buffer) enc_slice_packed_bool(p *Properties, base structPointer) error { + s := *structPointer_BoolSlice(base, p.field) + l := len(s) + if l == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeVarint(uint64(l)) // each bool takes exactly one byte + for _, x := range s { + v := uint64(0) + if x { + v = 1 + } + p.valEnc(o, v) + } + return nil +} + +func size_slice_packed_bool(p *Properties, base structPointer) (n int) { + s := *structPointer_BoolSlice(base, p.field) + l := len(s) + if l == 0 { + return 0 + } + n += len(p.tagcode) + n += sizeVarint(uint64(l)) + n += l // each bool takes exactly one byte + return +} + +// Encode a slice of bytes ([]byte). +func (o *Buffer) enc_slice_byte(p *Properties, base structPointer) error { + s := *structPointer_Bytes(base, p.field) + if s == nil { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(s) + return nil +} + +func (o *Buffer) enc_proto3_slice_byte(p *Properties, base structPointer) error { + s := *structPointer_Bytes(base, p.field) + if len(s) == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(s) + return nil +} + +func size_slice_byte(p *Properties, base structPointer) (n int) { + s := *structPointer_Bytes(base, p.field) + if s == nil && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += sizeRawBytes(s) + return +} + +func size_proto3_slice_byte(p *Properties, base structPointer) (n int) { + s := *structPointer_Bytes(base, p.field) + if len(s) == 0 && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += sizeRawBytes(s) + return +} + +// Encode a slice of int32s ([]int32). 
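+// In this non-packed form every element carries its own key, so a field
+// with tag 4 holding [3, 270] would be written as the five bytes
+// 0x20 0x03 0x20 0x8e 0x02 (compare the packed encoder below).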
+func (o *Buffer) enc_slice_int32(p *Properties, base structPointer) error { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) + x := int32(s.Index(i)) // permit sign extension to use full 64-bit range + p.valEnc(o, uint64(x)) + } + return nil +} + +func size_slice_int32(p *Properties, base structPointer) (n int) { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + for i := 0; i < l; i++ { + n += len(p.tagcode) + x := int32(s.Index(i)) // permit sign extension to use full 64-bit range + n += p.valSize(uint64(x)) + } + return +} + +// Encode a slice of int32s ([]int32) in packed format. +func (o *Buffer) enc_slice_packed_int32(p *Properties, base structPointer) error { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + // TODO: Reuse a Buffer. + buf := NewBuffer(nil) + for i := 0; i < l; i++ { + x := int32(s.Index(i)) // permit sign extension to use full 64-bit range + p.valEnc(buf, uint64(x)) + } + + o.buf = append(o.buf, p.tagcode...) + o.EncodeVarint(uint64(len(buf.buf))) + o.buf = append(o.buf, buf.buf...) + return nil +} + +func size_slice_packed_int32(p *Properties, base structPointer) (n int) { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + var bufSize int + for i := 0; i < l; i++ { + x := int32(s.Index(i)) // permit sign extension to use full 64-bit range + bufSize += p.valSize(uint64(x)) + } + + n += len(p.tagcode) + n += sizeVarint(uint64(bufSize)) + n += bufSize + return +} + +// Encode a slice of uint32s ([]uint32). +// Exactly the same as int32, except for no sign extension. +func (o *Buffer) enc_slice_uint32(p *Properties, base structPointer) error { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) + x := s.Index(i) + p.valEnc(o, uint64(x)) + } + return nil +} + +func size_slice_uint32(p *Properties, base structPointer) (n int) { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + for i := 0; i < l; i++ { + n += len(p.tagcode) + x := s.Index(i) + n += p.valSize(uint64(x)) + } + return +} + +// Encode a slice of uint32s ([]uint32) in packed format. +// Exactly the same as int32, except for no sign extension. +func (o *Buffer) enc_slice_packed_uint32(p *Properties, base structPointer) error { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + // TODO: Reuse a Buffer. + buf := NewBuffer(nil) + for i := 0; i < l; i++ { + p.valEnc(buf, uint64(s.Index(i))) + } + + o.buf = append(o.buf, p.tagcode...) + o.EncodeVarint(uint64(len(buf.buf))) + o.buf = append(o.buf, buf.buf...) + return nil +} + +func size_slice_packed_uint32(p *Properties, base structPointer) (n int) { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + var bufSize int + for i := 0; i < l; i++ { + bufSize += p.valSize(uint64(s.Index(i))) + } + + n += len(p.tagcode) + n += sizeVarint(uint64(bufSize)) + n += bufSize + return +} + +// Encode a slice of int64s ([]int64). +func (o *Buffer) enc_slice_int64(p *Properties, base structPointer) error { + s := structPointer_Word64Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) 
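+		// The key is re-emitted for every element; the packed variant
+		// below writes it once for the whole slice.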
+ p.valEnc(o, s.Index(i)) + } + return nil +} + +func size_slice_int64(p *Properties, base structPointer) (n int) { + s := structPointer_Word64Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + for i := 0; i < l; i++ { + n += len(p.tagcode) + n += p.valSize(s.Index(i)) + } + return +} + +// Encode a slice of int64s ([]int64) in packed format. +func (o *Buffer) enc_slice_packed_int64(p *Properties, base structPointer) error { + s := structPointer_Word64Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + // TODO: Reuse a Buffer. + buf := NewBuffer(nil) + for i := 0; i < l; i++ { + p.valEnc(buf, s.Index(i)) + } + + o.buf = append(o.buf, p.tagcode...) + o.EncodeVarint(uint64(len(buf.buf))) + o.buf = append(o.buf, buf.buf...) + return nil +} + +func size_slice_packed_int64(p *Properties, base structPointer) (n int) { + s := structPointer_Word64Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + var bufSize int + for i := 0; i < l; i++ { + bufSize += p.valSize(s.Index(i)) + } + + n += len(p.tagcode) + n += sizeVarint(uint64(bufSize)) + n += bufSize + return +} + +// Encode a slice of slice of bytes ([][]byte). +func (o *Buffer) enc_slice_slice_byte(p *Properties, base structPointer) error { + ss := *structPointer_BytesSlice(base, p.field) + l := len(ss) + if l == 0 { + return ErrNil + } + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(ss[i]) + } + return nil +} + +func size_slice_slice_byte(p *Properties, base structPointer) (n int) { + ss := *structPointer_BytesSlice(base, p.field) + l := len(ss) + if l == 0 { + return 0 + } + n += l * len(p.tagcode) + for i := 0; i < l; i++ { + n += sizeRawBytes(ss[i]) + } + return +} + +// Encode a slice of strings ([]string). +func (o *Buffer) enc_slice_string(p *Properties, base structPointer) error { + ss := *structPointer_StringSlice(base, p.field) + l := len(ss) + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) + o.EncodeStringBytes(ss[i]) + } + return nil +} + +func size_slice_string(p *Properties, base structPointer) (n int) { + ss := *structPointer_StringSlice(base, p.field) + l := len(ss) + n += l * len(p.tagcode) + for i := 0; i < l; i++ { + n += sizeStringBytes(ss[i]) + } + return +} + +// Encode a slice of message structs ([]*struct). +func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) error { + var state errorState + s := structPointer_StructPointerSlice(base, p.field) + l := s.Len() + + for i := 0; i < l; i++ { + structp := s.Index(i) + if structPointer_IsNil(structp) { + return errRepeatedHasNil + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, err := m.Marshal() + if err != nil && !state.shouldContinue(err, nil) { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + continue + } + + o.buf = append(o.buf, p.tagcode...) + err := o.enc_len_struct(p.sprop, structp, &state) + if err != nil && !state.shouldContinue(err, nil) { + if err == ErrNil { + return errRepeatedHasNil + } + return err + } + } + return state.err +} + +func size_slice_struct_message(p *Properties, base structPointer) (n int) { + s := structPointer_StructPointerSlice(base, p.field) + l := s.Len() + n += l * len(p.tagcode) + for i := 0; i < l; i++ { + structp := s.Index(i) + if structPointer_IsNil(structp) { + return // return the size up to this point + } + + // Can the object marshal itself? 
+ if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, _ := m.Marshal() + n += len(p.tagcode) + n += sizeRawBytes(data) + continue + } + + n0 := size_struct(p.sprop, structp) + n1 := sizeVarint(uint64(n0)) // size of encoded length + n += n0 + n1 + } + return +} + +// Encode a slice of group structs ([]*struct). +func (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error { + var state errorState + s := structPointer_StructPointerSlice(base, p.field) + l := s.Len() + + for i := 0; i < l; i++ { + b := s.Index(i) + if structPointer_IsNil(b) { + return errRepeatedHasNil + } + + o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) + + err := o.enc_struct(p.sprop, b) + + if err != nil && !state.shouldContinue(err, nil) { + if err == ErrNil { + return errRepeatedHasNil + } + return err + } + + o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) + } + return state.err +} + +func size_slice_struct_group(p *Properties, base structPointer) (n int) { + s := structPointer_StructPointerSlice(base, p.field) + l := s.Len() + + n += l * sizeVarint(uint64((p.Tag<<3)|WireStartGroup)) + n += l * sizeVarint(uint64((p.Tag<<3)|WireEndGroup)) + for i := 0; i < l; i++ { + b := s.Index(i) + if structPointer_IsNil(b) { + return // return size up to this point + } + + n += size_struct(p.sprop, b) + } + return +} + +// Encode an extension map. +func (o *Buffer) enc_map(p *Properties, base structPointer) error { + v := *structPointer_ExtMap(base, p.field) + if err := encodeExtensionMap(v); err != nil { + return err + } + // Fast-path for common cases: zero or one extensions. + if len(v) <= 1 { + for _, e := range v { + o.buf = append(o.buf, e.enc...) + } + return nil + } + + // Sort keys to provide a deterministic encoding. + keys := make([]int, 0, len(v)) + for k := range v { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + for _, k := range keys { + o.buf = append(o.buf, v[int32(k)].enc...) + } + return nil +} + +func size_map(p *Properties, base structPointer) int { + v := *structPointer_ExtMap(base, p.field) + return sizeExtensionMap(v) +} + +// Encode a map field. +func (o *Buffer) enc_new_map(p *Properties, base structPointer) error { + var state errorState // XXX: or do we need to plumb this through? + + /* + A map defined as + map map_field = N; + is encoded in the same way as + message MapFieldEntry { + key_type key = 1; + value_type value = 2; + } + repeated MapFieldEntry map_field = N; + */ + + v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V + if v.Len() == 0 { + return nil + } + + keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) + + enc := func() error { + if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil { + return err + } + if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil { + return err + } + return nil + } + + // Don't sort map keys. It is not required by the spec, and C++ doesn't do it. + for _, key := range v.MapKeys() { + val := v.MapIndex(key) + + // The only illegal map entry values are nil message pointers. + if val.Kind() == reflect.Ptr && val.IsNil() { + return errors.New("proto: map has nil element") + } + + keycopy.Set(key) + valcopy.Set(val) + + o.buf = append(o.buf, p.tagcode...) 
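+		// enc_len_thing writes the entry as an embedded message: a varint
+		// length followed by the key (tag 1) and value (tag 2) fields,
+		// matching the MapFieldEntry form described above.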
+ if err := o.enc_len_thing(enc, &state); err != nil { + return err + } + } + return nil +} + +func size_new_map(p *Properties, base structPointer) int { + v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V + + keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) + + n := 0 + for _, key := range v.MapKeys() { + val := v.MapIndex(key) + keycopy.Set(key) + valcopy.Set(val) + + // Tag codes for key and val are the responsibility of the sub-sizer. + keysize := p.mkeyprop.size(p.mkeyprop, keybase) + valsize := p.mvalprop.size(p.mvalprop, valbase) + entry := keysize + valsize + // Add on tag code and length of map entry itself. + n += len(p.tagcode) + sizeVarint(uint64(entry)) + entry + } + return n +} + +// mapEncodeScratch returns a new reflect.Value matching the map's value type, +// and a structPointer suitable for passing to an encoder or sizer. +func mapEncodeScratch(mapType reflect.Type) (keycopy, valcopy reflect.Value, keybase, valbase structPointer) { + // Prepare addressable doubly-indirect placeholders for the key and value types. + // This is needed because the element-type encoders expect **T, but the map iteration produces T. + + keycopy = reflect.New(mapType.Key()).Elem() // addressable K + keyptr := reflect.New(reflect.PtrTo(keycopy.Type())).Elem() // addressable *K + keyptr.Set(keycopy.Addr()) // + keybase = toStructPointer(keyptr.Addr()) // **K + + // Value types are more varied and require special handling. + switch mapType.Elem().Kind() { + case reflect.Slice: + // []byte + var dummy []byte + valcopy = reflect.ValueOf(&dummy).Elem() // addressable []byte + valbase = toStructPointer(valcopy.Addr()) + case reflect.Ptr: + // message; the generated field type is map[K]*Msg (so V is *Msg), + // so we only need one level of indirection. + valcopy = reflect.New(mapType.Elem()).Elem() // addressable V + valbase = toStructPointer(valcopy.Addr()) + default: + // everything else + valcopy = reflect.New(mapType.Elem()).Elem() // addressable V + valptr := reflect.New(reflect.PtrTo(valcopy.Type())).Elem() // addressable *V + valptr.Set(valcopy.Addr()) // + valbase = toStructPointer(valptr.Addr()) // **V + } + return +} + +// Encode a struct. +func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error { + var state errorState + // Encode fields in tag order so that decoders may use optimizations + // that depend on the ordering. + // https://developers.google.com/protocol-buffers/docs/encoding#order + for _, i := range prop.order { + p := prop.Prop[i] + if p.enc != nil { + err := p.enc(o, p, base) + if err != nil { + if err == ErrNil { + if p.Required && state.err == nil { + state.err = &RequiredNotSetError{p.Name} + } + } else if err == errRepeatedHasNil { + // Give more context to nil values in repeated fields. + return errors.New("repeated field " + p.OrigName + " has nil element") + } else if !state.shouldContinue(err, p) { + return err + } + } + } + } + + // Do oneof fields. + if prop.oneofMarshaler != nil { + m := structPointer_Interface(base, prop.stype).(Message) + if err := prop.oneofMarshaler(m, o); err != nil { + return err + } + } + + // Add unrecognized fields at the end. + if prop.unrecField.IsValid() { + v := *structPointer_Bytes(base, prop.unrecField) + if len(v) > 0 { + o.buf = append(o.buf, v...) 
+ } + } + + return state.err +} + +func size_struct(prop *StructProperties, base structPointer) (n int) { + for _, i := range prop.order { + p := prop.Prop[i] + if p.size != nil { + n += p.size(p, base) + } + } + + // Add unrecognized fields at the end. + if prop.unrecField.IsValid() { + v := *structPointer_Bytes(base, prop.unrecField) + n += len(v) + } + + // Factor in any oneof fields. + // TODO: This could be faster and use less reflection. + if prop.oneofMarshaler != nil { + sv := reflect.ValueOf(structPointer_Interface(base, prop.stype)).Elem() + for i := 0; i < prop.stype.NumField(); i++ { + fv := sv.Field(i) + if fv.Kind() != reflect.Interface || fv.IsNil() { + continue + } + if prop.stype.Field(i).Tag.Get("protobuf_oneof") == "" { + continue + } + spv := fv.Elem() // interface -> *T + sv := spv.Elem() // *T -> T + sf := sv.Type().Field(0) // StructField inside T + var prop Properties + prop.Init(sf.Type, "whatever", sf.Tag.Get("protobuf"), &sf) + n += prop.size(&prop, toStructPointer(spv)) + } + } + + return +} + +var zeroes [20]byte // longer than any conceivable sizeVarint + +// Encode a struct, preceded by its encoded length (as a varint). +func (o *Buffer) enc_len_struct(prop *StructProperties, base structPointer, state *errorState) error { + return o.enc_len_thing(func() error { return o.enc_struct(prop, base) }, state) +} + +// Encode something, preceded by its encoded length (as a varint). +func (o *Buffer) enc_len_thing(enc func() error, state *errorState) error { + iLen := len(o.buf) + o.buf = append(o.buf, 0, 0, 0, 0) // reserve four bytes for length + iMsg := len(o.buf) + err := enc() + if err != nil && !state.shouldContinue(err, nil) { + return err + } + lMsg := len(o.buf) - iMsg + lLen := sizeVarint(uint64(lMsg)) + switch x := lLen - (iMsg - iLen); { + case x > 0: // actual length is x bytes larger than the space we reserved + // Move msg x bytes right. + o.buf = append(o.buf, zeroes[:x]...) + copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) + case x < 0: // actual length is x bytes smaller than the space we reserved + // Move msg x bytes left. + copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) + o.buf = o.buf[:len(o.buf)+x] // x is negative + } + // Encode the length in the reserved space. + o.buf = o.buf[:iLen] + o.EncodeVarint(uint64(lMsg)) + o.buf = o.buf[:len(o.buf)+lMsg] + return state.err +} + +// errorState maintains the first error that occurs and updates that error +// with additional context. +type errorState struct { + err error +} + +// shouldContinue reports whether encoding should continue upon encountering the +// given error. If the error is RequiredNotSetError, shouldContinue returns true +// and, if this is the first appearance of that error, remembers it for future +// reporting. +// +// If prop is not nil, it may update any error with additional context about the +// field with the error. +func (s *errorState) shouldContinue(err error, prop *Properties) bool { + // Ignore unset required fields. + reqNotSet, ok := err.(*RequiredNotSetError) + if !ok { + return false + } + if s.err == nil { + if prop != nil { + err = &RequiredNotSetError{prop.Name + "." 
+ reqNotSet.field} + } + s.err = err + } + return true +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/encode_gogo.go b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/encode_gogo.go new file mode 100644 index 000000000000..f77cfb1eea4d --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/encode_gogo.go @@ -0,0 +1,354 @@ +// Extensions for Protocol Buffers to create more go like structures. +// +// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. +// http://github.com/gogo/protobuf/gogoproto +// +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// http://github.com/golang/protobuf/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "reflect" +) + +func NewRequiredNotSetError(field string) *RequiredNotSetError { + return &RequiredNotSetError{field} +} + +type Sizer interface { + Size() int +} + +func (o *Buffer) enc_ext_slice_byte(p *Properties, base structPointer) error { + s := *structPointer_Bytes(base, p.field) + if s == nil { + return ErrNil + } + o.buf = append(o.buf, s...) + return nil +} + +func size_ext_slice_byte(p *Properties, base structPointer) (n int) { + s := *structPointer_Bytes(base, p.field) + if s == nil { + return 0 + } + n += len(s) + return +} + +// Encode a reference to bool pointer. +func (o *Buffer) enc_ref_bool(p *Properties, base structPointer) error { + v := *structPointer_BoolVal(base, p.field) + x := 0 + if v { + x = 1 + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func size_ref_bool(p *Properties, base structPointer) int { + return len(p.tagcode) + 1 // each bool takes exactly one byte +} + +// Encode a reference to int32 pointer. +func (o *Buffer) enc_ref_int32(p *Properties, base structPointer) error { + v := structPointer_Word32Val(base, p.field) + x := int32(word32Val_Get(v)) + o.buf = append(o.buf, p.tagcode...) 
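+	// Reference (non-nullable) fields have no unset state, so the value is
+	// always emitted, even when it is zero.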
+ p.valEnc(o, uint64(x)) + return nil +} + +func size_ref_int32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32Val(base, p.field) + x := int32(word32Val_Get(v)) + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +func (o *Buffer) enc_ref_uint32(p *Properties, base structPointer) error { + v := structPointer_Word32Val(base, p.field) + x := word32Val_Get(v) + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func size_ref_uint32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32Val(base, p.field) + x := word32Val_Get(v) + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +// Encode a reference to an int64 pointer. +func (o *Buffer) enc_ref_int64(p *Properties, base structPointer) error { + v := structPointer_Word64Val(base, p.field) + x := word64Val_Get(v) + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, x) + return nil +} + +func size_ref_int64(p *Properties, base structPointer) (n int) { + v := structPointer_Word64Val(base, p.field) + x := word64Val_Get(v) + n += len(p.tagcode) + n += p.valSize(x) + return +} + +// Encode a reference to a string pointer. +func (o *Buffer) enc_ref_string(p *Properties, base structPointer) error { + v := *structPointer_StringVal(base, p.field) + o.buf = append(o.buf, p.tagcode...) + o.EncodeStringBytes(v) + return nil +} + +func size_ref_string(p *Properties, base structPointer) (n int) { + v := *structPointer_StringVal(base, p.field) + n += len(p.tagcode) + n += sizeStringBytes(v) + return +} + +// Encode a reference to a message struct. +func (o *Buffer) enc_ref_struct_message(p *Properties, base structPointer) error { + var state errorState + structp := structPointer_GetRefStructPointer(base, p.field) + if structPointer_IsNil(structp) { + return ErrNil + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, err := m.Marshal() + if err != nil && !state.shouldContinue(err, nil) { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + return nil + } + + o.buf = append(o.buf, p.tagcode...) + return o.enc_len_struct(p.sprop, structp, &state) +} + +//TODO this is only copied, please fix this +func size_ref_struct_message(p *Properties, base structPointer) int { + structp := structPointer_GetRefStructPointer(base, p.field) + if structPointer_IsNil(structp) { + return 0 + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, _ := m.Marshal() + n0 := len(p.tagcode) + n1 := sizeRawBytes(data) + return n0 + n1 + } + + n0 := len(p.tagcode) + n1 := size_struct(p.sprop, structp) + n2 := sizeVarint(uint64(n1)) // size of encoded length + return n0 + n1 + n2 +} + +// Encode a slice of references to message struct pointers ([]struct). +func (o *Buffer) enc_slice_ref_struct_message(p *Properties, base structPointer) error { + var state errorState + ss := structPointer_GetStructPointer(base, p.field) + ss1 := structPointer_GetRefStructPointer(ss, field(0)) + size := p.stype.Size() + l := structPointer_Len(base, p.field) + for i := 0; i < l; i++ { + structp := structPointer_Add(ss1, field(uintptr(i)*size)) + if structPointer_IsNil(structp) { + return errRepeatedHasNil + } + + // Can the object marshal itself? 
+ if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, err := m.Marshal() + if err != nil && !state.shouldContinue(err, nil) { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + continue + } + + o.buf = append(o.buf, p.tagcode...) + err := o.enc_len_struct(p.sprop, structp, &state) + if err != nil && !state.shouldContinue(err, nil) { + if err == ErrNil { + return errRepeatedHasNil + } + return err + } + + } + return state.err +} + +//TODO this is only copied, please fix this +func size_slice_ref_struct_message(p *Properties, base structPointer) (n int) { + ss := structPointer_GetStructPointer(base, p.field) + ss1 := structPointer_GetRefStructPointer(ss, field(0)) + size := p.stype.Size() + l := structPointer_Len(base, p.field) + n += l * len(p.tagcode) + for i := 0; i < l; i++ { + structp := structPointer_Add(ss1, field(uintptr(i)*size)) + if structPointer_IsNil(structp) { + return // return the size up to this point + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, _ := m.Marshal() + n += len(p.tagcode) + n += sizeRawBytes(data) + continue + } + + n0 := size_struct(p.sprop, structp) + n1 := sizeVarint(uint64(n0)) // size of encoded length + n += n0 + n1 + } + return +} + +func (o *Buffer) enc_custom_bytes(p *Properties, base structPointer) error { + i := structPointer_InterfaceRef(base, p.field, p.ctype) + if i == nil { + return ErrNil + } + custom := i.(Marshaler) + data, err := custom.Marshal() + if err != nil { + return err + } + if data == nil { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + return nil +} + +func size_custom_bytes(p *Properties, base structPointer) (n int) { + n += len(p.tagcode) + i := structPointer_InterfaceRef(base, p.field, p.ctype) + if i == nil { + return 0 + } + custom := i.(Marshaler) + data, _ := custom.Marshal() + n += sizeRawBytes(data) + return +} + +func (o *Buffer) enc_custom_ref_bytes(p *Properties, base structPointer) error { + custom := structPointer_InterfaceAt(base, p.field, p.ctype).(Marshaler) + data, err := custom.Marshal() + if err != nil { + return err + } + if data == nil { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + return nil +} + +func size_custom_ref_bytes(p *Properties, base structPointer) (n int) { + n += len(p.tagcode) + i := structPointer_InterfaceAt(base, p.field, p.ctype) + if i == nil { + return 0 + } + custom := i.(Marshaler) + data, _ := custom.Marshal() + n += sizeRawBytes(data) + return +} + +func (o *Buffer) enc_custom_slice_bytes(p *Properties, base structPointer) error { + inter := structPointer_InterfaceRef(base, p.field, p.ctype) + if inter == nil { + return ErrNil + } + slice := reflect.ValueOf(inter) + l := slice.Len() + for i := 0; i < l; i++ { + v := slice.Index(i) + custom := v.Interface().(Marshaler) + data, err := custom.Marshal() + if err != nil { + return err + } + o.buf = append(o.buf, p.tagcode...) 
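+		// Each custom element becomes an ordinary length-delimited field:
+		// the tag bytes followed by the element's own Marshal output.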
+ o.EncodeRawBytes(data) + } + return nil +} + +func size_custom_slice_bytes(p *Properties, base structPointer) (n int) { + inter := structPointer_InterfaceRef(base, p.field, p.ctype) + if inter == nil { + return 0 + } + slice := reflect.ValueOf(inter) + l := slice.Len() + n += l * len(p.tagcode) + for i := 0; i < l; i++ { + v := slice.Index(i) + custom := v.Interface().(Marshaler) + data, _ := custom.Marshal() + n += sizeRawBytes(data) + } + return +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/equal.go b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/equal.go new file mode 100644 index 000000000000..cc3f2c95a7a0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/equal.go @@ -0,0 +1,266 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Protocol buffer comparison. + +package proto + +import ( + "bytes" + "log" + "reflect" + "strings" +) + +/* +Equal returns true iff protocol buffers a and b are equal. +The arguments must both be pointers to protocol buffer structs. + +Equality is defined in this way: + - Two messages are equal iff they are the same type, + corresponding fields are equal, unknown field sets + are equal, and extensions sets are equal. + - Two set scalar fields are equal iff their values are equal. + If the fields are of a floating-point type, remember that + NaN != x for all x, including NaN. + - Two repeated fields are equal iff their lengths are the same, + and their corresponding elements are equal (a "bytes" field, + although represented by []byte, is not a repeated field) + - Two unset fields are equal. + - Two unknown field sets are equal if their current + encoded state is equal. + - Two extension sets are equal iff they have corresponding + elements that are pairwise equal. + - Every other combination of things are not equal. + +The return value is undefined if a and b are not protocol buffers. 
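+
+As an illustrative example (field names hypothetical): two messages whose
+optional Name fields both point to "x" are equal, but a message with Name
+unset and one with Name set to "" are not, because set/unset status is part
+of the comparison for pointer fields.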
+*/ +func Equal(a, b Message) bool { + if a == nil || b == nil { + return a == b + } + v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b) + if v1.Type() != v2.Type() { + return false + } + if v1.Kind() == reflect.Ptr { + if v1.IsNil() { + return v2.IsNil() + } + if v2.IsNil() { + return false + } + v1, v2 = v1.Elem(), v2.Elem() + } + if v1.Kind() != reflect.Struct { + return false + } + return equalStruct(v1, v2) +} + +// v1 and v2 are known to have the same type. +func equalStruct(v1, v2 reflect.Value) bool { + for i := 0; i < v1.NumField(); i++ { + f := v1.Type().Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + f1, f2 := v1.Field(i), v2.Field(i) + if f.Type.Kind() == reflect.Ptr { + if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 { + // both unset + continue + } else if n1 != n2 { + // set/unset mismatch + return false + } + b1, ok := f1.Interface().(raw) + if ok { + b2 := f2.Interface().(raw) + // RawMessage + if !bytes.Equal(b1.Bytes(), b2.Bytes()) { + return false + } + continue + } + f1, f2 = f1.Elem(), f2.Elem() + } + if !equalAny(f1, f2) { + return false + } + } + + if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() { + em2 := v2.FieldByName("XXX_extensions") + if !equalExtensions(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) { + return false + } + } + + uf := v1.FieldByName("XXX_unrecognized") + if !uf.IsValid() { + return true + } + + u1 := uf.Bytes() + u2 := v2.FieldByName("XXX_unrecognized").Bytes() + if !bytes.Equal(u1, u2) { + return false + } + + return true +} + +// v1 and v2 are known to have the same type. +func equalAny(v1, v2 reflect.Value) bool { + if v1.Type() == protoMessageType { + m1, _ := v1.Interface().(Message) + m2, _ := v2.Interface().(Message) + return Equal(m1, m2) + } + switch v1.Kind() { + case reflect.Bool: + return v1.Bool() == v2.Bool() + case reflect.Float32, reflect.Float64: + return v1.Float() == v2.Float() + case reflect.Int32, reflect.Int64: + return v1.Int() == v2.Int() + case reflect.Interface: + // Probably a oneof field; compare the inner values. + n1, n2 := v1.IsNil(), v2.IsNil() + if n1 || n2 { + return n1 == n2 + } + e1, e2 := v1.Elem(), v2.Elem() + if e1.Type() != e2.Type() { + return false + } + return equalAny(e1, e2) + case reflect.Map: + if v1.Len() != v2.Len() { + return false + } + for _, key := range v1.MapKeys() { + val2 := v2.MapIndex(key) + if !val2.IsValid() { + // This key was not found in the second map. + return false + } + if !equalAny(v1.MapIndex(key), val2) { + return false + } + } + return true + case reflect.Ptr: + return equalAny(v1.Elem(), v2.Elem()) + case reflect.Slice: + if v1.Type().Elem().Kind() == reflect.Uint8 { + // short circuit: []byte + if v1.IsNil() != v2.IsNil() { + return false + } + return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte)) + } + + if v1.Len() != v2.Len() { + return false + } + for i := 0; i < v1.Len(); i++ { + if !equalAny(v1.Index(i), v2.Index(i)) { + return false + } + } + return true + case reflect.String: + return v1.Interface().(string) == v2.Interface().(string) + case reflect.Struct: + return equalStruct(v1, v2) + case reflect.Uint32, reflect.Uint64: + return v1.Uint() == v2.Uint() + } + + // unknown type, so not a protocol buffer + log.Printf("proto: don't know how to compare %v", v1) + return false +} + +// base is the struct type that the extensions are based on. +// em1 and em2 are extension maps. 
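+// Extensions still in encoded form are decoded before comparing, so the
+// comparison is semantic rather than byte-for-byte.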
+func equalExtensions(base reflect.Type, em1, em2 map[int32]Extension) bool { + if len(em1) != len(em2) { + return false + } + + for extNum, e1 := range em1 { + e2, ok := em2[extNum] + if !ok { + return false + } + + m1, m2 := e1.value, e2.value + + if m1 != nil && m2 != nil { + // Both are unencoded. + if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2)) { + return false + } + continue + } + + // At least one is encoded. To do a semantically correct comparison + // we need to unmarshal them first. + var desc *ExtensionDesc + if m := extensionMaps[base]; m != nil { + desc = m[extNum] + } + if desc == nil { + log.Printf("proto: don't know how to compare extension %d of %v", extNum, base) + continue + } + var err error + if m1 == nil { + m1, err = decodeExtension(e1.enc, desc) + } + if m2 == nil && err == nil { + m2, err = decodeExtension(e2.enc, desc) + } + if err != nil { + // The encoded form is invalid. + log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err) + return false + } + if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2)) { + return false + } + } + + return true +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/extensions.go b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/extensions.go new file mode 100644 index 000000000000..9a6374fdbdec --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/extensions.go @@ -0,0 +1,519 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Types and routines for supporting protocol buffer extensions. + */ + +import ( + "errors" + "fmt" + "reflect" + "strconv" + "sync" +) + +// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message. +var ErrMissingExtension = errors.New("proto: missing extension") + +// ExtensionRange represents a range of message extensions for a protocol buffer. 
+// Used in code generated by the protocol compiler. +type ExtensionRange struct { + Start, End int32 // both inclusive +} + +// extendableProto is an interface implemented by any protocol buffer that may be extended. +type extendableProto interface { + Message + ExtensionRangeArray() []ExtensionRange +} + +type extensionsMap interface { + extendableProto + ExtensionMap() map[int32]Extension +} + +type extensionsBytes interface { + extendableProto + GetExtensions() *[]byte +} + +var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem() + +// ExtensionDesc represents an extension specification. +// Used in generated code from the protocol compiler. +type ExtensionDesc struct { + ExtendedType Message // nil pointer to the type that is being extended + ExtensionType interface{} // nil pointer to the extension type + Field int32 // field number + Name string // fully-qualified name of extension, for text formatting + Tag string // protobuf tag style +} + +func (ed *ExtensionDesc) repeated() bool { + t := reflect.TypeOf(ed.ExtensionType) + return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 +} + +// Extension represents an extension in a message. +type Extension struct { + // When an extension is stored in a message using SetExtension + // only desc and value are set. When the message is marshaled + // enc will be set to the encoded form of the message. + // + // When a message is unmarshaled and contains extensions, each + // extension will have only enc set. When such an extension is + // accessed using GetExtension (or GetExtensions) desc and value + // will be set. + desc *ExtensionDesc + value interface{} + enc []byte +} + +// SetRawExtension is for testing only. +func SetRawExtension(base extendableProto, id int32, b []byte) { + if ebase, ok := base.(extensionsMap); ok { + ebase.ExtensionMap()[id] = Extension{enc: b} + } else if ebase, ok := base.(extensionsBytes); ok { + clearExtension(base, id) + ext := ebase.GetExtensions() + *ext = append(*ext, b...) + } else { + panic("unreachable") + } +} + +// isExtensionField returns true iff the given field number is in an extension range. +func isExtensionField(pb extendableProto, field int32) bool { + for _, er := range pb.ExtensionRangeArray() { + if er.Start <= field && field <= er.End { + return true + } + } + return false +} + +// checkExtensionTypes checks that the given extension is valid for pb. +func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { + // Check the extended type. + if a, b := reflect.TypeOf(pb), reflect.TypeOf(extension.ExtendedType); a != b { + return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String()) + } + // Check the range. + if !isExtensionField(pb, extension.Field) { + return errors.New("proto: bad extension number; not in declared ranges") + } + return nil +} + +// extPropKey is sufficient to uniquely identify an extension. +type extPropKey struct { + base reflect.Type + field int32 +} + +var extProp = struct { + sync.RWMutex + m map[extPropKey]*Properties +}{ + m: make(map[extPropKey]*Properties), +} + +func extensionProperties(ed *ExtensionDesc) *Properties { + key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field} + + extProp.RLock() + if prop, ok := extProp.m[key]; ok { + extProp.RUnlock() + return prop + } + extProp.RUnlock() + + extProp.Lock() + defer extProp.Unlock() + // Check again. 
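+	// Another goroutine may have stored the properties while we were
+	// waiting for the write lock (classic double-checked locking).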
+ if prop, ok := extProp.m[key]; ok { + return prop + } + + prop := new(Properties) + prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil) + extProp.m[key] = prop + return prop +} + +// encodeExtensionMap encodes any unmarshaled (unencoded) extensions in m. +func encodeExtensionMap(m map[int32]Extension) error { + for k, e := range m { + err := encodeExtension(&e) + if err != nil { + return err + } + m[k] = e + } + return nil +} + +func encodeExtension(e *Extension) error { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + return nil + } + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + et := reflect.TypeOf(e.desc.ExtensionType) + props := extensionProperties(e.desc) + + p := NewBuffer(nil) + // If e.value has type T, the encoder expects a *struct{ X T }. + // Pass a *T with a zero field and hope it all works out. + x := reflect.New(et) + x.Elem().Set(reflect.ValueOf(e.value)) + if err := props.enc(p, props, toStructPointer(x)); err != nil { + return err + } + e.enc = p.buf + return nil +} + +func sizeExtensionMap(m map[int32]Extension) (n int) { + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + n += len(e.enc) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + et := reflect.TypeOf(e.desc.ExtensionType) + props := extensionProperties(e.desc) + + // If e.value has type T, the encoder expects a *struct{ X T }. + // Pass a *T with a zero field and hope it all works out. + x := reflect.New(et) + x.Elem().Set(reflect.ValueOf(e.value)) + n += props.size(props, toStructPointer(x)) + } + return +} + +// HasExtension returns whether the given extension is present in pb. +func HasExtension(pb extendableProto, extension *ExtensionDesc) bool { + // TODO: Check types, field numbers, etc.? + if epb, doki := pb.(extensionsMap); doki { + _, ok := epb.ExtensionMap()[extension.Field] + return ok + } else if epb, doki := pb.(extensionsBytes); doki { + ext := epb.GetExtensions() + buf := *ext + o := 0 + for o < len(buf) { + tag, n := DecodeVarint(buf[o:]) + fieldNum := int32(tag >> 3) + if int32(fieldNum) == extension.Field { + return true + } + wireType := int(tag & 0x7) + o += n + l, err := size(buf[o:], wireType) + if err != nil { + return false + } + o += l + } + return false + } + panic("unreachable") +} + +func deleteExtension(pb extensionsBytes, theFieldNum int32, offset int) int { + ext := pb.GetExtensions() + for offset < len(*ext) { + tag, n1 := DecodeVarint((*ext)[offset:]) + fieldNum := int32(tag >> 3) + wireType := int(tag & 0x7) + n2, err := size((*ext)[offset+n1:], wireType) + if err != nil { + panic(err) + } + newOffset := offset + n1 + n2 + if fieldNum == theFieldNum { + *ext = append((*ext)[:offset], (*ext)[newOffset:]...) + return offset + } + offset = newOffset + } + return -1 +} + +func clearExtension(pb extendableProto, fieldNum int32) { + if epb, doki := pb.(extensionsMap); doki { + delete(epb.ExtensionMap(), fieldNum) + } else if epb, doki := pb.(extensionsBytes); doki { + offset := 0 + for offset != -1 { + offset = deleteExtension(epb, fieldNum, offset) + } + } else { + panic("unreachable") + } +} + +// ClearExtension removes the given extension from pb. 
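+//
+// A typical round trip through the public extension API looks like this
+// (a sketch; pb.E_MyExt stands for some generated extension descriptor):
+//
+//	if err := proto.SetExtension(msg, pb.E_MyExt, proto.String("v")); err != nil {
+//		// handle err
+//	}
+//	if proto.HasExtension(msg, pb.E_MyExt) {
+//		v, _ := proto.GetExtension(msg, pb.E_MyExt)
+//		s := *v.(*string)
+//		_ = s
+//	}
+//	proto.ClearExtension(msg, pb.E_MyExt)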
+func ClearExtension(pb extendableProto, extension *ExtensionDesc) { + // TODO: Check types, field numbers, etc.? + clearExtension(pb, extension.Field) +} + +// GetExtension parses and returns the given extension of pb. +// If the extension is not present it returns ErrMissingExtension. +func GetExtension(pb extendableProto, extension *ExtensionDesc) (interface{}, error) { + if err := checkExtensionTypes(pb, extension); err != nil { + return nil, err + } + + if epb, doki := pb.(extensionsMap); doki { + emap := epb.ExtensionMap() + e, ok := emap[extension.Field] + if !ok { + // defaultExtensionValue returns the default value or + // ErrMissingExtension if there is no default. + return defaultExtensionValue(extension) + } + if e.value != nil { + // Already decoded. Check the descriptor, though. + if e.desc != extension { + // This shouldn't happen. If it does, it means that + // GetExtension was called twice with two different + // descriptors with the same field number. + return nil, errors.New("proto: descriptor conflict") + } + return e.value, nil + } + + v, err := decodeExtension(e.enc, extension) + if err != nil { + return nil, err + } + + // Remember the decoded version and drop the encoded version. + // That way it is safe to mutate what we return. + e.value = v + e.desc = extension + e.enc = nil + emap[extension.Field] = e + return e.value, nil + } else if epb, doki := pb.(extensionsBytes); doki { + ext := epb.GetExtensions() + o := 0 + for o < len(*ext) { + tag, n := DecodeVarint((*ext)[o:]) + fieldNum := int32(tag >> 3) + wireType := int(tag & 0x7) + l, err := size((*ext)[o+n:], wireType) + if err != nil { + return nil, err + } + if int32(fieldNum) == extension.Field { + v, err := decodeExtension((*ext)[o:o+n+l], extension) + if err != nil { + return nil, err + } + return v, nil + } + o += n + l + } + return defaultExtensionValue(extension) + } + panic("unreachable") +} + +// defaultExtensionValue returns the default value for extension. +// If no default for an extension is defined ErrMissingExtension is returned. +func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { + t := reflect.TypeOf(extension.ExtensionType) + props := extensionProperties(extension) + + sf, _, err := fieldDefault(t, props) + if err != nil { + return nil, err + } + + if sf == nil || sf.value == nil { + // There is no default value. + return nil, ErrMissingExtension + } + + if t.Kind() != reflect.Ptr { + // We do not need to return a Ptr, we can directly return sf.value. + return sf.value, nil + } + + // We need to return an interface{} that is a pointer to sf.value. + value := reflect.New(t).Elem() + value.Set(reflect.New(value.Type().Elem())) + if sf.kind == reflect.Int32 { + // We may have an int32 or an enum, but the underlying data is int32. + // Since we can't set an int32 into a non int32 reflect.value directly + // set it as a int32. + value.Elem().SetInt(int64(sf.value.(int32))) + } else { + value.Elem().Set(reflect.ValueOf(sf.value)) + } + return value.Interface(), nil +} + +// decodeExtension decodes an extension encoded in b. +func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { + o := NewBuffer(b) + + t := reflect.TypeOf(extension.ExtensionType) + rep := extension.repeated() + + props := extensionProperties(extension) + + // t is a pointer to a struct, pointer to basic type or a slice. + // Allocate a "field" to store the pointer/slice itself; the + // pointer/slice will be stored here. We pass + // the address of this field to props.dec. 
+ // This passes a zero field and a *t and lets props.dec + // interpret it as a *struct{ x t }. + value := reflect.New(t).Elem() + + for { + // Discard wire type and field number varint. It isn't needed. + if _, err := o.DecodeVarint(); err != nil { + return nil, err + } + + if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil { + return nil, err + } + + if !rep || o.index >= len(o.buf) { + break + } + } + return value.Interface(), nil +} + +// GetExtensions returns a slice of the extensions present in pb that are also listed in es. +// The returned slice has the same length as es; missing extensions will appear as nil elements. +func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { + epb, ok := pb.(extendableProto) + if !ok { + err = errors.New("proto: not an extendable proto") + return + } + extensions = make([]interface{}, len(es)) + for i, e := range es { + extensions[i], err = GetExtension(epb, e) + if err == ErrMissingExtension { + err = nil + } + if err != nil { + return + } + } + return +} + +// SetExtension sets the specified extension of pb to the specified value. +func SetExtension(pb extendableProto, extension *ExtensionDesc, value interface{}) error { + if err := checkExtensionTypes(pb, extension); err != nil { + return err + } + typ := reflect.TypeOf(extension.ExtensionType) + if typ != reflect.TypeOf(value) { + return errors.New("proto: bad extension value type") + } + // nil extension values need to be caught early, because the + // encoder can't distinguish an ErrNil due to a nil extension + // from an ErrNil due to a missing field. Extensions are + // always optional, so the encoder would just swallow the error + // and drop all the extensions from the encoded message. + if reflect.ValueOf(value).IsNil() { + return fmt.Errorf("proto: SetExtension called with nil value of type %T", value) + } + return setExtension(pb, extension, value) +} + +func setExtension(pb extendableProto, extension *ExtensionDesc, value interface{}) error { + if epb, doki := pb.(extensionsMap); doki { + epb.ExtensionMap()[extension.Field] = Extension{desc: extension, value: value} + } else if epb, doki := pb.(extensionsBytes); doki { + ClearExtension(pb, extension) + ext := epb.GetExtensions() + et := reflect.TypeOf(extension.ExtensionType) + props := extensionProperties(extension) + p := NewBuffer(nil) + x := reflect.New(et) + x.Elem().Set(reflect.ValueOf(value)) + if err := props.enc(p, props, toStructPointer(x)); err != nil { + return err + } + *ext = append(*ext, p.buf...) + } + return nil +} + +// A global registry of extensions. +// The generated code will register the generated descriptors by calling RegisterExtension. + +var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc) + +// RegisterExtension is called from the generated code. +func RegisterExtension(desc *ExtensionDesc) { + st := reflect.TypeOf(desc.ExtendedType).Elem() + m := extensionMaps[st] + if m == nil { + m = make(map[int32]*ExtensionDesc) + extensionMaps[st] = m + } + if _, ok := m[desc.Field]; ok { + panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field))) + } + m[desc.Field] = desc +} + +// RegisteredExtensions returns a map of the registered extensions of a +// protocol buffer struct, indexed by the extension number. +// The argument pb should be a nil pointer to the struct type. 
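+//
+// For example (a sketch, assuming a generated message type pb.MyMessage):
+//
+//	descs := proto.RegisteredExtensions((*pb.MyMessage)(nil))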
+func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { + return extensionMaps[reflect.TypeOf(pb).Elem()] +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/extensions_gogo.go b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/extensions_gogo.go new file mode 100644 index 000000000000..bd55fb68b61e --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/extensions_gogo.go @@ -0,0 +1,221 @@ +// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. +// http://github.com/gogo/protobuf/gogoproto +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package proto + +import ( + "bytes" + "errors" + "fmt" + "reflect" + "sort" + "strings" +) + +func GetBoolExtension(pb extendableProto, extension *ExtensionDesc, ifnotset bool) bool { + if reflect.ValueOf(pb).IsNil() { + return ifnotset + } + value, err := GetExtension(pb, extension) + if err != nil { + return ifnotset + } + if value == nil { + return ifnotset + } + if value.(*bool) == nil { + return ifnotset + } + return *(value.(*bool)) +} + +func (this *Extension) Equal(that *Extension) bool { + return bytes.Equal(this.enc, that.enc) +} + +func SizeOfExtensionMap(m map[int32]Extension) (n int) { + return sizeExtensionMap(m) +} + +type sortableMapElem struct { + field int32 + ext Extension +} + +func newSortableExtensionsFromMap(m map[int32]Extension) sortableExtensions { + s := make(sortableExtensions, 0, len(m)) + for k, v := range m { + s = append(s, &sortableMapElem{field: k, ext: v}) + } + return s +} + +type sortableExtensions []*sortableMapElem + +func (this sortableExtensions) Len() int { return len(this) } + +func (this sortableExtensions) Swap(i, j int) { this[i], this[j] = this[j], this[i] } + +func (this sortableExtensions) Less(i, j int) bool { return this[i].field < this[j].field } + +func (this sortableExtensions) String() string { + sort.Sort(this) + ss := make([]string, len(this)) + for i := range this { + ss[i] = fmt.Sprintf("%d: %v", this[i].field, this[i].ext) + } + return "map[" + strings.Join(ss, ",") + "]" +} + +func StringFromExtensionsMap(m map[int32]Extension) string { + return newSortableExtensionsFromMap(m).String() +} + +func StringFromExtensionsBytes(ext []byte) string { + m, err := BytesToExtensionsMap(ext) + if err != nil { + panic(err) + } + return StringFromExtensionsMap(m) +} + +func EncodeExtensionMap(m map[int32]Extension, data []byte) (n int, err error) { + if err := encodeExtensionMap(m); err != nil { + return 0, err + } + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + for _, k := range keys { + n += copy(data[n:], m[int32(k)].enc) + } + return n, nil +} + +func GetRawExtension(m map[int32]Extension, id int32) ([]byte, error) { + if m[id].value == nil || m[id].desc == nil { + return m[id].enc, nil + } + if err := encodeExtensionMap(m); err != nil { + return nil, err + } + return m[id].enc, nil +} + +func size(buf []byte, wire int) (int, error) { + switch wire { + case WireVarint: + _, n := DecodeVarint(buf) + return n, nil + case WireFixed64: + return 8, nil + case WireBytes: + v, n := DecodeVarint(buf) + return int(v) + n, nil + case WireFixed32: + return 4, nil + case WireStartGroup: + offset := 0 + for { + u, n := DecodeVarint(buf[offset:]) + fwire := int(u & 0x7) + offset += n + if fwire == WireEndGroup { + return offset, nil + } + s, err := size(buf[offset:], wire) + if err != nil { + return 0, err + } + offset += s + } + } + return 0, fmt.Errorf("proto: can't get size for unknown wire type %d", wire) +} + +func BytesToExtensionsMap(buf []byte) (map[int32]Extension, error) { + m := make(map[int32]Extension) + i := 0 + for i < len(buf) { + tag, n := DecodeVarint(buf[i:]) + if n <= 0 { + return nil, fmt.Errorf("unable to decode varint") + } + fieldNum := int32(tag >> 3) + wireType := int(tag & 0x7) + l, err := size(buf[i+n:], wireType) + if err != nil { + return nil, err + } + end := i + int(l) + n + m[int32(fieldNum)] = Extension{enc: buf[i:end]} + i = end + } + return m, nil +} + +func NewExtension(e []byte) Extension { + ee := Extension{enc: make([]byte, len(e))} + copy(ee.enc, e) + 
return ee +} + +func (this Extension) GoString() string { + if this.enc == nil { + if err := encodeExtension(&this); err != nil { + panic(err) + } + } + return fmt.Sprintf("proto.NewExtension(%#v)", this.enc) +} + +func SetUnsafeExtension(pb extendableProto, fieldNum int32, value interface{}) error { + typ := reflect.TypeOf(pb).Elem() + ext, ok := extensionMaps[typ] + if !ok { + return fmt.Errorf("proto: bad extended type; %s is not extendable", typ.String()) + } + desc, ok := ext[fieldNum] + if !ok { + return errors.New("proto: bad extension number; not in declared ranges") + } + return setExtension(pb, desc, value) +} + +func GetUnsafeExtension(pb extendableProto, fieldNum int32) (interface{}, error) { + typ := reflect.TypeOf(pb).Elem() + ext, ok := extensionMaps[typ] + if !ok { + return nil, fmt.Errorf("proto: bad extended type; %s is not extendable", typ.String()) + } + desc, ok := ext[fieldNum] + if !ok { + return nil, fmt.Errorf("unregistered field number %d", fieldNum) + } + return GetExtension(pb, desc) +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/lib.go b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/lib.go new file mode 100644 index 000000000000..8ffa91a3e908 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/lib.go @@ -0,0 +1,883 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package proto converts data structures to and from the wire format of +protocol buffers. It works in concert with the Go source code generated +for .proto files by the protocol compiler. + +A summary of the properties of the protocol buffer interface +for a protocol buffer variable v: + + - Names are turned from camel_case to CamelCase for export. + - There are no methods on v to set fields; just treat + them as structure fields. + - There are getters that return a field's value if set, + and return the field's default value if unset. 
+ The getters work even if the receiver is a nil message. + - The zero value for a struct is its correct initialization state. + All desired fields must be set before marshaling. + - A Reset() method will restore a protobuf struct to its zero state. + - Non-repeated fields are pointers to the values; nil means unset. + That is, optional or required field int32 f becomes F *int32. + - Repeated fields are slices. + - Helper functions are available to aid the setting of fields. + msg.Foo = proto.String("hello") // set field + - Constants are defined to hold the default values of all fields that + have them. They have the form Default_StructName_FieldName. + Because the getter methods handle defaulted values, + direct use of these constants should be rare. + - Enums are given type names and maps from names to values. + Enum values are prefixed by the enclosing message's name, or by the + enum's type name if it is a top-level enum. Enum types have a String + method, and a Enum method to assist in message construction. + - Nested messages, groups and enums have type names prefixed with the name of + the surrounding message type. + - Extensions are given descriptor names that start with E_, + followed by an underscore-delimited list of the nested messages + that contain it (if any) followed by the CamelCased name of the + extension field itself. HasExtension, ClearExtension, GetExtension + and SetExtension are functions for manipulating extensions. + - Oneof field sets are given a single field in their message, + with distinguished wrapper types for each possible field value. + - Marshal and Unmarshal are functions to encode and decode the wire format. + +The simplest way to describe this is to see an example. +Given file test.proto, containing + + package example; + + enum FOO { X = 17; } + + message Test { + required string label = 1; + optional int32 type = 2 [default=77]; + repeated int64 reps = 3; + optional group OptionalGroup = 4 { + required string RequiredField = 5; + } + oneof union { + int32 number = 6; + string name = 7; + } + } + +The resulting file, test.pb.go, is: + + package example + + import proto "github.com/gogo/protobuf/proto" + import math "math" + + type FOO int32 + const ( + FOO_X FOO = 17 + ) + var FOO_name = map[int32]string{ + 17: "X", + } + var FOO_value = map[string]int32{ + "X": 17, + } + + func (x FOO) Enum() *FOO { + p := new(FOO) + *p = x + return p + } + func (x FOO) String() string { + return proto.EnumName(FOO_name, int32(x)) + } + func (x *FOO) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FOO_value, data) + if err != nil { + return err + } + *x = FOO(value) + return nil + } + + type Test struct { + Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` + Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` + Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` + Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` + // Types that are valid to be assigned to Union: + // *Test_Number + // *Test_Name + Union isTest_Union `protobuf_oneof:"union"` + XXX_unrecognized []byte `json:"-"` + } + func (m *Test) Reset() { *m = Test{} } + func (m *Test) String() string { return proto.CompactTextString(m) } + func (*Test) ProtoMessage() {} + + type isTest_Union interface { + isTest_Union() + } + + type Test_Number struct { + Number int32 `protobuf:"varint,6,opt,name=number"` + } + type Test_Name struct { + Name string 
`protobuf:"bytes,7,opt,name=name"` + } + + func (*Test_Number) isTest_Union() {} + func (*Test_Name) isTest_Union() {} + + func (m *Test) GetUnion() isTest_Union { + if m != nil { + return m.Union + } + return nil + } + const Default_Test_Type int32 = 77 + + func (m *Test) GetLabel() string { + if m != nil && m.Label != nil { + return *m.Label + } + return "" + } + + func (m *Test) GetType() int32 { + if m != nil && m.Type != nil { + return *m.Type + } + return Default_Test_Type + } + + func (m *Test) GetOptionalgroup() *Test_OptionalGroup { + if m != nil { + return m.Optionalgroup + } + return nil + } + + type Test_OptionalGroup struct { + RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"` + } + func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} } + func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) } + + func (m *Test_OptionalGroup) GetRequiredField() string { + if m != nil && m.RequiredField != nil { + return *m.RequiredField + } + return "" + } + + func (m *Test) GetNumber() int32 { + if x, ok := m.GetUnion().(*Test_Number); ok { + return x.Number + } + return 0 + } + + func (m *Test) GetName() string { + if x, ok := m.GetUnion().(*Test_Name); ok { + return x.Name + } + return "" + } + + func init() { + proto.RegisterEnum("example.FOO", FOO_name, FOO_value) + } + +To create and play with a Test object: + + package main + + import ( + "log" + + "github.com/gogo/protobuf/proto" + pb "./example.pb" + ) + + func main() { + test := &pb.Test{ + Label: proto.String("hello"), + Type: proto.Int32(17), + Optionalgroup: &pb.Test_OptionalGroup{ + RequiredField: proto.String("good bye"), + }, + Union: &pb.Test_Name{"fred"}, + } + data, err := proto.Marshal(test) + if err != nil { + log.Fatal("marshaling error: ", err) + } + newTest := &pb.Test{} + err = proto.Unmarshal(data, newTest) + if err != nil { + log.Fatal("unmarshaling error: ", err) + } + // Now test and newTest contain the same data. + if test.GetLabel() != newTest.GetLabel() { + log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) + } + // Use a type switch to determine which oneof was set. + switch u := test.Union.(type) { + case *pb.Test_Number: // u.Number contains the number. + case *pb.Test_Name: // u.Name contains the string. + } + // etc. + } +*/ +package proto + +import ( + "encoding/json" + "fmt" + "log" + "reflect" + "sort" + "strconv" + "sync" +) + +// Message is implemented by generated protocol buffer messages. +type Message interface { + Reset() + String() string + ProtoMessage() +} + +// Stats records allocation details about the protocol buffer encoders +// and decoders. Useful for tuning the library itself. +type Stats struct { + Emalloc uint64 // mallocs in encode + Dmalloc uint64 // mallocs in decode + Encode uint64 // number of encodes + Decode uint64 // number of decodes + Chit uint64 // number of cache hits + Cmiss uint64 // number of cache misses + Size uint64 // number of sizes +} + +// Set to true to enable stats collection. +const collectStats = false + +var stats Stats + +// GetStats returns a copy of the global Stats structure. +func GetStats() Stats { return stats } + +// A Buffer is a buffer manager for marshaling and unmarshaling +// protocol buffers. It may be reused between invocations to +// reduce memory usage. It is not necessary to use a Buffer; +// the global functions Marshal and Unmarshal create a +// temporary Buffer and are fine for most applications. 
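+//
+// A sketch of reusing one Buffer across messages (send stands in for
+// application code):
+//
+//	var b proto.Buffer
+//	for _, m := range msgs {
+//		b.Reset()
+//		if err := b.Marshal(m); err != nil {
+//			// handle err
+//		}
+//		send(b.Bytes())
+//	}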
+type Buffer struct { + buf []byte // encode/decode byte stream + index int // write point + + // pools of basic types to amortize allocation. + bools []bool + uint32s []uint32 + uint64s []uint64 + + // extra pools, only used with pointer_reflect.go + int32s []int32 + int64s []int64 + float32s []float32 + float64s []float64 +} + +// NewBuffer allocates a new Buffer and initializes its internal data to +// the contents of the argument slice. +func NewBuffer(e []byte) *Buffer { + return &Buffer{buf: e} +} + +// Reset resets the Buffer, ready for marshaling a new protocol buffer. +func (p *Buffer) Reset() { + p.buf = p.buf[0:0] // for reading/writing + p.index = 0 // for reading +} + +// SetBuf replaces the internal buffer with the slice, +// ready for unmarshaling the contents of the slice. +func (p *Buffer) SetBuf(s []byte) { + p.buf = s + p.index = 0 +} + +// Bytes returns the contents of the Buffer. +func (p *Buffer) Bytes() []byte { return p.buf } + +/* + * Helper routines for simplifying the creation of optional fields of basic type. + */ + +// Bool is a helper routine that allocates a new bool value +// to store v and returns a pointer to it. +func Bool(v bool) *bool { + return &v +} + +// Int32 is a helper routine that allocates a new int32 value +// to store v and returns a pointer to it. +func Int32(v int32) *int32 { + return &v +} + +// Int is a helper routine that allocates a new int32 value +// to store v and returns a pointer to it, but unlike Int32 +// its argument value is an int. +func Int(v int) *int32 { + p := new(int32) + *p = int32(v) + return p +} + +// Int64 is a helper routine that allocates a new int64 value +// to store v and returns a pointer to it. +func Int64(v int64) *int64 { + return &v +} + +// Float32 is a helper routine that allocates a new float32 value +// to store v and returns a pointer to it. +func Float32(v float32) *float32 { + return &v +} + +// Float64 is a helper routine that allocates a new float64 value +// to store v and returns a pointer to it. +func Float64(v float64) *float64 { + return &v +} + +// Uint32 is a helper routine that allocates a new uint32 value +// to store v and returns a pointer to it. +func Uint32(v uint32) *uint32 { + return &v +} + +// Uint64 is a helper routine that allocates a new uint64 value +// to store v and returns a pointer to it. +func Uint64(v uint64) *uint64 { + return &v +} + +// String is a helper routine that allocates a new string value +// to store v and returns a pointer to it. +func String(v string) *string { + return &v +} + +// EnumName is a helper function to simplify printing protocol buffer enums +// by name. Given an enum map and a value, it returns a useful string. +func EnumName(m map[int32]string, v int32) string { + s, ok := m[v] + if ok { + return s + } + return strconv.Itoa(int(v)) +} + +// UnmarshalJSONEnum is a helper function to simplify recovering enum int values +// from their JSON-encoded representation. Given a map from the enum's symbolic +// names to its int values, and a byte buffer containing the JSON-encoded +// value, it returns an int32 that can be cast to the enum type by the caller. +// +// The function can deal with both JSON representations, numeric and symbolic. +func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { + if data[0] == '"' { + // New style: enums are strings. 
+ var repr string + if err := json.Unmarshal(data, &repr); err != nil { + return -1, err + } + val, ok := m[repr] + if !ok { + return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) + } + return val, nil + } + // Old style: enums are ints. + var val int32 + if err := json.Unmarshal(data, &val); err != nil { + return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) + } + return val, nil +} + +// DebugPrint dumps the encoded data in b in a debugging format with a header +// including the string s. Used in testing but made available for general debugging. +func (p *Buffer) DebugPrint(s string, b []byte) { + var u uint64 + + obuf := p.buf + index := p.index + p.buf = b + p.index = 0 + depth := 0 + + fmt.Printf("\n--- %s ---\n", s) + +out: + for { + for i := 0; i < depth; i++ { + fmt.Print(" ") + } + + index := p.index + if index == len(p.buf) { + break + } + + op, err := p.DecodeVarint() + if err != nil { + fmt.Printf("%3d: fetching op err %v\n", index, err) + break out + } + tag := op >> 3 + wire := op & 7 + + switch wire { + default: + fmt.Printf("%3d: t=%3d unknown wire=%d\n", + index, tag, wire) + break out + + case WireBytes: + var r []byte + + r, err = p.DecodeRawBytes(false) + if err != nil { + break out + } + fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r)) + if len(r) <= 6 { + for i := 0; i < len(r); i++ { + fmt.Printf(" %.2x", r[i]) + } + } else { + for i := 0; i < 3; i++ { + fmt.Printf(" %.2x", r[i]) + } + fmt.Printf(" ..") + for i := len(r) - 3; i < len(r); i++ { + fmt.Printf(" %.2x", r[i]) + } + } + fmt.Printf("\n") + + case WireFixed32: + u, err = p.DecodeFixed32() + if err != nil { + fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u) + + case WireFixed64: + u, err = p.DecodeFixed64() + if err != nil { + fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u) + + case WireVarint: + u, err = p.DecodeVarint() + if err != nil { + fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u) + + case WireStartGroup: + fmt.Printf("%3d: t=%3d start\n", index, tag) + depth++ + + case WireEndGroup: + depth-- + fmt.Printf("%3d: t=%3d end\n", index, tag) + } + } + + if depth != 0 { + fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth) + } + fmt.Printf("\n") + + p.buf = obuf + p.index = index +} + +// SetDefaults sets unset protocol buffer fields to their default values. +// It only modifies fields that are both unset and have defined defaults. +// It recursively sets default values in any non-nil sub-messages. +func SetDefaults(pb Message) { + setDefaults(reflect.ValueOf(pb), true, false) +} + +// v is a pointer to a struct. +func setDefaults(v reflect.Value, recur, zeros bool) { + v = v.Elem() + + defaultMu.RLock() + dm, ok := defaults[v.Type()] + defaultMu.RUnlock() + if !ok { + dm = buildDefaultMessage(v.Type()) + defaultMu.Lock() + defaults[v.Type()] = dm + defaultMu.Unlock() + } + + for _, sf := range dm.scalars { + f := v.Field(sf.index) + if !f.IsNil() { + // field already set + continue + } + dv := sf.value + if dv == nil && !zeros { + // no explicit default, and don't want to set zeros + continue + } + fptr := f.Addr().Interface() // **T + // TODO: Consider batching the allocations we do here. 
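+		// Allocate a fresh *T for the field and fill it with the declared
+		// default, or with the zero value when none is declared.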
+ switch sf.kind { + case reflect.Bool: + b := new(bool) + if dv != nil { + *b = dv.(bool) + } + *(fptr.(**bool)) = b + case reflect.Float32: + f := new(float32) + if dv != nil { + *f = dv.(float32) + } + *(fptr.(**float32)) = f + case reflect.Float64: + f := new(float64) + if dv != nil { + *f = dv.(float64) + } + *(fptr.(**float64)) = f + case reflect.Int32: + // might be an enum + if ft := f.Type(); ft != int32PtrType { + // enum + f.Set(reflect.New(ft.Elem())) + if dv != nil { + f.Elem().SetInt(int64(dv.(int32))) + } + } else { + // int32 field + i := new(int32) + if dv != nil { + *i = dv.(int32) + } + *(fptr.(**int32)) = i + } + case reflect.Int64: + i := new(int64) + if dv != nil { + *i = dv.(int64) + } + *(fptr.(**int64)) = i + case reflect.String: + s := new(string) + if dv != nil { + *s = dv.(string) + } + *(fptr.(**string)) = s + case reflect.Uint8: + // exceptional case: []byte + var b []byte + if dv != nil { + db := dv.([]byte) + b = make([]byte, len(db)) + copy(b, db) + } else { + b = []byte{} + } + *(fptr.(*[]byte)) = b + case reflect.Uint32: + u := new(uint32) + if dv != nil { + *u = dv.(uint32) + } + *(fptr.(**uint32)) = u + case reflect.Uint64: + u := new(uint64) + if dv != nil { + *u = dv.(uint64) + } + *(fptr.(**uint64)) = u + default: + log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind) + } + } + + for _, ni := range dm.nested { + f := v.Field(ni) + // f is *T or []*T or map[T]*T + switch f.Kind() { + case reflect.Ptr: + if f.IsNil() { + continue + } + setDefaults(f, recur, zeros) + + case reflect.Slice: + for i := 0; i < f.Len(); i++ { + e := f.Index(i) + if e.IsNil() { + continue + } + setDefaults(e, recur, zeros) + } + + case reflect.Map: + for _, k := range f.MapKeys() { + e := f.MapIndex(k) + if e.IsNil() { + continue + } + setDefaults(e, recur, zeros) + } + } + } +} + +var ( + // defaults maps a protocol buffer struct type to a slice of the fields, + // with its scalar fields set to their proto-declared non-zero default values. + defaultMu sync.RWMutex + defaults = make(map[reflect.Type]defaultMessage) + + int32PtrType = reflect.TypeOf((*int32)(nil)) +) + +// defaultMessage represents information about the default values of a message. +type defaultMessage struct { + scalars []scalarField + nested []int // struct field index of nested messages +} + +type scalarField struct { + index int // struct field index + kind reflect.Kind // element type (the T in *T or []T) + value interface{} // the proto-declared default value, or nil +} + +// t is a struct type. +func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { + sprop := GetProperties(t) + for _, prop := range sprop.Prop { + fi, ok := sprop.decoderTags.get(prop.Tag) + if !ok { + // XXX_unrecognized + continue + } + ft := t.Field(fi).Type + + sf, nested, err := fieldDefault(ft, prop) + switch { + case err != nil: + log.Print(err) + case nested: + dm.nested = append(dm.nested, fi) + case sf != nil: + sf.index = fi + dm.scalars = append(dm.scalars, *sf) + } + } + + return dm +} + +// fieldDefault returns the scalarField for field type ft. +// sf will be nil if the field can not have a default. +// nestedMessage will be true if this is a nested message. +// Note that sf.index is not set on return. 
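+// For example, an optional int32 field declared with [default=77] yields
+// a scalarField with kind reflect.Int32 and value int32(77).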
+func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) { + var canHaveDefault bool + switch ft.Kind() { + case reflect.Ptr: + if ft.Elem().Kind() == reflect.Struct { + nestedMessage = true + } else { + canHaveDefault = true // proto2 scalar field + } + + case reflect.Slice: + switch ft.Elem().Kind() { + case reflect.Ptr: + nestedMessage = true // repeated message + case reflect.Uint8: + canHaveDefault = true // bytes field + } + + case reflect.Map: + if ft.Elem().Kind() == reflect.Ptr { + nestedMessage = true // map with message values + } + } + + if !canHaveDefault { + if nestedMessage { + return nil, true, nil + } + return nil, false, nil + } + + // We now know that ft is a pointer or slice. + sf = &scalarField{kind: ft.Elem().Kind()} + + // scalar fields without defaults + if !prop.HasDefault { + return sf, false, nil + } + + // a scalar field: either *T or []byte + switch ft.Elem().Kind() { + case reflect.Bool: + x, err := strconv.ParseBool(prop.Default) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err) + } + sf.value = x + case reflect.Float32: + x, err := strconv.ParseFloat(prop.Default, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err) + } + sf.value = float32(x) + case reflect.Float64: + x, err := strconv.ParseFloat(prop.Default, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err) + } + sf.value = x + case reflect.Int32: + x, err := strconv.ParseInt(prop.Default, 10, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err) + } + sf.value = int32(x) + case reflect.Int64: + x, err := strconv.ParseInt(prop.Default, 10, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err) + } + sf.value = x + case reflect.String: + sf.value = prop.Default + case reflect.Uint8: + // []byte (not *uint8) + sf.value = []byte(prop.Default) + case reflect.Uint32: + x, err := strconv.ParseUint(prop.Default, 10, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err) + } + sf.value = uint32(x) + case reflect.Uint64: + x, err := strconv.ParseUint(prop.Default, 10, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err) + } + sf.value = x + default: + return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind()) + } + + return sf, false, nil +} + +// Map fields may have key types of non-float scalars, strings and enums. +// The easiest way to sort them in some deterministic order is to use fmt. +// If this turns out to be inefficient we can always consider other options, +// such as doing a Schwartzian transform. + +func mapKeys(vs []reflect.Value) sort.Interface { + s := mapKeySorter{ + vs: vs, + // default Less function: textual comparison + less: func(a, b reflect.Value) bool { + return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface()) + }, + } + + // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps; + // numeric keys are sorted numerically. 
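+	// With no keys to inspect we cannot specialize, so keep the textual default.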
+ if len(vs) == 0 { + return s + } + switch vs[0].Kind() { + case reflect.Int32, reflect.Int64: + s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() } + case reflect.Uint32, reflect.Uint64: + s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() } + } + + return s +} + +type mapKeySorter struct { + vs []reflect.Value + less func(a, b reflect.Value) bool +} + +func (s mapKeySorter) Len() int { return len(s.vs) } +func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] } +func (s mapKeySorter) Less(i, j int) bool { + return s.less(s.vs[i], s.vs[j]) +} + +// isProto3Zero reports whether v is a zero proto3 value. +func isProto3Zero(v reflect.Value) bool { + switch v.Kind() { + case reflect.Bool: + return !v.Bool() + case reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint32, reflect.Uint64: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.String: + return v.String() == "" + } + return false +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/lib_gogo.go b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/lib_gogo.go new file mode 100644 index 000000000000..a6c2c06b23de --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/lib_gogo.go @@ -0,0 +1,40 @@ +// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. +// http://github.com/gogo/protobuf/gogoproto +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "encoding/json" + "strconv" +) + +func MarshalJSONEnum(m map[int32]string, value int32) ([]byte, error) { + s, ok := m[value] + if !ok { + s = strconv.Itoa(int(value)) + } + return json.Marshal(s) +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/message_set.go b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/message_set.go new file mode 100644 index 000000000000..e25e01e63748 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/message_set.go @@ -0,0 +1,280 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Support for message sets. + */ + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "reflect" + "sort" +) + +// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. +// A message type ID is required for storing a protocol buffer in a message set. +var errNoMessageTypeID = errors.New("proto does not have a message type ID") + +// The first two types (_MessageSet_Item and messageSet) +// model what the protocol compiler produces for the following protocol message: +// message MessageSet { +// repeated group Item = 1 { +// required int32 type_id = 2; +// required string message = 3; +// }; +// } +// That is the MessageSet wire format. We can't use a proto to generate these +// because that would introduce a circular dependency between it and this package. + +type _MessageSet_Item struct { + TypeId *int32 `protobuf:"varint,2,req,name=type_id"` + Message []byte `protobuf:"bytes,3,req,name=message"` +} + +type messageSet struct { + Item []*_MessageSet_Item `protobuf:"group,1,rep"` + XXX_unrecognized []byte + // TODO: caching? +} + +// Make sure messageSet is a Message. +var _ Message = (*messageSet)(nil) + +// messageTypeIder is an interface satisfied by a protocol buffer type +// that may be stored in a MessageSet. +type messageTypeIder interface { + MessageTypeId() int32 +} + +func (ms *messageSet) find(pb Message) *_MessageSet_Item { + mti, ok := pb.(messageTypeIder) + if !ok { + return nil + } + id := mti.MessageTypeId() + for _, item := range ms.Item { + if *item.TypeId == id { + return item + } + } + return nil +} + +func (ms *messageSet) Has(pb Message) bool { + if ms.find(pb) != nil { + return true + } + return false +} + +func (ms *messageSet) Unmarshal(pb Message) error { + if item := ms.find(pb); item != nil { + return Unmarshal(item.Message, pb) + } + if _, ok := pb.(messageTypeIder); !ok { + return errNoMessageTypeID + } + return nil // TODO: return error instead? 
+} + +func (ms *messageSet) Marshal(pb Message) error { + msg, err := Marshal(pb) + if err != nil { + return err + } + if item := ms.find(pb); item != nil { + // reuse existing item + item.Message = msg + return nil + } + + mti, ok := pb.(messageTypeIder) + if !ok { + return errNoMessageTypeID + } + + mtid := mti.MessageTypeId() + ms.Item = append(ms.Item, &_MessageSet_Item{ + TypeId: &mtid, + Message: msg, + }) + return nil +} + +func (ms *messageSet) Reset() { *ms = messageSet{} } +func (ms *messageSet) String() string { return CompactTextString(ms) } +func (*messageSet) ProtoMessage() {} + +// Support for the message_set_wire_format message option. + +func skipVarint(buf []byte) []byte { + i := 0 + for ; buf[i]&0x80 != 0; i++ { + } + return buf[i+1:] +} + +// MarshalMessageSet encodes the extension map represented by m in the message set wire format. +// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option. +func MarshalMessageSet(m map[int32]Extension) ([]byte, error) { + if err := encodeExtensionMap(m); err != nil { + return nil, err + } + + // Sort extension IDs to provide a deterministic encoding. + // See also enc_map in encode.go. + ids := make([]int, 0, len(m)) + for id := range m { + ids = append(ids, int(id)) + } + sort.Ints(ids) + + ms := &messageSet{Item: make([]*_MessageSet_Item, 0, len(m))} + for _, id := range ids { + e := m[int32(id)] + // Remove the wire type and field number varint, as well as the length varint. + msg := skipVarint(skipVarint(e.enc)) + + ms.Item = append(ms.Item, &_MessageSet_Item{ + TypeId: Int32(int32(id)), + Message: msg, + }) + } + return Marshal(ms) +} + +// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. +// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option. +func UnmarshalMessageSet(buf []byte, m map[int32]Extension) error { + ms := new(messageSet) + if err := Unmarshal(buf, ms); err != nil { + return err + } + for _, item := range ms.Item { + id := *item.TypeId + msg := item.Message + + // Restore wire type and field number varint, plus length varint. + // Be careful to preserve duplicate items. + b := EncodeVarint(uint64(id)<<3 | WireBytes) + if ext, ok := m[id]; ok { + // Existing data; rip off the tag and length varint + // so we join the new data correctly. + // We can assume that ext.enc is set because we are unmarshaling. + o := ext.enc[len(b):] // skip wire type and field number + _, n := DecodeVarint(o) // calculate length of length varint + o = o[n:] // skip length varint + msg = append(o, msg...) // join old data and new data + } + b = append(b, EncodeVarint(uint64(len(msg)))...) + b = append(b, msg...) + + m[id] = Extension{enc: b} + } + return nil +} + +// MarshalMessageSetJSON encodes the extension map represented by m in JSON format. +// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option. +func MarshalMessageSetJSON(m map[int32]Extension) ([]byte, error) { + var b bytes.Buffer + b.WriteByte('{') + + // Process the map in key order for deterministic output. + ids := make([]int32, 0, len(m)) + for id := range m { + ids = append(ids, id) + } + sort.Sort(int32Slice(ids)) // int32Slice defined in text.go + + for i, id := range ids { + ext := m[id] + if i > 0 { + b.WriteByte(',') + } + + msd, ok := messageSetMap[id] + if !ok { + // Unknown type; we can't render it, so skip it. 
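+			// (Without a messageSetMap entry there is no registered name to
+			// use as the JSON key and no Go type to unmarshal the payload
+			// into, so the entry cannot be rendered.)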
+ continue + } + fmt.Fprintf(&b, `"[%s]":`, msd.name) + + x := ext.value + if x == nil { + x = reflect.New(msd.t.Elem()).Interface() + if err := Unmarshal(ext.enc, x.(Message)); err != nil { + return nil, err + } + } + d, err := json.Marshal(x) + if err != nil { + return nil, err + } + b.Write(d) + } + b.WriteByte('}') + return b.Bytes(), nil +} + +// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format. +// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option. +func UnmarshalMessageSetJSON(buf []byte, m map[int32]Extension) error { + // Common-case fast path. + if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) { + return nil + } + + // This is fairly tricky, and it's not clear that it is needed. + return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented") +} + +// A global registry of types that can be used in a MessageSet. + +var messageSetMap = make(map[int32]messageSetDesc) + +type messageSetDesc struct { + t reflect.Type // pointer to struct + name string +} + +// RegisterMessageSetType is called from the generated code. +func RegisterMessageSetType(m Message, fieldNum int32, name string) { + messageSetMap[fieldNum] = messageSetDesc{ + t: reflect.TypeOf(m), + name: name, + } +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/pointer_reflect.go b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/pointer_reflect.go new file mode 100644 index 000000000000..749919d250a1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/pointer_reflect.go @@ -0,0 +1,479 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build appengine + +// This file contains an implementation of proto field accesses using package reflect. +// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can +// be used on App Engine. 
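+// (pointer_unsafe.go provides the same accessors under the inverse
+// "+build !appengine" constraint, so exactly one of the two files is
+// compiled into any given build.)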
+ +package proto + +import ( + "math" + "reflect" +) + +// A structPointer is a pointer to a struct. +type structPointer struct { + v reflect.Value +} + +// toStructPointer returns a structPointer equivalent to the given reflect value. +// The reflect value must itself be a pointer to a struct. +func toStructPointer(v reflect.Value) structPointer { + return structPointer{v} +} + +// IsNil reports whether p is nil. +func structPointer_IsNil(p structPointer) bool { + return p.v.IsNil() +} + +// Interface returns the struct pointer as an interface value. +func structPointer_Interface(p structPointer, _ reflect.Type) interface{} { + return p.v.Interface() +} + +// A field identifies a field in a struct, accessible from a structPointer. +// In this implementation, a field is identified by the sequence of field indices +// passed to reflect's FieldByIndex. +type field []int + +// toField returns a field equivalent to the given reflect field. +func toField(f *reflect.StructField) field { + return f.Index +} + +// invalidField is an invalid field identifier. +var invalidField = field(nil) + +// IsValid reports whether the field identifier is valid. +func (f field) IsValid() bool { return f != nil } + +// field returns the given field in the struct as a reflect value. +func structPointer_field(p structPointer, f field) reflect.Value { + // Special case: an extension map entry with a value of type T + // passes a *T to the struct-handling code with a zero field, + // expecting that it will be treated as equivalent to *struct{ X T }, + // which has the same memory layout. We have to handle that case + // specially, because reflect will panic if we call FieldByIndex on a + // non-struct. + if f == nil { + return p.v.Elem() + } + + return p.v.Elem().FieldByIndex(f) +} + +// ifield returns the given field in the struct as an interface value. +func structPointer_ifield(p structPointer, f field) interface{} { + return structPointer_field(p, f).Addr().Interface() +} + +// Bytes returns the address of a []byte field in the struct. +func structPointer_Bytes(p structPointer, f field) *[]byte { + return structPointer_ifield(p, f).(*[]byte) +} + +// BytesSlice returns the address of a [][]byte field in the struct. +func structPointer_BytesSlice(p structPointer, f field) *[][]byte { + return structPointer_ifield(p, f).(*[][]byte) +} + +// Bool returns the address of a *bool field in the struct. +func structPointer_Bool(p structPointer, f field) **bool { + return structPointer_ifield(p, f).(**bool) +} + +// BoolVal returns the address of a bool field in the struct. +func structPointer_BoolVal(p structPointer, f field) *bool { + return structPointer_ifield(p, f).(*bool) +} + +// BoolSlice returns the address of a []bool field in the struct. +func structPointer_BoolSlice(p structPointer, f field) *[]bool { + return structPointer_ifield(p, f).(*[]bool) +} + +// String returns the address of a *string field in the struct. +func structPointer_String(p structPointer, f field) **string { + return structPointer_ifield(p, f).(**string) +} + +// StringVal returns the address of a string field in the struct. +func structPointer_StringVal(p structPointer, f field) *string { + return structPointer_ifield(p, f).(*string) +} + +// StringSlice returns the address of a []string field in the struct. +func structPointer_StringSlice(p structPointer, f field) *[]string { + return structPointer_ifield(p, f).(*[]string) +} + +// ExtMap returns the address of an extension map field in the struct. 
+func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
+	return structPointer_ifield(p, f).(*map[int32]Extension)
+}
+
+// NewAt returns the reflect.Value for a pointer to a field in the struct.
+func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
+	return structPointer_field(p, f).Addr()
+}
+
+// SetStructPointer writes a *struct field in the struct.
+func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
+	structPointer_field(p, f).Set(q.v)
+}
+
+// GetStructPointer reads a *struct field in the struct.
+func structPointer_GetStructPointer(p structPointer, f field) structPointer {
+	return structPointer{structPointer_field(p, f)}
+}
+
+// StructPointerSlice returns the address of a []*struct field in the struct.
+func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice {
+	return structPointerSlice{structPointer_field(p, f)}
+}
+
+// A structPointerSlice represents the address of a slice of pointers to structs
+// (themselves messages or groups). That is, v.Type() is *[]*struct{...}.
+type structPointerSlice struct {
+	v reflect.Value
+}
+
+func (p structPointerSlice) Len() int                  { return p.v.Len() }
+func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} }
+func (p structPointerSlice) Append(q structPointer) {
+	p.v.Set(reflect.Append(p.v, q.v))
+}
+
+var (
+	int32Type   = reflect.TypeOf(int32(0))
+	uint32Type  = reflect.TypeOf(uint32(0))
+	float32Type = reflect.TypeOf(float32(0))
+	int64Type   = reflect.TypeOf(int64(0))
+	uint64Type  = reflect.TypeOf(uint64(0))
+	float64Type = reflect.TypeOf(float64(0))
+)
+
+// A word32 represents a field of type *int32, *uint32, *float32, or *enum.
+// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable.
+type word32 struct {
+	v reflect.Value
+}
+
+// IsNil reports whether p is nil.
+func word32_IsNil(p word32) bool {
+	return p.v.IsNil()
+}
+
+// Set sets p to point at a newly allocated word with bits set to x.
+func word32_Set(p word32, o *Buffer, x uint32) {
+	t := p.v.Type().Elem()
+	switch t {
+	case int32Type:
+		if len(o.int32s) == 0 {
+			o.int32s = make([]int32, uint32PoolSize)
+		}
+		o.int32s[0] = int32(x)
+		p.v.Set(reflect.ValueOf(&o.int32s[0]))
+		o.int32s = o.int32s[1:]
+		return
+	case uint32Type:
+		if len(o.uint32s) == 0 {
+			o.uint32s = make([]uint32, uint32PoolSize)
+		}
+		o.uint32s[0] = x
+		p.v.Set(reflect.ValueOf(&o.uint32s[0]))
+		o.uint32s = o.uint32s[1:]
+		return
+	case float32Type:
+		if len(o.float32s) == 0 {
+			o.float32s = make([]float32, uint32PoolSize)
+		}
+		o.float32s[0] = math.Float32frombits(x)
+		p.v.Set(reflect.ValueOf(&o.float32s[0]))
+		o.float32s = o.float32s[1:]
+		return
+	}
+
+	// must be enum
+	p.v.Set(reflect.New(t))
+	p.v.Elem().SetInt(int64(int32(x)))
+}
+
+// Get gets the bits pointed at by p, as a uint32.
+func word32_Get(p word32) uint32 {
+	elem := p.v.Elem()
+	switch elem.Kind() {
+	case reflect.Int32:
+		return uint32(elem.Int())
+	case reflect.Uint32:
+		return uint32(elem.Uint())
+	case reflect.Float32:
+		return math.Float32bits(float32(elem.Float()))
+	}
+	panic("unreachable")
+}
+
+// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct.
+func structPointer_Word32(p structPointer, f field) word32 {
+	return word32{structPointer_field(p, f)}
+}
+
+// A word32Val represents a field of type int32, uint32, float32, or enum.
+// That is, v.Type() is int32, uint32, float32, or enum and v is assignable.
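+// word32 above covers the pointer (optional, proto2-style) form of these
+// fields; word32Val covers the value form used for proto3 scalar fields,
+// which are stored directly in the struct.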
+type word32Val struct { + v reflect.Value +} + +// Set sets *p to x. +func word32Val_Set(p word32Val, x uint32) { + switch p.v.Type() { + case int32Type: + p.v.SetInt(int64(x)) + return + case uint32Type: + p.v.SetUint(uint64(x)) + return + case float32Type: + p.v.SetFloat(float64(math.Float32frombits(x))) + return + } + + // must be enum + p.v.SetInt(int64(int32(x))) +} + +// Get gets the bits pointed at by p, as a uint32. +func word32Val_Get(p word32Val) uint32 { + elem := p.v + switch elem.Kind() { + case reflect.Int32: + return uint32(elem.Int()) + case reflect.Uint32: + return uint32(elem.Uint()) + case reflect.Float32: + return math.Float32bits(float32(elem.Float())) + } + panic("unreachable") +} + +// Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct. +func structPointer_Word32Val(p structPointer, f field) word32Val { + return word32Val{structPointer_field(p, f)} +} + +// A word32Slice is a slice of 32-bit values. +// That is, v.Type() is []int32, []uint32, []float32, or []enum. +type word32Slice struct { + v reflect.Value +} + +func (p word32Slice) Append(x uint32) { + n, m := p.v.Len(), p.v.Cap() + if n < m { + p.v.SetLen(n + 1) + } else { + t := p.v.Type().Elem() + p.v.Set(reflect.Append(p.v, reflect.Zero(t))) + } + elem := p.v.Index(n) + switch elem.Kind() { + case reflect.Int32: + elem.SetInt(int64(int32(x))) + case reflect.Uint32: + elem.SetUint(uint64(x)) + case reflect.Float32: + elem.SetFloat(float64(math.Float32frombits(x))) + } +} + +func (p word32Slice) Len() int { + return p.v.Len() +} + +func (p word32Slice) Index(i int) uint32 { + elem := p.v.Index(i) + switch elem.Kind() { + case reflect.Int32: + return uint32(elem.Int()) + case reflect.Uint32: + return uint32(elem.Uint()) + case reflect.Float32: + return math.Float32bits(float32(elem.Float())) + } + panic("unreachable") +} + +// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct. +func structPointer_Word32Slice(p structPointer, f field) word32Slice { + return word32Slice{structPointer_field(p, f)} +} + +// word64 is like word32 but for 64-bit values. +type word64 struct { + v reflect.Value +} + +func word64_Set(p word64, o *Buffer, x uint64) { + t := p.v.Type().Elem() + switch t { + case int64Type: + if len(o.int64s) == 0 { + o.int64s = make([]int64, uint64PoolSize) + } + o.int64s[0] = int64(x) + p.v.Set(reflect.ValueOf(&o.int64s[0])) + o.int64s = o.int64s[1:] + return + case uint64Type: + if len(o.uint64s) == 0 { + o.uint64s = make([]uint64, uint64PoolSize) + } + o.uint64s[0] = x + p.v.Set(reflect.ValueOf(&o.uint64s[0])) + o.uint64s = o.uint64s[1:] + return + case float64Type: + if len(o.float64s) == 0 { + o.float64s = make([]float64, uint64PoolSize) + } + o.float64s[0] = math.Float64frombits(x) + p.v.Set(reflect.ValueOf(&o.float64s[0])) + o.float64s = o.float64s[1:] + return + } + panic("unreachable") +} + +func word64_IsNil(p word64) bool { + return p.v.IsNil() +} + +func word64_Get(p word64) uint64 { + elem := p.v.Elem() + switch elem.Kind() { + case reflect.Int64: + return uint64(elem.Int()) + case reflect.Uint64: + return elem.Uint() + case reflect.Float64: + return math.Float64bits(elem.Float()) + } + panic("unreachable") +} + +func structPointer_Word64(p structPointer, f field) word64 { + return word64{structPointer_field(p, f)} +} + +// word64Val is like word32Val but for 64-bit values. 
+type word64Val struct { + v reflect.Value +} + +func word64Val_Set(p word64Val, o *Buffer, x uint64) { + switch p.v.Type() { + case int64Type: + p.v.SetInt(int64(x)) + return + case uint64Type: + p.v.SetUint(x) + return + case float64Type: + p.v.SetFloat(math.Float64frombits(x)) + return + } + panic("unreachable") +} + +func word64Val_Get(p word64Val) uint64 { + elem := p.v + switch elem.Kind() { + case reflect.Int64: + return uint64(elem.Int()) + case reflect.Uint64: + return elem.Uint() + case reflect.Float64: + return math.Float64bits(elem.Float()) + } + panic("unreachable") +} + +func structPointer_Word64Val(p structPointer, f field) word64Val { + return word64Val{structPointer_field(p, f)} +} + +type word64Slice struct { + v reflect.Value +} + +func (p word64Slice) Append(x uint64) { + n, m := p.v.Len(), p.v.Cap() + if n < m { + p.v.SetLen(n + 1) + } else { + t := p.v.Type().Elem() + p.v.Set(reflect.Append(p.v, reflect.Zero(t))) + } + elem := p.v.Index(n) + switch elem.Kind() { + case reflect.Int64: + elem.SetInt(int64(int64(x))) + case reflect.Uint64: + elem.SetUint(uint64(x)) + case reflect.Float64: + elem.SetFloat(float64(math.Float64frombits(x))) + } +} + +func (p word64Slice) Len() int { + return p.v.Len() +} + +func (p word64Slice) Index(i int) uint64 { + elem := p.v.Index(i) + switch elem.Kind() { + case reflect.Int64: + return uint64(elem.Int()) + case reflect.Uint64: + return uint64(elem.Uint()) + case reflect.Float64: + return math.Float64bits(float64(elem.Float())) + } + panic("unreachable") +} + +func structPointer_Word64Slice(p structPointer, f field) word64Slice { + return word64Slice{structPointer_field(p, f)} +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/pointer_unsafe.go b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/pointer_unsafe.go new file mode 100644 index 000000000000..e9be0fe92ee7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/pointer_unsafe.go @@ -0,0 +1,266 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build !appengine + +// This file contains the implementation of the proto field accesses using package unsafe. + +package proto + +import ( + "reflect" + "unsafe" +) + +// NOTE: These type_Foo functions would more idiomatically be methods, +// but Go does not allow methods on pointer types, and we must preserve +// some pointer type for the garbage collector. We use these +// funcs with clunky names as our poor approximation to methods. +// +// An alternative would be +// type structPointer struct { p unsafe.Pointer } +// but that does not registerize as well. + +// A structPointer is a pointer to a struct. +type structPointer unsafe.Pointer + +// toStructPointer returns a structPointer equivalent to the given reflect value. +func toStructPointer(v reflect.Value) structPointer { + return structPointer(unsafe.Pointer(v.Pointer())) +} + +// IsNil reports whether p is nil. +func structPointer_IsNil(p structPointer) bool { + return p == nil +} + +// Interface returns the struct pointer, assumed to have element type t, +// as an interface value. +func structPointer_Interface(p structPointer, t reflect.Type) interface{} { + return reflect.NewAt(t, unsafe.Pointer(p)).Interface() +} + +// A field identifies a field in a struct, accessible from a structPointer. +// In this implementation, a field is identified by its byte offset from the start of the struct. +type field uintptr + +// toField returns a field equivalent to the given reflect field. +func toField(f *reflect.StructField) field { + return field(f.Offset) +} + +// invalidField is an invalid field identifier. +const invalidField = ^field(0) + +// IsValid reports whether the field identifier is valid. +func (f field) IsValid() bool { + return f != ^field(0) +} + +// Bytes returns the address of a []byte field in the struct. +func structPointer_Bytes(p structPointer, f field) *[]byte { + return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// BytesSlice returns the address of a [][]byte field in the struct. +func structPointer_BytesSlice(p structPointer, f field) *[][]byte { + return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// Bool returns the address of a *bool field in the struct. +func structPointer_Bool(p structPointer, f field) **bool { + return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// BoolVal returns the address of a bool field in the struct. +func structPointer_BoolVal(p structPointer, f field) *bool { + return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// BoolSlice returns the address of a []bool field in the struct. +func structPointer_BoolSlice(p structPointer, f field) *[]bool { + return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// String returns the address of a *string field in the struct. +func structPointer_String(p structPointer, f field) **string { + return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// StringVal returns the address of a string field in the struct. 
+func structPointer_StringVal(p structPointer, f field) *string {
+	return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// StringSlice returns the address of a []string field in the struct.
+func structPointer_StringSlice(p structPointer, f field) *[]string {
+	return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// ExtMap returns the address of an extension map field in the struct.
+func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
+	return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// NewAt returns the reflect.Value for a pointer to a field in the struct.
+func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
+	return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f)))
+}
+
+// SetStructPointer writes a *struct field in the struct.
+func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
+	*(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q
+}
+
+// GetStructPointer reads a *struct field in the struct.
+func structPointer_GetStructPointer(p structPointer, f field) structPointer {
+	return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// StructPointerSlice returns the address of a []*struct field in the struct.
+func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice {
+	return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups).
+type structPointerSlice []structPointer
+
+func (v *structPointerSlice) Len() int                  { return len(*v) }
+func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] }
+func (v *structPointerSlice) Append(p structPointer)    { *v = append(*v, p) }
+
+// A word32 is the address of a "pointer to 32-bit value" field.
+type word32 **uint32
+
+// IsNil reports whether *p is nil.
+func word32_IsNil(p word32) bool {
+	return *p == nil
+}
+
+// Set sets *p to point at a newly allocated word set to x.
+func word32_Set(p word32, o *Buffer, x uint32) {
+	if len(o.uint32s) == 0 {
+		o.uint32s = make([]uint32, uint32PoolSize)
+	}
+	o.uint32s[0] = x
+	*p = &o.uint32s[0]
+	o.uint32s = o.uint32s[1:]
+}
+
+// Get gets the value pointed at by *p.
+func word32_Get(p word32) uint32 {
+	return **p
+}
+
+// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct.
+func structPointer_Word32(p structPointer, f field) word32 {
+	return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+// A word32Val is the address of a 32-bit value field.
+type word32Val *uint32
+
+// Set sets *p to x.
+func word32Val_Set(p word32Val, x uint32) {
+	*p = x
+}
+
+// Get gets the value pointed at by p.
+func word32Val_Get(p word32Val) uint32 {
+	return *p
+}
+
+// Word32Val returns the address of an int32, uint32, float32, or enum field in the struct.
+func structPointer_Word32Val(p structPointer, f field) word32Val {
+	return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+// A word32Slice is a slice of 32-bit values.
+type word32Slice []uint32
+
+func (v *word32Slice) Append(x uint32)    { *v = append(*v, x) }
+func (v *word32Slice) Len() int           { return len(*v) }
+func (v *word32Slice) Index(i int) uint32 { return (*v)[i] }
+
+// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct.
+func structPointer_Word32Slice(p structPointer, f field) *word32Slice { + return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// word64 is like word32 but for 64-bit values. +type word64 **uint64 + +func word64_Set(p word64, o *Buffer, x uint64) { + if len(o.uint64s) == 0 { + o.uint64s = make([]uint64, uint64PoolSize) + } + o.uint64s[0] = x + *p = &o.uint64s[0] + o.uint64s = o.uint64s[1:] +} + +func word64_IsNil(p word64) bool { + return *p == nil +} + +func word64_Get(p word64) uint64 { + return **p +} + +func structPointer_Word64(p structPointer, f field) word64 { + return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +} + +// word64Val is like word32Val but for 64-bit values. +type word64Val *uint64 + +func word64Val_Set(p word64Val, o *Buffer, x uint64) { + *p = x +} + +func word64Val_Get(p word64Val) uint64 { + return *p +} + +func structPointer_Word64Val(p structPointer, f field) word64Val { + return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +} + +// word64Slice is like word32Slice but for 64-bit values. +type word64Slice []uint64 + +func (v *word64Slice) Append(x uint64) { *v = append(*v, x) } +func (v *word64Slice) Len() int { return len(*v) } +func (v *word64Slice) Index(i int) uint64 { return (*v)[i] } + +func structPointer_Word64Slice(p structPointer, f field) *word64Slice { + return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go new file mode 100644 index 000000000000..6bc85fa9873f --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go @@ -0,0 +1,108 @@ +// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. +// http://github.com/gogo/protobuf/gogoproto +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build !appengine + +// This file contains the implementation of the proto field accesses using package unsafe. 
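+//
+// As in pointer_unsafe.go, a field is a byte offset from the start of the
+// struct, and every accessor below follows the same pattern (a sketch):
+//
+//	ptr := unsafe.Pointer(uintptr(p) + uintptr(f))
+//
+// with ptr then converted to the concrete pointer type of the field.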
+ +package proto + +import ( + "reflect" + "unsafe" +) + +func structPointer_InterfaceAt(p structPointer, f field, t reflect.Type) interface{} { + point := unsafe.Pointer(uintptr(p) + uintptr(f)) + r := reflect.NewAt(t, point) + return r.Interface() +} + +func structPointer_InterfaceRef(p structPointer, f field, t reflect.Type) interface{} { + point := unsafe.Pointer(uintptr(p) + uintptr(f)) + r := reflect.NewAt(t, point) + if r.Elem().IsNil() { + return nil + } + return r.Elem().Interface() +} + +func copyUintPtr(oldptr, newptr uintptr, size int) { + oldbytes := make([]byte, 0) + oldslice := (*reflect.SliceHeader)(unsafe.Pointer(&oldbytes)) + oldslice.Data = oldptr + oldslice.Len = size + oldslice.Cap = size + newbytes := make([]byte, 0) + newslice := (*reflect.SliceHeader)(unsafe.Pointer(&newbytes)) + newslice.Data = newptr + newslice.Len = size + newslice.Cap = size + copy(newbytes, oldbytes) +} + +func structPointer_Copy(oldptr structPointer, newptr structPointer, size int) { + copyUintPtr(uintptr(oldptr), uintptr(newptr), size) +} + +func appendStructPointer(base structPointer, f field, typ reflect.Type) structPointer { + size := typ.Elem().Size() + oldHeader := structPointer_GetSliceHeader(base, f) + newLen := oldHeader.Len + 1 + slice := reflect.MakeSlice(typ, newLen, newLen) + bas := toStructPointer(slice) + for i := 0; i < oldHeader.Len; i++ { + newElemptr := uintptr(bas) + uintptr(i)*size + oldElemptr := oldHeader.Data + uintptr(i)*size + copyUintPtr(oldElemptr, newElemptr, int(size)) + } + + oldHeader.Data = uintptr(bas) + oldHeader.Len = newLen + oldHeader.Cap = newLen + + return structPointer(unsafe.Pointer(uintptr(unsafe.Pointer(bas)) + uintptr(uintptr(newLen-1)*size))) +} + +func structPointer_FieldPointer(p structPointer, f field) structPointer { + return structPointer(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +func structPointer_GetRefStructPointer(p structPointer, f field) structPointer { + return structPointer((*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +} + +func structPointer_GetSliceHeader(p structPointer, f field) *reflect.SliceHeader { + return (*reflect.SliceHeader)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +func structPointer_Add(p structPointer, size field) structPointer { + return structPointer(unsafe.Pointer(uintptr(p) + uintptr(size))) +} + +func structPointer_Len(p structPointer, f field) int { + return len(*(*[]interface{})(unsafe.Pointer(structPointer_GetRefStructPointer(p, f)))) +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/properties.go b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/properties.go new file mode 100644 index 000000000000..4711057e2be2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/properties.go @@ -0,0 +1,915 @@ +// Extensions for Protocol Buffers to create more go like structures. +// +// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. +// http://github.com/gogo/protobuf/gogoproto +// +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for encoding data into the wire format for protocol buffers. + */ + +import ( + "fmt" + "log" + "os" + "reflect" + "sort" + "strconv" + "strings" + "sync" +) + +const debug bool = false + +// Constants that identify the encoding of a value on the wire. +const ( + WireVarint = 0 + WireFixed64 = 1 + WireBytes = 2 + WireStartGroup = 3 + WireEndGroup = 4 + WireFixed32 = 5 +) + +const startSize = 10 // initial slice/string sizes + +// Encoders are defined in encode.go +// An encoder outputs the full representation of a field, including its +// tag and encoder type. +type encoder func(p *Buffer, prop *Properties, base structPointer) error + +// A valueEncoder encodes a single integer in a particular encoding. +type valueEncoder func(o *Buffer, x uint64) error + +// Sizers are defined in encode.go +// A sizer returns the encoded size of a field, including its tag and encoder +// type. +type sizer func(prop *Properties, base structPointer) int + +// A valueSizer returns the encoded size of a single integer in a particular +// encoding. +type valueSizer func(x uint64) int + +// Decoders are defined in decode.go +// A decoder creates a value from its wire representation. +// Unrecognized subelements are saved in unrec. +type decoder func(p *Buffer, prop *Properties, base structPointer) error + +// A valueDecoder decodes a single integer in a particular encoding. +type valueDecoder func(o *Buffer) (x uint64, err error) + +// A oneofMarshaler does the marshaling for all oneof fields in a message. +type oneofMarshaler func(Message, *Buffer) error + +// A oneofUnmarshaler does the unmarshaling for a oneof field in a message. +type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error) + +// tagMap is an optimization over map[int]int for typical protocol buffer +// use-cases. Encoded protocol buffers are often in tag order with small tag +// numbers. +type tagMap struct { + fastTags []int + slowTags map[int]int +} + +// tagMapFastLimit is the upper bound on the tag number that will be stored in +// the tagMap slice rather than its map. 
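+// For example (a sketch of the methods below), get(3) reads fastTags[3]
+// directly, while get(20000) falls back to the slowTags map.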
+const tagMapFastLimit = 1024 + +func (p *tagMap) get(t int) (int, bool) { + if t > 0 && t < tagMapFastLimit { + if t >= len(p.fastTags) { + return 0, false + } + fi := p.fastTags[t] + return fi, fi >= 0 + } + fi, ok := p.slowTags[t] + return fi, ok +} + +func (p *tagMap) put(t int, fi int) { + if t > 0 && t < tagMapFastLimit { + for len(p.fastTags) < t+1 { + p.fastTags = append(p.fastTags, -1) + } + p.fastTags[t] = fi + return + } + if p.slowTags == nil { + p.slowTags = make(map[int]int) + } + p.slowTags[t] = fi +} + +// StructProperties represents properties for all the fields of a struct. +// decoderTags and decoderOrigNames should only be used by the decoder. +type StructProperties struct { + Prop []*Properties // properties for each field + reqCount int // required count + decoderTags tagMap // map from proto tag to struct field number + decoderOrigNames map[string]int // map from original name to struct field number + order []int // list of struct field numbers in tag order + unrecField field // field id of the XXX_unrecognized []byte field + extendable bool // is this an extendable proto + + oneofMarshaler oneofMarshaler + oneofUnmarshaler oneofUnmarshaler + stype reflect.Type + + // OneofTypes contains information about the oneof fields in this message. + // It is keyed by the original name of a field. + OneofTypes map[string]*OneofProperties +} + +// OneofProperties represents information about a specific field in a oneof. +type OneofProperties struct { + Type reflect.Type // pointer to generated struct type for this oneof field + Field int // struct field number of the containing oneof in the message + Prop *Properties +} + +// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec. +// See encode.go, (*Buffer).enc_struct. + +func (sp *StructProperties) Len() int { return len(sp.order) } +func (sp *StructProperties) Less(i, j int) bool { + return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag +} +func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] } + +// Properties represents the protocol-specific behavior of a single struct field. 
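+//
+// Most of these fields are populated by parsing a generated struct tag such
+// as (a representative example, not taken from any specific message):
+//
+//	protobuf:"bytes,1,opt,name=foo,def=hello"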
+type Properties struct { + Name string // name of the field, for error messages + OrigName string // original name before protocol compiler (always set) + Wire string + WireType int + Tag int + Required bool + Optional bool + Repeated bool + Packed bool // relevant for repeated primitives only + Enum string // set for enum types only + proto3 bool // whether this is known to be a proto3 field; set for []byte only + oneof bool // whether this is a oneof field + + Default string // default value + HasDefault bool // whether an explicit default was provided + CustomType string + def_uint64 uint64 + + enc encoder + valEnc valueEncoder // set for bool and numeric types only + field field + tagcode []byte // encoding of EncodeVarint((Tag<<3)|WireType) + tagbuf [8]byte + stype reflect.Type // set for struct types only + sstype reflect.Type // set for slices of structs types only + ctype reflect.Type // set for custom types only + sprop *StructProperties // set for struct types only + isMarshaler bool + isUnmarshaler bool + + mtype reflect.Type // set for map types only + mkeyprop *Properties // set for map types only + mvalprop *Properties // set for map types only + + size sizer + valSize valueSizer // set for bool and numeric types only + + dec decoder + valDec valueDecoder // set for bool and numeric types only + + // If this is a packable field, this will be the decoder for the packed version of the field. + packedDec decoder +} + +// String formats the properties in the protobuf struct field tag style. +func (p *Properties) String() string { + s := p.Wire + s = "," + s += strconv.Itoa(p.Tag) + if p.Required { + s += ",req" + } + if p.Optional { + s += ",opt" + } + if p.Repeated { + s += ",rep" + } + if p.Packed { + s += ",packed" + } + if p.OrigName != p.Name { + s += ",name=" + p.OrigName + } + if p.proto3 { + s += ",proto3" + } + if p.oneof { + s += ",oneof" + } + if len(p.Enum) > 0 { + s += ",enum=" + p.Enum + } + if p.HasDefault { + s += ",def=" + p.Default + } + return s +} + +// Parse populates p by parsing a string in the protobuf struct field tag style. +func (p *Properties) Parse(s string) { + // "bytes,49,opt,name=foo,def=hello!" + fields := strings.Split(s, ",") // breaks def=, but handled below. 
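+	// e.g. "bytes,49,opt,name=foo,def=hello, world" splits into
+	// ["bytes" "49" "opt" "name=foo" "def=hello" " world"]; the def= case
+	// below re-joins the tail, since a default value may contain commas.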
+ if len(fields) < 2 { + fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s) + return + } + + p.Wire = fields[0] + switch p.Wire { + case "varint": + p.WireType = WireVarint + p.valEnc = (*Buffer).EncodeVarint + p.valDec = (*Buffer).DecodeVarint + p.valSize = sizeVarint + case "fixed32": + p.WireType = WireFixed32 + p.valEnc = (*Buffer).EncodeFixed32 + p.valDec = (*Buffer).DecodeFixed32 + p.valSize = sizeFixed32 + case "fixed64": + p.WireType = WireFixed64 + p.valEnc = (*Buffer).EncodeFixed64 + p.valDec = (*Buffer).DecodeFixed64 + p.valSize = sizeFixed64 + case "zigzag32": + p.WireType = WireVarint + p.valEnc = (*Buffer).EncodeZigzag32 + p.valDec = (*Buffer).DecodeZigzag32 + p.valSize = sizeZigzag32 + case "zigzag64": + p.WireType = WireVarint + p.valEnc = (*Buffer).EncodeZigzag64 + p.valDec = (*Buffer).DecodeZigzag64 + p.valSize = sizeZigzag64 + case "bytes", "group": + p.WireType = WireBytes + // no numeric converter for non-numeric types + default: + fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s) + return + } + + var err error + p.Tag, err = strconv.Atoi(fields[1]) + if err != nil { + return + } + + for i := 2; i < len(fields); i++ { + f := fields[i] + switch { + case f == "req": + p.Required = true + case f == "opt": + p.Optional = true + case f == "rep": + p.Repeated = true + case f == "packed": + p.Packed = true + case strings.HasPrefix(f, "name="): + p.OrigName = f[5:] + case strings.HasPrefix(f, "enum="): + p.Enum = f[5:] + case f == "proto3": + p.proto3 = true + case f == "oneof": + p.oneof = true + case strings.HasPrefix(f, "def="): + p.HasDefault = true + p.Default = f[4:] // rest of string + if i+1 < len(fields) { + // Commas aren't escaped, and def is always last. + p.Default += "," + strings.Join(fields[i+1:], ",") + break + } + case strings.HasPrefix(f, "embedded="): + p.OrigName = strings.Split(f, "=")[1] + case strings.HasPrefix(f, "customtype="): + p.CustomType = strings.Split(f, "=")[1] + } + } +} + +func logNoSliceEnc(t1, t2 reflect.Type) { + fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2) +} + +var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() + +// Initialize the fields for encoding and decoding. 
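+// The switch below dispatches on the Go kind of the field: value kinds are
+// proto3 (or gogo "ref") scalars, pointers are proto2 optional scalars or
+// messages, slices are repeated fields or []byte, and maps are proto3 maps.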
+func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { + p.enc = nil + p.dec = nil + p.size = nil + if len(p.CustomType) > 0 { + p.setCustomEncAndDec(typ) + p.setTag(lockGetProp) + return + } + switch t1 := typ; t1.Kind() { + default: + fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1) + + // proto3 scalar types + + case reflect.Bool: + if p.proto3 { + p.enc = (*Buffer).enc_proto3_bool + p.dec = (*Buffer).dec_proto3_bool + p.size = size_proto3_bool + } else { + p.enc = (*Buffer).enc_ref_bool + p.dec = (*Buffer).dec_proto3_bool + p.size = size_ref_bool + } + case reflect.Int32: + if p.proto3 { + p.enc = (*Buffer).enc_proto3_int32 + p.dec = (*Buffer).dec_proto3_int32 + p.size = size_proto3_int32 + } else { + p.enc = (*Buffer).enc_ref_int32 + p.dec = (*Buffer).dec_proto3_int32 + p.size = size_ref_int32 + } + case reflect.Uint32: + if p.proto3 { + p.enc = (*Buffer).enc_proto3_uint32 + p.dec = (*Buffer).dec_proto3_int32 // can reuse + p.size = size_proto3_uint32 + } else { + p.enc = (*Buffer).enc_ref_uint32 + p.dec = (*Buffer).dec_proto3_int32 // can reuse + p.size = size_ref_uint32 + } + case reflect.Int64, reflect.Uint64: + if p.proto3 { + p.enc = (*Buffer).enc_proto3_int64 + p.dec = (*Buffer).dec_proto3_int64 + p.size = size_proto3_int64 + } else { + p.enc = (*Buffer).enc_ref_int64 + p.dec = (*Buffer).dec_proto3_int64 + p.size = size_ref_int64 + } + case reflect.Float32: + if p.proto3 { + p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits + p.dec = (*Buffer).dec_proto3_int32 + p.size = size_proto3_uint32 + } else { + p.enc = (*Buffer).enc_ref_uint32 // can just treat them as bits + p.dec = (*Buffer).dec_proto3_int32 + p.size = size_ref_uint32 + } + case reflect.Float64: + if p.proto3 { + p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits + p.dec = (*Buffer).dec_proto3_int64 + p.size = size_proto3_int64 + } else { + p.enc = (*Buffer).enc_ref_int64 // can just treat them as bits + p.dec = (*Buffer).dec_proto3_int64 + p.size = size_ref_int64 + } + case reflect.String: + if p.proto3 { + p.enc = (*Buffer).enc_proto3_string + p.dec = (*Buffer).dec_proto3_string + p.size = size_proto3_string + } else { + p.enc = (*Buffer).enc_ref_string + p.dec = (*Buffer).dec_proto3_string + p.size = size_ref_string + } + case reflect.Struct: + p.stype = typ + p.isMarshaler = isMarshaler(typ) + p.isUnmarshaler = isUnmarshaler(typ) + if p.Wire == "bytes" { + p.enc = (*Buffer).enc_ref_struct_message + p.dec = (*Buffer).dec_ref_struct_message + p.size = size_ref_struct_message + } else { + fmt.Fprintf(os.Stderr, "proto: no coders for struct %T\n", typ) + } + + case reflect.Ptr: + switch t2 := t1.Elem(); t2.Kind() { + default: + fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2) + break + case reflect.Bool: + p.enc = (*Buffer).enc_bool + p.dec = (*Buffer).dec_bool + p.size = size_bool + case reflect.Int32: + p.enc = (*Buffer).enc_int32 + p.dec = (*Buffer).dec_int32 + p.size = size_int32 + case reflect.Uint32: + p.enc = (*Buffer).enc_uint32 + p.dec = (*Buffer).dec_int32 // can reuse + p.size = size_uint32 + case reflect.Int64, reflect.Uint64: + p.enc = (*Buffer).enc_int64 + p.dec = (*Buffer).dec_int64 + p.size = size_int64 + case reflect.Float32: + p.enc = (*Buffer).enc_uint32 // can just treat them as bits + p.dec = (*Buffer).dec_int32 + p.size = size_uint32 + case reflect.Float64: + p.enc = (*Buffer).enc_int64 // can just treat them as bits + p.dec = (*Buffer).dec_int64 + p.size = size_int64 + case reflect.String: 
+ p.enc = (*Buffer).enc_string + p.dec = (*Buffer).dec_string + p.size = size_string + case reflect.Struct: + p.stype = t1.Elem() + p.isMarshaler = isMarshaler(t1) + p.isUnmarshaler = isUnmarshaler(t1) + if p.Wire == "bytes" { + p.enc = (*Buffer).enc_struct_message + p.dec = (*Buffer).dec_struct_message + p.size = size_struct_message + } else { + p.enc = (*Buffer).enc_struct_group + p.dec = (*Buffer).dec_struct_group + p.size = size_struct_group + } + } + + case reflect.Slice: + switch t2 := t1.Elem(); t2.Kind() { + default: + logNoSliceEnc(t1, t2) + break + case reflect.Bool: + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_bool + p.size = size_slice_packed_bool + } else { + p.enc = (*Buffer).enc_slice_bool + p.size = size_slice_bool + } + p.dec = (*Buffer).dec_slice_bool + p.packedDec = (*Buffer).dec_slice_packed_bool + case reflect.Int32: + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_int32 + p.size = size_slice_packed_int32 + } else { + p.enc = (*Buffer).enc_slice_int32 + p.size = size_slice_int32 + } + p.dec = (*Buffer).dec_slice_int32 + p.packedDec = (*Buffer).dec_slice_packed_int32 + case reflect.Uint32: + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_uint32 + p.size = size_slice_packed_uint32 + } else { + p.enc = (*Buffer).enc_slice_uint32 + p.size = size_slice_uint32 + } + p.dec = (*Buffer).dec_slice_int32 + p.packedDec = (*Buffer).dec_slice_packed_int32 + case reflect.Int64, reflect.Uint64: + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_int64 + p.size = size_slice_packed_int64 + } else { + p.enc = (*Buffer).enc_slice_int64 + p.size = size_slice_int64 + } + p.dec = (*Buffer).dec_slice_int64 + p.packedDec = (*Buffer).dec_slice_packed_int64 + case reflect.Uint8: + p.enc = (*Buffer).enc_slice_byte + p.dec = (*Buffer).dec_slice_byte + p.size = size_slice_byte + // This is a []byte, which is either a bytes field, + // or the value of a map field. In the latter case, + // we always encode an empty []byte, so we should not + // use the proto3 enc/size funcs. + // f == nil iff this is the key/value of a map field. 
+ if p.proto3 && f != nil { + p.enc = (*Buffer).enc_proto3_slice_byte + p.size = size_proto3_slice_byte + } + case reflect.Float32, reflect.Float64: + switch t2.Bits() { + case 32: + // can just treat them as bits + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_uint32 + p.size = size_slice_packed_uint32 + } else { + p.enc = (*Buffer).enc_slice_uint32 + p.size = size_slice_uint32 + } + p.dec = (*Buffer).dec_slice_int32 + p.packedDec = (*Buffer).dec_slice_packed_int32 + case 64: + // can just treat them as bits + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_int64 + p.size = size_slice_packed_int64 + } else { + p.enc = (*Buffer).enc_slice_int64 + p.size = size_slice_int64 + } + p.dec = (*Buffer).dec_slice_int64 + p.packedDec = (*Buffer).dec_slice_packed_int64 + default: + logNoSliceEnc(t1, t2) + break + } + case reflect.String: + p.enc = (*Buffer).enc_slice_string + p.dec = (*Buffer).dec_slice_string + p.size = size_slice_string + case reflect.Ptr: + switch t3 := t2.Elem(); t3.Kind() { + default: + fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3) + break + case reflect.Struct: + p.stype = t2.Elem() + p.isMarshaler = isMarshaler(t2) + p.isUnmarshaler = isUnmarshaler(t2) + if p.Wire == "bytes" { + p.enc = (*Buffer).enc_slice_struct_message + p.dec = (*Buffer).dec_slice_struct_message + p.size = size_slice_struct_message + } else { + p.enc = (*Buffer).enc_slice_struct_group + p.dec = (*Buffer).dec_slice_struct_group + p.size = size_slice_struct_group + } + } + case reflect.Slice: + switch t2.Elem().Kind() { + default: + fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem()) + break + case reflect.Uint8: + p.enc = (*Buffer).enc_slice_slice_byte + p.dec = (*Buffer).dec_slice_slice_byte + p.size = size_slice_slice_byte + } + case reflect.Struct: + p.setSliceOfNonPointerStructs(t1) + } + + case reflect.Map: + p.enc = (*Buffer).enc_new_map + p.dec = (*Buffer).dec_new_map + p.size = size_new_map + + p.mtype = t1 + p.mkeyprop = &Properties{} + p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) + p.mvalprop = &Properties{} + vtype := p.mtype.Elem() + if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { + // The value type is not a message (*T) or bytes ([]byte), + // so we need encoders for the pointer to this type. + vtype = reflect.PtrTo(vtype) + } + p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) + } + p.setTag(lockGetProp) +} + +func (p *Properties) setTag(lockGetProp bool) { + // precalculate tag code + wire := p.WireType + if p.Packed { + wire = WireBytes + } + x := uint32(p.Tag)<<3 | uint32(wire) + i := 0 + for i = 0; x > 127; i++ { + p.tagbuf[i] = 0x80 | uint8(x&0x7F) + x >>= 7 + } + p.tagbuf[i] = uint8(x) + p.tagcode = p.tagbuf[0 : i+1] + + if p.stype != nil { + if lockGetProp { + p.sprop = GetProperties(p.stype) + } else { + p.sprop = getPropertiesLocked(p.stype) + } + } +} + +var ( + marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() + unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem() +) + +// isMarshaler reports whether type t implements Marshaler. +func isMarshaler(t reflect.Type) bool { + return t.Implements(marshalerType) +} + +// isUnmarshaler reports whether type t implements Unmarshaler. +func isUnmarshaler(t reflect.Type) bool { + return t.Implements(unmarshalerType) +} + +// Init populates the properties from a protocol buffer struct tag. 
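+//
+// For a hypothetical generated field
+//
+//	Foo *string `protobuf:"bytes,1,opt,name=foo"`
+//
+// GetProperties ends up invoking init with typ = *string, name = "Foo",
+// and tag = "bytes,1,opt,name=foo".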
+func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { + p.init(typ, name, tag, f, true) +} + +func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) { + // "bytes,49,opt,def=hello!" + p.Name = name + p.OrigName = name + if f != nil { + p.field = toField(f) + } + if tag == "" { + return + } + p.Parse(tag) + p.setEncAndDec(typ, f, lockGetProp) +} + +var ( + propertiesMu sync.RWMutex + propertiesMap = make(map[reflect.Type]*StructProperties) +) + +// GetProperties returns the list of properties for the type represented by t. +// t must represent a generated struct type of a protocol message. +func GetProperties(t reflect.Type) *StructProperties { + if t.Kind() != reflect.Struct { + panic("proto: type must have kind struct") + } + + // Most calls to GetProperties in a long-running program will be + // retrieving details for types we have seen before. + propertiesMu.RLock() + sprop, ok := propertiesMap[t] + propertiesMu.RUnlock() + if ok { + if collectStats { + stats.Chit++ + } + return sprop + } + + propertiesMu.Lock() + sprop = getPropertiesLocked(t) + propertiesMu.Unlock() + return sprop +} + +// getPropertiesLocked requires that propertiesMu is held. +func getPropertiesLocked(t reflect.Type) *StructProperties { + if prop, ok := propertiesMap[t]; ok { + if collectStats { + stats.Chit++ + } + return prop + } + if collectStats { + stats.Cmiss++ + } + + prop := new(StructProperties) + // in case of recursive protos, fill this in now. + propertiesMap[t] = prop + + // build properties + prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType) + prop.unrecField = invalidField + prop.Prop = make([]*Properties, t.NumField()) + prop.order = make([]int, t.NumField()) + + isOneofMessage := false + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + p := new(Properties) + name := f.Name + p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) + + if f.Name == "XXX_extensions" { // special case + if len(f.Tag.Get("protobuf")) > 0 { + p.enc = (*Buffer).enc_ext_slice_byte + p.dec = nil // not needed + p.size = size_ext_slice_byte + } else { + p.enc = (*Buffer).enc_map + p.dec = nil // not needed + p.size = size_map + } + } + if f.Name == "XXX_unrecognized" { // special case + prop.unrecField = toField(&f) + } + oneof := f.Tag.Get("protobuf_oneof") != "" // special case + if oneof { + isOneofMessage = true + } + prop.Prop[i] = p + prop.order[i] = i + if debug { + print(i, " ", f.Name, " ", t.String(), " ") + if p.Tag > 0 { + print(p.String()) + } + print("\n") + } + if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && !oneof { + fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]") + } + } + + // Re-order prop.order. + sort.Sort(prop) + + type oneofMessage interface { + XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), []interface{}) + } + if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); isOneofMessage && ok { + var oots []interface{} + prop.oneofMarshaler, prop.oneofUnmarshaler, oots = om.XXX_OneofFuncs() + prop.stype = t + + // Interpret oneof metadata. 
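+		// Each element of oots is a pointer to a generated oneof wrapper
+		// struct (e.g. a hypothetical *Msg_Foo) whose single field carries
+		// the protobuf tag; the loop below records which interface field of
+		// the message each wrapper type is assignable to.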
+ prop.OneofTypes = make(map[string]*OneofProperties) + for _, oot := range oots { + oop := &OneofProperties{ + Type: reflect.ValueOf(oot).Type(), // *T + Prop: new(Properties), + } + sft := oop.Type.Elem().Field(0) + oop.Prop.Name = sft.Name + oop.Prop.Parse(sft.Tag.Get("protobuf")) + // There will be exactly one interface field that + // this new value is assignable to. + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + if f.Type.Kind() != reflect.Interface { + continue + } + if !oop.Type.AssignableTo(f.Type) { + continue + } + oop.Field = i + break + } + prop.OneofTypes[oop.Prop.OrigName] = oop + } + } + + // build required counts + // build tags + reqCount := 0 + prop.decoderOrigNames = make(map[string]int) + for i, p := range prop.Prop { + if strings.HasPrefix(p.Name, "XXX_") { + // Internal fields should not appear in tags/origNames maps. + // They are handled specially when encoding and decoding. + continue + } + if p.Required { + reqCount++ + } + prop.decoderTags.put(p.Tag, i) + prop.decoderOrigNames[p.OrigName] = i + } + prop.reqCount = reqCount + + return prop +} + +// Return the Properties object for the x[0]'th field of the structure. +func propByIndex(t reflect.Type, x []int) *Properties { + if len(x) != 1 { + fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t) + return nil + } + prop := GetProperties(t) + return prop.Prop[x[0]] +} + +// Get the address and type of a pointer to a struct from an interface. +func getbase(pb Message) (t reflect.Type, b structPointer, err error) { + if pb == nil { + err = ErrNil + return + } + // get the reflect type of the pointer to the struct. + t = reflect.TypeOf(pb) + // get the address of the struct. + value := reflect.ValueOf(pb) + b = toStructPointer(value) + return +} + +// A global registry of enum types. +// The generated code will register the generated maps by calling RegisterEnum. + +var enumValueMaps = make(map[string]map[string]int32) +var enumStringMaps = make(map[string]map[int32]string) + +// RegisterEnum is called from the generated code to install the enum descriptor +// maps into the global table to aid parsing text format protocol buffers. +func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) { + if _, ok := enumValueMaps[typeName]; ok { + panic("proto: duplicate enum registered: " + typeName) + } + enumValueMaps[typeName] = valueMap + if _, ok := enumStringMaps[typeName]; ok { + panic("proto: duplicate enum registered: " + typeName) + } + enumStringMaps[typeName] = unusedNameMap +} + +// EnumValueMap returns the mapping from names to integers of the +// enum type enumType, or a nil if not found. +func EnumValueMap(enumType string) map[string]int32 { + return enumValueMaps[enumType] +} + +// A registry of all linked message types. +// The string is a fully-qualified proto name ("pkg.Message"). +var ( + protoTypes = make(map[string]reflect.Type) + revProtoTypes = make(map[reflect.Type]string) +) + +// RegisterType is called from generated code and maps from the fully qualified +// proto name to the type (pointer to struct) of the protocol buffer. +func RegisterType(x Message, name string) { + if _, ok := protoTypes[name]; ok { + // TODO: Some day, make this a panic. + log.Printf("proto: duplicate proto type registered: %s", name) + return + } + t := reflect.TypeOf(x) + protoTypes[name] = t + revProtoTypes[t] = name +} + +// MessageName returns the fully-qualified proto name for the given message type. 
+func MessageName(x Message) string { return revProtoTypes[reflect.TypeOf(x)] }
+
+// MessageType returns the message type (pointer to struct) for a named message.
+func MessageType(name string) reflect.Type { return protoTypes[name] }
diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/properties_gogo.go b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/properties_gogo.go
new file mode 100644
index 000000000000..8daf9f7768c2
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/properties_gogo.go
@@ -0,0 +1,64 @@
+// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
+// http://github.com/gogo/protobuf/gogoproto
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+	"fmt"
+	"os"
+	"reflect"
+)
+
+func (p *Properties) setCustomEncAndDec(typ reflect.Type) {
+	p.ctype = typ
+	if p.Repeated {
+		p.enc = (*Buffer).enc_custom_slice_bytes
+		p.dec = (*Buffer).dec_custom_slice_bytes
+		p.size = size_custom_slice_bytes
+	} else if typ.Kind() == reflect.Ptr {
+		p.enc = (*Buffer).enc_custom_bytes
+		p.dec = (*Buffer).dec_custom_bytes
+		p.size = size_custom_bytes
+	} else {
+		p.enc = (*Buffer).enc_custom_ref_bytes
+		p.dec = (*Buffer).dec_custom_ref_bytes
+		p.size = size_custom_ref_bytes
+	}
+}
+
+func (p *Properties) setSliceOfNonPointerStructs(typ reflect.Type) {
+	t2 := typ.Elem()
+	p.sstype = typ
+	p.stype = t2
+	p.isMarshaler = isMarshaler(t2)
+	p.isUnmarshaler = isUnmarshaler(t2)
+	p.enc = (*Buffer).enc_slice_ref_struct_message
+	p.dec = (*Buffer).dec_slice_ref_struct_message
+	p.size = size_slice_ref_struct_message
+	if p.Wire != "bytes" {
+		fmt.Fprintf(os.Stderr, "proto: no encoder for %v -> %v\n", typ, t2)
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/skip_gogo.go b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/skip_gogo.go
new file mode 100644
index 000000000000..4fe7e0815c9f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/skip_gogo.go
@@ -0,0 +1,117 @@
+// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
+// http://github.com/gogo/protobuf/gogoproto +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "fmt" + "io" +) + +func Skip(data []byte) (n int, err error) { + l := len(data) + index := 0 + for index < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if index >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[index] + index++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for { + if index >= l { + return 0, io.ErrUnexpectedEOF + } + index++ + if data[index-1] < 0x80 { + break + } + } + return index, nil + case 1: + index += 8 + return index, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if index >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[index] + index++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + index += length + return index, nil + case 3: + for { + var innerWire uint64 + var start int = index + for shift := uint(0); ; shift += 7 { + if index >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[index] + index++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := Skip(data[start:]) + if err != nil { + return 0, err + } + index = start + next + } + return index, nil + case 4: + return index, nil + case 5: + index += 4 + return index, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/text.go b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/text.go new file mode 100644 index 000000000000..7c9ae90f95d8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/text.go @@ -0,0 +1,793 @@ +// Extensions for Protocol Buffers to create more go like structures. +// +// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. +// http://github.com/gogo/protobuf/gogoproto +// +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// Functions for writing the text protocol buffer format. + +import ( + "bufio" + "bytes" + "encoding" + "errors" + "fmt" + "io" + "log" + "math" + "reflect" + "sort" + "strings" +) + +var ( + newline = []byte("\n") + spaces = []byte(" ") + gtNewline = []byte(">\n") + endBraceNewline = []byte("}\n") + backslashN = []byte{'\\', 'n'} + backslashR = []byte{'\\', 'r'} + backslashT = []byte{'\\', 't'} + backslashDQ = []byte{'\\', '"'} + backslashBS = []byte{'\\', '\\'} + posInf = []byte("inf") + negInf = []byte("-inf") + nan = []byte("nan") +) + +type writer interface { + io.Writer + WriteByte(byte) error +} + +// textWriter is an io.Writer that tracks its indentation level. +type textWriter struct { + ind int + complete bool // if the current position is a complete line + compact bool // whether to write out as a one-liner + w writer +} + +func (w *textWriter) WriteString(s string) (n int, err error) { + if !strings.Contains(s, "\n") { + if !w.compact && w.complete { + w.writeIndent() + } + w.complete = false + return io.WriteString(w.w, s) + } + // WriteString is typically called without newlines, so this + // codepath and its copy are rare. We copy to avoid + // duplicating all of Write's logic here. 
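+	// For example (illustrative): with w.ind == 1, WriteString("a\nb") is
+	// split by Write so that "b" starts on a new line indented two spaces.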
+ return w.Write([]byte(s)) +} + +func (w *textWriter) Write(p []byte) (n int, err error) { + newlines := bytes.Count(p, newline) + if newlines == 0 { + if !w.compact && w.complete { + w.writeIndent() + } + n, err = w.w.Write(p) + w.complete = false + return n, err + } + + frags := bytes.SplitN(p, newline, newlines+1) + if w.compact { + for i, frag := range frags { + if i > 0 { + if err := w.w.WriteByte(' '); err != nil { + return n, err + } + n++ + } + nn, err := w.w.Write(frag) + n += nn + if err != nil { + return n, err + } + } + return n, nil + } + + for i, frag := range frags { + if w.complete { + w.writeIndent() + } + nn, err := w.w.Write(frag) + n += nn + if err != nil { + return n, err + } + if i+1 < len(frags) { + if err := w.w.WriteByte('\n'); err != nil { + return n, err + } + n++ + } + } + w.complete = len(frags[len(frags)-1]) == 0 + return n, nil +} + +func (w *textWriter) WriteByte(c byte) error { + if w.compact && c == '\n' { + c = ' ' + } + if !w.compact && w.complete { + w.writeIndent() + } + err := w.w.WriteByte(c) + w.complete = c == '\n' + return err +} + +func (w *textWriter) indent() { w.ind++ } + +func (w *textWriter) unindent() { + if w.ind == 0 { + log.Printf("proto: textWriter unindented too far") + return + } + w.ind-- +} + +func writeName(w *textWriter, props *Properties) error { + if _, err := w.WriteString(props.OrigName); err != nil { + return err + } + if props.Wire != "group" { + return w.WriteByte(':') + } + return nil +} + +// raw is the interface satisfied by RawMessage. +type raw interface { + Bytes() []byte +} + +func writeStruct(w *textWriter, sv reflect.Value) error { + st := sv.Type() + sprops := GetProperties(st) + for i := 0; i < sv.NumField(); i++ { + fv := sv.Field(i) + props := sprops.Prop[i] + name := st.Field(i).Name + + if strings.HasPrefix(name, "XXX_") { + // There are two XXX_ fields: + // XXX_unrecognized []byte + // XXX_extensions map[int32]proto.Extension + // The first is handled here; + // the second is handled at the bottom of this function. + if name == "XXX_unrecognized" && !fv.IsNil() { + if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil { + return err + } + } + continue + } + if fv.Kind() == reflect.Ptr && fv.IsNil() { + // Field not filled in. This could be an optional field or + // a required field that wasn't filled in. Either way, there + // isn't anything we can show for it. + continue + } + if fv.Kind() == reflect.Slice && fv.IsNil() { + // Repeated field that is empty, or a bytes field that is unused. + continue + } + + if props.Repeated && fv.Kind() == reflect.Slice { + // Repeated field. + for j := 0; j < fv.Len(); j++ { + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + v := fv.Index(j) + if v.Kind() == reflect.Ptr && v.IsNil() { + // A nil message in a repeated field is not valid, + // but we can handle that more gracefully than panicking. + if _, err := w.Write([]byte("\n")); err != nil { + return err + } + continue + } + if len(props.Enum) > 0 { + if err := writeEnum(w, v, props); err != nil { + return err + } + } else if err := writeAny(w, v, props); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + continue + } + if fv.Kind() == reflect.Map { + // Map fields are rendered as a repeated struct with key/value fields. 
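+			// For example (illustrative), a field `map<string, int32> m = 1;`
+			// holding {"k": 7} renders as:
+			//
+			//	m: <
+			//	  key: "k"
+			//	  value: 7
+			//	>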
+ keys := fv.MapKeys() + sort.Sort(mapKeys(keys)) + for _, key := range keys { + val := fv.MapIndex(key) + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + // open struct + if err := w.WriteByte('<'); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + // key + if _, err := w.WriteString("key:"); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := writeAny(w, key, props.mkeyprop); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + // nil values aren't legal, but we can avoid panicking because of them. + if val.Kind() != reflect.Ptr || !val.IsNil() { + // value + if _, err := w.WriteString("value:"); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := writeAny(w, val, props.mvalprop); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + // close struct + w.unindent() + if err := w.WriteByte('>'); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + continue + } + if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 { + // empty bytes field + continue + } + if props.proto3 && fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice { + // proto3 non-repeated scalar field; skip if zero value + if isProto3Zero(fv) { + continue + } + } + + if fv.Kind() == reflect.Interface { + // Check if it is a oneof. + if st.Field(i).Tag.Get("protobuf_oneof") != "" { + // fv is nil, or holds a pointer to generated struct. + // That generated struct has exactly one field, + // which has a protobuf struct tag. + if fv.IsNil() { + continue + } + inner := fv.Elem().Elem() // interface -> *T -> T + tag := inner.Type().Field(0).Tag.Get("protobuf") + props.Parse(tag) // Overwrite the outer props. + // Write the value in the oneof, not the oneof itself. + fv = inner.Field(0) + + // Special case to cope with malformed messages gracefully: + // If the value in the oneof is a nil pointer, don't panic + // in writeAny. + if fv.Kind() == reflect.Ptr && fv.IsNil() { + // Use errors.New so writeAny won't render quotes. + msg := errors.New("/* nil */") + fv = reflect.ValueOf(&msg).Elem() + } + } + } + + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if b, ok := fv.Interface().(raw); ok { + if err := writeRaw(w, b.Bytes()); err != nil { + return err + } + continue + } + + if len(props.Enum) > 0 { + if err := writeEnum(w, fv, props); err != nil { + return err + } + } else if err := writeAny(w, fv, props); err != nil { + return err + } + + if err := w.WriteByte('\n'); err != nil { + return err + } + } + + // Extensions (the XXX_extensions field). + pv := sv + if pv.CanAddr() { + pv = sv.Addr() + } else { + pv = reflect.New(sv.Type()) + pv.Elem().Set(sv) + } + if pv.Type().Implements(extendableProtoType) { + if err := writeExtensions(w, pv); err != nil { + return err + } + } + + return nil +} + +// writeRaw writes an uninterpreted raw message. 
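+// The bytes are decoded with writeUnknownStruct and wrapped in angle brackets;
+// e.g. (illustrative) a raw message holding field 1 = varint 150 prints as:
+//
+//	<
+//	  /* 3 unknown bytes */
+//	  1: 150
+//	>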
+func writeRaw(w *textWriter, b []byte) error { + if err := w.WriteByte('<'); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + if err := writeUnknownStruct(w, b); err != nil { + return err + } + w.unindent() + if err := w.WriteByte('>'); err != nil { + return err + } + return nil +} + +// writeAny writes an arbitrary field. +func writeAny(w *textWriter, v reflect.Value, props *Properties) error { + v = reflect.Indirect(v) + + if props != nil && len(props.CustomType) > 0 { + custom, ok := v.Interface().(Marshaler) + if ok { + data, err := custom.Marshal() + if err != nil { + return err + } + if err := writeString(w, string(data)); err != nil { + return err + } + return nil + } + } + + // Floats have special cases. + if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { + x := v.Float() + var b []byte + switch { + case math.IsInf(x, 1): + b = posInf + case math.IsInf(x, -1): + b = negInf + case math.IsNaN(x): + b = nan + } + if b != nil { + _, err := w.Write(b) + return err + } + // Other values are handled below. + } + + // We don't attempt to serialise every possible value type; only those + // that can occur in protocol buffers. + switch v.Kind() { + case reflect.Slice: + // Should only be a []byte; repeated fields are handled in writeStruct. + if err := writeString(w, string(v.Bytes())); err != nil { + return err + } + case reflect.String: + if err := writeString(w, v.String()); err != nil { + return err + } + case reflect.Struct: + // Required/optional group/message. + var bra, ket byte = '<', '>' + if props != nil && props.Wire == "group" { + bra, ket = '{', '}' + } + if err := w.WriteByte(bra); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + if tm, ok := v.Interface().(encoding.TextMarshaler); ok { + text, err := tm.MarshalText() + if err != nil { + return err + } + if _, err = w.Write(text); err != nil { + return err + } + } else if err := writeStruct(w, v); err != nil { + return err + } + w.unindent() + if err := w.WriteByte(ket); err != nil { + return err + } + default: + _, err := fmt.Fprint(w, v.Interface()) + return err + } + return nil +} + +// equivalent to C's isprint. +func isprint(c byte) bool { + return c >= 0x20 && c < 0x7f +} + +// writeString writes a string in the protocol buffer text format. +// It is similar to strconv.Quote except we don't use Go escape sequences, +// we treat the string as a byte sequence, and we use octal escapes. +// These differences are to maintain interoperability with the other +// languages' implementations of the text format. +func writeString(w *textWriter, s string) error { + // use WriteByte here to get any needed indent + if err := w.WriteByte('"'); err != nil { + return err + } + // Loop over the bytes, not the runes. + for i := 0; i < len(s); i++ { + var err error + // Divergence from C++: we don't escape apostrophes. + // There's no need to escape them, and the C++ parser + // copes with a naked apostrophe. 
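+		// For example (illustrative): the input `it's` is emitted verbatim as
+		// "it's", while a non-printable byte such as 0x01 becomes the octal
+		// escape \001.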
+ switch c := s[i]; c { + case '\n': + _, err = w.w.Write(backslashN) + case '\r': + _, err = w.w.Write(backslashR) + case '\t': + _, err = w.w.Write(backslashT) + case '"': + _, err = w.w.Write(backslashDQ) + case '\\': + _, err = w.w.Write(backslashBS) + default: + if isprint(c) { + err = w.w.WriteByte(c) + } else { + _, err = fmt.Fprintf(w.w, "\\%03o", c) + } + } + if err != nil { + return err + } + } + return w.WriteByte('"') +} + +func writeUnknownStruct(w *textWriter, data []byte) (err error) { + if !w.compact { + if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil { + return err + } + } + b := NewBuffer(data) + for b.index < len(b.buf) { + x, err := b.DecodeVarint() + if err != nil { + _, ferr := fmt.Fprintf(w, "/* %v */\n", err) + return ferr + } + wire, tag := x&7, x>>3 + if wire == WireEndGroup { + w.unindent() + if _, werr := w.Write(endBraceNewline); werr != nil { + return werr + } + continue + } + if _, ferr := fmt.Fprint(w, tag); ferr != nil { + return ferr + } + if wire != WireStartGroup { + if err := w.WriteByte(':'); err != nil { + return err + } + } + if !w.compact || wire == WireStartGroup { + if err := w.WriteByte(' '); err != nil { + return err + } + } + switch wire { + case WireBytes: + buf, e := b.DecodeRawBytes(false) + if e == nil { + _, err = fmt.Fprintf(w, "%q", buf) + } else { + _, err = fmt.Fprintf(w, "/* %v */", e) + } + case WireFixed32: + x, err = b.DecodeFixed32() + err = writeUnknownInt(w, x, err) + case WireFixed64: + x, err = b.DecodeFixed64() + err = writeUnknownInt(w, x, err) + case WireStartGroup: + err = w.WriteByte('{') + w.indent() + case WireVarint: + x, err = b.DecodeVarint() + err = writeUnknownInt(w, x, err) + default: + _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire) + } + if err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + return nil +} + +func writeUnknownInt(w *textWriter, x uint64, err error) error { + if err == nil { + _, err = fmt.Fprint(w, x) + } else { + _, err = fmt.Fprintf(w, "/* %v */", err) + } + return err +} + +type int32Slice []int32 + +func (s int32Slice) Len() int { return len(s) } +func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } +func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// writeExtensions writes all the extensions in pv. +// pv is assumed to be a pointer to a protocol message struct that is extendable. +func writeExtensions(w *textWriter, pv reflect.Value) error { + emap := extensionMaps[pv.Type().Elem()] + ep := pv.Interface().(extendableProto) + + // Order the extensions by ID. + // This isn't strictly necessary, but it will give us + // canonical output, which will also make testing easier. + var m map[int32]Extension + if em, ok := ep.(extensionsMap); ok { + m = em.ExtensionMap() + } else if em, ok := ep.(extensionsBytes); ok { + eb := em.GetExtensions() + var err error + m, err = BytesToExtensionsMap(*eb) + if err != nil { + return err + } + } + + ids := make([]int32, 0, len(m)) + for id := range m { + ids = append(ids, id) + } + sort.Sort(int32Slice(ids)) + + for _, extNum := range ids { + ext := m[extNum] + var desc *ExtensionDesc + if emap != nil { + desc = emap[extNum] + } + if desc == nil { + // Unknown extension. + if err := writeUnknownStruct(w, ext.enc); err != nil { + return err + } + continue + } + + pb, err := GetExtension(ep, desc) + if err != nil { + return fmt.Errorf("failed getting extension: %v", err) + } + + // Repeated extensions will appear as a slice. 
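+		// Illustrative sketch: for a hypothetical extension
+		// `repeated int32 greeting = 101;`, GetExtension above returns a
+		// []int32 and writeExtension below emits one "[pkg.greeting]: N"
+		// line per element.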
+ if !desc.repeated() { + if err := writeExtension(w, desc.Name, pb); err != nil { + return err + } + } else { + v := reflect.ValueOf(pb) + for i := 0; i < v.Len(); i++ { + if err := writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { + return err + } + } + } + } + return nil +} + +func writeExtension(w *textWriter, name string, pb interface{}) error { + if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := writeAny(w, reflect.ValueOf(pb), nil); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + return nil +} + +func (w *textWriter) writeIndent() { + if !w.complete { + return + } + remain := w.ind * 2 + for remain > 0 { + n := remain + if n > len(spaces) { + n = len(spaces) + } + w.w.Write(spaces[:n]) + remain -= n + } + w.complete = false +} + +func marshalText(w io.Writer, pb Message, compact bool) error { + val := reflect.ValueOf(pb) + if pb == nil || val.IsNil() { + w.Write([]byte("")) + return nil + } + var bw *bufio.Writer + ww, ok := w.(writer) + if !ok { + bw = bufio.NewWriter(w) + ww = bw + } + aw := &textWriter{ + w: ww, + complete: true, + compact: compact, + } + + if tm, ok := pb.(encoding.TextMarshaler); ok { + text, err := tm.MarshalText() + if err != nil { + return err + } + if _, err = aw.Write(text); err != nil { + return err + } + if bw != nil { + return bw.Flush() + } + return nil + } + // Dereference the received pointer so we don't have outer < and >. + v := reflect.Indirect(val) + if err := writeStruct(aw, v); err != nil { + return err + } + if bw != nil { + return bw.Flush() + } + return nil +} + +// MarshalText writes a given protocol buffer in text format. +// The only errors returned are from w. +func MarshalText(w io.Writer, pb Message) error { + return marshalText(w, pb, false) +} + +// MarshalTextString is the same as MarshalText, but returns the string directly. +func MarshalTextString(pb Message) string { + var buf bytes.Buffer + marshalText(&buf, pb, false) + return buf.String() +} + +// CompactText writes a given protocol buffer in compact text format (one line). +func CompactText(w io.Writer, pb Message) error { return marshalText(w, pb, true) } + +// CompactTextString is the same as CompactText, but returns the string directly. +func CompactTextString(pb Message) string { + var buf bytes.Buffer + marshalText(&buf, pb, true) + return buf.String() +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/text_gogo.go b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/text_gogo.go new file mode 100644 index 000000000000..cdb23373c39b --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/text_gogo.go @@ -0,0 +1,55 @@ +// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. +// http://github.com/gogo/protobuf/gogoproto +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. 
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+	"fmt"
+	"reflect"
+)
+
+// writeEnum writes the registered name of an enum value, falling back to the
+// numeric value when no name is known.
+func writeEnum(w *textWriter, v reflect.Value, props *Properties) error {
+	m, ok := enumStringMaps[props.Enum]
+	if !ok {
+		// No string map registered for this enum type.
+		return writeAny(w, v, props)
+	}
+	key := int32(0)
+	if v.Kind() == reflect.Ptr {
+		key = int32(v.Elem().Int())
+	} else {
+		key = int32(v.Int())
+	}
+	s, ok := m[key]
+	if !ok {
+		// Unknown enum value; write it numerically.
+		return writeAny(w, v, props)
+	}
+	_, err := fmt.Fprint(w, s)
+	return err
+}
diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/proto/text_parser.go b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/text_parser.go
new file mode 100644
index 000000000000..f3909695ede6
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gogo/protobuf/proto/text_parser.go
@@ -0,0 +1,841 @@
+// Extensions for Protocol Buffers to create more go like structures.
+//
+// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
+// http://github.com/gogo/protobuf/gogoproto
+//
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ +package proto + +// Functions for parsing the Text protocol buffer format. +// TODO: message sets. + +import ( + "encoding" + "errors" + "fmt" + "reflect" + "strconv" + "strings" + "unicode/utf8" +) + +type ParseError struct { + Message string + Line int // 1-based line number + Offset int // 0-based byte offset from start of input +} + +func (p *ParseError) Error() string { + if p.Line == 1 { + // show offset only for first line + return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message) + } + return fmt.Sprintf("line %d: %v", p.Line, p.Message) +} + +type token struct { + value string + err *ParseError + line int // line number + offset int // byte number from start of input, not start of line + unquoted string // the unquoted version of value, if it was a quoted string +} + +func (t *token) String() string { + if t.err == nil { + return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset) + } + return fmt.Sprintf("parse error: %v", t.err) +} + +type textParser struct { + s string // remaining input + done bool // whether the parsing is finished (success or error) + backed bool // whether back() was called + offset, line int + cur token +} + +func newTextParser(s string) *textParser { + p := new(textParser) + p.s = s + p.line = 1 + p.cur.line = 1 + return p +} + +func (p *textParser) errorf(format string, a ...interface{}) *ParseError { + pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} + p.cur.err = pe + p.done = true + return pe +} + +// Numbers and identifiers are matched by [-+._A-Za-z0-9] +func isIdentOrNumberChar(c byte) bool { + switch { + case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': + return true + case '0' <= c && c <= '9': + return true + } + switch c { + case '-', '+', '.', '_': + return true + } + return false +} + +func isWhitespace(c byte) bool { + switch c { + case ' ', '\t', '\n', '\r': + return true + } + return false +} + +func (p *textParser) skipWhitespace() { + i := 0 + for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { + if p.s[i] == '#' { + // comment; skip to end of line or input + for i < len(p.s) && p.s[i] != '\n' { + i++ + } + if i == len(p.s) { + break + } + } + if p.s[i] == '\n' { + p.line++ + } + i++ + } + p.offset += i + p.s = p.s[i:len(p.s)] + if len(p.s) == 0 { + p.done = true + } +} + +func (p *textParser) advance() { + // Skip whitespace + p.skipWhitespace() + if p.done { + return + } + + // Start of non-whitespace + p.cur.err = nil + p.cur.offset, p.cur.line = p.offset, p.line + p.cur.unquoted = "" + switch p.s[0] { + case '<', '>', '{', '}', ':', '[', ']', ';', ',': + // Single symbol + p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] + case '"', '\'': + // Quoted string + i := 1 + for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { + if p.s[i] == '\\' && i+1 < len(p.s) { + // skip escaped char + i++ + } + i++ + } + if i >= len(p.s) || p.s[i] != p.s[0] { + p.errorf("unmatched quote") + return + } + unq, err := unquoteC(p.s[1:i], rune(p.s[0])) + if err != nil { + p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) + return + } + p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] + p.cur.unquoted = unq + default: + i := 0 + for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { + i++ + } + if i == 0 { + p.errorf("unexpected byte %#x", p.s[0]) + return + } + p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] + } + p.offset += len(p.cur.value) +} + +var ( + errBadUTF8 = errors.New("proto: bad UTF-8") + errBadHex = errors.New("proto: bad hexadecimal") +) + +func unquoteC(s string, quote rune) (string, error) { 
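+	// Illustrative behavior (example values, not from the original source):
+	//
+	//	s, _ := unquoteC(`a\n\101`, '"') // s == "a\nA"; \101 is octal for 'A'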
+ // This is based on C++'s tokenizer.cc. + // Despite its name, this is *not* parsing C syntax. + // For instance, "\0" is an invalid quoted string. + + // Avoid allocation in trivial cases. + simple := true + for _, r := range s { + if r == '\\' || r == quote { + simple = false + break + } + } + if simple { + return s, nil + } + + buf := make([]byte, 0, 3*len(s)/2) + for len(s) > 0 { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", errBadUTF8 + } + s = s[n:] + if r != '\\' { + if r < utf8.RuneSelf { + buf = append(buf, byte(r)) + } else { + buf = append(buf, string(r)...) + } + continue + } + + ch, tail, err := unescape(s) + if err != nil { + return "", err + } + buf = append(buf, ch...) + s = tail + } + return string(buf), nil +} + +func unescape(s string) (ch string, tail string, err error) { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", "", errBadUTF8 + } + s = s[n:] + switch r { + case 'a': + return "\a", s, nil + case 'b': + return "\b", s, nil + case 'f': + return "\f", s, nil + case 'n': + return "\n", s, nil + case 'r': + return "\r", s, nil + case 't': + return "\t", s, nil + case 'v': + return "\v", s, nil + case '?': + return "?", s, nil // trigraph workaround + case '\'', '"', '\\': + return string(r), s, nil + case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X': + if len(s) < 2 { + return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) + } + base := 8 + ss := s[:2] + s = s[2:] + if r == 'x' || r == 'X' { + base = 16 + } else { + ss = string(r) + ss + } + i, err := strconv.ParseUint(ss, base, 8) + if err != nil { + return "", "", err + } + return string([]byte{byte(i)}), s, nil + case 'u', 'U': + n := 4 + if r == 'U' { + n = 8 + } + if len(s) < n { + return "", "", fmt.Errorf(`\%c requires %d digits`, r, n) + } + + bs := make([]byte, n/2) + for i := 0; i < n; i += 2 { + a, ok1 := unhex(s[i]) + b, ok2 := unhex(s[i+1]) + if !ok1 || !ok2 { + return "", "", errBadHex + } + bs[i/2] = a<<4 | b + } + s = s[n:] + return string(bs), s, nil + } + return "", "", fmt.Errorf(`unknown escape \%c`, r) +} + +// Adapted from src/pkg/strconv/quote.go. +func unhex(b byte) (v byte, ok bool) { + switch { + case '0' <= b && b <= '9': + return b - '0', true + case 'a' <= b && b <= 'f': + return b - 'a' + 10, true + case 'A' <= b && b <= 'F': + return b - 'A' + 10, true + } + return 0, false +} + +// Back off the parser by one token. Can only be done between calls to next(). +// It makes the next advance() a no-op. +func (p *textParser) back() { p.backed = true } + +// Advances the parser and returns the new current token. +func (p *textParser) next() *token { + if p.backed || p.done { + p.backed = false + return &p.cur + } + p.advance() + if p.done { + p.cur.value = "" + } else if len(p.cur.value) > 0 && p.cur.value[0] == '"' { + // Look for multiple quoted strings separated by whitespace, + // and concatenate them. 
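+		// For example (illustrative), the input `name: "foo" "bar"` yields a
+		// single token whose unquoted value is "foobar", matching the C++
+		// text-format tokenizer.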
+ cat := p.cur + for { + p.skipWhitespace() + if p.done || p.s[0] != '"' { + break + } + p.advance() + if p.cur.err != nil { + return &p.cur + } + cat.value += " " + p.cur.value + cat.unquoted += p.cur.unquoted + } + p.done = false // parser may have seen EOF, but we want to return cat + p.cur = cat + } + return &p.cur +} + +func (p *textParser) consumeToken(s string) error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != s { + p.back() + return p.errorf("expected %q, found %q", s, tok.value) + } + return nil +} + +// Return a RequiredNotSetError indicating which required field was not set. +func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError { + st := sv.Type() + sprops := GetProperties(st) + for i := 0; i < st.NumField(); i++ { + if !isNil(sv.Field(i)) { + continue + } + + props := sprops.Prop[i] + if props.Required { + return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)} + } + } + return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen +} + +// Returns the index in the struct for the named field, as well as the parsed tag properties. +func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) { + i, ok := sprops.decoderOrigNames[name] + if ok { + return i, sprops.Prop[i], true + } + return -1, nil, false +} + +// Consume a ':' from the input stream (if the next token is a colon), +// returning an error if a colon is needed but not present. +func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ":" { + // Colon is optional when the field is a group or message. + needColon := true + switch props.Wire { + case "group": + needColon = false + case "bytes": + // A "bytes" field is either a message, a string, or a repeated field; + // those three become *T, *string and []T respectively, so we can check for + // this field being a pointer to a non-string. + if typ.Kind() == reflect.Ptr { + // *T or *string + if typ.Elem().Kind() == reflect.String { + break + } + } else if typ.Kind() == reflect.Slice { + // []T or []*T + if typ.Elem().Kind() != reflect.Ptr { + break + } + } else if typ.Kind() == reflect.String { + // The proto3 exception is for a string field, + // which requires a colon. + break + } + needColon = false + } + if needColon { + return p.errorf("expected ':', found %q", tok.value) + } + p.back() + } + return nil +} + +func (p *textParser) readStruct(sv reflect.Value, terminator string) error { + st := sv.Type() + sprops := GetProperties(st) + reqCount := sprops.reqCount + var reqFieldErr error + fieldSet := make(map[string]bool) + // A struct is a sequence of "name: value", terminated by one of + // '>' or '}', or the end of the input. A name may also be + // "[extension]". + for { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == terminator { + break + } + if tok.value == "[" { + // Looks like an extension. + // + // TODO: Check whether we need to handle + // namespace rooted names (e.g. ".something.Foo"). + tok = p.next() + if tok.err != nil { + return tok.err + } + var desc *ExtensionDesc + // This could be faster, but it's functional. + // TODO: Do something smarter than a linear scan. 
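+			// Illustrative: for input like `[my.pkg.ext]: 3` (hypothetical
+			// name), the token read above is "my.pkg.ext", which is matched
+			// against the Name of every registered ExtensionDesc below.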
+ for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) { + if d.Name == tok.value { + desc = d + break + } + } + if desc == nil { + return p.errorf("unrecognized extension %q", tok.value) + } + // Check the extension terminator. + tok = p.next() + if tok.err != nil { + return tok.err + } + if tok.value != "]" { + return p.errorf("unrecognized extension terminator %q", tok.value) + } + + props := &Properties{} + props.Parse(desc.Tag) + + typ := reflect.TypeOf(desc.ExtensionType) + if err := p.checkForColon(props, typ); err != nil { + return err + } + + rep := desc.repeated() + + // Read the extension structure, and set it in + // the value we're constructing. + var ext reflect.Value + if !rep { + ext = reflect.New(typ).Elem() + } else { + ext = reflect.New(typ.Elem()).Elem() + } + if err := p.readAny(ext, props); err != nil { + if _, ok := err.(*RequiredNotSetError); !ok { + return err + } + reqFieldErr = err + } + ep := sv.Addr().Interface().(extendableProto) + if !rep { + SetExtension(ep, desc, ext.Interface()) + } else { + old, err := GetExtension(ep, desc) + var sl reflect.Value + if err == nil { + sl = reflect.ValueOf(old) // existing slice + } else { + sl = reflect.MakeSlice(typ, 0, 1) + } + sl = reflect.Append(sl, ext) + SetExtension(ep, desc, sl.Interface()) + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + continue + } + + // This is a normal, non-extension field. + name := tok.value + var dst reflect.Value + fi, props, ok := structFieldByName(sprops, name) + if ok { + dst = sv.Field(fi) + } else if oop, ok := sprops.OneofTypes[name]; ok { + // It is a oneof. + props = oop.Prop + nv := reflect.New(oop.Type.Elem()) + dst = nv.Elem().Field(0) + sv.Field(oop.Field).Set(nv) + } + if !dst.IsValid() { + return p.errorf("unknown field name %q in %v", name, st) + } + + if dst.Kind() == reflect.Map { + // Consume any colon. + if err := p.checkForColon(props, dst.Type()); err != nil { + return err + } + + // Construct the map if it doesn't already exist. + if dst.IsNil() { + dst.Set(reflect.MakeMap(dst.Type())) + } + key := reflect.New(dst.Type().Key()).Elem() + val := reflect.New(dst.Type().Elem()).Elem() + + // The map entry should be this sequence of tokens: + // < key : KEY value : VALUE > + // Technically the "key" and "value" could come in any order, + // but in practice they won't. + + tok := p.next() + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + if err := p.consumeToken("key"); err != nil { + return err + } + if err := p.consumeToken(":"); err != nil { + return err + } + if err := p.readAny(key, props.mkeyprop); err != nil { + return err + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + if err := p.consumeToken("value"); err != nil { + return err + } + if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil { + return err + } + if err := p.readAny(val, props.mvalprop); err != nil { + return err + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + if err := p.consumeToken(terminator); err != nil { + return err + } + + dst.SetMapIndex(key, val) + continue + } + + // Check that it's not already set if it's not a repeated field. 
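+		// For example (illustrative), the input `count: 1 count: 2` for a
+		// non-repeated field fails here with
+		// `non-repeated field "count" was repeated`.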
+ if !props.Repeated && fieldSet[name] { + return p.errorf("non-repeated field %q was repeated", name) + } + + if err := p.checkForColon(props, dst.Type()); err != nil { + return err + } + + // Parse into the field. + fieldSet[name] = true + if err := p.readAny(dst, props); err != nil { + if _, ok := err.(*RequiredNotSetError); !ok { + return err + } + reqFieldErr = err + } else if props.Required { + reqCount-- + } + + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + + } + + if reqCount > 0 { + return p.missingRequiredFieldError(sv) + } + return reqFieldErr +} + +// consumeOptionalSeparator consumes an optional semicolon or comma. +// It is used in readStruct to provide backward compatibility. +func (p *textParser) consumeOptionalSeparator() error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ";" && tok.value != "," { + p.back() + } + return nil +} + +func (p *textParser) readAny(v reflect.Value, props *Properties) error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == "" { + return p.errorf("unexpected EOF") + } + if len(props.CustomType) > 0 { + if props.Repeated { + t := reflect.TypeOf(v.Interface()) + if t.Kind() == reflect.Slice { + tc := reflect.TypeOf(new(Marshaler)) + ok := t.Elem().Implements(tc.Elem()) + if ok { + fv := v + flen := fv.Len() + if flen == fv.Cap() { + nav := reflect.MakeSlice(v.Type(), flen, 2*flen+1) + reflect.Copy(nav, fv) + fv.Set(nav) + } + fv.SetLen(flen + 1) + + // Read one. + p.back() + return p.readAny(fv.Index(flen), props) + } + } + } + if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr { + custom := reflect.New(props.ctype.Elem()).Interface().(Unmarshaler) + err := custom.Unmarshal([]byte(tok.unquoted)) + if err != nil { + return p.errorf("%v %v: %v", err, v.Type(), tok.value) + } + v.Set(reflect.ValueOf(custom)) + } else { + custom := reflect.New(reflect.TypeOf(v.Interface())).Interface().(Unmarshaler) + err := custom.Unmarshal([]byte(tok.unquoted)) + if err != nil { + return p.errorf("%v %v: %v", err, v.Type(), tok.value) + } + v.Set(reflect.Indirect(reflect.ValueOf(custom))) + } + return nil + } + switch fv := v; fv.Kind() { + case reflect.Slice: + at := v.Type() + if at.Elem().Kind() == reflect.Uint8 { + // Special case for []byte + if tok.value[0] != '"' && tok.value[0] != '\'' { + // Deliberately written out here, as the error after + // this switch statement would write "invalid []byte: ...", + // which is not as user-friendly. + return p.errorf("invalid string: %v", tok.value) + } + bytes := []byte(tok.unquoted) + fv.Set(reflect.ValueOf(bytes)) + return nil + } + // Repeated field. + if tok.value == "[" { + // Repeated field with list notation, like [1,2,3]. + for { + fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) + err := p.readAny(fv.Index(fv.Len()-1), props) + if err != nil { + return err + } + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == "]" { + break + } + if tok.value != "," { + return p.errorf("Expected ']' or ',' found %q", tok.value) + } + } + return nil + } + // One value of the repeated field. + p.back() + fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) + return p.readAny(fv.Index(fv.Len()-1), props) + case reflect.Bool: + // Either "true", "false", 1 or 0. 
+ switch tok.value { + case "true", "1": + fv.SetBool(true) + return nil + case "false", "0": + fv.SetBool(false) + return nil + } + case reflect.Float32, reflect.Float64: + v := tok.value + // Ignore 'f' for compatibility with output generated by C++, but don't + // remove 'f' when the value is "-inf" or "inf". + if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" { + v = v[:len(v)-1] + } + if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil { + fv.SetFloat(f) + return nil + } + case reflect.Int32: + if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { + fv.SetInt(x) + return nil + } + + if len(props.Enum) == 0 { + break + } + m, ok := enumValueMaps[props.Enum] + if !ok { + break + } + x, ok := m[tok.value] + if !ok { + break + } + fv.SetInt(int64(x)) + return nil + case reflect.Int64: + if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { + fv.SetInt(x) + return nil + } + + case reflect.Ptr: + // A basic field (indirected through pointer), or a repeated message/group + p.back() + fv.Set(reflect.New(fv.Type().Elem())) + return p.readAny(fv.Elem(), props) + case reflect.String: + if tok.value[0] == '"' || tok.value[0] == '\'' { + fv.SetString(tok.unquoted) + return nil + } + case reflect.Struct: + var terminator string + switch tok.value { + case "{": + terminator = "}" + case "<": + terminator = ">" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + // TODO: Handle nested messages which implement encoding.TextUnmarshaler. + return p.readStruct(fv, terminator) + case reflect.Uint32: + if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { + fv.SetUint(uint64(x)) + return nil + } + case reflect.Uint64: + if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { + fv.SetUint(x) + return nil + } + } + return p.errorf("invalid %v: %v", v.Type(), tok.value) +} + +// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb +// before starting to unmarshal, so any existing data in pb is always removed. +// If a required field is not set and no other error occurs, +// UnmarshalText returns *RequiredNotSetError. +func UnmarshalText(s string, pb Message) error { + if um, ok := pb.(encoding.TextUnmarshaler); ok { + err := um.UnmarshalText([]byte(s)) + return err + } + pb.Reset() + v := reflect.ValueOf(pb) + if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil { + return pe + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile b/Godeps/_workspace/src/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile new file mode 100644 index 000000000000..d80ceffee21d --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile @@ -0,0 +1,33 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2010 The Go Authors. All rights reserved. +# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. 
nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +regenerate: + protoc --gogo_out=. -I=../../protobuf/google/protobuf ../../protobuf/google/protobuf/descriptor.proto diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go b/Godeps/_workspace/src/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go new file mode 100644 index 000000000000..0eeadb3f7e60 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go @@ -0,0 +1,1825 @@ +// Code generated by protoc-gen-gogo. +// source: descriptor.proto +// DO NOT EDIT! + +/* +Package descriptor is a generated protocol buffer package. + +It is generated from these files: + descriptor.proto + +It has these top-level messages: + FileDescriptorSet + FileDescriptorProto + DescriptorProto + FieldDescriptorProto + OneofDescriptorProto + EnumDescriptorProto + EnumValueDescriptorProto + ServiceDescriptorProto + MethodDescriptorProto + FileOptions + MessageOptions + FieldOptions + EnumOptions + EnumValueOptions + ServiceOptions + MethodOptions + UninterpretedOption + SourceCodeInfo +*/ +package descriptor + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type FieldDescriptorProto_Type int32 + +const ( + // 0 is reserved for errors. + // Order is weird for historical reasons. + FieldDescriptorProto_TYPE_DOUBLE FieldDescriptorProto_Type = 1 + FieldDescriptorProto_TYPE_FLOAT FieldDescriptorProto_Type = 2 + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if + // negative values are likely. + FieldDescriptorProto_TYPE_INT64 FieldDescriptorProto_Type = 3 + FieldDescriptorProto_TYPE_UINT64 FieldDescriptorProto_Type = 4 + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if + // negative values are likely. + FieldDescriptorProto_TYPE_INT32 FieldDescriptorProto_Type = 5 + FieldDescriptorProto_TYPE_FIXED64 FieldDescriptorProto_Type = 6 + FieldDescriptorProto_TYPE_FIXED32 FieldDescriptorProto_Type = 7 + FieldDescriptorProto_TYPE_BOOL FieldDescriptorProto_Type = 8 + FieldDescriptorProto_TYPE_STRING FieldDescriptorProto_Type = 9 + FieldDescriptorProto_TYPE_GROUP FieldDescriptorProto_Type = 10 + FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11 + // New in version 2. 
+ FieldDescriptorProto_TYPE_BYTES FieldDescriptorProto_Type = 12 + FieldDescriptorProto_TYPE_UINT32 FieldDescriptorProto_Type = 13 + FieldDescriptorProto_TYPE_ENUM FieldDescriptorProto_Type = 14 + FieldDescriptorProto_TYPE_SFIXED32 FieldDescriptorProto_Type = 15 + FieldDescriptorProto_TYPE_SFIXED64 FieldDescriptorProto_Type = 16 + FieldDescriptorProto_TYPE_SINT32 FieldDescriptorProto_Type = 17 + FieldDescriptorProto_TYPE_SINT64 FieldDescriptorProto_Type = 18 +) + +var FieldDescriptorProto_Type_name = map[int32]string{ + 1: "TYPE_DOUBLE", + 2: "TYPE_FLOAT", + 3: "TYPE_INT64", + 4: "TYPE_UINT64", + 5: "TYPE_INT32", + 6: "TYPE_FIXED64", + 7: "TYPE_FIXED32", + 8: "TYPE_BOOL", + 9: "TYPE_STRING", + 10: "TYPE_GROUP", + 11: "TYPE_MESSAGE", + 12: "TYPE_BYTES", + 13: "TYPE_UINT32", + 14: "TYPE_ENUM", + 15: "TYPE_SFIXED32", + 16: "TYPE_SFIXED64", + 17: "TYPE_SINT32", + 18: "TYPE_SINT64", +} +var FieldDescriptorProto_Type_value = map[string]int32{ + "TYPE_DOUBLE": 1, + "TYPE_FLOAT": 2, + "TYPE_INT64": 3, + "TYPE_UINT64": 4, + "TYPE_INT32": 5, + "TYPE_FIXED64": 6, + "TYPE_FIXED32": 7, + "TYPE_BOOL": 8, + "TYPE_STRING": 9, + "TYPE_GROUP": 10, + "TYPE_MESSAGE": 11, + "TYPE_BYTES": 12, + "TYPE_UINT32": 13, + "TYPE_ENUM": 14, + "TYPE_SFIXED32": 15, + "TYPE_SFIXED64": 16, + "TYPE_SINT32": 17, + "TYPE_SINT64": 18, +} + +func (x FieldDescriptorProto_Type) Enum() *FieldDescriptorProto_Type { + p := new(FieldDescriptorProto_Type) + *p = x + return p +} +func (x FieldDescriptorProto_Type) String() string { + return proto.EnumName(FieldDescriptorProto_Type_name, int32(x)) +} +func (x *FieldDescriptorProto_Type) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Type_value, data, "FieldDescriptorProto_Type") + if err != nil { + return err + } + *x = FieldDescriptorProto_Type(value) + return nil +} + +type FieldDescriptorProto_Label int32 + +const ( + // 0 is reserved for errors + FieldDescriptorProto_LABEL_OPTIONAL FieldDescriptorProto_Label = 1 + FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2 + FieldDescriptorProto_LABEL_REPEATED FieldDescriptorProto_Label = 3 +) + +var FieldDescriptorProto_Label_name = map[int32]string{ + 1: "LABEL_OPTIONAL", + 2: "LABEL_REQUIRED", + 3: "LABEL_REPEATED", +} +var FieldDescriptorProto_Label_value = map[string]int32{ + "LABEL_OPTIONAL": 1, + "LABEL_REQUIRED": 2, + "LABEL_REPEATED": 3, +} + +func (x FieldDescriptorProto_Label) Enum() *FieldDescriptorProto_Label { + p := new(FieldDescriptorProto_Label) + *p = x + return p +} +func (x FieldDescriptorProto_Label) String() string { + return proto.EnumName(FieldDescriptorProto_Label_name, int32(x)) +} +func (x *FieldDescriptorProto_Label) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Label_value, data, "FieldDescriptorProto_Label") + if err != nil { + return err + } + *x = FieldDescriptorProto_Label(value) + return nil +} + +// Generated classes can be optimized for speed or code size. +type FileOptions_OptimizeMode int32 + +const ( + FileOptions_SPEED FileOptions_OptimizeMode = 1 + // etc. 
+ FileOptions_CODE_SIZE FileOptions_OptimizeMode = 2 + FileOptions_LITE_RUNTIME FileOptions_OptimizeMode = 3 +) + +var FileOptions_OptimizeMode_name = map[int32]string{ + 1: "SPEED", + 2: "CODE_SIZE", + 3: "LITE_RUNTIME", +} +var FileOptions_OptimizeMode_value = map[string]int32{ + "SPEED": 1, + "CODE_SIZE": 2, + "LITE_RUNTIME": 3, +} + +func (x FileOptions_OptimizeMode) Enum() *FileOptions_OptimizeMode { + p := new(FileOptions_OptimizeMode) + *p = x + return p +} +func (x FileOptions_OptimizeMode) String() string { + return proto.EnumName(FileOptions_OptimizeMode_name, int32(x)) +} +func (x *FileOptions_OptimizeMode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FileOptions_OptimizeMode_value, data, "FileOptions_OptimizeMode") + if err != nil { + return err + } + *x = FileOptions_OptimizeMode(value) + return nil +} + +type FieldOptions_CType int32 + +const ( + // Default mode. + FieldOptions_STRING FieldOptions_CType = 0 + FieldOptions_CORD FieldOptions_CType = 1 + FieldOptions_STRING_PIECE FieldOptions_CType = 2 +) + +var FieldOptions_CType_name = map[int32]string{ + 0: "STRING", + 1: "CORD", + 2: "STRING_PIECE", +} +var FieldOptions_CType_value = map[string]int32{ + "STRING": 0, + "CORD": 1, + "STRING_PIECE": 2, +} + +func (x FieldOptions_CType) Enum() *FieldOptions_CType { + p := new(FieldOptions_CType) + *p = x + return p +} +func (x FieldOptions_CType) String() string { + return proto.EnumName(FieldOptions_CType_name, int32(x)) +} +func (x *FieldOptions_CType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldOptions_CType_value, data, "FieldOptions_CType") + if err != nil { + return err + } + *x = FieldOptions_CType(value) + return nil +} + +type FieldOptions_JSType int32 + +const ( + // Use the default type. + FieldOptions_JS_NORMAL FieldOptions_JSType = 0 + // Use JavaScript strings. + FieldOptions_JS_STRING FieldOptions_JSType = 1 + // Use JavaScript numbers. + FieldOptions_JS_NUMBER FieldOptions_JSType = 2 +) + +var FieldOptions_JSType_name = map[int32]string{ + 0: "JS_NORMAL", + 1: "JS_STRING", + 2: "JS_NUMBER", +} +var FieldOptions_JSType_value = map[string]int32{ + "JS_NORMAL": 0, + "JS_STRING": 1, + "JS_NUMBER": 2, +} + +func (x FieldOptions_JSType) Enum() *FieldOptions_JSType { + p := new(FieldOptions_JSType) + *p = x + return p +} +func (x FieldOptions_JSType) String() string { + return proto.EnumName(FieldOptions_JSType_name, int32(x)) +} +func (x *FieldOptions_JSType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldOptions_JSType_value, data, "FieldOptions_JSType") + if err != nil { + return err + } + *x = FieldOptions_JSType(value) + return nil +} + +// The protocol compiler can output a FileDescriptorSet containing the .proto +// files it parses. +type FileDescriptorSet struct { + File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FileDescriptorSet) Reset() { *m = FileDescriptorSet{} } +func (m *FileDescriptorSet) String() string { return proto.CompactTextString(m) } +func (*FileDescriptorSet) ProtoMessage() {} + +func (m *FileDescriptorSet) GetFile() []*FileDescriptorProto { + if m != nil { + return m.File + } + return nil +} + +// Describes a complete .proto file. +type FileDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"` + // Names of files imported by this file. 
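+	// For example (illustrative), `import "other.proto";` in the source file
+	// appears here as the entry "other.proto".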
+ Dependency []string `protobuf:"bytes,3,rep,name=dependency" json:"dependency,omitempty"` + // Indexes of the public imported files in the dependency list above. + PublicDependency []int32 `protobuf:"varint,10,rep,name=public_dependency" json:"public_dependency,omitempty"` + // Indexes of the weak imported files in the dependency list. + // For Google-internal migration only. Do not use. + WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency" json:"weak_dependency,omitempty"` + // All top-level definitions in this file. + MessageType []*DescriptorProto `protobuf:"bytes,4,rep,name=message_type" json:"message_type,omitempty"` + EnumType []*EnumDescriptorProto `protobuf:"bytes,5,rep,name=enum_type" json:"enum_type,omitempty"` + Service []*ServiceDescriptorProto `protobuf:"bytes,6,rep,name=service" json:"service,omitempty"` + Extension []*FieldDescriptorProto `protobuf:"bytes,7,rep,name=extension" json:"extension,omitempty"` + Options *FileOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + // This field contains optional information about the original source code. + // You may safely remove this entire field without harming runtime + // functionality of the descriptors -- the information is needed only by + // development tools. + SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info" json:"source_code_info,omitempty"` + // The syntax of the proto file. + // The supported values are "proto2" and "proto3". + Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FileDescriptorProto) Reset() { *m = FileDescriptorProto{} } +func (m *FileDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*FileDescriptorProto) ProtoMessage() {} + +func (m *FileDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *FileDescriptorProto) GetPackage() string { + if m != nil && m.Package != nil { + return *m.Package + } + return "" +} + +func (m *FileDescriptorProto) GetDependency() []string { + if m != nil { + return m.Dependency + } + return nil +} + +func (m *FileDescriptorProto) GetPublicDependency() []int32 { + if m != nil { + return m.PublicDependency + } + return nil +} + +func (m *FileDescriptorProto) GetWeakDependency() []int32 { + if m != nil { + return m.WeakDependency + } + return nil +} + +func (m *FileDescriptorProto) GetMessageType() []*DescriptorProto { + if m != nil { + return m.MessageType + } + return nil +} + +func (m *FileDescriptorProto) GetEnumType() []*EnumDescriptorProto { + if m != nil { + return m.EnumType + } + return nil +} + +func (m *FileDescriptorProto) GetService() []*ServiceDescriptorProto { + if m != nil { + return m.Service + } + return nil +} + +func (m *FileDescriptorProto) GetExtension() []*FieldDescriptorProto { + if m != nil { + return m.Extension + } + return nil +} + +func (m *FileDescriptorProto) GetOptions() *FileOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *FileDescriptorProto) GetSourceCodeInfo() *SourceCodeInfo { + if m != nil { + return m.SourceCodeInfo + } + return nil +} + +func (m *FileDescriptorProto) GetSyntax() string { + if m != nil && m.Syntax != nil { + return *m.Syntax + } + return "" +} + +// Describes a message type. 
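+// A minimal usage sketch (illustrative only; describeFile is a hypothetical
+// helper, not part of this file): the Get* accessors above are nil-safe on
+// both the receiver and the field pointer, so callers can chain them without
+// explicit checks:
+//
+//	func describeFile(fd *FileDescriptorProto) string {
+//		syntax := fd.GetSyntax()
+//		if syntax == "" {
+//			syntax = "proto2" // an unset syntax field means proto2
+//		}
+//		return fd.GetPackage() + " (" + syntax + ")"
+//	}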
+type DescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Field []*FieldDescriptorProto `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"` + Extension []*FieldDescriptorProto `protobuf:"bytes,6,rep,name=extension" json:"extension,omitempty"` + NestedType []*DescriptorProto `protobuf:"bytes,3,rep,name=nested_type" json:"nested_type,omitempty"` + EnumType []*EnumDescriptorProto `protobuf:"bytes,4,rep,name=enum_type" json:"enum_type,omitempty"` + ExtensionRange []*DescriptorProto_ExtensionRange `protobuf:"bytes,5,rep,name=extension_range" json:"extension_range,omitempty"` + OneofDecl []*OneofDescriptorProto `protobuf:"bytes,8,rep,name=oneof_decl" json:"oneof_decl,omitempty"` + Options *MessageOptions `protobuf:"bytes,7,opt,name=options" json:"options,omitempty"` + ReservedRange []*DescriptorProto_ReservedRange `protobuf:"bytes,9,rep,name=reserved_range" json:"reserved_range,omitempty"` + // Reserved field names, which may not be used by fields in the same message. + // A given name may only be reserved once. + ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name" json:"reserved_name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DescriptorProto) Reset() { *m = DescriptorProto{} } +func (m *DescriptorProto) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto) ProtoMessage() {} + +func (m *DescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *DescriptorProto) GetField() []*FieldDescriptorProto { + if m != nil { + return m.Field + } + return nil +} + +func (m *DescriptorProto) GetExtension() []*FieldDescriptorProto { + if m != nil { + return m.Extension + } + return nil +} + +func (m *DescriptorProto) GetNestedType() []*DescriptorProto { + if m != nil { + return m.NestedType + } + return nil +} + +func (m *DescriptorProto) GetEnumType() []*EnumDescriptorProto { + if m != nil { + return m.EnumType + } + return nil +} + +func (m *DescriptorProto) GetExtensionRange() []*DescriptorProto_ExtensionRange { + if m != nil { + return m.ExtensionRange + } + return nil +} + +func (m *DescriptorProto) GetOneofDecl() []*OneofDescriptorProto { + if m != nil { + return m.OneofDecl + } + return nil +} + +func (m *DescriptorProto) GetOptions() *MessageOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *DescriptorProto) GetReservedRange() []*DescriptorProto_ReservedRange { + if m != nil { + return m.ReservedRange + } + return nil +} + +func (m *DescriptorProto) GetReservedName() []string { + if m != nil { + return m.ReservedName + } + return nil +} + +type DescriptorProto_ExtensionRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DescriptorProto_ExtensionRange) Reset() { *m = DescriptorProto_ExtensionRange{} } +func (m *DescriptorProto_ExtensionRange) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto_ExtensionRange) ProtoMessage() {} + +func (m *DescriptorProto_ExtensionRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *DescriptorProto_ExtensionRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +// Range of reserved tag numbers. Reserved tag numbers may not be used by +// fields or extension ranges in the same message. 
Reserved ranges may +// not overlap. +type DescriptorProto_ReservedRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DescriptorProto_ReservedRange) Reset() { *m = DescriptorProto_ReservedRange{} } +func (m *DescriptorProto_ReservedRange) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto_ReservedRange) ProtoMessage() {} + +func (m *DescriptorProto_ReservedRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *DescriptorProto_ReservedRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +// Describes a field within a message. +type FieldDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,3,opt,name=number" json:"number,omitempty"` + Label *FieldDescriptorProto_Label `protobuf:"varint,4,opt,name=label,enum=google.protobuf.FieldDescriptorProto_Label" json:"label,omitempty"` + // If type_name is set, this need not be set. If both this and type_name + // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. + Type *FieldDescriptorProto_Type `protobuf:"varint,5,opt,name=type,enum=google.protobuf.FieldDescriptorProto_Type" json:"type,omitempty"` + // For message and enum types, this is the name of the type. If the name + // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping + // rules are used to find the type (i.e. first the nested types within this + // message are searched, then within the parent, on up to the root + // namespace). + TypeName *string `protobuf:"bytes,6,opt,name=type_name" json:"type_name,omitempty"` + // For extensions, this is the name of the type being extended. It is + // resolved in the same manner as type_name. + Extendee *string `protobuf:"bytes,2,opt,name=extendee" json:"extendee,omitempty"` + // For numeric types, contains the original text representation of the value. + // For booleans, "true" or "false". + // For strings, contains the default text contents (not escaped in any way). + // For bytes, contains the C escaped value. All bytes >= 128 are escaped. + // TODO(kenton): Base-64 encode? + DefaultValue *string `protobuf:"bytes,7,opt,name=default_value" json:"default_value,omitempty"` + // If set, gives the index of a oneof in the containing type's oneof_decl + // list. This field is a member of that oneof. + OneofIndex *int32 `protobuf:"varint,9,opt,name=oneof_index" json:"oneof_index,omitempty"` + // JSON name of this field. The value is set by protocol compiler. If the + // user has set a "json_name" option on this field, that option's value + // will be used. Otherwise, it's deduced from the field's name by converting + // it to camelCase. 
+ JsonName *string `protobuf:"bytes,10,opt,name=json_name" json:"json_name,omitempty"` + Options *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FieldDescriptorProto) Reset() { *m = FieldDescriptorProto{} } +func (m *FieldDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*FieldDescriptorProto) ProtoMessage() {} + +func (m *FieldDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *FieldDescriptorProto) GetNumber() int32 { + if m != nil && m.Number != nil { + return *m.Number + } + return 0 +} + +func (m *FieldDescriptorProto) GetLabel() FieldDescriptorProto_Label { + if m != nil && m.Label != nil { + return *m.Label + } + return FieldDescriptorProto_LABEL_OPTIONAL +} + +func (m *FieldDescriptorProto) GetType() FieldDescriptorProto_Type { + if m != nil && m.Type != nil { + return *m.Type + } + return FieldDescriptorProto_TYPE_DOUBLE +} + +func (m *FieldDescriptorProto) GetTypeName() string { + if m != nil && m.TypeName != nil { + return *m.TypeName + } + return "" +} + +func (m *FieldDescriptorProto) GetExtendee() string { + if m != nil && m.Extendee != nil { + return *m.Extendee + } + return "" +} + +func (m *FieldDescriptorProto) GetDefaultValue() string { + if m != nil && m.DefaultValue != nil { + return *m.DefaultValue + } + return "" +} + +func (m *FieldDescriptorProto) GetOneofIndex() int32 { + if m != nil && m.OneofIndex != nil { + return *m.OneofIndex + } + return 0 +} + +func (m *FieldDescriptorProto) GetJsonName() string { + if m != nil && m.JsonName != nil { + return *m.JsonName + } + return "" +} + +func (m *FieldDescriptorProto) GetOptions() *FieldOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a oneof. +type OneofDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *OneofDescriptorProto) Reset() { *m = OneofDescriptorProto{} } +func (m *OneofDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*OneofDescriptorProto) ProtoMessage() {} + +func (m *OneofDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +// Describes an enum type. +type EnumDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Value []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` + Options *EnumOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *EnumDescriptorProto) Reset() { *m = EnumDescriptorProto{} } +func (m *EnumDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*EnumDescriptorProto) ProtoMessage() {} + +func (m *EnumDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *EnumDescriptorProto) GetValue() []*EnumValueDescriptorProto { + if m != nil { + return m.Value + } + return nil +} + +func (m *EnumDescriptorProto) GetOptions() *EnumOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a value within an enum. 
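+// An illustrative sketch (dumpEnum is hypothetical and assumes "fmt" is
+// imported): walking an enum descriptor through the accessors above.
+// GetValue returns nil for a nil receiver, so the loop is safe either way:
+//
+//	func dumpEnum(ed *EnumDescriptorProto) {
+//		for _, v := range ed.GetValue() {
+//			fmt.Printf("%s = %d\n", v.GetName(), v.GetNumber())
+//		}
+//	}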
+type EnumValueDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"` + Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *EnumValueDescriptorProto) Reset() { *m = EnumValueDescriptorProto{} } +func (m *EnumValueDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*EnumValueDescriptorProto) ProtoMessage() {} + +func (m *EnumValueDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *EnumValueDescriptorProto) GetNumber() int32 { + if m != nil && m.Number != nil { + return *m.Number + } + return 0 +} + +func (m *EnumValueDescriptorProto) GetOptions() *EnumValueOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a service. +type ServiceDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"` + Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ServiceDescriptorProto) Reset() { *m = ServiceDescriptorProto{} } +func (m *ServiceDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*ServiceDescriptorProto) ProtoMessage() {} + +func (m *ServiceDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *ServiceDescriptorProto) GetMethod() []*MethodDescriptorProto { + if m != nil { + return m.Method + } + return nil +} + +func (m *ServiceDescriptorProto) GetOptions() *ServiceOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a method of a service. +type MethodDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Input and output type names. These are resolved in the same way as + // FieldDescriptorProto.type_name, but must refer to a message type. 
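+	// An illustrative sketch (summarize is hypothetical and assumes "fmt" and
+	// "strings" are imported): fully-qualified type names begin with a '.',
+	// which display code typically trims:
+	//
+	//	func summarize(sd *ServiceDescriptorProto) {
+	//		for _, m := range sd.GetMethod() {
+	//			in := strings.TrimPrefix(m.GetInputType(), ".")
+	//			out := strings.TrimPrefix(m.GetOutputType(), ".")
+	//			fmt.Printf("rpc %s(%s) returns (%s)\n", m.GetName(), in, out)
+	//		}
+	//	}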
+ InputType *string `protobuf:"bytes,2,opt,name=input_type" json:"input_type,omitempty"` + OutputType *string `protobuf:"bytes,3,opt,name=output_type" json:"output_type,omitempty"` + Options *MethodOptions `protobuf:"bytes,4,opt,name=options" json:"options,omitempty"` + // Identifies if client streams multiple client messages + ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,def=0" json:"client_streaming,omitempty"` + // Identifies if server streams multiple server messages + ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,def=0" json:"server_streaming,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MethodDescriptorProto) Reset() { *m = MethodDescriptorProto{} } +func (m *MethodDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*MethodDescriptorProto) ProtoMessage() {} + +const Default_MethodDescriptorProto_ClientStreaming bool = false +const Default_MethodDescriptorProto_ServerStreaming bool = false + +func (m *MethodDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *MethodDescriptorProto) GetInputType() string { + if m != nil && m.InputType != nil { + return *m.InputType + } + return "" +} + +func (m *MethodDescriptorProto) GetOutputType() string { + if m != nil && m.OutputType != nil { + return *m.OutputType + } + return "" +} + +func (m *MethodDescriptorProto) GetOptions() *MethodOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *MethodDescriptorProto) GetClientStreaming() bool { + if m != nil && m.ClientStreaming != nil { + return *m.ClientStreaming + } + return Default_MethodDescriptorProto_ClientStreaming +} + +func (m *MethodDescriptorProto) GetServerStreaming() bool { + if m != nil && m.ServerStreaming != nil { + return *m.ServerStreaming + } + return Default_MethodDescriptorProto_ServerStreaming +} + +type FileOptions struct { + // Sets the Java package where classes generated from this .proto will be + // placed. By default, the proto package is used, but this is often + // inappropriate because proto packages do not normally start with backwards + // domain names. + JavaPackage *string `protobuf:"bytes,1,opt,name=java_package" json:"java_package,omitempty"` + // If set, all the classes from the .proto file are wrapped in a single + // outer class with the given name. This applies to both Proto1 + // (equivalent to the old "--one_java_file" option) and Proto2 (where + // a .proto always translates to a single class, but you may want to + // explicitly choose the class name). + JavaOuterClassname *string `protobuf:"bytes,8,opt,name=java_outer_classname" json:"java_outer_classname,omitempty"` + // If set true, then the Java code generator will generate a separate .java + // file for each top-level message, enum, and service defined in the .proto + // file. Thus, these types will *not* be nested inside the outer class + // named by java_outer_classname. However, the outer class will still be + // generated to contain the file's getDescriptor() method as well as any + // top-level extensions defined in the file. + JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,def=0" json:"java_multiple_files,omitempty"` + // If set true, then the Java code generator will generate equals() and + // hashCode() methods for all messages defined in the .proto file. + // This increases generated code size, potentially substantially for large + // protos, which may harm a memory-constrained application. 
+	// - In the full runtime this is a speed optimization, as the
+	// AbstractMessage base class includes reflection-based implementations of
+	// these methods.
+	// - In the lite runtime, setting this option changes the semantics of
+	// equals() and hashCode() to more closely match those of the full runtime;
+	// the generated methods compute their results based on field values rather
+	// than object identity. (Implementations should not assume that hashcodes
+	// will be consistent across runtimes or versions of the protocol compiler.)
+	JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,def=0" json:"java_generate_equals_and_hash,omitempty"`
+	// If set true, then the Java2 code generator will generate code that
+	// throws an exception whenever an attempt is made to assign a non-UTF-8
+	// byte sequence to a string field.
+	// Message reflection will do the same.
+	// However, an extension field still accepts non-UTF-8 byte sequences.
+	// This option has no effect when used with the lite runtime.
+	JavaStringCheckUtf8 *bool `protobuf:"varint,27,opt,name=java_string_check_utf8,def=0" json:"java_string_check_utf8,omitempty"`
+	OptimizeFor *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,enum=google.protobuf.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"`
+	// Sets the Go package where structs generated from this .proto will be
+	// placed. If omitted, the Go package will be derived from the following:
+	//   - The basename of the package import path, if provided.
+	//   - Otherwise, the package statement in the .proto file, if present.
+	//   - Otherwise, the basename of the .proto file, without extension.
+	GoPackage *string `protobuf:"bytes,11,opt,name=go_package" json:"go_package,omitempty"`
+	// Should generic services be generated in each language? "Generic" services
+	// are not specific to any particular RPC system. They are generated by the
+	// main code generators in each language (without additional plugins).
+	// Generic services were the only kind of service generation supported by
+	// early versions of google.protobuf.
+	//
+	// Generic services are now considered deprecated in favor of using plugins
+	// that generate code specific to your particular RPC system. Therefore,
+	// these default to false. Old code which depends on generic services should
+	// explicitly set them to true.
+	CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,def=0" json:"cc_generic_services,omitempty"`
+	JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,def=0" json:"java_generic_services,omitempty"`
+	PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,def=0" json:"py_generic_services,omitempty"`
+	// Is this file deprecated?
+	// Depending on the target platform, this can emit Deprecated annotations
+	// for everything in the file, or it will be completely ignored; in the very
+	// least, this is a formalization for deprecating files.
+	Deprecated *bool `protobuf:"varint,23,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+	// Enables the use of arenas for the proto messages in this file. This applies
+	// only to generated classes for C++.
+	CcEnableArenas *bool `protobuf:"varint,31,opt,name=cc_enable_arenas,def=0" json:"cc_enable_arenas,omitempty"`
+	// Sets the Objective-C class prefix which is prepended to all Objective-C
+	// generated classes from this .proto. There is no default.
+ ObjcClassPrefix *string `protobuf:"bytes,36,opt,name=objc_class_prefix" json:"objc_class_prefix,omitempty"` + // Namespace for generated classes; defaults to the package. + CsharpNamespace *string `protobuf:"bytes,37,opt,name=csharp_namespace" json:"csharp_namespace,omitempty"` + // Whether the nano proto compiler should generate in the deprecated non-nano + // suffixed package. + JavananoUseDeprecatedPackage *bool `protobuf:"varint,38,opt,name=javanano_use_deprecated_package" json:"javanano_use_deprecated_package,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option" json:"uninterpreted_option,omitempty"` + XXX_extensions map[int32]proto.Extension `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FileOptions) Reset() { *m = FileOptions{} } +func (m *FileOptions) String() string { return proto.CompactTextString(m) } +func (*FileOptions) ProtoMessage() {} + +var extRange_FileOptions = []proto.ExtensionRange{ + {1000, 536870911}, +} + +func (*FileOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_FileOptions +} +func (m *FileOptions) ExtensionMap() map[int32]proto.Extension { + if m.XXX_extensions == nil { + m.XXX_extensions = make(map[int32]proto.Extension) + } + return m.XXX_extensions +} + +const Default_FileOptions_JavaMultipleFiles bool = false +const Default_FileOptions_JavaGenerateEqualsAndHash bool = false +const Default_FileOptions_JavaStringCheckUtf8 bool = false +const Default_FileOptions_OptimizeFor FileOptions_OptimizeMode = FileOptions_SPEED +const Default_FileOptions_CcGenericServices bool = false +const Default_FileOptions_JavaGenericServices bool = false +const Default_FileOptions_PyGenericServices bool = false +const Default_FileOptions_Deprecated bool = false +const Default_FileOptions_CcEnableArenas bool = false + +func (m *FileOptions) GetJavaPackage() string { + if m != nil && m.JavaPackage != nil { + return *m.JavaPackage + } + return "" +} + +func (m *FileOptions) GetJavaOuterClassname() string { + if m != nil && m.JavaOuterClassname != nil { + return *m.JavaOuterClassname + } + return "" +} + +func (m *FileOptions) GetJavaMultipleFiles() bool { + if m != nil && m.JavaMultipleFiles != nil { + return *m.JavaMultipleFiles + } + return Default_FileOptions_JavaMultipleFiles +} + +func (m *FileOptions) GetJavaGenerateEqualsAndHash() bool { + if m != nil && m.JavaGenerateEqualsAndHash != nil { + return *m.JavaGenerateEqualsAndHash + } + return Default_FileOptions_JavaGenerateEqualsAndHash +} + +func (m *FileOptions) GetJavaStringCheckUtf8() bool { + if m != nil && m.JavaStringCheckUtf8 != nil { + return *m.JavaStringCheckUtf8 + } + return Default_FileOptions_JavaStringCheckUtf8 +} + +func (m *FileOptions) GetOptimizeFor() FileOptions_OptimizeMode { + if m != nil && m.OptimizeFor != nil { + return *m.OptimizeFor + } + return Default_FileOptions_OptimizeFor +} + +func (m *FileOptions) GetGoPackage() string { + if m != nil && m.GoPackage != nil { + return *m.GoPackage + } + return "" +} + +func (m *FileOptions) GetCcGenericServices() bool { + if m != nil && m.CcGenericServices != nil { + return *m.CcGenericServices + } + return Default_FileOptions_CcGenericServices +} + +func (m *FileOptions) GetJavaGenericServices() bool { + if m != nil && m.JavaGenericServices != nil { + return *m.JavaGenericServices + } + return Default_FileOptions_JavaGenericServices +} + +func (m *FileOptions) GetPyGenericServices() bool { + if m 
!= nil && m.PyGenericServices != nil {
+		return *m.PyGenericServices
+	}
+	return Default_FileOptions_PyGenericServices
+}
+
+func (m *FileOptions) GetDeprecated() bool {
+	if m != nil && m.Deprecated != nil {
+		return *m.Deprecated
+	}
+	return Default_FileOptions_Deprecated
+}
+
+func (m *FileOptions) GetCcEnableArenas() bool {
+	if m != nil && m.CcEnableArenas != nil {
+		return *m.CcEnableArenas
+	}
+	return Default_FileOptions_CcEnableArenas
+}
+
+func (m *FileOptions) GetObjcClassPrefix() string {
+	if m != nil && m.ObjcClassPrefix != nil {
+		return *m.ObjcClassPrefix
+	}
+	return ""
+}
+
+func (m *FileOptions) GetCsharpNamespace() string {
+	if m != nil && m.CsharpNamespace != nil {
+		return *m.CsharpNamespace
+	}
+	return ""
+}
+
+func (m *FileOptions) GetJavananoUseDeprecatedPackage() bool {
+	if m != nil && m.JavananoUseDeprecatedPackage != nil {
+		return *m.JavananoUseDeprecatedPackage
+	}
+	return false
+}
+
+func (m *FileOptions) GetUninterpretedOption() []*UninterpretedOption {
+	if m != nil {
+		return m.UninterpretedOption
+	}
+	return nil
+}
+
+type MessageOptions struct {
+	// Set true to use the old proto1 MessageSet wire format for extensions.
+	// This is provided for backwards-compatibility with the MessageSet wire
+	// format. You should not use this for any other reason: It's less
+	// efficient, has fewer features, and is more complicated.
+	//
+	// The message must be defined exactly as follows:
+	//   message Foo {
+	//     option message_set_wire_format = true;
+	//     extensions 4 to max;
+	//   }
+	// Note that the message cannot have any defined fields; MessageSets only
+	// have extensions.
+	//
+	// All extensions of your type must be singular messages; e.g. they cannot
+	// be int32s, enums, or repeated messages.
+	//
+	// Because this is an option, the above two restrictions are not enforced by
+	// the protocol compiler.
+	MessageSetWireFormat *bool `protobuf:"varint,1,opt,name=message_set_wire_format,def=0" json:"message_set_wire_format,omitempty"`
+	// Disables the generation of the standard "descriptor()" accessor, which can
+	// conflict with a field of the same name. This is meant to make migration
+	// from proto1 easier; new code should avoid fields named "descriptor".
+	NoStandardDescriptorAccessor *bool `protobuf:"varint,2,opt,name=no_standard_descriptor_accessor,def=0" json:"no_standard_descriptor_accessor,omitempty"`
+	// Is this message deprecated?
+	// Depending on the target platform, this can emit Deprecated annotations
+	// for the message, or it will be completely ignored; in the very least,
+	// this is a formalization for deprecating messages.
+	Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+	// Whether the message is an automatically generated map entry type for the
+	// maps field.
+	//
+	// For maps fields:
+	//   map<KeyType, ValueType> map_field = 1;
+	// The parsed descriptor looks like:
+	//   message MapFieldEntry {
+	//     option map_entry = true;
+	//     optional KeyType key = 1;
+	//     optional ValueType value = 2;
+	//   }
+	//   repeated MapFieldEntry map_field = 1;
+	//
+	// Implementations may choose not to generate the map_entry=true message, but
+	// use a native map in the target language to hold the keys and values.
+	// The reflection APIs in such implementations still need to work as
+	// if the field is a repeated message field.
+	//
+	// NOTE: Do not set the option in .proto files. Always use the maps syntax
+	// instead. The option should only be implicitly set by the proto compiler
+	// parser.
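+	// An illustrative sketch (isMapEntry is a hypothetical helper): code
+	// generators that synthesize native maps typically test this flag on the
+	// nested entry message. GetOptions and GetMapEntry are both nil-safe, so
+	// the chain needs no intermediate checks:
+	//
+	//	func isMapEntry(d *DescriptorProto) bool {
+	//		return d.GetOptions().GetMapEntry()
+	//	}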
+	MapEntry *bool `protobuf:"varint,7,opt,name=map_entry" json:"map_entry,omitempty"`
+	// The parser stores options it doesn't recognize here. See above.
+	UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option" json:"uninterpreted_option,omitempty"`
+	XXX_extensions map[int32]proto.Extension `json:"-"`
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MessageOptions) Reset() { *m = MessageOptions{} }
+func (m *MessageOptions) String() string { return proto.CompactTextString(m) }
+func (*MessageOptions) ProtoMessage() {}
+
+var extRange_MessageOptions = []proto.ExtensionRange{
+	{1000, 536870911},
+}
+
+func (*MessageOptions) ExtensionRangeArray() []proto.ExtensionRange {
+	return extRange_MessageOptions
+}
+func (m *MessageOptions) ExtensionMap() map[int32]proto.Extension {
+	if m.XXX_extensions == nil {
+		m.XXX_extensions = make(map[int32]proto.Extension)
+	}
+	return m.XXX_extensions
+}
+
+const Default_MessageOptions_MessageSetWireFormat bool = false
+const Default_MessageOptions_NoStandardDescriptorAccessor bool = false
+const Default_MessageOptions_Deprecated bool = false
+
+func (m *MessageOptions) GetMessageSetWireFormat() bool {
+	if m != nil && m.MessageSetWireFormat != nil {
+		return *m.MessageSetWireFormat
+	}
+	return Default_MessageOptions_MessageSetWireFormat
+}
+
+func (m *MessageOptions) GetNoStandardDescriptorAccessor() bool {
+	if m != nil && m.NoStandardDescriptorAccessor != nil {
+		return *m.NoStandardDescriptorAccessor
+	}
+	return Default_MessageOptions_NoStandardDescriptorAccessor
+}
+
+func (m *MessageOptions) GetDeprecated() bool {
+	if m != nil && m.Deprecated != nil {
+		return *m.Deprecated
+	}
+	return Default_MessageOptions_Deprecated
+}
+
+func (m *MessageOptions) GetMapEntry() bool {
+	if m != nil && m.MapEntry != nil {
+		return *m.MapEntry
+	}
+	return false
+}
+
+func (m *MessageOptions) GetUninterpretedOption() []*UninterpretedOption {
+	if m != nil {
+		return m.UninterpretedOption
+	}
+	return nil
+}
+
+type FieldOptions struct {
+	// The ctype option instructs the C++ code generator to use a different
+	// representation of the field than it normally would. See the specific
+	// options below. This option is not yet implemented in the open source
+	// release -- sorry, we'll try to include it in a future version!
+	Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=google.protobuf.FieldOptions_CType,def=0" json:"ctype,omitempty"`
+	// The packed option can be enabled for repeated primitive fields to enable
+	// a more efficient representation on the wire. Rather than repeatedly
+	// writing the tag and type for each element, the entire array is encoded as
+	// a single length-delimited blob. In proto3, only explicitly setting it to
+	// false will avoid using packed encoding.
+	Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"`
+	// The jstype option determines the JavaScript type used for values of the
+	// field. The option is permitted only for 64 bit integral and fixed types
+	// (int64, uint64, sint64, fixed64, sfixed64). By default these types are
+	// represented as JavaScript strings. This avoids loss of precision that can
+	// happen when a large value is converted to a floating-point JavaScript
+	// number. Specifying JS_NUMBER for the jstype causes the generated
+	// JavaScript code to use the JavaScript "number" type instead of strings.
+	// This option is an enum to permit additional types to be added,
+	// e.g. goog.math.Integer.
+	Jstype *FieldOptions_JSType `protobuf:"varint,6,opt,name=jstype,enum=google.protobuf.FieldOptions_JSType,def=0" json:"jstype,omitempty"`
+	// Should this field be parsed lazily? Lazy applies only to message-type
+	// fields. It means that when the outer message is initially parsed, the
+	// inner message's contents will not be parsed but instead stored in encoded
+	// form. The inner message will actually be parsed when it is first accessed.
+	//
+	// This is only a hint. Implementations are free to choose whether to use
+	// eager or lazy parsing regardless of the value of this option. However,
+	// setting this option true suggests that the protocol author believes that
+	// using lazy parsing on this field is worth the additional bookkeeping
+	// overhead typically needed to implement it.
+	//
+	// This option does not affect the public interface of any generated code;
+	// all method signatures remain the same. Furthermore, thread-safety of the
+	// interface is not affected by this option; const methods remain safe to
+	// call from multiple threads concurrently, while non-const methods continue
+	// to require exclusive access.
+	//
+	//
+	// Note that implementations may choose not to check required fields within
+	// a lazy sub-message. That is, calling IsInitialized() on the outer message
+	// may return true even if the inner message has missing required fields.
+	// This is necessary because otherwise the inner message would have to be
+	// parsed in order to perform the check, defeating the purpose of lazy
+	// parsing. An implementation which chooses not to check required fields
+	// must be consistent about it. That is, for any particular sub-message, the
+	// implementation must either *always* check its required fields, or *never*
+	// check its required fields, regardless of whether or not the message has
+	// been parsed.
+	Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"`
+	// Is this field deprecated?
+	// Depending on the target platform, this can emit Deprecated annotations
+	// for accessors, or it will be completely ignored; in the very least, this
+	// is a formalization for deprecating fields.
+	Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+	// For Google-internal migration only. Do not use.
+	Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"`
+	// The parser stores options it doesn't recognize here. See above.
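+	// An illustrative sketch (wantsLazyParsing is a hypothetical helper):
+	// these getters fall back to the Default_FieldOptions_* constants declared
+	// below, so callers need not distinguish "unset" from "explicitly set to
+	// the default":
+	//
+	//	func wantsLazyParsing(o *FieldOptions) bool {
+	//		return o.GetLazy() // false when o is nil or the field is unset
+	//	}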
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option" json:"uninterpreted_option,omitempty"` + XXX_extensions map[int32]proto.Extension `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FieldOptions) Reset() { *m = FieldOptions{} } +func (m *FieldOptions) String() string { return proto.CompactTextString(m) } +func (*FieldOptions) ProtoMessage() {} + +var extRange_FieldOptions = []proto.ExtensionRange{ + {1000, 536870911}, +} + +func (*FieldOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_FieldOptions +} +func (m *FieldOptions) ExtensionMap() map[int32]proto.Extension { + if m.XXX_extensions == nil { + m.XXX_extensions = make(map[int32]proto.Extension) + } + return m.XXX_extensions +} + +const Default_FieldOptions_Ctype FieldOptions_CType = FieldOptions_STRING +const Default_FieldOptions_Jstype FieldOptions_JSType = FieldOptions_JS_NORMAL +const Default_FieldOptions_Lazy bool = false +const Default_FieldOptions_Deprecated bool = false +const Default_FieldOptions_Weak bool = false + +func (m *FieldOptions) GetCtype() FieldOptions_CType { + if m != nil && m.Ctype != nil { + return *m.Ctype + } + return Default_FieldOptions_Ctype +} + +func (m *FieldOptions) GetPacked() bool { + if m != nil && m.Packed != nil { + return *m.Packed + } + return false +} + +func (m *FieldOptions) GetJstype() FieldOptions_JSType { + if m != nil && m.Jstype != nil { + return *m.Jstype + } + return Default_FieldOptions_Jstype +} + +func (m *FieldOptions) GetLazy() bool { + if m != nil && m.Lazy != nil { + return *m.Lazy + } + return Default_FieldOptions_Lazy +} + +func (m *FieldOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_FieldOptions_Deprecated +} + +func (m *FieldOptions) GetWeak() bool { + if m != nil && m.Weak != nil { + return *m.Weak + } + return Default_FieldOptions_Weak +} + +func (m *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type EnumOptions struct { + // Set this option to true to allow mapping different tag names to the same + // value. + AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias" json:"allow_alias,omitempty"` + // Is this enum deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum, or it will be completely ignored; in the very least, this + // is a formalization for deprecating enums. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
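+	// An illustrative sketch (inExtensionRange is a hypothetical helper, and
+	// the Start/End field names are assumed from the unkeyed literals above):
+	// the ExtensionRangeArray methods advertise tags 1000 through 536870911
+	// for extensions, and a membership test over such ranges looks roughly
+	// like this:
+	//
+	//	func inExtensionRange(ranges []proto.ExtensionRange, tag int32) bool {
+	//		for _, r := range ranges {
+	//			if r.Start <= tag && tag <= r.End {
+	//				return true
+	//			}
+	//		}
+	//		return false
+	//	}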
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option" json:"uninterpreted_option,omitempty"` + XXX_extensions map[int32]proto.Extension `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *EnumOptions) Reset() { *m = EnumOptions{} } +func (m *EnumOptions) String() string { return proto.CompactTextString(m) } +func (*EnumOptions) ProtoMessage() {} + +var extRange_EnumOptions = []proto.ExtensionRange{ + {1000, 536870911}, +} + +func (*EnumOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_EnumOptions +} +func (m *EnumOptions) ExtensionMap() map[int32]proto.Extension { + if m.XXX_extensions == nil { + m.XXX_extensions = make(map[int32]proto.Extension) + } + return m.XXX_extensions +} + +const Default_EnumOptions_Deprecated bool = false + +func (m *EnumOptions) GetAllowAlias() bool { + if m != nil && m.AllowAlias != nil { + return *m.AllowAlias + } + return false +} + +func (m *EnumOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_EnumOptions_Deprecated +} + +func (m *EnumOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type EnumValueOptions struct { + // Is this enum value deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum value, or it will be completely ignored; in the very least, + // this is a formalization for deprecating enum values. + Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option" json:"uninterpreted_option,omitempty"` + XXX_extensions map[int32]proto.Extension `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *EnumValueOptions) Reset() { *m = EnumValueOptions{} } +func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) } +func (*EnumValueOptions) ProtoMessage() {} + +var extRange_EnumValueOptions = []proto.ExtensionRange{ + {1000, 536870911}, +} + +func (*EnumValueOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_EnumValueOptions +} +func (m *EnumValueOptions) ExtensionMap() map[int32]proto.Extension { + if m.XXX_extensions == nil { + m.XXX_extensions = make(map[int32]proto.Extension) + } + return m.XXX_extensions +} + +const Default_EnumValueOptions_Deprecated bool = false + +func (m *EnumValueOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_EnumValueOptions_Deprecated +} + +func (m *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type ServiceOptions struct { + // Is this service deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the service, or it will be completely ignored; in the very least, + // this is a formalization for deprecating services. + Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+	UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option" json:"uninterpreted_option,omitempty"`
+	XXX_extensions map[int32]proto.Extension `json:"-"`
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ServiceOptions) Reset() { *m = ServiceOptions{} }
+func (m *ServiceOptions) String() string { return proto.CompactTextString(m) }
+func (*ServiceOptions) ProtoMessage() {}
+
+var extRange_ServiceOptions = []proto.ExtensionRange{
+	{1000, 536870911},
+}
+
+func (*ServiceOptions) ExtensionRangeArray() []proto.ExtensionRange {
+	return extRange_ServiceOptions
+}
+func (m *ServiceOptions) ExtensionMap() map[int32]proto.Extension {
+	if m.XXX_extensions == nil {
+		m.XXX_extensions = make(map[int32]proto.Extension)
+	}
+	return m.XXX_extensions
+}
+
+const Default_ServiceOptions_Deprecated bool = false
+
+func (m *ServiceOptions) GetDeprecated() bool {
+	if m != nil && m.Deprecated != nil {
+		return *m.Deprecated
+	}
+	return Default_ServiceOptions_Deprecated
+}
+
+func (m *ServiceOptions) GetUninterpretedOption() []*UninterpretedOption {
+	if m != nil {
+		return m.UninterpretedOption
+	}
+	return nil
+}
+
+type MethodOptions struct {
+	// Is this method deprecated?
+	// Depending on the target platform, this can emit Deprecated annotations
+	// for the method, or it will be completely ignored; in the very least,
+	// this is a formalization for deprecating methods.
+	Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+	// The parser stores options it doesn't recognize here. See above.
+	UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option" json:"uninterpreted_option,omitempty"`
+	XXX_extensions map[int32]proto.Extension `json:"-"`
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MethodOptions) Reset() { *m = MethodOptions{} }
+func (m *MethodOptions) String() string { return proto.CompactTextString(m) }
+func (*MethodOptions) ProtoMessage() {}
+
+var extRange_MethodOptions = []proto.ExtensionRange{
+	{1000, 536870911},
+}
+
+func (*MethodOptions) ExtensionRangeArray() []proto.ExtensionRange {
+	return extRange_MethodOptions
+}
+func (m *MethodOptions) ExtensionMap() map[int32]proto.Extension {
+	if m.XXX_extensions == nil {
+		m.XXX_extensions = make(map[int32]proto.Extension)
+	}
+	return m.XXX_extensions
+}
+
+const Default_MethodOptions_Deprecated bool = false
+
+func (m *MethodOptions) GetDeprecated() bool {
+	if m != nil && m.Deprecated != nil {
+		return *m.Deprecated
+	}
+	return Default_MethodOptions_Deprecated
+}
+
+func (m *MethodOptions) GetUninterpretedOption() []*UninterpretedOption {
+	if m != nil {
+		return m.UninterpretedOption
+	}
+	return nil
+}
+
+// A message representing an option the parser does not recognize. This only
+// appears in options protos created by the compiler::Parser class.
+// DescriptorPool resolves these when building Descriptor objects. Therefore,
+// options protos in descriptor objects (e.g. returned by Descriptor::options(),
+// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions
+// in them.
+type UninterpretedOption struct {
+	Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"`
+	// The value of the uninterpreted option, in whatever type the tokenizer
+	// identified it as during parsing. Exactly one of these should be set.
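+	// An illustrative sketch (optionValue is a hypothetical helper): because
+	// exactly one of the value fields below should be set, the pointer fields
+	// double as presence bits and can be switched on directly:
+	//
+	//	func optionValue(u *UninterpretedOption) interface{} {
+	//		switch {
+	//		case u.IdentifierValue != nil:
+	//			return u.GetIdentifierValue()
+	//		case u.PositiveIntValue != nil:
+	//			return u.GetPositiveIntValue()
+	//		case u.NegativeIntValue != nil:
+	//			return u.GetNegativeIntValue()
+	//		case u.DoubleValue != nil:
+	//			return u.GetDoubleValue()
+	//		case u.StringValue != nil:
+	//			return u.GetStringValue()
+	//		default:
+	//			return u.GetAggregateValue()
+	//		}
+	//	}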
+ IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value" json:"identifier_value,omitempty"` + PositiveIntValue *uint64 `protobuf:"varint,4,opt,name=positive_int_value" json:"positive_int_value,omitempty"` + NegativeIntValue *int64 `protobuf:"varint,5,opt,name=negative_int_value" json:"negative_int_value,omitempty"` + DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value" json:"double_value,omitempty"` + StringValue []byte `protobuf:"bytes,7,opt,name=string_value" json:"string_value,omitempty"` + AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value" json:"aggregate_value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *UninterpretedOption) Reset() { *m = UninterpretedOption{} } +func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) } +func (*UninterpretedOption) ProtoMessage() {} + +func (m *UninterpretedOption) GetName() []*UninterpretedOption_NamePart { + if m != nil { + return m.Name + } + return nil +} + +func (m *UninterpretedOption) GetIdentifierValue() string { + if m != nil && m.IdentifierValue != nil { + return *m.IdentifierValue + } + return "" +} + +func (m *UninterpretedOption) GetPositiveIntValue() uint64 { + if m != nil && m.PositiveIntValue != nil { + return *m.PositiveIntValue + } + return 0 +} + +func (m *UninterpretedOption) GetNegativeIntValue() int64 { + if m != nil && m.NegativeIntValue != nil { + return *m.NegativeIntValue + } + return 0 +} + +func (m *UninterpretedOption) GetDoubleValue() float64 { + if m != nil && m.DoubleValue != nil { + return *m.DoubleValue + } + return 0 +} + +func (m *UninterpretedOption) GetStringValue() []byte { + if m != nil { + return m.StringValue + } + return nil +} + +func (m *UninterpretedOption) GetAggregateValue() string { + if m != nil && m.AggregateValue != nil { + return *m.AggregateValue + } + return "" +} + +// The name of the uninterpreted option. Each string represents a segment in +// a dot-separated name. is_extension is true iff a segment represents an +// extension (denoted with parentheses in options specs in .proto files). +// E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents +// "foo.(bar.baz).qux". +type UninterpretedOption_NamePart struct { + NamePart *string `protobuf:"bytes,1,req,name=name_part" json:"name_part,omitempty"` + IsExtension *bool `protobuf:"varint,2,req,name=is_extension" json:"is_extension,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *UninterpretedOption_NamePart) Reset() { *m = UninterpretedOption_NamePart{} } +func (m *UninterpretedOption_NamePart) String() string { return proto.CompactTextString(m) } +func (*UninterpretedOption_NamePart) ProtoMessage() {} + +func (m *UninterpretedOption_NamePart) GetNamePart() string { + if m != nil && m.NamePart != nil { + return *m.NamePart + } + return "" +} + +func (m *UninterpretedOption_NamePart) GetIsExtension() bool { + if m != nil && m.IsExtension != nil { + return *m.IsExtension + } + return false +} + +// Encapsulates information about the original source file from which a +// FileDescriptorProto was generated. +type SourceCodeInfo struct { + // A Location identifies a piece of source code in a .proto file which + // corresponds to a particular definition. This information is intended + // to be useful to IDEs, code indexers, documentation generators, and similar + // tools. 
+	//
+	// For example, say we have a file like:
+	//   message Foo {
+	//     optional string foo = 1;
+	//   }
+	// Let's look at just the field definition:
+	//   optional string foo = 1;
+	//   ^       ^^     ^^  ^  ^^^
+	//   a       bc     de  f  ghi
+	// We have the following locations:
+	//   span   path               represents
+	//   [a,i)  [ 4, 0, 2, 0 ]     The whole field definition.
+	//   [a,b)  [ 4, 0, 2, 0, 4 ]  The label (optional).
+	//   [c,d)  [ 4, 0, 2, 0, 5 ]  The type (string).
+	//   [e,f)  [ 4, 0, 2, 0, 1 ]  The name (foo).
+	//   [g,h)  [ 4, 0, 2, 0, 3 ]  The number (1).
+	//
+	// Notes:
+	// - A location may refer to a repeated field itself (i.e. not to any
+	//   particular index within it). This is used whenever a set of elements are
+	//   logically enclosed in a single code segment. For example, an entire
+	//   extend block (possibly containing multiple extension definitions) will
+	//   have an outer location whose path refers to the "extensions" repeated
+	//   field without an index.
+	// - Multiple locations may have the same path. This happens when a single
+	//   logical declaration is spread out across multiple places. The most
+	//   obvious example is the "extend" block again -- there may be multiple
+	//   extend blocks in the same scope, each of which will have the same path.
+	// - A location's span is not always a subset of its parent's span. For
+	//   example, the "extendee" of an extension declaration appears at the
+	//   beginning of the "extend" block and is shared by all extensions within
+	//   the block.
+	// - Just because a location's span is a subset of some other location's span
+	//   does not mean that it is a descendant. For example, a "group" defines
+	//   both a type and a field in a single declaration. Thus, the locations
+	//   corresponding to the type and field and their components will overlap.
+	// - Code which tries to interpret locations should probably be designed to
+	//   ignore those that it doesn't understand, as more types of locations could
+	//   be recorded in the future.
+	Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"`
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SourceCodeInfo) Reset() { *m = SourceCodeInfo{} }
+func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) }
+func (*SourceCodeInfo) ProtoMessage() {}
+
+func (m *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location {
+	if m != nil {
+		return m.Location
+	}
+	return nil
+}
+
+type SourceCodeInfo_Location struct {
+	// Identifies which part of the FileDescriptorProto was defined at this
+	// location.
+	//
+	// Each element is a field number or an index. They form a path from
+	// the root FileDescriptorProto to the place where the definition occurs.
+	// For example, this path:
+	//   [ 4, 3, 2, 7, 1 ]
+	// refers to:
+	//   file.message_type(3)  // 4, 3
+	//       .field(7)         // 2, 7
+	//       .name()           // 1
+	// This is because FileDescriptorProto.message_type has field number 4:
+	//   repeated DescriptorProto message_type = 4;
+	// and DescriptorProto.field has field number 2:
+	//   repeated FieldDescriptorProto field = 2;
+	// and FieldDescriptorProto.name has field number 1:
+	//   optional string name = 1;
+	//
+	// Thus, the above path gives the location of a field name. If we removed
+	// the last element:
+	//   [ 4, 3, 2, 7 ]
+	// this path refers to the whole field declaration (from the beginning
+	// of the label to the terminating semicolon).
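+	// An illustrative sketch (messageAt is a hypothetical helper) of the walk
+	// described above, resolving a leading [4, i] path prefix against the file
+	// (field 4 is FileDescriptorProto.message_type):
+	//
+	//	func messageAt(fd *FileDescriptorProto, path []int32) *DescriptorProto {
+	//		if len(path) >= 2 && path[0] == 4 {
+	//			if mts := fd.GetMessageType(); int(path[1]) < len(mts) {
+	//				return mts[path[1]]
+	//			}
+	//		}
+	//		return nil
+	//	}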
+ Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` + // Always has exactly three or four elements: start line, start column, + // end line (optional, otherwise assumed same as start line), end column. + // These are packed into a single field for efficiency. Note that line + // and column numbers are zero-based -- typically you will want to add + // 1 to each before displaying to a user. + Span []int32 `protobuf:"varint,2,rep,packed,name=span" json:"span,omitempty"` + // If this SourceCodeInfo represents a complete declaration, these are any + // comments appearing before and after the declaration which appear to be + // attached to the declaration. + // + // A series of line comments appearing on consecutive lines, with no other + // tokens appearing on those lines, will be treated as a single comment. + // + // leading_detached_comments will keep paragraphs of comments that appear + // before (but not connected to) the current element. Each paragraph, + // separated by empty lines, will be one comment element in the repeated + // field. + // + // Only the comment content is provided; comment markers (e.g. //) are + // stripped out. For block comments, leading whitespace and an asterisk + // will be stripped from the beginning of each line other than the first. + // Newlines are included in the output. + // + // Examples: + // + // optional int32 foo = 1; // Comment attached to foo. + // // Comment attached to bar. + // optional int32 bar = 2; + // + // optional string baz = 3; + // // Comment attached to baz. + // // Another line attached to baz. + // + // // Comment attached to qux. + // // + // // Another line attached to qux. + // optional double qux = 4; + // + // // Detached comment for corge. This is not leading or trailing comments + // // to qux or corge because there are blank lines separating it from + // // both. + // + // // Detached comment for corge paragraph 2. + // + // optional string corge = 5; + // /* Block comment attached + // * to corge. Leading asterisks + // * will be removed. */ + // /* Block comment attached to + // * grault. */ + // optional int32 grault = 6; + // + // // ignored detached comments. 
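+	// An illustrative sketch (spanBounds is a hypothetical helper): spans are
+	// zero-based with an optional end line, so display code converts them
+	// roughly like this:
+	//
+	//	func spanBounds(span []int32) (startLine, startCol, endLine, endCol int32) {
+	//		startLine, startCol = span[0]+1, span[1]+1
+	//		if len(span) == 4 {
+	//			return startLine, startCol, span[2]+1, span[3]+1
+	//		}
+	//		// Three elements: the end line equals the start line.
+	//		return startLine, startCol, startLine, span[2]+1
+	//	}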
+ LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments" json:"leading_comments,omitempty"` + TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments" json:"trailing_comments,omitempty"` + LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments" json:"leading_detached_comments,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SourceCodeInfo_Location) Reset() { *m = SourceCodeInfo_Location{} } +func (m *SourceCodeInfo_Location) String() string { return proto.CompactTextString(m) } +func (*SourceCodeInfo_Location) ProtoMessage() {} + +func (m *SourceCodeInfo_Location) GetPath() []int32 { + if m != nil { + return m.Path + } + return nil +} + +func (m *SourceCodeInfo_Location) GetSpan() []int32 { + if m != nil { + return m.Span + } + return nil +} + +func (m *SourceCodeInfo_Location) GetLeadingComments() string { + if m != nil && m.LeadingComments != nil { + return *m.LeadingComments + } + return "" +} + +func (m *SourceCodeInfo_Location) GetTrailingComments() string { + if m != nil && m.TrailingComments != nil { + return *m.TrailingComments + } + return "" +} + +func (m *SourceCodeInfo_Location) GetLeadingDetachedComments() []string { + if m != nil { + return m.LeadingDetachedComments + } + return nil +} + +func init() { + proto.RegisterType((*FileDescriptorSet)(nil), "google.protobuf.FileDescriptorSet") + proto.RegisterType((*FileDescriptorProto)(nil), "google.protobuf.FileDescriptorProto") + proto.RegisterType((*DescriptorProto)(nil), "google.protobuf.DescriptorProto") + proto.RegisterType((*DescriptorProto_ExtensionRange)(nil), "google.protobuf.DescriptorProto.ExtensionRange") + proto.RegisterType((*DescriptorProto_ReservedRange)(nil), "google.protobuf.DescriptorProto.ReservedRange") + proto.RegisterType((*FieldDescriptorProto)(nil), "google.protobuf.FieldDescriptorProto") + proto.RegisterType((*OneofDescriptorProto)(nil), "google.protobuf.OneofDescriptorProto") + proto.RegisterType((*EnumDescriptorProto)(nil), "google.protobuf.EnumDescriptorProto") + proto.RegisterType((*EnumValueDescriptorProto)(nil), "google.protobuf.EnumValueDescriptorProto") + proto.RegisterType((*ServiceDescriptorProto)(nil), "google.protobuf.ServiceDescriptorProto") + proto.RegisterType((*MethodDescriptorProto)(nil), "google.protobuf.MethodDescriptorProto") + proto.RegisterType((*FileOptions)(nil), "google.protobuf.FileOptions") + proto.RegisterType((*MessageOptions)(nil), "google.protobuf.MessageOptions") + proto.RegisterType((*FieldOptions)(nil), "google.protobuf.FieldOptions") + proto.RegisterType((*EnumOptions)(nil), "google.protobuf.EnumOptions") + proto.RegisterType((*EnumValueOptions)(nil), "google.protobuf.EnumValueOptions") + proto.RegisterType((*ServiceOptions)(nil), "google.protobuf.ServiceOptions") + proto.RegisterType((*MethodOptions)(nil), "google.protobuf.MethodOptions") + proto.RegisterType((*UninterpretedOption)(nil), "google.protobuf.UninterpretedOption") + proto.RegisterType((*UninterpretedOption_NamePart)(nil), "google.protobuf.UninterpretedOption.NamePart") + proto.RegisterType((*SourceCodeInfo)(nil), "google.protobuf.SourceCodeInfo") + proto.RegisterType((*SourceCodeInfo_Location)(nil), "google.protobuf.SourceCodeInfo.Location") + proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Type", FieldDescriptorProto_Type_name, FieldDescriptorProto_Type_value) + proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Label", FieldDescriptorProto_Label_name, FieldDescriptorProto_Label_value) + 
proto.RegisterEnum("google.protobuf.FileOptions_OptimizeMode", FileOptions_OptimizeMode_name, FileOptions_OptimizeMode_value) + proto.RegisterEnum("google.protobuf.FieldOptions_CType", FieldOptions_CType_name, FieldOptions_CType_value) + proto.RegisterEnum("google.protobuf.FieldOptions_JSType", FieldOptions_JSType_name, FieldOptions_JSType_value) +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/gostring.go b/Godeps/_workspace/src/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/gostring.go new file mode 100644 index 000000000000..76e2c95f9b44 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/gostring.go @@ -0,0 +1,635 @@ +package descriptor + +import fmt "fmt" + +import strings "strings" +import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto" +import sort "sort" +import strconv "strconv" +import reflect "reflect" + +func (this *FileDescriptorSet) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.FileDescriptorSet{") + if this.File != nil { + s = append(s, "File: "+fmt.Sprintf("%#v", this.File)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FileDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 16) + s = append(s, "&descriptor.FileDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Package != nil { + s = append(s, "Package: "+valueToGoStringDescriptor(this.Package, "string")+",\n") + } + if this.Dependency != nil { + s = append(s, "Dependency: "+fmt.Sprintf("%#v", this.Dependency)+",\n") + } + if this.PublicDependency != nil { + s = append(s, "PublicDependency: "+fmt.Sprintf("%#v", this.PublicDependency)+",\n") + } + if this.WeakDependency != nil { + s = append(s, "WeakDependency: "+fmt.Sprintf("%#v", this.WeakDependency)+",\n") + } + if this.MessageType != nil { + s = append(s, "MessageType: "+fmt.Sprintf("%#v", this.MessageType)+",\n") + } + if this.EnumType != nil { + s = append(s, "EnumType: "+fmt.Sprintf("%#v", this.EnumType)+",\n") + } + if this.Service != nil { + s = append(s, "Service: "+fmt.Sprintf("%#v", this.Service)+",\n") + } + if this.Extension != nil { + s = append(s, "Extension: "+fmt.Sprintf("%#v", this.Extension)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.SourceCodeInfo != nil { + s = append(s, "SourceCodeInfo: "+fmt.Sprintf("%#v", this.SourceCodeInfo)+",\n") + } + if this.Syntax != nil { + s = append(s, "Syntax: "+valueToGoStringDescriptor(this.Syntax, "string")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 14) + s = append(s, "&descriptor.DescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Field != nil { + s = append(s, "Field: "+fmt.Sprintf("%#v", this.Field)+",\n") + } + if this.Extension != nil { + s = append(s, "Extension: "+fmt.Sprintf("%#v", this.Extension)+",\n") + } + if this.NestedType != nil { + s = 
append(s, "NestedType: "+fmt.Sprintf("%#v", this.NestedType)+",\n") + } + if this.EnumType != nil { + s = append(s, "EnumType: "+fmt.Sprintf("%#v", this.EnumType)+",\n") + } + if this.ExtensionRange != nil { + s = append(s, "ExtensionRange: "+fmt.Sprintf("%#v", this.ExtensionRange)+",\n") + } + if this.OneofDecl != nil { + s = append(s, "OneofDecl: "+fmt.Sprintf("%#v", this.OneofDecl)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.ReservedRange != nil { + s = append(s, "ReservedRange: "+fmt.Sprintf("%#v", this.ReservedRange)+",\n") + } + if this.ReservedName != nil { + s = append(s, "ReservedName: "+fmt.Sprintf("%#v", this.ReservedName)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DescriptorProto_ExtensionRange) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.DescriptorProto_ExtensionRange{") + if this.Start != nil { + s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n") + } + if this.End != nil { + s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DescriptorProto_ReservedRange) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.DescriptorProto_ReservedRange{") + if this.Start != nil { + s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n") + } + if this.End != nil { + s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FieldDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 14) + s = append(s, "&descriptor.FieldDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Number != nil { + s = append(s, "Number: "+valueToGoStringDescriptor(this.Number, "int32")+",\n") + } + if this.Label != nil { + s = append(s, "Label: "+valueToGoStringDescriptor(this.Label, "descriptor.FieldDescriptorProto_Label")+",\n") + } + if this.Type != nil { + s = append(s, "Type: "+valueToGoStringDescriptor(this.Type, "descriptor.FieldDescriptorProto_Type")+",\n") + } + if this.TypeName != nil { + s = append(s, "TypeName: "+valueToGoStringDescriptor(this.TypeName, "string")+",\n") + } + if this.Extendee != nil { + s = append(s, "Extendee: "+valueToGoStringDescriptor(this.Extendee, "string")+",\n") + } + if this.DefaultValue != nil { + s = append(s, "DefaultValue: "+valueToGoStringDescriptor(this.DefaultValue, "string")+",\n") + } + if this.OneofIndex != nil { + s = append(s, "OneofIndex: "+valueToGoStringDescriptor(this.OneofIndex, "int32")+",\n") + } + if this.JsonName != nil { + s = append(s, "JsonName: "+valueToGoStringDescriptor(this.JsonName, "string")+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", 
this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *OneofDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.OneofDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.EnumDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Value != nil { + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumValueDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.EnumValueDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Number != nil { + s = append(s, "Number: "+valueToGoStringDescriptor(this.Number, "int32")+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ServiceDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.ServiceDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Method != nil { + s = append(s, "Method: "+fmt.Sprintf("%#v", this.Method)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *MethodDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 10) + s = append(s, "&descriptor.MethodDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.InputType != nil { + s = append(s, "InputType: "+valueToGoStringDescriptor(this.InputType, "string")+",\n") + } + if this.OutputType != nil { + s = append(s, "OutputType: "+valueToGoStringDescriptor(this.OutputType, "string")+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.ClientStreaming != nil { + s = append(s, "ClientStreaming: "+valueToGoStringDescriptor(this.ClientStreaming, "bool")+",\n") + } + if this.ServerStreaming != nil { + s = append(s, "ServerStreaming: "+valueToGoStringDescriptor(this.ServerStreaming, "bool")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", 
this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FileOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 20) + s = append(s, "&descriptor.FileOptions{") + if this.JavaPackage != nil { + s = append(s, "JavaPackage: "+valueToGoStringDescriptor(this.JavaPackage, "string")+",\n") + } + if this.JavaOuterClassname != nil { + s = append(s, "JavaOuterClassname: "+valueToGoStringDescriptor(this.JavaOuterClassname, "string")+",\n") + } + if this.JavaMultipleFiles != nil { + s = append(s, "JavaMultipleFiles: "+valueToGoStringDescriptor(this.JavaMultipleFiles, "bool")+",\n") + } + if this.JavaGenerateEqualsAndHash != nil { + s = append(s, "JavaGenerateEqualsAndHash: "+valueToGoStringDescriptor(this.JavaGenerateEqualsAndHash, "bool")+",\n") + } + if this.JavaStringCheckUtf8 != nil { + s = append(s, "JavaStringCheckUtf8: "+valueToGoStringDescriptor(this.JavaStringCheckUtf8, "bool")+",\n") + } + if this.OptimizeFor != nil { + s = append(s, "OptimizeFor: "+valueToGoStringDescriptor(this.OptimizeFor, "descriptor.FileOptions_OptimizeMode")+",\n") + } + if this.GoPackage != nil { + s = append(s, "GoPackage: "+valueToGoStringDescriptor(this.GoPackage, "string")+",\n") + } + if this.CcGenericServices != nil { + s = append(s, "CcGenericServices: "+valueToGoStringDescriptor(this.CcGenericServices, "bool")+",\n") + } + if this.JavaGenericServices != nil { + s = append(s, "JavaGenericServices: "+valueToGoStringDescriptor(this.JavaGenericServices, "bool")+",\n") + } + if this.PyGenericServices != nil { + s = append(s, "PyGenericServices: "+valueToGoStringDescriptor(this.PyGenericServices, "bool")+",\n") + } + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.CcEnableArenas != nil { + s = append(s, "CcEnableArenas: "+valueToGoStringDescriptor(this.CcEnableArenas, "bool")+",\n") + } + if this.ObjcClassPrefix != nil { + s = append(s, "ObjcClassPrefix: "+valueToGoStringDescriptor(this.ObjcClassPrefix, "string")+",\n") + } + if this.CsharpNamespace != nil { + s = append(s, "CsharpNamespace: "+valueToGoStringDescriptor(this.CsharpNamespace, "string")+",\n") + } + if this.JavananoUseDeprecatedPackage != nil { + s = append(s, "JavananoUseDeprecatedPackage: "+valueToGoStringDescriptor(this.JavananoUseDeprecatedPackage, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + if this.XXX_extensions != nil { + s = append(s, "XXX_extensions: "+extensionToGoStringDescriptor(this.XXX_extensions)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *MessageOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&descriptor.MessageOptions{") + if this.MessageSetWireFormat != nil { + s = append(s, "MessageSetWireFormat: "+valueToGoStringDescriptor(this.MessageSetWireFormat, "bool")+",\n") + } + if this.NoStandardDescriptorAccessor != nil { + s = append(s, "NoStandardDescriptorAccessor: "+valueToGoStringDescriptor(this.NoStandardDescriptorAccessor, "bool")+",\n") + } + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.MapEntry != nil { + s = append(s, "MapEntry: 
"+valueToGoStringDescriptor(this.MapEntry, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + if this.XXX_extensions != nil { + s = append(s, "XXX_extensions: "+extensionToGoStringDescriptor(this.XXX_extensions)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FieldOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 11) + s = append(s, "&descriptor.FieldOptions{") + if this.Ctype != nil { + s = append(s, "Ctype: "+valueToGoStringDescriptor(this.Ctype, "descriptor.FieldOptions_CType")+",\n") + } + if this.Packed != nil { + s = append(s, "Packed: "+valueToGoStringDescriptor(this.Packed, "bool")+",\n") + } + if this.Jstype != nil { + s = append(s, "Jstype: "+valueToGoStringDescriptor(this.Jstype, "descriptor.FieldOptions_JSType")+",\n") + } + if this.Lazy != nil { + s = append(s, "Lazy: "+valueToGoStringDescriptor(this.Lazy, "bool")+",\n") + } + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.Weak != nil { + s = append(s, "Weak: "+valueToGoStringDescriptor(this.Weak, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + if this.XXX_extensions != nil { + s = append(s, "XXX_extensions: "+extensionToGoStringDescriptor(this.XXX_extensions)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.EnumOptions{") + if this.AllowAlias != nil { + s = append(s, "AllowAlias: "+valueToGoStringDescriptor(this.AllowAlias, "bool")+",\n") + } + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + if this.XXX_extensions != nil { + s = append(s, "XXX_extensions: "+extensionToGoStringDescriptor(this.XXX_extensions)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumValueOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.EnumValueOptions{") + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + if this.XXX_extensions != nil { + s = append(s, "XXX_extensions: "+extensionToGoStringDescriptor(this.XXX_extensions)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ServiceOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, 
"&descriptor.ServiceOptions{") + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + if this.XXX_extensions != nil { + s = append(s, "XXX_extensions: "+extensionToGoStringDescriptor(this.XXX_extensions)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *MethodOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.MethodOptions{") + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + if this.XXX_extensions != nil { + s = append(s, "XXX_extensions: "+extensionToGoStringDescriptor(this.XXX_extensions)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *UninterpretedOption) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 11) + s = append(s, "&descriptor.UninterpretedOption{") + if this.Name != nil { + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + } + if this.IdentifierValue != nil { + s = append(s, "IdentifierValue: "+valueToGoStringDescriptor(this.IdentifierValue, "string")+",\n") + } + if this.PositiveIntValue != nil { + s = append(s, "PositiveIntValue: "+valueToGoStringDescriptor(this.PositiveIntValue, "uint64")+",\n") + } + if this.NegativeIntValue != nil { + s = append(s, "NegativeIntValue: "+valueToGoStringDescriptor(this.NegativeIntValue, "int64")+",\n") + } + if this.DoubleValue != nil { + s = append(s, "DoubleValue: "+valueToGoStringDescriptor(this.DoubleValue, "float64")+",\n") + } + if this.StringValue != nil { + s = append(s, "StringValue: "+valueToGoStringDescriptor(this.StringValue, "byte")+",\n") + } + if this.AggregateValue != nil { + s = append(s, "AggregateValue: "+valueToGoStringDescriptor(this.AggregateValue, "string")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *UninterpretedOption_NamePart) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.UninterpretedOption_NamePart{") + if this.NamePart != nil { + s = append(s, "NamePart: "+valueToGoStringDescriptor(this.NamePart, "string")+",\n") + } + if this.IsExtension != nil { + s = append(s, "IsExtension: "+valueToGoStringDescriptor(this.IsExtension, "bool")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *SourceCodeInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.SourceCodeInfo{") + if this.Location != nil { + s = append(s, "Location: "+fmt.Sprintf("%#v", this.Location)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", 
this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *SourceCodeInfo_Location) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&descriptor.SourceCodeInfo_Location{") + if this.Path != nil { + s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n") + } + if this.Span != nil { + s = append(s, "Span: "+fmt.Sprintf("%#v", this.Span)+",\n") + } + if this.LeadingComments != nil { + s = append(s, "LeadingComments: "+valueToGoStringDescriptor(this.LeadingComments, "string")+",\n") + } + if this.TrailingComments != nil { + s = append(s, "TrailingComments: "+valueToGoStringDescriptor(this.TrailingComments, "string")+",\n") + } + if this.LeadingDetachedComments != nil { + s = append(s, "LeadingDetachedComments: "+fmt.Sprintf("%#v", this.LeadingDetachedComments)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringDescriptor(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func extensionToGoStringDescriptor(e map[int32]github_com_gogo_protobuf_proto.Extension) string { + if e == nil { + return "nil" + } + s := "map[int32]proto.Extension{" + keys := make([]int, 0, len(e)) + for k := range e { + keys = append(keys, int(k)) + } + sort.Ints(keys) + ss := []string{} + for _, k := range keys { + ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString()) + } + s += strings.Join(ss, ",") + "}" + return s +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go b/Godeps/_workspace/src/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go new file mode 100644 index 000000000000..cf88f3cc506d --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go @@ -0,0 +1,342 @@ +// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
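// Editor's note (not part of the vendored file): the GoString methods in
// gostring.go above satisfy fmt.GoStringer, so formatting any descriptor
// message with the "%#v" verb prints it as compilable Go source, e.g. for
// some loc *SourceCodeInfo_Location:
//
//	fmt.Printf("%#v\n", loc) // &descriptor.SourceCodeInfo_Location{Path: []int32{4, 0}, ...}
//
// valueToGoStringDescriptor prints scalar fields wrapped in an immediately
// invoked function literal so that the emitted literal can take the address
// of a value, matching the pointer-typed optional fields.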
+ +package descriptor + +import ( + "strings" +) + +func (msg *DescriptorProto) GetMapFields() (*FieldDescriptorProto, *FieldDescriptorProto) { + if !msg.GetOptions().GetMapEntry() { + return nil, nil + } + return msg.GetField()[0], msg.GetField()[1] +} + +func dotToUnderscore(r rune) rune { + if r == '.' { + return '_' + } + return r +} + +func (field *FieldDescriptorProto) WireType() (wire int) { + switch *field.Type { + case FieldDescriptorProto_TYPE_DOUBLE: + return 1 + case FieldDescriptorProto_TYPE_FLOAT: + return 5 + case FieldDescriptorProto_TYPE_INT64: + return 0 + case FieldDescriptorProto_TYPE_UINT64: + return 0 + case FieldDescriptorProto_TYPE_INT32: + return 0 + case FieldDescriptorProto_TYPE_UINT32: + return 0 + case FieldDescriptorProto_TYPE_FIXED64: + return 1 + case FieldDescriptorProto_TYPE_FIXED32: + return 5 + case FieldDescriptorProto_TYPE_BOOL: + return 0 + case FieldDescriptorProto_TYPE_STRING: + return 2 + case FieldDescriptorProto_TYPE_GROUP: + return 2 + case FieldDescriptorProto_TYPE_MESSAGE: + return 2 + case FieldDescriptorProto_TYPE_BYTES: + return 2 + case FieldDescriptorProto_TYPE_ENUM: + return 0 + case FieldDescriptorProto_TYPE_SFIXED32: + return 5 + case FieldDescriptorProto_TYPE_SFIXED64: + return 1 + case FieldDescriptorProto_TYPE_SINT32: + return 0 + case FieldDescriptorProto_TYPE_SINT64: + return 0 + } + panic("unreachable") +} + +func (field *FieldDescriptorProto) GetKeyUint64() (x uint64) { + packed := field.IsPacked() + wireType := field.WireType() + fieldNumber := field.GetNumber() + if packed { + wireType = 2 + } + x = uint64(uint32(fieldNumber)<<3 | uint32(wireType)) + return x +} + +func (field *FieldDescriptorProto) GetKey() []byte { + x := field.GetKeyUint64() + i := 0 + keybuf := make([]byte, 0) + for i = 0; x > 127; i++ { + keybuf = append(keybuf, 0x80|uint8(x&0x7F)) + x >>= 7 + } + keybuf = append(keybuf, uint8(x)) + return keybuf +} + +func (desc *FileDescriptorSet) GetField(packageName, messageName, fieldName string) *FieldDescriptorProto { + msg := desc.GetMessage(packageName, messageName) + if msg == nil { + return nil + } + for _, field := range msg.GetField() { + if field.GetName() == fieldName { + return field + } + } + return nil +} + +func (file *FileDescriptorProto) GetMessage(typeName string) *DescriptorProto { + for _, msg := range file.GetMessageType() { + if msg.GetName() == typeName { + return msg + } + for _, nes := range msg.GetNestedType() { + if nes.GetName() == typeName { + return nes + } + if msg.GetName()+"."+nes.GetName() == typeName { + return nes + } + } + } + return nil +} + +func (desc *FileDescriptorSet) GetMessage(packageName string, typeName string) *DescriptorProto { + for _, file := range desc.GetFile() { + if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) { + continue + } + for _, msg := range file.GetMessageType() { + if msg.GetName() == typeName { + return msg + } + } + for _, msg := range file.GetMessageType() { + for _, nes := range msg.GetNestedType() { + if nes.GetName() == typeName { + return nes + } + if msg.GetName()+"."+nes.GetName() == typeName { + return nes + } + } + } + } + return nil +} + +func (desc *FileDescriptorSet) IsProto3(packageName string, typeName string) bool { + for _, file := range desc.GetFile() { + if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) { + continue + } + for _, msg := range file.GetMessageType() { + if msg.GetName() == typeName { + return file.GetSyntax() == "proto3" + } + } 
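// Editor's note (worked example, not part of the vendored file): in
// GetKeyUint64/GetKey above, a field's wire key is fieldNumber<<3 | wireType,
// varint-encoded. For example, a bool (wire type 0) with field number 2
// yields 2<<3|0 = 0x10, a single key byte; a packed repeated int32 with
// field number 4 is forced to wire type 2 (length-delimited), giving
// 4<<3|2 = 0x22.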
+ for _, msg := range file.GetMessageType() { + for _, nes := range msg.GetNestedType() { + if nes.GetName() == typeName { + return file.GetSyntax() == "proto3" + } + if msg.GetName()+"."+nes.GetName() == typeName { + return file.GetSyntax() == "proto3" + } + } + } + } + return false +} + +func (msg *DescriptorProto) IsExtendable() bool { + return len(msg.GetExtensionRange()) > 0 +} + +func (desc *FileDescriptorSet) FindExtension(packageName string, typeName string, fieldName string) (extPackageName string, field *FieldDescriptorProto) { + parent := desc.GetMessage(packageName, typeName) + if parent == nil { + return "", nil + } + if !parent.IsExtendable() { + return "", nil + } + extendee := "." + packageName + "." + typeName + for _, file := range desc.GetFile() { + for _, ext := range file.GetExtension() { + if strings.Map(dotToUnderscore, file.GetPackage()) == strings.Map(dotToUnderscore, packageName) { + if !(ext.GetExtendee() == typeName || ext.GetExtendee() == extendee) { + continue + } + } else { + if ext.GetExtendee() != extendee { + continue + } + } + if ext.GetName() == fieldName { + return file.GetPackage(), ext + } + } + } + return "", nil +} + +func (desc *FileDescriptorSet) FindExtensionByFieldNumber(packageName string, typeName string, fieldNum int32) (extPackageName string, field *FieldDescriptorProto) { + parent := desc.GetMessage(packageName, typeName) + if parent == nil { + return "", nil + } + if !parent.IsExtendable() { + return "", nil + } + extendee := "." + packageName + "." + typeName + for _, file := range desc.GetFile() { + for _, ext := range file.GetExtension() { + if strings.Map(dotToUnderscore, file.GetPackage()) == strings.Map(dotToUnderscore, packageName) { + if !(ext.GetExtendee() == typeName || ext.GetExtendee() == extendee) { + continue + } + } else { + if ext.GetExtendee() != extendee { + continue + } + } + if ext.GetNumber() == fieldNum { + return file.GetPackage(), ext + } + } + } + return "", nil +} + +func (desc *FileDescriptorSet) FindMessage(packageName string, typeName string, fieldName string) (msgPackageName string, msgName string) { + parent := desc.GetMessage(packageName, typeName) + if parent == nil { + return "", "" + } + field := parent.GetFieldDescriptor(fieldName) + if field == nil { + var extPackageName string + extPackageName, field = desc.FindExtension(packageName, typeName, fieldName) + if field == nil { + return "", "" + } + packageName = extPackageName + } + typeNames := strings.Split(field.GetTypeName(), ".") + if len(typeNames) == 1 { + msg := desc.GetMessage(packageName, typeName) + if msg == nil { + return "", "" + } + return packageName, msg.GetName() + } + if len(typeNames) > 2 { + for i := 1; i < len(typeNames)-1; i++ { + packageName = strings.Join(typeNames[1:len(typeNames)-i], ".") + typeName = strings.Join(typeNames[len(typeNames)-i:], ".") + msg := desc.GetMessage(packageName, typeName) + if msg != nil { + typeNames := strings.Split(msg.GetName(), ".") + if len(typeNames) == 1 { + return packageName, msg.GetName() + } + return strings.Join(typeNames[1:len(typeNames)-1], "."), typeNames[len(typeNames)-1] + } + } + } + return "", "" +} + +func (msg *DescriptorProto) GetFieldDescriptor(fieldName string) *FieldDescriptorProto { + for _, field := range msg.GetField() { + if field.GetName() == fieldName { + return field + } + } + return nil +} + +func (desc *FileDescriptorSet) GetEnum(packageName string, typeName string) *EnumDescriptorProto { + for _, file := range desc.GetFile() { + if strings.Map(dotToUnderscore, 
file.GetPackage()) != strings.Map(dotToUnderscore, packageName) { + continue + } + for _, enum := range file.GetEnumType() { + if enum.GetName() == typeName { + return enum + } + } + } + return nil +} + +func (f *FieldDescriptorProto) IsEnum() bool { + return *f.Type == FieldDescriptorProto_TYPE_ENUM +} + +func (f *FieldDescriptorProto) IsMessage() bool { + return *f.Type == FieldDescriptorProto_TYPE_MESSAGE +} + +func (f *FieldDescriptorProto) IsBytes() bool { + return *f.Type == FieldDescriptorProto_TYPE_BYTES +} + +func (f *FieldDescriptorProto) IsRepeated() bool { + return f.Label != nil && *f.Label == FieldDescriptorProto_LABEL_REPEATED +} + +func (f *FieldDescriptorProto) IsString() bool { + return *f.Type == FieldDescriptorProto_TYPE_STRING +} + +func (f *FieldDescriptorProto) IsRequired() bool { + return f.Label != nil && *f.Label == FieldDescriptorProto_LABEL_REQUIRED +} + +func (f *FieldDescriptorProto) IsPacked() bool { + return f.Options != nil && f.GetOptions().GetPacked() +} + +func (m *DescriptorProto) HasExtension() bool { + return len(m.ExtensionRange) > 0 +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/protoc-gen-gogo/generator/generator.go b/Godeps/_workspace/src/github.com/gogo/protobuf/protoc-gen-gogo/generator/generator.go new file mode 100644 index 000000000000..33c501b3e420 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/protoc-gen-gogo/generator/generator.go @@ -0,0 +1,2907 @@ +// Extensions for Protocol Buffers to create more go like structures. +// +// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. +// http://github.com/gogo/protobuf/gogoproto +// +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* + The code generator for the plugin for the Google protocol buffer compiler. + It generates Go code from the protocol buffer description files read by the + main routine. 
+*/ +package generator + +import ( + "bufio" + "bytes" + "fmt" + "go/parser" + "go/printer" + "go/token" + "log" + "os" + "path" + "sort" + "strconv" + "strings" + "unicode" + "unicode/utf8" + + "github.com/gogo/protobuf/gogoproto" + "github.com/gogo/protobuf/proto" + descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" + plugin "github.com/gogo/protobuf/protoc-gen-gogo/plugin" +) + +// A Plugin provides functionality to add to the output during Go code generation, +// such as to produce RPC stubs. +type Plugin interface { + // Name identifies the plugin. + Name() string + // Init is called once after data structures are built but before + // code generation begins. + Init(g *Generator) + // Generate produces the code generated by the plugin for this file, + // except for the imports, by calling the generator's methods P, In, and Out. + Generate(file *FileDescriptor) + // GenerateImports produces the import declarations for this file. + // It is called after Generate. + GenerateImports(file *FileDescriptor) +} + +type pluginSlice []Plugin + +func (ps pluginSlice) Len() int { + return len(ps) +} + +func (ps pluginSlice) Less(i, j int) bool { + return ps[i].Name() < ps[j].Name() +} + +func (ps pluginSlice) Swap(i, j int) { + ps[i], ps[j] = ps[j], ps[i] +} + +var plugins pluginSlice + +// RegisterPlugin installs a (second-order) plugin to be run when the Go output is generated. +// It is typically called during initialization. +func RegisterPlugin(p Plugin) { + plugins = append(plugins, p) +} + +// Each type we import as a protocol buffer (other than FileDescriptorProto) needs +// a pointer to the FileDescriptorProto that represents it. These types achieve that +// wrapping by placing each Proto inside a struct with the pointer to its File. The +// structs have the same names as their contents, with "Proto" removed. +// FileDescriptor is used to store the things that it points to. + +// The file and package name methods are common to messages and enums. +type common struct { + file *descriptor.FileDescriptorProto // File this object comes from. +} + +// PackageName is the name in the package clause in the generated file. +func (c *common) PackageName() string { return uniquePackageOf(c.file) } + +func (c *common) File() *descriptor.FileDescriptorProto { return c.file } + +func fileIsProto3(file *descriptor.FileDescriptorProto) bool { + return file.GetSyntax() == "proto3" +} + +func (c *common) proto3() bool { return fileIsProto3(c.file) } + +// Descriptor represents a protocol buffer message. +type Descriptor struct { + common + *descriptor.DescriptorProto + parent *Descriptor // The containing message, if any. + nested []*Descriptor // Inner messages, if any. + enums []*EnumDescriptor // Inner enums, if any. + ext []*ExtensionDescriptor // Extensions, if any. + typename []string // Cached typename vector. + index int // The index into the container, whether the file or another message. + path string // The SourceCodeInfo path as comma-separated integers. + group bool +} + +// TypeName returns the elements of the dotted type name. +// The package name is not part of this name.
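// Editor's sketch (hypothetical, not part of the vendored file): a minimal
// implementation of the Plugin interface above, registered during init as
// RegisterPlugin expects; Generate emits output through the generator's P
// method:
//
//	type nopPlugin struct{ gen *Generator }
//
//	func (p *nopPlugin) Name() string               { return "nop" }
//	func (p *nopPlugin) Init(g *Generator)          { p.gen = g }
//	func (p *nopPlugin) Generate(f *FileDescriptor) { p.gen.P("// emitted by the nop plugin") }
//	func (p *nopPlugin) GenerateImports(f *FileDescriptor) {}
//
//	func init() { RegisterPlugin(&nopPlugin{}) }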
+func (d *Descriptor) TypeName() []string { + if d.typename != nil { + return d.typename + } + n := 0 + for parent := d; parent != nil; parent = parent.parent { + n++ + } + s := make([]string, n, n) + for parent := d; parent != nil; parent = parent.parent { + n-- + s[n] = parent.GetName() + } + d.typename = s + return s +} + +func (d *Descriptor) allowOneof() bool { + return true +} + +// EnumDescriptor describes an enum. If it's at top level, its parent will be nil. +// Otherwise it will be the descriptor of the message in which it is defined. +type EnumDescriptor struct { + common + *descriptor.EnumDescriptorProto + parent *Descriptor // The containing message, if any. + typename []string // Cached typename vector. + index int // The index into the container, whether the file or a message. + path string // The SourceCodeInfo path as comma-separated integers. +} + +// TypeName returns the elements of the dotted type name. +// The package name is not part of this name. +func (e *EnumDescriptor) TypeName() (s []string) { + if e.typename != nil { + return e.typename + } + name := e.GetName() + if e.parent == nil { + s = make([]string, 1) + } else { + pname := e.parent.TypeName() + s = make([]string, len(pname)+1) + copy(s, pname) + } + s[len(s)-1] = name + e.typename = s + return s +} + +// Everything but the last element of the full type name, CamelCased. +// The values of type Foo.Bar are called Foo_value1... not Foo_Bar_value1... . +func (e *EnumDescriptor) prefix() string { + if e.parent == nil { + // If the enum is not part of a message, the prefix is just the type name. + return CamelCase(*e.Name) + "_" + } + typeName := e.TypeName() + return CamelCaseSlice(typeName[0:len(typeName)-1]) + "_" +} + +// The integer value of the named constant in this enumerated type. +func (e *EnumDescriptor) integerValueAsString(name string) string { + for _, c := range e.Value { + if c.GetName() == name { + return fmt.Sprint(c.GetNumber()) + } + } + log.Fatal("cannot find value for enum constant") + return "" +} + +// ExtensionDescriptor describes an extension. If it's at top level, its parent will be nil. +// Otherwise it will be the descriptor of the message in which it is defined. +type ExtensionDescriptor struct { + common + *descriptor.FieldDescriptorProto + parent *Descriptor // The containing message, if any. +} + +// TypeName returns the elements of the dotted type name. +// The package name is not part of this name. +func (e *ExtensionDescriptor) TypeName() (s []string) { + name := e.GetName() + if e.parent == nil { + // top-level extension + s = make([]string, 1) + } else { + pname := e.parent.TypeName() + s = make([]string, len(pname)+1) + copy(s, pname) + } + s[len(s)-1] = name + return s +} + +// DescName returns the variable name used for the generated descriptor. +func (e *ExtensionDescriptor) DescName() string { + // The full type name. + typeName := e.TypeName() + // Each scope of the extension is individually CamelCased, and all are joined with "_" with an "E_" prefix. + for i, s := range typeName { + typeName[i] = CamelCase(s) + } + return "E_" + strings.Join(typeName, "_") +} + +// ImportedDescriptor describes a type that has been publicly imported from another file. +type ImportedDescriptor struct { + common + o Object +} + +func (id *ImportedDescriptor) TypeName() []string { return id.o.TypeName() } + +// FileDescriptor describes a protocol buffer descriptor file (.proto). +// It includes slices of all the messages and enums defined within it.
+// Those slices are constructed by WrapTypes. +type FileDescriptor struct { + *descriptor.FileDescriptorProto + desc []*Descriptor // All the messages defined in this file. + enum []*EnumDescriptor // All the enums defined in this file. + ext []*ExtensionDescriptor // All the top-level extensions defined in this file. + imp []*ImportedDescriptor // All types defined in files publicly imported by this file. + + // Comments, stored as a map of path (comma-separated integers) to the comment. + comments map[string]*descriptor.SourceCodeInfo_Location + + // The full list of symbols that are exported, + // as a map from the exported object to its symbols. + // This is used for supporting public imports. + exported map[Object][]symbol + + index int // The index of this file in the list of files to generate code for + + proto3 bool // whether to generate proto3 code for this file +} + +// PackageName is the package name we'll use in the generated code to refer to this file. +func (d *FileDescriptor) PackageName() string { return uniquePackageOf(d.FileDescriptorProto) } + +// goPackageName returns the Go package name to use in the +// generated Go file. The result explicit reports whether the name +// came from an option go_package statement. If explicit is false, +// the name was derived from the protocol buffer's package statement +// or the input file name. +func (d *FileDescriptor) goPackageName() (name string, explicit bool) { + // Does the file have a "go_package" option? + if opts := d.Options; opts != nil { + if pkg := opts.GetGoPackage(); pkg != "" { + return pkg, true + } + } + + // Does the file have a package clause? + if pkg := d.GetPackage(); pkg != "" { + return pkg, false + } + // Use the file base name. + return baseName(d.GetName()), false +} + +func (d *FileDescriptor) addExport(obj Object, sym symbol) { + d.exported[obj] = append(d.exported[obj], sym) +} + +// symbol is an interface representing an exported Go symbol. +type symbol interface { + // GenerateAlias should generate an appropriate alias + // for the symbol from the named package. + GenerateAlias(g *Generator, pkg string) +} + +type messageSymbol struct { + sym string + hasExtensions, isMessageSet bool + hasOneof bool + getters []getterSymbol +} + +type getterSymbol struct { + name string + typ string + typeName string // canonical name in proto world; empty for proto.Message and similar + genType bool // whether typ contains a generated type (message/group/enum) +} + +func (ms *messageSymbol) GenerateAlias(g *Generator, pkg string) { + remoteSym := pkg + "." + ms.sym + + g.P("type ", ms.sym, " ", remoteSym) + g.P("func (m *", ms.sym, ") Reset() { (*", remoteSym, ")(m).Reset() }") + g.P("func (m *", ms.sym, ") String() string { return (*", remoteSym, ")(m).String() }") + g.P("func (*", ms.sym, ") ProtoMessage() {}") + if ms.hasExtensions { + g.P("func (*", ms.sym, ") ExtensionRangeArray() []", g.Pkg["proto"], ".ExtensionRange ", + "{ return (*", remoteSym, ")(nil).ExtensionRangeArray() }") + g.P("func (m *", ms.sym, ") ExtensionMap() map[int32]", g.Pkg["proto"], ".Extension ", + "{ return (*", remoteSym, ")(m).ExtensionMap() }") + if ms.isMessageSet { + g.P("func (m *", ms.sym, ") Marshal() ([]byte, error) ", + "{ return (*", remoteSym, ")(m).Marshal() }") + g.P("func (m *", ms.sym, ") Unmarshal(buf []byte) error ", + "{ return (*", remoteSym, ")(m).Unmarshal(buf) }") + } + } + if ms.hasOneof { + // Oneofs and public imports do not mix well. 
+ // We can make them work okay for the binary format, + // but they're going to break weirdly for text/JSON. + enc := "_" + ms.sym + "_OneofMarshaler" + dec := "_" + ms.sym + "_OneofUnmarshaler" + encSig := "(msg " + g.Pkg["proto"] + ".Message, b *" + g.Pkg["proto"] + ".Buffer) error" + decSig := "(msg " + g.Pkg["proto"] + ".Message, tag, wire int, b *" + g.Pkg["proto"] + ".Buffer) (bool, error)" + g.P("func (m *", ms.sym, ") XXX_OneofFuncs() (func", encSig, ", func", decSig, ", []interface{}) {") + g.P("return ", enc, ", ", dec, ", nil") + g.P("}") + + g.P("func ", enc, encSig, " {") + g.P("m := msg.(*", ms.sym, ")") + g.P("m0 := (*", remoteSym, ")(m)") + g.P("enc, _, _ := m0.XXX_OneofFuncs()") + g.P("return enc(m0, b)") + g.P("}") + + g.P("func ", dec, decSig, " {") + g.P("m := msg.(*", ms.sym, ")") + g.P("m0 := (*", remoteSym, ")(m)") + g.P("_, dec, _ := m0.XXX_OneofFuncs()") + g.P("return dec(m0, tag, wire, b)") + g.P("}") + } + for _, get := range ms.getters { + + if get.typeName != "" { + g.RecordTypeUse(get.typeName) + } + typ := get.typ + val := "(*" + remoteSym + ")(m)." + get.name + "()" + if get.genType { + // typ will be "*pkg.T" (message/group) or "pkg.T" (enum) + // or "map[t]*pkg.T" (map to message/enum). + // The first two of those might have a "[]" prefix if it is repeated. + // Drop any package qualifier since we have hoisted the type into this package. + rep := strings.HasPrefix(typ, "[]") + if rep { + typ = typ[2:] + } + isMap := strings.HasPrefix(typ, "map[") + star := typ[0] == '*' + if !isMap { // map types handled lower down + typ = typ[strings.Index(typ, ".")+1:] + } + if star { + typ = "*" + typ + } + if rep { + // Go does not permit conversion between slice types where both + // element types are named. That means we need to generate a bit + // of code in this situation. + // typ is the element type. + // val is the expression to get the slice from the imported type. + + ctyp := typ // conversion type expression; "Foo" or "(*Foo)" + if star { + ctyp = "(" + typ + ")" + } + + g.P("func (m *", ms.sym, ") ", get.name, "() []", typ, " {") + g.In() + g.P("o := ", val) + g.P("if o == nil {") + g.In() + g.P("return nil") + g.Out() + g.P("}") + g.P("s := make([]", typ, ", len(o))") + g.P("for i, x := range o {") + g.In() + g.P("s[i] = ", ctyp, "(x)") + g.Out() + g.P("}") + g.P("return s") + g.Out() + g.P("}") + continue + } + if isMap { + // Split map[keyTyp]valTyp. + bra, ket := strings.Index(typ, "["), strings.Index(typ, "]") + keyTyp, valTyp := typ[bra+1:ket], typ[ket+1:] + // Drop any package qualifier. + // Only the value type may be foreign. + star := valTyp[0] == '*' + valTyp = valTyp[strings.Index(valTyp, ".")+1:] + if star { + valTyp = "*" + valTyp + } + + typ := "map[" + keyTyp + "]" + valTyp + g.P("func (m *", ms.sym, ") ", get.name, "() ", typ, " {") + g.P("o := ", val) + g.P("if o == nil { return nil }") + g.P("s := make(", typ, ", len(o))") + g.P("for k, v := range o {") + g.P("s[k] = (", valTyp, ")(v)") + g.P("}") + g.P("return s") + g.P("}") + continue + } + // Convert imported type into the forwarding type. + val = "(" + typ + ")(" + val + ")" + } + + g.P("func (m *", ms.sym, ") ", get.name, "() ", typ, " { return ", val, " }") + } + +} + +type enumSymbol struct { + name string + proto3 bool // Whether this came from a proto3 file. 
+} + +func (es enumSymbol) GenerateAlias(g *Generator, pkg string) { + s := es.name + g.P("type ", s, " ", pkg, ".", s) + g.P("var ", s, "_name = ", pkg, ".", s, "_name") + g.P("var ", s, "_value = ", pkg, ".", s, "_value") + g.P("func (x ", s, ") String() string { return (", pkg, ".", s, ")(x).String() }") + if !es.proto3 { + g.P("func (x ", s, ") Enum() *", s, "{ return (*", s, ")((", pkg, ".", s, ")(x).Enum()) }") + g.P("func (x *", s, ") UnmarshalJSON(data []byte) error { return (*", pkg, ".", s, ")(x).UnmarshalJSON(data) }") + } +} + +type constOrVarSymbol struct { + sym string + typ string // either "const" or "var" + cast string // if non-empty, a type cast is required (used for enums) +} + +func (cs constOrVarSymbol) GenerateAlias(g *Generator, pkg string) { + v := pkg + "." + cs.sym + if cs.cast != "" { + v = cs.cast + "(" + v + ")" + } + g.P(cs.typ, " ", cs.sym, " = ", v) +} + +// Object is an interface abstracting the abilities shared by enums, messages, extensions and imported objects. +type Object interface { + PackageName() string // The name we use in our output (a_b_c), possibly renamed for uniqueness. + TypeName() []string + File() *descriptor.FileDescriptorProto +} + +// Each package name we generate must be unique. The package we're generating +// gets its own name but every other package must have a unique name that does +// not conflict in the code we generate. These names are chosen globally (although +// they don't have to be, it simplifies things to do them globally). +func uniquePackageOf(fd *descriptor.FileDescriptorProto) string { + s, ok := uniquePackageName[fd] + if !ok { + log.Fatal("internal error: no package name defined for " + fd.GetName()) + } + return s +} + +// Generator is the type whose methods generate the output, stored in the associated response structure. +type Generator struct { + *bytes.Buffer + + Request *plugin.CodeGeneratorRequest // The input. + Response *plugin.CodeGeneratorResponse // The output. + + Param map[string]string // Command-line parameters. + PackageImportPath string // Go import path of the package we're generating code for + ImportPrefix string // String to prefix to imported package file names. + ImportMap map[string]string // Mapping from import name to generated name + + Pkg map[string]string // The names under which we import support packages + + packageName string // What we're calling ourselves. + allFiles []*FileDescriptor // All files in the tree + allFilesByName map[string]*FileDescriptor // All files by filename. + genFiles []*FileDescriptor // Those files we will generate output for. + file *FileDescriptor // The file we are compiling now. + usedPackages map[string]bool // Names of packages used in current file. + typeNameToObject map[string]Object // Key is a fully-qualified name in input syntax. + init []string // Lines to emit in the init function. + indent string + writeOutput bool + + customImports []string + writtenImports map[string]bool // For de-duplicating written imports +} + +// New creates a new generator and allocates the request and response protobufs. +func New() *Generator { + g := new(Generator) + g.Buffer = new(bytes.Buffer) + g.Request = new(plugin.CodeGeneratorRequest) + g.Response = new(plugin.CodeGeneratorResponse) + g.writtenImports = make(map[string]bool) + uniquePackageName = make(map[*descriptor.FileDescriptorProto]string) + pkgNamesInUse = make(map[string][]*FileDescriptor) + return g +} + +// Error reports a problem, including an error, and exits the program. 
+func (g *Generator) Error(err error, msgs ...string) { + s := strings.Join(msgs, " ") + ":" + err.Error() + log.Print("protoc-gen-gogo: error:", s) + os.Exit(1) +} + +// Fail reports a problem and exits the program. +func (g *Generator) Fail(msgs ...string) { + s := strings.Join(msgs, " ") + log.Print("protoc-gen-gogo: error:", s) + os.Exit(1) +} + +// CommandLineParameters breaks the comma-separated list of key=value pairs +// in the parameter (a member of the request protobuf) into a key/value map. +// It then sets file name mappings defined by those entries. +func (g *Generator) CommandLineParameters(parameter string) { + g.Param = make(map[string]string) + for _, p := range strings.Split(parameter, ",") { + if i := strings.Index(p, "="); i < 0 { + g.Param[p] = "" + } else { + g.Param[p[0:i]] = p[i+1:] + } + } + + g.ImportMap = make(map[string]string) + pluginList := "none" // Default list of plugin names to enable (empty means all). + for k, v := range g.Param { + switch k { + case "import_prefix": + g.ImportPrefix = v + case "import_path": + g.PackageImportPath = v + case "plugins": + pluginList = v + default: + if len(k) > 0 && k[0] == 'M' { + g.ImportMap[k[1:]] = v + } + } + } + + if pluginList == "" { + return + } + if pluginList == "none" { + pluginList = "" + } + gogoPluginNames := []string{"unmarshal", "unsafeunmarshaler", "union", "stringer", "size", "protosizer", "populate", "marshalto", "unsafemarshaler", "gostring", "face", "equal", "enumstringer", "embedcheck", "description", "defaultcheck", "oneofcheck"} + pluginList = strings.Join(append(gogoPluginNames, pluginList), "+") + if pluginList != "" { + // Amend the set of plugins. + enabled := make(map[string]bool) + for _, name := range strings.Split(pluginList, "+") { + enabled[name] = true + } + var nplugins pluginSlice + for _, p := range plugins { + if enabled[p.Name()] { + nplugins = append(nplugins, p) + } + } + sort.Sort(nplugins) + plugins = nplugins + } +} + +// DefaultPackageName returns the package name printed for the object. +// If its file is in a different package, it returns the package name we're using for this file, plus ".". +// Otherwise it returns the empty string. +func (g *Generator) DefaultPackageName(obj Object) string { + pkg := obj.PackageName() + if pkg == g.packageName { + return "" + } + return pkg + "." +} + +// For each input file, the unique package name to use, underscored. +var uniquePackageName = make(map[*descriptor.FileDescriptorProto]string) + +// Package names already registered. Key is the name from the .proto file; +// value is the name that appears in the generated code. +var pkgNamesInUse = make(map[string][]*FileDescriptor) + +// Create and remember a guaranteed unique package name for this file descriptor. +// Pkg is the candidate name. If f is nil, it's a builtin package like "proto" and +// has no file descriptor. +func RegisterUniquePackageName(pkg string, f *FileDescriptor) string { + // Convert dots to underscores before finding a unique alias. 
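// Editor's note (illustration, not part of the vendored file): given the
// bookkeeping below, repeated registrations of the same proto package name
// from different files pick up numeric suffixes, e.g.:
//
//	RegisterUniquePackageName("foo.bar", f1) // "foo_bar"
//	RegisterUniquePackageName("foo.bar", f2) // "foo_bar1"
//	RegisterUniquePackageName("foo.bar", f1) // "foo_bar" (already registered)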
+ pkg = strings.Map(badToUnderscore, pkg) + + var i = -1 + var ptr *FileDescriptor = nil + for i, ptr = range pkgNamesInUse[pkg] { + if ptr == f { + if i == 0 { + return pkg + } + return pkg + strconv.Itoa(i) + } + } + + pkgNamesInUse[pkg] = append(pkgNamesInUse[pkg], f) + i += 1 + + if i > 0 { + pkg = pkg + strconv.Itoa(i) + } + + if f != nil { + uniquePackageName[f.FileDescriptorProto] = pkg + } + return pkg +} + +var isGoKeyword = map[string]bool{ + "break": true, + "case": true, + "chan": true, + "const": true, + "continue": true, + "default": true, + "else": true, + "defer": true, + "fallthrough": true, + "for": true, + "func": true, + "go": true, + "goto": true, + "if": true, + "import": true, + "interface": true, + "map": true, + "package": true, + "range": true, + "return": true, + "select": true, + "struct": true, + "switch": true, + "type": true, + "var": true, +} + +// defaultGoPackage returns the package name to use, +// derived from the import path of the package we're building code for. +func (g *Generator) defaultGoPackage() string { + p := g.PackageImportPath + if i := strings.LastIndex(p, "/"); i >= 0 { + p = p[i+1:] + } + if p == "" { + return "" + } + + p = strings.Map(badToUnderscore, p) + // Identifier must not be keyword: insert _. + if isGoKeyword[p] { + p = "_" + p + } + // Identifier must not begin with digit: insert _. + if r, _ := utf8.DecodeRuneInString(p); unicode.IsDigit(r) { + p = "_" + p + } + return p +} + +// SetPackageNames sets the package name for this run. +// The package name must agree across all files being generated. +// It also defines unique package names for all imported files. +func (g *Generator) SetPackageNames() { + // Register the name for this package. It will be the first name + // registered so is guaranteed to be unmodified. + pkg, explicit := g.genFiles[0].goPackageName() + + // Check all files for an explicit go_package option. + for _, f := range g.genFiles { + thisPkg, thisExplicit := f.goPackageName() + if thisExplicit { + if !explicit { + // Let this file's go_package option serve for all input files. + pkg, explicit = thisPkg, true + } else if thisPkg != pkg { + g.Fail("inconsistent package names:", thisPkg, pkg) + } + } + } + + // If we don't have an explicit go_package option but we have an + // import path, use that. + if !explicit { + p := g.defaultGoPackage() + if p != "" { + pkg, explicit = p, true + } + } + + // If there was no go_package and no import path to use, + // double-check that all the inputs have the same implicit + // Go package name. + if !explicit { + for _, f := range g.genFiles { + thisPkg, _ := f.goPackageName() + if thisPkg != pkg { + g.Fail("inconsistent package names:", thisPkg, pkg) + } + } + } + + g.packageName = RegisterUniquePackageName(pkg, g.genFiles[0]) + + // Register the support package names. They might collide with the + // name of a package we import. + g.Pkg = map[string]string{ + "fmt": RegisterUniquePackageName("fmt", nil), + "math": RegisterUniquePackageName("math", nil), + "proto": RegisterUniquePackageName("proto", nil), + } + +AllFiles: + for _, f := range g.allFiles { + for _, genf := range g.genFiles { + if f == genf { + // In this package already. + uniquePackageName[f.FileDescriptorProto] = g.packageName + continue AllFiles + } + } + // The file is a dependency, so we want to ignore its go_package option + // because that is only relevant for its specific generated output. 
+ pkg := f.GetPackage() + if pkg == "" { + pkg = baseName(*f.Name) + } + RegisterUniquePackageName(pkg, f) + } +} + +// WrapTypes walks the incoming data, wrapping DescriptorProtos, EnumDescriptorProtos +// and FileDescriptorProtos into file-referenced objects within the Generator. +// It also creates the list of files to generate and so should be called before GenerateAllFiles. +func (g *Generator) WrapTypes() { + g.allFiles = make([]*FileDescriptor, len(g.Request.ProtoFile)) + g.allFilesByName = make(map[string]*FileDescriptor, len(g.allFiles)) + for i, f := range g.Request.ProtoFile { + // We must wrap the descriptors before we wrap the enums + descs := wrapDescriptors(f) + g.buildNestedDescriptors(descs) + enums := wrapEnumDescriptors(f, descs) + g.buildNestedEnums(descs, enums) + exts := wrapExtensions(f) + fd := &FileDescriptor{ + FileDescriptorProto: f, + desc: descs, + enum: enums, + ext: exts, + exported: make(map[Object][]symbol), + proto3: fileIsProto3(f), + } + extractComments(fd) + g.allFiles[i] = fd + g.allFilesByName[f.GetName()] = fd + } + for _, fd := range g.allFiles { + fd.imp = wrapImported(fd.FileDescriptorProto, g) + } + + g.genFiles = make([]*FileDescriptor, len(g.Request.FileToGenerate)) + for i, fileName := range g.Request.FileToGenerate { + g.genFiles[i] = g.allFilesByName[fileName] + if g.genFiles[i] == nil { + g.Fail("could not find file named", fileName) + } + g.genFiles[i].index = i + } + g.Response.File = make([]*plugin.CodeGeneratorResponse_File, len(g.genFiles)) +} + +// Scan the descriptors in this file. For each one, build the slice of nested descriptors +func (g *Generator) buildNestedDescriptors(descs []*Descriptor) { + for _, desc := range descs { + if len(desc.NestedType) != 0 { + for _, nest := range descs { + if nest.parent == desc { + desc.nested = append(desc.nested, nest) + } + } + if len(desc.nested) != len(desc.NestedType) { + g.Fail("internal error: nesting failure for", desc.GetName()) + } + } + } +} + +func (g *Generator) buildNestedEnums(descs []*Descriptor, enums []*EnumDescriptor) { + for _, desc := range descs { + if len(desc.EnumType) != 0 { + for _, enum := range enums { + if enum.parent == desc { + desc.enums = append(desc.enums, enum) + } + } + if len(desc.enums) != len(desc.EnumType) { + g.Fail("internal error: enum nesting failure for", desc.GetName()) + } + } + } +} + +// Construct the Descriptor +func newDescriptor(desc *descriptor.DescriptorProto, parent *Descriptor, file *descriptor.FileDescriptorProto, index int) *Descriptor { + d := &Descriptor{ + common: common{file}, + DescriptorProto: desc, + parent: parent, + index: index, + } + if parent == nil { + d.path = fmt.Sprintf("%d,%d", messagePath, index) + } else { + d.path = fmt.Sprintf("%s,%d,%d", parent.path, messageMessagePath, index) + } + + // The only way to distinguish a group from a message is whether + // the containing message has a TYPE_GROUP field that matches. + if parent != nil { + parts := d.TypeName() + if file.Package != nil { + parts = append([]string{*file.Package}, parts...) + } + exp := "." 
+ strings.Join(parts, ".") + for _, field := range parent.Field { + if field.GetType() == descriptor.FieldDescriptorProto_TYPE_GROUP && field.GetTypeName() == exp { + d.group = true + break + } + } + } + + d.ext = make([]*ExtensionDescriptor, len(desc.Extension)) + for i, field := range desc.Extension { + d.ext[i] = &ExtensionDescriptor{common{file}, field, d} + } + + return d +} + +// Return a slice of all the Descriptors defined within this file +func wrapDescriptors(file *descriptor.FileDescriptorProto) []*Descriptor { + sl := make([]*Descriptor, 0, len(file.MessageType)+10) + for i, desc := range file.MessageType { + sl = wrapThisDescriptor(sl, desc, nil, file, i) + } + return sl +} + +// Wrap this Descriptor, recursively +func wrapThisDescriptor(sl []*Descriptor, desc *descriptor.DescriptorProto, parent *Descriptor, file *descriptor.FileDescriptorProto, index int) []*Descriptor { + sl = append(sl, newDescriptor(desc, parent, file, index)) + me := sl[len(sl)-1] + for i, nested := range desc.NestedType { + sl = wrapThisDescriptor(sl, nested, me, file, i) + } + return sl +} + +// Construct the EnumDescriptor +func newEnumDescriptor(desc *descriptor.EnumDescriptorProto, parent *Descriptor, file *descriptor.FileDescriptorProto, index int) *EnumDescriptor { + ed := &EnumDescriptor{ + common: common{file}, + EnumDescriptorProto: desc, + parent: parent, + index: index, + } + if parent == nil { + ed.path = fmt.Sprintf("%d,%d", enumPath, index) + } else { + ed.path = fmt.Sprintf("%s,%d,%d", parent.path, messageEnumPath, index) + } + return ed +} + +// Return a slice of all the EnumDescriptors defined within this file +func wrapEnumDescriptors(file *descriptor.FileDescriptorProto, descs []*Descriptor) []*EnumDescriptor { + sl := make([]*EnumDescriptor, 0, len(file.EnumType)+10) + // Top-level enums. + for i, enum := range file.EnumType { + sl = append(sl, newEnumDescriptor(enum, nil, file, i)) + } + // Enums within messages. Enums within embedded messages appear in the outer-most message. + for _, nested := range descs { + for i, enum := range nested.EnumType { + sl = append(sl, newEnumDescriptor(enum, nested, file, i)) + } + } + return sl +} + +// Return a slice of all the top-level ExtensionDescriptors defined within this file. +func wrapExtensions(file *descriptor.FileDescriptorProto) []*ExtensionDescriptor { + sl := make([]*ExtensionDescriptor, len(file.Extension)) + for i, field := range file.Extension { + sl[i] = &ExtensionDescriptor{common{file}, field, nil} + } + return sl +} + +// Return a slice of all the types that are publicly imported into this file. 
+func wrapImported(file *descriptor.FileDescriptorProto, g *Generator) (sl []*ImportedDescriptor) { + for _, index := range file.PublicDependency { + df := g.fileByName(file.Dependency[index]) + for _, d := range df.desc { + if d.GetOptions().GetMapEntry() { + continue + } + sl = append(sl, &ImportedDescriptor{common{file}, d}) + } + for _, e := range df.enum { + sl = append(sl, &ImportedDescriptor{common{file}, e}) + } + for _, ext := range df.ext { + sl = append(sl, &ImportedDescriptor{common{file}, ext}) + } + } + return +} + +func extractComments(file *FileDescriptor) { + file.comments = make(map[string]*descriptor.SourceCodeInfo_Location) + for _, loc := range file.GetSourceCodeInfo().GetLocation() { + if loc.LeadingComments == nil { + continue + } + var p []string + for _, n := range loc.Path { + p = append(p, strconv.Itoa(int(n))) + } + file.comments[strings.Join(p, ",")] = loc + } +} + +// BuildTypeNameMap builds the map from fully qualified type names to objects. +// The key names for the map come from the input data, which puts a period at the beginning. +// It should be called after SetPackageNames and before GenerateAllFiles. +func (g *Generator) BuildTypeNameMap() { + g.typeNameToObject = make(map[string]Object) + for _, f := range g.allFiles { + // The names in this loop are defined by the proto world, not us, so the + // package name may be empty. If so, the dotted package name of X will + // be ".X"; otherwise it will be ".pkg.X". + dottedPkg := "." + f.GetPackage() + if dottedPkg != "." { + dottedPkg += "." + } + for _, enum := range f.enum { + name := dottedPkg + dottedSlice(enum.TypeName()) + g.typeNameToObject[name] = enum + } + for _, desc := range f.desc { + name := dottedPkg + dottedSlice(desc.TypeName()) + g.typeNameToObject[name] = desc + } + } +} + +// ObjectNamed, given a fully-qualified input type name as it appears in the input data, +// returns the descriptor for the message or enum with that name. +func (g *Generator) ObjectNamed(typeName string) Object { + o, ok := g.typeNameToObject[typeName] + if !ok { + g.Fail("can't find object with type", typeName) + } + + // If the file of this object isn't a direct dependency of the current file, + // or in the current file, then this object has been publicly imported into + // a dependency of the current file. + // We should return the ImportedDescriptor object for it instead. + direct := *o.File().Name == *g.file.Name + if !direct { + for _, dep := range g.file.Dependency { + if *g.fileByName(dep).Name == *o.File().Name { + direct = true + break + } + } + } + if !direct { + found := false + Loop: + for _, dep := range g.file.Dependency { + df := g.fileByName(*g.fileByName(dep).Name) + for _, td := range df.imp { + if td.o == o { + // Found it! + o = td + found = true + break Loop + } + } + } + if !found { + log.Printf("protoc-gen-gogo: WARNING: failed finding publicly imported dependency for %v, used in %v", typeName, *g.file.Name) + } + } + + return o +} + +// P prints the arguments to the generated output. It handles strings and int32s, plus +// handling indirections because they may be *string, etc. 
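+// A typical call mixes string literals with descriptor fields, for example:
+//
+//	g.P("type ", ccTypeName, " int32")
+//
+// which emits one indented line of generated Go followed by a newline.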
+func (g *Generator) P(str ...interface{}) {
+	if !g.writeOutput {
+		return
+	}
+	g.WriteString(g.indent)
+	for _, v := range str {
+		switch s := v.(type) {
+		case string:
+			g.WriteString(s)
+		case *string:
+			g.WriteString(*s)
+		case bool:
+			fmt.Fprintf(g, "%t", s)
+		case *bool:
+			fmt.Fprintf(g, "%t", *s)
+		case int:
+			fmt.Fprintf(g, "%d", s)
+		case *int32:
+			fmt.Fprintf(g, "%d", *s)
+		case *int64:
+			fmt.Fprintf(g, "%d", *s)
+		case float64:
+			fmt.Fprintf(g, "%g", s)
+		case *float64:
+			fmt.Fprintf(g, "%g", *s)
+		default:
+			g.Fail(fmt.Sprintf("unknown type in printer: %T", v))
+		}
+	}
+	g.WriteByte('\n')
+}
+
+// addInitf stores the given statement to be printed inside the file's init function.
+// The statement is given as a format specifier and arguments.
+func (g *Generator) addInitf(stmt string, a ...interface{}) {
+	g.init = append(g.init, fmt.Sprintf(stmt, a...))
+}
+
+func (g *Generator) PrintImport(alias, pkg string) {
+	statement := "import " + alias + " " + strconv.Quote(pkg)
+	if g.writtenImports[statement] {
+		return
+	}
+	g.P(statement)
+	g.writtenImports[statement] = true
+}
+
+// In indents the output one tab stop.
+func (g *Generator) In() { g.indent += "\t" }
+
+// Out unindents the output one tab stop.
+func (g *Generator) Out() {
+	if len(g.indent) > 0 {
+		g.indent = g.indent[1:]
+	}
+}
+
+// GenerateAllFiles generates the output for all the files we're outputting.
+func (g *Generator) GenerateAllFiles() {
+	// Initialize the plugins
+	for _, p := range plugins {
+		p.Init(g)
+	}
+	// Generate the output. The generator runs for every file, even the files
+	// that we don't generate output for, so that we can collate the full list
+	// of exported symbols to support public imports.
+	genFileMap := make(map[*FileDescriptor]bool, len(g.genFiles))
+	for _, file := range g.genFiles {
+		genFileMap[file] = true
+	}
+	i := 0
+	for _, file := range g.allFiles {
+		g.Reset()
+		g.writeOutput = genFileMap[file]
+		g.generate(file)
+		if !g.writeOutput {
+			continue
+		}
+		g.Response.File[i] = new(plugin.CodeGeneratorResponse_File)
+		g.Response.File[i].Name = proto.String(goFileName(*file.Name))
+		g.Response.File[i].Content = proto.String(g.String())
+		i++
+	}
+}
+
+// Run all the plugins associated with the file.
+func (g *Generator) runPlugins(file *FileDescriptor) {
+	for _, p := range plugins {
+		p.Generate(file)
+	}
+}
+
+// FileOf returns the FileDescriptor for this FileDescriptorProto.
+func (g *Generator) FileOf(fd *descriptor.FileDescriptorProto) *FileDescriptor {
+	for _, file := range g.allFiles {
+		if file.FileDescriptorProto == fd {
+			return file
+		}
+	}
+	g.Fail("could not find file in table:", fd.GetName())
+	return nil
+}
+
+// Fill the response protocol buffer with the generated output for all the files we're
+// supposed to generate.
+func (g *Generator) generate(file *FileDescriptor) {
+	g.customImports = make([]string, 0)
+	g.file = g.FileOf(file.FileDescriptorProto)
+	g.usedPackages = make(map[string]bool)
+	// Reset on each file
+	g.writtenImports = make(map[string]bool)
+
+	for _, td := range g.file.imp {
+		g.generateImported(td)
+	}
+	for _, enum := range g.file.enum {
+		g.generateEnum(enum)
+	}
+	for _, desc := range g.file.desc {
+		// Don't generate virtual messages for maps.
+		if desc.GetOptions().GetMapEntry() {
+			continue
+		}
+		g.generateMessage(desc)
+	}
+	for _, ext := range g.file.ext {
+		g.generateExtension(ext)
+	}
+	g.generateInitFunction()
+
+	// Run the plugins before the imports so we know which imports are necessary.
+	g.runPlugins(file)
+
+	// Generate header and imports last, though they appear first in the output.
+	rem := g.Buffer
+	g.Buffer = new(bytes.Buffer)
+	g.generateHeader()
+	g.generateImports()
+	if !g.writeOutput {
+		return
+	}
+	g.Write(rem.Bytes())
+
+	// Reformat generated code.
+	fset := token.NewFileSet()
+	raw := g.Bytes()
+	ast, err := parser.ParseFile(fset, "", g, parser.ParseComments)
+	if err != nil {
+		// Print out the bad code with line numbers.
+		// This should never happen in practice, but it can while changing generated code,
+		// so consider this a debugging aid.
+		var src bytes.Buffer
+		s := bufio.NewScanner(bytes.NewReader(raw))
+		for line := 1; s.Scan(); line++ {
+			fmt.Fprintf(&src, "%5d\t%s\n", line, s.Bytes())
+		}
+		if serr := s.Err(); serr != nil {
+			g.Fail("bad Go source code was generated:", err.Error(), "\n"+string(raw))
+		} else {
+			g.Fail("bad Go source code was generated:", err.Error(), "\n"+src.String())
+		}
+	}
+	g.Reset()
+	err = (&printer.Config{Mode: printer.TabIndent | printer.UseSpaces, Tabwidth: 8}).Fprint(g, fset, ast)
+	if err != nil {
+		g.Fail("generated Go source code could not be reformatted:", err.Error())
+	}
+}
+
+// Generate the header, including package definition
+func (g *Generator) generateHeader() {
+	g.P("// Code generated by protoc-gen-gogo.")
+	g.P("// source: ", *g.file.Name)
+	g.P("// DO NOT EDIT!")
+	g.P()
+
+	name := g.file.PackageName()
+
+	if g.file.index == 0 {
+		// Generate package docs for the first file in the package.
+		g.P("/*")
+		g.P("Package ", name, " is a generated protocol buffer package.")
+		g.P()
+		if loc, ok := g.file.comments[strconv.Itoa(packagePath)]; ok {
+			// not using g.PrintComments because this is a /* */ comment block.
+			text := strings.TrimSuffix(loc.GetLeadingComments(), "\n")
+			for _, line := range strings.Split(text, "\n") {
+				line = strings.TrimPrefix(line, " ")
+				// ensure we don't escape from the block comment
+				line = strings.Replace(line, "*/", "* /", -1)
+				g.P(line)
+			}
+			g.P()
+		}
+		var topMsgs []string
+		g.P("It is generated from these files:")
+		for _, f := range g.genFiles {
+			g.P("\t", f.Name)
+			for _, msg := range f.desc {
+				if msg.parent != nil {
+					continue
+				}
+				topMsgs = append(topMsgs, CamelCaseSlice(msg.TypeName()))
+			}
+		}
+		g.P()
+		g.P("It has these top-level messages:")
+		for _, msg := range topMsgs {
+			g.P("\t", msg)
+		}
+		g.P("*/")
+	}
+
+	g.P("package ", name)
+	g.P()
+}
+
+// PrintComments prints any comments from the source .proto file.
+// The path is a comma-separated list of integers.
+// It returns an indication of whether any comments were printed.
+// See descriptor.proto for its format.
+func (g *Generator) PrintComments(path string) bool {
+	if !g.writeOutput {
+		return false
+	}
+	if loc, ok := g.file.comments[path]; ok {
+		text := strings.TrimSuffix(loc.GetLeadingComments(), "\n")
+		for _, line := range strings.Split(text, "\n") {
+			g.P("// ", strings.TrimPrefix(line, " "))
+		}
+		return true
+	}
+	return false
+}
+
+// Comments returns any comments from the source .proto file, or the empty
+// string if no comments were found.
+// The path is a comma-separated list of integers.
+// See descriptor.proto for its format.
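+// For example, since messagePath is 4 and messageFieldPath is 2, the path
+// "4,0,2,1" addresses the comments attached to the second field of the first
+// message in the file.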
+func (g *Generator) Comments(path string) string { + loc, ok := g.file.comments[path] + if !ok { + return "" + } + text := strings.TrimSuffix(loc.GetLeadingComments(), "\n") + return text +} + +func (g *Generator) fileByName(filename string) *FileDescriptor { + return g.allFilesByName[filename] +} + +// weak returns whether the ith import of the current file is a weak import. +func (g *Generator) weak(i int32) bool { + for _, j := range g.file.WeakDependency { + if j == i { + return true + } + } + return false +} + +// Generate the imports +func (g *Generator) generateImports() { + // We almost always need a proto import. Rather than computing when we + // do, which is tricky when there's a plugin, just import it and + // reference it later. The same argument applies to the fmt and math packages. + if gogoproto.ImportsGoGoProto(g.file.FileDescriptorProto) { + g.PrintImport(g.Pkg["proto"], g.ImportPrefix+"github.com/gogo/protobuf/proto") + } else { + g.PrintImport(g.Pkg["proto"], g.ImportPrefix+"github.com/golang/protobuf/proto") + } + g.PrintImport(g.Pkg["fmt"], "fmt") + g.PrintImport(g.Pkg["math"], "math") + + for i, s := range g.file.Dependency { + fd := g.fileByName(s) + // Do not import our own package. + if fd.PackageName() == g.packageName { + continue + } + filename := goFileName(s) + // By default, import path is the dirname of the Go filename. + importPath := path.Dir(filename) + if substitution, ok := g.ImportMap[s]; ok { + importPath = substitution + } + importPath = g.ImportPrefix + importPath + // Skip weak imports. + if g.weak(int32(i)) { + g.P("// skipping weak import ", fd.PackageName(), " ", strconv.Quote(importPath)) + continue + } + // We need to import all the dependencies, even if we don't reference them, + // because other code and tools depend on having the full transitive closure + // of protocol buffer types in the binary. + if _, ok := g.usedPackages[fd.PackageName()]; ok { + g.PrintImport(fd.PackageName(), importPath) + } else { + g.P("import _ ", strconv.Quote(importPath)) + } + } + g.P() + for _, s := range g.customImports { + s1 := strings.Map(badToUnderscore, s) + g.PrintImport(s1, s) + } + g.P() + // TODO: may need to worry about uniqueness across plugins + for _, p := range plugins { + p.GenerateImports(g.file) + g.P() + } + g.P("// Reference imports to suppress errors if they are not otherwise used.") + g.P("var _ = ", g.Pkg["proto"], ".Marshal") + g.P("var _ = ", g.Pkg["fmt"], ".Errorf") + g.P("var _ = ", g.Pkg["math"], ".Inf") + g.P() +} + +func (g *Generator) generateImported(id *ImportedDescriptor) { + // Don't generate public import symbols for files that we are generating + // code for, since those symbols will already be in this package. + // We can't simply avoid creating the ImportedDescriptor objects, + // because g.genFiles isn't populated at that stage. + tn := id.TypeName() + sn := tn[len(tn)-1] + df := g.FileOf(id.o.File()) + filename := *df.Name + for _, fd := range g.genFiles { + if *fd.Name == filename { + g.P("// Ignoring public import of ", sn, " from ", filename) + g.P() + return + } + } + g.P("// ", sn, " from public import ", filename) + g.usedPackages[df.PackageName()] = true + + for _, sym := range df.exported[id.o] { + sym.GenerateAlias(g, df.PackageName()) + } + + g.P() +} + +// Generate the enum definitions for this EnumDescriptor. +func (g *Generator) generateEnum(enum *EnumDescriptor) { + // The full type name + typeName := enum.TypeName() + // The full type name, CamelCased. 
+ ccTypeName := CamelCaseSlice(typeName) + ccPrefix := enum.prefix() + + g.PrintComments(enum.path) + if !gogoproto.EnabledGoEnumPrefix(enum.file, enum.EnumDescriptorProto) { + ccPrefix = "" + } + g.P("type ", ccTypeName, " int32") + g.file.addExport(enum, enumSymbol{ccTypeName, enum.proto3()}) + g.P("const (") + g.In() + for i, e := range enum.Value { + g.PrintComments(fmt.Sprintf("%s,%d,%d", enum.path, enumValuePath, i)) + + name := ccPrefix + *e.Name + g.P(name, " ", ccTypeName, " = ", e.Number) + g.file.addExport(enum, constOrVarSymbol{name, "const", ccTypeName}) + } + g.Out() + g.P(")") + g.P("var ", ccTypeName, "_name = map[int32]string{") + g.In() + generated := make(map[int32]bool) // avoid duplicate values + for _, e := range enum.Value { + duplicate := "" + if _, present := generated[*e.Number]; present { + duplicate = "// Duplicate value: " + } + g.P(duplicate, e.Number, ": ", strconv.Quote(*e.Name), ",") + generated[*e.Number] = true + } + g.Out() + g.P("}") + g.P("var ", ccTypeName, "_value = map[string]int32{") + g.In() + for _, e := range enum.Value { + g.P(strconv.Quote(*e.Name), ": ", e.Number, ",") + } + g.Out() + g.P("}") + + if !enum.proto3() { + g.P("func (x ", ccTypeName, ") Enum() *", ccTypeName, " {") + g.In() + g.P("p := new(", ccTypeName, ")") + g.P("*p = x") + g.P("return p") + g.Out() + g.P("}") + } + + if gogoproto.IsGoEnumStringer(g.file.FileDescriptorProto, enum.EnumDescriptorProto) { + g.P("func (x ", ccTypeName, ") String() string {") + g.In() + g.P("return ", g.Pkg["proto"], ".EnumName(", ccTypeName, "_name, int32(x))") + g.Out() + g.P("}") + } + + if !enum.proto3() && !gogoproto.IsGoEnumStringer(g.file.FileDescriptorProto, enum.EnumDescriptorProto) { + g.P("func (x ", ccTypeName, ") MarshalJSON() ([]byte, error) {") + g.In() + g.P("return ", g.Pkg["proto"], ".MarshalJSONEnum(", ccTypeName, "_name, int32(x))") + g.Out() + g.P("}") + } + if !enum.proto3() { + g.P("func (x *", ccTypeName, ") UnmarshalJSON(data []byte) error {") + g.In() + g.P("value, err := ", g.Pkg["proto"], ".UnmarshalJSONEnum(", ccTypeName, `_value, data, "`, ccTypeName, `")`) + g.P("if err != nil {") + g.In() + g.P("return err") + g.Out() + g.P("}") + g.P("*x = ", ccTypeName, "(value)") + g.P("return nil") + g.Out() + g.P("}") + } + g.P() +} + +// The tag is a string like "varint,2,opt,name=fieldname,def=7" that +// identifies details of the field for the protocol buffer marshaling and unmarshaling +// code. The fields are: +// wire encoding +// protocol tag number +// opt,req,rep for optional, required, or repeated +// packed whether the encoding is "packed" (optional; repeated primitives only) +// name= the original declared name +// enum= the name of the enum type if it is an enum-typed field. +// proto3 if this field is in a proto3 message +// def= string representation of the default value, if any. +// The default value must be in a representation that can be used at run-time +// to generate the default value. Thus bools become 0 and 1, for instance. +func (g *Generator) goTag(message *Descriptor, field *descriptor.FieldDescriptorProto, wiretype string) string { + optrepreq := "" + switch { + case isOptional(field): + optrepreq = "opt" + case isRequired(field): + optrepreq = "req" + case isRepeated(field): + optrepreq = "rep" + } + var defaultValue string + if dv := field.DefaultValue; dv != nil { // set means an explicit default + defaultValue = *dv + // Some types need tweaking. 
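+		// Bools become "1"/"0", string and bytes values are left alone (the
+		// whole tag is quoted later), and enum defaults are rewritten from
+		// the value's name to its integer constant.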
+ switch *field.Type { + case descriptor.FieldDescriptorProto_TYPE_BOOL: + if defaultValue == "true" { + defaultValue = "1" + } else { + defaultValue = "0" + } + case descriptor.FieldDescriptorProto_TYPE_STRING, + descriptor.FieldDescriptorProto_TYPE_BYTES: + // Nothing to do. Quoting is done for the whole tag. + case descriptor.FieldDescriptorProto_TYPE_ENUM: + // For enums we need to provide the integer constant. + obj := g.ObjectNamed(field.GetTypeName()) + if id, ok := obj.(*ImportedDescriptor); ok { + // It is an enum that was publicly imported. + // We need the underlying type. + obj = id.o + } + enum, ok := obj.(*EnumDescriptor) + if !ok { + log.Printf("obj is a %T", obj) + if id, ok := obj.(*ImportedDescriptor); ok { + log.Printf("id.o is a %T", id.o) + } + g.Fail("unknown enum type", CamelCaseSlice(obj.TypeName())) + } + defaultValue = enum.integerValueAsString(defaultValue) + } + defaultValue = ",def=" + defaultValue + } + enum := "" + if *field.Type == descriptor.FieldDescriptorProto_TYPE_ENUM { + // We avoid using obj.PackageName(), because we want to use the + // original (proto-world) package name. + obj := g.ObjectNamed(field.GetTypeName()) + if id, ok := obj.(*ImportedDescriptor); ok { + obj = id.o + } + enum = ",enum=" + if pkg := obj.File().GetPackage(); pkg != "" { + enum += pkg + "." + } + enum += CamelCaseSlice(obj.TypeName()) + } + packed := "" + if field.Options != nil && field.Options.GetPacked() { + packed = ",packed" + } + fieldName := field.GetName() + name := fieldName + if *field.Type == descriptor.FieldDescriptorProto_TYPE_GROUP { + // We must use the type name for groups instead of + // the field name to preserve capitalization. + // type_name in FieldDescriptorProto is fully-qualified, + // but we only want the local part. + name = *field.TypeName + if i := strings.LastIndex(name, "."); i >= 0 { + name = name[i+1:] + } + } + name = ",name=" + name + + embed := "" + if gogoproto.IsEmbed(field) { + embed = ",embedded=" + fieldName + } + + ctype := "" + if gogoproto.IsCustomType(field) { + ctype = ",customtype=" + gogoproto.GetCustomType(field) + } + + casttype := "" + if gogoproto.IsCastType(field) { + casttype = ",casttype=" + gogoproto.GetCastType(field) + } + + castkey := "" + if gogoproto.IsCastKey(field) { + castkey = ",castkey=" + gogoproto.GetCastKey(field) + } + + castvalue := "" + if gogoproto.IsCastValue(field) { + castvalue = ",castvalue=" + gogoproto.GetCastValue(field) + // record the original message type for jsonpb reconstruction + desc := g.ObjectNamed(field.GetTypeName()) + if d, ok := desc.(*Descriptor); ok && d.GetOptions().GetMapEntry() { + valueField := d.Field[1] + if valueField.IsMessage() { + castvalue += ",castvaluetype=" + strings.TrimPrefix(valueField.GetTypeName(), ".") + } + } + } + + if message.proto3() { + // We only need the extra tag for []byte fields; + // no need to add noise for the others. 
+		if *field.Type != descriptor.FieldDescriptorProto_TYPE_MESSAGE &&
+			*field.Type != descriptor.FieldDescriptorProto_TYPE_GROUP &&
+			!field.IsRepeated() {
+			name += ",proto3"
+		}
+	}
+	oneof := ""
+	if field.OneofIndex != nil {
+		oneof = ",oneof"
+	}
+	return strconv.Quote(fmt.Sprintf("%s,%d,%s%s%s%s%s%s%s%s%s%s%s",
+		wiretype,
+		field.GetNumber(),
+		optrepreq,
+		packed,
+		name,
+		enum,
+		oneof,
+		defaultValue,
+		embed,
+		ctype,
+		casttype,
+		castkey,
+		castvalue))
+}
+
+func needsStar(field *descriptor.FieldDescriptorProto, proto3 bool, allowOneOf bool) bool {
+	if isRepeated(field) &&
+		(*field.Type != descriptor.FieldDescriptorProto_TYPE_MESSAGE) &&
+		(*field.Type != descriptor.FieldDescriptorProto_TYPE_GROUP) {
+		return false
+	}
+	if *field.Type == descriptor.FieldDescriptorProto_TYPE_BYTES && !gogoproto.IsCustomType(field) {
+		return false
+	}
+	if !gogoproto.IsNullable(field) {
+		return false
+	}
+	if field.OneofIndex != nil && allowOneOf &&
+		(*field.Type != descriptor.FieldDescriptorProto_TYPE_MESSAGE) &&
+		(*field.Type != descriptor.FieldDescriptorProto_TYPE_GROUP) {
+		return false
+	}
+	if proto3 &&
+		(*field.Type != descriptor.FieldDescriptorProto_TYPE_MESSAGE) &&
+		(*field.Type != descriptor.FieldDescriptorProto_TYPE_GROUP) &&
+		!gogoproto.IsCustomType(field) {
+		return false
+	}
+	return true
+}
+
+// TypeName is the printed name appropriate for an item. If the object is in the current file,
+// TypeName drops the package name and underscores the rest.
+// Otherwise the object is from another package; the result is the underscored
+// package name followed by the item name.
+// The result always has an initial capital.
+func (g *Generator) TypeName(obj Object) string {
+	return g.DefaultPackageName(obj) + CamelCaseSlice(obj.TypeName())
+}
+
+// TypeNameWithPackage is like TypeName, but always includes the package
+// name even if the object is in our own package.
+func (g *Generator) TypeNameWithPackage(obj Object) string {
+	return obj.PackageName() + CamelCaseSlice(obj.TypeName())
+}
+
+// GoType returns a string representing the type name, and the wire type
+func (g *Generator) GoType(message *Descriptor, field *descriptor.FieldDescriptorProto) (typ string, wire string) {
+	// TODO: Options.
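+	// The switch below pairs every proto scalar type with its Go type and
+	// wire encoding; for example TYPE_SINT64 maps to ("int64", "zigzag64").
+	// The wire string is what goTag later embeds in the struct tag.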
+ switch *field.Type { + case descriptor.FieldDescriptorProto_TYPE_DOUBLE: + typ, wire = "float64", "fixed64" + case descriptor.FieldDescriptorProto_TYPE_FLOAT: + typ, wire = "float32", "fixed32" + case descriptor.FieldDescriptorProto_TYPE_INT64: + typ, wire = "int64", "varint" + case descriptor.FieldDescriptorProto_TYPE_UINT64: + typ, wire = "uint64", "varint" + case descriptor.FieldDescriptorProto_TYPE_INT32: + typ, wire = "int32", "varint" + case descriptor.FieldDescriptorProto_TYPE_UINT32: + typ, wire = "uint32", "varint" + case descriptor.FieldDescriptorProto_TYPE_FIXED64: + typ, wire = "uint64", "fixed64" + case descriptor.FieldDescriptorProto_TYPE_FIXED32: + typ, wire = "uint32", "fixed32" + case descriptor.FieldDescriptorProto_TYPE_BOOL: + typ, wire = "bool", "varint" + case descriptor.FieldDescriptorProto_TYPE_STRING: + typ, wire = "string", "bytes" + case descriptor.FieldDescriptorProto_TYPE_GROUP: + desc := g.ObjectNamed(field.GetTypeName()) + typ, wire = g.TypeName(desc), "group" + case descriptor.FieldDescriptorProto_TYPE_MESSAGE: + desc := g.ObjectNamed(field.GetTypeName()) + typ, wire = g.TypeName(desc), "bytes" + case descriptor.FieldDescriptorProto_TYPE_BYTES: + typ, wire = "[]byte", "bytes" + case descriptor.FieldDescriptorProto_TYPE_ENUM: + desc := g.ObjectNamed(field.GetTypeName()) + typ, wire = g.TypeName(desc), "varint" + case descriptor.FieldDescriptorProto_TYPE_SFIXED32: + typ, wire = "int32", "fixed32" + case descriptor.FieldDescriptorProto_TYPE_SFIXED64: + typ, wire = "int64", "fixed64" + case descriptor.FieldDescriptorProto_TYPE_SINT32: + typ, wire = "int32", "zigzag32" + case descriptor.FieldDescriptorProto_TYPE_SINT64: + typ, wire = "int64", "zigzag64" + default: + g.Fail("unknown type for", field.GetName()) + } + switch { + case gogoproto.IsCustomType(field) && gogoproto.IsCastType(field): + g.Fail(field.GetName() + " cannot be custom type and cast type") + case gogoproto.IsCustomType(field): + var packageName string + var err error + packageName, typ, err = getCustomType(field) + if err != nil { + g.Fail(err.Error()) + } + if len(packageName) > 0 { + g.customImports = append(g.customImports, packageName) + } + case gogoproto.IsCastType(field): + var packageName string + var err error + packageName, typ, err = getCastType(field) + if err != nil { + g.Fail(err.Error()) + } + if len(packageName) > 0 { + g.customImports = append(g.customImports, packageName) + } + } + if needsStar(field, g.file.proto3, message != nil && message.allowOneof()) { + typ = "*" + typ + } + if isRepeated(field) { + typ = "[]" + typ + } + return +} + +// GoMapDescriptor is a full description of the map output struct. +type GoMapDescriptor struct { + GoType string + + KeyField *descriptor.FieldDescriptorProto + KeyAliasField *descriptor.FieldDescriptorProto + KeyTag string + + ValueField *descriptor.FieldDescriptorProto + ValueAliasField *descriptor.FieldDescriptorProto + ValueTag string +} + +func (g *Generator) GoMapType(d *Descriptor, field *descriptor.FieldDescriptorProto) *GoMapDescriptor { + if d == nil { + byName := g.ObjectNamed(field.GetTypeName()) + desc, ok := byName.(*Descriptor) + if byName == nil || !ok || !desc.GetOptions().GetMapEntry() { + g.Fail(fmt.Sprintf("field %s is not a map", field.GetTypeName())) + return nil + } + d = desc + } + + m := &GoMapDescriptor{ + KeyField: d.Field[0], + ValueField: d.Field[1], + } + + // Figure out the Go types and tags for the key and value types. 
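+	// For a hypothetical map<int32, OtherMessage> field, this yields a key
+	// type of "int32" and a value type of "*OtherMessage" (message values
+	// stay pointers unless marked non-nullable), giving the Go type
+	// "map[int32]*OtherMessage" below.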
+ m.KeyAliasField, m.ValueAliasField = g.GetMapKeyField(field, m.KeyField), g.GetMapValueField(field, m.ValueField) + keyType, keyWire := g.GoType(d, m.KeyAliasField) + valType, valWire := g.GoType(d, m.ValueAliasField) + + m.KeyTag, m.ValueTag = g.goTag(d, m.KeyField, keyWire), g.goTag(d, m.ValueField, valWire) + + if gogoproto.IsCastType(field) { + var packageName string + var err error + packageName, typ, err := getCastType(field) + if err != nil { + g.Fail(err.Error()) + } + if len(packageName) > 0 { + g.customImports = append(g.customImports, packageName) + } + m.GoType = typ + return m + } + + // We don't use stars, except for message-typed values. + // Message and enum types are the only two possibly foreign types used in maps, + // so record their use. They are not permitted as map keys. + keyType = strings.TrimPrefix(keyType, "*") + switch *m.ValueAliasField.Type { + case descriptor.FieldDescriptorProto_TYPE_ENUM: + valType = strings.TrimPrefix(valType, "*") + g.RecordTypeUse(m.ValueAliasField.GetTypeName()) + case descriptor.FieldDescriptorProto_TYPE_MESSAGE: + if !gogoproto.IsNullable(m.ValueAliasField) { + valType = strings.TrimPrefix(valType, "*") + } + g.RecordTypeUse(m.ValueAliasField.GetTypeName()) + default: + valType = strings.TrimPrefix(valType, "*") + } + + m.GoType = fmt.Sprintf("map[%s]%s", keyType, valType) + return m +} + +func (g *Generator) RecordTypeUse(t string) { + if obj, ok := g.typeNameToObject[t]; ok { + // Call ObjectNamed to get the true object to record the use. + obj = g.ObjectNamed(t) + g.usedPackages[obj.PackageName()] = true + } +} + +// Method names that may be generated. Fields with these names get an +// underscore appended. +var methodNames = [...]string{ + "Reset", + "String", + "ProtoMessage", + "Marshal", + "Unmarshal", + "ExtensionRangeArray", + "ExtensionMap", + "Descriptor", + "MarshalTo", + "Equal", + "VerboseEqual", + "GoString", + "ProtoSize", +} + +// Generate the type and default constant definitions for this Descriptor. +func (g *Generator) generateMessage(message *Descriptor) { + // The full type name + typeName := message.TypeName() + // The full type name, CamelCased. + ccTypeName := CamelCaseSlice(typeName) + + usedNames := make(map[string]bool) + for _, n := range methodNames { + usedNames[n] = true + } + if !gogoproto.IsProtoSizer(message.file, message.DescriptorProto) { + usedNames["Size"] = true + } + fieldNames := make(map[*descriptor.FieldDescriptorProto]string) + fieldGetterNames := make(map[*descriptor.FieldDescriptorProto]string) + fieldTypes := make(map[*descriptor.FieldDescriptorProto]string) + mapFieldTypes := make(map[*descriptor.FieldDescriptorProto]string) + + oneofFieldName := make(map[int32]string) // indexed by oneof_index field of FieldDescriptorProto + oneofDisc := make(map[int32]string) // name of discriminator method + oneofTypeName := make(map[*descriptor.FieldDescriptorProto]string) // without star + oneofInsertPoints := make(map[int32]int) // oneof_index => offset of g.Buffer + + g.PrintComments(message.path) + g.P("type ", ccTypeName, " struct {") + g.In() + + // allocNames finds a conflict-free variation of the given strings, + // consistently mutating their suffixes. + // It returns the same number of strings. 
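+	// For instance, requesting "String" and "GetString" when "String" is
+	// already reserved by methodNames yields "String_" and "GetString_",
+	// so the field and its getter are renamed consistently.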
+ allocNames := func(ns ...string) []string { + Loop: + for { + for _, n := range ns { + if usedNames[n] { + for i := range ns { + ns[i] += "_" + } + continue Loop + } + } + for _, n := range ns { + usedNames[n] = true + } + return ns + } + } + + for i, field := range message.Field { + // Allocate the getter and the field at the same time so name + // collisions create field/method consistent names. + // TODO: This allocation occurs based on the order of the fields + // in the proto file, meaning that a change in the field + // ordering can change generated Method/Field names. + base := CamelCase(*field.Name) + if gogoproto.IsCustomName(field) { + base = gogoproto.GetCustomName(field) + } + ns := allocNames(base, "Get"+base) + fieldName, fieldGetterName := ns[0], ns[1] + typename, wiretype := g.GoType(message, field) + jsonName := *field.Name + jsonTag := jsonName + ",omitempty" + repeatedNativeType := (!field.IsMessage() && !gogoproto.IsCustomType(field) && field.IsRepeated()) + if !gogoproto.IsNullable(field) && !repeatedNativeType { + jsonTag = jsonName + } + gogoJsonTag := gogoproto.GetJsonTag(field) + if gogoJsonTag != nil { + jsonTag = *gogoJsonTag + } + gogoMoreTags := gogoproto.GetMoreTags(field) + moreTags := "" + if gogoMoreTags != nil { + moreTags = " " + *gogoMoreTags + } + tag := fmt.Sprintf("protobuf:%s json:%q%s", g.goTag(message, field, wiretype), jsonTag, moreTags) + fieldNames[field] = fieldName + fieldGetterNames[field] = fieldGetterName + if *field.Type == descriptor.FieldDescriptorProto_TYPE_MESSAGE && gogoproto.IsEmbed(field) { + fieldName = "" + } + + oneof := field.OneofIndex != nil && message.allowOneof() + if oneof && oneofFieldName[*field.OneofIndex] == "" { + odp := message.OneofDecl[int(*field.OneofIndex)] + fname := allocNames(CamelCase(odp.GetName()))[0] + + // This is the first field of a oneof we haven't seen before. + // Generate the union field. + com := g.PrintComments(fmt.Sprintf("%s,%d,%d", message.path, messageOneofPath, *field.OneofIndex)) + if com { + g.P("//") + } + g.P("// Types that are valid to be assigned to ", fname, ":") + // Generate the rest of this comment later, + // when we've computed any disambiguation. + oneofInsertPoints[*field.OneofIndex] = g.Buffer.Len() + + dname := "is" + ccTypeName + "_" + fname + oneofFieldName[*field.OneofIndex] = fname + oneofDisc[*field.OneofIndex] = dname + tag := `protobuf_oneof:"` + odp.GetName() + `"` + g.P(fname, " ", dname, " `", tag, "`") + } + + if *field.Type == descriptor.FieldDescriptorProto_TYPE_MESSAGE { + desc := g.ObjectNamed(field.GetTypeName()) + if d, ok := desc.(*Descriptor); ok && d.GetOptions().GetMapEntry() { + m := g.GoMapType(d, field) + typename = m.GoType + mapFieldTypes[field] = typename // record for the getter generation + + tag += fmt.Sprintf(" protobuf_key:%s protobuf_val:%s", m.KeyTag, m.ValueTag) + } + } + + fieldTypes[field] = typename + + if oneof { + tname := ccTypeName + "_" + fieldName + // It is possible for this to collide with a message or enum + // nested in this message. Check for collisions. 
+ for { + ok := true + for _, desc := range message.nested { + if CamelCaseSlice(desc.TypeName()) == tname { + ok = false + break + } + } + for _, enum := range message.enums { + if CamelCaseSlice(enum.TypeName()) == tname { + ok = false + break + } + } + if !ok { + tname += "_" + continue + } + break + } + + oneofTypeName[field] = tname + continue + } + + g.PrintComments(fmt.Sprintf("%s,%d,%d", message.path, messageFieldPath, i)) + g.P(fieldName, "\t", typename, "\t`", tag, "`") + g.RecordTypeUse(field.GetTypeName()) + } + if len(message.ExtensionRange) > 0 { + if gogoproto.HasExtensionsMap(g.file.FileDescriptorProto, message.DescriptorProto) { + g.P("XXX_extensions\t\tmap[int32]", g.Pkg["proto"], ".Extension `json:\"-\"`") + } else { + g.P("XXX_extensions\t\t[]byte `protobuf:\"bytes,0,opt\" json:\"-\"`") + } + } + if gogoproto.HasUnrecognized(g.file.FileDescriptorProto, message.DescriptorProto) && !message.proto3() { + g.P("XXX_unrecognized\t[]byte `json:\"-\"`") + } + g.Out() + g.P("}") + + // Update g.Buffer to list valid oneof types. + // We do this down here, after we've disambiguated the oneof type names. + // We go in reverse order of insertion point to avoid invalidating offsets. + for oi := int32(len(message.OneofDecl)); oi >= 0; oi-- { + ip := oneofInsertPoints[oi] + all := g.Buffer.Bytes() + rem := all[ip:] + g.Buffer = bytes.NewBuffer(all[:ip:ip]) // set cap so we don't scribble on rem + for _, field := range message.Field { + if field.OneofIndex == nil || *field.OneofIndex != oi { + continue + } + g.P("//\t*", oneofTypeName[field]) + } + g.Buffer.Write(rem) + } + + // Reset, String and ProtoMessage methods. + g.P("func (m *", ccTypeName, ") Reset() { *m = ", ccTypeName, "{} }") + if gogoproto.EnabledGoStringer(g.file.FileDescriptorProto, message.DescriptorProto) { + g.P("func (m *", ccTypeName, ") String() string { return ", g.Pkg["proto"], ".CompactTextString(m) }") + } + g.P("func (*", ccTypeName, ") ProtoMessage() {}") + + // Extension support methods + var hasExtensions, isMessageSet bool + if len(message.ExtensionRange) > 0 { + hasExtensions = true + // message_set_wire_format only makes sense when extensions are defined. 
+ if opts := message.Options; opts != nil && opts.GetMessageSetWireFormat() { + isMessageSet = true + g.P() + g.P("func (m *", ccTypeName, ") Marshal() ([]byte, error) {") + g.In() + g.P("return ", g.Pkg["proto"], ".MarshalMessageSet(m.ExtensionMap())") + g.Out() + g.P("}") + g.P("func (m *", ccTypeName, ") Unmarshal(buf []byte) error {") + g.In() + g.P("return ", g.Pkg["proto"], ".UnmarshalMessageSet(buf, m.ExtensionMap())") + g.Out() + g.P("}") + g.P("func (m *", ccTypeName, ") MarshalJSON() ([]byte, error) {") + g.In() + g.P("return ", g.Pkg["proto"], ".MarshalMessageSetJSON(m.XXX_extensions)") + g.Out() + g.P("}") + g.P("func (m *", ccTypeName, ") UnmarshalJSON(buf []byte) error {") + g.In() + g.P("return ", g.Pkg["proto"], ".UnmarshalMessageSetJSON(buf, m.XXX_extensions)") + g.Out() + g.P("}") + g.P("// ensure ", ccTypeName, " satisfies proto.Marshaler and proto.Unmarshaler") + g.P("var _ ", g.Pkg["proto"], ".Marshaler = (*", ccTypeName, ")(nil)") + g.P("var _ ", g.Pkg["proto"], ".Unmarshaler = (*", ccTypeName, ")(nil)") + } + + g.P() + g.P("var extRange_", ccTypeName, " = []", g.Pkg["proto"], ".ExtensionRange{") + g.In() + for _, r := range message.ExtensionRange { + end := fmt.Sprint(*r.End - 1) // make range inclusive on both ends + g.P("{", r.Start, ", ", end, "},") + } + g.Out() + g.P("}") + g.P("func (*", ccTypeName, ") ExtensionRangeArray() []", g.Pkg["proto"], ".ExtensionRange {") + g.In() + g.P("return extRange_", ccTypeName) + g.Out() + g.P("}") + if gogoproto.HasExtensionsMap(g.file.FileDescriptorProto, message.DescriptorProto) { + g.P("func (m *", ccTypeName, ") ExtensionMap() map[int32]", g.Pkg["proto"], ".Extension {") + g.In() + g.P("if m.XXX_extensions == nil {") + g.In() + g.P("m.XXX_extensions = make(map[int32]", g.Pkg["proto"], ".Extension)") + g.Out() + g.P("}") + g.P("return m.XXX_extensions") + g.Out() + g.P("}") + } else { + g.P("func (m *", ccTypeName, ") GetExtensions() *[]byte {") + g.In() + g.P("if m.XXX_extensions == nil {") + g.In() + g.P("m.XXX_extensions = make([]byte, 0)") + g.Out() + g.P("}") + g.P("return &m.XXX_extensions") + g.Out() + g.P("}") + } + } + + // Default constants + defNames := make(map[*descriptor.FieldDescriptorProto]string) + for _, field := range message.Field { + def := field.GetDefaultValue() + if def == "" { + continue + } + if !gogoproto.IsNullable(field) { + g.Fail("illegal default value: ", field.GetName(), " in ", message.GetName(), " is not nullable and is thus not allowed to have a default value") + } + fieldname := "Default_" + ccTypeName + "_" + CamelCase(*field.Name) + defNames[field] = fieldname + typename, _ := g.GoType(message, field) + if typename[0] == '*' { + typename = typename[1:] + } + kind := "const " + switch { + case typename == "bool": + case typename == "string": + def = strconv.Quote(def) + case typename == "[]byte": + def = "[]byte(" + strconv.Quote(def) + ")" + kind = "var " + case def == "inf", def == "-inf", def == "nan": + // These names are known to, and defined by, the protocol language. + switch def { + case "inf": + def = "math.Inf(1)" + case "-inf": + def = "math.Inf(-1)" + case "nan": + def = "math.NaN()" + } + if *field.Type == descriptor.FieldDescriptorProto_TYPE_FLOAT { + def = "float32(" + def + ")" + } + kind = "var " + case *field.Type == descriptor.FieldDescriptorProto_TYPE_ENUM: + // Must be an enum. Need to construct the prefixed name. 
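+			// For example, a default of FOO on an enum named Color becomes
+			// the Go constant Color_FOO (or plain FOO when the
+			// goproto_enum_prefix option is disabled for that enum).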
+ obj := g.ObjectNamed(field.GetTypeName()) + var enum *EnumDescriptor + if id, ok := obj.(*ImportedDescriptor); ok { + // The enum type has been publicly imported. + enum, _ = id.o.(*EnumDescriptor) + } else { + enum, _ = obj.(*EnumDescriptor) + } + if enum == nil { + log.Printf("don't know how to generate constant for %s", fieldname) + continue + } + if gogoproto.EnabledGoEnumPrefix(enum.file, enum.EnumDescriptorProto) { + def = g.DefaultPackageName(obj) + enum.prefix() + def + } else { + def = g.DefaultPackageName(obj) + def + } + } + g.P(kind, fieldname, " ", typename, " = ", def) + g.file.addExport(message, constOrVarSymbol{fieldname, kind, ""}) + } + g.P() + + // Oneof per-field types, discriminants and getters. + if message.allowOneof() { + // Generate unexported named types for the discriminant interfaces. + // We shouldn't have to do this, but there was (~19 Aug 2015) a compiler/linker bug + // that was triggered by using anonymous interfaces here. + // TODO: Revisit this and consider reverting back to anonymous interfaces. + for oi := range message.OneofDecl { + dname := oneofDisc[int32(oi)] + g.P("type ", dname, " interface {") + g.In() + g.P(dname, "()") + if gogoproto.HasEqual(g.file.FileDescriptorProto, message.DescriptorProto) { + g.P(`Equal(interface{}) bool`) + } + if gogoproto.HasVerboseEqual(g.file.FileDescriptorProto, message.DescriptorProto) { + g.P(`VerboseEqual(interface{}) error`) + } + if gogoproto.IsMarshaler(g.file.FileDescriptorProto, message.DescriptorProto) || + gogoproto.IsUnsafeMarshaler(g.file.FileDescriptorProto, message.DescriptorProto) { + g.P(`MarshalTo([]byte) (int, error)`) + } + if gogoproto.IsSizer(g.file.FileDescriptorProto, message.DescriptorProto) { + g.P(`Size() int`) + } + if gogoproto.IsProtoSizer(g.file.FileDescriptorProto, message.DescriptorProto) { + g.P(`ProtoSize() int`) + } + g.Out() + g.P("}") + } + g.P() + for _, field := range message.Field { + if field.OneofIndex == nil { + continue + } + _, wiretype := g.GoType(message, field) + tag := "protobuf:" + g.goTag(message, field, wiretype) + g.P("type ", oneofTypeName[field], " struct{ ", fieldNames[field], " ", fieldTypes[field], " `", tag, "` }") + g.RecordTypeUse(field.GetTypeName()) + } + g.P() + for _, field := range message.Field { + if field.OneofIndex == nil { + continue + } + g.P("func (*", oneofTypeName[field], ") ", oneofDisc[*field.OneofIndex], "() {}") + } + g.P() + for oi := range message.OneofDecl { + fname := oneofFieldName[int32(oi)] + g.P("func (m *", ccTypeName, ") Get", fname, "() ", oneofDisc[int32(oi)], " {") + g.P("if m != nil { return m.", fname, " }") + g.P("return nil") + g.P("}") + } + g.P() + } + + // Field getters + var getters []getterSymbol + for _, field := range message.Field { + oneof := field.OneofIndex != nil && message.allowOneof() + if !oneof && !gogoproto.HasGoGetters(g.file.FileDescriptorProto, message.DescriptorProto) { + continue + } + if gogoproto.IsEmbed(field) || gogoproto.IsCustomType(field) { + continue + } + fname := fieldNames[field] + typename, _ := g.GoType(message, field) + if t, ok := mapFieldTypes[field]; ok { + typename = t + } + mname := fieldGetterNames[field] + star := "" + if (*field.Type != descriptor.FieldDescriptorProto_TYPE_MESSAGE) && + (*field.Type != descriptor.FieldDescriptorProto_TYPE_GROUP) && + needsStar(field, g.file.proto3, message != nil && message.allowOneof()) && typename[0] == '*' { + typename = typename[1:] + star = "*" + } + + // In proto3, only generate getters for message fields and oneof fields. 
+ if message.proto3() && *field.Type != descriptor.FieldDescriptorProto_TYPE_MESSAGE && !oneof { + continue + } + + // Only export getter symbols for basic types, + // and for messages and enums in the same package. + // Groups are not exported. + // Foreign types can't be hoisted through a public import because + // the importer may not already be importing the defining .proto. + // As an example, imagine we have an import tree like this: + // A.proto -> B.proto -> C.proto + // If A publicly imports B, we need to generate the getters from B in A's output, + // but if one such getter returns something from C then we cannot do that + // because A is not importing C already. + var getter, genType bool + switch *field.Type { + case descriptor.FieldDescriptorProto_TYPE_GROUP: + getter = false + case descriptor.FieldDescriptorProto_TYPE_MESSAGE, descriptor.FieldDescriptorProto_TYPE_ENUM: + // Only export getter if its return type is in this package. + getter = g.ObjectNamed(field.GetTypeName()).PackageName() == message.PackageName() + genType = true + default: + getter = true + } + if getter { + getters = append(getters, getterSymbol{ + name: mname, + typ: typename, + typeName: field.GetTypeName(), + genType: genType, + }) + } + + g.P("func (m *", ccTypeName, ") "+mname+"() "+typename+" {") + g.In() + def, hasDef := defNames[field] + typeDefaultIsNil := false // whether this field type's default value is a literal nil unless specified + switch *field.Type { + case descriptor.FieldDescriptorProto_TYPE_BYTES: + typeDefaultIsNil = !hasDef + case descriptor.FieldDescriptorProto_TYPE_GROUP, descriptor.FieldDescriptorProto_TYPE_MESSAGE: + typeDefaultIsNil = gogoproto.IsNullable(field) + } + if isRepeated(field) { + typeDefaultIsNil = true + } + if typeDefaultIsNil && !oneof { + // A bytes field with no explicit default needs less generated code, + // as does a message or group field, or a repeated field. + g.P("if m != nil {") + g.In() + g.P("return m." + fname) + g.Out() + g.P("}") + g.P("return nil") + g.Out() + g.P("}") + g.P() + continue + } + if !gogoproto.IsNullable(field) { + g.P("if m != nil {") + g.In() + g.P("return m." + fname) + g.Out() + g.P("}") + } else if !oneof { + g.P("if m != nil && m." + fname + " != nil {") + g.In() + g.P("return " + star + "m." + fname) + g.Out() + g.P("}") + } else { + uname := oneofFieldName[*field.OneofIndex] + tname := oneofTypeName[field] + g.P("if x, ok := m.Get", uname, "().(*", tname, "); ok {") + g.P("return x.", fname) + g.P("}") + } + if hasDef { + if *field.Type != descriptor.FieldDescriptorProto_TYPE_BYTES { + g.P("return " + def) + } else { + // The default is a []byte var. + // Make a copy when returning it to be safe. + g.P("return append([]byte(nil), ", def, "...)") + } + } else { + switch *field.Type { + case descriptor.FieldDescriptorProto_TYPE_GROUP, + descriptor.FieldDescriptorProto_TYPE_MESSAGE: + if field.OneofIndex != nil { + g.P(`return nil`) + } else { + goTyp, _ := g.GoType(message, field) + goTypName := GoTypeToName(goTyp) + g.P("return ", goTypName, "{}") + } + case descriptor.FieldDescriptorProto_TYPE_BOOL: + g.P("return false") + case descriptor.FieldDescriptorProto_TYPE_STRING: + g.P(`return ""`) + case descriptor.FieldDescriptorProto_TYPE_BYTES: + // This is only possible for oneof fields. + g.P("return nil") + case descriptor.FieldDescriptorProto_TYPE_ENUM: + // The default default for an enum is the first value in the enum, + // not zero. 
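+				// For example, given `enum Color { RED = 5; BLUE = 6; }`, the
+				// getter for an unset field returns the constant generated
+				// for RED rather than 0.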
+ obj := g.ObjectNamed(field.GetTypeName()) + var enum *EnumDescriptor + if id, ok := obj.(*ImportedDescriptor); ok { + // The enum type has been publicly imported. + enum, _ = id.o.(*EnumDescriptor) + } else { + enum, _ = obj.(*EnumDescriptor) + } + if enum == nil { + log.Printf("don't know how to generate getter for %s", field.GetName()) + continue + } + if len(enum.Value) == 0 { + g.P("return 0 // empty enum") + } else { + first := enum.Value[0].GetName() + if gogoproto.EnabledGoEnumPrefix(enum.file, enum.EnumDescriptorProto) { + g.P("return ", g.DefaultPackageName(obj)+enum.prefix()+first) + } else { + g.P("return ", g.DefaultPackageName(obj)+first) + } + } + default: + g.P("return 0") + } + } + g.Out() + g.P("}") + g.P() + } + + if !message.group { + ms := &messageSymbol{ + sym: ccTypeName, + hasExtensions: hasExtensions, + isMessageSet: isMessageSet, + hasOneof: len(message.OneofDecl) > 0, + getters: getters, + } + g.file.addExport(message, ms) + } + + // Oneof functions + if len(message.OneofDecl) > 0 && message.allowOneof() { + fieldWire := make(map[*descriptor.FieldDescriptorProto]string) + + // method + enc := "_" + ccTypeName + "_OneofMarshaler" + dec := "_" + ccTypeName + "_OneofUnmarshaler" + encSig := "(msg " + g.Pkg["proto"] + ".Message, b *" + g.Pkg["proto"] + ".Buffer) error" + decSig := "(msg " + g.Pkg["proto"] + ".Message, tag, wire int, b *" + g.Pkg["proto"] + ".Buffer) (bool, error)" + + g.P("// XXX_OneofFuncs is for the internal use of the proto package.") + g.P("func (*", ccTypeName, ") XXX_OneofFuncs() (func", encSig, ", func", decSig, ", []interface{}) {") + g.P("return ", enc, ", ", dec, ", []interface{}{") + for _, field := range message.Field { + if field.OneofIndex == nil { + continue + } + g.P("(*", oneofTypeName[field], ")(nil),") + } + g.P("}") + g.P("}") + g.P() + + // marshaler + g.P("func ", enc, encSig, " {") + g.P("m := msg.(*", ccTypeName, ")") + for oi, odp := range message.OneofDecl { + g.P("// ", odp.GetName()) + fname := oneofFieldName[int32(oi)] + g.P("switch x := m.", fname, ".(type) {") + for _, field := range message.Field { + if field.OneofIndex == nil || int(*field.OneofIndex) != oi { + continue + } + g.P("case *", oneofTypeName[field], ":") + var wire, pre, post string + val := "x." + fieldNames[field] // overridden for TYPE_BOOL + canFail := false // only TYPE_MESSAGE and TYPE_GROUP can fail + switch *field.Type { + case descriptor.FieldDescriptorProto_TYPE_DOUBLE: + wire = "WireFixed64" + pre = "b.EncodeFixed64(" + g.Pkg["math"] + ".Float64bits(" + post = "))" + case descriptor.FieldDescriptorProto_TYPE_FLOAT: + wire = "WireFixed32" + pre = "b.EncodeFixed32(uint64(" + g.Pkg["math"] + ".Float32bits(" + post = ")))" + case descriptor.FieldDescriptorProto_TYPE_INT64, + descriptor.FieldDescriptorProto_TYPE_UINT64: + wire = "WireVarint" + pre, post = "b.EncodeVarint(uint64(", "))" + case descriptor.FieldDescriptorProto_TYPE_INT32, + descriptor.FieldDescriptorProto_TYPE_UINT32, + descriptor.FieldDescriptorProto_TYPE_ENUM: + wire = "WireVarint" + pre, post = "b.EncodeVarint(uint64(", "))" + case descriptor.FieldDescriptorProto_TYPE_FIXED64, + descriptor.FieldDescriptorProto_TYPE_SFIXED64: + wire = "WireFixed64" + pre, post = "b.EncodeFixed64(uint64(", "))" + case descriptor.FieldDescriptorProto_TYPE_FIXED32, + descriptor.FieldDescriptorProto_TYPE_SFIXED32: + wire = "WireFixed32" + pre, post = "b.EncodeFixed32(uint64(", "))" + case descriptor.FieldDescriptorProto_TYPE_BOOL: + // bool needs special handling. 
+ g.P("t := uint64(0)") + g.P("if ", val, " { t = 1 }") + val = "t" + wire = "WireVarint" + pre, post = "b.EncodeVarint(", ")" + case descriptor.FieldDescriptorProto_TYPE_STRING: + wire = "WireBytes" + pre, post = "b.EncodeStringBytes(", ")" + case descriptor.FieldDescriptorProto_TYPE_GROUP: + wire = "WireStartGroup" + pre, post = "b.Marshal(", ")" + canFail = true + case descriptor.FieldDescriptorProto_TYPE_MESSAGE: + wire = "WireBytes" + pre, post = "b.EncodeMessage(", ")" + canFail = true + case descriptor.FieldDescriptorProto_TYPE_BYTES: + wire = "WireBytes" + pre, post = "b.EncodeRawBytes(", ")" + case descriptor.FieldDescriptorProto_TYPE_SINT32: + wire = "WireVarint" + pre, post = "b.EncodeZigzag32(uint64(", "))" + case descriptor.FieldDescriptorProto_TYPE_SINT64: + wire = "WireVarint" + pre, post = "b.EncodeZigzag64(uint64(", "))" + default: + g.Fail("unhandled oneof field type ", field.Type.String()) + } + fieldWire[field] = wire + g.P("_ = b.EncodeVarint(", field.Number, "<<3|", g.Pkg["proto"], ".", wire, ")") + if *field.Type == descriptor.FieldDescriptorProto_TYPE_BYTES && gogoproto.IsCustomType(field) { + g.P(`data, err := `, val, `.Marshal()`) + g.P(`if err != nil {`) + g.In() + g.P(`return err`) + g.Out() + g.P(`}`) + val = "data" + } + if !canFail { + g.P("_ = ", pre, val, post) + } else { + g.P("if err := ", pre, val, post, "; err != nil {") + g.In() + g.P("return err") + g.Out() + g.P("}") + } + if *field.Type == descriptor.FieldDescriptorProto_TYPE_GROUP { + g.P("_ = b.EncodeVarint(", field.Number, "<<3|", g.Pkg["proto"], ".WireEndGroup)") + } + } + g.P("case nil:") + g.P("default: return ", g.Pkg["fmt"], `.Errorf("`, ccTypeName, ".", fname, ` has unexpected type %T", x)`) + g.P("}") + } + g.P("return nil") + g.P("}") + g.P() + + // unmarshaler + g.P("func ", dec, decSig, " {") + g.P("m := msg.(*", ccTypeName, ")") + g.P("switch tag {") + for _, field := range message.Field { + if field.OneofIndex == nil { + continue + } + odp := message.OneofDecl[int(*field.OneofIndex)] + g.P("case ", field.Number, ": // ", odp.GetName(), ".", *field.Name) + g.P("if wire != ", g.Pkg["proto"], ".", fieldWire[field], " {") + g.P("return true, ", g.Pkg["proto"], ".ErrInternalBadWireType") + g.P("}") + lhs := "x, err" // overridden for TYPE_MESSAGE and TYPE_GROUP + var dec, cast, cast2 string + switch *field.Type { + case descriptor.FieldDescriptorProto_TYPE_DOUBLE: + dec, cast = "b.DecodeFixed64()", g.Pkg["math"]+".Float64frombits" + case descriptor.FieldDescriptorProto_TYPE_FLOAT: + dec, cast, cast2 = "b.DecodeFixed32()", "uint32", g.Pkg["math"]+".Float32frombits" + case descriptor.FieldDescriptorProto_TYPE_INT64: + dec, cast = "b.DecodeVarint()", "int64" + case descriptor.FieldDescriptorProto_TYPE_UINT64: + dec = "b.DecodeVarint()" + case descriptor.FieldDescriptorProto_TYPE_INT32: + dec, cast = "b.DecodeVarint()", "int32" + case descriptor.FieldDescriptorProto_TYPE_FIXED64: + dec = "b.DecodeFixed64()" + case descriptor.FieldDescriptorProto_TYPE_FIXED32: + dec, cast = "b.DecodeFixed32()", "uint32" + case descriptor.FieldDescriptorProto_TYPE_BOOL: + dec = "b.DecodeVarint()" + // handled specially below + case descriptor.FieldDescriptorProto_TYPE_STRING: + dec = "b.DecodeStringBytes()" + case descriptor.FieldDescriptorProto_TYPE_GROUP: + g.P("msg := new(", fieldTypes[field][1:], ")") // drop star + lhs = "err" + dec = "b.DecodeGroup(msg)" + // handled specially below + case descriptor.FieldDescriptorProto_TYPE_MESSAGE: + g.P("msg := new(", fieldTypes[field][1:], ")") // drop star + lhs = 
"err" + dec = "b.DecodeMessage(msg)" + // handled specially below + case descriptor.FieldDescriptorProto_TYPE_BYTES: + dec = "b.DecodeRawBytes(true)" + case descriptor.FieldDescriptorProto_TYPE_UINT32: + dec, cast = "b.DecodeVarint()", "uint32" + case descriptor.FieldDescriptorProto_TYPE_ENUM: + dec, cast = "b.DecodeVarint()", fieldTypes[field] + case descriptor.FieldDescriptorProto_TYPE_SFIXED32: + dec, cast = "b.DecodeFixed32()", "int32" + case descriptor.FieldDescriptorProto_TYPE_SFIXED64: + dec, cast = "b.DecodeFixed64()", "int64" + case descriptor.FieldDescriptorProto_TYPE_SINT32: + dec, cast = "b.DecodeZigzag32()", "int32" + case descriptor.FieldDescriptorProto_TYPE_SINT64: + dec, cast = "b.DecodeZigzag64()", "int64" + default: + g.Fail("unhandled oneof field type ", field.Type.String()) + } + g.P(lhs, " := ", dec) + val := "x" + if *field.Type == descriptor.FieldDescriptorProto_TYPE_BYTES && gogoproto.IsCustomType(field) { + g.P(`if err != nil {`) + g.In() + g.P(`return true, err`) + g.Out() + g.P(`}`) + _, ctyp, err := GetCustomType(field) + if err != nil { + panic(err) + } + g.P(`var cc `, ctyp) + g.P(`c := &cc`) + g.P(`err = c.Unmarshal(`, val, `)`) + val = "*c" + } + if cast != "" { + val = cast + "(" + val + ")" + } + if cast2 != "" { + val = cast2 + "(" + val + ")" + } + switch *field.Type { + case descriptor.FieldDescriptorProto_TYPE_BOOL: + val += " != 0" + case descriptor.FieldDescriptorProto_TYPE_GROUP, + descriptor.FieldDescriptorProto_TYPE_MESSAGE: + val = "msg" + } + if gogoproto.IsCastType(field) { + _, typ, err := getCastType(field) + if err != nil { + g.Fail(err.Error()) + } + val = typ + "(" + val + ")" + } + g.P("m.", oneofFieldName[*field.OneofIndex], " = &", oneofTypeName[field], "{", val, "}") + g.P("return true, err") + } + g.P("default: return false, nil") + g.P("}") + g.P("}") + g.P() + } + + for _, ext := range message.ext { + g.generateExtension(ext) + } + + fullName := strings.Join(message.TypeName(), ".") + if g.file.Package != nil { + fullName = *g.file.Package + "." + fullName + } + + g.addInitf("%s.RegisterType((*%s)(nil), %q)", g.Pkg["proto"], ccTypeName, fullName) +} + +func (g *Generator) generateExtension(ext *ExtensionDescriptor) { + ccTypeName := ext.DescName() + + extObj := g.ObjectNamed(*ext.Extendee) + var extDesc *Descriptor + if id, ok := extObj.(*ImportedDescriptor); ok { + // This is extending a publicly imported message. + // We need the underlying type for goTag. + extDesc = id.o.(*Descriptor) + } else { + extDesc = extObj.(*Descriptor) + } + extendedType := "*" + g.TypeName(extObj) // always use the original + field := ext.FieldDescriptorProto + fieldType, wireType := g.GoType(ext.parent, field) + tag := g.goTag(extDesc, field, wireType) + g.RecordTypeUse(*ext.Extendee) + if n := ext.FieldDescriptorProto.TypeName; n != nil { + // foreign extension type + g.RecordTypeUse(*n) + } + + typeName := ext.TypeName() + + // Special case for proto2 message sets: If this extension is extending + // proto2_bridge.MessageSet, and its final name component is "message_set_extension", + // then drop that last component. + mset := false + if extendedType == "*proto2_bridge.MessageSet" && typeName[len(typeName)-1] == "message_set_extension" { + typeName = typeName[:len(typeName)-1] + mset = true + } + + // For text formatting, the package must be exactly what the .proto file declares, + // ignoring overrides such as the go_package option, and with no dot/underscore mapping. 
+ extName := strings.Join(typeName, ".") + if g.file.Package != nil { + extName = *g.file.Package + "." + extName + } + + g.P("var ", ccTypeName, " = &", g.Pkg["proto"], ".ExtensionDesc{") + g.In() + g.P("ExtendedType: (", extendedType, ")(nil),") + g.P("ExtensionType: (", fieldType, ")(nil),") + g.P("Field: ", field.Number, ",") + g.P(`Name: "`, extName, `",`) + g.P("Tag: ", tag, ",") + + g.Out() + g.P("}") + g.P() + + if mset { + // Generate a bit more code to register with message_set.go. + g.addInitf("%s.RegisterMessageSetType((%s)(nil), %d, %q)", g.Pkg["proto"], fieldType, *field.Number, extName) + } + + g.file.addExport(ext, constOrVarSymbol{ccTypeName, "var", ""}) +} + +func (g *Generator) generateInitFunction() { + for _, enum := range g.file.enum { + g.generateEnumRegistration(enum) + } + for _, d := range g.file.desc { + for _, ext := range d.ext { + g.generateExtensionRegistration(ext) + } + } + for _, ext := range g.file.ext { + g.generateExtensionRegistration(ext) + } + if len(g.init) == 0 { + return + } + g.P("func init() {") + g.In() + for _, l := range g.init { + g.P(l) + } + g.Out() + g.P("}") + g.init = nil +} + +func (g *Generator) generateEnumRegistration(enum *EnumDescriptor) { + // // We always print the full (proto-world) package name here. + pkg := enum.File().GetPackage() + if pkg != "" { + pkg += "." + } + // The full type name + typeName := enum.TypeName() + // The full type name, CamelCased. + ccTypeName := CamelCaseSlice(typeName) + g.addInitf("%s.RegisterEnum(%q, %[3]s_name, %[3]s_value)", g.Pkg["proto"], pkg+ccTypeName, ccTypeName) +} + +func (g *Generator) generateExtensionRegistration(ext *ExtensionDescriptor) { + g.addInitf("%s.RegisterExtension(%s)", g.Pkg["proto"], ext.DescName()) +} + +// And now lots of helper functions. + +// Is c an ASCII lower-case letter? +func isASCIILower(c byte) bool { + return 'a' <= c && c <= 'z' +} + +// Is c an ASCII digit? +func isASCIIDigit(c byte) bool { + return '0' <= c && c <= '9' +} + +// CamelCase returns the CamelCased name. +// If there is an interior underscore followed by a lower case letter, +// drop the underscore and convert the letter to upper case. +// There is a remote possibility of this rewrite causing a name collision, +// but it's so remote we're prepared to pretend it's nonexistent - since the +// C++ generator lowercases names, it's extremely unlikely to have two fields +// with different capitalizations. +// In short, _my_field_name_2 becomes XMyFieldName_2. +func CamelCase(s string) string { + if s == "" { + return "" + } + t := make([]byte, 0, 32) + i := 0 + if s[0] == '_' { + // Need a capital letter; drop the '_'. + t = append(t, 'X') + i++ + } + // Invariant: if the next letter is lower case, it must be converted + // to upper case. + // That is, we process a word at a time, where words are marked by _ or + // upper case letter. Digits are treated as words. + for ; i < len(s); i++ { + c := s[i] + if c == '_' && i+1 < len(s) && isASCIILower(s[i+1]) { + continue // Skip the underscore in s. + } + if isASCIIDigit(c) { + t = append(t, c) + continue + } + // Assume we have a letter now - if not, it's a bogus identifier. + // The next word is a sequence of characters that must start upper case. + if isASCIILower(c) { + c ^= ' ' // Make it a capital letter. + } + t = append(t, c) // Guaranteed not lower case. + // Accept lower case sequence that follows. 
+ for i+1 < len(s) && isASCIILower(s[i+1]) { + i++ + t = append(t, s[i]) + } + } + return string(t) +} + +// CamelCaseSlice is like CamelCase, but the argument is a slice of strings to +// be joined with "_". +func CamelCaseSlice(elem []string) string { return CamelCase(strings.Join(elem, "_")) } + +// dottedSlice turns a sliced name into a dotted name. +func dottedSlice(elem []string) string { return strings.Join(elem, ".") } + +// Given a .proto file name, return the output name for the generated Go program. +func goFileName(name string) string { + ext := path.Ext(name) + if ext == ".proto" || ext == ".protodevel" { + name = name[0 : len(name)-len(ext)] + } + return name + ".pb.go" +} + +// Is this field optional? +func isOptional(field *descriptor.FieldDescriptorProto) bool { + return field.Label != nil && *field.Label == descriptor.FieldDescriptorProto_LABEL_OPTIONAL +} + +// Is this field required? +func isRequired(field *descriptor.FieldDescriptorProto) bool { + return field.Label != nil && *field.Label == descriptor.FieldDescriptorProto_LABEL_REQUIRED +} + +// Is this field repeated? +func isRepeated(field *descriptor.FieldDescriptorProto) bool { + return field.Label != nil && *field.Label == descriptor.FieldDescriptorProto_LABEL_REPEATED +} + +// badToUnderscore is the mapping function used to generate Go names from package names, +// which can be dotted in the input .proto file. It replaces non-identifier characters such as +// dot or dash with underscore. +func badToUnderscore(r rune) rune { + if unicode.IsLetter(r) || unicode.IsDigit(r) || r == '_' { + return r + } + return '_' +} + +// baseName returns the last path element of the name, with the last dotted suffix removed. +func baseName(name string) string { + // First, find the last element + if i := strings.LastIndex(name, "/"); i >= 0 { + name = name[i+1:] + } + // Now drop the suffix + if i := strings.LastIndex(name, "."); i >= 0 { + name = name[0:i] + } + return name +} + +// The SourceCodeInfo message describes the location of elements of a parsed +// .proto file by way of a "path", which is a sequence of integers that +// describe the route from a FileDescriptorProto to the relevant submessage. +// The path alternates between a field number of a repeated field, and an index +// into that repeated field. The constants below define the field numbers that +// are used. +// +// See descriptor.proto for more information about this. +const ( + // tag numbers in FileDescriptorProto + packagePath = 2 // package + messagePath = 4 // message_type + enumPath = 5 // enum_type + // tag numbers in DescriptorProto + messageFieldPath = 2 // field + messageMessagePath = 3 // nested_type + messageEnumPath = 4 // enum_type + messageOneofPath = 8 // oneof_decl + // tag numbers in EnumDescriptorProto + enumValuePath = 2 // value +) diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/protoc-gen-gogo/generator/helper.go b/Godeps/_workspace/src/github.com/gogo/protobuf/protoc-gen-gogo/generator/helper.go new file mode 100644 index 000000000000..258d6a9b6af7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/protoc-gen-gogo/generator/helper.go @@ -0,0 +1,464 @@ +// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. 
+// http://github.com/gogo/protobuf/gogoproto +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package generator + +import ( + "bytes" + "go/parser" + "go/printer" + "go/token" + "strings" + + "path" + + "github.com/gogo/protobuf/gogoproto" + "github.com/gogo/protobuf/proto" + descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" + plugin "github.com/gogo/protobuf/protoc-gen-gogo/plugin" +) + +func (d *FileDescriptor) Messages() []*Descriptor { + return d.desc +} + +func (d *FileDescriptor) Enums() []*EnumDescriptor { + return d.enum +} + +func (d *Descriptor) IsGroup() bool { + return d.group +} + +func (g *Generator) IsGroup(field *descriptor.FieldDescriptorProto) bool { + if d, ok := g.typeNameToObject[field.GetTypeName()].(*Descriptor); ok { + return d.IsGroup() + } + return false +} + +func (g *Generator) TypeNameByObject(typeName string) Object { + o, ok := g.typeNameToObject[typeName] + if !ok { + g.Fail("can't find object with type", typeName) + } + return o +} + +func (g *Generator) OneOfTypeName(message *Descriptor, field *descriptor.FieldDescriptorProto) string { + typeName := message.TypeName() + ccTypeName := CamelCaseSlice(typeName) + fieldName := g.GetOneOfFieldName(message, field) + tname := ccTypeName + "_" + fieldName + // It is possible for this to collide with a message or enum + // nested in this message. Check for collisions. 
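+ // When a collision is found, an underscore is appended to the
+ // oneof type name below to disambiguate it.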
+ ok := true + for _, desc := range message.nested { + if strings.Join(desc.TypeName(), "_") == tname { + ok = false + break + } + } + for _, enum := range message.enums { + if strings.Join(enum.TypeName(), "_") == tname { + ok = false + break + } + } + if !ok { + tname += "_" + } + return tname +} + +type PluginImports interface { + NewImport(pkg string) Single + GenerateImports(file *FileDescriptor) +} + +type pluginImports struct { + generator *Generator + singles []Single +} + +func NewPluginImports(generator *Generator) *pluginImports { + return &pluginImports{generator, make([]Single, 0)} +} + +func (this *pluginImports) NewImport(pkg string) Single { + imp := newImportedPackage(this.generator.ImportPrefix, pkg) + this.singles = append(this.singles, imp) + return imp +} + +func (this *pluginImports) GenerateImports(file *FileDescriptor) { + for _, s := range this.singles { + if s.IsUsed() { + this.generator.PrintImport(s.Name(), s.Location()) + } + } +} + +type Single interface { + Use() string + IsUsed() bool + Name() string + Location() string +} + +type importedPackage struct { + used bool + pkg string + name string + importPrefix string +} + +func newImportedPackage(importPrefix, pkg string) *importedPackage { + return &importedPackage{ + pkg: pkg, + importPrefix: importPrefix, + } +} + +func (this *importedPackage) Use() string { + if !this.used { + this.name = RegisterUniquePackageName(this.pkg, nil) + this.used = true + } + return this.name +} + +func (this *importedPackage) IsUsed() bool { + return this.used +} + +func (this *importedPackage) Name() string { + return this.name +} + +func (this *importedPackage) Location() string { + return this.importPrefix + this.pkg +} + +func (g *Generator) GetFieldName(message *Descriptor, field *descriptor.FieldDescriptorProto) string { + goTyp, _ := g.GoType(message, field) + fieldname := CamelCase(*field.Name) + if gogoproto.IsCustomName(field) { + fieldname = gogoproto.GetCustomName(field) + } + if gogoproto.IsEmbed(field) { + fieldname = EmbedFieldName(goTyp) + } + if field.OneofIndex != nil { + fieldname = message.OneofDecl[int(*field.OneofIndex)].GetName() + fieldname = CamelCase(fieldname) + } + for _, f := range methodNames { + if f == fieldname { + return fieldname + "_" + } + } + if !gogoproto.IsProtoSizer(message.file, message.DescriptorProto) { + if fieldname == "Size" { + return fieldname + "_" + } + } + return fieldname +} + +func (g *Generator) GetOneOfFieldName(message *Descriptor, field *descriptor.FieldDescriptorProto) string { + goTyp, _ := g.GoType(message, field) + fieldname := CamelCase(*field.Name) + if gogoproto.IsCustomName(field) { + fieldname = gogoproto.GetCustomName(field) + } + if gogoproto.IsEmbed(field) { + fieldname = EmbedFieldName(goTyp) + } + for _, f := range methodNames { + if f == fieldname { + return fieldname + "_" + } + } + if !gogoproto.IsProtoSizer(message.file, message.DescriptorProto) { + if fieldname == "Size" { + return fieldname + "_" + } + } + return fieldname +} + +func GetMap(file *descriptor.FileDescriptorProto, field *descriptor.FieldDescriptorProto) *descriptor.DescriptorProto { + if !field.IsMessage() { + return nil + } + typeName := strings.TrimPrefix(field.GetTypeName(), "."+file.GetPackage()+".") + if strings.Contains(typeName, "Map") && !strings.HasSuffix(typeName, "Entry") { + typeName += "." 
+ CamelCase(field.GetName()) + "Entry" + } + return file.GetMessage(typeName) +} + +func IsMap(file *descriptor.FileDescriptorProto, field *descriptor.FieldDescriptorProto) bool { + msg := GetMap(file, field) + if msg == nil { + return false + } + return msg.GetOptions().GetMapEntry() +} + +func (g *Generator) IsMap(field *descriptor.FieldDescriptorProto) bool { + if !field.IsMessage() { + return false + } + byName := g.ObjectNamed(field.GetTypeName()) + desc, ok := byName.(*Descriptor) + if byName == nil || !ok || !desc.GetOptions().GetMapEntry() { + return false + } + return true +} + +func (g *Generator) GetMapKeyField(field, keyField *descriptor.FieldDescriptorProto) *descriptor.FieldDescriptorProto { + if !gogoproto.IsCastKey(field) { + return keyField + } + keyField = proto.Clone(keyField).(*descriptor.FieldDescriptorProto) + if keyField.Options == nil { + keyField.Options = &descriptor.FieldOptions{} + } + keyType := gogoproto.GetCastKey(field) + if err := proto.SetExtension(keyField.Options, gogoproto.E_Casttype, &keyType); err != nil { + g.Fail(err.Error()) + } + return keyField +} + +func (g *Generator) GetMapValueField(field, valField *descriptor.FieldDescriptorProto) *descriptor.FieldDescriptorProto { + if !gogoproto.IsCastValue(field) && gogoproto.IsNullable(field) { + return valField + } + valField = proto.Clone(valField).(*descriptor.FieldDescriptorProto) + if valField.Options == nil { + valField.Options = &descriptor.FieldOptions{} + } + if valType := gogoproto.GetCastValue(field); len(valType) > 0 { + if err := proto.SetExtension(valField.Options, gogoproto.E_Casttype, &valType); err != nil { + g.Fail(err.Error()) + } + } + + nullable := gogoproto.IsNullable(field) + if err := proto.SetExtension(valField.Options, gogoproto.E_Nullable, &nullable); err != nil { + g.Fail(err.Error()) + } + return valField +} + +// GoMapValueTypes returns the map value Go type and the alias map value Go type (for casting), taking into +// account whether the map is nullable or the value is a message. +func GoMapValueTypes(mapField, valueField *descriptor.FieldDescriptorProto, goValueType, goValueAliasType string) (nullable bool, outGoType string, outGoAliasType string) { + nullable = gogoproto.IsNullable(mapField) && valueField.IsMessage() + if nullable { + // ensure the non-aliased Go value type is a pointer for consistency + if strings.HasPrefix(goValueType, "*") { + outGoType = goValueType + } else { + outGoType = "*" + goValueType + } + outGoAliasType = goValueAliasType + } else { + outGoType = strings.Replace(goValueType, "*", "", 1) + outGoAliasType = strings.Replace(goValueAliasType, "*", "", 1) + } + return +} + +func GoTypeToName(goTyp string) string { + return strings.Replace(strings.Replace(goTyp, "*", "", -1), "[]", "", -1) +} + +func EmbedFieldName(goTyp string) string { + goTyp = GoTypeToName(goTyp) + goTyps := strings.Split(goTyp, ".") + if len(goTyps) == 1 { + return goTyp + } + if len(goTyps) == 2 { + return goTyps[1] + } + panic("unreachable") +} + +func (g *Generator) GeneratePlugin(p Plugin) { + p.Init(g) + // Generate the output. The generator runs for every file, even the files + // that we don't generate output for, so that we can collate the full list + // of exported symbols to support public imports. 
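+ // genFileMap marks the files named on the command line; the plugin still
+ // runs over every file, but only marked files yield response entries.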
+ genFileMap := make(map[*FileDescriptor]bool, len(g.genFiles)) + for _, file := range g.genFiles { + genFileMap[file] = true + } + i := 0 + for _, file := range g.allFiles { + g.Reset() + g.writeOutput = genFileMap[file] + g.generatePlugin(file, p) + if !g.writeOutput { + continue + } + g.Response.File[i] = new(plugin.CodeGeneratorResponse_File) + g.Response.File[i].Name = proto.String(goFileName(*file.Name)) + g.Response.File[i].Content = proto.String(g.String()) + i++ + } +} + +func (g *Generator) SetFile(file *descriptor.FileDescriptorProto) { + g.file = g.FileOf(file) +} + +func (g *Generator) generatePlugin(file *FileDescriptor, p Plugin) { + g.writtenImports = make(map[string]bool) + g.file = g.FileOf(file.FileDescriptorProto) + g.usedPackages = make(map[string]bool) + + // Run the plugins before the imports so we know which imports are necessary. + p.Generate(file) + + // Generate header and imports last, though they appear first in the output. + rem := g.Buffer + g.Buffer = new(bytes.Buffer) + g.generateHeader() + p.GenerateImports(g.file) + g.generateImports() + if !g.writeOutput { + return + } + g.Write(rem.Bytes()) + + // Reformat generated code. + contents := string(g.Buffer.Bytes()) + fset := token.NewFileSet() + ast, err := parser.ParseFile(fset, "", g, parser.ParseComments) + if err != nil { + g.Fail("bad Go source code was generated:", contents, err.Error()) + return + } + g.Reset() + err = (&printer.Config{Mode: printer.TabIndent | printer.UseSpaces, Tabwidth: 8}).Fprint(g, fset, ast) + if err != nil { + g.Fail("generated Go source code could not be reformatted:", err.Error()) + } +} + +func GetCustomType(field *descriptor.FieldDescriptorProto) (packageName string, typ string, err error) { + return getCustomType(field) +} + +func getCustomType(field *descriptor.FieldDescriptorProto) (packageName string, typ string, err error) { + if field.Options != nil { + var v interface{} + v, err = proto.GetExtension(field.Options, gogoproto.E_Customtype) + if err == nil && v.(*string) != nil { + ctype := *(v.(*string)) + packageName, typ = splitCPackageType(ctype) + return packageName, typ, nil + } + } + return "", "", err +} + +func splitCPackageType(ctype string) (packageName string, typ string) { + ss := strings.Split(ctype, ".") + if len(ss) == 1 { + return "", ctype + } + packageName = strings.Join(ss[0:len(ss)-1], ".") + typeName := ss[len(ss)-1] + importStr := strings.Map(badToUnderscore, packageName) + typ = importStr + "." 
+ typeName + return packageName, typ +} + +func getCastType(field *descriptor.FieldDescriptorProto) (packageName string, typ string, err error) { + if field.Options != nil { + var v interface{} + v, err = proto.GetExtension(field.Options, gogoproto.E_Casttype) + if err == nil && v.(*string) != nil { + ctype := *(v.(*string)) + packageName, typ = splitCPackageType(ctype) + return packageName, typ, nil + } + } + return "", "", err +} + +func getCastKey(field *descriptor.FieldDescriptorProto) (packageName string, typ string, err error) { + if field.Options != nil { + var v interface{} + v, err = proto.GetExtension(field.Options, gogoproto.E_Castkey) + if err == nil && v.(*string) != nil { + ctype := *(v.(*string)) + packageName, typ = splitCPackageType(ctype) + return packageName, typ, nil + } + } + return "", "", err +} + +func getCastValue(field *descriptor.FieldDescriptorProto) (packageName string, typ string, err error) { + if field.Options != nil { + var v interface{} + v, err = proto.GetExtension(field.Options, gogoproto.E_Castvalue) + if err == nil && v.(*string) != nil { + ctype := *(v.(*string)) + packageName, typ = splitCPackageType(ctype) + return packageName, typ, nil + } + } + return "", "", err +} + +func FileName(file *FileDescriptor) string { + fname := path.Base(file.FileDescriptorProto.GetName()) + fname = strings.Replace(fname, ".proto", "", -1) + fname = strings.Replace(fname, "-", "_", -1) + return CamelCase(fname) +} + +func (g *Generator) AllFiles() *descriptor.FileDescriptorSet { + set := &descriptor.FileDescriptorSet{} + set.File = make([]*descriptor.FileDescriptorProto, len(g.allFiles)) + for i := range g.allFiles { + set.File[i] = g.allFiles[i].FileDescriptorProto + } + return set +} + +func (d *Descriptor) Path() string { + return d.path +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/protoc-gen-gogo/plugin/Makefile b/Godeps/_workspace/src/github.com/gogo/protobuf/protoc-gen-gogo/plugin/Makefile new file mode 100644 index 000000000000..546287cef8a1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/protoc-gen-gogo/plugin/Makefile @@ -0,0 +1,37 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2010 The Go Authors. All rights reserved. +# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# Not stored here, but plugin.proto is in https://github.com/google/protobuf/ +# at src/google/protobuf/compiler/plugin.proto +# Also we need to fix an import. +regenerate: + protoc --gogo_out=Mgoogle/protobuf/descriptor.proto=github.com/gogo/protobuf/protoc-gen-gogo/descriptor:. -I=../../protobuf/google/protobuf/compiler/:../../protobuf/ ../../protobuf/google/protobuf/compiler/plugin.proto + diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/protoc-gen-gogo/plugin/plugin.pb.go b/Godeps/_workspace/src/github.com/gogo/protobuf/protoc-gen-gogo/plugin/plugin.pb.go new file mode 100644 index 000000000000..8e8ebf7069e1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/protoc-gen-gogo/plugin/plugin.pb.go @@ -0,0 +1,194 @@ +// Code generated by protoc-gen-gogo. +// source: plugin.proto +// DO NOT EDIT! + +/* +Package plugin_go is a generated protocol buffer package. + +It is generated from these files: + plugin.proto + +It has these top-level messages: + CodeGeneratorRequest + CodeGeneratorResponse +*/ +package plugin_go + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// An encoded CodeGeneratorRequest is written to the plugin's stdin. +type CodeGeneratorRequest struct { + // The .proto files that were explicitly listed on the command-line. The + // code generator should generate code only for these files. Each file's + // descriptor will be included in proto_file, below. + FileToGenerate []string `protobuf:"bytes,1,rep,name=file_to_generate" json:"file_to_generate,omitempty"` + // The generator parameter passed on the command-line. + Parameter *string `protobuf:"bytes,2,opt,name=parameter" json:"parameter,omitempty"` + // FileDescriptorProtos for all files in files_to_generate and everything + // they import. The files will appear in topological order, so each file + // appears before any file that imports it. + // + // protoc guarantees that all proto_files will be written after + // the fields above, even though this is not technically guaranteed by the + // protobuf wire format. This theoretically could allow a plugin to stream + // in the FileDescriptorProtos and handle them one by one rather than read + // the entire set into memory at once. However, as of this writing, this + // is not similarly optimized on protoc's end -- it will store all fields in + // memory at once before sending them to the plugin. 
+ ProtoFile []*google_protobuf.FileDescriptorProto `protobuf:"bytes,15,rep,name=proto_file" json:"proto_file,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CodeGeneratorRequest) Reset() { *m = CodeGeneratorRequest{} } +func (m *CodeGeneratorRequest) String() string { return proto.CompactTextString(m) } +func (*CodeGeneratorRequest) ProtoMessage() {} + +func (m *CodeGeneratorRequest) GetFileToGenerate() []string { + if m != nil { + return m.FileToGenerate + } + return nil +} + +func (m *CodeGeneratorRequest) GetParameter() string { + if m != nil && m.Parameter != nil { + return *m.Parameter + } + return "" +} + +func (m *CodeGeneratorRequest) GetProtoFile() []*google_protobuf.FileDescriptorProto { + if m != nil { + return m.ProtoFile + } + return nil +} + +// The plugin writes an encoded CodeGeneratorResponse to stdout. +type CodeGeneratorResponse struct { + // Error message. If non-empty, code generation failed. The plugin process + // should exit with status code zero even if it reports an error in this way. + // + // This should be used to indicate errors in .proto files which prevent the + // code generator from generating correct code. Errors which indicate a + // problem in protoc itself -- such as the input CodeGeneratorRequest being + // unparseable -- should be reported by writing a message to stderr and + // exiting with a non-zero status code. + Error *string `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"` + File []*CodeGeneratorResponse_File `protobuf:"bytes,15,rep,name=file" json:"file,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CodeGeneratorResponse) Reset() { *m = CodeGeneratorResponse{} } +func (m *CodeGeneratorResponse) String() string { return proto.CompactTextString(m) } +func (*CodeGeneratorResponse) ProtoMessage() {} + +func (m *CodeGeneratorResponse) GetError() string { + if m != nil && m.Error != nil { + return *m.Error + } + return "" +} + +func (m *CodeGeneratorResponse) GetFile() []*CodeGeneratorResponse_File { + if m != nil { + return m.File + } + return nil +} + +// Represents a single generated file. +type CodeGeneratorResponse_File struct { + // The file name, relative to the output directory. The name must not + // contain "." or ".." components and must be relative, not be absolute (so, + // the file cannot lie outside the output directory). "/" must be used as + // the path separator, not "\". + // + // If the name is omitted, the content will be appended to the previous + // file. This allows the generator to break large files into small chunks, + // and allows the generated text to be streamed back to protoc so that large + // files need not reside completely in memory at one time. Note that as of + // this writing protoc does not optimize for this -- it will read the entire + // CodeGeneratorResponse before writing files to disk. + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // If non-empty, indicates that the named file should already exist, and the + // content here is to be inserted into that file at a defined insertion + // point. This feature allows a code generator to extend the output + // produced by another code generator. The original generator may provide + // insertion points by placing special annotations in the file that look + // like: + // @@protoc_insertion_point(NAME) + // The annotation can have arbitrary text before and after it on the line, + // which allows it to be placed in a comment. 
NAME should be replaced with + // an identifier naming the point -- this is what other generators will use + // as the insertion_point. Code inserted at this point will be placed + // immediately above the line containing the insertion point (thus multiple + // insertions to the same point will come out in the order they were added). + // The double-@ is intended to make it unlikely that the generated code + // could contain things that look like insertion points by accident. + // + // For example, the C++ code generator places the following line in the + // .pb.h files that it generates: + // // @@protoc_insertion_point(namespace_scope) + // This line appears within the scope of the file's package namespace, but + // outside of any particular class. Another plugin can then specify the + // insertion_point "namespace_scope" to generate additional classes or + // other declarations that should be placed in this scope. + // + // Note that if the line containing the insertion point begins with + // whitespace, the same whitespace will be added to every line of the + // inserted text. This is useful for languages like Python, where + // indentation matters. In these languages, the insertion point comment + // should be indented the same amount as any inserted code will need to be + // in order to work correctly in that context. + // + // The code generator that generates the initial file and the one which + // inserts into it must both run as part of a single invocation of protoc. + // Code generators are executed in the order in which they appear on the + // command line. + // + // If |insertion_point| is present, |name| must also be present. + InsertionPoint *string `protobuf:"bytes,2,opt,name=insertion_point" json:"insertion_point,omitempty"` + // The file contents. + Content *string `protobuf:"bytes,15,opt,name=content" json:"content,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CodeGeneratorResponse_File) Reset() { *m = CodeGeneratorResponse_File{} } +func (m *CodeGeneratorResponse_File) String() string { return proto.CompactTextString(m) } +func (*CodeGeneratorResponse_File) ProtoMessage() {} + +func (m *CodeGeneratorResponse_File) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *CodeGeneratorResponse_File) GetInsertionPoint() string { + if m != nil && m.InsertionPoint != nil { + return *m.InsertionPoint + } + return "" +} + +func (m *CodeGeneratorResponse_File) GetContent() string { + if m != nil && m.Content != nil { + return *m.Content + } + return "" +} + +func init() { + proto.RegisterType((*CodeGeneratorRequest)(nil), "google.protobuf.compiler.CodeGeneratorRequest") + proto.RegisterType((*CodeGeneratorResponse)(nil), "google.protobuf.compiler.CodeGeneratorResponse") + proto.RegisterType((*CodeGeneratorResponse_File)(nil), "google.protobuf.compiler.CodeGeneratorResponse.File") +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/sortkeys/sortkeys.go b/Godeps/_workspace/src/github.com/gogo/protobuf/sortkeys/sortkeys.go new file mode 100644 index 000000000000..c52878dd5965 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/sortkeys/sortkeys.go @@ -0,0 +1,99 @@ +// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. 
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package sortkeys
+
+import (
+ "sort"
+)
+
+func Strings(l []string) {
+ sort.Strings(l)
+}
+
+func Float64s(l []float64) {
+ sort.Float64s(l)
+}
+
+func Float32s(l []float32) {
+ sort.Sort(Float32Slice(l))
+}
+
+func Int64s(l []int64) {
+ sort.Sort(Int64Slice(l))
+}
+
+func Int32s(l []int32) {
+ sort.Sort(Int32Slice(l))
+}
+
+func Uint64s(l []uint64) {
+ sort.Sort(Uint64Slice(l))
+}
+
+func Uint32s(l []uint32) {
+ sort.Sort(Uint32Slice(l))
+}
+
+func Bools(l []bool) {
+ sort.Sort(BoolSlice(l))
+}
+
+type BoolSlice []bool
+
+func (p BoolSlice) Len() int { return len(p) }
+func (p BoolSlice) Less(i, j int) bool { return !p[i] && p[j] }
+func (p BoolSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+type Int64Slice []int64
+
+func (p Int64Slice) Len() int { return len(p) }
+func (p Int64Slice) Less(i, j int) bool { return p[i] < p[j] }
+func (p Int64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+type Int32Slice []int32
+
+func (p Int32Slice) Len() int { return len(p) }
+func (p Int32Slice) Less(i, j int) bool { return p[i] < p[j] }
+func (p Int32Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+type Uint64Slice []uint64
+
+func (p Uint64Slice) Len() int { return len(p) }
+func (p Uint64Slice) Less(i, j int) bool { return p[i] < p[j] }
+func (p Uint64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+type Uint32Slice []uint32
+
+func (p Uint32Slice) Len() int { return len(p) }
+func (p Uint32Slice) Less(i, j int) bool { return p[i] < p[j] }
+func (p Uint32Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+type Float32Slice []float32
+
+func (p Float32Slice) Len() int { return len(p) }
+func (p Float32Slice) Less(i, j int) bool { return p[i] < p[j] }
+func (p Float32Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/vanity/command/command.go b/Godeps/_workspace/src/github.com/gogo/protobuf/vanity/command/command.go
new file mode 100644
index 000000000000..2c6191477a84
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gogo/protobuf/vanity/command/command.go
@@ -0,0 +1,151 @@
+// Extensions for Protocol Buffers to create more go like structures.
+//
+// Copyright (c) 2015, Vastech SA (PTY) LTD.
All rights reserved. +// http://github.com/gogo/protobuf/gogoproto +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package command + +import ( + "io/ioutil" + "os" + + "github.com/gogo/protobuf/proto" + "github.com/gogo/protobuf/protoc-gen-gogo/generator" + plugin "github.com/gogo/protobuf/protoc-gen-gogo/plugin" + + _ "github.com/gogo/protobuf/plugin/grpc" + + _ "github.com/gogo/protobuf/plugin/defaultcheck" + _ "github.com/gogo/protobuf/plugin/description" + _ "github.com/gogo/protobuf/plugin/embedcheck" + _ "github.com/gogo/protobuf/plugin/enumstringer" + _ "github.com/gogo/protobuf/plugin/equal" + _ "github.com/gogo/protobuf/plugin/face" + _ "github.com/gogo/protobuf/plugin/gostring" + _ "github.com/gogo/protobuf/plugin/marshalto" + _ "github.com/gogo/protobuf/plugin/oneofcheck" + _ "github.com/gogo/protobuf/plugin/populate" + _ "github.com/gogo/protobuf/plugin/size" + _ "github.com/gogo/protobuf/plugin/stringer" + _ "github.com/gogo/protobuf/plugin/union" + _ "github.com/gogo/protobuf/plugin/unmarshal" + + "github.com/gogo/protobuf/plugin/testgen" + + "go/format" + "strings" +) + +func Read() *plugin.CodeGeneratorRequest { + g := generator.New() + data, err := ioutil.ReadAll(os.Stdin) + if err != nil { + g.Error(err, "reading input") + } + + if err := proto.Unmarshal(data, g.Request); err != nil { + g.Error(err, "parsing input proto") + } + + if len(g.Request.FileToGenerate) == 0 { + g.Fail("no files to generate") + } + return g.Request +} + +func Generate(req *plugin.CodeGeneratorRequest) *plugin.CodeGeneratorResponse { + // Begin by allocating a generator. The request and response structures are stored there + // so we can do error handling easily - the response structure contains the field to + // report failure. + g := generator.New() + g.Request = req + + g.CommandLineParameters(g.Request.GetParameter()) + + // Create a wrapped version of the Descriptors and EnumDescriptors that + // point to the file that defines them. 
+ g.WrapTypes() + + g.SetPackageNames() + g.BuildTypeNameMap() + + g.GenerateAllFiles() + + gtest := generator.New() + + data, err := proto.Marshal(req) + if err != nil { + g.Error(err, "failed to marshal modified proto") + } + if err := proto.Unmarshal(data, gtest.Request); err != nil { + g.Error(err, "parsing modified proto") + } + + if len(gtest.Request.FileToGenerate) == 0 { + gtest.Fail("no files to generate") + } + + gtest.CommandLineParameters(gtest.Request.GetParameter()) + + // Create a wrapped version of the Descriptors and EnumDescriptors that + // point to the file that defines them. + gtest.WrapTypes() + + gtest.SetPackageNames() + gtest.BuildTypeNameMap() + + gtest.GeneratePlugin(testgen.NewPlugin()) + + for i := 0; i < len(gtest.Response.File); i++ { + if strings.Contains(*gtest.Response.File[i].Content, `//These tests are generated by github.com/gogo/protobuf/plugin/testgen`) { + gtest.Response.File[i].Name = proto.String(strings.Replace(*gtest.Response.File[i].Name, ".pb.go", "pb_test.go", -1)) + g.Response.File = append(g.Response.File, gtest.Response.File[i]) + } + } + + for i := 0; i < len(g.Response.File); i++ { + formatted, err := format.Source([]byte(g.Response.File[i].GetContent())) + if err != nil { + g.Error(err, "go format error") + } + fmts := string(formatted) + g.Response.File[i].Content = &fmts + } + return g.Response +} + +func Write(resp *plugin.CodeGeneratorResponse) { + g := generator.New() + // Send back the results. + data, err := proto.Marshal(resp) + if err != nil { + g.Error(err, "failed to marshal output proto") + } + _, err = os.Stdout.Write(data) + if err != nil { + g.Error(err, "failed to write output proto") + } +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/vanity/enum.go b/Godeps/_workspace/src/github.com/gogo/protobuf/vanity/enum.go new file mode 100644 index 000000000000..13d089744143 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/vanity/enum.go @@ -0,0 +1,78 @@ +// Extensions for Protocol Buffers to create more go like structures. +// +// Copyright (c) 2015, Vastech SA (PTY) LTD. rights reserved. +// http://github.com/gogo/protobuf/gogoproto +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
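+
+// A minimal usage sketch of the enum helpers below, assuming a plugin main
+// that has already read the request via the command package vendored above:
+//
+//	req := command.Read()
+//	vanity.ForEachEnumInFiles(req.GetProtoFile(), vanity.TurnOffGoEnumPrefix)
+//	command.Write(command.Generate(req))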
+ +package vanity + +import ( + "github.com/gogo/protobuf/gogoproto" + "github.com/gogo/protobuf/proto" + descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" +) + +func EnumHasBoolExtension(enum *descriptor.EnumDescriptorProto, extension *proto.ExtensionDesc) bool { + if enum.Options == nil { + return false + } + value, err := proto.GetExtension(enum.Options, extension) + if err != nil { + return false + } + if value == nil { + return false + } + if value.(*bool) == nil { + return false + } + return true +} + +func SetBoolEnumOption(extension *proto.ExtensionDesc, value bool) func(enum *descriptor.EnumDescriptorProto) { + return func(enum *descriptor.EnumDescriptorProto) { + if EnumHasBoolExtension(enum, extension) { + return + } + if enum.Options == nil { + enum.Options = &descriptor.EnumOptions{} + } + if err := proto.SetExtension(enum.Options, extension, &value); err != nil { + panic(err) + } + } +} + +func TurnOffGoEnumPrefix(enum *descriptor.EnumDescriptorProto) { + SetBoolEnumOption(gogoproto.E_GoprotoEnumPrefix, false)(enum) +} + +func TurnOffGoEnumStringer(enum *descriptor.EnumDescriptorProto) { + SetBoolEnumOption(gogoproto.E_GoprotoEnumStringer, false)(enum) +} + +func TurnOnEnumStringer(enum *descriptor.EnumDescriptorProto) { + SetBoolEnumOption(gogoproto.E_EnumStringer, true)(enum) +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/vanity/field.go b/Godeps/_workspace/src/github.com/gogo/protobuf/vanity/field.go new file mode 100644 index 000000000000..a484d1e1c7a0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/vanity/field.go @@ -0,0 +1,83 @@ +// Extensions for Protocol Buffers to create more go like structures. +// +// Copyright (c) 2015, Vastech SA (PTY) LTD. rights reserved. +// http://github.com/gogo/protobuf/gogoproto +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
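+
+// A minimal sketch of the per-field helpers below, assuming req was read via
+// command.Read() and using the traversal helpers from vanity/foreach.go:
+//
+//	vanity.ForEachFieldInFilesExcludingExtensions(req.GetProtoFile(), vanity.TurnOffNullable)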
+ +package vanity + +import ( + "github.com/gogo/protobuf/gogoproto" + "github.com/gogo/protobuf/proto" + descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" +) + +func FieldHasBoolExtension(field *descriptor.FieldDescriptorProto, extension *proto.ExtensionDesc) bool { + if field.Options == nil { + return false + } + value, err := proto.GetExtension(field.Options, extension) + if err != nil { + return false + } + if value == nil { + return false + } + if value.(*bool) == nil { + return false + } + return true +} + +func SetBoolFieldOption(extension *proto.ExtensionDesc, value bool) func(field *descriptor.FieldDescriptorProto) { + return func(field *descriptor.FieldDescriptorProto) { + if FieldHasBoolExtension(field, extension) { + return + } + if field.Options == nil { + field.Options = &descriptor.FieldOptions{} + } + if err := proto.SetExtension(field.Options, extension, &value); err != nil { + panic(err) + } + } +} + +func TurnOffNullable(field *descriptor.FieldDescriptorProto) { + if field.IsRepeated() && !field.IsMessage() { + return + } + SetBoolFieldOption(gogoproto.E_Nullable, false)(field) +} + +func TurnOffNullableForNativeTypesWithoutDefaultsOnly(field *descriptor.FieldDescriptorProto) { + if field.IsRepeated() || field.IsMessage() { + return + } + if field.DefaultValue != nil { + return + } + SetBoolFieldOption(gogoproto.E_Nullable, false)(field) +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/vanity/file.go b/Godeps/_workspace/src/github.com/gogo/protobuf/vanity/file.go new file mode 100644 index 000000000000..cfe18f208bbc --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/vanity/file.go @@ -0,0 +1,170 @@ +// Extensions for Protocol Buffers to create more go like structures. +// +// Copyright (c) 2015, Vastech SA (PTY) LTD. All rights reserved. +// http://github.com/gogo/protobuf/gogoproto +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
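+
+// A minimal sketch of a gofast-style vanity plugin main built from the
+// file-level helpers below (the import paths are the vendored gogo packages;
+// the particular choice and order of helpers is illustrative only):
+//
+//	package main
+//
+//	import (
+//		"github.com/gogo/protobuf/vanity"
+//		"github.com/gogo/protobuf/vanity/command"
+//	)
+//
+//	func main() {
+//		req := command.Read()
+//		files := vanity.FilterFiles(req.GetProtoFile(), vanity.NotInPackageGoogleProtobuf)
+//		vanity.ForEachFile(files, vanity.TurnOnMarshalerAll)
+//		vanity.ForEachFile(files, vanity.TurnOnUnmarshalerAll)
+//		vanity.ForEachFile(files, vanity.TurnOnSizerAll)
+//		command.Write(command.Generate(req))
+//	}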
+ +package vanity + +import ( + "github.com/gogo/protobuf/gogoproto" + "github.com/gogo/protobuf/proto" + descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" + "strings" +) + +func NotInPackageGoogleProtobuf(file *descriptor.FileDescriptorProto) bool { + return !strings.HasPrefix(file.GetPackage(), "google.protobuf") +} + +func FilterFiles(files []*descriptor.FileDescriptorProto, f func(file *descriptor.FileDescriptorProto) bool) []*descriptor.FileDescriptorProto { + filtered := make([]*descriptor.FileDescriptorProto, 0, len(files)) + for i := range files { + if !f(files[i]) { + continue + } + filtered = append(filtered, files[i]) + } + return filtered +} + +func FileHasBoolExtension(file *descriptor.FileDescriptorProto, extension *proto.ExtensionDesc) bool { + if file.Options == nil { + return false + } + value, err := proto.GetExtension(file.Options, extension) + if err != nil { + return false + } + if value == nil { + return false + } + if value.(*bool) == nil { + return false + } + return true +} + +func SetBoolFileOption(extension *proto.ExtensionDesc, value bool) func(file *descriptor.FileDescriptorProto) { + return func(file *descriptor.FileDescriptorProto) { + if FileHasBoolExtension(file, extension) { + return + } + if file.Options == nil { + file.Options = &descriptor.FileOptions{} + } + if err := proto.SetExtension(file.Options, extension, &value); err != nil { + panic(err) + } + } +} + +func TurnOffGoGettersAll(file *descriptor.FileDescriptorProto) { + SetBoolFileOption(gogoproto.E_GoprotoGettersAll, false)(file) +} + +func TurnOffGoEnumPrefixAll(file *descriptor.FileDescriptorProto) { + SetBoolFileOption(gogoproto.E_GoprotoEnumPrefixAll, false)(file) +} + +func TurnOffGoStringerAll(file *descriptor.FileDescriptorProto) { + SetBoolFileOption(gogoproto.E_GoprotoStringerAll, false)(file) +} + +func TurnOnVerboseEqualAll(file *descriptor.FileDescriptorProto) { + SetBoolFileOption(gogoproto.E_VerboseEqualAll, true)(file) +} + +func TurnOnFaceAll(file *descriptor.FileDescriptorProto) { + SetBoolFileOption(gogoproto.E_FaceAll, true)(file) +} + +func TurnOnGoStringAll(file *descriptor.FileDescriptorProto) { + SetBoolFileOption(gogoproto.E_GostringAll, true)(file) +} + +func TurnOnPopulateAll(file *descriptor.FileDescriptorProto) { + SetBoolFileOption(gogoproto.E_PopulateAll, true)(file) +} + +func TurnOnStringerAll(file *descriptor.FileDescriptorProto) { + SetBoolFileOption(gogoproto.E_StringerAll, true)(file) +} + +func TurnOnEqualAll(file *descriptor.FileDescriptorProto) { + SetBoolFileOption(gogoproto.E_EqualAll, true)(file) +} + +func TurnOnDescriptionAll(file *descriptor.FileDescriptorProto) { + SetBoolFileOption(gogoproto.E_DescriptionAll, true)(file) +} + +func TurnOnTestGenAll(file *descriptor.FileDescriptorProto) { + SetBoolFileOption(gogoproto.E_TestgenAll, true)(file) +} + +func TurnOnBenchGenAll(file *descriptor.FileDescriptorProto) { + SetBoolFileOption(gogoproto.E_BenchgenAll, true)(file) +} + +func TurnOnMarshalerAll(file *descriptor.FileDescriptorProto) { + SetBoolFileOption(gogoproto.E_MarshalerAll, true)(file) +} + +func TurnOnUnmarshalerAll(file *descriptor.FileDescriptorProto) { + SetBoolFileOption(gogoproto.E_UnmarshalerAll, true)(file) +} + +func TurnOnSizerAll(file *descriptor.FileDescriptorProto) { + SetBoolFileOption(gogoproto.E_SizerAll, true)(file) +} + +func TurnOffGoEnumStringerAll(file *descriptor.FileDescriptorProto) { + SetBoolFileOption(gogoproto.E_GoprotoEnumStringerAll, false)(file) +} + +func TurnOnEnumStringerAll(file 
*descriptor.FileDescriptorProto) { + SetBoolFileOption(gogoproto.E_EnumStringerAll, true)(file) +} + +func TurnOnUnsafeUnmarshalerAll(file *descriptor.FileDescriptorProto) { + SetBoolFileOption(gogoproto.E_UnsafeUnmarshalerAll, true)(file) +} + +func TurnOnUnsafeMarshalerAll(file *descriptor.FileDescriptorProto) { + SetBoolFileOption(gogoproto.E_UnsafeMarshalerAll, true)(file) +} + +func TurnOffGoExtensionsMapAll(file *descriptor.FileDescriptorProto) { + SetBoolFileOption(gogoproto.E_GoprotoExtensionsMapAll, false)(file) +} + +func TurnOffGoUnrecognizedAll(file *descriptor.FileDescriptorProto) { + SetBoolFileOption(gogoproto.E_GoprotoUnrecognizedAll, false)(file) +} + +func TurnOffGogoImport(file *descriptor.FileDescriptorProto) { + SetBoolFileOption(gogoproto.E_GogoprotoImport, false)(file) +} diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/vanity/foreach.go b/Godeps/_workspace/src/github.com/gogo/protobuf/vanity/foreach.go new file mode 100644 index 000000000000..0133c9d2bd2a --- /dev/null +++ b/Godeps/_workspace/src/github.com/gogo/protobuf/vanity/foreach.go @@ -0,0 +1,125 @@ +// Extensions for Protocol Buffers to create more go like structures. +// +// Copyright (c) 2015, Vastech SA (PTY) LTD. All rights reserved. +// http://github.com/gogo/protobuf/gogoproto +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
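+
+// A minimal sketch of the traversal helpers below, assuming req was read via
+// command.Read() (TurnOnSizer is the per-message helper from vanity/msg.go):
+//
+//	files := vanity.OnlyProto2(req.GetProtoFile())
+//	vanity.ForEachMessageInFiles(files, vanity.TurnOnSizer)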
+
+package vanity
+
+import descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
+
+func ForEachFile(files []*descriptor.FileDescriptorProto, f func(file *descriptor.FileDescriptorProto)) {
+	for _, file := range files {
+		f(file)
+	}
+}
+
+func OnlyProto2(files []*descriptor.FileDescriptorProto) []*descriptor.FileDescriptorProto {
+	outs := make([]*descriptor.FileDescriptorProto, 0, len(files))
+	for i, file := range files {
+		if file.GetSyntax() == "proto3" {
+			continue
+		}
+		outs = append(outs, files[i])
+	}
+	return outs
+}
+
+func OnlyProto3(files []*descriptor.FileDescriptorProto) []*descriptor.FileDescriptorProto {
+	outs := make([]*descriptor.FileDescriptorProto, 0, len(files))
+	for i, file := range files {
+		if file.GetSyntax() != "proto3" {
+			continue
+		}
+		outs = append(outs, files[i])
+	}
+	return outs
+}
+
+func ForEachMessageInFiles(files []*descriptor.FileDescriptorProto, f func(msg *descriptor.DescriptorProto)) {
+	for _, file := range files {
+		ForEachMessage(file.MessageType, f)
+	}
+}
+
+func ForEachMessage(msgs []*descriptor.DescriptorProto, f func(msg *descriptor.DescriptorProto)) {
+	for _, msg := range msgs {
+		f(msg)
+		ForEachMessage(msg.NestedType, f)
+	}
+}
+
+func ForEachFieldInFilesExcludingExtensions(files []*descriptor.FileDescriptorProto, f func(field *descriptor.FieldDescriptorProto)) {
+	for _, file := range files {
+		ForEachFieldExcludingExtensions(file.MessageType, f)
+	}
+}
+
+func ForEachFieldInFiles(files []*descriptor.FileDescriptorProto, f func(field *descriptor.FieldDescriptorProto)) {
+	for _, file := range files {
+		for _, ext := range file.Extension {
+			f(ext)
+		}
+		ForEachField(file.MessageType, f)
+	}
+}
+
+func ForEachFieldExcludingExtensions(msgs []*descriptor.DescriptorProto, f func(field *descriptor.FieldDescriptorProto)) {
+	for _, msg := range msgs {
+		for _, field := range msg.Field {
+			f(field)
+		}
+		ForEachField(msg.NestedType, f)
+	}
+}
+
+func ForEachField(msgs []*descriptor.DescriptorProto, f func(field *descriptor.FieldDescriptorProto)) {
+	for _, msg := range msgs {
+		for _, field := range msg.Field {
+			f(field)
+		}
+		for _, ext := range msg.Extension {
+			f(ext)
+		}
+		ForEachField(msg.NestedType, f)
+	}
+}
+
+func ForEachEnumInFiles(files []*descriptor.FileDescriptorProto, f func(enum *descriptor.EnumDescriptorProto)) {
+	for _, file := range files {
+		for _, enum := range file.EnumType {
+			f(enum)
+		}
+	}
+}
+
+func ForEachEnum(msgs []*descriptor.DescriptorProto, f func(field *descriptor.EnumDescriptorProto)) {
+	for _, msg := range msgs {
+		for _, field := range msg.EnumType {
+			f(field)
+		}
+		ForEachEnum(msg.NestedType, f)
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/gogo/protobuf/vanity/msg.go b/Godeps/_workspace/src/github.com/gogo/protobuf/vanity/msg.go
new file mode 100644
index 000000000000..3954a1869b8e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/gogo/protobuf/vanity/msg.go
@@ -0,0 +1,138 @@
+// Extensions for Protocol Buffers to create more go like structures.
+//
+// Copyright (c) 2015, Vastech SA (PTY) LTD. All rights reserved.
+// http://github.com/gogo/protobuf/gogoproto
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package vanity
+
+import (
+	"github.com/gogo/protobuf/gogoproto"
+	"github.com/gogo/protobuf/proto"
+	descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
+)
+
+func MessageHasBoolExtension(msg *descriptor.DescriptorProto, extension *proto.ExtensionDesc) bool {
+	if msg.Options == nil {
+		return false
+	}
+	value, err := proto.GetExtension(msg.Options, extension)
+	if err != nil {
+		return false
+	}
+	if value == nil {
+		return false
+	}
+	if value.(*bool) == nil {
+		return false
+	}
+	return true
+}
+
+func SetBoolMessageOption(extension *proto.ExtensionDesc, value bool) func(msg *descriptor.DescriptorProto) {
+	return func(msg *descriptor.DescriptorProto) {
+		if MessageHasBoolExtension(msg, extension) {
+			return
+		}
+		if msg.Options == nil {
+			msg.Options = &descriptor.MessageOptions{}
+		}
+		if err := proto.SetExtension(msg.Options, extension, &value); err != nil {
+			panic(err)
+		}
+	}
+}
+
+func TurnOffGoGetters(msg *descriptor.DescriptorProto) {
+	SetBoolMessageOption(gogoproto.E_GoprotoGetters, false)(msg)
+}
+
+func TurnOffGoStringer(msg *descriptor.DescriptorProto) {
+	SetBoolMessageOption(gogoproto.E_GoprotoStringer, false)(msg)
+}
+
+func TurnOnVerboseEqual(msg *descriptor.DescriptorProto) {
+	SetBoolMessageOption(gogoproto.E_VerboseEqual, true)(msg)
+}
+
+func TurnOnFace(msg *descriptor.DescriptorProto) {
+	SetBoolMessageOption(gogoproto.E_Face, true)(msg)
+}
+
+func TurnOnGoString(msg *descriptor.DescriptorProto) {
+	SetBoolMessageOption(gogoproto.E_Gostring, true)(msg)
+}
+
+func TurnOnPopulate(msg *descriptor.DescriptorProto) {
+	SetBoolMessageOption(gogoproto.E_Populate, true)(msg)
+}
+
+func TurnOnStringer(msg *descriptor.DescriptorProto) {
+	SetBoolMessageOption(gogoproto.E_Stringer, true)(msg)
+}
+
+func TurnOnEqual(msg *descriptor.DescriptorProto) {
+	SetBoolMessageOption(gogoproto.E_Equal, true)(msg)
+}
+
+func TurnOnDescription(msg *descriptor.DescriptorProto) {
+	SetBoolMessageOption(gogoproto.E_Description, true)(msg)
+}
+
+func TurnOnTestGen(msg *descriptor.DescriptorProto) {
+	SetBoolMessageOption(gogoproto.E_Testgen, true)(msg)
+}
+
+func TurnOnBenchGen(msg *descriptor.DescriptorProto) {
+	SetBoolMessageOption(gogoproto.E_Benchgen, true)(msg)
+}
+
+func TurnOnMarshaler(msg *descriptor.DescriptorProto) {
+	SetBoolMessageOption(gogoproto.E_Marshaler, true)(msg)
+}
+
+func TurnOnUnmarshaler(msg *descriptor.DescriptorProto) {
+	SetBoolMessageOption(gogoproto.E_Unmarshaler, true)(msg)
+}
+
+func TurnOnSizer(msg *descriptor.DescriptorProto) {
SetBoolMessageOption(gogoproto.E_Sizer, true)(msg) +} + +func TurnOnUnsafeUnmarshaler(msg *descriptor.DescriptorProto) { + SetBoolMessageOption(gogoproto.E_UnsafeUnmarshaler, true)(msg) +} + +func TurnOnUnsafeMarshaler(msg *descriptor.DescriptorProto) { + SetBoolMessageOption(gogoproto.E_UnsafeMarshaler, true)(msg) +} + +func TurnOffGoExtensionsMap(msg *descriptor.DescriptorProto) { + SetBoolMessageOption(gogoproto.E_GoprotoExtensionsMap, false)(msg) +} + +func TurnOffGoUnrecognized(msg *descriptor.DescriptorProto) { + SetBoolMessageOption(gogoproto.E_GoprotoUnrecognized, false)(msg) +} diff --git a/Godeps/_workspace/src/github.com/jonboulle/clockwork/.gitignore b/Godeps/_workspace/src/github.com/jonboulle/clockwork/.gitignore new file mode 100644 index 000000000000..010c242bd8a9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jonboulle/clockwork/.gitignore @@ -0,0 +1,25 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test + +*.swp diff --git a/Godeps/_workspace/src/github.com/jonboulle/clockwork/.travis.yml b/Godeps/_workspace/src/github.com/jonboulle/clockwork/.travis.yml new file mode 100644 index 000000000000..6a363c70f86b --- /dev/null +++ b/Godeps/_workspace/src/github.com/jonboulle/clockwork/.travis.yml @@ -0,0 +1,3 @@ +language: go +go: + - 1.3 diff --git a/Godeps/_workspace/src/github.com/jonboulle/clockwork/LICENSE b/Godeps/_workspace/src/github.com/jonboulle/clockwork/LICENSE new file mode 100644 index 000000000000..5c304d1a4a7b --- /dev/null +++ b/Godeps/_workspace/src/github.com/jonboulle/clockwork/LICENSE @@ -0,0 +1,201 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/Godeps/_workspace/src/github.com/jonboulle/clockwork/README.md b/Godeps/_workspace/src/github.com/jonboulle/clockwork/README.md new file mode 100644 index 000000000000..d43a6c799a0c --- /dev/null +++ b/Godeps/_workspace/src/github.com/jonboulle/clockwork/README.md @@ -0,0 +1,61 @@ +clockwork +========= + +[![Build Status](https://travis-ci.org/jonboulle/clockwork.png?branch=master)](https://travis-ci.org/jonboulle/clockwork) +[![godoc](https://godoc.org/github.com/jonboulle/clockwork?status.svg)](http://godoc.org/github.com/jonboulle/clockwork) + +a simple fake clock for golang + +# Usage + +Replace uses of the `time` package with the `clockwork.Clock` interface instead. 
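+
+The interface is small; it mirrors the `Clock` definition in
+`clockwork.go`:
+
+```
+type Clock interface {
+	After(d time.Duration) <-chan time.Time
+	Sleep(d time.Duration)
+	Now() time.Time
+}
+```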
+
+For example, instead of using `time.Sleep` directly:
+
+```
+func my_func() {
+	time.Sleep(3 * time.Second)
+	do_something()
+}
+```
+
+inject a clock and use its `Sleep` method instead:
+
+```
+func my_func(clock clockwork.Clock) {
+	clock.Sleep(3 * time.Second)
+	do_something()
+}
+```
+
+Now you can easily test `my_func` with a `FakeClock`:
+
+```
+func TestMyFunc(t *testing.T) {
+	c := clockwork.NewFakeClock()
+
+	// Start our sleepy function in a goroutine, since it sleeps on the fake clock
+	go my_func(c)
+
+	// Ensure we wait until my_func is sleeping
+	c.BlockUntil(1)
+
+	assert_state()
+
+	// Advance the FakeClock forward in time
+	c.Advance(3 * time.Second)
+
+	assert_state()
+}
+```
+
+and in production builds, simply inject the real clock instead:
+```
+my_func(clockwork.NewRealClock())
+```
+
+See [example_test.go](example_test.go) for a full example.
+
+# Credits
+
+clockwork is inspired by @wickman's [threaded fake clock](https://gist.github.com/wickman/3840816), and the [Golang playground](http://blog.golang.org/playground#Faking time).
diff --git a/Godeps/_workspace/src/github.com/jonboulle/clockwork/clockwork.go b/Godeps/_workspace/src/github.com/jonboulle/clockwork/clockwork.go
new file mode 100644
index 000000000000..1f1045bc0d93
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/jonboulle/clockwork/clockwork.go
@@ -0,0 +1,164 @@
+package clockwork
+
+import (
+	"sync"
+	"time"
+)
+
+// Clock provides an interface that packages can use instead of directly
+// using the time package, so that chronology-related behavior can be tested
+type Clock interface {
+	After(d time.Duration) <-chan time.Time
+	Sleep(d time.Duration)
+	Now() time.Time
+}
+
+// FakeClock provides an interface for a clock which can be
+// manually advanced through time
+type FakeClock interface {
+	Clock
+	// Advance advances the FakeClock to a new point in time, ensuring any existing
+	// sleepers are notified appropriately before returning
+	Advance(d time.Duration)
+	// BlockUntil will block until the FakeClock has the given number of
+	// sleepers (callers of Sleep or After)
+	BlockUntil(n int)
+}
+
+// NewRealClock returns a Clock which simply delegates calls to the actual time
+// package; it should be used by packages in production.
+func NewRealClock() Clock {
+	return &realClock{}
+}
+
+// NewFakeClock returns a FakeClock implementation which can be
+// manually advanced through time for testing.
+func NewFakeClock() FakeClock {
+	return &fakeClock{
+		l: sync.RWMutex{},
+
+		// use a fixture that does not fulfill Time.IsZero()
+		time: time.Date(1900, time.January, 1, 0, 0, 0, 0, time.UTC),
+	}
+}
+
+type realClock struct{}
+
+func (rc *realClock) After(d time.Duration) <-chan time.Time {
+	return time.After(d)
+}
+
+func (rc *realClock) Sleep(d time.Duration) {
+	time.Sleep(d)
+}
+
+func (rc *realClock) Now() time.Time {
+	return time.Now()
+}
+
+type fakeClock struct {
+	sleepers []*sleeper
+	blockers []*blocker
+	time     time.Time
+
+	l sync.RWMutex
+}
+
+// sleeper represents a caller of After or Sleep
+type sleeper struct {
+	until time.Time
+	done  chan time.Time
+}
+
+// blocker represents a caller of BlockUntil
+type blocker struct {
+	count int
+	ch    chan struct{}
+}
+
+// After mimics time.After; it waits for the given duration to elapse on the
+// fakeClock, then sends the current time on the returned channel.
+func (fc *fakeClock) After(d time.Duration) <-chan time.Time {
+	fc.l.Lock()
+	defer fc.l.Unlock()
+	now := fc.time
+	done := make(chan time.Time, 1)
+	if d.Nanoseconds() == 0 {
+		// special case - trigger immediately
+		done <- now
+	} else {
+		// otherwise, add to the set of sleepers
+		s := &sleeper{
+			until: now.Add(d),
+			done:  done,
+		}
+		fc.sleepers = append(fc.sleepers, s)
+		// and notify any blockers
+		fc.blockers = notifyBlockers(fc.blockers, len(fc.sleepers))
+	}
+	return done
+}
+
+// notifyBlockers notifies all the blockers waiting until the
+// given number of sleepers are waiting on the fakeClock. It
+// returns an updated slice of blockers (i.e. those still waiting)
+func notifyBlockers(blockers []*blocker, count int) (newBlockers []*blocker) {
+	for _, b := range blockers {
+		if b.count == count {
+			close(b.ch)
+		} else {
+			newBlockers = append(newBlockers, b)
+		}
+	}
+	return
+}
+
+// Sleep blocks until the given duration has passed on the fakeClock
+func (fc *fakeClock) Sleep(d time.Duration) {
+	<-fc.After(d)
+}
+
+// Now returns the current time of the fakeClock
+func (fc *fakeClock) Now() time.Time {
+	fc.l.Lock()
+	defer fc.l.Unlock()
+	return fc.time
+}
+
+// Advance advances fakeClock to a new point in time, ensuring channels from any
+// previous invocations of After are notified appropriately before returning
+func (fc *fakeClock) Advance(d time.Duration) {
+	fc.l.Lock()
+	defer fc.l.Unlock()
+	end := fc.time.Add(d)
+	var newSleepers []*sleeper
+	for _, s := range fc.sleepers {
+		if end.Sub(s.until) >= 0 {
+			s.done <- end
+		} else {
+			newSleepers = append(newSleepers, s)
+		}
+	}
+	fc.sleepers = newSleepers
+	fc.blockers = notifyBlockers(fc.blockers, len(fc.sleepers))
+	fc.time = end
+}
+
+// BlockUntil will block until the fakeClock has the given number of sleepers
+// (callers of Sleep or After)
+func (fc *fakeClock) BlockUntil(n int) {
+	fc.l.Lock()
+	// Fast path: current number of sleepers is what we're looking for
+	if len(fc.sleepers) == n {
+		fc.l.Unlock()
+		return
+	}
+	// Otherwise, set up a new blocker
+	b := &blocker{
+		count: n,
+		ch:    make(chan struct{}),
+	}
+	fc.blockers = append(fc.blockers, b)
+	fc.l.Unlock()
+	<-b.ch
+}
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/.travis.yml b/Godeps/_workspace/src/golang.org/x/oauth2/.travis.yml
new file mode 100644
index 000000000000..a035125c3588
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/.travis.yml
@@ -0,0 +1,14 @@
+language: go
+
+go:
+  - 1.3
+  - 1.4
+
+install:
+  - export GOPATH="$HOME/gopath"
+  - mkdir -p "$GOPATH/src/golang.org/x"
+  - mv "$TRAVIS_BUILD_DIR" "$GOPATH/src/golang.org/x/oauth2"
+  - go get -v -t -d golang.org/x/oauth2/...
+
+script:
+  - go test -v golang.org/x/oauth2/...
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/AUTHORS b/Godeps/_workspace/src/golang.org/x/oauth2/AUTHORS
new file mode 100644
index 000000000000..15167cd746c5
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/AUTHORS
@@ -0,0 +1,3 @@
+# This source code refers to The Go Authors for copyright purposes.
+# The master list of authors is in the main Go distribution,
+# visible at http://tip.golang.org/AUTHORS.
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/CONTRIBUTING.md b/Godeps/_workspace/src/golang.org/x/oauth2/CONTRIBUTING.md
new file mode 100644
index 000000000000..46aa2b12dda8
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/CONTRIBUTING.md
@@ -0,0 +1,31 @@
+# Contributing to Go
+
+Go is an open source project.
+ +It is the work of hundreds of contributors. We appreciate your help! + + +## Filing issues + +When [filing an issue](https://github.com/golang/oauth2/issues), make sure to answer these five questions: + +1. What version of Go are you using (`go version`)? +2. What operating system and processor architecture are you using? +3. What did you do? +4. What did you expect to see? +5. What did you see instead? + +General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker. +The gophers there will answer or ask you to file an issue if you've tripped over a bug. + +## Contributing code + +Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html) +before sending patches. + +**We do not accept GitHub pull requests** +(we use [Gerrit](https://code.google.com/p/gerrit/) instead for code review). + +Unless otherwise noted, the Go source files are distributed under +the BSD-style license found in the LICENSE file. + diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/CONTRIBUTORS b/Godeps/_workspace/src/golang.org/x/oauth2/CONTRIBUTORS new file mode 100644 index 000000000000..1c4577e96806 --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/oauth2/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/LICENSE b/Godeps/_workspace/src/golang.org/x/oauth2/LICENSE new file mode 100644 index 000000000000..d02f24fd5288 --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/oauth2/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The oauth2 Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/README.md b/Godeps/_workspace/src/golang.org/x/oauth2/README.md
new file mode 100644
index 000000000000..0d5141733f57
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/README.md
@@ -0,0 +1,64 @@
+# OAuth2 for Go
+
+[![Build Status](https://travis-ci.org/golang/oauth2.svg?branch=master)](https://travis-ci.org/golang/oauth2)
+
+The oauth2 package contains a client implementation for the OAuth 2.0 spec.
+
+## Installation
+
+~~~~
+go get golang.org/x/oauth2
+~~~~
+
+See godoc for further documentation and examples.
+
+* [godoc.org/golang.org/x/oauth2](http://godoc.org/golang.org/x/oauth2)
+* [godoc.org/golang.org/x/oauth2/google](http://godoc.org/golang.org/x/oauth2/google)
+
+
+## App Engine
+
+In change 96e89be (March 2015) we removed the `oauth2.Context2` type in favor
+of the [`context.Context`](https://golang.org/x/net/context#Context) type from
+the `golang.org/x/net/context` package.
+
+This means it's no longer possible to use the "Classic App Engine"
+`appengine.Context` type with the `oauth2` package. (You're using
+Classic App Engine if you import the package `"appengine"`.)
+
+To work around this, you may use the new `"google.golang.org/appengine"`
+package. This package has almost the same API as the `"appengine"` package,
+but it can be fetched with `go get` and used on "Managed VMs" as well as
+Classic App Engine.
+
+See the [new `appengine` package's readme](https://github.com/golang/appengine#updating-a-go-app-engine-app)
+for information on updating your app.
+
+If you don't want to update your entire app to use the new App Engine packages,
+you may use both sets of packages in parallel, using only the new packages
+with the `oauth2` package.
+
+	import (
+		"golang.org/x/net/context"
+		"golang.org/x/oauth2"
+		"golang.org/x/oauth2/google"
+		newappengine "google.golang.org/appengine"
+		newurlfetch "google.golang.org/appengine/urlfetch"
+
+		"appengine"
+	)
+
+	func handler(w http.ResponseWriter, r *http.Request) {
+		var c appengine.Context = appengine.NewContext(r)
+		c.Infof("Logging a message with the old package")
+
+		var ctx context.Context = newappengine.NewContext(r)
+		client := &http.Client{
+			Transport: &oauth2.Transport{
+				Source: google.AppEngineTokenSource(ctx, "scope"),
+				Base:   &newurlfetch.Transport{Context: ctx},
+			},
+		}
+		client.Get("...")
+	}
+
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/client_appengine.go b/Godeps/_workspace/src/golang.org/x/oauth2/client_appengine.go
new file mode 100644
index 000000000000..4a554cb9bf69
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/client_appengine.go
@@ -0,0 +1,25 @@
+// Copyright 2014 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build appengine appenginevm
+
+// App Engine hooks.
+
+package oauth2
+
+import (
+	"net/http"
+
+	"golang.org/x/net/context"
+	"golang.org/x/oauth2/internal"
+	"google.golang.org/appengine/urlfetch"
+)
+
+func init() {
+	internal.RegisterContextClientFunc(contextClientAppEngine)
+}
+
+func contextClientAppEngine(ctx context.Context) (*http.Client, error) {
+	return urlfetch.Client(ctx), nil
+}
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/google/appengine.go b/Godeps/_workspace/src/golang.org/x/oauth2/google/appengine.go
new file mode 100644
index 000000000000..65dc347314d6
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/google/appengine.go
@@ -0,0 +1,83 @@
+// Copyright 2014 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package google
+
+import (
+	"sort"
+	"strings"
+	"sync"
+	"time"
+
+	"golang.org/x/net/context"
+	"golang.org/x/oauth2"
+)
+
+// Set at init time by appengine_hook.go. If nil, we're not on App Engine.
+var appengineTokenFunc func(c context.Context, scopes ...string) (token string, expiry time.Time, err error)
+
+// AppEngineTokenSource returns a token source that fetches tokens
+// issued to the current App Engine application's service account.
+// If you are implementing a 3-legged OAuth 2.0 flow on App Engine
+// that involves user accounts, see oauth2.Config instead.
+//
+// The provided context must have come from appengine.NewContext.
+func AppEngineTokenSource(ctx context.Context, scope ...string) oauth2.TokenSource {
+	if appengineTokenFunc == nil {
+		panic("google: AppEngineTokenSource can only be used on App Engine.")
+	}
+	scopes := append([]string{}, scope...)
+	sort.Strings(scopes)
+	return &appEngineTokenSource{
+		ctx:    ctx,
+		scopes: scopes,
+		key:    strings.Join(scopes, " "),
+	}
+}
+
+// aeTokens caches fetched tokens so they can be reused until they expire.
+var (
+	aeTokensMu sync.Mutex
+	aeTokens   = make(map[string]*tokenLock) // key is space-separated scopes
+)
+
+type tokenLock struct {
+	mu sync.Mutex // guards t; held while fetching or updating t
+	t  *oauth2.Token
+}
+
+type appEngineTokenSource struct {
+	ctx    context.Context
+	scopes []string
+	key    string // key into the aeTokens map; space-separated scopes
+}
+
+func (ts *appEngineTokenSource) Token() (*oauth2.Token, error) {
+	if appengineTokenFunc == nil {
+		panic("google: AppEngineTokenSource can only be used on App Engine.")
+	}
+
+	aeTokensMu.Lock()
+	tok, ok := aeTokens[ts.key]
+	if !ok {
+		tok = &tokenLock{}
+		aeTokens[ts.key] = tok
+	}
+	aeTokensMu.Unlock()
+
+	tok.mu.Lock()
+	defer tok.mu.Unlock()
+	if tok.t.Valid() {
+		return tok.t, nil
+	}
+	access, exp, err := appengineTokenFunc(ts.ctx, ts.scopes...)
+	if err != nil {
+		return nil, err
+	}
+	tok.t = &oauth2.Token{
+		AccessToken: access,
+		Expiry:      exp,
+	}
+	return tok.t, nil
+}
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/google/appengine_hook.go b/Godeps/_workspace/src/golang.org/x/oauth2/google/appengine_hook.go
new file mode 100644
index 000000000000..2f9b15432fa8
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/google/appengine_hook.go
@@ -0,0 +1,13 @@
+// Copyright 2015 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
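+
+// This file is only compiled on App Engine (note the build constraint
+// below). Its init points appengineTokenFunc at appengine.AccessToken,
+// which is what AppEngineTokenSource and DefaultTokenSource check for.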
+ +// +build appengine appenginevm + +package google + +import "google.golang.org/appengine" + +func init() { + appengineTokenFunc = appengine.AccessToken +} diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/google/default.go b/Godeps/_workspace/src/golang.org/x/oauth2/google/default.go new file mode 100644 index 000000000000..78f8089853f3 --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/oauth2/google/default.go @@ -0,0 +1,154 @@ +// Copyright 2015 The oauth2 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package google + +import ( + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "net/http" + "os" + "path/filepath" + "runtime" + + "golang.org/x/net/context" + "golang.org/x/oauth2" + "golang.org/x/oauth2/jwt" + "google.golang.org/cloud/compute/metadata" +) + +// DefaultClient returns an HTTP Client that uses the +// DefaultTokenSource to obtain authentication credentials. +// +// This client should be used when developing services +// that run on Google App Engine or Google Compute Engine +// and use "Application Default Credentials." +// +// For more details, see: +// https://developers.google.com/accounts/docs/application-default-credentials +// +func DefaultClient(ctx context.Context, scope ...string) (*http.Client, error) { + ts, err := DefaultTokenSource(ctx, scope...) + if err != nil { + return nil, err + } + return oauth2.NewClient(ctx, ts), nil +} + +// DefaultTokenSource is a token source that uses +// "Application Default Credentials". +// +// It looks for credentials in the following places, +// preferring the first location found: +// +// 1. A JSON file whose path is specified by the +// GOOGLE_APPLICATION_CREDENTIALS environment variable. +// 2. A JSON file in a location known to the gcloud command-line tool. +// On Windows, this is %APPDATA%/gcloud/application_default_credentials.json. +// On other systems, $HOME/.config/gcloud/application_default_credentials.json. +// 3. On Google App Engine it uses the appengine.AccessToken function. +// 4. On Google Compute Engine, it fetches credentials from the metadata server. +// (In this final case any provided scopes are ignored.) +// +// For more details, see: +// https://developers.google.com/accounts/docs/application-default-credentials +// +func DefaultTokenSource(ctx context.Context, scope ...string) (oauth2.TokenSource, error) { + // First, try the environment variable. + const envVar = "GOOGLE_APPLICATION_CREDENTIALS" + if filename := os.Getenv(envVar); filename != "" { + ts, err := tokenSourceFromFile(ctx, filename, scope) + if err != nil { + return nil, fmt.Errorf("google: error getting credentials using %v environment variable: %v", envVar, err) + } + return ts, nil + } + + // Second, try a well-known file. + filename := wellKnownFile() + _, err := os.Stat(filename) + if err == nil { + ts, err2 := tokenSourceFromFile(ctx, filename, scope) + if err2 == nil { + return ts, nil + } + err = err2 + } else if os.IsNotExist(err) { + err = nil // ignore this error + } + if err != nil { + return nil, fmt.Errorf("google: error getting credentials using well-known file (%v): %v", filename, err) + } + + // Third, if we're on Google App Engine use those credentials. + if appengineTokenFunc != nil { + return AppEngineTokenSource(ctx, scope...), nil + } + + // Fourth, if we're on Google Compute Engine use the metadata server. + if metadata.OnGCE() { + return ComputeTokenSource(""), nil + } + + // None are found; return helpful error. 
+ const url = "https://developers.google.com/accounts/docs/application-default-credentials" + return nil, fmt.Errorf("google: could not find default credentials. See %v for more information.", url) +} + +func wellKnownFile() string { + const f = "application_default_credentials.json" + if runtime.GOOS == "windows" { + return filepath.Join(os.Getenv("APPDATA"), "gcloud", f) + } + return filepath.Join(guessUnixHomeDir(), ".config", "gcloud", f) +} + +func tokenSourceFromFile(ctx context.Context, filename string, scopes []string) (oauth2.TokenSource, error) { + b, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + var d struct { + // Common fields + Type string + ClientID string `json:"client_id"` + + // User Credential fields + ClientSecret string `json:"client_secret"` + RefreshToken string `json:"refresh_token"` + + // Service Account fields + ClientEmail string `json:"client_email"` + PrivateKeyID string `json:"private_key_id"` + PrivateKey string `json:"private_key"` + } + if err := json.Unmarshal(b, &d); err != nil { + return nil, err + } + switch d.Type { + case "authorized_user": + cfg := &oauth2.Config{ + ClientID: d.ClientID, + ClientSecret: d.ClientSecret, + Scopes: append([]string{}, scopes...), // copy + Endpoint: Endpoint, + } + tok := &oauth2.Token{RefreshToken: d.RefreshToken} + return cfg.TokenSource(ctx, tok), nil + case "service_account": + cfg := &jwt.Config{ + Email: d.ClientEmail, + PrivateKey: []byte(d.PrivateKey), + Scopes: append([]string{}, scopes...), // copy + TokenURL: JWTTokenURL, + } + return cfg.TokenSource(ctx), nil + case "": + return nil, errors.New("missing 'type' field in credentials") + default: + return nil, fmt.Errorf("unknown credential type: %q", d.Type) + } +} diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/google/google.go b/Godeps/_workspace/src/golang.org/x/oauth2/google/google.go new file mode 100644 index 000000000000..2077d9866faf --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/oauth2/google/google.go @@ -0,0 +1,145 @@ +// Copyright 2014 The oauth2 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package google provides support for making OAuth2 authorized and +// authenticated HTTP requests to Google APIs. +// It supports the Web server flow, client-side credentials, service accounts, +// Google Compute Engine service accounts, and Google App Engine service +// accounts. +// +// For more information, please read +// https://developers.google.com/accounts/docs/OAuth2 +// and +// https://developers.google.com/accounts/docs/application-default-credentials. +package google + +import ( + "encoding/json" + "errors" + "fmt" + "strings" + "time" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/jwt" + "google.golang.org/cloud/compute/metadata" +) + +// Endpoint is Google's OAuth 2.0 endpoint. +var Endpoint = oauth2.Endpoint{ + AuthURL: "https://accounts.google.com/o/oauth2/auth", + TokenURL: "https://accounts.google.com/o/oauth2/token", +} + +// JWTTokenURL is Google's OAuth 2.0 token URL to use with the JWT flow. +const JWTTokenURL = "https://accounts.google.com/o/oauth2/token" + +// ConfigFromJSON uses a Google Developers Console client_credentials.json +// file to construct a config. +// client_credentials.json can be downloadable from https://console.developers.google.com, +// under "APIs & Auth" > "Credentials". 
Download the Web application credentials in the +// JSON format and provide the contents of the file as jsonKey. +func ConfigFromJSON(jsonKey []byte, scope ...string) (*oauth2.Config, error) { + type cred struct { + ClientID string `json:"client_id"` + ClientSecret string `json:"client_secret"` + RedirectURIs []string `json:"redirect_uris"` + AuthURI string `json:"auth_uri"` + TokenURI string `json:"token_uri"` + } + var j struct { + Web *cred `json:"web"` + Installed *cred `json:"installed"` + } + if err := json.Unmarshal(jsonKey, &j); err != nil { + return nil, err + } + var c *cred + switch { + case j.Web != nil: + c = j.Web + case j.Installed != nil: + c = j.Installed + default: + return nil, fmt.Errorf("oauth2/google: no credentials found") + } + if len(c.RedirectURIs) < 1 { + return nil, errors.New("oauth2/google: missing redirect URL in the client_credentials.json") + } + return &oauth2.Config{ + ClientID: c.ClientID, + ClientSecret: c.ClientSecret, + RedirectURL: c.RedirectURIs[0], + Scopes: scope, + Endpoint: oauth2.Endpoint{ + AuthURL: c.AuthURI, + TokenURL: c.TokenURI, + }, + }, nil +} + +// JWTConfigFromJSON uses a Google Developers service account JSON key file to read +// the credentials that authorize and authenticate the requests. +// Create a service account on "Credentials" page under "APIs & Auth" for your +// project at https://console.developers.google.com to download a JSON key file. +func JWTConfigFromJSON(jsonKey []byte, scope ...string) (*jwt.Config, error) { + var key struct { + Email string `json:"client_email"` + PrivateKey string `json:"private_key"` + } + if err := json.Unmarshal(jsonKey, &key); err != nil { + return nil, err + } + return &jwt.Config{ + Email: key.Email, + PrivateKey: []byte(key.PrivateKey), + Scopes: scope, + TokenURL: JWTTokenURL, + }, nil +} + +// ComputeTokenSource returns a token source that fetches access tokens +// from Google Compute Engine (GCE)'s metadata server. It's only valid to use +// this token source if your program is running on a GCE instance. +// If no account is specified, "default" is used. +// Further information about retrieving access tokens from the GCE metadata +// server can be found at https://cloud.google.com/compute/docs/authentication. 
+func ComputeTokenSource(account string) oauth2.TokenSource { + return oauth2.ReuseTokenSource(nil, computeSource{account: account}) +} + +type computeSource struct { + account string +} + +func (cs computeSource) Token() (*oauth2.Token, error) { + if !metadata.OnGCE() { + return nil, errors.New("oauth2/google: can't get a token from the metadata service; not running on GCE") + } + acct := cs.account + if acct == "" { + acct = "default" + } + tokenJSON, err := metadata.Get("instance/service-accounts/" + acct + "/token") + if err != nil { + return nil, err + } + var res struct { + AccessToken string `json:"access_token"` + ExpiresInSec int `json:"expires_in"` + TokenType string `json:"token_type"` + } + err = json.NewDecoder(strings.NewReader(tokenJSON)).Decode(&res) + if err != nil { + return nil, fmt.Errorf("oauth2/google: invalid token JSON from metadata: %v", err) + } + if res.ExpiresInSec == 0 || res.AccessToken == "" { + return nil, fmt.Errorf("oauth2/google: incomplete token received from metadata") + } + return &oauth2.Token{ + AccessToken: res.AccessToken, + TokenType: res.TokenType, + Expiry: time.Now().Add(time.Duration(res.ExpiresInSec) * time.Second), + }, nil +} diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/google/sdk.go b/Godeps/_workspace/src/golang.org/x/oauth2/google/sdk.go new file mode 100644 index 000000000000..01ba0ecb0084 --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/oauth2/google/sdk.go @@ -0,0 +1,168 @@ +// Copyright 2015 The oauth2 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package google + +import ( + "encoding/json" + "errors" + "fmt" + "net/http" + "os" + "os/user" + "path/filepath" + "runtime" + "strings" + "time" + + "golang.org/x/net/context" + "golang.org/x/oauth2" + "golang.org/x/oauth2/internal" +) + +type sdkCredentials struct { + Data []struct { + Credential struct { + ClientID string `json:"client_id"` + ClientSecret string `json:"client_secret"` + AccessToken string `json:"access_token"` + RefreshToken string `json:"refresh_token"` + TokenExpiry *time.Time `json:"token_expiry"` + } `json:"credential"` + Key struct { + Account string `json:"account"` + Scope string `json:"scope"` + } `json:"key"` + } +} + +// An SDKConfig provides access to tokens from an account already +// authorized via the Google Cloud SDK. +type SDKConfig struct { + conf oauth2.Config + initialToken *oauth2.Token +} + +// NewSDKConfig creates an SDKConfig for the given Google Cloud SDK +// account. If account is empty, the account currently active in +// Google Cloud SDK properties is used. +// Google Cloud SDK credentials must be created by running `gcloud auth` +// before using this function. +// The Google Cloud SDK is available at https://cloud.google.com/sdk/. 
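+//
+// A minimal use looks like this (a sketch; error handling elided):
+//
+//	cfg, err := google.NewSDKConfig("")
+//	client := cfg.Client(oauth2.NoContext)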
+func NewSDKConfig(account string) (*SDKConfig, error) {
+	configPath, err := sdkConfigPath()
+	if err != nil {
+		return nil, fmt.Errorf("oauth2/google: error getting SDK config path: %v", err)
+	}
+	credentialsPath := filepath.Join(configPath, "credentials")
+	f, err := os.Open(credentialsPath)
+	if err != nil {
+		return nil, fmt.Errorf("oauth2/google: failed to load SDK credentials: %v", err)
+	}
+	defer f.Close()
+
+	var c sdkCredentials
+	if err := json.NewDecoder(f).Decode(&c); err != nil {
+		return nil, fmt.Errorf("oauth2/google: failed to decode SDK credentials from %q: %v", credentialsPath, err)
+	}
+	if len(c.Data) == 0 {
+		return nil, fmt.Errorf("oauth2/google: no credentials found in %q, run `gcloud auth login` to create one", credentialsPath)
+	}
+	if account == "" {
+		propertiesPath := filepath.Join(configPath, "properties")
+		f, err := os.Open(propertiesPath)
+		if err != nil {
+			return nil, fmt.Errorf("oauth2/google: failed to load SDK properties: %v", err)
+		}
+		defer f.Close()
+		ini, err := internal.ParseINI(f)
+		if err != nil {
+			return nil, fmt.Errorf("oauth2/google: failed to parse SDK properties %q: %v", propertiesPath, err)
+		}
+		core, ok := ini["core"]
+		if !ok {
+			return nil, fmt.Errorf("oauth2/google: failed to find [core] section in %v", ini)
+		}
+		active, ok := core["account"]
+		if !ok {
+			return nil, fmt.Errorf("oauth2/google: failed to find %q attribute in %v", "account", core)
+		}
+		account = active
+	}
+
+	for _, d := range c.Data {
+		if account == "" || d.Key.Account == account {
+			if d.Credential.AccessToken == "" && d.Credential.RefreshToken == "" {
+				return nil, fmt.Errorf("oauth2/google: no token available for account %q", account)
+			}
+			var expiry time.Time
+			if d.Credential.TokenExpiry != nil {
+				expiry = *d.Credential.TokenExpiry
+			}
+			return &SDKConfig{
+				conf: oauth2.Config{
+					ClientID:     d.Credential.ClientID,
+					ClientSecret: d.Credential.ClientSecret,
+					Scopes:       strings.Split(d.Key.Scope, " "),
+					Endpoint:     Endpoint,
+					RedirectURL:  "oob",
+				},
+				initialToken: &oauth2.Token{
+					AccessToken:  d.Credential.AccessToken,
+					RefreshToken: d.Credential.RefreshToken,
+					Expiry:       expiry,
+				},
+			}, nil
+		}
+	}
+	return nil, fmt.Errorf("oauth2/google: no such credentials for account %q", account)
+}
+
+// Client returns an HTTP client using Google Cloud SDK credentials to
+// authorize requests. The token will auto-refresh as necessary. The
+// underlying http.RoundTripper will be obtained using the provided
+// context. The returned client and its Transport should not be
+// modified.
+func (c *SDKConfig) Client(ctx context.Context) *http.Client {
+	return &http.Client{
+		Transport: &oauth2.Transport{
+			Source: c.TokenSource(ctx),
+		},
+	}
+}
+
+// TokenSource returns an oauth2.TokenSource that retrieves tokens from
+// Google Cloud SDK credentials using the provided context.
+// It will return the current access token stored in the credentials,
+// and refresh it when it expires, but it won't update the credentials
+// with the new access token.
+func (c *SDKConfig) TokenSource(ctx context.Context) oauth2.TokenSource {
+	return c.conf.TokenSource(ctx, c.initialToken)
+}
+
+// Scopes are the OAuth 2.0 scopes the current account is authorized for.
+func (c *SDKConfig) Scopes() []string {
+	return c.conf.Scopes
+}
+
+// sdkConfigPath tries to guess where the gcloud config is located.
+// It can be overridden during tests.
+var sdkConfigPath = func() (string, error) {
+	if runtime.GOOS == "windows" {
+		return filepath.Join(os.Getenv("APPDATA"), "gcloud"), nil
+	}
+	homeDir := guessUnixHomeDir()
+	if homeDir == "" {
+		return "", errors.New("unable to get current user home directory: os/user lookup failed; $HOME is empty")
+	}
+	return filepath.Join(homeDir, ".config", "gcloud"), nil
+}
+
+func guessUnixHomeDir() string {
+	usr, err := user.Current()
+	if err == nil {
+		return usr.HomeDir
+	}
+	return os.Getenv("HOME")
+}
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/internal/oauth2.go b/Godeps/_workspace/src/golang.org/x/oauth2/internal/oauth2.go
new file mode 100644
index 000000000000..dc8ebfc4f76d
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/internal/oauth2.go
@@ -0,0 +1,76 @@
+// Copyright 2014 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package internal contains support packages for oauth2 package.
+package internal
+
+import (
+	"bufio"
+	"crypto/rsa"
+	"crypto/x509"
+	"encoding/pem"
+	"errors"
+	"fmt"
+	"io"
+	"strings"
+)
+
+// ParseKey converts the binary contents of a private key file
+// to an *rsa.PrivateKey. It detects whether the private key is in a
+// PEM container or not. If so, it extracts the private key
+// from the PEM container before conversion. It only supports PEM
+// containers with no passphrase.
+func ParseKey(key []byte) (*rsa.PrivateKey, error) {
+	block, _ := pem.Decode(key)
+	if block != nil {
+		key = block.Bytes
+	}
+	parsedKey, err := x509.ParsePKCS8PrivateKey(key)
+	if err != nil {
+		parsedKey, err = x509.ParsePKCS1PrivateKey(key)
+		if err != nil {
+			return nil, fmt.Errorf("private key should be a PEM or plain PKCS1 or PKCS8; parse error: %v", err)
+		}
+	}
+	parsed, ok := parsedKey.(*rsa.PrivateKey)
+	if !ok {
+		return nil, errors.New("private key is invalid")
+	}
+	return parsed, nil
+}
+
+// ParseINI parses simple INI-style content into a map of section name to
+// key/value pairs; keys that appear before any section header land in the
+// "" root section.
+func ParseINI(ini io.Reader) (map[string]map[string]string, error) {
+	result := map[string]map[string]string{
+		"": map[string]string{}, // root section
+	}
+	scanner := bufio.NewScanner(ini)
+	currentSection := ""
+	for scanner.Scan() {
+		line := strings.TrimSpace(scanner.Text())
+		if strings.HasPrefix(line, ";") {
+			// comment.
+			continue
+		}
+		if strings.HasPrefix(line, "[") && strings.HasSuffix(line, "]") {
+			currentSection = strings.TrimSpace(line[1 : len(line)-1])
+			result[currentSection] = map[string]string{}
+			continue
+		}
+		parts := strings.SplitN(line, "=", 2)
+		if len(parts) == 2 && parts[0] != "" {
+			result[currentSection][strings.TrimSpace(parts[0])] = strings.TrimSpace(parts[1])
+		}
+	}
+	if err := scanner.Err(); err != nil {
+		return nil, fmt.Errorf("error scanning ini: %v", err)
+	}
+	return result, nil
+}
+
+// CondVal returns nil if v is empty, and a single-element slice
+// containing v otherwise.
+func CondVal(v string) []string {
+	if v == "" {
+		return nil
+	}
+	return []string{v}
+}
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/internal/token.go b/Godeps/_workspace/src/golang.org/x/oauth2/internal/token.go
new file mode 100644
index 000000000000..ea6716c98c14
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/internal/token.go
@@ -0,0 +1,213 @@
+// Copyright 2014 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package internal contains support packages for oauth2 package.
+package internal
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"mime"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+	"time"
+
+	"golang.org/x/net/context"
+)
+
+// Token represents the credentials used to authorize
+// the requests to access protected resources on the OAuth 2.0
+// provider's backend.
+//
+// This type is a mirror of oauth2.Token and exists to break
+// an otherwise-circular dependency. Other internal packages
+// should convert this Token into an oauth2.Token before use.
+type Token struct {
+	// AccessToken is the token that authorizes and authenticates
+	// the requests.
+	AccessToken string
+
+	// TokenType is the type of token.
+	// The Type method returns either this or "Bearer", the default.
+	TokenType string
+
+	// RefreshToken is a token that's used by the application
+	// (as opposed to the user) to refresh the access token
+	// if it expires.
+	RefreshToken string
+
+	// Expiry is the optional expiration time of the access token.
+	//
+	// If zero, TokenSource implementations will reuse the same
+	// token forever and RefreshToken or equivalent
+	// mechanisms for that TokenSource will not be used.
+	Expiry time.Time
+
+	// Raw optionally contains extra metadata from the server
+	// when updating a token.
+	Raw interface{}
+}
+
+// tokenJSON is the struct representing the HTTP response from OAuth2
+// providers returning a token in JSON form.
+type tokenJSON struct {
+	AccessToken  string         `json:"access_token"`
+	TokenType    string         `json:"token_type"`
+	RefreshToken string         `json:"refresh_token"`
+	ExpiresIn    expirationTime `json:"expires_in"` // at least PayPal returns string, while most return number
+	Expires      expirationTime `json:"expires"`    // broken Facebook spelling of expires_in
+}
+
+func (e *tokenJSON) expiry() (t time.Time) {
+	if v := e.ExpiresIn; v != 0 {
+		return time.Now().Add(time.Duration(v) * time.Second)
+	}
+	if v := e.Expires; v != 0 {
+		return time.Now().Add(time.Duration(v) * time.Second)
+	}
+	return
+}
+
+type expirationTime int32
+
+func (e *expirationTime) UnmarshalJSON(b []byte) error {
+	var n json.Number
+	err := json.Unmarshal(b, &n)
+	if err != nil {
+		return err
+	}
+	i, err := n.Int64()
+	if err != nil {
+		return err
+	}
+	*e = expirationTime(i)
+	return nil
+}
+
+var brokenAuthHeaderProviders = []string{
+	"https://accounts.google.com/",
+	"https://www.googleapis.com/",
+	"https://github.com/",
+	"https://api.instagram.com/",
+	"https://www.douban.com/",
+	"https://api.dropbox.com/",
+	"https://api.soundcloud.com/",
+	"https://www.linkedin.com/",
+	"https://api.twitch.tv/",
+	"https://oauth.vk.com/",
+	"https://api.odnoklassniki.ru/",
+	"https://connect.stripe.com/",
+	"https://api.pushbullet.com/",
+	"https://oauth.sandbox.trainingpeaks.com/",
+	"https://oauth.trainingpeaks.com/",
+	"https://www.strava.com/oauth/",
+	"https://app.box.com/",
+	"https://test-sandbox.auth.corp.google.com",
+	"https://user.gini.net/",
+}
+
+// providerAuthHeaderWorks reports whether the OAuth2 server identified by the tokenURL
+// implements the OAuth2 spec correctly.
+// See https://code.google.com/p/goauth2/issues/detail?id=31 for background.
+// In summary:
+// - Reddit only accepts client secret in the Authorization header
+// - Dropbox accepts it in either the URL param or the Auth header, but not both.
+// - Google only accepts URL param (not spec compliant?), not Auth header +// - Stripe only accepts client secret in Auth header with Bearer method, not Basic +func providerAuthHeaderWorks(tokenURL string) bool { + for _, s := range brokenAuthHeaderProviders { + if strings.HasPrefix(tokenURL, s) { + // Some sites fail to implement the OAuth2 spec fully. + return false + } + } + + // Assume the provider implements the spec properly + // otherwise. We can add more exceptions as they're + // discovered. We will _not_ be adding configurable hooks + // to this package to let users select server bugs. + return true +} + +func RetrieveToken(ctx context.Context, ClientID, ClientSecret, TokenURL string, v url.Values) (*Token, error) { + hc, err := ContextClient(ctx) + if err != nil { + return nil, err + } + v.Set("client_id", ClientID) + bustedAuth := !providerAuthHeaderWorks(TokenURL) + if bustedAuth && ClientSecret != "" { + v.Set("client_secret", ClientSecret) + } + req, err := http.NewRequest("POST", TokenURL, strings.NewReader(v.Encode())) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + if !bustedAuth { + req.SetBasicAuth(ClientID, ClientSecret) + } + r, err := hc.Do(req) + if err != nil { + return nil, err + } + defer r.Body.Close() + body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1<<20)) + if err != nil { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) + } + if code := r.StatusCode; code < 200 || code > 299 { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v\nResponse: %s", r.Status, body) + } + + var token *Token + content, _, _ := mime.ParseMediaType(r.Header.Get("Content-Type")) + switch content { + case "application/x-www-form-urlencoded", "text/plain": + vals, err := url.ParseQuery(string(body)) + if err != nil { + return nil, err + } + token = &Token{ + AccessToken: vals.Get("access_token"), + TokenType: vals.Get("token_type"), + RefreshToken: vals.Get("refresh_token"), + Raw: vals, + } + e := vals.Get("expires_in") + if e == "" { + // TODO(jbd): Facebook's OAuth2 implementation is broken and + // returns expires_in field in expires. Remove the fallback to expires, + // when Facebook fixes their implementation. + e = vals.Get("expires") + } + expires, _ := strconv.Atoi(e) + if expires != 0 { + token.Expiry = time.Now().Add(time.Duration(expires) * time.Second) + } + default: + var tj tokenJSON + if err = json.Unmarshal(body, &tj); err != nil { + return nil, err + } + token = &Token{ + AccessToken: tj.AccessToken, + TokenType: tj.TokenType, + RefreshToken: tj.RefreshToken, + Expiry: tj.expiry(), + Raw: make(map[string]interface{}), + } + json.Unmarshal(body, &token.Raw) // no error checks for optional fields + } + // Don't overwrite `RefreshToken` with an empty value + // if this was a token refreshing request. + if token.RefreshToken == "" { + token.RefreshToken = v.Get("refresh_token") + } + return token, nil +} diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/internal/transport.go b/Godeps/_workspace/src/golang.org/x/oauth2/internal/transport.go new file mode 100644 index 000000000000..521e7b49e75b --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/oauth2/internal/transport.go @@ -0,0 +1,67 @@ +// Copyright 2014 The oauth2 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package internal contains support packages for oauth2 package. 
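To make the provider list in token.go above concrete, here is a small sketch (illustrative, not part of the vendored diff; demoAuthPlacement is a hypothetical in-package helper): for a provider on the broken list, RetrieveToken sends client_secret as a form value, and for everyone else it uses HTTP Basic Auth.

package internal

import "fmt"

// demoAuthPlacement is a hypothetical helper reporting where RetrieveToken
// would place the client secret for a given token endpoint.
func demoAuthPlacement(tokenURL string) string {
	if providerAuthHeaderWorks(tokenURL) {
		return "HTTP Basic Auth header"
	}
	return "client_secret form value"
}

func ExampleRetrieveToken() {
	fmt.Println(demoAuthPlacement("https://accounts.google.com/o/oauth2/token"))
	fmt.Println(demoAuthPlacement("https://example.com/oauth2/token"))
	// Output:
	// client_secret form value
	// HTTP Basic Auth header
}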
+package internal
+
+import (
+	"net/http"
+
+	"golang.org/x/net/context"
+)
+
+// HTTPClient is the context key to use with golang.org/x/net/context's
+// WithValue function to associate an *http.Client value with a context.
+var HTTPClient ContextKey
+
+// ContextKey is just an empty struct. It exists so HTTPClient can be
+// an immutable public variable with a unique type. It's immutable
+// because nobody else can create a ContextKey, being unexported.
+type ContextKey struct{}
+
+// ContextClientFunc is a func which tries to return an *http.Client
+// given a Context value. If it returns an error, the search stops
+// with that error. If it returns (nil, nil), the search continues
+// down the list of registered funcs.
+type ContextClientFunc func(context.Context) (*http.Client, error)
+
+var contextClientFuncs []ContextClientFunc
+
+func RegisterContextClientFunc(fn ContextClientFunc) {
+	contextClientFuncs = append(contextClientFuncs, fn)
+}
+
+func ContextClient(ctx context.Context) (*http.Client, error) {
+	for _, fn := range contextClientFuncs {
+		c, err := fn(ctx)
+		if err != nil {
+			return nil, err
+		}
+		if c != nil {
+			return c, nil
+		}
+	}
+	if hc, ok := ctx.Value(HTTPClient).(*http.Client); ok {
+		return hc, nil
+	}
+	return http.DefaultClient, nil
+}
+
+func ContextTransport(ctx context.Context) http.RoundTripper {
+	hc, err := ContextClient(ctx)
+	// This is a rare error case (somebody using nil on App Engine).
+	if err != nil {
+		return ErrorTransport{err}
+	}
+	return hc.Transport
+}
+
+// ErrorTransport returns the specified error on RoundTrip.
+// This RoundTripper should be used in rare error cases where
+// error handling can be postponed to response handling time.
+type ErrorTransport struct{ Err error }
+
+func (t ErrorTransport) RoundTrip(*http.Request) (*http.Response, error) {
+	return nil, t.Err
+}
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/jws/jws.go b/Godeps/_workspace/src/golang.org/x/oauth2/jws/jws.go
new file mode 100644
index 000000000000..396b3fac827d
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/jws/jws.go
@@ -0,0 +1,160 @@
+// Copyright 2014 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package jws provides encoding and decoding utilities for
+// signed JWS messages.
+package jws
+
+import (
+	"bytes"
+	"crypto"
+	"crypto/rand"
+	"crypto/rsa"
+	"crypto/sha256"
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"strings"
+	"time"
+)
+
+// ClaimSet contains information about the JWT signature including the
+// permissions being requested (scopes), the target of the token, the issuer,
+// the time the token was issued, and the lifetime of the token.
+type ClaimSet struct {
+	Iss   string `json:"iss"`             // email address of the client_id of the application making the access token request
+	Scope string `json:"scope,omitempty"` // space-delimited list of the permissions the application requests
+	Aud   string `json:"aud"`             // descriptor of the intended target of the assertion (Optional).
+	Exp   int64  `json:"exp"`             // the expiration time of the assertion
+	Iat   int64  `json:"iat"`             // the time the assertion was issued.
+	Typ   string `json:"typ,omitempty"`   // token type (Optional).
+
+	// Email for which the application is requesting delegated access (Optional).
+	Sub string `json:"sub,omitempty"`
+
+	// The old name of Sub. Client keeps setting Prn to be
+	// compliant with legacy OAuth 2.0 providers. (Optional)
+	Prn string `json:"prn,omitempty"`
+
+	// See http://tools.ietf.org/html/draft-jones-json-web-token-10#section-4.3
+	// This map is marshalled using custom code (see (c *ClaimSet) encode()).
+	PrivateClaims map[string]interface{} `json:"-"`
+
+	exp time.Time
+	iat time.Time
+}
+
+func (c *ClaimSet) encode() (string, error) {
+	if c.exp.IsZero() || c.iat.IsZero() {
+		// Reverting time back for machines whose time is not perfectly in sync.
+		// If client machine's time is in the future according
+		// to Google servers, an access token will not be issued.
+		now := time.Now().Add(-10 * time.Second)
+		c.iat = now
+		c.exp = now.Add(time.Hour)
+	}
+
+	c.Exp = c.exp.Unix()
+	c.Iat = c.iat.Unix()
+
+	b, err := json.Marshal(c)
+	if err != nil {
+		return "", err
+	}
+
+	if len(c.PrivateClaims) == 0 {
+		return base64Encode(b), nil
+	}
+
+	// Marshal private claim set and then append it to b.
+	prv, err := json.Marshal(c.PrivateClaims)
+	if err != nil {
+		return "", fmt.Errorf("jws: invalid map of private claims %v", c.PrivateClaims)
+	}
+
+	// Concatenate public and private claim JSON objects.
+	if !bytes.HasSuffix(b, []byte{'}'}) {
+		return "", fmt.Errorf("jws: invalid JSON %s", b)
+	}
+	if !bytes.HasPrefix(prv, []byte{'{'}) {
+		return "", fmt.Errorf("jws: invalid JSON %s", prv)
+	}
+	b[len(b)-1] = ','         // Replace closing curly brace with a comma.
+	b = append(b, prv[1:]...) // Append private claims.
+	return base64Encode(b), nil
+}
+
+// Header represents the header for the signed JWS payloads.
+type Header struct {
+	// The algorithm used for signature.
+	Algorithm string `json:"alg"`
+
+	// Represents the token type.
+	Typ string `json:"typ"`
+}
+
+func (h *Header) encode() (string, error) {
+	b, err := json.Marshal(h)
+	if err != nil {
+		return "", err
+	}
+	return base64Encode(b), nil
+}
+
+// Decode decodes a claim set from a JWS payload.
+func Decode(payload string) (*ClaimSet, error) {
+	// decode returned id token to get expiry
+	s := strings.Split(payload, ".")
+	if len(s) < 2 {
+		// TODO(jbd): Provide more context about the error.
+		return nil, errors.New("jws: invalid token received")
+	}
+	decoded, err := base64Decode(s[1])
+	if err != nil {
+		return nil, err
+	}
+	c := &ClaimSet{}
+	err = json.NewDecoder(bytes.NewBuffer(decoded)).Decode(c)
+	return c, err
+}
+
+// Encode encodes a signed JWS with provided header and claim set.
+func Encode(header *Header, c *ClaimSet, signature *rsa.PrivateKey) (string, error) {
+	head, err := header.encode()
+	if err != nil {
+		return "", err
+	}
+	cs, err := c.encode()
+	if err != nil {
+		return "", err
+	}
+	ss := fmt.Sprintf("%s.%s", head, cs)
+	h := sha256.New()
+	h.Write([]byte(ss))
+	b, err := rsa.SignPKCS1v15(rand.Reader, signature, crypto.SHA256, h.Sum(nil))
+	if err != nil {
+		return "", err
+	}
+	sig := base64Encode(b)
+	return fmt.Sprintf("%s.%s", ss, sig), nil
+}
+
+// base64Encode returns a Base64url encoded version of the input with any
+// trailing "=" stripped.
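A short round-trip sketch of the Encode/Decode pair above (illustrative, not part of the vendored diff; the issuer, scope, and audience strings are placeholders). Note that Decode only parses the claim set and does not verify the signature:

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"

	"golang.org/x/oauth2/jws"
)

func main() {
	// A throwaway key; real callers typically load one with internal.ParseKey.
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	claims := &jws.ClaimSet{
		Iss:   "svc@example.com", // placeholder issuer
		Scope: "scope-a scope-b", // placeholder scopes
		Aud:   "https://provider.example.com/token",
	}
	tok, err := jws.Encode(&jws.Header{Algorithm: "RS256", Typ: "JWT"}, claims, key)
	if err != nil {
		panic(err)
	}
	got, err := jws.Decode(tok)
	if err != nil {
		panic(err)
	}
	fmt.Println(got.Iss, got.Exp != 0) // svc@example.com true
}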
+func base64Encode(b []byte) string { + return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=") +} + +// base64Decode decodes the Base64url encoded string +func base64Decode(s string) ([]byte, error) { + // add back missing padding + switch len(s) % 4 { + case 2: + s += "==" + case 3: + s += "=" + } + return base64.URLEncoding.DecodeString(s) +} diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/jwt/jwt.go b/Godeps/_workspace/src/golang.org/x/oauth2/jwt/jwt.go new file mode 100644 index 000000000000..205d23ed4387 --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/oauth2/jwt/jwt.go @@ -0,0 +1,147 @@ +// Copyright 2014 The oauth2 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package jwt implements the OAuth 2.0 JSON Web Token flow, commonly +// known as "two-legged OAuth 2.0". +// +// See: https://tools.ietf.org/html/draft-ietf-oauth-jwt-bearer-12 +package jwt + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strings" + "time" + + "golang.org/x/net/context" + "golang.org/x/oauth2" + "golang.org/x/oauth2/internal" + "golang.org/x/oauth2/jws" +) + +var ( + defaultGrantType = "urn:ietf:params:oauth:grant-type:jwt-bearer" + defaultHeader = &jws.Header{Algorithm: "RS256", Typ: "JWT"} +) + +// Config is the configuration for using JWT to fetch tokens, +// commonly known as "two-legged OAuth 2.0". +type Config struct { + // Email is the OAuth client identifier used when communicating with + // the configured OAuth provider. + Email string + + // PrivateKey contains the contents of an RSA private key or the + // contents of a PEM file that contains a private key. The provided + // private key is used to sign JWT payloads. + // PEM containers with a passphrase are not supported. + // Use the following command to convert a PKCS 12 file into a PEM. + // + // $ openssl pkcs12 -in key.p12 -out key.pem -nodes + // + PrivateKey []byte + + // Subject is the optional user to impersonate. + Subject string + + // Scopes optionally specifies a list of requested permission scopes. + Scopes []string + + // TokenURL is the endpoint required to complete the 2-legged JWT flow. + TokenURL string +} + +// TokenSource returns a JWT TokenSource using the configuration +// in c and the HTTP client from the provided context. +func (c *Config) TokenSource(ctx context.Context) oauth2.TokenSource { + return oauth2.ReuseTokenSource(nil, jwtSource{ctx, c}) +} + +// Client returns an HTTP client wrapping the context's +// HTTP transport and adding Authorization headers with tokens +// obtained from c. +// +// The returned client and its Transport should not be modified. +func (c *Config) Client(ctx context.Context) *http.Client { + return oauth2.NewClient(ctx, c.TokenSource(ctx)) +} + +// jwtSource is a source that always does a signed JWT request for a token. +// It should typically be wrapped with a reuseTokenSource. +type jwtSource struct { + ctx context.Context + conf *Config +} + +func (js jwtSource) Token() (*oauth2.Token, error) { + pk, err := internal.ParseKey(js.conf.PrivateKey) + if err != nil { + return nil, err + } + hc := oauth2.NewClient(js.ctx, nil) + claimSet := &jws.ClaimSet{ + Iss: js.conf.Email, + Scope: strings.Join(js.conf.Scopes, " "), + Aud: js.conf.TokenURL, + } + if subject := js.conf.Subject; subject != "" { + claimSet.Sub = subject + // prn is the old name of sub. Keep setting it + // to be compatible with legacy OAuth 2.0 providers. 
+ claimSet.Prn = subject + } + payload, err := jws.Encode(defaultHeader, claimSet, pk) + if err != nil { + return nil, err + } + v := url.Values{} + v.Set("grant_type", defaultGrantType) + v.Set("assertion", payload) + resp, err := hc.PostForm(js.conf.TokenURL, v) + if err != nil { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) + } + defer resp.Body.Close() + body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) + if err != nil { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) + } + if c := resp.StatusCode; c < 200 || c > 299 { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v\nResponse: %s", resp.Status, body) + } + // tokenRes is the JSON response body. + var tokenRes struct { + AccessToken string `json:"access_token"` + TokenType string `json:"token_type"` + IDToken string `json:"id_token"` + ExpiresIn int64 `json:"expires_in"` // relative seconds from now + } + if err := json.Unmarshal(body, &tokenRes); err != nil { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) + } + token := &oauth2.Token{ + AccessToken: tokenRes.AccessToken, + TokenType: tokenRes.TokenType, + } + raw := make(map[string]interface{}) + json.Unmarshal(body, &raw) // no error checks for optional fields + token = token.WithExtra(raw) + + if secs := tokenRes.ExpiresIn; secs > 0 { + token.Expiry = time.Now().Add(time.Duration(secs) * time.Second) + } + if v := tokenRes.IDToken; v != "" { + // decode returned id token to get expiry + claimSet, err := jws.Decode(v) + if err != nil { + return nil, fmt.Errorf("oauth2: error decoding JWT token: %v", err) + } + token.Expiry = time.Unix(claimSet.Exp, 0) + } + return token, nil +} diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/oauth2.go b/Godeps/_workspace/src/golang.org/x/oauth2/oauth2.go new file mode 100644 index 000000000000..dfcf238d2304 --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/oauth2/oauth2.go @@ -0,0 +1,325 @@ +// Copyright 2014 The oauth2 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package oauth2 provides support for making +// OAuth2 authorized and authenticated HTTP requests. +// It can additionally grant authorization with Bearer JWT. +package oauth2 + +import ( + "bytes" + "errors" + "net/http" + "net/url" + "strings" + "sync" + + "golang.org/x/net/context" + "golang.org/x/oauth2/internal" +) + +// NoContext is the default context you should supply if not using +// your own context.Context (see https://golang.org/x/net/context). +var NoContext = context.TODO() + +// Config describes a typical 3-legged OAuth2 flow, with both the +// client application information and the server's endpoint URLs. +type Config struct { + // ClientID is the application's ID. + ClientID string + + // ClientSecret is the application's secret. + ClientSecret string + + // Endpoint contains the resource server's token endpoint + // URLs. These are constants specific to each server and are + // often available via site-specific packages, such as + // google.Endpoint or github.Endpoint. + Endpoint Endpoint + + // RedirectURL is the URL to redirect users going through + // the OAuth flow, after the resource owner's URLs. + RedirectURL string + + // Scope specifies optional requested permissions. + Scopes []string +} + +// A TokenSource is anything that can return a token. +type TokenSource interface { + // Token returns a token or an error. + // Token must be safe for concurrent use by multiple goroutines. 
+	// The returned Token must not be modified.
+	Token() (*Token, error)
+}
+
+// Endpoint contains the OAuth 2.0 provider's authorization and token
+// endpoint URLs.
+type Endpoint struct {
+	AuthURL  string
+	TokenURL string
+}
+
+var (
+	// AccessTypeOnline and AccessTypeOffline are options passed
+	// to the Options.AuthCodeURL method. They modify the
+	// "access_type" field that gets sent in the URL returned by
+	// AuthCodeURL.
+	//
+	// Online is the default if neither is specified. If your
+	// application needs to refresh access tokens when the user
+	// is not present at the browser, then use offline. This will
+	// result in your application obtaining a refresh token the
+	// first time your application exchanges an authorization
+	// code for a user.
+	AccessTypeOnline  AuthCodeOption = SetAuthURLParam("access_type", "online")
+	AccessTypeOffline AuthCodeOption = SetAuthURLParam("access_type", "offline")
+
+	// ApprovalForce forces the users to view the consent dialog
+	// and confirm the permissions request at the URL returned
+	// from AuthCodeURL, even if they've already done so.
+	ApprovalForce AuthCodeOption = SetAuthURLParam("approval_prompt", "force")
+)
+
+// An AuthCodeOption is passed to Config.AuthCodeURL.
+type AuthCodeOption interface {
+	setValue(url.Values)
+}
+
+type setParam struct{ k, v string }
+
+func (p setParam) setValue(m url.Values) { m.Set(p.k, p.v) }
+
+// SetAuthURLParam builds an AuthCodeOption which passes key/value parameters
+// to a provider's authorization endpoint.
+func SetAuthURLParam(key, value string) AuthCodeOption {
+	return setParam{key, value}
+}
+
+// AuthCodeURL returns a URL to OAuth 2.0 provider's consent page
+// that asks for permissions for the required scopes explicitly.
+//
+// State is a token to protect the user from CSRF attacks. You must
+// always provide a non-zero string and validate that it matches the
+// state query parameter on your redirect callback.
+// See http://tools.ietf.org/html/rfc6749#section-10.12 for more info.
+//
+// Opts may include AccessTypeOnline or AccessTypeOffline, as well
+// as ApprovalForce.
+func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string {
+	var buf bytes.Buffer
+	buf.WriteString(c.Endpoint.AuthURL)
+	v := url.Values{
+		"response_type": {"code"},
+		"client_id":     {c.ClientID},
+		"redirect_uri":  internal.CondVal(c.RedirectURL),
+		"scope":         internal.CondVal(strings.Join(c.Scopes, " ")),
+		"state":         internal.CondVal(state),
+	}
+	for _, opt := range opts {
+		opt.setValue(v)
+	}
+	if strings.Contains(c.Endpoint.AuthURL, "?") {
+		buf.WriteByte('&')
+	} else {
+		buf.WriteByte('?')
+	}
+	buf.WriteString(v.Encode())
+	return buf.String()
+}
+
+// PasswordCredentialsToken converts a resource owner username and password
+// pair into a token.
+//
+// Per the RFC, this grant type should only be used "when there is a high
+// degree of trust between the resource owner and the client (e.g., the client
+// is part of the device operating system or a highly privileged application),
+// and when other authorization grant types are not available."
+// See https://tools.ietf.org/html/rfc6749#section-4.3 for more info.
+//
+// The HTTP client to use is derived from the context.
+// If nil, http.DefaultClient is used.
+func (c *Config) PasswordCredentialsToken(ctx context.Context, username, password string) (*Token, error) {
+	return retrieveToken(ctx, c, url.Values{
+		"grant_type": {"password"},
+		"username":   {username},
+		"password":   {password},
+		"scope":      internal.CondVal(strings.Join(c.Scopes, " ")),
+	})
+}
+
+// Exchange converts an authorization code into a token.
+//
+// It is used after a resource provider redirects the user back
+// to the Redirect URI (the URL obtained from AuthCodeURL).
+//
+// The HTTP client to use is derived from the context.
+// If a client is not provided via the context, http.DefaultClient is used.
+//
+// The code will be in the *http.Request.FormValue("code"). Before
+// calling Exchange, be sure to validate FormValue("state").
+func (c *Config) Exchange(ctx context.Context, code string) (*Token, error) {
+	return retrieveToken(ctx, c, url.Values{
+		"grant_type":   {"authorization_code"},
+		"code":         {code},
+		"redirect_uri": internal.CondVal(c.RedirectURL),
+		"scope":        internal.CondVal(strings.Join(c.Scopes, " ")),
+	})
+}
+
+// Client returns an HTTP client using the provided token.
+// The token will auto-refresh as necessary. The underlying
+// HTTP transport will be obtained using the provided context.
+// The returned client and its Transport should not be modified.
+func (c *Config) Client(ctx context.Context, t *Token) *http.Client {
+	return NewClient(ctx, c.TokenSource(ctx, t))
+}
+
+// TokenSource returns a TokenSource that returns t until t expires,
+// automatically refreshing it as necessary using the provided context.
+//
+// Most users will use Config.Client instead.
+func (c *Config) TokenSource(ctx context.Context, t *Token) TokenSource {
+	tkr := &tokenRefresher{
+		ctx:  ctx,
+		conf: c,
+	}
+	if t != nil {
+		tkr.refreshToken = t.RefreshToken
+	}
+	return &reuseTokenSource{
+		t:   t,
+		new: tkr,
+	}
+}
+
+// tokenRefresher is a TokenSource that makes "grant_type"=="refresh_token"
+// HTTP requests to renew a token using a RefreshToken.
+type tokenRefresher struct {
+	ctx          context.Context // used to get HTTP requests
+	conf         *Config
+	refreshToken string
+}
+
+// WARNING: Token is not safe for concurrent access, as it
+// updates the tokenRefresher's refreshToken field.
+// Within this package, it is used by reuseTokenSource which
+// synchronizes calls to this method with its own mutex.
+func (tf *tokenRefresher) Token() (*Token, error) {
+	if tf.refreshToken == "" {
+		return nil, errors.New("oauth2: token expired and refresh token is not set")
+	}
+
+	tk, err := retrieveToken(tf.ctx, tf.conf, url.Values{
+		"grant_type":    {"refresh_token"},
+		"refresh_token": {tf.refreshToken},
+	})
+
+	if err != nil {
+		return nil, err
+	}
+	if tf.refreshToken != tk.RefreshToken {
+		tf.refreshToken = tk.RefreshToken
+	}
+	return tk, err
+}
+
+// reuseTokenSource is a TokenSource that holds a single token in memory
+// and validates its expiry before each call to retrieve it with
+// Token. If it's expired, it will be auto-refreshed using the
+// new TokenSource.
+type reuseTokenSource struct {
+	new TokenSource // called when t is expired.
+
+	mu sync.Mutex // guards t
+	t  *Token
+}
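Putting the Config methods above together, a sketch of the usual three-legged flow (illustrative, not part of the vendored diff; every URL and credential below is a placeholder):

package main

import (
	"fmt"

	"golang.org/x/oauth2"
)

func main() {
	conf := &oauth2.Config{
		ClientID:     "CLIENT_ID",     // placeholder
		ClientSecret: "CLIENT_SECRET", // placeholder
		RedirectURL:  "https://app.example.com/callback",
		Scopes:       []string{"profile"},
		Endpoint: oauth2.Endpoint{
			AuthURL:  "https://provider.example.com/oauth2/auth",
			TokenURL: "https://provider.example.com/oauth2/token",
		},
	}

	// 1. Redirect the user to the consent page. The state value must be
	// validated against FormValue("state") on the callback (CSRF).
	fmt.Println("visit:", conf.AuthCodeURL("state-token", oauth2.AccessTypeOffline))

	// 2. On the callback, exchange the code for a token and build an
	// auto-refreshing client (commented out here because it performs
	// network I/O against the placeholder endpoint):
	//
	//	tok, err := conf.Exchange(oauth2.NoContext, "authorization-code")
	//	client := conf.Client(oauth2.NoContext, tok)
}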
+// Token returns the current token if it's still valid, else will
+// refresh the current token (using the context of the underlying
+// TokenSource for HTTP client information) and return the new one.
+func (s *reuseTokenSource) Token() (*Token, error) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if s.t.Valid() {
+		return s.t, nil
+	}
+	t, err := s.new.Token()
+	if err != nil {
+		return nil, err
+	}
+	s.t = t
+	return t, nil
+}
+
+// StaticTokenSource returns a TokenSource that always returns the same token.
+// Because the provided token t is never refreshed, StaticTokenSource is only
+// useful for tokens that never expire.
+func StaticTokenSource(t *Token) TokenSource {
+	return staticTokenSource{t}
+}
+
+// staticTokenSource is a TokenSource that always returns the same Token.
+type staticTokenSource struct {
+	t *Token
+}
+
+func (s staticTokenSource) Token() (*Token, error) {
+	return s.t, nil
+}
+
+// HTTPClient is the context key to use with golang.org/x/net/context's
+// WithValue function to associate an *http.Client value with a context.
+var HTTPClient internal.ContextKey
+
+// NewClient creates an *http.Client from a Context and TokenSource.
+// The returned client is not valid beyond the lifetime of the context.
+//
+// As a special case, if src is nil, a non-OAuth2 client is returned
+// using the provided context. This exists to support related OAuth2
+// packages.
+func NewClient(ctx context.Context, src TokenSource) *http.Client {
+	if src == nil {
+		c, err := internal.ContextClient(ctx)
+		if err != nil {
+			return &http.Client{Transport: internal.ErrorTransport{err}}
+		}
+		return c
+	}
+	return &http.Client{
+		Transport: &Transport{
+			Base:   internal.ContextTransport(ctx),
+			Source: ReuseTokenSource(nil, src),
+		},
+	}
+}
+
+// ReuseTokenSource returns a TokenSource which repeatedly returns the
+// same token as long as it's valid, starting with t.
+// When its cached token is invalid, a new token is obtained from src.
+//
+// ReuseTokenSource is typically used to reuse tokens from a cache
+// (such as a file on disk) between runs of a program, rather than
+// obtaining new tokens unnecessarily.
+//
+// The initial token t may be nil, in which case the TokenSource is
+// wrapped in a caching version if it isn't one already. This also
+// means it's always safe to wrap ReuseTokenSource around any other
+// TokenSource without adverse effects.
+func ReuseTokenSource(t *Token, src TokenSource) TokenSource {
+	// Don't wrap a reuseTokenSource in itself. That would work,
+	// but cause an unnecessary number of mutex operations.
+	// Just build the equivalent one.
+	if rt, ok := src.(*reuseTokenSource); ok {
+		if t == nil {
+			// Just use it directly.
+			return rt
+		}
+		src = rt.new
+	}
+	return &reuseTokenSource{
+		t:   t,
+		new: src,
+	}
+}
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/token.go b/Godeps/_workspace/src/golang.org/x/oauth2/token.go
new file mode 100644
index 000000000000..ebbdddbdceb4
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/token.go
@@ -0,0 +1,143 @@
+// Copyright 2014 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package oauth2
+
+import (
+	"net/http"
+	"net/url"
+	"strings"
+	"time"
+
+	"golang.org/x/net/context"
+	"golang.org/x/oauth2/internal"
+)
+
+// expiryDelta determines how much earlier a token should be considered
+// expired than its actual expiration time. It is used to avoid late
+// expirations due to client-server time mismatches.
+const expiryDelta = 10 * time.Second
+
+// Token represents the credentials used to authorize
+// the requests to access protected resources on the OAuth 2.0
+// provider's backend.
+//
+// Most users of this package should not access fields of Token
+// directly. They're exported mostly for use by related packages
+// implementing derivative OAuth2 flows.
+type Token struct {
+	// AccessToken is the token that authorizes and authenticates
+	// the requests.
+	AccessToken string `json:"access_token"`
+
+	// TokenType is the type of token.
+	// The Type method returns either this or "Bearer", the default.
+	TokenType string `json:"token_type,omitempty"`
+
+	// RefreshToken is a token that's used by the application
+	// (as opposed to the user) to refresh the access token
+	// if it expires.
+	RefreshToken string `json:"refresh_token,omitempty"`
+
+	// Expiry is the optional expiration time of the access token.
+	//
+	// If zero, TokenSource implementations will reuse the same
+	// token forever and RefreshToken or equivalent
+	// mechanisms for that TokenSource will not be used.
+	Expiry time.Time `json:"expiry,omitempty"`
+
+	// raw optionally contains extra metadata from the server
+	// when updating a token.
+	raw interface{}
+}
+
+// Type returns t.TokenType if non-empty, else "Bearer".
+func (t *Token) Type() string {
+	if strings.EqualFold(t.TokenType, "bearer") {
+		return "Bearer"
+	}
+	if strings.EqualFold(t.TokenType, "mac") {
+		return "MAC"
+	}
+	if strings.EqualFold(t.TokenType, "basic") {
+		return "Basic"
+	}
+	if t.TokenType != "" {
+		return t.TokenType
+	}
+	return "Bearer"
+}
+
+// SetAuthHeader sets the Authorization header to r using the access
+// token in t.
+//
+// This method is unnecessary when using Transport or an HTTP Client
+// returned by this package.
+func (t *Token) SetAuthHeader(r *http.Request) {
+	r.Header.Set("Authorization", t.Type()+" "+t.AccessToken)
+}
+
+// WithExtra returns a new Token that's a clone of t, but using the
+// provided raw extra map. This is only intended for use by packages
+// implementing derivative OAuth2 flows.
+func (t *Token) WithExtra(extra interface{}) *Token {
+	t2 := new(Token)
+	*t2 = *t
+	t2.raw = extra
+	return t2
+}
+
+// Extra returns an extra field.
+// Extra fields are key-value pairs returned by the server as a
+// part of the token retrieval response.
+func (t *Token) Extra(key string) interface{} {
+	if vals, ok := t.raw.(url.Values); ok {
+		// TODO(jbd): Cast numeric values to int64 or float64.
+		return vals.Get(key)
+	}
+	if raw, ok := t.raw.(map[string]interface{}); ok {
+		return raw[key]
+	}
+	return nil
+}
+
+// expired reports whether the token is expired.
+// t must be non-nil.
+func (t *Token) expired() bool {
+	if t.Expiry.IsZero() {
+		return false
+	}
+	return t.Expiry.Add(-expiryDelta).Before(time.Now())
+}
+
+// Valid reports whether t is non-nil, has an AccessToken, and is not expired.
+func (t *Token) Valid() bool {
+	return t != nil && t.AccessToken != "" && !t.expired()
+}
+
+// tokenFromInternal maps an *internal.Token struct into
+// a *Token struct.
+func tokenFromInternal(t *internal.Token) *Token {
+	if t == nil {
+		return nil
+	}
+	return &Token{
+		AccessToken:  t.AccessToken,
+		TokenType:    t.TokenType,
+		RefreshToken: t.RefreshToken,
+		Expiry:       t.Expiry,
+		raw:          t.Raw,
+	}
+}
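The Token helpers above in action (illustrative, not part of the vendored diff; the token value is made up):

package main

import (
	"fmt"
	"net/http"
	"time"

	"golang.org/x/oauth2"
)

func main() {
	tok := &oauth2.Token{
		AccessToken: "fake-access-token", // placeholder
		TokenType:   "bearer",
		Expiry:      time.Now().Add(time.Hour),
	}
	fmt.Println(tok.Type())  // Bearer (TokenType is normalized)
	fmt.Println(tok.Valid()) // true (non-empty AccessToken, well before Expiry)

	// SetAuthHeader stamps the header by hand; Transport does this
	// automatically for requests sent through an oauth2 client.
	req, _ := http.NewRequest("GET", "https://api.example.com/resource", nil)
	tok.SetAuthHeader(req)
	fmt.Println(req.Header.Get("Authorization")) // Bearer fake-access-token
}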
+// retrieveToken takes a *Config and uses that to retrieve an *internal.Token.
+// This token is then mapped from *internal.Token into an *oauth2.Token, which is returned along
+// with an error.
+func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error) {
+	tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v)
+	if err != nil {
+		return nil, err
+	}
+	return tokenFromInternal(tk), nil
+}
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/transport.go b/Godeps/_workspace/src/golang.org/x/oauth2/transport.go
new file mode 100644
index 000000000000..90db088332b4
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/transport.go
@@ -0,0 +1,132 @@
+// Copyright 2014 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package oauth2
+
+import (
+	"errors"
+	"io"
+	"net/http"
+	"sync"
+)
+
+// Transport is an http.RoundTripper that makes OAuth 2.0 HTTP requests,
+// wrapping a base RoundTripper and adding an Authorization header
+// with a token from the supplied Source.
+//
+// Transport is a low-level mechanism. Most code will use the
+// higher-level Config.Client method instead.
+type Transport struct {
+	// Source supplies the token to add to outgoing requests'
+	// Authorization headers.
+	Source TokenSource
+
+	// Base is the base RoundTripper used to make HTTP requests.
+	// If nil, http.DefaultTransport is used.
+	Base http.RoundTripper
+
+	mu     sync.Mutex                      // guards modReq
+	modReq map[*http.Request]*http.Request // original -> modified
+}
+
+// RoundTrip authorizes and authenticates the request with an
+// access token. If no token exists or token is expired,
+// tries to refresh/fetch a new token.
+func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
+	if t.Source == nil {
+		return nil, errors.New("oauth2: Transport's Source is nil")
+	}
+	token, err := t.Source.Token()
+	if err != nil {
+		return nil, err
+	}
+
+	req2 := cloneRequest(req) // per RoundTripper contract
+	token.SetAuthHeader(req2)
+	t.setModReq(req, req2)
+	res, err := t.base().RoundTrip(req2)
+	if err != nil {
+		t.setModReq(req, nil)
+		return nil, err
+	}
+	res.Body = &onEOFReader{
+		rc: res.Body,
+		fn: func() { t.setModReq(req, nil) },
+	}
+	return res, nil
+}
+
+// CancelRequest cancels an in-flight request by closing its connection.
+func (t *Transport) CancelRequest(req *http.Request) {
+	type canceler interface {
+		CancelRequest(*http.Request)
+	}
+	if cr, ok := t.base().(canceler); ok {
+		t.mu.Lock()
+		modReq := t.modReq[req]
+		delete(t.modReq, req)
+		t.mu.Unlock()
+		cr.CancelRequest(modReq)
+	}
+}
+
+func (t *Transport) base() http.RoundTripper {
+	if t.Base != nil {
+		return t.Base
+	}
+	return http.DefaultTransport
+}
+
+func (t *Transport) setModReq(orig, mod *http.Request) {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	if t.modReq == nil {
+		t.modReq = make(map[*http.Request]*http.Request)
+	}
+	if mod == nil {
+		delete(t.modReq, orig)
+	} else {
+		t.modReq[orig] = mod
+	}
+}
+
+// cloneRequest returns a clone of the provided *http.Request.
+// The clone is a shallow copy of the struct and its Header map.
+func cloneRequest(r *http.Request) *http.Request {
+	// shallow copy of the struct
+	r2 := new(http.Request)
+	*r2 = *r
+	// deep copy of the Header
+	r2.Header = make(http.Header, len(r.Header))
+	for k, s := range r.Header {
+		r2.Header[k] = append([]string(nil), s...)
+ } + return r2 +} + +type onEOFReader struct { + rc io.ReadCloser + fn func() +} + +func (r *onEOFReader) Read(p []byte) (n int, err error) { + n, err = r.rc.Read(p) + if err == io.EOF { + r.runFunc() + } + return +} + +func (r *onEOFReader) Close() error { + err := r.rc.Close() + r.runFunc() + return err +} + +func (r *onEOFReader) runFunc() { + if fn := r.fn; fn != nil { + fn() + r.fn = nil + } +} diff --git a/Godeps/_workspace/src/google.golang.org/api/LICENSE b/Godeps/_workspace/src/google.golang.org/api/LICENSE new file mode 100644 index 000000000000..263aa7a0c123 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/api/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2011 Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
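Before the generated Cloud Monitoring discovery document below, a last sketch showing how the Transport defined above can be wired up by hand (illustrative, not part of the vendored diff; StaticTokenSource suits tokens that never expire, while ReuseTokenSource would wrap a refreshing source):

package main

import (
	"net/http"

	"golang.org/x/oauth2"
)

func main() {
	// Any TokenSource works here; StaticTokenSource is the simplest.
	src := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "fake-access-token"}) // placeholder

	client := &http.Client{
		Transport: &oauth2.Transport{
			Source: src,
			// Base is nil, so http.DefaultTransport is used underneath.
		},
	}
	// Every request sent through client now carries
	// "Authorization: Bearer fake-access-token".
	_ = client
}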
diff --git a/Godeps/_workspace/src/google.golang.org/api/cloudmonitoring/v2beta2/cloudmonitoring-api.json b/Godeps/_workspace/src/google.golang.org/api/cloudmonitoring/v2beta2/cloudmonitoring-api.json new file mode 100644 index 000000000000..44c428b877e5 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/api/cloudmonitoring/v2beta2/cloudmonitoring-api.json @@ -0,0 +1,839 @@ +{ + "kind": "discovery#restDescription", + "etag": "\"bRFOOrZKfO9LweMbPqu0kcu6De8/A2G_NAa29vne9MPSojupRQ5bVuo\"", + "discoveryVersion": "v1", + "id": "cloudmonitoring:v2beta2", + "name": "cloudmonitoring", + "canonicalName": "Cloud Monitoring", + "version": "v2beta2", + "revision": "20160314", + "title": "Cloud Monitoring API", + "description": "Accesses Google Cloud Monitoring data.", + "ownerDomain": "google.com", + "ownerName": "Google", + "icons": { + "x16": "http://www.google.com/images/icons/product/search-16.gif", + "x32": "http://www.google.com/images/icons/product/search-32.gif" + }, + "documentationLink": "https://cloud.google.com/monitoring/v2beta2/", + "protocol": "rest", + "baseUrl": "https://www.googleapis.com/cloudmonitoring/v2beta2/projects/", + "basePath": "/cloudmonitoring/v2beta2/projects/", + "rootUrl": "https://www.googleapis.com/", + "servicePath": "cloudmonitoring/v2beta2/projects/", + "batchPath": "batch", + "parameters": { + "alt": { + "type": "string", + "description": "Data format for the response.", + "default": "json", + "enum": [ + "json" + ], + "enumDescriptions": [ + "Responses with Content-Type of application/json" + ], + "location": "query" + }, + "fields": { + "type": "string", + "description": "Selector specifying which fields to include in a partial response.", + "location": "query" + }, + "key": { + "type": "string", + "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", + "location": "query" + }, + "oauth_token": { + "type": "string", + "description": "OAuth 2.0 token for the current user.", + "location": "query" + }, + "prettyPrint": { + "type": "boolean", + "description": "Returns response with indentations and line breaks.", + "default": "true", + "location": "query" + }, + "quotaUser": { + "type": "string", + "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. Overrides userIp if both are provided.", + "location": "query" + }, + "userIp": { + "type": "string", + "description": "IP address of the site where the request originates. Use this if you want to enforce per-user limits.", + "location": "query" + } + }, + "auth": { + "oauth2": { + "scopes": { + "https://www.googleapis.com/auth/cloud-platform": { + "description": "View and manage your data across Google Cloud Platform services" + }, + "https://www.googleapis.com/auth/monitoring": { + "description": "View and write monitoring data for all of your Google and third-party Cloud and API projects" + } + } + } + }, + "schemas": { + "DeleteMetricDescriptorResponse": { + "id": "DeleteMetricDescriptorResponse", + "type": "object", + "description": "The response of cloudmonitoring.metricDescriptors.delete.", + "properties": { + "kind": { + "type": "string", + "description": "Identifies what kind of resource this is. 
Value: the fixed string \"cloudmonitoring#deleteMetricDescriptorResponse\".", + "default": "cloudmonitoring#deleteMetricDescriptorResponse" + } + } + }, + "ListMetricDescriptorsRequest": { + "id": "ListMetricDescriptorsRequest", + "type": "object", + "description": "The request of cloudmonitoring.metricDescriptors.list.", + "properties": { + "kind": { + "type": "string", + "description": "Identifies what kind of resource this is. Value: the fixed string \"cloudmonitoring#listMetricDescriptorsRequest\".", + "default": "cloudmonitoring#listMetricDescriptorsRequest" + } + } + }, + "ListMetricDescriptorsResponse": { + "id": "ListMetricDescriptorsResponse", + "type": "object", + "description": "The response of cloudmonitoring.metricDescriptors.list.", + "properties": { + "kind": { + "type": "string", + "description": "Identifies what kind of resource this is. Value: the fixed string \"cloudmonitoring#listMetricDescriptorsResponse\".", + "default": "cloudmonitoring#listMetricDescriptorsResponse" + }, + "metrics": { + "type": "array", + "description": "The returned metric descriptors.", + "items": { + "$ref": "MetricDescriptor" + } + }, + "nextPageToken": { + "type": "string", + "description": "Pagination token. If present, indicates that additional results are available for retrieval. To access the results past the pagination limit, pass this value to the pageToken query parameter." + } + } + }, + "ListTimeseriesDescriptorsRequest": { + "id": "ListTimeseriesDescriptorsRequest", + "type": "object", + "description": "The request of cloudmonitoring.timeseriesDescriptors.list", + "properties": { + "kind": { + "type": "string", + "description": "Identifies what kind of resource this is. Value: the fixed string \"cloudmonitoring#listTimeseriesDescriptorsRequest\".", + "default": "cloudmonitoring#listTimeseriesDescriptorsRequest" + } + } + }, + "ListTimeseriesDescriptorsResponse": { + "id": "ListTimeseriesDescriptorsResponse", + "type": "object", + "description": "The response of cloudmonitoring.timeseriesDescriptors.list", + "properties": { + "kind": { + "type": "string", + "description": "Identifies what kind of resource this is. Value: the fixed string \"cloudmonitoring#listTimeseriesDescriptorsResponse\".", + "default": "cloudmonitoring#listTimeseriesDescriptorsResponse" + }, + "nextPageToken": { + "type": "string", + "description": "Pagination token. If present, indicates that additional results are available for retrieval. To access the results past the pagination limit, set this value to the pageToken query parameter." + }, + "oldest": { + "type": "string", + "description": "The oldest timestamp of the interval of this query, as an RFC 3339 string.", + "format": "date-time" + }, + "timeseries": { + "type": "array", + "description": "The returned time series descriptors.", + "items": { + "$ref": "TimeseriesDescriptor" + } + }, + "youngest": { + "type": "string", + "description": "The youngest timestamp of the interval of this query, as an RFC 3339 string.", + "format": "date-time" + } + } + }, + "ListTimeseriesRequest": { + "id": "ListTimeseriesRequest", + "type": "object", + "description": "The request of cloudmonitoring.timeseries.list", + "properties": { + "kind": { + "type": "string", + "description": "Identifies what kind of resource this is. 
Value: the fixed string \"cloudmonitoring#listTimeseriesRequest\".",
+     "default": "cloudmonitoring#listTimeseriesRequest"
+    }
+   }
+  },
+  "ListTimeseriesResponse": {
+   "id": "ListTimeseriesResponse",
+   "type": "object",
+   "description": "The response of cloudmonitoring.timeseries.list",
+   "properties": {
+    "kind": {
+     "type": "string",
+     "description": "Identifies what kind of resource this is. Value: the fixed string \"cloudmonitoring#listTimeseriesResponse\".",
+     "default": "cloudmonitoring#listTimeseriesResponse"
+    },
+    "nextPageToken": {
+     "type": "string",
+     "description": "Pagination token. If present, indicates that additional results are available for retrieval. To access the results past the pagination limit, set the pageToken query parameter to this value. All of the points of a time series will be returned before returning any point of the subsequent time series."
+    },
+    "oldest": {
+     "type": "string",
+     "description": "The oldest timestamp of the interval of this query as an RFC 3339 string.",
+     "format": "date-time"
+    },
+    "timeseries": {
+     "type": "array",
+     "description": "The returned time series.",
+     "items": {
+      "$ref": "Timeseries"
+     }
+    },
+    "youngest": {
+     "type": "string",
+     "description": "The youngest timestamp of the interval of this query as an RFC 3339 string.",
+     "format": "date-time"
+    }
+   }
+  },
+  "MetricDescriptor": {
+   "id": "MetricDescriptor",
+   "type": "object",
+   "description": "A metricDescriptor defines the name, label keys, and data type of a particular metric.",
+   "properties": {
+    "description": {
+     "type": "string",
+     "description": "Description of this metric."
+    },
+    "labels": {
+     "type": "array",
+     "description": "Labels defined for this metric.",
+     "items": {
+      "$ref": "MetricDescriptorLabelDescriptor"
+     }
+    },
+    "name": {
+     "type": "string",
+     "description": "The name of this metric."
+    },
+    "project": {
+     "type": "string",
+     "description": "The project ID to which the metric belongs."
+    },
+    "typeDescriptor": {
+     "$ref": "MetricDescriptorTypeDescriptor",
+     "description": "Type description for this metric."
+    }
+   }
+  },
+  "MetricDescriptorLabelDescriptor": {
+   "id": "MetricDescriptorLabelDescriptor",
+   "type": "object",
+   "description": "A label in a metric is a description of this metric, including the key of this description (what the description is), and the value for this description.",
+   "properties": {
+    "description": {
+     "type": "string",
+     "description": "Label description."
+    },
+    "key": {
+     "type": "string",
+     "description": "Label key."
+    }
+   }
+  },
+  "MetricDescriptorTypeDescriptor": {
+   "id": "MetricDescriptorTypeDescriptor",
+   "type": "object",
+   "description": "A type in a metric contains information about how the metric is collected and what its data points look like.",
+   "properties": {
+    "metricType": {
+     "type": "string",
+     "description": "The method of collecting data for the metric. See Metric types."
+    },
+    "valueType": {
+     "type": "string",
+     "description": "The data type of individual points in the metric's time series. See Metric value types."
+    }
+   }
+  },
+  "Point": {
+   "id": "Point",
+   "type": "object",
+   "description": "Point is a single point in a time series. It consists of a start time, an end time, and a value.",
+   "properties": {
+    "boolValue": {
+     "type": "boolean",
+     "description": "The value of this data point. Either \"true\" or \"false\"."
+    },
+    "distributionValue": {
+     "$ref": "PointDistribution",
+     "description": "The value of this data point as a distribution.
A distribution value can contain a list of buckets and/or an underflowBucket and an overflowBucket. The values of these points can be used to create a histogram." + }, + "doubleValue": { + "type": "number", + "description": "The value of this data point as a double-precision floating-point number.", + "format": "double" + }, + "end": { + "type": "string", + "description": "The interval [start, end] is the time period to which the point's value applies. For gauge metrics, whose values are instantaneous measurements, this interval should be empty (start should equal end). For cumulative metrics (of which deltas and rates are special cases), the interval should be non-empty. Both start and end are RFC 3339 strings.", + "format": "date-time" + }, + "int64Value": { + "type": "string", + "description": "The value of this data point as a 64-bit integer.", + "format": "int64" + }, + "start": { + "type": "string", + "description": "The interval [start, end] is the time period to which the point's value applies. For gauge metrics, whose values are instantaneous measurements, this interval should be empty (start should equal end). For cumulative metrics (of which deltas and rates are special cases), the interval should be non-empty. Both start and end are RFC 3339 strings.", + "format": "date-time" + }, + "stringValue": { + "type": "string", + "description": "The value of this data point in string format." + } + } + }, + "PointDistribution": { + "id": "PointDistribution", + "type": "object", + "description": "Distribution data point value type. When writing distribution points, try to be consistent with the boundaries of your buckets. If you must modify the bucket boundaries, then do so by merging, partitioning, or appending rather than skewing them.", + "properties": { + "buckets": { + "type": "array", + "description": "The finite buckets.", + "items": { + "$ref": "PointDistributionBucket" + } + }, + "overflowBucket": { + "$ref": "PointDistributionOverflowBucket", + "description": "The overflow bucket." + }, + "underflowBucket": { + "$ref": "PointDistributionUnderflowBucket", + "description": "The underflow bucket." + } + } + }, + "PointDistributionBucket": { + "id": "PointDistributionBucket", + "type": "object", + "description": "The histogram's bucket. Buckets that form the histogram of a distribution value. 
If the upper bound of a bucket, say U1, does not equal the lower bound of the next bucket, say L2, this means that there is no event in [U1, L2).", + "properties": { + "count": { + "type": "string", + "description": "The number of events whose values are in the interval defined by this bucket.", + "format": "int64" + }, + "lowerBound": { + "type": "number", + "description": "The lower bound of the value interval of this bucket (inclusive).", + "format": "double" + }, + "upperBound": { + "type": "number", + "description": "The upper bound of the value interval of this bucket (exclusive).", + "format": "double" + } + } + }, + "PointDistributionOverflowBucket": { + "id": "PointDistributionOverflowBucket", + "type": "object", + "description": "The overflow bucket is a special bucket that does not have the upperBound field; it includes all of the events that are no less than its lower bound.", + "properties": { + "count": { + "type": "string", + "description": "The number of events whose values are in the interval defined by this bucket.", + "format": "int64" + }, + "lowerBound": { + "type": "number", + "description": "The lower bound of the value interval of this bucket (inclusive).", + "format": "double" + } + } + }, + "PointDistributionUnderflowBucket": { + "id": "PointDistributionUnderflowBucket", + "type": "object", + "description": "The underflow bucket is a special bucket that does not have the lowerBound field; it includes all of the events that are less than its upper bound.", + "properties": { + "count": { + "type": "string", + "description": "The number of events whose values are in the interval defined by this bucket.", + "format": "int64" + }, + "upperBound": { + "type": "number", + "description": "The upper bound of the value interval of this bucket (exclusive).", + "format": "double" + } + } + }, + "Timeseries": { + "id": "Timeseries", + "type": "object", + "description": "The monitoring data is organized as metrics and stored as data points that are recorded over time. Each data point represents information like the CPU utilization of your virtual machine. A historical record of these data points is called a time series.", + "properties": { + "points": { + "type": "array", + "description": "The data points of this time series. The points are listed in order of their end timestamp, from younger to older.", + "items": { + "$ref": "Point" + } + }, + "timeseriesDesc": { + "$ref": "TimeseriesDescriptor", + "description": "The descriptor of this time series." + } + } + }, + "TimeseriesDescriptor": { + "id": "TimeseriesDescriptor", + "type": "object", + "description": "TimeseriesDescriptor identifies a single time series.", + "properties": { + "labels": { + "type": "object", + "description": "The label's name.", + "additionalProperties": { + "type": "string", + "description": "The label's name." + } + }, + "metric": { + "type": "string", + "description": "The name of the metric." + }, + "project": { + "type": "string", + "description": "The Developers Console project number to which this time series belongs." + } + } + }, + "TimeseriesDescriptorLabel": { + "id": "TimeseriesDescriptorLabel", + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "The label's name." + }, + "value": { + "type": "string", + "description": "The label's value." 
+    }
+   }
+  },
+  "TimeseriesPoint": {
+   "id": "TimeseriesPoint",
+   "type": "object",
+   "description": "When writing time series, TimeseriesPoint should be used instead of Timeseries, to enforce a single point for each time series in the timeseries.write request.",
+   "properties": {
+    "point": {
+     "$ref": "Point",
+     "description": "The data point in this time series snapshot."
+    },
+    "timeseriesDesc": {
+     "$ref": "TimeseriesDescriptor",
+     "description": "The descriptor of this time series."
+    }
+   }
+  },
+  "WriteTimeseriesRequest": {
+   "id": "WriteTimeseriesRequest",
+   "type": "object",
+   "description": "The request of cloudmonitoring.timeseries.write",
+   "properties": {
+    "commonLabels": {
+     "type": "object",
+     "description": "The label's name.",
+     "additionalProperties": {
+      "type": "string",
+      "description": "The label's name."
+     }
+    },
+    "timeseries": {
+     "type": "array",
+     "description": "Provide time series specific labels and the data points for each time series. The labels in timeseries and the common_labels should form a complete list of labels that are required by the metric.",
+     "items": {
+      "$ref": "TimeseriesPoint"
+     }
+    }
+   }
+  },
+  "WriteTimeseriesResponse": {
+   "id": "WriteTimeseriesResponse",
+   "type": "object",
+   "description": "The response of cloudmonitoring.timeseries.write",
+   "properties": {
+    "kind": {
+     "type": "string",
+     "description": "Identifies what kind of resource this is. Value: the fixed string \"cloudmonitoring#writeTimeseriesResponse\".",
+     "default": "cloudmonitoring#writeTimeseriesResponse"
+    }
+   }
+  }
+ },
+ "resources": {
+  "metricDescriptors": {
+   "methods": {
+    "create": {
+     "id": "cloudmonitoring.metricDescriptors.create",
+     "path": "{project}/metricDescriptors",
+     "httpMethod": "POST",
+     "description": "Create a new metric.",
+     "parameters": {
+      "project": {
+       "type": "string",
+       "description": "The project id. The value can be the numeric project ID or string-based project name.",
+       "required": true,
+       "location": "path"
+      }
+     },
+     "parameterOrder": [
+      "project"
+     ],
+     "request": {
+      "$ref": "MetricDescriptor"
+     },
+     "response": {
+      "$ref": "MetricDescriptor"
+     },
+     "scopes": [
+      "https://www.googleapis.com/auth/cloud-platform",
+      "https://www.googleapis.com/auth/monitoring"
+     ]
+    },
+    "delete": {
+     "id": "cloudmonitoring.metricDescriptors.delete",
+     "path": "{project}/metricDescriptors/{metric}",
+     "httpMethod": "DELETE",
+     "description": "Delete an existing metric.",
+     "parameters": {
+      "metric": {
+       "type": "string",
+       "description": "Name of the metric.",
+       "required": true,
+       "location": "path"
+      },
+      "project": {
+       "type": "string",
+       "description": "The project ID to which the metric belongs.",
+       "required": true,
+       "location": "path"
+      }
+     },
+     "parameterOrder": [
+      "project",
+      "metric"
+     ],
+     "response": {
+      "$ref": "DeleteMetricDescriptorResponse"
+     },
+     "scopes": [
+      "https://www.googleapis.com/auth/cloud-platform",
+      "https://www.googleapis.com/auth/monitoring"
+     ]
+    },
+    "list": {
+     "id": "cloudmonitoring.metricDescriptors.list",
+     "path": "{project}/metricDescriptors",
+     "httpMethod": "GET",
+     "description": "List metric descriptors that match the query. If the query is not set, then all of the metric descriptors will be returned.
Large responses are paginated; use the nextPageToken returned in the response to request subsequent pages of results by setting the pageToken query parameter to the value of the nextPageToken.",
+     "parameters": {
+      "count": {
+       "type": "integer",
+       "description": "Maximum number of metric descriptors per page. Used for pagination. If not specified, count = 100.",
+       "default": "100",
+       "format": "int32",
+       "minimum": "1",
+       "maximum": "1000",
+       "location": "query"
+      },
+      "pageToken": {
+       "type": "string",
+       "description": "The pagination token, which is used to page through large result sets. Set this value to the value of the nextPageToken to retrieve the next page of results.",
+       "location": "query"
+      },
+      "project": {
+       "type": "string",
+       "description": "The project id. The value can be the numeric project ID or string-based project name.",
+       "required": true,
+       "location": "path"
+      },
+      "query": {
+       "type": "string",
+       "description": "The query used to search against existing metrics. Separate keywords with a space; the service joins all keywords with AND, meaning that all keywords must match for a metric to be returned. If this field is omitted, all metrics are returned. If an empty string is passed with this field, no metrics are returned.",
+       "location": "query"
+      }
+     },
+     "parameterOrder": [
+      "project"
+     ],
+     "request": {
+      "$ref": "ListMetricDescriptorsRequest"
+     },
+     "response": {
+      "$ref": "ListMetricDescriptorsResponse"
+     },
+     "scopes": [
+      "https://www.googleapis.com/auth/cloud-platform",
+      "https://www.googleapis.com/auth/monitoring"
+     ]
+    }
+   }
+  },
+  "timeseries": {
+   "methods": {
+    "list": {
+     "id": "cloudmonitoring.timeseries.list",
+     "path": "{project}/timeseries/{metric}",
+     "httpMethod": "GET",
+     "description": "List the data points of the time series that match the metric and labels values and that have data points in the interval. Large responses are paginated; use the nextPageToken returned in the response to request subsequent pages of results by setting the pageToken query parameter to the value of the nextPageToken.",
+     "parameters": {
+      "aggregator": {
+       "type": "string",
+       "description": "The aggregation function that will reduce the data points in each window to a single point. This parameter is only valid for non-cumulative metrics with a value type of INT64 or DOUBLE.",
+       "enum": [
+        "max",
+        "mean",
+        "min",
+        "sum"
+       ],
+       "enumDescriptions": [
+        "",
+        "",
+        "",
+        ""
+       ],
+       "location": "query"
+      },
+      "count": {
+       "type": "integer",
+       "description": "Maximum number of data points per page, which is used for pagination of results.",
+       "default": "6000",
+       "format": "int32",
+       "minimum": "1",
+       "maximum": "12000",
+       "location": "query"
+      },
+      "labels": {
+       "type": "string",
+       "description": "A collection of labels for the matching time series, which are represented as: \n- key==value: key equals the value \n- key=~value: key regex matches the value \n- key!=value: key does not equal the value \n- key!~value: key regex does not match the value For example, to list all of the time series descriptors for the region us-central1, you could specify:\nlabel=cloud.googleapis.com%2Flocation=~us-central1.*",
+       "pattern": "(.+?)(==|=~|!=|!~)(.+)",
+       "repeated": true,
+       "location": "query"
+      },
+      "metric": {
+       "type": "string",
+       "description": "Metric names are protocol-free URLs as listed in the Supported Metrics page.
For example, compute.googleapis.com/instance/disk/read_ops_count.", + "required": true, + "location": "path" + }, + "oldest": { + "type": "string", + "description": "Start of the time interval (exclusive), which is expressed as an RFC 3339 timestamp. If neither oldest nor timespan is specified, the default time interval will be (youngest - 4 hours, youngest]", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "The pagination token, which is used to page through large result sets. Set this value to the value of the nextPageToken to retrieve the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "The project ID to which this time series belongs. The value can be the numeric project ID or string-based project name.", + "required": true, + "location": "path" + }, + "timespan": { + "type": "string", + "description": "Length of the time interval to query, which is an alternative way to declare the interval: (youngest - timespan, youngest]. The timespan and oldest parameters should not be used together. Units: \n- s: second \n- m: minute \n- h: hour \n- d: day \n- w: week Examples: 2s, 3m, 4w. Only one unit is allowed, for example: 2w3d is not allowed; you should use 17d instead.\n\nIf neither oldest nor timespan is specified, the default time interval will be (youngest - 4 hours, youngest].", + "pattern": "[0-9]+[smhdw]?", + "location": "query" + }, + "window": { + "type": "string", + "description": "The sampling window. At most one data point will be returned for each window in the requested time interval. This parameter is only valid for non-cumulative metric types. Units: \n- m: minute \n- h: hour \n- d: day \n- w: week Examples: 3m, 4w. Only one unit is allowed, for example: 2w3d is not allowed; you should use 17d instead.", + "pattern": "[0-9]+[mhdw]?", + "location": "query" + }, + "youngest": { + "type": "string", + "description": "End of the time interval (inclusive), which is expressed as an RFC 3339 timestamp.", + "required": true, + "location": "query" + } + }, + "parameterOrder": [ + "project", + "metric", + "youngest" + ], + "request": { + "$ref": "ListTimeseriesRequest" + }, + "response": { + "$ref": "ListTimeseriesResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring" + ] + }, + "write": { + "id": "cloudmonitoring.timeseries.write", + "path": "{project}/timeseries:write", + "httpMethod": "POST", + "description": "Put data points to one or more time series for one or more metrics. If a time series does not exist, a new time series will be created. It is not allowed to write a time series point that is older than the existing youngest point of that time series. Points that are older than the existing youngest point of that time series will be discarded silently. Therefore, users should make sure that points of a time series are written sequentially in the order of their end time.", + "parameters": { + "project": { + "type": "string", + "description": "The project ID. 
The value can be the numeric project ID or string-based project name.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "request": { + "$ref": "WriteTimeseriesRequest" + }, + "response": { + "$ref": "WriteTimeseriesResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring" + ] + } + } + }, + "timeseriesDescriptors": { + "methods": { + "list": { + "id": "cloudmonitoring.timeseriesDescriptors.list", + "path": "{project}/timeseriesDescriptors/{metric}", + "httpMethod": "GET", + "description": "List the descriptors of the time series that match the metric and labels values and that have data points in the interval. Large responses are paginated; use the nextPageToken returned in the response to request subsequent pages of results by setting the pageToken query parameter to the value of the nextPageToken.", + "parameters": { + "aggregator": { + "type": "string", + "description": "The aggregation function that will reduce the data points in each window to a single point. This parameter is only valid for non-cumulative metrics with a value type of INT64 or DOUBLE.", + "enum": [ + "max", + "mean", + "min", + "sum" + ], + "enumDescriptions": [ + "", + "", + "", + "" + ], + "location": "query" + }, + "count": { + "type": "integer", + "description": "Maximum number of time series descriptors per page. Used for pagination. If not specified, count = 100.", + "default": "100", + "format": "int32", + "minimum": "1", + "maximum": "1000", + "location": "query" + }, + "labels": { + "type": "string", + "description": "A collection of labels for the matching time series, which are represented as: \n- key==value: key equals the value \n- key=~value: key regex matches the value \n- key!=value: key does not equal the value \n- key!~value: key regex does not match the value For example, to list all of the time series descriptors for the region us-central1, you could specify:\nlabel=cloud.googleapis.com%2Flocation=~us-central1.*", + "pattern": "(.+?)(==|=~|!=|!~)(.+)", + "repeated": true, + "location": "query" + }, + "metric": { + "type": "string", + "description": "Metric names are protocol-free URLs as listed in the Supported Metrics page. For example, compute.googleapis.com/instance/disk/read_ops_count.", + "required": true, + "location": "path" + }, + "oldest": { + "type": "string", + "description": "Start of the time interval (exclusive), which is expressed as an RFC 3339 timestamp. If neither oldest nor timespan is specified, the default time interval will be (youngest - 4 hours, youngest]", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "The pagination token, which is used to page through large result sets. Set this value to the value of the nextPageToken to retrieve the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "The project ID to which this time series belongs. The value can be the numeric project ID or string-based project name.", + "required": true, + "location": "path" + }, + "timespan": { + "type": "string", + "description": "Length of the time interval to query, which is an alternative way to declare the interval: (youngest - timespan, youngest]. The timespan and oldest parameters should not be used together. Units: \n- s: second \n- m: minute \n- h: hour \n- d: day \n- w: week Examples: 2s, 3m, 4w. 
Only one unit is allowed, for example: 2w3d is not allowed; you should use 17d instead.\n\nIf neither oldest nor timespan is specified, the default time interval will be (youngest - 4 hours, youngest].", + "pattern": "[0-9]+[smhdw]?", + "location": "query" + }, + "window": { + "type": "string", + "description": "The sampling window. At most one data point will be returned for each window in the requested time interval. This parameter is only valid for non-cumulative metric types. Units: \n- m: minute \n- h: hour \n- d: day \n- w: week Examples: 3m, 4w. Only one unit is allowed, for example: 2w3d is not allowed; you should use 17d instead.", + "pattern": "[0-9]+[mhdw]?", + "location": "query" + }, + "youngest": { + "type": "string", + "description": "End of the time interval (inclusive), which is expressed as an RFC 3339 timestamp.", + "required": true, + "location": "query" + } + }, + "parameterOrder": [ + "project", + "metric", + "youngest" + ], + "request": { + "$ref": "ListTimeseriesDescriptorsRequest" + }, + "response": { + "$ref": "ListTimeseriesDescriptorsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring" + ] + } + } + } + } +} diff --git a/Godeps/_workspace/src/google.golang.org/api/cloudmonitoring/v2beta2/cloudmonitoring-gen.go b/Godeps/_workspace/src/google.golang.org/api/cloudmonitoring/v2beta2/cloudmonitoring-gen.go new file mode 100644 index 000000000000..30f1b6bef26f --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/api/cloudmonitoring/v2beta2/cloudmonitoring-gen.go @@ -0,0 +1,1943 @@ +// Package cloudmonitoring provides access to the Cloud Monitoring API. +// +// See https://cloud.google.com/monitoring/v2beta2/ +// +// Usage example: +// +// import "google.golang.org/api/cloudmonitoring/v2beta2" +// ... +// cloudmonitoringService, err := cloudmonitoring.New(oauthHttpClient) +package cloudmonitoring + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + context "golang.org/x/net/context" + ctxhttp "golang.org/x/net/context/ctxhttp" + gensupport "google.golang.org/api/gensupport" + googleapi "google.golang.org/api/googleapi" + "io" + "net/http" + "net/url" + "strconv" + "strings" +) + +// Always reference these packages, just in case the auto-generated code +// below doesn't. +var _ = bytes.NewBuffer +var _ = strconv.Itoa +var _ = fmt.Sprintf +var _ = json.NewDecoder +var _ = io.Copy +var _ = url.Parse +var _ = gensupport.MarshalJSON +var _ = googleapi.Version +var _ = errors.New +var _ = strings.Replace +var _ = context.Canceled +var _ = ctxhttp.Do + +const apiId = "cloudmonitoring:v2beta2" +const apiName = "cloudmonitoring" +const apiVersion = "v2beta2" +const basePath = "https://www.googleapis.com/cloudmonitoring/v2beta2/projects/" + +// OAuth2 scopes used by this API. 
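+//
+// A hedged usage sketch (editorial note, not generated code): building a
+// client authorized for the monitoring scope with golang.org/x/oauth2.
+// The names conf and tok are assumed to be a configured *oauth2.Config
+// and *oauth2.Token; any transport that yields an *http.Client works.
+//
+//	conf.Scopes = []string{cloudmonitoring.MonitoringScope}
+//	client := conf.Client(oauth2.NoContext, tok)
+//	svc, err := cloudmonitoring.New(client)
+//	if err != nil {
+//		// handle the error
+//	}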
+const ( + // View and manage your data across Google Cloud Platform services + CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform" + + // View and write monitoring data for all of your Google and third-party + // Cloud and API projects + MonitoringScope = "https://www.googleapis.com/auth/monitoring" +) + +func New(client *http.Client) (*Service, error) { + if client == nil { + return nil, errors.New("client is nil") + } + s := &Service{client: client, BasePath: basePath} + s.MetricDescriptors = NewMetricDescriptorsService(s) + s.Timeseries = NewTimeseriesService(s) + s.TimeseriesDescriptors = NewTimeseriesDescriptorsService(s) + return s, nil +} + +type Service struct { + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + + MetricDescriptors *MetricDescriptorsService + + Timeseries *TimeseriesService + + TimeseriesDescriptors *TimeseriesDescriptorsService +} + +func (s *Service) userAgent() string { + if s.UserAgent == "" { + return googleapi.UserAgent + } + return googleapi.UserAgent + " " + s.UserAgent +} + +func NewMetricDescriptorsService(s *Service) *MetricDescriptorsService { + rs := &MetricDescriptorsService{s: s} + return rs +} + +type MetricDescriptorsService struct { + s *Service +} + +func NewTimeseriesService(s *Service) *TimeseriesService { + rs := &TimeseriesService{s: s} + return rs +} + +type TimeseriesService struct { + s *Service +} + +func NewTimeseriesDescriptorsService(s *Service) *TimeseriesDescriptorsService { + rs := &TimeseriesDescriptorsService{s: s} + return rs +} + +type TimeseriesDescriptorsService struct { + s *Service +} + +// DeleteMetricDescriptorResponse: The response of +// cloudmonitoring.metricDescriptors.delete. +type DeleteMetricDescriptorResponse struct { + // Kind: Identifies what kind of resource this is. Value: the fixed + // string "cloudmonitoring#deleteMetricDescriptorResponse". + Kind string `json:"kind,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Kind") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *DeleteMetricDescriptorResponse) MarshalJSON() ([]byte, error) { + type noMethod DeleteMetricDescriptorResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// ListMetricDescriptorsRequest: The request of +// cloudmonitoring.metricDescriptors.list. +type ListMetricDescriptorsRequest struct { + // Kind: Identifies what kind of resource this is. Value: the fixed + // string "cloudmonitoring#listMetricDescriptorsRequest". + Kind string `json:"kind,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Kind") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"` +} + +func (s *ListMetricDescriptorsRequest) MarshalJSON() ([]byte, error) { + type noMethod ListMetricDescriptorsRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// ListMetricDescriptorsResponse: The response of +// cloudmonitoring.metricDescriptors.list. +type ListMetricDescriptorsResponse struct { + // Kind: Identifies what kind of resource this is. Value: the fixed + // string "cloudmonitoring#listMetricDescriptorsResponse". + Kind string `json:"kind,omitempty"` + + // Metrics: The returned metric descriptors. + Metrics []*MetricDescriptor `json:"metrics,omitempty"` + + // NextPageToken: Pagination token. If present, indicates that + // additional results are available for retrieval. To access the results + // past the pagination limit, pass this value to the pageToken query + // parameter. + NextPageToken string `json:"nextPageToken,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Kind") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *ListMetricDescriptorsResponse) MarshalJSON() ([]byte, error) { + type noMethod ListMetricDescriptorsResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// ListTimeseriesDescriptorsRequest: The request of +// cloudmonitoring.timeseriesDescriptors.list +type ListTimeseriesDescriptorsRequest struct { + // Kind: Identifies what kind of resource this is. Value: the fixed + // string "cloudmonitoring#listTimeseriesDescriptorsRequest". + Kind string `json:"kind,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Kind") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *ListTimeseriesDescriptorsRequest) MarshalJSON() ([]byte, error) { + type noMethod ListTimeseriesDescriptorsRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// ListTimeseriesDescriptorsResponse: The response of +// cloudmonitoring.timeseriesDescriptors.list +type ListTimeseriesDescriptorsResponse struct { + // Kind: Identifies what kind of resource this is. Value: the fixed + // string "cloudmonitoring#listTimeseriesDescriptorsResponse". + Kind string `json:"kind,omitempty"` + + // NextPageToken: Pagination token. If present, indicates that + // additional results are available for retrieval. To access the results + // past the pagination limit, set this value to the pageToken query + // parameter. + NextPageToken string `json:"nextPageToken,omitempty"` + + // Oldest: The oldest timestamp of the interval of this query, as an RFC + // 3339 string. + Oldest string `json:"oldest,omitempty"` + + // Timeseries: The returned time series descriptors. 
+ Timeseries []*TimeseriesDescriptor `json:"timeseries,omitempty"` + + // Youngest: The youngest timestamp of the interval of this query, as an + // RFC 3339 string. + Youngest string `json:"youngest,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Kind") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *ListTimeseriesDescriptorsResponse) MarshalJSON() ([]byte, error) { + type noMethod ListTimeseriesDescriptorsResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// ListTimeseriesRequest: The request of cloudmonitoring.timeseries.list +type ListTimeseriesRequest struct { + // Kind: Identifies what kind of resource this is. Value: the fixed + // string "cloudmonitoring#listTimeseriesRequest". + Kind string `json:"kind,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Kind") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *ListTimeseriesRequest) MarshalJSON() ([]byte, error) { + type noMethod ListTimeseriesRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// ListTimeseriesResponse: The response of +// cloudmonitoring.timeseries.list +type ListTimeseriesResponse struct { + // Kind: Identifies what kind of resource this is. Value: the fixed + // string "cloudmonitoring#listTimeseriesResponse". + Kind string `json:"kind,omitempty"` + + // NextPageToken: Pagination token. If present, indicates that + // additional results are available for retrieval. To access the results + // past the pagination limit, set the pageToken query parameter to this + // value. All of the points of a time series will be returned before + // returning any point of the subsequent time series. + NextPageToken string `json:"nextPageToken,omitempty"` + + // Oldest: The oldest timestamp of the interval of this query as an RFC + // 3339 string. + Oldest string `json:"oldest,omitempty"` + + // Timeseries: The returned time series. + Timeseries []*Timeseries `json:"timeseries,omitempty"` + + // Youngest: The youngest timestamp of the interval of this query as an + // RFC 3339 string. + Youngest string `json:"youngest,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Kind") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"` +} + +func (s *ListTimeseriesResponse) MarshalJSON() ([]byte, error) { + type noMethod ListTimeseriesResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// MetricDescriptor: A metricDescriptor defines the name, label keys, +// and data type of a particular metric. +type MetricDescriptor struct { + // Description: Description of this metric. + Description string `json:"description,omitempty"` + + // Labels: Labels defined for this metric. + Labels []*MetricDescriptorLabelDescriptor `json:"labels,omitempty"` + + // Name: The name of this metric. + Name string `json:"name,omitempty"` + + // Project: The project ID to which the metric belongs. + Project string `json:"project,omitempty"` + + // TypeDescriptor: Type description for this metric. + TypeDescriptor *MetricDescriptorTypeDescriptor `json:"typeDescriptor,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Description") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *MetricDescriptor) MarshalJSON() ([]byte, error) { + type noMethod MetricDescriptor + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// MetricDescriptorLabelDescriptor: A label in a metric is a description +// of this metric, including the key of this description (what the +// description is), and the value for this description. +type MetricDescriptorLabelDescriptor struct { + // Description: Label description. + Description string `json:"description,omitempty"` + + // Key: Label key. + Key string `json:"key,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Description") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *MetricDescriptorLabelDescriptor) MarshalJSON() ([]byte, error) { + type noMethod MetricDescriptorLabelDescriptor + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// MetricDescriptorTypeDescriptor: A type in a metric contains +// information about how the metric is collected and what its data +// points look like. +type MetricDescriptorTypeDescriptor struct { + // MetricType: The method of collecting data for the metric. See Metric + // types. + MetricType string `json:"metricType,omitempty"` + + // ValueType: The data type of individual points in the metric's time + // series. See Metric value types. + ValueType string `json:"valueType,omitempty"` + + // ForceSendFields is a list of field names (e.g. "MetricType") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests.
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *MetricDescriptorTypeDescriptor) MarshalJSON() ([]byte, error) { + type noMethod MetricDescriptorTypeDescriptor + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// Point: Point is a single point in a time series. It consists of a +// start time, an end time, and a value. +type Point struct { + // BoolValue: The value of this data point. Either "true" or "false". + BoolValue *bool `json:"boolValue,omitempty"` + + // DistributionValue: The value of this data point as a distribution. A + // distribution value can contain a list of buckets and/or an + // underflowBucket and an overflowBucket. The values of these points can + // be used to create a histogram. + DistributionValue *PointDistribution `json:"distributionValue,omitempty"` + + // DoubleValue: The value of this data point as a double-precision + // floating-point number. + DoubleValue *float64 `json:"doubleValue,omitempty"` + + // End: The interval [start, end] is the time period to which the + // point's value applies. For gauge metrics, whose values are + // instantaneous measurements, this interval should be empty (start + // should equal end). For cumulative metrics (of which deltas and rates + // are special cases), the interval should be non-empty. Both start and + // end are RFC 3339 strings. + End string `json:"end,omitempty"` + + // Int64Value: The value of this data point as a 64-bit integer. + Int64Value *int64 `json:"int64Value,omitempty,string"` + + // Start: The interval [start, end] is the time period to which the + // point's value applies. For gauge metrics, whose values are + // instantaneous measurements, this interval should be empty (start + // should equal end). For cumulative metrics (of which deltas and rates + // are special cases), the interval should be non-empty. Both start and + // end are RFC 3339 strings. + Start string `json:"start,omitempty"` + + // StringValue: The value of this data point in string format. + StringValue *string `json:"stringValue,omitempty"` + + // ForceSendFields is a list of field names (e.g. "BoolValue") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *Point) MarshalJSON() ([]byte, error) { + type noMethod Point + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// PointDistribution: Distribution data point value type. When writing +// distribution points, try to be consistent with the boundaries of your +// buckets. If you must modify the bucket boundaries, then do so by +// merging, partitioning, or appending rather than skewing them. +type PointDistribution struct { + // Buckets: The finite buckets. + Buckets []*PointDistributionBucket `json:"buckets,omitempty"` + + // OverflowBucket: The overflow bucket. + OverflowBucket *PointDistributionOverflowBucket `json:"overflowBucket,omitempty"` + + // UnderflowBucket: The underflow bucket. 
+ UnderflowBucket *PointDistributionUnderflowBucket `json:"underflowBucket,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Buckets") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *PointDistribution) MarshalJSON() ([]byte, error) { + type noMethod PointDistribution + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// PointDistributionBucket: The histogram's bucket. Buckets that form +// the histogram of a distribution value. If the upper bound of a +// bucket, say U1, does not equal the lower bound of the next bucket, +// say L2, this means that there is no event in [U1, L2). +type PointDistributionBucket struct { + // Count: The number of events whose values are in the interval defined + // by this bucket. + Count int64 `json:"count,omitempty,string"` + + // LowerBound: The lower bound of the value interval of this bucket + // (inclusive). + LowerBound float64 `json:"lowerBound,omitempty"` + + // UpperBound: The upper bound of the value interval of this bucket + // (exclusive). + UpperBound float64 `json:"upperBound,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Count") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *PointDistributionBucket) MarshalJSON() ([]byte, error) { + type noMethod PointDistributionBucket + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// PointDistributionOverflowBucket: The overflow bucket is a special +// bucket that does not have the upperBound field; it includes all of +// the events that are no less than its lower bound. +type PointDistributionOverflowBucket struct { + // Count: The number of events whose values are in the interval defined + // by this bucket. + Count int64 `json:"count,omitempty,string"` + + // LowerBound: The lower bound of the value interval of this bucket + // (inclusive). + LowerBound float64 `json:"lowerBound,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Count") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *PointDistributionOverflowBucket) MarshalJSON() ([]byte, error) { + type noMethod PointDistributionOverflowBucket + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// PointDistributionUnderflowBucket: The underflow bucket is a special +// bucket that does not have the lowerBound field; it includes all of +// the events that are less than its upper bound. 
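+//
+// A hedged sketch (editorial) of how the underflow, finite, and overflow
+// buckets combine into one distribution point; the counts and bounds are
+// illustrative only:
+//
+//	dist := &PointDistribution{
+//		UnderflowBucket: &PointDistributionUnderflowBucket{Count: 2, UpperBound: 0.1},
+//		Buckets: []*PointDistributionBucket{
+//			{Count: 10, LowerBound: 0.1, UpperBound: 1},
+//		},
+//		OverflowBucket: &PointDistributionOverflowBucket{Count: 1, LowerBound: 1},
+//	}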
+type PointDistributionUnderflowBucket struct { + // Count: The number of events whose values are in the interval defined + // by this bucket. + Count int64 `json:"count,omitempty,string"` + + // UpperBound: The upper bound of the value interval of this bucket + // (exclusive). + UpperBound float64 `json:"upperBound,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Count") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *PointDistributionUnderflowBucket) MarshalJSON() ([]byte, error) { + type noMethod PointDistributionUnderflowBucket + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// Timeseries: The monitoring data is organized as metrics and stored as +// data points that are recorded over time. Each data point represents +// information like the CPU utilization of your virtual machine. A +// historical record of these data points is called a time series. +type Timeseries struct { + // Points: The data points of this time series. The points are listed in + // order of their end timestamp, from younger to older. + Points []*Point `json:"points,omitempty"` + + // TimeseriesDesc: The descriptor of this time series. + TimeseriesDesc *TimeseriesDescriptor `json:"timeseriesDesc,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Points") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *Timeseries) MarshalJSON() ([]byte, error) { + type noMethod Timeseries + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// TimeseriesDescriptor: TimeseriesDescriptor identifies a single time +// series. +type TimeseriesDescriptor struct { + // Labels: The label's name. + Labels map[string]string `json:"labels,omitempty"` + + // Metric: The name of the metric. + Metric string `json:"metric,omitempty"` + + // Project: The Developers Console project number to which this time + // series belongs. + Project string `json:"project,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Labels") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *TimeseriesDescriptor) MarshalJSON() ([]byte, error) { + type noMethod TimeseriesDescriptor + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type TimeseriesDescriptorLabel struct { + // Key: The label's name. + Key string `json:"key,omitempty"` + + // Value: The label's value. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. 
"Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *TimeseriesDescriptorLabel) MarshalJSON() ([]byte, error) { + type noMethod TimeseriesDescriptorLabel + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// TimeseriesPoint: When writing time series, TimeseriesPoint should be +// used instead of Timeseries, to enforce single point for each time +// series in the timeseries.write request. +type TimeseriesPoint struct { + // Point: The data point in this time series snapshot. + Point *Point `json:"point,omitempty"` + + // TimeseriesDesc: The descriptor of this time series. + TimeseriesDesc *TimeseriesDescriptor `json:"timeseriesDesc,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Point") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *TimeseriesPoint) MarshalJSON() ([]byte, error) { + type noMethod TimeseriesPoint + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// WriteTimeseriesRequest: The request of +// cloudmonitoring.timeseries.write +type WriteTimeseriesRequest struct { + // CommonLabels: The label's name. + CommonLabels map[string]string `json:"commonLabels,omitempty"` + + // Timeseries: Provide time series specific labels and the data points + // for each time series. The labels in timeseries and the common_labels + // should form a complete list of labels that required by the metric. + Timeseries []*TimeseriesPoint `json:"timeseries,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CommonLabels") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *WriteTimeseriesRequest) MarshalJSON() ([]byte, error) { + type noMethod WriteTimeseriesRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// WriteTimeseriesResponse: The response of +// cloudmonitoring.timeseries.write +type WriteTimeseriesResponse struct { + // Kind: Identifies what kind of resource this is. Value: the fixed + // string "cloudmonitoring#writeTimeseriesResponse". + Kind string `json:"kind,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Kind") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *WriteTimeseriesResponse) MarshalJSON() ([]byte, error) { + type noMethod WriteTimeseriesResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// method id "cloudmonitoring.metricDescriptors.create": + +type MetricDescriptorsCreateCall struct { + s *Service + project string + metricdescriptor *MetricDescriptor + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Create: Create a new metric. +func (r *MetricDescriptorsService) Create(project string, metricdescriptor *MetricDescriptor) *MetricDescriptorsCreateCall { + c := &MetricDescriptorsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.metricdescriptor = metricdescriptor + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *MetricDescriptorsCreateCall) Fields(s ...googleapi.Field) *MetricDescriptorsCreateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *MetricDescriptorsCreateCall) Context(ctx context.Context) *MetricDescriptorsCreateCall { + c.ctx_ = ctx + return c +} + +func (c *MetricDescriptorsCreateCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.metricdescriptor) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/metricDescriptors") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "cloudmonitoring.metricDescriptors.create" call. +// Exactly one of *MetricDescriptor or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *MetricDescriptor.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *MetricDescriptorsCreateCall) Do(opts ...googleapi.CallOption) (*MetricDescriptor, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
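+ // Editorial note: every generated Do method below shares this shape:
+ // issue the request, report an HTTP 304 as a *googleapi.Error, run
+ // googleapi.CheckResponse, then decode the JSON body into the typed
+ // response struct.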
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &MetricDescriptor{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Create a new metric.", + // "httpMethod": "POST", + // "id": "cloudmonitoring.metricDescriptors.create", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "The project id. The value can be the numeric project ID or string-based project name.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/metricDescriptors", + // "request": { + // "$ref": "MetricDescriptor" + // }, + // "response": { + // "$ref": "MetricDescriptor" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/monitoring" + // ] + // } + +} + +// method id "cloudmonitoring.metricDescriptors.delete": + +type MetricDescriptorsDeleteCall struct { + s *Service + project string + metric string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Delete an existing metric. +func (r *MetricDescriptorsService) Delete(project string, metric string) *MetricDescriptorsDeleteCall { + c := &MetricDescriptorsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.metric = metric + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *MetricDescriptorsDeleteCall) Fields(s ...googleapi.Field) *MetricDescriptorsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *MetricDescriptorsDeleteCall) Context(ctx context.Context) *MetricDescriptorsDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *MetricDescriptorsDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/metricDescriptors/{metric}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "metric": c.metric, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "cloudmonitoring.metricDescriptors.delete" call. +// Exactly one of *DeleteMetricDescriptorResponse or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *DeleteMetricDescriptorResponse.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. 
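+//
+// A hedged call sketch (editorial; svc is an assumed *Service from New,
+// and the project and metric names are placeholders):
+//
+//	resp, err := svc.MetricDescriptors.Delete("my-project", "my/metric").Do()
+//	if err != nil {
+//		// handle the error
+//	}
+//	_ = resp.Kind // "cloudmonitoring#deleteMetricDescriptorResponse"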
+func (c *MetricDescriptorsDeleteCall) Do(opts ...googleapi.CallOption) (*DeleteMetricDescriptorResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &DeleteMetricDescriptorResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Delete an existing metric.", + // "httpMethod": "DELETE", + // "id": "cloudmonitoring.metricDescriptors.delete", + // "parameterOrder": [ + // "project", + // "metric" + // ], + // "parameters": { + // "metric": { + // "description": "Name of the metric.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "The project ID to which the metric belongs.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/metricDescriptors/{metric}", + // "response": { + // "$ref": "DeleteMetricDescriptorResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/monitoring" + // ] + // } + +} + +// method id "cloudmonitoring.metricDescriptors.list": + +type MetricDescriptorsListCall struct { + s *Service + project string + listmetricdescriptorsrequest *ListMetricDescriptorsRequest + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: List metric descriptors that match the query. If the query is +// not set, then all of the metric descriptors will be returned. Large +// responses will be paginated, use the nextPageToken returned in the +// response to request subsequent pages of results by setting the +// pageToken query parameter to the value of the nextPageToken. +func (r *MetricDescriptorsService) List(project string, listmetricdescriptorsrequest *ListMetricDescriptorsRequest) *MetricDescriptorsListCall { + c := &MetricDescriptorsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.listmetricdescriptorsrequest = listmetricdescriptorsrequest + return c +} + +// Count sets the optional parameter "count": Maximum number of metric +// descriptors per page. Used for pagination. If not specified, count = +// 100. +func (c *MetricDescriptorsListCall) Count(count int64) *MetricDescriptorsListCall { + c.urlParams_.Set("count", fmt.Sprint(count)) + return c +} + +// PageToken sets the optional parameter "pageToken": The pagination +// token, which is used to page through large result sets. Set this +// value to the value of the nextPageToken to retrieve the next page of +// results. +func (c *MetricDescriptorsListCall) PageToken(pageToken string) *MetricDescriptorsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Query sets the optional parameter "query": The query used to search +// against existing metrics. Separate keywords with a space; the service +// joins all keywords with AND, meaning that all keywords must match for +// a metric to be returned. If this field is omitted, all metrics are +// returned. 
If an empty string is passed with this field, no metrics +// are returned. +func (c *MetricDescriptorsListCall) Query(query string) *MetricDescriptorsListCall { + c.urlParams_.Set("query", query) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *MetricDescriptorsListCall) Fields(s ...googleapi.Field) *MetricDescriptorsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *MetricDescriptorsListCall) IfNoneMatch(entityTag string) *MetricDescriptorsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *MetricDescriptorsListCall) Context(ctx context.Context) *MetricDescriptorsListCall { + c.ctx_ = ctx + return c +} + +func (c *MetricDescriptorsListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/metricDescriptors") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "cloudmonitoring.metricDescriptors.list" call. +// Exactly one of *ListMetricDescriptorsResponse or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *ListMetricDescriptorsResponse.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *MetricDescriptorsListCall) Do(opts ...googleapi.CallOption) (*ListMetricDescriptorsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ListMetricDescriptorsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "List metric descriptors that match the query. If the query is not set, then all of the metric descriptors will be returned.
Large responses will be paginated, use the nextPageToken returned in the response to request subsequent pages of results by setting the pageToken query parameter to the value of the nextPageToken.", + // "httpMethod": "GET", + // "id": "cloudmonitoring.metricDescriptors.list", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "count": { + // "default": "100", + // "description": "Maximum number of metric descriptors per page. Used for pagination. If not specified, count = 100.", + // "format": "int32", + // "location": "query", + // "maximum": "1000", + // "minimum": "1", + // "type": "integer" + // }, + // "pageToken": { + // "description": "The pagination token, which is used to page through large result sets. Set this value to the value of the nextPageToken to retrieve the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "The project id. The value can be the numeric project ID or string-based project name.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "query": { + // "description": "The query used to search against existing metrics. Separate keywords with a space; the service joins all keywords with AND, meaning that all keywords must match for a metric to be returned. If this field is omitted, all metrics are returned. If an empty string is passed with this field, no metrics are returned.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/metricDescriptors", + // "request": { + // "$ref": "ListMetricDescriptorsRequest" + // }, + // "response": { + // "$ref": "ListMetricDescriptorsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/monitoring" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *MetricDescriptorsListCall) Pages(ctx context.Context, f func(*ListMetricDescriptorsResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "cloudmonitoring.timeseries.list": + +type TimeseriesListCall struct { + s *Service + project string + metric string + listtimeseriesrequest *ListTimeseriesRequest + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: List the data points of the time series that match the metric +// and labels values and that have data points in the interval. Large +// responses are paginated; use the nextPageToken returned in the +// response to request subsequent pages of results by setting the +// pageToken query parameter to the value of the nextPageToken. 
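+//
+// A hedged call sketch (editorial; svc and the metric name are
+// placeholders): pin youngest to the current time and look back four
+// hours with the timespan parameter.
+//
+//	call := svc.Timeseries.List("my-project",
+//		"compute.googleapis.com/instance/disk/read_ops_count",
+//		time.Now().Format(time.RFC3339), nil)
+//	resp, err := call.Timespan("4h").Do()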
+func (r *TimeseriesService) List(project string, metric string, youngest string, listtimeseriesrequest *ListTimeseriesRequest) *TimeseriesListCall { + c := &TimeseriesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.metric = metric + c.urlParams_.Set("youngest", youngest) + c.listtimeseriesrequest = listtimeseriesrequest + return c +} + +// Aggregator sets the optional parameter "aggregator": The aggregation +// function that will reduce the data points in each window to a single +// point. This parameter is only valid for non-cumulative metrics with a +// value type of INT64 or DOUBLE. +// +// Possible values: +// "max" +// "mean" +// "min" +// "sum" +func (c *TimeseriesListCall) Aggregator(aggregator string) *TimeseriesListCall { + c.urlParams_.Set("aggregator", aggregator) + return c +} + +// Count sets the optional parameter "count": Maximum number of data +// points per page, which is used for pagination of results. +func (c *TimeseriesListCall) Count(count int64) *TimeseriesListCall { + c.urlParams_.Set("count", fmt.Sprint(count)) + return c +} + +// Labels sets the optional parameter "labels": A collection of labels +// for the matching time series, which are represented as: +// - key==value: key equals the value +// - key=~value: key regex matches the value +// - key!=value: key does not equal the value +// - key!~value: key regex does not match the value For example, to +// list all of the time series descriptors for the region us-central1, +// you could +// specify: +// label=cloud.googleapis.com%2Flocation=~us-central1.* +func (c *TimeseriesListCall) Labels(labels ...string) *TimeseriesListCall { + c.urlParams_.SetMulti("labels", append([]string{}, labels...)) + return c +} + +// Oldest sets the optional parameter "oldest": Start of the time +// interval (exclusive), which is expressed as an RFC 3339 timestamp. If +// neither oldest nor timespan is specified, the default time interval +// will be (youngest - 4 hours, youngest] +func (c *TimeseriesListCall) Oldest(oldest string) *TimeseriesListCall { + c.urlParams_.Set("oldest", oldest) + return c +} + +// PageToken sets the optional parameter "pageToken": The pagination +// token, which is used to page through large result sets. Set this +// value to the value of the nextPageToken to retrieve the next page of +// results. +func (c *TimeseriesListCall) PageToken(pageToken string) *TimeseriesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Timespan sets the optional parameter "timespan": Length of the time +// interval to query, which is an alternative way to declare the +// interval: (youngest - timespan, youngest]. The timespan and oldest +// parameters should not be used together. Units: +// - s: second +// - m: minute +// - h: hour +// - d: day +// - w: week Examples: 2s, 3m, 4w. Only one unit is allowed, for +// example: 2w3d is not allowed; you should use 17d instead. +// +// If neither oldest nor timespan is specified, the default time +// interval will be (youngest - 4 hours, youngest]. +func (c *TimeseriesListCall) Timespan(timespan string) *TimeseriesListCall { + c.urlParams_.Set("timespan", timespan) + return c +} + +// Window sets the optional parameter "window": The sampling window. At +// most one data point will be returned for each window in the requested +// time interval. This parameter is only valid for non-cumulative metric +// types. Units: +// - m: minute +// - h: hour +// - d: day +// - w: week Examples: 3m, 4w. 
Only one unit is allowed, for example: +// 2w3d is not allowed; you should use 17d instead. +func (c *TimeseriesListCall) Window(window string) *TimeseriesListCall { + c.urlParams_.Set("window", window) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TimeseriesListCall) Fields(s ...googleapi.Field) *TimeseriesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *TimeseriesListCall) IfNoneMatch(entityTag string) *TimeseriesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TimeseriesListCall) Context(ctx context.Context) *TimeseriesListCall { + c.ctx_ = ctx + return c +} + +func (c *TimeseriesListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/timeseries/{metric}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "metric": c.metric, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "cloudmonitoring.timeseries.list" call. +// Exactly one of *ListTimeseriesResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ListTimeseriesResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *TimeseriesListCall) Do(opts ...googleapi.CallOption) (*ListTimeseriesResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ListTimeseriesResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "List the data points of the time series that match the metric and labels values and that have data points in the interval.
Large responses are paginated; use the nextPageToken returned in the response to request subsequent pages of results by setting the pageToken query parameter to the value of the nextPageToken.", + // "httpMethod": "GET", + // "id": "cloudmonitoring.timeseries.list", + // "parameterOrder": [ + // "project", + // "metric", + // "youngest" + // ], + // "parameters": { + // "aggregator": { + // "description": "The aggregation function that will reduce the data points in each window to a single point. This parameter is only valid for non-cumulative metrics with a value type of INT64 or DOUBLE.", + // "enum": [ + // "max", + // "mean", + // "min", + // "sum" + // ], + // "enumDescriptions": [ + // "", + // "", + // "", + // "" + // ], + // "location": "query", + // "type": "string" + // }, + // "count": { + // "default": "6000", + // "description": "Maximum number of data points per page, which is used for pagination of results.", + // "format": "int32", + // "location": "query", + // "maximum": "12000", + // "minimum": "1", + // "type": "integer" + // }, + // "labels": { + // "description": "A collection of labels for the matching time series, which are represented as: \n- key==value: key equals the value \n- key=~value: key regex matches the value \n- key!=value: key does not equal the value \n- key!~value: key regex does not match the value For example, to list all of the time series descriptors for the region us-central1, you could specify:\nlabel=cloud.googleapis.com%2Flocation=~us-central1.*", + // "location": "query", + // "pattern": "(.+?)(==|=~|!=|!~)(.+)", + // "repeated": true, + // "type": "string" + // }, + // "metric": { + // "description": "Metric names are protocol-free URLs as listed in the Supported Metrics page. For example, compute.googleapis.com/instance/disk/read_ops_count.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "oldest": { + // "description": "Start of the time interval (exclusive), which is expressed as an RFC 3339 timestamp. If neither oldest nor timespan is specified, the default time interval will be (youngest - 4 hours, youngest]", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "The pagination token, which is used to page through large result sets. Set this value to the value of the nextPageToken to retrieve the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "The project ID to which this time series belongs. The value can be the numeric project ID or string-based project name.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "timespan": { + // "description": "Length of the time interval to query, which is an alternative way to declare the interval: (youngest - timespan, youngest]. The timespan and oldest parameters should not be used together. Units: \n- s: second \n- m: minute \n- h: hour \n- d: day \n- w: week Examples: 2s, 3m, 4w. Only one unit is allowed, for example: 2w3d is not allowed; you should use 17d instead.\n\nIf neither oldest nor timespan is specified, the default time interval will be (youngest - 4 hours, youngest].", + // "location": "query", + // "pattern": "[0-9]+[smhdw]?", + // "type": "string" + // }, + // "window": { + // "description": "The sampling window. At most one data point will be returned for each window in the requested time interval. This parameter is only valid for non-cumulative metric types. 
Units: \n- m: minute \n- h: hour \n- d: day \n- w: week Examples: 3m, 4w. Only one unit is allowed, for example: 2w3d is not allowed; you should use 17d instead.", + // "location": "query", + // "pattern": "[0-9]+[mhdw]?", + // "type": "string" + // }, + // "youngest": { + // "description": "End of the time interval (inclusive), which is expressed as an RFC 3339 timestamp.", + // "location": "query", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/timeseries/{metric}", + // "request": { + // "$ref": "ListTimeseriesRequest" + // }, + // "response": { + // "$ref": "ListTimeseriesResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/monitoring" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *TimeseriesListCall) Pages(ctx context.Context, f func(*ListTimeseriesResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "cloudmonitoring.timeseries.write": + +type TimeseriesWriteCall struct { + s *Service + project string + writetimeseriesrequest *WriteTimeseriesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Write: Put data points to one or more time series for one or more +// metrics. If a time series does not exist, a new time series will be +// created. It is not allowed to write a time series point that is older +// than the existing youngest point of that time series. Points that are +// older than the existing youngest point of that time series will be +// discarded silently. Therefore, users should make sure that points of +// a time series are written sequentially in the order of their end +// time. +func (r *TimeseriesService) Write(project string, writetimeseriesrequest *WriteTimeseriesRequest) *TimeseriesWriteCall { + c := &TimeseriesWriteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.writetimeseriesrequest = writetimeseriesrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TimeseriesWriteCall) Fields(s ...googleapi.Field) *TimeseriesWriteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TimeseriesWriteCall) Context(ctx context.Context) *TimeseriesWriteCall { + c.ctx_ = ctx + return c +} + +func (c *TimeseriesWriteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.writetimeseriesrequest) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/timeseries:write") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "cloudmonitoring.timeseries.write" call. +// Exactly one of *WriteTimeseriesResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *WriteTimeseriesResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *TimeseriesWriteCall) Do(opts ...googleapi.CallOption) (*WriteTimeseriesResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &WriteTimeseriesResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Put data points to one or more time series for one or more metrics. If a time series does not exist, a new time series will be created. It is not allowed to write a time series point that is older than the existing youngest point of that time series. Points that are older than the existing youngest point of that time series will be discarded silently. Therefore, users should make sure that points of a time series are written sequentially in the order of their end time.", + // "httpMethod": "POST", + // "id": "cloudmonitoring.timeseries.write", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "The project ID. The value can be the numeric project ID or string-based project name.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/timeseries:write", + // "request": { + // "$ref": "WriteTimeseriesRequest" + // }, + // "response": { + // "$ref": "WriteTimeseriesResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/monitoring" + // ] + // } + +} + +// method id "cloudmonitoring.timeseriesDescriptors.list": + +type TimeseriesDescriptorsListCall struct { + s *Service + project string + metric string + listtimeseriesdescriptorsrequest *ListTimeseriesDescriptorsRequest + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: List the descriptors of the time series that match the metric +// and labels values and that have data points in the interval. Large +// responses are paginated; use the nextPageToken returned in the +// response to request subsequent pages of results by setting the +// pageToken query parameter to the value of the nextPageToken. 
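+//
+// A minimal usage sketch (editorial addition; svc is assumed to be an
+// initialized *Service, and the project, metric, and label filter are
+// placeholders):
+//
+//	youngest := time.Now().UTC().Format(time.RFC3339)
+//	resp, err := svc.TimeseriesDescriptors.List("my-project",
+//		"compute.googleapis.com/instance/uptime", youngest, nil).
+//		Labels("cloud.googleapis.com/location=~us-central1.*").
+//		Do()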
+func (r *TimeseriesDescriptorsService) List(project string, metric string, youngest string, listtimeseriesdescriptorsrequest *ListTimeseriesDescriptorsRequest) *TimeseriesDescriptorsListCall { + c := &TimeseriesDescriptorsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.metric = metric + c.urlParams_.Set("youngest", youngest) + c.listtimeseriesdescriptorsrequest = listtimeseriesdescriptorsrequest + return c +} + +// Aggregator sets the optional parameter "aggregator": The aggregation +// function that will reduce the data points in each window to a single +// point. This parameter is only valid for non-cumulative metrics with a +// value type of INT64 or DOUBLE. +// +// Possible values: +// "max" +// "mean" +// "min" +// "sum" +func (c *TimeseriesDescriptorsListCall) Aggregator(aggregator string) *TimeseriesDescriptorsListCall { + c.urlParams_.Set("aggregator", aggregator) + return c +} + +// Count sets the optional parameter "count": Maximum number of time +// series descriptors per page. Used for pagination. If not specified, +// count = 100. +func (c *TimeseriesDescriptorsListCall) Count(count int64) *TimeseriesDescriptorsListCall { + c.urlParams_.Set("count", fmt.Sprint(count)) + return c +} + +// Labels sets the optional parameter "labels": A collection of labels +// for the matching time series, which are represented as: +// - key==value: key equals the value +// - key=~value: key regex matches the value +// - key!=value: key does not equal the value +// - key!~value: key regex does not match the value For example, to +// list all of the time series descriptors for the region us-central1, +// you could +// specify: +// label=cloud.googleapis.com%2Flocation=~us-central1.* +func (c *TimeseriesDescriptorsListCall) Labels(labels ...string) *TimeseriesDescriptorsListCall { + c.urlParams_.SetMulti("labels", append([]string{}, labels...)) + return c +} + +// Oldest sets the optional parameter "oldest": Start of the time +// interval (exclusive), which is expressed as an RFC 3339 timestamp. If +// neither oldest nor timespan is specified, the default time interval +// will be (youngest - 4 hours, youngest] +func (c *TimeseriesDescriptorsListCall) Oldest(oldest string) *TimeseriesDescriptorsListCall { + c.urlParams_.Set("oldest", oldest) + return c +} + +// PageToken sets the optional parameter "pageToken": The pagination +// token, which is used to page through large result sets. Set this +// value to the value of the nextPageToken to retrieve the next page of +// results. +func (c *TimeseriesDescriptorsListCall) PageToken(pageToken string) *TimeseriesDescriptorsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Timespan sets the optional parameter "timespan": Length of the time +// interval to query, which is an alternative way to declare the +// interval: (youngest - timespan, youngest]. The timespan and oldest +// parameters should not be used together. Units: +// - s: second +// - m: minute +// - h: hour +// - d: day +// - w: week Examples: 2s, 3m, 4w. Only one unit is allowed, for +// example: 2w3d is not allowed; you should use 17d instead. +// +// If neither oldest nor timespan is specified, the default time +// interval will be (youngest - 4 hours, youngest]. +func (c *TimeseriesDescriptorsListCall) Timespan(timespan string) *TimeseriesDescriptorsListCall { + c.urlParams_.Set("timespan", timespan) + return c +} + +// Window sets the optional parameter "window": The sampling window. 
At +// most one data point will be returned for each window in the requested +// time interval. This parameter is only valid for non-cumulative metric +// types. Units: +// - m: minute +// - h: hour +// - d: day +// - w: week Examples: 3m, 4w. Only one unit is allowed, for example: +// 2w3d is not allowed; you should use 17d instead. +func (c *TimeseriesDescriptorsListCall) Window(window string) *TimeseriesDescriptorsListCall { + c.urlParams_.Set("window", window) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TimeseriesDescriptorsListCall) Fields(s ...googleapi.Field) *TimeseriesDescriptorsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *TimeseriesDescriptorsListCall) IfNoneMatch(entityTag string) *TimeseriesDescriptorsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TimeseriesDescriptorsListCall) Context(ctx context.Context) *TimeseriesDescriptorsListCall { + c.ctx_ = ctx + return c +} + +func (c *TimeseriesDescriptorsListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/timeseriesDescriptors/{metric}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "metric": c.metric, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "cloudmonitoring.timeseriesDescriptors.list" call. +// Exactly one of *ListTimeseriesDescriptorsResponse or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *ListTimeseriesDescriptorsResponse.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *TimeseriesDescriptorsListCall) Do(opts ...googleapi.CallOption) (*ListTimeseriesDescriptorsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...)
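+	// Editorial note: SetOptions merges any per-call options (for example
+	// googleapi.QuotaUser) into c.urlParams_, so doRequest below encodes
+	// them into the final query string.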
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ListTimeseriesDescriptorsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "List the descriptors of the time series that match the metric and labels values and that have data points in the interval. Large responses are paginated; use the nextPageToken returned in the response to request subsequent pages of results by setting the pageToken query parameter to the value of the nextPageToken.", + // "httpMethod": "GET", + // "id": "cloudmonitoring.timeseriesDescriptors.list", + // "parameterOrder": [ + // "project", + // "metric", + // "youngest" + // ], + // "parameters": { + // "aggregator": { + // "description": "The aggregation function that will reduce the data points in each window to a single point. This parameter is only valid for non-cumulative metrics with a value type of INT64 or DOUBLE.", + // "enum": [ + // "max", + // "mean", + // "min", + // "sum" + // ], + // "enumDescriptions": [ + // "", + // "", + // "", + // "" + // ], + // "location": "query", + // "type": "string" + // }, + // "count": { + // "default": "100", + // "description": "Maximum number of time series descriptors per page. Used for pagination. If not specified, count = 100.", + // "format": "int32", + // "location": "query", + // "maximum": "1000", + // "minimum": "1", + // "type": "integer" + // }, + // "labels": { + // "description": "A collection of labels for the matching time series, which are represented as: \n- key==value: key equals the value \n- key=~value: key regex matches the value \n- key!=value: key does not equal the value \n- key!~value: key regex does not match the value For example, to list all of the time series descriptors for the region us-central1, you could specify:\nlabel=cloud.googleapis.com%2Flocation=~us-central1.*", + // "location": "query", + // "pattern": "(.+?)(==|=~|!=|!~)(.+)", + // "repeated": true, + // "type": "string" + // }, + // "metric": { + // "description": "Metric names are protocol-free URLs as listed in the Supported Metrics page. For example, compute.googleapis.com/instance/disk/read_ops_count.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "oldest": { + // "description": "Start of the time interval (exclusive), which is expressed as an RFC 3339 timestamp. If neither oldest nor timespan is specified, the default time interval will be (youngest - 4 hours, youngest]", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "The pagination token, which is used to page through large result sets. Set this value to the value of the nextPageToken to retrieve the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "The project ID to which this time series belongs. 
The value can be the numeric project ID or string-based project name.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "timespan": { + // "description": "Length of the time interval to query, which is an alternative way to declare the interval: (youngest - timespan, youngest]. The timespan and oldest parameters should not be used together. Units: \n- s: second \n- m: minute \n- h: hour \n- d: day \n- w: week Examples: 2s, 3m, 4w. Only one unit is allowed, for example: 2w3d is not allowed; you should use 17d instead.\n\nIf neither oldest nor timespan is specified, the default time interval will be (youngest - 4 hours, youngest].", + // "location": "query", + // "pattern": "[0-9]+[smhdw]?", + // "type": "string" + // }, + // "window": { + // "description": "The sampling window. At most one data point will be returned for each window in the requested time interval. This parameter is only valid for non-cumulative metric types. Units: \n- m: minute \n- h: hour \n- d: day \n- w: week Examples: 3m, 4w. Only one unit is allowed, for example: 2w3d is not allowed; you should use 17d instead.", + // "location": "query", + // "pattern": "[0-9]+[mhdw]?", + // "type": "string" + // }, + // "youngest": { + // "description": "End of the time interval (inclusive), which is expressed as an RFC 3339 timestamp.", + // "location": "query", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/timeseriesDescriptors/{metric}", + // "request": { + // "$ref": "ListTimeseriesDescriptorsRequest" + // }, + // "response": { + // "$ref": "ListTimeseriesDescriptorsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/monitoring" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
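+//
+// A minimal usage sketch (editorial addition; svc, ctx, and the
+// Timeseries field on the response are assumed from the generated types
+// earlier in this file):
+//
+//	call := svc.TimeseriesDescriptors.List("my-project",
+//		"compute.googleapis.com/instance/uptime",
+//		time.Now().UTC().Format(time.RFC3339), nil)
+//	err := call.Pages(ctx, func(page *ListTimeseriesDescriptorsResponse) error {
+//		for _, td := range page.Timeseries {
+//			_ = td // process each descriptor
+//		}
+//		return nil
+//	})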
+func (c *TimeseriesDescriptorsListCall) Pages(ctx context.Context, f func(*ListTimeseriesDescriptorsResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} diff --git a/Godeps/_workspace/src/google.golang.org/api/compute/v1/compute-api.json b/Godeps/_workspace/src/google.golang.org/api/compute/v1/compute-api.json new file mode 100644 index 000000000000..b9e0b1ac50cd --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/api/compute/v1/compute-api.json @@ -0,0 +1,14569 @@ +{ + "kind": "discovery#restDescription", + "etag": "\"jQLIOHBVnDZie4rQHGH1WJF-INE/-kKJM_jdN_4N4POlnVybNFH0Kag\"", + "discoveryVersion": "v1", + "id": "compute:v1", + "name": "compute", + "version": "v1", + "revision": "20160426", + "title": "Compute Engine API", + "description": "Creates and runs virtual machines on Google Cloud Platform.", + "ownerDomain": "google.com", + "ownerName": "Google", + "icons": { + "x16": "https://www.google.com/images/icons/product/compute_engine-16.png", + "x32": "https://www.google.com/images/icons/product/compute_engine-32.png" + }, + "documentationLink": "https://developers.google.com/compute/docs/reference/latest/", + "protocol": "rest", + "baseUrl": "https://www.googleapis.com/compute/v1/projects/", + "basePath": "/compute/v1/projects/", + "rootUrl": "https://www.googleapis.com/", + "servicePath": "compute/v1/projects/", + "batchPath": "batch", + "parameters": { + "alt": { + "type": "string", + "description": "Data format for the response.", + "default": "json", + "enum": [ + "json" + ], + "enumDescriptions": [ + "Responses with Content-Type of application/json" + ], + "location": "query" + }, + "fields": { + "type": "string", + "description": "Selector specifying which fields to include in a partial response.", + "location": "query" + }, + "key": { + "type": "string", + "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", + "location": "query" + }, + "oauth_token": { + "type": "string", + "description": "OAuth 2.0 token for the current user.", + "location": "query" + }, + "prettyPrint": { + "type": "boolean", + "description": "Returns response with indentations and line breaks.", + "default": "true", + "location": "query" + }, + "quotaUser": { + "type": "string", + "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. Overrides userIp if both are provided.", + "location": "query" + }, + "userIp": { + "type": "string", + "description": "IP address of the site where the request originates. 
Use this if you want to enforce per-user limits.", + "location": "query" + } + }, + "auth": { + "oauth2": { + "scopes": { + "https://www.googleapis.com/auth/cloud-platform": { + "description": "View and manage your data across Google Cloud Platform services" + }, + "https://www.googleapis.com/auth/compute": { + "description": "View and manage your Google Compute Engine resources" + }, + "https://www.googleapis.com/auth/compute.readonly": { + "description": "View your Google Compute Engine resources" + }, + "https://www.googleapis.com/auth/devstorage.full_control": { + "description": "Manage your data and permissions in Google Cloud Storage" + }, + "https://www.googleapis.com/auth/devstorage.read_only": { + "description": "View your data in Google Cloud Storage" + }, + "https://www.googleapis.com/auth/devstorage.read_write": { + "description": "Manage your data in Google Cloud Storage" + } + } + } + }, + "schemas": { + "AccessConfig": { + "id": "AccessConfig", + "type": "object", + "description": "An access configuration attached to an instance's network interface.", + "properties": { + "kind": { + "type": "string", + "description": "[Output Only] Type of the resource. Always compute#accessConfig for access configs.", + "default": "compute#accessConfig" + }, + "name": { + "type": "string", + "description": "Name of this access configuration." + }, + "natIP": { + "type": "string", + "description": "An external IP address associated with this instance. Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. If you specify a static external IP address, it must live in the same region as the zone of the instance." + }, + "type": { + "type": "string", + "description": "The type of configuration. The default and only option is ONE_TO_ONE_NAT.", + "default": "ONE_TO_ONE_NAT", + "enum": [ + "ONE_TO_ONE_NAT" + ], + "enumDescriptions": [ + "" + ] + } + } + }, + "Address": { + "id": "Address", + "type": "object", + "description": "A reserved address resource.", + "properties": { + "address": { + "type": "string", + "description": "The static external IP address represented by this resource." + }, + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "description": { + "type": "string", + "description": "An optional description of this resource. Provide this property when you create the resource." + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of the resource. Always compute#address for addresses.", + "default": "compute#address" + }, + "name": { + "type": "string", + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? 
which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "annotations": { + "required": [ + "compute.addresses.insert" + ] + } + }, + "region": { + "type": "string", + "description": "[Output Only] URL of the region where the regional address resides. This field is not applicable to global addresses." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + }, + "status": { + "type": "string", + "description": "[Output Only] The status of the address, which can be either IN_USE or RESERVED. An address that is RESERVED is currently reserved and available to use. An IN_USE address is currently being used by another resource and is not available.", + "enum": [ + "IN_USE", + "RESERVED" + ], + "enumDescriptions": [ + "", + "" + ] + }, + "users": { + "type": "array", + "description": "[Output Only] The URLs of the resources that are using this address.", + "items": { + "type": "string" + } + } + } + }, + "AddressAggregatedList": { + "id": "AddressAggregatedList", + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] Unique identifier for the resource; defined by the server." + }, + "items": { + "type": "object", + "description": "[Output Only] A map of scoped address lists.", + "additionalProperties": { + "$ref": "AddressesScopedList", + "description": "[Output Only] Name of the scope containing this set of addresses." + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#addressAggregatedList for aggregated lists of addresses.", + "default": "compute#addressAggregatedList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + } + } + }, + "AddressList": { + "id": "AddressList", + "type": "object", + "description": "Contains a list of addresses.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." + }, + "items": { + "type": "array", + "description": "[Output Only] A list of addresses.", + "items": { + "$ref": "Address" + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#addressList for lists of addresses.", + "default": "compute#addressList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." 
+ } + } + }, + "AddressesScopedList": { + "id": "AddressesScopedList", + "type": "object", + "properties": { + "addresses": { + "type": "array", + "description": "[Output Only] List of addresses contained in this scope.", + "items": { + "$ref": "Address" + } + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning which replaces the list of addresses when the list is empty.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, + "AttachedDisk": { + "id": "AttachedDisk", + "type": "object", + "description": "An instance-attached disk resource.", + "properties": { + "autoDelete": { + "type": "boolean", + "description": "Specifies whether the disk will be auto-deleted when the instance is deleted (but not when the disk is detached from the instance)." + }, + "boot": { + "type": "boolean", + "description": "Indicates that this is a boot disk. The virtual machine will use the first partition of the disk for its root filesystem." + }, + "deviceName": { + "type": "string", + "description": "Specifies a unique device name of your choice that is reflected into the /dev/disk/by-id/google-* tree of a Linux operating system running within the instance. This name can be used to reference the device for mounting, resizing, and so on, from within the instance.\n\nIf not specified, the server chooses a default device name to apply to this disk, in the form persistent-disks-x, where x is a number assigned by Google Compute Engine. This field is only applicable for persistent disks." + }, + "index": { + "type": "integer", + "description": "Assigns a zero-based index to this disk, where 0 is reserved for the boot disk. 
For example, if you have many disks attached to an instance, each disk would have a unique index number. If not specified, the server will choose an appropriate value.", + "format": "int32" + }, + "initializeParams": { + "$ref": "AttachedDiskInitializeParams", + "description": "[Input Only] Specifies the parameters for a new disk that will be created alongside the new instance. Use initialization parameters to create boot disks or local SSDs attached to the new instance.\n\nThis property is mutually exclusive with the source property; you can only define one or the other, but not both." + }, + "interface": { + "type": "string", + "description": "Specifies the disk interface to use for attaching this disk, which is either SCSI or NVME. The default is SCSI. Persistent disks must always use SCSI and the request will fail if you attempt to attach a persistent disk in any other format than SCSI. Local SSDs can use either NVME or SCSI. For performance characteristics of SCSI over NVMe, see Local SSD performance.", + "enum": [ + "NVME", + "SCSI" + ], + "enumDescriptions": [ + "", + "" + ] + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of the resource. Always compute#attachedDisk for attached disks.", + "default": "compute#attachedDisk" + }, + "licenses": { + "type": "array", + "description": "[Output Only] Any valid publicly visible licenses.", + "items": { + "type": "string" + } + }, + "mode": { + "type": "string", + "description": "The mode in which to attach this disk, either READ_WRITE or READ_ONLY. If not specified, the default is to attach the disk in READ_WRITE mode.", + "enum": [ + "READ_ONLY", + "READ_WRITE" + ], + "enumDescriptions": [ + "", + "" + ] + }, + "source": { + "type": "string", + "description": "Specifies a valid partial or full URL to an existing Persistent Disk resource. This field is only applicable for persistent disks." + }, + "type": { + "type": "string", + "description": "Specifies the type of the disk, either SCRATCH or PERSISTENT. If not specified, the default is PERSISTENT.", + "enum": [ + "PERSISTENT", + "SCRATCH" + ], + "enumDescriptions": [ + "", + "" + ], + "annotations": { + "required": [ + "compute.instances.insert" + ] + } + } + } + }, + "AttachedDiskInitializeParams": { + "id": "AttachedDiskInitializeParams", + "type": "object", + "description": "[Input Only] Specifies the parameters for a new disk that will be created alongside the new instance. Use initialization parameters to create boot disks or local SSDs attached to the new instance.\n\nThis property is mutually exclusive with the source property; you can only define one or the other, but not both.", + "properties": { + "diskName": { + "type": "string", + "description": "Specifies the disk name. If not specified, the default is to use the name of the instance." + }, + "diskSizeGb": { + "type": "string", + "description": "Specifies the size of the disk in base-2 GB.", + "format": "int64" + }, + "diskType": { + "type": "string", + "description": "Specifies the disk type to use to create the instance. If not specified, the default is pd-standard, specified using the full URL. For example:\n\nhttps://www.googleapis.com/compute/v1/projects/project/zones/zone/diskTypes/pd-standard \n\nOther values include pd-ssd and local-ssd. If you define this field, you can provide either the full or partial URL. 
For example, the following are valid values: \n- https://www.googleapis.com/compute/v1/projects/project/zones/zone/diskTypes/diskType \n- projects/project/zones/zone/diskTypes/diskType \n- zones/zone/diskTypes/diskType" + }, + "sourceImage": { + "type": "string", + "description": "The source image used to create this disk. If the source image is deleted, this field will not be set.\n\nTo create a disk with one of the public operating system images, specify the image by its family name. For example, specify family/debian-8 to use the latest Debian 8 image:\n\nprojects/debian-cloud/global/images/family/debian-8 \n\nAlternatively, use a specific version of a public operating system image:\n\nprojects/debian-cloud/global/images/debian-8-jessie-vYYYYMMDD \n\nTo create a disk with a private image that you created, specify the image name in the following format:\n\nglobal/images/my-private-image \n\nYou can also specify a private image by its image family, which returns the latest version of the image in that family. Replace the image name with family/family-name:\n\nglobal/images/family/my-private-family" + } + } + }, + "Autoscaler": { + "id": "Autoscaler", + "type": "object", + "description": "Represents an Autoscaler resource. Autoscalers allow you to automatically scale virtual machine instances in managed instance groups according to an autoscaling policy that you define. For more information, read Autoscaling Groups of Instances.", + "properties": { + "autoscalingPolicy": { + "$ref": "AutoscalingPolicy", + "description": "The configuration parameters for the autoscaling algorithm. You can define one or more of the policies for an autoscaler: cpuUtilization, customMetricUtilizations, and loadBalancingUtilization.\n\nIf none of these are specified, the default will be to autoscale based on cpuUtilization to 0.8 or 80%." + }, + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "description": { + "type": "string", + "description": "An optional description of this resource. Provide this property when you create the resource." + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of the resource. Always compute#autoscaler for autoscalers.", + "default": "compute#autoscaler" + }, + "name": { + "type": "string", + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "annotations": { + "required": [ + "compute.instanceGroups.insert" + ] + } + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + }, + "target": { + "type": "string", + "description": "URL of the managed instance group that this autoscaler will scale." + }, + "zone": { + "type": "string", + "description": "[Output Only] URL of the zone where the instance group resides." 
+ } + } + }, + "AutoscalerAggregatedList": { + "id": "AutoscalerAggregatedList", + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." + }, + "items": { + "type": "object", + "description": "A map of scoped autoscaler lists.", + "additionalProperties": { + "$ref": "AutoscalersScopedList", + "description": "[Output Only] Name of the scope containing this set of autoscalers." + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#autoscalerAggregatedList for aggregated lists of autoscalers.", + "default": "compute#autoscalerAggregatedList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + } + } + }, + "AutoscalerList": { + "id": "AutoscalerList", + "type": "object", + "description": "Contains a list of Autoscaler resources.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." + }, + "items": { + "type": "array", + "description": "A list of Autoscaler resources.", + "items": { + "$ref": "Autoscaler" + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#autoscalerList for lists of autoscalers.", + "default": "compute#autoscalerList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + } + } + }, + "AutoscalersScopedList": { + "id": "AutoscalersScopedList", + "type": "object", + "properties": { + "autoscalers": { + "type": "array", + "description": "[Output Only] List of autoscalers contained in this scope.", + "items": { + "$ref": "Autoscaler" + } + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning which replaces the list of autoscalers when the list is empty.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, + "AutoscalingPolicy": { + "id": "AutoscalingPolicy", + "type": "object", + "description": "Cloud Autoscaler policy.", + "properties": { + "coolDownPeriodSec": { + "type": "integer", + "description": "The number of seconds that the autoscaler should wait before it starts collecting information from a new instance. This prevents the autoscaler from collecting information when the instance is initializing, during which the collected usage would not be reliable. The default time autoscaler waits is 60 seconds.\n\nVirtual machine initialization times might vary because of numerous factors. We recommend that you test how long an instance may take to initialize. To do this, create an instance and time the startup process.", + "format": "int32" + }, + "cpuUtilization": { + "$ref": "AutoscalingPolicyCpuUtilization", + "description": "Defines the CPU utilization policy that allows the autoscaler to scale based on the average CPU utilization of a managed instance group." + }, + "customMetricUtilizations": { + "type": "array", + "description": "Configuration parameters of autoscaling based on a custom metric.", + "items": { + "$ref": "AutoscalingPolicyCustomMetricUtilization" + } + }, + "loadBalancingUtilization": { + "$ref": "AutoscalingPolicyLoadBalancingUtilization", + "description": "Configuration parameters of autoscaling based on load balancer." + }, + "maxNumReplicas": { + "type": "integer", + "description": "The maximum number of instances that the autoscaler can scale up to. This is required when creating or updating an autoscaler. The maximum number of replicas should not be lower than minimal number of replicas.", + "format": "int32" + }, + "minNumReplicas": { + "type": "integer", + "description": "The minimum number of replicas that the autoscaler can scale down to. This cannot be less than 0. 
If not provided, autoscaler will choose a default value depending on maximum number of instances allowed.", + "format": "int32" + } + } + }, + "AutoscalingPolicyCpuUtilization": { + "id": "AutoscalingPolicyCpuUtilization", + "type": "object", + "description": "CPU utilization policy.", + "properties": { + "utilizationTarget": { + "type": "number", + "description": "The target CPU utilization that the autoscaler should maintain. Must be a float value in the range (0, 1]. If not specified, the default is 0.8.\n\nIf the CPU level is below the target utilization, the autoscaler scales down the number of instances until it reaches the minimum number of instances you specified or until the average CPU of your instances reaches the target utilization.\n\nIf the average CPU is above the target utilization, the autoscaler scales up until it reaches the maximum number of instances you specified or until the average utilization reaches the target utilization.", + "format": "double" + } + } + }, + "AutoscalingPolicyCustomMetricUtilization": { + "id": "AutoscalingPolicyCustomMetricUtilization", + "type": "object", + "description": "Custom utilization metric policy.", + "properties": { + "metric": { + "type": "string", + "description": "The identifier of the Cloud Monitoring metric. The metric cannot have negative values and should be a utilization metric, which means that the number of virtual machines handling requests should increase or decrease proportionally to the metric. The metric must also have a label of compute.googleapis.com/resource_id with the value of the instance's unique ID, although this alone does not guarantee that the metric is valid.\n\nFor example, the following is a valid metric:\ncompute.googleapis.com/instance/network/received_bytes_count\n\n\nThe following is not a valid metric because it does not increase or decrease based on usage:\ncompute.googleapis.com/instance/cpu/reserved_cores" + }, + "utilizationTarget": { + "type": "number", + "description": "Target value of the metric which autoscaler should maintain. Must be a positive value.", + "format": "double" + }, + "utilizationTargetType": { + "type": "string", + "description": "Defines how target utilization value is expressed for a Cloud Monitoring metric. Either GAUGE, DELTA_PER_SECOND, or DELTA_PER_MINUTE. If not specified, the default is GAUGE.", + "enum": [ + "DELTA_PER_MINUTE", + "DELTA_PER_SECOND", + "GAUGE" + ], + "enumDescriptions": [ + "", + "", + "" + ] + } + } + }, + "AutoscalingPolicyLoadBalancingUtilization": { + "id": "AutoscalingPolicyLoadBalancingUtilization", + "type": "object", + "description": "Configuration parameters of autoscaling based on load balancing.", + "properties": { + "utilizationTarget": { + "type": "number", + "description": "Fraction of backend capacity utilization (set in HTTP(s) load balancing configuration) that autoscaler should maintain. Must be a positive float value. If not defined, the default is 0.8.", + "format": "double" + } + } + }, + "Backend": { + "id": "Backend", + "type": "object", + "description": "Message containing information of one individual backend.", + "properties": { + "balancingMode": { + "type": "string", + "description": "Specifies the balancing mode for this backend. For global HTTP(S) load balancing, the default is UTILIZATION. 
Valid values are UTILIZATION and RATE.", + "enum": [ + "RATE", + "UTILIZATION" + ], + "enumDescriptions": [ + "", + "" + ] + }, + "capacityScaler": { + "type": "number", + "description": "A multiplier applied to the group's maximum servicing capacity (either UTILIZATION or RATE). Default value is 1, which means the group will serve up to 100% of its configured CPU or RPS (depending on balancingMode). A setting of 0 means the group is completely drained, offering 0% of its available CPU or RPS. Valid range is [0.0,1.0].", + "format": "float" + }, + "description": { + "type": "string", + "description": "An optional description of this resource. Provide this property when you create the resource." + }, + "group": { + "type": "string", + "description": "The fully-qualified URL of a zonal Instance Group resource. This instance group defines the list of instances that serve traffic. Member virtual machine instances from each instance group must live in the same zone as the instance group itself. No two backends in a backend service are allowed to use the same Instance Group resource.\n\nNote that you must specify an Instance Group resource using the fully-qualified URL, rather than a partial URL." + }, + "maxRate": { + "type": "integer", + "description": "The max requests per second (RPS) of the group. Can be used with either RATE or UTILIZATION balancing modes, but is required in RATE mode. For RATE mode, either maxRate or maxRatePerInstance must be set.", + "format": "int32" + }, + "maxRatePerInstance": { + "type": "number", + "description": "The max requests per second (RPS) that a single backend instance can handle. This is used to calculate the capacity of the group. Can be used in either balancing mode. For RATE mode, either maxRate or maxRatePerInstance must be set.", + "format": "float" + }, + "maxUtilization": { + "type": "number", + "description": "Used when balancingMode is UTILIZATION. This ratio defines the CPU utilization target for the group. The default is 0.8. Valid range is [0.0, 1.0].", + "format": "float" + } + } + }, + "BackendService": { + "id": "BackendService", + "type": "object", + "description": "A BackendService resource. This resource defines a group of backend virtual machines and their serving capacity.", + "properties": { + "backends": { + "type": "array", + "description": "The list of backends that serve this BackendService.", + "items": { + "$ref": "Backend" + } + }, + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "description": { + "type": "string", + "description": "An optional description of this resource. Provide this property when you create the resource." + }, + "fingerprint": { + "type": "string", + "description": "Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. This field will be ignored when inserting a BackendService. An up-to-date fingerprint must be provided in order to update the BackendService.", + "format": "byte" + }, + "healthChecks": { + "type": "array", + "description": "The list of URLs to the HttpHealthCheck or HttpsHealthCheck resource for health checking this BackendService. Currently at most one health check can be specified, and a health check is required.", + "items": { + "type": "string" + } + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource.
This identifier is defined by the server.", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#backendService for backend services.", + "default": "compute#backendService" + }, + "name": { + "type": "string", + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" + }, + "port": { + "type": "integer", + "description": "Deprecated in favor of portName. The TCP port to connect on the backend. The default value is 80.", + "format": "int32" + }, + "portName": { + "type": "string", + "description": "Name of the backend port. The same name should appear in the instance groups referenced by this service. Required." + }, + "protocol": { + "type": "string", + "description": "The protocol this BackendService uses to communicate with backends.\n\nPossible values are HTTP and HTTPS.", + "enum": [ + "HTTP", + "HTTPS" + ], + "enumDescriptions": [ + "", + "" + ] + }, + "region": { + "type": "string", + "description": "[Output Only] URL of the region where the regional backend service resides. This field is not applicable to global backend services." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + }, + "timeoutSec": { + "type": "integer", + "description": "How many seconds to wait for the backend before considering the request failed. The default is 30 seconds.", + "format": "int32" + } + } + }, + "BackendServiceGroupHealth": { + "id": "BackendServiceGroupHealth", + "type": "object", + "properties": { + "healthStatus": { + "type": "array", + "items": { + "$ref": "HealthStatus" + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#backendServiceGroupHealth for the health of backend services.", + "default": "compute#backendServiceGroupHealth" + } + } + }, + "BackendServiceList": { + "id": "BackendServiceList", + "type": "object", + "description": "Contains a list of BackendService resources.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." + }, + "items": { + "type": "array", + "description": "A list of BackendService resources.", + "items": { + "$ref": "BackendService" + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#backendServiceList for lists of backend services.", + "default": "compute#backendServiceList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource."
+ } + } + }, + "DeprecationStatus": { + "id": "DeprecationStatus", + "type": "object", + "description": "Deprecation status for a public resource.", + "properties": { + "deleted": { + "type": "string", + "description": "An optional RFC3339 timestamp on or after which the deprecation state of this resource will be changed to DELETED." + }, + "deprecated": { + "type": "string", + "description": "An optional RFC3339 timestamp on or after which the deprecation state of this resource will be changed to DEPRECATED." + }, + "obsolete": { + "type": "string", + "description": "An optional RFC3339 timestamp on or after which the deprecation state of this resource will be changed to OBSOLETE." + }, + "replacement": { + "type": "string", + "description": "The URL of the suggested replacement for a deprecated resource. The suggested replacement resource must be the same kind of resource as the deprecated resource." + }, + "state": { + "type": "string", + "description": "The deprecation state of this resource. This can be DEPRECATED, OBSOLETE, or DELETED. Operations which create a new resource using a DEPRECATED resource will return successfully, but with a warning indicating the deprecated resource and recommending its replacement. Operations which use OBSOLETE or DELETED resources will be rejected and result in an error.", + "enum": [ + "DELETED", + "DEPRECATED", + "OBSOLETE" + ], + "enumDescriptions": [ + "", + "", + "" + ] + } + } + }, + "Disk": { + "id": "Disk", + "type": "object", + "description": "A Disk resource.", + "properties": { + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "description": { + "type": "string", + "description": "An optional description of this resource. Provide this property when you create the resource." + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of the resource. Always compute#disk for disks.", + "default": "compute#disk" + }, + "lastAttachTimestamp": { + "type": "string", + "description": "[Output Only] Last attach timestamp in RFC3339 text format." + }, + "lastDetachTimestamp": { + "type": "string", + "description": "[Output Only] Last detach timestamp in RFC3339 text format." + }, + "licenses": { + "type": "array", + "description": "[Output Only] Any applicable publicly visible licenses.", + "items": { + "type": "string" + } + }, + "name": { + "type": "string", + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "annotations": { + "required": [ + "compute.disks.insert" + ] + } + }, + "options": { + "type": "string", + "description": "Internal use only." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined fully-qualified URL for this resource." + }, + "sizeGb": { + "type": "string", + "description": "Size of the persistent disk, specified in GB. 
You can specify this field when creating a persistent disk using the sourceImage or sourceSnapshot parameter, or specify it alone to create an empty persistent disk.\n\nIf you specify this field along with sourceImage or sourceSnapshot, the value of sizeGb must not be less than the size of the sourceImage or the size of the snapshot.", + "format": "int64" + }, + "sourceImage": { + "type": "string", + "description": "The source image used to create this disk. If the source image is deleted, this field will not be set.\n\nTo create a disk with one of the public operating system images, specify the image by its family name. For example, specify family/debian-8 to use the latest Debian 8 image:\n\nprojects/debian-cloud/global/images/family/debian-8 \n\nAlternatively, use a specific version of a public operating system image:\n\nprojects/debian-cloud/global/images/debian-8-jessie-vYYYYMMDD \n\nTo create a disk with a private image that you created, specify the image name in the following format:\n\nglobal/images/my-private-image \n\nYou can also specify a private image by its image family, which returns the latest version of the image in that family. Replace the image name with family/family-name:\n\nglobal/images/family/my-private-family" + }, + "sourceImageId": { + "type": "string", + "description": "[Output Only] The ID value of the image used to create this disk. This value identifies the exact image that was used to create this persistent disk. For example, if you created the persistent disk from an image that was later deleted and recreated under the same name, the source image ID would identify the exact version of the image that was used." + }, + "sourceSnapshot": { + "type": "string", + "description": "The source snapshot used to create this disk. You can provide this as a partial or full URL to the resource. For example, the following are valid values: \n- https://www.googleapis.com/compute/v1/projects/project/global/snapshots/snapshot \n- projects/project/global/snapshots/snapshot \n- global/snapshots/snapshot" + }, + "sourceSnapshotId": { + "type": "string", + "description": "[Output Only] The unique ID of the snapshot used to create this disk. This value identifies the exact snapshot that was used to create this persistent disk. For example, if you created the persistent disk from a snapshot that was later deleted and recreated under the same name, the source snapshot ID would identify the exact version of the snapshot that was used." + }, + "status": { + "type": "string", + "description": "[Output Only] The status of disk creation. Applicable statuses include: CREATING, FAILED, READY, RESTORING.", + "enum": [ + "CREATING", + "FAILED", + "READY", + "RESTORING" + ], + "enumDescriptions": [ + "", + "", + "", + "" + ] + }, + "type": { + "type": "string", + "description": "URL of the disk type resource describing which disk type to use to create the disk. Provide this when creating the disk." + }, + "users": { + "type": "array", + "description": "[Output Only] Links to the users of the disk (attached instances) in the form: project/zones/zone/instances/instance", + "items": { + "type": "string" + } + }, + "zone": { + "type": "string", + "description": "[Output Only] URL of the zone where the disk resides." + } + } + }, + "DiskAggregatedList": { + "id": "DiskAggregatedList", + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server."
+ }, + "items": { + "type": "object", + "description": "[Output Only] A map of scoped disk lists.", + "additionalProperties": { + "$ref": "DisksScopedList", + "description": "[Output Only] Name of the scope containing this set of disks." + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#diskAggregatedList for aggregated lists of persistent disks.", + "default": "compute#diskAggregatedList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + } + } + }, + "DiskList": { + "id": "DiskList", + "type": "object", + "description": "A list of Disk resources.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." + }, + "items": { + "type": "array", + "description": "[Output Only] A list of persistent disks.", + "items": { + "$ref": "Disk" + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#diskList for lists of disks.", + "default": "compute#diskList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + } + } + }, + "DiskMoveRequest": { + "id": "DiskMoveRequest", + "type": "object", + "properties": { + "destinationZone": { + "type": "string", + "description": "The URL of the destination zone to move the disk. This can be a full or partial URL. For example, the following are all valid URLs to a zone: \n- https://www.googleapis.com/compute/v1/projects/project/zones/zone \n- projects/project/zones/zone \n- zones/zone" + }, + "targetDisk": { + "type": "string", + "description": "The URL of the target disk to move. This can be a full or partial URL. For example, the following are all valid URLs to a disk: \n- https://www.googleapis.com/compute/v1/projects/project/zones/zone/disks/disk \n- projects/project/zones/zone/disks/disk \n- zones/zone/disks/disk" + } + } + }, + "DiskType": { + "id": "DiskType", + "type": "object", + "description": "A DiskType resource.", + "properties": { + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "defaultDiskSizeGb": { + "type": "string", + "description": "[Output Only] Server-defined default disk size in GB.", + "format": "int64" + }, + "deprecated": { + "$ref": "DeprecationStatus", + "description": "[Output Only] The deprecation status associated with this disk type." + }, + "description": { + "type": "string", + "description": "[Output Only] An optional description of this resource." 
+ }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of the resource. Always compute#diskType for disk types.", + "default": "compute#diskType" + }, + "name": { + "type": "string", + "description": "[Output Only] Name of the resource.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + }, + "validDiskSize": { + "type": "string", + "description": "[Output Only] An optional textual description of the valid disk size, such as \"10GB-10TB\"." + }, + "zone": { + "type": "string", + "description": "[Output Only] URL of the zone where the disk type resides." + } + } + }, + "DiskTypeAggregatedList": { + "id": "DiskTypeAggregatedList", + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." + }, + "items": { + "type": "object", + "description": "[Output Only] A map of scoped disk type lists.", + "additionalProperties": { + "$ref": "DiskTypesScopedList", + "description": "[Output Only] Name of the scope containing this set of disk types." + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#diskTypeAggregatedList.", + "default": "compute#diskTypeAggregatedList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + } + } + }, + "DiskTypeList": { + "id": "DiskTypeList", + "type": "object", + "description": "Contains a list of disk types.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." + }, + "items": { + "type": "array", + "description": "[Output Only] A list of Disk Type resources.", + "items": { + "$ref": "DiskType" + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#diskTypeList for disk types.", + "default": "compute#diskTypeList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." 
+ } + } + }, + "DiskTypesScopedList": { + "id": "DiskTypesScopedList", + "type": "object", + "properties": { + "diskTypes": { + "type": "array", + "description": "[Output Only] List of disk types contained in this scope.", + "items": { + "$ref": "DiskType" + } + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning which replaces the list of disk types when the list is empty.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ]", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, + "DisksResizeRequest": { + "id": "DisksResizeRequest", + "type": "object", + "properties": { + "sizeGb": { + "type": "string", + "description": "The new size of the persistent disk, which is specified in GB.", + "format": "int64" + } + } + }, + "DisksScopedList": { + "id": "DisksScopedList", + "type": "object", + "properties": { + "disks": { + "type": "array", + "description": "[Output Only] List of disks contained in this scope.", + "items": { + "$ref": "Disk" + } + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning which replaces the list of disks when the list is empty.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable.
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ]", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, + "Firewall": { + "id": "Firewall", + "type": "object", + "description": "Represents a Firewall resource.", + "properties": { + "allowed": { + "type": "array", + "description": "The list of rules specified by this firewall. Each rule specifies a protocol and port-range tuple that describes a permitted connection.", + "items": { + "type": "object", + "properties": { + "IPProtocol": { + "type": "string", + "description": "The IP protocol that is allowed for this rule. The protocol type is required when creating a firewall rule. This value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, sctp), or the IP protocol number." + }, + "ports": { + "type": "array", + "description": "An optional list of ports which are allowed. This field is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, connections through any port are allowed.\n\nExample inputs include: [\"22\"], [\"80\",\"443\"], and [\"12345-12349\"].", + "items": { + "type": "string" + } + } + } + } + }, + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "description": { + "type": "string", + "description": "An optional description of this resource. Provide this property when you create the resource." + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of the resource. Always compute#firewall for firewall rules.", + "default": "compute#firewall" + }, + "name": { + "type": "string", + "description": "Name of the resource; provided by the client when the resource is created.
The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "annotations": { + "required": [ + "compute.firewalls.insert", + "compute.firewalls.patch" + ] + } + }, + "network": { + "type": "string", + "description": "URL of the network resource for this firewall rule. If not specified when creating a firewall rule, the default network is used:\nglobal/networks/default\nIf you choose to specify this property, you can specify the network as a full or partial URL. For example, the following are all valid URLs: \n- https://www.googleapis.com/compute/v1/projects/myproject/global/networks/my-network \n- projects/myproject/global/networks/my-network \n- global/networks/default" + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + }, + "sourceRanges": { + "type": "array", + "description": "The IP address blocks that this rule applies to, expressed in CIDR format. One or both of sourceRanges and sourceTags may be set.\n\nIf both properties are set, an inbound connection is allowed if the range matches the sourceRanges OR the tag of the source matches the sourceTags property. The connection does not need to match both properties.", + "items": { + "type": "string" + } + }, + "sourceTags": { + "type": "array", + "description": "A list of instance tags which this rule applies to. One or both of sourceRanges and sourceTags may be set.\n\nIf both properties are set, an inbound connection is allowed if the range matches the sourceRanges OR the tag of the source matches the sourceTags property. The connection does not need to match both properties.", + "items": { + "type": "string" + } + }, + "targetTags": { + "type": "array", + "description": "A list of instance tags indicating sets of instances located in the network that may make network connections as specified in allowed[]. If no targetTags are specified, the firewall rule applies to all instances on the specified network.", + "items": { + "type": "string" + } + } + } + }, + "FirewallList": { + "id": "FirewallList", + "type": "object", + "description": "Contains a list of firewalls.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." + }, + "items": { + "type": "array", + "description": "[Output Only] A list of Firewall resources.", + "items": { + "$ref": "Firewall" + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#firewallList for lists of firewalls.", + "default": "compute#firewallList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." 
+ } + } + }, + "ForwardingRule": { + "id": "ForwardingRule", + "type": "object", + "description": "A ForwardingRule resource. A ForwardingRule resource specifies which pool of target virtual machines to forward a packet to if it matches the given [IPAddress, IPProtocol, portRange] tuple.", + "properties": { + "IPAddress": { + "type": "string", + "description": "Value of the reserved IP address that this forwarding rule is serving on behalf of. For global forwarding rules, the address must be a global IP; for regional forwarding rules, the address must live in the same region as the forwarding rule. If left empty (default value), an ephemeral IP from the same scope (global or regional) will be assigned." + }, + "IPProtocol": { + "type": "string", + "description": "The IP protocol to which this rule applies. Valid options are AH, ESP, SCTP, TCP, and UDP.", + "enum": [ + "AH", + "ESP", + "SCTP", + "TCP", + "UDP" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "" + ] + }, + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "description": { + "type": "string", + "description": "An optional description of this resource. Provide this property when you create the resource." + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of the resource. Always compute#forwardingRule for Forwarding Rule resources.", + "default": "compute#forwardingRule" + }, + "name": { + "type": "string", + "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" + }, + "portRange": { + "type": "string", + "description": "Applicable only when IPProtocol is TCP, UDP, or SCTP. Only packets addressed to ports in the specified range will be forwarded to the target. Forwarding rules with the same [IPAddress, IPProtocol] pair must have disjoint port ranges." + }, + "region": { + "type": "string", + "description": "[Output Only] URL of the region where the regional forwarding rule resides. This field is not applicable to global forwarding rules." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + }, + "target": { + "type": "string", + "description": "The URL of the target resource to receive the matched traffic. For regional forwarding rules, this target must live in the same region as the forwarding rule. For global forwarding rules, this target must be a global TargetHttpProxy or TargetHttpsProxy resource. The forwarded traffic must be of a type appropriate to the target object. For example, TargetHttpProxy requires HTTP traffic, and TargetHttpsProxy requires HTTPS traffic." + } + } + }, + "ForwardingRuleAggregatedList": { + "id": "ForwardingRuleAggregatedList", + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server."
+ }, + "items": { + "type": "object", + "description": "A map of scoped forwarding rule lists.", + "additionalProperties": { + "$ref": "ForwardingRulesScopedList", + "description": "Name of the scope containing this set of forwarding rules." + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#forwardingRuleAggregatedList for lists of forwarding rules.", + "default": "compute#forwardingRuleAggregatedList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + } + } + }, + "ForwardingRuleList": { + "id": "ForwardingRuleList", + "type": "object", + "description": "Contains a list of ForwardingRule resources.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] Unique identifier for the resource. Set by the server." + }, + "items": { + "type": "array", + "description": "A list of ForwardingRule resources.", + "items": { + "$ref": "ForwardingRule" + } + }, + "kind": { + "type": "string", + "description": "Type of resource.", + "default": "compute#forwardingRuleList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + } + } + }, + "ForwardingRulesScopedList": { + "id": "ForwardingRulesScopedList", + "type": "object", + "properties": { + "forwardingRules": { + "type": "array", + "description": "List of forwarding rules contained in this scope.", + "items": { + "$ref": "ForwardingRule" + } + }, + "warning": { + "type": "object", + "description": "Informational warning which replaces the list of forwarding rules when the list is empty.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format.
For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ]", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, + "HealthCheckReference": { + "id": "HealthCheckReference", + "type": "object", + "description": "A full or valid partial URL to a health check. For example, the following are valid URLs: \n- https://www.googleapis.com/compute/beta/projects/project-id/global/httpHealthChecks/health-check \n- projects/project-id/global/httpHealthChecks/health-check \n- global/httpHealthChecks/health-check", + "properties": { + "healthCheck": { + "type": "string" + } + } + }, + "HealthStatus": { + "id": "HealthStatus", + "type": "object", + "properties": { + "healthState": { + "type": "string", + "description": "Health state of the instance.", + "enum": [ + "HEALTHY", + "UNHEALTHY" + ], + "enumDescriptions": [ + "", + "" + ] + }, + "instance": { + "type": "string", + "description": "URL of the instance resource." + }, + "ipAddress": { + "type": "string", + "description": "The IP address represented by this resource." + }, + "port": { + "type": "integer", + "description": "The port on the instance.", + "format": "int32" + } + } + }, + "HostRule": { + "id": "HostRule", + "type": "object", + "description": "A host-matching rule for a URL. If matched, the named PathMatcher is used to select the BackendService.", + "properties": { + "description": { + "type": "string", + "description": "An optional description of this resource. Provide this property when you create the resource." + }, + "hosts": { + "type": "array", + "description": "The list of host patterns to match. They must be valid hostnames, except that * will match any string of ([a-z0-9-.]*). In that case, * must be the first character, and it must be followed in the pattern by either - or ..", + "items": { + "type": "string" + } + }, + "pathMatcher": { + "type": "string", + "description": "The name of the PathMatcher to use to match the path portion of the URL if the hostRule matches the URL's host portion." + } + } + }, + "HttpHealthCheck": { + "id": "HttpHealthCheck", + "type": "object", + "description": "An HttpHealthCheck resource. This resource defines a template for how individual instances should be checked for health, via HTTP.", + "properties": { + "checkIntervalSec": { + "type": "integer", + "description": "How often (in seconds) to send a health check. The default value is 5 seconds.", + "format": "int32" + }, + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "description": { + "type": "string", + "description": "An optional description of this resource. Provide this property when you create the resource."
+ }, + "healthyThreshold": { + "type": "integer", + "description": "A so-far unhealthy instance will be marked healthy after this many consecutive successes. The default value is 2.", + "format": "int32" + }, + "host": { + "type": "string", + "description": "The value of the host header in the HTTP health check request. If left empty (default value), the public IP on behalf of which this health check is performed will be used." + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of the resource. Always compute#httpHealthCheck for HTTP health checks.", + "default": "compute#httpHealthCheck" + }, + "name": { + "type": "string", + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" + }, + "port": { + "type": "integer", + "description": "The TCP port number for the HTTP health check request. The default value is 80.", + "format": "int32" + }, + "requestPath": { + "type": "string", + "description": "The request path of the HTTP health check request. The default value is /." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + }, + "timeoutSec": { + "type": "integer", + "description": "How long (in seconds) to wait before claiming failure. The default value is 5 seconds. It is invalid for timeoutSec to have a greater value than checkIntervalSec.", + "format": "int32" + }, + "unhealthyThreshold": { + "type": "integer", + "description": "A so-far healthy instance will be marked unhealthy after this many consecutive failures. The default value is 2.", + "format": "int32" + } + } + }, + "HttpHealthCheckList": { + "id": "HttpHealthCheckList", + "type": "object", + "description": "Contains a list of HttpHealthCheck resources.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] Unique identifier for the resource. Defined by the server." + }, + "items": { + "type": "array", + "description": "A list of HttpHealthCheck resources.", + "items": { + "$ref": "HttpHealthCheck" + } + }, + "kind": { + "type": "string", + "description": "Type of resource.", + "default": "compute#httpHealthCheckList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + } + } + }, + "HttpsHealthCheck": { + "id": "HttpsHealthCheck", + "type": "object", + "description": "An HttpsHealthCheck resource.
This resource defines a template for how individual instances should be checked for health, via HTTPS.", + "properties": { + "checkIntervalSec": { + "type": "integer", + "description": "How often (in seconds) to send a health check. The default value is 5 seconds.", + "format": "int32" + }, + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "description": { + "type": "string", + "description": "An optional description of this resource. Provide this property when you create the resource." + }, + "healthyThreshold": { + "type": "integer", + "description": "A so-far unhealthy instance will be marked healthy after this many consecutive successes. The default value is 2.", + "format": "int32" + }, + "host": { + "type": "string", + "description": "The value of the host header in the HTTPS health check request. If left empty (default value), the public IP on behalf of which this health check is performed will be used." + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "Type of the resource.", + "default": "compute#httpsHealthCheck" + }, + "name": { + "type": "string", + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" + }, + "port": { + "type": "integer", + "description": "The TCP port number for the HTTPS health check request. The default value is 443.", + "format": "int32" + }, + "requestPath": { + "type": "string", + "description": "The request path of the HTTPS health check request. The default value is \"/\"." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + }, + "timeoutSec": { + "type": "integer", + "description": "How long (in seconds) to wait before claiming failure. The default value is 5 seconds. It is invalid for timeoutSec to have a greater value than checkIntervalSec.", + "format": "int32" + }, + "unhealthyThreshold": { + "type": "integer", + "description": "A so-far healthy instance will be marked unhealthy after this many consecutive failures. The default value is 2.", + "format": "int32" + } + } + }, + "HttpsHealthCheckList": { + "id": "HttpsHealthCheckList", + "type": "object", + "description": "Contains a list of HttpsHealthCheck resources.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] Unique identifier for the resource; defined by the server." + }, + "items": { + "type": "array", + "description": "A list of HttpsHealthCheck resources.", + "items": { + "$ref": "HttpsHealthCheck" + } + }, + "kind": { + "type": "string", + "description": "Type of resource.", + "default": "compute#httpsHealthCheckList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. 
If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + } + } + }, + "Image": { + "id": "Image", + "type": "object", + "description": "An Image resource.", + "properties": { + "archiveSizeBytes": { + "type": "string", + "description": "Size of the image tar.gz archive stored in Google Cloud Storage (in bytes).", + "format": "int64" + }, + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "deprecated": { + "$ref": "DeprecationStatus", + "description": "The deprecation status associated with this image." + }, + "description": { + "type": "string", + "description": "An optional description of this resource. Provide this property when you create the resource." + }, + "diskSizeGb": { + "type": "string", + "description": "Size of the image when restored onto a persistent disk (in GB).", + "format": "int64" + }, + "family": { + "type": "string", + "description": "The name of the image family to which this image belongs. You can create disks by specifying an image family instead of a specific image name. The image family always returns its latest image that is not deprecated." + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of the resource. Always compute#image for images.", + "default": "compute#image" + }, + "licenses": { + "type": "array", + "description": "Any applicable publicly visible licenses.", + "items": { + "type": "string" + } + }, + "name": { + "type": "string", + "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "annotations": { + "required": [ + "compute.images.insert" + ] + } + }, + "rawDisk": { + "type": "object", + "description": "The parameters of the raw disk image.", + "properties": { + "containerType": { + "type": "string", + "description": "The format used to encode and transmit the block device, which should be TAR. This is just a container and transmission format and not a runtime format. Provided by the client when the disk image is created.", + "enum": [ + "TAR" + ], + "enumDescriptions": [ + "" + ] + }, + "sha1Checksum": { + "type": "string", + "description": "An optional SHA1 checksum of the disk image before unpackaging; provided by the client when the disk image is created.", + "pattern": "[a-f0-9]{40}" + }, + "source": { + "type": "string", + "description": "The full Google Cloud Storage URL where the disk image is stored. 
You must provide either this property or the sourceDisk property but not both.", + "annotations": { + "required": [ + "compute.images.insert" + ] + } + } + } + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + }, + "sourceDisk": { + "type": "string", + "description": "URL of the source disk used to create this image. This can be a full or valid partial URL. You must provide either this property or the rawDisk.source property but not both to create an image. For example, the following are valid values: \n- https://www.googleapis.com/compute/v1/projects/project/zones/zone/disk/disk \n- projects/project/zones/zone/disk/disk \n- zones/zone/disks/disk" + }, + "sourceDiskId": { + "type": "string", + "description": "The ID value of the disk used to create this image. This value may be used to determine whether the image was taken from the current or a previous instance of a given disk name." + }, + "sourceType": { + "type": "string", + "description": "The type of the image used to create this disk. The default and only value is RAW.", + "default": "RAW", + "enum": [ + "RAW" + ], + "enumDescriptions": [ + "" + ] + }, + "status": { + "type": "string", + "description": "[Output Only] The status of the image. An image can be used to create other resources, such as instances, only after the image has been successfully created and the status is set to READY. Possible values are FAILED, PENDING, or READY.", + "enum": [ + "FAILED", + "PENDING", + "READY" + ], + "enumDescriptions": [ + "", + "", + "" + ] + } + } + }, + "ImageList": { + "id": "ImageList", + "type": "object", + "description": "Contains a list of images.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." + }, + "items": { + "type": "array", + "description": "[Output Only] A list of Image resources.", + "items": { + "$ref": "Image" + } + }, + "kind": { + "type": "string", + "description": "Type of resource.", + "default": "compute#imageList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + } + } + }, + "Instance": { + "id": "Instance", + "type": "object", + "description": "An Instance resource.", + "properties": { + "canIpForward": { + "type": "boolean", + "description": "Allows this instance to send and receive packets with non-matching destination or source IPs. This is required if you plan to use this instance to forward routes. For more information, see Enabling IP Forwarding." + }, + "cpuPlatform": { + "type": "string", + "description": "[Output Only] The CPU platform used by this instance." + }, + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "description": { + "type": "string", + "description": "An optional description of this resource. Provide this property when you create the resource." + }, + "disks": { + "type": "array", + "description": "Array of disks associated with this instance.
Persistent disks must be created before you can assign them.", + "items": { + "$ref": "AttachedDisk" + } + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of the resource. Always compute#instance for instances.", + "default": "compute#instance" + }, + "machineType": { + "type": "string", + "description": "Full or partial URL of the machine type resource to use for this instance, in the format: zones/zone/machineTypes/machine-type. This is provided by the client when the instance is created. For example, the following is a valid partial URL to a predefined machine type:\n\nzones/us-central1-f/machineTypes/n1-standard-1 \n\nTo create a custom machine type, provide a URL to a machine type in the following format, where CPUS is 1 or an even number up to 32 (2, 4, 6, ... 24, etc), and MEMORY is the total memory for this instance. Memory must be a multiple of 256 MB and must be supplied in MB (e.g. 5 GB of memory is 5120 MB):\n\nzones/zone/machineTypes/custom-CPUS-MEMORY \n\nFor example: zones/us-central1-f/machineTypes/custom-4-5120 \n\nFor a full list of restrictions, read the Specifications for custom machine types.", + "annotations": { + "required": [ + "compute.instances.insert" + ] + } + }, + "metadata": { + "$ref": "Metadata", + "description": "The metadata key/value pairs assigned to this instance. This includes custom metadata and predefined keys." + }, + "name": { + "type": "string", + "description": "The name of the resource, provided by the client when initially creating the resource. The resource name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash." + }, + "networkInterfaces": { + "type": "array", + "description": "An array of configurations for this interface. This specifies how this interface is configured to interact with other network services, such as connecting to the internet.", + "items": { + "$ref": "NetworkInterface" + } + }, + "scheduling": { + "$ref": "Scheduling", + "description": "Scheduling options for this instance." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + }, + "serviceAccounts": { + "type": "array", + "description": "A list of service accounts, with their specified scopes, authorized for this instance. Service accounts generate access tokens that can be accessed through the metadata server and used to authenticate applications on the instance. See Authenticating from Google Compute Engine for more information.", + "items": { + "$ref": "ServiceAccount" + } + }, + "status": { + "type": "string", + "description": "[Output Only] The status of the instance. One of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, STOPPED, SUSPENDING, SUSPENDED, and TERMINATED.", + "enum": [ + "PROVISIONING", + "RUNNING", + "STAGING", + "STOPPED", + "STOPPING", + "SUSPENDED", + "SUSPENDING", + "TERMINATED" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "statusMessage": { + "type": "string", + "description": "[Output Only] An optional, human-readable explanation of the status."
+ }, + "tags": { + "$ref": "Tags", + "description": "A list of tags to apply to this instance. Tags are used to identify valid sources or targets for network firewalls and are specified by the client during instance creation. The tags can be later modified by the setTags method. Each tag within the list must comply with RFC1035." + }, + "zone": { + "type": "string", + "description": "[Output Only] URL of the zone where the instance resides." + } + } + }, + "InstanceAggregatedList": { + "id": "InstanceAggregatedList", + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." + }, + "items": { + "type": "object", + "description": "[Output Only] A map of scoped instance lists.", + "additionalProperties": { + "$ref": "InstancesScopedList", + "description": "[Output Only] Name of the scope containing this set of instances." + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#instanceAggregatedList for aggregated lists of Instance resources.", + "default": "compute#instanceAggregatedList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + } + } + }, + "InstanceGroup": { + "id": "InstanceGroup", + "type": "object", + "properties": { + "creationTimestamp": { + "type": "string", + "description": "[Output Only] The creation timestamp for this instance group in RFC3339 text format." + }, + "description": { + "type": "string", + "description": "An optional description of this resource. Provide this property when you create the resource." + }, + "fingerprint": { + "type": "string", + "description": "[Output Only] The fingerprint of the named ports. The system uses this fingerprint to detect conflicts when multiple users change the named ports concurrently.", + "format": "byte" + }, + "id": { + "type": "string", + "description": "[Output Only] A unique identifier for this resource type. The server generates this identifier.", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "[Output Only] The resource type, which is always compute#instanceGroup for instance groups.", + "default": "compute#instanceGroup" + }, + "name": { + "type": "string", + "description": "The name of the instance group. The name must be 1-63 characters long, and comply with RFC1035.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "annotations": { + "required": [ + "compute.instanceGroupManagers.insert" + ] + } + }, + "namedPorts": { + "type": "array", + "description": "Assigns a name to a port number. For example: {name: \"http\", port: 80}\n\nThis allows the system to reference ports by the assigned name instead of a port number. Named ports can also contain multiple ports. 
For example: [{name: \"http\", port: 80},{name: \"http\", port: 8080}] \n\nNamed ports apply to all instances in this instance group.", + "items": { + "$ref": "NamedPort" + } + }, + "network": { + "type": "string", + "description": "The URL of the network to which all instances in the instance group belong." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] The URL for this instance group. The server generates this URL." + }, + "size": { + "type": "integer", + "description": "[Output Only] The total number of instances in the instance group.", + "format": "int32" + }, + "subnetwork": { + "type": "string", + "description": "The URL of the subnetwork to which all instances in the instance group belong." + }, + "zone": { + "type": "string", + "description": "[Output Only] The URL of the zone where the instance group is located." + } + } + }, + "InstanceGroupAggregatedList": { + "id": "InstanceGroupAggregatedList", + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] A unique identifier for this aggregated list of instance groups. The server generates this identifier." + }, + "items": { + "type": "object", + "description": "A map of scoped instance group lists.", + "additionalProperties": { + "$ref": "InstanceGroupsScopedList", + "description": "The name of the scope that contains this set of instance groups." + } + }, + "kind": { + "type": "string", + "description": "[Output Only] The resource type, which is always compute#instanceGroupAggregatedList for aggregated lists of instance groups.", + "default": "compute#instanceGroupAggregatedList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] The URL for this resource type. The server generates this URL." + } + } + }, + "InstanceGroupList": { + "id": "InstanceGroupList", + "type": "object", + "description": "A list of InstanceGroup resources.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] A unique identifier for this list of instance groups. The server generates this identifier." + }, + "items": { + "type": "array", + "description": "A list of instance groups.", + "items": { + "$ref": "InstanceGroup" + } + }, + "kind": { + "type": "string", + "description": "[Output Only] The resource type, which is always compute#instanceGroupList for instance group lists.", + "default": "compute#instanceGroupList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] The URL for this resource type. The server generates this URL." 
+ } + } + }, + "InstanceGroupManager": { + "id": "InstanceGroupManager", + "type": "object", + "properties": { + "baseInstanceName": { + "type": "string", + "description": "The base instance name to use for instances in this group. The value must be 1-58 characters long. Instances are named by appending a hyphen and a random four-character string to the base instance name. The base instance name must comply with RFC1035.", + "pattern": "[a-z][-a-z0-9]{0,57}", + "annotations": { + "required": [ + "compute.instanceGroupManagers.insert" + ] + } + }, + "creationTimestamp": { + "type": "string", + "description": "[Output Only] The creation timestamp for this managed instance group in RFC3339 text format." + }, + "currentActions": { + "$ref": "InstanceGroupManagerActionsSummary", + "description": "[Output Only] The list of instance actions and the number of instances in this managed instance group that are scheduled for each of those actions." + }, + "description": { + "type": "string", + "description": "An optional description of this resource. Provide this property when you create the resource." + }, + "fingerprint": { + "type": "string", + "description": "[Output Only] The fingerprint of the resource data. You can use this optional field for optimistic locking when you update the resource.", + "format": "byte" + }, + "id": { + "type": "string", + "description": "[Output Only] A unique identifier for this resource type. The server generates this identifier.", + "format": "uint64" + }, + "instanceGroup": { + "type": "string", + "description": "[Output Only] The URL of the Instance Group resource." + }, + "instanceTemplate": { + "type": "string", + "description": "The URL of the instance template that is specified for this managed instance group. The group uses this template to create all new instances in the managed instance group." + }, + "kind": { + "type": "string", + "description": "[Output Only] The resource type, which is always compute#instanceGroupManager for managed instance groups.", + "default": "compute#instanceGroupManager" + }, + "name": { + "type": "string", + "description": "The name of the managed instance group. The name must be 1-63 characters long, and comply with RFC1035.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "annotations": { + "required": [ + "compute.instanceGroupManagers.insert" + ] + } + }, + "namedPorts": { + "type": "array", + "description": "Named ports configured for the Instance Groups complementary to this Instance Group Manager.", + "items": { + "$ref": "NamedPort" + } + }, + "selfLink": { + "type": "string", + "description": "[Output Only] The URL for this managed instance group. The server defines this URL." + }, + "targetPools": { + "type": "array", + "description": "The URLs for all TargetPool resources to which instances in the instanceGroup field are added. The target pools automatically apply to all of the instances in the managed instance group.", + "items": { + "type": "string" + } + }, + "targetSize": { + "type": "integer", + "description": "The target number of running instances for this managed instance group. Deleting or abandoning instances reduces this number. Resizing the group changes this number.", + "format": "int32", + "annotations": { + "required": [ + "compute.instanceGroupManagers.insert" + ] + } + }, + "zone": { + "type": "string", + "description": "The name of the zone where the managed instance group is located." 
+ } + } + }, + "InstanceGroupManagerActionsSummary": { + "id": "InstanceGroupManagerActionsSummary", + "type": "object", + "properties": { + "abandoning": { + "type": "integer", + "description": "[Output Only] The total number of instances in the managed instance group that are scheduled to be abandoned. Abandoning an instance removes it from the managed instance group without deleting it.", + "format": "int32" + }, + "creating": { + "type": "integer", + "description": "[Output Only] The number of instances in the managed instance group that are scheduled to be created or are currently being created. If the group fails to create one of these instances, it tries again until it creates the instance successfully.", + "format": "int32" + }, + "deleting": { + "type": "integer", + "description": "[Output Only] The number of instances in the managed instance group that are scheduled to be deleted or are currently being deleted.", + "format": "int32" + }, + "none": { + "type": "integer", + "description": "[Output Only] The number of instances in the managed instance group that are running and have no scheduled actions.", + "format": "int32" + }, + "recreating": { + "type": "integer", + "description": "[Output Only] The number of instances in the managed instance group that are scheduled to be recreated or are currently being recreated. Recreating an instance deletes the existing root persistent disk and creates a new disk from the image that is defined in the instance template.", + "format": "int32" + }, + "refreshing": { + "type": "integer", + "description": "[Output Only] The number of instances in the managed instance group that are being reconfigured with properties that do not require a restart or a recreate action. For example, setting or removing target pools for the instance.", + "format": "int32" + }, + "restarting": { + "type": "integer", + "description": "[Output Only] The number of instances in the managed instance group that are scheduled to be restarted or are currently being restarted.", + "format": "int32" + } + } + }, + "InstanceGroupManagerAggregatedList": { + "id": "InstanceGroupManagerAggregatedList", + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] A unique identifier for this aggregated list of managed instance groups. The server generates this identifier." + }, + "items": { + "type": "object", + "description": "[Output Only] A map of filtered managed instance group lists.", + "additionalProperties": { + "$ref": "InstanceGroupManagersScopedList", + "description": "[Output Only] The name of the scope that contains this set of managed instance groups." + } + }, + "kind": { + "type": "string", + "description": "[Output Only] The resource type, which is always compute#instanceGroupManagerAggregatedList for an aggregated list of managed instance groups.", + "default": "compute#instanceGroupManagerAggregatedList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] The URL for this resource type. The server generates this URL."
+ } + } + }, + "InstanceGroupManagerList": { + "id": "InstanceGroupManagerList", + "type": "object", + "description": "[Output Only] A list of managed instance groups.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] A unique identifier for this resource type. The server generates this identifier." + }, + "items": { + "type": "array", + "description": "[Output Only] A list of managed instance groups.", + "items": { + "$ref": "InstanceGroupManager" + } + }, + "kind": { + "type": "string", + "description": "[Output Only] The resource type, which is always compute#instanceGroupManagerList for a list of managed instance groups.", + "default": "compute#instanceGroupManagerList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] The URL for this resource type. The server generates this URL." + } + } + }, + "InstanceGroupManagersAbandonInstancesRequest": { + "id": "InstanceGroupManagersAbandonInstancesRequest", + "type": "object", + "properties": { + "instances": { + "type": "array", + "description": "The URLs of one or more instances to abandon from the managed instance group.", + "items": { + "type": "string" + } + } + } + }, + "InstanceGroupManagersDeleteInstancesRequest": { + "id": "InstanceGroupManagersDeleteInstancesRequest", + "type": "object", + "properties": { + "instances": { + "type": "array", + "description": "The list of instances to delete from this managed instance group. Specify one or more instance URLs.", + "items": { + "type": "string" + } + } + } + }, + "InstanceGroupManagersListManagedInstancesResponse": { + "id": "InstanceGroupManagersListManagedInstancesResponse", + "type": "object", + "properties": { + "managedInstances": { + "type": "array", + "description": "[Output Only] The list of instances in the managed instance group.", + "items": { + "$ref": "ManagedInstance" + } + } + } + }, + "InstanceGroupManagersRecreateInstancesRequest": { + "id": "InstanceGroupManagersRecreateInstancesRequest", + "type": "object", + "properties": { + "instances": { + "type": "array", + "description": "The URLs of one or more instances to recreate.", + "items": { + "type": "string" + } + } + } + }, + "InstanceGroupManagersScopedList": { + "id": "InstanceGroupManagersScopedList", + "type": "object", + "properties": { + "instanceGroupManagers": { + "type": "array", + "description": "[Output Only] The list of managed instance groups that are contained in the specified project and zone.", + "items": { + "$ref": "InstanceGroupManager" + } + }, + "warning": { + "type": "object", + "description": "[Output Only] The warning that replaces the list of managed instance groups when the list is empty.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable.
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, + "InstanceGroupManagersSetInstanceTemplateRequest": { + "id": "InstanceGroupManagersSetInstanceTemplateRequest", + "type": "object", + "properties": { + "instanceTemplate": { + "type": "string", + "description": "The URL of the instance template that is specified for this managed instance group. The group uses this template to create all new instances in the managed instance group." + } + } + }, + "InstanceGroupManagersSetTargetPoolsRequest": { + "id": "InstanceGroupManagersSetTargetPoolsRequest", + "type": "object", + "properties": { + "fingerprint": { + "type": "string", + "description": "The fingerprint of the target pools information. Use this optional property to prevent conflicts when multiple users change the target pools settings concurrently. Obtain the fingerprint with the instanceGroupManagers.get method. Then, include the fingerprint in your request to ensure that you do not overwrite changes that were applied from another concurrent request.", + "format": "byte" + }, + "targetPools": { + "type": "array", + "description": "The list of target pool URLs that instances in this managed instance group belong to. The managed instance group applies these target pools to all of the instances in the group. 
Existing instances and new instances in the group all receive these target pool settings.", + "items": { + "type": "string" + } + } + } + }, + "InstanceGroupsAddInstancesRequest": { + "id": "InstanceGroupsAddInstancesRequest", + "type": "object", + "properties": { + "instances": { + "type": "array", + "description": "The list of instances to add to the instance group.", + "items": { + "$ref": "InstanceReference" + } + } + } + }, + "InstanceGroupsListInstances": { + "id": "InstanceGroupsListInstances", + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] A unique identifier for this list of instance groups. The server generates this identifier." + }, + "items": { + "type": "array", + "description": "[Output Only] A list of instances and any named ports that are assigned to those instances.", + "items": { + "$ref": "InstanceWithNamedPorts" + } + }, + "kind": { + "type": "string", + "description": "[Output Only] The resource type, which is always compute#instanceGroupsListInstances for lists of instance groups.", + "default": "compute#instanceGroupsListInstances" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] The URL for this list of instance groups. The server generates this URL." + } + } + }, + "InstanceGroupsListInstancesRequest": { + "id": "InstanceGroupsListInstancesRequest", + "type": "object", + "properties": { + "instanceState": { + "type": "string", + "description": "A filter for the state of the instances in the instance group. Valid options are ALL or RUNNING. If you do not specify this parameter the list includes all instances regardless of their state.", + "enum": [ + "ALL", + "RUNNING" + ], + "enumDescriptions": [ + "", + "" + ] + } + } + }, + "InstanceGroupsRemoveInstancesRequest": { + "id": "InstanceGroupsRemoveInstancesRequest", + "type": "object", + "properties": { + "instances": { + "type": "array", + "description": "The list of instances to remove from the instance group.", + "items": { + "$ref": "InstanceReference" + } + } + } + }, + "InstanceGroupsScopedList": { + "id": "InstanceGroupsScopedList", + "type": "object", + "properties": { + "instanceGroups": { + "type": "array", + "description": "[Output Only] The list of instance groups that are contained in this scope.", + "items": { + "$ref": "InstanceGroup" + } + }, + "warning": { + "type": "object", + "description": "[Output Only] An informational warning that replaces the list of instance groups when the list is empty.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, + "InstanceGroupsSetNamedPortsRequest": { + "id": "InstanceGroupsSetNamedPortsRequest", + "type": "object", + "properties": { + "fingerprint": { + "type": "string", + "description": "The fingerprint of the named ports information for this instance group. Use this optional property to prevent conflicts when multiple users change the named ports settings concurrently. Obtain the fingerprint with the instanceGroups.get method. Then, include the fingerprint in your request to ensure that you do not overwrite changes that were applied from another concurrent request.", + "format": "byte" + }, + "namedPorts": { + "type": "array", + "description": "The list of named ports to set for this instance group.", + "items": { + "$ref": "NamedPort" + } + } + } + }, + "InstanceList": { + "id": "InstanceList", + "type": "object", + "description": "Contains a list of instances.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." + }, + "items": { + "type": "array", + "description": "[Output Only] A list of instances.", + "items": { + "$ref": "Instance" + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#instanceList for lists of Instance resources.", + "default": "compute#instanceList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." 
+ }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + } + } + }, + "InstanceMoveRequest": { + "id": "InstanceMoveRequest", + "type": "object", + "properties": { + "destinationZone": { + "type": "string", + "description": "The URL of the destination zone to move the instance. This can be a full or partial URL. For example, the following are all valid URLs to a zone: \n- https://www.googleapis.com/compute/v1/projects/project/zones/zone \n- projects/project/zones/zone \n- zones/zone" + }, + "targetInstance": { + "type": "string", + "description": "The URL of the target instance to move. This can be a full or partial URL. For example, the following are all valid URLs to an instance: \n- https://www.googleapis.com/compute/v1/projects/project/zones/zone/instances/instance \n- projects/project/zones/zone/instances/instance \n- zones/zone/instances/instance" + } + } + }, + "InstanceProperties": { + "id": "InstanceProperties", + "type": "object", + "description": "", + "properties": { + "canIpForward": { + "type": "boolean", + "description": "Enables instances created based on this template to send packets with source IP addresses other than their own and receive packets with destination IP addresses other than their own. If these instances will be used as an IP gateway or it will be set as the next-hop in a Route resource, specify true. If unsure, leave this set to false. See the canIpForward documentation for more information." + }, + "description": { + "type": "string", + "description": "An optional text description for the instances that are created from this instance template." + }, + "disks": { + "type": "array", + "description": "An array of disks that are associated with the instances that are created from this template.", + "items": { + "$ref": "AttachedDisk" + } + }, + "machineType": { + "type": "string", + "description": "The machine type to use for instances that are created from this template.", + "annotations": { + "required": [ + "compute.instanceTemplates.insert" + ] + } + }, + "metadata": { + "$ref": "Metadata", + "description": "The metadata key/value pairs to assign to instances that are created from this template. These pairs can consist of custom metadata or predefined keys. See Project and instance metadata for more information." + }, + "networkInterfaces": { + "type": "array", + "description": "An array of network access configurations for this interface.", + "items": { + "$ref": "NetworkInterface" + } + }, + "scheduling": { + "$ref": "Scheduling", + "description": "Specifies the scheduling options for the instances that are created from this template." + }, + "serviceAccounts": { + "type": "array", + "description": "A list of service accounts with specified scopes. Access tokens for these service accounts are available to the instances that are created from this template. Use metadata queries to obtain the access tokens for these instances.", + "items": { + "$ref": "ServiceAccount" + } + }, + "tags": { + "$ref": "Tags", + "description": "A list of tags to apply to the instances that are created from this template. The tags identify valid sources or targets for network firewalls. The setTags method can modify this list of tags. Each tag within the list must comply with RFC1035." + } + } + }, + "InstanceReference": { + "id": "InstanceReference", + "type": "object", + "properties": { + "instance": { + "type": "string", + "description": "The URL for a specific instance." 
+ } + } + }, + "InstanceTemplate": { + "id": "InstanceTemplate", + "type": "object", + "description": "An Instance Template resource.", + "properties": { + "creationTimestamp": { + "type": "string", + "description": "[Output Only] The creation timestamp for this instance template in RFC3339 text format." + }, + "description": { + "type": "string", + "description": "An optional description of this resource. Provide this property when you create the resource." + }, + "id": { + "type": "string", + "description": "[Output Only] A unique identifier for this instance template. The server defines this identifier.", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "[Output Only] The resource type, which is always compute#instanceTemplate for instance templates.", + "default": "compute#instanceTemplate" + }, + "name": { + "type": "string", + "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "annotations": { + "required": [ + "compute.instanceTemplates.insert" + ] + } + }, + "properties": { + "$ref": "InstanceProperties", + "description": "The instance properties for this instance template." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] The URL for this instance template. The server defines this URL." + } + } + }, + "InstanceTemplateList": { + "id": "InstanceTemplateList", + "type": "object", + "description": "A list of instance templates.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] A unique identifier for this instance template. The server defines this identifier." + }, + "items": { + "type": "array", + "description": "[Output Only] A list of InstanceTemplate resources.", + "items": { + "$ref": "InstanceTemplate" + } + }, + "kind": { + "type": "string", + "description": "[Output Only] The resource type, which is always compute#instanceTemplateList for instance template lists.", + "default": "compute#instanceTemplateList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] The URL for this instance template list. The server defines this URL." + } + } + }, + "InstanceWithNamedPorts": { + "id": "InstanceWithNamedPorts", + "type": "object", + "properties": { + "instance": { + "type": "string", + "description": "[Output Only] The URL of the instance."
+ }, + "namedPorts": { + "type": "array", + "description": "[Output Only] The named ports that belong to this instance group.", + "items": { + "$ref": "NamedPort" + } + }, + "status": { + "type": "string", + "description": "[Output Only] The status of the instance.", + "enum": [ + "PROVISIONING", + "RUNNING", + "STAGING", + "STOPPED", + "STOPPING", + "SUSPENDED", + "SUSPENDING", + "TERMINATED" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "" + ] + } + } + }, + "InstancesScopedList": { + "id": "InstancesScopedList", + "type": "object", + "properties": { + "instances": { + "type": "array", + "description": "[Output Only] List of instances contained in this scope.", + "items": { + "$ref": "Instance" + } + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning which replaces the list of instances when the list is empty.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, + "InstancesSetMachineTypeRequest": { + "id": "InstancesSetMachineTypeRequest", + "type": "object", + "properties": { + "machineType": { + "type": "string", + "description": "Full or partial URL of the machine type resource. See Machine Types for a full list of machine types. For example: zones/us-central1-f/machineTypes/n1-standard-1" + } + } + }, + "License": { + "id": "License", + "type": "object", + "description": "A license resource.", + "properties": { + "chargesUseFee": { + "type": "boolean", + "description": "[Output Only] If true, the customer will be charged license fee for running software that contains this license on an instance." + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. 
Always compute#license for licenses.", + "default": "compute#license" + }, + "name": { + "type": "string", + "description": "[Output Only] Name of the resource. The name is 1-63 characters long and complies with RFC1035.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "annotations": { + "required": [ + "compute.images.insert" + ] + } + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + } + } + }, + "MachineType": { + "id": "MachineType", + "type": "object", + "description": "A Machine Type resource.", + "properties": { + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "deprecated": { + "$ref": "DeprecationStatus", + "description": "[Output Only] The deprecation status associated with this machine type." + }, + "description": { + "type": "string", + "description": "[Output Only] An optional textual description of the resource." + }, + "guestCpus": { + "type": "integer", + "description": "[Output Only] The number of virtual CPUs that are available to the instance.", + "format": "int32" + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" + }, + "imageSpaceGb": { + "type": "integer", + "description": "[Deprecated] This property is deprecated and will never be populated with any relevant values.", + "format": "int32" + }, + "kind": { + "type": "string", + "description": "[Output Only] The type of the resource. Always compute#machineType for machine types.", + "default": "compute#machineType" + }, + "maximumPersistentDisks": { + "type": "integer", + "description": "[Output Only] Maximum persistent disks allowed.", + "format": "int32" + }, + "maximumPersistentDisksSizeGb": { + "type": "string", + "description": "[Output Only] Maximum total persistent disks size (GB) allowed.", + "format": "int64" + }, + "memoryMb": { + "type": "integer", + "description": "[Output Only] The amount of physical memory available to the instance, defined in MB.", + "format": "int32" + }, + "name": { + "type": "string", + "description": "[Output Only] Name of the resource.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" + }, + "scratchDisks": { + "type": "array", + "description": "[Output Only] List of extended scratch disks assigned to the instance.", + "items": { + "type": "object", + "properties": { + "diskGb": { + "type": "integer", + "description": "Size of the scratch disk, defined in GB.", + "format": "int32" + } + } + } + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + }, + "zone": { + "type": "string", + "description": "[Output Only] The name of the zone where the machine type resides, such as us-central1-a." + } + } + }, + "MachineTypeAggregatedList": { + "id": "MachineTypeAggregatedList", + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." + }, + "items": { + "type": "object", + "description": "[Output Only] A map of scoped machine type lists.", + "additionalProperties": { + "$ref": "MachineTypesScopedList", + "description": "[Output Only] Name of the scope containing this set of machine types." + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. 
Always compute#machineTypeAggregatedList for aggregated lists of machine types.", + "default": "compute#machineTypeAggregatedList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + } + } + }, + "MachineTypeList": { + "id": "MachineTypeList", + "type": "object", + "description": "Contains a list of machine types.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." + }, + "items": { + "type": "array", + "description": "[Output Only] A list of Machine Type resources.", + "items": { + "$ref": "MachineType" + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#machineTypeList for lists of machine types.", + "default": "compute#machineTypeList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + } + } + }, + "MachineTypesScopedList": { + "id": "MachineTypesScopedList", + "type": "object", + "properties": { + "machineTypes": { + "type": "array", + "description": "[Output Only] List of machine types contained in this scope.", + "items": { + "$ref": "MachineType" + } + }, + "warning": { + "type": "object", + "description": "[Output Only] An informational warning that appears when the machine types list is empty.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, + "ManagedInstance": { + "id": "ManagedInstance", + "type": "object", + "properties": { + "currentAction": { + "type": "string", + "description": "[Output Only] The current action that the managed instance group has scheduled for the instance. Possible values: \n- NONE The instance is running, and the managed instance group does not have any scheduled actions for this instance. \n- CREATING The managed instance group is creating this instance. If the group fails to create this instance, it will try again until it is successful. \n- CREATING_WITHOUT_RETRIES The managed instance group is attempting to create this instance only once. If the group fails to create this instance, it does not try again and the group's target_size value is decreased. \n- RECREATING The managed instance group is recreating this instance. \n- DELETING The managed instance group is permanently deleting this instance. \n- ABANDONING The managed instance group is abandoning this instance. The instance will be removed from the instance group and from any target pools that are associated with this group. \n- RESTARTING The managed instance group is restarting the instance. \n- REFRESHING The managed instance group is applying configuration changes to the instance without stopping it. For example, the group can update the target pool list for an instance without stopping that instance.", + "enum": [ + "ABANDONING", + "CREATING", + "DELETING", + "NONE", + "RECREATING", + "REFRESHING", + "RESTARTING" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "" + ] + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for this resource. This field is empty when the instance does not exist.", + "format": "uint64" + }, + "instance": { + "type": "string", + "description": "[Output Only] The URL of the instance. The URL can exist even if the instance has not yet been created." + }, + "instanceStatus": { + "type": "string", + "description": "[Output Only] The status of the instance. This field is empty when the instance does not exist.", + "enum": [ + "PROVISIONING", + "RUNNING", + "STAGING", + "STOPPED", + "STOPPING", + "SUSPENDED", + "SUSPENDING", + "TERMINATED" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "lastAttempt": { + "$ref": "ManagedInstanceLastAttempt", + "description": "[Output Only] Information about the last attempt to create or delete the instance." + } + } + }, + "ManagedInstanceLastAttempt": { + "id": "ManagedInstanceLastAttempt", + "type": "object", + "properties": { + "errors": { + "type": "object", + "description": "[Output Only] Encountered errors during the last attempt to create or delete the instance.", + "properties": { + "errors": { + "type": "array", + "description": "[Output Only] The array of errors encountered while processing this operation.", + "items": { + "type": "object", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] The error type identifier for this error."
+ }, + "location": { + "type": "string", + "description": "[Output Only] Indicates the field in the request that caused the error. This property is optional." + }, + "message": { + "type": "string", + "description": "[Output Only] An optional, human-readable error message." + } + } + } + } + } + } + } + }, + "Metadata": { + "id": "Metadata", + "type": "object", + "description": "A metadata key/value entry.", + "properties": { + "fingerprint": { + "type": "string", + "description": "Specifies a fingerprint for this request, which is essentially a hash of the metadata's contents and used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update metadata. You must always provide an up-to-date fingerprint hash in order to update or change metadata.", + "format": "byte" + }, + "items": { + "type": "array", + "description": "Array of key/value pairs. The total size of all keys and values must be less than 512 KB.", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "Key for the metadata entry. Keys must conform to the following regexp: [a-zA-Z0-9-_]+, and be less than 128 bytes in length. This is reflected as part of a URL in the metadata server. Additionally, to avoid ambiguity, keys must not conflict with any other metadata keys for the project.", + "pattern": "[a-zA-Z0-9-_]{1,128}", + "annotations": { + "required": [ + "compute.instances.insert", + "compute.projects.setCommonInstanceMetadata" + ] + } + }, + "value": { + "type": "string", + "description": "Value for the metadata entry. These are free-form strings, and only have meaning as interpreted by the image running in the instance. The only restriction placed on values is that their size must be less than or equal to 32768 bytes.", + "annotations": { + "required": [ + "compute.instances.insert", + "compute.projects.setCommonInstanceMetadata" + ] + } + } + } + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of the resource. Always compute#metadata for metadata.", + "default": "compute#metadata" + } + } + }, + "NamedPort": { + "id": "NamedPort", + "type": "object", + "description": "The named port. For example: .", + "properties": { + "name": { + "type": "string", + "description": "The name for this named port. The name must be 1-63 characters long, and comply with RFC1035." + }, + "port": { + "type": "integer", + "description": "The port number, which can be a value between 1 and 65535.", + "format": "int32" + } + } + }, + "Network": { + "id": "Network", + "type": "object", + "description": "Represents a Network resource. Read Networks and Firewalls for more information.", + "properties": { + "IPv4Range": { + "type": "string", + "description": "The range of internal addresses that are legal on this network. This range is a CIDR specification, for example: 192.168.0.0/16. Provided by the client when the network is created.", + "pattern": "[0-9]{1,3}(?:\\.[0-9]{1,3}){3}/[0-9]{1,2}" + }, + "autoCreateSubnetworks": { + "type": "boolean", + "description": "When set to true, the network is created in \"auto subnet mode\". When set to false, the network is in \"custom subnet mode\".\n\nIn \"auto subnet mode\", a newly created network is assigned the default CIDR of 10.128.0.0/9 and it automatically creates one subnetwork per region." + }, + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." 
+ }, + "description": { + "type": "string", + "description": "An optional description of this resource. Provide this property when you create the resource." + }, + "gatewayIPv4": { + "type": "string", + "description": "A gateway address for default routing to other networks. This value is read only and is selected by the Google Compute Engine, typically as the first usable address in the IPv4Range.", + "pattern": "[0-9]{1,3}(?:\\.[0-9]{1,3}){3}" + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of the resource. Always compute#network for networks.", + "default": "compute#network" + }, + "name": { + "type": "string", + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "annotations": { + "required": [ + "compute.networks.insert" + ] + } + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + }, + "subnetworks": { + "type": "array", + "description": "[Output Only] Server-defined fully-qualified URLs for all subnetworks in this network.", + "items": { + "type": "string" + } + } + } + }, + "NetworkInterface": { + "id": "NetworkInterface", + "type": "object", + "description": "A network interface resource attached to an instance.", + "properties": { + "accessConfigs": { + "type": "array", + "description": "An array of configurations for this interface. Currently, ONE_TO_ONE_NAT is the only access config supported. If there are no accessConfigs specified, then this instance will have no external internet access.", + "items": { + "$ref": "AccessConfig" + } + }, + "name": { + "type": "string", + "description": "[Output Only] The name of the network interface, generated by the server. For network devices, these are eth0, eth1, etc." + }, + "network": { + "type": "string", + "description": "URL of the network resource for this instance. This is required for creating an instance but optional when creating a firewall rule. If not specified when creating a firewall rule, the default network is used:\n\nglobal/networks/default \n\nIf you specify this property, you can specify the network as a full or partial URL. For example, the following are all valid URLs: \n- https://www.googleapis.com/compute/v1/projects/project/global/networks/network \n- projects/project/global/networks/network \n- global/networks/default", + "annotations": { + "required": [ + "compute.instances.insert" + ] + } + }, + "networkIP": { + "type": "string", + "description": "An IPV4 internal network address to assign to the instance for this network interface. If not specified by user an unused internal IP is assigned by system." + }, + "subnetwork": { + "type": "string", + "description": "The URL of the Subnetwork resource for this instance. If the network resource is in legacy mode, do not provide this property. If the network is in auto subnet mode, providing the subnetwork is optional. 
If the network is in custom subnet mode, then this field should be specified. If you specify this property, you can specify the subnetwork as a full or partial URL. For example, the following are all valid URLs: \n- https://www.googleapis.com/compute/v1/projects/project/regions/region/subnetworks/subnetwork \n- regions/region/subnetworks/subnetwork" + } + } + }, + "NetworkList": { + "id": "NetworkList", + "type": "object", + "description": "Contains a list of networks.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." + }, + "items": { + "type": "array", + "description": "[Output Only] A list of Network resources.", + "items": { + "$ref": "Network" + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#networkList for lists of networks.", + "default": "compute#networkList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + } + } + }, + "Operation": { + "id": "Operation", + "type": "object", + "description": "An Operation resource, used to manage asynchronous API requests.", + "properties": { + "clientOperationId": { + "type": "string", + "description": "[Output Only] Reserved for future use." + }, + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "description": { + "type": "string", + "description": "[Output Only] A textual description of the operation, which is set when the operation is created." + }, + "endTime": { + "type": "string", + "description": "[Output Only] The time that this operation was completed. This value is in RFC3339 text format." + }, + "error": { + "type": "object", + "description": "[Output Only] If errors are generated during processing of the operation, this field will be populated.", + "properties": { + "errors": { + "type": "array", + "description": "[Output Only] The array of errors encountered while processing this operation.", + "items": { + "type": "object", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] The error type identifier for this error." + }, + "location": { + "type": "string", + "description": "[Output Only] Indicates the field in the request that caused the error. This property is optional." + }, + "message": { + "type": "string", + "description": "[Output Only] An optional, human-readable error message." + } + } + } + } + } + }, + "httpErrorMessage": { + "type": "string", + "description": "[Output Only] If the operation fails, this field contains the HTTP error message that was returned, such as NOT FOUND." + }, + "httpErrorStatusCode": { + "type": "integer", + "description": "[Output Only] If the operation fails, this field contains the HTTP error status code that was returned. For example, a 404 means the resource was not found.", + "format": "int32" + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource.
This identifier is defined by the server.", + "format": "uint64" + }, + "insertTime": { + "type": "string", + "description": "[Output Only] The time that this operation was requested. This value is in RFC3339 text format." + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of the resource. Always compute#operation for Operation resources.", + "default": "compute#operation" + }, + "name": { + "type": "string", + "description": "[Output Only] Name of the resource." + }, + "operationType": { + "type": "string", + "description": "[Output Only] The type of operation, such as insert, update, or delete, and so on." + }, + "progress": { + "type": "integer", + "description": "[Output Only] An optional progress indicator that ranges from 0 to 100. There is no requirement that this be linear or support any granularity of operations. This should not be used to guess when the operation will be complete. This number should monotonically increase as the operation progresses.", + "format": "int32" + }, + "region": { + "type": "string", + "description": "[Output Only] The URL of the region where the operation resides. Only available when performing regional operations." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + }, + "startTime": { + "type": "string", + "description": "[Output Only] The time that this operation was started by the server. This value is in RFC3339 text format." + }, + "status": { + "type": "string", + "description": "[Output Only] The status of the operation, which can be one of the following: PENDING, RUNNING, or DONE.", + "enum": [ + "DONE", + "PENDING", + "RUNNING" + ], + "enumDescriptions": [ + "", + "", + "" + ] + }, + "statusMessage": { + "type": "string", + "description": "[Output Only] An optional textual description of the current status of the operation." + }, + "targetId": { + "type": "string", + "description": "[Output Only] The unique target ID, which identifies a specific incarnation of the target resource.", + "format": "uint64" + }, + "targetLink": { + "type": "string", + "description": "[Output Only] The URL of the resource that the operation modifies." + }, + "user": { + "type": "string", + "description": "[Output Only] User who requested the operation, for example: user@example.com." + }, + "warnings": { + "type": "array", + "description": "[Output Only] If warning messages are generated during processing of the operation, this field will be populated.", + "items": { + "type": "object", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. 
For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + }, + "zone": { + "type": "string", + "description": "[Output Only] The URL of the zone where the operation resides. Only available when performing per-zone operations." + } + } + }, + "OperationAggregatedList": { + "id": "OperationAggregatedList", + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." + }, + "items": { + "type": "object", + "description": "[Output Only] A map of scoped operation lists.", + "additionalProperties": { + "$ref": "OperationsScopedList", + "description": "[Output Only] Name of the scope containing this set of operations." + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#operationAggregatedList for aggregated lists of operations.", + "default": "compute#operationAggregatedList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + } + } + }, + "OperationList": { + "id": "OperationList", + "type": "object", + "description": "Contains a list of Operation resources.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." + }, + "items": { + "type": "array", + "description": "[Output Only] A list of Operation resources.", + "items": { + "$ref": "Operation" + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#operations for Operations resource.", + "default": "compute#operationList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." 
+ } + } + }, + "OperationsScopedList": { + "id": "OperationsScopedList", + "type": "object", + "properties": { + "operations": { + "type": "array", + "description": "[Output Only] List of operations contained in this scope.", + "items": { + "$ref": "Operation" + } + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning which replaces the list of operations when the list is empty.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, + "PathMatcher": { + "id": "PathMatcher", + "type": "object", + "description": "A matcher for the path portion of the URL. The BackendService from the longest-matched rule will serve the URL. If no rule was matched, the default service will be used.", + "properties": { + "defaultService": { + "type": "string", + "description": "The full or partial URL to the BackendService resource. This will be used if none of the pathRules defined by this PathMatcher is matched by the URL's path portion. For example, the following are all valid URLs to a BackendService resource: \n- https://www.googleapis.com/compute/v1/projects/project/global/backendServices/backendService \n- compute/v1/projects/project/global/backendServices/backendService \n- global/backendServices/backendService" + }, + "description": { + "type": "string", + "description": "An optional description of this resource. Provide this property when you create the resource." + }, + "name": { + "type": "string", + "description": "The name to which this PathMatcher is referred by the HostRule." 
+ }, + "pathRules": { + "type": "array", + "description": "The list of path rules.", + "items": { + "$ref": "PathRule" + } + } + } + }, + "PathRule": { + "id": "PathRule", + "type": "object", + "description": "A path-matching rule for a URL. If matched, will use the specified BackendService to handle the traffic arriving at this URL.", + "properties": { + "paths": { + "type": "array", + "description": "The list of path patterns to match. Each must start with / and the only place a * is allowed is at the end following a /. The string fed to the path matcher does not include any text after the first ? or #, and those chars are not allowed here.", + "items": { + "type": "string" + } + }, + "service": { + "type": "string", + "description": "The URL of the BackendService resource if this rule is matched." + } + } + }, + "Project": { + "id": "Project", + "type": "object", + "description": "A Project resource. Projects can only be created in the Google Cloud Platform Console. Unless marked otherwise, values can only be modified in the console.", + "properties": { + "commonInstanceMetadata": { + "$ref": "Metadata", + "description": "Metadata key/value pairs available to all instances contained in this project. See Custom metadata for more information." + }, + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "description": { + "type": "string", + "description": "An optional textual description of the resource." + }, + "enabledFeatures": { + "type": "array", + "description": "Restricted features enabled for use on this project.", + "items": { + "type": "string" + } + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server. This is not the project ID, and is just a unique ID used by Compute Engine to identify resources.", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of the resource. Always compute#project for projects.", + "default": "compute#project" + }, + "name": { + "type": "string", + "description": "The project ID. For example: my-example-project. Use the project ID to make requests to Compute Engine." + }, + "quotas": { + "type": "array", + "description": "[Output Only] Quotas assigned to this project.", + "items": { + "$ref": "Quota" + } + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + }, + "usageExportLocation": { + "$ref": "UsageExportLocation", + "description": "The naming prefix for daily usage reports and the Google Cloud Storage bucket where they are stored." 
+ } + } + }, + "Quota": { + "id": "Quota", + "type": "object", + "description": "A quotas entry.", + "properties": { + "limit": { + "type": "number", + "description": "[Output Only] Quota limit for this metric.", + "format": "double" + }, + "metric": { + "type": "string", + "description": "[Output Only] Name of the quota metric.", + "enum": [ + "AUTOSCALERS", + "BACKEND_SERVICES", + "CPUS", + "DISKS_TOTAL_GB", + "FIREWALLS", + "FORWARDING_RULES", + "HEALTH_CHECKS", + "IMAGES", + "INSTANCES", + "INSTANCE_GROUPS", + "INSTANCE_GROUP_MANAGERS", + "INSTANCE_TEMPLATES", + "IN_USE_ADDRESSES", + "LOCAL_SSD_TOTAL_GB", + "NETWORKS", + "ROUTES", + "SNAPSHOTS", + "SSD_TOTAL_GB", + "SSL_CERTIFICATES", + "STATIC_ADDRESSES", + "SUBNETWORKS", + "TARGET_HTTPS_PROXIES", + "TARGET_HTTP_PROXIES", + "TARGET_INSTANCES", + "TARGET_POOLS", + "TARGET_VPN_GATEWAYS", + "URL_MAPS", + "VPN_TUNNELS" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "usage": { + "type": "number", + "description": "[Output Only] Current usage of this metric.", + "format": "double" + } + } + }, + "Region": { + "id": "Region", + "type": "object", + "description": "Region resource.", + "properties": { + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "deprecated": { + "$ref": "DeprecationStatus", + "description": "[Output Only] The deprecation status associated with this region." + }, + "description": { + "type": "string", + "description": "[Output Only] Textual description of the resource." + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of the resource. Always compute#region for regions.", + "default": "compute#region" + }, + "name": { + "type": "string", + "description": "[Output Only] Name of the resource." + }, + "quotas": { + "type": "array", + "description": "[Output Only] Quotas assigned to this region.", + "items": { + "$ref": "Quota" + } + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + }, + "status": { + "type": "string", + "description": "[Output Only] Status of the region, either UP or DOWN.", + "enum": [ + "DOWN", + "UP" + ], + "enumDescriptions": [ + "", + "" + ] + }, + "zones": { + "type": "array", + "description": "[Output Only] A list of zones available in this region, in the form of resource URLs.", + "items": { + "type": "string" + } + } + } + }, + "RegionList": { + "id": "RegionList", + "type": "object", + "description": "Contains a list of region resources.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." + }, + "items": { + "type": "array", + "description": "[Output Only] A list of Region resources.", + "items": { + "$ref": "Region" + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#regionList for lists of regions.", + "default": "compute#regionList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. 
If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + } + } + }, + "ResourceGroupReference": { + "id": "ResourceGroupReference", + "type": "object", + "properties": { + "group": { + "type": "string", + "description": "A URI referencing one of the resource views listed in the backend service." + } + } + }, + "Route": { + "id": "Route", + "type": "object", + "description": "Represents a Route resource. A route specifies how certain packets should be handled by the network. Routes are associated with instances by tags and the set of routes for a particular instance is called its routing table.\n\nFor each packet leaving an instance, the system searches that instance's routing table for a single best matching route. Routes match packets by destination IP address, preferring smaller or more specific ranges over larger ones. If there is a tie, the system selects the route with the smallest priority value. If there is still a tie, it uses the layer three and four packet headers to select just one of the remaining matching routes. The packet is then forwarded as specified by the nextHop field of the winning route - either to another instance destination, an instance gateway, or a Google Compute Engine-operated gateway.\n\nPackets that do not match any route in the sending instance's routing table are dropped.", + "properties": { + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "description": { + "type": "string", + "description": "An optional description of this resource. Provide this property when you create the resource." + }, + "destRange": { + "type": "string", + "description": "The destination range of outgoing packets that this route applies to.", + "annotations": { + "required": [ + "compute.routes.insert" + ] + } + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of this resource. Always compute#route for Route resources.", + "default": "compute#route" + }, + "name": { + "type": "string", + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "annotations": { + "required": [ + "compute.routes.insert" + ] + } + }, + "network": { + "type": "string", + "description": "Fully-qualified URL of the network that this route applies to.", + "annotations": { + "required": [ + "compute.routes.insert" + ] + } + }, + "nextHopGateway": { + "type": "string", + "description": "The URL to a gateway that should handle matching packets.
You can only specify the internet gateway using a full or partial valid URL: projects/\u003cproject-id\u003e/global/gateways/default-internet-gateway" + }, + "nextHopInstance": { + "type": "string", + "description": "The URL to an instance that should handle matching packets. You can specify this as a full or partial URL. For example:\nhttps://www.googleapis.com/compute/v1/projects/project/zones/zone/instances/" + }, + "nextHopIp": { + "type": "string", + "description": "The network IP address of an instance that should handle matching packets." + }, + "nextHopNetwork": { + "type": "string", + "description": "The URL of the local network if it should handle matching packets." + }, + "nextHopVpnTunnel": { + "type": "string", + "description": "The URL to a VpnTunnel that should handle matching packets." + }, + "priority": { + "type": "integer", + "description": "The priority of this route. Priority is used to break ties in cases where there is more than one matching route of equal prefix length. In the case of two routes with equal prefix length, the one with the lowest-numbered priority value wins. Default value is 1000. Valid range is 0 through 65535.", + "format": "uint32", + "annotations": { + "required": [ + "compute.routes.insert" + ] + } + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined fully-qualified URL for this resource." + }, + "tags": { + "type": "array", + "description": "A list of instance tags to which this route applies.", + "items": { + "type": "string" + }, + "annotations": { + "required": [ + "compute.routes.insert" + ] + } + }, + "warnings": { + "type": "array", + "description": "[Output Only] If potential misconfigurations are detected for this route, this field will be populated with warning messages.", + "items": { + "type": "object", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." 
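The Route properties above annotate name, network, destRange, priority, and tags as required on compute.routes.insert, with exactly one nextHop field chosen. A sketch of a matching insert call through the generated bindings; all identifiers are placeholders, and the returned Operation would be polled as shown earlier:

```go
package main

import (
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

// insertRoute creates a default route through the internet gateway,
// restricted to instances carrying the "no-ip" tag.
func insertRoute(svc *compute.Service) {
	route := &compute.Route{
		Name:           "route-to-gateway",
		Network:        "global/networks/default", // partial URL form
		DestRange:      "0.0.0.0/0",
		Priority:       1000, // lower values win ties between equal-prefix routes
		NextHopGateway: "projects/my-project/global/gateways/default-internet-gateway",
		Tags:           []string{"no-ip"},
	}
	op, err := svc.Routes.Insert("my-project", route).Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("inserted, operation:", op.Name)
}
```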
+ } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + } + }, + "RouteList": { + "id": "RouteList", + "type": "object", + "description": "Contains a list of Route resources.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] Unique identifier for the resource. Defined by the server." + }, + "items": { + "type": "array", + "description": "[Output Only] A list of Route resources.", + "items": { + "$ref": "Route" + } + }, + "kind": { + "type": "string", + "description": "Type of resource.", + "default": "compute#routeList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + } + } + }, + "Scheduling": { + "id": "Scheduling", + "type": "object", + "description": "Sets the scheduling options for an Instance.", + "properties": { + "automaticRestart": { + "type": "boolean", + "description": "Specifies whether the instance should be automatically restarted if it is terminated by Compute Engine (not terminated by a user). You can only set the automatic restart option for standard instances. Preemptible instances cannot be automatically restarted." + }, + "onHostMaintenance": { + "type": "string", + "description": "Defines the maintenance behavior for this instance. For standard instances, the default behavior is MIGRATE. For preemptible instances, the default and only possible behavior is TERMINATE. For more information, see Setting Instance Scheduling Options.", + "enum": [ + "MIGRATE", + "TERMINATE" + ], + "enumDescriptions": [ + "", + "" + ] + }, + "preemptible": { + "type": "boolean", + "description": "Whether the instance is preemptible." + } + } + }, + "SerialPortOutput": { + "id": "SerialPortOutput", + "type": "object", + "description": "An instance's serial console output.", + "properties": { + "contents": { + "type": "string", + "description": "[Output Only] The contents of the console output." + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of the resource. Always compute#serialPortOutput for serial port output.", + "default": "compute#serialPortOutput" + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + } + } + }, + "ServiceAccount": { + "id": "ServiceAccount", + "type": "object", + "description": "A service account.", + "properties": { + "email": { + "type": "string", + "description": "Email address of the service account." + }, + "scopes": { + "type": "array", + "description": "The list of scopes to be made available for this service account.", + "items": { + "type": "string" + } + } + } + }, + "Snapshot": { + "id": "Snapshot", + "type": "object", + "description": "A persistent disk snapshot resource.", + "properties": { + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "description": { + "type": "string", + "description": "An optional description of this resource. Provide this property when you create the resource." 
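The SerialPortOutput schema described above maps to a single getSerialPortOutput call in the generated bindings. A minimal sketch, again assuming an authenticated *compute.Service and placeholder names:

```go
package main

import (
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

// dumpConsole prints an instance's serial console output, which is
// returned whole in the contents field of SerialPortOutput.
func dumpConsole(svc *compute.Service) {
	out, err := svc.Instances.GetSerialPortOutput("my-project", "us-central1-a", "my-instance").Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Print(out.Contents)
}
```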
+ }, + "diskSizeGb": { + "type": "string", + "description": "[Output Only] Size of the snapshot, specified in GB.", + "format": "int64" + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of the resource. Always compute#snapshot for Snapshot resources.", + "default": "compute#snapshot" + }, + "licenses": { + "type": "array", + "description": "[Output Only] A list of public visible licenses that apply to this snapshot. This can be because the original image had licenses attached (such as a Windows image).", + "items": { + "type": "string" + } + }, + "name": { + "type": "string", + "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + }, + "sourceDisk": { + "type": "string", + "description": "[Output Only] The source disk used to create this snapshot." + }, + "sourceDiskId": { + "type": "string", + "description": "[Output Only] The ID value of the disk used to create this snapshot. This value may be used to determine whether the snapshot was taken from the current or a previous instance of a given disk name." + }, + "status": { + "type": "string", + "description": "[Output Only] The status of the snapshot. This can be CREATING, DELETING, FAILED, READY, or UPLOADING.", + "enum": [ + "CREATING", + "DELETING", + "FAILED", + "READY", + "UPLOADING" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "" + ] + }, + "storageBytes": { + "type": "string", + "description": "[Output Only] A size of the the storage used by the snapshot. As snapshots share storage, this number is expected to change with snapshot creation/deletion.", + "format": "int64" + }, + "storageBytesStatus": { + "type": "string", + "description": "[Output Only] An indicator whether storageBytes is in a stable state or it is being adjusted as a result of shared storage reallocation. This status can either be UPDATING, meaning the size of the snapshot is being updated, or UP_TO_DATE, meaning the size of the snapshot is up-to-date.", + "enum": [ + "UPDATING", + "UP_TO_DATE" + ], + "enumDescriptions": [ + "", + "" + ] + } + } + }, + "SnapshotList": { + "id": "SnapshotList", + "type": "object", + "description": "Contains a list of Snapshot resources.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." + }, + "items": { + "type": "array", + "description": "[Output Only] A list of Snapshot resources.", + "items": { + "$ref": "Snapshot" + } + }, + "kind": { + "type": "string", + "description": "Type of resource.", + "default": "compute#snapshotList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. 
If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + } + } + }, + "SslCertificate": { + "id": "SslCertificate", + "type": "object", + "description": "An SslCertificate resource. This resource provides a mechanism to upload an SSL key and certificate to the load balancer to serve secure connections from the user.", + "properties": { + "certificate": { + "type": "string", + "description": "A local certificate file. The certificate must be in PEM format. The certificate chain must be no greater than 5 certs long. The chain must include at least one intermediate cert." + }, + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "description": { + "type": "string", + "description": "An optional description of this resource. Provide this property when you create the resource." + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of the resource. Always compute#sslCertificate for SSL certificates.", + "default": "compute#sslCertificate" + }, + "name": { + "type": "string", + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" + }, + "privateKey": { + "type": "string", + "description": "A write-only private key in PEM format. Only insert RPCs will include this field." + }, + "selfLink": { + "type": "string", + "description": "[Output only] Server-defined URL for the resource." + } + } + }, + "SslCertificateList": { + "id": "SslCertificateList", + "type": "object", + "description": "Contains a list of SslCertificate resources.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] Unique identifier for the resource. Defined by the server." + }, + "items": { + "type": "array", + "description": "A list of SslCertificate resources.", + "items": { + "$ref": "SslCertificate" + } + }, + "kind": { + "type": "string", + "description": "Type of resource.", + "default": "compute#sslCertificateList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." 
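The SslCertificate schema above notes that privateKey is write-only: it is accepted on insert and never returned. A sketch of uploading a local PEM pair through the generated bindings; file paths and resource names are placeholders:

```go
package main

import (
	"io/ioutil"
	"log"

	compute "google.golang.org/api/compute/v1"
)

// uploadCert creates an SslCertificate resource from local PEM files.
func uploadCert(svc *compute.Service) {
	certPEM, err := ioutil.ReadFile("cert.pem")
	if err != nil {
		log.Fatal(err)
	}
	keyPEM, err := ioutil.ReadFile("key.pem")
	if err != nil {
		log.Fatal(err)
	}
	_, err = svc.SslCertificates.Insert("my-project", &compute.SslCertificate{
		Name:        "www-cert",
		Certificate: string(certPEM), // PEM chain, at most 5 certificates per the schema
		PrivateKey:  string(keyPEM),  // write-only; omitted from all responses
	}).Do()
	if err != nil {
		log.Fatal(err)
	}
}
```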
+ } + } + }, + "Subnetwork": { + "id": "Subnetwork", + "type": "object", + "description": "A Subnetwork resource.", + "properties": { + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "description": { + "type": "string", + "description": "An optional description of this resource. Provide this property when you create the resource." + }, + "gatewayAddress": { + "type": "string", + "description": "[Output Only] The gateway address for default routes to reach destination addresses outside this subnetwork." + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" + }, + "ipCidrRange": { + "type": "string", + "description": "The range of internal addresses that are owned by this subnetwork. Provide this property when you create the subnetwork. For example, 10.0.0.0/8 or 192.168.0.0/16. Ranges must be unique and non-overlapping within a network." + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of the resource. Always compute#subnetwork for Subnetwork resources.", + "default": "compute#subnetwork" + }, + "name": { + "type": "string", + "description": "The name of the resource, provided by the client when initially creating the resource. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" + }, + "network": { + "type": "string", + "description": "The URL of the network to which this subnetwork belongs, provided by the client when initially creating the subnetwork. Only networks that are in the distributed mode can have subnetworks." + }, + "region": { + "type": "string", + "description": "URL of the region where the Subnetwork resides." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + } + } + }, + "SubnetworkAggregatedList": { + "id": "SubnetworkAggregatedList", + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." + }, + "items": { + "type": "object", + "description": "[Output Only] A map of scoped Subnetwork lists.", + "additionalProperties": { + "$ref": "SubnetworksScopedList", + "description": "Name of the scope containing this set of Subnetworks." + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#subnetworkAggregatedList for aggregated lists of subnetworks.", + "default": "compute#subnetworkAggregatedList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource."
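The aggregated-list shape just described (an items map keyed by scope, plus the nextPageToken pagination that recurs throughout these schemas) translates into the paging loop below. A sketch under the same assumptions as before; the project ID is a placeholder, and the Items map value type follows the generated bindings:

```go
package main

import (
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

// listAllSubnetworks walks every page of an aggregated list, feeding each
// response's nextPageToken into the next request's pageToken parameter.
func listAllSubnetworks(svc *compute.Service) {
	token := ""
	for {
		call := svc.Subnetworks.AggregatedList("my-project")
		if token != "" {
			call = call.PageToken(token)
		}
		page, err := call.Do()
		if err != nil {
			log.Fatal(err)
		}
		for scope, scoped := range page.Items {
			for _, sn := range scoped.Subnetworks {
				fmt.Printf("%s: %s %s\n", scope, sn.Name, sn.IpCidrRange)
			}
			// An empty scope carries an informational warning instead of results.
			if scoped.Warning != nil && scoped.Warning.Code != "NO_RESULTS_ON_PAGE" {
				log.Printf("%s: %s", scope, scoped.Warning.Message)
			}
		}
		if token = page.NextPageToken; token == "" {
			break
		}
	}
}
```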
+ } + } + }, + "SubnetworkList": { + "id": "SubnetworkList", + "type": "object", + "description": "Contains a list of Subnetwork resources.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." + }, + "items": { + "type": "array", + "description": "A list of Subnetwork resources.", + "items": { + "$ref": "Subnetwork" + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#subnetworkList for lists of subnetworks.", + "default": "compute#subnetworkList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + } + } + }, + "SubnetworksScopedList": { + "id": "SubnetworksScopedList", + "type": "object", + "properties": { + "subnetworks": { + "type": "array", + "description": "List of subnetworks contained in this scope.", + "items": { + "$ref": "Subnetwork" + } + }, + "warning": { + "type": "object", + "description": "An informational warning that appears when the list of subnetworks is empty.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code."
+ } + } + } + } + }, + "Tags": { + "id": "Tags", + "type": "object", + "description": "A set of instance tags.", + "properties": { + "fingerprint": { + "type": "string", + "description": "Specifies a fingerprint for this request, which is essentially a hash of the tags' contents and used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update tags. You must always provide an up-to-date fingerprint hash in order to update or change tags.\n\nTo see the latest fingerprint, make a get() request to the instance.", + "format": "byte" + }, + "items": { + "type": "array", + "description": "An array of tags. Each tag must be 1-63 characters long, and comply with RFC1035.", + "items": { + "type": "string" + } + } + } + }, + "TargetHttpProxy": { + "id": "TargetHttpProxy", + "type": "object", + "description": "A TargetHttpProxy resource. This resource defines an HTTP proxy.", + "properties": { + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "description": { + "type": "string", + "description": "An optional description of this resource. Provide this property when you create the resource." + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#targetHttpProxy for target HTTP proxies.", + "default": "compute#targetHttpProxy" + }, + "name": { + "type": "string", + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + }, + "urlMap": { + "type": "string", + "description": "URL to the UrlMap resource that defines the mapping from URL to the BackendService." + } + } + }, + "TargetHttpProxyList": { + "id": "TargetHttpProxyList", + "type": "object", + "description": "A list of TargetHttpProxy resources.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." + }, + "items": { + "type": "array", + "description": "A list of TargetHttpProxy resources.", + "items": { + "$ref": "TargetHttpProxy" + } + }, + "kind": { + "type": "string", + "description": "Type of resource. Always compute#targetHttpProxyList for lists of target HTTP proxies.", + "default": "compute#targetHttpProxyList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results."
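The Tags fingerprint described above enforces a read-modify-write cycle: fetch the current tags, reuse their fingerprint, and let the server reject the write if another client changed the tags in between. A sketch of that cycle, with placeholder identifiers:

```go
package main

import (
	"log"

	compute "google.golang.org/api/compute/v1"
)

// addTag appends a tag to an instance using the fingerprint for
// optimistic locking; a stale fingerprint causes the request to fail.
func addTag(svc *compute.Service) {
	const project, zone, name = "my-project", "us-central1-a", "my-instance"
	inst, err := svc.Instances.Get(project, zone, name).Do()
	if err != nil {
		log.Fatal(err)
	}
	current := inst.Tags
	if current == nil {
		current = &compute.Tags{} // instance had no tags yet
	}
	tags := &compute.Tags{
		Items:       append(current.Items, "http-server"),
		Fingerprint: current.Fingerprint, // must match the server's latest value
	}
	if _, err := svc.Instances.SetTags(project, zone, name, tags).Do(); err != nil {
		log.Fatal(err) // on conflict: re-fetch and retry
	}
}
```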
+ }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + } + } + }, + "TargetHttpsProxiesSetSslCertificatesRequest": { + "id": "TargetHttpsProxiesSetSslCertificatesRequest", + "type": "object", + "properties": { + "sslCertificates": { + "type": "array", + "description": "New set of SslCertificate resources to associate with this TargetHttpsProxy resource. Currently exactly one SslCertificate resource must be specified.", + "items": { + "type": "string" + } + } + } + }, + "TargetHttpsProxy": { + "id": "TargetHttpsProxy", + "type": "object", + "description": "A TargetHttpsProxy resource. This resource defines an HTTPS proxy.", + "properties": { + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "description": { + "type": "string", + "description": "An optional description of this resource. Provide this property when you create the resource." + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#targetHttpsProxy for target HTTPS proxies.", + "default": "compute#targetHttpsProxy" + }, + "name": { + "type": "string", + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + }, + "sslCertificates": { + "type": "array", + "description": "URLs to SslCertificate resources that are used to authenticate connections between users and the load balancer. Currently, exactly one SSL certificate must be specified.", + "items": { + "type": "string" + } + }, + "urlMap": { + "type": "string", + "description": "A fully-qualified or valid partial URL to the UrlMap resource that defines the mapping from URL to the BackendService. For example, the following are all valid URLs for specifying a URL map: \n- https://www.googleapis.compute/v1/projects/project/global/urlMaps/url-map \n- projects/project/global/urlMaps/url-map \n- global/urlMaps/url-map" + } + } + }, + "TargetHttpsProxyList": { + "id": "TargetHttpsProxyList", + "type": "object", + "description": "Contains a list of TargetHttpsProxy resources.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." + }, + "items": { + "type": "array", + "description": "A list of TargetHttpsProxy resources.", + "items": { + "$ref": "TargetHttpsProxy" + } + }, + "kind": { + "type": "string", + "description": "Type of resource. Always compute#targetHttpsProxyList for lists of target HTTPS proxies.", + "default": "compute#targetHttpsProxyList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. 
If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + } + } + }, + "TargetInstance": { + "id": "TargetInstance", + "type": "object", + "description": "A TargetInstance resource. This resource defines an endpoint instance that terminates traffic of certain protocols.", + "properties": { + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "description": { + "type": "string", + "description": "An optional description of this resource. Provide this property when you create the resource." + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" + }, + "instance": { + "type": "string", + "description": "A URL to the virtual machine instance that handles traffic for this target instance. When creating a target instance, you can provide the fully-qualified URL or a valid partial URL to the desired virtual machine. For example, the following are all valid URLs: \n- https://www.googleapis.com/compute/v1/projects/project/zones/zone/instances/instance \n- projects/project/zones/zone/instances/instance \n- zones/zone/instances/instance" + }, + "kind": { + "type": "string", + "description": "[Output Only] The type of the resource. Always compute#targetInstance for target instances.", + "default": "compute#targetInstance" + }, + "name": { + "type": "string", + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" + }, + "natPolicy": { + "type": "string", + "description": "NAT option controlling how IPs are NAT'ed to the instance. Currently only NO_NAT (default value) is supported.", + "enum": [ + "NO_NAT" + ], + "enumDescriptions": [ + "" + ] + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + }, + "zone": { + "type": "string", + "description": "[Output Only] URL of the zone where the target instance resides." + } + } + }, + "TargetInstanceAggregatedList": { + "id": "TargetInstanceAggregatedList", + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] Unique identifier for the resource; defined by the server." + }, + "items": { + "type": "object", + "description": "A map of scoped target instance lists.", + "additionalProperties": { + "$ref": "TargetInstancesScopedList", + "description": "Name of the scope containing this set of target instances." + } + }, + "kind": { + "type": "string", + "description": "Type of resource.", + "default": "compute#targetInstanceAggregatedList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. 
If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + } + } + }, + "TargetInstanceList": { + "id": "TargetInstanceList", + "type": "object", + "description": "Contains a list of TargetInstance resources.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." + }, + "items": { + "type": "array", + "description": "A list of TargetInstance resources.", + "items": { + "$ref": "TargetInstance" + } + }, + "kind": { + "type": "string", + "description": "Type of resource.", + "default": "compute#targetInstanceList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + } + } + }, + "TargetInstancesScopedList": { + "id": "TargetInstancesScopedList", + "type": "object", + "properties": { + "targetInstances": { + "type": "array", + "description": "List of target instances contained in this scope.", + "items": { + "$ref": "TargetInstance" + } + }, + "warning": { + "type": "object", + "description": "Informational warning which replaces the list of target instances when the list is empty.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key."
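The TargetInstance schema described above wraps a single VM so that protocol-forwarding rules can point at it, with NO_NAT currently the only natPolicy. A sketch of creating one through the generated bindings; all names are placeholders:

```go
package main

import (
	"log"

	compute "google.golang.org/api/compute/v1"
)

// insertTargetInstance creates a target instance referencing an existing VM
// by partial URL, as the instance field's description permits.
func insertTargetInstance(svc *compute.Service) {
	ti := &compute.TargetInstance{
		Name:      "my-target",
		Instance:  "zones/us-central1-a/instances/my-instance",
		NatPolicy: "NO_NAT", // the only supported value today
	}
	if _, err := svc.TargetInstances.Insert("my-project", "us-central1-a", ti).Do(); err != nil {
		log.Fatal(err)
	}
}
```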
+ } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, + "TargetPool": { + "id": "TargetPool", + "type": "object", + "description": "A TargetPool resource. This resource defines a pool of instances, associated HttpHealthCheck resources, and the fallback target pool.", + "properties": { + "backupPool": { + "type": "string", + "description": "This field is applicable only when the containing target pool is serving a forwarding rule as the primary pool, and its failoverRatio field is properly set to a value between [0, 1].\n\nbackupPool and failoverRatio together define the fallback behavior of the primary target pool: if the ratio of the healthy instances in the primary pool is at or below failoverRatio, traffic arriving at the load-balanced IP will be directed to the backup pool.\n\nIf failoverRatio and backupPool are not set, or if all the instances in the backup pool are unhealthy, the traffic will be directed back to the primary pool in the \"force\" mode, where traffic will be spread to the healthy instances on a best-effort basis, or to all instances when no instance is healthy." + }, + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "description": { + "type": "string", + "description": "An optional description of this resource. Provide this property when you create the resource." + }, + "failoverRatio": { + "type": "number", + "description": "This field is applicable only when the containing target pool is serving a forwarding rule as the primary pool (i.e., not as a backup pool to some other target pool). The value of the field must be in [0, 1].\n\nIf set, backupPool must also be set. They together define the fallback behavior of the primary target pool: if the ratio of the healthy instances in the primary pool is at or below this number, traffic arriving at the load-balanced IP will be directed to the backup pool.\n\nIf failoverRatio is not set, or if all the instances in the backup pool are unhealthy, the traffic will be directed back to the primary pool in the \"force\" mode, where traffic will be spread to the healthy instances on a best-effort basis, or to all instances when no instance is healthy.", + "format": "float" + }, + "healthChecks": { + "type": "array", + "description": "A list of URLs to the HttpHealthCheck resource. A member instance in this pool is considered healthy if and only if all specified health checks pass. An empty list means all member instances will be considered healthy at all times.", + "items": { + "type": "string" + } + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" + }, + "instances": { + "type": "array", + "description": "A list of resource URLs to the virtual machine instances serving this pool. They must live in zones contained in the same region as this pool.", + "items": { + "type": "string" + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of the resource. Always compute#targetPool for target pools.", + "default": "compute#targetPool" + }, + "name": { + "type": "string", + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035.
Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" + }, + "region": { + "type": "string", + "description": "[Output Only] URL of the region where the target pool resides." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + }, + "sessionAffinity": { + "type": "string", + "description": "Session affinity option, must be one of the following values:\nNONE: Connections from the same client IP may go to any instance in the pool.\nCLIENT_IP: Connections from the same client IP will go to the same instance in the pool while that instance remains healthy.\nCLIENT_IP_PROTO: Connections from the same client IP with the same IP protocol will go to the same instance in the pool while that instance remains healthy.", + "enum": [ + "CLIENT_IP", + "CLIENT_IP_PROTO", + "NONE" + ], + "enumDescriptions": [ + "", + "", + "" + ] + } + } + }, + "TargetPoolAggregatedList": { + "id": "TargetPoolAggregatedList", + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] Unique identifier for the resource. Defined by the server." + }, + "items": { + "type": "object", + "description": "[Output Only] A map of scoped target pool lists.", + "additionalProperties": { + "$ref": "TargetPoolsScopedList", + "description": "Name of the scope containing this set of target pools." + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#targetPoolAggregatedList for aggregated lists of target pools.", + "default": "compute#targetPoolAggregatedList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + } + } + }, + "TargetPoolInstanceHealth": { + "id": "TargetPoolInstanceHealth", + "type": "object", + "properties": { + "healthStatus": { + "type": "array", + "items": { + "$ref": "HealthStatus" + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#targetPoolInstanceHealth when checking the health of an instance.", + "default": "compute#targetPoolInstanceHealth" + } + } + }, + "TargetPoolList": { + "id": "TargetPoolList", + "type": "object", + "description": "Contains a list of TargetPool resources.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] Unique identifier for the resource. Defined by the server." + }, + "items": { + "type": "array", + "description": "A list of TargetPool resources.", + "items": { + "$ref": "TargetPool" + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource.
Always compute#targetPoolList for lists of target pools.", + "default": "compute#targetPoolList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + } + } + }, + "TargetPoolsAddHealthCheckRequest": { + "id": "TargetPoolsAddHealthCheckRequest", + "type": "object", + "properties": { + "healthChecks": { + "type": "array", + "description": "A list of HttpHealthCheck resources to add to the target pool.", + "items": { + "$ref": "HealthCheckReference" + } + } + } + }, + "TargetPoolsAddInstanceRequest": { + "id": "TargetPoolsAddInstanceRequest", + "type": "object", + "properties": { + "instances": { + "type": "array", + "description": "A full or partial URL to an instance to add to this target pool. For example, the following are valid URLs: \n- https://www.googleapis.com/compute/v1/projects/project-id/zones/zone/instances/instance-name \n- projects/project-id/zones/zone/instances/instance-name \n- zones/zone/instances/instance-name", + "items": { + "$ref": "InstanceReference" + } + } + } + }, + "TargetPoolsRemoveHealthCheckRequest": { + "id": "TargetPoolsRemoveHealthCheckRequest", + "type": "object", + "properties": { + "healthChecks": { + "type": "array", + "description": "Health check URL to be removed. This can be a full or partial URL. For example, the following are valid URLs: \n- https://www.googleapis.com/compute/beta/projects/project/global/httpHealthChecks/health-check \n- projects/project/global/httpHealthChecks/health-check \n- global/httpHealthChecks/health-check", + "items": { + "$ref": "HealthCheckReference" + } + } + } + }, + "TargetPoolsRemoveInstanceRequest": { + "id": "TargetPoolsRemoveInstanceRequest", + "type": "object", + "properties": { + "instances": { + "type": "array", + "description": "URLs of the instances to be removed from the target pool.", + "items": { + "$ref": "InstanceReference" + } + } + } + }, + "TargetPoolsScopedList": { + "id": "TargetPoolsScopedList", + "type": "object", + "properties": { + "targetPools": { + "type": "array", + "description": "List of target pools contained in this scope.", + "items": { + "$ref": "TargetPool" + } + }, + "warning": { + "type": "object", + "description": "Informational warning which replaces the list of target pools when the list is empty.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ]", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, + "TargetReference": { + "id": "TargetReference", + "type": "object", + "properties": { + "target": { + "type": "string" + } + } + }, + "TargetVpnGateway": { + "id": "TargetVpnGateway", + "type": "object", + "description": "Represents a Target VPN gateway resource.", + "properties": { + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "description": { + "type": "string", + "description": "An optional description of this resource. Provide this property when you create the resource." + }, + "forwardingRules": { + "type": "array", + "description": "[Output Only] A list of URLs to the ForwardingRule resources. ForwardingRules are created using compute.forwardingRules.insert and associated with a VPN gateway.", + "items": { + "type": "string" + } + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#targetVpnGateway for target VPN gateways.", + "default": "compute#targetVpnGateway" + }, + "name": { + "type": "string", + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? 
which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "annotations": { + "required": [ + "compute.targetVpnGateways.insert" + ] + } + }, + "network": { + "type": "string", + "description": "URL of the network to which this VPN gateway is attached. Provided by the client when the VPN gateway is created.", + "annotations": { + "required": [ + "compute.targetVpnGateways.insert" + ] + } + }, + "region": { + "type": "string", + "description": "[Output Only] URL of the region where the target VPN gateway resides." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + }, + "status": { + "type": "string", + "description": "[Output Only] The status of the VPN gateway.", + "enum": [ + "CREATING", + "DELETING", + "FAILED", + "READY" + ], + "enumDescriptions": [ + "", + "", + "", + "" + ] + }, + "tunnels": { + "type": "array", + "description": "[Output Only] A list of URLs to VpnTunnel resources. VpnTunnels are created using the compute.vpnTunnels.insert method and associated with a VPN gateway.", + "items": { + "type": "string" + } + } + } + }, + "TargetVpnGatewayAggregatedList": { + "id": "TargetVpnGatewayAggregatedList", + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." + }, + "items": { + "type": "object", + "description": "A map of scoped target VPN gateway lists.", + "additionalProperties": { + "$ref": "TargetVpnGatewaysScopedList", + "description": "[Output Only] Name of the scope containing this set of target VPN gateways." + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#targetVpnGatewayAggregatedList for aggregated lists of target VPN gateways.", + "default": "compute#targetVpnGatewayAggregatedList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + } + } + }, + "TargetVpnGatewayList": { + "id": "TargetVpnGatewayList", + "type": "object", + "description": "Contains a list of TargetVpnGateway resources.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." + }, + "items": { + "type": "array", + "description": "[Output Only] A list of TargetVpnGateway resources.", + "items": { + "$ref": "TargetVpnGateway" + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#targetVpnGatewayList for lists of target VPN gateways.", + "default": "compute#targetVpnGatewayList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. 
Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + } + } + }, + "TargetVpnGatewaysScopedList": { + "id": "TargetVpnGatewaysScopedList", + "type": "object", + "properties": { + "targetVpnGateways": { + "type": "array", + "description": "[Output Only] List of target VPN gateways contained in this scope.", + "items": { + "$ref": "TargetVpnGateway" + } + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning which replaces the list of target VPN gateways when the list is empty.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ]", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, + "TestFailure": { + "id": "TestFailure", + "type": "object", + "properties": { + "actualService": { + "type": "string" + }, + "expectedService": { + "type": "string" + }, + "host": { + "type": "string" + }, + "path": { + "type": "string" + } + } + }, + "UrlMap": { + "id": "UrlMap", + "type": "object", + "description": "A UrlMap resource. This resource defines the mapping from URL to the BackendService resource, based on the \"longest-match\" of the URL's host and path.", + "properties": { + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "defaultService": { + "type": "string", + "description": "The URL of the BackendService resource if none of the hostRules match." + }, + "description": { + "type": "string", + "description": "An optional description of this resource. Provide this property when you create the resource." + }, + "fingerprint": { + "type": "string", + "description": "Fingerprint of this resource. 
A hash of the contents stored in this object. This field is used in optimistic locking. This field will be ignored when inserting a UrlMap. An up-to-date fingerprint must be provided in order to update the UrlMap.", + "format": "byte" + }, + "hostRules": { + "type": "array", + "description": "The list of HostRules to use against the URL.", + "items": { + "$ref": "HostRule" + } + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of the resource. Always compute#urlMap for URL maps.", + "default": "compute#urlMap" + }, + "name": { + "type": "string", + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" + }, + "pathMatchers": { + "type": "array", + "description": "The list of named PathMatchers to use against the URL.", + "items": { + "$ref": "PathMatcher" + } + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + }, + "tests": { + "type": "array", + "description": "The list of expected URL mappings. A request to update this UrlMap will succeed only if all of the test cases pass.", + "items": { + "$ref": "UrlMapTest" + } + } + } + }, + "UrlMapList": { + "id": "UrlMapList", + "type": "object", + "description": "Contains a list of UrlMap resources.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] Unique identifier for the resource. Set by the server." + }, + "items": { + "type": "array", + "description": "A list of UrlMap resources.", + "items": { + "$ref": "UrlMap" + } + }, + "kind": { + "type": "string", + "description": "Type of resource.", + "default": "compute#urlMapList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + } + } + }, + "UrlMapReference": { + "id": "UrlMapReference", + "type": "object", + "properties": { + "urlMap": { + "type": "string" + } + } + }, + "UrlMapTest": { + "id": "UrlMapTest", + "type": "object", + "description": "Message for the expected URL mappings.", + "properties": { + "description": { + "type": "string", + "description": "Description of this test case." + }, + "host": { + "type": "string", + "description": "Host portion of the URL." + }, + "path": { + "type": "string", + "description": "Path portion of the URL." + }, + "service": { + "type": "string", + "description": "Expected BackendService resource the given URL should be mapped to." 
+ } + } + }, + "UrlMapValidationResult": { + "id": "UrlMapValidationResult", + "type": "object", + "description": "Message representing the validation result for a UrlMap.", + "properties": { + "loadErrors": { + "type": "array", + "items": { + "type": "string" + } + }, + "loadSucceeded": { + "type": "boolean", + "description": "Whether the given UrlMap can be successfully loaded. If false, 'loadErrors' indicates the reasons." + }, + "testFailures": { + "type": "array", + "items": { + "$ref": "TestFailure" + } + }, + "testPassed": { + "type": "boolean", + "description": "If successfully loaded, this field indicates whether the test passed. If false, 'testFailures' indicates the reasons for failure." + } + } + }, + "UrlMapsValidateRequest": { + "id": "UrlMapsValidateRequest", + "type": "object", + "properties": { + "resource": { + "$ref": "UrlMap", + "description": "Content of the UrlMap to be validated." + } + } + }, + "UrlMapsValidateResponse": { + "id": "UrlMapsValidateResponse", + "type": "object", + "properties": { + "result": { + "$ref": "UrlMapValidationResult" + } + } + }, + "UsageExportLocation": { + "id": "UsageExportLocation", + "type": "object", + "description": "The location in Cloud Storage and naming method of the daily usage report. Contains bucket_name and report_name prefix.", + "properties": { + "bucketName": { + "type": "string", + "description": "The name of an existing bucket in Cloud Storage where the usage report object is stored. The Google Service Account is granted write access to this bucket. This can either be the bucket name by itself, such as example-bucket, or the bucket name with gs:// or https://storage.googleapis.com/ in front of it, such as gs://example-bucket." + }, + "reportNamePrefix": { + "type": "string", + "description": "An optional prefix for the name of the usage report object stored in bucketName. If not supplied, defaults to usage. The report is stored as a CSV file named report_name_prefix_gce_YYYYMMDD.csv where YYYYMMDD is the day of the usage according to Pacific Time. If you supply a prefix, it should conform to Cloud Storage object naming conventions." + } + } + }, + "VpnTunnel": { + "id": "VpnTunnel", + "type": "object", + "properties": { + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "description": { + "type": "string", + "description": "An optional description of this resource. Provide this property when you create the resource." + }, + "detailedStatus": { + "type": "string", + "description": "[Output Only] Detailed status message for the VPN tunnel." + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" + }, + "ikeVersion": { + "type": "integer", + "description": "IKE protocol version to use when establishing the VPN tunnel with peer VPN gateway. Acceptable IKE versions are 1 or 2. Default version is 2.", + "format": "int32" + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#vpnTunnel for VPN tunnels.", + "default": "compute#vpnTunnel" + }, + "localTrafficSelector": { + "type": "array", + "description": "Local traffic selector to use when establishing the VPN tunnel with peer VPN gateway. The value should be a CIDR formatted string, for example: 192.168.0.0/16. 
The ranges should be disjoint.", + "items": { + "type": "string" + } + }, + "name": { + "type": "string", + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "annotations": { + "required": [ + "compute.vpnTunnels.insert" + ] + } + }, + "peerIp": { + "type": "string", + "description": "IP address of the peer VPN gateway." + }, + "region": { + "type": "string", + "description": "[Output Only] URL of the region where the VPN tunnel resides." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + }, + "sharedSecret": { + "type": "string", + "description": "Shared secret used to set the secure session between the Cloud VPN gateway and the peer VPN gateway." + }, + "sharedSecretHash": { + "type": "string", + "description": "Hash of the shared secret." + }, + "status": { + "type": "string", + "description": "[Output Only] The status of the VPN tunnel.", + "enum": [ + "ALLOCATING_RESOURCES", + "AUTHORIZATION_ERROR", + "DEPROVISIONING", + "ESTABLISHED", + "FAILED", + "FIRST_HANDSHAKE", + "NEGOTIATION_FAILURE", + "NETWORK_ERROR", + "NO_INCOMING_PACKETS", + "PROVISIONING", + "REJECTED", + "WAITING_FOR_FULL_CONFIG" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "targetVpnGateway": { + "type": "string", + "description": "URL of the VPN gateway with which this VPN tunnel is associated. Provided by the client when the VPN tunnel is created.", + "annotations": { + "required": [ + "compute.vpnTunnels.insert" + ] + } + } + } + }, + "VpnTunnelAggregatedList": { + "id": "VpnTunnelAggregatedList", + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." + }, + "items": { + "type": "object", + "description": "[Output Only] A map of scoped VPN tunnel lists.", + "additionalProperties": { + "$ref": "VpnTunnelsScopedList", + "description": "Name of the scope containing this set of VPN tunnels." + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#vpnTunnelAggregatedList for aggregated lists of VPN tunnels.", + "default": "compute#vpnTunnelAggregatedList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + } + } + }, + "VpnTunnelList": { + "id": "VpnTunnelList", + "type": "object", + "description": "Contains a list of VpnTunnel resources.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." 
+ }, + "items": { + "type": "array", + "description": "[Output Only] A list of VpnTunnel resources.", + "items": { + "$ref": "VpnTunnel" + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#vpnTunnelList for lists of VPN tunnels.", + "default": "compute#vpnTunnelList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + } + } + }, + "VpnTunnelsScopedList": { + "id": "VpnTunnelsScopedList", + "type": "object", + "properties": { + "vpnTunnels": { + "type": "array", + "description": "List of VPN tunnels contained in this scope.", + "items": { + "$ref": "VpnTunnel" + } + }, + "warning": { + "type": "object", + "description": "Informational warning which replaces the list of VPN tunnels when the list is empty.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ]", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, + "Zone": { + "id": "Zone", + "type": "object", + "description": "A Zone resource.", + "properties": { + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "deprecated": { + "$ref": "DeprecationStatus", + "description": "[Output Only] The deprecation status associated with this zone." + }, + "description": { + "type": "string", + "description": "[Output Only] Textual description of the resource." 
+ }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of the resource. Always compute#zone for zones.", + "default": "compute#zone" + }, + "name": { + "type": "string", + "description": "[Output Only] Name of the resource." + }, + "region": { + "type": "string", + "description": "[Output Only] Full URL reference to the region which hosts the zone." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + }, + "status": { + "type": "string", + "description": "[Output Only] Status of the zone, either UP or DOWN.", + "enum": [ + "DOWN", + "UP" + ], + "enumDescriptions": [ + "", + "" + ] + } + } + }, + "ZoneList": { + "id": "ZoneList", + "type": "object", + "description": "Contains a list of zone resources.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] Unique identifier for the resource; defined by the server." + }, + "items": { + "type": "array", + "description": "[Output Only] A list of Zone resources.", + "items": { + "$ref": "Zone" + } + }, + "kind": { + "type": "string", + "description": "Type of resource.", + "default": "compute#zoneList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + } + } + } + }, + "resources": { + "addresses": { + "methods": { + "aggregatedList": { + "id": "compute.addresses.aggregatedList", + "path": "{project}/aggregated/addresses", + "httpMethod": "GET", + "description": "Retrieves an aggregated list of addresses.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "AddressAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "delete": { + "id": "compute.addresses.delete", + "path": "{project}/regions/{region}/addresses/{address}", + "httpMethod": "DELETE", + "description": "Deletes the specified address resource.", + "parameters": { + "address": { + "type": "string", + "description": "Name of the address resource to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "address" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.addresses.get", + "path": "{project}/regions/{region}/addresses/{address}", + "httpMethod": "GET", + "description": "Returns the specified address resource.", + "parameters": { + "address": { + "type": "string", + "description": "Name of the address resource to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "address" + ], + "response": { + "$ref": "Address" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + 
"id": "compute.addresses.insert", + "path": "{project}/regions/{region}/addresses", + "httpMethod": "POST", + "description": "Creates an address resource in the specified project using the data included in the request.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region" + ], + "request": { + "$ref": "Address" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "id": "compute.addresses.list", + "path": "{project}/regions/{region}/addresses", + "httpMethod": "GET", + "description": "Retrieves a list of addresses contained within the specified region.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region" + ], + "response": { + "$ref": "AddressList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "autoscalers": { + "methods": { + "aggregatedList": { + "id": "compute.autoscalers.aggregatedList", + "path": "{project}/aggregated/autoscalers", + "httpMethod": "GET", + "description": "Retrieves an aggregated list of autoscalers.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "AutoscalerAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "delete": { + "id": "compute.autoscalers.delete", + "path": "{project}/zones/{zone}/autoscalers/{autoscaler}", + "httpMethod": "DELETE", + "description": "Deletes the specified autoscaler.", + "parameters": { + "autoscaler": { + "type": "string", + "description": "Name of the autoscaler to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "Name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "autoscaler" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.autoscalers.get", + "path": "{project}/zones/{zone}/autoscalers/{autoscaler}", + "httpMethod": "GET", + "description": "Returns the specified autoscaler resource. 
Get a list of available autoscalers by making a list() request.", + "parameters": { + "autoscaler": { + "type": "string", + "description": "Name of the autoscaler to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "Name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "autoscaler" + ], + "response": { + "$ref": "Autoscaler" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "id": "compute.autoscalers.insert", + "path": "{project}/zones/{zone}/autoscalers", + "httpMethod": "POST", + "description": "Creates an autoscaler in the specified project using the data included in the request.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "Name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone" + ], + "request": { + "$ref": "Autoscaler" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "id": "compute.autoscalers.list", + "path": "{project}/zones/{zone}/autoscalers", + "httpMethod": "GET", + "description": "Retrieves a list of autoscalers contained within the specified zone.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. 
For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "Name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone" + ], + "response": { + "$ref": "AutoscalerList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "patch": { + "id": "compute.autoscalers.patch", + "path": "{project}/zones/{zone}/autoscalers", + "httpMethod": "PATCH", + "description": "Updates an autoscaler in the specified project using the data included in the request. 
This method supports patch semantics.", + "parameters": { + "autoscaler": { + "type": "string", + "description": "Name of the autoscaler to update.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "Name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "autoscaler" + ], + "request": { + "$ref": "Autoscaler" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "update": { + "id": "compute.autoscalers.update", + "path": "{project}/zones/{zone}/autoscalers", + "httpMethod": "PUT", + "description": "Updates an autoscaler in the specified project using the data included in the request.", + "parameters": { + "autoscaler": { + "type": "string", + "description": "Name of the autoscaler to update.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "Name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone" + ], + "request": { + "$ref": "Autoscaler" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + } + } + }, + "backendServices": { + "methods": { + "delete": { + "id": "compute.backendServices.delete", + "path": "{project}/global/backendServices/{backendService}", + "httpMethod": "DELETE", + "description": "Deletes the specified BackendService resource.", + "parameters": { + "backendService": { + "type": "string", + "description": "Name of the BackendService resource to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "backendService" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.backendServices.get", + "path": "{project}/global/backendServices/{backendService}", + "httpMethod": "GET", + "description": "Returns the specified BackendService resource. 
Get a list of available backend services by making a list() request.", + "parameters": { + "backendService": { + "type": "string", + "description": "Name of the BackendService resource to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "backendService" + ], + "response": { + "$ref": "BackendService" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "getHealth": { + "id": "compute.backendServices.getHealth", + "path": "{project}/global/backendServices/{backendService}/getHealth", + "httpMethod": "POST", + "description": "Gets the most recent health check results for this BackendService.", + "parameters": { + "backendService": { + "type": "string", + "description": "Name of the BackendService resource to which the queried instance belongs.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "backendService" + ], + "request": { + "$ref": "ResourceGroupReference" + }, + "response": { + "$ref": "BackendServiceGroupHealth" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "id": "compute.backendServices.insert", + "path": "{project}/global/backendServices", + "httpMethod": "POST", + "description": "Creates a BackendService resource in the specified project using the data included in the request. There are several restrictions and guidelines to keep in mind when creating a backend service. Read Restrictions and Guidelines for more information.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "request": { + "$ref": "BackendService" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "id": "compute.backendServices.list", + "path": "{project}/global/backendServices", + "httpMethod": "GET", + "description": "Retrieves the list of BackendService resources available to the specified project.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). 
The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "BackendServiceList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "patch": { + "id": "compute.backendServices.patch", + "path": "{project}/global/backendServices/{backendService}", + "httpMethod": "PATCH", + "description": "Updates the entire content of the BackendService resource. There are several restrictions and guidelines to keep in mind when updating a backend service. Read Restrictions and Guidelines for more information. 
This method supports patch semantics.", + "parameters": { + "backendService": { + "type": "string", + "description": "Name of the BackendService resource to update.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "backendService" + ], + "request": { + "$ref": "BackendService" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "update": { + "id": "compute.backendServices.update", + "path": "{project}/global/backendServices/{backendService}", + "httpMethod": "PUT", + "description": "Updates the entire content of the BackendService resource. There are several restrictions and guidelines to keep in mind when updating a backend service. Read Restrictions and Guidelines for more information.", + "parameters": { + "backendService": { + "type": "string", + "description": "Name of the BackendService resource to update.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "backendService" + ], + "request": { + "$ref": "BackendService" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + } + } + }, + "diskTypes": { + "methods": { + "aggregatedList": { + "id": "compute.diskTypes.aggregatedList", + "path": "{project}/aggregated/diskTypes", + "httpMethod": "GET", + "description": "Retrieves an aggregated list of disk types.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. 
For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "DiskTypeAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "get": { + "id": "compute.diskTypes.get", + "path": "{project}/zones/{zone}/diskTypes/{diskType}", + "httpMethod": "GET", + "description": "Returns the specified disk type. Get a list of available disk types by making a list() request.", + "parameters": { + "diskType": { + "type": "string", + "description": "Name of the disk type to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "diskType" + ], + "response": { + "$ref": "DiskType" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "list": { + "id": "compute.diskTypes.list", + "path": "{project}/zones/{zone}/diskTypes", + "httpMethod": "GET", + "description": "Retrieves a list of disk types available to the specified project.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. 
The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone" + ], + "response": { + "$ref": "DiskTypeList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "disks": { + "methods": { + "aggregatedList": { + "id": "compute.disks.aggregatedList", + "path": "{project}/aggregated/disks", + "httpMethod": "GET", + "description": "Retrieves an aggregated list of persistent disks.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "DiskAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "createSnapshot": { + "id": "compute.disks.createSnapshot", + "path": "{project}/zones/{zone}/disks/{disk}/createSnapshot", + "httpMethod": "POST", + "description": "Creates a snapshot of a specified persistent disk.", + "parameters": { + "disk": { + "type": "string", + "description": "Name of the persistent disk to snapshot.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "disk" + ], + "request": { + "$ref": "Snapshot" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "delete": { + "id": "compute.disks.delete", + "path": "{project}/zones/{zone}/disks/{disk}", + "httpMethod": "DELETE", + "description": "Deletes the specified persistent disk. Deleting a disk removes its data permanently and is irreversible. However, deleting a disk does not delete any snapshots previously made from the disk. 
You must separately delete snapshots.", + "parameters": { + "disk": { + "type": "string", + "description": "Name of the persistent disk to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "disk" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.disks.get", + "path": "{project}/zones/{zone}/disks/{disk}", + "httpMethod": "GET", + "description": "Returns a specified persistent disk. Get a list of available persistent disks by making a list() request.", + "parameters": { + "disk": { + "type": "string", + "description": "Name of the persistent disk to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "disk" + ], + "response": { + "$ref": "Disk" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "id": "compute.disks.insert", + "path": "{project}/zones/{zone}/disks", + "httpMethod": "POST", + "description": "Creates a persistent disk in the specified project using the data in the request. You can create a disk with a sourceImage, a sourceSnapshot, or create an empty 500 GB data disk by omitting all properties. You can also create a disk that is larger than the default size by specifying the sizeGb property.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "sourceImage": { + "type": "string", + "description": "Optional. 
Source image to restore onto a disk.", + "location": "query" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone" + ], + "request": { + "$ref": "Disk" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "id": "compute.disks.list", + "path": "{project}/zones/{zone}/disks", + "httpMethod": "GET", + "description": "Retrieves a list of persistent disks contained within the specified zone.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone" + ], + "response": { + "$ref": "DiskList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "resize": { + "id": "compute.disks.resize", + "path": "{project}/zones/{zone}/disks/{disk}/resize", + "httpMethod": "POST", + "description": "Resizes the specified persistent disk.", + "parameters": { + "disk": { + "type": "string", + "description": "The name of the persistent disk.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "disk" + ], + "request": { + "$ref": "DisksResizeRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + } + } + }, + "firewalls": { + "methods": { + "delete": { + "id": "compute.firewalls.delete", + "path": "{project}/global/firewalls/{firewall}", + "httpMethod": "DELETE", + "description": "Deletes the specified firewall.", + "parameters": { + "firewall": { + "type": "string", + "description": "Name of the firewall rule to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "firewall" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.firewalls.get", + "path": "{project}/global/firewalls/{firewall}", + "httpMethod": "GET", + "description": "Returns the specified firewall.", + "parameters": { + "firewall": { + "type": "string", + "description": "Name of the firewall rule to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + 
"project", + "firewall" + ], + "response": { + "$ref": "Firewall" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "id": "compute.firewalls.insert", + "path": "{project}/global/firewalls", + "httpMethod": "POST", + "description": "Creates a firewall rule in the specified project using the data included in the request.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "request": { + "$ref": "Firewall" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "id": "compute.firewalls.list", + "path": "{project}/global/firewalls", + "httpMethod": "GET", + "description": "Retrieves the list of firewall rules available to the specified project.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "FirewallList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "patch": { + "id": "compute.firewalls.patch", + "path": "{project}/global/firewalls/{firewall}", + "httpMethod": "PATCH", + "description": "Updates the specified firewall rule with the data included in the request. This method supports patch semantics.", + "parameters": { + "firewall": { + "type": "string", + "description": "Name of the firewall rule to update.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "firewall" + ], + "request": { + "$ref": "Firewall" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "update": { + "id": "compute.firewalls.update", + "path": "{project}/global/firewalls/{firewall}", + "httpMethod": "PUT", + "description": "Updates the specified firewall rule with the data included in the request.", + "parameters": { + "firewall": { + "type": "string", + "description": "Name of the firewall rule to update.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "firewall" + ], + "request": { + "$ref": "Firewall" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + } + } + }, + "forwardingRules": { + "methods": { + "aggregatedList": { + "id": "compute.forwardingRules.aggregatedList", + "path": "{project}/aggregated/forwardingRules", + "httpMethod": "GET", + "description": "Retrieves an aggregated list of forwarding rules.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). 
For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "ForwardingRuleAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "delete": { + "id": "compute.forwardingRules.delete", + "path": "{project}/regions/{region}/forwardingRules/{forwardingRule}", + "httpMethod": "DELETE", + "description": "Deletes the specified ForwardingRule resource.", + "parameters": { + "forwardingRule": { + "type": "string", + "description": "Name of the ForwardingRule resource to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "forwardingRule" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.forwardingRules.get", + "path": "{project}/regions/{region}/forwardingRules/{forwardingRule}", + "httpMethod": "GET", + "description": "Returns the specified ForwardingRule resource.", + "parameters": { + "forwardingRule": { + "type": "string", + "description": "Name 
of the ForwardingRule resource to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "forwardingRule" + ], + "response": { + "$ref": "ForwardingRule" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "id": "compute.forwardingRules.insert", + "path": "{project}/regions/{region}/forwardingRules", + "httpMethod": "POST", + "description": "Creates a ForwardingRule resource in the specified project and region using the data included in the request.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region" + ], + "request": { + "$ref": "ForwardingRule" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "id": "compute.forwardingRules.list", + "path": "{project}/regions/{region}/forwardingRules", + "httpMethod": "GET", + "description": "Retrieves a list of ForwardingRule resources available to the specified project and region.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region" + ], + "response": { + "$ref": "ForwardingRuleList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "setTarget": { + "id": "compute.forwardingRules.setTarget", + "path": "{project}/regions/{region}/forwardingRules/{forwardingRule}/setTarget", + "httpMethod": "POST", + "description": "Changes target URL for forwarding rule. The new target should be of the same type as the old target.", + "parameters": { + "forwardingRule": { + "type": "string", + "description": "Name of the ForwardingRule resource in which target is to be set.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "forwardingRule" + ], + "request": { + "$ref": "TargetReference" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + } + } + }, + "globalAddresses": { + "methods": { + "delete": { + "id": "compute.globalAddresses.delete", + "path": "{project}/global/addresses/{address}", + "httpMethod": "DELETE", + "description": "Deletes the specified address resource.", + "parameters": { + "address": { + "type": "string", + "description": "Name of the address resource to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + 
"address" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.globalAddresses.get", + "path": "{project}/global/addresses/{address}", + "httpMethod": "GET", + "description": "Returns the specified address resource. Get a list of available addresses by making a list() request.", + "parameters": { + "address": { + "type": "string", + "description": "Name of the address resource to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "address" + ], + "response": { + "$ref": "Address" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "id": "compute.globalAddresses.insert", + "path": "{project}/global/addresses", + "httpMethod": "POST", + "description": "Creates an address resource in the specified project using the data included in the request.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "request": { + "$ref": "Address" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "id": "compute.globalAddresses.list", + "path": "{project}/global/addresses", + "httpMethod": "GET", + "description": "Retrieves a list of global addresses.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "AddressList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "globalForwardingRules": { + "methods": { + "delete": { + "id": "compute.globalForwardingRules.delete", + "path": "{project}/global/forwardingRules/{forwardingRule}", + "httpMethod": "DELETE", + "description": "Deletes the specified ForwardingRule resource.", + "parameters": { + "forwardingRule": { + "type": "string", + "description": "Name of the ForwardingRule resource to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "forwardingRule" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.globalForwardingRules.get", + "path": "{project}/global/forwardingRules/{forwardingRule}", + "httpMethod": "GET", + "description": "Returns the specified ForwardingRule resource. 
Get a list of available forwarding rules by making a list() request.", + "parameters": { + "forwardingRule": { + "type": "string", + "description": "Name of the ForwardingRule resource to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "forwardingRule" + ], + "response": { + "$ref": "ForwardingRule" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "id": "compute.globalForwardingRules.insert", + "path": "{project}/global/forwardingRules", + "httpMethod": "POST", + "description": "Creates a ForwardingRule resource in the specified project and region using the data included in the request.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "request": { + "$ref": "ForwardingRule" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "id": "compute.globalForwardingRules.list", + "path": "{project}/global/forwardingRules", + "httpMethod": "GET", + "description": "Retrieves a list of ForwardingRule resources available to the specified project.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "ForwardingRuleList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "setTarget": { + "id": "compute.globalForwardingRules.setTarget", + "path": "{project}/global/forwardingRules/{forwardingRule}/setTarget", + "httpMethod": "POST", + "description": "Changes target URL for forwarding rule. The new target should be of the same type as the old target.", + "parameters": { + "forwardingRule": { + "type": "string", + "description": "Name of the ForwardingRule resource in which target is to be set.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "forwardingRule" + ], + "request": { + "$ref": "TargetReference" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + } + } + }, + "globalOperations": { + "methods": { + "aggregatedList": { + "id": "compute.globalOperations.aggregatedList", + "path": "{project}/aggregated/operations", + "httpMethod": "GET", + "description": "Retrieves an aggregated list of all operations.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "OperationAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "delete": { + "id": "compute.globalOperations.delete", + "path": "{project}/global/operations/{operation}", + "httpMethod": "DELETE", + "description": "Deletes the specified Operations resource.", + "parameters": { + "operation": { + "type": "string", + "description": "Name of the Operations resource to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "operation" + ], + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.globalOperations.get", + "path": "{project}/global/operations/{operation}", + "httpMethod": "GET", + "description": "Retrieves the specified Operations resource. 
Get a list of operations by making a list() request.", + "parameters": { + "operation": { + "type": "string", + "description": "Name of the Operations resource to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "operation" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "list": { + "id": "compute.globalOperations.list", + "path": "{project}/global/operations", + "httpMethod": "GET", + "description": "Retrieves a list of Operation resources contained within the specified project.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "OperationList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "httpHealthChecks": { + "methods": { + "delete": { + "id": "compute.httpHealthChecks.delete", + "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", + "httpMethod": "DELETE", + "description": "Deletes the specified HttpHealthCheck resource.", + "parameters": { + "httpHealthCheck": { + "type": "string", + "description": "Name of the HttpHealthCheck resource to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "httpHealthCheck" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.httpHealthChecks.get", + "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", + "httpMethod": "GET", + "description": "Returns the specified HttpHealthCheck resource. 
Get a list of available HTTP health checks by making a list() request.", + "parameters": { + "httpHealthCheck": { + "type": "string", + "description": "Name of the HttpHealthCheck resource to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "httpHealthCheck" + ], + "response": { + "$ref": "HttpHealthCheck" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "id": "compute.httpHealthChecks.insert", + "path": "{project}/global/httpHealthChecks", + "httpMethod": "POST", + "description": "Creates a HttpHealthCheck resource in the specified project using the data included in the request.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "request": { + "$ref": "HttpHealthCheck" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "id": "compute.httpHealthChecks.list", + "path": "{project}/global/httpHealthChecks", + "httpMethod": "GET", + "description": "Retrieves the list of HttpHealthCheck resources available to the specified project.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "HttpHealthCheckList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "patch": { + "id": "compute.httpHealthChecks.patch", + "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", + "httpMethod": "PATCH", + "description": "Updates a HttpHealthCheck resource in the specified project using the data included in the request. This method supports patch semantics.", + "parameters": { + "httpHealthCheck": { + "type": "string", + "description": "Name of the HttpHealthCheck resource to update.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "httpHealthCheck" + ], + "request": { + "$ref": "HttpHealthCheck" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "update": { + "id": "compute.httpHealthChecks.update", + "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", + "httpMethod": "PUT", + "description": "Updates a HttpHealthCheck resource in the specified project using the data included in the request.", + "parameters": { + "httpHealthCheck": { + "type": "string", + "description": "Name of the HttpHealthCheck resource to update.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "httpHealthCheck" + ], + "request": { + "$ref": "HttpHealthCheck" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + } + } + }, + "httpsHealthChecks": { + "methods": { + "delete": { + "id": "compute.httpsHealthChecks.delete", + "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", + "httpMethod": "DELETE", + "description": "Deletes the specified HttpsHealthCheck resource.", + "parameters": { + "httpsHealthCheck": { + "type": "string", + "description": "Name of the HttpsHealthCheck resource to delete.", + "required": 
true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "httpsHealthCheck" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.httpsHealthChecks.get", + "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", + "httpMethod": "GET", + "description": "Returns the specified HttpsHealthCheck resource. Get a list of available HTTPS health checks by making a list() request.", + "parameters": { + "httpsHealthCheck": { + "type": "string", + "description": "Name of the HttpsHealthCheck resource to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "httpsHealthCheck" + ], + "response": { + "$ref": "HttpsHealthCheck" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "id": "compute.httpsHealthChecks.insert", + "path": "{project}/global/httpsHealthChecks", + "httpMethod": "POST", + "description": "Creates a HttpsHealthCheck resource in the specified project using the data included in the request.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "request": { + "$ref": "HttpsHealthCheck" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "id": "compute.httpsHealthChecks.list", + "path": "{project}/global/httpsHealthChecks", + "httpMethod": "GET", + "description": "Retrieves the list of HttpsHealthCheck resources available to the specified project.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. 
The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "HttpsHealthCheckList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "patch": { + "id": "compute.httpsHealthChecks.patch", + "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", + "httpMethod": "PATCH", + "description": "Updates a HttpsHealthCheck resource in the specified project using the data included in the request. 
This method supports patch semantics.", + "parameters": { + "httpsHealthCheck": { + "type": "string", + "description": "Name of the HttpsHealthCheck resource to update.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "httpsHealthCheck" + ], + "request": { + "$ref": "HttpsHealthCheck" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "update": { + "id": "compute.httpsHealthChecks.update", + "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", + "httpMethod": "PUT", + "description": "Updates a HttpsHealthCheck resource in the specified project using the data included in the request.", + "parameters": { + "httpsHealthCheck": { + "type": "string", + "description": "Name of the HttpsHealthCheck resource to update.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "httpsHealthCheck" + ], + "request": { + "$ref": "HttpsHealthCheck" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + } + } + }, + "images": { + "methods": { + "delete": { + "id": "compute.images.delete", + "path": "{project}/global/images/{image}", + "httpMethod": "DELETE", + "description": "Deletes the specified image.", + "parameters": { + "image": { + "type": "string", + "description": "Name of the image resource to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "image" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "deprecate": { + "id": "compute.images.deprecate", + "path": "{project}/global/images/{image}/deprecate", + "httpMethod": "POST", + "description": "Sets the deprecation status of an image.\n\nIf an empty request body is given, clears the deprecation status instead.", + "parameters": { + "image": { + "type": "string", + "description": "Image name.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "image" + ], + "request": { + "$ref": "DeprecationStatus" + 
}, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.images.get", + "path": "{project}/global/images/{image}", + "httpMethod": "GET", + "description": "Returns the specified image. Get a list of available images by making a list() request.", + "parameters": { + "image": { + "type": "string", + "description": "Name of the image resource to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "image" + ], + "response": { + "$ref": "Image" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "getFromFamily": { + "id": "compute.images.getFromFamily", + "path": "{project}/global/images/family/{family}", + "httpMethod": "GET", + "description": "Returns the latest image that is part of an image family and is not deprecated.", + "parameters": { + "family": { + "type": "string", + "description": "Name of the image resource to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "family" + ], + "response": { + "$ref": "Image" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "id": "compute.images.insert", + "path": "{project}/global/images", + "httpMethod": "POST", + "description": "Creates an image in the specified project using the data included in the request.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "request": { + "$ref": "Image" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "list": { + "id": "compute.images.list", + "path": "{project}/global/images", + "httpMethod": "GET", + "description": "Retrieves the list of private images available to the specified project. Private images are images you create that belong to your project. This method does not get any images that belong to other projects, including publicly-available images, like Debian 7. 
If you want to get a list of publicly-available images, use this method to make a request to the respective image project, such as debian-cloud or windows-cloud.\n\nSee Accessing images for more information.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "ImageList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "instanceGroupManagers": { + "methods": { + "abandonInstances": { + "id": "compute.instanceGroupManagers.abandonInstances", + "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/abandonInstances", + "httpMethod": "POST", + "description": "Schedules a group action to remove the specified instances from the managed instance group. Abandoning an instance does not delete the instance, but it does remove the instance from any target pools that are applied by the managed instance group. This method reduces the targetSize of the managed instance group by the number of instances that you abandon. 
This operation is marked as DONE when the action is scheduled even if the instances have not yet been removed from the group. You must separately verify the status of the abandoning action with the listmanagedinstances method.", + "parameters": { + "instanceGroupManager": { + "type": "string", + "description": "The name of the managed instance group.", + "required": true, + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone where the managed instance group is located.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "instanceGroupManager" + ], + "request": { + "$ref": "InstanceGroupManagersAbandonInstancesRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "aggregatedList": { + "id": "compute.instanceGroupManagers.aggregatedList", + "path": "{project}/aggregated/instanceGroupManagers", + "httpMethod": "GET", + "description": "Retrieves the list of managed instance groups and groups them by zone.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "InstanceGroupManagerAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "delete": { + "id": "compute.instanceGroupManagers.delete", + "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", + "httpMethod": "DELETE", + "description": "Deletes the specified managed instance group and all of the instances in that group. Note that the instance group must not belong to a backend service. Read Deleting an instance group for more information.", + "parameters": { + "instanceGroupManager": { + "type": "string", + "description": "The name of the managed instance group to delete.", + "required": true, + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone where the managed instance group is located.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "instanceGroupManager" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "deleteInstances": { + "id": "compute.instanceGroupManagers.deleteInstances", + "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/deleteInstances", + "httpMethod": "POST", + "description": "Schedules a group action to delete the specified instances in the managed instance group. The instances are also removed from any target pools of which they were a member. This method reduces the targetSize of the managed instance group by the number of instances that you delete. This operation is marked as DONE when the action is scheduled even if the instances are still being deleted. 
You must separately verify the status of the deleting action with the listmanagedinstances method.", + "parameters": { + "instanceGroupManager": { + "type": "string", + "description": "The name of the managed instance group.", + "required": true, + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone where the managed instance group is located.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "instanceGroupManager" + ], + "request": { + "$ref": "InstanceGroupManagersDeleteInstancesRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.instanceGroupManagers.get", + "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", + "httpMethod": "GET", + "description": "Returns all of the details about the specified managed instance group. Get a list of available managed instance groups by making a list() request.", + "parameters": { + "instanceGroupManager": { + "type": "string", + "description": "The name of the managed instance group.", + "required": true, + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone where the managed instance group is located.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "instanceGroupManager" + ], + "response": { + "$ref": "InstanceGroupManager" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "id": "compute.instanceGroupManagers.insert", + "path": "{project}/zones/{zone}/instanceGroupManagers", + "httpMethod": "POST", + "description": "Creates a managed instance group using the information that you specify in the request. After the group is created, it schedules an action to create instances in the group using the specified instance template. This operation is marked as DONE when the group is created even if the instances in the group have not yet been created. 
You must separately verify the status of the individual instances with the listmanagedinstances method.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone where you want to create the managed instance group.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone" + ], + "request": { + "$ref": "InstanceGroupManager" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "id": "compute.instanceGroupManagers.list", + "path": "{project}/zones/{zone}/instanceGroupManagers", + "httpMethod": "GET", + "description": "Retrieves a list of managed instance groups that are contained within the specified project and zone.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone where the managed instance group is located.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone" + ], + "response": { + "$ref": "InstanceGroupManagerList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "listManagedInstances": { + "id": "compute.instanceGroupManagers.listManagedInstances", + "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances", + "httpMethod": "POST", + "description": "Lists all of the instances in the managed instance group. Each instance in the list has a currentAction, which indicates the action that the managed instance group is performing on the instance. For example, if the group is still creating an instance, the currentAction is CREATING. If a previous action failed, the list displays the errors for that failed action.", + "parameters": { + "instanceGroupManager": { + "type": "string", + "description": "The name of the managed instance group.", + "required": true, + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone where the managed instance group is located.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "instanceGroupManager" + ], + "response": { + "$ref": "InstanceGroupManagersListManagedInstancesResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "recreateInstances": { + "id": "compute.instanceGroupManagers.recreateInstances", + "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/recreateInstances", + "httpMethod": "POST", + "description": "Schedules a group action to recreate the specified instances in the managed instance group. The instances are deleted and recreated using the current instance template for the managed instance group. This operation is marked as DONE when the action is scheduled even if the instances have not yet been recreated. 
You must separately verify the status of the recreating action with the listmanagedinstances method.", + "parameters": { + "instanceGroupManager": { + "type": "string", + "description": "The name of the managed instance group.", + "required": true, + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone where the managed instance group is located.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "instanceGroupManager" + ], + "request": { + "$ref": "InstanceGroupManagersRecreateInstancesRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "resize": { + "id": "compute.instanceGroupManagers.resize", + "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/resize", + "httpMethod": "POST", + "description": "Resizes the managed instance group. If you increase the size, the group creates new instances using the current instance template. If you decrease the size, the group deletes instances. The resize operation is marked DONE when the resize actions are scheduled even if the group has not yet added or deleted any instances. You must separately verify the status of the creating or deleting actions with the listmanagedinstances method.", + "parameters": { + "instanceGroupManager": { + "type": "string", + "description": "The name of the managed instance group.", + "required": true, + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "size": { + "type": "integer", + "description": "The number of running instances that the managed instance group should maintain at any given time. The group automatically adds or removes instances to maintain the number of instances specified by this parameter.", + "required": true, + "format": "int32", + "location": "query" + }, + "zone": { + "type": "string", + "description": "The name of the zone where the managed instance group is located.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "instanceGroupManager", + "size" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "setInstanceTemplate": { + "id": "compute.instanceGroupManagers.setInstanceTemplate", + "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate", + "httpMethod": "POST", + "description": "Specifies the instance template to use when creating new instances in this group. 
The templates for existing instances in the group do not change unless you recreate them.", + "parameters": { + "instanceGroupManager": { + "type": "string", + "description": "The name of the managed instance group.", + "required": true, + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone where the managed instance group is located.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "instanceGroupManager" + ], + "request": { + "$ref": "InstanceGroupManagersSetInstanceTemplateRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "setTargetPools": { + "id": "compute.instanceGroupManagers.setTargetPools", + "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setTargetPools", + "httpMethod": "POST", + "description": "Modifies the target pools to which all instances in this managed instance group are assigned. The target pools automatically apply to all of the instances in the managed instance group. This operation is marked DONE when you make the request even if the instances have not yet been added to their target pools. The change might take some time to apply to all of the instances in the group depending on the size of the group.", + "parameters": { + "instanceGroupManager": { + "type": "string", + "description": "The name of the managed instance group.", + "required": true, + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone where the managed instance group is located.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "instanceGroupManager" + ], + "request": { + "$ref": "InstanceGroupManagersSetTargetPoolsRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + } + } + }, + "instanceGroups": { + "methods": { + "addInstances": { + "id": "compute.instanceGroups.addInstances", + "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/addInstances", + "httpMethod": "POST", + "description": "Adds a list of instances to the specified instance group. All of the instances in the instance group must be in the same network/subnetwork. 
Read Adding instances for more information.", + "parameters": { + "instanceGroup": { + "type": "string", + "description": "The name of the instance group where you are adding instances.", + "required": true, + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone where the instance group is located.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "instanceGroup" + ], + "request": { + "$ref": "InstanceGroupsAddInstancesRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "aggregatedList": { + "id": "compute.instanceGroups.aggregatedList", + "path": "{project}/aggregated/instanceGroups", + "httpMethod": "GET", + "description": "Retrieves the list of instance groups and sorts them by zone.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "InstanceGroupAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "delete": { + "id": "compute.instanceGroups.delete", + "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}", + "httpMethod": "DELETE", + "description": "Deletes the specified instance group. The instances in the group are not deleted. Note that the instance group must not belong to a backend service. Read Deleting an instance group for more information.", + "parameters": { + "instanceGroup": { + "type": "string", + "description": "The name of the instance group to delete.", + "required": true, + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone where the instance group is located.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "instanceGroup" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.instanceGroups.get", + "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}", + "httpMethod": "GET", + "description": "Returns the specified instance group. 
Get a list of available instance groups by making a list() request.", + "parameters": { + "instanceGroup": { + "type": "string", + "description": "The name of the instance group.", + "required": true, + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone where the instance group is located.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "instanceGroup" + ], + "response": { + "$ref": "InstanceGroup" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "id": "compute.instanceGroups.insert", + "path": "{project}/zones/{zone}/instanceGroups", + "httpMethod": "POST", + "description": "Creates an instance group in the specified project using the parameters that are included in the request.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone where you want to create the instance group.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone" + ], + "request": { + "$ref": "InstanceGroup" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "id": "compute.instanceGroups.list", + "path": "{project}/zones/{zone}/instanceGroups", + "httpMethod": "GET", + "description": "Retrieves the list of instance groups that are located in the specified project and zone.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone where the instance group is located.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone" + ], + "response": { + "$ref": "InstanceGroupList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "listInstances": { + "id": "compute.instanceGroups.listInstances", + "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/listInstances", + "httpMethod": "POST", + "description": "Lists the instances in the specified instance group.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "instanceGroup": { + "type": "string", + "description": "The name of the instance group from which you want to generate a list of included instances.", + "required": true, + "location": "path" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone where the instance group is located.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "instanceGroup" + ], + "request": { + "$ref": "InstanceGroupsListInstancesRequest" + }, + "response": { + "$ref": "InstanceGroupsListInstances" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "removeInstances": { + "id": "compute.instanceGroups.removeInstances", + "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/removeInstances", + "httpMethod": "POST", + "description": "Removes one or more instances from the specified instance group, but does not delete those instances.", + "parameters": { + "instanceGroup": { + "type": "string", + "description": "The name of the instance group where the specified instances will be removed.", + "required": true, + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone where the instance group is located.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "instanceGroup" + ], + "request": { + "$ref": "InstanceGroupsRemoveInstancesRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "setNamedPorts": { + "id": "compute.instanceGroups.setNamedPorts", + "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/setNamedPorts", + "httpMethod": "POST", + "description": "Sets the named ports for the specified instance group.", + "parameters": { + "instanceGroup": { + "type": "string", + "description": "The name of the instance group where the named ports are updated.", + "required": true, + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + 
"pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone where the instance group is located.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "instanceGroup" + ], + "request": { + "$ref": "InstanceGroupsSetNamedPortsRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + } + } + }, + "instanceTemplates": { + "methods": { + "delete": { + "id": "compute.instanceTemplates.delete", + "path": "{project}/global/instanceTemplates/{instanceTemplate}", + "httpMethod": "DELETE", + "description": "Deletes the specified instance template. If you delete an instance template that is being referenced from another instance group, the instance group will not be able to create or recreate virtual machine instances. Deleting an instance template is permanent and cannot be undone.", + "parameters": { + "instanceTemplate": { + "type": "string", + "description": "The name of the instance template to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "instanceTemplate" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.instanceTemplates.get", + "path": "{project}/global/instanceTemplates/{instanceTemplate}", + "httpMethod": "GET", + "description": "Returns the specified instance template. Get a list of available instance templates by making a list() request.", + "parameters": { + "instanceTemplate": { + "type": "string", + "description": "The name of the instance template.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "instanceTemplate" + ], + "response": { + "$ref": "InstanceTemplate" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "id": "compute.instanceTemplates.insert", + "path": "{project}/global/instanceTemplates", + "httpMethod": "POST", + "description": "Creates an instance template in the specified project using the data that is included in the request. 
If you are creating a new template to update an existing instance group, your new instance template must use the same network or, if applicable, the same subnetwork as the original template.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "request": { + "$ref": "InstanceTemplate" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "id": "compute.instanceTemplates.list", + "path": "{project}/global/instanceTemplates", + "httpMethod": "GET", + "description": "Retrieves a list of instance templates that are contained within the specified project.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "InstanceTemplateList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "instances": { + "methods": { + "addAccessConfig": { + "id": "compute.instances.addAccessConfig", + "path": "{project}/zones/{zone}/instances/{instance}/addAccessConfig", + "httpMethod": "POST", + "description": "Adds an access config to an instance's network interface.", + "parameters": { + "instance": { + "type": "string", + "description": "The instance name for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "networkInterface": { + "type": "string", + "description": "The name of the network interface to add to this instance.", + "required": true, + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "instance", + "networkInterface" + ], + "request": { + "$ref": "AccessConfig" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "aggregatedList": { + "id": "compute.instances.aggregatedList", + "path": "{project}/aggregated/instances", + "httpMethod": "GET", + "description": "Retrieves an aggregated list of instances.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "InstanceAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "attachDisk": { + "id": "compute.instances.attachDisk", + "path": "{project}/zones/{zone}/instances/{instance}/attachDisk", + "httpMethod": "POST", + "description": "Attaches a Disk resource to an instance.", + "parameters": { + "instance": { + "type": "string", + "description": "The instance name for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "instance" + ], + "request": { + "$ref": "AttachedDisk" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "delete": { + "id": "compute.instances.delete", + "path": "{project}/zones/{zone}/instances/{instance}", + "httpMethod": "DELETE", + "description": "Deletes the specified Instance resource. 
For more information, see Stopping or Deleting an Instance.", + "parameters": { + "instance": { + "type": "string", + "description": "Name of the instance resource to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "instance" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "deleteAccessConfig": { + "id": "compute.instances.deleteAccessConfig", + "path": "{project}/zones/{zone}/instances/{instance}/deleteAccessConfig", + "httpMethod": "POST", + "description": "Deletes an access config from an instance's network interface.", + "parameters": { + "accessConfig": { + "type": "string", + "description": "The name of the access config to delete.", + "required": true, + "location": "query" + }, + "instance": { + "type": "string", + "description": "The instance name for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "networkInterface": { + "type": "string", + "description": "The name of the network interface.", + "required": true, + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "instance", + "accessConfig", + "networkInterface" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "detachDisk": { + "id": "compute.instances.detachDisk", + "path": "{project}/zones/{zone}/instances/{instance}/detachDisk", + "httpMethod": "POST", + "description": "Detaches a disk from an instance.", + "parameters": { + "deviceName": { + "type": "string", + "description": "Disk device name to detach.", + "required": true, + "pattern": "\\w[\\w.-]{0,254}", + "location": "query" + }, + "instance": { + "type": "string", + "description": "Instance name.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "instance", + "deviceName" + ], + "response": { + "$ref": 
"Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.instances.get", + "path": "{project}/zones/{zone}/instances/{instance}", + "httpMethod": "GET", + "description": "Returns the specified Instance resource. Get a list of available instances by making a list() request.", + "parameters": { + "instance": { + "type": "string", + "description": "Name of the instance resource to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "instance" + ], + "response": { + "$ref": "Instance" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "getSerialPortOutput": { + "id": "compute.instances.getSerialPortOutput", + "path": "{project}/zones/{zone}/instances/{instance}/serialPort", + "httpMethod": "GET", + "description": "Returns the specified instance's serial port output.", + "parameters": { + "instance": { + "type": "string", + "description": "Name of the instance scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "port": { + "type": "integer", + "description": "Specifies which COM or serial port to retrieve data from.", + "default": "1", + "format": "int32", + "minimum": "1", + "maximum": "4", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "instance" + ], + "response": { + "$ref": "SerialPortOutput" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "id": "compute.instances.insert", + "path": "{project}/zones/{zone}/instances", + "httpMethod": "POST", + "description": "Creates an instance resource in the specified project using the data included in the request.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone" + ], + "request": { + "$ref": "Instance" + }, + "response": { + 
"$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "id": "compute.instances.list", + "path": "{project}/zones/{zone}/instances", + "httpMethod": "GET", + "description": "Retrieves the list of instances contained within the specified zone.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone" + ], + "response": { + "$ref": "InstanceList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "reset": { + "id": "compute.instances.reset", + "path": "{project}/zones/{zone}/instances/{instance}/reset", + "httpMethod": "POST", + "description": "Performs a hard reset on the instance.", + "parameters": { + "instance": { + "type": "string", + "description": "Name of the instance scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "instance" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "setDiskAutoDelete": { + "id": "compute.instances.setDiskAutoDelete", + "path": "{project}/zones/{zone}/instances/{instance}/setDiskAutoDelete", + "httpMethod": "POST", + "description": "Sets the auto-delete flag for a disk attached to an instance.", + "parameters": { + "autoDelete": { + "type": "boolean", + "description": "Whether to auto-delete the disk when the instance is deleted.", + "required": true, + "location": "query" + }, + "deviceName": { + "type": "string", + "description": "The device name of the disk to modify.", + "required": true, + "pattern": "\\w[\\w.-]{0,254}", + "location": "query" + }, + "instance": { + "type": "string", + "description": "The instance name.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "instance", + "autoDelete", + "deviceName" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "setMachineType": { + "id": "compute.instances.setMachineType", + "path": 
"{project}/zones/{zone}/instances/{instance}/setMachineType", + "httpMethod": "POST", + "description": "Changes the machine type for a stopped instance to the machine type specified in the request.", + "parameters": { + "instance": { + "type": "string", + "description": "Name of the instance scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "instance" + ], + "request": { + "$ref": "InstancesSetMachineTypeRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "setMetadata": { + "id": "compute.instances.setMetadata", + "path": "{project}/zones/{zone}/instances/{instance}/setMetadata", + "httpMethod": "POST", + "description": "Sets metadata for the specified instance to the data included in the request.", + "parameters": { + "instance": { + "type": "string", + "description": "Name of the instance scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "instance" + ], + "request": { + "$ref": "Metadata" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "setScheduling": { + "id": "compute.instances.setScheduling", + "path": "{project}/zones/{zone}/instances/{instance}/setScheduling", + "httpMethod": "POST", + "description": "Sets an instance's scheduling options.", + "parameters": { + "instance": { + "type": "string", + "description": "Instance name.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "instance" + ], + "request": { + "$ref": "Scheduling" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "setTags": { + "id": "compute.instances.setTags", + "path": 
"{project}/zones/{zone}/instances/{instance}/setTags", + "httpMethod": "POST", + "description": "Sets tags for the specified instance to the data included in the request.", + "parameters": { + "instance": { + "type": "string", + "description": "Name of the instance scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "instance" + ], + "request": { + "$ref": "Tags" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "start": { + "id": "compute.instances.start", + "path": "{project}/zones/{zone}/instances/{instance}/start", + "httpMethod": "POST", + "description": "Starts an instance that was stopped using the using the instances().stop method. For more information, see Restart an instance.", + "parameters": { + "instance": { + "type": "string", + "description": "Name of the instance resource to start.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "instance" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "stop": { + "id": "compute.instances.stop", + "path": "{project}/zones/{zone}/instances/{instance}/stop", + "httpMethod": "POST", + "description": "Stops a running instance, shutting it down cleanly, and allows you to restart the instance at a later time. Stopped instances do not incur per-minute, virtual machine usage charges while they are stopped, but any resources that the virtual machine is using, such as persistent disks and static IP addresses, will continue to be charged until they are deleted. 
For more information, see Stopping an instance.", + "parameters": { + "instance": { + "type": "string", + "description": "Name of the instance resource to stop.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "instance" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + } + } + }, + "licenses": { + "methods": { + "get": { + "id": "compute.licenses.get", + "path": "{project}/global/licenses/{license}", + "httpMethod": "GET", + "description": "Returns the specified License resource. Get a list of available licenses by making a list() request.", + "parameters": { + "license": { + "type": "string", + "description": "Name of the License resource to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "license" + ], + "response": { + "$ref": "License" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "machineTypes": { + "methods": { + "aggregatedList": { + "id": "compute.machineTypes.aggregatedList", + "path": "{project}/aggregated/machineTypes", + "httpMethod": "GET", + "description": "Retrieves an aggregated list of machine types.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. 
For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "MachineTypeAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "get": { + "id": "compute.machineTypes.get", + "path": "{project}/zones/{zone}/machineTypes/{machineType}", + "httpMethod": "GET", + "description": "Returns the specified machine type. Get a list of available machine types by making a list() request.", + "parameters": { + "machineType": { + "type": "string", + "description": "Name of the machine type to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "machineType" + ], + "response": { + "$ref": "MachineType" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "list": { + "id": "compute.machineTypes.list", + "path": "{project}/zones/{zone}/machineTypes", + "httpMethod": "GET", + "description": "Retrieves a list of machine types available to the specified project.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. 
The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone" + ], + "response": { + "$ref": "MachineTypeList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "networks": { + "methods": { + "delete": { + "id": "compute.networks.delete", + "path": "{project}/global/networks/{network}", + "httpMethod": "DELETE", + "description": "Deletes the specified network.", + "parameters": { + "network": { + "type": "string", + "description": "Name of the network to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "network" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.networks.get", + "path": "{project}/global/networks/{network}", + "httpMethod": "GET", + "description": "Returns the specified network. 
Get a list of available networks by making a list() request.", + "parameters": { + "network": { + "type": "string", + "description": "Name of the network to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "network" + ], + "response": { + "$ref": "Network" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "id": "compute.networks.insert", + "path": "{project}/global/networks", + "httpMethod": "POST", + "description": "Creates a network in the specified project using the data included in the request.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "request": { + "$ref": "Network" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "id": "compute.networks.list", + "path": "{project}/global/networks", + "httpMethod": "GET", + "description": "Retrieves the list of networks available to the specified project.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "NetworkList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "projects": { + "methods": { + "get": { + "id": "compute.projects.get", + "path": "{project}", + "httpMethod": "GET", + "description": "Returns the specified Project resource.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "Project" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "moveDisk": { + "id": "compute.projects.moveDisk", + "path": "{project}/moveDisk", + "httpMethod": "POST", + "description": "Moves a persistent disk from one zone to another.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "request": { + "$ref": "DiskMoveRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "moveInstance": { + "id": "compute.projects.moveInstance", + "path": "{project}/moveInstance", + "httpMethod": "POST", + "description": "Moves an instance and its attached persistent disks from one zone to another.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "request": { + "$ref": "InstanceMoveRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "setCommonInstanceMetadata": { + "id": "compute.projects.setCommonInstanceMetadata", + "path": "{project}/setCommonInstanceMetadata", + "httpMethod": "POST", + "description": "Sets metadata common to all instances within the specified project using the data included in the request.", + "parameters": { + 
"project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "request": { + "$ref": "Metadata" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "setUsageExportBucket": { + "id": "compute.projects.setUsageExportBucket", + "path": "{project}/setUsageExportBucket", + "httpMethod": "POST", + "description": "Enables the usage export feature and sets the usage export bucket where reports are stored. If you provide an empty request body using this method, the usage export feature will be disabled.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "request": { + "$ref": "UsageExportLocation" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + } + } + }, + "regionOperations": { + "methods": { + "delete": { + "id": "compute.regionOperations.delete", + "path": "{project}/regions/{region}/operations/{operation}", + "httpMethod": "DELETE", + "description": "Deletes the specified region-specific Operations resource.", + "parameters": { + "operation": { + "type": "string", + "description": "Name of the Operations resource to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "operation" + ], + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.regionOperations.get", + "path": "{project}/regions/{region}/operations/{operation}", + "httpMethod": "GET", + "description": "Retrieves the specified region-specific Operations resource.", + "parameters": { + "operation": { + "type": "string", + "description": "Name of the Operations resource to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region for this request.", + "required": true, + "pattern": 
"[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "operation" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "list": { + "id": "compute.regionOperations.list", + "path": "{project}/regions/{region}/operations", + "httpMethod": "GET", + "description": "Retrieves a list of Operation resources contained within the specified region.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region" + ], + "response": { + "$ref": "OperationList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "regions": { + "methods": { + "get": { + "id": "compute.regions.get", + "path": "{project}/regions/{region}", + "httpMethod": "GET", + "description": "Returns the specified Region resource. Get a list of available regions by making a list() request.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region resource to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region" + ], + "response": { + "$ref": "Region" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "list": { + "id": "compute.regions.list", + "path": "{project}/regions", + "httpMethod": "GET", + "description": "Retrieves the list of region resources available to the specified project.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "RegionList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "routes": { + "methods": { + "delete": { + "id": "compute.routes.delete", + "path": "{project}/global/routes/{route}", + "httpMethod": "DELETE", + "description": "Deletes the specified Route resource.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "route": { + "type": "string", + "description": "Name of the Route resource to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "route" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.routes.get", + "path": "{project}/global/routes/{route}", + "httpMethod": "GET", + "description": "Returns the specified Route resource. 
Get a list of available routes by making a list() request.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "route": { + "type": "string", + "description": "Name of the Route resource to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "route" + ], + "response": { + "$ref": "Route" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "id": "compute.routes.insert", + "path": "{project}/global/routes", + "httpMethod": "POST", + "description": "Creates a Route resource in the specified project using the data included in the request.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "request": { + "$ref": "Route" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "id": "compute.routes.list", + "path": "{project}/global/routes", + "httpMethod": "GET", + "description": "Retrieves the list of Route resources available to the specified project.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "RouteList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "snapshots": { + "methods": { + "delete": { + "id": "compute.snapshots.delete", + "path": "{project}/global/snapshots/{snapshot}", + "httpMethod": "DELETE", + "description": "Deletes the specified Snapshot resource. Keep in mind that deleting a single snapshot might not necessarily delete all the data on that snapshot. If any data on the snapshot that is marked for deletion is needed for subsequent snapshots, the data will be moved to the next corresponding snapshot.\n\nFor more information, see Deleting snapshots.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "snapshot": { + "type": "string", + "description": "Name of the Snapshot resource to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "snapshot" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.snapshots.get", + "path": "{project}/global/snapshots/{snapshot}", + "httpMethod": "GET", + "description": "Returns the specified Snapshot resource. 
Get a list of available snapshots by making a list() request.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "snapshot": { + "type": "string", + "description": "Name of the Snapshot resource to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "snapshot" + ], + "response": { + "$ref": "Snapshot" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "list": { + "id": "compute.snapshots.list", + "path": "{project}/global/snapshots", + "httpMethod": "GET", + "description": "Retrieves the list of Snapshot resources contained within the specified project.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "SnapshotList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "sslCertificates": { + "methods": { + "delete": { + "id": "compute.sslCertificates.delete", + "path": "{project}/global/sslCertificates/{sslCertificate}", + "httpMethod": "DELETE", + "description": "Deletes the specified SslCertificate resource.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "sslCertificate": { + "type": "string", + "description": "Name of the SslCertificate resource to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "sslCertificate" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.sslCertificates.get", + "path": "{project}/global/sslCertificates/{sslCertificate}", + "httpMethod": "GET", + "description": "Returns the specified SslCertificate resource. 
Get a list of available SSL certificates by making a list() request.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "sslCertificate": { + "type": "string", + "description": "Name of the SslCertificate resource to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "sslCertificate" + ], + "response": { + "$ref": "SslCertificate" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "id": "compute.sslCertificates.insert", + "path": "{project}/global/sslCertificates", + "httpMethod": "POST", + "description": "Creates a SslCertificate resource in the specified project using the data included in the request.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "request": { + "$ref": "SslCertificate" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "id": "compute.sslCertificates.list", + "path": "{project}/global/sslCertificates", + "httpMethod": "GET", + "description": "Retrieves the list of SslCertificate resources available to the specified project.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "SslCertificateList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "subnetworks": { + "methods": { + "aggregatedList": { + "id": "compute.subnetworks.aggregatedList", + "path": "{project}/aggregated/subnetworks", + "httpMethod": "GET", + "description": "Retrieves an aggregated list of subnetworks.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "SubnetworkAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "delete": { + "id": "compute.subnetworks.delete", + "path": "{project}/regions/{region}/subnetworks/{subnetwork}", + "httpMethod": "DELETE", + "description": "Deletes the specified subnetwork.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "subnetwork": { + "type": "string", + "description": "Name of the Subnetwork resource to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "subnetwork" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.subnetworks.get", + "path": "{project}/regions/{region}/subnetworks/{subnetwork}", + "httpMethod": "GET", + "description": "Returns the specified subnetwork. 
Get a list of available subnetworks by making a list() request.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "subnetwork": { + "type": "string", + "description": "Name of the Subnetwork resource to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "subnetwork" + ], + "response": { + "$ref": "Subnetwork" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "id": "compute.subnetworks.insert", + "path": "{project}/regions/{region}/subnetworks", + "httpMethod": "POST", + "description": "Creates a subnetwork in the specified project using the data included in the request.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region" + ], + "request": { + "$ref": "Subnetwork" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "id": "compute.subnetworks.list", + "path": "{project}/regions/{region}/subnetworks", + "httpMethod": "GET", + "description": "Retrieves a list of subnetworks available to the specified project.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. 
For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region" + ], + "response": { + "$ref": "SubnetworkList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "targetHttpProxies": { + "methods": { + "delete": { + "id": "compute.targetHttpProxies.delete", + "path": "{project}/global/targetHttpProxies/{targetHttpProxy}", + "httpMethod": "DELETE", + "description": "Deletes the specified TargetHttpProxy resource.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "targetHttpProxy": { + "type": "string", + "description": "Name of the TargetHttpProxy resource to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "targetHttpProxy" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.targetHttpProxies.get", + "path": "{project}/global/targetHttpProxies/{targetHttpProxy}", + "httpMethod": "GET", + "description": "Returns the specified TargetHttpProxy resource. 
Get a list of available target HTTP proxies by making a list() request.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "targetHttpProxy": { + "type": "string", + "description": "Name of the TargetHttpProxy resource to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "targetHttpProxy" + ], + "response": { + "$ref": "TargetHttpProxy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "id": "compute.targetHttpProxies.insert", + "path": "{project}/global/targetHttpProxies", + "httpMethod": "POST", + "description": "Creates a TargetHttpProxy resource in the specified project using the data included in the request.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "request": { + "$ref": "TargetHttpProxy" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "id": "compute.targetHttpProxies.list", + "path": "{project}/global/targetHttpProxies", + "httpMethod": "GET", + "description": "Retrieves the list of TargetHttpProxy resources available to the specified project.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "TargetHttpProxyList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "setUrlMap": { + "id": "compute.targetHttpProxies.setUrlMap", + "path": "{project}/targetHttpProxies/{targetHttpProxy}/setUrlMap", + "httpMethod": "POST", + "description": "Changes the URL map for TargetHttpProxy.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "targetHttpProxy": { + "type": "string", + "description": "Name of the TargetHttpProxy to set a URL map for.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "targetHttpProxy" + ], + "request": { + "$ref": "UrlMapReference" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + } + } + }, + "targetHttpsProxies": { + "methods": { + "delete": { + "id": "compute.targetHttpsProxies.delete", + "path": "{project}/global/targetHttpsProxies/{targetHttpsProxy}", + "httpMethod": "DELETE", + "description": "Deletes the specified TargetHttpsProxy resource.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "targetHttpsProxy": { + "type": "string", + "description": "Name of the TargetHttpsProxy resource to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "targetHttpsProxy" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.targetHttpsProxies.get", + "path": "{project}/global/targetHttpsProxies/{targetHttpsProxy}", + "httpMethod": "GET", + "description": "Returns the specified TargetHttpsProxy resource. 
Get a list of available target HTTPS proxies by making a list() request.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "targetHttpsProxy": { + "type": "string", + "description": "Name of the TargetHttpsProxy resource to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "targetHttpsProxy" + ], + "response": { + "$ref": "TargetHttpsProxy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "id": "compute.targetHttpsProxies.insert", + "path": "{project}/global/targetHttpsProxies", + "httpMethod": "POST", + "description": "Creates a TargetHttpsProxy resource in the specified project using the data included in the request.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "request": { + "$ref": "TargetHttpsProxy" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "id": "compute.targetHttpsProxies.list", + "path": "{project}/global/targetHttpsProxies", + "httpMethod": "GET", + "description": "Retrieves the list of TargetHttpsProxy resources available to the specified project.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "TargetHttpsProxyList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "setSslCertificates": { + "id": "compute.targetHttpsProxies.setSslCertificates", + "path": "{project}/targetHttpsProxies/{targetHttpsProxy}/setSslCertificates", + "httpMethod": "POST", + "description": "Replaces SslCertificates for TargetHttpsProxy.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "targetHttpsProxy": { + "type": "string", + "description": "Name of the TargetHttpsProxy resource to set an SslCertificates resource for.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "targetHttpsProxy" + ], + "request": { + "$ref": "TargetHttpsProxiesSetSslCertificatesRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "setUrlMap": { + "id": "compute.targetHttpsProxies.setUrlMap", + "path": "{project}/targetHttpsProxies/{targetHttpsProxy}/setUrlMap", + "httpMethod": "POST", + "description": "Changes the URL map for TargetHttpsProxy.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "targetHttpsProxy": { + "type": "string", + "description": "Name of the TargetHttpsProxy resource whose URL map is to be set.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "targetHttpsProxy" + ], + "request": { + "$ref": "UrlMapReference" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + } + } + }, + "targetInstances": { + "methods": { + "aggregatedList": { + "id": "compute.targetInstances.aggregatedList", + "path": "{project}/aggregated/targetInstances", + "httpMethod": "GET", + "description": "Retrieves an aggregated list of target instances.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. 
Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "TargetInstanceAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "delete": { + "id": "compute.targetInstances.delete", + "path": "{project}/zones/{zone}/targetInstances/{targetInstance}", + "httpMethod": "DELETE", + "description": "Deletes the specified TargetInstance resource.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "targetInstance": { + "type": "string", + "description": "Name of the TargetInstance resource to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "zone": { + "type": "string", + "description": "Name of the zone scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "targetInstance" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.targetInstances.get", + "path": "{project}/zones/{zone}/targetInstances/{targetInstance}", + "httpMethod": "GET", + "description": "Returns the specified TargetInstance resource. 
Get a list of available target instances by making a list() request.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "targetInstance": { + "type": "string", + "description": "Name of the TargetInstance resource to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "zone": { + "type": "string", + "description": "Name of the zone scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "targetInstance" + ], + "response": { + "$ref": "TargetInstance" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "id": "compute.targetInstances.insert", + "path": "{project}/zones/{zone}/targetInstances", + "httpMethod": "POST", + "description": "Creates a TargetInstance resource in the specified project and zone using the data included in the request.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "Name of the zone scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone" + ], + "request": { + "$ref": "TargetInstance" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "id": "compute.targetInstances.list", + "path": "{project}/zones/{zone}/targetInstances", + "httpMethod": "GET", + "description": "Retrieves a list of TargetInstance resources available to the specified project and zone.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "Name of the zone scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone" + ], + "response": { + "$ref": "TargetInstanceList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "targetPools": { + "methods": { + "addHealthCheck": { + "id": "compute.targetPools.addHealthCheck", + "path": "{project}/regions/{region}/targetPools/{targetPool}/addHealthCheck", + "httpMethod": "POST", + "description": "Adds health check URLs to a target pool.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "targetPool": { + "type": "string", + "description": "Name of the target pool to add a health check to.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "targetPool" + ], + "request": { + "$ref": "TargetPoolsAddHealthCheckRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "addInstance": { + "id": "compute.targetPools.addInstance", + "path": "{project}/regions/{region}/targetPools/{targetPool}/addInstance", + "httpMethod": "POST", + "description": "Adds an instance to a target pool.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", 
+ "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "targetPool": { + "type": "string", + "description": "Name of the TargetPool resource to add instances to.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "targetPool" + ], + "request": { + "$ref": "TargetPoolsAddInstanceRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "aggregatedList": { + "id": "compute.targetPools.aggregatedList", + "path": "{project}/aggregated/targetPools", + "httpMethod": "GET", + "description": "Retrieves an aggregated list of target pools.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "TargetPoolAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "delete": { + "id": "compute.targetPools.delete", + "path": "{project}/regions/{region}/targetPools/{targetPool}", + "httpMethod": "DELETE", + "description": "Deletes the specified target pool.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "targetPool": { + "type": "string", + "description": "Name of the TargetPool resource to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "targetPool" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.targetPools.get", + "path": "{project}/regions/{region}/targetPools/{targetPool}", + "httpMethod": "GET", + "description": "Returns the specified target pool. 
Get a list of available target pools by making a list() request.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "targetPool": { + "type": "string", + "description": "Name of the TargetPool resource to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "targetPool" + ], + "response": { + "$ref": "TargetPool" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "getHealth": { + "id": "compute.targetPools.getHealth", + "path": "{project}/regions/{region}/targetPools/{targetPool}/getHealth", + "httpMethod": "POST", + "description": "Gets the most recent health check results for each IP of the instance that is referenced by the given target pool.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "targetPool": { + "type": "string", + "description": "Name of the TargetPool resource to which the queried instance belongs.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "targetPool" + ], + "request": { + "$ref": "InstanceReference" + }, + "response": { + "$ref": "TargetPoolInstanceHealth" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "id": "compute.targetPools.insert", + "path": "{project}/regions/{region}/targetPools", + "httpMethod": "POST", + "description": "Creates a target pool in the specified project and region using the data included in the request.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region" + ], + "request": { + "$ref": "TargetPool" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "id": "compute.targetPools.list", + "path": "{project}/regions/{region}/targetPools", + "httpMethod": "GET", + "description": "Retrieves a list of target pools available to the specified
project and region.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region" + ], + "response": { + "$ref": "TargetPoolList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "removeHealthCheck": { + "id": "compute.targetPools.removeHealthCheck", + "path": "{project}/regions/{region}/targetPools/{targetPool}/removeHealthCheck", + "httpMethod": "POST", + "description": "Removes a health check URL from a target pool.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "targetPool": { + "type": "string", + "description": "Name of the target pool to remove health checks from.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "targetPool" + ], + "request": { + "$ref": "TargetPoolsRemoveHealthCheckRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "removeInstance": { + "id": "compute.targetPools.removeInstance", + "path": "{project}/regions/{region}/targetPools/{targetPool}/removeInstance", + "httpMethod": "POST", + "description": "Removes an instance URL from a target pool.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "targetPool": { + "type": "string", + "description": "Name of the TargetPool resource to remove instances from.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "targetPool" + ], + "request": { + "$ref": "TargetPoolsRemoveInstanceRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "setBackup": { + "id": "compute.targetPools.setBackup", + "path": "{project}/regions/{region}/targetPools/{targetPool}/setBackup", + "httpMethod": "POST", + "description": "Changes a backup target pool's configurations.", + "parameters": { +
"failoverRatio": { + "type": "number", + "description": "New failoverRatio value for the target pool.", + "format": "float", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "targetPool": { + "type": "string", + "description": "Name of the TargetPool resource to set a backup pool for.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "targetPool" + ], + "request": { + "$ref": "TargetReference" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + } + } + }, + "targetVpnGateways": { + "methods": { + "aggregatedList": { + "id": "compute.targetVpnGateways.aggregatedList", + "path": "{project}/aggregated/targetVpnGateways", + "httpMethod": "GET", + "description": "Retrieves an aggregated list of target VPN gateways.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "TargetVpnGatewayAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "delete": { + "id": "compute.targetVpnGateways.delete", + "path": "{project}/regions/{region}/targetVpnGateways/{targetVpnGateway}", + "httpMethod": "DELETE", + "description": "Deletes the specified target VPN gateway.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "targetVpnGateway": { + "type": "string", + "description": "Name of the target VPN gateway to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "targetVpnGateway" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.targetVpnGateways.get", + "path": "{project}/regions/{region}/targetVpnGateways/{targetVpnGateway}", + "httpMethod": "GET", + "description": "Returns the specified target VPN gateway. 
Get a list of available target VPN gateways by making a list() request.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "targetVpnGateway": { + "type": "string", + "description": "Name of the target VPN gateway to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "targetVpnGateway" + ], + "response": { + "$ref": "TargetVpnGateway" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "id": "compute.targetVpnGateways.insert", + "path": "{project}/regions/{region}/targetVpnGateways", + "httpMethod": "POST", + "description": "Creates a target VPN gateway in the specified project and region using the data included in the request.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region" + ], + "request": { + "$ref": "TargetVpnGateway" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "id": "compute.targetVpnGateways.list", + "path": "{project}/regions/{region}/targetVpnGateways", + "httpMethod": "GET", + "description": "Retrieves a list of target VPN gateways available to the specified project and region.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region" + ], + "response": { + "$ref": "TargetVpnGatewayList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "urlMaps": { + "methods": { + "delete": { + "id": "compute.urlMaps.delete", + "path": "{project}/global/urlMaps/{urlMap}", + "httpMethod": "DELETE", + "description": "Deletes the specified UrlMap resource.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "urlMap": { + "type": "string", + "description": "Name of the UrlMap resource to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "urlMap" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.urlMaps.get", + "path": "{project}/global/urlMaps/{urlMap}", + "httpMethod": "GET", + "description": "Returns the specified UrlMap resource. 
Get a list of available URL maps by making a list() request.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "urlMap": { + "type": "string", + "description": "Name of the UrlMap resource to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "urlMap" + ], + "response": { + "$ref": "UrlMap" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "id": "compute.urlMaps.insert", + "path": "{project}/global/urlMaps", + "httpMethod": "POST", + "description": "Creates a UrlMap resource in the specified project using the data included in the request.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "request": { + "$ref": "UrlMap" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "id": "compute.urlMaps.list", + "path": "{project}/global/urlMaps", + "httpMethod": "GET", + "description": "Retrieves the list of UrlMap resources available to the specified project.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "UrlMapList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "patch": { + "id": "compute.urlMaps.patch", + "path": "{project}/global/urlMaps/{urlMap}", + "httpMethod": "PATCH", + "description": "Patches the specified UrlMap resource with the data included in the request. This method supports patch semantics.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "urlMap": { + "type": "string", + "description": "Name of the UrlMap resource to update.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "urlMap" + ], + "request": { + "$ref": "UrlMap" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "update": { + "id": "compute.urlMaps.update", + "path": "{project}/global/urlMaps/{urlMap}", + "httpMethod": "PUT", + "description": "Updates the entire content of the UrlMap resource.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "urlMap": { + "type": "string", + "description": "Name of the UrlMap resource to update.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "urlMap" + ], + "request": { + "$ref": "UrlMap" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "validate": { + "id": "compute.urlMaps.validate", + "path": "{project}/global/urlMaps/{urlMap}/validate", + "httpMethod": "POST", + "description": "Runs static validation for the UrlMap. In particular, the tests of the provided UrlMap will be run.
Calling this method does NOT create the UrlMap.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "urlMap": { + "type": "string", + "description": "Name of the UrlMap resource to be validated.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "urlMap" + ], + "request": { + "$ref": "UrlMapsValidateRequest" + }, + "response": { + "$ref": "UrlMapsValidateResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + } + } + }, + "vpnTunnels": { + "methods": { + "aggregatedList": { + "id": "compute.vpnTunnels.aggregatedList", + "path": "{project}/aggregated/vpnTunnels", + "httpMethod": "GET", + "description": "Retrieves an aggregated list of VPN tunnels.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use.
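[Editor's note] Because urlMaps.validate runs the UrlMap's embedded tests without creating anything, it works as a pre-flight check before insert or update. A hedged sketch against the vendored client; it assumes the UrlMapsValidateRequest wraps the candidate map in a Resource field (as the generated types in this diff model it), and the project name is a placeholder:

package sketch

import compute "google.golang.org/api/compute/v1"

// validateURLMap statically validates a UrlMap, including its embedded
// tests, without creating it.
func validateURLMap(svc *compute.Service, m *compute.UrlMap) (*compute.UrlMapsValidateResponse, error) {
	req := &compute.UrlMapsValidateRequest{Resource: m}
	return svc.UrlMaps.Validate("my-project", m.Name, req).Do()
}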
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "VpnTunnelAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "delete": { + "id": "compute.vpnTunnels.delete", + "path": "{project}/regions/{region}/vpnTunnels/{vpnTunnel}", + "httpMethod": "DELETE", + "description": "Deletes the specified VpnTunnel resource.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "vpnTunnel": { + "type": "string", + "description": "Name of the VpnTunnel resource to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "vpnTunnel" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.vpnTunnels.get", + "path": "{project}/regions/{region}/vpnTunnels/{vpnTunnel}", + "httpMethod": "GET", + "description": "Returns the specified VpnTunnel resource. 
Get a list of available VPN tunnels by making a list() request.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "vpnTunnel": { + "type": "string", + "description": "Name of the VpnTunnel resource to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "vpnTunnel" + ], + "response": { + "$ref": "VpnTunnel" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "id": "compute.vpnTunnels.insert", + "path": "{project}/regions/{region}/vpnTunnels", + "httpMethod": "POST", + "description": "Creates a VpnTunnel resource in the specified project and region using the data included in the request.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region" + ], + "request": { + "$ref": "VpnTunnel" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "id": "compute.vpnTunnels.list", + "path": "{project}/regions/{region}/vpnTunnels", + "httpMethod": "GET", + "description": "Retrieves a list of VpnTunnel resources contained in the specified project and region.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. 
For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region" + ], + "response": { + "$ref": "VpnTunnelList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "zoneOperations": { + "methods": { + "delete": { + "id": "compute.zoneOperations.delete", + "path": "{project}/zones/{zone}/operations/{operation}", + "httpMethod": "DELETE", + "description": "Deletes the specified zone-specific Operations resource.", + "parameters": { + "operation": { + "type": "string", + "description": "Name of the Operations resource to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "Name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "operation" + ], + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.zoneOperations.get", + "path": "{project}/zones/{zone}/operations/{operation}", + "httpMethod": "GET", + "description": "Retrieves the specified zone-specific Operations resource.", + "parameters": { + "operation": { + "type": "string", + "description": "Name of the Operations resource to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "Name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + 
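[Editor's note] The filter grammar spelled out above (field_name comparison_string literal_string, with eq/ne and RE2 literals that must match the whole field) maps onto a single chained Filter option in the generated client; the expression itself is ordinary string data. A sketch listing only the VPN tunnels whose name does not match a pattern, with placeholder project and region names:

package sketch

import (
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

// listFiltered applies a server-side filter to vpnTunnels.list.
func listFiltered(svc *compute.Service) {
	tunnels, err := svc.VpnTunnels.List("my-project", "us-central1").
		Filter("name ne test-.*").Do()
	if err != nil {
		log.Fatal(err)
	}
	for _, t := range tunnels.Items {
		fmt.Println(t.Name)
	}
}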
"location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "operation" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "list": { + "id": "compute.zoneOperations.list", + "path": "{project}/zones/{zone}/operations", + "httpMethod": "GET", + "description": "Retrieves a list of Operation resources contained within the specified zone.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "Name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone" + ], + "response": { + "$ref": "OperationList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "zones": { + "methods": { + "get": { + "id": "compute.zones.get", + "path": "{project}/zones/{zone}", + "httpMethod": "GET", + "description": "Returns the specified Zone resource. Get a list of available zones by making a list() request.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "Name of the zone resource to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone" + ], + "response": { + "$ref": "Zone" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "list": { + "id": "compute.zones.list", + "path": "{project}/zones", + "httpMethod": "GET", + "description": "Retrieves the list of Zone resources available to the specified project.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f).
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "ZoneList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + } + } +} diff --git a/Godeps/_workspace/src/google.golang.org/api/compute/v1/compute-gen.go b/Godeps/_workspace/src/google.golang.org/api/compute/v1/compute-gen.go new file mode 100644 index 000000000000..cf7442c2fdbd --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/api/compute/v1/compute-gen.go @@ -0,0 +1,40433 @@ +// Package compute provides access to the Compute Engine API. +// +// See https://developers.google.com/compute/docs/reference/latest/ +// +// Usage example: +// +// import "google.golang.org/api/compute/v1" +// ... +// computeService, err := compute.New(oauthHttpClient) +package compute + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + context "golang.org/x/net/context" + ctxhttp "golang.org/x/net/context/ctxhttp" + gensupport "google.golang.org/api/gensupport" + googleapi "google.golang.org/api/googleapi" + "io" + "net/http" + "net/url" + "strconv" + "strings" +) + +// Always reference these packages, just in case the auto-generated code +// below doesn't. +var _ = bytes.NewBuffer +var _ = strconv.Itoa +var _ = fmt.Sprintf +var _ = json.NewDecoder +var _ = io.Copy +var _ = url.Parse +var _ = gensupport.MarshalJSON +var _ = googleapi.Version +var _ = errors.New +var _ = strings.Replace +var _ = context.Canceled +var _ = ctxhttp.Do + +const apiId = "compute:v1" +const apiName = "compute" +const apiVersion = "v1" +const basePath = "https://www.googleapis.com/compute/v1/projects/" + +// OAuth2 scopes used by this API. 
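[Editor's note] As a usage sketch for the scope constants defined just below (not part of the vendored file): one common way to obtain an authorized *http.Client is Application Default Credentials from golang.org/x/oauth2/google, which is then handed to compute.New exactly as the package header's example shows. The credential source (environment or GCE metadata server) is resolved at runtime:

package main

import (
	"log"

	"golang.org/x/net/context"
	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	// DefaultClient resolves credentials from GOOGLE_APPLICATION_CREDENTIALS
	// or the GCE metadata server and scopes the resulting tokens.
	client, err := google.DefaultClient(ctx, compute.ComputeScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(client)
	if err != nil {
		log.Fatal(err)
	}
	_ = svc // ready for the call sketches shown earlier
}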
+const ( + // View and manage your data across Google Cloud Platform services + CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform" + + // View and manage your Google Compute Engine resources + ComputeScope = "https://www.googleapis.com/auth/compute" + + // View your Google Compute Engine resources + ComputeReadonlyScope = "https://www.googleapis.com/auth/compute.readonly" + + // Manage your data and permissions in Google Cloud Storage + DevstorageFullControlScope = "https://www.googleapis.com/auth/devstorage.full_control" + + // View your data in Google Cloud Storage + DevstorageReadOnlyScope = "https://www.googleapis.com/auth/devstorage.read_only" + + // Manage your data in Google Cloud Storage + DevstorageReadWriteScope = "https://www.googleapis.com/auth/devstorage.read_write" +) + +func New(client *http.Client) (*Service, error) { + if client == nil { + return nil, errors.New("client is nil") + } + s := &Service{client: client, BasePath: basePath} + s.Addresses = NewAddressesService(s) + s.Autoscalers = NewAutoscalersService(s) + s.BackendServices = NewBackendServicesService(s) + s.DiskTypes = NewDiskTypesService(s) + s.Disks = NewDisksService(s) + s.Firewalls = NewFirewallsService(s) + s.ForwardingRules = NewForwardingRulesService(s) + s.GlobalAddresses = NewGlobalAddressesService(s) + s.GlobalForwardingRules = NewGlobalForwardingRulesService(s) + s.GlobalOperations = NewGlobalOperationsService(s) + s.HttpHealthChecks = NewHttpHealthChecksService(s) + s.HttpsHealthChecks = NewHttpsHealthChecksService(s) + s.Images = NewImagesService(s) + s.InstanceGroupManagers = NewInstanceGroupManagersService(s) + s.InstanceGroups = NewInstanceGroupsService(s) + s.InstanceTemplates = NewInstanceTemplatesService(s) + s.Instances = NewInstancesService(s) + s.Licenses = NewLicensesService(s) + s.MachineTypes = NewMachineTypesService(s) + s.Networks = NewNetworksService(s) + s.Projects = NewProjectsService(s) + s.RegionOperations = NewRegionOperationsService(s) + s.Regions = NewRegionsService(s) + s.Routes = NewRoutesService(s) + s.Snapshots = NewSnapshotsService(s) + s.SslCertificates = NewSslCertificatesService(s) + s.Subnetworks = NewSubnetworksService(s) + s.TargetHttpProxies = NewTargetHttpProxiesService(s) + s.TargetHttpsProxies = NewTargetHttpsProxiesService(s) + s.TargetInstances = NewTargetInstancesService(s) + s.TargetPools = NewTargetPoolsService(s) + s.TargetVpnGateways = NewTargetVpnGatewaysService(s) + s.UrlMaps = NewUrlMapsService(s) + s.VpnTunnels = NewVpnTunnelsService(s) + s.ZoneOperations = NewZoneOperationsService(s) + s.Zones = NewZonesService(s) + return s, nil +} + +type Service struct { + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + + Addresses *AddressesService + + Autoscalers *AutoscalersService + + BackendServices *BackendServicesService + + DiskTypes *DiskTypesService + + Disks *DisksService + + Firewalls *FirewallsService + + ForwardingRules *ForwardingRulesService + + GlobalAddresses *GlobalAddressesService + + GlobalForwardingRules *GlobalForwardingRulesService + + GlobalOperations *GlobalOperationsService + + HttpHealthChecks *HttpHealthChecksService + + HttpsHealthChecks *HttpsHealthChecksService + + Images *ImagesService + + InstanceGroupManagers *InstanceGroupManagersService + + InstanceGroups *InstanceGroupsService + + InstanceTemplates *InstanceTemplatesService + + Instances *InstancesService + + Licenses *LicensesService + + MachineTypes *MachineTypesService + + 
Networks *NetworksService + + Projects *ProjectsService + + RegionOperations *RegionOperationsService + + Regions *RegionsService + + Routes *RoutesService + + Snapshots *SnapshotsService + + SslCertificates *SslCertificatesService + + Subnetworks *SubnetworksService + + TargetHttpProxies *TargetHttpProxiesService + + TargetHttpsProxies *TargetHttpsProxiesService + + TargetInstances *TargetInstancesService + + TargetPools *TargetPoolsService + + TargetVpnGateways *TargetVpnGatewaysService + + UrlMaps *UrlMapsService + + VpnTunnels *VpnTunnelsService + + ZoneOperations *ZoneOperationsService + + Zones *ZonesService +} + +func (s *Service) userAgent() string { + if s.UserAgent == "" { + return googleapi.UserAgent + } + return googleapi.UserAgent + " " + s.UserAgent +} + +func NewAddressesService(s *Service) *AddressesService { + rs := &AddressesService{s: s} + return rs +} + +type AddressesService struct { + s *Service +} + +func NewAutoscalersService(s *Service) *AutoscalersService { + rs := &AutoscalersService{s: s} + return rs +} + +type AutoscalersService struct { + s *Service +} + +func NewBackendServicesService(s *Service) *BackendServicesService { + rs := &BackendServicesService{s: s} + return rs +} + +type BackendServicesService struct { + s *Service +} + +func NewDiskTypesService(s *Service) *DiskTypesService { + rs := &DiskTypesService{s: s} + return rs +} + +type DiskTypesService struct { + s *Service +} + +func NewDisksService(s *Service) *DisksService { + rs := &DisksService{s: s} + return rs +} + +type DisksService struct { + s *Service +} + +func NewFirewallsService(s *Service) *FirewallsService { + rs := &FirewallsService{s: s} + return rs +} + +type FirewallsService struct { + s *Service +} + +func NewForwardingRulesService(s *Service) *ForwardingRulesService { + rs := &ForwardingRulesService{s: s} + return rs +} + +type ForwardingRulesService struct { + s *Service +} + +func NewGlobalAddressesService(s *Service) *GlobalAddressesService { + rs := &GlobalAddressesService{s: s} + return rs +} + +type GlobalAddressesService struct { + s *Service +} + +func NewGlobalForwardingRulesService(s *Service) *GlobalForwardingRulesService { + rs := &GlobalForwardingRulesService{s: s} + return rs +} + +type GlobalForwardingRulesService struct { + s *Service +} + +func NewGlobalOperationsService(s *Service) *GlobalOperationsService { + rs := &GlobalOperationsService{s: s} + return rs +} + +type GlobalOperationsService struct { + s *Service +} + +func NewHttpHealthChecksService(s *Service) *HttpHealthChecksService { + rs := &HttpHealthChecksService{s: s} + return rs +} + +type HttpHealthChecksService struct { + s *Service +} + +func NewHttpsHealthChecksService(s *Service) *HttpsHealthChecksService { + rs := &HttpsHealthChecksService{s: s} + return rs +} + +type HttpsHealthChecksService struct { + s *Service +} + +func NewImagesService(s *Service) *ImagesService { + rs := &ImagesService{s: s} + return rs +} + +type ImagesService struct { + s *Service +} + +func NewInstanceGroupManagersService(s *Service) *InstanceGroupManagersService { + rs := &InstanceGroupManagersService{s: s} + return rs +} + +type InstanceGroupManagersService struct { + s *Service +} + +func NewInstanceGroupsService(s *Service) *InstanceGroupsService { + rs := &InstanceGroupsService{s: s} + return rs +} + +type InstanceGroupsService struct { + s *Service +} + +func NewInstanceTemplatesService(s *Service) *InstanceTemplatesService { + rs := &InstanceTemplatesService{s: s} + return rs +} + +type InstanceTemplatesService 
struct { + s *Service +} + +func NewInstancesService(s *Service) *InstancesService { + rs := &InstancesService{s: s} + return rs +} + +type InstancesService struct { + s *Service +} + +func NewLicensesService(s *Service) *LicensesService { + rs := &LicensesService{s: s} + return rs +} + +type LicensesService struct { + s *Service +} + +func NewMachineTypesService(s *Service) *MachineTypesService { + rs := &MachineTypesService{s: s} + return rs +} + +type MachineTypesService struct { + s *Service +} + +func NewNetworksService(s *Service) *NetworksService { + rs := &NetworksService{s: s} + return rs +} + +type NetworksService struct { + s *Service +} + +func NewProjectsService(s *Service) *ProjectsService { + rs := &ProjectsService{s: s} + return rs +} + +type ProjectsService struct { + s *Service +} + +func NewRegionOperationsService(s *Service) *RegionOperationsService { + rs := &RegionOperationsService{s: s} + return rs +} + +type RegionOperationsService struct { + s *Service +} + +func NewRegionsService(s *Service) *RegionsService { + rs := &RegionsService{s: s} + return rs +} + +type RegionsService struct { + s *Service +} + +func NewRoutesService(s *Service) *RoutesService { + rs := &RoutesService{s: s} + return rs +} + +type RoutesService struct { + s *Service +} + +func NewSnapshotsService(s *Service) *SnapshotsService { + rs := &SnapshotsService{s: s} + return rs +} + +type SnapshotsService struct { + s *Service +} + +func NewSslCertificatesService(s *Service) *SslCertificatesService { + rs := &SslCertificatesService{s: s} + return rs +} + +type SslCertificatesService struct { + s *Service +} + +func NewSubnetworksService(s *Service) *SubnetworksService { + rs := &SubnetworksService{s: s} + return rs +} + +type SubnetworksService struct { + s *Service +} + +func NewTargetHttpProxiesService(s *Service) *TargetHttpProxiesService { + rs := &TargetHttpProxiesService{s: s} + return rs +} + +type TargetHttpProxiesService struct { + s *Service +} + +func NewTargetHttpsProxiesService(s *Service) *TargetHttpsProxiesService { + rs := &TargetHttpsProxiesService{s: s} + return rs +} + +type TargetHttpsProxiesService struct { + s *Service +} + +func NewTargetInstancesService(s *Service) *TargetInstancesService { + rs := &TargetInstancesService{s: s} + return rs +} + +type TargetInstancesService struct { + s *Service +} + +func NewTargetPoolsService(s *Service) *TargetPoolsService { + rs := &TargetPoolsService{s: s} + return rs +} + +type TargetPoolsService struct { + s *Service +} + +func NewTargetVpnGatewaysService(s *Service) *TargetVpnGatewaysService { + rs := &TargetVpnGatewaysService{s: s} + return rs +} + +type TargetVpnGatewaysService struct { + s *Service +} + +func NewUrlMapsService(s *Service) *UrlMapsService { + rs := &UrlMapsService{s: s} + return rs +} + +type UrlMapsService struct { + s *Service +} + +func NewVpnTunnelsService(s *Service) *VpnTunnelsService { + rs := &VpnTunnelsService{s: s} + return rs +} + +type VpnTunnelsService struct { + s *Service +} + +func NewZoneOperationsService(s *Service) *ZoneOperationsService { + rs := &ZoneOperationsService{s: s} + return rs +} + +type ZoneOperationsService struct { + s *Service +} + +func NewZonesService(s *Service) *ZonesService { + rs := &ZonesService{s: s} + return rs +} + +type ZonesService struct { + s *Service +} + +// AccessConfig: An access configuration attached to an instance's +// network interface. +type AccessConfig struct { + // Kind: [Output Only] Type of the resource. 
Always compute#accessConfig + // for access configs. + Kind string `json:"kind,omitempty"` + + // Name: Name of this access configuration. + Name string `json:"name,omitempty"` + + // NatIP: An external IP address associated with this instance. Specify + // an unused static external IP address available to the project or + // leave this field undefined to use an IP from a shared ephemeral IP + // address pool. If you specify a static external IP address, it must + // live in the same region as the zone of the instance. + NatIP string `json:"natIP,omitempty"` + + // Type: The type of configuration. The default and only option is + // ONE_TO_ONE_NAT. + // + // Possible values: + // "ONE_TO_ONE_NAT" (default) + Type string `json:"type,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Kind") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *AccessConfig) MarshalJSON() ([]byte, error) { + type noMethod AccessConfig + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// Address: A reserved address resource. +type Address struct { + // Address: The static external IP address represented by this resource. + Address string `json:"address,omitempty"` + + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] Type of the resource. Always compute#address for + // addresses. + Kind string `json:"kind,omitempty"` + + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // Region: [Output Only] URL of the region where the regional address + // resides. This field is not applicable to global addresses. + Region string `json:"region,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // Status: [Output Only] The status of the address, which can be either + // IN_USE or RESERVED. An address that is RESERVED is currently reserved + // and available to use. An IN_USE address is currently being used by + // another resource and is not available. + // + // Possible values: + // "IN_USE" + // "RESERVED" + Status string `json:"status,omitempty"` + + // Users: [Output Only] The URLs of the resources that are using this + // address. 
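+ + // Example (illustrative): a helper enforcing the RFC1035 name rule quoted + // above before a resource is created; the regular expression is taken + // verbatim from the field documentation: + // + // var rfc1035 = regexp.MustCompile(`^[a-z]([-a-z0-9]*[a-z0-9])?$`) + // + // func validResourceName(n string) bool { + // return len(n) <= 63 && rfc1035.MatchString(n) + // } +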
+ Users []string `json:"users,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Address") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *Address) MarshalJSON() ([]byte, error) { + type noMethod Address + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type AddressAggregatedList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: [Output Only] A map of scoped address lists. + Items map[string]AddressesScopedList `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always + // compute#addressAggregatedList for aggregated lists of addresses. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *AddressAggregatedList) MarshalJSON() ([]byte, error) { + type noMethod AddressAggregatedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// AddressList: Contains a list of addresses. +type AddressList struct { + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id string `json:"id,omitempty"` + + // Items: [Output Only] A list of addresses. + Items []*Address `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always compute#addressList for + // lists of addresses. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. 
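+ + // Example (illustrative): draining a paginated list with nextPageToken as + // described above; project and region are placeholders, and errors are only + // logged for brevity: + // + // call := svc.Addresses.List(project, region) + // for { + // page, err := call.Do() + // if err != nil { + // log.Fatal(err) + // } + // for _, addr := range page.Items { + // fmt.Println(addr.Name, addr.Status) + // } + // if page.NextPageToken == "" { + // break // no further pages + // } + // call.PageToken(page.NextPageToken) + // } +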
+ SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *AddressList) MarshalJSON() ([]byte, error) { + type noMethod AddressList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type AddressesScopedList struct { + // Addresses: [Output Only] List of addresses contained in this scope. + Addresses []*Address `json:"addresses,omitempty"` + + // Warning: [Output Only] Informational warning which replaces the list + // of addresses when the list is empty. + Warning *AddressesScopedListWarning `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Addresses") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *AddressesScopedList) MarshalJSON() ([]byte, error) { + type noMethod AddressesScopedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// AddressesScopedListWarning: [Output Only] Informational warning which +// replaces the list of addresses when the list is empty. +type AddressesScopedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*AddressesScopedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
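+ + // Example (illustrative): using ForceSendFields as described above to clear + // a field in a Patch request; without it the empty Description would be + // dropped by omitempty: + // + // addr := &Address{Description: ""} + // addr.ForceSendFields = []string{"Description"} + // body, _ := addr.MarshalJSON() // body now contains "description": "" + // + // The noMethod alias in each MarshalJSON implementation is what makes this + // safe: converting to a method-less copy of the type lets + // gensupport.MarshalJSON re-encode the value without recursing back into + // MarshalJSON itself. +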
+ ForceSendFields []string `json:"-"` +} + +func (s *AddressesScopedListWarning) MarshalJSON() ([]byte, error) { + type noMethod AddressesScopedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type AddressesScopedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *AddressesScopedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod AddressesScopedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// AttachedDisk: An instance-attached disk resource. +type AttachedDisk struct { + // AutoDelete: Specifies whether the disk will be auto-deleted when the + // instance is deleted (but not when the disk is detached from the + // instance). + AutoDelete bool `json:"autoDelete,omitempty"` + + // Boot: Indicates that this is a boot disk. The virtual machine will + // use the first partition of the disk for its root filesystem. + Boot bool `json:"boot,omitempty"` + + // DeviceName: Specifies a unique device name of your choice that is + // reflected into the /dev/disk/by-id/google-* tree of a Linux operating + // system running within the instance. This name can be used to + // reference the device for mounting, resizing, and so on, from within + // the instance. + // + // If not specified, the server chooses a default device name to apply + // to this disk, in the form persistent-disks-x, where x is a number + // assigned by Google Compute Engine. This field is only applicable for + // persistent disks. + DeviceName string `json:"deviceName,omitempty"` + + // Index: Assigns a zero-based index to this disk, where 0 is reserved + // for the boot disk. For example, if you have many disks attached to an + // instance, each disk would have a unique index number. If not + // specified, the server will choose an appropriate value. + Index int64 `json:"index,omitempty"` + + // InitializeParams: [Input Only] Specifies the parameters for a new + // disk that will be created alongside the new instance. Use + // initialization parameters to create boot disks or local SSDs attached + // to the new instance. + // + // This property is mutually exclusive with the source property; you can + // only define one or the other, but not both. 
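+ + // Example (illustrative): a boot disk defined inline for a new instance, + // honoring the mutual exclusion above (InitializeParams set, Source empty); + // the image path reuses the family form shown in the field docs: + // + // boot := &AttachedDisk{ + // Boot: true, + // AutoDelete: true, + // Type: "PERSISTENT", + // Mode: "READ_WRITE", + // InitializeParams: &AttachedDiskInitializeParams{ + // SourceImage: "projects/debian-cloud/global/images/family/debian-8", + // DiskSizeGb: 10, + // }, + // } +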
+ InitializeParams *AttachedDiskInitializeParams `json:"initializeParams,omitempty"` + + // Interface: Specifies the disk interface to use for attaching this + // disk, which is either SCSI or NVME. The default is SCSI. Persistent + // disks must always use SCSI and the request will fail if you attempt + // to attach a persistent disk in any other format than SCSI. Local SSDs + // can use either NVME or SCSI. For performance characteristics of SCSI + // over NVMe, see Local SSD performance. + // + // Possible values: + // "NVME" + // "SCSI" + Interface string `json:"interface,omitempty"` + + // Kind: [Output Only] Type of the resource. Always compute#attachedDisk + // for attached disks. + Kind string `json:"kind,omitempty"` + + // Licenses: [Output Only] Any valid publicly visible licenses. + Licenses []string `json:"licenses,omitempty"` + + // Mode: The mode in which to attach this disk, either READ_WRITE or + // READ_ONLY. If not specified, the default is to attach the disk in + // READ_WRITE mode. + // + // Possible values: + // "READ_ONLY" + // "READ_WRITE" + Mode string `json:"mode,omitempty"` + + // Source: Specifies a valid partial or full URL to an existing + // Persistent Disk resource. This field is only applicable for + // persistent disks. + Source string `json:"source,omitempty"` + + // Type: Specifies the type of the disk, either SCRATCH or PERSISTENT. + // If not specified, the default is PERSISTENT. + // + // Possible values: + // "PERSISTENT" + // "SCRATCH" + Type string `json:"type,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AutoDelete") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *AttachedDisk) MarshalJSON() ([]byte, error) { + type noMethod AttachedDisk + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// AttachedDiskInitializeParams: [Input Only] Specifies the parameters +// for a new disk that will be created alongside the new instance. Use +// initialization parameters to create boot disks or local SSDs attached +// to the new instance. +// +// This property is mutually exclusive with the source property; you can +// only define one or the other, but not both. +type AttachedDiskInitializeParams struct { + // DiskName: Specifies the disk name. If not specified, the default is + // to use the name of the instance. + DiskName string `json:"diskName,omitempty"` + + // DiskSizeGb: Specifies the size of the disk in base-2 GB. + DiskSizeGb int64 `json:"diskSizeGb,omitempty,string"` + + // DiskType: Specifies the disk type to use to create the instance. If + // not specified, the default is pd-standard, specified using the full + // URL. For + // example: + // + // https://www.googleapis.com/compute/v1/projects/project/zones + // /zone/diskTypes/pd-standard + // + // Other values include pd-ssd and local-ssd. If you define this field, + // you can provide either the full or partial URL. 
For example, the + // following are valid values: + // - + // https://www.googleapis.com/compute/v1/projects/project/zones/zone/diskTypes/diskType + // - projects/project/zones/zone/diskTypes/diskType + // - zones/zone/diskTypes/diskType + DiskType string `json:"diskType,omitempty"` + + // SourceImage: The source image used to create this disk. If the source + // image is deleted, this field will not be set. + // + // To create a disk with one of the public operating system images, + // specify the image by its family name. For example, specify + // family/debian-8 to use the latest Debian 8 + // image: + // + // projects/debian-cloud/global/images/family/debian-8 + // + // Alternatively, use a specific version of a public operating system + // image: + // + // projects/debian-cloud/global/images/debian-8-jessie-vYYYYMMDD + // + // To create a disk with a private image that you created, specify the + // image name in the following format: + // + // global/images/my-private-image + // + // You can also specify a private image by its image family, which + // returns the latest version of the image in that family. Replace the + // image name with + // family/family-name: + // + // global/images/family/my-private-family + SourceImage string `json:"sourceImage,omitempty"` + + // ForceSendFields is a list of field names (e.g. "DiskName") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *AttachedDiskInitializeParams) MarshalJSON() ([]byte, error) { + type noMethod AttachedDiskInitializeParams + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// Autoscaler: Represents an Autoscaler resource. Autoscalers allow you +// to automatically scale virtual machine instances in managed instance +// groups according to an autoscaling policy that you define. For more +// information, read Autoscaling Groups of Instances. +type Autoscaler struct { + // AutoscalingPolicy: The configuration parameters for the autoscaling + // algorithm. You can define one or more of the policies for an + // autoscaler: cpuUtilization, customMetricUtilizations, and + // loadBalancingUtilization. + // + // If none of these are specified, the default will be to autoscale + // based on cpuUtilization to 0.8 or 80%. + AutoscalingPolicy *AutoscalingPolicy `json:"autoscalingPolicy,omitempty"` + + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] Type of the resource. Always compute#autoscaler + // for autoscalers. + Kind string `json:"kind,omitempty"` + + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. 
Specifically, the name must be 1-63 characters long and + // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // Target: URL of the managed instance group that this autoscaler will + // scale. + Target string `json:"target,omitempty"` + + // Zone: [Output Only] URL of the zone where the instance group resides. + Zone string `json:"zone,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "AutoscalingPolicy") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *Autoscaler) MarshalJSON() ([]byte, error) { + type noMethod Autoscaler + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type AutoscalerAggregatedList struct { + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id string `json:"id,omitempty"` + + // Items: A map of scoped autoscaler lists. + Items map[string]AutoscalersScopedList `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always + // compute#autoscalerAggregatedList for aggregated lists of autoscalers. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *AutoscalerAggregatedList) MarshalJSON() ([]byte, error) { + type noMethod AutoscalerAggregatedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// AutoscalerList: Contains a list of Autoscaler resources. +type AutoscalerList struct { + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id string `json:"id,omitempty"` + + // Items: A list of Autoscaler resources. 
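+ + // Example (illustrative): creating an autoscaler for a managed instance + // group; igmURL and policy are placeholders, and Do returns an Operation to + // poll: + // + // op, err := svc.Autoscalers.Insert(project, zone, &Autoscaler{ + // Name: "web-autoscaler", + // Target: igmURL, // URL of the managed instance group + // AutoscalingPolicy: policy, // see AutoscalingPolicy below + // }).Do() +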
+ Items []*Autoscaler `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always compute#autoscalerList + // for lists of autoscalers. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *AutoscalerList) MarshalJSON() ([]byte, error) { + type noMethod AutoscalerList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type AutoscalersScopedList struct { + // Autoscalers: [Output Only] List of autoscalers contained in this + // scope. + Autoscalers []*Autoscaler `json:"autoscalers,omitempty"` + + // Warning: [Output Only] Informational warning which replaces the list + // of autoscalers when the list is empty. + Warning *AutoscalersScopedListWarning `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Autoscalers") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *AutoscalersScopedList) MarshalJSON() ([]byte, error) { + type noMethod AutoscalersScopedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// AutoscalersScopedListWarning: [Output Only] Informational warning +// which replaces the list of autoscalers when the list is empty. +type AutoscalersScopedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. 
+ // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*AutoscalersScopedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *AutoscalersScopedListWarning) MarshalJSON() ([]byte, error) { + type noMethod AutoscalersScopedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type AutoscalersScopedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *AutoscalersScopedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod AutoscalersScopedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// AutoscalingPolicy: Cloud Autoscaler policy. +type AutoscalingPolicy struct { + // CoolDownPeriodSec: The number of seconds that the autoscaler should + // wait before it starts collecting information from a new instance. + // This prevents the autoscaler from collecting information when the + // instance is initializing, during which the collected usage would not + // be reliable. The default time autoscaler waits is 60 + // seconds. + // + // Virtual machine initialization times might vary because of numerous + // factors. We recommend that you test how long an instance may take to + // initialize. To do this, create an instance and time the startup + // process. + CoolDownPeriodSec int64 `json:"coolDownPeriodSec,omitempty"` + + // CpuUtilization: Defines the CPU utilization policy that allows the + // autoscaler to scale based on the average CPU utilization of a managed + // instance group. + CpuUtilization *AutoscalingPolicyCpuUtilization `json:"cpuUtilization,omitempty"` + + // CustomMetricUtilizations: Configuration parameters of autoscaling + // based on a custom metric. 
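+ + // Example (illustrative): a policy combining the fields described here; if + // all three policy blocks were omitted, the default noted above (CPU target + // of 0.8) would apply instead: + // + // policy := &AutoscalingPolicy{ + // MinNumReplicas: 2, + // MaxNumReplicas: 10, + // CoolDownPeriodSec: 60, // wait before sampling new instances + // CpuUtilization: &AutoscalingPolicyCpuUtilization{ + // UtilizationTarget: 0.6, + // }, + // } +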
+ CustomMetricUtilizations []*AutoscalingPolicyCustomMetricUtilization `json:"customMetricUtilizations,omitempty"` + + // LoadBalancingUtilization: Configuration parameters of autoscaling + // based on the load balancer. + LoadBalancingUtilization *AutoscalingPolicyLoadBalancingUtilization `json:"loadBalancingUtilization,omitempty"` + + // MaxNumReplicas: The maximum number of instances that the autoscaler + // can scale up to. This is required when creating or updating an + // autoscaler. The maximum number of replicas should not be lower than + // the minimum number of replicas. + MaxNumReplicas int64 `json:"maxNumReplicas,omitempty"` + + // MinNumReplicas: The minimum number of replicas that the autoscaler + // can scale down to. This cannot be less than 0. If not provided, the + // autoscaler will choose a default value depending on the maximum + // number of instances allowed. + MinNumReplicas int64 `json:"minNumReplicas,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CoolDownPeriodSec") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *AutoscalingPolicy) MarshalJSON() ([]byte, error) { + type noMethod AutoscalingPolicy + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// AutoscalingPolicyCpuUtilization: CPU utilization policy. +type AutoscalingPolicyCpuUtilization struct { + // UtilizationTarget: The target CPU utilization that the autoscaler + // should maintain. Must be a float value in the range (0, 1]. If not + // specified, the default is 0.8. + // + // If the CPU level is below the target utilization, the autoscaler + // scales down the number of instances until it reaches the minimum + // number of instances you specified or until the average CPU of your + // instances reaches the target utilization. + // + // If the average CPU is above the target utilization, the autoscaler + // scales up until it reaches the maximum number of instances you + // specified or until the average utilization reaches the target + // utilization. + UtilizationTarget float64 `json:"utilizationTarget,omitempty"` + + // ForceSendFields is a list of field names (e.g. "UtilizationTarget") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *AutoscalingPolicyCpuUtilization) MarshalJSON() ([]byte, error) { + type noMethod AutoscalingPolicyCpuUtilization + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// AutoscalingPolicyCustomMetricUtilization: Custom utilization metric +// policy. +type AutoscalingPolicyCustomMetricUtilization struct { + // Metric: The identifier of the Cloud Monitoring metric. The metric + // cannot have negative values and should be a utilization metric, which + // means that the number of virtual machines handling requests should + // increase or decrease proportionally to the metric.
The metric must + // also have a label of compute.googleapis.com/resource_id with the + // value of the instance's unique ID, although this alone does not + // guarantee that the metric is valid. + // + // For example, the following is a valid + // metric: + // compute.googleapis.com/instance/network/received_bytes_count + // + // + // + // The following is not a valid metric because it does not increase or + // decrease based on + // usage: + // compute.googleapis.com/instance/cpu/reserved_cores + Metric string `json:"metric,omitempty"` + + // UtilizationTarget: Target value of the metric which autoscaler should + // maintain. Must be a positive value. + UtilizationTarget float64 `json:"utilizationTarget,omitempty"` + + // UtilizationTargetType: Defines how target utilization value is + // expressed for a Cloud Monitoring metric. Either GAUGE, + // DELTA_PER_SECOND, or DELTA_PER_MINUTE. If not specified, the default + // is GAUGE. + // + // Possible values: + // "DELTA_PER_MINUTE" + // "DELTA_PER_SECOND" + // "GAUGE" + UtilizationTargetType string `json:"utilizationTargetType,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Metric") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *AutoscalingPolicyCustomMetricUtilization) MarshalJSON() ([]byte, error) { + type noMethod AutoscalingPolicyCustomMetricUtilization + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// AutoscalingPolicyLoadBalancingUtilization: Configuration parameters +// of autoscaling based on load balancing. +type AutoscalingPolicyLoadBalancingUtilization struct { + // UtilizationTarget: Fraction of backend capacity utilization (set in + // HTTP(s) load balancing configuration) that autoscaler should + // maintain. Must be a positive float value. If not defined, the default + // is 0.8. + UtilizationTarget float64 `json:"utilizationTarget,omitempty"` + + // ForceSendFields is a list of field names (e.g. "UtilizationTarget") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *AutoscalingPolicyLoadBalancingUtilization) MarshalJSON() ([]byte, error) { + type noMethod AutoscalingPolicyLoadBalancingUtilization + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// Backend: Message containing information of one individual backend. +type Backend struct { + // BalancingMode: Specifies the balancing mode for this backend. For + // global HTTP(S) load balancing, the default is UTILIZATION. Valid + // values are UTILIZATION and RATE. + // + // Possible values: + // "RATE" + // "UTILIZATION" + BalancingMode string `json:"balancingMode,omitempty"` + + // CapacityScaler: A multiplier applied to the group's maximum servicing + // capacity (either UTILIZATION or RATE). 
Default value is 1, which + // means the group will serve up to 100% of its configured CPU or RPS + // (depending on balancingMode). A setting of 0 means the group is + // completely drained, offering 0% of its available CPU or RPS. Valid + // range is [0.0, 1.0]. + CapacityScaler float64 `json:"capacityScaler,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // Group: The fully-qualified URL of a zonal Instance Group resource. + // This instance group defines the list of instances that serve traffic. + // Member virtual machine instances from each instance group must live + // in the same zone as the instance group itself. No two backends in a + // backend service are allowed to use the same Instance Group + // resource. + // + // Note that you must specify an Instance Group resource using the + // fully-qualified URL, rather than a partial URL. + Group string `json:"group,omitempty"` + + // MaxRate: The max requests per second (RPS) of the group. Can be used + // with either RATE or UTILIZATION balancing modes, but is required in + // RATE mode. For RATE mode, either maxRate or maxRatePerInstance must + // be set. + MaxRate int64 `json:"maxRate,omitempty"` + + // MaxRatePerInstance: The max requests per second (RPS) that a single + // backend instance can handle. This is used to calculate the capacity + // of the group. Can be used in either balancing mode. For RATE mode, + // either maxRate or maxRatePerInstance must be set. + MaxRatePerInstance float64 `json:"maxRatePerInstance,omitempty"` + + // MaxUtilization: Used when balancingMode is UTILIZATION. This ratio + // defines the CPU utilization target for the group. The default is 0.8. + // Valid range is [0.0, 1.0]. + MaxUtilization float64 `json:"maxUtilization,omitempty"` + + // ForceSendFields is a list of field names (e.g. "BalancingMode") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *Backend) MarshalJSON() ([]byte, error) { + type noMethod Backend + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// BackendService: A BackendService resource. This resource defines a +// group of backend virtual machines and their serving capacity. +type BackendService struct { + // Backends: The list of backends that serve this BackendService. + Backends []*Backend `json:"backends,omitempty"` + + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // Fingerprint: Fingerprint of this resource. A hash of the contents + // stored in this object. This field is used in optimistic locking. This + // field will be ignored when inserting a BackendService. An up-to-date + // fingerprint must be provided in order to update the BackendService.
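+ + // Example (illustrative): a RATE-mode backend following the rules above; + // igURL is a placeholder for the fully-qualified InstanceGroup URL: + // + // b := &Backend{ + // Group: igURL, + // BalancingMode: "RATE", + // MaxRatePerInstance: 100, // RPS a single instance can absorb + // CapacityScaler: 1.0, // serve the full configured capacity + // } +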
+ Fingerprint string `json:"fingerprint,omitempty"` + + // HealthChecks: The list of URLs to the HttpHealthCheck or + // HttpsHealthCheck resource for health checking this BackendService. + // Currently at most one health check can be specified, and a health + // check is required. + HealthChecks []string `json:"healthChecks,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] Type of resource. Always compute#backendService + // for backend services. + Kind string `json:"kind,omitempty"` + + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // Port: Deprecated in favor of portName. The TCP port to connect on the + // backend. The default value is 80. + Port int64 `json:"port,omitempty"` + + // PortName: Name of backend port. The same name should appear in the + // instance groups referenced by this service. Required. + PortName string `json:"portName,omitempty"` + + // Protocol: The protocol this BackendService uses to communicate with + // backends. + // + // Possible values are HTTP and HTTPS. + // + // Possible values: + // "HTTP" + // "HTTPS" + Protocol string `json:"protocol,omitempty"` + + // Region: [Output Only] URL of the region where the regional backend + // service resides. This field is not applicable to global backend + // services. + Region string `json:"region,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // TimeoutSec: How many seconds to wait for the backend before + // considering it a failed request. Default is 30 seconds. + TimeoutSec int64 `json:"timeoutSec,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Backends") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *BackendService) MarshalJSON() ([]byte, error) { + type noMethod BackendService + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type BackendServiceGroupHealth struct { + HealthStatus []*HealthStatus `json:"healthStatus,omitempty"` + + // Kind: [Output Only] Type of resource. Always + // compute#backendServiceGroupHealth for the health of backend services. + Kind string `json:"kind,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "HealthStatus") to + // unconditionally include in API requests.
By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *BackendServiceGroupHealth) MarshalJSON() ([]byte, error) { + type noMethod BackendServiceGroupHealth + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// BackendServiceList: Contains a list of BackendService resources. +type BackendServiceList struct { + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id string `json:"id,omitempty"` + + // Items: A list of BackendService resources. + Items []*BackendService `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always + // compute#backendServiceList for lists of backend services. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *BackendServiceList) MarshalJSON() ([]byte, error) { + type noMethod BackendServiceList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// DeprecationStatus: Deprecation status for a public resource. +type DeprecationStatus struct { + // Deleted: An optional RFC3339 timestamp on or after which the + // deprecation state of this resource will be changed to DELETED. + Deleted string `json:"deleted,omitempty"` + + // Deprecated: An optional RFC3339 timestamp on or after which the + // deprecation state of this resource will be changed to DEPRECATED. + Deprecated string `json:"deprecated,omitempty"` + + // Obsolete: An optional RFC3339 timestamp on or after which the + // deprecation state of this resource will be changed to OBSOLETE. + Obsolete string `json:"obsolete,omitempty"` + + // Replacement: The URL of the suggested replacement for a deprecated + // resource. The suggested replacement resource must be the same kind of + // resource as the deprecated resource. + Replacement string `json:"replacement,omitempty"` + + // State: The deprecation state of this resource. This can be + // DEPRECATED, OBSOLETE, or DELETED. Operations which create a new + // resource using a DEPRECATED resource will return successfully, but + // with a warning indicating the deprecated resource and recommending + // its replacement. 
Operations which use OBSOLETE or DELETED resources + // will be rejected and result in an error. + // + // Possible values: + // "DELETED" + // "DEPRECATED" + // "OBSOLETE" + State string `json:"state,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Deleted") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *DeprecationStatus) MarshalJSON() ([]byte, error) { + type noMethod DeprecationStatus + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// Disk: A Disk resource. +type Disk struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] Type of the resource. Always compute#disk for + // disks. + Kind string `json:"kind,omitempty"` + + // LastAttachTimestamp: [Output Only] Last attach timestamp in RFC3339 + // text format. + LastAttachTimestamp string `json:"lastAttachTimestamp,omitempty"` + + // LastDetachTimestamp: [Output Only] Last detach timestamp in RFC3339 + // text format. + LastDetachTimestamp string `json:"lastDetachTimestamp,omitempty"` + + // Licenses: [Output Only] Any applicable publicly visible licenses. + Licenses []string `json:"licenses,omitempty"` + + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // Options: Internal use only. + Options string `json:"options,omitempty"` + + // SelfLink: [Output Only] Server-defined fully-qualified URL for this + // resource. + SelfLink string `json:"selfLink,omitempty"` + + // SizeGb: Size of the persistent disk, specified in GB. You can specify + // this field when creating a persistent disk using the sourceImage or + // sourceSnapshot parameter, or specify it alone to create an empty + // persistent disk. + // + // If you specify this field along with sourceImage or sourceSnapshot, + // the value of sizeGb must not be less than the size of the sourceImage + // or the size of the snapshot. + SizeGb int64 `json:"sizeGb,omitempty,string"` + + // SourceImage: The source image used to create this disk. If the source + // image is deleted, this field will not be set. + // + // To create a disk with one of the public operating system images, + // specify the image by its family name. 
For example, specify + // family/debian-8 to use the latest Debian 8 + // image: + // + // projects/debian-cloud/global/images/family/debian-8 + // + // Alternatively, use a specific version of a public operating system + // image: + // + // projects/debian-cloud/global/images/debian-8-jessie-vYYYYMMDD + // + // To create a disk with a private image that you created, specify the + // image name in the following format: + // + // global/images/my-private-image + // + // You can also specify a private image by its image family, which + // returns the latest version of the image in that family. Replace the + // image name with + // family/family-name: + // + // global/images/family/my-private-family + SourceImage string `json:"sourceImage,omitempty"` + + // SourceImageId: [Output Only] The ID value of the image used to create + // this disk. This value identifies the exact image that was used to + // create this persistent disk. For example, if you created the + // persistent disk from an image that was later deleted and recreated + // under the same name, the source image ID would identify the exact + // version of the image that was used. + SourceImageId string `json:"sourceImageId,omitempty"` + + // SourceSnapshot: The source snapshot used to create this disk. You can + // provide this as a partial or full URL to the resource. For example, + // the following are valid values: + // - + // https://www.googleapis.com/compute/v1/projects/project/global/snapshots/snapshot + // - projects/project/global/snapshots/snapshot + // - global/snapshots/snapshot + SourceSnapshot string `json:"sourceSnapshot,omitempty"` + + // SourceSnapshotId: [Output Only] The unique ID of the snapshot used to + // create this disk. This value identifies the exact snapshot that was + // used to create this persistent disk. For example, if you created the + // persistent disk from a snapshot that was later deleted and recreated + // under the same name, the source snapshot ID would identify the exact + // version of the snapshot that was used. + SourceSnapshotId string `json:"sourceSnapshotId,omitempty"` + + // Status: [Output Only] The status of disk creation. Applicable + // statuses include: CREATING, FAILED, READY, RESTORING. + // + // Possible values: + // "CREATING" + // "FAILED" + // "READY" + // "RESTORING" + Status string `json:"status,omitempty"` + + // Type: URL of the disk type resource describing which disk type to use + // to create the disk. Provide this when creating the disk. + Type string `json:"type,omitempty"` + + // Users: [Output Only] Links to the users of the disk (attached + // instances) in the form: project/zones/zone/instances/instance + Users []string `json:"users,omitempty"` + + // Zone: [Output Only] URL of the zone where the disk resides. + Zone string `json:"zone,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests.
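+ + // Example (illustrative): creating a blank pd-ssd disk; per the sizeGb notes + // above, specifying SizeGb alone yields an empty persistent disk, and Type + // accepts a disk type URL: + // + // op, err := svc.Disks.Insert(project, zone, &Disk{ + // Name: "data-disk-1", + // SizeGb: 200, + // Type: "zones/" + zone + "/diskTypes/pd-ssd", + // }).Do() +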
+ ForceSendFields []string `json:"-"` +} + +func (s *Disk) MarshalJSON() ([]byte, error) { + type noMethod Disk + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type DiskAggregatedList struct { + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id string `json:"id,omitempty"` + + // Items: [Output Only] A map of scoped disk lists. + Items map[string]DisksScopedList `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always + // compute#diskAggregatedList for aggregated lists of persistent disks. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *DiskAggregatedList) MarshalJSON() ([]byte, error) { + type noMethod DiskAggregatedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// DiskList: A list of Disk resources. +type DiskList struct { + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id string `json:"id,omitempty"` + + // Items: [Output Only] A list of persistent disks. + Items []*Disk `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always compute#diskList for + // lists of disks. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"` +} + +func (s *DiskList) MarshalJSON() ([]byte, error) { + type noMethod DiskList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type DiskMoveRequest struct { + // DestinationZone: The URL of the destination zone to move the disk. + // This can be a full or partial URL. For example, the following are all + // valid URLs to a zone: + // - https://www.googleapis.com/compute/v1/projects/project/zones/zone + // + // - projects/project/zones/zone + // - zones/zone + DestinationZone string `json:"destinationZone,omitempty"` + + // TargetDisk: The URL of the target disk to move. This can be a full or + // partial URL. For example, the following are all valid URLs to a disk: + // + // - + // https://www.googleapis.com/compute/v1/projects/project/zones/zone/disks/disk + // - projects/project/zones/zone/disks/disk + // - zones/zone/disks/disk + TargetDisk string `json:"targetDisk,omitempty"` + + // ForceSendFields is a list of field names (e.g. "DestinationZone") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *DiskMoveRequest) MarshalJSON() ([]byte, error) { + type noMethod DiskMoveRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// DiskType: A DiskType resource. +type DiskType struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // DefaultDiskSizeGb: [Output Only] Server-defined default disk size in + // GB. + DefaultDiskSizeGb int64 `json:"defaultDiskSizeGb,omitempty,string"` + + // Deprecated: [Output Only] The deprecation status associated with this + // disk type. + Deprecated *DeprecationStatus `json:"deprecated,omitempty"` + + // Description: [Output Only] An optional description of this resource. + Description string `json:"description,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] Type of the resource. Always compute#diskType for + // disk types. + Kind string `json:"kind,omitempty"` + + // Name: [Output Only] Name of the resource. + Name string `json:"name,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // ValidDiskSize: [Output Only] An optional textual description of the + // valid disk size, such as "10GB-10TB". + ValidDiskSize string `json:"validDiskSize,omitempty"` + + // Zone: [Output Only] URL of the zone where the disk type resides. + Zone string `json:"zone,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. 
This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *DiskType) MarshalJSON() ([]byte, error) { + type noMethod DiskType + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type DiskTypeAggregatedList struct { + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id string `json:"id,omitempty"` + + // Items: [Output Only] A map of scoped disk type lists. + Items map[string]DiskTypesScopedList `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always + // compute#diskTypeAggregatedList. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *DiskTypeAggregatedList) MarshalJSON() ([]byte, error) { + type noMethod DiskTypeAggregatedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// DiskTypeList: Contains a list of disk types. +type DiskTypeList struct { + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id string `json:"id,omitempty"` + + // Items: [Output Only] A list of Disk Type resources. + Items []*DiskType `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always compute#diskTypeList for + // disk types. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. 
This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *DiskTypeList) MarshalJSON() ([]byte, error) {
+ type noMethod DiskTypeList
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+type DiskTypesScopedList struct {
+ // DiskTypes: [Output Only] List of disk types contained in this scope.
+ DiskTypes []*DiskType `json:"diskTypes,omitempty"`
+
+ // Warning: [Output Only] Informational warning which replaces the list
+ // of disk types when the list is empty.
+ Warning *DiskTypesScopedListWarning `json:"warning,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "DiskTypes") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *DiskTypesScopedList) MarshalJSON() ([]byte, error) {
+ type noMethod DiskTypesScopedList
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// DiskTypesScopedListWarning: [Output Only] Informational warning which
+// replaces the list of disk types when the list is empty.
+type DiskTypesScopedListWarning struct {
+ // Code: [Output Only] A warning code, if applicable. For example,
+ // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in
+ // the response.
+ //
+ // Possible values:
+ // "CLEANUP_FAILED"
+ // "DEPRECATED_RESOURCE_USED"
+ // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE"
+ // "INJECTED_KERNELS_DEPRECATED"
+ // "NEXT_HOP_ADDRESS_NOT_ASSIGNED"
+ // "NEXT_HOP_CANNOT_IP_FORWARD"
+ // "NEXT_HOP_INSTANCE_NOT_FOUND"
+ // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK"
+ // "NEXT_HOP_NOT_RUNNING"
+ // "NOT_CRITICAL_ERROR"
+ // "NO_RESULTS_ON_PAGE"
+ // "REQUIRED_TOS_AGREEMENT"
+ // "RESOURCE_NOT_DELETED"
+ // "SINGLE_INSTANCE_PROPERTY_TEMPLATE"
+ // "UNREACHABLE"
+ Code string `json:"code,omitempty"`
+
+ // Data: [Output Only] Metadata about this warning in key: value format.
+ // For example:
+ // "data": [ { "key": "scope", "value": "zones/us-east1-d" } ]
+ Data []*DiskTypesScopedListWarningData `json:"data,omitempty"`
+
+ // Message: [Output Only] A human-readable description of the warning
+ // code.
+ Message string `json:"message,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "Code") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *DiskTypesScopedListWarning) MarshalJSON() ([]byte, error) {
+ type noMethod DiskTypesScopedListWarning
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+type DiskTypesScopedListWarningData struct {
+ // Key: [Output Only] A key that provides more detail on the warning
+ // being returned. For example, for warnings where there are no results
+ // in a list request for a particular zone, this key might be scope and
+ // the key value might be the zone name.
Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *DiskTypesScopedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod DiskTypesScopedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type DisksResizeRequest struct { + // SizeGb: The new size of the persistent disk, which is specified in + // GB. + SizeGb int64 `json:"sizeGb,omitempty,string"` + + // ForceSendFields is a list of field names (e.g. "SizeGb") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *DisksResizeRequest) MarshalJSON() ([]byte, error) { + type noMethod DisksResizeRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type DisksScopedList struct { + // Disks: [Output Only] List of disks contained in this scope. + Disks []*Disk `json:"disks,omitempty"` + + // Warning: [Output Only] Informational warning which replaces the list + // of disks when the list is empty. + Warning *DisksScopedListWarning `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Disks") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *DisksScopedList) MarshalJSON() ([]byte, error) { + type noMethod DisksScopedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// DisksScopedListWarning: [Output Only] Informational warning which +// replaces the list of disks when the list is empty. +type DisksScopedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
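+ //
+ // An illustrative sketch (the scopedList variable is an assumption,
+ // not part of this file): a caller might inspect the warning on an
+ // empty scope like this:
+ //
+ //   if w := scopedList.Warning; w != nil && w.Code == "NO_RESULTS_ON_PAGE" {
+ //           log.Printf("empty scope: %s", w.Message)
+ //   }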
+ //
+ // Possible values:
+ // "CLEANUP_FAILED"
+ // "DEPRECATED_RESOURCE_USED"
+ // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE"
+ // "INJECTED_KERNELS_DEPRECATED"
+ // "NEXT_HOP_ADDRESS_NOT_ASSIGNED"
+ // "NEXT_HOP_CANNOT_IP_FORWARD"
+ // "NEXT_HOP_INSTANCE_NOT_FOUND"
+ // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK"
+ // "NEXT_HOP_NOT_RUNNING"
+ // "NOT_CRITICAL_ERROR"
+ // "NO_RESULTS_ON_PAGE"
+ // "REQUIRED_TOS_AGREEMENT"
+ // "RESOURCE_NOT_DELETED"
+ // "SINGLE_INSTANCE_PROPERTY_TEMPLATE"
+ // "UNREACHABLE"
+ Code string `json:"code,omitempty"`
+
+ // Data: [Output Only] Metadata about this warning in key: value format.
+ // For example:
+ // "data": [ { "key": "scope", "value": "zones/us-east1-d" } ]
+ Data []*DisksScopedListWarningData `json:"data,omitempty"`
+
+ // Message: [Output Only] A human-readable description of the warning
+ // code.
+ Message string `json:"message,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "Code") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *DisksScopedListWarning) MarshalJSON() ([]byte, error) {
+ type noMethod DisksScopedListWarning
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+type DisksScopedListWarningData struct {
+ // Key: [Output Only] A key that provides more detail on the warning
+ // being returned. For example, for warnings where there are no results
+ // in a list request for a particular zone, this key might be scope and
+ // the key value might be the zone name. Other examples might be a key
+ // indicating a deprecated resource and a suggested replacement, or a
+ // warning about invalid network settings (for example, if an instance
+ // attempts to perform IP forwarding but is not enabled for IP
+ // forwarding).
+ Key string `json:"key,omitempty"`
+
+ // Value: [Output Only] A warning data value corresponding to the key.
+ Value string `json:"value,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "Key") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *DisksScopedListWarningData) MarshalJSON() ([]byte, error) {
+ type noMethod DisksScopedListWarningData
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// Firewall: Represents a Firewall resource.
+type Firewall struct {
+ // Allowed: The list of rules specified by this firewall. Each rule
+ // specifies a protocol and port-range tuple that describes a permitted
+ // connection.
+ Allowed []*FirewallAllowed `json:"allowed,omitempty"`
+
+ // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text
+ // format.
+ CreationTimestamp string `json:"creationTimestamp,omitempty"`
+
+ // Description: An optional description of this resource. Provide this
+ // property when you create the resource.
+ Description string `json:"description,omitempty"`
+
+ // Id: [Output Only] The unique identifier for the resource. This
+ // identifier is defined by the server.
+ Id uint64 `json:"id,omitempty,string"`
+
+ // Kind: [Output Only] Type of the resource. Always compute#firewall for
+ // firewall rules.
+ Kind string `json:"kind,omitempty"`
+
+ // Name: Name of the resource; provided by the client when the resource
+ // is created. The name must be 1-63 characters long, and comply with
+ // RFC1035. Specifically, the name must be 1-63 characters long and
+ // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means
+ // the first character must be a lowercase letter, and all following
+ // characters must be a dash, lowercase letter, or digit, except the
+ // last character, which cannot be a dash.
+ Name string `json:"name,omitempty"`
+
+ // Network: URL of the network resource for this firewall rule. If not
+ // specified when creating a firewall rule, the default network is
+ // used:
+ // global/networks/default
+ // If you choose to specify this property, you can specify the network
+ // as a full or partial URL. For example, the following are all valid
+ // URLs:
+ // -
+ // https://www.googleapis.com/compute/v1/projects/myproject/global/networks/my-network
+ // - projects/myproject/global/networks/my-network
+ // - global/networks/default
+ Network string `json:"network,omitempty"`
+
+ // SelfLink: [Output Only] Server-defined URL for the resource.
+ SelfLink string `json:"selfLink,omitempty"`
+
+ // SourceRanges: The IP address blocks that this rule applies to,
+ // expressed in CIDR format. One or both of sourceRanges and sourceTags
+ // may be set.
+ //
+ // If both properties are set, an inbound connection is allowed if the
+ // range matches the sourceRanges OR the tag of the source matches the
+ // sourceTags property. The connection does not need to match both
+ // properties.
+ SourceRanges []string `json:"sourceRanges,omitempty"`
+
+ // SourceTags: A list of instance tags which this rule applies to. One
+ // or both of sourceRanges and sourceTags may be set.
+ //
+ // If both properties are set, an inbound connection is allowed if the
+ // range matches the sourceRanges OR the tag of the source matches the
+ // sourceTags property. The connection does not need to match both
+ // properties.
+ SourceTags []string `json:"sourceTags,omitempty"`
+
+ // TargetTags: A list of instance tags indicating sets of instances
+ // located in the network that may make network connections as specified
+ // in allowed[]. If no targetTags are specified, the firewall rule
+ // applies to all instances on the specified network.
+ TargetTags []string `json:"targetTags,omitempty"`
+
+ // ServerResponse contains the HTTP response code and headers from the
+ // server.
+ googleapi.ServerResponse `json:"-"`
+
+ // ForceSendFields is a list of field names (e.g. "Allowed") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
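+ //
+ // An illustrative sketch, not part of the generated API: to clear
+ // sourceTags on an existing rule through a Patch request, force the
+ // empty field to be sent:
+ //
+ //   fw := &Firewall{ForceSendFields: []string{"SourceTags"}}
+ //   // fw now marshals with an explicit, empty sourceTags value.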
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *Firewall) MarshalJSON() ([]byte, error) {
+ type noMethod Firewall
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+type FirewallAllowed struct {
+ // IPProtocol: The IP protocol that is allowed for this rule. The
+ // protocol type is required when creating a firewall rule. This value
+ // can either be one of the following well-known protocol strings (tcp,
+ // udp, icmp, esp, ah, sctp), or the IP protocol number.
+ IPProtocol string `json:"IPProtocol,omitempty"`
+
+ // Ports: An optional list of ports which are allowed. This field is
+ // only applicable for UDP or TCP protocol. Each entry must be either an
+ // integer or a range. If not specified, connections through any port
+ // are allowed.
+ //
+ // Example inputs include: ["22"], ["80","443"], and ["12345-12349"].
+ Ports []string `json:"ports,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "IPProtocol") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *FirewallAllowed) MarshalJSON() ([]byte, error) {
+ type noMethod FirewallAllowed
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// FirewallList: Contains a list of firewalls.
+type FirewallList struct {
+ // Id: [Output Only] The unique identifier for the resource. This
+ // identifier is defined by the server.
+ Id string `json:"id,omitempty"`
+
+ // Items: [Output Only] A list of Firewall resources.
+ Items []*Firewall `json:"items,omitempty"`
+
+ // Kind: [Output Only] Type of resource. Always compute#firewallList for
+ // lists of firewalls.
+ Kind string `json:"kind,omitempty"`
+
+ // NextPageToken: [Output Only] This token allows you to get the next
+ // page of results for list requests. If the number of results is larger
+ // than maxResults, use the nextPageToken as a value for the query
+ // parameter pageToken in the next list request. Subsequent list
+ // requests will have their own nextPageToken to continue paging through
+ // the results.
+ NextPageToken string `json:"nextPageToken,omitempty"`
+
+ // SelfLink: [Output Only] Server-defined URL for this resource.
+ SelfLink string `json:"selfLink,omitempty"`
+
+ // ServerResponse contains the HTTP response code and headers from the
+ // server.
+ googleapi.ServerResponse `json:"-"`
+
+ // ForceSendFields is a list of field names (e.g. "Id") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *FirewallList) MarshalJSON() ([]byte, error) {
+ type noMethod FirewallList
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// ForwardingRule: A ForwardingRule resource. A ForwardingRule resource
+// specifies which pool of target virtual machines to forward a packet
+// to if it matches the given [IPAddress, IPProtocol, portRange] tuple.
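+//
+// An illustrative sketch (all literal values are assumptions): a
+// regional rule that forwards TCP port 80 traffic on a reserved
+// address to a target pool might look like:
+//
+//   rule := &ForwardingRule{
+//           Name:       "www-rule",
+//           IPProtocol: "TCP",
+//           PortRange:  "80-80",
+//           Target:     "regions/us-central1/targetPools/www-pool",
+//   }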
+type ForwardingRule struct {
+ // IPAddress: Value of the reserved IP address that this forwarding rule
+ // is serving on behalf of. For global forwarding rules, the address
+ // must be a global IP; for regional forwarding rules, the address must
+ // live in the same region as the forwarding rule. If left empty
+ // (default value), an ephemeral IP from the same scope (global or
+ // regional) will be assigned.
+ IPAddress string `json:"IPAddress,omitempty"`
+
+ // IPProtocol: The IP protocol to which this rule applies. Valid options
+ // are TCP, UDP, ESP, AH, SCTP, or ICMP.
+ //
+ // Possible values:
+ // "AH"
+ // "ESP"
+ // "SCTP"
+ // "TCP"
+ // "UDP"
+ IPProtocol string `json:"IPProtocol,omitempty"`
+
+ // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text
+ // format.
+ CreationTimestamp string `json:"creationTimestamp,omitempty"`
+
+ // Description: An optional description of this resource. Provide this
+ // property when you create the resource.
+ Description string `json:"description,omitempty"`
+
+ // Id: [Output Only] The unique identifier for the resource. This
+ // identifier is defined by the server.
+ Id uint64 `json:"id,omitempty,string"`
+
+ // Kind: [Output Only] Type of the resource. Always
+ // compute#forwardingRule for Forwarding Rule resources.
+ Kind string `json:"kind,omitempty"`
+
+ // Name: Name of the resource; provided by the client when the resource
+ // is created. The name must be 1-63 characters long, and comply with
+ // RFC1035. Specifically, the name must be 1-63 characters long and
+ // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means
+ // the first character must be a lowercase letter, and all following
+ // characters must be a dash, lowercase letter, or digit, except the
+ // last character, which cannot be a dash.
+ Name string `json:"name,omitempty"`
+
+ // PortRange: Applicable only when IPProtocol is TCP, UDP, or SCTP; only
+ // packets addressed to ports in the specified range will be forwarded
+ // to the target. Forwarding rules with the same [IPAddress, IPProtocol]
+ // pair must have disjoint port ranges.
+ PortRange string `json:"portRange,omitempty"`
+
+ // Region: [Output Only] URL of the region where the regional forwarding
+ // rule resides. This field is not applicable to global forwarding
+ // rules.
+ Region string `json:"region,omitempty"`
+
+ // SelfLink: [Output Only] Server-defined URL for the resource.
+ SelfLink string `json:"selfLink,omitempty"`
+
+ // Target: The URL of the target resource to receive the matched
+ // traffic. For regional forwarding rules, this target must live in the
+ // same region as the forwarding rule. For global forwarding rules, this
+ // target must be a global TargetHttpProxy or TargetHttpsProxy resource.
+ // The forwarded traffic must be of a type appropriate to the target
+ // object. For example, TargetHttpProxy requires HTTP traffic, and
+ // TargetHttpsProxy requires HTTPS traffic.
+ Target string `json:"target,omitempty"`
+
+ // ServerResponse contains the HTTP response code and headers from the
+ // server.
+ googleapi.ServerResponse `json:"-"`
+
+ // ForceSendFields is a list of field names (e.g. "IPAddress") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"` +} + +func (s *ForwardingRule) MarshalJSON() ([]byte, error) { + type noMethod ForwardingRule + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type ForwardingRuleAggregatedList struct { + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id string `json:"id,omitempty"` + + // Items: A map of scoped forwarding rule lists. + Items map[string]ForwardingRulesScopedList `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always + // compute#forwardingRuleAggregatedList for lists of forwarding rules. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *ForwardingRuleAggregatedList) MarshalJSON() ([]byte, error) { + type noMethod ForwardingRuleAggregatedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// ForwardingRuleList: Contains a list of ForwardingRule resources. +type ForwardingRuleList struct { + // Id: [Output Only] Unique identifier for the resource. Set by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of ForwardingRule resources. + Items []*ForwardingRule `json:"items,omitempty"` + + // Kind: Type of resource. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *ForwardingRuleList) MarshalJSON() ([]byte, error) {
+ type noMethod ForwardingRuleList
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+type ForwardingRulesScopedList struct {
+ // ForwardingRules: List of forwarding rules contained in this scope.
+ ForwardingRules []*ForwardingRule `json:"forwardingRules,omitempty"`
+
+ // Warning: Informational warning which replaces the list of forwarding
+ // rules when the list is empty.
+ Warning *ForwardingRulesScopedListWarning `json:"warning,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "ForwardingRules") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *ForwardingRulesScopedList) MarshalJSON() ([]byte, error) {
+ type noMethod ForwardingRulesScopedList
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// ForwardingRulesScopedListWarning: Informational warning which
+// replaces the list of forwarding rules when the list is empty.
+type ForwardingRulesScopedListWarning struct {
+ // Code: [Output Only] A warning code, if applicable. For example,
+ // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in
+ // the response.
+ //
+ // Possible values:
+ // "CLEANUP_FAILED"
+ // "DEPRECATED_RESOURCE_USED"
+ // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE"
+ // "INJECTED_KERNELS_DEPRECATED"
+ // "NEXT_HOP_ADDRESS_NOT_ASSIGNED"
+ // "NEXT_HOP_CANNOT_IP_FORWARD"
+ // "NEXT_HOP_INSTANCE_NOT_FOUND"
+ // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK"
+ // "NEXT_HOP_NOT_RUNNING"
+ // "NOT_CRITICAL_ERROR"
+ // "NO_RESULTS_ON_PAGE"
+ // "REQUIRED_TOS_AGREEMENT"
+ // "RESOURCE_NOT_DELETED"
+ // "SINGLE_INSTANCE_PROPERTY_TEMPLATE"
+ // "UNREACHABLE"
+ Code string `json:"code,omitempty"`
+
+ // Data: [Output Only] Metadata about this warning in key: value format.
+ // For example:
+ // "data": [ { "key": "scope", "value": "zones/us-east1-d" } ]
+ Data []*ForwardingRulesScopedListWarningData `json:"data,omitempty"`
+
+ // Message: [Output Only] A human-readable description of the warning
+ // code.
+ Message string `json:"message,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "Code") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *ForwardingRulesScopedListWarning) MarshalJSON() ([]byte, error) {
+ type noMethod ForwardingRulesScopedListWarning
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+type ForwardingRulesScopedListWarningData struct {
+ // Key: [Output Only] A key that provides more detail on the warning
+ // being returned. For example, for warnings where there are no results
+ // in a list request for a particular zone, this key might be scope and
+ // the key value might be the zone name.
Other examples might be a key
+ // indicating a deprecated resource and a suggested replacement, or a
+ // warning about invalid network settings (for example, if an instance
+ // attempts to perform IP forwarding but is not enabled for IP
+ // forwarding).
+ Key string `json:"key,omitempty"`
+
+ // Value: [Output Only] A warning data value corresponding to the key.
+ Value string `json:"value,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "Key") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *ForwardingRulesScopedListWarningData) MarshalJSON() ([]byte, error) {
+ type noMethod ForwardingRulesScopedListWarningData
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// HealthCheckReference: A full or valid partial URL to a health check.
+// For example, the following are valid URLs:
+// -
+// https://www.googleapis.com/compute/beta/projects/project-id/global/httpHealthChecks/health-check
+// - projects/project-id/global/httpHealthChecks/health-check
+// - global/httpHealthChecks/health-check
+type HealthCheckReference struct {
+ HealthCheck string `json:"healthCheck,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "HealthCheck") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *HealthCheckReference) MarshalJSON() ([]byte, error) {
+ type noMethod HealthCheckReference
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+type HealthStatus struct {
+ // HealthState: Health state of the instance.
+ //
+ // Possible values:
+ // "HEALTHY"
+ // "UNHEALTHY"
+ HealthState string `json:"healthState,omitempty"`
+
+ // Instance: URL of the instance resource.
+ Instance string `json:"instance,omitempty"`
+
+ // IpAddress: The IP address represented by this resource.
+ IpAddress string `json:"ipAddress,omitempty"`
+
+ // Port: The port on the instance.
+ Port int64 `json:"port,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "HealthState") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *HealthStatus) MarshalJSON() ([]byte, error) {
+ type noMethod HealthStatus
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// HostRule: A host-matching rule for a URL. If matched, the request
+// will use the named PathMatcher to select the BackendService.
+type HostRule struct {
+ // Description: An optional description of this resource. Provide this
+ // property when you create the resource.
+ Description string `json:"description,omitempty"`
+
+ // Hosts: The list of host patterns to match. They must be valid
+ // hostnames, except that * will match any string of ([a-z0-9-.]*). In
+ // that case, * must be the first character, and it must be followed in
+ // the pattern by either a dash (-) or a dot (.).
+ Hosts []string `json:"hosts,omitempty"`
+
+ // PathMatcher: The name of the PathMatcher to use to match the path
+ // portion of the URL if the hostRule matches the URL's host portion.
+ PathMatcher string `json:"pathMatcher,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "Description") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *HostRule) MarshalJSON() ([]byte, error) {
+ type noMethod HostRule
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// HttpHealthCheck: An HttpHealthCheck resource. This resource defines a
+// template for how individual instances should be checked for health,
+// via HTTP.
+type HttpHealthCheck struct {
+ // CheckIntervalSec: How often (in seconds) to send a health check. The
+ // default value is 5 seconds.
+ CheckIntervalSec int64 `json:"checkIntervalSec,omitempty"`
+
+ // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text
+ // format.
+ CreationTimestamp string `json:"creationTimestamp,omitempty"`
+
+ // Description: An optional description of this resource. Provide this
+ // property when you create the resource.
+ Description string `json:"description,omitempty"`
+
+ // HealthyThreshold: A so-far unhealthy instance will be marked healthy
+ // after this many consecutive successes. The default value is 2.
+ HealthyThreshold int64 `json:"healthyThreshold,omitempty"`
+
+ // Host: The value of the host header in the HTTP health check request.
+ // If left empty (default value), the public IP on behalf of which this
+ // health check is performed will be used.
+ Host string `json:"host,omitempty"`
+
+ // Id: [Output Only] The unique identifier for the resource. This
+ // identifier is defined by the server.
+ Id uint64 `json:"id,omitempty,string"`
+
+ // Kind: [Output Only] Type of the resource. Always
+ // compute#httpHealthCheck for HTTP health checks.
+ Kind string `json:"kind,omitempty"`
+
+ // Name: Name of the resource. Provided by the client when the resource
+ // is created. The name must be 1-63 characters long, and comply with
+ // RFC1035. Specifically, the name must be 1-63 characters long and
+ // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means
+ // the first character must be a lowercase letter, and all following
+ // characters must be a dash, lowercase letter, or digit, except the
+ // last character, which cannot be a dash.
+ Name string `json:"name,omitempty"`
+
+ // Port: The TCP port number for the HTTP health check request. The
+ // default value is 80.
+ Port int64 `json:"port,omitempty"`
+
+ // RequestPath: The request path of the HTTP health check request. The
+ // default value is /.
+ RequestPath string `json:"requestPath,omitempty"`
+
+ // SelfLink: [Output Only] Server-defined URL for the resource.
+ SelfLink string `json:"selfLink,omitempty"`
+
+ // TimeoutSec: How long (in seconds) to wait before claiming failure.
+ // The default value is 5 seconds. It is invalid for timeoutSec to have
+ // a greater value than checkIntervalSec.
+ TimeoutSec int64 `json:"timeoutSec,omitempty"`
+
+ // UnhealthyThreshold: A so-far healthy instance will be marked
+ // unhealthy after this many consecutive failures. The default value is
+ // 2.
+ UnhealthyThreshold int64 `json:"unhealthyThreshold,omitempty"`
+
+ // ServerResponse contains the HTTP response code and headers from the
+ // server.
+ googleapi.ServerResponse `json:"-"`
+
+ // ForceSendFields is a list of field names (e.g. "CheckIntervalSec") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *HttpHealthCheck) MarshalJSON() ([]byte, error) {
+ type noMethod HttpHealthCheck
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// HttpHealthCheckList: Contains a list of HttpHealthCheck resources.
+type HttpHealthCheckList struct {
+ // Id: [Output Only] Unique identifier for the resource. Defined by the
+ // server.
+ Id string `json:"id,omitempty"`
+
+ // Items: A list of HttpHealthCheck resources.
+ Items []*HttpHealthCheck `json:"items,omitempty"`
+
+ // Kind: Type of resource.
+ Kind string `json:"kind,omitempty"`
+
+ // NextPageToken: [Output Only] This token allows you to get the next
+ // page of results for list requests. If the number of results is larger
+ // than maxResults, use the nextPageToken as a value for the query
+ // parameter pageToken in the next list request. Subsequent list
+ // requests will have their own nextPageToken to continue paging through
+ // the results.
+ NextPageToken string `json:"nextPageToken,omitempty"`
+
+ // SelfLink: [Output Only] Server-defined URL for this resource.
+ SelfLink string `json:"selfLink,omitempty"`
+
+ // ServerResponse contains the HTTP response code and headers from the
+ // server.
+ googleapi.ServerResponse `json:"-"`
+
+ // ForceSendFields is a list of field names (e.g. "Id") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *HttpHealthCheckList) MarshalJSON() ([]byte, error) {
+ type noMethod HttpHealthCheckList
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// HttpsHealthCheck: An HttpsHealthCheck resource. This resource defines
+// a template for how individual instances should be checked for health,
+// via HTTPS.
+type HttpsHealthCheck struct {
+ // CheckIntervalSec: How often (in seconds) to send a health check. The
+ // default value is 5 seconds.
+ CheckIntervalSec int64 `json:"checkIntervalSec,omitempty"`
+
+ // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text
+ // format.
+ CreationTimestamp string `json:"creationTimestamp,omitempty"`
+
+ // Description: An optional description of this resource.
Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // HealthyThreshold: A so-far unhealthy instance will be marked healthy + // after this many consecutive successes. The default value is 2. + HealthyThreshold int64 `json:"healthyThreshold,omitempty"` + + // Host: The value of the host header in the HTTPS health check request. + // If left empty (default value), the public IP on behalf of which this + // health check is performed will be used. + Host string `json:"host,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: Type of the resource. + Kind string `json:"kind,omitempty"` + + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // Port: The TCP port number for the HTTPS health check request. The + // default value is 443. + Port int64 `json:"port,omitempty"` + + // RequestPath: The request path of the HTTPS health check request. The + // default value is "/". + RequestPath string `json:"requestPath,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // TimeoutSec: How long (in seconds) to wait before claiming failure. + // The default value is 5 seconds. It is invalid for timeoutSec to have + // a greater value than checkIntervalSec. + TimeoutSec int64 `json:"timeoutSec,omitempty"` + + // UnhealthyThreshold: A so-far healthy instance will be marked + // unhealthy after this many consecutive failures. The default value is + // 2. + UnhealthyThreshold int64 `json:"unhealthyThreshold,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CheckIntervalSec") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *HttpsHealthCheck) MarshalJSON() ([]byte, error) { + type noMethod HttpsHealthCheck + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// HttpsHealthCheckList: Contains a list of HttpsHealthCheck resources. +type HttpsHealthCheckList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of HttpsHealthCheck resources. + Items []*HttpsHealthCheck `json:"items,omitempty"` + + // Kind: Type of resource. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. 
If the number of results is larger
+ // than maxResults, use the nextPageToken as a value for the query
+ // parameter pageToken in the next list request. Subsequent list
+ // requests will have their own nextPageToken to continue paging through
+ // the results.
+ NextPageToken string `json:"nextPageToken,omitempty"`
+
+ // SelfLink: [Output Only] Server-defined URL for this resource.
+ SelfLink string `json:"selfLink,omitempty"`
+
+ // ServerResponse contains the HTTP response code and headers from the
+ // server.
+ googleapi.ServerResponse `json:"-"`
+
+ // ForceSendFields is a list of field names (e.g. "Id") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *HttpsHealthCheckList) MarshalJSON() ([]byte, error) {
+ type noMethod HttpsHealthCheckList
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// Image: An Image resource.
+type Image struct {
+ // ArchiveSizeBytes: Size of the image tar.gz archive stored in Google
+ // Cloud Storage (in bytes).
+ ArchiveSizeBytes int64 `json:"archiveSizeBytes,omitempty,string"`
+
+ // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text
+ // format.
+ CreationTimestamp string `json:"creationTimestamp,omitempty"`
+
+ // Deprecated: The deprecation status associated with this image.
+ Deprecated *DeprecationStatus `json:"deprecated,omitempty"`
+
+ // Description: An optional description of this resource. Provide this
+ // property when you create the resource.
+ Description string `json:"description,omitempty"`
+
+ // DiskSizeGb: Size of the image when restored onto a persistent disk
+ // (in GB).
+ DiskSizeGb int64 `json:"diskSizeGb,omitempty,string"`
+
+ // Family: The name of the image family to which this image belongs. You
+ // can create disks by specifying an image family instead of a specific
+ // image name. The image family always returns its latest image that is
+ // not deprecated.
+ Family string `json:"family,omitempty"`
+
+ // Id: [Output Only] The unique identifier for the resource. This
+ // identifier is defined by the server.
+ Id uint64 `json:"id,omitempty,string"`
+
+ // Kind: [Output Only] Type of the resource. Always compute#image for
+ // images.
+ Kind string `json:"kind,omitempty"`
+
+ // Licenses: Any applicable publicly visible licenses.
+ Licenses []string `json:"licenses,omitempty"`
+
+ // Name: Name of the resource; provided by the client when the resource
+ // is created. The name must be 1-63 characters long, and comply with
+ // RFC1035. Specifically, the name must be 1-63 characters long and
+ // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means
+ // the first character must be a lowercase letter, and all following
+ // characters must be a dash, lowercase letter, or digit, except the
+ // last character, which cannot be a dash.
+ Name string `json:"name,omitempty"`
+
+ // RawDisk: The parameters of the raw disk image.
+ RawDisk *ImageRawDisk `json:"rawDisk,omitempty"`
+
+ // SelfLink: [Output Only] Server-defined URL for the resource.
+ SelfLink string `json:"selfLink,omitempty"`
+
+ // SourceDisk: URL of the source disk used to create this image.
+ // This can be a full or valid partial URL.
You must provide either this
+ // property or the rawDisk.source property but not both to create an
+ // image. For example, the following are valid values:
+ // -
+ // https://www.googleapis.com/compute/v1/projects/project/zones/zone/disks/disk
+ // - projects/project/zones/zone/disks/disk
+ // - zones/zone/disks/disk
+ SourceDisk string `json:"sourceDisk,omitempty"`
+
+ // SourceDiskId: The ID value of the disk used to create this image.
+ // This value may be used to determine whether the image was taken from
+ // the current or a previous instance of a given disk name.
+ SourceDiskId string `json:"sourceDiskId,omitempty"`
+
+ // SourceType: The type of the image used to create this disk. The
+ // default and only value is RAW.
+ //
+ // Possible values:
+ // "RAW" (default)
+ SourceType string `json:"sourceType,omitempty"`
+
+ // Status: [Output Only] The status of the image. An image can be used
+ // to create other resources, such as instances, only after the image
+ // has been successfully created and the status is set to READY.
+ // Possible values are FAILED, PENDING, or READY.
+ //
+ // Possible values:
+ // "FAILED"
+ // "PENDING"
+ // "READY"
+ Status string `json:"status,omitempty"`
+
+ // ServerResponse contains the HTTP response code and headers from the
+ // server.
+ googleapi.ServerResponse `json:"-"`
+
+ // ForceSendFields is a list of field names (e.g. "ArchiveSizeBytes") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *Image) MarshalJSON() ([]byte, error) {
+ type noMethod Image
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// ImageRawDisk: The parameters of the raw disk image.
+type ImageRawDisk struct {
+ // ContainerType: The format used to encode and transmit the block
+ // device, which should be TAR. This is just a container and
+ // transmission format and not a runtime format. Provided by the client
+ // when the disk image is created.
+ //
+ // Possible values:
+ // "TAR"
+ ContainerType string `json:"containerType,omitempty"`
+
+ // Sha1Checksum: An optional SHA1 checksum of the disk image before
+ // unpackaging; provided by the client when the disk image is created.
+ Sha1Checksum string `json:"sha1Checksum,omitempty"`
+
+ // Source: The full Google Cloud Storage URL where the disk image is
+ // stored. You must provide either this property or the sourceDisk
+ // property but not both.
+ Source string `json:"source,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "ContainerType") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *ImageRawDisk) MarshalJSON() ([]byte, error) {
+ type noMethod ImageRawDisk
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// ImageList: Contains a list of images.
+type ImageList struct {
+ // Id: [Output Only] The unique identifier for the resource.
This + // identifier is defined by the server. + Id string `json:"id,omitempty"` + + // Items: [Output Only] A list of Image resources. + Items []*Image `json:"items,omitempty"` + + // Kind: Type of resource. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *ImageList) MarshalJSON() ([]byte, error) { + type noMethod ImageList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// Instance: An Instance resource. +type Instance struct { + // CanIpForward: Allows this instance to send and receive packets with + // non-matching destination or source IPs. This is required if you plan + // to use this instance to forward routes. For more information, see + // Enabling IP Forwarding. + CanIpForward bool `json:"canIpForward,omitempty"` + + // CpuPlatform: [Output Only] The CPU platform used by this instance. + CpuPlatform string `json:"cpuPlatform,omitempty"` + + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // Disks: Array of disks associated with this instance. Persistent disks + // must be created before you can assign them. + Disks []*AttachedDisk `json:"disks,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] Type of the resource. Always compute#instance for + // instances. + Kind string `json:"kind,omitempty"` + + // MachineType: Full or partial URL of the machine type resource to use + // for this instance, in the format: + // zones/zone/machineTypes/machine-type. This is provided by the client + // when the instance is created. For example, the following is a valid + // partial url to a predefined machine + // type: + // + // zones/us-central1-f/machineTypes/n1-standard-1 + // + // To create a custom machine type, provide a URL to a machine type in + // the following format, where CPUS is 1 or an even number up to 32 (2, + // 4, 6, ... 24, etc), and MEMORY is the total memory for this instance. + // Memory must be a multiple of 256 MB and must be supplied in MB (e.g. 
+ // 5 GB of memory is 5120 + // MB): + // + // zones/zone/machineTypes/custom-CPUS-MEMORY + // + // For example: zones/us-central1-f/machineTypes/custom-4-5120 + // + // For a full list of restrictions, read the Specifications for custom + // machine types. + MachineType string `json:"machineType,omitempty"` + + // Metadata: The metadata key/value pairs assigned to this instance. + // This includes custom metadata and predefined keys. + Metadata *Metadata `json:"metadata,omitempty"` + + // Name: The name of the resource, provided by the client when initially + // creating the resource. The resource name must be 1-63 characters + // long, and comply with RFC1035. Specifically, the name must be 1-63 + // characters long and match the regular expression + // [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a + // lowercase letter, and all following characters must be a dash, + // lowercase letter, or digit, except the last character, which cannot + // be a dash. + Name string `json:"name,omitempty"` + + // NetworkInterfaces: An array of configurations for this interface. + // This specifies how this interface is configured to interact with + // other network services, such as connecting to the internet. + NetworkInterfaces []*NetworkInterface `json:"networkInterfaces,omitempty"` + + // Scheduling: Scheduling options for this instance. + Scheduling *Scheduling `json:"scheduling,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServiceAccounts: A list of service accounts, with their specified + // scopes, authorized for this instance. Service accounts generate + // access tokens that can be accessed through the metadata server and + // used to authenticate applications on the instance. See Authenticating + // from Google Compute Engine for more information. + ServiceAccounts []*ServiceAccount `json:"serviceAccounts,omitempty"` + + // Status: [Output Only] The status of the instance. One of the + // following values: PROVISIONING, STAGING, RUNNING, STOPPING, STOPPED, + // SUSPENDING, SUSPENDED, and TERMINATED. + // + // Possible values: + // "PROVISIONING" + // "RUNNING" + // "STAGING" + // "STOPPED" + // "STOPPING" + // "SUSPENDED" + // "SUSPENDING" + // "TERMINATED" + Status string `json:"status,omitempty"` + + // StatusMessage: [Output Only] An optional, human-readable explanation + // of the status. + StatusMessage string `json:"statusMessage,omitempty"` + + // Tags: A list of tags to apply to this instance. Tags are used to + // identify valid sources or targets for network firewalls and are + // specified by the client during instance creation. The tags can be + // later modified by the setTags method. Each tag within the list must + // comply with RFC1035. + Tags *Tags `json:"tags,omitempty"` + + // Zone: [Output Only] URL of the zone where the instance resides. + Zone string `json:"zone,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CanIpForward") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests.
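+ //
+ // As a hedged usage sketch (the field names are the ones defined in
+ // this package; the literal values are hypothetical), forcing an
+ // otherwise-omitted empty value to be sent might look like:
+ //
+ //	inst := &compute.Instance{
+ //		Description:     "", // empty, so omitted unless forced
+ //		ForceSendFields: []string{"Description"},
+ //	}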
+ ForceSendFields []string `json:"-"` +} + +func (s *Instance) MarshalJSON() ([]byte, error) { + type noMethod Instance + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type InstanceAggregatedList struct { + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id string `json:"id,omitempty"` + + // Items: [Output Only] A map of scoped instance lists. + Items map[string]InstancesScopedList `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always + // compute#instanceAggregatedList for aggregated lists of Instance + // resources. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *InstanceAggregatedList) MarshalJSON() ([]byte, error) { + type noMethod InstanceAggregatedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type InstanceGroup struct { + // CreationTimestamp: [Output Only] The creation timestamp for this + // instance group in RFC3339 text format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // Fingerprint: [Output Only] The fingerprint of the named ports. The + // system uses this fingerprint to detect conflicts when multiple users + // change the named ports concurrently. + Fingerprint string `json:"fingerprint,omitempty"` + + // Id: [Output Only] A unique identifier for this resource type. The + // server generates this identifier. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] The resource type, which is always + // compute#instanceGroup for instance groups. + Kind string `json:"kind,omitempty"` + + // Name: The name of the instance group. The name must be 1-63 + // characters long, and comply with RFC1035. + Name string `json:"name,omitempty"` + + // NamedPorts: Assigns a name to a port number. For example: {name: + // "http", port: 80} + // + // This allows the system to reference ports by the assigned name + // instead of a port number. Named ports can also contain multiple + // ports. For example: [{name: "http", port: 80},{name: "http", port: + // 8080}] + // + // Named ports apply to all instances in this instance group. 
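+ //
+ // A minimal sketch of that shape using this package's NamedPort type
+ // (the names and port numbers are hypothetical):
+ //
+ //	ports := []*compute.NamedPort{
+ //		{Name: "http", Port: 80},
+ //		{Name: "http", Port: 8080},
+ //	}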
+ NamedPorts []*NamedPort `json:"namedPorts,omitempty"` + + // Network: The URL of the network to which all instances in the + // instance group belong. + Network string `json:"network,omitempty"` + + // SelfLink: [Output Only] The URL for this instance group. The server + // generates this URL. + SelfLink string `json:"selfLink,omitempty"` + + // Size: [Output Only] The total number of instances in the instance + // group. + Size int64 `json:"size,omitempty"` + + // Subnetwork: The URL of the subnetwork to which all instances in the + // instance group belong. + Subnetwork string `json:"subnetwork,omitempty"` + + // Zone: [Output Only] The URL of the zone where the instance group is + // located. + Zone string `json:"zone,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *InstanceGroup) MarshalJSON() ([]byte, error) { + type noMethod InstanceGroup + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type InstanceGroupAggregatedList struct { + // Id: [Output Only] A unique identifier for this aggregated list of + // instance groups. The server generates this identifier. + Id string `json:"id,omitempty"` + + // Items: A map of scoped instance group lists. + Items map[string]InstanceGroupsScopedList `json:"items,omitempty"` + + // Kind: [Output Only] The resource type, which is always + // compute#instanceGroupAggregatedList for aggregated lists of instance + // groups. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] The URL for this resource type. The server + // generates this URL. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *InstanceGroupAggregatedList) MarshalJSON() ([]byte, error) { + type noMethod InstanceGroupAggregatedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// InstanceGroupList: A list of InstanceGroup resources. +type InstanceGroupList struct { + // Id: [Output Only] A unique identifier for this list of instance + // groups. 
The server generates this identifier. + Id string `json:"id,omitempty"` + + // Items: A list of instance groups. + Items []*InstanceGroup `json:"items,omitempty"` + + // Kind: [Output Only] The resource type, which is always + // compute#instanceGroupList for instance group lists. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] The URL for this resource type. The server + // generates this URL. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *InstanceGroupList) MarshalJSON() ([]byte, error) { + type noMethod InstanceGroupList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type InstanceGroupManager struct { + // BaseInstanceName: The base instance name to use for instances in this + // group. The value must be 1-58 characters long. Instances are named by + // appending a hyphen and a random four-character string to the base + // instance name. The base instance name must comply with RFC1035. + BaseInstanceName string `json:"baseInstanceName,omitempty"` + + // CreationTimestamp: [Output Only] The creation timestamp for this + // managed instance group in RFC3339 text format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // CurrentActions: [Output Only] The list of instance actions and the + // number of instances in this managed instance group that are scheduled + // for each of those actions. + CurrentActions *InstanceGroupManagerActionsSummary `json:"currentActions,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // Fingerprint: [Output Only] The fingerprint of the resource data. You + // can use this optional field for optimistic locking when you update + // the resource. + Fingerprint string `json:"fingerprint,omitempty"` + + // Id: [Output Only] A unique identifier for this resource type. The + // server generates this identifier. + Id uint64 `json:"id,omitempty,string"` + + // InstanceGroup: [Output Only] The URL of the Instance Group resource. + InstanceGroup string `json:"instanceGroup,omitempty"` + + // InstanceTemplate: The URL of the instance template that is specified + // for this managed instance group. The group uses this template to + // create all new instances in the managed instance group. 
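+ //
+ // A hedged sketch of creating a managed instance group from a template
+ // (assumes a configured *compute.Service named svc; the project, zone,
+ // and resource names are hypothetical):
+ //
+ //	igm := &compute.InstanceGroupManager{
+ //		Name:             "my-igm",
+ //		BaseInstanceName: "my-igm",
+ //		InstanceTemplate: "global/instanceTemplates/my-template",
+ //		TargetSize:       3,
+ //	}
+ //	op, err := svc.InstanceGroupManagers.Insert("my-project", "us-central1-f", igm).Do()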
+ InstanceTemplate string `json:"instanceTemplate,omitempty"` + + // Kind: [Output Only] The resource type, which is always + // compute#instanceGroupManager for managed instance groups. + Kind string `json:"kind,omitempty"` + + // Name: The name of the managed instance group. The name must be 1-63 + // characters long, and comply with RFC1035. + Name string `json:"name,omitempty"` + + // NamedPorts: Named ports configured for the Instance Groups + // complementary to this Instance Group Manager. + NamedPorts []*NamedPort `json:"namedPorts,omitempty"` + + // SelfLink: [Output Only] The URL for this managed instance group. The + // server defines this URL. + SelfLink string `json:"selfLink,omitempty"` + + // TargetPools: The URLs for all TargetPool resources to which instances + // in the instanceGroup field are added. The target pools automatically + // apply to all of the instances in the managed instance group. + TargetPools []string `json:"targetPools,omitempty"` + + // TargetSize: The target number of running instances for this managed + // instance group. Deleting or abandoning instances reduces this number. + // Resizing the group changes this number. + TargetSize int64 `json:"targetSize,omitempty"` + + // Zone: The name of the zone where the managed instance group is + // located. + Zone string `json:"zone,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "BaseInstanceName") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *InstanceGroupManager) MarshalJSON() ([]byte, error) { + type noMethod InstanceGroupManager + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type InstanceGroupManagerActionsSummary struct { + // Abandoning: [Output Only] The total number of instances in the + // managed instance group that are scheduled to be abandoned. Abandoning + // an instance removes it from the managed instance group without + // deleting it. + Abandoning int64 `json:"abandoning,omitempty"` + + // Creating: [Output Only] The number of instances in the managed + // instance group that are scheduled to be created or are currently + // being created. If the group fails to create one of these instances, + // it tries again until it creates the instance successfully. + Creating int64 `json:"creating,omitempty"` + + // Deleting: [Output Only] The number of instances in the managed + // instance group that are scheduled to be deleted or are currently + // being deleted. + Deleting int64 `json:"deleting,omitempty"` + + // None: [Output Only] The number of instances in the managed instance + // group that are running and have no scheduled actions. + None int64 `json:"none,omitempty"` + + // Recreating: [Output Only] The number of instances in the managed + // instance group that are scheduled to be recreated or are currently + // being recreated. Recreating an instance deletes the existing + // root persistent disk and creates a new disk from the image that is + // defined in the instance template.
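+ //
+ // A hedged sketch of reading this summary after a get (svc and all
+ // names are hypothetical):
+ //
+ //	igm, err := svc.InstanceGroupManagers.Get("my-project", "us-central1-f", "my-igm").Do()
+ //	if err == nil && igm.CurrentActions != nil {
+ //		pending := igm.CurrentActions.Creating +
+ //			igm.CurrentActions.Recreating +
+ //			igm.CurrentActions.Deleting +
+ //			igm.CurrentActions.Restarting
+ //		fmt.Println("instances with scheduled actions:", pending)
+ //	}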
+ Recreating int64 `json:"recreating,omitempty"` + + // Refreshing: [Output Only] The number of instances in the managed + // instance group that are being reconfigured with properties that do + // not require a restart or a recreate action. For example, setting or + // removing target pools for the instance. + Refreshing int64 `json:"refreshing,omitempty"` + + // Restarting: [Output Only] The number of instances in the managed + // instance group that are scheduled to be restarted or are currently + // being restarted. + Restarting int64 `json:"restarting,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Abandoning") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *InstanceGroupManagerActionsSummary) MarshalJSON() ([]byte, error) { + type noMethod InstanceGroupManagerActionsSummary + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type InstanceGroupManagerAggregatedList struct { + // Id: [Output Only] A unique identifier for this aggregated list of + // managed instance groups. The server generates this identifier. + Id string `json:"id,omitempty"` + + // Items: [Output Only] A map of filtered managed instance group lists. + Items map[string]InstanceGroupManagersScopedList `json:"items,omitempty"` + + // Kind: [Output Only] The resource type, which is always + // compute#instanceGroupManagerAggregatedList for an aggregated list of + // managed instance groups. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] The URL for this resource type. The server + // generates this URL. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *InstanceGroupManagerAggregatedList) MarshalJSON() ([]byte, error) { + type noMethod InstanceGroupManagerAggregatedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// InstanceGroupManagerList: [Output Only] A list of managed instance +// groups. +type InstanceGroupManagerList struct { + // Id: [Output Only] A unique identifier for this resource type. The + // server generates this identifier. + Id string `json:"id,omitempty"` + + // Items: [Output Only] A list of managed instance groups. 
+ Items []*InstanceGroupManager `json:"items,omitempty"` + + // Kind: [Output Only] The resource type, which is always + // compute#instanceGroupManagerList for a list of managed instance + // groups. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] The URL for this resource type. The server + // generates this URL. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *InstanceGroupManagerList) MarshalJSON() ([]byte, error) { + type noMethod InstanceGroupManagerList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type InstanceGroupManagersAbandonInstancesRequest struct { + // Instances: The URL for one or more instances to abandon from the + // managed instance group. + Instances []string `json:"instances,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Instances") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *InstanceGroupManagersAbandonInstancesRequest) MarshalJSON() ([]byte, error) { + type noMethod InstanceGroupManagersAbandonInstancesRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type InstanceGroupManagersDeleteInstancesRequest struct { + // Instances: The list of instances to delete from this managed instance + // group. Specify one or more instance URLs. + Instances []string `json:"instances,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Instances") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
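+ //
+ // A hedged sketch of the paging loop described above (assumes a
+ // configured *compute.Service named svc; the project and zone are
+ // hypothetical):
+ //
+ //	call := svc.InstanceGroupManagers.List("my-project", "us-central1-f")
+ //	for {
+ //		page, err := call.Do()
+ //		if err != nil {
+ //			log.Fatal(err)
+ //		}
+ //		for _, igm := range page.Items {
+ //			fmt.Println(igm.Name)
+ //		}
+ //		if page.NextPageToken == "" {
+ //			break
+ //		}
+ //		call.PageToken(page.NextPageToken)
+ //	}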
+ ForceSendFields []string `json:"-"` +} + +func (s *InstanceGroupManagersDeleteInstancesRequest) MarshalJSON() ([]byte, error) { + type noMethod InstanceGroupManagersDeleteInstancesRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type InstanceGroupManagersListManagedInstancesResponse struct { + // ManagedInstances: [Output Only] The list of instances in the managed + // instance group. + ManagedInstances []*ManagedInstance `json:"managedInstances,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "ManagedInstances") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *InstanceGroupManagersListManagedInstancesResponse) MarshalJSON() ([]byte, error) { + type noMethod InstanceGroupManagersListManagedInstancesResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type InstanceGroupManagersRecreateInstancesRequest struct { + // Instances: The URL for one or more instances to recreate. + Instances []string `json:"instances,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Instances") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *InstanceGroupManagersRecreateInstancesRequest) MarshalJSON() ([]byte, error) { + type noMethod InstanceGroupManagersRecreateInstancesRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type InstanceGroupManagersScopedList struct { + // InstanceGroupManagers: [Output Only] The list of managed instance + // groups that are contained in the specified project and zone. + InstanceGroupManagers []*InstanceGroupManager `json:"instanceGroupManagers,omitempty"` + + // Warning: [Output Only] The warning that replaces the list of managed + // instance groups when the list is empty. + Warning *InstanceGroupManagersScopedListWarning `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "InstanceGroupManagers") to unconditionally include in API requests. + // By default, fields with empty values are omitted from API requests. + // However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *InstanceGroupManagersScopedList) MarshalJSON() ([]byte, error) { + type noMethod InstanceGroupManagersScopedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// InstanceGroupManagersScopedListWarning: [Output Only] The warning +// that replaces the list of managed instance groups when the list is +// empty. 
+type InstanceGroupManagersScopedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } ] + Data []*InstanceGroupManagersScopedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *InstanceGroupManagersScopedListWarning) MarshalJSON() ([]byte, error) { + type noMethod InstanceGroupManagersScopedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type InstanceGroupManagersScopedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *InstanceGroupManagersScopedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod InstanceGroupManagersScopedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type InstanceGroupManagersSetInstanceTemplateRequest struct { + // InstanceTemplate: The URL of the instance template that is specified + // for this managed instance group. The group uses this template to + // create all new instances in the managed instance group.
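+ //
+ // A hedged sketch of issuing this request (svc and all names are
+ // hypothetical):
+ //
+ //	req := &compute.InstanceGroupManagersSetInstanceTemplateRequest{
+ //		InstanceTemplate: "global/instanceTemplates/my-new-template",
+ //	}
+ //	op, err := svc.InstanceGroupManagers.SetInstanceTemplate(
+ //		"my-project", "us-central1-f", "my-igm", req).Do()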
+ InstanceTemplate string `json:"instanceTemplate,omitempty"` + + // ForceSendFields is a list of field names (e.g. "InstanceTemplate") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *InstanceGroupManagersSetInstanceTemplateRequest) MarshalJSON() ([]byte, error) { + type noMethod InstanceGroupManagersSetInstanceTemplateRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type InstanceGroupManagersSetTargetPoolsRequest struct { + // Fingerprint: The fingerprint of the target pools information. Use + // this optional property to prevent conflicts when multiple users + // change the target pools settings concurrently. Obtain the fingerprint + // with the instanceGroupManagers.get method. Then, include the + // fingerprint in your request to ensure that you do not overwrite + // changes that were applied from another concurrent request. + Fingerprint string `json:"fingerprint,omitempty"` + + // TargetPools: The list of target pool URLs that instances in this + // managed instance group belong to. The managed instance group applies + // these target pools to all of the instances in the group. Existing + // instances and new instances in the group all receive these target + // pool settings. + TargetPools []string `json:"targetPools,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Fingerprint") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *InstanceGroupManagersSetTargetPoolsRequest) MarshalJSON() ([]byte, error) { + type noMethod InstanceGroupManagersSetTargetPoolsRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type InstanceGroupsAddInstancesRequest struct { + // Instances: The list of instances to add to the instance group. + Instances []*InstanceReference `json:"instances,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Instances") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *InstanceGroupsAddInstancesRequest) MarshalJSON() ([]byte, error) { + type noMethod InstanceGroupsAddInstancesRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type InstanceGroupsListInstances struct { + // Id: [Output Only] A unique identifier for this list of instance + // groups. The server generates this identifier. + Id string `json:"id,omitempty"` + + // Items: [Output Only] A list of instances and any named ports that are + // assigned to those instances. 
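+ //
+ // A hedged sketch of the InstanceGroupsAddInstancesRequest defined
+ // above (svc and all names are hypothetical):
+ //
+ //	req := &compute.InstanceGroupsAddInstancesRequest{
+ //		Instances: []*compute.InstanceReference{
+ //			{Instance: "zones/us-central1-f/instances/my-instance"},
+ //		},
+ //	}
+ //	op, err := svc.InstanceGroups.AddInstances(
+ //		"my-project", "us-central1-f", "my-group", req).Do()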
+ Items []*InstanceWithNamedPorts `json:"items,omitempty"` + + // Kind: [Output Only] The resource type, which is always + // compute#instanceGroupsListInstances for lists of instance groups. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] The URL for this list of instance groups. The + // server generates this URL. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *InstanceGroupsListInstances) MarshalJSON() ([]byte, error) { + type noMethod InstanceGroupsListInstances + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type InstanceGroupsListInstancesRequest struct { + // InstanceState: A filter for the state of the instances in the + // instance group. Valid options are ALL or RUNNING. If you do not + // specify this parameter the list includes all instances regardless of + // their state. + // + // Possible values: + // "ALL" + // "RUNNING" + InstanceState string `json:"instanceState,omitempty"` + + // ForceSendFields is a list of field names (e.g. "InstanceState") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *InstanceGroupsListInstancesRequest) MarshalJSON() ([]byte, error) { + type noMethod InstanceGroupsListInstancesRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type InstanceGroupsRemoveInstancesRequest struct { + // Instances: The list of instances to remove from the instance group. + Instances []*InstanceReference `json:"instances,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Instances") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
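+ //
+ // A hedged sketch of the list-instances request defined above,
+ // filtering to running instances (svc and all names are hypothetical):
+ //
+ //	req := &compute.InstanceGroupsListInstancesRequest{InstanceState: "RUNNING"}
+ //	page, err := svc.InstanceGroups.ListInstances(
+ //		"my-project", "us-central1-f", "my-group", req).Do()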
+ ForceSendFields []string `json:"-"` +} + +func (s *InstanceGroupsRemoveInstancesRequest) MarshalJSON() ([]byte, error) { + type noMethod InstanceGroupsRemoveInstancesRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type InstanceGroupsScopedList struct { + // InstanceGroups: [Output Only] The list of instance groups that are + // contained in this scope. + InstanceGroups []*InstanceGroup `json:"instanceGroups,omitempty"` + + // Warning: [Output Only] An informational warning that replaces the + // list of instance groups when the list is empty. + Warning *InstanceGroupsScopedListWarning `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. "InstanceGroups") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *InstanceGroupsScopedList) MarshalJSON() ([]byte, error) { + type noMethod InstanceGroupsScopedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// InstanceGroupsScopedListWarning: [Output Only] An informational +// warning that replaces the list of instance groups when the list is +// empty. +type InstanceGroupsScopedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } ] + Data []*InstanceGroupsScopedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *InstanceGroupsScopedListWarning) MarshalJSON() ([]byte, error) { + type noMethod InstanceGroupsScopedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type InstanceGroupsScopedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name.
Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *InstanceGroupsScopedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod InstanceGroupsScopedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type InstanceGroupsSetNamedPortsRequest struct { + // Fingerprint: The fingerprint of the named ports information for this + // instance group. Use this optional property to prevent conflicts when + // multiple users change the named ports settings concurrently. Obtain + // the fingerprint with the instanceGroups.get method. Then, include the + // fingerprint in your request to ensure that you do not overwrite + // changes that were applied from another concurrent request. + Fingerprint string `json:"fingerprint,omitempty"` + + // NamedPorts: The list of named ports to set for this instance group. + NamedPorts []*NamedPort `json:"namedPorts,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Fingerprint") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *InstanceGroupsSetNamedPortsRequest) MarshalJSON() ([]byte, error) { + type noMethod InstanceGroupsSetNamedPortsRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// InstanceList: Contains a list of instances. +type InstanceList struct { + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id string `json:"id,omitempty"` + + // Items: [Output Only] A list of instances. + Items []*Instance `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always compute#instanceList for + // lists of Instance resources. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. 
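+ //
+ // A hedged sketch of the fingerprint workflow described above for
+ // instanceGroups.setNamedPorts (svc and all names are hypothetical):
+ //
+ //	ig, err := svc.InstanceGroups.Get("my-project", "us-central1-f", "my-group").Do()
+ //	if err == nil {
+ //		req := &compute.InstanceGroupsSetNamedPortsRequest{
+ //			Fingerprint: ig.Fingerprint,
+ //			NamedPorts:  []*compute.NamedPort{{Name: "http", Port: 8080}},
+ //		}
+ //		_, err = svc.InstanceGroups.SetNamedPorts(
+ //			"my-project", "us-central1-f", "my-group", req).Do()
+ //	}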
+ googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *InstanceList) MarshalJSON() ([]byte, error) { + type noMethod InstanceList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type InstanceMoveRequest struct { + // DestinationZone: The URL of the destination zone to move the + // instance. This can be a full or partial URL. For example, the + // following are all valid URLs to a zone: + // - https://www.googleapis.com/compute/v1/projects/project/zones/zone + // + // - projects/project/zones/zone + // - zones/zone + DestinationZone string `json:"destinationZone,omitempty"` + + // TargetInstance: The URL of the target instance to move. This can be a + // full or partial URL. For example, the following are all valid URLs to + // an instance: + // - + // https://www.googleapis.com/compute/v1/projects/project/zones/zone/instances/instance + // - projects/project/zones/zone/instances/instance + // - zones/zone/instances/instance + TargetInstance string `json:"targetInstance,omitempty"` + + // ForceSendFields is a list of field names (e.g. "DestinationZone") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *InstanceMoveRequest) MarshalJSON() ([]byte, error) { + type noMethod InstanceMoveRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type InstanceProperties struct { + // CanIpForward: Enables instances created based on this template to + // send packets with source IP addresses other than their own and + // receive packets with destination IP addresses other than their own. + // If these instances will be used as an IP gateway or it will be set as + // the next-hop in a Route resource, specify true. If unsure, leave this + // set to false. See the canIpForward documentation for more + // information. + CanIpForward bool `json:"canIpForward,omitempty"` + + // Description: An optional text description for the instances that are + // created from this instance template. + Description string `json:"description,omitempty"` + + // Disks: An array of disks that are associated with the instances that + // are created from this template. + Disks []*AttachedDisk `json:"disks,omitempty"` + + // MachineType: The machine type to use for instances that are created + // from this template. + MachineType string `json:"machineType,omitempty"` + + // Metadata: The metadata key/value pairs to assign to instances that + // are created from this template. These pairs can consist of custom + // metadata or predefined keys. See Project and instance metadata for + // more information. + Metadata *Metadata `json:"metadata,omitempty"` + + // NetworkInterfaces: An array of network access configurations for this + // interface. 
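+ //
+ // A hedged sketch of the InstanceMoveRequest defined above (svc and
+ // all names are hypothetical):
+ //
+ //	req := &compute.InstanceMoveRequest{
+ //		DestinationZone: "zones/us-central1-f",
+ //		TargetInstance:  "zones/us-central1-a/instances/my-instance",
+ //	}
+ //	op, err := svc.Projects.MoveInstance("my-project", req).Do()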
+ NetworkInterfaces []*NetworkInterface `json:"networkInterfaces,omitempty"` + + // Scheduling: Specifies the scheduling options for the instances that + // are created from this template. + Scheduling *Scheduling `json:"scheduling,omitempty"` + + // ServiceAccounts: A list of service accounts with specified scopes. + // Access tokens for these service accounts are available to the + // instances that are created from this template. Use metadata queries + // to obtain the access tokens for these instances. + ServiceAccounts []*ServiceAccount `json:"serviceAccounts,omitempty"` + + // Tags: A list of tags to apply to the instances that are created from + // this template. The tags identify valid sources or targets for network + // firewalls. The setTags method can modify this list of tags. Each tag + // within the list must comply with RFC1035. + Tags *Tags `json:"tags,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CanIpForward") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *InstanceProperties) MarshalJSON() ([]byte, error) { + type noMethod InstanceProperties + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type InstanceReference struct { + // Instance: The URL for a specific instance. + Instance string `json:"instance,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Instance") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *InstanceReference) MarshalJSON() ([]byte, error) { + type noMethod InstanceReference + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// InstanceTemplate: An Instance Template resource. +type InstanceTemplate struct { + // CreationTimestamp: [Output Only] The creation timestamp for this + // instance template in RFC3339 text format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // Id: [Output Only] A unique identifier for this instance template. The + // server defines this identifier. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] The resource type, which is always + // compute#instanceTemplate for instance templates. + Kind string `json:"kind,omitempty"` + + // Name: Name of the resource; provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. 
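+ //
+ // A hedged sketch of creating an instance template (assumes a
+ // configured *compute.Service named svc; all names and values are
+ // hypothetical):
+ //
+ //	tmpl := &compute.InstanceTemplate{
+ //		Name: "my-template",
+ //		Properties: &compute.InstanceProperties{
+ //			MachineType: "n1-standard-1",
+ //			NetworkInterfaces: []*compute.NetworkInterface{
+ //				{Network: "global/networks/default"},
+ //			},
+ //		},
+ //	}
+ //	op, err := svc.InstanceTemplates.Insert("my-project", tmpl).Do()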
+ Name string `json:"name,omitempty"` + + // Properties: The instance properties for this instance template. + Properties *InstanceProperties `json:"properties,omitempty"` + + // SelfLink: [Output Only] The URL for this instance template. The + // server defines this URL. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *InstanceTemplate) MarshalJSON() ([]byte, error) { + type noMethod InstanceTemplate + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// InstanceTemplateList: A list of instance templates. +type InstanceTemplateList struct { + // Id: [Output Only] A unique identifier for this list of instance + // templates. The server defines this identifier. + Id string `json:"id,omitempty"` + + // Items: [Output Only] A list of InstanceTemplate resources. + Items []*InstanceTemplate `json:"items,omitempty"` + + // Kind: [Output Only] The resource type, which is always + // compute#instanceTemplatesListResponse for instance template lists. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] The URL for this instance template list. The + // server defines this URL. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *InstanceTemplateList) MarshalJSON() ([]byte, error) { + type noMethod InstanceTemplateList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type InstanceWithNamedPorts struct { + // Instance: [Output Only] The URL of the instance. + Instance string `json:"instance,omitempty"` + + // NamedPorts: [Output Only] The named ports that belong to this + // instance group. + NamedPorts []*NamedPort `json:"namedPorts,omitempty"` + + // Status: [Output Only] The status of the instance.
+ // + // Possible values: + // "PROVISIONING" + // "RUNNING" + // "STAGING" + // "STOPPED" + // "STOPPING" + // "SUSPENDED" + // "SUSPENDING" + // "TERMINATED" + Status string `json:"status,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Instance") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *InstanceWithNamedPorts) MarshalJSON() ([]byte, error) { + type noMethod InstanceWithNamedPorts + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type InstancesScopedList struct { + // Instances: [Output Only] List of instances contained in this scope. + Instances []*Instance `json:"instances,omitempty"` + + // Warning: [Output Only] Informational warning which replaces the list + // of instances when the list is empty. + Warning *InstancesScopedListWarning `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Instances") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *InstancesScopedList) MarshalJSON() ([]byte, error) { + type noMethod InstancesScopedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// InstancesScopedListWarning: [Output Only] Informational warning which +// replaces the list of instances when the list is empty. +type InstancesScopedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*InstancesScopedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
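+
+// Editor's sketch: ForceSendFields in action. An empty field listed there is
+// still serialized, which is how a deliberate "clear this field" patch is
+// expressed; without it, the empty value would be dropped entirely.
+//
+//   ref := &InstanceReference{ForceSendFields: []string{"Instance"}}
+//   b, _ := ref.MarshalJSON()
+//   // string(b) == `{"instance":""}`; with no ForceSendFields it would be `{}`
+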
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *InstancesScopedListWarning) MarshalJSON() ([]byte, error) {
+ type noMethod InstancesScopedListWarning
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+type InstancesScopedListWarningData struct {
+ // Key: [Output Only] A key that provides more detail on the warning
+ // being returned. For example, for warnings where there are no results
+ // in a list request for a particular zone, this key might be scope and
+ // the key value might be the zone name. Other examples might be a key
+ // indicating a deprecated resource and a suggested replacement, or a
+ // warning about invalid network settings (for example, if an instance
+ // attempts to perform IP forwarding but is not enabled for IP
+ // forwarding).
+ Key string `json:"key,omitempty"`
+
+ // Value: [Output Only] A warning data value corresponding to the key.
+ Value string `json:"value,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "Key") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *InstancesScopedListWarningData) MarshalJSON() ([]byte, error) {
+ type noMethod InstancesScopedListWarningData
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+type InstancesSetMachineTypeRequest struct {
+ // MachineType: Full or partial URL of the machine type resource. See
+ // Machine Types for a full list of machine types. For example:
+ // zones/us-central1-f/machineTypes/n1-standard-1
+ MachineType string `json:"machineType,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "MachineType") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *InstancesSetMachineTypeRequest) MarshalJSON() ([]byte, error) {
+ type noMethod InstancesSetMachineTypeRequest
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// License: A license resource.
+type License struct {
+ // ChargesUseFee: [Output Only] If true, the customer will be charged a
+ // license fee for running software that contains this license on an
+ // instance.
+ ChargesUseFee bool `json:"chargesUseFee,omitempty"`
+
+ // Kind: [Output Only] Type of resource. Always compute#license for
+ // licenses.
+ Kind string `json:"kind,omitempty"`
+
+ // Name: [Output Only] Name of the resource. The name is 1-63 characters
+ // long and complies with RFC1035.
+ Name string `json:"name,omitempty"`
+
+ // SelfLink: [Output Only] Server-defined URL for the resource.
+ SelfLink string `json:"selfLink,omitempty"`
+
+ // ServerResponse contains the HTTP response code and headers from the
+ // server.
+ googleapi.ServerResponse `json:"-"`
+
+ // ForceSendFields is a list of field names (e.g. "ChargesUseFee") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests.
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *License) MarshalJSON() ([]byte, error) { + type noMethod License + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// MachineType: A Machine Type resource. +type MachineType struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Deprecated: [Output Only] The deprecation status associated with this + // machine type. + Deprecated *DeprecationStatus `json:"deprecated,omitempty"` + + // Description: [Output Only] An optional textual description of the + // resource. + Description string `json:"description,omitempty"` + + // GuestCpus: [Output Only] The number of virtual CPUs that are + // available to the instance. + GuestCpus int64 `json:"guestCpus,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // ImageSpaceGb: [Deprecated] This property is deprecated and will never + // be populated with any relevant values. + ImageSpaceGb int64 `json:"imageSpaceGb,omitempty"` + + // Kind: [Output Only] The type of the resource. Always + // compute#machineType for machine types. + Kind string `json:"kind,omitempty"` + + // MaximumPersistentDisks: [Output Only] Maximum persistent disks + // allowed. + MaximumPersistentDisks int64 `json:"maximumPersistentDisks,omitempty"` + + // MaximumPersistentDisksSizeGb: [Output Only] Maximum total persistent + // disks size (GB) allowed. + MaximumPersistentDisksSizeGb int64 `json:"maximumPersistentDisksSizeGb,omitempty,string"` + + // MemoryMb: [Output Only] The amount of physical memory available to + // the instance, defined in MB. + MemoryMb int64 `json:"memoryMb,omitempty"` + + // Name: [Output Only] Name of the resource. + Name string `json:"name,omitempty"` + + // ScratchDisks: [Output Only] List of extended scratch disks assigned + // to the instance. + ScratchDisks []*MachineTypeScratchDisks `json:"scratchDisks,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // Zone: [Output Only] The name of the zone where the machine type + // resides, such as us-central1-a. + Zone string `json:"zone,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *MachineType) MarshalJSON() ([]byte, error) { + type noMethod MachineType + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type MachineTypeScratchDisks struct { + // DiskGb: Size of the scratch disk, defined in GB. + DiskGb int64 `json:"diskGb,omitempty"` + + // ForceSendFields is a list of field names (e.g. 
"DiskGb") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *MachineTypeScratchDisks) MarshalJSON() ([]byte, error) { + type noMethod MachineTypeScratchDisks + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type MachineTypeAggregatedList struct { + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id string `json:"id,omitempty"` + + // Items: [Output Only] A map of scoped machine type lists. + Items map[string]MachineTypesScopedList `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always + // compute#machineTypeAggregatedList for aggregated lists of machine + // types. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *MachineTypeAggregatedList) MarshalJSON() ([]byte, error) { + type noMethod MachineTypeAggregatedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// MachineTypeList: Contains a list of machine types. +type MachineTypeList struct { + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id string `json:"id,omitempty"` + + // Items: [Output Only] A list of Machine Type resources. + Items []*MachineType `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always compute#machineTypeList + // for lists of machine types. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. 
+ googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *MachineTypeList) MarshalJSON() ([]byte, error) { + type noMethod MachineTypeList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type MachineTypesScopedList struct { + // MachineTypes: [Output Only] List of machine types contained in this + // scope. + MachineTypes []*MachineType `json:"machineTypes,omitempty"` + + // Warning: [Output Only] An informational warning that appears when the + // machine types list is empty. + Warning *MachineTypesScopedListWarning `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. "MachineTypes") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *MachineTypesScopedList) MarshalJSON() ([]byte, error) { + type noMethod MachineTypesScopedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// MachineTypesScopedListWarning: [Output Only] An informational warning +// that appears when the machine types list is empty. +type MachineTypesScopedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*MachineTypesScopedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *MachineTypesScopedListWarning) MarshalJSON() ([]byte, error) {
+ type noMethod MachineTypesScopedListWarning
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+type MachineTypesScopedListWarningData struct {
+ // Key: [Output Only] A key that provides more detail on the warning
+ // being returned. For example, for warnings where there are no results
+ // in a list request for a particular zone, this key might be scope and
+ // the key value might be the zone name. Other examples might be a key
+ // indicating a deprecated resource and a suggested replacement, or a
+ // warning about invalid network settings (for example, if an instance
+ // attempts to perform IP forwarding but is not enabled for IP
+ // forwarding).
+ Key string `json:"key,omitempty"`
+
+ // Value: [Output Only] A warning data value corresponding to the key.
+ Value string `json:"value,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "Key") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *MachineTypesScopedListWarningData) MarshalJSON() ([]byte, error) {
+ type noMethod MachineTypesScopedListWarningData
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+type ManagedInstance struct {
+ // CurrentAction: [Output Only] The current action that the managed
+ // instance group has scheduled for the instance. Possible values:
+ // - NONE The instance is running, and the managed instance group does
+ // not have any scheduled actions for this instance.
+ // - CREATING The managed instance group is creating this instance. If
+ // the group fails to create this instance, it will try again until it
+ // is successful.
+ // - CREATING_WITHOUT_RETRIES The managed instance group is attempting
+ // to create this instance only once. If the group fails to create this
+ // instance, it does not try again and the group's target_size value is
+ // decreased.
+ // - RECREATING The managed instance group is recreating this instance.
+ //
+ // - DELETING The managed instance group is permanently deleting this
+ // instance.
+ // - ABANDONING The managed instance group is abandoning this instance.
+ // The instance will be removed from the instance group and from any
+ // target pools that are associated with this group.
+ // - RESTARTING The managed instance group is restarting the instance.
+ //
+ // - REFRESHING The managed instance group is applying configuration
+ // changes to the instance without stopping it. For example, the group
+ // can update the target pool list for an instance without stopping that
+ // instance.
+ //
+ // Possible values:
+ // "ABANDONING"
+ // "CREATING"
+ // "DELETING"
+ // "NONE"
+ // "RECREATING"
+ // "REFRESHING"
+ // "RESTARTING"
+ CurrentAction string `json:"currentAction,omitempty"`
+
+ // Id: [Output Only] The unique identifier for this resource. This field
+ // is empty when the instance does not exist.
+ Id uint64 `json:"id,omitempty,string"`
+
+ // Instance: [Output Only] The URL of the instance. The URL can exist
+ // even if the instance has not yet been created.
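+
+// Editor's sketch: CurrentAction is a plain string, so callers usually switch
+// over the documented values ("mi" is a hypothetical *ManagedInstance).
+//
+//   switch mi.CurrentAction {
+//   case "NONE":
+//           // stable; the group has nothing scheduled for this instance
+//   case "CREATING", "RECREATING", "RESTARTING", "REFRESHING":
+//           // the group is still converging on this instance
+//   case "DELETING", "ABANDONING":
+//           // the instance is on its way out of the group
+//   }
+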
+ Instance string `json:"instance,omitempty"` + + // InstanceStatus: [Output Only] The status of the instance. This field + // is empty when the instance does not exist. + // + // Possible values: + // "PROVISIONING" + // "RUNNING" + // "STAGING" + // "STOPPED" + // "STOPPING" + // "SUSPENDED" + // "SUSPENDING" + // "TERMINATED" + InstanceStatus string `json:"instanceStatus,omitempty"` + + // LastAttempt: [Output Only] Information about the last attempt to + // create or delete the instance. + LastAttempt *ManagedInstanceLastAttempt `json:"lastAttempt,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CurrentAction") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *ManagedInstance) MarshalJSON() ([]byte, error) { + type noMethod ManagedInstance + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type ManagedInstanceLastAttempt struct { + // Errors: [Output Only] Encountered errors during the last attempt to + // create or delete the instance. + Errors *ManagedInstanceLastAttemptErrors `json:"errors,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Errors") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *ManagedInstanceLastAttempt) MarshalJSON() ([]byte, error) { + type noMethod ManagedInstanceLastAttempt + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// ManagedInstanceLastAttemptErrors: [Output Only] Encountered errors +// during the last attempt to create or delete the instance. +type ManagedInstanceLastAttemptErrors struct { + // Errors: [Output Only] The array of errors encountered while + // processing this operation. + Errors []*ManagedInstanceLastAttemptErrorsErrors `json:"errors,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Errors") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *ManagedInstanceLastAttemptErrors) MarshalJSON() ([]byte, error) { + type noMethod ManagedInstanceLastAttemptErrors + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type ManagedInstanceLastAttemptErrorsErrors struct { + // Code: [Output Only] The error type identifier for this error. + Code string `json:"code,omitempty"` + + // Location: [Output Only] Indicates the field in the request that + // caused the error. This property is optional. + Location string `json:"location,omitempty"` + + // Message: [Output Only] An optional, human-readable error message. 
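+
+// Editor's sketch: surfacing the last-attempt errors described above ("mi" is
+// a hypothetical *ManagedInstance; note the doubly nested Errors).
+//
+//   if la := mi.LastAttempt; la != nil && la.Errors != nil {
+//           for _, e := range la.Errors.Errors {
+//                   log.Printf("%s at %s: %s", e.Code, e.Location, e.Message)
+//           }
+//   }
+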
+ Message string `json:"message,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "Code") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *ManagedInstanceLastAttemptErrorsErrors) MarshalJSON() ([]byte, error) {
+ type noMethod ManagedInstanceLastAttemptErrorsErrors
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// Metadata: A collection of metadata key/value entries.
+type Metadata struct {
+ // Fingerprint: Specifies a fingerprint for this request, which is
+ // essentially a hash of the metadata's contents and used for optimistic
+ // locking. The fingerprint is initially generated by Compute Engine and
+ // changes after every request to modify or update metadata. You must
+ // always provide an up-to-date fingerprint hash in order to update or
+ // change metadata.
+ Fingerprint string `json:"fingerprint,omitempty"`
+
+ // Items: Array of key/value pairs. The total size of all keys and
+ // values must be less than 512 KB.
+ Items []*MetadataItems `json:"items,omitempty"`
+
+ // Kind: [Output Only] Type of the resource. Always compute#metadata for
+ // metadata.
+ Kind string `json:"kind,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "Fingerprint") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *Metadata) MarshalJSON() ([]byte, error) {
+ type noMethod Metadata
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+type MetadataItems struct {
+ // Key: Key for the metadata entry. Keys must conform to the following
+ // regexp: [a-zA-Z0-9-_]+, and be less than 128 bytes in length. This is
+ // reflected as part of a URL in the metadata server. Additionally, to
+ // avoid ambiguity, keys must not conflict with any other metadata keys
+ // for the project.
+ Key string `json:"key,omitempty"`
+
+ // Value: Value for the metadata entry. These are free-form strings, and
+ // only have meaning as interpreted by the image running in the
+ // instance. The only restriction placed on values is that their size
+ // must be less than or equal to 32768 bytes.
+ Value *string `json:"value,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "Key") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *MetadataItems) MarshalJSON() ([]byte, error) {
+ type noMethod MetadataItems
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// NamedPort: The named port.
+type NamedPort struct {
+ // Name: The name for this named port.
The name must be 1-63 characters + // long, and comply with RFC1035. + Name string `json:"name,omitempty"` + + // Port: The port number, which can be a value between 1 and 65535. + Port int64 `json:"port,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Name") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *NamedPort) MarshalJSON() ([]byte, error) { + type noMethod NamedPort + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// Network: Represents a Network resource. Read Networks and Firewalls +// for more information. +type Network struct { + // IPv4Range: The range of internal addresses that are legal on this + // network. This range is a CIDR specification, for example: + // 192.168.0.0/16. Provided by the client when the network is created. + IPv4Range string `json:"IPv4Range,omitempty"` + + // AutoCreateSubnetworks: When set to true, the network is created in + // "auto subnet mode". When set to false, the network is in "custom + // subnet mode". + // + // In "auto subnet mode", a newly created network is assigned the + // default CIDR of 10.128.0.0/9 and it automatically creates one + // subnetwork per region. + AutoCreateSubnetworks bool `json:"autoCreateSubnetworks,omitempty"` + + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // GatewayIPv4: A gateway address for default routing to other networks. + // This value is read only and is selected by the Google Compute Engine, + // typically as the first usable address in the IPv4Range. + GatewayIPv4 string `json:"gatewayIPv4,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] Type of the resource. Always compute#network for + // networks. + Kind string `json:"kind,omitempty"` + + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // Subnetworks: [Output Only] Server-defined fully-qualified URLs for + // all subnetworks in this network. + Subnetworks []string `json:"subnetworks,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "IPv4Range") to + // unconditionally include in API requests. 
By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *Network) MarshalJSON() ([]byte, error) {
+ type noMethod Network
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// NetworkInterface: A network interface resource attached to an
+// instance.
+type NetworkInterface struct {
+ // AccessConfigs: An array of configurations for this interface.
+ // Currently, ONE_TO_ONE_NAT is the only access config supported. If
+ // there are no accessConfigs specified, then this instance will have no
+ // external internet access.
+ AccessConfigs []*AccessConfig `json:"accessConfigs,omitempty"`
+
+ // Name: [Output Only] The name of the network interface, generated by
+ // the server. For network devices, these are eth0, eth1, etc.
+ Name string `json:"name,omitempty"`
+
+ // Network: URL of the network resource for this instance. This is
+ // required for creating an instance but optional when creating a
+ // firewall rule. If not specified when creating a firewall rule, the
+ // default network is used:
+ //
+ // global/networks/default
+ //
+ // If you specify this property, you can specify the network as a full
+ // or partial URL. For example, the following are all valid URLs:
+ // -
+ // https://www.googleapis.com/compute/v1/projects/project/global/networks/network
+ // - projects/project/global/networks/network
+ // - global/networks/default
+ Network string `json:"network,omitempty"`
+
+ // NetworkIP: An IPv4 internal network address to assign to the instance
+ // for this network interface. If not specified by the user, an unused
+ // internal IP address is assigned by the system.
+ NetworkIP string `json:"networkIP,omitempty"`
+
+ // Subnetwork: The URL of the Subnetwork resource for this instance. If
+ // the network resource is in legacy mode, do not provide this property.
+ // If the network is in auto subnet mode, providing the subnetwork is
+ // optional. If the network is in custom subnet mode, then this field
+ // should be specified. If you specify this property, you can specify
+ // the subnetwork as a full or partial URL. For example, the following
+ // are all valid URLs:
+ // -
+ // https://www.googleapis.com/compute/v1/projects/project/regions/region/subnetworks/subnetwork
+ // - regions/region/subnetworks/subnetwork
+ Subnetwork string `json:"subnetwork,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "AccessConfigs") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *NetworkInterface) MarshalJSON() ([]byte, error) {
+ type noMethod NetworkInterface
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// NetworkList: Contains a list of networks.
+type NetworkList struct {
+ // Id: [Output Only] The unique identifier for the resource. This
+ // identifier is defined by the server.
+ Id string `json:"id,omitempty"`
+
+ // Items: [Output Only] A list of Network resources.
+ Items []*Network `json:"items,omitempty"`
+
+ // Kind: [Output Only] Type of resource. Always compute#networkList for
+ // lists of networks.
+ Kind string `json:"kind,omitempty"`
+
+ // NextPageToken: [Output Only] This token allows you to get the next
+ // page of results for list requests. If the number of results is larger
+ // than maxResults, use the nextPageToken as a value for the query
+ // parameter pageToken in the next list request. Subsequent list
+ // requests will have their own nextPageToken to continue paging through
+ // the results.
+ NextPageToken string `json:"nextPageToken,omitempty"`
+
+ // SelfLink: [Output Only] Server-defined URL for this resource.
+ SelfLink string `json:"selfLink,omitempty"`
+
+ // ServerResponse contains the HTTP response code and headers from the
+ // server.
+ googleapi.ServerResponse `json:"-"`
+
+ // ForceSendFields is a list of field names (e.g. "Id") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *NetworkList) MarshalJSON() ([]byte, error) {
+ type noMethod NetworkList
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// Operation: An Operation resource, used to manage asynchronous API
+// requests.
+type Operation struct {
+ // ClientOperationId: [Output Only] Reserved for future use.
+ ClientOperationId string `json:"clientOperationId,omitempty"`
+
+ // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text
+ // format.
+ CreationTimestamp string `json:"creationTimestamp,omitempty"`
+
+ // Description: [Output Only] A textual description of the operation,
+ // which is set when the operation is created.
+ Description string `json:"description,omitempty"`
+
+ // EndTime: [Output Only] The time that this operation was completed.
+ // This value is in RFC3339 text format.
+ EndTime string `json:"endTime,omitempty"`
+
+ // Error: [Output Only] If errors are generated during processing of the
+ // operation, this field will be populated.
+ Error *OperationError `json:"error,omitempty"`
+
+ // HttpErrorMessage: [Output Only] If the operation fails, this field
+ // contains the HTTP error message that was returned, such as NOT FOUND.
+ HttpErrorMessage string `json:"httpErrorMessage,omitempty"`
+
+ // HttpErrorStatusCode: [Output Only] If the operation fails, this field
+ // contains the HTTP error status code that was returned. For example, a
+ // 404 means the resource was not found.
+ HttpErrorStatusCode int64 `json:"httpErrorStatusCode,omitempty"`
+
+ // Id: [Output Only] The unique identifier for the resource. This
+ // identifier is defined by the server.
+ Id uint64 `json:"id,omitempty,string"`
+
+ // InsertTime: [Output Only] The time that this operation was requested.
+ // This value is in RFC3339 text format.
+ InsertTime string `json:"insertTime,omitempty"`
+
+ // Kind: [Output Only] Type of the resource. Always compute#operation
+ // for Operation resources.
+ Kind string `json:"kind,omitempty"`
+
+ // Name: [Output Only] Name of the resource.
+ Name string `json:"name,omitempty"`
+
+ // OperationType: [Output Only] The type of operation, such as insert,
+ // update, or delete, and so on.
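+
+// Editor's sketch: because Operations are asynchronous, callers poll until
+// Status (documented below) reaches DONE, then check Error. "getOperation" is
+// a hypothetical stand-in for the appropriate zonal, regional, or global get
+// call; the fixed sleep is illustrative only.
+//
+//   for {
+//           op, err := getOperation() // (*Operation, error)
+//           if err != nil {
+//                   return err
+//           }
+//           if op.Status == "DONE" {
+//                   if op.Error != nil {
+//                           // completed but failed; inspect op.Error.Errors
+//                   }
+//                   break
+//           }
+//           time.Sleep(time.Second)
+//   }
+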
+ OperationType string `json:"operationType,omitempty"` + + // Progress: [Output Only] An optional progress indicator that ranges + // from 0 to 100. There is no requirement that this be linear or support + // any granularity of operations. This should not be used to guess when + // the operation will be complete. This number should monotonically + // increase as the operation progresses. + Progress int64 `json:"progress,omitempty"` + + // Region: [Output Only] The URL of the region where the operation + // resides. Only available when performing regional operations. + Region string `json:"region,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // StartTime: [Output Only] The time that this operation was started by + // the server. This value is in RFC3339 text format. + StartTime string `json:"startTime,omitempty"` + + // Status: [Output Only] The status of the operation, which can be one + // of the following: PENDING, RUNNING, or DONE. + // + // Possible values: + // "DONE" + // "PENDING" + // "RUNNING" + Status string `json:"status,omitempty"` + + // StatusMessage: [Output Only] An optional textual description of the + // current status of the operation. + StatusMessage string `json:"statusMessage,omitempty"` + + // TargetId: [Output Only] The unique target ID, which identifies a + // specific incarnation of the target resource. + TargetId uint64 `json:"targetId,omitempty,string"` + + // TargetLink: [Output Only] The URL of the resource that the operation + // modifies. + TargetLink string `json:"targetLink,omitempty"` + + // User: [Output Only] User who requested the operation, for example: + // user@example.com. + User string `json:"user,omitempty"` + + // Warnings: [Output Only] If warning messages are generated during + // processing of the operation, this field will be populated. + Warnings []*OperationWarnings `json:"warnings,omitempty"` + + // Zone: [Output Only] The URL of the zone where the operation resides. + // Only available when performing per-zone operations. + Zone string `json:"zone,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "ClientOperationId") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *Operation) MarshalJSON() ([]byte, error) { + type noMethod Operation + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// OperationError: [Output Only] If errors are generated during +// processing of the operation, this field will be populated. +type OperationError struct { + // Errors: [Output Only] The array of errors encountered while + // processing this operation. + Errors []*OperationErrorErrors `json:"errors,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Errors") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. 
This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *OperationError) MarshalJSON() ([]byte, error) { + type noMethod OperationError + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type OperationErrorErrors struct { + // Code: [Output Only] The error type identifier for this error. + Code string `json:"code,omitempty"` + + // Location: [Output Only] Indicates the field in the request that + // caused the error. This property is optional. + Location string `json:"location,omitempty"` + + // Message: [Output Only] An optional, human-readable error message. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *OperationErrorErrors) MarshalJSON() ([]byte, error) { + type noMethod OperationErrorErrors + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type OperationWarnings struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*OperationWarningsData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *OperationWarnings) MarshalJSON() ([]byte, error) { + type noMethod OperationWarnings + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type OperationWarningsData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. 
Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *OperationWarningsData) MarshalJSON() ([]byte, error) { + type noMethod OperationWarningsData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type OperationAggregatedList struct { + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id string `json:"id,omitempty"` + + // Items: [Output Only] A map of scoped operation lists. + Items map[string]OperationsScopedList `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always + // compute#operationAggregatedList for aggregated lists of operations. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *OperationAggregatedList) MarshalJSON() ([]byte, error) { + type noMethod OperationAggregatedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// OperationList: Contains a list of Operation resources. +type OperationList struct { + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id string `json:"id,omitempty"` + + // Items: [Output Only] A list of Operation resources. + Items []*Operation `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always compute#operations for + // Operations resource. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. 
If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *OperationList) MarshalJSON() ([]byte, error) { + type noMethod OperationList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type OperationsScopedList struct { + // Operations: [Output Only] List of operations contained in this scope. + Operations []*Operation `json:"operations,omitempty"` + + // Warning: [Output Only] Informational warning which replaces the list + // of operations when the list is empty. + Warning *OperationsScopedListWarning `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Operations") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *OperationsScopedList) MarshalJSON() ([]byte, error) { + type noMethod OperationsScopedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// OperationsScopedListWarning: [Output Only] Informational warning +// which replaces the list of operations when the list is empty. +type OperationsScopedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*OperationsScopedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. 
By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *OperationsScopedListWarning) MarshalJSON() ([]byte, error) { + type noMethod OperationsScopedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type OperationsScopedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *OperationsScopedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod OperationsScopedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// PathMatcher: A matcher for the path portion of the URL. The +// BackendService from the longest-matched rule will serve the URL. If +// no rule was matched, the default service will be used. +type PathMatcher struct { + // DefaultService: The full or partial URL to the BackendService + // resource. This will be used if none of the pathRules defined by this + // PathMatcher is matched by the URL's path portion. For example, the + // following are all valid URLs to a BackendService resource: + // - + // https://www.googleapis.com/compute/v1/projects/project/global/backendServices/backendService + // - compute/v1/projects/project/global/backendServices/backendService + // + // - global/backendServices/backendService + DefaultService string `json:"defaultService,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // Name: The name to which this PathMatcher is referred by the HostRule. + Name string `json:"name,omitempty"` + + // PathRules: The list of path rules. + PathRules []*PathRule `json:"pathRules,omitempty"` + + // ForceSendFields is a list of field names (e.g. "DefaultService") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
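+
+// Editor's sketch: warning Data entries like the ones documented above are
+// plain key/value pairs and are often easier to consume as a map ("w" is a
+// hypothetical *OperationsScopedListWarning).
+//
+//   meta := make(map[string]string, len(w.Data))
+//   for _, d := range w.Data {
+//           meta[d.Key] = d.Value
+//   }
+//   // e.g. meta["scope"] == "zones/us-east1-d"
+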
+ ForceSendFields []string `json:"-"` +} + +func (s *PathMatcher) MarshalJSON() ([]byte, error) { + type noMethod PathMatcher + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// PathRule: A path-matching rule for a URL. If matched, will use the +// specified BackendService to handle the traffic arriving at this URL. +type PathRule struct { + // Paths: The list of path patterns to match. Each must start with / and + // the only place a * is allowed is at the end following a /. The string + // fed to the path matcher does not include any text after the first ? + // or #, and those chars are not allowed here. + Paths []string `json:"paths,omitempty"` + + // Service: The URL of the BackendService resource if this rule is + // matched. + Service string `json:"service,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Paths") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *PathRule) MarshalJSON() ([]byte, error) { + type noMethod PathRule + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// Project: A Project resource. Projects can only be created in the +// Google Cloud Platform Console. Unless marked otherwise, values can +// only be modified in the console. +type Project struct { + // CommonInstanceMetadata: Metadata key/value pairs available to all + // instances contained in this project. See Custom metadata for more + // information. + CommonInstanceMetadata *Metadata `json:"commonInstanceMetadata,omitempty"` + + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: An optional textual description of the resource. + Description string `json:"description,omitempty"` + + // EnabledFeatures: Restricted features enabled for use on this project. + EnabledFeatures []string `json:"enabledFeatures,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. This is not the project ID, and + // is just a unique ID used by Compute Engine to identify resources. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] Type of the resource. Always compute#project for + // projects. + Kind string `json:"kind,omitempty"` + + // Name: The project ID. For example: my-example-project. Use the + // project ID to make requests to Compute Engine. + Name string `json:"name,omitempty"` + + // Quotas: [Output Only] Quotas assigned to this project. + Quotas []*Quota `json:"quotas,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // UsageExportLocation: The naming prefix for daily usage reports and + // the Google Cloud Storage bucket where they are stored. + UsageExportLocation *UsageExportLocation `json:"usageExportLocation,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. + // "CommonInstanceMetadata") to unconditionally include in API requests. 
+ // By default, fields with empty values are omitted from API requests. + // However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *Project) MarshalJSON() ([]byte, error) { + type noMethod Project + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// Quota: A quotas entry. +type Quota struct { + // Limit: [Output Only] Quota limit for this metric. + Limit float64 `json:"limit,omitempty"` + + // Metric: [Output Only] Name of the quota metric. + // + // Possible values: + // "AUTOSCALERS" + // "BACKEND_SERVICES" + // "CPUS" + // "DISKS_TOTAL_GB" + // "FIREWALLS" + // "FORWARDING_RULES" + // "HEALTH_CHECKS" + // "IMAGES" + // "INSTANCES" + // "INSTANCE_GROUPS" + // "INSTANCE_GROUP_MANAGERS" + // "INSTANCE_TEMPLATES" + // "IN_USE_ADDRESSES" + // "LOCAL_SSD_TOTAL_GB" + // "NETWORKS" + // "ROUTES" + // "SNAPSHOTS" + // "SSD_TOTAL_GB" + // "SSL_CERTIFICATES" + // "STATIC_ADDRESSES" + // "SUBNETWORKS" + // "TARGET_HTTPS_PROXIES" + // "TARGET_HTTP_PROXIES" + // "TARGET_INSTANCES" + // "TARGET_POOLS" + // "TARGET_VPN_GATEWAYS" + // "URL_MAPS" + // "VPN_TUNNELS" + Metric string `json:"metric,omitempty"` + + // Usage: [Output Only] Current usage of this metric. + Usage float64 `json:"usage,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Limit") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *Quota) MarshalJSON() ([]byte, error) { + type noMethod Quota + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// Region: Region resource. +type Region struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Deprecated: [Output Only] The deprecation status associated with this + // region. + Deprecated *DeprecationStatus `json:"deprecated,omitempty"` + + // Description: [Output Only] Textual description of the resource. + Description string `json:"description,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] Type of the resource. Always compute#region for + // regions. + Kind string `json:"kind,omitempty"` + + // Name: [Output Only] Name of the resource. + Name string `json:"name,omitempty"` + + // Quotas: [Output Only] Quotas assigned to this region. + Quotas []*Quota `json:"quotas,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // Status: [Output Only] Status of the region, either UP or DOWN. + // + // Possible values: + // "DOWN" + // "UP" + Status string `json:"status,omitempty"` + + // Zones: [Output Only] A list of zones available in this region, in the + // form of resource URLs. + Zones []string `json:"zones,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. 
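+ //
+ // A hedged sketch, assuming a *Region named region fetched elsewhere
+ // (for example via service.Regions.Get(project, name).Do()): quota
+ // headroom can be read straight off the Quotas slice defined above:
+ //
+ //   for _, q := range region.Quotas {
+ //     if q.Usage >= q.Limit {
+ //       log.Printf("quota %s exhausted (%v of %v)", q.Metric, q.Usage, q.Limit)
+ //     }
+ //   }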
+ googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *Region) MarshalJSON() ([]byte, error) { + type noMethod Region + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// RegionList: Contains a list of region resources. +type RegionList struct { + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id string `json:"id,omitempty"` + + // Items: [Output Only] A list of Region resources. + Items []*Region `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always compute#regionList for + // lists of regions. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *RegionList) MarshalJSON() ([]byte, error) { + type noMethod RegionList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type ResourceGroupReference struct { + // Group: A URI referencing one of the resource views listed in the + // backend service. + Group string `json:"group,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Group") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *ResourceGroupReference) MarshalJSON() ([]byte, error) { + type noMethod ResourceGroupReference + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// Route: Represents a Route resource. A route specifies how certain +// packets should be handled by the network. Routes are associated with +// instances by tags and the set of routes for a particular instance is +// called its routing table. 
+//
+// For each packet leaving an instance, the system searches that
+// instance's routing table for a single best matching route. Routes
+// match packets by destination IP address, preferring smaller or more
+// specific ranges over larger ones. If there is a tie, the system
+// selects the route with the smallest priority value. If there is still
+// a tie, it uses the layer three and four packet headers to select just
+// one of the remaining matching routes. The packet is then forwarded as
+// specified by the nextHop field of the winning route - either to
+// another instance destination, an instance gateway or a Google Compute
+// Engine-operated gateway.
+//
+// Packets that do not match any route in the sending instance's routing
+// table are dropped.
+type Route struct {
+ // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text
+ // format.
+ CreationTimestamp string `json:"creationTimestamp,omitempty"`
+
+ // Description: An optional description of this resource. Provide this
+ // property when you create the resource.
+ Description string `json:"description,omitempty"`
+
+ // DestRange: The destination range of outgoing packets that this route
+ // applies to.
+ DestRange string `json:"destRange,omitempty"`
+
+ // Id: [Output Only] The unique identifier for the resource. This
+ // identifier is defined by the server.
+ Id uint64 `json:"id,omitempty,string"`
+
+ // Kind: [Output Only] Type of this resource. Always compute#routes for
+ // Route resources.
+ Kind string `json:"kind,omitempty"`
+
+ // Name: Name of the resource. Provided by the client when the resource
+ // is created. The name must be 1-63 characters long, and comply with
+ // RFC1035. Specifically, the name must be 1-63 characters long and
+ // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means
+ // the first character must be a lowercase letter, and all following
+ // characters must be a dash, lowercase letter, or digit, except the
+ // last character, which cannot be a dash.
+ Name string `json:"name,omitempty"`
+
+ // Network: Fully-qualified URL of the network that this route applies
+ // to.
+ Network string `json:"network,omitempty"`
+
+ // NextHopGateway: The URL to a gateway that should handle matching
+ // packets. You can only specify the internet gateway using a full or
+ // partial valid URL:
+ // projects//global/gateways/default-internet-gateway
+ NextHopGateway string `json:"nextHopGateway,omitempty"`
+
+ // NextHopInstance: The URL to an instance that should handle matching
+ // packets. You can specify this as a full or partial URL. For
+ // example:
+ // https://www.googleapis.com/compute/v1/projects/project/zones/
+ // zone/instances/
+ NextHopInstance string `json:"nextHopInstance,omitempty"`
+
+ // NextHopIp: The network IP address of an instance that should handle
+ // matching packets.
+ NextHopIp string `json:"nextHopIp,omitempty"`
+
+ // NextHopNetwork: The URL of the local network if it should handle
+ // matching packets.
+ NextHopNetwork string `json:"nextHopNetwork,omitempty"`
+
+ // NextHopVpnTunnel: The URL to a VpnTunnel that should handle matching
+ // packets.
+ NextHopVpnTunnel string `json:"nextHopVpnTunnel,omitempty"`
+
+ // Priority: The priority of this route. Priority is used to break ties
+ // in cases where there is more than one matching route of equal prefix
+ // length. In the case of two routes with equal prefix length, the one
+ // with the lowest-numbered priority value wins. Default value is 1000.
+ // Valid range is 0 through 65535. + Priority int64 `json:"priority,omitempty"` + + // SelfLink: [Output Only] Server-defined fully-qualified URL for this + // resource. + SelfLink string `json:"selfLink,omitempty"` + + // Tags: A list of instance tags to which this route applies. + Tags []string `json:"tags,omitempty"` + + // Warnings: [Output Only] If potential misconfigurations are detected + // for this route, this field will be populated with warning messages. + Warnings []*RouteWarnings `json:"warnings,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *Route) MarshalJSON() ([]byte, error) { + type noMethod Route + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type RouteWarnings struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*RouteWarningsData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *RouteWarnings) MarshalJSON() ([]byte, error) { + type noMethod RouteWarnings + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type RouteWarningsData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). 
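+ //
+ // A hedged sketch, assuming a *Route named route fetched elsewhere:
+ // the warning code and its key/value details can be surfaced together:
+ //
+ //   for _, w := range route.Warnings {
+ //     for _, d := range w.Data {
+ //       log.Printf("route warning %s: %s=%s", w.Code, d.Key, d.Value)
+ //     }
+ //   }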
+ Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *RouteWarningsData) MarshalJSON() ([]byte, error) { + type noMethod RouteWarningsData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// RouteList: Contains a list of Route resources. +type RouteList struct { + // Id: [Output Only] Unique identifier for the resource. Defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: [Output Only] A list of Route resources. + Items []*Route `json:"items,omitempty"` + + // Kind: Type of resource. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *RouteList) MarshalJSON() ([]byte, error) { + type noMethod RouteList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// Scheduling: Sets the scheduling options for an Instance. +type Scheduling struct { + // AutomaticRestart: Specifies whether the instance should be + // automatically restarted if it is terminated by Compute Engine (not + // terminated by a user). You can only set the automatic restart option + // for standard instances. Preemptible instances cannot be automatically + // restarted. + AutomaticRestart bool `json:"automaticRestart,omitempty"` + + // OnHostMaintenance: Defines the maintenance behavior for this + // instance. For standard instances, the default behavior is MIGRATE. + // For preemptible instances, the default and only possible behavior is + // TERMINATE. For more information, see Setting Instance Scheduling + // Options. + // + // Possible values: + // "MIGRATE" + // "TERMINATE" + OnHostMaintenance string `json:"onHostMaintenance,omitempty"` + + // Preemptible: Whether the instance is preemptible. + Preemptible bool `json:"preemptible,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AutomaticRestart") to + // unconditionally include in API requests. 
By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *Scheduling) MarshalJSON() ([]byte, error) { + type noMethod Scheduling + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// SerialPortOutput: An instance's serial console output. +type SerialPortOutput struct { + // Contents: [Output Only] The contents of the console output. + Contents string `json:"contents,omitempty"` + + // Kind: [Output Only] Type of the resource. Always + // compute#serialPortOutput for serial port output. + Kind string `json:"kind,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Contents") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *SerialPortOutput) MarshalJSON() ([]byte, error) { + type noMethod SerialPortOutput + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// ServiceAccount: A service account. +type ServiceAccount struct { + // Email: Email address of the service account. + Email string `json:"email,omitempty"` + + // Scopes: The list of scopes to be made available for this service + // account. + Scopes []string `json:"scopes,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Email") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *ServiceAccount) MarshalJSON() ([]byte, error) { + type noMethod ServiceAccount + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// Snapshot: A persistent disk snapshot resource. +type Snapshot struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // DiskSizeGb: [Output Only] Size of the snapshot, specified in GB. + DiskSizeGb int64 `json:"diskSizeGb,omitempty,string"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] Type of the resource. Always compute#snapshot for + // Snapshot resources. 
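+ //
+ // A hedged sketch for the Scheduling type above: preemptible instances
+ // only allow the TERMINATE maintenance behavior and cannot auto-restart,
+ // so AutomaticRestart must be false (and, being a zero value, needs
+ // ForceSendFields to be sent explicitly):
+ //
+ //   sched := &Scheduling{
+ //     Preemptible:       true,
+ //     OnHostMaintenance: "TERMINATE",
+ //     AutomaticRestart:  false,
+ //     ForceSendFields:   []string{"AutomaticRestart"},
+ //   }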
+ Kind string `json:"kind,omitempty"`
+
+ // Licenses: [Output Only] A list of publicly visible licenses that apply
+ // to this snapshot. This can be because the original image had licenses
+ // attached (such as a Windows image).
+ Licenses []string `json:"licenses,omitempty"`
+
+ // Name: Name of the resource; provided by the client when the resource
+ // is created. The name must be 1-63 characters long, and comply with
+ // RFC1035. Specifically, the name must be 1-63 characters long and
+ // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means
+ // the first character must be a lowercase letter, and all following
+ // characters must be a dash, lowercase letter, or digit, except the
+ // last character, which cannot be a dash.
+ Name string `json:"name,omitempty"`
+
+ // SelfLink: [Output Only] Server-defined URL for the resource.
+ SelfLink string `json:"selfLink,omitempty"`
+
+ // SourceDisk: [Output Only] The source disk used to create this
+ // snapshot.
+ SourceDisk string `json:"sourceDisk,omitempty"`
+
+ // SourceDiskId: [Output Only] The ID value of the disk used to create
+ // this snapshot. This value may be used to determine whether the
+ // snapshot was taken from the current or a previous instance of a given
+ // disk name.
+ SourceDiskId string `json:"sourceDiskId,omitempty"`
+
+ // Status: [Output Only] The status of the snapshot. This can be
+ // CREATING, DELETING, FAILED, READY, or UPLOADING.
+ //
+ // Possible values:
+ // "CREATING"
+ // "DELETING"
+ // "FAILED"
+ // "READY"
+ // "UPLOADING"
+ Status string `json:"status,omitempty"`
+
+ // StorageBytes: [Output Only] Size of the storage used by the
+ // snapshot. As snapshots share storage, this number is expected to
+ // change with snapshot creation/deletion.
+ StorageBytes int64 `json:"storageBytes,omitempty,string"`
+
+ // StorageBytesStatus: [Output Only] An indicator of whether storageBytes
+ // is in a stable state or it is being adjusted as a result of shared
+ // storage reallocation. This status can either be UPDATING, meaning the
+ // size of the snapshot is being updated, or UP_TO_DATE, meaning the
+ // size of the snapshot is up-to-date.
+ //
+ // Possible values:
+ // "UPDATING"
+ // "UP_TO_DATE"
+ StorageBytesStatus string `json:"storageBytesStatus,omitempty"`
+
+ // ServerResponse contains the HTTP response code and headers from the
+ // server.
+ googleapi.ServerResponse `json:"-"`
+
+ // ForceSendFields is a list of field names (e.g. "CreationTimestamp")
+ // to unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *Snapshot) MarshalJSON() ([]byte, error) {
+ type noMethod Snapshot
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// SnapshotList: Contains a list of Snapshot resources.
+type SnapshotList struct {
+ // Id: [Output Only] The unique identifier for the resource. This
+ // identifier is defined by the server.
+ Id string `json:"id,omitempty"`
+
+ // Items: [Output Only] A list of Snapshot resources.
+ Items []*Snapshot `json:"items,omitempty"`
+
+ // Kind: Type of resource.
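+ //
+ // A hedged polling sketch for the Status field of Snapshot above,
+ // assuming a *Service named service and a project string exist in the
+ // calling code:
+ //
+ //   snap, err := service.Snapshots.Get(project, "my-snapshot").Do()
+ //   for err == nil && (snap.Status == "CREATING" || snap.Status == "UPLOADING") {
+ //     time.Sleep(5 * time.Second)
+ //     snap, err = service.Snapshots.Get(project, "my-snapshot").Do()
+ //   }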
+ Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *SnapshotList) MarshalJSON() ([]byte, error) { + type noMethod SnapshotList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// SslCertificate: An SslCertificate resource. This resource provides a +// mechanism to upload an SSL key and certificate to the load balancer +// to serve secure connections from the user. +type SslCertificate struct { + // Certificate: A local certificate file. The certificate must be in PEM + // format. The certificate chain must be no greater than 5 certs long. + // The chain must include at least one intermediate cert. + Certificate string `json:"certificate,omitempty"` + + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] Type of the resource. Always + // compute#sslCertificate for SSL certificates. + Kind string `json:"kind,omitempty"` + + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // PrivateKey: A write-only private key in PEM format. Only insert RPCs + // will include this field. + PrivateKey string `json:"privateKey,omitempty"` + + // SelfLink: [Output only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Certificate") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *SslCertificate) MarshalJSON() ([]byte, error) { + type noMethod SslCertificate + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// SslCertificateList: Contains a list of SslCertificate resources. +type SslCertificateList struct { + // Id: [Output Only] Unique identifier for the resource. Defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of SslCertificate resources. + Items []*SslCertificate `json:"items,omitempty"` + + // Kind: Type of resource. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *SslCertificateList) MarshalJSON() ([]byte, error) { + type noMethod SslCertificateList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// Subnetwork: A Subnetwork resource. +type Subnetwork struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // GatewayAddress: [Output Only] The gateway address for default routes + // to reach destination addresses outside this subnetwork. + GatewayAddress string `json:"gatewayAddress,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // IpCidrRange: The range of internal addresses that are owned by this + // subnetwork. Provide this property when you create the subnetwork. For + // example, 10.0.0.0/8 or 192.168.0.0/16. Ranges must be unique and + // non-overlapping within a network. + IpCidrRange string `json:"ipCidrRange,omitempty"` + + // Kind: [Output Only] Type of the resource. Always compute#subnetwork + // for Subnetwork resources. + Kind string `json:"kind,omitempty"` + + // Name: The name of the resource, provided by the client when initially + // creating the resource. The name must be 1-63 characters long, and + // comply with RFC1035. 
Specifically, the name must be 1-63 characters + // long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? + // which means the first character must be a lowercase letter, and all + // following characters must be a dash, lowercase letter, or digit, + // except the last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // Network: The URL of the network to which this subnetwork belongs, + // provided by the client when initially creating the subnetwork. Only + // networks that are in the distributed mode can have subnetworks. + Network string `json:"network,omitempty"` + + // Region: URL of the region where the Subnetwork resides. + Region string `json:"region,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *Subnetwork) MarshalJSON() ([]byte, error) { + type noMethod Subnetwork + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type SubnetworkAggregatedList struct { + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id string `json:"id,omitempty"` + + // Items: [Output] A map of scoped Subnetwork lists. + Items map[string]SubnetworksScopedList `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always + // compute#subnetworkAggregatedList for aggregated lists of subnetworks. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *SubnetworkAggregatedList) MarshalJSON() ([]byte, error) { + type noMethod SubnetworkAggregatedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// SubnetworkList: Contains a list of Subnetwork resources. +type SubnetworkList struct { + // Id: [Output Only] The unique identifier for the resource. 
This + // identifier is defined by the server. + Id string `json:"id,omitempty"` + + // Items: The Subnetwork resources. + Items []*Subnetwork `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always compute#subnetworkList + // for lists of subnetworks. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *SubnetworkList) MarshalJSON() ([]byte, error) { + type noMethod SubnetworkList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type SubnetworksScopedList struct { + // Subnetworks: List of subnetworks contained in this scope. + Subnetworks []*Subnetwork `json:"subnetworks,omitempty"` + + // Warning: An informational warning that appears when the list of + // addresses is empty. + Warning *SubnetworksScopedListWarning `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Subnetworks") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *SubnetworksScopedList) MarshalJSON() ([]byte, error) { + type noMethod SubnetworksScopedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// SubnetworksScopedListWarning: An informational warning that appears +// when the list of addresses is empty. +type SubnetworksScopedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. 
+ // For example:
+ // "data": [ { "key": "scope", "value": "zones/us-east1-d" }
+ Data []*SubnetworksScopedListWarningData `json:"data,omitempty"`
+
+ // Message: [Output Only] A human-readable description of the warning
+ // code.
+ Message string `json:"message,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "Code") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *SubnetworksScopedListWarning) MarshalJSON() ([]byte, error) {
+ type noMethod SubnetworksScopedListWarning
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+type SubnetworksScopedListWarningData struct {
+ // Key: [Output Only] A key that provides more detail on the warning
+ // being returned. For example, for warnings where there are no results
+ // in a list request for a particular zone, this key might be scope and
+ // the key value might be the zone name. Other examples might be a key
+ // indicating a deprecated resource and a suggested replacement, or a
+ // warning about invalid network settings (for example, if an instance
+ // attempts to perform IP forwarding but is not enabled for IP
+ // forwarding).
+ Key string `json:"key,omitempty"`
+
+ // Value: [Output Only] A warning data value corresponding to the key.
+ Value string `json:"value,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "Key") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *SubnetworksScopedListWarningData) MarshalJSON() ([]byte, error) {
+ type noMethod SubnetworksScopedListWarningData
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// Tags: A set of instance tags.
+type Tags struct {
+ // Fingerprint: Specifies a fingerprint for this request, which is
+ // essentially a hash of the metadata's contents and used for optimistic
+ // locking. The fingerprint is initially generated by Compute Engine and
+ // changes after every request to modify or update metadata. You must
+ // always provide an up-to-date fingerprint hash in order to update or
+ // change metadata.
+ //
+ // To see the latest fingerprint, make a get() request to the instance.
+ Fingerprint string `json:"fingerprint,omitempty"`
+
+ // Items: An array of tags. Each tag must be 1-63 characters long, and
+ // comply with RFC1035.
+ Items []string `json:"items,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "Fingerprint") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
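+ //
+ // A hedged read-modify-write sketch for the fingerprint contract
+ // described above (instance, zone, and tag names are placeholders):
+ //
+ //   inst, err := service.Instances.Get(project, zone, "my-instance").Do()
+ //   if err == nil {
+ //     inst.Tags.Items = append(inst.Tags.Items, "http-server")
+ //     // Reusing the fingerprint returned by the get() keeps the
+ //     // optimistic lock intact; a stale fingerprint is rejected.
+ //     _, err = service.Instances.SetTags(project, zone, "my-instance", inst.Tags).Do()
+ //   }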
+ ForceSendFields []string `json:"-"` +} + +func (s *Tags) MarshalJSON() ([]byte, error) { + type noMethod Tags + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// TargetHttpProxy: A TargetHttpProxy resource. This resource defines an +// HTTP proxy. +type TargetHttpProxy struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] Type of resource. Always compute#targetHttpProxy + // for target HTTP proxies. + Kind string `json:"kind,omitempty"` + + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // UrlMap: URL to the UrlMap resource that defines the mapping from URL + // to the BackendService. + UrlMap string `json:"urlMap,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *TargetHttpProxy) MarshalJSON() ([]byte, error) { + type noMethod TargetHttpProxy + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// TargetHttpProxyList: A list of TargetHttpProxy resources. +type TargetHttpProxyList struct { + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id string `json:"id,omitempty"` + + // Items: A list of TargetHttpProxy resources. + Items []*TargetHttpProxy `json:"items,omitempty"` + + // Kind: Type of resource. Always compute#targetHttpProxyList for lists + // of target HTTP proxies. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. 
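+ //
+ // A hedged pagination sketch for the nextPageToken contract above,
+ // assuming a *Service named service and a project string exist in the
+ // calling code:
+ //
+ //   token := ""
+ //   for {
+ //     list, err := service.TargetHttpProxies.List(project).PageToken(token).Do()
+ //     if err != nil {
+ //       break
+ //     }
+ //     for _, p := range list.Items {
+ //       log.Println(p.Name)
+ //     }
+ //     if list.NextPageToken == "" {
+ //       break
+ //     }
+ //     token = list.NextPageToken
+ //   }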
+ SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *TargetHttpProxyList) MarshalJSON() ([]byte, error) { + type noMethod TargetHttpProxyList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type TargetHttpsProxiesSetSslCertificatesRequest struct { + // SslCertificates: New set of SslCertificate resources to associate + // with this TargetHttpsProxy resource. Currently exactly one + // SslCertificate resource must be specified. + SslCertificates []string `json:"sslCertificates,omitempty"` + + // ForceSendFields is a list of field names (e.g. "SslCertificates") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *TargetHttpsProxiesSetSslCertificatesRequest) MarshalJSON() ([]byte, error) { + type noMethod TargetHttpsProxiesSetSslCertificatesRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// TargetHttpsProxy: A TargetHttpsProxy resource. This resource defines +// an HTTPS proxy. +type TargetHttpsProxy struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] Type of resource. Always compute#targetHttpsProxy + // for target HTTPS proxies. + Kind string `json:"kind,omitempty"` + + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // SslCertificates: URLs to SslCertificate resources that are used to + // authenticate connections between users and the load balancer. + // Currently, exactly one SSL certificate must be specified. 
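+ //
+ // A hedged rotation sketch using the request type above (the proxy and
+ // certificate names are placeholders):
+ //
+ //   req := &TargetHttpsProxiesSetSslCertificatesRequest{
+ //     SslCertificates: []string{"global/sslCertificates/my-cert"},
+ //   }
+ //   _, err := service.TargetHttpsProxies.SetSslCertificates(project, "my-proxy", req).Do()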
+ SslCertificates []string `json:"sslCertificates,omitempty"`
+
+ // UrlMap: A fully-qualified or valid partial URL to the UrlMap resource
+ // that defines the mapping from URL to the BackendService. For example,
+ // the following are all valid URLs for specifying a URL map:
+ // -
+ // https://www.googleapis.com/compute/v1/projects/project/global/urlMaps/url-map
+ // - projects/project/global/urlMaps/url-map
+ // - global/urlMaps/url-map
+ UrlMap string `json:"urlMap,omitempty"`
+
+ // ServerResponse contains the HTTP response code and headers from the
+ // server.
+ googleapi.ServerResponse `json:"-"`
+
+ // ForceSendFields is a list of field names (e.g. "CreationTimestamp")
+ // to unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *TargetHttpsProxy) MarshalJSON() ([]byte, error) {
+ type noMethod TargetHttpsProxy
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// TargetHttpsProxyList: Contains a list of TargetHttpsProxy resources.
+type TargetHttpsProxyList struct {
+ // Id: [Output Only] The unique identifier for the resource. This
+ // identifier is defined by the server.
+ Id string `json:"id,omitempty"`
+
+ // Items: A list of TargetHttpsProxy resources.
+ Items []*TargetHttpsProxy `json:"items,omitempty"`
+
+ // Kind: Type of resource. Always compute#targetHttpsProxyList for lists
+ // of target HTTPS proxies.
+ Kind string `json:"kind,omitempty"`
+
+ // NextPageToken: [Output Only] This token allows you to get the next
+ // page of results for list requests. If the number of results is larger
+ // than maxResults, use the nextPageToken as a value for the query
+ // parameter pageToken in the next list request. Subsequent list
+ // requests will have their own nextPageToken to continue paging through
+ // the results.
+ NextPageToken string `json:"nextPageToken,omitempty"`
+
+ // SelfLink: [Output Only] Server-defined URL for this resource.
+ SelfLink string `json:"selfLink,omitempty"`
+
+ // ServerResponse contains the HTTP response code and headers from the
+ // server.
+ googleapi.ServerResponse `json:"-"`
+
+ // ForceSendFields is a list of field names (e.g. "Id") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *TargetHttpsProxyList) MarshalJSON() ([]byte, error) {
+ type noMethod TargetHttpsProxyList
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// TargetInstance: A TargetInstance resource. This resource defines an
+// endpoint instance that terminates traffic of certain protocols.
+type TargetInstance struct {
+ // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text
+ // format.
+ CreationTimestamp string `json:"creationTimestamp,omitempty"`
+
+ // Description: An optional description of this resource. Provide this
+ // property when you create the resource.
+ Description string `json:"description,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Instance: A URL to the virtual machine instance that handles traffic + // for this target instance. When creating a target instance, you can + // provide the fully-qualified URL or a valid partial URL to the desired + // virtual machine. For example, the following are all valid URLs: + // - + // https://www.googleapis.com/compute/v1/projects/project/zones/zone/instances/instance + // - projects/project/zones/zone/instances/instance + // - zones/zone/instances/instance + Instance string `json:"instance,omitempty"` + + // Kind: [Output Only] The type of the resource. Always + // compute#targetInstance for target instances. + Kind string `json:"kind,omitempty"` + + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // NatPolicy: NAT option controlling how IPs are NAT'ed to the instance. + // Currently only NO_NAT (default value) is supported. + // + // Possible values: + // "NO_NAT" + NatPolicy string `json:"natPolicy,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // Zone: [Output Only] URL of the zone where the target instance + // resides. + Zone string `json:"zone,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *TargetInstance) MarshalJSON() ([]byte, error) { + type noMethod TargetInstance + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type TargetInstanceAggregatedList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A map of scoped target instance lists. + Items map[string]TargetInstancesScopedList `json:"items,omitempty"` + + // Kind: Type of resource. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. 
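+ //
+ // A hedged creation sketch for the TargetInstance type above, using the
+ // partial-URL form of the Instance field (all names are placeholders):
+ //
+ //   ti := &TargetInstance{
+ //     Name:     "my-target-instance",
+ //     Instance: "zones/us-central1-a/instances/my-instance",
+ //   }
+ //   _, err := service.TargetInstances.Insert(project, "us-central1-a", ti).Do()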
+ SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *TargetInstanceAggregatedList) MarshalJSON() ([]byte, error) { + type noMethod TargetInstanceAggregatedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// TargetInstanceList: Contains a list of TargetInstance resources. +type TargetInstanceList struct { + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id string `json:"id,omitempty"` + + // Items: A list of TargetInstance resources. + Items []*TargetInstance `json:"items,omitempty"` + + // Kind: Type of resource. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *TargetInstanceList) MarshalJSON() ([]byte, error) { + type noMethod TargetInstanceList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type TargetInstancesScopedList struct { + // TargetInstances: List of target instances contained in this scope. + TargetInstances []*TargetInstance `json:"targetInstances,omitempty"` + + // Warning: Informational warning which replaces the list of addresses + // when the list is empty. + Warning *TargetInstancesScopedListWarning `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. "TargetInstances") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
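+ //
+ // A hedged sketch of walking an aggregated list down into these scoped
+ // entries (scope keys look like "zones/us-central1-a"):
+ //
+ //   agg, err := service.TargetInstances.AggregatedList(project).Do()
+ //   if err == nil {
+ //     for scope, scoped := range agg.Items {
+ //       for _, ti := range scoped.TargetInstances {
+ //         log.Printf("%s: %s", scope, ti.Name)
+ //       }
+ //     }
+ //   }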
+ ForceSendFields []string `json:"-"` +} + +func (s *TargetInstancesScopedList) MarshalJSON() ([]byte, error) { + type noMethod TargetInstancesScopedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// TargetInstancesScopedListWarning: Informational warning which +// replaces the list of addresses when the list is empty. +type TargetInstancesScopedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*TargetInstancesScopedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *TargetInstancesScopedListWarning) MarshalJSON() ([]byte, error) { + type noMethod TargetInstancesScopedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type TargetInstancesScopedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
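+
+// Example (editor's sketch, not part of the generated client): surfacing
+// the informational warning that replaces an empty scoped list. Assumes
+// list is a *compute.TargetInstanceAggregatedList returned by an
+// AggregatedList call.
+//
+//    for scope, scoped := range list.Items {
+//        if scoped.Warning != nil {
+//            log.Printf("%s: %s (%s)", scope, scoped.Warning.Message, scoped.Warning.Code)
+//        }
+//    }
+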
+ ForceSendFields []string `json:"-"` +} + +func (s *TargetInstancesScopedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod TargetInstancesScopedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// TargetPool: A TargetPool resource. This resource defines a pool of +// instances, associated HttpHealthCheck resources, and the fallback +// target pool. +type TargetPool struct { + // BackupPool: This field is applicable only when the containing target + // pool is serving a forwarding rule as the primary pool, and its + // failoverRatio field is properly set to a value between [0, + // 1]. + // + // backupPool and failoverRatio together define the fallback behavior of + // the primary target pool: if the ratio of the healthy instances in the + // primary pool is at or below failoverRatio, traffic arriving at the + // load-balanced IP will be directed to the backup pool. + // + // In case where failoverRatio and backupPool are not set, or all the + // instances in the backup pool are unhealthy, the traffic will be + // directed back to the primary pool in the "force" mode, where traffic + // will be spread to the healthy instances with the best effort, or to + // all instances when no instance is healthy. + BackupPool string `json:"backupPool,omitempty"` + + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // FailoverRatio: This field is applicable only when the containing + // target pool is serving a forwarding rule as the primary pool (i.e., + // not as a backup pool to some other target pool). The value of the + // field must be in [0, 1]. + // + // If set, backupPool must also be set. They together define the + // fallback behavior of the primary target pool: if the ratio of the + // healthy instances in the primary pool is at or below this number, + // traffic arriving at the load-balanced IP will be directed to the + // backup pool. + // + // In case where failoverRatio is not set or all the instances in the + // backup pool are unhealthy, the traffic will be directed back to the + // primary pool in the "force" mode, where traffic will be spread to the + // healthy instances with the best effort, or to all instances when no + // instance is healthy. + FailoverRatio float64 `json:"failoverRatio,omitempty"` + + // HealthChecks: A list of URLs to the HttpHealthCheck resource. A + // member instance in this pool is considered healthy if and only if all + // specified health checks pass. An empty list means all member + // instances will be considered healthy at all times. + HealthChecks []string `json:"healthChecks,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Instances: A list of resource URLs to the virtual machine instances + // serving this pool. They must live in zones contained in the same + // region as this pool. + Instances []string `json:"instances,omitempty"` + + // Kind: [Output Only] Type of the resource. Always compute#targetPool + // for target pools. + Kind string `json:"kind,omitempty"` + + // Name: Name of the resource. Provided by the client when the resource + // is created. 
The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // Region: [Output Only] URL of the region where the target pool + // resides. + Region string `json:"region,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // SessionAffinity: Session affinity option, must be one of the + // following values: + // NONE: Connections from the same client IP may go to any instance in + // the pool. + // CLIENT_IP: Connections from the same client IP will go to the same + // instance in the pool while that instance remains + // healthy. + // CLIENT_IP_PROTO: Connections from the same client IP with the same IP + // protocol will go to the same instance in the pool while that instance + // remains healthy. + // + // Possible values: + // "CLIENT_IP" + // "CLIENT_IP_PROTO" + // "NONE" + SessionAffinity string `json:"sessionAffinity,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "BackupPool") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *TargetPool) MarshalJSON() ([]byte, error) { + type noMethod TargetPool + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type TargetPoolAggregatedList struct { + // Id: [Output Only] Unique identifier for the resource. Defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: [Output Only] A map of scoped target pool lists. + Items map[string]TargetPoolsScopedList `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always + // compute#targetPoolAggregatedList for aggregated lists of target + // pools. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not.
This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *TargetPoolAggregatedList) MarshalJSON() ([]byte, error) { + type noMethod TargetPoolAggregatedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type TargetPoolInstanceHealth struct { + HealthStatus []*HealthStatus `json:"healthStatus,omitempty"` + + // Kind: [Output Only] Type of resource. Always + // compute#targetPoolInstanceHealth when checking the health of an + // instance. + Kind string `json:"kind,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "HealthStatus") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *TargetPoolInstanceHealth) MarshalJSON() ([]byte, error) { + type noMethod TargetPoolInstanceHealth + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// TargetPoolList: Contains a list of TargetPool resources. +type TargetPoolList struct { + // Id: [Output Only] Unique identifier for the resource. Defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of TargetPool resources. + Items []*TargetPool `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always compute#targetPoolList + // for lists of target pools. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *TargetPoolList) MarshalJSON() ([]byte, error) { + type noMethod TargetPoolList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type TargetPoolsAddHealthCheckRequest struct { + // HealthChecks: A list of HttpHealthCheck resources to add to the + // target pool. + HealthChecks []*HealthCheckReference `json:"healthChecks,omitempty"` + + // ForceSendFields is a list of field names (e.g. "HealthChecks") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *TargetPoolsAddHealthCheckRequest) MarshalJSON() ([]byte, error) { + type noMethod TargetPoolsAddHealthCheckRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type TargetPoolsAddInstanceRequest struct { + // Instances: A full or partial URL to an instance to add to this target + // pool. For example, the following are valid URLs: + // - + // https://www.googleapis.com/compute/v1/projects/project-id/zones/zone/instances/instance-name + // - projects/project-id/zones/zone/instances/instance-name + // - zones/zone/instances/instance-name + Instances []*InstanceReference `json:"instances,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Instances") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *TargetPoolsAddInstanceRequest) MarshalJSON() ([]byte, error) { + type noMethod TargetPoolsAddInstanceRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type TargetPoolsRemoveHealthCheckRequest struct { + // HealthChecks: Health check URL to be removed. This can be a full or + // valid partial URL. For example, the following are valid URLs: + // - + // https://www.googleapis.com/compute/beta/projects/project/global/httpHealthChecks/health-check + // - projects/project/global/httpHealthChecks/health-check + // - global/httpHealthChecks/health-check + HealthChecks []*HealthCheckReference `json:"healthChecks,omitempty"` + + // ForceSendFields is a list of field names (e.g. "HealthChecks") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *TargetPoolsRemoveHealthCheckRequest) MarshalJSON() ([]byte, error) { + type noMethod TargetPoolsRemoveHealthCheckRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type TargetPoolsRemoveInstanceRequest struct { + // Instances: URLs of the instances to be removed from the target pool. + Instances []*InstanceReference `json:"instances,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Instances") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests.
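+
+// Example (editor's sketch, not part of the generated client): adding an
+// instance to a target pool with a partial instance URL; all names are
+// placeholders and svc is assumed to be an initialized *compute.Service.
+//
+//    req := &compute.TargetPoolsAddInstanceRequest{
+//        Instances: []*compute.InstanceReference{
+//            {Instance: "zones/us-central1-a/instances/example-instance"},
+//        },
+//    }
+//    op, err := svc.TargetPools.AddInstance("example-project", "us-central1", "example-pool", req).Do()
+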
+ ForceSendFields []string `json:"-"` +} + +func (s *TargetPoolsRemoveInstanceRequest) MarshalJSON() ([]byte, error) { + type noMethod TargetPoolsRemoveInstanceRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type TargetPoolsScopedList struct { + // TargetPools: List of target pools contained in this scope. + TargetPools []*TargetPool `json:"targetPools,omitempty"` + + // Warning: Informational warning which replaces the list of addresses + // when the list is empty. + Warning *TargetPoolsScopedListWarning `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. "TargetPools") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *TargetPoolsScopedList) MarshalJSON() ([]byte, error) { + type noMethod TargetPoolsScopedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// TargetPoolsScopedListWarning: Informational warning which replaces +// the list of addresses when the list is empty. +type TargetPoolsScopedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*TargetPoolsScopedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *TargetPoolsScopedListWarning) MarshalJSON() ([]byte, error) { + type noMethod TargetPoolsScopedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type TargetPoolsScopedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. 
Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *TargetPoolsScopedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod TargetPoolsScopedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type TargetReference struct { + Target string `json:"target,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Target") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *TargetReference) MarshalJSON() ([]byte, error) { + type noMethod TargetReference + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// TargetVpnGateway: Represents a Target VPN gateway resource. +type TargetVpnGateway struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // ForwardingRules: [Output Only] A list of URLs to the ForwardingRule + // resources. ForwardingRules are created using + // compute.forwardingRules.insert and associated to a VPN gateway. + ForwardingRules []string `json:"forwardingRules,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] Type of resource. Always compute#targetVpnGateway + // for target VPN gateways. + Kind string `json:"kind,omitempty"` + + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // Network: URL of the network to which this VPN gateway is attached. + // Provided by the client when the VPN gateway is created. + Network string `json:"network,omitempty"` + + // Region: [Output Only] URL of the region where the target VPN gateway + // resides. 
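+
+// Example (editor's sketch, not part of the generated client): a
+// TargetReference is typically passed to calls such as
+// compute.forwardingRules.setTarget to repoint a forwarding rule; the
+// URL and names below are placeholders.
+//
+//    ref := &compute.TargetReference{
+//        Target: "regions/us-central1/targetPools/example-pool",
+//    }
+//    op, err := svc.ForwardingRules.SetTarget("example-project", "us-central1", "example-rule", ref).Do()
+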
+ Region string `json:"region,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // Status: [Output Only] The status of the VPN gateway. + // + // Possible values: + // "CREATING" + // "DELETING" + // "FAILED" + // "READY" + Status string `json:"status,omitempty"` + + // Tunnels: [Output Only] A list of URLs to VpnTunnel resources. + // VpnTunnels are created using compute.vpntunnels.insert method and + // associated to a VPN gateway. + Tunnels []string `json:"tunnels,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *TargetVpnGateway) MarshalJSON() ([]byte, error) { + type noMethod TargetVpnGateway + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type TargetVpnGatewayAggregatedList struct { + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id string `json:"id,omitempty"` + + // Items: A map of scoped target vpn gateway lists. + Items map[string]TargetVpnGatewaysScopedList `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always compute#targetVpnGateway + // for target VPN gateways. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *TargetVpnGatewayAggregatedList) MarshalJSON() ([]byte, error) { + type noMethod TargetVpnGatewayAggregatedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// TargetVpnGatewayList: Contains a list of TargetVpnGateway resources. +type TargetVpnGatewayList struct { + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id string `json:"id,omitempty"` + + // Items: [Output Only] A list of TargetVpnGateway resources. + Items []*TargetVpnGateway `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. 
Always compute#targetVpnGateway + // for target VPN gateways. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *TargetVpnGatewayList) MarshalJSON() ([]byte, error) { + type noMethod TargetVpnGatewayList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type TargetVpnGatewaysScopedList struct { + // TargetVpnGateways: [Output Only] List of target vpn gateways + // contained in this scope. + TargetVpnGateways []*TargetVpnGateway `json:"targetVpnGateways,omitempty"` + + // Warning: [Output Only] Informational warning which replaces the list + // of addresses when the list is empty. + Warning *TargetVpnGatewaysScopedListWarning `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. "TargetVpnGateways") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *TargetVpnGatewaysScopedList) MarshalJSON() ([]byte, error) { + type noMethod TargetVpnGatewaysScopedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// TargetVpnGatewaysScopedListWarning: [Output Only] Informational +// warning which replaces the list of addresses when the list is empty. +type TargetVpnGatewaysScopedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. 
+ // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*TargetVpnGatewaysScopedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *TargetVpnGatewaysScopedListWarning) MarshalJSON() ([]byte, error) { + type noMethod TargetVpnGatewaysScopedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type TargetVpnGatewaysScopedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *TargetVpnGatewaysScopedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod TargetVpnGatewaysScopedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type TestFailure struct { + ActualService string `json:"actualService,omitempty"` + + ExpectedService string `json:"expectedService,omitempty"` + + Host string `json:"host,omitempty"` + + Path string `json:"path,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ActualService") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *TestFailure) MarshalJSON() ([]byte, error) { + type noMethod TestFailure + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// UrlMap: A UrlMap resource. This resource defines the mapping from URL +// to the BackendService resource, based on the "longest-match" of the +// URL's host and path. +type UrlMap struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. 
+ CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // DefaultService: The URL of the BackendService resource if none of the + // hostRules match. + DefaultService string `json:"defaultService,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // Fingerprint: Fingerprint of this resource. A hash of the contents + // stored in this object. This field is used in optimistic locking. This + // field will be ignored when inserting a UrlMap. An up-to-date + // fingerprint must be provided in order to update the UrlMap. + Fingerprint string `json:"fingerprint,omitempty"` + + // HostRules: The list of HostRules to use against the URL. + HostRules []*HostRule `json:"hostRules,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] Type of the resource. Always compute#urlMaps for + // url maps. + Kind string `json:"kind,omitempty"` + + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // PathMatchers: The list of named PathMatchers to use against the URL. + PathMatchers []*PathMatcher `json:"pathMatchers,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // Tests: The list of expected URL mappings. Request to update this + // UrlMap will succeed only if all of the test cases pass. + Tests []*UrlMapTest `json:"tests,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *UrlMap) MarshalJSON() ([]byte, error) { + type noMethod UrlMap + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// UrlMapList: Contains a list of UrlMap resources. +type UrlMapList struct { + // Id: [Output Only] Unique identifier for the resource. Set by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of UrlMap resources. + Items []*UrlMap `json:"items,omitempty"` + + // Kind: Type of resource. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. 
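+
+// Example (editor's sketch, not part of the generated client): a minimal
+// UrlMap that routes all unmatched traffic to a default backend service
+// and asserts one expected mapping; service URLs are placeholders. When
+// updating an existing UrlMap, its current Fingerprint must be supplied
+// for optimistic locking.
+//
+//    um := &compute.UrlMap{
+//        Name:           "example-url-map",
+//        DefaultService: "global/backendServices/example-backend",
+//        Tests: []*compute.UrlMapTest{
+//            {Host: "example.com", Path: "/", Service: "global/backendServices/example-backend"},
+//        },
+//    }
+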
+ NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *UrlMapList) MarshalJSON() ([]byte, error) { + type noMethod UrlMapList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type UrlMapReference struct { + UrlMap string `json:"urlMap,omitempty"` + + // ForceSendFields is a list of field names (e.g. "UrlMap") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *UrlMapReference) MarshalJSON() ([]byte, error) { + type noMethod UrlMapReference + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// UrlMapTest: Message for the expected URL mappings. +type UrlMapTest struct { + // Description: Description of this test case. + Description string `json:"description,omitempty"` + + // Host: Host portion of the URL. + Host string `json:"host,omitempty"` + + // Path: Path portion of the URL. + Path string `json:"path,omitempty"` + + // Service: Expected BackendService resource the given URL should be + // mapped to. + Service string `json:"service,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Description") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *UrlMapTest) MarshalJSON() ([]byte, error) { + type noMethod UrlMapTest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// UrlMapValidationResult: Message representing the validation result +// for a UrlMap. +type UrlMapValidationResult struct { + LoadErrors []string `json:"loadErrors,omitempty"` + + // LoadSucceeded: Whether the given UrlMap can be successfully loaded. + // If false, 'loadErrors' indicates the reasons. + LoadSucceeded bool `json:"loadSucceeded,omitempty"` + + TestFailures []*TestFailure `json:"testFailures,omitempty"` + + // TestPassed: If successfully loaded, this field indicates whether the + // test passed. If false, 'testFailures' indicates the reasons for + // failure. + TestPassed bool `json:"testPassed,omitempty"` + + // ForceSendFields is a list of field names (e.g. "LoadErrors") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests.
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *UrlMapValidationResult) MarshalJSON() ([]byte, error) { + type noMethod UrlMapValidationResult + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type UrlMapsValidateRequest struct { + // Resource: Content of the UrlMap to be validated. + Resource *UrlMap `json:"resource,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Resource") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *UrlMapsValidateRequest) MarshalJSON() ([]byte, error) { + type noMethod UrlMapsValidateRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type UrlMapsValidateResponse struct { + Result *UrlMapValidationResult `json:"result,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Result") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *UrlMapsValidateResponse) MarshalJSON() ([]byte, error) { + type noMethod UrlMapsValidateResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// UsageExportLocation: The location in Cloud Storage and naming method +// of the daily usage report. Contains bucket_name and report_name +// prefix. +type UsageExportLocation struct { + // BucketName: The name of an existing bucket in Cloud Storage where the + // usage report object is stored. The Google Service Account is granted + // write access to this bucket. This can either be the bucket name by + // itself, such as example-bucket, or the bucket name with gs:// or + // https://storage.googleapis.com/ in front of it, such as + // gs://example-bucket. + BucketName string `json:"bucketName,omitempty"` + + // ReportNamePrefix: An optional prefix for the name of the usage report + // object stored in bucketName. If not supplied, defaults to usage. The + // report is stored as a CSV file named + // report_name_prefix_gce_YYYYMMDD.csv where YYYYMMDD is the day of the + // usage according to Pacific Time. If you supply a prefix, it should + // conform to Cloud Storage object naming conventions. + ReportNamePrefix string `json:"reportNamePrefix,omitempty"` + + // ForceSendFields is a list of field names (e.g. "BucketName") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. 
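+
+// Example (editor's sketch, not part of the generated client): directing
+// daily usage reports to a Cloud Storage bucket; the bucket and project
+// names are placeholders, and leaving ReportNamePrefix empty would fall
+// back to the default "usage" prefix.
+//
+//    loc := &compute.UsageExportLocation{
+//        BucketName:       "gs://example-bucket",
+//        ReportNamePrefix: "example-report",
+//    }
+//    op, err := svc.Projects.SetUsageExportBucket("example-project", loc).Do()
+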
This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *UsageExportLocation) MarshalJSON() ([]byte, error) { + type noMethod UsageExportLocation + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type VpnTunnel struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // DetailedStatus: [Output Only] Detailed status message for the VPN + // tunnel. + DetailedStatus string `json:"detailedStatus,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // IkeVersion: IKE protocol version to use when establishing the VPN + // tunnel with peer VPN gateway. Acceptable IKE versions are 1 or 2. + // Default version is 2. + IkeVersion int64 `json:"ikeVersion,omitempty"` + + // Kind: [Output Only] Type of resource. Always compute#vpnTunnel for + // VPN tunnels. + Kind string `json:"kind,omitempty"` + + // LocalTrafficSelector: Local traffic selector to use when establishing + // the VPN tunnel with peer VPN gateway. The value should be a CIDR + // formatted string, for example: 192.168.0.0/16. The ranges should be + // disjoint. + LocalTrafficSelector []string `json:"localTrafficSelector,omitempty"` + + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // PeerIp: IP address of the peer VPN gateway. + PeerIp string `json:"peerIp,omitempty"` + + // Region: [Output Only] URL of the region where the VPN tunnel resides. + Region string `json:"region,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // SharedSecret: Shared secret used to set the secure session between + // the Cloud VPN gateway and the peer VPN gateway. + SharedSecret string `json:"sharedSecret,omitempty"` + + // SharedSecretHash: Hash of the shared secret. + SharedSecretHash string `json:"sharedSecretHash,omitempty"` + + // Status: [Output Only] The status of the VPN tunnel. + // + // Possible values: + // "ALLOCATING_RESOURCES" + // "AUTHORIZATION_ERROR" + // "DEPROVISIONING" + // "ESTABLISHED" + // "FAILED" + // "FIRST_HANDSHAKE" + // "NEGOTIATION_FAILURE" + // "NETWORK_ERROR" + // "NO_INCOMING_PACKETS" + // "PROVISIONING" + // "REJECTED" + // "WAITING_FOR_FULL_CONFIG" + Status string `json:"status,omitempty"` + + // TargetVpnGateway: URL of the VPN gateway with which this VPN tunnel + // is associated. Provided by the client when the VPN tunnel is created. + TargetVpnGateway string `json:"targetVpnGateway,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. 
"CreationTimestamp") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *VpnTunnel) MarshalJSON() ([]byte, error) { + type noMethod VpnTunnel + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type VpnTunnelAggregatedList struct { + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id string `json:"id,omitempty"` + + // Items: [Output Only] A map of scoped vpn tunnel lists. + Items map[string]VpnTunnelsScopedList `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always compute#vpnTunnel for + // VPN tunnels. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *VpnTunnelAggregatedList) MarshalJSON() ([]byte, error) { + type noMethod VpnTunnelAggregatedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// VpnTunnelList: Contains a list of VpnTunnel resources. +type VpnTunnelList struct { + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id string `json:"id,omitempty"` + + // Items: [Output Only] A list of VpnTunnel resources. + Items []*VpnTunnel `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always compute#vpnTunnel for + // VPN tunnels. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. 
"Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *VpnTunnelList) MarshalJSON() ([]byte, error) { + type noMethod VpnTunnelList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type VpnTunnelsScopedList struct { + // VpnTunnels: List of vpn tunnels contained in this scope. + VpnTunnels []*VpnTunnel `json:"vpnTunnels,omitempty"` + + // Warning: Informational warning which replaces the list of addresses + // when the list is empty. + Warning *VpnTunnelsScopedListWarning `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. "VpnTunnels") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *VpnTunnelsScopedList) MarshalJSON() ([]byte, error) { + type noMethod VpnTunnelsScopedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// VpnTunnelsScopedListWarning: Informational warning which replaces the +// list of addresses when the list is empty. +type VpnTunnelsScopedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*VpnTunnelsScopedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *VpnTunnelsScopedListWarning) MarshalJSON() ([]byte, error) { + type noMethod VpnTunnelsScopedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type VpnTunnelsScopedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. 
For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *VpnTunnelsScopedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod VpnTunnelsScopedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// Zone: A Zone resource. +type Zone struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Deprecated: [Output Only] The deprecation status associated with this + // zone. + Deprecated *DeprecationStatus `json:"deprecated,omitempty"` + + // Description: [Output Only] Textual description of the resource. + Description string `json:"description,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] Type of the resource. Always compute#zone for + // zones. + Kind string `json:"kind,omitempty"` + + // Name: [Output Only] Name of the resource. + Name string `json:"name,omitempty"` + + // Region: [Output Only] Full URL reference to the region which hosts + // the zone. + Region string `json:"region,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // Status: [Output Only] Status of the zone, either UP or DOWN. + // + // Possible values: + // "DOWN" + // "UP" + Status string `json:"status,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *Zone) MarshalJSON() ([]byte, error) { + type noMethod Zone + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// ZoneList: Contains a list of zone resources. +type ZoneList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: [Output Only] A list of Zone resources. + Items []*Zone `json:"items,omitempty"` + + // Kind: Type of resource. 
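+
+// Example (editor's sketch, not part of the generated client): paging
+// through zones with ZoneList's nextPageToken; the project name is a
+// placeholder.
+//
+//    token := ""
+//    for {
+//        call := svc.Zones.List("example-project")
+//        if token != "" {
+//            call.PageToken(token)
+//        }
+//        zl, err := call.Do()
+//        if err != nil {
+//            log.Fatal(err)
+//        }
+//        for _, z := range zl.Items {
+//            fmt.Println(z.Name, z.Status)
+//        }
+//        if zl.NextPageToken == "" {
+//            break
+//        }
+//        token = zl.NextPageToken
+//    }
+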
+ Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *ZoneList) MarshalJSON() ([]byte, error) { + type noMethod ZoneList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// method id "compute.addresses.aggregatedList": + +type AddressesAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// AggregatedList: Retrieves an aggregated list of addresses. +// For details, see https://cloud.google.com/compute/docs/reference/latest/addresses/aggregatedList +func (r *AddressesService) AggregatedList(project string) *AddressesAggregatedListCall { + c := &AddressesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use filter=name ne +// example-instance. +// +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// The Beta API also supports filtering on multiple expressions by +// providing each separate expression within parentheses. For example, +// (scheduling.automaticRestart eq true) (zone eq us-central1-f). +// Multiple expressions are treated as AND expressions, meaning that +// resources must match all expressions to pass the filters. 
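+//
+// A short sketch of the filter syntax described above; the name
+// "example-instance" is an illustrative value, echoing the example in
+// this description:
+//
+//	call.Filter("name ne example-instance")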
+func (c *AddressesAggregatedListCall) Filter(filter string) *AddressesAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. +func (c *AddressesAggregatedListCall) MaxResults(maxResults int64) *AddressesAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *AddressesAggregatedListCall) PageToken(pageToken string) *AddressesAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AddressesAggregatedListCall) Fields(s ...googleapi.Field) *AddressesAggregatedListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *AddressesAggregatedListCall) IfNoneMatch(entityTag string) *AddressesAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AddressesAggregatedListCall) Context(ctx context.Context) *AddressesAggregatedListCall { + c.ctx_ = ctx + return c +} + +func (c *AddressesAggregatedListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/addresses") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.addresses.aggregatedList" call. +// Exactly one of *AddressAggregatedList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *AddressAggregatedList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *AddressesAggregatedListCall) Do(opts ...googleapi.CallOption) (*AddressAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
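+ // Issue the request. A 304 (Not Modified) response, normally only seen
+ // when IfNoneMatch was set, is surfaced below as a *googleapi.Error
+ // rather than being decoded into an AddressAggregatedList.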
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &AddressAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves an aggregated list of addresses.", + // "httpMethod": "GET", + // "id": "compute.addresses.aggregatedList", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/aggregated/addresses", + // "response": { + // "$ref": "AddressAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *AddressesAggregatedListCall) Pages(ctx context.Context, f func(*AddressAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.addresses.delete": + +type AddressesDeleteCall struct { + s *Service + project string + region string + address string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Deletes the specified address resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/addresses/delete +func (r *AddressesService) Delete(project string, region string, address string) *AddressesDeleteCall { + c := &AddressesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.address = address + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AddressesDeleteCall) Fields(s ...googleapi.Field) *AddressesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AddressesDeleteCall) Context(ctx context.Context) *AddressesDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *AddressesDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses/{address}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "address": c.address, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.addresses.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AddressesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified address resource.", + // "httpMethod": "DELETE", + // "id": "compute.addresses.delete", + // "parameterOrder": [ + // "project", + // "region", + // "address" + // ], + // "parameters": { + // "address": { + // "description": "Name of the address resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/addresses/{address}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.addresses.get": + +type AddressesGetCall struct { + s *Service + project string + region string + address string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// Get: Returns the specified address resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/addresses/get +func (r *AddressesService) Get(project string, region string, address string) *AddressesGetCall { + c := &AddressesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.address = address + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AddressesGetCall) Fields(s ...googleapi.Field) *AddressesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *AddressesGetCall) IfNoneMatch(entityTag string) *AddressesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. 
Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AddressesGetCall) Context(ctx context.Context) *AddressesGetCall { + c.ctx_ = ctx + return c +} + +func (c *AddressesGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses/{address}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "address": c.address, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.addresses.get" call. +// Exactly one of *Address or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Address.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *AddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Address{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified address resource.", + // "httpMethod": "GET", + // "id": "compute.addresses.get", + // "parameterOrder": [ + // "project", + // "region", + // "address" + // ], + // "parameters": { + // "address": { + // "description": "Name of the address resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/addresses/{address}", + // "response": { + // "$ref": "Address" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.addresses.insert": + +type AddressesInsertCall struct { + s *Service + project string + region string + address *Address + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Insert: Creates an address resource in the specified project using +// 
the data included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/addresses/insert +func (r *AddressesService) Insert(project string, region string, address *Address) *AddressesInsertCall { + c := &AddressesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.address = address + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AddressesInsertCall) Fields(s ...googleapi.Field) *AddressesInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AddressesInsertCall) Context(ctx context.Context) *AddressesInsertCall { + c.ctx_ = ctx + return c +} + +func (c *AddressesInsertCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.address) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.addresses.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AddressesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
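+ // doRequest serializes c.address as the JSON request body and POSTs it;
+ // the response is decoded below into an Operation that tracks the
+ // asynchronous insert.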
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates an address resource in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.addresses.insert", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/addresses", + // "request": { + // "$ref": "Address" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.addresses.list": + +type AddressesListCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Retrieves a list of addresses contained within the specified +// region. +// For details, see https://cloud.google.com/compute/docs/reference/latest/addresses/list +func (r *AddressesService) List(project string, region string) *AddressesListCall { + c := &AddressesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use filter=name ne +// example-instance. +// +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. 
+// +// The Beta API also supports filtering on multiple expressions by +// providing each separate expression within parentheses. For example, +// (scheduling.automaticRestart eq true) (zone eq us-central1-f). +// Multiple expressions are treated as AND expressions, meaning that +// resources must match all expressions to pass the filters. +func (c *AddressesListCall) Filter(filter string) *AddressesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. +func (c *AddressesListCall) MaxResults(maxResults int64) *AddressesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *AddressesListCall) PageToken(pageToken string) *AddressesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AddressesListCall) Fields(s ...googleapi.Field) *AddressesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *AddressesListCall) IfNoneMatch(entityTag string) *AddressesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AddressesListCall) Context(ctx context.Context) *AddressesListCall { + c.ctx_ = ctx + return c +} + +func (c *AddressesListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.addresses.list" call. +// Exactly one of *AddressList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *AddressList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
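+ // Non-2xx statuses are converted into errors by googleapi.CheckResponse
+ // below; on success the body is decoded into an AddressList whose
+ // embedded ServerResponse preserves the HTTP status and headers.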
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &AddressList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a list of addresses contained within the specified region.", + // "httpMethod": "GET", + // "id": "compute.addresses.list", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/addresses", + // "response": { + // "$ref": "AddressList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *AddressesListCall) Pages(ctx context.Context, f func(*AddressList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.autoscalers.aggregatedList": + +type AutoscalersAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// AggregatedList: Retrieves an aggregated list of autoscalers. +func (r *AutoscalersService) AggregatedList(project string) *AutoscalersAggregatedListCall { + c := &AutoscalersAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use filter=name ne +// example-instance. +// +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// The Beta API also supports filtering on multiple expressions by +// providing each separate expression within parentheses. For example, +// (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
+// Multiple expressions are treated as AND expressions, meaning that +// resources must match all expressions to pass the filters. +func (c *AutoscalersAggregatedListCall) Filter(filter string) *AutoscalersAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. +func (c *AutoscalersAggregatedListCall) MaxResults(maxResults int64) *AutoscalersAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *AutoscalersAggregatedListCall) PageToken(pageToken string) *AutoscalersAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AutoscalersAggregatedListCall) Fields(s ...googleapi.Field) *AutoscalersAggregatedListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *AutoscalersAggregatedListCall) IfNoneMatch(entityTag string) *AutoscalersAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AutoscalersAggregatedListCall) Context(ctx context.Context) *AutoscalersAggregatedListCall { + c.ctx_ = ctx + return c +} + +func (c *AutoscalersAggregatedListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/autoscalers") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.autoscalers.aggregatedList" call. +// Exactly one of *AutoscalerAggregatedList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *AutoscalerAggregatedList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *AutoscalersAggregatedListCall) Do(opts ...googleapi.CallOption) (*AutoscalerAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
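+ // As in the other list calls, a 304 here means the entity tag passed
+ // via IfNoneMatch still matches; googleapi.IsNotModified can be used to
+ // detect that case in the returned error.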
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &AutoscalerAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves an aggregated list of autoscalers.", + // "httpMethod": "GET", + // "id": "compute.autoscalers.aggregatedList", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/aggregated/autoscalers", + // "response": { + // "$ref": "AutoscalerAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *AutoscalersAggregatedListCall) Pages(ctx context.Context, f func(*AutoscalerAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.autoscalers.delete": + +type AutoscalersDeleteCall struct { + s *Service + project string + zone string + autoscaler string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Deletes the specified autoscaler. +func (r *AutoscalersService) Delete(project string, zone string, autoscaler string) *AutoscalersDeleteCall { + c := &AutoscalersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.autoscaler = autoscaler + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AutoscalersDeleteCall) Fields(s ...googleapi.Field) *AutoscalersDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AutoscalersDeleteCall) Context(ctx context.Context) *AutoscalersDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *AutoscalersDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers/{autoscaler}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "autoscaler": c.autoscaler, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.autoscalers.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. 
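+//
+// A usage sketch; "my-project", "us-central1-f" and "my-autoscaler" are
+// illustrative values, and svc is assumed to be a *Service from this
+// package:
+//
+//	op, err := svc.Autoscalers.Delete("my-project", "us-central1-f", "my-autoscaler").Do()
+//	if err != nil {
+//		// handle error
+//	}
+//	_ = op // the returned Operation can be polled until its Status is "DONE"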
+func (c *AutoscalersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified autoscaler.", + // "httpMethod": "DELETE", + // "id": "compute.autoscalers.delete", + // "parameterOrder": [ + // "project", + // "zone", + // "autoscaler" + // ], + // "parameters": { + // "autoscaler": { + // "description": "Name of the autoscaler to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "Name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/autoscalers/{autoscaler}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.autoscalers.get": + +type AutoscalersGetCall struct { + s *Service + project string + zone string + autoscaler string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// Get: Returns the specified autoscaler resource. Get a list of +// available autoscalers by making a list() request. +func (r *AutoscalersService) Get(project string, zone string, autoscaler string) *AutoscalersGetCall { + c := &AutoscalersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.autoscaler = autoscaler + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AutoscalersGetCall) Fields(s ...googleapi.Field) *AutoscalersGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *AutoscalersGetCall) IfNoneMatch(entityTag string) *AutoscalersGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
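+//
+// A sketch of attaching a timeout; call is an illustrative
+// *AutoscalersGetCall and the duration is arbitrary:
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//	defer cancel()
+//	autoscaler, err := call.Context(ctx).Do()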
+func (c *AutoscalersGetCall) Context(ctx context.Context) *AutoscalersGetCall { + c.ctx_ = ctx + return c +} + +func (c *AutoscalersGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers/{autoscaler}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "autoscaler": c.autoscaler, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.autoscalers.get" call. +// Exactly one of *Autoscaler or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Autoscaler.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Autoscaler{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified autoscaler resource. 
Get a list of available autoscalers by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.autoscalers.get", + // "parameterOrder": [ + // "project", + // "zone", + // "autoscaler" + // ], + // "parameters": { + // "autoscaler": { + // "description": "Name of the autoscaler to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "Name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/autoscalers/{autoscaler}", + // "response": { + // "$ref": "Autoscaler" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.autoscalers.insert": + +type AutoscalersInsertCall struct { + s *Service + project string + zone string + autoscaler *Autoscaler + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Insert: Creates an autoscaler in the specified project using the data +// included in the request. +func (r *AutoscalersService) Insert(project string, zone string, autoscaler *Autoscaler) *AutoscalersInsertCall { + c := &AutoscalersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.autoscaler = autoscaler + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AutoscalersInsertCall) Fields(s ...googleapi.Field) *AutoscalersInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AutoscalersInsertCall) Context(ctx context.Context) *AutoscalersInsertCall { + c.ctx_ = ctx + return c +} + +func (c *AutoscalersInsertCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.autoscalers.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AutoscalersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates an autoscaler in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.autoscalers.insert", + // "parameterOrder": [ + // "project", + // "zone" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "Name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/autoscalers", + // "request": { + // "$ref": "Autoscaler" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.autoscalers.list": + +type AutoscalersListCall struct { + s *Service + project string + zone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Retrieves a list of autoscalers contained within the specified +// zone. +func (r *AutoscalersService) List(project string, zone string) *AutoscalersListCall { + c := &AutoscalersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use filter=name ne +// example-instance. +// +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. 
For example, you could filter on
+// instances that have set the scheduling.automaticRestart field to
+// true. Use filtering on nested fields to take advantage of labels to
+// organize and search for results based on label values.
+//
+// The Beta API also supports filtering on multiple expressions by
+// providing each separate expression within parentheses. For example,
+// (scheduling.automaticRestart eq true) (zone eq us-central1-f).
+// Multiple expressions are treated as AND expressions, meaning that
+// resources must match all expressions to pass the filters.
+func (c *AutoscalersListCall) Filter(filter string) *AutoscalersListCall {
+ c.urlParams_.Set("filter", filter)
+ return c
+}
+
+// MaxResults sets the optional parameter "maxResults": The maximum
+// number of results per page that should be returned. If the number of
+// available results is larger than maxResults, Compute Engine returns a
+// nextPageToken that can be used to get the next page of results in
+// subsequent list requests.
+func (c *AutoscalersListCall) MaxResults(maxResults int64) *AutoscalersListCall {
+ c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
+ return c
+}
+
+// PageToken sets the optional parameter "pageToken": Specifies a page
+// token to use. Set pageToken to the nextPageToken returned by a
+// previous list request to get the next page of results.
+func (c *AutoscalersListCall) PageToken(pageToken string) *AutoscalersListCall {
+ c.urlParams_.Set("pageToken", pageToken)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *AutoscalersListCall) Fields(s ...googleapi.Field) *AutoscalersListCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *AutoscalersListCall) IfNoneMatch(entityTag string) *AutoscalersListCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *AutoscalersListCall) Context(ctx context.Context) *AutoscalersListCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *AutoscalersListCall) doRequest(alt string) (*http.Response, error) {
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "zone": c.zone,
+ })
+ req.Header.Set("User-Agent", c.s.userAgent())
+ if c.ifNoneMatch_ != "" {
+ req.Header.Set("If-None-Match", c.ifNoneMatch_)
+ }
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "compute.autoscalers.list" call.
+// Exactly one of *AutoscalerList or error will be non-nil. Any non-2xx
+// status code is an error. Response headers are in either
+// *AutoscalerList.ServerResponse.Header or (if a response was returned
+// at all) in error.(*googleapi.Error).Header.
Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *AutoscalersListCall) Do(opts ...googleapi.CallOption) (*AutoscalerList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &AutoscalerList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a list of autoscalers contained within the specified zone.", + // "httpMethod": "GET", + // "id": "compute.autoscalers.list", + // "parameterOrder": [ + // "project", + // "zone" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "Name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/autoscalers", + // "response": { + // "$ref": "AutoscalerList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *AutoscalersListCall) Pages(ctx context.Context, f func(*AutoscalerList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.autoscalers.patch": + +type AutoscalersPatchCall struct { + s *Service + project string + zone string + autoscaler2 *Autoscaler + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Patch: Updates an autoscaler in the specified project using the data +// included in the request. This method supports patch semantics. +func (r *AutoscalersService) Patch(project string, zone string, autoscaler string, autoscaler2 *Autoscaler) *AutoscalersPatchCall { + c := &AutoscalersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.urlParams_.Set("autoscaler", autoscaler) + c.autoscaler2 = autoscaler2 + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AutoscalersPatchCall) Fields(s ...googleapi.Field) *AutoscalersPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AutoscalersPatchCall) Context(ctx context.Context) *AutoscalersPatchCall { + c.ctx_ = ctx + return c +} + +func (c *AutoscalersPatchCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler2) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.autoscalers.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AutoscalersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates an autoscaler in the specified project using the data included in the request. This method supports patch semantics.", + // "httpMethod": "PATCH", + // "id": "compute.autoscalers.patch", + // "parameterOrder": [ + // "project", + // "zone", + // "autoscaler" + // ], + // "parameters": { + // "autoscaler": { + // "description": "Name of the autoscaler to update.", + // "location": "query", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "Name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/autoscalers", + // "request": { + // "$ref": "Autoscaler" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.autoscalers.update": + +type AutoscalersUpdateCall struct { + s *Service + project string + zone string + autoscaler *Autoscaler + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Update: Updates an autoscaler in the specified project using the data +// included in the request. +func (r *AutoscalersService) Update(project string, zone string, autoscaler *Autoscaler) *AutoscalersUpdateCall { + c := &AutoscalersUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.autoscaler = autoscaler + return c +} + +// Autoscaler sets the optional parameter "autoscaler": Name of the +// autoscaler to update. 
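+//
+// A hedged usage sketch (svc, updated, and the literals below are
+// illustrative assumptions, not values defined in this file):
+//
+//	op, err := svc.Autoscalers.Update("my-project", "us-central1-f", updated).
+//		Autoscaler("my-autoscaler").
+//		Do()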
+func (c *AutoscalersUpdateCall) Autoscaler(autoscaler string) *AutoscalersUpdateCall { + c.urlParams_.Set("autoscaler", autoscaler) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AutoscalersUpdateCall) Fields(s ...googleapi.Field) *AutoscalersUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AutoscalersUpdateCall) Context(ctx context.Context) *AutoscalersUpdateCall { + c.ctx_ = ctx + return c +} + +func (c *AutoscalersUpdateCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PUT", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.autoscalers.update" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AutoscalersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates an autoscaler in the specified project using the data included in the request.", + // "httpMethod": "PUT", + // "id": "compute.autoscalers.update", + // "parameterOrder": [ + // "project", + // "zone" + // ], + // "parameters": { + // "autoscaler": { + // "description": "Name of the autoscaler to update.", + // "location": "query", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "Name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/autoscalers", + // "request": { + // "$ref": "Autoscaler" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.backendServices.delete": + +type BackendServicesDeleteCall struct { + s *Service + project string + backendService string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Deletes the specified BackendService resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/delete +func (r *BackendServicesService) Delete(project string, backendService string) *BackendServicesDeleteCall { + c := &BackendServicesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.backendService = backendService + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendServicesDeleteCall) Fields(s ...googleapi.Field) *BackendServicesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackendServicesDeleteCall) Context(ctx context.Context) *BackendServicesDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *BackendServicesDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "backendService": c.backendService, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.backendServices.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *BackendServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified BackendService resource.", + // "httpMethod": "DELETE", + // "id": "compute.backendServices.delete", + // "parameterOrder": [ + // "project", + // "backendService" + // ], + // "parameters": { + // "backendService": { + // "description": "Name of the BackendService resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/backendServices/{backendService}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.backendServices.get": + +type BackendServicesGetCall struct { + s *Service + project string + backendService string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// Get: Returns the specified BackendService resource. Get a list of +// available backend services by making a list() request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/get +func (r *BackendServicesService) Get(project string, backendService string) *BackendServicesGetCall { + c := &BackendServicesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.backendService = backendService + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
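+//
+// A hedged sketch of requesting a partial response (svc and the field
+// names are illustrative assumptions):
+//
+//	bs, err := svc.BackendServices.Get("my-project", "my-backend").
+//		Fields("name", "backends").
+//		Do()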
+func (c *BackendServicesGetCall) Fields(s ...googleapi.Field) *BackendServicesGetCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *BackendServicesGetCall) IfNoneMatch(entityTag string) *BackendServicesGetCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *BackendServicesGetCall) Context(ctx context.Context) *BackendServicesGetCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *BackendServicesGetCall) doRequest(alt string) (*http.Response, error) {
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "backendService": c.backendService,
+ })
+ req.Header.Set("User-Agent", c.s.userAgent())
+ if c.ifNoneMatch_ != "" {
+ req.Header.Set("If-None-Match", c.ifNoneMatch_)
+ }
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "compute.backendServices.get" call.
+// Exactly one of *BackendService or error will be non-nil. Any non-2xx
+// status code is an error. Response headers are in either
+// *BackendService.ServerResponse.Header or (if a response was returned
+// at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
+func (c *BackendServicesGetCall) Do(opts ...googleapi.CallOption) (*BackendService, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, &googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ ret := &BackendService{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Returns the specified BackendService resource.
Get a list of available backend services by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.backendServices.get", + // "parameterOrder": [ + // "project", + // "backendService" + // ], + // "parameters": { + // "backendService": { + // "description": "Name of the BackendService resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/backendServices/{backendService}", + // "response": { + // "$ref": "BackendService" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.backendServices.getHealth": + +type BackendServicesGetHealthCall struct { + s *Service + project string + backendService string + resourcegroupreference *ResourceGroupReference + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// GetHealth: Gets the most recent health check results for this +// BackendService. +// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/getHealth +func (r *BackendServicesService) GetHealth(project string, backendService string, resourcegroupreference *ResourceGroupReference) *BackendServicesGetHealthCall { + c := &BackendServicesGetHealthCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.backendService = backendService + c.resourcegroupreference = resourcegroupreference + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendServicesGetHealthCall) Fields(s ...googleapi.Field) *BackendServicesGetHealthCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackendServicesGetHealthCall) Context(ctx context.Context) *BackendServicesGetHealthCall { + c.ctx_ = ctx + return c +} + +func (c *BackendServicesGetHealthCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.resourcegroupreference) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}/getHealth") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "backendService": c.backendService, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.backendServices.getHealth" call. +// Exactly one of *BackendServiceGroupHealth or error will be non-nil. +// Any non-2xx status code is an error. 
Response headers are in either +// *BackendServiceGroupHealth.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *BackendServicesGetHealthCall) Do(opts ...googleapi.CallOption) (*BackendServiceGroupHealth, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &BackendServiceGroupHealth{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets the most recent health check results for this BackendService.", + // "httpMethod": "POST", + // "id": "compute.backendServices.getHealth", + // "parameterOrder": [ + // "project", + // "backendService" + // ], + // "parameters": { + // "backendService": { + // "description": "Name of the BackendService resource to which the queried instance belongs.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/backendServices/{backendService}/getHealth", + // "request": { + // "$ref": "ResourceGroupReference" + // }, + // "response": { + // "$ref": "BackendServiceGroupHealth" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.backendServices.insert": + +type BackendServicesInsertCall struct { + s *Service + project string + backendservice *BackendService + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Insert: Creates a BackendService resource in the specified project +// using the data included in the request. There are several +// restrictions and guidelines to keep in mind when creating a backend +// service. Read Restrictions and Guidelines for more information. +// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/insert +func (r *BackendServicesService) Insert(project string, backendservice *BackendService) *BackendServicesInsertCall { + c := &BackendServicesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.backendservice = backendservice + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendServicesInsertCall) Fields(s ...googleapi.Field) *BackendServicesInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. 
Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackendServicesInsertCall) Context(ctx context.Context) *BackendServicesInsertCall { + c.ctx_ = ctx + return c +} + +func (c *BackendServicesInsertCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.backendServices.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *BackendServicesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a BackendService resource in the specified project using the data included in the request. There are several restrictions and guidelines to keep in mind when creating a backend service. Read Restrictions and Guidelines for more information.", + // "httpMethod": "POST", + // "id": "compute.backendServices.insert", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/backendServices", + // "request": { + // "$ref": "BackendService" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.backendServices.list": + +type BackendServicesListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Retrieves the list of BackendService resources available to the +// specified project. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/list
+func (r *BackendServicesService) List(project string) *BackendServicesListCall {
+ c := &BackendServicesListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.project = project
+ return c
+}
+
+// Filter sets the optional parameter "filter": Sets a filter expression
+// for filtering listed resources, in the form filter={expression}. Your
+// {expression} must be in the format: field_name comparison_string
+// literal_string.
+//
+// The field_name is the name of the field you want to compare. Only
+// atomic field types are supported (string, number, boolean). The
+// comparison_string must be either eq (equals) or ne (not equals). The
+// literal_string is the string value to filter to. The literal value
+// must be valid for the type of field you are filtering by (string,
+// number, boolean). For string fields, the literal value is interpreted
+// as a regular expression using RE2 syntax. The literal value must
+// match the entire field.
+//
+// For example, to filter for instances that do not have a name of
+// example-instance, you would use filter=name ne
+// example-instance.
+//
+// Compute Engine Beta API Only: When filtering in the Beta API, you can
+// also filter on nested fields. For example, you could filter on
+// instances that have set the scheduling.automaticRestart field to
+// true. Use filtering on nested fields to take advantage of labels to
+// organize and search for results based on label values.
+//
+// The Beta API also supports filtering on multiple expressions by
+// providing each separate expression within parentheses. For example,
+// (scheduling.automaticRestart eq true) (zone eq us-central1-f).
+// Multiple expressions are treated as AND expressions, meaning that
+// resources must match all expressions to pass the filters.
+func (c *BackendServicesListCall) Filter(filter string) *BackendServicesListCall {
+ c.urlParams_.Set("filter", filter)
+ return c
+}
+
+// MaxResults sets the optional parameter "maxResults": The maximum
+// number of results per page that should be returned. If the number of
+// available results is larger than maxResults, Compute Engine returns a
+// nextPageToken that can be used to get the next page of results in
+// subsequent list requests.
+func (c *BackendServicesListCall) MaxResults(maxResults int64) *BackendServicesListCall {
+ c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
+ return c
+}
+
+// PageToken sets the optional parameter "pageToken": Specifies a page
+// token to use. Set pageToken to the nextPageToken returned by a
+// previous list request to get the next page of results.
+func (c *BackendServicesListCall) PageToken(pageToken string) *BackendServicesListCall {
+ c.urlParams_.Set("pageToken", pageToken)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *BackendServicesListCall) Fields(s ...googleapi.Field) *BackendServicesListCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
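+//
+// A hedged polling sketch (svc is an illustrative assumption, and
+// lastEtag is assumed to hold an ETag observed on an earlier response):
+//
+//	list, err := svc.BackendServices.List("my-project").IfNoneMatch(lastEtag).Do()
+//	if googleapi.IsNotModified(err) {
+//		// Unchanged since lastEtag; keep using the cached list.
+//	}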
+func (c *BackendServicesListCall) IfNoneMatch(entityTag string) *BackendServicesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackendServicesListCall) Context(ctx context.Context) *BackendServicesListCall { + c.ctx_ = ctx + return c +} + +func (c *BackendServicesListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.backendServices.list" call. +// Exactly one of *BackendServiceList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *BackendServiceList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *BackendServicesListCall) Do(opts ...googleapi.CallOption) (*BackendServiceList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &BackendServiceList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of BackendService resources available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.backendServices.list", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/backendServices", + // "response": { + // "$ref": "BackendServiceList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *BackendServicesListCall) Pages(ctx context.Context, f func(*BackendServiceList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.backendServices.patch": + +type BackendServicesPatchCall struct { + s *Service + project string + backendService string + backendservice *BackendService + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Patch: Updates the entire content of the BackendService resource. +// There are several restrictions and guidelines to keep in mind when +// updating a backend service. Read Restrictions and Guidelines for +// more information. This method supports patch semantics. +// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/patch +func (r *BackendServicesService) Patch(project string, backendService string, backendservice *BackendService) *BackendServicesPatchCall { + c := &BackendServicesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.backendService = backendService + c.backendservice = backendservice + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
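+//
+// A hedged sketch combining a patch with a partial response (svc and the
+// literals are illustrative assumptions):
+//
+//	op, err := svc.BackendServices.Patch("my-project", "my-backend",
+//		&BackendService{TimeoutSec: 60}).
+//		Fields("name", "status").
+//		Do()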
+func (c *BackendServicesPatchCall) Fields(s ...googleapi.Field) *BackendServicesPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackendServicesPatchCall) Context(ctx context.Context) *BackendServicesPatchCall { + c.ctx_ = ctx + return c +} + +func (c *BackendServicesPatchCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "backendService": c.backendService, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.backendServices.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *BackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates the entire content of the BackendService resource. There are several restrictions and guidelines to keep in mind when updating a backend service. Read Restrictions and Guidelines for more information. 
This method supports patch semantics.", + // "httpMethod": "PATCH", + // "id": "compute.backendServices.patch", + // "parameterOrder": [ + // "project", + // "backendService" + // ], + // "parameters": { + // "backendService": { + // "description": "Name of the BackendService resource to update.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/backendServices/{backendService}", + // "request": { + // "$ref": "BackendService" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.backendServices.update": + +type BackendServicesUpdateCall struct { + s *Service + project string + backendService string + backendservice *BackendService + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Update: Updates the entire content of the BackendService resource. +// There are several restrictions and guidelines to keep in mind when +// updating a backend service. Read Restrictions and Guidelines for +// more information. +// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/update +func (r *BackendServicesService) Update(project string, backendService string, backendservice *BackendService) *BackendServicesUpdateCall { + c := &BackendServicesUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.backendService = backendService + c.backendservice = backendservice + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendServicesUpdateCall) Fields(s ...googleapi.Field) *BackendServicesUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackendServicesUpdateCall) Context(ctx context.Context) *BackendServicesUpdateCall { + c.ctx_ = ctx + return c +} + +func (c *BackendServicesUpdateCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PUT", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "backendService": c.backendService, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.backendServices.update" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. 
Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *BackendServicesUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates the entire content of the BackendService resource. There are several restrictions and guidelines to keep in mind when updating a backend service. Read Restrictions and Guidelines for more information.", + // "httpMethod": "PUT", + // "id": "compute.backendServices.update", + // "parameterOrder": [ + // "project", + // "backendService" + // ], + // "parameters": { + // "backendService": { + // "description": "Name of the BackendService resource to update.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/backendServices/{backendService}", + // "request": { + // "$ref": "BackendService" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.diskTypes.aggregatedList": + +type DiskTypesAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// AggregatedList: Retrieves an aggregated list of disk types. +// For details, see https://cloud.google.com/compute/docs/reference/latest/diskTypes/aggregatedList +func (r *DiskTypesService) AggregatedList(project string) *DiskTypesAggregatedListCall { + c := &DiskTypesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. 
The literal value must
+// match the entire field.
+//
+// For example, to filter for instances that do not have a name of
+// example-instance, you would use filter=name ne
+// example-instance.
+//
+// Compute Engine Beta API Only: When filtering in the Beta API, you can
+// also filter on nested fields. For example, you could filter on
+// instances that have set the scheduling.automaticRestart field to
+// true. Use filtering on nested fields to take advantage of labels to
+// organize and search for results based on label values.
+//
+// The Beta API also supports filtering on multiple expressions by
+// providing each separate expression within parentheses. For example,
+// (scheduling.automaticRestart eq true) (zone eq us-central1-f).
+// Multiple expressions are treated as AND expressions, meaning that
+// resources must match all expressions to pass the filters.
+func (c *DiskTypesAggregatedListCall) Filter(filter string) *DiskTypesAggregatedListCall {
+ c.urlParams_.Set("filter", filter)
+ return c
+}
+
+// MaxResults sets the optional parameter "maxResults": The maximum
+// number of results per page that should be returned. If the number of
+// available results is larger than maxResults, Compute Engine returns a
+// nextPageToken that can be used to get the next page of results in
+// subsequent list requests.
+func (c *DiskTypesAggregatedListCall) MaxResults(maxResults int64) *DiskTypesAggregatedListCall {
+ c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
+ return c
+}
+
+// PageToken sets the optional parameter "pageToken": Specifies a page
+// token to use. Set pageToken to the nextPageToken returned by a
+// previous list request to get the next page of results.
+func (c *DiskTypesAggregatedListCall) PageToken(pageToken string) *DiskTypesAggregatedListCall {
+ c.urlParams_.Set("pageToken", pageToken)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *DiskTypesAggregatedListCall) Fields(s ...googleapi.Field) *DiskTypesAggregatedListCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *DiskTypesAggregatedListCall) IfNoneMatch(entityTag string) *DiskTypesAggregatedListCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *DiskTypesAggregatedListCall) Context(ctx context.Context) *DiskTypesAggregatedListCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *DiskTypesAggregatedListCall) doRequest(alt string) (*http.Response, error) {
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/diskTypes")
+ urls += "?"
+ c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.diskTypes.aggregatedList" call. +// Exactly one of *DiskTypeAggregatedList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *DiskTypeAggregatedList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *DiskTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskTypeAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &DiskTypeAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves an aggregated list of disk types.", + // "httpMethod": "GET", + // "id": "compute.diskTypes.aggregatedList", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.",
+	//       "format": "uint32",
+	//       "location": "query",
+	//       "maximum": "500",
+	//       "minimum": "0",
+	//       "type": "integer"
+	//     },
+	//     "pageToken": {
+	//       "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.",
+	//       "location": "query",
+	//       "type": "string"
+	//     },
+	//     "project": {
+	//       "description": "Project ID for this request.",
+	//       "location": "path",
+	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+	//       "required": true,
+	//       "type": "string"
+	//     }
+	//   },
+	//   "path": "{project}/aggregated/diskTypes",
+	//   "response": {
+	//     "$ref": "DiskTypeAggregatedList"
+	//   },
+	//   "scopes": [
+	//     "https://www.googleapis.com/auth/cloud-platform",
+	//     "https://www.googleapis.com/auth/compute",
+	//     "https://www.googleapis.com/auth/compute.readonly"
+	//   ]
+	// }
+
+}
+
+// Pages invokes f for each page of results.
+// A non-nil error returned from f will halt the iteration.
+// The provided context supersedes any context provided to the Context method.
+func (c *DiskTypesAggregatedListCall) Pages(ctx context.Context, f func(*DiskTypeAggregatedList) error) error {
+	c.ctx_ = ctx
+	defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
+	for {
+		x, err := c.Do()
+		if err != nil {
+			return err
+		}
+		if err := f(x); err != nil {
+			return err
+		}
+		if x.NextPageToken == "" {
+			return nil
+		}
+		c.PageToken(x.NextPageToken)
+	}
+}
+
+// method id "compute.diskTypes.get":
+
+type DiskTypesGetCall struct {
+	s            *Service
+	project      string
+	zone         string
+	diskType     string
+	urlParams_   gensupport.URLParams
+	ifNoneMatch_ string
+	ctx_         context.Context
+}
+
+// Get: Returns the specified disk type. Get a list of available disk
+// types by making a list() request.
+// For details, see https://cloud.google.com/compute/docs/reference/latest/diskTypes/get
+func (r *DiskTypesService) Get(project string, zone string, diskType string) *DiskTypesGetCall {
+	c := &DiskTypesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+	c.project = project
+	c.zone = zone
+	c.diskType = diskType
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *DiskTypesGetCall) Fields(s ...googleapi.Field) *DiskTypesGetCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *DiskTypesGetCall) IfNoneMatch(entityTag string) *DiskTypesGetCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
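+//
+// For example (an illustrative sketch, not generated code; svc is
+// assumed to be an authenticated *Service, and the project, zone, and
+// disk-type names are placeholders):
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//	defer cancel()
+//	dt, err := svc.DiskTypes.Get("my-project", "us-central1-f", "pd-ssd").Context(ctx).Do()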
+func (c *DiskTypesGetCall) Context(ctx context.Context) *DiskTypesGetCall { + c.ctx_ = ctx + return c +} + +func (c *DiskTypesGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/diskTypes/{diskType}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "diskType": c.diskType, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.diskTypes.get" call. +// Exactly one of *DiskType or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *DiskType.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *DiskTypesGetCall) Do(opts ...googleapi.CallOption) (*DiskType, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &DiskType{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified disk type. Get a list of available disk types by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.diskTypes.get", + // "parameterOrder": [ + // "project", + // "zone", + // "diskType" + // ], + // "parameters": { + // "diskType": { + // "description": "Name of the disk type to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/diskTypes/{diskType}", + // "response": { + // "$ref": "DiskType" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.diskTypes.list": + +type DiskTypesListCall struct { + s *Service + project string + zone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Retrieves a list of disk types available to the specified +// project. 
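+//
+// A minimal usage sketch (illustrative only; svc is assumed to be an
+// authenticated *Service, and the project and zone names are
+// placeholders):
+//
+//	list, err := svc.DiskTypes.List("my-project", "us-central1-f").Do()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for _, dt := range list.Items {
+//		fmt.Println(dt.Name)
+//	}
+//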
+// For details, see https://cloud.google.com/compute/docs/reference/latest/diskTypes/list
+func (r *DiskTypesService) List(project string, zone string) *DiskTypesListCall {
+	c := &DiskTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+	c.project = project
+	c.zone = zone
+	return c
+}
+
+// Filter sets the optional parameter "filter": Sets a filter expression
+// for filtering listed resources, in the form filter={expression}. Your
+// {expression} must be in the format: field_name comparison_string
+// literal_string.
+//
+// The field_name is the name of the field you want to compare. Only
+// atomic field types are supported (string, number, boolean). The
+// comparison_string must be either eq (equals) or ne (not equals). The
+// literal_string is the string value to filter to. The literal value
+// must be valid for the type of field you are filtering by (string,
+// number, boolean). For string fields, the literal value is interpreted
+// as a regular expression using RE2 syntax. The literal value must
+// match the entire field.
+//
+// For example, to filter for instances that do not have a name of
+// example-instance, you would use filter=name ne
+// example-instance.
+//
+// Compute Engine Beta API Only: When filtering in the Beta API, you can
+// also filter on nested fields. For example, you could filter on
+// instances that have set the scheduling.automaticRestart field to
+// true. Use filtering on nested fields to take advantage of labels to
+// organize and search for results based on label values.
+//
+// The Beta API also supports filtering on multiple expressions by
+// providing each separate expression within parentheses. For example,
+// (scheduling.automaticRestart eq true) (zone eq us-central1-f).
+// Multiple expressions are treated as AND expressions, meaning that
+// resources must match all expressions to pass the filters.
+func (c *DiskTypesListCall) Filter(filter string) *DiskTypesListCall {
+	c.urlParams_.Set("filter", filter)
+	return c
+}
+
+// MaxResults sets the optional parameter "maxResults": The maximum
+// number of results per page that should be returned. If the number of
+// available results is larger than maxResults, Compute Engine returns a
+// nextPageToken that can be used to get the next page of results in
+// subsequent list requests.
+func (c *DiskTypesListCall) MaxResults(maxResults int64) *DiskTypesListCall {
+	c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
+	return c
+}
+
+// PageToken sets the optional parameter "pageToken": Specifies a page
+// token to use. Set pageToken to the nextPageToken returned by a
+// previous list request to get the next page of results.
+func (c *DiskTypesListCall) PageToken(pageToken string) *DiskTypesListCall {
+	c.urlParams_.Set("pageToken", pageToken)
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *DiskTypesListCall) Fields(s ...googleapi.Field) *DiskTypesListCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
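+//
+// For example (an illustrative sketch; svc is assumed to be an
+// authenticated *Service, and etag would come from an earlier
+// response's ServerResponse.Header):
+//
+//	list, err := svc.DiskTypes.List("my-project", "us-central1-f").IfNoneMatch(etag).Do()
+//	if googleapi.IsNotModified(err) {
+//		// The cached copy is still current and can be reused.
+//	}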
+func (c *DiskTypesListCall) IfNoneMatch(entityTag string) *DiskTypesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DiskTypesListCall) Context(ctx context.Context) *DiskTypesListCall { + c.ctx_ = ctx + return c +} + +func (c *DiskTypesListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/diskTypes") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.diskTypes.list" call. +// Exactly one of *DiskTypeList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *DiskTypeList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *DiskTypesListCall) Do(opts ...googleapi.CallOption) (*DiskTypeList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &DiskTypeList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a list of disk types available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.diskTypes.list", + // "parameterOrder": [ + // "project", + // "zone" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/diskTypes", + // "response": { + // "$ref": "DiskTypeList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *DiskTypesListCall) Pages(ctx context.Context, f func(*DiskTypeList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.disks.aggregatedList": + +type DisksAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// AggregatedList: Retrieves an aggregated list of persistent disks. +// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/aggregatedList +func (r *DisksService) AggregatedList(project string) *DisksAggregatedListCall { + c := &DisksAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). 
The
+// comparison_string must be either eq (equals) or ne (not equals). The
+// literal_string is the string value to filter to. The literal value
+// must be valid for the type of field you are filtering by (string,
+// number, boolean). For string fields, the literal value is interpreted
+// as a regular expression using RE2 syntax. The literal value must
+// match the entire field.
+//
+// For example, to filter for instances that do not have a name of
+// example-instance, you would use filter=name ne
+// example-instance.
+//
+// Compute Engine Beta API Only: When filtering in the Beta API, you can
+// also filter on nested fields. For example, you could filter on
+// instances that have set the scheduling.automaticRestart field to
+// true. Use filtering on nested fields to take advantage of labels to
+// organize and search for results based on label values.
+//
+// The Beta API also supports filtering on multiple expressions by
+// providing each separate expression within parentheses. For example,
+// (scheduling.automaticRestart eq true) (zone eq us-central1-f).
+// Multiple expressions are treated as AND expressions, meaning that
+// resources must match all expressions to pass the filters.
+func (c *DisksAggregatedListCall) Filter(filter string) *DisksAggregatedListCall {
+	c.urlParams_.Set("filter", filter)
+	return c
+}
+
+// MaxResults sets the optional parameter "maxResults": The maximum
+// number of results per page that should be returned. If the number of
+// available results is larger than maxResults, Compute Engine returns a
+// nextPageToken that can be used to get the next page of results in
+// subsequent list requests.
+func (c *DisksAggregatedListCall) MaxResults(maxResults int64) *DisksAggregatedListCall {
+	c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
+	return c
+}
+
+// PageToken sets the optional parameter "pageToken": Specifies a page
+// token to use. Set pageToken to the nextPageToken returned by a
+// previous list request to get the next page of results.
+func (c *DisksAggregatedListCall) PageToken(pageToken string) *DisksAggregatedListCall {
+	c.urlParams_.Set("pageToken", pageToken)
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *DisksAggregatedListCall) Fields(s ...googleapi.Field) *DisksAggregatedListCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *DisksAggregatedListCall) IfNoneMatch(entityTag string) *DisksAggregatedListCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *DisksAggregatedListCall) Context(ctx context.Context) *DisksAggregatedListCall {
+	c.ctx_ = ctx
+	return c
+}
+
+func (c *DisksAggregatedListCall) doRequest(alt string) (*http.Response, error) {
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/disks")
+	urls += "?"
+ c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.disks.aggregatedList" call. +// Exactly one of *DiskAggregatedList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *DiskAggregatedList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *DisksAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &DiskAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves an aggregated list of persistent disks.", + // "httpMethod": "GET", + // "id": "compute.disks.aggregatedList", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/aggregated/disks", + // "response": { + // "$ref": "DiskAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *DisksAggregatedListCall) Pages(ctx context.Context, f func(*DiskAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.disks.createSnapshot": + +type DisksCreateSnapshotCall struct { + s *Service + project string + zone string + disk string + snapshot *Snapshot + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// CreateSnapshot: Creates a snapshot of a specified persistent disk. +// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/createSnapshot +func (r *DisksService) CreateSnapshot(project string, zone string, disk string, snapshot *Snapshot) *DisksCreateSnapshotCall { + c := &DisksCreateSnapshotCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.disk = disk + c.snapshot = snapshot + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DisksCreateSnapshotCall) Fields(s ...googleapi.Field) *DisksCreateSnapshotCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DisksCreateSnapshotCall) Context(ctx context.Context) *DisksCreateSnapshotCall { + c.ctx_ = ctx + return c +} + +func (c *DisksCreateSnapshotCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.snapshot) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}/createSnapshot") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "disk": c.disk, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.disks.createSnapshot" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *DisksCreateSnapshotCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a snapshot of a specified persistent disk.", + // "httpMethod": "POST", + // "id": "compute.disks.createSnapshot", + // "parameterOrder": [ + // "project", + // "zone", + // "disk" + // ], + // "parameters": { + // "disk": { + // "description": "Name of the persistent disk to snapshot.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/disks/{disk}/createSnapshot", + // "request": { + // "$ref": "Snapshot" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.disks.delete": + +type DisksDeleteCall struct { + s *Service + project string + zone string + disk string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Deletes the specified persistent disk. Deleting a disk +// removes its data permanently and is irreversible. However, deleting a +// disk does not delete any snapshots previously made from the disk. You +// must separately delete snapshots. 
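+// For example (an illustrative sketch; svc is assumed to be an
+// authenticated *Service, and the project, zone, and disk names are
+// placeholders):
+//
+//	op, err := svc.Disks.Delete("my-project", "us-central1-f", "my-disk").Do()
+//	// On success, op describes a zone Operation that can be polled
+//	// until the deletion completes.
+//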
+// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/delete +func (r *DisksService) Delete(project string, zone string, disk string) *DisksDeleteCall { + c := &DisksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.disk = disk + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DisksDeleteCall) Fields(s ...googleapi.Field) *DisksDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DisksDeleteCall) Context(ctx context.Context) *DisksDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *DisksDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "disk": c.disk, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.disks.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *DisksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified persistent disk. Deleting a disk removes its data permanently and is irreversible. However, deleting a disk does not delete any snapshots previously made from the disk. 
You must separately delete snapshots.",
+	//   "httpMethod": "DELETE",
+	//   "id": "compute.disks.delete",
+	//   "parameterOrder": [
+	//     "project",
+	//     "zone",
+	//     "disk"
+	//   ],
+	//   "parameters": {
+	//     "disk": {
+	//       "description": "Name of the persistent disk to delete.",
+	//       "location": "path",
+	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+	//       "required": true,
+	//       "type": "string"
+	//     },
+	//     "project": {
+	//       "description": "Project ID for this request.",
+	//       "location": "path",
+	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+	//       "required": true,
+	//       "type": "string"
+	//     },
+	//     "zone": {
+	//       "description": "The name of the zone for this request.",
+	//       "location": "path",
+	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+	//       "required": true,
+	//       "type": "string"
+	//     }
+	//   },
+	//   "path": "{project}/zones/{zone}/disks/{disk}",
+	//   "response": {
+	//     "$ref": "Operation"
+	//   },
+	//   "scopes": [
+	//     "https://www.googleapis.com/auth/cloud-platform",
+	//     "https://www.googleapis.com/auth/compute"
+	//   ]
+	// }
+
+}
+
+// method id "compute.disks.get":
+
+type DisksGetCall struct {
+	s            *Service
+	project      string
+	zone         string
+	disk         string
+	urlParams_   gensupport.URLParams
+	ifNoneMatch_ string
+	ctx_         context.Context
+}
+
+// Get: Returns a specified persistent disk. Get a list of available
+// persistent disks by making a list() request.
+// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/get
+func (r *DisksService) Get(project string, zone string, disk string) *DisksGetCall {
+	c := &DisksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+	c.project = project
+	c.zone = zone
+	c.disk = disk
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *DisksGetCall) Fields(s ...googleapi.Field) *DisksGetCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *DisksGetCall) IfNoneMatch(entityTag string) *DisksGetCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *DisksGetCall) Context(ctx context.Context) *DisksGetCall {
+	c.ctx_ = ctx
+	return c
+}
+
+func (c *DisksGetCall) doRequest(alt string) (*http.Response, error) {
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}")
+	urls += "?" + c.urlParams_.Encode()
+	req, _ := http.NewRequest("GET", urls, body)
+	googleapi.Expand(req.URL, map[string]string{
+		"project": c.project,
+		"zone":    c.zone,
+		"disk":    c.disk,
+	})
+	req.Header.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		req.Header.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	if c.ctx_ != nil {
+		return ctxhttp.Do(c.ctx_, c.s.client, req)
+	}
+	return c.s.client.Do(req)
+}
+
+// Do executes the "compute.disks.get" call.
+// Exactly one of *Disk or error will be non-nil.
Any non-2xx status +// code is an error. Response headers are in either +// *Disk.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *DisksGetCall) Do(opts ...googleapi.CallOption) (*Disk, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Disk{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns a specified persistent disk. Get a list of available persistent disks by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.disks.get", + // "parameterOrder": [ + // "project", + // "zone", + // "disk" + // ], + // "parameters": { + // "disk": { + // "description": "Name of the persistent disk to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/disks/{disk}", + // "response": { + // "$ref": "Disk" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.disks.insert": + +type DisksInsertCall struct { + s *Service + project string + zone string + disk *Disk + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Insert: Creates a persistent disk in the specified project using the +// data in the request. You can create a disk with a sourceImage, a +// sourceSnapshot, or create an empty 500 GB data disk by omitting all +// properties. You can also create a disk that is larger than the +// default size by specifying the sizeGb property. +// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/insert +func (r *DisksService) Insert(project string, zone string, disk *Disk) *DisksInsertCall { + c := &DisksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.disk = disk + return c +} + +// SourceImage sets the optional parameter "sourceImage": Source image +// to restore onto a disk. +func (c *DisksInsertCall) SourceImage(sourceImage string) *DisksInsertCall { + c.urlParams_.Set("sourceImage", sourceImage) + return c +} + +// Fields allows partial responses to be retrieved. 
See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DisksInsertCall) Fields(s ...googleapi.Field) *DisksInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DisksInsertCall) Context(ctx context.Context) *DisksInsertCall { + c.ctx_ = ctx + return c +} + +func (c *DisksInsertCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.disk) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.disks.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *DisksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a persistent disk in the specified project using the data in the request. You can create a disk with a sourceImage, a sourceSnapshot, or create an empty 500 GB data disk by omitting all properties. You can also create a disk that is larger than the default size by specifying the sizeGb property.", + // "httpMethod": "POST", + // "id": "compute.disks.insert", + // "parameterOrder": [ + // "project", + // "zone" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "sourceImage": { + // "description": "Optional. 
Source image to restore onto a disk.", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/disks", + // "request": { + // "$ref": "Disk" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.disks.list": + +type DisksListCall struct { + s *Service + project string + zone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Retrieves a list of persistent disks contained within the +// specified zone. +// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/list +func (r *DisksService) List(project string, zone string) *DisksListCall { + c := &DisksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use filter=name ne +// example-instance. +// +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// The Beta API also supports filtering on multiple expressions by +// providing each separate expression within parentheses. For example, +// (scheduling.automaticRestart eq true) (zone eq us-central1-f). +// Multiple expressions are treated as AND expressions, meaning that +// resources must match all expressions to pass the filters. +func (c *DisksListCall) Filter(filter string) *DisksListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. +func (c *DisksListCall) MaxResults(maxResults int64) *DisksListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. 
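+//
+// A manual paging sketch (illustrative only; svc is assumed to be an
+// authenticated *Service, and the Pages method below wraps the same
+// loop):
+//
+//	call := svc.Disks.List("my-project", "us-central1-f").MaxResults(50)
+//	for {
+//		page, err := call.Do()
+//		if err != nil {
+//			log.Fatal(err)
+//		}
+//		for _, d := range page.Items {
+//			fmt.Println(d.Name)
+//		}
+//		if page.NextPageToken == "" {
+//			break
+//		}
+//		call.PageToken(page.NextPageToken)
+//	}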
+func (c *DisksListCall) PageToken(pageToken string) *DisksListCall {
+	c.urlParams_.Set("pageToken", pageToken)
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *DisksListCall) Fields(s ...googleapi.Field) *DisksListCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *DisksListCall) IfNoneMatch(entityTag string) *DisksListCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *DisksListCall) Context(ctx context.Context) *DisksListCall {
+	c.ctx_ = ctx
+	return c
+}
+
+func (c *DisksListCall) doRequest(alt string) (*http.Response, error) {
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks")
+	urls += "?" + c.urlParams_.Encode()
+	req, _ := http.NewRequest("GET", urls, body)
+	googleapi.Expand(req.URL, map[string]string{
+		"project": c.project,
+		"zone":    c.zone,
+	})
+	req.Header.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		req.Header.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	if c.ctx_ != nil {
+		return ctxhttp.Do(c.ctx_, c.s.client, req)
+	}
+	return c.s.client.Do(req)
+}
+
+// Do executes the "compute.disks.list" call.
+// Exactly one of *DiskList or error will be non-nil. Any non-2xx status
+// code is an error. Response headers are in either
+// *DiskList.ServerResponse.Header or (if a response was returned at
+// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
+// to check whether the returned error was because
+// http.StatusNotModified was returned.
+func (c *DisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error) {
+	gensupport.SetOptions(c.urlParams_, opts...)
+	res, err := c.doRequest("json")
+	if res != nil && res.StatusCode == http.StatusNotModified {
+		if res.Body != nil {
+			res.Body.Close()
+		}
+		return nil, &googleapi.Error{
+			Code:   res.StatusCode,
+			Header: res.Header,
+		}
+	}
+	if err != nil {
+		return nil, err
+	}
+	defer googleapi.CloseBody(res)
+	if err := googleapi.CheckResponse(res); err != nil {
+		return nil, err
+	}
+	ret := &DiskList{
+		ServerResponse: googleapi.ServerResponse{
+			Header:         res.Header,
+			HTTPStatusCode: res.StatusCode,
+		},
+	}
+	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+	// {
+	//   "description": "Retrieves a list of persistent disks contained within the specified zone.",
+	//   "httpMethod": "GET",
+	//   "id": "compute.disks.list",
+	//   "parameterOrder": [
+	//     "project",
+	//     "zone"
+	//   ],
+	//   "parameters": {
+	//     "filter": {
+	//       "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean).
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/disks", + // "response": { + // "$ref": "DiskList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *DisksListCall) Pages(ctx context.Context, f func(*DiskList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.disks.resize": + +type DisksResizeCall struct { + s *Service + project string + zone string + disk string + disksresizerequest *DisksResizeRequest + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Resize: Resizes the specified persistent disk. 
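+//
+// For example (an illustrative sketch; svc is assumed to be an
+// authenticated *Service, and SizeGb is the target size field defined
+// on DisksResizeRequest elsewhere in this package):
+//
+//	op, err := svc.Disks.Resize("my-project", "us-central1-f", "my-disk",
+//		&DisksResizeRequest{SizeGb: 200}).Do()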
+func (r *DisksService) Resize(project string, zone string, disk string, disksresizerequest *DisksResizeRequest) *DisksResizeCall { + c := &DisksResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.disk = disk + c.disksresizerequest = disksresizerequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DisksResizeCall) Fields(s ...googleapi.Field) *DisksResizeCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DisksResizeCall) Context(ctx context.Context) *DisksResizeCall { + c.ctx_ = ctx + return c +} + +func (c *DisksResizeCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.disksresizerequest) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}/resize") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "disk": c.disk, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.disks.resize" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *DisksResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
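+	// Editorial note: the 304 check below deliberately runs before the
+	// error check, so a Not Modified result (from a conditional request)
+	// is reported as a *googleapi.Error that googleapi.IsNotModified
+	// recognizes.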
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Resizes the specified persistent disk.", + // "httpMethod": "POST", + // "id": "compute.disks.resize", + // "parameterOrder": [ + // "project", + // "zone", + // "disk" + // ], + // "parameters": { + // "disk": { + // "description": "The name of the persistent disk.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/disks/{disk}/resize", + // "request": { + // "$ref": "DisksResizeRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.firewalls.delete": + +type FirewallsDeleteCall struct { + s *Service + project string + firewall string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Deletes the specified firewall. +// For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/delete +func (r *FirewallsService) Delete(project string, firewall string) *FirewallsDeleteCall { + c := &FirewallsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.firewall = firewall + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *FirewallsDeleteCall) Fields(s ...googleapi.Field) *FirewallsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *FirewallsDeleteCall) Context(ctx context.Context) *FirewallsDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *FirewallsDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{firewall}") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "firewall": c.firewall, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.firewalls.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *FirewallsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified firewall.", + // "httpMethod": "DELETE", + // "id": "compute.firewalls.delete", + // "parameterOrder": [ + // "project", + // "firewall" + // ], + // "parameters": { + // "firewall": { + // "description": "Name of the firewall rule to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/firewalls/{firewall}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.firewalls.get": + +type FirewallsGetCall struct { + s *Service + project string + firewall string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// Get: Returns the specified firewall. +// For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/get +func (r *FirewallsService) Get(project string, firewall string) *FirewallsGetCall { + c := &FirewallsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.firewall = firewall + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *FirewallsGetCall) Fields(s ...googleapi.Field) *FirewallsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. 
Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *FirewallsGetCall) IfNoneMatch(entityTag string) *FirewallsGetCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *FirewallsGetCall) Context(ctx context.Context) *FirewallsGetCall {
+	c.ctx_ = ctx
+	return c
+}
+
+func (c *FirewallsGetCall) doRequest(alt string) (*http.Response, error) {
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{firewall}")
+	urls += "?" + c.urlParams_.Encode()
+	req, _ := http.NewRequest("GET", urls, body)
+	googleapi.Expand(req.URL, map[string]string{
+		"project":  c.project,
+		"firewall": c.firewall,
+	})
+	req.Header.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		req.Header.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	if c.ctx_ != nil {
+		return ctxhttp.Do(c.ctx_, c.s.client, req)
+	}
+	return c.s.client.Do(req)
+}
+
+// Do executes the "compute.firewalls.get" call.
+// Exactly one of *Firewall or error will be non-nil. Any non-2xx status
+// code is an error. Response headers are in either
+// *Firewall.ServerResponse.Header or (if a response was returned at
+// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
+// to check whether the returned error was because
+// http.StatusNotModified was returned.
+func (c *FirewallsGetCall) Do(opts ...googleapi.CallOption) (*Firewall, error) {
+	gensupport.SetOptions(c.urlParams_, opts...)
+	res, err := c.doRequest("json")
+	if res != nil && res.StatusCode == http.StatusNotModified {
+		if res.Body != nil {
+			res.Body.Close()
+		}
+		return nil, &googleapi.Error{
+			Code:   res.StatusCode,
+			Header: res.Header,
+		}
+	}
+	if err != nil {
+		return nil, err
+	}
+	defer googleapi.CloseBody(res)
+	if err := googleapi.CheckResponse(res); err != nil {
+		return nil, err
+	}
+	ret := &Firewall{
+		ServerResponse: googleapi.ServerResponse{
+			Header:         res.Header,
+			HTTPStatusCode: res.StatusCode,
+		},
+	}
+	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+	// {
+	//   "description": "Returns the specified firewall.",
+	//   "httpMethod": "GET",
+	//   "id": "compute.firewalls.get",
+	//   "parameterOrder": [
+	//     "project",
+	//     "firewall"
+	//   ],
+	//   "parameters": {
+	//     "firewall": {
+	//       "description": "Name of the firewall rule to return.",
+	//       "location": "path",
+	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+	//       "required": true,
+	//       "type": "string"
+	//     },
+	//     "project": {
+	//       "description": "Project ID for this request.",
+	//       "location": "path",
+	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+	//       "required": true,
+	//       "type": "string"
+	//     }
+	//   },
+	//   "path": "{project}/global/firewalls/{firewall}",
+	//   "response": {
+	//     "$ref": "Firewall"
+	//   },
+	//   "scopes": [
+	//     "https://www.googleapis.com/auth/cloud-platform",
+	//     "https://www.googleapis.com/auth/compute",
+	//     "https://www.googleapis.com/auth/compute.readonly"
+	//   ]
+	// }
+
+}
+
+// method id "compute.firewalls.insert":
+
+type FirewallsInsertCall struct {
+	s          *Service
+	project    string
+	firewall   *Firewall
+	urlParams_ gensupport.URLParams
+	ctx_       context.Context
+}
+
+// Insert: Creates a firewall rule in the specified
project using the +// data included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/insert +func (r *FirewallsService) Insert(project string, firewall *Firewall) *FirewallsInsertCall { + c := &FirewallsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.firewall = firewall + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *FirewallsInsertCall) Fields(s ...googleapi.Field) *FirewallsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *FirewallsInsertCall) Context(ctx context.Context) *FirewallsInsertCall { + c.ctx_ = ctx + return c +} + +func (c *FirewallsInsertCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewall) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.firewalls.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *FirewallsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
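+	// Editorial note: SetOptions folds any per-call options (for
+	// example googleapi.QuotaUser) into the query parameters before
+	// doRequest builds the URL.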
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a firewall rule in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.firewalls.insert", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/firewalls", + // "request": { + // "$ref": "Firewall" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.firewalls.list": + +type FirewallsListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Retrieves the list of firewall rules available to the specified +// project. +// For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/list +func (r *FirewallsService) List(project string) *FirewallsListCall { + c := &FirewallsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use filter=name ne +// example-instance. +// +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// The Beta API also supports filtering on multiple expressions by +// providing each separate expression within parentheses. For example, +// (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
+// Multiple expressions are treated as AND expressions, meaning that
+// resources must match all expressions to pass the filters.
+func (c *FirewallsListCall) Filter(filter string) *FirewallsListCall {
+	c.urlParams_.Set("filter", filter)
+	return c
+}
+
+// MaxResults sets the optional parameter "maxResults": The maximum
+// number of results per page that should be returned. If the number of
+// available results is larger than maxResults, Compute Engine returns a
+// nextPageToken that can be used to get the next page of results in
+// subsequent list requests.
+func (c *FirewallsListCall) MaxResults(maxResults int64) *FirewallsListCall {
+	c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
+	return c
+}
+
+// PageToken sets the optional parameter "pageToken": Specifies a page
+// token to use. Set pageToken to the nextPageToken returned by a
+// previous list request to get the next page of results.
+func (c *FirewallsListCall) PageToken(pageToken string) *FirewallsListCall {
+	c.urlParams_.Set("pageToken", pageToken)
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *FirewallsListCall) Fields(s ...googleapi.Field) *FirewallsListCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *FirewallsListCall) IfNoneMatch(entityTag string) *FirewallsListCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *FirewallsListCall) Context(ctx context.Context) *FirewallsListCall {
+	c.ctx_ = ctx
+	return c
+}
+
+func (c *FirewallsListCall) doRequest(alt string) (*http.Response, error) {
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls")
+	urls += "?" + c.urlParams_.Encode()
+	req, _ := http.NewRequest("GET", urls, body)
+	googleapi.Expand(req.URL, map[string]string{
+		"project": c.project,
+	})
+	req.Header.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		req.Header.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	if c.ctx_ != nil {
+		return ctxhttp.Do(c.ctx_, c.s.client, req)
+	}
+	return c.s.client.Do(req)
+}
+
+// Do executes the "compute.firewalls.list" call.
+// Exactly one of *FirewallList or error will be non-nil. Any non-2xx
+// status code is an error. Response headers are in either
+// *FirewallList.ServerResponse.Header or (if a response was returned at
+// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
+// to check whether the returned error was because
+// http.StatusNotModified was returned.
+func (c *FirewallsListCall) Do(opts ...googleapi.CallOption) (*FirewallList, error) {
+	gensupport.SetOptions(c.urlParams_, opts...)
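+	// Editorial note: callers who need every page typically use the
+	// Pages helper further below instead of driving pageToken by hand.
+	// A hedged sketch from a client's point of view (svc is an
+	// already-constructed *compute.Service, "my-project" a placeholder):
+	//
+	//	err := svc.Firewalls.List("my-project").Pages(ctx, func(p *compute.FirewallList) error {
+	//		for _, fw := range p.Items {
+	//			fmt.Println(fw.Name)
+	//		}
+	//		return nil
+	//	})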
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &FirewallList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of firewall rules available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.firewalls.list", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/firewalls", + // "response": { + // "$ref": "FirewallList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *FirewallsListCall) Pages(ctx context.Context, f func(*FirewallList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.firewalls.patch": + +type FirewallsPatchCall struct { + s *Service + project string + firewall string + firewall2 *Firewall + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Patch: Updates the specified firewall rule with the data included in +// the request. This method supports patch semantics. +// For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/patch +func (r *FirewallsService) Patch(project string, firewall string, firewall2 *Firewall) *FirewallsPatchCall { + c := &FirewallsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.firewall = firewall + c.firewall2 = firewall2 + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *FirewallsPatchCall) Fields(s ...googleapi.Field) *FirewallsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *FirewallsPatchCall) Context(ctx context.Context) *FirewallsPatchCall { + c.ctx_ = ctx + return c +} + +func (c *FirewallsPatchCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewall2) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{firewall}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "firewall": c.firewall, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.firewalls.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. 
Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *FirewallsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates the specified firewall rule with the data included in the request. This method supports patch semantics.", + // "httpMethod": "PATCH", + // "id": "compute.firewalls.patch", + // "parameterOrder": [ + // "project", + // "firewall" + // ], + // "parameters": { + // "firewall": { + // "description": "Name of the firewall rule to update.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/firewalls/{firewall}", + // "request": { + // "$ref": "Firewall" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.firewalls.update": + +type FirewallsUpdateCall struct { + s *Service + project string + firewall string + firewall2 *Firewall + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Update: Updates the specified firewall rule with the data included in +// the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/update +func (r *FirewallsService) Update(project string, firewall string, firewall2 *Firewall) *FirewallsUpdateCall { + c := &FirewallsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.firewall = firewall + c.firewall2 = firewall2 + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *FirewallsUpdateCall) Fields(s ...googleapi.Field) *FirewallsUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
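+//
+// A hedged sketch of bounding an update with a timeout (placeholder
+// names; context here is golang.org/x/net/context):
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//	defer cancel()
+//	op, err := svc.Firewalls.Update("my-project", "allow-http", updatedRule).
+//		Context(ctx).Do()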
+func (c *FirewallsUpdateCall) Context(ctx context.Context) *FirewallsUpdateCall { + c.ctx_ = ctx + return c +} + +func (c *FirewallsUpdateCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewall2) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{firewall}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PUT", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "firewall": c.firewall, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.firewalls.update" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *FirewallsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates the specified firewall rule with the data included in the request.", + // "httpMethod": "PUT", + // "id": "compute.firewalls.update", + // "parameterOrder": [ + // "project", + // "firewall" + // ], + // "parameters": { + // "firewall": { + // "description": "Name of the firewall rule to update.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/firewalls/{firewall}", + // "request": { + // "$ref": "Firewall" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.forwardingRules.aggregatedList": + +type ForwardingRulesAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// AggregatedList: Retrieves an aggregated list of forwarding rules. 
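+// The result groups rules by scope (for example "regions/us-central1").
+// A hedged sketch of walking the grouped items, assuming Items maps
+// scope names to ForwardingRulesScopedList values ("my-project" is a
+// placeholder):
+//
+//	agg, err := svc.ForwardingRules.AggregatedList("my-project").Do()
+//	if err == nil {
+//		for scope, scoped := range agg.Items {
+//			fmt.Println(scope, len(scoped.ForwardingRules))
+//		}
+//	}
+//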
+// For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/aggregatedList +func (r *ForwardingRulesService) AggregatedList(project string) *ForwardingRulesAggregatedListCall { + c := &ForwardingRulesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use filter=name ne +// example-instance. +// +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// The Beta API also supports filtering on multiple expressions by +// providing each separate expression within parentheses. For example, +// (scheduling.automaticRestart eq true) (zone eq us-central1-f). +// Multiple expressions are treated as AND expressions, meaning that +// resources must match all expressions to pass the filters. +func (c *ForwardingRulesAggregatedListCall) Filter(filter string) *ForwardingRulesAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. +func (c *ForwardingRulesAggregatedListCall) MaxResults(maxResults int64) *ForwardingRulesAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *ForwardingRulesAggregatedListCall) PageToken(pageToken string) *ForwardingRulesAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ForwardingRulesAggregatedListCall) Fields(s ...googleapi.Field) *ForwardingRulesAggregatedListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. 
Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *ForwardingRulesAggregatedListCall) IfNoneMatch(entityTag string) *ForwardingRulesAggregatedListCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *ForwardingRulesAggregatedListCall) Context(ctx context.Context) *ForwardingRulesAggregatedListCall {
+	c.ctx_ = ctx
+	return c
+}
+
+func (c *ForwardingRulesAggregatedListCall) doRequest(alt string) (*http.Response, error) {
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/forwardingRules")
+	urls += "?" + c.urlParams_.Encode()
+	req, _ := http.NewRequest("GET", urls, body)
+	googleapi.Expand(req.URL, map[string]string{
+		"project": c.project,
+	})
+	req.Header.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		req.Header.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	if c.ctx_ != nil {
+		return ctxhttp.Do(c.ctx_, c.s.client, req)
+	}
+	return c.s.client.Do(req)
+}
+
+// Do executes the "compute.forwardingRules.aggregatedList" call.
+// Exactly one of *ForwardingRuleAggregatedList or error will be
+// non-nil. Any non-2xx status code is an error. Response headers are in
+// either *ForwardingRuleAggregatedList.ServerResponse.Header or (if a
+// response was returned at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
+func (c *ForwardingRulesAggregatedListCall) Do(opts ...googleapi.CallOption) (*ForwardingRuleAggregatedList, error) {
+	gensupport.SetOptions(c.urlParams_, opts...)
+	res, err := c.doRequest("json")
+	if res != nil && res.StatusCode == http.StatusNotModified {
+		if res.Body != nil {
+			res.Body.Close()
+		}
+		return nil, &googleapi.Error{
+			Code:   res.StatusCode,
+			Header: res.Header,
+		}
+	}
+	if err != nil {
+		return nil, err
+	}
+	defer googleapi.CloseBody(res)
+	if err := googleapi.CheckResponse(res); err != nil {
+		return nil, err
+	}
+	ret := &ForwardingRuleAggregatedList{
+		ServerResponse: googleapi.ServerResponse{
+			Header:         res.Header,
+			HTTPStatusCode: res.StatusCode,
+		},
+	}
+	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+	// {
+	//   "description": "Retrieves an aggregated list of forwarding rules.",
+	//   "httpMethod": "GET",
+	//   "id": "compute.forwardingRules.aggregatedList",
+	//   "parameterOrder": [
+	//     "project"
+	//   ],
+	//   "parameters": {
+	//     "filter": {
+	//       "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax.
The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/aggregated/forwardingRules", + // "response": { + // "$ref": "ForwardingRuleAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ForwardingRulesAggregatedListCall) Pages(ctx context.Context, f func(*ForwardingRuleAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.forwardingRules.delete": + +type ForwardingRulesDeleteCall struct { + s *Service + project string + region string + forwardingRule string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Deletes the specified ForwardingRule resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/delete +func (r *ForwardingRulesService) Delete(project string, region string, forwardingRule string) *ForwardingRulesDeleteCall { + c := &ForwardingRulesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.forwardingRule = forwardingRule + return c +} + +// Fields allows partial responses to be retrieved. 
See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ForwardingRulesDeleteCall) Fields(s ...googleapi.Field) *ForwardingRulesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ForwardingRulesDeleteCall) Context(ctx context.Context) *ForwardingRulesDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *ForwardingRulesDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules/{forwardingRule}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "forwardingRule": c.forwardingRule, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.forwardingRules.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ForwardingRulesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified ForwardingRule resource.", + // "httpMethod": "DELETE", + // "id": "compute.forwardingRules.delete", + // "parameterOrder": [ + // "project", + // "region", + // "forwardingRule" + // ], + // "parameters": { + // "forwardingRule": { + // "description": "Name of the ForwardingRule resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/forwardingRules/{forwardingRule}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // 
"https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.forwardingRules.get": + +type ForwardingRulesGetCall struct { + s *Service + project string + region string + forwardingRule string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// Get: Returns the specified ForwardingRule resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/get +func (r *ForwardingRulesService) Get(project string, region string, forwardingRule string) *ForwardingRulesGetCall { + c := &ForwardingRulesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.forwardingRule = forwardingRule + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ForwardingRulesGetCall) Fields(s ...googleapi.Field) *ForwardingRulesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ForwardingRulesGetCall) IfNoneMatch(entityTag string) *ForwardingRulesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ForwardingRulesGetCall) Context(ctx context.Context) *ForwardingRulesGetCall { + c.ctx_ = ctx + return c +} + +func (c *ForwardingRulesGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules/{forwardingRule}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "forwardingRule": c.forwardingRule, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.forwardingRules.get" call. +// Exactly one of *ForwardingRule or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *ForwardingRule.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ForwardingRulesGetCall) Do(opts ...googleapi.CallOption) (*ForwardingRule, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ForwardingRule{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified ForwardingRule resource.", + // "httpMethod": "GET", + // "id": "compute.forwardingRules.get", + // "parameterOrder": [ + // "project", + // "region", + // "forwardingRule" + // ], + // "parameters": { + // "forwardingRule": { + // "description": "Name of the ForwardingRule resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/forwardingRules/{forwardingRule}", + // "response": { + // "$ref": "ForwardingRule" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.forwardingRules.insert": + +type ForwardingRulesInsertCall struct { + s *Service + project string + region string + forwardingrule *ForwardingRule + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Insert: Creates a ForwardingRule resource in the specified project +// and region using the data included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/insert +func (r *ForwardingRulesService) Insert(project string, region string, forwardingrule *ForwardingRule) *ForwardingRulesInsertCall { + c := &ForwardingRulesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.forwardingrule = forwardingrule + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ForwardingRulesInsertCall) Fields(s ...googleapi.Field) *ForwardingRulesInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
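+//
+// A hedged end-to-end sketch of Insert with a context (all names and
+// field values are illustrative, including targetPoolURL):
+//
+//	op, err := svc.ForwardingRules.Insert("my-project", "us-central1",
+//		&compute.ForwardingRule{
+//			Name:       "example-rule",
+//			IPProtocol: "TCP",
+//			PortRange:  "80-80",
+//			Target:     targetPoolURL,
+//		}).Context(ctx).Do()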
+func (c *ForwardingRulesInsertCall) Context(ctx context.Context) *ForwardingRulesInsertCall { + c.ctx_ = ctx + return c +} + +func (c *ForwardingRulesInsertCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.forwardingrule) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.forwardingRules.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ForwardingRulesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a ForwardingRule resource in the specified project and region using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.forwardingRules.insert", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/forwardingRules", + // "request": { + // "$ref": "ForwardingRule" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.forwardingRules.list": + +type ForwardingRulesListCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Retrieves a list of ForwardingRule resources available to the +// specified project and region. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/list
+func (r *ForwardingRulesService) List(project string, region string) *ForwardingRulesListCall {
+ c := &ForwardingRulesListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.project = project
+ c.region = region
+ return c
+}
+
+// Filter sets the optional parameter "filter": Sets a filter expression
+// for filtering listed resources, in the form filter={expression}. Your
+// {expression} must be in the format: field_name comparison_string
+// literal_string.
+//
+// The field_name is the name of the field you want to compare. Only
+// atomic field types are supported (string, number, boolean). The
+// comparison_string must be either eq (equals) or ne (not equals). The
+// literal_string is the string value to filter to. The literal value
+// must be valid for the type of field you are filtering by (string,
+// number, boolean). For string fields, the literal value is interpreted
+// as a regular expression using RE2 syntax. The literal value must
+// match the entire field.
+//
+// For example, to filter for instances that do not have a name of
+// example-instance, you would use filter=name ne
+// example-instance.
+//
+// Compute Engine Beta API Only: When filtering in the Beta API, you can
+// also filter on nested fields. For example, you could filter on
+// instances that have set the scheduling.automaticRestart field to
+// true. Use filtering on nested fields to take advantage of labels to
+// organize and search for results based on label values.
+//
+// The Beta API also supports filtering on multiple expressions by
+// providing each separate expression within parentheses. For example,
+// (scheduling.automaticRestart eq true) (zone eq us-central1-f).
+// Multiple expressions are treated as AND expressions, meaning that
+// resources must match all expressions to pass the filters.
+func (c *ForwardingRulesListCall) Filter(filter string) *ForwardingRulesListCall {
+ c.urlParams_.Set("filter", filter)
+ return c
+}
+
+// MaxResults sets the optional parameter "maxResults": The maximum
+// number of results per page that should be returned. If the number of
+// available results is larger than maxResults, Compute Engine returns a
+// nextPageToken that can be used to get the next page of results in
+// subsequent list requests.
+func (c *ForwardingRulesListCall) MaxResults(maxResults int64) *ForwardingRulesListCall {
+ c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
+ return c
+}
+
+// PageToken sets the optional parameter "pageToken": Specifies a page
+// token to use. Set pageToken to the nextPageToken returned by a
+// previous list request to get the next page of results.
+func (c *ForwardingRulesListCall) PageToken(pageToken string) *ForwardingRulesListCall {
+ c.urlParams_.Set("pageToken", pageToken)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ForwardingRulesListCall) Fields(s ...googleapi.Field) *ForwardingRulesListCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
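+//
+// A sketch of conditional fetching with IfNoneMatch (editor's illustration;
+// lastEtag is a hypothetical value saved from an earlier response):
+//
+//   list, err := svc.ForwardingRules.List("my-project", "us-central1").
+//       IfNoneMatch(lastEtag).
+//       Do()
+//   if googleapi.IsNotModified(err) {
+//       // Nothing changed since lastEtag; reuse the cached copy.
+//   }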
+func (c *ForwardingRulesListCall) IfNoneMatch(entityTag string) *ForwardingRulesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ForwardingRulesListCall) Context(ctx context.Context) *ForwardingRulesListCall { + c.ctx_ = ctx + return c +} + +func (c *ForwardingRulesListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.forwardingRules.list" call. +// Exactly one of *ForwardingRuleList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ForwardingRuleList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*ForwardingRuleList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ForwardingRuleList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a list of ForwardingRule resources available to the specified project and region.", + // "httpMethod": "GET", + // "id": "compute.forwardingRules.list", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. 
For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/forwardingRules", + // "response": { + // "$ref": "ForwardingRuleList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ForwardingRulesListCall) Pages(ctx context.Context, f func(*ForwardingRuleList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.forwardingRules.setTarget": + +type ForwardingRulesSetTargetCall struct { + s *Service + project string + region string + forwardingRule string + targetreference *TargetReference + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// SetTarget: Changes target URL for forwarding rule. The new target +// should be of the same type as the old target. 
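+//
+// A minimal usage sketch (editor's illustration; all names are hypothetical,
+// and the new target must be of the same type as the one it replaces):
+//
+//   ref := &TargetReference{Target: newTargetPoolURL}
+//   op, err := svc.ForwardingRules.
+//       SetTarget("my-project", "us-central1", "my-rule", ref).
+//       Do()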
+// For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/setTarget +func (r *ForwardingRulesService) SetTarget(project string, region string, forwardingRule string, targetreference *TargetReference) *ForwardingRulesSetTargetCall { + c := &ForwardingRulesSetTargetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.forwardingRule = forwardingRule + c.targetreference = targetreference + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ForwardingRulesSetTargetCall) Fields(s ...googleapi.Field) *ForwardingRulesSetTargetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ForwardingRulesSetTargetCall) Context(ctx context.Context) *ForwardingRulesSetTargetCall { + c.ctx_ = ctx + return c +} + +func (c *ForwardingRulesSetTargetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetreference) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules/{forwardingRule}/setTarget") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "forwardingRule": c.forwardingRule, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.forwardingRules.setTarget" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ForwardingRulesSetTargetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Changes target URL for forwarding rule. 
The new target should be of the same type as the old target.", + // "httpMethod": "POST", + // "id": "compute.forwardingRules.setTarget", + // "parameterOrder": [ + // "project", + // "region", + // "forwardingRule" + // ], + // "parameters": { + // "forwardingRule": { + // "description": "Name of the ForwardingRule resource in which target is to be set.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/forwardingRules/{forwardingRule}/setTarget", + // "request": { + // "$ref": "TargetReference" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.globalAddresses.delete": + +type GlobalAddressesDeleteCall struct { + s *Service + project string + address string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Deletes the specified address resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/globalAddresses/delete +func (r *GlobalAddressesService) Delete(project string, address string) *GlobalAddressesDeleteCall { + c := &GlobalAddressesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.address = address + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *GlobalAddressesDeleteCall) Fields(s ...googleapi.Field) *GlobalAddressesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *GlobalAddressesDeleteCall) Context(ctx context.Context) *GlobalAddressesDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *GlobalAddressesDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses/{address}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "address": c.address, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.globalAddresses.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. 
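+//
+// A sketch of the usual error handling around Do (editor's illustration; the
+// names are hypothetical):
+//
+//   op, err := svc.GlobalAddresses.Delete("my-project", "my-address").Do()
+//   if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound {
+//       // The address is already gone; this may be acceptable.
+//   } else if err != nil {
+//       // Handle any other failure.
+//   }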
+func (c *GlobalAddressesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, &googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ ret := &Operation{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Deletes the specified address resource.",
+ // "httpMethod": "DELETE",
+ // "id": "compute.globalAddresses.delete",
+ // "parameterOrder": [
+ // "project",
+ // "address"
+ // ],
+ // "parameters": {
+ // "address": {
+ // "description": "Name of the address resource to delete.",
+ // "location": "path",
+ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "project": {
+ // "description": "Project ID for this request.",
+ // "location": "path",
+ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "{project}/global/addresses/{address}",
+ // "response": {
+ // "$ref": "Operation"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/compute"
+ // ]
+ // }
+
+}
+
+// method id "compute.globalAddresses.get":
+
+type GlobalAddressesGetCall struct {
+ s *Service
+ project string
+ address string
+ urlParams_ gensupport.URLParams
+ ifNoneMatch_ string
+ ctx_ context.Context
+}
+
+// Get: Returns the specified address resource. Get a list of available
+// addresses by making a list() request.
+// For details, see https://cloud.google.com/compute/docs/reference/latest/globalAddresses/get
+func (r *GlobalAddressesService) Get(project string, address string) *GlobalAddressesGetCall {
+ c := &GlobalAddressesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.project = project
+ c.address = address
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *GlobalAddressesGetCall) Fields(s ...googleapi.Field) *GlobalAddressesGetCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *GlobalAddressesGetCall) IfNoneMatch(entityTag string) *GlobalAddressesGetCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *GlobalAddressesGetCall) Context(ctx context.Context) *GlobalAddressesGetCall { + c.ctx_ = ctx + return c +} + +func (c *GlobalAddressesGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses/{address}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "address": c.address, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.globalAddresses.get" call. +// Exactly one of *Address or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Address.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *GlobalAddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Address{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified address resource. Get a list of available addresses by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.globalAddresses.get", + // "parameterOrder": [ + // "project", + // "address" + // ], + // "parameters": { + // "address": { + // "description": "Name of the address resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/addresses/{address}", + // "response": { + // "$ref": "Address" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.globalAddresses.insert": + +type GlobalAddressesInsertCall struct { + s *Service + project string + address *Address + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Insert: Creates an address resource in the specified project using +// the data included in the request. 
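+//
+// A minimal usage sketch (editor's illustration; the address name is
+// hypothetical, and Name is assumed to be a field of Address):
+//
+//   addr := &Address{Name: "my-global-address"}
+//   op, err := svc.GlobalAddresses.Insert("my-project", addr).Do()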
+// For details, see https://cloud.google.com/compute/docs/reference/latest/globalAddresses/insert +func (r *GlobalAddressesService) Insert(project string, address *Address) *GlobalAddressesInsertCall { + c := &GlobalAddressesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.address = address + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *GlobalAddressesInsertCall) Fields(s ...googleapi.Field) *GlobalAddressesInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *GlobalAddressesInsertCall) Context(ctx context.Context) *GlobalAddressesInsertCall { + c.ctx_ = ctx + return c +} + +func (c *GlobalAddressesInsertCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.address) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.globalAddresses.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *GlobalAddressesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates an address resource in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.globalAddresses.insert", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/addresses", + // "request": { + // "$ref": "Address" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.globalAddresses.list": + +type GlobalAddressesListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Retrieves a list of global addresses. +// For details, see https://cloud.google.com/compute/docs/reference/latest/globalAddresses/list +func (r *GlobalAddressesService) List(project string) *GlobalAddressesListCall { + c := &GlobalAddressesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use filter=name ne +// example-instance. +// +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// The Beta API also supports filtering on multiple expressions by +// providing each separate expression within parentheses. For example, +// (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
+// Multiple expressions are treated as AND expressions, meaning that
+// resources must match all expressions to pass the filters.
+func (c *GlobalAddressesListCall) Filter(filter string) *GlobalAddressesListCall {
+ c.urlParams_.Set("filter", filter)
+ return c
+}
+
+// MaxResults sets the optional parameter "maxResults": The maximum
+// number of results per page that should be returned. If the number of
+// available results is larger than maxResults, Compute Engine returns a
+// nextPageToken that can be used to get the next page of results in
+// subsequent list requests.
+func (c *GlobalAddressesListCall) MaxResults(maxResults int64) *GlobalAddressesListCall {
+ c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
+ return c
+}
+
+// PageToken sets the optional parameter "pageToken": Specifies a page
+// token to use. Set pageToken to the nextPageToken returned by a
+// previous list request to get the next page of results.
+func (c *GlobalAddressesListCall) PageToken(pageToken string) *GlobalAddressesListCall {
+ c.urlParams_.Set("pageToken", pageToken)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *GlobalAddressesListCall) Fields(s ...googleapi.Field) *GlobalAddressesListCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *GlobalAddressesListCall) IfNoneMatch(entityTag string) *GlobalAddressesListCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *GlobalAddressesListCall) Context(ctx context.Context) *GlobalAddressesListCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *GlobalAddressesListCall) doRequest(alt string) (*http.Response, error) {
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ })
+ req.Header.Set("User-Agent", c.s.userAgent())
+ if c.ifNoneMatch_ != "" {
+ req.Header.Set("If-None-Match", c.ifNoneMatch_)
+ }
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "compute.globalAddresses.list" call.
+// Exactly one of *AddressList or error will be non-nil. Any non-2xx
+// status code is an error. Response headers are in either
+// *AddressList.ServerResponse.Header or (if a response was returned at
+// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
+// to check whether the returned error was because
+// http.StatusNotModified was returned.
+func (c *GlobalAddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &AddressList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a list of global addresses.", + // "httpMethod": "GET", + // "id": "compute.globalAddresses.list", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/addresses", + // "response": { + // "$ref": "AddressList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *GlobalAddressesListCall) Pages(ctx context.Context, f func(*AddressList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.globalForwardingRules.delete": + +type GlobalForwardingRulesDeleteCall struct { + s *Service + project string + forwardingRule string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Deletes the specified ForwardingRule resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/globalForwardingRules/delete +func (r *GlobalForwardingRulesService) Delete(project string, forwardingRule string) *GlobalForwardingRulesDeleteCall { + c := &GlobalForwardingRulesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.forwardingRule = forwardingRule + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *GlobalForwardingRulesDeleteCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *GlobalForwardingRulesDeleteCall) Context(ctx context.Context) *GlobalForwardingRulesDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *GlobalForwardingRulesDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules/{forwardingRule}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "forwardingRule": c.forwardingRule, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.globalForwardingRules.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified
+// to check whether the returned error was because
+// http.StatusNotModified was returned.
+func (c *GlobalForwardingRulesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, &googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ ret := &Operation{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Deletes the specified ForwardingRule resource.",
+ // "httpMethod": "DELETE",
+ // "id": "compute.globalForwardingRules.delete",
+ // "parameterOrder": [
+ // "project",
+ // "forwardingRule"
+ // ],
+ // "parameters": {
+ // "forwardingRule": {
+ // "description": "Name of the ForwardingRule resource to delete.",
+ // "location": "path",
+ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "project": {
+ // "description": "Project ID for this request.",
+ // "location": "path",
+ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "{project}/global/forwardingRules/{forwardingRule}",
+ // "response": {
+ // "$ref": "Operation"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/compute"
+ // ]
+ // }
+
+}
+
+// method id "compute.globalForwardingRules.get":
+
+type GlobalForwardingRulesGetCall struct {
+ s *Service
+ project string
+ forwardingRule string
+ urlParams_ gensupport.URLParams
+ ifNoneMatch_ string
+ ctx_ context.Context
+}
+
+// Get: Returns the specified ForwardingRule resource. Get a list of
+// available forwarding rules by making a list() request.
+// For details, see https://cloud.google.com/compute/docs/reference/latest/globalForwardingRules/get
+func (r *GlobalForwardingRulesService) Get(project string, forwardingRule string) *GlobalForwardingRulesGetCall {
+ c := &GlobalForwardingRulesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.project = project
+ c.forwardingRule = forwardingRule
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *GlobalForwardingRulesGetCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesGetCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *GlobalForwardingRulesGetCall) IfNoneMatch(entityTag string) *GlobalForwardingRulesGetCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method.
Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *GlobalForwardingRulesGetCall) Context(ctx context.Context) *GlobalForwardingRulesGetCall { + c.ctx_ = ctx + return c +} + +func (c *GlobalForwardingRulesGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules/{forwardingRule}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "forwardingRule": c.forwardingRule, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.globalForwardingRules.get" call. +// Exactly one of *ForwardingRule or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *ForwardingRule.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *GlobalForwardingRulesGetCall) Do(opts ...googleapi.CallOption) (*ForwardingRule, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ForwardingRule{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified ForwardingRule resource. 
Get a list of available forwarding rules by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.globalForwardingRules.get", + // "parameterOrder": [ + // "project", + // "forwardingRule" + // ], + // "parameters": { + // "forwardingRule": { + // "description": "Name of the ForwardingRule resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/forwardingRules/{forwardingRule}", + // "response": { + // "$ref": "ForwardingRule" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.globalForwardingRules.insert": + +type GlobalForwardingRulesInsertCall struct { + s *Service + project string + forwardingrule *ForwardingRule + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Insert: Creates a ForwardingRule resource in the specified project +// and region using the data included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/globalForwardingRules/insert +func (r *GlobalForwardingRulesService) Insert(project string, forwardingrule *ForwardingRule) *GlobalForwardingRulesInsertCall { + c := &GlobalForwardingRulesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.forwardingrule = forwardingrule + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *GlobalForwardingRulesInsertCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *GlobalForwardingRulesInsertCall) Context(ctx context.Context) *GlobalForwardingRulesInsertCall { + c.ctx_ = ctx + return c +} + +func (c *GlobalForwardingRulesInsertCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.forwardingrule) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.globalForwardingRules.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *GlobalForwardingRulesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a ForwardingRule resource in the specified project and region using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.globalForwardingRules.insert", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/forwardingRules", + // "request": { + // "$ref": "ForwardingRule" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.globalForwardingRules.list": + +type GlobalForwardingRulesListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Retrieves a list of ForwardingRule resources available to the +// specified project. +// For details, see https://cloud.google.com/compute/docs/reference/latest/globalForwardingRules/list +func (r *GlobalForwardingRulesService) List(project string) *GlobalForwardingRulesListCall { + c := &GlobalForwardingRulesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use filter=name ne +// example-instance. +// +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. 
Use filtering on nested fields to take advantage of labels to
+// organize and search for results based on label values.
+//
+// The Beta API also supports filtering on multiple expressions by
+// providing each separate expression within parentheses. For example,
+// (scheduling.automaticRestart eq true) (zone eq us-central1-f).
+// Multiple expressions are treated as AND expressions, meaning that
+// resources must match all expressions to pass the filters.
+func (c *GlobalForwardingRulesListCall) Filter(filter string) *GlobalForwardingRulesListCall {
+ c.urlParams_.Set("filter", filter)
+ return c
+}
+
+// MaxResults sets the optional parameter "maxResults": The maximum
+// number of results per page that should be returned. If the number of
+// available results is larger than maxResults, Compute Engine returns a
+// nextPageToken that can be used to get the next page of results in
+// subsequent list requests.
+func (c *GlobalForwardingRulesListCall) MaxResults(maxResults int64) *GlobalForwardingRulesListCall {
+ c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
+ return c
+}
+
+// PageToken sets the optional parameter "pageToken": Specifies a page
+// token to use. Set pageToken to the nextPageToken returned by a
+// previous list request to get the next page of results.
+func (c *GlobalForwardingRulesListCall) PageToken(pageToken string) *GlobalForwardingRulesListCall {
+ c.urlParams_.Set("pageToken", pageToken)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *GlobalForwardingRulesListCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesListCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *GlobalForwardingRulesListCall) IfNoneMatch(entityTag string) *GlobalForwardingRulesListCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *GlobalForwardingRulesListCall) Context(ctx context.Context) *GlobalForwardingRulesListCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *GlobalForwardingRulesListCall) doRequest(alt string) (*http.Response, error) {
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ })
+ req.Header.Set("User-Agent", c.s.userAgent())
+ if c.ifNoneMatch_ != "" {
+ req.Header.Set("If-None-Match", c.ifNoneMatch_)
+ }
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "compute.globalForwardingRules.list" call.
+// Exactly one of *ForwardingRuleList or error will be non-nil. Any
+// non-2xx status code is an error. Response headers are in either
+// *ForwardingRuleList.ServerResponse.Header or (if a response was
+// returned at all) in error.(*googleapi.Error).Header.
Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *GlobalForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*ForwardingRuleList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ForwardingRuleList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a list of ForwardingRule resources available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.globalForwardingRules.list", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/forwardingRules", + // "response": { + // "$ref": "ForwardingRuleList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *GlobalForwardingRulesListCall) Pages(ctx context.Context, f func(*ForwardingRuleList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.globalForwardingRules.setTarget": + +type GlobalForwardingRulesSetTargetCall struct { + s *Service + project string + forwardingRule string + targetreference *TargetReference + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// SetTarget: Changes target URL for forwarding rule. The new target +// should be of the same type as the old target. +// For details, see https://cloud.google.com/compute/docs/reference/latest/globalForwardingRules/setTarget +func (r *GlobalForwardingRulesService) SetTarget(project string, forwardingRule string, targetreference *TargetReference) *GlobalForwardingRulesSetTargetCall { + c := &GlobalForwardingRulesSetTargetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.forwardingRule = forwardingRule + c.targetreference = targetreference + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *GlobalForwardingRulesSetTargetCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesSetTargetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *GlobalForwardingRulesSetTargetCall) Context(ctx context.Context) *GlobalForwardingRulesSetTargetCall { + c.ctx_ = ctx + return c +} + +func (c *GlobalForwardingRulesSetTargetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetreference) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules/{forwardingRule}/setTarget") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "forwardingRule": c.forwardingRule, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.globalForwardingRules.setTarget" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *GlobalForwardingRulesSetTargetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Changes target URL for forwarding rule. The new target should be of the same type as the old target.", + // "httpMethod": "POST", + // "id": "compute.globalForwardingRules.setTarget", + // "parameterOrder": [ + // "project", + // "forwardingRule" + // ], + // "parameters": { + // "forwardingRule": { + // "description": "Name of the ForwardingRule resource in which target is to be set.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/forwardingRules/{forwardingRule}/setTarget", + // "request": { + // "$ref": "TargetReference" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.globalOperations.aggregatedList": + +type GlobalOperationsAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// AggregatedList: Retrieves an aggregated list of all operations. +// For details, see https://cloud.google.com/compute/docs/reference/latest/globalOperations/aggregatedList +func (r *GlobalOperationsService) AggregatedList(project string) *GlobalOperationsAggregatedListCall { + c := &GlobalOperationsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. 
Your
+// {expression} must be in the format: field_name comparison_string
+// literal_string.
+//
+// The field_name is the name of the field you want to compare. Only
+// atomic field types are supported (string, number, boolean). The
+// comparison_string must be either eq (equals) or ne (not equals). The
+// literal_string is the string value to filter to. The literal value
+// must be valid for the type of field you are filtering by (string,
+// number, boolean). For string fields, the literal value is interpreted
+// as a regular expression using RE2 syntax. The literal value must
+// match the entire field.
+//
+// For example, to filter for instances that do not have a name of
+// example-instance, you would use filter=name ne
+// example-instance.
+//
+// Compute Engine Beta API Only: When filtering in the Beta API, you can
+// also filter on nested fields. For example, you could filter on
+// instances that have set the scheduling.automaticRestart field to
+// true. Use filtering on nested fields to take advantage of labels to
+// organize and search for results based on label values.
+//
+// The Beta API also supports filtering on multiple expressions by
+// providing each separate expression within parentheses. For example,
+// (scheduling.automaticRestart eq true) (zone eq us-central1-f).
+// Multiple expressions are treated as AND expressions, meaning that
+// resources must match all expressions to pass the filters.
+func (c *GlobalOperationsAggregatedListCall) Filter(filter string) *GlobalOperationsAggregatedListCall {
+	c.urlParams_.Set("filter", filter)
+	return c
+}
+
+// MaxResults sets the optional parameter "maxResults": The maximum
+// number of results per page that should be returned. If the number of
+// available results is larger than maxResults, Compute Engine returns a
+// nextPageToken that can be used to get the next page of results in
+// subsequent list requests.
+func (c *GlobalOperationsAggregatedListCall) MaxResults(maxResults int64) *GlobalOperationsAggregatedListCall {
+	c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
+	return c
+}
+
+// PageToken sets the optional parameter "pageToken": Specifies a page
+// token to use. Set pageToken to the nextPageToken returned by a
+// previous list request to get the next page of results.
+func (c *GlobalOperationsAggregatedListCall) PageToken(pageToken string) *GlobalOperationsAggregatedListCall {
+	c.urlParams_.Set("pageToken", pageToken)
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *GlobalOperationsAggregatedListCall) Fields(s ...googleapi.Field) *GlobalOperationsAggregatedListCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *GlobalOperationsAggregatedListCall) IfNoneMatch(entityTag string) *GlobalOperationsAggregatedListCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
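+//
+// Example (editorial sketch, not generated from the discovery document):
+// the builders above chain onto AggregatedList. This assumes the vendored
+// package is imported as "compute" and that client is an authenticated
+// *http.Client with a Compute Engine scope; "my-project" is a placeholder
+// project ID.
+//
+//	svc, err := compute.New(client)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	// Fetch one page of completed operations, at most 100 entries, using
+//	// the documented field_name comparison_string literal_string form.
+//	list, err := svc.GlobalOperations.AggregatedList("my-project").
+//		Filter("status eq DONE").
+//		MaxResults(100).
+//		Do()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for scope, scoped := range list.Items {
+//		for _, op := range scoped.Operations {
+//			fmt.Println(scope, op.Name, op.Status)
+//		}
+//	}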
+func (c *GlobalOperationsAggregatedListCall) Context(ctx context.Context) *GlobalOperationsAggregatedListCall { + c.ctx_ = ctx + return c +} + +func (c *GlobalOperationsAggregatedListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/operations") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.globalOperations.aggregatedList" call. +// Exactly one of *OperationAggregatedList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *OperationAggregatedList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *GlobalOperationsAggregatedListCall) Do(opts ...googleapi.CallOption) (*OperationAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &OperationAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves an aggregated list of all operations.", + // "httpMethod": "GET", + // "id": "compute.globalOperations.aggregatedList", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. 
For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/aggregated/operations", + // "response": { + // "$ref": "OperationAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *GlobalOperationsAggregatedListCall) Pages(ctx context.Context, f func(*OperationAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.globalOperations.delete": + +type GlobalOperationsDeleteCall struct { + s *Service + project string + operation string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Deletes the specified Operations resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/globalOperations/delete +func (r *GlobalOperationsService) Delete(project string, operation string) *GlobalOperationsDeleteCall { + c := &GlobalOperationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.operation = operation + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *GlobalOperationsDeleteCall) Fields(s ...googleapi.Field) *GlobalOperationsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
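+//
+// Example (editorial sketch, not generated code): the Pages helper defined
+// above streams every page of aggregated results without manual pageToken
+// bookkeeping; svc is assumed to be a *compute.Service and ctx a
+// context.Context, with "my-project" a placeholder.
+//
+//	err := svc.GlobalOperations.AggregatedList("my-project").
+//		Pages(ctx, func(page *compute.OperationAggregatedList) error {
+//			for _, scoped := range page.Items {
+//				for _, op := range scoped.Operations {
+//					fmt.Println(op.Name)
+//				}
+//			}
+//			return nil // returning a non-nil error halts the iteration
+//		})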
+func (c *GlobalOperationsDeleteCall) Context(ctx context.Context) *GlobalOperationsDeleteCall {
+	c.ctx_ = ctx
+	return c
+}
+
+func (c *GlobalOperationsDeleteCall) doRequest(alt string) (*http.Response, error) {
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/operations/{operation}")
+	urls += "?" + c.urlParams_.Encode()
+	req, _ := http.NewRequest("DELETE", urls, body)
+	googleapi.Expand(req.URL, map[string]string{
+		"project":   c.project,
+		"operation": c.operation,
+	})
+	req.Header.Set("User-Agent", c.s.userAgent())
+	if c.ctx_ != nil {
+		return ctxhttp.Do(c.ctx_, c.s.client, req)
+	}
+	return c.s.client.Do(req)
+}
+
+// Do executes the "compute.globalOperations.delete" call.
+func (c *GlobalOperationsDeleteCall) Do(opts ...googleapi.CallOption) error {
+	gensupport.SetOptions(c.urlParams_, opts...)
+	res, err := c.doRequest("json")
+	if err != nil {
+		return err
+	}
+	defer googleapi.CloseBody(res)
+	if err := googleapi.CheckResponse(res); err != nil {
+		return err
+	}
+	return nil
+	// {
+	//   "description": "Deletes the specified Operations resource.",
+	//   "httpMethod": "DELETE",
+	//   "id": "compute.globalOperations.delete",
+	//   "parameterOrder": [
+	//     "project",
+	//     "operation"
+	//   ],
+	//   "parameters": {
+	//     "operation": {
+	//       "description": "Name of the Operations resource to delete.",
+	//       "location": "path",
+	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+	//       "required": true,
+	//       "type": "string"
+	//     },
+	//     "project": {
+	//       "description": "Project ID for this request.",
+	//       "location": "path",
+	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+	//       "required": true,
+	//       "type": "string"
+	//     }
+	//   },
+	//   "path": "{project}/global/operations/{operation}",
+	//   "scopes": [
+	//     "https://www.googleapis.com/auth/cloud-platform",
+	//     "https://www.googleapis.com/auth/compute"
+	//   ]
+	// }
+
+}
+
+// method id "compute.globalOperations.get":
+
+type GlobalOperationsGetCall struct {
+	s            *Service
+	project      string
+	operation    string
+	urlParams_   gensupport.URLParams
+	ifNoneMatch_ string
+	ctx_         context.Context
+}
+
+// Get: Retrieves the specified Operations resource. Get a list of
+// operations by making a list() request.
+// For details, see https://cloud.google.com/compute/docs/reference/latest/globalOperations/get
+func (r *GlobalOperationsService) Get(project string, operation string) *GlobalOperationsGetCall {
+	c := &GlobalOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+	c.project = project
+	c.operation = operation
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *GlobalOperationsGetCall) Fields(s ...googleapi.Field) *GlobalOperationsGetCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *GlobalOperationsGetCall) IfNoneMatch(entityTag string) *GlobalOperationsGetCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method.
Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *GlobalOperationsGetCall) Context(ctx context.Context) *GlobalOperationsGetCall { + c.ctx_ = ctx + return c +} + +func (c *GlobalOperationsGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/operations/{operation}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "operation": c.operation, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.globalOperations.get" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *GlobalOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the specified Operations resource. Get a list of operations by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.globalOperations.get", + // "parameterOrder": [ + // "project", + // "operation" + // ], + // "parameters": { + // "operation": { + // "description": "Name of the Operations resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/operations/{operation}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.globalOperations.list": + +type GlobalOperationsListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Retrieves a list of Operation resources contained within the +// specified project. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/globalOperations/list
+func (r *GlobalOperationsService) List(project string) *GlobalOperationsListCall {
+	c := &GlobalOperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+	c.project = project
+	return c
+}
+
+// Filter sets the optional parameter "filter": Sets a filter expression
+// for filtering listed resources, in the form filter={expression}. Your
+// {expression} must be in the format: field_name comparison_string
+// literal_string.
+//
+// The field_name is the name of the field you want to compare. Only
+// atomic field types are supported (string, number, boolean). The
+// comparison_string must be either eq (equals) or ne (not equals). The
+// literal_string is the string value to filter to. The literal value
+// must be valid for the type of field you are filtering by (string,
+// number, boolean). For string fields, the literal value is interpreted
+// as a regular expression using RE2 syntax. The literal value must
+// match the entire field.
+//
+// For example, to filter for instances that do not have a name of
+// example-instance, you would use filter=name ne
+// example-instance.
+//
+// Compute Engine Beta API Only: When filtering in the Beta API, you can
+// also filter on nested fields. For example, you could filter on
+// instances that have set the scheduling.automaticRestart field to
+// true. Use filtering on nested fields to take advantage of labels to
+// organize and search for results based on label values.
+//
+// The Beta API also supports filtering on multiple expressions by
+// providing each separate expression within parentheses. For example,
+// (scheduling.automaticRestart eq true) (zone eq us-central1-f).
+// Multiple expressions are treated as AND expressions, meaning that
+// resources must match all expressions to pass the filters.
+func (c *GlobalOperationsListCall) Filter(filter string) *GlobalOperationsListCall {
+	c.urlParams_.Set("filter", filter)
+	return c
+}
+
+// MaxResults sets the optional parameter "maxResults": The maximum
+// number of results per page that should be returned. If the number of
+// available results is larger than maxResults, Compute Engine returns a
+// nextPageToken that can be used to get the next page of results in
+// subsequent list requests.
+func (c *GlobalOperationsListCall) MaxResults(maxResults int64) *GlobalOperationsListCall {
+	c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
+	return c
+}
+
+// PageToken sets the optional parameter "pageToken": Specifies a page
+// token to use. Set pageToken to the nextPageToken returned by a
+// previous list request to get the next page of results.
+func (c *GlobalOperationsListCall) PageToken(pageToken string) *GlobalOperationsListCall {
+	c.urlParams_.Set("pageToken", pageToken)
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *GlobalOperationsListCall) Fields(s ...googleapi.Field) *GlobalOperationsListCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
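+//
+// For example (editorial sketch, not generated code; entityTag would come
+// from the ServerResponse headers of an earlier response, and svc and the
+// project name are placeholders):
+//
+//	call := svc.GlobalOperations.List("my-project").IfNoneMatch(entityTag)
+//	list, err := call.Do()
+//	if googleapi.IsNotModified(err) {
+//		// Nothing changed since the ETag was issued; reuse the cached list.
+//	} else if err != nil {
+//		log.Fatal(err)
+//	} else {
+//		fmt.Println(len(list.Items), "operations")
+//	}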
+func (c *GlobalOperationsListCall) IfNoneMatch(entityTag string) *GlobalOperationsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *GlobalOperationsListCall) Context(ctx context.Context) *GlobalOperationsListCall { + c.ctx_ = ctx + return c +} + +func (c *GlobalOperationsListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/operations") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.globalOperations.list" call. +// Exactly one of *OperationList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *OperationList.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *GlobalOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &OperationList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a list of Operation resources contained within the specified project.", + // "httpMethod": "GET", + // "id": "compute.globalOperations.list", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/operations", + // "response": { + // "$ref": "OperationList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *GlobalOperationsListCall) Pages(ctx context.Context, f func(*OperationList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.httpHealthChecks.delete": + +type HttpHealthChecksDeleteCall struct { + s *Service + project string + httpHealthCheck string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Deletes the specified HttpHealthCheck resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/delete +func (r *HttpHealthChecksService) Delete(project string, httpHealthCheck string) *HttpHealthChecksDeleteCall { + c := &HttpHealthChecksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.httpHealthCheck = httpHealthCheck + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *HttpHealthChecksDeleteCall) Fields(s ...googleapi.Field) *HttpHealthChecksDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
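+//
+// Example (editorial sketch, not generated code): Delete returns an
+// *Operation whose Status can be inspected or polled; svc and the resource
+// names are placeholders.
+//
+//	op, err := svc.HttpHealthChecks.Delete("my-project", "my-check").Do()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println("delete operation:", op.Name, op.Status)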
+func (c *HttpHealthChecksDeleteCall) Context(ctx context.Context) *HttpHealthChecksDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *HttpHealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "httpHealthCheck": c.httpHealthCheck, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.httpHealthChecks.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *HttpHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified HttpHealthCheck resource.", + // "httpMethod": "DELETE", + // "id": "compute.httpHealthChecks.delete", + // "parameterOrder": [ + // "project", + // "httpHealthCheck" + // ], + // "parameters": { + // "httpHealthCheck": { + // "description": "Name of the HttpHealthCheck resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.httpHealthChecks.get": + +type HttpHealthChecksGetCall struct { + s *Service + project string + httpHealthCheck string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// Get: Returns the specified HttpHealthCheck resource. Get a list of +// available HTTP health checks by making a list() request. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/get
+func (r *HttpHealthChecksService) Get(project string, httpHealthCheck string) *HttpHealthChecksGetCall {
+	c := &HttpHealthChecksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+	c.project = project
+	c.httpHealthCheck = httpHealthCheck
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *HttpHealthChecksGetCall) Fields(s ...googleapi.Field) *HttpHealthChecksGetCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *HttpHealthChecksGetCall) IfNoneMatch(entityTag string) *HttpHealthChecksGetCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *HttpHealthChecksGetCall) Context(ctx context.Context) *HttpHealthChecksGetCall {
+	c.ctx_ = ctx
+	return c
+}
+
+func (c *HttpHealthChecksGetCall) doRequest(alt string) (*http.Response, error) {
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}")
+	urls += "?" + c.urlParams_.Encode()
+	req, _ := http.NewRequest("GET", urls, body)
+	googleapi.Expand(req.URL, map[string]string{
+		"project":         c.project,
+		"httpHealthCheck": c.httpHealthCheck,
+	})
+	req.Header.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		req.Header.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	if c.ctx_ != nil {
+		return ctxhttp.Do(c.ctx_, c.s.client, req)
+	}
+	return c.s.client.Do(req)
+}
+
+// Do executes the "compute.httpHealthChecks.get" call.
+// Exactly one of *HttpHealthCheck or error will be non-nil. Any non-2xx
+// status code is an error. Response headers are in either
+// *HttpHealthCheck.ServerResponse.Header or (if a response was returned
+// at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
+func (c *HttpHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpHealthCheck, error) {
+	gensupport.SetOptions(c.urlParams_, opts...)
+	res, err := c.doRequest("json")
+	if res != nil && res.StatusCode == http.StatusNotModified {
+		if res.Body != nil {
+			res.Body.Close()
+		}
+		return nil, &googleapi.Error{
+			Code:   res.StatusCode,
+			Header: res.Header,
+		}
+	}
+	if err != nil {
+		return nil, err
+	}
+	defer googleapi.CloseBody(res)
+	if err := googleapi.CheckResponse(res); err != nil {
+		return nil, err
+	}
+	ret := &HttpHealthCheck{
+		ServerResponse: googleapi.ServerResponse{
+			Header:         res.Header,
+			HTTPStatusCode: res.StatusCode,
+		},
+	}
+	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+	// {
+	//   "description": "Returns the specified HttpHealthCheck resource. 
Get a list of available HTTP health checks by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.httpHealthChecks.get", + // "parameterOrder": [ + // "project", + // "httpHealthCheck" + // ], + // "parameters": { + // "httpHealthCheck": { + // "description": "Name of the HttpHealthCheck resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", + // "response": { + // "$ref": "HttpHealthCheck" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.httpHealthChecks.insert": + +type HttpHealthChecksInsertCall struct { + s *Service + project string + httphealthcheck *HttpHealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Insert: Creates a HttpHealthCheck resource in the specified project +// using the data included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/insert +func (r *HttpHealthChecksService) Insert(project string, httphealthcheck *HttpHealthCheck) *HttpHealthChecksInsertCall { + c := &HttpHealthChecksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.httphealthcheck = httphealthcheck + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *HttpHealthChecksInsertCall) Fields(s ...googleapi.Field) *HttpHealthChecksInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *HttpHealthChecksInsertCall) Context(ctx context.Context) *HttpHealthChecksInsertCall { + c.ctx_ = ctx + return c +} + +func (c *HttpHealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.httphealthcheck) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.httpHealthChecks.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. 
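+//
+// Example (editorial sketch, not generated code): create a basic check and
+// poll the returned global Operation until it completes; svc, the project
+// and check names, and the field values are placeholders, and the polling
+// loop is deliberately simplistic.
+//
+//	op, err := svc.HttpHealthChecks.Insert("my-project", &compute.HttpHealthCheck{
+//		Name:        "my-check",
+//		Port:        80,
+//		RequestPath: "/healthz",
+//	}).Do()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for op.Status != "DONE" {
+//		time.Sleep(time.Second)
+//		if op, err = svc.GlobalOperations.Get("my-project", op.Name).Do(); err != nil {
+//			log.Fatal(err)
+//		}
+//	}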
+func (c *HttpHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a HttpHealthCheck resource in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.httpHealthChecks.insert", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/httpHealthChecks", + // "request": { + // "$ref": "HttpHealthCheck" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.httpHealthChecks.list": + +type HttpHealthChecksListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Retrieves the list of HttpHealthCheck resources available to +// the specified project. +// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/list +func (r *HttpHealthChecksService) List(project string) *HttpHealthChecksListCall { + c := &HttpHealthChecksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use filter=name ne +// example-instance. +// +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. 
+//
+// The Beta API also supports filtering on multiple expressions by
+// providing each separate expression within parentheses. For example,
+// (scheduling.automaticRestart eq true) (zone eq us-central1-f).
+// Multiple expressions are treated as AND expressions, meaning that
+// resources must match all expressions to pass the filters.
+func (c *HttpHealthChecksListCall) Filter(filter string) *HttpHealthChecksListCall {
+	c.urlParams_.Set("filter", filter)
+	return c
+}
+
+// MaxResults sets the optional parameter "maxResults": The maximum
+// number of results per page that should be returned. If the number of
+// available results is larger than maxResults, Compute Engine returns a
+// nextPageToken that can be used to get the next page of results in
+// subsequent list requests.
+func (c *HttpHealthChecksListCall) MaxResults(maxResults int64) *HttpHealthChecksListCall {
+	c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
+	return c
+}
+
+// PageToken sets the optional parameter "pageToken": Specifies a page
+// token to use. Set pageToken to the nextPageToken returned by a
+// previous list request to get the next page of results.
+func (c *HttpHealthChecksListCall) PageToken(pageToken string) *HttpHealthChecksListCall {
+	c.urlParams_.Set("pageToken", pageToken)
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *HttpHealthChecksListCall) Fields(s ...googleapi.Field) *HttpHealthChecksListCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *HttpHealthChecksListCall) IfNoneMatch(entityTag string) *HttpHealthChecksListCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *HttpHealthChecksListCall) Context(ctx context.Context) *HttpHealthChecksListCall {
+	c.ctx_ = ctx
+	return c
+}
+
+func (c *HttpHealthChecksListCall) doRequest(alt string) (*http.Response, error) {
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks")
+	urls += "?" + c.urlParams_.Encode()
+	req, _ := http.NewRequest("GET", urls, body)
+	googleapi.Expand(req.URL, map[string]string{
+		"project": c.project,
+	})
+	req.Header.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		req.Header.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	if c.ctx_ != nil {
+		return ctxhttp.Do(c.ctx_, c.s.client, req)
+	}
+	return c.s.client.Do(req)
+}
+
+// Do executes the "compute.httpHealthChecks.list" call.
+// Exactly one of *HttpHealthCheckList or error will be non-nil. Any
+// non-2xx status code is an error. Response headers are in either
+// *HttpHealthCheckList.ServerResponse.Header or (if a response was
+// returned at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
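+//
+// Example (editorial sketch, not generated code): combine the list call
+// with a partial response so only the check names and the paging token are
+// returned; svc and the project name are placeholders.
+//
+//	list, err := svc.HttpHealthChecks.List("my-project").
+//		Fields("items/name", "nextPageToken").
+//		Do()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for _, hc := range list.Items {
+//		fmt.Println(hc.Name)
+//	}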
+func (c *HttpHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpHealthCheckList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &HttpHealthCheckList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of HttpHealthCheck resources available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.httpHealthChecks.list", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/httpHealthChecks", + // "response": { + // "$ref": "HttpHealthCheckList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *HttpHealthChecksListCall) Pages(ctx context.Context, f func(*HttpHealthCheckList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.httpHealthChecks.patch": + +type HttpHealthChecksPatchCall struct { + s *Service + project string + httpHealthCheck string + httphealthcheck *HttpHealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Patch: Updates a HttpHealthCheck resource in the specified project +// using the data included in the request. This method supports patch +// semantics. +// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/patch +func (r *HttpHealthChecksService) Patch(project string, httpHealthCheck string, httphealthcheck *HttpHealthCheck) *HttpHealthChecksPatchCall { + c := &HttpHealthChecksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.httpHealthCheck = httpHealthCheck + c.httphealthcheck = httphealthcheck + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *HttpHealthChecksPatchCall) Fields(s ...googleapi.Field) *HttpHealthChecksPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *HttpHealthChecksPatchCall) Context(ctx context.Context) *HttpHealthChecksPatchCall { + c.ctx_ = ctx + return c +} + +func (c *HttpHealthChecksPatchCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.httphealthcheck) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "httpHealthCheck": c.httpHealthCheck, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.httpHealthChecks.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *HttpHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates a HttpHealthCheck resource in the specified project using the data included in the request. This method supports patch semantics.", + // "httpMethod": "PATCH", + // "id": "compute.httpHealthChecks.patch", + // "parameterOrder": [ + // "project", + // "httpHealthCheck" + // ], + // "parameters": { + // "httpHealthCheck": { + // "description": "Name of the HttpHealthCheck resource to update.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", + // "request": { + // "$ref": "HttpHealthCheck" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.httpHealthChecks.update": + +type HttpHealthChecksUpdateCall struct { + s *Service + project string + httpHealthCheck string + httphealthcheck *HttpHealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Update: Updates a HttpHealthCheck resource in the specified project +// using the data included in the request. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/update +func (r *HttpHealthChecksService) Update(project string, httpHealthCheck string, httphealthcheck *HttpHealthCheck) *HttpHealthChecksUpdateCall { + c := &HttpHealthChecksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.httpHealthCheck = httpHealthCheck + c.httphealthcheck = httphealthcheck + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *HttpHealthChecksUpdateCall) Fields(s ...googleapi.Field) *HttpHealthChecksUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *HttpHealthChecksUpdateCall) Context(ctx context.Context) *HttpHealthChecksUpdateCall { + c.ctx_ = ctx + return c +} + +func (c *HttpHealthChecksUpdateCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.httphealthcheck) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PUT", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "httpHealthCheck": c.httpHealthCheck, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.httpHealthChecks.update" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *HttpHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
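+	// Editor's note: the 304 Not Modified case below is handled before the
+	// generic error check because a 304 response carries no body to decode;
+	// it is surfaced as a *googleapi.Error so googleapi.IsNotModified can
+	// detect it.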
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates a HttpHealthCheck resource in the specified project using the data included in the request.", + // "httpMethod": "PUT", + // "id": "compute.httpHealthChecks.update", + // "parameterOrder": [ + // "project", + // "httpHealthCheck" + // ], + // "parameters": { + // "httpHealthCheck": { + // "description": "Name of the HttpHealthCheck resource to update.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", + // "request": { + // "$ref": "HttpHealthCheck" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.httpsHealthChecks.delete": + +type HttpsHealthChecksDeleteCall struct { + s *Service + project string + httpsHealthCheck string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Deletes the specified HttpsHealthCheck resource. +func (r *HttpsHealthChecksService) Delete(project string, httpsHealthCheck string) *HttpsHealthChecksDeleteCall { + c := &HttpsHealthChecksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.httpsHealthCheck = httpsHealthCheck + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *HttpsHealthChecksDeleteCall) Fields(s ...googleapi.Field) *HttpsHealthChecksDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *HttpsHealthChecksDeleteCall) Context(ctx context.Context) *HttpsHealthChecksDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *HttpsHealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{httpsHealthCheck}") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "httpsHealthCheck": c.httpsHealthCheck, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.httpsHealthChecks.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *HttpsHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified HttpsHealthCheck resource.", + // "httpMethod": "DELETE", + // "id": "compute.httpsHealthChecks.delete", + // "parameterOrder": [ + // "project", + // "httpsHealthCheck" + // ], + // "parameters": { + // "httpsHealthCheck": { + // "description": "Name of the HttpsHealthCheck resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.httpsHealthChecks.get": + +type HttpsHealthChecksGetCall struct { + s *Service + project string + httpsHealthCheck string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// Get: Returns the specified HttpsHealthCheck resource. Get a list of +// available HTTPS health checks by making a list() request. +func (r *HttpsHealthChecksService) Get(project string, httpsHealthCheck string) *HttpsHealthChecksGetCall { + c := &HttpsHealthChecksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.httpsHealthCheck = httpsHealthCheck + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
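+// For example (an editor's illustrative sketch, not generated code; the
+// selector strings name fields of the HttpsHealthCheck resource), to
+// retrieve only the check's name and port:
+//
+//	c.Fields("name", "port")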
+func (c *HttpsHealthChecksGetCall) Fields(s ...googleapi.Field) *HttpsHealthChecksGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *HttpsHealthChecksGetCall) IfNoneMatch(entityTag string) *HttpsHealthChecksGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *HttpsHealthChecksGetCall) Context(ctx context.Context) *HttpsHealthChecksGetCall { + c.ctx_ = ctx + return c +} + +func (c *HttpsHealthChecksGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{httpsHealthCheck}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "httpsHealthCheck": c.httpsHealthCheck, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.httpsHealthChecks.get" call. +// Exactly one of *HttpsHealthCheck or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *HttpsHealthCheck.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *HttpsHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpsHealthCheck, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &HttpsHealthCheck{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified HttpsHealthCheck resource. 
Get a list of available HTTPS health checks by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.httpsHealthChecks.get", + // "parameterOrder": [ + // "project", + // "httpsHealthCheck" + // ], + // "parameters": { + // "httpsHealthCheck": { + // "description": "Name of the HttpsHealthCheck resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", + // "response": { + // "$ref": "HttpsHealthCheck" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.httpsHealthChecks.insert": + +type HttpsHealthChecksInsertCall struct { + s *Service + project string + httpshealthcheck *HttpsHealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Insert: Creates a HttpsHealthCheck resource in the specified project +// using the data included in the request. +func (r *HttpsHealthChecksService) Insert(project string, httpshealthcheck *HttpsHealthCheck) *HttpsHealthChecksInsertCall { + c := &HttpsHealthChecksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.httpshealthcheck = httpshealthcheck + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *HttpsHealthChecksInsertCall) Fields(s ...googleapi.Field) *HttpsHealthChecksInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *HttpsHealthChecksInsertCall) Context(ctx context.Context) *HttpsHealthChecksInsertCall { + c.ctx_ = ctx + return c +} + +func (c *HttpsHealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.httpshealthcheck) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.httpsHealthChecks.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. 
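+//
+// A hedged usage sketch (editor's addition; svc, the project ID, and the
+// check are illustrative, and the returned *Operation should be polled
+// until its Status is "DONE" before relying on the new resource):
+//
+//	op, err := svc.HttpsHealthChecks.Insert("my-project", &HttpsHealthCheck{
+//		Name: "example-check",
+//		Port: 443,
+//	}).Do()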
+func (c *HttpsHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a HttpsHealthCheck resource in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.httpsHealthChecks.insert", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/httpsHealthChecks", + // "request": { + // "$ref": "HttpsHealthCheck" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.httpsHealthChecks.list": + +type HttpsHealthChecksListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Retrieves the list of HttpsHealthCheck resources available to +// the specified project. +func (r *HttpsHealthChecksService) List(project string) *HttpsHealthChecksListCall { + c := &HttpsHealthChecksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use filter=name ne +// example-instance. +// +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// The Beta API also supports filtering on multiple expressions by +// providing each separate expression within parentheses. 
For example, +// (scheduling.automaticRestart eq true) (zone eq us-central1-f). +// Multiple expressions are treated as AND expressions, meaning that +// resources must match all expressions to pass the filters. +func (c *HttpsHealthChecksListCall) Filter(filter string) *HttpsHealthChecksListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. +func (c *HttpsHealthChecksListCall) MaxResults(maxResults int64) *HttpsHealthChecksListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *HttpsHealthChecksListCall) PageToken(pageToken string) *HttpsHealthChecksListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *HttpsHealthChecksListCall) Fields(s ...googleapi.Field) *HttpsHealthChecksListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *HttpsHealthChecksListCall) IfNoneMatch(entityTag string) *HttpsHealthChecksListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *HttpsHealthChecksListCall) Context(ctx context.Context) *HttpsHealthChecksListCall { + c.ctx_ = ctx + return c +} + +func (c *HttpsHealthChecksListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.httpsHealthChecks.list" call. +// Exactly one of *HttpsHealthCheckList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *HttpsHealthCheckList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *HttpsHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpsHealthCheckList, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
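+	// Editor's note: Do fetches a single page of results. Callers can feed
+	// ret.NextPageToken back through PageToken for manual paging, or use
+	// the Pages helper defined after this method to iterate every page.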
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &HttpsHealthCheckList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of HttpsHealthCheck resources available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.httpsHealthChecks.list", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/httpsHealthChecks", + // "response": { + // "$ref": "HttpsHealthCheckList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *HttpsHealthChecksListCall) Pages(ctx context.Context, f func(*HttpsHealthCheckList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.httpsHealthChecks.patch": + +type HttpsHealthChecksPatchCall struct { + s *Service + project string + httpsHealthCheck string + httpshealthcheck *HttpsHealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Patch: Updates a HttpsHealthCheck resource in the specified project +// using the data included in the request. This method supports patch +// semantics. +func (r *HttpsHealthChecksService) Patch(project string, httpsHealthCheck string, httpshealthcheck *HttpsHealthCheck) *HttpsHealthChecksPatchCall { + c := &HttpsHealthChecksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.httpsHealthCheck = httpsHealthCheck + c.httpshealthcheck = httpshealthcheck + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *HttpsHealthChecksPatchCall) Fields(s ...googleapi.Field) *HttpsHealthChecksPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *HttpsHealthChecksPatchCall) Context(ctx context.Context) *HttpsHealthChecksPatchCall { + c.ctx_ = ctx + return c +} + +func (c *HttpsHealthChecksPatchCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.httpshealthcheck) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{httpsHealthCheck}") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "httpsHealthCheck": c.httpsHealthCheck, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.httpsHealthChecks.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *HttpsHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates a HttpsHealthCheck resource in the specified project using the data included in the request. This method supports patch semantics.", + // "httpMethod": "PATCH", + // "id": "compute.httpsHealthChecks.patch", + // "parameterOrder": [ + // "project", + // "httpsHealthCheck" + // ], + // "parameters": { + // "httpsHealthCheck": { + // "description": "Name of the HttpsHealthCheck resource to update.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", + // "request": { + // "$ref": "HttpsHealthCheck" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.httpsHealthChecks.update": + +type HttpsHealthChecksUpdateCall struct { + s *Service + project string + httpsHealthCheck string + httpshealthcheck *HttpsHealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Update: Updates a HttpsHealthCheck resource in the specified project +// using the data included in the request. +func (r *HttpsHealthChecksService) Update(project string, httpsHealthCheck string, httpshealthcheck *HttpsHealthCheck) *HttpsHealthChecksUpdateCall { + c := &HttpsHealthChecksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.httpsHealthCheck = httpsHealthCheck + c.httpshealthcheck = httpshealthcheck + return c +} + +// Fields allows partial responses to be retrieved. 
See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *HttpsHealthChecksUpdateCall) Fields(s ...googleapi.Field) *HttpsHealthChecksUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *HttpsHealthChecksUpdateCall) Context(ctx context.Context) *HttpsHealthChecksUpdateCall { + c.ctx_ = ctx + return c +} + +func (c *HttpsHealthChecksUpdateCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.httpshealthcheck) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{httpsHealthCheck}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PUT", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "httpsHealthCheck": c.httpsHealthCheck, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.httpsHealthChecks.update" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *HttpsHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
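+	// Editor's note: update issues a PUT, replacing the whole resource, so
+	// the request body should be a fully populated HttpsHealthCheck; the
+	// Patch method above is the sparse, patch-semantics alternative.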
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates a HttpsHealthCheck resource in the specified project using the data included in the request.", + // "httpMethod": "PUT", + // "id": "compute.httpsHealthChecks.update", + // "parameterOrder": [ + // "project", + // "httpsHealthCheck" + // ], + // "parameters": { + // "httpsHealthCheck": { + // "description": "Name of the HttpsHealthCheck resource to update.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", + // "request": { + // "$ref": "HttpsHealthCheck" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.images.delete": + +type ImagesDeleteCall struct { + s *Service + project string + image string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Deletes the specified image. +// For details, see https://cloud.google.com/compute/docs/reference/latest/images/delete +func (r *ImagesService) Delete(project string, image string) *ImagesDeleteCall { + c := &ImagesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.image = image + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ImagesDeleteCall) Fields(s ...googleapi.Field) *ImagesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ImagesDeleteCall) Context(ctx context.Context) *ImagesDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *ImagesDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{image}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "image": c.image, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.images.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. 
Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ImagesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified image.", + // "httpMethod": "DELETE", + // "id": "compute.images.delete", + // "parameterOrder": [ + // "project", + // "image" + // ], + // "parameters": { + // "image": { + // "description": "Name of the image resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/images/{image}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.images.deprecate": + +type ImagesDeprecateCall struct { + s *Service + project string + image string + deprecationstatus *DeprecationStatus + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Deprecate: Sets the deprecation status of an image. +// +// If an empty request body is given, clears the deprecation status +// instead. +// For details, see https://cloud.google.com/compute/docs/reference/latest/images/deprecate +func (r *ImagesService) Deprecate(project string, image string, deprecationstatus *DeprecationStatus) *ImagesDeprecateCall { + c := &ImagesDeprecateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.image = image + c.deprecationstatus = deprecationstatus + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ImagesDeprecateCall) Fields(s ...googleapi.Field) *ImagesDeprecateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
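+//
+// A minimal sketch (editor's addition, assuming the golang.org/x/net/context
+// package this client uses; call is a placeholder for any built call value):
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//	defer cancel()
+//	op, err := call.Context(ctx).Do()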
+func (c *ImagesDeprecateCall) Context(ctx context.Context) *ImagesDeprecateCall { + c.ctx_ = ctx + return c +} + +func (c *ImagesDeprecateCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.deprecationstatus) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{image}/deprecate") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "image": c.image, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.images.deprecate" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ImagesDeprecateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the deprecation status of an image.\n\nIf an empty request body is given, clears the deprecation status instead.", + // "httpMethod": "POST", + // "id": "compute.images.deprecate", + // "parameterOrder": [ + // "project", + // "image" + // ], + // "parameters": { + // "image": { + // "description": "Image name.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/images/{image}/deprecate", + // "request": { + // "$ref": "DeprecationStatus" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.images.get": + +type ImagesGetCall struct { + s *Service + project string + image string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// Get: Returns the specified image. Get a list of available images by +// making a list() request. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/images/get +func (r *ImagesService) Get(project string, image string) *ImagesGetCall { + c := &ImagesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.image = image + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ImagesGetCall) Fields(s ...googleapi.Field) *ImagesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ImagesGetCall) IfNoneMatch(entityTag string) *ImagesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ImagesGetCall) Context(ctx context.Context) *ImagesGetCall { + c.ctx_ = ctx + return c +} + +func (c *ImagesGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{image}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "image": c.image, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.images.get" call. +// Exactly one of *Image or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Image.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ImagesGetCall) Do(opts ...googleapi.CallOption) (*Image, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Image{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified image. 
Get a list of available images by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.images.get", + // "parameterOrder": [ + // "project", + // "image" + // ], + // "parameters": { + // "image": { + // "description": "Name of the image resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/images/{image}", + // "response": { + // "$ref": "Image" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.images.getFromFamily": + +type ImagesGetFromFamilyCall struct { + s *Service + project string + family string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// GetFromFamily: Returns the latest image that is part of an image +// family and is not deprecated. +func (r *ImagesService) GetFromFamily(project string, family string) *ImagesGetFromFamilyCall { + c := &ImagesGetFromFamilyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.family = family + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ImagesGetFromFamilyCall) Fields(s ...googleapi.Field) *ImagesGetFromFamilyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ImagesGetFromFamilyCall) IfNoneMatch(entityTag string) *ImagesGetFromFamilyCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ImagesGetFromFamilyCall) Context(ctx context.Context) *ImagesGetFromFamilyCall { + c.ctx_ = ctx + return c +} + +func (c *ImagesGetFromFamilyCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/family/{family}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "family": c.family, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.images.getFromFamily" call. +// Exactly one of *Image or error will be non-nil. Any non-2xx status +// code is an error. 
Response headers are in either +// *Image.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ImagesGetFromFamilyCall) Do(opts ...googleapi.CallOption) (*Image, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Image{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the latest image that is part of an image family and is not deprecated.", + // "httpMethod": "GET", + // "id": "compute.images.getFromFamily", + // "parameterOrder": [ + // "project", + // "family" + // ], + // "parameters": { + // "family": { + // "description": "Name of the image resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/images/family/{family}", + // "response": { + // "$ref": "Image" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.images.insert": + +type ImagesInsertCall struct { + s *Service + project string + image *Image + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Insert: Creates an image in the specified project using the data +// included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/images/insert +func (r *ImagesService) Insert(project string, image *Image) *ImagesInsertCall { + c := &ImagesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.image = image + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ImagesInsertCall) Fields(s ...googleapi.Field) *ImagesInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
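+//
+// Editor's note: when a context is set, doRequest sends the request via
+// ctxhttp.Do, so cancellation or timeout of the context aborts the
+// in-flight HTTP request; with no context the bare http.Client is used.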
+func (c *ImagesInsertCall) Context(ctx context.Context) *ImagesInsertCall { + c.ctx_ = ctx + return c +} + +func (c *ImagesInsertCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.image) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.images.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ImagesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates an image in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.images.insert", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/images", + // "request": { + // "$ref": "Image" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "compute.images.list": + +type ImagesListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Retrieves the list of private images available to the specified +// project. Private images are images you create that belong to your +// project. This method does not get any images that belong to other +// projects, including publicly-available images, like Debian 7. 
If you +// want to get a list of publicly-available images, use this method to +// make a request to the respective image project, such as debian-cloud +// or windows-cloud. +// +// See Accessing images for more information. +// For details, see https://cloud.google.com/compute/docs/reference/latest/images/list +func (r *ImagesService) List(project string) *ImagesListCall { + c := &ImagesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use filter=name ne +// example-instance. +// +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// The Beta API also supports filtering on multiple expressions by +// providing each separate expression within parentheses. For example, +// (scheduling.automaticRestart eq true) (zone eq us-central1-f). +// Multiple expressions are treated as AND expressions, meaning that +// resources must match all expressions to pass the filters. +func (c *ImagesListCall) Filter(filter string) *ImagesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. +func (c *ImagesListCall) MaxResults(maxResults int64) *ImagesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *ImagesListCall) PageToken(pageToken string) *ImagesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ImagesListCall) Fields(s ...googleapi.Field) *ImagesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. 
Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *ImagesListCall) IfNoneMatch(entityTag string) *ImagesListCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *ImagesListCall) Context(ctx context.Context) *ImagesListCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *ImagesListCall) doRequest(alt string) (*http.Response, error) {
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ })
+ req.Header.Set("User-Agent", c.s.userAgent())
+ if c.ifNoneMatch_ != "" {
+ req.Header.Set("If-None-Match", c.ifNoneMatch_)
+ }
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "compute.images.list" call.
+// Exactly one of *ImageList or error will be non-nil. Any non-2xx
+// status code is an error. Response headers are in either
+// *ImageList.ServerResponse.Header or (if a response was returned at
+// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
+// to check whether the returned error was because
+// http.StatusNotModified was returned.
+func (c *ImagesListCall) Do(opts ...googleapi.CallOption) (*ImageList, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, &googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ ret := &ImageList{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Retrieves the list of private images available to the specified project. Private images are images you create that belong to your project. This method does not get any images that belong to other projects, including publicly-available images, like Debian 7. If you want to get a list of publicly-available images, use this method to make a request to the respective image project, such as debian-cloud or windows-cloud.\n\nSee Accessing images for more information.",
+ // "httpMethod": "GET",
+ // "id": "compute.images.list",
+ // "parameterOrder": [
+ // "project"
+ // ],
+ // "parameters": {
+ // "filter": {
+ // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax.
The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/images", + // "response": { + // "$ref": "ImageList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ImagesListCall) Pages(ctx context.Context, f func(*ImageList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.instanceGroupManagers.abandonInstances": + +type InstanceGroupManagersAbandonInstancesCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanagersabandoninstancesrequest *InstanceGroupManagersAbandonInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// AbandonInstances: Schedules a group action to remove the specified +// instances from the managed instance group. Abandoning an instance +// does not delete the instance, but it does remove the instance from +// any target pools that are applied by the managed instance group. This +// method reduces the targetSize of the managed instance group by the +// number of instances that you abandon. 
This operation is marked as +// DONE when the action is scheduled even if the instances have not yet +// been removed from the group. You must separately verify the status of +// the abandoning action with the listmanagedinstances method. +func (r *InstanceGroupManagersService) AbandonInstances(project string, zone string, instanceGroupManager string, instancegroupmanagersabandoninstancesrequest *InstanceGroupManagersAbandonInstancesRequest) *InstanceGroupManagersAbandonInstancesCall { + c := &InstanceGroupManagersAbandonInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanagersabandoninstancesrequest = instancegroupmanagersabandoninstancesrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstanceGroupManagersAbandonInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersAbandonInstancesCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstanceGroupManagersAbandonInstancesCall) Context(ctx context.Context) *InstanceGroupManagersAbandonInstancesCall { + c.ctx_ = ctx + return c +} + +func (c *InstanceGroupManagersAbandonInstancesCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersabandoninstancesrequest) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/abandonInstances") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.instanceGroupManagers.abandonInstances" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
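+ // Issue the HTTP request; a 304 Not Modified response is converted into
+ // a *googleapi.Error below rather than decoded as an Operation.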
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Schedules a group action to remove the specified instances from the managed instance group. Abandoning an instance does not delete the instance, but it does remove the instance from any target pools that are applied by the managed instance group. This method reduces the targetSize of the managed instance group by the number of instances that you abandon. This operation is marked as DONE when the action is scheduled even if the instances have not yet been removed from the group. You must separately verify the status of the abandoning action with the listmanagedinstances method.", + // "httpMethod": "POST", + // "id": "compute.instanceGroupManagers.abandonInstances", + // "parameterOrder": [ + // "project", + // "zone", + // "instanceGroupManager" + // ], + // "parameters": { + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/abandonInstances", + // "request": { + // "$ref": "InstanceGroupManagersAbandonInstancesRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instanceGroupManagers.aggregatedList": + +type InstanceGroupManagersAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// AggregatedList: Retrieves the list of managed instance groups and +// groups them by zone. +func (r *InstanceGroupManagersService) AggregatedList(project string) *InstanceGroupManagersAggregatedListCall { + c := &InstanceGroupManagersAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. 
The literal value
+// must be valid for the type of field you are filtering by (string,
+// number, boolean). For string fields, the literal value is interpreted
+// as a regular expression using RE2 syntax. The literal value must
+// match the entire field.
+//
+// For example, to filter for instances that do not have a name of
+// example-instance, you would use filter=name ne
+// example-instance.
+//
+// Compute Engine Beta API Only: When filtering in the Beta API, you can
+// also filter on nested fields. For example, you could filter on
+// instances that have set the scheduling.automaticRestart field to
+// true. Use filtering on nested fields to take advantage of labels to
+// organize and search for results based on label values.
+//
+// The Beta API also supports filtering on multiple expressions by
+// providing each separate expression within parentheses. For example,
+// (scheduling.automaticRestart eq true) (zone eq us-central1-f).
+// Multiple expressions are treated as AND expressions, meaning that
+// resources must match all expressions to pass the filters.
+func (c *InstanceGroupManagersAggregatedListCall) Filter(filter string) *InstanceGroupManagersAggregatedListCall {
+ c.urlParams_.Set("filter", filter)
+ return c
+}
+
+// MaxResults sets the optional parameter "maxResults": The maximum
+// number of results per page that should be returned. If the number of
+// available results is larger than maxResults, Compute Engine returns a
+// nextPageToken that can be used to get the next page of results in
+// subsequent list requests.
+func (c *InstanceGroupManagersAggregatedListCall) MaxResults(maxResults int64) *InstanceGroupManagersAggregatedListCall {
+ c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
+ return c
+}
+
+// PageToken sets the optional parameter "pageToken": Specifies a page
+// token to use. Set pageToken to the nextPageToken returned by a
+// previous list request to get the next page of results.
+func (c *InstanceGroupManagersAggregatedListCall) PageToken(pageToken string) *InstanceGroupManagersAggregatedListCall {
+ c.urlParams_.Set("pageToken", pageToken)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *InstanceGroupManagersAggregatedListCall) Fields(s ...googleapi.Field) *InstanceGroupManagersAggregatedListCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *InstanceGroupManagersAggregatedListCall) IfNoneMatch(entityTag string) *InstanceGroupManagersAggregatedListCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
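+//
+// A hypothetical sketch combining these options (svc, ctx, and
+// cachedETag are assumed to exist in the caller):
+//
+//	agg, err := svc.InstanceGroupManagers.AggregatedList("my-project").
+//		Filter("name eq my-group-.*").
+//		MaxResults(100).
+//		IfNoneMatch(cachedETag).
+//		Context(ctx).
+//		Do()
+//	if googleapi.IsNotModified(err) {
+//		// The copy identified by cachedETag is still current.
+//	}
+//	_ = agg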
+func (c *InstanceGroupManagersAggregatedListCall) Context(ctx context.Context) *InstanceGroupManagersAggregatedListCall { + c.ctx_ = ctx + return c +} + +func (c *InstanceGroupManagersAggregatedListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/instanceGroupManagers") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.instanceGroupManagers.aggregatedList" call. +// Exactly one of *InstanceGroupManagerAggregatedList or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *InstanceGroupManagerAggregatedList.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *InstanceGroupManagersAggregatedListCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManagerAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &InstanceGroupManagerAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of managed instance groups and groups them by zone.", + // "httpMethod": "GET", + // "id": "compute.instanceGroupManagers.aggregatedList", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/aggregated/instanceGroupManagers", + // "response": { + // "$ref": "InstanceGroupManagerAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *InstanceGroupManagersAggregatedListCall) Pages(ctx context.Context, f func(*InstanceGroupManagerAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.instanceGroupManagers.delete": + +type InstanceGroupManagersDeleteCall struct { + s *Service + project string + zone string + instanceGroupManager string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Deletes the specified managed instance group and all of the +// instances in that group. Note that the instance group must not belong +// to a backend service. Read Deleting an instance group for more +// information. +func (r *InstanceGroupManagersService) Delete(project string, zone string, instanceGroupManager string) *InstanceGroupManagersDeleteCall { + c := &InstanceGroupManagersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instanceGroupManager = instanceGroupManager + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
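+//
+// For example (a hypothetical sketch; svc is an assumed authenticated
+// *Service), requesting only the operation's name and status:
+//
+//	op, err := svc.InstanceGroupManagers.
+//		Delete("my-project", "us-central1-f", "my-group").
+//		Fields("name", "status").
+//		Do()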
+func (c *InstanceGroupManagersDeleteCall) Fields(s ...googleapi.Field) *InstanceGroupManagersDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstanceGroupManagersDeleteCall) Context(ctx context.Context) *InstanceGroupManagersDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *InstanceGroupManagersDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.instanceGroupManagers.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified managed instance group and all of the instances in that group. Note that the instance group must not belong to a backend service. 
Read Deleting an instance group for more information.", + // "httpMethod": "DELETE", + // "id": "compute.instanceGroupManagers.delete", + // "parameterOrder": [ + // "project", + // "zone", + // "instanceGroupManager" + // ], + // "parameters": { + // "instanceGroupManager": { + // "description": "The name of the managed instance group to delete.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instanceGroupManagers.deleteInstances": + +type InstanceGroupManagersDeleteInstancesCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanagersdeleteinstancesrequest *InstanceGroupManagersDeleteInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// DeleteInstances: Schedules a group action to delete the specified +// instances in the managed instance group. The instances are also +// removed from any target pools of which they were a member. This +// method reduces the targetSize of the managed instance group by the +// number of instances that you delete. This operation is marked as DONE +// when the action is scheduled even if the instances are still being +// deleted. You must separately verify the status of the deleting action +// with the listmanagedinstances method. +func (r *InstanceGroupManagersService) DeleteInstances(project string, zone string, instanceGroupManager string, instancegroupmanagersdeleteinstancesrequest *InstanceGroupManagersDeleteInstancesRequest) *InstanceGroupManagersDeleteInstancesCall { + c := &InstanceGroupManagersDeleteInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanagersdeleteinstancesrequest = instancegroupmanagersdeleteinstancesrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstanceGroupManagersDeleteInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersDeleteInstancesCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
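+//
+// A hypothetical sketch, written as client code that imports this
+// package as compute (svc assumed; the instance URL is illustrative).
+// The returned Operation is DONE once the deletions are scheduled, so
+// progress must be verified separately:
+//
+//	req := &compute.InstanceGroupManagersDeleteInstancesRequest{
+//		Instances: []string{"zones/us-central1-f/instances/inst-1"},
+//	}
+//	op, err := svc.InstanceGroupManagers.DeleteInstances(
+//		"my-project", "us-central1-f", "my-group", req).Do()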
+func (c *InstanceGroupManagersDeleteInstancesCall) Context(ctx context.Context) *InstanceGroupManagersDeleteInstancesCall { + c.ctx_ = ctx + return c +} + +func (c *InstanceGroupManagersDeleteInstancesCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersdeleteinstancesrequest) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/deleteInstances") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.instanceGroupManagers.deleteInstances" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Schedules a group action to delete the specified instances in the managed instance group. The instances are also removed from any target pools of which they were a member. This method reduces the targetSize of the managed instance group by the number of instances that you delete. This operation is marked as DONE when the action is scheduled even if the instances are still being deleted. 
You must separately verify the status of the deleting action with the listmanagedinstances method.",
+ // "httpMethod": "POST",
+ // "id": "compute.instanceGroupManagers.deleteInstances",
+ // "parameterOrder": [
+ // "project",
+ // "zone",
+ // "instanceGroupManager"
+ // ],
+ // "parameters": {
+ // "instanceGroupManager": {
+ // "description": "The name of the managed instance group.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "project": {
+ // "description": "Project ID for this request.",
+ // "location": "path",
+ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "zone": {
+ // "description": "The name of the zone where the managed instance group is located.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/deleteInstances",
+ // "request": {
+ // "$ref": "InstanceGroupManagersDeleteInstancesRequest"
+ // },
+ // "response": {
+ // "$ref": "Operation"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/compute"
+ // ]
+ // }
+
+}
+
+// method id "compute.instanceGroupManagers.get":
+
+type InstanceGroupManagersGetCall struct {
+ s *Service
+ project string
+ zone string
+ instanceGroupManager string
+ urlParams_ gensupport.URLParams
+ ifNoneMatch_ string
+ ctx_ context.Context
+}
+
+// Get: Returns all of the details about the specified managed instance
+// group. Get a list of available managed instance groups by making a
+// list() request.
+func (r *InstanceGroupManagersService) Get(project string, zone string, instanceGroupManager string) *InstanceGroupManagersGetCall {
+ c := &InstanceGroupManagersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.project = project
+ c.zone = zone
+ c.instanceGroupManager = instanceGroupManager
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *InstanceGroupManagersGetCall) Fields(s ...googleapi.Field) *InstanceGroupManagersGetCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *InstanceGroupManagersGetCall) IfNoneMatch(entityTag string) *InstanceGroupManagersGetCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *InstanceGroupManagersGetCall) Context(ctx context.Context) *InstanceGroupManagersGetCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *InstanceGroupManagersGetCall) doRequest(alt string) (*http.Response, error) {
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}")
+ urls += "?"
+ c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.instanceGroupManagers.get" call. +// Exactly one of *InstanceGroupManager or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *InstanceGroupManager.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManager, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &InstanceGroupManager{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns all of the details about the specified managed instance group. Get a list of available managed instance groups by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.instanceGroupManagers.get", + // "parameterOrder": [ + // "project", + // "zone", + // "instanceGroupManager" + // ], + // "parameters": { + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", + // "response": { + // "$ref": "InstanceGroupManager" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.instanceGroupManagers.insert": + +type InstanceGroupManagersInsertCall struct { + s *Service + project string + zone string + instancegroupmanager *InstanceGroupManager + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Insert: Creates a managed instance group using the information that +// you specify in the request. After the group is created, it schedules +// an action to create instances in the group using the specified +// instance template. 
This operation is marked as DONE when the group is +// created even if the instances in the group have not yet been created. +// You must separately verify the status of the individual instances +// with the listmanagedinstances method. +func (r *InstanceGroupManagersService) Insert(project string, zone string, instancegroupmanager *InstanceGroupManager) *InstanceGroupManagersInsertCall { + c := &InstanceGroupManagersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instancegroupmanager = instancegroupmanager + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstanceGroupManagersInsertCall) Fields(s ...googleapi.Field) *InstanceGroupManagersInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstanceGroupManagersInsertCall) Context(ctx context.Context) *InstanceGroupManagersInsertCall { + c.ctx_ = ctx + return c +} + +func (c *InstanceGroupManagersInsertCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.instanceGroupManagers.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a managed instance group using the information that you specify in the request. After the group is created, it schedules an action to create instances in the group using the specified instance template. This operation is marked as DONE when the group is created even if the instances in the group have not yet been created. 
You must separately verify the status of the individual instances with the listmanagedinstances method.", + // "httpMethod": "POST", + // "id": "compute.instanceGroupManagers.insert", + // "parameterOrder": [ + // "project", + // "zone" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where you want to create the managed instance group.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instanceGroupManagers", + // "request": { + // "$ref": "InstanceGroupManager" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instanceGroupManagers.list": + +type InstanceGroupManagersListCall struct { + s *Service + project string + zone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Retrieves a list of managed instance groups that are contained +// within the specified project and zone. +func (r *InstanceGroupManagersService) List(project string, zone string) *InstanceGroupManagersListCall { + c := &InstanceGroupManagersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use filter=name ne +// example-instance. +// +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// The Beta API also supports filtering on multiple expressions by +// providing each separate expression within parentheses. For example, +// (scheduling.automaticRestart eq true) (zone eq us-central1-f). +// Multiple expressions are treated as AND expressions, meaning that +// resources must match all expressions to pass the filters. +func (c *InstanceGroupManagersListCall) Filter(filter string) *InstanceGroupManagersListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. 
If the number of
+// available results is larger than maxResults, Compute Engine returns a
+// nextPageToken that can be used to get the next page of results in
+// subsequent list requests.
+func (c *InstanceGroupManagersListCall) MaxResults(maxResults int64) *InstanceGroupManagersListCall {
+ c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
+ return c
+}
+
+// PageToken sets the optional parameter "pageToken": Specifies a page
+// token to use. Set pageToken to the nextPageToken returned by a
+// previous list request to get the next page of results.
+func (c *InstanceGroupManagersListCall) PageToken(pageToken string) *InstanceGroupManagersListCall {
+ c.urlParams_.Set("pageToken", pageToken)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *InstanceGroupManagersListCall) Fields(s ...googleapi.Field) *InstanceGroupManagersListCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *InstanceGroupManagersListCall) IfNoneMatch(entityTag string) *InstanceGroupManagersListCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *InstanceGroupManagersListCall) Context(ctx context.Context) *InstanceGroupManagersListCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *InstanceGroupManagersListCall) doRequest(alt string) (*http.Response, error) {
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "zone": c.zone,
+ })
+ req.Header.Set("User-Agent", c.s.userAgent())
+ if c.ifNoneMatch_ != "" {
+ req.Header.Set("If-None-Match", c.ifNoneMatch_)
+ }
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "compute.instanceGroupManagers.list" call.
+// Exactly one of *InstanceGroupManagerList or error will be non-nil.
+// Any non-2xx status code is an error. Response headers are in either
+// *InstanceGroupManagerList.ServerResponse.Header or (if a response was
+// returned at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
+func (c *InstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManagerList, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
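+ // doRequest issues the GET, attaching the If-None-Match header when one
+ // was supplied via IfNoneMatch.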
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &InstanceGroupManagerList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a list of managed instance groups that are contained within the specified project and zone.", + // "httpMethod": "GET", + // "id": "compute.instanceGroupManagers.list", + // "parameterOrder": [ + // "project", + // "zone" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instanceGroupManagers", + // "response": { + // "$ref": "InstanceGroupManagerList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *InstanceGroupManagersListCall) Pages(ctx context.Context, f func(*InstanceGroupManagerList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.instanceGroupManagers.listManagedInstances": + +type InstanceGroupManagersListManagedInstancesCall struct { + s *Service + project string + zone string + instanceGroupManager string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// ListManagedInstances: Lists all of the instances in the managed +// instance group. Each instance in the list has a currentAction, which +// indicates the action that the managed instance group is performing on +// the instance. For example, if the group is still creating an +// instance, the currentAction is CREATING. If a previous action failed, +// the list displays the errors for that failed action. +func (r *InstanceGroupManagersService) ListManagedInstances(project string, zone string, instanceGroupManager string) *InstanceGroupManagersListManagedInstancesCall { + c := &InstanceGroupManagersListManagedInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instanceGroupManager = instanceGroupManager + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstanceGroupManagersListManagedInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersListManagedInstancesCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
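+//
+// A hypothetical polling sketch (svc is assumed; CurrentAction mirrors
+// the currentAction described above, and the ManagedInstances field
+// name is an assumption about the response struct):
+//
+//	resp, err := svc.InstanceGroupManagers.ListManagedInstances(
+//		"my-project", "us-central1-f", "my-group").Do()
+//	if err == nil {
+//		for _, mi := range resp.ManagedInstances {
+//			fmt.Println(mi.Instance, mi.CurrentAction) // e.g. CREATING
+//		}
+//	}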
+func (c *InstanceGroupManagersListManagedInstancesCall) Context(ctx context.Context) *InstanceGroupManagersListManagedInstancesCall { + c.ctx_ = ctx + return c +} + +func (c *InstanceGroupManagersListManagedInstancesCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.instanceGroupManagers.listManagedInstances" call. +// Exactly one of *InstanceGroupManagersListManagedInstancesResponse or +// error will be non-nil. Any non-2xx status code is an error. Response +// headers are in either +// *InstanceGroupManagersListManagedInstancesResponse.ServerResponse.Head +// er or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *InstanceGroupManagersListManagedInstancesCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManagersListManagedInstancesResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &InstanceGroupManagersListManagedInstancesResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists all of the instances in the managed instance group. Each instance in the list has a currentAction, which indicates the action that the managed instance group is performing on the instance. For example, if the group is still creating an instance, the currentAction is CREATING. 
If a previous action failed, the list displays the errors for that failed action.", + // "httpMethod": "POST", + // "id": "compute.instanceGroupManagers.listManagedInstances", + // "parameterOrder": [ + // "project", + // "zone", + // "instanceGroupManager" + // ], + // "parameters": { + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances", + // "response": { + // "$ref": "InstanceGroupManagersListManagedInstancesResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.instanceGroupManagers.recreateInstances": + +type InstanceGroupManagersRecreateInstancesCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanagersrecreateinstancesrequest *InstanceGroupManagersRecreateInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// RecreateInstances: Schedules a group action to recreate the specified +// instances in the managed instance group. The instances are deleted +// and recreated using the current instance template for the managed +// instance group. This operation is marked as DONE when the action is +// scheduled even if the instances have not yet been recreated. You must +// separately verify the status of the recreating action with the +// listmanagedinstances method. +func (r *InstanceGroupManagersService) RecreateInstances(project string, zone string, instanceGroupManager string, instancegroupmanagersrecreateinstancesrequest *InstanceGroupManagersRecreateInstancesRequest) *InstanceGroupManagersRecreateInstancesCall { + c := &InstanceGroupManagersRecreateInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanagersrecreateinstancesrequest = instancegroupmanagersrecreateinstancesrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstanceGroupManagersRecreateInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersRecreateInstancesCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *InstanceGroupManagersRecreateInstancesCall) Context(ctx context.Context) *InstanceGroupManagersRecreateInstancesCall { + c.ctx_ = ctx + return c +} + +func (c *InstanceGroupManagersRecreateInstancesCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersrecreateinstancesrequest) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/recreateInstances") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.instanceGroupManagers.recreateInstances" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Schedules a group action to recreate the specified instances in the managed instance group. The instances are deleted and recreated using the current instance template for the managed instance group. This operation is marked as DONE when the action is scheduled even if the instances have not yet been recreated. 
You must separately verify the status of the recreating action with the listmanagedinstances method.", + // "httpMethod": "POST", + // "id": "compute.instanceGroupManagers.recreateInstances", + // "parameterOrder": [ + // "project", + // "zone", + // "instanceGroupManager" + // ], + // "parameters": { + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/recreateInstances", + // "request": { + // "$ref": "InstanceGroupManagersRecreateInstancesRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instanceGroupManagers.resize": + +type InstanceGroupManagersResizeCall struct { + s *Service + project string + zone string + instanceGroupManager string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Resize: Resizes the managed instance group. If you increase the size, +// the group creates new instances using the current instance template. +// If you decrease the size, the group deletes instances. The resize +// operation is marked DONE when the resize actions are scheduled even +// if the group has not yet added or deleted any instances. You must +// separately verify the status of the creating or deleting actions with +// the listmanagedinstances method. +func (r *InstanceGroupManagersService) Resize(project string, zone string, instanceGroupManager string, size int64) *InstanceGroupManagersResizeCall { + c := &InstanceGroupManagersResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instanceGroupManager = instanceGroupManager + c.urlParams_.Set("size", fmt.Sprint(size)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstanceGroupManagersResizeCall) Fields(s ...googleapi.Field) *InstanceGroupManagersResizeCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstanceGroupManagersResizeCall) Context(ctx context.Context) *InstanceGroupManagersResizeCall { + c.ctx_ = ctx + return c +} + +func (c *InstanceGroupManagersResizeCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/resize") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.instanceGroupManagers.resize" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Resizes the managed instance group. If you increase the size, the group creates new instances using the current instance template. If you decrease the size, the group deletes instances. The resize operation is marked DONE when the resize actions are scheduled even if the group has not yet added or deleted any instances. You must separately verify the status of the creating or deleting actions with the listmanagedinstances method.", + // "httpMethod": "POST", + // "id": "compute.instanceGroupManagers.resize", + // "parameterOrder": [ + // "project", + // "zone", + // "instanceGroupManager", + // "size" + // ], + // "parameters": { + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "size": { + // "description": "The number of running instances that the managed instance group should maintain at any given time. 
The group automatically adds or removes instances to maintain the number of instances specified by this parameter.", + // "format": "int32", + // "location": "query", + // "required": true, + // "type": "integer" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/resize", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instanceGroupManagers.setInstanceTemplate": + +type InstanceGroupManagersSetInstanceTemplateCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanagerssetinstancetemplaterequest *InstanceGroupManagersSetInstanceTemplateRequest + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// SetInstanceTemplate: Specifies the instance template to use when +// creating new instances in this group. The templates for existing +// instances in the group do not change unless you recreate them. +func (r *InstanceGroupManagersService) SetInstanceTemplate(project string, zone string, instanceGroupManager string, instancegroupmanagerssetinstancetemplaterequest *InstanceGroupManagersSetInstanceTemplateRequest) *InstanceGroupManagersSetInstanceTemplateCall { + c := &InstanceGroupManagersSetInstanceTemplateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanagerssetinstancetemplaterequest = instancegroupmanagerssetinstancetemplaterequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstanceGroupManagersSetInstanceTemplateCall) Fields(s ...googleapi.Field) *InstanceGroupManagersSetInstanceTemplateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstanceGroupManagersSetInstanceTemplateCall) Context(ctx context.Context) *InstanceGroupManagersSetInstanceTemplateCall { + c.ctx_ = ctx + return c +} + +func (c *InstanceGroupManagersSetInstanceTemplateCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagerssetinstancetemplaterequest) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.instanceGroupManagers.setInstanceTemplate" call. +// Exactly one of *Operation or error will be non-nil. 
Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Specifies the instance template to use when creating new instances in this group. The templates for existing instances in the group do not change unless you recreate them.", + // "httpMethod": "POST", + // "id": "compute.instanceGroupManagers.setInstanceTemplate", + // "parameterOrder": [ + // "project", + // "zone", + // "instanceGroupManager" + // ], + // "parameters": { + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate", + // "request": { + // "$ref": "InstanceGroupManagersSetInstanceTemplateRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instanceGroupManagers.setTargetPools": + +type InstanceGroupManagersSetTargetPoolsCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanagerssettargetpoolsrequest *InstanceGroupManagersSetTargetPoolsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// SetTargetPools: Modifies the target pools to which all instances in +// this managed instance group are assigned. The target pools +// automatically apply to all of the instances in the managed instance +// group. This operation is marked DONE when you make the request even +// if the instances have not yet been added to their target pools. The +// change might take some time to apply to all of the instances in the +// group depending on the size of the group. 
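+//
+// A minimal usage sketch (hypothetical identifiers; the TargetPools
+// field name is assumed from the request schema rather than shown in
+// this diff):
+//
+//	req := &InstanceGroupManagersSetTargetPoolsRequest{
+//		TargetPools: []string{targetPoolURL}, // full URL of an existing target pool
+//	}
+//	op, err := svc.InstanceGroupManagers.
+//		SetTargetPools("my-project", "us-central1-f", "my-mig", req).
+//		Do()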
+func (r *InstanceGroupManagersService) SetTargetPools(project string, zone string, instanceGroupManager string, instancegroupmanagerssettargetpoolsrequest *InstanceGroupManagersSetTargetPoolsRequest) *InstanceGroupManagersSetTargetPoolsCall { + c := &InstanceGroupManagersSetTargetPoolsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanagerssettargetpoolsrequest = instancegroupmanagerssettargetpoolsrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstanceGroupManagersSetTargetPoolsCall) Fields(s ...googleapi.Field) *InstanceGroupManagersSetTargetPoolsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstanceGroupManagersSetTargetPoolsCall) Context(ctx context.Context) *InstanceGroupManagersSetTargetPoolsCall { + c.ctx_ = ctx + return c +} + +func (c *InstanceGroupManagersSetTargetPoolsCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagerssettargetpoolsrequest) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setTargetPools") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.instanceGroupManagers.setTargetPools" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Modifies the target pools to which all instances in this managed instance group are assigned. The target pools automatically apply to all of the instances in the managed instance group. 
This operation is marked DONE when you make the request even if the instances have not yet been added to their target pools. The change might take some time to apply to all of the instances in the group depending on the size of the group.", + // "httpMethod": "POST", + // "id": "compute.instanceGroupManagers.setTargetPools", + // "parameterOrder": [ + // "project", + // "zone", + // "instanceGroupManager" + // ], + // "parameters": { + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setTargetPools", + // "request": { + // "$ref": "InstanceGroupManagersSetTargetPoolsRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instanceGroups.addInstances": + +type InstanceGroupsAddInstancesCall struct { + s *Service + project string + zone string + instanceGroup string + instancegroupsaddinstancesrequest *InstanceGroupsAddInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// AddInstances: Adds a list of instances to the specified instance +// group. All of the instances in the instance group must be in the same +// network/subnetwork. Read Adding instances for more information. +func (r *InstanceGroupsService) AddInstances(project string, zone string, instanceGroup string, instancegroupsaddinstancesrequest *InstanceGroupsAddInstancesRequest) *InstanceGroupsAddInstancesCall { + c := &InstanceGroupsAddInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instanceGroup = instanceGroup + c.instancegroupsaddinstancesrequest = instancegroupsaddinstancesrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstanceGroupsAddInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupsAddInstancesCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstanceGroupsAddInstancesCall) Context(ctx context.Context) *InstanceGroupsAddInstancesCall { + c.ctx_ = ctx + return c +} + +func (c *InstanceGroupsAddInstancesCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupsaddinstancesrequest) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}/addInstances") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instanceGroup": c.instanceGroup, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.instanceGroups.addInstances" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceGroupsAddInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Adds a list of instances to the specified instance group. All of the instances in the instance group must be in the same network/subnetwork. Read Adding instances for more information.", + // "httpMethod": "POST", + // "id": "compute.instanceGroups.addInstances", + // "parameterOrder": [ + // "project", + // "zone", + // "instanceGroup" + // ], + // "parameters": { + // "instanceGroup": { + // "description": "The name of the instance group where you are adding instances.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/addInstances", + // "request": { + // "$ref": "InstanceGroupsAddInstancesRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instanceGroups.aggregatedList": + +type InstanceGroupsAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// AggregatedList: Retrieves the list of instance groups and sorts them +// by zone. 
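+//
+// Illustrative only ("svc" is an assumed, already-configured *Service):
+// the zone-keyed results can be walked with the Pages helper defined
+// below, which follows nextPageToken automatically:
+//
+//	err := svc.InstanceGroups.AggregatedList("my-project").
+//		Pages(context.Background(), func(page *InstanceGroupAggregatedList) error {
+//			// inspect the current page here; a non-nil error stops iteration
+//			return nil
+//		})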
+func (r *InstanceGroupsService) AggregatedList(project string) *InstanceGroupsAggregatedListCall {
+ c := &InstanceGroupsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.project = project
+ return c
+}
+
+// Filter sets the optional parameter "filter": Sets a filter expression
+// for filtering listed resources, in the form filter={expression}. Your
+// {expression} must be in the format: field_name comparison_string
+// literal_string.
+//
+// The field_name is the name of the field you want to compare. Only
+// atomic field types are supported (string, number, boolean). The
+// comparison_string must be either eq (equals) or ne (not equals). The
+// literal_string is the string value to filter to. The literal value
+// must be valid for the type of field you are filtering by (string,
+// number, boolean). For string fields, the literal value is interpreted
+// as a regular expression using RE2 syntax. The literal value must
+// match the entire field.
+//
+// For example, to filter for instances that do not have a name of
+// example-instance, you would use filter=name ne
+// example-instance.
+//
+// Compute Engine Beta API Only: When filtering in the Beta API, you can
+// also filter on nested fields. For example, you could filter on
+// instances that have set the scheduling.automaticRestart field to
+// true. Use filtering on nested fields to take advantage of labels to
+// organize and search for results based on label values.
+//
+// The Beta API also supports filtering on multiple expressions by
+// providing each separate expression within parentheses. For example,
+// (scheduling.automaticRestart eq true) (zone eq us-central1-f).
+// Multiple expressions are treated as AND expressions, meaning that
+// resources must match all expressions to pass the filters.
+func (c *InstanceGroupsAggregatedListCall) Filter(filter string) *InstanceGroupsAggregatedListCall {
+ c.urlParams_.Set("filter", filter)
+ return c
+}
+
+// MaxResults sets the optional parameter "maxResults": The maximum
+// number of results per page that should be returned. If the number of
+// available results is larger than maxResults, Compute Engine returns a
+// nextPageToken that can be used to get the next page of results in
+// subsequent list requests.
+func (c *InstanceGroupsAggregatedListCall) MaxResults(maxResults int64) *InstanceGroupsAggregatedListCall {
+ c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
+ return c
+}
+
+// PageToken sets the optional parameter "pageToken": Specifies a page
+// token to use. Set pageToken to the nextPageToken returned by a
+// previous list request to get the next page of results.
+func (c *InstanceGroupsAggregatedListCall) PageToken(pageToken string) *InstanceGroupsAggregatedListCall {
+ c.urlParams_.Set("pageToken", pageToken)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *InstanceGroupsAggregatedListCall) Fields(s ...googleapi.Field) *InstanceGroupsAggregatedListCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
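+//
+// For example (a sketch, not generated code; "etag" would be saved from
+// a previous response's ETag header):
+//
+//	list, err := svc.InstanceGroups.AggregatedList("my-project").
+//		IfNoneMatch(etag).
+//		Do()
+//	if googleapi.IsNotModified(err) {
+//		// the server reports no change; keep using the cached list
+//	}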
+func (c *InstanceGroupsAggregatedListCall) IfNoneMatch(entityTag string) *InstanceGroupsAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstanceGroupsAggregatedListCall) Context(ctx context.Context) *InstanceGroupsAggregatedListCall { + c.ctx_ = ctx + return c +} + +func (c *InstanceGroupsAggregatedListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/instanceGroups") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.instanceGroups.aggregatedList" call. +// Exactly one of *InstanceGroupAggregatedList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *InstanceGroupAggregatedList.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstanceGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*InstanceGroupAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &InstanceGroupAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of instance groups and sorts them by zone.", + // "httpMethod": "GET", + // "id": "compute.instanceGroups.aggregatedList", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. 
For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/aggregated/instanceGroups", + // "response": { + // "$ref": "InstanceGroupAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *InstanceGroupsAggregatedListCall) Pages(ctx context.Context, f func(*InstanceGroupAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.instanceGroups.delete": + +type InstanceGroupsDeleteCall struct { + s *Service + project string + zone string + instanceGroup string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Deletes the specified instance group. The instances in the +// group are not deleted. Note that instance group must not belong to a +// backend service. Read Deleting an instance group for more +// information. +func (r *InstanceGroupsService) Delete(project string, zone string, instanceGroup string) *InstanceGroupsDeleteCall { + c := &InstanceGroupsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instanceGroup = instanceGroup + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
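+//
+// For instance (illustrative only; the selected field names are
+// assumed from the Operation schema), requesting a partial response:
+//
+//	op, err := svc.InstanceGroups.
+//		Delete("my-project", "us-central1-f", "my-group").
+//		Fields("name", "status").
+//		Do()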
+func (c *InstanceGroupsDeleteCall) Fields(s ...googleapi.Field) *InstanceGroupsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstanceGroupsDeleteCall) Context(ctx context.Context) *InstanceGroupsDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *InstanceGroupsDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instanceGroup": c.instanceGroup, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.instanceGroups.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified instance group. The instances in the group are not deleted. Note that instance group must not belong to a backend service. 
Read Deleting an instance group for more information.",
+ // "httpMethod": "DELETE",
+ // "id": "compute.instanceGroups.delete",
+ // "parameterOrder": [
+ // "project",
+ // "zone",
+ // "instanceGroup"
+ // ],
+ // "parameters": {
+ // "instanceGroup": {
+ // "description": "The name of the instance group to delete.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "project": {
+ // "description": "Project ID for this request.",
+ // "location": "path",
+ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "zone": {
+ // "description": "The name of the zone where the instance group is located.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}",
+ // "response": {
+ // "$ref": "Operation"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/compute"
+ // ]
+ // }
+
+}
+
+// method id "compute.instanceGroups.get":
+
+type InstanceGroupsGetCall struct {
+ s *Service
+ project string
+ zone string
+ instanceGroup string
+ urlParams_ gensupport.URLParams
+ ifNoneMatch_ string
+ ctx_ context.Context
+}
+
+// Get: Returns the specified instance group. Get a list of available
+// instance groups by making a list() request.
+func (r *InstanceGroupsService) Get(project string, zone string, instanceGroup string) *InstanceGroupsGetCall {
+ c := &InstanceGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.project = project
+ c.zone = zone
+ c.instanceGroup = instanceGroup
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *InstanceGroupsGetCall) Fields(s ...googleapi.Field) *InstanceGroupsGetCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *InstanceGroupsGetCall) IfNoneMatch(entityTag string) *InstanceGroupsGetCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *InstanceGroupsGetCall) Context(ctx context.Context) *InstanceGroupsGetCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *InstanceGroupsGetCall) doRequest(alt string) (*http.Response, error) {
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}")
+ urls += "?"
+ c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instanceGroup": c.instanceGroup, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.instanceGroups.get" call. +// Exactly one of *InstanceGroup or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *InstanceGroup.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroup, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &InstanceGroup{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified instance group. Get a list of available instance groups by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.instanceGroups.get", + // "parameterOrder": [ + // "project", + // "zone", + // "instanceGroup" + // ], + // "parameters": { + // "instanceGroup": { + // "description": "The name of the instance group.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}", + // "response": { + // "$ref": "InstanceGroup" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.instanceGroups.insert": + +type InstanceGroupsInsertCall struct { + s *Service + project string + zone string + instancegroup *InstanceGroup + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Insert: Creates an instance group in the specified project using the +// parameters that are included in the request. 
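+//
+// A hypothetical sketch (the InstanceGroup field names are assumed from
+// the resource schema, not shown in this diff):
+//
+//	ig := &InstanceGroup{Name: "my-group"}
+//	op, err := svc.InstanceGroups.
+//		Insert("my-project", "us-central1-f", ig).
+//		Do()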
+func (r *InstanceGroupsService) Insert(project string, zone string, instancegroup *InstanceGroup) *InstanceGroupsInsertCall { + c := &InstanceGroupsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instancegroup = instancegroup + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstanceGroupsInsertCall) Fields(s ...googleapi.Field) *InstanceGroupsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstanceGroupsInsertCall) Context(ctx context.Context) *InstanceGroupsInsertCall { + c.ctx_ = ctx + return c +} + +func (c *InstanceGroupsInsertCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroup) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.instanceGroups.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates an instance group in the specified project using the parameters that are included in the request.", + // "httpMethod": "POST", + // "id": "compute.instanceGroups.insert", + // "parameterOrder": [ + // "project", + // "zone" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where you want to create the instance group.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instanceGroups", + // "request": { + // "$ref": "InstanceGroup" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instanceGroups.list": + +type InstanceGroupsListCall struct { + s *Service + project string + zone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Retrieves the list of instance groups that are located in the +// specified project and zone. +func (r *InstanceGroupsService) List(project string, zone string) *InstanceGroupsListCall { + c := &InstanceGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use filter=name ne +// example-instance. +// +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. 
+//
+// The Beta API also supports filtering on multiple expressions by
+// providing each separate expression within parentheses. For example,
+// (scheduling.automaticRestart eq true) (zone eq us-central1-f).
+// Multiple expressions are treated as AND expressions, meaning that
+// resources must match all expressions to pass the filters.
+func (c *InstanceGroupsListCall) Filter(filter string) *InstanceGroupsListCall {
+ c.urlParams_.Set("filter", filter)
+ return c
+}
+
+// MaxResults sets the optional parameter "maxResults": The maximum
+// number of results per page that should be returned. If the number of
+// available results is larger than maxResults, Compute Engine returns a
+// nextPageToken that can be used to get the next page of results in
+// subsequent list requests.
+func (c *InstanceGroupsListCall) MaxResults(maxResults int64) *InstanceGroupsListCall {
+ c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
+ return c
+}
+
+// PageToken sets the optional parameter "pageToken": Specifies a page
+// token to use. Set pageToken to the nextPageToken returned by a
+// previous list request to get the next page of results.
+func (c *InstanceGroupsListCall) PageToken(pageToken string) *InstanceGroupsListCall {
+ c.urlParams_.Set("pageToken", pageToken)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *InstanceGroupsListCall) Fields(s ...googleapi.Field) *InstanceGroupsListCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *InstanceGroupsListCall) IfNoneMatch(entityTag string) *InstanceGroupsListCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *InstanceGroupsListCall) Context(ctx context.Context) *InstanceGroupsListCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *InstanceGroupsListCall) doRequest(alt string) (*http.Response, error) {
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "zone": c.zone,
+ })
+ req.Header.Set("User-Agent", c.s.userAgent())
+ if c.ifNoneMatch_ != "" {
+ req.Header.Set("If-None-Match", c.ifNoneMatch_)
+ }
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "compute.instanceGroups.list" call.
+// Exactly one of *InstanceGroupList or error will be non-nil. Any
+// non-2xx status code is an error. Response headers are in either
+// *InstanceGroupList.ServerResponse.Header or (if a response was
+// returned at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
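+//
+// For example (illustrative; "svc" is an assumed configured *Service and
+// the Items field name is taken from the usual list schema):
+//
+//	list, err := svc.InstanceGroups.
+//		List("my-project", "us-central1-f").
+//		MaxResults(100).
+//		Do()
+//	if err == nil {
+//		for _, ig := range list.Items {
+//			_ = ig // each entry describes one instance group in the zone
+//		}
+//	}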
+func (c *InstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*InstanceGroupList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &InstanceGroupList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of instance groups that are located in the specified project and zone.", + // "httpMethod": "GET", + // "id": "compute.instanceGroups.list", + // "parameterOrder": [ + // "project", + // "zone" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instanceGroups", + // "response": { + // "$ref": "InstanceGroupList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *InstanceGroupsListCall) Pages(ctx context.Context, f func(*InstanceGroupList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.instanceGroups.listInstances": + +type InstanceGroupsListInstancesCall struct { + s *Service + project string + zone string + instanceGroup string + instancegroupslistinstancesrequest *InstanceGroupsListInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// ListInstances: Lists the instances in the specified instance group. +func (r *InstanceGroupsService) ListInstances(project string, zone string, instanceGroup string, instancegroupslistinstancesrequest *InstanceGroupsListInstancesRequest) *InstanceGroupsListInstancesCall { + c := &InstanceGroupsListInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instanceGroup = instanceGroup + c.instancegroupslistinstancesrequest = instancegroupslistinstancesrequest + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use filter=name ne +// example-instance. +// +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. 
Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// The Beta API also supports filtering on multiple expressions by +// providing each separate expression within parentheses. For example, +// (scheduling.automaticRestart eq true) (zone eq us-central1-f). +// Multiple expressions are treated as AND expressions, meaning that +// resources must match all expressions to pass the filters. +func (c *InstanceGroupsListInstancesCall) Filter(filter string) *InstanceGroupsListInstancesCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. +func (c *InstanceGroupsListInstancesCall) MaxResults(maxResults int64) *InstanceGroupsListInstancesCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *InstanceGroupsListInstancesCall) PageToken(pageToken string) *InstanceGroupsListInstancesCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstanceGroupsListInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupsListInstancesCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstanceGroupsListInstancesCall) Context(ctx context.Context) *InstanceGroupsListInstancesCall { + c.ctx_ = ctx + return c +} + +func (c *InstanceGroupsListInstancesCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupslistinstancesrequest) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}/listInstances") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instanceGroup": c.instanceGroup, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.instanceGroups.listInstances" call. +// Exactly one of *InstanceGroupsListInstances or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *InstanceGroupsListInstances.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. 
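+//
+// An illustrative sketch (editorial addition; svc and the resource
+// names are placeholders, the request body is left empty, and the Items
+// and Instance field names follow the schemas defined elsewhere in this
+// package):
+//
+//	req := &InstanceGroupsListInstancesRequest{}
+//	res, err := svc.InstanceGroups.ListInstances("my-project", "us-central1-f", "my-group", req).Do()
+//	if err != nil {
+//		// handle the error
+//	}
+//	for _, item := range res.Items {
+//		fmt.Println(item.Instance)
+//	}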
+func (c *InstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) (*InstanceGroupsListInstances, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &InstanceGroupsListInstances{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists the instances in the specified instance group.", + // "httpMethod": "POST", + // "id": "compute.instanceGroups.listInstances", + // "parameterOrder": [ + // "project", + // "zone", + // "instanceGroup" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "instanceGroup": { + // "description": "The name of the instance group from which you want to generate a list of included instances.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/listInstances", + // "request": { + // "$ref": "InstanceGroupsListInstancesRequest" + // }, + // "response": { + // "$ref": "InstanceGroupsListInstances" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.instanceGroups.removeInstances": + +type InstanceGroupsRemoveInstancesCall struct { + s *Service + project string + zone string + instanceGroup string + instancegroupsremoveinstancesrequest *InstanceGroupsRemoveInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// RemoveInstances: Removes one or more instances from the specified +// instance group, but does not delete those instances. +func (r *InstanceGroupsService) RemoveInstances(project string, zone string, instanceGroup string, instancegroupsremoveinstancesrequest *InstanceGroupsRemoveInstancesRequest) *InstanceGroupsRemoveInstancesCall { + c := &InstanceGroupsRemoveInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instanceGroup = instanceGroup + c.instancegroupsremoveinstancesrequest = instancegroupsremoveinstancesrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstanceGroupsRemoveInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupsRemoveInstancesCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstanceGroupsRemoveInstancesCall) Context(ctx context.Context) *InstanceGroupsRemoveInstancesCall { + c.ctx_ = ctx + return c +} + +func (c *InstanceGroupsRemoveInstancesCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupsremoveinstancesrequest) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}/removeInstances") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instanceGroup": c.instanceGroup, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.instanceGroups.removeInstances" call. 
+// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceGroupsRemoveInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Removes one or more instances from the specified instance group, but does not delete those instances.", + // "httpMethod": "POST", + // "id": "compute.instanceGroups.removeInstances", + // "parameterOrder": [ + // "project", + // "zone", + // "instanceGroup" + // ], + // "parameters": { + // "instanceGroup": { + // "description": "The name of the instance group where the specified instances will be removed.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/removeInstances", + // "request": { + // "$ref": "InstanceGroupsRemoveInstancesRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instanceGroups.setNamedPorts": + +type InstanceGroupsSetNamedPortsCall struct { + s *Service + project string + zone string + instanceGroup string + instancegroupssetnamedportsrequest *InstanceGroupsSetNamedPortsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// SetNamedPorts: Sets the named ports for the specified instance group. +func (r *InstanceGroupsService) SetNamedPorts(project string, zone string, instanceGroup string, instancegroupssetnamedportsrequest *InstanceGroupsSetNamedPortsRequest) *InstanceGroupsSetNamedPortsCall { + c := &InstanceGroupsSetNamedPortsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instanceGroup = instanceGroup + c.instancegroupssetnamedportsrequest = instancegroupssetnamedportsrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
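+//
+// For example (editorial sketch, where call is an already-built
+// *InstanceGroupsSetNamedPortsCall), to retrieve only the name and
+// status of the resulting Operation:
+//
+//	op, err := call.Fields("name", "status").Do()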
+func (c *InstanceGroupsSetNamedPortsCall) Fields(s ...googleapi.Field) *InstanceGroupsSetNamedPortsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstanceGroupsSetNamedPortsCall) Context(ctx context.Context) *InstanceGroupsSetNamedPortsCall { + c.ctx_ = ctx + return c +} + +func (c *InstanceGroupsSetNamedPortsCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupssetnamedportsrequest) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}/setNamedPorts") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instanceGroup": c.instanceGroup, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.instanceGroups.setNamedPorts" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the named ports for the specified instance group.", + // "httpMethod": "POST", + // "id": "compute.instanceGroups.setNamedPorts", + // "parameterOrder": [ + // "project", + // "zone", + // "instanceGroup" + // ], + // "parameters": { + // "instanceGroup": { + // "description": "The name of the instance group where the named ports are updated.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/setNamedPorts", + // "request": { + // "$ref": "InstanceGroupsSetNamedPortsRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instanceTemplates.delete": + +type InstanceTemplatesDeleteCall struct { + s *Service + project string + instanceTemplate string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Deletes the specified instance template. If you delete an +// instance template that is being referenced from another instance +// group, the instance group will not be able to create or recreate +// virtual machine instances. Deleting an instance template is permanent +// and cannot be undone. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instanceTemplates/delete +func (r *InstanceTemplatesService) Delete(project string, instanceTemplate string) *InstanceTemplatesDeleteCall { + c := &InstanceTemplatesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.instanceTemplate = instanceTemplate + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstanceTemplatesDeleteCall) Fields(s ...googleapi.Field) *InstanceTemplatesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
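+//
+// For example (editorial sketch; svc is a placeholder *Service, and
+// this vendored snapshot uses golang.org/x/net/context), to abandon the
+// delete if it has not completed within ten seconds:
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+//	defer cancel()
+//	op, err := svc.InstanceTemplates.Delete("my-project", "my-template").Context(ctx).Do()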
+func (c *InstanceTemplatesDeleteCall) Context(ctx context.Context) *InstanceTemplatesDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *InstanceTemplatesDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates/{instanceTemplate}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "instanceTemplate": c.instanceTemplate, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.instanceTemplates.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified instance template. If you delete an instance template that is being referenced from another instance group, the instance group will not be able to create or recreate virtual machine instances. Deleting an instance template is permanent and cannot be undone.", + // "httpMethod": "DELETE", + // "id": "compute.instanceTemplates.delete", + // "parameterOrder": [ + // "project", + // "instanceTemplate" + // ], + // "parameters": { + // "instanceTemplate": { + // "description": "The name of the instance template to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/instanceTemplates/{instanceTemplate}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instanceTemplates.get": + +type InstanceTemplatesGetCall struct { + s *Service + project string + instanceTemplate string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// Get: Returns the specified instance template. Get a list of available +// instance templates by making a list() request. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/instanceTemplates/get
+func (r *InstanceTemplatesService) Get(project string, instanceTemplate string) *InstanceTemplatesGetCall {
+	c := &InstanceTemplatesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+	c.project = project
+	c.instanceTemplate = instanceTemplate
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *InstanceTemplatesGetCall) Fields(s ...googleapi.Field) *InstanceTemplatesGetCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *InstanceTemplatesGetCall) IfNoneMatch(entityTag string) *InstanceTemplatesGetCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *InstanceTemplatesGetCall) Context(ctx context.Context) *InstanceTemplatesGetCall {
+	c.ctx_ = ctx
+	return c
+}
+
+func (c *InstanceTemplatesGetCall) doRequest(alt string) (*http.Response, error) {
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates/{instanceTemplate}")
+	urls += "?" + c.urlParams_.Encode()
+	req, _ := http.NewRequest("GET", urls, body)
+	googleapi.Expand(req.URL, map[string]string{
+		"project":          c.project,
+		"instanceTemplate": c.instanceTemplate,
+	})
+	req.Header.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		req.Header.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	if c.ctx_ != nil {
+		return ctxhttp.Do(c.ctx_, c.s.client, req)
+	}
+	return c.s.client.Do(req)
+}
+
+// Do executes the "compute.instanceTemplates.get" call.
+// Exactly one of *InstanceTemplate or error will be non-nil. Any
+// non-2xx status code is an error. Response headers are in either
+// *InstanceTemplate.ServerResponse.Header or (if a response was
+// returned at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
+func (c *InstanceTemplatesGetCall) Do(opts ...googleapi.CallOption) (*InstanceTemplate, error) {
+	gensupport.SetOptions(c.urlParams_, opts...)
+	res, err := c.doRequest("json")
+	if res != nil && res.StatusCode == http.StatusNotModified {
+		if res.Body != nil {
+			res.Body.Close()
+		}
+		return nil, &googleapi.Error{
+			Code:   res.StatusCode,
+			Header: res.Header,
+		}
+	}
+	if err != nil {
+		return nil, err
+	}
+	defer googleapi.CloseBody(res)
+	if err := googleapi.CheckResponse(res); err != nil {
+		return nil, err
+	}
+	ret := &InstanceTemplate{
+		ServerResponse: googleapi.ServerResponse{
+			Header:         res.Header,
+			HTTPStatusCode: res.StatusCode,
+		},
+	}
+	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+	// {
+	//   "description": "Returns the specified instance template.
Get a list of available instance templates by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.instanceTemplates.get", + // "parameterOrder": [ + // "project", + // "instanceTemplate" + // ], + // "parameters": { + // "instanceTemplate": { + // "description": "The name of the instance template.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/instanceTemplates/{instanceTemplate}", + // "response": { + // "$ref": "InstanceTemplate" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.instanceTemplates.insert": + +type InstanceTemplatesInsertCall struct { + s *Service + project string + instancetemplate *InstanceTemplate + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Insert: Creates an instance template in the specified project using +// the data that is included in the request. If you are creating a new +// template to update an existing instance group, your new instance +// template must use the same network or, if applicable, the same +// subnetwork as the original template. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instanceTemplates/insert +func (r *InstanceTemplatesService) Insert(project string, instancetemplate *InstanceTemplate) *InstanceTemplatesInsertCall { + c := &InstanceTemplatesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.instancetemplate = instancetemplate + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstanceTemplatesInsertCall) Fields(s ...googleapi.Field) *InstanceTemplatesInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstanceTemplatesInsertCall) Context(ctx context.Context) *InstanceTemplatesInsertCall { + c.ctx_ = ctx + return c +} + +func (c *InstanceTemplatesInsertCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancetemplate) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.instanceTemplates.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. 
Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates an instance template in the specified project using the data that is included in the request. If you are creating a new template to update an existing instance group, your new instance template must use the same network or, if applicable, the same subnetwork as the original template.", + // "httpMethod": "POST", + // "id": "compute.instanceTemplates.insert", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/instanceTemplates", + // "request": { + // "$ref": "InstanceTemplate" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instanceTemplates.list": + +type InstanceTemplatesListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Retrieves a list of instance templates that are contained +// within the specified project and zone. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instanceTemplates/list +func (r *InstanceTemplatesService) List(project string) *InstanceTemplatesListCall { + c := &InstanceTemplatesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. 
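+//
+// As an illustrative sketch (editorial addition; svc and the names are
+// placeholders), the whole expression is passed as one string through
+// this builder method:
+//
+//	call := svc.InstanceTemplates.List("my-project").Filter("name eq my-template-.*")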
+//
+// For example, to filter for instances that do not have a name of
+// example-instance, you would use filter=name ne
+// example-instance.
+//
+// Compute Engine Beta API Only: When filtering in the Beta API, you can
+// also filter on nested fields. For example, you could filter on
+// instances that have set the scheduling.automaticRestart field to
+// true. Use filtering on nested fields to take advantage of labels to
+// organize and search for results based on label values.
+//
+// The Beta API also supports filtering on multiple expressions by
+// providing each separate expression within parentheses. For example,
+// (scheduling.automaticRestart eq true) (zone eq us-central1-f).
+// Multiple expressions are treated as AND expressions, meaning that
+// resources must match all expressions to pass the filters.
+func (c *InstanceTemplatesListCall) Filter(filter string) *InstanceTemplatesListCall {
+	c.urlParams_.Set("filter", filter)
+	return c
+}
+
+// MaxResults sets the optional parameter "maxResults": The maximum
+// number of results per page that should be returned. If the number of
+// available results is larger than maxResults, Compute Engine returns a
+// nextPageToken that can be used to get the next page of results in
+// subsequent list requests.
+func (c *InstanceTemplatesListCall) MaxResults(maxResults int64) *InstanceTemplatesListCall {
+	c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
+	return c
+}
+
+// PageToken sets the optional parameter "pageToken": Specifies a page
+// token to use. Set pageToken to the nextPageToken returned by a
+// previous list request to get the next page of results.
+func (c *InstanceTemplatesListCall) PageToken(pageToken string) *InstanceTemplatesListCall {
+	c.urlParams_.Set("pageToken", pageToken)
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *InstanceTemplatesListCall) Fields(s ...googleapi.Field) *InstanceTemplatesListCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *InstanceTemplatesListCall) IfNoneMatch(entityTag string) *InstanceTemplatesListCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *InstanceTemplatesListCall) Context(ctx context.Context) *InstanceTemplatesListCall {
+	c.ctx_ = ctx
+	return c
+}
+
+func (c *InstanceTemplatesListCall) doRequest(alt string) (*http.Response, error) {
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates")
+	urls += "?" + c.urlParams_.Encode()
+	req, _ := http.NewRequest("GET", urls, body)
+	googleapi.Expand(req.URL, map[string]string{
+		"project": c.project,
+	})
+	req.Header.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		req.Header.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	if c.ctx_ != nil {
+		return ctxhttp.Do(c.ctx_, c.s.client, req)
+	}
+	return c.s.client.Do(req)
+}
+
+// Do executes the "compute.instanceTemplates.list" call.
+// Exactly one of *InstanceTemplateList or error will be non-nil. Any
+// non-2xx status code is an error. Response headers are in either
+// *InstanceTemplateList.ServerResponse.Header or (if a response was
+// returned at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
+func (c *InstanceTemplatesListCall) Do(opts ...googleapi.CallOption) (*InstanceTemplateList, error) {
+	gensupport.SetOptions(c.urlParams_, opts...)
+	res, err := c.doRequest("json")
+	if res != nil && res.StatusCode == http.StatusNotModified {
+		if res.Body != nil {
+			res.Body.Close()
+		}
+		return nil, &googleapi.Error{
+			Code:   res.StatusCode,
+			Header: res.Header,
+		}
+	}
+	if err != nil {
+		return nil, err
+	}
+	defer googleapi.CloseBody(res)
+	if err := googleapi.CheckResponse(res); err != nil {
+		return nil, err
+	}
+	ret := &InstanceTemplateList{
+		ServerResponse: googleapi.ServerResponse{
+			Header:         res.Header,
+			HTTPStatusCode: res.StatusCode,
+		},
+	}
+	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+	// {
+	//   "description": "Retrieves a list of instance templates that are contained within the specified project and zone.",
+	//   "httpMethod": "GET",
+	//   "id": "compute.instanceTemplates.list",
+	//   "parameterOrder": [
+	//     "project"
+	//   ],
+	//   "parameters": {
+	//     "filter": {
+	//       "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.",
+	//       "location": "query",
+	//       "type": "string"
+	//     },
+	//     "maxResults": {
+	//       "default": "500",
+	//       "description": "The maximum number of results per page that should be returned.
If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/instanceTemplates", + // "response": { + // "$ref": "InstanceTemplateList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *InstanceTemplatesListCall) Pages(ctx context.Context, f func(*InstanceTemplateList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.instances.addAccessConfig": + +type InstancesAddAccessConfigCall struct { + s *Service + project string + zone string + instance string + accessconfig *AccessConfig + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// AddAccessConfig: Adds an access config to an instance's network +// interface. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/addAccessConfig +func (r *InstancesService) AddAccessConfig(project string, zone string, instance string, networkInterface string, accessconfig *AccessConfig) *InstancesAddAccessConfigCall { + c := &InstancesAddAccessConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance + c.urlParams_.Set("networkInterface", networkInterface) + c.accessconfig = accessconfig + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesAddAccessConfigCall) Fields(s ...googleapi.Field) *InstancesAddAccessConfigCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
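+//
+// An illustrative sketch (editorial addition; svc, the project, zone,
+// and instance names are placeholders) that requests an ephemeral
+// external IP on the first network interface, "nic0":
+//
+//	ac := &AccessConfig{Name: "External NAT", Type: "ONE_TO_ONE_NAT"}
+//	op, err := svc.Instances.AddAccessConfig("my-project", "us-central1-f", "my-instance", "nic0", ac).Do()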
+func (c *InstancesAddAccessConfigCall) Context(ctx context.Context) *InstancesAddAccessConfigCall { + c.ctx_ = ctx + return c +} + +func (c *InstancesAddAccessConfigCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.accessconfig) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/addAccessConfig") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instance": c.instance, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.instances.addAccessConfig" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesAddAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Adds an access config to an instance's network interface.", + // "httpMethod": "POST", + // "id": "compute.instances.addAccessConfig", + // "parameterOrder": [ + // "project", + // "zone", + // "instance", + // "networkInterface" + // ], + // "parameters": { + // "instance": { + // "description": "The instance name for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "networkInterface": { + // "description": "The name of the network interface to add to this instance.", + // "location": "query", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instances/{instance}/addAccessConfig", + // "request": { + // "$ref": "AccessConfig" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + 
// "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instances.aggregatedList": + +type InstancesAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// AggregatedList: Retrieves aggregated list of instances. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/aggregatedList +func (r *InstancesService) AggregatedList(project string) *InstancesAggregatedListCall { + c := &InstancesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use filter=name ne +// example-instance. +// +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// The Beta API also supports filtering on multiple expressions by +// providing each separate expression within parentheses. For example, +// (scheduling.automaticRestart eq true) (zone eq us-central1-f). +// Multiple expressions are treated as AND expressions, meaning that +// resources must match all expressions to pass the filters. +func (c *InstancesAggregatedListCall) Filter(filter string) *InstancesAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. +func (c *InstancesAggregatedListCall) MaxResults(maxResults int64) *InstancesAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *InstancesAggregatedListCall) PageToken(pageToken string) *InstancesAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *InstancesAggregatedListCall) Fields(s ...googleapi.Field) *InstancesAggregatedListCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *InstancesAggregatedListCall) IfNoneMatch(entityTag string) *InstancesAggregatedListCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *InstancesAggregatedListCall) Context(ctx context.Context) *InstancesAggregatedListCall {
+	c.ctx_ = ctx
+	return c
+}
+
+func (c *InstancesAggregatedListCall) doRequest(alt string) (*http.Response, error) {
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/instances")
+	urls += "?" + c.urlParams_.Encode()
+	req, _ := http.NewRequest("GET", urls, body)
+	googleapi.Expand(req.URL, map[string]string{
+		"project": c.project,
+	})
+	req.Header.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		req.Header.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	if c.ctx_ != nil {
+		return ctxhttp.Do(c.ctx_, c.s.client, req)
+	}
+	return c.s.client.Do(req)
+}
+
+// Do executes the "compute.instances.aggregatedList" call.
+// Exactly one of *InstanceAggregatedList or error will be non-nil. Any
+// non-2xx status code is an error. Response headers are in either
+// *InstanceAggregatedList.ServerResponse.Header or (if a response was
+// returned at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
+func (c *InstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*InstanceAggregatedList, error) {
+	gensupport.SetOptions(c.urlParams_, opts...)
+	res, err := c.doRequest("json")
+	if res != nil && res.StatusCode == http.StatusNotModified {
+		if res.Body != nil {
+			res.Body.Close()
+		}
+		return nil, &googleapi.Error{
+			Code:   res.StatusCode,
+			Header: res.Header,
+		}
+	}
+	if err != nil {
+		return nil, err
+	}
+	defer googleapi.CloseBody(res)
+	if err := googleapi.CheckResponse(res); err != nil {
+		return nil, err
+	}
+	ret := &InstanceAggregatedList{
+		ServerResponse: googleapi.ServerResponse{
+			Header:         res.Header,
+			HTTPStatusCode: res.StatusCode,
+		},
+	}
+	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+	// {
+	//   "description": "Retrieves aggregated list of instances.",
+	//   "httpMethod": "GET",
+	//   "id": "compute.instances.aggregatedList",
+	//   "parameterOrder": [
+	//     "project"
+	//   ],
+	//   "parameters": {
+	//     "filter": {
+	//       "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to.
The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/aggregated/instances", + // "response": { + // "$ref": "InstanceAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *InstancesAggregatedListCall) Pages(ctx context.Context, f func(*InstanceAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.instances.attachDisk": + +type InstancesAttachDiskCall struct { + s *Service + project string + zone string + instance string + attacheddisk *AttachedDisk + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// AttachDisk: Attaches a Disk resource to an instance. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/attachDisk +func (r *InstancesService) AttachDisk(project string, zone string, instance string, attacheddisk *AttachedDisk) *InstancesAttachDiskCall { + c := &InstancesAttachDiskCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance + c.attacheddisk = attacheddisk + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesAttachDiskCall) Fields(s ...googleapi.Field) *InstancesAttachDiskCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstancesAttachDiskCall) Context(ctx context.Context) *InstancesAttachDiskCall { + c.ctx_ = ctx + return c +} + +func (c *InstancesAttachDiskCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.attacheddisk) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/attachDisk") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instance": c.instance, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.instances.attachDisk" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesAttachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
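+	// SetOptions folds any per-call options (such as Fields) into the query
+	// parameters. doRequest then POSTs the JSON-encoded AttachedDisk, and
+	// the Operation decoded below is zone-scoped; callers typically poll it
+	// until it reports DONE before relying on the disk being attached.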
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Attaches a Disk resource to an instance.", + // "httpMethod": "POST", + // "id": "compute.instances.attachDisk", + // "parameterOrder": [ + // "project", + // "zone", + // "instance" + // ], + // "parameters": { + // "instance": { + // "description": "The instance name for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instances/{instance}/attachDisk", + // "request": { + // "$ref": "AttachedDisk" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instances.delete": + +type InstancesDeleteCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Deletes the specified Instance resource. For more +// information, see Stopping or Deleting an Instance. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/delete +func (r *InstancesService) Delete(project string, zone string, instance string) *InstancesDeleteCall { + c := &InstancesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesDeleteCall) Fields(s ...googleapi.Field) *InstancesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstancesDeleteCall) Context(ctx context.Context) *InstancesDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *InstancesDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instance": c.instance, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.instances.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified Instance resource. For more information, see Stopping or Deleting an Instance.", + // "httpMethod": "DELETE", + // "id": "compute.instances.delete", + // "parameterOrder": [ + // "project", + // "zone", + // "instance" + // ], + // "parameters": { + // "instance": { + // "description": "Name of the instance resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instances/{instance}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instances.deleteAccessConfig": + +type InstancesDeleteAccessConfigCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// DeleteAccessConfig: Deletes an access config from an instance's +// network interface. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/deleteAccessConfig +func (r *InstancesService) DeleteAccessConfig(project string, zone string, instance string, accessConfig string, networkInterface string) *InstancesDeleteAccessConfigCall { + c := &InstancesDeleteAccessConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance + c.urlParams_.Set("accessConfig", accessConfig) + c.urlParams_.Set("networkInterface", networkInterface) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesDeleteAccessConfigCall) Fields(s ...googleapi.Field) *InstancesDeleteAccessConfigCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstancesDeleteAccessConfigCall) Context(ctx context.Context) *InstancesDeleteAccessConfigCall { + c.ctx_ = ctx + return c +} + +func (c *InstancesDeleteAccessConfigCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/deleteAccessConfig") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instance": c.instance, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.instances.deleteAccessConfig" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesDeleteAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
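+	// This call carries no request body: DeleteAccessConfig recorded the
+	// required accessConfig and networkInterface values as query
+	// parameters, so the POST URL alone identifies the config to remove.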
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes an access config from an instance's network interface.", + // "httpMethod": "POST", + // "id": "compute.instances.deleteAccessConfig", + // "parameterOrder": [ + // "project", + // "zone", + // "instance", + // "accessConfig", + // "networkInterface" + // ], + // "parameters": { + // "accessConfig": { + // "description": "The name of the access config to delete.", + // "location": "query", + // "required": true, + // "type": "string" + // }, + // "instance": { + // "description": "The instance name for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "networkInterface": { + // "description": "The name of the network interface.", + // "location": "query", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instances/{instance}/deleteAccessConfig", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instances.detachDisk": + +type InstancesDetachDiskCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// DetachDisk: Detaches a disk from an instance. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/detachDisk +func (r *InstancesService) DetachDisk(project string, zone string, instance string, deviceName string) *InstancesDetachDiskCall { + c := &InstancesDetachDiskCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance + c.urlParams_.Set("deviceName", deviceName) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesDetachDiskCall) Fields(s ...googleapi.Field) *InstancesDetachDiskCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *InstancesDetachDiskCall) Context(ctx context.Context) *InstancesDetachDiskCall { + c.ctx_ = ctx + return c +} + +func (c *InstancesDetachDiskCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/detachDisk") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instance": c.instance, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.instances.detachDisk" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesDetachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Detaches a disk from an instance.", + // "httpMethod": "POST", + // "id": "compute.instances.detachDisk", + // "parameterOrder": [ + // "project", + // "zone", + // "instance", + // "deviceName" + // ], + // "parameters": { + // "deviceName": { + // "description": "Disk device name to detach.", + // "location": "query", + // "pattern": "\\w[\\w.-]{0,254}", + // "required": true, + // "type": "string" + // }, + // "instance": { + // "description": "Instance name.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instances/{instance}/detachDisk", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instances.get": + +type InstancesGetCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// Get: Returns the specified Instance 
resource. Get a list of available
+// instances by making a list() request.
+// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/get
+func (r *InstancesService) Get(project string, zone string, instance string) *InstancesGetCall {
+	c := &InstancesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+	c.project = project
+	c.zone = zone
+	c.instance = instance
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *InstancesGetCall) Fields(s ...googleapi.Field) *InstancesGetCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *InstancesGetCall) IfNoneMatch(entityTag string) *InstancesGetCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *InstancesGetCall) Context(ctx context.Context) *InstancesGetCall {
+	c.ctx_ = ctx
+	return c
+}
+
+func (c *InstancesGetCall) doRequest(alt string) (*http.Response, error) {
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}")
+	urls += "?" + c.urlParams_.Encode()
+	req, _ := http.NewRequest("GET", urls, body)
+	googleapi.Expand(req.URL, map[string]string{
+		"project":  c.project,
+		"zone":     c.zone,
+		"instance": c.instance,
+	})
+	req.Header.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		req.Header.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	if c.ctx_ != nil {
+		return ctxhttp.Do(c.ctx_, c.s.client, req)
+	}
+	return c.s.client.Do(req)
+}
+
+// Do executes the "compute.instances.get" call.
+// Exactly one of *Instance or error will be non-nil. Any non-2xx status
+// code is an error. Response headers are in either
+// *Instance.ServerResponse.Header or (if a response was returned at
+// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
+// to check whether the returned error was because
+// http.StatusNotModified was returned.
+func (c *InstancesGetCall) Do(opts ...googleapi.CallOption) (*Instance, error) {
+	gensupport.SetOptions(c.urlParams_, opts...)
+	res, err := c.doRequest("json")
+	if res != nil && res.StatusCode == http.StatusNotModified {
+		if res.Body != nil {
+			res.Body.Close()
+		}
+		return nil, &googleapi.Error{
+			Code:   res.StatusCode,
+			Header: res.Header,
+		}
+	}
+	if err != nil {
+		return nil, err
+	}
+	defer googleapi.CloseBody(res)
+	if err := googleapi.CheckResponse(res); err != nil {
+		return nil, err
+	}
+	ret := &Instance{
+		ServerResponse: googleapi.ServerResponse{
+			Header:         res.Header,
+			HTTPStatusCode: res.StatusCode,
+		},
+	}
+	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+	// {
+	//   "description": "Returns the specified Instance resource. Get a list of available instances by making a list() request.",
+	//   "httpMethod": "GET",
+	//   "id": "compute.instances.get",
+	//   "parameterOrder": [
+	//     "project",
+	//     "zone",
+	//     "instance"
+	//   ],
+	//   "parameters": {
+	//     "instance": {
+	//       "description": "Name of the instance resource to return.",
+	//       "location": "path",
+	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+	//       "required": true,
+	//       "type": "string"
+	//     },
+	//     "project": {
+	//       "description": "Project ID for this request.",
+	//       "location": "path",
+	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+	//       "required": true,
+	//       "type": "string"
+	//     },
+	//     "zone": {
+	//       "description": "The name of the zone for this request.",
+	//       "location": "path",
+	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+	//       "required": true,
+	//       "type": "string"
+	//     }
+	//   },
+	//   "path": "{project}/zones/{zone}/instances/{instance}",
+	//   "response": {
+	//     "$ref": "Instance"
+	//   },
+	//   "scopes": [
+	//     "https://www.googleapis.com/auth/cloud-platform",
+	//     "https://www.googleapis.com/auth/compute",
+	//     "https://www.googleapis.com/auth/compute.readonly"
+	//   ]
+	// }
+
+}
+
+// method id "compute.instances.getSerialPortOutput":
+
+type InstancesGetSerialPortOutputCall struct {
+	s            *Service
+	project      string
+	zone         string
+	instance     string
+	urlParams_   gensupport.URLParams
+	ifNoneMatch_ string
+	ctx_         context.Context
+}
+
+// GetSerialPortOutput: Returns the specified instance's serial port
+// output.
+// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/getSerialPortOutput
+func (r *InstancesService) GetSerialPortOutput(project string, zone string, instance string) *InstancesGetSerialPortOutputCall {
+	c := &InstancesGetSerialPortOutputCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+	c.project = project
+	c.zone = zone
+	c.instance = instance
+	return c
+}
+
+// Port sets the optional parameter "port": Specifies which COM or
+// serial port to retrieve data from.
+func (c *InstancesGetSerialPortOutputCall) Port(port int64) *InstancesGetSerialPortOutputCall {
+	c.urlParams_.Set("port", fmt.Sprint(port))
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *InstancesGetSerialPortOutputCall) Fields(s ...googleapi.Field) *InstancesGetSerialPortOutputCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *InstancesGetSerialPortOutputCall) IfNoneMatch(entityTag string) *InstancesGetSerialPortOutputCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *InstancesGetSerialPortOutputCall) Context(ctx context.Context) *InstancesGetSerialPortOutputCall { + c.ctx_ = ctx + return c +} + +func (c *InstancesGetSerialPortOutputCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/serialPort") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instance": c.instance, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.instances.getSerialPortOutput" call. +// Exactly one of *SerialPortOutput or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *SerialPortOutput.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstancesGetSerialPortOutputCall) Do(opts ...googleapi.CallOption) (*SerialPortOutput, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &SerialPortOutput{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified instance's serial port output.", + // "httpMethod": "GET", + // "id": "compute.instances.getSerialPortOutput", + // "parameterOrder": [ + // "project", + // "zone", + // "instance" + // ], + // "parameters": { + // "instance": { + // "description": "Name of the instance scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "port": { + // "default": "1", + // "description": "Specifies which COM or serial port to retrieve data from.", + // "format": "int32", + // "location": "query", + // "maximum": "4", + // "minimum": "1", + // "type": "integer" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instances/{instance}/serialPort", + // "response": { + // "$ref": "SerialPortOutput" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // 
"https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.instances.insert": + +type InstancesInsertCall struct { + s *Service + project string + zone string + instance *Instance + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Insert: Creates an instance resource in the specified project using +// the data included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/insert +func (r *InstancesService) Insert(project string, zone string, instance *Instance) *InstancesInsertCall { + c := &InstancesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesInsertCall) Fields(s ...googleapi.Field) *InstancesInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstancesInsertCall) Context(ctx context.Context) *InstancesInsertCall { + c.ctx_ = ctx + return c +} + +func (c *InstancesInsertCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instance) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.instances.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates an instance resource in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.instances.insert", + // "parameterOrder": [ + // "project", + // "zone" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instances", + // "request": { + // "$ref": "Instance" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instances.list": + +type InstancesListCall struct { + s *Service + project string + zone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Retrieves the list of instances contained within the specified +// zone. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/list +func (r *InstancesService) List(project string, zone string) *InstancesListCall { + c := &InstancesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use filter=name ne +// example-instance. +// +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. 
+//
+// The Beta API also supports filtering on multiple expressions by
+// providing each separate expression within parentheses. For example,
+// (scheduling.automaticRestart eq true) (zone eq us-central1-f).
+// Multiple expressions are treated as AND expressions, meaning that
+// resources must match all expressions to pass the filters.
+func (c *InstancesListCall) Filter(filter string) *InstancesListCall {
+	c.urlParams_.Set("filter", filter)
+	return c
+}
+
+// MaxResults sets the optional parameter "maxResults": The maximum
+// number of results per page that should be returned. If the number of
+// available results is larger than maxResults, Compute Engine returns a
+// nextPageToken that can be used to get the next page of results in
+// subsequent list requests.
+func (c *InstancesListCall) MaxResults(maxResults int64) *InstancesListCall {
+	c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
+	return c
+}
+
+// PageToken sets the optional parameter "pageToken": Specifies a page
+// token to use. Set pageToken to the nextPageToken returned by a
+// previous list request to get the next page of results.
+func (c *InstancesListCall) PageToken(pageToken string) *InstancesListCall {
+	c.urlParams_.Set("pageToken", pageToken)
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *InstancesListCall) Fields(s ...googleapi.Field) *InstancesListCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *InstancesListCall) IfNoneMatch(entityTag string) *InstancesListCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *InstancesListCall) Context(ctx context.Context) *InstancesListCall {
+	c.ctx_ = ctx
+	return c
+}
+
+func (c *InstancesListCall) doRequest(alt string) (*http.Response, error) {
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances")
+	urls += "?" + c.urlParams_.Encode()
+	req, _ := http.NewRequest("GET", urls, body)
+	googleapi.Expand(req.URL, map[string]string{
+		"project": c.project,
+		"zone":    c.zone,
+	})
+	req.Header.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		req.Header.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	if c.ctx_ != nil {
+		return ctxhttp.Do(c.ctx_, c.s.client, req)
+	}
+	return c.s.client.Do(req)
+}
+
+// Do executes the "compute.instances.list" call.
+// Exactly one of *InstanceList or error will be non-nil. Any non-2xx
+// status code is an error. Response headers are in either
+// *InstanceList.ServerResponse.Header or (if a response was returned at
+// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
+// to check whether the returned error was because
+// http.StatusNotModified was returned.
+func (c *InstancesListCall) Do(opts ...googleapi.CallOption) (*InstanceList, error) {
+	gensupport.SetOptions(c.urlParams_, opts...)
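+	// Each call returns at most maxResults items (the API default is 500,
+	// per the metadata below). A non-empty NextPageToken on the decoded
+	// InstanceList signals further pages; the Pages helper defined after
+	// this method automates that loop.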
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &InstanceList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of instances contained within the specified zone.", + // "httpMethod": "GET", + // "id": "compute.instances.list", + // "parameterOrder": [ + // "project", + // "zone" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instances", + // "response": { + // "$ref": "InstanceList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *InstancesListCall) Pages(ctx context.Context, f func(*InstanceList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.instances.reset": + +type InstancesResetCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Reset: Performs a hard reset on the instance. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/reset +func (r *InstancesService) Reset(project string, zone string, instance string) *InstancesResetCall { + c := &InstancesResetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesResetCall) Fields(s ...googleapi.Field) *InstancesResetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstancesResetCall) Context(ctx context.Context) *InstancesResetCall { + c.ctx_ = ctx + return c +} + +func (c *InstancesResetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/reset") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instance": c.instance, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.instances.reset" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. 
Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesResetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Performs a hard reset on the instance.", + // "httpMethod": "POST", + // "id": "compute.instances.reset", + // "parameterOrder": [ + // "project", + // "zone", + // "instance" + // ], + // "parameters": { + // "instance": { + // "description": "Name of the instance scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instances/{instance}/reset", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instances.setDiskAutoDelete": + +type InstancesSetDiskAutoDeleteCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// SetDiskAutoDelete: Sets the auto-delete flag for a disk attached to +// an instance. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/setDiskAutoDelete +func (r *InstancesService) SetDiskAutoDelete(project string, zone string, instance string, autoDelete bool, deviceName string) *InstancesSetDiskAutoDeleteCall { + c := &InstancesSetDiskAutoDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance + c.urlParams_.Set("autoDelete", fmt.Sprint(autoDelete)) + c.urlParams_.Set("deviceName", deviceName) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesSetDiskAutoDeleteCall) Fields(s ...googleapi.Field) *InstancesSetDiskAutoDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *InstancesSetDiskAutoDeleteCall) Context(ctx context.Context) *InstancesSetDiskAutoDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *InstancesSetDiskAutoDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setDiskAutoDelete") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instance": c.instance, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.instances.setDiskAutoDelete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesSetDiskAutoDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the auto-delete flag for a disk attached to an instance.", + // "httpMethod": "POST", + // "id": "compute.instances.setDiskAutoDelete", + // "parameterOrder": [ + // "project", + // "zone", + // "instance", + // "autoDelete", + // "deviceName" + // ], + // "parameters": { + // "autoDelete": { + // "description": "Whether to auto-delete the disk when the instance is deleted.", + // "location": "query", + // "required": true, + // "type": "boolean" + // }, + // "deviceName": { + // "description": "The device name of the disk to modify.", + // "location": "query", + // "pattern": "\\w[\\w.-]{0,254}", + // "required": true, + // "type": "string" + // }, + // "instance": { + // "description": "The instance name.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instances/{instance}/setDiskAutoDelete", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // 
"https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instances.setMachineType": + +type InstancesSetMachineTypeCall struct { + s *Service + project string + zone string + instance string + instancessetmachinetyperequest *InstancesSetMachineTypeRequest + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// SetMachineType: Changes the machine type for a stopped instance to +// the machine type specified in the request. +func (r *InstancesService) SetMachineType(project string, zone string, instance string, instancessetmachinetyperequest *InstancesSetMachineTypeRequest) *InstancesSetMachineTypeCall { + c := &InstancesSetMachineTypeCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance + c.instancessetmachinetyperequest = instancessetmachinetyperequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesSetMachineTypeCall) Fields(s ...googleapi.Field) *InstancesSetMachineTypeCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstancesSetMachineTypeCall) Context(ctx context.Context) *InstancesSetMachineTypeCall { + c.ctx_ = ctx + return c +} + +func (c *InstancesSetMachineTypeCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetmachinetyperequest) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setMachineType") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instance": c.instance, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.instances.setMachineType" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesSetMachineTypeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Changes the machine type for a stopped instance to the machine type specified in the request.", + // "httpMethod": "POST", + // "id": "compute.instances.setMachineType", + // "parameterOrder": [ + // "project", + // "zone", + // "instance" + // ], + // "parameters": { + // "instance": { + // "description": "Name of the instance scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instances/{instance}/setMachineType", + // "request": { + // "$ref": "InstancesSetMachineTypeRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instances.setMetadata": + +type InstancesSetMetadataCall struct { + s *Service + project string + zone string + instance string + metadata *Metadata + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// SetMetadata: Sets metadata for the specified instance to the data +// included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/setMetadata +func (r *InstancesService) SetMetadata(project string, zone string, instance string, metadata *Metadata) *InstancesSetMetadataCall { + c := &InstancesSetMetadataCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance + c.metadata = metadata + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesSetMetadataCall) Fields(s ...googleapi.Field) *InstancesSetMetadataCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
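+// An illustrative sketch (not generated documentation): the project, zone, +// instance and md (*Metadata) values are placeholders, and svc is assumed to +// be a *Service built from an authenticated *http.Client. A deadline on the +// context aborts the request if it runs too long: +// +//   ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) +//   defer cancel() +//   op, err := svc.Instances.SetMetadata("my-project", "us-central1-f", "my-instance", md). +//           Context(ctx). +//           Do()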
+func (c *InstancesSetMetadataCall) Context(ctx context.Context) *InstancesSetMetadataCall { + c.ctx_ = ctx + return c +} + +func (c *InstancesSetMetadataCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.metadata) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setMetadata") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instance": c.instance, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.instances.setMetadata" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesSetMetadataCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets metadata for the specified instance to the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.instances.setMetadata", + // "parameterOrder": [ + // "project", + // "zone", + // "instance" + // ], + // "parameters": { + // "instance": { + // "description": "Name of the instance scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instances/{instance}/setMetadata", + // "request": { + // "$ref": "Metadata" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instances.setScheduling": + +type InstancesSetSchedulingCall struct { + s *Service + project string + zone string + instance string + 
scheduling *Scheduling + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// SetScheduling: Sets an instance's scheduling options. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/setScheduling +func (r *InstancesService) SetScheduling(project string, zone string, instance string, scheduling *Scheduling) *InstancesSetSchedulingCall { + c := &InstancesSetSchedulingCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance + c.scheduling = scheduling + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesSetSchedulingCall) Fields(s ...googleapi.Field) *InstancesSetSchedulingCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstancesSetSchedulingCall) Context(ctx context.Context) *InstancesSetSchedulingCall { + c.ctx_ = ctx + return c +} + +func (c *InstancesSetSchedulingCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.scheduling) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setScheduling") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instance": c.instance, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.instances.setScheduling" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesSetSchedulingCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
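+ // A 304 Not Modified response is surfaced below as a *googleapi.Error carrying the response headers.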
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets an instance's scheduling options.", + // "httpMethod": "POST", + // "id": "compute.instances.setScheduling", + // "parameterOrder": [ + // "project", + // "zone", + // "instance" + // ], + // "parameters": { + // "instance": { + // "description": "Instance name.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instances/{instance}/setScheduling", + // "request": { + // "$ref": "Scheduling" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instances.setTags": + +type InstancesSetTagsCall struct { + s *Service + project string + zone string + instance string + tags *Tags + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// SetTags: Sets tags for the specified instance to the data included in +// the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/setTags +func (r *InstancesService) SetTags(project string, zone string, instance string, tags *Tags) *InstancesSetTagsCall { + c := &InstancesSetTagsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance + c.tags = tags + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesSetTagsCall) Fields(s ...googleapi.Field) *InstancesSetTagsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstancesSetTagsCall) Context(ctx context.Context) *InstancesSetTagsCall { + c.ctx_ = ctx + return c +} + +func (c *InstancesSetTagsCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.tags) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setTags") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instance": c.instance, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.instances.setTags" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesSetTagsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets tags for the specified instance to the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.instances.setTags", + // "parameterOrder": [ + // "project", + // "zone", + // "instance" + // ], + // "parameters": { + // "instance": { + // "description": "Name of the instance scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instances/{instance}/setTags", + // "request": { + // "$ref": "Tags" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instances.start": + +type InstancesStartCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Start: Starts an instance that was stopped using the +// instances().stop method. For more information, see Restart an +// instance.
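+// A minimal usage sketch (illustrative only): "my-project", "us-central1-f" +// and "my-instance" are placeholder values, and svc is assumed to be a +// *Service built from an authenticated *http.Client: +// +//   op, err := svc.Instances.Start("my-project", "us-central1-f", "my-instance").Do() +//   if err != nil { +//           // inspect err, e.g. as a *googleapi.Error +//   } +//   _ = op // poll the returned Operation until its Status is "DONE"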
+// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/start +func (r *InstancesService) Start(project string, zone string, instance string) *InstancesStartCall { + c := &InstancesStartCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesStartCall) Fields(s ...googleapi.Field) *InstancesStartCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstancesStartCall) Context(ctx context.Context) *InstancesStartCall { + c.ctx_ = ctx + return c +} + +func (c *InstancesStartCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/start") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instance": c.instance, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.instances.start" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesStartCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Starts an instance that was stopped using the instances().stop method. 
For more information, see Restart an instance.", + // "httpMethod": "POST", + // "id": "compute.instances.start", + // "parameterOrder": [ + // "project", + // "zone", + // "instance" + // ], + // "parameters": { + // "instance": { + // "description": "Name of the instance resource to start.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instances/{instance}/start", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instances.stop": + +type InstancesStopCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Stop: Stops a running instance, shutting it down cleanly, and allows +// you to restart the instance at a later time. Stopped instances do not +// incur per-minute, virtual machine usage charges while they are +// stopped, but any resources that the virtual machine is using, such as +// persistent disks and static IP addresses, will continue to be charged +// until they are deleted. For more information, see Stopping an +// instance. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/stop +func (r *InstancesService) Stop(project string, zone string, instance string) *InstancesStopCall { + c := &InstancesStopCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesStopCall) Fields(s ...googleapi.Field) *InstancesStopCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstancesStopCall) Context(ctx context.Context) *InstancesStopCall { + c.ctx_ = ctx + return c +} + +func (c *InstancesStopCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/stop") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instance": c.instance, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.instances.stop" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. 
Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesStopCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Stops a running instance, shutting it down cleanly, and allows you to restart the instance at a later time. Stopped instances do not incur per-minute, virtual machine usage charges while they are stopped, but any resources that the virtual machine is using, such as persistent disks and static IP addresses, will continue to be charged until they are deleted. For more information, see Stopping an instance.", + // "httpMethod": "POST", + // "id": "compute.instances.stop", + // "parameterOrder": [ + // "project", + // "zone", + // "instance" + // ], + // "parameters": { + // "instance": { + // "description": "Name of the instance resource to stop.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instances/{instance}/stop", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.licenses.get": + +type LicensesGetCall struct { + s *Service + project string + license string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// Get: Returns the specified License resource. Get a list of available +// licenses by making a list() request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/licenses/get +func (r *LicensesService) Get(project string, license string) *LicensesGetCall { + c := &LicensesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.license = license + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
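+// An illustrative sketch (placeholder project and license names; svc is an +// assumed *Service): requesting only the name and selfLink fields of the +// License resource: +// +//   lic, err := svc.Licenses.Get("my-project", "my-license"). +//           Fields("name", "selfLink"). +//           Do()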
+func (c *LicensesGetCall) Fields(s ...googleapi.Field) *LicensesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *LicensesGetCall) IfNoneMatch(entityTag string) *LicensesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *LicensesGetCall) Context(ctx context.Context) *LicensesGetCall { + c.ctx_ = ctx + return c +} + +func (c *LicensesGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses/{license}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "license": c.license, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.licenses.get" call. +// Exactly one of *License or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *License.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *LicensesGetCall) Do(opts ...googleapi.CallOption) (*License, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &License{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified License resource. 
Get a list of available licenses by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.licenses.get", + // "parameterOrder": [ + // "project", + // "license" + // ], + // "parameters": { + // "license": { + // "description": "Name of the License resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/licenses/{license}", + // "response": { + // "$ref": "License" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.machineTypes.aggregatedList": + +type MachineTypesAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// AggregatedList: Retrieves an aggregated list of machine types. +// For details, see https://cloud.google.com/compute/docs/reference/latest/machineTypes/aggregatedList +func (r *MachineTypesService) AggregatedList(project string) *MachineTypesAggregatedListCall { + c := &MachineTypesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use filter=name ne +// example-instance. +// +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// The Beta API also supports filtering on multiple expressions by +// providing each separate expression within parentheses. For example, +// (scheduling.automaticRestart eq true) (zone eq us-central1-f). +// Multiple expressions are treated as AND expressions, meaning that +// resources must match all expressions to pass the filters. +func (c *MachineTypesAggregatedListCall) Filter(filter string) *MachineTypesAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. 
If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. +func (c *MachineTypesAggregatedListCall) MaxResults(maxResults int64) *MachineTypesAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *MachineTypesAggregatedListCall) PageToken(pageToken string) *MachineTypesAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *MachineTypesAggregatedListCall) Fields(s ...googleapi.Field) *MachineTypesAggregatedListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *MachineTypesAggregatedListCall) IfNoneMatch(entityTag string) *MachineTypesAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *MachineTypesAggregatedListCall) Context(ctx context.Context) *MachineTypesAggregatedListCall { + c.ctx_ = ctx + return c +} + +func (c *MachineTypesAggregatedListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/machineTypes") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.machineTypes.aggregatedList" call. +// Exactly one of *MachineTypeAggregatedList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *MachineTypeAggregatedList.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *MachineTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*MachineTypeAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...)
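+ // Per-call options have already been folded into c.urlParams_, so doRequest builds the GET URL from the final parameter set.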
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &MachineTypeAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves an aggregated list of machine types.", + // "httpMethod": "GET", + // "id": "compute.machineTypes.aggregatedList", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/aggregated/machineTypes", + // "response": { + // "$ref": "MachineTypeAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *MachineTypesAggregatedListCall) Pages(ctx context.Context, f func(*MachineTypeAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.machineTypes.get": + +type MachineTypesGetCall struct { + s *Service + project string + zone string + machineType string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// Get: Returns the specified machine type. Get a list of available +// machine types by making a list() request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/machineTypes/get +func (r *MachineTypesService) Get(project string, zone string, machineType string) *MachineTypesGetCall { + c := &MachineTypesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.machineType = machineType + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *MachineTypesGetCall) Fields(s ...googleapi.Field) *MachineTypesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *MachineTypesGetCall) IfNoneMatch(entityTag string) *MachineTypesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *MachineTypesGetCall) Context(ctx context.Context) *MachineTypesGetCall { + c.ctx_ = ctx + return c +} + +func (c *MachineTypesGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/machineTypes/{machineType}") + urls += "?"
+ c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "machineType": c.machineType, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.machineTypes.get" call. +// Exactly one of *MachineType or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *MachineType.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *MachineTypesGetCall) Do(opts ...googleapi.CallOption) (*MachineType, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &MachineType{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified machine type. Get a list of available machine types by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.machineTypes.get", + // "parameterOrder": [ + // "project", + // "zone", + // "machineType" + // ], + // "parameters": { + // "machineType": { + // "description": "Name of the machine type to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/machineTypes/{machineType}", + // "response": { + // "$ref": "MachineType" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.machineTypes.list": + +type MachineTypesListCall struct { + s *Service + project string + zone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Retrieves a list of machine types available to the specified +// project. 
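+// A minimal paging sketch (illustrative only): the project and zone literals +// are placeholders, ctx is an assumed context.Context, and svc is assumed to +// be a *Service built from an authenticated *http.Client. Pages (defined +// below) follows nextPageToken so each page is handed to the callback: +// +//   err := svc.MachineTypes.List("my-project", "us-central1-f"). +//           Pages(ctx, func(page *MachineTypeList) error { +//                   for _, mt := range page.Items { +//                           _ = mt.Name +//                   } +//                   return nil +//           })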
+// For details, see https://cloud.google.com/compute/docs/reference/latest/machineTypes/list +func (r *MachineTypesService) List(project string, zone string) *MachineTypesListCall { + c := &MachineTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use filter=name ne +// example-instance. +// +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// The Beta API also supports filtering on multiple expressions by +// providing each separate expression within parentheses. For example, +// (scheduling.automaticRestart eq true) (zone eq us-central1-f). +// Multiple expressions are treated as AND expressions, meaning that +// resources must match all expressions to pass the filters. +func (c *MachineTypesListCall) Filter(filter string) *MachineTypesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. +func (c *MachineTypesListCall) MaxResults(maxResults int64) *MachineTypesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *MachineTypesListCall) PageToken(pageToken string) *MachineTypesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *MachineTypesListCall) Fields(s ...googleapi.Field) *MachineTypesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match.
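+// An illustrative sketch (etag is assumed to have been saved from an earlier +// response; the other placeholders are as elsewhere): +// +//   list, err := svc.MachineTypes.List("my-project", "us-central1-f"). +//           IfNoneMatch(etag). +//           Do() +//   if googleapi.IsNotModified(err) { +//           // the cached copy is still current; reuse it +//   } +//   _ = list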
+func (c *MachineTypesListCall) IfNoneMatch(entityTag string) *MachineTypesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *MachineTypesListCall) Context(ctx context.Context) *MachineTypesListCall { + c.ctx_ = ctx + return c +} + +func (c *MachineTypesListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/machineTypes") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.machineTypes.list" call. +// Exactly one of *MachineTypeList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *MachineTypeList.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *MachineTypesListCall) Do(opts ...googleapi.CallOption) (*MachineTypeList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &MachineTypeList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a list of machine types available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.machineTypes.list", + // "parameterOrder": [ + // "project", + // "zone" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/machineTypes", + // "response": { + // "$ref": "MachineTypeList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *MachineTypesListCall) Pages(ctx context.Context, f func(*MachineTypeList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.networks.delete": + +type NetworksDeleteCall struct { + s *Service + project string + network string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Deletes the specified network. +// For details, see https://cloud.google.com/compute/docs/reference/latest/networks/delete +func (r *NetworksService) Delete(project string, network string) *NetworksDeleteCall { + c := &NetworksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.network = network + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *NetworksDeleteCall) Fields(s ...googleapi.Field) *NetworksDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. 
Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *NetworksDeleteCall) Context(ctx context.Context) *NetworksDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *NetworksDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "network": c.network, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.networks.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *NetworksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified network.", + // "httpMethod": "DELETE", + // "id": "compute.networks.delete", + // "parameterOrder": [ + // "project", + // "network" + // ], + // "parameters": { + // "network": { + // "description": "Name of the network to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/networks/{network}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.networks.get": + +type NetworksGetCall struct { + s *Service + project string + network string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// Get: Returns the specified network. Get a list of available networks +// by making a list() request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/networks/get +func (r *NetworksService) Get(project string, network string) *NetworksGetCall { + c := &NetworksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.network = network + return c +} + +// Fields allows partial responses to be retrieved. 
See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *NetworksGetCall) Fields(s ...googleapi.Field) *NetworksGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *NetworksGetCall) IfNoneMatch(entityTag string) *NetworksGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *NetworksGetCall) Context(ctx context.Context) *NetworksGetCall { + c.ctx_ = ctx + return c +} + +func (c *NetworksGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "network": c.network, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.networks.get" call. +// Exactly one of *Network or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Network.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *NetworksGetCall) Do(opts ...googleapi.CallOption) (*Network, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Network{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified network.
Get a list of available networks by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.networks.get", + // "parameterOrder": [ + // "project", + // "network" + // ], + // "parameters": { + // "network": { + // "description": "Name of the network to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/networks/{network}", + // "response": { + // "$ref": "Network" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.networks.insert": + +type NetworksInsertCall struct { + s *Service + project string + network *Network + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Insert: Creates a network in the specified project using the data +// included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/networks/insert +func (r *NetworksService) Insert(project string, network *Network) *NetworksInsertCall { + c := &NetworksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.network = network + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *NetworksInsertCall) Fields(s ...googleapi.Field) *NetworksInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *NetworksInsertCall) Context(ctx context.Context) *NetworksInsertCall { + c.ctx_ = ctx + return c +} + +func (c *NetworksInsertCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.network) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.networks.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *NetworksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
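+	// A 304 Not Modified response is converted below into a *googleapi.Error
+	// so callers can detect it with googleapi.IsNotModified; CheckResponse
+	// turns any other non-2xx status into an error, and the JSON body is
+	// decoded into the typed result along with the server's response headers.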
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a network in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.networks.insert", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/networks", + // "request": { + // "$ref": "Network" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.networks.list": + +type NetworksListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Retrieves the list of networks available to the specified +// project. +// For details, see https://cloud.google.com/compute/docs/reference/latest/networks/list +func (r *NetworksService) List(project string) *NetworksListCall { + c := &NetworksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use filter=name ne +// example-instance. +// +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// The Beta API also supports filtering on multiple expressions by +// providing each separate expression within parentheses. For example, +// (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
+// Multiple expressions are treated as AND expressions, meaning that +// resources must match all expressions to pass the filters. +func (c *NetworksListCall) Filter(filter string) *NetworksListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. +func (c *NetworksListCall) MaxResults(maxResults int64) *NetworksListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *NetworksListCall) PageToken(pageToken string) *NetworksListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *NetworksListCall) Fields(s ...googleapi.Field) *NetworksListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *NetworksListCall) IfNoneMatch(entityTag string) *NetworksListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *NetworksListCall) Context(ctx context.Context) *NetworksListCall { + c.ctx_ = ctx + return c +} + +func (c *NetworksListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.networks.list" call. +// Exactly one of *NetworkList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *NetworkList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *NetworksListCall) Do(opts ...googleapi.CallOption) (*NetworkList, error) { + gensupport.SetOptions(c.urlParams_, opts...)
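+	// For multi-page listings, callers typically drive this method through
+	// the Pages helper defined below, which feeds each NextPageToken back
+	// into PageToken until the token is empty.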
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &NetworkList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of networks available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.networks.list", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/networks", + // "response": { + // "$ref": "NetworkList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *NetworksListCall) Pages(ctx context.Context, f func(*NetworkList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.projects.get": + +type ProjectsGetCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// Get: Returns the specified Project resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/projects/get +func (r *ProjectsService) Get(project string) *ProjectsGetCall { + c := &ProjectsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsGetCall) Fields(s ...googleapi.Field) *ProjectsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *ProjectsGetCall) IfNoneMatch(entityTag string) *ProjectsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsGetCall) Context(ctx context.Context) *ProjectsGetCall { + c.ctx_ = ctx + return c +} + +func (c *ProjectsGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.projects.get" call. +// Exactly one of *Project or error will be non-nil.
Any non-2xx status +// code is an error. Response headers are in either +// *Project.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ProjectsGetCall) Do(opts ...googleapi.CallOption) (*Project, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Project{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified Project resource.", + // "httpMethod": "GET", + // "id": "compute.projects.get", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}", + // "response": { + // "$ref": "Project" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.projects.moveDisk": + +type ProjectsMoveDiskCall struct { + s *Service + project string + diskmoverequest *DiskMoveRequest + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// MoveDisk: Moves a persistent disk from one zone to another. +func (r *ProjectsService) MoveDisk(project string, diskmoverequest *DiskMoveRequest) *ProjectsMoveDiskCall { + c := &ProjectsMoveDiskCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.diskmoverequest = diskmoverequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsMoveDiskCall) Fields(s ...googleapi.Field) *ProjectsMoveDiskCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsMoveDiskCall) Context(ctx context.Context) *ProjectsMoveDiskCall { + c.ctx_ = ctx + return c +} + +func (c *ProjectsMoveDiskCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.diskmoverequest) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/moveDisk") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.projects.moveDisk" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsMoveDiskCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Moves a persistent disk from one zone to another.", + // "httpMethod": "POST", + // "id": "compute.projects.moveDisk", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/moveDisk", + // "request": { + // "$ref": "DiskMoveRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.projects.moveInstance": + +type ProjectsMoveInstanceCall struct { + s *Service + project string + instancemoverequest *InstanceMoveRequest + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// MoveInstance: Moves an instance and its attached persistent disks +// from one zone to another. +func (r *ProjectsService) MoveInstance(project string, instancemoverequest *InstanceMoveRequest) *ProjectsMoveInstanceCall { + c := &ProjectsMoveInstanceCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.instancemoverequest = instancemoverequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsMoveInstanceCall) Fields(s ...googleapi.Field) *ProjectsMoveInstanceCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
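+//
+// A minimal usage sketch (hypothetical identifiers: svc is an assumed
+// authenticated *Service, moveReq a populated *InstanceMoveRequest):
+//
+//   ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
+//   defer cancel()
+//   op, err := svc.Projects.MoveInstance("my-project", moveReq).Context(ctx).Do()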
+func (c *ProjectsMoveInstanceCall) Context(ctx context.Context) *ProjectsMoveInstanceCall { + c.ctx_ = ctx + return c +} + +func (c *ProjectsMoveInstanceCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancemoverequest) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/moveInstance") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.projects.moveInstance" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsMoveInstanceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Moves an instance and its attached persistent disks from one zone to another.", + // "httpMethod": "POST", + // "id": "compute.projects.moveInstance", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/moveInstance", + // "request": { + // "$ref": "InstanceMoveRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.projects.setCommonInstanceMetadata": + +type ProjectsSetCommonInstanceMetadataCall struct { + s *Service + project string + metadata *Metadata + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// SetCommonInstanceMetadata: Sets metadata common to all instances +// within the specified project using the data included in the request. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/projects/setCommonInstanceMetadata +func (r *ProjectsService) SetCommonInstanceMetadata(project string, metadata *Metadata) *ProjectsSetCommonInstanceMetadataCall { + c := &ProjectsSetCommonInstanceMetadataCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.metadata = metadata + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsSetCommonInstanceMetadataCall) Fields(s ...googleapi.Field) *ProjectsSetCommonInstanceMetadataCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsSetCommonInstanceMetadataCall) Context(ctx context.Context) *ProjectsSetCommonInstanceMetadataCall { + c.ctx_ = ctx + return c +} + +func (c *ProjectsSetCommonInstanceMetadataCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.metadata) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/setCommonInstanceMetadata") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.projects.setCommonInstanceMetadata" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsSetCommonInstanceMetadataCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
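+	// The returned *Operation describes an asynchronous server-side
+	// operation; callers generally poll the corresponding operations Get
+	// method until its Status field reports DONE.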
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets metadata common to all instances within the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.projects.setCommonInstanceMetadata", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/setCommonInstanceMetadata", + // "request": { + // "$ref": "Metadata" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.projects.setUsageExportBucket": + +type ProjectsSetUsageExportBucketCall struct { + s *Service + project string + usageexportlocation *UsageExportLocation + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// SetUsageExportBucket: Enables the usage export feature and sets the +// usage export bucket where reports are stored. If you provide an empty +// request body using this method, the usage export feature will be +// disabled. +// For details, see https://cloud.google.com/compute/docs/reference/latest/projects/setUsageExportBucket +func (r *ProjectsService) SetUsageExportBucket(project string, usageexportlocation *UsageExportLocation) *ProjectsSetUsageExportBucketCall { + c := &ProjectsSetUsageExportBucketCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.usageexportlocation = usageexportlocation + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsSetUsageExportBucketCall) Fields(s ...googleapi.Field) *ProjectsSetUsageExportBucketCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsSetUsageExportBucketCall) Context(ctx context.Context) *ProjectsSetUsageExportBucketCall { + c.ctx_ = ctx + return c +} + +func (c *ProjectsSetUsageExportBucketCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.usageexportlocation) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/setUsageExportBucket") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.projects.setUsageExportBucket" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsSetUsageExportBucketCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Enables the usage export feature and sets the usage export bucket where reports are stored. If you provide an empty request body using this method, the usage export feature will be disabled.", + // "httpMethod": "POST", + // "id": "compute.projects.setUsageExportBucket", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/setUsageExportBucket", + // "request": { + // "$ref": "UsageExportLocation" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "compute.regionOperations.delete": + +type RegionOperationsDeleteCall struct { + s *Service + project string + region string + operation string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Deletes the specified region-specific Operations resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/regionOperations/delete +func (r *RegionOperationsService) Delete(project string, region string, operation string) *RegionOperationsDeleteCall { + c := &RegionOperationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.operation = operation + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *RegionOperationsDeleteCall) Fields(s ...googleapi.Field) *RegionOperationsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionOperationsDeleteCall) Context(ctx context.Context) *RegionOperationsDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *RegionOperationsDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/operations/{operation}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "operation": c.operation, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.regionOperations.delete" call. +func (c *RegionOperationsDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return err + } + return nil + // { + // "description": "Deletes the specified region-specific Operations resource.", + // "httpMethod": "DELETE", + // "id": "compute.regionOperations.delete", + // "parameterOrder": [ + // "project", + // "region", + // "operation" + // ], + // "parameters": { + // "operation": { + // "description": "Name of the Operations resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/operations/{operation}", + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.regionOperations.get": + +type RegionOperationsGetCall struct { + s *Service + project string + region string + operation string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// Get: Retrieves the specified region-specific Operations resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/regionOperations/get +func (r *RegionOperationsService) Get(project string, region string, operation string) *RegionOperationsGetCall { + c := &RegionOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.operation = operation + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
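+//
+// For example (a sketch; svc and the string arguments are placeholders):
+//
+//   op, err := svc.RegionOperations.
+//     Get("my-project", "us-central1", "operation-123").
+//     Fields("status", "progress", "error").
+//     Do()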
+func (c *RegionOperationsGetCall) Fields(s ...googleapi.Field) *RegionOperationsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *RegionOperationsGetCall) IfNoneMatch(entityTag string) *RegionOperationsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionOperationsGetCall) Context(ctx context.Context) *RegionOperationsGetCall { + c.ctx_ = ctx + return c +} + +func (c *RegionOperationsGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/operations/{operation}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "operation": c.operation, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.regionOperations.get" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...)
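+	// When IfNoneMatch was set and the resource is unchanged, the server
+	// replies 304 Not Modified; the block below surfaces that as a
+	// *googleapi.Error so callers can test it with googleapi.IsNotModified.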
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the specified region-specific Operations resource.", + // "httpMethod": "GET", + // "id": "compute.regionOperations.get", + // "parameterOrder": [ + // "project", + // "region", + // "operation" + // ], + // "parameters": { + // "operation": { + // "description": "Name of the Operations resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/operations/{operation}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.regionOperations.list": + +type RegionOperationsListCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Retrieves a list of Operation resources contained within the +// specified region. +// For details, see https://cloud.google.com/compute/docs/reference/latest/regionOperations/list +func (r *RegionOperationsService) List(project string, region string) *RegionOperationsListCall { + c := &RegionOperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use filter=name ne +// example-instance. 
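+//
+// In this package that example corresponds to (sketch; svc is a
+// placeholder for an authenticated *Service):
+//
+//   call := svc.RegionOperations.List("my-project", "us-central1").
+//     Filter("name ne example-instance")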
+// +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// The Beta API also supports filtering on multiple expressions by +// providing each separate expression within parentheses. For example, +// (scheduling.automaticRestart eq true) (zone eq us-central1-f). +// Multiple expressions are treated as AND expressions, meaning that +// resources must match all expressions to pass the filters. +func (c *RegionOperationsListCall) Filter(filter string) *RegionOperationsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. +func (c *RegionOperationsListCall) MaxResults(maxResults int64) *RegionOperationsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *RegionOperationsListCall) PageToken(pageToken string) *RegionOperationsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionOperationsListCall) Fields(s ...googleapi.Field) *RegionOperationsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *RegionOperationsListCall) IfNoneMatch(entityTag string) *RegionOperationsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionOperationsListCall) Context(ctx context.Context) *RegionOperationsListCall { + c.ctx_ = ctx + return c +} + +func (c *RegionOperationsListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/operations") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.regionOperations.list" call. +// Exactly one of *OperationList or error will be non-nil. Any non-2xx +// status code is an error.
Response headers are in either +// *OperationList.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &OperationList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a list of Operation resources contained within the specified region.", + // "httpMethod": "GET", + // "id": "compute.regionOperations.list", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/operations", + // "response": { + // "$ref": "OperationList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *RegionOperationsListCall) Pages(ctx context.Context, f func(*OperationList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.regions.get": + +type RegionsGetCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// Get: Returns the specified Region resource. Get a list of available +// regions by making a list() request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/regions/get +func (r *RegionsService) Get(project string, region string) *RegionsGetCall { + c := &RegionsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionsGetCall) Fields(s ...googleapi.Field) *RegionsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *RegionsGetCall) IfNoneMatch(entityTag string) *RegionsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionsGetCall) Context(ctx context.Context) *RegionsGetCall { + c.ctx_ = ctx + return c +} + +func (c *RegionsGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}") + urls += "?"
+ c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.regions.get" call. +// Exactly one of *Region or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Region.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *RegionsGetCall) Do(opts ...googleapi.CallOption) (*Region, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Region{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified Region resource. Get a list of available regions by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.regions.get", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}", + // "response": { + // "$ref": "Region" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.regions.list": + +type RegionsListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Retrieves the list of region resources available to the +// specified project. +// For details, see https://cloud.google.com/compute/docs/reference/latest/regions/list +func (r *RegionsService) List(project string) *RegionsListCall { + c := &RegionsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). 
The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use filter=name ne +// example-instance. +// +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// The Beta API also supports filtering on multiple expressions by +// providing each separate expression within parentheses. For example, +// (scheduling.automaticRestart eq true) (zone eq us-central1-f). +// Multiple expressions are treated as AND expressions, meaning that +// resources must match all expressions to pass the filters. +func (c *RegionsListCall) Filter(filter string) *RegionsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. +func (c *RegionsListCall) MaxResults(maxResults int64) *RegionsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *RegionsListCall) PageToken(pageToken string) *RegionsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionsListCall) Fields(s ...googleapi.Field) *RegionsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *RegionsListCall) IfNoneMatch(entityTag string) *RegionsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionsListCall) Context(ctx context.Context) *RegionsListCall { + c.ctx_ = ctx + return c +} + +func (c *RegionsListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions") + urls += "?"
+ c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.regions.list" call. +// Exactly one of *RegionList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *RegionList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionsListCall) Do(opts ...googleapi.CallOption) (*RegionList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &RegionList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of region resources available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.regions.list", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions", + // "response": { + // "$ref": "RegionList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *RegionsListCall) Pages(ctx context.Context, f func(*RegionList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.routes.delete": + +type RoutesDeleteCall struct { + s *Service + project string + route string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Deletes the specified Route resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/routes/delete +func (r *RoutesService) Delete(project string, route string) *RoutesDeleteCall { + c := &RoutesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.route = route + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RoutesDeleteCall) Fields(s ...googleapi.Field) *RoutesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RoutesDeleteCall) Context(ctx context.Context) *RoutesDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *RoutesDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/routes/{route}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "route": c.route, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.routes.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. 
Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RoutesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified Route resource.", + // "httpMethod": "DELETE", + // "id": "compute.routes.delete", + // "parameterOrder": [ + // "project", + // "route" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "route": { + // "description": "Name of the Route resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/routes/{route}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.routes.get": + +type RoutesGetCall struct { + s *Service + project string + route string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// Get: Returns the specified Route resource. Get a list of available +// routes by making a list() request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/routes/get +func (r *RoutesService) Get(project string, route string) *RoutesGetCall { + c := &RoutesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.route = route + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RoutesGetCall) Fields(s ...googleapi.Field) *RoutesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *RoutesGetCall) IfNoneMatch(entityTag string) *RoutesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled.
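+// +// A rough usage sketch (computeService and the project and route names are +// placeholders, not identifiers from this package) showing how Context bounds +// a call with a timeout: +// +// ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) +// defer cancel() +// route, err := computeService.Routes.Get("example-project", "example-route").Context(ctx).Do()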
+func (c *RoutesGetCall) Context(ctx context.Context) *RoutesGetCall { + c.ctx_ = ctx + return c +} + +func (c *RoutesGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/routes/{route}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "route": c.route, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.routes.get" call. +// Exactly one of *Route or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Route.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *RoutesGetCall) Do(opts ...googleapi.CallOption) (*Route, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Route{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified Route resource. Get a list of available routes by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.routes.get", + // "parameterOrder": [ + // "project", + // "route" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "route": { + // "description": "Name of the Route resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/routes/{route}", + // "response": { + // "$ref": "Route" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.routes.insert": + +type RoutesInsertCall struct { + s *Service + project string + route *Route + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Insert: Creates a Route resource in the specified project using the +// data included in the request. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/routes/insert +func (r *RoutesService) Insert(project string, route *Route) *RoutesInsertCall { + c := &RoutesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.route = route + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RoutesInsertCall) Fields(s ...googleapi.Field) *RoutesInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RoutesInsertCall) Context(ctx context.Context) *RoutesInsertCall { + c.ctx_ = ctx + return c +} + +func (c *RoutesInsertCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.route) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/routes") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.routes.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RoutesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
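+ // The POST assembled in doRequest is issued below; a 304 Not Modified + // response is converted into a *googleapi.Error (detectable with + // googleapi.IsNotModified), any other non-2xx status is rejected by + // googleapi.CheckResponse, and a successful body is decoded into an + // Operation.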
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a Route resource in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.routes.insert", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/routes", + // "request": { + // "$ref": "Route" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.routes.list": + +type RoutesListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Retrieves the list of Route resources available to the +// specified project. +// For details, see https://cloud.google.com/compute/docs/reference/latest/routes/list +func (r *RoutesService) List(project string) *RoutesListCall { + c := &RoutesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use filter=name ne +// example-instance. +// +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// The Beta API also supports filtering on multiple expressions by +// providing each separate expression within parentheses. For example, +// (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
+// Multiple expressions are treated as AND expressions, meaning that +// resources must match all expressions to pass the filters. +func (c *RoutesListCall) Filter(filter string) *RoutesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. +func (c *RoutesListCall) MaxResults(maxResults int64) *RoutesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *RoutesListCall) PageToken(pageToken string) *RoutesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RoutesListCall) Fields(s ...googleapi.Field) *RoutesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *RoutesListCall) IfNoneMatch(entityTag string) *RoutesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RoutesListCall) Context(ctx context.Context) *RoutesListCall { + c.ctx_ = ctx + return c +} + +func (c *RoutesListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/routes") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.routes.list" call. +// Exactly one of *RouteList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *RouteList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RoutesListCall) Do(opts ...googleapi.CallOption) (*RouteList, error) { + gensupport.SetOptions(c.urlParams_, opts...)
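+ // The RouteList decoded below carries NextPageToken; the Pages helper + // defined after this method feeds that token back through PageToken to + // fetch every page.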
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &RouteList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of Route resources available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.routes.list", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/routes", + // "response": { + // "$ref": "RouteList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *RoutesListCall) Pages(ctx context.Context, f func(*RouteList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.snapshots.delete": + +type SnapshotsDeleteCall struct { + s *Service + project string + snapshot string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Deletes the specified Snapshot resource. Keep in mind that +// deleting a single snapshot might not necessarily delete all the data +// on that snapshot. If any data on the snapshot that is marked for +// deletion is needed for subsequent snapshots, the data will be moved +// to the next corresponding snapshot. +// +// For more information, see Deleting snapshots. +// For details, see https://cloud.google.com/compute/docs/reference/latest/snapshots/delete +func (r *SnapshotsService) Delete(project string, snapshot string) *SnapshotsDeleteCall { + c := &SnapshotsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.snapshot = snapshot + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *SnapshotsDeleteCall) Fields(s ...googleapi.Field) *SnapshotsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *SnapshotsDeleteCall) Context(ctx context.Context) *SnapshotsDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *SnapshotsDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/snapshots/{snapshot}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "snapshot": c.snapshot, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.snapshots.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error.
Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *SnapshotsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified Snapshot resource. Keep in mind that deleting a single snapshot might not necessarily delete all the data on that snapshot. If any data on the snapshot that is marked for deletion is needed for subsequent snapshots, the data will be moved to the next corresponding snapshot.\n\nFor more information, see Deleting snapshots.", + // "httpMethod": "DELETE", + // "id": "compute.snapshots.delete", + // "parameterOrder": [ + // "project", + // "snapshot" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "snapshot": { + // "description": "Name of the Snapshot resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/snapshots/{snapshot}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.snapshots.get": + +type SnapshotsGetCall struct { + s *Service + project string + snapshot string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// Get: Returns the specified Snapshot resource. Get a list of available +// snapshots by making a list() request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/snapshots/get +func (r *SnapshotsService) Get(project string, snapshot string) *SnapshotsGetCall { + c := &SnapshotsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.snapshot = snapshot + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *SnapshotsGetCall) Fields(s ...googleapi.Field) *SnapshotsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request.
Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *SnapshotsGetCall) IfNoneMatch(entityTag string) *SnapshotsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *SnapshotsGetCall) Context(ctx context.Context) *SnapshotsGetCall { + c.ctx_ = ctx + return c +} + +func (c *SnapshotsGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/snapshots/{snapshot}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "snapshot": c.snapshot, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.snapshots.get" call. +// Exactly one of *Snapshot or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Snapshot.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *SnapshotsGetCall) Do(opts ...googleapi.CallOption) (*Snapshot, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Snapshot{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified Snapshot resource.
Get a list of available snapshots by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.snapshots.get", + // "parameterOrder": [ + // "project", + // "snapshot" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "snapshot": { + // "description": "Name of the Snapshot resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/snapshots/{snapshot}", + // "response": { + // "$ref": "Snapshot" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.snapshots.list": + +type SnapshotsListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Retrieves the list of Snapshot resources contained within the +// specified project. +// For details, see https://cloud.google.com/compute/docs/reference/latest/snapshots/list +func (r *SnapshotsService) List(project string) *SnapshotsListCall { + c := &SnapshotsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use filter=name ne +// example-instance. +// +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// The Beta API also supports filtering on multiple expressions by +// providing each separate expression within parentheses. For example, +// (scheduling.automaticRestart eq true) (zone eq us-central1-f). +// Multiple expressions are treated as AND expressions, meaning that +// resources must match all expressions to pass the filters. +func (c *SnapshotsListCall) Filter(filter string) *SnapshotsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. 
If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. +func (c *SnapshotsListCall) MaxResults(maxResults int64) *SnapshotsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *SnapshotsListCall) PageToken(pageToken string) *SnapshotsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *SnapshotsListCall) Fields(s ...googleapi.Field) *SnapshotsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *SnapshotsListCall) IfNoneMatch(entityTag string) *SnapshotsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *SnapshotsListCall) Context(ctx context.Context) *SnapshotsListCall { + c.ctx_ = ctx + return c +} + +func (c *SnapshotsListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/snapshots") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.snapshots.list" call. +// Exactly one of *SnapshotList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *SnapshotList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *SnapshotsListCall) Do(opts ...googleapi.CallOption) (*SnapshotList, error) { + gensupport.SetOptions(c.urlParams_, opts...)
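+ // googleapi.CloseBody (deferred below) drains and closes the response + // body so the underlying HTTP connection can be reused.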
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &SnapshotList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of Snapshot resources contained within the specified project.", + // "httpMethod": "GET", + // "id": "compute.snapshots.list", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/snapshots", + // "response": { + // "$ref": "SnapshotList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *SnapshotsListCall) Pages(ctx context.Context, f func(*SnapshotList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.sslCertificates.delete": + +type SslCertificatesDeleteCall struct { + s *Service + project string + sslCertificate string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Deletes the specified SslCertificate resource. +func (r *SslCertificatesService) Delete(project string, sslCertificate string) *SslCertificatesDeleteCall { + c := &SslCertificatesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.sslCertificate = sslCertificate + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *SslCertificatesDeleteCall) Fields(s ...googleapi.Field) *SslCertificatesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *SslCertificatesDeleteCall) Context(ctx context.Context) *SslCertificatesDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *SslCertificatesDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslCertificates/{sslCertificate}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "sslCertificate": c.sslCertificate, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.sslCertificates.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. 
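+// +// An illustrative sketch (computeService and the names below are +// placeholders): the delete returns an Operation that can be polled for +// completion, for example via the GlobalOperations service: +// +// op, err := computeService.SslCertificates.Delete("example-project", "example-cert").Do() +// if err != nil { +// // handle error +// } +// _ = op // poll op to observe completion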
+func (c *SslCertificatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified SslCertificate resource.", + // "httpMethod": "DELETE", + // "id": "compute.sslCertificates.delete", + // "parameterOrder": [ + // "project", + // "sslCertificate" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "sslCertificate": { + // "description": "Name of the SslCertificate resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/sslCertificates/{sslCertificate}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.sslCertificates.get": + +type SslCertificatesGetCall struct { + s *Service + project string + sslCertificate string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// Get: Returns the specified SslCertificate resource. Get a list of +// available SSL certificates by making a list() request. +func (r *SslCertificatesService) Get(project string, sslCertificate string) *SslCertificatesGetCall { + c := &SslCertificatesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.sslCertificate = sslCertificate + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *SslCertificatesGetCall) Fields(s ...googleapi.Field) *SslCertificatesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *SslCertificatesGetCall) IfNoneMatch(entityTag string) *SslCertificatesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled.
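+// +// A sketch combining Context with IfNoneMatch (ctx, lastETag and the other +// identifiers are placeholders); googleapi.IsNotModified separates a 304 +// Not Modified from real errors: +// +// cert, err := computeService.SslCertificates.Get("example-project", "example-cert").IfNoneMatch(lastETag).Context(ctx).Do() +// if googleapi.IsNotModified(err) { +// // the cached copy is still current +// } +// _ = cert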
+func (c *SslCertificatesGetCall) Context(ctx context.Context) *SslCertificatesGetCall { + c.ctx_ = ctx + return c +} + +func (c *SslCertificatesGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslCertificates/{sslCertificate}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "sslCertificate": c.sslCertificate, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.sslCertificates.get" call. +// Exactly one of *SslCertificate or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *SslCertificate.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *SslCertificatesGetCall) Do(opts ...googleapi.CallOption) (*SslCertificate, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &SslCertificate{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified SslCertificate resource. Get a list of available SSL certificates by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.sslCertificates.get", + // "parameterOrder": [ + // "project", + // "sslCertificate" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "sslCertificate": { + // "description": "Name of the SslCertificate resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/sslCertificates/{sslCertificate}", + // "response": { + // "$ref": "SslCertificate" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.sslCertificates.insert": + +type SslCertificatesInsertCall struct { + s *Service + project string + sslcertificate *SslCertificate + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Insert: Creates a SslCertificate resource in the specified project +// using the data included in the request. 
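+//
+// A minimal sketch (editor's note, not generated code; the name and PEM
+// literals are placeholders) of building the request body and issuing the
+// call, where svc is an already-constructed *Service:
+//
+//	cert := &SslCertificate{
+//		Name:        "my-cert",
+//		Certificate: "-----BEGIN CERTIFICATE-----\n...",
+//		PrivateKey:  "-----BEGIN RSA PRIVATE KEY-----\n...",
+//	}
+//	op, err := svc.SslCertificates.Insert("my-project", cert).Do()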
+func (r *SslCertificatesService) Insert(project string, sslcertificate *SslCertificate) *SslCertificatesInsertCall { + c := &SslCertificatesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.sslcertificate = sslcertificate + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *SslCertificatesInsertCall) Fields(s ...googleapi.Field) *SslCertificatesInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *SslCertificatesInsertCall) Context(ctx context.Context) *SslCertificatesInsertCall { + c.ctx_ = ctx + return c +} + +func (c *SslCertificatesInsertCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.sslcertificate) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslCertificates") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.sslCertificates.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *SslCertificatesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a SslCertificate resource in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.sslCertificates.insert", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/sslCertificates", + // "request": { + // "$ref": "SslCertificate" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.sslCertificates.list": + +type SslCertificatesListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Retrieves the list of SslCertificate resources available to the +// specified project. +func (r *SslCertificatesService) List(project string) *SslCertificatesListCall { + c := &SslCertificatesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use filter=name ne +// example-instance. +// +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// The Beta API also supports filtering on multiple expressions by +// providing each separate expression within parentheses. For example, +// (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
+// Multiple expressions are treated as AND expressions, meaning that
+// resources must match all expressions to pass the filters.
+func (c *SslCertificatesListCall) Filter(filter string) *SslCertificatesListCall {
+	c.urlParams_.Set("filter", filter)
+	return c
+}
+
+// MaxResults sets the optional parameter "maxResults": The maximum
+// number of results per page that should be returned. If the number of
+// available results is larger than maxResults, Compute Engine returns a
+// nextPageToken that can be used to get the next page of results in
+// subsequent list requests.
+func (c *SslCertificatesListCall) MaxResults(maxResults int64) *SslCertificatesListCall {
+	c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
+	return c
+}
+
+// PageToken sets the optional parameter "pageToken": Specifies a page
+// token to use. Set pageToken to the nextPageToken returned by a
+// previous list request to get the next page of results.
+func (c *SslCertificatesListCall) PageToken(pageToken string) *SslCertificatesListCall {
+	c.urlParams_.Set("pageToken", pageToken)
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *SslCertificatesListCall) Fields(s ...googleapi.Field) *SslCertificatesListCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *SslCertificatesListCall) IfNoneMatch(entityTag string) *SslCertificatesListCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *SslCertificatesListCall) Context(ctx context.Context) *SslCertificatesListCall {
+	c.ctx_ = ctx
+	return c
+}
+
+func (c *SslCertificatesListCall) doRequest(alt string) (*http.Response, error) {
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslCertificates")
+	urls += "?" + c.urlParams_.Encode()
+	req, _ := http.NewRequest("GET", urls, body)
+	googleapi.Expand(req.URL, map[string]string{
+		"project": c.project,
+	})
+	req.Header.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		req.Header.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	if c.ctx_ != nil {
+		return ctxhttp.Do(c.ctx_, c.s.client, req)
+	}
+	return c.s.client.Do(req)
+}
+
+// Do executes the "compute.sslCertificates.list" call.
+// Exactly one of *SslCertificateList or error will be non-nil. Any
+// non-2xx status code is an error. Response headers are in either
+// *SslCertificateList.ServerResponse.Header or (if a response was
+// returned at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
+func (c *SslCertificatesListCall) Do(opts ...googleapi.CallOption) (*SslCertificateList, error) {
+	gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &SslCertificateList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of SslCertificate resources available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.sslCertificates.list", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/sslCertificates", + // "response": { + // "$ref": "SslCertificateList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *SslCertificatesListCall) Pages(ctx context.Context, f func(*SslCertificateList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.subnetworks.aggregatedList": + +type SubnetworksAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// AggregatedList: Retrieves an aggregated list of subnetworks. +func (r *SubnetworksService) AggregatedList(project string) *SubnetworksAggregatedListCall { + c := &SubnetworksAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use filter=name ne +// example-instance. +// +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// The Beta API also supports filtering on multiple expressions by +// providing each separate expression within parentheses. For example, +// (scheduling.automaticRestart eq true) (zone eq us-central1-f). +// Multiple expressions are treated as AND expressions, meaning that +// resources must match all expressions to pass the filters. 
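+//
+// For illustration (editor's sketch, not generated code; "my-project" is a
+// placeholder, and the loop assumes SubnetworkAggregatedList.Items is a map
+// keyed by scope), a filter combines naturally with Pages to walk every
+// matching result:
+//
+//	err := svc.Subnetworks.AggregatedList("my-project").
+//		Filter("name ne default").
+//		Pages(context.Background(), func(page *SubnetworkAggregatedList) error {
+//			for scope := range page.Items {
+//				fmt.Println(scope)
+//			}
+//			return nil
+//		})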
+func (c *SubnetworksAggregatedListCall) Filter(filter string) *SubnetworksAggregatedListCall {
+	c.urlParams_.Set("filter", filter)
+	return c
+}
+
+// MaxResults sets the optional parameter "maxResults": The maximum
+// number of results per page that should be returned. If the number of
+// available results is larger than maxResults, Compute Engine returns a
+// nextPageToken that can be used to get the next page of results in
+// subsequent list requests.
+func (c *SubnetworksAggregatedListCall) MaxResults(maxResults int64) *SubnetworksAggregatedListCall {
+	c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
+	return c
+}
+
+// PageToken sets the optional parameter "pageToken": Specifies a page
+// token to use. Set pageToken to the nextPageToken returned by a
+// previous list request to get the next page of results.
+func (c *SubnetworksAggregatedListCall) PageToken(pageToken string) *SubnetworksAggregatedListCall {
+	c.urlParams_.Set("pageToken", pageToken)
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *SubnetworksAggregatedListCall) Fields(s ...googleapi.Field) *SubnetworksAggregatedListCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *SubnetworksAggregatedListCall) IfNoneMatch(entityTag string) *SubnetworksAggregatedListCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *SubnetworksAggregatedListCall) Context(ctx context.Context) *SubnetworksAggregatedListCall {
+	c.ctx_ = ctx
+	return c
+}
+
+func (c *SubnetworksAggregatedListCall) doRequest(alt string) (*http.Response, error) {
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/subnetworks")
+	urls += "?" + c.urlParams_.Encode()
+	req, _ := http.NewRequest("GET", urls, body)
+	googleapi.Expand(req.URL, map[string]string{
+		"project": c.project,
+	})
+	req.Header.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		req.Header.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	if c.ctx_ != nil {
+		return ctxhttp.Do(c.ctx_, c.s.client, req)
+	}
+	return c.s.client.Do(req)
+}
+
+// Do executes the "compute.subnetworks.aggregatedList" call.
+// Exactly one of *SubnetworkAggregatedList or error will be non-nil.
+// Any non-2xx status code is an error. Response headers are in either
+// *SubnetworkAggregatedList.ServerResponse.Header or (if a response was
+// returned at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
+func (c *SubnetworksAggregatedListCall) Do(opts ...googleapi.CallOption) (*SubnetworkAggregatedList, error) {
+	gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &SubnetworkAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves an aggregated list of subnetworks.", + // "httpMethod": "GET", + // "id": "compute.subnetworks.aggregatedList", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/aggregated/subnetworks", + // "response": { + // "$ref": "SubnetworkAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *SubnetworksAggregatedListCall) Pages(ctx context.Context, f func(*SubnetworkAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.subnetworks.delete": + +type SubnetworksDeleteCall struct { + s *Service + project string + region string + subnetwork string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Deletes the specified subnetwork. +func (r *SubnetworksService) Delete(project string, region string, subnetwork string) *SubnetworksDeleteCall { + c := &SubnetworksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.subnetwork = subnetwork + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *SubnetworksDeleteCall) Fields(s ...googleapi.Field) *SubnetworksDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *SubnetworksDeleteCall) Context(ctx context.Context) *SubnetworksDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *SubnetworksDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/subnetworks/{subnetwork}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "subnetwork": c.subnetwork, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.subnetworks.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. 
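+//
+// A short sketch (editor's note, not generated code; names are
+// placeholders), showing that a subnetwork is addressed by project,
+// region, and name, and that Do returns a regional *Operation that
+// completes asynchronously:
+//
+//	op, err := svc.Subnetworks.Delete("my-project", "us-central1", "my-subnet").Do()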
+func (c *SubnetworksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
+	gensupport.SetOptions(c.urlParams_, opts...)
+	res, err := c.doRequest("json")
+	if res != nil && res.StatusCode == http.StatusNotModified {
+		if res.Body != nil {
+			res.Body.Close()
+		}
+		return nil, &googleapi.Error{
+			Code:   res.StatusCode,
+			Header: res.Header,
+		}
+	}
+	if err != nil {
+		return nil, err
+	}
+	defer googleapi.CloseBody(res)
+	if err := googleapi.CheckResponse(res); err != nil {
+		return nil, err
+	}
+	ret := &Operation{
+		ServerResponse: googleapi.ServerResponse{
+			Header:         res.Header,
+			HTTPStatusCode: res.StatusCode,
+		},
+	}
+	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+	// {
+	//   "description": "Deletes the specified subnetwork.",
+	//   "httpMethod": "DELETE",
+	//   "id": "compute.subnetworks.delete",
+	//   "parameterOrder": [
+	//     "project",
+	//     "region",
+	//     "subnetwork"
+	//   ],
+	//   "parameters": {
+	//     "project": {
+	//       "description": "Project ID for this request.",
+	//       "location": "path",
+	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+	//       "required": true,
+	//       "type": "string"
+	//     },
+	//     "region": {
+	//       "description": "Name of the region scoping this request.",
+	//       "location": "path",
+	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+	//       "required": true,
+	//       "type": "string"
+	//     },
+	//     "subnetwork": {
+	//       "description": "Name of the Subnetwork resource to delete.",
+	//       "location": "path",
+	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+	//       "required": true,
+	//       "type": "string"
+	//     }
+	//   },
+	//   "path": "{project}/regions/{region}/subnetworks/{subnetwork}",
+	//   "response": {
+	//     "$ref": "Operation"
+	//   },
+	//   "scopes": [
+	//     "https://www.googleapis.com/auth/cloud-platform",
+	//     "https://www.googleapis.com/auth/compute"
+	//   ]
+	// }
+
+}
+
+// method id "compute.subnetworks.get":
+
+type SubnetworksGetCall struct {
+	s            *Service
+	project      string
+	region       string
+	subnetwork   string
+	urlParams_   gensupport.URLParams
+	ifNoneMatch_ string
+	ctx_         context.Context
+}
+
+// Get: Returns the specified subnetwork. Get a list of available
+// subnetworks by making a list() request.
+func (r *SubnetworksService) Get(project string, region string, subnetwork string) *SubnetworksGetCall {
+	c := &SubnetworksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+	c.project = project
+	c.region = region
+	c.subnetwork = subnetwork
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *SubnetworksGetCall) Fields(s ...googleapi.Field) *SubnetworksGetCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *SubnetworksGetCall) IfNoneMatch(entityTag string) *SubnetworksGetCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
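+//
+// For example (editor's sketch, not generated code; names are
+// placeholders), a deadline can be attached so a slow call is abandoned
+// instead of blocking indefinitely:
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+//	defer cancel()
+//	subnet, err := svc.Subnetworks.Get("my-project", "us-central1", "my-subnet").
+//		Context(ctx).
+//		Do()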
+func (c *SubnetworksGetCall) Context(ctx context.Context) *SubnetworksGetCall { + c.ctx_ = ctx + return c +} + +func (c *SubnetworksGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/subnetworks/{subnetwork}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "subnetwork": c.subnetwork, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.subnetworks.get" call. +// Exactly one of *Subnetwork or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Subnetwork.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *SubnetworksGetCall) Do(opts ...googleapi.CallOption) (*Subnetwork, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Subnetwork{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified subnetwork. 
Get a list of available subnetworks by making a list() request.",
+	//   "httpMethod": "GET",
+	//   "id": "compute.subnetworks.get",
+	//   "parameterOrder": [
+	//     "project",
+	//     "region",
+	//     "subnetwork"
+	//   ],
+	//   "parameters": {
+	//     "project": {
+	//       "description": "Project ID for this request.",
+	//       "location": "path",
+	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+	//       "required": true,
+	//       "type": "string"
+	//     },
+	//     "region": {
+	//       "description": "Name of the region scoping this request.",
+	//       "location": "path",
+	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+	//       "required": true,
+	//       "type": "string"
+	//     },
+	//     "subnetwork": {
+	//       "description": "Name of the Subnetwork resource to return.",
+	//       "location": "path",
+	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+	//       "required": true,
+	//       "type": "string"
+	//     }
+	//   },
+	//   "path": "{project}/regions/{region}/subnetworks/{subnetwork}",
+	//   "response": {
+	//     "$ref": "Subnetwork"
+	//   },
+	//   "scopes": [
+	//     "https://www.googleapis.com/auth/cloud-platform",
+	//     "https://www.googleapis.com/auth/compute",
+	//     "https://www.googleapis.com/auth/compute.readonly"
+	//   ]
+	// }
+
+}
+
+// method id "compute.subnetworks.insert":
+
+type SubnetworksInsertCall struct {
+	s          *Service
+	project    string
+	region     string
+	subnetwork *Subnetwork
+	urlParams_ gensupport.URLParams
+	ctx_       context.Context
+}
+
+// Insert: Creates a subnetwork in the specified project using the data
+// included in the request.
+func (r *SubnetworksService) Insert(project string, region string, subnetwork *Subnetwork) *SubnetworksInsertCall {
+	c := &SubnetworksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+	c.project = project
+	c.region = region
+	c.subnetwork = subnetwork
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *SubnetworksInsertCall) Fields(s ...googleapi.Field) *SubnetworksInsertCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *SubnetworksInsertCall) Context(ctx context.Context) *SubnetworksInsertCall {
+	c.ctx_ = ctx
+	return c
+}
+
+func (c *SubnetworksInsertCall) doRequest(alt string) (*http.Response, error) {
+	var body io.Reader = nil
+	body, err := googleapi.WithoutDataWrapper.JSONReader(c.subnetwork)
+	if err != nil {
+		return nil, err
+	}
+	ctype := "application/json"
+	c.urlParams_.Set("alt", alt)
+	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/subnetworks")
+	urls += "?" + c.urlParams_.Encode()
+	req, _ := http.NewRequest("POST", urls, body)
+	googleapi.Expand(req.URL, map[string]string{
+		"project": c.project,
+		"region":  c.region,
+	})
+	req.Header.Set("Content-Type", ctype)
+	req.Header.Set("User-Agent", c.s.userAgent())
+	if c.ctx_ != nil {
+		return ctxhttp.Do(c.ctx_, c.s.client, req)
+	}
+	return c.s.client.Do(req)
+}
+
+// Do executes the "compute.subnetworks.insert" call.
+// Exactly one of *Operation or error will be non-nil. Any non-2xx
+// status code is an error. Response headers are in either
+// *Operation.ServerResponse.Header or (if a response was returned at
+// all) in error.(*googleapi.Error).Header.
Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *SubnetworksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a subnetwork in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.subnetworks.insert", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/subnetworks", + // "request": { + // "$ref": "Subnetwork" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.subnetworks.list": + +type SubnetworksListCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Retrieves a list of subnetworks available to the specified +// project. +func (r *SubnetworksService) List(project string, region string) *SubnetworksListCall { + c := &SubnetworksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use filter=name ne +// example-instance. +// +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. 
For example, you could filter on
+// instances that have set the scheduling.automaticRestart field to
+// true. Use filtering on nested fields to take advantage of labels to
+// organize and search for results based on label values.
+//
+// The Beta API also supports filtering on multiple expressions by
+// providing each separate expression within parentheses. For example,
+// (scheduling.automaticRestart eq true) (zone eq us-central1-f).
+// Multiple expressions are treated as AND expressions, meaning that
+// resources must match all expressions to pass the filters.
+func (c *SubnetworksListCall) Filter(filter string) *SubnetworksListCall {
+	c.urlParams_.Set("filter", filter)
+	return c
+}
+
+// MaxResults sets the optional parameter "maxResults": The maximum
+// number of results per page that should be returned. If the number of
+// available results is larger than maxResults, Compute Engine returns a
+// nextPageToken that can be used to get the next page of results in
+// subsequent list requests.
+func (c *SubnetworksListCall) MaxResults(maxResults int64) *SubnetworksListCall {
+	c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
+	return c
+}
+
+// PageToken sets the optional parameter "pageToken": Specifies a page
+// token to use. Set pageToken to the nextPageToken returned by a
+// previous list request to get the next page of results.
+func (c *SubnetworksListCall) PageToken(pageToken string) *SubnetworksListCall {
+	c.urlParams_.Set("pageToken", pageToken)
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *SubnetworksListCall) Fields(s ...googleapi.Field) *SubnetworksListCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *SubnetworksListCall) IfNoneMatch(entityTag string) *SubnetworksListCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *SubnetworksListCall) Context(ctx context.Context) *SubnetworksListCall {
+	c.ctx_ = ctx
+	return c
+}
+
+func (c *SubnetworksListCall) doRequest(alt string) (*http.Response, error) {
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/subnetworks")
+	urls += "?" + c.urlParams_.Encode()
+	req, _ := http.NewRequest("GET", urls, body)
+	googleapi.Expand(req.URL, map[string]string{
+		"project": c.project,
+		"region":  c.region,
+	})
+	req.Header.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		req.Header.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	if c.ctx_ != nil {
+		return ctxhttp.Do(c.ctx_, c.s.client, req)
+	}
+	return c.s.client.Do(req)
+}
+
+// Do executes the "compute.subnetworks.list" call.
+// Exactly one of *SubnetworkList or error will be non-nil. Any non-2xx
+// status code is an error. Response headers are in either
+// *SubnetworkList.ServerResponse.Header or (if a response was returned
+// at all) in error.(*googleapi.Error).Header.
Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *SubnetworksListCall) Do(opts ...googleapi.CallOption) (*SubnetworkList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &SubnetworkList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a list of subnetworks available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.subnetworks.list", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/subnetworks", + // "response": { + // "$ref": "SubnetworkList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *SubnetworksListCall) Pages(ctx context.Context, f func(*SubnetworkList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.targetHttpProxies.delete": + +type TargetHttpProxiesDeleteCall struct { + s *Service + project string + targetHttpProxy string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Deletes the specified TargetHttpProxy resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/targetHttpProxies/delete +func (r *TargetHttpProxiesService) Delete(project string, targetHttpProxy string) *TargetHttpProxiesDeleteCall { + c := &TargetHttpProxiesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.targetHttpProxy = targetHttpProxy + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TargetHttpProxiesDeleteCall) Fields(s ...googleapi.Field) *TargetHttpProxiesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TargetHttpProxiesDeleteCall) Context(ctx context.Context) *TargetHttpProxiesDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *TargetHttpProxiesDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpProxies/{targetHttpProxy}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "targetHttpProxy": c.targetHttpProxy, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.targetHttpProxies.delete" call. +// Exactly one of *Operation or error will be non-nil. 
Any non-2xx
+// status code is an error. Response headers are in either
+// *Operation.ServerResponse.Header or (if a response was returned at
+// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
+// to check whether the returned error was because
+// http.StatusNotModified was returned.
+func (c *TargetHttpProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
+	gensupport.SetOptions(c.urlParams_, opts...)
+	res, err := c.doRequest("json")
+	if res != nil && res.StatusCode == http.StatusNotModified {
+		if res.Body != nil {
+			res.Body.Close()
+		}
+		return nil, &googleapi.Error{
+			Code:   res.StatusCode,
+			Header: res.Header,
+		}
+	}
+	if err != nil {
+		return nil, err
+	}
+	defer googleapi.CloseBody(res)
+	if err := googleapi.CheckResponse(res); err != nil {
+		return nil, err
+	}
+	ret := &Operation{
+		ServerResponse: googleapi.ServerResponse{
+			Header:         res.Header,
+			HTTPStatusCode: res.StatusCode,
+		},
+	}
+	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+	// {
+	//   "description": "Deletes the specified TargetHttpProxy resource.",
+	//   "httpMethod": "DELETE",
+	//   "id": "compute.targetHttpProxies.delete",
+	//   "parameterOrder": [
+	//     "project",
+	//     "targetHttpProxy"
+	//   ],
+	//   "parameters": {
+	//     "project": {
+	//       "description": "Project ID for this request.",
+	//       "location": "path",
+	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+	//       "required": true,
+	//       "type": "string"
+	//     },
+	//     "targetHttpProxy": {
+	//       "description": "Name of the TargetHttpProxy resource to delete.",
+	//       "location": "path",
+	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+	//       "required": true,
+	//       "type": "string"
+	//     }
+	//   },
+	//   "path": "{project}/global/targetHttpProxies/{targetHttpProxy}",
+	//   "response": {
+	//     "$ref": "Operation"
+	//   },
+	//   "scopes": [
+	//     "https://www.googleapis.com/auth/cloud-platform",
+	//     "https://www.googleapis.com/auth/compute"
+	//   ]
+	// }
+
+}
+
+// method id "compute.targetHttpProxies.get":
+
+type TargetHttpProxiesGetCall struct {
+	s               *Service
+	project         string
+	targetHttpProxy string
+	urlParams_      gensupport.URLParams
+	ifNoneMatch_    string
+	ctx_            context.Context
+}
+
+// Get: Returns the specified TargetHttpProxy resource. Get a list of
+// available target HTTP proxies by making a list() request.
+// For details, see https://cloud.google.com/compute/docs/reference/latest/targetHttpProxies/get
+func (r *TargetHttpProxiesService) Get(project string, targetHttpProxy string) *TargetHttpProxiesGetCall {
+	c := &TargetHttpProxiesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+	c.project = project
+	c.targetHttpProxy = targetHttpProxy
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *TargetHttpProxiesGetCall) Fields(s ...googleapi.Field) *TargetHttpProxiesGetCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
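+//
+// A conditional-get sketch (editor's note, not generated code; etag would
+// come from a previous response's ServerResponse.Header.Get("Etag")):
+//
+//	proxy, err := svc.TargetHttpProxies.Get("my-project", "my-proxy").
+//		IfNoneMatch(etag).
+//		Do()
+//	if googleapi.IsNotModified(err) {
+//		// the cached copy is still current; keep using it
+//	}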
+func (c *TargetHttpProxiesGetCall) IfNoneMatch(entityTag string) *TargetHttpProxiesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TargetHttpProxiesGetCall) Context(ctx context.Context) *TargetHttpProxiesGetCall { + c.ctx_ = ctx + return c +} + +func (c *TargetHttpProxiesGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpProxies/{targetHttpProxy}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "targetHttpProxy": c.targetHttpProxy, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.targetHttpProxies.get" call. +// Exactly one of *TargetHttpProxy or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *TargetHttpProxy.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *TargetHttpProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetHttpProxy, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TargetHttpProxy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified TargetHttpProxy resource. 
Get a list of available target HTTP proxies by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.targetHttpProxies.get", + // "parameterOrder": [ + // "project", + // "targetHttpProxy" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "targetHttpProxy": { + // "description": "Name of the TargetHttpProxy resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/targetHttpProxies/{targetHttpProxy}", + // "response": { + // "$ref": "TargetHttpProxy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.targetHttpProxies.insert": + +type TargetHttpProxiesInsertCall struct { + s *Service + project string + targethttpproxy *TargetHttpProxy + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Insert: Creates a TargetHttpProxy resource in the specified project +// using the data included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/targetHttpProxies/insert +func (r *TargetHttpProxiesService) Insert(project string, targethttpproxy *TargetHttpProxy) *TargetHttpProxiesInsertCall { + c := &TargetHttpProxiesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.targethttpproxy = targethttpproxy + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TargetHttpProxiesInsertCall) Fields(s ...googleapi.Field) *TargetHttpProxiesInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TargetHttpProxiesInsertCall) Context(ctx context.Context) *TargetHttpProxiesInsertCall { + c.ctx_ = ctx + return c +} + +func (c *TargetHttpProxiesInsertCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targethttpproxy) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpProxies") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.targetHttpProxies.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *TargetHttpProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a TargetHttpProxy resource in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.targetHttpProxies.insert", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/targetHttpProxies", + // "request": { + // "$ref": "TargetHttpProxy" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.targetHttpProxies.list": + +type TargetHttpProxiesListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Retrieves the list of TargetHttpProxy resources available to +// the specified project. +// For details, see https://cloud.google.com/compute/docs/reference/latest/targetHttpProxies/list +func (r *TargetHttpProxiesService) List(project string) *TargetHttpProxiesListCall { + c := &TargetHttpProxiesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use filter=name ne +// example-instance. +// +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. 
Use filtering on nested fields to take advantage of labels to
+// organize and search for results based on label values.
+//
+// The Beta API also supports filtering on multiple expressions by
+// providing each separate expression within parentheses. For example,
+// (scheduling.automaticRestart eq true) (zone eq us-central1-f).
+// Multiple expressions are treated as AND expressions, meaning that
+// resources must match all expressions to pass the filters.
+func (c *TargetHttpProxiesListCall) Filter(filter string) *TargetHttpProxiesListCall {
+ c.urlParams_.Set("filter", filter)
+ return c
+}
+
+// MaxResults sets the optional parameter "maxResults": The maximum
+// number of results per page that should be returned. If the number of
+// available results is larger than maxResults, Compute Engine returns a
+// nextPageToken that can be used to get the next page of results in
+// subsequent list requests.
+func (c *TargetHttpProxiesListCall) MaxResults(maxResults int64) *TargetHttpProxiesListCall {
+ c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
+ return c
+}
+
+// PageToken sets the optional parameter "pageToken": Specifies a page
+// token to use. Set pageToken to the nextPageToken returned by a
+// previous list request to get the next page of results.
+func (c *TargetHttpProxiesListCall) PageToken(pageToken string) *TargetHttpProxiesListCall {
+ c.urlParams_.Set("pageToken", pageToken)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *TargetHttpProxiesListCall) Fields(s ...googleapi.Field) *TargetHttpProxiesListCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *TargetHttpProxiesListCall) IfNoneMatch(entityTag string) *TargetHttpProxiesListCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *TargetHttpProxiesListCall) Context(ctx context.Context) *TargetHttpProxiesListCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *TargetHttpProxiesListCall) doRequest(alt string) (*http.Response, error) {
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpProxies")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ })
+ req.Header.Set("User-Agent", c.s.userAgent())
+ if c.ifNoneMatch_ != "" {
+ req.Header.Set("If-None-Match", c.ifNoneMatch_)
+ }
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "compute.targetHttpProxies.list" call.
+// Exactly one of *TargetHttpProxyList or error will be non-nil. Any
+// non-2xx status code is an error. Response headers are in either
+// *TargetHttpProxyList.ServerResponse.Header or (if a response was
+// returned at all) in error.(*googleapi.Error).Header.
Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *TargetHttpProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetHttpProxyList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TargetHttpProxyList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of TargetHttpProxy resources available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.targetHttpProxies.list", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/targetHttpProxies", + // "response": { + // "$ref": "TargetHttpProxyList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *TargetHttpProxiesListCall) Pages(ctx context.Context, f func(*TargetHttpProxyList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.targetHttpProxies.setUrlMap": + +type TargetHttpProxiesSetUrlMapCall struct { + s *Service + project string + targetHttpProxy string + urlmapreference *UrlMapReference + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// SetUrlMap: Changes the URL map for TargetHttpProxy. +// For details, see https://cloud.google.com/compute/docs/reference/latest/targetHttpProxies/setUrlMap +func (r *TargetHttpProxiesService) SetUrlMap(project string, targetHttpProxy string, urlmapreference *UrlMapReference) *TargetHttpProxiesSetUrlMapCall { + c := &TargetHttpProxiesSetUrlMapCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.targetHttpProxy = targetHttpProxy + c.urlmapreference = urlmapreference + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TargetHttpProxiesSetUrlMapCall) Fields(s ...googleapi.Field) *TargetHttpProxiesSetUrlMapCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TargetHttpProxiesSetUrlMapCall) Context(ctx context.Context) *TargetHttpProxiesSetUrlMapCall { + c.ctx_ = ctx + return c +} + +func (c *TargetHttpProxiesSetUrlMapCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.urlmapreference) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/targetHttpProxies/{targetHttpProxy}/setUrlMap") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "targetHttpProxy": c.targetHttpProxy, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.targetHttpProxies.setUrlMap" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *TargetHttpProxiesSetUrlMapCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Changes the URL map for TargetHttpProxy.", + // "httpMethod": "POST", + // "id": "compute.targetHttpProxies.setUrlMap", + // "parameterOrder": [ + // "project", + // "targetHttpProxy" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "targetHttpProxy": { + // "description": "Name of the TargetHttpProxy to set a URL map for.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/targetHttpProxies/{targetHttpProxy}/setUrlMap", + // "request": { + // "$ref": "UrlMapReference" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.targetHttpsProxies.delete": + +type TargetHttpsProxiesDeleteCall struct { + s *Service + project string + targetHttpsProxy string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Deletes the specified TargetHttpsProxy resource. +func (r *TargetHttpsProxiesService) Delete(project string, targetHttpsProxy string) *TargetHttpsProxiesDeleteCall { + c := &TargetHttpsProxiesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.targetHttpsProxy = targetHttpsProxy + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
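+//
+// For example (an illustrative sketch, not generated documentation), to
+// trim the returned Operation to just its name and status:
+//
+//   op, err := svc.TargetHttpsProxies.Delete("my-project", "my-proxy").
+//     Fields("name", "status").
+//     Do()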
+func (c *TargetHttpsProxiesDeleteCall) Fields(s ...googleapi.Field) *TargetHttpsProxiesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TargetHttpsProxiesDeleteCall) Context(ctx context.Context) *TargetHttpsProxiesDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *TargetHttpsProxiesDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpsProxies/{targetHttpsProxy}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "targetHttpsProxy": c.targetHttpsProxy, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.targetHttpsProxies.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *TargetHttpsProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified TargetHttpsProxy resource.", + // "httpMethod": "DELETE", + // "id": "compute.targetHttpsProxies.delete", + // "parameterOrder": [ + // "project", + // "targetHttpsProxy" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "targetHttpsProxy": { + // "description": "Name of the TargetHttpsProxy resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/targetHttpsProxies/{targetHttpsProxy}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.targetHttpsProxies.get": + +type TargetHttpsProxiesGetCall struct { + s *Service + project string + targetHttpsProxy string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// Get: Returns the specified 
TargetHttpsProxy resource. Get a list of
+// available target HTTPS proxies by making a list() request.
+func (r *TargetHttpsProxiesService) Get(project string, targetHttpsProxy string) *TargetHttpsProxiesGetCall {
+ c := &TargetHttpsProxiesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.project = project
+ c.targetHttpsProxy = targetHttpsProxy
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *TargetHttpsProxiesGetCall) Fields(s ...googleapi.Field) *TargetHttpsProxiesGetCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *TargetHttpsProxiesGetCall) IfNoneMatch(entityTag string) *TargetHttpsProxiesGetCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *TargetHttpsProxiesGetCall) Context(ctx context.Context) *TargetHttpsProxiesGetCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *TargetHttpsProxiesGetCall) doRequest(alt string) (*http.Response, error) {
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpsProxies/{targetHttpsProxy}")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "targetHttpsProxy": c.targetHttpsProxy,
+ })
+ req.Header.Set("User-Agent", c.s.userAgent())
+ if c.ifNoneMatch_ != "" {
+ req.Header.Set("If-None-Match", c.ifNoneMatch_)
+ }
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "compute.targetHttpsProxies.get" call.
+// Exactly one of *TargetHttpsProxy or error will be non-nil. Any
+// non-2xx status code is an error. Response headers are in either
+// *TargetHttpsProxy.ServerResponse.Header or (if a response was
+// returned at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
+func (c *TargetHttpsProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetHttpsProxy, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, &googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ ret := &TargetHttpsProxy{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Returns the specified TargetHttpsProxy resource.
Get a list of available target HTTPS proxies by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.targetHttpsProxies.get", + // "parameterOrder": [ + // "project", + // "targetHttpsProxy" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "targetHttpsProxy": { + // "description": "Name of the TargetHttpsProxy resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/targetHttpsProxies/{targetHttpsProxy}", + // "response": { + // "$ref": "TargetHttpsProxy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.targetHttpsProxies.insert": + +type TargetHttpsProxiesInsertCall struct { + s *Service + project string + targethttpsproxy *TargetHttpsProxy + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Insert: Creates a TargetHttpsProxy resource in the specified project +// using the data included in the request. +func (r *TargetHttpsProxiesService) Insert(project string, targethttpsproxy *TargetHttpsProxy) *TargetHttpsProxiesInsertCall { + c := &TargetHttpsProxiesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.targethttpsproxy = targethttpsproxy + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TargetHttpsProxiesInsertCall) Fields(s ...googleapi.Field) *TargetHttpsProxiesInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TargetHttpsProxiesInsertCall) Context(ctx context.Context) *TargetHttpsProxiesInsertCall { + c.ctx_ = ctx + return c +} + +func (c *TargetHttpsProxiesInsertCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targethttpsproxy) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpsProxies") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.targetHttpsProxies.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. 
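+//
+// A sketch of a typical call (field values are illustrative); the
+// returned Operation can then be polled for completion:
+//
+//   op, err := svc.TargetHttpsProxies.Insert("my-project", &TargetHttpsProxy{
+//     Name:            "my-https-proxy",
+//     UrlMap:          urlMapLink,
+//     SslCertificates: []string{certLink},
+//   }).Do()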
+func (c *TargetHttpsProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a TargetHttpsProxy resource in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.targetHttpsProxies.insert", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/targetHttpsProxies", + // "request": { + // "$ref": "TargetHttpsProxy" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.targetHttpsProxies.list": + +type TargetHttpsProxiesListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Retrieves the list of TargetHttpsProxy resources available to +// the specified project. +func (r *TargetHttpsProxiesService) List(project string) *TargetHttpsProxiesListCall { + c := &TargetHttpsProxiesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use filter=name ne +// example-instance. +// +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// The Beta API also supports filtering on multiple expressions by +// providing each separate expression within parentheses. 
For example,
+// (scheduling.automaticRestart eq true) (zone eq us-central1-f).
+// Multiple expressions are treated as AND expressions, meaning that
+// resources must match all expressions to pass the filters.
+func (c *TargetHttpsProxiesListCall) Filter(filter string) *TargetHttpsProxiesListCall {
+ c.urlParams_.Set("filter", filter)
+ return c
+}
+
+// MaxResults sets the optional parameter "maxResults": The maximum
+// number of results per page that should be returned. If the number of
+// available results is larger than maxResults, Compute Engine returns a
+// nextPageToken that can be used to get the next page of results in
+// subsequent list requests.
+func (c *TargetHttpsProxiesListCall) MaxResults(maxResults int64) *TargetHttpsProxiesListCall {
+ c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
+ return c
+}
+
+// PageToken sets the optional parameter "pageToken": Specifies a page
+// token to use. Set pageToken to the nextPageToken returned by a
+// previous list request to get the next page of results.
+func (c *TargetHttpsProxiesListCall) PageToken(pageToken string) *TargetHttpsProxiesListCall {
+ c.urlParams_.Set("pageToken", pageToken)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *TargetHttpsProxiesListCall) Fields(s ...googleapi.Field) *TargetHttpsProxiesListCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *TargetHttpsProxiesListCall) IfNoneMatch(entityTag string) *TargetHttpsProxiesListCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *TargetHttpsProxiesListCall) Context(ctx context.Context) *TargetHttpsProxiesListCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *TargetHttpsProxiesListCall) doRequest(alt string) (*http.Response, error) {
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpsProxies")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ })
+ req.Header.Set("User-Agent", c.s.userAgent())
+ if c.ifNoneMatch_ != "" {
+ req.Header.Set("If-None-Match", c.ifNoneMatch_)
+ }
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "compute.targetHttpsProxies.list" call.
+// Exactly one of *TargetHttpsProxyList or error will be non-nil. Any
+// non-2xx status code is an error. Response headers are in either
+// *TargetHttpsProxyList.ServerResponse.Header or (if a response was
+// returned at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
+func (c *TargetHttpsProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetHttpsProxyList, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
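+ // doRequest issues the list call; a 304 Not Modified (possible when
+ // IfNoneMatch was set) is surfaced below as a *googleapi.Error rather
+ // than as a decoded TargetHttpsProxyList.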
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TargetHttpsProxyList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of TargetHttpsProxy resources available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.targetHttpsProxies.list", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/targetHttpsProxies", + // "response": { + // "$ref": "TargetHttpsProxyList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *TargetHttpsProxiesListCall) Pages(ctx context.Context, f func(*TargetHttpsProxyList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.targetHttpsProxies.setSslCertificates": + +type TargetHttpsProxiesSetSslCertificatesCall struct { + s *Service + project string + targetHttpsProxy string + targethttpsproxiessetsslcertificatesrequest *TargetHttpsProxiesSetSslCertificatesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// SetSslCertificates: Replaces SslCertificates for TargetHttpsProxy. +func (r *TargetHttpsProxiesService) SetSslCertificates(project string, targetHttpsProxy string, targethttpsproxiessetsslcertificatesrequest *TargetHttpsProxiesSetSslCertificatesRequest) *TargetHttpsProxiesSetSslCertificatesCall { + c := &TargetHttpsProxiesSetSslCertificatesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.targetHttpsProxy = targetHttpsProxy + c.targethttpsproxiessetsslcertificatesrequest = targethttpsproxiessetsslcertificatesrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TargetHttpsProxiesSetSslCertificatesCall) Fields(s ...googleapi.Field) *TargetHttpsProxiesSetSslCertificatesCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TargetHttpsProxiesSetSslCertificatesCall) Context(ctx context.Context) *TargetHttpsProxiesSetSslCertificatesCall { + c.ctx_ = ctx + return c +} + +func (c *TargetHttpsProxiesSetSslCertificatesCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targethttpsproxiessetsslcertificatesrequest) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/targetHttpsProxies/{targetHttpsProxy}/setSslCertificates") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "targetHttpsProxy": c.targetHttpsProxy, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.targetHttpsProxies.setSslCertificates" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *TargetHttpsProxiesSetSslCertificatesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Replaces SslCertificates for TargetHttpsProxy.", + // "httpMethod": "POST", + // "id": "compute.targetHttpsProxies.setSslCertificates", + // "parameterOrder": [ + // "project", + // "targetHttpsProxy" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "targetHttpsProxy": { + // "description": "Name of the TargetHttpsProxy resource to set an SslCertificates resource for.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/targetHttpsProxies/{targetHttpsProxy}/setSslCertificates", + // "request": { + // "$ref": "TargetHttpsProxiesSetSslCertificatesRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.targetHttpsProxies.setUrlMap": + +type TargetHttpsProxiesSetUrlMapCall struct { + s *Service + project string + targetHttpsProxy string + urlmapreference *UrlMapReference + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// SetUrlMap: Changes the URL map for TargetHttpsProxy. +func (r *TargetHttpsProxiesService) SetUrlMap(project string, targetHttpsProxy string, urlmapreference *UrlMapReference) *TargetHttpsProxiesSetUrlMapCall { + c := &TargetHttpsProxiesSetUrlMapCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.targetHttpsProxy = targetHttpsProxy + c.urlmapreference = urlmapreference + return c +} + +// Fields allows partial responses to be retrieved. 
See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TargetHttpsProxiesSetUrlMapCall) Fields(s ...googleapi.Field) *TargetHttpsProxiesSetUrlMapCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TargetHttpsProxiesSetUrlMapCall) Context(ctx context.Context) *TargetHttpsProxiesSetUrlMapCall { + c.ctx_ = ctx + return c +} + +func (c *TargetHttpsProxiesSetUrlMapCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.urlmapreference) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/targetHttpsProxies/{targetHttpsProxy}/setUrlMap") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "targetHttpsProxy": c.targetHttpsProxy, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.targetHttpsProxies.setUrlMap" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *TargetHttpsProxiesSetUrlMapCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
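+ // On success the response body decodes into an Operation; callers
+ // poll that Operation to observe when the URL map change completes.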
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Changes the URL map for TargetHttpsProxy.", + // "httpMethod": "POST", + // "id": "compute.targetHttpsProxies.setUrlMap", + // "parameterOrder": [ + // "project", + // "targetHttpsProxy" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "targetHttpsProxy": { + // "description": "Name of the TargetHttpsProxy resource whose URL map is to be set.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/targetHttpsProxies/{targetHttpsProxy}/setUrlMap", + // "request": { + // "$ref": "UrlMapReference" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.targetInstances.aggregatedList": + +type TargetInstancesAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// AggregatedList: Retrieves an aggregated list of target instances. +// For details, see https://cloud.google.com/compute/docs/reference/latest/targetInstances/aggregatedList +func (r *TargetInstancesService) AggregatedList(project string) *TargetInstancesAggregatedListCall { + c := &TargetInstancesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use filter=name ne +// example-instance. +// +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. 
Use filtering on nested fields to take advantage of labels to
+// organize and search for results based on label values.
+//
+// The Beta API also supports filtering on multiple expressions by
+// providing each separate expression within parentheses. For example,
+// (scheduling.automaticRestart eq true) (zone eq us-central1-f).
+// Multiple expressions are treated as AND expressions, meaning that
+// resources must match all expressions to pass the filters.
+func (c *TargetInstancesAggregatedListCall) Filter(filter string) *TargetInstancesAggregatedListCall {
+ c.urlParams_.Set("filter", filter)
+ return c
+}
+
+// MaxResults sets the optional parameter "maxResults": The maximum
+// number of results per page that should be returned. If the number of
+// available results is larger than maxResults, Compute Engine returns a
+// nextPageToken that can be used to get the next page of results in
+// subsequent list requests.
+func (c *TargetInstancesAggregatedListCall) MaxResults(maxResults int64) *TargetInstancesAggregatedListCall {
+ c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
+ return c
+}
+
+// PageToken sets the optional parameter "pageToken": Specifies a page
+// token to use. Set pageToken to the nextPageToken returned by a
+// previous list request to get the next page of results.
+func (c *TargetInstancesAggregatedListCall) PageToken(pageToken string) *TargetInstancesAggregatedListCall {
+ c.urlParams_.Set("pageToken", pageToken)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *TargetInstancesAggregatedListCall) Fields(s ...googleapi.Field) *TargetInstancesAggregatedListCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *TargetInstancesAggregatedListCall) IfNoneMatch(entityTag string) *TargetInstancesAggregatedListCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *TargetInstancesAggregatedListCall) Context(ctx context.Context) *TargetInstancesAggregatedListCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *TargetInstancesAggregatedListCall) doRequest(alt string) (*http.Response, error) {
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/targetInstances")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ })
+ req.Header.Set("User-Agent", c.s.userAgent())
+ if c.ifNoneMatch_ != "" {
+ req.Header.Set("If-None-Match", c.ifNoneMatch_)
+ }
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "compute.targetInstances.aggregatedList" call.
+// Exactly one of *TargetInstanceAggregatedList or error will be
+// non-nil. Any non-2xx status code is an error.
Response headers are in +// either *TargetInstanceAggregatedList.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *TargetInstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*TargetInstanceAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TargetInstanceAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves an aggregated list of target instances.", + // "httpMethod": "GET", + // "id": "compute.targetInstances.aggregatedList", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/aggregated/targetInstances", + // "response": { + // "$ref": "TargetInstanceAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *TargetInstancesAggregatedListCall) Pages(ctx context.Context, f func(*TargetInstanceAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.targetInstances.delete": + +type TargetInstancesDeleteCall struct { + s *Service + project string + zone string + targetInstance string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Deletes the specified TargetInstance resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/targetInstances/delete +func (r *TargetInstancesService) Delete(project string, zone string, targetInstance string) *TargetInstancesDeleteCall { + c := &TargetInstancesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.targetInstance = targetInstance + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TargetInstancesDeleteCall) Fields(s ...googleapi.Field) *TargetInstancesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TargetInstancesDeleteCall) Context(ctx context.Context) *TargetInstancesDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *TargetInstancesDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/targetInstances/{targetInstance}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "targetInstance": c.targetInstance, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.targetInstances.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. 
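+// A minimal usage sketch (svc, "my-project", "us-central1-f", and +// "my-target" are illustrative placeholders, with svc assumed to be a +// configured *Service): +// +//   op, err := svc.TargetInstances.Delete("my-project", "us-central1-f", "my-target").Do() +// 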
Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *TargetInstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified TargetInstance resource.", + // "httpMethod": "DELETE", + // "id": "compute.targetInstances.delete", + // "parameterOrder": [ + // "project", + // "zone", + // "targetInstance" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "targetInstance": { + // "description": "Name of the TargetInstance resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "Name of the zone scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/targetInstances/{targetInstance}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.targetInstances.get": + +type TargetInstancesGetCall struct { + s *Service + project string + zone string + targetInstance string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// Get: Returns the specified TargetInstance resource. Get a list of +// available target instances by making a list() request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/targetInstances/get +func (r *TargetInstancesService) Get(project string, zone string, targetInstance string) *TargetInstancesGetCall { + c := &TargetInstancesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.targetInstance = targetInstance + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TargetInstancesGetCall) Fields(s ...googleapi.Field) *TargetInstancesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. 
This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *TargetInstancesGetCall) IfNoneMatch(entityTag string) *TargetInstancesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TargetInstancesGetCall) Context(ctx context.Context) *TargetInstancesGetCall { + c.ctx_ = ctx + return c +} + +func (c *TargetInstancesGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/targetInstances/{targetInstance}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "targetInstance": c.targetInstance, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.targetInstances.get" call. +// Exactly one of *TargetInstance or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *TargetInstance.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *TargetInstancesGetCall) Do(opts ...googleapi.CallOption) (*TargetInstance, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TargetInstance{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified TargetInstance resource. 
Get a list of available target instances by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.targetInstances.get", + // "parameterOrder": [ + // "project", + // "zone", + // "targetInstance" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "targetInstance": { + // "description": "Name of the TargetInstance resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "Name of the zone scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/targetInstances/{targetInstance}", + // "response": { + // "$ref": "TargetInstance" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.targetInstances.insert": + +type TargetInstancesInsertCall struct { + s *Service + project string + zone string + targetinstance *TargetInstance + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Insert: Creates a TargetInstance resource in the specified project +// and zone using the data included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/targetInstances/insert +func (r *TargetInstancesService) Insert(project string, zone string, targetinstance *TargetInstance) *TargetInstancesInsertCall { + c := &TargetInstancesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.targetinstance = targetinstance + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TargetInstancesInsertCall) Fields(s ...googleapi.Field) *TargetInstancesInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TargetInstancesInsertCall) Context(ctx context.Context) *TargetInstancesInsertCall { + c.ctx_ = ctx + return c +} + +func (c *TargetInstancesInsertCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetinstance) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/targetInstances") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.targetInstances.insert" call. +// Exactly one of *Operation or error will be non-nil. 
Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *TargetInstancesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a TargetInstance resource in the specified project and zone using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.targetInstances.insert", + // "parameterOrder": [ + // "project", + // "zone" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "Name of the zone scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/targetInstances", + // "request": { + // "$ref": "TargetInstance" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.targetInstances.list": + +type TargetInstancesListCall struct { + s *Service + project string + zone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Retrieves a list of TargetInstance resources available to the +// specified project and zone. +// For details, see https://cloud.google.com/compute/docs/reference/latest/targetInstances/list +func (r *TargetInstancesService) List(project string, zone string) *TargetInstancesListCall { + c := &TargetInstancesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. 
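+// (An illustrative pattern, not taken from the API reference: name eq +// my-instance-.* matches any name beginning with my-instance-.) 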
The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use filter=name ne +// example-instance. +// +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// The Beta API also supports filtering on multiple expressions by +// providing each separate expression within parentheses. For example, +// (scheduling.automaticRestart eq true) (zone eq us-central1-f). +// Multiple expressions are treated as AND expressions, meaning that +// resources must match all expressions to pass the filters. +func (c *TargetInstancesListCall) Filter(filter string) *TargetInstancesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. +func (c *TargetInstancesListCall) MaxResults(maxResults int64) *TargetInstancesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *TargetInstancesListCall) PageToken(pageToken string) *TargetInstancesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TargetInstancesListCall) Fields(s ...googleapi.Field) *TargetInstancesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *TargetInstancesListCall) IfNoneMatch(entityTag string) *TargetInstancesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TargetInstancesListCall) Context(ctx context.Context) *TargetInstancesListCall { + c.ctx_ = ctx + return c +} + +func (c *TargetInstancesListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/targetInstances") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.targetInstances.list" call. +// Exactly one of *TargetInstanceList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TargetInstanceList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *TargetInstancesListCall) Do(opts ...googleapi.CallOption) (*TargetInstanceList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TargetInstanceList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a list of TargetInstance resources available to the specified project and zone.", + // "httpMethod": "GET", + // "id": "compute.targetInstances.list", + // "parameterOrder": [ + // "project", + // "zone" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "Name of the zone scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/targetInstances", + // "response": { + // "$ref": "TargetInstanceList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *TargetInstancesListCall) Pages(ctx context.Context, f func(*TargetInstanceList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.targetPools.addHealthCheck": + +type TargetPoolsAddHealthCheckCall struct { + s *Service + project string + region string + targetPool string + targetpoolsaddhealthcheckrequest *TargetPoolsAddHealthCheckRequest + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// AddHealthCheck: Adds health check URLs to a target pool. +// For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/addHealthCheck +func (r *TargetPoolsService) AddHealthCheck(project string, region string, targetPool string, targetpoolsaddhealthcheckrequest *TargetPoolsAddHealthCheckRequest) *TargetPoolsAddHealthCheckCall { + c := &TargetPoolsAddHealthCheckCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.targetPool = targetPool + c.targetpoolsaddhealthcheckrequest = targetpoolsaddhealthcheckrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TargetPoolsAddHealthCheckCall) Fields(s ...googleapi.Field) *TargetPoolsAddHealthCheckCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *TargetPoolsAddHealthCheckCall) Context(ctx context.Context) *TargetPoolsAddHealthCheckCall { + c.ctx_ = ctx + return c +} + +func (c *TargetPoolsAddHealthCheckCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetpoolsaddhealthcheckrequest) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}/addHealthCheck") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "targetPool": c.targetPool, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.targetPools.addHealthCheck" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *TargetPoolsAddHealthCheckCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Adds health check URLs to a target pool.", + // "httpMethod": "POST", + // "id": "compute.targetPools.addHealthCheck", + // "parameterOrder": [ + // "project", + // "region", + // "targetPool" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "targetPool": { + // "description": "Name of the target pool to add a health check to.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/targetPools/{targetPool}/addHealthCheck", + // "request": { + // "$ref": "TargetPoolsAddHealthCheckRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.targetPools.addInstance": + +type 
TargetPoolsAddInstanceCall struct { + s *Service + project string + region string + targetPool string + targetpoolsaddinstancerequest *TargetPoolsAddInstanceRequest + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// AddInstance: Adds an instance to a target pool. +// For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/addInstance +func (r *TargetPoolsService) AddInstance(project string, region string, targetPool string, targetpoolsaddinstancerequest *TargetPoolsAddInstanceRequest) *TargetPoolsAddInstanceCall { + c := &TargetPoolsAddInstanceCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.targetPool = targetPool + c.targetpoolsaddinstancerequest = targetpoolsaddinstancerequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TargetPoolsAddInstanceCall) Fields(s ...googleapi.Field) *TargetPoolsAddInstanceCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TargetPoolsAddInstanceCall) Context(ctx context.Context) *TargetPoolsAddInstanceCall { + c.ctx_ = ctx + return c +} + +func (c *TargetPoolsAddInstanceCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetpoolsaddinstancerequest) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}/addInstance") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "targetPool": c.targetPool, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.targetPools.addInstance" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *TargetPoolsAddInstanceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
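+ // The http.StatusNotModified branch below converts a 304 response into + // a *googleapi.Error so that callers can detect it with + // googleapi.IsNotModified. 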
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Adds an instance to a target pool.", + // "httpMethod": "POST", + // "id": "compute.targetPools.addInstance", + // "parameterOrder": [ + // "project", + // "region", + // "targetPool" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "targetPool": { + // "description": "Name of the TargetPool resource to add instances to.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/targetPools/{targetPool}/addInstance", + // "request": { + // "$ref": "TargetPoolsAddInstanceRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.targetPools.aggregatedList": + +type TargetPoolsAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// AggregatedList: Retrieves an aggregated list of target pools. +// For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/aggregatedList +func (r *TargetPoolsService) AggregatedList(project string) *TargetPoolsAggregatedListCall { + c := &TargetPoolsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use filter=name ne +// example-instance. +// +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. 
For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// The Beta API also supports filtering on multiple expressions by +// providing each separate expression within parentheses. For example, +// (scheduling.automaticRestart eq true) (zone eq us-central1-f). +// Multiple expressions are treated as AND expressions, meaning that +// resources must match all expressions to pass the filters. +func (c *TargetPoolsAggregatedListCall) Filter(filter string) *TargetPoolsAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. +func (c *TargetPoolsAggregatedListCall) MaxResults(maxResults int64) *TargetPoolsAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *TargetPoolsAggregatedListCall) PageToken(pageToken string) *TargetPoolsAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TargetPoolsAggregatedListCall) Fields(s ...googleapi.Field) *TargetPoolsAggregatedListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *TargetPoolsAggregatedListCall) IfNoneMatch(entityTag string) *TargetPoolsAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TargetPoolsAggregatedListCall) Context(ctx context.Context) *TargetPoolsAggregatedListCall { + c.ctx_ = ctx + return c +} + +func (c *TargetPoolsAggregatedListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/targetPools") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.targetPools.aggregatedList" call. +// Exactly one of *TargetPoolAggregatedList or error will be non-nil. +// Any non-2xx status code is an error. 
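+// Callers that need every page can use the Pages helper defined after +// this method rather than driving PageToken by hand. 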
Response headers are in either +// *TargetPoolAggregatedList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *TargetPoolsAggregatedListCall) Do(opts ...googleapi.CallOption) (*TargetPoolAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TargetPoolAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves an aggregated list of target pools.", + // "httpMethod": "GET", + // "id": "compute.targetPools.aggregatedList", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/aggregated/targetPools", + // "response": { + // "$ref": "TargetPoolAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *TargetPoolsAggregatedListCall) Pages(ctx context.Context, f func(*TargetPoolAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.targetPools.delete": + +type TargetPoolsDeleteCall struct { + s *Service + project string + region string + targetPool string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Deletes the specified target pool. +// For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/delete +func (r *TargetPoolsService) Delete(project string, region string, targetPool string) *TargetPoolsDeleteCall { + c := &TargetPoolsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.targetPool = targetPool + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TargetPoolsDeleteCall) Fields(s ...googleapi.Field) *TargetPoolsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TargetPoolsDeleteCall) Context(ctx context.Context) *TargetPoolsDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *TargetPoolsDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "targetPool": c.targetPool, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.targetPools.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *TargetPoolsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified target pool.", + // "httpMethod": "DELETE", + // "id": "compute.targetPools.delete", + // "parameterOrder": [ + // "project", + // "region", + // "targetPool" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "targetPool": { + // "description": "Name of the TargetPool resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/targetPools/{targetPool}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.targetPools.get": + +type TargetPoolsGetCall struct { + s *Service + project string + region string + targetPool string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// Get: Returns the specified target pool. Get a list of available +// target pools by making a list() request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/get +func (r *TargetPoolsService) Get(project string, region string, targetPool string) *TargetPoolsGetCall { + c := &TargetPoolsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.targetPool = targetPool + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TargetPoolsGetCall) Fields(s ...googleapi.Field) *TargetPoolsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. 
+func (c *TargetPoolsGetCall) IfNoneMatch(entityTag string) *TargetPoolsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TargetPoolsGetCall) Context(ctx context.Context) *TargetPoolsGetCall { + c.ctx_ = ctx + return c +} + +func (c *TargetPoolsGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "targetPool": c.targetPool, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.targetPools.get" call. +// Exactly one of *TargetPool or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *TargetPool.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *TargetPoolsGetCall) Do(opts ...googleapi.CallOption) (*TargetPool, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TargetPool{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified target pool. 
Get a list of available target pools by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.targetPools.get", + // "parameterOrder": [ + // "project", + // "region", + // "targetPool" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "targetPool": { + // "description": "Name of the TargetPool resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/targetPools/{targetPool}", + // "response": { + // "$ref": "TargetPool" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.targetPools.getHealth": + +type TargetPoolsGetHealthCall struct { + s *Service + project string + region string + targetPool string + instancereference *InstanceReference + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// GetHealth: Gets the most recent health check results for each IP for +// the instance that is referenced by the given target pool. +// For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/getHealth +func (r *TargetPoolsService) GetHealth(project string, region string, targetPool string, instancereference *InstanceReference) *TargetPoolsGetHealthCall { + c := &TargetPoolsGetHealthCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.targetPool = targetPool + c.instancereference = instancereference + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TargetPoolsGetHealthCall) Fields(s ...googleapi.Field) *TargetPoolsGetHealthCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TargetPoolsGetHealthCall) Context(ctx context.Context) *TargetPoolsGetHealthCall { + c.ctx_ = ctx + return c +} + +func (c *TargetPoolsGetHealthCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancereference) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}/getHealth") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "targetPool": c.targetPool, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.targetPools.getHealth" call. +// Exactly one of *TargetPoolInstanceHealth or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *TargetPoolInstanceHealth.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *TargetPoolsGetHealthCall) Do(opts ...googleapi.CallOption) (*TargetPoolInstanceHealth, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TargetPoolInstanceHealth{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets the most recent health check results for each IP for the instance that is referenced by the given target pool.", + // "httpMethod": "POST", + // "id": "compute.targetPools.getHealth", + // "parameterOrder": [ + // "project", + // "region", + // "targetPool" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "targetPool": { + // "description": "Name of the TargetPool resource to which the queried instance belongs.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/targetPools/{targetPool}/getHealth", + // "request": { + // "$ref": "InstanceReference" + // }, + // "response": { + // "$ref": "TargetPoolInstanceHealth" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.targetPools.insert": + +type TargetPoolsInsertCall struct { + s *Service + project string + region string + targetpool *TargetPool + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Insert: Creates a target pool in the specified project and region +// using the data included in the request. 
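+// A minimal usage sketch (names are illustrative placeholders and svc +// is assumed to be a configured *Service): +// +//   op, err := svc.TargetPools.Insert("my-project", "us-central1", &TargetPool{Name: "my-pool"}).Do() +// 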
+// For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/insert +func (r *TargetPoolsService) Insert(project string, region string, targetpool *TargetPool) *TargetPoolsInsertCall { + c := &TargetPoolsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.targetpool = targetpool + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TargetPoolsInsertCall) Fields(s ...googleapi.Field) *TargetPoolsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TargetPoolsInsertCall) Context(ctx context.Context) *TargetPoolsInsertCall { + c.ctx_ = ctx + return c +} + +func (c *TargetPoolsInsertCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetpool) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.targetPools.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *TargetPoolsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a target pool in the specified project and region using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.targetPools.insert", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/targetPools", + // "request": { + // "$ref": "TargetPool" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.targetPools.list": + +type TargetPoolsListCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Retrieves a list of target pools available to the specified +// project and region. +// For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/list +func (r *TargetPoolsService) List(project string, region string) *TargetPoolsListCall { + c := &TargetPoolsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use filter=name ne +// example-instance. +// +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. 
Use filtering on nested fields to take advantage of labels to
+// organize and search for results based on label values.
+//
+// The Beta API also supports filtering on multiple expressions by
+// providing each separate expression within parentheses. For example,
+// (scheduling.automaticRestart eq true) (zone eq us-central1-f).
+// Multiple expressions are treated as AND expressions, meaning that
+// resources must match all expressions to pass the filters.
+func (c *TargetPoolsListCall) Filter(filter string) *TargetPoolsListCall {
+	c.urlParams_.Set("filter", filter)
+	return c
+}
+
+// MaxResults sets the optional parameter "maxResults": The maximum
+// number of results per page that should be returned. If the number of
+// available results is larger than maxResults, Compute Engine returns a
+// nextPageToken that can be used to get the next page of results in
+// subsequent list requests.
+func (c *TargetPoolsListCall) MaxResults(maxResults int64) *TargetPoolsListCall {
+	c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
+	return c
+}
+
+// PageToken sets the optional parameter "pageToken": Specifies a page
+// token to use. Set pageToken to the nextPageToken returned by a
+// previous list request to get the next page of results.
+func (c *TargetPoolsListCall) PageToken(pageToken string) *TargetPoolsListCall {
+	c.urlParams_.Set("pageToken", pageToken)
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *TargetPoolsListCall) Fields(s ...googleapi.Field) *TargetPoolsListCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *TargetPoolsListCall) IfNoneMatch(entityTag string) *TargetPoolsListCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *TargetPoolsListCall) Context(ctx context.Context) *TargetPoolsListCall {
+	c.ctx_ = ctx
+	return c
+}
+
+func (c *TargetPoolsListCall) doRequest(alt string) (*http.Response, error) {
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools")
+	urls += "?" + c.urlParams_.Encode()
+	req, _ := http.NewRequest("GET", urls, body)
+	googleapi.Expand(req.URL, map[string]string{
+		"project": c.project,
+		"region":  c.region,
+	})
+	req.Header.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		req.Header.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	if c.ctx_ != nil {
+		return ctxhttp.Do(c.ctx_, c.s.client, req)
+	}
+	return c.s.client.Do(req)
+}
+
+// Do executes the "compute.targetPools.list" call.
+// Exactly one of *TargetPoolList or error will be non-nil. Any non-2xx
+// status code is an error. Response headers are in either
+// *TargetPoolList.ServerResponse.Header or (if a response was returned
+// at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
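+//
+// A minimal usage sketch, not part of the generated surface: it assumes
+// an authenticated *Service in svc, and "my-project", "us-central1",
+// and the filter value are placeholders. Pages (defined below) drives
+// the pageToken loop for you; the Items and Name fields follow the
+// discovery document for TargetPoolList and TargetPool:
+//
+//	call := svc.TargetPools.List("my-project", "us-central1").
+//		Filter("name ne example-pool").
+//		MaxResults(100)
+//	err := call.Pages(context.Background(), func(page *TargetPoolList) error {
+//		for _, pool := range page.Items {
+//			fmt.Println(pool.Name)
+//		}
+//		return nil // a non-nil error here halts the iteration
+//	})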
+func (c *TargetPoolsListCall) Do(opts ...googleapi.CallOption) (*TargetPoolList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TargetPoolList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a list of target pools available to the specified project and region.", + // "httpMethod": "GET", + // "id": "compute.targetPools.list", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/targetPools", + // "response": { + // "$ref": "TargetPoolList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *TargetPoolsListCall) Pages(ctx context.Context, f func(*TargetPoolList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.targetPools.removeHealthCheck": + +type TargetPoolsRemoveHealthCheckCall struct { + s *Service + project string + region string + targetPool string + targetpoolsremovehealthcheckrequest *TargetPoolsRemoveHealthCheckRequest + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// RemoveHealthCheck: Removes health check URL from a target pool. +// For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/removeHealthCheck +func (r *TargetPoolsService) RemoveHealthCheck(project string, region string, targetPool string, targetpoolsremovehealthcheckrequest *TargetPoolsRemoveHealthCheckRequest) *TargetPoolsRemoveHealthCheckCall { + c := &TargetPoolsRemoveHealthCheckCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.targetPool = targetPool + c.targetpoolsremovehealthcheckrequest = targetpoolsremovehealthcheckrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TargetPoolsRemoveHealthCheckCall) Fields(s ...googleapi.Field) *TargetPoolsRemoveHealthCheckCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
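+//
+// For example, a sketch of bounding the call with a deadline. Here svc,
+// req, and the name arguments are hypothetical placeholders; req stands
+// for a previously built *TargetPoolsRemoveHealthCheckRequest:
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//	defer cancel()
+//	op, err := svc.TargetPools.
+//		RemoveHealthCheck("my-project", "us-central1", "my-pool", req).
+//		Context(ctx).
+//		Do()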
+func (c *TargetPoolsRemoveHealthCheckCall) Context(ctx context.Context) *TargetPoolsRemoveHealthCheckCall { + c.ctx_ = ctx + return c +} + +func (c *TargetPoolsRemoveHealthCheckCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetpoolsremovehealthcheckrequest) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}/removeHealthCheck") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "targetPool": c.targetPool, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.targetPools.removeHealthCheck" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *TargetPoolsRemoveHealthCheckCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Removes health check URL from a target pool.", + // "httpMethod": "POST", + // "id": "compute.targetPools.removeHealthCheck", + // "parameterOrder": [ + // "project", + // "region", + // "targetPool" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "targetPool": { + // "description": "Name of the target pool to remove health checks from.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/targetPools/{targetPool}/removeHealthCheck", + // "request": { + // "$ref": "TargetPoolsRemoveHealthCheckRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id 
"compute.targetPools.removeInstance": + +type TargetPoolsRemoveInstanceCall struct { + s *Service + project string + region string + targetPool string + targetpoolsremoveinstancerequest *TargetPoolsRemoveInstanceRequest + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// RemoveInstance: Removes instance URL from a target pool. +// For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/removeInstance +func (r *TargetPoolsService) RemoveInstance(project string, region string, targetPool string, targetpoolsremoveinstancerequest *TargetPoolsRemoveInstanceRequest) *TargetPoolsRemoveInstanceCall { + c := &TargetPoolsRemoveInstanceCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.targetPool = targetPool + c.targetpoolsremoveinstancerequest = targetpoolsremoveinstancerequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TargetPoolsRemoveInstanceCall) Fields(s ...googleapi.Field) *TargetPoolsRemoveInstanceCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TargetPoolsRemoveInstanceCall) Context(ctx context.Context) *TargetPoolsRemoveInstanceCall { + c.ctx_ = ctx + return c +} + +func (c *TargetPoolsRemoveInstanceCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetpoolsremoveinstancerequest) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}/removeInstance") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "targetPool": c.targetPool, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.targetPools.removeInstance" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *TargetPoolsRemoveInstanceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Removes instance URL from a target pool.", + // "httpMethod": "POST", + // "id": "compute.targetPools.removeInstance", + // "parameterOrder": [ + // "project", + // "region", + // "targetPool" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "targetPool": { + // "description": "Name of the TargetPool resource to remove instances from.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/targetPools/{targetPool}/removeInstance", + // "request": { + // "$ref": "TargetPoolsRemoveInstanceRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.targetPools.setBackup": + +type TargetPoolsSetBackupCall struct { + s *Service + project string + region string + targetPool string + targetreference *TargetReference + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// SetBackup: Changes a backup target pool's configurations. +// For details, see https://cloud.google.com/compute/docs/reference/latest/targetPools/setBackup +func (r *TargetPoolsService) SetBackup(project string, region string, targetPool string, targetreference *TargetReference) *TargetPoolsSetBackupCall { + c := &TargetPoolsSetBackupCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.targetPool = targetPool + c.targetreference = targetreference + return c +} + +// FailoverRatio sets the optional parameter "failoverRatio": New +// failoverRatio value for the target pool. +func (c *TargetPoolsSetBackupCall) FailoverRatio(failoverRatio float64) *TargetPoolsSetBackupCall { + c.urlParams_.Set("failoverRatio", fmt.Sprint(failoverRatio)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TargetPoolsSetBackupCall) Fields(s ...googleapi.Field) *TargetPoolsSetBackupCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
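+//
+// An illustrative sketch of a full SetBackup call. All names are
+// placeholders, and the assumption that TargetReference's Target field
+// holds the backup pool's URL comes from the discovery document, not
+// from this file:
+//
+//	ref := &TargetReference{Target: backupPoolURL}
+//	op, err := svc.TargetPools.
+//		SetBackup("my-project", "us-central1", "primary-pool", ref).
+//		FailoverRatio(0.1).
+//		Do()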
+func (c *TargetPoolsSetBackupCall) Context(ctx context.Context) *TargetPoolsSetBackupCall { + c.ctx_ = ctx + return c +} + +func (c *TargetPoolsSetBackupCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetreference) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}/setBackup") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "targetPool": c.targetPool, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.targetPools.setBackup" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *TargetPoolsSetBackupCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Changes a backup target pool's configurations.", + // "httpMethod": "POST", + // "id": "compute.targetPools.setBackup", + // "parameterOrder": [ + // "project", + // "region", + // "targetPool" + // ], + // "parameters": { + // "failoverRatio": { + // "description": "New failoverRatio value for the target pool.", + // "format": "float", + // "location": "query", + // "type": "number" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "targetPool": { + // "description": "Name of the TargetPool resource to set a backup pool for.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/targetPools/{targetPool}/setBackup", + // "request": { + // "$ref": "TargetReference" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // 
"https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.targetVpnGateways.aggregatedList": + +type TargetVpnGatewaysAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// AggregatedList: Retrieves an aggregated list of target VPN gateways. +func (r *TargetVpnGatewaysService) AggregatedList(project string) *TargetVpnGatewaysAggregatedListCall { + c := &TargetVpnGatewaysAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use filter=name ne +// example-instance. +// +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// The Beta API also supports filtering on multiple expressions by +// providing each separate expression within parentheses. For example, +// (scheduling.automaticRestart eq true) (zone eq us-central1-f). +// Multiple expressions are treated as AND expressions, meaning that +// resources must match all expressions to pass the filters. +func (c *TargetVpnGatewaysAggregatedListCall) Filter(filter string) *TargetVpnGatewaysAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. +func (c *TargetVpnGatewaysAggregatedListCall) MaxResults(maxResults int64) *TargetVpnGatewaysAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *TargetVpnGatewaysAggregatedListCall) PageToken(pageToken string) *TargetVpnGatewaysAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *TargetVpnGatewaysAggregatedListCall) Fields(s ...googleapi.Field) *TargetVpnGatewaysAggregatedListCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *TargetVpnGatewaysAggregatedListCall) IfNoneMatch(entityTag string) *TargetVpnGatewaysAggregatedListCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *TargetVpnGatewaysAggregatedListCall) Context(ctx context.Context) *TargetVpnGatewaysAggregatedListCall {
+	c.ctx_ = ctx
+	return c
+}
+
+func (c *TargetVpnGatewaysAggregatedListCall) doRequest(alt string) (*http.Response, error) {
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/targetVpnGateways")
+	urls += "?" + c.urlParams_.Encode()
+	req, _ := http.NewRequest("GET", urls, body)
+	googleapi.Expand(req.URL, map[string]string{
+		"project": c.project,
+	})
+	req.Header.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		req.Header.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	if c.ctx_ != nil {
+		return ctxhttp.Do(c.ctx_, c.s.client, req)
+	}
+	return c.s.client.Do(req)
+}
+
+// Do executes the "compute.targetVpnGateways.aggregatedList" call.
+// Exactly one of *TargetVpnGatewayAggregatedList or error will be
+// non-nil. Any non-2xx status code is an error. Response headers are in
+// either *TargetVpnGatewayAggregatedList.ServerResponse.Header or (if a
+// response was returned at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
+func (c *TargetVpnGatewaysAggregatedListCall) Do(opts ...googleapi.CallOption) (*TargetVpnGatewayAggregatedList, error) {
+	gensupport.SetOptions(c.urlParams_, opts...)
+	res, err := c.doRequest("json")
+	if res != nil && res.StatusCode == http.StatusNotModified {
+		if res.Body != nil {
+			res.Body.Close()
+		}
+		return nil, &googleapi.Error{
+			Code:   res.StatusCode,
+			Header: res.Header,
+		}
+	}
+	if err != nil {
+		return nil, err
+	}
+	defer googleapi.CloseBody(res)
+	if err := googleapi.CheckResponse(res); err != nil {
+		return nil, err
+	}
+	ret := &TargetVpnGatewayAggregatedList{
+		ServerResponse: googleapi.ServerResponse{
+			Header:         res.Header,
+			HTTPStatusCode: res.StatusCode,
+		},
+	}
+	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+	// {
+	//   "description": "Retrieves an aggregated list of target VPN gateways.",
+	//   "httpMethod": "GET",
+	//   "id": "compute.targetVpnGateways.aggregatedList",
+	//   "parameterOrder": [
+	//     "project"
+	//   ],
+	//   "parameters": {
+	//     "filter": {
+	//       "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals).
The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/aggregated/targetVpnGateways", + // "response": { + // "$ref": "TargetVpnGatewayAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *TargetVpnGatewaysAggregatedListCall) Pages(ctx context.Context, f func(*TargetVpnGatewayAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.targetVpnGateways.delete": + +type TargetVpnGatewaysDeleteCall struct { + s *Service + project string + region string + targetVpnGateway string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Deletes the specified target VPN gateway. 
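+//
+// A minimal sketch (svc and the name arguments are placeholders). The
+// returned *Operation describes the asynchronous deletion; per the
+// discovery document, callers typically poll its Status until the
+// operation completes:
+//
+//	op, err := svc.TargetVpnGateways.
+//		Delete("my-project", "us-central1", "my-gateway").
+//		Do()
+//	if err != nil {
+//		// handle error
+//	}
+//	fmt.Println(op.Status)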
+func (r *TargetVpnGatewaysService) Delete(project string, region string, targetVpnGateway string) *TargetVpnGatewaysDeleteCall { + c := &TargetVpnGatewaysDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.targetVpnGateway = targetVpnGateway + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TargetVpnGatewaysDeleteCall) Fields(s ...googleapi.Field) *TargetVpnGatewaysDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TargetVpnGatewaysDeleteCall) Context(ctx context.Context) *TargetVpnGatewaysDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *TargetVpnGatewaysDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetVpnGateways/{targetVpnGateway}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "targetVpnGateway": c.targetVpnGateway, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.targetVpnGateways.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *TargetVpnGatewaysDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+	res, err := c.doRequest("json")
+	if res != nil && res.StatusCode == http.StatusNotModified {
+		if res.Body != nil {
+			res.Body.Close()
+		}
+		return nil, &googleapi.Error{
+			Code:   res.StatusCode,
+			Header: res.Header,
+		}
+	}
+	if err != nil {
+		return nil, err
+	}
+	defer googleapi.CloseBody(res)
+	if err := googleapi.CheckResponse(res); err != nil {
+		return nil, err
+	}
+	ret := &Operation{
+		ServerResponse: googleapi.ServerResponse{
+			Header:         res.Header,
+			HTTPStatusCode: res.StatusCode,
+		},
+	}
+	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+	// {
+	//   "description": "Deletes the specified target VPN gateway.",
+	//   "httpMethod": "DELETE",
+	//   "id": "compute.targetVpnGateways.delete",
+	//   "parameterOrder": [
+	//     "project",
+	//     "region",
+	//     "targetVpnGateway"
+	//   ],
+	//   "parameters": {
+	//     "project": {
+	//       "description": "Project ID for this request.",
+	//       "location": "path",
+	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+	//       "required": true,
+	//       "type": "string"
+	//     },
+	//     "region": {
+	//       "description": "Name of the region for this request.",
+	//       "location": "path",
+	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+	//       "required": true,
+	//       "type": "string"
+	//     },
+	//     "targetVpnGateway": {
+	//       "description": "Name of the target VPN gateway to delete.",
+	//       "location": "path",
+	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+	//       "required": true,
+	//       "type": "string"
+	//     }
+	//   },
+	//   "path": "{project}/regions/{region}/targetVpnGateways/{targetVpnGateway}",
+	//   "response": {
+	//     "$ref": "Operation"
+	//   },
+	//   "scopes": [
+	//     "https://www.googleapis.com/auth/cloud-platform",
+	//     "https://www.googleapis.com/auth/compute"
+	//   ]
+	// }
+
+}
+
+// method id "compute.targetVpnGateways.get":
+
+type TargetVpnGatewaysGetCall struct {
+	s                *Service
+	project          string
+	region           string
+	targetVpnGateway string
+	urlParams_       gensupport.URLParams
+	ifNoneMatch_     string
+	ctx_             context.Context
+}
+
+// Get: Returns the specified target VPN gateway. Get a list of
+// available target VPN gateways by making a list() request.
+func (r *TargetVpnGatewaysService) Get(project string, region string, targetVpnGateway string) *TargetVpnGatewaysGetCall {
+	c := &TargetVpnGatewaysGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+	c.project = project
+	c.region = region
+	c.targetVpnGateway = targetVpnGateway
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *TargetVpnGatewaysGetCall) Fields(s ...googleapi.Field) *TargetVpnGatewaysGetCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *TargetVpnGatewaysGetCall) IfNoneMatch(entityTag string) *TargetVpnGatewaysGetCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
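+//
+// A conditional-fetch sketch combining IfNoneMatch above with a
+// context; svc, ctx, and lastETag are placeholders for an authenticated
+// *Service, a caller-provided context, and the ETag from an earlier
+// response:
+//
+//	gw, err := svc.TargetVpnGateways.
+//		Get("my-project", "us-central1", "my-gateway").
+//		IfNoneMatch(lastETag).
+//		Context(ctx).
+//		Do()
+//	if googleapi.IsNotModified(err) {
+//		// the previously fetched gateway is still current
+//	}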
+func (c *TargetVpnGatewaysGetCall) Context(ctx context.Context) *TargetVpnGatewaysGetCall { + c.ctx_ = ctx + return c +} + +func (c *TargetVpnGatewaysGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetVpnGateways/{targetVpnGateway}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "targetVpnGateway": c.targetVpnGateway, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.targetVpnGateways.get" call. +// Exactly one of *TargetVpnGateway or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TargetVpnGateway.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *TargetVpnGatewaysGetCall) Do(opts ...googleapi.CallOption) (*TargetVpnGateway, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TargetVpnGateway{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified target VPN gateway. 
Get a list of available target VPN gateways by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.targetVpnGateways.get", + // "parameterOrder": [ + // "project", + // "region", + // "targetVpnGateway" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "targetVpnGateway": { + // "description": "Name of the target VPN gateway to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/targetVpnGateways/{targetVpnGateway}", + // "response": { + // "$ref": "TargetVpnGateway" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.targetVpnGateways.insert": + +type TargetVpnGatewaysInsertCall struct { + s *Service + project string + region string + targetvpngateway *TargetVpnGateway + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Insert: Creates a target VPN gateway in the specified project and +// region using the data included in the request. +func (r *TargetVpnGatewaysService) Insert(project string, region string, targetvpngateway *TargetVpnGateway) *TargetVpnGatewaysInsertCall { + c := &TargetVpnGatewaysInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.targetvpngateway = targetvpngateway + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TargetVpnGatewaysInsertCall) Fields(s ...googleapi.Field) *TargetVpnGatewaysInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TargetVpnGatewaysInsertCall) Context(ctx context.Context) *TargetVpnGatewaysInsertCall { + c.ctx_ = ctx + return c +} + +func (c *TargetVpnGatewaysInsertCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetvpngateway) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetVpnGateways") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.targetVpnGateways.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. 
Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *TargetVpnGatewaysInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a target VPN gateway in the specified project and region using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.targetVpnGateways.insert", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/targetVpnGateways", + // "request": { + // "$ref": "TargetVpnGateway" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.targetVpnGateways.list": + +type TargetVpnGatewaysListCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Retrieves a list of target VPN gateways available to the +// specified project and region. +func (r *TargetVpnGatewaysService) List(project string, region string) *TargetVpnGatewaysListCall { + c := &TargetVpnGatewaysListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. 
+//
+// For example, to filter for instances that do not have a name of
+// example-instance, you would use filter=name ne
+// example-instance.
+//
+// Compute Engine Beta API Only: When filtering in the Beta API, you can
+// also filter on nested fields. For example, you could filter on
+// instances that have set the scheduling.automaticRestart field to
+// true. Use filtering on nested fields to take advantage of labels to
+// organize and search for results based on label values.
+//
+// The Beta API also supports filtering on multiple expressions by
+// providing each separate expression within parentheses. For example,
+// (scheduling.automaticRestart eq true) (zone eq us-central1-f).
+// Multiple expressions are treated as AND expressions, meaning that
+// resources must match all expressions to pass the filters.
+func (c *TargetVpnGatewaysListCall) Filter(filter string) *TargetVpnGatewaysListCall {
+	c.urlParams_.Set("filter", filter)
+	return c
+}
+
+// MaxResults sets the optional parameter "maxResults": The maximum
+// number of results per page that should be returned. If the number of
+// available results is larger than maxResults, Compute Engine returns a
+// nextPageToken that can be used to get the next page of results in
+// subsequent list requests.
+func (c *TargetVpnGatewaysListCall) MaxResults(maxResults int64) *TargetVpnGatewaysListCall {
+	c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
+	return c
+}
+
+// PageToken sets the optional parameter "pageToken": Specifies a page
+// token to use. Set pageToken to the nextPageToken returned by a
+// previous list request to get the next page of results.
+func (c *TargetVpnGatewaysListCall) PageToken(pageToken string) *TargetVpnGatewaysListCall {
+	c.urlParams_.Set("pageToken", pageToken)
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *TargetVpnGatewaysListCall) Fields(s ...googleapi.Field) *TargetVpnGatewaysListCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *TargetVpnGatewaysListCall) IfNoneMatch(entityTag string) *TargetVpnGatewaysListCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *TargetVpnGatewaysListCall) Context(ctx context.Context) *TargetVpnGatewaysListCall {
+	c.ctx_ = ctx
+	return c
+}
+
+func (c *TargetVpnGatewaysListCall) doRequest(alt string) (*http.Response, error) {
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetVpnGateways")
+	urls += "?"
+ c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.targetVpnGateways.list" call. +// Exactly one of *TargetVpnGatewayList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TargetVpnGatewayList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *TargetVpnGatewaysListCall) Do(opts ...googleapi.CallOption) (*TargetVpnGatewayList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TargetVpnGatewayList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a list of target VPN gateways available to the specified project and region.", + // "httpMethod": "GET", + // "id": "compute.targetVpnGateways.list", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/targetVpnGateways", + // "response": { + // "$ref": "TargetVpnGatewayList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *TargetVpnGatewaysListCall) Pages(ctx context.Context, f func(*TargetVpnGatewayList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.urlMaps.delete": + +type UrlMapsDeleteCall struct { + s *Service + project string + urlMap string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Deletes the specified UrlMap resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/urlMaps/delete +func (r *UrlMapsService) Delete(project string, urlMap string) *UrlMapsDeleteCall { + c := &UrlMapsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.urlMap = urlMap + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *UrlMapsDeleteCall) Fields(s ...googleapi.Field) *UrlMapsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
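+//
+// As an illustrative sketch only (the svc variable and the resource
+// names are hypothetical, not part of the generated code), a caller
+// might bound this delete with a timeout via Context:
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//	defer cancel()
+//	op, err := svc.UrlMaps.Delete("my-project", "my-url-map").Context(ctx).Do()
+//	if err != nil {
+//		// The request was aborted by the context or the API returned a non-2xx status.
+//	}
+//	_ = op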
+func (c *UrlMapsDeleteCall) Context(ctx context.Context) *UrlMapsDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *UrlMapsDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps/{urlMap}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "urlMap": c.urlMap, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.urlMaps.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *UrlMapsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified UrlMap resource.", + // "httpMethod": "DELETE", + // "id": "compute.urlMaps.delete", + // "parameterOrder": [ + // "project", + // "urlMap" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "urlMap": { + // "description": "Name of the UrlMap resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/urlMaps/{urlMap}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.urlMaps.get": + +type UrlMapsGetCall struct { + s *Service + project string + urlMap string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// Get: Returns the specified UrlMap resource. Get a list of available +// URL maps by making a list() request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/urlMaps/get +func (r *UrlMapsService) Get(project string, urlMap string) *UrlMapsGetCall { + c := &UrlMapsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.urlMap = urlMap + return c +} + +// Fields allows partial responses to be retrieved. 
See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *UrlMapsGetCall) Fields(s ...googleapi.Field) *UrlMapsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *UrlMapsGetCall) IfNoneMatch(entityTag string) *UrlMapsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *UrlMapsGetCall) Context(ctx context.Context) *UrlMapsGetCall { + c.ctx_ = ctx + return c +} + +func (c *UrlMapsGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps/{urlMap}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "urlMap": c.urlMap, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.urlMaps.get" call. +// Exactly one of *UrlMap or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *UrlMap.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *UrlMapsGetCall) Do(opts ...googleapi.CallOption) (*UrlMap, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &UrlMap{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified UrlMap resource.
Get a list of available URL maps by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.urlMaps.get", + // "parameterOrder": [ + // "project", + // "urlMap" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "urlMap": { + // "description": "Name of the UrlMap resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/urlMaps/{urlMap}", + // "response": { + // "$ref": "UrlMap" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.urlMaps.insert": + +type UrlMapsInsertCall struct { + s *Service + project string + urlmap *UrlMap + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Insert: Creates a UrlMap resource in the specified project using the +// data included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/urlMaps/insert +func (r *UrlMapsService) Insert(project string, urlmap *UrlMap) *UrlMapsInsertCall { + c := &UrlMapsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.urlmap = urlmap + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *UrlMapsInsertCall) Fields(s ...googleapi.Field) *UrlMapsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *UrlMapsInsertCall) Context(ctx context.Context) *UrlMapsInsertCall { + c.ctx_ = ctx + return c +} + +func (c *UrlMapsInsertCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.urlmap) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.urlMaps.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *UrlMapsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
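+ // doRequest (above) marshals c.urlmap as the JSON request body and POSTs it; non-2xx statuses are converted into errors by googleapi.CheckResponse below.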
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a UrlMap resource in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.urlMaps.insert", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/urlMaps", + // "request": { + // "$ref": "UrlMap" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.urlMaps.list": + +type UrlMapsListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Retrieves the list of UrlMap resources available to the +// specified project. +// For details, see https://cloud.google.com/compute/docs/reference/latest/urlMaps/list +func (r *UrlMapsService) List(project string) *UrlMapsListCall { + c := &UrlMapsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use filter=name ne +// example-instance. +// +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// The Beta API also supports filtering on multiple expressions by +// providing each separate expression within parentheses. For example, +// (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
+// Multiple expressions are treated as AND expressions, meaning that +// resources must match all expressions to pass the filters. +func (c *UrlMapsListCall) Filter(filter string) *UrlMapsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. +func (c *UrlMapsListCall) MaxResults(maxResults int64) *UrlMapsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *UrlMapsListCall) PageToken(pageToken string) *UrlMapsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *UrlMapsListCall) Fields(s ...googleapi.Field) *UrlMapsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *UrlMapsListCall) IfNoneMatch(entityTag string) *UrlMapsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *UrlMapsListCall) Context(ctx context.Context) *UrlMapsListCall { + c.ctx_ = ctx + return c +} + +func (c *UrlMapsListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.urlMaps.list" call. +// Exactly one of *UrlMapList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *UrlMapList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *UrlMapsListCall) Do(opts ...googleapi.CallOption) (*UrlMapList, error) { + gensupport.SetOptions(c.urlParams_, opts...)
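+ // A 304 reply to a conditional request set up via IfNoneMatch is surfaced as a *googleapi.Error carrying the status code and response headers.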
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &UrlMapList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of UrlMap resources available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.urlMaps.list", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/urlMaps", + // "response": { + // "$ref": "UrlMapList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *UrlMapsListCall) Pages(ctx context.Context, f func(*UrlMapList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.urlMaps.patch": + +type UrlMapsPatchCall struct { + s *Service + project string + urlMap string + urlmap *UrlMap + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Patch: Updates the entire content of the UrlMap resource. This method +// supports patch semantics. +// For details, see https://cloud.google.com/compute/docs/reference/latest/urlMaps/patch +func (r *UrlMapsService) Patch(project string, urlMap string, urlmap *UrlMap) *UrlMapsPatchCall { + c := &UrlMapsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.urlMap = urlMap + c.urlmap = urlmap + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *UrlMapsPatchCall) Fields(s ...googleapi.Field) *UrlMapsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *UrlMapsPatchCall) Context(ctx context.Context) *UrlMapsPatchCall { + c.ctx_ = ctx + return c +} + +func (c *UrlMapsPatchCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.urlmap) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps/{urlMap}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "urlMap": c.urlMap, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.urlMaps.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *UrlMapsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates the entire content of the UrlMap resource. This method supports patch semantics.", + // "httpMethod": "PATCH", + // "id": "compute.urlMaps.patch", + // "parameterOrder": [ + // "project", + // "urlMap" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "urlMap": { + // "description": "Name of the UrlMap resource to update.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/urlMaps/{urlMap}", + // "request": { + // "$ref": "UrlMap" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.urlMaps.update": + +type UrlMapsUpdateCall struct { + s *Service + project string + urlMap string + urlmap *UrlMap + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Update: Updates the entire content of the UrlMap resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/urlMaps/update +func (r *UrlMapsService) Update(project string, urlMap string, urlmap *UrlMap) *UrlMapsUpdateCall { + c := &UrlMapsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.urlMap = urlMap + c.urlmap = urlmap + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *UrlMapsUpdateCall) Fields(s ...googleapi.Field) *UrlMapsUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *UrlMapsUpdateCall) Context(ctx context.Context) *UrlMapsUpdateCall { + c.ctx_ = ctx + return c +} + +func (c *UrlMapsUpdateCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.urlmap) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps/{urlMap}") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("PUT", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "urlMap": c.urlMap, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.urlMaps.update" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *UrlMapsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates the entire content of the UrlMap resource.", + // "httpMethod": "PUT", + // "id": "compute.urlMaps.update", + // "parameterOrder": [ + // "project", + // "urlMap" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "urlMap": { + // "description": "Name of the UrlMap resource to update.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/urlMaps/{urlMap}", + // "request": { + // "$ref": "UrlMap" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.urlMaps.validate": + +type UrlMapsValidateCall struct { + s *Service + project string + urlMap string + urlmapsvalidaterequest *UrlMapsValidateRequest + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Validate: Runs static validation for the UrlMap. In particular, the +// tests of the provided UrlMap will be run. Calling this method does +// NOT create the UrlMap. +// For details, see https://cloud.google.com/compute/docs/reference/latest/urlMaps/validate +func (r *UrlMapsService) Validate(project string, urlMap string, urlmapsvalidaterequest *UrlMapsValidateRequest) *UrlMapsValidateCall { + c := &UrlMapsValidateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.urlMap = urlMap + c.urlmapsvalidaterequest = urlmapsvalidaterequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
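+//
+// As a hedged sketch (svc, req, and the "result" field name are
+// assumptions for illustration, not taken from the generated code), a
+// caller interested only in the validation verdict could request a
+// partial response:
+//
+//	// req is a previously built *UrlMapsValidateRequest.
+//	res, err := svc.UrlMaps.Validate("my-project", "my-url-map", req).
+//		Fields("result").Do()
+//	_, _ = res, err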
+func (c *UrlMapsValidateCall) Fields(s ...googleapi.Field) *UrlMapsValidateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *UrlMapsValidateCall) Context(ctx context.Context) *UrlMapsValidateCall { + c.ctx_ = ctx + return c +} + +func (c *UrlMapsValidateCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.urlmapsvalidaterequest) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps/{urlMap}/validate") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "urlMap": c.urlMap, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.urlMaps.validate" call. +// Exactly one of *UrlMapsValidateResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *UrlMapsValidateResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *UrlMapsValidateCall) Do(opts ...googleapi.CallOption) (*UrlMapsValidateResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &UrlMapsValidateResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Runs static validation for the UrlMap. In particular, the tests of the provided UrlMap will be run. 
Calling this method does NOT create the UrlMap.", + // "httpMethod": "POST", + // "id": "compute.urlMaps.validate", + // "parameterOrder": [ + // "project", + // "urlMap" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "urlMap": { + // "description": "Name of the UrlMap resource to be validated as.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/urlMaps/{urlMap}/validate", + // "request": { + // "$ref": "UrlMapsValidateRequest" + // }, + // "response": { + // "$ref": "UrlMapsValidateResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.vpnTunnels.aggregatedList": + +type VpnTunnelsAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// AggregatedList: Retrieves an aggregated list of VPN tunnels. +func (r *VpnTunnelsService) AggregatedList(project string) *VpnTunnelsAggregatedListCall { + c := &VpnTunnelsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use filter=name ne +// example-instance. +// +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// The Beta API also supports filtering on multiple expressions by +// providing each separate expression within parentheses. For example, +// (scheduling.automaticRestart eq true) (zone eq us-central1-f). +// Multiple expressions are treated as AND expressions, meaning that +// resources must match all expressions to pass the filters. +func (c *VpnTunnelsAggregatedListCall) Filter(filter string) *VpnTunnelsAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. 
If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. +func (c *VpnTunnelsAggregatedListCall) MaxResults(maxResults int64) *VpnTunnelsAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *VpnTunnelsAggregatedListCall) PageToken(pageToken string) *VpnTunnelsAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *VpnTunnelsAggregatedListCall) Fields(s ...googleapi.Field) *VpnTunnelsAggregatedListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *VpnTunnelsAggregatedListCall) IfNoneMatch(entityTag string) *VpnTunnelsAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *VpnTunnelsAggregatedListCall) Context(ctx context.Context) *VpnTunnelsAggregatedListCall { + c.ctx_ = ctx + return c +} + +func (c *VpnTunnelsAggregatedListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/vpnTunnels") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.vpnTunnels.aggregatedList" call. +// Exactly one of *VpnTunnelAggregatedList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *VpnTunnelAggregatedList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *VpnTunnelsAggregatedListCall) Do(opts ...googleapi.CallOption) (*VpnTunnelAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...)
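+ // doRequest issues the GET with alt=json; the per-scope results are decoded into a VpnTunnelAggregatedList below.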
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &VpnTunnelAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves an aggregated list of VPN tunnels.", + // "httpMethod": "GET", + // "id": "compute.vpnTunnels.aggregatedList", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/aggregated/vpnTunnels", + // "response": { + // "$ref": "VpnTunnelAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *VpnTunnelsAggregatedListCall) Pages(ctx context.Context, f func(*VpnTunnelAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.vpnTunnels.delete": + +type VpnTunnelsDeleteCall struct { + s *Service + project string + region string + vpnTunnel string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Deletes the specified VpnTunnel resource. +func (r *VpnTunnelsService) Delete(project string, region string, vpnTunnel string) *VpnTunnelsDeleteCall { + c := &VpnTunnelsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.vpnTunnel = vpnTunnel + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *VpnTunnelsDeleteCall) Fields(s ...googleapi.Field) *VpnTunnelsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *VpnTunnelsDeleteCall) Context(ctx context.Context) *VpnTunnelsDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *VpnTunnelsDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/vpnTunnels/{vpnTunnel}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "vpnTunnel": c.vpnTunnel, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.vpnTunnels.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. 
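+//
+// A minimal usage sketch, assuming a constructed *Service (svc) and
+// hypothetical resource names:
+//
+//	op, err := svc.VpnTunnels.Delete("my-project", "us-central1", "my-tunnel").Do()
+//	if err != nil {
+//		log.Fatal(err) // non-2xx statuses arrive here as *googleapi.Error
+//	}
+//	fmt.Println(op.Status) // e.g. PENDING, RUNNING, or DONE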
+func (c *VpnTunnelsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified VpnTunnel resource.", + // "httpMethod": "DELETE", + // "id": "compute.vpnTunnels.delete", + // "parameterOrder": [ + // "project", + // "region", + // "vpnTunnel" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "vpnTunnel": { + // "description": "Name of the VpnTunnel resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/vpnTunnels/{vpnTunnel}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.vpnTunnels.get": + +type VpnTunnelsGetCall struct { + s *Service + project string + region string + vpnTunnel string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// Get: Returns the specified VpnTunnel resource. Get a list of +// available VPN tunnels by making a list() request. +func (r *VpnTunnelsService) Get(project string, region string, vpnTunnel string) *VpnTunnelsGetCall { + c := &VpnTunnelsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.vpnTunnel = vpnTunnel + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *VpnTunnelsGetCall) Fields(s ...googleapi.Field) *VpnTunnelsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *VpnTunnelsGetCall) IfNoneMatch(entityTag string) *VpnTunnelsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled.
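+//
+// Combined with IfNoneMatch above, a conditional read might look like
+// this sketch (svc, ctx, and lastETag are hypothetical):
+//
+//	tunnel, err := svc.VpnTunnels.Get("my-project", "us-central1", "my-tunnel").
+//		IfNoneMatch(lastETag).Context(ctx).Do()
+//	if googleapi.IsNotModified(err) {
+//		// The cached copy is still current; reuse it.
+//	}
+//	_ = tunnel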
+func (c *VpnTunnelsGetCall) Context(ctx context.Context) *VpnTunnelsGetCall { + c.ctx_ = ctx + return c +} + +func (c *VpnTunnelsGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/vpnTunnels/{vpnTunnel}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "vpnTunnel": c.vpnTunnel, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.vpnTunnels.get" call. +// Exactly one of *VpnTunnel or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *VpnTunnel.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *VpnTunnelsGetCall) Do(opts ...googleapi.CallOption) (*VpnTunnel, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &VpnTunnel{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified VpnTunnel resource. 
Get a list of available VPN tunnels by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.vpnTunnels.get", + // "parameterOrder": [ + // "project", + // "region", + // "vpnTunnel" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "vpnTunnel": { + // "description": "Name of the VpnTunnel resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/vpnTunnels/{vpnTunnel}", + // "response": { + // "$ref": "VpnTunnel" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.vpnTunnels.insert": + +type VpnTunnelsInsertCall struct { + s *Service + project string + region string + vpntunnel *VpnTunnel + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Insert: Creates a VpnTunnel resource in the specified project and +// region using the data included in the request. +func (r *VpnTunnelsService) Insert(project string, region string, vpntunnel *VpnTunnel) *VpnTunnelsInsertCall { + c := &VpnTunnelsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.vpntunnel = vpntunnel + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *VpnTunnelsInsertCall) Fields(s ...googleapi.Field) *VpnTunnelsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *VpnTunnelsInsertCall) Context(ctx context.Context) *VpnTunnelsInsertCall { + c.ctx_ = ctx + return c +} + +func (c *VpnTunnelsInsertCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.vpntunnel) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/vpnTunnels") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.vpnTunnels.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *VpnTunnelsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a VpnTunnel resource in the specified project and region using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.vpnTunnels.insert", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/vpnTunnels", + // "request": { + // "$ref": "VpnTunnel" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.vpnTunnels.list": + +type VpnTunnelsListCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Retrieves a list of VpnTunnel resources contained in the +// specified project and region. +func (r *VpnTunnelsService) List(project string, region string) *VpnTunnelsListCall { + c := &VpnTunnelsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use filter=name ne +// example-instance. +// +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. 
For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// The Beta API also supports filtering on multiple expressions by +// providing each separate expression within parentheses. For example, +// (scheduling.automaticRestart eq true) (zone eq us-central1-f). +// Multiple expressions are treated as AND expressions, meaning that +// resources must match all expressions to pass the filters. +func (c *VpnTunnelsListCall) Filter(filter string) *VpnTunnelsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. +func (c *VpnTunnelsListCall) MaxResults(maxResults int64) *VpnTunnelsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *VpnTunnelsListCall) PageToken(pageToken string) *VpnTunnelsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *VpnTunnelsListCall) Fields(s ...googleapi.Field) *VpnTunnelsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *VpnTunnelsListCall) IfNoneMatch(entityTag string) *VpnTunnelsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *VpnTunnelsListCall) Context(ctx context.Context) *VpnTunnelsListCall { + c.ctx_ = ctx + return c +} + +func (c *VpnTunnelsListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/vpnTunnels") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.vpnTunnels.list" call. +// Exactly one of *VpnTunnelList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *VpnTunnelList.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. 
Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *VpnTunnelsListCall) Do(opts ...googleapi.CallOption) (*VpnTunnelList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &VpnTunnelList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a list of VpnTunnel resources contained in the specified project and region.", + // "httpMethod": "GET", + // "id": "compute.vpnTunnels.list", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/vpnTunnels", + // "response": { + // "$ref": "VpnTunnelList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *VpnTunnelsListCall) Pages(ctx context.Context, f func(*VpnTunnelList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.zoneOperations.delete": + +type ZoneOperationsDeleteCall struct { + s *Service + project string + zone string + operation string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Deletes the specified zone-specific Operations resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/zoneOperations/delete +func (r *ZoneOperationsService) Delete(project string, zone string, operation string) *ZoneOperationsDeleteCall { + c := &ZoneOperationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.operation = operation + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ZoneOperationsDeleteCall) Fields(s ...googleapi.Field) *ZoneOperationsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ZoneOperationsDeleteCall) Context(ctx context.Context) *ZoneOperationsDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *ZoneOperationsDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/operations/{operation}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "operation": c.operation, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.zoneOperations.delete" call. 
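+// A hypothetical usage sketch, not part of the generated surface: svc is
+// assumed to be a previously constructed *Service, and the project,
+// zone, and operation names below are placeholders.
+//
+//	err := svc.ZoneOperations.Delete("my-project", "us-central1-f", "operation-1234").Do()
+//	if err != nil {
+//		// The operation record was not deleted; inspect err.
+//	}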
+func (c *ZoneOperationsDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return err + } + return nil + // { + // "description": "Deletes the specified zone-specific Operations resource.", + // "httpMethod": "DELETE", + // "id": "compute.zoneOperations.delete", + // "parameterOrder": [ + // "project", + // "zone", + // "operation" + // ], + // "parameters": { + // "operation": { + // "description": "Name of the Operations resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "Name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/operations/{operation}", + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.zoneOperations.get": + +type ZoneOperationsGetCall struct { + s *Service + project string + zone string + operation string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// Get: Retrieves the specified zone-specific Operations resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/zoneOperations/get +func (r *ZoneOperationsService) Get(project string, zone string, operation string) *ZoneOperationsGetCall { + c := &ZoneOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.operation = operation + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ZoneOperationsGetCall) Fields(s ...googleapi.Field) *ZoneOperationsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ZoneOperationsGetCall) IfNoneMatch(entityTag string) *ZoneOperationsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ZoneOperationsGetCall) Context(ctx context.Context) *ZoneOperationsGetCall { + c.ctx_ = ctx + return c +} + +func (c *ZoneOperationsGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/operations/{operation}") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "operation": c.operation, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.zoneOperations.get" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ZoneOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the specified zone-specific Operations resource.", + // "httpMethod": "GET", + // "id": "compute.zoneOperations.get", + // "parameterOrder": [ + // "project", + // "zone", + // "operation" + // ], + // "parameters": { + // "operation": { + // "description": "Name of the Operations resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "Name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/operations/{operation}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.zoneOperations.list": + +type ZoneOperationsListCall struct { + s *Service + project string + zone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Retrieves a list of Operation resources contained within the +// specified zone. 
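+//
+// A hypothetical filter sketch (placeholder project and zone; svc is an
+// assumed *Service value): listing only operations that are not yet
+// DONE might look like
+//
+//	list, err := svc.ZoneOperations.List("my-project", "us-central1-f").
+//		Filter("status ne DONE").
+//		Do()
+//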
+// For details, see https://cloud.google.com/compute/docs/reference/latest/zoneOperations/list
+func (r *ZoneOperationsService) List(project string, zone string) *ZoneOperationsListCall {
+	c := &ZoneOperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+	c.project = project
+	c.zone = zone
+	return c
+}
+
+// Filter sets the optional parameter "filter": Sets a filter expression
+// for filtering listed resources, in the form filter={expression}. Your
+// {expression} must be in the format: field_name comparison_string
+// literal_string.
+//
+// The field_name is the name of the field you want to compare. Only
+// atomic field types are supported (string, number, boolean). The
+// comparison_string must be either eq (equals) or ne (not equals). The
+// literal_string is the string value to filter to. The literal value
+// must be valid for the type of field you are filtering by (string,
+// number, boolean). For string fields, the literal value is interpreted
+// as a regular expression using RE2 syntax. The literal value must
+// match the entire field.
+//
+// For example, to filter for instances that do not have a name of
+// example-instance, you would use filter=name ne
+// example-instance.
+//
+// Compute Engine Beta API Only: When filtering in the Beta API, you can
+// also filter on nested fields. For example, you could filter on
+// instances that have set the scheduling.automaticRestart field to
+// true. Use filtering on nested fields to take advantage of labels to
+// organize and search for results based on label values.
+//
+// The Beta API also supports filtering on multiple expressions by
+// providing each separate expression within parentheses. For example,
+// (scheduling.automaticRestart eq true) (zone eq us-central1-f).
+// Multiple expressions are treated as AND expressions, meaning that
+// resources must match all expressions to pass the filters.
+func (c *ZoneOperationsListCall) Filter(filter string) *ZoneOperationsListCall {
+	c.urlParams_.Set("filter", filter)
+	return c
+}
+
+// MaxResults sets the optional parameter "maxResults": The maximum
+// number of results per page that should be returned. If the number of
+// available results is larger than maxResults, Compute Engine returns a
+// nextPageToken that can be used to get the next page of results in
+// subsequent list requests.
+func (c *ZoneOperationsListCall) MaxResults(maxResults int64) *ZoneOperationsListCall {
+	c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
+	return c
+}
+
+// PageToken sets the optional parameter "pageToken": Specifies a page
+// token to use. Set pageToken to the nextPageToken returned by a
+// previous list request to get the next page of results.
+func (c *ZoneOperationsListCall) PageToken(pageToken string) *ZoneOperationsListCall {
+	c.urlParams_.Set("pageToken", pageToken)
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ZoneOperationsListCall) Fields(s ...googleapi.Field) *ZoneOperationsListCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
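+//
+// A hypothetical conditional-request sketch: savedETag is assumed to
+// have been captured from the "Etag" header of an earlier response, and
+// the other names are placeholders.
+//
+//	list, err := svc.ZoneOperations.List("my-project", "us-central1-f").
+//		IfNoneMatch(savedETag).
+//		Do()
+//	if googleapi.IsNotModified(err) {
+//		// Nothing changed since savedETag; reuse the cached list.
+//	}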
+func (c *ZoneOperationsListCall) IfNoneMatch(entityTag string) *ZoneOperationsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ZoneOperationsListCall) Context(ctx context.Context) *ZoneOperationsListCall { + c.ctx_ = ctx + return c +} + +func (c *ZoneOperationsListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/operations") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.zoneOperations.list" call. +// Exactly one of *OperationList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *OperationList.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ZoneOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &OperationList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a list of Operation resources contained within the specified zone.", + // "httpMethod": "GET", + // "id": "compute.zoneOperations.list", + // "parameterOrder": [ + // "project", + // "zone" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "Name of the zone for request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/operations", + // "response": { + // "$ref": "OperationList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ZoneOperationsListCall) Pages(ctx context.Context, f func(*OperationList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.zones.get": + +type ZonesGetCall struct { + s *Service + project string + zone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// Get: Returns the specified Zone resource. Get a list of available +// zones by making a list() request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/zones/get +func (r *ZonesService) Get(project string, zone string) *ZonesGetCall { + c := &ZonesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ZonesGetCall) Fields(s ...googleapi.Field) *ZonesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. 
This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ZonesGetCall) IfNoneMatch(entityTag string) *ZonesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ZonesGetCall) Context(ctx context.Context) *ZonesGetCall { + c.ctx_ = ctx + return c +} + +func (c *ZonesGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.zones.get" call. +// Exactly one of *Zone or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Zone.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *ZonesGetCall) Do(opts ...googleapi.CallOption) (*Zone, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Zone{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified Zone resource. 
Get a list of available zones by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.zones.get", + // "parameterOrder": [ + // "project", + // "zone" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "Name of the zone resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}", + // "response": { + // "$ref": "Zone" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.zones.list": + +type ZonesListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Retrieves the list of Zone resources available to the specified +// project. +// For details, see https://cloud.google.com/compute/docs/reference/latest/zones/list +func (r *ZonesService) List(project string) *ZonesListCall { + c := &ZonesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use filter=name ne +// example-instance. +// +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// The Beta API also supports filtering on multiple expressions by +// providing each separate expression within parentheses. For example, +// (scheduling.automaticRestart eq true) (zone eq us-central1-f). +// Multiple expressions are treated as AND expressions, meaning that +// resources must match all expressions to pass the filters. +func (c *ZonesListCall) Filter(filter string) *ZonesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. 
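+//
+// A hypothetical paging sketch (placeholder names; svc is an assumed
+// *Service value). The Pages helper defined after Do feeds each
+// nextPageToken back into the call automatically:
+//
+//	call := svc.Zones.List("my-project").MaxResults(50)
+//	err := call.Pages(ctx, func(page *ZoneList) error {
+//		for _, zone := range page.Items {
+//			fmt.Println(zone.Name)
+//		}
+//		return nil // a non-nil error here halts the iteration
+//	})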
+func (c *ZonesListCall) MaxResults(maxResults int64) *ZonesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *ZonesListCall) PageToken(pageToken string) *ZonesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ZonesListCall) Fields(s ...googleapi.Field) *ZonesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ZonesListCall) IfNoneMatch(entityTag string) *ZonesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ZonesListCall) Context(ctx context.Context) *ZonesListCall { + c.ctx_ = ctx + return c +} + +func (c *ZonesListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "compute.zones.list" call. +// Exactly one of *ZoneList or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *ZoneList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ZonesListCall) Do(opts ...googleapi.CallOption) (*ZoneList, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ZoneList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of Zone resources available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.zones.list", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones", + // "response": { + // "$ref": "ZoneList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ZonesListCall) Pages(ctx context.Context, f func(*ZoneList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} diff --git a/Godeps/_workspace/src/google.golang.org/api/container/v1/container-api.json b/Godeps/_workspace/src/google.golang.org/api/container/v1/container-api.json new file mode 100644 index 000000000000..f9f5ba1df9ad --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/api/container/v1/container-api.json @@ -0,0 +1,1057 @@ +{ + "kind": "discovery#restDescription", + "etag": "\"jQLIOHBVnDZie4rQHGH1WJF-INE/cpP4K9eaLrLwMGtsdl5oXjxb8rw\"", + "discoveryVersion": "v1", + "id": "container:v1", + "name": "container", + "version": "v1", + "revision": "20160421", + "title": "Google Container Engine API", + "description": "Builds and manages clusters that run container-based applications, powered by open source Kubernetes technology.", + "ownerDomain": "google.com", + "ownerName": "Google", + "icons": { + "x16": "http://www.google.com/images/icons/product/search-16.gif", + "x32": "http://www.google.com/images/icons/product/search-32.gif" + }, + "documentationLink": "https://cloud.google.com/container-engine/", + "protocol": "rest", + "baseUrl": "https://container.googleapis.com/", + "basePath": "", + "rootUrl": "https://container.googleapis.com/", + "servicePath": "", + "batchPath": "batch", + "parameters": { + "access_token": { + "type": "string", + "description": "OAuth access token.", + "location": "query" + }, + "alt": { + "type": "string", + "description": "Data format for response.", + "default": "json", + "enumDescriptions": [ + "Responses with Content-Type of application/json", + "Media download with context-dependent Content-Type", + "Responses with Content-Type of application/x-protobuf" + ], + "location": "query" + }, + "bearer_token": { + "type": "string", + "description": "OAuth bearer token.", + "location": "query" + }, + "callback": { + "type": "string", + "description": "JSONP", + "location": "query" + }, + "fields": { + "type": "string", + "description": "Selector specifying which fields to include in a partial response.", + "location": "query" + }, + "key": { + "type": "string", + "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. 
Required unless you provide an OAuth 2.0 token.", + "location": "query" + }, + "oauth_token": { + "type": "string", + "description": "OAuth 2.0 token for the current user.", + "location": "query" + }, + "pp": { + "type": "boolean", + "description": "Pretty-print response.", + "default": "true", + "location": "query" + }, + "prettyPrint": { + "type": "boolean", + "description": "Returns response with indentations and line breaks.", + "default": "true", + "location": "query" + }, + "quotaUser": { + "type": "string", + "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", + "location": "query" + }, + "upload_protocol": { + "type": "string", + "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").", + "location": "query" + }, + "uploadType": { + "type": "string", + "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", + "location": "query" + }, + "$.xgafv": { + "type": "string", + "description": "V1 error format.", + "enumDescriptions": [ + "v1 error format", + "v2 error format" + ], + "location": "query" + } + }, + "auth": { + "oauth2": { + "scopes": { + "https://www.googleapis.com/auth/cloud-platform": { + "description": "View and manage your data across Google Cloud Platform services" + } + } + } + }, + "schemas": { + "ListClustersResponse": { + "id": "ListClustersResponse", + "type": "object", + "description": "ListClustersResponse is the result of ListClustersRequest.", + "properties": { + "clusters": { + "type": "array", + "description": "A list of clusters in the project in the specified zone, or across all ones.", + "items": { + "$ref": "Cluster" + } + }, + "missingZones": { + "type": "array", + "description": "If any zones are listed here, the list of clusters returned may be missing those zones.", + "items": { + "type": "string" + } + } + } + }, + "Cluster": { + "id": "Cluster", + "type": "object", + "description": "A Google Container Engine cluster.", + "properties": { + "name": { + "type": "string", + "description": "The name of this cluster. The name must be unique within this project and zone, and can be up to 40 characters with the following restrictions: * Lowercase letters, numbers, and hyphens only. * Must start with a letter. * Must end with a number or a letter." + }, + "description": { + "type": "string", + "description": "An optional description of this cluster." + }, + "initialNodeCount": { + "type": "integer", + "description": "The number of nodes to create in this cluster. You must ensure that your Compute Engine resource quota is sufficient for this number of instances. You must also have available firewall and routes quota. For requests, this field should only be used in lieu of a \"node_pool\" object, since this configuration (along with the \"node_config\") will be used to create a \"NodePool\" object with an auto-generated name. Do not use this and a node_pool at the same time.", + "format": "int32" + }, + "nodeConfig": { + "$ref": "NodeConfig", + "description": "Parameters used in creating the cluster's nodes. See `nodeConfig` for the description of its properties. For requests, this field should only be used in lieu of a \"node_pool\" object, since this configuration (along with the \"initial_node_count\") will be used to create a \"NodePool\" object with an auto-generated name. Do not use this and a node_pool at the same time. 
For responses, this field will be populated with the node configuration of the first node pool. If unspecified, the defaults are used." + }, + "masterAuth": { + "$ref": "MasterAuth", + "description": "The authentication information for accessing the master endpoint." + }, + "loggingService": { + "type": "string", + "description": "The logging service the cluster should use to write logs. Currently available options: * `logging.googleapis.com` - the Google Cloud Logging service. * `none` - no logs will be exported from the cluster. * if left as an empty string,`logging.googleapis.com` will be used." + }, + "monitoringService": { + "type": "string", + "description": "The monitoring service the cluster should use to write metrics. Currently available options: * `monitoring.googleapis.com` - the Google Cloud Monitoring service. * `none` - no metrics will be exported from the cluster. * if left as an empty string, `monitoring.googleapis.com` will be used." + }, + "network": { + "type": "string", + "description": "The name of the Google Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the cluster is connected. If left unspecified, the `default` network will be used." + }, + "clusterIpv4Cidr": { + "type": "string", + "description": "The IP address range of the container pods in this cluster, in [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `10.96.0.0/14`). Leave blank to have one automatically chosen or specify a `/14` block in `10.0.0.0/8`." + }, + "addonsConfig": { + "$ref": "AddonsConfig", + "description": "Configurations for the various addons available to run in the cluster." + }, + "subnetwork": { + "type": "string", + "description": "The name of the Google Compute Engine [subnetwork](/compute/docs/subnetworks) to which the cluster is connected." + }, + "nodePools": { + "type": "array", + "description": "The node pools associated with this cluster. When creating a new cluster, only a single node pool should be specified. This field should not be set if \"node_config\" or \"initial_node_count\" are specified.", + "items": { + "$ref": "NodePool" + } + }, + "locations": { + "type": "array", + "description": "The list of Google Compute Engine [locations](/compute/docs/zones#available) in which the cluster's nodes should be located.", + "items": { + "type": "string" + } + }, + "selfLink": { + "type": "string", + "description": "[Output only] Server-defined URL for the resource." + }, + "zone": { + "type": "string", + "description": "[Output only] The name of the Google Compute Engine [zone](/compute/docs/zones#available) in which the cluster resides." + }, + "endpoint": { + "type": "string", + "description": "[Output only] The IP address of this cluster's master endpoint. The endpoint can be accessed from the internet at `https://username:password@endpoint/`. See the `masterAuth` property of this resource for username and password information." + }, + "initialClusterVersion": { + "type": "string", + "description": "[Output only] The software version of the master endpoint and kubelets used in the cluster when it was first created. The version can be upgraded over time." + }, + "currentMasterVersion": { + "type": "string", + "description": "[Output only] The current software version of the master endpoint." + }, + "currentNodeVersion": { + "type": "string", + "description": "[Output only] The current version of the node software components. 
If they are currently at multiple versions because they're in the process of being upgraded, this reflects the minimum version of all nodes." + }, + "createTime": { + "type": "string", + "description": "[Output only] The time the cluster was created, in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format." + }, + "status": { + "type": "string", + "description": "[Output only] The current status of this cluster.", + "enum": [ + "STATUS_UNSPECIFIED", + "PROVISIONING", + "RUNNING", + "RECONCILING", + "STOPPING", + "ERROR" + ] + }, + "statusMessage": { + "type": "string", + "description": "[Output only] Additional information about the current status of this cluster, if available." + }, + "nodeIpv4CidrSize": { + "type": "integer", + "description": "[Output only] The size of the address space on each node for hosting containers. This is provisioned from within the `container_ipv4_cidr` range.", + "format": "int32" + }, + "servicesIpv4Cidr": { + "type": "string", + "description": "[Output only] The IP address range of the Kubernetes services in this cluster, in [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) notation (e.g. `1.2.3.4/29`). Service addresses are typically put in the last `/16` from the container CIDR." + }, + "instanceGroupUrls": { + "type": "array", + "description": "[Output only] The resource URLs of [instance groups](/compute/docs/instance-groups/) associated with this cluster.", + "items": { + "type": "string" + } + }, + "currentNodeCount": { + "type": "integer", + "description": "[Output only] The number of nodes currently in the cluster.", + "format": "int32" + } + } + }, + "NodeConfig": { + "id": "NodeConfig", + "type": "object", + "description": "Parameters that describe the nodes in a cluster.", + "properties": { + "machineType": { + "type": "string", + "description": "The name of a Google Compute Engine [machine type](/compute/docs/machine-types) (e.g. `n1-standard-1`). If unspecified, the default machine type is `n1-standard-1`." + }, + "diskSizeGb": { + "type": "integer", + "description": "Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB. If unspecified, the default disk size is 100GB.", + "format": "int32" + }, + "oauthScopes": { + "type": "array", + "description": "The set of Google API scopes to be made available on all of the node VMs under the \"default\" service account. The following scopes are recommended, but not required, and by default are not included: * `https://www.googleapis.com/auth/compute` is required for mounting persistent storage on your nodes. * `https://www.googleapis.com/auth/devstorage.read_only` is required for communicating with **gcr.io** (the [Google Container Registry](/container-registry/)). If unspecified, no scopes are added, unless Cloud Logging or Cloud Monitoring are enabled, in which case their required scopes will be added.", + "items": { + "type": "string" + } + }, + "metadata": { + "type": "object", + "description": "The metadata key/value pairs assigned to instances in the cluster. Keys must conform to the regexp [a-zA-Z0-9-_]+ and be less than 128 bytes in length. These are reflected as part of a URL in the metadata server. Additionally, to avoid ambiguity, keys must not conflict with any other metadata keys for the project or be one of the four reserved keys: \"instance-template\", \"kube-env\", \"startup-script\", and \"user-data\" Values are free-form strings, and only have meaning as interpreted by the image running in the instance. 
The only restriction placed on them is that each value's size must be less than or equal to 32 KB. The total size of all keys and values must be less than 512 KB.", + "additionalProperties": { + "type": "string" + } + } + } + }, + "MasterAuth": { + "id": "MasterAuth", + "type": "object", + "description": "The authentication information for accessing the master endpoint. Authentication can be done using HTTP basic auth or using client certificates.", + "properties": { + "username": { + "type": "string", + "description": "The username to use for HTTP basic authentication to the master endpoint." + }, + "password": { + "type": "string", + "description": "The password to use for HTTP basic authentication to the master endpoint. Because the master endpoint is open to the Internet, you should create a strong password." + }, + "clusterCaCertificate": { + "type": "string", + "description": "[Output only] Base64-encoded public certificate that is the root of trust for the cluster." + }, + "clientCertificate": { + "type": "string", + "description": "[Output only] Base64-encoded public certificate used by clients to authenticate to the cluster endpoint." + }, + "clientKey": { + "type": "string", + "description": "[Output only] Base64-encoded private key used by clients to authenticate to the cluster endpoint." + } + } + }, + "AddonsConfig": { + "id": "AddonsConfig", + "type": "object", + "description": "Configuration for the addons that can be automatically spun up in the cluster, enabling additional functionality.", + "properties": { + "httpLoadBalancing": { + "$ref": "HttpLoadBalancing", + "description": "Configuration for the HTTP (L7) load balancing controller addon, which makes it easy to set up HTTP load balancers for services in a cluster." + }, + "horizontalPodAutoscaling": { + "$ref": "HorizontalPodAutoscaling", + "description": "Configuration for the horizontal pod autoscaling feature, which increases or decreases the number of replica pods a replication controller has based on the resource usage of the existing pods." + } + } + }, + "HttpLoadBalancing": { + "id": "HttpLoadBalancing", + "type": "object", + "description": "Configuration options for the HTTP (L7) load balancing controller addon, which makes it easy to set up HTTP load balancers for services in a cluster.", + "properties": { + "disabled": { + "type": "boolean", + "description": "Whether the HTTP Load Balancing controller is enabled in the cluster. When enabled, it runs a small pod in the cluster that manages the load balancers." + } + } + }, + "HorizontalPodAutoscaling": { + "id": "HorizontalPodAutoscaling", + "type": "object", + "description": "Configuration options for the horizontal pod autoscaling feature, which increases or decreases the number of replica pods a replication controller has based on the resource usage of the existing pods.", + "properties": { + "disabled": { + "type": "boolean", + "description": "Whether the Horizontal Pod Autoscaling feature is enabled in the cluster. When enabled, it ensures that a Heapster pod is running in the cluster, which is also used by the Cloud Monitoring service." + } + } + }, + "NodePool": { + "id": "NodePool", + "type": "object", + "description": "NodePool contains the name and configuration for a cluster's node pool. Node pools are a set of nodes (i.e. VM's), with a common configuration and specification, under the control of the cluster master. They may have a set of Kubernetes labels applied to them, which may be used to reference them during pod scheduling. 
They may also be resized up or down, to accommodate the workload.", + "properties": { + "name": { + "type": "string", + "description": "The name of the node pool." + }, + "config": { + "$ref": "NodeConfig", + "description": "The node configuration of the pool." + }, + "initialNodeCount": { + "type": "integer", + "description": "The initial node count for the pool. You must ensure that your Compute Engine resource quota is sufficient for this number of instances. You must also have available firewall and routes quota.", + "format": "int32" + }, + "selfLink": { + "type": "string", + "description": "Server-defined URL for the resource." + }, + "version": { + "type": "string", + "description": "The version of the Kubernetes of this node." + }, + "instanceGroupUrls": { + "type": "array", + "description": "[Output only] The resource URLs of [instance groups](/compute/docs/instance-groups/) associated with this node pool.", + "items": { + "type": "string" + } + }, + "status": { + "type": "string", + "description": "The status of the nodes in this pool instance.", + "enum": [ + "STATUS_UNSPECIFIED", + "PROVISIONING", + "RUNNING", + "RUNNING_WITH_ERROR", + "RECONCILING", + "STOPPING", + "ERROR" + ] + }, + "statusMessage": { + "type": "string", + "description": "[Output only] Additional information about the current status of this node pool instance, if available." + } + } + }, + "CreateClusterRequest": { + "id": "CreateClusterRequest", + "type": "object", + "description": "CreateClusterRequest creates a cluster.", + "properties": { + "cluster": { + "$ref": "Cluster", + "description": "A [cluster resource](/container-engine/reference/rest/v1/projects.zones.clusters)" + } + } + }, + "Operation": { + "id": "Operation", + "type": "object", + "description": "This operation resource represents operations that may have happened or are happening on the cluster. All fields are output only.", + "properties": { + "name": { + "type": "string", + "description": "The server-assigned ID for the operation." + }, + "zone": { + "type": "string", + "description": "The name of the Google Compute Engine [zone](/compute/docs/zones#available) in which the operation is taking place." + }, + "operationType": { + "type": "string", + "description": "The operation type.", + "enum": [ + "TYPE_UNSPECIFIED", + "CREATE_CLUSTER", + "DELETE_CLUSTER", + "UPGRADE_MASTER", + "UPGRADE_NODES", + "REPAIR_CLUSTER", + "UPDATE_CLUSTER", + "CREATE_NODE_POOL", + "DELETE_NODE_POOL" + ] + }, + "status": { + "type": "string", + "description": "The current status of the operation.", + "enum": [ + "STATUS_UNSPECIFIED", + "PENDING", + "RUNNING", + "DONE" + ] + }, + "detail": { + "type": "string", + "description": "Detailed operation progress, if available." + }, + "statusMessage": { + "type": "string", + "description": "If an error has occurred, a textual description of the error." + }, + "selfLink": { + "type": "string", + "description": "Server-defined URL for the resource." + }, + "targetLink": { + "type": "string", + "description": "Server-defined URL for the target of the operation." + } + } + }, + "UpdateClusterRequest": { + "id": "UpdateClusterRequest", + "type": "object", + "description": "UpdateClusterRequest updates the settings of a cluster.", + "properties": { + "update": { + "$ref": "ClusterUpdate", + "description": "A description of the update." + } + } + }, + "ClusterUpdate": { + "id": "ClusterUpdate", + "type": "object", + "description": "ClusterUpdate describes an update to the cluster. 
Exactly one update can be applied to a cluster with each request, so at most one field can be provided.", + "properties": { + "desiredNodeVersion": { + "type": "string", + "description": "The Kubernetes version to change the nodes to (typically an upgrade). Use `-` to upgrade to the latest version supported by the server." + }, + "desiredMonitoringService": { + "type": "string", + "description": "The monitoring service the cluster should use to write metrics. Currently available options: * \"monitoring.googleapis.com\" - the Google Cloud Monitoring service * \"none\" - no metrics will be exported from the cluster" + }, + "desiredAddonsConfig": { + "$ref": "AddonsConfig", + "description": "Configurations for the various addons available to run in the cluster." + }, + "desiredNodePoolId": { + "type": "string", + "description": "The node pool to be upgraded. This field is mandatory if the \"desired_node_version\" or \"desired_image_family\" is specified and there is more than one node pool on the cluster." + }, + "desiredMasterVersion": { + "type": "string", + "description": "The Kubernetes version to change the master to. The only valid value is the latest supported version. Use \"-\" to have the server automatically select the latest version." + } + } + }, + "ListOperationsResponse": { + "id": "ListOperationsResponse", + "type": "object", + "description": "ListOperationsResponse is the result of ListOperationsRequest.", + "properties": { + "operations": { + "type": "array", + "description": "A list of operations in the project in the specified zone.", + "items": { + "$ref": "Operation" + } + }, + "missingZones": { + "type": "array", + "description": "If any zones are listed here, the list of operations returned may be missing the operations from those zones.", + "items": { + "type": "string" + } + } + } + }, + "ServerConfig": { + "id": "ServerConfig", + "type": "object", + "description": "Container Engine service configuration.", + "properties": { + "defaultClusterVersion": { + "type": "string", + "description": "Version of Kubernetes the service deploys by default." + }, + "validNodeVersions": { + "type": "array", + "description": "List of valid node upgrade target versions.", + "items": { + "type": "string" + } + }, + "defaultImageFamily": { + "type": "string", + "description": "Default image family." + }, + "validImageFamilies": { + "type": "array", + "description": "List of valid image families.", + "items": { + "type": "string" + } + } + } + }, + "ListNodePoolsResponse": { + "id": "ListNodePoolsResponse", + "type": "object", + "description": "ListNodePoolsResponse is the result of ListNodePoolsRequest.", + "properties": { + "nodePools": { + "type": "array", + "description": "A list of node pools for a cluster.", + "items": { + "$ref": "NodePool" + } + } + } + }, + "CreateNodePoolRequest": { + "id": "CreateNodePoolRequest", + "type": "object", + "description": "CreateNodePoolRequest creates a node pool for a cluster.", + "properties": { + "nodePool": { + "$ref": "NodePool", + "description": "The node pool to create." 
+ } + } + } + }, + "resources": { + "projects": { + "resources": { + "zones": { + "methods": { + "getServerconfig": { + "id": "container.projects.zones.getServerconfig", + "path": "v1/projects/{projectId}/zones/{zone}/serverconfig", + "httpMethod": "GET", + "description": "Returns configuration info about the Container Engine service.", + "parameters": { + "projectId": { + "type": "string", + "description": "The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840).", + "required": true, + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the Google Compute Engine [zone](/compute/docs/zones#available) to return operations for.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "projectId", + "zone" + ], + "response": { + "$ref": "ServerConfig" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + }, + "resources": { + "clusters": { + "methods": { + "list": { + "id": "container.projects.zones.clusters.list", + "path": "v1/projects/{projectId}/zones/{zone}/clusters", + "httpMethod": "GET", + "description": "Lists all clusters owned by a project in either the specified zone or all zones.", + "parameters": { + "projectId": { + "type": "string", + "description": "The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840).", + "required": true, + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the Google Compute Engine [zone](/compute/docs/zones#available) in which the cluster resides, or \"-\" for all zones.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "projectId", + "zone" + ], + "response": { + "$ref": "ListClustersResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "get": { + "id": "container.projects.zones.clusters.get", + "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}", + "httpMethod": "GET", + "description": "Gets the details of a specific cluster.", + "parameters": { + "projectId": { + "type": "string", + "description": "The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840).", + "required": true, + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the Google Compute Engine [zone](/compute/docs/zones#available) in which the cluster resides.", + "required": true, + "location": "path" + }, + "clusterId": { + "type": "string", + "description": "The name of the cluster to retrieve.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "projectId", + "zone", + "clusterId" + ], + "response": { + "$ref": "Cluster" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "create": { + "id": "container.projects.zones.clusters.create", + "path": "v1/projects/{projectId}/zones/{zone}/clusters", + "httpMethod": "POST", + "description": "Creates a cluster, consisting of the specified number and type of Google Compute Engine instances. By default, the cluster is created in the project's [default network](/compute/docs/networks-and-firewalls#networks). One firewall is added for the cluster. After cluster creation, the cluster creates routes for each node to allow the containers on that node to communicate with all other instances in the cluster. 
Finally, an entry is added to the project's global metadata indicating which CIDR range is being used by the cluster.", + "parameters": { + "projectId": { + "type": "string", + "description": "The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840).", + "required": true, + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the Google Compute Engine [zone](/compute/docs/zones#available) in which the cluster resides.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "projectId", + "zone" + ], + "request": { + "$ref": "CreateClusterRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "update": { + "id": "container.projects.zones.clusters.update", + "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}", + "httpMethod": "PUT", + "description": "Updates the settings of a specific cluster.", + "parameters": { + "projectId": { + "type": "string", + "description": "The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840).", + "required": true, + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the Google Compute Engine [zone](/compute/docs/zones#available) in which the cluster resides.", + "required": true, + "location": "path" + }, + "clusterId": { + "type": "string", + "description": "The name of the cluster to upgrade.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "projectId", + "zone", + "clusterId" + ], + "request": { + "$ref": "UpdateClusterRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "delete": { + "id": "container.projects.zones.clusters.delete", + "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}", + "httpMethod": "DELETE", + "description": "Deletes the cluster, including the Kubernetes endpoint and all worker nodes. Firewalls and routes that were configured during cluster creation are also deleted. Other Google Compute Engine resources that might be in use by the cluster (e.g. 
load balancer resources) will not be deleted if they weren't present at the initial create time.", + "parameters": { + "projectId": { + "type": "string", + "description": "The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840).", + "required": true, + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the Google Compute Engine [zone](/compute/docs/zones#available) in which the cluster resides.", + "required": true, + "location": "path" + }, + "clusterId": { + "type": "string", + "description": "The name of the cluster to delete.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "projectId", + "zone", + "clusterId" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + }, + "resources": { + "nodePools": { + "methods": { + "list": { + "id": "container.projects.zones.clusters.nodePools.list", + "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools", + "httpMethod": "GET", + "description": "Lists the node pools for a cluster.", + "parameters": { + "projectId": { + "type": "string", + "description": "The Google Developers Console [project ID or project number](https://developers.google.com/console/help/new/#projectnumber).", + "required": true, + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the Google Compute Engine [zone](/compute/docs/zones#available) in which the cluster resides.", + "required": true, + "location": "path" + }, + "clusterId": { + "type": "string", + "description": "The name of the cluster.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "projectId", + "zone", + "clusterId" + ], + "response": { + "$ref": "ListNodePoolsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "get": { + "id": "container.projects.zones.clusters.nodePools.get", + "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}", + "httpMethod": "GET", + "description": "Retrieves the node pool requested.", + "parameters": { + "projectId": { + "type": "string", + "description": "The Google Developers Console [project ID or project number](https://developers.google.com/console/help/new/#projectnumber).", + "required": true, + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the Google Compute Engine [zone](/compute/docs/zones#available) in which the cluster resides.", + "required": true, + "location": "path" + }, + "clusterId": { + "type": "string", + "description": "The name of the cluster.", + "required": true, + "location": "path" + }, + "nodePoolId": { + "type": "string", + "description": "The name of the node pool.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "projectId", + "zone", + "clusterId", + "nodePoolId" + ], + "response": { + "$ref": "NodePool" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "create": { + "id": "container.projects.zones.clusters.nodePools.create", + "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools", + "httpMethod": "POST", + "description": "Creates a node pool for a cluster.", + "parameters": { + "projectId": { + "type": "string", + "description": "The Google Developers Console [project ID or project number](https://developers.google.com/console/help/new/#projectnumber).", + "required": true, + "location": "path" + 
}, + "zone": { + "type": "string", + "description": "The name of the Google Compute Engine [zone](/compute/docs/zones#available) in which the cluster resides.", + "required": true, + "location": "path" + }, + "clusterId": { + "type": "string", + "description": "The name of the cluster.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "projectId", + "zone", + "clusterId" + ], + "request": { + "$ref": "CreateNodePoolRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "delete": { + "id": "container.projects.zones.clusters.nodePools.delete", + "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}", + "httpMethod": "DELETE", + "description": "Deletes a node pool from a cluster.", + "parameters": { + "projectId": { + "type": "string", + "description": "The Google Developers Console [project ID or project number](https://developers.google.com/console/help/new/#projectnumber).", + "required": true, + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the Google Compute Engine [zone](/compute/docs/zones#available) in which the cluster resides.", + "required": true, + "location": "path" + }, + "clusterId": { + "type": "string", + "description": "The name of the cluster.", + "required": true, + "location": "path" + }, + "nodePoolId": { + "type": "string", + "description": "The name of the node pool to delete.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "projectId", + "zone", + "clusterId", + "nodePoolId" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + } + } + }, + "operations": { + "methods": { + "list": { + "id": "container.projects.zones.operations.list", + "path": "v1/projects/{projectId}/zones/{zone}/operations", + "httpMethod": "GET", + "description": "Lists all operations in a project in a specific zone or all zones.", + "parameters": { + "projectId": { + "type": "string", + "description": "The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840).", + "required": true, + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the Google Compute Engine [zone](/compute/docs/zones#available) to return operations for, or `-` for all zones.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "projectId", + "zone" + ], + "response": { + "$ref": "ListOperationsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "get": { + "id": "container.projects.zones.operations.get", + "path": "v1/projects/{projectId}/zones/{zone}/operations/{operationId}", + "httpMethod": "GET", + "description": "Gets the specified operation.", + "parameters": { + "projectId": { + "type": "string", + "description": "The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840).", + "required": true, + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the Google Compute Engine [zone](/compute/docs/zones#available) in which the cluster resides.", + "required": true, + "location": "path" + }, + "operationId": { + "type": "string", + "description": "The server-assigned `name` of the operation.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "projectId", + "zone", + "operationId" + ], + "response": 
{ + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + } + } + } + } + } + } +} diff --git a/Godeps/_workspace/src/google.golang.org/api/container/v1/container-gen.go b/Godeps/_workspace/src/google.golang.org/api/container/v1/container-gen.go new file mode 100644 index 000000000000..5028d2233b11 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/api/container/v1/container-gen.go @@ -0,0 +1,2519 @@ +// Package container provides access to the Google Container Engine API. +// +// See https://cloud.google.com/container-engine/ +// +// Usage example: +// +// import "google.golang.org/api/container/v1" +// ... +// containerService, err := container.New(oauthHttpClient) +package container + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + context "golang.org/x/net/context" + ctxhttp "golang.org/x/net/context/ctxhttp" + gensupport "google.golang.org/api/gensupport" + googleapi "google.golang.org/api/googleapi" + "io" + "net/http" + "net/url" + "strconv" + "strings" +) + +// Always reference these packages, just in case the auto-generated code +// below doesn't. +var _ = bytes.NewBuffer +var _ = strconv.Itoa +var _ = fmt.Sprintf +var _ = json.NewDecoder +var _ = io.Copy +var _ = url.Parse +var _ = gensupport.MarshalJSON +var _ = googleapi.Version +var _ = errors.New +var _ = strings.Replace +var _ = context.Canceled +var _ = ctxhttp.Do + +const apiId = "container:v1" +const apiName = "container" +const apiVersion = "v1" +const basePath = "https://container.googleapis.com/" + +// OAuth2 scopes used by this API. +const ( + // View and manage your data across Google Cloud Platform services + CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform" +) + +func New(client *http.Client) (*Service, error) { + if client == nil { + return nil, errors.New("client is nil") + } + s := &Service{client: client, BasePath: basePath} + s.Projects = NewProjectsService(s) + return s, nil +} + +type Service struct { + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + + Projects *ProjectsService +} + +func (s *Service) userAgent() string { + if s.UserAgent == "" { + return googleapi.UserAgent + } + return googleapi.UserAgent + " " + s.UserAgent +} + +func NewProjectsService(s *Service) *ProjectsService { + rs := &ProjectsService{s: s} + rs.Zones = NewProjectsZonesService(s) + return rs +} + +type ProjectsService struct { + s *Service + + Zones *ProjectsZonesService +} + +func NewProjectsZonesService(s *Service) *ProjectsZonesService { + rs := &ProjectsZonesService{s: s} + rs.Clusters = NewProjectsZonesClustersService(s) + rs.Operations = NewProjectsZonesOperationsService(s) + return rs +} + +type ProjectsZonesService struct { + s *Service + + Clusters *ProjectsZonesClustersService + + Operations *ProjectsZonesOperationsService +} + +func NewProjectsZonesClustersService(s *Service) *ProjectsZonesClustersService { + rs := &ProjectsZonesClustersService{s: s} + rs.NodePools = NewProjectsZonesClustersNodePoolsService(s) + return rs +} + +type ProjectsZonesClustersService struct { + s *Service + + NodePools *ProjectsZonesClustersNodePoolsService +} + +func NewProjectsZonesClustersNodePoolsService(s *Service) *ProjectsZonesClustersNodePoolsService { + rs := &ProjectsZonesClustersNodePoolsService{s: s} + return rs +} + +type ProjectsZonesClustersNodePoolsService struct { + s *Service +} + +func NewProjectsZonesOperationsService(s *Service) 
*ProjectsZonesOperationsService { + rs := &ProjectsZonesOperationsService{s: s} + return rs +} + +type ProjectsZonesOperationsService struct { + s *Service +} + +// AddonsConfig: Configuration for the addons that can be automatically +// spun up in the cluster, enabling additional functionality. +type AddonsConfig struct { + // HorizontalPodAutoscaling: Configuration for the horizontal pod + // autoscaling feature, which increases or decreases the number of + // replica pods a replication controller has based on the resource usage + // of the existing pods. + HorizontalPodAutoscaling *HorizontalPodAutoscaling `json:"horizontalPodAutoscaling,omitempty"` + + // HttpLoadBalancing: Configuration for the HTTP (L7) load balancing + // controller addon, which makes it easy to set up HTTP load balancers + // for services in a cluster. + HttpLoadBalancing *HttpLoadBalancing `json:"httpLoadBalancing,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "HorizontalPodAutoscaling") to unconditionally include in API + // requests. By default, fields with empty values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *AddonsConfig) MarshalJSON() ([]byte, error) { + type noMethod AddonsConfig + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// Cluster: A Google Container Engine cluster. +type Cluster struct { + // AddonsConfig: Configurations for the various addons available to run + // in the cluster. + AddonsConfig *AddonsConfig `json:"addonsConfig,omitempty"` + + // ClusterIpv4Cidr: The IP address range of the container pods in this + // cluster, in + // [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) + // notation (e.g. `10.96.0.0/14`). Leave blank to have one automatically + // chosen or specify a `/14` block in `10.0.0.0/8`. + ClusterIpv4Cidr string `json:"clusterIpv4Cidr,omitempty"` + + // CreateTime: [Output only] The time the cluster was created, in + // [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format. + CreateTime string `json:"createTime,omitempty"` + + // CurrentMasterVersion: [Output only] The current software version of + // the master endpoint. + CurrentMasterVersion string `json:"currentMasterVersion,omitempty"` + + // CurrentNodeCount: [Output only] The number of nodes currently in the + // cluster. + CurrentNodeCount int64 `json:"currentNodeCount,omitempty"` + + // CurrentNodeVersion: [Output only] The current version of the node + // software components. If they are currently at multiple versions + // because they're in the process of being upgraded, this reflects the + // minimum version of all nodes. + CurrentNodeVersion string `json:"currentNodeVersion,omitempty"` + + // Description: An optional description of this cluster. + Description string `json:"description,omitempty"` + + // Endpoint: [Output only] The IP address of this cluster's master + // endpoint. The endpoint can be accessed from the internet at + // `https://username:password@endpoint/`. See the `masterAuth` property + // of this resource for username and password information. + Endpoint string `json:"endpoint,omitempty"` + + // InitialClusterVersion: [Output only] The software version of the + // master endpoint and kubelets used in the cluster when it was first + // created. 
The version can be upgraded over time. + InitialClusterVersion string `json:"initialClusterVersion,omitempty"` + + // InitialNodeCount: The number of nodes to create in this cluster. You + // must ensure that your Compute Engine resource quota is sufficient for + // this number of instances. You must also have available firewall and + // routes quota. For requests, this field should only be used in lieu of + // a "node_pool" object, since this configuration (along with the + // "node_config") will be used to create a "NodePool" object with an + // auto-generated name. Do not use this and a node_pool at the same + // time. + InitialNodeCount int64 `json:"initialNodeCount,omitempty"` + + // InstanceGroupUrls: [Output only] The resource URLs of [instance + // groups](/compute/docs/instance-groups/) associated with this cluster. + InstanceGroupUrls []string `json:"instanceGroupUrls,omitempty"` + + // Locations: The list of Google Compute Engine + // [locations](/compute/docs/zones#available) in which the cluster's + // nodes should be located. + Locations []string `json:"locations,omitempty"` + + // LoggingService: The logging service the cluster should use to write + // logs. Currently available options: * `logging.googleapis.com` - the + // Google Cloud Logging service. * `none` - no logs will be exported + // from the cluster. * if left as an empty + // string,`logging.googleapis.com` will be used. + LoggingService string `json:"loggingService,omitempty"` + + // MasterAuth: The authentication information for accessing the master + // endpoint. + MasterAuth *MasterAuth `json:"masterAuth,omitempty"` + + // MonitoringService: The monitoring service the cluster should use to + // write metrics. Currently available options: * + // `monitoring.googleapis.com` - the Google Cloud Monitoring service. * + // `none` - no metrics will be exported from the cluster. * if left as + // an empty string, `monitoring.googleapis.com` will be used. + MonitoringService string `json:"monitoringService,omitempty"` + + // Name: The name of this cluster. The name must be unique within this + // project and zone, and can be up to 40 characters with the following + // restrictions: * Lowercase letters, numbers, and hyphens only. * Must + // start with a letter. * Must end with a number or a letter. + Name string `json:"name,omitempty"` + + // Network: The name of the Google Compute Engine + // [network](/compute/docs/networks-and-firewalls#networks) to which the + // cluster is connected. If left unspecified, the `default` network will + // be used. + Network string `json:"network,omitempty"` + + // NodeConfig: Parameters used in creating the cluster's nodes. See + // `nodeConfig` for the description of its properties. For requests, + // this field should only be used in lieu of a "node_pool" object, since + // this configuration (along with the "initial_node_count") will be used + // to create a "NodePool" object with an auto-generated name. Do not use + // this and a node_pool at the same time. For responses, this field will + // be populated with the node configuration of the first node pool. If + // unspecified, the defaults are used. + NodeConfig *NodeConfig `json:"nodeConfig,omitempty"` + + // NodeIpv4CidrSize: [Output only] The size of the address space on each + // node for hosting containers. This is provisioned from within the + // `container_ipv4_cidr` range. + NodeIpv4CidrSize int64 `json:"nodeIpv4CidrSize,omitempty"` + + // NodePools: The node pools associated with this cluster. 
When creating + // a new cluster, only a single node pool should be specified. This + // field should not be set if "node_config" or "initial_node_count" are + // specified. + NodePools []*NodePool `json:"nodePools,omitempty"` + + // SelfLink: [Output only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServicesIpv4Cidr: [Output only] The IP address range of the + // Kubernetes services in this cluster, in + // [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) + // notation (e.g. `1.2.3.4/29`). Service addresses are typically put in + // the last `/16` from the container CIDR. + ServicesIpv4Cidr string `json:"servicesIpv4Cidr,omitempty"` + + // Status: [Output only] The current status of this cluster. + // + // Possible values: + // "STATUS_UNSPECIFIED" + // "PROVISIONING" + // "RUNNING" + // "RECONCILING" + // "STOPPING" + // "ERROR" + Status string `json:"status,omitempty"` + + // StatusMessage: [Output only] Additional information about the current + // status of this cluster, if available. + StatusMessage string `json:"statusMessage,omitempty"` + + // Subnetwork: The name of the Google Compute Engine + // [subnetwork](/compute/docs/subnetworks) to which the cluster is + // connected. + Subnetwork string `json:"subnetwork,omitempty"` + + // Zone: [Output only] The name of the Google Compute Engine + // [zone](/compute/docs/zones#available) in which the cluster resides. + Zone string `json:"zone,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "AddonsConfig") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *Cluster) MarshalJSON() ([]byte, error) { + type noMethod Cluster + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// ClusterUpdate: ClusterUpdate describes an update to the cluster. +// Exactly one update can be applied to a cluster with each request, so +// at most one field can be provided. +type ClusterUpdate struct { + // DesiredAddonsConfig: Configurations for the various addons available + // to run in the cluster. + DesiredAddonsConfig *AddonsConfig `json:"desiredAddonsConfig,omitempty"` + + // DesiredMasterVersion: The Kubernetes version to change the master to. + // The only valid value is the latest supported version. Use "-" to have + // the server automatically select the latest version. + DesiredMasterVersion string `json:"desiredMasterVersion,omitempty"` + + // DesiredMonitoringService: The monitoring service the cluster should + // use to write metrics. Currently available options: * + // "monitoring.googleapis.com" - the Google Cloud Monitoring service * + // "none" - no metrics will be exported from the cluster + DesiredMonitoringService string `json:"desiredMonitoringService,omitempty"` + + // DesiredNodePoolId: The node pool to be upgraded. This field is + // mandatory if the "desired_node_version" or "desired_image_family" is + // specified and there is more than one node pool on the cluster. 
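+	//
+	// For example, upgrading one pool's nodes to the latest supported
+	// version might be expressed as (a sketch; the pool name here is
+	// illustrative):
+	//
+	//	update := &ClusterUpdate{
+	//		DesiredNodeVersion: "-",
+	//		DesiredNodePoolId:  "default-pool",
+	//	}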
+ DesiredNodePoolId string `json:"desiredNodePoolId,omitempty"` + + // DesiredNodeVersion: The Kubernetes version to change the nodes to + // (typically an upgrade). Use `-` to upgrade to the latest version + // supported by the server. + DesiredNodeVersion string `json:"desiredNodeVersion,omitempty"` + + // ForceSendFields is a list of field names (e.g. "DesiredAddonsConfig") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *ClusterUpdate) MarshalJSON() ([]byte, error) { + type noMethod ClusterUpdate + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// CreateClusterRequest: CreateClusterRequest creates a cluster. +type CreateClusterRequest struct { + // Cluster: A [cluster + // resource](/container-engine/reference/rest/v1/projects.zones.clusters) + Cluster *Cluster `json:"cluster,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Cluster") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *CreateClusterRequest) MarshalJSON() ([]byte, error) { + type noMethod CreateClusterRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// CreateNodePoolRequest: CreateNodePoolRequest creates a node pool for +// a cluster. +type CreateNodePoolRequest struct { + // NodePool: The node pool to create. + NodePool *NodePool `json:"nodePool,omitempty"` + + // ForceSendFields is a list of field names (e.g. "NodePool") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *CreateNodePoolRequest) MarshalJSON() ([]byte, error) { + type noMethod CreateNodePoolRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// HorizontalPodAutoscaling: Configuration options for the horizontal +// pod autoscaling feature, which increases or decreases the number of +// replica pods a replication controller has based on the resource usage +// of the existing pods. +type HorizontalPodAutoscaling struct { + // Disabled: Whether the Horizontal Pod Autoscaling feature is enabled + // in the cluster. When enabled, it ensures that a Heapster pod is + // running in the cluster, which is also used by the Cloud Monitoring + // service. + Disabled bool `json:"disabled,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Disabled") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, +	// non-interface field appearing in ForceSendFields will be sent to the +	// server regardless of whether the field is empty or not. This may be +	// used to include empty fields in Patch requests. +	ForceSendFields []string `json:"-"` +} + +func (s *HorizontalPodAutoscaling) MarshalJSON() ([]byte, error) { +	type noMethod HorizontalPodAutoscaling +	raw := noMethod(*s) +	return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// HttpLoadBalancing: Configuration options for the HTTP (L7) load +// balancing controller addon, which makes it easy to set up HTTP load +// balancers for services in a cluster. +type HttpLoadBalancing struct { +	// Disabled: Whether the HTTP Load Balancing controller is enabled in +	// the cluster. When enabled, it runs a small pod in the cluster that +	// manages the load balancers. +	Disabled bool `json:"disabled,omitempty"` + +	// ForceSendFields is a list of field names (e.g. "Disabled") to +	// unconditionally include in API requests. By default, fields with +	// empty values are omitted from API requests. However, any non-pointer, +	// non-interface field appearing in ForceSendFields will be sent to the +	// server regardless of whether the field is empty or not. This may be +	// used to include empty fields in Patch requests. +	ForceSendFields []string `json:"-"` +} + +func (s *HttpLoadBalancing) MarshalJSON() ([]byte, error) { +	type noMethod HttpLoadBalancing +	raw := noMethod(*s) +	return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// ListClustersResponse: ListClustersResponse is the result of +// ListClustersRequest. +type ListClustersResponse struct { +	// Clusters: A list of clusters in the project in the specified zone, or +	// across all zones. +	Clusters []*Cluster `json:"clusters,omitempty"` + +	// MissingZones: If any zones are listed here, the list of clusters +	// returned may be missing those zones. +	MissingZones []string `json:"missingZones,omitempty"` + +	// ServerResponse contains the HTTP response code and headers from the +	// server. +	googleapi.ServerResponse `json:"-"` + +	// ForceSendFields is a list of field names (e.g. "Clusters") to +	// unconditionally include in API requests. By default, fields with +	// empty values are omitted from API requests. However, any non-pointer, +	// non-interface field appearing in ForceSendFields will be sent to the +	// server regardless of whether the field is empty or not. This may be +	// used to include empty fields in Patch requests. +	ForceSendFields []string `json:"-"` +} + +func (s *ListClustersResponse) MarshalJSON() ([]byte, error) { +	type noMethod ListClustersResponse +	raw := noMethod(*s) +	return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// ListNodePoolsResponse: ListNodePoolsResponse is the result of +// ListNodePoolsRequest. +type ListNodePoolsResponse struct { +	// NodePools: A list of node pools for a cluster. +	NodePools []*NodePool `json:"nodePools,omitempty"` + +	// ServerResponse contains the HTTP response code and headers from the +	// server. +	googleapi.ServerResponse `json:"-"` + +	// ForceSendFields is a list of field names (e.g. "NodePools") to +	// unconditionally include in API requests. By default, fields with +	// empty values are omitted from API requests. However, any non-pointer, +	// non-interface field appearing in ForceSendFields will be sent to the +	// server regardless of whether the field is empty or not. This may be +	// used to include empty fields in Patch requests.
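+	//
+	// For example, to send an explicit false for a boolean field rather
+	// than have it dropped as empty, a caller might write (a sketch, not
+	// generated code):
+	//
+	//	cfg := &HttpLoadBalancing{Disabled: false}
+	//	cfg.ForceSendFields = []string{"Disabled"}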
+ ForceSendFields []string `json:"-"` +} + +func (s *ListNodePoolsResponse) MarshalJSON() ([]byte, error) { + type noMethod ListNodePoolsResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// ListOperationsResponse: ListOperationsResponse is the result of +// ListOperationsRequest. +type ListOperationsResponse struct { + // MissingZones: If any zones are listed here, the list of operations + // returned may be missing the operations from those zones. + MissingZones []string `json:"missingZones,omitempty"` + + // Operations: A list of operations in the project in the specified + // zone. + Operations []*Operation `json:"operations,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "MissingZones") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *ListOperationsResponse) MarshalJSON() ([]byte, error) { + type noMethod ListOperationsResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// MasterAuth: The authentication information for accessing the master +// endpoint. Authentication can be done using HTTP basic auth or using +// client certificates. +type MasterAuth struct { + // ClientCertificate: [Output only] Base64-encoded public certificate + // used by clients to authenticate to the cluster endpoint. + ClientCertificate string `json:"clientCertificate,omitempty"` + + // ClientKey: [Output only] Base64-encoded private key used by clients + // to authenticate to the cluster endpoint. + ClientKey string `json:"clientKey,omitempty"` + + // ClusterCaCertificate: [Output only] Base64-encoded public certificate + // that is the root of trust for the cluster. + ClusterCaCertificate string `json:"clusterCaCertificate,omitempty"` + + // Password: The password to use for HTTP basic authentication to the + // master endpoint. Because the master endpoint is open to the Internet, + // you should create a strong password. + Password string `json:"password,omitempty"` + + // Username: The username to use for HTTP basic authentication to the + // master endpoint. + Username string `json:"username,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ClientCertificate") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *MasterAuth) MarshalJSON() ([]byte, error) { + type noMethod MasterAuth + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// NodeConfig: Parameters that describe the nodes in a cluster. +type NodeConfig struct { + // DiskSizeGb: Size of the disk attached to each node, specified in GB. + // The smallest allowed disk size is 10GB. If unspecified, the default + // disk size is 100GB. 
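+	//
+	// A minimal NodeConfig relying on these defaults could look like this
+	// (a sketch; the values shown are just the documented defaults):
+	//
+	//	cfg := &NodeConfig{
+	//		MachineType: "n1-standard-1",
+	//		DiskSizeGb:  100,
+	//	}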
+ DiskSizeGb int64 `json:"diskSizeGb,omitempty"` + + // MachineType: The name of a Google Compute Engine [machine + // type](/compute/docs/machine-types) (e.g. `n1-standard-1`). If + // unspecified, the default machine type is `n1-standard-1`. + MachineType string `json:"machineType,omitempty"` + + // Metadata: The metadata key/value pairs assigned to instances in the + // cluster. Keys must conform to the regexp [a-zA-Z0-9-_]+ and be less + // than 128 bytes in length. These are reflected as part of a URL in the + // metadata server. Additionally, to avoid ambiguity, keys must not + // conflict with any other metadata keys for the project or be one of + // the four reserved keys: "instance-template", "kube-env", + // "startup-script", and "user-data" Values are free-form strings, and + // only have meaning as interpreted by the image running in the + // instance. The only restriction placed on them is that each value's + // size must be less than or equal to 32 KB. The total size of all keys + // and values must be less than 512 KB. + Metadata map[string]string `json:"metadata,omitempty"` + + // OauthScopes: The set of Google API scopes to be made available on all + // of the node VMs under the "default" service account. The following + // scopes are recommended, but not required, and by default are not + // included: * `https://www.googleapis.com/auth/compute` is required for + // mounting persistent storage on your nodes. * + // `https://www.googleapis.com/auth/devstorage.read_only` is required + // for communicating with **gcr.io** (the [Google Container + // Registry](/container-registry/)). If unspecified, no scopes are + // added, unless Cloud Logging or Cloud Monitoring are enabled, in which + // case their required scopes will be added. + OauthScopes []string `json:"oauthScopes,omitempty"` + + // ForceSendFields is a list of field names (e.g. "DiskSizeGb") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *NodeConfig) MarshalJSON() ([]byte, error) { + type noMethod NodeConfig + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// NodePool: NodePool contains the name and configuration for a +// cluster's node pool. Node pools are a set of nodes (i.e. VM's), with +// a common configuration and specification, under the control of the +// cluster master. They may have a set of Kubernetes labels applied to +// them, which may be used to reference them during pod scheduling. They +// may also be resized up or down, to accommodate the workload. +type NodePool struct { + // Config: The node configuration of the pool. + Config *NodeConfig `json:"config,omitempty"` + + // InitialNodeCount: The initial node count for the pool. You must + // ensure that your Compute Engine resource quota is sufficient for this + // number of instances. You must also have available firewall and routes + // quota. + InitialNodeCount int64 `json:"initialNodeCount,omitempty"` + + // InstanceGroupUrls: [Output only] The resource URLs of [instance + // groups](/compute/docs/instance-groups/) associated with this node + // pool. + InstanceGroupUrls []string `json:"instanceGroupUrls,omitempty"` + + // Name: The name of the node pool. 
+ Name string `json:"name,omitempty"` + + // SelfLink: Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // Status: The status of the nodes in this pool instance. + // + // Possible values: + // "STATUS_UNSPECIFIED" + // "PROVISIONING" + // "RUNNING" + // "RUNNING_WITH_ERROR" + // "RECONCILING" + // "STOPPING" + // "ERROR" + Status string `json:"status,omitempty"` + + // StatusMessage: [Output only] Additional information about the current + // status of this node pool instance, if available. + StatusMessage string `json:"statusMessage,omitempty"` + + // Version: The version of the Kubernetes of this node. + Version string `json:"version,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Config") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *NodePool) MarshalJSON() ([]byte, error) { + type noMethod NodePool + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// Operation: This operation resource represents operations that may +// have happened or are happening on the cluster. All fields are output +// only. +type Operation struct { + // Detail: Detailed operation progress, if available. + Detail string `json:"detail,omitempty"` + + // Name: The server-assigned ID for the operation. + Name string `json:"name,omitempty"` + + // OperationType: The operation type. + // + // Possible values: + // "TYPE_UNSPECIFIED" + // "CREATE_CLUSTER" + // "DELETE_CLUSTER" + // "UPGRADE_MASTER" + // "UPGRADE_NODES" + // "REPAIR_CLUSTER" + // "UPDATE_CLUSTER" + // "CREATE_NODE_POOL" + // "DELETE_NODE_POOL" + OperationType string `json:"operationType,omitempty"` + + // SelfLink: Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // Status: The current status of the operation. + // + // Possible values: + // "STATUS_UNSPECIFIED" + // "PENDING" + // "RUNNING" + // "DONE" + Status string `json:"status,omitempty"` + + // StatusMessage: If an error has occurred, a textual description of the + // error. + StatusMessage string `json:"statusMessage,omitempty"` + + // TargetLink: Server-defined URL for the target of the operation. + TargetLink string `json:"targetLink,omitempty"` + + // Zone: The name of the Google Compute Engine + // [zone](/compute/docs/zones#available) in which the operation is + // taking place. + Zone string `json:"zone,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Detail") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"` +} + +func (s *Operation) MarshalJSON() ([]byte, error) { + type noMethod Operation + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// ServerConfig: Container Engine service configuration. +type ServerConfig struct { + // DefaultClusterVersion: Version of Kubernetes the service deploys by + // default. + DefaultClusterVersion string `json:"defaultClusterVersion,omitempty"` + + // DefaultImageFamily: Default image family. + DefaultImageFamily string `json:"defaultImageFamily,omitempty"` + + // ValidImageFamilies: List of valid image families. + ValidImageFamilies []string `json:"validImageFamilies,omitempty"` + + // ValidNodeVersions: List of valid node upgrade target versions. + ValidNodeVersions []string `json:"validNodeVersions,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. + // "DefaultClusterVersion") to unconditionally include in API requests. + // By default, fields with empty values are omitted from API requests. + // However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *ServerConfig) MarshalJSON() ([]byte, error) { + type noMethod ServerConfig + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// UpdateClusterRequest: UpdateClusterRequest updates the settings of a +// cluster. +type UpdateClusterRequest struct { + // Update: A description of the update. + Update *ClusterUpdate `json:"update,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Update") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *UpdateClusterRequest) MarshalJSON() ([]byte, error) { + type noMethod UpdateClusterRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// method id "container.projects.zones.getServerconfig": + +type ProjectsZonesGetServerconfigCall struct { + s *Service + projectId string + zone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// GetServerconfig: Returns configuration info about the Container +// Engine service. +func (r *ProjectsZonesService) GetServerconfig(projectId string, zone string) *ProjectsZonesGetServerconfigCall { + c := &ProjectsZonesGetServerconfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + c.zone = zone + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsZonesGetServerconfigCall) Fields(s ...googleapi.Field) *ProjectsZonesGetServerconfigCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. 
This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *ProjectsZonesGetServerconfigCall) IfNoneMatch(entityTag string) *ProjectsZonesGetServerconfigCall { +	c.ifNoneMatch_ = entityTag +	return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsZonesGetServerconfigCall) Context(ctx context.Context) *ProjectsZonesGetServerconfigCall { +	c.ctx_ = ctx +	return c +} + +func (c *ProjectsZonesGetServerconfigCall) doRequest(alt string) (*http.Response, error) { +	var body io.Reader = nil +	c.urlParams_.Set("alt", alt) +	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/projects/{projectId}/zones/{zone}/serverconfig") +	urls += "?" + c.urlParams_.Encode() +	req, _ := http.NewRequest("GET", urls, body) +	googleapi.Expand(req.URL, map[string]string{ +		"projectId": c.projectId, +		"zone":      c.zone, +	}) +	req.Header.Set("User-Agent", c.s.userAgent()) +	if c.ifNoneMatch_ != "" { +		req.Header.Set("If-None-Match", c.ifNoneMatch_) +	} +	if c.ctx_ != nil { +		return ctxhttp.Do(c.ctx_, c.s.client, req) +	} +	return c.s.client.Do(req) +} + +// Do executes the "container.projects.zones.getServerconfig" call. +// Exactly one of *ServerConfig or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *ServerConfig.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsZonesGetServerconfigCall) Do(opts ...googleapi.CallOption) (*ServerConfig, error) { +	gensupport.SetOptions(c.urlParams_, opts...)
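+	// A 304 (Not Modified) response is surfaced as an error below: the
+	// *googleapi.Error carries the status code and headers, and callers
+	// can detect this case with googleapi.IsNotModified.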
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ServerConfig{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns configuration info about the Container Engine service.", + // "httpMethod": "GET", + // "id": "container.projects.zones.getServerconfig", + // "parameterOrder": [ + // "projectId", + // "zone" + // ], + // "parameters": { + // "projectId": { + // "description": "The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840).", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the Google Compute Engine [zone](/compute/docs/zones#available) to return operations for.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/projects/{projectId}/zones/{zone}/serverconfig", + // "response": { + // "$ref": "ServerConfig" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "container.projects.zones.clusters.create": + +type ProjectsZonesClustersCreateCall struct { + s *Service + projectId string + zone string + createclusterrequest *CreateClusterRequest + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Create: Creates a cluster, consisting of the specified number and +// type of Google Compute Engine instances. By default, the cluster is +// created in the project's [default +// network](/compute/docs/networks-and-firewalls#networks). One firewall +// is added for the cluster. After cluster creation, the cluster creates +// routes for each node to allow the containers on that node to +// communicate with all other instances in the cluster. Finally, an +// entry is added to the project's global metadata indicating which CIDR +// range is being used by the cluster. +func (r *ProjectsZonesClustersService) Create(projectId string, zone string, createclusterrequest *CreateClusterRequest) *ProjectsZonesClustersCreateCall { + c := &ProjectsZonesClustersCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + c.zone = zone + c.createclusterrequest = createclusterrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsZonesClustersCreateCall) Fields(s ...googleapi.Field) *ProjectsZonesClustersCreateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *ProjectsZonesClustersCreateCall) Context(ctx context.Context) *ProjectsZonesClustersCreateCall { + c.ctx_ = ctx + return c +} + +func (c *ProjectsZonesClustersCreateCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.createclusterrequest) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/projects/{projectId}/zones/{zone}/clusters") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "projectId": c.projectId, + "zone": c.zone, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "container.projects.zones.clusters.create" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsZonesClustersCreateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a cluster, consisting of the specified number and type of Google Compute Engine instances. By default, the cluster is created in the project's [default network](/compute/docs/networks-and-firewalls#networks). One firewall is added for the cluster. After cluster creation, the cluster creates routes for each node to allow the containers on that node to communicate with all other instances in the cluster. 
Finally, an entry is added to the project's global metadata indicating which CIDR range is being used by the cluster.", + // "httpMethod": "POST", + // "id": "container.projects.zones.clusters.create", + // "parameterOrder": [ + // "projectId", + // "zone" + // ], + // "parameters": { + // "projectId": { + // "description": "The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840).", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the Google Compute Engine [zone](/compute/docs/zones#available) in which the cluster resides.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/projects/{projectId}/zones/{zone}/clusters", + // "request": { + // "$ref": "CreateClusterRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "container.projects.zones.clusters.delete": + +type ProjectsZonesClustersDeleteCall struct { + s *Service + projectId string + zone string + clusterId string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Deletes the cluster, including the Kubernetes endpoint and +// all worker nodes. Firewalls and routes that were configured during +// cluster creation are also deleted. Other Google Compute Engine +// resources that might be in use by the cluster (e.g. load balancer +// resources) will not be deleted if they weren't present at the initial +// create time. +func (r *ProjectsZonesClustersService) Delete(projectId string, zone string, clusterId string) *ProjectsZonesClustersDeleteCall { + c := &ProjectsZonesClustersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + c.zone = zone + c.clusterId = clusterId + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsZonesClustersDeleteCall) Fields(s ...googleapi.Field) *ProjectsZonesClustersDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsZonesClustersDeleteCall) Context(ctx context.Context) *ProjectsZonesClustersDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *ProjectsZonesClustersDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "projectId": c.projectId, + "zone": c.zone, + "clusterId": c.clusterId, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "container.projects.zones.clusters.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsZonesClustersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the cluster, including the Kubernetes endpoint and all worker nodes. Firewalls and routes that were configured during cluster creation are also deleted. Other Google Compute Engine resources that might be in use by the cluster (e.g. load balancer resources) will not be deleted if they weren't present at the initial create time.", + // "httpMethod": "DELETE", + // "id": "container.projects.zones.clusters.delete", + // "parameterOrder": [ + // "projectId", + // "zone", + // "clusterId" + // ], + // "parameters": { + // "clusterId": { + // "description": "The name of the cluster to delete.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "projectId": { + // "description": "The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840).", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the Google Compute Engine [zone](/compute/docs/zones#available) in which the cluster resides.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "container.projects.zones.clusters.get": + +type ProjectsZonesClustersGetCall struct { + s *Service + projectId string + zone string + clusterId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// Get: Gets the details of a specific cluster. +func (r *ProjectsZonesClustersService) Get(projectId string, zone string, clusterId string) *ProjectsZonesClustersGetCall { + c := &ProjectsZonesClustersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + c.zone = zone + c.clusterId = clusterId + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsZonesClustersGetCall) Fields(s ...googleapi.Field) *ProjectsZonesClustersGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match.
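+//
+// For example, a conditional re-read using an ETag saved from an earlier
+// response (svc and the identifiers are placeholders supplied by the caller):
+//
+//   cluster, err := svc.Projects.Zones.Clusters.Get("my-project", "us-central1-a", "my-cluster").
+//           IfNoneMatch(prevETag).Do()
+//   if googleapi.IsNotModified(err) {
+//           // The cluster has not changed since prevETag was obtained.
+//   }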
+func (c *ProjectsZonesClustersGetCall) IfNoneMatch(entityTag string) *ProjectsZonesClustersGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsZonesClustersGetCall) Context(ctx context.Context) *ProjectsZonesClustersGetCall { + c.ctx_ = ctx + return c +} + +func (c *ProjectsZonesClustersGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "projectId": c.projectId, + "zone": c.zone, + "clusterId": c.clusterId, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "container.projects.zones.clusters.get" call. +// Exactly one of *Cluster or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Cluster.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ProjectsZonesClustersGetCall) Do(opts ...googleapi.CallOption) (*Cluster, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Cluster{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets the details of a specific cluster.", + // "httpMethod": "GET", + // "id": "container.projects.zones.clusters.get", + // "parameterOrder": [ + // "projectId", + // "zone", + // "clusterId" + // ], + // "parameters": { + // "clusterId": { + // "description": "The name of the cluster to retrieve.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "projectId": { + // "description": "The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840).", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the Google Compute Engine [zone](/compute/docs/zones#available) in which the cluster resides.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}", + // "response": { + // "$ref": "Cluster" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "container.projects.zones.clusters.list": + +type ProjectsZonesClustersListCall struct { + s *Service + projectId string + zone string + 
urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Lists all clusters owned by a project in either the specified +// zone or all zones. +func (r *ProjectsZonesClustersService) List(projectId string, zone string) *ProjectsZonesClustersListCall { + c := &ProjectsZonesClustersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + c.zone = zone + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsZonesClustersListCall) Fields(s ...googleapi.Field) *ProjectsZonesClustersListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *ProjectsZonesClustersListCall) IfNoneMatch(entityTag string) *ProjectsZonesClustersListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsZonesClustersListCall) Context(ctx context.Context) *ProjectsZonesClustersListCall { + c.ctx_ = ctx + return c +} + +func (c *ProjectsZonesClustersListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/projects/{projectId}/zones/{zone}/clusters") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "projectId": c.projectId, + "zone": c.zone, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "container.projects.zones.clusters.list" call. +// Exactly one of *ListClustersResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ListClustersResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsZonesClustersListCall) Do(opts ...googleapi.CallOption) (*ListClustersResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ListClustersResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists all clusters owned by a project in either the specified zone or all zones.", + // "httpMethod": "GET", + // "id": "container.projects.zones.clusters.list", + // "parameterOrder": [ + // "projectId", + // "zone" + // ], + // "parameters": { + // "projectId": { + // "description": "The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840).", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the Google Compute Engine [zone](/compute/docs/zones#available) in which the cluster resides, or \"-\" for all zones.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/projects/{projectId}/zones/{zone}/clusters", + // "response": { + // "$ref": "ListClustersResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "container.projects.zones.clusters.update": + +type ProjectsZonesClustersUpdateCall struct { + s *Service + projectId string + zone string + clusterId string + updateclusterrequest *UpdateClusterRequest + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Update: Updates the settings of a specific cluster. +func (r *ProjectsZonesClustersService) Update(projectId string, zone string, clusterId string, updateclusterrequest *UpdateClusterRequest) *ProjectsZonesClustersUpdateCall { + c := &ProjectsZonesClustersUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + c.zone = zone + c.clusterId = clusterId + c.updateclusterrequest = updateclusterrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsZonesClustersUpdateCall) Fields(s ...googleapi.Field) *ProjectsZonesClustersUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsZonesClustersUpdateCall) Context(ctx context.Context) *ProjectsZonesClustersUpdateCall { + c.ctx_ = ctx + return c +} + +func (c *ProjectsZonesClustersUpdateCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.updateclusterrequest) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("PUT", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "projectId": c.projectId, + "zone": c.zone, + "clusterId": c.clusterId, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "container.projects.zones.clusters.update" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsZonesClustersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates the settings of a specific cluster.", + // "httpMethod": "PUT", + // "id": "container.projects.zones.clusters.update", + // "parameterOrder": [ + // "projectId", + // "zone", + // "clusterId" + // ], + // "parameters": { + // "clusterId": { + // "description": "The name of the cluster to upgrade.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "projectId": { + // "description": "The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840).", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the Google Compute Engine [zone](/compute/docs/zones#available) in which the cluster resides.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}", + // "request": { + // "$ref": "UpdateClusterRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "container.projects.zones.clusters.nodePools.create": + +type ProjectsZonesClustersNodePoolsCreateCall struct { + s *Service + projectId string + zone string + clusterId string + createnodepoolrequest *CreateNodePoolRequest + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Create: Creates a node pool for a cluster. 
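+//
+// A minimal sketch of a call (the request fields and identifiers shown are
+// illustrative):
+//
+//   req := &CreateNodePoolRequest{NodePool: &NodePool{Name: "pool-1", InitialNodeCount: 3}}
+//   op, err := svc.Projects.Zones.Clusters.NodePools.Create("my-project", "us-central1-a", "my-cluster", req).Do()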
+func (r *ProjectsZonesClustersNodePoolsService) Create(projectId string, zone string, clusterId string, createnodepoolrequest *CreateNodePoolRequest) *ProjectsZonesClustersNodePoolsCreateCall { + c := &ProjectsZonesClustersNodePoolsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + c.zone = zone + c.clusterId = clusterId + c.createnodepoolrequest = createnodepoolrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsZonesClustersNodePoolsCreateCall) Fields(s ...googleapi.Field) *ProjectsZonesClustersNodePoolsCreateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsZonesClustersNodePoolsCreateCall) Context(ctx context.Context) *ProjectsZonesClustersNodePoolsCreateCall { + c.ctx_ = ctx + return c +} + +func (c *ProjectsZonesClustersNodePoolsCreateCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.createnodepoolrequest) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "projectId": c.projectId, + "zone": c.zone, + "clusterId": c.clusterId, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "container.projects.zones.clusters.nodePools.create" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsZonesClustersNodePoolsCreateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a node pool for a cluster.", + // "httpMethod": "POST", + // "id": "container.projects.zones.clusters.nodePools.create", + // "parameterOrder": [ + // "projectId", + // "zone", + // "clusterId" + // ], + // "parameters": { + // "clusterId": { + // "description": "The name of the cluster.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "projectId": { + // "description": "The Google Developers Console [project ID or project number](https://developers.google.com/console/help/new/#projectnumber).", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the Google Compute Engine [zone](/compute/docs/zones#available) in which the cluster resides.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools", + // "request": { + // "$ref": "CreateNodePoolRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "container.projects.zones.clusters.nodePools.delete": + +type ProjectsZonesClustersNodePoolsDeleteCall struct { + s *Service + projectId string + zone string + clusterId string + nodePoolId string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Deletes a node pool from a cluster. +func (r *ProjectsZonesClustersNodePoolsService) Delete(projectId string, zone string, clusterId string, nodePoolId string) *ProjectsZonesClustersNodePoolsDeleteCall { + c := &ProjectsZonesClustersNodePoolsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + c.zone = zone + c.clusterId = clusterId + c.nodePoolId = nodePoolId + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsZonesClustersNodePoolsDeleteCall) Fields(s ...googleapi.Field) *ProjectsZonesClustersNodePoolsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsZonesClustersNodePoolsDeleteCall) Context(ctx context.Context) *ProjectsZonesClustersNodePoolsDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *ProjectsZonesClustersNodePoolsDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "projectId": c.projectId, + "zone": c.zone, + "clusterId": c.clusterId, + "nodePoolId": c.nodePoolId, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "container.projects.zones.clusters.nodePools.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsZonesClustersNodePoolsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes a node pool from a cluster.", + // "httpMethod": "DELETE", + // "id": "container.projects.zones.clusters.nodePools.delete", + // "parameterOrder": [ + // "projectId", + // "zone", + // "clusterId", + // "nodePoolId" + // ], + // "parameters": { + // "clusterId": { + // "description": "The name of the cluster.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "nodePoolId": { + // "description": "The name of the node pool to delete.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "projectId": { + // "description": "The Google Developers Console [project ID or project number](https://developers.google.com/console/help/new/#projectnumber).", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the Google Compute Engine [zone](/compute/docs/zones#available) in which the cluster resides.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "container.projects.zones.clusters.nodePools.get": + +type ProjectsZonesClustersNodePoolsGetCall struct { + s *Service + projectId string + zone string + clusterId string + nodePoolId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// Get: Retrieves the node pool requested. 
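+//
+// For example (identifiers are placeholders):
+//
+//   np, err := svc.Projects.Zones.Clusters.NodePools.Get("my-project", "us-central1-a", "my-cluster", "pool-1").Do()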
+func (r *ProjectsZonesClustersNodePoolsService) Get(projectId string, zone string, clusterId string, nodePoolId string) *ProjectsZonesClustersNodePoolsGetCall { + c := &ProjectsZonesClustersNodePoolsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + c.zone = zone + c.clusterId = clusterId + c.nodePoolId = nodePoolId + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsZonesClustersNodePoolsGetCall) Fields(s ...googleapi.Field) *ProjectsZonesClustersNodePoolsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *ProjectsZonesClustersNodePoolsGetCall) IfNoneMatch(entityTag string) *ProjectsZonesClustersNodePoolsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsZonesClustersNodePoolsGetCall) Context(ctx context.Context) *ProjectsZonesClustersNodePoolsGetCall { + c.ctx_ = ctx + return c +} + +func (c *ProjectsZonesClustersNodePoolsGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "projectId": c.projectId, + "zone": c.zone, + "clusterId": c.clusterId, + "nodePoolId": c.nodePoolId, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "container.projects.zones.clusters.nodePools.get" call. +// Exactly one of *NodePool or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *NodePool.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsZonesClustersNodePoolsGetCall) Do(opts ...googleapi.CallOption) (*NodePool, error) { + gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &NodePool{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the node pool requested.", + // "httpMethod": "GET", + // "id": "container.projects.zones.clusters.nodePools.get", + // "parameterOrder": [ + // "projectId", + // "zone", + // "clusterId", + // "nodePoolId" + // ], + // "parameters": { + // "clusterId": { + // "description": "The name of the cluster.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "nodePoolId": { + // "description": "The name of the node pool.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "projectId": { + // "description": "The Google Developers Console [project ID or project number](https://developers.google.com/console/help/new/#projectnumber).", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the Google Compute Engine [zone](/compute/docs/zones#available) in which the cluster resides.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}", + // "response": { + // "$ref": "NodePool" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "container.projects.zones.clusters.nodePools.list": + +type ProjectsZonesClustersNodePoolsListCall struct { + s *Service + projectId string + zone string + clusterId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Lists the node pools for a cluster. +func (r *ProjectsZonesClustersNodePoolsService) List(projectId string, zone string, clusterId string) *ProjectsZonesClustersNodePoolsListCall { + c := &ProjectsZonesClustersNodePoolsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + c.zone = zone + c.clusterId = clusterId + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsZonesClustersNodePoolsListCall) Fields(s ...googleapi.Field) *ProjectsZonesClustersNodePoolsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *ProjectsZonesClustersNodePoolsListCall) IfNoneMatch(entityTag string) *ProjectsZonesClustersNodePoolsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled.
+func (c *ProjectsZonesClustersNodePoolsListCall) Context(ctx context.Context) *ProjectsZonesClustersNodePoolsListCall { + c.ctx_ = ctx + return c +} + +func (c *ProjectsZonesClustersNodePoolsListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "projectId": c.projectId, + "zone": c.zone, + "clusterId": c.clusterId, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "container.projects.zones.clusters.nodePools.list" call. +// Exactly one of *ListNodePoolsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ListNodePoolsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsZonesClustersNodePoolsListCall) Do(opts ...googleapi.CallOption) (*ListNodePoolsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ListNodePoolsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists the node pools for a cluster.", + // "httpMethod": "GET", + // "id": "container.projects.zones.clusters.nodePools.list", + // "parameterOrder": [ + // "projectId", + // "zone", + // "clusterId" + // ], + // "parameters": { + // "clusterId": { + // "description": "The name of the cluster.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "projectId": { + // "description": "The Google Developers Console [project ID or project number](https://developers.google.com/console/help/new/#projectnumber).", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the Google Compute Engine [zone](/compute/docs/zones#available) in which the cluster resides.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools", + // "response": { + // "$ref": "ListNodePoolsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "container.projects.zones.operations.get": + +type ProjectsZonesOperationsGetCall struct { + s *Service + projectId string + zone string + operationId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// Get: Gets the specified operation. 
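+//
+// Mutating calls in this API return a long-running Operation; a caller can
+// poll it with Get until it completes, as in this sketch (svc, opName, and
+// the poll interval are placeholders):
+//
+//   for {
+//           op, err := svc.Projects.Zones.Operations.Get("my-project", "us-central1-a", opName).Do()
+//           if err != nil {
+//                   return err
+//           }
+//           if op.Status == "DONE" {
+//                   break
+//           }
+//           time.Sleep(5 * time.Second)
+//   }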
+func (r *ProjectsZonesOperationsService) Get(projectId string, zone string, operationId string) *ProjectsZonesOperationsGetCall { + c := &ProjectsZonesOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + c.zone = zone + c.operationId = operationId + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsZonesOperationsGetCall) Fields(s ...googleapi.Field) *ProjectsZonesOperationsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *ProjectsZonesOperationsGetCall) IfNoneMatch(entityTag string) *ProjectsZonesOperationsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsZonesOperationsGetCall) Context(ctx context.Context) *ProjectsZonesOperationsGetCall { + c.ctx_ = ctx + return c +} + +func (c *ProjectsZonesOperationsGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/projects/{projectId}/zones/{zone}/operations/{operationId}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "projectId": c.projectId, + "zone": c.zone, + "operationId": c.operationId, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "container.projects.zones.operations.get" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsZonesOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets the specified operation.", + // "httpMethod": "GET", + // "id": "container.projects.zones.operations.get", + // "parameterOrder": [ + // "projectId", + // "zone", + // "operationId" + // ], + // "parameters": { + // "operationId": { + // "description": "The server-assigned `name` of the operation.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "projectId": { + // "description": "The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840).", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the Google Compute Engine [zone](/compute/docs/zones#available) in which the cluster resides.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/projects/{projectId}/zones/{zone}/operations/{operationId}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "container.projects.zones.operations.list": + +type ProjectsZonesOperationsListCall struct { + s *Service + projectId string + zone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Lists all operations in a project in a specific zone or all +// zones. +func (r *ProjectsZonesOperationsService) List(projectId string, zone string) *ProjectsZonesOperationsListCall { + c := &ProjectsZonesOperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + c.zone = zone + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsZonesOperationsListCall) Fields(s ...googleapi.Field) *ProjectsZonesOperationsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *ProjectsZonesOperationsListCall) IfNoneMatch(entityTag string) *ProjectsZonesOperationsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled.
+func (c *ProjectsZonesOperationsListCall) Context(ctx context.Context) *ProjectsZonesOperationsListCall { + c.ctx_ = ctx + return c +} + +func (c *ProjectsZonesOperationsListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/projects/{projectId}/zones/{zone}/operations") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "projectId": c.projectId, + "zone": c.zone, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "container.projects.zones.operations.list" call. +// Exactly one of *ListOperationsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ListOperationsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsZonesOperationsListCall) Do(opts ...googleapi.CallOption) (*ListOperationsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ListOperationsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists all operations in a project in a specific zone or all zones.", + // "httpMethod": "GET", + // "id": "container.projects.zones.operations.list", + // "parameterOrder": [ + // "projectId", + // "zone" + // ], + // "parameters": { + // "projectId": { + // "description": "The Google Developers Console [project ID or project number](https://support.google.com/cloud/answer/6158840).", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the Google Compute Engine [zone](/compute/docs/zones#available) to return operations for, or `-` for all zones.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/projects/{projectId}/zones/{zone}/operations", + // "response": { + // "$ref": "ListOperationsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} diff --git a/Godeps/_workspace/src/google.golang.org/api/dns/v1/dns-api.json b/Godeps/_workspace/src/google.golang.org/api/dns/v1/dns-api.json new file mode 100644 index 000000000000..192deeaf7fc1 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/api/dns/v1/dns-api.json @@ -0,0 +1,708 @@ +{ + "kind": "discovery#restDescription", + "etag": "\"jQLIOHBVnDZie4rQHGH1WJF-INE/ctEt-71wWAltEdgLnIcGLfJZeFE\"", + "discoveryVersion": "v1", + "id": "dns:v1", + "name": "dns", + "version": "v1", + "revision": "20160413", + "title": "Google Cloud 
DNS API", + "description": "Configures and serves authoritative DNS records.", + "ownerDomain": "google.com", + "ownerName": "Google", + "icons": { + "x16": "http://www.google.com/images/icons/product/search-16.gif", + "x32": "http://www.google.com/images/icons/product/search-32.gif" + }, + "documentationLink": "https://developers.google.com/cloud-dns", + "protocol": "rest", + "baseUrl": "https://www.googleapis.com/dns/v1/projects/", + "basePath": "/dns/v1/projects/", + "rootUrl": "https://www.googleapis.com/", + "servicePath": "dns/v1/projects/", + "batchPath": "batch", + "parameters": { + "alt": { + "type": "string", + "description": "Data format for the response.", + "default": "json", + "enum": [ + "json" + ], + "enumDescriptions": [ + "Responses with Content-Type of application/json" + ], + "location": "query" + }, + "fields": { + "type": "string", + "description": "Selector specifying which fields to include in a partial response.", + "location": "query" + }, + "key": { + "type": "string", + "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", + "location": "query" + }, + "oauth_token": { + "type": "string", + "description": "OAuth 2.0 token for the current user.", + "location": "query" + }, + "prettyPrint": { + "type": "boolean", + "description": "Returns response with indentations and line breaks.", + "default": "true", + "location": "query" + }, + "quotaUser": { + "type": "string", + "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. Overrides userIp if both are provided.", + "location": "query" + }, + "userIp": { + "type": "string", + "description": "IP address of the site where the request originates. Use this if you want to enforce per-user limits.", + "location": "query" + } + }, + "auth": { + "oauth2": { + "scopes": { + "https://www.googleapis.com/auth/cloud-platform": { + "description": "View and manage your data across Google Cloud Platform services" + }, + "https://www.googleapis.com/auth/cloud-platform.read-only": { + "description": "View your data across Google Cloud Platform services" + }, + "https://www.googleapis.com/auth/ndev.clouddns.readonly": { + "description": "View your DNS records hosted by Google Cloud DNS" + }, + "https://www.googleapis.com/auth/ndev.clouddns.readwrite": { + "description": "View and manage your DNS records hosted by Google Cloud DNS" + } + } + } + }, + "schemas": { + "Change": { + "id": "Change", + "type": "object", + "description": "An atomic update to a collection of ResourceRecordSets.", + "properties": { + "additions": { + "type": "array", + "description": "Which ResourceRecordSets to add?", + "items": { + "$ref": "ResourceRecordSet" + } + }, + "deletions": { + "type": "array", + "description": "Which ResourceRecordSets to remove? Must match existing data exactly.", + "items": { + "$ref": "ResourceRecordSet" + } + }, + "id": { + "type": "string", + "description": "Unique identifier for the resource; defined by the server (output only)." + }, + "kind": { + "type": "string", + "description": "Identifies what kind of resource this is. Value: the fixed string \"dns#change\".", + "default": "dns#change" + }, + "startTime": { + "type": "string", + "description": "The time that this operation was started by the server (output only). This is in RFC3339 text format." 
+ }, + "status": { + "type": "string", + "description": "Status of the operation (output only).", + "enum": [ + "done", + "pending" + ], + "enumDescriptions": [ + "", + "" + ] + } + } + }, + "ChangesListResponse": { + "id": "ChangesListResponse", + "type": "object", + "description": "The response to a request to enumerate Changes to a ResourceRecordSets collection.", + "properties": { + "changes": { + "type": "array", + "description": "The requested changes.", + "items": { + "$ref": "Change" + } + }, + "kind": { + "type": "string", + "description": "Type of resource.", + "default": "dns#changesListResponse" + }, + "nextPageToken": { + "type": "string", + "description": "The presence of this field indicates that there exist more results following your last page of results in pagination order. To fetch them, make another list request using this value as your pagination token.\n\nIn this way you can retrieve the complete contents of even very large collections one page at a time. However, if the contents of the collection change between the first and last paginated list request, the set of all elements returned will be an inconsistent view of the collection. There is no way to retrieve a \"snapshot\" of collections larger than the maximum page size." + } + } + }, + "ManagedZone": { + "id": "ManagedZone", + "type": "object", + "description": "A zone is a subtree of the DNS namespace under one administrative responsibility. A ManagedZone is a resource that represents a DNS zone hosted by the Cloud DNS service.", + "properties": { + "creationTime": { + "type": "string", + "description": "The time that this resource was created on the server. This is in RFC3339 text format. Output only." + }, + "description": { + "type": "string", + "description": "A mutable string of at most 1024 characters associated with this resource for the user's convenience. Has no effect on the managed zone's function." + }, + "dnsName": { + "type": "string", + "description": "The DNS name of this managed zone, for instance \"example.com.\"." + }, + "id": { + "type": "string", + "description": "Unique identifier for the resource; defined by the server (output only)", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "Identifies what kind of resource this is. Value: the fixed string \"dns#managedZone\".", + "default": "dns#managedZone" + }, + "name": { + "type": "string", + "description": "User assigned name for this resource. Must be unique within the project. The name must be 1-32 characters long, must begin with a letter, end with a letter or digit, and only contain lowercase letters, digits or dashes." + }, + "nameServerSet": { + "type": "string", + "description": "Optionally specifies the NameServerSet for this ManagedZone. A NameServerSet is a set of DNS name servers that all host the same ManagedZones. Most users will leave this field unset." 
+ }, + "nameServers": { + "type": "array", + "description": "Delegate your managed_zone to these virtual name servers; defined by the server (output only)", + "items": { + "type": "string" + } + } + } + }, + "ManagedZonesListResponse": { + "id": "ManagedZonesListResponse", + "type": "object", + "properties": { + "kind": { + "type": "string", + "description": "Type of resource.", + "default": "dns#managedZonesListResponse" + }, + "managedZones": { + "type": "array", + "description": "The managed zone resources.", + "items": { + "$ref": "ManagedZone" + } + }, + "nextPageToken": { + "type": "string", + "description": "The presence of this field indicates that there exist more results following your last page of results in pagination order. To fetch them, make another list request using this value as your page token.\n\nIn this way you can retrieve the complete contents of even very large collections one page at a time. However, if the contents of the collection change between the first and last paginated list request, the set of all elements returned will be an inconsistent view of the collection. There is no way to retrieve a consistent snapshot of a collection larger than the maximum page size." + } + } + }, + "Project": { + "id": "Project", + "type": "object", + "description": "A project resource. The project is a top level container for resources including Cloud DNS ManagedZones. Projects can be created only in the APIs console.", + "properties": { + "id": { + "type": "string", + "description": "User assigned unique identifier for the resource (output only)." + }, + "kind": { + "type": "string", + "description": "Identifies what kind of resource this is. Value: the fixed string \"dns#project\".", + "default": "dns#project" + }, + "number": { + "type": "string", + "description": "Unique numeric identifier for the resource; defined by the server (output only).", + "format": "uint64" + }, + "quota": { + "$ref": "Quota", + "description": "Quotas assigned to this project (output only)." + } + } + }, + "Quota": { + "id": "Quota", + "type": "object", + "description": "Limits associated with a Project.", + "properties": { + "kind": { + "type": "string", + "description": "Identifies what kind of resource this is. 
Value: the fixed string \"dns#quota\".", + "default": "dns#quota" + }, + "managedZones": { + "type": "integer", + "description": "Maximum allowed number of managed zones in the project.", + "format": "int32" + }, + "resourceRecordsPerRrset": { + "type": "integer", + "description": "Maximum allowed number of ResourceRecords per ResourceRecordSet.", + "format": "int32" + }, + "rrsetAdditionsPerChange": { + "type": "integer", + "description": "Maximum allowed number of ResourceRecordSets to add per ChangesCreateRequest.", + "format": "int32" + }, + "rrsetDeletionsPerChange": { + "type": "integer", + "description": "Maximum allowed number of ResourceRecordSets to delete per ChangesCreateRequest.", + "format": "int32" + }, + "rrsetsPerManagedZone": { + "type": "integer", + "description": "Maximum allowed number of ResourceRecordSets per zone in the project.", + "format": "int32" + }, + "totalRrdataSizePerChange": { + "type": "integer", + "description": "Maximum allowed size for total rrdata in one ChangesCreateRequest in bytes.", + "format": "int32" + } + } + }, + "ResourceRecordSet": { + "id": "ResourceRecordSet", + "type": "object", + "description": "A unit of data that will be returned by the DNS servers.", + "properties": { + "kind": { + "type": "string", + "description": "Identifies what kind of resource this is. Value: the fixed string \"dns#resourceRecordSet\".", + "default": "dns#resourceRecordSet" + }, + "name": { + "type": "string", + "description": "For example, www.example.com." + }, + "rrdatas": { + "type": "array", + "description": "As defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1).", + "items": { + "type": "string" + } + }, + "ttl": { + "type": "integer", + "description": "Number of seconds that this ResourceRecordSet can be cached by resolvers.", + "format": "int32" + }, + "type": { + "type": "string", + "description": "The identifier of a supported record type, for example, A, AAAA, MX, TXT, and so on." + } + } + }, + "ResourceRecordSetsListResponse": { + "id": "ResourceRecordSetsListResponse", + "type": "object", + "properties": { + "kind": { + "type": "string", + "description": "Type of resource.", + "default": "dns#resourceRecordSetsListResponse" + }, + "nextPageToken": { + "type": "string", + "description": "The presence of this field indicates that there exist more results following your last page of results in pagination order. To fetch them, make another list request using this value as your pagination token.\n\nIn this way you can retrieve the complete contents of even very large collections one page at a time. However, if the contents of the collection change between the first and last paginated list request, the set of all elements returned will be an inconsistent view of the collection. There is no way to retrieve a consistent snapshot of a collection larger than the maximum page size." + }, + "rrsets": { + "type": "array", + "description": "The resource record set resources.", + "items": { + "$ref": "ResourceRecordSet" + } + } + } + } + }, + "resources": { + "changes": { + "methods": { + "create": { + "id": "dns.changes.create", + "path": "{project}/managedZones/{managedZone}/changes", + "httpMethod": "POST", + "description": "Atomically update the ResourceRecordSet collection.", + "parameters": { + "managedZone": { + "type": "string", + "description": "Identifies the managed zone addressed by this request. 
Can be the managed zone name or id.", + "required": true, + "location": "path" + }, + "project": { + "type": "string", + "description": "Identifies the project addressed by this request.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "managedZone" + ], + "request": { + "$ref": "Change" + }, + "response": { + "$ref": "Change" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/ndev.clouddns.readwrite" + ] + }, + "get": { + "id": "dns.changes.get", + "path": "{project}/managedZones/{managedZone}/changes/{changeId}", + "httpMethod": "GET", + "description": "Fetch the representation of an existing Change.", + "parameters": { + "changeId": { + "type": "string", + "description": "The identifier of the requested change, from a previous ResourceRecordSetsChangeResponse.", + "required": true, + "location": "path" + }, + "managedZone": { + "type": "string", + "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or id.", + "required": true, + "location": "path" + }, + "project": { + "type": "string", + "description": "Identifies the project addressed by this request.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "managedZone", + "changeId" + ], + "response": { + "$ref": "Change" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/ndev.clouddns.readonly", + "https://www.googleapis.com/auth/ndev.clouddns.readwrite" + ] + }, + "list": { + "id": "dns.changes.list", + "path": "{project}/managedZones/{managedZone}/changes", + "httpMethod": "GET", + "description": "Enumerate Changes to a ResourceRecordSet collection.", + "parameters": { + "managedZone": { + "type": "string", + "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or id.", + "required": true, + "location": "path" + }, + "maxResults": { + "type": "integer", + "description": "Optional. Maximum number of results to be returned. If unspecified, the server will decide how many results to return.", + "format": "int32", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Optional. A tag returned by a previous list request that was truncated. Use this parameter to continue a previous list request.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Identifies the project addressed by this request.", + "required": true, + "location": "path" + }, + "sortBy": { + "type": "string", + "description": "Sorting criterion. 
The only supported value is change sequence.", + "default": "changeSequence", + "enum": [ + "changeSequence" + ], + "enumDescriptions": [ + "" + ], + "location": "query" + }, + "sortOrder": { + "type": "string", + "description": "Sorting order direction: 'ascending' or 'descending'.", + "location": "query" + } + }, + "parameterOrder": [ + "project", + "managedZone" + ], + "response": { + "$ref": "ChangesListResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/ndev.clouddns.readonly", + "https://www.googleapis.com/auth/ndev.clouddns.readwrite" + ] + } + } + }, + "managedZones": { + "methods": { + "create": { + "id": "dns.managedZones.create", + "path": "{project}/managedZones", + "httpMethod": "POST", + "description": "Create a new ManagedZone.", + "parameters": { + "project": { + "type": "string", + "description": "Identifies the project addressed by this request.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "request": { + "$ref": "ManagedZone" + }, + "response": { + "$ref": "ManagedZone" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/ndev.clouddns.readwrite" + ] + }, + "delete": { + "id": "dns.managedZones.delete", + "path": "{project}/managedZones/{managedZone}", + "httpMethod": "DELETE", + "description": "Delete a previously created ManagedZone.", + "parameters": { + "managedZone": { + "type": "string", + "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or id.", + "required": true, + "location": "path" + }, + "project": { + "type": "string", + "description": "Identifies the project addressed by this request.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "managedZone" + ], + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/ndev.clouddns.readwrite" + ] + }, + "get": { + "id": "dns.managedZones.get", + "path": "{project}/managedZones/{managedZone}", + "httpMethod": "GET", + "description": "Fetch the representation of an existing ManagedZone.", + "parameters": { + "managedZone": { + "type": "string", + "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or id.", + "required": true, + "location": "path" + }, + "project": { + "type": "string", + "description": "Identifies the project addressed by this request.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "managedZone" + ], + "response": { + "$ref": "ManagedZone" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/ndev.clouddns.readonly", + "https://www.googleapis.com/auth/ndev.clouddns.readwrite" + ] + }, + "list": { + "id": "dns.managedZones.list", + "path": "{project}/managedZones", + "httpMethod": "GET", + "description": "Enumerate ManagedZones that have been created but not yet deleted.", + "parameters": { + "dnsName": { + "type": "string", + "description": "Restricts the list to return only zones with this domain name.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "Optional. Maximum number of results to be returned. 
If unspecified, the server will decide how many results to return.", + "format": "int32", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Optional. A tag returned by a previous list request that was truncated. Use this parameter to continue a previous list request.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Identifies the project addressed by this request.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "ManagedZonesListResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/ndev.clouddns.readonly", + "https://www.googleapis.com/auth/ndev.clouddns.readwrite" + ] + } + } + }, + "projects": { + "methods": { + "get": { + "id": "dns.projects.get", + "path": "{project}", + "httpMethod": "GET", + "description": "Fetch the representation of an existing Project.", + "parameters": { + "project": { + "type": "string", + "description": "Identifies the project addressed by this request.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "Project" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/ndev.clouddns.readonly", + "https://www.googleapis.com/auth/ndev.clouddns.readwrite" + ] + } + } + }, + "resourceRecordSets": { + "methods": { + "list": { + "id": "dns.resourceRecordSets.list", + "path": "{project}/managedZones/{managedZone}/rrsets", + "httpMethod": "GET", + "description": "Enumerate ResourceRecordSets that have been created but not yet deleted.", + "parameters": { + "managedZone": { + "type": "string", + "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or id.", + "required": true, + "location": "path" + }, + "maxResults": { + "type": "integer", + "description": "Optional. Maximum number of results to be returned. If unspecified, the server will decide how many results to return.", + "format": "int32", + "location": "query" + }, + "name": { + "type": "string", + "description": "Restricts the list to return only records with this fully qualified domain name.", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Optional. A tag returned by a previous list request that was truncated. Use this parameter to continue a previous list request.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Identifies the project addressed by this request.", + "required": true, + "location": "path" + }, + "type": { + "type": "string", + "description": "Restricts the list to return only records of this type. 
If present, the \"name\" parameter must also be present.", + "location": "query" + } + }, + "parameterOrder": [ + "project", + "managedZone" + ], + "response": { + "$ref": "ResourceRecordSetsListResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/ndev.clouddns.readonly", + "https://www.googleapis.com/auth/ndev.clouddns.readwrite" + ] + } + } + } + } +} diff --git a/Godeps/_workspace/src/google.golang.org/api/dns/v1/dns-gen.go b/Godeps/_workspace/src/google.golang.org/api/dns/v1/dns-gen.go new file mode 100644 index 000000000000..d9d98592d5c0 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/api/dns/v1/dns-gen.go @@ -0,0 +1,1850 @@ +// Package dns provides access to the Google Cloud DNS API. +// +// See https://developers.google.com/cloud-dns +// +// Usage example: +// +// import "google.golang.org/api/dns/v1" +// ... +// dnsService, err := dns.New(oauthHttpClient) +package dns + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + context "golang.org/x/net/context" + ctxhttp "golang.org/x/net/context/ctxhttp" + gensupport "google.golang.org/api/gensupport" + googleapi "google.golang.org/api/googleapi" + "io" + "net/http" + "net/url" + "strconv" + "strings" +) + +// Always reference these packages, just in case the auto-generated code +// below doesn't. +var _ = bytes.NewBuffer +var _ = strconv.Itoa +var _ = fmt.Sprintf +var _ = json.NewDecoder +var _ = io.Copy +var _ = url.Parse +var _ = gensupport.MarshalJSON +var _ = googleapi.Version +var _ = errors.New +var _ = strings.Replace +var _ = context.Canceled +var _ = ctxhttp.Do + +const apiId = "dns:v1" +const apiName = "dns" +const apiVersion = "v1" +const basePath = "https://www.googleapis.com/dns/v1/projects/" + +// OAuth2 scopes used by this API. 
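+//
+// As an illustrative sketch only (it assumes the golang.org/x/oauth2/google
+// package is available alongside this vendored client), an authorized
+// *http.Client for one of these scopes can be obtained from Application
+// Default Credentials and passed to New:
+//
+//   ctx := context.Background()
+//   hc, err := google.DefaultClient(ctx, dns.CloudPlatformScope)
+//   if err != nil {
+//       // handle credential discovery failure
+//   }
+//   dnsService, err := dns.New(hc)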
+const ( + // View and manage your data across Google Cloud Platform services + CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform" + + // View your data across Google Cloud Platform services + CloudPlatformReadOnlyScope = "https://www.googleapis.com/auth/cloud-platform.read-only" + + // View your DNS records hosted by Google Cloud DNS + NdevClouddnsReadonlyScope = "https://www.googleapis.com/auth/ndev.clouddns.readonly" + + // View and manage your DNS records hosted by Google Cloud DNS + NdevClouddnsReadwriteScope = "https://www.googleapis.com/auth/ndev.clouddns.readwrite" +) + +func New(client *http.Client) (*Service, error) { + if client == nil { + return nil, errors.New("client is nil") + } + s := &Service{client: client, BasePath: basePath} + s.Changes = NewChangesService(s) + s.ManagedZones = NewManagedZonesService(s) + s.Projects = NewProjectsService(s) + s.ResourceRecordSets = NewResourceRecordSetsService(s) + return s, nil +} + +type Service struct { + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + + Changes *ChangesService + + ManagedZones *ManagedZonesService + + Projects *ProjectsService + + ResourceRecordSets *ResourceRecordSetsService +} + +func (s *Service) userAgent() string { + if s.UserAgent == "" { + return googleapi.UserAgent + } + return googleapi.UserAgent + " " + s.UserAgent +} + +func NewChangesService(s *Service) *ChangesService { + rs := &ChangesService{s: s} + return rs +} + +type ChangesService struct { + s *Service +} + +func NewManagedZonesService(s *Service) *ManagedZonesService { + rs := &ManagedZonesService{s: s} + return rs +} + +type ManagedZonesService struct { + s *Service +} + +func NewProjectsService(s *Service) *ProjectsService { + rs := &ProjectsService{s: s} + return rs +} + +type ProjectsService struct { + s *Service +} + +func NewResourceRecordSetsService(s *Service) *ResourceRecordSetsService { + rs := &ResourceRecordSetsService{s: s} + return rs +} + +type ResourceRecordSetsService struct { + s *Service +} + +// Change: An atomic update to a collection of ResourceRecordSets. +type Change struct { + // Additions: Which ResourceRecordSets to add? + Additions []*ResourceRecordSet `json:"additions,omitempty"` + + // Deletions: Which ResourceRecordSets to remove? Must match existing + // data exactly. + Deletions []*ResourceRecordSet `json:"deletions,omitempty"` + + // Id: Unique identifier for the resource; defined by the server (output + // only). + Id string `json:"id,omitempty"` + + // Kind: Identifies what kind of resource this is. Value: the fixed + // string "dns#change". + Kind string `json:"kind,omitempty"` + + // StartTime: The time that this operation was started by the server + // (output only). This is in RFC3339 text format. + StartTime string `json:"startTime,omitempty"` + + // Status: Status of the operation (output only). + // + // Possible values: + // "done" + // "pending" + Status string `json:"status,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Additions") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. 
This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *Change) MarshalJSON() ([]byte, error) { + type noMethod Change + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// ChangesListResponse: The response to a request to enumerate Changes +// to a ResourceRecordSets collection. +type ChangesListResponse struct { + // Changes: The requested changes. + Changes []*Change `json:"changes,omitempty"` + + // Kind: Type of resource. + Kind string `json:"kind,omitempty"` + + // NextPageToken: The presence of this field indicates that there exist + // more results following your last page of results in pagination order. + // To fetch them, make another list request using this value as your + // pagination token. + // + // In this way you can retrieve the complete contents of even very large + // collections one page at a time. However, if the contents of the + // collection change between the first and last paginated list request, + // the set of all elements returned will be an inconsistent view of the + // collection. There is no way to retrieve a "snapshot" of collections + // larger than the maximum page size. + NextPageToken string `json:"nextPageToken,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Changes") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *ChangesListResponse) MarshalJSON() ([]byte, error) { + type noMethod ChangesListResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// ManagedZone: A zone is a subtree of the DNS namespace under one +// administrative responsibility. A ManagedZone is a resource that +// represents a DNS zone hosted by the Cloud DNS service. +type ManagedZone struct { + // CreationTime: The time that this resource was created on the server. + // This is in RFC3339 text format. Output only. + CreationTime string `json:"creationTime,omitempty"` + + // Description: A mutable string of at most 1024 characters associated + // with this resource for the user's convenience. Has no effect on the + // managed zone's function. + Description string `json:"description,omitempty"` + + // DnsName: The DNS name of this managed zone, for instance + // "example.com.". + DnsName string `json:"dnsName,omitempty"` + + // Id: Unique identifier for the resource; defined by the server (output + // only) + Id uint64 `json:"id,omitempty,string"` + + // Kind: Identifies what kind of resource this is. Value: the fixed + // string "dns#managedZone". + Kind string `json:"kind,omitempty"` + + // Name: User assigned name for this resource. Must be unique within the + // project. The name must be 1-32 characters long, must begin with a + // letter, end with a letter or digit, and only contain lowercase + // letters, digits or dashes. + Name string `json:"name,omitempty"` + + // NameServerSet: Optionally specifies the NameServerSet for this + // ManagedZone. A NameServerSet is a set of DNS name servers that all + // host the same ManagedZones. 
Most users will leave this field unset. + NameServerSet string `json:"nameServerSet,omitempty"` + + // NameServers: Delegate your managed_zone to these virtual name + // servers; defined by the server (output only) + NameServers []string `json:"nameServers,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTime") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *ManagedZone) MarshalJSON() ([]byte, error) { + type noMethod ManagedZone + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type ManagedZonesListResponse struct { + // Kind: Type of resource. + Kind string `json:"kind,omitempty"` + + // ManagedZones: The managed zone resources. + ManagedZones []*ManagedZone `json:"managedZones,omitempty"` + + // NextPageToken: The presence of this field indicates that there exist + // more results following your last page of results in pagination order. + // To fetch them, make another list request using this value as your + // page token. + // + // In this way you can retrieve the complete contents of even very large + // collections one page at a time. However, if the contents of the + // collection change between the first and last paginated list request, + // the set of all elements returned will be an inconsistent view of the + // collection. There is no way to retrieve a consistent snapshot of a + // collection larger than the maximum page size. + NextPageToken string `json:"nextPageToken,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Kind") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *ManagedZonesListResponse) MarshalJSON() ([]byte, error) { + type noMethod ManagedZonesListResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// Project: A project resource. The project is a top level container for +// resources including Cloud DNS ManagedZones. Projects can be created +// only in the APIs console. +type Project struct { + // Id: User assigned unique identifier for the resource (output only). + Id string `json:"id,omitempty"` + + // Kind: Identifies what kind of resource this is. Value: the fixed + // string "dns#project". + Kind string `json:"kind,omitempty"` + + // Number: Unique numeric identifier for the resource; defined by the + // server (output only). + Number uint64 `json:"number,omitempty,string"` + + // Quota: Quotas assigned to this project (output only). + Quota *Quota `json:"quota,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. 
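+	// It is populated by Do, which copies the status code and headers out of
+	// the raw *http.Response before the body is decoded, so callers can
+	// inspect the embedded HTTPStatusCode without retaining the response
+	// themselves.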
+ googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *Project) MarshalJSON() ([]byte, error) { + type noMethod Project + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// Quota: Limits associated with a Project. +type Quota struct { + // Kind: Identifies what kind of resource this is. Value: the fixed + // string "dns#quota". + Kind string `json:"kind,omitempty"` + + // ManagedZones: Maximum allowed number of managed zones in the project. + ManagedZones int64 `json:"managedZones,omitempty"` + + // ResourceRecordsPerRrset: Maximum allowed number of ResourceRecords + // per ResourceRecordSet. + ResourceRecordsPerRrset int64 `json:"resourceRecordsPerRrset,omitempty"` + + // RrsetAdditionsPerChange: Maximum allowed number of ResourceRecordSets + // to add per ChangesCreateRequest. + RrsetAdditionsPerChange int64 `json:"rrsetAdditionsPerChange,omitempty"` + + // RrsetDeletionsPerChange: Maximum allowed number of ResourceRecordSets + // to delete per ChangesCreateRequest. + RrsetDeletionsPerChange int64 `json:"rrsetDeletionsPerChange,omitempty"` + + // RrsetsPerManagedZone: Maximum allowed number of ResourceRecordSets + // per zone in the project. + RrsetsPerManagedZone int64 `json:"rrsetsPerManagedZone,omitempty"` + + // TotalRrdataSizePerChange: Maximum allowed size for total rrdata in + // one ChangesCreateRequest in bytes. + TotalRrdataSizePerChange int64 `json:"totalRrdataSizePerChange,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Kind") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *Quota) MarshalJSON() ([]byte, error) { + type noMethod Quota + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// ResourceRecordSet: A unit of data that will be returned by the DNS +// servers. +type ResourceRecordSet struct { + // Kind: Identifies what kind of resource this is. Value: the fixed + // string "dns#resourceRecordSet". + Kind string `json:"kind,omitempty"` + + // Name: For example, www.example.com. + Name string `json:"name,omitempty"` + + // Rrdatas: As defined in RFC 1035 (section 5) and RFC 1034 (section + // 3.6.1). + Rrdatas []string `json:"rrdatas,omitempty"` + + // Ttl: Number of seconds that this ResourceRecordSet can be cached by + // resolvers. + Ttl int64 `json:"ttl,omitempty"` + + // Type: The identifier of a supported record type, for example, A, + // AAAA, MX, TXT, and so on. + Type string `json:"type,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Kind") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *ResourceRecordSet) MarshalJSON() ([]byte, error) { + type noMethod ResourceRecordSet + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type ResourceRecordSetsListResponse struct { + // Kind: Type of resource. + Kind string `json:"kind,omitempty"` + + // NextPageToken: The presence of this field indicates that there exist + // more results following your last page of results in pagination order. + // To fetch them, make another list request using this value as your + // pagination token. + // + // In this way you can retrieve the complete contents of even very large + // collections one page at a time. However, if the contents of the + // collection change between the first and last paginated list request, + // the set of all elements returned will be an inconsistent view of the + // collection. There is no way to retrieve a consistent snapshot of a + // collection larger than the maximum page size. + NextPageToken string `json:"nextPageToken,omitempty"` + + // Rrsets: The resource record set resources. + Rrsets []*ResourceRecordSet `json:"rrsets,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Kind") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *ResourceRecordSetsListResponse) MarshalJSON() ([]byte, error) { + type noMethod ResourceRecordSetsListResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// method id "dns.changes.create": + +type ChangesCreateCall struct { + s *Service + project string + managedZone string + change *Change + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Create: Atomically update the ResourceRecordSet collection. +func (r *ChangesService) Create(project string, managedZone string, change *Change) *ChangesCreateCall { + c := &ChangesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.managedZone = managedZone + c.change = change + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ChangesCreateCall) Fields(s ...googleapi.Field) *ChangesCreateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
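+//
+// For illustration, a create call bounded by a timeout might look like the
+// following (the project, zone, and record values are hypothetical, and the
+// sketch assumes the standard context and time packages):
+//
+//   rr := &dns.ResourceRecordSet{
+//       Name:    "www.example.com.",
+//       Type:    "A",
+//       Ttl:     300,
+//       Rrdatas: []string{"203.0.113.10"},
+//   }
+//   change := &dns.Change{Additions: []*dns.ResourceRecordSet{rr}}
+//   ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//   defer cancel()
+//   got, err := dnsService.Changes.Create("my-project", "example-zone", change).
+//       Context(ctx).Do()
+//
+// On success got.Status is "pending" until the change has propagated, after
+// which a Changes.Get poll would report "done".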
+func (c *ChangesCreateCall) Context(ctx context.Context) *ChangesCreateCall { + c.ctx_ = ctx + return c +} + +func (c *ChangesCreateCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.change) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/managedZones/{managedZone}/changes") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "managedZone": c.managedZone, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "dns.changes.create" call. +// Exactly one of *Change or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Change.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ChangesCreateCall) Do(opts ...googleapi.CallOption) (*Change, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Change{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Atomically update the ResourceRecordSet collection.", + // "httpMethod": "POST", + // "id": "dns.changes.create", + // "parameterOrder": [ + // "project", + // "managedZone" + // ], + // "parameters": { + // "managedZone": { + // "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or id.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Identifies the project addressed by this request.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/managedZones/{managedZone}/changes", + // "request": { + // "$ref": "Change" + // }, + // "response": { + // "$ref": "Change" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/ndev.clouddns.readwrite" + // ] + // } + +} + +// method id "dns.changes.get": + +type ChangesGetCall struct { + s *Service + project string + managedZone string + changeId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// Get: Fetch the representation of an existing Change. +func (r *ChangesService) Get(project string, managedZone string, changeId string) *ChangesGetCall { + c := &ChangesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.managedZone = managedZone + c.changeId = changeId + return c +} + +// Fields allows partial responses to be retrieved. 
See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ChangesGetCall) Fields(s ...googleapi.Field) *ChangesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ChangesGetCall) IfNoneMatch(entityTag string) *ChangesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ChangesGetCall) Context(ctx context.Context) *ChangesGetCall { + c.ctx_ = ctx + return c +} + +func (c *ChangesGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/managedZones/{managedZone}/changes/{changeId}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "managedZone": c.managedZone, + "changeId": c.changeId, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "dns.changes.get" call. +// Exactly one of *Change or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Change.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ChangesGetCall) Do(opts ...googleapi.CallOption) (*Change, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Change{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Fetch the representation of an existing Change.", + // "httpMethod": "GET", + // "id": "dns.changes.get", + // "parameterOrder": [ + // "project", + // "managedZone", + // "changeId" + // ], + // "parameters": { + // "changeId": { + // "description": "The identifier of the requested change, from a previous ResourceRecordSetsChangeResponse.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "managedZone": { + // "description": "Identifies the managed zone addressed by this request. 
Can be the managed zone name or id.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Identifies the project addressed by this request.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/managedZones/{managedZone}/changes/{changeId}", + // "response": { + // "$ref": "Change" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/ndev.clouddns.readonly", + // "https://www.googleapis.com/auth/ndev.clouddns.readwrite" + // ] + // } + +} + +// method id "dns.changes.list": + +type ChangesListCall struct { + s *Service + project string + managedZone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Enumerate Changes to a ResourceRecordSet collection. +func (r *ChangesService) List(project string, managedZone string) *ChangesListCall { + c := &ChangesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.managedZone = managedZone + return c +} + +// MaxResults sets the optional parameter "maxResults": Maximum number +// of results to be returned. If unspecified, the server will decide how +// many results to return. +func (c *ChangesListCall) MaxResults(maxResults int64) *ChangesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": A tag returned by +// a previous list request that was truncated. Use this parameter to +// continue a previous list request. +func (c *ChangesListCall) PageToken(pageToken string) *ChangesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// SortBy sets the optional parameter "sortBy": Sorting criterion. The +// only supported value is change sequence. +// +// Possible values: +// "changeSequence" (default) +func (c *ChangesListCall) SortBy(sortBy string) *ChangesListCall { + c.urlParams_.Set("sortBy", sortBy) + return c +} + +// SortOrder sets the optional parameter "sortOrder": Sorting order +// direction: 'ascending' or 'descending'. +func (c *ChangesListCall) SortOrder(sortOrder string) *ChangesListCall { + c.urlParams_.Set("sortOrder", sortOrder) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ChangesListCall) Fields(s ...googleapi.Field) *ChangesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ChangesListCall) IfNoneMatch(entityTag string) *ChangesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
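+//
+// For illustration, one way to walk every page of changes by hand (the
+// project and zone names are hypothetical); the Pages method defined further
+// down wraps this same loop:
+//
+//   call := dnsService.Changes.List("my-project", "example-zone")
+//   for {
+//       resp, err := call.Do()
+//       if err != nil {
+//           break // handle the error
+//       }
+//       for _, ch := range resp.Changes {
+//           _ = ch // inspect each Change here
+//       }
+//       if resp.NextPageToken == "" {
+//           break
+//       }
+//       call.PageToken(resp.NextPageToken)
+//   }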
+func (c *ChangesListCall) Context(ctx context.Context) *ChangesListCall { + c.ctx_ = ctx + return c +} + +func (c *ChangesListCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/managedZones/{managedZone}/changes") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "managedZone": c.managedZone, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "dns.changes.list" call. +// Exactly one of *ChangesListResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ChangesListResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ChangesListCall) Do(opts ...googleapi.CallOption) (*ChangesListResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ChangesListResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Enumerate Changes to a ResourceRecordSet collection.", + // "httpMethod": "GET", + // "id": "dns.changes.list", + // "parameterOrder": [ + // "project", + // "managedZone" + // ], + // "parameters": { + // "managedZone": { + // "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or id.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "maxResults": { + // "description": "Optional. Maximum number of results to be returned. If unspecified, the server will decide how many results to return.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Optional. A tag returned by a previous list request that was truncated. Use this parameter to continue a previous list request.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Identifies the project addressed by this request.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "sortBy": { + // "default": "changeSequence", + // "description": "Sorting criterion. 
The only supported value is change sequence.", + // "enum": [ + // "changeSequence" + // ], + // "enumDescriptions": [ + // "" + // ], + // "location": "query", + // "type": "string" + // }, + // "sortOrder": { + // "description": "Sorting order direction: 'ascending' or 'descending'.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/managedZones/{managedZone}/changes", + // "response": { + // "$ref": "ChangesListResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/ndev.clouddns.readonly", + // "https://www.googleapis.com/auth/ndev.clouddns.readwrite" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ChangesListCall) Pages(ctx context.Context, f func(*ChangesListResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "dns.managedZones.create": + +type ManagedZonesCreateCall struct { + s *Service + project string + managedzone *ManagedZone + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Create: Create a new ManagedZone. +func (r *ManagedZonesService) Create(project string, managedzone *ManagedZone) *ManagedZonesCreateCall { + c := &ManagedZonesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.managedzone = managedzone + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ManagedZonesCreateCall) Fields(s ...googleapi.Field) *ManagedZonesCreateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ManagedZonesCreateCall) Context(ctx context.Context) *ManagedZonesCreateCall { + c.ctx_ = ctx + return c +} + +func (c *ManagedZonesCreateCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.managedzone) + if err != nil { + return nil, err + } + ctype := "application/json" + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/managedZones") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + req.Header.Set("Content-Type", ctype) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "dns.managedZones.create" call. +// Exactly one of *ManagedZone or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *ManagedZone.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. 
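+//
+// A sketch of a typical call (all identifiers are hypothetical; note that the
+// schema's example for dnsName, "example.com.", ends in a trailing dot):
+//
+//   zone := &dns.ManagedZone{
+//       Name:        "example-zone",
+//       DnsName:     "example.com.",
+//       Description: "zone managed by the vendored client",
+//   }
+//   created, err := dnsService.ManagedZones.Create("my-project", zone).Do()
+//   if err == nil {
+//       // created.NameServers lists the servers to delegate the domain to.
+//   }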
+func (c *ManagedZonesCreateCall) Do(opts ...googleapi.CallOption) (*ManagedZone, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ManagedZone{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Create a new ManagedZone.", + // "httpMethod": "POST", + // "id": "dns.managedZones.create", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Identifies the project addressed by this request.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/managedZones", + // "request": { + // "$ref": "ManagedZone" + // }, + // "response": { + // "$ref": "ManagedZone" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/ndev.clouddns.readwrite" + // ] + // } + +} + +// method id "dns.managedZones.delete": + +type ManagedZonesDeleteCall struct { + s *Service + project string + managedZone string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Delete a previously created ManagedZone. +func (r *ManagedZonesService) Delete(project string, managedZone string) *ManagedZonesDeleteCall { + c := &ManagedZonesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.managedZone = managedZone + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ManagedZonesDeleteCall) Fields(s ...googleapi.Field) *ManagedZonesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ManagedZonesDeleteCall) Context(ctx context.Context) *ManagedZonesDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *ManagedZonesDeleteCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/managedZones/{managedZone}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "managedZone": c.managedZone, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "dns.managedZones.delete" call. +func (c *ManagedZonesDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) 
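+	// NOTE: unlike the other Do methods in this file, delete has no response
+	// body to decode; after the options above are applied it only issues the
+	// request and checks for a transport or non-2xx error.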
+ res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return err + } + return nil + // { + // "description": "Delete a previously created ManagedZone.", + // "httpMethod": "DELETE", + // "id": "dns.managedZones.delete", + // "parameterOrder": [ + // "project", + // "managedZone" + // ], + // "parameters": { + // "managedZone": { + // "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or id.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Identifies the project addressed by this request.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/managedZones/{managedZone}", + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/ndev.clouddns.readwrite" + // ] + // } + +} + +// method id "dns.managedZones.get": + +type ManagedZonesGetCall struct { + s *Service + project string + managedZone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// Get: Fetch the representation of an existing ManagedZone. +func (r *ManagedZonesService) Get(project string, managedZone string) *ManagedZonesGetCall { + c := &ManagedZonesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.managedZone = managedZone + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ManagedZonesGetCall) Fields(s ...googleapi.Field) *ManagedZonesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ManagedZonesGetCall) IfNoneMatch(entityTag string) *ManagedZonesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ManagedZonesGetCall) Context(ctx context.Context) *ManagedZonesGetCall { + c.ctx_ = ctx + return c +} + +func (c *ManagedZonesGetCall) doRequest(alt string) (*http.Response, error) { + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/managedZones/{managedZone}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "managedZone": c.managedZone, + }) + req.Header.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + req.Header.Set("If-None-Match", c.ifNoneMatch_) + } + if c.ctx_ != nil { + return ctxhttp.Do(c.ctx_, c.s.client, req) + } + return c.s.client.Do(req) +} + +// Do executes the "dns.managedZones.get" call. +// Exactly one of *ManagedZone or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *ManagedZone.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ManagedZonesGetCall) Do(opts ...googleapi.CallOption) (*ManagedZone, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ManagedZone{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Fetch the representation of an existing ManagedZone.", + // "httpMethod": "GET", + // "id": "dns.managedZones.get", + // "parameterOrder": [ + // "project", + // "managedZone" + // ], + // "parameters": { + // "managedZone": { + // "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or id.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Identifies the project addressed by this request.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/managedZones/{managedZone}", + // "response": { + // "$ref": "ManagedZone" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/ndev.clouddns.readonly", + // "https://www.googleapis.com/auth/ndev.clouddns.readwrite" + // ] + // } + +} + +// method id "dns.managedZones.list": + +type ManagedZonesListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Enumerate ManagedZones that have been created but not yet +// deleted. +func (r *ManagedZonesService) List(project string) *ManagedZonesListCall { + c := &ManagedZonesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// DnsName sets the optional parameter "dnsName": Restricts the list to +// return only zones with this domain name. +func (c *ManagedZonesListCall) DnsName(dnsName string) *ManagedZonesListCall { + c.urlParams_.Set("dnsName", dnsName) + return c +} + +// MaxResults sets the optional parameter "maxResults": Maximum number +// of results to be returned. If unspecified, the server will decide how +// many results to return. +func (c *ManagedZonesListCall) MaxResults(maxResults int64) *ManagedZonesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": A tag returned by +// a previous list request that was truncated. Use this parameter to +// continue a previous list request. +func (c *ManagedZonesListCall) PageToken(pageToken string) *ManagedZonesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
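+
+// A minimal sketch of conditional fetching with IfNoneMatch, assuming an
+// initialized *Service named svc and an entity tag saved from a previous
+// response (illustrative only):
+//
+//	zone, err := svc.ManagedZones.Get("my-project", "example-zone").
+//		IfNoneMatch(etag).
+//		Do()
+//	if googleapi.IsNotModified(err) {
+//		// The zone is unchanged since the last fetch; reuse the cached copy.
+//	}
+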
+func (c *ManagedZonesListCall) Fields(s ...googleapi.Field) *ManagedZonesListCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *ManagedZonesListCall) IfNoneMatch(entityTag string) *ManagedZonesListCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *ManagedZonesListCall) Context(ctx context.Context) *ManagedZonesListCall {
+	c.ctx_ = ctx
+	return c
+}
+
+func (c *ManagedZonesListCall) doRequest(alt string) (*http.Response, error) {
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/managedZones")
+	urls += "?" + c.urlParams_.Encode()
+	req, _ := http.NewRequest("GET", urls, body)
+	googleapi.Expand(req.URL, map[string]string{
+		"project": c.project,
+	})
+	req.Header.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		req.Header.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	if c.ctx_ != nil {
+		return ctxhttp.Do(c.ctx_, c.s.client, req)
+	}
+	return c.s.client.Do(req)
+}
+
+// Do executes the "dns.managedZones.list" call.
+// Exactly one of *ManagedZonesListResponse or error will be non-nil.
+// Any non-2xx status code is an error. Response headers are in either
+// *ManagedZonesListResponse.ServerResponse.Header or (if a response was
+// returned at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
+func (c *ManagedZonesListCall) Do(opts ...googleapi.CallOption) (*ManagedZonesListResponse, error) {
+	gensupport.SetOptions(c.urlParams_, opts...)
+	res, err := c.doRequest("json")
+	if res != nil && res.StatusCode == http.StatusNotModified {
+		if res.Body != nil {
+			res.Body.Close()
+		}
+		return nil, &googleapi.Error{
+			Code:   res.StatusCode,
+			Header: res.Header,
+		}
+	}
+	if err != nil {
+		return nil, err
+	}
+	defer googleapi.CloseBody(res)
+	if err := googleapi.CheckResponse(res); err != nil {
+		return nil, err
+	}
+	ret := &ManagedZonesListResponse{
+		ServerResponse: googleapi.ServerResponse{
+			Header:         res.Header,
+			HTTPStatusCode: res.StatusCode,
+		},
+	}
+	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+	// {
+	//   "description": "Enumerate ManagedZones that have been created but not yet deleted.",
+	//   "httpMethod": "GET",
+	//   "id": "dns.managedZones.list",
+	//   "parameterOrder": [
+	//     "project"
+	//   ],
+	//   "parameters": {
+	//     "dnsName": {
+	//       "description": "Restricts the list to return only zones with this domain name.",
+	//       "location": "query",
+	//       "type": "string"
+	//     },
+	//     "maxResults": {
+	//       "description": "Optional. Maximum number of results to be returned. If unspecified, the server will decide how many results to return.",
+	//       "format": "int32",
+	//       "location": "query",
+	//       "type": "integer"
+	//     },
+	//     "pageToken": {
+	//       "description": "Optional. A tag returned by a previous list request that was truncated.
Use this parameter to continue a previous list request.",
+	//       "location": "query",
+	//       "type": "string"
+	//     },
+	//     "project": {
+	//       "description": "Identifies the project addressed by this request.",
+	//       "location": "path",
+	//       "required": true,
+	//       "type": "string"
+	//     }
+	//   },
+	//   "path": "{project}/managedZones",
+	//   "response": {
+	//     "$ref": "ManagedZonesListResponse"
+	//   },
+	//   "scopes": [
+	//     "https://www.googleapis.com/auth/cloud-platform",
+	//     "https://www.googleapis.com/auth/cloud-platform.read-only",
+	//     "https://www.googleapis.com/auth/ndev.clouddns.readonly",
+	//     "https://www.googleapis.com/auth/ndev.clouddns.readwrite"
+	//   ]
+	// }
+
+}
+
+// Pages invokes f for each page of results.
+// A non-nil error returned from f will halt the iteration.
+// The provided context supersedes any context provided to the Context method.
+func (c *ManagedZonesListCall) Pages(ctx context.Context, f func(*ManagedZonesListResponse) error) error {
+	c.ctx_ = ctx
+	defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
+	for {
+		x, err := c.Do()
+		if err != nil {
+			return err
+		}
+		if err := f(x); err != nil {
+			return err
+		}
+		if x.NextPageToken == "" {
+			return nil
+		}
+		c.PageToken(x.NextPageToken)
+	}
+}
+
+// method id "dns.projects.get":
+
+type ProjectsGetCall struct {
+	s            *Service
+	project      string
+	urlParams_   gensupport.URLParams
+	ifNoneMatch_ string
+	ctx_         context.Context
+}
+
+// Get: Fetch the representation of an existing Project.
+func (r *ProjectsService) Get(project string) *ProjectsGetCall {
+	c := &ProjectsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+	c.project = project
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ProjectsGetCall) Fields(s ...googleapi.Field) *ProjectsGetCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *ProjectsGetCall) IfNoneMatch(entityTag string) *ProjectsGetCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *ProjectsGetCall) Context(ctx context.Context) *ProjectsGetCall {
+	c.ctx_ = ctx
+	return c
+}
+
+func (c *ProjectsGetCall) doRequest(alt string) (*http.Response, error) {
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}")
+	urls += "?" + c.urlParams_.Encode()
+	req, _ := http.NewRequest("GET", urls, body)
+	googleapi.Expand(req.URL, map[string]string{
+		"project": c.project,
+	})
+	req.Header.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		req.Header.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	if c.ctx_ != nil {
+		return ctxhttp.Do(c.ctx_, c.s.client, req)
+	}
+	return c.s.client.Do(req)
+}
+
+// Do executes the "dns.projects.get" call.
+// Exactly one of *Project or error will be non-nil. Any non-2xx status
+// code is an error.
Response headers are in either +// *Project.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ProjectsGetCall) Do(opts ...googleapi.CallOption) (*Project, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Project{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Fetch the representation of an existing Project.", + // "httpMethod": "GET", + // "id": "dns.projects.get", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Identifies the project addressed by this request.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}", + // "response": { + // "$ref": "Project" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/ndev.clouddns.readonly", + // "https://www.googleapis.com/auth/ndev.clouddns.readwrite" + // ] + // } + +} + +// method id "dns.resourceRecordSets.list": + +type ResourceRecordSetsListCall struct { + s *Service + project string + managedZone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// List: Enumerate ResourceRecordSets that have been created but not yet +// deleted. +func (r *ResourceRecordSetsService) List(project string, managedZone string) *ResourceRecordSetsListCall { + c := &ResourceRecordSetsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.managedZone = managedZone + return c +} + +// MaxResults sets the optional parameter "maxResults": Maximum number +// of results to be returned. If unspecified, the server will decide how +// many results to return. +func (c *ResourceRecordSetsListCall) MaxResults(maxResults int64) *ResourceRecordSetsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// Name sets the optional parameter "name": Restricts the list to return +// only records with this fully qualified domain name. +func (c *ResourceRecordSetsListCall) Name(name string) *ResourceRecordSetsListCall { + c.urlParams_.Set("name", name) + return c +} + +// PageToken sets the optional parameter "pageToken": A tag returned by +// a previous list request that was truncated. Use this parameter to +// continue a previous list request. +func (c *ResourceRecordSetsListCall) PageToken(pageToken string) *ResourceRecordSetsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Type sets the optional parameter "type": Restricts the list to return +// only records of this type. If present, the "name" parameter must also +// be present. 
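+
+// A minimal sketch combining the filters above with the Pages helper defined
+// further below (assuming an initialized *Service named svc; the Rrsets field
+// name follows the response type used by this API):
+//
+//	ctx := context.Background()
+//	err := svc.ResourceRecordSets.List("my-project", "example-zone").
+//		Name("www.example.com.").
+//		Type("A").
+//		Pages(ctx, func(page *ResourceRecordSetsListResponse) error {
+//			for _, rrset := range page.Rrsets {
+//				fmt.Println(rrset.Name, rrset.Type)
+//			}
+//			return nil // returning a non-nil error halts the iteration
+//		})
+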
+func (c *ResourceRecordSetsListCall) Type(type_ string) *ResourceRecordSetsListCall {
+	c.urlParams_.Set("type", type_)
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ResourceRecordSetsListCall) Fields(s ...googleapi.Field) *ResourceRecordSetsListCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *ResourceRecordSetsListCall) IfNoneMatch(entityTag string) *ResourceRecordSetsListCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *ResourceRecordSetsListCall) Context(ctx context.Context) *ResourceRecordSetsListCall {
+	c.ctx_ = ctx
+	return c
+}
+
+func (c *ResourceRecordSetsListCall) doRequest(alt string) (*http.Response, error) {
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/managedZones/{managedZone}/rrsets")
+	urls += "?" + c.urlParams_.Encode()
+	req, _ := http.NewRequest("GET", urls, body)
+	googleapi.Expand(req.URL, map[string]string{
+		"project":     c.project,
+		"managedZone": c.managedZone,
+	})
+	req.Header.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		req.Header.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	if c.ctx_ != nil {
+		return ctxhttp.Do(c.ctx_, c.s.client, req)
+	}
+	return c.s.client.Do(req)
+}
+
+// Do executes the "dns.resourceRecordSets.list" call.
+// Exactly one of *ResourceRecordSetsListResponse or error will be
+// non-nil. Any non-2xx status code is an error. Response headers are in
+// either *ResourceRecordSetsListResponse.ServerResponse.Header or (if a
+// response was returned at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
+func (c *ResourceRecordSetsListCall) Do(opts ...googleapi.CallOption) (*ResourceRecordSetsListResponse, error) {
+	gensupport.SetOptions(c.urlParams_, opts...)
+	res, err := c.doRequest("json")
+	if res != nil && res.StatusCode == http.StatusNotModified {
+		if res.Body != nil {
+			res.Body.Close()
+		}
+		return nil, &googleapi.Error{
+			Code:   res.StatusCode,
+			Header: res.Header,
+		}
+	}
+	if err != nil {
+		return nil, err
+	}
+	defer googleapi.CloseBody(res)
+	if err := googleapi.CheckResponse(res); err != nil {
+		return nil, err
+	}
+	ret := &ResourceRecordSetsListResponse{
+		ServerResponse: googleapi.ServerResponse{
+			Header:         res.Header,
+			HTTPStatusCode: res.StatusCode,
+		},
+	}
+	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+	// {
+	//   "description": "Enumerate ResourceRecordSets that have been created but not yet deleted.",
+	//   "httpMethod": "GET",
+	//   "id": "dns.resourceRecordSets.list",
+	//   "parameterOrder": [
+	//     "project",
+	//     "managedZone"
+	//   ],
+	//   "parameters": {
+	//     "managedZone": {
+	//       "description": "Identifies the managed zone addressed by this request.
Can be the managed zone name or id.",
+	//       "location": "path",
+	//       "required": true,
+	//       "type": "string"
+	//     },
+	//     "maxResults": {
+	//       "description": "Optional. Maximum number of results to be returned. If unspecified, the server will decide how many results to return.",
+	//       "format": "int32",
+	//       "location": "query",
+	//       "type": "integer"
+	//     },
+	//     "name": {
+	//       "description": "Restricts the list to return only records with this fully qualified domain name.",
+	//       "location": "query",
+	//       "type": "string"
+	//     },
+	//     "pageToken": {
+	//       "description": "Optional. A tag returned by a previous list request that was truncated. Use this parameter to continue a previous list request.",
+	//       "location": "query",
+	//       "type": "string"
+	//     },
+	//     "project": {
+	//       "description": "Identifies the project addressed by this request.",
+	//       "location": "path",
+	//       "required": true,
+	//       "type": "string"
+	//     },
+	//     "type": {
+	//       "description": "Restricts the list to return only records of this type. If present, the \"name\" parameter must also be present.",
+	//       "location": "query",
+	//       "type": "string"
+	//     }
+	//   },
+	//   "path": "{project}/managedZones/{managedZone}/rrsets",
+	//   "response": {
+	//     "$ref": "ResourceRecordSetsListResponse"
+	//   },
+	//   "scopes": [
+	//     "https://www.googleapis.com/auth/cloud-platform",
+	//     "https://www.googleapis.com/auth/cloud-platform.read-only",
+	//     "https://www.googleapis.com/auth/ndev.clouddns.readonly",
+	//     "https://www.googleapis.com/auth/ndev.clouddns.readwrite"
+	//   ]
+	// }
+
+}
+
+// Pages invokes f for each page of results.
+// A non-nil error returned from f will halt the iteration.
+// The provided context supersedes any context provided to the Context method.
+func (c *ResourceRecordSetsListCall) Pages(ctx context.Context, f func(*ResourceRecordSetsListResponse) error) error {
+	c.ctx_ = ctx
+	defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
+	for {
+		x, err := c.Do()
+		if err != nil {
+			return err
+		}
+		if err := f(x); err != nil {
+			return err
+		}
+		if x.NextPageToken == "" {
+			return nil
+		}
+		c.PageToken(x.NextPageToken)
+	}
+}
diff --git a/Godeps/_workspace/src/google.golang.org/api/gensupport/backoff.go b/Godeps/_workspace/src/google.golang.org/api/gensupport/backoff.go
new file mode 100644
index 000000000000..1356140472a6
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/api/gensupport/backoff.go
@@ -0,0 +1,46 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gensupport
+
+import (
+	"math/rand"
+	"time"
+)
+
+type BackoffStrategy interface {
+	// Pause returns the duration of the next pause and true if the operation should be
+	// retried, or false if no further retries should be attempted.
+	Pause() (time.Duration, bool)
+
+	// Reset restores the strategy to its initial state.
+	Reset()
+}
+
+// ExponentialBackoff performs exponential backoff as per https://en.wikipedia.org/wiki/Exponential_backoff.
+// The initial pause time is given by Base.
+// Once the total pause time exceeds Max, Pause will indicate no further retries.
+type ExponentialBackoff struct {
+	Base  time.Duration
+	Max   time.Duration
+	total time.Duration
+	n     uint
+}
+
+func (eb *ExponentialBackoff) Pause() (time.Duration, bool) {
+	if eb.total > eb.Max {
+		return 0, false
+	}
+
+	// The next pause is selected randomly from [0, 2^n * Base).
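+	// For illustration: with Base = 250ms the successive draws come from
+	// [0, 250ms), [0, 500ms), [0, 1s), and so on; the window doubles on each
+	// call until the accumulated pause time exceeds Max.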
+ d := time.Duration(rand.Int63n((1 << eb.n) * int64(eb.Base))) + eb.total += d + eb.n++ + return d, true +} + +func (eb *ExponentialBackoff) Reset() { + eb.n = 0 + eb.total = 0 +} diff --git a/Godeps/_workspace/src/google.golang.org/api/gensupport/buffer.go b/Godeps/_workspace/src/google.golang.org/api/gensupport/buffer.go new file mode 100644 index 000000000000..992104911538 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/api/gensupport/buffer.go @@ -0,0 +1,77 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gensupport + +import ( + "bytes" + "io" + + "google.golang.org/api/googleapi" +) + +// MediaBuffer buffers data from an io.Reader to support uploading media in retryable chunks. +type MediaBuffer struct { + media io.Reader + + chunk []byte // The current chunk which is pending upload. The capacity is the chunk size. + err error // Any error generated when populating chunk by reading media. + + // The absolute position of chunk in the underlying media. + off int64 +} + +func NewMediaBuffer(media io.Reader, chunkSize int) *MediaBuffer { + return &MediaBuffer{media: media, chunk: make([]byte, 0, chunkSize)} +} + +// Chunk returns the current buffered chunk, the offset in the underlying media +// from which the chunk is drawn, and the size of the chunk. +// Successive calls to Chunk return the same chunk between calls to Next. +func (mb *MediaBuffer) Chunk() (chunk io.Reader, off int64, size int, err error) { + // There may already be data in chunk if Next has not been called since the previous call to Chunk. + if mb.err == nil && len(mb.chunk) == 0 { + mb.err = mb.loadChunk() + } + return bytes.NewReader(mb.chunk), mb.off, len(mb.chunk), mb.err +} + +// loadChunk will read from media into chunk, up to the capacity of chunk. +func (mb *MediaBuffer) loadChunk() error { + bufSize := cap(mb.chunk) + mb.chunk = mb.chunk[:bufSize] + + read := 0 + var err error + for err == nil && read < bufSize { + var n int + n, err = mb.media.Read(mb.chunk[read:]) + read += n + } + mb.chunk = mb.chunk[:read] + return err +} + +// Next advances to the next chunk, which will be returned by the next call to Chunk. +// Calls to Next without a corresponding prior call to Chunk will have no effect. +func (mb *MediaBuffer) Next() { + mb.off += int64(len(mb.chunk)) + mb.chunk = mb.chunk[0:0] +} + +type readerTyper struct { + io.Reader + googleapi.ContentTyper +} + +// ReaderAtToReader adapts a ReaderAt to be used as a Reader. +// If ra implements googleapi.ContentTyper, then the returned reader +// will also implement googleapi.ContentTyper, delegating to ra. +func ReaderAtToReader(ra io.ReaderAt, size int64) io.Reader { + r := io.NewSectionReader(ra, 0, size) + if typer, ok := ra.(googleapi.ContentTyper); ok { + return readerTyper{r, typer} + } + return r +} diff --git a/Godeps/_workspace/src/google.golang.org/api/gensupport/doc.go b/Godeps/_workspace/src/google.golang.org/api/gensupport/doc.go new file mode 100644 index 000000000000..752c4b411b24 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/api/gensupport/doc.go @@ -0,0 +1,10 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package gensupport is an internal implementation detail used by code +// generated by the google-api-go-generator tool. 
+// +// This package may be modified at any time without regard for backwards +// compatibility. It should not be used directly by API users. +package gensupport diff --git a/Godeps/_workspace/src/google.golang.org/api/gensupport/json.go b/Godeps/_workspace/src/google.golang.org/api/gensupport/json.go new file mode 100644 index 000000000000..dd7bcd2eb079 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/api/gensupport/json.go @@ -0,0 +1,172 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gensupport + +import ( + "encoding/json" + "fmt" + "reflect" + "strings" +) + +// MarshalJSON returns a JSON encoding of schema containing only selected fields. +// A field is selected if: +// * it has a non-empty value, or +// * its field name is present in forceSendFields, and +// * it is not a nil pointer or nil interface. +// The JSON key for each selected field is taken from the field's json: struct tag. +func MarshalJSON(schema interface{}, forceSendFields []string) ([]byte, error) { + if len(forceSendFields) == 0 { + return json.Marshal(schema) + } + + mustInclude := make(map[string]struct{}) + for _, f := range forceSendFields { + mustInclude[f] = struct{}{} + } + + dataMap, err := schemaToMap(schema, mustInclude) + if err != nil { + return nil, err + } + return json.Marshal(dataMap) +} + +func schemaToMap(schema interface{}, mustInclude map[string]struct{}) (map[string]interface{}, error) { + m := make(map[string]interface{}) + s := reflect.ValueOf(schema) + st := s.Type() + + for i := 0; i < s.NumField(); i++ { + jsonTag := st.Field(i).Tag.Get("json") + if jsonTag == "" { + continue + } + tag, err := parseJSONTag(jsonTag) + if err != nil { + return nil, err + } + if tag.ignore { + continue + } + + v := s.Field(i) + f := st.Field(i) + if !includeField(v, f, mustInclude) { + continue + } + + // nil maps are treated as empty maps. + if f.Type.Kind() == reflect.Map && v.IsNil() { + m[tag.apiName] = map[string]string{} + continue + } + + // nil slices are treated as empty slices. + if f.Type.Kind() == reflect.Slice && v.IsNil() { + m[tag.apiName] = []bool{} + continue + } + + if tag.stringFormat { + m[tag.apiName] = formatAsString(v, f.Type.Kind()) + } else { + m[tag.apiName] = v.Interface() + } + } + return m, nil +} + +// formatAsString returns a string representation of v, dereferencing it first if possible. +func formatAsString(v reflect.Value, kind reflect.Kind) string { + if kind == reflect.Ptr && !v.IsNil() { + v = v.Elem() + } + + return fmt.Sprintf("%v", v.Interface()) +} + +// jsonTag represents a restricted version of the struct tag format used by encoding/json. +// It is used to describe the JSON encoding of fields in a Schema struct. +type jsonTag struct { + apiName string + stringFormat bool + ignore bool +} + +// parseJSONTag parses a restricted version of the struct tag format used by encoding/json. +// The format of the tag must match that generated by the Schema.writeSchemaStruct method +// in the api generator. 
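+
+// A minimal sketch of how MarshalJSON above treats ForceSendFields; the struct
+// is a made-up example in the shape the generator emits, and zone is a value
+// of that struct type:
+//
+//	type ManagedZone struct {
+//		Description     string   `json:"description,omitempty"`
+//		ForceSendFields []string `json:"-"`
+//	}
+//
+// With Description == "" the field is normally omitted, but
+//
+//	MarshalJSON(zone, []string{"Description"})
+//
+// emits {"description": ""} so the server sees the empty value explicitly.
+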
+func parseJSONTag(val string) (jsonTag, error) { + if val == "-" { + return jsonTag{ignore: true}, nil + } + + var tag jsonTag + + i := strings.Index(val, ",") + if i == -1 || val[:i] == "" { + return tag, fmt.Errorf("malformed json tag: %s", val) + } + + tag = jsonTag{ + apiName: val[:i], + } + + switch val[i+1:] { + case "omitempty": + case "omitempty,string": + tag.stringFormat = true + default: + return tag, fmt.Errorf("malformed json tag: %s", val) + } + + return tag, nil +} + +// Reports whether the struct field "f" with value "v" should be included in JSON output. +func includeField(v reflect.Value, f reflect.StructField, mustInclude map[string]struct{}) bool { + // The regular JSON encoding of a nil pointer is "null", which means "delete this field". + // Therefore, we could enable field deletion by honoring pointer fields' presence in the mustInclude set. + // However, many fields are not pointers, so there would be no way to delete these fields. + // Rather than partially supporting field deletion, we ignore mustInclude for nil pointer fields. + // Deletion will be handled by a separate mechanism. + if f.Type.Kind() == reflect.Ptr && v.IsNil() { + return false + } + + // The "any" type is represented as an interface{}. If this interface + // is nil, there is no reasonable representation to send. We ignore + // these fields, for the same reasons as given above for pointers. + if f.Type.Kind() == reflect.Interface && v.IsNil() { + return false + } + + _, ok := mustInclude[f.Name] + return ok || !isEmptyValue(v) +} + +// isEmptyValue reports whether v is the empty value for its type. This +// implementation is based on that of the encoding/json package, but its +// correctness does not depend on it being identical. What's important is that +// this function return false in situations where v should not be sent as part +// of a PATCH operation. +func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + return false +} diff --git a/Godeps/_workspace/src/google.golang.org/api/gensupport/media.go b/Godeps/_workspace/src/google.golang.org/api/gensupport/media.go new file mode 100644 index 000000000000..c6410e89a97a --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/api/gensupport/media.go @@ -0,0 +1,199 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gensupport + +import ( + "fmt" + "io" + "io/ioutil" + "mime/multipart" + "net/http" + "net/textproto" + + "google.golang.org/api/googleapi" +) + +const sniffBuffSize = 512 + +func newContentSniffer(r io.Reader) *contentSniffer { + return &contentSniffer{r: r} +} + +// contentSniffer wraps a Reader, and reports the content type determined by sniffing up to 512 bytes from the Reader. +type contentSniffer struct { + r io.Reader + start []byte // buffer for the sniffed bytes. + err error // set to any error encountered while reading bytes to be sniffed. + + ctype string // set on first sniff. 
+	sniffed bool // set to true on first sniff.
+}
+
+func (cs *contentSniffer) Read(p []byte) (n int, err error) {
+	// Ensure that the content type is sniffed before any data is consumed from Reader.
+	_, _ = cs.ContentType()
+
+	if len(cs.start) > 0 {
+		n := copy(p, cs.start)
+		cs.start = cs.start[n:]
+		return n, nil
+	}
+
+	// We may have read some bytes into start while sniffing, even if the read ended in an error.
+	// We should first return those bytes, then the error.
+	if cs.err != nil {
+		return 0, cs.err
+	}
+
+	// Now we have handled all bytes that were buffered while sniffing. Now just delegate to the underlying reader.
+	return cs.r.Read(p)
+}
+
+// ContentType returns the sniffed content type, and whether the content type was successfully sniffed.
+func (cs *contentSniffer) ContentType() (string, bool) {
+	if cs.sniffed {
+		return cs.ctype, cs.ctype != ""
+	}
+	cs.sniffed = true
+	// If ReadAll hits EOF, it returns err==nil.
+	cs.start, cs.err = ioutil.ReadAll(io.LimitReader(cs.r, sniffBuffSize))
+
+	// Don't try to detect the content type based on possibly incomplete data.
+	if cs.err != nil {
+		return "", false
+	}
+
+	cs.ctype = http.DetectContentType(cs.start)
+	return cs.ctype, true
+}
+
+// DetermineContentType determines the content type of the supplied reader.
+// If the content type is already known, it can be specified via ctype.
+// Otherwise, the content of media will be sniffed to determine the content type.
+// If media implements googleapi.ContentTyper (deprecated), this will be used
+// instead of sniffing the content.
+// After calling DetermineContentType the caller must not perform further reads on
+// media, but rather read from the Reader that is returned.
+func DetermineContentType(media io.Reader, ctype string) (io.Reader, string) {
+	// Note: callers could avoid calling DetermineContentType if ctype != "",
+	// but doing the check inside this function reduces the amount of
+	// generated code.
+	if ctype != "" {
+		return media, ctype
+	}
+
+	// For backwards compatibility, allow clients to set content
+	// type by providing a ContentTyper for media.
+	if typer, ok := media.(googleapi.ContentTyper); ok {
+		return media, typer.ContentType()
+	}
+
+	sniffer := newContentSniffer(media)
+	if ctype, ok := sniffer.ContentType(); ok {
+		return sniffer, ctype
+	}
+	// If content type could not be sniffed, reads from sniffer will eventually fail with an error.
+	return sniffer, ""
+}
+
+type typeReader struct {
+	io.Reader
+	typ string
+}
+
+// multipartReader combines the contents of multiple readers to create a multipart/related HTTP body.
+// Close must be called if reads from the multipartReader are abandoned before reaching EOF.
+type multipartReader struct { + pr *io.PipeReader + pipeOpen bool + ctype string +} + +func newMultipartReader(parts []typeReader) *multipartReader { + mp := &multipartReader{pipeOpen: true} + var pw *io.PipeWriter + mp.pr, pw = io.Pipe() + mpw := multipart.NewWriter(pw) + mp.ctype = "multipart/related; boundary=" + mpw.Boundary() + go func() { + for _, part := range parts { + w, err := mpw.CreatePart(typeHeader(part.typ)) + if err != nil { + mpw.Close() + pw.CloseWithError(fmt.Errorf("googleapi: CreatePart failed: %v", err)) + return + } + _, err = io.Copy(w, part.Reader) + if err != nil { + mpw.Close() + pw.CloseWithError(fmt.Errorf("googleapi: Copy failed: %v", err)) + return + } + } + + mpw.Close() + pw.Close() + }() + return mp +} + +func (mp *multipartReader) Read(data []byte) (n int, err error) { + return mp.pr.Read(data) +} + +func (mp *multipartReader) Close() error { + if !mp.pipeOpen { + return nil + } + mp.pipeOpen = false + return mp.pr.Close() +} + +// CombineBodyMedia combines a json body with media content to create a multipart/related HTTP body. +// It returns a ReadCloser containing the combined body, and the overall "multipart/related" content type, with random boundary. +// +// The caller must call Close on the returned ReadCloser if reads are abandoned before reaching EOF. +func CombineBodyMedia(body io.Reader, bodyContentType string, media io.Reader, mediaContentType string) (io.ReadCloser, string) { + mp := newMultipartReader([]typeReader{ + {body, bodyContentType}, + {media, mediaContentType}, + }) + return mp, mp.ctype +} + +func typeHeader(contentType string) textproto.MIMEHeader { + h := make(textproto.MIMEHeader) + if contentType != "" { + h.Set("Content-Type", contentType) + } + return h +} + +// PrepareUpload determines whether the data in the supplied reader should be +// uploaded in a single request, or in sequential chunks. +// chunkSize is the size of the chunk that media should be split into. +// If chunkSize is non-zero and the contents of media do not fit in a single +// chunk (or there is an error reading media), then media will be returned as a +// MediaBuffer. Otherwise, media will be returned as a Reader. +// +// After PrepareUpload has been called, media should no longer be used: the +// media content should be accessed via one of the return values. +func PrepareUpload(media io.Reader, chunkSize int) (io.Reader, *MediaBuffer) { + if chunkSize == 0 { // do not chunk + return media, nil + } + + mb := NewMediaBuffer(media, chunkSize) + rdr, _, _, err := mb.Chunk() + + if err == io.EOF { // we can upload this in a single request + return rdr, nil + } + // err might be a non-EOF error. If it is, the next call to mb.Chunk will + // return the same error. Returning a MediaBuffer ensures that this error + // will be handled at some point. + + return nil, mb +} diff --git a/Godeps/_workspace/src/google.golang.org/api/gensupport/params.go b/Godeps/_workspace/src/google.golang.org/api/gensupport/params.go new file mode 100644 index 000000000000..3b3c743967ea --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/api/gensupport/params.go @@ -0,0 +1,50 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gensupport + +import ( + "net/url" + + "google.golang.org/api/googleapi" +) + +// URLParams is a simplified replacement for url.Values +// that safely builds up URL parameters for encoding. 
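+//
+// For illustration (values are arbitrary):
+//
+//	params := make(URLParams)
+//	params.Set("alt", "json")
+//	params.SetMulti("fields", []string{"name", "id"})
+//	query := params.Encode() // "alt=json&fields=name&fields=id"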
+type URLParams map[string][]string + +// Get returns the first value for the given key, or "". +func (u URLParams) Get(key string) string { + vs := u[key] + if len(vs) == 0 { + return "" + } + return vs[0] +} + +// Set sets the key to value. +// It replaces any existing values. +func (u URLParams) Set(key, value string) { + u[key] = []string{value} +} + +// SetMulti sets the key to an array of values. +// It replaces any existing values. +// Note that values must not be modified after calling SetMulti +// so the caller is responsible for making a copy if necessary. +func (u URLParams) SetMulti(key string, values []string) { + u[key] = values +} + +// Encode encodes the values into ``URL encoded'' form +// ("bar=baz&foo=quux") sorted by key. +func (u URLParams) Encode() string { + return url.Values(u).Encode() +} + +func SetOptions(u URLParams, opts ...googleapi.CallOption) { + for _, o := range opts { + u.Set(o.Get()) + } +} diff --git a/Godeps/_workspace/src/google.golang.org/api/gensupport/resumable.go b/Godeps/_workspace/src/google.golang.org/api/gensupport/resumable.go new file mode 100644 index 000000000000..ad169439eeff --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/api/gensupport/resumable.go @@ -0,0 +1,198 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gensupport + +import ( + "fmt" + "io" + "net/http" + "sync" + "time" + + "golang.org/x/net/context" + "golang.org/x/net/context/ctxhttp" +) + +const ( + // statusResumeIncomplete is the code returned by the Google uploader + // when the transfer is not yet complete. + statusResumeIncomplete = 308 + + // statusTooManyRequests is returned by the storage API if the + // per-project limits have been temporarily exceeded. The request + // should be retried. + // https://cloud.google.com/storage/docs/json_api/v1/status-codes#standardcodes + statusTooManyRequests = 429 +) + +// ResumableUpload is used by the generated APIs to provide resumable uploads. +// It is not used by developers directly. +type ResumableUpload struct { + Client *http.Client + // URI is the resumable resource destination provided by the server after specifying "&uploadType=resumable". + URI string + UserAgent string // User-Agent for header of the request + // Media is the object being uploaded. + Media *MediaBuffer + // MediaType defines the media type, e.g. "image/jpeg". + MediaType string + + mu sync.Mutex // guards progress + progress int64 // number of bytes uploaded so far + + // Callback is an optional function that will be periodically called with the cumulative number of bytes uploaded. + Callback func(int64) + + // If not specified, a default exponential backoff strategy will be used. + Backoff BackoffStrategy +} + +// Progress returns the number of bytes uploaded at this point. +func (rx *ResumableUpload) Progress() int64 { + rx.mu.Lock() + defer rx.mu.Unlock() + return rx.progress +} + +// doUploadRequest performs a single HTTP request to upload data. +// off specifies the offset in rx.Media from which data is drawn. +// size is the number of bytes in data. +// final specifies whether data is the final chunk to be uploaded. 
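+//
+// For illustration, with an 8 MiB chunk the Sprintf calls below produce
+// headers such as:
+//
+//	Content-Range: bytes 0-8388607/*                (intermediate chunk)
+//	Content-Range: bytes 8388608-10485759/10485760  (final chunk)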
+func (rx *ResumableUpload) doUploadRequest(ctx context.Context, data io.Reader, off, size int64, final bool) (*http.Response, error) { + req, err := http.NewRequest("POST", rx.URI, data) + if err != nil { + return nil, err + } + + req.ContentLength = size + var contentRange string + if final { + if size == 0 { + contentRange = fmt.Sprintf("bytes */%v", off) + } else { + contentRange = fmt.Sprintf("bytes %v-%v/%v", off, off+size-1, off+size) + } + } else { + contentRange = fmt.Sprintf("bytes %v-%v/*", off, off+size-1) + } + req.Header.Set("Content-Range", contentRange) + req.Header.Set("Content-Type", rx.MediaType) + req.Header.Set("User-Agent", rx.UserAgent) + return ctxhttp.Do(ctx, rx.Client, req) + +} + +// reportProgress calls a user-supplied callback to report upload progress. +// If old==updated, the callback is not called. +func (rx *ResumableUpload) reportProgress(old, updated int64) { + if updated-old == 0 { + return + } + rx.mu.Lock() + rx.progress = updated + rx.mu.Unlock() + if rx.Callback != nil { + rx.Callback(updated) + } +} + +// transferChunk performs a single HTTP request to upload a single chunk from rx.Media. +func (rx *ResumableUpload) transferChunk(ctx context.Context) (*http.Response, error) { + chunk, off, size, err := rx.Media.Chunk() + + done := err == io.EOF + if !done && err != nil { + return nil, err + } + + res, err := rx.doUploadRequest(ctx, chunk, off, int64(size), done) + if err != nil { + return res, err + } + + if res.StatusCode == statusResumeIncomplete || res.StatusCode == http.StatusOK { + rx.reportProgress(off, off+int64(size)) + } + + if res.StatusCode == statusResumeIncomplete { + rx.Media.Next() + } + return res, nil +} + +func contextDone(ctx context.Context) bool { + select { + case <-ctx.Done(): + return true + default: + return false + } +} + +// Upload starts the process of a resumable upload with a cancellable context. +// It retries using the provided back off strategy until cancelled or the +// strategy indicates to stop retrying. +// It is called from the auto-generated API code and is not visible to the user. +// rx is private to the auto-generated API code. +// Exactly one of resp or err will be nil. If resp is non-nil, the caller must call resp.Body.Close. +func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err error) { + var pause time.Duration + backoff := rx.Backoff + if backoff == nil { + backoff = DefaultBackoffStrategy() + } + + for { + // Ensure that we return in the case of cancelled context, even if pause is 0. + if contextDone(ctx) { + return nil, ctx.Err() + } + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-time.After(pause): + } + + resp, err = rx.transferChunk(ctx) + + var status int + if resp != nil { + status = resp.StatusCode + } + + // Check if we should retry the request. + if shouldRetry(status, err) { + var retry bool + pause, retry = backoff.Pause() + if retry { + if resp != nil && resp.Body != nil { + resp.Body.Close() + } + continue + } + } + + // If the chunk was uploaded successfully, but there's still + // more to go, upload the next chunk without any delay. + if status == statusResumeIncomplete { + pause = 0 + backoff.Reset() + resp.Body.Close() + continue + } + + // It's possible for err and resp to both be non-nil here, but we expose a simpler + // contract to our callers: exactly one of resp and err will be non-nil. This means + // that any response body must be closed here before returning a non-nil error. 
+		if err != nil {
+			if resp != nil && resp.Body != nil {
+				resp.Body.Close()
+			}
+			return nil, err
+		}
+
+		return resp, nil
+	}
+}
diff --git a/Godeps/_workspace/src/google.golang.org/api/gensupport/retry.go b/Godeps/_workspace/src/google.golang.org/api/gensupport/retry.go
new file mode 100644
index 000000000000..7f83d1da99fa
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/api/gensupport/retry.go
@@ -0,0 +1,77 @@
+package gensupport
+
+import (
+	"io"
+	"net"
+	"net/http"
+	"time"
+
+	"golang.org/x/net/context"
+)
+
+// Retry invokes the given function, retrying it multiple times if the connection failed or
+// the HTTP status response indicates the request should be attempted again. ctx may be nil.
+func Retry(ctx context.Context, f func() (*http.Response, error), backoff BackoffStrategy) (*http.Response, error) {
+	for {
+		resp, err := f()
+
+		var status int
+		if resp != nil {
+			status = resp.StatusCode
+		}
+
+		// Return if we shouldn't retry.
+		pause, retry := backoff.Pause()
+		if !shouldRetry(status, err) || !retry {
+			return resp, err
+		}
+
+		// Ensure the response body is closed, if any.
+		if resp != nil && resp.Body != nil {
+			resp.Body.Close()
+		}
+
+		// Pause, but still listen to ctx.Done if context is not nil.
+		var done <-chan struct{}
+		if ctx != nil {
+			done = ctx.Done()
+		}
+		select {
+		case <-done:
+			return nil, ctx.Err()
+		case <-time.After(pause):
+		}
+	}
+}
+
+// DefaultBackoffStrategy returns a default strategy to use for retrying failed upload requests.
+func DefaultBackoffStrategy() BackoffStrategy {
+	return &ExponentialBackoff{
+		Base: 250 * time.Millisecond,
+		Max:  16 * time.Second,
+	}
+}
+
+// shouldRetry returns true if the HTTP response / error indicates that the
+// request should be attempted again.
+func shouldRetry(status int, err error) bool {
+	// Retry for 5xx response codes.
+	if 500 <= status && status < 600 {
+		return true
+	}
+
+	// Retry on statusTooManyRequests.
+	if status == statusTooManyRequests {
+		return true
+	}
+
+	// Retry on unexpected EOFs and temporary network errors.
+	if err == io.ErrUnexpectedEOF {
+		return true
+	}
+	if err, ok := err.(net.Error); ok {
+		return err.Temporary()
+	}
+
+	return false
+}
diff --git a/Godeps/_workspace/src/google.golang.org/api/googleapi/googleapi.go b/Godeps/_workspace/src/google.golang.org/api/googleapi/googleapi.go
new file mode 100644
index 000000000000..858537e00b07
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/api/googleapi/googleapi.go
@@ -0,0 +1,432 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package googleapi contains the common code shared by all Google API
+// libraries.
+package googleapi
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"strings"
+
+	"google.golang.org/api/googleapi/internal/uritemplates"
+)
+
+// ContentTyper is an interface for Readers which know (or would like
+// to override) their Content-Type. If a media body doesn't implement
+// ContentTyper, the type is sniffed from the content using
+// http.DetectContentType.
+type ContentTyper interface {
+	ContentType() string
+}
+
+// A SizeReaderAt is a ReaderAt with a Size method.
+// An io.SectionReader implements SizeReaderAt.
+type SizeReaderAt interface {
+	io.ReaderAt
+	Size() int64
+}
+
+// ServerResponse is embedded in each Do response and
+// provides the HTTP status code and header sent by the server.
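+//
+// For illustration: the generated Do methods embed this struct in their
+// return values, so callers can read, e.g., zone.ServerResponse.HTTPStatusCode
+// and zone.ServerResponse.Header (zone being an example variable name).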
+type ServerResponse struct {
+	// HTTPStatusCode is the server's response status code.
+	// When using a resource method's Do call, this will always be in the 2xx range.
+	HTTPStatusCode int
+	// Header contains the response header fields from the server.
+	Header http.Header
+}
+
+const (
+	Version = "0.5"
+
+	// UserAgent is the header string used to identify this package.
+	UserAgent = "google-api-go-client/" + Version
+
+	// The default chunk size to use for resumable uploads if not specified by the user.
+	DefaultUploadChunkSize = 8 * 1024 * 1024
+
+	// The minimum chunk size that can be used for resumable uploads. All
+	// user-specified chunk sizes must be a multiple of this value.
+	MinUploadChunkSize = 256 * 1024
+)
+
+// Error contains an error response from the server.
+type Error struct {
+	// Code is the HTTP response status code and will always be populated.
+	Code int `json:"code"`
+	// Message is the server response message and is only populated when
+	// explicitly referenced by the JSON server response.
+	Message string `json:"message"`
+	// Body is the raw response returned by the server.
+	// It is often but not always JSON, depending on how the request fails.
+	Body string
+	// Header contains the response header fields from the server.
+	Header http.Header
+
+	Errors []ErrorItem
+}
+
+// ErrorItem is a detailed error code & message from the Google API frontend.
+type ErrorItem struct {
+	// Reason is the typed error code. For example: "some_example".
+	Reason string `json:"reason"`
+	// Message is the human-readable description of the error.
+	Message string `json:"message"`
+}
+
+func (e *Error) Error() string {
+	if len(e.Errors) == 0 && e.Message == "" {
+		return fmt.Sprintf("googleapi: got HTTP response code %d with body: %v", e.Code, e.Body)
+	}
+	var buf bytes.Buffer
+	fmt.Fprintf(&buf, "googleapi: Error %d: ", e.Code)
+	if e.Message != "" {
+		fmt.Fprintf(&buf, "%s", e.Message)
+	}
+	if len(e.Errors) == 0 {
+		return strings.TrimSpace(buf.String())
+	}
+	if len(e.Errors) == 1 && e.Errors[0].Message == e.Message {
+		fmt.Fprintf(&buf, ", %s", e.Errors[0].Reason)
+		return buf.String()
+	}
+	fmt.Fprintln(&buf, "\nMore details:")
+	for _, v := range e.Errors {
+		fmt.Fprintf(&buf, "Reason: %s, Message: %s\n", v.Reason, v.Message)
+	}
+	return buf.String()
+}
+
+type errorReply struct {
+	Error *Error `json:"error"`
+}
+
+// CheckResponse returns an error (of type *Error) if the response
+// status code is not 2xx.
+func CheckResponse(res *http.Response) error {
+	if res.StatusCode >= 200 && res.StatusCode <= 299 {
+		return nil
+	}
+	slurp, err := ioutil.ReadAll(res.Body)
+	if err == nil {
+		jerr := new(errorReply)
+		err = json.Unmarshal(slurp, jerr)
+		if err == nil && jerr.Error != nil {
+			if jerr.Error.Code == 0 {
+				jerr.Error.Code = res.StatusCode
+			}
+			jerr.Error.Body = string(slurp)
+			return jerr.Error
+		}
+	}
+	return &Error{
+		Code:   res.StatusCode,
+		Body:   string(slurp),
+		Header: res.Header,
+	}
+}
+
+// IsNotModified reports whether err is the result of the
+// server replying with http.StatusNotModified.
+// Such error values are sometimes returned by "Do" methods
+// on calls when If-None-Match is used.
+func IsNotModified(err error) bool {
+	if err == nil {
+		return false
+	}
+	ae, ok := err.(*Error)
+	return ok && ae.Code == http.StatusNotModified
+}
+
+// CheckMediaResponse returns an error (of type *Error) if the response
+// status code is not 2xx. Unlike CheckResponse it does not assume the
+// body is a JSON error document.
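+//
+// A minimal sketch of inspecting the *Error these checks return (variable
+// names are examples):
+//
+//	if err := CheckResponse(res); err != nil {
+//		if apiErr, ok := err.(*Error); ok {
+//			log.Printf("HTTP %d: %s", apiErr.Code, apiErr.Message)
+//		}
+//	}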
+func CheckMediaResponse(res *http.Response) error {
+	if res.StatusCode >= 200 && res.StatusCode <= 299 {
+		return nil
+	}
+	slurp, _ := ioutil.ReadAll(io.LimitReader(res.Body, 1<<20))
+	res.Body.Close()
+	return &Error{
+		Code: res.StatusCode,
+		Body: string(slurp),
+	}
+}
+
+type MarshalStyle bool
+
+var WithDataWrapper = MarshalStyle(true)
+var WithoutDataWrapper = MarshalStyle(false)
+
+func (wrap MarshalStyle) JSONReader(v interface{}) (io.Reader, error) {
+	buf := new(bytes.Buffer)
+	if wrap {
+		buf.Write([]byte(`{"data": `))
+	}
+	err := json.NewEncoder(buf).Encode(v)
+	if err != nil {
+		return nil, err
+	}
+	if wrap {
+		buf.Write([]byte(`}`))
+	}
+	return buf, nil
+}
+
+// endingWithErrorReader reads from r until it returns an error. If the
+// final error from r is io.EOF and e is non-nil, e is used instead.
+type endingWithErrorReader struct {
+	r io.Reader
+	e error
+}
+
+func (er endingWithErrorReader) Read(p []byte) (n int, err error) {
+	n, err = er.r.Read(p)
+	if err == io.EOF && er.e != nil {
+		err = er.e
+	}
+	return
+}
+
+// countingWriter counts the number of bytes it receives to write, but
+// discards them.
+type countingWriter struct {
+	n *int64
+}
+
+func (w countingWriter) Write(p []byte) (int, error) {
+	*w.n += int64(len(p))
+	return len(p), nil
+}
+
+// ProgressUpdater is a function that is called upon every progress update of a resumable upload.
+// This is the only part of a resumable upload (from googleapi) that is usable by the developer.
+// The remaining usable pieces of resumable uploads are exposed in each auto-generated API.
+type ProgressUpdater func(current, total int64)
+
+type MediaOption interface {
+	setOptions(o *MediaOptions)
+}
+
+type contentTypeOption string
+
+func (ct contentTypeOption) setOptions(o *MediaOptions) {
+	o.ContentType = string(ct)
+	if o.ContentType == "" {
+		o.ForceEmptyContentType = true
+	}
+}
+
+// ContentType returns a MediaOption which sets the Content-Type header for media uploads.
+// If ctype is empty, the Content-Type header will be omitted.
+func ContentType(ctype string) MediaOption {
+	return contentTypeOption(ctype)
+}
+
+type chunkSizeOption int
+
+func (cs chunkSizeOption) setOptions(o *MediaOptions) {
+	size := int(cs)
+	if size%MinUploadChunkSize != 0 {
+		size += MinUploadChunkSize - (size % MinUploadChunkSize)
+	}
+	o.ChunkSize = size
+}
+
+// ChunkSize returns a MediaOption which sets the chunk size for media uploads.
+// size will be rounded up to the nearest multiple of 256K.
+// Media which contains fewer than size bytes will be uploaded in a single request.
+// Media which contains size bytes or more will be uploaded in separate chunks.
+// If size is zero, media will be uploaded in a single request.
+func ChunkSize(size int) MediaOption {
+	return chunkSizeOption(size)
+}
+
+// MediaOptions stores options for customizing media upload. It is not used by developers directly.
+type MediaOptions struct {
+	ContentType           string
+	ForceEmptyContentType bool
+
+	ChunkSize int
+}
+
+// ProcessMediaOptions stores options from opts in a MediaOptions.
+// It is not used by developers directly.
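+//
+// For illustration, given the rounding in chunkSizeOption.setOptions above,
+//
+//	ProcessMediaOptions([]MediaOption{ChunkSize(1)})
+//
+// yields a ChunkSize of MinUploadChunkSize (256 KiB), because chunk sizes are
+// rounded up to the nearest multiple of that value.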
+func ProcessMediaOptions(opts []MediaOption) *MediaOptions {
+	mo := &MediaOptions{ChunkSize: DefaultUploadChunkSize}
+	for _, o := range opts {
+		o.setOptions(mo)
+	}
+	return mo
+}
+
+func ResolveRelative(basestr, relstr string) string {
+	u, _ := url.Parse(basestr)
+	rel, _ := url.Parse(relstr)
+	u = u.ResolveReference(rel)
+	us := u.String()
+	us = strings.Replace(us, "%7B", "{", -1)
+	us = strings.Replace(us, "%7D", "}", -1)
+	return us
+}
+
+// has4860Fix is whether this Go environment contains the fix for
+// http://golang.org/issue/4860
+var has4860Fix bool
+
+// init initializes has4860Fix by checking the behavior of the net/http package.
+func init() {
+	r := http.Request{
+		URL: &url.URL{
+			Scheme: "http",
+			Opaque: "//opaque",
+		},
+	}
+	b := &bytes.Buffer{}
+	r.Write(b)
+	has4860Fix = bytes.HasPrefix(b.Bytes(), []byte("GET http"))
+}
+
+// SetOpaque sets u.Opaque from u.Path such that HTTP requests to it
+// don't alter any hex-escaped characters in u.Path.
+func SetOpaque(u *url.URL) {
+	u.Opaque = "//" + u.Host + u.Path
+	if !has4860Fix {
+		u.Opaque = u.Scheme + ":" + u.Opaque
+	}
+}
+
+// Expand substitutes any {encoded} strings in the URL passed in using
+// the map supplied.
+//
+// This calls SetOpaque to avoid encoding of the parameters in the URL path.
+func Expand(u *url.URL, expansions map[string]string) {
+	expanded, err := uritemplates.Expand(u.Path, expansions)
+	if err == nil {
+		u.Path = expanded
+		SetOpaque(u)
+	}
+}
+
+// CloseBody is used to close res.Body.
+// Prior to calling Close, it also tries to Read a small amount to see an EOF.
+// Not seeing an EOF can prevent HTTP Transports from reusing connections.
+func CloseBody(res *http.Response) {
+	if res == nil || res.Body == nil {
+		return
+	}
+	// Justification for 3 byte reads: two for up to "\r\n" after
+	// a JSON/XML document, and then 1 to see EOF if we haven't yet.
+	// TODO(bradfitz): detect Go 1.3+ and skip these reads.
+	// See https://codereview.appspot.com/58240043
+	// and https://codereview.appspot.com/49570044
+	buf := make([]byte, 1)
+	for i := 0; i < 3; i++ {
+		_, err := res.Body.Read(buf)
+		if err != nil {
+			break
+		}
+	}
+	res.Body.Close()
+
+}
+
+// VariantType returns the type name of the given variant.
+// If the map doesn't contain the named key or the value is not a string, "" is returned.
+// This is used to support "variant" APIs that can return one of a number of different types.
+func VariantType(t map[string]interface{}) string {
+	s, _ := t["type"].(string)
+	return s
+}
+
+// ConvertVariant uses the JSON encoder/decoder to fill in the struct 'dst' with the fields found in variant 'v'.
+// This is used to support "variant" APIs that can return one of a number of different types.
+// It reports whether the conversion was successful.
+func ConvertVariant(v map[string]interface{}, dst interface{}) bool {
+	var buf bytes.Buffer
+	err := json.NewEncoder(&buf).Encode(v)
+	if err != nil {
+		return false
+	}
+	return json.Unmarshal(buf.Bytes(), dst) == nil
+}
+
+// A Field names a field to be retrieved with a partial response.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+//
+// Partial responses can dramatically reduce the amount of data that must be sent to your application.
+// In order to request partial responses, you can specify the full list of fields
+// that your application needs by adding the Fields option to your request.
+//
+// Field strings use camelCase with leading lower-case characters to identify fields within the response.
+// +// For example, if your response has a "NextPageToken" and a slice of "Items" with "Id" fields, +// you could request just those fields like this: +// +// svc.Events.List().Fields("nextPageToken", "items/id").Do() +// +// or if you were also interested in each Item's "Updated" field, you can combine them like this: +// +// svc.Events.List().Fields("nextPageToken", "items(id,updated)").Do() +// +// More information about field formatting can be found here: +// https://developers.google.com/+/api/#fields-syntax +// +// Another way to find field names is through the Google API explorer: +// https://developers.google.com/apis-explorer/#p/ +type Field string + +// CombineFields combines fields into a single string. +func CombineFields(s []Field) string { + r := make([]string, len(s)) + for i, v := range s { + r[i] = string(v) + } + return strings.Join(r, ",") +} + +// A CallOption is an optional argument to an API call. +// It should be treated as an opaque value by users of Google APIs. +// +// A CallOption is something that configures an API call in a way that is +// not specific to that API; for instance, controlling the quota user for +// an API call is common across many APIs, and is thus a CallOption. +type CallOption interface { + Get() (key, value string) +} + +// QuotaUser returns a CallOption that will set the quota user for a call. +// The quota user can be used by server-side applications to control accounting. +// It can be an arbitrary string up to 40 characters, and will override UserIP +// if both are provided. +func QuotaUser(u string) CallOption { return quotaUser(u) } + +type quotaUser string + +func (q quotaUser) Get() (string, string) { return "quotaUser", string(q) } + +// UserIP returns a CallOption that will set the "userIp" parameter of a call. +// This should be the IP address of the originating request. +func UserIP(ip string) CallOption { return userIP(ip) } + +type userIP string + +func (i userIP) Get() (string, string) { return "userIp", string(i) } + +// Trace returns a CallOption that enables diagnostic tracing for a call. +// traceToken is an ID supplied by Google support. +func Trace(traceToken string) CallOption { return traceTok(traceToken) } + +type traceTok string + +func (t traceTok) Get() (string, string) { return "trace", "token:" + string(t) } + +// TODO: Fields too diff --git a/Godeps/_workspace/src/google.golang.org/api/googleapi/internal/uritemplates/LICENSE b/Godeps/_workspace/src/google.golang.org/api/googleapi/internal/uritemplates/LICENSE new file mode 100644 index 000000000000..de9c88cb65cb --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/api/googleapi/internal/uritemplates/LICENSE @@ -0,0 +1,18 @@ +Copyright (c) 2013 Joshua Tacoma + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/Godeps/_workspace/src/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go b/Godeps/_workspace/src/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go new file mode 100644 index 000000000000..7c103ba1386d --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go @@ -0,0 +1,220 @@ +// Copyright 2013 Joshua Tacoma. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package uritemplates is a level 3 implementation of RFC 6570 (URI +// Template, http://tools.ietf.org/html/rfc6570). +// uritemplates does not support composite values (in Go: slices or maps) +// and so does not qualify as a level 4 implementation. +package uritemplates + +import ( + "bytes" + "errors" + "regexp" + "strconv" + "strings" +) + +var ( + unreserved = regexp.MustCompile("[^A-Za-z0-9\\-._~]") + reserved = regexp.MustCompile("[^A-Za-z0-9\\-._~:/?#[\\]@!$&'()*+,;=]") + validname = regexp.MustCompile("^([A-Za-z0-9_\\.]|%[0-9A-Fa-f][0-9A-Fa-f])+$") + hex = []byte("0123456789ABCDEF") +) + +func pctEncode(src []byte) []byte { + dst := make([]byte, len(src)*3) + for i, b := range src { + buf := dst[i*3 : i*3+3] + buf[0] = 0x25 + buf[1] = hex[b/16] + buf[2] = hex[b%16] + } + return dst +} + +func escape(s string, allowReserved bool) string { + if allowReserved { + return string(reserved.ReplaceAllFunc([]byte(s), pctEncode)) + } + return string(unreserved.ReplaceAllFunc([]byte(s), pctEncode)) +} + +// A uriTemplate is a parsed representation of a URI template. +type uriTemplate struct { + raw string + parts []templatePart +} + +// parse parses a URI template string into a uriTemplate object. +func parse(rawTemplate string) (*uriTemplate, error) { + split := strings.Split(rawTemplate, "{") + parts := make([]templatePart, len(split)*2-1) + for i, s := range split { + if i == 0 { + if strings.Contains(s, "}") { + return nil, errors.New("unexpected }") + } + parts[i].raw = s + continue + } + subsplit := strings.Split(s, "}") + if len(subsplit) != 2 { + return nil, errors.New("malformed template") + } + expression := subsplit[0] + var err error + parts[i*2-1], err = parseExpression(expression) + if err != nil { + return nil, err + } + parts[i*2].raw = subsplit[1] + } + return &uriTemplate{ + raw: rawTemplate, + parts: parts, + }, nil +} + +type templatePart struct { + raw string + terms []templateTerm + first string + sep string + named bool + ifemp string + allowReserved bool +} + +type templateTerm struct { + name string + explode bool + truncate int +} + +func parseExpression(expression string) (result templatePart, err error) { + switch expression[0] { + case '+': + result.sep = "," + result.allowReserved = true + expression = expression[1:] + case '.': + result.first = "." + result.sep = "." + expression = expression[1:] + case '/': + result.first = "/" + result.sep = "/" + expression = expression[1:] + case ';': + result.first = ";" + result.sep = ";" + result.named = true + expression = expression[1:] + case '?': + result.first = "?" 
+ result.sep = "&" + result.named = true + result.ifemp = "=" + expression = expression[1:] + case '&': + result.first = "&" + result.sep = "&" + result.named = true + result.ifemp = "=" + expression = expression[1:] + case '#': + result.first = "#" + result.sep = "," + result.allowReserved = true + expression = expression[1:] + default: + result.sep = "," + } + rawterms := strings.Split(expression, ",") + result.terms = make([]templateTerm, len(rawterms)) + for i, raw := range rawterms { + result.terms[i], err = parseTerm(raw) + if err != nil { + break + } + } + return result, err +} + +func parseTerm(term string) (result templateTerm, err error) { + // TODO(djd): Remove "*" suffix parsing once we check that no APIs have + // mistakenly used that attribute. + if strings.HasSuffix(term, "*") { + result.explode = true + term = term[:len(term)-1] + } + split := strings.Split(term, ":") + if len(split) == 1 { + result.name = term + } else if len(split) == 2 { + result.name = split[0] + var parsed int64 + parsed, err = strconv.ParseInt(split[1], 10, 0) + result.truncate = int(parsed) + } else { + err = errors.New("multiple colons in same term") + } + if !validname.MatchString(result.name) { + err = errors.New("not a valid name: " + result.name) + } + if result.explode && result.truncate > 0 { + err = errors.New("both explode and prefix modifers on same term") + } + return result, err +} + +// Expand expands a URI template with a set of values to produce a string. +func (t *uriTemplate) Expand(values map[string]string) string { + var buf bytes.Buffer + for _, p := range t.parts { + p.expand(&buf, values) + } + return buf.String() +} + +func (tp *templatePart) expand(buf *bytes.Buffer, values map[string]string) { + if len(tp.raw) > 0 { + buf.WriteString(tp.raw) + return + } + var first = true + for _, term := range tp.terms { + value, exists := values[term.name] + if !exists { + continue + } + if first { + buf.WriteString(tp.first) + first = false + } else { + buf.WriteString(tp.sep) + } + tp.expandString(buf, term, value) + } +} + +func (tp *templatePart) expandName(buf *bytes.Buffer, name string, empty bool) { + if tp.named { + buf.WriteString(name) + if empty { + buf.WriteString(tp.ifemp) + } else { + buf.WriteString("=") + } + } +} + +func (tp *templatePart) expandString(buf *bytes.Buffer, t templateTerm, s string) { + if len(s) > t.truncate && t.truncate > 0 { + s = s[:t.truncate] + } + tp.expandName(buf, t.name, len(s) == 0) + buf.WriteString(escape(s, tp.allowReserved)) +} diff --git a/Godeps/_workspace/src/google.golang.org/api/googleapi/internal/uritemplates/utils.go b/Godeps/_workspace/src/google.golang.org/api/googleapi/internal/uritemplates/utils.go new file mode 100644 index 000000000000..eff260a6925f --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/api/googleapi/internal/uritemplates/utils.go @@ -0,0 +1,13 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package uritemplates + +func Expand(path string, values map[string]string) (string, error) { + template, err := parse(path) + if err != nil { + return "", err + } + return template.Expand(values), nil +} diff --git a/Godeps/_workspace/src/google.golang.org/api/googleapi/types.go b/Godeps/_workspace/src/google.golang.org/api/googleapi/types.go new file mode 100644 index 000000000000..a02b4b0716bc --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/api/googleapi/types.go @@ -0,0 +1,182 @@ +// Copyright 2013 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package googleapi + +import ( + "encoding/json" + "strconv" +) + +// Int64s is a slice of int64s that marshal as quoted strings in JSON. +type Int64s []int64 + +func (q *Int64s) UnmarshalJSON(raw []byte) error { + *q = (*q)[:0] + var ss []string + if err := json.Unmarshal(raw, &ss); err != nil { + return err + } + for _, s := range ss { + v, err := strconv.ParseInt(s, 10, 64) + if err != nil { + return err + } + *q = append(*q, int64(v)) + } + return nil +} + +// Int32s is a slice of int32s that marshal as quoted strings in JSON. +type Int32s []int32 + +func (q *Int32s) UnmarshalJSON(raw []byte) error { + *q = (*q)[:0] + var ss []string + if err := json.Unmarshal(raw, &ss); err != nil { + return err + } + for _, s := range ss { + v, err := strconv.ParseInt(s, 10, 32) + if err != nil { + return err + } + *q = append(*q, int32(v)) + } + return nil +} + +// Uint64s is a slice of uint64s that marshal as quoted strings in JSON. +type Uint64s []uint64 + +func (q *Uint64s) UnmarshalJSON(raw []byte) error { + *q = (*q)[:0] + var ss []string + if err := json.Unmarshal(raw, &ss); err != nil { + return err + } + for _, s := range ss { + v, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return err + } + *q = append(*q, uint64(v)) + } + return nil +} + +// Uint32s is a slice of uint32s that marshal as quoted strings in JSON. +type Uint32s []uint32 + +func (q *Uint32s) UnmarshalJSON(raw []byte) error { + *q = (*q)[:0] + var ss []string + if err := json.Unmarshal(raw, &ss); err != nil { + return err + } + for _, s := range ss { + v, err := strconv.ParseUint(s, 10, 32) + if err != nil { + return err + } + *q = append(*q, uint32(v)) + } + return nil +} + +// Float64s is a slice of float64s that marshal as quoted strings in JSON. 
+type Float64s []float64 + +func (q *Float64s) UnmarshalJSON(raw []byte) error { + *q = (*q)[:0] + var ss []string + if err := json.Unmarshal(raw, &ss); err != nil { + return err + } + for _, s := range ss { + v, err := strconv.ParseFloat(s, 64) + if err != nil { + return err + } + *q = append(*q, float64(v)) + } + return nil +} + +func quotedList(n int, fn func(dst []byte, i int) []byte) ([]byte, error) { + dst := make([]byte, 0, 2+n*10) // somewhat arbitrary + dst = append(dst, '[') + for i := 0; i < n; i++ { + if i > 0 { + dst = append(dst, ',') + } + dst = append(dst, '"') + dst = fn(dst, i) + dst = append(dst, '"') + } + dst = append(dst, ']') + return dst, nil +} + +func (s Int64s) MarshalJSON() ([]byte, error) { + return quotedList(len(s), func(dst []byte, i int) []byte { + return strconv.AppendInt(dst, s[i], 10) + }) +} + +func (s Int32s) MarshalJSON() ([]byte, error) { + return quotedList(len(s), func(dst []byte, i int) []byte { + return strconv.AppendInt(dst, int64(s[i]), 10) + }) +} + +func (s Uint64s) MarshalJSON() ([]byte, error) { + return quotedList(len(s), func(dst []byte, i int) []byte { + return strconv.AppendUint(dst, s[i], 10) + }) +} + +func (s Uint32s) MarshalJSON() ([]byte, error) { + return quotedList(len(s), func(dst []byte, i int) []byte { + return strconv.AppendUint(dst, uint64(s[i]), 10) + }) +} + +func (s Float64s) MarshalJSON() ([]byte, error) { + return quotedList(len(s), func(dst []byte, i int) []byte { + return strconv.AppendFloat(dst, s[i], 'g', -1, 64) + }) +} + +/* + * Helper routines for simplifying the creation of optional fields of basic type. + */ + +// Bool is a helper routine that allocates a new bool value +// to store v and returns a pointer to it. +func Bool(v bool) *bool { return &v } + +// Int32 is a helper routine that allocates a new int32 value +// to store v and returns a pointer to it. +func Int32(v int32) *int32 { return &v } + +// Int64 is a helper routine that allocates a new int64 value +// to store v and returns a pointer to it. +func Int64(v int64) *int64 { return &v } + +// Float64 is a helper routine that allocates a new float64 value +// to store v and returns a pointer to it. +func Float64(v float64) *float64 { return &v } + +// Uint32 is a helper routine that allocates a new uint32 value +// to store v and returns a pointer to it. +func Uint32(v uint32) *uint32 { return &v } + +// Uint64 is a helper routine that allocates a new uint64 value +// to store v and returns a pointer to it. +func Uint64(v uint64) *uint64 { return &v } + +// String is a helper routine that allocates a new string value +// to store v and returns a pointer to it. +func String(v string) *string { return &v } diff --git a/Godeps/_workspace/src/google.golang.org/cloud/LICENSE b/Godeps/_workspace/src/google.golang.org/cloud/LICENSE new file mode 100644 index 000000000000..a4c5efd822fb --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2014 Google Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
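Before the next vendored file, a brief editorial aside on the googleapi/types.go helpers above (a sketch, not part of the diff, using only the types and helpers shown there):

```go
package main

import (
	"encoding/json"
	"fmt"

	"google.golang.org/api/googleapi"
)

func main() {
	// Int64s marshals as quoted strings, matching the JSON wire format
	// Google APIs use for 64-bit integers.
	b, _ := json.Marshal(googleapi.Int64s{1, 2, 3})
	fmt.Println(string(b)) // ["1","2","3"]

	var back googleapi.Int64s
	_ = json.Unmarshal([]byte(`["7","8"]`), &back)
	fmt.Println(back) // [7 8]

	// The pointer helpers let generated request structs distinguish
	// "unset" (nil) from an explicit zero value.
	published := googleapi.Bool(false) // *bool pointing at false, not nil
	fmt.Println(*published)
}
```

Quoting 64-bit integers keeps them exact for JavaScript clients, where bare JSON numbers lose precision above 2^53.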
diff --git a/Godeps/_workspace/src/google.golang.org/cloud/compute/metadata/metadata.go b/Godeps/_workspace/src/google.golang.org/cloud/compute/metadata/metadata.go new file mode 100644 index 000000000000..0a709598df02 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/compute/metadata/metadata.go @@ -0,0 +1,382 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package metadata provides access to Google Compute Engine (GCE) +// metadata and API service accounts. +// +// This package is a wrapper around the GCE metadata service, +// as documented at https://developers.google.com/compute/docs/metadata. +package metadata + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net" + "net/http" + "net/url" + "os" + "strings" + "sync" + "time" + + "golang.org/x/net/context" + "golang.org/x/net/context/ctxhttp" + + "google.golang.org/cloud/internal" +) + +// metadataIP is the documented metadata server IP address. +const metadataIP = "169.254.169.254" + +type cachedValue struct { + k string + trim bool + mu sync.Mutex + v string +} + +var ( + projID = &cachedValue{k: "project/project-id", trim: true} + projNum = &cachedValue{k: "project/numeric-project-id", trim: true} + instID = &cachedValue{k: "instance/id", trim: true} +) + +var ( + metaClient = &http.Client{ + Transport: &internal.Transport{ + Base: &http.Transport{ + Dial: (&net.Dialer{ + Timeout: 2 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + ResponseHeaderTimeout: 2 * time.Second, + }, + }, + } + subscribeClient = &http.Client{ + Transport: &internal.Transport{ + Base: &http.Transport{ + Dial: (&net.Dialer{ + Timeout: 2 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + }, + }, + } +) + +// NotDefinedError is returned when requested metadata is not defined. +// +// The underlying string is the suffix after "/computeMetadata/v1/". +// +// This error is not returned if the value is defined to be the empty +// string. +type NotDefinedError string + +func (suffix NotDefinedError) Error() string { + return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix)) +} + +// Get returns a value from the metadata service. +// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". +// +// If the GCE_METADATA_HOST environment variable is not defined, a default of +// 169.254.169.254 will be used instead. +// +// If the requested metadata is not defined, the returned error will +// be of type NotDefinedError. +func Get(suffix string) (string, error) { + val, _, err := getETag(metaClient, suffix) + return val, err +} + +// getETag returns a value from the metadata service as well as the associated +// ETag using the provided client. This func is otherwise equivalent to Get. 
+func getETag(client *http.Client, suffix string) (value, etag string, err error) { + // Using a fixed IP makes it very difficult to spoof the metadata service in + // a container, which is an important use-case for local testing of cloud + // deployments. To enable spoofing of the metadata service, the environment + // variable GCE_METADATA_HOST is first inspected to decide where metadata + // requests shall go. + host := os.Getenv("GCE_METADATA_HOST") + if host == "" { + // Using 169.254.169.254 instead of "metadata" here because Go + // binaries built with the "netgo" tag and without cgo won't + // know the search suffix for "metadata" is + // ".google.internal", and this IP address is documented as + // being stable anyway. + host = metadataIP + } + url := "http://" + host + "/computeMetadata/v1/" + suffix + req, _ := http.NewRequest("GET", url, nil) + req.Header.Set("Metadata-Flavor", "Google") + res, err := client.Do(req) + if err != nil { + return "", "", err + } + defer res.Body.Close() + if res.StatusCode == http.StatusNotFound { + return "", "", NotDefinedError(suffix) + } + if res.StatusCode != 200 { + return "", "", fmt.Errorf("status code %d trying to fetch %s", res.StatusCode, url) + } + all, err := ioutil.ReadAll(res.Body) + if err != nil { + return "", "", err + } + return string(all), res.Header.Get("Etag"), nil +} + +func getTrimmed(suffix string) (s string, err error) { + s, err = Get(suffix) + s = strings.TrimSpace(s) + return +} + +func (c *cachedValue) get() (v string, err error) { + defer c.mu.Unlock() + c.mu.Lock() + if c.v != "" { + return c.v, nil + } + if c.trim { + v, err = getTrimmed(c.k) + } else { + v, err = Get(c.k) + } + if err == nil { + c.v = v + } + return +} + +var onGCE struct { + sync.Mutex + set bool + v bool +} + +// OnGCE reports whether this process is running on Google Compute Engine. +func OnGCE() bool { + defer onGCE.Unlock() + onGCE.Lock() + if onGCE.set { + return onGCE.v + } + onGCE.set = true + onGCE.v = testOnGCE() + return onGCE.v +} + +func testOnGCE() bool { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + resc := make(chan bool, 2) + + // Try two strategies in parallel. + // See https://github.com/GoogleCloudPlatform/gcloud-golang/issues/194 + go func() { + res, err := ctxhttp.Get(ctx, metaClient, "http://"+metadataIP) + if err != nil { + resc <- false + return + } + defer res.Body.Close() + resc <- res.Header.Get("Metadata-Flavor") == "Google" + }() + + go func() { + addrs, err := net.LookupHost("metadata.google.internal") + if err != nil || len(addrs) == 0 { + resc <- false + return + } + resc <- strsContains(addrs, metadataIP) + }() + + return <-resc +} + +// Subscribe subscribes to a value from the metadata service. +// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". +// The suffix may contain query parameters. +// +// Subscribe calls fn with the latest metadata value indicated by the provided +// suffix. If the metadata value is deleted, fn is called with the empty string +// and ok false. Subscribe blocks until fn returns a non-nil error or the value +// is deleted. Subscribe returns the error value returned from the last call to +// fn, which may be nil when ok == false. +func Subscribe(suffix string, fn func(v string, ok bool) error) error { + const failedSubscribeSleep = time.Second * 5 + + // First check to see if the metadata value exists at all. 
+	val, lastETag, err := getETag(subscribeClient, suffix)
+	if err != nil {
+		return err
+	}
+
+	if err := fn(val, true); err != nil {
+		return err
+	}
+
+	ok := true
+	if strings.ContainsRune(suffix, '?') {
+		suffix += "&wait_for_change=true&last_etag="
+	} else {
+		suffix += "?wait_for_change=true&last_etag="
+	}
+	for {
+		val, etag, err := getETag(subscribeClient, suffix+url.QueryEscape(lastETag))
+		if err != nil {
+			if _, deleted := err.(NotDefinedError); !deleted {
+				time.Sleep(failedSubscribeSleep)
+				continue // Retry on other errors.
+			}
+			ok = false
+		}
+		lastETag = etag
+
+		if err := fn(val, ok); err != nil || !ok {
+			return err
+		}
+	}
+}
+
+// ProjectID returns the current instance's project ID string.
+func ProjectID() (string, error) { return projID.get() }
+
+// NumericProjectID returns the current instance's numeric project ID.
+func NumericProjectID() (string, error) { return projNum.get() }
+
+// InternalIP returns the instance's primary internal IP address.
+func InternalIP() (string, error) {
+	return getTrimmed("instance/network-interfaces/0/ip")
+}
+
+// ExternalIP returns the instance's primary external (public) IP address.
+func ExternalIP() (string, error) {
+	return getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip")
+}
+
+// Hostname returns the instance's hostname. This will be of the form
+// "<instanceID>.c.<projID>.internal".
+func Hostname() (string, error) {
+	return getTrimmed("instance/hostname")
+}
+
+// InstanceTags returns the list of user-defined instance tags,
+// assigned when initially creating a GCE instance.
+func InstanceTags() ([]string, error) {
+	var s []string
+	j, err := Get("instance/tags")
+	if err != nil {
+		return nil, err
+	}
+	if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil {
+		return nil, err
+	}
+	return s, nil
+}
+
+// InstanceID returns the current VM's numeric instance ID.
+func InstanceID() (string, error) {
+	return instID.get()
+}
+
+// InstanceName returns the current VM's instance name string.
+func InstanceName() (string, error) {
+	host, err := Hostname()
+	if err != nil {
+		return "", err
+	}
+	return strings.Split(host, ".")[0], nil
+}
+
+// Zone returns the current VM's zone, such as "us-central1-b".
+func Zone() (string, error) {
+	zone, err := getTrimmed("instance/zone")
+	// zone is of the form "projects/<projNum>/zones/<zoneName>".
+	if err != nil {
+		return "", err
+	}
+	return zone[strings.LastIndex(zone, "/")+1:], nil
+}
+
+// InstanceAttributes returns the list of user-defined attributes,
+// assigned when initially creating a GCE VM instance. The value of an
+// attribute can be obtained with InstanceAttributeValue.
+func InstanceAttributes() ([]string, error) { return lines("instance/attributes/") }
+
+// ProjectAttributes returns the list of user-defined attributes
+// applying to the project as a whole, not just this VM. The value of
+// an attribute can be obtained with ProjectAttributeValue.
+func ProjectAttributes() ([]string, error) { return lines("project/attributes/") }
+
+func lines(suffix string) ([]string, error) {
+	j, err := Get(suffix)
+	if err != nil {
+		return nil, err
+	}
+	s := strings.Split(strings.TrimSpace(j), "\n")
+	for i := range s {
+		s[i] = strings.TrimSpace(s[i])
+	}
+	return s, nil
+}
+
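A short usage sketch from an editorial perspective (not part of the vendored file; it only uses functions defined in this package):

```go
package main

import (
	"fmt"
	"log"

	"google.golang.org/cloud/compute/metadata"
)

func main() {
	// OnGCE gates the other lookups: off GCE there is no metadata server.
	if !metadata.OnGCE() {
		log.Fatal("not running on GCE")
	}
	zone, err := metadata.Zone()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("zone:", zone)

	// Missing keys come back as the typed NotDefinedError, so callers can
	// tell "not defined" apart from transport failures.
	if _, err := metadata.InstanceAttributeValue("no-such-attr"); err != nil {
		if _, ok := err.(metadata.NotDefinedError); ok {
			fmt.Println("attribute not defined")
		}
	}
}
```

+// InstanceAttributeValue returns the value of the provided VM
+// instance attribute.
+//
+// If the requested attribute is not defined, the returned error will
+// be of type NotDefinedError.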
+// +// InstanceAttributeValue may return ("", nil) if the attribute was +// defined to be the empty string. +func InstanceAttributeValue(attr string) (string, error) { + return Get("instance/attributes/" + attr) +} + +// ProjectAttributeValue returns the value of the provided +// project attribute. +// +// If the requested attribute is not defined, the returned error will +// be of type NotDefinedError. +// +// ProjectAttributeValue may return ("", nil) if the attribute was +// defined to be the empty string. +func ProjectAttributeValue(attr string) (string, error) { + return Get("project/attributes/" + attr) +} + +// Scopes returns the service account scopes for the given account. +// The account may be empty or the string "default" to use the instance's +// main account. +func Scopes(serviceAccount string) ([]string, error) { + if serviceAccount == "" { + serviceAccount = "default" + } + return lines("instance/service-accounts/" + serviceAccount + "/scopes") +} + +func strsContains(ss []string, s string) bool { + for _, v := range ss { + if v == s { + return true + } + } + return false +} diff --git a/Godeps/_workspace/src/google.golang.org/cloud/internal/cloud.go b/Godeps/_workspace/src/google.golang.org/cloud/internal/cloud.go new file mode 100644 index 000000000000..59428803dd9c --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/cloud/internal/cloud.go @@ -0,0 +1,128 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package internal provides support for the cloud packages. +// +// Users should not import this package directly. +package internal + +import ( + "fmt" + "net/http" + "sync" + + "golang.org/x/net/context" +) + +type contextKey struct{} + +func WithContext(parent context.Context, projID string, c *http.Client) context.Context { + if c == nil { + panic("nil *http.Client passed to WithContext") + } + if projID == "" { + panic("empty project ID passed to WithContext") + } + return context.WithValue(parent, contextKey{}, &cloudContext{ + ProjectID: projID, + HTTPClient: c, + }) +} + +const userAgent = "gcloud-golang/0.1" + +type cloudContext struct { + ProjectID string + HTTPClient *http.Client + + mu sync.Mutex // guards svc + svc map[string]interface{} // e.g. "storage" => *rawStorage.Service +} + +// Service returns the result of the fill function if it's never been +// called before for the given name (which is assumed to be an API +// service name, like "datastore"). If it has already been cached, the fill +// func is not run. +// It's safe for concurrent use by multiple goroutines. 
+func Service(ctx context.Context, name string, fill func(*http.Client) interface{}) interface{} { + return cc(ctx).service(name, fill) +} + +func (c *cloudContext) service(name string, fill func(*http.Client) interface{}) interface{} { + c.mu.Lock() + defer c.mu.Unlock() + + if c.svc == nil { + c.svc = make(map[string]interface{}) + } else if v, ok := c.svc[name]; ok { + return v + } + v := fill(c.HTTPClient) + c.svc[name] = v + return v +} + +// Transport is an http.RoundTripper that appends +// Google Cloud client's user-agent to the original +// request's user-agent header. +type Transport struct { + // Base is the actual http.RoundTripper + // requests will use. It must not be nil. + Base http.RoundTripper +} + +// RoundTrip appends a user-agent to the existing user-agent +// header and delegates the request to the base http.RoundTripper. +func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { + req = cloneRequest(req) + ua := req.Header.Get("User-Agent") + if ua == "" { + ua = userAgent + } else { + ua = fmt.Sprintf("%s %s", ua, userAgent) + } + req.Header.Set("User-Agent", ua) + return t.Base.RoundTrip(req) +} + +// cloneRequest returns a clone of the provided *http.Request. +// The clone is a shallow copy of the struct and its Header map. +func cloneRequest(r *http.Request) *http.Request { + // shallow copy of the struct + r2 := new(http.Request) + *r2 = *r + // deep copy of the Header + r2.Header = make(http.Header) + for k, s := range r.Header { + r2.Header[k] = s + } + return r2 +} + +func ProjID(ctx context.Context) string { + return cc(ctx).ProjectID +} + +func HTTPClient(ctx context.Context) *http.Client { + return cc(ctx).HTTPClient +} + +// cc returns the internal *cloudContext (cc) state for a context.Context. +// It panics if the user did it wrong. +func cc(ctx context.Context) *cloudContext { + if c, ok := ctx.Value(contextKey{}).(*cloudContext); ok { + return c + } + panic("invalid context.Context type; it should be created with cloud.NewContext") +} diff --git a/Godeps/_workspace/src/google.golang.org/grpc/.travis.yml b/Godeps/_workspace/src/google.golang.org/grpc/.travis.yml new file mode 100644 index 000000000000..3f83776ec5fb --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/.travis.yml @@ -0,0 +1,14 @@ +language: go + +before_install: + - go get github.com/axw/gocov/gocov + - go get github.com/mattn/goveralls + - go get golang.org/x/tools/cmd/cover + +install: + - mkdir -p "$GOPATH/src/google.golang.org" + - mv "$TRAVIS_BUILD_DIR" "$GOPATH/src/google.golang.org/grpc" + +script: + - make test testrace + - make coverage diff --git a/Godeps/_workspace/src/google.golang.org/grpc/CONTRIBUTING.md b/Godeps/_workspace/src/google.golang.org/grpc/CONTRIBUTING.md new file mode 100644 index 000000000000..407d384a7c8b --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/CONTRIBUTING.md @@ -0,0 +1,23 @@ +# How to contribute + +We definitely welcome patches and contribution to grpc! Here is some guideline +and information about how to do so. + +## Getting started + +### Legal requirements + +In order to protect both you and ourselves, you will need to sign the +[Contributor License Agreement](https://cla.developers.google.com/clas). + +### Filing Issues +When filing an issue, make sure to answer these five questions: + +1. What version of Go are you using (`go version`)? +2. What operating system and processor architecture are you using? +3. What did you do? +4. What did you expect to see? +5. What did you see instead? 
+ +### Contributing code +Unless otherwise noted, the Go source files are distributed under the BSD-style license found in the LICENSE file. diff --git a/Godeps/_workspace/src/google.golang.org/grpc/LICENSE b/Godeps/_workspace/src/google.golang.org/grpc/LICENSE new file mode 100644 index 000000000000..f4988b450799 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/LICENSE @@ -0,0 +1,28 @@ +Copyright 2014, Google Inc. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/google.golang.org/grpc/Makefile b/Godeps/_workspace/src/google.golang.org/grpc/Makefile new file mode 100644 index 000000000000..12e84e4e5be2 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/Makefile @@ -0,0 +1,50 @@ +.PHONY: \ + all \ + deps \ + updatedeps \ + testdeps \ + updatetestdeps \ + build \ + proto \ + test \ + testrace \ + clean \ + +all: test testrace + +deps: + go get -d -v google.golang.org/grpc/... + +updatedeps: + go get -d -v -u -f google.golang.org/grpc/... + +testdeps: + go get -d -v -t google.golang.org/grpc/... + +updatetestdeps: + go get -d -v -t -u -f google.golang.org/grpc/... + +build: deps + go build google.golang.org/grpc/... + +proto: + @ if ! which protoc > /dev/null; then \ + echo "error: protoc not installed" >&2; \ + exit 1; \ + fi + go get -v github.com/golang/protobuf/protoc-gen-go + for file in $$(git ls-files '*.proto'); do \ + protoc -I $$(dirname $$file) --go_out=plugins=grpc:$$(dirname $$file) $$file; \ + done + +test: testdeps + go test -v -cpu 1,4 google.golang.org/grpc/... + +testrace: testdeps + go test -v -race -cpu 1,4 google.golang.org/grpc/... + +clean: + go clean google.golang.org/grpc/... 
+ +coverage: testdeps + ./coverage.sh --coveralls diff --git a/Godeps/_workspace/src/google.golang.org/grpc/PATENTS b/Godeps/_workspace/src/google.golang.org/grpc/PATENTS new file mode 100644 index 000000000000..619f9dbfe637 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the GRPC project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of GRPC, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of GRPC. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of GRPC or any code incorporated within this +implementation of GRPC constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of GRPC +shall terminate as of the date such litigation is filed. diff --git a/Godeps/_workspace/src/google.golang.org/grpc/README.md b/Godeps/_workspace/src/google.golang.org/grpc/README.md new file mode 100644 index 000000000000..37b05f0953d5 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/README.md @@ -0,0 +1,32 @@ +#gRPC-Go + +[![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go) [![GoDoc](https://godoc.org/google.golang.org/grpc?status.svg)](https://godoc.org/google.golang.org/grpc) + +The Go implementation of [gRPC](http://www.grpc.io/): A high performance, open source, general RPC framework that puts mobile and HTTP/2 first. For more information see the [gRPC Quick Start](http://www.grpc.io/docs/) guide. + +Installation +------------ + +To install this package, you need to install Go 1.4 or above and setup your Go workspace on your computer. The simplest way to install the library is to run: + +``` +$ go get google.golang.org/grpc +``` + +Prerequisites +------------- + +This requires Go 1.4 or above. + +Constraints +----------- +The grpc package should only depend on standard Go packages and a small number of exceptions. If your contribution introduces new dependencies which are NOT in the [list](http://godoc.org/google.golang.org/grpc?imports), you need a discussion with gRPC-Go authors and consultants. + +Documentation +------------- +See [API documentation](https://godoc.org/google.golang.org/grpc) for package and API descriptions and find examples in the [examples directory](examples/). + +Status +------ +Beta release + diff --git a/Godeps/_workspace/src/google.golang.org/grpc/call.go b/Godeps/_workspace/src/google.golang.org/grpc/call.go new file mode 100644 index 000000000000..d4ae68bee1f8 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/call.go @@ -0,0 +1,190 @@ +/* + * + * Copyright 2014, Google Inc. + * All rights reserved. 
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+package grpc
+
+import (
+	"bytes"
+	"io"
+	"time"
+
+	"golang.org/x/net/context"
+	"golang.org/x/net/trace"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/transport"
+)
+
+// recvResponse receives and parses an RPC response.
+// On error, it returns the error; the caller decides, based on the error
+// type (e.g., transport.ConnectionError), whether the call should be retried.
+//
+// TODO(zhaoq): Check whether the received message sequence is valid.
+func recvResponse(dopts dialOptions, t transport.ClientTransport, c *callInfo, stream *transport.Stream, reply interface{}) error {
+	// Try to acquire header metadata from the server if there is any.
+	var err error
+	c.headerMD, err = stream.Header()
+	if err != nil {
+		return err
+	}
+	p := &parser{s: stream}
+	for {
+		if err = recv(p, dopts.codec, stream, dopts.dc, reply); err != nil {
+			if err == io.EOF {
+				break
+			}
+			return err
+		}
+	}
+	c.trailerMD = stream.Trailer()
+	return nil
+}
+
+// sendRequest writes out the various pieces of an RPC, such as the context
+// information and the request message.
+func sendRequest(ctx context.Context, codec Codec, compressor Compressor, callHdr *transport.CallHdr, t transport.ClientTransport, args interface{}, opts *transport.Options) (_ *transport.Stream, err error) {
+	stream, err := t.NewStream(ctx, callHdr)
+	if err != nil {
+		return nil, err
+	}
+	defer func() {
+		if err != nil {
+			if _, ok := err.(transport.ConnectionError); !ok {
+				t.CloseStream(stream, err)
+			}
+		}
+	}()
+	var cbuf *bytes.Buffer
+	if compressor != nil {
+		cbuf = new(bytes.Buffer)
+	}
+	outBuf, err := encode(codec, args, compressor, cbuf)
+	if err != nil {
+		return nil, transport.StreamErrorf(codes.Internal, "grpc: %v", err)
+	}
+	err = t.Write(stream, outBuf, opts)
+	if err != nil {
+		return nil, err
+	}
+	// Sent successfully.
+	return stream, nil
+}
+
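For orientation, here is what a generated client stub boils down to — a hedged editorial sketch, not part of the vendored file; the helloworld message types and import path are illustrative (borrowed from the gRPC examples), and Dial/WithInsecure are defined later in this diff:

```go
package main

import (
	"log"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
	pb "google.golang.org/grpc/examples/helloworld/helloworld" // illustrative generated package
)

func main() {
	conn, err := grpc.Dial("localhost:50051", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// One unary round trip: this is the call the generated stub makes.
	req, resp := &pb.HelloRequest{Name: "gopher"}, &pb.HelloReply{}
	if err := grpc.Invoke(context.Background(), "/helloworld.Greeter/SayHello", req, resp, conn); err != nil {
		log.Fatal(err)
	}
	log.Println(resp.Message)
}
```

+// Invoke is called by the generated code. It sends the RPC request on the
+// wire and returns after the response is received.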
+func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) (err error) { + var c callInfo + for _, o := range opts { + if err := o.before(&c); err != nil { + return toRPCErr(err) + } + } + defer func() { + for _, o := range opts { + o.after(&c) + } + }() + if EnableTracing { + c.traceInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method) + defer c.traceInfo.tr.Finish() + c.traceInfo.firstLine.client = true + if deadline, ok := ctx.Deadline(); ok { + c.traceInfo.firstLine.deadline = deadline.Sub(time.Now()) + } + c.traceInfo.tr.LazyLog(&c.traceInfo.firstLine, false) + // TODO(dsymonds): Arrange for c.traceInfo.firstLine.remoteAddr to be set. + defer func() { + if err != nil { + c.traceInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + c.traceInfo.tr.SetError() + } + }() + } + topts := &transport.Options{ + Last: true, + Delay: false, + } + var ( + lastErr error // record the error that happened + ) + for { + var ( + err error + t transport.ClientTransport + stream *transport.Stream + ) + // TODO(zhaoq): Need a formal spec of retry strategy for non-failfast rpcs. + if lastErr != nil && c.failFast { + return toRPCErr(lastErr) + } + callHdr := &transport.CallHdr{ + Host: cc.authority, + Method: method, + } + if cc.dopts.cp != nil { + callHdr.SendCompress = cc.dopts.cp.Type() + } + t, err = cc.dopts.picker.Pick(ctx) + if err != nil { + if lastErr != nil { + // This was a retry; return the error from the last attempt. + return toRPCErr(lastErr) + } + return toRPCErr(err) + } + if c.traceInfo.tr != nil { + c.traceInfo.tr.LazyLog(&payload{sent: true, msg: args}, true) + } + stream, err = sendRequest(ctx, cc.dopts.codec, cc.dopts.cp, callHdr, t, args, topts) + if err != nil { + if _, ok := err.(transport.ConnectionError); ok { + lastErr = err + continue + } + if lastErr != nil { + return toRPCErr(lastErr) + } + return toRPCErr(err) + } + // Receive the response + lastErr = recvResponse(cc.dopts, t, &c, stream, reply) + if _, ok := lastErr.(transport.ConnectionError); ok { + continue + } + if c.traceInfo.tr != nil { + c.traceInfo.tr.LazyLog(&payload{sent: false, msg: reply}, true) + } + t.CloseStream(stream, lastErr) + if lastErr != nil { + return toRPCErr(lastErr) + } + return Errorf(stream.StatusCode(), stream.StatusDesc()) + } +} diff --git a/Godeps/_workspace/src/google.golang.org/grpc/clientconn.go b/Godeps/_workspace/src/google.golang.org/grpc/clientconn.go new file mode 100644 index 000000000000..28e74da8b92b --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/clientconn.go @@ -0,0 +1,589 @@ +/* + * + * Copyright 2014, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+package grpc
+
+import (
+	"errors"
+	"fmt"
+	"net"
+	"strings"
+	"sync"
+	"time"
+
+	"golang.org/x/net/context"
+	"golang.org/x/net/trace"
+	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/transport"
+)
+
+var (
+	// ErrUnspecTarget indicates that the target address is unspecified.
+	ErrUnspecTarget = errors.New("grpc: target is unspecified")
+	// ErrNoTransportSecurity indicates that there is no transport security
+	// being set for ClientConn. Users should either set one or explicitly
+	// call the WithInsecure DialOption to disable security.
+	ErrNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithInsecure() explicitly or set credentials)")
+	// ErrCredentialsMisuse indicates that users want to transmit security
+	// information (e.g., an oauth2 token) which requires a secure connection
+	// over an insecure connection.
+	ErrCredentialsMisuse = errors.New("grpc: the credentials require transport level security (use grpc.WithTransportAuthenticator() to set)")
+	// ErrClientConnClosing indicates that the operation is illegal because
+	// the session is closing.
+	ErrClientConnClosing = errors.New("grpc: the client connection is closing")
+	// ErrClientConnTimeout indicates that the connection could not be
+	// established or re-established within the specified timeout.
+	ErrClientConnTimeout = errors.New("grpc: timed out trying to connect")
+	// minConnectTimeout is the minimum time to give a connection to complete.
+	minConnectTimeout = 20 * time.Second
+)
+
+// dialOptions configure a Dial call. dialOptions are set by the DialOption
+// values passed to Dial.
+type dialOptions struct {
+	codec    Codec
+	cp       Compressor
+	dc       Decompressor
+	picker   Picker
+	block    bool
+	insecure bool
+	copts    transport.ConnectOptions
+}
+
+// DialOption configures how we set up the connection.
+type DialOption func(*dialOptions)
+
+// WithCodec returns a DialOption which sets a codec for message marshaling and unmarshaling.
+func WithCodec(c Codec) DialOption {
+	return func(o *dialOptions) {
+		o.codec = c
+	}
+}
+
+// WithCompressor returns a DialOption which sets a Compressor for compressing
+// outbound messages.
+func WithCompressor(cp Compressor) DialOption {
+	return func(o *dialOptions) {
+		o.cp = cp
+	}
+}
+
+// WithDecompressor returns a DialOption which sets a Decompressor for
+// decompressing inbound messages.
+func WithDecompressor(dc Decompressor) DialOption {
+	return func(o *dialOptions) {
+		o.dc = dc
+	}
+}
+
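The functional-option pattern above also makes codecs pluggable. A minimal sketch of a custom codec (editorial, not part of the vendored file; it assumes the Codec interface — Marshal, Unmarshal, String — declared elsewhere in this package):

```go
package main

import (
	"encoding/json"

	"google.golang.org/grpc"
)

// jsonCodec swaps the default proto framing for JSON. It is assumed to
// satisfy this package's Codec interface (Marshal/Unmarshal/String).
type jsonCodec struct{}

func (jsonCodec) Marshal(v interface{}) ([]byte, error)      { return json.Marshal(v) }
func (jsonCodec) Unmarshal(data []byte, v interface{}) error { return json.Unmarshal(data, v) }
func (jsonCodec) String() string                             { return "json" }

func main() {
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithCodec(jsonCodec{}), // overrides the protoCodec default set in Dial
		grpc.WithInsecure(),
	)
	if err != nil {
		panic(err)
	}
	defer conn.Close()
}
```

Dial (below) falls back to protoCodec{} when no codec is supplied.

+// WithPicker returns a DialOption which sets a picker for connection selection.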
+func WithPicker(p Picker) DialOption {
+	return func(o *dialOptions) {
+		o.picker = p
+	}
+}
+
+// WithBlock returns a DialOption which makes the caller of Dial block until
+// the underlying connection is up. Without this, Dial returns immediately and
+// connecting to the server happens in the background.
+func WithBlock() DialOption {
+	return func(o *dialOptions) {
+		o.block = true
+	}
+}
+
+// WithInsecure returns a DialOption which disables transport security for this ClientConn.
+// Note that transport security is required unless WithInsecure is set.
+func WithInsecure() DialOption {
+	return func(o *dialOptions) {
+		o.insecure = true
+	}
+}
+
+// WithTransportCredentials returns a DialOption which configures
+// connection-level security credentials (e.g., TLS/SSL).
+func WithTransportCredentials(creds credentials.TransportAuthenticator) DialOption {
+	return func(o *dialOptions) {
+		o.copts.AuthOptions = append(o.copts.AuthOptions, creds)
+	}
+}
+
+// WithPerRPCCredentials returns a DialOption which sets
+// credentials which will place auth state on each outbound RPC.
+func WithPerRPCCredentials(creds credentials.Credentials) DialOption {
+	return func(o *dialOptions) {
+		o.copts.AuthOptions = append(o.copts.AuthOptions, creds)
+	}
+}
+
+// WithTimeout returns a DialOption that configures a timeout for dialing a client connection.
+func WithTimeout(d time.Duration) DialOption {
+	return func(o *dialOptions) {
+		o.copts.Timeout = d
+	}
+}
+
+// WithDialer returns a DialOption that specifies a function to use for dialing network addresses.
+func WithDialer(f func(addr string, timeout time.Duration) (net.Conn, error)) DialOption {
+	return func(o *dialOptions) {
+		o.copts.Dialer = f
+	}
+}
+
+// WithUserAgent returns a DialOption that specifies a user agent string for all the RPCs.
+func WithUserAgent(s string) DialOption {
+	return func(o *dialOptions) {
+		o.copts.UserAgent = s
+	}
+}
+
+// Dial creates a client connection to the given target.
+func Dial(target string, opts ...DialOption) (*ClientConn, error) {
+	cc := &ClientConn{
+		target: target,
+	}
+	for _, opt := range opts {
+		opt(&cc.dopts)
+	}
+	if cc.dopts.codec == nil {
+		// Set the default codec.
+		cc.dopts.codec = protoCodec{}
+	}
+	if cc.dopts.picker == nil {
+		cc.dopts.picker = &unicastPicker{
+			target: target,
+		}
+	}
+	if err := cc.dopts.picker.Init(cc); err != nil {
+		return nil, err
+	}
+	colonPos := strings.LastIndex(target, ":")
+	if colonPos == -1 {
+		colonPos = len(target)
+	}
+	cc.authority = target[:colonPos]
+	return cc, nil
+}
+
+// ConnectivityState indicates the state of a client connection.
+type ConnectivityState int
+
+const (
+	// Idle indicates the ClientConn is idle.
+	Idle ConnectivityState = iota
+	// Connecting indicates the ClientConn is connecting.
+	Connecting
+	// Ready indicates the ClientConn is ready for work.
+	Ready
+	// TransientFailure indicates the ClientConn has seen a failure but expects to recover.
+	TransientFailure
+	// Shutdown indicates the ClientConn has started shutting down.
+	Shutdown
+)
+
+func (s ConnectivityState) String() string {
+	switch s {
+	case Idle:
+		return "IDLE"
+	case Connecting:
+		return "CONNECTING"
+	case Ready:
+		return "READY"
+	case TransientFailure:
+		return "TRANSIENT_FAILURE"
+	case Shutdown:
+		return "SHUTDOWN"
+	default:
+		panic(fmt.Sprintf("unknown connectivity state: %d", s))
+	}
+}
+
+// ClientConn represents a client connection to an RPC service.
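+//
+// A minimal usage sketch (the address and method name below are illustrative,
+// not part of this package; error handling abbreviated):
+//
+//	cc, err := Dial("localhost:50051", WithInsecure())
+//	if err != nil {
+//		// handle the error
+//	}
+//	defer cc.Close()
+//	err = Invoke(context.Background(), "/pkg.Service/Method", req, reply, cc)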
+type ClientConn struct { + target string + authority string + dopts dialOptions +} + +// State returns the connectivity state of cc. +// This is EXPERIMENTAL API. +func (cc *ClientConn) State() (ConnectivityState, error) { + return cc.dopts.picker.State() +} + +// WaitForStateChange blocks until the state changes to something other than the sourceState. +// It returns the new state or error. +// This is EXPERIMENTAL API. +func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState ConnectivityState) (ConnectivityState, error) { + return cc.dopts.picker.WaitForStateChange(ctx, sourceState) +} + +// Close starts to tear down the ClientConn. +func (cc *ClientConn) Close() error { + return cc.dopts.picker.Close() +} + +// Conn is a client connection to a single destination. +type Conn struct { + target string + dopts dialOptions + resetChan chan int + shutdownChan chan struct{} + events trace.EventLog + + mu sync.Mutex + state ConnectivityState + stateCV *sync.Cond + // ready is closed and becomes nil when a new transport is up or failed + // due to timeout. + ready chan struct{} + transport transport.ClientTransport +} + +// NewConn creates a Conn. +func NewConn(cc *ClientConn) (*Conn, error) { + if cc.target == "" { + return nil, ErrUnspecTarget + } + c := &Conn{ + target: cc.target, + dopts: cc.dopts, + resetChan: make(chan int, 1), + shutdownChan: make(chan struct{}), + } + if EnableTracing { + c.events = trace.NewEventLog("grpc.ClientConn", c.target) + } + if !c.dopts.insecure { + var ok bool + for _, cd := range c.dopts.copts.AuthOptions { + if _, ok := cd.(credentials.TransportAuthenticator); !ok { + continue + } + ok = true + } + if !ok { + return nil, ErrNoTransportSecurity + } + } else { + for _, cd := range c.dopts.copts.AuthOptions { + if cd.RequireTransportSecurity() { + return nil, ErrCredentialsMisuse + } + } + } + c.stateCV = sync.NewCond(&c.mu) + if c.dopts.block { + if err := c.resetTransport(false); err != nil { + c.Close() + return nil, err + } + // Start to monitor the error status of transport. + go c.transportMonitor() + } else { + // Start a goroutine connecting to the server asynchronously. + go func() { + if err := c.resetTransport(false); err != nil { + grpclog.Printf("Failed to dial %s: %v; please retry.", c.target, err) + c.Close() + return + } + c.transportMonitor() + }() + } + return c, nil +} + +// printf records an event in cc's event log, unless cc has been closed. +// REQUIRES cc.mu is held. +func (cc *Conn) printf(format string, a ...interface{}) { + if cc.events != nil { + cc.events.Printf(format, a...) + } +} + +// errorf records an error in cc's event log, unless cc has been closed. +// REQUIRES cc.mu is held. +func (cc *Conn) errorf(format string, a ...interface{}) { + if cc.events != nil { + cc.events.Errorf(format, a...) + } +} + +// State returns the connectivity state of the Conn +func (cc *Conn) State() ConnectivityState { + cc.mu.Lock() + defer cc.mu.Unlock() + return cc.state +} + +// WaitForStateChange blocks until the state changes to something other than the sourceState. 
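+// For example, a caller can observe each transition until shutdown (a sketch
+// only; error handling elided):
+//
+//	for s := cc.State(); s != Shutdown; {
+//		s, _ = cc.WaitForStateChange(ctx, s)
+//	}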
+func (cc *Conn) WaitForStateChange(ctx context.Context, sourceState ConnectivityState) (ConnectivityState, error) { + cc.mu.Lock() + defer cc.mu.Unlock() + if sourceState != cc.state { + return cc.state, nil + } + done := make(chan struct{}) + var err error + go func() { + select { + case <-ctx.Done(): + cc.mu.Lock() + err = ctx.Err() + cc.stateCV.Broadcast() + cc.mu.Unlock() + case <-done: + } + }() + defer close(done) + for sourceState == cc.state { + cc.stateCV.Wait() + if err != nil { + return cc.state, err + } + } + return cc.state, nil +} + +// NotifyReset tries to signal the underlying transport needs to be reset due to +// for example a name resolution change in flight. +func (cc *Conn) NotifyReset() { + select { + case cc.resetChan <- 0: + default: + } +} + +func (cc *Conn) resetTransport(closeTransport bool) error { + var retries int + start := time.Now() + for { + cc.mu.Lock() + cc.printf("connecting") + if cc.state == Shutdown { + // cc.Close() has been invoked. + cc.mu.Unlock() + return ErrClientConnClosing + } + cc.state = Connecting + cc.stateCV.Broadcast() + cc.mu.Unlock() + if closeTransport { + cc.transport.Close() + } + // Adjust timeout for the current try. + copts := cc.dopts.copts + if copts.Timeout < 0 { + cc.Close() + return ErrClientConnTimeout + } + if copts.Timeout > 0 { + copts.Timeout -= time.Since(start) + if copts.Timeout <= 0 { + cc.Close() + return ErrClientConnTimeout + } + } + sleepTime := backoff(retries) + timeout := sleepTime + if timeout < minConnectTimeout { + timeout = minConnectTimeout + } + if copts.Timeout == 0 || copts.Timeout > timeout { + copts.Timeout = timeout + } + connectTime := time.Now() + addr, err := cc.dopts.picker.PickAddr() + var newTransport transport.ClientTransport + if err == nil { + newTransport, err = transport.NewClientTransport(addr, &copts) + } + if err != nil { + cc.mu.Lock() + if cc.state == Shutdown { + // cc.Close() has been invoked. + cc.mu.Unlock() + return ErrClientConnClosing + } + cc.errorf("transient failure: %v", err) + cc.state = TransientFailure + cc.stateCV.Broadcast() + if cc.ready != nil { + close(cc.ready) + cc.ready = nil + } + cc.mu.Unlock() + sleepTime -= time.Since(connectTime) + if sleepTime < 0 { + sleepTime = 0 + } + // Fail early before falling into sleep. + if cc.dopts.copts.Timeout > 0 && cc.dopts.copts.Timeout < sleepTime+time.Since(start) { + cc.mu.Lock() + cc.errorf("connection timeout") + cc.mu.Unlock() + cc.Close() + return ErrClientConnTimeout + } + closeTransport = false + time.Sleep(sleepTime) + retries++ + grpclog.Printf("grpc: Conn.resetTransport failed to create client transport: %v; Reconnecting to %q", err, cc.target) + continue + } + cc.mu.Lock() + cc.printf("ready") + if cc.state == Shutdown { + // cc.Close() has been invoked. + cc.mu.Unlock() + newTransport.Close() + return ErrClientConnClosing + } + cc.state = Ready + cc.stateCV.Broadcast() + cc.transport = newTransport + if cc.ready != nil { + close(cc.ready) + cc.ready = nil + } + cc.mu.Unlock() + return nil + } +} + +func (cc *Conn) reconnect() bool { + cc.mu.Lock() + if cc.state == Shutdown { + // cc.Close() has been invoked. + cc.mu.Unlock() + return false + } + cc.state = TransientFailure + cc.stateCV.Broadcast() + cc.mu.Unlock() + if err := cc.resetTransport(true); err != nil { + // The ClientConn is closing. 
+		cc.mu.Lock()
+		cc.printf("transport exiting: %v", err)
+		cc.mu.Unlock()
+		grpclog.Printf("grpc: Conn.transportMonitor exits due to: %v", err)
+		return false
+	}
+	return true
+}
+
+// transportMonitor runs in a goroutine to track transport errors and to
+// create a new transport when one occurs. It returns when the Conn is
+// being closed.
+func (cc *Conn) transportMonitor() {
+	for {
+		select {
+		// shutdownChan is needed to detect the teardown when
+		// the ClientConn is idle (i.e., no RPC in flight).
+		case <-cc.shutdownChan:
+			return
+		case <-cc.resetChan:
+			if !cc.reconnect() {
+				return
+			}
+		case <-cc.transport.Error():
+			if !cc.reconnect() {
+				return
+			}
+			// Try to drain the reset signal if there is one, since it is
+			// now outdated.
+			select {
+			case <-cc.resetChan:
+			default:
+			}
+		}
+	}
+}
+
+// Wait blocks until i) the new transport is up, ii) ctx is done, or iii) cc is closed.
+func (cc *Conn) Wait(ctx context.Context) (transport.ClientTransport, error) {
+	for {
+		cc.mu.Lock()
+		switch {
+		case cc.state == Shutdown:
+			cc.mu.Unlock()
+			return nil, ErrClientConnClosing
+		case cc.state == Ready:
+			cc.mu.Unlock()
+			return cc.transport, nil
+		default:
+			ready := cc.ready
+			if ready == nil {
+				ready = make(chan struct{})
+				cc.ready = ready
+			}
+			cc.mu.Unlock()
+			select {
+			case <-ctx.Done():
+				return nil, transport.ContextErr(ctx.Err())
+			// Wait until the new transport is ready or failed.
+			case <-ready:
+			}
+		}
+	}
+}
+
+// Close starts to tear down the Conn. It returns ErrClientConnClosing if
+// it has already been closed (mostly due to a dial timeout).
+// TODO(zhaoq): Make this synchronous to avoid unbounded memory consumption in
+// some edge cases (e.g., the caller opens and closes many ClientConn's in a
+// tight loop).
+func (cc *Conn) Close() error {
+	cc.mu.Lock()
+	defer cc.mu.Unlock()
+	if cc.state == Shutdown {
+		return ErrClientConnClosing
+	}
+	cc.state = Shutdown
+	cc.stateCV.Broadcast()
+	if cc.events != nil {
+		cc.events.Finish()
+		cc.events = nil
+	}
+	if cc.ready != nil {
+		close(cc.ready)
+		cc.ready = nil
+	}
+	if cc.transport != nil {
+		cc.transport.Close()
+	}
+	if cc.shutdownChan != nil {
+		close(cc.shutdownChan)
+	}
+	return nil
+}
diff --git a/Godeps/_workspace/src/google.golang.org/grpc/codegen.sh b/Godeps/_workspace/src/google.golang.org/grpc/codegen.sh
new file mode 100644
index 000000000000..b00948884295
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/grpc/codegen.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+
+# This script serves as an example to demonstrate how to generate the gRPC-Go
+# interface and the related messages from a .proto file.
+#
+# It assumes the installation of i) the Google protocol buffer compiler at
+# https://github.com/google/protobuf (after v2.6.1) and ii) the Go codegen
+# plugin at https://github.com/golang/protobuf (after 2015-02-20). If you have
+# not, please install them first.
+#
+# We recommend running this script at $GOPATH/src.
+#
+# If this is not what you need, feel free to make your own scripts. Again, this
+# script is for demonstration purposes only.
+#
+proto=$1
+protoc --go_out=plugins=grpc:.
$proto diff --git a/Godeps/_workspace/src/google.golang.org/grpc/codes/code_string.go b/Godeps/_workspace/src/google.golang.org/grpc/codes/code_string.go new file mode 100644 index 000000000000..e6762d084558 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/codes/code_string.go @@ -0,0 +1,16 @@ +// generated by stringer -type=Code; DO NOT EDIT + +package codes + +import "fmt" + +const _Code_name = "OKCanceledUnknownInvalidArgumentDeadlineExceededNotFoundAlreadyExistsPermissionDeniedResourceExhaustedFailedPreconditionAbortedOutOfRangeUnimplementedInternalUnavailableDataLossUnauthenticated" + +var _Code_index = [...]uint8{0, 2, 10, 17, 32, 48, 56, 69, 85, 102, 120, 127, 137, 150, 158, 169, 177, 192} + +func (i Code) String() string { + if i+1 >= Code(len(_Code_index)) { + return fmt.Sprintf("Code(%d)", i) + } + return _Code_name[_Code_index[i]:_Code_index[i+1]] +} diff --git a/Godeps/_workspace/src/google.golang.org/grpc/codes/codes.go b/Godeps/_workspace/src/google.golang.org/grpc/codes/codes.go new file mode 100644 index 000000000000..37c5b860bd6d --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/codes/codes.go @@ -0,0 +1,159 @@ +/* + * + * Copyright 2014, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +// Package codes defines the canonical error codes used by gRPC. It is +// consistent across various languages. +package codes + +// A Code is an unsigned 32-bit error code as defined in the gRPC spec. +type Code uint32 + +//go:generate stringer -type=Code + +const ( + // OK is returned on success. + OK Code = 0 + + // Canceled indicates the operation was cancelled (typically by the caller). + Canceled Code = 1 + + // Unknown error. An example of where this error may be returned is + // if a Status value received from another address space belongs to + // an error-space that is not known in this address space. Also + // errors raised by APIs that do not return enough error information + // may be converted to this error. 
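+	// For example, an error created with errors.New inside a server
+	// handler and returned to a remote caller typically surfaces on the
+	// client as Unknown.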
+ Unknown Code = 2 + + // InvalidArgument indicates client specified an invalid argument. + // Note that this differs from FailedPrecondition. It indicates arguments + // that are problematic regardless of the state of the system + // (e.g., a malformed file name). + InvalidArgument Code = 3 + + // DeadlineExceeded means operation expired before completion. + // For operations that change the state of the system, this error may be + // returned even if the operation has completed successfully. For + // example, a successful response from a server could have been delayed + // long enough for the deadline to expire. + DeadlineExceeded Code = 4 + + // NotFound means some requested entity (e.g., file or directory) was + // not found. + NotFound Code = 5 + + // AlreadyExists means an attempt to create an entity failed because one + // already exists. + AlreadyExists Code = 6 + + // PermissionDenied indicates the caller does not have permission to + // execute the specified operation. It must not be used for rejections + // caused by exhausting some resource (use ResourceExhausted + // instead for those errors). It must not be + // used if the caller cannot be identified (use Unauthenticated + // instead for those errors). + PermissionDenied Code = 7 + + // Unauthenticated indicates the request does not have valid + // authentication credentials for the operation. + Unauthenticated Code = 16 + + // ResourceExhausted indicates some resource has been exhausted, perhaps + // a per-user quota, or perhaps the entire file system is out of space. + ResourceExhausted Code = 8 + + // FailedPrecondition indicates operation was rejected because the + // system is not in a state required for the operation's execution. + // For example, directory to be deleted may be non-empty, an rmdir + // operation is applied to a non-directory, etc. + // + // A litmus test that may help a service implementor in deciding + // between FailedPrecondition, Aborted, and Unavailable: + // (a) Use Unavailable if the client can retry just the failing call. + // (b) Use Aborted if the client should retry at a higher-level + // (e.g., restarting a read-modify-write sequence). + // (c) Use FailedPrecondition if the client should not retry until + // the system state has been explicitly fixed. E.g., if an "rmdir" + // fails because the directory is non-empty, FailedPrecondition + // should be returned since the client should not retry unless + // they have first fixed up the directory by deleting files from it. + // (d) Use FailedPrecondition if the client performs conditional + // REST Get/Update/Delete on a resource and the resource on the + // server does not match the condition. E.g., conflicting + // read-modify-write on the same resource. + FailedPrecondition Code = 9 + + // Aborted indicates the operation was aborted, typically due to a + // concurrency issue like sequencer check failures, transaction aborts, + // etc. + // + // See litmus test above for deciding between FailedPrecondition, + // Aborted, and Unavailable. + Aborted Code = 10 + + // OutOfRange means operation was attempted past the valid range. + // E.g., seeking or reading past end of file. + // + // Unlike InvalidArgument, this error indicates a problem that may + // be fixed if the system state changes. For example, a 32-bit file + // system will generate InvalidArgument if asked to read at an + // offset that is not in the range [0,2^32-1], but it will generate + // OutOfRange if asked to read from an offset past the current + // file size. 
+	//
+	// There is a fair bit of overlap between FailedPrecondition and
+	// OutOfRange. We recommend using OutOfRange (the more specific
+	// error) when it applies so that callers who are iterating through
+	// a space can easily look for an OutOfRange error to detect when
+	// they are done.
+	OutOfRange Code = 11
+
+	// Unimplemented indicates the operation is not implemented or not
+	// supported/enabled in this service.
+	Unimplemented Code = 12
+
+	// Internal errors. This means some invariants expected by the
+	// underlying system have been broken. If you see one of these errors,
+	// something is very broken.
+	Internal Code = 13
+
+	// Unavailable indicates the service is currently unavailable.
+	// This is most likely a transient condition and may be corrected
+	// by retrying with a backoff.
+	//
+	// See litmus test above for deciding between FailedPrecondition,
+	// Aborted, and Unavailable.
+	Unavailable Code = 14
+
+	// DataLoss indicates unrecoverable data loss or corruption.
+	DataLoss Code = 15
+)
diff --git a/Godeps/_workspace/src/google.golang.org/grpc/coverage.sh b/Godeps/_workspace/src/google.golang.org/grpc/coverage.sh
new file mode 100644
index 000000000000..120235374a42
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/grpc/coverage.sh
@@ -0,0 +1,47 @@
+#!/bin/bash
+
+set -e
+
+workdir=.cover
+profile="$workdir/cover.out"
+mode=set
+end2endtest="google.golang.org/grpc/test"
+
+generate_cover_data() {
+  rm -rf "$workdir"
+  mkdir "$workdir"
+
+  for pkg in "$@"; do
+    if [ $pkg == "google.golang.org/grpc" -o $pkg == "google.golang.org/grpc/transport" -o $pkg == "google.golang.org/grpc/metadata" -o $pkg == "google.golang.org/grpc/credentials" ]
+    then
+      f="$workdir/$(echo $pkg | tr / -)"
+      go test -covermode="$mode" -coverprofile="$f.cover" "$pkg"
+      go test -covermode="$mode" -coverpkg "$pkg" -coverprofile="$f.e2e.cover" "$end2endtest"
+    fi
+  done
+
+  echo "mode: $mode" >"$profile"
+  grep -h -v "^mode:" "$workdir"/*.cover >>"$profile"
+}
+
+show_cover_report() {
+  go tool cover -${1}="$profile"
+}
+
+push_to_coveralls() {
+  goveralls -coverprofile="$profile"
+}
+
+generate_cover_data $(go list ./...)
+show_cover_report func
+case "$1" in
+"")
+  ;;
+--html)
+  show_cover_report html ;;
+--coveralls)
+  push_to_coveralls ;;
+*)
+  echo >&2 "error: invalid option: $1" ;;
+esac
+rm -rf "$workdir"
diff --git a/Godeps/_workspace/src/google.golang.org/grpc/credentials/credentials.go b/Godeps/_workspace/src/google.golang.org/grpc/credentials/credentials.go
new file mode 100644
index 000000000000..0b0b89b6aa6e
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/grpc/credentials/credentials.go
@@ -0,0 +1,226 @@
+/*
+ *
+ * Copyright 2014, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+// Package credentials implements the various credentials supported by the
+// gRPC library, which encapsulate all the state needed by a client to
+// authenticate with a server and make various assertions, e.g., about the
+// client's identity, role, or whether it is authorized to make a particular
+// call.
+package credentials
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"fmt"
+	"io/ioutil"
+	"net"
+	"strings"
+	"time"
+
+	"golang.org/x/net/context"
+)
+
+var (
+	// alpnProtoStr are the specified application level protocols for gRPC.
+	alpnProtoStr = []string{"h2"}
+)
+
+// Credentials defines the common interface that all supported credentials
+// must implement.
+type Credentials interface {
+	// GetRequestMetadata gets the current request metadata, refreshing
+	// tokens if required. This should be called by the transport layer on
+	// each request, and the data should be populated in headers or other
+	// context. uri is the URI of the entry point for the request. When
+	// supported by the underlying implementation, ctx can be used for
+	// timeout and cancellation.
+	// TODO(zhaoq): Define the set of the qualified keys instead of leaving
+	// it as an arbitrary string.
+	GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error)
+	// RequireTransportSecurity indicates whether the credentials require
+	// transport security.
+	RequireTransportSecurity() bool
+}
+
+// ProtocolInfo provides information regarding the gRPC wire protocol version,
+// security protocol, security protocol version in use, etc.
+type ProtocolInfo struct {
+	// ProtocolVersion is the gRPC wire protocol version.
+	ProtocolVersion string
+	// SecurityProtocol is the security protocol in use.
+	SecurityProtocol string
+	// SecurityVersion is the security protocol version.
+	SecurityVersion string
+}
+
+// AuthInfo defines the common interface for the auth information the users are interested in.
+type AuthInfo interface {
+	AuthType() string
+}
+
+// TransportAuthenticator defines the common interface for all the live gRPC wire
+// protocols and supported transport security protocols (e.g., TLS, SSL).
+type TransportAuthenticator interface {
+	// ClientHandshake does the authentication handshake specified by the corresponding
+	// authentication protocol on rawConn for clients. It returns the authenticated
+	// connection and the corresponding auth information about the connection.
+	ClientHandshake(addr string, rawConn net.Conn, timeout time.Duration) (net.Conn, AuthInfo, error)
+	// ServerHandshake does the authentication handshake for servers. It returns
+	// the authenticated connection and the corresponding auth information about
+	// the connection.
+	ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error)
+	// Info provides the ProtocolInfo of this TransportAuthenticator.
+	Info() ProtocolInfo
+	Credentials
+}
+
+// TLSInfo contains the auth information for a TLS authenticated connection.
+// It implements the AuthInfo interface.
+type TLSInfo struct {
+	State tls.ConnectionState
+}
+
+func (t TLSInfo) AuthType() string {
+	return "tls"
+}
+
+// tlsCreds is the credentials required for authenticating a connection using TLS.
+type tlsCreds struct {
+	// TLS configuration
+	config tls.Config
+}
+
+func (c tlsCreds) Info() ProtocolInfo {
+	return ProtocolInfo{
+		SecurityProtocol: "tls",
+		SecurityVersion:  "1.2",
+	}
+}
+
+// GetRequestMetadata returns nil, nil since TLS credentials do not have
+// metadata.
+func (c *tlsCreds) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
+	return nil, nil
+}
+
+func (c *tlsCreds) RequireTransportSecurity() bool {
+	return true
+}
+
+type timeoutError struct{}
+
+func (timeoutError) Error() string   { return "credentials: Dial timed out" }
+func (timeoutError) Timeout() bool   { return true }
+func (timeoutError) Temporary() bool { return true }
+
+func (c *tlsCreds) ClientHandshake(addr string, rawConn net.Conn, timeout time.Duration) (_ net.Conn, _ AuthInfo, err error) {
+	// This borrows some code from tls.DialWithDialer.
+	var errChannel chan error
+	if timeout != 0 {
+		errChannel = make(chan error, 2)
+		time.AfterFunc(timeout, func() {
+			errChannel <- timeoutError{}
+		})
+	}
+	if c.config.ServerName == "" {
+		colonPos := strings.LastIndex(addr, ":")
+		if colonPos == -1 {
+			colonPos = len(addr)
+		}
+		c.config.ServerName = addr[:colonPos]
+	}
+	conn := tls.Client(rawConn, &c.config)
+	if timeout == 0 {
+		err = conn.Handshake()
+	} else {
+		go func() {
+			errChannel <- conn.Handshake()
+		}()
+		err = <-errChannel
+	}
+	if err != nil {
+		rawConn.Close()
+		return nil, nil, err
+	}
+	// TODO(zhaoq): Omit the auth info for client now. It is more for
+	// information than anything else.
+	return conn, nil, nil
+}
+
+func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) {
+	conn := tls.Server(rawConn, &c.config)
+	if err := conn.Handshake(); err != nil {
+		rawConn.Close()
+		return nil, nil, err
+	}
+	return conn, TLSInfo{conn.ConnectionState()}, nil
+}
+
+// NewTLS uses c to construct a TransportAuthenticator based on TLS.
+func NewTLS(c *tls.Config) TransportAuthenticator {
+	tc := &tlsCreds{*c}
+	tc.config.NextProtos = alpnProtoStr
+	return tc
+}
+
+// NewClientTLSFromCert constructs TLS credentials from the input certificate for a client.
+func NewClientTLSFromCert(cp *x509.CertPool, serverName string) TransportAuthenticator {
+	return NewTLS(&tls.Config{ServerName: serverName, RootCAs: cp})
+}
+
+// NewClientTLSFromFile constructs TLS credentials from the input certificate file for a client.
+func NewClientTLSFromFile(certFile, serverName string) (TransportAuthenticator, error) {
+	b, err := ioutil.ReadFile(certFile)
+	if err != nil {
+		return nil, err
+	}
+	cp := x509.NewCertPool()
+	if !cp.AppendCertsFromPEM(b) {
+		return nil, fmt.Errorf("credentials: failed to append certificates")
+	}
+	return NewTLS(&tls.Config{ServerName: serverName, RootCAs: cp}), nil
+}
+
+// NewServerTLSFromCert constructs TLS credentials from the input certificate for a server.
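+// For example (a sketch; assumes cert is a tls.Certificate loaded elsewhere,
+// e.g. via tls.LoadX509KeyPair):
+//
+//	creds := NewServerTLSFromCert(&cert)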
+func NewServerTLSFromCert(cert *tls.Certificate) TransportAuthenticator {
+	return NewTLS(&tls.Config{Certificates: []tls.Certificate{*cert}})
+}
+
+// NewServerTLSFromFile constructs TLS credentials from the input certificate file
+// and key file for a server.
+func NewServerTLSFromFile(certFile, keyFile string) (TransportAuthenticator, error) {
+	cert, err := tls.LoadX509KeyPair(certFile, keyFile)
+	if err != nil {
+		return nil, err
+	}
+	return NewTLS(&tls.Config{Certificates: []tls.Certificate{cert}}), nil
+}
diff --git a/Godeps/_workspace/src/google.golang.org/grpc/doc.go b/Godeps/_workspace/src/google.golang.org/grpc/doc.go
new file mode 100644
index 000000000000..c0f54f75943d
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/grpc/doc.go
@@ -0,0 +1,6 @@
+/*
+Package grpc implements an RPC system called gRPC.
+
+See https://github.com/grpc/grpc for more information about gRPC.
+*/
+package grpc
diff --git a/Godeps/_workspace/src/google.golang.org/grpc/grpclog/logger.go b/Godeps/_workspace/src/google.golang.org/grpc/grpclog/logger.go
new file mode 100644
index 000000000000..2cc09be48945
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/grpc/grpclog/logger.go
@@ -0,0 +1,93 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/*
+Package grpclog defines logging for grpc.
+*/
+package grpclog
+
+import (
+	"log"
+	"os"
+)
+
+// Use Go's standard logger by default.
+// Access is not mutex-protected: do not modify except in init()
+// functions.
+var logger Logger = log.New(os.Stderr, "", log.LstdFlags)
+
+// Logger mimics the standard library's log.Logger as an interface.
+type Logger interface {
+	Fatal(args ...interface{})
+	Fatalf(format string, args ...interface{})
+	Fatalln(args ...interface{})
+	Print(args ...interface{})
+	Printf(format string, args ...interface{})
+	Println(args ...interface{})
+}
+
+// SetLogger sets the logger that is used in grpc. Call only from
+// init() functions.
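+// For example (sketch):
+//
+//	func init() {
+//		grpclog.SetLogger(log.New(os.Stderr, "[grpc] ", log.LstdFlags))
+//	}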
+func SetLogger(l Logger) {
+	logger = l
+}
+
+// Fatal is equivalent to Print() followed by a call to os.Exit() with a non-zero exit code.
+func Fatal(args ...interface{}) {
+	logger.Fatal(args...)
+}
+
+// Fatalf is equivalent to Printf() followed by a call to os.Exit() with a non-zero exit code.
+func Fatalf(format string, args ...interface{}) {
+	logger.Fatalf(format, args...)
+}
+
+// Fatalln is equivalent to Println() followed by a call to os.Exit() with a non-zero exit code.
+func Fatalln(args ...interface{}) {
+	logger.Fatalln(args...)
+}
+
+// Print prints to the logger. Arguments are handled in the manner of fmt.Print.
+func Print(args ...interface{}) {
+	logger.Print(args...)
+}
+
+// Printf prints to the logger. Arguments are handled in the manner of fmt.Printf.
+func Printf(format string, args ...interface{}) {
+	logger.Printf(format, args...)
+}
+
+// Println prints to the logger. Arguments are handled in the manner of fmt.Println.
+func Println(args ...interface{}) {
+	logger.Println(args...)
+}
diff --git a/Godeps/_workspace/src/google.golang.org/grpc/metadata/metadata.go b/Godeps/_workspace/src/google.golang.org/grpc/metadata/metadata.go
new file mode 100644
index 000000000000..58469ddd3fa4
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/grpc/metadata/metadata.go
@@ -0,0 +1,134 @@
+/*
+ *
+ * Copyright 2014, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+// Package metadata defines the structure of the metadata supported by the gRPC library.
+package metadata
+
+import (
+	"encoding/base64"
+	"fmt"
+	"strings"
+
+	"golang.org/x/net/context"
+)
+
+const (
+	binHdrSuffix = "-bin"
+)
+
+// encodeKeyValue encodes a key and value so that they qualify for
+// transmission via gRPC (transmitting raw binary headers would violate the
+// HTTP/2 spec).
+// TODO(zhaoq): Maybe check if k is ASCII also.
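+// For example (illustrative): encodeKeyValue("Trace-Bin", "\x01\x02") returns
+// ("trace-bin", "AQI="), while encodeKeyValue("Key", "value") only lowercases
+// the key.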
+func encodeKeyValue(k, v string) (string, string) { + k = strings.ToLower(k) + if strings.HasSuffix(k, binHdrSuffix) { + val := base64.StdEncoding.EncodeToString([]byte(v)) + v = string(val) + } + return k, v +} + +// DecodeKeyValue returns the original key and value corresponding to the +// encoded data in k, v. +func DecodeKeyValue(k, v string) (string, string, error) { + if !strings.HasSuffix(k, binHdrSuffix) { + return k, v, nil + } + val, err := base64.StdEncoding.DecodeString(v) + if err != nil { + return "", "", err + } + return k, string(val), nil +} + +// MD is a mapping from metadata keys to values. Users should use the following +// two convenience functions New and Pairs to generate MD. +type MD map[string][]string + +// New creates a MD from given key-value map. +func New(m map[string]string) MD { + md := MD{} + for k, v := range m { + key, val := encodeKeyValue(k, v) + md[key] = append(md[key], val) + } + return md +} + +// Pairs returns an MD formed by the mapping of key, value ... +// Pairs panics if len(kv) is odd. +func Pairs(kv ...string) MD { + if len(kv)%2 == 1 { + panic(fmt.Sprintf("metadata: Pairs got the odd number of input pairs for metadata: %d", len(kv))) + } + md := MD{} + var k string + for i, s := range kv { + if i%2 == 0 { + k = s + continue + } + key, val := encodeKeyValue(k, s) + md[key] = append(md[key], val) + } + return md +} + +// Len returns the number of items in md. +func (md MD) Len() int { + return len(md) +} + +// Copy returns a copy of md. +func (md MD) Copy() MD { + out := MD{} + for k, v := range md { + for _, i := range v { + out[k] = append(out[k], i) + } + } + return out +} + +type mdKey struct{} + +// NewContext creates a new context with md attached. +func NewContext(ctx context.Context, md MD) context.Context { + return context.WithValue(ctx, mdKey{}, md) +} + +// FromContext returns the MD in ctx if it exists. +func FromContext(ctx context.Context) (md MD, ok bool) { + md, ok = ctx.Value(mdKey{}).(MD) + return +} diff --git a/Godeps/_workspace/src/google.golang.org/grpc/naming/naming.go b/Godeps/_workspace/src/google.golang.org/grpc/naming/naming.go new file mode 100644 index 000000000000..06605607c371 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/naming/naming.go @@ -0,0 +1,73 @@ +/* + * + * Copyright 2014, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+// Package naming defines the naming API and related data structures for gRPC.
+// The interface is EXPERIMENTAL and may be subject to change.
+package naming
+
+// Operation defines the corresponding operations for a name resolution change.
+type Operation uint8
+
+const (
+	// Add indicates a new address is added.
+	Add Operation = iota
+	// Delete indicates an existing address is deleted.
+	Delete
+)
+
+// Update defines a name resolution update. Note that it is not valid for an
+// Update to have both an empty-string Addr and nil Metadata.
+type Update struct {
+	// Op indicates the operation of the update.
+	Op Operation
+	// Addr is the updated address. It is empty string if there is no address update.
+	Addr string
+	// Metadata is the updated metadata. It is nil if there is no metadata update.
+	// Metadata is not required for a custom naming implementation.
+	Metadata interface{}
+}
+
+// Resolver creates a Watcher for a target to track its resolution changes.
+type Resolver interface {
+	// Resolve creates a Watcher for target.
+	Resolve(target string) (Watcher, error)
+}
+
+// Watcher watches for the updates on the specified target.
+type Watcher interface {
+	// Next blocks until an update or error happens. It may return one or more
+	// updates. The first call should get the full set of the results.
+	Next() ([]*Update, error)
+	// Close closes the Watcher.
+	Close()
+}
diff --git a/Godeps/_workspace/src/google.golang.org/grpc/peer/peer.go b/Godeps/_workspace/src/google.golang.org/grpc/peer/peer.go
new file mode 100644
index 000000000000..bfa6205ba9ea
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/grpc/peer/peer.go
@@ -0,0 +1,65 @@
+/*
+ *
+ * Copyright 2014, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +// Package peer defines various peer information associated with RPCs and +// corresponding utils. +package peer + +import ( + "net" + + "golang.org/x/net/context" + "google.golang.org/grpc/credentials" +) + +// Peer contains the information of the peer for an RPC. +type Peer struct { + // Addr is the peer address. + Addr net.Addr + // AuthInfo is the authentication information of the transport. + // It is nil if there is no transport security being used. + AuthInfo credentials.AuthInfo +} + +type peerKey struct{} + +// NewContext creates a new context with peer information attached. +func NewContext(ctx context.Context, p *Peer) context.Context { + return context.WithValue(ctx, peerKey{}, p) +} + +// FromContext returns the peer information in ctx if it exists. +func FromContext(ctx context.Context) (p *Peer, ok bool) { + p, ok = ctx.Value(peerKey{}).(*Peer) + return +} diff --git a/Godeps/_workspace/src/google.golang.org/grpc/picker.go b/Godeps/_workspace/src/google.golang.org/grpc/picker.go new file mode 100644 index 000000000000..50f315b44f37 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/picker.go @@ -0,0 +1,243 @@ +/* + * + * Copyright 2014, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +package grpc + +import ( + "container/list" + "fmt" + "sync" + + "golang.org/x/net/context" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/naming" + "google.golang.org/grpc/transport" +) + +// Picker picks a Conn for RPC requests. 
+// This is EXPERIMENTAL; please do not implement your own Picker for now.
+type Picker interface {
+	// Init does initial processing for the Picker, e.g., initiate some connections.
+	Init(cc *ClientConn) error
+	// Pick blocks until either a transport.ClientTransport is ready for the upcoming RPC
+	// or some error happens.
+	Pick(ctx context.Context) (transport.ClientTransport, error)
+	// PickAddr picks a peer address for connecting. This will be called repeatedly for
+	// connecting/reconnecting.
+	PickAddr() (string, error)
+	// State returns the connectivity state of the underlying connections.
+	State() (ConnectivityState, error)
+	// WaitForStateChange blocks until the state changes to something other than
+	// the sourceState. It returns the new state or an error.
+	WaitForStateChange(ctx context.Context, sourceState ConnectivityState) (ConnectivityState, error)
+	// Close closes all the Conn's owned by this Picker.
+	Close() error
+}
+
+// unicastPicker is the default Picker, used when users do not specify a
+// custom Picker. It always picks the same Conn.
+type unicastPicker struct {
+	target string
+	conn   *Conn
+}
+
+func (p *unicastPicker) Init(cc *ClientConn) error {
+	c, err := NewConn(cc)
+	if err != nil {
+		return err
+	}
+	p.conn = c
+	return nil
+}
+
+func (p *unicastPicker) Pick(ctx context.Context) (transport.ClientTransport, error) {
+	return p.conn.Wait(ctx)
+}
+
+func (p *unicastPicker) PickAddr() (string, error) {
+	return p.target, nil
+}
+
+func (p *unicastPicker) State() (ConnectivityState, error) {
+	return p.conn.State(), nil
+}
+
+func (p *unicastPicker) WaitForStateChange(ctx context.Context, sourceState ConnectivityState) (ConnectivityState, error) {
+	return p.conn.WaitForStateChange(ctx, sourceState)
+}
+
+func (p *unicastPicker) Close() error {
+	if p.conn != nil {
+		return p.conn.Close()
+	}
+	return nil
+}
+
+// unicastNamingPicker picks an address from a name resolver to set up the connection.
+type unicastNamingPicker struct {
+	cc       *ClientConn
+	resolver naming.Resolver
+	watcher  naming.Watcher
+	mu       sync.Mutex
+	// The list of addresses is obtained from the watcher.
+	addrs *list.List
+	// pickedAddr tracks the address currently picked by PickAddr(). The next
+	// PickAddr() call may advance it along addrs.
+	pickedAddr *list.Element
+	conn       *Conn
+}
+
+// NewUnicastNamingPicker creates a Picker to pick addresses from a name resolver
+// to connect.
+func NewUnicastNamingPicker(r naming.Resolver) Picker {
+	return &unicastNamingPicker{
+		resolver: r,
+		addrs:    list.New(),
+	}
+}
+
+type addrInfo struct {
+	addr string
+	// deleting is set to true if this addrInfo needs to be deleted in the
+	// next PickAddr() call.
+	deleting bool
+}
+
+// processUpdates calls Watcher.Next() once and processes the obtained updates.
+func (p *unicastNamingPicker) processUpdates() error {
+	updates, err := p.watcher.Next()
+	if err != nil {
+		return err
+	}
+	for _, update := range updates {
+		switch update.Op {
+		case naming.Add:
+			p.mu.Lock()
+			p.addrs.PushBack(&addrInfo{
+				addr: update.Addr,
+			})
+			p.mu.Unlock()
+			// Initial connection setup
+			if p.conn == nil {
+				conn, err := NewConn(p.cc)
+				if err != nil {
+					return err
+				}
+				p.conn = conn
+			}
+		case naming.Delete:
+			p.mu.Lock()
+			for e := p.addrs.Front(); e != nil; e = e.Next() {
+				if update.Addr == e.Value.(*addrInfo).addr {
+					if e == p.pickedAddr {
+						// Do not remove the element now if it is the current picked
+						// one. We leave the deletion to the next PickAddr() call.
+						e.Value.(*addrInfo).deleting = true
+						// Notify the Conn to close it. All the live RPCs on this
+						// connection will be aborted.
+						p.conn.NotifyReset()
+					} else {
+						p.addrs.Remove(e)
+					}
+				}
+			}
+			p.mu.Unlock()
+		default:
+			grpclog.Println("Unknown update.Op ", update.Op)
+		}
+	}
+	return nil
+}
+
+// monitor runs in a standalone goroutine to keep watching name resolution updates until the watcher
+// is closed.
+func (p *unicastNamingPicker) monitor() {
+	for {
+		if err := p.processUpdates(); err != nil {
+			return
+		}
+	}
+}
+
+func (p *unicastNamingPicker) Init(cc *ClientConn) error {
+	w, err := p.resolver.Resolve(cc.target)
+	if err != nil {
+		return err
+	}
+	p.watcher = w
+	p.cc = cc
+	// Get the initial name resolution.
+	if err := p.processUpdates(); err != nil {
+		return err
+	}
+	go p.monitor()
+	return nil
+}
+
+func (p *unicastNamingPicker) Pick(ctx context.Context) (transport.ClientTransport, error) {
+	return p.conn.Wait(ctx)
+}
+
+func (p *unicastNamingPicker) PickAddr() (string, error) {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	if p.pickedAddr == nil {
+		p.pickedAddr = p.addrs.Front()
+	} else {
+		pa := p.pickedAddr
+		p.pickedAddr = pa.Next()
+		if pa.Value.(*addrInfo).deleting {
+			p.addrs.Remove(pa)
+		}
+		if p.pickedAddr == nil {
+			p.pickedAddr = p.addrs.Front()
+		}
+	}
+	if p.pickedAddr == nil {
+		return "", fmt.Errorf("there is no address available to pick")
+	}
+	return p.pickedAddr.Value.(*addrInfo).addr, nil
+}
+
+func (p *unicastNamingPicker) State() (ConnectivityState, error) {
+	return 0, fmt.Errorf("State() is not supported for unicastNamingPicker")
+}
+
+func (p *unicastNamingPicker) WaitForStateChange(ctx context.Context, sourceState ConnectivityState) (ConnectivityState, error) {
+	return 0, fmt.Errorf("WaitForStateChange is not supported for unicastNamingPicker")
+}
+
+func (p *unicastNamingPicker) Close() error {
+	p.watcher.Close()
+	p.conn.Close()
+	return nil
+}
diff --git a/Godeps/_workspace/src/google.golang.org/grpc/rpc_util.go b/Godeps/_workspace/src/google.golang.org/grpc/rpc_util.go
new file mode 100644
index 000000000000..e98ddbcdc5a7
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/grpc/rpc_util.go
@@ -0,0 +1,430 @@
+/*
+ *
+ * Copyright 2014, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+package grpc
+
+import (
+	"bytes"
+	"compress/gzip"
+	"encoding/binary"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"math"
+	"math/rand"
+	"os"
+	"time"
+
+	"github.com/golang/protobuf/proto"
+	"golang.org/x/net/context"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/transport"
+)
+
+// Codec defines the interface gRPC uses to encode and decode messages.
+type Codec interface {
+	// Marshal returns the wire format of v.
+	Marshal(v interface{}) ([]byte, error)
+	// Unmarshal parses the wire format into v.
+	Unmarshal(data []byte, v interface{}) error
+	// String returns the name of the Codec implementation. The returned
+	// string will be used as part of content type in transmission.
+	String() string
+}
+
+// protoCodec is a Codec implementation with protobuf. It is the default codec for gRPC.
+type protoCodec struct{}
+
+func (protoCodec) Marshal(v interface{}) ([]byte, error) {
+	return proto.Marshal(v.(proto.Message))
+}
+
+func (protoCodec) Unmarshal(data []byte, v interface{}) error {
+	return proto.Unmarshal(data, v.(proto.Message))
+}
+
+func (protoCodec) String() string {
+	return "proto"
+}
+
+// Compressor defines the interface gRPC uses to compress a message.
+type Compressor interface {
+	// Do compresses p into w.
+	Do(w io.Writer, p []byte) error
+	// Type returns the compression algorithm the Compressor uses.
+	Type() string
+}
+
+// NewGZIPCompressor creates a Compressor based on GZIP.
+func NewGZIPCompressor() Compressor {
+	return &gzipCompressor{}
+}
+
+type gzipCompressor struct {
+}
+
+func (c *gzipCompressor) Do(w io.Writer, p []byte) error {
+	z := gzip.NewWriter(w)
+	if _, err := z.Write(p); err != nil {
+		return err
+	}
+	return z.Close()
+}
+
+func (c *gzipCompressor) Type() string {
+	return "gzip"
+}
+
+// Decompressor defines the interface gRPC uses to decompress a message.
+type Decompressor interface {
+	// Do reads the data from r and decompresses it.
+	Do(r io.Reader) ([]byte, error)
+	// Type returns the compression algorithm the Decompressor uses.
+	Type() string
+}
+
+type gzipDecompressor struct {
+}
+
+// NewGZIPDecompressor creates a Decompressor based on GZIP.
+func NewGZIPDecompressor() Decompressor {
+	return &gzipDecompressor{}
+}
+
+func (d *gzipDecompressor) Do(r io.Reader) ([]byte, error) {
+	z, err := gzip.NewReader(r)
+	if err != nil {
+		return nil, err
+	}
+	defer z.Close()
+	return ioutil.ReadAll(z)
+}
+
+func (d *gzipDecompressor) Type() string {
+	return "gzip"
+}
+
+// callInfo contains all related configuration and information about an RPC.
+type callInfo struct {
+	failFast  bool
+	headerMD  metadata.MD
+	trailerMD metadata.MD
+	traceInfo traceInfo // in trace.go
+}
+
+// CallOption configures a Call before it starts or extracts information from
+// a Call after it completes.
+type CallOption interface {
+	// before is called before the call is sent to any server. If before
+	// returns a non-nil error, the RPC fails with that error.
+	before(*callInfo) error
+
+	// after is called after the call has completed. after cannot return an
+	// error, so any failures should be reported via output parameters.
+	after(*callInfo)
+}
+
+type beforeCall func(c *callInfo) error
+
+func (o beforeCall) before(c *callInfo) error { return o(c) }
+func (o beforeCall) after(c *callInfo)        {}
+
+type afterCall func(c *callInfo)
+
+func (o afterCall) before(c *callInfo) error { return nil }
+func (o afterCall) after(c *callInfo)        { o(c) }
+
+// Header returns a CallOption that retrieves the header metadata
+// for a unary RPC.
+func Header(md *metadata.MD) CallOption {
+	return afterCall(func(c *callInfo) {
+		*md = c.headerMD
+	})
+}
+
+// Trailer returns a CallOption that retrieves the trailer metadata
+// for a unary RPC.
+func Trailer(md *metadata.MD) CallOption {
+	return afterCall(func(c *callInfo) {
+		*md = c.trailerMD
+	})
+}
+
+// The format of the payload: compressed or not?
+type payloadFormat uint8
+
+const (
+	compressionNone payloadFormat = iota // no compression
+	compressionMade
+)
+
+// parser reads complete gRPC messages from the underlying reader.
+type parser struct {
+	s io.Reader
+}
+
+// recvMsg reads a complete gRPC message from the stream. It blocks until the
+// full message has been received. It returns the message and its payload
+// format; io.EOF is returned with nil msg and 0 pf if the entire stream is
+// done. Any other non-nil error indicates that something went wrong on reading.
+func (p *parser) recvMsg() (pf payloadFormat, msg []byte, err error) {
+	// The header of a gRPC message. Find more detail
+	// at http://www.grpc.io/docs/guides/wire.html.
+	var buf [5]byte
+
+	if _, err := io.ReadFull(p.s, buf[:]); err != nil {
+		return 0, nil, err
+	}
+
+	pf = payloadFormat(buf[0])
+	length := binary.BigEndian.Uint32(buf[1:])
+
+	if length == 0 {
+		return pf, nil, nil
+	}
+	msg = make([]byte, int(length))
+	if _, err := io.ReadFull(p.s, msg); err != nil {
+		if err == io.EOF {
+			err = io.ErrUnexpectedEOF
+		}
+		return 0, nil, err
+	}
+	return pf, msg, nil
+}
+
+// encode serializes msg and prepends the message header. If msg is nil, it
+// generates the message header of 0 message length.
+func encode(c Codec, msg interface{}, cp Compressor, cbuf *bytes.Buffer) ([]byte, error) {
+	var b []byte
+	var length uint
+	if msg != nil {
+		var err error
+		// TODO(zhaoq): optimize to reduce memory alloc and copying.
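+		// NOTE(editor): illustrative, not part of the vendored source. The
+		// buffer assembled below follows the gRPC length-prefixed message
+		// framing, the same layout recvMsg parses above:
+		//
+		//	byte 0     payload format (0 = uncompressed, 1 = compressed)
+		//	bytes 1-4  big-endian uint32 length of the payload
+		//	bytes 5-   the marshaled (and possibly compressed) message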
+ b, err = c.Marshal(msg) + if err != nil { + return nil, err + } + if cp != nil { + if err := cp.Do(cbuf, b); err != nil { + return nil, err + } + b = cbuf.Bytes() + } + length = uint(len(b)) + } + if length > math.MaxUint32 { + return nil, Errorf(codes.InvalidArgument, "grpc: message too large (%d bytes)", length) + } + + const ( + payloadLen = 1 + sizeLen = 4 + ) + + var buf = make([]byte, payloadLen+sizeLen+len(b)) + + // Write payload format + if cp == nil { + buf[0] = byte(compressionNone) + } else { + buf[0] = byte(compressionMade) + } + // Write length of b into buf + binary.BigEndian.PutUint32(buf[1:], uint32(length)) + // Copy encoded msg to buf + copy(buf[5:], b) + + return buf, nil +} + +func checkRecvPayload(pf payloadFormat, recvCompress string, dc Decompressor) error { + switch pf { + case compressionNone: + case compressionMade: + if recvCompress == "" { + return transport.StreamErrorf(codes.InvalidArgument, "grpc: received unexpected payload format %d", pf) + } + if dc == nil || recvCompress != dc.Type() { + return transport.StreamErrorf(codes.InvalidArgument, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) + } + default: + return transport.StreamErrorf(codes.InvalidArgument, "grpc: received unexpected payload format %d", pf) + } + return nil +} + +func recv(p *parser, c Codec, s *transport.Stream, dc Decompressor, m interface{}) error { + pf, d, err := p.recvMsg() + if err != nil { + return err + } + if err := checkRecvPayload(pf, s.RecvCompress(), dc); err != nil { + return err + } + if pf == compressionMade { + d, err = dc.Do(bytes.NewReader(d)) + if err != nil { + return transport.StreamErrorf(codes.Internal, "grpc: failed to decompress the received message %v", err) + } + } + if err := c.Unmarshal(d, m); err != nil { + return transport.StreamErrorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err) + } + return nil +} + +// rpcError defines the status from an RPC. +type rpcError struct { + code codes.Code + desc string +} + +func (e rpcError) Error() string { + return fmt.Sprintf("rpc error: code = %d desc = %q", e.code, e.desc) +} + +// Code returns the error code for err if it was produced by the rpc system. +// Otherwise, it returns codes.Unknown. +func Code(err error) codes.Code { + if err == nil { + return codes.OK + } + if e, ok := err.(rpcError); ok { + return e.code + } + return codes.Unknown +} + +// ErrorDesc returns the error description of err if it was produced by the rpc system. +// Otherwise, it returns err.Error() or empty string when err is nil. +func ErrorDesc(err error) string { + if err == nil { + return "" + } + if e, ok := err.(rpcError); ok { + return e.desc + } + return err.Error() +} + +// Errorf returns an error containing an error code and a description; +// Errorf returns nil if c is OK. +func Errorf(c codes.Code, format string, a ...interface{}) error { + if c == codes.OK { + return nil + } + return rpcError{ + code: c, + desc: fmt.Sprintf(format, a...), + } +} + +// toRPCErr converts an error into a rpcError. +func toRPCErr(err error) error { + switch e := err.(type) { + case rpcError: + return err + case transport.StreamError: + return rpcError{ + code: e.Code, + desc: e.Desc, + } + case transport.ConnectionError: + return rpcError{ + code: codes.Internal, + desc: e.Desc, + } + } + return Errorf(codes.Unknown, "%v", err) +} + +// convertCode converts a standard Go error into its canonical code. Note that +// this is only used to translate the error returned by the server applications. 
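+// NOTE(editor): illustrative, not part of the vendored source. For example,
+// a handler returning io.ErrUnexpectedEOF surfaces to the client as
+// codes.FailedPrecondition, while a plain errors.New("boom") falls through
+// to codes.Unknown.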
+func convertCode(err error) codes.Code { + switch err { + case nil: + return codes.OK + case io.EOF: + return codes.OutOfRange + case io.ErrClosedPipe, io.ErrNoProgress, io.ErrShortBuffer, io.ErrShortWrite, io.ErrUnexpectedEOF: + return codes.FailedPrecondition + case os.ErrInvalid: + return codes.InvalidArgument + case context.Canceled: + return codes.Canceled + case context.DeadlineExceeded: + return codes.DeadlineExceeded + } + switch { + case os.IsExist(err): + return codes.AlreadyExists + case os.IsNotExist(err): + return codes.NotFound + case os.IsPermission(err): + return codes.PermissionDenied + } + return codes.Unknown +} + +const ( + // how long to wait after the first failure before retrying + baseDelay = 1.0 * time.Second + // upper bound of backoff delay + maxDelay = 120 * time.Second + // backoff increases by this factor on each retry + backoffFactor = 1.6 + // backoff is randomized downwards by this factor + backoffJitter = 0.2 +) + +func backoff(retries int) (t time.Duration) { + if retries == 0 { + return baseDelay + } + backoff, max := float64(baseDelay), float64(maxDelay) + for backoff < max && retries > 0 { + backoff *= backoffFactor + retries-- + } + if backoff > max { + backoff = max + } + // Randomize backoff delays so that if a cluster of requests start at + // the same time, they won't operate in lockstep. + backoff *= 1 + backoffJitter*(rand.Float64()*2-1) + if backoff < 0 { + return 0 + } + return time.Duration(backoff) +} diff --git a/Godeps/_workspace/src/google.golang.org/grpc/server.go b/Godeps/_workspace/src/google.golang.org/grpc/server.go new file mode 100644 index 000000000000..1c42b6eff278 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/server.go @@ -0,0 +1,660 @@ +/* + * + * Copyright 2014, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ + +package grpc + +import ( + "bytes" + "errors" + "fmt" + "io" + "net" + "reflect" + "runtime" + "strings" + "sync" + "time" + + "golang.org/x/net/context" + "golang.org/x/net/trace" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/transport" +) + +type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) + +// MethodDesc represents an RPC service's method specification. +type MethodDesc struct { + MethodName string + Handler methodHandler +} + +// ServiceDesc represents an RPC service's specification. +type ServiceDesc struct { + ServiceName string + // The pointer to the service interface. Used to check whether the user + // provided implementation satisfies the interface requirements. + HandlerType interface{} + Methods []MethodDesc + Streams []StreamDesc +} + +// service consists of the information of the server serving this service and +// the methods in this service. +type service struct { + server interface{} // the server for service methods + md map[string]*MethodDesc + sd map[string]*StreamDesc +} + +// Server is a gRPC server to serve RPC requests. +type Server struct { + opts options + mu sync.Mutex + lis map[net.Listener]bool + conns map[transport.ServerTransport]bool + m map[string]*service // service name -> service info + events trace.EventLog +} + +type options struct { + creds credentials.Credentials + codec Codec + cp Compressor + dc Decompressor + maxConcurrentStreams uint32 +} + +// A ServerOption sets options. +type ServerOption func(*options) + +// CustomCodec returns a ServerOption that sets a codec for message marshaling and unmarshaling. +func CustomCodec(codec Codec) ServerOption { + return func(o *options) { + o.codec = codec + } +} + +func RPCCompressor(cp Compressor) ServerOption { + return func(o *options) { + o.cp = cp + } +} + +func RPCDecompressor(dc Decompressor) ServerOption { + return func(o *options) { + o.dc = dc + } +} + +// MaxConcurrentStreams returns a ServerOption that will apply a limit on the number +// of concurrent streams to each ServerTransport. +func MaxConcurrentStreams(n uint32) ServerOption { + return func(o *options) { + o.maxConcurrentStreams = n + } +} + +// Creds returns a ServerOption that sets credentials for server connections. +func Creds(c credentials.Credentials) ServerOption { + return func(o *options) { + o.creds = c + } +} + +// NewServer creates a gRPC server which has no service registered and has not +// started to accept requests yet. +func NewServer(opt ...ServerOption) *Server { + var opts options + for _, o := range opt { + o(&opts) + } + if opts.codec == nil { + // Set the default codec. + opts.codec = protoCodec{} + } + s := &Server{ + lis: make(map[net.Listener]bool), + opts: opts, + conns: make(map[transport.ServerTransport]bool), + m: make(map[string]*service), + } + if EnableTracing { + _, file, line, _ := runtime.Caller(1) + s.events = trace.NewEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line)) + } + return s +} + +// printf records an event in s's event log, unless s has been stopped. +// REQUIRES s.mu is held. +func (s *Server) printf(format string, a ...interface{}) { + if s.events != nil { + s.events.Printf(format, a...) + } +} + +// errorf records an error in s's event log, unless s has been stopped. +// REQUIRES s.mu is held. 
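+
+// NOTE(editor): illustrative aside, not part of the vendored source. The
+// ServerOptions defined above compose when passed to NewServer; each option
+// mutates the shared options struct in order:
+//
+//	s := grpc.NewServer(
+//		grpc.MaxConcurrentStreams(100),
+//		grpc.RPCCompressor(grpc.NewGZIPCompressor()),
+//		grpc.RPCDecompressor(grpc.NewGZIPDecompressor()),
+//	)
+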
+func (s *Server) errorf(format string, a ...interface{}) {
+	if s.events != nil {
+		s.events.Errorf(format, a...)
+	}
+}
+
+// RegisterService registers a service and its implementation with the gRPC
+// server. Called from the IDL generated code. This must be called before
+// invoking Serve.
+func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) {
+	ht := reflect.TypeOf(sd.HandlerType).Elem()
+	st := reflect.TypeOf(ss)
+	if !st.Implements(ht) {
+		grpclog.Fatalf("grpc: Server.RegisterService found the handler of type %v that does not satisfy %v", st, ht)
+	}
+	s.register(sd, ss)
+}
+
+func (s *Server) register(sd *ServiceDesc, ss interface{}) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	s.printf("RegisterService(%q)", sd.ServiceName)
+	if _, ok := s.m[sd.ServiceName]; ok {
+		grpclog.Fatalf("grpc: Server.RegisterService found duplicate service registration for %q", sd.ServiceName)
+	}
+	srv := &service{
+		server: ss,
+		md:     make(map[string]*MethodDesc),
+		sd:     make(map[string]*StreamDesc),
+	}
+	for i := range sd.Methods {
+		d := &sd.Methods[i]
+		srv.md[d.MethodName] = d
+	}
+	for i := range sd.Streams {
+		d := &sd.Streams[i]
+		srv.sd[d.StreamName] = d
+	}
+	s.m[sd.ServiceName] = srv
+}
+
+var (
+	// ErrServerStopped indicates that the operation is now illegal because of
+	// the server being stopped.
+	ErrServerStopped = errors.New("grpc: the server has been stopped")
+)
+
+// Serve accepts incoming connections on the listener lis, creating a new
+// ServerTransport and service goroutine for each. The service goroutines
+// read gRPC requests and then call the registered handlers to reply to them.
+// Serve returns when lis.Accept fails.
+func (s *Server) Serve(lis net.Listener) error {
+	s.mu.Lock()
+	s.printf("serving")
+	if s.lis == nil {
+		s.mu.Unlock()
+		return ErrServerStopped
+	}
+	s.lis[lis] = true
+	s.mu.Unlock()
+	defer func() {
+		lis.Close()
+		s.mu.Lock()
+		delete(s.lis, lis)
+		s.mu.Unlock()
+	}()
+	for {
+		c, err := lis.Accept()
+		if err != nil {
+			s.mu.Lock()
+			s.printf("done serving; Accept = %v", err)
+			s.mu.Unlock()
+			return err
+		}
+		var authInfo credentials.AuthInfo
+		if creds, ok := s.opts.creds.(credentials.TransportAuthenticator); ok {
+			var conn net.Conn
+			conn, authInfo, err = creds.ServerHandshake(c)
+			if err != nil {
+				s.mu.Lock()
+				s.errorf("ServerHandshake(%q) failed: %v", c.RemoteAddr(), err)
+				s.mu.Unlock()
+				grpclog.Println("grpc: Server.Serve failed to complete security handshake.")
+				continue
+			}
+			c = conn
+		}
+		s.mu.Lock()
+		if s.conns == nil {
+			s.mu.Unlock()
+			c.Close()
+			return nil
+		}
+		s.mu.Unlock()
+
+		go s.serveNewHTTP2Transport(c, authInfo)
+	}
+}
+
+func (s *Server) serveNewHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) {
+	st, err := transport.NewServerTransport("http2", c, s.opts.maxConcurrentStreams, authInfo)
+	if err != nil {
+		s.mu.Lock()
+		s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err)
+		s.mu.Unlock()
+		c.Close()
+		grpclog.Println("grpc: Server.Serve failed to create ServerTransport: ", err)
+		return
+	}
+	if !s.addConn(st) {
+		c.Close()
+		return
+	}
+	s.serveStreams(st)
+}
+
+func (s *Server) serveStreams(st transport.ServerTransport) {
+	defer s.removeConn(st)
+	defer st.Close()
+	var wg sync.WaitGroup
+	st.HandleStreams(func(stream *transport.Stream) {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			s.handleStream(st, stream, s.traceInfo(st, stream))
+		}()
+	})
+	wg.Wait()
+}
+
+// traceInfo returns a traceInfo and associates it with stream, if tracing is enabled.
+// If tracing is not enabled, it returns nil. +func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Stream) (trInfo *traceInfo) { + if !EnableTracing { + return nil + } + trInfo = &traceInfo{ + tr: trace.New("grpc.Recv."+methodFamily(stream.Method()), stream.Method()), + } + trInfo.firstLine.client = false + trInfo.firstLine.remoteAddr = st.RemoteAddr() + stream.TraceContext(trInfo.tr) + if dl, ok := stream.Context().Deadline(); ok { + trInfo.firstLine.deadline = dl.Sub(time.Now()) + } + return trInfo +} + +func (s *Server) addConn(st transport.ServerTransport) bool { + s.mu.Lock() + defer s.mu.Unlock() + if s.conns == nil { + return false + } + s.conns[st] = true + return true +} + +func (s *Server) removeConn(st transport.ServerTransport) { + s.mu.Lock() + defer s.mu.Unlock() + if s.conns != nil { + delete(s.conns, st) + } +} + +func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options) error { + var cbuf *bytes.Buffer + if cp != nil { + cbuf = new(bytes.Buffer) + } + p, err := encode(s.opts.codec, msg, cp, cbuf) + if err != nil { + // This typically indicates a fatal issue (e.g., memory + // corruption or hardware faults) the application program + // cannot handle. + // + // TODO(zhaoq): There exist other options also such as only closing the + // faulty stream locally and remotely (Other streams can keep going). Find + // the optimal option. + grpclog.Fatalf("grpc: Server failed to encode response %v", err) + } + return t.Write(stream, p, opts) +} + +func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, md *MethodDesc, trInfo *traceInfo) (err error) { + if trInfo != nil { + defer trInfo.tr.Finish() + trInfo.firstLine.client = false + trInfo.tr.LazyLog(&trInfo.firstLine, false) + defer func() { + if err != nil && err != io.EOF { + trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + trInfo.tr.SetError() + } + }() + } + p := &parser{s: stream} + for { + pf, req, err := p.recvMsg() + if err == io.EOF { + // The entire stream is done (for unary RPC only). + return err + } + if err != nil { + switch err := err.(type) { + case transport.ConnectionError: + // Nothing to do here. 
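+				// NOTE(editor): the transport is already broken here, so
+				// there is no point writing a status frame back; the error
+				// is returned and the connection torn down by the caller.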
+ case transport.StreamError: + if err := t.WriteStatus(stream, err.Code, err.Desc); err != nil { + grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", err) + } + default: + panic(fmt.Sprintf("grpc: Unexpected error (%T) from recvMsg: %v", err, err)) + } + return err + } + + if err := checkRecvPayload(pf, stream.RecvCompress(), s.opts.dc); err != nil { + switch err := err.(type) { + case transport.StreamError: + if err := t.WriteStatus(stream, err.Code, err.Desc); err != nil { + grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", err) + } + default: + if err := t.WriteStatus(stream, codes.Internal, err.Error()); err != nil { + grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", err) + } + + } + return err + } + statusCode := codes.OK + statusDesc := "" + df := func(v interface{}) error { + if pf == compressionMade { + var err error + req, err = s.opts.dc.Do(bytes.NewReader(req)) + if err != nil { + if err := t.WriteStatus(stream, codes.Internal, err.Error()); err != nil { + grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", err) + } + return err + } + } + if err := s.opts.codec.Unmarshal(req, v); err != nil { + return err + } + if trInfo != nil { + trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true) + } + return nil + } + reply, appErr := md.Handler(srv.server, stream.Context(), df) + if appErr != nil { + if err, ok := appErr.(rpcError); ok { + statusCode = err.code + statusDesc = err.desc + } else { + statusCode = convertCode(appErr) + statusDesc = appErr.Error() + } + if trInfo != nil && statusCode != codes.OK { + trInfo.tr.LazyLog(stringer(statusDesc), true) + trInfo.tr.SetError() + } + if err := t.WriteStatus(stream, statusCode, statusDesc); err != nil { + grpclog.Printf("grpc: Server.processUnaryRPC failed to write status: %v", err) + return err + } + return nil + } + if trInfo != nil { + trInfo.tr.LazyLog(stringer("OK"), false) + } + opts := &transport.Options{ + Last: true, + Delay: false, + } + if s.opts.cp != nil { + stream.SetSendCompress(s.opts.cp.Type()) + } + if err := s.sendResponse(t, stream, reply, s.opts.cp, opts); err != nil { + switch err := err.(type) { + case transport.ConnectionError: + // Nothing to do here. 
+ case transport.StreamError: + statusCode = err.Code + statusDesc = err.Desc + default: + statusCode = codes.Unknown + statusDesc = err.Error() + } + return err + } + if trInfo != nil { + trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true) + } + return t.WriteStatus(stream, statusCode, statusDesc) + } +} + +func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, sd *StreamDesc, trInfo *traceInfo) (err error) { + if s.opts.cp != nil { + stream.SetSendCompress(s.opts.cp.Type()) + } + ss := &serverStream{ + t: t, + s: stream, + p: &parser{s: stream}, + codec: s.opts.codec, + cp: s.opts.cp, + dc: s.opts.dc, + trInfo: trInfo, + } + if ss.cp != nil { + ss.cbuf = new(bytes.Buffer) + } + if trInfo != nil { + trInfo.tr.LazyLog(&trInfo.firstLine, false) + defer func() { + ss.mu.Lock() + if err != nil && err != io.EOF { + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.trInfo.tr.SetError() + } + ss.trInfo.tr.Finish() + ss.trInfo.tr = nil + ss.mu.Unlock() + }() + } + if appErr := sd.Handler(srv.server, ss); appErr != nil { + if err, ok := appErr.(rpcError); ok { + ss.statusCode = err.code + ss.statusDesc = err.desc + } else if err, ok := appErr.(transport.StreamError); ok { + ss.statusCode = err.Code + ss.statusDesc = err.Desc + } else { + ss.statusCode = convertCode(appErr) + ss.statusDesc = appErr.Error() + } + } + if trInfo != nil { + ss.mu.Lock() + if ss.statusCode != codes.OK { + ss.trInfo.tr.LazyLog(stringer(ss.statusDesc), true) + ss.trInfo.tr.SetError() + } else { + ss.trInfo.tr.LazyLog(stringer("OK"), false) + } + ss.mu.Unlock() + } + return t.WriteStatus(ss.s, ss.statusCode, ss.statusDesc) + +} + +func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream, trInfo *traceInfo) { + sm := stream.Method() + if sm != "" && sm[0] == '/' { + sm = sm[1:] + } + pos := strings.LastIndex(sm, "/") + if pos == -1 { + if trInfo != nil { + trInfo.tr.LazyLog(&fmtStringer{"Malformed method name %q", []interface{}{sm}}, true) + trInfo.tr.SetError() + } + if err := t.WriteStatus(stream, codes.InvalidArgument, fmt.Sprintf("malformed method name: %q", stream.Method())); err != nil { + if trInfo != nil { + trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + trInfo.tr.SetError() + } + grpclog.Printf("grpc: Server.handleStream failed to write status: %v", err) + } + if trInfo != nil { + trInfo.tr.Finish() + } + return + } + service := sm[:pos] + method := sm[pos+1:] + srv, ok := s.m[service] + if !ok { + if trInfo != nil { + trInfo.tr.LazyLog(&fmtStringer{"Unknown service %v", []interface{}{service}}, true) + trInfo.tr.SetError() + } + if err := t.WriteStatus(stream, codes.Unimplemented, fmt.Sprintf("unknown service %v", service)); err != nil { + if trInfo != nil { + trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + trInfo.tr.SetError() + } + grpclog.Printf("grpc: Server.handleStream failed to write status: %v", err) + } + if trInfo != nil { + trInfo.tr.Finish() + } + return + } + // Unary RPC or Streaming RPC? 
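+	// NOTE(editor): illustrative, not part of the vendored source. For a
+	// full method name "/helloworld.Greeter/SayHello", service is
+	// "helloworld.Greeter" and method is "SayHello"; the two lookups below
+	// dispatch on exactly those keys.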
+	if md, ok := srv.md[method]; ok {
+		s.processUnaryRPC(t, stream, srv, md, trInfo)
+		return
+	}
+	if sd, ok := srv.sd[method]; ok {
+		s.processStreamingRPC(t, stream, srv, sd, trInfo)
+		return
+	}
+	if trInfo != nil {
+		trInfo.tr.LazyLog(&fmtStringer{"Unknown method %v", []interface{}{method}}, true)
+		trInfo.tr.SetError()
+	}
+	if err := t.WriteStatus(stream, codes.Unimplemented, fmt.Sprintf("unknown method %v", method)); err != nil {
+		if trInfo != nil {
+			trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
+			trInfo.tr.SetError()
+		}
+		grpclog.Printf("grpc: Server.handleStream failed to write status: %v", err)
+	}
+	if trInfo != nil {
+		trInfo.tr.Finish()
+	}
+}
+
+// Stop stops the gRPC server. Once Stop returns, the server stops accepting
+// new connection requests and closes all established connections.
+func (s *Server) Stop() {
+	s.mu.Lock()
+	listeners := s.lis
+	s.lis = nil
+	cs := s.conns
+	s.conns = nil
+	s.mu.Unlock()
+	for lis := range listeners {
+		lis.Close()
+	}
+	for c := range cs {
+		c.Close()
+	}
+	s.mu.Lock()
+	if s.events != nil {
+		s.events.Finish()
+		s.events = nil
+	}
+	s.mu.Unlock()
+}
+
+// TestingCloseConns closes all existing transports but keeps s.lis accepting new
+// connections. This is for testing only.
+func (s *Server) TestingCloseConns() {
+	s.mu.Lock()
+	for c := range s.conns {
+		c.Close()
+	}
+	s.conns = make(map[transport.ServerTransport]bool)
+	s.mu.Unlock()
+}
+
+// SendHeader sends header metadata. It may be called at most once from a unary
+// RPC handler. The ctx is the RPC handler's Context or one derived from it.
+func SendHeader(ctx context.Context, md metadata.MD) error {
+	if md.Len() == 0 {
+		return nil
+	}
+	stream, ok := transport.StreamFromContext(ctx)
+	if !ok {
+		return fmt.Errorf("grpc: failed to fetch the stream from the context %v", ctx)
+	}
+	t := stream.ServerTransport()
+	if t == nil {
+		grpclog.Fatalf("grpc: SendHeader: %v has no ServerTransport to send header metadata.", stream)
+	}
+	return t.WriteHeader(stream, md)
+}
+
+// SetTrailer sets the trailer metadata that will be sent when an RPC returns.
+// It may be called at most once from a unary RPC handler. The ctx is the RPC
+// handler's Context or one derived from it.
+func SetTrailer(ctx context.Context, md metadata.MD) error {
+	if md.Len() == 0 {
+		return nil
+	}
+	stream, ok := transport.StreamFromContext(ctx)
+	if !ok {
+		return fmt.Errorf("grpc: failed to fetch the stream from the context %v", ctx)
+	}
+	return stream.SetTrailer(md)
+}
diff --git a/Godeps/_workspace/src/google.golang.org/grpc/stream.go b/Godeps/_workspace/src/google.golang.org/grpc/stream.go
new file mode 100644
index 000000000000..4974d8a82ab0
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/grpc/stream.go
@@ -0,0 +1,402 @@
+/*
+ *
+ * Copyright 2014, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+package grpc
+
+import (
+	"bytes"
+	"errors"
+	"io"
+	"sync"
+	"time"
+
+	"golang.org/x/net/context"
+	"golang.org/x/net/trace"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/transport"
+)
+
+type streamHandler func(srv interface{}, stream ServerStream) error
+
+// StreamDesc represents a streaming RPC service's method specification.
+type StreamDesc struct {
+	StreamName string
+	Handler    streamHandler
+
+	// At least one of these is true.
+	ServerStreams bool
+	ClientStreams bool
+}
+
+// Stream defines the common interface a client or server stream has to satisfy.
+type Stream interface {
+	// Context returns the context for this stream.
+	Context() context.Context
+	// SendMsg blocks until it sends m, the stream is done or the stream
+	// breaks.
+	// On error, it aborts the stream and returns an RPC status on client
+	// side. On server side, it simply returns the error to the caller.
+	// SendMsg is called by generated code.
+	SendMsg(m interface{}) error
+	// RecvMsg blocks until it receives a message or the stream is
+	// done. On client side, it returns io.EOF when the stream is done. On
+	// any other error, it aborts the stream and returns an RPC status. On
+	// server side, it simply returns the error to the caller.
+	RecvMsg(m interface{}) error
+}
+
+// ClientStream defines the interface a client stream has to satisfy.
+type ClientStream interface {
+	// Header returns the header metadata received from the server if there
+	// is any. It blocks if the metadata is not ready to read.
+	Header() (metadata.MD, error)
+	// Trailer returns the trailer metadata from the server. It must be called
+	// after stream.Recv() returns a non-nil error (including io.EOF) for
+	// bi-directional streaming and server streaming, or after stream.CloseAndRecv()
+	// returns for client streaming, in order to receive trailer metadata if
+	// present. Otherwise, it could return an empty MD even though the trailer
+	// is present.
+	Trailer() metadata.MD
+	// CloseSend closes the send direction of the stream. It closes the stream
+	// when a non-nil error is met.
+	CloseSend() error
+	Stream
+}
+
+// NewClientStream creates a new Stream for the client side. This is called
+// by generated code.
+func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) {
+	var (
+		t   transport.ClientTransport
+		err error
+	)
+	t, err = cc.dopts.picker.Pick(ctx)
+	if err != nil {
+		return nil, toRPCErr(err)
+	}
+	// TODO(zhaoq): CallOption is omitted.
Add support when it is needed. + callHdr := &transport.CallHdr{ + Host: cc.authority, + Method: method, + Flush: desc.ServerStreams&&desc.ClientStreams, + } + if cc.dopts.cp != nil { + callHdr.SendCompress = cc.dopts.cp.Type() + } + cs := &clientStream{ + desc: desc, + codec: cc.dopts.codec, + cp: cc.dopts.cp, + dc: cc.dopts.dc, + tracing: EnableTracing, + } + if cc.dopts.cp != nil { + callHdr.SendCompress = cc.dopts.cp.Type() + cs.cbuf = new(bytes.Buffer) + } + if cs.tracing { + cs.trInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method) + cs.trInfo.firstLine.client = true + if deadline, ok := ctx.Deadline(); ok { + cs.trInfo.firstLine.deadline = deadline.Sub(time.Now()) + } + cs.trInfo.tr.LazyLog(&cs.trInfo.firstLine, false) + ctx = trace.NewContext(ctx, cs.trInfo.tr) + } + s, err := t.NewStream(ctx, callHdr) + if err != nil { + cs.finish(err) + return nil, toRPCErr(err) + } + cs.t = t + cs.s = s + cs.p = &parser{s: s} + // Listen on ctx.Done() to detect cancellation when there is no pending + // I/O operations on this stream. + go func() { + select { + case <-t.Error(): + // Incur transport error, simply exit. + case <-s.Context().Done(): + err := s.Context().Err() + cs.finish(err) + cs.closeTransportStream(transport.ContextErr(err)) + } + }() + return cs, nil +} + +// clientStream implements a client side Stream. +type clientStream struct { + t transport.ClientTransport + s *transport.Stream + p *parser + desc *StreamDesc + codec Codec + cp Compressor + cbuf *bytes.Buffer + dc Decompressor + + tracing bool // set to EnableTracing when the clientStream is created. + + mu sync.Mutex + closed bool + // trInfo.tr is set when the clientStream is created (if EnableTracing is true), + // and is set to nil when the clientStream's finish method is called. + trInfo traceInfo +} + +func (cs *clientStream) Context() context.Context { + return cs.s.Context() +} + +func (cs *clientStream) Header() (metadata.MD, error) { + m, err := cs.s.Header() + if err != nil { + if _, ok := err.(transport.ConnectionError); !ok { + cs.closeTransportStream(err) + } + } + return m, err +} + +func (cs *clientStream) Trailer() metadata.MD { + return cs.s.Trailer() +} + +func (cs *clientStream) SendMsg(m interface{}) (err error) { + if cs.tracing { + cs.mu.Lock() + if cs.trInfo.tr != nil { + cs.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true) + } + cs.mu.Unlock() + } + defer func() { + if err == nil || err == io.EOF { + return + } + if _, ok := err.(transport.ConnectionError); !ok { + cs.closeTransportStream(err) + } + err = toRPCErr(err) + }() + out, err := encode(cs.codec, m, cs.cp, cs.cbuf) + defer func() { + if cs.cbuf != nil { + cs.cbuf.Reset() + } + }() + if err != nil { + return transport.StreamErrorf(codes.Internal, "grpc: %v", err) + } + return cs.t.Write(cs.s, out, &transport.Options{Last: false}) +} + +func (cs *clientStream) RecvMsg(m interface{}) (err error) { + err = recv(cs.p, cs.codec, cs.s, cs.dc, m) + defer func() { + // err != nil indicates the termination of the stream. + if err != nil { + cs.finish(err) + } + }() + if err == nil { + if cs.tracing { + cs.mu.Lock() + if cs.trInfo.tr != nil { + cs.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true) + } + cs.mu.Unlock() + } + if !cs.desc.ClientStreams || cs.desc.ServerStreams { + return + } + // Special handling for client streaming rpc. 
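+		// NOTE(editor): a client-streaming RPC carries exactly one response
+		// message, so after the first successful receive the second recv
+		// below must yield io.EOF; any other outcome is a protocol violation.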
+		err = recv(cs.p, cs.codec, cs.s, cs.dc, m)
+		cs.closeTransportStream(err)
+		if err == nil {
+			return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
+		}
+		if err == io.EOF {
+			if cs.s.StatusCode() == codes.OK {
+				return nil
+			}
+			return Errorf(cs.s.StatusCode(), cs.s.StatusDesc())
+		}
+		return toRPCErr(err)
+	}
+	if _, ok := err.(transport.ConnectionError); !ok {
+		cs.closeTransportStream(err)
+	}
+	if err == io.EOF {
+		if cs.s.StatusCode() == codes.OK {
+			// Returns io.EOF to indicate the end of the stream.
+			return
+		}
+		return Errorf(cs.s.StatusCode(), cs.s.StatusDesc())
+	}
+	return toRPCErr(err)
+}
+
+func (cs *clientStream) CloseSend() (err error) {
+	err = cs.t.Write(cs.s, nil, &transport.Options{Last: true})
+	if err == nil || err == io.EOF {
+		return
+	}
+	if _, ok := err.(transport.ConnectionError); !ok {
+		cs.closeTransportStream(err)
+	}
+	err = toRPCErr(err)
+	return
+}
+
+func (cs *clientStream) closeTransportStream(err error) {
+	cs.mu.Lock()
+	if cs.closed {
+		cs.mu.Unlock()
+		return
+	}
+	cs.closed = true
+	cs.mu.Unlock()
+	cs.t.CloseStream(cs.s, err)
+}
+
+func (cs *clientStream) finish(err error) {
+	if !cs.tracing {
+		return
+	}
+	cs.mu.Lock()
+	defer cs.mu.Unlock()
+	if cs.trInfo.tr != nil {
+		if err == nil || err == io.EOF {
+			cs.trInfo.tr.LazyPrintf("RPC: [OK]")
+		} else {
+			cs.trInfo.tr.LazyPrintf("RPC: [%v]", err)
+			cs.trInfo.tr.SetError()
+		}
+		cs.trInfo.tr.Finish()
+		cs.trInfo.tr = nil
+	}
+}
+
+// ServerStream defines the interface a server stream has to satisfy.
+type ServerStream interface {
+	// SendHeader sends the header metadata. It fails if called multiple
+	// times or if called after SendMsg.
+	SendHeader(metadata.MD) error
+	// SetTrailer sets the trailer metadata which will be sent with the
+	// RPC status.
+	SetTrailer(metadata.MD)
+	Stream
+}
+
+// serverStream implements a server side Stream.
+type serverStream struct {
+	t          transport.ServerTransport
+	s          *transport.Stream
+	p          *parser
+	codec      Codec
+	cp         Compressor
+	dc         Decompressor
+	cbuf       *bytes.Buffer
+	statusCode codes.Code
+	statusDesc string
+	trInfo     *traceInfo
+
+	mu sync.Mutex // protects trInfo.tr after the service handler runs.
+} + +func (ss *serverStream) Context() context.Context { + return ss.s.Context() +} + +func (ss *serverStream) SendHeader(md metadata.MD) error { + return ss.t.WriteHeader(ss.s, md) +} + +func (ss *serverStream) SetTrailer(md metadata.MD) { + if md.Len() == 0 { + return + } + ss.s.SetTrailer(md) + return +} + +func (ss *serverStream) SendMsg(m interface{}) (err error) { + defer func() { + if ss.trInfo != nil { + ss.mu.Lock() + if ss.trInfo.tr != nil { + if err == nil { + ss.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true) + } else { + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.trInfo.tr.SetError() + } + } + ss.mu.Unlock() + } + }() + out, err := encode(ss.codec, m, ss.cp, ss.cbuf) + defer func() { + if ss.cbuf != nil { + ss.cbuf.Reset() + } + }() + if err != nil { + err = transport.StreamErrorf(codes.Internal, "grpc: %v", err) + return err + } + return ss.t.Write(ss.s, out, &transport.Options{Last: false}) +} + +func (ss *serverStream) RecvMsg(m interface{}) (err error) { + defer func() { + if ss.trInfo != nil { + ss.mu.Lock() + if ss.trInfo.tr != nil { + if err == nil { + ss.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true) + } else if err != io.EOF { + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.trInfo.tr.SetError() + } + } + ss.mu.Unlock() + } + }() + return recv(ss.p, ss.codec, ss.s, ss.dc, m) +} diff --git a/Godeps/_workspace/src/google.golang.org/grpc/trace.go b/Godeps/_workspace/src/google.golang.org/grpc/trace.go new file mode 100644 index 000000000000..cde04fbfc9e6 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/trace.go @@ -0,0 +1,120 @@ +/* + * + * Copyright 2015, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +package grpc + +import ( + "bytes" + "fmt" + "io" + "net" + "strings" + "time" + + "golang.org/x/net/trace" +) + +// EnableTracing controls whether to trace RPCs using the golang.org/x/net/trace package. +// This should only be set before any RPCs are sent or received by this program. 
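+// NOTE(editor): illustrative, not part of the vendored source. The trace
+// package registers its handlers on http.DefaultServeMux, so running a
+// debug listener such as
+//
+//	go http.ListenAndServe("localhost:8080", nil)
+//
+// exposes the collected RPC traces at /debug/requests.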
+var EnableTracing = true + +// methodFamily returns the trace family for the given method. +// It turns "/pkg.Service/GetFoo" into "pkg.Service". +func methodFamily(m string) string { + m = strings.TrimPrefix(m, "/") // remove leading slash + if i := strings.Index(m, "/"); i >= 0 { + m = m[:i] // remove everything from second slash + } + if i := strings.LastIndex(m, "."); i >= 0 { + m = m[i+1:] // cut down to last dotted component + } + return m +} + +// traceInfo contains tracing information for an RPC. +type traceInfo struct { + tr trace.Trace + firstLine firstLine +} + +// firstLine is the first line of an RPC trace. +type firstLine struct { + client bool // whether this is a client (outgoing) RPC + remoteAddr net.Addr + deadline time.Duration // may be zero +} + +func (f *firstLine) String() string { + var line bytes.Buffer + io.WriteString(&line, "RPC: ") + if f.client { + io.WriteString(&line, "to") + } else { + io.WriteString(&line, "from") + } + fmt.Fprintf(&line, " %v deadline:", f.remoteAddr) + if f.deadline != 0 { + fmt.Fprint(&line, f.deadline) + } else { + io.WriteString(&line, "none") + } + return line.String() +} + +// payload represents an RPC request or response payload. +type payload struct { + sent bool // whether this is an outgoing payload + msg interface{} // e.g. a proto.Message + // TODO(dsymonds): add stringifying info to codec, and limit how much we hold here? +} + +func (p payload) String() string { + if p.sent { + return fmt.Sprintf("sent: %v", p.msg) + } else { + return fmt.Sprintf("recv: %v", p.msg) + } +} + +type fmtStringer struct { + format string + a []interface{} +} + +func (f *fmtStringer) String() string { + return fmt.Sprintf(f.format, f.a...) +} + +type stringer string + +func (s stringer) String() string { return string(s) } diff --git a/Godeps/_workspace/src/google.golang.org/grpc/transport/control.go b/Godeps/_workspace/src/google.golang.org/grpc/transport/control.go new file mode 100644 index 000000000000..f6b38a5a6d27 --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/transport/control.go @@ -0,0 +1,260 @@ +/* + * + * Copyright 2014, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +package transport + +import ( + "fmt" + "sync" + + "golang.org/x/net/http2" +) + +const ( + // The default value of flow control window size in HTTP2 spec. + defaultWindowSize = 65535 + // The initial window size for flow control. + initialWindowSize = defaultWindowSize // for an RPC + initialConnWindowSize = defaultWindowSize * 16 // for a connection +) + +// The following defines various control items which could flow through +// the control buffer of transport. They represent different aspects of +// control tasks, e.g., flow control, settings, streaming resetting, etc. +type windowUpdate struct { + streamID uint32 + increment uint32 +} + +func (windowUpdate) isItem() bool { + return true +} + +type settings struct { + ack bool + ss []http2.Setting +} + +func (settings) isItem() bool { + return true +} + +type resetStream struct { + streamID uint32 + code http2.ErrCode +} + +func (resetStream) isItem() bool { + return true +} + +type flushIO struct { +} + +func (flushIO) isItem() bool { + return true +} + +type ping struct { + ack bool + data [8]byte +} + +func (ping) isItem() bool { + return true +} + +// quotaPool is a pool which accumulates the quota and sends it to acquire() +// when it is available. +type quotaPool struct { + c chan int + + mu sync.Mutex + quota int +} + +// newQuotaPool creates a quotaPool which has quota q available to consume. +func newQuotaPool(q int) *quotaPool { + qb := "aPool{ + c: make(chan int, 1), + } + if q > 0 { + qb.c <- q + } else { + qb.quota = q + } + return qb +} + +// add adds n to the available quota and tries to send it on acquire. +func (qb *quotaPool) add(n int) { + qb.mu.Lock() + defer qb.mu.Unlock() + qb.quota += n + if qb.quota <= 0 { + return + } + select { + case qb.c <- qb.quota: + qb.quota = 0 + default: + } +} + +// cancel cancels the pending quota sent on acquire, if any. +func (qb *quotaPool) cancel() { + qb.mu.Lock() + defer qb.mu.Unlock() + select { + case n := <-qb.c: + qb.quota += n + default: + } +} + +// reset cancels the pending quota sent on acquired, incremented by v and sends +// it back on acquire. +func (qb *quotaPool) reset(v int) { + qb.mu.Lock() + defer qb.mu.Unlock() + select { + case n := <-qb.c: + qb.quota += n + default: + } + qb.quota += v + if qb.quota <= 0 { + return + } + select { + case qb.c <- qb.quota: + qb.quota = 0 + default: + } +} + +// acquire returns the channel on which available quota amounts are sent. +func (qb *quotaPool) acquire() <-chan int { + return qb.c +} + +// inFlow deals with inbound flow control +type inFlow struct { + // The inbound flow control limit for pending data. + limit uint32 + // conn points to the shared connection-level inFlow that is shared + // by all streams on that conn. It is nil for the inFlow on the conn + // directly. + conn *inFlow + + mu sync.Mutex + // pendingData is the overall data which have been received but not been + // consumed by applications. 
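+	// NOTE(editor): illustrative, not part of the vendored source. With the
+	// default 64KB limit, window updates are batched: onRead below only
+	// emits one once at least limit/4 (~16KB) has been consumed, trading a
+	// little flow-control slack for far fewer WINDOW_UPDATE frames.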
+	pendingData uint32
+	// pendingUpdate is the amount of data the application has consumed for
+	// which grpc has not yet sent a window update. Used to reduce window
+	// update frequency.
+	pendingUpdate uint32
+}
+
+// onData is invoked when some data frame is received. It increments not only its
+// own pendingData but also that of the associated connection-level flow.
+func (f *inFlow) onData(n uint32) error {
+	if n == 0 {
+		return nil
+	}
+	f.mu.Lock()
+	defer f.mu.Unlock()
+	if f.pendingData+f.pendingUpdate+n > f.limit {
+		return fmt.Errorf("received %d bytes of data, exceeding the limit %d bytes", f.pendingData+f.pendingUpdate+n, f.limit)
+	}
+	if f.conn != nil {
+		if err := f.conn.onData(n); err != nil {
+			return ConnectionErrorf("%v", err)
+		}
+	}
+	f.pendingData += n
+	return nil
+}
+
+// connOnRead updates the connection level states when the application consumes data.
+func (f *inFlow) connOnRead(n uint32) uint32 {
+	if n == 0 || f.conn != nil {
+		return 0
+	}
+	f.mu.Lock()
+	defer f.mu.Unlock()
+	f.pendingData -= n
+	f.pendingUpdate += n
+	if f.pendingUpdate >= f.limit/4 {
+		ret := f.pendingUpdate
+		f.pendingUpdate = 0
+		return ret
+	}
+	return 0
+}
+
+// onRead is invoked when the application reads the data. It returns the window updates
+// for both stream and connection level.
+func (f *inFlow) onRead(n uint32) (swu, cwu uint32) {
+	if n == 0 {
+		return
+	}
+	f.mu.Lock()
+	defer f.mu.Unlock()
+	if f.pendingData == 0 {
+		// pendingData has been adjusted by restoreConn.
+		return
+	}
+	f.pendingData -= n
+	f.pendingUpdate += n
+	if f.pendingUpdate >= f.limit/4 {
+		swu = f.pendingUpdate
+		f.pendingUpdate = 0
+	}
+	cwu = f.conn.connOnRead(n)
+	return
+}
+
+// restoreConn is invoked when a stream is terminated. It removes its stake in
+// the connection-level flow and resets its own state.
+func (f *inFlow) restoreConn() uint32 {
+	if f.conn == nil {
+		return 0
+	}
+	f.mu.Lock()
+	defer f.mu.Unlock()
+	n := f.pendingData
+	f.pendingData = 0
+	f.pendingUpdate = 0
+	return f.conn.connOnRead(n)
+}
diff --git a/Godeps/_workspace/src/google.golang.org/grpc/transport/http2_client.go b/Godeps/_workspace/src/google.golang.org/grpc/transport/http2_client.go
new file mode 100644
index 000000000000..7cf700fe3e47
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/grpc/transport/http2_client.go
@@ -0,0 +1,875 @@
+/*
+ *
+ * Copyright 2014, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +package transport + +import ( + "bytes" + "errors" + "io" + "math" + "net" + "strings" + "sync" + "time" + + "golang.org/x/net/context" + "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" +) + +// http2Client implements the ClientTransport interface with HTTP2. +type http2Client struct { + target string // server name/addr + userAgent string + conn net.Conn // underlying communication channel + authInfo credentials.AuthInfo // auth info about the connection + nextID uint32 // the next stream ID to be used + + // writableChan synchronizes write access to the transport. + // A writer acquires the write lock by sending a value on writableChan + // and releases it by receiving from writableChan. + writableChan chan int + // shutdownChan is closed when Close is called. + // Blocking operations should select on shutdownChan to avoid + // blocking forever after Close. + // TODO(zhaoq): Maybe have a channel context? + shutdownChan chan struct{} + // errorChan is closed to notify the I/O error to the caller. + errorChan chan struct{} + + framer *framer + hBuf *bytes.Buffer // the buffer for HPACK encoding + hEnc *hpack.Encoder // HPACK encoder + + // controlBuf delivers all the control related tasks (e.g., window + // updates, reset streams, and various settings) to the controller. + controlBuf *recvBuffer + fc *inFlow + // sendQuotaPool provides flow control to outbound message. + sendQuotaPool *quotaPool + // streamsQuota limits the max number of concurrent streams. + streamsQuota *quotaPool + + // The scheme used: https if TLS is on, http otherwise. + scheme string + + authCreds []credentials.Credentials + + mu sync.Mutex // guard the following variables + state transportState // the state of underlying connection + activeStreams map[uint32]*Stream + // The max number of concurrent streams + maxStreams int + // the per-stream outbound flow control window size set by the peer. + streamSendQuota uint32 +} + +// newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2 +// and starts to receive messages on it. Non-nil error returns if construction +// fails. +func newHTTP2Client(addr string, opts *ConnectOptions) (_ ClientTransport, err error) { + if opts.Dialer == nil { + // Set the default Dialer. 
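+		// NOTE(editor): illustrative, not part of the vendored source. Any
+		// function with this signature can be supplied via ConnectOptions,
+		// e.g. to dial a Unix-domain socket instead of TCP:
+		//
+		//	opts.Dialer = func(addr string, timeout time.Duration) (net.Conn, error) {
+		//		return net.DialTimeout("unix", addr, timeout)
+		//	}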
+ opts.Dialer = func(addr string, timeout time.Duration) (net.Conn, error) { + return net.DialTimeout("tcp", addr, timeout) + } + } + scheme := "http" + startT := time.Now() + timeout := opts.Timeout + conn, connErr := opts.Dialer(addr, timeout) + if connErr != nil { + return nil, ConnectionErrorf("transport: %v", connErr) + } + var authInfo credentials.AuthInfo + for _, c := range opts.AuthOptions { + if ccreds, ok := c.(credentials.TransportAuthenticator); ok { + scheme = "https" + // TODO(zhaoq): Now the first TransportAuthenticator is used if there are + // multiple ones provided. Revisit this if it is not appropriate. Probably + // place the ClientTransport construction into a separate function to make + // things clear. + if timeout > 0 { + timeout -= time.Since(startT) + } + conn, authInfo, connErr = ccreds.ClientHandshake(addr, conn, timeout) + break + } + } + if connErr != nil { + return nil, ConnectionErrorf("transport: %v", connErr) + } + defer func() { + if err != nil { + conn.Close() + } + }() + // Send connection preface to server. + n, err := conn.Write(clientPreface) + if err != nil { + return nil, ConnectionErrorf("transport: %v", err) + } + if n != len(clientPreface) { + return nil, ConnectionErrorf("transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface)) + } + framer := newFramer(conn) + if initialWindowSize != defaultWindowSize { + err = framer.writeSettings(true, http2.Setting{http2.SettingInitialWindowSize, uint32(initialWindowSize)}) + } else { + err = framer.writeSettings(true) + } + if err != nil { + return nil, ConnectionErrorf("transport: %v", err) + } + // Adjust the connection flow control window if needed. + if delta := uint32(initialConnWindowSize - defaultWindowSize); delta > 0 { + if err := framer.writeWindowUpdate(true, 0, delta); err != nil { + return nil, ConnectionErrorf("transport: %v", err) + } + } + ua := primaryUA + if opts.UserAgent != "" { + ua = opts.UserAgent + " " + ua + } + var buf bytes.Buffer + t := &http2Client{ + target: addr, + userAgent: ua, + conn: conn, + authInfo: authInfo, + // The client initiated stream id is odd starting from 1. + nextID: 1, + writableChan: make(chan int, 1), + shutdownChan: make(chan struct{}), + errorChan: make(chan struct{}), + framer: framer, + hBuf: &buf, + hEnc: hpack.NewEncoder(&buf), + controlBuf: newRecvBuffer(), + fc: &inFlow{limit: initialConnWindowSize}, + sendQuotaPool: newQuotaPool(defaultWindowSize), + scheme: scheme, + state: reachable, + activeStreams: make(map[uint32]*Stream), + authCreds: opts.AuthOptions, + maxStreams: math.MaxInt32, + streamSendQuota: defaultWindowSize, + } + go t.controller() + t.writableChan <- 0 + // Start the reader goroutine for incoming message. The threading model + // on receiving is that each transport has a dedicated goroutine which + // reads HTTP2 frame from network. Then it dispatches the frame to the + // corresponding stream entity. + go t.reader() + return t, nil +} + +func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { + fc := &inFlow{ + limit: initialWindowSize, + conn: t.fc, + } + // TODO(zhaoq): Handle uint32 overflow of Stream.id. + s := &Stream{ + id: t.nextID, + method: callHdr.Method, + sendCompress: callHdr.SendCompress, + buf: newRecvBuffer(), + fc: fc, + sendQuotaPool: newQuotaPool(int(t.streamSendQuota)), + headerChan: make(chan struct{}), + } + t.nextID += 2 + s.windowHandler = func(n int) { + t.updateWindow(s, uint32(n)) + } + // Make a stream be able to cancel the pending operations by itself. 
+ s.ctx, s.cancel = context.WithCancel(ctx) + s.dec = &recvBufferReader{ + ctx: s.ctx, + recv: s.buf, + } + return s +} + +// NewStream creates a stream and register it into the transport as "active" +// streams. +func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) { + // Record the timeout value on the context. + var timeout time.Duration + if dl, ok := ctx.Deadline(); ok { + timeout = dl.Sub(time.Now()) + if timeout <= 0 { + return nil, ContextErr(context.DeadlineExceeded) + } + } + pr := &peer.Peer{ + Addr: t.conn.RemoteAddr(), + } + // Attach Auth info if there is any. + if t.authInfo != nil { + pr.AuthInfo = t.authInfo + } + ctx = peer.NewContext(ctx, pr) + authData := make(map[string]string) + for _, c := range t.authCreds { + // Construct URI required to get auth request metadata. + var port string + if pos := strings.LastIndex(t.target, ":"); pos != -1 { + // Omit port if it is the default one. + if t.target[pos+1:] != "443" { + port = ":" + t.target[pos+1:] + } + } + pos := strings.LastIndex(callHdr.Method, "/") + if pos == -1 { + return nil, StreamErrorf(codes.InvalidArgument, "transport: malformed method name: %q", callHdr.Method) + } + audience := "https://" + callHdr.Host + port + callHdr.Method[:pos] + data, err := c.GetRequestMetadata(ctx, audience) + if err != nil { + return nil, StreamErrorf(codes.InvalidArgument, "transport: %v", err) + } + for k, v := range data { + authData[k] = v + } + } + t.mu.Lock() + if t.state != reachable { + t.mu.Unlock() + return nil, ErrConnClosing + } + checkStreamsQuota := t.streamsQuota != nil + t.mu.Unlock() + if checkStreamsQuota { + sq, err := wait(ctx, t.shutdownChan, t.streamsQuota.acquire()) + if err != nil { + return nil, err + } + // Returns the quota balance back. + if sq > 1 { + t.streamsQuota.add(sq - 1) + } + } + if _, err := wait(ctx, t.shutdownChan, t.writableChan); err != nil { + // t.streamsQuota will be updated when t.CloseStream is invoked. + return nil, err + } + t.mu.Lock() + if t.state != reachable { + t.mu.Unlock() + return nil, ErrConnClosing + } + s := t.newStream(ctx, callHdr) + t.activeStreams[s.id] = s + + // This stream is not counted when applySetings(...) initialize t.streamsQuota. + // Reset t.streamsQuota to the right value. + var reset bool + if !checkStreamsQuota && t.streamsQuota != nil { + reset = true + } + t.mu.Unlock() + if reset { + t.streamsQuota.reset(-1) + } + + // HPACK encodes various headers. Note that once WriteField(...) is + // called, the corresponding headers/continuation frame has to be sent + // because hpack.Encoder is stateful. 
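+	// NOTE(editor): that statefulness is also why header writes are
+	// serialized through writableChan; interleaving header blocks from two
+	// streams would corrupt the connection's shared HPACK dynamic table.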
+ t.hBuf.Reset()
+ t.hEnc.WriteField(hpack.HeaderField{Name: ":method", Value: "POST"})
+ t.hEnc.WriteField(hpack.HeaderField{Name: ":scheme", Value: t.scheme})
+ t.hEnc.WriteField(hpack.HeaderField{Name: ":path", Value: callHdr.Method})
+ t.hEnc.WriteField(hpack.HeaderField{Name: ":authority", Value: callHdr.Host})
+ t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"})
+ t.hEnc.WriteField(hpack.HeaderField{Name: "user-agent", Value: t.userAgent})
+ t.hEnc.WriteField(hpack.HeaderField{Name: "te", Value: "trailers"})
+
+ if callHdr.SendCompress != "" {
+ t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress})
+ }
+ if timeout > 0 {
+ t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-timeout", Value: timeoutEncode(timeout)})
+ }
+ for k, v := range authData {
+ t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: v})
+ }
+ var (
+ hasMD bool
+ endHeaders bool
+ )
+ if md, ok := metadata.FromContext(ctx); ok {
+ hasMD = true
+ for k, v := range md {
+ for _, entry := range v {
+ t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry})
+ }
+ }
+ }
+ first := true
+ // Send the headers in a single batch even when they span multiple frames.
+ for !endHeaders {
+ size := t.hBuf.Len()
+ if size > http2MaxFrameLen {
+ size = http2MaxFrameLen
+ } else {
+ endHeaders = true
+ }
+ var flush bool
+ if endHeaders && (hasMD || callHdr.Flush) {
+ flush = true
+ }
+ if first {
+ // Send a HeadersFrame to the server to start a new stream.
+ p := http2.HeadersFrameParam{
+ StreamID: s.id,
+ BlockFragment: t.hBuf.Next(size),
+ EndStream: false,
+ EndHeaders: endHeaders,
+ }
+ // Do a force flush for the buffered frames iff it is the last headers frame
+ // and there is header metadata to be sent. Otherwise, there is no flushing
+ // until the corresponding data frame is written.
+ err = t.framer.writeHeaders(flush, p)
+ first = false
+ } else {
+ // Send Continuation frames for the leftover headers.
+ err = t.framer.writeContinuation(flush, s.id, endHeaders, t.hBuf.Next(size))
+ }
+ if err != nil {
+ t.notifyError(err)
+ return nil, ConnectionErrorf("transport: %v", err)
+ }
+ }
+ t.writableChan <- 0
+ return s, nil
+}
+
+// CloseStream clears the footprint of a stream when the stream is not needed
+// any more. This must not be executed in the reader's goroutine.
+func (t *http2Client) CloseStream(s *Stream, err error) {
+ var updateStreams bool
+ t.mu.Lock()
+ if t.streamsQuota != nil {
+ updateStreams = true
+ }
+ delete(t.activeStreams, s.id)
+ t.mu.Unlock()
+ if updateStreams {
+ t.streamsQuota.add(1)
+ }
+ // In case stream sending and receiving are invoked in separate
+ // goroutines (e.g., bi-directional streaming), the caller needs
+ // to call cancel on the stream to interrupt the blocking on
+ // other goroutines.
+ s.cancel()
+ s.mu.Lock()
+ if q := s.fc.restoreConn(); q > 0 {
+ t.controlBuf.put(&windowUpdate{0, q})
+ }
+ if s.state == streamDone {
+ s.mu.Unlock()
+ return
+ }
+ if !s.headerDone {
+ close(s.headerChan)
+ s.headerDone = true
+ }
+ s.state = streamDone
+ s.mu.Unlock()
+ if _, ok := err.(StreamError); ok {
+ t.controlBuf.put(&resetStream{s.id, http2.ErrCodeCancel})
+ }
+}
+
+// Close kicks off the shutdown process of the transport. This should be called
+// only once on a transport. Once it is called, the transport should not be
+// accessed any more.
+func (t *http2Client) Close() (err error) { + t.mu.Lock() + if t.state == closing { + t.mu.Unlock() + return errors.New("transport: Close() was already called") + } + t.state = closing + t.mu.Unlock() + close(t.shutdownChan) + err = t.conn.Close() + t.mu.Lock() + streams := t.activeStreams + t.activeStreams = nil + t.mu.Unlock() + // Notify all active streams. + for _, s := range streams { + s.mu.Lock() + if !s.headerDone { + close(s.headerChan) + s.headerDone = true + } + s.mu.Unlock() + s.write(recvMsg{err: ErrConnClosing}) + } + return +} + +// Write formats the data into HTTP2 data frame(s) and sends it out. The caller +// should proceed only if Write returns nil. +// TODO(zhaoq): opts.Delay is ignored in this implementation. Support it later +// if it improves the performance. +func (t *http2Client) Write(s *Stream, data []byte, opts *Options) error { + r := bytes.NewBuffer(data) + for { + var p []byte + if r.Len() > 0 { + size := http2MaxFrameLen + s.sendQuotaPool.add(0) + // Wait until the stream has some quota to send the data. + sq, err := wait(s.ctx, t.shutdownChan, s.sendQuotaPool.acquire()) + if err != nil { + return err + } + t.sendQuotaPool.add(0) + // Wait until the transport has some quota to send the data. + tq, err := wait(s.ctx, t.shutdownChan, t.sendQuotaPool.acquire()) + if err != nil { + if _, ok := err.(StreamError); ok { + t.sendQuotaPool.cancel() + } + return err + } + if sq < size { + size = sq + } + if tq < size { + size = tq + } + p = r.Next(size) + ps := len(p) + if ps < sq { + // Overbooked stream quota. Return it back. + s.sendQuotaPool.add(sq - ps) + } + if ps < tq { + // Overbooked transport quota. Return it back. + t.sendQuotaPool.add(tq - ps) + } + } + var ( + endStream bool + forceFlush bool + ) + if opts.Last && r.Len() == 0 { + endStream = true + } + // Indicate there is a writer who is about to write a data frame. + t.framer.adjustNumWriters(1) + // Got some quota. Try to acquire writing privilege on the transport. + if _, err := wait(s.ctx, t.shutdownChan, t.writableChan); err != nil { + if t.framer.adjustNumWriters(-1) == 0 { + // This writer is the last one in this batch and has the + // responsibility to flush the buffered frames. It queues + // a flush request to controlBuf instead of flushing directly + // in order to avoid the race with other writing or flushing. + t.controlBuf.put(&flushIO{}) + } + return err + } + if r.Len() == 0 && t.framer.adjustNumWriters(0) == 1 { + // Do a force flush iff this is last frame for the entire gRPC message + // and the caller is the only writer at this moment. + forceFlush = true + } + // If WriteData fails, all the pending streams will be handled + // by http2Client.Close(). No explicit CloseStream() needs to be + // invoked. + if err := t.framer.writeData(forceFlush, s.id, endStream, p); err != nil { + t.notifyError(err) + return ConnectionErrorf("transport: %v", err) + } + if t.framer.adjustNumWriters(-1) == 0 { + t.framer.flushWrite() + } + t.writableChan <- 0 + if r.Len() == 0 { + break + } + } + if !opts.Last { + return nil + } + s.mu.Lock() + if s.state != streamDone { + if s.state == streamReadDone { + s.state = streamDone + } else { + s.state = streamWriteDone + } + } + s.mu.Unlock() + return nil +} + +func (t *http2Client) getStream(f http2.Frame) (*Stream, bool) { + t.mu.Lock() + defer t.mu.Unlock() + if t.activeStreams == nil { + // The transport is closing. 
+ return nil, false + } + if s, ok := t.activeStreams[f.Header().StreamID]; ok { + return s, true + } + return nil, false +} + +// updateWindow adjusts the inbound quota for the stream and the transport. +// Window updates will deliver to the controller for sending when +// the cumulative quota exceeds the corresponding threshold. +func (t *http2Client) updateWindow(s *Stream, n uint32) { + swu, cwu := s.fc.onRead(n) + if swu > 0 { + t.controlBuf.put(&windowUpdate{s.id, swu}) + } + if cwu > 0 { + t.controlBuf.put(&windowUpdate{0, cwu}) + } +} + +func (t *http2Client) handleData(f *http2.DataFrame) { + // Select the right stream to dispatch. + s, ok := t.getStream(f) + if !ok { + return + } + size := len(f.Data()) + if size > 0 { + if err := s.fc.onData(uint32(size)); err != nil { + if _, ok := err.(ConnectionError); ok { + t.notifyError(err) + return + } + s.mu.Lock() + if s.state == streamDone { + s.mu.Unlock() + return + } + s.state = streamDone + s.statusCode = codes.Internal + s.statusDesc = err.Error() + s.mu.Unlock() + s.write(recvMsg{err: io.EOF}) + t.controlBuf.put(&resetStream{s.id, http2.ErrCodeFlowControl}) + return + } + // TODO(bradfitz, zhaoq): A copy is required here because there is no + // guarantee f.Data() is consumed before the arrival of next frame. + // Can this copy be eliminated? + data := make([]byte, size) + copy(data, f.Data()) + s.write(recvMsg{data: data}) + } + // The server has closed the stream without sending trailers. Record that + // the read direction is closed, and set the status appropriately. + if f.FrameHeader.Flags.Has(http2.FlagDataEndStream) { + s.mu.Lock() + if s.state == streamWriteDone { + s.state = streamDone + } else { + s.state = streamReadDone + } + s.statusCode = codes.Internal + s.statusDesc = "server closed the stream without sending trailers" + s.mu.Unlock() + s.write(recvMsg{err: io.EOF}) + } +} + +func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { + s, ok := t.getStream(f) + if !ok { + return + } + s.mu.Lock() + if s.state == streamDone { + s.mu.Unlock() + return + } + s.state = streamDone + if !s.headerDone { + close(s.headerChan) + s.headerDone = true + } + s.statusCode, ok = http2RSTErrConvTab[http2.ErrCode(f.ErrCode)] + if !ok { + grpclog.Println("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error ", f.ErrCode) + } + s.mu.Unlock() + s.write(recvMsg{err: io.EOF}) +} + +func (t *http2Client) handleSettings(f *http2.SettingsFrame) { + if f.IsAck() { + return + } + var ss []http2.Setting + f.ForeachSetting(func(s http2.Setting) error { + ss = append(ss, s) + return nil + }) + // The settings will be applied once the ack is sent. + t.controlBuf.put(&settings{ack: true, ss: ss}) +} + +func (t *http2Client) handlePing(f *http2.PingFrame) { + pingAck := &ping{ack: true} + copy(pingAck.data[:], f.Data[:]) + t.controlBuf.put(pingAck) +} + +func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { + // TODO(zhaoq): GoAwayFrame handler to be implemented +} + +func (t *http2Client) handleWindowUpdate(f *http2.WindowUpdateFrame) { + id := f.Header().StreamID + incr := f.Increment + if id == 0 { + t.sendQuotaPool.add(int(incr)) + return + } + if s, ok := t.getStream(f); ok { + s.sendQuotaPool.add(int(incr)) + } +} + +// operateHeader takes action on the decoded headers. It returns the current +// stream if there are remaining headers on the wire (in the following +// Continuation frame). 
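+// For example, a header block larger than the peer's maximum frame size
+// arrives as a HEADERS frame (EndHeaders=false) followed by CONTINUATION
+// frames; the reader calls this once per frame, and it keeps returning s
+// until the frame carrying EndHeaders=true has been decoded.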
+func (t *http2Client) operateHeaders(hDec *hpackDecoder, s *Stream, frame headerFrame, endStream bool) (pendingStream *Stream) {
+ defer func() {
+ if pendingStream == nil {
+ hDec.state = decodeState{}
+ }
+ }()
+ endHeaders, err := hDec.decodeClientHTTP2Headers(frame)
+ if s == nil {
+ // s has been closed.
+ return nil
+ }
+ if err != nil {
+ s.write(recvMsg{err: err})
+ // Something went wrong; stop reading even when there are remaining headers.
+ return nil
+ }
+ if !endHeaders {
+ return s
+ }
+ s.mu.Lock()
+ if !endStream {
+ s.recvCompress = hDec.state.encoding
+ }
+ if !s.headerDone {
+ if !endStream && len(hDec.state.mdata) > 0 {
+ s.header = hDec.state.mdata
+ }
+ close(s.headerChan)
+ s.headerDone = true
+ }
+ if !endStream || s.state == streamDone {
+ s.mu.Unlock()
+ return nil
+ }
+
+ if len(hDec.state.mdata) > 0 {
+ s.trailer = hDec.state.mdata
+ }
+ s.state = streamDone
+ s.statusCode = hDec.state.statusCode
+ s.statusDesc = hDec.state.statusDesc
+ s.mu.Unlock()
+
+ s.write(recvMsg{err: io.EOF})
+ return nil
+}
+
+// reader runs as a separate goroutine in charge of reading data from the
+// network connection.
+//
+// TODO(zhaoq): currently one reader per transport. Investigate whether this is
+// optimal.
+// TODO(zhaoq): Check the validity of the incoming frame sequence.
+func (t *http2Client) reader() {
+ // Check the validity of the server preface.
+ frame, err := t.framer.readFrame()
+ if err != nil {
+ t.notifyError(err)
+ return
+ }
+ sf, ok := frame.(*http2.SettingsFrame)
+ if !ok {
+ // err is nil here since readFrame succeeded; report the bogus
+ // preface explicitly instead of notifying with a nil error.
+ t.notifyError(ConnectionErrorf("transport: first frame received is %T, want *http2.SettingsFrame", frame))
+ return
+ }
+ t.handleSettings(sf)
+
+ hDec := newHPACKDecoder()
+ var curStream *Stream
+ // Loop to keep reading incoming messages on this transport.
+ for {
+ frame, err := t.framer.readFrame()
+ if err != nil {
+ t.notifyError(err)
+ return
+ }
+ switch frame := frame.(type) {
+ case *http2.HeadersFrame:
+ // operateHeaders has to be invoked regardless of the value of curStream
+ // because the HPACK decoder needs to be updated using the received
+ // headers.
+ curStream, _ = t.getStream(frame)
+ endStream := frame.Header().Flags.Has(http2.FlagHeadersEndStream)
+ curStream = t.operateHeaders(hDec, curStream, frame, endStream)
+ case *http2.ContinuationFrame:
+ curStream = t.operateHeaders(hDec, curStream, frame, frame.HeadersEnded())
+ case *http2.DataFrame:
+ t.handleData(frame)
+ case *http2.RSTStreamFrame:
+ t.handleRSTStream(frame)
+ case *http2.SettingsFrame:
+ t.handleSettings(frame)
+ case *http2.PingFrame:
+ t.handlePing(frame)
+ case *http2.GoAwayFrame:
+ t.handleGoAway(frame)
+ case *http2.WindowUpdateFrame:
+ t.handleWindowUpdate(frame)
+ default:
+ grpclog.Printf("transport: http2Client.reader got unhandled frame type %v.", frame)
+ }
+ }
+}
+
+func (t *http2Client) applySettings(ss []http2.Setting) {
+ for _, s := range ss {
+ switch s.ID {
+ case http2.SettingMaxConcurrentStreams:
+ // TODO(zhaoq): This is a hack to avoid significant refactoring of the
+ // code to deal with the unrealistic int32 overflow. Probably will try
+ // to find a better way to handle this later.
+ if s.Val > math.MaxInt32 {
+ s.Val = math.MaxInt32
+ }
+ t.mu.Lock()
+ reset := t.streamsQuota != nil
+ if !reset {
+ t.streamsQuota = newQuotaPool(int(s.Val) - len(t.activeStreams))
+ }
+ ms := t.maxStreams
+ t.maxStreams = int(s.Val)
+ t.mu.Unlock()
+ if reset {
+ t.streamsQuota.reset(int(s.Val) - ms)
+ }
+ case http2.SettingInitialWindowSize:
+ t.mu.Lock()
+ for _, stream := range t.activeStreams {
+ // Adjust the sending quota for each stream.
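+ // E.g., if the peer raises SETTINGS_INITIAL_WINDOW_SIZE from
+ // the 65535-byte HTTP/2 default to 131070, each active stream's
+ // send quota pool is credited with the 65535-byte difference below.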
+ stream.sendQuotaPool.reset(int(s.Val - t.streamSendQuota)) + } + t.streamSendQuota = s.Val + t.mu.Unlock() + } + } +} + +// controller running in a separate goroutine takes charge of sending control +// frames (e.g., window update, reset stream, setting, etc.) to the server. +func (t *http2Client) controller() { + for { + select { + case i := <-t.controlBuf.get(): + t.controlBuf.load() + select { + case <-t.writableChan: + switch i := i.(type) { + case *windowUpdate: + t.framer.writeWindowUpdate(true, i.streamID, i.increment) + case *settings: + if i.ack { + t.framer.writeSettingsAck(true) + t.applySettings(i.ss) + } else { + t.framer.writeSettings(true, i.ss...) + } + case *resetStream: + t.framer.writeRSTStream(true, i.streamID, i.code) + case *flushIO: + t.framer.flushWrite() + case *ping: + t.framer.writePing(true, i.ack, i.data) + default: + grpclog.Printf("transport: http2Client.controller got unexpected item type %v\n", i) + } + t.writableChan <- 0 + continue + case <-t.shutdownChan: + return + } + case <-t.shutdownChan: + return + } + } +} + +func (t *http2Client) Error() <-chan struct{} { + return t.errorChan +} + +func (t *http2Client) notifyError(err error) { + t.mu.Lock() + defer t.mu.Unlock() + // make sure t.errorChan is closed only once. + if t.state == reachable { + t.state = unreachable + close(t.errorChan) + grpclog.Printf("transport: http2Client.notifyError got notified that the client transport was broken %v.", err) + } +} diff --git a/Godeps/_workspace/src/google.golang.org/grpc/transport/http2_server.go b/Godeps/_workspace/src/google.golang.org/grpc/transport/http2_server.go new file mode 100644 index 000000000000..cce2e12d914c --- /dev/null +++ b/Godeps/_workspace/src/google.golang.org/grpc/transport/http2_server.go @@ -0,0 +1,707 @@ +/* + * + * Copyright 2014, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ + +package transport + +import ( + "bytes" + "errors" + "io" + "math" + "net" + "strconv" + "sync" + + "golang.org/x/net/context" + "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" +) + +// ErrIllegalHeaderWrite indicates that setting header is illegal because of +// the stream's state. +var ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHeader was already called") + +// http2Server implements the ServerTransport interface with HTTP2. +type http2Server struct { + conn net.Conn + maxStreamID uint32 // max stream ID ever seen + authInfo credentials.AuthInfo // auth info about the connection + // writableChan synchronizes write access to the transport. + // A writer acquires the write lock by sending a value on writableChan + // and releases it by receiving from writableChan. + writableChan chan int + // shutdownChan is closed when Close is called. + // Blocking operations should select on shutdownChan to avoid + // blocking forever after Close. + shutdownChan chan struct{} + framer *framer + hBuf *bytes.Buffer // the buffer for HPACK encoding + hEnc *hpack.Encoder // HPACK encoder + + // The max number of concurrent streams. + maxStreams uint32 + // controlBuf delivers all the control related tasks (e.g., window + // updates, reset streams, and various settings) to the controller. + controlBuf *recvBuffer + fc *inFlow + // sendQuotaPool provides flow control to outbound message. + sendQuotaPool *quotaPool + + mu sync.Mutex // guard the following + state transportState + activeStreams map[uint32]*Stream + // the per-stream outbound flow control window size set by the peer. + streamSendQuota uint32 +} + +// newHTTP2Server constructs a ServerTransport based on HTTP2. ConnectionError is +// returned if something goes wrong. +func newHTTP2Server(conn net.Conn, maxStreams uint32, authInfo credentials.AuthInfo) (_ ServerTransport, err error) { + framer := newFramer(conn) + // Send initial settings as connection preface to client. + var settings []http2.Setting + // TODO(zhaoq): Have a better way to signal "no limit" because 0 is + // permitted in the HTTP2 spec. + if maxStreams == 0 { + maxStreams = math.MaxUint32 + } else { + settings = append(settings, http2.Setting{http2.SettingMaxConcurrentStreams, maxStreams}) + } + if initialWindowSize != defaultWindowSize { + settings = append(settings, http2.Setting{http2.SettingInitialWindowSize, uint32(initialWindowSize)}) + } + if err := framer.writeSettings(true, settings...); err != nil { + return nil, ConnectionErrorf("transport: %v", err) + } + // Adjust the connection flow control window if needed. 
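+ // (RFC 7540 fixes the initial connection-level window at 65535 bytes and
+ // SETTINGS cannot change it, so growing it to initialConnWindowSize takes
+ // an explicit WINDOW_UPDATE on stream 0, which is what the delta below sends.)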
+ if delta := uint32(initialConnWindowSize - defaultWindowSize); delta > 0 { + if err := framer.writeWindowUpdate(true, 0, delta); err != nil { + return nil, ConnectionErrorf("transport: %v", err) + } + } + var buf bytes.Buffer + t := &http2Server{ + conn: conn, + authInfo: authInfo, + framer: framer, + hBuf: &buf, + hEnc: hpack.NewEncoder(&buf), + maxStreams: maxStreams, + controlBuf: newRecvBuffer(), + fc: &inFlow{limit: initialConnWindowSize}, + sendQuotaPool: newQuotaPool(defaultWindowSize), + state: reachable, + writableChan: make(chan int, 1), + shutdownChan: make(chan struct{}), + activeStreams: make(map[uint32]*Stream), + streamSendQuota: defaultWindowSize, + } + go t.controller() + t.writableChan <- 0 + return t, nil +} + +// operateHeader takes action on the decoded headers. It returns the current +// stream if there are remaining headers on the wire (in the following +// Continuation frame). +func (t *http2Server) operateHeaders(hDec *hpackDecoder, s *Stream, frame headerFrame, endStream bool, handle func(*Stream)) (pendingStream *Stream) { + defer func() { + if pendingStream == nil { + hDec.state = decodeState{} + } + }() + endHeaders, err := hDec.decodeServerHTTP2Headers(frame) + if s == nil { + // s has been closed. + return nil + } + if err != nil { + grpclog.Printf("transport: http2Server.operateHeader found %v", err) + if se, ok := err.(StreamError); ok { + t.controlBuf.put(&resetStream{s.id, statusCodeConvTab[se.Code]}) + } + return nil + } + if endStream { + // s is just created by the caller. No lock needed. + s.state = streamReadDone + } + if !endHeaders { + return s + } + s.recvCompress = hDec.state.encoding + if hDec.state.timeoutSet { + s.ctx, s.cancel = context.WithTimeout(context.TODO(), hDec.state.timeout) + } else { + s.ctx, s.cancel = context.WithCancel(context.TODO()) + } + pr := &peer.Peer{ + Addr: t.conn.RemoteAddr(), + } + // Attach Auth info if there is any. + if t.authInfo != nil { + pr.AuthInfo = t.authInfo + } + s.ctx = peer.NewContext(s.ctx, pr) + // Cache the current stream to the context so that the server application + // can find out. Required when the server wants to send some metadata + // back to the client (unary call only). + s.ctx = newContextWithStream(s.ctx, s) + // Attach the received metadata to the context. + if len(hDec.state.mdata) > 0 { + s.ctx = metadata.NewContext(s.ctx, hDec.state.mdata) + } + + s.dec = &recvBufferReader{ + ctx: s.ctx, + recv: s.buf, + } + s.recvCompress = hDec.state.encoding + s.method = hDec.state.method + t.mu.Lock() + if t.state != reachable { + t.mu.Unlock() + return nil + } + if uint32(len(t.activeStreams)) >= t.maxStreams { + t.mu.Unlock() + t.controlBuf.put(&resetStream{s.id, http2.ErrCodeRefusedStream}) + return nil + } + s.sendQuotaPool = newQuotaPool(int(t.streamSendQuota)) + t.activeStreams[s.id] = s + t.mu.Unlock() + s.windowHandler = func(n int) { + t.updateWindow(s, uint32(n)) + } + handle(s) + return nil +} + +// HandleStreams receives incoming streams using the given handler. This is +// typically run in a separate goroutine. +func (t *http2Server) HandleStreams(handle func(*Stream)) { + // Check the validity of client preface. 
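+ // The preface is the fixed 24-byte string "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"
+ // (http2.ClientPreface); the client transport sends it first thing in
+ // newHTTP2Client, so anything else here means the peer is not speaking
+ // gRPC over HTTP/2.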
+ preface := make([]byte, len(clientPreface)) + if _, err := io.ReadFull(t.conn, preface); err != nil { + grpclog.Printf("transport: http2Server.HandleStreams failed to receive the preface from client: %v", err) + t.Close() + return + } + if !bytes.Equal(preface, clientPreface) { + grpclog.Printf("transport: http2Server.HandleStreams received bogus greeting from client: %q", preface) + t.Close() + return + } + + frame, err := t.framer.readFrame() + if err != nil { + grpclog.Printf("transport: http2Server.HandleStreams failed to read frame: %v", err) + t.Close() + return + } + sf, ok := frame.(*http2.SettingsFrame) + if !ok { + grpclog.Printf("transport: http2Server.HandleStreams saw invalid preface type %T from client", frame) + t.Close() + return + } + t.handleSettings(sf) + + hDec := newHPACKDecoder() + var curStream *Stream + for { + frame, err := t.framer.readFrame() + if err != nil { + t.Close() + return + } + switch frame := frame.(type) { + case *http2.HeadersFrame: + id := frame.Header().StreamID + if id%2 != 1 || id <= t.maxStreamID { + // illegal gRPC stream id. + grpclog.Println("transport: http2Server.HandleStreams received an illegal stream id: ", id) + t.Close() + break + } + t.maxStreamID = id + buf := newRecvBuffer() + fc := &inFlow{ + limit: initialWindowSize, + conn: t.fc, + } + curStream = &Stream{ + id: frame.Header().StreamID, + st: t, + buf: buf, + fc: fc, + } + endStream := frame.Header().Flags.Has(http2.FlagHeadersEndStream) + curStream = t.operateHeaders(hDec, curStream, frame, endStream, handle) + case *http2.ContinuationFrame: + curStream = t.operateHeaders(hDec, curStream, frame, frame.HeadersEnded(), handle) + case *http2.DataFrame: + t.handleData(frame) + case *http2.RSTStreamFrame: + t.handleRSTStream(frame) + case *http2.SettingsFrame: + t.handleSettings(frame) + case *http2.PingFrame: + t.handlePing(frame) + case *http2.WindowUpdateFrame: + t.handleWindowUpdate(frame) + case *http2.GoAwayFrame: + break + default: + grpclog.Printf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame) + } + } +} + +func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) { + t.mu.Lock() + defer t.mu.Unlock() + if t.activeStreams == nil { + // The transport is closing. + return nil, false + } + s, ok := t.activeStreams[f.Header().StreamID] + if !ok { + // The stream is already done. + return nil, false + } + return s, true +} + +// updateWindow adjusts the inbound quota for the stream and the transport. +// Window updates will deliver to the controller for sending when +// the cumulative quota exceeds the corresponding threshold. +func (t *http2Server) updateWindow(s *Stream, n uint32) { + swu, cwu := s.fc.onRead(n) + if swu > 0 { + t.controlBuf.put(&windowUpdate{s.id, swu}) + } + if cwu > 0 { + t.controlBuf.put(&windowUpdate{0, cwu}) + } +} + +func (t *http2Server) handleData(f *http2.DataFrame) { + // Select the right stream to dispatch. + s, ok := t.getStream(f) + if !ok { + return + } + size := len(f.Data()) + if size > 0 { + if err := s.fc.onData(uint32(size)); err != nil { + if _, ok := err.(ConnectionError); ok { + grpclog.Printf("transport: http2Server %v", err) + t.Close() + return + } + t.closeStream(s) + t.controlBuf.put(&resetStream{s.id, http2.ErrCodeFlowControl}) + return + } + // TODO(bradfitz, zhaoq): A copy is required here because there is no + // guarantee f.Data() is consumed before the arrival of next frame. + // Can this copy be eliminated? 
+ data := make([]byte, size)
+ copy(data, f.Data())
+ s.write(recvMsg{data: data})
+ }
+ if f.Header().Flags.Has(http2.FlagDataEndStream) {
+ // Received the end of stream from the client.
+ s.mu.Lock()
+ if s.state != streamDone {
+ if s.state == streamWriteDone {
+ s.state = streamDone
+ } else {
+ s.state = streamReadDone
+ }
+ }
+ s.mu.Unlock()
+ s.write(recvMsg{err: io.EOF})
+ }
+}
+
+func (t *http2Server) handleRSTStream(f *http2.RSTStreamFrame) {
+ s, ok := t.getStream(f)
+ if !ok {
+ return
+ }
+ t.closeStream(s)
+}
+
+func (t *http2Server) handleSettings(f *http2.SettingsFrame) {
+ if f.IsAck() {
+ return
+ }
+ var ss []http2.Setting
+ f.ForeachSetting(func(s http2.Setting) error {
+ ss = append(ss, s)
+ return nil
+ })
+ // The settings will be applied once the ack is sent.
+ t.controlBuf.put(&settings{ack: true, ss: ss})
+}
+
+func (t *http2Server) handlePing(f *http2.PingFrame) {
+ pingAck := &ping{ack: true}
+ copy(pingAck.data[:], f.Data[:])
+ t.controlBuf.put(pingAck)
+}
+
+func (t *http2Server) handleWindowUpdate(f *http2.WindowUpdateFrame) {
+ id := f.Header().StreamID
+ incr := f.Increment
+ if id == 0 {
+ t.sendQuotaPool.add(int(incr))
+ return
+ }
+ if s, ok := t.getStream(f); ok {
+ s.sendQuotaPool.add(int(incr))
+ }
+}
+
+func (t *http2Server) writeHeaders(s *Stream, b *bytes.Buffer, endStream bool) error {
+ first := true
+ endHeaders := false
+ var err error
+ // Send the headers in a single batch.
+ for !endHeaders {
+ size := t.hBuf.Len()
+ if size > http2MaxFrameLen {
+ size = http2MaxFrameLen
+ } else {
+ endHeaders = true
+ }
+ if first {
+ p := http2.HeadersFrameParam{
+ StreamID: s.id,
+ BlockFragment: b.Next(size),
+ EndStream: endStream,
+ EndHeaders: endHeaders,
+ }
+ err = t.framer.writeHeaders(endHeaders, p)
+ first = false
+ } else {
+ err = t.framer.writeContinuation(endHeaders, s.id, endHeaders, b.Next(size))
+ }
+ if err != nil {
+ t.Close()
+ return ConnectionErrorf("transport: %v", err)
+ }
+ }
+ return nil
+}
+
+// WriteHeader sends the header metadata md back to the client.
+func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
+ s.mu.Lock()
+ if s.headerOk || s.state == streamDone {
+ s.mu.Unlock()
+ return ErrIllegalHeaderWrite
+ }
+ s.headerOk = true
+ s.mu.Unlock()
+ if _, err := wait(s.ctx, t.shutdownChan, t.writableChan); err != nil {
+ return err
+ }
+ t.hBuf.Reset()
+ t.hEnc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
+ t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"})
+ if s.sendCompress != "" {
+ t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress})
+ }
+ for k, v := range md {
+ for _, entry := range v {
+ t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry})
+ }
+ }
+ if err := t.writeHeaders(s, t.hBuf, false); err != nil {
+ return err
+ }
+ t.writableChan <- 0
+ return nil
+}
+
+// WriteStatus sends the stream status to the client and terminates the stream.
+// No further I/O operations can be performed on this stream afterwards.
+// TODO(zhaoq): Now it indicates the end of the entire stream. Revisit if early
+// OK is adopted.
+func (t *http2Server) WriteStatus(s *Stream, statusCode codes.Code, statusDesc string) error {
+ var headersSent bool
+ s.mu.Lock()
+ if s.state == streamDone {
+ s.mu.Unlock()
+ return nil
+ }
+ if s.headerOk {
+ headersSent = true
+ }
+ s.mu.Unlock()
+ if _, err := wait(s.ctx, t.shutdownChan, t.writableChan); err != nil {
+ return err
+ }
+ t.hBuf.Reset()
+ if !headersSent {
+ t.hEnc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
+ t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"})
+ }
+ t.hEnc.WriteField(
+ hpack.HeaderField{
+ Name: "grpc-status",
+ Value: strconv.Itoa(int(statusCode)),
+ })
+ t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-message", Value: statusDesc})
+ // Attach the trailer metadata.
+ for k, v := range s.trailer {
+ for _, entry := range v {
+ t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry})
+ }
+ }
+ if err := t.writeHeaders(s, t.hBuf, true); err != nil {
+ t.Close()
+ return err
+ }
+ t.closeStream(s)
+ t.writableChan <- 0
+ return nil
+}
+
+// Write converts the data into HTTP2 data frame(s) and sends it out. A non-nil
+// error is returned if it fails (e.g., framing error, transport error).
+func (t *http2Server) Write(s *Stream, data []byte, opts *Options) error {
+ // TODO(zhaoq): Support multi-writers for a single stream.
+ var writeHeaderFrame bool
+ s.mu.Lock()
+ if !s.headerOk {
+ writeHeaderFrame = true
+ s.headerOk = true
+ }
+ s.mu.Unlock()
+ if writeHeaderFrame {
+ if _, err := wait(s.ctx, t.shutdownChan, t.writableChan); err != nil {
+ return err
+ }
+ t.hBuf.Reset()
+ t.hEnc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
+ t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"})
+ if s.sendCompress != "" {
+ t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress})
+ }
+ p := http2.HeadersFrameParam{
+ StreamID: s.id,
+ BlockFragment: t.hBuf.Bytes(),
+ EndHeaders: true,
+ }
+ if err := t.framer.writeHeaders(false, p); err != nil {
+ t.Close()
+ return ConnectionErrorf("transport: %v", err)
+ }
+ t.writableChan <- 0
+ }
+ r := bytes.NewBuffer(data)
+ for {
+ if r.Len() == 0 {
+ return nil
+ }
+ size := http2MaxFrameLen
+ s.sendQuotaPool.add(0)
+ // Wait until the stream has some quota to send the data.
+ sq, err := wait(s.ctx, t.shutdownChan, s.sendQuotaPool.acquire())
+ if err != nil {
+ return err
+ }
+ t.sendQuotaPool.add(0)
+ // Wait until the transport has some quota to send the data.
+ tq, err := wait(s.ctx, t.shutdownChan, t.sendQuotaPool.acquire())
+ if err != nil {
+ if _, ok := err.(StreamError); ok {
+ t.sendQuotaPool.cancel()
+ }
+ return err
+ }
+ if sq < size {
+ size = sq
+ }
+ if tq < size {
+ size = tq
+ }
+ p := r.Next(size)
+ ps := len(p)
+ if ps < sq {
+ // Overbooked stream quota. Return it back.
+ s.sendQuotaPool.add(sq - ps)
+ }
+ if ps < tq {
+ // Overbooked transport quota. Return it back.
+ t.sendQuotaPool.add(tq - ps)
+ }
+ t.framer.adjustNumWriters(1)
+ // Got some quota. Try to acquire writing privilege on the
+ // transport.
+ if _, err := wait(s.ctx, t.shutdownChan, t.writableChan); err != nil {
+ if t.framer.adjustNumWriters(-1) == 0 {
+ // This writer is the last one in this batch and has the
+ // responsibility to flush the buffered frames. It queues
+ // a flush request to controlBuf instead of flushing directly
+ // in order to avoid the race with other writing or flushing.
+ t.controlBuf.put(&flushIO{})
+ }
+ return err
+ }
+ var forceFlush bool
+ if r.Len() == 0 && t.framer.adjustNumWriters(0) == 1 && !opts.Last {
+ forceFlush = true
+ }
+ if err := t.framer.writeData(forceFlush, s.id, false, p); err != nil {
+ t.Close()
+ return ConnectionErrorf("transport: %v", err)
+ }
+ if t.framer.adjustNumWriters(-1) == 0 {
+ t.framer.flushWrite()
+ }
+ t.writableChan <- 0
+ }
+}
+
+func (t *http2Server) applySettings(ss []http2.Setting) {
+ for _, s := range ss {
+ if s.ID == http2.SettingInitialWindowSize {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+ for _, stream := range t.activeStreams {
+ stream.sendQuotaPool.reset(int(s.Val - t.streamSendQuota))
+ }
+ t.streamSendQuota = s.Val
+ }
+ }
+}
+
+// controller, running in a separate goroutine, takes charge of sending control
+// frames (e.g., window update, reset stream, setting, etc.) to the client.
+func (t *http2Server) controller() {
+ for {
+ select {
+ case i := <-t.controlBuf.get():
+ t.controlBuf.load()
+ select {
+ case <-t.writableChan:
+ switch i := i.(type) {
+ case *windowUpdate:
+ t.framer.writeWindowUpdate(true, i.streamID, i.increment)
+ case *settings:
+ if i.ack {
+ t.framer.writeSettingsAck(true)
+ t.applySettings(i.ss)
+ } else {
+ t.framer.writeSettings(true, i.ss...)
+ }
+ case *resetStream:
+ t.framer.writeRSTStream(true, i.streamID, i.code)
+ case *flushIO:
+ t.framer.flushWrite()
+ case *ping:
+ t.framer.writePing(true, i.ack, i.data)
+ default:
+ grpclog.Printf("transport: http2Server.controller got unexpected item type %v\n", i)
+ }
+ t.writableChan <- 0
+ continue
+ case <-t.shutdownChan:
+ return
+ }
+ case <-t.shutdownChan:
+ return
+ }
+ }
+}
+
+// Close starts shutting down the http2Server transport.
+// TODO(zhaoq): Now the destruction is not blocked on any pending streams. This
+// could cause some resource issue. Revisit this later.
+func (t *http2Server) Close() (err error) {
+ t.mu.Lock()
+ if t.state == closing {
+ t.mu.Unlock()
+ return errors.New("transport: Close() was already called")
+ }
+ t.state = closing
+ streams := t.activeStreams
+ t.activeStreams = nil
+ t.mu.Unlock()
+ close(t.shutdownChan)
+ err = t.conn.Close()
+ // Cancel all active streams.
+ for _, s := range streams {
+ s.cancel()
+ }
+ return
+}
+
+// closeStream clears the footprint of a stream when the stream is not needed
+// any more.
+func (t *http2Server) closeStream(s *Stream) {
+ t.mu.Lock()
+ delete(t.activeStreams, s.id)
+ t.mu.Unlock()
+ if q := s.fc.restoreConn(); q > 0 {
+ t.controlBuf.put(&windowUpdate{0, q})
+ }
+ s.mu.Lock()
+ if s.state == streamDone {
+ s.mu.Unlock()
+ return
+ }
+ s.state = streamDone
+ s.mu.Unlock()
+ // In case stream sending and receiving are invoked in separate
+ // goroutines (e.g., bi-directional streaming), cancel needs to be
+ // called to interrupt the potential blocking on other goroutines.
+ s.cancel()
+}
+
+func (t *http2Server) RemoteAddr() net.Addr {
+ return t.conn.RemoteAddr()
+}
diff --git a/Godeps/_workspace/src/google.golang.org/grpc/transport/http_util.go b/Godeps/_workspace/src/google.golang.org/grpc/transport/http_util.go
new file mode 100644
index 000000000000..f9d9fdf0afdc
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/grpc/transport/http_util.go
@@ -0,0 +1,454 @@
+/*
+ *
+ * Copyright 2014, Google Inc.
+ * All rights reserved.
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +package transport + +import ( + "bufio" + "fmt" + "io" + "net" + "strconv" + "strings" + "sync/atomic" + "time" + + "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" +) + +const ( + // The primary user agent + primaryUA = "grpc-go/0.11" + // http2MaxFrameLen specifies the max length of a HTTP2 frame. + http2MaxFrameLen = 16384 // 16KB frame + // http://http2.github.io/http2-spec/#SettingValues + http2InitHeaderTableSize = 4096 + // http2IOBufSize specifies the buffer size for sending frames. + http2IOBufSize = 32 * 1024 +) + +var ( + clientPreface = []byte(http2.ClientPreface) + http2RSTErrConvTab = map[http2.ErrCode]codes.Code{ + http2.ErrCodeNo: codes.Internal, + http2.ErrCodeProtocol: codes.Internal, + http2.ErrCodeInternal: codes.Internal, + http2.ErrCodeFlowControl: codes.ResourceExhausted, + http2.ErrCodeSettingsTimeout: codes.Internal, + http2.ErrCodeFrameSize: codes.Internal, + http2.ErrCodeRefusedStream: codes.Unavailable, + http2.ErrCodeCancel: codes.Canceled, + http2.ErrCodeCompression: codes.Internal, + http2.ErrCodeConnect: codes.Internal, + http2.ErrCodeEnhanceYourCalm: codes.ResourceExhausted, + http2.ErrCodeInadequateSecurity: codes.PermissionDenied, + } + statusCodeConvTab = map[codes.Code]http2.ErrCode{ + codes.Internal: http2.ErrCodeInternal, + codes.Canceled: http2.ErrCodeCancel, + codes.Unavailable: http2.ErrCodeRefusedStream, + codes.ResourceExhausted: http2.ErrCodeEnhanceYourCalm, + codes.PermissionDenied: http2.ErrCodeInadequateSecurity, + } +) + +// Records the states during HPACK decoding. Must be reset once the +// decoding of the entire headers are finished. +type decodeState struct { + encoding string + // statusCode caches the stream status received from the trailer + // the server sent. Client side only. + statusCode codes.Code + statusDesc string + // Server side only fields. 
+ timeoutSet bool + timeout time.Duration + method string + // key-value metadata map from the peer. + mdata map[string][]string +} + +// An hpackDecoder decodes HTTP2 headers which may span multiple frames. +type hpackDecoder struct { + h *hpack.Decoder + state decodeState + err error // The err when decoding +} + +// A headerFrame is either a http2.HeaderFrame or http2.ContinuationFrame. +type headerFrame interface { + Header() http2.FrameHeader + HeaderBlockFragment() []byte + HeadersEnded() bool +} + +// isReservedHeader checks whether hdr belongs to HTTP2 headers +// reserved by gRPC protocol. Any other headers are classified as the +// user-specified metadata. +func isReservedHeader(hdr string) bool { + if hdr[0] == ':' { + return true + } + switch hdr { + case "content-type", + "grpc-message-type", + "grpc-encoding", + "grpc-message", + "grpc-status", + "grpc-timeout", + "te": + return true + default: + return false + } +} + +func newHPACKDecoder() *hpackDecoder { + d := &hpackDecoder{} + d.h = hpack.NewDecoder(http2InitHeaderTableSize, func(f hpack.HeaderField) { + switch f.Name { + case "content-type": + if !strings.Contains(f.Value, "application/grpc") { + d.err = StreamErrorf(codes.FailedPrecondition, "transport: received the unexpected header") + return + } + case "grpc-encoding": + d.state.encoding = f.Value + case "grpc-status": + code, err := strconv.Atoi(f.Value) + if err != nil { + d.err = StreamErrorf(codes.Internal, "transport: malformed grpc-status: %v", err) + return + } + d.state.statusCode = codes.Code(code) + case "grpc-message": + d.state.statusDesc = f.Value + case "grpc-timeout": + d.state.timeoutSet = true + var err error + d.state.timeout, err = timeoutDecode(f.Value) + if err != nil { + d.err = StreamErrorf(codes.Internal, "transport: malformed time-out: %v", err) + return + } + case ":path": + d.state.method = f.Value + default: + if !isReservedHeader(f.Name) { + if f.Name == "user-agent" { + i := strings.LastIndex(f.Value, " ") + if i == -1 { + // There is no application user agent string being set. + return + } + // Extract the application user agent string. 
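+ // e.g. "myapp/0.1 grpc-go/0.11" is trimmed back to
+ // "myapp/0.1", undoing the join the client side did
+ // with primaryUA.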
+ f.Value = f.Value[:i] + } + if d.state.mdata == nil { + d.state.mdata = make(map[string][]string) + } + k, v, err := metadata.DecodeKeyValue(f.Name, f.Value) + if err != nil { + grpclog.Printf("Failed to decode (%q, %q): %v", f.Name, f.Value, err) + return + } + d.state.mdata[k] = append(d.state.mdata[k], v) + } + } + }) + return d +} + +func (d *hpackDecoder) decodeClientHTTP2Headers(frame headerFrame) (endHeaders bool, err error) { + d.err = nil + _, err = d.h.Write(frame.HeaderBlockFragment()) + if err != nil { + err = StreamErrorf(codes.Internal, "transport: HPACK header decode error: %v", err) + } + + if frame.HeadersEnded() { + if closeErr := d.h.Close(); closeErr != nil && err == nil { + err = StreamErrorf(codes.Internal, "transport: HPACK decoder close error: %v", closeErr) + } + endHeaders = true + } + + if err == nil && d.err != nil { + err = d.err + } + return +} + +func (d *hpackDecoder) decodeServerHTTP2Headers(frame headerFrame) (endHeaders bool, err error) { + d.err = nil + _, err = d.h.Write(frame.HeaderBlockFragment()) + if err != nil { + err = StreamErrorf(codes.Internal, "transport: HPACK header decode error: %v", err) + } + + if frame.HeadersEnded() { + if closeErr := d.h.Close(); closeErr != nil && err == nil { + err = StreamErrorf(codes.Internal, "transport: HPACK decoder close error: %v", closeErr) + } + endHeaders = true + } + + if err == nil && d.err != nil { + err = d.err + } + return +} + +type timeoutUnit uint8 + +const ( + hour timeoutUnit = 'H' + minute timeoutUnit = 'M' + second timeoutUnit = 'S' + millisecond timeoutUnit = 'm' + microsecond timeoutUnit = 'u' + nanosecond timeoutUnit = 'n' +) + +func timeoutUnitToDuration(u timeoutUnit) (d time.Duration, ok bool) { + switch u { + case hour: + return time.Hour, true + case minute: + return time.Minute, true + case second: + return time.Second, true + case millisecond: + return time.Millisecond, true + case microsecond: + return time.Microsecond, true + case nanosecond: + return time.Nanosecond, true + default: + } + return +} + +const maxTimeoutValue int64 = 100000000 - 1 + +// div does integer division and round-up the result. Note that this is +// equivalent to (d+r-1)/r but has less chance to overflow. +func div(d, r time.Duration) int64 { + if m := d % r; m > 0 { + return int64(d/r + 1) + } + return int64(d / r) +} + +// TODO(zhaoq): It is the simplistic and not bandwidth efficient. Improve it. +func timeoutEncode(t time.Duration) string { + if d := div(t, time.Nanosecond); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "n" + } + if d := div(t, time.Microsecond); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "u" + } + if d := div(t, time.Millisecond); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "m" + } + if d := div(t, time.Second); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "S" + } + if d := div(t, time.Minute); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "M" + } + // Note that maxTimeoutValue * time.Hour > MaxInt64. 
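+ // For example, one second does not fit as nanoseconds (1e9 exceeds
+ // maxTimeoutValue), so timeoutEncode(time.Second) yields "1000000u",
+ // which timeoutDecode maps back to exactly one second.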
+ return strconv.FormatInt(div(t, time.Hour), 10) + "H" +} + +func timeoutDecode(s string) (time.Duration, error) { + size := len(s) + if size < 2 { + return 0, fmt.Errorf("transport: timeout string is too short: %q", s) + } + unit := timeoutUnit(s[size-1]) + d, ok := timeoutUnitToDuration(unit) + if !ok { + return 0, fmt.Errorf("transport: timeout unit is not recognized: %q", s) + } + t, err := strconv.ParseInt(s[:size-1], 10, 64) + if err != nil { + return 0, err + } + return d * time.Duration(t), nil +} + +type framer struct { + numWriters int32 + reader io.Reader + writer *bufio.Writer + fr *http2.Framer +} + +func newFramer(conn net.Conn) *framer { + f := &framer{ + reader: conn, + writer: bufio.NewWriterSize(conn, http2IOBufSize), + } + f.fr = http2.NewFramer(f.writer, f.reader) + return f +} + +func (f *framer) adjustNumWriters(i int32) int32 { + return atomic.AddInt32(&f.numWriters, i) +} + +// The following writeXXX functions can only be called when the caller gets +// unblocked from writableChan channel (i.e., owns the privilege to write). + +func (f *framer) writeContinuation(forceFlush bool, streamID uint32, endHeaders bool, headerBlockFragment []byte) error { + if err := f.fr.WriteContinuation(streamID, endHeaders, headerBlockFragment); err != nil { + return err + } + if forceFlush { + return f.writer.Flush() + } + return nil +} + +func (f *framer) writeData(forceFlush bool, streamID uint32, endStream bool, data []byte) error { + if err := f.fr.WriteData(streamID, endStream, data); err != nil { + return err + } + if forceFlush { + return f.writer.Flush() + } + return nil +} + +func (f *framer) writeGoAway(forceFlush bool, maxStreamID uint32, code http2.ErrCode, debugData []byte) error { + if err := f.fr.WriteGoAway(maxStreamID, code, debugData); err != nil { + return err + } + if forceFlush { + return f.writer.Flush() + } + return nil +} + +func (f *framer) writeHeaders(forceFlush bool, p http2.HeadersFrameParam) error { + if err := f.fr.WriteHeaders(p); err != nil { + return err + } + if forceFlush { + return f.writer.Flush() + } + return nil +} + +func (f *framer) writePing(forceFlush, ack bool, data [8]byte) error { + if err := f.fr.WritePing(ack, data); err != nil { + return err + } + if forceFlush { + return f.writer.Flush() + } + return nil +} + +func (f *framer) writePriority(forceFlush bool, streamID uint32, p http2.PriorityParam) error { + if err := f.fr.WritePriority(streamID, p); err != nil { + return err + } + if forceFlush { + return f.writer.Flush() + } + return nil +} + +func (f *framer) writePushPromise(forceFlush bool, p http2.PushPromiseParam) error { + if err := f.fr.WritePushPromise(p); err != nil { + return err + } + if forceFlush { + return f.writer.Flush() + } + return nil +} + +func (f *framer) writeRSTStream(forceFlush bool, streamID uint32, code http2.ErrCode) error { + if err := f.fr.WriteRSTStream(streamID, code); err != nil { + return err + } + if forceFlush { + return f.writer.Flush() + } + return nil +} + +func (f *framer) writeSettings(forceFlush bool, settings ...http2.Setting) error { + if err := f.fr.WriteSettings(settings...); err != nil { + return err + } + if forceFlush { + return f.writer.Flush() + } + return nil +} + +func (f *framer) writeSettingsAck(forceFlush bool) error { + if err := f.fr.WriteSettingsAck(); err != nil { + return err + } + if forceFlush { + return f.writer.Flush() + } + return nil +} + +func (f *framer) writeWindowUpdate(forceFlush bool, streamID, incr uint32) error { + if err := f.fr.WriteWindowUpdate(streamID, 
incr); err != nil {
+ return err
+ }
+ if forceFlush {
+ return f.writer.Flush()
+ }
+ return nil
+}
+
+func (f *framer) flushWrite() error {
+ return f.writer.Flush()
+}
+
+func (f *framer) readFrame() (http2.Frame, error) {
+ return f.fr.ReadFrame()
+}
diff --git a/Godeps/_workspace/src/google.golang.org/grpc/transport/transport.go b/Godeps/_workspace/src/google.golang.org/grpc/transport/transport.go
new file mode 100644
index 000000000000..d99233d2ea24
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/grpc/transport/transport.go
@@ -0,0 +1,488 @@
+/*
+ *
+ * Copyright 2014, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/*
+Package transport defines and implements a message-oriented communication
+channel to complete various transactions (e.g., an RPC).
+*/
+package transport
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "sync"
+ "time"
+
+ "golang.org/x/net/context"
+ "golang.org/x/net/trace"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/credentials"
+ "google.golang.org/grpc/metadata"
+)
+
+// recvMsg represents the received msg from the transport. All transport
+// protocol specific info has been removed.
+type recvMsg struct {
+ data []byte
+ // nil: received some data
+ // io.EOF: stream is completed. data is nil.
+ // other non-nil error: transport failure. data is nil.
+ err error
+}
+
+func (recvMsg) isItem() bool {
+ return true
+}
+
+// All items in and out of a recvBuffer should be the same type.
+type item interface {
+ isItem() bool
+}
+
+// recvBuffer is an unbounded channel of item.
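+//
+// put never blocks: at most one item is staged on the channel and the rest
+// queue in backlog. A consumer pairs each receive with a load to stage the
+// next item, as recvBufferReader.Read does below (illustrative sketch;
+// handle is a hypothetical callback):
+//
+//	i := <-b.get()
+//	b.load()
+//	handle(i)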
+type recvBuffer struct {
+ c chan item
+ mu sync.Mutex
+ backlog []item
+}
+
+func newRecvBuffer() *recvBuffer {
+ b := &recvBuffer{
+ c: make(chan item, 1),
+ }
+ return b
+}
+
+func (b *recvBuffer) put(r item) {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ b.backlog = append(b.backlog, r)
+ select {
+ case b.c <- b.backlog[0]:
+ b.backlog = b.backlog[1:]
+ default:
+ }
+}
+
+func (b *recvBuffer) load() {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ if len(b.backlog) > 0 {
+ select {
+ case b.c <- b.backlog[0]:
+ b.backlog = b.backlog[1:]
+ default:
+ }
+ }
+}
+
+// get returns the channel that receives an item in the buffer.
+//
+// Upon receipt of an item, the caller should call load to send another
+// item onto the channel if there is any.
+func (b *recvBuffer) get() <-chan item {
+ return b.c
+}
+
+// recvBufferReader implements the io.Reader interface to read the data from
+// recvBuffer.
+type recvBufferReader struct {
+ ctx context.Context
+ recv *recvBuffer
+ last *bytes.Reader // Stores the remaining data in the previous calls.
+ err error
+}
+
+// Read reads the next len(p) bytes from last. If last is drained, it tries to
+// read additional data from recv. It blocks if there is no additional data
+// available in recv. If Read returns any non-nil error, it will continue to
+// return that error.
+func (r *recvBufferReader) Read(p []byte) (n int, err error) {
+ if r.err != nil {
+ return 0, r.err
+ }
+ defer func() { r.err = err }()
+ if r.last != nil && r.last.Len() > 0 {
+ // Read remaining data left in last call.
+ return r.last.Read(p)
+ }
+ select {
+ case <-r.ctx.Done():
+ return 0, ContextErr(r.ctx.Err())
+ case i := <-r.recv.get():
+ r.recv.load()
+ m := i.(*recvMsg)
+ if m.err != nil {
+ return 0, m.err
+ }
+ r.last = bytes.NewReader(m.data)
+ return r.last.Read(p)
+ }
+}
+
+type streamState uint8
+
+const (
+ streamActive streamState = iota
+ streamWriteDone // EndStream sent
+ streamReadDone // EndStream received
+ streamDone // sendDone and recvDone or RSTStreamFrame is sent or received.
+)
+
+// Stream represents an RPC in the transport layer.
+type Stream struct {
+ id uint32
+ // nil for client side Stream.
+ st ServerTransport
+ // ctx is the associated context of the stream.
+ ctx context.Context
+ cancel context.CancelFunc
+ // method records the associated RPC method of the stream.
+ method string
+ recvCompress string
+ sendCompress string
+ buf *recvBuffer
+ dec io.Reader
+ fc *inFlow
+ recvQuota uint32
+ // The accumulated inbound quota pending for window update.
+ updateQuota uint32
+ // The handler to control the window update procedure for both this
+ // particular stream and the associated transport.
+ windowHandler func(int)
+
+ sendQuotaPool *quotaPool
+ // Close headerChan to indicate the end of reception of header metadata.
+ headerChan chan struct{}
+ // header caches the received header metadata.
+ header metadata.MD
+ // The key-value map of trailer metadata.
+ trailer metadata.MD
+
+ mu sync.RWMutex // guard the following
+ // headerOk becomes true when the first header is about to be sent.
+ headerOk bool
+ state streamState
+ // true iff headerChan is closed. Used to avoid closing headerChan
+ // multiple times.
+ headerDone bool
+ // the status received from the server.
+ statusCode codes.Code
+ statusDesc string
+}
+
+// RecvCompress returns the compression algorithm applied to the inbound
+// message. It is an empty string if there is no compression applied.
+func (s *Stream) RecvCompress() string {
+ return s.recvCompress
+}
+
+// SetSendCompress sets the compression algorithm to the stream.
+func (s *Stream) SetSendCompress(str string) {
+ s.sendCompress = str
+}
+
+// Header acquires the key-value pairs of header metadata once it
+// is available. It blocks until i) the metadata is ready or ii) there is no
+// header metadata or iii) the stream is cancelled/expired.
+func (s *Stream) Header() (metadata.MD, error) {
+ select {
+ case <-s.ctx.Done():
+ return nil, ContextErr(s.ctx.Err())
+ case <-s.headerChan:
+ return s.header.Copy(), nil
+ }
+}
+
+// Trailer returns the cached trailer metadata. Note that if it is not called
+// after the entire stream is done, it could return an empty MD. Client
+// side only.
+func (s *Stream) Trailer() metadata.MD {
+ s.mu.RLock()
+ defer s.mu.RUnlock()
+ return s.trailer.Copy()
+}
+
+// ServerTransport returns the underlying ServerTransport for the stream.
+// The client side stream always returns nil.
+func (s *Stream) ServerTransport() ServerTransport {
+ return s.st
+}
+
+// Context returns the context of the stream.
+func (s *Stream) Context() context.Context {
+ return s.ctx
+}
+
+// TraceContext recreates the context of s with a trace.Trace.
+func (s *Stream) TraceContext(tr trace.Trace) {
+ s.ctx = trace.NewContext(s.ctx, tr)
+}
+
+// Method returns the method for the stream.
+func (s *Stream) Method() string {
+ return s.method
+}
+
+// StatusCode returns the statusCode received from the server.
+func (s *Stream) StatusCode() codes.Code {
+ return s.statusCode
+}
+
+// StatusDesc returns the statusDesc received from the server.
+func (s *Stream) StatusDesc() string {
+ return s.statusDesc
+}
+
+// ErrIllegalTrailerSet indicates that the trailer has already been set or it
+// is too late to do so.
+var ErrIllegalTrailerSet = errors.New("transport: trailer has been set")
+
+// SetTrailer sets the trailer metadata which will be sent with the RPC status
+// by the server. This can only be called at most once. Server side only.
+func (s *Stream) SetTrailer(md metadata.MD) error {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ if s.trailer != nil {
+ return ErrIllegalTrailerSet
+ }
+ s.trailer = md.Copy()
+ return nil
+}
+
+func (s *Stream) write(m recvMsg) {
+ s.buf.put(&m)
+}
+
+// Read reads all the data available for this Stream from the transport and
+// passes it into the decoder, which converts it into a gRPC message stream.
+// The error is io.EOF when the stream is done or another non-nil error if
+// the stream broke.
+func (s *Stream) Read(p []byte) (n int, err error) {
+ n, err = s.dec.Read(p)
+ if err != nil {
+ return
+ }
+ s.windowHandler(n)
+ return
+}
+
+type key int
+
+// The key to save transport.Stream in the context.
+const streamKey = key(0)
+
+// newContextWithStream creates a new context from ctx and attaches stream
+// to it.
+func newContextWithStream(ctx context.Context, stream *Stream) context.Context {
+ return context.WithValue(ctx, streamKey, stream)
+}
+
+// StreamFromContext returns the stream saved in ctx.
+func StreamFromContext(ctx context.Context) (s *Stream, ok bool) {
+ s, ok = ctx.Value(streamKey).(*Stream)
+ return
+}
+
+// state of transport
+type transportState int
+
+const (
+ reachable transportState = iota
+ unreachable
+ closing
+)
+
+// NewServerTransport creates a ServerTransport with conn, or returns a
+// non-nil error if it fails.
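+//
+// Typical usage (an illustrative sketch, not code from this package; lis is
+// an assumed net.Listener):
+//
+//	conn, err := lis.Accept()
+//	if err != nil {
+//		return err
+//	}
+//	st, err := NewServerTransport("http2", conn, 100, nil)
+//	if err != nil {
+//		return err
+//	}
+//	go st.HandleStreams(func(s *Stream) { /* dispatch the RPC */ })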
+
+// NewServerTransport creates a ServerTransport from conn, or returns a
+// non-nil error if it fails.
+func NewServerTransport(protocol string, conn net.Conn, maxStreams uint32, authInfo credentials.AuthInfo) (ServerTransport, error) {
+	return newHTTP2Server(conn, maxStreams, authInfo)
+}
+
+// ConnectOptions covers all relevant options for dialing a server.
+type ConnectOptions struct {
+	// UserAgent is the application user agent.
+	UserAgent string
+	// Dialer specifies how to dial a network address.
+	Dialer func(string, time.Duration) (net.Conn, error)
+	// AuthOptions stores the credentials required to set up a client connection and/or issue RPCs.
+	AuthOptions []credentials.Credentials
+	// Timeout specifies the timeout for dialing a client connection.
+	Timeout time.Duration
+}
+
+// NewClientTransport establishes the transport with the required ConnectOptions
+// and returns it to the caller.
+func NewClientTransport(target string, opts *ConnectOptions) (ClientTransport, error) {
+	return newHTTP2Client(target, opts)
+}
+
+// Options provides additional hints and information for message
+// transmission.
+type Options struct {
+	// Last indicates whether this is the last piece of data for this stream.
+	Last bool
+	// Delay is a hint to the transport implementation that the data could be
+	// buffered for a batching write. The transport implementation is free to
+	// ignore it.
+	Delay bool
+}
+
+// CallHdr carries the information of a particular RPC.
+type CallHdr struct {
+	// Host specifies the peer host.
+	Host string
+	// Method specifies the operation to perform.
+	Method string
+	// RecvCompress specifies the compression algorithm applied on inbound messages.
+	RecvCompress string
+	// SendCompress specifies the compression algorithm applied on outbound messages.
+	SendCompress string
+	// Flush indicates whether the new stream command should be sent to the
+	// peer without waiting for the first data. This is only a hint; the
+	// transport may modify the flush decision for performance reasons.
+	Flush bool
+}
+
+// ClientTransport is the common interface for all gRPC client side transport
+// implementations.
+type ClientTransport interface {
+	// Close tears down this transport. Once it returns, the transport
+	// should not be accessed any more. The caller must make sure this
+	// is called only once.
+	Close() error
+
+	// Write sends the data for the given stream. A nil stream indicates
+	// the write is to be performed on the transport as a whole.
+	Write(s *Stream, data []byte, opts *Options) error
+
+	// NewStream creates a Stream for an RPC.
+	NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error)
+
+	// CloseStream clears the footprint of a stream when the stream is
+	// not needed any more. The err indicates the error incurred when
+	// CloseStream is called. Must be called when a stream is finished
+	// unless the associated transport is closing.
+	CloseStream(stream *Stream, err error)
+
+	// Error returns a channel that is closed when some I/O error
+	// happens. Typically the caller should have a goroutine to monitor
+	// this in order to take action (e.g., close the current transport
+	// and create a new one) in the error case. It should not return nil
+	// once the transport is initiated.
+	Error() <-chan struct{}
+}
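
Putting the pieces together, a unary call over this interface is roughly NewStream, a Write with Last set, then reads until the stream ends. This is an illustrative sketch only: the import paths assume the usual gRPC layout, the host and method are made up, and real callers also apply gRPC message framing plus CloseStream and Error() handling:

import (
	"golang.org/x/net/context"

	"google.golang.org/grpc/transport"
)

// roughUnaryCall sketches the happy path of one RPC over a ClientTransport.
func roughUnaryCall(ctx context.Context, t transport.ClientTransport, req []byte) ([]byte, error) {
	s, err := t.NewStream(ctx, &transport.CallHdr{
		Host:   "example.com:443",      // hypothetical peer
		Method: "/pkg.Service/Method", // hypothetical method
	})
	if err != nil {
		return nil, err
	}
	// Last marks this as the final write, half-closing our side of the stream.
	if err := t.Write(s, req, &transport.Options{Last: true}); err != nil {
		return nil, err
	}
	reply := make([]byte, 4096)
	n, err := s.Read(reply) // returns io.EOF once the server finishes the stream
	if err != nil {
		return nil, err
	}
	return reply[:n], nil
}
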
+
+// ServerTransport is the common interface for all gRPC server side transport
+// implementations.
+type ServerTransport interface {
+	// WriteStatus sends the status of a stream to the client.
+	WriteStatus(s *Stream, statusCode codes.Code, statusDesc string) error
+	// Write sends the data for the given stream.
+	Write(s *Stream, data []byte, opts *Options) error
+	// WriteHeader sends the header metadata for the given stream.
+	WriteHeader(s *Stream, md metadata.MD) error
+	// HandleStreams receives incoming streams using the given handler.
+	HandleStreams(func(*Stream))
+	// Close tears down the transport. Once it is called, the transport
+	// should not be accessed any more. All the pending streams and their
+	// handlers will be terminated asynchronously.
+	Close() error
+	// RemoteAddr returns the remote network address.
+	RemoteAddr() net.Addr
+}
+
+// StreamErrorf creates a StreamError with the specified error code and description.
+func StreamErrorf(c codes.Code, format string, a ...interface{}) StreamError {
+	return StreamError{
+		Code: c,
+		Desc: fmt.Sprintf(format, a...),
+	}
+}
+
+// ConnectionErrorf creates a ConnectionError with the specified error description.
+func ConnectionErrorf(format string, a ...interface{}) ConnectionError {
+	return ConnectionError{
+		Desc: fmt.Sprintf(format, a...),
+	}
+}
+
+// ConnectionError is an error that results in the termination of the
+// entire connection and the retry of all the active streams.
+type ConnectionError struct {
+	Desc string
+}
+
+func (e ConnectionError) Error() string {
+	return fmt.Sprintf("connection error: desc = %q", e.Desc)
+}
+
+// ErrConnClosing is a common ConnectionError indicating that the transport is closing.
+var ErrConnClosing = ConnectionError{Desc: "transport is closing"}
+
+// StreamError is an error that only affects one stream within a connection.
+type StreamError struct {
+	Code codes.Code
+	Desc string
+}
+
+func (e StreamError) Error() string {
+	return fmt.Sprintf("stream error: code = %d desc = %q", e.Code, e.Desc)
+}
+
+// ContextErr converts the error from the context package into a StreamError.
+func ContextErr(err error) StreamError {
+	switch err {
+	case context.DeadlineExceeded:
+		return StreamErrorf(codes.DeadlineExceeded, "%v", err)
+	case context.Canceled:
+		return StreamErrorf(codes.Canceled, "%v", err)
+	}
+	panic(fmt.Sprintf("Unexpected error from context package: %v", err))
+}
+
+// wait blocks until it can receive from ctx.Done, closing, or proceed.
+// If it receives from ctx.Done, it returns 0, the StreamError for ctx.Err.
+// If it receives from closing, it returns 0, ErrConnClosing.
+// If it receives from proceed, it returns the received integer, nil.
+func wait(ctx context.Context, closing <-chan struct{}, proceed <-chan int) (int, error) {
+	select {
+	case <-ctx.Done():
+		return 0, ContextErr(ctx.Err())
+	case <-closing:
+		return 0, ErrConnClosing
+	case i := <-proceed:
+		return i, nil
+	}
+}
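
The split between ConnectionError and StreamError drives the caller's recovery strategy: a connection error invalidates every active stream, while a stream error fails a single RPC. A minimal sketch of that branching, assuming the usual import paths for this package and the codes package:

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/transport"
)

// classify shows how callers typically branch on the two error kinds.
func classify(err error) string {
	switch err.(type) {
	case transport.ConnectionError:
		return "connection-level: rebuild the transport and retry all streams"
	case transport.StreamError:
		return "stream-level: fail just this RPC"
	default:
		return "unknown"
	}
}

func main() {
	fmt.Println(classify(transport.StreamErrorf(codes.Canceled, "user cancelled")))
	fmt.Println(classify(transport.ConnectionErrorf("keepalive timeout")))
}
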
diff --git a/Godeps/_workspace/src/gopkg.in/inf.v0/LICENSE b/Godeps/_workspace/src/gopkg.in/inf.v0/LICENSE
new file mode 100644
index 000000000000..87a5cede3392
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/inf.v0/LICENSE
@@ -0,0 +1,28 @@
+Copyright (c) 2012 Péter Surányi. Portions Copyright (c) 2009 The Go
+Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/Godeps/_workspace/src/gopkg.in/inf.v0/dec.go b/Godeps/_workspace/src/gopkg.in/inf.v0/dec.go
new file mode 100644
index 000000000000..d17ad945d42d
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/inf.v0/dec.go
@@ -0,0 +1,615 @@
+// Package inf (type inf.Dec) implements "infinite-precision" decimal
+// arithmetic.
+// "Infinite precision" describes two characteristics: practically unlimited
+// precision for decimal number representation and no support for calculating
+// with any specific fixed precision.
+// (Although there is no practical limit on precision, inf.Dec can only
+// represent finite decimals.)
+//
+// This package is currently in an experimental stage and the API may change.
+//
+// This package does NOT support:
+//  - rounding to specific precisions (as opposed to specific decimal positions)
+//  - the notion of context (each rounding must be explicit)
+//  - NaN and Inf values, and distinguishing between positive and negative zero
+//  - conversions to and from float32/64 types
+//
+// Features considered for possible addition:
+//  + formatting options
+//  + Exp method
+//  + combined operations such as AddRound/MulAdd etc
+//  + exchanging data in decimal32/64/128 formats
+//
+package inf
+
+// TODO:
+//  - avoid excessive deep copying (quo and rounders)
+
+import (
+	"fmt"
+	"io"
+	"math/big"
+	"strings"
+)
+
+// A Dec represents a signed arbitrary-precision decimal.
+// It is a combination of a sign, an arbitrary-precision integer coefficient
+// value, and a signed fixed-precision exponent value.
+// The sign and the coefficient value are handled together as a signed value
+// and referred to as the unscaled value.
+// (Positive and negative zero values are not distinguished.)
+// Since the exponent is most commonly non-positive, it is handled in negated
+// form and referred to as scale.
+//
+// The mathematical value of a Dec equals:
+//
+//  unscaled * 10**(-scale)
+//
+// Note that different Dec representations may have equal mathematical values.
+//
+//  unscaled  scale  String()
+//  -------------------------
+//         0      0    "0"
+//         0      2    "0.00"
+//         0     -2    "0"
+//         1      0    "1"
+//       100      2    "1.00"
+//        10      0    "10"
+//         1     -1    "10"
+//
+// The zero value for a Dec represents the value 0 with scale 0.
+//
+// Operations are typically performed through the *Dec type.
+// The semantics of the assignment operation "=" for "bare" Dec values are
+// undefined and should not be relied on.
+// +// Methods are typically of the form: +// +// func (z *Dec) Op(x, y *Dec) *Dec +// +// and implement operations z = x Op y with the result as receiver; if it +// is one of the operands it may be overwritten (and its memory reused). +// To enable chaining of operations, the result is also returned. Methods +// returning a result other than *Dec take one of the operands as the receiver. +// +// A "bare" Quo method (quotient / division operation) is not provided, as the +// result is not always a finite decimal and thus in general cannot be +// represented as a Dec. +// Instead, in the common case when rounding is (potentially) necessary, +// QuoRound should be used with a Scale and a Rounder. +// QuoExact or QuoRound with RoundExact can be used in the special cases when it +// is known that the result is always a finite decimal. +// +type Dec struct { + unscaled big.Int + scale Scale +} + +// Scale represents the type used for the scale of a Dec. +type Scale int32 + +const scaleSize = 4 // bytes in a Scale value + +// Scaler represents a method for obtaining the scale to use for the result of +// an operation on x and y. +type scaler interface { + Scale(x *Dec, y *Dec) Scale +} + +var bigInt = [...]*big.Int{ + big.NewInt(0), big.NewInt(1), big.NewInt(2), big.NewInt(3), big.NewInt(4), + big.NewInt(5), big.NewInt(6), big.NewInt(7), big.NewInt(8), big.NewInt(9), + big.NewInt(10), +} + +var exp10cache [64]big.Int = func() [64]big.Int { + e10, e10i := [64]big.Int{}, bigInt[1] + for i, _ := range e10 { + e10[i].Set(e10i) + e10i = new(big.Int).Mul(e10i, bigInt[10]) + } + return e10 +}() + +// NewDec allocates and returns a new Dec set to the given int64 unscaled value +// and scale. +func NewDec(unscaled int64, scale Scale) *Dec { + return new(Dec).SetUnscaled(unscaled).SetScale(scale) +} + +// NewDecBig allocates and returns a new Dec set to the given *big.Int unscaled +// value and scale. +func NewDecBig(unscaled *big.Int, scale Scale) *Dec { + return new(Dec).SetUnscaledBig(unscaled).SetScale(scale) +} + +// Scale returns the scale of x. +func (x *Dec) Scale() Scale { + return x.scale +} + +// Unscaled returns the unscaled value of x for u and true for ok when the +// unscaled value can be represented as int64; otherwise it returns an undefined +// int64 value for u and false for ok. Use x.UnscaledBig().Int64() to avoid +// checking the validity of the value when the check is known to be redundant. +func (x *Dec) Unscaled() (u int64, ok bool) { + u = x.unscaled.Int64() + var i big.Int + ok = i.SetInt64(u).Cmp(&x.unscaled) == 0 + return +} + +// UnscaledBig returns the unscaled value of x as *big.Int. +func (x *Dec) UnscaledBig() *big.Int { + return &x.unscaled +} + +// SetScale sets the scale of z, with the unscaled value unchanged, and returns +// z. +// The mathematical value of the Dec changes as if it was multiplied by +// 10**(oldscale-scale). +func (z *Dec) SetScale(scale Scale) *Dec { + z.scale = scale + return z +} + +// SetUnscaled sets the unscaled value of z, with the scale unchanged, and +// returns z. +func (z *Dec) SetUnscaled(unscaled int64) *Dec { + z.unscaled.SetInt64(unscaled) + return z +} + +// SetUnscaledBig sets the unscaled value of z, with the scale unchanged, and +// returns z. +func (z *Dec) SetUnscaledBig(unscaled *big.Int) *Dec { + z.unscaled.Set(unscaled) + return z +} + +// Set sets z to the value of x and returns z. +// It does nothing if z == x. 
+func (z *Dec) Set(x *Dec) *Dec { + if z != x { + z.SetUnscaledBig(x.UnscaledBig()) + z.SetScale(x.Scale()) + } + return z +} + +// Sign returns: +// +// -1 if x < 0 +// 0 if x == 0 +// +1 if x > 0 +// +func (x *Dec) Sign() int { + return x.UnscaledBig().Sign() +} + +// Neg sets z to -x and returns z. +func (z *Dec) Neg(x *Dec) *Dec { + z.SetScale(x.Scale()) + z.UnscaledBig().Neg(x.UnscaledBig()) + return z +} + +// Cmp compares x and y and returns: +// +// -1 if x < y +// 0 if x == y +// +1 if x > y +// +func (x *Dec) Cmp(y *Dec) int { + xx, yy := upscale(x, y) + return xx.UnscaledBig().Cmp(yy.UnscaledBig()) +} + +// Abs sets z to |x| (the absolute value of x) and returns z. +func (z *Dec) Abs(x *Dec) *Dec { + z.SetScale(x.Scale()) + z.UnscaledBig().Abs(x.UnscaledBig()) + return z +} + +// Add sets z to the sum x+y and returns z. +// The scale of z is the greater of the scales of x and y. +func (z *Dec) Add(x, y *Dec) *Dec { + xx, yy := upscale(x, y) + z.SetScale(xx.Scale()) + z.UnscaledBig().Add(xx.UnscaledBig(), yy.UnscaledBig()) + return z +} + +// Sub sets z to the difference x-y and returns z. +// The scale of z is the greater of the scales of x and y. +func (z *Dec) Sub(x, y *Dec) *Dec { + xx, yy := upscale(x, y) + z.SetScale(xx.Scale()) + z.UnscaledBig().Sub(xx.UnscaledBig(), yy.UnscaledBig()) + return z +} + +// Mul sets z to the product x*y and returns z. +// The scale of z is the sum of the scales of x and y. +func (z *Dec) Mul(x, y *Dec) *Dec { + z.SetScale(x.Scale() + y.Scale()) + z.UnscaledBig().Mul(x.UnscaledBig(), y.UnscaledBig()) + return z +} + +// Round sets z to the value of x rounded to Scale s using Rounder r, and +// returns z. +func (z *Dec) Round(x *Dec, s Scale, r Rounder) *Dec { + return z.QuoRound(x, NewDec(1, 0), s, r) +} + +// QuoRound sets z to the quotient x/y, rounded using the given Rounder to the +// specified scale. +// +// If the rounder is RoundExact but the result can not be expressed exactly at +// the specified scale, QuoRound returns nil, and the value of z is undefined. +// +// There is no corresponding Div method; the equivalent can be achieved through +// the choice of Rounder used. +// +func (z *Dec) QuoRound(x, y *Dec, s Scale, r Rounder) *Dec { + return z.quo(x, y, sclr{s}, r) +} + +func (z *Dec) quo(x, y *Dec, s scaler, r Rounder) *Dec { + scl := s.Scale(x, y) + var zzz *Dec + if r.UseRemainder() { + zz, rA, rB := new(Dec).quoRem(x, y, scl, true, new(big.Int), new(big.Int)) + zzz = r.Round(new(Dec), zz, rA, rB) + } else { + zz, _, _ := new(Dec).quoRem(x, y, scl, false, nil, nil) + zzz = r.Round(new(Dec), zz, nil, nil) + } + if zzz == nil { + return nil + } + return z.Set(zzz) +} + +// QuoExact sets z to the quotient x/y and returns z when x/y is a finite +// decimal. Otherwise it returns nil and the value of z is undefined. +// +// The scale of a non-nil result is "x.Scale() - y.Scale()" or greater; it is +// calculated so that the remainder will be zero whenever x/y is a finite +// decimal. +func (z *Dec) QuoExact(x, y *Dec) *Dec { + return z.quo(x, y, scaleQuoExact{}, RoundExact) +} + +// quoRem sets z to the quotient x/y with the scale s, and if useRem is true, +// it sets remNum and remDen to the numerator and denominator of the remainder. +// It returns z, remNum and remDen. +// +// The remainder is normalized to the range -1 < r < 1 to simplify rounding; +// that is, the results satisfy the following equation: +// +// x / y = z + (remNum/remDen) * 10**(-z.Scale()) +// +// See Rounder for more details about rounding. 
+//
+func (z *Dec) quoRem(x, y *Dec, s Scale, useRem bool,
+	remNum, remDen *big.Int) (*Dec, *big.Int, *big.Int) {
+	// difference (required adjustment) compared to "canonical" result scale
+	shift := s - (x.Scale() - y.Scale())
+	// pointers to adjusted unscaled dividend and divisor
+	var ix, iy *big.Int
+	switch {
+	case shift > 0:
+		// increased scale: decimal-shift dividend left
+		ix = new(big.Int).Mul(x.UnscaledBig(), exp10(shift))
+		iy = y.UnscaledBig()
+	case shift < 0:
+		// decreased scale: decimal-shift divisor left
+		ix = x.UnscaledBig()
+		iy = new(big.Int).Mul(y.UnscaledBig(), exp10(-shift))
+	default:
+		ix = x.UnscaledBig()
+		iy = y.UnscaledBig()
+	}
+	// save a copy of iy in case it is about to be overwritten with the result
+	iy2 := iy
+	if iy == z.UnscaledBig() {
+		iy2 = new(big.Int).Set(iy)
+	}
+	// set scale
+	z.SetScale(s)
+	// set unscaled
+	if useRem {
+		// integer division
+		_, intr := z.UnscaledBig().QuoRem(ix, iy, new(big.Int))
+		// set remainder
+		remNum.Set(intr)
+		remDen.Set(iy2)
+	} else {
+		z.UnscaledBig().Quo(ix, iy)
+	}
+	return z, remNum, remDen
+}
+
+type sclr struct{ s Scale }
+
+func (s sclr) Scale(x, y *Dec) Scale {
+	return s.s
+}
+
+type scaleQuoExact struct{}
+
+func (sqe scaleQuoExact) Scale(x, y *Dec) Scale {
+	rem := new(big.Rat).SetFrac(x.UnscaledBig(), y.UnscaledBig())
+	f2, f5 := factor2(rem.Denom()), factor(rem.Denom(), bigInt[5])
+	var f10 Scale
+	if f2 > f5 {
+		f10 = Scale(f2)
+	} else {
+		f10 = Scale(f5)
+	}
+	return x.Scale() - y.Scale() + f10
+}
+
+func factor(n *big.Int, p *big.Int) int {
+	// could be improved for large factors
+	d, f := n, 0
+	for {
+		dd, dm := new(big.Int).DivMod(d, p, new(big.Int))
+		if dm.Sign() == 0 {
+			f++
+			d = dd
+		} else {
+			break
+		}
+	}
+	return f
+}
+
+func factor2(n *big.Int) int {
+	// could be improved for large factors
+	f := 0
+	for ; n.Bit(f) == 0; f++ {
+	}
+	return f
+}
+
+func upscale(a, b *Dec) (*Dec, *Dec) {
+	if a.Scale() == b.Scale() {
+		return a, b
+	}
+	if a.Scale() > b.Scale() {
+		bb := b.rescale(a.Scale())
+		return a, bb
+	}
+	aa := a.rescale(b.Scale())
+	return aa, b
+}
+
+func exp10(x Scale) *big.Int {
+	if int(x) < len(exp10cache) {
+		return &exp10cache[int(x)]
+	}
+	return new(big.Int).Exp(bigInt[10], big.NewInt(int64(x)), nil)
+}
+
+func (x *Dec) rescale(newScale Scale) *Dec {
+	shift := newScale - x.Scale()
+	switch {
+	case shift < 0:
+		e := exp10(-shift)
+		return NewDecBig(new(big.Int).Quo(x.UnscaledBig(), e), newScale)
+	case shift > 0:
+		e := exp10(shift)
+		return NewDecBig(new(big.Int).Mul(x.UnscaledBig(), e), newScale)
+	}
+	return x
+}
+
+var zeros = []byte("00000000000000000000000000000000" +
+	"00000000000000000000000000000000")
+var lzeros = Scale(len(zeros))
+
+func appendZeros(s []byte, n Scale) []byte {
+	for i := Scale(0); i < n; i += lzeros {
+		if n > i+lzeros {
+			s = append(s, zeros...)
+		} else {
+			s = append(s, zeros[0:n-i]...)
+		}
+	}
+	return s
+}
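
The shift arithmetic in quoRem is easier to follow with concrete numbers. Dividing 2.5 (unscaled 25, scale 1) by 0.25 (unscaled 25, scale 2) at a requested scale of 3 gives shift = 3 - (1 - 2) = 4, so the dividend is decimal-shifted four places left before the integer division. A runnable sketch against this package's public API, using the import path under which it is vendored here:

package main

import (
	"fmt"

	inf "gopkg.in/inf.v0"
)

func main() {
	x := inf.NewDec(25, 1) // 2.5
	y := inf.NewDec(25, 2) // 0.25

	// Exact at the requested scale: 2.5 / 0.25 = 10.000.
	z := new(inf.Dec).QuoRound(x, y, 3, inf.RoundHalfUp)
	fmt.Println(z) // 10.000

	// An inexact division rounded half-up to two decimal places.
	q := new(inf.Dec).QuoRound(inf.NewDec(1, 0), inf.NewDec(3, 0), 2, inf.RoundHalfUp)
	fmt.Println(q) // 0.33
}
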
+
+func (x *Dec) String() string {
+	if x == nil {
+		return ""
+	}
+	scale := x.Scale()
+	s := []byte(x.UnscaledBig().String())
+	if scale <= 0 {
+		if scale != 0 && x.unscaled.Sign() != 0 {
+			s = appendZeros(s, -scale)
+		}
+		return string(s)
+	}
+	negbit := Scale(-((x.Sign() - 1) / 2))
+	// scale > 0
+	lens := Scale(len(s))
+	if lens-negbit <= scale {
+		ss := make([]byte, 0, scale+2)
+		if negbit == 1 {
+			ss = append(ss, '-')
+		}
+		ss = append(ss, '0', '.')
+		ss = appendZeros(ss, scale-lens+negbit)
+		ss = append(ss, s[negbit:]...)
+		return string(ss)
+	}
+	// lens > scale
+	ss := make([]byte, 0, lens+1)
+	ss = append(ss, s[:lens-scale]...)
+	ss = append(ss, '.')
+	ss = append(ss, s[lens-scale:]...)
+	return string(ss)
+}
+
+// Format is a support routine for fmt.Formatter. It accepts the decimal
+// formats 'd' and 'f', and handles both equivalently.
+// Width, precision, flags and bases 2, 8, 16 are not supported.
+func (x *Dec) Format(s fmt.State, ch rune) {
+	if ch != 'd' && ch != 'f' && ch != 'v' && ch != 's' {
+		fmt.Fprintf(s, "%%!%c(dec.Dec=%s)", ch, x.String())
+		return
+	}
+	fmt.Fprint(s, x.String())
+}
+
+func (z *Dec) scan(r io.RuneScanner) (*Dec, error) {
+	unscaled := make([]byte, 0, 256) // collects chars of unscaled as bytes
+	dp, dg := -1, -1                 // indexes of decimal point, first digit
+loop:
+	for {
+		ch, _, err := r.ReadRune()
+		if err == io.EOF {
+			break loop
+		}
+		if err != nil {
+			return nil, err
+		}
+		switch {
+		case ch == '+' || ch == '-':
+			if len(unscaled) > 0 || dp >= 0 { // must be the first character
+				r.UnreadRune()
+				break loop
+			}
+		case ch == '.':
+			if dp >= 0 {
+				r.UnreadRune()
+				break loop
+			}
+			dp = len(unscaled)
+			continue // don't add to unscaled
+		case ch >= '0' && ch <= '9':
+			if dg == -1 {
+				dg = len(unscaled)
+			}
+		default:
+			r.UnreadRune()
+			break loop
+		}
+		unscaled = append(unscaled, byte(ch))
+	}
+	if dg == -1 {
+		return nil, fmt.Errorf("no digits read")
+	}
+	if dp >= 0 {
+		z.SetScale(Scale(len(unscaled) - dp))
+	} else {
+		z.SetScale(0)
+	}
+	_, ok := z.UnscaledBig().SetString(string(unscaled), 10)
+	if !ok {
+		return nil, fmt.Errorf("invalid decimal: %s", string(unscaled))
+	}
+	return z, nil
+}
+
+// SetString sets z to the value of s, interpreted as a decimal (base 10),
+// and returns z and a boolean indicating success. The scale of z is the
+// number of digits after the decimal point (including any trailing 0s),
+// or 0 if there is no decimal point. If SetString fails, the value of z
+// is undefined but the returned value is nil.
+func (z *Dec) SetString(s string) (*Dec, bool) {
+	r := strings.NewReader(s)
+	_, err := z.scan(r)
+	if err != nil {
+		return nil, false
+	}
+	_, _, err = r.ReadRune()
+	if err != io.EOF {
+		return nil, false
+	}
+	// err == io.EOF => scan consumed all of s
+	return z, true
+}
+
+// Scan is a support routine for fmt.Scanner; it sets z to the value of
+// the scanned number. It accepts the decimal formats 'd' and 'f', and
+// handles both equivalently. Bases 2, 8, 16 are not supported.
+// The scale of z is the number of digits after the decimal point
+// (including any trailing 0s), or 0 if there is no decimal point.
+func (z *Dec) Scan(s fmt.ScanState, ch rune) error {
+	if ch != 'd' && ch != 'f' && ch != 's' && ch != 'v' {
+		return fmt.Errorf("Dec.Scan: invalid verb '%c'", ch)
+	}
+	s.SkipSpace()
+	_, err := z.scan(s)
+	return err
+}
+
+// decGobVersion is the gob encoding version.
+const decGobVersion byte = 1
+
+func scaleBytes(s Scale) []byte {
+	buf := make([]byte, scaleSize)
+	i := scaleSize
+	for j := 0; j < scaleSize; j++ {
+		i--
+		buf[i] = byte(s)
+		s >>= 8
+	}
+	return buf
+}
+
+func scale(b []byte) (s Scale) {
+	for j := 0; j < scaleSize; j++ {
+		s <<= 8
+		s |= Scale(b[j])
+	}
+	return
+}
+
+// GobEncode implements the gob.GobEncoder interface.
+func (x *Dec) GobEncode() ([]byte, error) {
+	buf, err := x.UnscaledBig().GobEncode()
+	if err != nil {
+		return nil, err
+	}
+	buf = append(append(buf, scaleBytes(x.Scale())...), decGobVersion)
+	return buf, nil
+}
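
GobEncode's output is big.Int's own gob encoding followed by four big-endian scale bytes and a trailing version byte, which GobDecode below peels off in reverse order. A round-trip sketch:

package main

import (
	"fmt"

	inf "gopkg.in/inf.v0"
)

func main() {
	x := inf.NewDec(12345, 3) // 12.345
	buf, err := x.GobEncode()
	if err != nil {
		panic(err)
	}
	// Layout: big.Int gob bytes | 4 scale bytes (big-endian) | version byte.
	fmt.Printf("encoded %d bytes, version byte %d\n", len(buf), buf[len(buf)-1])

	var y inf.Dec
	if err := y.GobDecode(buf); err != nil {
		panic(err)
	}
	fmt.Println(y.String()) // 12.345
}
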
+
+// GobDecode implements the gob.GobDecoder interface.
+func (z *Dec) GobDecode(buf []byte) error {
+	if len(buf) == 0 {
+		return fmt.Errorf("Dec.GobDecode: no data")
+	}
+	b := buf[len(buf)-1]
+	if b != decGobVersion {
+		return fmt.Errorf("Dec.GobDecode: encoding version %d not supported", b)
+	}
+	l := len(buf) - scaleSize - 1
+	err := z.UnscaledBig().GobDecode(buf[:l])
+	if err != nil {
+		return err
+	}
+	z.SetScale(scale(buf[l : l+scaleSize]))
+	return nil
+}
+
+// MarshalText implements the encoding.TextMarshaler interface.
+func (x *Dec) MarshalText() ([]byte, error) {
+	return []byte(x.String()), nil
+}
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface.
+func (z *Dec) UnmarshalText(data []byte) error {
+	_, ok := z.SetString(string(data))
+	if !ok {
+		return fmt.Errorf("invalid inf.Dec")
+	}
+	return nil
+}
diff --git a/Godeps/_workspace/src/gopkg.in/inf.v0/rounder.go b/Godeps/_workspace/src/gopkg.in/inf.v0/rounder.go
new file mode 100644
index 000000000000..3a97ef529b97
--- /dev/null
+++ b/Godeps/_workspace/src/gopkg.in/inf.v0/rounder.go
@@ -0,0 +1,145 @@
+package inf
+
+import (
+	"math/big"
+)
+
+// Rounder represents a method for rounding the (possibly infinite decimal)
+// result of a division to a finite Dec. It is used by Dec.Round() and
+// Dec.QuoRound().
+//
+// See the Example for results of using each Rounder with some sample values.
+//
+type Rounder rounder
+
+// See http://speleotrove.com/decimal/damodel.html#refround for more detailed
+// definitions of these rounding modes.
+var (
+	RoundDown     Rounder // towards 0
+	RoundUp       Rounder // away from 0
+	RoundFloor    Rounder // towards -infinity
+	RoundCeil     Rounder // towards +infinity
+	RoundHalfDown Rounder // to nearest; towards 0 if same distance
+	RoundHalfUp   Rounder // to nearest; away from 0 if same distance
+	RoundHalfEven Rounder // to nearest; even last digit if same distance
+)
+
+// RoundExact is to be used in the case when rounding is not necessary.
+// When used with QuoRound or Round, it returns the result verbatim when it
+// can be expressed exactly with the given precision, and it returns nil
+// otherwise. QuoExact is a shorthand for using QuoRound with RoundExact.
+var RoundExact Rounder
+
+type rounder interface {
+
+	// When UseRemainder() returns true, the Round() method is passed the
+	// remainder of the division, expressed as the numerator and denominator of
+	// a rational.
+	UseRemainder() bool
+
+	// Round sets the rounded value of a quotient to z, and returns z.
+	// quo is rounded down (truncated towards zero) to the scale obtained from
+	// the Scaler in QuoRound().
+	//
+	// When the remainder is not used, remNum and remDen are nil.
+	// When used, the remainder is normalized between -1 and 1; that is:
+	//
+	//	-|remDen| < remNum < |remDen|
+	//
+	// remDen has the same sign as y, and remNum is zero or has the same sign
+	// as x.
+ Round(z, quo *Dec, remNum, remDen *big.Int) *Dec +} + +type rndr struct { + useRem bool + round func(z, quo *Dec, remNum, remDen *big.Int) *Dec +} + +func (r rndr) UseRemainder() bool { + return r.useRem +} + +func (r rndr) Round(z, quo *Dec, remNum, remDen *big.Int) *Dec { + return r.round(z, quo, remNum, remDen) +} + +var intSign = []*big.Int{big.NewInt(-1), big.NewInt(0), big.NewInt(1)} + +func roundHalf(f func(c int, odd uint) (roundUp bool)) func(z, q *Dec, rA, rB *big.Int) *Dec { + return func(z, q *Dec, rA, rB *big.Int) *Dec { + z.Set(q) + brA, brB := rA.BitLen(), rB.BitLen() + if brA < brB-1 { + // brA < brB-1 => |rA| < |rB/2| + return z + } + roundUp := false + srA, srB := rA.Sign(), rB.Sign() + s := srA * srB + if brA == brB-1 { + rA2 := new(big.Int).Lsh(rA, 1) + if s < 0 { + rA2.Neg(rA2) + } + roundUp = f(rA2.Cmp(rB)*srB, z.UnscaledBig().Bit(0)) + } else { + // brA > brB-1 => |rA| > |rB/2| + roundUp = true + } + if roundUp { + z.UnscaledBig().Add(z.UnscaledBig(), intSign[s+1]) + } + return z + } +} + +func init() { + RoundExact = rndr{true, + func(z, q *Dec, rA, rB *big.Int) *Dec { + if rA.Sign() != 0 { + return nil + } + return z.Set(q) + }} + RoundDown = rndr{false, + func(z, q *Dec, rA, rB *big.Int) *Dec { + return z.Set(q) + }} + RoundUp = rndr{true, + func(z, q *Dec, rA, rB *big.Int) *Dec { + z.Set(q) + if rA.Sign() != 0 { + z.UnscaledBig().Add(z.UnscaledBig(), intSign[rA.Sign()*rB.Sign()+1]) + } + return z + }} + RoundFloor = rndr{true, + func(z, q *Dec, rA, rB *big.Int) *Dec { + z.Set(q) + if rA.Sign()*rB.Sign() < 0 { + z.UnscaledBig().Add(z.UnscaledBig(), intSign[0]) + } + return z + }} + RoundCeil = rndr{true, + func(z, q *Dec, rA, rB *big.Int) *Dec { + z.Set(q) + if rA.Sign()*rB.Sign() > 0 { + z.UnscaledBig().Add(z.UnscaledBig(), intSign[2]) + } + return z + }} + RoundHalfDown = rndr{true, roundHalf( + func(c int, odd uint) bool { + return c > 0 + })} + RoundHalfUp = rndr{true, roundHalf( + func(c int, odd uint) bool { + return c >= 0 + })} + RoundHalfEven = rndr{true, roundHalf( + func(c int, odd uint) bool { + return c > 0 || c == 0 && odd == 1 + })} +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/apis/core/conversion.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/apis/core/conversion.go new file mode 100644 index 000000000000..d6aba9dbc6bf --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/apis/core/conversion.go @@ -0,0 +1,56 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package core + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/fields" + "k8s.io/kubernetes/pkg/labels" + "k8s.io/kubernetes/pkg/runtime" +) + +func addDefaultingFuncs(scheme *runtime.Scheme) { + scheme.AddDefaultingFuncs( + func(obj *api.ListOptions) { + if obj.LabelSelector == nil { + obj.LabelSelector = labels.Everything() + } + if obj.FieldSelector == nil { + obj.FieldSelector = fields.Everything() + } + }, + ) +} + +func addConversionFuncs(scheme *runtime.Scheme) { + scheme.AddConversionFuncs( + api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta, + api.Convert_unversioned_ListMeta_To_unversioned_ListMeta, + api.Convert_intstr_IntOrString_To_intstr_IntOrString, + api.Convert_unversioned_Time_To_unversioned_Time, + api.Convert_Slice_string_To_unversioned_Time, + api.Convert_string_To_labels_Selector, + api.Convert_string_To_fields_Selector, + api.Convert_Pointer_bool_To_bool, + api.Convert_bool_To_Pointer_bool, + api.Convert_Pointer_string_To_string, + api.Convert_string_To_Pointer_string, + api.Convert_labels_Selector_To_string, + api.Convert_fields_Selector_To_string, + api.Convert_resource_Quantity_To_resource_Quantity, + ) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/apis/core/deep_copy.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/apis/core/deep_copy.go new file mode 100644 index 000000000000..931523f19825 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/apis/core/deep_copy.go @@ -0,0 +1,42 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package core + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/runtime" +) + +func addDeepCopyFuncs(scheme *runtime.Scheme) { + if err := scheme.AddGeneratedDeepCopyFuncs( + api.DeepCopy_api_DeleteOptions, + api.DeepCopy_api_ExportOptions, + api.DeepCopy_api_List, + api.DeepCopy_api_ListOptions, + api.DeepCopy_api_ObjectMeta, + api.DeepCopy_api_ObjectReference, + api.DeepCopy_api_OwnerReference, + api.DeepCopy_api_Service, + api.DeepCopy_api_ServiceList, + api.DeepCopy_api_ServicePort, + api.DeepCopy_api_ServiceSpec, + api.DeepCopy_api_ServiceStatus, + ); err != nil { + // if one of the deep copy functions is malformed, detect it immediately. + panic(err) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/apis/core/install/install.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/apis/core/install/install.go new file mode 100644 index 000000000000..5659821109cc --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/apis/core/install/install.go @@ -0,0 +1,160 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package install + +import ( + "fmt" + + "github.com/golang/glog" + + core "k8s.io/kubernetes/federation/apis/core" + core_v1 "k8s.io/kubernetes/federation/apis/core/v1" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/meta" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/apimachinery" + "k8s.io/kubernetes/pkg/apimachinery/registered" + "k8s.io/kubernetes/pkg/conversion" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/util/sets" +) + +const importPrefix = "k8s.io/kubernetes/federation/api" + +var accessor = meta.NewAccessor() + +// availableVersions lists all known external versions for this group from most preferred to least preferred +var availableVersions = []unversioned.GroupVersion{core_v1.SchemeGroupVersion} + +func init() { + registered.RegisterVersions(availableVersions) + externalVersions := []unversioned.GroupVersion{} + for _, v := range availableVersions { + if registered.IsAllowedVersion(v) { + externalVersions = append(externalVersions, v) + } + } + if len(externalVersions) == 0 { + glog.V(4).Infof("No version is registered for group %v", core.GroupName) + return + } + + if err := registered.EnableVersions(externalVersions...); err != nil { + glog.V(4).Infof("%v", err) + return + } + if err := enableVersions(externalVersions); err != nil { + glog.V(4).Infof("%v", err) + return + } +} + +// TODO: enableVersions should be centralized rather than spread in each API +// group. +// We can combine registered.RegisterVersions, registered.EnableVersions and +// registered.RegisterGroup once we have moved enableVersions there. +func enableVersions(externalVersions []unversioned.GroupVersion) error { + addVersionsToScheme(externalVersions...) + preferredExternalVersion := externalVersions[0] + + groupMeta := apimachinery.GroupMeta{ + GroupVersion: preferredExternalVersion, + GroupVersions: externalVersions, + RESTMapper: newRESTMapper(externalVersions), + SelfLinker: runtime.SelfLinker(accessor), + InterfacesFor: interfacesFor, + } + + if err := registered.RegisterGroup(groupMeta); err != nil { + return err + } + api.RegisterRESTMapper(groupMeta.RESTMapper) + return nil +} + +// userResources is a group of resources mostly used by a kubectl user +var userResources = []string{"svc"} + +func newRESTMapper(externalVersions []unversioned.GroupVersion) meta.RESTMapper { + // the list of kinds that are scoped at the root of the api hierarchy + // if a kind is not enumerated here, it is assumed to have a namespace scope + rootScoped := sets.NewString() + + // these kinds should be excluded from the list of resources + ignoredKinds := sets.NewString( + "ListOptions", + "DeleteOptions", + "Status") + + mapper := api.NewDefaultRESTMapper(externalVersions, interfacesFor, importPrefix, ignoredKinds, rootScoped) + // setup aliases for groups of resources + mapper.AddResourceAlias("all", userResources...) + + return mapper +} + +// InterfacesFor returns the default Codec and ResourceVersioner for a given version +// string, or an error if the version is not known. 
+func interfacesFor(version unversioned.GroupVersion) (*meta.VersionInterfaces, error) {
+	switch version {
+	case core_v1.SchemeGroupVersion:
+		return &meta.VersionInterfaces{
+			ObjectConvertor:  core.Scheme,
+			MetadataAccessor: accessor,
+		}, nil
+	default:
+		g, _ := registered.Group(core.GroupName)
+		return nil, fmt.Errorf("unsupported storage version: %s (valid: %v)", version, g.GroupVersions)
+	}
+}
+
+func addVersionsToScheme(externalVersions ...unversioned.GroupVersion) {
+	// add the internal version to Scheme
+	core.AddToScheme(core.Scheme)
+	// add the enabled external versions to Scheme
+	for _, v := range externalVersions {
+		if !registered.IsEnabledVersion(v) {
+			glog.Errorf("Version %s is not enabled, so it will not be added to the Scheme.", v)
+			continue
+		}
+		switch v {
+		case core_v1.SchemeGroupVersion:
+			core_v1.AddToScheme(core.Scheme)
+		}
+	}
+
+	// This is a "fast-path" that avoids reflection for common types. It focuses on the objects that are
+	// converted the most in the cluster.
+	// TODO: generate one of these for every external API group - this is to prove the impact
+	core.Scheme.AddGenericConversionFunc(func(objA, objB interface{}, s conversion.Scope) (bool, error) {
+		switch a := objA.(type) {
+		case *v1.Service:
+			switch b := objB.(type) {
+			case *api.Service:
+				return true, v1.Convert_v1_Service_To_api_Service(a, b, s)
+			}
+		case *api.Service:
+			switch b := objB.(type) {
+			case *v1.Service:
+				return true, v1.Convert_api_Service_To_v1_Service(a, b, s)
+			}
+
+		}
+		return false, nil
+	})
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/apis/core/register.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/apis/core/register.go
new file mode 100644
index 000000000000..b0c6e0ac0f42
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/apis/core/register.go
@@ -0,0 +1,79 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package core
+
+import (
+	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/api/unversioned"
+	"k8s.io/kubernetes/pkg/runtime"
+	"k8s.io/kubernetes/pkg/runtime/serializer"
+)
+
+// Scheme is the default instance of runtime.Scheme to which types in the Kubernetes API are already registered.
+var Scheme = runtime.NewScheme()
+
+// Codecs provides access to encoding and decoding for the scheme
+var Codecs = serializer.NewCodecFactory(Scheme)
+
+// GroupName is the group name used in this package
+const GroupName = ""
+
+// SchemeGroupVersion is the group version used to register these objects
+var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
+
+// Unversioned is the group version for unversioned API objects
+// TODO: this should be v1 probably
+var Unversioned = unversioned.GroupVersion{Group: "", Version: "v1"}
+
+// ParameterCodec handles versioning of objects that are converted to query parameters.
+var ParameterCodec = runtime.NewParameterCodec(Scheme) + +// Kind takes an unqualified kind and returns back a Group qualified GroupKind +func Kind(kind string) unversioned.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns back a Group qualified GroupResource +func Resource(resource string) unversioned.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +func AddToScheme(scheme *runtime.Scheme) { + if err := Scheme.AddIgnoredConversionType(&unversioned.TypeMeta{}, &unversioned.TypeMeta{}); err != nil { + panic(err) + } + scheme.AddKnownTypes(SchemeGroupVersion, + &api.ServiceList{}, + &api.Service{}, + &api.ListOptions{}, + &api.DeleteOptions{}, + ) + + // Register Unversioned types under their own special group + Scheme.AddUnversionedTypes(Unversioned, + &unversioned.ExportOptions{}, + &unversioned.Status{}, + &unversioned.APIVersions{}, + &unversioned.APIGroupList{}, + &unversioned.APIGroup{}, + &unversioned.APIResourceList{}, + ) + + addDeepCopyFuncs(scheme) + addDefaultingFuncs(scheme) + addConversionFuncs(scheme) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/apis/core/v1/conversion.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/apis/core/v1/conversion.go new file mode 100644 index 000000000000..f7864f431301 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/apis/core/v1/conversion.go @@ -0,0 +1,82 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package v1
+
+import (
+	"fmt"
+
+	"k8s.io/kubernetes/pkg/api/v1"
+	"k8s.io/kubernetes/pkg/runtime"
+)
+
+func addConversionFuncs(scheme *runtime.Scheme) {
+	// Add non-generated conversion functions
+	err := scheme.AddConversionFuncs(
+		v1.Convert_v1_DeleteOptions_To_api_DeleteOptions,
+		v1.Convert_api_DeleteOptions_To_v1_DeleteOptions,
+		v1.Convert_v1_ExportOptions_To_api_ExportOptions,
+		v1.Convert_api_ExportOptions_To_v1_ExportOptions,
+		v1.Convert_v1_List_To_api_List,
+		v1.Convert_api_List_To_v1_List,
+		v1.Convert_v1_ListOptions_To_api_ListOptions,
+		v1.Convert_api_ListOptions_To_v1_ListOptions,
+		v1.Convert_v1_ObjectFieldSelector_To_api_ObjectFieldSelector,
+		v1.Convert_api_ObjectFieldSelector_To_v1_ObjectFieldSelector,
+		v1.Convert_v1_ObjectMeta_To_api_ObjectMeta,
+		v1.Convert_api_ObjectMeta_To_v1_ObjectMeta,
+		v1.Convert_v1_ObjectReference_To_api_ObjectReference,
+		v1.Convert_api_ObjectReference_To_v1_ObjectReference,
+		v1.Convert_v1_OwnerReference_To_api_OwnerReference,
+		v1.Convert_api_OwnerReference_To_v1_OwnerReference,
+		v1.Convert_v1_Service_To_api_Service,
+		v1.Convert_api_Service_To_v1_Service,
+		v1.Convert_v1_ServiceList_To_api_ServiceList,
+		v1.Convert_api_ServiceList_To_v1_ServiceList,
+		v1.Convert_v1_ServicePort_To_api_ServicePort,
+		v1.Convert_api_ServicePort_To_v1_ServicePort,
+		v1.Convert_v1_ServiceProxyOptions_To_api_ServiceProxyOptions,
+		v1.Convert_api_ServiceProxyOptions_To_v1_ServiceProxyOptions,
+		v1.Convert_v1_ServiceSpec_To_api_ServiceSpec,
+		v1.Convert_api_ServiceSpec_To_v1_ServiceSpec,
+		v1.Convert_v1_ServiceStatus_To_api_ServiceStatus,
+		v1.Convert_api_ServiceStatus_To_v1_ServiceStatus,
+	)
+	if err != nil {
+		// If one of the conversion functions is malformed, detect it immediately.
+		panic(err)
+	}
+
+	// Add field label conversions for kinds whose only selectable fields
+	// are ObjectMeta fields.
+	for _, kind := range []string{
+		"Service",
+	} {
+		err = scheme.AddFieldLabelConversionFunc("v1", kind,
+			func(label, value string) (string, string, error) {
+				switch label {
+				case "metadata.namespace",
+					"metadata.name":
+					return label, value, nil
+				default:
+					return "", "", fmt.Errorf("field label %q not supported for %q", label, kind)
+				}
+			})
+		if err != nil {
+			// If one of the conversion functions is malformed, detect it immediately.
+			panic(err)
+		}
+	}
+}
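
The registered conversion is what validates field selectors such as metadata.name=my-svc before they reach storage; any label outside ObjectMeta is rejected. The same contract as a standalone, hypothetical function:

package main

import "fmt"

// convertServiceFieldLabel mirrors the closure registered above for "Service":
// only metadata.name and metadata.namespace are selectable.
func convertServiceFieldLabel(label, value string) (string, string, error) {
	switch label {
	case "metadata.name", "metadata.namespace":
		return label, value, nil
	default:
		return "", "", fmt.Errorf("field label %q not supported for Service", label)
	}
}

func main() {
	if l, v, err := convertServiceFieldLabel("metadata.name", "my-svc"); err == nil {
		fmt.Println(l, v) // metadata.name my-svc
	}
	if _, _, err := convertServiceFieldLabel("spec.clusterIP", "10.0.0.1"); err != nil {
		fmt.Println(err) // rejected: not an ObjectMeta field
	}
}
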
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/apis/core/v1/deep_copy.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/apis/core/v1/deep_copy.go
new file mode 100644
index 000000000000..1320c5803f59
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/apis/core/v1/deep_copy.go
@@ -0,0 +1,42 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	"k8s.io/kubernetes/pkg/api/v1"
+	"k8s.io/kubernetes/pkg/runtime"
+)
+
+func addDeepCopyFuncs(scheme *runtime.Scheme) {
+	if err := scheme.AddGeneratedDeepCopyFuncs(
+		v1.DeepCopy_v1_DeleteOptions,
+		v1.DeepCopy_v1_ExportOptions,
+		v1.DeepCopy_v1_List,
+		v1.DeepCopy_v1_ListOptions,
+		v1.DeepCopy_v1_ObjectMeta,
+		v1.DeepCopy_v1_ObjectReference,
+		v1.DeepCopy_v1_OwnerReference,
+		v1.DeepCopy_v1_Service,
+		v1.DeepCopy_v1_ServiceList,
+		v1.DeepCopy_v1_ServicePort,
+		v1.DeepCopy_v1_ServiceSpec,
+		v1.DeepCopy_v1_ServiceStatus,
+	); err != nil {
+		// if one of the deep copy functions is malformed, detect it immediately.
+		panic(err)
+	}
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/apis/core/v1/defaults.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/apis/core/v1/defaults.go
new file mode 100644
index 000000000000..5e03961883c1
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/apis/core/v1/defaults.go
@@ -0,0 +1,28 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	"k8s.io/kubernetes/pkg/api/v1"
+	"k8s.io/kubernetes/pkg/runtime"
+)
+
+func addDefaultingFuncs(scheme *runtime.Scheme) {
+	scheme.AddDefaultingFuncs(
+		v1.SetDefaults_ServiceSpec,
+	)
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/apis/core/v1/register.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/apis/core/v1/register.go
new file mode 100644
index 000000000000..b20c7659a5a5
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/apis/core/v1/register.go
@@ -0,0 +1,54 @@
+/*
+Copyright 2014 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	"k8s.io/kubernetes/pkg/api/unversioned"
+	"k8s.io/kubernetes/pkg/api/v1"
+	"k8s.io/kubernetes/pkg/runtime"
+	versionedwatch "k8s.io/kubernetes/pkg/watch/versioned"
+)
+
+// GroupName is the group name used in this package
+const GroupName = ""
+
+// SchemeGroupVersion is the group version used to register these objects
+var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: "v1"}
+
+func AddToScheme(scheme *runtime.Scheme) {
+	// Add the API to Scheme.
+	addKnownTypes(scheme)
+	addConversionFuncs(scheme)
+	addDefaultingFuncs(scheme)
+	addDeepCopyFuncs(scheme)
+}
+
+// addKnownTypes adds the list of known types to the given scheme.
+func addKnownTypes(scheme *runtime.Scheme) { + scheme.AddKnownTypes(SchemeGroupVersion, + &v1.Service{}, + &v1.ServiceList{}, + &v1.ListOptions{}, + &v1.DeleteOptions{}, + ) + + // Add common types + scheme.AddKnownTypes(SchemeGroupVersion, &unversioned.Status{}) + + // Add the watch version that applies + versionedwatch.AddToGroupVersion(scheme, SchemeGroupVersion) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/apis/federation/deep_copy_generated.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/apis/federation/deep_copy_generated.go new file mode 100644 index 000000000000..e9936ff49ecb --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/apis/federation/deep_copy_generated.go @@ -0,0 +1,181 @@ +// +build !ignore_autogenerated + +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package federation + +import ( + api "k8s.io/kubernetes/pkg/api" + resource "k8s.io/kubernetes/pkg/api/resource" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + conversion "k8s.io/kubernetes/pkg/conversion" +) + +func init() { + if err := api.Scheme.AddGeneratedDeepCopyFuncs( + DeepCopy_federation_Cluster, + DeepCopy_federation_ClusterCondition, + DeepCopy_federation_ClusterList, + DeepCopy_federation_ClusterMeta, + DeepCopy_federation_ClusterSpec, + DeepCopy_federation_ClusterStatus, + DeepCopy_federation_ServerAddressByClientCIDR, + ); err != nil { + // if one of the deep copy functions is malformed, detect it immediately. 
+ panic(err) + } +} + +func DeepCopy_federation_Cluster(in Cluster, out *Cluster, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + return err + } + if err := DeepCopy_federation_ClusterSpec(in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_federation_ClusterStatus(in.Status, &out.Status, c); err != nil { + return err + } + return nil +} + +func DeepCopy_federation_ClusterCondition(in ClusterCondition, out *ClusterCondition, c *conversion.Cloner) error { + out.Type = in.Type + out.Status = in.Status + if err := unversioned.DeepCopy_unversioned_Time(in.LastProbeTime, &out.LastProbeTime, c); err != nil { + return err + } + if err := unversioned.DeepCopy_unversioned_Time(in.LastTransitionTime, &out.LastTransitionTime, c); err != nil { + return err + } + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +func DeepCopy_federation_ClusterList(in ClusterList, out *ClusterList, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + return err + } + if in.Items != nil { + in, out := in.Items, &out.Items + *out = make([]Cluster, len(in)) + for i := range in { + if err := DeepCopy_federation_Cluster(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func DeepCopy_federation_ClusterMeta(in ClusterMeta, out *ClusterMeta, c *conversion.Cloner) error { + out.Version = in.Version + return nil +} + +func DeepCopy_federation_ClusterSpec(in ClusterSpec, out *ClusterSpec, c *conversion.Cloner) error { + if in.ServerAddressByClientCIDRs != nil { + in, out := in.ServerAddressByClientCIDRs, &out.ServerAddressByClientCIDRs + *out = make([]ServerAddressByClientCIDR, len(in)) + for i := range in { + if err := DeepCopy_federation_ServerAddressByClientCIDR(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.ServerAddressByClientCIDRs = nil + } + if in.SecretRef != nil { + in, out := in.SecretRef, &out.SecretRef + *out = new(api.LocalObjectReference) + if err := api.DeepCopy_api_LocalObjectReference(*in, *out, c); err != nil { + return err + } + } else { + out.SecretRef = nil + } + return nil +} + +func DeepCopy_federation_ClusterStatus(in ClusterStatus, out *ClusterStatus, c *conversion.Cloner) error { + if in.Conditions != nil { + in, out := in.Conditions, &out.Conditions + *out = make([]ClusterCondition, len(in)) + for i := range in { + if err := DeepCopy_federation_ClusterCondition(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Conditions = nil + } + if in.Capacity != nil { + in, out := in.Capacity, &out.Capacity + *out = make(api.ResourceList) + for key, val := range in { + newVal := new(resource.Quantity) + if err := resource.DeepCopy_resource_Quantity(val, newVal, c); err != nil { + return err + } + (*out)[key] = *newVal + } + } else { + out.Capacity = nil + } + if in.Allocatable != nil { + in, out := in.Allocatable, &out.Allocatable + *out = make(api.ResourceList) + for key, val := range in { + newVal := new(resource.Quantity) + if err := resource.DeepCopy_resource_Quantity(val, newVal, c); err != nil { + return err + } + (*out)[key] = *newVal + } + } else { + out.Allocatable = nil + } + if 
err := DeepCopy_federation_ClusterMeta(in.ClusterMeta, &out.ClusterMeta, c); err != nil { + return err + } + if in.Zones != nil { + in, out := in.Zones, &out.Zones + *out = make([]string, len(in)) + copy(*out, in) + } else { + out.Zones = nil + } + out.Region = in.Region + return nil +} + +func DeepCopy_federation_ServerAddressByClientCIDR(in ServerAddressByClientCIDR, out *ServerAddressByClientCIDR, c *conversion.Cloner) error { + out.ClientCIDR = in.ClientCIDR + out.ServerAddress = in.ServerAddress + return nil +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/apis/federation/install/install.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/apis/federation/install/install.go new file mode 100644 index 000000000000..4c09c75d9a01 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/apis/federation/install/install.go @@ -0,0 +1,129 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package install + +import ( + "fmt" + + "github.com/golang/glog" + + "k8s.io/kubernetes/federation/apis/federation" + "k8s.io/kubernetes/federation/apis/federation/v1alpha1" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/meta" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/apimachinery" + "k8s.io/kubernetes/pkg/apimachinery/registered" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/util/sets" +) + +const importPrefix = "k8s.io/kubernetes/federation/apis/federation" + +var accessor = meta.NewAccessor() + +// availableVersions lists all known external versions for this group from most preferred to least preferred +var availableVersions = []unversioned.GroupVersion{v1alpha1.SchemeGroupVersion} + +func init() { + registered.RegisterVersions(availableVersions) + externalVersions := []unversioned.GroupVersion{} + for _, v := range availableVersions { + if registered.IsAllowedVersion(v) { + externalVersions = append(externalVersions, v) + } + } + if len(externalVersions) == 0 { + glog.V(4).Infof("No version is registered for group %v", federation.GroupName) + return + } + + if err := registered.EnableVersions(externalVersions...); err != nil { + glog.V(4).Infof("%v", err) + return + } + if err := enableVersions(externalVersions); err != nil { + glog.V(4).Infof("%v", err) + return + } +} + +// TODO: enableVersions should be centralized rather than spread in each API +// group. +// We can combine registered.RegisterVersions, registered.EnableVersions and +// registered.RegisterGroup once we have moved enableVersions there. +func enableVersions(externalVersions []unversioned.GroupVersion) error { + addVersionsToScheme(externalVersions...) 
+ preferredExternalVersion := externalVersions[0] + + groupMeta := apimachinery.GroupMeta{ + GroupVersion: preferredExternalVersion, + GroupVersions: externalVersions, + RESTMapper: newRESTMapper(externalVersions), + SelfLinker: runtime.SelfLinker(accessor), + InterfacesFor: interfacesFor, + } + + if err := registered.RegisterGroup(groupMeta); err != nil { + return err + } + api.RegisterRESTMapper(groupMeta.RESTMapper) + return nil +} + +func newRESTMapper(externalVersions []unversioned.GroupVersion) meta.RESTMapper { + // the list of kinds that are scoped at the root of the api hierarchy + // if a kind is not enumerated here, it is assumed to have a namespace scope + rootScoped := sets.NewString( + "Cluster", + ) + + ignoredKinds := sets.NewString() + + return api.NewDefaultRESTMapper(externalVersions, interfacesFor, importPrefix, ignoredKinds, rootScoped) +} + +// interfacesFor returns the default ObjectConvertor and MetadataAccessor for a given +// version, or an error if the version is not known. +func interfacesFor(version unversioned.GroupVersion) (*meta.VersionInterfaces, error) { + switch version { + case v1alpha1.SchemeGroupVersion: + return &meta.VersionInterfaces{ + ObjectConvertor: api.Scheme, + MetadataAccessor: accessor, + }, nil + default: + g, _ := registered.Group(federation.GroupName) + return nil, fmt.Errorf("unsupported storage version: %s (valid: %v)", version, g.GroupVersions) + } +} + +func addVersionsToScheme(externalVersions ...unversioned.GroupVersion) { + // add the internal version to Scheme + federation.AddToScheme(api.Scheme) + // add the enabled external versions to Scheme + for _, v := range externalVersions { + if !registered.IsEnabledVersion(v) { + glog.Errorf("Version %s is not enabled, so it will not be added to the Scheme.", v) + continue + } + switch v { + case v1alpha1.SchemeGroupVersion: + v1alpha1.AddToScheme(api.Scheme) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/apis/federation/install/install_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/apis/federation/install/install_test.go new file mode 100644 index 000000000000..87f1ef65d59b --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/apis/federation/install/install_test.go @@ -0,0 +1,118 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package install + +import ( + "encoding/json" + "testing" + + "k8s.io/kubernetes/federation/apis/federation" + "k8s.io/kubernetes/federation/apis/federation/v1alpha1" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/apimachinery/registered" + "k8s.io/kubernetes/pkg/runtime" +) + +func TestResourceVersioner(t *testing.T) { + cluster := federation.Cluster{ObjectMeta: api.ObjectMeta{ResourceVersion: "10"}} + version, err := accessor.ResourceVersion(&cluster) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if version != "10" { + t.Errorf("unexpected version %v", version) + } + + clusterList := federation.ClusterList{ListMeta: unversioned.ListMeta{ResourceVersion: "10"}} + version, err = accessor.ResourceVersion(&clusterList) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if version != "10" { + t.Errorf("unexpected version %v", version) + } +} + +func TestCodec(t *testing.T) { + cluster := federation.Cluster{} + // We do want to use package registered rather than testapi here, because we + // want to test if the package install and package registered work as expected. + data, err := runtime.Encode(api.Codecs.LegacyCodec(registered.GroupOrDie(federation.GroupName).GroupVersion), &cluster) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + other := federation.Cluster{} + if err := json.Unmarshal(data, &other); err != nil { + t.Fatalf("unexpected error: %v", err) + } + if other.APIVersion != registered.GroupOrDie(federation.GroupName).GroupVersion.String() || other.Kind != "Cluster" { + t.Errorf("unexpected unmarshalled object %#v", other) + } +} + +func TestInterfacesFor(t *testing.T) { + if _, err := registered.GroupOrDie(federation.GroupName).InterfacesFor(federation.SchemeGroupVersion); err == nil { + t.Fatalf("unexpected non-error: %v", err) + } + for i, version := range registered.GroupOrDie(federation.GroupName).GroupVersions { + if vi, err := registered.GroupOrDie(federation.GroupName).InterfacesFor(version); err != nil || vi == nil { + t.Fatalf("%d: unexpected result: %v", i, err) + } + } +} + +func TestRESTMapper(t *testing.T) { + gv := v1alpha1.SchemeGroupVersion + clusterGVK := gv.WithKind("Cluster") + + if gvk, err := registered.GroupOrDie(federation.GroupName).RESTMapper.KindFor(gv.WithResource("clusters")); err != nil || gvk != clusterGVK { + t.Errorf("unexpected version mapping: %v %v", gvk, err) + } + + if m, err := registered.GroupOrDie(federation.GroupName).RESTMapper.RESTMapping(clusterGVK.GroupKind(), ""); err != nil || m.GroupVersionKind != clusterGVK || m.Resource != "clusters" { + t.Errorf("unexpected version mapping: %#v %v", m, err) + } + + for _, version := range registered.GroupOrDie(federation.GroupName).GroupVersions { + mapping, err := registered.GroupOrDie(federation.GroupName).RESTMapper.RESTMapping(clusterGVK.GroupKind(), version.Version) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + + if mapping.Resource != "clusters" { + t.Errorf("incorrect resource name: %#v", mapping) + } + if mapping.GroupVersionKind.GroupVersion() != version { + t.Errorf("incorrect groupVersion: %v", mapping) + } + + interfaces, _ := registered.GroupOrDie(federation.GroupName).InterfacesFor(version) + if mapping.ObjectConvertor != interfaces.ObjectConvertor { + t.Errorf("unexpected: %#v, expected: %#v", mapping, interfaces) + } + + rc := &federation.Cluster{ObjectMeta: api.ObjectMeta{Name: "foo"}} + name, err := mapping.MetadataAccessor.Name(rc) + if err != nil { + 
t.Errorf("unexpected error: %v", err) + } + if name != "foo" { + t.Errorf("unable to retrieve object meta with: %v", mapping.MetadataAccessor) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/apis/federation/register.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/apis/federation/register.go new file mode 100644 index 000000000000..2cc7f1f0e254 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/apis/federation/register.go @@ -0,0 +1,56 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package federation + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/runtime" +) + +// GroupName is the group name used in this package +const GroupName = "federation" + +// SchemeGroupVersion is the group version used to register these objects +var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} + +// Kind takes an unqualified kind and returns back a Group qualified GroupKind +func Kind(kind string) unversioned.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns back a Group qualified GroupResource +func Resource(resource string) unversioned.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +// AddToScheme adds the list of known types to api.Scheme. +func AddToScheme(scheme *runtime.Scheme) { + addKnownTypes(scheme) +} + +func addKnownTypes(scheme *runtime.Scheme) { + scheme.AddKnownTypes(SchemeGroupVersion, + &Cluster{}, + &ClusterList{}, + &api.ListOptions{}, + &api.DeleteOptions{}, + ) +} + +func (obj *Cluster) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } +func (obj *ClusterList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/apis/federation/types.generated.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/apis/federation/types.generated.go new file mode 100644 index 000000000000..d8d5ec965cda --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/apis/federation/types.generated.go @@ -0,0 +1,2645 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// ************************************************************ +// DO NOT EDIT. +// THIS FILE IS AUTO-GENERATED BY codecgen.
+// ************************************************************ + +package federation + +import ( + "errors" + "fmt" + codec1978 "github.com/ugorji/go/codec" + pkg1_api "k8s.io/kubernetes/pkg/api" + pkg3_resource "k8s.io/kubernetes/pkg/api/resource" + pkg2_unversioned "k8s.io/kubernetes/pkg/api/unversioned" + pkg4_types "k8s.io/kubernetes/pkg/types" + "reflect" + "runtime" + time "time" +) + +const ( + // ----- content types ---- + codecSelferC_UTF81234 = 1 + codecSelferC_RAW1234 = 0 + // ----- value types used ---- + codecSelferValueTypeArray1234 = 10 + codecSelferValueTypeMap1234 = 9 + // ----- containerStateValues ---- + codecSelfer_containerMapKey1234 = 2 + codecSelfer_containerMapValue1234 = 3 + codecSelfer_containerMapEnd1234 = 4 + codecSelfer_containerArrayElem1234 = 6 + codecSelfer_containerArrayEnd1234 = 7 +) + +var ( + codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) + codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) +) + +type codecSelfer1234 struct{} + +func init() { + if codec1978.GenVersion != 5 { + _, file, _, _ := runtime.Caller(0) + err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v", + 5, codec1978.GenVersion, file) + panic(err) + } + if false { // reference the types, but skip this branch at build/run time + var v0 pkg1_api.LocalObjectReference + var v1 pkg3_resource.Quantity + var v2 pkg2_unversioned.Time + var v3 pkg4_types.UID + var v4 time.Time + _, _, _, _, _ = v0, v1, v2, v3, v4 + } +} + +func (x *ServerAddressByClientCIDR) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ClientCIDR)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("clientCIDR")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ClientCIDR)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ServerAddress)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("serverAddress")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ServerAddress)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ServerAddressByClientCIDR) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r 
:= codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ServerAddressByClientCIDR) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "clientCIDR": + if r.TryDecodeAsNil() { + x.ClientCIDR = "" + } else { + x.ClientCIDR = string(r.DecodeString()) + } + case "serverAddress": + if r.TryDecodeAsNil() { + x.ServerAddress = "" + } else { + x.ServerAddress = string(r.DecodeString()) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ServerAddressByClientCIDR) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ClientCIDR = "" + } else { + x.ClientCIDR = string(r.DecodeString()) + } + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ServerAddress = "" + } else { + x.ServerAddress = string(r.DecodeString()) + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ClusterSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.ServerAddressByClientCIDRs == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSliceServerAddressByClientCIDR(([]ServerAddressByClientCIDR)(x.ServerAddressByClientCIDRs), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("serverAddressByClientCIDRs")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ServerAddressByClientCIDRs == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSliceServerAddressByClientCIDR(([]ServerAddressByClientCIDR)(x.ServerAddressByClientCIDRs), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.SecretRef == nil { + r.EncodeNil() + } else { + x.SecretRef.CodecEncodeSelf(e) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("secretRef")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.SecretRef == nil { + r.EncodeNil() + } else { + x.SecretRef.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ClusterSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ClusterSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "serverAddressByClientCIDRs": + if r.TryDecodeAsNil() { + x.ServerAddressByClientCIDRs = nil + } else { + yyv4 := &x.ServerAddressByClientCIDRs + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSliceServerAddressByClientCIDR((*[]ServerAddressByClientCIDR)(yyv4), d) + } + } + case "secretRef": + if r.TryDecodeAsNil() { + if x.SecretRef != nil { + x.SecretRef = nil + } + } else { + if x.SecretRef == nil { + x.SecretRef = new(pkg1_api.LocalObjectReference) + } + x.SecretRef.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ClusterSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := 
codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ServerAddressByClientCIDRs = nil + } else { + yyv8 := &x.ServerAddressByClientCIDRs + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + h.decSliceServerAddressByClientCIDR((*[]ServerAddressByClientCIDR)(yyv8), d) + } + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.SecretRef != nil { + x.SecretRef = nil + } + } else { + if x.SecretRef == nil { + x.SecretRef = new(pkg1_api.LocalObjectReference) + } + x.SecretRef.CodecDecodeSelf(d) + } + for { + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj7-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x ClusterConditionType) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *ClusterConditionType) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *ClusterCondition) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [6]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[2] = true + yyq2[3] = true + yyq2[4] = x.Reason != "" + yyq2[5] = x.Message != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(6) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + x.Type.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("type")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Type.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yysf7 := &x.Status + yysf7.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yysf8 := &x.Status + yysf8.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.LastProbeTime + yym11 := z.EncBinary() + _ = yym11 + if false { + } else 
if z.HasExtensions() && z.EncExt(yy10) { + } else if yym11 { + z.EncBinaryMarshal(yy10) + } else if !yym11 && z.IsJSONHandle() { + z.EncJSONMarshal(yy10) + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("lastProbeTime")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.LastProbeTime + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else if yym13 { + z.EncBinaryMarshal(yy12) + } else if !yym13 && z.IsJSONHandle() { + z.EncJSONMarshal(yy12) + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.LastTransitionTime + yym16 := z.EncBinary() + _ = yym16 + if false { + } else if z.HasExtensions() && z.EncExt(yy15) { + } else if yym16 { + z.EncBinaryMarshal(yy15) + } else if !yym16 && z.IsJSONHandle() { + z.EncJSONMarshal(yy15) + } else { + z.EncFallback(yy15) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("lastTransitionTime")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.LastTransitionTime + yym18 := z.EncBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.EncExt(yy17) { + } else if yym18 { + z.EncBinaryMarshal(yy17) + } else if !yym18 && z.IsJSONHandle() { + z.EncJSONMarshal(yy17) + } else { + z.EncFallback(yy17) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("reason")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym21 := z.EncBinary() + _ = yym21 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("message")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym24 := z.EncBinary() + _ = yym24 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ClusterCondition) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == 
codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ClusterCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "type": + if r.TryDecodeAsNil() { + x.Type = "" + } else { + x.Type = ClusterConditionType(r.DecodeString()) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = "" + } else { + x.Status = pkg1_api.ConditionStatus(r.DecodeString()) + } + case "lastProbeTime": + if r.TryDecodeAsNil() { + x.LastProbeTime = pkg2_unversioned.Time{} + } else { + yyv6 := &x.LastProbeTime + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(yyv6) { + } else if yym7 { + z.DecBinaryUnmarshal(yyv6) + } else if !yym7 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv6) + } else { + z.DecFallback(yyv6, false) + } + } + case "lastTransitionTime": + if r.TryDecodeAsNil() { + x.LastTransitionTime = pkg2_unversioned.Time{} + } else { + yyv8 := &x.LastTransitionTime + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else if yym9 { + z.DecBinaryUnmarshal(yyv8) + } else if !yym9 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv8) + } else { + z.DecFallback(yyv8, false) + } + } + case "reason": + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + x.Reason = string(r.DecodeString()) + } + case "message": + if r.TryDecodeAsNil() { + x.Message = "" + } else { + x.Message = string(r.DecodeString()) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ClusterCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Type = "" + } else { + x.Type = ClusterConditionType(r.DecodeString()) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = "" + } else { + x.Status = pkg1_api.ConditionStatus(r.DecodeString()) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.LastProbeTime = pkg2_unversioned.Time{} + } else { + yyv15 := 
&x.LastProbeTime + yym16 := z.DecBinary() + _ = yym16 + if false { + } else if z.HasExtensions() && z.DecExt(yyv15) { + } else if yym16 { + z.DecBinaryUnmarshal(yyv15) + } else if !yym16 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv15) + } else { + z.DecFallback(yyv15, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.LastTransitionTime = pkg2_unversioned.Time{} + } else { + yyv17 := &x.LastTransitionTime + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else if yym18 { + z.DecBinaryUnmarshal(yyv17) + } else if !yym18 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv17) + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + x.Reason = string(r.DecodeString()) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Message = "" + } else { + x.Message = string(r.DecodeString()) + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ClusterMeta) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Version != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Version)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("version")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Version)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ClusterMeta) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + 
yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ClusterMeta) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "version": + if r.TryDecodeAsNil() { + x.Version = "" + } else { + x.Version = string(r.DecodeString()) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ClusterMeta) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj5 int + var yyb5 bool + var yyhl5 bool = l >= 0 + yyj5++ + if yyhl5 { + yyb5 = yyj5 > l + } else { + yyb5 = r.CheckBreak() + } + if yyb5 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Version = "" + } else { + x.Version = string(r.DecodeString()) + } + for { + yyj5++ + if yyhl5 { + yyb5 = yyj5 > l + } else { + yyb5 = r.CheckBreak() + } + if yyb5 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj5-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ClusterStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [6]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.Conditions) != 0 + yyq2[1] = len(x.Capacity) != 0 + yyq2[2] = len(x.Allocatable) != 0 + yyq2[3] = len(x.Zones) != 0 + yyq2[4] = x.Region != "" + yyq2[5] = x.Version != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(6) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Conditions == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSliceClusterCondition(([]ClusterCondition)(x.Conditions), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("conditions")) + 
z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Conditions == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSliceClusterCondition(([]ClusterCondition)(x.Conditions), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Capacity == nil { + r.EncodeNil() + } else { + yysf7 := &x.Capacity + yysf7.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("capacity")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Capacity == nil { + r.EncodeNil() + } else { + yysf8 := &x.Capacity + yysf8.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.Allocatable == nil { + r.EncodeNil() + } else { + yysf10 := &x.Allocatable + yysf10.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("allocatable")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Allocatable == nil { + r.EncodeNil() + } else { + yysf11 := &x.Allocatable + yysf11.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.Zones == nil { + r.EncodeNil() + } else { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + z.F.EncSliceStringV(x.Zones, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("zones")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Zones == nil { + r.EncodeNil() + } else { + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + z.F.EncSliceStringV(x.Zones, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Region)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("region")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Region)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Version)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("version")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Version)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ClusterStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r 
:= codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ClusterStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "conditions": + if r.TryDecodeAsNil() { + x.Conditions = nil + } else { + yyv4 := &x.Conditions + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSliceClusterCondition((*[]ClusterCondition)(yyv4), d) + } + } + case "capacity": + if r.TryDecodeAsNil() { + x.Capacity = nil + } else { + yyv6 := &x.Capacity + yyv6.CodecDecodeSelf(d) + } + case "allocatable": + if r.TryDecodeAsNil() { + x.Allocatable = nil + } else { + yyv7 := &x.Allocatable + yyv7.CodecDecodeSelf(d) + } + case "zones": + if r.TryDecodeAsNil() { + x.Zones = nil + } else { + yyv8 := &x.Zones + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + z.F.DecSliceStringX(yyv8, false, d) + } + } + case "region": + if r.TryDecodeAsNil() { + x.Region = "" + } else { + x.Region = string(r.DecodeString()) + } + case "version": + if r.TryDecodeAsNil() { + x.Version = "" + } else { + x.Version = string(r.DecodeString()) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ClusterStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Conditions = nil + } else { + yyv13 := &x.Conditions + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + h.decSliceClusterCondition((*[]ClusterCondition)(yyv13), d) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Capacity = nil + } else { + yyv15 := &x.Capacity + yyv15.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Allocatable = nil + } else { + yyv16 := &x.Allocatable + yyv16.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Zones = nil + } else { + yyv17 := &x.Zones + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + z.F.DecSliceStringX(yyv17, false, d) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Region = "" + } else { + x.Region = string(r.DecodeString()) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Version = "" + } else { + x.Version = string(r.DecodeString()) + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *Cluster) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = true + yyq2[1] = true + yyq2[2] = true + yyq2[3] = x.Kind != "" + yyq2[4] = x.APIVersion != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yy4 := &x.ObjectMeta + yy4.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.ObjectMeta + yy6.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yy9 := &x.Spec + yy9.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy11 := &x.Spec + yy11.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy14 := &x.Status + yy14.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy16 := &x.Status + 
yy16.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Cluster) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Cluster) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_api.ObjectMeta{} + } else { + yyv4 := &x.ObjectMeta + yyv4.CodecDecodeSelf(d) + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = ClusterSpec{} + } else { + yyv5 := &x.Spec + yyv5.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = ClusterStatus{} + } else { + yyv6 := &x.Status + yyv6.CodecDecodeSelf(d) + } + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + 
z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Cluster) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj9 int + var yyb9 bool + var yyhl9 bool = l >= 0 + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_api.ObjectMeta{} + } else { + yyv10 := &x.ObjectMeta + yyv10.CodecDecodeSelf(d) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = ClusterSpec{} + } else { + yyv11 := &x.Spec + yyv11.CodecDecodeSelf(d) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = ClusterStatus{} + } else { + yyv12 := &x.Status + yyv12.CodecDecodeSelf(d) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + for { + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj9-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ClusterList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = true + yyq2[2] = x.Kind != "" + yyq2[3] = x.APIVersion != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yy4 := &x.ListMeta + yym5 := z.EncBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.EncExt(yy4) { + } else { + z.EncFallback(yy4) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.ListMeta + yym7 := z.EncBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && 
z.EncExt(yy6) { + } else { + z.EncFallback(yy6) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym9 := z.EncBinary() + _ = yym9 + if false { + } else { + h.encSliceCluster(([]Cluster)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + h.encSliceCluster(([]Cluster)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym12 := z.EncBinary() + _ = yym12 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ClusterList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ClusterList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_unversioned.ListMeta{} + } else { + yyv4 := 
&x.ListMeta + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(yyv4) { + } else { + z.DecFallback(yyv4, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv6 := &x.Items + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + h.decSliceCluster((*[]Cluster)(yyv6), d) + } + } + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ClusterList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_unversioned.ListMeta{} + } else { + yyv11 := &x.ListMeta + yym12 := z.DecBinary() + _ = yym12 + if false { + } else if z.HasExtensions() && z.DecExt(yyv11) { + } else { + z.DecFallback(yyv11, false) + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv13 := &x.Items + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + h.decSliceCluster((*[]Cluster)(yyv13), d) + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) encSliceServerAddressByClientCIDR(v []ServerAddressByClientCIDR, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceServerAddressByClientCIDR(v *[]ServerAddressByClientCIDR, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 
== nil { + yyv1 = []ServerAddressByClientCIDR{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 32) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]ServerAddressByClientCIDR, yyrl1) + } + } else { + yyv1 = make([]ServerAddressByClientCIDR, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ServerAddressByClientCIDR{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, ServerAddressByClientCIDR{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ServerAddressByClientCIDR{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, ServerAddressByClientCIDR{}) // var yyz1 ServerAddressByClientCIDR + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = ServerAddressByClientCIDR{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []ServerAddressByClientCIDR{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceClusterCondition(v []ClusterCondition, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceClusterCondition(v *[]ClusterCondition, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []ClusterCondition{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 112) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]ClusterCondition, yyrl1) + } + } else { + yyv1 = make([]ClusterCondition, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ClusterCondition{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, ClusterCondition{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + 
yyv1[yyj1] = ClusterCondition{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, ClusterCondition{}) // var yyz1 ClusterCondition + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = ClusterCondition{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []ClusterCondition{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceCluster(v []Cluster, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceCluster(v *[]Cluster, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []Cluster{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 368) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]Cluster, yyrl1) + } + } else { + yyv1 = make([]Cluster, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Cluster{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, Cluster{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Cluster{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, Cluster{}) // var yyz1 Cluster + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = Cluster{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []Cluster{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/apis/federation/types.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/apis/federation/types.go new file mode 100644 index 000000000000..62227291da74 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/apis/federation/types.go @@ -0,0 +1,119 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package federation
+
+import (
+	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/api/unversioned"
+)
+
+// ServerAddressByClientCIDR helps clients determine the server address they should use, depending on the clientCIDR they match.
+type ServerAddressByClientCIDR struct {
+	// The CIDR with which clients can match their IP to figure out the server address that they should use.
+	ClientCIDR string `json:"clientCIDR" protobuf:"bytes,1,opt,name=clientCIDR"`
+	// Address of this server, suitable for a client that matches the above CIDR.
+	// This can be a hostname, hostname:port, IP or IP:port.
+	ServerAddress string `json:"serverAddress" protobuf:"bytes,2,opt,name=serverAddress"`
+}
+
+// ClusterSpec describes the attributes of a Kubernetes cluster.
+type ClusterSpec struct {
+	// A map of client CIDR to server address.
+	// This is to help clients reach servers in the most network-efficient way possible.
+	// Clients can use the appropriate server address as per the CIDR that they match.
+	// In case of multiple matches, clients should use the longest matching CIDR.
+	ServerAddressByClientCIDRs []ServerAddressByClientCIDR `json:"serverAddressByClientCIDRs" patchStrategy:"merge" patchMergeKey:"clientCIDR"`
+	// Name of the secret containing the kubeconfig used to access this cluster.
+	// The secret is read from the Kubernetes cluster that is hosting the federation control plane.
+	// The admin needs to ensure that the required secret exists. The secret should be in the same namespace where the federation control plane is hosted, and it should have a kubeconfig in its data under the key "kubeconfig".
+	// This will later be changed to a reference to a secret in the federation control plane once the federation control plane supports secrets.
+	SecretRef *api.LocalObjectReference `json:"secretRef"`
+}
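// Editor's note: the ClusterSpec comment above says that when several CIDR
// entries match, clients should use the longest matching CIDR. A minimal
// sketch of that selection using only the standard library; it is not part
// of the vendored file, the helper name pickServerAddress is hypothetical,
// and it assumes "net" has been added to the imports.
func pickServerAddress(clientIP net.IP, addrs []ServerAddressByClientCIDR) string {
	bestAddr, bestOnes := "", -1
	for _, a := range addrs {
		_, ipnet, err := net.ParseCIDR(a.ClientCIDR)
		if err != nil || !ipnet.Contains(clientIP) {
			continue // skip unparseable or non-matching entries
		}
		// Prefer the most specific (longest) prefix seen so far.
		if ones, _ := ipnet.Mask.Size(); ones > bestOnes {
			bestAddr, bestOnes = a.ServerAddress, ones
		}
	}
	return bestAddr
}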
+
+type ClusterConditionType string
+
+// These are valid conditions of a cluster.
+const (
+	// ClusterReady means the cluster is ready to accept workloads.
+	ClusterReady ClusterConditionType = "Ready"
+	// ClusterOffline means the cluster is temporarily down or not reachable.
+	ClusterOffline ClusterConditionType = "Offline"
+)
+
+// ClusterCondition describes the current state of a cluster.
+type ClusterCondition struct {
+	// Type of cluster condition, e.g. Ready or Offline.
+	Type ClusterConditionType `json:"type"`
+	// Status of the condition, one of True, False, Unknown.
+	Status api.ConditionStatus `json:"status"`
+	// Last time the condition was checked.
+	LastProbeTime unversioned.Time `json:"lastProbeTime,omitempty"`
+	// Last time the condition transitioned from one status to another.
+	LastTransitionTime unversioned.Time `json:"lastTransitionTime,omitempty"`
+	// (brief) reason for the condition's last transition.
+	Reason string `json:"reason,omitempty"`
+	// Human-readable message indicating details about the last transition.
+	Message string `json:"message,omitempty"`
+}
+
+// ClusterMeta holds metadata about a cluster.
+type ClusterMeta struct {
+	// Release version of the cluster.
+	Version string `json:"version,omitempty"`
+}
+
+// ClusterStatus is information about the current status of a cluster, updated periodically by the cluster controller.
+type ClusterStatus struct {
+	// Conditions is an array of current cluster conditions.
+	Conditions []ClusterCondition `json:"conditions,omitempty"`
+	// Capacity represents the total resources of the cluster.
+	Capacity api.ResourceList `json:"capacity,omitempty"`
+	// Allocatable represents the total resources of a cluster that are available for scheduling.
+	Allocatable api.ResourceList `json:"allocatable,omitempty"`
+	ClusterMeta `json:",inline"`
+	// Zones is the list of availability zones in which the nodes of the cluster exist, e.g. 'us-east1-a'.
+	// These will always be in the same region.
+	Zones []string `json:"zones,omitempty"`
+	// Region is the name of the region in which all of the nodes in the cluster exist, e.g. 'us-east1'.
+	Region string `json:"region,omitempty"`
+}
+
+// +genclient=true,nonNamespaced=true
+
+// Cluster contains information about a registered cluster in a federated Kubernetes setup. Clusters are not namespaced and have unique names in the federation.
+type Cluster struct {
+	unversioned.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+	api.ObjectMeta `json:"metadata,omitempty"`
+
+	// Spec defines the behavior of the Cluster.
+	Spec ClusterSpec `json:"spec,omitempty"`
+	// Status describes the current status of a Cluster.
+	Status ClusterStatus `json:"status,omitempty"`
+}
+
+// ClusterList is a list of all the Kubernetes clusters registered to the federation.
+type ClusterList struct {
+	unversioned.TypeMeta `json:",inline"`
+	// Standard list metadata.
+	// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
+	unversioned.ListMeta `json:"metadata,omitempty"`
+
+	// List of Cluster objects.
+	Items []Cluster `json:"items"`
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/apis/federation/v1alpha1/conversion.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/apis/federation/v1alpha1/conversion.go
new file mode 100644
index 000000000000..c4c8ebbb37f4
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/apis/federation/v1alpha1/conversion.go
@@ -0,0 +1,40 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+	"fmt"
+
+	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/runtime"
+)
+
+func addConversionFuncs(scheme *runtime.Scheme) {
+	err := api.Scheme.AddFieldLabelConversionFunc(SchemeGroupVersion.String(), "Cluster",
+		func(label, value string) (string, string, error) {
+			switch label {
+			case "metadata.name":
+				return label, value, nil
+			default:
+				return "", "", fmt.Errorf("field label not supported: %s", label)
+			}
+		})
+	if err != nil {
+		// If one of the conversion functions is malformed, detect it immediately.
+ panic(err) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/apis/federation/v1alpha1/conversion_generated.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/apis/federation/v1alpha1/conversion_generated.go new file mode 100644 index 000000000000..d09902d996af --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/apis/federation/v1alpha1/conversion_generated.go @@ -0,0 +1,355 @@ +// +build !ignore_autogenerated + +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by conversion-gen. Do not edit it manually! + +package v1alpha1 + +import ( + federation "k8s.io/kubernetes/federation/apis/federation" + api "k8s.io/kubernetes/pkg/api" + resource "k8s.io/kubernetes/pkg/api/resource" + v1 "k8s.io/kubernetes/pkg/api/v1" + conversion "k8s.io/kubernetes/pkg/conversion" +) + +func init() { + if err := api.Scheme.AddGeneratedConversionFuncs( + Convert_v1alpha1_Cluster_To_federation_Cluster, + Convert_federation_Cluster_To_v1alpha1_Cluster, + Convert_v1alpha1_ClusterCondition_To_federation_ClusterCondition, + Convert_federation_ClusterCondition_To_v1alpha1_ClusterCondition, + Convert_v1alpha1_ClusterList_To_federation_ClusterList, + Convert_federation_ClusterList_To_v1alpha1_ClusterList, + Convert_v1alpha1_ClusterMeta_To_federation_ClusterMeta, + Convert_federation_ClusterMeta_To_v1alpha1_ClusterMeta, + Convert_v1alpha1_ClusterSpec_To_federation_ClusterSpec, + Convert_federation_ClusterSpec_To_v1alpha1_ClusterSpec, + Convert_v1alpha1_ClusterStatus_To_federation_ClusterStatus, + Convert_federation_ClusterStatus_To_v1alpha1_ClusterStatus, + Convert_v1alpha1_ServerAddressByClientCIDR_To_federation_ServerAddressByClientCIDR, + Convert_federation_ServerAddressByClientCIDR_To_v1alpha1_ServerAddressByClientCIDR, + ); err != nil { + // if one of the conversion functions is malformed, detect it immediately. + panic(err) + } +} + +func autoConvert_v1alpha1_Cluster_To_federation_Cluster(in *Cluster, out *federation.Cluster, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + // TODO: Inefficient conversion - can we improve it? 
+ if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { + return err + } + if err := Convert_v1alpha1_ClusterSpec_To_federation_ClusterSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1alpha1_ClusterStatus_To_federation_ClusterStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v1alpha1_Cluster_To_federation_Cluster(in *Cluster, out *federation.Cluster, s conversion.Scope) error { + return autoConvert_v1alpha1_Cluster_To_federation_Cluster(in, out, s) +} + +func autoConvert_federation_Cluster_To_v1alpha1_Cluster(in *federation.Cluster, out *Cluster, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + // TODO: Inefficient conversion - can we improve it? + if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { + return err + } + if err := Convert_federation_ClusterSpec_To_v1alpha1_ClusterSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_federation_ClusterStatus_To_v1alpha1_ClusterStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_federation_Cluster_To_v1alpha1_Cluster(in *federation.Cluster, out *Cluster, s conversion.Scope) error { + return autoConvert_federation_Cluster_To_v1alpha1_Cluster(in, out, s) +} + +func autoConvert_v1alpha1_ClusterCondition_To_federation_ClusterCondition(in *ClusterCondition, out *federation.ClusterCondition, s conversion.Scope) error { + out.Type = federation.ClusterConditionType(in.Type) + out.Status = api.ConditionStatus(in.Status) + if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastProbeTime, &out.LastProbeTime, s); err != nil { + return err + } + if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastTransitionTime, &out.LastTransitionTime, s); err != nil { + return err + } + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +func Convert_v1alpha1_ClusterCondition_To_federation_ClusterCondition(in *ClusterCondition, out *federation.ClusterCondition, s conversion.Scope) error { + return autoConvert_v1alpha1_ClusterCondition_To_federation_ClusterCondition(in, out, s) +} + +func autoConvert_federation_ClusterCondition_To_v1alpha1_ClusterCondition(in *federation.ClusterCondition, out *ClusterCondition, s conversion.Scope) error { + out.Type = ClusterConditionType(in.Type) + out.Status = v1.ConditionStatus(in.Status) + if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastProbeTime, &out.LastProbeTime, s); err != nil { + return err + } + if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastTransitionTime, &out.LastTransitionTime, s); err != nil { + return err + } + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +func Convert_federation_ClusterCondition_To_v1alpha1_ClusterCondition(in *federation.ClusterCondition, out *ClusterCondition, s conversion.Scope) error { + return autoConvert_federation_ClusterCondition_To_v1alpha1_ClusterCondition(in, out, s) +} + +func autoConvert_v1alpha1_ClusterList_To_federation_ClusterList(in *ClusterList, out *federation.ClusterList, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { + return err + } + if in.Items != nil { + in, 
out := &in.Items, &out.Items + *out = make([]federation.Cluster, len(*in)) + for i := range *in { + if err := Convert_v1alpha1_Cluster_To_federation_Cluster(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_v1alpha1_ClusterList_To_federation_ClusterList(in *ClusterList, out *federation.ClusterList, s conversion.Scope) error { + return autoConvert_v1alpha1_ClusterList_To_federation_ClusterList(in, out, s) +} + +func autoConvert_federation_ClusterList_To_v1alpha1_ClusterList(in *federation.ClusterList, out *ClusterList, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { + return err + } + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Cluster, len(*in)) + for i := range *in { + if err := Convert_federation_Cluster_To_v1alpha1_Cluster(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_federation_ClusterList_To_v1alpha1_ClusterList(in *federation.ClusterList, out *ClusterList, s conversion.Scope) error { + return autoConvert_federation_ClusterList_To_v1alpha1_ClusterList(in, out, s) +} + +func autoConvert_v1alpha1_ClusterMeta_To_federation_ClusterMeta(in *ClusterMeta, out *federation.ClusterMeta, s conversion.Scope) error { + out.Version = in.Version + return nil +} + +func Convert_v1alpha1_ClusterMeta_To_federation_ClusterMeta(in *ClusterMeta, out *federation.ClusterMeta, s conversion.Scope) error { + return autoConvert_v1alpha1_ClusterMeta_To_federation_ClusterMeta(in, out, s) +} + +func autoConvert_federation_ClusterMeta_To_v1alpha1_ClusterMeta(in *federation.ClusterMeta, out *ClusterMeta, s conversion.Scope) error { + out.Version = in.Version + return nil +} + +func Convert_federation_ClusterMeta_To_v1alpha1_ClusterMeta(in *federation.ClusterMeta, out *ClusterMeta, s conversion.Scope) error { + return autoConvert_federation_ClusterMeta_To_v1alpha1_ClusterMeta(in, out, s) +} + +func autoConvert_v1alpha1_ClusterSpec_To_federation_ClusterSpec(in *ClusterSpec, out *federation.ClusterSpec, s conversion.Scope) error { + if in.ServerAddressByClientCIDRs != nil { + in, out := &in.ServerAddressByClientCIDRs, &out.ServerAddressByClientCIDRs + *out = make([]federation.ServerAddressByClientCIDR, len(*in)) + for i := range *in { + if err := Convert_v1alpha1_ServerAddressByClientCIDR_To_federation_ServerAddressByClientCIDR(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.ServerAddressByClientCIDRs = nil + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(api.LocalObjectReference) + // TODO: Inefficient conversion - can we improve it? 
+ if err := s.Convert(*in, *out, 0); err != nil { + return err + } + } else { + out.SecretRef = nil + } + return nil +} + +func Convert_v1alpha1_ClusterSpec_To_federation_ClusterSpec(in *ClusterSpec, out *federation.ClusterSpec, s conversion.Scope) error { + return autoConvert_v1alpha1_ClusterSpec_To_federation_ClusterSpec(in, out, s) +} + +func autoConvert_federation_ClusterSpec_To_v1alpha1_ClusterSpec(in *federation.ClusterSpec, out *ClusterSpec, s conversion.Scope) error { + if in.ServerAddressByClientCIDRs != nil { + in, out := &in.ServerAddressByClientCIDRs, &out.ServerAddressByClientCIDRs + *out = make([]ServerAddressByClientCIDR, len(*in)) + for i := range *in { + if err := Convert_federation_ServerAddressByClientCIDR_To_v1alpha1_ServerAddressByClientCIDR(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.ServerAddressByClientCIDRs = nil + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(v1.LocalObjectReference) + // TODO: Inefficient conversion - can we improve it? + if err := s.Convert(*in, *out, 0); err != nil { + return err + } + } else { + out.SecretRef = nil + } + return nil +} + +func Convert_federation_ClusterSpec_To_v1alpha1_ClusterSpec(in *federation.ClusterSpec, out *ClusterSpec, s conversion.Scope) error { + return autoConvert_federation_ClusterSpec_To_v1alpha1_ClusterSpec(in, out, s) +} + +func autoConvert_v1alpha1_ClusterStatus_To_federation_ClusterStatus(in *ClusterStatus, out *federation.ClusterStatus, s conversion.Scope) error { + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]federation.ClusterCondition, len(*in)) + for i := range *in { + if err := Convert_v1alpha1_ClusterCondition_To_federation_ClusterCondition(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Conditions = nil + } + if err := v1.Convert_v1_ResourceList_To_api_ResourceList(&in.Capacity, &out.Capacity, s); err != nil { + return err + } + if err := v1.Convert_v1_ResourceList_To_api_ResourceList(&in.Allocatable, &out.Allocatable, s); err != nil { + return err + } + if err := Convert_v1alpha1_ClusterMeta_To_federation_ClusterMeta(&in.ClusterMeta, &out.ClusterMeta, s); err != nil { + return err + } + out.Zones = in.Zones + out.Region = in.Region + return nil +} + +func Convert_v1alpha1_ClusterStatus_To_federation_ClusterStatus(in *ClusterStatus, out *federation.ClusterStatus, s conversion.Scope) error { + return autoConvert_v1alpha1_ClusterStatus_To_federation_ClusterStatus(in, out, s) +} + +func autoConvert_federation_ClusterStatus_To_v1alpha1_ClusterStatus(in *federation.ClusterStatus, out *ClusterStatus, s conversion.Scope) error { + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]ClusterCondition, len(*in)) + for i := range *in { + if err := Convert_federation_ClusterCondition_To_v1alpha1_ClusterCondition(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Conditions = nil + } + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = make(v1.ResourceList, len(*in)) + for key, val := range *in { + newVal := new(resource.Quantity) + if err := api.Convert_resource_Quantity_To_resource_Quantity(&val, newVal, s); err != nil { + return err + } + (*out)[v1.ResourceName(key)] = *newVal + } + } else { + out.Capacity = nil + } + if in.Allocatable != nil { + in, out := &in.Allocatable, &out.Allocatable + *out = make(v1.ResourceList, len(*in)) + for key, val := range *in { + newVal := 
new(resource.Quantity) + if err := api.Convert_resource_Quantity_To_resource_Quantity(&val, newVal, s); err != nil { + return err + } + (*out)[v1.ResourceName(key)] = *newVal + } + } else { + out.Allocatable = nil + } + if err := Convert_federation_ClusterMeta_To_v1alpha1_ClusterMeta(&in.ClusterMeta, &out.ClusterMeta, s); err != nil { + return err + } + out.Zones = in.Zones + out.Region = in.Region + return nil +} + +func Convert_federation_ClusterStatus_To_v1alpha1_ClusterStatus(in *federation.ClusterStatus, out *ClusterStatus, s conversion.Scope) error { + return autoConvert_federation_ClusterStatus_To_v1alpha1_ClusterStatus(in, out, s) +} + +func autoConvert_v1alpha1_ServerAddressByClientCIDR_To_federation_ServerAddressByClientCIDR(in *ServerAddressByClientCIDR, out *federation.ServerAddressByClientCIDR, s conversion.Scope) error { + out.ClientCIDR = in.ClientCIDR + out.ServerAddress = in.ServerAddress + return nil +} + +func Convert_v1alpha1_ServerAddressByClientCIDR_To_federation_ServerAddressByClientCIDR(in *ServerAddressByClientCIDR, out *federation.ServerAddressByClientCIDR, s conversion.Scope) error { + return autoConvert_v1alpha1_ServerAddressByClientCIDR_To_federation_ServerAddressByClientCIDR(in, out, s) +} + +func autoConvert_federation_ServerAddressByClientCIDR_To_v1alpha1_ServerAddressByClientCIDR(in *federation.ServerAddressByClientCIDR, out *ServerAddressByClientCIDR, s conversion.Scope) error { + out.ClientCIDR = in.ClientCIDR + out.ServerAddress = in.ServerAddress + return nil +} + +func Convert_federation_ServerAddressByClientCIDR_To_v1alpha1_ServerAddressByClientCIDR(in *federation.ServerAddressByClientCIDR, out *ServerAddressByClientCIDR, s conversion.Scope) error { + return autoConvert_federation_ServerAddressByClientCIDR_To_v1alpha1_ServerAddressByClientCIDR(in, out, s) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/apis/federation/v1alpha1/deep_copy_generated.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/apis/federation/v1alpha1/deep_copy_generated.go new file mode 100644 index 000000000000..52e672ccb213 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/apis/federation/v1alpha1/deep_copy_generated.go @@ -0,0 +1,182 @@ +// +build !ignore_autogenerated + +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! 
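// Editor's note (not part of the vendored file): a sketch of how the
// conversion functions registered by conversion_generated.go above are
// normally driven, assuming the usual runtime.Scheme wiring; the variable
// names are illustrative only.
//
//	versioned := &Cluster{}           // v1alpha1 representation
//	internal := &federation.Cluster{} // internal representation
//	// Convert looks up the registered function
//	// Convert_v1alpha1_Cluster_To_federation_Cluster and invokes it.
//	if err := api.Scheme.Convert(versioned, internal); err != nil {
//		// handle the conversion error
//	}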
+ +package v1alpha1 + +import ( + api "k8s.io/kubernetes/pkg/api" + resource "k8s.io/kubernetes/pkg/api/resource" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + v1 "k8s.io/kubernetes/pkg/api/v1" + conversion "k8s.io/kubernetes/pkg/conversion" +) + +func init() { + if err := api.Scheme.AddGeneratedDeepCopyFuncs( + DeepCopy_v1alpha1_Cluster, + DeepCopy_v1alpha1_ClusterCondition, + DeepCopy_v1alpha1_ClusterList, + DeepCopy_v1alpha1_ClusterMeta, + DeepCopy_v1alpha1_ClusterSpec, + DeepCopy_v1alpha1_ClusterStatus, + DeepCopy_v1alpha1_ServerAddressByClientCIDR, + ); err != nil { + // if one of the deep copy functions is malformed, detect it immediately. + panic(err) + } +} + +func DeepCopy_v1alpha1_Cluster(in Cluster, out *Cluster, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + return err + } + if err := DeepCopy_v1alpha1_ClusterSpec(in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_v1alpha1_ClusterStatus(in.Status, &out.Status, c); err != nil { + return err + } + return nil +} + +func DeepCopy_v1alpha1_ClusterCondition(in ClusterCondition, out *ClusterCondition, c *conversion.Cloner) error { + out.Type = in.Type + out.Status = in.Status + if err := unversioned.DeepCopy_unversioned_Time(in.LastProbeTime, &out.LastProbeTime, c); err != nil { + return err + } + if err := unversioned.DeepCopy_unversioned_Time(in.LastTransitionTime, &out.LastTransitionTime, c); err != nil { + return err + } + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +func DeepCopy_v1alpha1_ClusterList(in ClusterList, out *ClusterList, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + return err + } + if in.Items != nil { + in, out := in.Items, &out.Items + *out = make([]Cluster, len(in)) + for i := range in { + if err := DeepCopy_v1alpha1_Cluster(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func DeepCopy_v1alpha1_ClusterMeta(in ClusterMeta, out *ClusterMeta, c *conversion.Cloner) error { + out.Version = in.Version + return nil +} + +func DeepCopy_v1alpha1_ClusterSpec(in ClusterSpec, out *ClusterSpec, c *conversion.Cloner) error { + if in.ServerAddressByClientCIDRs != nil { + in, out := in.ServerAddressByClientCIDRs, &out.ServerAddressByClientCIDRs + *out = make([]ServerAddressByClientCIDR, len(in)) + for i := range in { + if err := DeepCopy_v1alpha1_ServerAddressByClientCIDR(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.ServerAddressByClientCIDRs = nil + } + if in.SecretRef != nil { + in, out := in.SecretRef, &out.SecretRef + *out = new(v1.LocalObjectReference) + if err := v1.DeepCopy_v1_LocalObjectReference(*in, *out, c); err != nil { + return err + } + } else { + out.SecretRef = nil + } + return nil +} + +func DeepCopy_v1alpha1_ClusterStatus(in ClusterStatus, out *ClusterStatus, c *conversion.Cloner) error { + if in.Conditions != nil { + in, out := in.Conditions, &out.Conditions + *out = make([]ClusterCondition, len(in)) + for i := range in { + if err := DeepCopy_v1alpha1_ClusterCondition(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Conditions = nil + } + if in.Capacity != 
nil { + in, out := in.Capacity, &out.Capacity + *out = make(v1.ResourceList) + for key, val := range in { + newVal := new(resource.Quantity) + if err := resource.DeepCopy_resource_Quantity(val, newVal, c); err != nil { + return err + } + (*out)[key] = *newVal + } + } else { + out.Capacity = nil + } + if in.Allocatable != nil { + in, out := in.Allocatable, &out.Allocatable + *out = make(v1.ResourceList) + for key, val := range in { + newVal := new(resource.Quantity) + if err := resource.DeepCopy_resource_Quantity(val, newVal, c); err != nil { + return err + } + (*out)[key] = *newVal + } + } else { + out.Allocatable = nil + } + if err := DeepCopy_v1alpha1_ClusterMeta(in.ClusterMeta, &out.ClusterMeta, c); err != nil { + return err + } + if in.Zones != nil { + in, out := in.Zones, &out.Zones + *out = make([]string, len(in)) + copy(*out, in) + } else { + out.Zones = nil + } + out.Region = in.Region + return nil +} + +func DeepCopy_v1alpha1_ServerAddressByClientCIDR(in ServerAddressByClientCIDR, out *ServerAddressByClientCIDR, c *conversion.Cloner) error { + out.ClientCIDR = in.ClientCIDR + out.ServerAddress = in.ServerAddress + return nil +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/apis/federation/v1alpha1/defaults.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/apis/federation/v1alpha1/defaults.go new file mode 100644 index 000000000000..d69bb7f04443 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/apis/federation/v1alpha1/defaults.go @@ -0,0 +1,24 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "k8s.io/kubernetes/pkg/runtime" +) + +func addDefaultingFuncs(scheme *runtime.Scheme) { +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/apis/federation/v1alpha1/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/apis/federation/v1alpha1/doc.go new file mode 100644 index 000000000000..65a03a2093dc --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/apis/federation/v1alpha1/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// +genconversion=true +package v1alpha1 diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/apis/federation/v1alpha1/generated.pb.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/apis/federation/v1alpha1/generated.pb.go new file mode 100644 index 000000000000..dbb8c8ad329a --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/apis/federation/v1alpha1/generated.pb.go @@ -0,0 +1,1849 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/federation/apis/federation/v1alpha1/generated.proto +// DO NOT EDIT! + +/* + Package v1alpha1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/federation/apis/federation/v1alpha1/generated.proto + + It has these top-level messages: + Cluster + ClusterCondition + ClusterList + ClusterMeta + ClusterSpec + ClusterStatus + ServerAddressByClientCIDR +*/ +package v1alpha1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import k8s_io_kubernetes_pkg_api_resource "k8s.io/kubernetes/pkg/api/resource" + +import k8s_io_kubernetes_pkg_api_v1 "k8s.io/kubernetes/pkg/api/v1" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. 
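// Editor's note (not part of the generated file): the Marshal/Size/Unmarshal
// methods below hand-roll the protobuf wire encoding for each message. A
// round-trip sketch, with hypothetical variable names, assuming the file also
// defines Unmarshal for ServerAddressByClientCIDR as it does for the other
// messages:
//
//	in := ServerAddressByClientCIDR{ClientCIDR: "0.0.0.0/0", ServerAddress: "10.0.0.1:443"}
//	data, err := in.Marshal() // allocates a Size()-byte buffer and calls MarshalTo
//	if err != nil {
//		// handle the encoding error
//	}
//	out := ServerAddressByClientCIDR{}
//	err = out.Unmarshal(data) // reads the tag/varint-delimited fields back
//
// The blank-identifier assignments that follow are the reference imports
// mentioned in the comment above.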
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +func (m *Cluster) Reset() { *m = Cluster{} } +func (m *Cluster) String() string { return proto.CompactTextString(m) } +func (*Cluster) ProtoMessage() {} + +func (m *ClusterCondition) Reset() { *m = ClusterCondition{} } +func (m *ClusterCondition) String() string { return proto.CompactTextString(m) } +func (*ClusterCondition) ProtoMessage() {} + +func (m *ClusterList) Reset() { *m = ClusterList{} } +func (m *ClusterList) String() string { return proto.CompactTextString(m) } +func (*ClusterList) ProtoMessage() {} + +func (m *ClusterMeta) Reset() { *m = ClusterMeta{} } +func (m *ClusterMeta) String() string { return proto.CompactTextString(m) } +func (*ClusterMeta) ProtoMessage() {} + +func (m *ClusterSpec) Reset() { *m = ClusterSpec{} } +func (m *ClusterSpec) String() string { return proto.CompactTextString(m) } +func (*ClusterSpec) ProtoMessage() {} + +func (m *ClusterStatus) Reset() { *m = ClusterStatus{} } +func (m *ClusterStatus) String() string { return proto.CompactTextString(m) } +func (*ClusterStatus) ProtoMessage() {} + +func (m *ServerAddressByClientCIDR) Reset() { *m = ServerAddressByClientCIDR{} } +func (m *ServerAddressByClientCIDR) String() string { return proto.CompactTextString(m) } +func (*ServerAddressByClientCIDR) ProtoMessage() {} + +func init() { + proto.RegisterType((*Cluster)(nil), "k8s.io.kubernetes.federation.apis.federation.v1alpha1.Cluster") + proto.RegisterType((*ClusterCondition)(nil), "k8s.io.kubernetes.federation.apis.federation.v1alpha1.ClusterCondition") + proto.RegisterType((*ClusterList)(nil), "k8s.io.kubernetes.federation.apis.federation.v1alpha1.ClusterList") + proto.RegisterType((*ClusterMeta)(nil), "k8s.io.kubernetes.federation.apis.federation.v1alpha1.ClusterMeta") + proto.RegisterType((*ClusterSpec)(nil), "k8s.io.kubernetes.federation.apis.federation.v1alpha1.ClusterSpec") + proto.RegisterType((*ClusterStatus)(nil), "k8s.io.kubernetes.federation.apis.federation.v1alpha1.ClusterStatus") + proto.RegisterType((*ServerAddressByClientCIDR)(nil), "k8s.io.kubernetes.federation.apis.federation.v1alpha1.ServerAddressByClientCIDR") +} +func (m *Cluster) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Cluster) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n1 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n2, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n2 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n3, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n3 + return i, nil +} + +func (m *ClusterCondition) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ClusterCondition) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Type))) + i += copy(data[i:], m.Type) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Status))) + i += copy(data[i:], m.Status) + 
data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.LastProbeTime.Size())) + n4, err := m.LastProbeTime.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n4 + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(m.LastTransitionTime.Size())) + n5, err := m.LastTransitionTime.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n5 + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) + i += copy(data[i:], m.Reason) + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Message))) + i += copy(data[i:], m.Message) + return i, nil +} + +func (m *ClusterList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ClusterList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n6, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n6 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ClusterMeta) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ClusterMeta) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Version))) + i += copy(data[i:], m.Version) + return i, nil +} + +func (m *ClusterSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ClusterSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ServerAddressByClientCIDRs) > 0 { + for _, msg := range m.ServerAddressByClientCIDRs { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.SecretRef != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.SecretRef.Size())) + n7, err := m.SecretRef.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n7 + } + return i, nil +} + +func (m *ClusterStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ClusterStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Capacity) > 0 { + for k := range m.Capacity { + data[i] = 0x12 + i++ + v := m.Capacity[k] + msgSize := (&v).Size() + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + 
data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64((&v).Size())) + n8, err := (&v).MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n8 + } + } + if len(m.Allocatable) > 0 { + for k := range m.Allocatable { + data[i] = 0x1a + i++ + v := m.Allocatable[k] + msgSize := (&v).Size() + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64((&v).Size())) + n9, err := (&v).MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n9 + } + } + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(m.ClusterMeta.Size())) + n10, err := m.ClusterMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n10 + if len(m.Zones) > 0 { + for _, s := range m.Zones { + data[i] = 0x2a + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Region))) + i += copy(data[i:], m.Region) + return i, nil +} + +func (m *ServerAddressByClientCIDR) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ServerAddressByClientCIDR) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ClientCIDR))) + i += copy(data[i:], m.ClientCIDR) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ServerAddress))) + i += copy(data[i:], m.ServerAddress) + return i, nil +} + +func encodeFixed64Generated(data []byte, offset int, v uint64) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + data[offset+4] = uint8(v >> 32) + data[offset+5] = uint8(v >> 40) + data[offset+6] = uint8(v >> 48) + data[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(data []byte, offset int, v uint32) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(data []byte, offset int, v uint64) int { + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return offset + 1 +} +func (m *Cluster) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ClusterCondition) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastProbeTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ClusterList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range 
m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ClusterMeta) Size() (n int) { + var l int + _ = l + l = len(m.Version) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ClusterSpec) Size() (n int) { + var l int + _ = l + if len(m.ServerAddressByClientCIDRs) > 0 { + for _, e := range m.ServerAddressByClientCIDRs { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ClusterStatus) Size() (n int) { + var l int + _ = l + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Capacity) > 0 { + for k, v := range m.Capacity { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Allocatable) > 0 { + for k, v := range m.Allocatable { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + l = m.ClusterMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Zones) > 0 { + for _, s := range m.Zones { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.Region) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ServerAddressByClientCIDR) Size() (n int) { + var l int + _ = l + l = len(m.ClientCIDR) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ServerAddress) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Cluster) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Cluster: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Cluster: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= 
(int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterCondition) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = ClusterConditionType(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_kubernetes_pkg_api_v1.ConditionStatus(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastProbeTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] 
+ iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastProbeTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ 
+ msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Cluster{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterMeta) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterMeta: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterMeta: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: 
ClusterSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServerAddressByClientCIDRs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServerAddressByClientCIDRs = append(m.ServerAddressByClientCIDRs, ServerAddressByClientCIDR{}) + if err := m.ServerAddressByClientCIDRs[len(m.ServerAddressByClientCIDRs)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretRef == nil { + m.SecretRef = &k8s_io_kubernetes_pkg_api_v1.LocalObjectReference{} + } + if err := m.SecretRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, ClusterCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := k8s_io_kubernetes_pkg_api_v1.ResourceName(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_kubernetes_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + if m.Capacity == nil { + m.Capacity = make(k8s_io_kubernetes_pkg_api_v1.ResourceList) + } + m.Capacity[k8s_io_kubernetes_pkg_api_v1.ResourceName(mapkey)] = *mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Allocatable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if 
postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := k8s_io_kubernetes_pkg_api_v1.ResourceName(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_kubernetes_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + if m.Allocatable == nil { + m.Allocatable = make(k8s_io_kubernetes_pkg_api_v1.ResourceList) + } + m.Allocatable[k8s_io_kubernetes_pkg_api_v1.ResourceName(mapkey)] = *mapvalue + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ClusterMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Zones", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Zones = append(m.Zones, string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Region", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Region = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m 
*ServerAddressByClientCIDR) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServerAddressByClientCIDR: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServerAddressByClientCIDR: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientCIDR", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClientCIDR = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServerAddress", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServerAddress = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(data []byte) (n int, err error) { + l := len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if data[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(data[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/apis/federation/v1alpha1/generated.proto b/Godeps/_workspace/src/k8s.io/kubernetes/federation/apis/federation/v1alpha1/generated.proto new file mode 100644 index 000000000000..48f534615087 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/apis/federation/v1alpha1/generated.proto @@ -0,0 +1,127 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.kubernetes.federation.apis.federation.v1alpha1; + +import "k8s.io/kubernetes/pkg/api/resource/generated.proto"; + +import "k8s.io/kubernetes/pkg/api/unversioned/generated.proto"; + +import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; + +import "k8s.io/kubernetes/pkg/util/intstr/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1alpha1"; + +// Information about a registered cluster in a federated kubernetes setup. Clusters are not namespaced and have unique names in the federation. +message Cluster { + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; + + // Spec defines the behavior of the Cluster. + optional ClusterSpec spec = 2; + + // Status describes the current status of a Cluster. + optional ClusterStatus status = 3; +} + +// ClusterCondition describes the current state of a cluster. +message ClusterCondition { + // Type of cluster condition, Complete or Failed. + optional string type = 1; + + // Status of the condition, one of True, False, Unknown. + optional string status = 2; + + // Last time the condition was checked. + optional k8s.io.kubernetes.pkg.api.unversioned.Time lastProbeTime = 3; + + // Last time the condition transitioned from one status to another. + optional k8s.io.kubernetes.pkg.api.unversioned.Time lastTransitionTime = 4; + + // (brief) reason for the condition's last transition. + optional string reason = 5; + + // Human readable message indicating details about last transition.
+ optional string message = 6; +} + +// A list of all the kubernetes clusters registered to the federation. +message ClusterList { + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; + + // List of Cluster objects. + repeated Cluster items = 2; +} + +// Cluster metadata +message ClusterMeta { + // Release version of the cluster. + optional string version = 1; +} + +// ClusterSpec describes the attributes of a kubernetes cluster. +message ClusterSpec { + // A map of client CIDR to server address. + // This is to help clients reach servers in the most network-efficient way possible. + // Clients can use the appropriate server address as per the CIDR that they match. + // In case of multiple matches, clients should use the longest matching CIDR. + repeated ServerAddressByClientCIDR serverAddressByClientCIDRs = 1; + + // Name of the secret containing kubeconfig to access this cluster. + // The secret is read from the kubernetes cluster that is hosting the federation control plane. + // Admin needs to ensure that the required secret exists. The secret should be in the same namespace where the federation control plane is hosted, and it should have the kubeconfig in its data with the key "kubeconfig". + // This will later be changed to a reference to a secret in the federation control plane when the federation control plane supports secrets. + optional k8s.io.kubernetes.pkg.api.v1.LocalObjectReference secretRef = 2; +} + +// ClusterStatus is information about the current status of a cluster, updated periodically by the cluster controller. +message ClusterStatus { + // Conditions is an array of current cluster conditions. + repeated ClusterCondition conditions = 1; + + // Capacity represents the total resources of the cluster. + map<string, k8s.io.kubernetes.pkg.api.resource.Quantity> capacity = 2; + + // Allocatable represents the total resources of a cluster that are available for scheduling. + map<string, k8s.io.kubernetes.pkg.api.resource.Quantity> allocatable = 3; + + optional ClusterMeta clusterMeta = 4; + + // Zones is the list of availability zones in which the nodes of the cluster exist, e.g. 'us-east1-a'. + // These will always be in the same region. + repeated string zones = 5; + + // Region is the name of the region in which all of the nodes in the cluster exist, e.g. 'us-east1'. + optional string region = 6; +} + +// ServerAddressByClientCIDR helps the client to determine the server address that they should use, depending on the clientCIDR that they match. +message ServerAddressByClientCIDR { + // The CIDR with which clients can match their IP to figure out the server address that they should use. + optional string clientCIDR = 1; + + // Address of this server, suitable for a client that matches the above CIDR. + // This can be a hostname, hostname:port, IP or IP:port. + optional string serverAddress = 2; +} + diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/apis/federation/v1alpha1/register.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/apis/federation/v1alpha1/register.go new file mode 100644 index 000000000000..1cb015ec9f82 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/apis/federation/v1alpha1/register.go @@ -0,0 +1,50 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/runtime" + versionedwatch "k8s.io/kubernetes/pkg/watch/versioned" +) + +// GroupName is the group name used in this package +const GroupName = "federation" + +// SchemeGroupVersion is the group version used to register these objects +var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: "v1alpha1"} + +// AddToScheme adds the list of known types to api.Scheme. +func AddToScheme(scheme *runtime.Scheme) { + addKnownTypes(scheme) + addDefaultingFuncs(scheme) + addConversionFuncs(scheme) +} + +func addKnownTypes(scheme *runtime.Scheme) { + scheme.AddKnownTypes(SchemeGroupVersion, + &Cluster{}, + &ClusterList{}, + &v1.ListOptions{}, + &v1.DeleteOptions{}, + ) + versionedwatch.AddToGroupVersion(scheme, SchemeGroupVersion) +} + +func (obj *Cluster) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } +func (obj *ClusterList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/apis/federation/v1alpha1/types.generated.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/apis/federation/v1alpha1/types.generated.go new file mode 100644 index 000000000000..6fadbf68823b --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/apis/federation/v1alpha1/types.generated.go @@ -0,0 +1,2645 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// ************************************************************ +// DO NOT EDIT. +// THIS FILE IS AUTO-GENERATED BY codecgen.
+// ************************************************************ + +package v1alpha1 + +import ( + "errors" + "fmt" + codec1978 "github.com/ugorji/go/codec" + pkg3_resource "k8s.io/kubernetes/pkg/api/resource" + pkg2_unversioned "k8s.io/kubernetes/pkg/api/unversioned" + pkg1_v1 "k8s.io/kubernetes/pkg/api/v1" + pkg4_types "k8s.io/kubernetes/pkg/types" + "reflect" + "runtime" + time "time" +) + +const ( + // ----- content types ---- + codecSelferC_UTF81234 = 1 + codecSelferC_RAW1234 = 0 + // ----- value types used ---- + codecSelferValueTypeArray1234 = 10 + codecSelferValueTypeMap1234 = 9 + // ----- containerStateValues ---- + codecSelfer_containerMapKey1234 = 2 + codecSelfer_containerMapValue1234 = 3 + codecSelfer_containerMapEnd1234 = 4 + codecSelfer_containerArrayElem1234 = 6 + codecSelfer_containerArrayEnd1234 = 7 +) + +var ( + codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) + codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) +) + +type codecSelfer1234 struct{} + +func init() { + if codec1978.GenVersion != 5 { + _, file, _, _ := runtime.Caller(0) + err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v", + 5, codec1978.GenVersion, file) + panic(err) + } + if false { // reference the types, but skip this branch at build/run time + var v0 pkg3_resource.Quantity + var v1 pkg2_unversioned.Time + var v2 pkg1_v1.LocalObjectReference + var v3 pkg4_types.UID + var v4 time.Time + _, _, _, _, _ = v0, v1, v2, v3, v4 + } +} + +func (x *ServerAddressByClientCIDR) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ClientCIDR)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("clientCIDR")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ClientCIDR)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ServerAddress)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("serverAddress")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ServerAddress)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ServerAddressByClientCIDR) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r 
:= codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ServerAddressByClientCIDR) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "clientCIDR": + if r.TryDecodeAsNil() { + x.ClientCIDR = "" + } else { + x.ClientCIDR = string(r.DecodeString()) + } + case "serverAddress": + if r.TryDecodeAsNil() { + x.ServerAddress = "" + } else { + x.ServerAddress = string(r.DecodeString()) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ServerAddressByClientCIDR) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ClientCIDR = "" + } else { + x.ClientCIDR = string(r.DecodeString()) + } + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ServerAddress = "" + } else { + x.ServerAddress = string(r.DecodeString()) + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ClusterSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.ServerAddressByClientCIDRs == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSliceServerAddressByClientCIDR(([]ServerAddressByClientCIDR)(x.ServerAddressByClientCIDRs), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("serverAddressByClientCIDRs")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ServerAddressByClientCIDRs == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSliceServerAddressByClientCIDR(([]ServerAddressByClientCIDR)(x.ServerAddressByClientCIDRs), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.SecretRef == nil { + r.EncodeNil() + } else { + x.SecretRef.CodecEncodeSelf(e) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("secretRef")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.SecretRef == nil { + r.EncodeNil() + } else { + x.SecretRef.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ClusterSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ClusterSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "serverAddressByClientCIDRs": + if r.TryDecodeAsNil() { + x.ServerAddressByClientCIDRs = nil + } else { + yyv4 := &x.ServerAddressByClientCIDRs + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSliceServerAddressByClientCIDR((*[]ServerAddressByClientCIDR)(yyv4), d) + } + } + case "secretRef": + if r.TryDecodeAsNil() { + if x.SecretRef != nil { + x.SecretRef = nil + } + } else { + if x.SecretRef == nil { + x.SecretRef = new(pkg1_v1.LocalObjectReference) + } + x.SecretRef.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ClusterSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := 
codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ServerAddressByClientCIDRs = nil + } else { + yyv8 := &x.ServerAddressByClientCIDRs + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + h.decSliceServerAddressByClientCIDR((*[]ServerAddressByClientCIDR)(yyv8), d) + } + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.SecretRef != nil { + x.SecretRef = nil + } + } else { + if x.SecretRef == nil { + x.SecretRef = new(pkg1_v1.LocalObjectReference) + } + x.SecretRef.CodecDecodeSelf(d) + } + for { + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj7-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x ClusterConditionType) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *ClusterConditionType) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *ClusterCondition) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [6]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[2] = true + yyq2[3] = true + yyq2[4] = x.Reason != "" + yyq2[5] = x.Message != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(6) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + x.Type.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("type")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Type.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yysf7 := &x.Status + yysf7.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yysf8 := &x.Status + yysf8.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.LastProbeTime + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if 
z.HasExtensions() && z.EncExt(yy10) { + } else if yym11 { + z.EncBinaryMarshal(yy10) + } else if !yym11 && z.IsJSONHandle() { + z.EncJSONMarshal(yy10) + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("lastProbeTime")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.LastProbeTime + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else if yym13 { + z.EncBinaryMarshal(yy12) + } else if !yym13 && z.IsJSONHandle() { + z.EncJSONMarshal(yy12) + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.LastTransitionTime + yym16 := z.EncBinary() + _ = yym16 + if false { + } else if z.HasExtensions() && z.EncExt(yy15) { + } else if yym16 { + z.EncBinaryMarshal(yy15) + } else if !yym16 && z.IsJSONHandle() { + z.EncJSONMarshal(yy15) + } else { + z.EncFallback(yy15) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("lastTransitionTime")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.LastTransitionTime + yym18 := z.EncBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.EncExt(yy17) { + } else if yym18 { + z.EncBinaryMarshal(yy17) + } else if !yym18 && z.IsJSONHandle() { + z.EncJSONMarshal(yy17) + } else { + z.EncFallback(yy17) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("reason")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym21 := z.EncBinary() + _ = yym21 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("message")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym24 := z.EncBinary() + _ = yym24 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ClusterCondition) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == 
codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ClusterCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "type": + if r.TryDecodeAsNil() { + x.Type = "" + } else { + x.Type = ClusterConditionType(r.DecodeString()) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = "" + } else { + x.Status = pkg1_v1.ConditionStatus(r.DecodeString()) + } + case "lastProbeTime": + if r.TryDecodeAsNil() { + x.LastProbeTime = pkg2_unversioned.Time{} + } else { + yyv6 := &x.LastProbeTime + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(yyv6) { + } else if yym7 { + z.DecBinaryUnmarshal(yyv6) + } else if !yym7 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv6) + } else { + z.DecFallback(yyv6, false) + } + } + case "lastTransitionTime": + if r.TryDecodeAsNil() { + x.LastTransitionTime = pkg2_unversioned.Time{} + } else { + yyv8 := &x.LastTransitionTime + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else if yym9 { + z.DecBinaryUnmarshal(yyv8) + } else if !yym9 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv8) + } else { + z.DecFallback(yyv8, false) + } + } + case "reason": + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + x.Reason = string(r.DecodeString()) + } + case "message": + if r.TryDecodeAsNil() { + x.Message = "" + } else { + x.Message = string(r.DecodeString()) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ClusterCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Type = "" + } else { + x.Type = ClusterConditionType(r.DecodeString()) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = "" + } else { + x.Status = pkg1_v1.ConditionStatus(r.DecodeString()) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.LastProbeTime = pkg2_unversioned.Time{} + } else { + yyv15 := 
&x.LastProbeTime + yym16 := z.DecBinary() + _ = yym16 + if false { + } else if z.HasExtensions() && z.DecExt(yyv15) { + } else if yym16 { + z.DecBinaryUnmarshal(yyv15) + } else if !yym16 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv15) + } else { + z.DecFallback(yyv15, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.LastTransitionTime = pkg2_unversioned.Time{} + } else { + yyv17 := &x.LastTransitionTime + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else if yym18 { + z.DecBinaryUnmarshal(yyv17) + } else if !yym18 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv17) + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + x.Reason = string(r.DecodeString()) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Message = "" + } else { + x.Message = string(r.DecodeString()) + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ClusterMeta) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Version != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Version)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("version")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Version)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ClusterMeta) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + 
yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ClusterMeta) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "version": + if r.TryDecodeAsNil() { + x.Version = "" + } else { + x.Version = string(r.DecodeString()) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ClusterMeta) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj5 int + var yyb5 bool + var yyhl5 bool = l >= 0 + yyj5++ + if yyhl5 { + yyb5 = yyj5 > l + } else { + yyb5 = r.CheckBreak() + } + if yyb5 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Version = "" + } else { + x.Version = string(r.DecodeString()) + } + for { + yyj5++ + if yyhl5 { + yyb5 = yyj5 > l + } else { + yyb5 = r.CheckBreak() + } + if yyb5 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj5-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ClusterStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [6]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.Conditions) != 0 + yyq2[1] = len(x.Capacity) != 0 + yyq2[2] = len(x.Allocatable) != 0 + yyq2[3] = len(x.Zones) != 0 + yyq2[4] = x.Region != "" + yyq2[5] = x.Version != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(6) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Conditions == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSliceClusterCondition(([]ClusterCondition)(x.Conditions), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("conditions")) + 
z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Conditions == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSliceClusterCondition(([]ClusterCondition)(x.Conditions), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Capacity == nil { + r.EncodeNil() + } else { + yysf7 := &x.Capacity + yysf7.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("capacity")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Capacity == nil { + r.EncodeNil() + } else { + yysf8 := &x.Capacity + yysf8.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.Allocatable == nil { + r.EncodeNil() + } else { + yysf10 := &x.Allocatable + yysf10.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("allocatable")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Allocatable == nil { + r.EncodeNil() + } else { + yysf11 := &x.Allocatable + yysf11.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.Zones == nil { + r.EncodeNil() + } else { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + z.F.EncSliceStringV(x.Zones, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("zones")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Zones == nil { + r.EncodeNil() + } else { + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + z.F.EncSliceStringV(x.Zones, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Region)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("region")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Region)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Version)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("version")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Version)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ClusterStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r 
:= codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ClusterStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "conditions": + if r.TryDecodeAsNil() { + x.Conditions = nil + } else { + yyv4 := &x.Conditions + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSliceClusterCondition((*[]ClusterCondition)(yyv4), d) + } + } + case "capacity": + if r.TryDecodeAsNil() { + x.Capacity = nil + } else { + yyv6 := &x.Capacity + yyv6.CodecDecodeSelf(d) + } + case "allocatable": + if r.TryDecodeAsNil() { + x.Allocatable = nil + } else { + yyv7 := &x.Allocatable + yyv7.CodecDecodeSelf(d) + } + case "zones": + if r.TryDecodeAsNil() { + x.Zones = nil + } else { + yyv8 := &x.Zones + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + z.F.DecSliceStringX(yyv8, false, d) + } + } + case "region": + if r.TryDecodeAsNil() { + x.Region = "" + } else { + x.Region = string(r.DecodeString()) + } + case "version": + if r.TryDecodeAsNil() { + x.Version = "" + } else { + x.Version = string(r.DecodeString()) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ClusterStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Conditions = nil + } else { + yyv13 := &x.Conditions + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + h.decSliceClusterCondition((*[]ClusterCondition)(yyv13), d) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Capacity = nil + } else { + yyv15 := &x.Capacity + yyv15.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Allocatable = nil + } else { + yyv16 := &x.Allocatable + yyv16.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Zones = nil + } else { + yyv17 := &x.Zones + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + z.F.DecSliceStringX(yyv17, false, d) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Region = "" + } else { + x.Region = string(r.DecodeString()) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Version = "" + } else { + x.Version = string(r.DecodeString()) + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *Cluster) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = true + yyq2[1] = true + yyq2[2] = true + yyq2[3] = x.Kind != "" + yyq2[4] = x.APIVersion != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yy4 := &x.ObjectMeta + yy4.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.ObjectMeta + yy6.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yy9 := &x.Spec + yy9.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy11 := &x.Spec + yy11.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy14 := &x.Status + yy14.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy16 := &x.Status + 
yy16.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Cluster) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Cluster) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv4 := &x.ObjectMeta + yyv4.CodecDecodeSelf(d) + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = ClusterSpec{} + } else { + yyv5 := &x.Spec + yyv5.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = ClusterStatus{} + } else { + yyv6 := &x.Status + yyv6.CodecDecodeSelf(d) + } + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + 
z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Cluster) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj9 int + var yyb9 bool + var yyhl9 bool = l >= 0 + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv10 := &x.ObjectMeta + yyv10.CodecDecodeSelf(d) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = ClusterSpec{} + } else { + yyv11 := &x.Spec + yyv11.CodecDecodeSelf(d) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = ClusterStatus{} + } else { + yyv12 := &x.Status + yyv12.CodecDecodeSelf(d) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + for { + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj9-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ClusterList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = true + yyq2[2] = x.Kind != "" + yyq2[3] = x.APIVersion != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yy4 := &x.ListMeta + yym5 := z.EncBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.EncExt(yy4) { + } else { + z.EncFallback(yy4) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.ListMeta + yym7 := z.EncBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && 
z.EncExt(yy6) { + } else { + z.EncFallback(yy6) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym9 := z.EncBinary() + _ = yym9 + if false { + } else { + h.encSliceCluster(([]Cluster)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + h.encSliceCluster(([]Cluster)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym12 := z.EncBinary() + _ = yym12 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ClusterList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ClusterList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_unversioned.ListMeta{} + } else { + yyv4 := 
&x.ListMeta + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(yyv4) { + } else { + z.DecFallback(yyv4, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv6 := &x.Items + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + h.decSliceCluster((*[]Cluster)(yyv6), d) + } + } + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ClusterList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_unversioned.ListMeta{} + } else { + yyv11 := &x.ListMeta + yym12 := z.DecBinary() + _ = yym12 + if false { + } else if z.HasExtensions() && z.DecExt(yyv11) { + } else { + z.DecFallback(yyv11, false) + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv13 := &x.Items + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + h.decSliceCluster((*[]Cluster)(yyv13), d) + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) encSliceServerAddressByClientCIDR(v []ServerAddressByClientCIDR, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceServerAddressByClientCIDR(v *[]ServerAddressByClientCIDR, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 
== nil { + yyv1 = []ServerAddressByClientCIDR{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 32) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]ServerAddressByClientCIDR, yyrl1) + } + } else { + yyv1 = make([]ServerAddressByClientCIDR, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ServerAddressByClientCIDR{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, ServerAddressByClientCIDR{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ServerAddressByClientCIDR{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, ServerAddressByClientCIDR{}) // var yyz1 ServerAddressByClientCIDR + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = ServerAddressByClientCIDR{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []ServerAddressByClientCIDR{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceClusterCondition(v []ClusterCondition, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceClusterCondition(v *[]ClusterCondition, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []ClusterCondition{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 112) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]ClusterCondition, yyrl1) + } + } else { + yyv1 = make([]ClusterCondition, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ClusterCondition{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, ClusterCondition{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + 
yyv1[yyj1] = ClusterCondition{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, ClusterCondition{}) // var yyz1 ClusterCondition + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = ClusterCondition{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []ClusterCondition{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceCluster(v []Cluster, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceCluster(v *[]Cluster, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []Cluster{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 368) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]Cluster, yyrl1) + } + } else { + yyv1 = make([]Cluster, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Cluster{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, Cluster{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Cluster{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, Cluster{}) // var yyz1 Cluster + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = Cluster{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []Cluster{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/apis/federation/v1alpha1/types.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/apis/federation/v1alpha1/types.go new file mode 100644 index 000000000000..8d17fa2cd885 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/apis/federation/v1alpha1/types.go @@ -0,0 +1,119 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/api/v1" +) + +// ServerAddressByClientCIDR helps the client to determine the server address that they should use, depending on the clientCIDR that they match. +type ServerAddressByClientCIDR struct { + // The CIDR with which clients can match their IP to figure out the server address that they should use. + ClientCIDR string `json:"clientCIDR" protobuf:"bytes,1,opt,name=clientCIDR"` + // Address of this server, suitable for a client that matches the above CIDR. + // This can be a hostname, hostname:port, IP or IP:port. + ServerAddress string `json:"serverAddress" protobuf:"bytes,2,opt,name=serverAddress"` +} + +// ClusterSpec describes the attributes of a kubernetes cluster. +type ClusterSpec struct { + // A map of client CIDR to server address. + // This is to help clients reach servers in the most network-efficient way possible. + // Clients can use the appropriate server address as per the CIDR that they match. + // In case of multiple matches, clients should use the longest matching CIDR. + ServerAddressByClientCIDRs []ServerAddressByClientCIDR `json:"serverAddressByClientCIDRs" patchStrategy:"merge" patchMergeKey:"clientCIDR" protobuf:"bytes,1,rep,name=serverAddressByClientCIDRs"` + // Name of the secret containing kubeconfig to access this cluster. + // The secret is read from the kubernetes cluster that is hosting federation control plane. + // Admin needs to ensure that the required secret exists. Secret should be in the same namespace where federation control plane is hosted and it should have kubeconfig in its data with key "kubeconfig". + // This will later be changed to a reference to secret in federation control plane when the federation control plane supports secrets. + SecretRef *v1.LocalObjectReference `json:"secretRef" protobuf:"bytes,2,opt,name=secretRef"` +} + +type ClusterConditionType string + +// These are valid conditions of a cluster. +const ( + // ClusterReady means the cluster is ready to accept workloads. + ClusterReady ClusterConditionType = "Ready" + // ClusterOffline means the cluster is temporarily down or not reachable. + ClusterOffline ClusterConditionType = "Offline" +) + +// ClusterCondition describes the current state of a cluster. +type ClusterCondition struct { + // Type of cluster condition, currently Ready or Offline. + Type ClusterConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=ClusterConditionType"` + // Status of the condition, one of True, False, Unknown. + Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/kubernetes/pkg/api/v1.ConditionStatus"` + // Last time the condition was checked. + LastProbeTime unversioned.Time `json:"lastProbeTime,omitempty" protobuf:"bytes,3,opt,name=lastProbeTime"` + // Last time the condition transitioned from one status to another.
+ LastTransitionTime unversioned.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"` + // (brief) reason for the condition's last transition. + Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"` + // Human readable message indicating details about last transition. + Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"` +} + +// Cluster metadata +type ClusterMeta struct { + // Release version of the cluster. + Version string `json:"version,omitempty" protobuf:"bytes,1,opt,name=version"` +} + +// ClusterStatus is information about the current status of a cluster updated by cluster controller periodically. +type ClusterStatus struct { + // Conditions is an array of current cluster conditions. + Conditions []ClusterCondition `json:"conditions,omitempty" protobuf:"bytes,1,rep,name=conditions"` + // Capacity represents the total resources of the cluster. + Capacity v1.ResourceList `json:"capacity,omitempty" protobuf:"bytes,2,rep,name=capacity,casttype=k8s.io/kubernetes/pkg/api/v1.ResourceList,castkey=k8s.io/kubernetes/pkg/api/v1.ResourceName"` + // Allocatable represents the total resources of a cluster that are available for scheduling. + Allocatable v1.ResourceList `json:"allocatable,omitempty" protobuf:"bytes,3,rep,name=allocatable,casttype=k8s.io/kubernetes/pkg/api/v1.ResourceList,castkey=k8s.io/kubernetes/pkg/api/v1.ResourceName"` + ClusterMeta `json:",inline" protobuf:"bytes,4,opt,name=clusterMeta"` + // Zones is the list of availability zones in which the nodes of the cluster exist, e.g. 'us-east1-a'. + // These will always be in the same region. + Zones []string `json:"zones,omitempty" protobuf:"bytes,5,rep,name=zones"` + // Region is the name of the region in which all of the nodes in the cluster exist, e.g. 'us-east1'. + Region string `json:"region,omitempty" protobuf:"bytes,6,opt,name=region"` +} + +// +genclient=true,nonNamespaced=true + +// Information about a registered cluster in a federated kubernetes setup. Clusters are not namespaced and have unique names in the federation. +type Cluster struct { + unversioned.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec defines the behavior of the Cluster. + Spec ClusterSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + // Status describes the current status of a Cluster. + Status ClusterStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// A list of all the kubernetes clusters registered to the federation. +type ClusterList struct { + unversioned.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // List of Cluster objects. + Items []Cluster `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/apis/federation/validation/validation.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/apis/federation/validation/validation.go new file mode 100644 index 000000000000..f7cac2e264de --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/apis/federation/validation/validation.go @@ -0,0 +1,54 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved.
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package validation + +import ( + "k8s.io/kubernetes/federation/apis/federation" + "k8s.io/kubernetes/pkg/api/validation" + "k8s.io/kubernetes/pkg/util/validation/field" +) + +var ValidateClusterName = validation.NameIsDNSSubdomain + +func ValidateClusterSpec(spec *federation.ClusterSpec, fieldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + // address is required. + if len(spec.ServerAddressByClientCIDRs) == 0 { + allErrs = append(allErrs, field.Required(fieldPath.Child("serverAddressByClientCIDRs"), "")) + } + return allErrs +} + +func ValidateCluster(cluster *federation.Cluster) field.ErrorList { + allErrs := validation.ValidateObjectMeta(&cluster.ObjectMeta, false, ValidateClusterName, field.NewPath("metadata")) + allErrs = append(allErrs, ValidateClusterSpec(&cluster.Spec, field.NewPath("spec"))...) + return allErrs +} + +func ValidateClusterUpdate(cluster, oldCluster *federation.Cluster) field.ErrorList { + allErrs := validation.ValidateObjectMetaUpdate(&cluster.ObjectMeta, &oldCluster.ObjectMeta, field.NewPath("metadata")) + if cluster.Name != oldCluster.Name { + allErrs = append(allErrs, field.Invalid(field.NewPath("meta", "name"), + cluster.Name+" != "+oldCluster.Name, "cannot change cluster name")) + } + return allErrs +} + +func ValidateClusterStatusUpdate(cluster, oldCluster *federation.Cluster) field.ErrorList { + allErrs := validation.ValidateObjectMetaUpdate(&cluster.ObjectMeta, &oldCluster.ObjectMeta, field.NewPath("metadata")) + return allErrs +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/apis/federation/validation/validation_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/apis/federation/validation/validation_test.go new file mode 100644 index 000000000000..87a6b92e4798 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/apis/federation/validation/validation_test.go @@ -0,0 +1,205 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package validation + +import ( + "testing" + + "k8s.io/kubernetes/federation/apis/federation" + "k8s.io/kubernetes/pkg/api" +) + +func TestValidateCluster(t *testing.T) { + successCases := []federation.Cluster{ + { + ObjectMeta: api.ObjectMeta{Name: "cluster-s"}, + Spec: federation.ClusterSpec{ + ServerAddressByClientCIDRs: []federation.ServerAddressByClientCIDR{ + { + ClientCIDR: "0.0.0.0/0", + ServerAddress: "localhost:8888", + }, + }, + }, + }, + } + for _, successCase := range successCases { + errs := ValidateCluster(&successCase) + if len(errs) != 0 { + t.Errorf("expect success: %v", errs) + } + } + + errorCases := map[string]federation.Cluster{ + "missing cluster addresses": { + ObjectMeta: api.ObjectMeta{Name: "cluster-f"}, + }, + "empty cluster addresses": { + ObjectMeta: api.ObjectMeta{Name: "cluster-f"}, + Spec: federation.ClusterSpec{ + ServerAddressByClientCIDRs: []federation.ServerAddressByClientCIDR{}, + }}, + "invalid_label": { + ObjectMeta: api.ObjectMeta{ + Name: "cluster-f", + Labels: map[string]string{ + "NoUppercaseOrSpecialCharsLike=Equals": "bar", + }, + }, + }, + } + for testName, errorCase := range errorCases { + errs := ValidateCluster(&errorCase) + if len(errs) == 0 { + t.Errorf("expected failure for %s", testName) + } + } +} + +func TestValidateClusterUpdate(t *testing.T) { + type clusterUpdateTest struct { + old federation.Cluster + update federation.Cluster + } + successCases := []clusterUpdateTest{ + { + old: federation.Cluster{ + ObjectMeta: api.ObjectMeta{Name: "cluster-s"}, + Spec: federation.ClusterSpec{ + ServerAddressByClientCIDRs: []federation.ServerAddressByClientCIDR{ + { + ClientCIDR: "0.0.0.0/0", + ServerAddress: "localhost:8888", + }, + }, + }, + }, + update: federation.Cluster{ + ObjectMeta: api.ObjectMeta{Name: "cluster-s"}, + Spec: federation.ClusterSpec{ + ServerAddressByClientCIDRs: []federation.ServerAddressByClientCIDR{ + { + ClientCIDR: "0.0.0.0/0", + ServerAddress: "localhost:8888", + }, + }, + }, + }, + }, + } + for _, successCase := range successCases { + successCase.old.ObjectMeta.ResourceVersion = "1" + successCase.update.ObjectMeta.ResourceVersion = "1" + errs := ValidateClusterUpdate(&successCase.update, &successCase.old) + if len(errs) != 0 { + t.Errorf("expect success: %v", errs) + } + } + + errorCases := map[string]clusterUpdateTest{ + "cluster name changed": { + old: federation.Cluster{ + ObjectMeta: api.ObjectMeta{Name: "cluster-s"}, + Spec: federation.ClusterSpec{ + ServerAddressByClientCIDRs: []federation.ServerAddressByClientCIDR{ + { + ClientCIDR: "0.0.0.0/0", + ServerAddress: "localhost:8888", + }, + }, + }, + }, + update: federation.Cluster{ + ObjectMeta: api.ObjectMeta{Name: "cluster-newname"}, + Spec: federation.ClusterSpec{ + ServerAddressByClientCIDRs: []federation.ServerAddressByClientCIDR{ + { + ClientCIDR: "0.0.0.0/0", + ServerAddress: "localhost:8888", + }, + }, + }, + }, + }, + } + for testName, errorCase := range errorCases { + errs := ValidateClusterUpdate(&errorCase.update, &errorCase.old) + if len(errs) == 0 { + t.Errorf("expected failure: %s", testName) + } + } +} + +func TestValidateClusterStatusUpdate(t *testing.T) { + type clusterUpdateTest struct { + old federation.Cluster + update federation.Cluster + } + successCases := []clusterUpdateTest{ + { + old: federation.Cluster{ + ObjectMeta: api.ObjectMeta{Name: "cluster-s"}, + Spec: federation.ClusterSpec{ + ServerAddressByClientCIDRs: []federation.ServerAddressByClientCIDR{ + { + ClientCIDR: "0.0.0.0/0", + ServerAddress: "localhost:8888", + }, + }, + 
}, + Status: federation.ClusterStatus{ + Conditions: []federation.ClusterCondition{ + {Type: federation.ClusterReady, Status: api.ConditionTrue}, + }, + }, + }, + update: federation.Cluster{ + ObjectMeta: api.ObjectMeta{Name: "cluster-s"}, + Spec: federation.ClusterSpec{ + ServerAddressByClientCIDRs: []federation.ServerAddressByClientCIDR{ + { + ClientCIDR: "0.0.0.0/0", + ServerAddress: "localhost:8888", + }, + }, + }, + Status: federation.ClusterStatus{ + Conditions: []federation.ClusterCondition{ + {Type: federation.ClusterReady, Status: api.ConditionTrue}, + {Type: federation.ClusterOffline, Status: api.ConditionTrue}, + }, + }, + }, + }, + } + for _, successCase := range successCases { + successCase.old.ObjectMeta.ResourceVersion = "1" + successCase.update.ObjectMeta.ResourceVersion = "1" + errs := ValidateClusterStatusUpdate(&successCase.update, &successCase.old) + if len(errs) != 0 { + t.Errorf("expect success: %v", errs) + } + } + + errorCases := map[string]clusterUpdateTest{} + for testName, errorCase := range errorCases { + errs := ValidateClusterStatusUpdate(&errorCase.update, &errorCase.old) + if len(errs) == 0 { + t.Errorf("expected failure: %s", testName) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/cache/cluster_cache.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/cache/cluster_cache.go new file mode 100644 index 000000000000..ba22cc562471 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/cache/cluster_cache.go @@ -0,0 +1,64 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "github.com/golang/glog" + "k8s.io/kubernetes/federation/apis/federation" + kubeCache "k8s.io/kubernetes/pkg/client/cache" +) + +// StoreToClusterLister makes a Store have the List method of the unversioned.ClusterInterface. +// The Store must contain (only) clusters. +type StoreToClusterLister struct { + kubeCache.Store +} + +func (s *StoreToClusterLister) List() (clusters federation.ClusterList, err error) { + for _, m := range s.Store.List() { + clusters.Items = append(clusters.Items, *(m.(*federation.Cluster))) + } + return clusters, nil +} + +// ClusterConditionPredicate is a function that indicates whether the given cluster's conditions meet +// some set of criteria defined by the function. +type ClusterConditionPredicate func(cluster federation.Cluster) bool + +// storeToClusterConditionLister filters and returns clusters matching the given type and status from the store. +type storeToClusterConditionLister struct { + store kubeCache.Store + predicate ClusterConditionPredicate +} + +// ClusterCondition returns a storeToClusterConditionLister +func (s *StoreToClusterLister) ClusterCondition(predicate ClusterConditionPredicate) storeToClusterConditionLister { + return storeToClusterConditionLister{s.Store, predicate} +} + +// List returns a list of clusters that match the conditions defined by the predicate functions in the storeToClusterConditionLister.
+func (s storeToClusterConditionLister) List() (clusters federation.ClusterList, err error) { + for _, m := range s.store.List() { + cluster := *m.(*federation.Cluster) + if s.predicate(cluster) { + clusters.Items = append(clusters.Items, cluster) + } else { + glog.V(5).Infof("Cluster %s matches none of the conditions", cluster.Name) + } + } + return +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/clientset.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/clientset.go new file mode 100644 index 000000000000..37c13f858a7b --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/clientset.go @@ -0,0 +1,106 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package federation_internalclientset + +import ( + "github.com/golang/glog" + unversionedcore "k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/unversioned" + unversionedfederation "k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/federation/unversioned" + restclient "k8s.io/kubernetes/pkg/client/restclient" + discovery "k8s.io/kubernetes/pkg/client/typed/discovery" + "k8s.io/kubernetes/pkg/util/flowcontrol" +) + +type Interface interface { + Discovery() discovery.DiscoveryInterface + Federation() unversionedfederation.FederationInterface + Core() unversionedcore.CoreInterface +} + +// Clientset contains the clients for groups. Each group has exactly one +// version included in a Clientset. +type Clientset struct { + *discovery.DiscoveryClient + *unversionedfederation.FederationClient + *unversionedcore.CoreClient +} + +// Federation retrieves the FederationClient +func (c *Clientset) Federation() unversionedfederation.FederationInterface { + if c == nil { + return nil + } + return c.FederationClient +} + +// Core retrieves the CoreClient +func (c *Clientset) Core() unversionedcore.CoreInterface { + if c == nil { + return nil + } + return c.CoreClient +} + +// Discovery retrieves the DiscoveryClient +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + return c.DiscoveryClient +} + +// NewForConfig creates a new Clientset for the given config. 
+func NewForConfig(c *restclient.Config) (*Clientset, error) { + configShallowCopy := *c + if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) + } + var clientset Clientset + var err error + clientset.FederationClient, err = unversionedfederation.NewForConfig(&configShallowCopy) + if err != nil { + return &clientset, err + } + clientset.CoreClient, err = unversionedcore.NewForConfig(&configShallowCopy) + if err != nil { + return &clientset, err + } + + clientset.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) + if err != nil { + glog.Errorf("failed to create the DiscoveryClient: %v", err) + } + return &clientset, err +} + +// NewForConfigOrDie creates a new Clientset for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *restclient.Config) *Clientset { + var clientset Clientset + clientset.FederationClient = unversionedfederation.NewForConfigOrDie(c) + clientset.CoreClient = unversionedcore.NewForConfigOrDie(c) + + clientset.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) + return &clientset +} + +// New creates a new Clientset for the given RESTClient. +func New(c *restclient.RESTClient) *Clientset { + var clientset Clientset + clientset.FederationClient = unversionedfederation.New(c) + clientset.CoreClient = unversionedcore.New(c) + + clientset.DiscoveryClient = discovery.NewDiscoveryClient(c) + return &clientset +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/doc.go new file mode 100644 index 000000000000..40d4acceae9d --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with arguments: --clientset-name=federation_internalclientset --clientset-path=k8s.io/kubernetes/federation/client/clientset_generated --included-types-overrides=[api/Service] --input=[../../federation/apis/federation/,api/] + +// This package has the automatically generated clientset. 
+package federation_internalclientset diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/fake/clientset_generated.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/fake/clientset_generated.go new file mode 100644 index 000000000000..21b95c03fb8e --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/fake/clientset_generated.go @@ -0,0 +1,72 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + clientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset" + unversionedcore "k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/unversioned" + fakeunversionedcore "k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/unversioned/fake" + unversionedfederation "k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/federation/unversioned" + fakeunversionedfederation "k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/federation/unversioned/fake" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apimachinery/registered" + "k8s.io/kubernetes/pkg/client/testing/core" + "k8s.io/kubernetes/pkg/client/typed/discovery" + fakediscovery "k8s.io/kubernetes/pkg/client/typed/discovery/fake" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/watch" +) + +// NewSimpleClientset returns a clientset that will respond with the provided objects. +func NewSimpleClientset(objects ...runtime.Object) *Clientset { + o := core.NewObjects(api.Scheme, api.Codecs.UniversalDecoder()) + for _, obj := range objects { + if err := o.Add(obj); err != nil { + panic(err) + } + } + + fakePtr := core.Fake{} + fakePtr.AddReactor("*", "*", core.ObjectReaction(o, registered.RESTMapper())) + + fakePtr.AddWatchReactor("*", core.DefaultWatchReactor(watch.NewFake(), nil)) + + return &Clientset{fakePtr} +} + +// Clientset implements clientset.Interface. Meant to be embedded into a +// struct to get a default implementation. This makes faking out just the method +// you want to test easier.
+type Clientset struct { + core.Fake +} + +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + return &fakediscovery.FakeDiscovery{Fake: &c.Fake} +} + +var _ clientset.Interface = &Clientset{} + +// Federation retrieves the FederationClient +func (c *Clientset) Federation() unversionedfederation.FederationInterface { + return &fakeunversionedfederation.FakeFederation{Fake: &c.Fake} +} + +// Core retrieves the CoreClient +func (c *Clientset) Core() unversionedcore.CoreInterface { + return &fakeunversionedcore.FakeCore{Fake: &c.Fake} +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/fake/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/fake/doc.go new file mode 100644 index 000000000000..febc307fa2b8 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with arguments: --clientset-name=federation_internalclientset --clientset-path=k8s.io/kubernetes/federation/client/clientset_generated --included-types-overrides=[api/Service] --input=[../../federation/apis/federation/,api/] + +// This package has the automatically generated fake clientset. +package fake diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/import_known_versions.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/import_known_versions.go new file mode 100644 index 000000000000..af8c2e7436f8 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/import_known_versions.go @@ -0,0 +1,25 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package federation_internalclientset + +// These imports are the API groups the client will support. 
+import ( + _ "k8s.io/kubernetes/federation/apis/federation/install" +) + +func init() { +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/unversioned/core_client.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/unversioned/core_client.go new file mode 100644 index 000000000000..d308d0fe1ea9 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/unversioned/core_client.go @@ -0,0 +1,101 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + api "k8s.io/kubernetes/pkg/api" + registered "k8s.io/kubernetes/pkg/apimachinery/registered" + restclient "k8s.io/kubernetes/pkg/client/restclient" +) + +type CoreInterface interface { + GetRESTClient() *restclient.RESTClient + ServicesGetter +} + +// CoreClient is used to interact with features provided by the Core group. +type CoreClient struct { + *restclient.RESTClient +} + +func (c *CoreClient) Services(namespace string) ServiceInterface { + return newServices(c, namespace) +} + +// NewForConfig creates a new CoreClient for the given config. +func NewForConfig(c *restclient.Config) (*CoreClient, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := restclient.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &CoreClient{client}, nil +} + +// NewForConfigOrDie creates a new CoreClient for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *restclient.Config) *CoreClient { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new CoreClient for the given RESTClient. +func New(c *restclient.RESTClient) *CoreClient { + return &CoreClient{c} +} + +func setConfigDefaults(config *restclient.Config) error { + // if core group is not registered, return an error + g, err := registered.Group("") + if err != nil { + return err + } + config.APIPath = "/api" + if config.UserAgent == "" { + config.UserAgent = restclient.DefaultKubernetesUserAgent() + } + // TODO: Unconditionally set the config.Version, until we fix the config. + //if config.Version == "" { + copyGroupVersion := g.GroupVersion + config.GroupVersion = &copyGroupVersion + //} + + config.NegotiatedSerializer = api.Codecs + + if config.QPS == 0 { + config.QPS = 5 + } + if config.Burst == 0 { + config.Burst = 10 + } + return nil +} + +// GetRESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
+func (c *CoreClient) GetRESTClient() *restclient.RESTClient { + if c == nil { + return nil + } + return c.RESTClient +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/unversioned/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/unversioned/doc.go new file mode 100644 index 000000000000..30cff08b9486 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/unversioned/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with arguments: --clientset-name=federation_internalclientset --clientset-path=k8s.io/kubernetes/federation/client/clientset_generated --included-types-overrides=[api/Service] --input=[../../federation/apis/federation/,api/] + +// This package has the automatically generated typed clients. +package unversioned diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/unversioned/fake/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/unversioned/fake/doc.go new file mode 100644 index 000000000000..c2d08c592377 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/unversioned/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with arguments: --clientset-name=federation_internalclientset --clientset-path=k8s.io/kubernetes/federation/client/clientset_generated --included-types-overrides=[api/Service] --input=[../../federation/apis/federation/,api/] + +// Package fake has the automatically generated clients. 
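[Editor's note: to make the defaulting in core_client.go above concrete: NewForConfig works on a copy, so a caller's zero-valued QPS/Burst become 5/10 only inside the client, and the APIPath is forced to "/api". A hedged sketch; the Host value and the pkg/api/install import are assumptions.]

package main

import (
	"fmt"

	unversionedcore "k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/unversioned"
	_ "k8s.io/kubernetes/pkg/api/install" // assumption: registers the core ("") group queried by setConfigDefaults
	restclient "k8s.io/kubernetes/pkg/client/restclient"
)

func main() {
	cfg := &restclient.Config{Host: "https://localhost:8443"} // hypothetical endpoint
	client, err := unversionedcore.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// The caller's config is untouched; the QPS/Burst defaults were applied to the internal copy.
	fmt.Println(cfg.QPS == 0, client.GetRESTClient() != nil)
}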
+package fake diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/unversioned/fake/fake_core_client.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/unversioned/fake/fake_core_client.go new file mode 100644 index 000000000000..1e28cd26c918 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/unversioned/fake/fake_core_client.go @@ -0,0 +1,37 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + unversioned "k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/unversioned" + restclient "k8s.io/kubernetes/pkg/client/restclient" + core "k8s.io/kubernetes/pkg/client/testing/core" +) + +type FakeCore struct { + *core.Fake +} + +func (c *FakeCore) Services(namespace string) unversioned.ServiceInterface { + return &FakeServices{c, namespace} +} + +// GetRESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeCore) GetRESTClient() *restclient.RESTClient { + return nil +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/fake/fake_service.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/unversioned/fake/fake_service.go similarity index 75% rename from Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/fake/fake_service.go rename to Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/unversioned/fake/fake_service.go index 2cf38901c5cb..62eae6481e54 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/fake/fake_service.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/unversioned/fake/fake_service.go @@ -18,6 +18,7 @@ package fake import ( api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" core "k8s.io/kubernetes/pkg/client/testing/core" labels "k8s.io/kubernetes/pkg/labels" watch "k8s.io/kubernetes/pkg/watch" @@ -29,9 +30,11 @@ type FakeServices struct { ns string } +var servicesResource = unversioned.GroupVersionResource{Group: "", Version: "", Resource: "services"} + func (c *FakeServices) Create(service *api.Service) (result *api.Service, err error) { obj, err := c.Fake. 
- Invokes(core.NewCreateAction("services", c.ns, service), &api.Service{}) + Invokes(core.NewCreateAction(servicesResource, c.ns, service), &api.Service{}) if obj == nil { return nil, err @@ -41,7 +44,7 @@ func (c *FakeServices) Create(service *api.Service) (result *api.Service, err er func (c *FakeServices) Update(service *api.Service) (result *api.Service, err error) { obj, err := c.Fake. - Invokes(core.NewUpdateAction("services", c.ns, service), &api.Service{}) + Invokes(core.NewUpdateAction(servicesResource, c.ns, service), &api.Service{}) if obj == nil { return nil, err @@ -51,7 +54,7 @@ func (c *FakeServices) Update(service *api.Service) (result *api.Service, err er func (c *FakeServices) UpdateStatus(service *api.Service) (*api.Service, error) { obj, err := c.Fake. - Invokes(core.NewUpdateSubresourceAction("services", "status", c.ns, service), &api.Service{}) + Invokes(core.NewUpdateSubresourceAction(servicesResource, "status", c.ns, service), &api.Service{}) if obj == nil { return nil, err @@ -61,13 +64,13 @@ func (c *FakeServices) UpdateStatus(service *api.Service) (*api.Service, error) func (c *FakeServices) Delete(name string, options *api.DeleteOptions) error { _, err := c.Fake. - Invokes(core.NewDeleteAction("services", c.ns, name), &api.Service{}) + Invokes(core.NewDeleteAction(servicesResource, c.ns, name), &api.Service{}) return err } func (c *FakeServices) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - action := core.NewDeleteCollectionAction("services", c.ns, listOptions) + action := core.NewDeleteCollectionAction(servicesResource, c.ns, listOptions) _, err := c.Fake.Invokes(action, &api.ServiceList{}) return err @@ -75,7 +78,7 @@ func (c *FakeServices) DeleteCollection(options *api.DeleteOptions, listOptions func (c *FakeServices) Get(name string) (result *api.Service, err error) { obj, err := c.Fake. - Invokes(core.NewGetAction("services", c.ns, name), &api.Service{}) + Invokes(core.NewGetAction(servicesResource, c.ns, name), &api.Service{}) if obj == nil { return nil, err @@ -85,7 +88,7 @@ func (c *FakeServices) Get(name string) (result *api.Service, err error) { func (c *FakeServices) List(opts api.ListOptions) (result *api.ServiceList, err error) { obj, err := c.Fake. - Invokes(core.NewListAction("services", c.ns, opts), &api.ServiceList{}) + Invokes(core.NewListAction(servicesResource, c.ns, opts), &api.ServiceList{}) if obj == nil { return nil, err @@ -107,6 +110,6 @@ func (c *FakeServices) List(opts api.ListOptions) (result *api.ServiceList, err // Watch returns a watch.Interface that watches the requested services. func (c *FakeServices) Watch(opts api.ListOptions) (watch.Interface, error) { return c.Fake. 
- InvokesWatch(core.NewWatchAction("services", c.ns, opts)) + InvokesWatch(core.NewWatchAction(servicesResource, c.ns, opts)) } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/unversioned/generated_expansion.go similarity index 87% rename from Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/doc.go rename to Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/unversioned/generated_expansion.go index 9300170400fe..65df6665a9c5 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/doc.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/unversioned/generated_expansion.go @@ -14,5 +14,6 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package unversioned has the automatically generated clients for unversioned resources. package unversioned + +type ServiceExpansion interface{} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/service.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/unversioned/service.go similarity index 100% rename from Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/service.go rename to Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/unversioned/service.go diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/federation/unversioned/cluster.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/federation/unversioned/cluster.go new file mode 100644 index 000000000000..e270951455c9 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/federation/unversioned/cluster.go @@ -0,0 +1,140 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + federation "k8s.io/kubernetes/federation/apis/federation" + api "k8s.io/kubernetes/pkg/api" + watch "k8s.io/kubernetes/pkg/watch" +) + +// ClustersGetter has a method to return a ClusterInterface. +// A group's client should implement this interface. +type ClustersGetter interface { + Clusters() ClusterInterface +} + +// ClusterInterface has methods to work with Cluster resources. 
+type ClusterInterface interface { + Create(*federation.Cluster) (*federation.Cluster, error) + Update(*federation.Cluster) (*federation.Cluster, error) + UpdateStatus(*federation.Cluster) (*federation.Cluster, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*federation.Cluster, error) + List(opts api.ListOptions) (*federation.ClusterList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + ClusterExpansion +} + +// clusters implements ClusterInterface +type clusters struct { + client *FederationClient +} + +// newClusters returns a Clusters +func newClusters(c *FederationClient) *clusters { + return &clusters{ + client: c, + } +} + +// Create takes the representation of a cluster and creates it. Returns the server's representation of the cluster, and an error, if there is any. +func (c *clusters) Create(cluster *federation.Cluster) (result *federation.Cluster, err error) { + result = &federation.Cluster{} + err = c.client.Post(). + Resource("clusters"). + Body(cluster). + Do(). + Into(result) + return +} + +// Update takes the representation of a cluster and updates it. Returns the server's representation of the cluster, and an error, if there is any. +func (c *clusters) Update(cluster *federation.Cluster) (result *federation.Cluster, err error) { + result = &federation.Cluster{} + err = c.client.Put(). + Resource("clusters"). + Name(cluster.Name). + Body(cluster). + Do(). + Into(result) + return +} + +func (c *clusters) UpdateStatus(cluster *federation.Cluster) (result *federation.Cluster, err error) { + result = &federation.Cluster{} + err = c.client.Put(). + Resource("clusters"). + Name(cluster.Name). + SubResource("status"). + Body(cluster). + Do(). + Into(result) + return +} + +// Delete takes name of the cluster and deletes it. Returns an error if one occurs. +func (c *clusters) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Resource("clusters"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *clusters) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Resource("clusters"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the cluster, and returns the corresponding cluster object, and an error if there is any. +func (c *clusters) Get(name string) (result *federation.Cluster, err error) { + result = &federation.Cluster{} + err = c.client.Get(). + Resource("clusters"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Clusters that match those selectors. +func (c *clusters) List(opts api.ListOptions) (result *federation.ClusterList, err error) { + result = &federation.ClusterList{} + err = c.client.Get(). + Resource("clusters"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested clusters. +func (c *clusters) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Resource("clusters"). + VersionedParams(&opts, api.ParameterCodec). 
+ Watch() +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/federation/unversioned/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/federation/unversioned/doc.go new file mode 100644 index 000000000000..30cff08b9486 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/federation/unversioned/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with arguments: --clientset-name=federation_internalclientset --clientset-path=k8s.io/kubernetes/federation/client/clientset_generated --included-types-overrides=[api/Service] --input=[../../federation/apis/federation/,api/] + +// This package has the automatically generated typed clients. +package unversioned diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/federation/unversioned/fake/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/federation/unversioned/fake/doc.go new file mode 100644 index 000000000000..c2d08c592377 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/federation/unversioned/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with arguments: --clientset-name=federation_internalclientset --clientset-path=k8s.io/kubernetes/federation/client/clientset_generated --included-types-overrides=[api/Service] --input=[../../federation/apis/federation/,api/] + +// Package fake has the automatically generated clients. 
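[Editor's note: putting the clusters client above together, a hedged list-then-watch sketch. The resume-from-ResourceVersion pattern is the conventional one, not something this file prescribes, and it assumes ClusterList promotes ResourceVersion from its embedded ListMeta.]

package clusterwatch

import (
	"fmt"

	federation "k8s.io/kubernetes/federation/apis/federation"
	internalclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset"
	"k8s.io/kubernetes/pkg/api"
)

func watchClusters(cs internalclientset.Interface) error {
	list, err := cs.Federation().Clusters().List(api.ListOptions{})
	if err != nil {
		return err
	}
	// Resume watching from the snapshot's version (assumed standard list/watch pattern).
	w, err := cs.Federation().Clusters().Watch(api.ListOptions{ResourceVersion: list.ResourceVersion})
	if err != nil {
		return err
	}
	defer w.Stop()
	for event := range w.ResultChan() {
		if cluster, ok := event.Object.(*federation.Cluster); ok {
			fmt.Println(event.Type, cluster.Name)
		}
	}
	return nil
}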
+package fake diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/federation/unversioned/fake/fake_cluster.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/federation/unversioned/fake/fake_cluster.go new file mode 100644 index 000000000000..e5ef0d7bc6ed --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/federation/unversioned/fake/fake_cluster.go @@ -0,0 +1,108 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + federation "k8s.io/kubernetes/federation/apis/federation" + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + core "k8s.io/kubernetes/pkg/client/testing/core" + labels "k8s.io/kubernetes/pkg/labels" + watch "k8s.io/kubernetes/pkg/watch" +) + +// FakeClusters implements ClusterInterface +type FakeClusters struct { + Fake *FakeFederation +} + +var clustersResource = unversioned.GroupVersionResource{Group: "federation", Version: "", Resource: "clusters"} + +func (c *FakeClusters) Create(cluster *federation.Cluster) (result *federation.Cluster, err error) { + obj, err := c.Fake. + Invokes(core.NewRootCreateAction(clustersResource, cluster), &federation.Cluster{}) + if obj == nil { + return nil, err + } + return obj.(*federation.Cluster), err +} + +func (c *FakeClusters) Update(cluster *federation.Cluster) (result *federation.Cluster, err error) { + obj, err := c.Fake. + Invokes(core.NewRootUpdateAction(clustersResource, cluster), &federation.Cluster{}) + if obj == nil { + return nil, err + } + return obj.(*federation.Cluster), err +} + +func (c *FakeClusters) UpdateStatus(cluster *federation.Cluster) (*federation.Cluster, error) { + obj, err := c.Fake. + Invokes(core.NewRootUpdateSubresourceAction(clustersResource, "status", cluster), &federation.Cluster{}) + if obj == nil { + return nil, err + } + return obj.(*federation.Cluster), err +} + +func (c *FakeClusters) Delete(name string, options *api.DeleteOptions) error { + _, err := c.Fake. + Invokes(core.NewRootDeleteAction(clustersResource, name), &federation.Cluster{}) + return err +} + +func (c *FakeClusters) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + action := core.NewRootDeleteCollectionAction(clustersResource, listOptions) + + _, err := c.Fake.Invokes(action, &federation.ClusterList{}) + return err +} + +func (c *FakeClusters) Get(name string) (result *federation.Cluster, err error) { + obj, err := c.Fake. + Invokes(core.NewRootGetAction(clustersResource, name), &federation.Cluster{}) + if obj == nil { + return nil, err + } + return obj.(*federation.Cluster), err +} + +func (c *FakeClusters) List(opts api.ListOptions) (result *federation.ClusterList, err error) { + obj, err := c.Fake. 
+ Invokes(core.NewRootListAction(clustersResource, opts), &federation.ClusterList{}) + if obj == nil { + return nil, err + } + + label := opts.LabelSelector + if label == nil { + label = labels.Everything() + } + list := &federation.ClusterList{} + for _, item := range obj.(*federation.ClusterList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested clusters. +func (c *FakeClusters) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(core.NewRootWatchAction(clustersResource, opts)) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/federation/unversioned/fake/fake_federation_client.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/federation/unversioned/fake/fake_federation_client.go new file mode 100644 index 000000000000..f829ab0eca28 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/federation/unversioned/fake/fake_federation_client.go @@ -0,0 +1,37 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + unversioned "k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/federation/unversioned" + restclient "k8s.io/kubernetes/pkg/client/restclient" + core "k8s.io/kubernetes/pkg/client/testing/core" +) + +type FakeFederation struct { + *core.Fake +} + +func (c *FakeFederation) Clusters() unversioned.ClusterInterface { + return &FakeClusters{c} +} + +// GetRESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeFederation) GetRESTClient() *restclient.RESTClient { + return nil +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/federation/unversioned/federation_client.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/federation/unversioned/federation_client.go new file mode 100644 index 000000000000..be2a8a153c13 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/federation/unversioned/federation_client.go @@ -0,0 +1,101 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + api "k8s.io/kubernetes/pkg/api" + registered "k8s.io/kubernetes/pkg/apimachinery/registered" + restclient "k8s.io/kubernetes/pkg/client/restclient" +) + +type FederationInterface interface { + GetRESTClient() *restclient.RESTClient + ClustersGetter +} + +// FederationClient is used to interact with features provided by the Federation group. +type FederationClient struct { + *restclient.RESTClient +} + +func (c *FederationClient) Clusters() ClusterInterface { + return newClusters(c) +} + +// NewForConfig creates a new FederationClient for the given config. +func NewForConfig(c *restclient.Config) (*FederationClient, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := restclient.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &FederationClient{client}, nil +} + +// NewForConfigOrDie creates a new FederationClient for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *restclient.Config) *FederationClient { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new FederationClient for the given RESTClient. +func New(c *restclient.RESTClient) *FederationClient { + return &FederationClient{c} +} + +func setConfigDefaults(config *restclient.Config) error { + // if federation group is not registered, return an error + g, err := registered.Group("federation") + if err != nil { + return err + } + config.APIPath = "/apis" + if config.UserAgent == "" { + config.UserAgent = restclient.DefaultKubernetesUserAgent() + } + // TODO: Unconditionally set the config.Version, until we fix the config. + //if config.Version == "" { + copyGroupVersion := g.GroupVersion + config.GroupVersion = &copyGroupVersion + //} + + config.NegotiatedSerializer = api.Codecs + + if config.QPS == 0 { + config.QPS = 5 + } + if config.Burst == 0 { + config.Burst = 10 + } + return nil +} + +// GetRESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
+func (c *FederationClient) GetRESTClient() *restclient.RESTClient { + if c == nil { + return nil + } + return c.RESTClient +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/federation/unversioned/generated_expansion.go similarity index 87% rename from Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned/doc.go rename to Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/federation/unversioned/generated_expansion.go index 9300170400fe..8888bf9bd497 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned/doc.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/federation/unversioned/generated_expansion.go @@ -14,5 +14,6 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package unversioned has the automatically generated clients for unversioned resources. package unversioned + +type ClusterExpansion interface{} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_3/clientset.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_3/clientset.go new file mode 100644 index 000000000000..7bc6216e8073 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_3/clientset.go @@ -0,0 +1,106 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package federation_release_1_3 + +import ( + "github.com/golang/glog" + v1core "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_3/typed/core/v1" + v1alpha1federation "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_3/typed/federation/v1alpha1" + restclient "k8s.io/kubernetes/pkg/client/restclient" + discovery "k8s.io/kubernetes/pkg/client/typed/discovery" + "k8s.io/kubernetes/pkg/util/flowcontrol" +) + +type Interface interface { + Discovery() discovery.DiscoveryInterface + Federation() v1alpha1federation.FederationInterface + Core() v1core.CoreInterface +} + +// Clientset contains the clients for groups. Each group has exactly one +// version included in a Clientset. 
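[Editor's note: the release-1.3 clientset below repeats the internal wiring, but each group is pinned to a single released version (v1alpha1 federation, v1 core). A hedged usage sketch; the Host value and the registration import are assumptions.]

package main

import (
	"fmt"

	release13 "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_3"
	"k8s.io/kubernetes/pkg/api"
	_ "k8s.io/kubernetes/pkg/api/install" // assumption: registers the core group
	restclient "k8s.io/kubernetes/pkg/client/restclient"
)

func main() {
	cfg := &restclient.Config{Host: "https://federation.example.com"} // hypothetical server
	cs, err := release13.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// Note the typed result: the v1 core client returns *v1.ServiceList, not the internal type.
	svcs, err := cs.Core().Services("default").List(api.ListOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println("services:", len(svcs.Items))
}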
+type Clientset struct { + *discovery.DiscoveryClient + *v1alpha1federation.FederationClient + *v1core.CoreClient +} + +// Federation retrieves the FederationClient +func (c *Clientset) Federation() v1alpha1federation.FederationInterface { + if c == nil { + return nil + } + return c.FederationClient +} + +// Core retrieves the CoreClient +func (c *Clientset) Core() v1core.CoreInterface { + if c == nil { + return nil + } + return c.CoreClient +} + +// Discovery retrieves the DiscoveryClient +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + return c.DiscoveryClient +} + +// NewForConfig creates a new Clientset for the given config. +func NewForConfig(c *restclient.Config) (*Clientset, error) { + configShallowCopy := *c + if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) + } + var clientset Clientset + var err error + clientset.FederationClient, err = v1alpha1federation.NewForConfig(&configShallowCopy) + if err != nil { + return &clientset, err + } + clientset.CoreClient, err = v1core.NewForConfig(&configShallowCopy) + if err != nil { + return &clientset, err + } + + clientset.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) + if err != nil { + glog.Errorf("failed to create the DiscoveryClient: %v", err) + } + return &clientset, err +} + +// NewForConfigOrDie creates a new Clientset for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *restclient.Config) *Clientset { + var clientset Clientset + clientset.FederationClient = v1alpha1federation.NewForConfigOrDie(c) + clientset.CoreClient = v1core.NewForConfigOrDie(c) + + clientset.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) + return &clientset +} + +// New creates a new Clientset for the given RESTClient. +func New(c *restclient.RESTClient) *Clientset { + var clientset Clientset + clientset.FederationClient = v1alpha1federation.New(c) + clientset.CoreClient = v1core.New(c) + + clientset.DiscoveryClient = discovery.NewDiscoveryClient(c) + return &clientset +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_3/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_3/doc.go new file mode 100644 index 000000000000..44d6b8f09723 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_3/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// This package is generated by client-gen with arguments: --clientset-name=federation_release_1_3 --clientset-path=k8s.io/kubernetes/federation/client/clientset_generated --included-types-overrides=[api/v1/Service] --input=[../../federation/apis/federation/v1alpha1,api/v1] + +// This package has the automatically generated clientset. +package federation_release_1_3 diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_3/fake/clientset_generated.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_3/fake/clientset_generated.go new file mode 100644 index 000000000000..8af3fd9ef41e --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_3/fake/clientset_generated.go @@ -0,0 +1,72 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + clientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_3" + v1core "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_3/typed/core/v1" + fakev1core "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_3/typed/core/v1/fake" + v1alpha1federation "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_3/typed/federation/v1alpha1" + fakev1alpha1federation "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_3/typed/federation/v1alpha1/fake" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apimachinery/registered" + "k8s.io/kubernetes/pkg/client/testing/core" + "k8s.io/kubernetes/pkg/client/typed/discovery" + fakediscovery "k8s.io/kubernetes/pkg/client/typed/discovery/fake" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/watch" +) + +// NewSimpleClientset returns a clientset that will respond with the provided objects +func NewSimpleClientset(objects ...runtime.Object) *Clientset { + o := core.NewObjects(api.Scheme, api.Codecs.UniversalDecoder()) + for _, obj := range objects { + if err := o.Add(obj); err != nil { + panic(err) + } + } + + fakePtr := core.Fake{} + fakePtr.AddReactor("*", "*", core.ObjectReaction(o, registered.RESTMapper())) + + fakePtr.AddWatchReactor("*", core.DefaultWatchReactor(watch.NewFake(), nil)) + + return &Clientset{fakePtr} +} + +// Clientset implements clientset.Interface. Meant to be embedded into a +// struct to get a default implementation. This makes faking out just the method +// you want to test easier. 
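[Editor's note: beyond canned responses, the embedded core.Fake also records every invocation, which is what makes these fakes useful for assertions. A sketch under the assumption that core.Fake exposes the recorded actions via Actions(), as in k8s.io/kubernetes/pkg/client/testing/core; verify against the vendored source.]

package main

import (
	"fmt"

	"k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_3/fake"
)

func main() {
	cs := fake.NewSimpleClientset()
	// Even a failing call (nothing to delete in the empty tracker) is recorded on the embedded Fake.
	_ = cs.Core().Services("default").Delete("missing", nil)
	for _, action := range cs.Actions() { // Actions() assumed from pkg/client/testing/core
		fmt.Printf("recorded action: %v\n", action)
	}
}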
+type Clientset struct { + core.Fake +} + +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + return &fakediscovery.FakeDiscovery{Fake: &c.Fake} +} + +var _ clientset.Interface = &Clientset{} + +// Federation retrieves the FederationClient +func (c *Clientset) Federation() v1alpha1federation.FederationInterface { + return &fakev1alpha1federation.FakeFederation{Fake: &c.Fake} +} + +// Core retrieves the CoreClient +func (c *Clientset) Core() v1core.CoreInterface { + return &fakev1core.FakeCore{Fake: &c.Fake} +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_3/fake/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_3/fake/doc.go new file mode 100644 index 000000000000..cd0b07c20683 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_3/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with arguments: --clientset-name=federation_release_1_3 --clientset-path=k8s.io/kubernetes/federation/client/clientset_generated --included-types-overrides=[api/v1/Service] --input=[../../federation/apis/federation/v1alpha1,api/v1] + +// This package has the automatically generated fake clientset. +package fake diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_3/import_known_versions.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_3/import_known_versions.go new file mode 100644 index 000000000000..a977647aa0d7 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_3/import_known_versions.go @@ -0,0 +1,25 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package federation_release_1_3 + +// These imports are the API groups the client will support. 
+import ( + _ "k8s.io/kubernetes/federation/apis/federation/install" +) + +func init() { +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_3/typed/core/v1/core_client.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_3/typed/core/v1/core_client.go new file mode 100644 index 000000000000..8d0682720d04 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_3/typed/core/v1/core_client.go @@ -0,0 +1,101 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + api "k8s.io/kubernetes/pkg/api" + registered "k8s.io/kubernetes/pkg/apimachinery/registered" + restclient "k8s.io/kubernetes/pkg/client/restclient" +) + +type CoreInterface interface { + GetRESTClient() *restclient.RESTClient + ServicesGetter +} + +// CoreClient is used to interact with features provided by the Core group. +type CoreClient struct { + *restclient.RESTClient +} + +func (c *CoreClient) Services(namespace string) ServiceInterface { + return newServices(c, namespace) +} + +// NewForConfig creates a new CoreClient for the given config. +func NewForConfig(c *restclient.Config) (*CoreClient, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := restclient.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &CoreClient{client}, nil +} + +// NewForConfigOrDie creates a new CoreClient for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *restclient.Config) *CoreClient { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new CoreClient for the given RESTClient. +func New(c *restclient.RESTClient) *CoreClient { + return &CoreClient{c} +} + +func setConfigDefaults(config *restclient.Config) error { + // if core group is not registered, return an error + g, err := registered.Group("") + if err != nil { + return err + } + config.APIPath = "/api" + if config.UserAgent == "" { + config.UserAgent = restclient.DefaultKubernetesUserAgent() + } + // TODO: Unconditionally set the config.Version, until we fix the config. + //if config.Version == "" { + copyGroupVersion := g.GroupVersion + config.GroupVersion = &copyGroupVersion + //} + + config.NegotiatedSerializer = api.Codecs + + if config.QPS == 0 { + config.QPS = 5 + } + if config.Burst == 0 { + config.Burst = 10 + } + return nil +} + +// GetRESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
+func (c *CoreClient) GetRESTClient() *restclient.RESTClient { + if c == nil { + return nil + } + return c.RESTClient +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_3/typed/core/v1/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_3/typed/core/v1/doc.go new file mode 100644 index 000000000000..e85a01c7c591 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_3/typed/core/v1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with arguments: --clientset-name=federation_release_1_3 --clientset-path=k8s.io/kubernetes/federation/client/clientset_generated --included-types-overrides=[api/v1/Service] --input=[../../federation/apis/federation/v1alpha1,api/v1] + +// This package has the automatically generated typed clients. +package v1 diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_3/typed/core/v1/fake/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_3/typed/core/v1/fake/doc.go new file mode 100644 index 000000000000..846226b3be97 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_3/typed/core/v1/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with arguments: --clientset-name=federation_release_1_3 --clientset-path=k8s.io/kubernetes/federation/client/clientset_generated --included-types-overrides=[api/v1/Service] --input=[../../federation/apis/federation/v1alpha1,api/v1] + +// Package fake has the automatically generated clients. 
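[Editor's note: the fakes below now key actions by GroupVersionResource, but reactors still match on the plain resource name. A hedged sketch that wires a fake Clientset by hand, mirroring NewSimpleClientset above, with one custom reactor registered first so it wins.]

package main

import (
	"errors"
	"fmt"

	"k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_3/fake"
	core "k8s.io/kubernetes/pkg/client/testing/core"
	"k8s.io/kubernetes/pkg/runtime"
)

func main() {
	fakePtr := core.Fake{}
	// Reactors run in registration order, so this one fires before any catch-all.
	fakePtr.AddReactor("delete", "services", func(action core.Action) (bool, runtime.Object, error) {
		return true, nil, errors.New("services are protected in this test")
	})
	cs := &fake.Clientset{fakePtr}
	err := cs.Core().Services("default").Delete("dns", nil)
	fmt.Println(err) // services are protected in this test
}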
+package fake diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_3/typed/core/v1/fake/fake_core_client.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_3/typed/core/v1/fake/fake_core_client.go new file mode 100644 index 000000000000..cffa8da7ebfb --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_3/typed/core/v1/fake/fake_core_client.go @@ -0,0 +1,37 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + v1 "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_3/typed/core/v1" + restclient "k8s.io/kubernetes/pkg/client/restclient" + core "k8s.io/kubernetes/pkg/client/testing/core" +) + +type FakeCore struct { + *core.Fake +} + +func (c *FakeCore) Services(namespace string) v1.ServiceInterface { + return &FakeServices{c, namespace} +} + +// GetRESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeCore) GetRESTClient() *restclient.RESTClient { + return nil +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_3/typed/core/v1/fake/fake_service.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_3/typed/core/v1/fake/fake_service.go new file mode 100644 index 000000000000..3355aa94e1f8 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_3/typed/core/v1/fake/fake_service.go @@ -0,0 +1,116 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + v1 "k8s.io/kubernetes/pkg/api/v1" + core "k8s.io/kubernetes/pkg/client/testing/core" + labels "k8s.io/kubernetes/pkg/labels" + watch "k8s.io/kubernetes/pkg/watch" +) + +// FakeServices implements ServiceInterface +type FakeServices struct { + Fake *FakeCore + ns string +} + +var servicesResource = unversioned.GroupVersionResource{Group: "", Version: "v1", Resource: "services"} + +func (c *FakeServices) Create(service *v1.Service) (result *v1.Service, err error) { + obj, err := c.Fake. 
+ Invokes(core.NewCreateAction(servicesResource, c.ns, service), &v1.Service{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Service), err +} + +func (c *FakeServices) Update(service *v1.Service) (result *v1.Service, err error) { + obj, err := c.Fake. + Invokes(core.NewUpdateAction(servicesResource, c.ns, service), &v1.Service{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Service), err +} + +func (c *FakeServices) UpdateStatus(service *v1.Service) (*v1.Service, error) { + obj, err := c.Fake. + Invokes(core.NewUpdateSubresourceAction(servicesResource, "status", c.ns, service), &v1.Service{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Service), err +} + +func (c *FakeServices) Delete(name string, options *api.DeleteOptions) error { + _, err := c.Fake. + Invokes(core.NewDeleteAction(servicesResource, c.ns, name), &v1.Service{}) + + return err +} + +func (c *FakeServices) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + action := core.NewDeleteCollectionAction(servicesResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1.ServiceList{}) + return err +} + +func (c *FakeServices) Get(name string) (result *v1.Service, err error) { + obj, err := c.Fake. + Invokes(core.NewGetAction(servicesResource, c.ns, name), &v1.Service{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Service), err +} + +func (c *FakeServices) List(opts api.ListOptions) (result *v1.ServiceList, err error) { + obj, err := c.Fake. + Invokes(core.NewListAction(servicesResource, c.ns, opts), &v1.ServiceList{}) + + if obj == nil { + return nil, err + } + + label := opts.LabelSelector + if label == nil { + label = labels.Everything() + } + list := &v1.ServiceList{} + for _, item := range obj.(*v1.ServiceList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested services. +func (c *FakeServices) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(core.NewWatchAction(servicesResource, c.ns, opts)) + +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_3/typed/core/v1/generated_expansion.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_3/typed/core/v1/generated_expansion.go new file mode 100644 index 000000000000..2928a83ed34e --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_3/typed/core/v1/generated_expansion.go @@ -0,0 +1,19 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +type ServiceExpansion interface{} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_3/typed/core/v1/service.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_3/typed/core/v1/service.go new file mode 100644 index 000000000000..cd62b5d94f20 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_3/typed/core/v1/service.go @@ -0,0 +1,150 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + api "k8s.io/kubernetes/pkg/api" + v1 "k8s.io/kubernetes/pkg/api/v1" + watch "k8s.io/kubernetes/pkg/watch" +) + +// ServicesGetter has a method to return a ServiceInterface. +// A group's client should implement this interface. +type ServicesGetter interface { + Services(namespace string) ServiceInterface +} + +// ServiceInterface has methods to work with Service resources. +type ServiceInterface interface { + Create(*v1.Service) (*v1.Service, error) + Update(*v1.Service) (*v1.Service, error) + UpdateStatus(*v1.Service) (*v1.Service, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*v1.Service, error) + List(opts api.ListOptions) (*v1.ServiceList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + ServiceExpansion +} + +// services implements ServiceInterface +type services struct { + client *CoreClient + ns string +} + +// newServices returns a Services +func newServices(c *CoreClient, namespace string) *services { + return &services{ + client: c, + ns: namespace, + } +} + +// Create takes the representation of a service and creates it. Returns the server's representation of the service, and an error, if there is any. +func (c *services) Create(service *v1.Service) (result *v1.Service, err error) { + result = &v1.Service{} + err = c.client.Post(). + Namespace(c.ns). + Resource("services"). + Body(service). + Do(). + Into(result) + return +} + +// Update takes the representation of a service and updates it. Returns the server's representation of the service, and an error, if there is any. +func (c *services) Update(service *v1.Service) (result *v1.Service, err error) { + result = &v1.Service{} + err = c.client.Put(). + Namespace(c.ns). + Resource("services"). + Name(service.Name). + Body(service). + Do(). + Into(result) + return +} + +func (c *services) UpdateStatus(service *v1.Service) (result *v1.Service, err error) { + result = &v1.Service{} + err = c.client.Put(). + Namespace(c.ns). + Resource("services"). + Name(service.Name). + SubResource("status"). + Body(service). + Do(). + Into(result) + return +} + +// Delete takes name of the service and deletes it. Returns an error if one occurs. +func (c *services) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). 
+ Resource("services"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *services) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("services"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the service, and returns the corresponding service object, and an error if there is any. +func (c *services) Get(name string) (result *v1.Service, err error) { + result = &v1.Service{} + err = c.client.Get(). + Namespace(c.ns). + Resource("services"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Services that match those selectors. +func (c *services) List(opts api.ListOptions) (result *v1.ServiceList, err error) { + result = &v1.ServiceList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("services"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested services. +func (c *services) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("services"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_3/typed/federation/v1alpha1/cluster.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_3/typed/federation/v1alpha1/cluster.go new file mode 100644 index 000000000000..1a94323e0100 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_3/typed/federation/v1alpha1/cluster.go @@ -0,0 +1,140 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + v1alpha1 "k8s.io/kubernetes/federation/apis/federation/v1alpha1" + api "k8s.io/kubernetes/pkg/api" + watch "k8s.io/kubernetes/pkg/watch" +) + +// ClustersGetter has a method to return a ClusterInterface. +// A group's client should implement this interface. +type ClustersGetter interface { + Clusters() ClusterInterface +} + +// ClusterInterface has methods to work with Cluster resources. 
+type ClusterInterface interface { + Create(*v1alpha1.Cluster) (*v1alpha1.Cluster, error) + Update(*v1alpha1.Cluster) (*v1alpha1.Cluster, error) + UpdateStatus(*v1alpha1.Cluster) (*v1alpha1.Cluster, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*v1alpha1.Cluster, error) + List(opts api.ListOptions) (*v1alpha1.ClusterList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + ClusterExpansion +} + +// clusters implements ClusterInterface +type clusters struct { + client *FederationClient +} + +// newClusters returns a Clusters +func newClusters(c *FederationClient) *clusters { + return &clusters{ + client: c, + } +} + +// Create takes the representation of a cluster and creates it. Returns the server's representation of the cluster, and an error, if there is any. +func (c *clusters) Create(cluster *v1alpha1.Cluster) (result *v1alpha1.Cluster, err error) { + result = &v1alpha1.Cluster{} + err = c.client.Post(). + Resource("clusters"). + Body(cluster). + Do(). + Into(result) + return +} + +// Update takes the representation of a cluster and updates it. Returns the server's representation of the cluster, and an error, if there is any. +func (c *clusters) Update(cluster *v1alpha1.Cluster) (result *v1alpha1.Cluster, err error) { + result = &v1alpha1.Cluster{} + err = c.client.Put(). + Resource("clusters"). + Name(cluster.Name). + Body(cluster). + Do(). + Into(result) + return +} + +func (c *clusters) UpdateStatus(cluster *v1alpha1.Cluster) (result *v1alpha1.Cluster, err error) { + result = &v1alpha1.Cluster{} + err = c.client.Put(). + Resource("clusters"). + Name(cluster.Name). + SubResource("status"). + Body(cluster). + Do(). + Into(result) + return +} + +// Delete takes name of the cluster and deletes it. Returns an error if one occurs. +func (c *clusters) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Resource("clusters"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *clusters) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Resource("clusters"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the cluster, and returns the corresponding cluster object, and an error if there is any. +func (c *clusters) Get(name string) (result *v1alpha1.Cluster, err error) { + result = &v1alpha1.Cluster{} + err = c.client.Get(). + Resource("clusters"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Clusters that match those selectors. +func (c *clusters) List(opts api.ListOptions) (result *v1alpha1.ClusterList, err error) { + result = &v1alpha1.ClusterList{} + err = c.client.Get(). + Resource("clusters"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested clusters. +func (c *clusters) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Resource("clusters"). + VersionedParams(&opts, api.ParameterCodec). 
+ Watch() +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_3/typed/federation/v1alpha1/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_3/typed/federation/v1alpha1/doc.go new file mode 100644 index 000000000000..79dd05ec7f3b --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_3/typed/federation/v1alpha1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with arguments: --clientset-name=federation_release_1_3 --clientset-path=k8s.io/kubernetes/federation/client/clientset_generated --included-types-overrides=[api/v1/Service] --input=[../../federation/apis/federation/v1alpha1,api/v1] + +// This package has the automatically generated typed clients. +package v1alpha1 diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_3/typed/federation/v1alpha1/fake/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_3/typed/federation/v1alpha1/fake/doc.go new file mode 100644 index 000000000000..846226b3be97 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_3/typed/federation/v1alpha1/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with arguments: --clientset-name=federation_release_1_3 --clientset-path=k8s.io/kubernetes/federation/client/clientset_generated --included-types-overrides=[api/v1/Service] --input=[../../federation/apis/federation/v1alpha1,api/v1] + +// Package fake has the automatically generated clients. 
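+//
+// A hedged sketch of how this fake is typically driven in tests; wrapping an
+// empty core.Fake as below is an assumption for illustration:
+//
+//	f := &FakeFederation{&core.Fake{}}
+//	_, _ = f.Clusters().Create(&v1alpha1.Cluster{})
+//	actions := f.Actions() // the Create above is recorded here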
+package fake diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_3/typed/federation/v1alpha1/fake/fake_cluster.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_3/typed/federation/v1alpha1/fake/fake_cluster.go new file mode 100644 index 000000000000..a524e9c8e60d --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_3/typed/federation/v1alpha1/fake/fake_cluster.go @@ -0,0 +1,108 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + v1alpha1 "k8s.io/kubernetes/federation/apis/federation/v1alpha1" + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + core "k8s.io/kubernetes/pkg/client/testing/core" + labels "k8s.io/kubernetes/pkg/labels" + watch "k8s.io/kubernetes/pkg/watch" +) + +// FakeClusters implements ClusterInterface +type FakeClusters struct { + Fake *FakeFederation +} + +var clustersResource = unversioned.GroupVersionResource{Group: "federation", Version: "v1alpha1", Resource: "clusters"} + +func (c *FakeClusters) Create(cluster *v1alpha1.Cluster) (result *v1alpha1.Cluster, err error) { + obj, err := c.Fake. + Invokes(core.NewRootCreateAction(clustersResource, cluster), &v1alpha1.Cluster{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.Cluster), err +} + +func (c *FakeClusters) Update(cluster *v1alpha1.Cluster) (result *v1alpha1.Cluster, err error) { + obj, err := c.Fake. + Invokes(core.NewRootUpdateAction(clustersResource, cluster), &v1alpha1.Cluster{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.Cluster), err +} + +func (c *FakeClusters) UpdateStatus(cluster *v1alpha1.Cluster) (*v1alpha1.Cluster, error) { + obj, err := c.Fake. + Invokes(core.NewRootUpdateSubresourceAction(clustersResource, "status", cluster), &v1alpha1.Cluster{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.Cluster), err +} + +func (c *FakeClusters) Delete(name string, options *api.DeleteOptions) error { + _, err := c.Fake. + Invokes(core.NewRootDeleteAction(clustersResource, name), &v1alpha1.Cluster{}) + return err +} + +func (c *FakeClusters) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + action := core.NewRootDeleteCollectionAction(clustersResource, listOptions) + + _, err := c.Fake.Invokes(action, &v1alpha1.ClusterList{}) + return err +} + +func (c *FakeClusters) Get(name string) (result *v1alpha1.Cluster, err error) { + obj, err := c.Fake. + Invokes(core.NewRootGetAction(clustersResource, name), &v1alpha1.Cluster{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.Cluster), err +} + +func (c *FakeClusters) List(opts api.ListOptions) (result *v1alpha1.ClusterList, err error) { + obj, err := c.Fake. 
+ Invokes(core.NewRootListAction(clustersResource, opts), &v1alpha1.ClusterList{}) + if obj == nil { + return nil, err + } + + label := opts.LabelSelector + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.ClusterList{} + for _, item := range obj.(*v1alpha1.ClusterList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested clusters. +func (c *FakeClusters) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(core.NewRootWatchAction(clustersResource, opts)) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_3/typed/federation/v1alpha1/fake/fake_federation_client.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_3/typed/federation/v1alpha1/fake/fake_federation_client.go new file mode 100644 index 000000000000..48390713fd66 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_3/typed/federation/v1alpha1/fake/fake_federation_client.go @@ -0,0 +1,37 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + v1alpha1 "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_3/typed/federation/v1alpha1" + restclient "k8s.io/kubernetes/pkg/client/restclient" + core "k8s.io/kubernetes/pkg/client/testing/core" +) + +type FakeFederation struct { + *core.Fake +} + +func (c *FakeFederation) Clusters() v1alpha1.ClusterInterface { + return &FakeClusters{c} +} + +// GetRESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeFederation) GetRESTClient() *restclient.RESTClient { + return nil +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_3/typed/federation/v1alpha1/federation_client.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_3/typed/federation/v1alpha1/federation_client.go new file mode 100644 index 000000000000..57ce4bc4efce --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_3/typed/federation/v1alpha1/federation_client.go @@ -0,0 +1,101 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + api "k8s.io/kubernetes/pkg/api" + registered "k8s.io/kubernetes/pkg/apimachinery/registered" + restclient "k8s.io/kubernetes/pkg/client/restclient" +) + +type FederationInterface interface { + GetRESTClient() *restclient.RESTClient + ClustersGetter +} + +// FederationClient is used to interact with features provided by the Federation group. +type FederationClient struct { + *restclient.RESTClient +} + +func (c *FederationClient) Clusters() ClusterInterface { + return newClusters(c) +} + +// NewForConfig creates a new FederationClient for the given config. +func NewForConfig(c *restclient.Config) (*FederationClient, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := restclient.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &FederationClient{client}, nil +} + +// NewForConfigOrDie creates a new FederationClient for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *restclient.Config) *FederationClient { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new FederationClient for the given RESTClient. +func New(c *restclient.RESTClient) *FederationClient { + return &FederationClient{c} +} + +func setConfigDefaults(config *restclient.Config) error { + // if federation group is not registered, return an error + g, err := registered.Group("federation") + if err != nil { + return err + } + config.APIPath = "/apis" + if config.UserAgent == "" { + config.UserAgent = restclient.DefaultKubernetesUserAgent() + } + // TODO: Unconditionally set the config.Version, until we fix the config. + //if config.Version == "" { + copyGroupVersion := g.GroupVersion + config.GroupVersion = ©GroupVersion + //} + + config.NegotiatedSerializer = api.Codecs + + if config.QPS == 0 { + config.QPS = 5 + } + if config.Burst == 0 { + config.Burst = 10 + } + return nil +} + +// GetRESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FederationClient) GetRESTClient() *restclient.RESTClient { + if c == nil { + return nil + } + return c.RESTClient +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_3/typed/federation/v1alpha1/generated_expansion.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_3/typed/federation/v1alpha1/generated_expansion.go new file mode 100644 index 000000000000..5a05fca2c2ec --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_3/typed/federation/v1alpha1/generated_expansion.go @@ -0,0 +1,19 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +type ClusterExpansion interface{} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/cluster/common.sh b/Godeps/_workspace/src/k8s.io/kubernetes/federation/cluster/common.sh new file mode 100644 index 000000000000..9fb63a06a873 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/cluster/common.sh @@ -0,0 +1,189 @@ +# Copyright 2014 The Kubernetes Authors All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# required: +# KUBE_ROOT: path of the root of the Kubernetes repository + +# optional overrides +# FEDERATION_IMAGE_REPO_BASE: repo which federated images are tagged under (default gcr.io/google_containers) +# FEDERATION_NAMESPACE: name of the namespace that will be created for the federated components in the underlying cluster. +# KUBE_PLATFORM +# KUBE_ARCH +# KUBE_BUILD_STAGE + +: "${KUBE_ROOT?Must set KUBE_ROOT env var}" + +FEDERATION_IMAGE_REPO_BASE=${FEDERATION_IMAGE_REPO_BASE:-'gcr.io/google_containers'} +FEDERATION_NAMESPACE=${FEDERATION_NAMESPACE:-federation-e2e} + +KUBE_PLATFORM=${KUBE_PLATFORM:-linux} +KUBE_ARCH=${KUBE_ARCH:-amd64} +KUBE_BUILD_STAGE=${KUBE_BUILD_STAGE:-release-stage} + +source "${KUBE_ROOT}/cluster/common.sh" + +host_kubectl="${KUBE_ROOT}/cluster/kubectl.sh --namespace=${FEDERATION_NAMESPACE}" + +# required: +# FEDERATION_PUSH_REPO_BASE: repo to which federated container images will be pushed + +# Optional +# FEDERATION_IMAGE_TAG: reference and pull all federated images with this tag.
Used for ci testing +function create-federated-api-objects { +( + : "${FEDERATION_PUSH_REPO_BASE?Must set FEDERATION_PUSH_REPO_BASE env var}" + export FEDERATION_APISERVER_DEPLOYMENT_NAME="federation-apiserver" + export FEDERATION_APISERVER_IMAGE_REPO="${FEDERATION_PUSH_REPO_BASE}/federation-apiserver" + export FEDERATION_APISERVER_IMAGE_TAG="${FEDERATION_IMAGE_TAG:-$(cat ${KUBE_ROOT}/_output/${KUBE_BUILD_STAGE}/server/${KUBE_PLATFORM}-${KUBE_ARCH}/kubernetes/server/bin/federation-apiserver.docker_tag)}" + + export FEDERATION_SERVICE_CIDR=${FEDERATION_SERVICE_CIDR:-"10.10.0.0/24"} + + #Only used for providers that require a nodeport service (vagrant for now) + #We will use loadbalancer services where we can + export FEDERATION_API_NODEPORT=32111 + export FEDERATION_NAMESPACE + + template="go run ${KUBE_ROOT}/federation/cluster/template.go" + + FEDERATION_KUBECONFIG_PATH="${KUBE_ROOT}/federation/cluster/kubeconfig" + + federation_kubectl="${KUBE_ROOT}/cluster/kubectl.sh --context=federated-cluster --namespace=default" + + manifests_root="${KUBE_ROOT}/federation/manifests/" + + $template "${manifests_root}/federation-ns.yaml" | $host_kubectl apply -f - + + cleanup-federated-api-objects + + export FEDERATION_API_HOST="" + export KUBE_MASTER_IP="" + if [[ "$KUBERNETES_PROVIDER" == "vagrant" ]];then + # The vagrant approach is to use a nodeport service, and point kubectl at one of the nodes + $template "${manifests_root}/federation-apiserver-nodeport-service.yaml" | $host_kubectl create -f - + node_addresses=`$host_kubectl get nodes -o=jsonpath='{.items[*].status.addresses[?(@.type=="InternalIP")].address}'` + FEDERATION_API_HOST=`printf "$node_addresses" | cut -d " " -f1` + KUBE_MASTER_IP="${FEDERATION_API_HOST}:${FEDERATION_API_NODEPORT}" + elif [[ "$KUBERNETES_PROVIDER" == "gce" || "$KUBERNETES_PROVIDER" == "gke" || "$KUBERNETES_PROVIDER" == "aws" ]];then + # any capable providers should use a loadbalancer service + # we check for ingress.ip and ingress.hostname, so should work for any loadbalancer-providing provider + # allows 30x5 = 150 seconds for loadbalancer creation + $template "${manifests_root}/federation-apiserver-lb-service.yaml" | $host_kubectl create -f - + for i in {1..30};do + echo "attempting to get federation-apiserver loadbalancer hostname ($i / 30)" + for field in ip hostname;do + FEDERATION_API_HOST=`${host_kubectl} get -o=jsonpath svc/${FEDERATION_APISERVER_DEPLOYMENT_NAME} --template '{.status.loadBalancer.ingress[*].'"${field}}"` + if [[ ! -z "${FEDERATION_API_HOST// }" ]];then + break 2 + fi + done + if [[ $i -eq 30 ]];then + echo "Could not find ingress hostname for federation-apiserver loadbalancer service" + exit 1 + fi + sleep 5 + done + KUBE_MASTER_IP="${FEDERATION_API_HOST}:443" + else + echo "provider ${KUBERNETES_PROVIDER} is not (yet) supported for e2e testing" + exit 1 + fi + echo "Found federation-apiserver host at $FEDERATION_API_HOST" + + FEDERATION_API_TOKEN="$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)" + export FEDERATION_API_KNOWN_TOKENS="${FEDERATION_API_TOKEN},admin,admin" + + $template "${manifests_root}/federation-apiserver-"{deployment,secrets}".yaml" | $host_kubectl create -f - + + # Don't finish provisioning until federation-apiserver pod is running + for i in {1..30};do + #TODO(colhom): in the future this needs to scale out for N pods. 
This assumes just one pod + phase="$($host_kubectl get -o=jsonpath pods -lapp=federated-cluster,module=federation-apiserver --template '{.items[*].status.phase}')" + echo "Waiting for federation-apiserver to be running...(phase= $phase)" + if [[ "$phase" == "Running" ]];then + echo "federation-apiserver pod is running!" + break + fi + + if [[ $i -eq 30 ]];then + echo "federation-apiserver pod is not running! giving up." + exit 1 + fi + + sleep 4 + done + + CONTEXT=federated-cluster \ + KUBE_BEARER_TOKEN="$FEDERATION_API_TOKEN" \ + SECONDARY_KUBECONFIG=true \ + create-kubeconfig +) +} + +# Required +# FEDERATION_PUSH_REPO_BASE: the docker repo where federated images will be pushed + +# Optional +# FEDERATION_IMAGE_TAG: push all federated images with this tag. Used for ci testing +function push-federated-images { + : "${FEDERATION_PUSH_REPO_BASE?Must set FEDERATION_PUSH_REPO_BASE env var}" + local FEDERATION_BINARIES=${FEDERATION_BINARIES:-'federation-apiserver'} + + local imageFolder="${KUBE_ROOT}/_output/${KUBE_BUILD_STAGE}/server/${KUBE_PLATFORM}-${KUBE_ARCH}/kubernetes/server/bin" + + if [[ ! -d "$imageFolder" ]];then + echo "${imageFolder} does not exist! Run make quick-release or make release" + exit 1 + fi + + for binary in $FEDERATION_BINARIES;do + local imageFile="${imageFolder}/${binary}.tar" + + if [[ ! -f "$imageFile" ]];then + echo "${imageFile} does not exist!" + exit 1 + fi + + echo "Load: ${imageFile}" + # Load the image. Trust we know what it's called, as docker load provides no help there :( + docker load < "${imageFile}" + + local srcImageTag="$(cat ${imageFolder}/${binary}.docker_tag)" + local dstImageTag="${FEDERATION_IMAGE_TAG:-$srcImageTag}" + local srcImageName="${FEDERATION_IMAGE_REPO_BASE}/${binary}:${srcImageTag}" + local dstImageName="${FEDERATION_PUSH_REPO_BASE}/${binary}:${dstImageTag}" + + echo "Tag: ${srcImageName} --> ${dstImageName}" + docker tag "$srcImageName" "$dstImageName" + + echo "Push: $dstImageName" + if [[ "${FEDERATION_PUSH_REPO_BASE}" == "gcr.io/"* ]];then + echo " -> GCR repository detected. Using gcloud" + gcloud docker push "$dstImageName" + else + docker push "$dstImageName" + fi + + echo "Remove: $srcImageName" + docker rmi "$srcImageName" + + if [[ "$srcImageName" != "$dstImageName" ]];then + echo "Remove: $dstImageName" + docker rmi "$dstImageName" + fi + + done +} +function cleanup-federated-api-objects { + $host_kubectl delete pods,svc,rc,deployment,secret -lapp=federated-cluster +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/cluster/federated-down.sh b/Godeps/_workspace/src/k8s.io/kubernetes/federation/cluster/federated-down.sh new file mode 100755 index 000000000000..92f19307956c --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/cluster/federated-down.sh @@ -0,0 +1,27 @@ +#!/bin/bash + +# Copyright 2014 The Kubernetes Authors All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
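+
+# A hedged invocation sketch; the provider value is an assumption, and the
+# rest of the environment matches what common.sh above expects:
+#
+#   KUBERNETES_PROVIDER=gce FEDERATION_NAMESPACE=federation-e2e \
+#     ./federation/cluster/federated-down.sh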
+ +set -o errexit +set -o nounset +set -o pipefail + +KUBE_ROOT=$(readlink -m $(dirname "${BASH_SOURCE}")/../../) + +. ${KUBE_ROOT}/federation/cluster/common.sh + +cleanup-federated-api-objects + +$host_kubectl delete ns/${FEDERATION_NAMESPACE} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/cluster/federated-push.sh b/Godeps/_workspace/src/k8s.io/kubernetes/federation/cluster/federated-push.sh new file mode 100755 index 000000000000..80d66641972d --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/cluster/federated-push.sh @@ -0,0 +1,32 @@ +#!/bin/bash + +# Copyright 2014 The Kubernetes Authors All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Push federated container images to the configured repository. +# +# If the full release name (gs://<bucket>/<release>) is passed in then we take +# that directly. If not then we assume we are doing development stuff and take +# the defaults in the release config. + +set -o errexit +set -o nounset +set -o pipefail + +KUBE_ROOT=$(readlink -m $(dirname "${BASH_SOURCE}")/../../) + +. ${KUBE_ROOT}/federation/cluster/common.sh + +push-federated-images + diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/cluster/federated-up.sh b/Godeps/_workspace/src/k8s.io/kubernetes/federation/cluster/federated-up.sh new file mode 100755 index 000000000000..750b907aafb6 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/cluster/federated-up.sh @@ -0,0 +1,25 @@ +#!/bin/bash + +# Copyright 2014 The Kubernetes Authors All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -o errexit +set -o nounset +set -o pipefail + +KUBE_ROOT=$(readlink -m $(dirname "${BASH_SOURCE}")/../../) + +. ${KUBE_ROOT}/federation/cluster/common.sh + +create-federated-api-objects diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/cluster/template.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/cluster/template.go new file mode 100644 index 000000000000..9c947bc8549f --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/cluster/template.go @@ -0,0 +1,76 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +This is a simple script that makes *every* environment variable available +as a Go template field of the same name. + +$ echo "hello world, MYVAR={{.MYVAR}}" > test.txt +$ MYVAR=foobar go run template.go test.txt +> hello world, MYVAR=foobar + +If you want the base64 version of any MYVAR, simply use {{.MYVAR_BASE64}} +*/ + +package main + +import ( + "encoding/base64" + "flag" + "fmt" + "io" + "os" + "path" + "strings" + "text/template" +) + +func main() { + flag.Parse() + env := make(map[string]string) + envList := os.Environ() + + for i := range envList { + pieces := strings.SplitN(envList[i], "=", 2) + if len(pieces) == 2 { + env[pieces[0]] = pieces[1] + env[pieces[0]+"_BASE64"] = base64.StdEncoding.EncodeToString([]byte(pieces[1])) + } else { + fmt.Fprintf(os.Stderr, "Invalid environ found: %s\n", envList[i]) + os.Exit(2) + } + } + + for i := 0; i < flag.NArg(); i++ { + inpath := flag.Arg(i) + + if err := templateYamlFile(env, inpath, os.Stdout); err != nil { + panic(err) + } + } +} + +func templateYamlFile(params map[string]string, inpath string, out io.Writer) error { + if tmpl, err := template.New(path.Base(inpath)).ParseFiles(inpath); err != nil { + return err + } else { + if err := tmpl.Execute(out, params); err != nil { + return err + } + } + _, err := out.Write([]byte("\n---\n")) + return err +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/cmd/federation-apiserver/OWNERS b/Godeps/_workspace/src/k8s.io/kubernetes/federation/cmd/federation-apiserver/OWNERS new file mode 100644 index 000000000000..c5bdda2d1ad0 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/cmd/federation-apiserver/OWNERS @@ -0,0 +1,5 @@ +assignees: + - lavalamp + - smarterclayton + - nikhiljindal + - krousey diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/cmd/federation-apiserver/apiserver.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/cmd/federation-apiserver/apiserver.go new file mode 100644 index 000000000000..ff4ef919d404 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/cmd/federation-apiserver/apiserver.go @@ -0,0 +1,54 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// apiserver is the main api server and master for the cluster. +// It is responsible for serving the cluster management API.
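+//
+// A hypothetical launch sketch; the flag names are assumed from the generic
+// apiserver options of this era and are not verified here:
+//
+//	federation-apiserver --etcd-servers=http://localhost:4001 \
+//	  --service-cluster-ip-range=10.10.0.0/24 --insecure-port=8082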
+package main + +import ( + "fmt" + "math/rand" + "os" + "runtime" + "time" + + "k8s.io/kubernetes/federation/cmd/federation-apiserver/app" + genericoptions "k8s.io/kubernetes/pkg/genericapiserver/options" + "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/flag" + "k8s.io/kubernetes/pkg/version/verflag" + + "github.com/spf13/pflag" +) + +func main() { + runtime.GOMAXPROCS(runtime.NumCPU()) + rand.Seed(time.Now().UTC().UnixNano()) + + s := genericoptions.NewServerRunOptions() + s.AddFlags(pflag.CommandLine) + + flag.InitFlags() + util.InitLogs() + defer util.FlushLogs() + + verflag.PrintAndExitIfRequested() + + if err := app.Run(s); err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + os.Exit(1) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/cmd/federation-apiserver/app/core.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/cmd/federation-apiserver/app/core.go new file mode 100644 index 000000000000..6fe9f943a1d5 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/cmd/federation-apiserver/app/core.go @@ -0,0 +1,54 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package app + +import ( + "github.com/golang/glog" + "k8s.io/kubernetes/pkg/apimachinery/registered" + "k8s.io/kubernetes/pkg/genericapiserver" + genericoptions "k8s.io/kubernetes/pkg/genericapiserver/options" + + "k8s.io/kubernetes/federation/apis/core" + _ "k8s.io/kubernetes/federation/apis/core/install" + "k8s.io/kubernetes/federation/apis/core/v1" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/rest" + serviceetcd "k8s.io/kubernetes/pkg/registry/service/etcd" +) + +func installCoreAPIs(s *genericoptions.ServerRunOptions, g *genericapiserver.GenericAPIServer, f genericapiserver.StorageFactory) { + serviceStore, serviceStatusStorage := serviceetcd.NewREST(createRESTOptionsOrDie(s, g, f, api.Resource("service"))) + coreResources := map[string]rest.Storage{ + "services": serviceStore, + "services/status": serviceStatusStorage, + } + coreGroupMeta := registered.GroupOrDie(core.GroupName) + apiGroupInfo := genericapiserver.APIGroupInfo{ + GroupMeta: *coreGroupMeta, + VersionedResourcesStorageMap: map[string]map[string]rest.Storage{ + v1.SchemeGroupVersion.Version: coreResources, + }, + OptionsExternalVersion: ®istered.GroupOrDie(core.GroupName).GroupVersion, + IsLegacyGroup: true, + Scheme: core.Scheme, + ParameterCodec: core.ParameterCodec, + NegotiatedSerializer: core.Codecs, + } + if err := g.InstallAPIGroup(&apiGroupInfo); err != nil { + glog.Fatalf("Error in registering group version: %v", err) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/cmd/federation-apiserver/app/federation.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/cmd/federation-apiserver/app/federation.go new file mode 100644 index 000000000000..f081b55d9fdc --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/cmd/federation-apiserver/app/federation.go @@ -0,0 +1,53 @@ +/* +Copyright 2016 The 
Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package app + +import ( + "github.com/golang/glog" + + "k8s.io/kubernetes/federation/apis/federation" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/rest" + "k8s.io/kubernetes/pkg/apimachinery/registered" + "k8s.io/kubernetes/pkg/genericapiserver" + genericoptions "k8s.io/kubernetes/pkg/genericapiserver/options" + + _ "k8s.io/kubernetes/federation/apis/federation/install" + clusteretcd "k8s.io/kubernetes/federation/registry/cluster/etcd" +) + +func installFederationAPIs(s *genericoptions.ServerRunOptions, g *genericapiserver.GenericAPIServer, f genericapiserver.StorageFactory) { + clusterStorage, clusterStatusStorage := clusteretcd.NewREST(createRESTOptionsOrDie(s, g, f, federation.Resource("clusters"))) + federationResources := map[string]rest.Storage{ + "clusters": clusterStorage, + "clusters/status": clusterStatusStorage, + } + federationGroupMeta := registered.GroupOrDie(federation.GroupName) + apiGroupInfo := genericapiserver.APIGroupInfo{ + GroupMeta: *federationGroupMeta, + VersionedResourcesStorageMap: map[string]map[string]rest.Storage{ + "v1alpha1": federationResources, + }, + OptionsExternalVersion: ®istered.GroupOrDie(api.GroupName).GroupVersion, + Scheme: api.Scheme, + ParameterCodec: api.ParameterCodec, + NegotiatedSerializer: api.Codecs, + } + if err := g.InstallAPIGroup(&apiGroupInfo); err != nil { + glog.Fatalf("Error in registering group versions: %v", err) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/cmd/federation-apiserver/app/plugins.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/cmd/federation-apiserver/app/plugins.go new file mode 100644 index 000000000000..f4742fc46aee --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/cmd/federation-apiserver/app/plugins.go @@ -0,0 +1,40 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package app + +// This file exists to force the desired plugin implementations to be linked. +// This should probably be part of some configuration fed into the build for a +// given binary target. 
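+//
+// Each blank import below registers its plugin via a package init(); a
+// minimal sketch of the pattern, with a hypothetical factory name:
+//
+//	func init() {
+//		admission.RegisterPlugin("AlwaysAdmit", newAlwaysAdmitPlugin) // factory name assumed
+//	}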
+import ( + // Cloud providers + _ "k8s.io/kubernetes/pkg/cloudprovider/providers" + + // Admission policies + _ "k8s.io/kubernetes/plugin/pkg/admission/admit" + _ "k8s.io/kubernetes/plugin/pkg/admission/alwayspullimages" + _ "k8s.io/kubernetes/plugin/pkg/admission/deny" + _ "k8s.io/kubernetes/plugin/pkg/admission/exec" + _ "k8s.io/kubernetes/plugin/pkg/admission/initialresources" + _ "k8s.io/kubernetes/plugin/pkg/admission/limitranger" + _ "k8s.io/kubernetes/plugin/pkg/admission/namespace/autoprovision" + _ "k8s.io/kubernetes/plugin/pkg/admission/namespace/exists" + _ "k8s.io/kubernetes/plugin/pkg/admission/namespace/lifecycle" + _ "k8s.io/kubernetes/plugin/pkg/admission/persistentvolume/label" + _ "k8s.io/kubernetes/plugin/pkg/admission/resourcequota" + _ "k8s.io/kubernetes/plugin/pkg/admission/securitycontext/scdeny" + _ "k8s.io/kubernetes/plugin/pkg/admission/serviceaccount" +) diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/cmd/federation-apiserver/app/server.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/cmd/federation-apiserver/app/server.go new file mode 100644 index 000000000000..15299a8e1675 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/cmd/federation-apiserver/app/server.go @@ -0,0 +1,162 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package app does all of the work necessary to create a Kubernetes +// APIServer by binding together the API, master and APIServer infrastructure. +// It can be configured and called directly or via the hyperkube framework. +package app + +import ( + "strings" + + "github.com/golang/glog" + "github.com/spf13/cobra" + "github.com/spf13/pflag" + + "k8s.io/kubernetes/pkg/admission" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/apiserver" + "k8s.io/kubernetes/pkg/apiserver/authenticator" + "k8s.io/kubernetes/pkg/genericapiserver" + genericoptions "k8s.io/kubernetes/pkg/genericapiserver/options" + "k8s.io/kubernetes/pkg/registry/cachesize" + "k8s.io/kubernetes/pkg/registry/generic" +) + +// NewAPIServerCommand creates a *cobra.Command object with default parameters +func NewAPIServerCommand() *cobra.Command { + s := genericoptions.NewServerRunOptions() + s.AddFlags(pflag.CommandLine) + cmd := &cobra.Command{ + Use: "federation-apiserver", + Long: `The Kubernetes federation API server validates and configures data +for the api objects which include pods, services, replicationcontrollers, and +others. The API Server services REST operations and provides the frontend to the +cluster's shared state through which all other components interact.`, + Run: func(cmd *cobra.Command, args []string) { + }, + } + + return cmd +} + +// Run runs the specified APIServer. This should never exit. +func Run(s *genericoptions.ServerRunOptions) error { + genericapiserver.DefaultAndValidateRunOptions(s) + + // TODO: register cluster federation resources here. 
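+	// The EtcdServersOverrides entries parsed below take the form
+	// "group/resource#server1;server2", e.g. the hypothetical override
+	// "federation/clusters#http://etcd-1:4001;http://etcd-2:4001".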
+ resourceConfig := genericapiserver.NewResourceConfig() + + storageGroupsToEncodingVersion, err := s.StorageGroupsToEncodingVersion() + if err != nil { + glog.Fatalf("error generating storage version map: %s", err) + } + storageFactory, err := genericapiserver.BuildDefaultStorageFactory( + s.StorageConfig, s.DefaultStorageMediaType, api.Codecs, + genericapiserver.NewDefaultResourceEncodingConfig(), storageGroupsToEncodingVersion, + resourceConfig, s.RuntimeConfig) + if err != nil { + glog.Fatalf("error in initializing storage factory: %s", err) + } + + for _, override := range s.EtcdServersOverrides { + tokens := strings.Split(override, "#") + if len(tokens) != 2 { + glog.Errorf("invalid value of etcd server overrides: %s", override) + continue + } + + apiresource := strings.Split(tokens[0], "/") + if len(apiresource) != 2 { + glog.Errorf("invalid resource definition: %s", tokens[0]) + continue + } + group := apiresource[0] + resource := apiresource[1] + groupResource := unversioned.GroupResource{Group: group, Resource: resource} + + servers := strings.Split(tokens[1], ";") + storageFactory.SetEtcdLocation(groupResource, servers) + } + + authenticator, err := authenticator.New(authenticator.AuthenticatorConfig{ + BasicAuthFile: s.BasicAuthFile, + ClientCAFile: s.ClientCAFile, + TokenAuthFile: s.TokenAuthFile, + OIDCIssuerURL: s.OIDCIssuerURL, + OIDCClientID: s.OIDCClientID, + OIDCCAFile: s.OIDCCAFile, + OIDCUsernameClaim: s.OIDCUsernameClaim, + OIDCGroupsClaim: s.OIDCGroupsClaim, + KeystoneURL: s.KeystoneURL, + }) + if err != nil { + glog.Fatalf("Invalid Authentication Config: %v", err) + } + + authorizationModeNames := strings.Split(s.AuthorizationMode, ",") + authorizer, err := apiserver.NewAuthorizerFromAuthorizationConfig(authorizationModeNames, s.AuthorizationConfig) + if err != nil { + glog.Fatalf("Invalid Authorization Config: %v", err) + } + + admissionControlPluginNames := strings.Split(s.AdmissionControl, ",") + client, err := s.NewSelfClient() + if err != nil { + glog.Errorf("Failed to create clientset: %v", err) + } + admissionController := admission.NewFromPlugins(client, admissionControlPluginNames, s.AdmissionControlConfigFile) + + genericConfig := genericapiserver.NewConfig(s) + // TODO: Move the following to generic api server as well. + genericConfig.StorageFactory = storageFactory + genericConfig.Authenticator = authenticator + genericConfig.SupportsBasicAuth = len(s.BasicAuthFile) > 0 + genericConfig.Authorizer = authorizer + genericConfig.AdmissionControl = admissionController + genericConfig.APIResourceConfigSource = storageFactory.APIResourceConfigSource + genericConfig.MasterServiceNamespace = s.MasterServiceNamespace + genericConfig.Serializer = api.Codecs + + // TODO: Move this to generic api server (Need to move the command line flag). 
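+	// Per-resource cache sizes are expected in "resource#size" form, e.g. a
+	// hypothetical "clusters#100" (exact flag name not verified here).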
+ if s.EnableWatchCache { + cachesize.SetWatchCacheSizes(s.WatchCacheSizes) + } + + m, err := genericapiserver.New(genericConfig) + if err != nil { + return err + } + + installFederationAPIs(s, m, storageFactory) + installCoreAPIs(s, m, storageFactory) + + m.Run(s) + return nil +} + +func createRESTOptionsOrDie(s *genericoptions.ServerRunOptions, g *genericapiserver.GenericAPIServer, f genericapiserver.StorageFactory, resource unversioned.GroupResource) generic.RESTOptions { + storage, err := f.New(resource) + if err != nil { + glog.Fatalf("Unable to find storage destination for %v, due to %v", resource, err.Error()) + } + return generic.RESTOptions{ + Storage: storage, + Decorator: g.StorageDecorator(), + DeleteCollectionWorkers: s.DeleteCollectionWorkers, + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/cmd/federation-apiserver/app/server_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/cmd/federation-apiserver/app/server_test.go new file mode 100644 index 000000000000..fa72048e2608 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/cmd/federation-apiserver/app/server_test.go @@ -0,0 +1,287 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package app + +import ( + "regexp" + "testing" + + "encoding/json" + "fmt" + "io/ioutil" + "net" + "net/http" + "time" + + "github.com/stretchr/testify/assert" + fed_v1a1 "k8s.io/kubernetes/federation/apis/federation/v1alpha1" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/genericapiserver/options" +) + +func TestLongRunningRequestRegexp(t *testing.T) { + regexp := regexp.MustCompile(options.NewServerRunOptions().LongRunningRequestRE) + dontMatch := []string{ + "/api/v1/watch-namespace/", + "/api/v1/namespace-proxy/", + "/api/v1/namespace-watch", + "/api/v1/namespace-proxy", + "/api/v1/namespace-portforward/pods", + "/api/v1/portforward/pods", + ". 
anything", + "/ that", + } + doMatch := []string{ + "/api/v1/pods/watch", + "/api/v1/watch/stuff", + "/api/v1/default/service/proxy", + "/api/v1/pods/proxy/path/to/thing", + "/api/v1/namespaces/myns/pods/mypod/log", + "/api/v1/namespaces/myns/pods/mypod/logs", + "/api/v1/namespaces/myns/pods/mypod/portforward", + "/api/v1/namespaces/myns/pods/mypod/exec", + "/api/v1/namespaces/myns/pods/mypod/attach", + "/api/v1/namespaces/myns/pods/mypod/log/", + "/api/v1/namespaces/myns/pods/mypod/logs/", + "/api/v1/namespaces/myns/pods/mypod/portforward/", + "/api/v1/namespaces/myns/pods/mypod/exec/", + "/api/v1/namespaces/myns/pods/mypod/attach/", + "/api/v1/watch/namespaces/myns/pods", + } + for _, path := range dontMatch { + if regexp.MatchString(path) { + t.Errorf("path should not have match regexp but did: %s", path) + } + } + for _, path := range doMatch { + if !regexp.MatchString(path) { + t.Errorf("path should have match regexp did not: %s", path) + } + } +} + +var insecurePort = 8082 +var serverIP = fmt.Sprintf("http://localhost:%v", insecurePort) +var groupVersions = []unversioned.GroupVersion{ + fed_v1a1.SchemeGroupVersion, +} + +func TestRun(t *testing.T) { + s := options.NewServerRunOptions() + s.InsecurePort = insecurePort + _, ipNet, _ := net.ParseCIDR("10.10.10.0/24") + s.ServiceClusterIPRange = *ipNet + s.StorageConfig.ServerList = []string{"http://localhost:4001"} + go func() { + if err := Run(s); err != nil { + t.Fatalf("Error in bringing up the server: %v", err) + } + }() + if err := waitForApiserverUp(); err != nil { + t.Fatalf("%v", err) + } + testSwaggerSpec(t) + testSupport(t) + testAPIGroupList(t) + testAPIGroup(t) + testAPIResourceList(t) +} + +func waitForApiserverUp() error { + for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(5 * time.Second) { + _, err := http.Get(serverIP) + if err == nil { + return nil + } + } + return fmt.Errorf("waiting for apiserver timed out") +} + +func readResponse(serverURL string) ([]byte, error) { + response, err := http.Get(serverURL) + if err != nil { + return nil, fmt.Errorf("Error in fetching %s: %v", serverURL, err) + } + defer response.Body.Close() + if response.StatusCode != http.StatusOK { + return nil, fmt.Errorf("unexpected status: %d for URL: %s, expected status: %d", response.StatusCode, serverURL, http.StatusOK) + } + contents, err := ioutil.ReadAll(response.Body) + if err != nil { + return nil, fmt.Errorf("Error reading response from %s: %v", serverURL, err) + } + return contents, nil +} + +func testSwaggerSpec(t *testing.T) { + serverURL := serverIP + "/swaggerapi" + _, err := readResponse(serverURL) + if err != nil { + t.Fatalf("%v", err) + } +} + +func testSupport(t *testing.T) { + serverURL := serverIP + "/version" + _, err := readResponse(serverURL) + if err != nil { + t.Fatalf("%v", err) + } +} + +func findGroup(groups []unversioned.APIGroup, groupName string) *unversioned.APIGroup { + for _, group := range groups { + if group.Name == groupName { + return &group + } + } + return nil +} + +func testAPIGroupList(t *testing.T) { + groupVersionForDiscoveryMap := make(map[string]unversioned.GroupVersionForDiscovery) + for _, groupVersion := range groupVersions { + groupVersionForDiscoveryMap[groupVersion.Group] = unversioned.GroupVersionForDiscovery{ + GroupVersion: groupVersion.String(), + Version: groupVersion.Version, + } + } + + serverURL := serverIP + "/apis" + contents, err := readResponse(serverURL) + if err != nil { + t.Fatalf("%v", err) + } + var apiGroupList unversioned.APIGroupList + err = 
json.Unmarshal(contents, &apiGroupList) + if err != nil { + t.Fatalf("Error in unmarshalling response from server %s: %v", serverURL, err) + } + + for _, groupVersion := range groupVersions { + found := findGroup(apiGroupList.Groups, groupVersion.Group) + assert.NotNil(t, found) + assert.Equal(t, groupVersion.Group, found.Name) + assert.Equal(t, 1, len(found.Versions)) + groupVersionForDiscovery := groupVersionForDiscoveryMap[groupVersion.Group] + assert.Equal(t, groupVersionForDiscovery, found.Versions[0]) + assert.Equal(t, groupVersionForDiscovery, found.PreferredVersion) + } +} + +func testAPIGroup(t *testing.T) { + for _, groupVersion := range groupVersions { + serverURL := serverIP + "/apis/" + groupVersion.Group + contents, err := readResponse(serverURL) + if err != nil { + t.Fatalf("%v", err) + } + var apiGroup unversioned.APIGroup + err = json.Unmarshal(contents, &apiGroup) + if err != nil { + t.Fatalf("Error in unmarshalling response from server %s: %v", serverURL, err) + } + // empty APIVersion for extensions group + if groupVersion.Group == "extensions" { + assert.Equal(t, "", apiGroup.APIVersion) + } else { + assert.Equal(t, "v1", apiGroup.APIVersion) + } + assert.Equal(t, apiGroup.Name, groupVersion.Group) + assert.Equal(t, 1, len(apiGroup.Versions)) + assert.Equal(t, groupVersion.String(), apiGroup.Versions[0].GroupVersion) + assert.Equal(t, groupVersion.Version, apiGroup.Versions[0].Version) + assert.Equal(t, apiGroup.PreferredVersion, apiGroup.Versions[0]) + } + + testCoreAPIGroup(t) +} + +func testCoreAPIGroup(t *testing.T) { + serverURL := serverIP + "/api" + contents, err := readResponse(serverURL) + if err != nil { + t.Fatalf("%v", err) + } + var apiVersions unversioned.APIVersions + err = json.Unmarshal(contents, &apiVersions) + if err != nil { + t.Fatalf("Error in unmarshalling response from server %s: %v", serverURL, err) + } + assert.Equal(t, 1, len(apiVersions.Versions)) + assert.Equal(t, "v1", apiVersions.Versions[0]) + assert.NotEmpty(t, apiVersions.ServerAddressByClientCIDRs) +} + +func findResource(resources []unversioned.APIResource, resourceName string) *unversioned.APIResource { + for _, resource := range resources { + if resource.Name == resourceName { + return &resource + } + } + return nil +} + +func testAPIResourceList(t *testing.T) { + testFederationResourceList(t) + testCoreResourceList(t) +} + +func testFederationResourceList(t *testing.T) { + serverURL := serverIP + "/apis/" + fed_v1a1.SchemeGroupVersion.String() + contents, err := readResponse(serverURL) + if err != nil { + t.Fatalf("%v", err) + } + var apiResourceList unversioned.APIResourceList + err = json.Unmarshal(contents, &apiResourceList) + if err != nil { + t.Fatalf("Error in unmarshalling response from server %s: %v", serverURL, err) + } + assert.Equal(t, "v1", apiResourceList.APIVersion) + assert.Equal(t, fed_v1a1.SchemeGroupVersion.String(), apiResourceList.GroupVersion) + + found := findResource(apiResourceList.APIResources, "clusters") + assert.NotNil(t, found) + assert.False(t, found.Namespaced) + found = findResource(apiResourceList.APIResources, "clusters/status") + assert.NotNil(t, found) + assert.False(t, found.Namespaced) +} + +func testCoreResourceList(t *testing.T) { + serverURL := serverIP + "/api/" + v1.SchemeGroupVersion.String() + contents, err := readResponse(serverURL) + if err != nil { + t.Fatalf("%v", err) + } + var apiResourceList unversioned.APIResourceList + err = json.Unmarshal(contents, &apiResourceList) + if err != nil { + t.Fatalf("Error in unmarshalling response 
from server %s: %v", serverURL, err)
+	}
+	assert.Equal(t, "", apiResourceList.APIVersion)
+	assert.Equal(t, v1.SchemeGroupVersion.String(), apiResourceList.GroupVersion)
+
+	found := findResource(apiResourceList.APIResources, "services")
+	assert.NotNil(t, found)
+	assert.True(t, found.Namespaced)
+	found = findResource(apiResourceList.APIResources, "services/status")
+	assert.NotNil(t, found)
+	assert.True(t, found.Namespaced)
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/cmd/federation-controller-manager/OWNERS b/Godeps/_workspace/src/k8s.io/kubernetes/federation/cmd/federation-controller-manager/OWNERS
new file mode 100644
index 000000000000..c6b4c5c4f653
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/cmd/federation-controller-manager/OWNERS
@@ -0,0 +1,4 @@
+assignees:
+  - quinton-hoole
+  - nikhiljindal
+  - madhusudancs
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/cmd/federation-controller-manager/app/controllermanager.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/cmd/federation-controller-manager/app/controllermanager.go
new file mode 100644
index 000000000000..abc1db454dae
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/cmd/federation-controller-manager/app/controllermanager.go
@@ -0,0 +1,123 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package app implements a server that runs a set of active
+// components. This includes the cluster controller.
+
+package app
+
+import (
+	"net"
+	"net/http"
+	"net/http/pprof"
+	"strconv"
+
+	"k8s.io/kubernetes/federation/cmd/federation-controller-manager/app/options"
+	"k8s.io/kubernetes/pkg/client/restclient"
+
+	internalclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset"
+	federationclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_3"
+	"k8s.io/kubernetes/federation/pkg/dnsprovider"
+	clustercontroller "k8s.io/kubernetes/federation/pkg/federation-controller/cluster"
+	servicecontroller "k8s.io/kubernetes/federation/pkg/federation-controller/service"
+	"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
+	"k8s.io/kubernetes/pkg/healthz"
+	"k8s.io/kubernetes/pkg/util/configz"
+	"k8s.io/kubernetes/pkg/util/wait"
+
+	"github.com/golang/glog"
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/spf13/cobra"
+	"github.com/spf13/pflag"
+)
+
+// NewControllerManagerCommand creates a *cobra.Command object with default parameters
+func NewControllerManagerCommand() *cobra.Command {
+	s := options.NewCMServer()
+	s.AddFlags(pflag.CommandLine)
+	cmd := &cobra.Command{
+		Use: "federation-controller-manager",
+		Long: `The federation controller manager is a daemon that embeds
+the core control loops shipped with federation. In applications of robotics and
+automation, a control loop is a non-terminating loop that regulates the state of
+the system.
In federation, a controller is a control loop that watches the shared
+state of the federation cluster through the apiserver and makes changes attempting
+to move the current state towards the desired state. An example of a controller that
+ships with federation today is the cluster controller.`,
+		Run: func(cmd *cobra.Command, args []string) {
+		},
+	}
+
+	return cmd
+}
+
+// Run runs the CMServer. This should never exit.
+func Run(s *options.CMServer) error {
+	if c, err := configz.New("componentconfig"); err == nil {
+		c.Set(s.ControllerManagerConfiguration)
+	} else {
+		glog.Errorf("unable to register configz: %s", err)
+	}
+	restClientCfg, err := clientcmd.BuildConfigFromFlags(s.Master, s.Kubeconfig)
+	if err != nil {
+		return err
+	}
+
+	// Override restClientCfg qps/burst settings from flags
+	restClientCfg.QPS = s.APIServerQPS
+	restClientCfg.Burst = s.APIServerBurst
+
+	go func() {
+		mux := http.NewServeMux()
+		healthz.InstallHandler(mux)
+		if s.EnableProfiling {
+			mux.HandleFunc("/debug/pprof/", pprof.Index)
+			mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
+			mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
+		}
+		mux.Handle("/metrics", prometheus.Handler())
+
+		server := &http.Server{
+			Addr:    net.JoinHostPort(s.Address, strconv.Itoa(s.Port)),
+			Handler: mux,
+		}
+		glog.Fatal(server.ListenAndServe())
+	}()
+
+	run := func() {
+		err := StartControllers(s, restClientCfg)
+		glog.Fatalf("error running controllers: %v", err)
+		panic("unreachable")
+	}
+	run()
+	panic("unreachable")
+}
+
+func StartControllers(s *options.CMServer, restClientCfg *restclient.Config) error {
+
+	federationClientSet := federationclientset.NewForConfigOrDie(restclient.AddUserAgent(restClientCfg, "cluster-controller"))
+	go clustercontroller.NewclusterController(federationClientSet, s.ClusterMonitorPeriod.Duration).Run()
+	dns, err := dnsprovider.InitDnsProvider(s.DnsProvider, s.DnsConfigFile)
+	if err != nil {
+		glog.Fatalf("DNS provider could not be initialized: %v", err)
+	}
+	scclientset := internalclientset.NewForConfigOrDie(restclient.AddUserAgent(restClientCfg, servicecontroller.UserAgentName))
+	servicecontroller := servicecontroller.New(scclientset, dns)
+	if err := servicecontroller.Run(s.ConcurrentServiceSyncs, wait.NeverStop); err != nil {
+		glog.Errorf("Failed to start service controller: %v", err)
+	}
+	select {}
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/cmd/federation-controller-manager/app/options/options.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/cmd/federation-controller-manager/app/options/options.go
new file mode 100644
index 000000000000..f25b16daaa75
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/cmd/federation-controller-manager/app/options/options.go
@@ -0,0 +1,99 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package options provides the flags used for the controller manager.
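+//
+// An illustrative invocation wiring these options together (values are
+// placeholders; the flag names are the ones bound in AddFlags below):
+//
+//	federation-controller-manager --master=https://federation.example.com \
+//	  --cluster-monitor-period=40s --federated-api-qps=20 --federated-api-burst=30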
+
+package options
+
+import (
+	"time"
+
+	"github.com/spf13/pflag"
+	"k8s.io/kubernetes/pkg/api/unversioned"
+	"k8s.io/kubernetes/pkg/apis/componentconfig"
+	"k8s.io/kubernetes/pkg/client/leaderelection"
+)
+
+type ControllerManagerConfiguration struct {
+	// port is the port that the controller-manager's http service runs on.
+	Port int `json:"port"`
+	// address is the IP address to serve on (set to 0.0.0.0 for all interfaces).
+	Address string `json:"address"`
+	// dnsProvider is the provider for dns services.
+	DnsProvider string `json:"dnsProvider"`
+	// dnsConfigFile is the path to the dns provider configuration file.
+	DnsConfigFile string `json:"dnsConfigFile"`
+	// concurrentServiceSyncs is the number of services that are
+	// allowed to sync concurrently. Larger number = more responsive service
+	// management, but more CPU (and network) load.
+	ConcurrentServiceSyncs int `json:"concurrentServiceSyncs"`
+	// clusterMonitorPeriod is the period for syncing ClusterStatus in cluster controller.
+	ClusterMonitorPeriod unversioned.Duration `json:"clusterMonitorPeriod"`
+	// APIServerQPS is the QPS to use while talking with federation apiserver.
+	APIServerQPS float32 `json:"federatedAPIQPS"`
+	// APIServerBurst is the burst to use while talking with federation apiserver.
+	APIServerBurst int `json:"federatedAPIBurst"`
+	// enableProfiling enables profiling via web interface host:port/debug/pprof/
+	EnableProfiling bool `json:"enableProfiling"`
+	// leaderElection defines the configuration of leader election client.
+	LeaderElection componentconfig.LeaderElectionConfiguration `json:"leaderElection"`
+	// contentType is contentType of requests sent to apiserver.
+	ContentType string `json:"contentType"`
+}
+
+// CMServer is the main context object for the controller manager.
+type CMServer struct {
+	ControllerManagerConfiguration
+	Master     string
+	Kubeconfig string
+}
+
+const (
+	// FederatedControllerManagerPort is the default port for the federation controller manager status server.
+	// May be overridden by a flag at startup.
+	FederatedControllerManagerPort = 10253
+)
+
+// NewCMServer creates a new CMServer with a default config.
+func NewCMServer() *CMServer {
+	s := CMServer{
+		ControllerManagerConfiguration: ControllerManagerConfiguration{
+			Port:                   FederatedControllerManagerPort,
+			Address:                "0.0.0.0",
+			ConcurrentServiceSyncs: 10,
+			ClusterMonitorPeriod:   unversioned.Duration{Duration: 40 * time.Second},
+			APIServerQPS:           20.0,
+			APIServerBurst:         30,
+			LeaderElection:         leaderelection.DefaultLeaderElectionConfiguration(),
+		},
+	}
+	return &s
+}
+
+// AddFlags adds flags for a specific CMServer to the specified FlagSet
+func (s *CMServer) AddFlags(fs *pflag.FlagSet) {
+	fs.IntVar(&s.Port, "port", s.Port, "The port that the controller-manager's http service runs on")
+	fs.Var(componentconfig.IPVar{Val: &s.Address}, "address", "The IP address to serve on (set to 0.0.0.0 for all interfaces)")
+	fs.IntVar(&s.ConcurrentServiceSyncs, "concurrent-service-syncs", s.ConcurrentServiceSyncs, "The number of service syncing operations that will be done concurrently.
Larger number = faster service updating, but more CPU (and network) load")
+	fs.DurationVar(&s.ClusterMonitorPeriod.Duration, "cluster-monitor-period", s.ClusterMonitorPeriod.Duration, "The period for syncing ClusterStatus in ClusterController.")
+	fs.BoolVar(&s.EnableProfiling, "profiling", true, "Enable profiling via web interface host:port/debug/pprof/")
+	fs.StringVar(&s.Master, "master", s.Master, "The address of the federation API server (overrides any value in kubeconfig)")
+	fs.StringVar(&s.Kubeconfig, "kubeconfig", s.Kubeconfig, "Path to kubeconfig file with authorization and master location information.")
+	fs.StringVar(&s.ContentType, "kube-api-content-type", s.ContentType, "ContentType of requests sent to apiserver. Passing application/vnd.kubernetes.protobuf is currently an experimental feature.")
+	fs.Float32Var(&s.APIServerQPS, "federated-api-qps", s.APIServerQPS, "QPS to use while talking with federation apiserver")
+	fs.IntVar(&s.APIServerBurst, "federated-api-burst", s.APIServerBurst, "Burst to use while talking with federation apiserver")
+	leaderelection.BindFlags(&s.LeaderElection, fs)
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/cmd/federation-controller-manager/controller-manager.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/cmd/federation-controller-manager/controller-manager.go
new file mode 100644
index 000000000000..56a7c0c9715c
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/cmd/federation-controller-manager/controller-manager.go
@@ -0,0 +1,52 @@
+/*
+Copyright 2014 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package main
+
+import (
+	"fmt"
+	"os"
+	"runtime"
+
+	"github.com/spf13/pflag"
+	"k8s.io/kubernetes/federation/cmd/federation-controller-manager/app"
+	"k8s.io/kubernetes/federation/cmd/federation-controller-manager/app/options"
+	"k8s.io/kubernetes/pkg/healthz"
+	"k8s.io/kubernetes/pkg/util"
+	"k8s.io/kubernetes/pkg/util/flag"
+	"k8s.io/kubernetes/pkg/version/verflag"
+)
+
+func init() {
+	healthz.DefaultHealthz()
+}
+
+func main() {
+	runtime.GOMAXPROCS(runtime.NumCPU())
+	s := options.NewCMServer()
+	s.AddFlags(pflag.CommandLine)
+
+	flag.InitFlags()
+	util.InitLogs()
+	defer util.FlushLogs()
+
+	verflag.PrintAndExitIfRequested()
+
+	if err := app.Run(s); err != nil {
+		fmt.Fprintf(os.Stderr, "%v\n", err)
+		os.Exit(1)
+	}
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/cmd/genfeddocs/gen_fed_docs.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/cmd/genfeddocs/gen_fed_docs.go
new file mode 100644
index 000000000000..d7c30b14c5a1
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/cmd/genfeddocs/gen_fed_docs.go
@@ -0,0 +1,64 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package main
+
+import (
+	"fmt"
+	"os"
+
+	"github.com/spf13/cobra/doc"
+	"k8s.io/kubernetes/cmd/genutils"
+	fedapiservapp "k8s.io/kubernetes/federation/cmd/federation-apiserver/app"
+	fedcmapp "k8s.io/kubernetes/federation/cmd/federation-controller-manager/app"
+)
+
+// Note: We have a separate binary for generating federation docs and kube docs because of the way api groups are registered.
+// If we import both kube-apiserver and federation-apiserver in the same binary then api groups from both kube and federation will get registered in both the apiservers
+// and hence will produce incorrect flag values.
+// We can potentially merge cmd/kubegendocs and this when we have fixed that problem.
+func main() {
+	// use os.Args instead of "flags" because "flags" will mess up the man pages!
+	path := ""
+	module := ""
+	if len(os.Args) == 3 {
+		path = os.Args[1]
+		module = os.Args[2]
+	} else {
+		fmt.Fprintf(os.Stderr, "usage: %s [output directory] [module]\n", os.Args[0])
+		os.Exit(1)
+	}
+
+	outDir, err := genutils.OutDir(path)
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "failed to get output directory: %v\n", err)
+		os.Exit(1)
+	}
+
+	switch module {
+	case "federation-apiserver":
+		// generate docs for federation-apiserver
+		apiserver := fedapiservapp.NewAPIServerCommand()
+		doc.GenMarkdownTree(apiserver, outDir)
+	case "federation-controller-manager":
+		// generate docs for federation-controller-manager
+		controllermanager := fedcmapp.NewControllerManagerCommand()
+		doc.GenMarkdownTree(controllermanager, outDir)
+	default:
+		fmt.Fprintf(os.Stderr, "Module %s is not supported\n", module)
+		os.Exit(1)
+	}
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/manifests/.gitignore b/Godeps/_workspace/src/k8s.io/kubernetes/federation/manifests/.gitignore
new file mode 100644
index 000000000000..b1280e87ff38
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/manifests/.gitignore
@@ -0,0 +1 @@
+/federated-image.tag
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/manifests/federation-apiserver-deployment.yaml b/Godeps/_workspace/src/k8s.io/kubernetes/federation/manifests/federation-apiserver-deployment.yaml
new file mode 100644
index 000000000000..ad5e78dad0ce
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/manifests/federation-apiserver-deployment.yaml
@@ -0,0 +1,42 @@
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+  name: {{.FEDERATION_APISERVER_DEPLOYMENT_NAME}}
+  namespace: {{.FEDERATION_NAMESPACE}}
+  labels:
+    app: federated-cluster
+spec:
+  template:
+    metadata:
+      name: federation-apiserver
+      labels:
+        app: federated-cluster
+        module: federation-apiserver
+    spec:
+      containers:
+      - name: apiserver
+        image: {{.FEDERATION_APISERVER_IMAGE_REPO}}:{{.FEDERATION_APISERVER_IMAGE_TAG}}
+        command:
+        - /usr/local/bin/federation-apiserver
+        - --bind-address=0.0.0.0
+        - --etcd-servers=http://localhost:2379
+        - --service-cluster-ip-range={{.FEDERATION_SERVICE_CIDR}}
+        - --secure-port=443
+        - --advertise-address={{.FEDERATION_API_HOST}}
+        -
--admission-control=NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota + - --token-auth-file=/srv/kubernetes/known-tokens.csv + ports: + - containerPort: 443 + name: https + - containerPort: 8080 + name: local + volumeMounts: + - name: federation-apiserver-secrets + mountPath: /srv/kubernetes/ + readOnly: true + - name: etcd + image: quay.io/coreos/etcd:v2.3.3 + volumes: + - name: federation-apiserver-secrets + secret: + secretName: federation-apiserver-secrets diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/manifests/federation-apiserver-lb-service.yaml b/Godeps/_workspace/src/k8s.io/kubernetes/federation/manifests/federation-apiserver-lb-service.yaml new file mode 100644 index 000000000000..977df03ba76a --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/manifests/federation-apiserver-lb-service.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{.FEDERATION_APISERVER_DEPLOYMENT_NAME}} + namespace: {{.FEDERATION_NAMESPACE}} + labels: + app: federated-cluster +spec: + type: LoadBalancer + selector: + app: federated-cluster + module: federation-apiserver + ports: + - name: https + protocol: TCP + port: 443 + targetPort: 443 diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/manifests/federation-apiserver-nodeport-service.yaml b/Godeps/_workspace/src/k8s.io/kubernetes/federation/manifests/federation-apiserver-nodeport-service.yaml new file mode 100644 index 000000000000..f28856a92f08 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/manifests/federation-apiserver-nodeport-service.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{.FEDERATION_APISERVER_DEPLOYMENT_NAME}} + namespace: {{.FEDERATION_NAMESPACE}} + labels: + app: federated-cluster +spec: + type: NodePort + selector: + app: federated-cluster + module: federation-apiserver + ports: + - name: https + protocol: TCP + nodePort: {{.FEDERATION_API_NODEPORT}} + port: 443 diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/manifests/federation-apiserver-secrets.yaml b/Godeps/_workspace/src/k8s.io/kubernetes/federation/manifests/federation-apiserver-secrets.yaml new file mode 100644 index 000000000000..13a8853d32cd --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/manifests/federation-apiserver-secrets.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: Secret +metadata: + name: federation-apiserver-secrets + labels: + app: federated-cluster +type: Opaque +data: + known-tokens.csv: {{.FEDERATION_API_KNOWN_TOKENS_BASE64}} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/manifests/federation-ns.yaml b/Godeps/_workspace/src/k8s.io/kubernetes/federation/manifests/federation-ns.yaml new file mode 100644 index 000000000000..ae5a73495975 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/manifests/federation-ns.yaml @@ -0,0 +1,4 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: {{.FEDERATION_NAMESPACE}} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/dns.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/dns.go new file mode 100644 index 000000000000..4e32850bf16d --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/dns.go @@ -0,0 +1,61 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package dnsprovider
+
+import "k8s.io/kubernetes/federation/pkg/dnsprovider/rrstype"
+
+// Interface is an abstract, pluggable interface for DNS providers.
+type Interface interface {
+	// Zones returns the provider's Zones interface, or false if not supported.
+	Zones() (Zones, bool)
+}
+
+type Zones interface {
+	// List returns the managed Zones, or an error if the list operation failed.
+	List() ([]Zone, error)
+}
+
+type Zone interface {
+	// Name returns the name of the zone, e.g. "example.com"
+	Name() string
+	// ResourceRecordSets returns the provider's ResourceRecordSets interface, or false if not supported.
+	ResourceRecordSets() (ResourceRecordSets, bool)
+}
+
+type ResourceRecordSets interface {
+	// List returns the ResourceRecordSets of the Zone, or an error if the list operation failed.
+	List() ([]ResourceRecordSet, error)
+	// Add adds and returns a ResourceRecordSet of the Zone, or an error if the add operation failed.
+	Add(ResourceRecordSet) (ResourceRecordSet, error)
+	// Remove removes a ResourceRecordSet from the Zone, or an error if the remove operation failed.
+	// The supplied ResourceRecordSet must match one of the existing record sets (obtained via List()) exactly.
+	Remove(ResourceRecordSet) error
+	// New allocates a new ResourceRecordSet, which can then be passed to Add() or Remove()
+	// Arguments are as per the ResourceRecordSet interface below.
+	New(name string, rrdatas []string, ttl int64, rrstype rrstype.RrsType) ResourceRecordSet
+}
+
+type ResourceRecordSet interface {
+	// Name returns the name of the ResourceRecordSet, e.g. "www.example.com".
+	Name() string
+	// Rrdatas returns the Resource Record Datas of the record set.
+	Rrdatas() []string
+	// Ttl returns the time-to-live of the record set, in seconds.
+	Ttl() int64
+	// Type returns the type of the record set (A, CNAME, SRV, etc.)
+	Type() rrstype.RrsType
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/doc.go
new file mode 100644
index 000000000000..9a3d7e0455c9
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/doc.go
@@ -0,0 +1,21 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+dnsprovider supplies interfaces for dns service providers (e.g. Google Cloud DNS, AWS route53, etc).
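+
+A minimal consumer sketch (illustrative only; it assumes a provider named
+"google-clouddns" has been registered, and it elides error handling):
+
+	provider, _ := dnsprovider.GetDnsProvider("google-clouddns", nil)
+	zones, _ := provider.Zones()
+	zoneList, _ := zones.List()
+	rrsets, _ := zoneList[0].ResourceRecordSets()
+	rrsets.Add(rrsets.New("www.example.com", []string{"10.0.0.1"}, 180, rrstype.A))
+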
+Implementations exist in the providers sub-package.
+*/
+package dnsprovider
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/plugins.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/plugins.go
new file mode 100644
index 000000000000..71dbf65268be
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/plugins.go
@@ -0,0 +1,98 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package dnsprovider
+
+import (
+	"fmt"
+	"io"
+	"os"
+	"sync"
+
+	"github.com/golang/glog"
+)
+
+// Factory is a function that returns a dnsprovider.Interface.
+// The config parameter provides an io.Reader handler to the factory in
+// order to load specific configurations. If no configuration is provided
+// the parameter is nil.
+type Factory func(config io.Reader) (Interface, error)
+
+// All registered dns providers.
+var providersMutex sync.Mutex
+var providers = make(map[string]Factory)
+
+// RegisterDnsProvider registers a dnsprovider.Factory by name. This
+// is expected to happen during startup.
+func RegisterDnsProvider(name string, cloud Factory) {
+	providersMutex.Lock()
+	defer providersMutex.Unlock()
+	if _, found := providers[name]; found {
+		glog.Fatalf("DNS provider %q was registered twice", name)
+	}
+	glog.V(1).Infof("Registered DNS provider %q", name)
+	providers[name] = cloud
+}
+
+// GetDnsProvider creates an instance of the named DNS provider, or nil if
+// the name is not known. The error return is only used if the named provider
+// was known but failed to initialize. The config parameter specifies the
+// io.Reader handler of the configuration file for the DNS provider, or nil
+// for no configuration.
+func GetDnsProvider(name string, config io.Reader) (Interface, error) {
+	providersMutex.Lock()
+	defer providersMutex.Unlock()
+	f, found := providers[name]
+	if !found {
+		return nil, nil
+	}
+	return f(config)
+}
+
+// InitDnsProvider creates an instance of the named DNS provider.
+func InitDnsProvider(name string, configFilePath string) (Interface, error) {
+	var dns Interface
+	var err error
+
+	if name == "" {
+		glog.Info("No DNS provider specified.")
+		return nil, nil
+	}
+
+	if configFilePath != "" {
+		var config *os.File
+		config, err = os.Open(configFilePath)
+		if err != nil {
+			return nil, fmt.Errorf("Couldn't open DNS provider configuration %s: %#v", configFilePath, err)
+		}
+
+		defer config.Close()
+		dns, err = GetDnsProvider(name, config)
+	} else {
+		// Pass explicit nil so plugins can actually check for nil. See
+		// "Why is my nil error value not equal to nil?" in golang.org/doc/faq.
+ dns, err = GetDnsProvider(name, nil) + } + + if err != nil { + return nil, fmt.Errorf("could not init DNS provider %q: %v", name, err) + } + if dns == nil { + return nil, fmt.Errorf("unknown DNS provider %q", name) + } + + return dns, nil +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/clouddns.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/clouddns.go new file mode 100644 index 000000000000..1ad772de6e88 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/clouddns.go @@ -0,0 +1,101 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// clouddns is the implementation of pkg/dnsprovider interface for Google Cloud DNS +package clouddns + +import ( + "io" + + "github.com/golang/glog" + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" + compute "google.golang.org/api/compute/v1" + dns "google.golang.org/api/dns/v1" + "google.golang.org/cloud/compute/metadata" + gcfg "gopkg.in/gcfg.v1" + + "k8s.io/kubernetes/federation/pkg/dnsprovider" + "k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal" + "k8s.io/kubernetes/pkg/cloudprovider/providers/gce" +) + +const ( + ProviderName = "google-clouddns" +) + +func init() { + dnsprovider.RegisterDnsProvider(ProviderName, func(config io.Reader) (dnsprovider.Interface, error) { + return newCloudDns(config) + }) +} + +type Config struct { + Global struct { + TokenURL string `gcfg:"token-url"` + TokenBody string `gcfg:"token-body"` + ProjectID string `gcfg:"project-id"` + } +} + +// newCloudDns creates a new instance of a Google Cloud DNS Interface. +func newCloudDns(config io.Reader) (*Interface, error) { + projectID, _ := metadata.ProjectID() // On error we get an empty string, which is fine for now. + var tokenSource oauth2.TokenSource + // Possibly override defaults with config below + if config != nil { + var cfg Config + if err := gcfg.ReadInto(&cfg, config); err != nil { + glog.Errorf("Couldn't read config: %v", err) + return nil, err + } + glog.Infof("Using Google Cloud DNS provider config %+v", cfg) + if cfg.Global.ProjectID != "" { + projectID = cfg.Global.ProjectID + } + if cfg.Global.TokenURL != "" { + tokenSource = gce.NewAltTokenSource(cfg.Global.TokenURL, cfg.Global.TokenBody) + } + } + return CreateInterface(projectID, tokenSource) +} + +// CreateInterface creates a clouddns.Interface object using the specified parameters. +// If no tokenSource is specified, uses oauth2.DefaultTokenSource. 
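+//
+// An illustrative call (the project ID is a placeholder; with a nil
+// tokenSource this requires Google application default credentials to be
+// available in the environment):
+//
+//	iface, err := CreateInterface("my-gcp-project", nil)
+//	if err != nil {
+//		glog.Fatalf("failed to create Cloud DNS interface: %v", err)
+//	}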
+func CreateInterface(projectID string, tokenSource oauth2.TokenSource) (*Interface, error) {
+	if tokenSource == nil {
+		var err error
+		tokenSource, err = google.DefaultTokenSource(
+			oauth2.NoContext,
+			compute.CloudPlatformScope,
+			compute.ComputeScope)
+		if err != nil {
+			return nil, err
+		}
+		glog.Infof("Using DefaultTokenSource %#v", tokenSource)
+	} else {
+		glog.Infof("Using existing Token Source %#v", tokenSource)
+	}
+
+	oauthClient := oauth2.NewClient(oauth2.NoContext, tokenSource)
+
+	service, err := dns.New(oauthClient)
+	if err != nil {
+		glog.Errorf("Failed to get Cloud DNS client: %v", err)
+		// Bail out rather than continuing with a nil service.
+		return nil, err
+	}
+	glog.Infof("Successfully got DNS service: %v\n", service)
+	return newInterfaceWithStub(projectID, internal.NewService(service)), nil
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/clouddns_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/clouddns_test.go
new file mode 100644
index 000000000000..7ab5b3b12020
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/clouddns_test.go
@@ -0,0 +1,229 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package clouddns
+
+import (
+	"flag"
+	"fmt"
+	"os"
+	"testing"
+
+	"k8s.io/kubernetes/federation/pkg/dnsprovider"
+	"k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/stubs"
+	"k8s.io/kubernetes/federation/pkg/dnsprovider/rrstype"
+)
+
+func newTestInterface() (dnsprovider.Interface, error) {
+	// Use this to test the real cloud service - insert appropriate project-id. Default token source will be used. See
+	// https://github.com/golang/oauth2/blob/master/google/default.go for details.
+	// i, err := dnsprovider.GetDnsProvider(ProviderName, strings.NewReader("\n[global]\nproject-id = federation0-cluster00"))
+	return newFakeInterface() // Use this to stub out the entire cloud service
+}
+
+func newFakeInterface() (dnsprovider.Interface, error) {
+	service := stubs.NewService()
+	interface_ := newInterfaceWithStub("", service)
+	zones := service.ManagedZones_
+	// Add a fake zone to test against.
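+	// (The positional fields below are assumed to be the owning managed-zones
+	// stub service, the zone's DNS name, and its initial record sets.)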
+	zone := &stubs.ManagedZone{zones, "example.com", []stubs.ResourceRecordSet{}}
+	call := zones.Create(interface_.project(), zone)
+	_, err := call.Do()
+	if err != nil {
+		return nil, err
+	}
+	return interface_, nil
+}
+
+var interface_ dnsprovider.Interface
+
+func TestMain(m *testing.M) {
+	flag.Parse()
+	var err error
+	interface_, err = newTestInterface()
+	if err != nil {
+		fmt.Printf("Error creating interface: %v", err)
+		os.Exit(1)
+	}
+	os.Exit(m.Run())
+}
+
+// firstZone returns the first zone for the configured dns provider account/project,
+// or fails if it can't be found
+func firstZone(t *testing.T) dnsprovider.Zone {
+	t.Logf("Getting zones")
+	z, supported := interface_.Zones()
+	if supported {
+		t.Logf("Got zones %v\n", z)
+	} else {
+		t.Fatalf("Zones interface not supported by interface %v", interface_)
+	}
+	zones, err := z.List()
+	if err != nil {
+		t.Fatalf("Failed to list zones: %v", err)
+	} else {
+		t.Logf("Got zone list: %v\n", zones)
+	}
+	if len(zones) < 1 {
+		t.Fatalf("Zone listing returned %d, expected >= %d", len(zones), 1)
+	} else {
+		t.Logf("Got at least 1 zone in list:%v\n", zones[0])
+	}
+	return zones[0]
+}
+
+/* rrs returns the ResourceRecordSets interface for a given zone */
+func rrs(t *testing.T, zone dnsprovider.Zone) (r dnsprovider.ResourceRecordSets) {
+	rrsets, supported := zone.ResourceRecordSets()
+	if !supported {
+		t.Fatalf("ResourceRecordSets interface not supported by zone %v", zone)
+		return r
+	}
+	return rrsets
+}
+
+func listRrsOrFail(t *testing.T, rrsets dnsprovider.ResourceRecordSets) []dnsprovider.ResourceRecordSet {
+	rrset, err := rrsets.List()
+	if err != nil {
+		t.Fatalf("Failed to list recordsets: %v", err)
+	} else {
+		// A slice length can never be negative, so the original "< 0" guard was
+		// dead code; an empty result is valid here.
+		t.Logf("Got %d recordsets: %v", len(rrset), rrset)
+	}
+	return rrset
+}
+
+func getExampleRrs(zone dnsprovider.Zone) dnsprovider.ResourceRecordSet {
+	rrsets, _ := zone.ResourceRecordSets()
+	return rrsets.New("www11."+zone.Name(), []string{"10.10.10.10", "169.20.20.20"}, 180, rrstype.A)
+}
+
+func getInvalidRrs(zone dnsprovider.Zone) dnsprovider.ResourceRecordSet {
+	rrsets, _ := zone.ResourceRecordSets()
+	return rrsets.New("www12."+zone.Name(), []string{"rubbish", "rubbish"}, 180, rrstype.A)
+}
+
+func addRrsetOrFail(t *testing.T, rrsets dnsprovider.ResourceRecordSets, rrset dnsprovider.ResourceRecordSet) dnsprovider.ResourceRecordSet {
+	result, err := rrsets.Add(rrset)
+	if err != nil {
+		t.Fatalf("Failed to add recordsets: %v", err)
+	}
+	return result
+}
+
+/* TestZonesList verifies that listing of zones succeeds */
+func TestZonesList(t *testing.T) {
+	firstZone(t)
+}
+
+/* TestResourceRecordSetsList verifies that listing of RRS's succeeds */
+func TestResourceRecordSetsList(t *testing.T) {
+	listRrsOrFail(t, rrs(t, firstZone(t)))
+}
+
+/* TestResourceRecordSetsAddSuccess verifies that addition of a valid RRS succeeds */
+func TestResourceRecordSetsAddSuccess(t *testing.T) {
+	zone := firstZone(t)
+	sets := rrs(t, zone)
+	set := addRrsetOrFail(t, sets, getExampleRrs(zone))
+	defer sets.Remove(set)
+	t.Logf("Successfully added resource record set: %v", set)
+}
+
+/* TestResourceRecordSetsAdditionVisible verifies that added RRS is visible after addition */
+func TestResourceRecordSetsAdditionVisible(t *testing.T) {
+	zone := firstZone(t)
+	sets := rrs(t, zone)
+	rrset := getExampleRrs(zone)
+	set := addRrsetOrFail(t, sets, rrset)
+	defer sets.Remove(set)
+	t.Logf("Successfully added resource
record set: %v", set) + found := false + for _, record := range listRrsOrFail(t, sets) { + if record.Name() == rrset.Name() { + found = true + break + } + } + if !found { + t.Errorf("Failed to find added resource record set %s", rrset.Name()) + } +} + +/* TestResourceRecordSetsAddDuplicateFail verifies that addition of a duplicate RRS fails */ +func TestResourceRecordSetsAddDuplicateFail(t *testing.T) { + zone := firstZone(t) + sets := rrs(t, zone) + rrset := getExampleRrs(zone) + set := addRrsetOrFail(t, sets, rrset) + defer sets.Remove(set) + t.Logf("Successfully added resource record set: %v", set) + // Try to add it again, and verify that the call fails. + rrs, err := sets.Add(rrset) + if err == nil { + defer sets.Remove(rrs) + t.Errorf("Should have failed to add duplicate resource record %v, but succeeded instead.", set) + } else { + t.Logf("Correctly failed to add duplicate resource record %v: %v", set, err) + } +} + +/* TestResourceRecordSetsRemove verifies that the removal of an existing RRS succeeds */ +func TestResourceRecordSetsRemove(t *testing.T) { + zone := firstZone(t) + sets := rrs(t, zone) + rrset := getExampleRrs(zone) + set := addRrsetOrFail(t, sets, rrset) + err := sets.Remove(set) + if err != nil { + // Try again to clean up. + defer sets.Remove(rrset) + t.Errorf("Failed to remove resource record set %v after adding", rrset) + } else { + t.Logf("Successfully removed resource set %v after adding", set) + } +} + +/* TestResourceRecordSetsRemoveGone verifies that a removed RRS no longer exists */ +func TestResourceRecordSetsRemoveGone(t *testing.T) { + zone := firstZone(t) + sets := rrs(t, zone) + rrset := getExampleRrs(zone) + set := addRrsetOrFail(t, sets, rrset) + err := sets.Remove(set) + if err != nil { + // Try again to clean up. + defer sets.Remove(rrset) + t.Errorf("Failed to remove resource record set %v after adding", rrset) + } else { + t.Logf("Successfully removed resource set %v after adding", set) + } + // Check that it's gone + list := listRrsOrFail(t, sets) + found := false + for _, set := range list { + if set.Name() == rrset.Name() { + found = true + break + } + } + if found { + t.Errorf("Deleted resource record set %v is still present", rrset) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/interface.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/interface.go new file mode 100644 index 000000000000..0dd5ab877e71 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/interface.go @@ -0,0 +1,43 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package clouddns
+
+import (
+	"k8s.io/kubernetes/federation/pkg/dnsprovider"
+	"k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/interfaces"
+)
+
+var _ dnsprovider.Interface = Interface{}
+
+type Interface struct {
+	project_ string
+	service  interfaces.Service
+}
+
+// newInterfaceWithStub facilitates stubbing out the underlying Google Cloud DNS
+// library for testing purposes. It returns a provider-independent interface.
+func newInterfaceWithStub(project string, service interfaces.Service) *Interface {
+	return &Interface{project, service}
+}
+
+func (i Interface) Zones() (zones dnsprovider.Zones, supported bool) {
+	return Zones{i.service.ManagedZones(), &i}, true
+}
+
+func (i Interface) project() string {
+	return i.project_
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/change.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/change.go
new file mode 100644
index 000000000000..7b2a2edffaf9
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/change.go
@@ -0,0 +1,42 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package internal
+
+import (
+	dns "google.golang.org/api/dns/v1"
+	"k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/interfaces"
+)
+
+var _ interfaces.Change = Change{}
+
+type Change struct{ impl *dns.Change }
+
+func (c Change) Additions() (rrsets []interfaces.ResourceRecordSet) {
+	rrsets = make([]interfaces.ResourceRecordSet, len(c.impl.Additions))
+	for index, addition := range c.impl.Additions {
+		rrsets[index] = interfaces.ResourceRecordSet(&ResourceRecordSet{addition})
+	}
+	return rrsets
+}
+
+func (c Change) Deletions() (rrsets []interfaces.ResourceRecordSet) {
+	rrsets = make([]interfaces.ResourceRecordSet, len(c.impl.Deletions))
+	for index, deletion := range c.impl.Deletions {
+		rrsets[index] = interfaces.ResourceRecordSet(&ResourceRecordSet{deletion})
+	}
+	return rrsets
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/changes_create_call.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/changes_create_call.go
new file mode 100644
index 000000000000..6877ebe14ff5
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/changes_create_call.go
@@ -0,0 +1,33 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + dns "google.golang.org/api/dns/v1" + "google.golang.org/api/googleapi" + + "k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/interfaces" +) + +var _ interfaces.ChangesCreateCall = ChangesCreateCall{} + +type ChangesCreateCall struct{ impl *dns.ChangesCreateCall } + +func (c ChangesCreateCall) Do(opts ...googleapi.CallOption) (interfaces.Change, error) { + ch, err := c.impl.Do(opts...) + return &Change{ch}, err +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/changes_service.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/changes_service.go new file mode 100644 index 000000000000..3d8b78349120 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/changes_service.go @@ -0,0 +1,42 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + dns "google.golang.org/api/dns/v1" + "k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/interfaces" +) + +var _ interfaces.ChangesService = ChangesService{} + +type ChangesService struct{ impl *dns.ChangesService } + +func (c ChangesService) Create(project string, managedZone string, change interfaces.Change) interfaces.ChangesCreateCall { + return &ChangesCreateCall{c.impl.Create(project, managedZone, change.(*Change).impl)} +} + +func (c ChangesService) NewChange(additions, deletions []interfaces.ResourceRecordSet) interfaces.Change { + adds := make([]*dns.ResourceRecordSet, len(additions)) + deletes := make([]*dns.ResourceRecordSet, len(deletions)) + for i, a := range additions { + adds[i] = a.(*ResourceRecordSet).impl + } + for i, d := range deletions { + deletes[i] = d.(*ResourceRecordSet).impl + } + return &Change{&dns.Change{Additions: adds, Deletions: deletes}} +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/clouddns.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/clouddns.go new file mode 100644 index 000000000000..4a985bd5623d --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/clouddns.go @@ -0,0 +1,35 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +// Implementation of internal/interfaces/* on top of Google Cloud DNS API. +// See https://godoc.org/google.golang.org/api/dns/v1 for details +// This facilitates stubbing out Google Cloud DNS for unit testing. +// Only the parts of the API that we use are included. +// Others can be added as needed. + +import dns "google.golang.org/api/dns/v1" + +type ( + Project struct{ impl *dns.Project } + + ProjectsGetCall struct{ impl *dns.ProjectsGetCall } + + ProjectsService struct{ impl *dns.ProjectsService } + + Quota struct{ impl *dns.Quota } +) diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/interfaces/interfaces.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/interfaces/interfaces.go new file mode 100644 index 000000000000..2e7aab9d1508 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/interfaces/interfaces.go @@ -0,0 +1,207 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package interfaces + +import ( + "google.golang.org/api/googleapi" + "k8s.io/kubernetes/federation/pkg/dnsprovider/rrstype" +) + +// Interfaces to directly mirror the Google Cloud DNS API structures. +// See https://godoc.org/google.golang.org/api/dns/v1 for details +// This facilitates stubbing out Google Cloud DNS for unit testing. +// Only the parts of the API that we use are included. +// Others can be added as needed. 
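+//
+// For example (illustrative), production code wraps the real client while unit
+// tests inject a stub, both satisfying the same Service interface:
+//
+//	var prod Service = internal.NewService(realDnsService) // production wiring
+//	var test Service = stubs.NewService()                  // test wiring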
+ +type ( + Change interface { + Additions() []ResourceRecordSet + Deletions() []ResourceRecordSet + // Id() string // TODO: Add as needed + // Kind() string // TODO: Add as needed + // StartTime() string // TODO: Add as needed + // Status() string // TODO: Add as needed + } + + ChangesCreateCall interface { + // Context(ctx context.Context) *ChangesCreateCall // TODO: Add as needed + Do(opts ...googleapi.CallOption) (Change, error) + // Fields(s ...googleapi.Field) *ChangesCreateCall // TODO: Add as needed + } + + ChangesGetCall interface { + // Context(ctx context.Context) *ChangesGetCall // TODO: Add as needed + Do(opts ...googleapi.CallOption) (*Change, error) + // Fields(s ...googleapi.Field) *ChangesGetCall // TODO: Add as needed + // IfNoneMatch(entityTag string) *ChangesGetCall // TODO: Add as needed + } + + ChangesListCall interface { + // Context(ctx context.Context) *ChangesListCall // TODO: Add as needed + Do(opts ...googleapi.CallOption) (*ChangesListResponse, error) + // Fields(s ...googleapi.Field) *ChangesListCall // TODO: Add as needed + // IfNoneMatch(entityTag string) *ChangesListCall // TODO: Add as needed + // MaxResults(maxResults int64) *ChangesListCall // TODO: Add as needed + // PageToken(pageToken string) *ChangesListCall // TODO: Add as needed + // Pages(ctx context.Context, f func(*ChangesListResponse) error) error // TODO: Add as needed + // SortBy(sortBy string) *ChangesListCall // TODO: Add as needed + // SortOrder(sortOrder string) *ChangesListCall // TODO: Add as needed + } + + ChangesListResponse interface { + // Changes() []*Change // TODO: Add as needed + // Kind() string // TODO: Add as needed + // NextPageToken() string // TODO: Add as needed + // ServerResponse() googleapi.ServerResponse // TODO: Add as needed + // ForceSendFields() []string // TODO: Add as needed + } + + ChangesService interface { + // Create(project string, managedZone string, change *Change) *ChangesCreateCall // TODO: Add as needed + Create(project string, managedZone string, change Change) ChangesCreateCall + NewChange(additions, deletions []ResourceRecordSet) Change + + // Get(project string, managedZone string, changeId string) *ChangesGetCall // TODO: Add as needed + // List(project string, managedZone string) *ChangesListCall // TODO: Add as needed + } + + ManagedZone interface { + // CreationTime() string // TODO: Add as needed + // Description() string // TODO: Add as needed + DnsName() string + // Id() uint64 // TODO: Add as needed + // Kind() string // TODO: Add as needed + Name() string + // NameServerSet() string // TODO: Add as needed + // NameServers() []string // TODO: Add as needed + // ServerResponse() googleapi.ServerResponse // TODO: Add as needed + // ForceSendFields() []string // TODO: Add as needed + } + + ManagedZonesCreateCall interface { + // Context(ctx context.Context) *ManagedZonesCreateCall // TODO: Add as needed + Do(opts ...googleapi.CallOption) (ManagedZone, error) + // Fields(s ...googleapi.Field) *ManagedZonesCreateCall // TODO: Add as needed + } + + ManagedZonesDeleteCall interface { + // Context(ctx context.Context) *ManagedZonesDeleteCall // TODO: Add as needed + Do(opts ...googleapi.CallOption) error + // Fields(s ...googleapi.Field) *ManagedZonesDeleteCall // TODO: Add as needed + } + + ManagedZonesGetCall interface { + // Context(ctx context.Context) *ManagedZonesGetCall // TODO: Add as needed + Do(opts ...googleapi.CallOption) (ManagedZone, error) + // Fields(s ...googleapi.Field) *ManagedZonesGetCall // TODO: Add as needed + // 
IfNoneMatch(entityTag string) *ManagedZonesGetCall // TODO: Add as needed + } + + ManagedZonesListCall interface { + // Context(ctx context.Context) *ManagedZonesListCall // TODO: Add as needed + DnsName(dnsName string) ManagedZonesListCall + Do(opts ...googleapi.CallOption) (ManagedZonesListResponse, error) + // Fields(s ...googleapi.Field) *ManagedZonesListCall // TODO: Add as needed + // IfNoneMatch(entityTag string) *ManagedZonesListCall // TODO: Add as needed + // MaxResults(maxResults int64) *ManagedZonesListCall // TODO: Add as needed + // PageToken(pageToken string) *ManagedZonesListCall // TODO: Add as needed + // Pages(ctx context.Context, f func(*ManagedZonesListResponse) error) error // TODO: Add as needed + } + + ManagedZonesListResponse interface { + // Kind() string // TODO: Add as needed + // ManagedZones() []*ManagedZone // TODO: Add as needed + ManagedZones() []ManagedZone + // NextPageToken string // TODO: Add as needed + // ServerResponse() googleapi.ServerResponse // TODO: Add as needed + // ForceSendFields() []string // TODO: Add as needed + } + + ManagedZonesService interface { + // NewManagedZonesService(s *Service) *ManagedZonesService // TODO: Add to service if needed + Create(project string, managedzone ManagedZone) ManagedZonesCreateCall + Delete(project string, managedZone string) ManagedZonesDeleteCall + Get(project string, managedZone string) ManagedZonesGetCall + List(project string) ManagedZonesListCall + } + + Project interface { + // Id() string // TODO: Add as needed + // Kind() string // TODO: Add as needed + // Number() uint64 // TODO: Add as needed + // Quota() *Quota // TODO: Add as needed + // ServerResponse() googleapi.ServerResponse // TODO: Add as needed + // ForceSendFields() []string // TODO: Add as needed + } + + ProjectsGetCall interface { + // TODO: Add as needed + } + + ProjectsService interface { + // TODO: Add as needed + } + + Quota interface { + // TODO: Add as needed + } + + ResourceRecordSet interface { + // Kind() string // TODO: Add as needed + Name() string + Rrdatas() []string + Ttl() int64 + Type() string + // ForceSendFields []string // TODO: Add as needed + } + + ResourceRecordSetsListCall interface { + // Context(ctx context.Context) *ResourceRecordSetsListCall // TODO: Add as needed + // Do(opts ...googleapi.CallOption) (*ResourceRecordSetsListResponse, error) // TODO: Add as needed + Do(opts ...googleapi.CallOption) (ResourceRecordSetsListResponse, error) + // Fields(s ...googleapi.Field) *ResourceRecordSetsListCall // TODO: Add as needed + // IfNoneMatch(entityTag string) *ResourceRecordSetsListCall // TODO: Add as needed + // MaxResults(maxResults int64) *ResourceRecordSetsListCall // TODO: Add as needed + Name(name string) ResourceRecordSetsListCall + // PageToken(pageToken string) *ResourceRecordSetsListCall // TODO: Add as needed + Type(type_ string) ResourceRecordSetsListCall + } + + ResourceRecordSetsListResponse interface { + // Kind() string // TODO: Add as needed + // NextPageToken() string // TODO: Add as needed + Rrsets() []ResourceRecordSet + // ServerResponse() googleapi.ServerResponse // TODO: Add as needed + // ForceSendFields() []string // TODO: Add as needed + } + + ResourceRecordSetsService interface { + // NewResourceRecordSetsService(s *Service) *ResourceRecordSetsService // TODO: add to service as needed + List(project string, managedZone string) ResourceRecordSetsListCall + NewResourceRecordSet(name string, rrdatas []string, ttl int64, type_ rrstype.RrsType) ResourceRecordSet + } + + Service interface 
{ + // BasePath() string // TODO: Add as needed + // UserAgent() string // TODO: Add as needed + Changes() ChangesService + ManagedZones() ManagedZonesService + Projects() ProjectsService + ResourceRecordSets() ResourceRecordSetsService + } + // New(client *http.Client) (*Service, error) // TODO: Add as needed +) diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/managed_zone.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/managed_zone.go new file mode 100644 index 000000000000..9336ea090eb8 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/managed_zone.go @@ -0,0 +1,34 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + dns "google.golang.org/api/dns/v1" + "k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/interfaces" +) + +var _ interfaces.ManagedZone = ManagedZone{} + +type ManagedZone struct{ impl *dns.ManagedZone } + +func (m ManagedZone) Name() string { + return m.impl.Name +} + +func (m ManagedZone) DnsName() string { + return m.impl.DnsName +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/managed_zone_create_call.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/managed_zone_create_call.go new file mode 100644 index 000000000000..dc7c09e05f50 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/managed_zone_create_call.go @@ -0,0 +1,32 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + dns "google.golang.org/api/dns/v1" + "google.golang.org/api/googleapi" + "k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/interfaces" +) + +var _ interfaces.ManagedZonesCreateCall = ManagedZonesCreateCall{} + +type ManagedZonesCreateCall struct{ impl *dns.ManagedZonesCreateCall } + +func (call ManagedZonesCreateCall) Do(opts ...googleapi.CallOption) (interfaces.ManagedZone, error) { + m, err := call.impl.Do(opts...) 
+ return &ManagedZone{m}, err +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/managed_zones_delete_call.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/managed_zones_delete_call.go new file mode 100644 index 000000000000..7bc398c166ee --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/managed_zones_delete_call.go @@ -0,0 +1,31 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + dns "google.golang.org/api/dns/v1" + "google.golang.org/api/googleapi" + "k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/interfaces" +) + +var _ interfaces.ManagedZonesDeleteCall = ManagedZonesDeleteCall{} + +type ManagedZonesDeleteCall struct{ impl *dns.ManagedZonesDeleteCall } + +func (call ManagedZonesDeleteCall) Do(opts ...googleapi.CallOption) error { + return call.impl.Do(opts...) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/managed_zones_get_call.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/managed_zones_get_call.go new file mode 100644 index 000000000000..ca1f5a412eae --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/managed_zones_get_call.go @@ -0,0 +1,36 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + dns "google.golang.org/api/dns/v1" + "google.golang.org/api/googleapi" + "k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/interfaces" +) + +var _ interfaces.ManagedZonesGetCall = ManagedZonesGetCall{} + +type ManagedZonesGetCall struct{ impl *dns.ManagedZonesGetCall } + +func (call ManagedZonesGetCall) Do(opts ...googleapi.CallOption) (interfaces.ManagedZone, error) { + m, err := call.impl.Do(opts...) 
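+ // This is the thin-wrapper pattern used throughout this package: delegate to the + // underlying *dns.ManagedZonesGetCall, then re-wrap the result so that callers + // depend only on the local interfaces package. Note that when err is non-nil, + // m is nil and the returned ManagedZone should not be used.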
+ return &ManagedZone{m}, err +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/managed_zones_list_call.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/managed_zones_list_call.go new file mode 100644 index 000000000000..6c0ec65ba27b --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/managed_zones_list_call.go @@ -0,0 +1,37 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + dns "google.golang.org/api/dns/v1" + "google.golang.org/api/googleapi" + "k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/interfaces" +) + +var _ interfaces.ManagedZonesListCall = &ManagedZonesListCall{} + +type ManagedZonesListCall struct{ impl *dns.ManagedZonesListCall } + +func (call *ManagedZonesListCall) Do(opts ...googleapi.CallOption) (interfaces.ManagedZonesListResponse, error) { + response, err := call.impl.Do(opts...) + return &ManagedZonesListResponse{response}, err +} + +func (call *ManagedZonesListCall) DnsName(dnsName string) interfaces.ManagedZonesListCall { + call.impl.DnsName(dnsName) + return call +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/managed_zones_list_response.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/managed_zones_list_response.go new file mode 100644 index 000000000000..27b734d9059a --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/managed_zones_list_response.go @@ -0,0 +1,34 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package internal + +import ( + dns "google.golang.org/api/dns/v1" + "k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/interfaces" +) + +var _ interfaces.ManagedZonesListResponse = &ManagedZonesListResponse{} + +type ManagedZonesListResponse struct{ impl *dns.ManagedZonesListResponse } + +func (response *ManagedZonesListResponse) ManagedZones() []interfaces.ManagedZone { + zones := make([]interfaces.ManagedZone, len(response.impl.ManagedZones)) + for i, z := range response.impl.ManagedZones { + zones[i] = &ManagedZone{z} + } + return zones +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/managed_zones_service.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/managed_zones_service.go new file mode 100644 index 000000000000..91ea5cce008e --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/managed_zones_service.go @@ -0,0 +1,42 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + dns "google.golang.org/api/dns/v1" + "k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/interfaces" +) + +var _ interfaces.ManagedZonesService = &ManagedZonesService{} + +type ManagedZonesService struct{ impl *dns.ManagedZonesService } + +func (m *ManagedZonesService) Create(project string, managedzone interfaces.ManagedZone) interfaces.ManagedZonesCreateCall { + return &ManagedZonesCreateCall{m.impl.Create(project, managedzone.(ManagedZone).impl)} +} + +func (m *ManagedZonesService) Delete(project, managedZone string) interfaces.ManagedZonesDeleteCall { + return &ManagedZonesDeleteCall{m.impl.Delete(project, managedZone)} +} + +func (m *ManagedZonesService) Get(project, managedZone string) interfaces.ManagedZonesGetCall { + return &ManagedZonesGetCall{m.impl.Get(project, managedZone)} +} + +func (m *ManagedZonesService) List(project string) interfaces.ManagedZonesListCall { + return &ManagedZonesListCall{m.impl.List(project)} +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/rrset.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/rrset.go new file mode 100644 index 000000000000..5925cbe51e6d --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/rrset.go @@ -0,0 +1,31 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + dns "google.golang.org/api/dns/v1" + "k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/interfaces" +) + +var _ interfaces.ResourceRecordSet = ResourceRecordSet{} + +type ResourceRecordSet struct{ impl *dns.ResourceRecordSet } + +func (r ResourceRecordSet) Name() string { return r.impl.Name } +func (r ResourceRecordSet) Rrdatas() []string { return r.impl.Rrdatas } +func (r ResourceRecordSet) Ttl() int64 { return r.impl.Ttl } +func (r ResourceRecordSet) Type() string { return r.impl.Type } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/rrsets_list_call.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/rrsets_list_call.go new file mode 100644 index 000000000000..fc30be6d768d --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/rrsets_list_call.go @@ -0,0 +1,44 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + dns "google.golang.org/api/dns/v1" + "google.golang.org/api/googleapi" + "k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/interfaces" +) + +var _ interfaces.ResourceRecordSetsListCall = &ResourceRecordSetsListCall{} + +type ResourceRecordSetsListCall struct { + impl *dns.ResourceRecordSetsListCall +} + +func (call *ResourceRecordSetsListCall) Do(opts ...googleapi.CallOption) (interfaces.ResourceRecordSetsListResponse, error) { + response, err := call.impl.Do(opts...) + return &ResourceRecordSetsListResponse{response}, err +} + +func (call *ResourceRecordSetsListCall) Name(name string) interfaces.ResourceRecordSetsListCall { + call.impl.Name(name) + return call +} + +func (call *ResourceRecordSetsListCall) Type(type_ string) interfaces.ResourceRecordSetsListCall { + call.impl.Type(type_) + return call +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/rrsets_list_response.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/rrsets_list_response.go new file mode 100644 index 000000000000..af4aa87e252d --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/rrsets_list_response.go @@ -0,0 +1,37 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + dns "google.golang.org/api/dns/v1" + "k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/interfaces" +) + +var _ interfaces.ResourceRecordSetsListResponse = &ResourceRecordSetsListResponse{} + +type ResourceRecordSetsListResponse struct { + impl *dns.ResourceRecordSetsListResponse +} + +func (response *ResourceRecordSetsListResponse) Rrsets() []interfaces.ResourceRecordSet { + rrsets := make([]interfaces.ResourceRecordSet, len(response.impl.Rrsets)) + for i, rrset := range response.impl.Rrsets { + rrsets[i] = &ResourceRecordSet{rrset} + } + return rrsets + +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/rrsets_service.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/rrsets_service.go new file mode 100644 index 000000000000..ba1832cd4519 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/rrsets_service.go @@ -0,0 +1,38 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package internal + +import ( + dns "google.golang.org/api/dns/v1" + "k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/interfaces" + "k8s.io/kubernetes/federation/pkg/dnsprovider/rrstype" +) + +var _ interfaces.ResourceRecordSetsService = &ResourceRecordSetsService{} + +type ResourceRecordSetsService struct { + impl *dns.ResourceRecordSetsService +} + +func (service ResourceRecordSetsService) List(project string, managedZone string) interfaces.ResourceRecordSetsListCall { + return &ResourceRecordSetsListCall{service.impl.List(project, managedZone)} +} + +func (service ResourceRecordSetsService) NewResourceRecordSet(name string, rrdatas []string, ttl int64, type_ rrstype.RrsType) interfaces.ResourceRecordSet { + rrset := dns.ResourceRecordSet{Name: name, Rrdatas: rrdatas, Ttl: ttl, Type: string(type_)} + return &ResourceRecordSet{&rrset} +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/service.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/service.go new file mode 100644 index 000000000000..d3854fb6225e --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/service.go @@ -0,0 +1,48 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + dns "google.golang.org/api/dns/v1" + "k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/interfaces" +) + +var _ interfaces.Service = &Service{} + +type Service struct { + impl *dns.Service +} + +func NewService(service *dns.Service) *Service { + return &Service{service} +} + +func (s *Service) Changes() interfaces.ChangesService { + return &ChangesService{s.impl.Changes} +} + +func (s *Service) ManagedZones() interfaces.ManagedZonesService { + return &ManagedZonesService{s.impl.ManagedZones} +} + +func (s *Service) Projects() interfaces.ProjectsService { + return &ProjectsService{s.impl.Projects} +} + +func (s *Service) ResourceRecordSets() interfaces.ResourceRecordSetsService { + return &ResourceRecordSetsService{s.impl.ResourceRecordSets} +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/stubs/change.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/stubs/change.go new file mode 100644 index 000000000000..2c103974974b --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/stubs/change.go @@ -0,0 +1,35 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package stubs + +import "k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/interfaces" + +var _ interfaces.Change = &Change{} + +type Change struct { + Service *ChangesService + Additions_ []interfaces.ResourceRecordSet + Deletions_ []interfaces.ResourceRecordSet +} + +func (c *Change) Additions() (rrsets []interfaces.ResourceRecordSet) { + return c.Additions_ +} + +func (c *Change) Deletions() (rrsets []interfaces.ResourceRecordSet) { + return c.Deletions_ +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/stubs/changes_create_call.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/stubs/changes_create_call.go new file mode 100644 index 000000000000..6e81506cf39d --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/stubs/changes_create_call.go @@ -0,0 +1,76 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package stubs + +import ( + "fmt" + + "google.golang.org/api/googleapi" + "k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/interfaces" +) + +var _ interfaces.ChangesCreateCall = ChangesCreateCall{} + +type ChangesCreateCall struct { + Service *ChangesService + Project string + Zone string + Change interfaces.Change + Error error // Use this to override the response if necessary +} + +func hashKey(set interfaces.ResourceRecordSet) string { + return fmt.Sprintf("%s-%d-%s", set.Name(), set.Ttl(), set.Type()) +} + +func (c ChangesCreateCall) Do(opts ...googleapi.CallOption) (interfaces.Change, error) { + if c.Error != nil { + return nil, c.Error + } + zone := (c.Service.Service.ManagedZones_.Impl[c.Project][c.Zone]).(*ManagedZone) + existing := map[string]bool{} // Simple mechanism to detect duplicate and missing rrsets before committing + for _, set := range zone.Rrsets { + existing[hashKey(set)] = true + } + for _, add := range c.Change.Additions() { + if existing[hashKey(add)] { + return nil, fmt.Errorf("Attempt to insert duplicate rrset %v", add) + } + } + for _, del := range c.Change.Deletions() { + if !existing[hashKey(del)] { + return nil, fmt.Errorf("Attempt to delete non-existent rrset %v", del) + } + } + deleted := map[string]bool{} + for _, del := range c.Change.Deletions() { + deleted[hashKey(del)] = true + } + // Rebuild the rrset list rather than deleting entries in place, so that + // removing one rrset cannot shift the position of another mid-loop. + remaining := make([]ResourceRecordSet, 0, len(zone.Rrsets)) + for _, set := range zone.Rrsets { + if !deleted[hashKey(set)] { + remaining = append(remaining, set) + } + } + for _, add := range c.Change.Additions() { + remaining = append(remaining, *(add.(*ResourceRecordSet))) + } + zone.Rrsets = remaining 
+ return c.Change, nil +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/stubs/changes_service.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/stubs/changes_service.go new file mode 100644 index 000000000000..1f7648907227 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/stubs/changes_service.go @@ -0,0 +1,33 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package stubs + +import "k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/interfaces" + +var _ interfaces.ChangesService = &ChangesService{} + +type ChangesService struct { + Service *Service +} + +func (c *ChangesService) Create(project string, managedZone string, change interfaces.Change) interfaces.ChangesCreateCall { + return &ChangesCreateCall{c, project, managedZone, change, nil} +} + +func (c *ChangesService) NewChange(additions, deletions []interfaces.ResourceRecordSet) interfaces.Change { + return &Change{c, additions, deletions} +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/stubs/clouddns.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/stubs/clouddns.go new file mode 100644 index 000000000000..2d8a206dcac6 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/stubs/clouddns.go @@ -0,0 +1,41 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package stubs + +// In-memory implementation of internal/interfaces/*, mirroring the Google Cloud DNS API. +// See https://godoc.org/google.golang.org/api/dns/v1 for details. +// This facilitates stubbing out Google Cloud DNS for unit testing. +// Only the parts of the API that we use are included. +// Others can be added as needed. + +import dns "google.golang.org/api/dns/v1" + +type ( + // TODO: We don't need these yet, so they remain unimplemented. Add later as required. 
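+ // As an illustrative sketch only (not part of the upstream API; the project and + // zone names below are placeholders), a test might wire these stubs together so: + // + // svc := NewService() + // zone := &ManagedZone{Service: svc.ManagedZones_, Name_: "example.com"} + // if _, err := svc.ManagedZones().Create("my-project", zone).Do(); err != nil { + // t.Fatal(err) + // }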
+ Project struct{ impl *dns.Project } + ProjectsGetCall struct{ impl *dns.ProjectsGetCall } + ProjectsService struct{ impl *dns.ProjectsService } + Quota struct{ impl *dns.Quota } +) diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/stubs/managed_zone.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/stubs/managed_zone.go new file mode 100644 index 000000000000..4391efd31af6 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/stubs/managed_zone.go @@ -0,0 +1,35 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package stubs + +import "k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/interfaces" + +var _ interfaces.ManagedZone = ManagedZone{} + +type ManagedZone struct { + Service *ManagedZonesService + Name_ string + Rrsets []ResourceRecordSet +} + +func (m ManagedZone) Name() string { + return m.Name_ +} + +func (m ManagedZone) DnsName() string { + return m.Name_ // Don't bother storing a separate DNS name +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/stubs/managed_zone_create_call.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/stubs/managed_zone_create_call.go new file mode 100644 index 000000000000..fe4ef7cc8822 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/stubs/managed_zone_create_call.go @@ -0,0 +1,51 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package stubs + +import ( + "fmt" + + "google.golang.org/api/googleapi" + "k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/interfaces" +) + +var _ interfaces.ManagedZonesCreateCall = ManagedZonesCreateCall{} + +type ManagedZonesCreateCall struct { + Error *error // Use to override response for testing + Service *ManagedZonesService + Project string + ManagedZone interfaces.ManagedZone +} + +func (call ManagedZonesCreateCall) Do(opts ...googleapi.CallOption) (interfaces.ManagedZone, error) { + if call.Error != nil { + return nil, *call.Error + } + if call.Service.Impl == nil { + call.Service.Impl = map[string]map[string]interfaces.ManagedZone{} + } + if call.Service.Impl[call.Project] == nil { + call.Service.Impl[call.Project] = map[string]interfaces.ManagedZone{} + } + if call.Service.Impl[call.Project][call.ManagedZone.DnsName()] != nil { + return nil, fmt.Errorf("Error - attempt to create duplicate zone %s in project %s.", + call.ManagedZone.DnsName(), call.Project) + } + call.Service.Impl[call.Project][call.ManagedZone.DnsName()] = call.ManagedZone + return call.ManagedZone, nil +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/stubs/managed_zones_delete_call.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/stubs/managed_zones_delete_call.go new file mode 100644 index 000000000000..e63fb74da265 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/stubs/managed_zones_delete_call.go @@ -0,0 +1,53 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package stubs + +import ( + "fmt" + + "google.golang.org/api/googleapi" + "k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/interfaces" +) + +var _ interfaces.ManagedZonesDeleteCall = ManagedZonesDeleteCall{} + +type ManagedZonesDeleteCall struct { + Service *ManagedZonesService + Project string + ZoneName string + Error *error // Use this to override the response for testing if required +} + +func (call ManagedZonesDeleteCall) Do(opts ...googleapi.CallOption) error { + if call.Error != nil { // Return the override value + return *call.Error + } else { // Just try to delete it from the in-memory map. 
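+ // Impl is a two-level map: project name -> zone name -> zone.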
+ project, ok := call.Service.Impl[call.Project] + if ok { + zone, ok := project[call.ZoneName] + if ok { + delete(project, zone.Name()) + return nil + } else { + return fmt.Errorf("Failed to find zone %s in project %s to delete it", call.ZoneName, call.Project) + } + } else { + return fmt.Errorf("Failed to find project %s to delete zone %s from it", call.Project, call.ZoneName) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/stubs/managed_zones_get_call.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/stubs/managed_zones_get_call.go new file mode 100644 index 000000000000..6fe75c04a839 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/stubs/managed_zones_get_call.go @@ -0,0 +1,43 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package stubs + +import ( + "google.golang.org/api/googleapi" + "k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/interfaces" +) + +var _ interfaces.ManagedZonesGetCall = ManagedZonesGetCall{} + +type ManagedZonesGetCall struct { + Service *ManagedZonesService + Project string + ZoneName string + Response interfaces.ManagedZone // Use this to override the response if required + Error *error // Use this to override the response if required + DnsName_ string +} + +func (call ManagedZonesGetCall) Do(opts ...googleapi.CallOption) (interfaces.ManagedZone, error) { + if call.Error != nil { + return nil, *call.Error + } + if call.Response != nil { + return call.Response, nil + } + return call.Service.Impl[call.Project][call.ZoneName], nil +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/stubs/managed_zones_list_call.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/stubs/managed_zones_list_call.go new file mode 100644 index 000000000000..d57815c79cd9 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/stubs/managed_zones_list_call.go @@ -0,0 +1,60 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package stubs + +import ( + "fmt" + + "google.golang.org/api/googleapi" + "k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/interfaces" +) + +var _ interfaces.ManagedZonesListCall = &ManagedZonesListCall{} + +type ManagedZonesListCall struct { + Service *ManagedZonesService + Project string + Response *interfaces.ManagedZonesListResponse // Use this to override the response if required + Error *error // Use this to override the response if required + DnsName_ string +} + +func (call *ManagedZonesListCall) Do(opts ...googleapi.CallOption) (interfaces.ManagedZonesListResponse, error) { + if call.Error != nil { + return nil, *call.Error + } + if call.Response != nil { + return *call.Response, nil + } + proj, projectFound := call.Service.Impl[call.Project] + if !projectFound { + return nil, fmt.Errorf("Project %s not found.", call.Project) + } + if call.DnsName_ != "" { + return &ManagedZonesListResponse{[]interfaces.ManagedZone{proj[call.DnsName_]}}, nil + } + list := []interfaces.ManagedZone{} + for _, zone := range proj { + list = append(list, zone) + } + return &ManagedZonesListResponse{list}, nil +} + +func (call *ManagedZonesListCall) DnsName(dnsName string) interfaces.ManagedZonesListCall { + call.DnsName_ = dnsName + return call +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/stubs/managed_zones_list_response.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/stubs/managed_zones_list_response.go new file mode 100644 index 000000000000..b949612f0c7e --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/stubs/managed_zones_list_response.go @@ -0,0 +1,27 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package stubs + +import "k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/interfaces" + +var _ interfaces.ManagedZonesListResponse = &ManagedZonesListResponse{} + +type ManagedZonesListResponse struct{ ManagedZones_ []interfaces.ManagedZone } + +func (response *ManagedZonesListResponse) ManagedZones() []interfaces.ManagedZone { + return response.ManagedZones_ +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/stubs/managed_zones_service.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/stubs/managed_zones_service.go new file mode 100644 index 000000000000..85b0087ea6d4 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/stubs/managed_zones_service.go @@ -0,0 +1,41 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package stubs + +import "k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/interfaces" + +var _ interfaces.ManagedZonesService = &ManagedZonesService{} + +type ManagedZonesService struct { + Impl map[string]map[string]interfaces.ManagedZone +} + +func (m *ManagedZonesService) Create(project string, managedzone interfaces.ManagedZone) interfaces.ManagedZonesCreateCall { + return &ManagedZonesCreateCall{nil, m, project, managedzone.(*ManagedZone)} +} + +func (m *ManagedZonesService) Delete(project string, managedZone string) interfaces.ManagedZonesDeleteCall { + return &ManagedZonesDeleteCall{m, project, managedZone, nil} +} + +func (m *ManagedZonesService) Get(project string, managedZone string) interfaces.ManagedZonesGetCall { + return &ManagedZonesGetCall{m, project, managedZone, nil, nil, ""} +} + +func (m *ManagedZonesService) List(project string) interfaces.ManagedZonesListCall { + return &ManagedZonesListCall{m, project, nil, nil, ""} +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/stubs/rrset.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/stubs/rrset.go new file mode 100644 index 000000000000..bef4de8c920a --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/stubs/rrset.go @@ -0,0 +1,33 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package stubs + +import "k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/interfaces" + +var _ interfaces.ResourceRecordSet = ResourceRecordSet{} + +type ResourceRecordSet struct { + Name_ string + Rrdatas_ []string + Ttl_ int64 + Type_ string +} + +func (r ResourceRecordSet) Name() string { return r.Name_ } +func (r ResourceRecordSet) Rrdatas() []string { return r.Rrdatas_ } +func (r ResourceRecordSet) Ttl() int64 { return r.Ttl_ } +func (r ResourceRecordSet) Type() string { return r.Type_ } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/stubs/rrsets_list_call.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/stubs/rrsets_list_call.go new file mode 100644 index 000000000000..0bdbafcc54d8 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/stubs/rrsets_list_call.go @@ -0,0 +1,45 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package stubs + +import ( + "google.golang.org/api/googleapi" + "k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/interfaces" +) + +var _ interfaces.ResourceRecordSetsListCall = &ResourceRecordSetsListCall{} + +type ResourceRecordSetsListCall struct { + Response_ *ResourceRecordSetsListResponse + Err_ error + Name_ string + Type_ string +} + +func (call *ResourceRecordSetsListCall) Do(opts ...googleapi.CallOption) (interfaces.ResourceRecordSetsListResponse, error) { + return call.Response_, call.Err_ +} + +func (call *ResourceRecordSetsListCall) Name(name string) interfaces.ResourceRecordSetsListCall { + call.Name_ = name + return call +} + +func (call *ResourceRecordSetsListCall) Type(type_ string) interfaces.ResourceRecordSetsListCall { + call.Type_ = type_ + return call +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/stubs/rrsets_list_response.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/stubs/rrsets_list_response.go new file mode 100644 index 000000000000..0e0b20a7f78e --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/stubs/rrsets_list_response.go @@ -0,0 +1,29 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package stubs + +import "k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/interfaces" + +var _ interfaces.ResourceRecordSetsListResponse = &ResourceRecordSetsListResponse{} + +type ResourceRecordSetsListResponse struct { + impl []interfaces.ResourceRecordSet +} + +func (response *ResourceRecordSetsListResponse) Rrsets() []interfaces.ResourceRecordSet { + return response.impl +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/stubs/rrsets_service.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/stubs/rrsets_service.go new file mode 100644 index 000000000000..753f31e42c3f --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/stubs/rrsets_service.go @@ -0,0 +1,58 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package stubs + +import ( + "fmt" + + "k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/interfaces" + "k8s.io/kubernetes/federation/pkg/dnsprovider/rrstype" +) + +var _ interfaces.ResourceRecordSetsService = &ResourceRecordSetsService{} + +type ResourceRecordSetsService struct { + Service *Service + ListCall interfaces.ResourceRecordSetsListCall // Use to override response if required for testing +} + +func (s ResourceRecordSetsService) List(project string, managedZone string) interfaces.ResourceRecordSetsListCall { + if s.ListCall != nil { + return s.ListCall + } + p := s.Service.ManagedZones_.Impl[project] + if p == nil { + return &ResourceRecordSetsListCall{Err_: fmt.Errorf("Project not found: %s", project)} + } + z := p[managedZone] + if z == nil { + return &ResourceRecordSetsListCall{ + Err_: fmt.Errorf("Zone %s not found in project %s", managedZone, project), + } + } + zone := z.(*ManagedZone) + response := &ResourceRecordSetsListResponse{} + for _, set := range zone.Rrsets { + response.impl = append(response.impl, set) + } + return &ResourceRecordSetsListCall{Response_: response} +} + +func (service ResourceRecordSetsService) NewResourceRecordSet(name string, rrdatas []string, ttl int64, type_ rrstype.RrsType) interfaces.ResourceRecordSet { + rrset := ResourceRecordSet{Name_: name, Rrdatas_: rrdatas, Ttl_: ttl, Type_: string(type_)} + return &rrset +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/stubs/service.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/stubs/service.go new file mode 100644 index 000000000000..ac7c48367a1e --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/stubs/service.go @@ -0,0 +1,53 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. 
+*/ + +package stubs + +import "k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/interfaces" + +var _ interfaces.Service = &Service{} + +type Service struct { + Changes_ *ChangesService + ManagedZones_ *ManagedZonesService + Projects_ *ProjectsService + Rrsets_ *ResourceRecordSetsService +} + +func NewService() *Service { + s := &Service{} + s.Changes_ = &ChangesService{s} + s.ManagedZones_ = &ManagedZonesService{} + s.Projects_ = &ProjectsService{} + s.Rrsets_ = &ResourceRecordSetsService{s, nil} + return s +} + +func (s *Service) Changes() interfaces.ChangesService { + return s.Changes_ +} + +func (s *Service) ManagedZones() interfaces.ManagedZonesService { + return s.ManagedZones_ +} + +func (s *Service) Projects() interfaces.ProjectsService { + return s.Projects_ +} + +func (s *Service) ResourceRecordSets() interfaces.ResourceRecordSetsService { + return s.Rrsets_ +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/rrset.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/rrset.go new file mode 100644 index 000000000000..40176b357060 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/rrset.go @@ -0,0 +1,43 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package clouddns + +import ( + "k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/interfaces" + "k8s.io/kubernetes/federation/pkg/dnsprovider/rrstype" +) + +type ResourceRecordSet struct { + impl interfaces.ResourceRecordSet + rrsets *ResourceRecordSets +} + +func (rrset ResourceRecordSet) Name() string { + return rrset.impl.Name() +} + +func (rrset ResourceRecordSet) Rrdatas() []string { + return rrset.impl.Rrdatas() +} + +func (rrset ResourceRecordSet) Ttl() int64 { + return rrset.impl.Ttl() +} + +func (rrset ResourceRecordSet) Type() rrstype.RrsType { + return rrstype.RrsType(rrset.impl.Type()) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/rrsets.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/rrsets.go new file mode 100644 index 000000000000..3a03820ef332 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/rrsets.go @@ -0,0 +1,80 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package clouddns + +import ( + "fmt" + + "k8s.io/kubernetes/federation/pkg/dnsprovider" + "k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/interfaces" + "k8s.io/kubernetes/federation/pkg/dnsprovider/rrstype" +) + +type ResourceRecordSets struct { + zone *Zone + impl interfaces.ResourceRecordSetsService +} + +func (rrsets ResourceRecordSets) List() ([]dnsprovider.ResourceRecordSet, error) { + response, err := rrsets.impl.List(rrsets.project(), rrsets.zone.impl.Name()).Do() + if err != nil { + return nil, err + } + list := make([]dnsprovider.ResourceRecordSet, len(response.Rrsets())) + for i, rrset := range response.Rrsets() { + list[i] = &ResourceRecordSet{rrset, &rrsets} + } + return list, nil +} + +func (rrsets ResourceRecordSets) Add(rrset dnsprovider.ResourceRecordSet) (dnsprovider.ResourceRecordSet, error) { + service := rrsets.zone.zones.interface_.service.Changes() + additions := []interfaces.ResourceRecordSet{rrset.(*ResourceRecordSet).impl} + change := service.NewChange(additions, []interfaces.ResourceRecordSet{}) + newChange, err := service.Create(rrsets.project(), rrsets.zone.impl.Name(), change).Do() + if err != nil { + return nil, err + } + newAdditions := newChange.Additions() + if len(newAdditions) != len(additions) { + return nil, fmt.Errorf("Internal error when adding resource record set. Call succeeded but number of records returned is incorrect. Records sent=%d, records returned=%d, record set:%v", len(additions), len(newAdditions), rrset) + } + return &ResourceRecordSet{newChange.Additions()[0], &rrsets}, nil +} + +func (rrsets ResourceRecordSets) Remove(rrset dnsprovider.ResourceRecordSet) error { + service := rrsets.zone.zones.interface_.service.Changes() + deletions := []interfaces.ResourceRecordSet{rrset.(*ResourceRecordSet).impl} + change := service.NewChange([]interfaces.ResourceRecordSet{}, deletions) + newChange, err := service.Create(rrsets.project(), rrsets.zone.impl.Name(), change).Do() + if err != nil { + return err + } + newDeletions := newChange.Deletions() + if len(newDeletions) != len(deletions) { + return fmt.Errorf("Internal error when deleting resource record set. Call succeeded but number of records returned is incorrect. Records sent=%d, records returned=%d, record set:%v", len(deletions), len(newDeletions), rrset) + } + return nil +} + +func (rrsets ResourceRecordSets) New(name string, rrdatas []string, ttl int64, rrstype rrstype.RrsType) dnsprovider.ResourceRecordSet { + return &ResourceRecordSet{rrsets.impl.NewResourceRecordSet(name, rrdatas, ttl, rrstype), &rrsets} +} + +func (rrsets ResourceRecordSets) project() string { + return rrsets.zone.project() +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/zone.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/zone.go new file mode 100644 index 000000000000..824df344c368 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/zone.go @@ -0,0 +1,39 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package clouddns
+
+import (
+	"k8s.io/kubernetes/federation/pkg/dnsprovider"
+	"k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/interfaces"
+)
+
+type Zone struct {
+	impl  interfaces.ManagedZone
+	zones *Zones
+}
+
+func (zone *Zone) Name() string {
+	return zone.impl.DnsName()
+}
+
+func (zone *Zone) ResourceRecordSets() (dnsprovider.ResourceRecordSets, bool) {
+	return &ResourceRecordSets{zone, zone.zones.interface_.service.ResourceRecordSets()}, true
+}
+
+func (zone Zone) project() string {
+	return zone.zones.project()
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/zones.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/zones.go
new file mode 100644
index 000000000000..3a76f535d7d4
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/zones.go
@@ -0,0 +1,44 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package clouddns
+
+import (
+	"k8s.io/kubernetes/federation/pkg/dnsprovider"
+	"k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns/internal/interfaces"
+)
+
+type Zones struct {
+	impl       interfaces.ManagedZonesService
+	interface_ *Interface
+}
+
+func (zones Zones) List() ([]dnsprovider.Zone, error) {
+	response, err := zones.impl.List(zones.project()).Do()
+	if err != nil {
+		return nil, err
+	}
+	managedZones := response.ManagedZones()
+	zoneList := make([]dnsprovider.Zone, len(managedZones))
+	for i, zone := range managedZones {
+		zoneList[i] = &Zone{zone, &zones}
+	}
+	return zoneList, nil
+}
+
+func (zones Zones) project() string {
+	return zones.interface_.project()
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/rrstype/rrstype.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/rrstype/rrstype.go
new file mode 100644
index 000000000000..414c88f7f185
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/dnsprovider/rrstype/rrstype.go
@@ -0,0 +1,27 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rrstype
+
+type (
+	RrsType string
+)
+
+const (
+	A     = RrsType("A")
+	CNAME = RrsType("CNAME")
+	// TODO: Add other types as required
+)
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/federation-controller/OWNERS b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/federation-controller/OWNERS
new file mode 100644
index 000000000000..c6b4c5c4f653
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/federation-controller/OWNERS
@@ -0,0 +1,4 @@
+assignees:
+  - quinton-hoole
+  - nikhiljindal
+  - madhusundancs
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/federation-controller/cluster/cluster_client.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/federation-controller/cluster/cluster_client.go
new file mode 100644
index 000000000000..7f5aa57e2172
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/federation-controller/cluster/cluster_client.go
@@ -0,0 +1,220 @@
+/*
+Copyright 2014 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cluster
+
+import (
+	"fmt"
+	"net"
+	"os"
+	"strings"
+
+	"github.com/golang/glog"
+	federation_v1alpha1 "k8s.io/kubernetes/federation/apis/federation/v1alpha1"
+	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/api/unversioned"
+	"k8s.io/kubernetes/pkg/api/v1"
+	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+	"k8s.io/kubernetes/pkg/client/restclient"
+	"k8s.io/kubernetes/pkg/client/typed/discovery"
+	client "k8s.io/kubernetes/pkg/client/unversioned"
+	"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
+	clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api"
+	utilnet "k8s.io/kubernetes/pkg/util/net"
+	"k8s.io/kubernetes/pkg/util/sets"
+)
+
+const (
+	UserAgentName           = "Cluster-Controller"
+	KubeAPIQPS              = 20.0
+	KubeAPIBurst            = 30
+	KubeconfigSecretDataKey = "kubeconfig"
+)
+
+// This is to inject a different kubeconfigGetter in tests.
+// We don't use the standard one, which calls NewInCluster, in tests, to avoid
+// having to set up service accounts and mount files with secret tokens.
+var KubeconfigGetterForCluster = func(c *federation_v1alpha1.Cluster) clientcmd.KubeconfigGetter {
+	return func() (*clientcmdapi.Config, error) {
+		// Get the namespace this is running in from the env variable.
+		namespace := os.Getenv("POD_NAMESPACE")
+		if namespace == "" {
+			return nil, fmt.Errorf("unexpected: POD_NAMESPACE env var returned empty string")
+		}
+		// Get a client to talk to the k8s apiserver, to fetch secrets from it.
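+		// The referenced secret is expected to carry a complete serialized
+		// kubeconfig under the "kubeconfig" key, roughly like the following
+		// (illustrative sketch only; the secret name and namespace are assumed):
+		//
+		//   apiVersion: v1
+		//   kind: Secret
+		//   metadata:
+		//     name: cluster1-kubeconfig
+		//     namespace: federation
+		//   data:
+		//     kubeconfig: <base64-encoded kubeconfig file>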
+		client, err := client.NewInCluster()
+		if err != nil {
+			return nil, fmt.Errorf("error in creating in-cluster client: %s", err)
+		}
+		secret, err := client.Secrets(namespace).Get(c.Spec.SecretRef.Name)
+		if err != nil {
+			return nil, fmt.Errorf("error in fetching secret: %s", err)
+		}
+		data, ok := secret.Data[KubeconfigSecretDataKey]
+		if !ok {
+			return nil, fmt.Errorf("secret does not have data with key: %s", KubeconfigSecretDataKey)
+		}
+		return clientcmd.Load(data)
+	}
+}
+
+type ClusterClient struct {
+	discoveryClient *discovery.DiscoveryClient
+	kubeClient      *clientset.Clientset
+}
+
+func NewClusterClientSet(c *federation_v1alpha1.Cluster) (*ClusterClient, error) {
+	var serverAddress string
+	hostIP, err := utilnet.ChooseHostInterface()
+	if err != nil {
+		return nil, err
+	}
+
+	for _, item := range c.Spec.ServerAddressByClientCIDRs {
+		_, cidrnet, err := net.ParseCIDR(item.ClientCIDR)
+		if err != nil {
+			return nil, err
+		}
+		myaddr := net.ParseIP(hostIP.String())
+		if cidrnet.Contains(myaddr) {
+			serverAddress = item.ServerAddress
+			break
+		}
+	}
+	var clusterClientSet = ClusterClient{}
+	if serverAddress != "" {
+		kubeconfigGetter := KubeconfigGetterForCluster(c)
+		clusterConfig, err := clientcmd.BuildConfigFromKubeconfigGetter(serverAddress, kubeconfigGetter)
+		if err != nil {
+			return nil, err
+		}
+		clusterConfig.QPS = KubeAPIQPS
+		clusterConfig.Burst = KubeAPIBurst
+		clusterClientSet.discoveryClient = discovery.NewDiscoveryClientForConfigOrDie(restclient.AddUserAgent(clusterConfig, UserAgentName))
+		if clusterClientSet.discoveryClient == nil {
+			return nil, nil
+		}
+		clusterClientSet.kubeClient = clientset.NewForConfigOrDie(restclient.AddUserAgent(clusterConfig, UserAgentName))
+		if clusterClientSet.kubeClient == nil {
+			return nil, nil
+		}
+	}
+	return &clusterClientSet, nil
+}
+
+// GetClusterHealthStatus gets the kubernetes cluster health status by requesting "/healthz"
+func (self *ClusterClient) GetClusterHealthStatus() *federation_v1alpha1.ClusterStatus {
+	clusterStatus := federation_v1alpha1.ClusterStatus{}
+	currentTime := unversioned.Now()
+	newClusterReadyCondition := federation_v1alpha1.ClusterCondition{
+		Type:               federation_v1alpha1.ClusterReady,
+		Status:             v1.ConditionTrue,
+		Reason:             "ClusterReady",
+		Message:            "/healthz responded with ok",
+		LastProbeTime:      currentTime,
+		LastTransitionTime: currentTime,
+	}
+	newClusterNotReadyCondition := federation_v1alpha1.ClusterCondition{
+		Type:               federation_v1alpha1.ClusterReady,
+		Status:             v1.ConditionFalse,
+		Reason:             "ClusterNotReady",
+		Message:            "/healthz responded without ok",
+		LastProbeTime:      currentTime,
+		LastTransitionTime: currentTime,
+	}
+	newNodeOfflineCondition := federation_v1alpha1.ClusterCondition{
+		Type:               federation_v1alpha1.ClusterOffline,
+		Status:             v1.ConditionTrue,
+		Reason:             "ClusterNotReachable",
+		Message:            "cluster is not reachable",
+		LastProbeTime:      currentTime,
+		LastTransitionTime: currentTime,
+	}
+	newNodeNotOfflineCondition := federation_v1alpha1.ClusterCondition{
+		Type:               federation_v1alpha1.ClusterOffline,
+		Status:             v1.ConditionFalse,
+		Reason:             "ClusterReachable",
+		Message:            "cluster is reachable",
+		LastProbeTime:      currentTime,
+		LastTransitionTime: currentTime,
+	}
+	body, err := self.discoveryClient.Get().AbsPath("/healthz").Do().Raw()
+	if err != nil {
+		clusterStatus.Conditions = append(clusterStatus.Conditions, newNodeOfflineCondition)
+	} else {
+		if !strings.EqualFold(strings.TrimSpace(string(body)), "ok") {
+			clusterStatus.Conditions = append(clusterStatus.Conditions, newClusterNotReadyCondition, newNodeNotOfflineCondition)
+		} else {
+			clusterStatus.Conditions = append(clusterStatus.Conditions, newClusterReadyCondition, newNodeNotOfflineCondition)
+		}
+	}
+	return &clusterStatus
+}
+
+// GetClusterZones gets the kubernetes cluster zones and region by inspecting labels on nodes in the cluster.
+func (self *ClusterClient) GetClusterZones() (zones []string, region string, err error) {
+	return getZoneNames(self.kubeClient)
+}
+
+// Find the name of the zone in which a Node is running
+func getZoneNameForNode(node api.Node) (string, error) {
+	for key, value := range node.Labels {
+		if key == unversioned.LabelZoneFailureDomain {
+			return value, nil
+		}
+	}
+	return "", fmt.Errorf("Zone name for node %s not found. No label with key %s",
+		node.Name, unversioned.LabelZoneFailureDomain)
+}
+
+// Find the name of the region in which a Node is running
+func getRegionNameForNode(node api.Node) (string, error) {
+	for key, value := range node.Labels {
+		if key == unversioned.LabelZoneRegion {
+			return value, nil
+		}
+	}
+	return "", fmt.Errorf("Region name for node %s not found. No label with key %s",
+		node.Name, unversioned.LabelZoneRegion)
+}
+
+// Find the names of all zones and the region in which we have nodes in this cluster.
+func getZoneNames(client *clientset.Clientset) (zones []string, region string, err error) {
+	zoneNames := sets.NewString()
+	nodes, err := client.Core().Nodes().List(api.ListOptions{})
+	if err != nil {
+		glog.Errorf("Failed to list nodes while getting zone names: %v", err)
+		return nil, "", err
+	}
+	for i, node := range nodes.Items {
+		// TODO: quinton-hoole make this more efficient.
+		//       For non-multi-zone clusters the zone will be identical for all
+		//       nodes, so we only need to look at one node.
+		//       For multi-zone clusters we know at build time which zones are
+		//       included. Rather get this info from there, because it's cheaper.
+		zoneName, err := getZoneNameForNode(node)
+		if err != nil {
+			return nil, "", err
+		}
+		zoneNames.Insert(zoneName)
+		if i == 0 {
+			region, err = getRegionNameForNode(node)
+			if err != nil {
+				return nil, "", err
+			}
+		}
+	}
+	return zoneNames.List(), region, nil
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/federation-controller/cluster/clustercontroller.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/federation-controller/cluster/clustercontroller.go
new file mode 100644
index 000000000000..1d3d2d5b88b0
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/federation-controller/cluster/clustercontroller.go
@@ -0,0 +1,211 @@
+/*
+Copyright 2014 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cluster
+
+import (
+	"strings"
+	"time"
+
+	"github.com/golang/glog"
+	"k8s.io/kubernetes/federation/apis/federation"
+	federation_v1alpha1 "k8s.io/kubernetes/federation/apis/federation/v1alpha1"
+	cluster_cache "k8s.io/kubernetes/federation/client/cache"
+	federationclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_3"
+	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/client/cache"
+	"k8s.io/kubernetes/pkg/controller"
+	"k8s.io/kubernetes/pkg/controller/framework"
+	"k8s.io/kubernetes/pkg/runtime"
+	utilruntime "k8s.io/kubernetes/pkg/util/runtime"
+	"k8s.io/kubernetes/pkg/util/sets"
+	"k8s.io/kubernetes/pkg/util/wait"
+	"k8s.io/kubernetes/pkg/watch"
+)
+
+type ClusterController struct {
+	knownClusterSet sets.String
+
+	// federationClient is used to operate on clusters
+	federationClient federationclientset.Interface
+
+	// clusterMonitorPeriod is the period for updating the status of clusters
+	clusterMonitorPeriod time.Duration
+	// clusterClusterStatusMap maps clusterName to the cluster status at the last sampling
+	clusterClusterStatusMap map[string]federation_v1alpha1.ClusterStatus
+
+	// clusterKubeClientMap maps clusterName to a restclient for that cluster
+	clusterKubeClientMap map[string]ClusterClient
+
+	// cluster framework and store
+	clusterController *framework.Controller
+	clusterStore      cluster_cache.StoreToClusterLister
+}
+
+// NewclusterController returns a new cluster controller
+func NewclusterController(federationClient federationclientset.Interface, clusterMonitorPeriod time.Duration) *ClusterController {
+	cc := &ClusterController{
+		knownClusterSet:         make(sets.String),
+		federationClient:        federationClient,
+		clusterMonitorPeriod:    clusterMonitorPeriod,
+		clusterClusterStatusMap: make(map[string]federation_v1alpha1.ClusterStatus),
+		clusterKubeClientMap:    make(map[string]ClusterClient),
+	}
+	cc.clusterStore.Store, cc.clusterController = framework.NewInformer(
+		&cache.ListWatch{
+			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
+				return cc.federationClient.Federation().Clusters().List(options)
+			},
+			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
+				return cc.federationClient.Federation().Clusters().Watch(options)
+			},
+		},
+		&federation.Cluster{},
+		controller.NoResyncPeriodFunc(),
+		framework.ResourceEventHandlerFuncs{
+			DeleteFunc: cc.delFromClusterSet,
+			AddFunc:    cc.addToClusterSet,
+		},
+	)
+	return cc
+}
+
+// delFromClusterSet deletes a cluster from the cluster set and
+// deletes the corresponding restclient from clusterKubeClientMap
+func (cc *ClusterController) delFromClusterSet(obj interface{}) {
+	cluster := obj.(*federation_v1alpha1.Cluster)
+	cc.knownClusterSet.Delete(cluster.Name)
+	delete(cc.clusterKubeClientMap, cluster.Name)
+}
+
+// addToClusterSet inserts the new cluster into the cluster set and creates
+// a corresponding restclient in clusterKubeClientMap
+func (cc *ClusterController) addToClusterSet(obj interface{}) {
+	cluster := obj.(*federation_v1alpha1.Cluster)
+	cc.knownClusterSet.Insert(cluster.Name)
+	// create the restclient of cluster
+	restClient, err := NewClusterClientSet(cluster)
+	if err != nil || restClient == nil {
+		glog.Errorf("Failed to create corresponding restclient of kubernetes cluster: %v", err)
+		return
+	}
+	cc.clusterKubeClientMap[cluster.Name] = *restClient
+}
+
+// Run begins watching and syncing.
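+// A minimal usage sketch (assuming a configured federation clientset; the
+// one-minute monitor period here is an arbitrary example value):
+//
+//	controller := NewclusterController(federationClient, 1*time.Minute)
+//	controller.Run()
+//
+// Run returns immediately: the cluster informer and the periodic status
+// monitor both run in background goroutines until the process exits.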
+func (cc *ClusterController) Run() {
+	defer utilruntime.HandleCrash()
+	go cc.clusterController.Run(wait.NeverStop)
+	// monitor cluster status periodically, in phase 1 we just get the health state from "/healthz"
+	go wait.Until(func() {
+		if err := cc.UpdateClusterStatus(); err != nil {
+			glog.Errorf("Error monitoring cluster status: %v", err)
+		}
+	}, cc.clusterMonitorPeriod, wait.NeverStop)
+}
+
+func (cc *ClusterController) GetClusterStatus(cluster *federation_v1alpha1.Cluster) (*federation_v1alpha1.ClusterStatus, error) {
+	// just get the status of the cluster, by requesting the restapi "/healthz"
+	clusterClient, found := cc.clusterKubeClientMap[cluster.Name]
+	if !found {
+		glog.Infof("It's a new cluster, a cluster client will be created")
+		client, err := NewClusterClientSet(cluster)
+		if err != nil || client == nil {
+			glog.Infof("Failed to create cluster client, err: %v", err)
+			return nil, err
+		}
+		clusterClient = *client
+		cc.clusterKubeClientMap[cluster.Name] = clusterClient
+	}
+	clusterStatus := clusterClient.GetClusterHealthStatus()
+	return clusterStatus, nil
+}
+
+// UpdateClusterStatus checks cluster status and gets the metrics from the cluster's restapi
+func (cc *ClusterController) UpdateClusterStatus() error {
+	clusters, err := cc.federationClient.Federation().Clusters().List(api.ListOptions{})
+	if err != nil {
+		return err
+	}
+	for _, cluster := range clusters.Items {
+		if !cc.knownClusterSet.Has(cluster.Name) {
+			glog.V(1).Infof("ClusterController observed a new cluster: %#v", cluster)
+			cc.knownClusterSet.Insert(cluster.Name)
+		}
+	}
+
+	// If there's a difference between the lengths of the known and observed cluster
+	// sets, some clusters were deleted; forget them.
+	if len(cc.knownClusterSet) != len(clusters.Items) {
+		observedSet := make(sets.String)
+		for _, cluster := range clusters.Items {
+			observedSet.Insert(cluster.Name)
+		}
+		deleted := cc.knownClusterSet.Difference(observedSet)
+		for clusterName := range deleted {
+			glog.V(1).Infof("ClusterController observed a Cluster deletion: %v", clusterName)
+			cc.knownClusterSet.Delete(clusterName)
+		}
+	}
+	for _, cluster := range clusters.Items {
+		clusterStatusNew, err := cc.GetClusterStatus(&cluster)
+		if err != nil {
+			glog.Infof("Failed to get the status of cluster %v", cluster.Name)
+			continue
+		}
+		clusterStatusOld, found := cc.clusterClusterStatusMap[cluster.Name]
+		if !found {
+			glog.Infof("No status was previously stored for cluster %v", cluster.Name)
+		} else {
+			hasTransition := false
+			if len(clusterStatusNew.Conditions) != len(clusterStatusOld.Conditions) {
+				hasTransition = true
+			} else {
+				for i := 0; i < len(clusterStatusNew.Conditions); i++ {
+					if !(strings.EqualFold(string(clusterStatusNew.Conditions[i].Type), string(clusterStatusOld.Conditions[i].Type)) &&
+						strings.EqualFold(string(clusterStatusNew.Conditions[i].Status), string(clusterStatusOld.Conditions[i].Status))) {
+						hasTransition = true
+						break
+					}
+				}
+			}
+			if !hasTransition {
+				for j := 0; j < len(clusterStatusNew.Conditions); j++ {
+					clusterStatusNew.Conditions[j].LastTransitionTime = clusterStatusOld.Conditions[j].LastTransitionTime
+				}
+			}
+		}
+		clusterClient, found := cc.clusterKubeClientMap[cluster.Name]
+		if !found {
+			glog.Warningf("Failed to get client for cluster %s", cluster.Name)
+			continue
+		}
+
+		zones, region, err := clusterClient.GetClusterZones()
+		if err != nil {
+			glog.Warningf("Failed to get zones and region for cluster %s: %v", cluster.Name, err)
+			// Don't return err here, as we want the rest of the status update to proceed.
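+			// Zone/region discovery is best-effort: getZoneNameForNode fails, for
+			// example, when a cluster's nodes carry no failure-domain labels, but
+			// the health conditions gathered above are still worth persisting.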
+		} else {
+			clusterStatusNew.Zones = zones
+			clusterStatusNew.Region = region
+		}
+		cc.clusterClusterStatusMap[cluster.Name] = *clusterStatusNew
+		cluster.Status = *clusterStatusNew
+		_, err = cc.federationClient.Federation().Clusters().UpdateStatus(&cluster)
+		if err != nil {
+			glog.Warningf("Failed to update the status of cluster %v, error is: %v", cluster.Name, err)
+			// Don't return err here, as we want to continue processing remaining clusters.
+			continue
+		}
+	}
+	return nil
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/federation-controller/cluster/clustercontroller_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/federation-controller/cluster/clustercontroller_test.go
new file mode 100644
index 000000000000..ca7c918a01a7
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/federation-controller/cluster/clustercontroller_test.go
@@ -0,0 +1,150 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cluster
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"net/http/httptest"
+	"testing"
+
+	federation_v1alpha1 "k8s.io/kubernetes/federation/apis/federation/v1alpha1"
+	federationclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_3"
+	"k8s.io/kubernetes/pkg/api/testapi"
+	"k8s.io/kubernetes/pkg/api/unversioned"
+	"k8s.io/kubernetes/pkg/api/v1"
+	"k8s.io/kubernetes/pkg/client/restclient"
+	"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
+	clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api"
+	"k8s.io/kubernetes/pkg/util"
+)
+
+func newCluster(clusterName string, serverUrl string) *federation_v1alpha1.Cluster {
+	cluster := federation_v1alpha1.Cluster{
+		TypeMeta: unversioned.TypeMeta{APIVersion: testapi.Federation.GroupVersion().String()},
+		ObjectMeta: v1.ObjectMeta{
+			UID:  util.NewUUID(),
+			Name: clusterName,
+		},
+		Spec: federation_v1alpha1.ClusterSpec{
+			ServerAddressByClientCIDRs: []federation_v1alpha1.ServerAddressByClientCIDR{
+				{
+					ClientCIDR:    "0.0.0.0/0",
+					ServerAddress: serverUrl,
+				},
+			},
+		},
+	}
+	return &cluster
+}
+
+func newClusterList(cluster *federation_v1alpha1.Cluster) *federation_v1alpha1.ClusterList {
+	clusterList := federation_v1alpha1.ClusterList{
+		TypeMeta: unversioned.TypeMeta{APIVersion: testapi.Federation.GroupVersion().String()},
+		ListMeta: unversioned.ListMeta{
+			SelfLink: "foobar",
+		},
+		Items: []federation_v1alpha1.Cluster{},
+	}
+	clusterList.Items = append(clusterList.Items, *cluster)
+	return &clusterList
+}
+
+// createHttptestFakeHandlerForFederation initializes a fake http handler that
+// simulates a federation apiserver, responding to "PUT" and "GET" requests
+// (other methods get an empty response).
+// When "canBeGotten" is false, the cluster list cannot be fetched from the apiserver.
+func createHttptestFakeHandlerForFederation(clusterList *federation_v1alpha1.ClusterList, canBeGotten bool) *http.HandlerFunc {
+	fakeHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		clusterListString, _ := json.Marshal(*clusterList)
+		w.Header().Set("Content-Type", "application/json")
+		switch r.Method {
+		case "PUT":
+			fmt.Fprintln(w, string(clusterListString))
+		case "GET":
+			if canBeGotten {
+				fmt.Fprintln(w, string(clusterListString))
+			} else {
+				fmt.Fprintln(w, "")
+			}
+		default:
+			fmt.Fprintln(w, "")
+		}
+	})
+	return &fakeHandler
+}
+
+// createHttptestFakeHandlerForCluster initializes a fake http handler that
+// simulates a cluster apiserver serving "/healthz".
+// When "canBeGotten" is false, no healthy response can be fetched from the apiserver.
+func createHttptestFakeHandlerForCluster(canBeGotten bool) *http.HandlerFunc {
+	fakeHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		w.Header().Set("Content-Type", "application/json")
+		switch r.Method {
+		case "GET":
+			if canBeGotten {
+				fmt.Fprintln(w, "ok")
+			} else {
+				w.WriteHeader(http.StatusNotFound)
+			}
+		default:
+			fmt.Fprintln(w, "")
+		}
+	})
+	return &fakeHandler
+}
+
+func TestUpdateClusterStatusOK(t *testing.T) {
+	clusterName := "foobarCluster"
+	// create dummy httpserver
+	testClusterServer := httptest.NewServer(createHttptestFakeHandlerForCluster(true))
+	defer testClusterServer.Close()
+	federationCluster := newCluster(clusterName, testClusterServer.URL)
+	federationClusterList := newClusterList(federationCluster)
+
+	testFederationServer := httptest.NewServer(createHttptestFakeHandlerForFederation(federationClusterList, true))
+	defer testFederationServer.Close()
+
+	restClientCfg, err := clientcmd.BuildConfigFromFlags(testFederationServer.URL, "")
+	if err != nil {
+		t.Fatalf("Failed to build client config: %v", err)
+	}
+	federationClientSet := federationclientset.NewForConfigOrDie(restclient.AddUserAgent(restClientCfg, "cluster-controller"))
+
+	// Override KubeconfigGetterForCluster to avoid having to set up service accounts and mount files with secret tokens.
+	originalGetter := KubeconfigGetterForCluster
+	KubeconfigGetterForCluster = func(c *federation_v1alpha1.Cluster) clientcmd.KubeconfigGetter {
+		return func() (*clientcmdapi.Config, error) {
+			return &clientcmdapi.Config{}, nil
+		}
+	}
+
+	manager := NewclusterController(federationClientSet, 5)
+	err = manager.UpdateClusterStatus()
+	if err != nil {
+		t.Errorf("Failed to Update Cluster Status: %v", err)
+	}
+	clusterStatus, found := manager.clusterClusterStatusMap[clusterName]
+	if !found {
+		t.Errorf("Failed to Update Cluster Status")
+	} else {
+		if (clusterStatus.Conditions[1].Status != v1.ConditionFalse) || (clusterStatus.Conditions[1].Type != federation_v1alpha1.ClusterOffline) {
+			t.Errorf("Failed to Update Cluster Status")
+		}
+	}
+
+	// Reset KubeconfigGetterForCluster
+	KubeconfigGetterForCluster = originalGetter
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/federation-controller/cluster/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/federation-controller/cluster/doc.go
new file mode 100644
index 000000000000..0815e8418a14
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/federation-controller/cluster/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package cluster contains code for syncing cluster +package cluster diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/federation-controller/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/federation-controller/doc.go new file mode 100644 index 000000000000..2d806482ed46 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/federation-controller/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package federation_controller contains code for controllers (like the cluster +// controller). +package federation_controller diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/federation-controller/service/cluster_helper.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/federation-controller/service/cluster_helper.go new file mode 100644 index 000000000000..ac8093d651e0 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/federation-controller/service/cluster_helper.go @@ -0,0 +1,207 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package service
+
+import (
+	"sync"
+
+	"k8s.io/kubernetes/federation/apis/federation"
+	"k8s.io/kubernetes/pkg/api"
+	cache "k8s.io/kubernetes/pkg/client/cache"
+	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+	"k8s.io/kubernetes/pkg/client/restclient"
+	"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
+	"k8s.io/kubernetes/pkg/controller/framework"
+	pkg_runtime "k8s.io/kubernetes/pkg/runtime"
+	"k8s.io/kubernetes/pkg/util/wait"
+	"k8s.io/kubernetes/pkg/util/workqueue"
+	"k8s.io/kubernetes/pkg/watch"
+
+	"github.com/golang/glog"
+	"reflect"
+)
+
+type clusterCache struct {
+	clientset *clientset.Clientset
+	cluster   *federation.Cluster
+	// A store of services, populated by the serviceController
+	serviceStore cache.StoreToServiceLister
+	// Watches changes to all services
+	serviceController *framework.Controller
+	// A store of endpoints, populated by the endpointController
+	endpointStore cache.StoreToEndpointsLister
+	// Watches changes to all endpoints
+	endpointController *framework.Controller
+	// services that need to be synced
+	serviceQueue *workqueue.Type
+	// endpoints that need to be synced
+	endpointQueue *workqueue.Type
+}
+
+type clusterClientCache struct {
+	rwlock    sync.Mutex // protects clientMap
+	clientMap map[string]*clusterCache
+}
+
+func (cc *clusterClientCache) startClusterLW(cluster *federation.Cluster, clusterName string) {
+	cachedClusterClient, ok := cc.clientMap[clusterName]
+	// only create when no existing cachedClusterClient
+	if ok {
+		if !reflect.DeepEqual(cachedClusterClient.cluster.Spec, cluster.Spec) {
+			// rebuild clientset when cluster spec is changed
+			clientset, err := newClusterClientset(cluster)
+			if err != nil || clientset == nil {
+				glog.Errorf("Failed to create corresponding restclient of kubernetes cluster: %v", err)
+				return
+			}
+			glog.V(4).Infof("Cluster spec changed, rebuild clientset for cluster %s", clusterName)
+			cachedClusterClient.clientset = clientset
+			go cachedClusterClient.serviceController.Run(wait.NeverStop)
+			go cachedClusterClient.endpointController.Run(wait.NeverStop)
+			glog.V(2).Infof("Start watching services and endpoints on cluster %s", clusterName)
+		} else {
+			// do nothing when there is no spec change
+			glog.V(4).Infof("Keep clientset for cluster %s", clusterName)
+			return
+		}
+	} else {
+		glog.V(4).Infof("No client cache for cluster %s, building new", clusterName)
+		clientset, err := newClusterClientset(cluster)
+		if err != nil || clientset == nil {
+			glog.Errorf("Failed to create corresponding restclient of kubernetes cluster: %v", err)
+			return
+		}
+		cachedClusterClient = &clusterCache{
+			cluster:       cluster,
+			clientset:     clientset,
+			serviceQueue:  workqueue.New(),
+			endpointQueue: workqueue.New(),
+		}
+		cachedClusterClient.endpointStore.Store, cachedClusterClient.endpointController = framework.NewInformer(
+			&cache.ListWatch{
+				ListFunc: func(options api.ListOptions) (pkg_runtime.Object, error) {
+					return clientset.Core().Endpoints(api.NamespaceAll).List(options)
+				},
+				WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
+					return clientset.Core().Endpoints(api.NamespaceAll).Watch(options)
+				},
+			},
+			&api.Endpoints{},
+			serviceSyncPeriod,
+			framework.ResourceEventHandlerFuncs{
+				AddFunc: func(obj interface{}) {
+					cc.enqueueEndpoint(obj, clusterName)
+				},
+				UpdateFunc: func(old, cur interface{}) {
+					cc.enqueueEndpoint(cur, clusterName)
+				},
+				DeleteFunc: func(obj interface{}) {
+					cc.enqueueEndpoint(obj, clusterName)
+				},
+			},
+		)
+
+		cachedClusterClient.serviceStore.Store, cachedClusterClient.serviceController = framework.NewInformer(
+			&cache.ListWatch{
+				ListFunc: func(options api.ListOptions) (pkg_runtime.Object, error) {
+					return clientset.Core().Services(api.NamespaceAll).List(options)
+				},
+				WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
+					return clientset.Core().Services(api.NamespaceAll).Watch(options)
+				},
+			},
+			&api.Service{},
+			serviceSyncPeriod,
+			framework.ResourceEventHandlerFuncs{
+				AddFunc: func(obj interface{}) {
+					cc.enqueueService(obj, clusterName)
+				},
+				UpdateFunc: func(old, cur interface{}) {
+					oldService, ok := old.(*api.Service)
+					if !ok {
+						return
+					}
+					curService, ok := cur.(*api.Service)
+					if !ok {
+						return
+					}
+					if !reflect.DeepEqual(oldService.Status.LoadBalancer, curService.Status.LoadBalancer) {
+						cc.enqueueService(cur, clusterName)
+					}
+				},
+				DeleteFunc: func(obj interface{}) {
+					cc.enqueueService(obj, clusterName)
+					if service, ok := obj.(*api.Service); ok {
+						glog.V(2).Infof("Service %s/%s deletion observed and enqueued to service queue for cluster %s", service.Namespace, service.Name, clusterName)
+					}
+				},
+			},
+		)
+		cc.clientMap[clusterName] = cachedClusterClient
+		go cachedClusterClient.serviceController.Run(wait.NeverStop)
+		go cachedClusterClient.endpointController.Run(wait.NeverStop)
+		glog.V(2).Infof("Start watching services and endpoints on cluster %s", clusterName)
+	}
+}
+
+// TODO: copied from the cluster controller; make this a common function in pass 2
+// delFromClusterSet deletes a cluster from the cluster set and
+// deletes the corresponding client from clientMap
+func (cc *clusterClientCache) delFromClusterSet(obj interface{}) {
+	cluster, ok := obj.(*federation.Cluster)
+	cc.rwlock.Lock()
+	defer cc.rwlock.Unlock()
+	if ok {
+		delete(cc.clientMap, cluster.Name)
+	} else {
+		tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
+		if !ok {
+			glog.Infof("Object contained wasn't a cluster or a deleted key: %+v", obj)
+			return
+		}
+		glog.Infof("Found tombstone for %v", obj)
+		delete(cc.clientMap, tombstone.Key)
+	}
+}
+
+// addToClientMap adds the new cluster's client to clientMap, and starts the
+// corresponding list/watch, when the cluster is ready
+func (cc *clusterClientCache) addToClientMap(obj interface{}) {
+	cc.rwlock.Lock()
+	defer cc.rwlock.Unlock()
+	cluster, ok := obj.(*federation.Cluster)
+	if !ok {
+		return
+	}
+	pred := getClusterConditionPredicate()
+	// check status; skip if not ready
+	if pred(*cluster) {
+		cc.startClusterLW(cluster, cluster.Name)
+	}
+}
+
+func newClusterClientset(c *federation.Cluster) (*clientset.Clientset, error) {
+	clusterConfig, err := clientcmd.BuildConfigFromFlags(c.Spec.ServerAddressByClientCIDRs[0].ServerAddress, "")
+	if err != nil {
+		return nil, err
+	}
+	clusterConfig.QPS = KubeAPIQPS
+	clusterConfig.Burst = KubeAPIBurst
+	clientset := clientset.NewForConfigOrDie(restclient.AddUserAgent(clusterConfig, UserAgentName))
+	return clientset, nil
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/federation-controller/service/dns.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/federation-controller/service/dns.go
new file mode 100644
index 000000000000..d393fec9565b
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/federation-controller/service/dns.go
@@ -0,0 +1,40 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package service + +// getClusterZoneName returns the name of the zone where the specified cluster exists (e.g. "us-east1-c" on GCE, or "us-east-1b" on AWS) +func getClusterZoneName(clusterName string) string { + // TODO: quinton: Get this from the cluster API object - from the annotation on a node in the cluster - it doesn't contain this yet. + return "zone-of-cluster-" + clusterName +} + +// getClusterRegionName returns the name of the region where the specified cluster exists (e.g. us-east1 on GCE, or "us-east-1" on AWS) +func getClusterRegionName(clusterName string) string { + // TODO: quinton: Get this from the cluster API object - from the annotation on a node in the cluster - it doesn't contain this yet. + return "region-of-cluster-" + clusterName +} + +// getFederationDNSZoneName returns the name of the managed DNS Zone configured for this federation +func getFederationDNSZoneName() string { + return "mydomain.com" // TODO: quinton: Get this from the federation configuration. +} + +func ensureDNSRecords(clusterName string, cachedService *cachedService) error { + // Quinton: Pseudocode.... + + return nil +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/federation-controller/service/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/federation-controller/service/doc.go new file mode 100644 index 000000000000..aebc7837f004 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/federation-controller/service/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package service contains code for syncing Kubernetes services, +// and cloud DNS servers with the federated service registry. +package service diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/federation-controller/service/endpoint_helper.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/federation-controller/service/endpoint_helper.go new file mode 100644 index 000000000000..0358860e516c --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/federation-controller/service/endpoint_helper.go @@ -0,0 +1,162 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package service
+
+import (
+	"fmt"
+	"time"
+
+	federationclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset"
+	"k8s.io/kubernetes/pkg/api"
+	cache "k8s.io/kubernetes/pkg/client/cache"
+	"k8s.io/kubernetes/pkg/controller"
+
+	"github.com/golang/glog"
+)
+
+// clusterEndpointWorker runs a worker thread per cluster that dequeues endpoint
+// keys, processes them, and marks them done.
+// It enforces that syncEndpoint is never invoked concurrently with the same key.
+func (sc *ServiceController) clusterEndpointWorker() {
+	fedClient := sc.federationClient
+	for clusterName, cache := range sc.clusterCache.clientMap {
+		go func(cache *clusterCache, clusterName string) {
+			for {
+				func() {
+					key, quit := cache.endpointQueue.Get()
+					if quit {
+						return
+					}
+					defer cache.endpointQueue.Done(key)
+					err := sc.clusterCache.syncEndpoint(key.(string), clusterName, cache, sc.serviceCache, fedClient)
+					if err != nil {
+						glog.V(2).Infof("Failed to sync endpoint: %+v", err)
+					}
+				}()
+			}
+		}(cache, clusterName)
+	}
+}
+
+// syncEndpoint updates the federation service whenever there is a change to an
+// endpoint in an underlying cluster; key is the namespaced name of the endpoint.
+func (cc *clusterClientCache) syncEndpoint(key, clusterName string, clusterCache *clusterCache, serviceCache *serviceCache, fedClient federationclientset.Interface) error {
+	cachedService, ok := serviceCache.get(key)
+	if !ok {
+		// here we filter out all non-federation services
+		return nil
+	}
+	endpointInterface, exists, err := clusterCache.endpointStore.GetByKey(key)
+	if err != nil {
+		glog.Infof("Did not successfully get %v from store: %v, will retry later", key, err)
+		clusterCache.endpointQueue.Add(key)
+		return err
+	}
+	if exists {
+		endpoint, ok := endpointInterface.(*api.Endpoints)
+		if ok {
+			glog.V(4).Infof("Found endpoint for federation service %s/%s from cluster %s", endpoint.Namespace, endpoint.Name, clusterName)
+			err = cc.processEndpointUpdate(cachedService, endpoint, clusterName)
+		} else {
+			_, ok := endpointInterface.(cache.DeletedFinalStateUnknown)
+			if !ok {
+				return fmt.Errorf("Object contained wasn't an endpoint or a deleted key: %+v", endpointInterface)
+			}
+			glog.Infof("Found tombstone for %v", key)
+			err = cc.processEndpointDeletion(cachedService, clusterName)
+		}
+	} else {
+		// endpoint absence in store means the watcher caught the deletion; ensure DNS records are cleaned
+		glog.Infof("Can not get endpoint %v for cluster %s from endpointStore", key, clusterName)
+		err = cc.processEndpointDeletion(cachedService, clusterName)
+	}
+	if err != nil {
+		glog.Errorf("Failed to sync endpoint: %+v, put back to endpoint queue", err)
+		clusterCache.endpointQueue.Add(key)
+	}
+	cachedService.resetDNSUpdateDelay()
+	return nil
+}
+
+func (cc *clusterClientCache) processEndpointDeletion(cachedService *cachedService, clusterName string) error {
+	glog.V(4).Infof("Processing endpoint deletion for %s/%s, cluster %s", cachedService.lastState.Namespace, cachedService.lastState.Name, clusterName)
+	var err error
+	cachedService.rwlock.Lock()
+	defer cachedService.rwlock.Unlock()
+	_, ok := cachedService.endpointMap[clusterName]
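+	// endpointMap records a 1 for every cluster that currently has ready
+	// endpoints backing this federation service, e.g. (hypothetical values)
+	// {"cluster-us-east": 1, "cluster-eu-west": 1}; losing the entry for a
+	// cluster means its DNS records should be withdrawn.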
+	// TODO: remove the ok check? If the service controller is restarted, the
+	// endpointMap entry for the cluster does not exist; we would need to query
+	// DNS info from the dnsprovider to decide whether a deletion is needed.
+	if ok {
+		// endpoints lost, clean dns record
+		glog.V(4).Infof("Endpoints lost for %s/%s, cluster %s, cleaning up DNS records", cachedService.lastState.Namespace, cachedService.lastState.Name, clusterName)
+		// TODO: need to integrate with dns.go:ensureDNSRecords
+		for i := 0; i < clientRetryCount; i++ {
+			err = ensureDNSRecords(clusterName, cachedService)
+			if err == nil {
+				delete(cachedService.endpointMap, clusterName)
+				return nil
+			}
+			time.Sleep(cachedService.nextDNSUpdateDelay())
+		}
+	}
+	return err
+}
+
+// processEndpointUpdate updates DNS info when an endpoint update event is received.
+// We do not care about the endpoint details; we only need to know whether
+// len(endpoints.Subsets) > 0.
+func (cc *clusterClientCache) processEndpointUpdate(cachedService *cachedService, endpoint *api.Endpoints, clusterName string) error {
+	glog.V(4).Infof("Processing endpoint update for %s/%s, cluster %s", endpoint.Namespace, endpoint.Name, clusterName)
+	cachedService.rwlock.Lock()
+	defer cachedService.rwlock.Unlock()
+	for _, subset := range endpoint.Subsets {
+		if len(subset.Addresses) > 0 {
+			cachedService.endpointMap[clusterName] = 1
+		}
+	}
+	_, ok := cachedService.endpointMap[clusterName]
+	if !ok {
+		// first time we see endpoints for this cluster, update the dns record
+		glog.V(4).Infof("Cached endpoint was not found for %s/%s, cluster %s, building one", endpoint.Namespace, endpoint.Name, clusterName)
+		cachedService.endpointMap[clusterName] = 1
+		err := ensureDNSRecords(clusterName, cachedService)
+		if err != nil {
+			// TODO: need to integrate with dns.go:ensureDNSRecords
+			for i := 0; i < clientRetryCount; i++ {
+				time.Sleep(cachedService.nextDNSUpdateDelay())
+				err := ensureDNSRecords(clusterName, cachedService)
+				if err == nil {
+					return nil
+				}
+			}
+			return err
+		}
+	}
+	return nil
+}
+
+// obj could be an *api.Endpoints, or a DeletionFinalStateUnknown marker item.
+func (cc *clusterClientCache) enqueueEndpoint(obj interface{}, clusterName string) {
+	key, err := controller.KeyFunc(obj)
+	if err != nil {
+		glog.Errorf("Couldn't get key for object %+v: %v", obj, err)
+		return
+	}
+	_, ok := cc.clientMap[clusterName]
+	if ok {
+		cc.clientMap[clusterName].endpointQueue.Add(key)
+	}
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/federation-controller/service/endpoint_helper_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/federation-controller/service/endpoint_helper_test.go
new file mode 100644
index 000000000000..944030ce64fc
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/federation-controller/service/endpoint_helper_test.go
@@ -0,0 +1,120 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package service + +import ( + "testing" + + "k8s.io/kubernetes/pkg/api" +) + +func buildEndpoint(subsets [][]string) *api.Endpoints { + endpoint := &api.Endpoints{ + Subsets: []api.EndpointSubset{ + {Addresses: []api.EndpointAddress{}}, + }, + } + for _, element := range subsets { + address := api.EndpointAddress{IP: element[0], Hostname: element[1], TargetRef: nil} + endpoint.Subsets[0].Addresses = append(endpoint.Subsets[0].Addresses, address) + } + return endpoint +} + +func TestProcessEndpointUpdate(t *testing.T) { + cc := clusterClientCache{ + clientMap: make(map[string]*clusterCache), + } + tests := []struct { + name string + cachedService *cachedService + endpoint *api.Endpoints + clusterName string + expectResult int + }{ + { + "no-cache", + &cachedService{ + lastState: &api.Service{}, + endpointMap: make(map[string]int), + }, + buildEndpoint([][]string{{"ip1", ""}}), + "foo", + 1, + }, + { + "has-cache", + &cachedService{ + lastState: &api.Service{}, + endpointMap: map[string]int{ + "foo": 1, + }, + }, + buildEndpoint([][]string{{"ip1", ""}}), + "foo", + 1, + }, + } + for _, test := range tests { + cc.processEndpointUpdate(test.cachedService, test.endpoint, test.clusterName) + if test.expectResult != test.cachedService.endpointMap[test.clusterName] { + t.Errorf("Test failed for %s, expected %v, saw %v", test.name, test.expectResult, test.cachedService.endpointMap[test.clusterName]) + } + } +} + +func TestProcessEndpointDeletion(t *testing.T) { + cc := clusterClientCache{ + clientMap: make(map[string]*clusterCache), + } + tests := []struct { + name string + cachedService *cachedService + endpoint *api.Endpoints + clusterName string + expectResult int + }{ + { + "no-cache", + &cachedService{ + lastState: &api.Service{}, + endpointMap: make(map[string]int), + }, + buildEndpoint([][]string{{"ip1", ""}}), + "foo", + 0, + }, + { + "has-cache", + &cachedService{ + lastState: &api.Service{}, + endpointMap: map[string]int{ + "foo": 1, + }, + }, + buildEndpoint([][]string{{"ip1", ""}}), + "foo", + 0, + }, + } + for _, test := range tests { + cc.processEndpointDeletion(test.cachedService, test.clusterName) + if test.expectResult != test.cachedService.endpointMap[test.clusterName] { + t.Errorf("Test failed for %s, expected %v, saw %v", test.name, test.expectResult, test.cachedService.endpointMap[test.clusterName]) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/federation-controller/service/service_helper.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/federation-controller/service/service_helper.go new file mode 100644 index 000000000000..5a2c4fca3639 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/federation-controller/service/service_helper.go @@ -0,0 +1,253 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package service
+
+import (
+	"fmt"
+	"time"
+
+	federationclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset"
+	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/api/errors"
+	cache "k8s.io/kubernetes/pkg/client/cache"
+	"k8s.io/kubernetes/pkg/controller"
+
+	"github.com/golang/glog"
+	"reflect"
+	"sort"
+)
+
+// clusterServiceWorker runs a worker thread per cluster that dequeues service
+// keys, processes them, and marks them done.
+// It enforces that syncService is never invoked concurrently with the same key.
+func (sc *ServiceController) clusterServiceWorker() {
+	fedClient := sc.federationClient
+	for clusterName, cache := range sc.clusterCache.clientMap {
+		go func(cache *clusterCache, clusterName string) {
+			for {
+				func() {
+					key, quit := cache.serviceQueue.Get()
+					if quit {
+						return
+					}
+					defer cache.serviceQueue.Done(key)
+					err := sc.clusterCache.syncService(key.(string), clusterName, cache, sc.serviceCache, fedClient)
+					if err != nil {
+						glog.Errorf("Failed to sync service: %+v", err)
+					}
+				}()
+			}
+		}(cache, clusterName)
+	}
+}
+
+// syncService updates the federation service whenever there is a change to a
+// service in an underlying cluster.
+func (cc *clusterClientCache) syncService(key, clusterName string, clusterCache *clusterCache, serviceCache *serviceCache, fedClient federationclientset.Interface) error {
+	// obj holds the latest service info from the apiserver; return if there is no federation cache for the service
+	cachedService, ok := serviceCache.get(key)
+	if !ok {
+		// if the serviceCache entry does not exist, the service was not created by federation, so we skip it
+		return nil
+	}
+	serviceInterface, exists, err := clusterCache.serviceStore.GetByKey(key)
+	if err != nil {
+		glog.Infof("Did not successfully get %v from store: %v, will retry later", key, err)
+		clusterCache.serviceQueue.Add(key)
+		return err
+	}
+	var needUpdate bool
+	if exists {
+		service, ok := serviceInterface.(*api.Service)
+		if ok {
+			glog.V(4).Infof("Found service for federation service %s/%s from cluster %s", service.Namespace, service.Name, clusterName)
+			needUpdate = cc.processServiceUpdate(cachedService, service, clusterName)
+		} else {
+			_, ok := serviceInterface.(cache.DeletedFinalStateUnknown)
+			if !ok {
+				return fmt.Errorf("Object contained wasn't a service or a deleted key: %+v", serviceInterface)
+			}
+			glog.Infof("Found tombstone for %v", key)
+			needUpdate = cc.processServiceDeletion(cachedService, clusterName)
+		}
+	} else {
+		glog.Infof("Can not get service %v for cluster %s from serviceStore", key, clusterName)
+		needUpdate = cc.processServiceDeletion(cachedService, clusterName)
+	}
+
+	if needUpdate {
+		err := cc.persistFedServiceUpdate(cachedService, fedClient)
+		if err == nil {
+			cachedService.appliedState = cachedService.lastState
+			cachedService.resetFedUpdateDelay()
+		} else {
+			glog.Errorf("Failed to sync service: %+v, put back to service queue", err)
+			clusterCache.serviceQueue.Add(key)
+		}
+	}
+	return nil
+}
+
+// processServiceDeletion is triggered when a service is deleted from an underlying
+// k8s cluster. It wipes out the cached ingress info of the service from the
+// federation service ingress. The function returns a bool to indicate whether an
+// actual update happened on the federation service cache; if it did, the updated
+// info should be posted to the federation apiserver.
+func (cc *clusterClientCache) processServiceDeletion(cachedService *cachedService, clusterName string) bool {
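+	// Worked example (hypothetical values): if the federation status currently
+	// holds ingress IPs {ip1, ip2, ip3} and cluster "foo" contributed {ip2},
+	// deleting the service from "foo" leaves {ip1, ip3} and returns true so
+	// that the caller persists the change to the federation apiserver.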
+	cachedService.rwlock.Lock()
+	defer cachedService.rwlock.Unlock()
+	cachedStatus, ok := cachedService.serviceStatusMap[clusterName]
+	// cached status found, remove ingress info from federation service cache
+	if ok {
+		cachedFedServiceStatus := cachedService.lastState.Status.LoadBalancer
+		removeIndexes := []int{}
+		for i, fed := range cachedFedServiceStatus.Ingress {
+			for _, new := range cachedStatus.Ingress {
+				// remove if same ingress record found
+				if new.IP == fed.IP && new.Hostname == fed.Hostname {
+					removeIndexes = append(removeIndexes, i)
+				}
+			}
+		}
+		sort.Ints(removeIndexes)
+		for i := len(removeIndexes) - 1; i >= 0; i-- {
+			cachedFedServiceStatus.Ingress = append(cachedFedServiceStatus.Ingress[:removeIndexes[i]], cachedFedServiceStatus.Ingress[removeIndexes[i]+1:]...)
+			glog.V(4).Infof("Removed old ingress %d for service %s/%s", removeIndexes[i], cachedService.lastState.Namespace, cachedService.lastState.Name)
+		}
+		delete(cachedService.serviceStatusMap, clusterName)
+		delete(cachedService.endpointMap, clusterName)
+		cachedService.lastState.Status.LoadBalancer = cachedFedServiceStatus
+		return true
+	} else {
+		glog.V(4).Infof("Service removal %s/%s from cluster %s observed.", cachedService.lastState.Namespace, cachedService.lastState.Name, clusterName)
+	}
+	return false
+}
+
+// processServiceUpdate updates ingress info when the service is updated.
+// The function returns a bool to indicate whether an actual update happened on
+// the federation service cache; if it did, the updated info should be posted to
+// the federation apiserver.
+func (cc *clusterClientCache) processServiceUpdate(cachedService *cachedService, service *api.Service, clusterName string) bool {
+	glog.V(4).Infof("Processing service update for %s/%s, cluster %s", service.Namespace, service.Name, clusterName)
+	cachedService.rwlock.Lock()
+	defer cachedService.rwlock.Unlock()
+	var needUpdate bool
+	newServiceLB := service.Status.LoadBalancer
+	cachedFedServiceStatus := cachedService.lastState.Status.LoadBalancer
+	if len(newServiceLB.Ingress) == 0 {
+		// no LB IP yet
+		return false
+	}
+
+	cachedStatus, ok := cachedService.serviceStatusMap[clusterName]
+	if ok {
+		if reflect.DeepEqual(cachedStatus, newServiceLB) {
+			glog.V(4).Infof("Same ingress info observed for service %s/%s: %+v", service.Namespace, service.Name, cachedStatus.Ingress)
+		} else {
+			glog.V(4).Infof("Ingress info was changed for service %s/%s: cache: %+v, new: %+v",
+				service.Namespace, service.Name, cachedStatus.Ingress, newServiceLB)
+			needUpdate = true
+		}
+	} else {
+		glog.V(4).Infof("Cached service status was not found for %s/%s, cluster %s, building one", service.Namespace, service.Name, clusterName)
+
+		// The cache is not always reliable (it is cleaned when the service
+		// controller restarts), so two cases run into this branch:
+		// 1. new service loadbalancer info received -> no info in cache, and none in the federation service
+		// 2. service controller being restarted -> no info in cache, but it is in the federation service
+
+		// check if the lb info is already in the federation service
+		cachedService.serviceStatusMap[clusterName] = newServiceLB
+		needUpdate = false
+		// iterate over the service's ingress info
+		for _, new := range newServiceLB.Ingress {
+			var found bool
+			// check whether it is already known by the federation service
+			for _, fed := range cachedFedServiceStatus.Ingress {
+				if new.IP == fed.IP && new.Hostname == fed.Hostname {
+					found = true
+				}
+			}
+			if !found {
+				needUpdate = true
+				break
+			}
+		}
+	}
+
+	if needUpdate {
+		// new status = cached federation status - cached status + new status from k8s cluster
+		removeIndexes := []int{}
+		for i, fed := range cachedFedServiceStatus.Ingress {
+			for _, new := range cachedStatus.Ingress {
+				// remove if same ingress record found
+				if new.IP == fed.IP && new.Hostname == fed.Hostname {
+					removeIndexes = append(removeIndexes, i)
+				}
+			}
+		}
+		sort.Ints(removeIndexes)
+		for i := len(removeIndexes) - 1; i >= 0; i-- {
+			cachedFedServiceStatus.Ingress = append(cachedFedServiceStatus.Ingress[:removeIndexes[i]], cachedFedServiceStatus.Ingress[removeIndexes[i]+1:]...)
+		}
+		cachedFedServiceStatus.Ingress = append(cachedFedServiceStatus.Ingress, service.Status.LoadBalancer.Ingress...)
+		cachedService.lastState.Status.LoadBalancer = cachedFedServiceStatus
+		glog.V(4).Infof("Added new ingress info %+v for service %s/%s", service.Status.LoadBalancer, service.Namespace, service.Name)
+	} else {
+		glog.V(4).Infof("Same ingress info found for %s/%s, cluster %s", service.Namespace, service.Name, clusterName)
+	}
+	return needUpdate
+}
+
+func (cc *clusterClientCache) persistFedServiceUpdate(cachedService *cachedService, fedClient federationclientset.Interface) error {
+	service := cachedService.lastState
+	glog.V(5).Infof("Persist federation service status %s/%s", service.Namespace, service.Name)
+	var err error
+	for i := 0; i < clientRetryCount; i++ {
+		_, err = fedClient.Core().Services(service.Namespace).UpdateStatus(service)
+		if err == nil {
+			glog.V(2).Infof("Successfully updated service %s/%s to federation apiserver", service.Namespace, service.Name)
+			return nil
+		}
+		if errors.IsNotFound(err) {
+			glog.Infof("Not persisting update to service '%s/%s' that no longer exists: %v",
+				service.Namespace, service.Name, err)
+			return nil
+		}
+		if errors.IsConflict(err) {
+			glog.V(4).Infof("Not persisting update to service '%s/%s' that has been changed since we received it: %v",
+				service.Namespace, service.Name, err)
+			return err
+		}
+		time.Sleep(cachedService.nextFedUpdateDelay())
+	}
+	return err
+}
+
+// obj could be an *api.Service, or a DeletionFinalStateUnknown marker item.
+func (cc *clusterClientCache) enqueueService(obj interface{}, clusterName string) {
+	key, err := controller.KeyFunc(obj)
+	if err != nil {
+		glog.Errorf("Couldn't get key for object %+v: %v", obj, err)
+		return
+	}
+	_, ok := cc.clientMap[clusterName]
+	if ok {
+		cc.clientMap[clusterName].serviceQueue.Add(key)
+	}
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/federation-controller/service/service_helper_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/federation-controller/service/service_helper_test.go
new file mode 100644
index 000000000000..09767d3f4750
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/federation-controller/service/service_helper_test.go
@@ -0,0 +1,162 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package service + +import ( + "reflect" + "testing" + + "k8s.io/kubernetes/pkg/api" +) + +func buildServiceStatus(ingresses [][]string) api.LoadBalancerStatus { + status := api.LoadBalancerStatus{ + Ingress: []api.LoadBalancerIngress{}, + } + for _, element := range ingresses { + ingress := api.LoadBalancerIngress{IP: element[0], Hostname: element[1]} + status.Ingress = append(status.Ingress, ingress) + } + return status +} + +func TestProcessServiceUpdate(t *testing.T) { + cc := clusterClientCache{ + clientMap: make(map[string]*clusterCache), + } + tests := []struct { + name string + cachedService *cachedService + service *api.Service + clusterName string + expectNeedUpdate bool + expectStatus api.LoadBalancerStatus + }{ + { + "no-cache", + &cachedService{ + lastState: &api.Service{}, + serviceStatusMap: make(map[string]api.LoadBalancerStatus), + }, + &api.Service{Status: api.ServiceStatus{LoadBalancer: buildServiceStatus([][]string{{"ip1", ""}})}}, + "foo", + true, + buildServiceStatus([][]string{{"ip1", ""}}), + }, + { + "same-ingress", + &cachedService{ + lastState: &api.Service{Status: api.ServiceStatus{LoadBalancer: buildServiceStatus([][]string{{"ip1", ""}})}}, + serviceStatusMap: map[string]api.LoadBalancerStatus{ + "foo1": {Ingress: []api.LoadBalancerIngress{{"ip1", ""}}}, + }, + }, + &api.Service{Status: api.ServiceStatus{LoadBalancer: buildServiceStatus([][]string{{"ip1", ""}})}}, + "foo1", + false, + buildServiceStatus([][]string{{"ip1", ""}}), + }, + { + "diff-cluster", + &cachedService{ + lastState: &api.Service{ + ObjectMeta: api.ObjectMeta{Name: "bar1"}, + }, + serviceStatusMap: map[string]api.LoadBalancerStatus{ + "foo2": {Ingress: []api.LoadBalancerIngress{{"ip1", ""}}}, + }, + }, + &api.Service{Status: api.ServiceStatus{LoadBalancer: buildServiceStatus([][]string{{"ip1", ""}})}}, + "foo1", + true, + buildServiceStatus([][]string{{"ip1", ""}}), + }, + { + "diff-ingress", + &cachedService{ + lastState: &api.Service{Status: api.ServiceStatus{LoadBalancer: buildServiceStatus([][]string{{"ip4", ""}, {"ip1", ""}, {"ip2", ""}})}}, + serviceStatusMap: map[string]api.LoadBalancerStatus{ + "foo1": buildServiceStatus([][]string{{"ip4", ""}, {"ip1", ""}, {"ip2", ""}}), + }, + }, + &api.Service{Status: api.ServiceStatus{LoadBalancer: buildServiceStatus([][]string{{"ip2", ""}, {"ip3", ""}, {"ip5", ""}})}}, + "foo1", + true, + buildServiceStatus([][]string{{"ip2", ""}, {"ip3", ""}, {"ip5", ""}}), + }, + } + for _, test := range tests { + result := cc.processServiceUpdate(test.cachedService, test.service, test.clusterName) + if test.expectNeedUpdate != result { + t.Errorf("Test failed for %s, expected %v, saw %v", test.name, test.expectNeedUpdate, result) + } + if !reflect.DeepEqual(test.expectStatus, test.cachedService.lastState.Status.LoadBalancer) { + t.Errorf("Test failed for %s, expected %v, saw %v", test.name, test.expectStatus, test.cachedService.lastState.Status.LoadBalancer) + } + } +} + +func TestProcessServiceDeletion(t *testing.T) { + cc := 
clusterClientCache{ + clientMap: make(map[string]*clusterCache), + } + tests := []struct { + name string + cachedService *cachedService + service *api.Service + clusterName string + expectNeedUpdate bool + expectStatus api.LoadBalancerStatus + }{ + { + "same-ingress", + &cachedService{ + lastState: &api.Service{Status: api.ServiceStatus{LoadBalancer: buildServiceStatus([][]string{{"ip1", ""}})}}, + serviceStatusMap: map[string]api.LoadBalancerStatus{ + "foo1": {Ingress: []api.LoadBalancerIngress{{"ip1", ""}}}, + }, + }, + &api.Service{Status: api.ServiceStatus{LoadBalancer: buildServiceStatus([][]string{{"ip1", ""}})}}, + "foo1", + true, + buildServiceStatus([][]string{}), + }, + { + "diff-ingress", + &cachedService{ + lastState: &api.Service{Status: api.ServiceStatus{LoadBalancer: buildServiceStatus([][]string{{"ip4", ""}, {"ip1", ""}, {"ip2", ""}, {"ip3", ""}, {"ip5", ""}, {"ip6", ""}, {"ip8", ""}})}}, + serviceStatusMap: map[string]api.LoadBalancerStatus{ + "foo1": buildServiceStatus([][]string{{"ip1", ""}, {"ip2", ""}, {"ip3", ""}}), + "foo2": buildServiceStatus([][]string{{"ip5", ""}, {"ip6", ""}, {"ip8", ""}}), + }, + }, + &api.Service{Status: api.ServiceStatus{LoadBalancer: buildServiceStatus([][]string{{"ip1", ""}, {"ip2", ""}, {"ip3", ""}})}}, + "foo1", + true, + buildServiceStatus([][]string{{"ip4", ""}, {"ip5", ""}, {"ip6", ""}, {"ip8", ""}}), + }, + } + for _, test := range tests { + result := cc.processServiceDeletion(test.cachedService, test.clusterName) + if test.expectNeedUpdate != result { + t.Errorf("Test failed for %s, expected %v, saw %v", test.name, test.expectNeedUpdate, result) + } + if !reflect.DeepEqual(test.expectStatus, test.cachedService.lastState.Status.LoadBalancer) { + t.Errorf("Test failed for %s, expected %+v, saw %+v", test.name, test.expectStatus, test.cachedService.lastState.Status.LoadBalancer) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/federation-controller/service/servicecontroller.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/federation-controller/service/servicecontroller.go new file mode 100644 index 000000000000..3bd2562ff758 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/federation-controller/service/servicecontroller.go @@ -0,0 +1,874 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package service + +import ( + "fmt" + "sync" + "time" + + "reflect" + + "github.com/golang/glog" + federation "k8s.io/kubernetes/federation/apis/federation" + federationcache "k8s.io/kubernetes/federation/client/cache" + federationclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset" + "k8s.io/kubernetes/federation/pkg/dnsprovider" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/errors" + cache "k8s.io/kubernetes/pkg/client/cache" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" + "k8s.io/kubernetes/pkg/client/record" + "k8s.io/kubernetes/pkg/controller" + "k8s.io/kubernetes/pkg/controller/framework" + pkg_runtime "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/util/runtime" + "k8s.io/kubernetes/pkg/util/sets" + "k8s.io/kubernetes/pkg/util/wait" + "k8s.io/kubernetes/pkg/util/workqueue" + "k8s.io/kubernetes/pkg/watch" + + "k8s.io/kubernetes/pkg/conversion" +) + +const ( + // TODO update to 10 mins before merge + serviceSyncPeriod = 30 * time.Second + clusterSyncPeriod = 100 * time.Second + + // How long to wait before retrying the processing of a service change. + // If this changes, the sleep in hack/jenkins/e2e.sh before downing a cluster + // should be changed appropriately. + minRetryDelay = 5 * time.Second + maxRetryDelay = 300 * time.Second + + // clientRetryCount is the number of attempts made when accessing a remote kube-apiserver + // or the federation apiserver, with a short sleep between attempts when a failure occurs; + // the retries happen over a short period, so no exponential backoff is used + clientRetryCount = 5 + + retryable = true + + doNotRetry = time.Duration(0) + + UserAgentName = "federation-service-controller" + KubeAPIQPS = 20.0 + KubeAPIBurst = 30 +) + +type cachedService struct { + lastState *api.Service + // The state as successfully applied to the DNS server + appliedState *api.Service + // endpointMap holds endpoint subset info from the kubernetes clusters + // key: clusterName + // value is a flag: 1 means the cluster has a ready address, 0 means it has none + endpointMap map[string]int + // serviceStatusMap holds service status info from the kubernetes clusters + // key: clusterName + + serviceStatusMap map[string]api.LoadBalancerStatus + + // Ensures only one goroutine can operate on this service at any given time.
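+ // (Despite its name, rwlock is a plain sync.Mutex, not an RWMutex; + // callers never take a read lock.)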
rwlock sync.Mutex + + // Controls error back-off for propagating the federation service to k8s clusters + lastRetryDelay time.Duration + // Controls error back-off for updating the federation service back to the federation apiserver + lastFedUpdateDelay time.Duration + // Controls error back-off for dns record updates + lastDNSUpdateDelay time.Duration +} + +type serviceCache struct { + rwlock sync.Mutex // protects fedServiceMap + // federation service map contains all services received from the federation apiserver + // key: serviceName + fedServiceMap map[string]*cachedService +} + +type ServiceController struct { + dns dnsprovider.Interface + federationClient federationclientset.Interface + zones []dnsprovider.Zone + serviceCache *serviceCache + clusterCache *clusterClientCache + // A store of services, populated by the serviceController + serviceStore cache.StoreToServiceLister + // Watches changes to all services + serviceController *framework.Controller + // A store of clusters, populated by the clusterController + clusterStore federationcache.StoreToClusterLister + // Watches changes to all clusters + clusterController *framework.Controller + eventBroadcaster record.EventBroadcaster + eventRecorder record.EventRecorder + // services that need to be synced + queue *workqueue.Type + knownClusterSet sets.String +} + +// New returns a new service controller to keep DNS provider service resources +// (like Kubernetes Services and DNS server records for service discovery) in sync with the registry. +func New(federationClient federationclientset.Interface, dns dnsprovider.Interface) *ServiceController { + broadcaster := record.NewBroadcaster() + // federationClient events are not supported yet + // broadcaster.StartRecordingToSink(&unversioned_core.EventSinkImpl{Interface: kubeClient.Core().Events("")}) + recorder := broadcaster.NewRecorder(api.EventSource{Component: UserAgentName}) + + s := &ServiceController{ + dns: dns, + federationClient: federationClient, + serviceCache: &serviceCache{fedServiceMap: make(map[string]*cachedService)}, + clusterCache: &clusterClientCache{ + rwlock: sync.Mutex{}, + clientMap: make(map[string]*clusterCache), + }, + eventBroadcaster: broadcaster, + eventRecorder: recorder, + queue: workqueue.New(), + knownClusterSet: make(sets.String), + } + s.serviceStore.Store, s.serviceController = framework.NewInformer( + &cache.ListWatch{ + ListFunc: func(options api.ListOptions) (pkg_runtime.Object, error) { + return s.federationClient.Core().Services(api.NamespaceAll).List(options) + }, + WatchFunc: func(options api.ListOptions) (watch.Interface, error) { + return s.federationClient.Core().Services(api.NamespaceAll).Watch(options) + }, + }, + &api.Service{}, + serviceSyncPeriod, + framework.ResourceEventHandlerFuncs{ + AddFunc: s.enqueueService, + UpdateFunc: func(old, cur interface{}) { + // old and cur can be equal, yet we still receive the event; filter such no-ops here.
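+ // (Periodic informer resyncs are a typical source of such identical + // old/cur pairs, so this DeepEqual check avoids redundant queue churn.)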
+ if !reflect.DeepEqual(old, cur) { + s.enqueueService(cur) + } + }, + DeleteFunc: s.enqueueService, + }, + ) + s.clusterStore.Store, s.clusterController = framework.NewInformer( + &cache.ListWatch{ + ListFunc: func(options api.ListOptions) (pkg_runtime.Object, error) { + return s.federationClient.Federation().Clusters().List(options) + }, + WatchFunc: func(options api.ListOptions) (watch.Interface, error) { + return s.federationClient.Federation().Clusters().Watch(options) + }, + }, + &federation.Cluster{}, + clusterSyncPeriod, + framework.ResourceEventHandlerFuncs{ + DeleteFunc: s.clusterCache.delFromClusterSet, + AddFunc: s.clusterCache.addToClientMap, + UpdateFunc: func(old, cur interface{}) { + oldCluster, ok := old.(*federation.Cluster) + if !ok { + return + } + curCluster, ok := cur.(*federation.Cluster) + if !ok { + return + } + if !reflect.DeepEqual(oldCluster.Spec, curCluster.Spec) { + // update when spec is changed + s.clusterCache.addToClientMap(cur) + } + + pred := getClusterConditionPredicate() + // only update when condition changed to ready from not-ready + if !pred(*oldCluster) && pred(*curCluster) { + s.clusterCache.addToClientMap(cur) + } + // did not handle ready -> not-ready + // how could we stop a controller? + }, + }, + ) + return s +} + +// obj could be an *api.Service, or a DeletionFinalStateUnknown marker item. +func (s *ServiceController) enqueueService(obj interface{}) { + key, err := controller.KeyFunc(obj) + if err != nil { + glog.Errorf("Couldn't get key for object %+v: %v", obj, err) + return + } + s.queue.Add(key) +} + +// Run starts a background goroutine that watches for changes to federation services +// and ensures that they have Kubernetes services created, updated or deleted appropriately. +// federationSyncPeriod controls how often we check the federation's services to +// ensure that the correct Kubernetes services (and associated DNS entries) exist. +// This is only necessary to fudge over failed watches. +// clusterSyncPeriod controls how often we check the federation's underlying clusters and +// their Kubernetes services to ensure that matching services created independently of the Federation +// (e.g. directly via the underlying cluster's API) are correctly accounted for. + +// It's an error to call Run() more than once for a given ServiceController +// object. +func (s *ServiceController) Run(workers int, stopCh <-chan struct{}) error { + defer runtime.HandleCrash() + go s.serviceController.Run(stopCh) + go s.clusterController.Run(stopCh) + for i := 0; i < workers; i++ { + go wait.Until(s.fedServiceWorker, time.Second, stopCh) + } + go wait.Until(s.clusterEndpointWorker, time.Second, stopCh) + go wait.Until(s.clusterServiceWorker, time.Second, stopCh) + go wait.Until(s.clusterSyncLoop, clusterSyncPeriod, stopCh) + <-stopCh + glog.Infof("Shutting down Federation Service Controller") + s.queue.ShutDown() + return nil +} + +// fedServiceWorker runs a worker thread that just dequeues items, processes them, and marks them done. +// It enforces that the syncService is never invoked concurrently with the same key. 
+func (s *ServiceController) fedServiceWorker() { + for { + func() { + key, quit := s.queue.Get() + if quit { + return + } + + defer s.queue.Done(key) + err := s.syncService(key.(string)) + if err != nil { + glog.Errorf("Error syncing service: %v", err) + } + }() + } +} + +func wantsDNSRecords(service *api.Service) bool { + return service.Spec.Type == api.ServiceTypeLoadBalancer +} + +// processServiceForCluster creates or updates the service in the given registered running cluster; +// DNS records and federation status updates are handled by the callers. +// It returns any error caught. +func (s *ServiceController) processServiceForCluster(cachedService *cachedService, clusterName string, service *api.Service, client *clientset.Clientset) error { + glog.V(4).Infof("Process service %s/%s for cluster %s", service.Namespace, service.Name, clusterName) + // Create or Update the k8s Service + err := s.ensureClusterService(cachedService, clusterName, service, client) + if err != nil { + glog.V(4).Infof("Failed to process service %s/%s for cluster %s", service.Namespace, service.Name, clusterName) + return err + } + glog.V(4).Infof("Successfully processed service %s/%s for cluster %s", service.Namespace, service.Name, clusterName) + return nil +} + +// updateFederationService returns whatever error occurred along with a boolean indicator of whether it +// should be retried. +func (s *ServiceController) updateFederationService(key string, cachedService *cachedService) (error, bool) { + // Clone the federation service, and create it in the underlying k8s clusters + clone, err := conversion.NewCloner().DeepCopy(cachedService.lastState) + if err != nil { + return err, !retryable + } + service, ok := clone.(*api.Service) + if !ok { + return fmt.Errorf("unexpected service cast error: %v", clone), !retryable + } + + // handle available clusters concurrently and wait for all of them + var wg sync.WaitGroup + var mu sync.Mutex + var hasErr bool + for clusterName, cache := range s.clusterCache.clientMap { + wg.Add(1) + go func(cache *clusterCache, clusterName string) { + defer wg.Done() + if err := s.processServiceForCluster(cachedService, clusterName, service, cache.clientset); err != nil { + mu.Lock() + hasErr = true + mu.Unlock() + } + }(cache, clusterName) + } + wg.Wait() + if hasErr { + // detailed errors have been logged inside the loop + return fmt.Errorf("Service %s/%s was not successfully updated to all clusters", service.Namespace, service.Name), retryable + } + return nil, !retryable +} + +func (s *ServiceController) deleteFederationService(cachedService *cachedService) (error, bool) { + // handle available clusters one by one + var hasErr bool + for clusterName, cluster := range s.clusterCache.clientMap { + err := s.deleteClusterService(clusterName, cachedService, cluster.clientset) + if err != nil { + hasErr = true + } + } + if hasErr { + // detailed errors have been logged inside the loop + return fmt.Errorf("Service %s/%s was not successfully deleted from all clusters", cachedService.lastState.Namespace, cachedService.lastState.Name), retryable + } + return nil, !retryable +} + +func (s *ServiceController) deleteClusterService(clusterName string, cachedService *cachedService, clientset *clientset.Clientset) error { + service := cachedService.lastState + glog.V(4).Infof("Deleting service %s/%s from cluster %s", service.Namespace, service.Name, clusterName) + var err error + for i := 0; i < clientRetryCount; i++ { + err = clientset.Core().Services(service.Namespace).Delete(service.Name, &api.DeleteOptions{}) + if err == nil || errors.IsNotFound(err) { + glog.V(4).Infof("Service %s/%s deleted from cluster %s",
service.Namespace, service.Name, clusterName) + return nil + } + time.Sleep(cachedService.nextRetryDelay()) + } + glog.V(4).Infof("Failed to delete service %s/%s from cluster %s, %+v", service.Namespace, service.Name, clusterName, err) + return err +} + +func (s *ServiceController) ensureClusterService(cachedService *cachedService, clusterName string, service *api.Service, client *clientset.Clientset) error { + var err error + var needUpdate bool + for i := 0; i < clientRetryCount; i++ { + var svc *api.Service + svc, err = client.Core().Services(service.Namespace).Get(service.Name) + if err == nil { + // the service exists + glog.V(5).Infof("Found service %s/%s from cluster %s", service.Namespace, service.Name, clusterName) + // preserve immutable fields + service.Spec.ClusterIP = svc.Spec.ClusterIP + + // preserve auto-assigned fields + for i, oldPort := range svc.Spec.Ports { + for _, port := range service.Spec.Ports { + if port.NodePort == 0 { + if !portEqualExcludeNodePort(&oldPort, &port) { + svc.Spec.Ports[i] = port + needUpdate = true + } + } else { + if !portEqualForLB(&oldPort, &port) { + svc.Spec.Ports[i] = port + needUpdate = true + } + } + } + } + + if needUpdate { + // we only apply spec updates + svc.Spec = service.Spec + _, err = client.Core().Services(svc.Namespace).Update(svc) + if err == nil { + glog.V(5).Infof("Service %s/%s successfully updated in cluster %s", svc.Namespace, svc.Name, clusterName) + return nil + } else { + glog.V(4).Infof("Failed to update %+v", err) + } + } else { + glog.V(5).Infof("Service %s/%s not updated in cluster %s as the specs are identical", svc.Namespace, svc.Name, clusterName) + return nil + } + } else if errors.IsNotFound(err) { + // Create the service if it is not found + glog.Infof("Service '%s/%s' is not found in cluster %s, trying to create a new one", + service.Namespace, service.Name, clusterName) + service.ResourceVersion = "" + _, err = client.Core().Services(service.Namespace).Create(service) + if err == nil { + glog.V(5).Infof("Service %s/%s successfully created in cluster %s", service.Namespace, service.Name, clusterName) + return nil + } + glog.V(4).Infof("Failed to create %+v", err) + if errors.IsAlreadyExists(err) { + glog.V(5).Infof("service %s/%s already exists in cluster %s", service.Namespace, service.Name, clusterName) + return nil + } + } + if errors.IsConflict(err) { + glog.V(4).Infof("Not persisting update to service '%s/%s' that has been changed since we received it: %v", + service.Namespace, service.Name, err) + } + // should we reuse the same retry delay for all clusters?
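+ // Illustration: starting from zero, successive nextRetryDelay() calls + // yield 5s, 10s, 20s, ... capped at maxRetryDelay (300s); the delay + // state lives on cachedService, so it is currently shared across clusters.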
+ time.Sleep(cachedService.nextRetryDelay()) + } + return err +} + +func (s *serviceCache) allServices() []*cachedService { + s.rwlock.Lock() + defer s.rwlock.Unlock() + services := make([]*cachedService, 0, len(s.fedServiceMap)) + for _, v := range s.fedServiceMap { + services = append(services, v) + } + return services +} + +func (s *serviceCache) get(serviceName string) (*cachedService, bool) { + s.rwlock.Lock() + defer s.rwlock.Unlock() + service, ok := s.fedServiceMap[serviceName] + return service, ok +} + +func (s *serviceCache) getOrCreate(serviceName string) *cachedService { + s.rwlock.Lock() + defer s.rwlock.Unlock() + service, ok := s.fedServiceMap[serviceName] + if !ok { + service = &cachedService{ + endpointMap: make(map[string]int), + serviceStatusMap: make(map[string]api.LoadBalancerStatus), + } + s.fedServiceMap[serviceName] = service + } + return service +} + +func (s *serviceCache) set(serviceName string, service *cachedService) { + s.rwlock.Lock() + defer s.rwlock.Unlock() + s.fedServiceMap[serviceName] = service +} + +func (s *serviceCache) delete(serviceName string) { + s.rwlock.Lock() + defer s.rwlock.Unlock() + delete(s.fedServiceMap, serviceName) +} + +// needsUpdateDNS checks whether the dns records of the given service should be updated +func (s *ServiceController) needsUpdateDNS(oldService *api.Service, newService *api.Service) bool { + if !wantsDNSRecords(oldService) && !wantsDNSRecords(newService) { + return false + } + if wantsDNSRecords(oldService) != wantsDNSRecords(newService) { + s.eventRecorder.Eventf(newService, api.EventTypeNormal, "Type", "%v -> %v", + oldService.Spec.Type, newService.Spec.Type) + return true + } + if !portsEqualForLB(oldService, newService) || oldService.Spec.SessionAffinity != newService.Spec.SessionAffinity { + return true + } + if !LoadBalancerIPsAreEqual(oldService, newService) { + s.eventRecorder.Eventf(newService, api.EventTypeNormal, "LoadbalancerIP", "%v -> %v", + oldService.Spec.LoadBalancerIP, newService.Spec.LoadBalancerIP) + return true + } + if len(oldService.Spec.ExternalIPs) != len(newService.Spec.ExternalIPs) { + s.eventRecorder.Eventf(newService, api.EventTypeNormal, "ExternalIP", "Count: %v -> %v", + len(oldService.Spec.ExternalIPs), len(newService.Spec.ExternalIPs)) + return true + } + for i := range oldService.Spec.ExternalIPs { + if oldService.Spec.ExternalIPs[i] != newService.Spec.ExternalIPs[i] { + s.eventRecorder.Eventf(newService, api.EventTypeNormal, "ExternalIP", "Added: %v", + newService.Spec.ExternalIPs[i]) + return true + } + } + if !reflect.DeepEqual(oldService.Annotations, newService.Annotations) { + return true + } + if oldService.UID != newService.UID { + s.eventRecorder.Eventf(newService, api.EventTypeNormal, "UID", "%v -> %v", + oldService.UID, newService.UID) + return true + } + + return false +} + +func getPortsForLB(service *api.Service) ([]*api.ServicePort, error) { + // TODO: quinton: Probably applies for DNS SVC records. Come back to this. + //var protocol api.Protocol + + ports := []*api.ServicePort{} + for i := range service.Spec.Ports { + sp := &service.Spec.Ports[i] + // The check on protocol was removed here.
The DNS provider itself is now responsible for all protocol validation + ports = append(ports, sp) + } + return ports, nil +} + +func portsEqualForLB(x, y *api.Service) bool { + xPorts, err := getPortsForLB(x) + if err != nil { + return false + } + yPorts, err := getPortsForLB(y) + if err != nil { + return false + } + return portSlicesEqualForLB(xPorts, yPorts) +} + +func portSlicesEqualForLB(x, y []*api.ServicePort) bool { + if len(x) != len(y) { + return false + } + + for i := range x { + if !portEqualForLB(x[i], y[i]) { + return false + } + } + return true +} + +func portEqualForLB(x, y *api.ServicePort) bool { + // TODO: Should we check name? (In theory, an LB could expose it) + if x.Name != y.Name { + return false + } + + if x.Protocol != y.Protocol { + return false + } + + if x.Port != y.Port { + return false + } + if x.NodePort != y.NodePort { + return false + } + return true +} + +func portEqualExcludeNodePort(x, y *api.ServicePort) bool { + // TODO: Should we check name? (In theory, an LB could expose it) + if x.Name != y.Name { + return false + } + + if x.Protocol != y.Protocol { + return false + } + + if x.Port != y.Port { + return false + } + return true +} + +func clustersFromList(list *federation.ClusterList) []string { + result := []string{} + for ix := range list.Items { + result = append(result, list.Items[ix].Name) + } + return result +} + +// getClusterConditionPredicate returns a predicate that accepts clusters +// whose Ready condition has status True +func getClusterConditionPredicate() federationcache.ClusterConditionPredicate { + return func(cluster federation.Cluster) bool { + // If we have no info, don't accept + if len(cluster.Status.Conditions) == 0 { + return false + } + for _, cond := range cluster.Status.Conditions { + // We consider the cluster for load balancing only when its ClusterReady condition status + // is ConditionTrue + if cond.Type == federation.ClusterReady && cond.Status != api.ConditionTrue { + glog.V(4).Infof("Ignoring cluster %v with %v condition status %v", cluster.Name, cond.Type, cond.Status) + return false + } + } + return true + } +} + +// clusterSyncLoop observes changes to the set of running clusters, applies all services to newly +// added clusters, and adds dns records for the changes +func (s *ServiceController) clusterSyncLoop() { + var servicesToUpdate []*cachedService + // should we remove the cache for a cluster that goes from ready to not ready? if not, the condition predicate should be removed + clusters, err := s.clusterStore.ClusterCondition(getClusterConditionPredicate()).List() + if err != nil { + glog.Infof("Failed to list clusters: %v", err) + return + } + newClusters := clustersFromList(&clusters) + var newSet, increase sets.String + newSet = sets.NewString(newClusters...) + if newSet.Equal(s.knownClusterSet) { + // The set of cluster names in the services in the federation hasn't changed, but we can retry + // updating any services that we failed to update last time around. + servicesToUpdate = s.updateDNSRecords(servicesToUpdate, newClusters) + return + } + glog.Infof("Detected change in list of cluster names. New set: %v, Old set: %v", newSet, s.knownClusterSet) + increase = newSet.Difference(s.knownClusterSet) + // do nothing when a cluster is removed. + if increase != nil { + servicesToUpdate = s.serviceCache.allServices() + for newCluster := range increase { + glog.Infof("New cluster observed %s", newCluster) + s.updateAllServicesToCluster(servicesToUpdate, newCluster) + } + // Try updating all services, and save the ones that fail to try again next + // round.
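+ // For example (hypothetical values): knownClusterSet={A,B} and + // newSet={A,B,C} give increase={C}, so every cached service is pushed + // to cluster C by updateAllServicesToCluster above.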
+ numServices := len(servicesToUpdate) + servicesToUpdate = s.updateDNSRecords(servicesToUpdate, newClusters) + glog.Infof("Successfully updated %d out of %d DNS records to direct traffic to the updated cluster", + numServices-len(servicesToUpdate), numServices) + } + s.knownClusterSet = newSet +} + +func (s *ServiceController) updateAllServicesToCluster(services []*cachedService, clusterName string) { + cluster, ok := s.clusterCache.clientMap[clusterName] + if ok { + for _, cachedService := range services { + appliedState := cachedService.appliedState + s.processServiceForCluster(cachedService, clusterName, appliedState, cluster.clientset) + } + } +} + +func (s *ServiceController) removeAllServicesFromCluster(services []*cachedService, clusterName string) { + client, ok := s.clusterCache.clientMap[clusterName] + if ok { + for _, cachedService := range services { + s.deleteClusterService(clusterName, cachedService, client.clientset) + } + glog.Infof("Removed all services from cluster %s", clusterName) + } +} + +// updateDNSRecords updates all existing federation service DNS Records so that +// they will match the list of cluster names provided. +// Returns the list of services that couldn't be updated. +func (s *ServiceController) updateDNSRecords(services []*cachedService, clusters []string) (servicesToRetry []*cachedService) { + for _, service := range services { + func() { + service.rwlock.Lock() + defer service.rwlock.Unlock() + // If the applied state is nil, that means it hasn't yet been successfully dealt + // with by the DNS Record reconciler. We can trust the DNS Record + // reconciler to ensure the federation service's DNS records are created to target + // the correct backend service IPs + if service.appliedState == nil { + return + } + if err := s.lockedUpdateDNSRecords(service, clusters); err != nil { + glog.Errorf("External error while updating DNS Records: %v.", err) + servicesToRetry = append(servicesToRetry, service) + } + }() + } + return servicesToRetry +} + +// lockedUpdateDNSRecords updates the DNS records of a service, assuming we hold the mutex +// associated with the service. +// TODO: quinton: Still screwed up in the same way as above. Fix. +func (s *ServiceController) lockedUpdateDNSRecords(service *cachedService, clusterNames []string) error { + if !wantsDNSRecords(service.appliedState) { + return nil + } + for key := range s.clusterCache.clientMap { + for _, clusterName := range clusterNames { + if key == clusterName { + ensureDNSRecords(clusterName, service) + } + } + } + return nil +} + +func LoadBalancerIPsAreEqual(oldService, newService *api.Service) bool { + return oldService.Spec.LoadBalancerIP == newService.Spec.LoadBalancerIP +} + +// Computes the next retry, using exponential backoff +// mutex must be held. +func (s *cachedService) nextRetryDelay() time.Duration { + s.lastRetryDelay = s.lastRetryDelay * 2 + if s.lastRetryDelay < minRetryDelay { + s.lastRetryDelay = minRetryDelay + } + if s.lastRetryDelay > maxRetryDelay { + s.lastRetryDelay = maxRetryDelay + } + return s.lastRetryDelay +} + +// resetRetryDelay resets the retry exponential backoff. mutex must be held. +func (s *cachedService) resetRetryDelay() { + s.lastRetryDelay = time.Duration(0) +} + +// Computes the next retry, using exponential backoff +// mutex must be held.
+func (s *cachedService) nextFedUpdateDelay() time.Duration { + s.lastFedUpdateDelay = s.lastFedUpdateDelay * 2 + if s.lastFedUpdateDelay < minRetryDelay { + s.lastFedUpdateDelay = minRetryDelay + } + if s.lastFedUpdateDelay > maxRetryDelay { + s.lastFedUpdateDelay = maxRetryDelay + } + return s.lastFedUpdateDelay +} + +// resetFedUpdateDelay resets the retry exponential backoff. mutex must be held. +func (s *cachedService) resetFedUpdateDelay() { + s.lastFedUpdateDelay = time.Duration(0) +} + +// Computes the next retry, using exponential backoff +// mutex must be held. +func (s *cachedService) nextDNSUpdateDelay() time.Duration { + s.lastDNSUpdateDelay = s.lastDNSUpdateDelay * 2 + if s.lastDNSUpdateDelay < minRetryDelay { + s.lastDNSUpdateDelay = minRetryDelay + } + if s.lastDNSUpdateDelay > maxRetryDelay { + s.lastDNSUpdateDelay = maxRetryDelay + } + return s.lastDNSUpdateDelay +} + +// resetDNSUpdateDelay resets the retry exponential backoff. mutex must be held. +func (s *cachedService) resetDNSUpdateDelay() { + s.lastDNSUpdateDelay = time.Duration(0) +} + +// syncService will sync the Service with the given key. +// This function is not meant to be invoked concurrently with the same key. +func (s *ServiceController) syncService(key string) error { + startTime := time.Now() + var cachedService *cachedService + var retryDelay time.Duration + defer func() { + glog.V(4).Infof("Finished syncing service %q (%v)", key, time.Now().Sub(startTime)) + }() + // obj holds the latest service info from apiserver + obj, exists, err := s.serviceStore.Store.GetByKey(key) + if err != nil { + glog.Infof("Unable to retrieve service %v from store: %v", key, err) + s.queue.Add(key) + return err + } + + if !exists { + // service absence in store means the watcher caught the deletion; ensure LB info is cleaned + glog.Infof("Service has been deleted %v", key) + err, retryDelay = s.processServiceDeletion(key) + } + + if exists { + service, ok := obj.(*api.Service) + if ok { + cachedService = s.serviceCache.getOrCreate(key) + err, retryDelay = s.processServiceUpdate(cachedService, service, key) + } else { + tombstone, ok := obj.(cache.DeletedFinalStateUnknown) + if !ok { + return fmt.Errorf("Object contained wasn't a service or a deleted key: %+v", obj) + } + glog.Infof("Found tombstone for %v", key) + err, retryDelay = s.processServiceDeletion(tombstone.Key) + } + } + + if retryDelay != 0 { + s.enqueueService(obj) + } else if err != nil { + runtime.HandleError(fmt.Errorf("Failed to process service. Not retrying: %v", err)) + } + return nil +} + +// processServiceUpdate returns an error if processing the service update failed, along with a time.Duration +// indicating whether processing should be retried; zero means no-retry; otherwise +// we should retry in that Duration. +func (s *ServiceController) processServiceUpdate(cachedService *cachedService, service *api.Service, key string) (error, time.Duration) { + // Ensure that no other goroutine will interfere with our processing of the + // service. + cachedService.rwlock.Lock() + defer cachedService.rwlock.Unlock() + + // Update the cached service (used above for populating synthetic deletes) + // always trust the service retrieved from the serviceStore, which keeps the latest service info from the apiserver + // if the same service changes before this goroutine finishes, another queue entry will handle it.
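+ // (Recording lastState before the fan-out below means any retry or + // synthetic deletion observes the most recent spec received from the + // apiserver.)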
+ cachedService.lastState = service + err, retry := s.updateFederationService(key, cachedService) + if err != nil { + message := "Error occurred while updating the service in all clusters" + if retry { + message += " (will retry): " + } else { + message += " (will not retry): " + } + message += err.Error() + s.eventRecorder.Event(service, api.EventTypeWarning, "UpdateServiceFail", message) + return err, cachedService.nextRetryDelay() + } + // Always update the cache upon success. + // NOTE: Since we update the cached service if and only if we successfully + // processed it, a cached service being nil implies that it hasn't yet + // been successfully processed. + + cachedService.appliedState = service + s.serviceCache.set(key, cachedService) + glog.V(4).Infof("Successfully processed service %s", key) + cachedService.resetRetryDelay() + return nil, doNotRetry +} + +// processServiceDeletion returns an error if processing the service deletion failed, along with a time.Duration +// indicating whether processing should be retried; zero means no-retry; otherwise +// we should retry in that Duration. +func (s *ServiceController) processServiceDeletion(key string) (error, time.Duration) { + glog.V(2).Infof("Process service deletion for %v", key) + cachedService, ok := s.serviceCache.get(key) + if !ok { + return fmt.Errorf("Service %s not in cache even though the watcher thought it was. Ignoring the deletion.", key), doNotRetry + } + service := cachedService.lastState + cachedService.rwlock.Lock() + defer cachedService.rwlock.Unlock() + s.eventRecorder.Event(service, api.EventTypeNormal, "DeletingDNSRecord", "Deleting DNS Records") + // TODO should we delete dns info here or wait for endpoint changes? prefer here + // or we do nothing for service deletion + //err := s.dns.balancer.EnsureLoadBalancerDeleted(service) + err, retry := s.deleteFederationService(cachedService) + if err != nil { + message := "Error occurred while deleting the federation service" + if retry { + message += " (will retry): " + } else { + message += " (will not retry): " + } + message += err.Error() + s.eventRecorder.Event(service, api.EventTypeWarning, "DeletingDNSRecordFailed", message) + return err, cachedService.nextRetryDelay() + } + s.eventRecorder.Event(service, api.EventTypeNormal, "DeletedDNSRecord", "Deleted DNS Records") + s.serviceCache.delete(key) + + cachedService.resetRetryDelay() + return nil, doNotRetry +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/federation-controller/service/servicecontroller_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/federation-controller/service/servicecontroller_test.go new file mode 100644 index 000000000000..e5db2fb9b6ec --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/pkg/federation-controller/service/servicecontroller_test.go @@ -0,0 +1,67 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package service + +import ( + "testing" + + "k8s.io/kubernetes/federation/apis/federation" + "k8s.io/kubernetes/pkg/api" +) + +func TestGetClusterConditionPredicate(t *testing.T) { + tests := []struct { + cluster federation.Cluster + expectAccept bool + name string + }{ + { + cluster: federation.Cluster{}, + expectAccept: false, + name: "empty", + }, + { + cluster: federation.Cluster{ + Status: federation.ClusterStatus{ + Conditions: []federation.ClusterCondition{ + {Type: federation.ClusterReady, Status: api.ConditionTrue}, + }, + }, + }, + expectAccept: true, + name: "basic", + }, + { + cluster: federation.Cluster{ + Status: federation.ClusterStatus{ + Conditions: []federation.ClusterCondition{ + {Type: federation.ClusterReady, Status: api.ConditionFalse}, + }, + }, + }, + expectAccept: false, + name: "notready", + }, + } + pred := getClusterConditionPredicate() + for _, test := range tests { + accept := pred(test.cluster) + if accept != test.expectAccept { + t.Errorf("Test failed for %s, expected %v, saw %v", test.name, test.expectAccept, accept) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/registry/cluster/etcd/etcd.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/registry/cluster/etcd/etcd.go new file mode 100644 index 000000000000..99f3f9a2cd30 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/registry/cluster/etcd/etcd.go @@ -0,0 +1,83 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package etcd + +import ( + "k8s.io/kubernetes/federation/apis/federation" + "k8s.io/kubernetes/federation/registry/cluster" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/rest" + "k8s.io/kubernetes/pkg/registry/generic" + "k8s.io/kubernetes/pkg/registry/generic/registry" + "k8s.io/kubernetes/pkg/runtime" +) + +type REST struct { + *registry.Store +} + +type StatusREST struct { + store *registry.Store +} + +func (r *StatusREST) New() runtime.Object { + return &federation.Cluster{} +} + +// Update alters the status subset of an object. +func (r *StatusREST) Update(ctx api.Context, name string, objInfo rest.UpdatedObjectInfo) (runtime.Object, bool, error) { + return r.store.Update(ctx, name, objInfo) +} + +// NewREST returns a RESTStorage object that will work against clusters. 
+func NewREST(opts generic.RESTOptions) (*REST, *StatusREST) { + prefix := "/clusters" + + newListFunc := func() runtime.Object { return &federation.ClusterList{} } + storageInterface := opts.Decorator( + opts.Storage, 100, &federation.Cluster{}, prefix, cluster.Strategy, newListFunc) + + store := ®istry.Store{ + NewFunc: func() runtime.Object { return &federation.Cluster{} }, + NewListFunc: newListFunc, + KeyRootFunc: func(ctx api.Context) string { + return prefix + }, + KeyFunc: func(ctx api.Context, name string) (string, error) { + return registry.NoNamespaceKeyFunc(ctx, prefix, name) + }, + ObjectNameFunc: func(obj runtime.Object) (string, error) { + return obj.(*federation.Cluster).Name, nil + }, + PredicateFunc: cluster.MatchCluster, + QualifiedResource: federation.Resource("clusters"), + DeleteCollectionWorkers: opts.DeleteCollectionWorkers, + + CreateStrategy: cluster.Strategy, + UpdateStrategy: cluster.Strategy, + DeleteStrategy: cluster.Strategy, + + ReturnDeletedObject: true, + + Storage: storageInterface, + } + + statusStore := *store + statusStore.UpdateStrategy = cluster.StatusStrategy + + return &REST{store}, &StatusREST{store: &statusStore} +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/registry/cluster/etcd/etcd_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/registry/cluster/etcd/etcd_test.go new file mode 100644 index 000000000000..f04a329bbd88 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/registry/cluster/etcd/etcd_test.go @@ -0,0 +1,143 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package etcd + +import ( + "testing" + + "k8s.io/kubernetes/federation/apis/federation" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/fields" + "k8s.io/kubernetes/pkg/labels" + "k8s.io/kubernetes/pkg/registry/generic" + "k8s.io/kubernetes/pkg/registry/registrytest" + "k8s.io/kubernetes/pkg/runtime" + etcdtesting "k8s.io/kubernetes/pkg/storage/etcd/testing" +) + +func newStorage(t *testing.T) (*REST, *etcdtesting.EtcdTestServer) { + etcdStorage, server := registrytest.NewEtcdStorage(t, federation.GroupName) + restOptions := generic.RESTOptions{ + Storage: etcdStorage, + Decorator: generic.UndecoratedStorage, + DeleteCollectionWorkers: 1} + storage, _ := NewREST(restOptions) + return storage, server +} + +func validNewCluster() *federation.Cluster { + return &federation.Cluster{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Labels: map[string]string{ + "name": "foo", + }, + }, + Spec: federation.ClusterSpec{ + ServerAddressByClientCIDRs: []federation.ServerAddressByClientCIDR{ + { + ClientCIDR: "0.0.0.0/0", + ServerAddress: "localhost:8888", + }, + }, + }, + Status: federation.ClusterStatus{ + Conditions: []federation.ClusterCondition{ + {Type: federation.ClusterReady, Status: api.ConditionFalse}, + }, + }, + } +} + +func TestCreate(t *testing.T) { + storage, server := newStorage(t) + defer server.Terminate(t) + test := registrytest.New(t, storage.Store).ClusterScope() + cluster := validNewCluster() + cluster.ObjectMeta = api.ObjectMeta{GenerateName: "foo"} + test.TestCreate( + cluster, + &federation.Cluster{ + ObjectMeta: api.ObjectMeta{Name: "-a123-a_"}, + }, + ) +} + +func TestUpdate(t *testing.T) { + storage, server := newStorage(t) + defer server.Terminate(t) + test := registrytest.New(t, storage.Store).ClusterScope() + test.TestUpdate( + // valid + validNewCluster(), + // updateFunc + func(obj runtime.Object) runtime.Object { + object := obj.(*federation.Cluster) + object.Spec.SecretRef = &api.LocalObjectReference{ + Name: "bar", + } + return object + }, + ) +} + +func TestDelete(t *testing.T) { + storage, server := newStorage(t) + defer server.Terminate(t) + test := registrytest.New(t, storage.Store).ClusterScope().ReturnDeletedObject() + test.TestDelete(validNewCluster()) +} + +func TestGet(t *testing.T) { + storage, server := newStorage(t) + defer server.Terminate(t) + test := registrytest.New(t, storage.Store).ClusterScope() + test.TestGet(validNewCluster()) +} + +func TestList(t *testing.T) { + storage, server := newStorage(t) + defer server.Terminate(t) + test := registrytest.New(t, storage.Store).ClusterScope() + test.TestList(validNewCluster()) +} + +func TestWatch(t *testing.T) { + storage, server := newStorage(t) + defer server.Terminate(t) + test := registrytest.New(t, storage.Store).ClusterScope() + test.TestWatch( + validNewCluster(), + // matching labels + []labels.Set{ + {"name": "foo"}, + }, + // not matching labels + []labels.Set{ + {"name": "bar"}, + {"foo": "bar"}, + }, + // matching fields + []fields.Set{ + {"metadata.name": "foo"}, + }, + // not matching fields + []fields.Set{ + {"metadata.name": "bar"}, + }, + ) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/registry/cluster/registry.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/registry/cluster/registry.go new file mode 100644 index 000000000000..99a41e63cc05 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/registry/cluster/registry.go @@ -0,0 +1,81 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cluster + +import ( + "k8s.io/kubernetes/federation/apis/federation" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/rest" + "k8s.io/kubernetes/pkg/watch" +) + +// Registry is an interface implemented by things that know how to store Cluster objects. +type Registry interface { + ListClusters(ctx api.Context, options *api.ListOptions) (*federation.ClusterList, error) + WatchCluster(ctx api.Context, options *api.ListOptions) (watch.Interface, error) + GetCluster(ctx api.Context, name string) (*federation.Cluster, error) + CreateCluster(ctx api.Context, cluster *federation.Cluster) error + UpdateCluster(ctx api.Context, cluster *federation.Cluster) error + DeleteCluster(ctx api.Context, name string) error +} + +// storage puts strong typing around storage calls + +type storage struct { + rest.StandardStorage +} + +// NewRegistry returns a new Registry interface for the given Storage. Any mismatched +// types will panic. +func NewRegistry(s rest.StandardStorage) Registry { + return &storage{s} +} + +func (s *storage) ListClusters(ctx api.Context, options *api.ListOptions) (*federation.ClusterList, error) { + obj, err := s.List(ctx, options) + if err != nil { + return nil, err + } + return obj.(*federation.ClusterList), nil +} + +func (s *storage) WatchCluster(ctx api.Context, options *api.ListOptions) (watch.Interface, error) { + return s.Watch(ctx, options) +} + +func (s *storage) GetCluster(ctx api.Context, name string) (*federation.Cluster, error) { + obj, err := s.Get(ctx, name) + if err != nil { + return nil, err + } + return obj.(*federation.Cluster), nil +} + +func (s *storage) CreateCluster(ctx api.Context, cluster *federation.Cluster) error { + _, err := s.Create(ctx, cluster) + return err +} + +func (s *storage) UpdateCluster(ctx api.Context, cluster *federation.Cluster) error { + _, _, err := s.Update(ctx, cluster.Name, rest.DefaultUpdatedObjectInfo(cluster, api.Scheme)) + return err +} + +func (s *storage) DeleteCluster(ctx api.Context, name string) error { + _, err := s.Delete(ctx, name, nil) + return err +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/registry/cluster/strategy.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/registry/cluster/strategy.go new file mode 100644 index 000000000000..8557a66f7dd2 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/registry/cluster/strategy.go @@ -0,0 +1,115 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cluster + +import ( + "fmt" + + "k8s.io/kubernetes/federation/apis/federation" + "k8s.io/kubernetes/federation/apis/federation/validation" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/fields" + "k8s.io/kubernetes/pkg/labels" + "k8s.io/kubernetes/pkg/registry/generic" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/util/validation/field" +) + +type clusterStrategy struct { + runtime.ObjectTyper + api.NameGenerator +} + +var Strategy = clusterStrategy{api.Scheme, api.SimpleNameGenerator} + +func (clusterStrategy) NamespaceScoped() bool { + return false +} + +func ClusterToSelectableFields(cluster *federation.Cluster) fields.Set { + return generic.ObjectMetaFieldsSet(cluster.ObjectMeta, false) +} + +func MatchCluster(label labels.Selector, field fields.Selector) generic.Matcher { + return &generic.SelectionPredicate{ + Label: label, + Field: field, + GetAttrs: func(obj runtime.Object) (labels.Set, fields.Set, error) { + cluster, ok := obj.(*federation.Cluster) + if !ok { + return nil, nil, fmt.Errorf("given object is not a cluster.") + } + return labels.Set(cluster.ObjectMeta.Labels), ClusterToSelectableFields(cluster), nil + }, + } +} + +// PrepareForCreate clears fields that are not allowed to be set by end users on creation. +func (clusterStrategy) PrepareForCreate(obj runtime.Object) { + cluster := obj.(*federation.Cluster) + cluster.Status = federation.ClusterStatus{} +} + +// Validate validates a new cluster. +func (clusterStrategy) Validate(ctx api.Context, obj runtime.Object) field.ErrorList { + cluster := obj.(*federation.Cluster) + return validation.ValidateCluster(cluster) +} + +// Canonicalize normalizes the object after validation. +func (clusterStrategy) Canonicalize(obj runtime.Object) { +} + +// AllowCreateOnUpdate is false for cluster. +func (clusterStrategy) AllowCreateOnUpdate() bool { + return false +} + +// PrepareForUpdate clears fields that are not allowed to be set by end users on update. +func (clusterStrategy) PrepareForUpdate(obj, old runtime.Object) { + cluster := obj.(*federation.Cluster) + oldCluster := old.(*federation.Cluster) + cluster.Status = oldCluster.Status +} + +// ValidateUpdate is the default update validation for an end user. +func (clusterStrategy) ValidateUpdate(ctx api.Context, obj, old runtime.Object) field.ErrorList { + return validation.ValidateClusterUpdate(obj.(*federation.Cluster), old.(*federation.Cluster)) +} +func (clusterStrategy) AllowUnconditionalUpdate() bool { + return true +} + +type clusterStatusStrategy struct { + clusterStrategy +} + +var StatusStrategy = clusterStatusStrategy{Strategy} + +func (clusterStatusStrategy) PrepareForCreate(obj runtime.Object) { + _ = obj.(*federation.Cluster) +} +func (clusterStatusStrategy) PrepareForUpdate(obj, old runtime.Object) { + cluster := obj.(*federation.Cluster) + oldCluster := old.(*federation.Cluster) + cluster.Spec = oldCluster.Spec +} + +// ValidateUpdate is the default update validation for an end user. 
+func (clusterStatusStrategy) ValidateUpdate(ctx api.Context, obj, old runtime.Object) field.ErrorList { + return validation.ValidateClusterStatusUpdate(obj.(*federation.Cluster), old.(*federation.Cluster)) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/federation/registry/cluster/strategy_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/federation/registry/cluster/strategy_test.go new file mode 100644 index 000000000000..28db1d1abd3e --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/federation/registry/cluster/strategy_test.go @@ -0,0 +1,162 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cluster + +import ( + "testing" + + "k8s.io/kubernetes/federation/apis/federation" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/testapi" + apitesting "k8s.io/kubernetes/pkg/api/testing" + "k8s.io/kubernetes/pkg/fields" + "k8s.io/kubernetes/pkg/labels" + "reflect" +) + +func validNewCluster() *federation.Cluster { + return &federation.Cluster{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + ResourceVersion: "4", + Labels: map[string]string{ + "name": "foo", + }, + }, + Spec: federation.ClusterSpec{ + ServerAddressByClientCIDRs: []federation.ServerAddressByClientCIDR{ + { + ClientCIDR: "0.0.0.0/0", + ServerAddress: "localhost:8888", + }, + }, + }, + Status: federation.ClusterStatus{ + Conditions: []federation.ClusterCondition{ + {Type: federation.ClusterReady, Status: api.ConditionTrue}, + }, + }, + } +} + +func invalidNewCluster() *federation.Cluster { + // Create a cluster with empty ServerAddressByClientCIDRs (which is a required field). 
+ return &federation.Cluster{ + ObjectMeta: api.ObjectMeta{ + Name: "foo2", + ResourceVersion: "5", + }, + Status: federation.ClusterStatus{ + Conditions: []federation.ClusterCondition{ + {Type: federation.ClusterReady, Status: api.ConditionFalse}, + }, + }, + } +} + +func TestClusterStrategy(t *testing.T) { + ctx := api.NewDefaultContext() + if Strategy.NamespaceScoped() { + t.Errorf("Cluster should not be namespace scoped") + } + if Strategy.AllowCreateOnUpdate() { + t.Errorf("Cluster should not allow create on update") + } + + cluster := validNewCluster() + Strategy.PrepareForCreate(cluster) + if len(cluster.Status.Conditions) != 0 { + t.Errorf("Cluster should not allow setting conditions on create") + } + errs := Strategy.Validate(ctx, cluster) + if len(errs) != 0 { + t.Errorf("Unexpected error validating %v", errs) + } + + invalidCluster := invalidNewCluster() + Strategy.PrepareForUpdate(invalidCluster, cluster) + if reflect.DeepEqual(invalidCluster.Spec, cluster.Spec) || + !reflect.DeepEqual(invalidCluster.Status, cluster.Status) { + t.Error("Only the spec is expected to change") + } + errs = Strategy.ValidateUpdate(ctx, invalidCluster, cluster) + if len(errs) == 0 { + t.Errorf("Expected a validation error") + } + if cluster.ResourceVersion != "4" { + t.Errorf("Incoming resource version on update should not be mutated") + } +} + +func TestClusterStatusStrategy(t *testing.T) { + ctx := api.NewDefaultContext() + if StatusStrategy.NamespaceScoped() { + t.Errorf("Cluster should not be namespace scoped") + } + if StatusStrategy.AllowCreateOnUpdate() { + t.Errorf("Cluster should not allow create on update") + } + + cluster := validNewCluster() + invalidCluster := invalidNewCluster() + StatusStrategy.PrepareForUpdate(cluster, invalidCluster) + if !reflect.DeepEqual(invalidCluster.Spec, cluster.Spec) || + reflect.DeepEqual(invalidCluster.Status, cluster.Status) { + t.Logf("== cluster.Spec: %v\n", cluster.Spec) + t.Logf("== cluster.Status: %v\n", cluster.Status) + t.Logf("== invalidCluster.Spec: %v\n", invalidCluster.Spec) + t.Logf("== invalidCluster.Status: %v\n", invalidCluster.Status) + t.Error("Only the spec is expected to change") + } + errs := Strategy.ValidateUpdate(ctx, invalidCluster, cluster) + if len(errs) == 0 { + t.Errorf("Expected a validation error") + } + if cluster.ResourceVersion != "4" { + t.Errorf("Incoming resource version on update should not be mutated") + } +} + +func TestMatchCluster(t *testing.T) { + testFieldMap := map[bool][]fields.Set{ + true: { + {"metadata.name": "foo"}, + }, + false: { + {"foo": "bar"}, + }, + } + + for expectedResult, fieldSet := range testFieldMap { + for _, field := range fieldSet { + m := MatchCluster(labels.Everything(), field.AsSelector()) + _, matchesSingle := m.MatchesSingle() + if e, a := expectedResult, matchesSingle; e != a { + t.Errorf("%+v: expected %v, got %v", fieldSet, e, a) + } + } + } +} + +func TestSelectableFieldLabelConversions(t *testing.T) { + apitesting.TestSelectableFieldLabelConversionsOfKind(t, + testapi.Federation.GroupVersion().String(), + "Cluster", + labels.Set(ClusterToSelectableFields(&federation.Cluster{})), + nil, + ) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/annotations/annotations.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/annotations/annotations.go new file mode 100644 index 000000000000..dbdf11cbabcf --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/annotations/annotations.go @@ -0,0 +1,23 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package annotations
+
+const kubectlPrefix = "kubectl.kubernetes.io/"
+
+// LastAppliedConfigAnnotation is the annotation used to store the previous
+// configuration of a resource for use in a three-way diff by UpdateApplyAnnotation.
+const LastAppliedConfigAnnotation = kubectlPrefix + "last-applied-configuration"
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/annotations/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/annotations/doc.go
new file mode 100644
index 000000000000..6a422ea4f414
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/annotations/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package annotations defines annotation keys that are shared between server and client
+package annotations
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/context_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/context_test.go
new file mode 100644
index 000000000000..c384999ff24b
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/context_test.go
@@ -0,0 +1,68 @@
+/*
+Copyright 2014 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package api_test
+
+import (
+	"testing"
+
+	"k8s.io/kubernetes/pkg/api"
+)
+
+// TestNamespaceContext validates that a namespace can be set on and retrieved from a context object
+func TestNamespaceContext(t *testing.T) {
+	ctx := api.NewDefaultContext()
+	result, ok := api.NamespaceFrom(ctx)
+	if !ok {
+		t.Errorf("Error getting namespace")
+	}
+	if api.NamespaceDefault != result {
+		t.Errorf("Expected: %v, Actual: %v", api.NamespaceDefault, result)
+	}
+
+	ctx = api.NewContext()
+	result, ok = api.NamespaceFrom(ctx)
+	if ok {
+		t.Errorf("Should not be ok because there is no namespace on the context")
+	}
+}
+
+// TestValidNamespace validates that namespace rules are enforced on a resource prior to create or update
+func TestValidNamespace(t *testing.T) {
+	ctx := api.NewDefaultContext()
+	namespace, _ := api.NamespaceFrom(ctx)
+	resource := api.ReplicationController{}
+	if !api.ValidNamespace(ctx, &resource.ObjectMeta) {
+		t.Errorf("expected success")
+	}
+	if namespace != resource.Namespace {
+		t.Errorf("expected resource to have the default namespace assigned during validation")
+	}
+	resource = api.ReplicationController{ObjectMeta: api.ObjectMeta{Namespace: "other"}}
+	if api.ValidNamespace(ctx, &resource.ObjectMeta) {
+		t.Errorf("Expected validation to fail because the resource namespace does not match the context namespace")
+	}
+	ctx = api.NewContext()
+	if api.ValidNamespace(ctx, &resource.ObjectMeta) {
+		t.Errorf("Expected validation to fail because the context has no namespace")
+	}
+
+	ctx = api.NewContext()
+	ns := api.NamespaceValue(ctx)
+	if ns != "" {
+		t.Errorf("Expected the empty string")
+	}
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/conversion.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/conversion.go
index e0605fa098e7..7ae1e0184e39 100644
--- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/conversion.go
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/conversion.go
@@ -41,20 +41,20 @@ func init() {
 		Convert_unversioned_ListMeta_To_unversioned_ListMeta,
 		Convert_intstr_IntOrString_To_intstr_IntOrString,
 		Convert_unversioned_Time_To_unversioned_Time,
-		Convert_string_slice_To_unversioned_Time,
+		Convert_Slice_string_To_unversioned_Time,
 		Convert_string_To_labels_Selector,
 		Convert_string_To_fields_Selector,
-		Convert_bool_ref_To_bool,
-		Convert_bool_To_bool_ref,
-		Convert_string_ref_To_string,
-		Convert_string_To_string_ref,
+		Convert_Pointer_bool_To_bool,
+		Convert_bool_To_Pointer_bool,
+		Convert_Pointer_string_To_string,
+		Convert_string_To_Pointer_string,
 		Convert_labels_Selector_To_string,
 		Convert_fields_Selector_To_string,
 		Convert_resource_Quantity_To_resource_Quantity,
 	)
 }

-func Convert_string_ref_To_string(in **string, out *string, s conversion.Scope) error {
+func Convert_Pointer_string_To_string(in **string, out *string, s conversion.Scope) error {
 	if *in == nil {
 		*out = ""
 		return nil
@@ -63,7 +63,7 @@ func Convert_string_ref_To_string(in **string, out *string, s conversion.Scope)
 	return nil
 }

-func Convert_string_To_string_ref(in *string, out **string, s conversion.Scope) error {
+func Convert_string_To_Pointer_string(in *string, out **string, s conversion.Scope) error {
 	if in == nil {
 		stringVar := ""
 		*out = &stringVar
@@ -73,7 +73,7 @@ func Convert_string_To_string_ref(in *string, out **string, s conversion.Scope)
 	return nil
 }

-func Convert_bool_ref_To_bool(in **bool, out *bool, s conversion.Scope) error {
+func Convert_Pointer_bool_To_bool(in **bool, out *bool, s conversion.Scope) error {
 	if *in == nil {
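 		// As with the string variant above, a nil pointer flattens to the
 		// type's zero value rather than surfacing an error.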
*out = false return nil @@ -82,7 +82,7 @@ func Convert_bool_ref_To_bool(in **bool, out *bool, s conversion.Scope) error { return nil } -func Convert_bool_To_bool_ref(in *bool, out **bool, s conversion.Scope) error { +func Convert_bool_To_Pointer_bool(in *bool, out **bool, s conversion.Scope) error { if in == nil { boolVar := false *out = &boolVar @@ -118,8 +118,8 @@ func Convert_unversioned_Time_To_unversioned_Time(in *unversioned.Time, out *unv return nil } -// Convert_string_slice_To_unversioned_Time allows converting a URL query parameter value -func Convert_string_slice_To_unversioned_Time(input *[]string, out *unversioned.Time, s conversion.Scope) error { +// Convert_Slice_string_To_unversioned_Time allows converting a URL query parameter value +func Convert_Slice_string_To_unversioned_Time(input *[]string, out *unversioned.Time, s conversion.Scope) error { str := "" if len(*input) > 0 { str = (*input)[0] @@ -158,7 +158,6 @@ func Convert_fields_Selector_To_string(in *fields.Selector, out *string, s conve return nil } func Convert_resource_Quantity_To_resource_Quantity(in *resource.Quantity, out *resource.Quantity, s conversion.Scope) error { - // Cannot deep copy these, because inf.Dec has unexported fields. - *out = *in.Copy() + *out = *in return nil } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/conversion_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/conversion_test.go new file mode 100644 index 000000000000..1f4c3a174ea4 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/conversion_test.go @@ -0,0 +1,123 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package api_test
+
+import (
+	"io/ioutil"
+	"math/rand"
+	"testing"
+
+	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/api/testapi"
+	apitesting "k8s.io/kubernetes/pkg/api/testing"
+	"k8s.io/kubernetes/pkg/runtime"
+	"k8s.io/kubernetes/pkg/util/diff"
+)
+
+func BenchmarkPodConversion(b *testing.B) {
+	apiObjectFuzzer := apitesting.FuzzerFor(nil, api.SchemeGroupVersion, rand.NewSource(benchmarkSeed))
+	items := make([]api.Pod, 4)
+	for i := range items {
+		apiObjectFuzzer.Fuzz(&items[i])
+		items[i].Spec.InitContainers = nil
+		items[i].Status.InitContainerStatuses = nil
+	}
+
+	// add a fixed item
+	data, err := ioutil.ReadFile("pod_example.json")
+	if err != nil {
+		b.Fatalf("Unexpected error while reading file: %v", err)
+	}
+	var pod api.Pod
+	if err := runtime.DecodeInto(testapi.Default.Codec(), data, &pod); err != nil {
+		b.Fatalf("Unexpected error decoding pod: %v", err)
+	}
+	items = append(items, pod)
+	width := len(items)
+
+	scheme := api.Scheme
+	for i := 0; i < b.N; i++ {
+		pod := &items[i%width]
+		versionedObj, err := scheme.UnsafeConvertToVersion(pod, *testapi.Default.GroupVersion())
+		if err != nil {
+			b.Fatalf("Conversion error: %v", err)
+		}
+		if _, err = scheme.UnsafeConvertToVersion(versionedObj, testapi.Default.InternalGroupVersion()); err != nil {
+			b.Fatalf("Conversion error: %v", err)
+		}
+	}
+}
+
+func BenchmarkNodeConversion(b *testing.B) {
+	data, err := ioutil.ReadFile("node_example.json")
+	if err != nil {
+		b.Fatalf("Unexpected error while reading file: %v", err)
+	}
+	var node api.Node
+	if err := runtime.DecodeInto(testapi.Default.Codec(), data, &node); err != nil {
+		b.Fatalf("Unexpected error decoding node: %v", err)
+	}
+
+	scheme := api.Scheme
+	var result *api.Node
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		versionedObj, err := scheme.UnsafeConvertToVersion(&node, *testapi.Default.GroupVersion())
+		if err != nil {
+			b.Fatalf("Conversion error: %v", err)
+		}
+		obj, err := scheme.UnsafeConvertToVersion(versionedObj, testapi.Default.InternalGroupVersion())
+		if err != nil {
+			b.Fatalf("Conversion error: %v", err)
+		}
+		result = obj.(*api.Node)
+	}
+	b.StopTimer()
+	if !api.Semantic.DeepDerivative(node, *result) {
+		b.Fatalf("Incorrect conversion: %s", diff.ObjectDiff(node, *result))
+	}
+}
+
+func BenchmarkReplicationControllerConversion(b *testing.B) {
+	data, err := ioutil.ReadFile("replication_controller_example.json")
+	if err != nil {
+		b.Fatalf("Unexpected error while reading file: %v", err)
+	}
+	var replicationController api.ReplicationController
+	if err := runtime.DecodeInto(testapi.Default.Codec(), data, &replicationController); err != nil {
+		b.Fatalf("Unexpected error decoding replication controller: %v", err)
+	}
+
+	scheme := api.Scheme
+	var result *api.ReplicationController
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		versionedObj, err := scheme.UnsafeConvertToVersion(&replicationController, *testapi.Default.GroupVersion())
+		if err != nil {
+			b.Fatalf("Conversion error: %v", err)
+		}
+		obj, err := scheme.UnsafeConvertToVersion(versionedObj, testapi.Default.InternalGroupVersion())
+		if err != nil {
+			b.Fatalf("Conversion error: %v", err)
+		}
+		result = obj.(*api.ReplicationController)
+	}
+	b.StopTimer()
+	if !api.Semantic.DeepDerivative(replicationController, *result) {
+		b.Fatalf("Incorrect conversion: expected %v, got %v", replicationController, *result)
+	}
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/copy_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/copy_test.go
new file mode 100644
index
000000000000..af81d027329a --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/copy_test.go @@ -0,0 +1,68 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package api_test + +import ( + "math/rand" + "reflect" + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/testapi" + apitesting "k8s.io/kubernetes/pkg/api/testing" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/util/diff" + + "github.com/google/gofuzz" +) + +func TestDeepCopyApiObjects(t *testing.T) { + for i := 0; i < *fuzzIters; i++ { + for _, version := range []unversioned.GroupVersion{testapi.Default.InternalGroupVersion(), *testapi.Default.GroupVersion()} { + f := apitesting.FuzzerFor(t, version, rand.NewSource(rand.Int63())) + for kind := range api.Scheme.KnownTypes(version) { + doDeepCopyTest(t, version.WithKind(kind), f) + } + } + } +} + +func doDeepCopyTest(t *testing.T, kind unversioned.GroupVersionKind, f *fuzz.Fuzzer) { + item, err := api.Scheme.New(kind) + if err != nil { + t.Fatalf("Could not create a %v: %s", kind, err) + } + f.Fuzz(item) + itemCopy, err := api.Scheme.DeepCopy(item) + if err != nil { + t.Errorf("Could not deep copy a %v: %s", kind, err) + return + } + + if !reflect.DeepEqual(item, itemCopy) { + t.Errorf("\nexpected: %#v\n\ngot: %#v\n\ndiff: %v", item, itemCopy, diff.ObjectGoPrintSideBySide(item, itemCopy)) + } +} + +func TestDeepCopySingleType(t *testing.T) { + for i := 0; i < *fuzzIters; i++ { + for _, version := range []unversioned.GroupVersion{testapi.Default.InternalGroupVersion(), *testapi.Default.GroupVersion()} { + f := apitesting.FuzzerFor(t, version, rand.NewSource(rand.Int63())) + doDeepCopyTest(t, version.WithKind("Pod"), f) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/deep_copy_generated.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/deep_copy_generated.go index aa2712e1025b..ec1cd7ce7dd4 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/deep_copy_generated.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/deep_copy_generated.go @@ -1,3 +1,5 @@ +// +build !ignore_autogenerated + /* Copyright 2016 The Kubernetes Authors All rights reserved. 
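The hunk above adds a "+build !ignore_autogenerated" tag so tooling can compile the tree with the generated deep-copy code excluded while it is being regenerated. Every function in this file follows the same DeepCopy_api_<Type>(in, out, c) shape; the sketch below shows how one of them would be invoked directly. It is a minimal illustration, not part of the patch, and assumes conversion.NewCloner from k8s.io/kubernetes/pkg/conversion.

// deepcopy_sketch.go -- hypothetical usage of a generated deep-copy function.
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/conversion"
)

func main() {
	in := api.Pod{}
	in.Name = "example"
	in.Labels = map[string]string{"app": "demo"}

	var out api.Pod
	// Generated functions take the source by value, the destination by pointer,
	// and a *conversion.Cloner used as a fallback for fields without a helper.
	if err := api.DeepCopy_api_Pod(in, &out, conversion.NewCloner()); err != nil {
		fmt.Println("deep copy failed:", err)
		return
	}

	out.Labels["app"] = "changed"
	fmt.Println(in.Labels["app"]) // still "demo": the map was copied, not aliased
}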
@@ -25,8 +27,8 @@ import ( fields "k8s.io/kubernetes/pkg/fields" labels "k8s.io/kubernetes/pkg/labels" runtime "k8s.io/kubernetes/pkg/runtime" + types "k8s.io/kubernetes/pkg/types" intstr "k8s.io/kubernetes/pkg/util/intstr" - sets "k8s.io/kubernetes/pkg/util/sets" ) func init() { @@ -115,6 +117,7 @@ func init() { DeepCopy_api_ObjectFieldSelector, DeepCopy_api_ObjectMeta, DeepCopy_api_ObjectReference, + DeepCopy_api_OwnerReference, DeepCopy_api_PersistentVolume, DeepCopy_api_PersistentVolumeClaim, DeepCopy_api_PersistentVolumeClaimList, @@ -126,6 +129,9 @@ func init() { DeepCopy_api_PersistentVolumeSpec, DeepCopy_api_PersistentVolumeStatus, DeepCopy_api_Pod, + DeepCopy_api_PodAffinity, + DeepCopy_api_PodAffinityTerm, + DeepCopy_api_PodAntiAffinity, DeepCopy_api_PodAttachOptions, DeepCopy_api_PodCondition, DeepCopy_api_PodExecOptions, @@ -139,6 +145,7 @@ func init() { DeepCopy_api_PodTemplate, DeepCopy_api_PodTemplateList, DeepCopy_api_PodTemplateSpec, + DeepCopy_api_Preconditions, DeepCopy_api_PreferredSchedulingTerm, DeepCopy_api_Probe, DeepCopy_api_RBDVolumeSource, @@ -147,6 +154,7 @@ func init() { DeepCopy_api_ReplicationControllerList, DeepCopy_api_ReplicationControllerSpec, DeepCopy_api_ReplicationControllerStatus, + DeepCopy_api_ResourceFieldSelector, DeepCopy_api_ResourceQuota, DeepCopy_api_ResourceQuotaList, DeepCopy_api_ResourceQuotaSpec, @@ -168,19 +176,13 @@ func init() { DeepCopy_api_ServiceSpec, DeepCopy_api_ServiceStatus, DeepCopy_api_TCPSocketAction, + DeepCopy_api_Taint, + DeepCopy_api_Toleration, DeepCopy_api_Volume, DeepCopy_api_VolumeMount, DeepCopy_api_VolumeSource, - DeepCopy_conversion_Meta, - DeepCopy_intstr_IntOrString, - DeepCopy_sets_Empty, - DeepCopy_unversioned_GroupKind, - DeepCopy_unversioned_GroupResource, - DeepCopy_unversioned_GroupVersion, - DeepCopy_unversioned_GroupVersionKind, - DeepCopy_unversioned_GroupVersionResource, - DeepCopy_unversioned_ListMeta, - DeepCopy_unversioned_TypeMeta, + DeepCopy_api_VsphereVirtualDiskVolumeSource, + DeepCopy_api_WeightedPodAffinityTerm, ); err != nil { // if one of the deep copy functions is malformed, detect it immediately. 
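 		// A failure here means a registered function does not match the expected
 		// DeepCopy_api_<Type>(in, out, c) signature, e.g. after a bad regeneration.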
panic(err) @@ -205,6 +207,24 @@ func DeepCopy_api_Affinity(in Affinity, out *Affinity, c *conversion.Cloner) err } else { out.NodeAffinity = nil } + if in.PodAffinity != nil { + in, out := in.PodAffinity, &out.PodAffinity + *out = new(PodAffinity) + if err := DeepCopy_api_PodAffinity(*in, *out, c); err != nil { + return err + } + } else { + out.PodAffinity = nil + } + if in.PodAntiAffinity != nil { + in, out := in.PodAntiAffinity, &out.PodAntiAffinity + *out = new(PodAntiAffinity) + if err := DeepCopy_api_PodAntiAffinity(*in, *out, c); err != nil { + return err + } + } else { + out.PodAntiAffinity = nil + } return nil } @@ -216,7 +236,7 @@ func DeepCopy_api_AzureFileVolumeSource(in AzureFileVolumeSource, out *AzureFile } func DeepCopy_api_Binding(in Binding, out *Binding, c *conversion.Cloner) error { - if err := DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } if err := DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { @@ -290,7 +310,7 @@ func DeepCopy_api_ComponentCondition(in ComponentCondition, out *ComponentCondit } func DeepCopy_api_ComponentStatus(in ComponentStatus, out *ComponentStatus, c *conversion.Cloner) error { - if err := DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } if err := DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { @@ -311,10 +331,10 @@ func DeepCopy_api_ComponentStatus(in ComponentStatus, out *ComponentStatus, c *c } func DeepCopy_api_ComponentStatusList(in ComponentStatusList, out *ComponentStatusList, c *conversion.Cloner) error { - if err := DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } - if err := DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { return err } if in.Items != nil { @@ -332,7 +352,7 @@ func DeepCopy_api_ComponentStatusList(in ComponentStatusList, out *ComponentStat } func DeepCopy_api_ConfigMap(in ConfigMap, out *ConfigMap, c *conversion.Cloner) error { - if err := DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } if err := DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { @@ -359,10 +379,10 @@ func DeepCopy_api_ConfigMapKeySelector(in ConfigMapKeySelector, out *ConfigMapKe } func DeepCopy_api_ConfigMapList(in ConfigMapList, out *ConfigMapList, c *conversion.Cloner) error { - if err := DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } - if err := DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { return err } if in.Items != nil { @@ -548,10 +568,8 @@ func DeepCopy_api_ContainerState(in ContainerState, out *ContainerState, c *conv } func DeepCopy_api_ContainerStateRunning(in ContainerStateRunning, out *ContainerStateRunning, c *conversion.Cloner) error { - 
if newVal, err := c.DeepCopy(in.StartedAt); err != nil { + if err := unversioned.DeepCopy_unversioned_Time(in.StartedAt, &out.StartedAt, c); err != nil { return err - } else { - out.StartedAt = newVal.(unversioned.Time) } return nil } @@ -561,15 +579,11 @@ func DeepCopy_api_ContainerStateTerminated(in ContainerStateTerminated, out *Con out.Signal = in.Signal out.Reason = in.Reason out.Message = in.Message - if newVal, err := c.DeepCopy(in.StartedAt); err != nil { + if err := unversioned.DeepCopy_unversioned_Time(in.StartedAt, &out.StartedAt, c); err != nil { return err - } else { - out.StartedAt = newVal.(unversioned.Time) } - if newVal, err := c.DeepCopy(in.FinishedAt); err != nil { + if err := unversioned.DeepCopy_unversioned_Time(in.FinishedAt, &out.FinishedAt, c); err != nil { return err - } else { - out.FinishedAt = newVal.(unversioned.Time) } out.ContainerID = in.ContainerID return nil @@ -598,12 +612,16 @@ func DeepCopy_api_ContainerStatus(in ContainerStatus, out *ContainerStatus, c *c } func DeepCopy_api_ConversionError(in ConversionError, out *ConversionError, c *conversion.Cloner) error { - if newVal, err := c.DeepCopy(in.In); err != nil { + if in.In == nil { + out.In = nil + } else if newVal, err := c.DeepCopy(in.In); err != nil { return err } else { out.In = newVal.(interface{}) } - if newVal, err := c.DeepCopy(in.Out); err != nil { + if in.Out == nil { + out.Out = nil + } else if newVal, err := c.DeepCopy(in.Out); err != nil { return err } else { out.Out = newVal.(interface{}) @@ -618,7 +636,7 @@ func DeepCopy_api_DaemonEndpoint(in DaemonEndpoint, out *DaemonEndpoint, c *conv } func DeepCopy_api_DeleteOptions(in DeleteOptions, out *DeleteOptions, c *conversion.Cloner) error { - if err := DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } if in.GracePeriodSeconds != nil { @@ -628,13 +646,44 @@ func DeepCopy_api_DeleteOptions(in DeleteOptions, out *DeleteOptions, c *convers } else { out.GracePeriodSeconds = nil } + if in.Preconditions != nil { + in, out := in.Preconditions, &out.Preconditions + *out = new(Preconditions) + if err := DeepCopy_api_Preconditions(*in, *out, c); err != nil { + return err + } + } else { + out.Preconditions = nil + } + if in.OrphanDependents != nil { + in, out := in.OrphanDependents, &out.OrphanDependents + *out = new(bool) + **out = *in + } else { + out.OrphanDependents = nil + } return nil } func DeepCopy_api_DownwardAPIVolumeFile(in DownwardAPIVolumeFile, out *DownwardAPIVolumeFile, c *conversion.Cloner) error { out.Path = in.Path - if err := DeepCopy_api_ObjectFieldSelector(in.FieldRef, &out.FieldRef, c); err != nil { - return err + if in.FieldRef != nil { + in, out := in.FieldRef, &out.FieldRef + *out = new(ObjectFieldSelector) + if err := DeepCopy_api_ObjectFieldSelector(*in, *out, c); err != nil { + return err + } + } else { + out.FieldRef = nil + } + if in.ResourceFieldRef != nil { + in, out := in.ResourceFieldRef, &out.ResourceFieldRef + *out = new(ResourceFieldSelector) + if err := DeepCopy_api_ResourceFieldSelector(*in, *out, c); err != nil { + return err + } + } else { + out.ResourceFieldRef = nil } return nil } @@ -661,6 +710,7 @@ func DeepCopy_api_EmptyDirVolumeSource(in EmptyDirVolumeSource, out *EmptyDirVol func DeepCopy_api_EndpointAddress(in EndpointAddress, out *EndpointAddress, c *conversion.Cloner) error { out.IP = in.IP + out.Hostname = in.Hostname if in.TargetRef != nil { in, out := in.TargetRef, 
&out.TargetRef *out = new(ObjectReference) @@ -718,7 +768,7 @@ func DeepCopy_api_EndpointSubset(in EndpointSubset, out *EndpointSubset, c *conv } func DeepCopy_api_Endpoints(in Endpoints, out *Endpoints, c *conversion.Cloner) error { - if err := DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } if err := DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { @@ -739,10 +789,10 @@ func DeepCopy_api_Endpoints(in Endpoints, out *Endpoints, c *conversion.Cloner) } func DeepCopy_api_EndpointsList(in EndpointsList, out *EndpointsList, c *conversion.Cloner) error { - if err := DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } - if err := DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { return err } if in.Items != nil { @@ -784,6 +834,15 @@ func DeepCopy_api_EnvVarSource(in EnvVarSource, out *EnvVarSource, c *conversion } else { out.FieldRef = nil } + if in.ResourceFieldRef != nil { + in, out := in.ResourceFieldRef, &out.ResourceFieldRef + *out = new(ResourceFieldSelector) + if err := DeepCopy_api_ResourceFieldSelector(*in, *out, c); err != nil { + return err + } + } else { + out.ResourceFieldRef = nil + } if in.ConfigMapKeyRef != nil { in, out := in.ConfigMapKeyRef, &out.ConfigMapKeyRef *out = new(ConfigMapKeySelector) @@ -806,7 +865,7 @@ func DeepCopy_api_EnvVarSource(in EnvVarSource, out *EnvVarSource, c *conversion } func DeepCopy_api_Event(in Event, out *Event, c *conversion.Cloner) error { - if err := DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } if err := DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { @@ -820,15 +879,11 @@ func DeepCopy_api_Event(in Event, out *Event, c *conversion.Cloner) error { if err := DeepCopy_api_EventSource(in.Source, &out.Source, c); err != nil { return err } - if newVal, err := c.DeepCopy(in.FirstTimestamp); err != nil { + if err := unversioned.DeepCopy_unversioned_Time(in.FirstTimestamp, &out.FirstTimestamp, c); err != nil { return err - } else { - out.FirstTimestamp = newVal.(unversioned.Time) } - if newVal, err := c.DeepCopy(in.LastTimestamp); err != nil { + if err := unversioned.DeepCopy_unversioned_Time(in.LastTimestamp, &out.LastTimestamp, c); err != nil { return err - } else { - out.LastTimestamp = newVal.(unversioned.Time) } out.Count = in.Count out.Type = in.Type @@ -836,10 +891,10 @@ func DeepCopy_api_Event(in Event, out *Event, c *conversion.Cloner) error { } func DeepCopy_api_EventList(in EventList, out *EventList, c *conversion.Cloner) error { - if err := DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } - if err := DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { return err } if in.Items != nil { @@ -874,7 +929,7 @@ func DeepCopy_api_ExecAction(in ExecAction, out *ExecAction, c *conversion.Clone } func 
DeepCopy_api_ExportOptions(in ExportOptions, out *ExportOptions, c *conversion.Cloner) error { - if err := DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } out.Export = in.Export @@ -892,7 +947,7 @@ func DeepCopy_api_FCVolumeSource(in FCVolumeSource, out *FCVolumeSource, c *conv } if in.Lun != nil { in, out := in.Lun, &out.Lun - *out = new(int) + *out = new(int32) **out = *in } else { out.Lun = nil @@ -956,7 +1011,7 @@ func DeepCopy_api_GlusterfsVolumeSource(in GlusterfsVolumeSource, out *Glusterfs func DeepCopy_api_HTTPGetAction(in HTTPGetAction, out *HTTPGetAction, c *conversion.Cloner) error { out.Path = in.Path - if err := DeepCopy_intstr_IntOrString(in.Port, &out.Port, c); err != nil { + if err := intstr.DeepCopy_intstr_IntOrString(in.Port, &out.Port, c); err != nil { return err } out.Host = in.Host @@ -1056,7 +1111,7 @@ func DeepCopy_api_Lifecycle(in Lifecycle, out *Lifecycle, c *conversion.Cloner) } func DeepCopy_api_LimitRange(in LimitRange, out *LimitRange, c *conversion.Cloner) error { - if err := DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } if err := DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { @@ -1074,11 +1129,11 @@ func DeepCopy_api_LimitRangeItem(in LimitRangeItem, out *LimitRangeItem, c *conv in, out := in.Max, &out.Max *out = make(ResourceList) for key, val := range in { - if newVal, err := c.DeepCopy(val); err != nil { + newVal := new(resource.Quantity) + if err := resource.DeepCopy_resource_Quantity(val, newVal, c); err != nil { return err - } else { - (*out)[key] = newVal.(resource.Quantity) } + (*out)[key] = *newVal } } else { out.Max = nil @@ -1087,11 +1142,11 @@ func DeepCopy_api_LimitRangeItem(in LimitRangeItem, out *LimitRangeItem, c *conv in, out := in.Min, &out.Min *out = make(ResourceList) for key, val := range in { - if newVal, err := c.DeepCopy(val); err != nil { + newVal := new(resource.Quantity) + if err := resource.DeepCopy_resource_Quantity(val, newVal, c); err != nil { return err - } else { - (*out)[key] = newVal.(resource.Quantity) } + (*out)[key] = *newVal } } else { out.Min = nil @@ -1100,11 +1155,11 @@ func DeepCopy_api_LimitRangeItem(in LimitRangeItem, out *LimitRangeItem, c *conv in, out := in.Default, &out.Default *out = make(ResourceList) for key, val := range in { - if newVal, err := c.DeepCopy(val); err != nil { + newVal := new(resource.Quantity) + if err := resource.DeepCopy_resource_Quantity(val, newVal, c); err != nil { return err - } else { - (*out)[key] = newVal.(resource.Quantity) } + (*out)[key] = *newVal } } else { out.Default = nil @@ -1113,11 +1168,11 @@ func DeepCopy_api_LimitRangeItem(in LimitRangeItem, out *LimitRangeItem, c *conv in, out := in.DefaultRequest, &out.DefaultRequest *out = make(ResourceList) for key, val := range in { - if newVal, err := c.DeepCopy(val); err != nil { + newVal := new(resource.Quantity) + if err := resource.DeepCopy_resource_Quantity(val, newVal, c); err != nil { return err - } else { - (*out)[key] = newVal.(resource.Quantity) } + (*out)[key] = *newVal } } else { out.DefaultRequest = nil @@ -1126,11 +1181,11 @@ func DeepCopy_api_LimitRangeItem(in LimitRangeItem, out *LimitRangeItem, c *conv in, out := in.MaxLimitRequestRatio, &out.MaxLimitRequestRatio *out = make(ResourceList) for key, val := range in 
{ - if newVal, err := c.DeepCopy(val); err != nil { + newVal := new(resource.Quantity) + if err := resource.DeepCopy_resource_Quantity(val, newVal, c); err != nil { return err - } else { - (*out)[key] = newVal.(resource.Quantity) } + (*out)[key] = *newVal } } else { out.MaxLimitRequestRatio = nil @@ -1139,10 +1194,10 @@ func DeepCopy_api_LimitRangeItem(in LimitRangeItem, out *LimitRangeItem, c *conv } func DeepCopy_api_LimitRangeList(in LimitRangeList, out *LimitRangeList, c *conversion.Cloner) error { - if err := DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } - if err := DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { return err } if in.Items != nil { @@ -1175,10 +1230,10 @@ func DeepCopy_api_LimitRangeSpec(in LimitRangeSpec, out *LimitRangeSpec, c *conv } func DeepCopy_api_List(in List, out *List, c *conversion.Cloner) error { - if err := DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } - if err := DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { return err } if in.Items != nil { @@ -1198,15 +1253,19 @@ func DeepCopy_api_List(in List, out *List, c *conversion.Cloner) error { } func DeepCopy_api_ListOptions(in ListOptions, out *ListOptions, c *conversion.Cloner) error { - if err := DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } - if newVal, err := c.DeepCopy(in.LabelSelector); err != nil { + if in.LabelSelector == nil { + out.LabelSelector = nil + } else if newVal, err := c.DeepCopy(in.LabelSelector); err != nil { return err } else { out.LabelSelector = newVal.(labels.Selector) } - if newVal, err := c.DeepCopy(in.FieldSelector); err != nil { + if in.FieldSelector == nil { + out.FieldSelector = nil + } else if newVal, err := c.DeepCopy(in.FieldSelector); err != nil { return err } else { out.FieldSelector = newVal.(fields.Selector) @@ -1257,7 +1316,7 @@ func DeepCopy_api_NFSVolumeSource(in NFSVolumeSource, out *NFSVolumeSource, c *c } func DeepCopy_api_Namespace(in Namespace, out *Namespace, c *conversion.Cloner) error { - if err := DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } if err := DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { @@ -1273,10 +1332,10 @@ func DeepCopy_api_Namespace(in Namespace, out *Namespace, c *conversion.Cloner) } func DeepCopy_api_NamespaceList(in NamespaceList, out *NamespaceList, c *conversion.Cloner) error { - if err := DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } - if err := DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { return err } if in.Items != nil { @@ -1312,7 +1371,7 @@ 
func DeepCopy_api_NamespaceStatus(in NamespaceStatus, out *NamespaceStatus, c *c } func DeepCopy_api_Node(in Node, out *Node, c *conversion.Cloner) error { - if err := DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } if err := DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { @@ -1360,15 +1419,11 @@ func DeepCopy_api_NodeAffinity(in NodeAffinity, out *NodeAffinity, c *conversion func DeepCopy_api_NodeCondition(in NodeCondition, out *NodeCondition, c *conversion.Cloner) error { out.Type = in.Type out.Status = in.Status - if newVal, err := c.DeepCopy(in.LastHeartbeatTime); err != nil { + if err := unversioned.DeepCopy_unversioned_Time(in.LastHeartbeatTime, &out.LastHeartbeatTime, c); err != nil { return err - } else { - out.LastHeartbeatTime = newVal.(unversioned.Time) } - if newVal, err := c.DeepCopy(in.LastTransitionTime); err != nil { + if err := unversioned.DeepCopy_unversioned_Time(in.LastTransitionTime, &out.LastTransitionTime, c); err != nil { return err - } else { - out.LastTransitionTime = newVal.(unversioned.Time) } out.Reason = in.Reason out.Message = in.Message @@ -1383,10 +1438,10 @@ func DeepCopy_api_NodeDaemonEndpoints(in NodeDaemonEndpoints, out *NodeDaemonEnd } func DeepCopy_api_NodeList(in NodeList, out *NodeList, c *conversion.Cloner) error { - if err := DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } - if err := DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { return err } if in.Items != nil { @@ -1404,7 +1459,7 @@ func DeepCopy_api_NodeList(in NodeList, out *NodeList, c *conversion.Cloner) err } func DeepCopy_api_NodeProxyOptions(in NodeProxyOptions, out *NodeProxyOptions, c *conversion.Cloner) error { - if err := DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } out.Path = in.Path @@ -1416,11 +1471,11 @@ func DeepCopy_api_NodeResources(in NodeResources, out *NodeResources, c *convers in, out := in.Capacity, &out.Capacity *out = make(ResourceList) for key, val := range in { - if newVal, err := c.DeepCopy(val); err != nil { + newVal := new(resource.Quantity) + if err := resource.DeepCopy_resource_Quantity(val, newVal, c); err != nil { return err - } else { - (*out)[key] = newVal.(resource.Quantity) } + (*out)[key] = *newVal } } else { out.Capacity = nil @@ -1484,11 +1539,11 @@ func DeepCopy_api_NodeStatus(in NodeStatus, out *NodeStatus, c *conversion.Clone in, out := in.Capacity, &out.Capacity *out = make(ResourceList) for key, val := range in { - if newVal, err := c.DeepCopy(val); err != nil { + newVal := new(resource.Quantity) + if err := resource.DeepCopy_resource_Quantity(val, newVal, c); err != nil { return err - } else { - (*out)[key] = newVal.(resource.Quantity) } + (*out)[key] = *newVal } } else { out.Capacity = nil @@ -1497,11 +1552,11 @@ func DeepCopy_api_NodeStatus(in NodeStatus, out *NodeStatus, c *conversion.Clone in, out := in.Allocatable, &out.Allocatable *out = make(ResourceList) for key, val := range in { - if newVal, err := c.DeepCopy(val); err != nil { + newVal := new(resource.Quantity) + if err := 
resource.DeepCopy_resource_Quantity(val, newVal, c); err != nil { return err - } else { - (*out)[key] = newVal.(resource.Quantity) } + (*out)[key] = *newVal } } else { out.Allocatable = nil @@ -1558,6 +1613,8 @@ func DeepCopy_api_NodeSystemInfo(in NodeSystemInfo, out *NodeSystemInfo, c *conv out.ContainerRuntimeVersion = in.ContainerRuntimeVersion out.KubeletVersion = in.KubeletVersion out.KubeProxyVersion = in.KubeProxyVersion + out.OperatingSystem = in.OperatingSystem + out.Architecture = in.Architecture return nil } @@ -1575,18 +1632,14 @@ func DeepCopy_api_ObjectMeta(in ObjectMeta, out *ObjectMeta, c *conversion.Clone out.UID = in.UID out.ResourceVersion = in.ResourceVersion out.Generation = in.Generation - if newVal, err := c.DeepCopy(in.CreationTimestamp); err != nil { + if err := unversioned.DeepCopy_unversioned_Time(in.CreationTimestamp, &out.CreationTimestamp, c); err != nil { return err - } else { - out.CreationTimestamp = newVal.(unversioned.Time) } if in.DeletionTimestamp != nil { in, out := in.DeletionTimestamp, &out.DeletionTimestamp *out = new(unversioned.Time) - if newVal, err := c.DeepCopy(*in); err != nil { + if err := unversioned.DeepCopy_unversioned_Time(*in, *out, c); err != nil { return err - } else { - **out = newVal.(unversioned.Time) } } else { out.DeletionTimestamp = nil @@ -1616,6 +1669,24 @@ func DeepCopy_api_ObjectMeta(in ObjectMeta, out *ObjectMeta, c *conversion.Clone } else { out.Annotations = nil } + if in.OwnerReferences != nil { + in, out := in.OwnerReferences, &out.OwnerReferences + *out = make([]OwnerReference, len(in)) + for i := range in { + if err := DeepCopy_api_OwnerReference(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.OwnerReferences = nil + } + if in.Finalizers != nil { + in, out := in.Finalizers, &out.Finalizers + *out = make([]string, len(in)) + copy(*out, in) + } else { + out.Finalizers = nil + } return nil } @@ -1630,8 +1701,16 @@ func DeepCopy_api_ObjectReference(in ObjectReference, out *ObjectReference, c *c return nil } +func DeepCopy_api_OwnerReference(in OwnerReference, out *OwnerReference, c *conversion.Cloner) error { + out.APIVersion = in.APIVersion + out.Kind = in.Kind + out.Name = in.Name + out.UID = in.UID + return nil +} + func DeepCopy_api_PersistentVolume(in PersistentVolume, out *PersistentVolume, c *conversion.Cloner) error { - if err := DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } if err := DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { @@ -1647,7 +1726,7 @@ func DeepCopy_api_PersistentVolume(in PersistentVolume, out *PersistentVolume, c } func DeepCopy_api_PersistentVolumeClaim(in PersistentVolumeClaim, out *PersistentVolumeClaim, c *conversion.Cloner) error { - if err := DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } if err := DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { @@ -1663,10 +1742,10 @@ func DeepCopy_api_PersistentVolumeClaim(in PersistentVolumeClaim, out *Persisten } func DeepCopy_api_PersistentVolumeClaimList(in PersistentVolumeClaimList, out *PersistentVolumeClaimList, c *conversion.Cloner) error { - if err := DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, 
&out.TypeMeta, c); err != nil { return err } - if err := DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { return err } if in.Items != nil { @@ -1715,11 +1794,11 @@ func DeepCopy_api_PersistentVolumeClaimStatus(in PersistentVolumeClaimStatus, ou in, out := in.Capacity, &out.Capacity *out = make(ResourceList) for key, val := range in { - if newVal, err := c.DeepCopy(val); err != nil { + newVal := new(resource.Quantity) + if err := resource.DeepCopy_resource_Quantity(val, newVal, c); err != nil { return err - } else { - (*out)[key] = newVal.(resource.Quantity) } + (*out)[key] = *newVal } } else { out.Capacity = nil @@ -1734,10 +1813,10 @@ func DeepCopy_api_PersistentVolumeClaimVolumeSource(in PersistentVolumeClaimVolu } func DeepCopy_api_PersistentVolumeList(in PersistentVolumeList, out *PersistentVolumeList, c *conversion.Cloner) error { - if err := DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } - if err := DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { return err } if in.Items != nil { @@ -1872,6 +1951,15 @@ func DeepCopy_api_PersistentVolumeSource(in PersistentVolumeSource, out *Persist } else { out.AzureFile = nil } + if in.VsphereVolume != nil { + in, out := in.VsphereVolume, &out.VsphereVolume + *out = new(VsphereVirtualDiskVolumeSource) + if err := DeepCopy_api_VsphereVirtualDiskVolumeSource(*in, *out, c); err != nil { + return err + } + } else { + out.VsphereVolume = nil + } return nil } @@ -1880,11 +1968,11 @@ func DeepCopy_api_PersistentVolumeSpec(in PersistentVolumeSpec, out *PersistentV in, out := in.Capacity, &out.Capacity *out = make(ResourceList) for key, val := range in { - if newVal, err := c.DeepCopy(val); err != nil { + newVal := new(resource.Quantity) + if err := resource.DeepCopy_resource_Quantity(val, newVal, c); err != nil { return err - } else { - (*out)[key] = newVal.(resource.Quantity) } + (*out)[key] = *newVal } } else { out.Capacity = nil @@ -1922,7 +2010,7 @@ func DeepCopy_api_PersistentVolumeStatus(in PersistentVolumeStatus, out *Persist } func DeepCopy_api_Pod(in Pod, out *Pod, c *conversion.Cloner) error { - if err := DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } if err := DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { @@ -1937,8 +2025,81 @@ func DeepCopy_api_Pod(in Pod, out *Pod, c *conversion.Cloner) error { return nil } +func DeepCopy_api_PodAffinity(in PodAffinity, out *PodAffinity, c *conversion.Cloner) error { + if in.RequiredDuringSchedulingIgnoredDuringExecution != nil { + in, out := in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution + *out = make([]PodAffinityTerm, len(in)) + for i := range in { + if err := DeepCopy_api_PodAffinityTerm(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.RequiredDuringSchedulingIgnoredDuringExecution = nil + } + if in.PreferredDuringSchedulingIgnoredDuringExecution != nil { + in, out := in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution + *out = 
make([]WeightedPodAffinityTerm, len(in)) + for i := range in { + if err := DeepCopy_api_WeightedPodAffinityTerm(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.PreferredDuringSchedulingIgnoredDuringExecution = nil + } + return nil +} + +func DeepCopy_api_PodAffinityTerm(in PodAffinityTerm, out *PodAffinityTerm, c *conversion.Cloner) error { + if in.LabelSelector != nil { + in, out := in.LabelSelector, &out.LabelSelector + *out = new(unversioned.LabelSelector) + if err := unversioned.DeepCopy_unversioned_LabelSelector(*in, *out, c); err != nil { + return err + } + } else { + out.LabelSelector = nil + } + if in.Namespaces != nil { + in, out := in.Namespaces, &out.Namespaces + *out = make([]string, len(in)) + copy(*out, in) + } else { + out.Namespaces = nil + } + out.TopologyKey = in.TopologyKey + return nil +} + +func DeepCopy_api_PodAntiAffinity(in PodAntiAffinity, out *PodAntiAffinity, c *conversion.Cloner) error { + if in.RequiredDuringSchedulingIgnoredDuringExecution != nil { + in, out := in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution + *out = make([]PodAffinityTerm, len(in)) + for i := range in { + if err := DeepCopy_api_PodAffinityTerm(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.RequiredDuringSchedulingIgnoredDuringExecution = nil + } + if in.PreferredDuringSchedulingIgnoredDuringExecution != nil { + in, out := in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution + *out = make([]WeightedPodAffinityTerm, len(in)) + for i := range in { + if err := DeepCopy_api_WeightedPodAffinityTerm(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.PreferredDuringSchedulingIgnoredDuringExecution = nil + } + return nil +} + func DeepCopy_api_PodAttachOptions(in PodAttachOptions, out *PodAttachOptions, c *conversion.Cloner) error { - if err := DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } out.Stdin = in.Stdin @@ -1952,15 +2113,11 @@ func DeepCopy_api_PodAttachOptions(in PodAttachOptions, out *PodAttachOptions, c func DeepCopy_api_PodCondition(in PodCondition, out *PodCondition, c *conversion.Cloner) error { out.Type = in.Type out.Status = in.Status - if newVal, err := c.DeepCopy(in.LastProbeTime); err != nil { + if err := unversioned.DeepCopy_unversioned_Time(in.LastProbeTime, &out.LastProbeTime, c); err != nil { return err - } else { - out.LastProbeTime = newVal.(unversioned.Time) } - if newVal, err := c.DeepCopy(in.LastTransitionTime); err != nil { + if err := unversioned.DeepCopy_unversioned_Time(in.LastTransitionTime, &out.LastTransitionTime, c); err != nil { return err - } else { - out.LastTransitionTime = newVal.(unversioned.Time) } out.Reason = in.Reason out.Message = in.Message @@ -1968,7 +2125,7 @@ func DeepCopy_api_PodCondition(in PodCondition, out *PodCondition, c *conversion } func DeepCopy_api_PodExecOptions(in PodExecOptions, out *PodExecOptions, c *conversion.Cloner) error { - if err := DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } out.Stdin = in.Stdin @@ -1987,10 +2144,10 @@ func DeepCopy_api_PodExecOptions(in PodExecOptions, out *PodExecOptions, c *conv } func DeepCopy_api_PodList(in PodList, out *PodList, c *conversion.Cloner) 
error { - if err := DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } - if err := DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { return err } if in.Items != nil { @@ -2008,7 +2165,7 @@ func DeepCopy_api_PodList(in PodList, out *PodList, c *conversion.Cloner) error } func DeepCopy_api_PodLogOptions(in PodLogOptions, out *PodLogOptions, c *conversion.Cloner) error { - if err := DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } out.Container = in.Container @@ -2024,10 +2181,8 @@ func DeepCopy_api_PodLogOptions(in PodLogOptions, out *PodLogOptions, c *convers if in.SinceTime != nil { in, out := in.SinceTime, &out.SinceTime *out = new(unversioned.Time) - if newVal, err := c.DeepCopy(*in); err != nil { + if err := unversioned.DeepCopy_unversioned_Time(*in, *out, c); err != nil { return err - } else { - **out = newVal.(unversioned.Time) } } else { out.SinceTime = nil @@ -2051,7 +2206,7 @@ func DeepCopy_api_PodLogOptions(in PodLogOptions, out *PodLogOptions, c *convers } func DeepCopy_api_PodProxyOptions(in PodProxyOptions, out *PodProxyOptions, c *conversion.Cloner) error { - if err := DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } out.Path = in.Path @@ -2114,6 +2269,17 @@ func DeepCopy_api_PodSpec(in PodSpec, out *PodSpec, c *conversion.Cloner) error } else { out.Volumes = nil } + if in.InitContainers != nil { + in, out := in.InitContainers, &out.InitContainers + *out = make([]Container, len(in)) + for i := range in { + if err := DeepCopy_api_Container(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.InitContainers = nil + } if in.Containers != nil { in, out := in.Containers, &out.Containers *out = make([]Container, len(in)) @@ -2172,6 +2338,8 @@ func DeepCopy_api_PodSpec(in PodSpec, out *PodSpec, c *conversion.Cloner) error } else { out.ImagePullSecrets = nil } + out.Hostname = in.Hostname + out.Subdomain = in.Subdomain return nil } @@ -2195,14 +2363,23 @@ func DeepCopy_api_PodStatus(in PodStatus, out *PodStatus, c *conversion.Cloner) if in.StartTime != nil { in, out := in.StartTime, &out.StartTime *out = new(unversioned.Time) - if newVal, err := c.DeepCopy(*in); err != nil { + if err := unversioned.DeepCopy_unversioned_Time(*in, *out, c); err != nil { return err - } else { - **out = newVal.(unversioned.Time) } } else { out.StartTime = nil } + if in.InitContainerStatuses != nil { + in, out := in.InitContainerStatuses, &out.InitContainerStatuses + *out = make([]ContainerStatus, len(in)) + for i := range in { + if err := DeepCopy_api_ContainerStatus(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.InitContainerStatuses = nil + } if in.ContainerStatuses != nil { in, out := in.ContainerStatuses, &out.ContainerStatuses *out = make([]ContainerStatus, len(in)) @@ -2218,7 +2395,7 @@ func DeepCopy_api_PodStatus(in PodStatus, out *PodStatus, c *conversion.Cloner) } func DeepCopy_api_PodStatusResult(in PodStatusResult, out *PodStatusResult, c *conversion.Cloner) error { - if err := DeepCopy_unversioned_TypeMeta(in.TypeMeta, 
&out.TypeMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } if err := DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { @@ -2231,7 +2408,7 @@ func DeepCopy_api_PodStatusResult(in PodStatusResult, out *PodStatusResult, c *c } func DeepCopy_api_PodTemplate(in PodTemplate, out *PodTemplate, c *conversion.Cloner) error { - if err := DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } if err := DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { @@ -2244,10 +2421,10 @@ func DeepCopy_api_PodTemplate(in PodTemplate, out *PodTemplate, c *conversion.Cl } func DeepCopy_api_PodTemplateList(in PodTemplateList, out *PodTemplateList, c *conversion.Cloner) error { - if err := DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } - if err := DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { return err } if in.Items != nil { @@ -2274,6 +2451,21 @@ func DeepCopy_api_PodTemplateSpec(in PodTemplateSpec, out *PodTemplateSpec, c *c return nil } +func DeepCopy_api_Preconditions(in Preconditions, out *Preconditions, c *conversion.Cloner) error { + if in.UID != nil { + in, out := in.UID, &out.UID + *out = new(types.UID) + if newVal, err := c.DeepCopy(*in); err != nil { + return err + } else { + **out = newVal.(types.UID) + } + } else { + out.UID = nil + } + return nil +} + func DeepCopy_api_PreferredSchedulingTerm(in PreferredSchedulingTerm, out *PreferredSchedulingTerm, c *conversion.Cloner) error { out.Weight = in.Weight if err := DeepCopy_api_NodeSelectorTerm(in.Preference, &out.Preference, c); err != nil { @@ -2321,7 +2513,7 @@ func DeepCopy_api_RBDVolumeSource(in RBDVolumeSource, out *RBDVolumeSource, c *c } func DeepCopy_api_RangeAllocation(in RangeAllocation, out *RangeAllocation, c *conversion.Cloner) error { - if err := DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } if err := DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { @@ -2339,7 +2531,7 @@ func DeepCopy_api_RangeAllocation(in RangeAllocation, out *RangeAllocation, c *c } func DeepCopy_api_ReplicationController(in ReplicationController, out *ReplicationController, c *conversion.Cloner) error { - if err := DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } if err := DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { @@ -2355,10 +2547,10 @@ func DeepCopy_api_ReplicationController(in ReplicationController, out *Replicati } func DeepCopy_api_ReplicationControllerList(in ReplicationControllerList, out *ReplicationControllerList, c *conversion.Cloner) error { - if err := DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } - if err := DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err 
!= nil { + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { return err } if in.Items != nil { @@ -2405,8 +2597,17 @@ func DeepCopy_api_ReplicationControllerStatus(in ReplicationControllerStatus, ou return nil } +func DeepCopy_api_ResourceFieldSelector(in ResourceFieldSelector, out *ResourceFieldSelector, c *conversion.Cloner) error { + out.ContainerName = in.ContainerName + out.Resource = in.Resource + if err := resource.DeepCopy_resource_Quantity(in.Divisor, &out.Divisor, c); err != nil { + return err + } + return nil +} + func DeepCopy_api_ResourceQuota(in ResourceQuota, out *ResourceQuota, c *conversion.Cloner) error { - if err := DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } if err := DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { @@ -2422,10 +2623,10 @@ func DeepCopy_api_ResourceQuota(in ResourceQuota, out *ResourceQuota, c *convers } func DeepCopy_api_ResourceQuotaList(in ResourceQuotaList, out *ResourceQuotaList, c *conversion.Cloner) error { - if err := DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } - if err := DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { return err } if in.Items != nil { @@ -2447,11 +2648,11 @@ func DeepCopy_api_ResourceQuotaSpec(in ResourceQuotaSpec, out *ResourceQuotaSpec in, out := in.Hard, &out.Hard *out = make(ResourceList) for key, val := range in { - if newVal, err := c.DeepCopy(val); err != nil { + newVal := new(resource.Quantity) + if err := resource.DeepCopy_resource_Quantity(val, newVal, c); err != nil { return err - } else { - (*out)[key] = newVal.(resource.Quantity) } + (*out)[key] = *newVal } } else { out.Hard = nil @@ -2473,11 +2674,11 @@ func DeepCopy_api_ResourceQuotaStatus(in ResourceQuotaStatus, out *ResourceQuota in, out := in.Hard, &out.Hard *out = make(ResourceList) for key, val := range in { - if newVal, err := c.DeepCopy(val); err != nil { + newVal := new(resource.Quantity) + if err := resource.DeepCopy_resource_Quantity(val, newVal, c); err != nil { return err - } else { - (*out)[key] = newVal.(resource.Quantity) } + (*out)[key] = *newVal } } else { out.Hard = nil @@ -2486,11 +2687,11 @@ func DeepCopy_api_ResourceQuotaStatus(in ResourceQuotaStatus, out *ResourceQuota in, out := in.Used, &out.Used *out = make(ResourceList) for key, val := range in { - if newVal, err := c.DeepCopy(val); err != nil { + newVal := new(resource.Quantity) + if err := resource.DeepCopy_resource_Quantity(val, newVal, c); err != nil { return err - } else { - (*out)[key] = newVal.(resource.Quantity) } + (*out)[key] = *newVal } } else { out.Used = nil @@ -2503,11 +2704,11 @@ func DeepCopy_api_ResourceRequirements(in ResourceRequirements, out *ResourceReq in, out := in.Limits, &out.Limits *out = make(ResourceList) for key, val := range in { - if newVal, err := c.DeepCopy(val); err != nil { + newVal := new(resource.Quantity) + if err := resource.DeepCopy_resource_Quantity(val, newVal, c); err != nil { return err - } else { - (*out)[key] = newVal.(resource.Quantity) } + (*out)[key] = *newVal } } else { out.Limits = nil @@ -2516,11 +2717,11 @@ func DeepCopy_api_ResourceRequirements(in 
ResourceRequirements, out *ResourceReq in, out := in.Requests, &out.Requests *out = make(ResourceList) for key, val := range in { - if newVal, err := c.DeepCopy(val); err != nil { + newVal := new(resource.Quantity) + if err := resource.DeepCopy_resource_Quantity(val, newVal, c); err != nil { return err - } else { - (*out)[key] = newVal.(resource.Quantity) } + (*out)[key] = *newVal } } else { out.Requests = nil @@ -2537,7 +2738,7 @@ func DeepCopy_api_SELinuxOptions(in SELinuxOptions, out *SELinuxOptions, c *conv } func DeepCopy_api_Secret(in Secret, out *Secret, c *conversion.Cloner) error { - if err := DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } if err := DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { @@ -2569,10 +2770,10 @@ func DeepCopy_api_SecretKeySelector(in SecretKeySelector, out *SecretKeySelector } func DeepCopy_api_SecretList(in SecretList, out *SecretList, c *conversion.Cloner) error { - if err := DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } - if err := DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { return err } if in.Items != nil { @@ -2591,6 +2792,17 @@ func DeepCopy_api_SecretList(in SecretList, out *SecretList, c *conversion.Clone func DeepCopy_api_SecretVolumeSource(in SecretVolumeSource, out *SecretVolumeSource, c *conversion.Cloner) error { out.SecretName = in.SecretName + if in.Items != nil { + in, out := in.Items, &out.Items + *out = make([]KeyToPath, len(in)) + for i := range in { + if err := DeepCopy_api_KeyToPath(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Items = nil + } return nil } @@ -2645,7 +2857,7 @@ func DeepCopy_api_SecurityContext(in SecurityContext, out *SecurityContext, c *c } func DeepCopy_api_SerializedReference(in SerializedReference, out *SerializedReference, c *conversion.Cloner) error { - if err := DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } if err := DeepCopy_api_ObjectReference(in.Reference, &out.Reference, c); err != nil { @@ -2655,7 +2867,7 @@ func DeepCopy_api_SerializedReference(in SerializedReference, out *SerializedRef } func DeepCopy_api_Service(in Service, out *Service, c *conversion.Cloner) error { - if err := DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } if err := DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { @@ -2671,7 +2883,7 @@ func DeepCopy_api_Service(in Service, out *Service, c *conversion.Cloner) error } func DeepCopy_api_ServiceAccount(in ServiceAccount, out *ServiceAccount, c *conversion.Cloner) error { - if err := DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } if err := DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { @@ -2703,10 +2915,10 @@ func DeepCopy_api_ServiceAccount(in ServiceAccount, out 
*ServiceAccount, c *conv } func DeepCopy_api_ServiceAccountList(in ServiceAccountList, out *ServiceAccountList, c *conversion.Cloner) error { - if err := DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } - if err := DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { return err } if in.Items != nil { @@ -2724,10 +2936,10 @@ func DeepCopy_api_ServiceAccountList(in ServiceAccountList, out *ServiceAccountL } func DeepCopy_api_ServiceList(in ServiceList, out *ServiceList, c *conversion.Cloner) error { - if err := DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } - if err := DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { return err } if in.Items != nil { @@ -2748,7 +2960,7 @@ func DeepCopy_api_ServicePort(in ServicePort, out *ServicePort, c *conversion.Cl out.Name = in.Name out.Protocol = in.Protocol out.Port = in.Port - if err := DeepCopy_intstr_IntOrString(in.TargetPort, &out.TargetPort, c); err != nil { + if err := intstr.DeepCopy_intstr_IntOrString(in.TargetPort, &out.TargetPort, c); err != nil { return err } out.NodePort = in.NodePort @@ -2756,7 +2968,7 @@ func DeepCopy_api_ServicePort(in ServicePort, out *ServicePort, c *conversion.Cl } func DeepCopy_api_ServiceProxyOptions(in ServiceProxyOptions, out *ServiceProxyOptions, c *conversion.Cloner) error { - if err := DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } out.Path = in.Path @@ -2795,6 +3007,13 @@ func DeepCopy_api_ServiceSpec(in ServiceSpec, out *ServiceSpec, c *conversion.Cl } out.LoadBalancerIP = in.LoadBalancerIP out.SessionAffinity = in.SessionAffinity + if in.LoadBalancerSourceRanges != nil { + in, out := in.LoadBalancerSourceRanges, &out.LoadBalancerSourceRanges + *out = make([]string, len(in)) + copy(*out, in) + } else { + out.LoadBalancerSourceRanges = nil + } return nil } @@ -2806,12 +3025,27 @@ func DeepCopy_api_ServiceStatus(in ServiceStatus, out *ServiceStatus, c *convers } func DeepCopy_api_TCPSocketAction(in TCPSocketAction, out *TCPSocketAction, c *conversion.Cloner) error { - if err := DeepCopy_intstr_IntOrString(in.Port, &out.Port, c); err != nil { + if err := intstr.DeepCopy_intstr_IntOrString(in.Port, &out.Port, c); err != nil { return err } return nil } +func DeepCopy_api_Taint(in Taint, out *Taint, c *conversion.Cloner) error { + out.Key = in.Key + out.Value = in.Value + out.Effect = in.Effect + return nil +} + +func DeepCopy_api_Toleration(in Toleration, out *Toleration, c *conversion.Cloner) error { + out.Key = in.Key + out.Operator = in.Operator + out.Value = in.Value + out.Effect = in.Effect + return nil +} + func DeepCopy_api_Volume(in Volume, out *Volume, c *conversion.Cloner) error { out.Name = in.Name if err := DeepCopy_api_VolumeSource(in.VolumeSource, &out.VolumeSource, c); err != nil { @@ -2824,6 +3058,7 @@ func DeepCopy_api_VolumeMount(in VolumeMount, out *VolumeMount, c *conversion.Cl out.Name = in.Name out.ReadOnly = in.ReadOnly out.MountPath = 
in.MountPath + out.SubPath = in.SubPath return nil } @@ -2999,71 +3234,28 @@ func DeepCopy_api_VolumeSource(in VolumeSource, out *VolumeSource, c *conversion } else { out.ConfigMap = nil } - return nil -} - -func DeepCopy_conversion_Meta(in conversion.Meta, out *conversion.Meta, c *conversion.Cloner) error { - out.SrcVersion = in.SrcVersion - out.DestVersion = in.DestVersion - if newVal, err := c.DeepCopy(in.KeyNameMapping); err != nil { - return err + if in.VsphereVolume != nil { + in, out := in.VsphereVolume, &out.VsphereVolume + *out = new(VsphereVirtualDiskVolumeSource) + if err := DeepCopy_api_VsphereVirtualDiskVolumeSource(*in, *out, c); err != nil { + return err + } } else { - out.KeyNameMapping = newVal.(conversion.FieldMappingFunc) + out.VsphereVolume = nil } return nil } -func DeepCopy_intstr_IntOrString(in intstr.IntOrString, out *intstr.IntOrString, c *conversion.Cloner) error { - out.Type = in.Type - out.IntVal = in.IntVal - out.StrVal = in.StrVal - return nil -} - -func DeepCopy_sets_Empty(in sets.Empty, out *sets.Empty, c *conversion.Cloner) error { - return nil -} - -func DeepCopy_unversioned_GroupKind(in unversioned.GroupKind, out *unversioned.GroupKind, c *conversion.Cloner) error { - out.Group = in.Group - out.Kind = in.Kind - return nil -} - -func DeepCopy_unversioned_GroupResource(in unversioned.GroupResource, out *unversioned.GroupResource, c *conversion.Cloner) error { - out.Group = in.Group - out.Resource = in.Resource - return nil -} - -func DeepCopy_unversioned_GroupVersion(in unversioned.GroupVersion, out *unversioned.GroupVersion, c *conversion.Cloner) error { - out.Group = in.Group - out.Version = in.Version - return nil -} - -func DeepCopy_unversioned_GroupVersionKind(in unversioned.GroupVersionKind, out *unversioned.GroupVersionKind, c *conversion.Cloner) error { - out.Group = in.Group - out.Version = in.Version - out.Kind = in.Kind - return nil -} - -func DeepCopy_unversioned_GroupVersionResource(in unversioned.GroupVersionResource, out *unversioned.GroupVersionResource, c *conversion.Cloner) error { - out.Group = in.Group - out.Version = in.Version - out.Resource = in.Resource - return nil -} - -func DeepCopy_unversioned_ListMeta(in unversioned.ListMeta, out *unversioned.ListMeta, c *conversion.Cloner) error { - out.SelfLink = in.SelfLink - out.ResourceVersion = in.ResourceVersion +func DeepCopy_api_VsphereVirtualDiskVolumeSource(in VsphereVirtualDiskVolumeSource, out *VsphereVirtualDiskVolumeSource, c *conversion.Cloner) error { + out.VolumePath = in.VolumePath + out.FSType = in.FSType return nil } -func DeepCopy_unversioned_TypeMeta(in unversioned.TypeMeta, out *unversioned.TypeMeta, c *conversion.Cloner) error { - out.Kind = in.Kind - out.APIVersion = in.APIVersion +func DeepCopy_api_WeightedPodAffinityTerm(in WeightedPodAffinityTerm, out *WeightedPodAffinityTerm, c *conversion.Cloner) error { + out.Weight = in.Weight + if err := DeepCopy_api_PodAffinityTerm(in.PodAffinityTerm, &out.PodAffinityTerm, c); err != nil { + return err + } return nil } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/deep_copy_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/deep_copy_test.go new file mode 100644 index 000000000000..a251623a0547 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/deep_copy_test.go @@ -0,0 +1,95 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
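(Illustrative aside on the deep-copy hunks above — not part of the diff.) The generated functions all follow one mechanical shape: scalar fields are assigned directly; optional pointers (Preconditions.UID), slices (SecretVolumeSource.Items, LoadBalancerSourceRanges) and maps (the ResourceList fields) are nil-checked and cloned element by element; and this revision swaps the reflective c.DeepCopy call plus interface type assertion for the typed resource.DeepCopy_resource_Quantity helper. A minimal sketch of the map case, using toy stand-in types rather than the real api and resource packages:

package main

import "fmt"

// quantity and resourceList are toy stand-ins for resource.Quantity and
// api.ResourceList; only the copy mechanics are the point here.
type quantity struct{ milliValue int64 }

type resourceList map[string]quantity

// deepCopyResourceList mirrors the pattern the hunks above emit for the
// Hard/Used/Limits/Requests fields: nil-check, allocate a fresh map, and
// copy each value through a typed helper instead of reflection.
func deepCopyResourceList(in resourceList, out *resourceList) {
	if in != nil {
		*out = make(resourceList, len(in))
		for key, val := range in {
			newVal := new(quantity)
			*newVal = val // the real code calls resource.DeepCopy_resource_Quantity
			(*out)[key] = *newVal
		}
	} else {
		*out = nil
	}
}

func main() {
	src := resourceList{"cpu": {milliValue: 500}}
	var dst resourceList
	deepCopyResourceList(src, &dst)
	src["cpu"] = quantity{milliValue: 1000} // mutate the source...
	fmt.Println(dst["cpu"].milliValue)      // ...the copy still prints 500
}

Copying through a typed helper saves a reflective clone and a type assertion per map entry, which is presumably why the generator was changed to emit it.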
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package api_test + +import ( + "io/ioutil" + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/runtime" +) + +func BenchmarkPodCopy(b *testing.B) { + data, err := ioutil.ReadFile("pod_example.json") + if err != nil { + b.Fatalf("Unexpected error while reading file: %v", err) + } + var pod api.Pod + if err := runtime.DecodeInto(testapi.Default.Codec(), data, &pod); err != nil { + b.Fatalf("Unexpected error decoding pod: %v", err) + } + + var result *api.Pod + for i := 0; i < b.N; i++ { + obj, err := api.Scheme.DeepCopy(&pod) + if err != nil { + b.Fatalf("Unexpected error copying pod: %v", err) + } + result = obj.(*api.Pod) + } + if !api.Semantic.DeepEqual(pod, *result) { + b.Fatalf("Incorrect copy: expected %v, got %v", pod, *result) + } +} + +func BenchmarkNodeCopy(b *testing.B) { + data, err := ioutil.ReadFile("node_example.json") + if err != nil { + b.Fatalf("Unexpected error while reading file: %v", err) + } + var node api.Node + if err := runtime.DecodeInto(testapi.Default.Codec(), data, &node); err != nil { + b.Fatalf("Unexpected error decoding node: %v", err) + } + + var result *api.Node + for i := 0; i < b.N; i++ { + obj, err := api.Scheme.DeepCopy(&node) + if err != nil { + b.Fatalf("Unexpected error copying node: %v", err) + } + result = obj.(*api.Node) + } + if !api.Semantic.DeepEqual(node, *result) { + b.Fatalf("Incorrect copy: expected %v, got %v", node, *result) + } +} + +func BenchmarkReplicationControllerCopy(b *testing.B) { + data, err := ioutil.ReadFile("replication_controller_example.json") + if err != nil { + b.Fatalf("Unexpected error while reading file: %v", err) + } + var replicationController api.ReplicationController + if err := runtime.DecodeInto(testapi.Default.Codec(), data, &replicationController); err != nil { + b.Fatalf("Unexpected error decoding replication controller: %v", err) + } + + var result *api.ReplicationController + for i := 0; i < b.N; i++ { + obj, err := api.Scheme.DeepCopy(&replicationController) + if err != nil { + b.Fatalf("Unexpected error copying replication controller: %v", err) + } + result = obj.(*api.ReplicationController) + } + if !api.Semantic.DeepEqual(replicationController, *result) { + b.Fatalf("Incorrect copy: expected %v, got %v", replicationController, *result) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/endpoints/util.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/endpoints/util.go index 7758434a19d2..501d58f28538 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/endpoints/util.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/endpoints/util.go @@ -29,11 +29,13 @@ import ( ) const ( + // TODO: to be deleted after v1.3 is released // Its value is the json representation of map[string(IP)][HostRecord] // example: '{"10.245.1.6":{"HostName":"my-webserver"}}' PodHostnamesAnnotation = "endpoints.beta.kubernetes.io/hostnames-map" ) + +// TODO: to be deleted after v1.3 is released type HostRecord struct { HostName string } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/endpoints/util_test.go 
b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/endpoints/util_test.go new file mode 100644 index 000000000000..7e003e66dd7e --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/endpoints/util_test.go @@ -0,0 +1,464 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package endpoints + +import ( + "reflect" + "testing" + + "github.com/davecgh/go-spew/spew" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/types" +) + +func podRef(uid string) *api.ObjectReference { + ref := api.ObjectReference{UID: types.UID(uid)} + return &ref +} + +func TestPackSubsets(t *testing.T) { + // The downside of table-driven tests is that some things have to live outside the table. + fooObjRef := api.ObjectReference{Name: "foo"} + barObjRef := api.ObjectReference{Name: "bar"} + + testCases := []struct { + name string + given []api.EndpointSubset + expect []api.EndpointSubset + }{ + { + name: "empty everything", + given: []api.EndpointSubset{{Addresses: []api.EndpointAddress{}, Ports: []api.EndpointPort{}}}, + expect: []api.EndpointSubset{}, + }, { + name: "empty addresses", + given: []api.EndpointSubset{{Addresses: []api.EndpointAddress{}, Ports: []api.EndpointPort{{Port: 111}}}}, + expect: []api.EndpointSubset{}, + }, { + name: "empty ports", + given: []api.EndpointSubset{{Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}}, Ports: []api.EndpointPort{}}}, + expect: []api.EndpointSubset{}, + }, { + name: "empty ports", + given: []api.EndpointSubset{{NotReadyAddresses: []api.EndpointAddress{{IP: "1.2.3.4"}}, Ports: []api.EndpointPort{}}}, + expect: []api.EndpointSubset{}, + }, { + name: "one set, one ip, one port", + given: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}}, + Ports: []api.EndpointPort{{Port: 111}}, + }}, + expect: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}}, + Ports: []api.EndpointPort{{Port: 111}}, + }}, + }, { + name: "one set, one ip, one port (IPv6)", + given: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{IP: "beef::1:2:3:4"}}, + Ports: []api.EndpointPort{{Port: 111}}, + }}, + expect: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{IP: "beef::1:2:3:4"}}, + Ports: []api.EndpointPort{{Port: 111}}, + }}, + }, { + name: "one set, one notReady ip, one port", + given: []api.EndpointSubset{{ + NotReadyAddresses: []api.EndpointAddress{{IP: "1.2.3.4"}}, + Ports: []api.EndpointPort{{Port: 111}}, + }}, + expect: []api.EndpointSubset{{ + NotReadyAddresses: []api.EndpointAddress{{IP: "1.2.3.4"}}, + Ports: []api.EndpointPort{{Port: 111}}, + }}, + }, { + name: "one set, one ip, one UID, one port", + given: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{IP: "1.2.3.4", TargetRef: podRef("uid-1")}}, + Ports: []api.EndpointPort{{Port: 111}}, + }}, + expect: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{IP: "1.2.3.4", TargetRef: podRef("uid-1")}}, + Ports: []api.EndpointPort{{Port: 111}}, + }}, + }, { + name: "one set, one notReady ip, one 
UID, one port", + given: []api.EndpointSubset{{ + NotReadyAddresses: []api.EndpointAddress{{IP: "1.2.3.4", TargetRef: podRef("uid-1")}}, + Ports: []api.EndpointPort{{Port: 111}}, + }}, + expect: []api.EndpointSubset{{ + NotReadyAddresses: []api.EndpointAddress{{IP: "1.2.3.4", TargetRef: podRef("uid-1")}}, + Ports: []api.EndpointPort{{Port: 111}}, + }}, + }, { + name: "one set, one ip, empty UID, one port", + given: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{IP: "1.2.3.4", TargetRef: podRef("")}}, + Ports: []api.EndpointPort{{Port: 111}}, + }}, + expect: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{IP: "1.2.3.4", TargetRef: podRef("")}}, + Ports: []api.EndpointPort{{Port: 111}}, + }}, + }, { + name: "one set, one notReady ip, empty UID, one port", + given: []api.EndpointSubset{{ + NotReadyAddresses: []api.EndpointAddress{{IP: "1.2.3.4", TargetRef: podRef("")}}, + Ports: []api.EndpointPort{{Port: 111}}, + }}, + expect: []api.EndpointSubset{{ + NotReadyAddresses: []api.EndpointAddress{{IP: "1.2.3.4", TargetRef: podRef("")}}, + Ports: []api.EndpointPort{{Port: 111}}, + }}, + }, { + name: "one set, two ips, one port", + given: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}, {IP: "5.6.7.8"}}, + Ports: []api.EndpointPort{{Port: 111}}, + }}, + expect: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}, {IP: "5.6.7.8"}}, + Ports: []api.EndpointPort{{Port: 111}}, + }}, + }, { + name: "one set, two mixed ips, one port", + given: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}}, + NotReadyAddresses: []api.EndpointAddress{{IP: "5.6.7.8"}}, + Ports: []api.EndpointPort{{Port: 111}}, + }}, + expect: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}}, + NotReadyAddresses: []api.EndpointAddress{{IP: "5.6.7.8"}}, + Ports: []api.EndpointPort{{Port: 111}}, + }}, + }, { + name: "one set, two duplicate ips, one port, notReady is covered by ready", + given: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}}, + NotReadyAddresses: []api.EndpointAddress{{IP: "1.2.3.4"}}, + Ports: []api.EndpointPort{{Port: 111}}, + }}, + expect: []api.EndpointSubset{{ + NotReadyAddresses: []api.EndpointAddress{{IP: "1.2.3.4"}}, + Ports: []api.EndpointPort{{Port: 111}}, + }}, + }, { + name: "one set, one ip, two ports", + given: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}}, + Ports: []api.EndpointPort{{Port: 111}, {Port: 222}}, + }}, + expect: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}}, + Ports: []api.EndpointPort{{Port: 111}, {Port: 222}}, + }}, + }, { + name: "one set, dup ips, one port", + given: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}, {IP: "1.2.3.4"}}, + Ports: []api.EndpointPort{{Port: 111}}, + }}, + expect: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}}, + Ports: []api.EndpointPort{{Port: 111}}, + }}, + }, { + name: "one set, dup ips, one port (IPv6)", + given: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{IP: "beef::1"}, {IP: "beef::1"}}, + Ports: []api.EndpointPort{{Port: 111}}, + }}, + expect: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{IP: "beef::1"}}, + Ports: []api.EndpointPort{{Port: 111}}, + }}, + }, { + name: "one set, dup ips with target-refs, one port", + given: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{ + {IP: "1.2.3.4", TargetRef: &fooObjRef}, + {IP: "1.2.3.4", TargetRef: &barObjRef}, + }, + Ports: 
[]api.EndpointPort{{Port: 111}}, + }}, + expect: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{IP: "1.2.3.4", TargetRef: &fooObjRef}}, + Ports: []api.EndpointPort{{Port: 111}}, + }}, + }, { + name: "one set, dup mixed ips with target-refs, one port", + given: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{ + {IP: "1.2.3.4", TargetRef: &fooObjRef}, + }, + NotReadyAddresses: []api.EndpointAddress{ + {IP: "1.2.3.4", TargetRef: &barObjRef}, + }, + Ports: []api.EndpointPort{{Port: 111}}, + }}, + expect: []api.EndpointSubset{{ + // finding the same address twice is considered an error on input, only the first address+port + // reference is preserved + NotReadyAddresses: []api.EndpointAddress{{IP: "1.2.3.4", TargetRef: &fooObjRef}}, + Ports: []api.EndpointPort{{Port: 111}}, + }}, + }, { + name: "one set, one ip, dup ports", + given: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}}, + Ports: []api.EndpointPort{{Port: 111}, {Port: 111}}, + }}, + expect: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}}, + Ports: []api.EndpointPort{{Port: 111}}, + }}, + }, { + name: "two sets, dup ip, dup port", + given: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}}, + Ports: []api.EndpointPort{{Port: 111}}, + }, { + Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}}, + Ports: []api.EndpointPort{{Port: 111}}, + }}, + expect: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}}, + Ports: []api.EndpointPort{{Port: 111}}, + }}, + }, { + name: "two sets, dup mixed ip, dup port", + given: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}}, + Ports: []api.EndpointPort{{Port: 111}}, + }, { + NotReadyAddresses: []api.EndpointAddress{{IP: "1.2.3.4"}}, + Ports: []api.EndpointPort{{Port: 111}}, + }}, + expect: []api.EndpointSubset{{ + NotReadyAddresses: []api.EndpointAddress{{IP: "1.2.3.4"}}, + Ports: []api.EndpointPort{{Port: 111}}, + }}, + }, { + name: "two sets, dup ip, two ports", + given: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}}, + Ports: []api.EndpointPort{{Port: 111}}, + }, { + Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}}, + Ports: []api.EndpointPort{{Port: 222}}, + }}, + expect: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}}, + Ports: []api.EndpointPort{{Port: 111}, {Port: 222}}, + }}, + }, { + name: "two sets, dup ip, dup uids, two ports", + given: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{IP: "1.2.3.4", TargetRef: podRef("uid-1")}}, + Ports: []api.EndpointPort{{Port: 111}}, + }, { + Addresses: []api.EndpointAddress{{IP: "1.2.3.4", TargetRef: podRef("uid-1")}}, + Ports: []api.EndpointPort{{Port: 222}}, + }}, + expect: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{IP: "1.2.3.4", TargetRef: podRef("uid-1")}}, + Ports: []api.EndpointPort{{Port: 111}, {Port: 222}}, + }}, + }, { + name: "two sets, dup mixed ip, dup uids, two ports", + given: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{IP: "1.2.3.4", TargetRef: podRef("uid-1")}}, + Ports: []api.EndpointPort{{Port: 111}}, + }, { + NotReadyAddresses: []api.EndpointAddress{{IP: "1.2.3.4", TargetRef: podRef("uid-1")}}, + Ports: []api.EndpointPort{{Port: 222}}, + }}, + expect: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{IP: "1.2.3.4", TargetRef: podRef("uid-1")}}, + Ports: []api.EndpointPort{{Port: 111}}, + }, { + NotReadyAddresses: []api.EndpointAddress{{IP: "1.2.3.4", TargetRef: podRef("uid-1")}}, + Ports: 
[]api.EndpointPort{{Port: 222}}, + }}, + }, { + name: "two sets, two ips, dup port", + given: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}}, + Ports: []api.EndpointPort{{Port: 111}}, + }, { + Addresses: []api.EndpointAddress{{IP: "5.6.7.8"}}, + Ports: []api.EndpointPort{{Port: 111}}, + }}, + expect: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}, {IP: "5.6.7.8"}}, + Ports: []api.EndpointPort{{Port: 111}}, + }}, + }, { + name: "two set, dup ip, two uids, dup ports", + given: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{IP: "1.2.3.4", TargetRef: podRef("uid-1")}}, + Ports: []api.EndpointPort{{Port: 111}}, + }, { + Addresses: []api.EndpointAddress{{IP: "1.2.3.4", TargetRef: podRef("uid-2")}}, + Ports: []api.EndpointPort{{Port: 111}}, + }}, + expect: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{ + {IP: "1.2.3.4", TargetRef: podRef("uid-1")}, + {IP: "1.2.3.4", TargetRef: podRef("uid-2")}, + }, + Ports: []api.EndpointPort{{Port: 111}}, + }}, + }, { + name: "two set, dup ip, with and without uid, dup ports", + given: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}}, + Ports: []api.EndpointPort{{Port: 111}}, + }, { + Addresses: []api.EndpointAddress{{IP: "1.2.3.4", TargetRef: podRef("uid-2")}}, + Ports: []api.EndpointPort{{Port: 111}}, + }}, + expect: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{ + {IP: "1.2.3.4"}, + {IP: "1.2.3.4", TargetRef: podRef("uid-2")}, + }, + Ports: []api.EndpointPort{{Port: 111}}, + }}, + }, { + name: "two sets, two ips, two dup ip with uid, dup port, wrong order", + given: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{IP: "5.6.7.8"}}, + Ports: []api.EndpointPort{{Port: 111}}, + }, { + Addresses: []api.EndpointAddress{{IP: "5.6.7.8", TargetRef: podRef("uid-1")}}, + Ports: []api.EndpointPort{{Port: 111}}, + }, { + Addresses: []api.EndpointAddress{{IP: "1.2.3.4", TargetRef: podRef("uid-1")}}, + Ports: []api.EndpointPort{{Port: 111}}, + }, { + Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}}, + Ports: []api.EndpointPort{{Port: 111}}, + }}, + expect: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{ + {IP: "1.2.3.4"}, + {IP: "1.2.3.4", TargetRef: podRef("uid-1")}, + {IP: "5.6.7.8"}, + {IP: "5.6.7.8", TargetRef: podRef("uid-1")}, + }, + Ports: []api.EndpointPort{{Port: 111}}, + }}, + }, { + name: "two sets, two mixed ips, two dup ip with uid, dup port, wrong order, ends up with split addresses", + given: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{IP: "5.6.7.8"}}, + Ports: []api.EndpointPort{{Port: 111}}, + }, { + NotReadyAddresses: []api.EndpointAddress{{IP: "5.6.7.8", TargetRef: podRef("uid-1")}}, + Ports: []api.EndpointPort{{Port: 111}}, + }, { + NotReadyAddresses: []api.EndpointAddress{{IP: "1.2.3.4", TargetRef: podRef("uid-1")}}, + Ports: []api.EndpointPort{{Port: 111}}, + }, { + NotReadyAddresses: []api.EndpointAddress{{IP: "1.2.3.4"}}, + Ports: []api.EndpointPort{{Port: 111}}, + }}, + expect: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{ + {IP: "5.6.7.8"}, + }, + NotReadyAddresses: []api.EndpointAddress{ + {IP: "1.2.3.4"}, + {IP: "1.2.3.4", TargetRef: podRef("uid-1")}, + {IP: "5.6.7.8", TargetRef: podRef("uid-1")}, + }, + Ports: []api.EndpointPort{{Port: 111}}, + }}, + }, { + name: "two sets, two ips, two ports", + given: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}}, + Ports: []api.EndpointPort{{Port: 111}}, + }, { + Addresses: []api.EndpointAddress{{IP: "5.6.7.8"}}, + 
Ports: []api.EndpointPort{{Port: 222}}, + }}, + expect: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}}, + Ports: []api.EndpointPort{{Port: 111}}, + }, { + Addresses: []api.EndpointAddress{{IP: "5.6.7.8"}}, + Ports: []api.EndpointPort{{Port: 222}}, + }}, + }, { + name: "four sets, three ips, three ports, jumbled", + given: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}}, + Ports: []api.EndpointPort{{Port: 111}}, + }, { + Addresses: []api.EndpointAddress{{IP: "1.2.3.5"}}, + Ports: []api.EndpointPort{{Port: 222}}, + }, { + Addresses: []api.EndpointAddress{{IP: "1.2.3.6"}}, + Ports: []api.EndpointPort{{Port: 111}}, + }, { + Addresses: []api.EndpointAddress{{IP: "1.2.3.5"}}, + Ports: []api.EndpointPort{{Port: 333}}, + }}, + expect: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}, {IP: "1.2.3.6"}}, + Ports: []api.EndpointPort{{Port: 111}}, + }, { + Addresses: []api.EndpointAddress{{IP: "1.2.3.5"}}, + Ports: []api.EndpointPort{{Port: 222}, {Port: 333}}, + }}, + }, { + name: "four sets, three mixed ips, three ports, jumbled", + given: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}}, + Ports: []api.EndpointPort{{Port: 111}}, + }, { + NotReadyAddresses: []api.EndpointAddress{{IP: "1.2.3.5"}}, + Ports: []api.EndpointPort{{Port: 222}}, + }, { + Addresses: []api.EndpointAddress{{IP: "1.2.3.6"}}, + Ports: []api.EndpointPort{{Port: 111}}, + }, { + NotReadyAddresses: []api.EndpointAddress{{IP: "1.2.3.5"}}, + Ports: []api.EndpointPort{{Port: 333}}, + }}, + expect: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}, {IP: "1.2.3.6"}}, + Ports: []api.EndpointPort{{Port: 111}}, + }, { + NotReadyAddresses: []api.EndpointAddress{{IP: "1.2.3.5"}}, + Ports: []api.EndpointPort{{Port: 222}, {Port: 333}}, + }}, + }, + } + + for _, tc := range testCases { + result := RepackSubsets(tc.given) + if !reflect.DeepEqual(result, SortSubsets(tc.expect)) { + t.Errorf("case %q: expected %s, got %s", tc.name, spew.Sprintf("%#v", SortSubsets(tc.expect)), spew.Sprintf("%#v", result)) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/errors/errors.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/errors/errors.go index a78b7bc240e9..89e83c2e3a6c 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/errors/errors.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/errors/errors.go @@ -93,7 +93,7 @@ func FromObject(obj runtime.Object) error { } // NewNotFound returns a new error which indicates that the resource of the kind and the name was not found. -func NewNotFound(qualifiedResource unversioned.GroupResource, name string) error { +func NewNotFound(qualifiedResource unversioned.GroupResource, name string) *StatusError { return &StatusError{unversioned.Status{ Status: unversioned.StatusFailure, Code: http.StatusNotFound, @@ -108,7 +108,7 @@ func NewNotFound(qualifiedResource unversioned.GroupResource, name string) error } // NewAlreadyExists returns an error indicating the item requested exists by that identifier. 
-func NewAlreadyExists(qualifiedResource unversioned.GroupResource, name string) error { +func NewAlreadyExists(qualifiedResource unversioned.GroupResource, name string) *StatusError { return &StatusError{unversioned.Status{ Status: unversioned.StatusFailure, Code: http.StatusConflict, @@ -124,7 +124,7 @@ func NewAlreadyExists(qualifiedResource unversioned.GroupResource, name string) // NewUnauthorized returns an error indicating the client is not authorized to perform the requested // action. -func NewUnauthorized(reason string) error { +func NewUnauthorized(reason string) *StatusError { message := reason if len(message) == 0 { message = "not authorized" @@ -138,7 +138,7 @@ func NewUnauthorized(reason string) error { } // NewForbidden returns an error indicating the requested action was forbidden -func NewForbidden(qualifiedResource unversioned.GroupResource, name string, err error) error { +func NewForbidden(qualifiedResource unversioned.GroupResource, name string, err error) *StatusError { return &StatusError{unversioned.Status{ Status: unversioned.StatusFailure, Code: http.StatusForbidden, @@ -153,7 +153,7 @@ func NewForbidden(qualifiedResource unversioned.GroupResource, name string, err } // NewConflict returns an error indicating the item can't be updated as provided. -func NewConflict(qualifiedResource unversioned.GroupResource, name string, err error) error { +func NewConflict(qualifiedResource unversioned.GroupResource, name string, err error) *StatusError { return &StatusError{unversioned.Status{ Status: unversioned.StatusFailure, Code: http.StatusConflict, @@ -163,12 +163,12 @@ func NewConflict(qualifiedResource unversioned.GroupResource, name string, err e Kind: qualifiedResource.Resource, Name: name, }, - Message: fmt.Sprintf("%s %q cannot be updated: %v", qualifiedResource.String(), name, err), + Message: fmt.Sprintf("Operation cannot be fulfilled on %s %q: %v", qualifiedResource.String(), name, err), }} } // NewGone returns an error indicating the item no longer available at the server and no forwarding address is known. -func NewGone(message string) error { +func NewGone(message string) *StatusError { return &StatusError{unversioned.Status{ Status: unversioned.StatusFailure, Code: http.StatusGone, @@ -178,7 +178,7 @@ func NewGone(message string) error { } // NewInvalid returns an error indicating the item is invalid and cannot be processed. -func NewInvalid(qualifiedKind unversioned.GroupKind, name string, errs field.ErrorList) error { +func NewInvalid(qualifiedKind unversioned.GroupKind, name string, errs field.ErrorList) *StatusError { causes := make([]unversioned.StatusCause, 0, len(errs)) for i := range errs { err := errs[i] @@ -203,7 +203,7 @@ func NewInvalid(qualifiedKind unversioned.GroupKind, name string, errs field.Err } // NewBadRequest creates an error that indicates that the request is invalid and can not be processed. -func NewBadRequest(reason string) error { +func NewBadRequest(reason string) *StatusError { return &StatusError{unversioned.Status{ Status: unversioned.StatusFailure, Code: http.StatusBadRequest, @@ -213,7 +213,7 @@ func NewBadRequest(reason string) error { } // NewServiceUnavailable creates an error that indicates that the requested service is unavailable. 
-func NewServiceUnavailable(reason string) error { +func NewServiceUnavailable(reason string) *StatusError { return &StatusError{unversioned.Status{ Status: unversioned.StatusFailure, Code: http.StatusServiceUnavailable, @@ -223,7 +223,7 @@ func NewServiceUnavailable(reason string) error { } // NewMethodNotSupported returns an error indicating the requested action is not supported on this kind. -func NewMethodNotSupported(qualifiedResource unversioned.GroupResource, action string) error { +func NewMethodNotSupported(qualifiedResource unversioned.GroupResource, action string) *StatusError { return &StatusError{unversioned.Status{ Status: unversioned.StatusFailure, Code: http.StatusMethodNotAllowed, @@ -238,7 +238,7 @@ func NewMethodNotSupported(qualifiedResource unversioned.GroupResource, action s // NewServerTimeout returns an error indicating the requested action could not be completed due to a // transient error, and the client should try again. -func NewServerTimeout(qualifiedResource unversioned.GroupResource, operation string, retryAfterSeconds int) error { +func NewServerTimeout(qualifiedResource unversioned.GroupResource, operation string, retryAfterSeconds int) *StatusError { return &StatusError{unversioned.Status{ Status: unversioned.StatusFailure, Code: http.StatusInternalServerError, @@ -255,12 +255,12 @@ func NewServerTimeout(qualifiedResource unversioned.GroupResource, operation str // NewServerTimeoutForKind should not exist. Server timeouts happen when accessing resources, the Kind is just what we // happened to be looking at when the request failed. This delegates to keep code sane, but we should work towards removing this. -func NewServerTimeoutForKind(qualifiedKind unversioned.GroupKind, operation string, retryAfterSeconds int) error { +func NewServerTimeoutForKind(qualifiedKind unversioned.GroupKind, operation string, retryAfterSeconds int) *StatusError { return NewServerTimeout(unversioned.GroupResource{Group: qualifiedKind.Group, Resource: qualifiedKind.Kind}, operation, retryAfterSeconds) } // NewInternalError returns an error indicating the item is invalid and cannot be processed. -func NewInternalError(err error) error { +func NewInternalError(err error) *StatusError { return &StatusError{unversioned.Status{ Status: unversioned.StatusFailure, Code: http.StatusInternalServerError, @@ -274,7 +274,7 @@ func NewInternalError(err error) error { // NewTimeoutError returns an error indicating that a timeout occurred before the request // could be completed. Clients may retry, but the operation may still complete. -func NewTimeoutError(message string, retryAfterSeconds int) error { +func NewTimeoutError(message string, retryAfterSeconds int) *StatusError { return &StatusError{unversioned.Status{ Status: unversioned.StatusFailure, Code: StatusServerTimeout, @@ -287,7 +287,7 @@ func NewTimeoutError(message string, retryAfterSeconds int) error { } // NewGenericServerResponse returns a new error for server responses that are not in a recognizable form. 
-func NewGenericServerResponse(code int, verb string, qualifiedResource unversioned.GroupResource, name, serverMessage string, retryAfterSeconds int, isUnexpectedResponse bool) error { +func NewGenericServerResponse(code int, verb string, qualifiedResource unversioned.GroupResource, name, serverMessage string, retryAfterSeconds int, isUnexpectedResponse bool) *StatusError { reason := unversioned.StatusReasonUnknown message := fmt.Sprintf("the server responded with the status code %d but did not return more information", code) switch code { diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/errors/errors_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/errors/errors_test.go new file mode 100644 index 000000000000..55928f2318a5 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/errors/errors_test.go @@ -0,0 +1,189 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package errors + +import ( + "errors" + "fmt" + "reflect" + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/util/validation/field" +) + +func TestErrorNew(t *testing.T) { + err := NewAlreadyExists(api.Resource("tests"), "1") + if !IsAlreadyExists(err) { + t.Errorf("expected to be %s", unversioned.StatusReasonAlreadyExists) + } + if IsConflict(err) { + t.Errorf("expected to not be %s", unversioned.StatusReasonConflict) + } + if IsNotFound(err) { + t.Errorf(fmt.Sprintf("expected to not be %s", unversioned.StatusReasonNotFound)) + } + if IsInvalid(err) { + t.Errorf("expected to not be %s", unversioned.StatusReasonInvalid) + } + if IsBadRequest(err) { + t.Errorf("expected to not be %s", unversioned.StatusReasonBadRequest) + } + if IsForbidden(err) { + t.Errorf("expected to not be %s", unversioned.StatusReasonForbidden) + } + if IsServerTimeout(err) { + t.Errorf("expected to not be %s", unversioned.StatusReasonServerTimeout) + } + if IsMethodNotSupported(err) { + t.Errorf("expected to not be %s", unversioned.StatusReasonMethodNotAllowed) + } + + if !IsConflict(NewConflict(api.Resource("tests"), "2", errors.New("message"))) { + t.Errorf("expected to be conflict") + } + if !IsNotFound(NewNotFound(api.Resource("tests"), "3")) { + t.Errorf("expected to be %s", unversioned.StatusReasonNotFound) + } + if !IsInvalid(NewInvalid(api.Kind("Test"), "2", nil)) { + t.Errorf("expected to be %s", unversioned.StatusReasonInvalid) + } + if !IsBadRequest(NewBadRequest("reason")) { + t.Errorf("expected to be %s", unversioned.StatusReasonBadRequest) + } + if !IsForbidden(NewForbidden(api.Resource("tests"), "2", errors.New("reason"))) { + t.Errorf("expected to be %s", unversioned.StatusReasonForbidden) + } + if !IsUnauthorized(NewUnauthorized("reason")) { + t.Errorf("expected to be %s", unversioned.StatusReasonUnauthorized) + } + if !IsServerTimeout(NewServerTimeout(api.Resource("tests"), "reason", 0)) { + t.Errorf("expected to be %s", unversioned.StatusReasonServerTimeout) + } + if 
time, ok := SuggestsClientDelay(NewServerTimeout(api.Resource("tests"), "doing something", 10)); time != 10 || !ok { + t.Errorf("expected to be %s", unversioned.StatusReasonServerTimeout) + } + if time, ok := SuggestsClientDelay(NewTimeoutError("test reason", 10)); time != 10 || !ok { + t.Errorf("expected to be %s", unversioned.StatusReasonTimeout) + } + if !IsMethodNotSupported(NewMethodNotSupported(api.Resource("foos"), "delete")) { + t.Errorf("expected to be %s", unversioned.StatusReasonMethodNotAllowed) + } +} + +func TestNewInvalid(t *testing.T) { + testCases := []struct { + Err *field.Error + Details *unversioned.StatusDetails + }{ + { + field.Duplicate(field.NewPath("field[0].name"), "bar"), + &unversioned.StatusDetails{ + Kind: "Kind", + Name: "name", + Causes: []unversioned.StatusCause{{ + Type: unversioned.CauseTypeFieldValueDuplicate, + Field: "field[0].name", + }}, + }, + }, + { + field.Invalid(field.NewPath("field[0].name"), "bar", "detail"), + &unversioned.StatusDetails{ + Kind: "Kind", + Name: "name", + Causes: []unversioned.StatusCause{{ + Type: unversioned.CauseTypeFieldValueInvalid, + Field: "field[0].name", + }}, + }, + }, + { + field.NotFound(field.NewPath("field[0].name"), "bar"), + &unversioned.StatusDetails{ + Kind: "Kind", + Name: "name", + Causes: []unversioned.StatusCause{{ + Type: unversioned.CauseTypeFieldValueNotFound, + Field: "field[0].name", + }}, + }, + }, + { + field.NotSupported(field.NewPath("field[0].name"), "bar", nil), + &unversioned.StatusDetails{ + Kind: "Kind", + Name: "name", + Causes: []unversioned.StatusCause{{ + Type: unversioned.CauseTypeFieldValueNotSupported, + Field: "field[0].name", + }}, + }, + }, + { + field.Required(field.NewPath("field[0].name"), ""), + &unversioned.StatusDetails{ + Kind: "Kind", + Name: "name", + Causes: []unversioned.StatusCause{{ + Type: unversioned.CauseTypeFieldValueRequired, + Field: "field[0].name", + }}, + }, + }, + } + for i, testCase := range testCases { + vErr, expected := testCase.Err, testCase.Details + expected.Causes[0].Message = vErr.ErrorBody() + err := NewInvalid(api.Kind("Kind"), "name", field.ErrorList{vErr}) + status := err.ErrStatus + if status.Code != 422 || status.Reason != unversioned.StatusReasonInvalid { + t.Errorf("%d: unexpected status: %#v", i, status) + } + if !reflect.DeepEqual(expected, status.Details) { + t.Errorf("%d: expected %#v, got %#v", i, expected, status.Details) + } + } +} + +func Test_reasonForError(t *testing.T) { + if e, a := unversioned.StatusReasonUnknown, reasonForError(nil); e != a { + t.Errorf("unexpected reason type: %#v", a) + } +} + +type TestType struct{} + +func (obj *TestType) GetObjectKind() unversioned.ObjectKind { return unversioned.EmptyObjectKind } + +func TestFromObject(t *testing.T) { + table := []struct { + obj runtime.Object + message string + }{ + {&unversioned.Status{Message: "foobar"}, "foobar"}, + {&TestType{}, "unexpected object: &{}"}, + } + + for _, item := range table { + if e, a := item.message, FromObject(item.obj).Error(); e != a { + t.Errorf("Expected %v, got %v", e, a) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/errors/storage/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/errors/storage/doc.go new file mode 100644 index 000000000000..a2a550526c97 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/errors/storage/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. 
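(Illustrative aside, not part of the diff.) The errors_test.go cases above lean on the signature change running through errors.go: NewNotFound, NewConflict and the other constructors now return the concrete *StatusError instead of the error interface, so callers can reach ErrStatus directly — TestNewInvalid reads err.ErrStatus without any type assertion. A simplified stand-alone sketch of that pattern; status and statusError below are toy stand-ins, not the real unversioned.Status types:

package main

import "fmt"

// status and statusError are simplified stand-ins for unversioned.Status
// and errors.StatusError.
type status struct {
	Code    int
	Message string
}

type statusError struct{ ErrStatus status }

func (e *statusError) Error() string { return e.ErrStatus.Message }

// newNotFound mirrors the narrowed constructors: it returns the concrete
// *statusError so callers can read ErrStatus without a type assertion.
func newNotFound(resource, name string) *statusError {
	return &statusError{ErrStatus: status{
		Code:    404,
		Message: fmt.Sprintf("%s %q not found", resource, name),
	}}
}

func main() {
	err := newNotFound("pods", "web-0")
	fmt.Println(err.ErrStatus.Code) // concrete type: fields are reachable
	var e error = err               // still satisfies error for callers
	fmt.Println(e.Error())
}

One general caveat with returning a concrete error pointer: a nil *statusError stored in a plain error variable compares non-nil, so functions that may return no error should still declare error as their own return type.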
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package storage provides conversion of etcd errors to API errors. +package storage diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/errors/storage/storage.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/errors/storage/storage.go new file mode 100644 index 000000000000..2a22b4f1716b --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/errors/storage/storage.go @@ -0,0 +1,108 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package storage + +import ( + "k8s.io/kubernetes/pkg/api/errors" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/storage" +) + +// InterpretListError converts a generic error on a retrieval +// operation into the appropriate API error. +func InterpretListError(err error, qualifiedResource unversioned.GroupResource) error { + switch { + case storage.IsNotFound(err): + return errors.NewNotFound(qualifiedResource, "") + case storage.IsUnreachable(err): + return errors.NewServerTimeout(qualifiedResource, "list", 2) // TODO: make configurable or handled at a higher level + default: + return err + } +} + +// InterpretGetError converts a generic error on a retrieval +// operation into the appropriate API error. +func InterpretGetError(err error, qualifiedResource unversioned.GroupResource, name string) error { + switch { + case storage.IsNotFound(err): + return errors.NewNotFound(qualifiedResource, name) + case storage.IsUnreachable(err): + return errors.NewServerTimeout(qualifiedResource, "get", 2) // TODO: make configurable or handled at a higher level + default: + return err + } +} + +// InterpretCreateError converts a generic error on a create +// operation into the appropriate API error. +func InterpretCreateError(err error, qualifiedResource unversioned.GroupResource, name string) error { + switch { + case storage.IsNodeExist(err): + return errors.NewAlreadyExists(qualifiedResource, name) + case storage.IsUnreachable(err): + return errors.NewServerTimeout(qualifiedResource, "create", 2) // TODO: make configurable or handled at a higher level + default: + return err + } +} + +// InterpretUpdateError converts a generic error on an update +// operation into the appropriate API error.
+func InterpretUpdateError(err error, qualifiedResource unversioned.GroupResource, name string) error { + switch { + case storage.IsTestFailed(err), storage.IsNodeExist(err): + return errors.NewConflict(qualifiedResource, name, err) + case storage.IsUnreachable(err): + return errors.NewServerTimeout(qualifiedResource, "update", 2) // TODO: make configurable or handled at a higher level + case storage.IsNotFound(err): + return errors.NewNotFound(qualifiedResource, name) + case storage.IsInternalError(err): + return errors.NewInternalError(err) + default: + return err + } +} + +// InterpretDeleteError converts a generic error on a delete +// operation into the appropriate API error. +func InterpretDeleteError(err error, qualifiedResource unversioned.GroupResource, name string) error { + switch { + case storage.IsNotFound(err): + return errors.NewNotFound(qualifiedResource, name) + case storage.IsUnreachable(err): + return errors.NewServerTimeout(qualifiedResource, "delete", 2) // TODO: make configurable or handled at a higher level + case storage.IsTestFailed(err), storage.IsNodeExist(err): + return errors.NewConflict(qualifiedResource, name, err) + case storage.IsInternalError(err): + return errors.NewInternalError(err) + default: + return err + } +} + +// InterpretWatchError converts a generic error on a watch +// operation into the appropriate API error. +func InterpretWatchError(err error, resource unversioned.GroupResource, name string) error { + switch { + case storage.IsInvalidError(err): + invalidError, _ := err.(storage.InvalidError) + return errors.NewInvalid(unversioned.GroupKind{Group: resource.Group, Kind: resource.Resource}, name, invalidError.Errs) + default: + return err + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/generate_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/generate_test.go new file mode 100644 index 000000000000..fe3cf812b21a --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/generate_test.go @@ -0,0 +1,79 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
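(Illustrative aside, not part of the diff.) The Interpret*Error helpers above are a thin translation layer: each storage predicate (IsNotFound, IsNodeExist, IsUnreachable, IsTestFailed) is mapped to the API error appropriate for the verb in flight, and anything unrecognized passes through untouched. A stand-alone sketch of that shape, with sentinel errors standing in for the storage package's predicates:

package main

import (
	"errors"
	"fmt"
)

// Sentinel errors stand in for the storage package's predicate functions
// (storage.IsNotFound, storage.IsUnreachable, ...).
var (
	errNotFound    = errors.New("key not found")
	errUnreachable = errors.New("server unreachable")
)

// interpretGetError mirrors the shape of InterpretGetError above: translate
// a storage-level failure into the client-facing error that fits the verb,
// and pass anything unrecognized through untouched.
func interpretGetError(err error, resource, name string) error {
	switch {
	case errors.Is(err, errNotFound):
		return fmt.Errorf("%s %q not found", resource, name)
	case errors.Is(err, errUnreachable):
		return fmt.Errorf("%s %q: storage unreachable, retry later", resource, name)
	default:
		return err
	}
}

func main() {
	fmt.Println(interpretGetError(errNotFound, "pods", "web-0"))
	fmt.Println(interpretGetError(errors.New("boom"), "pods", "web-0"))
}

A registry would route every storage-touching return path through such a helper, so clients always see a well-formed API status rather than a raw store error.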
+*/ + +package api + +import ( + "strings" + "testing" +) + +type nameGeneratorFunc func(base string) string + +func (fn nameGeneratorFunc) GenerateName(base string) string { + return fn(base) +} + +func TestGenerateName(t *testing.T) { + testCases := []struct { + meta ObjectMeta + + base string + returned string + }{ + { + returned: "", + }, + { + meta: ObjectMeta{ + GenerateName: "test", + }, + base: "test", + returned: "test", + }, + { + meta: ObjectMeta{ + Name: "foo", + GenerateName: "test", + }, + base: "test", + returned: "foo", + }, + } + + for i, testCase := range testCases { + GenerateName(nameGeneratorFunc(func(base string) string { + if base != testCase.base { + t.Errorf("%d: unexpected call with base", i) + } + return "test" + }), &testCase.meta) + expect := testCase.returned + if expect != testCase.meta.Name { + t.Errorf("%d: unexpected name: %#v", i, testCase.meta) + } + } +} + +func TestSimpleNameGenerator(t *testing.T) { + meta := &ObjectMeta{ + GenerateName: "foo", + } + GenerateName(SimpleNameGenerator, meta) + if !strings.HasPrefix(meta.Name, "foo") || meta.Name == "foo" { + t.Errorf("unexpected name: %#v", meta) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/helpers.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/helpers.go index aed3c5a7aa3e..d173c1fb887a 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/helpers.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/helpers.go @@ -30,6 +30,7 @@ import ( "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/types" "k8s.io/kubernetes/pkg/util/sets" "github.com/davecgh/go-spew/spew" @@ -57,16 +58,7 @@ var Semantic = conversion.EqualitiesOrDie( // TODO: if we decide it's important, it should be safe to start comparing the format. // // Uninitialized quantities are equivalent to 0 quantities. - if a.Amount == nil && b.MilliValue() == 0 { - return true - } - if b.Amount == nil && a.MilliValue() == 0 { - return true - } - if a.Amount == nil || b.Amount == nil { - return false - } - return a.Amount.Cmp(b.Amount) == 0 + return a.Cmp(b) == 0 }, func(a, b unversioned.Time) bool { return a.UTC() == b.UTC() @@ -151,6 +143,8 @@ var standardQuotaResources = sets.NewString( string(ResourceSecrets), string(ResourcePersistentVolumeClaims), string(ResourceConfigMaps), + string(ResourceServicesNodePorts), + string(ResourceServicesLoadBalancers), ) // IsStandardQuotaResourceName returns true if the resource is known to @@ -189,6 +183,8 @@ var integerResources = sets.NewString( string(ResourceSecrets), string(ResourceConfigMaps), string(ResourcePersistentVolumeClaims), + string(ResourceServicesNodePorts), + string(ResourceServicesLoadBalancers), ) // IsIntegerResourceName returns true if the resource is measured in integer values @@ -204,6 +200,19 @@ func NewDeleteOptions(grace int64) *DeleteOptions { return &DeleteOptions{GracePeriodSeconds: &grace} } +// NewPreconditionDeleteOptions returns a DeleteOptions with a UID precondition set. +func NewPreconditionDeleteOptions(uid string) *DeleteOptions { + u := types.UID(uid) + p := Preconditions{UID: &u} + return &DeleteOptions{Preconditions: &p} +} + +// NewUIDPreconditions returns a Preconditions with UID set. 
+func NewUIDPreconditions(uid string) *Preconditions { + u := types.UID(uid) + return &Preconditions{UID: &u} +} + // this function aims to check if the service's ClusterIP is set or not // the objective is not to perform validation here func IsServiceIPSet(service *Service) bool { @@ -216,12 +225,22 @@ func IsServiceIPRequested(service *Service) bool { } var standardFinalizers = sets.NewString( - string(FinalizerKubernetes)) + string(FinalizerKubernetes), + FinalizerOrphan, +) func IsStandardFinalizerName(str string) bool { return standardFinalizers.Has(str) } +// SingleObject returns a ListOptions for watching a single object. +func SingleObject(meta ObjectMeta) ListOptions { + return ListOptions{ + FieldSelector: fields.OneTermEqualSelector("metadata.name", meta.Name), + ResourceVersion: meta.ResourceVersion, + } +} + // AddToNodeAddresses appends the NodeAddresses to the passed-by-pointer slice, // only if they do not already exist func AddToNodeAddresses(addresses *[]NodeAddress, addAddresses ...NodeAddress) { @@ -342,13 +361,13 @@ func containsAccessMode(modes []PersistentVolumeAccessMode, mode PersistentVolum // ParseRFC3339 parses an RFC3339 date in either RFC3339Nano or RFC3339 format. func ParseRFC3339(s string, nowFn func() unversioned.Time) (unversioned.Time, error) { if t, timeErr := time.Parse(time.RFC3339Nano, s); timeErr == nil { - return unversioned.Time{t}, nil + return unversioned.Time{Time: t}, nil } t, err := time.Parse(time.RFC3339, s) if err != nil { return unversioned.Time{}, err } - return unversioned.Time{t}, nil + return unversioned.Time{Time: t}, nil } // NodeSelectorRequirementsAsSelector converts the []NodeSelectorRequirement api type into a struct that implements @@ -385,9 +404,19 @@ func NodeSelectorRequirementsAsSelector(nsm []NodeSelectorRequirement) (labels.S return selector, nil } -// AffinityAnnotationKey represents the key of affinity data (json serialized) -// in the Annotations of a Pod. -const AffinityAnnotationKey string = "scheduler.alpha.kubernetes.io/affinity" +const ( + // AffinityAnnotationKey represents the key of affinity data (json serialized) + // in the Annotations of a Pod. + AffinityAnnotationKey string = "scheduler.alpha.kubernetes.io/affinity" + + // TolerationsAnnotationKey represents the key of tolerations data (json serialized) + // in the Annotations of a Pod. + TolerationsAnnotationKey string = "scheduler.alpha.kubernetes.io/tolerations" + + // TaintsAnnotationKey represents the key of taints data (json serialized) + // in the Annotations of a Node. + TaintsAnnotationKey string = "scheduler.alpha.kubernetes.io/taints" +) // GetAffinityFromPod gets the json serialized affinity data from Pod.Annotations // and converts it to the Affinity type in api. @@ -401,3 +430,61 @@ func GetAffinityFromPodAnnotations(annotations map[string]string) (Affinity, err } return affinity, nil } + +// GetTolerationsFromPodAnnotations gets the json serialized tolerations data from Pod.Annotations +// and converts it to the []Toleration type in api. +func GetTolerationsFromPodAnnotations(annotations map[string]string) ([]Toleration, error) { + var tolerations []Toleration + if len(annotations) > 0 && annotations[TolerationsAnnotationKey] != "" { + err := json.Unmarshal([]byte(annotations[TolerationsAnnotationKey]), &tolerations) + if err != nil { + return tolerations, err + } + } + return tolerations, nil +} + +// GetTaintsFromNodeAnnotations gets the json serialized taints data from Node.Annotations +// and converts it to the []Taint type in api.
+func GetTaintsFromNodeAnnotations(annotations map[string]string) ([]Taint, error) {
+	var taints []Taint
+	if len(annotations) > 0 && annotations[TaintsAnnotationKey] != "" {
+		err := json.Unmarshal([]byte(annotations[TaintsAnnotationKey]), &taints)
+		if err != nil {
+			return []Taint{}, err
+		}
+	}
+	return taints, nil
+}
+
+// TolerationToleratesTaint checks if the toleration tolerates the taint.
+func TolerationToleratesTaint(toleration Toleration, taint Taint) bool {
+	if len(toleration.Effect) != 0 && toleration.Effect != taint.Effect {
+		return false
+	}
+
+	if toleration.Key != taint.Key {
+		return false
+	}
+	// TODO: Use proper defaulting when Toleration becomes a field of PodSpec
+	if (len(toleration.Operator) == 0 || toleration.Operator == TolerationOpEqual) && toleration.Value == taint.Value {
+		return true
+	}
+	if toleration.Operator == TolerationOpExists {
+		return true
+	}
+	return false
+}
+
+// TaintToleratedByTolerations checks if taint is tolerated by any of the tolerations.
+func TaintToleratedByTolerations(taint Taint, tolerations []Toleration) bool {
+	tolerated := false
+	for _, toleration := range tolerations {
+		if TolerationToleratesTaint(toleration, taint) {
+			tolerated = true
+			break
+		}
+	}
+	return tolerated
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/helpers_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/helpers_test.go
new file mode 100644
index 000000000000..28c696b43c65
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/helpers_test.go
@@ -0,0 +1,297 @@
+/*
+Copyright 2015 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package api + +import ( + "reflect" + "strings" + "testing" + + "k8s.io/kubernetes/pkg/api/resource" + "k8s.io/kubernetes/pkg/labels" +) + +func TestConversionError(t *testing.T) { + var i int + var s string + i = 3 + s = "foo" + c := ConversionError{ + In: &i, Out: &s, + Message: "Can't make x into y, silly", + } + var e error + e = &c // ensure it implements error + msg := e.Error() + t.Logf("Message is %v", msg) + for _, part := range []string{"3", "int", "string", "Can't"} { + if !strings.Contains(msg, part) { + t.Errorf("didn't find %v", part) + } + } +} + +func TestSemantic(t *testing.T) { + table := []struct { + a, b interface{} + shouldEqual bool + }{ + {resource.MustParse("0"), resource.Quantity{}, true}, + {resource.Quantity{}, resource.MustParse("0"), true}, + {resource.Quantity{}, resource.MustParse("1m"), false}, + { + resource.NewQuantity(5, resource.BinarySI), + resource.NewQuantity(5, resource.DecimalSI), + true, + }, + {resource.MustParse("2m"), resource.MustParse("1m"), false}, + } + + for index, item := range table { + if e, a := item.shouldEqual, Semantic.DeepEqual(item.a, item.b); e != a { + t.Errorf("case[%d], expected %v, got %v.", index, e, a) + } + } +} + +func TestIsStandardResource(t *testing.T) { + testCases := []struct { + input string + output bool + }{ + {"cpu", true}, + {"memory", true}, + {"disk", false}, + {"blah", false}, + {"x.y.z", false}, + } + for i, tc := range testCases { + if IsStandardResourceName(tc.input) != tc.output { + t.Errorf("case[%d], expected: %t, got: %t", i, tc.output, !tc.output) + } + } +} + +func TestAddToNodeAddresses(t *testing.T) { + testCases := []struct { + existing []NodeAddress + toAdd []NodeAddress + expected []NodeAddress + }{ + { + existing: []NodeAddress{}, + toAdd: []NodeAddress{}, + expected: []NodeAddress{}, + }, + { + existing: []NodeAddress{}, + toAdd: []NodeAddress{ + {Type: NodeExternalIP, Address: "1.1.1.1"}, + {Type: NodeHostName, Address: "localhost"}, + }, + expected: []NodeAddress{ + {Type: NodeExternalIP, Address: "1.1.1.1"}, + {Type: NodeHostName, Address: "localhost"}, + }, + }, + { + existing: []NodeAddress{}, + toAdd: []NodeAddress{ + {Type: NodeExternalIP, Address: "1.1.1.1"}, + {Type: NodeExternalIP, Address: "1.1.1.1"}, + }, + expected: []NodeAddress{ + {Type: NodeExternalIP, Address: "1.1.1.1"}, + }, + }, + { + existing: []NodeAddress{ + {Type: NodeExternalIP, Address: "1.1.1.1"}, + {Type: NodeInternalIP, Address: "10.1.1.1"}, + }, + toAdd: []NodeAddress{ + {Type: NodeExternalIP, Address: "1.1.1.1"}, + {Type: NodeHostName, Address: "localhost"}, + }, + expected: []NodeAddress{ + {Type: NodeExternalIP, Address: "1.1.1.1"}, + {Type: NodeInternalIP, Address: "10.1.1.1"}, + {Type: NodeHostName, Address: "localhost"}, + }, + }, + } + + for i, tc := range testCases { + AddToNodeAddresses(&tc.existing, tc.toAdd...) 
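+		// Semantic.DeepEqual is used for consistency with the rest of the
+		// package; its custom equalities only apply to quantities and times,
+		// so for plain NodeAddress slices it matches reflect.DeepEqual.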
+ if !Semantic.DeepEqual(tc.expected, tc.existing) { + t.Errorf("case[%d], expected: %v, got: %v", i, tc.expected, tc.existing) + } + } +} + +func TestGetAccessModesFromString(t *testing.T) { + modes := GetAccessModesFromString("ROX") + if !containsAccessMode(modes, ReadOnlyMany) { + t.Errorf("Expected mode %s, but got %+v", ReadOnlyMany, modes) + } + + modes = GetAccessModesFromString("ROX,RWX") + if !containsAccessMode(modes, ReadOnlyMany) { + t.Errorf("Expected mode %s, but got %+v", ReadOnlyMany, modes) + } + if !containsAccessMode(modes, ReadWriteMany) { + t.Errorf("Expected mode %s, but got %+v", ReadWriteMany, modes) + } + + modes = GetAccessModesFromString("RWO,ROX,RWX") + if !containsAccessMode(modes, ReadOnlyMany) { + t.Errorf("Expected mode %s, but got %+v", ReadOnlyMany, modes) + } + if !containsAccessMode(modes, ReadWriteMany) { + t.Errorf("Expected mode %s, but got %+v", ReadWriteMany, modes) + } +} + +func TestRemoveDuplicateAccessModes(t *testing.T) { + modes := []PersistentVolumeAccessMode{ + ReadWriteOnce, ReadOnlyMany, ReadOnlyMany, ReadOnlyMany, + } + modes = removeDuplicateAccessModes(modes) + if len(modes) != 2 { + t.Errorf("Expected 2 distinct modes in set but found %v", len(modes)) + } +} + +func TestNodeSelectorRequirementsAsSelector(t *testing.T) { + matchExpressions := []NodeSelectorRequirement{{ + Key: "foo", + Operator: NodeSelectorOpIn, + Values: []string{"bar", "baz"}, + }} + mustParse := func(s string) labels.Selector { + out, e := labels.Parse(s) + if e != nil { + panic(e) + } + return out + } + tc := []struct { + in []NodeSelectorRequirement + out labels.Selector + expectErr bool + }{ + {in: nil, out: labels.Nothing()}, + {in: []NodeSelectorRequirement{}, out: labels.Nothing()}, + { + in: matchExpressions, + out: mustParse("foo in (baz,bar)"), + }, + { + in: []NodeSelectorRequirement{{ + Key: "foo", + Operator: NodeSelectorOpExists, + Values: []string{"bar", "baz"}, + }}, + expectErr: true, + }, + { + in: []NodeSelectorRequirement{{ + Key: "foo", + Operator: NodeSelectorOpGt, + Values: []string{"1.1"}, + }}, + out: mustParse("foo>1.1"), + }, + { + in: []NodeSelectorRequirement{{ + Key: "bar", + Operator: NodeSelectorOpLt, + Values: []string{"7.1"}, + }}, + out: mustParse("bar<7.1"), + }, + } + + for i, tc := range tc { + out, err := NodeSelectorRequirementsAsSelector(tc.in) + if err == nil && tc.expectErr { + t.Errorf("[%v]expected error but got none.", i) + } + if err != nil && !tc.expectErr { + t.Errorf("[%v]did not expect error but got: %v", i, err) + } + if !reflect.DeepEqual(out, tc.out) { + t.Errorf("[%v]expected:\n\t%+v\nbut got:\n\t%+v", i, tc.out, out) + } + } +} + +func TestGetAffinityFromPod(t *testing.T) { + testCases := []struct { + pod *Pod + expectErr bool + }{ + { + pod: &Pod{}, + expectErr: false, + }, + { + pod: &Pod{ + ObjectMeta: ObjectMeta{ + Annotations: map[string]string{ + AffinityAnnotationKey: ` + {"nodeAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": { + "nodeSelectorTerms": [{ + "matchExpressions": [{ + "key": "foo", + "operator": "In", + "values": ["value1", "value2"] + }] + }] + }}}`, + }, + }, + }, + expectErr: false, + }, + { + pod: &Pod{ + ObjectMeta: ObjectMeta{ + Annotations: map[string]string{ + AffinityAnnotationKey: ` + {"nodeAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": { + "nodeSelectorTerms": [{ + "matchExpressions": [{ + "key": "foo", + `, + }, + }, + }, + expectErr: true, + }, + } + + for i, tc := range testCases { + _, err := GetAffinityFromPodAnnotations(tc.pod.Annotations) + if 
err == nil && tc.expectErr { + t.Errorf("[%v]expected error but got none.", i) + } + if err != nil && !tc.expectErr { + t.Errorf("[%v]did not expect error but got: %v", i, err) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/install/install.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/install/install.go index 58d121ba86e2..bed5f0791abf 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/install/install.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/install/install.go @@ -29,8 +29,10 @@ import ( "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/apimachinery" "k8s.io/kubernetes/pkg/apimachinery/registered" + "k8s.io/kubernetes/pkg/conversion" "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/util/sets" + "k8s.io/kubernetes/pkg/watch/versioned" ) const importPrefix = "k8s.io/kubernetes/pkg/api" @@ -150,4 +152,100 @@ func addVersionsToScheme(externalVersions ...unversioned.GroupVersion) { v1.AddToScheme(api.Scheme) } } + + // This is a "fast-path" that avoids reflection for common types. It focuses on the objects that are + // converted the most in the cluster. + // TODO: generate one of these for every external API group - this is to prove the impact + api.Scheme.AddGenericConversionFunc(func(objA, objB interface{}, s conversion.Scope) (bool, error) { + switch a := objA.(type) { + case *v1.Pod: + switch b := objB.(type) { + case *api.Pod: + return true, v1.Convert_v1_Pod_To_api_Pod(a, b, s) + } + case *api.Pod: + switch b := objB.(type) { + case *v1.Pod: + return true, v1.Convert_api_Pod_To_v1_Pod(a, b, s) + } + + case *v1.Event: + switch b := objB.(type) { + case *api.Event: + return true, v1.Convert_v1_Event_To_api_Event(a, b, s) + } + case *api.Event: + switch b := objB.(type) { + case *v1.Event: + return true, v1.Convert_api_Event_To_v1_Event(a, b, s) + } + + case *v1.ReplicationController: + switch b := objB.(type) { + case *api.ReplicationController: + return true, v1.Convert_v1_ReplicationController_To_api_ReplicationController(a, b, s) + } + case *api.ReplicationController: + switch b := objB.(type) { + case *v1.ReplicationController: + return true, v1.Convert_api_ReplicationController_To_v1_ReplicationController(a, b, s) + } + + case *v1.Node: + switch b := objB.(type) { + case *api.Node: + return true, v1.Convert_v1_Node_To_api_Node(a, b, s) + } + case *api.Node: + switch b := objB.(type) { + case *v1.Node: + return true, v1.Convert_api_Node_To_v1_Node(a, b, s) + } + + case *v1.Namespace: + switch b := objB.(type) { + case *api.Namespace: + return true, v1.Convert_v1_Namespace_To_api_Namespace(a, b, s) + } + case *api.Namespace: + switch b := objB.(type) { + case *v1.Namespace: + return true, v1.Convert_api_Namespace_To_v1_Namespace(a, b, s) + } + + case *v1.Service: + switch b := objB.(type) { + case *api.Service: + return true, v1.Convert_v1_Service_To_api_Service(a, b, s) + } + case *api.Service: + switch b := objB.(type) { + case *v1.Service: + return true, v1.Convert_api_Service_To_v1_Service(a, b, s) + } + + case *v1.Endpoints: + switch b := objB.(type) { + case *api.Endpoints: + return true, v1.Convert_v1_Endpoints_To_api_Endpoints(a, b, s) + } + case *api.Endpoints: + switch b := objB.(type) { + case *v1.Endpoints: + return true, v1.Convert_api_Endpoints_To_v1_Endpoints(a, b, s) + } + + case *versioned.Event: + switch b := objB.(type) { + case *versioned.InternalEvent: + return true, versioned.Convert_versioned_Event_to_versioned_InternalEvent(a, b, s) + } + case *versioned.InternalEvent: + switch b := objB.(type) { 
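+		// Watch events wrap every object the server streams to clients, so
+		// they get the same reflection-free fast path as the kinds above.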
+ case *versioned.Event: + return true, versioned.Convert_versioned_InternalEvent_to_versioned_Event(a, b, s) + } + } + return false, nil + }) } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/install/install_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/install/install_test.go new file mode 100644 index 000000000000..07c7f5d51dd4 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/install/install_test.go @@ -0,0 +1,129 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package install + +import ( + "encoding/json" + "reflect" + "testing" + + internal "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/apimachinery/registered" + "k8s.io/kubernetes/pkg/runtime" +) + +func TestResourceVersioner(t *testing.T) { + pod := internal.Pod{ObjectMeta: internal.ObjectMeta{ResourceVersion: "10"}} + version, err := accessor.ResourceVersion(&pod) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if version != "10" { + t.Errorf("unexpected version %v", version) + } + + podList := internal.PodList{ListMeta: unversioned.ListMeta{ResourceVersion: "10"}} + version, err = accessor.ResourceVersion(&podList) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if version != "10" { + t.Errorf("unexpected version %v", version) + } +} + +func TestCodec(t *testing.T) { + pod := internal.Pod{} + // We do want to use package registered rather than testapi here, because we + // want to test if the package install and package registered work as expected. 
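+	// The legacy codec fills in apiVersion and kind while encoding, so the
+	// empty Pod should round-trip to JSON shaped roughly like (illustrative,
+	// not exact):
+	//	{"kind":"Pod","apiVersion":"v1","metadata":{...},"spec":{...}}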
+ data, err := runtime.Encode(internal.Codecs.LegacyCodec(registered.GroupOrDie(internal.GroupName).GroupVersion), &pod) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + other := internal.Pod{} + if err := json.Unmarshal(data, &other); err != nil { + t.Fatalf("unexpected error: %v", err) + } + if other.APIVersion != registered.GroupOrDie(internal.GroupName).GroupVersion.Version || other.Kind != "Pod" { + t.Errorf("unexpected unmarshalled object %#v", other) + } +} + +func TestInterfacesFor(t *testing.T) { + if _, err := registered.GroupOrDie(internal.GroupName).InterfacesFor(internal.SchemeGroupVersion); err == nil { + t.Fatalf("unexpected non-error: %v", err) + } + for i, version := range registered.GroupOrDie(internal.GroupName).GroupVersions { + if vi, err := registered.GroupOrDie(internal.GroupName).InterfacesFor(version); err != nil || vi == nil { + t.Fatalf("%d: unexpected result: %v", i, err) + } + } +} + +func TestRESTMapper(t *testing.T) { + gv := unversioned.GroupVersion{Group: "", Version: "v1"} + rcGVK := gv.WithKind("ReplicationController") + podTemplateGVK := gv.WithKind("PodTemplate") + + if gvk, err := registered.RESTMapper().KindFor(internal.SchemeGroupVersion.WithResource("replicationcontrollers")); err != nil || gvk != rcGVK { + t.Errorf("unexpected version mapping: %v %v", gvk, err) + } + + if m, err := registered.GroupOrDie(internal.GroupName).RESTMapper.RESTMapping(podTemplateGVK.GroupKind(), ""); err != nil || m.GroupVersionKind != podTemplateGVK || m.Resource != "podtemplates" { + t.Errorf("unexpected version mapping: %#v %v", m, err) + } + + for _, version := range registered.GroupOrDie(internal.GroupName).GroupVersions { + mapping, err := registered.GroupOrDie(internal.GroupName).RESTMapper.RESTMapping(rcGVK.GroupKind(), version.Version) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + + if mapping.Resource != "replicationControllers" && mapping.Resource != "replicationcontrollers" { + t.Errorf("incorrect resource name: %#v", mapping) + } + if mapping.GroupVersionKind.GroupVersion() != version { + t.Errorf("incorrect version: %v", mapping) + } + + interfaces, _ := registered.GroupOrDie(internal.GroupName).InterfacesFor(version) + if mapping.ObjectConvertor != interfaces.ObjectConvertor { + t.Errorf("unexpected: %#v, expected: %#v", mapping, interfaces) + } + + rc := &internal.ReplicationController{ObjectMeta: internal.ObjectMeta{Name: "foo"}} + name, err := mapping.MetadataAccessor.Name(rc) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + if name != "foo" { + t.Errorf("unable to retrieve object meta with: %v", mapping.MetadataAccessor) + } + } +} + +func TestUnversioned(t *testing.T) { + for _, obj := range []runtime.Object{ + &unversioned.Status{}, + &unversioned.ExportOptions{}, + } { + if unversioned, ok := internal.Scheme.IsUnversioned(obj); !unversioned || !ok { + t.Errorf("%v is expected to be unversioned", reflect.TypeOf(obj)) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/meta.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/meta.go index ec84c3c8a92f..f3f84063dd88 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/meta.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/meta.go @@ -17,6 +17,7 @@ limitations under the License. 
package api import ( + "k8s.io/kubernetes/pkg/api/meta/metatypes" "k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/conversion" "k8s.io/kubernetes/pkg/runtime" @@ -39,6 +40,7 @@ func HasObjectMetaSystemFieldValues(meta *ObjectMeta) bool { // ObjectMetaFor returns a pointer to a provided object's ObjectMeta. // TODO: allow runtime.Unknown to extract this object +// TODO: Remove this function and use meta.Accessor() instead. func ObjectMetaFor(obj runtime.Object) (*ObjectMeta, error) { v, err := conversion.EnforcePtr(obj) if err != nil { @@ -64,19 +66,51 @@ func ListMetaFor(obj runtime.Object) (*unversioned.ListMeta, error) { // Namespace implements meta.Object for any object with an ObjectMeta typed field. Allows // fast, direct access to metadata fields for API objects. -func (meta *ObjectMeta) GetNamespace() string { return meta.Namespace } -func (meta *ObjectMeta) SetNamespace(namespace string) { meta.Namespace = namespace } -func (meta *ObjectMeta) GetName() string { return meta.Name } -func (meta *ObjectMeta) SetName(name string) { meta.Name = name } -func (meta *ObjectMeta) GetGenerateName() string { return meta.GenerateName } -func (meta *ObjectMeta) SetGenerateName(generateName string) { meta.GenerateName = generateName } -func (meta *ObjectMeta) GetUID() types.UID { return meta.UID } -func (meta *ObjectMeta) SetUID(uid types.UID) { meta.UID = uid } -func (meta *ObjectMeta) GetResourceVersion() string { return meta.ResourceVersion } -func (meta *ObjectMeta) SetResourceVersion(version string) { meta.ResourceVersion = version } -func (meta *ObjectMeta) GetSelfLink() string { return meta.SelfLink } -func (meta *ObjectMeta) SetSelfLink(selfLink string) { meta.SelfLink = selfLink } +func (meta *ObjectMeta) GetNamespace() string { return meta.Namespace } +func (meta *ObjectMeta) SetNamespace(namespace string) { meta.Namespace = namespace } +func (meta *ObjectMeta) GetName() string { return meta.Name } +func (meta *ObjectMeta) SetName(name string) { meta.Name = name } +func (meta *ObjectMeta) GetGenerateName() string { return meta.GenerateName } +func (meta *ObjectMeta) SetGenerateName(generateName string) { meta.GenerateName = generateName } +func (meta *ObjectMeta) GetUID() types.UID { return meta.UID } +func (meta *ObjectMeta) SetUID(uid types.UID) { meta.UID = uid } +func (meta *ObjectMeta) GetResourceVersion() string { return meta.ResourceVersion } +func (meta *ObjectMeta) SetResourceVersion(version string) { meta.ResourceVersion = version } +func (meta *ObjectMeta) GetSelfLink() string { return meta.SelfLink } +func (meta *ObjectMeta) SetSelfLink(selfLink string) { meta.SelfLink = selfLink } +func (meta *ObjectMeta) GetCreationTimestamp() unversioned.Time { return meta.CreationTimestamp } +func (meta *ObjectMeta) SetCreationTimestamp(creationTimestamp unversioned.Time) { + meta.CreationTimestamp = creationTimestamp +} +func (meta *ObjectMeta) GetDeletionTimestamp() *unversioned.Time { return meta.DeletionTimestamp } +func (meta *ObjectMeta) SetDeletionTimestamp(deletionTimestamp *unversioned.Time) { + meta.DeletionTimestamp = deletionTimestamp +} func (meta *ObjectMeta) GetLabels() map[string]string { return meta.Labels } func (meta *ObjectMeta) SetLabels(labels map[string]string) { meta.Labels = labels } func (meta *ObjectMeta) GetAnnotations() map[string]string { return meta.Annotations } func (meta *ObjectMeta) SetAnnotations(annotations map[string]string) { meta.Annotations = annotations } +func (meta *ObjectMeta) GetFinalizers() []string { return meta.Finalizers } 
+func (meta *ObjectMeta) SetFinalizers(finalizers []string) { meta.Finalizers = finalizers } + +func (meta *ObjectMeta) GetOwnerReferences() []metatypes.OwnerReference { + ret := make([]metatypes.OwnerReference, len(meta.OwnerReferences)) + for i := 0; i < len(meta.OwnerReferences); i++ { + ret[i].Kind = meta.OwnerReferences[i].Kind + ret[i].Name = meta.OwnerReferences[i].Name + ret[i].UID = meta.OwnerReferences[i].UID + ret[i].APIVersion = meta.OwnerReferences[i].APIVersion + } + return ret +} + +func (meta *ObjectMeta) SetOwnerReferences(references []metatypes.OwnerReference) { + newReferences := make([]OwnerReference, len(references)) + for i := 0; i < len(references); i++ { + newReferences[i].Kind = references[i].Kind + newReferences[i].Name = references[i].Name + newReferences[i].UID = references[i].UID + newReferences[i].APIVersion = references[i].APIVersion + } + meta.OwnerReferences = newReferences +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/meta/deep_copy_generated.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/meta/deep_copy_generated.go new file mode 100644 index 000000000000..8fbea282355a --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/meta/deep_copy_generated.go @@ -0,0 +1,154 @@ +// +build !ignore_autogenerated + +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! 
+ +package meta + +import ( + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + conversion "k8s.io/kubernetes/pkg/conversion" + runtime "k8s.io/kubernetes/pkg/runtime" +) + +func DeepCopy_meta_DefaultRESTMapper(in DefaultRESTMapper, out *DefaultRESTMapper, c *conversion.Cloner) error { + if in.defaultGroupVersions != nil { + in, out := in.defaultGroupVersions, &out.defaultGroupVersions + *out = make([]unversioned.GroupVersion, len(in)) + for i := range in { + if err := unversioned.DeepCopy_unversioned_GroupVersion(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.defaultGroupVersions = nil + } + if in.resourceToKind != nil { + in, out := in.resourceToKind, &out.resourceToKind + *out = make(map[unversioned.GroupVersionResource]unversioned.GroupVersionKind) + for range in { + // FIXME: Copying unassignable keys unsupported unversioned.GroupVersionResource + } + } else { + out.resourceToKind = nil + } + if in.kindToPluralResource != nil { + in, out := in.kindToPluralResource, &out.kindToPluralResource + *out = make(map[unversioned.GroupVersionKind]unversioned.GroupVersionResource) + for range in { + // FIXME: Copying unassignable keys unsupported unversioned.GroupVersionKind + } + } else { + out.kindToPluralResource = nil + } + if in.kindToScope != nil { + in, out := in.kindToScope, &out.kindToScope + *out = make(map[unversioned.GroupVersionKind]RESTScope) + for range in { + // FIXME: Copying unassignable keys unsupported unversioned.GroupVersionKind + } + } else { + out.kindToScope = nil + } + if in.singularToPlural != nil { + in, out := in.singularToPlural, &out.singularToPlural + *out = make(map[unversioned.GroupVersionResource]unversioned.GroupVersionResource) + for range in { + // FIXME: Copying unassignable keys unsupported unversioned.GroupVersionResource + } + } else { + out.singularToPlural = nil + } + if in.pluralToSingular != nil { + in, out := in.pluralToSingular, &out.pluralToSingular + *out = make(map[unversioned.GroupVersionResource]unversioned.GroupVersionResource) + for range in { + // FIXME: Copying unassignable keys unsupported unversioned.GroupVersionResource + } + } else { + out.pluralToSingular = nil + } + if in.interfacesFunc == nil { + out.interfacesFunc = nil + } else if newVal, err := c.DeepCopy(in.interfacesFunc); err != nil { + return err + } else { + out.interfacesFunc = newVal.(VersionInterfacesFunc) + } + if in.aliasToResource != nil { + in, out := in.aliasToResource, &out.aliasToResource + *out = make(map[string][]string) + for key, val := range in { + if newVal, err := c.DeepCopy(val); err != nil { + return err + } else { + (*out)[key] = newVal.([]string) + } + } + } else { + out.aliasToResource = nil + } + return nil +} + +func DeepCopy_meta_RESTMapping(in RESTMapping, out *RESTMapping, c *conversion.Cloner) error { + out.Resource = in.Resource + if err := unversioned.DeepCopy_unversioned_GroupVersionKind(in.GroupVersionKind, &out.GroupVersionKind, c); err != nil { + return err + } + if in.Scope == nil { + out.Scope = nil + } else if newVal, err := c.DeepCopy(in.Scope); err != nil { + return err + } else { + out.Scope = newVal.(RESTScope) + } + if in.ObjectConvertor == nil { + out.ObjectConvertor = nil + } else if newVal, err := c.DeepCopy(in.ObjectConvertor); err != nil { + return err + } else { + out.ObjectConvertor = newVal.(runtime.ObjectConvertor) + } + if in.MetadataAccessor == nil { + out.MetadataAccessor = nil + } else if newVal, err := c.DeepCopy(in.MetadataAccessor); err != nil { + return err + } else { + 
out.MetadataAccessor = newVal.(MetadataAccessor) + } + return nil +} + +func DeepCopy_meta_VersionInterfaces(in VersionInterfaces, out *VersionInterfaces, c *conversion.Cloner) error { + if in.ObjectConvertor == nil { + out.ObjectConvertor = nil + } else if newVal, err := c.DeepCopy(in.ObjectConvertor); err != nil { + return err + } else { + out.ObjectConvertor = newVal.(runtime.ObjectConvertor) + } + if in.MetadataAccessor == nil { + out.MetadataAccessor = nil + } else if newVal, err := c.DeepCopy(in.MetadataAccessor); err != nil { + return err + } else { + out.MetadataAccessor = newVal.(MetadataAccessor) + } + return nil +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/meta/help.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/meta/help.go index 7d1570bc202f..cdc07930fe83 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/meta/help.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/meta/help.go @@ -76,8 +76,9 @@ func ExtractList(obj runtime.Object) ([]runtime.Object, error) { switch { case item.Object != nil: list[i] = item.Object - case item.RawJSON != nil: - list[i] = &runtime.Unknown{RawJSON: item.RawJSON} + case item.Raw != nil: + // TODO: Set ContentEncoding and ContentType correctly. + list[i] = &runtime.Unknown{Raw: item.Raw} default: list[i] = nil } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/meta/help_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/meta/help_test.go new file mode 100644 index 000000000000..85ba2cbf6d5d --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/meta/help_test.go @@ -0,0 +1,253 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package meta_test
+
+import (
+	"reflect"
+	"testing"
+
+	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/api/meta"
+	"k8s.io/kubernetes/pkg/api/unversioned"
+	"k8s.io/kubernetes/pkg/api/v1"
+	"k8s.io/kubernetes/pkg/runtime"
+	"k8s.io/kubernetes/pkg/util/diff"
+
+	"github.com/google/gofuzz"
+)
+
+func TestIsList(t *testing.T) {
+	tests := []struct {
+		obj    runtime.Object
+		isList bool
+	}{
+		{&api.PodList{}, true},
+		{&api.Pod{}, false},
+	}
+	for _, item := range tests {
+		if e, a := item.isList, meta.IsListType(item.obj); e != a {
+			t.Errorf("%v: Expected %v, got %v", reflect.TypeOf(item.obj), e, a)
+		}
+	}
+}
+
+func TestExtractList(t *testing.T) {
+	pl := &api.PodList{
+		Items: []api.Pod{
+			{ObjectMeta: api.ObjectMeta{Name: "1"}},
+			{ObjectMeta: api.ObjectMeta{Name: "2"}},
+			{ObjectMeta: api.ObjectMeta{Name: "3"}},
+		},
+	}
+	list, err := meta.ExtractList(pl)
+	if err != nil {
+		t.Fatalf("Unexpected error %v", err)
+	}
+	if e, a := len(list), len(pl.Items); e != a {
+		t.Fatalf("Expected %v, got %v", e, a)
+	}
+	for i := range list {
+		if e, a := list[i].(*api.Pod).Name, pl.Items[i].Name; e != a {
+			t.Fatalf("Expected %v, got %v", e, a)
+		}
+	}
+}
+
+func TestExtractListV1(t *testing.T) {
+	pl := &v1.PodList{
+		Items: []v1.Pod{
+			{ObjectMeta: v1.ObjectMeta{Name: "1"}},
+			{ObjectMeta: v1.ObjectMeta{Name: "2"}},
+			{ObjectMeta: v1.ObjectMeta{Name: "3"}},
+		},
+	}
+	list, err := meta.ExtractList(pl)
+	if err != nil {
+		t.Fatalf("Unexpected error %v", err)
+	}
+	if e, a := len(list), len(pl.Items); e != a {
+		t.Fatalf("Expected %v, got %v", e, a)
+	}
+	for i := range list {
+		if e, a := list[i].(*v1.Pod).Name, pl.Items[i].Name; e != a {
+			t.Fatalf("Expected %v, got %v", e, a)
+		}
+	}
+}
+
+func TestExtractListGeneric(t *testing.T) {
+	pl := &api.List{
+		Items: []runtime.Object{
+			&api.Pod{ObjectMeta: api.ObjectMeta{Name: "1"}},
+			&api.Service{ObjectMeta: api.ObjectMeta{Name: "2"}},
+		},
+	}
+	list, err := meta.ExtractList(pl)
+	if err != nil {
+		t.Fatalf("Unexpected error %v", err)
+	}
+	if e, a := len(list), len(pl.Items); e != a {
+		t.Fatalf("Expected %v, got %v", e, a)
+	}
+	if obj, ok := list[0].(*api.Pod); !ok {
+		t.Fatalf("Expected list[0] to be *api.Pod, it is %#v", obj)
+	}
+	if obj, ok := list[1].(*api.Service); !ok {
+		t.Fatalf("Expected list[1] to be *api.Service, it is %#v", obj)
+	}
+}
+
+func TestExtractListGenericV1(t *testing.T) {
+	pl := &v1.List{
+		Items: []runtime.RawExtension{
+			{Raw: []byte("foo")},
+			{Raw: []byte("bar")},
+			{Object: &v1.Pod{ObjectMeta: v1.ObjectMeta{Name: "other"}}},
+		},
+	}
+	list, err := meta.ExtractList(pl)
+	if err != nil {
+		t.Fatalf("Unexpected error %v", err)
+	}
+	if e, a := len(list), len(pl.Items); e != a {
+		t.Fatalf("Expected %v, got %v", e, a)
+	}
+	if obj, ok := list[0].(*runtime.Unknown); !ok {
+		t.Fatalf("Expected list[0] to be *runtime.Unknown, it is %#v", obj)
+	}
+	if obj, ok := list[1].(*runtime.Unknown); !ok {
+		t.Fatalf("Expected list[1] to be *runtime.Unknown, it is %#v", obj)
+	}
+	if obj, ok := list[2].(*v1.Pod); !ok {
+		t.Fatalf("Expected list[2] to be *v1.Pod, it is %#v", obj)
+	}
+}
+
+type fakePtrInterfaceList struct {
+	Items *[]runtime.Object
+}
+
+func (obj fakePtrInterfaceList) GetObjectKind() unversioned.ObjectKind {
+	return unversioned.EmptyObjectKind
+}
+
+func TestExtractListOfInterfacePtrs(t *testing.T) {
+	pl := &fakePtrInterfaceList{
+		Items: &[]runtime.Object{},
+	}
+	list, err := meta.ExtractList(pl)
+	if err != nil {
+		t.Fatalf("Unexpected error %v", err)
+	}
+	if len(list) > 0 {
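+		// ExtractList follows the Items pointer here; an empty slice behind
+		// a pointer must come back as an empty result, not an error.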
t.Fatalf("Expected empty list, got %#v", list) + } +} + +type fakePtrValueList struct { + Items []*api.Pod +} + +func (obj fakePtrValueList) GetObjectKind() unversioned.ObjectKind { + return unversioned.EmptyObjectKind +} + +func TestExtractListOfValuePtrs(t *testing.T) { + pl := &fakePtrValueList{ + Items: []*api.Pod{ + {ObjectMeta: api.ObjectMeta{Name: "1"}}, + {ObjectMeta: api.ObjectMeta{Name: "2"}}, + }, + } + list, err := meta.ExtractList(pl) + if err != nil { + t.Fatalf("Unexpected error %v", err) + } + if e, a := len(list), len(pl.Items); e != a { + t.Fatalf("Expected %v, got %v", e, a) + } + for i := range list { + if obj, ok := list[i].(*api.Pod); !ok { + t.Fatalf("Expected list[%d] to be *api.Pod, it is %#v", i, obj) + } + } +} + +func TestSetList(t *testing.T) { + pl := &api.PodList{} + list := []runtime.Object{ + &api.Pod{ObjectMeta: api.ObjectMeta{Name: "1"}}, + &api.Pod{ObjectMeta: api.ObjectMeta{Name: "2"}}, + &api.Pod{ObjectMeta: api.ObjectMeta{Name: "3"}}, + } + err := meta.SetList(pl, list) + if err != nil { + t.Fatalf("Unexpected error %v", err) + } + if e, a := len(list), len(pl.Items); e != a { + t.Fatalf("Expected %v, got %v", e, a) + } + for i := range list { + if e, a := list[i].(*api.Pod).Name, pl.Items[i].Name; e != a { + t.Fatalf("Expected %v, got %v", e, a) + } + } +} + +func TestSetListToRuntimeObjectArray(t *testing.T) { + pl := &api.List{} + list := []runtime.Object{ + &api.Pod{ObjectMeta: api.ObjectMeta{Name: "1"}}, + &api.Pod{ObjectMeta: api.ObjectMeta{Name: "2"}}, + &api.Pod{ObjectMeta: api.ObjectMeta{Name: "3"}}, + } + err := meta.SetList(pl, list) + if err != nil { + t.Fatalf("Unexpected error %v", err) + } + if e, a := len(list), len(pl.Items); e != a { + t.Fatalf("Expected %v, got %v", e, a) + } + for i := range list { + if e, a := list[i], pl.Items[i]; e != a { + t.Fatalf("%d: unmatched: %s", i, diff.ObjectDiff(e, a)) + } + } +} + +func TestSetExtractListRoundTrip(t *testing.T) { + fuzzer := fuzz.New().NilChance(0).NumElements(1, 5) + for i := 0; i < 5; i++ { + start := &api.PodList{} + fuzzer.Fuzz(&start.Items) + + list, err := meta.ExtractList(start) + if err != nil { + t.Errorf("Unexpected error %v", err) + continue + } + got := &api.PodList{} + err = meta.SetList(got, list) + if err != nil { + t.Errorf("Unexpected error %v", err) + continue + } + if e, a := start, got; !reflect.DeepEqual(e, a) { + t.Fatalf("Expected %#v, got %#v", e, a) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/meta/interfaces.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/meta/interfaces.go index f1402e7ad79e..90d7501801e2 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/meta/interfaces.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/meta/interfaces.go @@ -17,6 +17,7 @@ limitations under the License. 
package meta import ( + "k8s.io/kubernetes/pkg/api/meta/metatypes" "k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/types" @@ -49,10 +50,24 @@ type Object interface { SetResourceVersion(version string) GetSelfLink() string SetSelfLink(selfLink string) + GetCreationTimestamp() unversioned.Time + SetCreationTimestamp(timestamp unversioned.Time) + GetDeletionTimestamp() *unversioned.Time + SetDeletionTimestamp(timestamp *unversioned.Time) GetLabels() map[string]string SetLabels(labels map[string]string) GetAnnotations() map[string]string SetAnnotations(annotations map[string]string) + GetFinalizers() []string + SetFinalizers(finalizers []string) + GetOwnerReferences() []metatypes.OwnerReference + SetOwnerReferences([]metatypes.OwnerReference) +} + +var _ Object = &runtime.Unstructured{} + +type ListMetaAccessor interface { + GetListMeta() List } // List lets you work with list metadata from any of the versioned or diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/meta/meta.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/meta/meta.go index bf11abd18703..b9c8d69d02cb 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/meta/meta.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/meta/meta.go @@ -20,26 +20,65 @@ import ( "fmt" "reflect" + "k8s.io/kubernetes/pkg/api/meta/metatypes" "k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/conversion" "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/types" + + "github.com/golang/glog" ) +func ListAccessor(obj interface{}) (List, error) { + if listMetaAccessor, ok := obj.(ListMetaAccessor); ok { + if om := listMetaAccessor.GetListMeta(); om != nil { + return om, nil + } + } + // we may get passed an object that is directly portable to List + if list, ok := obj.(List); ok { + return list, nil + } + glog.V(4).Infof("Calling ListAccessor on non-internal object: %v", reflect.TypeOf(obj)) + // legacy path for objects that do not implement List and ListMetaAccessor via + // reflection - very slow code path. + v, err := conversion.EnforcePtr(obj) + if err != nil { + return nil, err + } + t := v.Type() + if v.Kind() != reflect.Struct { + return nil, fmt.Errorf("expected struct, but got %v: %v (%#v)", v.Kind(), t, v.Interface()) + } + a := &genericAccessor{} + listMeta := v.FieldByName("ListMeta") + if listMeta.IsValid() { + // look for the ListMeta fields + if err := extractFromListMeta(listMeta, a); err != nil { + return nil, fmt.Errorf("unable to find list fields on %#v: %v", listMeta, err) + } + } else { + return nil, fmt.Errorf("unable to find listMeta on %#v", v) + } + return a, nil +} + // Accessor takes an arbitrary object pointer and returns meta.Interface. // obj must be a pointer to an API type. An error is returned if the minimum // required fields are missing. Fields that are not required return the default // value and are a no-op if set. 
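+//
+// A minimal call-site sketch (obj being any pointer to an API object):
+//
+//	if m, err := meta.Accessor(obj); err == nil {
+//		fmt.Printf("%s/%s\n", m.GetNamespace(), m.GetName())
+//	}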
func Accessor(obj interface{}) (Object, error) { - if oi, ok := obj.(ObjectMetaAccessor); ok { - if om := oi.GetObjectMeta(); om != nil { + if objectMetaAccessor, ok := obj.(ObjectMetaAccessor); ok { + if om := objectMetaAccessor.GetObjectMeta(); om != nil { return om, nil } } // we may get passed an object that is directly portable to Object - if oi, ok := obj.(Object); ok { - return oi, nil + if object, ok := obj.(Object); ok { + return object, nil } + + glog.V(4).Infof("Calling Accessor on non-internal object: %v", reflect.TypeOf(obj)) // legacy path for objects that do not implement Object and ObjectMetaAccessor via // reflection - very slow code path. v, err := conversion.EnforcePtr(obj) @@ -119,33 +158,21 @@ type objectAccessor struct { } func (obj objectAccessor) GetKind() string { - if gvk := obj.GetObjectKind().GroupVersionKind(); gvk != nil { - return gvk.Kind - } - return "" + return obj.GetObjectKind().GroupVersionKind().Kind } func (obj objectAccessor) SetKind(kind string) { gvk := obj.GetObjectKind().GroupVersionKind() - if gvk == nil { - gvk = &unversioned.GroupVersionKind{} - } gvk.Kind = kind obj.GetObjectKind().SetGroupVersionKind(gvk) } func (obj objectAccessor) GetAPIVersion() string { - if gvk := obj.GetObjectKind().GroupVersionKind(); gvk != nil { - return gvk.GroupVersion().String() - } - return "" + return obj.GetObjectKind().GroupVersionKind().GroupVersion().String() } func (obj objectAccessor) SetAPIVersion(version string) { gvk := obj.GetObjectKind().GroupVersionKind() - if gvk == nil { - gvk = &unversioned.GroupVersionKind{} - } gv, err := unversioned.ParseGroupVersion(version) if err != nil { gv = unversioned.GroupVersion{Version: version} @@ -318,19 +345,57 @@ func (resourceAccessor) SetResourceVersion(obj runtime.Object, version string) e return nil } +// extractFromOwnerReference extracts v to o. v is the OwnerReferences field of an object. +func extractFromOwnerReference(v reflect.Value, o *metatypes.OwnerReference) error { + if err := runtime.Field(v, "APIVersion", &o.APIVersion); err != nil { + return err + } + if err := runtime.Field(v, "Kind", &o.Kind); err != nil { + return err + } + if err := runtime.Field(v, "Name", &o.Name); err != nil { + return err + } + if err := runtime.Field(v, "UID", &o.UID); err != nil { + return err + } + return nil +} + +// setOwnerReference sets v to o. v is the OwnerReferences field of an object. +func setOwnerReference(v reflect.Value, o *metatypes.OwnerReference) error { + if err := runtime.SetField(o.APIVersion, v, "APIVersion"); err != nil { + return err + } + if err := runtime.SetField(o.Kind, v, "Kind"); err != nil { + return err + } + if err := runtime.SetField(o.Name, v, "Name"); err != nil { + return err + } + if err := runtime.SetField(o.UID, v, "UID"); err != nil { + return err + } + return nil +} + // genericAccessor contains pointers to strings that can modify an arbitrary // struct and implements the Accessor interface. 
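+// It is filled in via reflection (see extractFromObjectMeta below): every
+// field is a pointer into the original struct, so writes through the accessor
+// mutate the underlying object directly.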
 type genericAccessor struct {
-	namespace       *string
-	name            *string
-	generateName    *string
-	uid             *types.UID
-	apiVersion      *string
-	kind            *string
-	resourceVersion *string
-	selfLink        *string
-	labels          *map[string]string
-	annotations     *map[string]string
+	namespace         *string
+	name              *string
+	generateName      *string
+	uid               *types.UID
+	apiVersion        *string
+	kind              *string
+	resourceVersion   *string
+	selfLink          *string
+	creationTimestamp *unversioned.Time
+	deletionTimestamp **unversioned.Time
+	labels            *map[string]string
+	annotations       *map[string]string
+	ownerReferences   reflect.Value
+	finalizers        *[]string
 }
 
 func (a genericAccessor) GetNamespace() string {
@@ -421,6 +486,22 @@ func (a genericAccessor) SetSelfLink(selfLink string) {
 	*a.selfLink = selfLink
 }
 
+func (a genericAccessor) GetCreationTimestamp() unversioned.Time {
+	return *a.creationTimestamp
+}
+
+func (a genericAccessor) SetCreationTimestamp(timestamp unversioned.Time) {
+	*a.creationTimestamp = timestamp
+}
+
+func (a genericAccessor) GetDeletionTimestamp() *unversioned.Time {
+	return *a.deletionTimestamp
+}
+
+func (a genericAccessor) SetDeletionTimestamp(timestamp *unversioned.Time) {
+	*a.deletionTimestamp = timestamp
+}
+
 func (a genericAccessor) GetLabels() map[string]string {
 	if a.labels == nil {
 		return nil
@@ -447,6 +528,52 @@ func (a genericAccessor) SetAnnotations(annotations map[string]string) {
 	*a.annotations = annotations
 }
 
+func (a genericAccessor) GetFinalizers() []string {
+	if a.finalizers == nil {
+		return nil
+	}
+	return *a.finalizers
+}
+
+func (a genericAccessor) SetFinalizers(finalizers []string) {
+	*a.finalizers = finalizers
+}
+
+func (a genericAccessor) GetOwnerReferences() []metatypes.OwnerReference {
+	var ret []metatypes.OwnerReference
+	s := a.ownerReferences
+	if s.Kind() != reflect.Ptr || s.Elem().Kind() != reflect.Slice {
+		glog.Errorf("expected %v to be a pointer to a slice", s)
+		return ret
+	}
+	s = s.Elem()
+	// Set the capacity to one element greater to avoid a copy if the caller later appends an element.
+	ret = make([]metatypes.OwnerReference, s.Len(), s.Len()+1)
+	for i := 0; i < s.Len(); i++ {
+		if err := extractFromOwnerReference(s.Index(i), &ret[i]); err != nil {
+			glog.Errorf("extractFromOwnerReference failed: %v", err)
+			return ret
+		}
+	}
+	return ret
+}
+
+func (a genericAccessor) SetOwnerReferences(references []metatypes.OwnerReference) {
+	s := a.ownerReferences
+	if s.Kind() != reflect.Ptr || s.Elem().Kind() != reflect.Slice {
+		glog.Errorf("expected %v to be a pointer to a slice", s)
+		return
+	}
+	s = s.Elem()
+	newReferences := reflect.MakeSlice(s.Type(), len(references), len(references))
+	for i := 0; i < len(references); i++ {
+		if err := setOwnerReference(newReferences.Index(i), &references[i]); err != nil {
+			glog.Errorf("setOwnerReference failed: %v", err)
+			return
+		}
+	}
+	s.Set(newReferences)
+}
+
 // extractFromTypeMeta extracts pointers to version and kind fields from an object
 func extractFromTypeMeta(v reflect.Value, a *genericAccessor) error {
 	if err := runtime.FieldPtr(v, "APIVersion", &a.apiVersion); err != nil {
@@ -484,6 +611,17 @@ func extractFromObjectMeta(v reflect.Value, a *genericAccessor) error {
 	if err := runtime.FieldPtr(v, "Annotations", &a.annotations); err != nil {
 		return err
 	}
+	if err := runtime.FieldPtr(v, "Finalizers", &a.finalizers); err != nil {
+		return err
+	}
+	ownerReferences := v.FieldByName("OwnerReferences")
+	if !ownerReferences.IsValid() {
+		return fmt.Errorf("struct %#v lacks OwnerReferences type", v)
+	}
+	if ownerReferences.Kind() != reflect.Slice {
+		return fmt.Errorf("expected %v to be a slice", ownerReferences.Kind())
+	}
+	a.ownerReferences = ownerReferences.Addr()
 	return nil
 }
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/meta/meta_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/meta/meta_test.go
new file mode 100644
index 000000000000..73eeffb98e25
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/meta/meta_test.go
@@ -0,0 +1,880 @@
+/*
+Copyright 2014 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package meta_test + +import ( + "reflect" + "testing" + + "github.com/google/gofuzz" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/meta" + "k8s.io/kubernetes/pkg/api/meta/metatypes" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/types" +) + +func TestAPIObjectMeta(t *testing.T) { + j := &api.Pod{ + TypeMeta: unversioned.TypeMeta{APIVersion: "/a", Kind: "b"}, + ObjectMeta: api.ObjectMeta{ + Namespace: "bar", + Name: "foo", + GenerateName: "prefix", + UID: "uid", + ResourceVersion: "1", + SelfLink: "some/place/only/we/know", + Labels: map[string]string{"foo": "bar"}, + Annotations: map[string]string{"x": "y"}, + Finalizers: []string{ + "finalizer.1", + "finalizer.2", + }, + }, + } + var _ meta.Object = &j.ObjectMeta + var _ meta.ObjectMetaAccessor = j + accessor, err := meta.Accessor(j) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if accessor != meta.Object(&j.ObjectMeta) { + t.Fatalf("should have returned the same pointer: %#v %#v", accessor, j) + } + if e, a := "bar", accessor.GetNamespace(); e != a { + t.Errorf("expected %v, got %v", e, a) + } + if e, a := "foo", accessor.GetName(); e != a { + t.Errorf("expected %v, got %v", e, a) + } + if e, a := "prefix", accessor.GetGenerateName(); e != a { + t.Errorf("expected %v, got %v", e, a) + } + if e, a := "uid", string(accessor.GetUID()); e != a { + t.Errorf("expected %v, got %v", e, a) + } + if e, a := "1", accessor.GetResourceVersion(); e != a { + t.Errorf("expected %v, got %v", e, a) + } + if e, a := "some/place/only/we/know", accessor.GetSelfLink(); e != a { + t.Errorf("expected %v, got %v", e, a) + } + if e, a := []string{"finalizer.1", "finalizer.2"}, accessor.GetFinalizers(); !reflect.DeepEqual(e, a) { + t.Errorf("expected %v, got %v", e, a) + } + + typeAccessor, err := meta.TypeAccessor(j) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if e, a := "a", typeAccessor.GetAPIVersion(); e != a { + t.Errorf("expected %v, got %v", e, a) + } + if e, a := "b", typeAccessor.GetKind(); e != a { + t.Errorf("expected %v, got %v", e, a) + } + + accessor.SetNamespace("baz") + accessor.SetName("bar") + accessor.SetGenerateName("generate") + accessor.SetUID("other") + typeAccessor.SetAPIVersion("c") + typeAccessor.SetKind("d") + accessor.SetResourceVersion("2") + accessor.SetSelfLink("google.com") + accessor.SetFinalizers([]string{"finalizer.3"}) + + // Prove that accessor changes the original object. 
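+	// (j embeds ObjectMeta, so the writes above must show up on the struct
+	// fields themselves, not on a copy.)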
+ if e, a := "baz", j.Namespace; e != a { + t.Errorf("expected %v, got %v", e, a) + } + if e, a := "bar", j.Name; e != a { + t.Errorf("expected %v, got %v", e, a) + } + if e, a := "generate", j.GenerateName; e != a { + t.Errorf("expected %v, got %v", e, a) + } + if e, a := types.UID("other"), j.UID; e != a { + t.Errorf("expected %v, got %v", e, a) + } + if e, a := "c", j.APIVersion; e != a { + t.Errorf("expected %v, got %v", e, a) + } + if e, a := "d", j.Kind; e != a { + t.Errorf("expected %v, got %v", e, a) + } + if e, a := "2", j.ResourceVersion; e != a { + t.Errorf("expected %v, got %v", e, a) + } + if e, a := "google.com", j.SelfLink; e != a { + t.Errorf("expected %v, got %v", e, a) + } + if e, a := []string{"finalizer.3"}, j.Finalizers; !reflect.DeepEqual(e, a) { + t.Errorf("expected %v, got %v", e, a) + } + + typeAccessor.SetAPIVersion("d") + typeAccessor.SetKind("e") + if e, a := "d", j.APIVersion; e != a { + t.Errorf("expected %v, got %v", e, a) + } + if e, a := "e", j.Kind; e != a { + t.Errorf("expected %v, got %v", e, a) + } +} + +func TestGenericTypeMeta(t *testing.T) { + type TypeMeta struct { + Kind string `json:"kind,omitempty"` + Namespace string `json:"namespace,omitempty"` + Name string `json:"name,omitempty"` + GenerateName string `json:"generateName,omitempty"` + UID string `json:"uid,omitempty"` + CreationTimestamp unversioned.Time `json:"creationTimestamp,omitempty"` + SelfLink string `json:"selfLink,omitempty"` + ResourceVersion string `json:"resourceVersion,omitempty"` + APIVersion string `json:"apiVersion,omitempty"` + Labels map[string]string `json:"labels,omitempty"` + Annotations map[string]string `json:"annotations,omitempty"` + OwnerReferences []api.OwnerReference `json:"ownerReferences,omitempty"` + Finalizers []string `json:"finalizers,omitempty"` + } + type Object struct { + TypeMeta `json:",inline"` + } + j := Object{ + TypeMeta{ + Namespace: "bar", + Name: "foo", + GenerateName: "prefix", + UID: "uid", + APIVersion: "a", + Kind: "b", + ResourceVersion: "1", + SelfLink: "some/place/only/we/know", + Labels: map[string]string{"foo": "bar"}, + Annotations: map[string]string{"x": "y"}, + Finalizers: []string{"finalizer.1", "finalizer.2"}, + }, + } + accessor, err := meta.Accessor(&j) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if e, a := "bar", accessor.GetNamespace(); e != a { + t.Errorf("expected %v, got %v", e, a) + } + if e, a := "foo", accessor.GetName(); e != a { + t.Errorf("expected %v, got %v", e, a) + } + if e, a := "prefix", accessor.GetGenerateName(); e != a { + t.Errorf("expected %v, got %v", e, a) + } + if e, a := "uid", string(accessor.GetUID()); e != a { + t.Errorf("expected %v, got %v", e, a) + } + if e, a := "1", accessor.GetResourceVersion(); e != a { + t.Errorf("expected %v, got %v", e, a) + } + if e, a := "some/place/only/we/know", accessor.GetSelfLink(); e != a { + t.Errorf("expected %v, got %v", e, a) + } + if e, a := []string{"finalizer.1", "finalizer.2"}, accessor.GetFinalizers(); !reflect.DeepEqual(e, a) { + t.Errorf("expected %v, got %v", e, a) + } + + typeAccessor, err := meta.TypeAccessor(&j) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if e, a := "a", typeAccessor.GetAPIVersion(); e != a { + t.Errorf("expected %v, got %v", e, a) + } + if e, a := "b", typeAccessor.GetKind(); e != a { + t.Errorf("expected %v, got %v", e, a) + } + + accessor.SetNamespace("baz") + accessor.SetName("bar") + accessor.SetGenerateName("generate") + accessor.SetUID("other") + typeAccessor.SetAPIVersion("c") + 
typeAccessor.SetKind("d") + accessor.SetResourceVersion("2") + accessor.SetSelfLink("google.com") + accessor.SetFinalizers([]string{"finalizer.3"}) + + // Prove that accessor changes the original object. + if e, a := "baz", j.Namespace; e != a { + t.Errorf("expected %v, got %v", e, a) + } + if e, a := "bar", j.Name; e != a { + t.Errorf("expected %v, got %v", e, a) + } + if e, a := "generate", j.GenerateName; e != a { + t.Errorf("expected %v, got %v", e, a) + } + if e, a := "other", j.UID; e != a { + t.Errorf("expected %v, got %v", e, a) + } + if e, a := "c", j.APIVersion; e != a { + t.Errorf("expected %v, got %v", e, a) + } + if e, a := "d", j.Kind; e != a { + t.Errorf("expected %v, got %v", e, a) + } + if e, a := "2", j.ResourceVersion; e != a { + t.Errorf("expected %v, got %v", e, a) + } + if e, a := "google.com", j.SelfLink; e != a { + t.Errorf("expected %v, got %v", e, a) + } + if e, a := []string{"finalizer.3"}, j.Finalizers; !reflect.DeepEqual(e, a) { + t.Errorf("expected %v, got %v", e, a) + } + + typeAccessor.SetAPIVersion("d") + typeAccessor.SetKind("e") + if e, a := "d", j.APIVersion; e != a { + t.Errorf("expected %v, got %v", e, a) + } + if e, a := "e", j.Kind; e != a { + t.Errorf("expected %v, got %v", e, a) + } +} + +type InternalTypeMeta struct { + Kind string `json:"kind,omitempty"` + Namespace string `json:"namespace,omitempty"` + Name string `json:"name,omitempty"` + GenerateName string `json:"generateName,omitempty"` + UID string `json:"uid,omitempty"` + CreationTimestamp unversioned.Time `json:"creationTimestamp,omitempty"` + SelfLink string `json:"selfLink,omitempty"` + ResourceVersion string `json:"resourceVersion,omitempty"` + APIVersion string `json:"apiVersion,omitempty"` + Labels map[string]string `json:"labels,omitempty"` + Annotations map[string]string `json:"annotations,omitempty"` + Finalizers []string `json:"finalizers,omitempty"` + OwnerReferences []api.OwnerReference `json:"ownerReferences,omitempty"` +} + +type InternalObject struct { + TypeMeta InternalTypeMeta `json:",inline"` +} + +func (obj *InternalObject) GetObjectKind() unversioned.ObjectKind { return obj } +func (obj *InternalObject) SetGroupVersionKind(gvk unversioned.GroupVersionKind) { + obj.TypeMeta.APIVersion, obj.TypeMeta.Kind = gvk.ToAPIVersionAndKind() +} +func (obj *InternalObject) GroupVersionKind() unversioned.GroupVersionKind { + return unversioned.FromAPIVersionAndKind(obj.TypeMeta.APIVersion, obj.TypeMeta.Kind) +} + +func TestGenericTypeMetaAccessor(t *testing.T) { + j := &InternalObject{ + InternalTypeMeta{ + Namespace: "bar", + Name: "foo", + GenerateName: "prefix", + UID: "uid", + APIVersion: "/a", + Kind: "b", + ResourceVersion: "1", + SelfLink: "some/place/only/we/know", + Labels: map[string]string{"foo": "bar"}, + Annotations: map[string]string{"x": "y"}, + // OwnerReferences are tested separately + }, + } + accessor := meta.NewAccessor() + namespace, err := accessor.Namespace(j) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + if e, a := "bar", namespace; e != a { + t.Errorf("expected %v, got %v", e, a) + } + name, err := accessor.Name(j) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + if e, a := "foo", name; e != a { + t.Errorf("expected %v, got %v", e, a) + } + generateName, err := accessor.GenerateName(j) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + if e, a := "prefix", generateName; e != a { + t.Errorf("expected %v, got %v", e, a) + } + uid, err := accessor.UID(j) + if err != nil { + t.Errorf("unexpected error: %v", err) + } 
+ if e, a := "uid", string(uid); e != a { + t.Errorf("expected %v, got %v", e, a) + } + apiVersion, err := accessor.APIVersion(j) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + if e, a := "a", apiVersion; e != a { + t.Errorf("expected %v, got %v", e, a) + } + kind, err := accessor.Kind(j) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + if e, a := "b", kind; e != a { + t.Errorf("expected %v, got %v", e, a) + } + rv, err := accessor.ResourceVersion(j) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + if e, a := "1", rv; e != a { + t.Errorf("expected %v, got %v", e, a) + } + selfLink, err := accessor.SelfLink(j) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + if e, a := "some/place/only/we/know", selfLink; e != a { + t.Errorf("expected %v, got %v", e, a) + } + labels, err := accessor.Labels(j) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + if e, a := 1, len(labels); e != a { + t.Errorf("expected %v, got %v", e, a) + } + annotations, err := accessor.Annotations(j) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + if e, a := 1, len(annotations); e != a { + t.Errorf("expected %v, got %v", e, a) + } + + if err := accessor.SetNamespace(j, "baz"); err != nil { + t.Errorf("unexpected error: %v", err) + } + if err := accessor.SetName(j, "bar"); err != nil { + t.Errorf("unexpected error: %v", err) + } + if err := accessor.SetGenerateName(j, "generate"); err != nil { + t.Errorf("unexpected error: %v", err) + } + if err := accessor.SetUID(j, "other"); err != nil { + t.Errorf("unexpected error: %v", err) + } + if err := accessor.SetAPIVersion(j, "c"); err != nil { + t.Errorf("unexpected error: %v", err) + } + if err := accessor.SetKind(j, "d"); err != nil { + t.Errorf("unexpected error: %v", err) + } + if err := accessor.SetResourceVersion(j, "2"); err != nil { + t.Errorf("unexpected error: %v", err) + } + if err := accessor.SetSelfLink(j, "google.com"); err != nil { + t.Errorf("unexpected error: %v", err) + } + if err := accessor.SetLabels(j, map[string]string{}); err != nil { + t.Errorf("unexpected error: %v", err) + } + var nilMap map[string]string + if err := accessor.SetAnnotations(j, nilMap); err != nil { + t.Errorf("unexpected error: %v", err) + } + + // Prove that accessor changes the original object. 
+ if e, a := "baz", j.TypeMeta.Namespace; e != a { + t.Errorf("expected %v, got %v", e, a) + } + if e, a := "bar", j.TypeMeta.Name; e != a { + t.Errorf("expected %v, got %v", e, a) + } + if e, a := "generate", j.TypeMeta.GenerateName; e != a { + t.Errorf("expected %v, got %v", e, a) + } + if e, a := "other", j.TypeMeta.UID; e != a { + t.Errorf("expected %v, got %v", e, a) + } + if e, a := "c", j.TypeMeta.APIVersion; e != a { + t.Errorf("expected %v, got %v", e, a) + } + if e, a := "d", j.TypeMeta.Kind; e != a { + t.Errorf("expected %v, got %v", e, a) + } + if e, a := "2", j.TypeMeta.ResourceVersion; e != a { + t.Errorf("expected %v, got %v", e, a) + } + if e, a := "google.com", j.TypeMeta.SelfLink; e != a { + t.Errorf("expected %v, got %v", e, a) + } + if e, a := map[string]string{}, j.TypeMeta.Labels; !reflect.DeepEqual(e, a) { + t.Errorf("expected %#v, got %#v", e, a) + } + if e, a := nilMap, j.TypeMeta.Annotations; !reflect.DeepEqual(e, a) { + t.Errorf("expected %#v, got %#v", e, a) + } +} + +func TestGenericObjectMeta(t *testing.T) { + type TypeMeta struct { + Kind string `json:"kind,omitempty"` + APIVersion string `json:"apiVersion,omitempty"` + } + type ObjectMeta struct { + Namespace string `json:"namespace,omitempty"` + Name string `json:"name,omitempty"` + GenerateName string `json:"generateName,omitempty"` + UID string `json:"uid,omitempty"` + CreationTimestamp unversioned.Time `json:"creationTimestamp,omitempty"` + SelfLink string `json:"selfLink,omitempty"` + ResourceVersion string `json:"resourceVersion,omitempty"` + Labels map[string]string `json:"labels,omitempty"` + Annotations map[string]string `json:"annotations,omitempty"` + Finalizers []string `json:"finalizers,omitempty"` + OwnerReferences []api.OwnerReference `json:"ownerReferences,omitempty"` + } + type Object struct { + TypeMeta `json:",inline"` + ObjectMeta `json:"metadata"` + } + j := Object{ + TypeMeta{ + APIVersion: "a", + Kind: "b", + }, + ObjectMeta{ + Namespace: "bar", + Name: "foo", + GenerateName: "prefix", + UID: "uid", + ResourceVersion: "1", + SelfLink: "some/place/only/we/know", + Labels: map[string]string{"foo": "bar"}, + Annotations: map[string]string{"a": "b"}, + Finalizers: []string{ + "finalizer.1", + "finalizer.2", + }, + }, + } + accessor, err := meta.Accessor(&j) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if e, a := "bar", accessor.GetNamespace(); e != a { + t.Errorf("expected %v, got %v", e, a) + } + if e, a := "foo", accessor.GetName(); e != a { + t.Errorf("expected %v, got %v", e, a) + } + if e, a := "prefix", accessor.GetGenerateName(); e != a { + t.Errorf("expected %v, got %v", e, a) + } + if e, a := "uid", string(accessor.GetUID()); e != a { + t.Errorf("expected %v, got %v", e, a) + } + if e, a := "1", accessor.GetResourceVersion(); e != a { + t.Errorf("expected %v, got %v", e, a) + } + if e, a := "some/place/only/we/know", accessor.GetSelfLink(); e != a { + t.Errorf("expected %v, got %v", e, a) + } + if e, a := 1, len(accessor.GetLabels()); e != a { + t.Errorf("expected %v, got %v", e, a) + } + if e, a := 1, len(accessor.GetAnnotations()); e != a { + t.Errorf("expected %v, got %v", e, a) + } + if e, a := []string{"finalizer.1", "finalizer.2"}, accessor.GetFinalizers(); !reflect.DeepEqual(e, a) { + t.Errorf("expected %v, got %v", e, a) + } + + typeAccessor, err := meta.TypeAccessor(&j) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if e, a := "a", typeAccessor.GetAPIVersion(); e != a { + t.Errorf("expected %v, got %v", e, a) + } + if e, a := "b", 
typeAccessor.GetKind(); e != a { + t.Errorf("expected %v, got %v", e, a) + } + + accessor.SetNamespace("baz") + accessor.SetName("bar") + accessor.SetGenerateName("generate") + accessor.SetUID("other") + typeAccessor.SetAPIVersion("c") + typeAccessor.SetKind("d") + accessor.SetResourceVersion("2") + accessor.SetSelfLink("google.com") + accessor.SetLabels(map[string]string{"other": "label"}) + accessor.SetAnnotations(map[string]string{"c": "d"}) + accessor.SetFinalizers([]string{"finalizer.3"}) + + // Prove that accessor changes the original object. + if e, a := "baz", j.Namespace; e != a { + t.Errorf("expected %v, got %v", e, a) + } + if e, a := "bar", j.Name; e != a { + t.Errorf("expected %v, got %v", e, a) + } + if e, a := "generate", j.GenerateName; e != a { + t.Errorf("expected %v, got %v", e, a) + } + if e, a := "other", j.UID; e != a { + t.Errorf("expected %v, got %v", e, a) + } + if e, a := "c", j.APIVersion; e != a { + t.Errorf("expected %v, got %v", e, a) + } + if e, a := "d", j.Kind; e != a { + t.Errorf("expected %v, got %v", e, a) + } + if e, a := "2", j.ResourceVersion; e != a { + t.Errorf("expected %v, got %v", e, a) + } + if e, a := "google.com", j.SelfLink; e != a { + t.Errorf("expected %v, got %v", e, a) + } + if e, a := map[string]string{"other": "label"}, j.Labels; !reflect.DeepEqual(e, a) { + t.Errorf("expected %#v, got %#v", e, a) + } + if e, a := map[string]string{"c": "d"}, j.Annotations; !reflect.DeepEqual(e, a) { + t.Errorf("expected %#v, got %#v", e, a) + } + if e, a := []string{"finalizer.3"}, j.Finalizers; !reflect.DeepEqual(e, a) { + t.Errorf("expected %v, got %v", e, a) + } +} + +func TestGenericListMeta(t *testing.T) { + type TypeMeta struct { + Kind string `json:"kind,omitempty"` + APIVersion string `json:"apiVersion,omitempty"` + } + type ListMeta struct { + SelfLink string `json:"selfLink,omitempty"` + ResourceVersion string `json:"resourceVersion,omitempty"` + } + type Object struct { + TypeMeta `json:",inline"` + ListMeta `json:"metadata"` + } + j := Object{ + TypeMeta{ + APIVersion: "a", + Kind: "b", + }, + ListMeta{ + ResourceVersion: "1", + SelfLink: "some/place/only/we/know", + }, + } + accessor, err := meta.Accessor(&j) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if e, a := "", accessor.GetName(); e != a { + t.Errorf("expected %v, got %v", e, a) + } + if e, a := "", string(accessor.GetUID()); e != a { + t.Errorf("expected %v, got %v", e, a) + } + if e, a := "1", accessor.GetResourceVersion(); e != a { + t.Errorf("expected %v, got %v", e, a) + } + if e, a := "some/place/only/we/know", accessor.GetSelfLink(); e != a { + t.Errorf("expected %v, got %v", e, a) + } + + typeAccessor, err := meta.TypeAccessor(&j) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if e, a := "a", typeAccessor.GetAPIVersion(); e != a { + t.Errorf("expected %v, got %v", e, a) + } + if e, a := "b", typeAccessor.GetKind(); e != a { + t.Errorf("expected %v, got %v", e, a) + } + + accessor.SetName("bar") + accessor.SetUID("other") + typeAccessor.SetAPIVersion("c") + typeAccessor.SetKind("d") + accessor.SetResourceVersion("2") + accessor.SetSelfLink("google.com") + + // Prove that accessor changes the original object. 
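+	// The list Object defines no Name or UID fields, so the SetName and SetUID calls above cannot be verified; only the APIVersion, Kind, ResourceVersion, and SelfLink writes are checked against the original struct below.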
+ if e, a := "c", j.APIVersion; e != a { + t.Errorf("expected %v, got %v", e, a) + } + if e, a := "d", j.Kind; e != a { + t.Errorf("expected %v, got %v", e, a) + } + if e, a := "2", j.ResourceVersion; e != a { + t.Errorf("expected %v, got %v", e, a) + } + if e, a := "google.com", j.SelfLink; e != a { + t.Errorf("expected %v, got %v", e, a) + } +} + +type MyAPIObject struct { + TypeMeta InternalTypeMeta `json:",inline"` +} + +func (obj *MyAPIObject) GetObjectKind() unversioned.ObjectKind { return obj } +func (obj *MyAPIObject) SetGroupVersionKind(gvk unversioned.GroupVersionKind) { + obj.TypeMeta.APIVersion, obj.TypeMeta.Kind = gvk.ToAPIVersionAndKind() +} +func (obj *MyAPIObject) GroupVersionKind() unversioned.GroupVersionKind { + return unversioned.FromAPIVersionAndKind(obj.TypeMeta.APIVersion, obj.TypeMeta.Kind) +} + +type MyIncorrectlyMarkedAsAPIObject struct{} + +func (obj *MyIncorrectlyMarkedAsAPIObject) GetObjectKind() unversioned.ObjectKind { + return unversioned.EmptyObjectKind +} + +func TestResourceVersionerOfAPI(t *testing.T) { + type T struct { + runtime.Object + Expected string + } + testCases := map[string]T{ + "empty api object": {&MyAPIObject{}, ""}, + "api object with version": {&MyAPIObject{TypeMeta: InternalTypeMeta{ResourceVersion: "1"}}, "1"}, + "pointer to api object with version": {&MyAPIObject{TypeMeta: InternalTypeMeta{ResourceVersion: "1"}}, "1"}, + } + versioning := meta.NewAccessor() + for key, testCase := range testCases { + actual, err := versioning.ResourceVersion(testCase.Object) + if err != nil { + t.Errorf("%s: unexpected error %#v", key, err) + } + if actual != testCase.Expected { + t.Errorf("%s: expected %v, got %v", key, testCase.Expected, actual) + } + } + + failingCases := map[string]struct { + runtime.Object + Expected string + }{ + "not a valid object to try": {&MyIncorrectlyMarkedAsAPIObject{}, "1"}, + } + for key, testCase := range failingCases { + _, err := versioning.ResourceVersion(testCase.Object) + if err == nil { + t.Errorf("%s: expected error, got nil", key) + } + } + + setCases := map[string]struct { + runtime.Object + Expected string + }{ + "pointer to api object with version": {&MyAPIObject{TypeMeta: InternalTypeMeta{ResourceVersion: "1"}}, "1"}, + } + for key, testCase := range setCases { + if err := versioning.SetResourceVersion(testCase.Object, "5"); err != nil { + t.Errorf("%s: unexpected error %#v", key, err) + } + actual, err := versioning.ResourceVersion(testCase.Object) + if err != nil { + t.Errorf("%s: unexpected error %#v", key, err) + } + if actual != "5" { + t.Errorf("%s: expected %v, got %v", key, "5", actual) + } + } +} + +func TestTypeMetaSelfLinker(t *testing.T) { + table := map[string]struct { + obj runtime.Object + expect string + try string + succeed bool + }{ + "normal": { + obj: &MyAPIObject{TypeMeta: InternalTypeMeta{SelfLink: "foobar"}}, + expect: "foobar", + try: "newbar", + succeed: true, + }, + "fail": { + obj: &MyIncorrectlyMarkedAsAPIObject{}, + succeed: false, + }, + } + + linker := runtime.SelfLinker(meta.NewAccessor()) + for name, item := range table { + got, err := linker.SelfLink(item.obj) + if e, a := item.succeed, err == nil; e != a { + t.Errorf("%v: expected %v, got %v", name, e, a) + } + if e, a := item.expect, got; item.succeed && e != a { + t.Errorf("%v: expected %v, got %v", name, e, a) + } + + err = linker.SetSelfLink(item.obj, item.try) + if e, a := item.succeed, err == nil; e != a { + t.Errorf("%v: expected %v, got %v", name, e, a) + } + if item.succeed { + got, err := linker.SelfLink(item.obj) 
+			if err != nil { +				t.Errorf("%v: expected no err, got %v", name, err) +			} +			if e, a := item.try, got; e != a { +				t.Errorf("%v: expected %v, got %v", name, e, a) +			} +		} +	} +} + +type MyAPIObject2 struct { +	unversioned.TypeMeta +	v1.ObjectMeta +} + +func getObjectMetaAndOwnerReferences() (myAPIObject2 MyAPIObject2, metaOwnerReferences []metatypes.OwnerReference) { +	fuzz.New().NilChance(.5).NumElements(1, 5).Fuzz(&myAPIObject2) +	references := myAPIObject2.ObjectMeta.OwnerReferences +	// This is necessary for the test to pass because the getter will return a +	// non-nil slice. +	metaOwnerReferences = make([]metatypes.OwnerReference, 0) +	for i := 0; i < len(references); i++ { +		metaOwnerReferences = append(metaOwnerReferences, metatypes.OwnerReference{ +			Kind:       references[i].Kind, +			Name:       references[i].Name, +			UID:        references[i].UID, +			APIVersion: references[i].APIVersion, +		}) +	} +	if len(references) == 0 { +		// This is necessary for the test to pass because the setter will make a +		// non-nil slice. +		myAPIObject2.ObjectMeta.OwnerReferences = make([]v1.OwnerReference, 0) +	} +	return myAPIObject2, metaOwnerReferences +} + +func testGetOwnerReferences(t *testing.T) { +	obj, expected := getObjectMetaAndOwnerReferences() +	accessor, err := meta.Accessor(&obj) +	if err != nil { +		t.Error(err) +	} +	references := accessor.GetOwnerReferences() +	if !reflect.DeepEqual(references, expected) { +		t.Errorf("expect %#v\n got %#v", expected, references) +	} +} + +func testSetOwnerReferences(t *testing.T) { +	expected, references := getObjectMetaAndOwnerReferences() +	obj := MyAPIObject2{} +	accessor, err := meta.Accessor(&obj) +	if err != nil { +		t.Error(err) +	} +	accessor.SetOwnerReferences(references) +	if e, a := expected.ObjectMeta.OwnerReferences, obj.ObjectMeta.OwnerReferences; !reflect.DeepEqual(e, a) { +		t.Errorf("expect %#v\n got %#v", e, a) +	} +} + +func TestAccessOwnerReferences(t *testing.T) { +	fuzzIter := 5 +	for i := 0; i < fuzzIter; i++ { +		testGetOwnerReferences(t) +		testSetOwnerReferences(t) +	} +} + +// BenchmarkAccessorSetFastPath shows the interface fast path +func BenchmarkAccessorSetFastPath(b *testing.B) { +	obj := &api.Pod{ +		TypeMeta: unversioned.TypeMeta{APIVersion: "/a", Kind: "b"}, +		ObjectMeta: api.ObjectMeta{ +			Namespace:       "bar", +			Name:            "foo", +			GenerateName:    "prefix", +			UID:             "uid", +			ResourceVersion: "1", +			SelfLink:        "some/place/only/we/know", +			Labels:          map[string]string{"foo": "bar"}, +			Annotations:     map[string]string{"x": "y"}, +		}, +	} + +	b.ResetTimer() +	for i := 0; i < b.N; i++ { +		acc, err := meta.Accessor(obj) +		if err != nil { +			b.Fatal(err) +		} +		acc.SetNamespace("something") +	} +	b.StopTimer() +} + +// BenchmarkAccessorSetReflection provides a baseline for accessor performance +func BenchmarkAccessorSetReflection(b *testing.B) { +	obj := &InternalObject{ +		InternalTypeMeta{ +			Namespace:       "bar", +			Name:            "foo", +			GenerateName:    "prefix", +			UID:             "uid", +			APIVersion:      "a", +			Kind:            "b", +			ResourceVersion: "1", +			SelfLink:        "some/place/only/we/know", +			Labels:          map[string]string{"foo": "bar"}, +			Annotations:     map[string]string{"x": "y"}, +		}, +	} + +	b.ResetTimer() +	for i := 0; i < b.N; i++ { +		acc, err := meta.Accessor(obj) +		if err != nil { +			b.Fatal(err) +		} +		acc.SetNamespace("something") +	} +	b.StopTimer() +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/meta/metatypes/deep_copy_generated.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/meta/metatypes/deep_copy_generated.go new file mode 100644 index 000000000000..f06a194e54a5 ---
/dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/meta/metatypes/deep_copy_generated.go @@ -0,0 +1,33 @@ +// +build !ignore_autogenerated + +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +    http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package metatypes + +import ( +	conversion "k8s.io/kubernetes/pkg/conversion" +) + +func DeepCopy_metatypes_OwnerReference(in OwnerReference, out *OwnerReference, c *conversion.Cloner) error { +	out.APIVersion = in.APIVersion +	out.Kind = in.Kind +	out.UID = in.UID +	out.Name = in.Name +	return nil +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/meta/metatypes/types.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/meta/metatypes/types.go new file mode 100644 index 000000000000..ca4edf72e28d --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/meta/metatypes/types.go @@ -0,0 +1,29 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +    http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// The types defined in this package are used by the meta package to represent +// the in-memory version of objects. We cannot reuse the __internal version of +// API objects because it causes an import cycle. +package metatypes + +import "k8s.io/kubernetes/pkg/types" + +type OwnerReference struct { +	APIVersion string +	Kind       string +	UID        types.UID +	Name       string +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/meta/multirestmapper.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/meta/multirestmapper.go index 3071d45072c6..b720f8fa2ef3 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/meta/multirestmapper.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/meta/multirestmapper.go @@ -22,6 +22,7 @@ import ( 	"k8s.io/kubernetes/pkg/api/unversioned" 	utilerrors "k8s.io/kubernetes/pkg/util/errors" +	"k8s.io/kubernetes/pkg/util/sets" ) // MultiRESTMapper is a wrapper for multiple RESTMappers.
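// A minimal usage sketch (hypothetical mapper names; any set of RESTMappers can be combined): //	m := MultiRESTMapper{primary, fallback} //	gvk, err := m.KindFor(unversioned.GroupVersionResource{Resource: "pods"}) // Delegates are consulted in order, and a "no match" from one delegate is skipped as long as a later delegate can answer, as the tests below exercise.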
@@ -169,24 +170,29 @@ func (m MultiRESTMapper) RESTMapping(gk unversioned.GroupKind, versions ...strin if len(allMappings) == 1 { return allMappings[0], nil } + if len(allMappings) > 1 { + return nil, fmt.Errorf("multiple matches found for %v in %v", gk, versions) + } if len(errors) > 0 { return nil, utilerrors.NewAggregate(errors) } - if len(allMappings) == 0 { - return nil, fmt.Errorf("no match found for %v in %v", gk, versions) - } - - return nil, fmt.Errorf("multiple matches found for %v in %v", gk, versions) + return nil, fmt.Errorf("no match found for %v in %v", gk, versions) } // AliasesForResource finds the first alias response for the provided mappers. func (m MultiRESTMapper) AliasesForResource(alias string) ([]string, bool) { + seenAliases := sets.NewString() allAliases := []string{} handled := false for _, t := range m { if currAliases, currOk := t.AliasesForResource(alias); currOk { - allAliases = append(allAliases, currAliases...) + for _, currAlias := range currAliases { + if !seenAliases.Has(currAlias) { + allAliases = append(allAliases, currAlias) + seenAliases.Insert(currAlias) + } + } handled = true } } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/meta/multirestmapper_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/meta/multirestmapper_test.go new file mode 100644 index 000000000000..1b3685e850e6 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/meta/multirestmapper_test.go @@ -0,0 +1,298 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package meta + +import ( +	"errors" +	"reflect" +	"testing" + +	"k8s.io/kubernetes/pkg/api/unversioned" +) + +func TestMultiRESTMapperResourceFor(t *testing.T) { +	tcs := []struct { +		name string + +		mapper MultiRESTMapper +		input  unversioned.GroupVersionResource +		result unversioned.GroupVersionResource +		err    error +	}{ +		{ +			name:   "empty", +			mapper: MultiRESTMapper{}, +			input:  unversioned.GroupVersionResource{Resource: "foo"}, +			result: unversioned.GroupVersionResource{}, +			err:    &NoResourceMatchError{PartialResource: unversioned.GroupVersionResource{Resource: "foo"}}, +		}, +		{ +			name:   "ignore not found", +			mapper: MultiRESTMapper{fixedRESTMapper{err: &NoResourceMatchError{PartialResource: unversioned.GroupVersionResource{Resource: "IGNORE_THIS"}}}}, +			input:  unversioned.GroupVersionResource{Resource: "foo"}, +			result: unversioned.GroupVersionResource{}, +			err:    &NoResourceMatchError{PartialResource: unversioned.GroupVersionResource{Resource: "foo"}}, +		}, +		{ +			name:   "accept first failure", +			mapper: MultiRESTMapper{fixedRESTMapper{err: errors.New("fail on this")}, fixedRESTMapper{resourcesFor: []unversioned.GroupVersionResource{{Resource: "unused"}}}}, +			input:  unversioned.GroupVersionResource{Resource: "foo"}, +			result: unversioned.GroupVersionResource{}, +			err:    errors.New("fail on this"), +		}, +	} + +	for _, tc := range tcs { +		actualResult, actualErr := tc.mapper.ResourceFor(tc.input) +		if e, a := tc.result, actualResult; e != a { +			t.Errorf("%s: expected %v, got %v", tc.name, e, a) +		} +		switch { +		case tc.err == nil && actualErr == nil: +		case tc.err == nil: +			t.Errorf("%s: unexpected error: %v", tc.name, actualErr) +		case actualErr == nil: +			t.Errorf("%s: expected error: %v got nil", tc.name, tc.err) +		case tc.err.Error() != actualErr.Error(): +			t.Errorf("%s: expected %v, got %v", tc.name, tc.err, actualErr) +		} +	} +} + +func TestMultiRESTMapperResourcesFor(t *testing.T) { +	tcs := []struct { +		name string + +		mapper MultiRESTMapper +		input  unversioned.GroupVersionResource +		result []unversioned.GroupVersionResource +		err    error +	}{ +		{ +			name:   "empty", +			mapper: MultiRESTMapper{}, +			input:  unversioned.GroupVersionResource{Resource: "foo"}, +			result: nil, +			err:    &NoResourceMatchError{PartialResource: unversioned.GroupVersionResource{Resource: "foo"}}, +		}, +		{ +			name:   "ignore not found", +			mapper: MultiRESTMapper{fixedRESTMapper{err: &NoResourceMatchError{PartialResource: unversioned.GroupVersionResource{Resource: "IGNORE_THIS"}}}}, +			input:  unversioned.GroupVersionResource{Resource: "foo"}, +			result: nil, +			err:    &NoResourceMatchError{PartialResource: unversioned.GroupVersionResource{Resource: "foo"}}, +		}, +		{ +			name:   "accept first failure", +			mapper: MultiRESTMapper{fixedRESTMapper{err: errors.New("fail on this")}, fixedRESTMapper{resourcesFor: []unversioned.GroupVersionResource{{Resource: "unused"}}}}, +			input:  unversioned.GroupVersionResource{Resource: "foo"}, +			result: nil, +			err:    errors.New("fail on this"), +		}, +		{ +			name: "union and dedup", +			mapper: MultiRESTMapper{ +				fixedRESTMapper{resourcesFor: []unversioned.GroupVersionResource{{Resource: "dupe"}, {Resource: "first"}}}, +				fixedRESTMapper{resourcesFor: []unversioned.GroupVersionResource{{Resource: "dupe"}, {Resource: "second"}}}, +			}, +			input:  unversioned.GroupVersionResource{Resource: "foo"}, +			result: []unversioned.GroupVersionResource{{Resource: "dupe"}, {Resource: "first"}, {Resource: "second"}}, +		}, +		{ +			name: "skip not found and continue", +			mapper: MultiRESTMapper{ +				fixedRESTMapper{err:
&NoResourceMatchError{PartialResource: unversioned.GroupVersionResource{Resource: "IGNORE_THIS"}}}, +				fixedRESTMapper{resourcesFor: []unversioned.GroupVersionResource{{Resource: "first"}, {Resource: "second"}}}, +			}, +			input:  unversioned.GroupVersionResource{Resource: "foo"}, +			result: []unversioned.GroupVersionResource{{Resource: "first"}, {Resource: "second"}}, +		}, +	} + +	for _, tc := range tcs { +		actualResult, actualErr := tc.mapper.ResourcesFor(tc.input) +		if e, a := tc.result, actualResult; !reflect.DeepEqual(e, a) { +			t.Errorf("%s: expected %v, got %v", tc.name, e, a) +		} +		switch { +		case tc.err == nil && actualErr == nil: +		case tc.err == nil: +			t.Errorf("%s: unexpected error: %v", tc.name, actualErr) +		case actualErr == nil: +			t.Errorf("%s: expected error: %v got nil", tc.name, tc.err) +		case tc.err.Error() != actualErr.Error(): +			t.Errorf("%s: expected %v, got %v", tc.name, tc.err, actualErr) +		} +	} +} + +func TestMultiRESTMapperKindsFor(t *testing.T) { +	tcs := []struct { +		name string + +		mapper MultiRESTMapper +		input  unversioned.GroupVersionResource +		result []unversioned.GroupVersionKind +		err    error +	}{ +		{ +			name:   "empty", +			mapper: MultiRESTMapper{}, +			input:  unversioned.GroupVersionResource{Resource: "foo"}, +			result: nil, +			err:    &NoResourceMatchError{PartialResource: unversioned.GroupVersionResource{Resource: "foo"}}, +		}, +		{ +			name:   "ignore not found", +			mapper: MultiRESTMapper{fixedRESTMapper{err: &NoResourceMatchError{PartialResource: unversioned.GroupVersionResource{Resource: "IGNORE_THIS"}}}}, +			input:  unversioned.GroupVersionResource{Resource: "foo"}, +			result: nil, +			err:    &NoResourceMatchError{PartialResource: unversioned.GroupVersionResource{Resource: "foo"}}, +		}, +		{ +			name:   "accept first failure", +			mapper: MultiRESTMapper{fixedRESTMapper{err: errors.New("fail on this")}, fixedRESTMapper{kindsFor: []unversioned.GroupVersionKind{{Kind: "unused"}}}}, +			input:  unversioned.GroupVersionResource{Resource: "foo"}, +			result: nil, +			err:    errors.New("fail on this"), +		}, +		{ +			name: "union and dedup", +			mapper: MultiRESTMapper{ +				fixedRESTMapper{kindsFor: []unversioned.GroupVersionKind{{Kind: "dupe"}, {Kind: "first"}}}, +				fixedRESTMapper{kindsFor: []unversioned.GroupVersionKind{{Kind: "dupe"}, {Kind: "second"}}}, +			}, +			input:  unversioned.GroupVersionResource{Resource: "foo"}, +			result: []unversioned.GroupVersionKind{{Kind: "dupe"}, {Kind: "first"}, {Kind: "second"}}, +		}, +		{ +			name: "skip not found and continue", +			mapper: MultiRESTMapper{ +				fixedRESTMapper{err: &NoResourceMatchError{PartialResource: unversioned.GroupVersionResource{Resource: "IGNORE_THIS"}}}, +				fixedRESTMapper{kindsFor: []unversioned.GroupVersionKind{{Kind: "first"}, {Kind: "second"}}}, +			}, +			input:  unversioned.GroupVersionResource{Resource: "foo"}, +			result: []unversioned.GroupVersionKind{{Kind: "first"}, {Kind: "second"}}, +		}, +	} + +	for _, tc := range tcs { +		actualResult, actualErr := tc.mapper.KindsFor(tc.input) +		if e, a := tc.result, actualResult; !reflect.DeepEqual(e, a) { +			t.Errorf("%s: expected %v, got %v", tc.name, e, a) +		} +		switch { +		case tc.err == nil && actualErr == nil: +		case tc.err == nil: +			t.Errorf("%s: unexpected error: %v", tc.name, actualErr) +		case actualErr == nil: +			t.Errorf("%s: expected error: %v got nil", tc.name, tc.err) +		case tc.err.Error() != actualErr.Error(): +			t.Errorf("%s: expected %v, got %v", tc.name, tc.err, actualErr) +		} +	} +} + +func TestMultiRESTMapperKindFor(t *testing.T) { +	tcs := []struct { +		name string + +		mapper MultiRESTMapper +		input
unversioned.GroupVersionResource + result unversioned.GroupVersionKind + err error + }{ + { + name: "empty", + mapper: MultiRESTMapper{}, + input: unversioned.GroupVersionResource{Resource: "foo"}, + result: unversioned.GroupVersionKind{}, + err: &NoResourceMatchError{PartialResource: unversioned.GroupVersionResource{Resource: "foo"}}, + }, + { + name: "ignore not found", + mapper: MultiRESTMapper{fixedRESTMapper{err: &NoResourceMatchError{PartialResource: unversioned.GroupVersionResource{Resource: "IGNORE_THIS"}}}}, + input: unversioned.GroupVersionResource{Resource: "foo"}, + result: unversioned.GroupVersionKind{}, + err: &NoResourceMatchError{PartialResource: unversioned.GroupVersionResource{Resource: "foo"}}, + }, + { + name: "accept first failure", + mapper: MultiRESTMapper{fixedRESTMapper{err: errors.New("fail on this")}, fixedRESTMapper{kindsFor: []unversioned.GroupVersionKind{{Kind: "unused"}}}}, + input: unversioned.GroupVersionResource{Resource: "foo"}, + result: unversioned.GroupVersionKind{}, + err: errors.New("fail on this"), + }, + } + + for _, tc := range tcs { + actualResult, actualErr := tc.mapper.KindFor(tc.input) + if e, a := tc.result, actualResult; e != a { + t.Errorf("%s: expected %v, got %v", tc.name, e, a) + } + switch { + case tc.err == nil && actualErr == nil: + case tc.err == nil: + t.Errorf("%s: unexpected error: %v", tc.name, actualErr) + case actualErr == nil: + t.Errorf("%s: expected error: %v got nil", tc.name, tc.err) + case tc.err.Error() != actualErr.Error(): + t.Errorf("%s: expected %v, got %v", tc.name, tc.err, actualErr) + } + } +} + +type fixedRESTMapper struct { + resourcesFor []unversioned.GroupVersionResource + kindsFor []unversioned.GroupVersionKind + resourceFor unversioned.GroupVersionResource + kindFor unversioned.GroupVersionKind + + err error +} + +func (m fixedRESTMapper) ResourceSingularizer(resource string) (singular string, err error) { + return "", m.err +} + +func (m fixedRESTMapper) ResourcesFor(resource unversioned.GroupVersionResource) ([]unversioned.GroupVersionResource, error) { + return m.resourcesFor, m.err +} + +func (m fixedRESTMapper) KindsFor(resource unversioned.GroupVersionResource) (gvk []unversioned.GroupVersionKind, err error) { + return m.kindsFor, m.err +} + +func (m fixedRESTMapper) ResourceFor(resource unversioned.GroupVersionResource) (unversioned.GroupVersionResource, error) { + return m.resourceFor, m.err +} + +func (m fixedRESTMapper) KindFor(resource unversioned.GroupVersionResource) (unversioned.GroupVersionKind, error) { + return m.kindFor, m.err +} + +func (m fixedRESTMapper) RESTMapping(gk unversioned.GroupKind, versions ...string) (mapping *RESTMapping, err error) { + return nil, m.err +} + +func (m fixedRESTMapper) AliasesForResource(alias string) (aliases []string, ok bool) { + return nil, false +} + +func (m fixedRESTMapper) ResourceIsValid(resource unversioned.GroupVersionResource) bool { + return false +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/meta/priority_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/meta/priority_test.go new file mode 100644 index 000000000000..ea2d24b37254 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/meta/priority_test.go @@ -0,0 +1,206 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package meta + +import ( + "strings" + "testing" + + "k8s.io/kubernetes/pkg/api/unversioned" +) + +func TestPriorityRESTMapperResourceForErrorHandling(t *testing.T) { + tcs := []struct { + name string + + delegate RESTMapper + resourcePatterns []unversioned.GroupVersionResource + result unversioned.GroupVersionResource + err string + }{ + { + name: "single hit", + delegate: fixedRESTMapper{resourcesFor: []unversioned.GroupVersionResource{{Resource: "single-hit"}}}, + result: unversioned.GroupVersionResource{Resource: "single-hit"}, + }, + { + name: "ambiguous match", + delegate: fixedRESTMapper{resourcesFor: []unversioned.GroupVersionResource{ + {Group: "one", Version: "a", Resource: "first"}, + {Group: "two", Version: "b", Resource: "second"}, + }}, + err: "matches multiple resources", + }, + { + name: "group selection", + delegate: fixedRESTMapper{resourcesFor: []unversioned.GroupVersionResource{ + {Group: "one", Version: "a", Resource: "first"}, + {Group: "two", Version: "b", Resource: "second"}, + }}, + resourcePatterns: []unversioned.GroupVersionResource{ + {Group: "one", Version: AnyVersion, Resource: AnyResource}, + }, + result: unversioned.GroupVersionResource{Group: "one", Version: "a", Resource: "first"}, + }, + { + name: "empty match continues", + delegate: fixedRESTMapper{resourcesFor: []unversioned.GroupVersionResource{ + {Group: "one", Version: "a", Resource: "first"}, + {Group: "two", Version: "b", Resource: "second"}, + }}, + resourcePatterns: []unversioned.GroupVersionResource{ + {Group: "fail", Version: AnyVersion, Resource: AnyResource}, + {Group: "one", Version: AnyVersion, Resource: AnyResource}, + }, + result: unversioned.GroupVersionResource{Group: "one", Version: "a", Resource: "first"}, + }, + { + name: "group followed by version selection", + delegate: fixedRESTMapper{resourcesFor: []unversioned.GroupVersionResource{ + {Group: "one", Version: "a", Resource: "first"}, + {Group: "two", Version: "b", Resource: "second"}, + {Group: "one", Version: "c", Resource: "third"}, + }}, + resourcePatterns: []unversioned.GroupVersionResource{ + {Group: "one", Version: AnyVersion, Resource: AnyResource}, + {Group: AnyGroup, Version: "a", Resource: AnyResource}, + }, + result: unversioned.GroupVersionResource{Group: "one", Version: "a", Resource: "first"}, + }, + { + name: "resource selection", + delegate: fixedRESTMapper{resourcesFor: []unversioned.GroupVersionResource{ + {Group: "one", Version: "a", Resource: "first"}, + {Group: "one", Version: "a", Resource: "second"}, + }}, + resourcePatterns: []unversioned.GroupVersionResource{ + {Group: AnyGroup, Version: AnyVersion, Resource: "second"}, + }, + result: unversioned.GroupVersionResource{Group: "one", Version: "a", Resource: "second"}, + }, + } + + for _, tc := range tcs { + mapper := PriorityRESTMapper{Delegate: tc.delegate, ResourcePriority: tc.resourcePatterns} + + actualResult, actualErr := mapper.ResourceFor(unversioned.GroupVersionResource{}) + if e, a := tc.result, actualResult; e != a { + t.Errorf("%s: expected %v, got %v", tc.name, e, a) + } + if len(tc.err) == 0 && actualErr == nil { + continue + } + if len(tc.err) > 
0 && actualErr == nil { + t.Errorf("%s: missing expected err: %v", tc.name, tc.err) + continue + } + if !strings.Contains(actualErr.Error(), tc.err) { + t.Errorf("%s: expected %v, got %v", tc.name, tc.err, actualErr) + } + } +} + +func TestPriorityRESTMapperKindForErrorHandling(t *testing.T) { + tcs := []struct { + name string + + delegate RESTMapper + kindPatterns []unversioned.GroupVersionKind + result unversioned.GroupVersionKind + err string + }{ + { + name: "single hit", + delegate: fixedRESTMapper{kindsFor: []unversioned.GroupVersionKind{{Kind: "single-hit"}}}, + result: unversioned.GroupVersionKind{Kind: "single-hit"}, + }, + { + name: "ambiguous match", + delegate: fixedRESTMapper{kindsFor: []unversioned.GroupVersionKind{ + {Group: "one", Version: "a", Kind: "first"}, + {Group: "two", Version: "b", Kind: "second"}, + }}, + err: "matches multiple kinds", + }, + { + name: "group selection", + delegate: fixedRESTMapper{kindsFor: []unversioned.GroupVersionKind{ + {Group: "one", Version: "a", Kind: "first"}, + {Group: "two", Version: "b", Kind: "second"}, + }}, + kindPatterns: []unversioned.GroupVersionKind{ + {Group: "one", Version: AnyVersion, Kind: AnyKind}, + }, + result: unversioned.GroupVersionKind{Group: "one", Version: "a", Kind: "first"}, + }, + { + name: "empty match continues", + delegate: fixedRESTMapper{kindsFor: []unversioned.GroupVersionKind{ + {Group: "one", Version: "a", Kind: "first"}, + {Group: "two", Version: "b", Kind: "second"}, + }}, + kindPatterns: []unversioned.GroupVersionKind{ + {Group: "fail", Version: AnyVersion, Kind: AnyKind}, + {Group: "one", Version: AnyVersion, Kind: AnyKind}, + }, + result: unversioned.GroupVersionKind{Group: "one", Version: "a", Kind: "first"}, + }, + { + name: "group followed by version selection", + delegate: fixedRESTMapper{kindsFor: []unversioned.GroupVersionKind{ + {Group: "one", Version: "a", Kind: "first"}, + {Group: "two", Version: "b", Kind: "second"}, + {Group: "one", Version: "c", Kind: "third"}, + }}, + kindPatterns: []unversioned.GroupVersionKind{ + {Group: "one", Version: AnyVersion, Kind: AnyKind}, + {Group: AnyGroup, Version: "a", Kind: AnyKind}, + }, + result: unversioned.GroupVersionKind{Group: "one", Version: "a", Kind: "first"}, + }, + { + name: "kind selection", + delegate: fixedRESTMapper{kindsFor: []unversioned.GroupVersionKind{ + {Group: "one", Version: "a", Kind: "first"}, + {Group: "one", Version: "a", Kind: "second"}, + }}, + kindPatterns: []unversioned.GroupVersionKind{ + {Group: AnyGroup, Version: AnyVersion, Kind: "second"}, + }, + result: unversioned.GroupVersionKind{Group: "one", Version: "a", Kind: "second"}, + }, + } + + for _, tc := range tcs { + mapper := PriorityRESTMapper{Delegate: tc.delegate, KindPriority: tc.kindPatterns} + + actualResult, actualErr := mapper.KindFor(unversioned.GroupVersionResource{}) + if e, a := tc.result, actualResult; e != a { + t.Errorf("%s: expected %v, got %v", tc.name, e, a) + } + if len(tc.err) == 0 && actualErr == nil { + continue + } + if len(tc.err) > 0 && actualErr == nil { + t.Errorf("%s: missing expected err: %v", tc.name, tc.err) + continue + } + if !strings.Contains(actualErr.Error(), tc.err) { + t.Errorf("%s: expected %v, got %v", tc.name, tc.err, actualErr) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/meta/restmapper.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/meta/restmapper.go index d56b18cea9a5..4e07ab7414e1 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/meta/restmapper.go +++ 
b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/meta/restmapper.go @@ -69,7 +69,6 @@ var RESTScopeRoot = &restScope{ // // TODO: Only accept plural for some operations for increased control? // (`get pod bar` vs `get pods bar`) -// TODO these maps should be keyed based on GroupVersionKinds type DefaultRESTMapper struct { defaultGroupVersions []unversioned.GroupVersion @@ -80,6 +79,9 @@ type DefaultRESTMapper struct { pluralToSingular map[unversioned.GroupVersionResource]unversioned.GroupVersionResource interfacesFunc VersionInterfacesFunc + + // aliasToResource is used for mapping aliases to resources + aliasToResource map[string][]string } func (m *DefaultRESTMapper) String() string { @@ -103,6 +105,7 @@ func NewDefaultRESTMapper(defaultGroupVersions []unversioned.GroupVersion, f Ver kindToScope := make(map[unversioned.GroupVersionKind]RESTScope) singularToPlural := make(map[unversioned.GroupVersionResource]unversioned.GroupVersionResource) pluralToSingular := make(map[unversioned.GroupVersionResource]unversioned.GroupVersionResource) + aliasToResource := make(map[string][]string) // TODO: verify name mappings work correctly when versions differ return &DefaultRESTMapper{ @@ -112,6 +115,7 @@ func NewDefaultRESTMapper(defaultGroupVersions []unversioned.GroupVersion, f Ver defaultGroupVersions: defaultGroupVersions, singularToPlural: singularToPlural, pluralToSingular: pluralToSingular, + aliasToResource: aliasToResource, interfacesFunc: f, } } @@ -138,6 +142,8 @@ var unpluralizedSuffixes = []string{ } // KindToResource converts Kind to a resource name. +// Broken. This method only "sort of" works when used outside of this package. It assumes that Kinds and Resources match +// and they aren't guaranteed to do so. func KindToResource(kind unversioned.GroupVersionKind) ( /*plural*/ unversioned.GroupVersionResource /*singular*/, unversioned.GroupVersionResource) { kindName := kind.Kind if len(kindName) == 0 { @@ -194,7 +200,19 @@ func (m *DefaultRESTMapper) ResourceSingularizer(resourceType string) (string, e return singular.Resource, nil } -func (m *DefaultRESTMapper) ResourcesFor(resource unversioned.GroupVersionResource) ([]unversioned.GroupVersionResource, error) { +// coerceResourceForMatching makes the resource lower case and converts internal versions to unspecified (legacy behavior) +func coerceResourceForMatching(resource unversioned.GroupVersionResource) unversioned.GroupVersionResource { + resource.Resource = strings.ToLower(resource.Resource) + if resource.Version == runtime.APIVersionInternal { + resource.Version = "" + } + + return resource +} + +func (m *DefaultRESTMapper) ResourcesFor(input unversioned.GroupVersionResource) ([]unversioned.GroupVersionResource, error) { + resource := coerceResourceForMatching(input) + hasResource := len(resource.Resource) > 0 hasGroup := len(resource.Group) > 0 hasVersion := len(resource.Version) > 0 @@ -271,10 +289,7 @@ func (m *DefaultRESTMapper) ResourceFor(resource unversioned.GroupVersionResourc } func (m *DefaultRESTMapper) KindsFor(input unversioned.GroupVersionResource) ([]unversioned.GroupVersionKind, error) { - resource := input.GroupVersion().WithResource(strings.ToLower(input.Resource)) - if resource.Version == runtime.APIVersionInternal { - resource.Version = "" - } + resource := coerceResourceForMatching(input) hasResource := len(resource.Resource) > 0 hasGroup := len(resource.Group) > 0 @@ -488,20 +503,17 @@ func (m *DefaultRESTMapper) RESTMapping(gk unversioned.GroupKind, versions ...st return retVal, nil } -// aliasToResource 
is used for mapping aliases to resources -var aliasToResource = map[string][]string{} - // AddResourceAlias maps aliases to resources func (m *DefaultRESTMapper) AddResourceAlias(alias string, resources ...string) { if len(resources) == 0 { return } - aliasToResource[alias] = resources + m.aliasToResource[alias] = resources } // AliasesForResource returns whether a resource has an alias or not func (m *DefaultRESTMapper) AliasesForResource(alias string) ([]string, bool) { - if res, ok := aliasToResource[alias]; ok { + if res, ok := m.aliasToResource[alias]; ok { return res, true } return nil, false diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/meta/restmapper_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/meta/restmapper_test.go new file mode 100644 index 000000000000..5baad6e6223a --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/meta/restmapper_test.go @@ -0,0 +1,550 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package meta + +import ( + "errors" + "reflect" + "strings" + "testing" + + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/runtime" +) + +type fakeConvertor struct{} + +func (fakeConvertor) Convert(in, out interface{}) error { + return nil +} + +func (fakeConvertor) ConvertToVersion(in runtime.Object, _ unversioned.GroupVersion) (runtime.Object, error) { + return in, nil +} + +func (fakeConvertor) ConvertFieldLabel(version, kind, label, value string) (string, string, error) { + return label, value, nil +} + +var validAccessor = resourceAccessor{} +var validConvertor = fakeConvertor{} + +func fakeInterfaces(version unversioned.GroupVersion) (*VersionInterfaces, error) { + return &VersionInterfaces{ObjectConvertor: validConvertor, MetadataAccessor: validAccessor}, nil +} + +var unmatchedErr = errors.New("no version") + +func unmatchedVersionInterfaces(version unversioned.GroupVersion) (*VersionInterfaces, error) { + return nil, unmatchedErr +} + +func TestRESTMapperVersionAndKindForResource(t *testing.T) { + testGroup := "test.group" + testVersion := "test" + testGroupVersion := unversioned.GroupVersion{Group: testGroup, Version: testVersion} + + testCases := []struct { + Resource unversioned.GroupVersionResource + GroupVersionToRegister unversioned.GroupVersion + ExpectedGVK unversioned.GroupVersionKind + Err bool + }{ + {Resource: unversioned.GroupVersionResource{Resource: "internalobjec"}, Err: true}, + {Resource: unversioned.GroupVersionResource{Resource: "internalObjec"}, Err: true}, + + {Resource: unversioned.GroupVersionResource{Resource: "internalobject"}, ExpectedGVK: testGroupVersion.WithKind("InternalObject")}, + {Resource: unversioned.GroupVersionResource{Resource: "internalobjects"}, ExpectedGVK: testGroupVersion.WithKind("InternalObject")}, + } + for i, testCase := range testCases { + mapper := NewDefaultRESTMapper([]unversioned.GroupVersion{testGroupVersion}, fakeInterfaces) + if len(testCase.ExpectedGVK.Kind) != 0 { + mapper.Add(testCase.ExpectedGVK, 
RESTScopeNamespace) + } + actualGVK, err := mapper.KindFor(testCase.Resource) + + hasErr := err != nil + if hasErr != testCase.Err { + t.Errorf("%d: unexpected error behavior %t: %v", i, testCase.Err, err) + continue + } + if err != nil { + continue + } + + if actualGVK != testCase.ExpectedGVK { + t.Errorf("%d: unexpected version and kind: e=%s a=%s", i, testCase.ExpectedGVK, actualGVK) + } + } +} + +func TestRESTMapperGroupForResource(t *testing.T) { + testCases := []struct { + Resource unversioned.GroupVersionResource + GroupVersionKind unversioned.GroupVersionKind + Err bool + }{ + {Resource: unversioned.GroupVersionResource{Resource: "myObject"}, GroupVersionKind: unversioned.GroupVersionKind{Group: "testapi", Version: "test", Kind: "MyObject"}}, + {Resource: unversioned.GroupVersionResource{Resource: "myobject"}, GroupVersionKind: unversioned.GroupVersionKind{Group: "testapi2", Version: "test", Kind: "MyObject"}}, + {Resource: unversioned.GroupVersionResource{Resource: "myObje"}, Err: true, GroupVersionKind: unversioned.GroupVersionKind{Group: "testapi", Version: "test", Kind: "MyObject"}}, + {Resource: unversioned.GroupVersionResource{Resource: "myobje"}, Err: true, GroupVersionKind: unversioned.GroupVersionKind{Group: "testapi", Version: "test", Kind: "MyObject"}}, + } + for i, testCase := range testCases { + mapper := NewDefaultRESTMapper([]unversioned.GroupVersion{testCase.GroupVersionKind.GroupVersion()}, fakeInterfaces) + mapper.Add(testCase.GroupVersionKind, RESTScopeNamespace) + + actualGVK, err := mapper.KindFor(testCase.Resource) + if testCase.Err { + if err == nil { + t.Errorf("%d: expected error", i) + } + } else if err != nil { + t.Errorf("%d: unexpected error: %v", i, err) + } else if actualGVK != testCase.GroupVersionKind { + t.Errorf("%d: expected group %q, got %q", i, testCase.GroupVersionKind, actualGVK) + } + } +} + +func TestRESTMapperKindsFor(t *testing.T) { + testCases := []struct { + Name string + PreferredOrder []unversioned.GroupVersion + KindsToRegister []unversioned.GroupVersionKind + PartialResourceToRequest unversioned.GroupVersionResource + + ExpectedKinds []unversioned.GroupVersionKind + ExpectedKindErr string + }{ + { + Name: "ambiguous groups, with preference order", + PreferredOrder: []unversioned.GroupVersion{ + {Group: "second-group", Version: "first-version"}, + {Group: "first-group", Version: "first-version"}, + }, + KindsToRegister: []unversioned.GroupVersionKind{ + {Group: "first-group", Version: "first-version", Kind: "my-kind"}, + {Group: "first-group", Version: "first-version", Kind: "your-kind"}, + {Group: "second-group", Version: "first-version", Kind: "my-kind"}, + {Group: "second-group", Version: "first-version", Kind: "your-kind"}, + }, + PartialResourceToRequest: unversioned.GroupVersionResource{Resource: "my-kinds"}, + + ExpectedKinds: []unversioned.GroupVersionKind{ + {Group: "second-group", Version: "first-version", Kind: "my-kind"}, + {Group: "first-group", Version: "first-version", Kind: "my-kind"}, + }, + ExpectedKindErr: " matches multiple kinds ", + }, + + { + Name: "ambiguous groups, with explicit group match", + PreferredOrder: []unversioned.GroupVersion{ + {Group: "second-group", Version: "first-version"}, + {Group: "first-group", Version: "first-version"}, + }, + KindsToRegister: []unversioned.GroupVersionKind{ + {Group: "first-group", Version: "first-version", Kind: "my-kind"}, + {Group: "first-group", Version: "first-version", Kind: "your-kind"}, + {Group: "second-group", Version: "first-version", Kind: "my-kind"}, + 
{Group: "second-group", Version: "first-version", Kind: "your-kind"}, + }, + PartialResourceToRequest: unversioned.GroupVersionResource{Group: "first-group", Resource: "my-kinds"}, + + ExpectedKinds: []unversioned.GroupVersionKind{ + {Group: "first-group", Version: "first-version", Kind: "my-kind"}, + }, + }, + + { + Name: "ambiguous groups, with ambiguous version match", + PreferredOrder: []unversioned.GroupVersion{ + {Group: "first-group", Version: "first-version"}, + {Group: "second-group", Version: "first-version"}, + }, + KindsToRegister: []unversioned.GroupVersionKind{ + {Group: "first-group", Version: "first-version", Kind: "my-kind"}, + {Group: "first-group", Version: "first-version", Kind: "your-kind"}, + {Group: "second-group", Version: "first-version", Kind: "my-kind"}, + {Group: "second-group", Version: "first-version", Kind: "your-kind"}, + }, + PartialResourceToRequest: unversioned.GroupVersionResource{Version: "first-version", Resource: "my-kinds"}, + + ExpectedKinds: []unversioned.GroupVersionKind{ + {Group: "first-group", Version: "first-version", Kind: "my-kind"}, + {Group: "second-group", Version: "first-version", Kind: "my-kind"}, + }, + ExpectedKindErr: " matches multiple kinds ", + }, + } + for _, testCase := range testCases { + tcName := testCase.Name + mapper := NewDefaultRESTMapper(testCase.PreferredOrder, fakeInterfaces) + for _, kind := range testCase.KindsToRegister { + mapper.Add(kind, RESTScopeNamespace) + } + + actualKinds, err := mapper.KindsFor(testCase.PartialResourceToRequest) + if err != nil { + t.Errorf("%s: unexpected error: %v", tcName, err) + continue + } + if !reflect.DeepEqual(testCase.ExpectedKinds, actualKinds) { + t.Errorf("%s: expected %v, got %v", tcName, testCase.ExpectedKinds, actualKinds) + } + + singleKind, err := mapper.KindFor(testCase.PartialResourceToRequest) + if err == nil && len(testCase.ExpectedKindErr) != 0 { + t.Errorf("%s: expected error: %v", tcName, testCase.ExpectedKindErr) + continue + } + if err != nil { + if len(testCase.ExpectedKindErr) == 0 { + t.Errorf("%s: unexpected error: %v", tcName, err) + continue + } else { + if !strings.Contains(err.Error(), testCase.ExpectedKindErr) { + t.Errorf("%s: expected %v, got %v", tcName, testCase.ExpectedKindErr, err) + continue + } + } + + } else { + if testCase.ExpectedKinds[0] != singleKind { + t.Errorf("%s: expected %v, got %v", tcName, testCase.ExpectedKinds[0], singleKind) + } + + } + } +} + +func TestRESTMapperResourcesFor(t *testing.T) { + testCases := []struct { + Name string + PreferredOrder []unversioned.GroupVersion + KindsToRegister []unversioned.GroupVersionKind + PluralPartialResourceToRequest unversioned.GroupVersionResource + SingularPartialResourceToRequest unversioned.GroupVersionResource + + ExpectedResources []unversioned.GroupVersionResource + ExpectedResourceErr string + }{ + { + Name: "ambiguous groups, with preference order", + PreferredOrder: []unversioned.GroupVersion{ + {Group: "second-group", Version: "first-version"}, + {Group: "first-group", Version: "first-version"}, + }, + KindsToRegister: []unversioned.GroupVersionKind{ + {Group: "first-group", Version: "first-version", Kind: "my-kind"}, + {Group: "first-group", Version: "first-version", Kind: "your-kind"}, + {Group: "second-group", Version: "first-version", Kind: "my-kind"}, + {Group: "second-group", Version: "first-version", Kind: "your-kind"}, + }, + PluralPartialResourceToRequest: unversioned.GroupVersionResource{Resource: "my-kinds"}, + SingularPartialResourceToRequest: 
unversioned.GroupVersionResource{Resource: "my-kind"}, + + ExpectedResources: []unversioned.GroupVersionResource{ + {Group: "second-group", Version: "first-version", Resource: "my-kinds"}, + {Group: "first-group", Version: "first-version", Resource: "my-kinds"}, + }, + ExpectedResourceErr: " matches multiple resources ", + }, + + { + Name: "ambiguous groups, with explicit group match", + PreferredOrder: []unversioned.GroupVersion{ + {Group: "second-group", Version: "first-version"}, + {Group: "first-group", Version: "first-version"}, + }, + KindsToRegister: []unversioned.GroupVersionKind{ + {Group: "first-group", Version: "first-version", Kind: "my-kind"}, + {Group: "first-group", Version: "first-version", Kind: "your-kind"}, + {Group: "second-group", Version: "first-version", Kind: "my-kind"}, + {Group: "second-group", Version: "first-version", Kind: "your-kind"}, + }, + PluralPartialResourceToRequest: unversioned.GroupVersionResource{Group: "first-group", Resource: "my-kinds"}, + SingularPartialResourceToRequest: unversioned.GroupVersionResource{Group: "first-group", Resource: "my-kind"}, + + ExpectedResources: []unversioned.GroupVersionResource{ + {Group: "first-group", Version: "first-version", Resource: "my-kinds"}, + }, + }, + + { + Name: "ambiguous groups, with ambiguous version match", + PreferredOrder: []unversioned.GroupVersion{ + {Group: "first-group", Version: "first-version"}, + {Group: "second-group", Version: "first-version"}, + }, + KindsToRegister: []unversioned.GroupVersionKind{ + {Group: "first-group", Version: "first-version", Kind: "my-kind"}, + {Group: "first-group", Version: "first-version", Kind: "your-kind"}, + {Group: "second-group", Version: "first-version", Kind: "my-kind"}, + {Group: "second-group", Version: "first-version", Kind: "your-kind"}, + }, + PluralPartialResourceToRequest: unversioned.GroupVersionResource{Version: "first-version", Resource: "my-kinds"}, + SingularPartialResourceToRequest: unversioned.GroupVersionResource{Version: "first-version", Resource: "my-kind"}, + + ExpectedResources: []unversioned.GroupVersionResource{ + {Group: "first-group", Version: "first-version", Resource: "my-kinds"}, + {Group: "second-group", Version: "first-version", Resource: "my-kinds"}, + }, + ExpectedResourceErr: " matches multiple resources ", + }, + } + for _, testCase := range testCases { + tcName := testCase.Name + + for _, partialResource := range []unversioned.GroupVersionResource{testCase.PluralPartialResourceToRequest, testCase.SingularPartialResourceToRequest} { + mapper := NewDefaultRESTMapper(testCase.PreferredOrder, fakeInterfaces) + for _, kind := range testCase.KindsToRegister { + mapper.Add(kind, RESTScopeNamespace) + } + + actualResources, err := mapper.ResourcesFor(partialResource) + if err != nil { + t.Errorf("%s: unexpected error: %v", tcName, err) + continue + } + if !reflect.DeepEqual(testCase.ExpectedResources, actualResources) { + t.Errorf("%s: expected %v, got %v", tcName, testCase.ExpectedResources, actualResources) + } + + singleResource, err := mapper.ResourceFor(partialResource) + if err == nil && len(testCase.ExpectedResourceErr) != 0 { + t.Errorf("%s: expected error: %v", tcName, testCase.ExpectedResourceErr) + continue + } + if err != nil { + if len(testCase.ExpectedResourceErr) == 0 { + t.Errorf("%s: unexpected error: %v", tcName, err) + continue + } else { + if !strings.Contains(err.Error(), testCase.ExpectedResourceErr) { + t.Errorf("%s: expected %v, got %v", tcName, testCase.ExpectedResourceErr, err) + continue + } + } + + } else 
{ +				if testCase.ExpectedResources[0] != singleResource { +					t.Errorf("%s: expected %v, got %v", tcName, testCase.ExpectedResources[0], singleResource) +				} + +			} +		} +	} +} + +func TestKindToResource(t *testing.T) { +	testCases := []struct { +		Kind             string +		Plural, Singular string +	}{ +		{Kind: "Pod", Plural: "pods", Singular: "pod"}, + +		{Kind: "ReplicationController", Plural: "replicationcontrollers", Singular: "replicationcontroller"}, + +		// Add "ies" when ending with "y" +		{Kind: "ImageRepository", Plural: "imagerepositories", Singular: "imagerepository"}, +		// Add "es" when ending with "s" +		{Kind: "miss", Plural: "misses", Singular: "miss"}, +		// Add "s" otherwise +		{Kind: "lowercase", Plural: "lowercases", Singular: "lowercase"}, +	} +	for i, testCase := range testCases { +		version := unversioned.GroupVersion{} + +		plural, singular := KindToResource(version.WithKind(testCase.Kind)) +		if singular != version.WithResource(testCase.Singular) || plural != version.WithResource(testCase.Plural) { +			t.Errorf("%d: unexpected plural and singular: %v %v", i, plural, singular) +		} +	} +} + +func TestRESTMapperResourceSingularizer(t *testing.T) { +	testGroupVersion := unversioned.GroupVersion{Group: "tgroup", Version: "test"} + +	testCases := []struct { +		Kind     string +		Plural   string +		Singular string +	}{ +		{Kind: "Pod", Plural: "pods", Singular: "pod"}, +		{Kind: "ReplicationController", Plural: "replicationcontrollers", Singular: "replicationcontroller"}, +		{Kind: "ImageRepository", Plural: "imagerepositories", Singular: "imagerepository"}, +		{Kind: "Status", Plural: "statuses", Singular: "status"}, + +		{Kind: "lowercase", Plural: "lowercases", Singular: "lowercase"}, +		// TODO: this test is broken; it has been updated to reflect actual behavior. Kinds are expected to be singular. +		// old (incorrect) comment: Don't add an extra s if the original object is already plural +		{Kind: "lowercases", Plural: "lowercaseses", Singular: "lowercases"}, +	} +	for i, testCase := range testCases { +		mapper := NewDefaultRESTMapper([]unversioned.GroupVersion{testGroupVersion}, fakeInterfaces) +		// create singular/plural mapping +		mapper.Add(testGroupVersion.WithKind(testCase.Kind), RESTScopeNamespace) + +		singular, err := mapper.ResourceSingularizer(testCase.Plural) +		if err != nil { +			t.Errorf("%d: unexpected error: %v", i, err) +		} +		if singular != testCase.Singular { +			t.Errorf("%d: mismatched singular: got %v, expected %v", i, singular, testCase.Singular) +		} +	} +} + +func TestRESTMapperRESTMapping(t *testing.T) { +	testGroup := "tgroup" +	testGroupVersion := unversioned.GroupVersion{Group: testGroup, Version: "test"} +	internalGroupVersion := unversioned.GroupVersion{Group: testGroup, Version: "test"} + +	testCases := []struct { +		Kind             string +		APIGroupVersions []unversioned.GroupVersion +		DefaultVersions  []unversioned.GroupVersion + +		Resource             string +		ExpectedGroupVersion *unversioned.GroupVersion +		Err                  bool +	}{ +		{Kind: "Unknown", Err: true}, +		{Kind: "InternalObject", Err: true}, + +		{DefaultVersions: []unversioned.GroupVersion{testGroupVersion}, Kind: "Unknown", Err: true}, + +		{DefaultVersions: []unversioned.GroupVersion{testGroupVersion}, Kind: "InternalObject", APIGroupVersions: []unversioned.GroupVersion{{Group: testGroup, Version: "test"}}, Resource: "internalobjects"}, +		{DefaultVersions: []unversioned.GroupVersion{testGroupVersion}, Kind: "InternalObject", APIGroupVersions: []unversioned.GroupVersion{{Group: testGroup, Version: "test"}}, Resource: "internalobjects"}, + +		{DefaultVersions:
[]unversioned.GroupVersion{testGroupVersion}, Kind: "InternalObject", APIGroupVersions: []unversioned.GroupVersion{{Group: testGroup, Version: "test"}}, Resource: "internalobjects"}, + + {DefaultVersions: []unversioned.GroupVersion{testGroupVersion}, Kind: "InternalObject", APIGroupVersions: []unversioned.GroupVersion{}, Resource: "internalobjects", ExpectedGroupVersion: &unversioned.GroupVersion{Group: testGroup, Version: "test"}}, + + {DefaultVersions: []unversioned.GroupVersion{testGroupVersion}, Kind: "InternalObject", APIGroupVersions: []unversioned.GroupVersion{{Group: testGroup, Version: "test"}}, Resource: "internalobjects"}, + + // TODO: add test for a resource that exists in one version but not another + } + for i, testCase := range testCases { + mapper := NewDefaultRESTMapper(testCase.DefaultVersions, fakeInterfaces) + mapper.Add(internalGroupVersion.WithKind("InternalObject"), RESTScopeNamespace) + + preferredVersions := []string{} + for _, gv := range testCase.APIGroupVersions { + preferredVersions = append(preferredVersions, gv.Version) + } + gk := unversioned.GroupKind{Group: testGroup, Kind: testCase.Kind} + + mapping, err := mapper.RESTMapping(gk, preferredVersions...) + hasErr := err != nil + if hasErr != testCase.Err { + t.Errorf("%d: unexpected error behavior %t: %v", i, testCase.Err, err) + } + if hasErr { + continue + } + if mapping.Resource != testCase.Resource { + t.Errorf("%d: unexpected resource: %#v", i, mapping) + } + + if mapping.MetadataAccessor == nil || mapping.ObjectConvertor == nil { + t.Errorf("%d: missing codec and accessor: %#v", i, mapping) + } + + groupVersion := testCase.ExpectedGroupVersion + if groupVersion == nil { + groupVersion = &testCase.APIGroupVersions[0] + } + if mapping.GroupVersionKind.GroupVersion() != *groupVersion { + t.Errorf("%d: unexpected version: %#v", i, mapping) + } + + } +} + +func TestRESTMapperRESTMappingSelectsVersion(t *testing.T) { + expectedGroupVersion1 := unversioned.GroupVersion{Group: "tgroup", Version: "test1"} + expectedGroupVersion2 := unversioned.GroupVersion{Group: "tgroup", Version: "test2"} + expectedGroupVersion3 := unversioned.GroupVersion{Group: "tgroup", Version: "test3"} + internalObjectGK := unversioned.GroupKind{Group: "tgroup", Kind: "InternalObject"} + otherObjectGK := unversioned.GroupKind{Group: "tgroup", Kind: "OtherObject"} + + mapper := NewDefaultRESTMapper([]unversioned.GroupVersion{expectedGroupVersion1, expectedGroupVersion2}, fakeInterfaces) + mapper.Add(expectedGroupVersion1.WithKind("InternalObject"), RESTScopeNamespace) + mapper.Add(expectedGroupVersion2.WithKind("OtherObject"), RESTScopeNamespace) + + // pick default matching object kind based on search order + mapping, err := mapper.RESTMapping(otherObjectGK) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if mapping.Resource != "otherobjects" || mapping.GroupVersionKind.GroupVersion() != expectedGroupVersion2 { + t.Errorf("unexpected mapping: %#v", mapping) + } + + mapping, err = mapper.RESTMapping(internalObjectGK) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if mapping.Resource != "internalobjects" || mapping.GroupVersionKind.GroupVersion() != expectedGroupVersion1 { + t.Errorf("unexpected mapping: %#v", mapping) + } + + // mismatch of version + mapping, err = mapper.RESTMapping(internalObjectGK, expectedGroupVersion2.Version) + if err == nil { + t.Errorf("unexpected non-error") + } + mapping, err = mapper.RESTMapping(otherObjectGK, expectedGroupVersion1.Version) + if err == nil { + 
t.Errorf("unexpected non-error") + } + + // not in the search versions + mapping, err = mapper.RESTMapping(otherObjectGK, expectedGroupVersion3.Version) + if err == nil { + t.Errorf("unexpected non-error") + } + + // explicit search order + mapping, err = mapper.RESTMapping(otherObjectGK, expectedGroupVersion3.Version, expectedGroupVersion1.Version) + if err == nil { + t.Errorf("unexpected non-error") + } + + mapping, err = mapper.RESTMapping(otherObjectGK, expectedGroupVersion3.Version, expectedGroupVersion2.Version) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if mapping.Resource != "otherobjects" || mapping.GroupVersionKind.GroupVersion() != expectedGroupVersion2 { + t.Errorf("unexpected mapping: %#v", mapping) + } +} + +func TestRESTMapperReportsErrorOnBadVersion(t *testing.T) { + expectedGroupVersion1 := unversioned.GroupVersion{Group: "tgroup", Version: "test1"} + expectedGroupVersion2 := unversioned.GroupVersion{Group: "tgroup", Version: "test2"} + internalObjectGK := unversioned.GroupKind{Group: "tgroup", Kind: "InternalObject"} + + mapper := NewDefaultRESTMapper([]unversioned.GroupVersion{expectedGroupVersion1, expectedGroupVersion2}, unmatchedVersionInterfaces) + mapper.Add(expectedGroupVersion1.WithKind("InternalObject"), RESTScopeNamespace) + _, err := mapper.RESTMapping(internalObjectGK, expectedGroupVersion1.Version) + if err == nil { + t.Errorf("unexpected non-error") + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/meta_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/meta_test.go new file mode 100644 index 000000000000..cbac57e88cee --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/meta_test.go @@ -0,0 +1,98 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package api_test + +import ( + "reflect" + "testing" + + "github.com/google/gofuzz" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/meta" + "k8s.io/kubernetes/pkg/api/meta/metatypes" +) + +var _ meta.Object = &api.ObjectMeta{} + +// TestFillObjectMetaSystemFields validates that system populated fields are set on an object +func TestFillObjectMetaSystemFields(t *testing.T) { + ctx := api.NewDefaultContext() + resource := api.ObjectMeta{} + api.FillObjectMetaSystemFields(ctx, &resource) + if resource.CreationTimestamp.Time.IsZero() { + t.Errorf("resource.CreationTimestamp is zero") + } else if len(resource.UID) == 0 { + t.Errorf("resource.UID missing") + } +} + +// TestHasObjectMetaSystemFieldValues validates that true is returned if and only if all fields are populated +func TestHasObjectMetaSystemFieldValues(t *testing.T) { + ctx := api.NewDefaultContext() + resource := api.ObjectMeta{} + if api.HasObjectMetaSystemFieldValues(&resource) { + t.Errorf("the resource does not have all fields yet populated, but incorrectly reports it does") + } + api.FillObjectMetaSystemFields(ctx, &resource) + if !api.HasObjectMetaSystemFieldValues(&resource) { + t.Errorf("the resource does have all fields populated, but incorrectly reports it does not") + } +} + +func getObjectMetaAndOwnerReferences() (objectMeta api.ObjectMeta, metaOwnerReferences []metatypes.OwnerReference) { + fuzz.New().NilChance(.5).NumElements(1, 5).Fuzz(&objectMeta) + references := objectMeta.OwnerReferences + metaOwnerReferences = make([]metatypes.OwnerReference, 0) + for i := 0; i < len(references); i++ { + metaOwnerReferences = append(metaOwnerReferences, metatypes.OwnerReference{ + Kind: references[i].Kind, + Name: references[i].Name, + UID: references[i].UID, + APIVersion: references[i].APIVersion, + }) + } + if len(references) == 0 { + objectMeta.OwnerReferences = make([]api.OwnerReference, 0) + } + return objectMeta, metaOwnerReferences +} + +func testGetOwnerReferences(t *testing.T) { + meta, expected := getObjectMetaAndOwnerReferences() + refs := meta.GetOwnerReferences() + if !reflect.DeepEqual(refs, expected) { + t.Errorf("expect %v\n got %v", expected, refs) + } +} + +func testSetOwnerReferences(t *testing.T) { + expected, newRefs := getObjectMetaAndOwnerReferences() + objectMeta := &api.ObjectMeta{} + objectMeta.SetOwnerReferences(newRefs) + if !reflect.DeepEqual(expected.OwnerReferences, objectMeta.OwnerReferences) { + t.Errorf("expect: %#v\n got: %#v", expected.OwnerReferences, objectMeta.OwnerReferences) + } +} + +func TestAccessOwnerReferences(t *testing.T) { + fuzzIter := 5 + for i := 0; i < fuzzIter; i++ { + testGetOwnerReferences(t) + testSetOwnerReferences(t) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/pod/util.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/pod/util.go index 6b00c7e7dfaf..1bdacfe20d68 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/pod/util.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/pod/util.go @@ -24,9 +24,11 @@ import ( ) const ( + // TODO: to be deleted after v1.3 is released. PodSpec has a dedicated Hostname field. // The annotation value is a string specifying the hostname to be used for the pod e.g. 'my-webserver-1' PodHostnameAnnotation = "pod.beta.kubernetes.io/hostname" + // TODO: to be deleted after v1.3 is released. PodSpec has a dedicated Subdomain field. // The annotation value is a string specifying the subdomain e.g. "my-web-service" // If specified, on the pod itself, "<hostname>.my-web-service.<pod namespace>.svc.<cluster domain>"
would resolve to // the pod's IP. @@ -47,7 +49,7 @@ func FindPort(pod *api.Pod, svcPort *api.ServicePort) (int, error) { for _, container := range pod.Spec.Containers { for _, port := range container.Ports { if port.Name == name && port.Protocol == svcPort.Protocol { - return port.ContainerPort, nil + return int(port.ContainerPort), nil } } } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/pod/util_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/pod/util_test.go new file mode 100644 index 000000000000..4c4cc97a0d1f --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/pod/util_test.go @@ -0,0 +1,192 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package pod + +import ( + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/util/intstr" +) + +func TestFindPort(t *testing.T) { + testCases := []struct { + name string + containers []api.Container + port intstr.IntOrString + expected int + pass bool + }{{ + name: "valid int, no ports", + containers: []api.Container{{}}, + port: intstr.FromInt(93), + expected: 93, + pass: true, + }, { + name: "valid int, with ports", + containers: []api.Container{{Ports: []api.ContainerPort{{ + Name: "", + ContainerPort: 11, + Protocol: "TCP", + }, { + Name: "p", + ContainerPort: 22, + Protocol: "TCP", + }}}}, + port: intstr.FromInt(93), + expected: 93, + pass: true, + }, { + name: "valid str, no ports", + containers: []api.Container{{}}, + port: intstr.FromString("p"), + expected: 0, + pass: false, + }, { + name: "valid str, one ctr with ports", + containers: []api.Container{{Ports: []api.ContainerPort{{ + Name: "", + ContainerPort: 11, + Protocol: "UDP", + }, { + Name: "p", + ContainerPort: 22, + Protocol: "TCP", + }, { + Name: "q", + ContainerPort: 33, + Protocol: "TCP", + }}}}, + port: intstr.FromString("q"), + expected: 33, + pass: true, + }, { + name: "valid str, two ctr with ports", + containers: []api.Container{{}, {Ports: []api.ContainerPort{{ + Name: "", + ContainerPort: 11, + Protocol: "UDP", + }, { + Name: "p", + ContainerPort: 22, + Protocol: "TCP", + }, { + Name: "q", + ContainerPort: 33, + Protocol: "TCP", + }}}}, + port: intstr.FromString("q"), + expected: 33, + pass: true, + }, { + name: "valid str, two ctr with same port", + containers: []api.Container{{}, {Ports: []api.ContainerPort{{ + Name: "", + ContainerPort: 11, + Protocol: "UDP", + }, { + Name: "p", + ContainerPort: 22, + Protocol: "TCP", + }, { + Name: "q", + ContainerPort: 22, + Protocol: "TCP", + }}}}, + port: intstr.FromString("q"), + expected: 22, + pass: true, + }, { + name: "valid str, invalid protocol", + containers: []api.Container{{}, {Ports: []api.ContainerPort{{ + Name: "a", + ContainerPort: 11, + Protocol: "snmp", + }, + }}}, + port: intstr.FromString("a"), + expected: 0, + pass: false, + }, { + name: "valid hostPort", + containers: []api.Container{{}, {Ports: []api.ContainerPort{{ + Name: "a", + ContainerPort: 11, + HostPort: 81, + Protocol: "TCP", + }, + }}}, + port: 
intstr.FromString("a"), + expected: 11, + pass: true, + }, + { + name: "invalid hostPort", + containers: []api.Container{{}, {Ports: []api.ContainerPort{{ + Name: "a", + ContainerPort: 11, + HostPort: -1, + Protocol: "TCP", + }, + }}}, + port: intstr.FromString("a"), + expected: 11, + pass: true, + //this should fail but passes. + }, + { + name: "invalid ContainerPort", + containers: []api.Container{{}, {Ports: []api.ContainerPort{{ + Name: "a", + ContainerPort: -1, + Protocol: "TCP", + }, + }}}, + port: intstr.FromString("a"), + expected: -1, + pass: true, + //this should fail but passes + }, + { + name: "HostIP Address", + containers: []api.Container{{}, {Ports: []api.ContainerPort{{ + Name: "a", + ContainerPort: 11, + HostIP: "192.168.1.1", + Protocol: "TCP", + }, + }}}, + port: intstr.FromString("a"), + expected: 11, + pass: true, + }, + } + + for _, tc := range testCases { + port, err := FindPort(&api.Pod{Spec: api.PodSpec{Containers: tc.containers}}, + &api.ServicePort{Protocol: "TCP", TargetPort: tc.port}) + if err != nil && tc.pass { + t.Errorf("unexpected error for %s: %v", tc.name, err) + } + if err == nil && !tc.pass { + t.Errorf("unexpected non-error for %s: %d", tc.name, port) + } + if port != tc.expected { + t.Errorf("wrong result for %s: expected %d, got %d", tc.name, tc.expected, port) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/ref.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/ref.go index 95690b821ada..08dede0711f8 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/ref.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/ref.go @@ -54,24 +54,18 @@ func GetReference(obj runtime.Object) (*ObjectReference, error) { // if the object referenced is actually persisted, we can just get kind from meta // if we are building an object reference to something not yet persisted, we should fallback to scheme - var kind string - if gvk != nil { - kind = gvk.Kind - } + kind := gvk.Kind if len(kind) == 0 { // TODO: this is wrong - gvk, err := Scheme.ObjectKind(obj) + gvks, _, err := Scheme.ObjectKinds(obj) if err != nil { return nil, err } - kind = gvk.Kind + kind = gvks[0].Kind } // if the object referenced is actually persisted, we can also get version from meta - var version string - if gvk != nil { - version = gvk.GroupVersion().String() - } + version := gvk.GroupVersion().String() if len(version) == 0 { selfLink := meta.GetSelfLink() if len(selfLink) == 0 { @@ -111,9 +105,9 @@ func GetPartialReference(obj runtime.Object, fieldPath string) (*ObjectReference // IsAnAPIObject allows clients to preemptively get a reference to an API object and pass it to places that // intend only to get a reference to that object. This simplifies the event recording interface. 
-func (obj *ObjectReference) SetGroupVersionKind(gvk *unversioned.GroupVersionKind) { +func (obj *ObjectReference) SetGroupVersionKind(gvk unversioned.GroupVersionKind) { obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind() } -func (obj *ObjectReference) GroupVersionKind() *unversioned.GroupVersionKind { +func (obj *ObjectReference) GroupVersionKind() unversioned.GroupVersionKind { return unversioned.FromAPIVersionAndKind(obj.APIVersion, obj.Kind) } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/ref_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/ref_test.go new file mode 100644 index 000000000000..ad4df02b3e15 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/ref_test.go @@ -0,0 +1,136 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package api + +import ( + "reflect" + "testing" + + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/runtime" +) + +type FakeAPIObject struct{} + +func (obj *FakeAPIObject) GetObjectKind() unversioned.ObjectKind { return unversioned.EmptyObjectKind } + +type ExtensionAPIObject struct { + unversioned.TypeMeta + ObjectMeta +} + +func (obj *ExtensionAPIObject) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } + +func TestGetReference(t *testing.T) { + + // when vendoring kube, if you don't force the set of registered versions (like this hack/test-go.sh does) + // then you run into trouble because the types aren't registered in the scheme by anything. 
This performs the + // registration manually to allow unit test execution + if _, _, err := Scheme.ObjectKinds(&Pod{}); err != nil { + AddToScheme(Scheme) + } + + table := map[string]struct { + obj runtime.Object + ref *ObjectReference + fieldPath string + shouldErr bool + }{ + "pod": { + obj: &Pod{ + ObjectMeta: ObjectMeta{ + Name: "foo", + UID: "bar", + ResourceVersion: "42", + SelfLink: "/api/version1/pods/foo", + }, + }, + fieldPath: ".desiredState.containers[0]", + ref: &ObjectReference{ + Kind: "Pod", + APIVersion: "version1", + Name: "foo", + UID: "bar", + ResourceVersion: "42", + FieldPath: ".desiredState.containers[0]", + }, + }, + "serviceList": { + obj: &ServiceList{ + ListMeta: unversioned.ListMeta{ + ResourceVersion: "42", + SelfLink: "/api/version2/services", + }, + }, + ref: &ObjectReference{ + Kind: "ServiceList", + APIVersion: "version2", + ResourceVersion: "42", + }, + }, + "extensionAPIObject": { + obj: &ExtensionAPIObject{ + TypeMeta: unversioned.TypeMeta{ + Kind: "ExtensionAPIObject", + }, + ObjectMeta: ObjectMeta{ + Name: "foo", + UID: "bar", + ResourceVersion: "42", + SelfLink: "/custom_prefix/version1/extensions/foo", + }, + }, + ref: &ObjectReference{ + Kind: "ExtensionAPIObject", + APIVersion: "version1", + Name: "foo", + UID: "bar", + ResourceVersion: "42", + }, + }, + "badSelfLink": { + obj: &ServiceList{ + ListMeta: unversioned.ListMeta{ + ResourceVersion: "42", + SelfLink: "version2/services", + }, + }, + shouldErr: true, + }, + "error": { + obj: &FakeAPIObject{}, + ref: nil, + shouldErr: true, + }, + "errorNil": { + obj: nil, + ref: nil, + shouldErr: true, + }, + } + + for name, item := range table { + ref, err := GetPartialReference(item.obj, item.fieldPath) + if e, a := item.shouldErr, (err != nil); e != a { + t.Errorf("%v: expected %v, got %v, err %v", name, e, a, err) + continue + } + if e, a := item.ref, ref; !reflect.DeepEqual(e, a) { + t.Errorf("%v: expected %#v, got %#v", name, e, a) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/resource/amount.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/resource/amount.go new file mode 100644 index 000000000000..49caf3c17365 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/resource/amount.go @@ -0,0 +1,298 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resource + +import ( + "math/big" + "strconv" + + inf "gopkg.in/inf.v0" +) + +// Scale is used for getting and setting the base-10 scaled value. +// Base-2 scales are omitted for mathematical simplicity. +// See Quantity.ScaledValue for more details. +type Scale int32 + +// infScale adapts a Scale value to an inf.Scale value. 
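The infScale adapter named above is easy to misread: resource's Scale is a plain base-10 exponent (Milli is -3), while inf.Dec's scale counts digits to the right of the decimal point, so the adapter negates. A small sketch of that inversion under those assumptions, with a local copy of the type rather than the vendored one:

```go
package main

import (
	"fmt"

	inf "gopkg.in/inf.v0"
)

// Scale is a local mirror of the type above: a base-10 exponent.
type Scale int32

// infScale negates, because inf.Dec's scale counts decimal places
// rather than acting as a power-of-ten exponent.
func (s Scale) infScale() inf.Scale {
	return inf.Scale(-s)
}

func main() {
	const Milli Scale = -3
	// 1500 * 10^-3 == 1.5, i.e. unscaled 1500 with inf scale 3.
	d := inf.NewDec(1500, Milli.infScale())
	fmt.Println(d.String()) // "1.500"
}
```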
+func (s Scale) infScale() inf.Scale { + return inf.Scale(-s) // inf.Scale is upside-down +} + +const ( + Nano Scale = -9 + Micro Scale = -6 + Milli Scale = -3 + Kilo Scale = 3 + Mega Scale = 6 + Giga Scale = 9 + Tera Scale = 12 + Peta Scale = 15 + Exa Scale = 18 +) + +var ( + Zero = int64Amount{} + + // Used by quantity strings - treat as read only + zeroBytes = []byte("0") +) + +// int64Amount represents a fixed precision numerator and arbitrary scale exponent. It is faster +// than operations on inf.Dec for values that can be represented as int64. +type int64Amount struct { + value int64 + scale Scale +} + +// Sign returns 0 if the value is zero, -1 if it is less than 0, or 1 if it is greater than 0. +func (a int64Amount) Sign() int { + switch { + case a.value == 0: + return 0 + case a.value > 0: + return 1 + default: + return -1 + } +} + +// AsInt64 returns the current amount as an int64 at scale 0, or false if the value cannot be +// represented in an int64 OR would result in a loss of precision. This method is intended as +// an optimization to avoid calling AsDec. +func (a int64Amount) AsInt64() (int64, bool) { + if a.scale == 0 { + return a.value, true + } + if a.scale < 0 { + // TODO: attempt to reduce factors, although it is assumed that factors are reduced prior + // to the int64Amount being created. + return 0, false + } + return positiveScaleInt64(a.value, a.scale) +} + +// AsScaledInt64 returns an int64 representing the value of this amount at the specified scale, +// rounding up, or false if that would result in overflow. (1e20).AsScaledInt64(1) would result +// in overflow because 1e19 is not representable as an int64. Note that setting a scale larger +// than the current value may result in loss of precision - i.e. (1e-6).AsScaledInt64(0) would +// return 1, because 0.000001 is rounded up to 1. +func (a int64Amount) AsScaledInt64(scale Scale) (result int64, ok bool) { + if a.scale < scale { + result, _ = negativeScaleInt64(a.value, scale-a.scale) + return result, true + } + return positiveScaleInt64(a.value, a.scale-scale) +} + +// AsDec returns an inf.Dec representation of this value. +func (a int64Amount) AsDec() *inf.Dec { + var base inf.Dec + base.SetUnscaled(a.value) + base.SetScale(inf.Scale(-a.scale)) + return &base +} + +// Cmp returns 0 if a and b are equal, 1 if a is greater than b, or -1 if a is less than b. +func (a int64Amount) Cmp(b int64Amount) int { + switch { + case a.scale == b.scale: + // compare only the unscaled portion + case a.scale > b.scale: + result, remainder, exact := divideByScaleInt64(b.value, a.scale-b.scale) + if !exact { + return a.AsDec().Cmp(b.AsDec()) + } + if result == a.value { + switch { + case remainder == 0: + return 0 + case remainder > 0: + return -1 + default: + return 1 + } + } + b.value = result + default: + result, remainder, exact := divideByScaleInt64(a.value, b.scale-a.scale) + if !exact { + return a.AsDec().Cmp(b.AsDec()) + } + if result == b.value { + switch { + case remainder == 0: + return 0 + case remainder > 0: + return 1 + default: + return -1 + } + } + a.value = result + } + + switch { + case a.value == b.value: + return 0 + case a.value < b.value: + return -1 + default: + return 1 + } +} + +// Add adds two int64Amounts together, matching scales. It will return false and not mutate +// a if overflow or underflow would result. 
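Before the implementation that follows, a worked example of the scale matching described above, as a standalone sketch with local types (overflow checks elided; the real method reports false instead of mutating). It mirrors the first case of the Add test table later in this diff: 100e1 + 10e2 = 200e1.

```go
package main

import "fmt"

// amount is a local stand-in for the unexported int64Amount.
type amount struct {
	value int64
	scale int32 // base-10 exponent
}

// add matches scales by multiplying the operand with the larger scale
// down to the smaller one, then adds the values.
func add(a, b amount) amount {
	for a.scale > b.scale {
		a.value *= 10
		a.scale--
	}
	for b.scale > a.scale {
		b.value *= 10
		b.scale--
	}
	return amount{a.value + b.value, a.scale}
}

func main() {
	// 100e1 + 10e2 == 1000 + 1000 == 2000, kept at the smaller scale: 200e1.
	fmt.Println(add(amount{100, 1}, amount{10, 2})) // {200 1}
}
```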
+func (a *int64Amount) Add(b int64Amount) bool { + switch { + case b.value == 0: + return true + case a.value == 0: + a.value = b.value + a.scale = b.scale + return true + case a.scale == b.scale: + c, ok := int64Add(a.value, b.value) + if !ok { + return false + } + a.value = c + case a.scale > b.scale: + c, ok := positiveScaleInt64(a.value, a.scale-b.scale) + if !ok { + return false + } + c, ok = int64Add(c, b.value) + if !ok { + return false + } + a.scale = b.scale + a.value = c + default: + c, ok := positiveScaleInt64(b.value, b.scale-a.scale) + if !ok { + return false + } + c, ok = int64Add(a.value, c) + if !ok { + return false + } + a.value = c + } + return true +} + +// Sub removes the value of b from the current amount, or returns false if underflow would result. +func (a *int64Amount) Sub(b int64Amount) bool { + return a.Add(int64Amount{value: -b.value, scale: b.scale}) +} + +// AsScale adjusts this amount to set a minimum scale, rounding up, and returns true iff no precision +// was lost. (1.1e5).AsScale(5) would return 1.1e5, but (1.1e5).AsScale(6) would return 1e6. +func (a int64Amount) AsScale(scale Scale) (int64Amount, bool) { + if a.scale >= scale { + return a, true + } + result, exact := negativeScaleInt64(a.value, scale-a.scale) + return int64Amount{value: result, scale: scale}, exact +} + +// AsCanonicalBytes accepts a buffer to write the base-10 string value of this field to, and returns +// either that buffer or a larger buffer and the current exponent of the value. The value is adjusted +// until the exponent is a multiple of 3 - i.e. 1.1e5 would return "110", 3. +func (a int64Amount) AsCanonicalBytes(out []byte) (result []byte, exponent int32) { + mantissa := a.value + exponent = int32(a.scale) + + amount, times := removeInt64Factors(mantissa, 10) + exponent += int32(times) + + // make sure exponent is a multiple of 3 + var ok bool + switch exponent % 3 { + case 1, -2: + amount, ok = int64MultiplyScale10(amount) + if !ok { + return infDecAmount{a.AsDec()}.AsCanonicalBytes(out) + } + exponent = exponent - 1 + case 2, -1: + amount, ok = int64MultiplyScale100(amount) + if !ok { + return infDecAmount{a.AsDec()}.AsCanonicalBytes(out) + } + exponent = exponent - 2 + } + return strconv.AppendInt(out, amount, 10), exponent +} + +// AsCanonicalBase1024Bytes accepts a buffer to write the base-1024 string value of this field to, and returns +// either that buffer or a larger buffer and the current exponent of the value. 2048 is 2 * 1024 ^ 1 and would +// return []byte("2048"), 1. +func (a int64Amount) AsCanonicalBase1024Bytes(out []byte) (result []byte, exponent int32) { + value, ok := a.AsScaledInt64(0) + if !ok { + return infDecAmount{a.AsDec()}.AsCanonicalBase1024Bytes(out) + } + amount, exponent := removeInt64Factors(value, 1024) + return strconv.AppendInt(out, amount, 10), exponent +} + +// infDecAmount implements common operations over an inf.Dec that are specific to the quantity +// representation. +type infDecAmount struct { + *inf.Dec +} + +// AsScale adjusts this amount to set a minimum scale, rounding up, and returns true iff no precision +// was lost. (1.1e5).AsScale(5) would return 1.1e5, but (1.1e5).AsScale(6) would return 1e6. 
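A quick numeric check of the round-up rule described above, sketched as a hypothetical helper rather than the vendored method; positive inputs are assumed for brevity.

```go
package main

import "fmt"

// roundUpToScale divides value by 10^steps, rounding away from zero,
// and reports whether the result is exact. This sketches the behavior
// documented for AsScale; it is not the vendored implementation.
func roundUpToScale(value int64, steps int32) (int64, bool) {
	exact := true
	for i := int32(0); i < steps; i++ {
		if value%10 != 0 {
			exact = false
		}
		value /= 10
	}
	if !exact {
		value++ // round away from zero (positive inputs assumed here)
	}
	return value, exact
}

func main() {
	// 1.1e5 == 110000; at a minimum scale of 6 it becomes 1e6, inexactly.
	fmt.Println(roundUpToScale(110000, 6)) // 1 false
	// 1000 at scale 3 is exact: 1e3.
	fmt.Println(roundUpToScale(1000, 3)) // 1 true
}
```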
+func (a infDecAmount) AsScale(scale Scale) (infDecAmount, bool) { + tmp := &inf.Dec{} + tmp.Round(a.Dec, scale.infScale(), inf.RoundUp) + return infDecAmount{tmp}, tmp.Cmp(a.Dec) == 0 +} + +// AsCanonicalBytes accepts a buffer to write the base-10 string value of this field to, and returns +// either that buffer or a larger buffer and the current exponent of the value. The value is adjusted +// until the exponent is a multiple of 3 - i.e. 1.1e5 would return "110", 3. +func (a infDecAmount) AsCanonicalBytes(out []byte) (result []byte, exponent int32) { + mantissa := a.Dec.UnscaledBig() + exponent = int32(-a.Dec.Scale()) + amount := big.NewInt(0).Set(mantissa) + // move all factors of 10 into the exponent for easy reasoning + amount, times := removeBigIntFactors(amount, bigTen) + exponent += times + + // make sure exponent is a multiple of 3 + for exponent%3 != 0 { + amount.Mul(amount, bigTen) + exponent-- + } + + return append(out, amount.String()...), exponent +} + +// AsCanonicalBase1024Bytes accepts a buffer to write the base-1024 string value of this field to, and returns +// either that buffer or a larger buffer and the current exponent of the value. 2048 is 2 * 1024 ^ 1 and would +// return []byte("2048"), 1. +func (a infDecAmount) AsCanonicalBase1024Bytes(out []byte) (result []byte, exponent int32) { + tmp := &inf.Dec{} + tmp.Round(a.Dec, 0, inf.RoundUp) + amount, exponent := removeBigIntFactors(tmp.UnscaledBig(), big1024) + return append(out, amount.String()...), exponent +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/resource/amount_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/resource/amount_test.go new file mode 100644 index 000000000000..acaad6307b61 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/resource/amount_test.go @@ -0,0 +1,111 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package resource + +import ( + "testing" +) + +func TestInt64AmountAsInt64(t *testing.T) { + for _, test := range []struct { + value int64 + scale Scale + result int64 + ok bool + }{ + {100, 0, 100, true}, + {100, 1, 1000, true}, + {100, -5, 0, false}, + {100, 100, 0, false}, + } { + r, ok := int64Amount{value: test.value, scale: test.scale}.AsInt64() + if r != test.result { + t.Errorf("%v: unexpected result: %d", test, r) + } + if ok != test.ok { + t.Errorf("%v: unexpected ok: %t", test, ok) + } + } +} + +func TestInt64AmountAdd(t *testing.T) { + for _, test := range []struct { + a, b, c int64Amount + ok bool + }{ + {int64Amount{value: 100, scale: 1}, int64Amount{value: 10, scale: 2}, int64Amount{value: 200, scale: 1}, true}, + {int64Amount{value: 100, scale: 1}, int64Amount{value: 1, scale: 2}, int64Amount{value: 110, scale: 1}, true}, + {int64Amount{value: 100, scale: 1}, int64Amount{value: 1, scale: 100}, int64Amount{value: 1, scale: 100}, false}, + {int64Amount{value: -5, scale: 2}, int64Amount{value: 50, scale: 1}, int64Amount{value: 0, scale: 1}, true}, + {int64Amount{value: -5, scale: 2}, int64Amount{value: 5, scale: 2}, int64Amount{value: 0, scale: 2}, true}, + + {int64Amount{value: mostPositive, scale: -1}, int64Amount{value: 1, scale: -1}, int64Amount{value: 0, scale: -1}, false}, + {int64Amount{value: mostPositive, scale: -1}, int64Amount{value: 0, scale: -1}, int64Amount{value: mostPositive, scale: -1}, true}, + {int64Amount{value: mostPositive / 10, scale: 1}, int64Amount{value: 10, scale: 0}, int64Amount{value: mostPositive, scale: -1}, false}, + } { + c := test.a + ok := c.Add(test.b) + if ok != test.ok { + t.Errorf("%v: unexpected ok: %t", test, ok) + } + if ok { + if c != test.c { + t.Errorf("%v: unexpected result: %d", test, c) + } + } else { + if c != test.a { + t.Errorf("%v: overflow addition mutated source: %d", test, c) + } + } + + // addition is commutative + c = test.b + if ok := c.Add(test.a); ok != test.ok { + t.Errorf("%v: unexpected ok: %t", test, ok) + } + if ok { + if c != test.c { + t.Errorf("%v: unexpected result: %d", test, c) + } + } else { + if c != test.b { + t.Errorf("%v: overflow addition mutated source: %d", test, c) + } + } + } +} +func TestInt64AsCanonicalString(t *testing.T) { + for _, test := range []struct { + value int64 + scale Scale + result string + exponent int32 + }{ + {100, 0, "100", 0}, + {100, 1, "1", 3}, + {100, -1, "10", 0}, + {10800, -10, "1080", -9}, + } { + r, exp := int64Amount{value: test.value, scale: test.scale}.AsCanonicalBytes(nil) + if string(r) != test.result { + t.Errorf("%v: unexpected result: %s", test, r) + } + if exp != test.exponent { + t.Errorf("%v: unexpected exponent: %d", test, exp) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/resource/deep_copy.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/resource/deep_copy.go new file mode 100644 index 000000000000..4efc0406f0d1 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/resource/deep_copy.go @@ -0,0 +1,32 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resource + +import ( + inf "gopkg.in/inf.v0" + + conversion "k8s.io/kubernetes/pkg/conversion" +) + +func DeepCopy_resource_Quantity(in Quantity, out *Quantity, c *conversion.Cloner) error { + *out = in + if in.d.Dec != nil { + tmp := &inf.Dec{} + out.d.Dec = tmp.Set(in.d.Dec) + } + return nil +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/resource/generated.pb.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/resource/generated.pb.go new file mode 100644 index 000000000000..cf9447a321f4 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/resource/generated.pb.go @@ -0,0 +1,46 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/pkg/api/resource/generated.proto +// DO NOT EDIT! + +/* + Package resource is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/pkg/api/resource/generated.proto + + It has these top-level messages: + Quantity +*/ +package resource + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +func (m *Quantity) Reset() { *m = Quantity{} } +func (*Quantity) ProtoMessage() {} + +func init() { + proto.RegisterType((*Quantity)(nil), "k8s.io.kubernetes.pkg.api.resource.Quantity") +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/resource/generated.proto b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/resource/generated.proto new file mode 100644 index 000000000000..8a1376c32e11 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/resource/generated.proto @@ -0,0 +1,93 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.kubernetes.pkg.api.resource; + +import "k8s.io/kubernetes/pkg/util/intstr/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "resource"; + +// Quantity is a fixed-point representation of a number. +// It provides convenient marshaling/unmarshaling in JSON and YAML, +// in addition to String() and Int64() accessors. 
+// +// The serialization format is: +// +// <quantity> ::= <signedNumber><suffix> +// (Note that <suffix> may be empty, from the "" case in <decimalSI>.) +// <digit> ::= 0 | 1 | ... | 9 +// <digits> ::= <digit> | <digit><digits> +// <number> ::= <digits> | <digits>.<digits> | <digits>. | .<digits> +// <sign> ::= "+" | "-" +// <signedNumber> ::= <number> | <sign><number> +// <suffix> ::= <binarySI> | <decimalExponent> | <decimalSI> +// <binarySI> ::= Ki | Mi | Gi | Ti | Pi | Ei +// (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html) +// <decimalSI> ::= m | "" | k | M | G | T | P | E +// (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.) +// <decimalExponent> ::= "e" <signedNumber> | "E" <signedNumber> +// +// No matter which of the three exponent forms is used, no quantity may represent +// a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal +// places. Numbers larger or more precise will be capped or rounded up. +// (E.g.: 0.1m will be rounded up to 1m.) +// This may be extended in the future if we require larger or smaller quantities. +// +// When a Quantity is parsed from a string, it will remember the type of suffix +// it had, and will use the same type again when it is serialized. +// +// Before serializing, Quantity will be put in "canonical form". +// This means that Exponent/suffix will be adjusted up or down (with a +// corresponding increase or decrease in Mantissa) such that: +// a. No precision is lost +// b. No fractional digits will be emitted +// c. The exponent (or suffix) is as large as possible. +// The sign will be omitted unless the number is negative. +// +// Examples: +// 1.5 will be serialized as "1500m" +// 1.5Gi will be serialized as "1536Mi" +// +// NOTE: We reserve the right to amend this canonical format, perhaps to +// allow 1.5 to be canonical. +// TODO: Remove above disclaimer after all bikeshedding about format is over, +// or after March 2015. +// +// Note that the quantity will NEVER be internally represented by a +// floating point number. That is the whole point of this exercise. +// +// Non-canonical values will still parse as long as they are well formed, +// but will be re-emitted in their canonical form. (So always use canonical +// form, or don't diff.) +// +// This format is intended to make it difficult to use these numbers without +// writing some sort of special handling code in the hopes that that will +// cause implementors to also use a fixed point implementation. +// +// +protobuf=true +// +protobuf.embed=string +// +protobuf.options.marshal=false +// +protobuf.options.(gogoproto.goproto_stringer)=false +message Quantity { + optional string string = 1; +} + diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/resource/math.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/resource/math.go new file mode 100644 index 000000000000..163aafa5db6a --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/resource/math.go @@ -0,0 +1,327 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resource + +import ( + "math/big" + + inf "gopkg.in/inf.v0" +) + +const ( + // maxInt64Factors is the highest value that will be checked when removing factors of 10 from an int64. 
+ // It is also the maximum decimal digits that can be represented with an int64. + maxInt64Factors = 18 +) + +var ( + // Commonly needed big.Int values-- treat as read only! + bigTen = big.NewInt(10) + bigZero = big.NewInt(0) + bigOne = big.NewInt(1) + bigThousand = big.NewInt(1000) + big1024 = big.NewInt(1024) + + // Commonly needed inf.Dec values-- treat as read only! + decZero = inf.NewDec(0, 0) + decOne = inf.NewDec(1, 0) + decMinusOne = inf.NewDec(-1, 0) + decThousand = inf.NewDec(1000, 0) + dec1024 = inf.NewDec(1024, 0) + decMinus1024 = inf.NewDec(-1024, 0) + + // Largest (in magnitude) number allowed. + maxAllowed = infDecAmount{inf.NewDec((1<<63)-1, 0)} // == max int64 + + // The maximum value we can represent milli-units for. + // Compare with the return value of Quantity.Value() to + // see if it's safe to use Quantity.MilliValue(). + MaxMilliValue = int64(((1 << 63) - 1) / 1000) +) + +const mostNegative = -(mostPositive + 1) +const mostPositive = 1<<63 - 1 + +// int64Add returns a+b, or false if that would overflow int64. +func int64Add(a, b int64) (int64, bool) { + c := a + b + switch { + case a > 0 && b > 0: + if c < 0 { + return 0, false + } + case a < 0 && b < 0: + if c > 0 { + return 0, false + } + if a == mostNegative && b == mostNegative { + return 0, false + } + } + return c, true +} + +// int64Multiply returns a*b, or false if that would overflow or underflow int64. +func int64Multiply(a, b int64) (int64, bool) { + if a == 0 || b == 0 || a == 1 || b == 1 { + return a * b, true + } + if a == mostNegative || b == mostNegative { + return 0, false + } + c := a * b + return c, c/b == a +} + +// int64MultiplyScale returns a*b, assuming b is greater than one, or false if that would overflow or underflow int64. +// Use when b is known to be greater than one. +func int64MultiplyScale(a int64, b int64) (int64, bool) { + if a == 0 || a == 1 { + return a * b, true + } + if a == mostNegative && b != 1 { + return 0, false + } + c := a * b + return c, c/b == a +} + +// int64MultiplyScale10 multiplies a by 10, or returns false if that would overflow. This method is faster than +// int64Multiply(a, 10) because the compiler can optimize constant factor multiplication. +func int64MultiplyScale10(a int64) (int64, bool) { + if a == 0 || a == 1 { + return a * 10, true + } + if a == mostNegative { + return 0, false + } + c := a * 10 + return c, c/10 == a +} + +// int64MultiplyScale100 multiplies a by 100, or returns false if that would overflow. This method is faster than +// int64Multiply(a, 100) because the compiler can optimize constant factor multiplication. +func int64MultiplyScale100(a int64) (int64, bool) { + if a == 0 || a == 1 { + return a * 100, true + } + if a == mostNegative { + return 0, false + } + c := a * 100 + return c, c/100 == a +} + +// int64MultiplyScale1000 multiplies a by 1000, or returns false if that would overflow. This method is faster than +// int64Multiply(a, 1000) because the compiler can optimize constant factor multiplication. +func int64MultiplyScale1000(a int64) (int64, bool) { + if a == 0 || a == 1 { + return a * 1000, true + } + if a == mostNegative { + return 0, false + } + c := a * 1000 + return c, c/1000 == a +} + +// positiveScaleInt64 multiplies base by 10^scale, returning false if the +// value overflows. Passing a negative scale is undefined. 
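The multiply helpers above all lean on the same post-hoc overflow test: compute c = a*b, then confirm c/b == a, special-casing 0, 1, and the most negative value (whose negation is unrepresentable). A compact demonstration of why the division check detects wraparound:

```go
package main

import (
	"fmt"
	"math"
)

// mul64 returns a*b and whether the product fits in an int64. Integer
// division only inverts the multiplication when no overflow occurred,
// so c/b == a is a reliable wraparound check once the edge cases are
// handled separately.
func mul64(a, b int64) (int64, bool) {
	if a == 0 || b == 0 || a == 1 || b == 1 {
		return a * b, true
	}
	if a == math.MinInt64 || b == math.MinInt64 {
		return 0, false // -MinInt64 is not representable
	}
	c := a * b
	return c, c/b == a
}

func main() {
	fmt.Println(mul64(1<<40, 1<<20)) // 1152921504606846976 true (2^60 fits)
	fmt.Println(mul64(1<<40, 1<<40)) // 0 false (2^80 wraps to 0 mod 2^64)
}
```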
+func positiveScaleInt64(base int64, scale Scale) (int64, bool) { + switch scale { + case 0: + return base, true + case 1: + return int64MultiplyScale10(base) + case 2: + return int64MultiplyScale100(base) + case 3: + return int64MultiplyScale1000(base) + case 6: + return int64MultiplyScale(base, 1000000) + case 9: + return int64MultiplyScale(base, 1000000000) + default: + value := base + var ok bool + for i := Scale(0); i < scale; i++ { + if value, ok = int64MultiplyScale(value, 10); !ok { + return 0, false + } + } + return value, true + } +} + +// negativeScaleInt64 reduces base by the provided scale, rounding up, until the +// value is zero or the scale is reached. Passing a negative scale is undefined. +// The value returned, if not exact, is rounded away from zero. +func negativeScaleInt64(base int64, scale Scale) (result int64, exact bool) { + if scale == 0 { + return base, true + } + + value := base + var fraction bool + for i := Scale(0); i < scale; i++ { + if !fraction && value%10 != 0 { + fraction = true + } + value = value / 10 + if value == 0 { + if fraction { + if base > 0 { + return 1, false + } + return -1, false + } + return 0, true + } + } + if fraction { + if base > 0 { + value += 1 + } else { + value += -1 + } + } + return value, !fraction +} + +func pow10Int64(b int64) int64 { + switch b { + case 0: + return 1 + case 1: + return 10 + case 2: + return 100 + case 3: + return 1000 + case 4: + return 10000 + case 5: + return 100000 + case 6: + return 1000000 + case 7: + return 10000000 + case 8: + return 100000000 + case 9: + return 1000000000 + case 10: + return 10000000000 + case 11: + return 100000000000 + case 12: + return 1000000000000 + case 13: + return 10000000000000 + case 14: + return 100000000000000 + case 15: + return 1000000000000000 + case 16: + return 10000000000000000 + case 17: + return 100000000000000000 + case 18: + return 1000000000000000000 + default: + return 0 + } +} + +// powInt64 raises a to the bth power. It is not overflow aware. +func powInt64(a, b int64) int64 { + p := int64(1) + for b > 0 { + if b&1 != 0 { + p *= a + } + b >>= 1 + a *= a + } + return p +} + +// divideByScaleInt64 returns the result of dividing base by 10^scale and the remainder, or +// false if no such division is possible. Dividing by negative scales is undefined. 
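The divide helper documented above promises base == result*10^scale + remainder whenever the scale is representable (fewer than 18 decimal places). A small check of that invariant, with a throwaway pow10 stand-in:

```go
package main

import "fmt"

// pow10 is a tiny stand-in for the pow10Int64 lookup table above.
func pow10(n int64) int64 {
	p := int64(1)
	for i := int64(0); i < n; i++ {
		p *= 10
	}
	return p
}

func main() {
	base, scale := int64(1234567), int64(3)
	divisor := pow10(scale)
	result, remainder := base/divisor, base%divisor
	// Invariant: base == result*10^scale + remainder.
	fmt.Println(result, remainder, result*divisor+remainder == base) // 1234 567 true
}
```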
+func divideByScaleInt64(base int64, scale Scale) (result, remainder int64, exact bool) { + if scale == 0 { + return base, 0, true + } + // the max scale representable in base 10 in an int64 is 18 decimal places + if scale >= 18 { + return 0, base, false + } + divisor := pow10Int64(int64(scale)) + return base / divisor, base % divisor, true +} + +// removeInt64Factors divides in a loop; the return values have the property that +// value == result * base ^ times +func removeInt64Factors(value int64, base int64) (result int64, times int32) { + times = 0 + result = value + negative := result < 0 + if negative { + result = -result + } + switch base { + // allow the compiler to optimize the common cases + case 10: + for result >= 10 && result%10 == 0 { + times++ + result = result / 10 + } + // allow the compiler to optimize the common cases + case 1024: + for result >= 1024 && result%1024 == 0 { + times++ + result = result / 1024 + } + default: + for result >= base && result%base == 0 { + times++ + result = result / base + } + } + if negative { + result = -result + } + return result, times +} + +// removeBigIntFactors divides in a loop; the return values have the property that +// d == result * factor ^ times +// d may be modified in place. +// If d == 0, then the return values will be (0, 0) +func removeBigIntFactors(d, factor *big.Int) (result *big.Int, times int32) { + q := big.NewInt(0) + m := big.NewInt(0) + for d.Cmp(bigZero) != 0 { + q.DivMod(d, factor, m) + if m.Cmp(bigZero) != 0 { + break + } + times++ + d, q = q, d + } + return d, times +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/resource/math_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/resource/math_test.go new file mode 100644 index 000000000000..0fdda12e0a1f --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/resource/math_test.go @@ -0,0 +1,211 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package resource + +import ( + "testing" +) + +func TestDetectOverflowAdd(t *testing.T) { + for _, test := range []struct { + a, b int64 + c int64 + ok bool + }{ + {0, 0, 0, true}, + {-1, 1, 0, true}, + {0, 1, 1, true}, + {2, 2, 4, true}, + {2, -2, 0, true}, + {-2, -2, -4, true}, + + {mostNegative, -1, 0, false}, + {mostNegative, 1, mostNegative + 1, true}, + {mostPositive, -1, mostPositive - 1, true}, + {mostPositive, 1, 0, false}, + + {mostNegative, mostPositive, -1, true}, + {mostPositive, mostNegative, -1, true}, + {mostPositive, mostPositive, 0, false}, + {mostNegative, mostNegative, 0, false}, + + {-mostPositive, mostNegative, 0, false}, + {mostNegative, -mostPositive, 0, false}, + {-mostPositive, -mostPositive, 0, false}, + } { + c, ok := int64Add(test.a, test.b) + if c != test.c { + t.Errorf("%v: unexpected result: %d", test, c) + } + if ok != test.ok { + t.Errorf("%v: unexpected overflow: %t", test, ok) + } + // addition is commutative + d, ok2 := int64Add(test.b, test.a) + if c != d || ok != ok2 { + t.Errorf("%v: not commutative: %d %t", test, d, ok2) + } + } +} + +func TestDetectOverflowMultiply(t *testing.T) { + for _, test := range []struct { + a, b int64 + c int64 + ok bool + }{ + {0, 0, 0, true}, + {-1, 1, -1, true}, + {-1, -1, 1, true}, + {1, 1, 1, true}, + {0, 1, 0, true}, + {1, 0, 0, true}, + {2, 2, 4, true}, + {2, -2, -4, true}, + {-2, -2, 4, true}, + + {mostNegative, -1, 0, false}, + {mostNegative, 1, mostNegative, true}, + {mostPositive, -1, -mostPositive, true}, + {mostPositive, 1, mostPositive, true}, + + {mostNegative, mostPositive, 0, false}, + {mostPositive, mostNegative, 0, false}, + {mostPositive, mostPositive, 1, false}, + {mostNegative, mostNegative, 0, false}, + + {-mostPositive, mostNegative, 0, false}, + {mostNegative, -mostPositive, 0, false}, + {-mostPositive, -mostPositive, 1, false}, + } { + c, ok := int64Multiply(test.a, test.b) + if c != test.c { + t.Errorf("%v: unexpected result: %d", test, c) + } + if ok != test.ok { + t.Errorf("%v: unexpected overflow: %t", test, ok) + } + // multiplication is commutative + d, ok2 := int64Multiply(test.b, test.a) + if c != d || ok != ok2 { + t.Errorf("%v: not commutative: %d %t", test, d, ok2) + } + } +} + +func TestDetectOverflowScale(t *testing.T) { + for _, a := range []int64{0, -1, 1, 10, -10, mostPositive, mostNegative, -mostPositive} { + for _, b := range []int64{1, 2, 10, 100, 1000, mostPositive} { + expect, expectOk := int64Multiply(a, b) + + c, ok := int64MultiplyScale(a, b) + if c != expect { + t.Errorf("%d*%d: unexpected result: %d", a, b, c) + } + if ok != expectOk { + t.Errorf("%d*%d: unexpected overflow: %t", a, b, ok) + } + } + for _, test := range []struct { + base int64 + fn func(a int64) (int64, bool) + }{ + {10, int64MultiplyScale10}, + {100, int64MultiplyScale100}, + {1000, int64MultiplyScale1000}, + } { + expect, expectOk := int64Multiply(a, test.base) + c, ok := test.fn(a) + if c != expect { + t.Errorf("%d*%d: unexpected result: %d", a, test.base, c) + } + if ok != expectOk { + t.Errorf("%d*%d: unexpected overflow: %t", a, test.base, ok) + } + } + } +} + +func TestRemoveInt64Factors(t *testing.T) { + for _, test := range []struct { + value int64 + max int64 + result int64 + scale int32 + }{ + {100, 10, 1, 2}, + {100, 10, 1, 2}, + {100, 100, 1, 1}, + {1, 10, 1, 0}, + } { + r, s := removeInt64Factors(test.value, test.max) + if r != test.result { + t.Errorf("%v: unexpected result: %d", test, r) + } + if s != test.scale { + t.Errorf("%v: unexpected scale: %d", test, s) + } + } +} + +func 
TestNegativeScaleInt64(t *testing.T) { + for _, test := range []struct { + base int64 + scale Scale + result int64 + exact bool + }{ + {1234567, 0, 1234567, true}, + {1234567, 1, 123457, false}, + {1234567, 2, 12346, false}, + {1234567, 3, 1235, false}, + {1234567, 4, 124, false}, + + {-1234567, 0, -1234567, true}, + {-1234567, 1, -123457, false}, + {-1234567, 2, -12346, false}, + {-1234567, 3, -1235, false}, + {-1234567, 4, -124, false}, + + {1000, 0, 1000, true}, + {1000, 1, 100, true}, + {1000, 2, 10, true}, + {1000, 3, 1, true}, + {1000, 4, 1, false}, + + {-1000, 0, -1000, true}, + {-1000, 1, -100, true}, + {-1000, 2, -10, true}, + {-1000, 3, -1, true}, + {-1000, 4, -1, false}, + + {0, 0, 0, true}, + {0, 1, 0, true}, + {0, 2, 0, true}, + + // negative scale is undefined behavior + {1000, -1, 1000, true}, + } { + result, exact := negativeScaleInt64(test.base, test.scale) + if result != test.result { + t.Errorf("%v: unexpected result: %d", test, result) + } + if exact != test.exact { + t.Errorf("%v: unexpected exact: %t", test, exact) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/resource/quantity.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/resource/quantity.go index aeb43c897676..ddff16436d14 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/resource/quantity.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/resource/quantity.go @@ -17,14 +17,17 @@ limitations under the License. package resource import ( + "bytes" "errors" "fmt" "math/big" "regexp" + "strconv" "strings" flag "github.com/spf13/pflag" - "speter.net/go/exp/math/dec/inf" + + inf "gopkg.in/inf.v0" ) // Quantity is a fixed-point representation of a number. @@ -85,19 +88,34 @@ import ( // cause implementors to also use a fixed point implementation. // // +protobuf=true -// +protobuf.embed=QuantityProto +// +protobuf.embed=string // +protobuf.options.marshal=false // +protobuf.options.(gogoproto.goproto_stringer)=false type Quantity struct { - // Amount is public, so you can manipulate it if the accessor - // functions are not sufficient. - Amount *inf.Dec + // i is the quantity in int64 scaled form, if d.Dec == nil + i int64Amount + // d is the quantity in inf.Dec form if d.Dec != nil + d infDecAmount + // s is the generated value of this quantity to avoid recalculation + s string // Change Format at will. See the comment for Canonicalize for // more details. Format } +// CanonicalValue allows a quantity amount to be converted to a string. +type CanonicalValue interface { + // AsCanonicalBytes returns a byte array representing the string representation + // of the value mantissa and an int32 representing its exponent in base-10. Callers may + // pass a byte slice to the method to avoid allocations. + AsCanonicalBytes(out []byte) ([]byte, int32) + // AsCanonicalBase1024Bytes returns a byte array representing the string representation + // of the value mantissa and an int32 representing its exponent in base-1024. Callers + // may pass a byte slice to the method to avoid allocations. + AsCanonicalBase1024Bytes(out []byte) ([]byte, int32) +} + // Format lists the three possible formattings of a quantity. type Format string @@ -114,26 +132,9 @@ func MustParse(str string) Quantity { if err != nil { panic(fmt.Errorf("cannot parse '%v': %v", str, err)) } - return *q + return q } -// Scale is used for getting and setting the base-10 scaled value. -// Base-2 scales are omitted for mathematical simplicity. -// See Quantity.ScaledValue for more details. 
-type Scale int - -const ( - Nano Scale = -9 - Micro Scale = -6 - Milli Scale = -3 - Kilo Scale = 3 - Mega Scale = 6 - Giga Scale = 9 - Tera Scale = 12 - Peta Scale = 15 - Exa Scale = 18 -) - const ( // splitREString is used to separate a number from its suffix; as such, // this is overly permissive, but that's OK-- it will be checked later. @@ -148,47 +149,200 @@ var ( ErrFormatWrong = errors.New("quantities must match the regular expression '" + splitREString + "'") ErrNumeric = errors.New("unable to parse numeric part of quantity") ErrSuffix = errors.New("unable to parse quantity's suffix") - - // Commonly needed big.Int values-- treat as read only! - bigTen = big.NewInt(10) - bigZero = big.NewInt(0) - bigOne = big.NewInt(1) - bigThousand = big.NewInt(1000) - big1024 = big.NewInt(1024) - - // Commonly needed inf.Dec values-- treat as read only! - decZero = inf.NewDec(0, 0) - decOne = inf.NewDec(1, 0) - decMinusOne = inf.NewDec(-1, 0) - decThousand = inf.NewDec(1000, 0) - dec1024 = inf.NewDec(1024, 0) - decMinus1024 = inf.NewDec(-1024, 0) - - // Largest (in magnitude) number allowed. - maxAllowed = inf.NewDec((1<<63)-1, 0) // == max int64 - - // The maximum value we can represent milli-units for. - // Compare with the return value of Quantity.Value() to - // see if it's safe to use Quantity.MilliValue(). - MaxMilliValue = int64(((1 << 63) - 1) / 1000) ) +// parseQuantityString is a fast scanner for quantity values. +func parseQuantityString(str string) (positive bool, value, num, denom, suffix string, err error) { + positive = true + pos := 0 + end := len(str) + + // handle leading sign + if pos < end { + switch str[0] { + case '-': + positive = false + pos++ + case '+': + pos++ + } + } + + // strip leading zeros +Zeroes: + for i := pos; ; i++ { + if i >= end { + num = "0" + value = num + return + } + switch str[i] { + case '0': + pos++ + default: + break Zeroes + } + } + + // extract the numerator +Num: + for i := pos; ; i++ { + if i >= end { + num = str[pos:end] + value = str[0:end] + return + } + switch str[i] { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + default: + num = str[pos:i] + pos = i + break Num + } + } + + // if we stripped all numerator positions, always return 0 + if len(num) == 0 { + num = "0" + } + + // handle a denominator + if pos < end && str[pos] == '.' { + pos++ + Denom: + for i := pos; ; i++ { + if i >= end { + denom = str[pos:end] + value = str[0:end] + return + } + switch str[i] { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + default: + denom = str[pos:i] + pos = i + break Denom + } + } + // TODO: we currently allow 1.G, but we may not want to in the future. + // if len(denom) == 0 { + // err = ErrFormatWrong + // return + // } + } + value = str[0:pos] + + // grab the elements of the suffix + suffixStart := pos + for i := pos; ; i++ { + if i >= end { + suffix = str[suffixStart:end] + return + } + if !strings.ContainsAny(str[i:i+1], "eEinumkKMGTP") { + pos = i + break + } + } + if pos < end { + switch str[pos] { + case '-', '+': + pos++ + } + } +Suffix: + for i := pos; ; i++ { + if i >= end { + suffix = str[suffixStart:end] + return + } + switch str[i] { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + default: + pos = i + break Suffix + } + } + // we encountered a non decimal in the Suffix loop, but the last character + // was not a valid exponent + err = ErrFormatWrong + return +} + // ParseQuantity turns str into a Quantity, or returns an error. 
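To make the scanner above concrete, here is the decomposition it produces for one representative input, written as a hypothetical package-internal test (not part of the vendored suite); the expected values follow directly from the code paths above:

```go
package resource

import "testing"

// TestParseQuantityStringSketch is an illustrative test of the scanner
// above; the sign is consumed separately, num holds the digits before
// the '.', denom the digits after it, and value the full numeric prefix.
func TestParseQuantityStringSketch(t *testing.T) {
	positive, value, num, denom, suffix, err := parseQuantityString("-1.5Gi")
	if err != nil {
		t.Fatal(err)
	}
	if positive || value != "-1.5" || num != "1" || denom != "5" || suffix != "Gi" {
		t.Errorf("got %v %q %q %q %q", positive, value, num, denom, suffix)
	}
}
```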
-func ParseQuantity(str string) (*Quantity, error) { - parts := splitRE.FindStringSubmatch(strings.TrimSpace(str)) - // regexp returns are entire match, followed by an entry for each () section. - if len(parts) != 3 { - return nil, ErrFormatWrong +func ParseQuantity(str string) (Quantity, error) { + if len(str) == 0 { + return Quantity{}, ErrFormatWrong + } + if str == "0" { + return Quantity{Format: DecimalSI, s: str}, nil } - amount := new(inf.Dec) - if _, ok := amount.SetString(parts[1]); !ok { - return nil, ErrNumeric + positive, value, num, denom, suf, err := parseQuantityString(str) + if err != nil { + return Quantity{}, err } - base, exponent, format, ok := quantitySuffixer.interpret(suffix(parts[2])) + base, exponent, format, ok := quantitySuffixer.interpret(suffix(suf)) if !ok { - return nil, ErrSuffix + return Quantity{}, ErrSuffix + } + + precision := int32(0) + scale := int32(0) + mantissa := int64(1) + switch format { + case DecimalExponent, DecimalSI: + scale = exponent + precision = maxInt64Factors - int32(len(num)+len(denom)) + case BinarySI: + scale = 0 + switch { + case exponent >= 0 && len(denom) == 0: + // only handle positive binary numbers with the fast path + mantissa = int64(int64(mantissa) << uint64(exponent)) + // 1Mi (2^20) has ~6 digits of decimal precision, so exponent*3/10 -1 is roughly the precision + precision = 15 - int32(len(num)) - int32(float32(exponent)*3/10) - 1 + default: + precision = -1 + } + } + + if precision >= 0 { + // if we have a denominator, shift the entire value to the left by the number of places in the + // denominator + scale -= int32(len(denom)) + if scale >= int32(Nano) { + shifted := num + denom + + var value int64 + value, err := strconv.ParseInt(shifted, 10, 64) + if err != nil { + return Quantity{}, ErrNumeric + } + if result, ok := int64Multiply(value, int64(mantissa)); ok { + if !positive { + result = -result + } + // if the number is in canonical form, reuse the string + switch format { + case BinarySI: + if exponent%10 == 0 && (value&0x07 != 0) { + return Quantity{i: int64Amount{value: result, scale: Scale(scale)}, Format: format, s: str}, nil + } + default: + if scale%3 == 0 && !strings.HasSuffix(shifted, "000") && shifted[0] != '0' { + return Quantity{i: int64Amount{value: result, scale: Scale(scale)}, Format: format, s: str}, nil + } + } + return Quantity{i: int64Amount{value: result, scale: Scale(scale)}, Format: format}, nil + } + } + } + + amount := new(inf.Dec) + if _, ok := amount.SetString(value); !ok { + return Quantity{}, ErrNumeric } // So that no one but us has to think about suffixes, remove it. @@ -216,9 +370,11 @@ func ParseQuantity(str string) (*Quantity, error) { } // The max is just a simple cap. - if amount.Cmp(maxAllowed) > 0 { - amount.Set(maxAllowed) + // TODO: this prevents accumulating quantities greater than int64, for instance quota across a cluster + if format == BinarySI && amount.Cmp(maxAllowed.Dec) > 0 { + amount.Set(maxAllowed.Dec) } + if format == BinarySI && amount.Cmp(decOne) < 0 && amount.Cmp(decZero) > 0 { // This avoids rounding and hopefully confusion, too. format = DecimalSI @@ -227,55 +383,32 @@ func ParseQuantity(str string) (*Quantity, error) { amount.Neg(amount) } - return &Quantity{amount, format}, nil + return Quantity{d: infDecAmount{amount}, Format: format}, nil } -// removeFactors divides in a loop; the return values have the property that -// d == result * factor ^ times -// d may be modified in place. 
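// A worked sketch of the fast path above, assuming the exported resource API:
// "1.5G" scans into num "1", denom "5", suffix "G" (base-10 exponent 9); the
// one-digit denominator shifts the scale down to 8, so the value is stored as
// int64Amount{value: 15, scale: 8} without allocating an inf.Dec.
//
//	q, err := resource.ParseQuantity("1.5G")
//	if err != nil {
//		panic(err)
//	}
//	fmt.Println(q.Value())      // 1500000000
//	fmt.Println(q.MilliValue()) // 1500000000000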
-// If d == 0, then the return values will be (0, 0) -func removeFactors(d, factor *big.Int) (result *big.Int, times int) { - q := big.NewInt(0) - m := big.NewInt(0) - for d.Cmp(bigZero) != 0 { - q.DivMod(d, factor, m) - if m.Cmp(bigZero) != 0 { - break - } - times++ - d, q = q, d - } - return d, times -} - -// Canonicalize returns the canonical form of q and its suffix (see comment on Quantity). +// CanonicalizeBytes returns the canonical form of q and its suffix (see comment on Quantity). // // Note about BinarySI: // * If q.Format is set to BinarySI and q.Amount represents a non-zero value between // -1 and +1, it will be emitted as if q.Format were DecimalSI. // * Otherwise, if q.Format is set to BinarySI, fractional parts of q.Amount will be // rounded up. (1.1i becomes 2i.) -func (q *Quantity) Canonicalize() (string, suffix) { - if q.Amount == nil { - return "0", "" - } - - // zero is zero always - if q.Amount.Cmp(&inf.Dec{}) == 0 { - return "0", "" +func (q *Quantity) CanonicalizeBytes(out []byte) (result, suffix []byte) { + if q.IsZero() { + return zeroBytes, nil } + var rounded CanonicalValue format := q.Format switch format { case DecimalExponent, DecimalSI: case BinarySI: - if q.Amount.Cmp(decMinus1024) > 0 && q.Amount.Cmp(dec1024) < 0 { + if q.CmpInt64(-1024) > 0 && q.CmpInt64(1024) < 0 { // This avoids rounding and hopefully confusion, too. format = DecimalSI } else { - tmp := &inf.Dec{} - tmp.Round(q.Amount, 0, inf.RoundUp) - if tmp.Cmp(q.Amount) != 0 { + var exact bool + if rounded, exact = q.AsScale(0); !exact { // Don't lose precision-- show as DecimalSI format = DecimalSI } @@ -288,125 +421,225 @@ func (q *Quantity) Canonicalize() (string, suffix) { // one of the other formats. switch format { case DecimalExponent, DecimalSI: - mantissa := q.Amount.UnscaledBig() - exponent := int(-q.Amount.Scale()) - amount := big.NewInt(0).Set(mantissa) - // move all factors of 10 into the exponent for easy reasoning - amount, times := removeFactors(amount, bigTen) - exponent += times - - // make sure exponent is a multiple of 3 - for exponent%3 != 0 { - amount.Mul(amount, bigTen) - exponent-- - } - - suffix, _ := quantitySuffixer.construct(10, exponent, format) - number := amount.String() + number, exponent := q.AsCanonicalBytes(out) + suffix, _ := quantitySuffixer.constructBytes(10, exponent, format) return number, suffix - case BinarySI: - tmp := &inf.Dec{} - tmp.Round(q.Amount, 0, inf.RoundUp) - - amount, exponent := removeFactors(tmp.UnscaledBig(), big1024) - suffix, _ := quantitySuffixer.construct(2, exponent*10, format) - number := amount.String() + default: + // format must be BinarySI + number, exponent := rounded.AsCanonicalBase1024Bytes(out) + suffix, _ := quantitySuffixer.constructBytes(2, exponent*10, format) return number, suffix } - return "0", "" } -// String formats the Quantity as a string. -func (q *Quantity) String() string { - number, suffix := q.Canonicalize() - return number + string(suffix) +// AsInt64 returns a representation of the current value as an int64 if a fast conversion +// is possible. If false is returned, callers must use the inf.Dec form of this quantity.
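// For example, a sketch of the intended contract:
//
//	if v, ok := q.AsInt64(); ok {
//		// v is exact; no inf.Dec math was required
//	} else {
//		d := q.AsDec() // fall back to the big-decimal form
//		_ = d
//	}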
+func (q *Quantity) AsInt64() (int64, bool) { + if q.d.Dec != nil { + return 0, false + } + return q.i.AsInt64() } -// Cmp compares q and y and returns: -// -// -1 if q < y -// 0 if q == y -// +1 if q > y -// -func (q *Quantity) Cmp(y Quantity) int { - if q.Amount == nil { - if y.Amount == nil { - return 0 - } - return -y.Amount.Sign() +// ToDec promotes the quantity in place to use an inf.Dec representation and returns itself. +func (q *Quantity) ToDec() *Quantity { + if q.d.Dec == nil { + q.d.Dec = q.i.AsDec() + q.i = int64Amount{} } - if y.Amount == nil { - return q.Amount.Sign() + return q +} + +// AsDec returns the quantity as represented by a scaled inf.Dec. +func (q *Quantity) AsDec() *inf.Dec { + if q.d.Dec != nil { + return q.d.Dec } - return q.Amount.Cmp(y.Amount) + q.d.Dec = q.i.AsDec() + q.i = int64Amount{} + return q.d.Dec } -func (q *Quantity) Add(y Quantity) error { - switch { - case y.Amount == nil: - // Adding 0: do nothing. - case q.Amount == nil: - q.Amount = &inf.Dec{} - return q.Add(y) - default: - // we want to preserve the format of the non-zero value - zero := &inf.Dec{} - if q.Amount.Cmp(zero) == 0 && y.Amount.Cmp(zero) != 0 { - q.Format = y.Format - } - q.Amount.Add(q.Amount, y.Amount) +// AsCanonicalBytes returns the canonical byte representation of this quantity as a mantissa +// and base 10 exponent. The out byte slice may be passed to the method to avoid an extra +// allocation. +func (q *Quantity) AsCanonicalBytes(out []byte) (result []byte, exponent int32) { + if q.d.Dec != nil { + return q.d.AsCanonicalBytes(out) } - return nil + return q.i.AsCanonicalBytes(out) } -func (q *Quantity) Sub(y Quantity) error { - switch { - case y.Amount == nil: - // Subtracting 0: do nothing. - case q.Amount == nil: - q.Amount = &inf.Dec{} - return q.Sub(y) - default: - // we want to preserve the format of the non-zero value - zero := &inf.Dec{} - if q.Amount.Cmp(zero) == 0 && y.Amount.Cmp(zero) != 0 { +// IsZero returns true if the quantity is equal to zero. +func (q *Quantity) IsZero() bool { + if q.d.Dec != nil { + return q.d.Dec.Sign() == 0 + } + return q.i.value == 0 +} + +// Sign returns 0 if the quantity is zero, -1 if the quantity is less than zero, or 1 if the +// quantity is greater than zero. +func (q *Quantity) Sign() int { + if q.d.Dec != nil { + return q.d.Dec.Sign() + } + return q.i.Sign() +} + +// AsScale returns the current value, rounded up to the provided scale, and returns +// false if the scale resulted in a loss of precision. +func (q *Quantity) AsScale(scale Scale) (CanonicalValue, bool) { + if q.d.Dec != nil { + return q.d.AsScale(scale) + } + return q.i.AsScale(scale) +} + +// RoundUp updates the quantity to the provided scale, ensuring that the value is at +// least 1. False is returned if the rounding operation resulted in a loss of precision. +// Negative numbers are rounded away from zero (-9 scale 1 rounds to -10). +func (q *Quantity) RoundUp(scale Scale) bool { + if q.d.Dec != nil { + q.s = "" + d, exact := q.d.AsScale(scale) + q.d = d + return exact + } + // avoid clearing the string value if we have already calculated it + if q.i.scale >= scale { + return true + } + q.s = "" + i, exact := q.i.AsScale(scale) + q.i = i + return exact +} + +// Add adds the provided y quantity to the current value. If the current value is zero, +// the format of the quantity will be updated to the format of y.
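// For example, a sketch of the intended semantics:
//
//	x := resource.Quantity{Format: resource.DecimalSI} // zero value
//	x.Add(resource.MustParse("1Gi"))
//	fmt.Println(x.String()) // "1Gi": the zero quantity adopted y's format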
+func (q *Quantity) Add(y Quantity) { + q.s = "" + if q.d.Dec == nil && y.d.Dec == nil { + if q.i.value == 0 { q.Format = y.Format } - q.Amount.Sub(q.Amount, y.Amount) + if q.i.Add(y.i) { + return + } + } else if q.IsZero() { + q.Format = y.Format } - return nil + q.ToDec().d.Dec.Add(q.d.Dec, y.AsDec()) } -// Neg sets q to the negative value of y. -// It updates the format of q to match y. -func (q *Quantity) Neg(y Quantity) error { - switch { - case y.Amount == nil: - *q = y - case q.Amount == nil: - q.Amount = &inf.Dec{} - fallthrough - default: - q.Amount.Neg(y.Amount) +// Sub subtracts the provided quantity from the current value in place. If the current +// value is zero, the format of the quantity will be updated to the format of y. +func (q *Quantity) Sub(y Quantity) { + q.s = "" + if q.IsZero() { q.Format = y.Format } - return nil + if q.d.Dec == nil && y.d.Dec == nil && q.i.Sub(y.i) { + return + } + q.ToDec().d.Dec.Sub(q.d.Dec, y.AsDec()) +} + +// Cmp returns 0 if the quantity is equal to y, -1 if the quantity is less than y, or 1 if the +// quantity is greater than y. +func (q *Quantity) Cmp(y Quantity) int { + if q.d.Dec == nil && y.d.Dec == nil { + return q.i.Cmp(y.i) + } + return q.AsDec().Cmp(y.AsDec()) +} + +// CmpInt64 returns 0 if the quantity is equal to y, -1 if the quantity is less than y, or 1 if the +// quantity is greater than y. +func (q *Quantity) CmpInt64(y int64) int { + if q.d.Dec != nil { + return q.d.Dec.Cmp(inf.NewDec(y, inf.Scale(0))) + } + return q.i.Cmp(int64Amount{value: y}) +} + +// Neg sets quantity to be the negative value of itself. +func (q *Quantity) Neg() { + q.s = "" + if q.d.Dec == nil { + q.i.value = -q.i.value + return + } + q.d.Dec.Neg(q.d.Dec) +} + +// int64QuantityExpectedBytes is the expected width in bytes of the canonical string representation +// of most Quantity values. +const int64QuantityExpectedBytes = 18 + +// String formats the Quantity as a string, caching the result if not calculated. +// String is an expensive operation and caching this result significantly reduces the cost of +// normal parse / marshal operations on Quantity. +func (q *Quantity) String() string { + if len(q.s) == 0 { + result := make([]byte, 0, int64QuantityExpectedBytes) + number, suffix := q.CanonicalizeBytes(result) + number = append(number, suffix...) + q.s = string(number) + } + return q.s } // MarshalJSON implements the json.Marshaller interface. func (q Quantity) MarshalJSON() ([]byte, error) { - return []byte(`"` + q.String() + `"`), nil + if len(q.s) > 0 { + out := make([]byte, len(q.s)+2) + out[0], out[len(out)-1] = '"', '"' + copy(out[1:], q.s) + return out, nil + } + result := make([]byte, int64QuantityExpectedBytes, int64QuantityExpectedBytes) + result[0] = '"' + number, suffix := q.CanonicalizeBytes(result[1:1]) + // if the same slice was returned to us that we passed in, avoid another allocation by copying number into + // the source slice and returning that + if len(number) > 0 && &number[0] == &result[1] && (len(number)+len(suffix)+2) <= int64QuantityExpectedBytes { + number = append(number, suffix...) + number = append(number, '"') + return result[:1+len(number)], nil + } + // if CanonicalizeBytes needed more space than our slice provided, we may need to allocate again so use + // append + result = result[:1] + result = append(result, number...) + result = append(result, suffix...) + result = append(result, '"') + return result, nil } // UnmarshalJSON implements the json.Unmarshaller interface. 
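// For example, a sketch of the accepted wire forms (decoding is shared with
// ParseQuantity, and the cached canonical string drives the output side):
//
//	var q resource.Quantity
//	_ = q.UnmarshalJSON([]byte(`"1500m"`)) // quoted strings are parsed
//	_ = q.UnmarshalJSON([]byte(`1500`))    // bare numbers are accepted too
//	b, _ := json.Marshal(&q)
//	fmt.Println(string(b)) // "1500"
//
// A literal null clears the numeric value instead of returning an error.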
func (q *Quantity) UnmarshalJSON(value []byte) error { - str := string(value) - parsed, err := ParseQuantity(strings.Trim(str, `"`)) + l := len(value) + if l == 4 && bytes.Equal(value, []byte("null")) { + q.d.Dec = nil + q.i = int64Amount{} + return nil + } + if l < 2 { + return ErrFormatWrong + } + if value[0] == '"' && value[l-1] == '"' { + value = value[1 : l-1] + } + + parsed, err := ParseQuantity(string(value)) if err != nil { return err } + // This copy is safe because parsed will not be referred to again. - *q = *parsed + *q = parsed return nil } @@ -414,7 +647,7 @@ func (q *Quantity) UnmarshalJSON(value []byte) error { // value in the given format. func NewQuantity(value int64, format Format) *Quantity { return &Quantity{ - Amount: inf.NewDec(value, 0), + i: int64Amount{value: value}, Format: format, } } @@ -425,7 +658,7 @@ func NewQuantity(value int64, format Format) *Quantity { // values x where (-1 < x < 1) && (x != 0). func NewMilliQuantity(value int64, format Format) *Quantity { return &Quantity{ - Amount: inf.NewDec(value, 3), + i: int64Amount{value: value, scale: -3}, Format: format, } } @@ -434,7 +667,7 @@ func NewMilliQuantity(value int64, format Format) *Quantity { // value * 10^scale in DecimalSI format. func NewScaledQuantity(value int64, scale Scale) *Quantity { return &Quantity{ - Amount: inf.NewDec(value, scale.infScale()), + i: int64Amount{value: value, scale: scale}, Format: DecimalSI, } } @@ -453,10 +686,12 @@ func (q *Quantity) MilliValue() int64 { // ScaledValue returns the value of ceil(q * 10^scale); this could overflow an int64. // To detect overflow, call Value() first and verify the expected magnitude. func (q *Quantity) ScaledValue(scale Scale) int64 { - if q.Amount == nil { - return 0 + if q.d.Dec == nil { + i, _ := q.i.AsScaledInt64(scale) + return i } - return scaledValue(q.Amount.UnscaledBig(), int(q.Amount.Scale()), int(scale.infScale())) + dec := q.d.Dec + return scaledValue(dec.UnscaledBig(), int(dec.Scale()), int(scale.infScale())) } // Set sets q's value to be value. @@ -471,22 +706,25 @@ func (q *Quantity) SetMilli(value int64) { // SetScaled sets q's value to be value * 10^scale func (q *Quantity) SetScaled(value int64, scale Scale) { - if q.Amount == nil { - q.Amount = &inf.Dec{} - } - q.Amount.SetUnscaled(value) - q.Amount.SetScale(scale.infScale()) + q.s = "" + q.d.Dec = nil + q.i = int64Amount{value: value, scale: scale} } // Copy is a convenience function that makes a deep copy for you. Non-deep // copies of quantities share pointers and you will regret that. func (q *Quantity) Copy() *Quantity { - if q.Amount == nil { - return NewQuantity(0, q.Format) + if q.d.Dec == nil { + return &Quantity{ + s: q.s, + i: q.i, + Format: q.Format, + } } tmp := &inf.Dec{} return &Quantity{ - Amount: tmp.Set(q.Amount), + s: q.s, + d: infDecAmount{tmp.Set(q.d.Dec)}, Format: q.Format, } } @@ -503,7 +741,7 @@ func (qf qFlag) Set(val string) error { return err } // This copy is OK because q will not be referenced again. - *qf.dest = *q + *qf.dest = q return nil } @@ -530,8 +768,3 @@ func QuantityFlag(flagName, defaultValue, description string) *Quantity { func NewQuantityFlagValue(q *Quantity) flag.Value { return qFlag{q} } - -// infScale adapts a Scale value to an inf.Scale value. 
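// A sketch tying the constructors and scaled accessors above together,
// assuming the Scale constants (Nano, Micro, Milli, ...) exercised by the
// tests later in this diff:
//
//	cpu := resource.NewMilliQuantity(250, resource.DecimalSI)
//	fmt.Println(cpu.String())                    // "250m"
//	fmt.Println(cpu.ScaledValue(resource.Micro)) // 250000
//	clone := cpu.Copy()                          // deep copy; safe to mutate
//	clone.SetMilli(1500)
//	fmt.Println(cpu.String(), clone.String())    // "250m" "1500m"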
-func (s Scale) infScale() inf.Scale { - return inf.Scale(-s) // inf.Scale is upside-down -} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/resource/quantity_example_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/resource/quantity_example_test.go new file mode 100644 index 000000000000..48c3d2555f0b --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/resource/quantity_example_test.go @@ -0,0 +1,59 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resource_test + +import ( + "fmt" + + "k8s.io/kubernetes/pkg/api/resource" +) + +func ExampleFormat() { + memorySize := resource.NewQuantity(5*1024*1024*1024, resource.BinarySI) + fmt.Printf("memorySize = %v\n", memorySize) + + diskSize := resource.NewQuantity(5*1000*1000*1000, resource.DecimalSI) + fmt.Printf("diskSize = %v\n", diskSize) + + cores := resource.NewMilliQuantity(5300, resource.DecimalSI) + fmt.Printf("cores = %v\n", cores) + + // Output: + // memorySize = 5Gi + // diskSize = 5G + // cores = 5300m +} + +func ExampleMustParse() { + memorySize := resource.MustParse("5Gi") + fmt.Printf("memorySize = %v (%v)\n", memorySize.Value(), memorySize.Format) + + diskSize := resource.MustParse("5G") + fmt.Printf("diskSize = %v (%v)\n", diskSize.Value(), diskSize.Format) + + cores := resource.MustParse("5300m") + fmt.Printf("milliCores = %v (%v)\n", cores.MilliValue(), cores.Format) + + cores2 := resource.MustParse("5.4") + fmt.Printf("milliCores = %v (%v)\n", cores2.MilliValue(), cores2.Format) + + // Output: + // memorySize = 5368709120 (BinarySI) + // diskSize = 5000000000 (DecimalSI) + // milliCores = 5300 (DecimalSI) + // milliCores = 5400 (DecimalSI) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/resource/quantity_proto.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/resource/quantity_proto.go index 986d846083dc..240294682cff 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/resource/quantity_proto.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/resource/quantity_proto.go @@ -1,5 +1,3 @@ -// +build proto - /* Copyright 2015 The Kubernetes Authors All rights reserved. @@ -19,62 +17,268 @@ limitations under the License. package resource import ( - "math/big" + "fmt" + "io" - "speter.net/go/exp/math/dec/inf" + "github.com/gogo/protobuf/proto" ) -// QuantityProto is a struct that is equivalent to Quantity, but intended for -// protobuf marshalling/unmarshalling. It is generated into a serialization -// that matches Quantity. Do not use in Go structs. -// -// +protobuf=true -type QuantityProto struct { - // The format of the quantity - Format Format - // The scale dimension of the value - Scale int32 - // Bigint is serialized as a raw bytes array - Bigint []byte -} +var _ proto.Sizer = &Quantity{} -// ProtoTime returns the Time as a new ProtoTime value. 
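// A sketch of the wire format implemented below: Quantity now marshals exactly
// like a protobuf message with a single string field (field number 1, wire
// type 2, i.e. length-delimited), so the encoding of "5Gi" is the tag byte
// 0x0a, the length 3, and the UTF-8 text.
//
//	q := resource.MustParse("5Gi")
//	data, _ := q.Marshal()
//	fmt.Printf("% x\n", data) // 0a 03 35 47 69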
-func (q *Quantity) QuantityProto() *QuantityProto { - if q == nil { - return &QuantityProto{} +func (m *Quantity) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err } - p := &QuantityProto{ - Format: q.Format, + return data[:n], nil +} + +// MarshalTo is a customized version of the generated Protobuf marshaler for a struct +// with a single string field. +func (m *Quantity) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + + data[i] = 0xa + i++ + // BEGIN CUSTOM MARSHAL + out := m.String() + i = encodeVarintGenerated(data, i, uint64(len(out))) + i += copy(data[i:], out) + // END CUSTOM MARSHAL + + return i, nil +} + +func encodeVarintGenerated(data []byte, offset int, v uint64) int { + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ } - if q.Amount != nil { - p.Scale = int32(q.Amount.Scale()) - p.Bigint = q.Amount.UnscaledBig().Bytes() + data[offset] = uint8(v) + return offset + 1 +} + +func (m *Quantity) Size() (n int) { + var l int + _ = l + + // BEGIN CUSTOM SIZE + l = len(m.String()) + // END CUSTOM SIZE + + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } } - return p + return n } -// Size implements the protobuf marshalling interface. -func (q *Quantity) Size() (n int) { return q.QuantityProto().Size() } +// Unmarshal is a customized version of the generated Protobuf unmarshaler for a struct +// with a single string field. +func (m *Quantity) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Quantity: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Quantity: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field String_", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(data[iNdEx:postIndex]) + + // BEGIN CUSTOM DECODE + p, err := ParseQuantity(s) + if err != nil { + return err + } + *m = p + // END CUSTOM DECODE -// Reset implements the protobuf marshalling interface.
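// A worked sketch of the varint helpers above: values below 128 occupy a
// single byte, while 300 (binary 1_0010_1100) is emitted low seven bits first
// with the continuation bit set on every byte but the last.
//
//	buf := make([]byte, 2)
//	n := encodeVarintGenerated(buf, 0, 300)
//	fmt.Printf("% x\n", buf[:n])   // ac 02
//	fmt.Println(sovGenerated(300)) // 2, the encoded width in bytes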
-func (q *Quantity) Unmarshal(data []byte) error { - p := QuantityProto{} - if err := p.Unmarshal(data); err != nil { - return err + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF } - q.Format = p.Format - b := big.NewInt(0) - b.SetBytes(p.Bigint) - q.Amount = inf.NewDecBig(b, inf.Scale(p.Scale)) return nil } -// Marshal implements the protobuf marshalling interface. -func (q *Quantity) Marshal() (data []byte, err error) { - return q.QuantityProto().Marshal() +func skipGenerated(data []byte) (n int, err error) { + l := len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if data[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(data[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") } -// MarshalTo implements the protobuf marshalling interface. -func (q *Quantity) MarshalTo(data []byte) (int, error) { - return q.QuantityProto().MarshalTo(data) -} +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/resource/quantity_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/resource/quantity_test.go new file mode 100644 index 000000000000..77d54a0d7f54 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/resource/quantity_test.go @@ -0,0 +1,1300 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resource + +import ( + "encoding/json" + "math/rand" + "testing" + + fuzz "github.com/google/gofuzz" + "github.com/spf13/pflag" + + inf "gopkg.in/inf.v0" +) + +var ( + testQuantityFlag = QuantityFlag("quantityFlag", "1M", "dummy flag for testing the quantity flag mechanism") +) + +var useInfDec bool + +func amount(i int64, exponent int) infDecAmount { + // See the below test-- scale is the negative of an exponent. + return infDecAmount{inf.NewDec(i, inf.Scale(-exponent))} +} + +func dec(i int64, exponent int) infDecAmount { + // See the below test-- scale is the negative of an exponent. + return infDecAmount{inf.NewDec(i, inf.Scale(-exponent))} +} + +func decQuantity(i int64, exponent int, format Format) Quantity { + return Quantity{d: dec(i, exponent), Format: format} +} + +func intQuantity(i int64, exponent Scale, format Format) Quantity { + return Quantity{i: int64Amount{value: i, scale: exponent}, Format: format} +} + +func TestDec(t *testing.T) { + table := []struct { + got infDecAmount + expect string + }{ + {dec(1, 0), "1"}, + {dec(1, 1), "10"}, + {dec(5, 2), "500"}, + {dec(8, 3), "8000"}, + {dec(2, 0), "2"}, + {dec(1, -1), "0.1"}, + {dec(3, -2), "0.03"}, + {dec(4, -3), "0.004"}, + } + + for _, item := range table { + if e, a := item.expect, item.got.Dec.String(); e != a { + t.Errorf("expected %v, got %v", e, a) + } + } +} + +// TestQuantityParseZero ensures that when a 0 quantity is passed, its string value is 0 +func TestQuantityParseZero(t *testing.T) { + zero := MustParse("0") + if expected, actual := "0", zero.String(); expected != actual { + t.Errorf("Expected %v, actual %v", expected, actual) + } +} + +// TestQuantityAddZeroPreservesSuffix verifies that a suffix is preserved +// independent of the order of operations when adding a zero and non-zero val +func TestQuantityAddZeroPreservesSuffix(t *testing.T) { + testValues := []string{"100m", "1Gi"} + zero := MustParse("0") + for _, testValue := range testValues { + value := MustParse(testValue) + v1 := *value.Copy() + // ensure non-zero + zero = non-zero (suffix preserved) + v1.Add(zero) + // ensure zero + non-zero = non-zero (suffix preserved) + v2 := *zero.Copy() + v2.Add(value) + + if v1.String() != testValue { + t.Errorf("Expected %v, actual %v", testValue, v1.String()) + continue + } + if v2.String() != testValue { + t.Errorf("Expected %v, actual %v", testValue, v2.String()) + } + } +} + +// TestQuantitySubZeroPreservesSuffix verifies that a suffix is preserved +// independent of the order of operations when subtracting a zero and non-zero val +func TestQuantitySubZeroPreservesSuffix(t *testing.T) { + testValues := []string{"100m", "1Gi"} + zero := MustParse("0") + for _, testValue := range testValues { + value := MustParse(testValue) + v1 := *value.Copy() + // ensure non-zero - zero = non-zero (suffix preserved) + v1.Sub(zero) + // ensure we preserved the input value + if v1.String() != testValue { + t.Errorf("Expected %v, actual %v", testValue, v1.String()) + } + + // ensure zero - non-zero = -non-zero (suffix preserved) + v2 := *zero.Copy() + v2.Sub(value) + negVal := *value.Copy() + negVal.Neg() + if 
v2.String() != negVal.String() { + t.Errorf("Expected %v, actual %v", negVal.String(), v2.String()) + } + } +} + +// Verifies that you get 0 as canonical value if internal value is 0, and not a 0 with a suffix +func TestQuantityCanonicalizeZero(t *testing.T) { + val := MustParse("1000m") + val.i.Sub(int64Amount{value: 1}) + zero := Quantity{i: val.i, Format: DecimalSI} + if expected, actual := "0", zero.String(); expected != actual { + t.Errorf("Expected %v, actual %v", expected, actual) + } +} + +func TestQuantityCmp(t *testing.T) { + table := []struct { + x string + y string + expect int + }{ + {"0", "0", 0}, + {"100m", "50m", 1}, + {"50m", "100m", -1}, + {"10000T", "100Gi", 1}, + } + for _, testCase := range table { + q1 := MustParse(testCase.x) + q2 := MustParse(testCase.y) + if result := q1.Cmp(q2); result != testCase.expect { + t.Errorf("X: %v, Y: %v, Expected: %v, Actual: %v", testCase.x, testCase.y, testCase.expect, result) + } + } + + nils := []struct { + x *inf.Dec + y *inf.Dec + expect int + }{ + {dec(0, 0).Dec, dec(0, 0).Dec, 0}, + {nil, dec(0, 0).Dec, 0}, + {dec(0, 0).Dec, nil, 0}, + {nil, nil, 0}, + {nil, dec(10, 0).Dec, -1}, + {nil, dec(-10, 0).Dec, 1}, + {dec(10, 0).Dec, nil, 1}, + {dec(-10, 0).Dec, nil, -1}, + } + for _, nilCase := range nils { + q1 := Quantity{d: infDecAmount{nilCase.x}, Format: DecimalSI} + q2 := Quantity{d: infDecAmount{nilCase.y}, Format: DecimalSI} + if result := q1.Cmp(q2); result != nilCase.expect { + t.Errorf("X: %v, Y: %v, Expected: %v, Actual: %v", nilCase.x, nilCase.y, nilCase.expect, result) + } + } +} + +func TestParseQuantityString(t *testing.T) { + table := []struct { + input string + positive bool + value string + num, denom, suffix string + }{ + {"0.025Ti", true, "0.025", "0", "025", "Ti"}, + {"1.025Ti", true, "1.025", "1", "025", "Ti"}, + {"-1.025Ti", false, "-1.025", "1", "025", "Ti"}, + {".", true, ".", "0", "", ""}, + {"-.", false, "-.", "0", "", ""}, + {"1E-3", true, "1", "1", "", "E-3"}, + } + for _, test := range table { + positive, value, num, denom, suffix, err := parseQuantityString(test.input) + if err != nil { + t.Errorf("%s: error: %v", test.input, err) + continue + } + if positive != test.positive || value != test.value || num != test.num || denom != test.denom || suffix != test.suffix { + t.Errorf("%s: unmatched: %t %q %q %q %q", test.input, positive, value, num, denom, suffix) + } + } +} + +func TestQuantityParse(t *testing.T) { + if _, err := ParseQuantity(""); err == nil { + t.Errorf("expected empty string to return error") + } + + table := []struct { + input string + expect Quantity + }{ + {"0", decQuantity(0, 0, DecimalSI)}, + {"0n", decQuantity(0, 0, DecimalSI)}, + {"0u", decQuantity(0, 0, DecimalSI)}, + {"0m", decQuantity(0, 0, DecimalSI)}, + {"0Ki", decQuantity(0, 0, BinarySI)}, + {"0k", decQuantity(0, 0, DecimalSI)}, + {"0Mi", decQuantity(0, 0, BinarySI)}, + {"0M", decQuantity(0, 0, DecimalSI)}, + {"0Gi", decQuantity(0, 0, BinarySI)}, + {"0G", decQuantity(0, 0, DecimalSI)}, + {"0Ti", decQuantity(0, 0, BinarySI)}, + {"0T", decQuantity(0, 0, DecimalSI)}, + + // Binary suffixes + {"1Ki", decQuantity(1024, 0, BinarySI)}, + {"8Ki", decQuantity(8*1024, 0, BinarySI)}, + {"7Mi", decQuantity(7*1024*1024, 0, BinarySI)}, + {"6Gi", decQuantity(6*1024*1024*1024, 0, BinarySI)}, + {"5Ti", decQuantity(5*1024*1024*1024*1024, 0, BinarySI)}, + {"4Pi", decQuantity(4*1024*1024*1024*1024*1024, 0, BinarySI)}, + {"3Ei", decQuantity(3*1024*1024*1024*1024*1024*1024, 0, BinarySI)}, + + {"10Ti", decQuantity(10*1024*1024*1024*1024, 0, BinarySI)}, + {"100Ti",
decQuantity(100*1024*1024*1024*1024, 0, BinarySI)}, + + // Decimal suffixes + {"5n", decQuantity(5, -9, DecimalSI)}, + {"4u", decQuantity(4, -6, DecimalSI)}, + {"3m", decQuantity(3, -3, DecimalSI)}, + {"9", decQuantity(9, 0, DecimalSI)}, + {"8k", decQuantity(8, 3, DecimalSI)}, + {"50k", decQuantity(5, 4, DecimalSI)}, + {"7M", decQuantity(7, 6, DecimalSI)}, + {"6G", decQuantity(6, 9, DecimalSI)}, + {"5T", decQuantity(5, 12, DecimalSI)}, + {"40T", decQuantity(4, 13, DecimalSI)}, + {"300T", decQuantity(3, 14, DecimalSI)}, + {"2P", decQuantity(2, 15, DecimalSI)}, + {"1E", decQuantity(1, 18, DecimalSI)}, + + // Decimal exponents + {"1E-3", decQuantity(1, -3, DecimalExponent)}, + {"1e3", decQuantity(1, 3, DecimalExponent)}, + {"1E6", decQuantity(1, 6, DecimalExponent)}, + {"1e9", decQuantity(1, 9, DecimalExponent)}, + {"1E12", decQuantity(1, 12, DecimalExponent)}, + {"1e15", decQuantity(1, 15, DecimalExponent)}, + {"1E18", decQuantity(1, 18, DecimalExponent)}, + + // Nonstandard but still parsable + {"1e14", decQuantity(1, 14, DecimalExponent)}, + {"1e13", decQuantity(1, 13, DecimalExponent)}, + {"1e3", decQuantity(1, 3, DecimalExponent)}, + {"100.035k", decQuantity(100035, 0, DecimalSI)}, + + // Things that look like floating point + {"0.001", decQuantity(1, -3, DecimalSI)}, + {"0.0005k", decQuantity(5, -1, DecimalSI)}, + {"0.005", decQuantity(5, -3, DecimalSI)}, + {"0.05", decQuantity(5, -2, DecimalSI)}, + {"0.5", decQuantity(5, -1, DecimalSI)}, + {"0.00050k", decQuantity(5, -1, DecimalSI)}, + {"0.00500", decQuantity(5, -3, DecimalSI)}, + {"0.05000", decQuantity(5, -2, DecimalSI)}, + {"0.50000", decQuantity(5, -1, DecimalSI)}, + {"0.5e0", decQuantity(5, -1, DecimalExponent)}, + {"0.5e-1", decQuantity(5, -2, DecimalExponent)}, + {"0.5e-2", decQuantity(5, -3, DecimalExponent)}, + {"0.5e0", decQuantity(5, -1, DecimalExponent)}, + {"10.035M", decQuantity(10035, 3, DecimalSI)}, + + {"1.2e3", decQuantity(12, 2, DecimalExponent)}, + {"1.3E+6", decQuantity(13, 5, DecimalExponent)}, + {"1.40e9", decQuantity(14, 8, DecimalExponent)}, + {"1.53E12", decQuantity(153, 10, DecimalExponent)}, + {"1.6e15", decQuantity(16, 14, DecimalExponent)}, + {"1.7E18", decQuantity(17, 17, DecimalExponent)}, + + {"9.01", decQuantity(901, -2, DecimalSI)}, + {"8.1k", decQuantity(81, 2, DecimalSI)}, + {"7.123456M", decQuantity(7123456, 0, DecimalSI)}, + {"6.987654321G", decQuantity(6987654321, 0, DecimalSI)}, + {"5.444T", decQuantity(5444, 9, DecimalSI)}, + {"40.1T", decQuantity(401, 11, DecimalSI)}, + {"300.2T", decQuantity(3002, 11, DecimalSI)}, + {"2.5P", decQuantity(25, 14, DecimalSI)}, + {"1.01E", decQuantity(101, 16, DecimalSI)}, + + // Things that saturate/round + {"3.001n", decQuantity(4, -9, DecimalSI)}, + {"1.1E-9", decQuantity(2, -9, DecimalExponent)}, + {"0.0000000001", decQuantity(1, -9, DecimalSI)}, + {"0.0000000005", decQuantity(1, -9, DecimalSI)}, + {"0.00000000050", decQuantity(1, -9, DecimalSI)}, + {"0.5e-9", decQuantity(1, -9, DecimalExponent)}, + {"0.9n", decQuantity(1, -9, DecimalSI)}, + {"0.00000012345", decQuantity(124, -9, DecimalSI)}, + {"0.00000012354", decQuantity(124, -9, DecimalSI)}, + {"9Ei", Quantity{d: maxAllowed, Format: BinarySI}}, + {"9223372036854775807Ki", Quantity{d: maxAllowed, Format: BinarySI}}, + {"12E", decQuantity(12, 18, DecimalSI)}, + + // We'll accept fractional binary stuff, too. 
+ {"100.035Ki", decQuantity(10243584, -2, BinarySI)}, + {"0.5Mi", decQuantity(.5*1024*1024, 0, BinarySI)}, + {"0.05Gi", decQuantity(536870912, -1, BinarySI)}, + {"0.025Ti", decQuantity(274877906944, -1, BinarySI)}, + + // Things written by trolls + {"0.000000000001Ki", decQuantity(2, -9, DecimalSI)}, // rounds up, changes format + {".001", decQuantity(1, -3, DecimalSI)}, + {".0001k", decQuantity(100, -3, DecimalSI)}, + {"1.", decQuantity(1, 0, DecimalSI)}, + {"1.G", decQuantity(1, 9, DecimalSI)}, + } + + for _, asDec := range []bool{false, true} { + for _, item := range table { + got, err := ParseQuantity(item.input) + if err != nil { + t.Errorf("%v: unexpected error: %v", item.input, err) + continue + } + if asDec { + got.AsDec() + } + + if e, a := item.expect, got; e.Cmp(a) != 0 { + t.Errorf("%v: expected %v, got %v", item.input, e.String(), a.String()) + } + if e, a := item.expect.Format, got.Format; e != a { + t.Errorf("%v: expected %#v, got %#v", item.input, e, a) + } + + if asDec { + if i, ok := got.AsInt64(); i != 0 || ok { + t.Errorf("%v: expected inf.Dec to return false for AsInt64: %d", item.input, i) + } + continue + } + i, ok := item.expect.AsInt64() + if !ok { + continue + } + j, ok := got.AsInt64() + if !ok { + if got.d.Dec == nil && got.i.scale >= 0 { + t.Errorf("%v: is an int64Amount, but can't return AsInt64: %v", item.input, got) + } + continue + } + if i != j { + t.Errorf("%v: expected equivalent representation as int64: %d %d", item.input, i, j) + } + } + + for _, item := range table { + got, err := ParseQuantity(item.input) + if err != nil { + t.Errorf("%v: unexpected error: %v", item.input, err) + continue + } + + if asDec { + got.AsDec() + } + + // verify that we can decompose the input and get the same result by building up from the base. + positive, _, num, denom, suffix, err := parseQuantityString(item.input) + if err != nil { + t.Errorf("%v: unexpected error: %v", item.input, err) + continue + } + if got.Sign() >= 0 && !positive || got.Sign() < 0 && positive { + t.Errorf("%v: positive was incorrect: %t", item.input, positive) + continue + } + var value string + if !positive { + value = "-" + } + value += num + if len(denom) > 0 { + value += "." 
+ denom + } + value += suffix + if len(value) == 0 { + t.Errorf("%v: did not parse correctly, %q %q %q", item.input, num, denom, suffix) + } + expected, err := ParseQuantity(value) + if err != nil { + t.Errorf("%v: unexpected error for %s: %v", item.input, value, err) + continue + } + if expected.Cmp(got) != 0 { + t.Errorf("%v: not the same as %s", item.input, value) + continue + } + } + + // Try the negative version of everything + desired := &inf.Dec{} + expect := Quantity{d: infDecAmount{Dec: desired}} + for _, item := range table { + got, err := ParseQuantity("-" + item.input) + if err != nil { + t.Errorf("-%v: unexpected error: %v", item.input, err) + continue + } + if asDec { + got.AsDec() + } + + expected := item.expect + desired.Neg(expected.AsDec()) + + if e, a := expect, got; e.Cmp(a) != 0 { + t.Errorf("%v: expected %s, got %s", item.input, e.String(), a.String()) + } + if e, a := expected.Format, got.Format; e != a { + t.Errorf("%v: expected %#v, got %#v", item.input, e, a) + } + } + + // Try everything with an explicit + + for _, item := range table { + got, err := ParseQuantity("+" + item.input) + if err != nil { + t.Errorf("-%v: unexpected error: %v", item.input, err) + continue + } + if asDec { + got.AsDec() + } + + if e, a := item.expect, got; e.Cmp(a) != 0 { + t.Errorf("%v(%t): expected %s, got %s", item.input, asDec, e.String(), a.String()) + } + if e, a := item.expect.Format, got.Format; e != a { + t.Errorf("%v: expected %#v, got %#v", item.input, e, a) + } + } + } + + invalid := []string{ + "1.1.M", + "1+1.0M", + "0.1mi", + "0.1am", + "aoeu", + ".5i", + "1i", + "-3.01i", + "-3.01e-", + } + for _, item := range invalid { + _, err := ParseQuantity(item) + if err == nil { + t.Errorf("%v parsed unexpectedly", item) + } + } +} + +func TestQuantityRoundUp(t *testing.T) { + table := []struct { + in string + scale Scale + expect Quantity + ok bool + }{ + {"9.01", -3, decQuantity(901, -2, DecimalSI), true}, + {"9.01", -2, decQuantity(901, -2, DecimalSI), true}, + {"9.01", -1, decQuantity(91, -1, DecimalSI), false}, + {"9.01", 0, decQuantity(10, 0, DecimalSI), false}, + {"9.01", 1, decQuantity(10, 0, DecimalSI), false}, + {"9.01", 2, decQuantity(100, 0, DecimalSI), false}, + + {"-9.01", -3, decQuantity(-901, -2, DecimalSI), true}, + {"-9.01", -2, decQuantity(-901, -2, DecimalSI), true}, + {"-9.01", -1, decQuantity(-91, -1, DecimalSI), false}, + {"-9.01", 0, decQuantity(-10, 0, DecimalSI), false}, + {"-9.01", 1, decQuantity(-10, 0, DecimalSI), false}, + {"-9.01", 2, decQuantity(-100, 0, DecimalSI), false}, + } + + for _, asDec := range []bool{false, true} { + for _, item := range table { + got, err := ParseQuantity(item.in) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + expect := *item.expect.Copy() + if asDec { + got.AsDec() + } + if ok := got.RoundUp(item.scale); ok != item.ok { + t.Errorf("%s(%d,%t): unexpected ok: %t", item.in, item.scale, asDec, ok) + } + if got.Cmp(expect) != 0 { + t.Errorf("%s(%d,%t): unexpected round: %s vs %s", item.in, item.scale, asDec, got.String(), expect.String()) + } + } + } +} + +func TestQuantityCmpInt64AndDec(t *testing.T) { + table := []struct { + a, b Quantity + cmp int + }{ + {intQuantity(901, -2, DecimalSI), intQuantity(901, -2, DecimalSI), 0}, + {intQuantity(90, -1, DecimalSI), intQuantity(901, -2, DecimalSI), -1}, + {intQuantity(901, -2, DecimalSI), intQuantity(900, -2, DecimalSI), 1}, + {intQuantity(0, 0, DecimalSI), intQuantity(0, 0, DecimalSI), 0}, + {intQuantity(0, 1, DecimalSI), intQuantity(0, -1, DecimalSI), 0}, + 
{intQuantity(0, -1, DecimalSI), intQuantity(0, 1, DecimalSI), 0}, + {intQuantity(800, -3, DecimalSI), intQuantity(1, 0, DecimalSI), -1}, + {intQuantity(800, -3, DecimalSI), intQuantity(79, -2, DecimalSI), 1}, + + {intQuantity(mostPositive, 0, DecimalSI), intQuantity(1, -1, DecimalSI), 1}, + {intQuantity(mostPositive, 1, DecimalSI), intQuantity(1, 0, DecimalSI), 1}, + {intQuantity(mostPositive, 1, DecimalSI), intQuantity(1, 1, DecimalSI), 1}, + {intQuantity(mostPositive, 1, DecimalSI), intQuantity(0, 1, DecimalSI), 1}, + {intQuantity(mostPositive, -16, DecimalSI), intQuantity(1, 3, DecimalSI), -1}, + + {intQuantity(mostNegative, 0, DecimalSI), intQuantity(0, 0, DecimalSI), -1}, + {intQuantity(mostNegative, -18, DecimalSI), intQuantity(-1, 0, DecimalSI), -1}, + {intQuantity(mostNegative, -19, DecimalSI), intQuantity(-1, 0, DecimalSI), 1}, + + {intQuantity(1*1000000*1000000*1000000, -17, DecimalSI), intQuantity(1, 1, DecimalSI), 0}, + {intQuantity(1*1000000*1000000*1000000, -17, DecimalSI), intQuantity(-10, 0, DecimalSI), 1}, + {intQuantity(-1*1000000*1000000*1000000, -17, DecimalSI), intQuantity(-10, 0, DecimalSI), 0}, + {intQuantity(1*1000000*1000000*1000000, -17, DecimalSI), intQuantity(1, 0, DecimalSI), 1}, + + {intQuantity(1*1000000*1000000*1000000+1, -17, DecimalSI), intQuantity(1, 1, DecimalSI), 1}, + {intQuantity(1*1000000*1000000*1000000-1, -17, DecimalSI), intQuantity(1, 1, DecimalSI), -1}, + } + + for _, item := range table { + if cmp := item.a.Cmp(item.b); cmp != item.cmp { + t.Errorf("%#v: unexpected Cmp: %d", item, cmp) + } + if cmp := item.b.Cmp(item.a); cmp != -item.cmp { + t.Errorf("%#v: unexpected inverted Cmp: %d", item, cmp) + } + } + + for _, item := range table { + a, b := *item.a.Copy(), *item.b.Copy() + a.AsDec() + if cmp := a.Cmp(b); cmp != item.cmp { + t.Errorf("%#v: unexpected Cmp: %d", item, cmp) + } + if cmp := b.Cmp(a); cmp != -item.cmp { + t.Errorf("%#v: unexpected inverted Cmp: %d", item, cmp) + } + } + + for _, item := range table { + a, b := *item.a.Copy(), *item.b.Copy() + b.AsDec() + if cmp := a.Cmp(b); cmp != item.cmp { + t.Errorf("%#v: unexpected Cmp: %d", item, cmp) + } + if cmp := b.Cmp(a); cmp != -item.cmp { + t.Errorf("%#v: unexpected inverted Cmp: %d", item, cmp) + } + } + + for _, item := range table { + a, b := *item.a.Copy(), *item.b.Copy() + a.AsDec() + b.AsDec() + if cmp := a.Cmp(b); cmp != item.cmp { + t.Errorf("%#v: unexpected Cmp: %d", item, cmp) + } + if cmp := b.Cmp(a); cmp != -item.cmp { + t.Errorf("%#v: unexpected inverted Cmp: %d", item, cmp) + } + } +} + +func TestQuantityNeg(t *testing.T) { + table := []struct { + a Quantity + out string + }{ + {intQuantity(901, -2, DecimalSI), "-9010m"}, + {decQuantity(901, -2, DecimalSI), "-9010m"}, + } + + for i, item := range table { + out := *item.a.Copy() + out.Neg() + if out.Cmp(item.a) == 0 { + t.Errorf("%d: negating an item should not mutate the source: %s", i, out.String()) + } + if out.String() != item.out { + t.Errorf("%d: negating did not equal exact value: %s", i, out.String()) + } + } +} + +func TestQuantityString(t *testing.T) { + table := []struct { + in Quantity + expect string + alternate string + }{ + {decQuantity(1024*1024*1024, 0, BinarySI), "1Gi", "1024Mi"}, + {decQuantity(300*1024*1024, 0, BinarySI), "300Mi", "307200Ki"}, + {decQuantity(6*1024, 0, BinarySI), "6Ki", ""}, + {decQuantity(1001*1024*1024*1024, 0, BinarySI), "1001Gi", "1025024Mi"}, + {decQuantity(1024*1024*1024*1024, 0, BinarySI), "1Ti", "1024Gi"}, + {decQuantity(5, 0, BinarySI), "5", "5000m"}, + {decQuantity(500, -3, 
BinarySI), "500m", "0.5"}, + {decQuantity(1, 9, DecimalSI), "1G", "1000M"}, + {decQuantity(1000, 6, DecimalSI), "1G", "0.001T"}, + {decQuantity(1000000, 3, DecimalSI), "1G", ""}, + {decQuantity(1000000000, 0, DecimalSI), "1G", ""}, + {decQuantity(1, -3, DecimalSI), "1m", "1000u"}, + {decQuantity(80, -3, DecimalSI), "80m", ""}, + {decQuantity(1080, -3, DecimalSI), "1080m", "1.08"}, + {decQuantity(108, -2, DecimalSI), "1080m", "1080000000n"}, + {decQuantity(10800, -4, DecimalSI), "1080m", ""}, + {decQuantity(300, 6, DecimalSI), "300M", ""}, + {decQuantity(1, 12, DecimalSI), "1T", ""}, + {decQuantity(1234567, 6, DecimalSI), "1234567M", ""}, + {decQuantity(1234567, -3, BinarySI), "1234567m", ""}, + {decQuantity(3, 3, DecimalSI), "3k", ""}, + {decQuantity(1025, 0, BinarySI), "1025", ""}, + {decQuantity(0, 0, DecimalSI), "0", ""}, + {decQuantity(0, 0, BinarySI), "0", ""}, + {decQuantity(1, 9, DecimalExponent), "1e9", ".001e12"}, + {decQuantity(1, -3, DecimalExponent), "1e-3", "0.001e0"}, + {decQuantity(1, -9, DecimalExponent), "1e-9", "1000e-12"}, + {decQuantity(80, -3, DecimalExponent), "80e-3", ""}, + {decQuantity(300, 6, DecimalExponent), "300e6", ""}, + {decQuantity(1, 12, DecimalExponent), "1e12", ""}, + {decQuantity(1, 3, DecimalExponent), "1e3", ""}, + {decQuantity(3, 3, DecimalExponent), "3e3", ""}, + {decQuantity(3, 3, DecimalSI), "3k", ""}, + {decQuantity(0, 0, DecimalExponent), "0", "00"}, + {decQuantity(1, -9, DecimalSI), "1n", ""}, + {decQuantity(80, -9, DecimalSI), "80n", ""}, + {decQuantity(1080, -9, DecimalSI), "1080n", ""}, + {decQuantity(108, -8, DecimalSI), "1080n", ""}, + {decQuantity(10800, -10, DecimalSI), "1080n", ""}, + {decQuantity(1, -6, DecimalSI), "1u", ""}, + {decQuantity(80, -6, DecimalSI), "80u", ""}, + {decQuantity(1080, -6, DecimalSI), "1080u", ""}, + } + for _, item := range table { + got := item.in.String() + if e, a := item.expect, got; e != a { + t.Errorf("%#v: expected %v, got %v", item.in, e, a) + } + q, err := ParseQuantity(item.expect) + if err != nil { + t.Errorf("%#v: unexpected error: %v", item.expect, err) + } + if len(q.s) == 0 || q.s != item.expect { + t.Errorf("%#v: did not copy canonical string on parse: %s", item.expect, q.s) + } + if len(item.alternate) == 0 { + continue + } + q, err = ParseQuantity(item.alternate) + if err != nil { + t.Errorf("%#v: unexpected error: %v", item.expect, err) + continue + } + if len(q.s) != 0 { + t.Errorf("%#v: unexpected nested string: %v", item.expect, q.s) + } + if q.String() != item.expect { + t.Errorf("%#v: unexpected alternate canonical: %v", item.expect, q.String()) + } + if len(q.s) == 0 || q.s != item.expect { + t.Errorf("%#v: did not set canonical string on ToString: %s", item.expect, q.s) + } + } + desired := &inf.Dec{} // Avoid modifying the values in the table. 
+ for _, item := range table { + if item.in.Cmp(Quantity{}) == 0 { + // Don't expect it to print "-0" ever + continue + } + q := item.in + q.d = infDecAmount{desired.Neg(q.AsDec())} + if e, a := "-"+item.expect, q.String(); e != a { + t.Errorf("%#v: expected %v, got %v", item.in, e, a) + } + } +} + +func TestQuantityParseEmit(t *testing.T) { + table := []struct { + in string + expect string + }{ + {"1Ki", "1Ki"}, + {"1Mi", "1Mi"}, + {"1Gi", "1Gi"}, + {"1024Mi", "1Gi"}, + {"1000M", "1G"}, + {".001Ki", "1024m"}, + {".000001Ki", "1024u"}, + {".000000001Ki", "1024n"}, + {".000000000001Ki", "2n"}, + } + + for _, item := range table { + q, err := ParseQuantity(item.in) + if err != nil { + t.Errorf("Couldn't parse %v", item.in) + continue + } + if e, a := item.expect, q.String(); e != a { + t.Errorf("%#v: expected %v, got %v", item.in, e, a) + } + } + for _, item := range table { + q, err := ParseQuantity("-" + item.in) + if err != nil { + t.Errorf("Couldn't parse %v", item.in) + continue + } + if q.Cmp(Quantity{}) == 0 { + continue + } + if e, a := "-"+item.expect, q.String(); e != a { + t.Errorf("%#v: expected %v, got %v (%#v)", item.in, e, a, q.i) + } + } +} + +var fuzzer = fuzz.New().Funcs( + func(q *Quantity, c fuzz.Continue) { + q.i = Zero + if c.RandBool() { + q.Format = BinarySI + if c.RandBool() { + dec := &inf.Dec{} + q.d = infDecAmount{Dec: dec} + dec.SetScale(0) + dec.SetUnscaled(c.Int63()) + return + } + // Be sure to test cases like 1Mi + dec := &inf.Dec{} + q.d = infDecAmount{Dec: dec} + dec.SetScale(0) + dec.SetUnscaled(c.Int63n(1024) << uint(10*c.Intn(5))) + return + } + if c.RandBool() { + q.Format = DecimalSI + } else { + q.Format = DecimalExponent + } + if c.RandBool() { + dec := &inf.Dec{} + q.d = infDecAmount{Dec: dec} + dec.SetScale(inf.Scale(c.Intn(4))) + dec.SetUnscaled(c.Int63()) + return + } + // Be sure to test cases like 1M + dec := &inf.Dec{} + q.d = infDecAmount{Dec: dec} + dec.SetScale(inf.Scale(3 - c.Intn(15))) + dec.SetUnscaled(c.Int63n(1000)) + }, +) + +func TestJSON(t *testing.T) { + for i := 0; i < 500; i++ { + q := &Quantity{} + fuzzer.Fuzz(q) + b, err := json.Marshal(q) + if err != nil { + t.Errorf("error encoding %v: %v", q, err) + continue + } + q2 := &Quantity{} + err = json.Unmarshal(b, q2) + if err != nil { + t.Logf("%d: %s", i, string(b)) + t.Errorf("%v: error decoding %v: %v", q, string(b), err) + } + if q2.Cmp(*q) != 0 { + t.Errorf("Expected equal: %v, %v (json was '%v')", q, q2, string(b)) + } + } +} + +func TestMilliNewSet(t *testing.T) { + table := []struct { + value int64 + format Format + expect string + exact bool + }{ + {1, DecimalSI, "1m", true}, + {1000, DecimalSI, "1", true}, + {1234000, DecimalSI, "1234", true}, + {1024, BinarySI, "1024m", false}, // Format changes + {1000000, "invalidFormatDefaultsToExponent", "1e3", true}, + {1024 * 1024, BinarySI, "1048576m", false}, // Format changes + } + + for _, item := range table { + q := NewMilliQuantity(item.value, item.format) + if e, a := item.expect, q.String(); e != a { + t.Errorf("Expected %v, got %v; %#v", e, a, q) + } + if !item.exact { + continue + } + q2, err := ParseQuantity(q.String()) + if err != nil { + t.Errorf("Round trip failed on %v", q) + } + if e, a := item.value, q2.MilliValue(); e != a { + t.Errorf("Expected %v, got %v", e, a) + } + } + + for _, item := range table { + q := NewQuantity(0, item.format) + q.SetMilli(item.value) + if e, a := item.expect, q.String(); e != a { + t.Errorf("Set: Expected %v, got %v; %#v", e, a, q) + } + } +} + +func TestNewSet(t *testing.T) { + 
table := []struct { + value int64 + format Format + expect string + }{ + {1, DecimalSI, "1"}, + {1000, DecimalSI, "1k"}, + {1234000, DecimalSI, "1234k"}, + {1024, BinarySI, "1Ki"}, + {1000000, "invalidFormatDefaultsToExponent", "1e6"}, + {1024 * 1024, BinarySI, "1Mi"}, + } + + for _, asDec := range []bool{false, true} { + for _, item := range table { + q := NewQuantity(item.value, item.format) + if asDec { + q.ToDec() + } + if e, a := item.expect, q.String(); e != a { + t.Errorf("Expected %v, got %v; %#v", e, a, q) + } + q2, err := ParseQuantity(q.String()) + if err != nil { + t.Errorf("Round trip failed on %v", q) + } + if e, a := item.value, q2.Value(); e != a { + t.Errorf("Expected %v, got %v", e, a) + } + } + + for _, item := range table { + q := NewQuantity(0, item.format) + q.Set(item.value) + if asDec { + q.ToDec() + } + if e, a := item.expect, q.String(); e != a { + t.Errorf("Set: Expected %v, got %v; %#v", e, a, q) + } + } + } +} + +func TestNewScaledSet(t *testing.T) { + table := []struct { + value int64 + scale Scale + expect string + }{ + {1, Nano, "1n"}, + {1000, Nano, "1u"}, + {1, Micro, "1u"}, + {1000, Micro, "1m"}, + {1, Milli, "1m"}, + {1000, Milli, "1"}, + {1, 0, "1"}, + {0, Nano, "0"}, + {0, Micro, "0"}, + {0, Milli, "0"}, + {0, 0, "0"}, + } + + for _, item := range table { + q := NewScaledQuantity(item.value, item.scale) + if e, a := item.expect, q.String(); e != a { + t.Errorf("Expected %v, got %v; %#v", e, a, q) + } + q2, err := ParseQuantity(q.String()) + if err != nil { + t.Errorf("Round trip failed on %v", q) + } + if e, a := item.value, q2.ScaledValue(item.scale); e != a { + t.Errorf("Expected %v, got %v", e, a) + } + q3 := NewQuantity(0, DecimalSI) + q3.SetScaled(item.value, item.scale) + if q.Cmp(*q3) != 0 { + t.Errorf("Expected %v and %v to be equal", q, q3) + } + } +} + +func TestScaledValue(t *testing.T) { + table := []struct { + fromScale Scale + toScale Scale + expected int64 + }{ + {Nano, Nano, 1}, + {Nano, Micro, 1}, + {Nano, Milli, 1}, + {Nano, 0, 1}, + {Micro, Nano, 1000}, + {Micro, Micro, 1}, + {Micro, Milli, 1}, + {Micro, 0, 1}, + {Milli, Nano, 1000 * 1000}, + {Milli, Micro, 1000}, + {Milli, Milli, 1}, + {Milli, 0, 1}, + {0, Nano, 1000 * 1000 * 1000}, + {0, Micro, 1000 * 1000}, + {0, Milli, 1000}, + {0, 0, 1}, + } + + for _, item := range table { + q := NewScaledQuantity(1, item.fromScale) + if e, a := item.expected, q.ScaledValue(item.toScale); e != a { + t.Errorf("%v to %v: Expected %v, got %v", item.fromScale, item.toScale, e, a) + } + } +} + +func TestUninitializedNoCrash(t *testing.T) { + var q Quantity + + q.Value() + q.MilliValue() + q.Copy() + _ = q.String() + q.MarshalJSON() +} + +func TestCopy(t *testing.T) { + q := NewQuantity(5, DecimalSI) + c := q.Copy() + c.Set(6) + if q.Value() == 6 { + t.Errorf("Copy didn't") + } +} + +func TestQFlagSet(t *testing.T) { + qf := qFlag{&Quantity{}} + qf.Set("1Ki") + if e, a := "1Ki", qf.String(); e != a { + t.Errorf("Unexpected result %v != %v", e, a) + } +} + +func TestQFlagIsPFlag(t *testing.T) { + var pfv pflag.Value = qFlag{} + if e, a := "quantity", pfv.Type(); e != a { + t.Errorf("Unexpected result %v != %v", e, a) + } +} + +func TestSub(t *testing.T) { + tests := []struct { + a Quantity + b Quantity + expected Quantity + }{ + {decQuantity(10, 0, DecimalSI), decQuantity(1, 1, DecimalSI), decQuantity(0, 0, DecimalSI)}, + {decQuantity(10, 0, DecimalSI), decQuantity(1, 0, BinarySI), decQuantity(9, 0, DecimalSI)}, + {decQuantity(10, 0, BinarySI), decQuantity(1, 0, DecimalSI), decQuantity(9, 0, 
BinarySI)}, + {Quantity{Format: DecimalSI}, decQuantity(50, 0, DecimalSI), decQuantity(-50, 0, DecimalSI)}, + {decQuantity(50, 0, DecimalSI), Quantity{Format: DecimalSI}, decQuantity(50, 0, DecimalSI)}, + {Quantity{Format: DecimalSI}, Quantity{Format: DecimalSI}, decQuantity(0, 0, DecimalSI)}, + } + + for i, test := range tests { + test.a.Sub(test.b) + if test.a.Cmp(test.expected) != 0 { + t.Errorf("[%d] Expected %q, got %q", i, test.expected.String(), test.a.String()) + } + } +} + +func TestNeg(t *testing.T) { + tests := []struct { + a Quantity + b Quantity + expected Quantity + }{ + {a: intQuantity(0, 0, DecimalSI), expected: intQuantity(0, 0, DecimalSI)}, + {a: Quantity{}, expected: Quantity{}}, + {a: intQuantity(10, 0, BinarySI), expected: intQuantity(-10, 0, BinarySI)}, + {a: intQuantity(-10, 0, BinarySI), expected: intQuantity(10, 0, BinarySI)}, + {a: decQuantity(0, 0, DecimalSI), expected: intQuantity(0, 0, DecimalSI)}, + {a: decQuantity(10, 0, BinarySI), expected: intQuantity(-10, 0, BinarySI)}, + {a: decQuantity(-10, 0, BinarySI), expected: intQuantity(10, 0, BinarySI)}, + } + + for i, test := range tests { + a := test.a.Copy() + a.Neg() + // ensure value is same + if a.Cmp(test.expected) != 0 { + t.Errorf("[%d] Expected %q, got %q", i, test.expected.String(), a.String()) + } + } +} + +func TestAdd(t *testing.T) { + tests := []struct { + a Quantity + b Quantity + expected Quantity + }{ + {decQuantity(10, 0, DecimalSI), decQuantity(1, 1, DecimalSI), decQuantity(20, 0, DecimalSI)}, + {decQuantity(10, 0, DecimalSI), decQuantity(1, 0, BinarySI), decQuantity(11, 0, DecimalSI)}, + {decQuantity(10, 0, BinarySI), decQuantity(1, 0, DecimalSI), decQuantity(11, 0, BinarySI)}, + {Quantity{Format: DecimalSI}, decQuantity(50, 0, DecimalSI), decQuantity(50, 0, DecimalSI)}, + {decQuantity(50, 0, DecimalSI), Quantity{Format: DecimalSI}, decQuantity(50, 0, DecimalSI)}, + {Quantity{Format: DecimalSI}, Quantity{Format: DecimalSI}, decQuantity(0, 0, DecimalSI)}, + } + + for i, test := range tests { + test.a.Add(test.b) + if test.a.Cmp(test.expected) != 0 { + t.Errorf("[%d] Expected %q, got %q", i, test.expected.String(), test.a.String()) + } + } +} + +func TestAddSubRoundTrip(t *testing.T) { + for k := -10; k <= 10; k++ { + q := Quantity{Format: DecimalSI} + var order []int64 + for i := 0; i < 100; i++ { + j := rand.Int63() + order = append(order, j) + q.Add(*NewScaledQuantity(j, Scale(k))) + } + for _, j := range order { + q.Sub(*NewScaledQuantity(j, Scale(k))) + } + if !q.IsZero() { + t.Errorf("addition and subtraction did not cancel: %s", &q) + } + } +} + +func TestAddSubRoundTripAcrossScales(t *testing.T) { + q := Quantity{Format: DecimalSI} + var order []int64 + for i := 0; i < 100; i++ { + j := rand.Int63() + order = append(order, j) + q.Add(*NewScaledQuantity(j, Scale(j%20-10))) + } + for _, j := range order { + q.Sub(*NewScaledQuantity(j, Scale(j%20-10))) + } + if !q.IsZero() { + t.Errorf("addition and subtraction did not cancel: %s", &q) + } +} + +func TestNegateRoundTrip(t *testing.T) { + for _, asDec := range []bool{false, true} { + for k := -10; k <= 10; k++ { + for i := 0; i < 100; i++ { + j := rand.Int63() + q := *NewScaledQuantity(j, Scale(k)) + if asDec { + q.AsDec() + } + + b := q.Copy() + b.Neg() + b.Neg() + if b.Cmp(q) != 0 { + t.Errorf("double negation did not cancel: %s", &q) + } + } + } + } +} +func benchmarkQuantities() []Quantity { + return []Quantity{ + intQuantity(1024*1024*1024, 0, BinarySI), + intQuantity(1024*1024*1024*1024, 0, BinarySI), + intQuantity(1000000, 3, 
DecimalSI), + intQuantity(1000000000, 0, DecimalSI), + intQuantity(1, -3, DecimalSI), + intQuantity(80, -3, DecimalSI), + intQuantity(1080, -3, DecimalSI), + intQuantity(0, 0, BinarySI), + intQuantity(1, 9, DecimalExponent), + intQuantity(1, -9, DecimalSI), + intQuantity(1000000, 10, DecimalSI), + } +} + +func BenchmarkQuantityString(b *testing.B) { + values := benchmarkQuantities() + b.ResetTimer() + var s string + for i := 0; i < b.N; i++ { + q := values[i%len(values)] + q.s = "" + s = q.String() + } + b.StopTimer() + if len(s) == 0 { + b.Fatal(s) + } +} + +func BenchmarkQuantityStringPrecalc(b *testing.B) { + values := benchmarkQuantities() + for i := range values { + _ = values[i].String() + } + b.ResetTimer() + var s string + for i := 0; i < b.N; i++ { + q := values[i%len(values)] + s = q.String() + } + b.StopTimer() + if len(s) == 0 { + b.Fatal(s) + } +} + +func BenchmarkQuantityStringBinarySI(b *testing.B) { + values := benchmarkQuantities() + for i := range values { + values[i].Format = BinarySI + } + b.ResetTimer() + var s string + for i := 0; i < b.N; i++ { + q := values[i%len(values)] + q.s = "" + s = q.String() + } + b.StopTimer() + if len(s) == 0 { + b.Fatal(s) + } +} + +func BenchmarkQuantityMarshalJSON(b *testing.B) { + values := benchmarkQuantities() + b.ResetTimer() + for i := 0; i < b.N; i++ { + q := values[i%len(values)] + q.s = "" + if _, err := q.MarshalJSON(); err != nil { + b.Fatal(err) + } + } + b.StopTimer() +} + +func BenchmarkQuantityUnmarshalJSON(b *testing.B) { + values := benchmarkQuantities() + var json [][]byte + for _, v := range values { + data, _ := v.MarshalJSON() + json = append(json, data) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + var q Quantity + if err := q.UnmarshalJSON(json[i%len(values)]); err != nil { + b.Fatal(err) + } + } + b.StopTimer() +} + +func BenchmarkParseQuantity(b *testing.B) { + values := benchmarkQuantities() + var strings []string + for _, v := range values { + strings = append(strings, v.String()) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + if _, err := ParseQuantity(strings[i%len(values)]); err != nil { + b.Fatal(err) + } + } + b.StopTimer() +} + +func BenchmarkCanonicalize(b *testing.B) { + values := benchmarkQuantities() + b.ResetTimer() + buffer := make([]byte, 0, 100) + for i := 0; i < b.N; i++ { + s, _ := values[i%len(values)].CanonicalizeBytes(buffer) + if len(s) == 0 { + b.Fatal(s) + } + } + b.StopTimer() +} + +func BenchmarkQuantityRoundUp(b *testing.B) { + values := benchmarkQuantities() + b.ResetTimer() + for i := 0; i < b.N; i++ { + q := values[i%len(values)] + copied := q + copied.RoundUp(-3) + } + b.StopTimer() +} + +func BenchmarkQuantityCopy(b *testing.B) { + values := benchmarkQuantities() + b.ResetTimer() + for i := 0; i < b.N; i++ { + values[i%len(values)].Copy() + } + b.StopTimer() +} + +func BenchmarkQuantityAdd(b *testing.B) { + values := benchmarkQuantities() + base := &Quantity{} + b.ResetTimer() + for i := 0; i < b.N; i++ { + q := values[i%len(values)] + base.d.Dec = nil + base.i = int64Amount{value: 100} + base.Add(q) + } + b.StopTimer() +} + +func BenchmarkQuantityCmp(b *testing.B) { + values := benchmarkQuantities() + b.ResetTimer() + for i := 0; i < b.N; i++ { + q := values[i%len(values)] + if q.Cmp(q) != 0 { + b.Fatal(q) + } + } + b.StopTimer() +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/resource/scale_int_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/resource/scale_int_test.go new file mode 100644 index 000000000000..1b4390e55992 --- /dev/null +++ 
b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/resource/scale_int_test.go @@ -0,0 +1,85 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resource + +import ( + "math" + "math/big" + "testing" +) + +func TestScaledValueInternal(t *testing.T) { + tests := []struct { + unscaled *big.Int + scale int + newScale int + + want int64 + }{ + // scale remains the same + {big.NewInt(1000), 0, 0, 1000}, + + // scale down + {big.NewInt(1000), 0, -3, 1}, + {big.NewInt(1000), 3, 0, 1}, + {big.NewInt(0), 3, 0, 0}, + + // always round up + {big.NewInt(999), 3, 0, 1}, + {big.NewInt(500), 3, 0, 1}, + {big.NewInt(499), 3, 0, 1}, + {big.NewInt(1), 3, 0, 1}, + // large scaled value does not lose precision + {big.NewInt(0).Sub(maxInt64, bigOne), 1, 0, (math.MaxInt64-1)/10 + 1}, + // large intermediate result. + {big.NewInt(1).Exp(big.NewInt(10), big.NewInt(100), nil), 100, 0, 1}, + + // scale up + {big.NewInt(0), 0, 3, 0}, + {big.NewInt(1), 0, 3, 1000}, + {big.NewInt(1), -3, 0, 1000}, + {big.NewInt(1000), -3, 2, 100000000}, + {big.NewInt(0).Div(big.NewInt(math.MaxInt64), bigThousand), 0, 3, + (math.MaxInt64 / 1000) * 1000}, + } + + for i, tt := range tests { + old := (&big.Int{}).Set(tt.unscaled) + got := scaledValue(tt.unscaled, tt.scale, tt.newScale) + if got != tt.want { + t.Errorf("#%d: got = %v, want %v", i, got, tt.want) + } + if tt.unscaled.Cmp(old) != 0 { + t.Errorf("#%d: unscaled = %v, want %v", i, tt.unscaled, old) + } + } +} + +func BenchmarkScaledValueSmall(b *testing.B) { + s := big.NewInt(1000) + for i := 0; i < b.N; i++ { + scaledValue(s, 3, 0) + } +} + +func BenchmarkScaledValueLarge(b *testing.B) { + s := big.NewInt(math.MaxInt64) + s.Mul(s, big.NewInt(1000)) + for i := 0; i < b.N; i++ { + scaledValue(s, 10, 0) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/resource/suffix.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/resource/suffix.go index 529712365d78..0aa2ce2bf60e 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/resource/suffix.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/resource/suffix.go @@ -24,8 +24,9 @@ type suffix string // suffixer can interpret and construct suffixes.
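// // As an illustrative sketch (not part of this change), the lookup tables below pair each suffix with a (base, exponent) tuple and a Format, so a caller would see, for example: // //	interpret("Ki") -> (2, 10, BinarySI, true)        // "Ki" means 2^10 //	interpret("m")  -> (10, -3, DecimalSI, true)      // "m" means 10^-3 //	interpret("e6") -> (10, 6, DecimalExponent, true) // parsed via strconv //	construct(10, 3, DecimalSI) -> ("k", true) //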
type suffixer interface { - interpret(suffix) (base, exponent int, fmt Format, ok bool) - construct(base, exponent int, fmt Format) (s suffix, ok bool) + interpret(suffix) (base, exponent int32, fmt Format, ok bool) + construct(base, exponent int32, fmt Format) (s suffix, ok bool) + constructBytes(base, exponent int32, fmt Format) (s []byte, ok bool) } // quantitySuffixer handles suffixes for all three formats that quantity @@ -33,12 +34,13 @@ type suffixer interface { var quantitySuffixer = newSuffixer() type bePair struct { - base, exponent int + base, exponent int32 } type listSuffixer struct { - suffixToBE map[suffix]bePair - beToSuffix map[bePair]suffix + suffixToBE map[suffix]bePair + beToSuffix map[bePair]suffix + beToSuffixBytes map[bePair][]byte } func (ls *listSuffixer) addSuffix(s suffix, pair bePair) { @@ -48,11 +50,15 @@ func (ls *listSuffixer) addSuffix(s suffix, pair bePair) { if ls.beToSuffix == nil { ls.beToSuffix = map[bePair]suffix{} } + if ls.beToSuffixBytes == nil { + ls.beToSuffixBytes = map[bePair][]byte{} + } ls.suffixToBE[s] = pair ls.beToSuffix[pair] = s + ls.beToSuffixBytes[pair] = []byte(s) } -func (ls *listSuffixer) lookup(s suffix) (base, exponent int, ok bool) { +func (ls *listSuffixer) lookup(s suffix) (base, exponent int32, ok bool) { pair, ok := ls.suffixToBE[s] if !ok { return 0, 0, false @@ -60,19 +66,50 @@ func (ls *listSuffixer) lookup(s suffix) (base, exponent int, ok bool) { return pair.base, pair.exponent, true } -func (ls *listSuffixer) construct(base, exponent int) (s suffix, ok bool) { +func (ls *listSuffixer) construct(base, exponent int32) (s suffix, ok bool) { s, ok = ls.beToSuffix[bePair{base, exponent}] return } +func (ls *listSuffixer) constructBytes(base, exponent int32) (s []byte, ok bool) { + s, ok = ls.beToSuffixBytes[bePair{base, exponent}] + return +} + type suffixHandler struct { decSuffixes listSuffixer binSuffixes listSuffixer } +type fastLookup struct { + *suffixHandler +} + +func (l fastLookup) interpret(s suffix) (base, exponent int32, format Format, ok bool) { + switch s { + case "": + return 10, 0, DecimalSI, true + case "n": + return 10, -9, DecimalSI, true + case "u": + return 10, -6, DecimalSI, true + case "m": + return 10, -3, DecimalSI, true + case "k": + return 10, 3, DecimalSI, true + case "M": + return 10, 6, DecimalSI, true + case "G": + return 10, 9, DecimalSI, true + } + return l.suffixHandler.interpret(s) +} + func newSuffixer() suffixer { sh := &suffixHandler{} + // IMPORTANT: if you change this section you must change fastLookup + sh.binSuffixes.addSuffix("Ki", bePair{2, 10}) sh.binSuffixes.addSuffix("Mi", bePair{2, 20}) sh.binSuffixes.addSuffix("Gi", bePair{2, 30}) @@ -94,10 +131,10 @@ func newSuffixer() suffixer { sh.decSuffixes.addSuffix("P", bePair{10, 15}) sh.decSuffixes.addSuffix("E", bePair{10, 18}) - return sh + return fastLookup{sh} } -func (sh *suffixHandler) construct(base, exponent int, fmt Format) (s suffix, ok bool) { +func (sh *suffixHandler) construct(base, exponent int32, fmt Format) (s suffix, ok bool) { switch fmt { case DecimalSI: return sh.decSuffixes.construct(base, exponent) @@ -115,7 +152,32 @@ func (sh *suffixHandler) construct(base, exponent int, fmt Format) (s suffix, ok return "", false } -func (sh *suffixHandler) interpret(suffix suffix) (base, exponent int, fmt Format, ok bool) { +func (sh *suffixHandler) constructBytes(base, exponent int32, format Format) (s []byte, ok bool) { + switch format { + case DecimalSI: + return sh.decSuffixes.constructBytes(base, exponent) + case BinarySI: 
+ return sh.binSuffixes.constructBytes(base, exponent) + case DecimalExponent: + if base != 10 { + return nil, false + } + if exponent == 0 { + return nil, true + } + result := make([]byte, 8, 8) + result[0] = 'e' + number := strconv.AppendInt(result[1:1], int64(exponent), 10) + if &result[1] == &number[0] { + return result[:1+len(number)], true + } + result = append(result[:1], number...) + return result, true + } + return nil, false +} + +func (sh *suffixHandler) interpret(suffix suffix) (base, exponent int32, fmt Format, ok bool) { // Try lookup tables first if b, e, ok := sh.decSuffixes.lookup(suffix); ok { return b, e, DecimalSI, true @@ -129,7 +191,7 @@ func (sh *suffixHandler) interpret(suffix suffix) (base, exponent int, fmt Forma if err != nil { return 0, 0, DecimalExponent, false } - return 10, int(parsed), DecimalExponent, true + return 10, int32(parsed), DecimalExponent, true } return 0, 0, DecimalExponent, false diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/resource_helpers.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/resource_helpers.go index 7a98a4c2f070..4c55b120e144 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/resource_helpers.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/resource_helpers.go @@ -18,6 +18,7 @@ package api import ( "k8s.io/kubernetes/pkg/api/resource" + "k8s.io/kubernetes/pkg/api/unversioned" ) // Returns string version of ResourceName. @@ -48,6 +49,13 @@ func (self *ResourceList) Pods() *resource.Quantity { return &resource.Quantity{} } +func (self *ResourceList) NvidiaGPU() *resource.Quantity { + if val, ok := (*self)[ResourceNvidiaGPU]; ok { + return &val + } + return &resource.Quantity{} +} + func GetContainerStatus(statuses []ContainerStatus, name string) (ContainerStatus, bool) { for i := range statuses { if statuses[i].Name == name { @@ -80,12 +88,66 @@ func IsPodReadyConditionTrue(status PodStatus) bool { // Extracts the pod ready condition from the given status and returns that. // Returns nil if the condition is not present. func GetPodReadyCondition(status PodStatus) *PodCondition { - for i, c := range status.Conditions { - if c.Type == PodReady { - return &status.Conditions[i] + _, condition := GetPodCondition(&status, PodReady) + return condition +} + +// GetPodCondition extracts the provided condition from the given status and returns that. +// Returns -1 and nil if the condition is not present, otherwise the index of the located condition and the condition itself. +func GetPodCondition(status *PodStatus, conditionType PodConditionType) (int, *PodCondition) { + if status == nil { + return -1, nil + } + for i := range status.Conditions { + if status.Conditions[i].Type == conditionType { + return i, &status.Conditions[i] + } + } + return -1, nil +} + +// GetNodeCondition extracts the provided condition from the given status and returns that. +// Returns -1 and nil if the condition is not present, otherwise the index of the located condition and the condition itself. +func GetNodeCondition(status *NodeStatus, conditionType NodeConditionType) (int, *NodeCondition) { + if status == nil { + return -1, nil + } + for i := range status.Conditions { + if status.Conditions[i].Type == conditionType { + return i, &status.Conditions[i] + } + } + return -1, nil +} + +// Updates existing pod condition or creates a new one. Sets LastTransitionTime to now if the +// status has changed. +// Returns true if pod condition has changed or has been added.
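+// +// An illustrative sketch of the intended semantics (hypothetical values, not part of this change): +// +//	status := &PodStatus{Conditions: []PodCondition{{Type: PodReady, Status: ConditionFalse}}} +//	changed := UpdatePodCondition(status, &PodCondition{Type: PodReady, Status: ConditionTrue}) +//	// changed == true; LastTransitionTime is set to now because Status flipped. +//	changed = UpdatePodCondition(status, &PodCondition{Type: PodReady, Status: ConditionTrue}) +//	// changed == false; the condition is identical, so LastTransitionTime is preserved.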
+func UpdatePodCondition(status *PodStatus, condition *PodCondition) bool { + condition.LastTransitionTime = unversioned.Now() + // Try to find this pod condition. + conditionIndex, oldCondition := GetPodCondition(status, condition.Type) + + if oldCondition == nil { + // We are adding a new pod condition. + status.Conditions = append(status.Conditions, *condition) + return true + } else { + // We are updating an existing condition, so we need to check if it has changed. + if condition.Status == oldCondition.Status { + condition.LastTransitionTime = oldCondition.LastTransitionTime } + + isEqual := condition.Status == oldCondition.Status && + condition.Reason == oldCondition.Reason && + condition.Message == oldCondition.Message && + condition.LastProbeTime.Equal(oldCondition.LastProbeTime) && + condition.LastTransitionTime.Equal(oldCondition.LastTransitionTime) + + status.Conditions[conditionIndex] = *condition + // Return true if one of the fields has changed. + return !isEqual } - return nil } // IsNodeReady returns true if a node is ready; false otherwise. @@ -106,15 +168,40 @@ func PodRequestsAndLimits(pod *Pod) (reqs map[ResourceName]resource.Quantity, li for name, quantity := range container.Resources.Requests { if value, ok := reqs[name]; !ok { reqs[name] = *quantity.Copy() - } else if err = value.Add(quantity); err != nil { - return nil, nil, err + } else { + value.Add(quantity) + reqs[name] = value } } for name, quantity := range container.Resources.Limits { if value, ok := limits[name]; !ok { limits[name] = *quantity.Copy() - } else if err = value.Add(quantity); err != nil { - return nil, nil, err + } else { + value.Add(quantity) + limits[name] = value + } + } + } + // init containers define the minimum for each resource: the effective value + // is the maximum requested by any single init container + for _, container := range pod.Spec.InitContainers { + for name, quantity := range container.Resources.Requests { + value, ok := reqs[name] + if !ok { + reqs[name] = *quantity.Copy() + continue + } + if quantity.Cmp(value) > 0 { + reqs[name] = *quantity.Copy() + } + } + for name, quantity := range container.Resources.Limits { + value, ok := limits[name] + if !ok { + limits[name] = *quantity.Copy() + continue + } + if quantity.Cmp(value) > 0 { + limits[name] = *quantity.Copy() } } } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/resource_helpers_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/resource_helpers_test.go new file mode 100644 index 000000000000..d13929fa946c --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/resource_helpers_test.go @@ -0,0 +1,63 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package api + +import ( + "testing" + + "k8s.io/kubernetes/pkg/api/resource" +) + +func TestResourceHelpers(t *testing.T) { + cpuLimit := resource.MustParse("10") + memoryLimit := resource.MustParse("10G") + resourceSpec := ResourceRequirements{ + Limits: ResourceList{ + "cpu": cpuLimit, + "memory": memoryLimit, + "kube.io/storage": memoryLimit, + }, + } + if res := resourceSpec.Limits.Cpu(); res.Cmp(cpuLimit) != 0 { + t.Errorf("expected cpulimit %v, got %v", cpuLimit, res) + } + if res := resourceSpec.Limits.Memory(); res.Cmp(memoryLimit) != 0 { + t.Errorf("expected memorylimit %v, got %v", memoryLimit, res) + } + resourceSpec = ResourceRequirements{ + Limits: ResourceList{ + "memory": memoryLimit, + "kube.io/storage": memoryLimit, + }, + } + if res := resourceSpec.Limits.Cpu(); res.Value() != 0 { + t.Errorf("expected cpulimit %v, got %v", 0, res) + } + if res := resourceSpec.Limits.Memory(); res.Cmp(memoryLimit) != 0 { + t.Errorf("expected memorylimit %v, got %v", memoryLimit, res) + } +} + +func TestDefaultResourceHelpers(t *testing.T) { + resourceList := ResourceList{} + if resourceList.Cpu().Format != resource.DecimalSI { + t.Errorf("expected %v, actual %v", resource.DecimalSI, resourceList.Cpu().Format) + } + if resourceList.Memory().Format != resource.BinarySI { + t.Errorf("expected %v, actual %v", resource.BinarySI, resourceList.Memory().Format) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/rest/create.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/rest/create.go index 4e3a8938b0dd..fa95b7f93f23 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/rest/create.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/rest/create.go @@ -112,11 +112,11 @@ func objectMetaAndKind(typer runtime.ObjectTyper, obj runtime.Object) (*api.Obje if err != nil { return nil, unversioned.GroupVersionKind{}, errors.NewInternalError(err) } - kind, err := typer.ObjectKind(obj) + kinds, _, err := typer.ObjectKinds(obj) if err != nil { return nil, unversioned.GroupVersionKind{}, errors.NewInternalError(err) } - return objectMeta, kind, nil + return objectMeta, kinds[0], nil } // NamespaceScopedStrategy has a method to tell if the object must be in a namespace. diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/rest/delete.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/rest/delete.go index c05f3446db39..34965d52fe8e 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/rest/delete.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/rest/delete.go @@ -17,9 +17,11 @@ limitations under the License. package rest import ( + "fmt" "time" "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/runtime" ) @@ -28,7 +30,11 @@ import ( // API conventions. type RESTDeleteStrategy interface { runtime.ObjectTyper +} +// RESTGracefulDeleteStrategy must be implemented by the registry that supports +// graceful deletion. +type RESTGracefulDeleteStrategy interface { // CheckGracefulDelete should return true if the object can be gracefully deleted and set // any default values on the DeleteOptions. CheckGracefulDelete(obj runtime.Object, options *api.DeleteOptions) bool @@ -40,14 +46,18 @@ type RESTDeleteStrategy interface { // condition cannot be checked or the gracePeriodSeconds is invalid. The options argument may be updated with // default values if graceful is true. 
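// // A rough sketch of how a registry might drive this helper (illustrative only; "registry", "obj", and "options" are assumed names): // //	graceful, gracefulPending, err := rest.BeforeDelete(registry.DeleteStrategy, ctx, obj, options) //	switch { //	case err != nil: // fail the request //	case gracefulPending: // deletion already in progress; leave it alone //	case graceful: // persist obj with its new DeletionTimestamp/GracePeriodSeconds instead of removing it //	default: // delete immediately //	} //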
func BeforeDelete(strategy RESTDeleteStrategy, ctx api.Context, obj runtime.Object, options *api.DeleteOptions) (graceful, gracefulPending bool, err error) { - if strategy == nil { - return false, false, nil - } - objectMeta, _, kerr := objectMetaAndKind(strategy, obj) + objectMeta, gvk, kerr := objectMetaAndKind(strategy, obj) if kerr != nil { return false, false, kerr } - + // Checking the Preconditions here to fail early. They'll be enforced later on when we actually do the deletion, too. + if options.Preconditions != nil && options.Preconditions.UID != nil && *options.Preconditions.UID != objectMeta.UID { + return false, false, errors.NewConflict(unversioned.GroupResource{Group: gvk.Group, Resource: gvk.Kind}, objectMeta.Name, fmt.Errorf("the UID in the precondition (%s) does not match the UID in record (%s). The object might have been deleted and then recreated", *options.Preconditions.UID, objectMeta.UID)) + } + gracefulStrategy, ok := strategy.(RESTGracefulDeleteStrategy) + if !ok { + return false, false, nil + } // if the object is already being deleted if objectMeta.DeletionTimestamp != nil { // if we are already being deleted, we may only shorten the deletion grace period @@ -73,7 +83,7 @@ func BeforeDelete(strategy RESTDeleteStrategy, ctx api.Context, obj runtime.Obje return false, true, nil } - if !strategy.CheckGracefulDelete(obj, options) { + if !gracefulStrategy.CheckGracefulDelete(obj, options) { return false, false, nil } now := unversioned.NewTime(unversioned.Now().Add(time.Second * time.Duration(*options.GracePeriodSeconds))) diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/rest/rest.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/rest/rest.go index d07023dad5d4..4d5b5ba977c2 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/rest/rest.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/rest/rest.go @@ -57,7 +57,7 @@ type Storage interface { // KindProvider specifies a different kind for its API than for its internal storage. This is necessary for external // objects that are not compiled into the api server. For such objects, there is no in-memory representation for -// the object, so they must be represented as generic objects (e.g. RawJSON), but when we present the object as part of +// the object, so they must be represented as generic objects (e.g. runtime.Unknown), but when we present the object as part of // API discovery we want to present the specific kind, not the generic internal representation. type KindProvider interface { Kind() string @@ -174,6 +174,19 @@ type NamedCreater interface { Create(ctx api.Context, name string, obj runtime.Object) (runtime.Object, error) } +// UpdatedObjectInfo provides information about an updated object to an Updater. +// It requires access to the old object in order to return the newly updated object. +type UpdatedObjectInfo interface { + // Returns preconditions built from the updated object, if applicable. + // May return nil, or a preconditions object containing nil fields, + // if no preconditions can be determined from the updated object. + Preconditions() *api.Preconditions + + // UpdatedObject returns the updated object, given a context and old object. + // The only time an empty oldObj should be passed in is if a "create on update" is occurring (there is no oldObj). + UpdatedObject(ctx api.Context, oldObj runtime.Object) (newObj runtime.Object, err error) +} + // Updater is an object that can update an instance of a RESTful object. 
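// // For example (an illustrative sketch, not mandated by this change; "storage", "updatedPod", and the name are assumed), a caller wraps its modified object so the registry can re-retrieve the old object and apply UID preconditions: // //	objInfo := DefaultUpdatedObjectInfo(updatedPod, api.Scheme) //	obj, created, err := storage.Update(ctx, "mypod", objInfo) //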
type Updater interface { // New returns an empty object that can be used with Update after request data has been put into it. @@ -183,14 +196,14 @@ type Updater interface { // Update finds a resource in the storage and updates it. Some implementations // may allow update to create the object - they should set the created boolean // to true. - Update(ctx api.Context, obj runtime.Object) (runtime.Object, bool, error) + Update(ctx api.Context, name string, objInfo UpdatedObjectInfo) (runtime.Object, bool, error) } // CreaterUpdater is a storage object that must support both create and update. // Go prevents embedded interfaces that implement the same method. type CreaterUpdater interface { Creater - Update(ctx api.Context, obj runtime.Object) (runtime.Object, bool, error) + Update(ctx api.Context, name string, objInfo UpdatedObjectInfo) (runtime.Object, bool, error) } // CreaterUpdater must satisfy the Updater interface. diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/rest/resttest/resttest.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/rest/resttest/resttest.go index b17ae740df9b..40b33b46e247 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/rest/resttest/resttest.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/rest/resttest/resttest.go @@ -32,6 +32,7 @@ import ( "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/types" "k8s.io/kubernetes/pkg/util/wait" ) @@ -42,15 +43,28 @@ type Tester struct { createOnUpdate bool generatesName bool returnDeletedObject bool + namer func(int) string } func New(t *testing.T, storage rest.Storage) *Tester { return &Tester{ T: t, storage: storage, + namer: defaultNamer, } } +func defaultNamer(i int) string { + return fmt.Sprintf("foo%d", i) +} + +// Namer allows providing a custom name generator. +// By default "foo%d" is used. +func (t *Tester) Namer(namer func(int) string) *Tester { + t.namer = namer + return t +} + func (t *Tester) ClusterScope() *Tester { t.clusterScope = true return t @@ -122,22 +136,23 @@ type GetFunc func(api.Context, runtime.Object) (runtime.Object, error) type InitWatchFunc func() type InjectErrFunc func(err error) type IsErrorFunc func(err error) bool -type SetFunc func(api.Context, runtime.Object) error +type CreateFunc func(api.Context, runtime.Object) error type SetRVFunc func(uint64) type UpdateFunc func(runtime.Object) runtime.Object // Test creating an object. -func (t *Tester) TestCreate(valid runtime.Object, setFn SetFunc, getFn GetFunc, invalid ...runtime.Object) { +func (t *Tester) TestCreate(valid runtime.Object, createFn CreateFunc, getFn GetFunc, invalid ...runtime.Object) { t.testCreateHasMetadata(copyOrDie(valid)) if !t.generatesName { t.testCreateGeneratesName(copyOrDie(valid)) } t.testCreateEquals(copyOrDie(valid), getFn) - t.testCreateAlreadyExisting(copyOrDie(valid), setFn) + t.testCreateAlreadyExisting(copyOrDie(valid), createFn) if t.clusterScope { t.testCreateDiscardsObjectNamespace(copyOrDie(valid)) t.testCreateIgnoresContextNamespace(copyOrDie(valid)) t.testCreateIgnoresMismatchedNamespace(copyOrDie(valid)) + t.testCreateResetsUserData(copyOrDie(valid)) } else { t.testCreateRejectsMismatchedNamespace(copyOrDie(valid)) } @@ -146,29 +161,33 @@ func (t *Tester) TestCreate(valid runtime.Object, setFn SetFunc, getFn GetFunc, } // Test updating an object.
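// As an aside, an illustrative configuration of the Tester above (names assumed, not prescribed by this change): // //	tester := resttest.New(t, storage).ClusterScope().Namer(func(i int) string { //		return fmt.Sprintf("custom%d", i) //	}) //	tester.TestCreate(validObj, createFn, getFn) //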
-func (t *Tester) TestUpdate(valid runtime.Object, setFn SetFunc, getFn GetFunc, updateFn UpdateFunc, invalidUpdateFn ...UpdateFunc) { - t.testUpdateEquals(copyOrDie(valid), setFn, getFn, updateFn) - t.testUpdateFailsOnVersionTooOld(copyOrDie(valid), setFn, getFn) +func (t *Tester) TestUpdate(valid runtime.Object, createFn CreateFunc, getFn GetFunc, updateFn UpdateFunc, invalidUpdateFn ...UpdateFunc) { + t.testUpdateEquals(copyOrDie(valid), createFn, getFn, updateFn) + t.testUpdateFailsOnVersionTooOld(copyOrDie(valid), createFn, getFn) t.testUpdateOnNotFound(copyOrDie(valid)) if !t.clusterScope { - t.testUpdateRejectsMismatchedNamespace(copyOrDie(valid), setFn) + t.testUpdateRejectsMismatchedNamespace(copyOrDie(valid), createFn) } - t.testUpdateInvokesValidation(copyOrDie(valid), setFn, invalidUpdateFn...) + t.testUpdateInvokesValidation(copyOrDie(valid), createFn, invalidUpdateFn...) + t.testUpdateWithWrongUID(copyOrDie(valid), createFn, getFn) + t.testUpdateRetrievesOldObject(copyOrDie(valid), createFn, getFn) + t.testUpdatePropagatesUpdatedObjectError(copyOrDie(valid), createFn, getFn) } // Test deleting an object. -func (t *Tester) TestDelete(valid runtime.Object, setFn SetFunc, getFn GetFunc, isNotFoundFn IsErrorFunc) { +func (t *Tester) TestDelete(valid runtime.Object, createFn CreateFunc, getFn GetFunc, isNotFoundFn IsErrorFunc) { t.testDeleteNonExist(copyOrDie(valid)) - t.testDeleteNoGraceful(copyOrDie(valid), setFn, getFn, isNotFoundFn) + t.testDeleteNoGraceful(copyOrDie(valid), createFn, getFn, isNotFoundFn) + t.testDeleteWithUID(copyOrDie(valid), createFn, getFn, isNotFoundFn) } // Test gracefully deleting an object. -func (t *Tester) TestDeleteGraceful(valid runtime.Object, setFn SetFunc, getFn GetFunc, expectedGrace int64) { - t.testDeleteGracefulHasDefault(copyOrDie(valid), setFn, getFn, expectedGrace) - t.testDeleteGracefulWithValue(copyOrDie(valid), setFn, getFn, expectedGrace) - t.testDeleteGracefulUsesZeroOnNil(copyOrDie(valid), setFn, expectedGrace) - t.testDeleteGracefulExtend(copyOrDie(valid), setFn, getFn, expectedGrace) - t.testDeleteGracefulImmediate(copyOrDie(valid), setFn, getFn, expectedGrace) +func (t *Tester) TestDeleteGraceful(valid runtime.Object, createFn CreateFunc, getFn GetFunc, expectedGrace int64) { + t.testDeleteGracefulHasDefault(copyOrDie(valid), createFn, getFn, expectedGrace) + t.testDeleteGracefulWithValue(copyOrDie(valid), createFn, getFn, expectedGrace) + t.testDeleteGracefulUsesZeroOnNil(copyOrDie(valid), createFn, expectedGrace) + t.testDeleteGracefulExtend(copyOrDie(valid), createFn, getFn, expectedGrace) + t.testDeleteGracefulImmediate(copyOrDie(valid), createFn, getFn, expectedGrace) } // Test getting object. @@ -199,14 +218,28 @@ func (t *Tester) TestWatch( // ============================================================================= // Creation tests. 
-func (t *Tester) testCreateAlreadyExisting(obj runtime.Object, setFn SetFunc) { +func (t *Tester) delete(ctx api.Context, obj runtime.Object) error { + objectMeta, err := api.ObjectMetaFor(obj) + if err != nil { + return err + } + deleter, ok := t.storage.(rest.GracefulDeleter) + if !ok { + return fmt.Errorf("Expected deleting storage, got %v", t.storage) + } + _, err = deleter.Delete(ctx, objectMeta.Name, nil) + return err +} + +func (t *Tester) testCreateAlreadyExisting(obj runtime.Object, createFn CreateFunc) { ctx := t.TestContext() foo := copyOrDie(obj) - t.setObjectMeta(foo, "foo1") - if err := setFn(ctx, foo); err != nil { + t.setObjectMeta(foo, t.namer(1)) + if err := createFn(ctx, foo); err != nil { t.Errorf("unexpected error: %v", err) } + defer t.delete(ctx, foo) _, err := t.storage.(rest.Creater).Create(ctx, foo) if !errors.IsAlreadyExists(err) { @@ -218,12 +251,13 @@ func (t *Tester) testCreateEquals(obj runtime.Object, getFn GetFunc) { ctx := t.TestContext() foo := copyOrDie(obj) - t.setObjectMeta(foo, "foo2") + t.setObjectMeta(foo, t.namer(2)) created, err := t.storage.(rest.Creater).Create(ctx, foo) if err != nil { t.Errorf("unexpected error: %v", err) } + defer t.delete(ctx, created) got, err := getFn(ctx, foo) if err != nil { @@ -251,6 +285,7 @@ func (t *Tester) testCreateDiscardsObjectNamespace(valid runtime.Object) { if err != nil { t.Fatalf("Unexpected error: %v", err) } + defer t.delete(t.TestContext(), created) createdObjectMeta := t.getObjectMetaOrFail(created) if createdObjectMeta.Namespace != api.NamespaceNone { t.Errorf("Expected empty namespace on created object, got '%v'", createdObjectMeta.Namespace) @@ -262,10 +297,11 @@ func (t *Tester) testCreateGeneratesName(valid runtime.Object) { objectMeta.Name = "" objectMeta.GenerateName = "test-" - _, err := t.storage.(rest.Creater).Create(t.TestContext(), valid) + created, err := t.storage.(rest.Creater).Create(t.TestContext(), valid) if err != nil { t.Fatalf("Unexpected error: %v", err) } + defer t.delete(t.TestContext(), created) if objectMeta.Name == "test-" || !strings.HasPrefix(objectMeta.Name, "test-") { t.Errorf("unexpected name: %#v", valid) } @@ -273,8 +309,7 @@ func (t *Tester) testCreateGeneratesName(valid runtime.Object) { func (t *Tester) testCreateHasMetadata(valid runtime.Object) { objectMeta := t.getObjectMetaOrFail(valid) - objectMeta.Name = "" - objectMeta.GenerateName = "test-" + objectMeta.Name = t.namer(1) objectMeta.Namespace = t.TestNamespace() obj, err := t.storage.(rest.Creater).Create(t.TestContext(), valid) @@ -284,6 +319,7 @@ func (t *Tester) testCreateHasMetadata(valid runtime.Object) { if obj == nil { t.Fatalf("Unexpected object from result: %#v", obj) } + defer t.delete(t.TestContext(), obj) if !api.HasObjectMetaSystemFieldValues(objectMeta) { t.Errorf("storage did not populate object meta field values") } @@ -298,6 +334,7 @@ func (t *Tester) testCreateIgnoresContextNamespace(valid runtime.Object) { if err != nil { t.Fatalf("Unexpected error: %v", err) } + defer t.delete(ctx, created) createdObjectMeta := t.getObjectMetaOrFail(created) if createdObjectMeta.Namespace != api.NamespaceNone { t.Errorf("Expected empty namespace on created object, got '%v'", createdObjectMeta.Namespace) @@ -316,6 +353,7 @@ func (t *Tester) testCreateIgnoresMismatchedNamespace(valid runtime.Object) { if err != nil { t.Fatalf("Unexpected error: %v", err) } + defer t.delete(ctx, created) createdObjectMeta := t.getObjectMetaOrFail(created) if createdObjectMeta.Namespace != api.NamespaceNone { t.Errorf("Expected 
empty namespace on created object, got '%v'", createdObjectMeta.Namespace) @@ -331,7 +369,7 @@ func (t *Tester) testCreateValidatesNames(valid runtime.Object) { ctx := t.TestContext() _, err := t.storage.(rest.Creater).Create(ctx, objCopy) if !errors.IsInvalid(err) { - t.Errorf("%s: Expected to get an invalid resource error, got %v", invalidName, err) + t.Errorf("%s: Expected to get an invalid resource error, got '%v'", invalidName, err) } } @@ -343,7 +381,7 @@ func (t *Tester) testCreateValidatesNames(valid runtime.Object) { ctx := t.TestContext() _, err := t.storage.(rest.Creater).Create(ctx, objCopy) if !errors.IsInvalid(err) { - t.Errorf("%s: Expected to get an invalid resource error, got %v", invalidSuffix, err) + t.Errorf("%s: Expected to get an invalid resource error, got '%v'", invalidSuffix, err) } } } @@ -383,6 +421,7 @@ func (t *Tester) testCreateResetsUserData(valid runtime.Object) { if obj == nil { t.Fatalf("Unexpected object from result: %#v", obj) } + defer t.delete(t.TestContext(), obj) if objectMeta.UID == "bad-uid" || objectMeta.CreationTimestamp == now { t.Errorf("ObjectMeta did not reset basic fields: %#v", objectMeta) } @@ -391,12 +430,12 @@ func (t *Tester) testCreateResetsUserData(valid runtime.Object) { // ============================================================================= // Update tests. -func (t *Tester) testUpdateEquals(obj runtime.Object, setFn SetFunc, getFn GetFunc, updateFn UpdateFunc) { +func (t *Tester) testUpdateEquals(obj runtime.Object, createFn CreateFunc, getFn GetFunc, updateFn UpdateFunc) { ctx := t.TestContext() foo := copyOrDie(obj) - t.setObjectMeta(foo, "foo2") - if err := setFn(ctx, foo); err != nil { + t.setObjectMeta(foo, t.namer(2)) + if err := createFn(ctx, foo); err != nil { t.Errorf("unexpected error: %v", err) } @@ -405,7 +444,8 @@ func (t *Tester) testUpdateEquals(obj runtime.Object, setFn SetFunc, getFn GetFu t.Errorf("unexpected error: %v", err) } toUpdate = updateFn(toUpdate) - updated, created, err := t.storage.(rest.Updater).Update(ctx, toUpdate) + toUpdateMeta := t.getObjectMetaOrFail(toUpdate) + updated, created, err := t.storage.(rest.Updater).Update(ctx, toUpdateMeta.Name, rest.DefaultUpdatedObjectInfo(toUpdate, api.Scheme)) if err != nil { t.Errorf("unexpected error: %v", err) } @@ -426,13 +466,13 @@ func (t *Tester) testUpdateEquals(obj runtime.Object, setFn SetFunc, getFn GetFu } } -func (t *Tester) testUpdateFailsOnVersionTooOld(obj runtime.Object, setFn SetFunc, getFn GetFunc) { +func (t *Tester) testUpdateFailsOnVersionTooOld(obj runtime.Object, createFn CreateFunc, getFn GetFunc) { ctx := t.TestContext() foo := copyOrDie(obj) - t.setObjectMeta(foo, "foo3") + t.setObjectMeta(foo, t.namer(3)) - if err := setFn(ctx, foo); err != nil { + if err := createFn(ctx, foo); err != nil { t.Errorf("unexpected error: %v", err) } @@ -445,7 +485,7 @@ func (t *Tester) testUpdateFailsOnVersionTooOld(obj runtime.Object, setFn SetFun olderMeta := t.getObjectMetaOrFail(older) olderMeta.ResourceVersion = "1" - _, _, err = t.storage.(rest.Updater).Update(t.TestContext(), older) + _, _, err = t.storage.(rest.Updater).Update(t.TestContext(), olderMeta.Name, rest.DefaultUpdatedObjectInfo(older, api.Scheme)) if err == nil { t.Errorf("Expected an error, but we didn't get one") } else if !errors.IsConflict(err) { @@ -453,18 +493,19 @@ func (t *Tester) testUpdateFailsOnVersionTooOld(obj runtime.Object, setFn SetFun } } -func (t *Tester) testUpdateInvokesValidation(obj runtime.Object, setFn SetFunc, invalidUpdateFn ...UpdateFunc) { +func (t 
*Tester) testUpdateInvokesValidation(obj runtime.Object, createFn CreateFunc, invalidUpdateFn ...UpdateFunc) { ctx := t.TestContext() foo := copyOrDie(obj) - t.setObjectMeta(foo, "foo4") - if err := setFn(ctx, foo); err != nil { + t.setObjectMeta(foo, t.namer(4)) + if err := createFn(ctx, foo); err != nil { t.Errorf("unexpected error: %v", err) } for _, update := range invalidUpdateFn { toUpdate := update(copyOrDie(foo)) - got, created, err := t.storage.(rest.Updater).Update(t.TestContext(), toUpdate) + toUpdateMeta := t.getObjectMetaOrFail(toUpdate) + got, created, err := t.storage.(rest.Updater).Update(t.TestContext(), toUpdateMeta.Name, rest.DefaultUpdatedObjectInfo(toUpdate, api.Scheme)) if got != nil || created { t.Errorf("expected nil object and no creation for object: %v", toUpdate) } @@ -474,9 +515,105 @@ func (t *Tester) testUpdateInvokesValidation(obj runtime.Object, setFn SetFunc, } } +func (t *Tester) testUpdateWithWrongUID(obj runtime.Object, createFn CreateFunc, getFn GetFunc) { + ctx := t.TestContext() + foo := copyOrDie(obj) + t.setObjectMeta(foo, t.namer(5)) + objectMeta := t.getObjectMetaOrFail(foo) + objectMeta.UID = types.UID("UID0000") + if err := createFn(ctx, foo); err != nil { + t.Errorf("unexpected error: %v", err) + } + objectMeta.UID = types.UID("UID1111") + + obj, created, err := t.storage.(rest.Updater).Update(ctx, objectMeta.Name, rest.DefaultUpdatedObjectInfo(foo, api.Scheme)) + if created || obj != nil { + t.Errorf("expected nil object and no creation for object: %v", foo) + } + if err == nil || !errors.IsConflict(err) { + t.Errorf("unexpected error: %v", err) + } +} + +func (t *Tester) testUpdateRetrievesOldObject(obj runtime.Object, createFn CreateFunc, getFn GetFunc) { + ctx := t.TestContext() + foo := copyOrDie(obj) + t.setObjectMeta(foo, t.namer(6)) + objectMeta := t.getObjectMetaOrFail(foo) + objectMeta.Annotations = map[string]string{"A": "1"} + if err := createFn(ctx, foo); err != nil { + t.Errorf("unexpected error: %v", err) + return + } + + storedFoo, err := getFn(ctx, foo) + if err != nil { + t.Errorf("unexpected error: %v", err) + return + } + + storedFooWithUpdates := copyOrDie(storedFoo) + objectMeta = t.getObjectMetaOrFail(storedFooWithUpdates) + objectMeta.Annotations = map[string]string{"A": "2"} + + // Make sure a custom transform is called, and sees the expected updatedObject and oldObject + // This tests the mechanism used to pass the old and new object to admission + calledUpdatedObject := 0 + noopTransform := func(_ api.Context, updatedObject runtime.Object, oldObject runtime.Object) (runtime.Object, error) { + if !reflect.DeepEqual(storedFoo, oldObject) { + t.Errorf("Expected\n\t%#v\ngot\n\t%#v", storedFoo, oldObject) + } + if !reflect.DeepEqual(storedFooWithUpdates, updatedObject) { + t.Errorf("Expected\n\t%#v\ngot\n\t%#v", storedFooWithUpdates, updatedObject) + } + calledUpdatedObject++ + return updatedObject, nil + } + + updatedObj, created, err := t.storage.(rest.Updater).Update(ctx, objectMeta.Name, rest.DefaultUpdatedObjectInfo(storedFooWithUpdates, api.Scheme, noopTransform)) + if err != nil { + t.Errorf("unexpected error: %v", err) + return + } + if created { + t.Errorf("expected no creation for object") + return + } + if updatedObj == nil { + t.Errorf("expected non-nil object from update") + return + } + if calledUpdatedObject != 1 { + t.Errorf("expected UpdatedObject() to be called 1 time, was called %d", calledUpdatedObject) + return + } +} + +func (t *Tester) testUpdatePropagatesUpdatedObjectError(obj runtime.Object, 
createFn CreateFunc, getFn GetFunc) { + ctx := t.TestContext() + foo := copyOrDie(obj) + name := t.namer(7) + t.setObjectMeta(foo, name) + if err := createFn(ctx, foo); err != nil { + t.Errorf("unexpected error: %v", err) + return + } + + // Make sure an error returned by a custom transform is propagated to the caller of Update + propagateErr := fmt.Errorf("custom updated object error for %v", foo) + errTransform := func(_ api.Context, updatedObject runtime.Object, oldObject runtime.Object) (runtime.Object, error) { + return nil, propagateErr + } + + _, _, err := t.storage.(rest.Updater).Update(ctx, name, rest.DefaultUpdatedObjectInfo(foo, api.Scheme, errTransform)) + if err != propagateErr { + t.Errorf("expected propagated error, got %#v", err) + } +} + func (t *Tester) testUpdateOnNotFound(obj runtime.Object) { - t.setObjectMeta(obj, "foo") - _, created, err := t.storage.(rest.Updater).Update(t.TestContext(), obj) + t.setObjectMeta(obj, t.namer(0)) + _, created, err := t.storage.(rest.Updater).Update(t.TestContext(), t.namer(0), rest.DefaultUpdatedObjectInfo(obj, api.Scheme)) if t.createOnUpdate { if err != nil { t.Errorf("creation allowed on update, but got an error: %v", err) @@ -493,20 +630,20 @@ func (t *Tester) testUpdateOnNotFound(obj runtime.Object) { } } -func (t *Tester) testUpdateRejectsMismatchedNamespace(obj runtime.Object, setFn SetFunc) { +func (t *Tester) testUpdateRejectsMismatchedNamespace(obj runtime.Object, createFn CreateFunc) { ctx := t.TestContext() foo := copyOrDie(obj) - t.setObjectMeta(foo, "foo1") - if err := setFn(ctx, foo); err != nil { + t.setObjectMeta(foo, t.namer(1)) + if err := createFn(ctx, foo); err != nil { t.Errorf("unexpected error: %v", err) } objectMeta := t.getObjectMetaOrFail(obj) - objectMeta.Name = "foo1" + objectMeta.Name = t.namer(1) objectMeta.Namespace = "not-default" - obj, updated, err := t.storage.(rest.Updater).Update(t.TestContext(), obj) + obj, updated, err := t.storage.(rest.Updater).Update(t.TestContext(), t.namer(1), rest.DefaultUpdatedObjectInfo(obj, api.Scheme)) if obj != nil || updated { t.Errorf("expected nil object and not updated") } @@ -520,12 +657,12 @@ func (t *Tester) testUpdateRejectsMismatchedNamespace(obj runtime.Object, setFn // ============================================================================= // Deletion tests. -func (t *Tester) testDeleteNoGraceful(obj runtime.Object, setFn SetFunc, getFn GetFunc, isNotFoundFn IsErrorFunc) { +func (t *Tester) testDeleteNoGraceful(obj runtime.Object, createFn CreateFunc, getFn GetFunc, isNotFoundFn IsErrorFunc) { ctx := t.TestContext() foo := copyOrDie(obj) - t.setObjectMeta(foo, "foo1") - if err := setFn(ctx, foo); err != nil { + t.setObjectMeta(foo, t.namer(1)) + if err := createFn(ctx, foo); err != nil { t.Errorf("unexpected error: %v", err) } objectMeta := t.getObjectMetaOrFail(foo) @@ -557,15 +694,51 @@ func (t *Tester) testDeleteNonExist(obj runtime.Object) { } +// This tests the fast-fail path. We test that the precondition gets verified +// again before deleting the object in tests of pkg/storage/etcd.
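+// +// For reference, the precondition options exercised below look like (illustrative): +// +//	opts := api.NewPreconditionDeleteOptions("UID0000") +//	// Delete succeeds only if the stored object's UID matches "UID0000"; +//	// any mismatch is expected to surface as a Conflict error.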
+func (t *Tester) testDeleteWithUID(obj runtime.Object, createFn CreateFunc, getFn GetFunc, isNotFoundFn IsErrorFunc) { + ctx := t.TestContext() + + foo := copyOrDie(obj) + t.setObjectMeta(foo, t.namer(1)) + objectMeta := t.getObjectMetaOrFail(foo) + objectMeta.UID = types.UID("UID0000") + if err := createFn(ctx, foo); err != nil { + t.Errorf("unexpected error: %v", err) + } + obj, err := t.storage.(rest.GracefulDeleter).Delete(ctx, objectMeta.Name, api.NewPreconditionDeleteOptions("UID1111")) + if err == nil || !errors.IsConflict(err) { + t.Errorf("unexpected error: %v", err) + } + + obj, err = t.storage.(rest.GracefulDeleter).Delete(ctx, objectMeta.Name, api.NewPreconditionDeleteOptions("UID0000")) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + + if !t.returnDeletedObject { + if status, ok := obj.(*unversioned.Status); !ok { + t.Errorf("expected status of delete, got %#v", obj) + } else if status.Status != unversioned.StatusSuccess { + t.Errorf("expected success, got: %v", status.Status) + } + } + + _, err = getFn(ctx, foo) + if err == nil || !isNotFoundFn(err) { + t.Errorf("unexpected error: %v", err) + } +} + // ============================================================================= // Graceful Deletion tests. -func (t *Tester) testDeleteGracefulHasDefault(obj runtime.Object, setFn SetFunc, getFn GetFunc, expectedGrace int64) { +func (t *Tester) testDeleteGracefulHasDefault(obj runtime.Object, createFn CreateFunc, getFn GetFunc, expectedGrace int64) { ctx := t.TestContext() foo := copyOrDie(obj) - t.setObjectMeta(foo, "foo1") - if err := setFn(ctx, foo); err != nil { + t.setObjectMeta(foo, t.namer(1)) + if err := createFn(ctx, foo); err != nil { t.Errorf("unexpected error: %v", err) } objectMeta := t.getObjectMetaOrFail(foo) @@ -574,7 +747,7 @@ func (t *Tester) testDeleteGracefulHasDefault(obj runtime.Object, setFn SetFunc, t.Errorf("unexpected error: %v", err) } if _, err := getFn(ctx, foo); err != nil { - t.Fatalf("did not gracefully delete resource", err) + t.Fatalf("did not gracefully delete resource: %v", err) } object, err := t.storage.(rest.Getter).Get(ctx, objectMeta.Name) @@ -587,12 +760,12 @@ func (t *Tester) testDeleteGracefulHasDefault(obj runtime.Object, setFn SetFunc, } } -func (t *Tester) testDeleteGracefulWithValue(obj runtime.Object, setFn SetFunc, getFn GetFunc, expectedGrace int64) { +func (t *Tester) testDeleteGracefulWithValue(obj runtime.Object, createFn CreateFunc, getFn GetFunc, expectedGrace int64) { ctx := t.TestContext() foo := copyOrDie(obj) - t.setObjectMeta(foo, "foo2") - if err := setFn(ctx, foo); err != nil { + t.setObjectMeta(foo, t.namer(2)) + if err := createFn(ctx, foo); err != nil { t.Errorf("unexpected error: %v", err) } objectMeta := t.getObjectMetaOrFail(foo) @@ -601,7 +774,7 @@ func (t *Tester) testDeleteGracefulWithValue(obj runtime.Object, setFn SetFunc, t.Errorf("unexpected error: %v", err) } if _, err := getFn(ctx, foo); err != nil { - t.Fatalf("did not gracefully delete resource", err) + t.Fatalf("did not gracefully delete resource: %v", err) } object, err := t.storage.(rest.Getter).Get(ctx, objectMeta.Name) @@ -614,12 +787,12 @@ func (t *Tester) testDeleteGracefulWithValue(obj runtime.Object, setFn SetFunc, } } -func (t *Tester) testDeleteGracefulExtend(obj runtime.Object, setFn SetFunc, getFn GetFunc, expectedGrace int64) { +func (t *Tester) testDeleteGracefulExtend(obj runtime.Object, createFn CreateFunc, getFn GetFunc, expectedGrace int64) { ctx := t.TestContext() foo := copyOrDie(obj) - t.setObjectMeta(foo,
"foo3") - if err := setFn(ctx, foo); err != nil { + t.setObjectMeta(foo, t.namer(3)) + if err := createFn(ctx, foo); err != nil { t.Errorf("unexpected error: %v", err) } objectMeta := t.getObjectMetaOrFail(foo) @@ -628,7 +801,7 @@ func (t *Tester) testDeleteGracefulExtend(obj runtime.Object, setFn SetFunc, get t.Errorf("unexpected error: %v", err) } if _, err := getFn(ctx, foo); err != nil { - t.Fatalf("did not gracefully delete resource", err) + t.Fatalf("did not gracefully delete resource: %v", err) } // second delete duration is ignored @@ -646,12 +819,12 @@ func (t *Tester) testDeleteGracefulExtend(obj runtime.Object, setFn SetFunc, get } } -func (t *Tester) testDeleteGracefulImmediate(obj runtime.Object, setFn SetFunc, getFn GetFunc, expectedGrace int64) { +func (t *Tester) testDeleteGracefulImmediate(obj runtime.Object, createFn CreateFunc, getFn GetFunc, expectedGrace int64) { ctx := t.TestContext() foo := copyOrDie(obj) t.setObjectMeta(foo, "foo4") - if err := setFn(ctx, foo); err != nil { + if err := createFn(ctx, foo); err != nil { t.Errorf("unexpected error: %v", err) } objectMeta := t.getObjectMetaOrFail(foo) @@ -660,7 +833,7 @@ func (t *Tester) testDeleteGracefulImmediate(obj runtime.Object, setFn SetFunc, t.Errorf("unexpected error: %v", err) } if _, err := getFn(ctx, foo); err != nil { - t.Fatalf("did not gracefully delete resource", err) + t.Fatalf("did not gracefully delete resource: %v", err) } // second delete is immediate, resource is deleted @@ -679,12 +852,12 @@ func (t *Tester) testDeleteGracefulImmediate(obj runtime.Object, setFn SetFunc, } } -func (t *Tester) testDeleteGracefulUsesZeroOnNil(obj runtime.Object, setFn SetFunc, expectedGrace int64) { +func (t *Tester) testDeleteGracefulUsesZeroOnNil(obj runtime.Object, createFn CreateFunc, expectedGrace int64) { ctx := t.TestContext() foo := copyOrDie(obj) - t.setObjectMeta(foo, "foo5") - if err := setFn(ctx, foo); err != nil { + t.setObjectMeta(foo, t.namer(5)) + if err := createFn(ctx, foo); err != nil { t.Errorf("unexpected error: %v", err) } objectMeta := t.getObjectMetaOrFail(foo) @@ -707,7 +880,7 @@ func (t *Tester) testGetDifferentNamespace(obj runtime.Object) { } objMeta := t.getObjectMetaOrFail(obj) - objMeta.Name = "foo5" + objMeta.Name = t.namer(5) ctx1 := api.WithNamespace(api.NewContext(), "bar3") objMeta.Namespace = api.NamespaceValue(ctx1) @@ -750,7 +923,7 @@ func (t *Tester) testGetDifferentNamespace(obj runtime.Object) { func (t *Tester) testGetFound(obj runtime.Object) { ctx := t.TestContext() - t.setObjectMeta(obj, "foo1") + t.setObjectMeta(obj, t.namer(1)) existing, err := t.storage.(rest.Creater).Create(ctx, obj) if err != nil { @@ -758,7 +931,7 @@ func (t *Tester) testGetFound(obj runtime.Object) { } existingMeta := t.getObjectMetaOrFail(existing) - got, err := t.storage.(rest.Getter).Get(ctx, "foo1") + got, err := t.storage.(rest.Getter).Get(ctx, t.namer(1)) if err != nil { t.Errorf("unexpected error: %v", err) } @@ -773,13 +946,13 @@ func (t *Tester) testGetMimatchedNamespace(obj runtime.Object) { ctx1 := api.WithNamespace(api.NewContext(), "bar1") ctx2 := api.WithNamespace(api.NewContext(), "bar2") objMeta := t.getObjectMetaOrFail(obj) - objMeta.Name = "foo4" + objMeta.Name = t.namer(4) objMeta.Namespace = api.NamespaceValue(ctx1) _, err := t.storage.(rest.Creater).Create(ctx1, obj) if err != nil { t.Errorf("unexpected error: %v", err) } - _, err = t.storage.(rest.Getter).Get(ctx2, "foo4") + _, err = t.storage.(rest.Getter).Get(ctx2, t.namer(4)) if t.clusterScope { if err != nil { 
t.Errorf("unexpected error: %v", err) @@ -793,12 +966,12 @@ func (t *Tester) testGetMimatchedNamespace(obj runtime.Object) { func (t *Tester) testGetNotFound(obj runtime.Object) { ctx := t.TestContext() - t.setObjectMeta(obj, "foo2") + t.setObjectMeta(obj, t.namer(2)) _, err := t.storage.(rest.Creater).Create(ctx, obj) if err != nil { t.Errorf("unexpected error: %v", err) } - _, err = t.storage.(rest.Getter).Get(ctx, "foo3") + _, err = t.storage.(rest.Getter).Get(ctx, t.namer(3)) if !errors.IsNotFound(err) { t.Errorf("unexpected error returned: %#v", err) } @@ -830,9 +1003,9 @@ func (t *Tester) testListFound(obj runtime.Object, assignFn AssignFunc) { ctx := t.TestContext() foo1 := copyOrDie(obj) - t.setObjectMeta(foo1, "foo1") + t.setObjectMeta(foo1, t.namer(1)) foo2 := copyOrDie(obj) - t.setObjectMeta(foo2, "foo2") + t.setObjectMeta(foo2, t.namer(2)) existing := assignFn([]runtime.Object{foo1, foo2}) diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/rest/update.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/rest/update.go index 80ad14f866b0..bc5ed0c5f866 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/rest/update.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/rest/update.go @@ -21,6 +21,7 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/errors" + "k8s.io/kubernetes/pkg/api/meta" "k8s.io/kubernetes/pkg/api/validation" "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/util/validation/field" @@ -103,3 +104,72 @@ func BeforeUpdate(strategy RESTUpdateStrategy, ctx api.Context, obj, old runtime return nil } + +// TransformFunc is a function to transform and return newObj +type TransformFunc func(ctx api.Context, newObj runtime.Object, oldObj runtime.Object) (transformedNewObj runtime.Object, err error) + +// defaultUpdatedObjectInfo implements UpdatedObjectInfo +type defaultUpdatedObjectInfo struct { + // obj is the updated object + obj runtime.Object + + // copier makes a copy of the object before returning it. + // this allows repeated calls to UpdatedObject() to return + // pristine data, even if the returned value is mutated. + copier runtime.ObjectCopier + + // transformers is an optional list of transforming functions that modify or + // replace obj using information from the context, old object, or other sources. + transformers []TransformFunc +} + +// DefaultUpdatedObjectInfo returns an UpdatedObjectInfo impl based on the specified object. +func DefaultUpdatedObjectInfo(obj runtime.Object, copier runtime.ObjectCopier, transformers ...TransformFunc) UpdatedObjectInfo { + return &defaultUpdatedObjectInfo{obj, copier, transformers} +} + +// Preconditions satisfies the UpdatedObjectInfo interface. +func (i *defaultUpdatedObjectInfo) Preconditions() *api.Preconditions { + // Attempt to get the UID out of the object + accessor, err := meta.Accessor(i.obj) + if err != nil { + // If no UID can be read, no preconditions are possible + return nil + } + + // If empty, no preconditions needed + uid := accessor.GetUID() + if len(uid) == 0 { + return nil + } + + return &api.Preconditions{UID: &uid} +} + +// UpdatedObject satisfies the UpdatedObjectInfo interface. +// It returns a copy of the held obj, passed through any configured transformers. 
+func (i *defaultUpdatedObjectInfo) UpdatedObject(ctx api.Context, oldObj runtime.Object) (runtime.Object, error) { + var err error + // Start with the configured object + newObj := i.obj + + // If the original is non-nil (might be nil if the first transformer builds the object from the oldObj), make a copy, + // so we don't return the original. BeforeUpdate can mutate the returned object, doing things like clearing ResourceVersion. + // If we're re-called, we need to be able to return the pristine version. + if newObj != nil { + newObj, err = i.copier.Copy(newObj) + if err != nil { + return nil, err + } + } + + // Allow any configured transformers to update the new object + for _, transformer := range i.transformers { + newObj, err = transformer(ctx, newObj, oldObj) + if err != nil { + return nil, err + } + } + + return newObj, nil +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/serialization_proto_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/serialization_proto_test.go new file mode 100644 index 000000000000..36fce27a1c05 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/serialization_proto_test.go @@ -0,0 +1,185 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package api_test + +import ( + "bytes" + "encoding/hex" + "math/rand" + "testing" + + "github.com/gogo/protobuf/proto" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/testapi" + apitesting "k8s.io/kubernetes/pkg/api/testing" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/api/v1" + _ "k8s.io/kubernetes/pkg/apis/extensions" + _ "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/runtime/serializer/protobuf" + "k8s.io/kubernetes/pkg/util/diff" +) + +func init() { + codecsToTest = append(codecsToTest, func(version unversioned.GroupVersion, item runtime.Object) (runtime.Codec, error) { + s := protobuf.NewSerializer(api.Scheme, api.Scheme, "application/arbitrary.content.type") + return api.Codecs.CodecForVersions(s, s, testapi.ExternalGroupVersions(), nil), nil + }) +} + +func TestUniversalDeserializer(t *testing.T) { + expected := &v1.Pod{ObjectMeta: v1.ObjectMeta{Name: "test"}} + d := api.Codecs.UniversalDeserializer() + for _, mediaType := range []string{"application/json", "application/yaml", "application/vnd.kubernetes.protobuf"} { + e, ok := api.Codecs.SerializerForMediaType(mediaType, nil) + if !ok { + t.Fatal(mediaType) + } + buf := &bytes.Buffer{} + if err := e.EncodeToStream(expected, buf); err != nil { + t.Fatalf("%s: %v", mediaType, err) + } + obj, _, err := d.Decode(buf.Bytes(), &unversioned.GroupVersionKind{Kind: "Pod", Version: "v1"}, nil) + if err != nil { + t.Fatalf("%s: %v", mediaType, err) + } + if !api.Semantic.DeepEqual(expected, obj) { + t.Fatalf("%s: %#v", mediaType, obj) + } + } +} + +func TestProtobufRoundTrip(t *testing.T) { + obj := &v1.Pod{} + apitesting.FuzzerFor(t, v1.SchemeGroupVersion, rand.NewSource(benchmarkSeed)).Fuzz(obj) + // InitContainers are turned into annotations by conversion. + obj.Spec.InitContainers = nil + obj.Status.InitContainerStatuses = nil + data, err := obj.Marshal() + if err != nil { + t.Fatal(err) + } + out := &v1.Pod{} + if err := out.Unmarshal(data); err != nil { + t.Fatal(err) + } + if !api.Semantic.Equalities.DeepEqual(out, obj) { + t.Logf("marshal\n%s", hex.Dump(data)) + t.Fatalf("Unmarshal is unequal\n%s", diff.ObjectGoPrintDiff(out, obj)) + } +} + +// BenchmarkEncodeCodec measures the cost of performing a codec encode, which includes +// reflection (to clear APIVersion and Kind) +func BenchmarkEncodeCodecProtobuf(b *testing.B) { + items := benchmarkItems() + width := len(items) + s := protobuf.NewSerializer(nil, nil, "application/arbitrary.content.type") + b.ResetTimer() + for i := 0; i < b.N; i++ { + if _, err := runtime.Encode(s, &items[i%width]); err != nil { + b.Fatal(err) + } + } + b.StopTimer() +} + +// BenchmarkEncodeCodecFromInternalProtobuf measures the cost of performing a codec encode, +// including conversions and any type setting. This is a "full" encode. 
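+// +// The "full" path, roughly (illustrative; "internalPod" is an assumed variable): convert the +// internal object to v1 via api.Scheme.Convert, then encode with a versioned codec: +// +//	codec := api.Codecs.EncoderForVersion(s, v1.SchemeGroupVersion) +//	data, err := runtime.Encode(codec, internalPod)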
+func BenchmarkEncodeCodecFromInternalProtobuf(b *testing.B) { + items := benchmarkItems() + width := len(items) + encodable := make([]api.Pod, width) + for i := range items { + if err := api.Scheme.Convert(&items[i], &encodable[i]); err != nil { + b.Fatal(err) + } + } + s := protobuf.NewSerializer(nil, nil, "application/arbitrary.content.type") + codec := api.Codecs.EncoderForVersion(s, v1.SchemeGroupVersion) + b.ResetTimer() + for i := 0; i < b.N; i++ { + if _, err := runtime.Encode(codec, &encodable[i%width]); err != nil { + b.Fatal(err) + } + } + b.StopTimer() +} + +func BenchmarkEncodeProtobufGeneratedMarshal(b *testing.B) { + items := benchmarkItems() + width := len(items) + b.ResetTimer() + for i := 0; i < b.N; i++ { + if _, err := items[i%width].Marshal(); err != nil { + b.Fatal(err) + } + } + b.StopTimer() +} + +// BenchmarkDecodeCodecToInternalProtobuf measures the cost of performing a codec decode, +// including conversions and any type setting. This is a "full" decode. +func BenchmarkDecodeCodecToInternalProtobuf(b *testing.B) { + items := benchmarkItems() + width := len(items) + s := protobuf.NewSerializer(api.Scheme, api.Scheme, "application/arbitrary.content.type") + encoder := api.Codecs.EncoderForVersion(s, v1.SchemeGroupVersion) + var encoded [][]byte + for i := range items { + data, err := runtime.Encode(encoder, &items[i]) + if err != nil { + b.Fatal(err) + } + encoded = append(encoded, data) + } + + decoder := api.Codecs.DecoderToVersion(s, api.SchemeGroupVersion) + b.ResetTimer() + for i := 0; i < b.N; i++ { + if _, err := runtime.Decode(decoder, encoded[i%width]); err != nil { + b.Fatal(err) + } + } + b.StopTimer() +} + +// BenchmarkDecodeIntoProtobuf provides a baseline for generated protobuf unmarshal performance +func BenchmarkDecodeIntoProtobuf(b *testing.B) { + items := benchmarkItems() + width := len(items) + encoded := make([][]byte, width) + for i := range items { + data, err := (&items[i]).Marshal() + if err != nil { + b.Fatal(err) + } + encoded[i] = data + validate := &v1.Pod{} + if err := proto.Unmarshal(data, validate); err != nil { + b.Fatalf("Failed to unmarshal %d: %v\n%#v", i, err, items[i]) + } + } + + for i := 0; i < b.N; i++ { + obj := v1.Pod{} + if err := proto.Unmarshal(encoded[i%width], &obj); err != nil { + b.Fatal(err) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/serialization_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/serialization_test.go new file mode 100644 index 000000000000..27a2e3526410 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/serialization_test.go @@ -0,0 +1,546 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package api_test + +import ( + "bytes" + "encoding/hex" + "encoding/json" + "fmt" + "io/ioutil" + "math/rand" + "reflect" + "strings" + "testing" + + "github.com/davecgh/go-spew/spew" + proto "github.com/golang/protobuf/proto" + flag "github.com/spf13/pflag" + "github.com/ugorji/go/codec" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/meta" + "k8s.io/kubernetes/pkg/api/testapi" + apitesting "k8s.io/kubernetes/pkg/api/testing" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/runtime/serializer/streaming" + "k8s.io/kubernetes/pkg/util/diff" + "k8s.io/kubernetes/pkg/util/sets" + "k8s.io/kubernetes/pkg/watch" + "k8s.io/kubernetes/pkg/watch/versioned" +) + +var fuzzIters = flag.Int("fuzz-iters", 20, "How many fuzzing iterations to do.") + +var codecsToTest = []func(version unversioned.GroupVersion, item runtime.Object) (runtime.Codec, error){ + func(version unversioned.GroupVersion, item runtime.Object) (runtime.Codec, error) { + return testapi.GetCodecForObject(item) + }, +} + +func fuzzInternalObject(t *testing.T, forVersion unversioned.GroupVersion, item runtime.Object, seed int64) runtime.Object { + apitesting.FuzzerFor(t, forVersion, rand.NewSource(seed)).Fuzz(item) + + j, err := meta.TypeAccessor(item) + if err != nil { + t.Fatalf("Unexpected error %v for %#v", err, item) + } + j.SetKind("") + j.SetAPIVersion("") + + return item +} + +func dataAsString(data []byte) string { + dataString := string(data) + if !strings.HasPrefix(dataString, "{") { + dataString = "\n" + hex.Dump(data) + proto.NewBuffer(make([]byte, 0, 1024)).DebugPrint("decoded object", data) + } + return dataString +} + +func roundTrip(t *testing.T, codec runtime.Codec, item runtime.Object) { + printer := spew.ConfigState{DisableMethods: true} + + original := item + copied, err := api.Scheme.DeepCopy(item) + if err != nil { + panic(fmt.Sprintf("unable to copy: %v", err)) + } + item = copied.(runtime.Object) + + name := reflect.TypeOf(item).Elem().Name() + data, err := runtime.Encode(codec, item) + if err != nil { + t.Errorf("%v: %v (%s)", name, err, printer.Sprintf("%#v", item)) + return + } + + if !api.Semantic.DeepEqual(original, item) { + t.Errorf("0: %v: encode altered the object, diff: %v", name, diff.ObjectReflectDiff(original, item)) + return + } + + obj2, err := runtime.Decode(codec, data) + if err != nil { + t.Errorf("0: %v: %v\nCodec: %#v\nData: %s\nSource: %#v", name, err, codec, dataAsString(data), printer.Sprintf("%#v", item)) + panic("failed") + } + if !api.Semantic.DeepEqual(original, obj2) { + t.Errorf("\n1: %v: diff: %v\nCodec: %#v\nSource:\n\n%#v\n\nEncoded:\n\n%s\n\nFinal:\n\n%#v", name, diff.ObjectReflectDiff(item, obj2), codec, printer.Sprintf("%#v", item), dataAsString(data), printer.Sprintf("%#v", obj2)) + return + } + + obj3 := reflect.New(reflect.TypeOf(item).Elem()).Interface().(runtime.Object) + if err := runtime.DecodeInto(codec, data, obj3); err != nil { + t.Errorf("2: %v: %v", name, err) + return + } + if !api.Semantic.DeepEqual(item, obj3) { + t.Errorf("3: %v: diff: %v\nCodec: %#v", name, diff.ObjectReflectDiff(item, obj3), codec) + return + } +} + +// roundTripSame verifies the same source object is tested in all API versions. +func roundTripSame(t *testing.T, group testapi.TestGroup, item runtime.Object, except ...string) { + set := sets.NewString(except...) 
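+ // Use a single seed for both fuzz passes so each codec under test round-trips
+ // an identical source object.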
+ seed := rand.Int63() + fuzzInternalObject(t, group.InternalGroupVersion(), item, seed) + + version := *group.GroupVersion() + codecs := []runtime.Codec{} + for _, fn := range codecsToTest { + codec, err := fn(version, item) + if err != nil { + t.Errorf("unable to get codec: %v", err) + return + } + codecs = append(codecs, codec) + } + + if !set.Has(version.String()) { + fuzzInternalObject(t, version, item, seed) + for _, codec := range codecs { + roundTrip(t, codec, item) + } + } +} + +// For debugging problems +func TestSpecificKind(t *testing.T) { + kind := "DaemonSet" + for i := 0; i < *fuzzIters; i++ { + doRoundTripTest(testapi.Groups["extensions"], kind, t) + if t.Failed() { + break + } + } +} + +func TestList(t *testing.T) { + kind := "List" + item, err := api.Scheme.New(api.SchemeGroupVersion.WithKind(kind)) + if err != nil { + t.Errorf("Couldn't make a %v? %v", kind, err) + return + } + roundTripSame(t, testapi.Default, item) +} + +var nonRoundTrippableTypes = sets.NewString( + "ExportOptions", + // WatchEvent does not include kind and version and can only be deserialized + // implicitly (if the caller expects the specific object). The watch call defines + // the schema by content type, rather than via kind/version included in each + // object. + "WatchEvent", +) + +var nonInternalRoundTrippableTypes = sets.NewString("List", "ListOptions", "ExportOptions") +var nonRoundTrippableTypesByVersion = map[string][]string{} + +func TestRoundTripTypes(t *testing.T) { + for groupKey, group := range testapi.Groups { + for kind := range group.InternalTypes() { + t.Logf("working on %v in %v", kind, groupKey) + if nonRoundTrippableTypes.Has(kind) { + continue + } + // Try a few times, since runTest uses random values. + for i := 0; i < *fuzzIters; i++ { + doRoundTripTest(group, kind, t) + if t.Failed() { + break + } + } + } + } +} + +func doRoundTripTest(group testapi.TestGroup, kind string, t *testing.T) { + item, err := api.Scheme.New(group.InternalGroupVersion().WithKind(kind)) + if err != nil { + t.Fatalf("Couldn't make a %v? %v", kind, err) + } + if _, err := meta.TypeAccessor(item); err != nil { + t.Fatalf("%q is not a TypeMeta and cannot be tested - add it to nonRoundTrippableTypes: %v", kind, err) + } + if api.Scheme.Recognizes(group.GroupVersion().WithKind(kind)) { + roundTripSame(t, group, item, nonRoundTrippableTypesByVersion[kind]...) 
+ } + if !nonInternalRoundTrippableTypes.Has(kind) && api.Scheme.Recognizes(group.GroupVersion().WithKind(kind)) { + roundTrip(t, group.Codec(), fuzzInternalObject(t, group.InternalGroupVersion(), item, rand.Int63())) + } +} + +func TestEncode_Ptr(t *testing.T) { + grace := int64(30) + pod := &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Labels: map[string]string{"name": "foo"}, + }, + Spec: api.PodSpec{ + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + + TerminationGracePeriodSeconds: &grace, + + SecurityContext: &api.PodSecurityContext{}, + }, + } + obj := runtime.Object(pod) + data, err := runtime.Encode(testapi.Default.Codec(), obj) + obj2, err2 := runtime.Decode(testapi.Default.Codec(), data) + if err != nil || err2 != nil { + t.Fatalf("Failure: '%v' '%v'", err, err2) + } + if _, ok := obj2.(*api.Pod); !ok { + t.Fatalf("Got wrong type") + } + if !api.Semantic.DeepEqual(obj2, pod) { + t.Errorf("\nExpected:\n\n %#v,\n\nGot:\n\n %#vDiff: %v\n\n", pod, obj2, diff.ObjectDiff(obj2, pod)) + + } +} + +func TestBadJSONRejection(t *testing.T) { + badJSONMissingKind := []byte(`{ }`) + if _, err := runtime.Decode(testapi.Default.Codec(), badJSONMissingKind); err == nil { + t.Errorf("Did not reject despite lack of kind field: %s", badJSONMissingKind) + } + badJSONUnknownType := []byte(`{"kind": "bar"}`) + if _, err1 := runtime.Decode(testapi.Default.Codec(), badJSONUnknownType); err1 == nil { + t.Errorf("Did not reject despite use of unknown type: %s", badJSONUnknownType) + } + /*badJSONKindMismatch := []byte(`{"kind": "Pod"}`) + if err2 := DecodeInto(badJSONKindMismatch, &Minion{}); err2 == nil { + t.Errorf("Kind is set but doesn't match the object type: %s", badJSONKindMismatch) + }*/ +} + +func TestUnversionedTypes(t *testing.T) { + testcases := []runtime.Object{ + &unversioned.Status{Status: "Failure", Message: "something went wrong"}, + &unversioned.APIVersions{Versions: []string{"A", "B", "C"}}, + &unversioned.APIGroupList{Groups: []unversioned.APIGroup{{Name: "mygroup"}}}, + &unversioned.APIGroup{Name: "mygroup"}, + &unversioned.APIResourceList{GroupVersion: "mygroup/myversion"}, + } + + for _, obj := range testcases { + // Make sure the unversioned codec can encode + unversionedJSON, err := runtime.Encode(testapi.Default.Codec(), obj) + if err != nil { + t.Errorf("%v: unexpected error: %v", obj, err) + continue + } + + // Make sure the versioned codec under test can decode + versionDecodedObject, err := runtime.Decode(testapi.Default.Codec(), unversionedJSON) + if err != nil { + t.Errorf("%v: unexpected error: %v", obj, err) + continue + } + // Make sure it decodes correctly + if !reflect.DeepEqual(obj, versionDecodedObject) { + t.Errorf("%v: expected %#v, got %#v", obj, obj, versionDecodedObject) + continue + } + } +} + +func TestObjectWatchFraming(t *testing.T) { + f := apitesting.FuzzerFor(nil, api.SchemeGroupVersion, rand.NewSource(benchmarkSeed)) + secret := &api.Secret{} + f.Fuzz(secret) + secret.Data["binary"] = []byte{0x00, 0x10, 0x30, 0x55, 0xff, 0x00} + secret.Data["utf8"] = []byte("a string with \u0345 characters") + secret.Data["long"] = bytes.Repeat([]byte{0x01, 0x02, 0x03, 0x00}, 1000) + converted, _ := api.Scheme.ConvertToVersion(secret, v1.SchemeGroupVersion) + v1secret := converted.(*v1.Secret) + for _, streamingMediaType := range api.Codecs.SupportedStreamingMediaTypes() { + s, _ := api.Codecs.StreamingSerializerForMediaType(streamingMediaType, nil) + framer := s.Framer + embedded := s.Embedded.Serializer + if embedded == nil { + t.Errorf("no 
embedded serializer for %s", streamingMediaType) + continue + } + innerDecode := api.Codecs.DecoderToVersion(embedded, api.SchemeGroupVersion) + + // write a single object through the framer and back out + obj := &bytes.Buffer{} + if err := s.EncodeToStream(v1secret, obj); err != nil { + t.Fatal(err) + } + out := &bytes.Buffer{} + w := framer.NewFrameWriter(out) + if n, err := w.Write(obj.Bytes()); err != nil || n != len(obj.Bytes()) { + t.Fatal(err) + } + sr := streaming.NewDecoder(framer.NewFrameReader(ioutil.NopCloser(out)), s) + resultSecret := &v1.Secret{} + res, _, err := sr.Decode(nil, resultSecret) + if err != nil { + t.Fatalf("%v:\n%s", err, hex.Dump(obj.Bytes())) + } + resultSecret.Kind = "Secret" + resultSecret.APIVersion = "v1" + if !api.Semantic.DeepEqual(v1secret, res) { + t.Fatalf("objects did not match: %s", diff.ObjectGoPrintDiff(v1secret, res)) + } + + // write a watch event through and back out + obj = &bytes.Buffer{} + if err := embedded.EncodeToStream(v1secret, obj); err != nil { + t.Fatal(err) + } + event := &versioned.Event{Type: string(watch.Added)} + event.Object.Raw = obj.Bytes() + obj = &bytes.Buffer{} + if err := s.EncodeToStream(event, obj); err != nil { + t.Fatal(err) + } + out = &bytes.Buffer{} + w = framer.NewFrameWriter(out) + if n, err := w.Write(obj.Bytes()); err != nil || n != len(obj.Bytes()) { + t.Fatal(err) + } + sr = streaming.NewDecoder(framer.NewFrameReader(ioutil.NopCloser(out)), s) + outEvent := &versioned.Event{} + res, _, err = sr.Decode(nil, outEvent) + if err != nil || outEvent.Type != string(watch.Added) { + t.Fatalf("%v: %#v", err, outEvent) + } + if outEvent.Object.Object == nil && outEvent.Object.Raw != nil { + outEvent.Object.Object, err = runtime.Decode(innerDecode, outEvent.Object.Raw) + if err != nil { + t.Fatalf("%v:\n%s", err, hex.Dump(outEvent.Object.Raw)) + } + } + + if !api.Semantic.DeepEqual(secret, outEvent.Object.Object) { + t.Fatalf("%s: did not match after frame decoding: %s", streamingMediaType, diff.ObjectGoPrintDiff(secret, outEvent.Object.Object)) + } + } +} + +const benchmarkSeed = 100 + +func benchmarkItems() []v1.Pod { + apiObjectFuzzer := apitesting.FuzzerFor(nil, api.SchemeGroupVersion, rand.NewSource(benchmarkSeed)) + items := make([]v1.Pod, 2) + for i := range items { + var pod api.Pod + apiObjectFuzzer.Fuzz(&pod) + pod.Spec.InitContainers, pod.Status.InitContainerStatuses = nil, nil + out, err := api.Scheme.ConvertToVersion(&pod, v1.SchemeGroupVersion) + if err != nil { + panic(err) + } + items[i] = *out.(*v1.Pod) + } + return items +} + +// BenchmarkEncodeCodec measures the cost of performing a codec encode, which includes +// reflection (to clear APIVersion and Kind) +func BenchmarkEncodeCodec(b *testing.B) { + items := benchmarkItems() + width := len(items) + b.ResetTimer() + for i := 0; i < b.N; i++ { + if _, err := runtime.Encode(testapi.Default.Codec(), &items[i%width]); err != nil { + b.Fatal(err) + } + } + b.StopTimer() +} + +// BenchmarkEncodeCodecFromInternal measures the cost of performing a codec encode, +// including conversions. 
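+// As with the protobuf variant above, the internal->v1 conversion happens inside
+// runtime.Encode on each iteration; only the serializer differs.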
+func BenchmarkEncodeCodecFromInternal(b *testing.B) { + items := benchmarkItems() + width := len(items) + encodable := make([]api.Pod, width) + for i := range items { + if err := api.Scheme.Convert(&items[i], &encodable[i]); err != nil { + b.Fatal(err) + } + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + if _, err := runtime.Encode(testapi.Default.Codec(), &encodable[i%width]); err != nil { + b.Fatal(err) + } + } + b.StopTimer() +} + +// BenchmarkEncodeJSONMarshal provides a baseline for regular JSON encode performance +func BenchmarkEncodeJSONMarshal(b *testing.B) { + items := benchmarkItems() + width := len(items) + b.ResetTimer() + for i := 0; i < b.N; i++ { + if _, err := json.Marshal(&items[i%width]); err != nil { + b.Fatal(err) + } + } + b.StopTimer() +} + +func BenchmarkDecodeCodec(b *testing.B) { + codec := testapi.Default.Codec() + items := benchmarkItems() + width := len(items) + encoded := make([][]byte, width) + for i := range items { + data, err := runtime.Encode(codec, &items[i]) + if err != nil { + b.Fatal(err) + } + encoded[i] = data + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + if _, err := runtime.Decode(codec, encoded[i%width]); err != nil { + b.Fatal(err) + } + } + b.StopTimer() +} + +func BenchmarkDecodeIntoExternalCodec(b *testing.B) { + codec := testapi.Default.Codec() + items := benchmarkItems() + width := len(items) + encoded := make([][]byte, width) + for i := range items { + data, err := runtime.Encode(codec, &items[i]) + if err != nil { + b.Fatal(err) + } + encoded[i] = data + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + obj := v1.Pod{} + if err := runtime.DecodeInto(codec, encoded[i%width], &obj); err != nil { + b.Fatal(err) + } + } + b.StopTimer() +} + +func BenchmarkDecodeIntoInternalCodec(b *testing.B) { + codec := testapi.Default.Codec() + items := benchmarkItems() + width := len(items) + encoded := make([][]byte, width) + for i := range items { + data, err := runtime.Encode(codec, &items[i]) + if err != nil { + b.Fatal(err) + } + encoded[i] = data + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + obj := api.Pod{} + if err := runtime.DecodeInto(codec, encoded[i%width], &obj); err != nil { + b.Fatal(err) + } + } + b.StopTimer() +} + +// BenchmarkDecodeJSON provides a baseline for regular JSON decode performance +func BenchmarkDecodeIntoJSON(b *testing.B) { + codec := testapi.Default.Codec() + items := benchmarkItems() + width := len(items) + encoded := make([][]byte, width) + for i := range items { + data, err := runtime.Encode(codec, &items[i]) + if err != nil { + b.Fatal(err) + } + encoded[i] = data + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + obj := v1.Pod{} + if err := json.Unmarshal(encoded[i%width], &obj); err != nil { + b.Fatal(err) + } + } + b.StopTimer() +} + +// BenchmarkDecodeJSON provides a baseline for codecgen JSON decode performance +func BenchmarkDecodeIntoJSONCodecGen(b *testing.B) { + kcodec := testapi.Default.Codec() + items := benchmarkItems() + width := len(items) + encoded := make([][]byte, width) + for i := range items { + data, err := runtime.Encode(kcodec, &items[i]) + if err != nil { + b.Fatal(err) + } + encoded[i] = data + } + handler := &codec.JsonHandle{} + + b.ResetTimer() + for i := 0; i < b.N; i++ { + obj := v1.Pod{} + if err := codec.NewDecoderBytes(encoded[i%width], handler).Decode(&obj); err != nil { + b.Fatal(err) + } + } + b.StopTimer() +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/service/util.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/service/util.go index 
a77e5b9c70b4..b6611d2370d1 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/service/util.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/service/util.go @@ -20,6 +20,7 @@ import ( "fmt" "strings" + "k8s.io/kubernetes/pkg/api" netsets "k8s.io/kubernetes/pkg/util/net/sets" ) @@ -37,18 +38,31 @@ func IsAllowAll(ipnets netsets.IPNet) bool { return false } -// GetLoadBalancerSourceRanges verifies and parses the AnnotationLoadBalancerSourceRangesKey annotation from a service, +// GetLoadBalancerSourceRanges first tries to parse and verify the LoadBalancerSourceRanges field from a service. +// If the field is not specified, it falls back to parsing and verifying the AnnotationLoadBalancerSourceRangesKey annotation on the service, // extracting the source ranges to allow, and if not present returns a default (allow-all) value. -func GetLoadBalancerSourceRanges(annotations map[string]string) (netsets.IPNet, error) { - val := annotations[AnnotationLoadBalancerSourceRangesKey] - val = strings.TrimSpace(val) - if val == "" { - val = defaultLoadBalancerSourceRanges - } - specs := strings.Split(val, ",") - ipnets, err := netsets.ParseIPNets(specs...) - if err != nil { - return nil, fmt.Errorf("Service annotation %s:%s is not valid. Expecting a comma-separated list of source IP ranges. For example, 10.0.0.0/24,192.168.2.0/24", AnnotationLoadBalancerSourceRangesKey, val) +func GetLoadBalancerSourceRanges(service *api.Service) (netsets.IPNet, error) { + var ipnets netsets.IPNet + var err error + // if the LoadBalancerSourceRanges field is specified, ignore the sourceRanges annotation + if len(service.Spec.LoadBalancerSourceRanges) > 0 { + specs := service.Spec.LoadBalancerSourceRanges + ipnets, err = netsets.ParseIPNets(specs...) + + if err != nil { + return nil, fmt.Errorf("service.Spec.LoadBalancerSourceRanges: %v is not valid. Expecting a list of IP ranges. For example, 10.0.0.0/24. Error msg: %v", specs, err) + } + } else { + val := service.Annotations[AnnotationLoadBalancerSourceRangesKey] + val = strings.TrimSpace(val) + if val == "" { + val = defaultLoadBalancerSourceRanges + } + specs := strings.Split(val, ",") + ipnets, err = netsets.ParseIPNets(specs...) + if err != nil { + return nil, fmt.Errorf("%s: %s is not valid. Expecting a comma-separated list of source IP ranges. For example, 10.0.0.0/24,192.168.2.0/24", AnnotationLoadBalancerSourceRangesKey, val) + } } return ipnets, nil } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/service/util_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/service/util_test.go new file mode 100644 index 000000000000..a13f1e588d5b --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/service/util_test.go @@ -0,0 +1,130 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package service + +import ( + "testing" + + "k8s.io/kubernetes/pkg/api" + netsets "k8s.io/kubernetes/pkg/util/net/sets" + "strings" +) + +func TestGetLoadBalancerSourceRanges(t *testing.T) { + checkError := func(v string) { + annotations := make(map[string]string) + annotations[AnnotationLoadBalancerSourceRangesKey] = v + svc := api.Service{} + svc.Annotations = annotations + _, err := GetLoadBalancerSourceRanges(&svc) + if err == nil { + t.Errorf("Expected error parsing: %q", v) + } + svc = api.Service{} + svc.Spec.LoadBalancerSourceRanges = strings.Split(v, ",") + _, err = GetLoadBalancerSourceRanges(&svc) + if err == nil { + t.Errorf("Expected error parsing: %q", v) + } + } + checkError("10.0.0.1/33") + checkError("foo.bar") + checkError("10.0.0.1/32,*") + checkError("10.0.0.1/32,") + checkError("10.0.0.1/32, ") + checkError("10.0.0.1") + + checkOK := func(v string) netsets.IPNet { + annotations := make(map[string]string) + annotations[AnnotationLoadBalancerSourceRangesKey] = v + svc := api.Service{} + svc.Annotations = annotations + cidrs, err := GetLoadBalancerSourceRanges(&svc) + if err != nil { + t.Errorf("Unexpected error parsing: %q", v) + } + svc = api.Service{} + svc.Spec.LoadBalancerSourceRanges = strings.Split(v, ",") + cidrs, err = GetLoadBalancerSourceRanges(&svc) + if err != nil { + t.Errorf("Unexpected error parsing: %q", v) + } + return cidrs + } + cidrs := checkOK("192.168.0.1/32") + if len(cidrs) != 1 { + t.Errorf("Expected exactly one CIDR: %v", cidrs.StringSlice()) + } + cidrs = checkOK("192.168.0.1/32,192.168.0.1/32") + if len(cidrs) != 1 { + t.Errorf("Expected exactly one CIDR (after de-dup): %v", cidrs.StringSlice()) + } + cidrs = checkOK("192.168.0.1/32,192.168.0.2/32") + if len(cidrs) != 2 { + t.Errorf("Expected two CIDRs: %v", cidrs.StringSlice()) + } + cidrs = checkOK(" 192.168.0.1/32 , 192.168.0.2/32 ") + if len(cidrs) != 2 { + t.Errorf("Expected two CIDRs: %v", cidrs.StringSlice()) + } + // check LoadBalancerSourceRanges not specified + svc := api.Service{} + cidrs, err := GetLoadBalancerSourceRanges(&svc) + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + if len(cidrs) != 1 { + t.Errorf("Expected exactly one CIDR: %v", cidrs.StringSlice()) + } + if !IsAllowAll(cidrs) { + t.Errorf("Expected default to be allow-all: %v", cidrs.StringSlice()) + } + // check SourceRanges annotation is empty + annotations := make(map[string]string) + annotations[AnnotationLoadBalancerSourceRangesKey] = "" + svc = api.Service{} + svc.Annotations = annotations + cidrs, err = GetLoadBalancerSourceRanges(&svc) + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + if len(cidrs) != 1 { + t.Errorf("Expected exactly one CIDR: %v", cidrs.StringSlice()) + } + if !IsAllowAll(cidrs) { + t.Errorf("Expected default to be allow-all: %v", cidrs.StringSlice()) + } +} + +func TestAllowAll(t *testing.T) { + checkAllowAll := func(allowAll bool, cidrs ...string) { + ipnets, err := netsets.ParseIPNets(cidrs...) 
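+ // A parse failure below means the test's own CIDR input was malformed, rather
+ // than IsAllowAll misbehaving.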
+ if err != nil { + t.Errorf("Unexpected error parsing cidrs: %v", cidrs) + } + if allowAll != IsAllowAll(ipnets) { + t.Errorf("IsAllowAll did not return expected value for %v", cidrs) + } + } + checkAllowAll(false, "10.0.0.1/32") + checkAllowAll(false, "10.0.0.1/32", "10.0.0.2/32") + checkAllowAll(false, "10.0.0.1/32", "10.0.0.1/32") + + checkAllowAll(true, "0.0.0.0/0") + checkAllowAll(true, "192.168.0.0/0") + checkAllowAll(true, "192.168.0.1/32", "0.0.0.0/0") +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/testapi/testapi.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/testapi/testapi.go index d5d5452adc66..f738acc5889b 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/testapi/testapi.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/testapi/testapi.go @@ -19,25 +19,35 @@ package testapi import ( "fmt" + "mime" "os" "reflect" "strings" + "k8s.io/kubernetes/federation/apis/federation" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/meta" "k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/apimachinery/registered" + "k8s.io/kubernetes/pkg/apis/apps" "k8s.io/kubernetes/pkg/apis/autoscaling" "k8s.io/kubernetes/pkg/apis/batch" "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/pkg/apis/policy" + "k8s.io/kubernetes/pkg/apis/rbac" "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/runtime/serializer/recognizer" + _ "k8s.io/kubernetes/federation/apis/federation/install" _ "k8s.io/kubernetes/pkg/api/install" + _ "k8s.io/kubernetes/pkg/apis/apps/install" _ "k8s.io/kubernetes/pkg/apis/autoscaling/install" _ "k8s.io/kubernetes/pkg/apis/batch/install" _ "k8s.io/kubernetes/pkg/apis/componentconfig/install" _ "k8s.io/kubernetes/pkg/apis/extensions/install" _ "k8s.io/kubernetes/pkg/apis/metrics/install" + _ "k8s.io/kubernetes/pkg/apis/policy/install" + _ "k8s.io/kubernetes/pkg/apis/rbac/install" ) var ( @@ -46,15 +56,47 @@ var ( Autoscaling TestGroup Batch TestGroup Extensions TestGroup + Apps TestGroup + Policy TestGroup + Federation TestGroup + Rbac TestGroup + + serializer runtime.SerializerInfo + storageSerializer runtime.SerializerInfo ) type TestGroup struct { - externalGroupVersion unversioned.GroupVersion - internalGroupVersion unversioned.GroupVersion - internalTypes map[string]reflect.Type + // the first element in the group is meant to be the preferred version + externalGroupVersions []unversioned.GroupVersion + internalGroupVersion unversioned.GroupVersion + internalTypes map[string]reflect.Type } func init() { + if apiMediaType := os.Getenv("KUBE_TEST_API_TYPE"); len(apiMediaType) > 0 { + var ok bool + mediaType, options, err := mime.ParseMediaType(apiMediaType) + if err != nil { + panic(err) + } + serializer, ok = api.Codecs.SerializerForMediaType(mediaType, options) + if !ok { + panic(fmt.Sprintf("no serializer for %s", apiMediaType)) + } + } + + if storageMediaType := StorageMediaType(); len(storageMediaType) > 0 { + var ok bool + mediaType, options, err := mime.ParseMediaType(storageMediaType) + if err != nil { + panic(err) + } + storageSerializer, ok = api.Codecs.SerializerForMediaType(mediaType, options) + if !ok { + panic(fmt.Sprintf("no serializer for %s", storageMediaType)) + } + } + kubeTestAPI := os.Getenv("KUBE_TEST_API") if len(kubeTestAPI) != 0 { testGroupVersions := strings.Split(kubeTestAPI, ",") @@ -65,26 +107,31 @@ func init() { } internalGroupVersion := unversioned.GroupVersion{Group: groupVersion.Group, Version: runtime.APIVersionInternal} - Groups[groupVersion.Group] = TestGroup{ - 
externalGroupVersion: groupVersion, - internalGroupVersion: internalGroupVersion, - internalTypes: api.Scheme.KnownTypes(internalGroupVersion), + if group, ok := Groups[groupVersion.Group]; !ok { + Groups[groupVersion.Group] = TestGroup{ + externalGroupVersions: []unversioned.GroupVersion{groupVersion}, + internalGroupVersion: internalGroupVersion, + internalTypes: api.Scheme.KnownTypes(internalGroupVersion), + } + } else { + group.externalGroupVersions = append(group.externalGroupVersions, groupVersion) + Groups[groupVersion.Group] = group } } } if _, ok := Groups[api.GroupName]; !ok { Groups[api.GroupName] = TestGroup{ - externalGroupVersion: unversioned.GroupVersion{Group: api.GroupName, Version: registered.GroupOrDie(api.GroupName).GroupVersion.Version}, - internalGroupVersion: api.SchemeGroupVersion, - internalTypes: api.Scheme.KnownTypes(api.SchemeGroupVersion), + externalGroupVersions: []unversioned.GroupVersion{{Group: api.GroupName, Version: registered.GroupOrDie(api.GroupName).GroupVersion.Version}}, + internalGroupVersion: api.SchemeGroupVersion, + internalTypes: api.Scheme.KnownTypes(api.SchemeGroupVersion), } } if _, ok := Groups[extensions.GroupName]; !ok { Groups[extensions.GroupName] = TestGroup{ - externalGroupVersion: unversioned.GroupVersion{Group: extensions.GroupName, Version: registered.GroupOrDie(extensions.GroupName).GroupVersion.Version}, - internalGroupVersion: extensions.SchemeGroupVersion, - internalTypes: api.Scheme.KnownTypes(extensions.SchemeGroupVersion), + externalGroupVersions: []unversioned.GroupVersion{{Group: extensions.GroupName, Version: registered.GroupOrDie(extensions.GroupName).GroupVersion.Version}}, + internalGroupVersion: extensions.SchemeGroupVersion, + internalTypes: api.Scheme.KnownTypes(extensions.SchemeGroupVersion), } } if _, ok := Groups[autoscaling.GroupName]; !ok { @@ -96,9 +143,9 @@ func init() { internalTypes[k] = t } Groups[autoscaling.GroupName] = TestGroup{ - externalGroupVersion: unversioned.GroupVersion{Group: autoscaling.GroupName, Version: registered.GroupOrDie(autoscaling.GroupName).GroupVersion.Version}, - internalGroupVersion: extensions.SchemeGroupVersion, - internalTypes: internalTypes, + externalGroupVersions: []unversioned.GroupVersion{{Group: autoscaling.GroupName, Version: registered.GroupOrDie(autoscaling.GroupName).GroupVersion.Version}}, + internalGroupVersion: extensions.SchemeGroupVersion, + internalTypes: internalTypes, } } if _, ok := Groups[autoscaling.GroupName+"IntraGroup"]; !ok { @@ -110,23 +157,59 @@ func init() { } } Groups[autoscaling.GroupName] = TestGroup{ - externalGroupVersion: unversioned.GroupVersion{Group: autoscaling.GroupName, Version: registered.GroupOrDie(autoscaling.GroupName).GroupVersion.Version}, - internalGroupVersion: autoscaling.SchemeGroupVersion, - internalTypes: internalTypes, + externalGroupVersions: []unversioned.GroupVersion{{Group: autoscaling.GroupName, Version: registered.GroupOrDie(autoscaling.GroupName).GroupVersion.Version}}, + internalGroupVersion: autoscaling.SchemeGroupVersion, + internalTypes: internalTypes, } } if _, ok := Groups[batch.GroupName]; !ok { + var gvs []unversioned.GroupVersion + for _, gv := range registered.GroupOrDie(batch.GroupName).GroupVersions { + gvs = append(gvs, gv) + } Groups[batch.GroupName] = TestGroup{ - externalGroupVersion: unversioned.GroupVersion{Group: batch.GroupName, Version: registered.GroupOrDie(batch.GroupName).GroupVersion.Version}, - internalGroupVersion: extensions.SchemeGroupVersion, - internalTypes: 
api.Scheme.KnownTypes(extensions.SchemeGroupVersion), + externalGroupVersions: gvs, + internalGroupVersion: batch.SchemeGroupVersion, + internalTypes: api.Scheme.KnownTypes(batch.SchemeGroupVersion), + } + } + if _, ok := Groups[apps.GroupName]; !ok { + Groups[apps.GroupName] = TestGroup{ + externalGroupVersions: []unversioned.GroupVersion{{Group: apps.GroupName, Version: registered.GroupOrDie(apps.GroupName).GroupVersion.Version}}, + internalGroupVersion: extensions.SchemeGroupVersion, + internalTypes: api.Scheme.KnownTypes(extensions.SchemeGroupVersion), + } + } + if _, ok := Groups[policy.GroupName]; !ok { + Groups[policy.GroupName] = TestGroup{ + externalGroupVersions: []unversioned.GroupVersion{{Group: policy.GroupName, Version: registered.GroupOrDie(policy.GroupName).GroupVersion.Version}}, + internalGroupVersion: policy.SchemeGroupVersion, + internalTypes: api.Scheme.KnownTypes(policy.SchemeGroupVersion), + } + } + if _, ok := Groups[federation.GroupName]; !ok { + Groups[federation.GroupName] = TestGroup{ + externalGroupVersions: []unversioned.GroupVersion{{Group: federation.GroupName, Version: registered.GroupOrDie(federation.GroupName).GroupVersion.Version}}, + internalGroupVersion: federation.SchemeGroupVersion, + internalTypes: api.Scheme.KnownTypes(federation.SchemeGroupVersion), + } + } + if _, ok := Groups[rbac.GroupName]; !ok { + Groups[rbac.GroupName] = TestGroup{ + externalGroupVersions: []unversioned.GroupVersion{{Group: rbac.GroupName, Version: registered.GroupOrDie(rbac.GroupName).GroupVersion.Version}}, + internalGroupVersion: rbac.SchemeGroupVersion, + internalTypes: api.Scheme.KnownTypes(rbac.SchemeGroupVersion), } } Default = Groups[api.GroupName] Autoscaling = Groups[autoscaling.GroupName] Batch = Groups[batch.GroupName] + Apps = Groups[apps.GroupName] + Policy = Groups[policy.GroupName] Extensions = Groups[extensions.GroupName] + Federation = Groups[federation.GroupName] + Rbac = Groups[rbac.GroupName] } func (g TestGroup) ContentConfig() (string, *unversioned.GroupVersion, runtime.Codec) { @@ -134,10 +217,14 @@ func (g TestGroup) ContentConfig() (string, *unversioned.GroupVersion, runtime.C } func (g TestGroup) GroupVersion() *unversioned.GroupVersion { - copyOfGroupVersion := g.externalGroupVersion + copyOfGroupVersion := g.externalGroupVersions[0] return &copyOfGroupVersion } +func (g TestGroup) GroupVersions() []unversioned.GroupVersion { + return append([]unversioned.GroupVersion{}, g.externalGroupVersions...) +} + // InternalGroupVersion returns the group,version used to identify the internal // types for this API func (g TestGroup) InternalGroupVersion() unversioned.GroupVersion { @@ -150,15 +237,46 @@ func (g TestGroup) InternalTypes() map[string]reflect.Type { } // Codec returns the codec for the API version to test against, as set by the -// KUBE_TEST_API env var. +// KUBE_TEST_API_TYPE env var. func (g TestGroup) Codec() runtime.Codec { - return api.Codecs.LegacyCodec(g.externalGroupVersion) + if serializer.Serializer == nil { + return api.Codecs.LegacyCodec(g.externalGroupVersions[0]) + } + return api.Codecs.CodecForVersions(serializer, api.Codecs.UniversalDeserializer(), g.externalGroupVersions, nil) +} + +// NegotiatedSerializer returns the negotiated serializer for the server.
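+// api.Codecs itself implements runtime.NegotiatedSerializer, so the accessor below
+// simply returns it. A minimal sketch of assumed use, mirroring the media-type
+// lookups elsewhere in this package:
+//   info, ok := Default.NegotiatedSerializer().SerializerForMediaType("application/json", nil)
+// where ok reports whether the media type is supported.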
+func (g TestGroup) NegotiatedSerializer() runtime.NegotiatedSerializer { + return api.Codecs +} + +func StorageMediaType() string { + return os.Getenv("KUBE_TEST_API_STORAGE_TYPE") +} + +// StorageCodec returns the codec for the API version to store in etcd, as set by the +// KUBE_TEST_API_STORAGE_TYPE env var. +func (g TestGroup) StorageCodec() runtime.Codec { + s := storageSerializer.Serializer + + if s == nil { + return api.Codecs.LegacyCodec(g.externalGroupVersions[0]) + } + + // etcd2 only supports string data - we must wrap any result before returning + // TODO: remove for etcd3 / make parameterizable + if !storageSerializer.EncodesAsText { + s = runtime.NewBase64Serializer(s) + } + ds := recognizer.NewDecoder(s, api.Codecs.UniversalDeserializer()) + + return api.Codecs.CodecForVersions(s, ds, g.externalGroupVersions, nil) } // Converter returns the api.Scheme for the API version to test against, as set by the // KUBE_TEST_API env var. func (g TestGroup) Converter() runtime.ObjectConvertor { - interfaces, err := registered.GroupOrDie(g.externalGroupVersion.Group).InterfacesFor(g.externalGroupVersion) + interfaces, err := registered.GroupOrDie(g.externalGroupVersions[0].Group).InterfacesFor(g.externalGroupVersions[0]) if err != nil { panic(err) } @@ -168,7 +286,7 @@ func (g TestGroup) Converter() runtime.ObjectConvertor { // MetadataAccessor returns the MetadataAccessor for the API version to test against, // as set by the KUBE_TEST_API env var. func (g TestGroup) MetadataAccessor() meta.MetadataAccessor { - interfaces, err := registered.GroupOrDie(g.externalGroupVersion.Group).InterfacesFor(g.externalGroupVersion) + interfaces, err := registered.GroupOrDie(g.externalGroupVersions[0].Group).InterfacesFor(g.externalGroupVersions[0]) if err != nil { panic(err) } @@ -179,18 +297,18 @@ func (g TestGroup) MetadataAccessor() meta.MetadataAccessor { // 'resource' should be the resource path, e.g. "pods" for the Pod type. 'name' should be // empty for lists. func (g TestGroup) SelfLink(resource, name string) string { - if g.externalGroupVersion.Group == api.GroupName { + if g.externalGroupVersions[0].Group == api.GroupName { if name == "" { - return fmt.Sprintf("/api/%s/%s", g.externalGroupVersion.Version, resource) + return fmt.Sprintf("/api/%s/%s", g.externalGroupVersions[0].Version, resource) } - return fmt.Sprintf("/api/%s/%s/%s", g.externalGroupVersion.Version, resource, name) + return fmt.Sprintf("/api/%s/%s/%s", g.externalGroupVersions[0].Version, resource, name) } else { // TODO: will need a /apis prefix once we have proper multi-group // support if name == "" { - return fmt.Sprintf("/apis/%s/%s/%s", g.externalGroupVersion.Group, g.externalGroupVersion.Version, resource) + return fmt.Sprintf("/apis/%s/%s/%s", g.externalGroupVersions[0].Group, g.externalGroupVersions[0].Version, resource) } - return fmt.Sprintf("/apis/%s/%s/%s/%s", g.externalGroupVersion.Group, g.externalGroupVersion.Version, resource, name) + return fmt.Sprintf("/apis/%s/%s/%s/%s", g.externalGroupVersions[0].Group, g.externalGroupVersions[0].Version, resource, name) } } @@ -199,12 +317,12 @@ func (g TestGroup) SelfLink(resource, name string) string { // /api/v1/watch/namespaces/foo/pods/pod0 for v1. 
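// For example, a hypothetical call (not part of this change):
//   Default.ResourcePathWithPrefix("watch", "pods", "foo", "pod0")
// yields /api/v1/watch/namespaces/foo/pods/pod0 for the core v1 group.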
func (g TestGroup) ResourcePathWithPrefix(prefix, resource, namespace, name string) string { var path string - if g.externalGroupVersion.Group == api.GroupName { - path = "/api/" + g.externalGroupVersion.Version + if g.externalGroupVersions[0].Group == api.GroupName { + path = "/api/" + g.externalGroupVersions[0].Version } else { // TODO: switch back once we have proper multiple group support // path = "/apis/" + g.Group + "/" + Version(group...) - path = "/apis/" + g.externalGroupVersion.Group + "/" + g.externalGroupVersion.Version + path = "/apis/" + g.externalGroupVersions[0].Group + "/" + g.externalGroupVersions[0].Version } if prefix != "" { @@ -232,15 +350,26 @@ func (g TestGroup) ResourcePath(resource, namespace, name string) string { } func (g TestGroup) RESTMapper() meta.RESTMapper { - return registered.GroupOrDie(g.externalGroupVersion.Group).RESTMapper + return registered.RESTMapper() +} + +// ExternalGroupVersions returns all external group versions allowed for the server. +func ExternalGroupVersions() []unversioned.GroupVersion { + versions := []unversioned.GroupVersion{} + for _, g := range Groups { + gv := g.GroupVersion() + versions = append(versions, *gv) + } + return versions } // Get codec based on runtime.Object func GetCodecForObject(obj runtime.Object) (runtime.Codec, error) { - kind, err := api.Scheme.ObjectKind(obj) + kinds, _, err := api.Scheme.ObjectKinds(obj) if err != nil { return nil, fmt.Errorf("unexpected encoding error: %v", err) } + kind := kinds[0] for _, group := range Groups { if group.GroupVersion().Group != kind.Group { @@ -263,5 +392,9 @@ func GetCodecForObject(obj runtime.Object) (runtime.Codec, error) { } func NewTestGroup(external, internal unversioned.GroupVersion, internalTypes map[string]reflect.Type) TestGroup { - return TestGroup{external, internal, internalTypes} + return TestGroup{ + externalGroupVersions: []unversioned.GroupVersion{external}, + internalGroupVersion: internal, + internalTypes: internalTypes, + } } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/testapi/testapi_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/testapi/testapi_test.go new file mode 100644 index 000000000000..aa049f91d868 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/testapi/testapi_test.go @@ -0,0 +1,137 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package testapi + +import ( + "encoding/json" + "reflect" + "testing" + + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/runtime" +) + +// TODO these tests don't add much value for testing things that have groups + +func TestResourcePathWithPrefix(t *testing.T) { + testCases := []struct { + prefix string + resource string + namespace string + name string + expected string + }{ + {"prefix", "resource", "mynamespace", "myresource", "/api/" + Default.GroupVersion().Version + "/prefix/namespaces/mynamespace/resource/myresource"}, + {"prefix", "resource", "", "myresource", "/api/" + Default.GroupVersion().Version + "/prefix/resource/myresource"}, + {"prefix", "resource", "mynamespace", "", "/api/" + Default.GroupVersion().Version + "/prefix/namespaces/mynamespace/resource"}, + {"prefix", "resource", "", "", "/api/" + Default.GroupVersion().Version + "/prefix/resource"}, + {"", "resource", "mynamespace", "myresource", "/api/" + Default.GroupVersion().Version + "/namespaces/mynamespace/resource/myresource"}, + } + for _, item := range testCases { + if actual := Default.ResourcePathWithPrefix(item.prefix, item.resource, item.namespace, item.name); actual != item.expected { + t.Errorf("Expected: %s, got: %s for prefix: %s, resource: %s, namespace: %s and name: %s", item.expected, actual, item.prefix, item.resource, item.namespace, item.name) + } + } +} + +func TestResourcePath(t *testing.T) { + testCases := []struct { + resource string + namespace string + name string + expected string + }{ + {"resource", "mynamespace", "myresource", "/api/" + Default.GroupVersion().Version + "/namespaces/mynamespace/resource/myresource"}, + {"resource", "", "myresource", "/api/" + Default.GroupVersion().Version + "/resource/myresource"}, + {"resource", "mynamespace", "", "/api/" + Default.GroupVersion().Version + "/namespaces/mynamespace/resource"}, + {"resource", "", "", "/api/" + Default.GroupVersion().Version + "/resource"}, + } + for _, item := range testCases { + if actual := Default.ResourcePath(item.resource, item.namespace, item.name); actual != item.expected { + t.Errorf("Expected: %s, got: %s for resource: %s, namespace: %s and name: %s", item.expected, actual, item.resource, item.namespace, item.name) + } + } +} + +var status = &unversioned.Status{ + Status: unversioned.StatusFailure, + Code: 200, + Reason: unversioned.StatusReasonUnknown, + Message: "", +} + +func TestV1EncodeDecodeStatus(t *testing.T) { + v1Codec := Default.Codec() + + encoded, err := runtime.Encode(v1Codec, status) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + typeMeta := unversioned.TypeMeta{} + if err := json.Unmarshal(encoded, &typeMeta); err != nil { + t.Errorf("unexpected error: %v", err) + } + if typeMeta.Kind != "Status" { + t.Errorf("Kind is not set to \"Status\". Got %v", string(encoded)) + } + if typeMeta.APIVersion != "v1" { + t.Errorf("APIVersion is not set to \"v1\". 
Got %v", string(encoded)) + } + decoded, err := runtime.Decode(v1Codec, encoded) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + if !reflect.DeepEqual(status, decoded) { + t.Errorf("expected: %#v, got: %#v", status, decoded) + } +} + +func testEncodeDecodeStatus(t *testing.T, codec runtime.Codec) { + encoded, err := runtime.Encode(codec, status) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + typeMeta := unversioned.TypeMeta{} + if err := json.Unmarshal(encoded, &typeMeta); err != nil { + t.Errorf("unexpected error: %v", err) + } + if typeMeta.Kind != "Status" { + t.Errorf("Kind is not set to \"Status\". Got %s", encoded) + } + if typeMeta.APIVersion != "v1" { + t.Errorf("APIVersion is not set to \"\". Got %s", encoded) + } + decoded, err := runtime.Decode(codec, encoded) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + if !reflect.DeepEqual(status, decoded) { + t.Errorf("expected: %v, got: %v", status, decoded) + } +} + +func TestAutoscalingEncodeDecodeStatus(t *testing.T) { + testEncodeDecodeStatus(t, Autoscaling.Codec()) +} + +func TestBatchEncodeDecodeStatus(t *testing.T) { + testEncodeDecodeStatus(t, Batch.Codec()) +} + +func TestExperimentalEncodeDecodeStatus(t *testing.T) { + testEncodeDecodeStatus(t, Extensions.Codec()) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/testing/fuzzer.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/testing/fuzzer.go index 4501014ac367..5c52bbb77e2c 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/testing/fuzzer.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/testing/fuzzer.go @@ -23,11 +23,12 @@ import ( "strconv" "testing" - docker "github.com/fsouza/go-dockerclient" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/api/testapi" "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/apis/autoscaling" + "k8s.io/kubernetes/pkg/apis/batch" "k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/labels" @@ -156,10 +157,10 @@ func FuzzerFor(t *testing.T, version unversioned.GroupVersion, src rand.Source) j.RollingUpdate = &rollingUpdate } }, - func(j *extensions.JobSpec, c fuzz.Continue) { + func(j *batch.JobSpec, c fuzz.Continue) { c.FuzzNoCustom(j) // fuzz self without calling this function again - completions := int(c.Rand.Int31()) - parallelism := int(c.Rand.Int31()) + completions := int32(c.Rand.Int31()) + parallelism := int32(c.Rand.Int31()) j.Completions = &completions j.Parallelism = ¶llelism if c.Rand.Int31()%2 == 0 { @@ -180,7 +181,8 @@ func FuzzerFor(t *testing.T, version unversioned.GroupVersion, src rand.Source) if true { //c.RandBool() { *j = &runtime.Unknown{ // We do not set TypeMeta here because it is not carried through a round trip - RawJSON: []byte(`{"apiVersion":"unknown.group/unknown","kind":"Something","someKey":"someValue"}`), + Raw: []byte(`{"apiVersion":"unknown.group/unknown","kind":"Something","someKey":"someValue"}`), + ContentType: runtime.ContentTypeJSON, } } else { types := []runtime.Object{&api.Pod{}, &api.ReplicationController{}} @@ -189,25 +191,12 @@ func FuzzerFor(t *testing.T, version unversioned.GroupVersion, src rand.Source) *j = t } }, - func(pb map[docker.Port][]docker.PortBinding, c fuzz.Continue) { - // This is necessary because keys with nil values get omitted. - // TODO: Is this a bug? 
- pb[docker.Port(c.RandString())] = []docker.PortBinding{ - {c.RandString(), c.RandString()}, - {c.RandString(), c.RandString()}, - } - }, - func(pm map[string]docker.PortMapping, c fuzz.Continue) { - // This is necessary because keys with nil values get omitted. - // TODO: Is this a bug? - pm[c.RandString()] = docker.PortMapping{ - c.RandString(): c.RandString(), - } - }, func(q *api.ResourceRequirements, c fuzz.Continue) { randomQuantity := func() resource.Quantity { var q resource.Quantity c.Fuzz(&q) + // precalc the string for benchmarking purposes + _ = q.String() return q } q.Limits = make(api.ResourceList) @@ -256,6 +245,7 @@ func FuzzerFor(t *testing.T, version unversioned.GroupVersion, src rand.Source) func(m *api.DownwardAPIVolumeFile, c fuzz.Continue) { m.Path = c.RandString() versions := []string{"v1"} + m.FieldRef = &api.ObjectFieldSelector{} m.FieldRef.APIVersion = versions[c.Rand.Intn(len(versions))] m.FieldRef.FieldPath = c.RandString() }, @@ -353,7 +343,7 @@ func FuzzerFor(t *testing.T, version unversioned.GroupVersion, src rand.Source) }, func(pvc *api.PersistentVolumeClaim, c fuzz.Continue) { c.FuzzNoCustom(pvc) // fuzz self without calling this function again - types := []api.PersistentVolumeClaimPhase{api.ClaimBound, api.ClaimPending} + types := []api.PersistentVolumeClaimPhase{api.ClaimBound, api.ClaimPending, api.ClaimLost} pvc.Status.Phase = types[c.Rand.Intn(len(types))] }, func(s *api.NamespaceSpec, c fuzz.Continue) { @@ -391,20 +381,12 @@ func FuzzerFor(t *testing.T, version unversioned.GroupVersion, src rand.Source) c.FuzzNoCustom(s) s.Allocatable = s.Capacity }, - func(s *extensions.APIVersion, c fuzz.Continue) { - // We can't use c.RandString() here because it may generate empty - // string, which will cause tests failure. - s.APIGroup = "something" - }, - func(s *extensions.HorizontalPodAutoscalerSpec, c fuzz.Continue) { + func(s *autoscaling.HorizontalPodAutoscalerSpec, c fuzz.Continue) { c.FuzzNoCustom(s) // fuzz self without calling this function again - minReplicas := int(c.Rand.Int31()) + minReplicas := int32(c.Rand.Int31()) s.MinReplicas = &minReplicas - s.CPUUtilization = &extensions.CPUTargetUtilization{TargetPercentage: int(int32(c.RandUint64()))} - }, - func(s *extensions.SubresourceReference, c fuzz.Continue) { - c.FuzzNoCustom(s) // fuzz self without calling this function again - s.Subresource = "scale" + targetCpu := int32(c.RandUint64()) + s.TargetCPUUtilizationPercentage = &targetCpu }, func(psp *extensions.PodSecurityPolicySpec, c fuzz.Continue) { c.FuzzNoCustom(psp) // fuzz self without calling this function again @@ -432,6 +414,37 @@ func FuzzerFor(t *testing.T, version unversioned.GroupVersion, src rand.Source) } } }, + func(r *runtime.RawExtension, c fuzz.Continue) { + // Pick an arbitrary type and fuzz it + types := []runtime.Object{&api.Pod{}, &extensions.Deployment{}, &api.Service{}} + obj := types[c.Rand.Intn(len(types))] + c.Fuzz(obj) + + // Find a codec for converting the object to raw bytes. This is necessary for the + // api version and kind to be correctly set by serialization.
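+ // (Pod and Service are core types, so the switch below maps both to the
+ // Default codec; only Deployment needs the Extensions codec.)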
+ var codec runtime.Codec + switch obj.(type) { + case *api.Pod: + codec = testapi.Default.Codec() + case *extensions.Deployment: + codec = testapi.Extensions.Codec() + case *api.Service: + codec = testapi.Default.Codec() + default: + t.Errorf("Failed to find codec for object type: %T", obj) + return + } + + // Convert the object to raw bytes + bytes, err := runtime.Encode(codec, obj) + if err != nil { + t.Errorf("Failed to encode object: %v", err) + return + } + + // Set the bytes field on the RawExtension + r.Raw = bytes + }, ) return f } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/types.generated.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/types.generated.go index b4ccb56ccd6d..37a3bcea03b5 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/types.generated.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/types.generated.go @@ -1,5 +1,5 @@ /* -Copyright 2015 The Kubernetes Authors All rights reserved. +Copyright 2016 The Kubernetes Authors All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -27,14 +27,13 @@ import ( codec1978 "github.com/ugorji/go/codec" pkg3_resource "k8s.io/kubernetes/pkg/api/resource" pkg2_unversioned "k8s.io/kubernetes/pkg/api/unversioned" - pkg7_fields "k8s.io/kubernetes/pkg/fields" - pkg6_labels "k8s.io/kubernetes/pkg/labels" - pkg8_runtime "k8s.io/kubernetes/pkg/runtime" + pkg6_fields "k8s.io/kubernetes/pkg/fields" + pkg5_labels "k8s.io/kubernetes/pkg/labels" + pkg7_runtime "k8s.io/kubernetes/pkg/runtime" pkg1_types "k8s.io/kubernetes/pkg/types" - pkg5_intstr "k8s.io/kubernetes/pkg/util/intstr" + pkg4_intstr "k8s.io/kubernetes/pkg/util/intstr" "reflect" "runtime" - pkg4_inf "speter.net/go/exp/math/dec/inf" time "time" ) @@ -70,14 +69,13 @@ func init() { if false { // reference the types, but skip this branch at build/run time var v0 pkg3_resource.Quantity var v1 pkg2_unversioned.Time - var v2 pkg7_fields.Selector - var v3 pkg6_labels.Selector - var v4 pkg8_runtime.Object + var v2 pkg6_fields.Selector + var v3 pkg5_labels.Selector + var v4 pkg7_runtime.Object var v5 pkg1_types.UID - var v6 pkg5_intstr.IntOrString - var v7 pkg4_inf.Dec - var v8 time.Time - _, _, _, _, _, _, _, _, _ = v0, v1, v2, v3, v4, v5, v6, v7, v8 + var v6 pkg4_intstr.IntOrString + var v7 time.Time + _, _, _, _, _, _, _, _ = v0, v1, v2, v3, v4, v5, v6, v7 } } @@ -95,7 +93,7 @@ func (x *ObjectMeta) CodecEncodeSelf(e *codec1978.Encoder) { } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [12]bool + var yyq2 [14]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false yyq2[0] = x.Name != "" @@ -110,9 +108,11 @@ func (x *ObjectMeta) CodecEncodeSelf(e *codec1978.Encoder) { yyq2[9] = x.DeletionGracePeriodSeconds != nil yyq2[10] = len(x.Labels) != 0 yyq2[11] = len(x.Annotations) != 0 + yyq2[12] = len(x.OwnerReferences) != 0 + yyq2[13] = len(x.Finalizers) != 0 var yynn2 int if yyr2 || yy2arr2 { - r.EncodeArrayStart(12) + r.EncodeArrayStart(14) } else { yynn2 = 0 for _, b := range yyq2 { @@ -481,6 +481,72 @@ func (x *ObjectMeta) CodecEncodeSelf(e *codec1978.Encoder) { } } } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[12] { + if x.OwnerReferences == nil { + r.EncodeNil() + } else { + yym44 := z.EncBinary() + _ = yym44 + if false { + } else { + h.encSliceOwnerReference(([]OwnerReference)(x.OwnerReferences), e) + } + } + } else { + r.EncodeNil() + } + } else { + if 
yyq2[12] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("ownerReferences")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.OwnerReferences == nil { + r.EncodeNil() + } else { + yym45 := z.EncBinary() + _ = yym45 + if false { + } else { + h.encSliceOwnerReference(([]OwnerReference)(x.OwnerReferences), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[13] { + if x.Finalizers == nil { + r.EncodeNil() + } else { + yym47 := z.EncBinary() + _ = yym47 + if false { + } else { + z.F.EncSliceStringV(x.Finalizers, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[13] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("finalizers")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Finalizers == nil { + r.EncodeNil() + } else { + yym48 := z.EncBinary() + _ = yym48 + if false { + } else { + z.F.EncSliceStringV(x.Finalizers, false, e) + } + } + } + } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { @@ -662,6 +728,30 @@ func (x *ObjectMeta) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { z.F.DecMapStringStringX(yyv19, false, d) } } + case "ownerReferences": + if r.TryDecodeAsNil() { + x.OwnerReferences = nil + } else { + yyv21 := &x.OwnerReferences + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + h.decSliceOwnerReference((*[]OwnerReference)(yyv21), d) + } + } + case "finalizers": + if r.TryDecodeAsNil() { + x.Finalizers = nil + } else { + yyv23 := &x.Finalizers + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + z.F.DecSliceStringX(yyv23, false, d) + } + } default: z.DecStructFieldNotFound(-1, yys3) } // end switch yys3 @@ -673,16 +763,16 @@ func (x *ObjectMeta) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj21 int - var yyb21 bool - var yyhl21 bool = l >= 0 - yyj21++ - if yyhl21 { - yyb21 = yyj21 > l + var yyj25 int + var yyb25 bool + var yyhl25 bool = l >= 0 + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l } else { - yyb21 = r.CheckBreak() + yyb25 = r.CheckBreak() } - if yyb21 { + if yyb25 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -692,13 +782,13 @@ func (x *ObjectMeta) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } else { x.Name = string(r.DecodeString()) } - yyj21++ - if yyhl21 { - yyb21 = yyj21 > l + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l } else { - yyb21 = r.CheckBreak() + yyb25 = r.CheckBreak() } - if yyb21 { + if yyb25 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -708,13 +798,13 @@ func (x *ObjectMeta) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } else { x.GenerateName = string(r.DecodeString()) } - yyj21++ - if yyhl21 { - yyb21 = yyj21 > l + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l } else { - yyb21 = r.CheckBreak() + yyb25 = r.CheckBreak() } - if yyb21 { + if yyb25 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -724,13 +814,13 @@ func (x *ObjectMeta) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } else { x.Namespace = string(r.DecodeString()) } - yyj21++ - if yyhl21 { - yyb21 = yyj21 > l + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l } else { - yyb21 = r.CheckBreak() + yyb25 = r.CheckBreak() } - if yyb21 { + if yyb25 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return 
} @@ -740,13 +830,13 @@ func (x *ObjectMeta) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } else { x.SelfLink = string(r.DecodeString()) } - yyj21++ - if yyhl21 { - yyb21 = yyj21 > l + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l } else { - yyb21 = r.CheckBreak() + yyb25 = r.CheckBreak() } - if yyb21 { + if yyb25 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -756,13 +846,13 @@ func (x *ObjectMeta) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } else { x.UID = pkg1_types.UID(r.DecodeString()) } - yyj21++ - if yyhl21 { - yyb21 = yyj21 > l + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l } else { - yyb21 = r.CheckBreak() + yyb25 = r.CheckBreak() } - if yyb21 { + if yyb25 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -772,13 +862,13 @@ func (x *ObjectMeta) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } else { x.ResourceVersion = string(r.DecodeString()) } - yyj21++ - if yyhl21 { - yyb21 = yyj21 > l + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l } else { - yyb21 = r.CheckBreak() + yyb25 = r.CheckBreak() } - if yyb21 { + if yyb25 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -788,13 +878,13 @@ func (x *ObjectMeta) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } else { x.Generation = int64(r.DecodeInt(64)) } - yyj21++ - if yyhl21 { - yyb21 = yyj21 > l + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l } else { - yyb21 = r.CheckBreak() + yyb25 = r.CheckBreak() } - if yyb21 { + if yyb25 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -802,26 +892,26 @@ func (x *ObjectMeta) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.CreationTimestamp = pkg2_unversioned.Time{} } else { - yyv29 := &x.CreationTimestamp - yym30 := z.DecBinary() - _ = yym30 + yyv33 := &x.CreationTimestamp + yym34 := z.DecBinary() + _ = yym34 if false { - } else if z.HasExtensions() && z.DecExt(yyv29) { - } else if yym30 { - z.DecBinaryUnmarshal(yyv29) - } else if !yym30 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv29) + } else if z.HasExtensions() && z.DecExt(yyv33) { + } else if yym34 { + z.DecBinaryUnmarshal(yyv33) + } else if !yym34 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv33) } else { - z.DecFallback(yyv29, false) + z.DecFallback(yyv33, false) } } - yyj21++ - if yyhl21 { - yyb21 = yyj21 > l + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l } else { - yyb21 = r.CheckBreak() + yyb25 = r.CheckBreak() } - if yyb21 { + if yyb25 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -834,25 +924,25 @@ func (x *ObjectMeta) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.DeletionTimestamp == nil { x.DeletionTimestamp = new(pkg2_unversioned.Time) } - yym32 := z.DecBinary() - _ = yym32 + yym36 := z.DecBinary() + _ = yym36 if false { } else if z.HasExtensions() && z.DecExt(x.DeletionTimestamp) { - } else if yym32 { + } else if yym36 { z.DecBinaryUnmarshal(x.DeletionTimestamp) - } else if !yym32 && z.IsJSONHandle() { + } else if !yym36 && z.IsJSONHandle() { z.DecJSONUnmarshal(x.DeletionTimestamp) } else { z.DecFallback(x.DeletionTimestamp, false) } } - yyj21++ - if yyhl21 { - yyb21 = yyj21 > l + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l } else { - yyb21 = r.CheckBreak() + yyb25 = r.CheckBreak() } - if yyb21 { + if yyb25 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -865,20 +955,20 @@ func (x *ObjectMeta) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.DeletionGracePeriodSeconds == nil { 
x.DeletionGracePeriodSeconds = new(int64) } - yym34 := z.DecBinary() - _ = yym34 + yym38 := z.DecBinary() + _ = yym38 if false { } else { *((*int64)(x.DeletionGracePeriodSeconds)) = int64(r.DecodeInt(64)) } } - yyj21++ - if yyhl21 { - yyb21 = yyj21 > l + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l } else { - yyb21 = r.CheckBreak() + yyb25 = r.CheckBreak() } - if yyb21 { + if yyb25 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -886,21 +976,21 @@ func (x *ObjectMeta) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Labels = nil } else { - yyv35 := &x.Labels - yym36 := z.DecBinary() - _ = yym36 + yyv39 := &x.Labels + yym40 := z.DecBinary() + _ = yym40 if false { } else { - z.F.DecMapStringStringX(yyv35, false, d) + z.F.DecMapStringStringX(yyv39, false, d) } } - yyj21++ - if yyhl21 { - yyb21 = yyj21 > l + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l } else { - yyb21 = r.CheckBreak() + yyb25 = r.CheckBreak() } - if yyb21 { + if yyb25 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -908,26 +998,70 @@ func (x *ObjectMeta) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Annotations = nil } else { - yyv37 := &x.Annotations - yym38 := z.DecBinary() - _ = yym38 + yyv41 := &x.Annotations + yym42 := z.DecBinary() + _ = yym42 + if false { + } else { + z.F.DecMapStringStringX(yyv41, false, d) + } + } + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l + } else { + yyb25 = r.CheckBreak() + } + if yyb25 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.OwnerReferences = nil + } else { + yyv43 := &x.OwnerReferences + yym44 := z.DecBinary() + _ = yym44 + if false { + } else { + h.decSliceOwnerReference((*[]OwnerReference)(yyv43), d) + } + } + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l + } else { + yyb25 = r.CheckBreak() + } + if yyb25 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Finalizers = nil + } else { + yyv45 := &x.Finalizers + yym46 := z.DecBinary() + _ = yym46 if false { } else { - z.F.DecMapStringStringX(yyv37, false, d) + z.F.DecSliceStringX(yyv45, false, d) } } for { - yyj21++ - if yyhl21 { - yyb21 = yyj21 > l + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l } else { - yyb21 = r.CheckBreak() + yyb25 = r.CheckBreak() } - if yyb21 { + if yyb25 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj21-1, "") + z.DecStructFieldNotFound(yyj25-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -946,7 +1080,7 @@ func (x *Volume) CodecEncodeSelf(e *codec1978.Encoder) { } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [20]bool + var yyq2 [21]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false yyq2[1] = x.VolumeSource.HostPath != nil && x.HostPath != nil @@ -968,9 +1102,10 @@ func (x *Volume) CodecEncodeSelf(e *codec1978.Encoder) { yyq2[17] = x.VolumeSource.FC != nil && x.FC != nil yyq2[18] = x.VolumeSource.AzureFile != nil && x.AzureFile != nil yyq2[19] = x.VolumeSource.ConfigMap != nil && x.ConfigMap != nil + yyq2[20] = x.VolumeSource.VsphereVolume != nil && x.VsphereVolume != nil var yynn2 int if yyr2 || yy2arr2 { - r.EncodeArrayStart(20) + r.EncodeArrayStart(21) } else { yynn2 = 1 for _, b := range yyq2 { @@ -1703,6 +1838,43 @@ func (x *Volume) 
CodecEncodeSelf(e *codec1978.Encoder) { } } } + var yyn63 bool + if x.VolumeSource.VsphereVolume == nil { + yyn63 = true + goto LABEL63 + } + LABEL63: + if yyr2 || yy2arr2 { + if yyn63 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[20] { + if x.VsphereVolume == nil { + r.EncodeNil() + } else { + x.VsphereVolume.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[20] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("vsphereVolume")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn63 { + r.EncodeNil() + } else { + if x.VsphereVolume == nil { + r.EncodeNil() + } else { + x.VsphereVolume.CodecEncodeSelf(e) + } + } + } + } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { @@ -2036,6 +2208,20 @@ func (x *Volume) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } x.ConfigMap.CodecDecodeSelf(d) } + case "vsphereVolume": + if x.VolumeSource.VsphereVolume == nil { + x.VolumeSource.VsphereVolume = new(VsphereVirtualDiskVolumeSource) + } + if r.TryDecodeAsNil() { + if x.VsphereVolume != nil { + x.VsphereVolume = nil + } + } else { + if x.VsphereVolume == nil { + x.VsphereVolume = new(VsphereVirtualDiskVolumeSource) + } + x.VsphereVolume.CodecDecodeSelf(d) + } default: z.DecStructFieldNotFound(-1, yys3) } // end switch yys3 @@ -2047,16 +2233,16 @@ func (x *Volume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj24 int - var yyb24 bool - var yyhl24 bool = l >= 0 - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l + var yyj25 int + var yyb25 bool + var yyhl25 bool = l >= 0 + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l } else { - yyb24 = r.CheckBreak() + yyb25 = r.CheckBreak() } - if yyb24 { + if yyb25 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2069,13 +2255,13 @@ func (x *Volume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.VolumeSource.HostPath == nil { x.VolumeSource.HostPath = new(HostPathVolumeSource) } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l } else { - yyb24 = r.CheckBreak() + yyb25 = r.CheckBreak() } - if yyb24 { + if yyb25 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2093,13 +2279,13 @@ func (x *Volume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.VolumeSource.EmptyDir == nil { x.VolumeSource.EmptyDir = new(EmptyDirVolumeSource) } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l } else { - yyb24 = r.CheckBreak() + yyb25 = r.CheckBreak() } - if yyb24 { + if yyb25 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2117,13 +2303,13 @@ func (x *Volume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.VolumeSource.GCEPersistentDisk == nil { x.VolumeSource.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l } else { - yyb24 = r.CheckBreak() + yyb25 = r.CheckBreak() } - if yyb24 { + if yyb25 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2141,13 +2327,13 @@ func (x *Volume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.VolumeSource.AWSElasticBlockStore == nil { x.VolumeSource.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) } - yyj24++ - if yyhl24 { - yyb24 = yyj24 
> l + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l } else { - yyb24 = r.CheckBreak() + yyb25 = r.CheckBreak() } - if yyb24 { + if yyb25 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2165,13 +2351,13 @@ func (x *Volume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.VolumeSource.GitRepo == nil { x.VolumeSource.GitRepo = new(GitRepoVolumeSource) } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l } else { - yyb24 = r.CheckBreak() + yyb25 = r.CheckBreak() } - if yyb24 { + if yyb25 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2189,13 +2375,13 @@ func (x *Volume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.VolumeSource.Secret == nil { x.VolumeSource.Secret = new(SecretVolumeSource) } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l } else { - yyb24 = r.CheckBreak() + yyb25 = r.CheckBreak() } - if yyb24 { + if yyb25 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2213,13 +2399,13 @@ func (x *Volume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.VolumeSource.NFS == nil { x.VolumeSource.NFS = new(NFSVolumeSource) } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l } else { - yyb24 = r.CheckBreak() + yyb25 = r.CheckBreak() } - if yyb24 { + if yyb25 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2237,13 +2423,13 @@ func (x *Volume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.VolumeSource.ISCSI == nil { x.VolumeSource.ISCSI = new(ISCSIVolumeSource) } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l } else { - yyb24 = r.CheckBreak() + yyb25 = r.CheckBreak() } - if yyb24 { + if yyb25 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2261,13 +2447,13 @@ func (x *Volume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.VolumeSource.Glusterfs == nil { x.VolumeSource.Glusterfs = new(GlusterfsVolumeSource) } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l } else { - yyb24 = r.CheckBreak() + yyb25 = r.CheckBreak() } - if yyb24 { + if yyb25 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2285,13 +2471,13 @@ func (x *Volume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.VolumeSource.PersistentVolumeClaim == nil { x.VolumeSource.PersistentVolumeClaim = new(PersistentVolumeClaimVolumeSource) } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l } else { - yyb24 = r.CheckBreak() + yyb25 = r.CheckBreak() } - if yyb24 { + if yyb25 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2309,13 +2495,13 @@ func (x *Volume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.VolumeSource.RBD == nil { x.VolumeSource.RBD = new(RBDVolumeSource) } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l } else { - yyb24 = r.CheckBreak() + yyb25 = r.CheckBreak() } - if yyb24 { + if yyb25 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2333,13 +2519,13 @@ func (x *Volume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.VolumeSource.FlexVolume == nil { x.VolumeSource.FlexVolume = new(FlexVolumeSource) } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l } else { - yyb24 = r.CheckBreak() + yyb25 = r.CheckBreak() } - if yyb24 
{ + if yyb25 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2357,13 +2543,13 @@ func (x *Volume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.VolumeSource.Cinder == nil { x.VolumeSource.Cinder = new(CinderVolumeSource) } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l } else { - yyb24 = r.CheckBreak() + yyb25 = r.CheckBreak() } - if yyb24 { + if yyb25 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2381,13 +2567,13 @@ func (x *Volume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.VolumeSource.CephFS == nil { x.VolumeSource.CephFS = new(CephFSVolumeSource) } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l } else { - yyb24 = r.CheckBreak() + yyb25 = r.CheckBreak() } - if yyb24 { + if yyb25 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2405,13 +2591,13 @@ func (x *Volume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.VolumeSource.Flocker == nil { x.VolumeSource.Flocker = new(FlockerVolumeSource) } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l } else { - yyb24 = r.CheckBreak() + yyb25 = r.CheckBreak() } - if yyb24 { + if yyb25 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2429,13 +2615,13 @@ func (x *Volume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.VolumeSource.DownwardAPI == nil { x.VolumeSource.DownwardAPI = new(DownwardAPIVolumeSource) } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l } else { - yyb24 = r.CheckBreak() + yyb25 = r.CheckBreak() } - if yyb24 { + if yyb25 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2453,13 +2639,13 @@ func (x *Volume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.VolumeSource.FC == nil { x.VolumeSource.FC = new(FCVolumeSource) } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l } else { - yyb24 = r.CheckBreak() + yyb25 = r.CheckBreak() } - if yyb24 { + if yyb25 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2477,13 +2663,13 @@ func (x *Volume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.VolumeSource.AzureFile == nil { x.VolumeSource.AzureFile = new(AzureFileVolumeSource) } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l } else { - yyb24 = r.CheckBreak() + yyb25 = r.CheckBreak() } - if yyb24 { + if yyb25 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2501,13 +2687,13 @@ func (x *Volume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.VolumeSource.ConfigMap == nil { x.VolumeSource.ConfigMap = new(ConfigMapVolumeSource) } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l } else { - yyb24 = r.CheckBreak() + yyb25 = r.CheckBreak() } - if yyb24 { + if yyb25 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2522,18 +2708,42 @@ func (x *Volume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.ConfigMap.CodecDecodeSelf(d) } + if x.VolumeSource.VsphereVolume == nil { + x.VolumeSource.VsphereVolume = new(VsphereVirtualDiskVolumeSource) + } + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l + } else { + yyb25 = r.CheckBreak() + } + if yyb25 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.VsphereVolume != nil { + x.VsphereVolume = nil + } + } else { + if x.VsphereVolume == nil { + x.VsphereVolume = new(VsphereVirtualDiskVolumeSource) + } + x.VsphereVolume.CodecDecodeSelf(d) + } for { - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l } else { - yyb24 = r.CheckBreak() + yyb25 = r.CheckBreak() } - if yyb24 { + if yyb25 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj24-1, "") + z.DecStructFieldNotFound(yyj25-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -2552,7 +2762,7 @@ func (x *VolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [19]bool + var yyq2 [20]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false yyq2[0] = x.HostPath != nil @@ -2574,9 +2784,10 @@ func (x *VolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { yyq2[16] = x.FC != nil yyq2[17] = x.AzureFile != nil yyq2[18] = x.ConfigMap != nil + yyq2[19] = x.VsphereVolume != nil var yynn2 int if yyr2 || yy2arr2 { - r.EncodeArrayStart(19) + r.EncodeArrayStart(20) } else { yynn2 = 0 for _, b := range yyq2 { @@ -3024,6 +3235,29 @@ func (x *VolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { } } } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[19] { + if x.VsphereVolume == nil { + r.EncodeNil() + } else { + x.VsphereVolume.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[19] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("vsphereVolume")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.VsphereVolume == nil { + r.EncodeNil() + } else { + x.VsphereVolume.CodecEncodeSelf(e) + } + } + } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { @@ -3294,6 +3528,17 @@ func (x *VolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } x.ConfigMap.CodecDecodeSelf(d) } + case "vsphereVolume": + if r.TryDecodeAsNil() { + if x.VsphereVolume != nil { + x.VsphereVolume = nil + } + } else { + if x.VsphereVolume == nil { + x.VsphereVolume = new(VsphereVirtualDiskVolumeSource) + } + x.VsphereVolume.CodecDecodeSelf(d) + } default: z.DecStructFieldNotFound(-1, yys3) } // end switch yys3 @@ -3305,16 +3550,16 @@ func (x *VolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj23 int - var yyb23 bool - var yyhl23 bool = l >= 0 - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l + var yyj24 int + var yyb24 bool + var yyhl24 bool = l >= 0 + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l } else { - yyb23 = r.CheckBreak() + yyb24 = r.CheckBreak() } - if yyb23 { + if yyb24 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3329,13 +3574,13 @@ func (x *VolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.HostPath.CodecDecodeSelf(d) } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l } else { - yyb23 = r.CheckBreak() + yyb24 = r.CheckBreak() } - if yyb23 { + if yyb24 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3350,13 +3595,13 @@ func (x *VolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.EmptyDir.CodecDecodeSelf(d) } - yyj23++ - if 
yyhl23 { - yyb23 = yyj23 > l + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l } else { - yyb23 = r.CheckBreak() + yyb24 = r.CheckBreak() } - if yyb23 { + if yyb24 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3371,13 +3616,13 @@ func (x *VolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.GCEPersistentDisk.CodecDecodeSelf(d) } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l } else { - yyb23 = r.CheckBreak() + yyb24 = r.CheckBreak() } - if yyb23 { + if yyb24 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3392,13 +3637,13 @@ func (x *VolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.AWSElasticBlockStore.CodecDecodeSelf(d) } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l } else { - yyb23 = r.CheckBreak() + yyb24 = r.CheckBreak() } - if yyb23 { + if yyb24 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3413,13 +3658,13 @@ func (x *VolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.GitRepo.CodecDecodeSelf(d) } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l } else { - yyb23 = r.CheckBreak() + yyb24 = r.CheckBreak() } - if yyb23 { + if yyb24 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3434,13 +3679,13 @@ func (x *VolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.Secret.CodecDecodeSelf(d) } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l } else { - yyb23 = r.CheckBreak() + yyb24 = r.CheckBreak() } - if yyb23 { + if yyb24 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3455,13 +3700,13 @@ func (x *VolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.NFS.CodecDecodeSelf(d) } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l } else { - yyb23 = r.CheckBreak() + yyb24 = r.CheckBreak() } - if yyb23 { + if yyb24 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3476,13 +3721,13 @@ func (x *VolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.ISCSI.CodecDecodeSelf(d) } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l } else { - yyb23 = r.CheckBreak() + yyb24 = r.CheckBreak() } - if yyb23 { + if yyb24 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3497,13 +3742,13 @@ func (x *VolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.Glusterfs.CodecDecodeSelf(d) } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l } else { - yyb23 = r.CheckBreak() + yyb24 = r.CheckBreak() } - if yyb23 { + if yyb24 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3518,13 +3763,13 @@ func (x *VolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.PersistentVolumeClaim.CodecDecodeSelf(d) } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l } else { - yyb23 = r.CheckBreak() + yyb24 = r.CheckBreak() } - if yyb23 { + if yyb24 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3539,13 +3784,13 @@ func (x *VolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.RBD.CodecDecodeSelf(d) } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l } else { - yyb23 = r.CheckBreak() + yyb24 
= r.CheckBreak() } - if yyb23 { + if yyb24 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3560,13 +3805,13 @@ func (x *VolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.FlexVolume.CodecDecodeSelf(d) } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l } else { - yyb23 = r.CheckBreak() + yyb24 = r.CheckBreak() } - if yyb23 { + if yyb24 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3581,13 +3826,13 @@ func (x *VolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.Cinder.CodecDecodeSelf(d) } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l } else { - yyb23 = r.CheckBreak() + yyb24 = r.CheckBreak() } - if yyb23 { + if yyb24 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3602,13 +3847,13 @@ func (x *VolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.CephFS.CodecDecodeSelf(d) } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l } else { - yyb23 = r.CheckBreak() + yyb24 = r.CheckBreak() } - if yyb23 { + if yyb24 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3623,13 +3868,13 @@ func (x *VolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.Flocker.CodecDecodeSelf(d) } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l } else { - yyb23 = r.CheckBreak() + yyb24 = r.CheckBreak() } - if yyb23 { + if yyb24 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3644,13 +3889,13 @@ func (x *VolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.DownwardAPI.CodecDecodeSelf(d) } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l } else { - yyb23 = r.CheckBreak() + yyb24 = r.CheckBreak() } - if yyb23 { + if yyb24 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3665,13 +3910,13 @@ func (x *VolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.FC.CodecDecodeSelf(d) } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l } else { - yyb23 = r.CheckBreak() + yyb24 = r.CheckBreak() } - if yyb23 { + if yyb24 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3686,13 +3931,13 @@ func (x *VolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.AzureFile.CodecDecodeSelf(d) } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l } else { - yyb23 = r.CheckBreak() + yyb24 = r.CheckBreak() } - if yyb23 { + if yyb24 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3707,18 +3952,39 @@ func (x *VolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.ConfigMap.CodecDecodeSelf(d) } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.VsphereVolume != nil { + x.VsphereVolume = nil + } + } else { + if x.VsphereVolume == nil { + x.VsphereVolume = new(VsphereVirtualDiskVolumeSource) + } + x.VsphereVolume.CodecDecodeSelf(d) + } for { - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l } else { - yyb23 = r.CheckBreak() + yyb24 = r.CheckBreak() } - if yyb23 { + if yyb24 { break } 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj23-1, "") + z.DecStructFieldNotFound(yyj24-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -3737,7 +4003,7 @@ func (x *PersistentVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [13]bool + var yyq2 [14]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false yyq2[0] = x.GCEPersistentDisk != nil @@ -3753,9 +4019,10 @@ func (x *PersistentVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { yyq2[10] = x.FC != nil yyq2[11] = x.Flocker != nil yyq2[12] = x.AzureFile != nil + yyq2[13] = x.VsphereVolume != nil var yynn2 int if yyr2 || yy2arr2 { - r.EncodeArrayStart(13) + r.EncodeArrayStart(14) } else { yynn2 = 0 for _, b := range yyq2 { @@ -4065,6 +4332,29 @@ func (x *PersistentVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { } } } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[13] { + if x.VsphereVolume == nil { + r.EncodeNil() + } else { + x.VsphereVolume.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[13] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("vsphereVolume")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.VsphereVolume == nil { + r.EncodeNil() + } else { + x.VsphereVolume.CodecEncodeSelf(e) + } + } + } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { @@ -4269,6 +4559,17 @@ func (x *PersistentVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Deco } x.AzureFile.CodecDecodeSelf(d) } + case "vsphereVolume": + if r.TryDecodeAsNil() { + if x.VsphereVolume != nil { + x.VsphereVolume = nil + } + } else { + if x.VsphereVolume == nil { + x.VsphereVolume = new(VsphereVirtualDiskVolumeSource) + } + x.VsphereVolume.CodecDecodeSelf(d) + } default: z.DecStructFieldNotFound(-1, yys3) } // end switch yys3 @@ -4280,16 +4581,16 @@ func (x *PersistentVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.De var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj17 int - var yyb17 bool - var yyhl17 bool = l >= 0 - yyj17++ - if yyhl17 { - yyb17 = yyj17 > l + var yyj18 int + var yyb18 bool + var yyhl18 bool = l >= 0 + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb17 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb17 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4304,13 +4605,13 @@ func (x *PersistentVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.De } x.GCEPersistentDisk.CodecDecodeSelf(d) } - yyj17++ - if yyhl17 { - yyb17 = yyj17 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb17 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb17 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4325,13 +4626,13 @@ func (x *PersistentVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.De } x.AWSElasticBlockStore.CodecDecodeSelf(d) } - yyj17++ - if yyhl17 { - yyb17 = yyj17 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb17 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb17 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4346,13 +4647,13 @@ func (x *PersistentVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.De } x.HostPath.CodecDecodeSelf(d) } - yyj17++ - if yyhl17 { - yyb17 = yyj17 
> l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb17 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb17 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4367,13 +4668,13 @@ func (x *PersistentVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.De } x.Glusterfs.CodecDecodeSelf(d) } - yyj17++ - if yyhl17 { - yyb17 = yyj17 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb17 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb17 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4388,13 +4689,13 @@ func (x *PersistentVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.De } x.NFS.CodecDecodeSelf(d) } - yyj17++ - if yyhl17 { - yyb17 = yyj17 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb17 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb17 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4409,13 +4710,13 @@ func (x *PersistentVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.De } x.RBD.CodecDecodeSelf(d) } - yyj17++ - if yyhl17 { - yyb17 = yyj17 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb17 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb17 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4430,13 +4731,13 @@ func (x *PersistentVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.De } x.ISCSI.CodecDecodeSelf(d) } - yyj17++ - if yyhl17 { - yyb17 = yyj17 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb17 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb17 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4451,13 +4752,13 @@ func (x *PersistentVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.De } x.FlexVolume.CodecDecodeSelf(d) } - yyj17++ - if yyhl17 { - yyb17 = yyj17 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb17 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb17 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4472,13 +4773,13 @@ func (x *PersistentVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.De } x.Cinder.CodecDecodeSelf(d) } - yyj17++ - if yyhl17 { - yyb17 = yyj17 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb17 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb17 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4493,13 +4794,13 @@ func (x *PersistentVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.De } x.CephFS.CodecDecodeSelf(d) } - yyj17++ - if yyhl17 { - yyb17 = yyj17 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb17 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb17 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4514,13 +4815,13 @@ func (x *PersistentVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.De } x.FC.CodecDecodeSelf(d) } - yyj17++ - if yyhl17 { - yyb17 = yyj17 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb17 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb17 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4535,13 +4836,13 @@ func (x *PersistentVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.De } x.Flocker.CodecDecodeSelf(d) } - yyj17++ - if yyhl17 { - yyb17 = yyj17 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb17 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb17 { + if yyb18 { 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4556,18 +4857,39 @@ func (x *PersistentVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.De } x.AzureFile.CodecDecodeSelf(d) } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.VsphereVolume != nil { + x.VsphereVolume = nil + } + } else { + if x.VsphereVolume == nil { + x.VsphereVolume = new(VsphereVirtualDiskVolumeSource) + } + x.VsphereVolume.CodecDecodeSelf(d) + } for { - yyj17++ - if yyhl17 { - yyb17 = yyj17 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb17 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb17 { + if yyb18 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj17-1, "") + z.DecStructFieldNotFound(yyj18-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -5139,7 +5461,7 @@ func (x *PersistentVolumeSpec) CodecEncodeSelf(e *codec1978.Encoder) { } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [17]bool + var yyq2 [18]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false yyq2[1] = len(x.AccessModes) != 0 @@ -5158,9 +5480,10 @@ func (x *PersistentVolumeSpec) CodecEncodeSelf(e *codec1978.Encoder) { yyq2[14] = x.PersistentVolumeSource.FC != nil && x.FC != nil yyq2[15] = x.PersistentVolumeSource.Flocker != nil && x.Flocker != nil yyq2[16] = x.PersistentVolumeSource.AzureFile != nil && x.AzureFile != nil + yyq2[17] = x.PersistentVolumeSource.VsphereVolume != nil && x.VsphereVolume != nil var yynn2 int if yyr2 || yy2arr2 { - r.EncodeArrayStart(17) + r.EncodeArrayStart(18) } else { yynn2 = 1 for _, b := range yyq2 { @@ -5740,6 +6063,43 @@ func (x *PersistentVolumeSpec) CodecEncodeSelf(e *codec1978.Encoder) { } } } + var yyn54 bool + if x.PersistentVolumeSource.VsphereVolume == nil { + yyn54 = true + goto LABEL54 + } + LABEL54: + if yyr2 || yy2arr2 { + if yyn54 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[17] { + if x.VsphereVolume == nil { + r.EncodeNil() + } else { + x.VsphereVolume.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[17] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("vsphereVolume")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn54 { + r.EncodeNil() + } else { + if x.VsphereVolume == nil { + r.EncodeNil() + } else { + x.VsphereVolume.CodecEncodeSelf(e) + } + } + } + } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { @@ -6019,6 +6379,20 @@ func (x *PersistentVolumeSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decode } x.AzureFile.CodecDecodeSelf(d) } + case "vsphereVolume": + if x.PersistentVolumeSource.VsphereVolume == nil { + x.PersistentVolumeSource.VsphereVolume = new(VsphereVirtualDiskVolumeSource) + } + if r.TryDecodeAsNil() { + if x.VsphereVolume != nil { + x.VsphereVolume = nil + } + } else { + if x.VsphereVolume == nil { + x.VsphereVolume = new(VsphereVirtualDiskVolumeSource) + } + x.VsphereVolume.CodecDecodeSelf(d) + } default: z.DecStructFieldNotFound(-1, yys3) } // end switch yys3 @@ -6030,16 +6404,16 @@ func (x *PersistentVolumeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Deco var h codecSelfer1234 z, r := 
codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj22 int - var yyb22 bool - var yyhl22 bool = l >= 0 - yyj22++ - if yyhl22 { - yyb22 = yyj22 > l + var yyj23 int + var yyb23 bool + var yyhl23 bool = l >= 0 + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l } else { - yyb22 = r.CheckBreak() + yyb23 = r.CheckBreak() } - if yyb22 { + if yyb23 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -6047,16 +6421,16 @@ func (x *PersistentVolumeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Deco if r.TryDecodeAsNil() { x.Capacity = nil } else { - yyv23 := &x.Capacity - yyv23.CodecDecodeSelf(d) + yyv24 := &x.Capacity + yyv24.CodecDecodeSelf(d) } - yyj22++ - if yyhl22 { - yyb22 = yyj22 > l + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l } else { - yyb22 = r.CheckBreak() + yyb23 = r.CheckBreak() } - if yyb22 { + if yyb23 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -6064,21 +6438,21 @@ func (x *PersistentVolumeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Deco if r.TryDecodeAsNil() { x.AccessModes = nil } else { - yyv24 := &x.AccessModes - yym25 := z.DecBinary() - _ = yym25 + yyv25 := &x.AccessModes + yym26 := z.DecBinary() + _ = yym26 if false { } else { - h.decSlicePersistentVolumeAccessMode((*[]PersistentVolumeAccessMode)(yyv24), d) + h.decSlicePersistentVolumeAccessMode((*[]PersistentVolumeAccessMode)(yyv25), d) } } - yyj22++ - if yyhl22 { - yyb22 = yyj22 > l + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l } else { - yyb22 = r.CheckBreak() + yyb23 = r.CheckBreak() } - if yyb22 { + if yyb23 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -6093,13 +6467,13 @@ func (x *PersistentVolumeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Deco } x.ClaimRef.CodecDecodeSelf(d) } - yyj22++ - if yyhl22 { - yyb22 = yyj22 > l + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l } else { - yyb22 = r.CheckBreak() + yyb23 = r.CheckBreak() } - if yyb22 { + if yyb23 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -6112,13 +6486,13 @@ func (x *PersistentVolumeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Deco if x.PersistentVolumeSource.GCEPersistentDisk == nil { x.PersistentVolumeSource.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) } - yyj22++ - if yyhl22 { - yyb22 = yyj22 > l + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l } else { - yyb22 = r.CheckBreak() + yyb23 = r.CheckBreak() } - if yyb22 { + if yyb23 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -6136,13 +6510,13 @@ func (x *PersistentVolumeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Deco if x.PersistentVolumeSource.AWSElasticBlockStore == nil { x.PersistentVolumeSource.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) } - yyj22++ - if yyhl22 { - yyb22 = yyj22 > l + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l } else { - yyb22 = r.CheckBreak() + yyb23 = r.CheckBreak() } - if yyb22 { + if yyb23 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -6160,13 +6534,13 @@ func (x *PersistentVolumeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Deco if x.PersistentVolumeSource.HostPath == nil { x.PersistentVolumeSource.HostPath = new(HostPathVolumeSource) } - yyj22++ - if yyhl22 { - yyb22 = yyj22 > l + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l } else { - yyb22 = r.CheckBreak() + yyb23 = r.CheckBreak() } - if yyb22 { + if yyb23 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -6184,13 +6558,13 @@ func (x *PersistentVolumeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Deco if 
x.PersistentVolumeSource.Glusterfs == nil { x.PersistentVolumeSource.Glusterfs = new(GlusterfsVolumeSource) } - yyj22++ - if yyhl22 { - yyb22 = yyj22 > l + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l } else { - yyb22 = r.CheckBreak() + yyb23 = r.CheckBreak() } - if yyb22 { + if yyb23 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -6208,13 +6582,13 @@ func (x *PersistentVolumeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Deco if x.PersistentVolumeSource.NFS == nil { x.PersistentVolumeSource.NFS = new(NFSVolumeSource) } - yyj22++ - if yyhl22 { - yyb22 = yyj22 > l + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l } else { - yyb22 = r.CheckBreak() + yyb23 = r.CheckBreak() } - if yyb22 { + if yyb23 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -6232,13 +6606,13 @@ func (x *PersistentVolumeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Deco if x.PersistentVolumeSource.RBD == nil { x.PersistentVolumeSource.RBD = new(RBDVolumeSource) } - yyj22++ - if yyhl22 { - yyb22 = yyj22 > l + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l } else { - yyb22 = r.CheckBreak() + yyb23 = r.CheckBreak() } - if yyb22 { + if yyb23 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -6256,13 +6630,13 @@ func (x *PersistentVolumeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Deco if x.PersistentVolumeSource.ISCSI == nil { x.PersistentVolumeSource.ISCSI = new(ISCSIVolumeSource) } - yyj22++ - if yyhl22 { - yyb22 = yyj22 > l + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l } else { - yyb22 = r.CheckBreak() + yyb23 = r.CheckBreak() } - if yyb22 { + if yyb23 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -6280,13 +6654,13 @@ func (x *PersistentVolumeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Deco if x.PersistentVolumeSource.FlexVolume == nil { x.PersistentVolumeSource.FlexVolume = new(FlexVolumeSource) } - yyj22++ - if yyhl22 { - yyb22 = yyj22 > l + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l } else { - yyb22 = r.CheckBreak() + yyb23 = r.CheckBreak() } - if yyb22 { + if yyb23 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -6304,13 +6678,13 @@ func (x *PersistentVolumeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Deco if x.PersistentVolumeSource.Cinder == nil { x.PersistentVolumeSource.Cinder = new(CinderVolumeSource) } - yyj22++ - if yyhl22 { - yyb22 = yyj22 > l + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l } else { - yyb22 = r.CheckBreak() + yyb23 = r.CheckBreak() } - if yyb22 { + if yyb23 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -6328,13 +6702,13 @@ func (x *PersistentVolumeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Deco if x.PersistentVolumeSource.CephFS == nil { x.PersistentVolumeSource.CephFS = new(CephFSVolumeSource) } - yyj22++ - if yyhl22 { - yyb22 = yyj22 > l + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l } else { - yyb22 = r.CheckBreak() + yyb23 = r.CheckBreak() } - if yyb22 { + if yyb23 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -6352,13 +6726,13 @@ func (x *PersistentVolumeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Deco if x.PersistentVolumeSource.FC == nil { x.PersistentVolumeSource.FC = new(FCVolumeSource) } - yyj22++ - if yyhl22 { - yyb22 = yyj22 > l + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l } else { - yyb22 = r.CheckBreak() + yyb23 = r.CheckBreak() } - if yyb22 { + if yyb23 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -6376,13 +6750,13 @@ func (x *PersistentVolumeSpec) 
codecDecodeSelfFromArray(l int, d *codec1978.Deco if x.PersistentVolumeSource.Flocker == nil { x.PersistentVolumeSource.Flocker = new(FlockerVolumeSource) } - yyj22++ - if yyhl22 { - yyb22 = yyj22 > l + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l } else { - yyb22 = r.CheckBreak() + yyb23 = r.CheckBreak() } - if yyb22 { + if yyb23 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -6400,13 +6774,13 @@ func (x *PersistentVolumeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Deco if x.PersistentVolumeSource.AzureFile == nil { x.PersistentVolumeSource.AzureFile = new(AzureFileVolumeSource) } - yyj22++ - if yyhl22 { - yyb22 = yyj22 > l + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l } else { - yyb22 = r.CheckBreak() + yyb23 = r.CheckBreak() } - if yyb22 { + if yyb23 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -6421,18 +6795,42 @@ func (x *PersistentVolumeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Deco } x.AzureFile.CodecDecodeSelf(d) } + if x.PersistentVolumeSource.VsphereVolume == nil { + x.PersistentVolumeSource.VsphereVolume = new(VsphereVirtualDiskVolumeSource) + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.VsphereVolume != nil { + x.VsphereVolume = nil + } + } else { + if x.VsphereVolume == nil { + x.VsphereVolume = new(VsphereVirtualDiskVolumeSource) + } + x.VsphereVolume.CodecDecodeSelf(d) + } for { - yyj22++ - if yyhl22 { - yyb22 = yyj22 > l + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l } else { - yyb22 = r.CheckBreak() + yyb23 = r.CheckBreak() } - if yyb22 { + if yyb23 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj22-1, "") + z.DecStructFieldNotFound(yyj23-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -8958,7 +9356,7 @@ func (x *GCEPersistentDiskVolumeSource) codecDecodeSelfFromMap(l int, d *codec19 if r.TryDecodeAsNil() { x.Partition = 0 } else { - x.Partition = int(r.DecodeInt(codecSelferBitsize1234)) + x.Partition = int32(r.DecodeInt(32)) } case "readOnly": if r.TryDecodeAsNil() { @@ -9026,7 +9424,7 @@ func (x *GCEPersistentDiskVolumeSource) codecDecodeSelfFromArray(l int, d *codec if r.TryDecodeAsNil() { x.Partition = 0 } else { - x.Partition = int(r.DecodeInt(codecSelferBitsize1234)) + x.Partition = int32(r.DecodeInt(32)) } yyj8++ if yyhl8 { @@ -9323,7 +9721,7 @@ func (x *ISCSIVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.Lun = 0 } else { - x.Lun = int(r.DecodeInt(codecSelferBitsize1234)) + x.Lun = int32(r.DecodeInt(32)) } case "iscsiInterface": if r.TryDecodeAsNil() { @@ -9403,7 +9801,7 @@ func (x *ISCSIVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder if r.TryDecodeAsNil() { x.Lun = 0 } else { - x.Lun = int(r.DecodeInt(codecSelferBitsize1234)) + x.Lun = int32(r.DecodeInt(32)) } yyj10++ if yyhl10 { @@ -9687,13 +10085,13 @@ func (x *FCVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } else { if x.Lun == nil { - x.Lun = new(int) + x.Lun = new(int32) } yym7 := z.DecBinary() _ = yym7 if false { } else { - *((*int)(x.Lun)) = int(r.DecodeInt(codecSelferBitsize1234)) + *((*int32)(x.Lun)) = int32(r.DecodeInt(32)) } } case "fsType": @@ -9761,13 +10159,13 @@ func (x *FCVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } } else { if 
x.Lun == nil { - x.Lun = new(int) + x.Lun = new(int32) } yym14 := z.DecBinary() _ = yym14 if false { } else { - *((*int)(x.Lun)) = int(r.DecodeInt(codecSelferBitsize1234)) + *((*int32)(x.Lun)) = int32(r.DecodeInt(32)) } } yyj10++ @@ -10404,7 +10802,7 @@ func (x *AWSElasticBlockStoreVolumeSource) codecDecodeSelfFromMap(l int, d *code if r.TryDecodeAsNil() { x.Partition = 0 } else { - x.Partition = int(r.DecodeInt(codecSelferBitsize1234)) + x.Partition = int32(r.DecodeInt(32)) } case "readOnly": if r.TryDecodeAsNil() { @@ -10472,7 +10870,7 @@ func (x *AWSElasticBlockStoreVolumeSource) codecDecodeSelfFromArray(l int, d *co if r.TryDecodeAsNil() { x.Partition = 0 } else { - x.Partition = int(r.DecodeInt(codecSelferBitsize1234)) + x.Partition = int32(r.DecodeInt(32)) } yyj8++ if yyhl8 { @@ -10778,13 +11176,14 @@ func (x *SecretVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool + var yyq2 [2]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false yyq2[0] = x.SecretName != "" + yyq2[1] = len(x.Items) != 0 var yynn2 int if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) + r.EncodeArrayStart(2) } else { yynn2 = 0 for _, b := range yyq2 { @@ -10820,6 +11219,39 @@ func (x *SecretVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { } } } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Items == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + h.encSliceKeyToPath(([]KeyToPath)(x.Items), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + h.encSliceKeyToPath(([]KeyToPath)(x.Items), e) + } + } + } + } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { @@ -10887,6 +11319,18 @@ func (x *SecretVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) } else { x.SecretName = string(r.DecodeString()) } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv5 := &x.Items + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + h.decSliceKeyToPath((*[]KeyToPath)(yyv5), d) + } + } default: z.DecStructFieldNotFound(-1, yys3) } // end switch yys3 @@ -10898,16 +11342,16 @@ func (x *SecretVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decode var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj5 int - var yyb5 bool - var yyhl5 bool = l >= 0 - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb5 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb5 { + if yyb7 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -10917,18 +11361,40 @@ func (x *SecretVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decode } else { x.SecretName = string(r.DecodeString()) } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv9 := &x.Items + yym10 := z.DecBinary() + _ = yym10 
+ if false { + } else { + h.decSliceKeyToPath((*[]KeyToPath)(yyv9), d) + } + } for { - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb5 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb5 { + if yyb7 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj5-1, "") + z.DecStructFieldNotFound(yyj7-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -12979,14 +13445,16 @@ func (x *DownwardAPIVolumeFile) CodecEncodeSelf(e *codec1978.Encoder) { } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool + var yyq2 [3]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false + yyq2[1] = x.FieldRef != nil + yyq2[2] = x.ResourceFieldRef != nil var yynn2 int if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) + r.EncodeArrayStart(3) } else { - yynn2 = 2 + yynn2 = 1 for _, b := range yyq2 { if b { yynn2++ @@ -13016,14 +13484,49 @@ func (x *DownwardAPIVolumeFile) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy7 := &x.FieldRef - yy7.CodecEncodeSelf(e) + if yyq2[1] { + if x.FieldRef == nil { + r.EncodeNil() + } else { + x.FieldRef.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fieldRef")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy9 := &x.FieldRef - yy9.CodecEncodeSelf(e) + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("fieldRef")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.FieldRef == nil { + r.EncodeNil() + } else { + x.FieldRef.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.ResourceFieldRef == nil { + r.EncodeNil() + } else { + x.ResourceFieldRef.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("resourceFieldRef")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ResourceFieldRef == nil { + r.EncodeNil() + } else { + x.ResourceFieldRef.CodecEncodeSelf(e) + } + } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) @@ -13094,10 +13597,25 @@ func (x *DownwardAPIVolumeFile) codecDecodeSelfFromMap(l int, d *codec1978.Decod } case "fieldRef": if r.TryDecodeAsNil() { - x.FieldRef = ObjectFieldSelector{} + if x.FieldRef != nil { + x.FieldRef = nil + } } else { - yyv5 := &x.FieldRef - yyv5.CodecDecodeSelf(d) + if x.FieldRef == nil { + x.FieldRef = new(ObjectFieldSelector) + } + x.FieldRef.CodecDecodeSelf(d) + } + case "resourceFieldRef": + if r.TryDecodeAsNil() { + if x.ResourceFieldRef != nil { + x.ResourceFieldRef = nil + } + } else { + if x.ResourceFieldRef == nil { + x.ResourceFieldRef = new(ResourceFieldSelector) + } + x.ResourceFieldRef.CodecDecodeSelf(d) } default: z.DecStructFieldNotFound(-1, yys3) @@ -13110,16 +13628,16 @@ func (x *DownwardAPIVolumeFile) codecDecodeSelfFromArray(l int, d *codec1978.Dec var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l 
} else { - yyb6 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb6 { + if yyb7 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -13129,35 +13647,60 @@ func (x *DownwardAPIVolumeFile) codecDecodeSelfFromArray(l int, d *codec1978.Dec } else { x.Path = string(r.DecodeString()) } - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb6 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb6 { + if yyb7 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.FieldRef = ObjectFieldSelector{} + if x.FieldRef != nil { + x.FieldRef = nil + } } else { - yyv8 := &x.FieldRef - yyv8.CodecDecodeSelf(d) + if x.FieldRef == nil { + x.FieldRef = new(ObjectFieldSelector) + } + x.FieldRef.CodecDecodeSelf(d) + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ResourceFieldRef != nil { + x.ResourceFieldRef = nil + } + } else { + if x.ResourceFieldRef == nil { + x.ResourceFieldRef = new(ResourceFieldSelector) + } + x.ResourceFieldRef.CodecDecodeSelf(d) } for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb6 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb6 { + if yyb7 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") + z.DecStructFieldNotFound(yyj7-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -13413,7 +13956,7 @@ func (x *AzureFileVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Dec z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *ConfigMapVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *VsphereVirtualDiskVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -13430,7 +13973,7 @@ func (x *ConfigMapVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { var yyq2 [2]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false - yyq2[0] = len(x.Items) != 0 + yyq2[1] = x.FSType != "" var yynn2 int if yyr2 || yy2arr2 { r.EncodeArrayStart(2) @@ -13446,54 +13989,46 @@ func (x *ConfigMapVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Items == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - h.encSliceKeyToPath(([]KeyToPath)(x.Items), e) - } - } + yym4 := z.EncBinary() + _ = yym4 + if false { } else { - r.EncodeNil() + r.EncodeString(codecSelferC_UTF81234, string(x.VolumePath)) } } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - h.encSliceKeyToPath(([]KeyToPath)(x.Items), e) - } - } + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("volumePath")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.VolumePath)) } } 
if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) + } } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + r.EncodeString(codecSelferC_UTF81234, "") } } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("fsType")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) + } } } if yyr2 || yy2arr2 { @@ -13505,7 +14040,225 @@ func (x *ConfigMapVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *ConfigMapVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *VsphereVirtualDiskVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *VsphereVirtualDiskVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "volumePath": + if r.TryDecodeAsNil() { + x.VolumePath = "" + } else { + x.VolumePath = string(r.DecodeString()) + } + case "fsType": + if r.TryDecodeAsNil() { + x.FSType = "" + } else { + x.FSType = string(r.DecodeString()) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *VsphereVirtualDiskVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.VolumePath = "" + } else { + x.VolumePath = string(r.DecodeString()) 
+ } + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.FSType = "" + } else { + x.FSType = string(r.DecodeString()) + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ConfigMapVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.Items) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Items == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSliceKeyToPath(([]KeyToPath)(x.Items), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSliceKeyToPath(([]KeyToPath)(x.Items), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("Name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ConfigMapVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -14060,13 +14813,13 @@ func (x *ContainerPort) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.HostPort = 0 } else { - x.HostPort = int(r.DecodeInt(codecSelferBitsize1234)) + x.HostPort = int32(r.DecodeInt(32)) } case "containerPort": if r.TryDecodeAsNil() { x.ContainerPort = 0 } else { - x.ContainerPort = int(r.DecodeInt(codecSelferBitsize1234)) + x.ContainerPort = int32(r.DecodeInt(32)) } case "protocol": if r.TryDecodeAsNil() { @@ -14124,7 +14877,7 @@ func (x *ContainerPort) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.HostPort = 0 } else { - x.HostPort = int(r.DecodeInt(codecSelferBitsize1234)) + x.HostPort = int32(r.DecodeInt(32)) } yyj9++ if yyhl9 { @@ -14140,7 +14893,7 @@ func (x 
*ContainerPort) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.ContainerPort = 0 } else { - x.ContainerPort = int(r.DecodeInt(codecSelferBitsize1234)) + x.ContainerPort = int32(r.DecodeInt(32)) } yyj9++ if yyhl9 { @@ -14204,13 +14957,14 @@ func (x *VolumeMount) CodecEncodeSelf(e *codec1978.Encoder) { } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool + var yyq2 [4]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false yyq2[1] = x.ReadOnly != false + yyq2[3] = x.SubPath != "" var yynn2 int if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) + r.EncodeArrayStart(4) } else { yynn2 = 2 for _, b := range yyq2 { @@ -14284,6 +15038,31 @@ func (x *VolumeMount) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, string(x.MountPath)) } } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.SubPath)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("subPath")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.SubPath)) + } + } + } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { @@ -14363,6 +15142,12 @@ func (x *VolumeMount) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } else { x.MountPath = string(r.DecodeString()) } + case "subPath": + if r.TryDecodeAsNil() { + x.SubPath = "" + } else { + x.SubPath = string(r.DecodeString()) + } default: z.DecStructFieldNotFound(-1, yys3) } // end switch yys3 @@ -14374,16 +15159,16 @@ func (x *VolumeMount) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb7 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb7 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -14393,13 +15178,13 @@ func (x *VolumeMount) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } else { x.Name = string(r.DecodeString()) } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb7 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb7 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -14409,13 +15194,13 @@ func (x *VolumeMount) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } else { x.ReadOnly = bool(r.DecodeBool()) } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb7 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb7 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -14425,18 +15210,34 @@ func (x *VolumeMount) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } else { x.MountPath = string(r.DecodeString()) } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + 
if r.TryDecodeAsNil() { + x.SubPath = "" + } else { + x.SubPath = string(r.DecodeString()) + } for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb7 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb7 { + if yyb8 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") + z.DecStructFieldNotFound(yyj8-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -14721,15 +15522,16 @@ func (x *EnvVarSource) CodecEncodeSelf(e *codec1978.Encoder) { } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool + var yyq2 [4]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false yyq2[0] = x.FieldRef != nil - yyq2[1] = x.ConfigMapKeyRef != nil - yyq2[2] = x.SecretKeyRef != nil + yyq2[1] = x.ResourceFieldRef != nil + yyq2[2] = x.ConfigMapKeyRef != nil + yyq2[3] = x.SecretKeyRef != nil var yynn2 int if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) + r.EncodeArrayStart(4) } else { yynn2 = 0 for _, b := range yyq2 { @@ -14766,6 +15568,29 @@ func (x *EnvVarSource) CodecEncodeSelf(e *codec1978.Encoder) { if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if yyq2[1] { + if x.ResourceFieldRef == nil { + r.EncodeNil() + } else { + x.ResourceFieldRef.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("resourceFieldRef")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ResourceFieldRef == nil { + r.EncodeNil() + } else { + x.ResourceFieldRef.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { if x.ConfigMapKeyRef == nil { r.EncodeNil() } else { @@ -14775,7 +15600,7 @@ func (x *EnvVarSource) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq2[1] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("configMapKeyRef")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -14788,7 +15613,7 @@ func (x *EnvVarSource) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { + if yyq2[3] { if x.SecretKeyRef == nil { r.EncodeNil() } else { @@ -14798,7 +15623,7 @@ func (x *EnvVarSource) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq2[2] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("secretKeyRef")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -14881,6 +15706,17 @@ func (x *EnvVarSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } x.FieldRef.CodecDecodeSelf(d) } + case "resourceFieldRef": + if r.TryDecodeAsNil() { + if x.ResourceFieldRef != nil { + x.ResourceFieldRef = nil + } + } else { + if x.ResourceFieldRef == nil { + x.ResourceFieldRef = new(ResourceFieldSelector) + } + x.ResourceFieldRef.CodecDecodeSelf(d) + } case "configMapKeyRef": if r.TryDecodeAsNil() { if x.ConfigMapKeyRef != nil { @@ -14914,16 +15750,16 @@ func (x *EnvVarSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l + var yyj8 int + var yyb8 bool + var yyhl8 
bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb7 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb7 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -14938,13 +15774,34 @@ func (x *EnvVarSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.FieldRef.CodecDecodeSelf(d) } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb7 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb7 { + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ResourceFieldRef != nil { + x.ResourceFieldRef = nil + } + } else { + if x.ResourceFieldRef == nil { + x.ResourceFieldRef = new(ResourceFieldSelector) + } + x.ResourceFieldRef.CodecDecodeSelf(d) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -14959,13 +15816,13 @@ func (x *EnvVarSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.ConfigMapKeyRef.CodecDecodeSelf(d) } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb7 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb7 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -14981,17 +15838,17 @@ func (x *EnvVarSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { x.SecretKeyRef.CodecDecodeSelf(d) } for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb7 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb7 { + if yyb8 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") + z.DecStructFieldNotFound(yyj8-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -15199,7 +16056,7 @@ func (x *ObjectFieldSelector) codecDecodeSelfFromArray(l int, d *codec1978.Decod z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *ConfigMapKeySelector) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *ResourceFieldSelector) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -15213,14 +16070,16 @@ func (x *ConfigMapKeySelector) CodecEncodeSelf(e *codec1978.Encoder) { } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool + var yyq2 [3]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false + yyq2[0] = x.ContainerName != "" + yyq2[2] = true var yynn2 int if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) + r.EncodeArrayStart(3) } else { - yynn2 = 2 + yynn2 = 1 for _, b := range yyq2 { if b { yynn2++ @@ -15231,21 +16090,27 @@ func (x *ConfigMapKeySelector) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ContainerName)) + } } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Key)) + r.EncodeString(codecSelferC_UTF81234, "") } } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("key")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - 
r.EncodeString(codecSelferC_UTF81234, string(x.Key)) + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("containerName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ContainerName)) + } } } if yyr2 || yy2arr2 { @@ -15254,17 +16119,50 @@ func (x *ConfigMapKeySelector) CodecEncodeSelf(e *codec1978.Encoder) { _ = yym7 if false { } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + r.EncodeString(codecSelferC_UTF81234, string(x.Resource)) } } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Name")) + r.EncodeString(codecSelferC_UTF81234, string("resource")) z.EncSendContainerState(codecSelfer_containerMapValue1234) yym8 := z.EncBinary() _ = yym8 if false { } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + r.EncodeString(codecSelferC_UTF81234, string(x.Resource)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.Divisor + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else if !yym11 && z.IsJSONHandle() { + z.EncJSONMarshal(yy10) + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("divisor")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.Divisor + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else if !yym13 && z.IsJSONHandle() { + z.EncJSONMarshal(yy12) + } else { + z.EncFallback(yy12) + } } } if yyr2 || yy2arr2 { @@ -15276,7 +16174,7 @@ func (x *ConfigMapKeySelector) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *ConfigMapKeySelector) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *ResourceFieldSelector) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -15306,7 +16204,7 @@ func (x *ConfigMapKeySelector) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *ConfigMapKeySelector) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *ResourceFieldSelector) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -15328,17 +16226,32 @@ func (x *ConfigMapKeySelector) codecDecodeSelfFromMap(l int, d *codec1978.Decode yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) switch yys3 { - case "key": + case "containerName": if r.TryDecodeAsNil() { - x.Key = "" + x.ContainerName = "" } else { - x.Key = string(r.DecodeString()) + x.ContainerName = string(r.DecodeString()) } - case "Name": + case "resource": if r.TryDecodeAsNil() { - x.Name = "" + x.Resource = "" } else { - x.Name = string(r.DecodeString()) + x.Resource = string(r.DecodeString()) + } + case "divisor": + if r.TryDecodeAsNil() { + x.Divisor = pkg3_resource.Quantity{} + } else { + yyv6 := &x.Divisor + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(yyv6) { + } else if !yym7 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv6) + } else { + z.DecFallback(yyv6, false) + } } default: z.DecStructFieldNotFound(-1, yys3) @@ -15347,62 +16260,87 @@ func (x *ConfigMapKeySelector) 
codecDecodeSelfFromMap(l int, d *codec1978.Decode z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *ConfigMapKeySelector) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *ResourceFieldSelector) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb6 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb6 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Key = "" + x.ContainerName = "" } else { - x.Key = string(r.DecodeString()) + x.ContainerName = string(r.DecodeString()) } - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb6 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb6 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Name = "" + x.Resource = "" } else { - x.Name = string(r.DecodeString()) + x.Resource = string(r.DecodeString()) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Divisor = pkg3_resource.Quantity{} + } else { + yyv11 := &x.Divisor + yym12 := z.DecBinary() + _ = yym12 + if false { + } else if z.HasExtensions() && z.DecExt(yyv11) { + } else if !yym12 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv11) + } else { + z.DecFallback(yyv11, false) + } } for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb6 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb6 { + if yyb8 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") + z.DecStructFieldNotFound(yyj8-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *SecretKeySelector) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *ConfigMapKeySelector) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -15479,7 +16417,7 @@ func (x *SecretKeySelector) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *SecretKeySelector) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *ConfigMapKeySelector) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -15509,7 +16447,7 @@ func (x *SecretKeySelector) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *SecretKeySelector) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *ConfigMapKeySelector) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -15550,7 +16488,210 @@ func (x *SecretKeySelector) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *SecretKeySelector) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *ConfigMapKeySelector) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) 
+ _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Key = "" + } else { + x.Key = string(r.DecodeString()) + } + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + x.Name = string(r.DecodeString()) + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *SecretKeySelector) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Key)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("key")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Key)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("Name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *SecretKeySelector) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + 
panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *SecretKeySelector) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "key": + if r.TryDecodeAsNil() { + x.Key = "" + } else { + x.Key = string(r.DecodeString()) + } + case "Name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + x.Name = string(r.DecodeString()) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *SecretKeySelector) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -16043,7 +17184,7 @@ func (x *HTTPGetAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } case "port": if r.TryDecodeAsNil() { - x.Port = pkg5_intstr.IntOrString{} + x.Port = pkg4_intstr.IntOrString{} } else { yyv5 := &x.Port yym6 := z.DecBinary() @@ -16122,7 +17263,7 @@ func (x *HTTPGetAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Port = pkg5_intstr.IntOrString{} + x.Port = pkg4_intstr.IntOrString{} } else { yyv13 := &x.Port yym14 := z.DecBinary() @@ -16358,7 +17499,7 @@ func (x *TCPSocketAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { switch yys3 { case "port": if r.TryDecodeAsNil() { - x.Port = pkg5_intstr.IntOrString{} + x.Port = pkg4_intstr.IntOrString{} } else { yyv4 := &x.Port yym5 := z.DecBinary() @@ -16397,7 +17538,7 @@ func (x *TCPSocketAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Port = pkg5_intstr.IntOrString{} + x.Port = pkg4_intstr.IntOrString{} } else { yyv7 := &x.Port yym8 := z.DecBinary() @@ -16954,31 +18095,31 @@ func (x *Probe) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.InitialDelaySeconds = 0 } else { - x.InitialDelaySeconds = int(r.DecodeInt(codecSelferBitsize1234)) + x.InitialDelaySeconds = int32(r.DecodeInt(32)) } case "timeoutSeconds": if r.TryDecodeAsNil() { x.TimeoutSeconds = 0 } else { - x.TimeoutSeconds = int(r.DecodeInt(codecSelferBitsize1234)) + x.TimeoutSeconds = int32(r.DecodeInt(32)) } case "periodSeconds": if r.TryDecodeAsNil() { x.PeriodSeconds = 0 } else { - x.PeriodSeconds = int(r.DecodeInt(codecSelferBitsize1234)) + x.PeriodSeconds = int32(r.DecodeInt(32)) } case "successThreshold": if r.TryDecodeAsNil() { x.SuccessThreshold = 0 } else { - x.SuccessThreshold = int(r.DecodeInt(codecSelferBitsize1234)) + x.SuccessThreshold = int32(r.DecodeInt(32)) } case "failureThreshold": if r.TryDecodeAsNil() { x.FailureThreshold = 0 } else { - x.FailureThreshold = int(r.DecodeInt(codecSelferBitsize1234)) + x.FailureThreshold = int32(r.DecodeInt(32)) } case "exec": if x.Handler.Exec == nil { @@ -17050,7 +18191,7 @@ func (x *Probe) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { 
x.InitialDelaySeconds = 0 } else { - x.InitialDelaySeconds = int(r.DecodeInt(codecSelferBitsize1234)) + x.InitialDelaySeconds = int32(r.DecodeInt(32)) } yyj12++ if yyhl12 { @@ -17066,7 +18207,7 @@ func (x *Probe) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.TimeoutSeconds = 0 } else { - x.TimeoutSeconds = int(r.DecodeInt(codecSelferBitsize1234)) + x.TimeoutSeconds = int32(r.DecodeInt(32)) } yyj12++ if yyhl12 { @@ -17082,7 +18223,7 @@ func (x *Probe) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.PeriodSeconds = 0 } else { - x.PeriodSeconds = int(r.DecodeInt(codecSelferBitsize1234)) + x.PeriodSeconds = int32(r.DecodeInt(32)) } yyj12++ if yyhl12 { @@ -17098,7 +18239,7 @@ func (x *Probe) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.SuccessThreshold = 0 } else { - x.SuccessThreshold = int(r.DecodeInt(codecSelferBitsize1234)) + x.SuccessThreshold = int32(r.DecodeInt(32)) } yyj12++ if yyhl12 { @@ -17114,7 +18255,7 @@ func (x *Probe) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.FailureThreshold = 0 } else { - x.FailureThreshold = int(r.DecodeInt(codecSelferBitsize1234)) + x.FailureThreshold = int32(r.DecodeInt(32)) } if x.Handler.Exec == nil { x.Handler.Exec = new(ExecAction) @@ -20072,13 +21213,13 @@ func (x *ContainerStateTerminated) codecDecodeSelfFromMap(l int, d *codec1978.De if r.TryDecodeAsNil() { x.ExitCode = 0 } else { - x.ExitCode = int(r.DecodeInt(codecSelferBitsize1234)) + x.ExitCode = int32(r.DecodeInt(32)) } case "signal": if r.TryDecodeAsNil() { x.Signal = 0 } else { - x.Signal = int(r.DecodeInt(codecSelferBitsize1234)) + x.Signal = int32(r.DecodeInt(32)) } case "reason": if r.TryDecodeAsNil() { @@ -20160,7 +21301,7 @@ func (x *ContainerStateTerminated) codecDecodeSelfFromArray(l int, d *codec1978. if r.TryDecodeAsNil() { x.ExitCode = 0 } else { - x.ExitCode = int(r.DecodeInt(codecSelferBitsize1234)) + x.ExitCode = int32(r.DecodeInt(32)) } yyj13++ if yyhl13 { @@ -20176,7 +21317,7 @@ func (x *ContainerStateTerminated) codecDecodeSelfFromArray(l int, d *codec1978. 
if r.TryDecodeAsNil() { x.Signal = 0 } else { - x.Signal = int(r.DecodeInt(codecSelferBitsize1234)) + x.Signal = int32(r.DecodeInt(32)) } yyj13++ if yyhl13 { @@ -20863,7 +22004,7 @@ func (x *ContainerStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.RestartCount = 0 } else { - x.RestartCount = int(r.DecodeInt(codecSelferBitsize1234)) + x.RestartCount = int32(r.DecodeInt(32)) } case "image": if r.TryDecodeAsNil() { @@ -20977,7 +22118,7 @@ func (x *ContainerStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) if r.TryDecodeAsNil() { x.RestartCount = 0 } else { - x.RestartCount = int(r.DecodeInt(codecSelferBitsize1234)) + x.RestartCount = int32(r.DecodeInt(32)) } yyj12++ if yyhl12 { @@ -22599,13 +23740,15 @@ func (x *Affinity) CodecEncodeSelf(e *codec1978.Encoder) { } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool + var yyq2 [3]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false yyq2[0] = x.NodeAffinity != nil + yyq2[1] = x.PodAffinity != nil + yyq2[2] = x.PodAntiAffinity != nil var yynn2 int if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) + r.EncodeArrayStart(3) } else { yynn2 = 0 for _, b := range yyq2 { @@ -22639,6 +23782,52 @@ func (x *Affinity) CodecEncodeSelf(e *codec1978.Encoder) { } } } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.PodAffinity == nil { + r.EncodeNil() + } else { + x.PodAffinity.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("podAffinity")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.PodAffinity == nil { + r.EncodeNil() + } else { + x.PodAffinity.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.PodAntiAffinity == nil { + r.EncodeNil() + } else { + x.PodAntiAffinity.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("podAntiAffinity")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.PodAntiAffinity == nil { + r.EncodeNil() + } else { + x.PodAntiAffinity.CodecEncodeSelf(e) + } + } + } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { @@ -22711,6 +23900,28 @@ func (x *Affinity) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } x.NodeAffinity.CodecDecodeSelf(d) } + case "podAffinity": + if r.TryDecodeAsNil() { + if x.PodAffinity != nil { + x.PodAffinity = nil + } + } else { + if x.PodAffinity == nil { + x.PodAffinity = new(PodAffinity) + } + x.PodAffinity.CodecDecodeSelf(d) + } + case "podAntiAffinity": + if r.TryDecodeAsNil() { + if x.PodAntiAffinity != nil { + x.PodAntiAffinity = nil + } + } else { + if x.PodAntiAffinity == nil { + x.PodAntiAffinity = new(PodAntiAffinity) + } + x.PodAntiAffinity.CodecDecodeSelf(d) + } default: z.DecStructFieldNotFound(-1, yys3) } // end switch yys3 @@ -22722,16 +23933,16 @@ func (x *Affinity) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj5 int - var yyb5 bool - var yyhl5 bool = l >= 0 - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb5 = 
r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb5 { + if yyb7 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -22746,23 +23957,65 @@ func (x *Affinity) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.NodeAffinity.CodecDecodeSelf(d) } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.PodAffinity != nil { + x.PodAffinity = nil + } + } else { + if x.PodAffinity == nil { + x.PodAffinity = new(PodAffinity) + } + x.PodAffinity.CodecDecodeSelf(d) + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.PodAntiAffinity != nil { + x.PodAntiAffinity = nil + } + } else { + if x.PodAntiAffinity == nil { + x.PodAntiAffinity = new(PodAntiAffinity) + } + x.PodAntiAffinity.CodecDecodeSelf(d) + } for { - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb5 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb5 { + if yyb7 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj5-1, "") + z.DecStructFieldNotFound(yyj7-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *NodeAffinity) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *PodAffinity) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -22779,7 +24032,7 @@ func (x *NodeAffinity) CodecEncodeSelf(e *codec1978.Encoder) { var yyq2 [2]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false - yyq2[0] = x.RequiredDuringSchedulingIgnoredDuringExecution != nil + yyq2[0] = len(x.RequiredDuringSchedulingIgnoredDuringExecution) != 0 yyq2[1] = len(x.PreferredDuringSchedulingIgnoredDuringExecution) != 0 var yynn2 int if yyr2 || yy2arr2 { @@ -22800,7 +24053,12 @@ func (x *NodeAffinity) CodecEncodeSelf(e *codec1978.Encoder) { if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { r.EncodeNil() } else { - x.RequiredDuringSchedulingIgnoredDuringExecution.CodecEncodeSelf(e) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSlicePodAffinityTerm(([]PodAffinityTerm)(x.RequiredDuringSchedulingIgnoredDuringExecution), e) + } } } else { r.EncodeNil() @@ -22813,7 +24071,12 @@ func (x *NodeAffinity) CodecEncodeSelf(e *codec1978.Encoder) { if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { r.EncodeNil() } else { - x.RequiredDuringSchedulingIgnoredDuringExecution.CodecEncodeSelf(e) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSlicePodAffinityTerm(([]PodAffinityTerm)(x.RequiredDuringSchedulingIgnoredDuringExecution), e) + } } } } @@ -22827,7 +24090,7 @@ func (x *NodeAffinity) CodecEncodeSelf(e *codec1978.Encoder) { _ = yym7 if false { } else { - h.encSlicePreferredSchedulingTerm(([]PreferredSchedulingTerm)(x.PreferredDuringSchedulingIgnoredDuringExecution), e) + h.encSliceWeightedPodAffinityTerm(([]WeightedPodAffinityTerm)(x.PreferredDuringSchedulingIgnoredDuringExecution), e) } } } else { @@ -22845,7 +24108,7 @@ func (x *NodeAffinity) CodecEncodeSelf(e *codec1978.Encoder) { _ = yym8 if false { } else { - 
h.encSlicePreferredSchedulingTerm(([]PreferredSchedulingTerm)(x.PreferredDuringSchedulingIgnoredDuringExecution), e) + h.encSliceWeightedPodAffinityTerm(([]WeightedPodAffinityTerm)(x.PreferredDuringSchedulingIgnoredDuringExecution), e) } } } @@ -22859,7 +24122,1018 @@ func (x *NodeAffinity) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *NodeAffinity) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *PodAffinity) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodAffinity) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "requiredDuringSchedulingIgnoredDuringExecution": + if r.TryDecodeAsNil() { + x.RequiredDuringSchedulingIgnoredDuringExecution = nil + } else { + yyv4 := &x.RequiredDuringSchedulingIgnoredDuringExecution + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSlicePodAffinityTerm((*[]PodAffinityTerm)(yyv4), d) + } + } + case "preferredDuringSchedulingIgnoredDuringExecution": + if r.TryDecodeAsNil() { + x.PreferredDuringSchedulingIgnoredDuringExecution = nil + } else { + yyv6 := &x.PreferredDuringSchedulingIgnoredDuringExecution + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + h.decSliceWeightedPodAffinityTerm((*[]WeightedPodAffinityTerm)(yyv6), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodAffinity) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.RequiredDuringSchedulingIgnoredDuringExecution = nil + } else { + yyv9 := &x.RequiredDuringSchedulingIgnoredDuringExecution + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + h.decSlicePodAffinityTerm((*[]PodAffinityTerm)(yyv9), d) + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.PreferredDuringSchedulingIgnoredDuringExecution = nil + } else { + yyv11 := &x.PreferredDuringSchedulingIgnoredDuringExecution + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + h.decSliceWeightedPodAffinityTerm((*[]WeightedPodAffinityTerm)(yyv11), d) + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PodAntiAffinity) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.RequiredDuringSchedulingIgnoredDuringExecution) != 0 + yyq2[1] = len(x.PreferredDuringSchedulingIgnoredDuringExecution) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSlicePodAffinityTerm(([]PodAffinityTerm)(x.RequiredDuringSchedulingIgnoredDuringExecution), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("requiredDuringSchedulingIgnoredDuringExecution")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSlicePodAffinityTerm(([]PodAffinityTerm)(x.RequiredDuringSchedulingIgnoredDuringExecution), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.PreferredDuringSchedulingIgnoredDuringExecution == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + h.encSliceWeightedPodAffinityTerm(([]WeightedPodAffinityTerm)(x.PreferredDuringSchedulingIgnoredDuringExecution), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("preferredDuringSchedulingIgnoredDuringExecution")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.PreferredDuringSchedulingIgnoredDuringExecution == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + h.encSliceWeightedPodAffinityTerm(([]WeightedPodAffinityTerm)(x.PreferredDuringSchedulingIgnoredDuringExecution), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodAntiAffinity) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := 
codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodAntiAffinity) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "requiredDuringSchedulingIgnoredDuringExecution": + if r.TryDecodeAsNil() { + x.RequiredDuringSchedulingIgnoredDuringExecution = nil + } else { + yyv4 := &x.RequiredDuringSchedulingIgnoredDuringExecution + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSlicePodAffinityTerm((*[]PodAffinityTerm)(yyv4), d) + } + } + case "preferredDuringSchedulingIgnoredDuringExecution": + if r.TryDecodeAsNil() { + x.PreferredDuringSchedulingIgnoredDuringExecution = nil + } else { + yyv6 := &x.PreferredDuringSchedulingIgnoredDuringExecution + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + h.decSliceWeightedPodAffinityTerm((*[]WeightedPodAffinityTerm)(yyv6), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodAntiAffinity) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.RequiredDuringSchedulingIgnoredDuringExecution = nil + } else { + yyv9 := &x.RequiredDuringSchedulingIgnoredDuringExecution + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + h.decSlicePodAffinityTerm((*[]PodAffinityTerm)(yyv9), d) + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.PreferredDuringSchedulingIgnoredDuringExecution = nil + } else { + yyv11 := &x.PreferredDuringSchedulingIgnoredDuringExecution + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + h.decSliceWeightedPodAffinityTerm((*[]WeightedPodAffinityTerm)(yyv11), d) + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *WeightedPodAffinityTerm) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeInt(int64(x.Weight)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("weight")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(x.Weight)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy7 := &x.PodAffinityTerm + yy7.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("podAffinityTerm")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy9 := &x.PodAffinityTerm + yy9.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *WeightedPodAffinityTerm) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *WeightedPodAffinityTerm) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "weight": + if r.TryDecodeAsNil() { + x.Weight = 0 + } else { + x.Weight = int(r.DecodeInt(codecSelferBitsize1234)) + } + case "podAffinityTerm": + if r.TryDecodeAsNil() { + x.PodAffinityTerm = PodAffinityTerm{} + } else { + yyv5 := &x.PodAffinityTerm + yyv5.CodecDecodeSelf(d) + } + default: + 
z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *WeightedPodAffinityTerm) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Weight = 0 + } else { + x.Weight = int(r.DecodeInt(codecSelferBitsize1234)) + } + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.PodAffinityTerm = PodAffinityTerm{} + } else { + yyv8 := &x.PodAffinityTerm + yyv8.CodecDecodeSelf(d) + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PodAffinityTerm) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.LabelSelector != nil + yyq2[2] = x.TopologyKey != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.LabelSelector == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else if z.HasExtensions() && z.EncExt(x.LabelSelector) { + } else { + z.EncFallback(x.LabelSelector) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("labelSelector")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.LabelSelector == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.EncExt(x.LabelSelector) { + } else { + z.EncFallback(x.LabelSelector) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Namespaces == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + z.F.EncSliceStringV(x.Namespaces, false, e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("namespaces")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Namespaces == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + z.F.EncSliceStringV(x.Namespaces, false, e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if 
yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.TopologyKey)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("topologyKey")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.TopologyKey)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodAffinityTerm) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodAffinityTerm) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "labelSelector": + if r.TryDecodeAsNil() { + if x.LabelSelector != nil { + x.LabelSelector = nil + } + } else { + if x.LabelSelector == nil { + x.LabelSelector = new(pkg2_unversioned.LabelSelector) + } + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(x.LabelSelector) { + } else { + z.DecFallback(x.LabelSelector, false) + } + } + case "namespaces": + if r.TryDecodeAsNil() { + x.Namespaces = nil + } else { + yyv6 := &x.Namespaces + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + z.F.DecSliceStringX(yyv6, false, d) + } + } + case "topologyKey": + if r.TryDecodeAsNil() { + x.TopologyKey = "" + } else { + x.TopologyKey = string(r.DecodeString()) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodAffinityTerm) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj9 int + var yyb9 bool + var yyhl9 bool = l >= 0 + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.LabelSelector != nil { + x.LabelSelector = nil + } + } else { + if x.LabelSelector == nil { + x.LabelSelector = 
new(pkg2_unversioned.LabelSelector) + } + yym11 := z.DecBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.DecExt(x.LabelSelector) { + } else { + z.DecFallback(x.LabelSelector, false) + } + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Namespaces = nil + } else { + yyv12 := &x.Namespaces + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + z.F.DecSliceStringX(yyv12, false, d) + } + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.TopologyKey = "" + } else { + x.TopologyKey = string(r.DecodeString()) + } + for { + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj9-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *NodeAffinity) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.RequiredDuringSchedulingIgnoredDuringExecution != nil + yyq2[1] = len(x.PreferredDuringSchedulingIgnoredDuringExecution) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { + r.EncodeNil() + } else { + x.RequiredDuringSchedulingIgnoredDuringExecution.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("requiredDuringSchedulingIgnoredDuringExecution")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { + r.EncodeNil() + } else { + x.RequiredDuringSchedulingIgnoredDuringExecution.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.PreferredDuringSchedulingIgnoredDuringExecution == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + h.encSlicePreferredSchedulingTerm(([]PreferredSchedulingTerm)(x.PreferredDuringSchedulingIgnoredDuringExecution), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("preferredDuringSchedulingIgnoredDuringExecution")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.PreferredDuringSchedulingIgnoredDuringExecution == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + 
h.encSlicePreferredSchedulingTerm(([]PreferredSchedulingTerm)(x.PreferredDuringSchedulingIgnoredDuringExecution), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *NodeAffinity) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -22911,28 +25185,461 @@ func (x *NodeAffinity) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) switch yys3 { - case "requiredDuringSchedulingIgnoredDuringExecution": + case "requiredDuringSchedulingIgnoredDuringExecution": + if r.TryDecodeAsNil() { + if x.RequiredDuringSchedulingIgnoredDuringExecution != nil { + x.RequiredDuringSchedulingIgnoredDuringExecution = nil + } + } else { + if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { + x.RequiredDuringSchedulingIgnoredDuringExecution = new(NodeSelector) + } + x.RequiredDuringSchedulingIgnoredDuringExecution.CodecDecodeSelf(d) + } + case "preferredDuringSchedulingIgnoredDuringExecution": + if r.TryDecodeAsNil() { + x.PreferredDuringSchedulingIgnoredDuringExecution = nil + } else { + yyv5 := &x.PreferredDuringSchedulingIgnoredDuringExecution + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + h.decSlicePreferredSchedulingTerm((*[]PreferredSchedulingTerm)(yyv5), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *NodeAffinity) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.RequiredDuringSchedulingIgnoredDuringExecution != nil { + x.RequiredDuringSchedulingIgnoredDuringExecution = nil + } + } else { + if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { + x.RequiredDuringSchedulingIgnoredDuringExecution = new(NodeSelector) + } + x.RequiredDuringSchedulingIgnoredDuringExecution.CodecDecodeSelf(d) + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.PreferredDuringSchedulingIgnoredDuringExecution = nil + } else { + yyv9 := &x.PreferredDuringSchedulingIgnoredDuringExecution + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + h.decSlicePreferredSchedulingTerm((*[]PreferredSchedulingTerm)(yyv9), d) + } + } + for { + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj7-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PreferredSchedulingTerm) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() 
&& z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeInt(int64(x.Weight)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("weight")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(x.Weight)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy7 := &x.Preference + yy7.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("preference")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy9 := &x.Preference + yy9.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PreferredSchedulingTerm) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PreferredSchedulingTerm) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "weight": + if r.TryDecodeAsNil() { + x.Weight = 0 + } else { + x.Weight = int32(r.DecodeInt(32)) + } + case "preference": + if r.TryDecodeAsNil() { + x.Preference = NodeSelectorTerm{} + } else { + yyv5 := &x.Preference + yyv5.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PreferredSchedulingTerm) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Weight = 0 + } else { + x.Weight = int32(r.DecodeInt(32)) + } + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Preference = NodeSelectorTerm{} + } else { + yyv8 := &x.Preference + yyv8.CodecDecodeSelf(d) + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *Taint) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.Value != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Key)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("key")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Key)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Value)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("value")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Value)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + x.Effect.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("effect")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Effect.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Taint) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + 
z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Taint) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "key": if r.TryDecodeAsNil() { - if x.RequiredDuringSchedulingIgnoredDuringExecution != nil { - x.RequiredDuringSchedulingIgnoredDuringExecution = nil - } + x.Key = "" } else { - if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { - x.RequiredDuringSchedulingIgnoredDuringExecution = new(NodeSelector) - } - x.RequiredDuringSchedulingIgnoredDuringExecution.CodecDecodeSelf(d) + x.Key = string(r.DecodeString()) } - case "preferredDuringSchedulingIgnoredDuringExecution": + case "value": if r.TryDecodeAsNil() { - x.PreferredDuringSchedulingIgnoredDuringExecution = nil + x.Value = "" } else { - yyv5 := &x.PreferredDuringSchedulingIgnoredDuringExecution - yym6 := z.DecBinary() - _ = yym6 - if false { - } else { - h.decSlicePreferredSchedulingTerm((*[]PreferredSchedulingTerm)(yyv5), d) - } + x.Value = string(r.DecodeString()) + } + case "effect": + if r.TryDecodeAsNil() { + x.Effect = "" + } else { + x.Effect = TaintEffect(r.DecodeString()) } default: z.DecStructFieldNotFound(-1, yys3) @@ -22941,7 +25648,7 @@ func (x *NodeAffinity) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *NodeAffinity) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *Taint) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -22960,14 +25667,9 @@ func (x *NodeAffinity) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - if x.RequiredDuringSchedulingIgnoredDuringExecution != nil { - x.RequiredDuringSchedulingIgnoredDuringExecution = nil - } + x.Key = "" } else { - if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { - x.RequiredDuringSchedulingIgnoredDuringExecution = new(NodeSelector) - } - x.RequiredDuringSchedulingIgnoredDuringExecution.CodecDecodeSelf(d) + x.Key = string(r.DecodeString()) } yyj7++ if yyhl7 { @@ -22981,15 +25683,25 @@ func (x *NodeAffinity) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.PreferredDuringSchedulingIgnoredDuringExecution = nil + x.Value = "" } else { - yyv9 := &x.PreferredDuringSchedulingIgnoredDuringExecution - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - h.decSlicePreferredSchedulingTerm((*[]PreferredSchedulingTerm)(yyv9), d) - } + x.Value = string(r.DecodeString()) + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else 
{ + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Effect = "" + } else { + x.Effect = TaintEffect(r.DecodeString()) } for { yyj7++ @@ -23007,7 +25719,33 @@ func (x *NodeAffinity) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *PreferredSchedulingTerm) CodecEncodeSelf(e *codec1978.Encoder) { +func (x TaintEffect) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *TaintEffect) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *Toleration) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -23021,14 +25759,18 @@ func (x *PreferredSchedulingTerm) CodecEncodeSelf(e *codec1978.Encoder) { } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool + var yyq2 [4]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false + yyq2[0] = x.Key != "" + yyq2[1] = x.Operator != "" + yyq2[2] = x.Value != "" + yyq2[3] = x.Effect != "" var yynn2 int if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) + r.EncodeArrayStart(4) } else { - yynn2 = 2 + yynn2 = 0 for _, b := range yyq2 { if b { yynn2++ @@ -23039,33 +25781,83 @@ func (x *PreferredSchedulingTerm) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Key)) + } } else { - r.EncodeInt(int64(x.Weight)) + r.EncodeString(codecSelferC_UTF81234, "") } } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("weight")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("key")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Key)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + x.Operator.CodecEncodeSelf(e) } else { - r.EncodeInt(int64(x.Weight)) + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("operator")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Operator.CodecEncodeSelf(e) } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy7 := &x.Preference - yy7.CodecEncodeSelf(e) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Value)) + } + } else { 
+ r.EncodeString(codecSelferC_UTF81234, "") + } } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("preference")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy9 := &x.Preference - yy9.CodecEncodeSelf(e) + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("value")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Value)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + x.Effect.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("effect")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Effect.CodecEncodeSelf(e) + } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) @@ -23076,7 +25868,7 @@ func (x *PreferredSchedulingTerm) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *PreferredSchedulingTerm) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *Toleration) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -23106,7 +25898,7 @@ func (x *PreferredSchedulingTerm) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *PreferredSchedulingTerm) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *Toleration) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -23128,18 +25920,29 @@ func (x *PreferredSchedulingTerm) codecDecodeSelfFromMap(l int, d *codec1978.Dec yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) switch yys3 { - case "weight": + case "key": if r.TryDecodeAsNil() { - x.Weight = 0 + x.Key = "" } else { - x.Weight = int(r.DecodeInt(codecSelferBitsize1234)) + x.Key = string(r.DecodeString()) } - case "preference": + case "operator": if r.TryDecodeAsNil() { - x.Preference = NodeSelectorTerm{} + x.Operator = "" } else { - yyv5 := &x.Preference - yyv5.CodecDecodeSelf(d) + x.Operator = TolerationOperator(r.DecodeString()) + } + case "value": + if r.TryDecodeAsNil() { + x.Value = "" + } else { + x.Value = string(r.DecodeString()) + } + case "effect": + if r.TryDecodeAsNil() { + x.Effect = "" + } else { + x.Effect = TaintEffect(r.DecodeString()) } default: z.DecStructFieldNotFound(-1, yys3) @@ -23148,62 +25951,119 @@ func (x *PreferredSchedulingTerm) codecDecodeSelfFromMap(l int, d *codec1978.Dec z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *PreferredSchedulingTerm) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *Toleration) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb6 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb6 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Weight = 0 + x.Key = "" } else { - 
x.Weight = int(r.DecodeInt(codecSelferBitsize1234)) + x.Key = string(r.DecodeString()) } - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb6 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb6 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Preference = NodeSelectorTerm{} + x.Operator = "" } else { - yyv8 := &x.Preference - yyv8.CodecDecodeSelf(d) + x.Operator = TolerationOperator(r.DecodeString()) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Value = "" + } else { + x.Value = string(r.DecodeString()) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Effect = "" + } else { + x.Effect = TaintEffect(r.DecodeString()) } for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb6 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb6 { + if yyb8 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") + z.DecStructFieldNotFound(yyj8-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } +func (x TolerationOperator) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *TolerationOperator) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + func (x *PodSpec) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) @@ -23218,7 +26078,7 @@ func (x *PodSpec) CodecEncodeSelf(e *codec1978.Encoder) { } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [11]bool + var yyq2 [13]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false yyq2[2] = x.RestartPolicy != "" @@ -23229,9 +26089,11 @@ func (x *PodSpec) CodecEncodeSelf(e *codec1978.Encoder) { yyq2[8] = x.NodeName != "" yyq2[9] = x.SecurityContext != nil yyq2[10] = len(x.ImagePullSecrets) != 0 + yyq2[11] = x.Hostname != "" + yyq2[12] = x.Subdomain != "" var yynn2 int if yyr2 || yy2arr2 { - r.EncodeArrayStart(11) + r.EncodeArrayStart(13) } else { yynn2 = 3 for _, b := range yyq2 { @@ -23529,6 +26391,56 @@ func (x *PodSpec) CodecEncodeSelf(e *codec1978.Encoder) { } } } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[11] { + yym41 := z.EncBinary() + _ = yym41 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Hostname)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[11] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("hostname")) + 
z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym42 := z.EncBinary() + _ = yym42 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Hostname)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[12] { + yym44 := z.EncBinary() + _ = yym44 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Subdomain)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[12] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("subdomain")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym45 := z.EncBinary() + _ = yym45 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Subdomain)) + } + } + } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { @@ -23705,6 +26617,18 @@ func (x *PodSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { h.decSliceLocalObjectReference((*[]LocalObjectReference)(yyv19), d) } } + case "hostname": + if r.TryDecodeAsNil() { + x.Hostname = "" + } else { + x.Hostname = string(r.DecodeString()) + } + case "subdomain": + if r.TryDecodeAsNil() { + x.Subdomain = "" + } else { + x.Subdomain = string(r.DecodeString()) + } default: z.DecStructFieldNotFound(-1, yys3) } // end switch yys3 @@ -23716,16 +26640,16 @@ func (x *PodSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj21 int - var yyb21 bool - var yyhl21 bool = l >= 0 - yyj21++ - if yyhl21 { - yyb21 = yyj21 > l + var yyj23 int + var yyb23 bool + var yyhl23 bool = l >= 0 + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l } else { - yyb21 = r.CheckBreak() + yyb23 = r.CheckBreak() } - if yyb21 { + if yyb23 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -23733,21 +26657,21 @@ func (x *PodSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Volumes = nil } else { - yyv22 := &x.Volumes - yym23 := z.DecBinary() - _ = yym23 + yyv24 := &x.Volumes + yym25 := z.DecBinary() + _ = yym25 if false { } else { - h.decSliceVolume((*[]Volume)(yyv22), d) + h.decSliceVolume((*[]Volume)(yyv24), d) } } - yyj21++ - if yyhl21 { - yyb21 = yyj21 > l + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l } else { - yyb21 = r.CheckBreak() + yyb23 = r.CheckBreak() } - if yyb21 { + if yyb23 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -23755,21 +26679,21 @@ func (x *PodSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Containers = nil } else { - yyv24 := &x.Containers - yym25 := z.DecBinary() - _ = yym25 + yyv26 := &x.Containers + yym27 := z.DecBinary() + _ = yym27 if false { } else { - h.decSliceContainer((*[]Container)(yyv24), d) + h.decSliceContainer((*[]Container)(yyv26), d) } } - yyj21++ - if yyhl21 { - yyb21 = yyj21 > l + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l } else { - yyb21 = r.CheckBreak() + yyb23 = r.CheckBreak() } - if yyb21 { + if yyb23 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -23779,13 +26703,13 @@ func (x *PodSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } else { x.RestartPolicy = RestartPolicy(r.DecodeString()) } - yyj21++ - if yyhl21 { - yyb21 = yyj21 > l + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l } else { - yyb21 = r.CheckBreak() + yyb23 = r.CheckBreak() } - if yyb21 { + if yyb23 { 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -23798,20 +26722,20 @@ func (x *PodSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.TerminationGracePeriodSeconds == nil { x.TerminationGracePeriodSeconds = new(int64) } - yym28 := z.DecBinary() - _ = yym28 + yym30 := z.DecBinary() + _ = yym30 if false { } else { *((*int64)(x.TerminationGracePeriodSeconds)) = int64(r.DecodeInt(64)) } } - yyj21++ - if yyhl21 { - yyb21 = yyj21 > l + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l } else { - yyb21 = r.CheckBreak() + yyb23 = r.CheckBreak() } - if yyb21 { + if yyb23 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -23824,20 +26748,20 @@ func (x *PodSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.ActiveDeadlineSeconds == nil { x.ActiveDeadlineSeconds = new(int64) } - yym30 := z.DecBinary() - _ = yym30 + yym32 := z.DecBinary() + _ = yym32 if false { } else { *((*int64)(x.ActiveDeadlineSeconds)) = int64(r.DecodeInt(64)) } } - yyj21++ - if yyhl21 { - yyb21 = yyj21 > l + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l } else { - yyb21 = r.CheckBreak() + yyb23 = r.CheckBreak() } - if yyb21 { + if yyb23 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -23847,13 +26771,13 @@ func (x *PodSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } else { x.DNSPolicy = DNSPolicy(r.DecodeString()) } - yyj21++ - if yyhl21 { - yyb21 = yyj21 > l + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l } else { - yyb21 = r.CheckBreak() + yyb23 = r.CheckBreak() } - if yyb21 { + if yyb23 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -23861,21 +26785,21 @@ func (x *PodSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.NodeSelector = nil } else { - yyv32 := &x.NodeSelector - yym33 := z.DecBinary() - _ = yym33 + yyv34 := &x.NodeSelector + yym35 := z.DecBinary() + _ = yym35 if false { } else { - z.F.DecMapStringStringX(yyv32, false, d) + z.F.DecMapStringStringX(yyv34, false, d) } } - yyj21++ - if yyhl21 { - yyb21 = yyj21 > l + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l } else { - yyb21 = r.CheckBreak() + yyb23 = r.CheckBreak() } - if yyb21 { + if yyb23 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -23885,13 +26809,13 @@ func (x *PodSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } else { x.ServiceAccountName = string(r.DecodeString()) } - yyj21++ - if yyhl21 { - yyb21 = yyj21 > l + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l } else { - yyb21 = r.CheckBreak() + yyb23 = r.CheckBreak() } - if yyb21 { + if yyb23 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -23901,13 +26825,13 @@ func (x *PodSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } else { x.NodeName = string(r.DecodeString()) } - yyj21++ - if yyhl21 { - yyb21 = yyj21 > l + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l } else { - yyb21 = r.CheckBreak() + yyb23 = r.CheckBreak() } - if yyb21 { + if yyb23 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -23922,13 +26846,13 @@ func (x *PodSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.SecurityContext.CodecDecodeSelf(d) } - yyj21++ - if yyhl21 { - yyb21 = yyj21 > l + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l } else { - yyb21 = r.CheckBreak() + yyb23 = r.CheckBreak() } - if yyb21 { + if yyb23 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -23936,26 +26860,58 @@ func (x *PodSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) 
{ if r.TryDecodeAsNil() { x.ImagePullSecrets = nil } else { - yyv37 := &x.ImagePullSecrets - yym38 := z.DecBinary() - _ = yym38 + yyv39 := &x.ImagePullSecrets + yym40 := z.DecBinary() + _ = yym40 if false { } else { - h.decSliceLocalObjectReference((*[]LocalObjectReference)(yyv37), d) + h.decSliceLocalObjectReference((*[]LocalObjectReference)(yyv39), d) } } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Hostname = "" + } else { + x.Hostname = string(r.DecodeString()) + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Subdomain = "" + } else { + x.Subdomain = string(r.DecodeString()) + } for { - yyj21++ - if yyhl21 { - yyb21 = yyj21 > l + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l } else { - yyb21 = r.CheckBreak() + yyb23 = r.CheckBreak() } - if yyb21 { + if yyb23 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj21-1, "") + z.DecStructFieldNotFound(yyj23-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -26825,7 +29781,7 @@ func (x *ReplicationControllerSpec) codecDecodeSelfFromMap(l int, d *codec1978.D if r.TryDecodeAsNil() { x.Replicas = 0 } else { - x.Replicas = int(r.DecodeInt(codecSelferBitsize1234)) + x.Replicas = int32(r.DecodeInt(32)) } case "selector": if r.TryDecodeAsNil() { @@ -26878,7 +29834,7 @@ func (x *ReplicationControllerSpec) codecDecodeSelfFromArray(l int, d *codec1978 if r.TryDecodeAsNil() { x.Replicas = 0 } else { - x.Replicas = int(r.DecodeInt(codecSelferBitsize1234)) + x.Replicas = int32(r.DecodeInt(32)) } yyj8++ if yyhl8 { @@ -27105,13 +30061,13 @@ func (x *ReplicationControllerStatus) codecDecodeSelfFromMap(l int, d *codec1978 if r.TryDecodeAsNil() { x.Replicas = 0 } else { - x.Replicas = int(r.DecodeInt(codecSelferBitsize1234)) + x.Replicas = int32(r.DecodeInt(32)) } case "fullyLabeledReplicas": if r.TryDecodeAsNil() { x.FullyLabeledReplicas = 0 } else { - x.FullyLabeledReplicas = int(r.DecodeInt(codecSelferBitsize1234)) + x.FullyLabeledReplicas = int32(r.DecodeInt(32)) } case "observedGeneration": if r.TryDecodeAsNil() { @@ -27147,7 +30103,7 @@ func (x *ReplicationControllerStatus) codecDecodeSelfFromArray(l int, d *codec19 if r.TryDecodeAsNil() { x.Replicas = 0 } else { - x.Replicas = int(r.DecodeInt(codecSelferBitsize1234)) + x.Replicas = int32(r.DecodeInt(32)) } yyj7++ if yyhl7 { @@ -27163,7 +30119,7 @@ func (x *ReplicationControllerStatus) codecDecodeSelfFromArray(l int, d *codec19 if r.TryDecodeAsNil() { x.FullyLabeledReplicas = 0 } else { - x.FullyLabeledReplicas = int(r.DecodeInt(codecSelferBitsize1234)) + x.FullyLabeledReplicas = int32(r.DecodeInt(32)) } yyj7++ if yyhl7 { @@ -28863,7 +31819,7 @@ func (x *ServiceSpec) CodecEncodeSelf(e *codec1978.Encoder) { } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [7]bool + var yyq2 [8]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false yyq2[0] = x.Type != "" @@ -28871,9 +31827,10 @@ func (x *ServiceSpec) CodecEncodeSelf(e *codec1978.Encoder) { yyq2[4] = len(x.ExternalIPs) != 0 yyq2[5] = x.LoadBalancerIP != "" yyq2[6] = x.SessionAffinity != "" + yyq2[7] = 
len(x.LoadBalancerSourceRanges) != 0 var yynn2 int if yyr2 || yy2arr2 { - r.EncodeArrayStart(7) + r.EncodeArrayStart(8) } else { yynn2 = 2 for _, b := range yyq2 { @@ -29051,6 +32008,39 @@ func (x *ServiceSpec) CodecEncodeSelf(e *codec1978.Encoder) { x.SessionAffinity.CodecEncodeSelf(e) } } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[7] { + if x.LoadBalancerSourceRanges == nil { + r.EncodeNil() + } else { + yym25 := z.EncBinary() + _ = yym25 + if false { + } else { + z.F.EncSliceStringV(x.LoadBalancerSourceRanges, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[7] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("loadBalancerSourceRanges")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.LoadBalancerSourceRanges == nil { + r.EncodeNil() + } else { + yym26 := z.EncBinary() + _ = yym26 + if false { + } else { + z.F.EncSliceStringV(x.LoadBalancerSourceRanges, false, e) + } + } + } + } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { @@ -29172,6 +32162,18 @@ func (x *ServiceSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } else { x.SessionAffinity = ServiceAffinity(r.DecodeString()) } + case "loadBalancerSourceRanges": + if r.TryDecodeAsNil() { + x.LoadBalancerSourceRanges = nil + } else { + yyv14 := &x.LoadBalancerSourceRanges + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + z.F.DecSliceStringX(yyv14, false, d) + } + } default: z.DecStructFieldNotFound(-1, yys3) } // end switch yys3 @@ -29183,16 +32185,16 @@ func (x *ServiceSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj14 int - var yyb14 bool - var yyhl14 bool = l >= 0 - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l + var yyj16 int + var yyb16 bool + var yyhl16 bool = l >= 0 + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l } else { - yyb14 = r.CheckBreak() + yyb16 = r.CheckBreak() } - if yyb14 { + if yyb16 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -29202,13 +32204,13 @@ func (x *ServiceSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } else { x.Type = ServiceType(r.DecodeString()) } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l } else { - yyb14 = r.CheckBreak() + yyb16 = r.CheckBreak() } - if yyb14 { + if yyb16 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -29216,21 +32218,21 @@ func (x *ServiceSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Ports = nil } else { - yyv16 := &x.Ports - yym17 := z.DecBinary() - _ = yym17 + yyv18 := &x.Ports + yym19 := z.DecBinary() + _ = yym19 if false { } else { - h.decSliceServicePort((*[]ServicePort)(yyv16), d) + h.decSliceServicePort((*[]ServicePort)(yyv18), d) } } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l } else { - yyb14 = r.CheckBreak() + yyb16 = r.CheckBreak() } - if yyb14 { + if yyb16 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -29238,21 +32240,21 @@ func (x *ServiceSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Selector = nil } else { - yyv18 := &x.Selector - yym19 := z.DecBinary() - _ = yym19 + yyv20 := &x.Selector + yym21 := z.DecBinary() + _ = yym21 if false { } else { - z.F.DecMapStringStringX(yyv18, false, d) + 
z.F.DecMapStringStringX(yyv20, false, d) } } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l } else { - yyb14 = r.CheckBreak() + yyb16 = r.CheckBreak() } - if yyb14 { + if yyb16 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -29262,13 +32264,13 @@ func (x *ServiceSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } else { x.ClusterIP = string(r.DecodeString()) } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l } else { - yyb14 = r.CheckBreak() + yyb16 = r.CheckBreak() } - if yyb14 { + if yyb16 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -29276,21 +32278,21 @@ func (x *ServiceSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.ExternalIPs = nil } else { - yyv21 := &x.ExternalIPs - yym22 := z.DecBinary() - _ = yym22 + yyv23 := &x.ExternalIPs + yym24 := z.DecBinary() + _ = yym24 if false { } else { - z.F.DecSliceStringX(yyv21, false, d) + z.F.DecSliceStringX(yyv23, false, d) } } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l } else { - yyb14 = r.CheckBreak() + yyb16 = r.CheckBreak() } - if yyb14 { + if yyb16 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -29300,13 +32302,13 @@ func (x *ServiceSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } else { x.LoadBalancerIP = string(r.DecodeString()) } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l } else { - yyb14 = r.CheckBreak() + yyb16 = r.CheckBreak() } - if yyb14 { + if yyb16 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -29316,18 +32318,40 @@ func (x *ServiceSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } else { x.SessionAffinity = ServiceAffinity(r.DecodeString()) } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.LoadBalancerSourceRanges = nil + } else { + yyv27 := &x.LoadBalancerSourceRanges + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + z.F.DecSliceStringX(yyv27, false, d) + } + } for { - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l } else { - yyb14 = r.CheckBreak() + yyb16 = r.CheckBreak() } - if yyb14 { + if yyb16 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj14-1, "") + z.DecStructFieldNotFound(yyj16-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -29532,11 +32556,11 @@ func (x *ServicePort) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Port = 0 } else { - x.Port = int(r.DecodeInt(codecSelferBitsize1234)) + x.Port = int32(r.DecodeInt(32)) } case "targetPort": if r.TryDecodeAsNil() { - x.TargetPort = pkg5_intstr.IntOrString{} + x.TargetPort = pkg4_intstr.IntOrString{} } else { yyv7 := &x.TargetPort yym8 := z.DecBinary() @@ -29553,7 +32577,7 @@ func (x *ServicePort) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.NodePort = 0 } else { - x.NodePort = int(r.DecodeInt(codecSelferBitsize1234)) + x.NodePort = int32(r.DecodeInt(32)) } default: z.DecStructFieldNotFound(-1, yys3) @@ -29615,7 +32639,7 @@ func (x *ServicePort) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if 
r.TryDecodeAsNil() { x.Port = 0 } else { - x.Port = int(r.DecodeInt(codecSelferBitsize1234)) + x.Port = int32(r.DecodeInt(32)) } yyj10++ if yyhl10 { @@ -29629,7 +32653,7 @@ func (x *ServicePort) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.TargetPort = pkg5_intstr.IntOrString{} + x.TargetPort = pkg4_intstr.IntOrString{} } else { yyv14 := &x.TargetPort yym15 := z.DecBinary() @@ -29656,7 +32680,7 @@ func (x *ServicePort) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.NodePort = 0 } else { - x.NodePort = int(r.DecodeInt(codecSelferBitsize1234)) + x.NodePort = int32(r.DecodeInt(32)) } for { yyj10++ @@ -31387,12 +34411,13 @@ func (x *EndpointAddress) CodecEncodeSelf(e *codec1978.Encoder) { } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool + var yyq2 [3]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false + yyq2[1] = x.Hostname != "" var yynn2 int if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) + r.EncodeArrayStart(3) } else { yynn2 = 2 for _, b := range yyq2 { @@ -31422,6 +34447,31 @@ func (x *EndpointAddress) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, string(x.IP)) } } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Hostname)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("hostname")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Hostname)) + } + } + } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if x.TargetRef == nil { @@ -31506,6 +34556,12 @@ func (x *EndpointAddress) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } else { x.IP = string(r.DecodeString()) } + case "hostname": + if r.TryDecodeAsNil() { + x.Hostname = "" + } else { + x.Hostname = string(r.DecodeString()) + } case "TargetRef": if r.TryDecodeAsNil() { if x.TargetRef != nil { @@ -31528,16 +34584,16 @@ func (x *EndpointAddress) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb6 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb6 { + if yyb7 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -31547,13 +34603,29 @@ func (x *EndpointAddress) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) } else { x.IP = string(r.DecodeString()) } - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb6 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb6 { + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Hostname = "" + } else { + x.Hostname = string(r.DecodeString()) + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -31569,17 +34641,17 @@ func (x *EndpointAddress) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) x.TargetRef.CodecDecodeSelf(d) } for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb6 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb6 { + if yyb7 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") + z.DecStructFieldNotFound(yyj7-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -31732,7 +34804,7 @@ func (x *EndpointPort) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Port = 0 } else { - x.Port = int(r.DecodeInt(codecSelferBitsize1234)) + x.Port = int32(r.DecodeInt(32)) } case "Protocol": if r.TryDecodeAsNil() { @@ -31784,7 +34856,7 @@ func (x *EndpointPort) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Port = 0 } else { - x.Port = int(r.DecodeInt(codecSelferBitsize1234)) + x.Port = int32(r.DecodeInt(32)) } yyj7++ if yyhl7 { @@ -32589,7 +35661,7 @@ func (x *DaemonEndpoint) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Port = 0 } else { - x.Port = int(r.DecodeInt(codecSelferBitsize1234)) + x.Port = int32(r.DecodeInt(32)) } default: z.DecStructFieldNotFound(-1, yys3) @@ -32619,7 +35691,7 @@ func (x *DaemonEndpoint) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Port = 0 } else { - x.Port = int(r.DecodeInt(codecSelferBitsize1234)) + x.Port = int32(r.DecodeInt(32)) } for { yyj5++ @@ -32814,14 +35886,14 @@ func (x *NodeSystemInfo) CodecEncodeSelf(e *codec1978.Encoder) { } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [8]bool + var yyq2 [10]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false var yynn2 int if yyr2 || yy2arr2 { - r.EncodeArrayStart(8) + r.EncodeArrayStart(10) } else { - yynn2 = 8 + yynn2 = 10 for _, b := range yyq2 { if b { yynn2++ @@ -32982,6 +36054,44 @@ func (x *NodeSystemInfo) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, string(x.KubeProxyVersion)) } } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym28 := z.EncBinary() + _ = yym28 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.OperatingSystem)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("operatingSystem")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym29 := z.EncBinary() + _ = yym29 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.OperatingSystem)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym31 := z.EncBinary() + _ = yym31 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Architecture)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("architecture")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym32 := z.EncBinary() + _ = yym32 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Architecture)) + } + } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { @@ -33091,6 +36201,18 @@ func (x *NodeSystemInfo) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } else { x.KubeProxyVersion = 
string(r.DecodeString()) } + case "operatingSystem": + if r.TryDecodeAsNil() { + x.OperatingSystem = "" + } else { + x.OperatingSystem = string(r.DecodeString()) + } + case "architecture": + if r.TryDecodeAsNil() { + x.Architecture = "" + } else { + x.Architecture = string(r.DecodeString()) + } default: z.DecStructFieldNotFound(-1, yys3) } // end switch yys3 @@ -33102,16 +36224,16 @@ func (x *NodeSystemInfo) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l + var yyj14 int + var yyb14 bool + var yyhl14 bool = l >= 0 + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb12 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb12 { + if yyb14 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -33121,13 +36243,13 @@ func (x *NodeSystemInfo) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } else { x.MachineID = string(r.DecodeString()) } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb12 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb12 { + if yyb14 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -33137,13 +36259,13 @@ func (x *NodeSystemInfo) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } else { x.SystemUUID = string(r.DecodeString()) } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb12 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb12 { + if yyb14 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -33153,13 +36275,13 @@ func (x *NodeSystemInfo) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } else { x.BootID = string(r.DecodeString()) } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb12 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb12 { + if yyb14 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -33169,13 +36291,13 @@ func (x *NodeSystemInfo) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } else { x.KernelVersion = string(r.DecodeString()) } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb12 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb12 { + if yyb14 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -33185,13 +36307,13 @@ func (x *NodeSystemInfo) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } else { x.OSImage = string(r.DecodeString()) } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb12 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb12 { + if yyb14 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -33201,13 +36323,13 @@ func (x *NodeSystemInfo) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } else { x.ContainerRuntimeVersion = string(r.DecodeString()) } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb12 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb12 { + if yyb14 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -33217,13 +36339,13 @@ func (x *NodeSystemInfo) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } else { x.KubeletVersion = string(r.DecodeString()) } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l + yyj14++ + if yyhl14 { + 
yyb14 = yyj14 > l } else { - yyb12 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb12 { + if yyb14 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -33233,18 +36355,50 @@ func (x *NodeSystemInfo) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } else { x.KubeProxyVersion = string(r.DecodeString()) } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.OperatingSystem = "" + } else { + x.OperatingSystem = string(r.DecodeString()) + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Architecture = "" + } else { + x.Architecture = string(r.DecodeString()) + } for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb12 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb12 { + if yyb14 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") + z.DecStructFieldNotFound(yyj14-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -33273,11 +36427,12 @@ func (x *NodeStatus) CodecEncodeSelf(e *codec1978.Encoder) { yyq2[4] = len(x.Addresses) != 0 yyq2[5] = true yyq2[6] = true + yyq2[7] = len(x.Images) != 0 var yynn2 int if yyr2 || yy2arr2 { r.EncodeArrayStart(8) } else { - yynn2 = 1 + yynn2 = 0 for _, b := range yyq2 { if b { yynn2++ @@ -33449,28 +36604,34 @@ func (x *NodeStatus) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Images == nil { - r.EncodeNil() - } else { - yym29 := z.EncBinary() - _ = yym29 - if false { + if yyq2[7] { + if x.Images == nil { + r.EncodeNil() } else { - h.encSliceContainerImage(([]ContainerImage)(x.Images), e) + yym29 := z.EncBinary() + _ = yym29 + if false { + } else { + h.encSliceContainerImage(([]ContainerImage)(x.Images), e) + } } + } else { + r.EncodeNil() } } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("images")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Images == nil { - r.EncodeNil() - } else { - yym30 := z.EncBinary() - _ = yym30 - if false { + if yyq2[7] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("images")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Images == nil { + r.EncodeNil() } else { - h.encSliceContainerImage(([]ContainerImage)(x.Images), e) + yym30 := z.EncBinary() + _ = yym30 + if false { + } else { + h.encSliceContainerImage(([]ContainerImage)(x.Images), e) + } } } } @@ -37015,7 +40176,7 @@ func (x *Binding) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *DeleteOptions) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *Preconditions) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -37029,16 +40190,15 @@ func (x *DeleteOptions) CodecEncodeSelf(e *codec1978.Encoder) { } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool + var 
yyq2 [1]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false - yyq2[1] = x.Kind != "" - yyq2[2] = x.APIVersion != "" + yyq2[0] = x.UID != nil var yynn2 int if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) + r.EncodeArrayStart(1) } else { - yynn2 = 1 + yynn2 = 0 for _, b := range yyq2 { if b { yynn2++ @@ -37049,38 +40209,309 @@ func (x *DeleteOptions) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.GracePeriodSeconds == nil { + if yyq2[0] { + if x.UID == nil { + r.EncodeNil() + } else { + yy4 := *x.UID + yym5 := z.EncBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.EncExt(yy4) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(yy4)) + } + } + } else { r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("uid")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.UID == nil { + r.EncodeNil() + } else { + yy6 := *x.UID + yym7 := z.EncBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.EncExt(yy6) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(yy6)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Preconditions) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Preconditions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "uid": + if r.TryDecodeAsNil() { + if x.UID != nil { + x.UID = nil + } + } else { + if x.UID == nil { + x.UID = new(pkg1_types.UID) + } + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(x.UID) { } else { - yy4 := *x.GracePeriodSeconds - yym5 := z.EncBinary() - _ = yym5 - if false { + *((*string)(x.UID)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Preconditions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 
{ + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.UID != nil { + x.UID = nil + } + } else { + if x.UID == nil { + x.UID = new(pkg1_types.UID) + } + yym8 := z.DecBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.DecExt(x.UID) { + } else { + *((*string)(x.UID)) = r.DecodeString() + } + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *DeleteOptions) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.GracePeriodSeconds != nil + yyq2[1] = x.Preconditions != nil + yyq2[2] = x.OrphanDependents != nil + yyq2[3] = x.Kind != "" + yyq2[4] = x.APIVersion != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.GracePeriodSeconds == nil { + r.EncodeNil() } else { - r.EncodeInt(int64(yy4)) + yy4 := *x.GracePeriodSeconds + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(yy4)) + } } + } else { + r.EncodeNil() } } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("gracePeriodSeconds")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.GracePeriodSeconds == nil { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("gracePeriodSeconds")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.GracePeriodSeconds == nil { + r.EncodeNil() + } else { + yy6 := *x.GracePeriodSeconds + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeInt(int64(yy6)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Preconditions == nil { + r.EncodeNil() + } else { + x.Preconditions.CodecEncodeSelf(e) + } + } else { r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("preconditions")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Preconditions == nil { + r.EncodeNil() + } else { + x.Preconditions.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.OrphanDependents == nil { + r.EncodeNil() + } else { + yy12 := *x.OrphanDependents + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeBool(bool(yy12)) + } + } } else { - yy6 := *x.GracePeriodSeconds - yym7 := z.EncBinary() - _ = yym7 - if false { + r.EncodeNil() + } + } else { + if yyq2[2] { + 
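// Map form: the "orphanDependents" key/value pair is written only when the
// optional field is set (yyq2[2]); unset optional fields are omitted from the
// encoded map altogether, unlike the array form, which encodes nil in place.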
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("orphanDependents")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.OrphanDependents == nil { + r.EncodeNil() } else { - r.EncodeInt(int64(yy6)) + yy14 := *x.OrphanDependents + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + r.EncodeBool(bool(yy14)) + } } } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym9 := z.EncBinary() - _ = yym9 + if yyq2[3] { + yym17 := z.EncBinary() + _ = yym17 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -37089,12 +40520,12 @@ func (x *DeleteOptions) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2[1] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym10 := z.EncBinary() - _ = yym10 + yym18 := z.EncBinary() + _ = yym18 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -37103,9 +40534,9 @@ func (x *DeleteOptions) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 + if yyq2[4] { + yym20 := z.EncBinary() + _ = yym20 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -37114,12 +40545,12 @@ func (x *DeleteOptions) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2[2] { + if yyq2[4] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 + yym21 := z.EncBinary() + _ = yym21 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -37203,6 +40634,33 @@ func (x *DeleteOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { *((*int64)(x.GracePeriodSeconds)) = int64(r.DecodeInt(64)) } } + case "preconditions": + if r.TryDecodeAsNil() { + if x.Preconditions != nil { + x.Preconditions = nil + } + } else { + if x.Preconditions == nil { + x.Preconditions = new(Preconditions) + } + x.Preconditions.CodecDecodeSelf(d) + } + case "orphanDependents": + if r.TryDecodeAsNil() { + if x.OrphanDependents != nil { + x.OrphanDependents = nil + } + } else { + if x.OrphanDependents == nil { + x.OrphanDependents = new(bool) + } + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + *((*bool)(x.OrphanDependents)) = r.DecodeBool() + } + } case "kind": if r.TryDecodeAsNil() { x.Kind = "" @@ -37226,16 +40684,16 @@ func (x *DeleteOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l + var yyj11 int + var yyb11 bool + var yyhl11 bool = l >= 0 + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l } else { - yyb8 = r.CheckBreak() + yyb11 = r.CheckBreak() } - if yyb8 { + if yyb11 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -37248,20 +40706,67 @@ func (x *DeleteOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.GracePeriodSeconds == nil { x.GracePeriodSeconds = new(int64) } - yym10 := z.DecBinary() - _ = yym10 + yym13 := z.DecBinary() + _ = yym13 
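// The generated `if false { } else if ... } else { ... }` ladders are
// deliberate: the dead first arm gives codecgen a uniform place to splice in
// optional branches (extensions, Selfer, binary fast paths) as `else if`s.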
if false { } else { *((*int64)(x.GracePeriodSeconds)) = int64(r.DecodeInt(64)) } } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l } else { - yyb8 = r.CheckBreak() + yyb11 = r.CheckBreak() } - if yyb8 { + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Preconditions != nil { + x.Preconditions = nil + } + } else { + if x.Preconditions == nil { + x.Preconditions = new(Preconditions) + } + x.Preconditions.CodecDecodeSelf(d) + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.OrphanDependents != nil { + x.OrphanDependents = nil + } + } else { + if x.OrphanDependents == nil { + x.OrphanDependents = new(bool) + } + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*bool)(x.OrphanDependents)) = r.DecodeBool() + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -37271,13 +40776,13 @@ func (x *DeleteOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } else { x.Kind = string(r.DecodeString()) } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l } else { - yyb8 = r.CheckBreak() + yyb11 = r.CheckBreak() } - if yyb8 { + if yyb11 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -37288,17 +40793,17 @@ func (x *DeleteOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { x.APIVersion = string(r.DecodeString()) } for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l } else { - yyb8 = r.CheckBreak() + yyb11 = r.CheckBreak() } - if yyb8 { + if yyb11 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") + z.DecStructFieldNotFound(yyj11-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -40241,7 +43746,265 @@ func (x *NodeProxyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *ServiceProxyOptions) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *ServiceProxyOptions) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.Kind != "" + yyq2[2] = x.APIVersion != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("Path")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() 
+ _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ServiceProxyOptions) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ServiceProxyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "Path": + if r.TryDecodeAsNil() { + x.Path = "" + } else { + x.Path = string(r.DecodeString()) + } + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ServiceProxyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r 
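// Array form decodes the struct positionally in declared-field order: Path,
// then kind, then apiVersion. Any surplus trailing elements are drained via
// DecStructFieldNotFound in the loop at the end of the function.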
+ var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Path = "" + } else { + x.Path = string(r.DecodeString()) + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + for { + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj7-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *OwnerReference) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -40255,16 +44018,14 @@ func (x *ServiceProxyOptions) CodecEncodeSelf(e *codec1978.Encoder) { } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool + var yyq2 [4]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false - yyq2[1] = x.Kind != "" - yyq2[2] = x.APIVersion != "" var yynn2 int if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) + r.EncodeArrayStart(4) } else { - yynn2 = 1 + yynn2 = 4 for _, b := range yyq2 { if b { yynn2++ @@ -40279,67 +44040,76 @@ func (x *ServiceProxyOptions) CodecEncodeSelf(e *codec1978.Encoder) { _ = yym4 if false { } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Path")) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) yym5 := z.EncBinary() _ = yym5 if false { } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } + yym7 := z.EncBinary() + _ = yym7 + if false { } else { - r.EncodeString(codecSelferC_UTF81234, "") + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + 
r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } + yym10 := z.EncBinary() + _ = yym10 + if false { } else { - r.EncodeString(codecSelferC_UTF81234, "") + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) } } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(x.UID) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.UID)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("uid")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else if z.HasExtensions() && z.EncExt(x.UID) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.UID)) } } if yyr2 || yy2arr2 { @@ -40351,7 +44121,7 @@ func (x *ServiceProxyOptions) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *ServiceProxyOptions) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *OwnerReference) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -40381,7 +44151,7 @@ func (x *ServiceProxyOptions) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *ServiceProxyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *OwnerReference) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -40403,11 +44173,11 @@ func (x *ServiceProxyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) switch yys3 { - case "Path": + case "apiVersion": if r.TryDecodeAsNil() { - x.Path = "" + x.APIVersion = "" } else { - x.Path = string(r.DecodeString()) + x.APIVersion = string(r.DecodeString()) } case "kind": if r.TryDecodeAsNil() { @@ -40415,11 +44185,17 @@ func (x *ServiceProxyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder } else { x.Kind = string(r.DecodeString()) } - case "apiVersion": + case "name": if r.TryDecodeAsNil() { - x.APIVersion = "" + x.Name = "" } else { - x.APIVersion = string(r.DecodeString()) + x.Name = string(r.DecodeString()) + } + case "uid": + if r.TryDecodeAsNil() { + x.UID = "" + } else { + x.UID = pkg1_types.UID(r.DecodeString()) } default: z.DecStructFieldNotFound(-1, yys3) @@ -40428,36 +44204,36 @@ func (x *ServiceProxyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *ServiceProxyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x 
*OwnerReference) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb7 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb7 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Path = "" + x.APIVersion = "" } else { - x.Path = string(r.DecodeString()) + x.APIVersion = string(r.DecodeString()) } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb7 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb7 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -40467,34 +44243,50 @@ func (x *ServiceProxyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decod } else { x.Kind = string(r.DecodeString()) } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb7 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb7 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.APIVersion = "" + x.Name = "" } else { - x.APIVersion = string(r.DecodeString()) + x.Name = string(r.DecodeString()) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.UID = "" + } else { + x.UID = pkg1_types.UID(r.DecodeString()) } for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb7 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb7 { + if yyb8 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") + z.DecStructFieldNotFound(yyj8-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -42044,7 +45836,7 @@ func (x *Event) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Count = 0 } else { - x.Count = int(r.DecodeInt(codecSelferBitsize1234)) + x.Count = int32(r.DecodeInt(32)) } case "type": if r.TryDecodeAsNil() { @@ -42229,7 +46021,7 @@ func (x *Event) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Count = 0 } else { - x.Count = int(r.DecodeInt(codecSelferBitsize1234)) + x.Count = int32(r.DecodeInt(32)) } yyj17++ if yyhl17 { @@ -42710,7 +46502,7 @@ func (x *List) CodecEncodeSelf(e *codec1978.Encoder) { _ = yym9 if false { } else { - h.encSliceruntime_Object(([]pkg8_runtime.Object)(x.Items), e) + h.encSliceruntime_Object(([]pkg7_runtime.Object)(x.Items), e) } } } else { @@ -42724,7 +46516,7 @@ func (x *List) CodecEncodeSelf(e *codec1978.Encoder) { _ = yym10 if false { } else { - h.encSliceruntime_Object(([]pkg8_runtime.Object)(x.Items), e) + h.encSliceruntime_Object(([]pkg7_runtime.Object)(x.Items), e) } } } @@ -42861,7 +46653,7 @@ func (x *List) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { _ = yym7 if false { } else { - h.decSliceruntime_Object((*[]pkg8_runtime.Object)(yyv6), d) + h.decSliceruntime_Object((*[]pkg7_runtime.Object)(yyv6), d) } } case "kind": @@ -42932,7 +46724,7 @@ func (x *List) 
codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { _ = yym14 if false { } else { - h.decSliceruntime_Object((*[]pkg8_runtime.Object)(yyv13), d) + h.decSliceruntime_Object((*[]pkg7_runtime.Object)(yyv13), d) } } yyj10++ @@ -46482,14 +50274,13 @@ func (x *ConfigMapList) CodecEncodeSelf(e *codec1978.Encoder) { _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false yyq2[0] = true - yyq2[1] = len(x.Items) != 0 yyq2[2] = x.Kind != "" yyq2[3] = x.APIVersion != "" var yynn2 int if yyr2 || yy2arr2 { r.EncodeArrayStart(4) } else { - yynn2 = 0 + yynn2 = 1 for _, b := range yyq2 { if b { yynn2++ @@ -46529,34 +50320,28 @@ func (x *ConfigMapList) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Items == nil { - r.EncodeNil() + if x.Items == nil { + r.EncodeNil() + } else { + yym9 := z.EncBinary() + _ = yym9 + if false { } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSliceConfigMap(([]ConfigMap)(x.Items), e) - } + h.encSliceConfigMap(([]ConfigMap)(x.Items), e) } - } else { - r.EncodeNil() } } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSliceConfigMap(([]ConfigMap)(x.Items), e) - } + h.encSliceConfigMap(([]ConfigMap)(x.Items), e) } } } @@ -49036,6 +52821,125 @@ func (x *RangeAllocation) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } +func (x codecSelfer1234) encSliceOwnerReference(v []OwnerReference, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceOwnerReference(v *[]OwnerReference, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []OwnerReference{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 64) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]OwnerReference, yyrl1) + } + } else { + yyv1 = make([]OwnerReference, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = OwnerReference{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } 
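// If DecInferLen capped the initial allocation (yyrt1), the elements beyond
// yyrr1 are appended one at a time in the loop below.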
+ if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, OwnerReference{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = OwnerReference{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, OwnerReference{}) // var yyz1 OwnerReference + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = OwnerReference{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []OwnerReference{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + func (x codecSelfer1234) encSlicePersistentVolumeAccessMode(v []PersistentVolumeAccessMode, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) @@ -49185,7 +53089,7 @@ func (x codecSelfer1234) decSlicePersistentVolume(v *[]PersistentVolume, d *code yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 400) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 456) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] @@ -49304,7 +53208,7 @@ func (x codecSelfer1234) decSlicePersistentVolumeClaim(v *[]PersistentVolumeClai yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 296) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 344) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] @@ -49384,7 +53288,7 @@ func (x codecSelfer1234) decSlicePersistentVolumeClaim(v *[]PersistentVolumeClai } } -func (x codecSelfer1234) encSliceDownwardAPIVolumeFile(v []DownwardAPIVolumeFile, e *codec1978.Encoder) { +func (x codecSelfer1234) encSliceKeyToPath(v []KeyToPath, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -49397,7 +53301,7 @@ func (x codecSelfer1234) encSliceDownwardAPIVolumeFile(v []DownwardAPIVolumeFile z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) decSliceDownwardAPIVolumeFile(v *[]DownwardAPIVolumeFile, d *codec1978.Decoder) { +func (x codecSelfer1234) decSliceKeyToPath(v *[]KeyToPath, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -49408,7 +53312,7 @@ func (x codecSelfer1234) decSliceDownwardAPIVolumeFile(v *[]DownwardAPIVolumeFil _ = yyc1 if yyl1 == 0 { if yyv1 == nil { - yyv1 = []DownwardAPIVolumeFile{} + yyv1 = []KeyToPath{} yyc1 = true } else if len(yyv1) != 0 { yyv1 = yyv1[:0] @@ -49423,15 +53327,15 @@ func (x codecSelfer1234) decSliceDownwardAPIVolumeFile(v *[]DownwardAPIVolumeFil yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 48) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 32) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] } else { - yyv1 = make([]DownwardAPIVolumeFile, yyrl1) + yyv1 = make([]KeyToPath, yyrl1) } } else { - yyv1 = make([]DownwardAPIVolumeFile, yyrl1) + yyv1 = make([]KeyToPath, yyrl1) } yyc1 = true yyrr1 = len(yyv1) @@ -49446,7 +53350,7 @@ func (x codecSelfer1234) decSliceDownwardAPIVolumeFile(v *[]DownwardAPIVolumeFil for ; yyj1 < yyrr1; yyj1++ { yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = DownwardAPIVolumeFile{} + 
yyv1[yyj1] = KeyToPath{} } else { yyv2 := &yyv1[yyj1] yyv2.CodecDecodeSelf(d) @@ -49455,10 +53359,10 @@ func (x codecSelfer1234) decSliceDownwardAPIVolumeFile(v *[]DownwardAPIVolumeFil } if yyrt1 { for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, DownwardAPIVolumeFile{}) + yyv1 = append(yyv1, KeyToPath{}) yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = DownwardAPIVolumeFile{} + yyv1[yyj1] = KeyToPath{} } else { yyv3 := &yyv1[yyj1] yyv3.CodecDecodeSelf(d) @@ -49472,13 +53376,13 @@ func (x codecSelfer1234) decSliceDownwardAPIVolumeFile(v *[]DownwardAPIVolumeFil for ; !r.CheckBreak(); yyj1++ { if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, DownwardAPIVolumeFile{}) // var yyz1 DownwardAPIVolumeFile + yyv1 = append(yyv1, KeyToPath{}) // var yyz1 KeyToPath yyc1 = true } yyh1.ElemContainerState(yyj1) if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv1[yyj1] = DownwardAPIVolumeFile{} + yyv1[yyj1] = KeyToPath{} } else { yyv4 := &yyv1[yyj1] yyv4.CodecDecodeSelf(d) @@ -49493,7 +53397,7 @@ func (x codecSelfer1234) decSliceDownwardAPIVolumeFile(v *[]DownwardAPIVolumeFil yyv1 = yyv1[:yyj1] yyc1 = true } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []DownwardAPIVolumeFile{} + yyv1 = []KeyToPath{} yyc1 = true } } @@ -49503,7 +53407,7 @@ func (x codecSelfer1234) decSliceDownwardAPIVolumeFile(v *[]DownwardAPIVolumeFil } } -func (x codecSelfer1234) encSliceKeyToPath(v []KeyToPath, e *codec1978.Encoder) { +func (x codecSelfer1234) encSliceDownwardAPIVolumeFile(v []DownwardAPIVolumeFile, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -49516,7 +53420,7 @@ func (x codecSelfer1234) encSliceKeyToPath(v []KeyToPath, e *codec1978.Encoder) z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) decSliceKeyToPath(v *[]KeyToPath, d *codec1978.Decoder) { +func (x codecSelfer1234) decSliceDownwardAPIVolumeFile(v *[]DownwardAPIVolumeFile, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -49527,7 +53431,7 @@ func (x codecSelfer1234) decSliceKeyToPath(v *[]KeyToPath, d *codec1978.Decoder) _ = yyc1 if yyl1 == 0 { if yyv1 == nil { - yyv1 = []KeyToPath{} + yyv1 = []DownwardAPIVolumeFile{} yyc1 = true } else if len(yyv1) != 0 { yyv1 = yyv1[:0] @@ -49547,10 +53451,10 @@ func (x codecSelfer1234) decSliceKeyToPath(v *[]KeyToPath, d *codec1978.Decoder) if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] } else { - yyv1 = make([]KeyToPath, yyrl1) + yyv1 = make([]DownwardAPIVolumeFile, yyrl1) } } else { - yyv1 = make([]KeyToPath, yyrl1) + yyv1 = make([]DownwardAPIVolumeFile, yyrl1) } yyc1 = true yyrr1 = len(yyv1) @@ -49565,7 +53469,7 @@ func (x codecSelfer1234) decSliceKeyToPath(v *[]KeyToPath, d *codec1978.Decoder) for ; yyj1 < yyrr1; yyj1++ { yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = KeyToPath{} + yyv1[yyj1] = DownwardAPIVolumeFile{} } else { yyv2 := &yyv1[yyj1] yyv2.CodecDecodeSelf(d) @@ -49574,10 +53478,10 @@ func (x codecSelfer1234) decSliceKeyToPath(v *[]KeyToPath, d *codec1978.Decoder) } if yyrt1 { for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, KeyToPath{}) + yyv1 = append(yyv1, DownwardAPIVolumeFile{}) yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = KeyToPath{} + yyv1[yyj1] = DownwardAPIVolumeFile{} } else { yyv3 := &yyv1[yyj1] yyv3.CodecDecodeSelf(d) @@ -49591,13 +53495,13 @@ func (x codecSelfer1234) decSliceKeyToPath(v *[]KeyToPath, d *codec1978.Decoder) for ; !r.CheckBreak(); yyj1++ { if yyj1 >= len(yyv1) { - yyv1 = 
append(yyv1, KeyToPath{}) // var yyz1 KeyToPath + yyv1 = append(yyv1, DownwardAPIVolumeFile{}) // var yyz1 DownwardAPIVolumeFile yyc1 = true } yyh1.ElemContainerState(yyj1) if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv1[yyj1] = KeyToPath{} + yyv1[yyj1] = DownwardAPIVolumeFile{} } else { yyv4 := &yyv1[yyj1] yyv4.CodecDecodeSelf(d) @@ -49612,7 +53516,7 @@ func (x codecSelfer1234) decSliceKeyToPath(v *[]KeyToPath, d *codec1978.Decoder) yyv1 = yyv1[:yyj1] yyc1 = true } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []KeyToPath{} + yyv1 = []DownwardAPIVolumeFile{} yyc1 = true } } @@ -49890,7 +53794,7 @@ func (x codecSelfer1234) decSliceContainerPort(v *[]ContainerPort, d *codec1978. yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 64) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 56) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] @@ -50128,7 +54032,7 @@ func (x codecSelfer1234) decSliceVolumeMount(v *[]VolumeMount, d *codec1978.Deco yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 40) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 56) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] @@ -50247,7 +54151,7 @@ func (x codecSelfer1234) decSlicePod(v *[]Pod, d *codec1978.Decoder) { yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 496) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 624) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] @@ -50565,6 +54469,244 @@ func (x codecSelfer1234) decSliceNodeSelectorRequirement(v *[]NodeSelectorRequir } } +func (x codecSelfer1234) encSlicePodAffinityTerm(v []PodAffinityTerm, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSlicePodAffinityTerm(v *[]PodAffinityTerm, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []PodAffinityTerm{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 48) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]PodAffinityTerm, yyrl1) + } + } else { + yyv1 = make([]PodAffinityTerm, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = PodAffinityTerm{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, PodAffinityTerm{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = PodAffinityTerm{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + 
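// Streaming branch: no length was reported up front, so elements are consumed
// until CheckBreak() fires, growing the slice as needed and truncating any
// stale tail once the loop ends.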
yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, PodAffinityTerm{}) // var yyz1 PodAffinityTerm + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = PodAffinityTerm{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []PodAffinityTerm{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceWeightedPodAffinityTerm(v []WeightedPodAffinityTerm, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceWeightedPodAffinityTerm(v *[]WeightedPodAffinityTerm, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []WeightedPodAffinityTerm{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 56) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]WeightedPodAffinityTerm, yyrl1) + } + } else { + yyv1 = make([]WeightedPodAffinityTerm, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = WeightedPodAffinityTerm{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, WeightedPodAffinityTerm{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = WeightedPodAffinityTerm{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, WeightedPodAffinityTerm{}) // var yyz1 WeightedPodAffinityTerm + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = WeightedPodAffinityTerm{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []WeightedPodAffinityTerm{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + func (x codecSelfer1234) encSlicePreferredSchedulingTerm(v []PreferredSchedulingTerm, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) @@ -50723,7 +54865,7 @@ func (x codecSelfer1234) decSliceVolume(v *[]Volume, d *codec1978.Decoder) { yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 168) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 176) if 
yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] @@ -51199,7 +55341,7 @@ func (x codecSelfer1234) decSliceContainerStatus(v *[]ContainerStatus, d *codec1 yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 128) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 120) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] @@ -51318,7 +55460,7 @@ func (x codecSelfer1234) decSlicePodTemplate(v *[]PodTemplate, d *codec1978.Deco yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 520) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 672) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] @@ -51437,7 +55579,7 @@ func (x codecSelfer1234) decSliceReplicationController(v *[]ReplicationControlle yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 240) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 280) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] @@ -51556,7 +55698,7 @@ func (x codecSelfer1234) decSliceService(v *[]Service, d *codec1978.Decoder) { yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 336) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 408) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] @@ -52032,7 +56174,7 @@ func (x codecSelfer1234) decSliceServiceAccount(v *[]ServiceAccount, d *codec197 yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 240) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 288) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] @@ -52270,7 +56412,7 @@ func (x codecSelfer1234) decSliceEndpointAddress(v *[]EndpointAddress, d *codec1 yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 24) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 40) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] @@ -52508,7 +56650,7 @@ func (x codecSelfer1234) decSliceEndpoints(v *[]Endpoints, d *codec1978.Decoder) yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 216) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 264) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] @@ -52977,7 +57119,7 @@ func (x codecSelfer1234) decResourceList(v *ResourceList, d *codec1978.Decoder) yyl1 := r.ReadMapStart() yybh1 := z.DecBasicHandle() if yyv1 == nil { - yyrl1, _ := z.DecInferLen(yyl1, yybh1.MaxInitLen, 40) + yyrl1, _ := z.DecInferLen(yyl1, yybh1.MaxInitLen, 72) yyv1 = make(map[ResourceName]pkg3_resource.Quantity, yyrl1) *v = yyv1 } @@ -53098,7 +57240,7 @@ func (x codecSelfer1234) decSliceNode(v *[]Node, d *codec1978.Decoder) { yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 488) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 568) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] @@ -53327,7 +57469,7 @@ func (x codecSelfer1234) decSliceNamespace(v *[]Namespace, d *codec1978.Decoder) yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 232) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 280) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] @@ -53446,7 +57588,7 @@ func (x codecSelfer1234) decSliceEvent(v 
*[]Event, d *codec1978.Decoder) { yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 440) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 488) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] @@ -53526,7 +57668,7 @@ func (x codecSelfer1234) decSliceEvent(v *[]Event, d *codec1978.Decoder) { } } -func (x codecSelfer1234) encSliceruntime_Object(v []pkg8_runtime.Object, e *codec1978.Encoder) { +func (x codecSelfer1234) encSliceruntime_Object(v []pkg7_runtime.Object, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -53548,7 +57690,7 @@ func (x codecSelfer1234) encSliceruntime_Object(v []pkg8_runtime.Object, e *code z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) decSliceruntime_Object(v *[]pkg8_runtime.Object, d *codec1978.Decoder) { +func (x codecSelfer1234) decSliceruntime_Object(v *[]pkg7_runtime.Object, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -53559,7 +57701,7 @@ func (x codecSelfer1234) decSliceruntime_Object(v *[]pkg8_runtime.Object, d *cod _ = yyc1 if yyl1 == 0 { if yyv1 == nil { - yyv1 = []pkg8_runtime.Object{} + yyv1 = []pkg7_runtime.Object{} yyc1 = true } else if len(yyv1) != 0 { yyv1 = yyv1[:0] @@ -53579,10 +57721,10 @@ func (x codecSelfer1234) decSliceruntime_Object(v *[]pkg8_runtime.Object, d *cod if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] } else { - yyv1 = make([]pkg8_runtime.Object, yyrl1) + yyv1 = make([]pkg7_runtime.Object, yyrl1) } } else { - yyv1 = make([]pkg8_runtime.Object, yyrl1) + yyv1 = make([]pkg7_runtime.Object, yyrl1) } yyc1 = true yyrr1 = len(yyv1) @@ -53635,7 +57777,7 @@ func (x codecSelfer1234) decSliceruntime_Object(v *[]pkg8_runtime.Object, d *cod for ; !r.CheckBreak(); yyj1++ { if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, nil) // var yyz1 pkg8_runtime.Object + yyv1 = append(yyv1, nil) // var yyz1 pkg7_runtime.Object yyc1 = true } yyh1.ElemContainerState(yyj1) @@ -53662,7 +57804,7 @@ func (x codecSelfer1234) decSliceruntime_Object(v *[]pkg8_runtime.Object, d *cod yyv1 = yyv1[:yyj1] yyc1 = true } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []pkg8_runtime.Object{} + yyv1 = []pkg7_runtime.Object{} yyc1 = true } } @@ -53830,7 +57972,7 @@ func (x codecSelfer1234) decSliceLimitRange(v *[]LimitRange, d *codec1978.Decode yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 216) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 264) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] @@ -54059,7 +58201,7 @@ func (x codecSelfer1234) decSliceResourceQuota(v *[]ResourceQuota, d *codec1978. 
yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 240) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 288) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] @@ -54406,7 +58548,7 @@ func (x codecSelfer1234) decSliceSecret(v *[]Secret, d *codec1978.Decoder) { yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 216) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 264) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] @@ -54525,7 +58667,7 @@ func (x codecSelfer1234) decSliceConfigMap(v *[]ConfigMap, d *codec1978.Decoder) yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 200) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 248) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] @@ -54763,7 +58905,7 @@ func (x codecSelfer1234) decSliceComponentStatus(v *[]ComponentStatus, d *codec1 yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 216) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 264) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/types.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/types.go index a82aa45c075a..4847ba434c55 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/types.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/types.go @@ -138,6 +138,16 @@ type ObjectMeta struct { // objects. Annotation keys have the same formatting restrictions as Label keys. See the // comments on Labels for details. Annotations map[string]string `json:"annotations,omitempty"` + + // List of objects depended by this object. If ALL objects in the list have + // been deleted, this object will be garbage collected. + OwnerReferences []OwnerReference `json:"ownerReferences,omitempty"` + + // Must be empty before the object is deleted from the registry. Each entry + // is an identifier for the responsible component that will remove the entry + // from the list. If the deletionTimestamp of the object is non-nil, entries + // in this list can only be removed. + Finalizers []string `json:"finalizers,omitempty"` } const ( @@ -219,6 +229,8 @@ type VolumeSource struct { AzureFile *AzureFileVolumeSource `json:"azureFile,omitempty"` // ConfigMap represents a configMap that should populate this volume ConfigMap *ConfigMapVolumeSource `json:"configMap,omitempty"` + // VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine + VsphereVolume *VsphereVirtualDiskVolumeSource `json:"vsphereVolume,omitempty"` } // Similar to VolumeSource but meant for the administrator who creates PVs. @@ -257,6 +269,8 @@ type PersistentVolumeSource struct { Flocker *FlockerVolumeSource `json:"flocker,omitempty"` // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. AzureFile *AzureFileVolumeSource `json:"azureFile,omitempty"` + // VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine + VsphereVolume *VsphereVirtualDiskVolumeSource `json:"vsphereVolume,omitempty"` } type PersistentVolumeClaimVolumeSource struct { @@ -305,7 +319,7 @@ const ( // PersistentVolumeReclaimDelete means the volume will be deleted from Kubernetes on release from its claim. // The volume plugin must support Deletion. 
PersistentVolumeReclaimDelete PersistentVolumeReclaimPolicy = "Delete" - // PersistentVolumeReclaimRetain means the volume will left in its current phase (Released) for manual reclamation by the administrator. + // PersistentVolumeReclaimRetain means the volume will be left in its current phase (Released) for manual reclamation by the administrator. // The default policy is Retain. PersistentVolumeReclaimRetain PersistentVolumeReclaimPolicy = "Retain" ) @@ -401,6 +415,10 @@ const ( ClaimPending PersistentVolumeClaimPhase = "Pending" // used for PersistentVolumeClaims that are bound ClaimBound PersistentVolumeClaimPhase = "Bound" + // used for PersistentVolumeClaims that lost their underlying + // PersistentVolume. The claim was bound to a PersistentVolume and this + // volume does not exist any longer and all data on it was lost. + ClaimLost PersistentVolumeClaimPhase = "Lost" ) // Represents a host path mapped into a pod. @@ -456,7 +474,7 @@ type GCEPersistentDiskVolumeSource struct { // Optional: Partition on the disk to mount. // If omitted, kubelet will attempt to mount the device name. // Ex. For /dev/sda1, this field is "1", for /dev/sda, this field is 0 or empty. - Partition int `json:"partition,omitempty"` + Partition int32 `json:"partition,omitempty"` // Optional: Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. ReadOnly bool `json:"readOnly,omitempty"` @@ -472,7 +490,7 @@ type ISCSIVolumeSource struct { // Required: target iSCSI Qualified Name IQN string `json:"iqn,omitempty"` // Required: iSCSI target lun number - Lun int `json:"lun,omitempty"` + Lun int32 `json:"lun,omitempty"` // Optional: Defaults to 'default' (tcp). iSCSI interface name that uses an iSCSI transport. ISCSIInterface string `json:"iscsiInterface,omitempty"` // Filesystem type to mount. @@ -492,7 +510,7 @@ type FCVolumeSource struct { // Required: FC target world wide names (WWNs) TargetWWNs []string `json:"targetWWNs"` // Required: FC target lun number - Lun *int `json:"lun"` + Lun *int32 `json:"lun"` // Filesystem type to mount. // Must be a filesystem type supported by the host operating system. // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. @@ -512,7 +530,11 @@ type FlexVolumeSource struct { // Must be a filesystem type supported by the host operating system. // Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. FSType string `json:"fsType,omitempty"` - // Optional: SecretRef is reference to the authentication secret for User, default is empty. + // Optional: SecretRef is a reference to the secret object containing + // sensitive information to pass to the plugin scripts. This may be + // empty if no secret object is specified. If the secret object + // contains more than one secret, all secrets are passed to the plugin + // scripts. SecretRef *LocalObjectReference `json:"secretRef,omitempty"` // Optional: Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. @@ -538,7 +560,7 @@ type AWSElasticBlockStoreVolumeSource struct { // Optional: Partition on the disk to mount. // If omitted, kubelet will attempt to mount the device name. // Ex. For /dev/sda1, this field is "1", for /dev/sda, this field is 0 or empty. - Partition int `json:"partition,omitempty"` + Partition int32 `json:"partition,omitempty"` // Optional: Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts.
ReadOnly bool `json:"readOnly,omitempty"` @@ -568,6 +590,14 @@ type GitRepoVolumeSource struct { type SecretVolumeSource struct { // Name of the secret in the pod's namespace to use. SecretName string `json:"secretName,omitempty"` + // If unspecified, each key-value pair in the Data field of the referenced + // Secret will be projected into the volume as a file whose name is the + // key and content is the value. If specified, the listed keys will be + // projected into the specified paths, and unlisted keys will not be + // present. If a key is specified which is not present in the Secret, + // the volume setup will error. Paths must be relative and may not contain + // the '..' path or start with '..'. + Items []KeyToPath `json:"items,omitempty"` } // Represents an NFS mount that lasts the lifetime of a pod. @@ -676,7 +706,10 @@ type DownwardAPIVolumeFile struct { // Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..' Path string `json:"path"` // Required: Selects a field of the pod: only annotations, labels, name and namespace are supported. - FieldRef ObjectFieldSelector `json:"fieldRef"` + FieldRef *ObjectFieldSelector `json:"fieldRef,omitempty"` + // Selects a resource of the container: only resources limits and requests + // (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + ResourceFieldRef *ResourceFieldSelector `json:"resourceFieldRef,omitempty"` } // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. @@ -690,6 +723,16 @@ type AzureFileVolumeSource struct { ReadOnly bool `json:"readOnly,omitempty"` } +// Represents a vSphere volume resource. +type VsphereVirtualDiskVolumeSource struct { + // Path that identifies vSphere volume vmdk + VolumePath string `json:"volumePath"` + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + FSType string `json:"fsType,omitempty"` +} + // Adapts a ConfigMap into a volume. // // The contents of the target ConfigMap's Data field will be presented in a @@ -727,9 +770,9 @@ type ContainerPort struct { Name string `json:"name,omitempty"` // Optional: If specified, this must be a valid port number, 0 < x < 65536. // If HostNetwork is specified, this must match ContainerPort. - HostPort int `json:"hostPort,omitempty"` + HostPort int32 `json:"hostPort,omitempty"` // Required: This must be a valid port number, 0 < x < 65536. - ContainerPort int `json:"containerPort"` + ContainerPort int32 `json:"containerPort"` // Required: Supports "TCP" and "UDP". Protocol Protocol `json:"protocol,omitempty"` // Optional: What host IP to bind the external port to. @@ -744,6 +787,9 @@ type VolumeMount struct { ReadOnly bool `json:"readOnly,omitempty"` // Required. Must not contain ':'. MountPath string `json:"mountPath"` + // Path within the volume from which the container's volume should be mounted. + // Defaults to "" (volume's root). + SubPath string `json:"subPath,omitempty"` } // EnvVar represents an environment variable present in a Container. @@ -768,6 +814,9 @@ type EnvVar struct { type EnvVarSource struct { // Selects a field of the pod; only name and namespace are supported. 
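The new Items field on SecretVolumeSource (a few hunks above) replaces the default one-file-per-key projection with an explicit key-to-path mapping. A minimal sketch of that selection rule, with a local {Key, Path} pair standing in for the KeyToPath type the comment refers to (the actual volume-plugin code is elsewhere in the tree):

import "fmt"

// keyToPath is a stand-in for the KeyToPath type named in the comment.
type keyToPath struct{ Key, Path string }

// projectSecret follows the documented rules: with no items, every key in
// Data becomes a file named after the key; with items, only the listed
// keys are written, at their given relative paths, and a missing key is
// an error that fails volume setup.
func projectSecret(data map[string][]byte, items []keyToPath) (map[string][]byte, error) {
	files := make(map[string][]byte, len(data))
	if len(items) == 0 {
		for k, v := range data {
			files[k] = v
		}
		return files, nil
	}
	for _, it := range items {
		v, ok := data[it.Key]
		if !ok {
			return nil, fmt.Errorf("key %q not present in the Secret", it.Key)
		}
		files[it.Path] = v // Path is validated separately: relative, no ".." segments
	}
	return files, nil
}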
FieldRef *ObjectFieldSelector `json:"fieldRef,omitempty"` + // Selects a resource of the container: only resources limits and requests + // (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + ResourceFieldRef *ResourceFieldSelector `json:"resourceFieldRef,omitempty"` // Selects a key of a ConfigMap. ConfigMapKeyRef *ConfigMapKeySelector `json:"configMapKeyRef,omitempty"` // Selects a key of a secret in the pod's namespace. @@ -784,6 +833,16 @@ type ObjectFieldSelector struct { FieldPath string `json:"fieldPath"` } +// ResourceFieldSelector represents container resources (cpu, memory) and their output format +type ResourceFieldSelector struct { + // Container name: required for volumes, optional for env vars + ContainerName string `json:"containerName,omitempty"` + // Required: resource to select + Resource string `json:"resource"` + // Specifies the output format of the exposed resources, defaults to "1" + Divisor resource.Quantity `json:"divisor,omitempty"` +} + // Selects a key from a ConfigMap. type ConfigMapKeySelector struct { // The ConfigMap to select from. @@ -854,16 +913,16 @@ type Probe struct { // The action taken to determine the health of a container Handler `json:",inline"` // Length of time before health checking is activated. In seconds. - InitialDelaySeconds int `json:"initialDelaySeconds,omitempty"` + InitialDelaySeconds int32 `json:"initialDelaySeconds,omitempty"` // Length of time before health checking times out. In seconds. - TimeoutSeconds int `json:"timeoutSeconds,omitempty"` + TimeoutSeconds int32 `json:"timeoutSeconds,omitempty"` // How often (in seconds) to perform the probe. - PeriodSeconds int `json:"periodSeconds,omitempty"` + PeriodSeconds int32 `json:"periodSeconds,omitempty"` // Minimum consecutive successes for the probe to be considered successful after having failed. // Must be 1 for liveness. - SuccessThreshold int `json:"successThreshold,omitempty"` + SuccessThreshold int32 `json:"successThreshold,omitempty"` // Minimum consecutive failures for the probe to be considered failed after having succeeded. - FailureThreshold int `json:"failureThreshold,omitempty"` + FailureThreshold int32 `json:"failureThreshold,omitempty"` } // PullPolicy describes a policy for if/when to pull a container image @@ -994,8 +1053,8 @@ type ContainerStateRunning struct { } type ContainerStateTerminated struct { - ExitCode int `json:"exitCode"` - Signal int `json:"signal,omitempty"` + ExitCode int32 `json:"exitCode"` + Signal int32 `json:"signal,omitempty"` Reason string `json:"reason,omitempty"` Message string `json:"message,omitempty"` StartedAt unversioned.Time `json:"startedAt,omitempty"` @@ -1021,7 +1080,7 @@ type ContainerStatus struct { Ready bool `json:"ready"` // Note that this is calculated from dead containers. But those containers are subject to // garbage collection. This value will get capped at 5 by GC. - RestartCount int `json:"restartCount"` + RestartCount int32 `json:"restartCount"` Image string `json:"image"` ImageID string `json:"imageID"` ContainerID string `json:"containerID,omitempty"` @@ -1054,9 +1113,13 @@ type PodConditionType string // These are valid conditions of pod. const ( + // PodScheduled represents status of the scheduling process for this pod. + PodScheduled PodConditionType = "PodScheduled" // PodReady means the pod is able to service requests and should be added to the // load balancing pools of all matching services. 
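The Divisor on the new ResourceFieldSelector above controls the units in which a limit or request is exposed: resource "limits.memory" with divisor "1Mi" surfaces a 512MiB limit as 512. A sketch of the conversion with plain integers standing in for resource.Quantity (the real code operates on Quantity values and may differ):

// exposeResource converts a raw resource value (e.g. a memory limit in
// bytes) into the units requested by the divisor, rounding up so that a
// non-zero value never reports as zero.
func exposeResource(rawValue, divisor int64) int64 {
	if divisor <= 0 {
		divisor = 1 // the field defaults to "1", i.e. no scaling
	}
	return (rawValue + divisor - 1) / divisor // ceiling division
}

For example, exposeResource(512*1024*1024, 1024*1024) returns 512.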
PodReady PodConditionType = "Ready" + // PodInitialized means that all init containers in the pod have started successfully. + PodInitialized PodConditionType = "Initialized" ) type PodCondition struct { @@ -1145,11 +1208,109 @@ const ( NodeSelectorOpLt NodeSelectorOperator = "Lt" ) -// Affinity is a group of affinity scheduling rules, currently -// only node affinity, but in the future also inter-pod affinity. +// Affinity is a group of affinity scheduling rules. type Affinity struct { // Describes node affinity scheduling rules for the pod. NodeAffinity *NodeAffinity `json:"nodeAffinity,omitempty"` + // Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). + PodAffinity *PodAffinity `json:"podAffinity,omitempty"` + // Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). + PodAntiAffinity *PodAntiAffinity `json:"podAntiAffinity,omitempty"` +} + +// Pod affinity is a group of inter pod affinity scheduling rules. +type PodAffinity struct { + // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. + // If the affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. + // If the affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. due to a pod label update), the + // system will try to eventually evict the pod from its node. + // When there are multiple elements, the lists of nodes corresponding to each + // podAffinityTerm are intersected, i.e. all terms must be satisfied. + // RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"` + // If the affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. + // If the affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. due to a pod label update), the + // system may or may not try to eventually evict the pod from its node. + // When there are multiple elements, the lists of nodes corresponding to each + // podAffinityTerm are intersected, i.e. all terms must be satisfied. + RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty"` + // The scheduler will prefer to schedule pods to nodes that satisfy + // the affinity expressions specified by this field, but it may choose + // a node that violates one or more of the expressions. The node that is + // most preferred is the one with the greatest sum of weights, i.e. + // for each node that meets all of the scheduling requirements (resource + // request, requiredDuringScheduling affinity expressions, etc.), + // compute a sum by iterating through the elements of this field and adding + // "weight" to the sum if the node has pods which match the corresponding podAffinityTerm; the + // node(s) with the highest sum are the most preferred. + PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty"` +} + +// Pod anti affinity is a group of inter pod anti affinity scheduling rules. +type PodAntiAffinity struct { + // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
+ // If the anti-affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. + // If the anti-affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. due to a pod label update), the + // system will try to eventually evict the pod from its node. + // When there are multiple elements, the lists of nodes corresponding to each + // podAffinityTerm are intersected, i.e. all terms must be satisfied. + // RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"` + // If the anti-affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. + // If the anti-affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. due to a pod label update), the + // system may or may not try to eventually evict the pod from its node. + // When there are multiple elements, the lists of nodes corresponding to each + // podAffinityTerm are intersected, i.e. all terms must be satisfied. + RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty"` + // The scheduler will prefer to schedule pods to nodes that satisfy + // the anti-affinity expressions specified by this field, but it may choose + // a node that violates one or more of the expressions. The node that is + // most preferred is the one with the greatest sum of weights, i.e. + // for each node that meets all of the scheduling requirements (resource + // request, requiredDuringScheduling anti-affinity expressions, etc.), + // compute a sum by iterating through the elements of this field and adding + // "weight" to the sum if the node has pods which match the corresponding podAffinityTerm; the + // node(s) with the highest sum are the most preferred. + PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty"` +} + +// The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) +type WeightedPodAffinityTerm struct { + // weight associated with matching the corresponding podAffinityTerm, + // in the range 1-100. + Weight int `json:"weight"` + // Required. A pod affinity term, associated with the corresponding weight. + PodAffinityTerm PodAffinityTerm `json:"podAffinityTerm"` +} + +// Defines a set of pods (namely those matching the labelSelector +// relative to the given namespace(s)) that this pod should be +// co-located (affinity) or not co-located (anti-affinity) with, +// where co-located is defined as running on a node whose value of +// the label with key <topologyKey> matches that of any node on which +// a pod of the set of pods is running. +type PodAffinityTerm struct { + // A label query over a set of resources, in this case pods. + LabelSelector *unversioned.LabelSelector `json:"labelSelector,omitempty"` + // namespaces specifies which namespaces the labelSelector applies to (matches against); + // nil list means "this pod's namespace," empty list means "all namespaces" + // The json tag here is not "omitempty" since we need to distinguish nil and empty. + // See https://golang.org/pkg/encoding/json/#Marshal for more details.
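The PreferredDuringSchedulingIgnoredDuringExecution comments above all describe the same per-node weighted sum. A minimal sketch of that scoring loop over the types just defined, with the term-matching predicate left abstract (matchesTerm is a hypothetical stand-in for the scheduler's real pod/label matching):

// scoreNodes applies the documented rule: for every node that already
// satisfies the hard requirements, add a term's Weight whenever some pod
// on the node matches that term; the node(s) with the highest sums are
// the most preferred.
func scoreNodes(nodes []string, terms []WeightedPodAffinityTerm, matchesTerm func(node string, term PodAffinityTerm) bool) map[string]int {
	scores := make(map[string]int, len(nodes))
	for _, n := range nodes {
		for _, wt := range terms {
			if matchesTerm(n, wt.PodAffinityTerm) {
				scores[n] += wt.Weight
			}
		}
	}
	return scores
}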
+ Namespaces []string `json:"namespaces"` + // This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + // the labelSelector in the specified namespaces, where co-located is defined as running on a node + // whose value of the label with key topologyKey matches that of any node on which any of the + // selected pods is running. + // For PreferredDuringScheduling pod anti-affinity, empty topologyKey is interpreted as "all topologies" + // ("all topologies" here means all the topologyKeys indicated by scheduler command-line argument --failure-domains); + // for affinity and for RequiredDuringScheduling pod anti-affinity, empty topologyKey is not allowed. + TopologyKey string `json:"topologyKey,omitempty"` } // Node affinity is a group of node affinity scheduling rules. @@ -1184,15 +1345,84 @@ type NodeAffinity struct { // (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). type PreferredSchedulingTerm struct { // Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. - Weight int `json:"weight"` + Weight int32 `json:"weight"` // A node selector term, associated with the corresponding weight. Preference NodeSelectorTerm `json:"preference"` } +// The node this Taint is attached to has the effect "effect" on +// any pod that does not tolerate the Taint. +type Taint struct { + // Required. The taint key to be applied to a node. + Key string `json:"key" patchStrategy:"merge" patchMergeKey:"key"` + // Required. The taint value corresponding to the taint key. + Value string `json:"value,omitempty"` + // Required. The effect of the taint on pods + // that do not tolerate the taint. + // Valid effects are NoSchedule and PreferNoSchedule. + Effect TaintEffect `json:"effect"` +} + +type TaintEffect string + +const ( + // Do not allow new pods to schedule onto the node unless they tolerate the taint, + // but allow all pods submitted to Kubelet without going through the scheduler + // to start, and allow all already-running pods to continue running. + // Enforced by the scheduler. + TaintEffectNoSchedule TaintEffect = "NoSchedule" + // Like TaintEffectNoSchedule, but the scheduler tries not to schedule + // new pods onto the node, rather than prohibiting new pods from scheduling + // onto the node entirely. Enforced by the scheduler. + TaintEffectPreferNoSchedule TaintEffect = "PreferNoSchedule" + // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. + // Do not allow new pods to schedule onto the node unless they tolerate the taint, + // do not allow pods to start on Kubelet unless they tolerate the taint, + // but allow all already-running pods to continue running. + // Enforced by the scheduler and Kubelet. + // TaintEffectNoScheduleNoAdmit TaintEffect = "NoScheduleNoAdmit" + // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. + // Do not allow new pods to schedule onto the node unless they tolerate the taint, + // do not allow pods to start on Kubelet unless they tolerate the taint, + // and evict any already-running pods that do not tolerate the taint. + // Enforced by the scheduler and Kubelet. + // TaintEffectNoScheduleNoAdmitNoExecute = "NoScheduleNoAdmitNoExecute" +) + +// The pod this Toleration is attached to tolerates any taint that matches +// the triple <key,value,effect> using the matching operator <operator>. +type Toleration struct { + // Required. Key is the taint key that the toleration applies to.
+ Key string `json:"key,omitempty" patchStrategy:"merge" patchMergeKey:"key"` + // operator represents a key's relationship to the value. + // Valid operators are Exists and Equal. Defaults to Equal. + // Exists is equivalent to wildcard for value, so that a pod can + // tolerate all taints of a particular category. + Operator TolerationOperator `json:"operator,omitempty"` + // Value is the taint value the toleration matches to. + // If the operator is Exists, the value should be empty, otherwise just a regular string. + Value string `json:"value,omitempty"` + // Effect indicates the taint effect to match. Empty means match all taint effects. + // When specified, allowed values are NoSchedule and PreferNoSchedule. + Effect TaintEffect `json:"effect,omitempty"` + // TODO: For forgiveness (#1574), we'd eventually add at least a grace period + // here, and possibly an occurrence threshold and period. +} + +// A toleration operator is the set of operators that can be used in a toleration. +type TolerationOperator string + +const ( + TolerationOpExists TolerationOperator = "Exists" + TolerationOpEqual TolerationOperator = "Equal" +) + // PodSpec is a description of a pod type PodSpec struct { Volumes []Volume `json:"volumes"` - // Required: there must be at least one container in a pod. + // List of initialization containers belonging to the pod. + InitContainers []Container `json:"-"` + // List of containers belonging to the pod. Containers []Container `json:"containers"` RestartPolicy RestartPolicy `json:"restartPolicy,omitempty"` // Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. @@ -1225,6 +1455,12 @@ type PodSpec struct { // If specified, these secrets will be passed to individual puller implementations for them to use. For example, // in the case of docker, only DockerConfig type secrets are honored. ImagePullSecrets []LocalObjectReference `json:"imagePullSecrets,omitempty"` + // Specifies the hostname of the Pod. + // If not specified, the pod's hostname will be set to a system-defined value. + Hostname string `json:"hostname,omitempty"` + // If specified, the fully qualified Pod hostname will be "...svc.". + // If not specified, the pod will not have a domainname at all. + Subdomain string `json:"subdomain,omitempty"` } // PodSecurityContext holds pod-level security attributes and common container settings. @@ -1293,6 +1529,11 @@ type PodStatus struct { // This is before the Kubelet pulled the container image(s) for the pod. StartTime *unversioned.Time `json:"startTime,omitempty"` + // The list has one entry per init container in the manifest. The most recent successful + // init container will have ready = true, the most recently started container will have + // startTime set. + // More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#container-statuses + InitContainerStatuses []ContainerStatus `json:"-"` // The list has one entry per container in the manifest. Each entry is // currently the output of `docker inspect`. This output format is *not* // final and should not be relied upon. @@ -1358,7 +1599,7 @@ type PodTemplateList struct { // a TemplateRef or a Template set. type ReplicationControllerSpec struct { // Replicas is the number of desired replicas. - Replicas int `json:"replicas"` + Replicas int32 `json:"replicas"` // Selector is a label query over pods that should match the Replicas count. 
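The Taint and Toleration types above match on the <key,value,effect> triple. A minimal sketch of that predicate using only the fields and operators just defined (the scheduler's actual matching lives elsewhere and handles more cases):

// tolerates reports whether a toleration matches a taint: the keys must
// be equal, Exists ignores the value while Equal (the default) compares
// it, and an empty toleration Effect matches every taint effect.
func tolerates(tol Toleration, taint Taint) bool {
	if tol.Key != taint.Key {
		return false
	}
	switch tol.Operator {
	case TolerationOpExists:
		// value is ignored: the pod tolerates every taint with this key
	case TolerationOpEqual, "":
		if tol.Value != taint.Value {
			return false
		}
	default:
		return false
	}
	return tol.Effect == "" || tol.Effect == taint.Effect
}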
Selector map[string]string `json:"selector"` @@ -1378,10 +1619,10 @@ type ReplicationControllerSpec struct { // controller. type ReplicationControllerStatus struct { // Replicas is the number of actual replicas. - Replicas int `json:"replicas"` + Replicas int32 `json:"replicas"` // The number of pods that have labels matching the labels of the pod template of the replication controller. - FullyLabeledReplicas int `json:"fullyLabeledReplicas,omitempty"` + FullyLabeledReplicas int32 `json:"fullyLabeledReplicas,omitempty"` // ObservedGeneration is the most recent generation observed by the controller. ObservedGeneration int64 `json:"observedGeneration,omitempty"` @@ -1510,8 +1751,13 @@ type ServiceSpec struct { // This field will be ignored if the cloud-provider does not support the feature. LoadBalancerIP string `json:"loadBalancerIP,omitempty"` - // Required: Supports "ClientIP" and "None". Used to maintain session affinity. + // Optional: Supports "ClientIP" and "None". Used to maintain session affinity. SessionAffinity ServiceAffinity `json:"sessionAffinity,omitempty"` + + // Optional: If specified and supported by the platform, traffic through the cloud-provider + // load-balancer will be restricted to the specified client IPs. This field will be ignored if the + // cloud-provider does not support the feature. + LoadBalancerSourceRanges []string `json:"loadBalancerSourceRanges,omitempty"` } type ServicePort struct { @@ -1525,7 +1771,7 @@ type ServicePort struct { Protocol Protocol `json:"protocol"` // The port that will be exposed on the service. - Port int `json:"port"` + Port int32 `json:"port"` // Optional: The target port on pods selected by this service. If this // is a string, it will be looked up as a named port in the target @@ -1537,7 +1783,7 @@ type ServicePort struct { // The port on each node on which this service is exposed. // Default is to auto-allocate a port if the ServiceType of this Service requires one. - NodePort int `json:"nodePort"` + NodePort int32 `json:"nodePort"` } // +genclient=true @@ -1624,9 +1870,13 @@ type EndpointSubset struct { // EndpointAddress is a tuple that describes a single IP address. type EndpointAddress struct { // The IP of this endpoint. + // IPv6 is also accepted but not fully supported on all platforms. Also, certain + // kubernetes components, like kube-proxy, are not IPv6 ready. // TODO: This should allow hostname or IP, see #4447. IP string - + // Optional: Hostname of this endpoint. + // Meant to be used by DNS servers etc. + Hostname string `json:"hostname,omitempty"` // Optional: The kubernetes object related to the entry point. TargetRef *ObjectReference } @@ -1638,7 +1888,7 @@ type EndpointPort struct { Name string // The port number. - Port int + Port int32 // The IP protocol for this port. Protocol Protocol @@ -1671,8 +1921,14 @@ type NodeSpec struct { // DaemonEndpoint contains information about a single Daemon endpoint. type DaemonEndpoint struct { + /* + The port tag was not properly in quotes in earlier releases, so it must be + uppercased for backwards compat (since it was falling back to var name of + 'Port'). + */ + // Port number of the given endpoint. - Port int `json:port` + Port int32 `json:"Port"` } // NodeDaemonEndpoints lists ports opened by daemons running on the Node. @@ -1699,6 +1955,10 @@ type NodeSystemInfo struct { KubeletVersion string `json:"kubeletVersion"` // KubeProxy Version reported by the node.
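The DaemonEndpoint hunk above works because of how encoding/json treats the old, malformed tag: `json:port` has no quoted value, so reflect.StructTag.Get("json") returns an empty string and the marshaler falls back to the exported field name, Port. That is why the corrected tag must keep the uppercase "Port". A small self-contained demonstration, not part of the patch:

package main

import (
	"encoding/json"
	"fmt"
)

// Malformed tag: the value is unquoted, so it is ignored and encoding/json
// falls back to the field name "Port".
type oldEndpoint struct {
	Port int32 `json:port`
}

// Corrected tag: explicitly "Port", staying wire-compatible with old output.
type newEndpoint struct {
	Port int32 `json:"Port"`
}

func main() {
	a, _ := json.Marshal(oldEndpoint{Port: 10250})
	b, _ := json.Marshal(newEndpoint{Port: 10250})
	fmt.Println(string(a), string(b)) // {"Port":10250} {"Port":10250}
}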
KubeProxyVersion string `json:"kubeProxyVersion"` + // The Operating System reported by the node + OperatingSystem string `json:"operatingSystem"` + // The Architecture reported by the node + Architecture string `json:"architecture"` } // NodeStatus is information about the current status of a node. @@ -1718,7 +1978,7 @@ type NodeStatus struct { // Set of ids/uuids to uniquely identify the node. NodeInfo NodeSystemInfo `json:"nodeInfo,omitempty"` // List of container images on this node - Images []ContainerImage `json:"images",omitempty` + Images []ContainerImage `json:"images,omitempty"` } // Describe a container image @@ -1752,6 +2012,10 @@ const ( // NodeOutOfDisk means the kubelet will not accept new pods due to insufficient free disk // space on the node. NodeOutOfDisk NodeConditionType = "OutOfDisk" + // NodeMemoryPressure means the kubelet is under pressure due to insufficient available memory. + NodeMemoryPressure NodeConditionType = "MemoryPressure" + // NodeNetworkUnavailable means that network for the node is not correctly configured. + NodeNetworkUnavailable NodeConditionType = "NetworkUnavailable" ) type NodeCondition struct { @@ -1789,6 +2053,11 @@ type NodeResources struct { // ResourceName is the name identifying various resources in a ResourceList. type ResourceName string +// Resource names must not be more than 63 characters, consisting of upper- or lower-case alphanumeric characters, +// with the -, _, and . characters allowed anywhere, except the first or last character. +// The default convention, matching that for annotations, is to use lower-case names, with dashes, rather than +// camel case, separating compound words. +// Fully-qualified resource typenames are constructed from a DNS-style subdomain, followed by a slash `/` and a name. const ( // CPU, in cores. (500m = .5 cores) ResourceCPU ResourceName = "cpu" @@ -1796,6 +2065,8 @@ const ( ResourceMemory ResourceName = "memory" // Volume size, in bytes (e.g. 5Gi = 5GiB = 5 * 1024 * 1024 * 1024) ResourceStorage ResourceName = "storage" + // NVIDIA GPU, in devices. Alpha, might change: although fractional and allowing values >1, only one whole device per node is assigned. + ResourceNvidiaGPU ResourceName = "alpha.kubernetes.io/nvidia-gpu" // Number of Pods that may be running on this Node: see ResourcePods ) @@ -1836,6 +2107,7 @@ type FinalizerName string // These are internal finalizer values to Kubernetes, must be qualified name unless defined here const ( FinalizerKubernetes FinalizerName = "kubernetes" + FinalizerOrphan string = "orphan" ) // NamespaceStatus is information about the current status of a Namespace. @@ -1887,6 +2159,12 @@ type Binding struct { Target ObjectReference `json:"target"` } +// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out. +type Preconditions struct { + // Specifies the target UID. + UID *types.UID `json:"uid,omitempty"` +} + // DeleteOptions may be provided when deleting an API object type DeleteOptions struct { unversioned.TypeMeta `json:",inline"` @@ -1894,7 +2172,15 @@ type DeleteOptions struct { // Optional duration in seconds before the object should be deleted. Value must be a non-negative integer. // The value zero indicates delete immediately. If this value is nil, the default grace period for the // specified type will be used. - GracePeriodSeconds *int64 `json:"gracePeriodSeconds"` + GracePeriodSeconds *int64 `json:"gracePeriodSeconds,omitempty"` + + // Must be fulfilled before a deletion is carried out.
If not possible, a 409 Conflict status will be + // returned. + Preconditions *Preconditions `json:"preconditions,omitempty"` + + // Should the dependent objects be orphaned. If true/false, the "orphan" + // finalizer will be added to/removed from the object's finalizers list. + OrphanDependents *bool `json:"orphanDependents,omitempty"` } // ExportOptions is the query options to the standard REST get call. @@ -2027,6 +2313,23 @@ type ServiceProxyOptions struct { Path string } +// OwnerReference contains enough information to let you identify an owning +// object. Currently, an owning object must be in the same namespace, so there +// is no namespace field. +type OwnerReference struct { + // API version of the referent. + APIVersion string `json:"apiVersion"` + // Kind of the referent. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + Kind string `json:"kind"` + // Name of the referent. + // More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#names + Name string `json:"name"` + // UID of the referent. + // More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#uids + UID types.UID `json:"uid"` +} + // ObjectReference contains enough information to let you inspect or modify the referred object. type ObjectReference struct { Kind string `json:"kind,omitempty"` @@ -2104,7 +2407,7 @@ type Event struct { LastTimestamp unversioned.Time `json:"lastTimestamp,omitempty"` // The number of times this event has occurred. - Count int `json:"count,omitempty"` + Count int32 `json:"count,omitempty"` // Type of this event (Normal, Warning), new types could be added in the future. Type string `json:"type,omitempty"` @@ -2194,6 +2497,10 @@ const ( ResourceConfigMaps ResourceName = "configmaps" // ResourcePersistentVolumeClaims, number ResourcePersistentVolumeClaims ResourceName = "persistentvolumeclaims" + // ResourceServicesNodePorts, number + ResourceServicesNodePorts ResourceName = "services.nodeports" + // ResourceServicesLoadBalancers, number + ResourceServicesLoadBalancers ResourceName = "services.loadbalancers" // CPU request, in cores. (500m = .5 cores) ResourceRequestsCPU ResourceName = "requests.cpu" // Memory request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) @@ -2385,7 +2692,7 @@ type ConfigMapList struct { unversioned.ListMeta `json:"metadata,omitempty"` // Items is the list of ConfigMaps. - Items []ConfigMap `json:"items,omitempty"` + Items []ConfigMap `json:"items"` } // These constants are for remote command execution and port forwarding and are @@ -2539,4 +2846,14 @@ type RangeAllocation struct { const ( // "default-scheduler" is the name of default scheduler. DefaultSchedulerName = "default-scheduler" + + // RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule + // corresponding to every RequiredDuringScheduling affinity rule. + // When the --hard-pod-affinity-weight scheduler flag is not specified, + // DefaultHardPodAffinityWeight defines the weight of the implicit PreferredDuringScheduling affinity rule. + DefaultHardPodAffinitySymmetricWeight int = 1 + + // When the --failure-domains scheduler flag is not specified, + // DefaultFailureDomains defines the set of label keys used when TopologyKey is empty in PreferredDuringScheduling anti-affinity. 
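The new Preconditions field on DeleteOptions gates deletion on the stored object's identity. A minimal sketch of the UID check described above, as a hypothetical helper (the registry code that actually enforces this is not part of this diff; fmt and the package's types import are assumed):

// checkPreconditions enforces DeleteOptions.Preconditions: when a target
// UID is supplied, the stored object's UID must match, otherwise the
// delete is rejected and the API server answers 409 Conflict.
func checkPreconditions(pre *Preconditions, storedUID types.UID) error {
	if pre == nil || pre.UID == nil {
		return nil // no precondition to enforce
	}
	if *pre.UID != storedUID {
		return fmt.Errorf("precondition failed: UID %s does not match stored UID %s", *pre.UID, storedUID)
	}
	return nil
}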
+ DefaultFailureDomains string = unversioned.LabelHostname + "," + unversioned.LabelZoneFailureDomain + "," + unversioned.LabelZoneRegion ) diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/unversioned/deep_copy_generated.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/unversioned/deep_copy_generated.go new file mode 100644 index 000000000000..c4a551d657b6 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/unversioned/deep_copy_generated.go @@ -0,0 +1,315 @@ +// +build !ignore_autogenerated + +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package unversioned + +import ( + "time" + + conversion "k8s.io/kubernetes/pkg/conversion" +) + +func DeepCopy_unversioned_APIGroup(in APIGroup, out *APIGroup, c *conversion.Cloner) error { + if err := DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + out.Name = in.Name + if in.Versions != nil { + in, out := in.Versions, &out.Versions + *out = make([]GroupVersionForDiscovery, len(in)) + for i := range in { + if err := DeepCopy_unversioned_GroupVersionForDiscovery(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Versions = nil + } + if err := DeepCopy_unversioned_GroupVersionForDiscovery(in.PreferredVersion, &out.PreferredVersion, c); err != nil { + return err + } + if in.ServerAddressByClientCIDRs != nil { + in, out := in.ServerAddressByClientCIDRs, &out.ServerAddressByClientCIDRs + *out = make([]ServerAddressByClientCIDR, len(in)) + for i := range in { + if err := DeepCopy_unversioned_ServerAddressByClientCIDR(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.ServerAddressByClientCIDRs = nil + } + return nil +} + +func DeepCopy_unversioned_APIGroupList(in APIGroupList, out *APIGroupList, c *conversion.Cloner) error { + if err := DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if in.Groups != nil { + in, out := in.Groups, &out.Groups + *out = make([]APIGroup, len(in)) + for i := range in { + if err := DeepCopy_unversioned_APIGroup(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Groups = nil + } + return nil +} + +func DeepCopy_unversioned_APIResource(in APIResource, out *APIResource, c *conversion.Cloner) error { + out.Name = in.Name + out.Namespaced = in.Namespaced + out.Kind = in.Kind + return nil +} + +func DeepCopy_unversioned_APIResourceList(in APIResourceList, out *APIResourceList, c *conversion.Cloner) error { + if err := DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + out.GroupVersion = in.GroupVersion + if in.APIResources != nil { + in, out := in.APIResources, &out.APIResources + *out = make([]APIResource, len(in)) + for i := range in { + if err := DeepCopy_unversioned_APIResource(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.APIResources = nil + } + return 
nil +} + +func DeepCopy_unversioned_APIVersions(in APIVersions, out *APIVersions, c *conversion.Cloner) error { + if err := DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if in.Versions != nil { + in, out := in.Versions, &out.Versions + *out = make([]string, len(in)) + copy(*out, in) + } else { + out.Versions = nil + } + if in.ServerAddressByClientCIDRs != nil { + in, out := in.ServerAddressByClientCIDRs, &out.ServerAddressByClientCIDRs + *out = make([]ServerAddressByClientCIDR, len(in)) + for i := range in { + if err := DeepCopy_unversioned_ServerAddressByClientCIDR(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.ServerAddressByClientCIDRs = nil + } + return nil +} + +func DeepCopy_unversioned_Duration(in Duration, out *Duration, c *conversion.Cloner) error { + out.Duration = in.Duration + return nil +} + +func DeepCopy_unversioned_ExportOptions(in ExportOptions, out *ExportOptions, c *conversion.Cloner) error { + if err := DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + out.Export = in.Export + out.Exact = in.Exact + return nil +} + +func DeepCopy_unversioned_GroupKind(in GroupKind, out *GroupKind, c *conversion.Cloner) error { + out.Group = in.Group + out.Kind = in.Kind + return nil +} + +func DeepCopy_unversioned_GroupResource(in GroupResource, out *GroupResource, c *conversion.Cloner) error { + out.Group = in.Group + out.Resource = in.Resource + return nil +} + +func DeepCopy_unversioned_GroupVersion(in GroupVersion, out *GroupVersion, c *conversion.Cloner) error { + out.Group = in.Group + out.Version = in.Version + return nil +} + +func DeepCopy_unversioned_GroupVersionForDiscovery(in GroupVersionForDiscovery, out *GroupVersionForDiscovery, c *conversion.Cloner) error { + out.GroupVersion = in.GroupVersion + out.Version = in.Version + return nil +} + +func DeepCopy_unversioned_GroupVersionKind(in GroupVersionKind, out *GroupVersionKind, c *conversion.Cloner) error { + out.Group = in.Group + out.Version = in.Version + out.Kind = in.Kind + return nil +} + +func DeepCopy_unversioned_GroupVersionResource(in GroupVersionResource, out *GroupVersionResource, c *conversion.Cloner) error { + out.Group = in.Group + out.Version = in.Version + out.Resource = in.Resource + return nil +} + +func DeepCopy_unversioned_LabelSelector(in LabelSelector, out *LabelSelector, c *conversion.Cloner) error { + if in.MatchLabels != nil { + in, out := in.MatchLabels, &out.MatchLabels + *out = make(map[string]string) + for key, val := range in { + (*out)[key] = val + } + } else { + out.MatchLabels = nil + } + if in.MatchExpressions != nil { + in, out := in.MatchExpressions, &out.MatchExpressions + *out = make([]LabelSelectorRequirement, len(in)) + for i := range in { + if err := DeepCopy_unversioned_LabelSelectorRequirement(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.MatchExpressions = nil + } + return nil +} + +func DeepCopy_unversioned_LabelSelectorRequirement(in LabelSelectorRequirement, out *LabelSelectorRequirement, c *conversion.Cloner) error { + out.Key = in.Key + out.Operator = in.Operator + if in.Values != nil { + in, out := in.Values, &out.Values + *out = make([]string, len(in)) + copy(*out, in) + } else { + out.Values = nil + } + return nil +} + +func DeepCopy_unversioned_ListMeta(in ListMeta, out *ListMeta, c *conversion.Cloner) error { + out.SelfLink = in.SelfLink + out.ResourceVersion = in.ResourceVersion + return nil +} + +func 
DeepCopy_unversioned_Patch(in Patch, out *Patch, c *conversion.Cloner) error { + return nil +} + +func DeepCopy_unversioned_RootPaths(in RootPaths, out *RootPaths, c *conversion.Cloner) error { + if in.Paths != nil { + in, out := in.Paths, &out.Paths + *out = make([]string, len(in)) + copy(*out, in) + } else { + out.Paths = nil + } + return nil +} + +func DeepCopy_unversioned_ServerAddressByClientCIDR(in ServerAddressByClientCIDR, out *ServerAddressByClientCIDR, c *conversion.Cloner) error { + out.ClientCIDR = in.ClientCIDR + out.ServerAddress = in.ServerAddress + return nil +} + +func DeepCopy_unversioned_Status(in Status, out *Status, c *conversion.Cloner) error { + if err := DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + return err + } + out.Status = in.Status + out.Message = in.Message + out.Reason = in.Reason + if in.Details != nil { + in, out := in.Details, &out.Details + *out = new(StatusDetails) + if err := DeepCopy_unversioned_StatusDetails(*in, *out, c); err != nil { + return err + } + } else { + out.Details = nil + } + out.Code = in.Code + return nil +} + +func DeepCopy_unversioned_StatusCause(in StatusCause, out *StatusCause, c *conversion.Cloner) error { + out.Type = in.Type + out.Message = in.Message + out.Field = in.Field + return nil +} + +func DeepCopy_unversioned_StatusDetails(in StatusDetails, out *StatusDetails, c *conversion.Cloner) error { + out.Name = in.Name + out.Group = in.Group + out.Kind = in.Kind + if in.Causes != nil { + in, out := in.Causes, &out.Causes + *out = make([]StatusCause, len(in)) + for i := range in { + if err := DeepCopy_unversioned_StatusCause(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Causes = nil + } + out.RetryAfterSeconds = in.RetryAfterSeconds + return nil +} + +func DeepCopy_unversioned_Time(in Time, out *Time, c *conversion.Cloner) error { + if newVal, err := c.DeepCopy(in.Time); err != nil { + return err + } else { + out.Time = newVal.(time.Time) + } + return nil +} + +func DeepCopy_unversioned_Timestamp(in Timestamp, out *Timestamp, c *conversion.Cloner) error { + out.Seconds = in.Seconds + out.Nanos = in.Nanos + return nil +} + +func DeepCopy_unversioned_TypeMeta(in TypeMeta, out *TypeMeta, c *conversion.Cloner) error { + out.Kind = in.Kind + out.APIVersion = in.APIVersion + return nil +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/unversioned/duration.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/unversioned/duration.go index 6ff634ee525e..cdaf257300b4 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/unversioned/duration.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/unversioned/duration.go @@ -25,7 +25,7 @@ import ( // marshaling to YAML and JSON. In particular, it marshals into strings, which // can be used as map keys in json. type Duration struct { - time.Duration + time.Duration `protobuf:"varint,1,opt,name=duration,casttype=time.Duration"` } // UnmarshalJSON implements the json.Unmarshaller interface. diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/unversioned/duration_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/unversioned/duration_test.go new file mode 100644 index 000000000000..6650ca9aa160 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/unversioned/duration_test.go @@ -0,0 +1,153 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + "encoding/json" + "testing" + "time" + + "github.com/ghodss/yaml" +) + +type DurationHolder struct { + D Duration `json:"d"` +} + +func TestDurationMarshalYAML(t *testing.T) { + cases := []struct { + input Duration + result string + }{ + {Duration{5 * time.Second}, "d: 5s\n"}, + {Duration{2 * time.Minute}, "d: 2m0s\n"}, + {Duration{time.Hour + 3*time.Millisecond}, "d: 1h0m0.003s\n"}, + } + + for _, c := range cases { + input := DurationHolder{c.input} + result, err := yaml.Marshal(&input) + if err != nil { + t.Errorf("Failed to marshal input: %q: %v", input, err) + } + if string(result) != c.result { + t.Errorf("Failed to marshal input: %q: expected %q, got %q", input, c.result, string(result)) + } + } +} + +func TestDurationUnmarshalYAML(t *testing.T) { + cases := []struct { + input string + result Duration + }{ + {"d: 0s\n", Duration{}}, + {"d: 5s\n", Duration{5 * time.Second}}, + {"d: 2m0s\n", Duration{2 * time.Minute}}, + {"d: 1h0m0.003s\n", Duration{time.Hour + 3*time.Millisecond}}, + + // Units with zero values can optionally be dropped + {"d: 2m\n", Duration{2 * time.Minute}}, + {"d: 1h0.003s\n", Duration{time.Hour + 3*time.Millisecond}}, + } + + for _, c := range cases { + var result DurationHolder + if err := yaml.Unmarshal([]byte(c.input), &result); err != nil { + t.Errorf("Failed to unmarshal input %q: %v", c.input, err) + } + if result.D != c.result { + t.Errorf("Failed to unmarshal input %q: expected %q, got %q", c.input, c.result, result) + } + } +} + +func TestDurationMarshalJSON(t *testing.T) { + cases := []struct { + input Duration + result string + }{ + {Duration{5 * time.Second}, `{"d":"5s"}`}, + {Duration{2 * time.Minute}, `{"d":"2m0s"}`}, + {Duration{time.Hour + 3*time.Millisecond}, `{"d":"1h0m0.003s"}`}, + } + + for _, c := range cases { + input := DurationHolder{c.input} + result, err := json.Marshal(&input) + if err != nil { + t.Errorf("Failed to marshal input: %q: %v", input, err) + } + if string(result) != c.result { + t.Errorf("Failed to marshal input: %q: expected %q, got %q", input, c.result, string(result)) + } + } +} + +func TestDurationUnmarshalJSON(t *testing.T) { + cases := []struct { + input string + result Duration + }{ + {`{"d":"0s"}`, Duration{}}, + {`{"d":"5s"}`, Duration{5 * time.Second}}, + {`{"d":"2m0s"}`, Duration{2 * time.Minute}}, + {`{"d":"1h0m0.003s"}`, Duration{time.Hour + 3*time.Millisecond}}, + + // Units with zero values can optionally be dropped + {`{"d":"2m"}`, Duration{2 * time.Minute}}, + {`{"d":"1h0.003s"}`, Duration{time.Hour + 3*time.Millisecond}}, + } + + for _, c := range cases { + var result DurationHolder + if err := json.Unmarshal([]byte(c.input), &result); err != nil { + t.Errorf("Failed to unmarshal input %q: %v", c.input, err) + } + if result.D != c.result { + t.Errorf("Failed to unmarshal input %q: expected %q, got %q", c.input, c.result, result) + } + } +} + +func TestDurationMarshalJSONUnmarshalYAML(t *testing.T) { + cases := []struct { + input Duration + }{ 
+ {Duration{}}, + {Duration{5 * time.Second}}, + {Duration{2 * time.Minute}}, + {Duration{time.Hour + 3*time.Millisecond}}, + } + + for i, c := range cases { + input := DurationHolder{c.input} + jsonMarshalled, err := json.Marshal(&input) + if err != nil { + t.Errorf("%d-1: Failed to marshal input: '%v': %v", i, input, err) + } + + var result DurationHolder + if err := yaml.Unmarshal(jsonMarshalled, &result); err != nil { + t.Errorf("%d-2: Failed to unmarshal '%+v': %v", i, string(jsonMarshalled), err) + } + + if input.D != result.D { + t.Errorf("%d-4: Failed to marshal input '%#v': got %#v", i, input, result) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/unversioned/generated.pb.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/unversioned/generated.pb.go new file mode 100644 index 000000000000..cb9803552ed7 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/unversioned/generated.pb.go @@ -0,0 +1,4212 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/pkg/api/unversioned/generated.proto +// DO NOT EDIT! + +/* + Package unversioned is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/pkg/api/unversioned/generated.proto + + It has these top-level messages: + APIGroup + APIGroupList + APIResource + APIResourceList + APIVersions + Duration + ExportOptions + GroupKind + GroupResource + GroupVersion + GroupVersionForDiscovery + GroupVersionKind + GroupVersionResource + LabelSelector + LabelSelectorRequirement + ListMeta + RootPaths + ServerAddressByClientCIDR + Status + StatusCause + StatusDetails + Time + Timestamp + TypeMeta +*/ +package unversioned + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import time "time" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +func (m *APIGroup) Reset() { *m = APIGroup{} } +func (m *APIGroup) String() string { return proto.CompactTextString(m) } +func (*APIGroup) ProtoMessage() {} + +func (m *APIGroupList) Reset() { *m = APIGroupList{} } +func (m *APIGroupList) String() string { return proto.CompactTextString(m) } +func (*APIGroupList) ProtoMessage() {} + +func (m *APIResource) Reset() { *m = APIResource{} } +func (m *APIResource) String() string { return proto.CompactTextString(m) } +func (*APIResource) ProtoMessage() {} + +func (m *APIResourceList) Reset() { *m = APIResourceList{} } +func (m *APIResourceList) String() string { return proto.CompactTextString(m) } +func (*APIResourceList) ProtoMessage() {} + +func (m *APIVersions) Reset() { *m = APIVersions{} } +func (*APIVersions) ProtoMessage() {} + +func (m *Duration) Reset() { *m = Duration{} } +func (m *Duration) String() string { return proto.CompactTextString(m) } +func (*Duration) ProtoMessage() {} + +func (m *ExportOptions) Reset() { *m = ExportOptions{} } +func (m *ExportOptions) String() string { return proto.CompactTextString(m) } +func (*ExportOptions) ProtoMessage() {} + +func (m *GroupKind) Reset() { *m = GroupKind{} } +func (*GroupKind) ProtoMessage() {} + +func (m *GroupResource) Reset() { *m = GroupResource{} } +func (*GroupResource) ProtoMessage() {} + +func (m *GroupVersion) Reset() { *m = GroupVersion{} } +func (*GroupVersion) ProtoMessage() {} + +func (m *GroupVersionForDiscovery) Reset() { *m = GroupVersionForDiscovery{} } +func (m *GroupVersionForDiscovery) String() string { return proto.CompactTextString(m) } +func (*GroupVersionForDiscovery) ProtoMessage() {} + +func (m *GroupVersionKind) Reset() { *m = GroupVersionKind{} } +func (*GroupVersionKind) ProtoMessage() {} + +func (m *GroupVersionResource) Reset() { *m = GroupVersionResource{} } +func (*GroupVersionResource) ProtoMessage() {} + +func (m *LabelSelector) Reset() { *m = LabelSelector{} } +func (m *LabelSelector) String() string { return proto.CompactTextString(m) } +func (*LabelSelector) ProtoMessage() {} + +func (m *LabelSelectorRequirement) Reset() { *m = LabelSelectorRequirement{} } +func (m *LabelSelectorRequirement) String() string { return proto.CompactTextString(m) } +func (*LabelSelectorRequirement) ProtoMessage() {} + +func (m *ListMeta) Reset() { *m = ListMeta{} } +func (m *ListMeta) String() string { return proto.CompactTextString(m) } +func (*ListMeta) ProtoMessage() {} + +func (m *RootPaths) Reset() { *m = RootPaths{} } +func (m *RootPaths) String() string { return proto.CompactTextString(m) } +func (*RootPaths) ProtoMessage() {} + +func (m *ServerAddressByClientCIDR) Reset() { *m = ServerAddressByClientCIDR{} } +func (m *ServerAddressByClientCIDR) String() string { return proto.CompactTextString(m) } +func (*ServerAddressByClientCIDR) ProtoMessage() {} + +func (m *Status) Reset() { *m = Status{} } +func (m *Status) String() string { return proto.CompactTextString(m) } +func (*Status) ProtoMessage() {} + +func (m *StatusCause) Reset() { *m = StatusCause{} } +func (m *StatusCause) String() string { return proto.CompactTextString(m) } +func (*StatusCause) ProtoMessage() {} + +func (m *StatusDetails) Reset() { *m = StatusDetails{} } +func (m *StatusDetails) String() string { return proto.CompactTextString(m) } +func (*StatusDetails) ProtoMessage() {} + +func (m *Time) Reset() { *m = Time{} } +func (m *Time) String() string { return proto.CompactTextString(m) } +func (*Time) ProtoMessage() {} + +func (m 
*Timestamp) Reset() { *m = Timestamp{} } +func (m *Timestamp) String() string { return proto.CompactTextString(m) } +func (*Timestamp) ProtoMessage() {} + +func (m *TypeMeta) Reset() { *m = TypeMeta{} } +func (m *TypeMeta) String() string { return proto.CompactTextString(m) } +func (*TypeMeta) ProtoMessage() {} + +func init() { + proto.RegisterType((*APIGroup)(nil), "k8s.io.kubernetes.pkg.api.unversioned.APIGroup") + proto.RegisterType((*APIGroupList)(nil), "k8s.io.kubernetes.pkg.api.unversioned.APIGroupList") + proto.RegisterType((*APIResource)(nil), "k8s.io.kubernetes.pkg.api.unversioned.APIResource") + proto.RegisterType((*APIResourceList)(nil), "k8s.io.kubernetes.pkg.api.unversioned.APIResourceList") + proto.RegisterType((*APIVersions)(nil), "k8s.io.kubernetes.pkg.api.unversioned.APIVersions") + proto.RegisterType((*Duration)(nil), "k8s.io.kubernetes.pkg.api.unversioned.Duration") + proto.RegisterType((*ExportOptions)(nil), "k8s.io.kubernetes.pkg.api.unversioned.ExportOptions") + proto.RegisterType((*GroupKind)(nil), "k8s.io.kubernetes.pkg.api.unversioned.GroupKind") + proto.RegisterType((*GroupResource)(nil), "k8s.io.kubernetes.pkg.api.unversioned.GroupResource") + proto.RegisterType((*GroupVersion)(nil), "k8s.io.kubernetes.pkg.api.unversioned.GroupVersion") + proto.RegisterType((*GroupVersionForDiscovery)(nil), "k8s.io.kubernetes.pkg.api.unversioned.GroupVersionForDiscovery") + proto.RegisterType((*GroupVersionKind)(nil), "k8s.io.kubernetes.pkg.api.unversioned.GroupVersionKind") + proto.RegisterType((*GroupVersionResource)(nil), "k8s.io.kubernetes.pkg.api.unversioned.GroupVersionResource") + proto.RegisterType((*LabelSelector)(nil), "k8s.io.kubernetes.pkg.api.unversioned.LabelSelector") + proto.RegisterType((*LabelSelectorRequirement)(nil), "k8s.io.kubernetes.pkg.api.unversioned.LabelSelectorRequirement") + proto.RegisterType((*ListMeta)(nil), "k8s.io.kubernetes.pkg.api.unversioned.ListMeta") + proto.RegisterType((*RootPaths)(nil), "k8s.io.kubernetes.pkg.api.unversioned.RootPaths") + proto.RegisterType((*ServerAddressByClientCIDR)(nil), "k8s.io.kubernetes.pkg.api.unversioned.ServerAddressByClientCIDR") + proto.RegisterType((*Status)(nil), "k8s.io.kubernetes.pkg.api.unversioned.Status") + proto.RegisterType((*StatusCause)(nil), "k8s.io.kubernetes.pkg.api.unversioned.StatusCause") + proto.RegisterType((*StatusDetails)(nil), "k8s.io.kubernetes.pkg.api.unversioned.StatusDetails") + proto.RegisterType((*Time)(nil), "k8s.io.kubernetes.pkg.api.unversioned.Time") + proto.RegisterType((*Timestamp)(nil), "k8s.io.kubernetes.pkg.api.unversioned.Timestamp") + proto.RegisterType((*TypeMeta)(nil), "k8s.io.kubernetes.pkg.api.unversioned.TypeMeta") +} +func (m *APIGroup) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *APIGroup) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + if len(m.Versions) > 0 { + for _, msg := range m.Versions { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.PreferredVersion.Size())) + n1, err := m.PreferredVersion.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n1 + if 
len(m.ServerAddressByClientCIDRs) > 0 { + for _, msg := range m.ServerAddressByClientCIDRs { + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *APIGroupList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *APIGroupList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Groups) > 0 { + for _, msg := range m.Groups { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *APIResource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *APIResource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + data[i] = 0x10 + i++ + if m.Namespaced { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Kind))) + i += copy(data[i:], m.Kind) + return i, nil +} + +func (m *APIResourceList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *APIResourceList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.GroupVersion))) + i += copy(data[i:], m.GroupVersion) + if len(m.APIResources) > 0 { + for _, msg := range m.APIResources { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *APIVersions) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *APIVersions) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Versions) > 0 { + for _, s := range m.Versions { + data[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + if len(m.ServerAddressByClientCIDRs) > 0 { + for _, msg := range m.ServerAddressByClientCIDRs { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *Duration) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Duration) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Duration)) + return i, nil +} + +func (m *ExportOptions) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + 
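+// Every generated Marshal follows the same pattern: Size() computes the exact encoded length up front, a buffer of that size is allocated, and MarshalTo fills it in place, returning the number of bytes written.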
if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ExportOptions) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + if m.Export { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x10 + i++ + if m.Exact { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + return i, nil +} + +func (m *GroupKind) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *GroupKind) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Group))) + i += copy(data[i:], m.Group) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Kind))) + i += copy(data[i:], m.Kind) + return i, nil +} + +func (m *GroupResource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *GroupResource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Group))) + i += copy(data[i:], m.Group) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Resource))) + i += copy(data[i:], m.Resource) + return i, nil +} + +func (m *GroupVersion) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *GroupVersion) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Group))) + i += copy(data[i:], m.Group) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Version))) + i += copy(data[i:], m.Version) + return i, nil +} + +func (m *GroupVersionForDiscovery) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *GroupVersionForDiscovery) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.GroupVersion))) + i += copy(data[i:], m.GroupVersion) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Version))) + i += copy(data[i:], m.Version) + return i, nil +} + +func (m *GroupVersionKind) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *GroupVersionKind) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Group))) + i += copy(data[i:], m.Group) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Version))) + i += copy(data[i:], m.Version) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Kind))) + i += copy(data[i:], m.Kind) + return i, nil +} + +func (m *GroupVersionResource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *GroupVersionResource) 
MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Group))) + i += copy(data[i:], m.Group) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Version))) + i += copy(data[i:], m.Version) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Resource))) + i += copy(data[i:], m.Resource) + return i, nil +} + +func (m *LabelSelector) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *LabelSelector) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.MatchLabels) > 0 { + for k := range m.MatchLabels { + data[i] = 0xa + i++ + v := m.MatchLabels[k] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(v))) + i += copy(data[i:], v) + } + } + if len(m.MatchExpressions) > 0 { + for _, msg := range m.MatchExpressions { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *LabelSelectorRequirement) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *LabelSelectorRequirement) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Key))) + i += copy(data[i:], m.Key) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Operator))) + i += copy(data[i:], m.Operator) + if len(m.Values) > 0 { + for _, s := range m.Values { + data[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + return i, nil +} + +func (m *ListMeta) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ListMeta) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.SelfLink))) + i += copy(data[i:], m.SelfLink) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ResourceVersion))) + i += copy(data[i:], m.ResourceVersion) + return i, nil +} + +func (m *RootPaths) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *RootPaths) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Paths) > 0 { + for _, s := range m.Paths { + data[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + return i, nil +} + +func (m *ServerAddressByClientCIDR) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + 
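+// ServerAddressByClientCIDR encodes two length-delimited string fields; each tag byte is (fieldNumber<<3)|wireType, so 0xa is field 1 (ClientCIDR) and 0x12 is field 2 (ServerAddress), both with wire type 2.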
n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ServerAddressByClientCIDR) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ClientCIDR))) + i += copy(data[i:], m.ClientCIDR) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ServerAddress))) + i += copy(data[i:], m.ServerAddress) + return i, nil +} + +func (m *Status) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Status) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n2, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n2 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Status))) + i += copy(data[i:], m.Status) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Message))) + i += copy(data[i:], m.Message) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) + i += copy(data[i:], m.Reason) + if m.Details != nil { + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Details.Size())) + n3, err := m.Details.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n3 + } + data[i] = 0x30 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Code)) + return i, nil +} + +func (m *StatusCause) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *StatusCause) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Type))) + i += copy(data[i:], m.Type) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Message))) + i += copy(data[i:], m.Message) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Field))) + i += copy(data[i:], m.Field) + return i, nil +} + +func (m *StatusDetails) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *StatusDetails) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Group))) + i += copy(data[i:], m.Group) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Kind))) + i += copy(data[i:], m.Kind) + if len(m.Causes) > 0 { + for _, msg := range m.Causes { + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + data[i] = 0x28 + i++ + i = encodeVarintGenerated(data, i, uint64(m.RetryAfterSeconds)) + return i, nil +} + +func (m *Timestamp) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Timestamp) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ 
= l + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Seconds)) + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Nanos)) + return i, nil +} + +func (m *TypeMeta) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *TypeMeta) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Kind))) + i += copy(data[i:], m.Kind) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.APIVersion))) + i += copy(data[i:], m.APIVersion) + return i, nil +} + +func encodeFixed64Generated(data []byte, offset int, v uint64) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + data[offset+4] = uint8(v >> 32) + data[offset+5] = uint8(v >> 40) + data[offset+6] = uint8(v >> 48) + data[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(data []byte, offset int, v uint32) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(data []byte, offset int, v uint64) int { + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return offset + 1 +} +func (m *APIGroup) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Versions) > 0 { + for _, e := range m.Versions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.PreferredVersion.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.ServerAddressByClientCIDRs) > 0 { + for _, e := range m.ServerAddressByClientCIDRs { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *APIGroupList) Size() (n int) { + var l int + _ = l + if len(m.Groups) > 0 { + for _, e := range m.Groups { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *APIResource) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *APIResourceList) Size() (n int) { + var l int + _ = l + l = len(m.GroupVersion) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.APIResources) > 0 { + for _, e := range m.APIResources { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *APIVersions) Size() (n int) { + var l int + _ = l + if len(m.Versions) > 0 { + for _, s := range m.Versions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.ServerAddressByClientCIDRs) > 0 { + for _, e := range m.ServerAddressByClientCIDRs { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *Duration) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Duration)) + return n +} + +func (m *ExportOptions) Size() (n int) { + var l int + _ = l + n += 2 + n += 2 + return n +} + +func (m *GroupKind) Size() (n int) { + var l int + _ = l + l = len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *GroupResource) Size() (n int) { + var l int + _ = l + l = len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + l = 
len(m.Resource) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *GroupVersion) Size() (n int) { + var l int + _ = l + l = len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Version) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *GroupVersionForDiscovery) Size() (n int) { + var l int + _ = l + l = len(m.GroupVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Version) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *GroupVersionKind) Size() (n int) { + var l int + _ = l + l = len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Version) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *GroupVersionResource) Size() (n int) { + var l int + _ = l + l = len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Version) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Resource) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *LabelSelector) Size() (n int) { + var l int + _ = l + if len(m.MatchLabels) > 0 { + for k, v := range m.MatchLabels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.MatchExpressions) > 0 { + for _, e := range m.MatchExpressions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *LabelSelectorRequirement) Size() (n int) { + var l int + _ = l + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Operator) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Values) > 0 { + for _, s := range m.Values { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ListMeta) Size() (n int) { + var l int + _ = l + l = len(m.SelfLink) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ResourceVersion) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *RootPaths) Size() (n int) { + var l int + _ = l + if len(m.Paths) > 0 { + for _, s := range m.Paths { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ServerAddressByClientCIDR) Size() (n int) { + var l int + _ = l + l = len(m.ClientCIDR) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ServerAddress) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Status) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + if m.Details != nil { + l = m.Details.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 1 + sovGenerated(uint64(m.Code)) + return n +} + +func (m *StatusCause) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Field) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *StatusDetails) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Causes) > 0 { + for _, e := range m.Causes { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 1 + sovGenerated(uint64(m.RetryAfterSeconds)) + return n +} + +func (m *Timestamp) Size() (n int) { 
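+// Size mirrors MarshalTo byte for byte: a varint field costs one tag byte plus sovGenerated(value) bytes, while a length-delimited field costs a tag byte, the varint-encoded length, and the payload itself.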
+ var l int + _ = l + n += 1 + sovGenerated(uint64(m.Seconds)) + n += 1 + sovGenerated(uint64(m.Nanos)) + return n +} + +func (m *TypeMeta) Size() (n int) { + var l int + _ = l + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.APIVersion) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *APIGroup) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: APIGroup: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: APIGroup: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Versions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Versions = append(m.Versions, GroupVersionForDiscovery{}) + if err := m.Versions[len(m.Versions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreferredVersion", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.PreferredVersion.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServerAddressByClientCIDRs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + 
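+// A clear high bit marks the final byte of the little-endian base-128 varint, so decoding stops here.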
break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServerAddressByClientCIDRs = append(m.ServerAddressByClientCIDRs, ServerAddressByClientCIDR{}) + if err := m.ServerAddressByClientCIDRs[len(m.ServerAddressByClientCIDRs)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *APIGroupList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: APIGroupList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: APIGroupList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Groups = append(m.Groups, APIGroup{}) + if err := m.Groups[len(m.Groups)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *APIResource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: APIResource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: APIResource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespaced", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Namespaced = bool(v != 0) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *APIResourceList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: APIResourceList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: APIResourceList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GroupVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.GroupVersion = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIResources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIResources = append(m.APIResources, APIResource{}) + if err := m.APIResources[len(m.APIResources)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return 
err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *APIVersions) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: APIVersions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: APIVersions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Versions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Versions = append(m.Versions, string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServerAddressByClientCIDRs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServerAddressByClientCIDRs = append(m.ServerAddressByClientCIDRs, ServerAddressByClientCIDR{}) + if err := m.ServerAddressByClientCIDRs[len(m.ServerAddressByClientCIDRs)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Duration) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Duration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Duration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = 
%d for field Duration", wireType) + } + m.Duration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Duration |= (time.Duration(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExportOptions) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExportOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExportOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Export", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Export = bool(v != 0) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Exact", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Exact = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GroupKind) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GroupKind: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GroupKind: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Group = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GroupResource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GroupResource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GroupResource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Group = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resource = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GroupVersion) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 
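+// Each loop iteration decodes one key varint; the field number is wire>>3 and the wire type is the low three bits, which drive the switch below.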
+ for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GroupVersion: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GroupVersion: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Group = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GroupVersionForDiscovery) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GroupVersionForDiscovery: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GroupVersionForDiscovery: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GroupVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.GroupVersion = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GroupVersionKind) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GroupVersionKind: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GroupVersionKind: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Group = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, 
err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GroupVersionResource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GroupVersionResource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GroupVersionResource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Group = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resource = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LabelSelector) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= 
(uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LabelSelector: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LabelSelector: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchLabels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(data[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + if m.MatchLabels == nil { + m.MatchLabels = make(map[string]string) + } + m.MatchLabels[mapkey] = mapvalue + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchExpressions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MatchExpressions = append(m.MatchExpressions, LabelSelectorRequirement{}) + if err := m.MatchExpressions[len(m.MatchExpressions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + 
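+// Unknown fields are skipped wholesale: skipGenerated reports how many bytes the unrecognized field occupies so decoding can resume at the next tag.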
skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LabelSelectorRequirement) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LabelSelectorRequirement: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LabelSelectorRequirement: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Operator", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Operator = LabelSelectorOperator(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Values = append(m.Values, string(data[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListMeta) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListMeta: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListMeta: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SelfLink", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SelfLink = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceVersion = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RootPaths) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RootPaths: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RootPaths: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Paths = append(m.Paths, string(data[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx 
+= skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServerAddressByClientCIDR) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServerAddressByClientCIDR: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServerAddressByClientCIDR: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientCIDR", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClientCIDR = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServerAddress", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServerAddress = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Status) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Status: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Status: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if 
postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = StatusReason(data[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Details", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Details == nil { + m.Details = &StatusDetails{} + } + if err := m.Details.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Code", wireType) + } + m.Code = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Code |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatusCause) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := 
iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatusCause: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatusCause: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = CauseType(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Field", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Field = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatusDetails) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatusDetails: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatusDetails: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d 
for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Group = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Causes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Causes = append(m.Causes, StatusCause{}) + if err := m.Causes[len(m.Causes)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RetryAfterSeconds", wireType) + } + m.RetryAfterSeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.RetryAfterSeconds |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Timestamp) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= 
(uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Timestamp: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Timestamp: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Seconds", wireType) + } + m.Seconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Seconds |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Nanos", wireType) + } + m.Nanos = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Nanos |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TypeMeta) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TypeMeta: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TypeMeta: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIVersion = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + 
skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(data []byte) (n int, err error) { + l := len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if data[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(data[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/unversioned/generated.proto b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/unversioned/generated.proto new file mode 100644 index 000000000000..def4a6d6f65e --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/unversioned/generated.proto @@ -0,0 +1,377 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.kubernetes.pkg.api.unversioned; + +import "k8s.io/kubernetes/pkg/runtime/generated.proto"; +import "k8s.io/kubernetes/pkg/util/intstr/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "unversioned"; + +// APIGroup contains the name, the supported versions, and the preferred version +// of a group. +message APIGroup { + // name is the name of the group. 
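The generated Unmarshal methods above, along with skipGenerated, all decode protobuf tags and lengths with the same inlined base-128 varint loop. A minimal standalone sketch of that loop follows; the decodeVarint name and the main function are illustrative and not part of the vendored file:

package main

import (
	"fmt"
	"io"
)

// decodeVarint mirrors the inlined loop in the generated code: each byte
// contributes its low 7 bits, least-significant group first, and a clear
// high bit marks the final byte.
func decodeVarint(data []byte) (uint64, int, error) {
	var v uint64
	n := 0
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, fmt.Errorf("varint overflows a 64-bit integer")
		}
		if n >= len(data) {
			return 0, 0, io.ErrUnexpectedEOF
		}
		b := data[n]
		n++
		v |= (uint64(b) & 0x7F) << shift
		if b < 0x80 {
			return v, n, nil
		}
	}
}

func main() {
	// 0x96 0x01 encodes 150: 0x16 | (0x01 << 7) = 22 + 128.
	v, n, err := decodeVarint([]byte{0x96, 0x01})
	fmt.Println(v, n, err) // 150 2 <nil>
}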
+ optional string name = 1; + + // versions are the versions supported in this group. + repeated GroupVersionForDiscovery versions = 2; + + // preferredVersion is the version preferred by the API server, which + // probably is the storage version. + optional GroupVersionForDiscovery preferredVersion = 3; + + // a map of client CIDR to server address that is serving this group. + // This is to help clients reach servers in the most network-efficient way possible. + // Clients can use the appropriate server address as per the CIDR that they match. + // In case of multiple matches, clients should use the longest matching CIDR. + // The server returns only those CIDRs that it thinks that the client can match. + // For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. + // Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP. + repeated ServerAddressByClientCIDR serverAddressByClientCIDRs = 4; +} + +// APIGroupList is a list of APIGroup, to allow clients to discover the API at +// /apis. +message APIGroupList { + // groups is a list of APIGroup. + repeated APIGroup groups = 1; +} + +// APIResource specifies the name of a resource and whether it is namespaced. +message APIResource { + // name is the name of the resource. + optional string name = 1; + + // namespaced indicates if a resource is namespaced or not. + optional bool namespaced = 2; + + // kind is the kind for the resource (e.g. 'Foo' is the kind for a resource 'foo'). + optional string kind = 3; +} + +// APIResourceList is a list of APIResource; it is used to expose the name of the +// resources supported in a specific group and version, and whether the resource +// is namespaced. +message APIResourceList { + // groupVersion is the group and version this APIResourceList is for. + optional string groupVersion = 1; + + // resources contains the name of the resources and whether they are namespaced. + repeated APIResource resources = 2; +} + +// APIVersions lists the versions that are available, to allow clients to +// discover the API at /api, which is the root path of the legacy v1 API. +// +// +protobuf.options.(gogoproto.goproto_stringer)=false +message APIVersions { + // versions are the api versions that are available. + repeated string versions = 1; + + // a map of client CIDR to server address that is serving this group. + // This is to help clients reach servers in the most network-efficient way possible. + // Clients can use the appropriate server address as per the CIDR that they match. + // In case of multiple matches, clients should use the longest matching CIDR. + // The server returns only those CIDRs that it thinks that the client can match. + // For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. + // Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP. + repeated ServerAddressByClientCIDR serverAddressByClientCIDRs = 2; +} + +// Duration is a wrapper around time.Duration which supports correct +// marshaling to YAML and JSON. In particular, it marshals into strings, which +// can be used as map keys in json. +message Duration { + optional int64 duration = 1; +} + +// ExportOptions is the set of query options for the standard REST get call. +message ExportOptions { + // Should this value be exported.
Export strips fields that a user cannot specify. + optional bool export = 1; + + // Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. + optional bool exact = 2; +} + +// GroupKind specifies a Group and a Kind, but does not force a version. This is useful for identifying +// concepts during lookup stages without having partially valid types. +// +// +protobuf.options.(gogoproto.goproto_stringer)=false +message GroupKind { + optional string group = 1; + + optional string kind = 2; +} + +// GroupResource specifies a Group and a Resource, but does not force a version. This is useful for identifying +// concepts during lookup stages without having partially valid types. +// +// +protobuf.options.(gogoproto.goproto_stringer)=false +message GroupResource { + optional string group = 1; + + optional string resource = 2; +} + +// GroupVersion contains the "group" and the "version", which uniquely identifies the API. +// +// +protobuf.options.(gogoproto.goproto_stringer)=false +message GroupVersion { + optional string group = 1; + + optional string version = 2; +} + +// GroupVersionForDiscovery contains the "group/version" and "version" string of a version. +// It is made a struct to keep extensibility. +message GroupVersionForDiscovery { + // groupVersion specifies the API group and version in the form "group/version" + optional string groupVersion = 1; + + // version specifies the version in the form of "version". This is to save + // the clients the trouble of splitting the GroupVersion. + optional string version = 2; +} + +// GroupVersionKind unambiguously identifies a kind. It doesn't anonymously include GroupVersion +// to avoid automatic coercion. It doesn't use a GroupVersion to avoid custom marshalling. +// +// +protobuf.options.(gogoproto.goproto_stringer)=false +message GroupVersionKind { + optional string group = 1; + + optional string version = 2; + + optional string kind = 3; +} + +// GroupVersionResource unambiguously identifies a resource. It doesn't anonymously include GroupVersion +// to avoid automatic coercion. It doesn't use a GroupVersion to avoid custom marshalling. +// +// +protobuf.options.(gogoproto.goproto_stringer)=false +message GroupVersionResource { + optional string group = 1; + + optional string version = 2; + + optional string resource = 3; +} + +// A label selector is a label query over a set of resources. The results of matchLabels and +// matchExpressions are ANDed. An empty label selector matches all objects. A null +// label selector matches no objects. +message LabelSelector { + // matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + // map is equivalent to an element of matchExpressions, whose key field is "key", the + // operator is "In", and the values array contains only "value". The requirements are ANDed. + map<string, string> matchLabels = 1; + + // matchExpressions is a list of label selector requirements. The requirements are ANDed. + repeated LabelSelectorRequirement matchExpressions = 2; +} + +// A label selector requirement is a selector that contains values, a key, and an operator that +// relates the key and values. +message LabelSelectorRequirement { + // key is the label key that the selector applies to. + optional string key = 1; + + // operator represents a key's relationship to a set of values. + // Valid operators are In, NotIn, Exists and DoesNotExist. + optional string operator = 2; + + // values is an array of string values. If the operator is In or NotIn, + // the values array must be non-empty.
If the operator is Exists or DoesNotExist, + // the values array must be empty. This array is replaced during a strategic + // merge patch. + repeated string values = 3; +} + +// ListMeta describes metadata that synthetic resources must have, including lists and +// various status objects. A resource may have only one of {ObjectMeta, ListMeta}. +message ListMeta { + // SelfLink is a URL representing this object. + // Populated by the system. + // Read-only. + optional string selfLink = 1; + + // String that identifies the server's internal version of this object that + // can be used by clients to determine when objects have changed. + // Value must be treated as opaque by clients and passed unmodified back to the server. + // Populated by the system. + // Read-only. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#concurrency-control-and-consistency + optional string resourceVersion = 2; +} + +// RootPaths lists the paths available at root. +// For example: "/healthz", "/apis". +message RootPaths { + // paths are the paths available at root. + repeated string paths = 1; +} + +// ServerAddressByClientCIDR helps the client to determine the server address that they should use, depending on the clientCIDR that they match. +message ServerAddressByClientCIDR { + // The CIDR with which clients can match their IP to figure out the server address that they should use. + optional string clientCIDR = 1; + + // Address of this server, suitable for a client that matches the above CIDR. + // This can be a hostname, hostname:port, IP or IP:port. + optional string serverAddress = 2; +} + +// Status is a return value for calls that don't return other objects. +message Status { + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + optional ListMeta metadata = 1; + + // Status of the operation. + // One of: "Success" or "Failure". + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + optional string status = 2; + + // A human-readable description of the status of this operation. + optional string message = 3; + + // A machine-readable description of why this operation is in the + // "Failure" status. If this value is empty there + // is no information available. A Reason clarifies an HTTP status + // code but does not override it. + optional string reason = 4; + + // Extended data associated with the reason. Each reason may define its + // own extended details. This field is optional and the data returned + // is not guaranteed to conform to any schema except that defined by + // the reason type. + optional StatusDetails details = 5; + + // Suggested HTTP return code for this status, 0 if not set. + optional int32 code = 6; +} + +// StatusCause provides more information about an api.Status failure, including +// cases when multiple errors are encountered. +message StatusCause { + // A machine-readable description of the cause of the error. If this value is + // empty there is no information available. + optional string reason = 1; + + // A human-readable description of the cause of the error. This field may be + // presented as-is to a reader. + optional string message = 2; + + // The field of the resource that has caused this error, as named by its JSON + // serialization. May include dot and postfix notation for nested attributes. + // Arrays are zero-indexed. Fields may appear more than once in an array of + // causes due to fields having multiple errors. + // Optional. 
+ // + // Examples: + //   "name" - the field "name" on the current resource + //   "items[0].name" - the field "name" on the first array entry in "items" + optional string field = 3; +} + +// StatusDetails is a set of additional properties that MAY be set by the +// server to provide additional information about a response. The Reason +// field of a Status object defines what attributes will be set. Clients +// must ignore fields that do not match the defined type of each attribute, +// and should assume that any attribute may be empty, invalid, or under- +// defined. +message StatusDetails { + // The name attribute of the resource associated with the status StatusReason + // (when there is a single name which can be described). + optional string name = 1; + + // The group attribute of the resource associated with the status StatusReason. + optional string group = 2; + + // The kind attribute of the resource associated with the status StatusReason. + // On some operations may differ from the requested resource Kind. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + optional string kind = 3; + + // The Causes array includes more details associated with the StatusReason + // failure. Not all StatusReasons may provide detailed causes. + repeated StatusCause causes = 4; + + // If specified, the time in seconds before the operation should be retried. + optional int32 retryAfterSeconds = 5; +} + +// Time is a wrapper around time.Time which supports correct +// marshaling to YAML and JSON. Wrappers are provided for many +// of the factory methods that the time package offers. +// +// +protobuf.options.marshal=false +// +protobuf.as=Timestamp +message Time { + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + optional int64 seconds = 1; + + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. This field may be limited in precision depending on context. + optional int32 nanos = 2; +} + +// Timestamp is a struct that is equivalent to Time, but intended for +// protobuf marshalling/unmarshalling. It is generated into a serialization +// that matches Time. Do not use in Go structs. +message Timestamp { + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + optional int64 seconds = 1; + + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. This field may be limited in precision depending on context. + optional int32 nanos = 2; +} + +// TypeMeta describes an individual object in an API response or request +// with strings representing the type of the object and its API schema version. +// Structures that are versioned or persisted should inline TypeMeta. +message TypeMeta { + // Kind is a string value representing the REST resource this object represents. + // Servers may infer this from the endpoint the client submits requests to. + // Cannot be updated. + // In CamelCase.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + optional string kind = 1; + + // APIVersion defines the versioned schema of this representation of an object. + // Servers should convert recognized schemas to the latest internal value, and + // may reject unrecognized values. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#resources + optional string apiVersion = 2; +} + diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/unversioned/group_version.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/unversioned/group_version.go index 5d350432c3ba..167002c3fc71 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/unversioned/group_version.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/unversioned/group_version.go @@ -29,8 +29,8 @@ import ( // `resource.group.com` -> `group=com, version=group, resource=resource` and `group=group.com, resource=resource` func ParseResourceArg(arg string) (*GroupVersionResource, GroupResource) { var gvr *GroupVersionResource - s := strings.SplitN(arg, ".", 3) - if len(s) == 3 { + if strings.Count(arg, ".") >= 2 { + s := strings.SplitN(arg, ".", 3) gvr = &GroupVersionResource{Group: s[2], Version: s[1], Resource: s[0]} } @@ -42,8 +42,8 @@ func ParseResourceArg(arg string) (*GroupVersionResource, GroupResource) { // // +protobuf.options.(gogoproto.goproto_stringer)=false type GroupResource struct { - Group string - Resource string + Group string `protobuf:"bytes,1,opt,name=group"` + Resource string `protobuf:"bytes,2,opt,name=resource"` } func (gr GroupResource) WithVersion(version string) GroupVersionResource { @@ -64,12 +64,11 @@ func (gr *GroupResource) String() string { // ParseGroupResource turns "resource.group" string into a GroupResource struct. Empty strings are allowed // for each field. func ParseGroupResource(gr string) GroupResource { - s := strings.SplitN(gr, ".", 2) - if len(s) == 1 { - return GroupResource{Resource: s[0]} + if i := strings.Index(gr, "."); i == -1 { + return GroupResource{Resource: gr} + } else { + return GroupResource{Group: gr[i+1:], Resource: gr[:i]} } - - return GroupResource{Group: s[1], Resource: s[0]} } // GroupVersionResource unambiguously identifies a resource. 
It doesn't anonymously include GroupVersion @@ -77,9 +76,9 @@ func ParseGroupResource(gr string) GroupResource { // // +protobuf.options.(gogoproto.goproto_stringer)=false type GroupVersionResource struct { - Group string - Version string - Resource string + Group string `protobuf:"bytes,1,opt,name=group"` + Version string `protobuf:"bytes,2,opt,name=version"` + Resource string `protobuf:"bytes,3,opt,name=resource"` } func (gvr GroupVersionResource) IsEmpty() bool { @@ -103,8 +102,8 @@ func (gvr *GroupVersionResource) String() string { // // +protobuf.options.(gogoproto.goproto_stringer)=false type GroupKind struct { - Group string - Kind string + Group string `protobuf:"bytes,1,opt,name=group"` + Kind string `protobuf:"bytes,2,opt,name=kind"` } func (gk GroupKind) IsEmpty() bool { @@ -127,9 +126,9 @@ func (gk *GroupKind) String() string { // // +protobuf.options.(gogoproto.goproto_stringer)=false type GroupVersionKind struct { - Group string - Version string - Kind string + Group string `protobuf:"bytes,1,opt,name=group"` + Version string `protobuf:"bytes,2,opt,name=version"` + Kind string `protobuf:"bytes,3,opt,name=kind"` } // IsEmpty returns true if group, version, and kind are empty @@ -153,8 +152,8 @@ func (gvk GroupVersionKind) String() string { // // +protobuf.options.(gogoproto.goproto_stringer)=false type GroupVersion struct { - Group string - Version string + Group string `protobuf:"bytes,1,opt,name=group"` + Version string `protobuf:"bytes,2,opt,name=version"` } // IsEmpty returns true if group and version are empty @@ -189,18 +188,14 @@ func ParseGroupVersion(gv string) (GroupVersion, error) { return GroupVersion{}, nil } - s := strings.Split(gv, "/") - // "v1" is the only special case. Otherwise GroupVersion is expected to contain - // one "/" dividing the string into two parts. - switch { - case len(s) == 1 && gv == "v1": - return GroupVersion{"", "v1"}, nil - case len(s) == 1: - return GroupVersion{"", s[0]}, nil - case len(s) == 2: - return GroupVersion{s[0], s[1]}, nil + switch strings.Count(gv, "/") { + case 0: + return GroupVersion{"", gv}, nil + case 1: + i := strings.Index(gv, "/") + return GroupVersion{gv[:i], gv[i+1:]}, nil default: - return GroupVersion{}, fmt.Errorf("Unexpected GroupVersion string: %v", gv) + return GroupVersion{}, fmt.Errorf("unexpected GroupVersion string: %v", gv) } } @@ -259,11 +254,11 @@ func (gvk *GroupVersionKind) ToAPIVersionAndKind() (string, string) { // do not use TypeMeta. This method exists to support test types and legacy serializations // that have a distinct group and kind. // TODO: further reduce usage of this method. -func FromAPIVersionAndKind(apiVersion, kind string) *GroupVersionKind { +func FromAPIVersionAndKind(apiVersion, kind string) GroupVersionKind { if gv, err := ParseGroupVersion(apiVersion); err == nil { - return &GroupVersionKind{Group: gv.Group, Version: gv.Version, Kind: kind} + return GroupVersionKind{Group: gv.Group, Version: gv.Version, Kind: kind} } - return &GroupVersionKind{Kind: kind} + return GroupVersionKind{Kind: kind} } // All objects that are serialized from a Scheme encode their type information. This interface is used @@ -273,10 +268,10 @@ func FromAPIVersionAndKind(apiVersion, kind string) *GroupVersionKind { type ObjectKind interface { // SetGroupVersionKind sets or clears the intended serialized kind of an object. Passing kind nil // should clear the current setting. 
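The index-based rewrites of ParseGroupVersion, ParseGroupResource, and ParseResourceArg above split on the first separator instead of allocating an intermediate slice. A small usage sketch of the resulting behavior; the main function is illustrative and the import path assumes the vendored Godeps workspace:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/unversioned"
)

func main() {
	// No slash: the whole string is the version.
	gv, _ := unversioned.ParseGroupVersion("v1")
	fmt.Printf("%q %q\n", gv.Group, gv.Version) // "" "v1"

	// One slash: group before it, version after it.
	gv, _ = unversioned.ParseGroupVersion("extensions/v1beta1")
	fmt.Printf("%q %q\n", gv.Group, gv.Version) // "extensions" "v1beta1"

	// The first dot separates the resource; the group keeps any later dots.
	gr := unversioned.ParseGroupResource("jobs.batch.k8s.io")
	fmt.Printf("%q %q\n", gr.Group, gr.Resource) // "batch.k8s.io" "jobs"
}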
- SetGroupVersionKind(kind *GroupVersionKind) + SetGroupVersionKind(kind GroupVersionKind) // GroupVersionKind returns the stored group, version, and kind of an object, or nil if the object does // not expose or provide these fields. - GroupVersionKind() *GroupVersionKind + GroupVersionKind() GroupVersionKind } // EmptyObjectKind implements the ObjectKind interface as a noop @@ -286,7 +281,7 @@ var EmptyObjectKind = emptyObjectKind{} type emptyObjectKind struct{} // SetGroupVersionKind implements the ObjectKind interface -func (emptyObjectKind) SetGroupVersionKind(gvk *GroupVersionKind) {} +func (emptyObjectKind) SetGroupVersionKind(gvk GroupVersionKind) {} // GroupVersionKind implements the ObjectKind interface -func (emptyObjectKind) GroupVersionKind() *GroupVersionKind { return nil } +func (emptyObjectKind) GroupVersionKind() GroupVersionKind { return GroupVersionKind{} } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/unversioned/group_version_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/unversioned/group_version_test.go new file mode 100644 index 000000000000..d2934fdf9bc5 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/unversioned/group_version_test.go @@ -0,0 +1,149 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package unversioned + +import ( + "encoding/json" + "reflect" + "testing" + + "github.com/ugorji/go/codec" +) + +func TestGroupVersionParse(t *testing.T) { + tests := []struct { + input string + out GroupVersion + err func(error) bool + }{ + {input: "v1", out: GroupVersion{Version: "v1"}}, + {input: "v2", out: GroupVersion{Version: "v2"}}, + {input: "/v1", out: GroupVersion{Version: "v1"}}, + {input: "v1/", out: GroupVersion{Group: "v1"}}, + {input: "/v1/", err: func(err error) bool { return err.Error() == "unexpected GroupVersion string: /v1/" }}, + {input: "v1/a", out: GroupVersion{Group: "v1", Version: "a"}}, + } + for i, test := range tests { + out, err := ParseGroupVersion(test.input) + if test.err == nil && err != nil || err == nil && test.err != nil { + t.Errorf("%d: unexpected error: %v", i, err) + continue + } + if test.err != nil && !test.err(err) { + t.Errorf("%d: unexpected error: %v", i, err) + continue + } + if out != test.out { + t.Errorf("%d: unexpected output: %#v", i, out) + } + } +} + +func TestGroupResourceParse(t *testing.T) { + tests := []struct { + input string + out GroupResource + }{ + {input: "v1", out: GroupResource{Resource: "v1"}}, + {input: ".v1", out: GroupResource{Group: "v1"}}, + {input: "v1.", out: GroupResource{Resource: "v1"}}, + {input: "v1.a", out: GroupResource{Group: "a", Resource: "v1"}}, + {input: "b.v1.a", out: GroupResource{Group: "v1.a", Resource: "b"}}, + } + for i, test := range tests { + out := ParseGroupResource(test.input) + if out != test.out { + t.Errorf("%d: unexpected output: %#v", i, out) + } + } +} + +func TestParseResourceArg(t *testing.T) { + tests := []struct { + input string + gvr *GroupVersionResource + gr GroupResource + }{ + {input: "v1", gr: GroupResource{Resource: "v1"}}, + {input: ".v1", gr: GroupResource{Group: "v1"}}, + {input: "v1.", gr: GroupResource{Resource: "v1"}}, + {input: "v1.a", gr: GroupResource{Group: "a", Resource: "v1"}}, + {input: "b.v1.a", gvr: &GroupVersionResource{Group: "a", Version: "v1", Resource: "b"}, gr: GroupResource{Group: "v1.a", Resource: "b"}}, + } + for i, test := range tests { + gvr, gr := ParseResourceArg(test.input) + if (gvr != nil && test.gvr == nil) || (gvr == nil && test.gvr != nil) || (test.gvr != nil && *gvr != *test.gvr) { + t.Errorf("%d: unexpected output: %#v", i, gvr) + } + if gr != test.gr { + t.Errorf("%d: unexpected output: %#v", i, gr) + } + } +} + +type GroupVersionHolder struct { + GV GroupVersion `json:"val"` +} + +func TestGroupVersionUnmarshalJSON(t *testing.T) { + cases := []struct { + input []byte + expect GroupVersion + }{ + {[]byte(`{"val": "v1"}`), GroupVersion{"", "v1"}}, + {[]byte(`{"val": "extensions/v1beta1"}`), GroupVersion{"extensions", "v1beta1"}}, + } + + for _, c := range cases { + var result GroupVersionHolder + // test golang lib's JSON codec + if err := json.Unmarshal([]byte(c.input), &result); err != nil { + t.Errorf("JSON codec failed to unmarshal input '%v': %v", c.input, err) + } + if !reflect.DeepEqual(result.GV, c.expect) { + t.Errorf("JSON codec failed to unmarshal input '%s': expected %+v, got %+v", c.input, c.expect, result.GV) + } + // test the Ugorji codec + if err := codec.NewDecoderBytes(c.input, new(codec.JsonHandle)).Decode(&result); err != nil { + t.Errorf("Ugorji codec failed to unmarshal input '%v': %v", c.input, err) + } + if !reflect.DeepEqual(result.GV, c.expect) { + t.Errorf("Ugorji codec failed to unmarshal input '%s': expected %+v, got %+v", c.input, c.expect, result.GV) + } + } +} + +func TestGroupVersionMarshalJSON(t 
*testing.T) { + cases := []struct { + input GroupVersion + expect []byte + }{ + {GroupVersion{"", "v1"}, []byte(`{"val":"v1"}`)}, + {GroupVersion{"extensions", "v1beta1"}, []byte(`{"val":"extensions/v1beta1"}`)}, + } + + for _, c := range cases { + input := GroupVersionHolder{c.input} + result, err := json.Marshal(&input) + if err != nil { + t.Errorf("Failed to marshal input '%v': %v", input, err) + } + if !reflect.DeepEqual(result, c.expect) { + t.Errorf("Failed to marshal input '%+v': expected: %s, got: %s", input, c.expect, result) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/unversioned/helpers_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/unversioned/helpers_test.go new file mode 100644 index 000000000000..f803d43665fb --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/unversioned/helpers_test.go @@ -0,0 +1,83 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + "reflect" + "testing" + + "k8s.io/kubernetes/pkg/labels" +) + +func TestLabelSelectorAsSelector(t *testing.T) { + matchLabels := map[string]string{"foo": "bar"} + matchExpressions := []LabelSelectorRequirement{{ + Key: "baz", + Operator: LabelSelectorOpIn, + Values: []string{"qux", "norf"}, + }} + mustParse := func(s string) labels.Selector { + out, e := labels.Parse(s) + if e != nil { + panic(e) + } + return out + } + tc := []struct { + in *LabelSelector + out labels.Selector + expectErr bool + }{ + {in: nil, out: labels.Nothing()}, + {in: &LabelSelector{}, out: labels.Everything()}, + { + in: &LabelSelector{MatchLabels: matchLabels}, + out: mustParse("foo=bar"), + }, + { + in: &LabelSelector{MatchExpressions: matchExpressions}, + out: mustParse("baz in (norf,qux)"), + }, + { + in: &LabelSelector{MatchLabels: matchLabels, MatchExpressions: matchExpressions}, + out: mustParse("baz in (norf,qux),foo=bar"), + }, + { + in: &LabelSelector{ + MatchExpressions: []LabelSelectorRequirement{{ + Key: "baz", + Operator: LabelSelectorOpExists, + Values: []string{"qux", "norf"}, + }}, + }, + expectErr: true, + }, + } + + for i, tc := range tc { + out, err := LabelSelectorAsSelector(tc.in) + if err == nil && tc.expectErr { + t.Errorf("[%v]expected error but got none.", i) + } + if err != nil && !tc.expectErr { + t.Errorf("[%v]did not expect error but got: %v", i, err) + } + if !reflect.DeepEqual(out, tc.out) { + t.Errorf("[%v]expected:\n\t%+v\nbut got:\n\t%+v", i, tc.out, out) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/unversioned/register.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/unversioned/register.go index babc21dfe84e..3078e88ecfc0 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/unversioned/register.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/unversioned/register.go @@ -25,12 +25,12 @@ func Kind(kind string) GroupKind { } // SetGroupVersionKind satisfies the ObjectKind interface for all objects that embed TypeMeta -func (obj *TypeMeta) 
SetGroupVersionKind(gvk *GroupVersionKind) { +func (obj *TypeMeta) SetGroupVersionKind(gvk GroupVersionKind) { obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind() } // GroupVersionKind satisfies the ObjectKind interface for all objects that embed TypeMeta -func (obj *TypeMeta) GroupVersionKind() *GroupVersionKind { +func (obj *TypeMeta) GroupVersionKind() GroupVersionKind { return FromAPIVersionAndKind(obj.APIVersion, obj.Kind) } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/unversioned/time.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/unversioned/time.go index 1180e6bd14f4..df94bbe72c89 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/unversioned/time.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/unversioned/time.go @@ -28,8 +28,9 @@ import ( // of the factory methods that the time package offers. // // +protobuf.options.marshal=false +// +protobuf.as=Timestamp type Time struct { - time.Time `protobuf:"Timestamp,1,req,name=time"` + time.Time `protobuf:"-"` } // NewTime returns a wrapped instance of the provided time diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/unversioned/time_proto.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/unversioned/time_proto.go index 6d6fe5d4e7be..496d5d98ce2c 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/unversioned/time_proto.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/unversioned/time_proto.go @@ -1,5 +1,3 @@ -// +build proto - /* Copyright 2015 The Kubernetes Authors All rights reserved. @@ -22,59 +20,66 @@ import ( "time" ) -// ProtoTime is a struct that is equivalent to Time, but intended for +// Timestamp is a struct that is equivalent to Time, but intended for // protobuf marshalling/unmarshalling. It is generated into a serialization // that matches Time. Do not use in Go structs. -type ProtoTime struct { - // Represents the time of an event. - Timestamp Timestamp `json:"timestamp"` -} - -// Timestamp is a protobuf Timestamp compatible representation of time.Time type Timestamp struct { // Represents seconds of UTC time since Unix epoch // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to // 9999-12-31T23:59:59Z inclusive. - Seconds int64 `json:"seconds"` + Seconds int64 `json:"seconds" protobuf:"varint,1,opt,name=seconds"` // Non-negative fractions of a second at nanosecond resolution. Negative // second values with fractions must still have non-negative nanos values // that count forward in time. Must be from 0 to 999,999,999 - // inclusive. - Nanos int32 `json:"nanos"` + // inclusive. This field may be limited in precision depending on context. + Nanos int32 `json:"nanos" protobuf:"varint,2,opt,name=nanos"` } -// ProtoTime returns the Time as a new ProtoTime value. -func (m *Time) ProtoTime() *ProtoTime { +// ProtoTime returns the Time as a new Timestamp value. +func (m *Time) ProtoTime() *Timestamp { if m == nil { - return &ProtoTime{} + return &Timestamp{} } - return &ProtoTime{ - Timestamp: Timestamp{ - Seconds: m.Time.Unix(), - Nanos: int32(m.Time.Nanosecond()), - }, + return &Timestamp{ + Seconds: m.Time.Unix(), + Nanos: int32(m.Time.Nanosecond()), } } // Size implements the protobuf marshalling interface. -func (m *Time) Size() (n int) { return m.ProtoTime().Size() } +func (m *Time) Size() (n int) { + if m == nil || m.Time.IsZero() { + return 0 + } + return m.ProtoTime().Size() +} // Reset implements the protobuf marshalling interface.
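With the nil and IsZero guards added above, a zero Time marshals to an empty payload rather than a zeroed Timestamp message, and non-zero values round-trip through Timestamp seconds and nanos. A sketch of that contract, with an illustrative main and the vendored import path assumed:

package main

import (
	"fmt"
	"time"

	"k8s.io/kubernetes/pkg/api/unversioned"
)

func main() {
	// Zero value: Marshal short-circuits to an empty payload.
	var zero unversioned.Time
	data, err := zero.Marshal()
	fmt.Println(len(data), err) // 0 <nil>

	// Non-zero value: the instant survives the round trip.
	orig := unversioned.NewTime(time.Unix(1462000000, 0))
	data, _ = orig.Marshal()
	var out unversioned.Time
	if err := out.Unmarshal(data); err != nil {
		panic(err)
	}
	fmt.Println(out.Time.Equal(orig.Time)) // true
}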
func (m *Time) Unmarshal(data []byte) error { - p := ProtoTime{} + if len(data) == 0 { + m.Time = time.Time{} + return nil + } + p := Timestamp{} if err := p.Unmarshal(data); err != nil { return err } - m.Time = time.Unix(p.Timestamp.Seconds, int64(p.Timestamp.Nanos)) + m.Time = time.Unix(p.Seconds, int64(p.Nanos)).Local() return nil } // Marshal implements the protobuf marshalling interface. func (m *Time) Marshal() (data []byte, err error) { + if m == nil || m.Time.IsZero() { + return nil, nil + } return m.ProtoTime().Marshal() } // MarshalTo implements the protobuf marshalling interface. func (m *Time) MarshalTo(data []byte) (int, error) { + if m == nil || m.Time.IsZero() { + return 0, nil + } return m.ProtoTime().MarshalTo(data) } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/unversioned/time_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/unversioned/time_test.go new file mode 100644 index 000000000000..60c61a7378de --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/unversioned/time_test.go @@ -0,0 +1,173 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + "encoding/json" + "reflect" + "testing" + "time" + + "github.com/ghodss/yaml" +) + +type TimeHolder struct { + T Time `json:"t"` +} + +func TestTimeMarshalYAML(t *testing.T) { + cases := []struct { + input Time + result string + }{ + {Time{}, "t: null\n"}, + {Date(1998, time.May, 5, 1, 5, 5, 50, time.FixedZone("test", -4*60*60)), "t: 1998-05-05T05:05:05Z\n"}, + {Date(1998, time.May, 5, 5, 5, 5, 0, time.UTC), "t: 1998-05-05T05:05:05Z\n"}, + } + + for _, c := range cases { + input := TimeHolder{c.input} + result, err := yaml.Marshal(&input) + if err != nil { + t.Errorf("Failed to marshal input: '%v': %v", input, err) + } + if string(result) != c.result { + t.Errorf("Failed to marshal input: '%v': expected %+v, got %q", input, c.result, string(result)) + } + } +} + +func TestTimeUnmarshalYAML(t *testing.T) { + cases := []struct { + input string + result Time + }{ + {"t: null\n", Time{}}, + {"t: 1998-05-05T05:05:05Z\n", Time{Date(1998, time.May, 5, 5, 5, 5, 0, time.UTC).Local()}}, + } + + for _, c := range cases { + var result TimeHolder + if err := yaml.Unmarshal([]byte(c.input), &result); err != nil { + t.Errorf("Failed to unmarshal input '%v': %v", c.input, err) + } + if result.T != c.result { + t.Errorf("Failed to unmarshal input '%v': expected %+v, got %+v", c.input, c.result, result) + } + } +} + +func TestTimeMarshalJSON(t *testing.T) { + cases := []struct { + input Time + result string + }{ + {Time{}, "{\"t\":null}"}, + {Date(1998, time.May, 5, 5, 5, 5, 50, time.UTC), "{\"t\":\"1998-05-05T05:05:05Z\"}"}, + {Date(1998, time.May, 5, 5, 5, 5, 0, time.UTC), "{\"t\":\"1998-05-05T05:05:05Z\"}"}, + } + + for _, c := range cases { + input := TimeHolder{c.input} + result, err := json.Marshal(&input) + if err != nil { + t.Errorf("Failed to marshal input: '%v': %v", input, err) + } + if string(result) != c.result { + 
t.Errorf("Failed to marshal input: '%v': expected %+v, got %q", input, c.result, string(result)) + } + } +} + +func TestTimeUnmarshalJSON(t *testing.T) { + cases := []struct { + input string + result Time + }{ + {"{\"t\":null}", Time{}}, + {"{\"t\":\"1998-05-05T05:05:05Z\"}", Time{Date(1998, time.May, 5, 5, 5, 5, 0, time.UTC).Local()}}, + } + + for _, c := range cases { + var result TimeHolder + if err := json.Unmarshal([]byte(c.input), &result); err != nil { + t.Errorf("Failed to unmarshal input '%v': %v", c.input, err) + } + if result.T != c.result { + t.Errorf("Failed to unmarshal input '%v': expected %+v, got %+v", c.input, c.result, result) + } + } +} + +func TestTimeMarshalJSONUnmarshalYAML(t *testing.T) { + cases := []struct { + input Time + }{ + {Time{}}, + {Date(1998, time.May, 5, 5, 5, 5, 50, time.Local).Rfc3339Copy()}, + {Date(1998, time.May, 5, 5, 5, 5, 0, time.Local).Rfc3339Copy()}, + } + + for i, c := range cases { + input := TimeHolder{c.input} + jsonMarshalled, err := json.Marshal(&input) + if err != nil { + t.Errorf("%d-1: Failed to marshal input: '%v': %v", i, input, err) + } + + var result TimeHolder + err = yaml.Unmarshal(jsonMarshalled, &result) + if err != nil { + t.Errorf("%d-2: Failed to unmarshal '%+v': %v", i, string(jsonMarshalled), err) + } + + iN, iO := input.T.Zone() + oN, oO := result.T.Zone() + if iN != oN || iO != oO { + t.Errorf("%d-3: Time zones differ before and after serialization %s:%d %s:%d", i, iN, iO, oN, oO) + } + + if input.T.UnixNano() != result.T.UnixNano() { + t.Errorf("%d-4: Failed to marshal input '%#v': got %#v", i, input, result) + } + } +} + +func TestTimeProto(t *testing.T) { + cases := []struct { + input Time + }{ + {Time{}}, + {Date(1998, time.May, 5, 1, 5, 5, 50, time.Local)}, + {Date(1998, time.May, 5, 5, 5, 5, 0, time.Local)}, + } + + for _, c := range cases { + input := c.input + data, err := input.Marshal() + if err != nil { + t.Fatalf("Failed to marshal input: '%v': %v", input, err) + } + time := Time{} + if err := time.Unmarshal(data); err != nil { + t.Fatalf("Failed to unmarshal output: '%v': %v", input, err) + } + if !reflect.DeepEqual(input, time) { + t.Errorf("Marshal->Unmarshal is not idempotent: '%v' vs '%v'", input, time) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/unversioned/types.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/unversioned/types.go index 786be86cf55e..5006b2d0b78f 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/unversioned/types.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/unversioned/types.go @@ -35,14 +35,14 @@ type TypeMeta struct { // Servers may infer this from the endpoint the client submits requests to. // Cannot be updated. // In CamelCase. - // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#types-kinds - Kind string `json:"kind,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + Kind string `json:"kind,omitempty" protobuf:"bytes,1,opt,name=kind"` // APIVersion defines the versioned schema of this representation of an object. // Servers should convert recognized schemas to the latest internal value, and // may reject unrecognized values. 
- // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#resources - APIVersion string `json:"apiVersion,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#resources + APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,2,opt,name=apiVersion"` } // ListMeta describes metadata that synthetic resources must have, including lists and @@ -51,51 +51,51 @@ type ListMeta struct { // SelfLink is a URL representing this object. // Populated by the system. // Read-only. - SelfLink string `json:"selfLink,omitempty"` + SelfLink string `json:"selfLink,omitempty" protobuf:"bytes,1,opt,name=selfLink"` // String that identifies the server's internal version of this object that // can be used by clients to determine when objects have changed. // Value must be treated as opaque by clients and passed unmodified back to the server. // Populated by the system. // Read-only. - // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#concurrency-control-and-consistency - ResourceVersion string `json:"resourceVersion,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#concurrency-control-and-consistency + ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,2,opt,name=resourceVersion"` } // ExportOptions is the query options to the standard REST get call. type ExportOptions struct { TypeMeta `json:",inline"` // Should this value be exported. Export strips fields that a user cannot specify. - Export bool `json:"export"` + Export bool `json:"export" protobuf:"varint,1,opt,name=export"` // Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. - Exact bool `json:"exact"` + Exact bool `json:"exact" protobuf:"varint,2,opt,name=exact"` } // Status is a return value for calls that don't return other objects. type Status struct { TypeMeta `json:",inline"` // Standard list metadata. - // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#types-kinds - ListMeta `json:"metadata,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Status of the operation. // One of: "Success" or "Failure". - // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#spec-and-status - Status string `json:"status,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + Status string `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"` // A human-readable description of the status of this operation. - Message string `json:"message,omitempty"` + Message string `json:"message,omitempty" protobuf:"bytes,3,opt,name=message"` // A machine-readable description of why this operation is in the // "Failure" status. If this value is empty there // is no information available. A Reason clarifies an HTTP status // code but does not override it. - Reason StatusReason `json:"reason,omitempty"` + Reason StatusReason `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason,casttype=StatusReason"` // Extended data associated with the reason. Each reason may define its // own extended details. This field is optional and the data returned // is not guaranteed to conform to any schema except that defined by // the reason type.
- Details *StatusDetails `json:"details,omitempty"` + Details *StatusDetails `json:"details,omitempty" protobuf:"bytes,5,opt,name=details"` // Suggested HTTP return code for this status, 0 if not set. - Code int32 `json:"code,omitempty"` + Code int32 `json:"code,omitempty" protobuf:"varint,6,opt,name=code"` } // StatusDetails is a set of additional properties that MAY be set by the @@ -107,18 +107,18 @@ type Status struct { type StatusDetails struct { // The name attribute of the resource associated with the status StatusReason // (when there is a single name which can be described). - Name string `json:"name,omitempty"` + Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` // The group attribute of the resource associated with the status StatusReason. - Group string `json:"group,omitempty"` + Group string `json:"group,omitempty" protobuf:"bytes,2,opt,name=group"` // The kind attribute of the resource associated with the status StatusReason. // On some operations may differ from the requested resource Kind. - // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#types-kinds - Kind string `json:"kind,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + Kind string `json:"kind,omitempty" protobuf:"bytes,3,opt,name=kind"` // The Causes array includes more details associated with the StatusReason // failure. Not all StatusReasons may provide detailed causes. - Causes []StatusCause `json:"causes,omitempty"` + Causes []StatusCause `json:"causes,omitempty" protobuf:"bytes,4,rep,name=causes"` // If specified, the time in seconds before the operation should be retried. - RetryAfterSeconds int32 `json:"retryAfterSeconds,omitempty"` + RetryAfterSeconds int32 `json:"retryAfterSeconds,omitempty" protobuf:"varint,5,opt,name=retryAfterSeconds"` } // Values of Status.Status @@ -174,10 +174,10 @@ const ( // Status code 409 StatusReasonAlreadyExists StatusReason = "AlreadyExists" - // StatusReasonConflict means the requested update operation cannot be completed - // due to a conflict in the operation. The client may need to alter the request. - // Each resource may define custom details that indicate the nature of the - // conflict. + // StatusReasonConflict means the requested operation cannot be completed + // due to a conflict in the operation. The client may need to alter the + // request. Each resource may define custom details that indicate the + // nature of the conflict. // Status code 409 StatusReasonConflict StatusReason = "Conflict" @@ -257,10 +257,10 @@ const ( type StatusCause struct { // A machine-readable description of the cause of the error. If this value is // empty there is no information available. - Type CauseType `json:"reason,omitempty"` + Type CauseType `json:"reason,omitempty" protobuf:"bytes,1,opt,name=reason,casttype=CauseType"` // A human-readable description of the cause of the error. This field may be // presented as-is to a reader. - Message string `json:"message,omitempty"` + Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"` // The field of the resource that has caused this error, as named by its JSON // serialization. May include dot and postfix notation for nested attributes. // Arrays are zero-indexed. 
Fields may appear more than once in an array of @@ -270,7 +270,7 @@ type StatusCause struct { // Examples: // "name" - the field "name" on the current resource // "items[0].name" - the field "name" on the first array entry in "items" - Field string `json:"field,omitempty"` + Field string `json:"field,omitempty" protobuf:"bytes,3,opt,name=field"` } // CauseType is a machine readable value providing more detail about what @@ -307,7 +307,7 @@ const ( type APIVersions struct { TypeMeta `json:",inline"` // versions are the api versions that are available. - Versions []string `json:"versions"` + Versions []string `json:"versions" protobuf:"bytes,1,rep,name=versions"` // a map of client CIDR to server address that is serving this group. // This is to help clients reach servers in the most network-efficient way possible. // Clients can use the appropriate server address as per the CIDR that they match. @@ -315,7 +315,7 @@ type APIVersions struct { // The server returns only those CIDRs that it thinks that the client can match. // For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. // Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP. - ServerAddressByClientCIDRs []ServerAddressByClientCIDR `json:"serverAddressByClientCIDRs"` + ServerAddressByClientCIDRs []ServerAddressByClientCIDR `json:"serverAddressByClientCIDRs" protobuf:"bytes,2,rep,name=serverAddressByClientCIDRs"` } // APIGroupList is a list of APIGroup, to allow clients to discover the API at @@ -323,7 +323,7 @@ type APIVersions struct { type APIGroupList struct { TypeMeta `json:",inline"` // groups is a list of APIGroup. - Groups []APIGroup `json:"groups"` + Groups []APIGroup `json:"groups" protobuf:"bytes,1,rep,name=groups"` } // APIGroup contains the name, the supported versions, and the preferred version @@ -331,12 +331,12 @@ type APIGroupList struct { type APIGroup struct { TypeMeta `json:",inline"` // name is the name of the group. - Name string `json:"name"` + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` // versions are the versions supported in this group. - Versions []GroupVersionForDiscovery `json:"versions"` + Versions []GroupVersionForDiscovery `json:"versions" protobuf:"bytes,2,rep,name=versions"` // preferredVersion is the version preferred by the API server, which // probably is the storage version. - PreferredVersion GroupVersionForDiscovery `json:"preferredVersion,omitempty"` + PreferredVersion GroupVersionForDiscovery `json:"preferredVersion,omitempty" protobuf:"bytes,3,opt,name=preferredVersion"` // a map of client CIDR to server address that is serving this group. // This is to help clients reach servers in the most network-efficient way possible. // Clients can use the appropriate server address as per the CIDR that they match. @@ -344,36 +344,36 @@ type APIGroup struct { // The server returns only those CIDRs that it thinks that the client can match. // For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. // Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP. 
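The comment above describes the intended client behavior: pick the server address whose CIDR contains your own IP. A minimal sketch of that lookup under the stated semantics (the helper and its most-specific-first ordering are illustrative, not part of the API):

package main

import (
	"fmt"
	"net"
)

type serverAddressByClientCIDR struct {
	ClientCIDR    string
	ServerAddress string
}

// pickServerAddress returns the first entry whose CIDR contains clientIP,
// so callers should order entries from most to least specific.
func pickServerAddress(clientIP net.IP, entries []serverAddressByClientCIDR) string {
	for _, e := range entries {
		_, ipnet, err := net.ParseCIDR(e.ClientCIDR)
		if err != nil {
			continue // skip malformed entries
		}
		if ipnet.Contains(clientIP) {
			return e.ServerAddress
		}
	}
	return ""
}

func main() {
	entries := []serverAddressByClientCIDR{
		{"10.0.0.0/8", "10.0.0.1:443"},       // internal clients get the internal address
		{"0.0.0.0/0", "api.example.com:443"}, // everyone else gets the public one
	}
	fmt.Println(pickServerAddress(net.ParseIP("10.1.2.3"), entries)) // 10.0.0.1:443
}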
- ServerAddressByClientCIDRs []ServerAddressByClientCIDR `json:"serverAddressByClientCIDRs"` + ServerAddressByClientCIDRs []ServerAddressByClientCIDR `json:"serverAddressByClientCIDRs" protobuf:"bytes,4,rep,name=serverAddressByClientCIDRs"` } // ServerAddressByClientCIDR helps the client to determine the server address that they should use, depending on the clientCIDR that they match. type ServerAddressByClientCIDR struct { // The CIDR with which clients can match their IP to figure out the server address that they should use. - ClientCIDR string `json:"clientCIDR"` + ClientCIDR string `json:"clientCIDR" protobuf:"bytes,1,opt,name=clientCIDR"` // Address of this server, suitable for a client that matches the above CIDR. // This can be a hostname, hostname:port, IP or IP:port. - ServerAddress string `json:"serverAddress"` + ServerAddress string `json:"serverAddress" protobuf:"bytes,2,opt,name=serverAddress"` } // GroupVersion contains the "group/version" and "version" string of a version. // It is made a struct to keep extensibility. type GroupVersionForDiscovery struct { // groupVersion specifies the API group and version in the form "group/version" - GroupVersion string `json:"groupVersion"` + GroupVersion string `json:"groupVersion" protobuf:"bytes,1,opt,name=groupVersion"` // version specifies the version in the form of "version". This is to save // the clients the trouble of splitting the GroupVersion. - Version string `json:"version"` + Version string `json:"version" protobuf:"bytes,2,opt,name=version"` } // APIResource specifies the name of a resource and whether it is namespaced. type APIResource struct { // name is the name of the resource. - Name string `json:"name"` + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` // namespaced indicates if a resource is namespaced or not. - Namespaced bool `json:"namespaced"` + Namespaced bool `json:"namespaced" protobuf:"varint,2,opt,name=namespaced"` // kind is the kind for the resource (e.g. 'Foo' is the kind for a resource 'foo') - Kind string `json:"kind"` + Kind string `json:"kind" protobuf:"bytes,3,opt,name=kind"` } // APIResourceList is a list of APIResource, it is used to expose the name of the @@ -382,16 +382,16 @@ type APIResource struct { type APIResourceList struct { TypeMeta `json:",inline"` // groupVersion is the group and version this APIResourceList is for. - GroupVersion string `json:"groupVersion"` + GroupVersion string `json:"groupVersion" protobuf:"bytes,1,opt,name=groupVersion"` // resources contains the name of the resources and if they are namespaced. - APIResources []APIResource `json:"resources"` + APIResources []APIResource `json:"resources" protobuf:"bytes,2,rep,name=resources"` } // RootPaths lists the paths available at root. // For example: "/healthz", "/apis". type RootPaths struct { // paths are the paths available at root. - Paths []string `json:"paths"` + Paths []string `json:"paths" protobuf:"bytes,1,rep,name=paths"` } // TODO: remove me when watch is refactored @@ -429,24 +429,24 @@ type LabelSelector struct { // matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels // map is equivalent to an element of matchExpressions, whose key field is "key", the // operator is "In", and the values array contains only "value". The requirements are ANDed. - MatchLabels map[string]string `json:"matchLabels,omitempty"` + MatchLabels map[string]string `json:"matchLabels,omitempty" protobuf:"bytes,1,rep,name=matchLabels"` // matchExpressions is a list of label selector requirements.
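matchLabels and matchExpressions compose by AND, which is why helpers_test.go above expects "baz in (norf,qux),foo=bar" for a selector carrying both. An illustrative reduction of what LabelSelectorAsSelector does with them (a sketch, not the real implementation):

package main

import (
	"fmt"
	"sort"
	"strings"
)

// buildSelector flattens matchLabels pairs into "k=v" requirements and one
// In-expression into "k in (v...)", then ANDs everything with commas.
func buildSelector(matchLabels map[string]string, key string, values []string) string {
	var reqs []string
	for k, v := range matchLabels {
		reqs = append(reqs, fmt.Sprintf("%s=%s", k, v))
	}
	if key != "" {
		sorted := append([]string(nil), values...)
		sort.Strings(sorted) // values are emitted in sorted order
		reqs = append(reqs, fmt.Sprintf("%s in (%s)", key, strings.Join(sorted, ",")))
	}
	sort.Strings(reqs) // requirements are emitted in a canonical order
	return strings.Join(reqs, ",")
}

func main() {
	fmt.Println(buildSelector(map[string]string{"foo": "bar"}, "baz", []string{"qux", "norf"}))
	// baz in (norf,qux),foo=bar, matching the expectation in helpers_test.go above
}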
The requirements are ANDed. - MatchExpressions []LabelSelectorRequirement `json:"matchExpressions,omitempty"` + MatchExpressions []LabelSelectorRequirement `json:"matchExpressions,omitempty" protobuf:"bytes,2,rep,name=matchExpressions"` } // A label selector requirement is a selector that contains values, a key, and an operator that // relates the key and values. type LabelSelectorRequirement struct { // key is the label key that the selector applies to. - Key string `json:"key" patchStrategy:"merge" patchMergeKey:"key"` + Key string `json:"key" patchStrategy:"merge" patchMergeKey:"key" protobuf:"bytes,1,opt,name=key"` // operator represents a key's relationship to a set of values. // Valid operators are In, NotIn, Exists and DoesNotExist. - Operator LabelSelectorOperator `json:"operator"` + Operator LabelSelectorOperator `json:"operator" protobuf:"bytes,2,opt,name=operator,casttype=LabelSelectorOperator"` // values is an array of string values. If the operator is In or NotIn, // the values array must be non-empty. If the operator is Exists or DoesNotExist, // the values array must be empty. This array is replaced during a strategic // merge patch. - Values []string `json:"values,omitempty"` + Values []string `json:"values,omitempty" protobuf:"bytes,3,rep,name=values"` } // A label selector operator is the set of operators that can be used in a selector requirement. diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/unversioned/types_swagger_doc_generated.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/unversioned/types_swagger_doc_generated.go index b45a46e625b1..8caef8e54900 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/unversioned/types_swagger_doc_generated.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/unversioned/types_swagger_doc_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2015 The Kubernetes Authors All rights reserved. +Copyright 2016 The Kubernetes Authors All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -123,7 +123,7 @@ func (LabelSelectorRequirement) SwaggerDoc() map[string]string { var map_ListMeta = map[string]string{ "": "ListMeta describes metadata that synthetic resources must have, including lists and various status objects. A resource may have only one of {ObjectMeta, ListMeta}.", "selfLink": "SelfLink is a URL representing this object. Populated by the system. Read-only.", - "resourceVersion": "String that identifies the server's internal version of this object that can be used by clients to determine when objects have changed. Value must be treated as opaque by clients and passed unmodified back to the server. Populated by the system. Read-only. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#concurrency-control-and-consistency", + "resourceVersion": "String that identifies the server's internal version of this object that can be used by clients to determine when objects have changed. Value must be treated as opaque by clients and passed unmodified back to the server. Populated by the system. Read-only. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#concurrency-control-and-consistency", } func (ListMeta) SwaggerDoc() map[string]string { @@ -159,8 +159,8 @@ func (ServerAddressByClientCIDR) SwaggerDoc() map[string]string { var map_Status = map[string]string{ "": "Status is a return value for calls that don't return other objects.", - "metadata": "Standard list metadata.
More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#types-kinds", - "status": "Status of the operation. One of: \"Success\" or \"Failure\". More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#spec-and-status", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", + "status": "Status of the operation. One of: \"Success\" or \"Failure\". More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", "message": "A human-readable description of the status of this operation.", "reason": "A machine-readable description of why this operation is in the \"Failure\" status. If this value is empty there is no information available. A Reason clarifies an HTTP status code but does not override it.", "details": "Extended data associated with the reason. Each reason may define its own extended details. This field is optional and the data returned is not guaranteed to conform to any schema except that defined by the reason type.", @@ -186,7 +186,7 @@ var map_StatusDetails = map[string]string{ "": "StatusDetails is a set of additional properties that MAY be set by the server to provide additional information about a response. The Reason field of a Status object defines what attributes will be set. Clients must ignore fields that do not match the defined type of each attribute, and should assume that any attribute may be empty, invalid, or under defined.", "name": "The name attribute of the resource associated with the status StatusReason (when there is a single name which can be described).", "group": "The group attribute of the resource associated with the status StatusReason.", - "kind": "The kind attribute of the resource associated with the status StatusReason. On some operations may differ from the requested resource Kind. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#types-kinds", + "kind": "The kind attribute of the resource associated with the status StatusReason. On some operations may differ from the requested resource Kind. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", "causes": "The Causes array includes more details associated with the StatusReason failure. Not all StatusReasons may provide detailed causes.", "retryAfterSeconds": "If specified, the time in seconds before the operation should be retried.", } @@ -197,8 +197,8 @@ func (StatusDetails) SwaggerDoc() map[string]string { var map_TypeMeta = map[string]string{ "": "TypeMeta describes an individual object in an API response or request with strings representing the type of the object and its API schema version. Structures that are versioned or persisted should inline TypeMeta.", - "kind": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#types-kinds", - "apiVersion": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#resources", + "kind": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", + "apiVersion": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#resources", } func (TypeMeta) SwaggerDoc() map[string]string { diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/unversioned/validation/validation.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/unversioned/validation/validation.go index f42f37b04825..47852e3e29f2 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/unversioned/validation/validation.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/unversioned/validation/validation.go @@ -18,7 +18,7 @@ package validation import ( "k8s.io/kubernetes/pkg/api/unversioned" - apivalidation "k8s.io/kubernetes/pkg/api/validation" + "k8s.io/kubernetes/pkg/util/validation" "k8s.io/kubernetes/pkg/util/validation/field" ) @@ -27,7 +27,7 @@ func ValidateLabelSelector(ps *unversioned.LabelSelector, fldPath *field.Path) f if ps == nil { return allErrs } - allErrs = append(allErrs, apivalidation.ValidateLabels(ps.MatchLabels, fldPath.Child("matchLabels"))...) + allErrs = append(allErrs, ValidateLabels(ps.MatchLabels, fldPath.Child("matchLabels"))...) for i, expr := range ps.MatchExpressions { allErrs = append(allErrs, ValidateLabelSelectorRequirement(expr, fldPath.Child("matchExpressions").Index(i))...) } @@ -48,6 +48,27 @@ func ValidateLabelSelectorRequirement(sr unversioned.LabelSelectorRequirement, f default: allErrs = append(allErrs, field.Invalid(fldPath.Child("operator"), sr.Operator, "not a valid selector operator")) } - allErrs = append(allErrs, apivalidation.ValidateLabelName(sr.Key, fldPath.Child("key"))...) + allErrs = append(allErrs, ValidateLabelName(sr.Key, fldPath.Child("key"))...) + return allErrs +} + +// ValidateLabelName validates that the label name is correctly defined. +func ValidateLabelName(labelName string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + for _, msg := range validation.IsQualifiedName(labelName) { + allErrs = append(allErrs, field.Invalid(fldPath, labelName, msg)) + } + return allErrs +} + +// ValidateLabels validates that a set of labels are correctly defined. +func ValidateLabels(labels map[string]string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + for k, v := range labels { + allErrs = append(allErrs, ValidateLabelName(k, fldPath)...) + for _, msg := range validation.IsValidLabelValue(v) { + allErrs = append(allErrs, field.Invalid(fldPath, v, msg)) + } + } return allErrs } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/unversioned/validation/validation_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/unversioned/validation/validation_test.go new file mode 100644 index 000000000000..ec1264f4eae5 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/unversioned/validation/validation_test.go @@ -0,0 +1,89 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package validation + +import ( + "strings" + "testing" + + "k8s.io/kubernetes/pkg/util/validation/field" +) + +func TestValidateLabels(t *testing.T) { + successCases := []map[string]string{ + {"simple": "bar"}, + {"now-with-dashes": "bar"}, + {"1-starts-with-num": "bar"}, + {"1234": "bar"}, + {"simple/simple": "bar"}, + {"now-with-dashes/simple": "bar"}, + {"now-with-dashes/now-with-dashes": "bar"}, + {"now.with.dots/simple": "bar"}, + {"now-with.dashes-and.dots/simple": "bar"}, + {"1-num.2-num/3-num": "bar"}, + {"1234/5678": "bar"}, + {"1.2.3.4/5678": "bar"}, + {"UpperCaseAreOK123": "bar"}, + {"goodvalue": "123_-.BaR"}, + } + for i := range successCases { + errs := ValidateLabels(successCases[i], field.NewPath("field")) + if len(errs) != 0 { + t.Errorf("case[%d] expected success, got %#v", i, errs) + } + } + + labelNameErrorCases := []struct { + labels map[string]string + expect string + }{ + {map[string]string{"nospecialchars^=@": "bar"}, "must match the regex"}, + {map[string]string{"cantendwithadash-": "bar"}, "must match the regex"}, + {map[string]string{"only/one/slash": "bar"}, "must match the regex"}, + {map[string]string{strings.Repeat("a", 254): "bar"}, "must be no more than"}, + } + for i := range labelNameErrorCases { + errs := ValidateLabels(labelNameErrorCases[i].labels, field.NewPath("field")) + if len(errs) != 1 { + t.Errorf("case[%d]: expected failure", i) + } else { + if !strings.Contains(errs[0].Detail, labelNameErrorCases[i].expect) { + t.Errorf("case[%d]: error details do not include %q: %q", i, labelNameErrorCases[i].expect, errs[0].Detail) + } + } + } + + labelValueErrorCases := []struct { + labels map[string]string + expect string + }{ + {map[string]string{"toolongvalue": strings.Repeat("a", 64)}, "must be no more than"}, + {map[string]string{"backslashesinvalue": "some\\bad\\value"}, "must match the regex"}, + {map[string]string{"nocommasallowed": "bad,value"}, "must match the regex"}, + {map[string]string{"strangecharsinvalue": "?#$notsogood"}, "must match the regex"}, + } + for i := range labelValueErrorCases { + errs := ValidateLabels(labelValueErrorCases[i].labels, field.NewPath("field")) + if len(errs) != 1 { + t.Errorf("case[%d]: expected failure", i) + } else { + if !strings.Contains(errs[0].Detail, labelValueErrorCases[i].expect) { + t.Errorf("case[%d]: error details do not include %q: %q", i, labelValueErrorCases[i].expect, errs[0].Detail) + } + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/unversioned/well_known_labels.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/unversioned/well_known_labels.go index 6c163b784aac..08e4f68892b7 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/unversioned/well_known_labels.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/unversioned/well_known_labels.go @@ -16,7 +16,15 @@ limitations under the License. 
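The validation tests above pin down the label rules: a key is an optional DNS-subdomain prefix plus "/" plus a name, the name and any value are limited to 63 characters, and the character set is alphanumerics with "-", "_", and "." in the interior. A self-contained sketch of the key check under those assumptions (the regexes approximate the util/validation ones; they are not copied from that package):

package main

import (
	"fmt"
	"regexp"
	"strings"
)

var (
	// name part: starts and ends alphanumeric, may contain - _ . inside
	nameRe = regexp.MustCompile(`^[A-Za-z0-9]([-A-Za-z0-9_.]*[A-Za-z0-9])?$`)
	// prefix part: lowercase DNS-subdomain style labels joined by dots
	prefixRe = regexp.MustCompile(`^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$`)
)

// validLabelKey sketches the qualified-name check exercised by the tests above.
func validLabelKey(key string) bool {
	name := key
	if i := strings.LastIndex(key, "/"); i >= 0 {
		prefix := key[:i]
		name = key[i+1:]
		if len(prefix) > 253 || !prefixRe.MatchString(prefix) {
			return false
		}
	}
	return len(name) <= 63 && nameRe.MatchString(name)
}

func main() {
	fmt.Println(validLabelKey("now.with.dots/simple"))   // true
	fmt.Println(validLabelKey("only/one/slash"))         // false: the extra slash leaves an invalid prefix
	fmt.Println(validLabelKey(strings.Repeat("a", 254))) // false: name too long
}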
package unversioned -const LabelHostname = "kubernetes.io/hostname" -const LabelZoneFailureDomain = "failure-domain.beta.kubernetes.io/zone" -const LabelZoneRegion = "failure-domain.beta.kubernetes.io/region" -const LabelInstanceType = "beta.kubernetes.io/instance-type" +const ( + // If you add a new topology domain here, also consider adding it to the set of default values + // for the scheduler's --failure-domain command-line argument. + LabelHostname = "kubernetes.io/hostname" + LabelZoneFailureDomain = "failure-domain.beta.kubernetes.io/zone" + LabelZoneRegion = "failure-domain.beta.kubernetes.io/region" + + LabelInstanceType = "beta.kubernetes.io/instance-type" + + LabelOS = "beta.kubernetes.io/os" + LabelArch = "beta.kubernetes.io/arch" +) diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/util/group_version_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/util/group_version_test.go new file mode 100644 index 000000000000..d53b5f4e5a8f --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/util/group_version_test.go @@ -0,0 +1,63 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import "testing" + +func TestGetVersion(t *testing.T) { + testCases := []struct { + groupVersion string + output string + }{ + { + "v1", + "v1", + }, + { + "extensions/v1beta1", + "v1beta1", + }, + } + for _, test := range testCases { + actual := GetVersion(test.groupVersion) + if test.output != actual { + t.Errorf("expect version: %s, got: %s\n", test.output, actual) + } + } +} + +func TestGetGroup(t *testing.T) { + testCases := []struct { + groupVersion string + output string + }{ + { + "v1", + "", + }, + { + "extensions/v1beta1", + "extensions", + }, + } + for _, test := range testCases { + actual := GetGroup(test.groupVersion) + if test.output != actual { + t.Errorf("expect group: %s, got: %s\n", test.output, actual) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/v1/backward_compatibility_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/v1/backward_compatibility_test.go new file mode 100644 index 000000000000..2ef3d926fd42 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/v1/backward_compatibility_test.go @@ -0,0 +1,229 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package v1_test + +import ( + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/testing/compat" + "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/api/validation" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/util/validation/field" +) + +func TestCompatibility_v1_PodSecurityContext(t *testing.T) { + cases := []struct { + name string + input string + expectedKeys map[string]string + absentKeys []string + }{ + { + name: "hostNetwork = true", + input: ` +{ + "kind":"Pod", + "apiVersion":"v1", + "metadata":{"name":"my-pod-name", "namespace":"my-pod-namespace"}, + "spec": { + "hostNetwork": true, + "containers":[{ + "name":"a", + "image":"my-container-image" + }] + } +} +`, + expectedKeys: map[string]string{ + "spec.hostNetwork": "true", + }, + }, + { + name: "hostNetwork = false", + input: ` +{ + "kind":"Pod", + "apiVersion":"v1", + "metadata":{"name":"my-pod-name", "namespace":"my-pod-namespace"}, + "spec": { + "hostNetwork": false, + "containers":[{ + "name":"a", + "image":"my-container-image" + }] + } +} +`, + absentKeys: []string{ + "spec.hostNetwork", + }, + }, + { + name: "hostIPC = true", + input: ` +{ + "kind":"Pod", + "apiVersion":"v1", + "metadata":{"name":"my-pod-name", "namespace":"my-pod-namespace"}, + "spec": { + "hostIPC": true, + "containers":[{ + "name":"a", + "image":"my-container-image" + }] + } +} +`, + expectedKeys: map[string]string{ + "spec.hostIPC": "true", + }, + }, + { + name: "hostIPC = false", + input: ` +{ + "kind":"Pod", + "apiVersion":"v1", + "metadata":{"name":"my-pod-name", "namespace":"my-pod-namespace"}, + "spec": { + "hostIPC": false, + "containers":[{ + "name":"a", + "image":"my-container-image" + }] + } +} +`, + absentKeys: []string{ + "spec.hostIPC", + }, + }, + { + name: "hostPID = true", + input: ` +{ + "kind":"Pod", + "apiVersion":"v1", + "metadata":{"name":"my-pod-name", "namespace":"my-pod-namespace"}, + "spec": { + "hostPID": true, + "containers":[{ + "name":"a", + "image":"my-container-image" + }] + } +} +`, + expectedKeys: map[string]string{ + "spec.hostPID": "true", + }, + }, + { + name: "hostPID = false", + input: ` +{ + "kind":"Pod", + "apiVersion":"v1", + "metadata":{"name":"my-pod-name", "namespace":"my-pod-namespace"}, + "spec": { + "hostPID": false, + "containers":[{ + "name":"a", + "image":"my-container-image" + }] + } +} +`, + absentKeys: []string{ + "spec.hostPID", + }, + }, + { + name: "resetting defaults for pre-v1.1 mirror pods", + input: ` +{ + "kind":"Pod", + "apiVersion":"v1", + "metadata":{ + "name":"my-pod-name", + "namespace":"my-pod-namespace", + "annotations": { + "kubernetes.io/config.mirror": "mirror" + } + }, + "spec": { + "containers":[{ + "name":"a", + "image":"my-container-image", + "resources": { + "limits": { + "cpu": "100m" + } + } + }] + } +} +`, + absentKeys: []string{ + "spec.terminationGracePeriodSeconds", + "spec.containers[0].resources.requests", + }, + }, + { + name: "preserving defaults for v1.1+ mirror pods", + input: ` + { + "kind":"Pod", + "apiVersion":"v1", + "metadata":{ + "name":"my-pod-name", + "namespace":"my-pod-namespace", + "annotations": { + "kubernetes.io/config.mirror": "cbe924f710c7e26f7693d6a341bcfad0" + } + }, + "spec": { + "containers":[{ + "name":"a", + "image":"my-container-image", + "resources": { + "limits": { + "cpu": "100m" + } + } + }] + } + } + `, + expectedKeys: map[string]string{ + "spec.terminationGracePeriodSeconds": "30", + "spec.containers[0].resources.requests": "map[cpu:100m]", + }, + }, + } + + validator := func(obj
runtime.Object) field.ErrorList { + return validation.ValidatePodSpec(&(obj.(*api.Pod).Spec), field.NewPath("spec")) + } + + for _, tc := range cases { + t.Logf("Testing 1.0.0 backward compatibility for %v", tc.name) + compat.TestCompatibility(t, v1.SchemeGroupVersion, []byte(tc.input), validator, tc.expectedKeys, tc.absentKeys) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/v1/conversion.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/v1/conversion.go index 56ca88f3677a..7af60d5e8c0d 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/v1/conversion.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/v1/conversion.go @@ -17,13 +17,12 @@ limitations under the License. package v1 import ( + "encoding/json" "fmt" - "reflect" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/conversion" "k8s.io/kubernetes/pkg/runtime" - "speter.net/go/exp/math/dec/inf" ) const ( @@ -200,19 +199,8 @@ func addConversionFuncs(scheme *runtime.Scheme) { } func Convert_api_ReplicationControllerSpec_To_v1_ReplicationControllerSpec(in *api.ReplicationControllerSpec, out *ReplicationControllerSpec, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.ReplicationControllerSpec))(in) - } - out.Replicas = new(int32) - *out.Replicas = int32(in.Replicas) - if in.Selector != nil { - out.Selector = make(map[string]string) - for key, val := range in.Selector { - out.Selector[key] = val - } - } else { - out.Selector = nil - } + out.Replicas = &in.Replicas + out.Selector = in.Selector //if in.TemplateRef != nil { // out.TemplateRef = new(ObjectReference) // if err := Convert_api_ObjectReference_To_v1_ObjectReference(in.TemplateRef, out.TemplateRef, s); err != nil { @@ -233,18 +221,9 @@ func Convert_api_ReplicationControllerSpec_To_v1_ReplicationControllerSpec(in *a } func Convert_v1_ReplicationControllerSpec_To_api_ReplicationControllerSpec(in *ReplicationControllerSpec, out *api.ReplicationControllerSpec, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*ReplicationControllerSpec))(in) - } - out.Replicas = int(*in.Replicas) - if in.Selector != nil { - out.Selector = make(map[string]string) - for key, val := range in.Selector { - out.Selector[key] = val - } - } else { - out.Selector = nil - } + out.Replicas = *in.Replicas + out.Selector = in.Selector + //if in.TemplateRef != nil { // out.TemplateRef = new(api.ObjectReference) // if err := Convert_v1_ObjectReference_To_api_ObjectReference(in.TemplateRef, out.TemplateRef, s); err != nil { @@ -264,12 +243,110 @@ func Convert_v1_ReplicationControllerSpec_To_api_ReplicationControllerSpec(in *R return nil } +func Convert_api_PodStatusResult_To_v1_PodStatusResult(in *api.PodStatusResult, out *PodStatusResult, s conversion.Scope) error { + if err := autoConvert_api_PodStatusResult_To_v1_PodStatusResult(in, out, s); err != nil { + return err + } + + if old := out.Annotations; old != nil { + out.Annotations = make(map[string]string, len(old)) + for k, v := range old { + out.Annotations[k] = v + } + } + if len(out.Status.InitContainerStatuses) > 0 { + if out.Annotations == nil { + out.Annotations = make(map[string]string) + } + value, err := json.Marshal(out.Status.InitContainerStatuses) + if err != nil { + return err + } + out.Annotations[PodInitContainerStatusesAnnotationKey] = string(value) + } else { + delete(out.Annotations, PodInitContainerStatusesAnnotationKey) + } + return nil +} + +func 
Convert_v1_PodStatusResult_To_api_PodStatusResult(in *PodStatusResult, out *api.PodStatusResult, s conversion.Scope) error { + // TODO: when we move init container to beta, remove these conversions + if value, ok := in.Annotations[PodInitContainerStatusesAnnotationKey]; ok { + var values []ContainerStatus + if err := json.Unmarshal([]byte(value), &values); err != nil { + return err + } + in.Status.InitContainerStatuses = values + } + + if err := autoConvert_v1_PodStatusResult_To_api_PodStatusResult(in, out, s); err != nil { + return err + } + if len(out.Annotations) > 0 { + old := out.Annotations + out.Annotations = make(map[string]string, len(old)) + for k, v := range old { + out.Annotations[k] = v + } + delete(out.Annotations, PodInitContainerStatusesAnnotationKey) + } + return nil +} + +func Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(in *api.PodTemplateSpec, out *PodTemplateSpec, s conversion.Scope) error { + if err := autoConvert_api_PodTemplateSpec_To_v1_PodTemplateSpec(in, out, s); err != nil { + return err + } + + // TODO: when we move init container to beta, remove these conversions + if old := out.Annotations; old != nil { + out.Annotations = make(map[string]string, len(old)) + for k, v := range old { + out.Annotations[k] = v + } + } + if len(out.Spec.InitContainers) > 0 { + if out.Annotations == nil { + out.Annotations = make(map[string]string) + } + value, err := json.Marshal(out.Spec.InitContainers) + if err != nil { + return err + } + out.Annotations[PodInitContainersAnnotationKey] = string(value) + } else { + delete(out.Annotations, PodInitContainersAnnotationKey) + } + return nil +} + +func Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in *PodTemplateSpec, out *api.PodTemplateSpec, s conversion.Scope) error { + // TODO: when we move init container to beta, remove these conversions + if value, ok := in.Annotations[PodInitContainersAnnotationKey]; ok { + var values []Container + if err := json.Unmarshal([]byte(value), &values); err != nil { + return err + } + in.Spec.InitContainers = values + } + + if err := autoConvert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in, out, s); err != nil { + return err + } + if len(out.Annotations) > 0 { + old := out.Annotations + out.Annotations = make(map[string]string, len(old)) + for k, v := range old { + out.Annotations[k] = v + } + delete(out.Annotations, PodInitContainersAnnotationKey) + } + return nil +} + // The following two PodSpec conversions are done here to support ServiceAccount // as an alias for ServiceAccountName. 
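The conversions above keep the alpha init-container fields round-trippable by mirroring them into a JSON annotation on the way to v1 and recovering them on the way back. A stripped-down illustration of that annotation round trip (the container type is a stand-in, and the key string is an assumed value for PodInitContainersAnnotationKey in this tree):

package main

import (
	"encoding/json"
	"fmt"
)

type container struct {
	Name  string `json:"name"`
	Image string `json:"image"`
}

// Assumed value of PodInitContainersAnnotationKey; the real constant lives in the v1 package.
const initContainersKey = "pod.alpha.kubernetes.io/init-containers"

func main() {
	initContainers := []container{{Name: "setup", Image: "busybox"}}

	// Internal -> v1: serialize the typed field into the annotation map.
	annotations := map[string]string{}
	value, _ := json.Marshal(initContainers) // errors elided for brevity
	annotations[initContainersKey] = string(value)

	// v1 -> internal: deserialize the annotation back into the typed field,
	// then drop the annotation so the internal object carries only the field.
	var restored []container
	_ = json.Unmarshal([]byte(annotations[initContainersKey]), &restored)
	delete(annotations, initContainersKey)

	fmt.Println(restored[0].Name == "setup") // true
}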
func Convert_api_PodSpec_To_v1_PodSpec(in *api.PodSpec, out *PodSpec, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.PodSpec))(in) - } if in.Volumes != nil { out.Volumes = make([]Volume, len(in.Volumes)) for i := range in.Volumes { @@ -280,6 +357,16 @@ func Convert_api_PodSpec_To_v1_PodSpec(in *api.PodSpec, out *PodSpec, s conversi } else { out.Volumes = nil } + if in.InitContainers != nil { + out.InitContainers = make([]Container, len(in.InitContainers)) + for i := range in.InitContainers { + if err := Convert_api_Container_To_v1_Container(&in.InitContainers[i], &out.InitContainers[i], s); err != nil { + return err + } + } + } else { + out.InitContainers = nil + } if in.Containers != nil { out.Containers = make([]Container, len(in.Containers)) for i := range in.Containers { @@ -290,28 +377,13 @@ func Convert_api_PodSpec_To_v1_PodSpec(in *api.PodSpec, out *PodSpec, s conversi } else { out.Containers = nil } + out.RestartPolicy = RestartPolicy(in.RestartPolicy) - if in.TerminationGracePeriodSeconds != nil { - out.TerminationGracePeriodSeconds = new(int64) - *out.TerminationGracePeriodSeconds = *in.TerminationGracePeriodSeconds - } else { - out.TerminationGracePeriodSeconds = nil - } - if in.ActiveDeadlineSeconds != nil { - out.ActiveDeadlineSeconds = new(int64) - *out.ActiveDeadlineSeconds = *in.ActiveDeadlineSeconds - } else { - out.ActiveDeadlineSeconds = nil - } + out.TerminationGracePeriodSeconds = in.TerminationGracePeriodSeconds + out.ActiveDeadlineSeconds = in.ActiveDeadlineSeconds out.DNSPolicy = DNSPolicy(in.DNSPolicy) - if in.NodeSelector != nil { - out.NodeSelector = make(map[string]string) - for key, val := range in.NodeSelector { - out.NodeSelector[key] = val - } - } else { - out.NodeSelector = nil - } + out.NodeSelector = in.NodeSelector + out.ServiceAccountName = in.ServiceAccountName // DeprecatedServiceAccount is an alias for ServiceAccountName. 
out.DeprecatedServiceAccount = in.ServiceAccountName @@ -338,13 +410,13 @@ func Convert_api_PodSpec_To_v1_PodSpec(in *api.PodSpec, out *PodSpec, s conversi } else { out.ImagePullSecrets = nil } + out.Hostname = in.Hostname + out.Subdomain = in.Subdomain return nil } func Convert_v1_PodSpec_To_api_PodSpec(in *PodSpec, out *api.PodSpec, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*PodSpec))(in) - } + SetDefaults_PodSpec(in) if in.Volumes != nil { out.Volumes = make([]api.Volume, len(in.Volumes)) for i := range in.Volumes { @@ -355,6 +427,16 @@ func Convert_v1_PodSpec_To_api_PodSpec(in *PodSpec, out *api.PodSpec, s conversi } else { out.Volumes = nil } + if in.InitContainers != nil { + out.InitContainers = make([]api.Container, len(in.InitContainers)) + for i := range in.InitContainers { + if err := Convert_v1_Container_To_api_Container(&in.InitContainers[i], &out.InitContainers[i], s); err != nil { + return err + } + } + } else { + out.InitContainers = nil + } if in.Containers != nil { out.Containers = make([]api.Container, len(in.Containers)) for i := range in.Containers { @@ -366,27 +448,10 @@ func Convert_v1_PodSpec_To_api_PodSpec(in *PodSpec, out *api.PodSpec, s conversi out.Containers = nil } out.RestartPolicy = api.RestartPolicy(in.RestartPolicy) - if in.TerminationGracePeriodSeconds != nil { - out.TerminationGracePeriodSeconds = new(int64) - *out.TerminationGracePeriodSeconds = *in.TerminationGracePeriodSeconds - } else { - out.TerminationGracePeriodSeconds = nil - } - if in.ActiveDeadlineSeconds != nil { - out.ActiveDeadlineSeconds = new(int64) - *out.ActiveDeadlineSeconds = *in.ActiveDeadlineSeconds - } else { - out.ActiveDeadlineSeconds = nil - } + out.TerminationGracePeriodSeconds = in.TerminationGracePeriodSeconds + out.ActiveDeadlineSeconds = in.ActiveDeadlineSeconds out.DNSPolicy = api.DNSPolicy(in.DNSPolicy) - if in.NodeSelector != nil { - out.NodeSelector = make(map[string]string) - for key, val := range in.NodeSelector { - out.NodeSelector[key] = val - } - } else { - out.NodeSelector = nil - } + out.NodeSelector = in.NodeSelector // We support DeprecatedServiceAccount as an alias for ServiceAccountName. // If both are specified, ServiceAccountName (the new field) wins. 
out.ServiceAccountName = in.ServiceAccountName @@ -419,7 +484,8 @@ func Convert_v1_PodSpec_To_api_PodSpec(in *PodSpec, out *api.PodSpec, s conversi } else { out.ImagePullSecrets = nil } - + out.Hostname = in.Hostname + out.Subdomain = in.Subdomain return nil } @@ -427,6 +493,36 @@ func Convert_api_Pod_To_v1_Pod(in *api.Pod, out *Pod, s conversion.Scope) error if err := autoConvert_api_Pod_To_v1_Pod(in, out, s); err != nil { return err } + + // TODO: when we move init container to beta, remove these conversions + if len(out.Spec.InitContainers) > 0 || len(out.Status.InitContainerStatuses) > 0 { + old := out.Annotations + out.Annotations = make(map[string]string, len(old)) + for k, v := range old { + out.Annotations[k] = v + } + delete(out.Annotations, PodInitContainersAnnotationKey) + delete(out.Annotations, PodInitContainerStatusesAnnotationKey) + } + if len(out.Spec.InitContainers) > 0 { + value, err := json.Marshal(out.Spec.InitContainers) + if err != nil { + return err + } + out.Annotations[PodInitContainersAnnotationKey] = string(value) + } else { + delete(out.Annotations, PodInitContainersAnnotationKey) + } + if len(out.Status.InitContainerStatuses) > 0 { + value, err := json.Marshal(out.Status.InitContainerStatuses) + if err != nil { + return err + } + out.Annotations[PodInitContainerStatusesAnnotationKey] = string(value) + } else { + delete(out.Annotations, PodInitContainerStatusesAnnotationKey) + } + // We need to reset certain fields for mirror pods from pre-v1.1 kubelet // (#15960). // TODO: Remove this code after we drop support for v1.0 kubelets. @@ -442,7 +538,35 @@ func Convert_api_Pod_To_v1_Pod(in *api.Pod, out *Pod, s conversion.Scope) error } func Convert_v1_Pod_To_api_Pod(in *Pod, out *api.Pod, s conversion.Scope) error { - return autoConvert_v1_Pod_To_api_Pod(in, out, s) + // TODO: when we move init container to beta, remove these conversions + if value, ok := in.Annotations[PodInitContainersAnnotationKey]; ok { + var values []Container + if err := json.Unmarshal([]byte(value), &values); err != nil { + return err + } + in.Spec.InitContainers = values + } + if value, ok := in.Annotations[PodInitContainerStatusesAnnotationKey]; ok { + var values []ContainerStatus + if err := json.Unmarshal([]byte(value), &values); err != nil { + return err + } + in.Status.InitContainerStatuses = values + } + + if err := autoConvert_v1_Pod_To_api_Pod(in, out, s); err != nil { + return err + } + if len(out.Annotations) > 0 { + old := out.Annotations + out.Annotations = make(map[string]string, len(old)) + for k, v := range old { + out.Annotations[k] = v + } + delete(out.Annotations, PodInitContainersAnnotationKey) + delete(out.Annotations, PodInitContainerStatusesAnnotationKey) + } + return nil } func Convert_api_ServiceSpec_To_v1_ServiceSpec(in *api.ServiceSpec, out *ServiceSpec, s conversion.Scope) error { @@ -450,9 +574,7 @@ func Convert_api_ServiceSpec_To_v1_ServiceSpec(in *api.ServiceSpec, out *Service return err } // Publish both externalIPs and deprecatedPublicIPs fields in v1. - for _, ip := range in.ExternalIPs { - out.DeprecatedPublicIPs = append(out.DeprecatedPublicIPs, ip) - } + out.DeprecatedPublicIPs = in.ExternalIPs return nil } @@ -462,19 +584,12 @@ func Convert_v1_ServiceSpec_To_api_ServiceSpec(in *ServiceSpec, out *api.Service } // Prefer the legacy deprecatedPublicIPs field, if provided. 
if len(in.DeprecatedPublicIPs) > 0 { - out.ExternalIPs = nil - for _, ip := range in.DeprecatedPublicIPs { - out.ExternalIPs = append(out.ExternalIPs, ip) - } + out.ExternalIPs = in.DeprecatedPublicIPs } return nil } func Convert_api_PodSecurityContext_To_v1_PodSecurityContext(in *api.PodSecurityContext, out *PodSecurityContext, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.PodSecurityContext))(in) - } - out.SupplementalGroups = in.SupplementalGroups if in.SELinuxOptions != nil { out.SELinuxOptions = new(SELinuxOptions) @@ -484,32 +599,13 @@ func Convert_api_PodSecurityContext_To_v1_PodSecurityContext(in *api.PodSecurity } else { out.SELinuxOptions = nil } - if in.RunAsUser != nil { - out.RunAsUser = new(int64) - *out.RunAsUser = *in.RunAsUser - } else { - out.RunAsUser = nil - } - if in.RunAsNonRoot != nil { - out.RunAsNonRoot = new(bool) - *out.RunAsNonRoot = *in.RunAsNonRoot - } else { - out.RunAsNonRoot = nil - } - if in.FSGroup != nil { - out.FSGroup = new(int64) - *out.FSGroup = *in.FSGroup - } else { - out.FSGroup = nil - } + out.RunAsUser = in.RunAsUser + out.RunAsNonRoot = in.RunAsNonRoot + out.FSGroup = in.FSGroup return nil } func Convert_v1_PodSecurityContext_To_api_PodSecurityContext(in *PodSecurityContext, out *api.PodSecurityContext, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*PodSecurityContext))(in) - } - out.SupplementalGroups = in.SupplementalGroups if in.SELinuxOptions != nil { out.SELinuxOptions = new(api.SELinuxOptions) @@ -519,47 +615,27 @@ func Convert_v1_PodSecurityContext_To_api_PodSecurityContext(in *PodSecurityCont } else { out.SELinuxOptions = nil } - if in.RunAsUser != nil { - out.RunAsUser = new(int64) - *out.RunAsUser = *in.RunAsUser - } else { - out.RunAsUser = nil - } - if in.RunAsNonRoot != nil { - out.RunAsNonRoot = new(bool) - *out.RunAsNonRoot = *in.RunAsNonRoot - } else { - out.RunAsNonRoot = nil - } - if in.FSGroup != nil { - out.FSGroup = new(int64) - *out.FSGroup = *in.FSGroup - } else { - out.FSGroup = nil - } + out.RunAsUser = in.RunAsUser + out.RunAsNonRoot = in.RunAsNonRoot + out.FSGroup = in.FSGroup return nil } func Convert_v1_ResourceList_To_api_ResourceList(in *ResourceList, out *api.ResourceList, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*ResourceList))(in) - } if *in == nil { return nil } - converted := make(api.ResourceList) + if *out == nil { + *out = make(api.ResourceList, len(*in)) + } for key, val := range *in { - value := val.Copy() - // TODO(#18538): We round up resource values to milli scale to maintain API compatibility. // In the future, we should instead reject values that need rounding. 
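The ResourceList conversion that follows rounds every quantity up to milli precision for API compatibility. The arithmetic, shown on plain nano-unit integers rather than resource.Quantity (a sketch of the rounding rule only, not the Quantity implementation):

package main

import "fmt"

// roundUpToMilli rounds a nano-unit count up to whole milli units:
// any sub-milli remainder pushes the result to the next milli unit.
func roundUpToMilli(nanos int64) int64 {
	const nanosPerMilli = 1000000
	return (nanos + nanosPerMilli - 1) / nanosPerMilli
}

func main() {
	fmt.Println(roundUpToMilli(100000000)) // exactly 100m -> 100
	fmt.Println(roundUpToMilli(100000001)) // just over 100m -> rounds up to 101
}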
- const milliScale = 3 - value.Amount.Round(value.Amount, milliScale, inf.RoundUp) + const milliScale = -3 + val.RoundUp(milliScale) - converted[api.ResourceName(key)] = *value + (*out)[api.ResourceName(key)] = val } - - *out = converted return nil } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/v1/conversion_generated.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/v1/conversion_generated.go index 687da425ad7d..3d1ce27a92c3 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/v1/conversion_generated.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/v1/conversion_generated.go @@ -1,5 +1,7 @@ +// +build !ignore_autogenerated + /* -Copyright 2015 The Kubernetes Authors All rights reserved. +Copyright 2016 The Kubernetes Authors All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,27 +16,337 @@ See the License for the specific language governing permissions and limitations under the License. */ -// DO NOT EDIT. THIS FILE IS AUTO-GENERATED BY $KUBEROOT/hack/update-generated-conversions.sh +// This file was autogenerated by conversion-gen. Do not edit it manually! package v1 import ( - reflect "reflect" - api "k8s.io/kubernetes/pkg/api" resource "k8s.io/kubernetes/pkg/api/resource" - unversioned "k8s.io/kubernetes/pkg/api/unversioned" conversion "k8s.io/kubernetes/pkg/conversion" runtime "k8s.io/kubernetes/pkg/runtime" ) -func autoConvert_api_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource(in *api.AWSElasticBlockStoreVolumeSource, out *AWSElasticBlockStoreVolumeSource, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.AWSElasticBlockStoreVolumeSource))(in) +func init() { + if err := api.Scheme.AddGeneratedConversionFuncs( + Convert_v1_AWSElasticBlockStoreVolumeSource_To_api_AWSElasticBlockStoreVolumeSource, + Convert_api_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource, + Convert_v1_Affinity_To_api_Affinity, + Convert_api_Affinity_To_v1_Affinity, + Convert_v1_AzureFileVolumeSource_To_api_AzureFileVolumeSource, + Convert_api_AzureFileVolumeSource_To_v1_AzureFileVolumeSource, + Convert_v1_Binding_To_api_Binding, + Convert_api_Binding_To_v1_Binding, + Convert_v1_Capabilities_To_api_Capabilities, + Convert_api_Capabilities_To_v1_Capabilities, + Convert_v1_CephFSVolumeSource_To_api_CephFSVolumeSource, + Convert_api_CephFSVolumeSource_To_v1_CephFSVolumeSource, + Convert_v1_CinderVolumeSource_To_api_CinderVolumeSource, + Convert_api_CinderVolumeSource_To_v1_CinderVolumeSource, + Convert_v1_ComponentCondition_To_api_ComponentCondition, + Convert_api_ComponentCondition_To_v1_ComponentCondition, + Convert_v1_ComponentStatus_To_api_ComponentStatus, + Convert_api_ComponentStatus_To_v1_ComponentStatus, + Convert_v1_ComponentStatusList_To_api_ComponentStatusList, + Convert_api_ComponentStatusList_To_v1_ComponentStatusList, + Convert_v1_ConfigMap_To_api_ConfigMap, + Convert_api_ConfigMap_To_v1_ConfigMap, + Convert_v1_ConfigMapKeySelector_To_api_ConfigMapKeySelector, + Convert_api_ConfigMapKeySelector_To_v1_ConfigMapKeySelector, + Convert_v1_ConfigMapList_To_api_ConfigMapList, + Convert_api_ConfigMapList_To_v1_ConfigMapList, + Convert_v1_ConfigMapVolumeSource_To_api_ConfigMapVolumeSource, + Convert_api_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource, + Convert_v1_Container_To_api_Container, + Convert_api_Container_To_v1_Container, + 
Convert_v1_ContainerImage_To_api_ContainerImage, + Convert_api_ContainerImage_To_v1_ContainerImage, + Convert_v1_ContainerPort_To_api_ContainerPort, + Convert_api_ContainerPort_To_v1_ContainerPort, + Convert_v1_ContainerState_To_api_ContainerState, + Convert_api_ContainerState_To_v1_ContainerState, + Convert_v1_ContainerStateRunning_To_api_ContainerStateRunning, + Convert_api_ContainerStateRunning_To_v1_ContainerStateRunning, + Convert_v1_ContainerStateTerminated_To_api_ContainerStateTerminated, + Convert_api_ContainerStateTerminated_To_v1_ContainerStateTerminated, + Convert_v1_ContainerStateWaiting_To_api_ContainerStateWaiting, + Convert_api_ContainerStateWaiting_To_v1_ContainerStateWaiting, + Convert_v1_ContainerStatus_To_api_ContainerStatus, + Convert_api_ContainerStatus_To_v1_ContainerStatus, + Convert_v1_DaemonEndpoint_To_api_DaemonEndpoint, + Convert_api_DaemonEndpoint_To_v1_DaemonEndpoint, + Convert_v1_DeleteOptions_To_api_DeleteOptions, + Convert_api_DeleteOptions_To_v1_DeleteOptions, + Convert_v1_DownwardAPIVolumeFile_To_api_DownwardAPIVolumeFile, + Convert_api_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile, + Convert_v1_DownwardAPIVolumeSource_To_api_DownwardAPIVolumeSource, + Convert_api_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource, + Convert_v1_EmptyDirVolumeSource_To_api_EmptyDirVolumeSource, + Convert_api_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource, + Convert_v1_EndpointAddress_To_api_EndpointAddress, + Convert_api_EndpointAddress_To_v1_EndpointAddress, + Convert_v1_EndpointPort_To_api_EndpointPort, + Convert_api_EndpointPort_To_v1_EndpointPort, + Convert_v1_EndpointSubset_To_api_EndpointSubset, + Convert_api_EndpointSubset_To_v1_EndpointSubset, + Convert_v1_Endpoints_To_api_Endpoints, + Convert_api_Endpoints_To_v1_Endpoints, + Convert_v1_EndpointsList_To_api_EndpointsList, + Convert_api_EndpointsList_To_v1_EndpointsList, + Convert_v1_EnvVar_To_api_EnvVar, + Convert_api_EnvVar_To_v1_EnvVar, + Convert_v1_EnvVarSource_To_api_EnvVarSource, + Convert_api_EnvVarSource_To_v1_EnvVarSource, + Convert_v1_Event_To_api_Event, + Convert_api_Event_To_v1_Event, + Convert_v1_EventList_To_api_EventList, + Convert_api_EventList_To_v1_EventList, + Convert_v1_EventSource_To_api_EventSource, + Convert_api_EventSource_To_v1_EventSource, + Convert_v1_ExecAction_To_api_ExecAction, + Convert_api_ExecAction_To_v1_ExecAction, + Convert_v1_ExportOptions_To_api_ExportOptions, + Convert_api_ExportOptions_To_v1_ExportOptions, + Convert_v1_FCVolumeSource_To_api_FCVolumeSource, + Convert_api_FCVolumeSource_To_v1_FCVolumeSource, + Convert_v1_FlexVolumeSource_To_api_FlexVolumeSource, + Convert_api_FlexVolumeSource_To_v1_FlexVolumeSource, + Convert_v1_FlockerVolumeSource_To_api_FlockerVolumeSource, + Convert_api_FlockerVolumeSource_To_v1_FlockerVolumeSource, + Convert_v1_GCEPersistentDiskVolumeSource_To_api_GCEPersistentDiskVolumeSource, + Convert_api_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource, + Convert_v1_GitRepoVolumeSource_To_api_GitRepoVolumeSource, + Convert_api_GitRepoVolumeSource_To_v1_GitRepoVolumeSource, + Convert_v1_GlusterfsVolumeSource_To_api_GlusterfsVolumeSource, + Convert_api_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource, + Convert_v1_HTTPGetAction_To_api_HTTPGetAction, + Convert_api_HTTPGetAction_To_v1_HTTPGetAction, + Convert_v1_HTTPHeader_To_api_HTTPHeader, + Convert_api_HTTPHeader_To_v1_HTTPHeader, + Convert_v1_Handler_To_api_Handler, + Convert_api_Handler_To_v1_Handler, + Convert_v1_HostPathVolumeSource_To_api_HostPathVolumeSource, + 
Convert_api_HostPathVolumeSource_To_v1_HostPathVolumeSource, + Convert_v1_ISCSIVolumeSource_To_api_ISCSIVolumeSource, + Convert_api_ISCSIVolumeSource_To_v1_ISCSIVolumeSource, + Convert_v1_KeyToPath_To_api_KeyToPath, + Convert_api_KeyToPath_To_v1_KeyToPath, + Convert_v1_Lifecycle_To_api_Lifecycle, + Convert_api_Lifecycle_To_v1_Lifecycle, + Convert_v1_LimitRange_To_api_LimitRange, + Convert_api_LimitRange_To_v1_LimitRange, + Convert_v1_LimitRangeItem_To_api_LimitRangeItem, + Convert_api_LimitRangeItem_To_v1_LimitRangeItem, + Convert_v1_LimitRangeList_To_api_LimitRangeList, + Convert_api_LimitRangeList_To_v1_LimitRangeList, + Convert_v1_LimitRangeSpec_To_api_LimitRangeSpec, + Convert_api_LimitRangeSpec_To_v1_LimitRangeSpec, + Convert_v1_List_To_api_List, + Convert_api_List_To_v1_List, + Convert_v1_ListOptions_To_api_ListOptions, + Convert_api_ListOptions_To_v1_ListOptions, + Convert_v1_LoadBalancerIngress_To_api_LoadBalancerIngress, + Convert_api_LoadBalancerIngress_To_v1_LoadBalancerIngress, + Convert_v1_LoadBalancerStatus_To_api_LoadBalancerStatus, + Convert_api_LoadBalancerStatus_To_v1_LoadBalancerStatus, + Convert_v1_LocalObjectReference_To_api_LocalObjectReference, + Convert_api_LocalObjectReference_To_v1_LocalObjectReference, + Convert_v1_NFSVolumeSource_To_api_NFSVolumeSource, + Convert_api_NFSVolumeSource_To_v1_NFSVolumeSource, + Convert_v1_Namespace_To_api_Namespace, + Convert_api_Namespace_To_v1_Namespace, + Convert_v1_NamespaceList_To_api_NamespaceList, + Convert_api_NamespaceList_To_v1_NamespaceList, + Convert_v1_NamespaceSpec_To_api_NamespaceSpec, + Convert_api_NamespaceSpec_To_v1_NamespaceSpec, + Convert_v1_NamespaceStatus_To_api_NamespaceStatus, + Convert_api_NamespaceStatus_To_v1_NamespaceStatus, + Convert_v1_Node_To_api_Node, + Convert_api_Node_To_v1_Node, + Convert_v1_NodeAddress_To_api_NodeAddress, + Convert_api_NodeAddress_To_v1_NodeAddress, + Convert_v1_NodeAffinity_To_api_NodeAffinity, + Convert_api_NodeAffinity_To_v1_NodeAffinity, + Convert_v1_NodeCondition_To_api_NodeCondition, + Convert_api_NodeCondition_To_v1_NodeCondition, + Convert_v1_NodeDaemonEndpoints_To_api_NodeDaemonEndpoints, + Convert_api_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints, + Convert_v1_NodeList_To_api_NodeList, + Convert_api_NodeList_To_v1_NodeList, + Convert_v1_NodeProxyOptions_To_api_NodeProxyOptions, + Convert_api_NodeProxyOptions_To_v1_NodeProxyOptions, + Convert_v1_NodeSelector_To_api_NodeSelector, + Convert_api_NodeSelector_To_v1_NodeSelector, + Convert_v1_NodeSelectorRequirement_To_api_NodeSelectorRequirement, + Convert_api_NodeSelectorRequirement_To_v1_NodeSelectorRequirement, + Convert_v1_NodeSelectorTerm_To_api_NodeSelectorTerm, + Convert_api_NodeSelectorTerm_To_v1_NodeSelectorTerm, + Convert_v1_NodeSpec_To_api_NodeSpec, + Convert_api_NodeSpec_To_v1_NodeSpec, + Convert_v1_NodeStatus_To_api_NodeStatus, + Convert_api_NodeStatus_To_v1_NodeStatus, + Convert_v1_NodeSystemInfo_To_api_NodeSystemInfo, + Convert_api_NodeSystemInfo_To_v1_NodeSystemInfo, + Convert_v1_ObjectFieldSelector_To_api_ObjectFieldSelector, + Convert_api_ObjectFieldSelector_To_v1_ObjectFieldSelector, + Convert_v1_ObjectMeta_To_api_ObjectMeta, + Convert_api_ObjectMeta_To_v1_ObjectMeta, + Convert_v1_ObjectReference_To_api_ObjectReference, + Convert_api_ObjectReference_To_v1_ObjectReference, + Convert_v1_OwnerReference_To_api_OwnerReference, + Convert_api_OwnerReference_To_v1_OwnerReference, + Convert_v1_PersistentVolume_To_api_PersistentVolume, + Convert_api_PersistentVolume_To_v1_PersistentVolume, + 
Convert_v1_PersistentVolumeClaim_To_api_PersistentVolumeClaim, + Convert_api_PersistentVolumeClaim_To_v1_PersistentVolumeClaim, + Convert_v1_PersistentVolumeClaimList_To_api_PersistentVolumeClaimList, + Convert_api_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList, + Convert_v1_PersistentVolumeClaimSpec_To_api_PersistentVolumeClaimSpec, + Convert_api_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec, + Convert_v1_PersistentVolumeClaimStatus_To_api_PersistentVolumeClaimStatus, + Convert_api_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus, + Convert_v1_PersistentVolumeClaimVolumeSource_To_api_PersistentVolumeClaimVolumeSource, + Convert_api_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource, + Convert_v1_PersistentVolumeList_To_api_PersistentVolumeList, + Convert_api_PersistentVolumeList_To_v1_PersistentVolumeList, + Convert_v1_PersistentVolumeSource_To_api_PersistentVolumeSource, + Convert_api_PersistentVolumeSource_To_v1_PersistentVolumeSource, + Convert_v1_PersistentVolumeSpec_To_api_PersistentVolumeSpec, + Convert_api_PersistentVolumeSpec_To_v1_PersistentVolumeSpec, + Convert_v1_PersistentVolumeStatus_To_api_PersistentVolumeStatus, + Convert_api_PersistentVolumeStatus_To_v1_PersistentVolumeStatus, + Convert_v1_Pod_To_api_Pod, + Convert_api_Pod_To_v1_Pod, + Convert_v1_PodAffinity_To_api_PodAffinity, + Convert_api_PodAffinity_To_v1_PodAffinity, + Convert_v1_PodAffinityTerm_To_api_PodAffinityTerm, + Convert_api_PodAffinityTerm_To_v1_PodAffinityTerm, + Convert_v1_PodAntiAffinity_To_api_PodAntiAffinity, + Convert_api_PodAntiAffinity_To_v1_PodAntiAffinity, + Convert_v1_PodAttachOptions_To_api_PodAttachOptions, + Convert_api_PodAttachOptions_To_v1_PodAttachOptions, + Convert_v1_PodCondition_To_api_PodCondition, + Convert_api_PodCondition_To_v1_PodCondition, + Convert_v1_PodExecOptions_To_api_PodExecOptions, + Convert_api_PodExecOptions_To_v1_PodExecOptions, + Convert_v1_PodList_To_api_PodList, + Convert_api_PodList_To_v1_PodList, + Convert_v1_PodLogOptions_To_api_PodLogOptions, + Convert_api_PodLogOptions_To_v1_PodLogOptions, + Convert_v1_PodProxyOptions_To_api_PodProxyOptions, + Convert_api_PodProxyOptions_To_v1_PodProxyOptions, + Convert_v1_PodSecurityContext_To_api_PodSecurityContext, + Convert_api_PodSecurityContext_To_v1_PodSecurityContext, + Convert_v1_PodSpec_To_api_PodSpec, + Convert_api_PodSpec_To_v1_PodSpec, + Convert_v1_PodStatus_To_api_PodStatus, + Convert_api_PodStatus_To_v1_PodStatus, + Convert_v1_PodStatusResult_To_api_PodStatusResult, + Convert_api_PodStatusResult_To_v1_PodStatusResult, + Convert_v1_PodTemplate_To_api_PodTemplate, + Convert_api_PodTemplate_To_v1_PodTemplate, + Convert_v1_PodTemplateList_To_api_PodTemplateList, + Convert_api_PodTemplateList_To_v1_PodTemplateList, + Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec, + Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec, + Convert_v1_Preconditions_To_api_Preconditions, + Convert_api_Preconditions_To_v1_Preconditions, + Convert_v1_PreferredSchedulingTerm_To_api_PreferredSchedulingTerm, + Convert_api_PreferredSchedulingTerm_To_v1_PreferredSchedulingTerm, + Convert_v1_Probe_To_api_Probe, + Convert_api_Probe_To_v1_Probe, + Convert_v1_RBDVolumeSource_To_api_RBDVolumeSource, + Convert_api_RBDVolumeSource_To_v1_RBDVolumeSource, + Convert_v1_RangeAllocation_To_api_RangeAllocation, + Convert_api_RangeAllocation_To_v1_RangeAllocation, + Convert_v1_ReplicationController_To_api_ReplicationController, + Convert_api_ReplicationController_To_v1_ReplicationController, + 
Convert_v1_ReplicationControllerList_To_api_ReplicationControllerList, + Convert_api_ReplicationControllerList_To_v1_ReplicationControllerList, + Convert_v1_ReplicationControllerSpec_To_api_ReplicationControllerSpec, + Convert_api_ReplicationControllerSpec_To_v1_ReplicationControllerSpec, + Convert_v1_ReplicationControllerStatus_To_api_ReplicationControllerStatus, + Convert_api_ReplicationControllerStatus_To_v1_ReplicationControllerStatus, + Convert_v1_ResourceFieldSelector_To_api_ResourceFieldSelector, + Convert_api_ResourceFieldSelector_To_v1_ResourceFieldSelector, + Convert_v1_ResourceQuota_To_api_ResourceQuota, + Convert_api_ResourceQuota_To_v1_ResourceQuota, + Convert_v1_ResourceQuotaList_To_api_ResourceQuotaList, + Convert_api_ResourceQuotaList_To_v1_ResourceQuotaList, + Convert_v1_ResourceQuotaSpec_To_api_ResourceQuotaSpec, + Convert_api_ResourceQuotaSpec_To_v1_ResourceQuotaSpec, + Convert_v1_ResourceQuotaStatus_To_api_ResourceQuotaStatus, + Convert_api_ResourceQuotaStatus_To_v1_ResourceQuotaStatus, + Convert_v1_ResourceRequirements_To_api_ResourceRequirements, + Convert_api_ResourceRequirements_To_v1_ResourceRequirements, + Convert_v1_SELinuxOptions_To_api_SELinuxOptions, + Convert_api_SELinuxOptions_To_v1_SELinuxOptions, + Convert_v1_Secret_To_api_Secret, + Convert_api_Secret_To_v1_Secret, + Convert_v1_SecretKeySelector_To_api_SecretKeySelector, + Convert_api_SecretKeySelector_To_v1_SecretKeySelector, + Convert_v1_SecretList_To_api_SecretList, + Convert_api_SecretList_To_v1_SecretList, + Convert_v1_SecretVolumeSource_To_api_SecretVolumeSource, + Convert_api_SecretVolumeSource_To_v1_SecretVolumeSource, + Convert_v1_SecurityContext_To_api_SecurityContext, + Convert_api_SecurityContext_To_v1_SecurityContext, + Convert_v1_SerializedReference_To_api_SerializedReference, + Convert_api_SerializedReference_To_v1_SerializedReference, + Convert_v1_Service_To_api_Service, + Convert_api_Service_To_v1_Service, + Convert_v1_ServiceAccount_To_api_ServiceAccount, + Convert_api_ServiceAccount_To_v1_ServiceAccount, + Convert_v1_ServiceAccountList_To_api_ServiceAccountList, + Convert_api_ServiceAccountList_To_v1_ServiceAccountList, + Convert_v1_ServiceList_To_api_ServiceList, + Convert_api_ServiceList_To_v1_ServiceList, + Convert_v1_ServicePort_To_api_ServicePort, + Convert_api_ServicePort_To_v1_ServicePort, + Convert_v1_ServiceProxyOptions_To_api_ServiceProxyOptions, + Convert_api_ServiceProxyOptions_To_v1_ServiceProxyOptions, + Convert_v1_ServiceSpec_To_api_ServiceSpec, + Convert_api_ServiceSpec_To_v1_ServiceSpec, + Convert_v1_ServiceStatus_To_api_ServiceStatus, + Convert_api_ServiceStatus_To_v1_ServiceStatus, + Convert_v1_TCPSocketAction_To_api_TCPSocketAction, + Convert_api_TCPSocketAction_To_v1_TCPSocketAction, + Convert_v1_Taint_To_api_Taint, + Convert_api_Taint_To_v1_Taint, + Convert_v1_Toleration_To_api_Toleration, + Convert_api_Toleration_To_v1_Toleration, + Convert_v1_Volume_To_api_Volume, + Convert_api_Volume_To_v1_Volume, + Convert_v1_VolumeMount_To_api_VolumeMount, + Convert_api_VolumeMount_To_v1_VolumeMount, + Convert_v1_VolumeSource_To_api_VolumeSource, + Convert_api_VolumeSource_To_v1_VolumeSource, + Convert_v1_VsphereVirtualDiskVolumeSource_To_api_VsphereVirtualDiskVolumeSource, + Convert_api_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource, + Convert_v1_WeightedPodAffinityTerm_To_api_WeightedPodAffinityTerm, + Convert_api_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm, + ); err != nil { + // if one of the conversion functions is malformed, detect it 
immediately. + panic(err) } +} + +func autoConvert_v1_AWSElasticBlockStoreVolumeSource_To_api_AWSElasticBlockStoreVolumeSource(in *AWSElasticBlockStoreVolumeSource, out *api.AWSElasticBlockStoreVolumeSource, s conversion.Scope) error { + out.VolumeID = in.VolumeID + out.FSType = in.FSType + out.Partition = in.Partition + out.ReadOnly = in.ReadOnly + return nil +} + +func Convert_v1_AWSElasticBlockStoreVolumeSource_To_api_AWSElasticBlockStoreVolumeSource(in *AWSElasticBlockStoreVolumeSource, out *api.AWSElasticBlockStoreVolumeSource, s conversion.Scope) error { + return autoConvert_v1_AWSElasticBlockStoreVolumeSource_To_api_AWSElasticBlockStoreVolumeSource(in, out, s) +} + +func autoConvert_api_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource(in *api.AWSElasticBlockStoreVolumeSource, out *AWSElasticBlockStoreVolumeSource, s conversion.Scope) error { out.VolumeID = in.VolumeID out.FSType = in.FSType - out.Partition = int32(in.Partition) + out.Partition = in.Partition out.ReadOnly = in.ReadOnly return nil } @@ -43,10 +355,88 @@ func Convert_api_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolu return autoConvert_api_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource(in, out, s) } -func autoConvert_api_AzureFileVolumeSource_To_v1_AzureFileVolumeSource(in *api.AzureFileVolumeSource, out *AzureFileVolumeSource, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.AzureFileVolumeSource))(in) +func autoConvert_v1_Affinity_To_api_Affinity(in *Affinity, out *api.Affinity, s conversion.Scope) error { + if in.NodeAffinity != nil { + in, out := &in.NodeAffinity, &out.NodeAffinity + *out = new(api.NodeAffinity) + if err := Convert_v1_NodeAffinity_To_api_NodeAffinity(*in, *out, s); err != nil { + return err + } + } else { + out.NodeAffinity = nil + } + if in.PodAffinity != nil { + in, out := &in.PodAffinity, &out.PodAffinity + *out = new(api.PodAffinity) + if err := Convert_v1_PodAffinity_To_api_PodAffinity(*in, *out, s); err != nil { + return err + } + } else { + out.PodAffinity = nil + } + if in.PodAntiAffinity != nil { + in, out := &in.PodAntiAffinity, &out.PodAntiAffinity + *out = new(api.PodAntiAffinity) + if err := Convert_v1_PodAntiAffinity_To_api_PodAntiAffinity(*in, *out, s); err != nil { + return err + } + } else { + out.PodAntiAffinity = nil + } + return nil +} + +func Convert_v1_Affinity_To_api_Affinity(in *Affinity, out *api.Affinity, s conversion.Scope) error { + return autoConvert_v1_Affinity_To_api_Affinity(in, out, s) +} + +func autoConvert_api_Affinity_To_v1_Affinity(in *api.Affinity, out *Affinity, s conversion.Scope) error { + if in.NodeAffinity != nil { + in, out := &in.NodeAffinity, &out.NodeAffinity + *out = new(NodeAffinity) + if err := Convert_api_NodeAffinity_To_v1_NodeAffinity(*in, *out, s); err != nil { + return err + } + } else { + out.NodeAffinity = nil + } + if in.PodAffinity != nil { + in, out := &in.PodAffinity, &out.PodAffinity + *out = new(PodAffinity) + if err := Convert_api_PodAffinity_To_v1_PodAffinity(*in, *out, s); err != nil { + return err + } + } else { + out.PodAffinity = nil + } + if in.PodAntiAffinity != nil { + in, out := &in.PodAntiAffinity, &out.PodAntiAffinity + *out = new(PodAntiAffinity) + if err := Convert_api_PodAntiAffinity_To_v1_PodAntiAffinity(*in, *out, s); err != nil { + return err + } + } else { + out.PodAntiAffinity = nil } + return nil +} + +func Convert_api_Affinity_To_v1_Affinity(in *api.Affinity, out 
*Affinity, s conversion.Scope) error { + return autoConvert_api_Affinity_To_v1_Affinity(in, out, s) +} + +func autoConvert_v1_AzureFileVolumeSource_To_api_AzureFileVolumeSource(in *AzureFileVolumeSource, out *api.AzureFileVolumeSource, s conversion.Scope) error { + out.SecretName = in.SecretName + out.ShareName = in.ShareName + out.ReadOnly = in.ReadOnly + return nil +} + +func Convert_v1_AzureFileVolumeSource_To_api_AzureFileVolumeSource(in *AzureFileVolumeSource, out *api.AzureFileVolumeSource, s conversion.Scope) error { + return autoConvert_v1_AzureFileVolumeSource_To_api_AzureFileVolumeSource(in, out, s) +} + +func autoConvert_api_AzureFileVolumeSource_To_v1_AzureFileVolumeSource(in *api.AzureFileVolumeSource, out *AzureFileVolumeSource, s conversion.Scope) error { out.SecretName = in.SecretName out.ShareName = in.ShareName out.ReadOnly = in.ReadOnly @@ -57,9 +447,26 @@ func Convert_api_AzureFileVolumeSource_To_v1_AzureFileVolumeSource(in *api.Azure return autoConvert_api_AzureFileVolumeSource_To_v1_AzureFileVolumeSource(in, out, s) } +func autoConvert_v1_Binding_To_api_Binding(in *Binding, out *api.Binding, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { + return err + } + if err := Convert_v1_ObjectReference_To_api_ObjectReference(&in.Target, &out.Target, s); err != nil { + return err + } + return nil +} + +func Convert_v1_Binding_To_api_Binding(in *Binding, out *api.Binding, s conversion.Scope) error { + return autoConvert_v1_Binding_To_api_Binding(in, out, s) +} + func autoConvert_api_Binding_To_v1_Binding(in *api.Binding, out *Binding, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.Binding))(in) + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err } if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { return err @@ -74,22 +481,47 @@ func Convert_api_Binding_To_v1_Binding(in *api.Binding, out *Binding, s conversi return autoConvert_api_Binding_To_v1_Binding(in, out, s) } -func autoConvert_api_Capabilities_To_v1_Capabilities(in *api.Capabilities, out *Capabilities, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.Capabilities))(in) +func autoConvert_v1_Capabilities_To_api_Capabilities(in *Capabilities, out *api.Capabilities, s conversion.Scope) error { + if in.Add != nil { + in, out := &in.Add, &out.Add + *out = make([]api.Capability, len(*in)) + for i := range *in { + (*out)[i] = api.Capability((*in)[i]) + } + } else { + out.Add = nil } + if in.Drop != nil { + in, out := &in.Drop, &out.Drop + *out = make([]api.Capability, len(*in)) + for i := range *in { + (*out)[i] = api.Capability((*in)[i]) + } + } else { + out.Drop = nil + } + return nil +} + +func Convert_v1_Capabilities_To_api_Capabilities(in *Capabilities, out *api.Capabilities, s conversion.Scope) error { + return autoConvert_v1_Capabilities_To_api_Capabilities(in, out, s) +} + +func autoConvert_api_Capabilities_To_v1_Capabilities(in *api.Capabilities, out *Capabilities, s conversion.Scope) error { if in.Add != nil { - out.Add = make([]Capability, len(in.Add)) - for i := range in.Add { - out.Add[i] = Capability(in.Add[i]) + in, out 
:= &in.Add, &out.Add + *out = make([]Capability, len(*in)) + for i := range *in { + (*out)[i] = Capability((*in)[i]) } } else { out.Add = nil } if in.Drop != nil { - out.Drop = make([]Capability, len(in.Drop)) - for i := range in.Drop { - out.Drop[i] = Capability(in.Drop[i]) + in, out := &in.Drop, &out.Drop + *out = make([]Capability, len(*in)) + for i := range *in { + (*out)[i] = Capability((*in)[i]) } } else { out.Drop = nil @@ -101,25 +533,37 @@ func Convert_api_Capabilities_To_v1_Capabilities(in *api.Capabilities, out *Capa return autoConvert_api_Capabilities_To_v1_Capabilities(in, out, s) } -func autoConvert_api_CephFSVolumeSource_To_v1_CephFSVolumeSource(in *api.CephFSVolumeSource, out *CephFSVolumeSource, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.CephFSVolumeSource))(in) - } - if in.Monitors != nil { - out.Monitors = make([]string, len(in.Monitors)) - for i := range in.Monitors { - out.Monitors[i] = in.Monitors[i] +func autoConvert_v1_CephFSVolumeSource_To_api_CephFSVolumeSource(in *CephFSVolumeSource, out *api.CephFSVolumeSource, s conversion.Scope) error { + out.Monitors = in.Monitors + out.Path = in.Path + out.User = in.User + out.SecretFile = in.SecretFile + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(api.LocalObjectReference) + if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(*in, *out, s); err != nil { + return err } } else { - out.Monitors = nil + out.SecretRef = nil } + out.ReadOnly = in.ReadOnly + return nil +} + +func Convert_v1_CephFSVolumeSource_To_api_CephFSVolumeSource(in *CephFSVolumeSource, out *api.CephFSVolumeSource, s conversion.Scope) error { + return autoConvert_v1_CephFSVolumeSource_To_api_CephFSVolumeSource(in, out, s) +} + +func autoConvert_api_CephFSVolumeSource_To_v1_CephFSVolumeSource(in *api.CephFSVolumeSource, out *CephFSVolumeSource, s conversion.Scope) error { + out.Monitors = in.Monitors out.Path = in.Path out.User = in.User out.SecretFile = in.SecretFile - // unable to generate simple pointer conversion for api.LocalObjectReference -> v1.LocalObjectReference if in.SecretRef != nil { - out.SecretRef = new(LocalObjectReference) - if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(in.SecretRef, out.SecretRef, s); err != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(LocalObjectReference) + if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(*in, *out, s); err != nil { return err } } else { @@ -133,10 +577,18 @@ func Convert_api_CephFSVolumeSource_To_v1_CephFSVolumeSource(in *api.CephFSVolum return autoConvert_api_CephFSVolumeSource_To_v1_CephFSVolumeSource(in, out, s) } +func autoConvert_v1_CinderVolumeSource_To_api_CinderVolumeSource(in *CinderVolumeSource, out *api.CinderVolumeSource, s conversion.Scope) error { + out.VolumeID = in.VolumeID + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + return nil +} + +func Convert_v1_CinderVolumeSource_To_api_CinderVolumeSource(in *CinderVolumeSource, out *api.CinderVolumeSource, s conversion.Scope) error { + return autoConvert_v1_CinderVolumeSource_To_api_CinderVolumeSource(in, out, s) +} + func autoConvert_api_CinderVolumeSource_To_v1_CinderVolumeSource(in *api.CinderVolumeSource, out *CinderVolumeSource, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.CinderVolumeSource))(in) - } out.VolumeID = in.VolumeID out.FSType = in.FSType 
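// The block deleted just above was the old generator's defaulting hook:
// every conversion looked up a defaulting function by reflect.TypeOf and
// invoked it on the input. The new conversion-gen output drops that
// indirection; where defaults still apply, the versioned->internal
// functions call typed SetDefaults_* helpers directly (SetDefaults_ConfigMap,
// SetDefaults_Container, and SetDefaults_ContainerPort appear elsewhere in
// this file). A sketch of the two styles, with the type name in the new
// form purely illustrative:
//
//	// old: dynamic lookup on every call
//	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
//		defaulting.(func(*api.CinderVolumeSource))(in)
//	}
//	// new: a direct, statically typed call in the v1->api path
//	SetDefaults_SomeType(in)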
out.ReadOnly = in.ReadOnly @@ -147,10 +599,19 @@ func Convert_api_CinderVolumeSource_To_v1_CinderVolumeSource(in *api.CinderVolum return autoConvert_api_CinderVolumeSource_To_v1_CinderVolumeSource(in, out, s) } +func autoConvert_v1_ComponentCondition_To_api_ComponentCondition(in *ComponentCondition, out *api.ComponentCondition, s conversion.Scope) error { + out.Type = api.ComponentConditionType(in.Type) + out.Status = api.ConditionStatus(in.Status) + out.Message = in.Message + out.Error = in.Error + return nil +} + +func Convert_v1_ComponentCondition_To_api_ComponentCondition(in *ComponentCondition, out *api.ComponentCondition, s conversion.Scope) error { + return autoConvert_v1_ComponentCondition_To_api_ComponentCondition(in, out, s) +} + func autoConvert_api_ComponentCondition_To_v1_ComponentCondition(in *api.ComponentCondition, out *ComponentCondition, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.ComponentCondition))(in) - } out.Type = ComponentConditionType(in.Type) out.Status = ConditionStatus(in.Status) out.Message = in.Message @@ -162,17 +623,43 @@ func Convert_api_ComponentCondition_To_v1_ComponentCondition(in *api.ComponentCo return autoConvert_api_ComponentCondition_To_v1_ComponentCondition(in, out, s) } +func autoConvert_v1_ComponentStatus_To_api_ComponentStatus(in *ComponentStatus, out *api.ComponentStatus, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { + return err + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]api.ComponentCondition, len(*in)) + for i := range *in { + if err := Convert_v1_ComponentCondition_To_api_ComponentCondition(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Conditions = nil + } + return nil +} + +func Convert_v1_ComponentStatus_To_api_ComponentStatus(in *ComponentStatus, out *api.ComponentStatus, s conversion.Scope) error { + return autoConvert_v1_ComponentStatus_To_api_ComponentStatus(in, out, s) +} + func autoConvert_api_ComponentStatus_To_v1_ComponentStatus(in *api.ComponentStatus, out *ComponentStatus, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.ComponentStatus))(in) + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err } if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { return err } if in.Conditions != nil { - out.Conditions = make([]ComponentCondition, len(in.Conditions)) - for i := range in.Conditions { - if err := Convert_api_ComponentCondition_To_v1_ComponentCondition(&in.Conditions[i], &out.Conditions[i], s); err != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]ComponentCondition, len(*in)) + for i := range *in { + if err := Convert_api_ComponentCondition_To_v1_ComponentCondition(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -186,17 +673,43 @@ func Convert_api_ComponentStatus_To_v1_ComponentStatus(in *api.ComponentStatus, return autoConvert_api_ComponentStatus_To_v1_ComponentStatus(in, out, s) } +func autoConvert_v1_ComponentStatusList_To_api_ComponentStatusList(in *ComponentStatusList, out *api.ComponentStatusList, s conversion.Scope) 
error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { + return err + } + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]api.ComponentStatus, len(*in)) + for i := range *in { + if err := Convert_v1_ComponentStatus_To_api_ComponentStatus(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_v1_ComponentStatusList_To_api_ComponentStatusList(in *ComponentStatusList, out *api.ComponentStatusList, s conversion.Scope) error { + return autoConvert_v1_ComponentStatusList_To_api_ComponentStatusList(in, out, s) +} + func autoConvert_api_ComponentStatusList_To_v1_ComponentStatusList(in *api.ComponentStatusList, out *ComponentStatusList, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.ComponentStatusList))(in) + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err } if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { return err } if in.Items != nil { - out.Items = make([]ComponentStatus, len(in.Items)) - for i := range in.Items { - if err := Convert_api_ComponentStatus_To_v1_ComponentStatus(&in.Items[i], &out.Items[i], s); err != nil { + in, out := &in.Items, &out.Items + *out = make([]ComponentStatus, len(*in)) + for i := range *in { + if err := Convert_api_ComponentStatus_To_v1_ComponentStatus(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -210,21 +723,30 @@ func Convert_api_ComponentStatusList_To_v1_ComponentStatusList(in *api.Component return autoConvert_api_ComponentStatusList_To_v1_ComponentStatusList(in, out, s) } +func autoConvert_v1_ConfigMap_To_api_ConfigMap(in *ConfigMap, out *api.ConfigMap, s conversion.Scope) error { + SetDefaults_ConfigMap(in) + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { + return err + } + out.Data = in.Data + return nil +} + +func Convert_v1_ConfigMap_To_api_ConfigMap(in *ConfigMap, out *api.ConfigMap, s conversion.Scope) error { + return autoConvert_v1_ConfigMap_To_api_ConfigMap(in, out, s) +} + func autoConvert_api_ConfigMap_To_v1_ConfigMap(in *api.ConfigMap, out *ConfigMap, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.ConfigMap))(in) + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err } if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { return err } - if in.Data != nil { - out.Data = make(map[string]string) - for key, val := range in.Data { - out.Data[key] = val - } - } else { - out.Data = nil - } + out.Data = in.Data return nil } @@ -232,10 +754,19 @@ func Convert_api_ConfigMap_To_v1_ConfigMap(in *api.ConfigMap, out *ConfigMap, s return autoConvert_api_ConfigMap_To_v1_ConfigMap(in, out, s) } -func autoConvert_api_ConfigMapKeySelector_To_v1_ConfigMapKeySelector(in *api.ConfigMapKeySelector, out *ConfigMapKeySelector, s conversion.Scope) error { - if defaulting, 
found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.ConfigMapKeySelector))(in) +func autoConvert_v1_ConfigMapKeySelector_To_api_ConfigMapKeySelector(in *ConfigMapKeySelector, out *api.ConfigMapKeySelector, s conversion.Scope) error { + if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err } + out.Key = in.Key + return nil +} + +func Convert_v1_ConfigMapKeySelector_To_api_ConfigMapKeySelector(in *ConfigMapKeySelector, out *api.ConfigMapKeySelector, s conversion.Scope) error { + return autoConvert_v1_ConfigMapKeySelector_To_api_ConfigMapKeySelector(in, out, s) +} + +func autoConvert_api_ConfigMapKeySelector_To_v1_ConfigMapKeySelector(in *api.ConfigMapKeySelector, out *ConfigMapKeySelector, s conversion.Scope) error { if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { return err } @@ -247,17 +778,18 @@ func Convert_api_ConfigMapKeySelector_To_v1_ConfigMapKeySelector(in *api.ConfigM return autoConvert_api_ConfigMapKeySelector_To_v1_ConfigMapKeySelector(in, out, s) } -func autoConvert_api_ConfigMapList_To_v1_ConfigMapList(in *api.ConfigMapList, out *ConfigMapList, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.ConfigMapList))(in) +func autoConvert_v1_ConfigMapList_To_api_ConfigMapList(in *ConfigMapList, out *api.ConfigMapList, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err } if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { return err } if in.Items != nil { - out.Items = make([]ConfigMap, len(in.Items)) - for i := range in.Items { - if err := Convert_api_ConfigMap_To_v1_ConfigMap(&in.Items[i], &out.Items[i], s); err != nil { + in, out := &in.Items, &out.Items + *out = make([]api.ConfigMap, len(*in)) + for i := range *in { + if err := Convert_v1_ConfigMap_To_api_ConfigMap(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -267,21 +799,22 @@ func autoConvert_api_ConfigMapList_To_v1_ConfigMapList(in *api.ConfigMapList, ou return nil } -func Convert_api_ConfigMapList_To_v1_ConfigMapList(in *api.ConfigMapList, out *ConfigMapList, s conversion.Scope) error { - return autoConvert_api_ConfigMapList_To_v1_ConfigMapList(in, out, s) +func Convert_v1_ConfigMapList_To_api_ConfigMapList(in *ConfigMapList, out *api.ConfigMapList, s conversion.Scope) error { + return autoConvert_v1_ConfigMapList_To_api_ConfigMapList(in, out, s) } -func autoConvert_api_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource(in *api.ConfigMapVolumeSource, out *ConfigMapVolumeSource, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.ConfigMapVolumeSource))(in) +func autoConvert_api_ConfigMapList_To_v1_ConfigMapList(in *api.ConfigMapList, out *ConfigMapList, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err } - if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { return err } if 
in.Items != nil { - out.Items = make([]KeyToPath, len(in.Items)) - for i := range in.Items { - if err := Convert_api_KeyToPath_To_v1_KeyToPath(&in.Items[i], &out.Items[i], s); err != nil { + in, out := &in.Items, &out.Items + *out = make([]ConfigMap, len(*in)) + for i := range *in { + if err := Convert_api_ConfigMap_To_v1_ConfigMap(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -291,88 +824,209 @@ func autoConvert_api_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource(in *api.C return nil } -func Convert_api_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource(in *api.ConfigMapVolumeSource, out *ConfigMapVolumeSource, s conversion.Scope) error { - return autoConvert_api_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource(in, out, s) +func Convert_api_ConfigMapList_To_v1_ConfigMapList(in *api.ConfigMapList, out *ConfigMapList, s conversion.Scope) error { + return autoConvert_api_ConfigMapList_To_v1_ConfigMapList(in, out, s) } -func autoConvert_api_Container_To_v1_Container(in *api.Container, out *Container, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.Container))(in) - } - out.Name = in.Name - out.Image = in.Image - if in.Command != nil { - out.Command = make([]string, len(in.Command)) - for i := range in.Command { - out.Command[i] = in.Command[i] - } - } else { - out.Command = nil - } - if in.Args != nil { - out.Args = make([]string, len(in.Args)) - for i := range in.Args { - out.Args[i] = in.Args[i] - } - } else { - out.Args = nil - } - out.WorkingDir = in.WorkingDir - if in.Ports != nil { - out.Ports = make([]ContainerPort, len(in.Ports)) - for i := range in.Ports { - if err := Convert_api_ContainerPort_To_v1_ContainerPort(&in.Ports[i], &out.Ports[i], s); err != nil { - return err - } - } - } else { - out.Ports = nil +func autoConvert_v1_ConfigMapVolumeSource_To_api_ConfigMapVolumeSource(in *ConfigMapVolumeSource, out *api.ConfigMapVolumeSource, s conversion.Scope) error { + if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err } - if in.Env != nil { - out.Env = make([]EnvVar, len(in.Env)) - for i := range in.Env { - if err := Convert_api_EnvVar_To_v1_EnvVar(&in.Env[i], &out.Env[i], s); err != nil { + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]api.KeyToPath, len(*in)) + for i := range *in { + if err := Convert_v1_KeyToPath_To_api_KeyToPath(&(*in)[i], &(*out)[i], s); err != nil { return err } } } else { - out.Env = nil + out.Items = nil + } + return nil +} + +func Convert_v1_ConfigMapVolumeSource_To_api_ConfigMapVolumeSource(in *ConfigMapVolumeSource, out *api.ConfigMapVolumeSource, s conversion.Scope) error { + return autoConvert_v1_ConfigMapVolumeSource_To_api_ConfigMapVolumeSource(in, out, s) +} + +func autoConvert_api_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource(in *api.ConfigMapVolumeSource, out *ConfigMapVolumeSource, s conversion.Scope) error { + if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KeyToPath, len(*in)) + for i := range *in { + if err := Convert_api_KeyToPath_To_v1_KeyToPath(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_api_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource(in 
*api.ConfigMapVolumeSource, out *ConfigMapVolumeSource, s conversion.Scope) error { + return autoConvert_api_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource(in, out, s) +} + +func autoConvert_v1_Container_To_api_Container(in *Container, out *api.Container, s conversion.Scope) error { + SetDefaults_Container(in) + out.Name = in.Name + out.Image = in.Image + out.Command = in.Command + out.Args = in.Args + out.WorkingDir = in.WorkingDir + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]api.ContainerPort, len(*in)) + for i := range *in { + if err := Convert_v1_ContainerPort_To_api_ContainerPort(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Ports = nil + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]api.EnvVar, len(*in)) + for i := range *in { + if err := Convert_v1_EnvVar_To_api_EnvVar(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Env = nil + } + if err := Convert_v1_ResourceRequirements_To_api_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil { + return err + } + if in.VolumeMounts != nil { + in, out := &in.VolumeMounts, &out.VolumeMounts + *out = make([]api.VolumeMount, len(*in)) + for i := range *in { + if err := Convert_v1_VolumeMount_To_api_VolumeMount(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.VolumeMounts = nil + } + if in.LivenessProbe != nil { + in, out := &in.LivenessProbe, &out.LivenessProbe + *out = new(api.Probe) + if err := Convert_v1_Probe_To_api_Probe(*in, *out, s); err != nil { + return err + } + } else { + out.LivenessProbe = nil + } + if in.ReadinessProbe != nil { + in, out := &in.ReadinessProbe, &out.ReadinessProbe + *out = new(api.Probe) + if err := Convert_v1_Probe_To_api_Probe(*in, *out, s); err != nil { + return err + } + } else { + out.ReadinessProbe = nil + } + if in.Lifecycle != nil { + in, out := &in.Lifecycle, &out.Lifecycle + *out = new(api.Lifecycle) + if err := Convert_v1_Lifecycle_To_api_Lifecycle(*in, *out, s); err != nil { + return err + } + } else { + out.Lifecycle = nil + } + out.TerminationMessagePath = in.TerminationMessagePath + out.ImagePullPolicy = api.PullPolicy(in.ImagePullPolicy) + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(api.SecurityContext) + if err := Convert_v1_SecurityContext_To_api_SecurityContext(*in, *out, s); err != nil { + return err + } + } else { + out.SecurityContext = nil + } + out.Stdin = in.Stdin + out.StdinOnce = in.StdinOnce + out.TTY = in.TTY + return nil +} + +func Convert_v1_Container_To_api_Container(in *Container, out *api.Container, s conversion.Scope) error { + return autoConvert_v1_Container_To_api_Container(in, out, s) +} + +func autoConvert_api_Container_To_v1_Container(in *api.Container, out *Container, s conversion.Scope) error { + out.Name = in.Name + out.Image = in.Image + out.Command = in.Command + out.Args = in.Args + out.WorkingDir = in.WorkingDir + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]ContainerPort, len(*in)) + for i := range *in { + if err := Convert_api_ContainerPort_To_v1_ContainerPort(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Ports = nil + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]EnvVar, len(*in)) + for i := range *in { + if err := Convert_api_EnvVar_To_v1_EnvVar(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Env = nil } if err := 
Convert_api_ResourceRequirements_To_v1_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil { return err } if in.VolumeMounts != nil { - out.VolumeMounts = make([]VolumeMount, len(in.VolumeMounts)) - for i := range in.VolumeMounts { - if err := Convert_api_VolumeMount_To_v1_VolumeMount(&in.VolumeMounts[i], &out.VolumeMounts[i], s); err != nil { + in, out := &in.VolumeMounts, &out.VolumeMounts + *out = make([]VolumeMount, len(*in)) + for i := range *in { + if err := Convert_api_VolumeMount_To_v1_VolumeMount(&(*in)[i], &(*out)[i], s); err != nil { return err } } } else { out.VolumeMounts = nil } - // unable to generate simple pointer conversion for api.Probe -> v1.Probe if in.LivenessProbe != nil { - out.LivenessProbe = new(Probe) - if err := Convert_api_Probe_To_v1_Probe(in.LivenessProbe, out.LivenessProbe, s); err != nil { + in, out := &in.LivenessProbe, &out.LivenessProbe + *out = new(Probe) + if err := Convert_api_Probe_To_v1_Probe(*in, *out, s); err != nil { return err } } else { out.LivenessProbe = nil } - // unable to generate simple pointer conversion for api.Probe -> v1.Probe if in.ReadinessProbe != nil { - out.ReadinessProbe = new(Probe) - if err := Convert_api_Probe_To_v1_Probe(in.ReadinessProbe, out.ReadinessProbe, s); err != nil { + in, out := &in.ReadinessProbe, &out.ReadinessProbe + *out = new(Probe) + if err := Convert_api_Probe_To_v1_Probe(*in, *out, s); err != nil { return err } } else { out.ReadinessProbe = nil } - // unable to generate simple pointer conversion for api.Lifecycle -> v1.Lifecycle if in.Lifecycle != nil { - out.Lifecycle = new(Lifecycle) - if err := Convert_api_Lifecycle_To_v1_Lifecycle(in.Lifecycle, out.Lifecycle, s); err != nil { + in, out := &in.Lifecycle, &out.Lifecycle + *out = new(Lifecycle) + if err := Convert_api_Lifecycle_To_v1_Lifecycle(*in, *out, s); err != nil { return err } } else { @@ -380,10 +1034,10 @@ func autoConvert_api_Container_To_v1_Container(in *api.Container, out *Container } out.TerminationMessagePath = in.TerminationMessagePath out.ImagePullPolicy = PullPolicy(in.ImagePullPolicy) - // unable to generate simple pointer conversion for api.SecurityContext -> v1.SecurityContext if in.SecurityContext != nil { - out.SecurityContext = new(SecurityContext) - if err := Convert_api_SecurityContext_To_v1_SecurityContext(in.SecurityContext, out.SecurityContext, s); err != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(SecurityContext) + if err := Convert_api_SecurityContext_To_v1_SecurityContext(*in, *out, s); err != nil { return err } } else { @@ -399,18 +1053,18 @@ func Convert_api_Container_To_v1_Container(in *api.Container, out *Container, s return autoConvert_api_Container_To_v1_Container(in, out, s) } +func autoConvert_v1_ContainerImage_To_api_ContainerImage(in *ContainerImage, out *api.ContainerImage, s conversion.Scope) error { + out.Names = in.Names + out.SizeBytes = in.SizeBytes + return nil +} + +func Convert_v1_ContainerImage_To_api_ContainerImage(in *ContainerImage, out *api.ContainerImage, s conversion.Scope) error { + return autoConvert_v1_ContainerImage_To_api_ContainerImage(in, out, s) +} + func autoConvert_api_ContainerImage_To_v1_ContainerImage(in *api.ContainerImage, out *ContainerImage, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.ContainerImage))(in) - } - if in.Names != nil { - out.Names = make([]string, len(in.Names)) - for i := range in.Names { - out.Names[i] = in.Names[i] - } - } else { - 
out.Names = nil - } + out.Names = in.Names out.SizeBytes = in.SizeBytes return nil } @@ -419,13 +1073,24 @@ func Convert_api_ContainerImage_To_v1_ContainerImage(in *api.ContainerImage, out return autoConvert_api_ContainerImage_To_v1_ContainerImage(in, out, s) } +func autoConvert_v1_ContainerPort_To_api_ContainerPort(in *ContainerPort, out *api.ContainerPort, s conversion.Scope) error { + SetDefaults_ContainerPort(in) + out.Name = in.Name + out.HostPort = in.HostPort + out.ContainerPort = in.ContainerPort + out.Protocol = api.Protocol(in.Protocol) + out.HostIP = in.HostIP + return nil +} + +func Convert_v1_ContainerPort_To_api_ContainerPort(in *ContainerPort, out *api.ContainerPort, s conversion.Scope) error { + return autoConvert_v1_ContainerPort_To_api_ContainerPort(in, out, s) +} + func autoConvert_api_ContainerPort_To_v1_ContainerPort(in *api.ContainerPort, out *ContainerPort, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.ContainerPort))(in) - } out.Name = in.Name - out.HostPort = int32(in.HostPort) - out.ContainerPort = int32(in.ContainerPort) + out.HostPort = in.HostPort + out.ContainerPort = in.ContainerPort out.Protocol = Protocol(in.Protocol) out.HostIP = in.HostIP return nil @@ -435,32 +1100,64 @@ func Convert_api_ContainerPort_To_v1_ContainerPort(in *api.ContainerPort, out *C return autoConvert_api_ContainerPort_To_v1_ContainerPort(in, out, s) } -func autoConvert_api_ContainerState_To_v1_ContainerState(in *api.ContainerState, out *ContainerState, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.ContainerState))(in) +func autoConvert_v1_ContainerState_To_api_ContainerState(in *ContainerState, out *api.ContainerState, s conversion.Scope) error { + if in.Waiting != nil { + in, out := &in.Waiting, &out.Waiting + *out = new(api.ContainerStateWaiting) + if err := Convert_v1_ContainerStateWaiting_To_api_ContainerStateWaiting(*in, *out, s); err != nil { + return err + } + } else { + out.Waiting = nil + } + if in.Running != nil { + in, out := &in.Running, &out.Running + *out = new(api.ContainerStateRunning) + if err := Convert_v1_ContainerStateRunning_To_api_ContainerStateRunning(*in, *out, s); err != nil { + return err + } + } else { + out.Running = nil + } + if in.Terminated != nil { + in, out := &in.Terminated, &out.Terminated + *out = new(api.ContainerStateTerminated) + if err := Convert_v1_ContainerStateTerminated_To_api_ContainerStateTerminated(*in, *out, s); err != nil { + return err + } + } else { + out.Terminated = nil } - // unable to generate simple pointer conversion for api.ContainerStateWaiting -> v1.ContainerStateWaiting + return nil +} + +func Convert_v1_ContainerState_To_api_ContainerState(in *ContainerState, out *api.ContainerState, s conversion.Scope) error { + return autoConvert_v1_ContainerState_To_api_ContainerState(in, out, s) +} + +func autoConvert_api_ContainerState_To_v1_ContainerState(in *api.ContainerState, out *ContainerState, s conversion.Scope) error { if in.Waiting != nil { - out.Waiting = new(ContainerStateWaiting) - if err := Convert_api_ContainerStateWaiting_To_v1_ContainerStateWaiting(in.Waiting, out.Waiting, s); err != nil { + in, out := &in.Waiting, &out.Waiting + *out = new(ContainerStateWaiting) + if err := Convert_api_ContainerStateWaiting_To_v1_ContainerStateWaiting(*in, *out, s); err != nil { return err } } else { out.Waiting = nil } - // unable to generate simple pointer conversion 
for api.ContainerStateRunning -> v1.ContainerStateRunning if in.Running != nil { - out.Running = new(ContainerStateRunning) - if err := Convert_api_ContainerStateRunning_To_v1_ContainerStateRunning(in.Running, out.Running, s); err != nil { + in, out := &in.Running, &out.Running + *out = new(ContainerStateRunning) + if err := Convert_api_ContainerStateRunning_To_v1_ContainerStateRunning(*in, *out, s); err != nil { return err } } else { out.Running = nil } - // unable to generate simple pointer conversion for api.ContainerStateTerminated -> v1.ContainerStateTerminated if in.Terminated != nil { - out.Terminated = new(ContainerStateTerminated) - if err := Convert_api_ContainerStateTerminated_To_v1_ContainerStateTerminated(in.Terminated, out.Terminated, s); err != nil { + in, out := &in.Terminated, &out.Terminated + *out = new(ContainerStateTerminated) + if err := Convert_api_ContainerStateTerminated_To_v1_ContainerStateTerminated(*in, *out, s); err != nil { return err } } else { @@ -473,10 +1170,18 @@ func Convert_api_ContainerState_To_v1_ContainerState(in *api.ContainerState, out return autoConvert_api_ContainerState_To_v1_ContainerState(in, out, s) } -func autoConvert_api_ContainerStateRunning_To_v1_ContainerStateRunning(in *api.ContainerStateRunning, out *ContainerStateRunning, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.ContainerStateRunning))(in) +func autoConvert_v1_ContainerStateRunning_To_api_ContainerStateRunning(in *ContainerStateRunning, out *api.ContainerStateRunning, s conversion.Scope) error { + if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.StartedAt, &out.StartedAt, s); err != nil { + return err } + return nil +} + +func Convert_v1_ContainerStateRunning_To_api_ContainerStateRunning(in *ContainerStateRunning, out *api.ContainerStateRunning, s conversion.Scope) error { + return autoConvert_v1_ContainerStateRunning_To_api_ContainerStateRunning(in, out, s) +} + +func autoConvert_api_ContainerStateRunning_To_v1_ContainerStateRunning(in *api.ContainerStateRunning, out *ContainerStateRunning, s conversion.Scope) error { if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.StartedAt, &out.StartedAt, s); err != nil { return err } @@ -487,12 +1192,28 @@ func Convert_api_ContainerStateRunning_To_v1_ContainerStateRunning(in *api.Conta return autoConvert_api_ContainerStateRunning_To_v1_ContainerStateRunning(in, out, s) } -func autoConvert_api_ContainerStateTerminated_To_v1_ContainerStateTerminated(in *api.ContainerStateTerminated, out *ContainerStateTerminated, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.ContainerStateTerminated))(in) +func autoConvert_v1_ContainerStateTerminated_To_api_ContainerStateTerminated(in *ContainerStateTerminated, out *api.ContainerStateTerminated, s conversion.Scope) error { + out.ExitCode = in.ExitCode + out.Signal = in.Signal + out.Reason = in.Reason + out.Message = in.Message + if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.StartedAt, &out.StartedAt, s); err != nil { + return err + } + if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.FinishedAt, &out.FinishedAt, s); err != nil { + return err } - out.ExitCode = int32(in.ExitCode) - out.Signal = int32(in.Signal) + out.ContainerID = in.ContainerID + return nil +} + +func Convert_v1_ContainerStateTerminated_To_api_ContainerStateTerminated(in *ContainerStateTerminated, out 
*api.ContainerStateTerminated, s conversion.Scope) error { + return autoConvert_v1_ContainerStateTerminated_To_api_ContainerStateTerminated(in, out, s) +} + +func autoConvert_api_ContainerStateTerminated_To_v1_ContainerStateTerminated(in *api.ContainerStateTerminated, out *ContainerStateTerminated, s conversion.Scope) error { + out.ExitCode = in.ExitCode + out.Signal = in.Signal out.Reason = in.Reason out.Message = in.Message if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.StartedAt, &out.StartedAt, s); err != nil { @@ -509,10 +1230,17 @@ func Convert_api_ContainerStateTerminated_To_v1_ContainerStateTerminated(in *api return autoConvert_api_ContainerStateTerminated_To_v1_ContainerStateTerminated(in, out, s) } +func autoConvert_v1_ContainerStateWaiting_To_api_ContainerStateWaiting(in *ContainerStateWaiting, out *api.ContainerStateWaiting, s conversion.Scope) error { + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +func Convert_v1_ContainerStateWaiting_To_api_ContainerStateWaiting(in *ContainerStateWaiting, out *api.ContainerStateWaiting, s conversion.Scope) error { + return autoConvert_v1_ContainerStateWaiting_To_api_ContainerStateWaiting(in, out, s) +} + func autoConvert_api_ContainerStateWaiting_To_v1_ContainerStateWaiting(in *api.ContainerStateWaiting, out *ContainerStateWaiting, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.ContainerStateWaiting))(in) - } out.Reason = in.Reason out.Message = in.Message return nil @@ -522,10 +1250,27 @@ func Convert_api_ContainerStateWaiting_To_v1_ContainerStateWaiting(in *api.Conta return autoConvert_api_ContainerStateWaiting_To_v1_ContainerStateWaiting(in, out, s) } -func autoConvert_api_ContainerStatus_To_v1_ContainerStatus(in *api.ContainerStatus, out *ContainerStatus, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.ContainerStatus))(in) +func autoConvert_v1_ContainerStatus_To_api_ContainerStatus(in *ContainerStatus, out *api.ContainerStatus, s conversion.Scope) error { + out.Name = in.Name + if err := Convert_v1_ContainerState_To_api_ContainerState(&in.State, &out.State, s); err != nil { + return err + } + if err := Convert_v1_ContainerState_To_api_ContainerState(&in.LastTerminationState, &out.LastTerminationState, s); err != nil { + return err } + out.Ready = in.Ready + out.RestartCount = in.RestartCount + out.Image = in.Image + out.ImageID = in.ImageID + out.ContainerID = in.ContainerID + return nil +} + +func Convert_v1_ContainerStatus_To_api_ContainerStatus(in *ContainerStatus, out *api.ContainerStatus, s conversion.Scope) error { + return autoConvert_v1_ContainerStatus_To_api_ContainerStatus(in, out, s) +} + +func autoConvert_api_ContainerStatus_To_v1_ContainerStatus(in *api.ContainerStatus, out *ContainerStatus, s conversion.Scope) error { out.Name = in.Name if err := Convert_api_ContainerState_To_v1_ContainerState(&in.State, &out.State, s); err != nil { return err @@ -534,7 +1279,7 @@ func autoConvert_api_ContainerStatus_To_v1_ContainerStatus(in *api.ContainerStat return err } out.Ready = in.Ready - out.RestartCount = int32(in.RestartCount) + out.RestartCount = in.RestartCount out.Image = in.Image out.ImageID = in.ImageID out.ContainerID = in.ContainerID @@ -545,63 +1290,152 @@ func Convert_api_ContainerStatus_To_v1_ContainerStatus(in *api.ContainerStatus, return autoConvert_api_ContainerStatus_To_v1_ContainerStatus(in, out, s) } -func 
autoConvert_api_DaemonEndpoint_To_v1_DaemonEndpoint(in *api.DaemonEndpoint, out *DaemonEndpoint, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.DaemonEndpoint))(in) - } - out.Port = int32(in.Port) +func autoConvert_v1_DaemonEndpoint_To_api_DaemonEndpoint(in *DaemonEndpoint, out *api.DaemonEndpoint, s conversion.Scope) error { + out.Port = in.Port return nil } -func Convert_api_DaemonEndpoint_To_v1_DaemonEndpoint(in *api.DaemonEndpoint, out *DaemonEndpoint, s conversion.Scope) error { - return autoConvert_api_DaemonEndpoint_To_v1_DaemonEndpoint(in, out, s) +func Convert_v1_DaemonEndpoint_To_api_DaemonEndpoint(in *DaemonEndpoint, out *api.DaemonEndpoint, s conversion.Scope) error { + return autoConvert_v1_DaemonEndpoint_To_api_DaemonEndpoint(in, out, s) } -func autoConvert_api_DeleteOptions_To_v1_DeleteOptions(in *api.DeleteOptions, out *DeleteOptions, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.DeleteOptions))(in) - } - if in.GracePeriodSeconds != nil { - out.GracePeriodSeconds = new(int64) - *out.GracePeriodSeconds = *in.GracePeriodSeconds - } else { - out.GracePeriodSeconds = nil - } +func autoConvert_api_DaemonEndpoint_To_v1_DaemonEndpoint(in *api.DaemonEndpoint, out *DaemonEndpoint, s conversion.Scope) error { + out.Port = in.Port return nil } -func Convert_api_DeleteOptions_To_v1_DeleteOptions(in *api.DeleteOptions, out *DeleteOptions, s conversion.Scope) error { - return autoConvert_api_DeleteOptions_To_v1_DeleteOptions(in, out, s) +func Convert_api_DaemonEndpoint_To_v1_DaemonEndpoint(in *api.DaemonEndpoint, out *DaemonEndpoint, s conversion.Scope) error { + return autoConvert_api_DaemonEndpoint_To_v1_DaemonEndpoint(in, out, s) } -func autoConvert_api_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile(in *api.DownwardAPIVolumeFile, out *DownwardAPIVolumeFile, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.DownwardAPIVolumeFile))(in) - } - out.Path = in.Path - if err := Convert_api_ObjectFieldSelector_To_v1_ObjectFieldSelector(&in.FieldRef, &out.FieldRef, s); err != nil { +func autoConvert_v1_DeleteOptions_To_api_DeleteOptions(in *DeleteOptions, out *api.DeleteOptions, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } + out.GracePeriodSeconds = in.GracePeriodSeconds + if in.Preconditions != nil { + in, out := &in.Preconditions, &out.Preconditions + *out = new(api.Preconditions) + if err := Convert_v1_Preconditions_To_api_Preconditions(*in, *out, s); err != nil { + return err + } + } else { + out.Preconditions = nil + } + out.OrphanDependents = in.OrphanDependents return nil } -func Convert_api_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile(in *api.DownwardAPIVolumeFile, out *DownwardAPIVolumeFile, s conversion.Scope) error { - return autoConvert_api_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile(in, out, s) +func Convert_v1_DeleteOptions_To_api_DeleteOptions(in *DeleteOptions, out *api.DeleteOptions, s conversion.Scope) error { + return autoConvert_v1_DeleteOptions_To_api_DeleteOptions(in, out, s) } -func autoConvert_api_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource(in *api.DownwardAPIVolumeSource, out *DownwardAPIVolumeSource, s conversion.Scope) error { - if defaulting, found := 
s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.DownwardAPIVolumeSource))(in) +func autoConvert_api_DeleteOptions_To_v1_DeleteOptions(in *api.DeleteOptions, out *DeleteOptions, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err } - if in.Items != nil { - out.Items = make([]DownwardAPIVolumeFile, len(in.Items)) - for i := range in.Items { - if err := Convert_api_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile(&in.Items[i], &out.Items[i], s); err != nil { - return err - } + out.GracePeriodSeconds = in.GracePeriodSeconds + if in.Preconditions != nil { + in, out := &in.Preconditions, &out.Preconditions + *out = new(Preconditions) + if err := Convert_api_Preconditions_To_v1_Preconditions(*in, *out, s); err != nil { + return err } } else { - out.Items = nil + out.Preconditions = nil + } + out.OrphanDependents = in.OrphanDependents + return nil +} + +func Convert_api_DeleteOptions_To_v1_DeleteOptions(in *api.DeleteOptions, out *DeleteOptions, s conversion.Scope) error { + return autoConvert_api_DeleteOptions_To_v1_DeleteOptions(in, out, s) +} + +func autoConvert_v1_DownwardAPIVolumeFile_To_api_DownwardAPIVolumeFile(in *DownwardAPIVolumeFile, out *api.DownwardAPIVolumeFile, s conversion.Scope) error { + out.Path = in.Path + if in.FieldRef != nil { + in, out := &in.FieldRef, &out.FieldRef + *out = new(api.ObjectFieldSelector) + if err := Convert_v1_ObjectFieldSelector_To_api_ObjectFieldSelector(*in, *out, s); err != nil { + return err + } + } else { + out.FieldRef = nil + } + if in.ResourceFieldRef != nil { + in, out := &in.ResourceFieldRef, &out.ResourceFieldRef + *out = new(api.ResourceFieldSelector) + if err := Convert_v1_ResourceFieldSelector_To_api_ResourceFieldSelector(*in, *out, s); err != nil { + return err + } + } else { + out.ResourceFieldRef = nil + } + return nil +} + +func Convert_v1_DownwardAPIVolumeFile_To_api_DownwardAPIVolumeFile(in *DownwardAPIVolumeFile, out *api.DownwardAPIVolumeFile, s conversion.Scope) error { + return autoConvert_v1_DownwardAPIVolumeFile_To_api_DownwardAPIVolumeFile(in, out, s) +} + +func autoConvert_api_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile(in *api.DownwardAPIVolumeFile, out *DownwardAPIVolumeFile, s conversion.Scope) error { + out.Path = in.Path + if in.FieldRef != nil { + in, out := &in.FieldRef, &out.FieldRef + *out = new(ObjectFieldSelector) + if err := Convert_api_ObjectFieldSelector_To_v1_ObjectFieldSelector(*in, *out, s); err != nil { + return err + } + } else { + out.FieldRef = nil + } + if in.ResourceFieldRef != nil { + in, out := &in.ResourceFieldRef, &out.ResourceFieldRef + *out = new(ResourceFieldSelector) + if err := Convert_api_ResourceFieldSelector_To_v1_ResourceFieldSelector(*in, *out, s); err != nil { + return err + } + } else { + out.ResourceFieldRef = nil + } + return nil +} + +func Convert_api_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile(in *api.DownwardAPIVolumeFile, out *DownwardAPIVolumeFile, s conversion.Scope) error { + return autoConvert_api_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile(in, out, s) +} + +func autoConvert_v1_DownwardAPIVolumeSource_To_api_DownwardAPIVolumeSource(in *DownwardAPIVolumeSource, out *api.DownwardAPIVolumeSource, s conversion.Scope) error { + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]api.DownwardAPIVolumeFile, len(*in)) + for i := range *in { + if err := 
Convert_v1_DownwardAPIVolumeFile_To_api_DownwardAPIVolumeFile(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_v1_DownwardAPIVolumeSource_To_api_DownwardAPIVolumeSource(in *DownwardAPIVolumeSource, out *api.DownwardAPIVolumeSource, s conversion.Scope) error { + return autoConvert_v1_DownwardAPIVolumeSource_To_api_DownwardAPIVolumeSource(in, out, s) +} + +func autoConvert_api_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource(in *api.DownwardAPIVolumeSource, out *DownwardAPIVolumeSource, s conversion.Scope) error { + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DownwardAPIVolumeFile, len(*in)) + for i := range *in { + if err := Convert_api_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil } return nil } @@ -610,10 +1444,16 @@ func Convert_api_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource(in *api.D return autoConvert_api_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource(in, out, s) } +func autoConvert_v1_EmptyDirVolumeSource_To_api_EmptyDirVolumeSource(in *EmptyDirVolumeSource, out *api.EmptyDirVolumeSource, s conversion.Scope) error { + out.Medium = api.StorageMedium(in.Medium) + return nil +} + +func Convert_v1_EmptyDirVolumeSource_To_api_EmptyDirVolumeSource(in *EmptyDirVolumeSource, out *api.EmptyDirVolumeSource, s conversion.Scope) error { + return autoConvert_v1_EmptyDirVolumeSource_To_api_EmptyDirVolumeSource(in, out, s) +} + func autoConvert_api_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource(in *api.EmptyDirVolumeSource, out *EmptyDirVolumeSource, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.EmptyDirVolumeSource))(in) - } out.Medium = StorageMedium(in.Medium) return nil } @@ -622,15 +1462,32 @@ func Convert_api_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource(in *api.EmptyDi return autoConvert_api_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource(in, out, s) } -func autoConvert_api_EndpointAddress_To_v1_EndpointAddress(in *api.EndpointAddress, out *EndpointAddress, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.EndpointAddress))(in) +func autoConvert_v1_EndpointAddress_To_api_EndpointAddress(in *EndpointAddress, out *api.EndpointAddress, s conversion.Scope) error { + out.IP = in.IP + out.Hostname = in.Hostname + if in.TargetRef != nil { + in, out := &in.TargetRef, &out.TargetRef + *out = new(api.ObjectReference) + if err := Convert_v1_ObjectReference_To_api_ObjectReference(*in, *out, s); err != nil { + return err + } + } else { + out.TargetRef = nil } + return nil +} + +func Convert_v1_EndpointAddress_To_api_EndpointAddress(in *EndpointAddress, out *api.EndpointAddress, s conversion.Scope) error { + return autoConvert_v1_EndpointAddress_To_api_EndpointAddress(in, out, s) +} + +func autoConvert_api_EndpointAddress_To_v1_EndpointAddress(in *api.EndpointAddress, out *EndpointAddress, s conversion.Scope) error { out.IP = in.IP - // unable to generate simple pointer conversion for api.ObjectReference -> v1.ObjectReference + out.Hostname = in.Hostname if in.TargetRef != nil { - out.TargetRef = new(ObjectReference) - if err := Convert_api_ObjectReference_To_v1_ObjectReference(in.TargetRef, out.TargetRef, s); err != nil { + in, out := &in.TargetRef, &out.TargetRef + *out = new(ObjectReference) + if err := 
Convert_api_ObjectReference_To_v1_ObjectReference(*in, *out, s); err != nil { return err } } else { @@ -643,12 +1500,20 @@ func Convert_api_EndpointAddress_To_v1_EndpointAddress(in *api.EndpointAddress, return autoConvert_api_EndpointAddress_To_v1_EndpointAddress(in, out, s) } +func autoConvert_v1_EndpointPort_To_api_EndpointPort(in *EndpointPort, out *api.EndpointPort, s conversion.Scope) error { + out.Name = in.Name + out.Port = in.Port + out.Protocol = api.Protocol(in.Protocol) + return nil +} + +func Convert_v1_EndpointPort_To_api_EndpointPort(in *EndpointPort, out *api.EndpointPort, s conversion.Scope) error { + return autoConvert_v1_EndpointPort_To_api_EndpointPort(in, out, s) +} + func autoConvert_api_EndpointPort_To_v1_EndpointPort(in *api.EndpointPort, out *EndpointPort, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.EndpointPort))(in) - } out.Name = in.Name - out.Port = int32(in.Port) + out.Port = in.Port out.Protocol = Protocol(in.Protocol) return nil } @@ -657,14 +1522,53 @@ func Convert_api_EndpointPort_To_v1_EndpointPort(in *api.EndpointPort, out *Endp return autoConvert_api_EndpointPort_To_v1_EndpointPort(in, out, s) } -func autoConvert_api_EndpointSubset_To_v1_EndpointSubset(in *api.EndpointSubset, out *EndpointSubset, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.EndpointSubset))(in) +func autoConvert_v1_EndpointSubset_To_api_EndpointSubset(in *EndpointSubset, out *api.EndpointSubset, s conversion.Scope) error { + if in.Addresses != nil { + in, out := &in.Addresses, &out.Addresses + *out = make([]api.EndpointAddress, len(*in)) + for i := range *in { + if err := Convert_v1_EndpointAddress_To_api_EndpointAddress(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Addresses = nil + } + if in.NotReadyAddresses != nil { + in, out := &in.NotReadyAddresses, &out.NotReadyAddresses + *out = make([]api.EndpointAddress, len(*in)) + for i := range *in { + if err := Convert_v1_EndpointAddress_To_api_EndpointAddress(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.NotReadyAddresses = nil + } + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]api.EndpointPort, len(*in)) + for i := range *in { + if err := Convert_v1_EndpointPort_To_api_EndpointPort(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Ports = nil } + return nil +} + +func Convert_v1_EndpointSubset_To_api_EndpointSubset(in *EndpointSubset, out *api.EndpointSubset, s conversion.Scope) error { + return autoConvert_v1_EndpointSubset_To_api_EndpointSubset(in, out, s) +} + +func autoConvert_api_EndpointSubset_To_v1_EndpointSubset(in *api.EndpointSubset, out *EndpointSubset, s conversion.Scope) error { if in.Addresses != nil { - out.Addresses = make([]EndpointAddress, len(in.Addresses)) - for i := range in.Addresses { - if err := Convert_api_EndpointAddress_To_v1_EndpointAddress(&in.Addresses[i], &out.Addresses[i], s); err != nil { + in, out := &in.Addresses, &out.Addresses + *out = make([]EndpointAddress, len(*in)) + for i := range *in { + if err := Convert_api_EndpointAddress_To_v1_EndpointAddress(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -672,9 +1576,10 @@ func autoConvert_api_EndpointSubset_To_v1_EndpointSubset(in *api.EndpointSubset, out.Addresses = nil } if in.NotReadyAddresses != nil { - out.NotReadyAddresses = make([]EndpointAddress, 
len(in.NotReadyAddresses)) - for i := range in.NotReadyAddresses { - if err := Convert_api_EndpointAddress_To_v1_EndpointAddress(&in.NotReadyAddresses[i], &out.NotReadyAddresses[i], s); err != nil { + in, out := &in.NotReadyAddresses, &out.NotReadyAddresses + *out = make([]EndpointAddress, len(*in)) + for i := range *in { + if err := Convert_api_EndpointAddress_To_v1_EndpointAddress(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -682,9 +1587,10 @@ func autoConvert_api_EndpointSubset_To_v1_EndpointSubset(in *api.EndpointSubset, out.NotReadyAddresses = nil } if in.Ports != nil { - out.Ports = make([]EndpointPort, len(in.Ports)) - for i := range in.Ports { - if err := Convert_api_EndpointPort_To_v1_EndpointPort(&in.Ports[i], &out.Ports[i], s); err != nil { + in, out := &in.Ports, &out.Ports + *out = make([]EndpointPort, len(*in)) + for i := range *in { + if err := Convert_api_EndpointPort_To_v1_EndpointPort(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -698,17 +1604,44 @@ func Convert_api_EndpointSubset_To_v1_EndpointSubset(in *api.EndpointSubset, out return autoConvert_api_EndpointSubset_To_v1_EndpointSubset(in, out, s) } +func autoConvert_v1_Endpoints_To_api_Endpoints(in *Endpoints, out *api.Endpoints, s conversion.Scope) error { + SetDefaults_Endpoints(in) + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { + return err + } + if in.Subsets != nil { + in, out := &in.Subsets, &out.Subsets + *out = make([]api.EndpointSubset, len(*in)) + for i := range *in { + if err := Convert_v1_EndpointSubset_To_api_EndpointSubset(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Subsets = nil + } + return nil +} + +func Convert_v1_Endpoints_To_api_Endpoints(in *Endpoints, out *api.Endpoints, s conversion.Scope) error { + return autoConvert_v1_Endpoints_To_api_Endpoints(in, out, s) +} + func autoConvert_api_Endpoints_To_v1_Endpoints(in *api.Endpoints, out *Endpoints, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.Endpoints))(in) + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err } if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { return err } if in.Subsets != nil { - out.Subsets = make([]EndpointSubset, len(in.Subsets)) - for i := range in.Subsets { - if err := Convert_api_EndpointSubset_To_v1_EndpointSubset(&in.Subsets[i], &out.Subsets[i], s); err != nil { + in, out := &in.Subsets, &out.Subsets + *out = make([]EndpointSubset, len(*in)) + for i := range *in { + if err := Convert_api_EndpointSubset_To_v1_EndpointSubset(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -722,17 +1655,43 @@ func Convert_api_Endpoints_To_v1_Endpoints(in *api.Endpoints, out *Endpoints, s return autoConvert_api_Endpoints_To_v1_Endpoints(in, out, s) } +func autoConvert_v1_EndpointsList_To_api_EndpointsList(in *EndpointsList, out *api.EndpointsList, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { + return err + } + if in.Items != nil { + in, out := &in.Items, 
&out.Items + *out = make([]api.Endpoints, len(*in)) + for i := range *in { + if err := Convert_v1_Endpoints_To_api_Endpoints(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_v1_EndpointsList_To_api_EndpointsList(in *EndpointsList, out *api.EndpointsList, s conversion.Scope) error { + return autoConvert_v1_EndpointsList_To_api_EndpointsList(in, out, s) +} + func autoConvert_api_EndpointsList_To_v1_EndpointsList(in *api.EndpointsList, out *EndpointsList, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.EndpointsList))(in) + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err } if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { return err } if in.Items != nil { - out.Items = make([]Endpoints, len(in.Items)) - for i := range in.Items { - if err := Convert_api_Endpoints_To_v1_Endpoints(&in.Items[i], &out.Items[i], s); err != nil { + in, out := &in.Items, &out.Items + *out = make([]Endpoints, len(*in)) + for i := range *in { + if err := Convert_api_Endpoints_To_v1_Endpoints(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -746,16 +1705,32 @@ func Convert_api_EndpointsList_To_v1_EndpointsList(in *api.EndpointsList, out *E return autoConvert_api_EndpointsList_To_v1_EndpointsList(in, out, s) } -func autoConvert_api_EnvVar_To_v1_EnvVar(in *api.EnvVar, out *EnvVar, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.EnvVar))(in) +func autoConvert_v1_EnvVar_To_api_EnvVar(in *EnvVar, out *api.EnvVar, s conversion.Scope) error { + out.Name = in.Name + out.Value = in.Value + if in.ValueFrom != nil { + in, out := &in.ValueFrom, &out.ValueFrom + *out = new(api.EnvVarSource) + if err := Convert_v1_EnvVarSource_To_api_EnvVarSource(*in, *out, s); err != nil { + return err + } + } else { + out.ValueFrom = nil } + return nil +} + +func Convert_v1_EnvVar_To_api_EnvVar(in *EnvVar, out *api.EnvVar, s conversion.Scope) error { + return autoConvert_v1_EnvVar_To_api_EnvVar(in, out, s) +} + +func autoConvert_api_EnvVar_To_v1_EnvVar(in *api.EnvVar, out *EnvVar, s conversion.Scope) error { out.Name = in.Name out.Value = in.Value - // unable to generate simple pointer conversion for api.EnvVarSource -> v1.EnvVarSource if in.ValueFrom != nil { - out.ValueFrom = new(EnvVarSource) - if err := Convert_api_EnvVarSource_To_v1_EnvVarSource(in.ValueFrom, out.ValueFrom, s); err != nil { + in, out := &in.ValueFrom, &out.ValueFrom + *out = new(EnvVarSource) + if err := Convert_api_EnvVarSource_To_v1_EnvVarSource(*in, *out, s); err != nil { return err } } else { @@ -768,32 +1743,82 @@ func Convert_api_EnvVar_To_v1_EnvVar(in *api.EnvVar, out *EnvVar, s conversion.S return autoConvert_api_EnvVar_To_v1_EnvVar(in, out, s) } -func autoConvert_api_EnvVarSource_To_v1_EnvVarSource(in *api.EnvVarSource, out *EnvVarSource, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.EnvVarSource))(in) +func autoConvert_v1_EnvVarSource_To_api_EnvVarSource(in *EnvVarSource, out *api.EnvVarSource, s conversion.Scope) error { + if in.FieldRef != nil { + in, out := &in.FieldRef, &out.FieldRef + *out = new(api.ObjectFieldSelector) + if err := Convert_v1_ObjectFieldSelector_To_api_ObjectFieldSelector(*in, 
*out, s); err != nil { + return err + } + } else { + out.FieldRef = nil + } + if in.ResourceFieldRef != nil { + in, out := &in.ResourceFieldRef, &out.ResourceFieldRef + *out = new(api.ResourceFieldSelector) + if err := Convert_v1_ResourceFieldSelector_To_api_ResourceFieldSelector(*in, *out, s); err != nil { + return err + } + } else { + out.ResourceFieldRef = nil + } + if in.ConfigMapKeyRef != nil { + in, out := &in.ConfigMapKeyRef, &out.ConfigMapKeyRef + *out = new(api.ConfigMapKeySelector) + if err := Convert_v1_ConfigMapKeySelector_To_api_ConfigMapKeySelector(*in, *out, s); err != nil { + return err + } + } else { + out.ConfigMapKeyRef = nil + } + if in.SecretKeyRef != nil { + in, out := &in.SecretKeyRef, &out.SecretKeyRef + *out = new(api.SecretKeySelector) + if err := Convert_v1_SecretKeySelector_To_api_SecretKeySelector(*in, *out, s); err != nil { + return err + } + } else { + out.SecretKeyRef = nil } - // unable to generate simple pointer conversion for api.ObjectFieldSelector -> v1.ObjectFieldSelector + return nil +} + +func Convert_v1_EnvVarSource_To_api_EnvVarSource(in *EnvVarSource, out *api.EnvVarSource, s conversion.Scope) error { + return autoConvert_v1_EnvVarSource_To_api_EnvVarSource(in, out, s) +} + +func autoConvert_api_EnvVarSource_To_v1_EnvVarSource(in *api.EnvVarSource, out *EnvVarSource, s conversion.Scope) error { if in.FieldRef != nil { - out.FieldRef = new(ObjectFieldSelector) - if err := Convert_api_ObjectFieldSelector_To_v1_ObjectFieldSelector(in.FieldRef, out.FieldRef, s); err != nil { + in, out := &in.FieldRef, &out.FieldRef + *out = new(ObjectFieldSelector) + if err := Convert_api_ObjectFieldSelector_To_v1_ObjectFieldSelector(*in, *out, s); err != nil { return err } } else { out.FieldRef = nil } - // unable to generate simple pointer conversion for api.ConfigMapKeySelector -> v1.ConfigMapKeySelector + if in.ResourceFieldRef != nil { + in, out := &in.ResourceFieldRef, &out.ResourceFieldRef + *out = new(ResourceFieldSelector) + if err := Convert_api_ResourceFieldSelector_To_v1_ResourceFieldSelector(*in, *out, s); err != nil { + return err + } + } else { + out.ResourceFieldRef = nil + } if in.ConfigMapKeyRef != nil { - out.ConfigMapKeyRef = new(ConfigMapKeySelector) - if err := Convert_api_ConfigMapKeySelector_To_v1_ConfigMapKeySelector(in.ConfigMapKeyRef, out.ConfigMapKeyRef, s); err != nil { + in, out := &in.ConfigMapKeyRef, &out.ConfigMapKeyRef + *out = new(ConfigMapKeySelector) + if err := Convert_api_ConfigMapKeySelector_To_v1_ConfigMapKeySelector(*in, *out, s); err != nil { return err } } else { out.ConfigMapKeyRef = nil } - // unable to generate simple pointer conversion for api.SecretKeySelector -> v1.SecretKeySelector if in.SecretKeyRef != nil { - out.SecretKeyRef = new(SecretKeySelector) - if err := Convert_api_SecretKeySelector_To_v1_SecretKeySelector(in.SecretKeyRef, out.SecretKeyRef, s); err != nil { + in, out := &in.SecretKeyRef, &out.SecretKeyRef + *out = new(SecretKeySelector) + if err := Convert_api_SecretKeySelector_To_v1_SecretKeySelector(*in, *out, s); err != nil { return err } } else { @@ -806,19 +1831,19 @@ func Convert_api_EnvVarSource_To_v1_EnvVarSource(in *api.EnvVarSource, out *EnvV return autoConvert_api_EnvVarSource_To_v1_EnvVarSource(in, out, s) } -func autoConvert_api_Event_To_v1_Event(in *api.Event, out *Event, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.Event))(in) +func autoConvert_v1_Event_To_api_Event(in *Event, out *api.Event, s 
conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err } - if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { + if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { return err } - if err := Convert_api_ObjectReference_To_v1_ObjectReference(&in.InvolvedObject, &out.InvolvedObject, s); err != nil { + if err := Convert_v1_ObjectReference_To_api_ObjectReference(&in.InvolvedObject, &out.InvolvedObject, s); err != nil { return err } out.Reason = in.Reason out.Message = in.Message - if err := Convert_api_EventSource_To_v1_EventSource(&in.Source, &out.Source, s); err != nil { + if err := Convert_v1_EventSource_To_api_EventSource(&in.Source, &out.Source, s); err != nil { return err } if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.FirstTimestamp, &out.FirstTimestamp, s); err != nil { @@ -827,31 +1852,87 @@ func autoConvert_api_Event_To_v1_Event(in *api.Event, out *Event, s conversion.S if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastTimestamp, &out.LastTimestamp, s); err != nil { return err } - out.Count = int32(in.Count) + out.Count = in.Count out.Type = in.Type return nil } -func Convert_api_Event_To_v1_Event(in *api.Event, out *Event, s conversion.Scope) error { - return autoConvert_api_Event_To_v1_Event(in, out, s) +func Convert_v1_Event_To_api_Event(in *Event, out *api.Event, s conversion.Scope) error { + return autoConvert_v1_Event_To_api_Event(in, out, s) } -func autoConvert_api_EventList_To_v1_EventList(in *api.EventList, out *EventList, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.EventList))(in) - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { +func autoConvert_api_Event_To_v1_Event(in *api.Event, out *Event, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } - if in.Items != nil { - out.Items = make([]Event, len(in.Items)) - for i := range in.Items { - if err := Convert_api_Event_To_v1_Event(&in.Items[i], &out.Items[i], s); err != nil { - return err - } - } - } else { - out.Items = nil + if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { + return err + } + if err := Convert_api_ObjectReference_To_v1_ObjectReference(&in.InvolvedObject, &out.InvolvedObject, s); err != nil { + return err + } + out.Reason = in.Reason + out.Message = in.Message + if err := Convert_api_EventSource_To_v1_EventSource(&in.Source, &out.Source, s); err != nil { + return err + } + if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.FirstTimestamp, &out.FirstTimestamp, s); err != nil { + return err + } + if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastTimestamp, &out.LastTimestamp, s); err != nil { + return err + } + out.Count = in.Count + out.Type = in.Type + return nil +} + +func Convert_api_Event_To_v1_Event(in *api.Event, out *Event, s conversion.Scope) error { + return autoConvert_api_Event_To_v1_Event(in, out, s) +} + +func autoConvert_v1_EventList_To_api_EventList(in *EventList, out *api.EventList, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + 
} + if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { + return err + } + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]api.Event, len(*in)) + for i := range *in { + if err := Convert_v1_Event_To_api_Event(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_v1_EventList_To_api_EventList(in *EventList, out *api.EventList, s conversion.Scope) error { + return autoConvert_v1_EventList_To_api_EventList(in, out, s) +} + +func autoConvert_api_EventList_To_v1_EventList(in *api.EventList, out *EventList, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { + return err + } + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Event, len(*in)) + for i := range *in { + if err := Convert_api_Event_To_v1_Event(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil } return nil } @@ -860,10 +1941,17 @@ func Convert_api_EventList_To_v1_EventList(in *api.EventList, out *EventList, s return autoConvert_api_EventList_To_v1_EventList(in, out, s) } +func autoConvert_v1_EventSource_To_api_EventSource(in *EventSource, out *api.EventSource, s conversion.Scope) error { + out.Component = in.Component + out.Host = in.Host + return nil +} + +func Convert_v1_EventSource_To_api_EventSource(in *EventSource, out *api.EventSource, s conversion.Scope) error { + return autoConvert_v1_EventSource_To_api_EventSource(in, out, s) +} + func autoConvert_api_EventSource_To_v1_EventSource(in *api.EventSource, out *EventSource, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.EventSource))(in) - } out.Component = in.Component out.Host = in.Host return nil @@ -873,18 +1961,17 @@ func Convert_api_EventSource_To_v1_EventSource(in *api.EventSource, out *EventSo return autoConvert_api_EventSource_To_v1_EventSource(in, out, s) } +func autoConvert_v1_ExecAction_To_api_ExecAction(in *ExecAction, out *api.ExecAction, s conversion.Scope) error { + out.Command = in.Command + return nil +} + +func Convert_v1_ExecAction_To_api_ExecAction(in *ExecAction, out *api.ExecAction, s conversion.Scope) error { + return autoConvert_v1_ExecAction_To_api_ExecAction(in, out, s) +} + func autoConvert_api_ExecAction_To_v1_ExecAction(in *api.ExecAction, out *ExecAction, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.ExecAction))(in) - } - if in.Command != nil { - out.Command = make([]string, len(in.Command)) - for i := range in.Command { - out.Command[i] = in.Command[i] - } - } else { - out.Command = nil - } + out.Command = in.Command return nil } @@ -892,24 +1979,47 @@ func Convert_api_ExecAction_To_v1_ExecAction(in *api.ExecAction, out *ExecAction return autoConvert_api_ExecAction_To_v1_ExecAction(in, out, s) } -func autoConvert_api_FCVolumeSource_To_v1_FCVolumeSource(in *api.FCVolumeSource, out *FCVolumeSource, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.FCVolumeSource))(in) - } - if in.TargetWWNs != nil { - out.TargetWWNs = make([]string, len(in.TargetWWNs)) - for i := range 
in.TargetWWNs { - out.TargetWWNs[i] = in.TargetWWNs[i] - } - } else { - out.TargetWWNs = nil +func autoConvert_v1_ExportOptions_To_api_ExportOptions(in *ExportOptions, out *api.ExportOptions, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err } - if in.Lun != nil { - out.Lun = new(int32) - *out.Lun = int32(*in.Lun) - } else { - out.Lun = nil + out.Export = in.Export + out.Exact = in.Exact + return nil +} + +func Convert_v1_ExportOptions_To_api_ExportOptions(in *ExportOptions, out *api.ExportOptions, s conversion.Scope) error { + return autoConvert_v1_ExportOptions_To_api_ExportOptions(in, out, s) +} + +func autoConvert_api_ExportOptions_To_v1_ExportOptions(in *api.ExportOptions, out *ExportOptions, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err } + out.Export = in.Export + out.Exact = in.Exact + return nil +} + +func Convert_api_ExportOptions_To_v1_ExportOptions(in *api.ExportOptions, out *ExportOptions, s conversion.Scope) error { + return autoConvert_api_ExportOptions_To_v1_ExportOptions(in, out, s) +} + +func autoConvert_v1_FCVolumeSource_To_api_FCVolumeSource(in *FCVolumeSource, out *api.FCVolumeSource, s conversion.Scope) error { + out.TargetWWNs = in.TargetWWNs + out.Lun = in.Lun + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + return nil +} + +func Convert_v1_FCVolumeSource_To_api_FCVolumeSource(in *FCVolumeSource, out *api.FCVolumeSource, s conversion.Scope) error { + return autoConvert_v1_FCVolumeSource_To_api_FCVolumeSource(in, out, s) +} + +func autoConvert_api_FCVolumeSource_To_v1_FCVolumeSource(in *api.FCVolumeSource, out *FCVolumeSource, s conversion.Scope) error { + out.TargetWWNs = in.TargetWWNs + out.Lun = in.Lun out.FSType = in.FSType out.ReadOnly = in.ReadOnly return nil @@ -919,30 +2029,41 @@ func Convert_api_FCVolumeSource_To_v1_FCVolumeSource(in *api.FCVolumeSource, out return autoConvert_api_FCVolumeSource_To_v1_FCVolumeSource(in, out, s) } -func autoConvert_api_FlexVolumeSource_To_v1_FlexVolumeSource(in *api.FlexVolumeSource, out *FlexVolumeSource, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.FlexVolumeSource))(in) - } +func autoConvert_v1_FlexVolumeSource_To_api_FlexVolumeSource(in *FlexVolumeSource, out *api.FlexVolumeSource, s conversion.Scope) error { out.Driver = in.Driver out.FSType = in.FSType - // unable to generate simple pointer conversion for api.LocalObjectReference -> v1.LocalObjectReference if in.SecretRef != nil { - out.SecretRef = new(LocalObjectReference) - if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(in.SecretRef, out.SecretRef, s); err != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(api.LocalObjectReference) + if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(*in, *out, s); err != nil { return err } } else { out.SecretRef = nil } out.ReadOnly = in.ReadOnly - if in.Options != nil { - out.Options = make(map[string]string) - for key, val := range in.Options { - out.Options[key] = val + out.Options = in.Options + return nil +} + +func Convert_v1_FlexVolumeSource_To_api_FlexVolumeSource(in *FlexVolumeSource, out *api.FlexVolumeSource, s conversion.Scope) error { + return autoConvert_v1_FlexVolumeSource_To_api_FlexVolumeSource(in, out, s) +} + +func 
autoConvert_api_FlexVolumeSource_To_v1_FlexVolumeSource(in *api.FlexVolumeSource, out *FlexVolumeSource, s conversion.Scope) error { + out.Driver = in.Driver + out.FSType = in.FSType + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(LocalObjectReference) + if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(*in, *out, s); err != nil { + return err } } else { - out.Options = nil + out.SecretRef = nil } + out.ReadOnly = in.ReadOnly + out.Options = in.Options return nil } @@ -950,10 +2071,16 @@ func Convert_api_FlexVolumeSource_To_v1_FlexVolumeSource(in *api.FlexVolumeSourc return autoConvert_api_FlexVolumeSource_To_v1_FlexVolumeSource(in, out, s) } +func autoConvert_v1_FlockerVolumeSource_To_api_FlockerVolumeSource(in *FlockerVolumeSource, out *api.FlockerVolumeSource, s conversion.Scope) error { + out.DatasetName = in.DatasetName + return nil +} + +func Convert_v1_FlockerVolumeSource_To_api_FlockerVolumeSource(in *FlockerVolumeSource, out *api.FlockerVolumeSource, s conversion.Scope) error { + return autoConvert_v1_FlockerVolumeSource_To_api_FlockerVolumeSource(in, out, s) +} + func autoConvert_api_FlockerVolumeSource_To_v1_FlockerVolumeSource(in *api.FlockerVolumeSource, out *FlockerVolumeSource, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.FlockerVolumeSource))(in) - } out.DatasetName = in.DatasetName return nil } @@ -962,13 +2089,22 @@ func Convert_api_FlockerVolumeSource_To_v1_FlockerVolumeSource(in *api.FlockerVo return autoConvert_api_FlockerVolumeSource_To_v1_FlockerVolumeSource(in, out, s) } +func autoConvert_v1_GCEPersistentDiskVolumeSource_To_api_GCEPersistentDiskVolumeSource(in *GCEPersistentDiskVolumeSource, out *api.GCEPersistentDiskVolumeSource, s conversion.Scope) error { + out.PDName = in.PDName + out.FSType = in.FSType + out.Partition = in.Partition + out.ReadOnly = in.ReadOnly + return nil +} + +func Convert_v1_GCEPersistentDiskVolumeSource_To_api_GCEPersistentDiskVolumeSource(in *GCEPersistentDiskVolumeSource, out *api.GCEPersistentDiskVolumeSource, s conversion.Scope) error { + return autoConvert_v1_GCEPersistentDiskVolumeSource_To_api_GCEPersistentDiskVolumeSource(in, out, s) +} + func autoConvert_api_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource(in *api.GCEPersistentDiskVolumeSource, out *GCEPersistentDiskVolumeSource, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.GCEPersistentDiskVolumeSource))(in) - } out.PDName = in.PDName out.FSType = in.FSType - out.Partition = int32(in.Partition) + out.Partition = in.Partition out.ReadOnly = in.ReadOnly return nil } @@ -977,10 +2113,18 @@ func Convert_api_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSour return autoConvert_api_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource(in, out, s) } +func autoConvert_v1_GitRepoVolumeSource_To_api_GitRepoVolumeSource(in *GitRepoVolumeSource, out *api.GitRepoVolumeSource, s conversion.Scope) error { + out.Repository = in.Repository + out.Revision = in.Revision + out.Directory = in.Directory + return nil +} + +func Convert_v1_GitRepoVolumeSource_To_api_GitRepoVolumeSource(in *GitRepoVolumeSource, out *api.GitRepoVolumeSource, s conversion.Scope) error { + return autoConvert_v1_GitRepoVolumeSource_To_api_GitRepoVolumeSource(in, out, s) +} + func autoConvert_api_GitRepoVolumeSource_To_v1_GitRepoVolumeSource(in 
*api.GitRepoVolumeSource, out *GitRepoVolumeSource, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.GitRepoVolumeSource))(in) - } out.Repository = in.Repository out.Revision = in.Revision out.Directory = in.Directory @@ -991,10 +2135,18 @@ func Convert_api_GitRepoVolumeSource_To_v1_GitRepoVolumeSource(in *api.GitRepoVo return autoConvert_api_GitRepoVolumeSource_To_v1_GitRepoVolumeSource(in, out, s) } +func autoConvert_v1_GlusterfsVolumeSource_To_api_GlusterfsVolumeSource(in *GlusterfsVolumeSource, out *api.GlusterfsVolumeSource, s conversion.Scope) error { + out.EndpointsName = in.EndpointsName + out.Path = in.Path + out.ReadOnly = in.ReadOnly + return nil +} + +func Convert_v1_GlusterfsVolumeSource_To_api_GlusterfsVolumeSource(in *GlusterfsVolumeSource, out *api.GlusterfsVolumeSource, s conversion.Scope) error { + return autoConvert_v1_GlusterfsVolumeSource_To_api_GlusterfsVolumeSource(in, out, s) +} + func autoConvert_api_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource(in *api.GlusterfsVolumeSource, out *GlusterfsVolumeSource, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.GlusterfsVolumeSource))(in) - } out.EndpointsName = in.EndpointsName out.Path = in.Path out.ReadOnly = in.ReadOnly @@ -1005,10 +2157,33 @@ func Convert_api_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource(in *api.Glust return autoConvert_api_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource(in, out, s) } -func autoConvert_api_HTTPGetAction_To_v1_HTTPGetAction(in *api.HTTPGetAction, out *HTTPGetAction, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.HTTPGetAction))(in) +func autoConvert_v1_HTTPGetAction_To_api_HTTPGetAction(in *HTTPGetAction, out *api.HTTPGetAction, s conversion.Scope) error { + SetDefaults_HTTPGetAction(in) + out.Path = in.Path + if err := api.Convert_intstr_IntOrString_To_intstr_IntOrString(&in.Port, &out.Port, s); err != nil { + return err + } + out.Host = in.Host + out.Scheme = api.URIScheme(in.Scheme) + if in.HTTPHeaders != nil { + in, out := &in.HTTPHeaders, &out.HTTPHeaders + *out = make([]api.HTTPHeader, len(*in)) + for i := range *in { + if err := Convert_v1_HTTPHeader_To_api_HTTPHeader(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.HTTPHeaders = nil } + return nil +} + +func Convert_v1_HTTPGetAction_To_api_HTTPGetAction(in *HTTPGetAction, out *api.HTTPGetAction, s conversion.Scope) error { + return autoConvert_v1_HTTPGetAction_To_api_HTTPGetAction(in, out, s) +} + +func autoConvert_api_HTTPGetAction_To_v1_HTTPGetAction(in *api.HTTPGetAction, out *HTTPGetAction, s conversion.Scope) error { out.Path = in.Path if err := api.Convert_intstr_IntOrString_To_intstr_IntOrString(&in.Port, &out.Port, s); err != nil { return err @@ -1016,9 +2191,10 @@ func autoConvert_api_HTTPGetAction_To_v1_HTTPGetAction(in *api.HTTPGetAction, ou out.Host = in.Host out.Scheme = URIScheme(in.Scheme) if in.HTTPHeaders != nil { - out.HTTPHeaders = make([]HTTPHeader, len(in.HTTPHeaders)) - for i := range in.HTTPHeaders { - if err := Convert_api_HTTPHeader_To_v1_HTTPHeader(&in.HTTPHeaders[i], &out.HTTPHeaders[i], s); err != nil { + in, out := &in.HTTPHeaders, &out.HTTPHeaders + *out = make([]HTTPHeader, len(*in)) + for i := range *in { + if err := Convert_api_HTTPHeader_To_v1_HTTPHeader(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ 
-1032,10 +2208,17 @@ func Convert_api_HTTPGetAction_To_v1_HTTPGetAction(in *api.HTTPGetAction, out *H return autoConvert_api_HTTPGetAction_To_v1_HTTPGetAction(in, out, s) } +func autoConvert_v1_HTTPHeader_To_api_HTTPHeader(in *HTTPHeader, out *api.HTTPHeader, s conversion.Scope) error { + out.Name = in.Name + out.Value = in.Value + return nil +} + +func Convert_v1_HTTPHeader_To_api_HTTPHeader(in *HTTPHeader, out *api.HTTPHeader, s conversion.Scope) error { + return autoConvert_v1_HTTPHeader_To_api_HTTPHeader(in, out, s) +} + func autoConvert_api_HTTPHeader_To_v1_HTTPHeader(in *api.HTTPHeader, out *HTTPHeader, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.HTTPHeader))(in) - } out.Name = in.Name out.Value = in.Value return nil @@ -1045,32 +2228,64 @@ func Convert_api_HTTPHeader_To_v1_HTTPHeader(in *api.HTTPHeader, out *HTTPHeader return autoConvert_api_HTTPHeader_To_v1_HTTPHeader(in, out, s) } -func autoConvert_api_Handler_To_v1_Handler(in *api.Handler, out *Handler, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.Handler))(in) +func autoConvert_v1_Handler_To_api_Handler(in *Handler, out *api.Handler, s conversion.Scope) error { + if in.Exec != nil { + in, out := &in.Exec, &out.Exec + *out = new(api.ExecAction) + if err := Convert_v1_ExecAction_To_api_ExecAction(*in, *out, s); err != nil { + return err + } + } else { + out.Exec = nil + } + if in.HTTPGet != nil { + in, out := &in.HTTPGet, &out.HTTPGet + *out = new(api.HTTPGetAction) + if err := Convert_v1_HTTPGetAction_To_api_HTTPGetAction(*in, *out, s); err != nil { + return err + } + } else { + out.HTTPGet = nil + } + if in.TCPSocket != nil { + in, out := &in.TCPSocket, &out.TCPSocket + *out = new(api.TCPSocketAction) + if err := Convert_v1_TCPSocketAction_To_api_TCPSocketAction(*in, *out, s); err != nil { + return err + } + } else { + out.TCPSocket = nil } - // unable to generate simple pointer conversion for api.ExecAction -> v1.ExecAction + return nil +} + +func Convert_v1_Handler_To_api_Handler(in *Handler, out *api.Handler, s conversion.Scope) error { + return autoConvert_v1_Handler_To_api_Handler(in, out, s) +} + +func autoConvert_api_Handler_To_v1_Handler(in *api.Handler, out *Handler, s conversion.Scope) error { if in.Exec != nil { - out.Exec = new(ExecAction) - if err := Convert_api_ExecAction_To_v1_ExecAction(in.Exec, out.Exec, s); err != nil { + in, out := &in.Exec, &out.Exec + *out = new(ExecAction) + if err := Convert_api_ExecAction_To_v1_ExecAction(*in, *out, s); err != nil { return err } } else { out.Exec = nil } - // unable to generate simple pointer conversion for api.HTTPGetAction -> v1.HTTPGetAction if in.HTTPGet != nil { - out.HTTPGet = new(HTTPGetAction) - if err := Convert_api_HTTPGetAction_To_v1_HTTPGetAction(in.HTTPGet, out.HTTPGet, s); err != nil { + in, out := &in.HTTPGet, &out.HTTPGet + *out = new(HTTPGetAction) + if err := Convert_api_HTTPGetAction_To_v1_HTTPGetAction(*in, *out, s); err != nil { return err } } else { out.HTTPGet = nil } - // unable to generate simple pointer conversion for api.TCPSocketAction -> v1.TCPSocketAction if in.TCPSocket != nil { - out.TCPSocket = new(TCPSocketAction) - if err := Convert_api_TCPSocketAction_To_v1_TCPSocketAction(in.TCPSocket, out.TCPSocket, s); err != nil { + in, out := &in.TCPSocket, &out.TCPSocket + *out = new(TCPSocketAction) + if err := Convert_api_TCPSocketAction_To_v1_TCPSocketAction(*in, *out, s); err 
!= nil { return err } } else { @@ -1083,10 +2298,16 @@ func Convert_api_Handler_To_v1_Handler(in *api.Handler, out *Handler, s conversi return autoConvert_api_Handler_To_v1_Handler(in, out, s) } +func autoConvert_v1_HostPathVolumeSource_To_api_HostPathVolumeSource(in *HostPathVolumeSource, out *api.HostPathVolumeSource, s conversion.Scope) error { + out.Path = in.Path + return nil +} + +func Convert_v1_HostPathVolumeSource_To_api_HostPathVolumeSource(in *HostPathVolumeSource, out *api.HostPathVolumeSource, s conversion.Scope) error { + return autoConvert_v1_HostPathVolumeSource_To_api_HostPathVolumeSource(in, out, s) +} + func autoConvert_api_HostPathVolumeSource_To_v1_HostPathVolumeSource(in *api.HostPathVolumeSource, out *HostPathVolumeSource, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.HostPathVolumeSource))(in) - } out.Path = in.Path return nil } @@ -1095,53 +2316,95 @@ func Convert_api_HostPathVolumeSource_To_v1_HostPathVolumeSource(in *api.HostPat return autoConvert_api_HostPathVolumeSource_To_v1_HostPathVolumeSource(in, out, s) } -func autoConvert_api_ISCSIVolumeSource_To_v1_ISCSIVolumeSource(in *api.ISCSIVolumeSource, out *ISCSIVolumeSource, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.ISCSIVolumeSource))(in) - } +func autoConvert_v1_ISCSIVolumeSource_To_api_ISCSIVolumeSource(in *ISCSIVolumeSource, out *api.ISCSIVolumeSource, s conversion.Scope) error { + SetDefaults_ISCSIVolumeSource(in) out.TargetPortal = in.TargetPortal out.IQN = in.IQN - out.Lun = int32(in.Lun) + out.Lun = in.Lun out.ISCSIInterface = in.ISCSIInterface out.FSType = in.FSType out.ReadOnly = in.ReadOnly return nil } -func Convert_api_ISCSIVolumeSource_To_v1_ISCSIVolumeSource(in *api.ISCSIVolumeSource, out *ISCSIVolumeSource, s conversion.Scope) error { - return autoConvert_api_ISCSIVolumeSource_To_v1_ISCSIVolumeSource(in, out, s) +func Convert_v1_ISCSIVolumeSource_To_api_ISCSIVolumeSource(in *ISCSIVolumeSource, out *api.ISCSIVolumeSource, s conversion.Scope) error { + return autoConvert_v1_ISCSIVolumeSource_To_api_ISCSIVolumeSource(in, out, s) } -func autoConvert_api_KeyToPath_To_v1_KeyToPath(in *api.KeyToPath, out *KeyToPath, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.KeyToPath))(in) - } - out.Key = in.Key - out.Path = in.Path - return nil -} +func autoConvert_api_ISCSIVolumeSource_To_v1_ISCSIVolumeSource(in *api.ISCSIVolumeSource, out *ISCSIVolumeSource, s conversion.Scope) error { + out.TargetPortal = in.TargetPortal + out.IQN = in.IQN + out.Lun = in.Lun + out.ISCSIInterface = in.ISCSIInterface + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + return nil +} + +func Convert_api_ISCSIVolumeSource_To_v1_ISCSIVolumeSource(in *api.ISCSIVolumeSource, out *ISCSIVolumeSource, s conversion.Scope) error { + return autoConvert_api_ISCSIVolumeSource_To_v1_ISCSIVolumeSource(in, out, s) +} + +func autoConvert_v1_KeyToPath_To_api_KeyToPath(in *KeyToPath, out *api.KeyToPath, s conversion.Scope) error { + out.Key = in.Key + out.Path = in.Path + return nil +} + +func Convert_v1_KeyToPath_To_api_KeyToPath(in *KeyToPath, out *api.KeyToPath, s conversion.Scope) error { + return autoConvert_v1_KeyToPath_To_api_KeyToPath(in, out, s) +} + +func autoConvert_api_KeyToPath_To_v1_KeyToPath(in *api.KeyToPath, out *KeyToPath, s conversion.Scope) error { + out.Key = in.Key + 
out.Path = in.Path + return nil +} func Convert_api_KeyToPath_To_v1_KeyToPath(in *api.KeyToPath, out *KeyToPath, s conversion.Scope) error { return autoConvert_api_KeyToPath_To_v1_KeyToPath(in, out, s) } -func autoConvert_api_Lifecycle_To_v1_Lifecycle(in *api.Lifecycle, out *Lifecycle, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.Lifecycle))(in) +func autoConvert_v1_Lifecycle_To_api_Lifecycle(in *Lifecycle, out *api.Lifecycle, s conversion.Scope) error { + if in.PostStart != nil { + in, out := &in.PostStart, &out.PostStart + *out = new(api.Handler) + if err := Convert_v1_Handler_To_api_Handler(*in, *out, s); err != nil { + return err + } + } else { + out.PostStart = nil + } + if in.PreStop != nil { + in, out := &in.PreStop, &out.PreStop + *out = new(api.Handler) + if err := Convert_v1_Handler_To_api_Handler(*in, *out, s); err != nil { + return err + } + } else { + out.PreStop = nil } - // unable to generate simple pointer conversion for api.Handler -> v1.Handler + return nil +} + +func Convert_v1_Lifecycle_To_api_Lifecycle(in *Lifecycle, out *api.Lifecycle, s conversion.Scope) error { + return autoConvert_v1_Lifecycle_To_api_Lifecycle(in, out, s) +} + +func autoConvert_api_Lifecycle_To_v1_Lifecycle(in *api.Lifecycle, out *Lifecycle, s conversion.Scope) error { if in.PostStart != nil { - out.PostStart = new(Handler) - if err := Convert_api_Handler_To_v1_Handler(in.PostStart, out.PostStart, s); err != nil { + in, out := &in.PostStart, &out.PostStart + *out = new(Handler) + if err := Convert_api_Handler_To_v1_Handler(*in, *out, s); err != nil { return err } } else { out.PostStart = nil } - // unable to generate simple pointer conversion for api.Handler -> v1.Handler if in.PreStop != nil { - out.PreStop = new(Handler) - if err := Convert_api_Handler_To_v1_Handler(in.PreStop, out.PreStop, s); err != nil { + in, out := &in.PreStop, &out.PreStop + *out = new(Handler) + if err := Convert_api_Handler_To_v1_Handler(*in, *out, s); err != nil { return err } } else { @@ -1154,9 +2417,26 @@ func Convert_api_Lifecycle_To_v1_Lifecycle(in *api.Lifecycle, out *Lifecycle, s return autoConvert_api_Lifecycle_To_v1_Lifecycle(in, out, s) } +func autoConvert_v1_LimitRange_To_api_LimitRange(in *LimitRange, out *api.LimitRange, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { + return err + } + if err := Convert_v1_LimitRangeSpec_To_api_LimitRangeSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + return nil +} + +func Convert_v1_LimitRange_To_api_LimitRange(in *LimitRange, out *api.LimitRange, s conversion.Scope) error { + return autoConvert_v1_LimitRange_To_api_LimitRange(in, out, s) +} + func autoConvert_api_LimitRange_To_v1_LimitRange(in *api.LimitRange, out *LimitRange, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.LimitRange))(in) + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err } if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { return err @@ -1171,67 +2451,94 @@ func Convert_api_LimitRange_To_v1_LimitRange(in *api.LimitRange, out *LimitRange return autoConvert_api_LimitRange_To_v1_LimitRange(in, 
out, s) } -func autoConvert_api_LimitRangeItem_To_v1_LimitRangeItem(in *api.LimitRangeItem, out *LimitRangeItem, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.LimitRangeItem))(in) +func autoConvert_v1_LimitRangeItem_To_api_LimitRangeItem(in *LimitRangeItem, out *api.LimitRangeItem, s conversion.Scope) error { + SetDefaults_LimitRangeItem(in) + out.Type = api.LimitType(in.Type) + if err := Convert_v1_ResourceList_To_api_ResourceList(&in.Max, &out.Max, s); err != nil { + return err + } + if err := Convert_v1_ResourceList_To_api_ResourceList(&in.Min, &out.Min, s); err != nil { + return err } + if err := Convert_v1_ResourceList_To_api_ResourceList(&in.Default, &out.Default, s); err != nil { + return err + } + if err := Convert_v1_ResourceList_To_api_ResourceList(&in.DefaultRequest, &out.DefaultRequest, s); err != nil { + return err + } + if err := Convert_v1_ResourceList_To_api_ResourceList(&in.MaxLimitRequestRatio, &out.MaxLimitRequestRatio, s); err != nil { + return err + } + return nil +} + +func Convert_v1_LimitRangeItem_To_api_LimitRangeItem(in *LimitRangeItem, out *api.LimitRangeItem, s conversion.Scope) error { + return autoConvert_v1_LimitRangeItem_To_api_LimitRangeItem(in, out, s) +} + +func autoConvert_api_LimitRangeItem_To_v1_LimitRangeItem(in *api.LimitRangeItem, out *LimitRangeItem, s conversion.Scope) error { out.Type = LimitType(in.Type) if in.Max != nil { - out.Max = make(ResourceList) - for key, val := range in.Max { - newVal := resource.Quantity{} - if err := api.Convert_resource_Quantity_To_resource_Quantity(&val, &newVal, s); err != nil { + in, out := &in.Max, &out.Max + *out = make(ResourceList, len(*in)) + for key, val := range *in { + newVal := new(resource.Quantity) + if err := api.Convert_resource_Quantity_To_resource_Quantity(&val, newVal, s); err != nil { return err } - out.Max[ResourceName(key)] = newVal + (*out)[ResourceName(key)] = *newVal } } else { out.Max = nil } if in.Min != nil { - out.Min = make(ResourceList) - for key, val := range in.Min { - newVal := resource.Quantity{} - if err := api.Convert_resource_Quantity_To_resource_Quantity(&val, &newVal, s); err != nil { + in, out := &in.Min, &out.Min + *out = make(ResourceList, len(*in)) + for key, val := range *in { + newVal := new(resource.Quantity) + if err := api.Convert_resource_Quantity_To_resource_Quantity(&val, newVal, s); err != nil { return err } - out.Min[ResourceName(key)] = newVal + (*out)[ResourceName(key)] = *newVal } } else { out.Min = nil } if in.Default != nil { - out.Default = make(ResourceList) - for key, val := range in.Default { - newVal := resource.Quantity{} - if err := api.Convert_resource_Quantity_To_resource_Quantity(&val, &newVal, s); err != nil { + in, out := &in.Default, &out.Default + *out = make(ResourceList, len(*in)) + for key, val := range *in { + newVal := new(resource.Quantity) + if err := api.Convert_resource_Quantity_To_resource_Quantity(&val, newVal, s); err != nil { return err } - out.Default[ResourceName(key)] = newVal + (*out)[ResourceName(key)] = *newVal } } else { out.Default = nil } if in.DefaultRequest != nil { - out.DefaultRequest = make(ResourceList) - for key, val := range in.DefaultRequest { - newVal := resource.Quantity{} - if err := api.Convert_resource_Quantity_To_resource_Quantity(&val, &newVal, s); err != nil { + in, out := &in.DefaultRequest, &out.DefaultRequest + *out = make(ResourceList, len(*in)) + for key, val := range *in { + newVal := new(resource.Quantity) + if err 
:= api.Convert_resource_Quantity_To_resource_Quantity(&val, newVal, s); err != nil { return err } - out.DefaultRequest[ResourceName(key)] = newVal + (*out)[ResourceName(key)] = *newVal } } else { out.DefaultRequest = nil } if in.MaxLimitRequestRatio != nil { - out.MaxLimitRequestRatio = make(ResourceList) - for key, val := range in.MaxLimitRequestRatio { - newVal := resource.Quantity{} - if err := api.Convert_resource_Quantity_To_resource_Quantity(&val, &newVal, s); err != nil { + in, out := &in.MaxLimitRequestRatio, &out.MaxLimitRequestRatio + *out = make(ResourceList, len(*in)) + for key, val := range *in { + newVal := new(resource.Quantity) + if err := api.Convert_resource_Quantity_To_resource_Quantity(&val, newVal, s); err != nil { return err } - out.MaxLimitRequestRatio[ResourceName(key)] = newVal + (*out)[ResourceName(key)] = *newVal } } else { out.MaxLimitRequestRatio = nil @@ -1243,17 +2550,43 @@ func Convert_api_LimitRangeItem_To_v1_LimitRangeItem(in *api.LimitRangeItem, out return autoConvert_api_LimitRangeItem_To_v1_LimitRangeItem(in, out, s) } +func autoConvert_v1_LimitRangeList_To_api_LimitRangeList(in *LimitRangeList, out *api.LimitRangeList, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { + return err + } + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]api.LimitRange, len(*in)) + for i := range *in { + if err := Convert_v1_LimitRange_To_api_LimitRange(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_v1_LimitRangeList_To_api_LimitRangeList(in *LimitRangeList, out *api.LimitRangeList, s conversion.Scope) error { + return autoConvert_v1_LimitRangeList_To_api_LimitRangeList(in, out, s) +} + func autoConvert_api_LimitRangeList_To_v1_LimitRangeList(in *api.LimitRangeList, out *LimitRangeList, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.LimitRangeList))(in) + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err } if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { return err } if in.Items != nil { - out.Items = make([]LimitRange, len(in.Items)) - for i := range in.Items { - if err := Convert_api_LimitRange_To_v1_LimitRange(&in.Items[i], &out.Items[i], s); err != nil { + in, out := &in.Items, &out.Items + *out = make([]LimitRange, len(*in)) + for i := range *in { + if err := Convert_api_LimitRange_To_v1_LimitRange(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -1267,14 +2600,31 @@ func Convert_api_LimitRangeList_To_v1_LimitRangeList(in *api.LimitRangeList, out return autoConvert_api_LimitRangeList_To_v1_LimitRangeList(in, out, s) } -func autoConvert_api_LimitRangeSpec_To_v1_LimitRangeSpec(in *api.LimitRangeSpec, out *LimitRangeSpec, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.LimitRangeSpec))(in) +func autoConvert_v1_LimitRangeSpec_To_api_LimitRangeSpec(in *LimitRangeSpec, out *api.LimitRangeSpec, s conversion.Scope) error { + if in.Limits != nil { + in, out := &in.Limits, &out.Limits + *out = make([]api.LimitRangeItem, len(*in)) + for i := range 
*in { + if err := Convert_v1_LimitRangeItem_To_api_LimitRangeItem(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Limits = nil } + return nil +} + +func Convert_v1_LimitRangeSpec_To_api_LimitRangeSpec(in *LimitRangeSpec, out *api.LimitRangeSpec, s conversion.Scope) error { + return autoConvert_v1_LimitRangeSpec_To_api_LimitRangeSpec(in, out, s) +} + +func autoConvert_api_LimitRangeSpec_To_v1_LimitRangeSpec(in *api.LimitRangeSpec, out *LimitRangeSpec, s conversion.Scope) error { if in.Limits != nil { - out.Limits = make([]LimitRangeItem, len(in.Limits)) - for i := range in.Limits { - if err := Convert_api_LimitRangeItem_To_v1_LimitRangeItem(&in.Limits[i], &out.Limits[i], s); err != nil { + in, out := &in.Limits, &out.Limits + *out = make([]LimitRangeItem, len(*in)) + for i := range *in { + if err := Convert_api_LimitRangeItem_To_v1_LimitRangeItem(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -1288,17 +2638,43 @@ func Convert_api_LimitRangeSpec_To_v1_LimitRangeSpec(in *api.LimitRangeSpec, out return autoConvert_api_LimitRangeSpec_To_v1_LimitRangeSpec(in, out, s) } +func autoConvert_v1_List_To_api_List(in *List, out *api.List, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { + return err + } + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]runtime.Object, len(*in)) + for i := range *in { + if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_v1_List_To_api_List(in *List, out *api.List, s conversion.Scope) error { + return autoConvert_v1_List_To_api_List(in, out, s) +} + func autoConvert_api_List_To_v1_List(in *api.List, out *List, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.List))(in) + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err } if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { return err } if in.Items != nil { - out.Items = make([]runtime.RawExtension, len(in.Items)) - for i := range in.Items { - if err := s.Convert(&in.Items[i], &out.Items[i], 0); err != nil { + in, out := &in.Items, &out.Items + *out = make([]runtime.RawExtension, len(*in)) + for i := range *in { + if err := runtime.Convert_runtime_Object_To_runtime_RawExtension(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -1312,9 +2688,29 @@ func Convert_api_List_To_v1_List(in *api.List, out *List, s conversion.Scope) er return autoConvert_api_List_To_v1_List(in, out, s) } +func autoConvert_v1_ListOptions_To_api_ListOptions(in *ListOptions, out *api.ListOptions, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := api.Convert_string_To_labels_Selector(&in.LabelSelector, &out.LabelSelector, s); err != nil { + return err + } + if err := api.Convert_string_To_fields_Selector(&in.FieldSelector, &out.FieldSelector, s); err != nil { + return err + } + out.Watch = in.Watch + out.ResourceVersion = in.ResourceVersion + out.TimeoutSeconds = in.TimeoutSeconds + return 
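// In the List conversion above, the old fallback through the generic
// s.Convert(&in.Items[i], &out.Items[i], 0) is replaced by the dedicated
// runtime.Convert_runtime_RawExtension_To_runtime_Object /
// Convert_runtime_Object_To_runtime_RawExtension pair, so the hop between
// typed objects and raw payloads is dispatched statically. A deliberately
// crude, eager toy of that shape follows; the real runtime converters manage
// when (and whether) serialization happens and are not reproduced here.
package sketch

import "encoding/json"

// Object and RawExtension are stand-ins for the runtime package's types.
type Object interface{}

type RawExtension struct{ Raw []byte }

func convertObjectToRawExtension(in Object, out *RawExtension) error {
	raw, err := json.Marshal(in) // toy: serialize eagerly for illustration
	if err != nil {
		return err
	}
	out.Raw = raw
	return nil
}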
nil +} + +func Convert_v1_ListOptions_To_api_ListOptions(in *ListOptions, out *api.ListOptions, s conversion.Scope) error { + return autoConvert_v1_ListOptions_To_api_ListOptions(in, out, s) +} + func autoConvert_api_ListOptions_To_v1_ListOptions(in *api.ListOptions, out *ListOptions, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.ListOptions))(in) + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err } if err := api.Convert_labels_Selector_To_string(&in.LabelSelector, &out.LabelSelector, s); err != nil { return err @@ -1324,12 +2720,7 @@ func autoConvert_api_ListOptions_To_v1_ListOptions(in *api.ListOptions, out *Lis } out.Watch = in.Watch out.ResourceVersion = in.ResourceVersion - if in.TimeoutSeconds != nil { - out.TimeoutSeconds = new(int64) - *out.TimeoutSeconds = *in.TimeoutSeconds - } else { - out.TimeoutSeconds = nil - } + out.TimeoutSeconds = in.TimeoutSeconds return nil } @@ -1337,10 +2728,17 @@ func Convert_api_ListOptions_To_v1_ListOptions(in *api.ListOptions, out *ListOpt return autoConvert_api_ListOptions_To_v1_ListOptions(in, out, s) } +func autoConvert_v1_LoadBalancerIngress_To_api_LoadBalancerIngress(in *LoadBalancerIngress, out *api.LoadBalancerIngress, s conversion.Scope) error { + out.IP = in.IP + out.Hostname = in.Hostname + return nil +} + +func Convert_v1_LoadBalancerIngress_To_api_LoadBalancerIngress(in *LoadBalancerIngress, out *api.LoadBalancerIngress, s conversion.Scope) error { + return autoConvert_v1_LoadBalancerIngress_To_api_LoadBalancerIngress(in, out, s) +} + func autoConvert_api_LoadBalancerIngress_To_v1_LoadBalancerIngress(in *api.LoadBalancerIngress, out *LoadBalancerIngress, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.LoadBalancerIngress))(in) - } out.IP = in.IP out.Hostname = in.Hostname return nil @@ -1350,14 +2748,31 @@ func Convert_api_LoadBalancerIngress_To_v1_LoadBalancerIngress(in *api.LoadBalan return autoConvert_api_LoadBalancerIngress_To_v1_LoadBalancerIngress(in, out, s) } -func autoConvert_api_LoadBalancerStatus_To_v1_LoadBalancerStatus(in *api.LoadBalancerStatus, out *LoadBalancerStatus, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.LoadBalancerStatus))(in) +func autoConvert_v1_LoadBalancerStatus_To_api_LoadBalancerStatus(in *LoadBalancerStatus, out *api.LoadBalancerStatus, s conversion.Scope) error { + if in.Ingress != nil { + in, out := &in.Ingress, &out.Ingress + *out = make([]api.LoadBalancerIngress, len(*in)) + for i := range *in { + if err := Convert_v1_LoadBalancerIngress_To_api_LoadBalancerIngress(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Ingress = nil } + return nil +} + +func Convert_v1_LoadBalancerStatus_To_api_LoadBalancerStatus(in *LoadBalancerStatus, out *api.LoadBalancerStatus, s conversion.Scope) error { + return autoConvert_v1_LoadBalancerStatus_To_api_LoadBalancerStatus(in, out, s) +} + +func autoConvert_api_LoadBalancerStatus_To_v1_LoadBalancerStatus(in *api.LoadBalancerStatus, out *LoadBalancerStatus, s conversion.Scope) error { if in.Ingress != nil { - out.Ingress = make([]LoadBalancerIngress, len(in.Ingress)) - for i := range in.Ingress { - if err := Convert_api_LoadBalancerIngress_To_v1_LoadBalancerIngress(&in.Ingress[i], &out.Ingress[i], s); err != nil { + in, 
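// The ListOptions hunk above drops the deep copy of the *int64 field
// TimeoutSeconds in favor of assigning the pointer directly. A small sketch
// of the difference between the two; the struct and field are illustrative.
package sketch

type options struct{ TimeoutSeconds *int64 }

func deepCopyTimeout(in, out *options) {
	if in.TimeoutSeconds != nil {
		out.TimeoutSeconds = new(int64)
		*out.TimeoutSeconds = *in.TimeoutSeconds // two independent values
	} else {
		out.TimeoutSeconds = nil
	}
}

func shareTimeout(in, out *options) {
	// Both structs now point at the same int64: cheaper, and safe as long as
	// conversions treat the source as read-only afterwards.
	out.TimeoutSeconds = in.TimeoutSeconds
}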
out := &in.Ingress, &out.Ingress + *out = make([]LoadBalancerIngress, len(*in)) + for i := range *in { + if err := Convert_api_LoadBalancerIngress_To_v1_LoadBalancerIngress(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -1371,10 +2786,16 @@ func Convert_api_LoadBalancerStatus_To_v1_LoadBalancerStatus(in *api.LoadBalance return autoConvert_api_LoadBalancerStatus_To_v1_LoadBalancerStatus(in, out, s) } +func autoConvert_v1_LocalObjectReference_To_api_LocalObjectReference(in *LocalObjectReference, out *api.LocalObjectReference, s conversion.Scope) error { + out.Name = in.Name + return nil +} + +func Convert_v1_LocalObjectReference_To_api_LocalObjectReference(in *LocalObjectReference, out *api.LocalObjectReference, s conversion.Scope) error { + return autoConvert_v1_LocalObjectReference_To_api_LocalObjectReference(in, out, s) +} + func autoConvert_api_LocalObjectReference_To_v1_LocalObjectReference(in *api.LocalObjectReference, out *LocalObjectReference, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.LocalObjectReference))(in) - } out.Name = in.Name return nil } @@ -1383,10 +2804,18 @@ func Convert_api_LocalObjectReference_To_v1_LocalObjectReference(in *api.LocalOb return autoConvert_api_LocalObjectReference_To_v1_LocalObjectReference(in, out, s) } +func autoConvert_v1_NFSVolumeSource_To_api_NFSVolumeSource(in *NFSVolumeSource, out *api.NFSVolumeSource, s conversion.Scope) error { + out.Server = in.Server + out.Path = in.Path + out.ReadOnly = in.ReadOnly + return nil +} + +func Convert_v1_NFSVolumeSource_To_api_NFSVolumeSource(in *NFSVolumeSource, out *api.NFSVolumeSource, s conversion.Scope) error { + return autoConvert_v1_NFSVolumeSource_To_api_NFSVolumeSource(in, out, s) +} + func autoConvert_api_NFSVolumeSource_To_v1_NFSVolumeSource(in *api.NFSVolumeSource, out *NFSVolumeSource, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.NFSVolumeSource))(in) - } out.Server = in.Server out.Path = in.Path out.ReadOnly = in.ReadOnly @@ -1397,9 +2826,29 @@ func Convert_api_NFSVolumeSource_To_v1_NFSVolumeSource(in *api.NFSVolumeSource, return autoConvert_api_NFSVolumeSource_To_v1_NFSVolumeSource(in, out, s) } +func autoConvert_v1_Namespace_To_api_Namespace(in *Namespace, out *api.Namespace, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { + return err + } + if err := Convert_v1_NamespaceSpec_To_api_NamespaceSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_NamespaceStatus_To_api_NamespaceStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v1_Namespace_To_api_Namespace(in *Namespace, out *api.Namespace, s conversion.Scope) error { + return autoConvert_v1_Namespace_To_api_Namespace(in, out, s) +} + func autoConvert_api_Namespace_To_v1_Namespace(in *api.Namespace, out *Namespace, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.Namespace))(in) + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err } if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { 
return err @@ -1417,17 +2866,18 @@ func Convert_api_Namespace_To_v1_Namespace(in *api.Namespace, out *Namespace, s return autoConvert_api_Namespace_To_v1_Namespace(in, out, s) } -func autoConvert_api_NamespaceList_To_v1_NamespaceList(in *api.NamespaceList, out *NamespaceList, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.NamespaceList))(in) +func autoConvert_v1_NamespaceList_To_api_NamespaceList(in *NamespaceList, out *api.NamespaceList, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err } if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { return err } if in.Items != nil { - out.Items = make([]Namespace, len(in.Items)) - for i := range in.Items { - if err := Convert_api_Namespace_To_v1_Namespace(&in.Items[i], &out.Items[i], s); err != nil { + in, out := &in.Items, &out.Items + *out = make([]api.Namespace, len(*in)) + for i := range *in { + if err := Convert_v1_Namespace_To_api_Namespace(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -1437,33 +2887,80 @@ func autoConvert_api_NamespaceList_To_v1_NamespaceList(in *api.NamespaceList, ou return nil } -func Convert_api_NamespaceList_To_v1_NamespaceList(in *api.NamespaceList, out *NamespaceList, s conversion.Scope) error { - return autoConvert_api_NamespaceList_To_v1_NamespaceList(in, out, s) +func Convert_v1_NamespaceList_To_api_NamespaceList(in *NamespaceList, out *api.NamespaceList, s conversion.Scope) error { + return autoConvert_v1_NamespaceList_To_api_NamespaceList(in, out, s) } -func autoConvert_api_NamespaceSpec_To_v1_NamespaceSpec(in *api.NamespaceSpec, out *NamespaceSpec, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.NamespaceSpec))(in) +func autoConvert_api_NamespaceList_To_v1_NamespaceList(in *api.NamespaceList, out *NamespaceList, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err } - if in.Finalizers != nil { - out.Finalizers = make([]FinalizerName, len(in.Finalizers)) - for i := range in.Finalizers { - out.Finalizers[i] = FinalizerName(in.Finalizers[i]) + if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { + return err + } + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Namespace, len(*in)) + for i := range *in { + if err := Convert_api_Namespace_To_v1_Namespace(&(*in)[i], &(*out)[i], s); err != nil { + return err + } } } else { - out.Finalizers = nil + out.Items = nil } return nil } -func Convert_api_NamespaceSpec_To_v1_NamespaceSpec(in *api.NamespaceSpec, out *NamespaceSpec, s conversion.Scope) error { - return autoConvert_api_NamespaceSpec_To_v1_NamespaceSpec(in, out, s) -} +func Convert_api_NamespaceList_To_v1_NamespaceList(in *api.NamespaceList, out *NamespaceList, s conversion.Scope) error { + return autoConvert_api_NamespaceList_To_v1_NamespaceList(in, out, s) +} -func autoConvert_api_NamespaceStatus_To_v1_NamespaceStatus(in *api.NamespaceStatus, out *NamespaceStatus, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.NamespaceStatus))(in) +func autoConvert_v1_NamespaceSpec_To_api_NamespaceSpec(in *NamespaceSpec, out 
*api.NamespaceSpec, s conversion.Scope) error { + if in.Finalizers != nil { + in, out := &in.Finalizers, &out.Finalizers + *out = make([]api.FinalizerName, len(*in)) + for i := range *in { + (*out)[i] = api.FinalizerName((*in)[i]) + } + } else { + out.Finalizers = nil + } + return nil +} + +func Convert_v1_NamespaceSpec_To_api_NamespaceSpec(in *NamespaceSpec, out *api.NamespaceSpec, s conversion.Scope) error { + return autoConvert_v1_NamespaceSpec_To_api_NamespaceSpec(in, out, s) +} + +func autoConvert_api_NamespaceSpec_To_v1_NamespaceSpec(in *api.NamespaceSpec, out *NamespaceSpec, s conversion.Scope) error { + if in.Finalizers != nil { + in, out := &in.Finalizers, &out.Finalizers + *out = make([]FinalizerName, len(*in)) + for i := range *in { + (*out)[i] = FinalizerName((*in)[i]) + } + } else { + out.Finalizers = nil } + return nil +} + +func Convert_api_NamespaceSpec_To_v1_NamespaceSpec(in *api.NamespaceSpec, out *NamespaceSpec, s conversion.Scope) error { + return autoConvert_api_NamespaceSpec_To_v1_NamespaceSpec(in, out, s) +} + +func autoConvert_v1_NamespaceStatus_To_api_NamespaceStatus(in *NamespaceStatus, out *api.NamespaceStatus, s conversion.Scope) error { + SetDefaults_NamespaceStatus(in) + out.Phase = api.NamespacePhase(in.Phase) + return nil +} + +func Convert_v1_NamespaceStatus_To_api_NamespaceStatus(in *NamespaceStatus, out *api.NamespaceStatus, s conversion.Scope) error { + return autoConvert_v1_NamespaceStatus_To_api_NamespaceStatus(in, out, s) +} + +func autoConvert_api_NamespaceStatus_To_v1_NamespaceStatus(in *api.NamespaceStatus, out *NamespaceStatus, s conversion.Scope) error { out.Phase = NamespacePhase(in.Phase) return nil } @@ -1472,9 +2969,30 @@ func Convert_api_NamespaceStatus_To_v1_NamespaceStatus(in *api.NamespaceStatus, return autoConvert_api_NamespaceStatus_To_v1_NamespaceStatus(in, out, s) } +func autoConvert_v1_Node_To_api_Node(in *Node, out *api.Node, s conversion.Scope) error { + SetDefaults_Node(in) + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { + return err + } + if err := Convert_v1_NodeSpec_To_api_NodeSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_NodeStatus_To_api_NodeStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v1_Node_To_api_Node(in *Node, out *api.Node, s conversion.Scope) error { + return autoConvert_v1_Node_To_api_Node(in, out, s) +} + func autoConvert_api_Node_To_v1_Node(in *api.Node, out *Node, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.Node))(in) + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err } if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { return err @@ -1492,10 +3010,17 @@ func Convert_api_Node_To_v1_Node(in *api.Node, out *Node, s conversion.Scope) er return autoConvert_api_Node_To_v1_Node(in, out, s) } +func autoConvert_v1_NodeAddress_To_api_NodeAddress(in *NodeAddress, out *api.NodeAddress, s conversion.Scope) error { + out.Type = api.NodeAddressType(in.Type) + out.Address = in.Address + return nil +} + +func Convert_v1_NodeAddress_To_api_NodeAddress(in *NodeAddress, out *api.NodeAddress, s conversion.Scope) error { + return 
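// Go permits converting a single FinalizerName between the v1 and api
// definitions, but not a whole []FinalizerName, even though both name the
// same underlying string type; that is why the NamespaceSpec conversions
// above emit an element-by-element cast loop. Minimal illustration with
// stand-in types:
package sketch

type v1FinalizerName string
type apiFinalizerName string

func convertFinalizers(in []v1FinalizerName) []apiFinalizerName {
	if in == nil {
		return nil
	}
	out := make([]apiFinalizerName, len(in))
	for i := range in {
		// Element conversion is legal; converting the slice as a whole is not.
		out[i] = apiFinalizerName(in[i])
	}
	return out
}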
autoConvert_v1_NodeAddress_To_api_NodeAddress(in, out, s) +} + func autoConvert_api_NodeAddress_To_v1_NodeAddress(in *api.NodeAddress, out *NodeAddress, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.NodeAddress))(in) - } out.Type = NodeAddressType(in.Type) out.Address = in.Address return nil @@ -1505,10 +3030,81 @@ func Convert_api_NodeAddress_To_v1_NodeAddress(in *api.NodeAddress, out *NodeAdd return autoConvert_api_NodeAddress_To_v1_NodeAddress(in, out, s) } -func autoConvert_api_NodeCondition_To_v1_NodeCondition(in *api.NodeCondition, out *NodeCondition, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.NodeCondition))(in) +func autoConvert_v1_NodeAffinity_To_api_NodeAffinity(in *NodeAffinity, out *api.NodeAffinity, s conversion.Scope) error { + if in.RequiredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution + *out = new(api.NodeSelector) + if err := Convert_v1_NodeSelector_To_api_NodeSelector(*in, *out, s); err != nil { + return err + } + } else { + out.RequiredDuringSchedulingIgnoredDuringExecution = nil + } + if in.PreferredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution + *out = make([]api.PreferredSchedulingTerm, len(*in)) + for i := range *in { + if err := Convert_v1_PreferredSchedulingTerm_To_api_PreferredSchedulingTerm(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.PreferredDuringSchedulingIgnoredDuringExecution = nil + } + return nil +} + +func Convert_v1_NodeAffinity_To_api_NodeAffinity(in *NodeAffinity, out *api.NodeAffinity, s conversion.Scope) error { + return autoConvert_v1_NodeAffinity_To_api_NodeAffinity(in, out, s) +} + +func autoConvert_api_NodeAffinity_To_v1_NodeAffinity(in *api.NodeAffinity, out *NodeAffinity, s conversion.Scope) error { + if in.RequiredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution + *out = new(NodeSelector) + if err := Convert_api_NodeSelector_To_v1_NodeSelector(*in, *out, s); err != nil { + return err + } + } else { + out.RequiredDuringSchedulingIgnoredDuringExecution = nil + } + if in.PreferredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution + *out = make([]PreferredSchedulingTerm, len(*in)) + for i := range *in { + if err := Convert_api_PreferredSchedulingTerm_To_v1_PreferredSchedulingTerm(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.PreferredDuringSchedulingIgnoredDuringExecution = nil + } + return nil +} + +func Convert_api_NodeAffinity_To_v1_NodeAffinity(in *api.NodeAffinity, out *NodeAffinity, s conversion.Scope) error { + return autoConvert_api_NodeAffinity_To_v1_NodeAffinity(in, out, s) +} + +func autoConvert_v1_NodeCondition_To_api_NodeCondition(in *NodeCondition, out *api.NodeCondition, s conversion.Scope) error { + out.Type = api.NodeConditionType(in.Type) + out.Status = api.ConditionStatus(in.Status) + if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastHeartbeatTime, &out.LastHeartbeatTime, s); err != nil { + return err } + if err := 
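// Optional (pointer) struct fields such as NodeAffinity's
// RequiredDuringSchedulingIgnoredDuringExecution are converted by allocating
// the target with new(...) and recursing into the element converter,
// mirroring the generated shape above. Sketch with illustrative selector
// types:
package sketch

type v1NodeSelector struct{ Terms []string }
type apiNodeSelector struct{ Terms []string }

func convertNodeSelector(in *v1NodeSelector, out *apiNodeSelector) error {
	out.Terms = in.Terms
	return nil
}

func convertRequired(in **v1NodeSelector, out **apiNodeSelector) error {
	if *in != nil {
		*out = new(apiNodeSelector)
		return convertNodeSelector(*in, *out)
	}
	*out = nil // preserve "unset" rather than materializing an empty selector
	return nil
}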
api.Convert_unversioned_Time_To_unversioned_Time(&in.LastTransitionTime, &out.LastTransitionTime, s); err != nil { + return err + } + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +func Convert_v1_NodeCondition_To_api_NodeCondition(in *NodeCondition, out *api.NodeCondition, s conversion.Scope) error { + return autoConvert_v1_NodeCondition_To_api_NodeCondition(in, out, s) +} + +func autoConvert_api_NodeCondition_To_v1_NodeCondition(in *api.NodeCondition, out *NodeCondition, s conversion.Scope) error { out.Type = NodeConditionType(in.Type) out.Status = ConditionStatus(in.Status) if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastHeartbeatTime, &out.LastHeartbeatTime, s); err != nil { @@ -1526,10 +3122,18 @@ func Convert_api_NodeCondition_To_v1_NodeCondition(in *api.NodeCondition, out *N return autoConvert_api_NodeCondition_To_v1_NodeCondition(in, out, s) } -func autoConvert_api_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints(in *api.NodeDaemonEndpoints, out *NodeDaemonEndpoints, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.NodeDaemonEndpoints))(in) +func autoConvert_v1_NodeDaemonEndpoints_To_api_NodeDaemonEndpoints(in *NodeDaemonEndpoints, out *api.NodeDaemonEndpoints, s conversion.Scope) error { + if err := Convert_v1_DaemonEndpoint_To_api_DaemonEndpoint(&in.KubeletEndpoint, &out.KubeletEndpoint, s); err != nil { + return err } + return nil +} + +func Convert_v1_NodeDaemonEndpoints_To_api_NodeDaemonEndpoints(in *NodeDaemonEndpoints, out *api.NodeDaemonEndpoints, s conversion.Scope) error { + return autoConvert_v1_NodeDaemonEndpoints_To_api_NodeDaemonEndpoints(in, out, s) +} + +func autoConvert_api_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints(in *api.NodeDaemonEndpoints, out *NodeDaemonEndpoints, s conversion.Scope) error { if err := Convert_api_DaemonEndpoint_To_v1_DaemonEndpoint(&in.KubeletEndpoint, &out.KubeletEndpoint, s); err != nil { return err } @@ -1540,17 +3144,43 @@ func Convert_api_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints(in *api.NodeDaemo return autoConvert_api_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints(in, out, s) } +func autoConvert_v1_NodeList_To_api_NodeList(in *NodeList, out *api.NodeList, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { + return err + } + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]api.Node, len(*in)) + for i := range *in { + if err := Convert_v1_Node_To_api_Node(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_v1_NodeList_To_api_NodeList(in *NodeList, out *api.NodeList, s conversion.Scope) error { + return autoConvert_v1_NodeList_To_api_NodeList(in, out, s) +} + func autoConvert_api_NodeList_To_v1_NodeList(in *api.NodeList, out *NodeList, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.NodeList))(in) + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err } if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { return err } if in.Items != nil { - out.Items = make([]Node, len(in.Items)) - for i := 
range in.Items { - if err := Convert_api_Node_To_v1_Node(&in.Items[i], &out.Items[i], s); err != nil { + in, out := &in.Items, &out.Items + *out = make([]Node, len(*in)) + for i := range *in { + if err := Convert_api_Node_To_v1_Node(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -1564,9 +3194,21 @@ func Convert_api_NodeList_To_v1_NodeList(in *api.NodeList, out *NodeList, s conv return autoConvert_api_NodeList_To_v1_NodeList(in, out, s) } +func autoConvert_v1_NodeProxyOptions_To_api_NodeProxyOptions(in *NodeProxyOptions, out *api.NodeProxyOptions, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + out.Path = in.Path + return nil +} + +func Convert_v1_NodeProxyOptions_To_api_NodeProxyOptions(in *NodeProxyOptions, out *api.NodeProxyOptions, s conversion.Scope) error { + return autoConvert_v1_NodeProxyOptions_To_api_NodeProxyOptions(in, out, s) +} + func autoConvert_api_NodeProxyOptions_To_v1_NodeProxyOptions(in *api.NodeProxyOptions, out *NodeProxyOptions, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.NodeProxyOptions))(in) + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err } out.Path = in.Path return nil @@ -1576,54 +3218,142 @@ func Convert_api_NodeProxyOptions_To_v1_NodeProxyOptions(in *api.NodeProxyOption return autoConvert_api_NodeProxyOptions_To_v1_NodeProxyOptions(in, out, s) } -func autoConvert_api_NodeSpec_To_v1_NodeSpec(in *api.NodeSpec, out *NodeSpec, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.NodeSpec))(in) +func autoConvert_v1_NodeSelector_To_api_NodeSelector(in *NodeSelector, out *api.NodeSelector, s conversion.Scope) error { + if in.NodeSelectorTerms != nil { + in, out := &in.NodeSelectorTerms, &out.NodeSelectorTerms + *out = make([]api.NodeSelectorTerm, len(*in)) + for i := range *in { + if err := Convert_v1_NodeSelectorTerm_To_api_NodeSelectorTerm(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.NodeSelectorTerms = nil } - out.PodCIDR = in.PodCIDR - out.ExternalID = in.ExternalID - out.ProviderID = in.ProviderID - out.Unschedulable = in.Unschedulable return nil } -func Convert_api_NodeSpec_To_v1_NodeSpec(in *api.NodeSpec, out *NodeSpec, s conversion.Scope) error { - return autoConvert_api_NodeSpec_To_v1_NodeSpec(in, out, s) +func Convert_v1_NodeSelector_To_api_NodeSelector(in *NodeSelector, out *api.NodeSelector, s conversion.Scope) error { + return autoConvert_v1_NodeSelector_To_api_NodeSelector(in, out, s) } -func autoConvert_api_NodeStatus_To_v1_NodeStatus(in *api.NodeStatus, out *NodeStatus, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.NodeStatus))(in) +func autoConvert_api_NodeSelector_To_v1_NodeSelector(in *api.NodeSelector, out *NodeSelector, s conversion.Scope) error { + if in.NodeSelectorTerms != nil { + in, out := &in.NodeSelectorTerms, &out.NodeSelectorTerms + *out = make([]NodeSelectorTerm, len(*in)) + for i := range *in { + if err := Convert_api_NodeSelectorTerm_To_v1_NodeSelectorTerm(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.NodeSelectorTerms = nil } - if in.Capacity != nil { - out.Capacity = make(ResourceList) - for key, val := range in.Capacity { - 
newVal := resource.Quantity{} - if err := api.Convert_resource_Quantity_To_resource_Quantity(&val, &newVal, s); err != nil { + return nil +} + +func Convert_api_NodeSelector_To_v1_NodeSelector(in *api.NodeSelector, out *NodeSelector, s conversion.Scope) error { + return autoConvert_api_NodeSelector_To_v1_NodeSelector(in, out, s) +} + +func autoConvert_v1_NodeSelectorRequirement_To_api_NodeSelectorRequirement(in *NodeSelectorRequirement, out *api.NodeSelectorRequirement, s conversion.Scope) error { + out.Key = in.Key + out.Operator = api.NodeSelectorOperator(in.Operator) + out.Values = in.Values + return nil +} + +func Convert_v1_NodeSelectorRequirement_To_api_NodeSelectorRequirement(in *NodeSelectorRequirement, out *api.NodeSelectorRequirement, s conversion.Scope) error { + return autoConvert_v1_NodeSelectorRequirement_To_api_NodeSelectorRequirement(in, out, s) +} + +func autoConvert_api_NodeSelectorRequirement_To_v1_NodeSelectorRequirement(in *api.NodeSelectorRequirement, out *NodeSelectorRequirement, s conversion.Scope) error { + out.Key = in.Key + out.Operator = NodeSelectorOperator(in.Operator) + out.Values = in.Values + return nil +} + +func Convert_api_NodeSelectorRequirement_To_v1_NodeSelectorRequirement(in *api.NodeSelectorRequirement, out *NodeSelectorRequirement, s conversion.Scope) error { + return autoConvert_api_NodeSelectorRequirement_To_v1_NodeSelectorRequirement(in, out, s) +} + +func autoConvert_v1_NodeSelectorTerm_To_api_NodeSelectorTerm(in *NodeSelectorTerm, out *api.NodeSelectorTerm, s conversion.Scope) error { + if in.MatchExpressions != nil { + in, out := &in.MatchExpressions, &out.MatchExpressions + *out = make([]api.NodeSelectorRequirement, len(*in)) + for i := range *in { + if err := Convert_v1_NodeSelectorRequirement_To_api_NodeSelectorRequirement(&(*in)[i], &(*out)[i], s); err != nil { return err } - out.Capacity[ResourceName(key)] = newVal } } else { - out.Capacity = nil + out.MatchExpressions = nil } - if in.Allocatable != nil { - out.Allocatable = make(ResourceList) - for key, val := range in.Allocatable { - newVal := resource.Quantity{} - if err := api.Convert_resource_Quantity_To_resource_Quantity(&val, &newVal, s); err != nil { + return nil +} + +func Convert_v1_NodeSelectorTerm_To_api_NodeSelectorTerm(in *NodeSelectorTerm, out *api.NodeSelectorTerm, s conversion.Scope) error { + return autoConvert_v1_NodeSelectorTerm_To_api_NodeSelectorTerm(in, out, s) +} + +func autoConvert_api_NodeSelectorTerm_To_v1_NodeSelectorTerm(in *api.NodeSelectorTerm, out *NodeSelectorTerm, s conversion.Scope) error { + if in.MatchExpressions != nil { + in, out := &in.MatchExpressions, &out.MatchExpressions + *out = make([]NodeSelectorRequirement, len(*in)) + for i := range *in { + if err := Convert_api_NodeSelectorRequirement_To_v1_NodeSelectorRequirement(&(*in)[i], &(*out)[i], s); err != nil { return err } - out.Allocatable[ResourceName(key)] = newVal } } else { - out.Allocatable = nil + out.MatchExpressions = nil } - out.Phase = NodePhase(in.Phase) + return nil +} + +func Convert_api_NodeSelectorTerm_To_v1_NodeSelectorTerm(in *api.NodeSelectorTerm, out *NodeSelectorTerm, s conversion.Scope) error { + return autoConvert_api_NodeSelectorTerm_To_v1_NodeSelectorTerm(in, out, s) +} + +func autoConvert_v1_NodeSpec_To_api_NodeSpec(in *NodeSpec, out *api.NodeSpec, s conversion.Scope) error { + out.PodCIDR = in.PodCIDR + out.ExternalID = in.ExternalID + out.ProviderID = in.ProviderID + out.Unschedulable = in.Unschedulable + return nil +} + +func Convert_v1_NodeSpec_To_api_NodeSpec(in 
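// In NodeSelectorRequirement above, Values is copied with a plain
// `out.Values = in.Values`: both sides are []string, so no per-element cast
// is needed and the two structs share one backing array afterwards. Stand-in
// sketch of that shallow slice copy:
package sketch

type requirement struct {
	Key    string
	Values []string
}

func convertRequirement(in, out *requirement) {
	out.Key = in.Key
	// Header-only copy: mutations through either struct's Values are visible
	// to the other until one side reallocates.
	out.Values = in.Values
}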
*NodeSpec, out *api.NodeSpec, s conversion.Scope) error { + return autoConvert_v1_NodeSpec_To_api_NodeSpec(in, out, s) +} + +func autoConvert_api_NodeSpec_To_v1_NodeSpec(in *api.NodeSpec, out *NodeSpec, s conversion.Scope) error { + out.PodCIDR = in.PodCIDR + out.ExternalID = in.ExternalID + out.ProviderID = in.ProviderID + out.Unschedulable = in.Unschedulable + return nil +} + +func Convert_api_NodeSpec_To_v1_NodeSpec(in *api.NodeSpec, out *NodeSpec, s conversion.Scope) error { + return autoConvert_api_NodeSpec_To_v1_NodeSpec(in, out, s) +} + +func autoConvert_v1_NodeStatus_To_api_NodeStatus(in *NodeStatus, out *api.NodeStatus, s conversion.Scope) error { + SetDefaults_NodeStatus(in) + if err := Convert_v1_ResourceList_To_api_ResourceList(&in.Capacity, &out.Capacity, s); err != nil { + return err + } + if err := Convert_v1_ResourceList_To_api_ResourceList(&in.Allocatable, &out.Allocatable, s); err != nil { + return err + } + out.Phase = api.NodePhase(in.Phase) if in.Conditions != nil { - out.Conditions = make([]NodeCondition, len(in.Conditions)) - for i := range in.Conditions { - if err := Convert_api_NodeCondition_To_v1_NodeCondition(&in.Conditions[i], &out.Conditions[i], s); err != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]api.NodeCondition, len(*in)) + for i := range *in { + if err := Convert_v1_NodeCondition_To_api_NodeCondition(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -1631,3254 +3361,73 @@ func autoConvert_api_NodeStatus_To_v1_NodeStatus(in *api.NodeStatus, out *NodeSt out.Conditions = nil } if in.Addresses != nil { - out.Addresses = make([]NodeAddress, len(in.Addresses)) - for i := range in.Addresses { - if err := Convert_api_NodeAddress_To_v1_NodeAddress(&in.Addresses[i], &out.Addresses[i], s); err != nil { + in, out := &in.Addresses, &out.Addresses + *out = make([]api.NodeAddress, len(*in)) + for i := range *in { + if err := Convert_v1_NodeAddress_To_api_NodeAddress(&(*in)[i], &(*out)[i], s); err != nil { return err } } } else { out.Addresses = nil } - if err := Convert_api_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints(&in.DaemonEndpoints, &out.DaemonEndpoints, s); err != nil { + if err := Convert_v1_NodeDaemonEndpoints_To_api_NodeDaemonEndpoints(&in.DaemonEndpoints, &out.DaemonEndpoints, s); err != nil { return err } - if err := Convert_api_NodeSystemInfo_To_v1_NodeSystemInfo(&in.NodeInfo, &out.NodeInfo, s); err != nil { + if err := Convert_v1_NodeSystemInfo_To_api_NodeSystemInfo(&in.NodeInfo, &out.NodeInfo, s); err != nil { return err } if in.Images != nil { - out.Images = make([]ContainerImage, len(in.Images)) - for i := range in.Images { - if err := Convert_api_ContainerImage_To_v1_ContainerImage(&in.Images[i], &out.Images[i], s); err != nil { + in, out := &in.Images, &out.Images + *out = make([]api.ContainerImage, len(*in)) + for i := range *in { + if err := Convert_v1_ContainerImage_To_api_ContainerImage(&(*in)[i], &(*out)[i], s); err != nil { return err } } } else { - out.Images = nil - } - return nil -} - -func Convert_api_NodeStatus_To_v1_NodeStatus(in *api.NodeStatus, out *NodeStatus, s conversion.Scope) error { - return autoConvert_api_NodeStatus_To_v1_NodeStatus(in, out, s) -} - -func autoConvert_api_NodeSystemInfo_To_v1_NodeSystemInfo(in *api.NodeSystemInfo, out *NodeSystemInfo, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.NodeSystemInfo))(in) - } - out.MachineID = in.MachineID - out.SystemUUID = in.SystemUUID - out.BootID = in.BootID - 
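// SetDefaults_NodeStatus(in) above replaces the old reflective hook
// (s.DefaultingInterface(reflect.TypeOf(*in)) followed by a type assertion):
// the defaulting function is now resolved when the code is generated rather
// than looked up at runtime. Rough shape of the two approaches, with a
// hypothetical default value for illustration only:
package sketch

import "reflect"

type NodeStatus struct{ Phase string }

// Old style: a runtime registry keyed by reflect.Type; the bare type
// assertion panics if a function of the wrong type was registered.
var defaulters = map[reflect.Type]interface{}{}

func applyReflectiveDefaults(in *NodeStatus) {
	if fn, found := defaulters[reflect.TypeOf(*in)]; found {
		fn.(func(*NodeStatus))(in)
	}
}

// New style: a direct, compiler-checked call emitted by the generator.
func setDefaultsNodeStatus(in *NodeStatus) {
	if in.Phase == "" {
		in.Phase = "Running" // hypothetical default
	}
}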
out.KernelVersion = in.KernelVersion - out.OSImage = in.OSImage - out.ContainerRuntimeVersion = in.ContainerRuntimeVersion - out.KubeletVersion = in.KubeletVersion - out.KubeProxyVersion = in.KubeProxyVersion - return nil -} - -func Convert_api_NodeSystemInfo_To_v1_NodeSystemInfo(in *api.NodeSystemInfo, out *NodeSystemInfo, s conversion.Scope) error { - return autoConvert_api_NodeSystemInfo_To_v1_NodeSystemInfo(in, out, s) -} - -func autoConvert_api_ObjectFieldSelector_To_v1_ObjectFieldSelector(in *api.ObjectFieldSelector, out *ObjectFieldSelector, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.ObjectFieldSelector))(in) - } - out.APIVersion = in.APIVersion - out.FieldPath = in.FieldPath - return nil -} - -func Convert_api_ObjectFieldSelector_To_v1_ObjectFieldSelector(in *api.ObjectFieldSelector, out *ObjectFieldSelector, s conversion.Scope) error { - return autoConvert_api_ObjectFieldSelector_To_v1_ObjectFieldSelector(in, out, s) -} - -func autoConvert_api_ObjectMeta_To_v1_ObjectMeta(in *api.ObjectMeta, out *ObjectMeta, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.ObjectMeta))(in) - } - out.Name = in.Name - out.GenerateName = in.GenerateName - out.Namespace = in.Namespace - out.SelfLink = in.SelfLink - out.UID = in.UID - out.ResourceVersion = in.ResourceVersion - out.Generation = in.Generation - if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.CreationTimestamp, &out.CreationTimestamp, s); err != nil { - return err - } - // unable to generate simple pointer conversion for unversioned.Time -> unversioned.Time - if in.DeletionTimestamp != nil { - out.DeletionTimestamp = new(unversioned.Time) - if err := api.Convert_unversioned_Time_To_unversioned_Time(in.DeletionTimestamp, out.DeletionTimestamp, s); err != nil { - return err - } - } else { - out.DeletionTimestamp = nil - } - if in.DeletionGracePeriodSeconds != nil { - out.DeletionGracePeriodSeconds = new(int64) - *out.DeletionGracePeriodSeconds = *in.DeletionGracePeriodSeconds - } else { - out.DeletionGracePeriodSeconds = nil - } - if in.Labels != nil { - out.Labels = make(map[string]string) - for key, val := range in.Labels { - out.Labels[key] = val - } - } else { - out.Labels = nil - } - if in.Annotations != nil { - out.Annotations = make(map[string]string) - for key, val := range in.Annotations { - out.Annotations[key] = val - } - } else { - out.Annotations = nil - } - return nil -} - -func Convert_api_ObjectMeta_To_v1_ObjectMeta(in *api.ObjectMeta, out *ObjectMeta, s conversion.Scope) error { - return autoConvert_api_ObjectMeta_To_v1_ObjectMeta(in, out, s) -} - -func autoConvert_api_ObjectReference_To_v1_ObjectReference(in *api.ObjectReference, out *ObjectReference, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.ObjectReference))(in) - } - out.Kind = in.Kind - out.Namespace = in.Namespace - out.Name = in.Name - out.UID = in.UID - out.APIVersion = in.APIVersion - out.ResourceVersion = in.ResourceVersion - out.FieldPath = in.FieldPath - return nil -} - -func Convert_api_ObjectReference_To_v1_ObjectReference(in *api.ObjectReference, out *ObjectReference, s conversion.Scope) error { - return autoConvert_api_ObjectReference_To_v1_ObjectReference(in, out, s) -} - -func autoConvert_api_PersistentVolume_To_v1_PersistentVolume(in *api.PersistentVolume, out *PersistentVolume, s 
conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.PersistentVolume))(in) - } - if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } - if err := Convert_api_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_api_PersistentVolumeStatus_To_v1_PersistentVolumeStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_api_PersistentVolume_To_v1_PersistentVolume(in *api.PersistentVolume, out *PersistentVolume, s conversion.Scope) error { - return autoConvert_api_PersistentVolume_To_v1_PersistentVolume(in, out, s) -} - -func autoConvert_api_PersistentVolumeClaim_To_v1_PersistentVolumeClaim(in *api.PersistentVolumeClaim, out *PersistentVolumeClaim, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.PersistentVolumeClaim))(in) - } - if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } - if err := Convert_api_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_api_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_api_PersistentVolumeClaim_To_v1_PersistentVolumeClaim(in *api.PersistentVolumeClaim, out *PersistentVolumeClaim, s conversion.Scope) error { - return autoConvert_api_PersistentVolumeClaim_To_v1_PersistentVolumeClaim(in, out, s) -} - -func autoConvert_api_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList(in *api.PersistentVolumeClaimList, out *PersistentVolumeClaimList, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.PersistentVolumeClaimList))(in) - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - out.Items = make([]PersistentVolumeClaim, len(in.Items)) - for i := range in.Items { - if err := Convert_api_PersistentVolumeClaim_To_v1_PersistentVolumeClaim(&in.Items[i], &out.Items[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_api_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList(in *api.PersistentVolumeClaimList, out *PersistentVolumeClaimList, s conversion.Scope) error { - return autoConvert_api_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList(in, out, s) -} - -func autoConvert_api_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(in *api.PersistentVolumeClaimSpec, out *PersistentVolumeClaimSpec, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.PersistentVolumeClaimSpec))(in) - } - if in.AccessModes != nil { - out.AccessModes = make([]PersistentVolumeAccessMode, len(in.AccessModes)) - for i := range in.AccessModes { - out.AccessModes[i] = PersistentVolumeAccessMode(in.AccessModes[i]) - } - } else { - out.AccessModes = nil - } - if err := Convert_api_ResourceRequirements_To_v1_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil { - return err - } - out.VolumeName = in.VolumeName - return nil -} - -func 
Convert_api_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(in *api.PersistentVolumeClaimSpec, out *PersistentVolumeClaimSpec, s conversion.Scope) error { - return autoConvert_api_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(in, out, s) -} - -func autoConvert_api_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus(in *api.PersistentVolumeClaimStatus, out *PersistentVolumeClaimStatus, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.PersistentVolumeClaimStatus))(in) - } - out.Phase = PersistentVolumeClaimPhase(in.Phase) - if in.AccessModes != nil { - out.AccessModes = make([]PersistentVolumeAccessMode, len(in.AccessModes)) - for i := range in.AccessModes { - out.AccessModes[i] = PersistentVolumeAccessMode(in.AccessModes[i]) - } - } else { - out.AccessModes = nil - } - if in.Capacity != nil { - out.Capacity = make(ResourceList) - for key, val := range in.Capacity { - newVal := resource.Quantity{} - if err := api.Convert_resource_Quantity_To_resource_Quantity(&val, &newVal, s); err != nil { - return err - } - out.Capacity[ResourceName(key)] = newVal - } - } else { - out.Capacity = nil - } - return nil -} - -func Convert_api_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus(in *api.PersistentVolumeClaimStatus, out *PersistentVolumeClaimStatus, s conversion.Scope) error { - return autoConvert_api_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus(in, out, s) -} - -func autoConvert_api_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource(in *api.PersistentVolumeClaimVolumeSource, out *PersistentVolumeClaimVolumeSource, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.PersistentVolumeClaimVolumeSource))(in) - } - out.ClaimName = in.ClaimName - out.ReadOnly = in.ReadOnly - return nil -} - -func Convert_api_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource(in *api.PersistentVolumeClaimVolumeSource, out *PersistentVolumeClaimVolumeSource, s conversion.Scope) error { - return autoConvert_api_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource(in, out, s) -} - -func autoConvert_api_PersistentVolumeList_To_v1_PersistentVolumeList(in *api.PersistentVolumeList, out *PersistentVolumeList, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.PersistentVolumeList))(in) - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - out.Items = make([]PersistentVolume, len(in.Items)) - for i := range in.Items { - if err := Convert_api_PersistentVolume_To_v1_PersistentVolume(&in.Items[i], &out.Items[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_api_PersistentVolumeList_To_v1_PersistentVolumeList(in *api.PersistentVolumeList, out *PersistentVolumeList, s conversion.Scope) error { - return autoConvert_api_PersistentVolumeList_To_v1_PersistentVolumeList(in, out, s) -} - -func autoConvert_api_PersistentVolumeSource_To_v1_PersistentVolumeSource(in *api.PersistentVolumeSource, out *PersistentVolumeSource, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.PersistentVolumeSource))(in) - } - // unable to generate simple pointer 
conversion for api.GCEPersistentDiskVolumeSource -> v1.GCEPersistentDiskVolumeSource - if in.GCEPersistentDisk != nil { - out.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) - if err := Convert_api_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource(in.GCEPersistentDisk, out.GCEPersistentDisk, s); err != nil { - return err - } - } else { - out.GCEPersistentDisk = nil - } - // unable to generate simple pointer conversion for api.AWSElasticBlockStoreVolumeSource -> v1.AWSElasticBlockStoreVolumeSource - if in.AWSElasticBlockStore != nil { - out.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) - if err := Convert_api_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource(in.AWSElasticBlockStore, out.AWSElasticBlockStore, s); err != nil { - return err - } - } else { - out.AWSElasticBlockStore = nil - } - // unable to generate simple pointer conversion for api.HostPathVolumeSource -> v1.HostPathVolumeSource - if in.HostPath != nil { - out.HostPath = new(HostPathVolumeSource) - if err := Convert_api_HostPathVolumeSource_To_v1_HostPathVolumeSource(in.HostPath, out.HostPath, s); err != nil { - return err - } - } else { - out.HostPath = nil - } - // unable to generate simple pointer conversion for api.GlusterfsVolumeSource -> v1.GlusterfsVolumeSource - if in.Glusterfs != nil { - out.Glusterfs = new(GlusterfsVolumeSource) - if err := Convert_api_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource(in.Glusterfs, out.Glusterfs, s); err != nil { - return err - } - } else { - out.Glusterfs = nil - } - // unable to generate simple pointer conversion for api.NFSVolumeSource -> v1.NFSVolumeSource - if in.NFS != nil { - out.NFS = new(NFSVolumeSource) - if err := Convert_api_NFSVolumeSource_To_v1_NFSVolumeSource(in.NFS, out.NFS, s); err != nil { - return err - } - } else { - out.NFS = nil - } - // unable to generate simple pointer conversion for api.RBDVolumeSource -> v1.RBDVolumeSource - if in.RBD != nil { - out.RBD = new(RBDVolumeSource) - if err := Convert_api_RBDVolumeSource_To_v1_RBDVolumeSource(in.RBD, out.RBD, s); err != nil { - return err - } - } else { - out.RBD = nil - } - // unable to generate simple pointer conversion for api.ISCSIVolumeSource -> v1.ISCSIVolumeSource - if in.ISCSI != nil { - out.ISCSI = new(ISCSIVolumeSource) - if err := Convert_api_ISCSIVolumeSource_To_v1_ISCSIVolumeSource(in.ISCSI, out.ISCSI, s); err != nil { - return err - } - } else { - out.ISCSI = nil - } - // unable to generate simple pointer conversion for api.FlexVolumeSource -> v1.FlexVolumeSource - if in.FlexVolume != nil { - out.FlexVolume = new(FlexVolumeSource) - if err := Convert_api_FlexVolumeSource_To_v1_FlexVolumeSource(in.FlexVolume, out.FlexVolume, s); err != nil { - return err - } - } else { - out.FlexVolume = nil - } - // unable to generate simple pointer conversion for api.CinderVolumeSource -> v1.CinderVolumeSource - if in.Cinder != nil { - out.Cinder = new(CinderVolumeSource) - if err := Convert_api_CinderVolumeSource_To_v1_CinderVolumeSource(in.Cinder, out.Cinder, s); err != nil { - return err - } - } else { - out.Cinder = nil - } - // unable to generate simple pointer conversion for api.CephFSVolumeSource -> v1.CephFSVolumeSource - if in.CephFS != nil { - out.CephFS = new(CephFSVolumeSource) - if err := Convert_api_CephFSVolumeSource_To_v1_CephFSVolumeSource(in.CephFS, out.CephFS, s); err != nil { - return err - } - } else { - out.CephFS = nil - } - // unable to generate simple pointer conversion for api.FCVolumeSource -> v1.FCVolumeSource - if in.FC != nil { 
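// PersistentVolumeSource behaves as a union: at most one pointer field is
// set, and the removed lines above convert every arm with the same
// nil-guarded block (the "unable to generate simple pointer conversion"
// comments mark where the generator had to spell the allocation out by hand).
// Condensed sketch with two stand-in arms:
package sketch

type v1NFSSource struct{ Server, Path string }
type apiNFSSource struct{ Server, Path string }

type v1HostPathSource struct{ Path string }
type apiHostPathSource struct{ Path string }

type v1Source struct {
	NFS      *v1NFSSource
	HostPath *v1HostPathSource
}

type apiSource struct {
	NFS      *apiNFSSource
	HostPath *apiHostPathSource
}

func convertSource(in *v1Source, out *apiSource) error {
	if in.NFS != nil {
		out.NFS = &apiNFSSource{Server: in.NFS.Server, Path: in.NFS.Path}
	} else {
		out.NFS = nil
	}
	if in.HostPath != nil {
		out.HostPath = &apiHostPathSource{Path: in.HostPath.Path}
	} else {
		out.HostPath = nil
	}
	// ...one guarded block per arm (GCEPersistentDisk, AWS, Glusterfs, ...).
	return nil
}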
- out.FC = new(FCVolumeSource) - if err := Convert_api_FCVolumeSource_To_v1_FCVolumeSource(in.FC, out.FC, s); err != nil { - return err - } - } else { - out.FC = nil - } - // unable to generate simple pointer conversion for api.FlockerVolumeSource -> v1.FlockerVolumeSource - if in.Flocker != nil { - out.Flocker = new(FlockerVolumeSource) - if err := Convert_api_FlockerVolumeSource_To_v1_FlockerVolumeSource(in.Flocker, out.Flocker, s); err != nil { - return err - } - } else { - out.Flocker = nil - } - // unable to generate simple pointer conversion for api.AzureFileVolumeSource -> v1.AzureFileVolumeSource - if in.AzureFile != nil { - out.AzureFile = new(AzureFileVolumeSource) - if err := Convert_api_AzureFileVolumeSource_To_v1_AzureFileVolumeSource(in.AzureFile, out.AzureFile, s); err != nil { - return err - } - } else { - out.AzureFile = nil - } - return nil -} - -func Convert_api_PersistentVolumeSource_To_v1_PersistentVolumeSource(in *api.PersistentVolumeSource, out *PersistentVolumeSource, s conversion.Scope) error { - return autoConvert_api_PersistentVolumeSource_To_v1_PersistentVolumeSource(in, out, s) -} - -func autoConvert_api_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(in *api.PersistentVolumeSpec, out *PersistentVolumeSpec, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.PersistentVolumeSpec))(in) - } - if in.Capacity != nil { - out.Capacity = make(ResourceList) - for key, val := range in.Capacity { - newVal := resource.Quantity{} - if err := api.Convert_resource_Quantity_To_resource_Quantity(&val, &newVal, s); err != nil { - return err - } - out.Capacity[ResourceName(key)] = newVal - } - } else { - out.Capacity = nil - } - if err := Convert_api_PersistentVolumeSource_To_v1_PersistentVolumeSource(&in.PersistentVolumeSource, &out.PersistentVolumeSource, s); err != nil { - return err - } - if in.AccessModes != nil { - out.AccessModes = make([]PersistentVolumeAccessMode, len(in.AccessModes)) - for i := range in.AccessModes { - out.AccessModes[i] = PersistentVolumeAccessMode(in.AccessModes[i]) - } - } else { - out.AccessModes = nil - } - // unable to generate simple pointer conversion for api.ObjectReference -> v1.ObjectReference - if in.ClaimRef != nil { - out.ClaimRef = new(ObjectReference) - if err := Convert_api_ObjectReference_To_v1_ObjectReference(in.ClaimRef, out.ClaimRef, s); err != nil { - return err - } - } else { - out.ClaimRef = nil - } - out.PersistentVolumeReclaimPolicy = PersistentVolumeReclaimPolicy(in.PersistentVolumeReclaimPolicy) - return nil -} - -func Convert_api_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(in *api.PersistentVolumeSpec, out *PersistentVolumeSpec, s conversion.Scope) error { - return autoConvert_api_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(in, out, s) -} - -func autoConvert_api_PersistentVolumeStatus_To_v1_PersistentVolumeStatus(in *api.PersistentVolumeStatus, out *PersistentVolumeStatus, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.PersistentVolumeStatus))(in) - } - out.Phase = PersistentVolumePhase(in.Phase) - out.Message = in.Message - out.Reason = in.Reason - return nil -} - -func Convert_api_PersistentVolumeStatus_To_v1_PersistentVolumeStatus(in *api.PersistentVolumeStatus, out *PersistentVolumeStatus, s conversion.Scope) error { - return autoConvert_api_PersistentVolumeStatus_To_v1_PersistentVolumeStatus(in, out, s) -} - -func autoConvert_api_Pod_To_v1_Pod(in 
*api.Pod, out *Pod, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.Pod))(in) - } - if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } - if err := Convert_api_PodSpec_To_v1_PodSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_api_PodStatus_To_v1_PodStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func autoConvert_api_PodAttachOptions_To_v1_PodAttachOptions(in *api.PodAttachOptions, out *PodAttachOptions, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.PodAttachOptions))(in) - } - out.Stdin = in.Stdin - out.Stdout = in.Stdout - out.Stderr = in.Stderr - out.TTY = in.TTY - out.Container = in.Container - return nil -} - -func Convert_api_PodAttachOptions_To_v1_PodAttachOptions(in *api.PodAttachOptions, out *PodAttachOptions, s conversion.Scope) error { - return autoConvert_api_PodAttachOptions_To_v1_PodAttachOptions(in, out, s) -} - -func autoConvert_api_PodCondition_To_v1_PodCondition(in *api.PodCondition, out *PodCondition, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.PodCondition))(in) - } - out.Type = PodConditionType(in.Type) - out.Status = ConditionStatus(in.Status) - if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastProbeTime, &out.LastProbeTime, s); err != nil { - return err - } - if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastTransitionTime, &out.LastTransitionTime, s); err != nil { - return err - } - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -func Convert_api_PodCondition_To_v1_PodCondition(in *api.PodCondition, out *PodCondition, s conversion.Scope) error { - return autoConvert_api_PodCondition_To_v1_PodCondition(in, out, s) -} - -func autoConvert_api_PodExecOptions_To_v1_PodExecOptions(in *api.PodExecOptions, out *PodExecOptions, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.PodExecOptions))(in) - } - out.Stdin = in.Stdin - out.Stdout = in.Stdout - out.Stderr = in.Stderr - out.TTY = in.TTY - out.Container = in.Container - if in.Command != nil { - out.Command = make([]string, len(in.Command)) - for i := range in.Command { - out.Command[i] = in.Command[i] - } - } else { - out.Command = nil - } - return nil -} - -func Convert_api_PodExecOptions_To_v1_PodExecOptions(in *api.PodExecOptions, out *PodExecOptions, s conversion.Scope) error { - return autoConvert_api_PodExecOptions_To_v1_PodExecOptions(in, out, s) -} - -func autoConvert_api_PodList_To_v1_PodList(in *api.PodList, out *PodList, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.PodList))(in) - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - out.Items = make([]Pod, len(in.Items)) - for i := range in.Items { - if err := Convert_api_Pod_To_v1_Pod(&in.Items[i], &out.Items[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_api_PodList_To_v1_PodList(in *api.PodList, out *PodList, s conversion.Scope) error { - return autoConvert_api_PodList_To_v1_PodList(in, out, s) -} - 
-func autoConvert_api_PodLogOptions_To_v1_PodLogOptions(in *api.PodLogOptions, out *PodLogOptions, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*api.PodLogOptions))(in)
-	}
-	out.Container = in.Container
-	out.Follow = in.Follow
-	out.Previous = in.Previous
-	if in.SinceSeconds != nil {
-		out.SinceSeconds = new(int64)
-		*out.SinceSeconds = *in.SinceSeconds
-	} else {
-		out.SinceSeconds = nil
-	}
-	// unable to generate simple pointer conversion for unversioned.Time -> unversioned.Time
-	if in.SinceTime != nil {
-		out.SinceTime = new(unversioned.Time)
-		if err := api.Convert_unversioned_Time_To_unversioned_Time(in.SinceTime, out.SinceTime, s); err != nil {
-			return err
-		}
-	} else {
-		out.SinceTime = nil
-	}
-	out.Timestamps = in.Timestamps
-	if in.TailLines != nil {
-		out.TailLines = new(int64)
-		*out.TailLines = *in.TailLines
-	} else {
-		out.TailLines = nil
-	}
-	if in.LimitBytes != nil {
-		out.LimitBytes = new(int64)
-		*out.LimitBytes = *in.LimitBytes
-	} else {
-		out.LimitBytes = nil
-	}
-	return nil
-}
-
-func Convert_api_PodLogOptions_To_v1_PodLogOptions(in *api.PodLogOptions, out *PodLogOptions, s conversion.Scope) error {
-	return autoConvert_api_PodLogOptions_To_v1_PodLogOptions(in, out, s)
-}
-
-func autoConvert_api_PodProxyOptions_To_v1_PodProxyOptions(in *api.PodProxyOptions, out *PodProxyOptions, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*api.PodProxyOptions))(in)
-	}
-	out.Path = in.Path
-	return nil
-}
-
-func Convert_api_PodProxyOptions_To_v1_PodProxyOptions(in *api.PodProxyOptions, out *PodProxyOptions, s conversion.Scope) error {
-	return autoConvert_api_PodProxyOptions_To_v1_PodProxyOptions(in, out, s)
-}
-
-func autoConvert_api_PodSpec_To_v1_PodSpec(in *api.PodSpec, out *PodSpec, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*api.PodSpec))(in)
-	}
-	if in.Volumes != nil {
-		out.Volumes = make([]Volume, len(in.Volumes))
-		for i := range in.Volumes {
-			if err := Convert_api_Volume_To_v1_Volume(&in.Volumes[i], &out.Volumes[i], s); err != nil {
-				return err
-			}
-		}
-	} else {
-		out.Volumes = nil
-	}
-	if in.Containers != nil {
-		out.Containers = make([]Container, len(in.Containers))
-		for i := range in.Containers {
-			if err := Convert_api_Container_To_v1_Container(&in.Containers[i], &out.Containers[i], s); err != nil {
-				return err
-			}
-		}
-	} else {
-		out.Containers = nil
-	}
-	out.RestartPolicy = RestartPolicy(in.RestartPolicy)
-	if in.TerminationGracePeriodSeconds != nil {
-		out.TerminationGracePeriodSeconds = new(int64)
-		*out.TerminationGracePeriodSeconds = *in.TerminationGracePeriodSeconds
-	} else {
-		out.TerminationGracePeriodSeconds = nil
-	}
-	if in.ActiveDeadlineSeconds != nil {
-		out.ActiveDeadlineSeconds = new(int64)
-		*out.ActiveDeadlineSeconds = *in.ActiveDeadlineSeconds
-	} else {
-		out.ActiveDeadlineSeconds = nil
-	}
-	out.DNSPolicy = DNSPolicy(in.DNSPolicy)
-	if in.NodeSelector != nil {
-		out.NodeSelector = make(map[string]string)
-		for key, val := range in.NodeSelector {
-			out.NodeSelector[key] = val
-		}
-	} else {
-		out.NodeSelector = nil
-	}
-	out.ServiceAccountName = in.ServiceAccountName
-	out.NodeName = in.NodeName
-	// unable to generate simple pointer conversion for api.PodSecurityContext -> v1.PodSecurityContext
-	if in.SecurityContext != nil {
-		if err := s.Convert(&in.SecurityContext, &out.SecurityContext, 0); err != nil {
-			return err
-		}
-	} else {
-		out.SecurityContext = nil
-	}
-	if in.ImagePullSecrets != nil {
-		out.ImagePullSecrets = make([]LocalObjectReference, len(in.ImagePullSecrets))
-		for i := range in.ImagePullSecrets {
-			if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(&in.ImagePullSecrets[i], &out.ImagePullSecrets[i], s); err != nil {
-				return err
-			}
-		}
-	} else {
-		out.ImagePullSecrets = nil
-	}
-	return nil
-}
-
-func autoConvert_api_PodStatus_To_v1_PodStatus(in *api.PodStatus, out *PodStatus, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*api.PodStatus))(in)
-	}
-	out.Phase = PodPhase(in.Phase)
-	if in.Conditions != nil {
-		out.Conditions = make([]PodCondition, len(in.Conditions))
-		for i := range in.Conditions {
-			if err := Convert_api_PodCondition_To_v1_PodCondition(&in.Conditions[i], &out.Conditions[i], s); err != nil {
-				return err
-			}
-		}
-	} else {
-		out.Conditions = nil
-	}
-	out.Message = in.Message
-	out.Reason = in.Reason
-	out.HostIP = in.HostIP
-	out.PodIP = in.PodIP
-	// unable to generate simple pointer conversion for unversioned.Time -> unversioned.Time
-	if in.StartTime != nil {
-		out.StartTime = new(unversioned.Time)
-		if err := api.Convert_unversioned_Time_To_unversioned_Time(in.StartTime, out.StartTime, s); err != nil {
-			return err
-		}
-	} else {
-		out.StartTime = nil
-	}
-	if in.ContainerStatuses != nil {
-		out.ContainerStatuses = make([]ContainerStatus, len(in.ContainerStatuses))
-		for i := range in.ContainerStatuses {
-			if err := Convert_api_ContainerStatus_To_v1_ContainerStatus(&in.ContainerStatuses[i], &out.ContainerStatuses[i], s); err != nil {
-				return err
-			}
-		}
-	} else {
-		out.ContainerStatuses = nil
-	}
-	return nil
-}
-
-func Convert_api_PodStatus_To_v1_PodStatus(in *api.PodStatus, out *PodStatus, s conversion.Scope) error {
-	return autoConvert_api_PodStatus_To_v1_PodStatus(in, out, s)
-}
-
-func autoConvert_api_PodStatusResult_To_v1_PodStatusResult(in *api.PodStatusResult, out *PodStatusResult, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*api.PodStatusResult))(in)
-	}
-	if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil {
-		return err
-	}
-	if err := Convert_api_PodStatus_To_v1_PodStatus(&in.Status, &out.Status, s); err != nil {
-		return err
-	}
-	return nil
-}
-
-func Convert_api_PodStatusResult_To_v1_PodStatusResult(in *api.PodStatusResult, out *PodStatusResult, s conversion.Scope) error {
-	return autoConvert_api_PodStatusResult_To_v1_PodStatusResult(in, out, s)
-}
-
-func autoConvert_api_PodTemplate_To_v1_PodTemplate(in *api.PodTemplate, out *PodTemplate, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*api.PodTemplate))(in)
-	}
-	if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil {
-		return err
-	}
-	if err := Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil {
-		return err
-	}
-	return nil
-}
-
-func Convert_api_PodTemplate_To_v1_PodTemplate(in *api.PodTemplate, out *PodTemplate, s conversion.Scope) error {
-	return autoConvert_api_PodTemplate_To_v1_PodTemplate(in, out, s)
-}
-
-func autoConvert_api_PodTemplateList_To_v1_PodTemplateList(in *api.PodTemplateList, out *PodTemplateList, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*api.PodTemplateList))(in)
-	}
-	if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
-		return err
-	}
-	if in.Items != nil {
-		out.Items = make([]PodTemplate, len(in.Items))
-		for i := range in.Items {
-			if err := Convert_api_PodTemplate_To_v1_PodTemplate(&in.Items[i], &out.Items[i], s); err != nil {
-				return err
-			}
-		}
-	} else {
-		out.Items = nil
-	}
-	return nil
-}
-
-func Convert_api_PodTemplateList_To_v1_PodTemplateList(in *api.PodTemplateList, out *PodTemplateList, s conversion.Scope) error {
-	return autoConvert_api_PodTemplateList_To_v1_PodTemplateList(in, out, s)
-}
-
-func autoConvert_api_PodTemplateSpec_To_v1_PodTemplateSpec(in *api.PodTemplateSpec, out *PodTemplateSpec, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*api.PodTemplateSpec))(in)
-	}
-	if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil {
-		return err
-	}
-	if err := Convert_api_PodSpec_To_v1_PodSpec(&in.Spec, &out.Spec, s); err != nil {
-		return err
-	}
-	return nil
-}
-
-func Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(in *api.PodTemplateSpec, out *PodTemplateSpec, s conversion.Scope) error {
-	return autoConvert_api_PodTemplateSpec_To_v1_PodTemplateSpec(in, out, s)
-}
-
-func autoConvert_api_Probe_To_v1_Probe(in *api.Probe, out *Probe, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*api.Probe))(in)
-	}
-	if err := Convert_api_Handler_To_v1_Handler(&in.Handler, &out.Handler, s); err != nil {
-		return err
-	}
-	out.InitialDelaySeconds = int32(in.InitialDelaySeconds)
-	out.TimeoutSeconds = int32(in.TimeoutSeconds)
-	out.PeriodSeconds = int32(in.PeriodSeconds)
-	out.SuccessThreshold = int32(in.SuccessThreshold)
-	out.FailureThreshold = int32(in.FailureThreshold)
-	return nil
-}
-
-func Convert_api_Probe_To_v1_Probe(in *api.Probe, out *Probe, s conversion.Scope) error {
-	return autoConvert_api_Probe_To_v1_Probe(in, out, s)
-}
-
-func autoConvert_api_RBDVolumeSource_To_v1_RBDVolumeSource(in *api.RBDVolumeSource, out *RBDVolumeSource, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*api.RBDVolumeSource))(in)
-	}
-	if in.CephMonitors != nil {
-		out.CephMonitors = make([]string, len(in.CephMonitors))
-		for i := range in.CephMonitors {
-			out.CephMonitors[i] = in.CephMonitors[i]
-		}
-	} else {
-		out.CephMonitors = nil
-	}
-	out.RBDImage = in.RBDImage
-	out.FSType = in.FSType
-	out.RBDPool = in.RBDPool
-	out.RadosUser = in.RadosUser
-	out.Keyring = in.Keyring
-	// unable to generate simple pointer conversion for api.LocalObjectReference -> v1.LocalObjectReference
-	if in.SecretRef != nil {
-		out.SecretRef = new(LocalObjectReference)
-		if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(in.SecretRef, out.SecretRef, s); err != nil {
-			return err
-		}
-	} else {
-		out.SecretRef = nil
-	}
-	out.ReadOnly = in.ReadOnly
-	return nil
-}
-
-func Convert_api_RBDVolumeSource_To_v1_RBDVolumeSource(in *api.RBDVolumeSource, out *RBDVolumeSource, s conversion.Scope) error {
-	return autoConvert_api_RBDVolumeSource_To_v1_RBDVolumeSource(in, out, s)
-}
-
-func autoConvert_api_RangeAllocation_To_v1_RangeAllocation(in *api.RangeAllocation, out *RangeAllocation, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*api.RangeAllocation))(in)
-	}
-	if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil {
-		return err
-	}
-	out.Range = in.Range
-	if err := conversion.ByteSliceCopy(&in.Data, &out.Data, s); err != nil {
-		return err
-	}
-	return nil
-}
-
-func Convert_api_RangeAllocation_To_v1_RangeAllocation(in *api.RangeAllocation, out *RangeAllocation, s conversion.Scope) error {
-	return autoConvert_api_RangeAllocation_To_v1_RangeAllocation(in, out, s)
-}
-
-func autoConvert_api_ReplicationController_To_v1_ReplicationController(in *api.ReplicationController, out *ReplicationController, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*api.ReplicationController))(in)
-	}
-	if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil {
-		return err
-	}
-	if err := Convert_api_ReplicationControllerSpec_To_v1_ReplicationControllerSpec(&in.Spec, &out.Spec, s); err != nil {
-		return err
-	}
-	if err := Convert_api_ReplicationControllerStatus_To_v1_ReplicationControllerStatus(&in.Status, &out.Status, s); err != nil {
-		return err
-	}
-	return nil
-}
-
-func Convert_api_ReplicationController_To_v1_ReplicationController(in *api.ReplicationController, out *ReplicationController, s conversion.Scope) error {
-	return autoConvert_api_ReplicationController_To_v1_ReplicationController(in, out, s)
-}
-
-func autoConvert_api_ReplicationControllerList_To_v1_ReplicationControllerList(in *api.ReplicationControllerList, out *ReplicationControllerList, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*api.ReplicationControllerList))(in)
-	}
-	if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
-		return err
-	}
-	if in.Items != nil {
-		out.Items = make([]ReplicationController, len(in.Items))
-		for i := range in.Items {
-			if err := Convert_api_ReplicationController_To_v1_ReplicationController(&in.Items[i], &out.Items[i], s); err != nil {
-				return err
-			}
-		}
-	} else {
-		out.Items = nil
-	}
-	return nil
-}
-
-func Convert_api_ReplicationControllerList_To_v1_ReplicationControllerList(in *api.ReplicationControllerList, out *ReplicationControllerList, s conversion.Scope) error {
-	return autoConvert_api_ReplicationControllerList_To_v1_ReplicationControllerList(in, out, s)
-}
-
-func autoConvert_api_ReplicationControllerSpec_To_v1_ReplicationControllerSpec(in *api.ReplicationControllerSpec, out *ReplicationControllerSpec, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*api.ReplicationControllerSpec))(in)
-	}
-	if err := s.Convert(&in.Replicas, &out.Replicas, 0); err != nil {
-		return err
-	}
-	if in.Selector != nil {
-		out.Selector = make(map[string]string)
-		for key, val := range in.Selector {
-			out.Selector[key] = val
-		}
-	} else {
-		out.Selector = nil
-	}
-	// unable to generate simple pointer conversion for api.PodTemplateSpec -> v1.PodTemplateSpec
-	if in.Template != nil {
-		out.Template = new(PodTemplateSpec)
-		if err := Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(in.Template, out.Template, s); err != nil {
-			return err
-		}
-	} else {
-		out.Template = nil
-	}
-	return nil
-}
-
-func autoConvert_api_ReplicationControllerStatus_To_v1_ReplicationControllerStatus(in *api.ReplicationControllerStatus, out *ReplicationControllerStatus, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*api.ReplicationControllerStatus))(in)
-	}
-	out.Replicas = int32(in.Replicas)
-	out.FullyLabeledReplicas = int32(in.FullyLabeledReplicas)
-	out.ObservedGeneration = in.ObservedGeneration
-	return nil
-}
-
-func Convert_api_ReplicationControllerStatus_To_v1_ReplicationControllerStatus(in *api.ReplicationControllerStatus, out *ReplicationControllerStatus, s conversion.Scope) error {
-	return autoConvert_api_ReplicationControllerStatus_To_v1_ReplicationControllerStatus(in, out, s)
-}
-
-func autoConvert_api_ResourceQuota_To_v1_ResourceQuota(in *api.ResourceQuota, out *ResourceQuota, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*api.ResourceQuota))(in)
-	}
-	if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil {
-		return err
-	}
-	if err := Convert_api_ResourceQuotaSpec_To_v1_ResourceQuotaSpec(&in.Spec, &out.Spec, s); err != nil {
-		return err
-	}
-	if err := Convert_api_ResourceQuotaStatus_To_v1_ResourceQuotaStatus(&in.Status, &out.Status, s); err != nil {
-		return err
-	}
-	return nil
-}
-
-func Convert_api_ResourceQuota_To_v1_ResourceQuota(in *api.ResourceQuota, out *ResourceQuota, s conversion.Scope) error {
-	return autoConvert_api_ResourceQuota_To_v1_ResourceQuota(in, out, s)
-}
-
-func autoConvert_api_ResourceQuotaList_To_v1_ResourceQuotaList(in *api.ResourceQuotaList, out *ResourceQuotaList, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*api.ResourceQuotaList))(in)
-	}
-	if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
-		return err
-	}
-	if in.Items != nil {
-		out.Items = make([]ResourceQuota, len(in.Items))
-		for i := range in.Items {
-			if err := Convert_api_ResourceQuota_To_v1_ResourceQuota(&in.Items[i], &out.Items[i], s); err != nil {
-				return err
-			}
-		}
-	} else {
-		out.Items = nil
-	}
-	return nil
-}
-
-func Convert_api_ResourceQuotaList_To_v1_ResourceQuotaList(in *api.ResourceQuotaList, out *ResourceQuotaList, s conversion.Scope) error {
-	return autoConvert_api_ResourceQuotaList_To_v1_ResourceQuotaList(in, out, s)
-}
-
-func autoConvert_api_ResourceQuotaSpec_To_v1_ResourceQuotaSpec(in *api.ResourceQuotaSpec, out *ResourceQuotaSpec, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*api.ResourceQuotaSpec))(in)
-	}
-	if in.Hard != nil {
-		out.Hard = make(ResourceList)
-		for key, val := range in.Hard {
-			newVal := resource.Quantity{}
-			if err := api.Convert_resource_Quantity_To_resource_Quantity(&val, &newVal, s); err != nil {
-				return err
-			}
-			out.Hard[ResourceName(key)] = newVal
-		}
-	} else {
-		out.Hard = nil
-	}
-	if in.Scopes != nil {
-		out.Scopes = make([]ResourceQuotaScope, len(in.Scopes))
-		for i := range in.Scopes {
-			out.Scopes[i] = ResourceQuotaScope(in.Scopes[i])
-		}
-	} else {
-		out.Scopes = nil
-	}
-	return nil
-}
-
-func Convert_api_ResourceQuotaSpec_To_v1_ResourceQuotaSpec(in *api.ResourceQuotaSpec, out *ResourceQuotaSpec, s conversion.Scope) error {
-	return autoConvert_api_ResourceQuotaSpec_To_v1_ResourceQuotaSpec(in, out, s)
-}
-
-func autoConvert_api_ResourceQuotaStatus_To_v1_ResourceQuotaStatus(in *api.ResourceQuotaStatus, out *ResourceQuotaStatus, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*api.ResourceQuotaStatus))(in)
-	}
-	if in.Hard != nil {
-		out.Hard = make(ResourceList)
-		for key, val := range in.Hard {
-			newVal := resource.Quantity{}
-			if err := api.Convert_resource_Quantity_To_resource_Quantity(&val, &newVal, s); err != nil {
-				return err
-			}
-			out.Hard[ResourceName(key)] = newVal
-		}
-	} else {
-		out.Hard = nil
-	}
-	if in.Used != nil {
-		out.Used = make(ResourceList)
-		for key, val := range in.Used {
-			newVal := resource.Quantity{}
-			if err := api.Convert_resource_Quantity_To_resource_Quantity(&val, &newVal, s); err != nil {
-				return err
-			}
-			out.Used[ResourceName(key)] = newVal
-		}
-	} else {
-		out.Used = nil
-	}
-	return nil
-}
-
-func Convert_api_ResourceQuotaStatus_To_v1_ResourceQuotaStatus(in *api.ResourceQuotaStatus, out *ResourceQuotaStatus, s conversion.Scope) error {
-	return autoConvert_api_ResourceQuotaStatus_To_v1_ResourceQuotaStatus(in, out, s)
-}
-
-func autoConvert_api_ResourceRequirements_To_v1_ResourceRequirements(in *api.ResourceRequirements, out *ResourceRequirements, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*api.ResourceRequirements))(in)
-	}
-	if in.Limits != nil {
-		out.Limits = make(ResourceList)
-		for key, val := range in.Limits {
-			newVal := resource.Quantity{}
-			if err := api.Convert_resource_Quantity_To_resource_Quantity(&val, &newVal, s); err != nil {
-				return err
-			}
-			out.Limits[ResourceName(key)] = newVal
-		}
-	} else {
-		out.Limits = nil
-	}
-	if in.Requests != nil {
-		out.Requests = make(ResourceList)
-		for key, val := range in.Requests {
-			newVal := resource.Quantity{}
-			if err := api.Convert_resource_Quantity_To_resource_Quantity(&val, &newVal, s); err != nil {
-				return err
-			}
-			out.Requests[ResourceName(key)] = newVal
-		}
-	} else {
-		out.Requests = nil
-	}
-	return nil
-}
-
-func Convert_api_ResourceRequirements_To_v1_ResourceRequirements(in *api.ResourceRequirements, out *ResourceRequirements, s conversion.Scope) error {
-	return autoConvert_api_ResourceRequirements_To_v1_ResourceRequirements(in, out, s)
-}
-
-func autoConvert_api_SELinuxOptions_To_v1_SELinuxOptions(in *api.SELinuxOptions, out *SELinuxOptions, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*api.SELinuxOptions))(in)
-	}
-	out.User = in.User
-	out.Role = in.Role
-	out.Type = in.Type
-	out.Level = in.Level
-	return nil
-}
-
-func Convert_api_SELinuxOptions_To_v1_SELinuxOptions(in *api.SELinuxOptions, out *SELinuxOptions, s conversion.Scope) error {
-	return autoConvert_api_SELinuxOptions_To_v1_SELinuxOptions(in, out, s)
-}
-
-func autoConvert_api_Secret_To_v1_Secret(in *api.Secret, out *Secret, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*api.Secret))(in)
-	}
-	if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil {
-		return err
-	}
-	if in.Data != nil {
-		out.Data = make(map[string][]uint8)
-		for key, val := range in.Data {
-			newVal := []uint8{}
-			if err := conversion.ByteSliceCopy(&val, &newVal, s); err != nil {
-				return err
-			}
-			out.Data[key] = newVal
-		}
-	} else {
-		out.Data = nil
-	}
-	out.Type = SecretType(in.Type)
-	return nil
-}
-
-func Convert_api_Secret_To_v1_Secret(in *api.Secret, out *Secret, s conversion.Scope) error {
-	return autoConvert_api_Secret_To_v1_Secret(in, out, s)
-}
-
-func autoConvert_api_SecretKeySelector_To_v1_SecretKeySelector(in *api.SecretKeySelector, out *SecretKeySelector, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*api.SecretKeySelector))(in)
-	}
-	if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil {
-		return err
-	}
-	out.Key = in.Key
-	return nil
-}
-
-func Convert_api_SecretKeySelector_To_v1_SecretKeySelector(in *api.SecretKeySelector, out *SecretKeySelector, s conversion.Scope) error {
-	return autoConvert_api_SecretKeySelector_To_v1_SecretKeySelector(in, out, s)
-}
-
-func autoConvert_api_SecretList_To_v1_SecretList(in *api.SecretList, out *SecretList, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*api.SecretList))(in)
-	}
-	if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
-		return err
-	}
-	if in.Items != nil {
-		out.Items = make([]Secret, len(in.Items))
-		for i := range in.Items {
-			if err := Convert_api_Secret_To_v1_Secret(&in.Items[i], &out.Items[i], s); err != nil {
-				return err
-			}
-		}
-	} else {
-		out.Items = nil
-	}
-	return nil
-}
-
-func Convert_api_SecretList_To_v1_SecretList(in *api.SecretList, out *SecretList, s conversion.Scope) error {
-	return autoConvert_api_SecretList_To_v1_SecretList(in, out, s)
-}
-
-func autoConvert_api_SecretVolumeSource_To_v1_SecretVolumeSource(in *api.SecretVolumeSource, out *SecretVolumeSource, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*api.SecretVolumeSource))(in)
-	}
-	out.SecretName = in.SecretName
-	return nil
-}
-
-func Convert_api_SecretVolumeSource_To_v1_SecretVolumeSource(in *api.SecretVolumeSource, out *SecretVolumeSource, s conversion.Scope) error {
-	return autoConvert_api_SecretVolumeSource_To_v1_SecretVolumeSource(in, out, s)
-}
-
-func autoConvert_api_SecurityContext_To_v1_SecurityContext(in *api.SecurityContext, out *SecurityContext, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*api.SecurityContext))(in)
-	}
-	// unable to generate simple pointer conversion for api.Capabilities -> v1.Capabilities
-	if in.Capabilities != nil {
-		out.Capabilities = new(Capabilities)
-		if err := Convert_api_Capabilities_To_v1_Capabilities(in.Capabilities, out.Capabilities, s); err != nil {
-			return err
-		}
-	} else {
-		out.Capabilities = nil
-	}
-	if in.Privileged != nil {
-		out.Privileged = new(bool)
-		*out.Privileged = *in.Privileged
-	} else {
-		out.Privileged = nil
-	}
-	// unable to generate simple pointer conversion for api.SELinuxOptions -> v1.SELinuxOptions
-	if in.SELinuxOptions != nil {
-		out.SELinuxOptions = new(SELinuxOptions)
-		if err := Convert_api_SELinuxOptions_To_v1_SELinuxOptions(in.SELinuxOptions, out.SELinuxOptions, s); err != nil {
-			return err
-		}
-	} else {
-		out.SELinuxOptions = nil
-	}
-	if in.RunAsUser != nil {
-		out.RunAsUser = new(int64)
-		*out.RunAsUser = *in.RunAsUser
-	} else {
-		out.RunAsUser = nil
-	}
-	if in.RunAsNonRoot != nil {
-		out.RunAsNonRoot = new(bool)
-		*out.RunAsNonRoot = *in.RunAsNonRoot
-	} else {
-		out.RunAsNonRoot = nil
-	}
-	if in.ReadOnlyRootFilesystem != nil {
-		out.ReadOnlyRootFilesystem = new(bool)
-		*out.ReadOnlyRootFilesystem = *in.ReadOnlyRootFilesystem
-	} else {
-		out.ReadOnlyRootFilesystem = nil
-	}
-	return nil
-}
-
-func Convert_api_SecurityContext_To_v1_SecurityContext(in *api.SecurityContext, out *SecurityContext, s conversion.Scope) error {
-	return autoConvert_api_SecurityContext_To_v1_SecurityContext(in, out, s)
-}
-
-func autoConvert_api_SerializedReference_To_v1_SerializedReference(in *api.SerializedReference, out *SerializedReference, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*api.SerializedReference))(in)
-	}
-	if err := Convert_api_ObjectReference_To_v1_ObjectReference(&in.Reference, &out.Reference, s); err != nil {
-		return err
-	}
-	return nil
-}
-
-func Convert_api_SerializedReference_To_v1_SerializedReference(in *api.SerializedReference, out *SerializedReference, s conversion.Scope) error {
-	return autoConvert_api_SerializedReference_To_v1_SerializedReference(in, out, s)
-}
-
-func autoConvert_api_Service_To_v1_Service(in *api.Service, out *Service, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*api.Service))(in)
-	}
-	if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil {
-		return err
-	}
-	if err := Convert_api_ServiceSpec_To_v1_ServiceSpec(&in.Spec, &out.Spec, s); err != nil {
-		return err
-	}
-	if err := Convert_api_ServiceStatus_To_v1_ServiceStatus(&in.Status, &out.Status, s); err != nil {
-		return err
-	}
-	return nil
-}
-
-func Convert_api_Service_To_v1_Service(in *api.Service, out *Service, s conversion.Scope) error {
-	return autoConvert_api_Service_To_v1_Service(in, out, s)
-}
-
-func autoConvert_api_ServiceAccount_To_v1_ServiceAccount(in *api.ServiceAccount, out *ServiceAccount, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*api.ServiceAccount))(in)
-	}
-	if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil {
-		return err
-	}
-	if in.Secrets != nil {
-		out.Secrets = make([]ObjectReference, len(in.Secrets))
-		for i := range in.Secrets {
-			if err := Convert_api_ObjectReference_To_v1_ObjectReference(&in.Secrets[i], &out.Secrets[i], s); err != nil {
-				return err
-			}
-		}
-	} else {
-		out.Secrets = nil
-	}
-	if in.ImagePullSecrets != nil {
-		out.ImagePullSecrets = make([]LocalObjectReference, len(in.ImagePullSecrets))
-		for i := range in.ImagePullSecrets {
-			if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(&in.ImagePullSecrets[i], &out.ImagePullSecrets[i], s); err != nil {
-				return err
-			}
-		}
-	} else {
-		out.ImagePullSecrets = nil
-	}
-	return nil
-}
-
-func Convert_api_ServiceAccount_To_v1_ServiceAccount(in *api.ServiceAccount, out *ServiceAccount, s conversion.Scope) error {
-	return autoConvert_api_ServiceAccount_To_v1_ServiceAccount(in, out, s)
-}
-
-func autoConvert_api_ServiceAccountList_To_v1_ServiceAccountList(in *api.ServiceAccountList, out *ServiceAccountList, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*api.ServiceAccountList))(in)
-	}
-	if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
-		return err
-	}
-	if in.Items != nil {
-		out.Items = make([]ServiceAccount, len(in.Items))
-		for i := range in.Items {
-			if err := Convert_api_ServiceAccount_To_v1_ServiceAccount(&in.Items[i], &out.Items[i], s); err != nil {
-				return err
-			}
-		}
-	} else {
-		out.Items = nil
-	}
-	return nil
-}
-
-func Convert_api_ServiceAccountList_To_v1_ServiceAccountList(in *api.ServiceAccountList, out *ServiceAccountList, s conversion.Scope) error {
-	return autoConvert_api_ServiceAccountList_To_v1_ServiceAccountList(in, out, s)
-}
-
-func autoConvert_api_ServiceList_To_v1_ServiceList(in *api.ServiceList, out *ServiceList, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*api.ServiceList))(in)
-	}
-	if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
-		return err
-	}
-	if in.Items != nil {
-		out.Items = make([]Service, len(in.Items))
-		for i := range in.Items {
-			if err := Convert_api_Service_To_v1_Service(&in.Items[i], &out.Items[i], s); err != nil {
-				return err
-			}
-		}
-	} else {
-		out.Items = nil
-	}
-	return nil
-}
-
-func Convert_api_ServiceList_To_v1_ServiceList(in *api.ServiceList, out *ServiceList, s conversion.Scope) error {
-	return autoConvert_api_ServiceList_To_v1_ServiceList(in, out, s)
-}
-
-func autoConvert_api_ServicePort_To_v1_ServicePort(in *api.ServicePort, out *ServicePort, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*api.ServicePort))(in)
-	}
-	out.Name = in.Name
-	out.Protocol = Protocol(in.Protocol)
-	out.Port = int32(in.Port)
-	if err := api.Convert_intstr_IntOrString_To_intstr_IntOrString(&in.TargetPort, &out.TargetPort, s); err != nil {
-		return err
-	}
-	out.NodePort = int32(in.NodePort)
-	return nil
-}
-
-func Convert_api_ServicePort_To_v1_ServicePort(in *api.ServicePort, out *ServicePort, s conversion.Scope) error {
-	return autoConvert_api_ServicePort_To_v1_ServicePort(in, out, s)
-}
-
-func autoConvert_api_ServiceProxyOptions_To_v1_ServiceProxyOptions(in *api.ServiceProxyOptions, out *ServiceProxyOptions, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*api.ServiceProxyOptions))(in)
-	}
-	out.Path = in.Path
-	return nil
-}
-
-func Convert_api_ServiceProxyOptions_To_v1_ServiceProxyOptions(in *api.ServiceProxyOptions, out *ServiceProxyOptions, s conversion.Scope) error {
-	return autoConvert_api_ServiceProxyOptions_To_v1_ServiceProxyOptions(in, out, s)
-}
-
-func autoConvert_api_ServiceSpec_To_v1_ServiceSpec(in *api.ServiceSpec, out *ServiceSpec, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*api.ServiceSpec))(in)
-	}
-	out.Type = ServiceType(in.Type)
-	if in.Ports != nil {
-		out.Ports = make([]ServicePort, len(in.Ports))
-		for i := range in.Ports {
-			if err := Convert_api_ServicePort_To_v1_ServicePort(&in.Ports[i], &out.Ports[i], s); err != nil {
-				return err
-			}
-		}
-	} else {
-		out.Ports = nil
-	}
-	if in.Selector != nil {
-		out.Selector = make(map[string]string)
-		for key, val := range in.Selector {
-			out.Selector[key] = val
-		}
-	} else {
-		out.Selector = nil
-	}
-	out.ClusterIP = in.ClusterIP
-	if in.ExternalIPs != nil {
-		out.ExternalIPs = make([]string, len(in.ExternalIPs))
-		for i := range in.ExternalIPs {
-			out.ExternalIPs[i] = in.ExternalIPs[i]
-		}
-	} else {
-		out.ExternalIPs = nil
-	}
-	out.LoadBalancerIP = in.LoadBalancerIP
-	out.SessionAffinity = ServiceAffinity(in.SessionAffinity)
-	return nil
-}
-
-func autoConvert_api_ServiceStatus_To_v1_ServiceStatus(in *api.ServiceStatus, out *ServiceStatus, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*api.ServiceStatus))(in)
-	}
-	if err := Convert_api_LoadBalancerStatus_To_v1_LoadBalancerStatus(&in.LoadBalancer, &out.LoadBalancer, s); err != nil {
-		return err
-	}
-	return nil
-}
-
-func Convert_api_ServiceStatus_To_v1_ServiceStatus(in *api.ServiceStatus, out *ServiceStatus, s conversion.Scope) error {
-	return autoConvert_api_ServiceStatus_To_v1_ServiceStatus(in, out, s)
-}
-
-func autoConvert_api_TCPSocketAction_To_v1_TCPSocketAction(in *api.TCPSocketAction, out *TCPSocketAction, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*api.TCPSocketAction))(in)
-	}
-	if err := api.Convert_intstr_IntOrString_To_intstr_IntOrString(&in.Port, &out.Port, s); err != nil {
-		return err
-	}
-	return nil
-}
-
-func Convert_api_TCPSocketAction_To_v1_TCPSocketAction(in *api.TCPSocketAction, out *TCPSocketAction, s conversion.Scope) error {
-	return autoConvert_api_TCPSocketAction_To_v1_TCPSocketAction(in, out, s)
-}
-
-func autoConvert_api_Volume_To_v1_Volume(in *api.Volume, out *Volume, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*api.Volume))(in)
-	}
-	out.Name = in.Name
-	if err := Convert_api_VolumeSource_To_v1_VolumeSource(&in.VolumeSource, &out.VolumeSource, s); err != nil {
-		return err
-	}
-	return nil
-}
-
-func Convert_api_Volume_To_v1_Volume(in *api.Volume, out *Volume, s conversion.Scope) error {
-	return autoConvert_api_Volume_To_v1_Volume(in, out, s)
-}
-
-func autoConvert_api_VolumeMount_To_v1_VolumeMount(in *api.VolumeMount, out *VolumeMount, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*api.VolumeMount))(in)
-	}
-	out.Name = in.Name
-	out.ReadOnly = in.ReadOnly
-	out.MountPath = in.MountPath
-	return nil
-}
-
-func Convert_api_VolumeMount_To_v1_VolumeMount(in *api.VolumeMount, out *VolumeMount, s conversion.Scope) error {
-	return autoConvert_api_VolumeMount_To_v1_VolumeMount(in, out, s)
-}
-
-func autoConvert_api_VolumeSource_To_v1_VolumeSource(in *api.VolumeSource, out *VolumeSource, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*api.VolumeSource))(in)
-	}
-	// unable to generate simple pointer conversion for api.HostPathVolumeSource -> v1.HostPathVolumeSource
-	if in.HostPath != nil {
-		out.HostPath = new(HostPathVolumeSource)
-		if err := Convert_api_HostPathVolumeSource_To_v1_HostPathVolumeSource(in.HostPath, out.HostPath, s); err != nil {
-			return err
-		}
-	} else {
-		out.HostPath = nil
-	}
-	// unable to generate simple pointer conversion for api.EmptyDirVolumeSource -> v1.EmptyDirVolumeSource
-	if in.EmptyDir != nil {
-		out.EmptyDir = new(EmptyDirVolumeSource)
-		if err := Convert_api_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource(in.EmptyDir, out.EmptyDir, s); err != nil {
-			return err
-		}
-	} else {
-		out.EmptyDir = nil
-	}
-	// unable to generate simple pointer conversion for api.GCEPersistentDiskVolumeSource -> v1.GCEPersistentDiskVolumeSource
-	if in.GCEPersistentDisk != nil {
-		out.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource)
-		if err := Convert_api_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource(in.GCEPersistentDisk, out.GCEPersistentDisk, s); err != nil {
-			return err
-		}
-	} else {
-		out.GCEPersistentDisk = nil
-	}
-	// unable to generate simple pointer conversion for api.AWSElasticBlockStoreVolumeSource -> v1.AWSElasticBlockStoreVolumeSource
-	if in.AWSElasticBlockStore != nil {
-		out.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource)
-		if err := Convert_api_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource(in.AWSElasticBlockStore, out.AWSElasticBlockStore, s); err != nil {
-			return err
-		}
-	} else {
-		out.AWSElasticBlockStore = nil
-	}
-	// unable to generate simple pointer conversion for api.GitRepoVolumeSource -> v1.GitRepoVolumeSource
-	if in.GitRepo != nil {
-		out.GitRepo = new(GitRepoVolumeSource)
-		if err := Convert_api_GitRepoVolumeSource_To_v1_GitRepoVolumeSource(in.GitRepo, out.GitRepo, s); err != nil {
-			return err
-		}
-	} else {
-		out.GitRepo = nil
-	}
-	// unable to generate simple pointer conversion for api.SecretVolumeSource -> v1.SecretVolumeSource
-	if in.Secret != nil {
-		out.Secret = new(SecretVolumeSource)
-		if err := Convert_api_SecretVolumeSource_To_v1_SecretVolumeSource(in.Secret, out.Secret, s); err != nil {
-			return err
-		}
-	} else {
-		out.Secret = nil
-	}
-	// unable to generate simple pointer conversion for api.NFSVolumeSource -> v1.NFSVolumeSource
-	if in.NFS != nil {
-		out.NFS = new(NFSVolumeSource)
-		if err := Convert_api_NFSVolumeSource_To_v1_NFSVolumeSource(in.NFS, out.NFS, s); err != nil {
-			return err
-		}
-	} else {
-		out.NFS = nil
-	}
-	// unable to generate simple pointer conversion for api.ISCSIVolumeSource -> v1.ISCSIVolumeSource
-	if in.ISCSI != nil {
-		out.ISCSI = new(ISCSIVolumeSource)
-		if err := Convert_api_ISCSIVolumeSource_To_v1_ISCSIVolumeSource(in.ISCSI, out.ISCSI, s); err != nil {
-			return err
-		}
-	} else {
-		out.ISCSI = nil
-	}
-	// unable to generate simple pointer conversion for api.GlusterfsVolumeSource -> v1.GlusterfsVolumeSource
-	if in.Glusterfs != nil {
-		out.Glusterfs = new(GlusterfsVolumeSource)
-		if err := Convert_api_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource(in.Glusterfs, out.Glusterfs, s); err != nil {
-			return err
-		}
-	} else {
-		out.Glusterfs = nil
-	}
-	// unable to generate simple pointer conversion for api.PersistentVolumeClaimVolumeSource -> v1.PersistentVolumeClaimVolumeSource
-	if in.PersistentVolumeClaim != nil {
-		out.PersistentVolumeClaim = new(PersistentVolumeClaimVolumeSource)
-		if err := Convert_api_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource(in.PersistentVolumeClaim, out.PersistentVolumeClaim, s); err != nil {
-			return err
-		}
-	} else {
-		out.PersistentVolumeClaim = nil
-	}
-	// unable to generate simple pointer conversion for api.RBDVolumeSource -> v1.RBDVolumeSource
-	if in.RBD != nil {
-		out.RBD = new(RBDVolumeSource)
-		if err := Convert_api_RBDVolumeSource_To_v1_RBDVolumeSource(in.RBD, out.RBD, s); err != nil {
-			return err
-		}
-	} else {
-		out.RBD = nil
-	}
-	// unable to generate simple pointer conversion for api.FlexVolumeSource -> v1.FlexVolumeSource
-	if in.FlexVolume != nil {
-		out.FlexVolume = new(FlexVolumeSource)
-		if err := Convert_api_FlexVolumeSource_To_v1_FlexVolumeSource(in.FlexVolume, out.FlexVolume, s); err != nil {
-			return err
-		}
-	} else {
-		out.FlexVolume = nil
-	}
-	// unable to generate simple pointer conversion for api.CinderVolumeSource -> v1.CinderVolumeSource
-	if in.Cinder != nil {
-		out.Cinder = new(CinderVolumeSource)
-		if err := Convert_api_CinderVolumeSource_To_v1_CinderVolumeSource(in.Cinder, out.Cinder, s); err != nil {
-			return err
-		}
-	} else {
-		out.Cinder = nil
-	}
-	// unable to generate simple pointer conversion for api.CephFSVolumeSource -> v1.CephFSVolumeSource
-	if in.CephFS != nil {
-		out.CephFS = new(CephFSVolumeSource)
-		if err := Convert_api_CephFSVolumeSource_To_v1_CephFSVolumeSource(in.CephFS, out.CephFS, s); err != nil {
-			return err
-		}
-	} else {
-		out.CephFS = nil
-	}
-	// unable to generate simple pointer conversion for api.FlockerVolumeSource -> v1.FlockerVolumeSource
-	if in.Flocker != nil {
-		out.Flocker = new(FlockerVolumeSource)
-		if err := Convert_api_FlockerVolumeSource_To_v1_FlockerVolumeSource(in.Flocker, out.Flocker, s); err != nil {
-			return err
-		}
-	} else {
-		out.Flocker = nil
-	}
-	// unable to generate simple pointer conversion for api.DownwardAPIVolumeSource -> v1.DownwardAPIVolumeSource
-	if in.DownwardAPI != nil {
-		out.DownwardAPI = new(DownwardAPIVolumeSource)
-		if err := Convert_api_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource(in.DownwardAPI, out.DownwardAPI, s); err != nil {
-			return err
-		}
-	} else {
-		out.DownwardAPI = nil
-	}
-	// unable to generate simple pointer conversion for api.FCVolumeSource -> v1.FCVolumeSource
-	if in.FC != nil {
-		out.FC = new(FCVolumeSource)
-		if err := Convert_api_FCVolumeSource_To_v1_FCVolumeSource(in.FC, out.FC, s); err != nil {
-			return err
-		}
-	} else {
-		out.FC = nil
-	}
-	// unable to generate simple pointer conversion for api.AzureFileVolumeSource -> v1.AzureFileVolumeSource
-	if in.AzureFile != nil {
-		out.AzureFile = new(AzureFileVolumeSource)
-		if err := Convert_api_AzureFileVolumeSource_To_v1_AzureFileVolumeSource(in.AzureFile, out.AzureFile, s); err != nil {
-			return err
-		}
-	} else {
-		out.AzureFile = nil
-	}
-	// unable to generate simple pointer conversion for api.ConfigMapVolumeSource -> v1.ConfigMapVolumeSource
-	if in.ConfigMap != nil {
-		out.ConfigMap = new(ConfigMapVolumeSource)
-		if err := Convert_api_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource(in.ConfigMap, out.ConfigMap, s); err != nil {
-			return err
-		}
-	} else {
-		out.ConfigMap = nil
-	}
-	return nil
-}
-
-func Convert_api_VolumeSource_To_v1_VolumeSource(in *api.VolumeSource, out *VolumeSource, s conversion.Scope) error {
-	return autoConvert_api_VolumeSource_To_v1_VolumeSource(in, out, s)
-}
-
-func autoConvert_unversioned_ExportOptions_To_v1_ExportOptions(in *unversioned.ExportOptions, out *ExportOptions, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*unversioned.ExportOptions))(in)
-	}
-	out.Export = in.Export
-	out.Exact = in.Exact
-	return nil
-}
-
-func Convert_unversioned_ExportOptions_To_v1_ExportOptions(in *unversioned.ExportOptions, out *ExportOptions, s conversion.Scope) error {
-	return autoConvert_unversioned_ExportOptions_To_v1_ExportOptions(in, out, s)
-}
-
-func autoConvert_v1_AWSElasticBlockStoreVolumeSource_To_api_AWSElasticBlockStoreVolumeSource(in *AWSElasticBlockStoreVolumeSource, out *api.AWSElasticBlockStoreVolumeSource, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*AWSElasticBlockStoreVolumeSource))(in)
-	}
-	out.VolumeID = in.VolumeID
-	out.FSType = in.FSType
-	out.Partition = int(in.Partition)
-	out.ReadOnly = in.ReadOnly
-	return nil
-}
-
-func Convert_v1_AWSElasticBlockStoreVolumeSource_To_api_AWSElasticBlockStoreVolumeSource(in *AWSElasticBlockStoreVolumeSource, out *api.AWSElasticBlockStoreVolumeSource, s conversion.Scope) error {
-	return autoConvert_v1_AWSElasticBlockStoreVolumeSource_To_api_AWSElasticBlockStoreVolumeSource(in, out, s)
-}
-
-func autoConvert_v1_AzureFileVolumeSource_To_api_AzureFileVolumeSource(in *AzureFileVolumeSource, out *api.AzureFileVolumeSource, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*AzureFileVolumeSource))(in)
-	}
-	out.SecretName = in.SecretName
-	out.ShareName = in.ShareName
-	out.ReadOnly = in.ReadOnly
-	return nil
-}
-
-func Convert_v1_AzureFileVolumeSource_To_api_AzureFileVolumeSource(in *AzureFileVolumeSource, out *api.AzureFileVolumeSource, s conversion.Scope) error {
-	return autoConvert_v1_AzureFileVolumeSource_To_api_AzureFileVolumeSource(in, out, s)
-}
-
-func autoConvert_v1_Binding_To_api_Binding(in *Binding, out *api.Binding, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*Binding))(in)
-	}
-	if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil {
-		return err
-	}
-	if err := Convert_v1_ObjectReference_To_api_ObjectReference(&in.Target, &out.Target, s); err != nil {
-		return err
-	}
-	return nil
-}
-
-func Convert_v1_Binding_To_api_Binding(in *Binding, out *api.Binding, s conversion.Scope) error {
-	return autoConvert_v1_Binding_To_api_Binding(in, out, s)
-}
-
-func autoConvert_v1_Capabilities_To_api_Capabilities(in *Capabilities, out *api.Capabilities, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*Capabilities))(in)
-	}
-	if in.Add != nil {
-		out.Add = make([]api.Capability, len(in.Add))
-		for i := range in.Add {
-			out.Add[i] = api.Capability(in.Add[i])
-		}
-	} else {
-		out.Add = nil
-	}
-	if in.Drop != nil {
-		out.Drop = make([]api.Capability, len(in.Drop))
-		for i := range in.Drop {
-			out.Drop[i] = api.Capability(in.Drop[i])
-		}
-	} else {
-		out.Drop = nil
-	}
-	return nil
-}
-
-func Convert_v1_Capabilities_To_api_Capabilities(in *Capabilities, out *api.Capabilities, s conversion.Scope) error {
-	return autoConvert_v1_Capabilities_To_api_Capabilities(in, out, s)
-}
-
-func autoConvert_v1_CephFSVolumeSource_To_api_CephFSVolumeSource(in *CephFSVolumeSource, out *api.CephFSVolumeSource, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*CephFSVolumeSource))(in)
-	}
-	if in.Monitors != nil {
-		out.Monitors = make([]string, len(in.Monitors))
-		for i := range in.Monitors {
-			out.Monitors[i] = in.Monitors[i]
-		}
-	} else {
-		out.Monitors = nil
-	}
-	out.Path = in.Path
-	out.User = in.User
-	out.SecretFile = in.SecretFile
-	// unable to generate simple pointer conversion for v1.LocalObjectReference -> api.LocalObjectReference
-	if in.SecretRef != nil {
-		out.SecretRef = new(api.LocalObjectReference)
-		if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(in.SecretRef, out.SecretRef, s); err != nil {
-			return err
-		}
-	} else {
-		out.SecretRef = nil
-	}
-	out.ReadOnly = in.ReadOnly
-	return nil
-}
-
-func Convert_v1_CephFSVolumeSource_To_api_CephFSVolumeSource(in *CephFSVolumeSource, out *api.CephFSVolumeSource, s conversion.Scope) error {
-	return autoConvert_v1_CephFSVolumeSource_To_api_CephFSVolumeSource(in, out, s)
-}
-
-func autoConvert_v1_CinderVolumeSource_To_api_CinderVolumeSource(in *CinderVolumeSource, out *api.CinderVolumeSource, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*CinderVolumeSource))(in)
-	}
-	out.VolumeID = in.VolumeID
-	out.FSType = in.FSType
-	out.ReadOnly = in.ReadOnly
-	return nil
-}
-
-func Convert_v1_CinderVolumeSource_To_api_CinderVolumeSource(in *CinderVolumeSource, out *api.CinderVolumeSource, s conversion.Scope) error {
-	return autoConvert_v1_CinderVolumeSource_To_api_CinderVolumeSource(in, out, s)
-}
-
-func autoConvert_v1_ComponentCondition_To_api_ComponentCondition(in *ComponentCondition, out *api.ComponentCondition, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*ComponentCondition))(in)
-	}
-	out.Type = api.ComponentConditionType(in.Type)
-	out.Status = api.ConditionStatus(in.Status)
-	out.Message = in.Message
-	out.Error = in.Error
-	return nil
-}
-
-func Convert_v1_ComponentCondition_To_api_ComponentCondition(in *ComponentCondition, out *api.ComponentCondition, s conversion.Scope) error {
-	return autoConvert_v1_ComponentCondition_To_api_ComponentCondition(in, out, s)
-}
-
-func autoConvert_v1_ComponentStatus_To_api_ComponentStatus(in *ComponentStatus, out *api.ComponentStatus, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*ComponentStatus))(in)
-	}
-	if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil {
-		return err
-	}
-	if in.Conditions != nil {
-		out.Conditions = make([]api.ComponentCondition, len(in.Conditions))
-		for i := range in.Conditions {
-			if err := Convert_v1_ComponentCondition_To_api_ComponentCondition(&in.Conditions[i], &out.Conditions[i], s); err != nil {
-				return err
-			}
-		}
-	} else {
-		out.Conditions = nil
-	}
-	return nil
-}
-
-func Convert_v1_ComponentStatus_To_api_ComponentStatus(in *ComponentStatus, out *api.ComponentStatus, s conversion.Scope) error {
-	return autoConvert_v1_ComponentStatus_To_api_ComponentStatus(in, out, s)
-}
-
-func autoConvert_v1_ComponentStatusList_To_api_ComponentStatusList(in *ComponentStatusList, out *api.ComponentStatusList, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*ComponentStatusList))(in)
-	}
-	if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
-		return err
-	}
-	if in.Items != nil {
-		out.Items = make([]api.ComponentStatus, len(in.Items))
-		for i := range in.Items {
-			if err := Convert_v1_ComponentStatus_To_api_ComponentStatus(&in.Items[i], &out.Items[i], s); err != nil {
-				return err
-			}
-		}
-	} else {
-		out.Items = nil
-	}
-	return nil
-}
-
-func Convert_v1_ComponentStatusList_To_api_ComponentStatusList(in *ComponentStatusList, out *api.ComponentStatusList, s conversion.Scope) error {
-	return autoConvert_v1_ComponentStatusList_To_api_ComponentStatusList(in, out, s)
-}
-
-func autoConvert_v1_ConfigMap_To_api_ConfigMap(in *ConfigMap, out *api.ConfigMap, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*ConfigMap))(in)
-	}
-	if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil {
-		return err
-	}
-	if in.Data != nil {
-		out.Data = make(map[string]string)
-		for key, val := range in.Data {
-			out.Data[key] = val
-		}
-	} else {
-		out.Data = nil
-	}
-	return nil
-}
-
-func Convert_v1_ConfigMap_To_api_ConfigMap(in *ConfigMap, out *api.ConfigMap, s conversion.Scope) error {
-	return autoConvert_v1_ConfigMap_To_api_ConfigMap(in, out, s)
-}
-
-func autoConvert_v1_ConfigMapKeySelector_To_api_ConfigMapKeySelector(in *ConfigMapKeySelector, out *api.ConfigMapKeySelector, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*ConfigMapKeySelector))(in)
-	}
-	if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil {
-		return err
-	}
-	out.Key = in.Key
-	return nil
-}
-
-func Convert_v1_ConfigMapKeySelector_To_api_ConfigMapKeySelector(in *ConfigMapKeySelector, out *api.ConfigMapKeySelector, s conversion.Scope) error {
-	return autoConvert_v1_ConfigMapKeySelector_To_api_ConfigMapKeySelector(in, out, s)
-}
-
-func autoConvert_v1_ConfigMapList_To_api_ConfigMapList(in *ConfigMapList, out *api.ConfigMapList, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*ConfigMapList))(in)
-	}
-	if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
-		return err
-	}
-	if in.Items != nil {
-		out.Items = make([]api.ConfigMap, len(in.Items))
-		for i := range in.Items {
-			if err := Convert_v1_ConfigMap_To_api_ConfigMap(&in.Items[i], &out.Items[i], s); err != nil {
-				return err
-			}
-		}
-	} else {
-		out.Items = nil
-	}
-	return nil
-}
-
-func Convert_v1_ConfigMapList_To_api_ConfigMapList(in *ConfigMapList, out *api.ConfigMapList, s conversion.Scope) error {
-	return autoConvert_v1_ConfigMapList_To_api_ConfigMapList(in, out, s)
-}
-
-func autoConvert_v1_ConfigMapVolumeSource_To_api_ConfigMapVolumeSource(in *ConfigMapVolumeSource, out *api.ConfigMapVolumeSource, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*ConfigMapVolumeSource))(in)
-	}
-	if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil {
-		return err
-	}
-	if in.Items != nil {
-		out.Items = make([]api.KeyToPath, len(in.Items))
-		for i := range in.Items {
-			if err := Convert_v1_KeyToPath_To_api_KeyToPath(&in.Items[i], &out.Items[i], s); err != nil {
-				return err
-			}
-		}
-	} else {
-		out.Items = nil
-	}
-	return nil
-}
-
-func Convert_v1_ConfigMapVolumeSource_To_api_ConfigMapVolumeSource(in *ConfigMapVolumeSource, out *api.ConfigMapVolumeSource, s conversion.Scope) error {
-	return autoConvert_v1_ConfigMapVolumeSource_To_api_ConfigMapVolumeSource(in, out, s)
-}
-
-func autoConvert_v1_Container_To_api_Container(in *Container, out *api.Container, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*Container))(in)
-	}
-	out.Name = in.Name
-	out.Image = in.Image
-	if in.Command != nil {
-		out.Command = make([]string, len(in.Command))
-		for i := range in.Command {
-			out.Command[i] = in.Command[i]
-		}
-	} else {
-		out.Command = nil
-	}
-	if in.Args != nil {
-		out.Args = make([]string, len(in.Args))
-		for i := range in.Args {
-			out.Args[i] = in.Args[i]
-		}
-	} else {
-		out.Args = nil
-	}
-	out.WorkingDir = in.WorkingDir
-	if in.Ports != nil {
-		out.Ports = make([]api.ContainerPort, len(in.Ports))
-		for i := range in.Ports {
-			if err := Convert_v1_ContainerPort_To_api_ContainerPort(&in.Ports[i], &out.Ports[i], s); err != nil {
-				return err
-			}
-		}
-	} else {
-		out.Ports = nil
-	}
-	if in.Env != nil {
-		out.Env = make([]api.EnvVar, len(in.Env))
-		for i := range in.Env {
-			if err := Convert_v1_EnvVar_To_api_EnvVar(&in.Env[i], &out.Env[i], s); err != nil {
-				return err
-			}
-		}
-	} else {
-		out.Env = nil
-	}
-	if err := Convert_v1_ResourceRequirements_To_api_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil {
-		return err
-	}
-	if in.VolumeMounts != nil {
-		out.VolumeMounts = make([]api.VolumeMount, len(in.VolumeMounts))
-		for i := range in.VolumeMounts {
-			if err := Convert_v1_VolumeMount_To_api_VolumeMount(&in.VolumeMounts[i], &out.VolumeMounts[i], s); err != nil {
-				return err
-			}
-		}
-	} else {
-		out.VolumeMounts = nil
-	}
-	// unable to generate simple pointer conversion for v1.Probe -> api.Probe
-	if in.LivenessProbe != nil {
-		out.LivenessProbe = new(api.Probe)
-		if err := Convert_v1_Probe_To_api_Probe(in.LivenessProbe, out.LivenessProbe, s); err != nil {
-			return err
-		}
-	} else {
-		out.LivenessProbe = nil
-	}
-	// unable to generate simple pointer conversion for v1.Probe -> api.Probe
-	if in.ReadinessProbe != nil {
-		out.ReadinessProbe = new(api.Probe)
-		if err := Convert_v1_Probe_To_api_Probe(in.ReadinessProbe, out.ReadinessProbe, s); err != nil {
-			return err
-		}
-	} else {
-		out.ReadinessProbe = nil
-	}
-	// unable to generate simple pointer conversion for v1.Lifecycle -> api.Lifecycle
-	if in.Lifecycle != nil {
-		out.Lifecycle = new(api.Lifecycle)
-		if err := Convert_v1_Lifecycle_To_api_Lifecycle(in.Lifecycle, out.Lifecycle, s); err != nil {
-			return err
-		}
-	} else {
-		out.Lifecycle = nil
-	}
-	out.TerminationMessagePath = in.TerminationMessagePath
-	out.ImagePullPolicy = api.PullPolicy(in.ImagePullPolicy)
-	// unable to generate simple pointer conversion for v1.SecurityContext -> api.SecurityContext
-	if in.SecurityContext != nil {
-		out.SecurityContext = new(api.SecurityContext)
-		if err := Convert_v1_SecurityContext_To_api_SecurityContext(in.SecurityContext, out.SecurityContext, s); err != nil {
-			return err
-		}
-	} else {
-		out.SecurityContext = nil
-	}
-	out.Stdin = in.Stdin
-	out.StdinOnce = in.StdinOnce
-	out.TTY = in.TTY
-	return nil
-}
-
-func Convert_v1_Container_To_api_Container(in *Container, out *api.Container, s conversion.Scope) error {
-	return autoConvert_v1_Container_To_api_Container(in, out, s)
-}
-
-func autoConvert_v1_ContainerImage_To_api_ContainerImage(in *ContainerImage, out *api.ContainerImage, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*ContainerImage))(in)
-	}
-	if in.Names != nil {
-		out.Names = make([]string, len(in.Names))
-		for i := range in.Names {
-			out.Names[i] = in.Names[i]
-		}
-	} else {
-		out.Names = nil
-	}
-	out.SizeBytes = in.SizeBytes
-	return nil
-}
-
-func Convert_v1_ContainerImage_To_api_ContainerImage(in *ContainerImage, out *api.ContainerImage, s conversion.Scope) error {
-	return autoConvert_v1_ContainerImage_To_api_ContainerImage(in, out, s)
-}
-
-func autoConvert_v1_ContainerPort_To_api_ContainerPort(in *ContainerPort, out *api.ContainerPort, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*ContainerPort))(in)
-	}
-	out.Name = in.Name
-	out.HostPort = int(in.HostPort)
-	out.ContainerPort = int(in.ContainerPort)
-	out.Protocol = api.Protocol(in.Protocol)
-	out.HostIP = in.HostIP
-	return nil
-}
-
-func Convert_v1_ContainerPort_To_api_ContainerPort(in *ContainerPort, out *api.ContainerPort, s conversion.Scope) error {
-	return autoConvert_v1_ContainerPort_To_api_ContainerPort(in, out, s)
-}
-
-func autoConvert_v1_ContainerState_To_api_ContainerState(in *ContainerState, out *api.ContainerState, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*ContainerState))(in)
-	}
-	// unable to generate simple pointer conversion for v1.ContainerStateWaiting -> api.ContainerStateWaiting
-	if in.Waiting != nil {
-		out.Waiting = new(api.ContainerStateWaiting)
-		if err := Convert_v1_ContainerStateWaiting_To_api_ContainerStateWaiting(in.Waiting, out.Waiting, s); err != nil {
-			return err
-		}
-	} else {
-		out.Waiting = nil
-	}
-	// unable to generate simple pointer conversion for v1.ContainerStateRunning -> api.ContainerStateRunning
-	if in.Running != nil {
-		out.Running = new(api.ContainerStateRunning)
-		if err := Convert_v1_ContainerStateRunning_To_api_ContainerStateRunning(in.Running, out.Running, s); err != nil {
-			return err
-		}
-	} else {
-		out.Running = nil
-	}
-	// unable to generate simple pointer conversion for v1.ContainerStateTerminated -> api.ContainerStateTerminated
-	if in.Terminated != nil {
-		out.Terminated = new(api.ContainerStateTerminated)
-		if err := Convert_v1_ContainerStateTerminated_To_api_ContainerStateTerminated(in.Terminated, out.Terminated, s); err != nil {
-			return err
-		}
-	} else {
-		out.Terminated = nil
-	}
-	return nil
-}
-
-func Convert_v1_ContainerState_To_api_ContainerState(in *ContainerState, out *api.ContainerState, s conversion.Scope) error {
-	return autoConvert_v1_ContainerState_To_api_ContainerState(in, out, s)
-}
-
-func autoConvert_v1_ContainerStateRunning_To_api_ContainerStateRunning(in *ContainerStateRunning, out *api.ContainerStateRunning, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*ContainerStateRunning))(in)
-	}
-	if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.StartedAt, &out.StartedAt, s); err != nil {
-		return err
-	}
-	return nil
-}
-
-func Convert_v1_ContainerStateRunning_To_api_ContainerStateRunning(in *ContainerStateRunning, out *api.ContainerStateRunning, s conversion.Scope) error {
-	return autoConvert_v1_ContainerStateRunning_To_api_ContainerStateRunning(in, out, s)
-}
-
-func autoConvert_v1_ContainerStateTerminated_To_api_ContainerStateTerminated(in *ContainerStateTerminated, out *api.ContainerStateTerminated, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*ContainerStateTerminated))(in)
-	}
-	out.ExitCode = int(in.ExitCode)
-	out.Signal = int(in.Signal)
-	out.Reason = in.Reason
-	out.Message = in.Message
-	if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.StartedAt, &out.StartedAt, s); err != nil {
-		return err
-	}
-	if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.FinishedAt, &out.FinishedAt, s); err != nil {
-		return err
-	}
-	out.ContainerID = in.ContainerID
-	return nil
-}
-
-func Convert_v1_ContainerStateTerminated_To_api_ContainerStateTerminated(in *ContainerStateTerminated, out *api.ContainerStateTerminated, s conversion.Scope) error {
-	return autoConvert_v1_ContainerStateTerminated_To_api_ContainerStateTerminated(in, out, s)
-}
-
-func autoConvert_v1_ContainerStateWaiting_To_api_ContainerStateWaiting(in *ContainerStateWaiting, out *api.ContainerStateWaiting, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*ContainerStateWaiting))(in)
-	}
-	out.Reason = in.Reason
-	out.Message = in.Message
-	return nil
-}
-
-func Convert_v1_ContainerStateWaiting_To_api_ContainerStateWaiting(in *ContainerStateWaiting, out *api.ContainerStateWaiting, s conversion.Scope) error {
-	return autoConvert_v1_ContainerStateWaiting_To_api_ContainerStateWaiting(in, out, s)
-}
-
-func autoConvert_v1_ContainerStatus_To_api_ContainerStatus(in *ContainerStatus, out *api.ContainerStatus, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*ContainerStatus))(in)
-	}
-	out.Name = in.Name
-	if err := Convert_v1_ContainerState_To_api_ContainerState(&in.State, &out.State, s); err != nil {
-		return err
-	}
-	if err := Convert_v1_ContainerState_To_api_ContainerState(&in.LastTerminationState, &out.LastTerminationState, s); err != nil {
-		return err
-	}
-	out.Ready = in.Ready
-	out.RestartCount = int(in.RestartCount)
-	out.Image = in.Image
-	out.ImageID = in.ImageID
-	out.ContainerID = in.ContainerID
-	return nil
-}
-
-func Convert_v1_ContainerStatus_To_api_ContainerStatus(in *ContainerStatus, out *api.ContainerStatus, s conversion.Scope) error {
-	return autoConvert_v1_ContainerStatus_To_api_ContainerStatus(in, out, s)
-}
-
-func autoConvert_v1_DaemonEndpoint_To_api_DaemonEndpoint(in *DaemonEndpoint, out *api.DaemonEndpoint, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*DaemonEndpoint))(in)
-	}
-	out.Port = int(in.Port)
-	return nil
-}
-
-func Convert_v1_DaemonEndpoint_To_api_DaemonEndpoint(in *DaemonEndpoint, out *api.DaemonEndpoint, s conversion.Scope) error {
-	return autoConvert_v1_DaemonEndpoint_To_api_DaemonEndpoint(in, out, s)
-}
-
-func autoConvert_v1_DeleteOptions_To_api_DeleteOptions(in *DeleteOptions, out *api.DeleteOptions, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*DeleteOptions))(in)
-	}
-	if in.GracePeriodSeconds != nil {
-		out.GracePeriodSeconds = new(int64)
-		*out.GracePeriodSeconds = *in.GracePeriodSeconds
-	} else {
-		out.GracePeriodSeconds = nil
-	}
-	return nil
-}
-
-func Convert_v1_DeleteOptions_To_api_DeleteOptions(in *DeleteOptions, out *api.DeleteOptions, s conversion.Scope) error {
-	return autoConvert_v1_DeleteOptions_To_api_DeleteOptions(in, out, s)
-}
-
-func autoConvert_v1_DownwardAPIVolumeFile_To_api_DownwardAPIVolumeFile(in *DownwardAPIVolumeFile, out *api.DownwardAPIVolumeFile, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*DownwardAPIVolumeFile))(in)
-	}
-	out.Path = in.Path
-	if err := Convert_v1_ObjectFieldSelector_To_api_ObjectFieldSelector(&in.FieldRef, &out.FieldRef, s); err != nil {
-		return err
-	}
-	return nil
-}
-
-func Convert_v1_DownwardAPIVolumeFile_To_api_DownwardAPIVolumeFile(in *DownwardAPIVolumeFile, out *api.DownwardAPIVolumeFile, s conversion.Scope) error {
-	return autoConvert_v1_DownwardAPIVolumeFile_To_api_DownwardAPIVolumeFile(in, out, s)
-}
-
-func autoConvert_v1_DownwardAPIVolumeSource_To_api_DownwardAPIVolumeSource(in *DownwardAPIVolumeSource, out *api.DownwardAPIVolumeSource, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*DownwardAPIVolumeSource))(in)
-	}
-	if in.Items != nil {
-		out.Items = make([]api.DownwardAPIVolumeFile, len(in.Items))
-		for i := range in.Items {
-			if err := Convert_v1_DownwardAPIVolumeFile_To_api_DownwardAPIVolumeFile(&in.Items[i], &out.Items[i], s); err != nil {
-				return err
-			}
-		}
-	} else {
-		out.Items = nil
-	}
-	return nil
-}
-
-func Convert_v1_DownwardAPIVolumeSource_To_api_DownwardAPIVolumeSource(in *DownwardAPIVolumeSource, out *api.DownwardAPIVolumeSource, s conversion.Scope) error {
-	return autoConvert_v1_DownwardAPIVolumeSource_To_api_DownwardAPIVolumeSource(in, out, s)
-}
-
-func autoConvert_v1_EmptyDirVolumeSource_To_api_EmptyDirVolumeSource(in *EmptyDirVolumeSource, out *api.EmptyDirVolumeSource, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*EmptyDirVolumeSource))(in)
-	}
-	out.Medium = api.StorageMedium(in.Medium)
-	return nil
-}
-
-func Convert_v1_EmptyDirVolumeSource_To_api_EmptyDirVolumeSource(in *EmptyDirVolumeSource, out *api.EmptyDirVolumeSource, s conversion.Scope) error {
-	return autoConvert_v1_EmptyDirVolumeSource_To_api_EmptyDirVolumeSource(in, out, s)
-}
-
-func autoConvert_v1_EndpointAddress_To_api_EndpointAddress(in *EndpointAddress, out *api.EndpointAddress, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*EndpointAddress))(in)
-	}
-	out.IP = in.IP
-	// unable to generate simple pointer conversion for v1.ObjectReference -> api.ObjectReference
-	if in.TargetRef != nil {
-		out.TargetRef = new(api.ObjectReference)
-		if err := Convert_v1_ObjectReference_To_api_ObjectReference(in.TargetRef, out.TargetRef, s); err != nil {
-			return err
-		}
-	} else {
-		out.TargetRef = nil
-	}
-	return nil
-}
-
-func Convert_v1_EndpointAddress_To_api_EndpointAddress(in *EndpointAddress, out *api.EndpointAddress, s conversion.Scope) error {
-	return autoConvert_v1_EndpointAddress_To_api_EndpointAddress(in, out, s)
-}
-
-func autoConvert_v1_EndpointPort_To_api_EndpointPort(in *EndpointPort, out *api.EndpointPort, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*EndpointPort))(in)
-	}
-	out.Name = in.Name
-	out.Port = int(in.Port)
-	out.Protocol = api.Protocol(in.Protocol)
-	return nil
-}
-
-func Convert_v1_EndpointPort_To_api_EndpointPort(in *EndpointPort, out *api.EndpointPort, s conversion.Scope) error {
-	return autoConvert_v1_EndpointPort_To_api_EndpointPort(in, out, s)
-}
-
-func autoConvert_v1_EndpointSubset_To_api_EndpointSubset(in *EndpointSubset, out *api.EndpointSubset, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*EndpointSubset))(in)
-	}
-	if in.Addresses != nil {
-		out.Addresses = make([]api.EndpointAddress, len(in.Addresses))
-		for i := range in.Addresses {
-			if err := Convert_v1_EndpointAddress_To_api_EndpointAddress(&in.Addresses[i], &out.Addresses[i], s); err != nil {
-				return err
-			}
-		}
-	} else {
-		out.Addresses = nil
-	}
-	if in.NotReadyAddresses != nil {
-		out.NotReadyAddresses = make([]api.EndpointAddress, len(in.NotReadyAddresses))
-		for i := range in.NotReadyAddresses {
-			if err := Convert_v1_EndpointAddress_To_api_EndpointAddress(&in.NotReadyAddresses[i], &out.NotReadyAddresses[i], s); err != nil {
-				return err
-			}
-		}
-	} else {
-		out.NotReadyAddresses = nil
-	}
-	if in.Ports != nil {
-		out.Ports = make([]api.EndpointPort, len(in.Ports))
-		for i := range in.Ports {
-			if err := Convert_v1_EndpointPort_To_api_EndpointPort(&in.Ports[i], &out.Ports[i], s); err != nil {
-				return err
-			}
-		}
-	} else {
-		out.Ports = nil
-	}
-	return nil
-}
-
-func Convert_v1_EndpointSubset_To_api_EndpointSubset(in *EndpointSubset, out *api.EndpointSubset, s conversion.Scope) error {
-	return autoConvert_v1_EndpointSubset_To_api_EndpointSubset(in, out, s)
-}
-
-func autoConvert_v1_Endpoints_To_api_Endpoints(in *Endpoints, out *api.Endpoints, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*Endpoints))(in)
-	}
-	if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil {
-		return err
-	}
-	if in.Subsets != nil {
-		out.Subsets = make([]api.EndpointSubset, len(in.Subsets))
-		for i := range in.Subsets {
-			if err := Convert_v1_EndpointSubset_To_api_EndpointSubset(&in.Subsets[i], &out.Subsets[i], s); err != nil {
-				return err
-			}
-		}
-	} else {
-		out.Subsets = nil
-	}
-	return nil
-}
-
-func Convert_v1_Endpoints_To_api_Endpoints(in *Endpoints, out *api.Endpoints, s conversion.Scope) error {
-	return autoConvert_v1_Endpoints_To_api_Endpoints(in, out, s)
-}
-
-func autoConvert_v1_EndpointsList_To_api_EndpointsList(in *EndpointsList, out *api.EndpointsList, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*EndpointsList))(in)
-	}
-	if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
-		return err
-	}
-	if in.Items != nil {
-		out.Items = make([]api.Endpoints, len(in.Items))
-		for i := range in.Items {
-			if err := Convert_v1_Endpoints_To_api_Endpoints(&in.Items[i], &out.Items[i], s); err != nil {
-				return err
-			}
-		}
-	} else {
-		out.Items = nil
-	}
-	return nil
-}
-
-func Convert_v1_EndpointsList_To_api_EndpointsList(in *EndpointsList, out *api.EndpointsList, s conversion.Scope) error {
-	return autoConvert_v1_EndpointsList_To_api_EndpointsList(in, out, s)
-}
-
-func autoConvert_v1_EnvVar_To_api_EnvVar(in *EnvVar, out *api.EnvVar, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*EnvVar))(in)
-	}
-	out.Name = in.Name
-	out.Value = in.Value
-	// unable to generate simple pointer conversion for v1.EnvVarSource -> api.EnvVarSource
-	if in.ValueFrom != nil {
-		out.ValueFrom = new(api.EnvVarSource)
-		if err := Convert_v1_EnvVarSource_To_api_EnvVarSource(in.ValueFrom, out.ValueFrom, s); err != nil {
-			return err
-		}
-	} else {
-		out.ValueFrom = nil
-	}
-	return nil
-}
-
-func Convert_v1_EnvVar_To_api_EnvVar(in *EnvVar, out *api.EnvVar, s conversion.Scope) error {
-	return autoConvert_v1_EnvVar_To_api_EnvVar(in, out, s)
-}
-
-func autoConvert_v1_EnvVarSource_To_api_EnvVarSource(in *EnvVarSource, out *api.EnvVarSource, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*EnvVarSource))(in)
-	}
-	// unable to generate simple pointer conversion for v1.ObjectFieldSelector -> api.ObjectFieldSelector
-	if in.FieldRef != nil {
-		out.FieldRef = new(api.ObjectFieldSelector)
-		if err := Convert_v1_ObjectFieldSelector_To_api_ObjectFieldSelector(in.FieldRef, out.FieldRef, s); err != nil {
-			return err
-		}
-	} else {
-		out.FieldRef = nil
-	}
-	// unable to generate simple pointer conversion for v1.ConfigMapKeySelector -> api.ConfigMapKeySelector
-	if in.ConfigMapKeyRef != nil {
-		out.ConfigMapKeyRef = new(api.ConfigMapKeySelector)
-		if err := Convert_v1_ConfigMapKeySelector_To_api_ConfigMapKeySelector(in.ConfigMapKeyRef,
out.ConfigMapKeyRef, s); err != nil { - return err - } - } else { - out.ConfigMapKeyRef = nil - } - // unable to generate simple pointer conversion for v1.SecretKeySelector -> api.SecretKeySelector - if in.SecretKeyRef != nil { - out.SecretKeyRef = new(api.SecretKeySelector) - if err := Convert_v1_SecretKeySelector_To_api_SecretKeySelector(in.SecretKeyRef, out.SecretKeyRef, s); err != nil { - return err - } - } else { - out.SecretKeyRef = nil - } - return nil -} - -func Convert_v1_EnvVarSource_To_api_EnvVarSource(in *EnvVarSource, out *api.EnvVarSource, s conversion.Scope) error { - return autoConvert_v1_EnvVarSource_To_api_EnvVarSource(in, out, s) -} - -func autoConvert_v1_Event_To_api_Event(in *Event, out *api.Event, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*Event))(in) - } - if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } - if err := Convert_v1_ObjectReference_To_api_ObjectReference(&in.InvolvedObject, &out.InvolvedObject, s); err != nil { - return err - } - out.Reason = in.Reason - out.Message = in.Message - if err := Convert_v1_EventSource_To_api_EventSource(&in.Source, &out.Source, s); err != nil { - return err - } - if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.FirstTimestamp, &out.FirstTimestamp, s); err != nil { - return err - } - if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastTimestamp, &out.LastTimestamp, s); err != nil { - return err - } - out.Count = int(in.Count) - out.Type = in.Type - return nil -} - -func Convert_v1_Event_To_api_Event(in *Event, out *api.Event, s conversion.Scope) error { - return autoConvert_v1_Event_To_api_Event(in, out, s) -} - -func autoConvert_v1_EventList_To_api_EventList(in *EventList, out *api.EventList, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*EventList))(in) - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - out.Items = make([]api.Event, len(in.Items)) - for i := range in.Items { - if err := Convert_v1_Event_To_api_Event(&in.Items[i], &out.Items[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_v1_EventList_To_api_EventList(in *EventList, out *api.EventList, s conversion.Scope) error { - return autoConvert_v1_EventList_To_api_EventList(in, out, s) -} - -func autoConvert_v1_EventSource_To_api_EventSource(in *EventSource, out *api.EventSource, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*EventSource))(in) - } - out.Component = in.Component - out.Host = in.Host - return nil -} - -func Convert_v1_EventSource_To_api_EventSource(in *EventSource, out *api.EventSource, s conversion.Scope) error { - return autoConvert_v1_EventSource_To_api_EventSource(in, out, s) -} - -func autoConvert_v1_ExecAction_To_api_ExecAction(in *ExecAction, out *api.ExecAction, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*ExecAction))(in) - } - if in.Command != nil { - out.Command = make([]string, len(in.Command)) - for i := range in.Command { - out.Command[i] = in.Command[i] - } - } else { - out.Command = nil - } - return nil -} - -func Convert_v1_ExecAction_To_api_ExecAction(in 
*ExecAction, out *api.ExecAction, s conversion.Scope) error { - return autoConvert_v1_ExecAction_To_api_ExecAction(in, out, s) -} - -func autoConvert_v1_ExportOptions_To_unversioned_ExportOptions(in *ExportOptions, out *unversioned.ExportOptions, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*ExportOptions))(in) - } - out.Export = in.Export - out.Exact = in.Exact - return nil -} - -func Convert_v1_ExportOptions_To_unversioned_ExportOptions(in *ExportOptions, out *unversioned.ExportOptions, s conversion.Scope) error { - return autoConvert_v1_ExportOptions_To_unversioned_ExportOptions(in, out, s) -} - -func autoConvert_v1_FCVolumeSource_To_api_FCVolumeSource(in *FCVolumeSource, out *api.FCVolumeSource, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*FCVolumeSource))(in) - } - if in.TargetWWNs != nil { - out.TargetWWNs = make([]string, len(in.TargetWWNs)) - for i := range in.TargetWWNs { - out.TargetWWNs[i] = in.TargetWWNs[i] - } - } else { - out.TargetWWNs = nil - } - if in.Lun != nil { - out.Lun = new(int) - *out.Lun = int(*in.Lun) - } else { - out.Lun = nil - } - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly - return nil -} - -func Convert_v1_FCVolumeSource_To_api_FCVolumeSource(in *FCVolumeSource, out *api.FCVolumeSource, s conversion.Scope) error { - return autoConvert_v1_FCVolumeSource_To_api_FCVolumeSource(in, out, s) -} - -func autoConvert_v1_FlexVolumeSource_To_api_FlexVolumeSource(in *FlexVolumeSource, out *api.FlexVolumeSource, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*FlexVolumeSource))(in) - } - out.Driver = in.Driver - out.FSType = in.FSType - // unable to generate simple pointer conversion for v1.LocalObjectReference -> api.LocalObjectReference - if in.SecretRef != nil { - out.SecretRef = new(api.LocalObjectReference) - if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(in.SecretRef, out.SecretRef, s); err != nil { - return err - } - } else { - out.SecretRef = nil - } - out.ReadOnly = in.ReadOnly - if in.Options != nil { - out.Options = make(map[string]string) - for key, val := range in.Options { - out.Options[key] = val - } - } else { - out.Options = nil - } - return nil -} - -func Convert_v1_FlexVolumeSource_To_api_FlexVolumeSource(in *FlexVolumeSource, out *api.FlexVolumeSource, s conversion.Scope) error { - return autoConvert_v1_FlexVolumeSource_To_api_FlexVolumeSource(in, out, s) -} - -func autoConvert_v1_FlockerVolumeSource_To_api_FlockerVolumeSource(in *FlockerVolumeSource, out *api.FlockerVolumeSource, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*FlockerVolumeSource))(in) - } - out.DatasetName = in.DatasetName - return nil -} - -func Convert_v1_FlockerVolumeSource_To_api_FlockerVolumeSource(in *FlockerVolumeSource, out *api.FlockerVolumeSource, s conversion.Scope) error { - return autoConvert_v1_FlockerVolumeSource_To_api_FlockerVolumeSource(in, out, s) -} - -func autoConvert_v1_GCEPersistentDiskVolumeSource_To_api_GCEPersistentDiskVolumeSource(in *GCEPersistentDiskVolumeSource, out *api.GCEPersistentDiskVolumeSource, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*GCEPersistentDiskVolumeSource))(in) - } - out.PDName = in.PDName - out.FSType = in.FSType 
- out.Partition = int(in.Partition) - out.ReadOnly = in.ReadOnly - return nil -} - -func Convert_v1_GCEPersistentDiskVolumeSource_To_api_GCEPersistentDiskVolumeSource(in *GCEPersistentDiskVolumeSource, out *api.GCEPersistentDiskVolumeSource, s conversion.Scope) error { - return autoConvert_v1_GCEPersistentDiskVolumeSource_To_api_GCEPersistentDiskVolumeSource(in, out, s) -} - -func autoConvert_v1_GitRepoVolumeSource_To_api_GitRepoVolumeSource(in *GitRepoVolumeSource, out *api.GitRepoVolumeSource, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*GitRepoVolumeSource))(in) - } - out.Repository = in.Repository - out.Revision = in.Revision - out.Directory = in.Directory - return nil -} - -func Convert_v1_GitRepoVolumeSource_To_api_GitRepoVolumeSource(in *GitRepoVolumeSource, out *api.GitRepoVolumeSource, s conversion.Scope) error { - return autoConvert_v1_GitRepoVolumeSource_To_api_GitRepoVolumeSource(in, out, s) -} - -func autoConvert_v1_GlusterfsVolumeSource_To_api_GlusterfsVolumeSource(in *GlusterfsVolumeSource, out *api.GlusterfsVolumeSource, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*GlusterfsVolumeSource))(in) - } - out.EndpointsName = in.EndpointsName - out.Path = in.Path - out.ReadOnly = in.ReadOnly - return nil -} - -func Convert_v1_GlusterfsVolumeSource_To_api_GlusterfsVolumeSource(in *GlusterfsVolumeSource, out *api.GlusterfsVolumeSource, s conversion.Scope) error { - return autoConvert_v1_GlusterfsVolumeSource_To_api_GlusterfsVolumeSource(in, out, s) -} - -func autoConvert_v1_HTTPGetAction_To_api_HTTPGetAction(in *HTTPGetAction, out *api.HTTPGetAction, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*HTTPGetAction))(in) - } - out.Path = in.Path - if err := api.Convert_intstr_IntOrString_To_intstr_IntOrString(&in.Port, &out.Port, s); err != nil { - return err - } - out.Host = in.Host - out.Scheme = api.URIScheme(in.Scheme) - if in.HTTPHeaders != nil { - out.HTTPHeaders = make([]api.HTTPHeader, len(in.HTTPHeaders)) - for i := range in.HTTPHeaders { - if err := Convert_v1_HTTPHeader_To_api_HTTPHeader(&in.HTTPHeaders[i], &out.HTTPHeaders[i], s); err != nil { - return err - } - } - } else { - out.HTTPHeaders = nil - } - return nil -} - -func Convert_v1_HTTPGetAction_To_api_HTTPGetAction(in *HTTPGetAction, out *api.HTTPGetAction, s conversion.Scope) error { - return autoConvert_v1_HTTPGetAction_To_api_HTTPGetAction(in, out, s) -} - -func autoConvert_v1_HTTPHeader_To_api_HTTPHeader(in *HTTPHeader, out *api.HTTPHeader, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*HTTPHeader))(in) - } - out.Name = in.Name - out.Value = in.Value - return nil -} - -func Convert_v1_HTTPHeader_To_api_HTTPHeader(in *HTTPHeader, out *api.HTTPHeader, s conversion.Scope) error { - return autoConvert_v1_HTTPHeader_To_api_HTTPHeader(in, out, s) -} - -func autoConvert_v1_Handler_To_api_Handler(in *Handler, out *api.Handler, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*Handler))(in) - } - // unable to generate simple pointer conversion for v1.ExecAction -> api.ExecAction - if in.Exec != nil { - out.Exec = new(api.ExecAction) - if err := Convert_v1_ExecAction_To_api_ExecAction(in.Exec, out.Exec, s); err != nil { - return err - } - 
} else { - out.Exec = nil - } - // unable to generate simple pointer conversion for v1.HTTPGetAction -> api.HTTPGetAction - if in.HTTPGet != nil { - out.HTTPGet = new(api.HTTPGetAction) - if err := Convert_v1_HTTPGetAction_To_api_HTTPGetAction(in.HTTPGet, out.HTTPGet, s); err != nil { - return err - } - } else { - out.HTTPGet = nil - } - // unable to generate simple pointer conversion for v1.TCPSocketAction -> api.TCPSocketAction - if in.TCPSocket != nil { - out.TCPSocket = new(api.TCPSocketAction) - if err := Convert_v1_TCPSocketAction_To_api_TCPSocketAction(in.TCPSocket, out.TCPSocket, s); err != nil { - return err - } - } else { - out.TCPSocket = nil - } - return nil -} - -func Convert_v1_Handler_To_api_Handler(in *Handler, out *api.Handler, s conversion.Scope) error { - return autoConvert_v1_Handler_To_api_Handler(in, out, s) -} - -func autoConvert_v1_HostPathVolumeSource_To_api_HostPathVolumeSource(in *HostPathVolumeSource, out *api.HostPathVolumeSource, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*HostPathVolumeSource))(in) - } - out.Path = in.Path - return nil -} - -func Convert_v1_HostPathVolumeSource_To_api_HostPathVolumeSource(in *HostPathVolumeSource, out *api.HostPathVolumeSource, s conversion.Scope) error { - return autoConvert_v1_HostPathVolumeSource_To_api_HostPathVolumeSource(in, out, s) -} - -func autoConvert_v1_ISCSIVolumeSource_To_api_ISCSIVolumeSource(in *ISCSIVolumeSource, out *api.ISCSIVolumeSource, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*ISCSIVolumeSource))(in) - } - out.TargetPortal = in.TargetPortal - out.IQN = in.IQN - out.Lun = int(in.Lun) - out.ISCSIInterface = in.ISCSIInterface - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly - return nil -} - -func Convert_v1_ISCSIVolumeSource_To_api_ISCSIVolumeSource(in *ISCSIVolumeSource, out *api.ISCSIVolumeSource, s conversion.Scope) error { - return autoConvert_v1_ISCSIVolumeSource_To_api_ISCSIVolumeSource(in, out, s) -} - -func autoConvert_v1_KeyToPath_To_api_KeyToPath(in *KeyToPath, out *api.KeyToPath, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*KeyToPath))(in) - } - out.Key = in.Key - out.Path = in.Path - return nil -} - -func Convert_v1_KeyToPath_To_api_KeyToPath(in *KeyToPath, out *api.KeyToPath, s conversion.Scope) error { - return autoConvert_v1_KeyToPath_To_api_KeyToPath(in, out, s) -} - -func autoConvert_v1_Lifecycle_To_api_Lifecycle(in *Lifecycle, out *api.Lifecycle, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*Lifecycle))(in) - } - // unable to generate simple pointer conversion for v1.Handler -> api.Handler - if in.PostStart != nil { - out.PostStart = new(api.Handler) - if err := Convert_v1_Handler_To_api_Handler(in.PostStart, out.PostStart, s); err != nil { - return err - } - } else { - out.PostStart = nil - } - // unable to generate simple pointer conversion for v1.Handler -> api.Handler - if in.PreStop != nil { - out.PreStop = new(api.Handler) - if err := Convert_v1_Handler_To_api_Handler(in.PreStop, out.PreStop, s); err != nil { - return err - } - } else { - out.PreStop = nil - } - return nil -} - -func Convert_v1_Lifecycle_To_api_Lifecycle(in *Lifecycle, out *api.Lifecycle, s conversion.Scope) error { - return autoConvert_v1_Lifecycle_To_api_Lifecycle(in, out, s) -} - -func 
autoConvert_v1_LimitRange_To_api_LimitRange(in *LimitRange, out *api.LimitRange, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*LimitRange))(in) - } - if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } - if err := Convert_v1_LimitRangeSpec_To_api_LimitRangeSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - return nil -} - -func Convert_v1_LimitRange_To_api_LimitRange(in *LimitRange, out *api.LimitRange, s conversion.Scope) error { - return autoConvert_v1_LimitRange_To_api_LimitRange(in, out, s) -} - -func autoConvert_v1_LimitRangeItem_To_api_LimitRangeItem(in *LimitRangeItem, out *api.LimitRangeItem, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*LimitRangeItem))(in) - } - out.Type = api.LimitType(in.Type) - if err := s.Convert(&in.Max, &out.Max, 0); err != nil { - return err - } - if err := s.Convert(&in.Min, &out.Min, 0); err != nil { - return err - } - if err := s.Convert(&in.Default, &out.Default, 0); err != nil { - return err - } - if err := s.Convert(&in.DefaultRequest, &out.DefaultRequest, 0); err != nil { - return err - } - if err := s.Convert(&in.MaxLimitRequestRatio, &out.MaxLimitRequestRatio, 0); err != nil { - return err - } - return nil -} - -func Convert_v1_LimitRangeItem_To_api_LimitRangeItem(in *LimitRangeItem, out *api.LimitRangeItem, s conversion.Scope) error { - return autoConvert_v1_LimitRangeItem_To_api_LimitRangeItem(in, out, s) -} - -func autoConvert_v1_LimitRangeList_To_api_LimitRangeList(in *LimitRangeList, out *api.LimitRangeList, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*LimitRangeList))(in) - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - out.Items = make([]api.LimitRange, len(in.Items)) - for i := range in.Items { - if err := Convert_v1_LimitRange_To_api_LimitRange(&in.Items[i], &out.Items[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_v1_LimitRangeList_To_api_LimitRangeList(in *LimitRangeList, out *api.LimitRangeList, s conversion.Scope) error { - return autoConvert_v1_LimitRangeList_To_api_LimitRangeList(in, out, s) -} - -func autoConvert_v1_LimitRangeSpec_To_api_LimitRangeSpec(in *LimitRangeSpec, out *api.LimitRangeSpec, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*LimitRangeSpec))(in) - } - if in.Limits != nil { - out.Limits = make([]api.LimitRangeItem, len(in.Limits)) - for i := range in.Limits { - if err := Convert_v1_LimitRangeItem_To_api_LimitRangeItem(&in.Limits[i], &out.Limits[i], s); err != nil { - return err - } - } - } else { - out.Limits = nil - } - return nil -} - -func Convert_v1_LimitRangeSpec_To_api_LimitRangeSpec(in *LimitRangeSpec, out *api.LimitRangeSpec, s conversion.Scope) error { - return autoConvert_v1_LimitRangeSpec_To_api_LimitRangeSpec(in, out, s) -} - -func autoConvert_v1_List_To_api_List(in *List, out *api.List, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*List))(in) - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - 
return err - } - if in.Items != nil { - out.Items = make([]runtime.Object, len(in.Items)) - for i := range in.Items { - if err := s.Convert(&in.Items[i], &out.Items[i], 0); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_v1_List_To_api_List(in *List, out *api.List, s conversion.Scope) error { - return autoConvert_v1_List_To_api_List(in, out, s) -} - -func autoConvert_v1_ListOptions_To_api_ListOptions(in *ListOptions, out *api.ListOptions, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*ListOptions))(in) - } - if err := api.Convert_string_To_labels_Selector(&in.LabelSelector, &out.LabelSelector, s); err != nil { - return err - } - if err := api.Convert_string_To_fields_Selector(&in.FieldSelector, &out.FieldSelector, s); err != nil { - return err - } - out.Watch = in.Watch - out.ResourceVersion = in.ResourceVersion - if in.TimeoutSeconds != nil { - out.TimeoutSeconds = new(int64) - *out.TimeoutSeconds = *in.TimeoutSeconds - } else { - out.TimeoutSeconds = nil - } - return nil -} - -func Convert_v1_ListOptions_To_api_ListOptions(in *ListOptions, out *api.ListOptions, s conversion.Scope) error { - return autoConvert_v1_ListOptions_To_api_ListOptions(in, out, s) -} - -func autoConvert_v1_LoadBalancerIngress_To_api_LoadBalancerIngress(in *LoadBalancerIngress, out *api.LoadBalancerIngress, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*LoadBalancerIngress))(in) - } - out.IP = in.IP - out.Hostname = in.Hostname - return nil -} - -func Convert_v1_LoadBalancerIngress_To_api_LoadBalancerIngress(in *LoadBalancerIngress, out *api.LoadBalancerIngress, s conversion.Scope) error { - return autoConvert_v1_LoadBalancerIngress_To_api_LoadBalancerIngress(in, out, s) -} - -func autoConvert_v1_LoadBalancerStatus_To_api_LoadBalancerStatus(in *LoadBalancerStatus, out *api.LoadBalancerStatus, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*LoadBalancerStatus))(in) - } - if in.Ingress != nil { - out.Ingress = make([]api.LoadBalancerIngress, len(in.Ingress)) - for i := range in.Ingress { - if err := Convert_v1_LoadBalancerIngress_To_api_LoadBalancerIngress(&in.Ingress[i], &out.Ingress[i], s); err != nil { - return err - } - } - } else { - out.Ingress = nil - } - return nil -} - -func Convert_v1_LoadBalancerStatus_To_api_LoadBalancerStatus(in *LoadBalancerStatus, out *api.LoadBalancerStatus, s conversion.Scope) error { - return autoConvert_v1_LoadBalancerStatus_To_api_LoadBalancerStatus(in, out, s) -} - -func autoConvert_v1_LocalObjectReference_To_api_LocalObjectReference(in *LocalObjectReference, out *api.LocalObjectReference, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*LocalObjectReference))(in) - } - out.Name = in.Name - return nil -} - -func Convert_v1_LocalObjectReference_To_api_LocalObjectReference(in *LocalObjectReference, out *api.LocalObjectReference, s conversion.Scope) error { - return autoConvert_v1_LocalObjectReference_To_api_LocalObjectReference(in, out, s) -} - -func autoConvert_v1_NFSVolumeSource_To_api_NFSVolumeSource(in *NFSVolumeSource, out *api.NFSVolumeSource, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*NFSVolumeSource))(in) - } - out.Server = in.Server - 
out.Path = in.Path - out.ReadOnly = in.ReadOnly - return nil -} - -func Convert_v1_NFSVolumeSource_To_api_NFSVolumeSource(in *NFSVolumeSource, out *api.NFSVolumeSource, s conversion.Scope) error { - return autoConvert_v1_NFSVolumeSource_To_api_NFSVolumeSource(in, out, s) -} - -func autoConvert_v1_Namespace_To_api_Namespace(in *Namespace, out *api.Namespace, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*Namespace))(in) - } - if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } - if err := Convert_v1_NamespaceSpec_To_api_NamespaceSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1_NamespaceStatus_To_api_NamespaceStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_v1_Namespace_To_api_Namespace(in *Namespace, out *api.Namespace, s conversion.Scope) error { - return autoConvert_v1_Namespace_To_api_Namespace(in, out, s) -} - -func autoConvert_v1_NamespaceList_To_api_NamespaceList(in *NamespaceList, out *api.NamespaceList, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*NamespaceList))(in) - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - out.Items = make([]api.Namespace, len(in.Items)) - for i := range in.Items { - if err := Convert_v1_Namespace_To_api_Namespace(&in.Items[i], &out.Items[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_v1_NamespaceList_To_api_NamespaceList(in *NamespaceList, out *api.NamespaceList, s conversion.Scope) error { - return autoConvert_v1_NamespaceList_To_api_NamespaceList(in, out, s) -} - -func autoConvert_v1_NamespaceSpec_To_api_NamespaceSpec(in *NamespaceSpec, out *api.NamespaceSpec, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*NamespaceSpec))(in) - } - if in.Finalizers != nil { - out.Finalizers = make([]api.FinalizerName, len(in.Finalizers)) - for i := range in.Finalizers { - out.Finalizers[i] = api.FinalizerName(in.Finalizers[i]) - } - } else { - out.Finalizers = nil - } - return nil -} - -func Convert_v1_NamespaceSpec_To_api_NamespaceSpec(in *NamespaceSpec, out *api.NamespaceSpec, s conversion.Scope) error { - return autoConvert_v1_NamespaceSpec_To_api_NamespaceSpec(in, out, s) -} - -func autoConvert_v1_NamespaceStatus_To_api_NamespaceStatus(in *NamespaceStatus, out *api.NamespaceStatus, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*NamespaceStatus))(in) - } - out.Phase = api.NamespacePhase(in.Phase) - return nil -} - -func Convert_v1_NamespaceStatus_To_api_NamespaceStatus(in *NamespaceStatus, out *api.NamespaceStatus, s conversion.Scope) error { - return autoConvert_v1_NamespaceStatus_To_api_NamespaceStatus(in, out, s) -} - -func autoConvert_v1_Node_To_api_Node(in *Node, out *api.Node, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*Node))(in) - } - if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } - if err := Convert_v1_NodeSpec_To_api_NodeSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - 
if err := Convert_v1_NodeStatus_To_api_NodeStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_v1_Node_To_api_Node(in *Node, out *api.Node, s conversion.Scope) error { - return autoConvert_v1_Node_To_api_Node(in, out, s) -} - -func autoConvert_v1_NodeAddress_To_api_NodeAddress(in *NodeAddress, out *api.NodeAddress, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*NodeAddress))(in) - } - out.Type = api.NodeAddressType(in.Type) - out.Address = in.Address - return nil -} - -func Convert_v1_NodeAddress_To_api_NodeAddress(in *NodeAddress, out *api.NodeAddress, s conversion.Scope) error { - return autoConvert_v1_NodeAddress_To_api_NodeAddress(in, out, s) -} - -func autoConvert_v1_NodeCondition_To_api_NodeCondition(in *NodeCondition, out *api.NodeCondition, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*NodeCondition))(in) - } - out.Type = api.NodeConditionType(in.Type) - out.Status = api.ConditionStatus(in.Status) - if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastHeartbeatTime, &out.LastHeartbeatTime, s); err != nil { - return err - } - if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastTransitionTime, &out.LastTransitionTime, s); err != nil { - return err - } - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -func Convert_v1_NodeCondition_To_api_NodeCondition(in *NodeCondition, out *api.NodeCondition, s conversion.Scope) error { - return autoConvert_v1_NodeCondition_To_api_NodeCondition(in, out, s) -} - -func autoConvert_v1_NodeDaemonEndpoints_To_api_NodeDaemonEndpoints(in *NodeDaemonEndpoints, out *api.NodeDaemonEndpoints, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*NodeDaemonEndpoints))(in) - } - if err := Convert_v1_DaemonEndpoint_To_api_DaemonEndpoint(&in.KubeletEndpoint, &out.KubeletEndpoint, s); err != nil { - return err - } - return nil -} - -func Convert_v1_NodeDaemonEndpoints_To_api_NodeDaemonEndpoints(in *NodeDaemonEndpoints, out *api.NodeDaemonEndpoints, s conversion.Scope) error { - return autoConvert_v1_NodeDaemonEndpoints_To_api_NodeDaemonEndpoints(in, out, s) -} - -func autoConvert_v1_NodeList_To_api_NodeList(in *NodeList, out *api.NodeList, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*NodeList))(in) - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - out.Items = make([]api.Node, len(in.Items)) - for i := range in.Items { - if err := Convert_v1_Node_To_api_Node(&in.Items[i], &out.Items[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_v1_NodeList_To_api_NodeList(in *NodeList, out *api.NodeList, s conversion.Scope) error { - return autoConvert_v1_NodeList_To_api_NodeList(in, out, s) -} - -func autoConvert_v1_NodeProxyOptions_To_api_NodeProxyOptions(in *NodeProxyOptions, out *api.NodeProxyOptions, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*NodeProxyOptions))(in) - } - out.Path = in.Path - return nil -} - -func Convert_v1_NodeProxyOptions_To_api_NodeProxyOptions(in *NodeProxyOptions, out *api.NodeProxyOptions, s 
conversion.Scope) error { - return autoConvert_v1_NodeProxyOptions_To_api_NodeProxyOptions(in, out, s) -} - -func autoConvert_v1_NodeSpec_To_api_NodeSpec(in *NodeSpec, out *api.NodeSpec, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*NodeSpec))(in) + out.Images = nil } - out.PodCIDR = in.PodCIDR - out.ExternalID = in.ExternalID - out.ProviderID = in.ProviderID - out.Unschedulable = in.Unschedulable return nil } -func Convert_v1_NodeSpec_To_api_NodeSpec(in *NodeSpec, out *api.NodeSpec, s conversion.Scope) error { - return autoConvert_v1_NodeSpec_To_api_NodeSpec(in, out, s) +func Convert_v1_NodeStatus_To_api_NodeStatus(in *NodeStatus, out *api.NodeStatus, s conversion.Scope) error { + return autoConvert_v1_NodeStatus_To_api_NodeStatus(in, out, s) } -func autoConvert_v1_NodeStatus_To_api_NodeStatus(in *NodeStatus, out *api.NodeStatus, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*NodeStatus))(in) - } - if err := s.Convert(&in.Capacity, &out.Capacity, 0); err != nil { - return err +func autoConvert_api_NodeStatus_To_v1_NodeStatus(in *api.NodeStatus, out *NodeStatus, s conversion.Scope) error { + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = make(ResourceList, len(*in)) + for key, val := range *in { + newVal := new(resource.Quantity) + if err := api.Convert_resource_Quantity_To_resource_Quantity(&val, newVal, s); err != nil { + return err + } + (*out)[ResourceName(key)] = *newVal + } + } else { + out.Capacity = nil } - if err := s.Convert(&in.Allocatable, &out.Allocatable, 0); err != nil { - return err + if in.Allocatable != nil { + in, out := &in.Allocatable, &out.Allocatable + *out = make(ResourceList, len(*in)) + for key, val := range *in { + newVal := new(resource.Quantity) + if err := api.Convert_resource_Quantity_To_resource_Quantity(&val, newVal, s); err != nil { + return err + } + (*out)[ResourceName(key)] = *newVal + } + } else { + out.Allocatable = nil } - out.Phase = api.NodePhase(in.Phase) + out.Phase = NodePhase(in.Phase) if in.Conditions != nil { - out.Conditions = make([]api.NodeCondition, len(in.Conditions)) - for i := range in.Conditions { - if err := Convert_v1_NodeCondition_To_api_NodeCondition(&in.Conditions[i], &out.Conditions[i], s); err != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]NodeCondition, len(*in)) + for i := range *in { + if err := Convert_api_NodeCondition_To_v1_NodeCondition(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -4886,25 +3435,27 @@ func autoConvert_v1_NodeStatus_To_api_NodeStatus(in *NodeStatus, out *api.NodeSt out.Conditions = nil } if in.Addresses != nil { - out.Addresses = make([]api.NodeAddress, len(in.Addresses)) - for i := range in.Addresses { - if err := Convert_v1_NodeAddress_To_api_NodeAddress(&in.Addresses[i], &out.Addresses[i], s); err != nil { + in, out := &in.Addresses, &out.Addresses + *out = make([]NodeAddress, len(*in)) + for i := range *in { + if err := Convert_api_NodeAddress_To_v1_NodeAddress(&(*in)[i], &(*out)[i], s); err != nil { return err } } } else { out.Addresses = nil } - if err := Convert_v1_NodeDaemonEndpoints_To_api_NodeDaemonEndpoints(&in.DaemonEndpoints, &out.DaemonEndpoints, s); err != nil { + if err := Convert_api_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints(&in.DaemonEndpoints, &out.DaemonEndpoints, s); err != nil { return err } - if err := 
Convert_v1_NodeSystemInfo_To_api_NodeSystemInfo(&in.NodeInfo, &out.NodeInfo, s); err != nil { + if err := Convert_api_NodeSystemInfo_To_v1_NodeSystemInfo(&in.NodeInfo, &out.NodeInfo, s); err != nil { return err } if in.Images != nil { - out.Images = make([]api.ContainerImage, len(in.Images)) - for i := range in.Images { - if err := Convert_v1_ContainerImage_To_api_ContainerImage(&in.Images[i], &out.Images[i], s); err != nil { + in, out := &in.Images, &out.Images + *out = make([]ContainerImage, len(*in)) + for i := range *in { + if err := Convert_api_ContainerImage_To_v1_ContainerImage(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -4914,14 +3465,11 @@ func autoConvert_v1_NodeStatus_To_api_NodeStatus(in *NodeStatus, out *api.NodeSt return nil } -func Convert_v1_NodeStatus_To_api_NodeStatus(in *NodeStatus, out *api.NodeStatus, s conversion.Scope) error { - return autoConvert_v1_NodeStatus_To_api_NodeStatus(in, out, s) +func Convert_api_NodeStatus_To_v1_NodeStatus(in *api.NodeStatus, out *NodeStatus, s conversion.Scope) error { + return autoConvert_api_NodeStatus_To_v1_NodeStatus(in, out, s) } func autoConvert_v1_NodeSystemInfo_To_api_NodeSystemInfo(in *NodeSystemInfo, out *api.NodeSystemInfo, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*NodeSystemInfo))(in) - } out.MachineID = in.MachineID out.SystemUUID = in.SystemUUID out.BootID = in.BootID @@ -4930,6 +3478,8 @@ func autoConvert_v1_NodeSystemInfo_To_api_NodeSystemInfo(in *NodeSystemInfo, out out.ContainerRuntimeVersion = in.ContainerRuntimeVersion out.KubeletVersion = in.KubeletVersion out.KubeProxyVersion = in.KubeProxyVersion + out.OperatingSystem = in.OperatingSystem + out.Architecture = in.Architecture return nil } @@ -4937,10 +3487,26 @@ func Convert_v1_NodeSystemInfo_To_api_NodeSystemInfo(in *NodeSystemInfo, out *ap return autoConvert_v1_NodeSystemInfo_To_api_NodeSystemInfo(in, out, s) } +func autoConvert_api_NodeSystemInfo_To_v1_NodeSystemInfo(in *api.NodeSystemInfo, out *NodeSystemInfo, s conversion.Scope) error { + out.MachineID = in.MachineID + out.SystemUUID = in.SystemUUID + out.BootID = in.BootID + out.KernelVersion = in.KernelVersion + out.OSImage = in.OSImage + out.ContainerRuntimeVersion = in.ContainerRuntimeVersion + out.KubeletVersion = in.KubeletVersion + out.KubeProxyVersion = in.KubeProxyVersion + out.OperatingSystem = in.OperatingSystem + out.Architecture = in.Architecture + return nil +} + +func Convert_api_NodeSystemInfo_To_v1_NodeSystemInfo(in *api.NodeSystemInfo, out *NodeSystemInfo, s conversion.Scope) error { + return autoConvert_api_NodeSystemInfo_To_v1_NodeSystemInfo(in, out, s) +} + func autoConvert_v1_ObjectFieldSelector_To_api_ObjectFieldSelector(in *ObjectFieldSelector, out *api.ObjectFieldSelector, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*ObjectFieldSelector))(in) - } + SetDefaults_ObjectFieldSelector(in) out.APIVersion = in.APIVersion out.FieldPath = in.FieldPath return nil @@ -4950,10 +3516,17 @@ func Convert_v1_ObjectFieldSelector_To_api_ObjectFieldSelector(in *ObjectFieldSe return autoConvert_v1_ObjectFieldSelector_To_api_ObjectFieldSelector(in, out, s) } +func autoConvert_api_ObjectFieldSelector_To_v1_ObjectFieldSelector(in *api.ObjectFieldSelector, out *ObjectFieldSelector, s conversion.Scope) error { + out.APIVersion = in.APIVersion + out.FieldPath = in.FieldPath + return nil +} + +func 
Convert_api_ObjectFieldSelector_To_v1_ObjectFieldSelector(in *api.ObjectFieldSelector, out *ObjectFieldSelector, s conversion.Scope) error { + return autoConvert_api_ObjectFieldSelector_To_v1_ObjectFieldSelector(in, out, s) +} + func autoConvert_v1_ObjectMeta_To_api_ObjectMeta(in *ObjectMeta, out *api.ObjectMeta, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*ObjectMeta))(in) - } out.Name = in.Name out.GenerateName = in.GenerateName out.Namespace = in.Namespace @@ -4964,48 +3537,64 @@ func autoConvert_v1_ObjectMeta_To_api_ObjectMeta(in *ObjectMeta, out *api.Object if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.CreationTimestamp, &out.CreationTimestamp, s); err != nil { return err } - // unable to generate simple pointer conversion for unversioned.Time -> unversioned.Time - if in.DeletionTimestamp != nil { - out.DeletionTimestamp = new(unversioned.Time) - if err := api.Convert_unversioned_Time_To_unversioned_Time(in.DeletionTimestamp, out.DeletionTimestamp, s); err != nil { - return err + out.DeletionTimestamp = in.DeletionTimestamp + out.DeletionGracePeriodSeconds = in.DeletionGracePeriodSeconds + out.Labels = in.Labels + out.Annotations = in.Annotations + if in.OwnerReferences != nil { + in, out := &in.OwnerReferences, &out.OwnerReferences + *out = make([]api.OwnerReference, len(*in)) + for i := range *in { + if err := Convert_v1_OwnerReference_To_api_OwnerReference(&(*in)[i], &(*out)[i], s); err != nil { + return err + } } } else { - out.DeletionTimestamp = nil + out.OwnerReferences = nil } - if in.DeletionGracePeriodSeconds != nil { - out.DeletionGracePeriodSeconds = new(int64) - *out.DeletionGracePeriodSeconds = *in.DeletionGracePeriodSeconds - } else { - out.DeletionGracePeriodSeconds = nil - } - if in.Labels != nil { - out.Labels = make(map[string]string) - for key, val := range in.Labels { - out.Labels[key] = val - } - } else { - out.Labels = nil + out.Finalizers = in.Finalizers + return nil +} + +func Convert_v1_ObjectMeta_To_api_ObjectMeta(in *ObjectMeta, out *api.ObjectMeta, s conversion.Scope) error { + return autoConvert_v1_ObjectMeta_To_api_ObjectMeta(in, out, s) +} + +func autoConvert_api_ObjectMeta_To_v1_ObjectMeta(in *api.ObjectMeta, out *ObjectMeta, s conversion.Scope) error { + out.Name = in.Name + out.GenerateName = in.GenerateName + out.Namespace = in.Namespace + out.SelfLink = in.SelfLink + out.UID = in.UID + out.ResourceVersion = in.ResourceVersion + out.Generation = in.Generation + if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.CreationTimestamp, &out.CreationTimestamp, s); err != nil { + return err } - if in.Annotations != nil { - out.Annotations = make(map[string]string) - for key, val := range in.Annotations { - out.Annotations[key] = val + out.DeletionTimestamp = in.DeletionTimestamp + out.DeletionGracePeriodSeconds = in.DeletionGracePeriodSeconds + out.Labels = in.Labels + out.Annotations = in.Annotations + if in.OwnerReferences != nil { + in, out := &in.OwnerReferences, &out.OwnerReferences + *out = make([]OwnerReference, len(*in)) + for i := range *in { + if err := Convert_api_OwnerReference_To_v1_OwnerReference(&(*in)[i], &(*out)[i], s); err != nil { + return err + } } } else { - out.Annotations = nil + out.OwnerReferences = nil } + out.Finalizers = in.Finalizers return nil } -func Convert_v1_ObjectMeta_To_api_ObjectMeta(in *ObjectMeta, out *api.ObjectMeta, s conversion.Scope) error { - return autoConvert_v1_ObjectMeta_To_api_ObjectMeta(in, out, s) 
+func Convert_api_ObjectMeta_To_v1_ObjectMeta(in *api.ObjectMeta, out *ObjectMeta, s conversion.Scope) error { + return autoConvert_api_ObjectMeta_To_v1_ObjectMeta(in, out, s) } func autoConvert_v1_ObjectReference_To_api_ObjectReference(in *ObjectReference, out *api.ObjectReference, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*ObjectReference))(in) - } out.Kind = in.Kind out.Namespace = in.Namespace out.Name = in.Name @@ -5020,9 +3609,49 @@ func Convert_v1_ObjectReference_To_api_ObjectReference(in *ObjectReference, out return autoConvert_v1_ObjectReference_To_api_ObjectReference(in, out, s) } +func autoConvert_api_ObjectReference_To_v1_ObjectReference(in *api.ObjectReference, out *ObjectReference, s conversion.Scope) error { + out.Kind = in.Kind + out.Namespace = in.Namespace + out.Name = in.Name + out.UID = in.UID + out.APIVersion = in.APIVersion + out.ResourceVersion = in.ResourceVersion + out.FieldPath = in.FieldPath + return nil +} + +func Convert_api_ObjectReference_To_v1_ObjectReference(in *api.ObjectReference, out *ObjectReference, s conversion.Scope) error { + return autoConvert_api_ObjectReference_To_v1_ObjectReference(in, out, s) +} + +func autoConvert_v1_OwnerReference_To_api_OwnerReference(in *OwnerReference, out *api.OwnerReference, s conversion.Scope) error { + out.APIVersion = in.APIVersion + out.Kind = in.Kind + out.Name = in.Name + out.UID = in.UID + return nil +} + +func Convert_v1_OwnerReference_To_api_OwnerReference(in *OwnerReference, out *api.OwnerReference, s conversion.Scope) error { + return autoConvert_v1_OwnerReference_To_api_OwnerReference(in, out, s) +} + +func autoConvert_api_OwnerReference_To_v1_OwnerReference(in *api.OwnerReference, out *OwnerReference, s conversion.Scope) error { + out.APIVersion = in.APIVersion + out.Kind = in.Kind + out.Name = in.Name + out.UID = in.UID + return nil +} + +func Convert_api_OwnerReference_To_v1_OwnerReference(in *api.OwnerReference, out *OwnerReference, s conversion.Scope) error { + return autoConvert_api_OwnerReference_To_v1_OwnerReference(in, out, s) +} + func autoConvert_v1_PersistentVolume_To_api_PersistentVolume(in *PersistentVolume, out *api.PersistentVolume, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*PersistentVolume))(in) + SetDefaults_PersistentVolume(in) + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err } if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { return err @@ -5040,9 +3669,30 @@ func Convert_v1_PersistentVolume_To_api_PersistentVolume(in *PersistentVolume, o return autoConvert_v1_PersistentVolume_To_api_PersistentVolume(in, out, s) } +func autoConvert_api_PersistentVolume_To_v1_PersistentVolume(in *api.PersistentVolume, out *PersistentVolume, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { + return err + } + if err := Convert_api_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_api_PersistentVolumeStatus_To_v1_PersistentVolumeStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func 
Convert_api_PersistentVolume_To_v1_PersistentVolume(in *api.PersistentVolume, out *PersistentVolume, s conversion.Scope) error { + return autoConvert_api_PersistentVolume_To_v1_PersistentVolume(in, out, s) +} + func autoConvert_v1_PersistentVolumeClaim_To_api_PersistentVolumeClaim(in *PersistentVolumeClaim, out *api.PersistentVolumeClaim, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*PersistentVolumeClaim))(in) + SetDefaults_PersistentVolumeClaim(in) + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err } if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { return err @@ -5060,17 +3710,38 @@ func Convert_v1_PersistentVolumeClaim_To_api_PersistentVolumeClaim(in *Persisten return autoConvert_v1_PersistentVolumeClaim_To_api_PersistentVolumeClaim(in, out, s) } +func autoConvert_api_PersistentVolumeClaim_To_v1_PersistentVolumeClaim(in *api.PersistentVolumeClaim, out *PersistentVolumeClaim, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { + return err + } + if err := Convert_api_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_api_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_api_PersistentVolumeClaim_To_v1_PersistentVolumeClaim(in *api.PersistentVolumeClaim, out *PersistentVolumeClaim, s conversion.Scope) error { + return autoConvert_api_PersistentVolumeClaim_To_v1_PersistentVolumeClaim(in, out, s) +} + func autoConvert_v1_PersistentVolumeClaimList_To_api_PersistentVolumeClaimList(in *PersistentVolumeClaimList, out *api.PersistentVolumeClaimList, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*PersistentVolumeClaimList))(in) + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err } if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { return err } if in.Items != nil { - out.Items = make([]api.PersistentVolumeClaim, len(in.Items)) - for i := range in.Items { - if err := Convert_v1_PersistentVolumeClaim_To_api_PersistentVolumeClaim(&in.Items[i], &out.Items[i], s); err != nil { + in, out := &in.Items, &out.Items + *out = make([]api.PersistentVolumeClaim, len(*in)) + for i := range *in { + if err := Convert_v1_PersistentVolumeClaim_To_api_PersistentVolumeClaim(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -5084,14 +3755,37 @@ func Convert_v1_PersistentVolumeClaimList_To_api_PersistentVolumeClaimList(in *P return autoConvert_v1_PersistentVolumeClaimList_To_api_PersistentVolumeClaimList(in, out, s) } -func autoConvert_v1_PersistentVolumeClaimSpec_To_api_PersistentVolumeClaimSpec(in *PersistentVolumeClaimSpec, out *api.PersistentVolumeClaimSpec, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*PersistentVolumeClaimSpec))(in) +func autoConvert_api_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList(in 
*api.PersistentVolumeClaimList, out *PersistentVolumeClaimList, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { + return err } + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PersistentVolumeClaim, len(*in)) + for i := range *in { + if err := Convert_api_PersistentVolumeClaim_To_v1_PersistentVolumeClaim(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_api_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList(in *api.PersistentVolumeClaimList, out *PersistentVolumeClaimList, s conversion.Scope) error { + return autoConvert_api_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList(in, out, s) +} + +func autoConvert_v1_PersistentVolumeClaimSpec_To_api_PersistentVolumeClaimSpec(in *PersistentVolumeClaimSpec, out *api.PersistentVolumeClaimSpec, s conversion.Scope) error { if in.AccessModes != nil { - out.AccessModes = make([]api.PersistentVolumeAccessMode, len(in.AccessModes)) - for i := range in.AccessModes { - out.AccessModes[i] = api.PersistentVolumeAccessMode(in.AccessModes[i]) + in, out := &in.AccessModes, &out.AccessModes + *out = make([]api.PersistentVolumeAccessMode, len(*in)) + for i := range *in { + (*out)[i] = api.PersistentVolumeAccessMode((*in)[i]) } } else { out.AccessModes = nil @@ -5107,20 +3801,39 @@ func Convert_v1_PersistentVolumeClaimSpec_To_api_PersistentVolumeClaimSpec(in *P return autoConvert_v1_PersistentVolumeClaimSpec_To_api_PersistentVolumeClaimSpec(in, out, s) } -func autoConvert_v1_PersistentVolumeClaimStatus_To_api_PersistentVolumeClaimStatus(in *PersistentVolumeClaimStatus, out *api.PersistentVolumeClaimStatus, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*PersistentVolumeClaimStatus))(in) +func autoConvert_api_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(in *api.PersistentVolumeClaimSpec, out *PersistentVolumeClaimSpec, s conversion.Scope) error { + if in.AccessModes != nil { + in, out := &in.AccessModes, &out.AccessModes + *out = make([]PersistentVolumeAccessMode, len(*in)) + for i := range *in { + (*out)[i] = PersistentVolumeAccessMode((*in)[i]) + } + } else { + out.AccessModes = nil + } + if err := Convert_api_ResourceRequirements_To_v1_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil { + return err } + out.VolumeName = in.VolumeName + return nil +} + +func Convert_api_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(in *api.PersistentVolumeClaimSpec, out *PersistentVolumeClaimSpec, s conversion.Scope) error { + return autoConvert_api_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(in, out, s) +} + +func autoConvert_v1_PersistentVolumeClaimStatus_To_api_PersistentVolumeClaimStatus(in *PersistentVolumeClaimStatus, out *api.PersistentVolumeClaimStatus, s conversion.Scope) error { out.Phase = api.PersistentVolumeClaimPhase(in.Phase) if in.AccessModes != nil { - out.AccessModes = make([]api.PersistentVolumeAccessMode, len(in.AccessModes)) - for i := range in.AccessModes { - out.AccessModes[i] = api.PersistentVolumeAccessMode(in.AccessModes[i]) + in, out := &in.AccessModes, &out.AccessModes + *out = make([]api.PersistentVolumeAccessMode, len(*in)) + for i := range *in { + (*out)[i] = 
api.PersistentVolumeAccessMode((*in)[i]) } } else { out.AccessModes = nil } - if err := s.Convert(&in.Capacity, &out.Capacity, 0); err != nil { + if err := Convert_v1_ResourceList_To_api_ResourceList(&in.Capacity, &out.Capacity, s); err != nil { return err } return nil @@ -5130,30 +3843,94 @@ func Convert_v1_PersistentVolumeClaimStatus_To_api_PersistentVolumeClaimStatus(i return autoConvert_v1_PersistentVolumeClaimStatus_To_api_PersistentVolumeClaimStatus(in, out, s) } +func autoConvert_api_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus(in *api.PersistentVolumeClaimStatus, out *PersistentVolumeClaimStatus, s conversion.Scope) error { + out.Phase = PersistentVolumeClaimPhase(in.Phase) + if in.AccessModes != nil { + in, out := &in.AccessModes, &out.AccessModes + *out = make([]PersistentVolumeAccessMode, len(*in)) + for i := range *in { + (*out)[i] = PersistentVolumeAccessMode((*in)[i]) + } + } else { + out.AccessModes = nil + } + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = make(ResourceList, len(*in)) + for key, val := range *in { + newVal := new(resource.Quantity) + if err := api.Convert_resource_Quantity_To_resource_Quantity(&val, newVal, s); err != nil { + return err + } + (*out)[ResourceName(key)] = *newVal + } + } else { + out.Capacity = nil + } + return nil +} + +func Convert_api_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus(in *api.PersistentVolumeClaimStatus, out *PersistentVolumeClaimStatus, s conversion.Scope) error { + return autoConvert_api_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus(in, out, s) +} + func autoConvert_v1_PersistentVolumeClaimVolumeSource_To_api_PersistentVolumeClaimVolumeSource(in *PersistentVolumeClaimVolumeSource, out *api.PersistentVolumeClaimVolumeSource, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*PersistentVolumeClaimVolumeSource))(in) + out.ClaimName = in.ClaimName + out.ReadOnly = in.ReadOnly + return nil +} + +func Convert_v1_PersistentVolumeClaimVolumeSource_To_api_PersistentVolumeClaimVolumeSource(in *PersistentVolumeClaimVolumeSource, out *api.PersistentVolumeClaimVolumeSource, s conversion.Scope) error { + return autoConvert_v1_PersistentVolumeClaimVolumeSource_To_api_PersistentVolumeClaimVolumeSource(in, out, s) +} + +func autoConvert_api_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource(in *api.PersistentVolumeClaimVolumeSource, out *PersistentVolumeClaimVolumeSource, s conversion.Scope) error { + out.ClaimName = in.ClaimName + out.ReadOnly = in.ReadOnly + return nil +} + +func Convert_api_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource(in *api.PersistentVolumeClaimVolumeSource, out *PersistentVolumeClaimVolumeSource, s conversion.Scope) error { + return autoConvert_api_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource(in, out, s) +} + +func autoConvert_v1_PersistentVolumeList_To_api_PersistentVolumeList(in *PersistentVolumeList, out *api.PersistentVolumeList, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { + return err + } + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]api.PersistentVolume, len(*in)) + for i := range *in { + if err := 
Convert_v1_PersistentVolume_To_api_PersistentVolume(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil } - out.ClaimName = in.ClaimName - out.ReadOnly = in.ReadOnly return nil } -func Convert_v1_PersistentVolumeClaimVolumeSource_To_api_PersistentVolumeClaimVolumeSource(in *PersistentVolumeClaimVolumeSource, out *api.PersistentVolumeClaimVolumeSource, s conversion.Scope) error { - return autoConvert_v1_PersistentVolumeClaimVolumeSource_To_api_PersistentVolumeClaimVolumeSource(in, out, s) +func Convert_v1_PersistentVolumeList_To_api_PersistentVolumeList(in *PersistentVolumeList, out *api.PersistentVolumeList, s conversion.Scope) error { + return autoConvert_v1_PersistentVolumeList_To_api_PersistentVolumeList(in, out, s) } -func autoConvert_v1_PersistentVolumeList_To_api_PersistentVolumeList(in *PersistentVolumeList, out *api.PersistentVolumeList, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*PersistentVolumeList))(in) +func autoConvert_api_PersistentVolumeList_To_v1_PersistentVolumeList(in *api.PersistentVolumeList, out *PersistentVolumeList, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err } if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { return err } if in.Items != nil { - out.Items = make([]api.PersistentVolume, len(in.Items)) - for i := range in.Items { - if err := Convert_v1_PersistentVolume_To_api_PersistentVolume(&in.Items[i], &out.Items[i], s); err != nil { + in, out := &in.Items, &out.Items + *out = make([]PersistentVolume, len(*in)) + for i := range *in { + if err := Convert_api_PersistentVolume_To_v1_PersistentVolume(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -5163,131 +3940,137 @@ func autoConvert_v1_PersistentVolumeList_To_api_PersistentVolumeList(in *Persist return nil } -func Convert_v1_PersistentVolumeList_To_api_PersistentVolumeList(in *PersistentVolumeList, out *api.PersistentVolumeList, s conversion.Scope) error { - return autoConvert_v1_PersistentVolumeList_To_api_PersistentVolumeList(in, out, s) +func Convert_api_PersistentVolumeList_To_v1_PersistentVolumeList(in *api.PersistentVolumeList, out *PersistentVolumeList, s conversion.Scope) error { + return autoConvert_api_PersistentVolumeList_To_v1_PersistentVolumeList(in, out, s) } func autoConvert_v1_PersistentVolumeSource_To_api_PersistentVolumeSource(in *PersistentVolumeSource, out *api.PersistentVolumeSource, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*PersistentVolumeSource))(in) - } - // unable to generate simple pointer conversion for v1.GCEPersistentDiskVolumeSource -> api.GCEPersistentDiskVolumeSource if in.GCEPersistentDisk != nil { - out.GCEPersistentDisk = new(api.GCEPersistentDiskVolumeSource) - if err := Convert_v1_GCEPersistentDiskVolumeSource_To_api_GCEPersistentDiskVolumeSource(in.GCEPersistentDisk, out.GCEPersistentDisk, s); err != nil { + in, out := &in.GCEPersistentDisk, &out.GCEPersistentDisk + *out = new(api.GCEPersistentDiskVolumeSource) + if err := Convert_v1_GCEPersistentDiskVolumeSource_To_api_GCEPersistentDiskVolumeSource(*in, *out, s); err != nil { return err } } else { out.GCEPersistentDisk = nil } - // unable to generate simple pointer conversion for v1.AWSElasticBlockStoreVolumeSource -> 
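The hunks above replace the old hand-rolled pointer handling (and its "unable to generate simple pointer conversion" comments) with one uniform shape: allocate the destination pointer only when the source pointer is non-nil, convert through the typed helper, and explicitly nil the field otherwise. A compact sketch of that optional-field pattern with invented HostPath-like types:

package main

import "fmt"

// Invented single-field stand-ins for the v1/api HostPathVolumeSource pair.
type v1HostPath struct{ Path string }
type apiHostPath struct{ Path string }

func convertHostPath(in *v1HostPath, out *apiHostPath) error {
	out.Path = in.Path
	return nil
}

type v1Source struct{ HostPath *v1HostPath }
type apiSource struct{ HostPath *apiHostPath }

// convertSource shows the optional-field shape: allocate the destination only
// when the source pointer is set, and nil the field otherwise so a reused
// destination struct cannot carry stale data forward.
func convertSource(in *v1Source, out *apiSource) error {
	if in.HostPath != nil {
		in, out := &in.HostPath, &out.HostPath // shadowed, exactly as in the generated code
		*out = new(apiHostPath)
		if err := convertHostPath(*in, *out); err != nil {
			return err
		}
	} else {
		out.HostPath = nil
	}
	return nil
}

func main() {
	src := v1Source{HostPath: &v1HostPath{Path: "/data"}}
	dst := apiSource{HostPath: &apiHostPath{Path: "stale"}}
	if err := convertSource(&src, &dst); err != nil {
		panic(err)
	}
	fmt.Println(dst.HostPath.Path) // /data
}

Explicitly assigning nil in the else branch matters when the destination struct is reused: it prevents a stale pointer from a previous conversion surviving into the new result.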
api.AWSElasticBlockStoreVolumeSource if in.AWSElasticBlockStore != nil { - out.AWSElasticBlockStore = new(api.AWSElasticBlockStoreVolumeSource) - if err := Convert_v1_AWSElasticBlockStoreVolumeSource_To_api_AWSElasticBlockStoreVolumeSource(in.AWSElasticBlockStore, out.AWSElasticBlockStore, s); err != nil { + in, out := &in.AWSElasticBlockStore, &out.AWSElasticBlockStore + *out = new(api.AWSElasticBlockStoreVolumeSource) + if err := Convert_v1_AWSElasticBlockStoreVolumeSource_To_api_AWSElasticBlockStoreVolumeSource(*in, *out, s); err != nil { return err } } else { out.AWSElasticBlockStore = nil } - // unable to generate simple pointer conversion for v1.HostPathVolumeSource -> api.HostPathVolumeSource if in.HostPath != nil { - out.HostPath = new(api.HostPathVolumeSource) - if err := Convert_v1_HostPathVolumeSource_To_api_HostPathVolumeSource(in.HostPath, out.HostPath, s); err != nil { + in, out := &in.HostPath, &out.HostPath + *out = new(api.HostPathVolumeSource) + if err := Convert_v1_HostPathVolumeSource_To_api_HostPathVolumeSource(*in, *out, s); err != nil { return err } } else { out.HostPath = nil } - // unable to generate simple pointer conversion for v1.GlusterfsVolumeSource -> api.GlusterfsVolumeSource if in.Glusterfs != nil { - out.Glusterfs = new(api.GlusterfsVolumeSource) - if err := Convert_v1_GlusterfsVolumeSource_To_api_GlusterfsVolumeSource(in.Glusterfs, out.Glusterfs, s); err != nil { + in, out := &in.Glusterfs, &out.Glusterfs + *out = new(api.GlusterfsVolumeSource) + if err := Convert_v1_GlusterfsVolumeSource_To_api_GlusterfsVolumeSource(*in, *out, s); err != nil { return err } } else { out.Glusterfs = nil } - // unable to generate simple pointer conversion for v1.NFSVolumeSource -> api.NFSVolumeSource if in.NFS != nil { - out.NFS = new(api.NFSVolumeSource) - if err := Convert_v1_NFSVolumeSource_To_api_NFSVolumeSource(in.NFS, out.NFS, s); err != nil { + in, out := &in.NFS, &out.NFS + *out = new(api.NFSVolumeSource) + if err := Convert_v1_NFSVolumeSource_To_api_NFSVolumeSource(*in, *out, s); err != nil { return err } } else { out.NFS = nil } - // unable to generate simple pointer conversion for v1.RBDVolumeSource -> api.RBDVolumeSource if in.RBD != nil { - out.RBD = new(api.RBDVolumeSource) - if err := Convert_v1_RBDVolumeSource_To_api_RBDVolumeSource(in.RBD, out.RBD, s); err != nil { + in, out := &in.RBD, &out.RBD + *out = new(api.RBDVolumeSource) + if err := Convert_v1_RBDVolumeSource_To_api_RBDVolumeSource(*in, *out, s); err != nil { return err } } else { out.RBD = nil } - // unable to generate simple pointer conversion for v1.ISCSIVolumeSource -> api.ISCSIVolumeSource if in.ISCSI != nil { - out.ISCSI = new(api.ISCSIVolumeSource) - if err := Convert_v1_ISCSIVolumeSource_To_api_ISCSIVolumeSource(in.ISCSI, out.ISCSI, s); err != nil { + in, out := &in.ISCSI, &out.ISCSI + *out = new(api.ISCSIVolumeSource) + if err := Convert_v1_ISCSIVolumeSource_To_api_ISCSIVolumeSource(*in, *out, s); err != nil { return err } } else { out.ISCSI = nil } - // unable to generate simple pointer conversion for v1.CinderVolumeSource -> api.CinderVolumeSource if in.Cinder != nil { - out.Cinder = new(api.CinderVolumeSource) - if err := Convert_v1_CinderVolumeSource_To_api_CinderVolumeSource(in.Cinder, out.Cinder, s); err != nil { + in, out := &in.Cinder, &out.Cinder + *out = new(api.CinderVolumeSource) + if err := Convert_v1_CinderVolumeSource_To_api_CinderVolumeSource(*in, *out, s); err != nil { return err } } else { out.Cinder = nil } - // unable to generate simple pointer conversion for 
v1.CephFSVolumeSource -> api.CephFSVolumeSource if in.CephFS != nil { - out.CephFS = new(api.CephFSVolumeSource) - if err := Convert_v1_CephFSVolumeSource_To_api_CephFSVolumeSource(in.CephFS, out.CephFS, s); err != nil { + in, out := &in.CephFS, &out.CephFS + *out = new(api.CephFSVolumeSource) + if err := Convert_v1_CephFSVolumeSource_To_api_CephFSVolumeSource(*in, *out, s); err != nil { return err } } else { out.CephFS = nil } - // unable to generate simple pointer conversion for v1.FCVolumeSource -> api.FCVolumeSource if in.FC != nil { - out.FC = new(api.FCVolumeSource) - if err := Convert_v1_FCVolumeSource_To_api_FCVolumeSource(in.FC, out.FC, s); err != nil { + in, out := &in.FC, &out.FC + *out = new(api.FCVolumeSource) + if err := Convert_v1_FCVolumeSource_To_api_FCVolumeSource(*in, *out, s); err != nil { return err } } else { out.FC = nil } - // unable to generate simple pointer conversion for v1.FlockerVolumeSource -> api.FlockerVolumeSource if in.Flocker != nil { - out.Flocker = new(api.FlockerVolumeSource) - if err := Convert_v1_FlockerVolumeSource_To_api_FlockerVolumeSource(in.Flocker, out.Flocker, s); err != nil { + in, out := &in.Flocker, &out.Flocker + *out = new(api.FlockerVolumeSource) + if err := Convert_v1_FlockerVolumeSource_To_api_FlockerVolumeSource(*in, *out, s); err != nil { return err } } else { out.Flocker = nil } - // unable to generate simple pointer conversion for v1.FlexVolumeSource -> api.FlexVolumeSource if in.FlexVolume != nil { - out.FlexVolume = new(api.FlexVolumeSource) - if err := Convert_v1_FlexVolumeSource_To_api_FlexVolumeSource(in.FlexVolume, out.FlexVolume, s); err != nil { + in, out := &in.FlexVolume, &out.FlexVolume + *out = new(api.FlexVolumeSource) + if err := Convert_v1_FlexVolumeSource_To_api_FlexVolumeSource(*in, *out, s); err != nil { return err } } else { out.FlexVolume = nil } - // unable to generate simple pointer conversion for v1.AzureFileVolumeSource -> api.AzureFileVolumeSource if in.AzureFile != nil { - out.AzureFile = new(api.AzureFileVolumeSource) - if err := Convert_v1_AzureFileVolumeSource_To_api_AzureFileVolumeSource(in.AzureFile, out.AzureFile, s); err != nil { + in, out := &in.AzureFile, &out.AzureFile + *out = new(api.AzureFileVolumeSource) + if err := Convert_v1_AzureFileVolumeSource_To_api_AzureFileVolumeSource(*in, *out, s); err != nil { return err } } else { out.AzureFile = nil } + if in.VsphereVolume != nil { + in, out := &in.VsphereVolume, &out.VsphereVolume + *out = new(api.VsphereVirtualDiskVolumeSource) + if err := Convert_v1_VsphereVirtualDiskVolumeSource_To_api_VsphereVirtualDiskVolumeSource(*in, *out, s); err != nil { + return err + } + } else { + out.VsphereVolume = nil + } return nil } @@ -5295,28 +4078,160 @@ func Convert_v1_PersistentVolumeSource_To_api_PersistentVolumeSource(in *Persist return autoConvert_v1_PersistentVolumeSource_To_api_PersistentVolumeSource(in, out, s) } -func autoConvert_v1_PersistentVolumeSpec_To_api_PersistentVolumeSpec(in *PersistentVolumeSpec, out *api.PersistentVolumeSpec, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*PersistentVolumeSpec))(in) +func autoConvert_api_PersistentVolumeSource_To_v1_PersistentVolumeSource(in *api.PersistentVolumeSource, out *PersistentVolumeSource, s conversion.Scope) error { + if in.GCEPersistentDisk != nil { + in, out := &in.GCEPersistentDisk, &out.GCEPersistentDisk + *out = new(GCEPersistentDiskVolumeSource) + if err := 
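Every converter in this file comes as a pair: an autoConvert_* body holding only what the generator can derive mechanically, and an exported Convert_* that usually just delegates to it. The split gives hand-written files a seam: when a field needs manual handling, a custom Convert_* wraps the generated body instead of replacing it. A sketch of that layering with hypothetical names (convertThing, autoConvertThing, and the Deprecated field are all invented for illustration):

package main

import "fmt"

// Invented types: Deprecated models a field the generator flags as having no peer.
type v1Thing struct{ Name, Deprecated string }
type apiThing struct{ Name string }

// autoConvertThing holds only what a generator could emit mechanically.
func autoConvertThing(in *v1Thing, out *apiThing) error {
	out.Name = in.Name
	// in.Deprecated has no peer in out; left to the manual wrapper below.
	return nil
}

// convertThing is the exported entry point. With nothing to fix up it would be a
// one-line delegation; here a hypothetical manual step is layered on top.
func convertThing(in *v1Thing, out *apiThing) error {
	if err := autoConvertThing(in, out); err != nil {
		return err
	}
	if out.Name == "" { // illustrative fix-up, not taken from the real code
		out.Name = in.Deprecated
	}
	return nil
}

func main() {
	var out apiThing
	if err := convertThing(&v1Thing{Deprecated: "legacy"}, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.Name) // legacy
}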
Convert_api_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource(*in, *out, s); err != nil { + return err + } + } else { + out.GCEPersistentDisk = nil + } + if in.AWSElasticBlockStore != nil { + in, out := &in.AWSElasticBlockStore, &out.AWSElasticBlockStore + *out = new(AWSElasticBlockStoreVolumeSource) + if err := Convert_api_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource(*in, *out, s); err != nil { + return err + } + } else { + out.AWSElasticBlockStore = nil + } + if in.HostPath != nil { + in, out := &in.HostPath, &out.HostPath + *out = new(HostPathVolumeSource) + if err := Convert_api_HostPathVolumeSource_To_v1_HostPathVolumeSource(*in, *out, s); err != nil { + return err + } + } else { + out.HostPath = nil + } + if in.Glusterfs != nil { + in, out := &in.Glusterfs, &out.Glusterfs + *out = new(GlusterfsVolumeSource) + if err := Convert_api_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource(*in, *out, s); err != nil { + return err + } + } else { + out.Glusterfs = nil + } + if in.NFS != nil { + in, out := &in.NFS, &out.NFS + *out = new(NFSVolumeSource) + if err := Convert_api_NFSVolumeSource_To_v1_NFSVolumeSource(*in, *out, s); err != nil { + return err + } + } else { + out.NFS = nil + } + if in.RBD != nil { + in, out := &in.RBD, &out.RBD + *out = new(RBDVolumeSource) + if err := Convert_api_RBDVolumeSource_To_v1_RBDVolumeSource(*in, *out, s); err != nil { + return err + } + } else { + out.RBD = nil + } + if in.ISCSI != nil { + in, out := &in.ISCSI, &out.ISCSI + *out = new(ISCSIVolumeSource) + if err := Convert_api_ISCSIVolumeSource_To_v1_ISCSIVolumeSource(*in, *out, s); err != nil { + return err + } + } else { + out.ISCSI = nil + } + if in.FlexVolume != nil { + in, out := &in.FlexVolume, &out.FlexVolume + *out = new(FlexVolumeSource) + if err := Convert_api_FlexVolumeSource_To_v1_FlexVolumeSource(*in, *out, s); err != nil { + return err + } + } else { + out.FlexVolume = nil + } + if in.Cinder != nil { + in, out := &in.Cinder, &out.Cinder + *out = new(CinderVolumeSource) + if err := Convert_api_CinderVolumeSource_To_v1_CinderVolumeSource(*in, *out, s); err != nil { + return err + } + } else { + out.Cinder = nil + } + if in.CephFS != nil { + in, out := &in.CephFS, &out.CephFS + *out = new(CephFSVolumeSource) + if err := Convert_api_CephFSVolumeSource_To_v1_CephFSVolumeSource(*in, *out, s); err != nil { + return err + } + } else { + out.CephFS = nil + } + if in.FC != nil { + in, out := &in.FC, &out.FC + *out = new(FCVolumeSource) + if err := Convert_api_FCVolumeSource_To_v1_FCVolumeSource(*in, *out, s); err != nil { + return err + } + } else { + out.FC = nil + } + if in.Flocker != nil { + in, out := &in.Flocker, &out.Flocker + *out = new(FlockerVolumeSource) + if err := Convert_api_FlockerVolumeSource_To_v1_FlockerVolumeSource(*in, *out, s); err != nil { + return err + } + } else { + out.Flocker = nil } - if err := s.Convert(&in.Capacity, &out.Capacity, 0); err != nil { + if in.AzureFile != nil { + in, out := &in.AzureFile, &out.AzureFile + *out = new(AzureFileVolumeSource) + if err := Convert_api_AzureFileVolumeSource_To_v1_AzureFileVolumeSource(*in, *out, s); err != nil { + return err + } + } else { + out.AzureFile = nil + } + if in.VsphereVolume != nil { + in, out := &in.VsphereVolume, &out.VsphereVolume + *out = new(VsphereVirtualDiskVolumeSource) + if err := Convert_api_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource(*in, *out, s); err != nil { + return err + } + } else { + out.VsphereVolume = nil + } + return nil +} + +func 
Convert_api_PersistentVolumeSource_To_v1_PersistentVolumeSource(in *api.PersistentVolumeSource, out *PersistentVolumeSource, s conversion.Scope) error { + return autoConvert_api_PersistentVolumeSource_To_v1_PersistentVolumeSource(in, out, s) +} + +func autoConvert_v1_PersistentVolumeSpec_To_api_PersistentVolumeSpec(in *PersistentVolumeSpec, out *api.PersistentVolumeSpec, s conversion.Scope) error { + if err := Convert_v1_ResourceList_To_api_ResourceList(&in.Capacity, &out.Capacity, s); err != nil { return err } if err := Convert_v1_PersistentVolumeSource_To_api_PersistentVolumeSource(&in.PersistentVolumeSource, &out.PersistentVolumeSource, s); err != nil { return err } if in.AccessModes != nil { - out.AccessModes = make([]api.PersistentVolumeAccessMode, len(in.AccessModes)) - for i := range in.AccessModes { - out.AccessModes[i] = api.PersistentVolumeAccessMode(in.AccessModes[i]) + in, out := &in.AccessModes, &out.AccessModes + *out = make([]api.PersistentVolumeAccessMode, len(*in)) + for i := range *in { + (*out)[i] = api.PersistentVolumeAccessMode((*in)[i]) } } else { out.AccessModes = nil } - // unable to generate simple pointer conversion for v1.ObjectReference -> api.ObjectReference if in.ClaimRef != nil { - out.ClaimRef = new(api.ObjectReference) - if err := Convert_v1_ObjectReference_To_api_ObjectReference(in.ClaimRef, out.ClaimRef, s); err != nil { + in, out := &in.ClaimRef, &out.ClaimRef + *out = new(api.ObjectReference) + if err := Convert_v1_ObjectReference_To_api_ObjectReference(*in, *out, s); err != nil { return err } } else { @@ -5330,10 +4245,50 @@ func Convert_v1_PersistentVolumeSpec_To_api_PersistentVolumeSpec(in *PersistentV return autoConvert_v1_PersistentVolumeSpec_To_api_PersistentVolumeSpec(in, out, s) } -func autoConvert_v1_PersistentVolumeStatus_To_api_PersistentVolumeStatus(in *PersistentVolumeStatus, out *api.PersistentVolumeStatus, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*PersistentVolumeStatus))(in) +func autoConvert_api_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(in *api.PersistentVolumeSpec, out *PersistentVolumeSpec, s conversion.Scope) error { + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = make(ResourceList, len(*in)) + for key, val := range *in { + newVal := new(resource.Quantity) + if err := api.Convert_resource_Quantity_To_resource_Quantity(&val, newVal, s); err != nil { + return err + } + (*out)[ResourceName(key)] = *newVal + } + } else { + out.Capacity = nil + } + if err := Convert_api_PersistentVolumeSource_To_v1_PersistentVolumeSource(&in.PersistentVolumeSource, &out.PersistentVolumeSource, s); err != nil { + return err + } + if in.AccessModes != nil { + in, out := &in.AccessModes, &out.AccessModes + *out = make([]PersistentVolumeAccessMode, len(*in)) + for i := range *in { + (*out)[i] = PersistentVolumeAccessMode((*in)[i]) + } + } else { + out.AccessModes = nil + } + if in.ClaimRef != nil { + in, out := &in.ClaimRef, &out.ClaimRef + *out = new(ObjectReference) + if err := Convert_api_ObjectReference_To_v1_ObjectReference(*in, *out, s); err != nil { + return err + } + } else { + out.ClaimRef = nil } + out.PersistentVolumeReclaimPolicy = PersistentVolumeReclaimPolicy(in.PersistentVolumeReclaimPolicy) + return nil +} + +func Convert_api_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(in *api.PersistentVolumeSpec, out *PersistentVolumeSpec, s conversion.Scope) error { + return 
autoConvert_api_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(in, out, s) +} + +func autoConvert_v1_PersistentVolumeStatus_To_api_PersistentVolumeStatus(in *PersistentVolumeStatus, out *api.PersistentVolumeStatus, s conversion.Scope) error { out.Phase = api.PersistentVolumePhase(in.Phase) out.Message = in.Message out.Reason = in.Reason @@ -5344,9 +4299,21 @@ func Convert_v1_PersistentVolumeStatus_To_api_PersistentVolumeStatus(in *Persist return autoConvert_v1_PersistentVolumeStatus_To_api_PersistentVolumeStatus(in, out, s) } +func autoConvert_api_PersistentVolumeStatus_To_v1_PersistentVolumeStatus(in *api.PersistentVolumeStatus, out *PersistentVolumeStatus, s conversion.Scope) error { + out.Phase = PersistentVolumePhase(in.Phase) + out.Message = in.Message + out.Reason = in.Reason + return nil +} + +func Convert_api_PersistentVolumeStatus_To_v1_PersistentVolumeStatus(in *api.PersistentVolumeStatus, out *PersistentVolumeStatus, s conversion.Scope) error { + return autoConvert_api_PersistentVolumeStatus_To_v1_PersistentVolumeStatus(in, out, s) +} + func autoConvert_v1_Pod_To_api_Pod(in *Pod, out *api.Pod, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*Pod))(in) + SetDefaults_Pod(in) + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err } if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { return err @@ -5360,9 +4327,184 @@ func autoConvert_v1_Pod_To_api_Pod(in *Pod, out *api.Pod, s conversion.Scope) er return nil } +func autoConvert_api_Pod_To_v1_Pod(in *api.Pod, out *Pod, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { + return err + } + if err := Convert_api_PodSpec_To_v1_PodSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_api_PodStatus_To_v1_PodStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func autoConvert_v1_PodAffinity_To_api_PodAffinity(in *PodAffinity, out *api.PodAffinity, s conversion.Scope) error { + if in.RequiredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution + *out = make([]api.PodAffinityTerm, len(*in)) + for i := range *in { + if err := Convert_v1_PodAffinityTerm_To_api_PodAffinityTerm(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.RequiredDuringSchedulingIgnoredDuringExecution = nil + } + if in.PreferredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution + *out = make([]api.WeightedPodAffinityTerm, len(*in)) + for i := range *in { + if err := Convert_v1_WeightedPodAffinityTerm_To_api_WeightedPodAffinityTerm(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.PreferredDuringSchedulingIgnoredDuringExecution = nil + } + return nil +} + +func Convert_v1_PodAffinity_To_api_PodAffinity(in *PodAffinity, out *api.PodAffinity, s conversion.Scope) error { + return autoConvert_v1_PodAffinity_To_api_PodAffinity(in, out, s) +} + +func autoConvert_api_PodAffinity_To_v1_PodAffinity(in *api.PodAffinity, out *PodAffinity, s 
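The Pod conversion above swaps the old reflection-based defaulting hook (s.DefaultingInterface(reflect.TypeOf(*in)) plus a type assertion) for a direct, statically dispatched SetDefaults_Pod(in) call before any fields are copied. The shape, sketched with toy types; the "Always" default shown is illustrative rather than a claim about the generated defaulter's exact contents:

package main

import "fmt"

// Toy pod types; only RestartPolicy is modeled.
type v1Pod struct{ RestartPolicy string }
type apiPod struct{ RestartPolicy string }

// setDefaultsPod plays the role of the generated SetDefaults_Pod: fill in unset
// fields on the versioned object before any conversion happens.
func setDefaultsPod(in *v1Pod) {
	if in.RestartPolicy == "" {
		in.RestartPolicy = "Always"
	}
}

func convertPod(in *v1Pod, out *apiPod) error {
	setDefaultsPod(in) // statically dispatched; no reflect.TypeOf lookup at runtime
	out.RestartPolicy = in.RestartPolicy
	return nil
}

func main() {
	var out apiPod
	if err := convertPod(&v1Pod{}, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.RestartPolicy) // Always
}

Besides dropping the reflect dependency, the direct call means a missing defaulter is a compile error rather than a silently skipped lookup.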
conversion.Scope) error { + if in.RequiredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution + *out = make([]PodAffinityTerm, len(*in)) + for i := range *in { + if err := Convert_api_PodAffinityTerm_To_v1_PodAffinityTerm(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.RequiredDuringSchedulingIgnoredDuringExecution = nil + } + if in.PreferredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution + *out = make([]WeightedPodAffinityTerm, len(*in)) + for i := range *in { + if err := Convert_api_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.PreferredDuringSchedulingIgnoredDuringExecution = nil + } + return nil +} + +func Convert_api_PodAffinity_To_v1_PodAffinity(in *api.PodAffinity, out *PodAffinity, s conversion.Scope) error { + return autoConvert_api_PodAffinity_To_v1_PodAffinity(in, out, s) +} + +func autoConvert_v1_PodAffinityTerm_To_api_PodAffinityTerm(in *PodAffinityTerm, out *api.PodAffinityTerm, s conversion.Scope) error { + out.LabelSelector = in.LabelSelector + out.Namespaces = in.Namespaces + out.TopologyKey = in.TopologyKey + return nil +} + +func Convert_v1_PodAffinityTerm_To_api_PodAffinityTerm(in *PodAffinityTerm, out *api.PodAffinityTerm, s conversion.Scope) error { + return autoConvert_v1_PodAffinityTerm_To_api_PodAffinityTerm(in, out, s) +} + +func autoConvert_api_PodAffinityTerm_To_v1_PodAffinityTerm(in *api.PodAffinityTerm, out *PodAffinityTerm, s conversion.Scope) error { + out.LabelSelector = in.LabelSelector + out.Namespaces = in.Namespaces + out.TopologyKey = in.TopologyKey + return nil +} + +func Convert_api_PodAffinityTerm_To_v1_PodAffinityTerm(in *api.PodAffinityTerm, out *PodAffinityTerm, s conversion.Scope) error { + return autoConvert_api_PodAffinityTerm_To_v1_PodAffinityTerm(in, out, s) +} + +func autoConvert_v1_PodAntiAffinity_To_api_PodAntiAffinity(in *PodAntiAffinity, out *api.PodAntiAffinity, s conversion.Scope) error { + if in.RequiredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution + *out = make([]api.PodAffinityTerm, len(*in)) + for i := range *in { + if err := Convert_v1_PodAffinityTerm_To_api_PodAffinityTerm(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.RequiredDuringSchedulingIgnoredDuringExecution = nil + } + if in.PreferredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution + *out = make([]api.WeightedPodAffinityTerm, len(*in)) + for i := range *in { + if err := Convert_v1_WeightedPodAffinityTerm_To_api_WeightedPodAffinityTerm(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.PreferredDuringSchedulingIgnoredDuringExecution = nil + } + return nil +} + +func Convert_v1_PodAntiAffinity_To_api_PodAntiAffinity(in *PodAntiAffinity, out *api.PodAntiAffinity, s conversion.Scope) error { + return autoConvert_v1_PodAntiAffinity_To_api_PodAntiAffinity(in, out, s) +} + +func autoConvert_api_PodAntiAffinity_To_v1_PodAntiAffinity(in *api.PodAntiAffinity, out *PodAntiAffinity, s conversion.Scope) error { + if 
in.RequiredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution + *out = make([]PodAffinityTerm, len(*in)) + for i := range *in { + if err := Convert_api_PodAffinityTerm_To_v1_PodAffinityTerm(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.RequiredDuringSchedulingIgnoredDuringExecution = nil + } + if in.PreferredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution + *out = make([]WeightedPodAffinityTerm, len(*in)) + for i := range *in { + if err := Convert_api_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.PreferredDuringSchedulingIgnoredDuringExecution = nil + } + return nil +} + +func Convert_api_PodAntiAffinity_To_v1_PodAntiAffinity(in *api.PodAntiAffinity, out *PodAntiAffinity, s conversion.Scope) error { + return autoConvert_api_PodAntiAffinity_To_v1_PodAntiAffinity(in, out, s) +} + func autoConvert_v1_PodAttachOptions_To_api_PodAttachOptions(in *PodAttachOptions, out *api.PodAttachOptions, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*PodAttachOptions))(in) + SetDefaults_PodAttachOptions(in) + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + out.Stdin = in.Stdin + out.Stdout = in.Stdout + out.Stderr = in.Stderr + out.TTY = in.TTY + out.Container = in.Container + return nil +} + +func Convert_v1_PodAttachOptions_To_api_PodAttachOptions(in *PodAttachOptions, out *api.PodAttachOptions, s conversion.Scope) error { + return autoConvert_v1_PodAttachOptions_To_api_PodAttachOptions(in, out, s) +} + +func autoConvert_api_PodAttachOptions_To_v1_PodAttachOptions(in *api.PodAttachOptions, out *PodAttachOptions, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err } out.Stdin = in.Stdin out.Stdout = in.Stdout @@ -5372,14 +4514,11 @@ func autoConvert_v1_PodAttachOptions_To_api_PodAttachOptions(in *PodAttachOption return nil } -func Convert_v1_PodAttachOptions_To_api_PodAttachOptions(in *PodAttachOptions, out *api.PodAttachOptions, s conversion.Scope) error { - return autoConvert_v1_PodAttachOptions_To_api_PodAttachOptions(in, out, s) +func Convert_api_PodAttachOptions_To_v1_PodAttachOptions(in *api.PodAttachOptions, out *PodAttachOptions, s conversion.Scope) error { + return autoConvert_api_PodAttachOptions_To_v1_PodAttachOptions(in, out, s) } func autoConvert_v1_PodCondition_To_api_PodCondition(in *PodCondition, out *api.PodCondition, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*PodCondition))(in) - } out.Type = api.PodConditionType(in.Type) out.Status = api.ConditionStatus(in.Status) if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastProbeTime, &out.LastProbeTime, s); err != nil { @@ -5397,23 +4536,35 @@ func Convert_v1_PodCondition_To_api_PodCondition(in *PodCondition, out *api.PodC return autoConvert_v1_PodCondition_To_api_PodCondition(in, out, s) } +func autoConvert_api_PodCondition_To_v1_PodCondition(in *api.PodCondition, out *PodCondition, s conversion.Scope) error { + out.Type = 
PodConditionType(in.Type) + out.Status = ConditionStatus(in.Status) + if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastProbeTime, &out.LastProbeTime, s); err != nil { + return err + } + if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastTransitionTime, &out.LastTransitionTime, s); err != nil { + return err + } + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +func Convert_api_PodCondition_To_v1_PodCondition(in *api.PodCondition, out *PodCondition, s conversion.Scope) error { + return autoConvert_api_PodCondition_To_v1_PodCondition(in, out, s) +} + func autoConvert_v1_PodExecOptions_To_api_PodExecOptions(in *PodExecOptions, out *api.PodExecOptions, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*PodExecOptions))(in) + SetDefaults_PodExecOptions(in) + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err } out.Stdin = in.Stdin out.Stdout = in.Stdout out.Stderr = in.Stderr out.TTY = in.TTY out.Container = in.Container - if in.Command != nil { - out.Command = make([]string, len(in.Command)) - for i := range in.Command { - out.Command[i] = in.Command[i] - } - } else { - out.Command = nil - } + out.Command = in.Command return nil } @@ -5421,17 +4572,35 @@ func Convert_v1_PodExecOptions_To_api_PodExecOptions(in *PodExecOptions, out *ap return autoConvert_v1_PodExecOptions_To_api_PodExecOptions(in, out, s) } +func autoConvert_api_PodExecOptions_To_v1_PodExecOptions(in *api.PodExecOptions, out *PodExecOptions, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + out.Stdin = in.Stdin + out.Stdout = in.Stdout + out.Stderr = in.Stderr + out.TTY = in.TTY + out.Container = in.Container + out.Command = in.Command + return nil +} + +func Convert_api_PodExecOptions_To_v1_PodExecOptions(in *api.PodExecOptions, out *PodExecOptions, s conversion.Scope) error { + return autoConvert_api_PodExecOptions_To_v1_PodExecOptions(in, out, s) +} + func autoConvert_v1_PodList_To_api_PodList(in *PodList, out *api.PodList, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*PodList))(in) + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err } if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { return err } if in.Items != nil { - out.Items = make([]api.Pod, len(in.Items)) - for i := range in.Items { - if err := Convert_v1_Pod_To_api_Pod(&in.Items[i], &out.Items[i], s); err != nil { + in, out := &in.Items, &out.Items + *out = make([]api.Pod, len(*in)) + for i := range *in { + if err := Convert_v1_Pod_To_api_Pod(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -5445,41 +4614,43 @@ func Convert_v1_PodList_To_api_PodList(in *PodList, out *api.PodList, s conversi return autoConvert_v1_PodList_To_api_PodList(in, out, s) } -func autoConvert_v1_PodLogOptions_To_api_PodLogOptions(in *PodLogOptions, out *api.PodLogOptions, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*PodLogOptions))(in) +func autoConvert_api_PodList_To_v1_PodList(in *api.PodList, out *PodList, s conversion.Scope) error { + if err := 
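In PodExecOptions above, the element-by-element copy of Command collapses to out.Command = in.Command because both sides are plain []string. The assignment copies only the slice header, so source and destination share one backing array afterwards; a small sketch making that aliasing visible:

package main

import "fmt"

type v1Exec struct{ Command []string }
type apiExec struct{ Command []string }

// convertExec: identical element types, so one header assignment replaces the
// old make-and-copy loop. Both structs then share a single backing array.
func convertExec(in *v1Exec, out *apiExec) {
	out.Command = in.Command
}

func main() {
	in := v1Exec{Command: []string{"sh", "-c", "echo hi"}}
	var out apiExec
	convertExec(&in, &out)
	out.Command[0] = "bash"
	fmt.Println(in.Command[0]) // bash: the slices alias each other
}

That trade is presumably acceptable because conversion treats its inputs as read-only; the same reasoning applies to the pointer fields (SinceSeconds, SinceTime, TailLines, LimitBytes) assigned directly in the PodLogOptions conversion below.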
api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err } - out.Container = in.Container - out.Follow = in.Follow - out.Previous = in.Previous - if in.SinceSeconds != nil { - out.SinceSeconds = new(int64) - *out.SinceSeconds = *in.SinceSeconds - } else { - out.SinceSeconds = nil + if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { + return err } - // unable to generate simple pointer conversion for unversioned.Time -> unversioned.Time - if in.SinceTime != nil { - out.SinceTime = new(unversioned.Time) - if err := api.Convert_unversioned_Time_To_unversioned_Time(in.SinceTime, out.SinceTime, s); err != nil { - return err + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Pod, len(*in)) + for i := range *in { + if err := Convert_api_Pod_To_v1_Pod(&(*in)[i], &(*out)[i], s); err != nil { + return err + } } } else { - out.SinceTime = nil - } - out.Timestamps = in.Timestamps - if in.TailLines != nil { - out.TailLines = new(int64) - *out.TailLines = *in.TailLines - } else { - out.TailLines = nil + out.Items = nil } - if in.LimitBytes != nil { - out.LimitBytes = new(int64) - *out.LimitBytes = *in.LimitBytes - } else { - out.LimitBytes = nil + return nil +} + +func Convert_api_PodList_To_v1_PodList(in *api.PodList, out *PodList, s conversion.Scope) error { + return autoConvert_api_PodList_To_v1_PodList(in, out, s) +} + +func autoConvert_v1_PodLogOptions_To_api_PodLogOptions(in *PodLogOptions, out *api.PodLogOptions, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err } + out.Container = in.Container + out.Follow = in.Follow + out.Previous = in.Previous + out.SinceSeconds = in.SinceSeconds + out.SinceTime = in.SinceTime + out.Timestamps = in.Timestamps + out.TailLines = in.TailLines + out.LimitBytes = in.LimitBytes return nil } @@ -5487,9 +4658,28 @@ func Convert_v1_PodLogOptions_To_api_PodLogOptions(in *PodLogOptions, out *api.P return autoConvert_v1_PodLogOptions_To_api_PodLogOptions(in, out, s) } +func autoConvert_api_PodLogOptions_To_v1_PodLogOptions(in *api.PodLogOptions, out *PodLogOptions, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + out.Container = in.Container + out.Follow = in.Follow + out.Previous = in.Previous + out.SinceSeconds = in.SinceSeconds + out.SinceTime = in.SinceTime + out.Timestamps = in.Timestamps + out.TailLines = in.TailLines + out.LimitBytes = in.LimitBytes + return nil +} + +func Convert_api_PodLogOptions_To_v1_PodLogOptions(in *api.PodLogOptions, out *PodLogOptions, s conversion.Scope) error { + return autoConvert_api_PodLogOptions_To_v1_PodLogOptions(in, out, s) +} + func autoConvert_v1_PodProxyOptions_To_api_PodProxyOptions(in *PodProxyOptions, out *api.PodProxyOptions, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*PodProxyOptions))(in) + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err } out.Path = in.Path return nil @@ -5499,88 +4689,108 @@ func Convert_v1_PodProxyOptions_To_api_PodProxyOptions(in *PodProxyOptions, out return autoConvert_v1_PodProxyOptions_To_api_PodProxyOptions(in, out, s) } -func autoConvert_v1_PodSpec_To_api_PodSpec(in *PodSpec, out 
*api.PodSpec, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*PodSpec))(in) +func autoConvert_api_PodProxyOptions_To_v1_PodProxyOptions(in *api.PodProxyOptions, out *PodProxyOptions, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + out.Path = in.Path + return nil +} + +func Convert_api_PodProxyOptions_To_v1_PodProxyOptions(in *api.PodProxyOptions, out *PodProxyOptions, s conversion.Scope) error { + return autoConvert_api_PodProxyOptions_To_v1_PodProxyOptions(in, out, s) +} + +func autoConvert_v1_PodSecurityContext_To_api_PodSecurityContext(in *PodSecurityContext, out *api.PodSecurityContext, s conversion.Scope) error { + if in.SELinuxOptions != nil { + in, out := &in.SELinuxOptions, &out.SELinuxOptions + *out = new(api.SELinuxOptions) + if err := Convert_v1_SELinuxOptions_To_api_SELinuxOptions(*in, *out, s); err != nil { + return err + } + } else { + out.SELinuxOptions = nil } + out.RunAsUser = in.RunAsUser + out.RunAsNonRoot = in.RunAsNonRoot + out.SupplementalGroups = in.SupplementalGroups + out.FSGroup = in.FSGroup + return nil +} + +func autoConvert_api_PodSpec_To_v1_PodSpec(in *api.PodSpec, out *PodSpec, s conversion.Scope) error { if in.Volumes != nil { - out.Volumes = make([]api.Volume, len(in.Volumes)) - for i := range in.Volumes { - if err := Convert_v1_Volume_To_api_Volume(&in.Volumes[i], &out.Volumes[i], s); err != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]Volume, len(*in)) + for i := range *in { + if err := Convert_api_Volume_To_v1_Volume(&(*in)[i], &(*out)[i], s); err != nil { return err } } } else { out.Volumes = nil } - if in.Containers != nil { - out.Containers = make([]api.Container, len(in.Containers)) - for i := range in.Containers { - if err := Convert_v1_Container_To_api_Container(&in.Containers[i], &out.Containers[i], s); err != nil { + if in.InitContainers != nil { + in, out := &in.InitContainers, &out.InitContainers + *out = make([]Container, len(*in)) + for i := range *in { + if err := Convert_api_Container_To_v1_Container(&(*in)[i], &(*out)[i], s); err != nil { return err } } } else { - out.Containers = nil - } - out.RestartPolicy = api.RestartPolicy(in.RestartPolicy) - if in.TerminationGracePeriodSeconds != nil { - out.TerminationGracePeriodSeconds = new(int64) - *out.TerminationGracePeriodSeconds = *in.TerminationGracePeriodSeconds - } else { - out.TerminationGracePeriodSeconds = nil - } - if in.ActiveDeadlineSeconds != nil { - out.ActiveDeadlineSeconds = new(int64) - *out.ActiveDeadlineSeconds = *in.ActiveDeadlineSeconds - } else { - out.ActiveDeadlineSeconds = nil + out.InitContainers = nil } - out.DNSPolicy = api.DNSPolicy(in.DNSPolicy) - if in.NodeSelector != nil { - out.NodeSelector = make(map[string]string) - for key, val := range in.NodeSelector { - out.NodeSelector[key] = val + if in.Containers != nil { + in, out := &in.Containers, &out.Containers + *out = make([]Container, len(*in)) + for i := range *in { + if err := Convert_api_Container_To_v1_Container(&(*in)[i], &(*out)[i], s); err != nil { + return err + } } } else { - out.NodeSelector = nil + out.Containers = nil } + out.RestartPolicy = RestartPolicy(in.RestartPolicy) + out.TerminationGracePeriodSeconds = in.TerminationGracePeriodSeconds + out.ActiveDeadlineSeconds = in.ActiveDeadlineSeconds + out.DNSPolicy = DNSPolicy(in.DNSPolicy) + out.NodeSelector = in.NodeSelector out.ServiceAccountName 
= in.ServiceAccountName - // in.DeprecatedServiceAccount has no peer in out out.NodeName = in.NodeName - // in.HostNetwork has no peer in out - // in.HostPID has no peer in out - // in.HostIPC has no peer in out - // unable to generate simple pointer conversion for v1.PodSecurityContext -> api.PodSecurityContext if in.SecurityContext != nil { - if err := s.Convert(&in.SecurityContext, &out.SecurityContext, 0); err != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(PodSecurityContext) + if err := Convert_api_PodSecurityContext_To_v1_PodSecurityContext(*in, *out, s); err != nil { return err } } else { out.SecurityContext = nil } if in.ImagePullSecrets != nil { - out.ImagePullSecrets = make([]api.LocalObjectReference, len(in.ImagePullSecrets)) - for i := range in.ImagePullSecrets { - if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&in.ImagePullSecrets[i], &out.ImagePullSecrets[i], s); err != nil { + in, out := &in.ImagePullSecrets, &out.ImagePullSecrets + *out = make([]LocalObjectReference, len(*in)) + for i := range *in { + if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(&(*in)[i], &(*out)[i], s); err != nil { return err } } } else { out.ImagePullSecrets = nil } + out.Hostname = in.Hostname + out.Subdomain = in.Subdomain return nil } func autoConvert_v1_PodStatus_To_api_PodStatus(in *PodStatus, out *api.PodStatus, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*PodStatus))(in) - } out.Phase = api.PodPhase(in.Phase) if in.Conditions != nil { - out.Conditions = make([]api.PodCondition, len(in.Conditions)) - for i := range in.Conditions { - if err := Convert_v1_PodCondition_To_api_PodCondition(&in.Conditions[i], &out.Conditions[i], s); err != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]api.PodCondition, len(*in)) + for i := range *in { + if err := Convert_v1_PodCondition_To_api_PodCondition(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -5591,19 +4801,23 @@ func autoConvert_v1_PodStatus_To_api_PodStatus(in *PodStatus, out *api.PodStatus out.Reason = in.Reason out.HostIP = in.HostIP out.PodIP = in.PodIP - // unable to generate simple pointer conversion for unversioned.Time -> unversioned.Time - if in.StartTime != nil { - out.StartTime = new(unversioned.Time) - if err := api.Convert_unversioned_Time_To_unversioned_Time(in.StartTime, out.StartTime, s); err != nil { - return err + out.StartTime = in.StartTime + if in.InitContainerStatuses != nil { + in, out := &in.InitContainerStatuses, &out.InitContainerStatuses + *out = make([]api.ContainerStatus, len(*in)) + for i := range *in { + if err := Convert_v1_ContainerStatus_To_api_ContainerStatus(&(*in)[i], &(*out)[i], s); err != nil { + return err + } } } else { - out.StartTime = nil + out.InitContainerStatuses = nil } if in.ContainerStatuses != nil { - out.ContainerStatuses = make([]api.ContainerStatus, len(in.ContainerStatuses)) - for i := range in.ContainerStatuses { - if err := Convert_v1_ContainerStatus_To_api_ContainerStatus(&in.ContainerStatuses[i], &out.ContainerStatuses[i], s); err != nil { + in, out := &in.ContainerStatuses, &out.ContainerStatuses + *out = make([]api.ContainerStatus, len(*in)) + for i := range *in { + if err := Convert_v1_ContainerStatus_To_api_ContainerStatus(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -5617,93 +4831,235 @@ func Convert_v1_PodStatus_To_api_PodStatus(in *PodStatus, out *api.PodStatus, s return 
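The "in.HostNetwork has no peer in out" comments above mark fields the generator cannot place: in v1 they sit at the top level of PodSpec, while the internal API keeps them inside PodSecurityContext, so moving them is left to hand-written conversion code. A hedged sketch of that reshaping, assuming the nesting just described and using heavily trimmed stand-in structs:

package main

import "fmt"

// Trimmed stand-ins. Assumption being illustrated: v1 keeps HostNetwork flat on
// the pod spec while the internal API nests it in PodSecurityContext.
type v1PodSpec struct {
	HostNetwork     bool
	SecurityContext *v1PodSecurityContext
}
type v1PodSecurityContext struct{ RunAsNonRoot *bool }

type apiPodSpec struct{ SecurityContext *apiPodSecurityContext }
type apiPodSecurityContext struct {
	HostNetwork  bool
	RunAsNonRoot *bool
}

// hoistSecurityFields is the kind of hand-written step layered over the
// generated conversion: it moves the flat v1 field into the nested internal
// struct, allocating the struct if the generated pass left it nil.
func hoistSecurityFields(in *v1PodSpec, out *apiPodSpec) {
	if out.SecurityContext == nil {
		out.SecurityContext = new(apiPodSecurityContext)
	}
	out.SecurityContext.HostNetwork = in.HostNetwork
	if in.SecurityContext != nil {
		out.SecurityContext.RunAsNonRoot = in.SecurityContext.RunAsNonRoot
	}
}

func main() {
	in := v1PodSpec{HostNetwork: true}
	var out apiPodSpec
	hoistSecurityFields(&in, &out)
	fmt.Println(out.SecurityContext.HostNetwork) // true
}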
autoConvert_v1_PodStatus_To_api_PodStatus(in, out, s) } -func autoConvert_v1_PodStatusResult_To_api_PodStatusResult(in *PodStatusResult, out *api.PodStatusResult, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*PodStatusResult))(in) - } +func autoConvert_api_PodStatus_To_v1_PodStatus(in *api.PodStatus, out *PodStatus, s conversion.Scope) error { + out.Phase = PodPhase(in.Phase) + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]PodCondition, len(*in)) + for i := range *in { + if err := Convert_api_PodCondition_To_v1_PodCondition(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Conditions = nil + } + out.Message = in.Message + out.Reason = in.Reason + out.HostIP = in.HostIP + out.PodIP = in.PodIP + out.StartTime = in.StartTime + if in.InitContainerStatuses != nil { + in, out := &in.InitContainerStatuses, &out.InitContainerStatuses + *out = make([]ContainerStatus, len(*in)) + for i := range *in { + if err := Convert_api_ContainerStatus_To_v1_ContainerStatus(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.InitContainerStatuses = nil + } + if in.ContainerStatuses != nil { + in, out := &in.ContainerStatuses, &out.ContainerStatuses + *out = make([]ContainerStatus, len(*in)) + for i := range *in { + if err := Convert_api_ContainerStatus_To_v1_ContainerStatus(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.ContainerStatuses = nil + } + return nil +} + +func Convert_api_PodStatus_To_v1_PodStatus(in *api.PodStatus, out *PodStatus, s conversion.Scope) error { + return autoConvert_api_PodStatus_To_v1_PodStatus(in, out, s) +} + +func autoConvert_v1_PodStatusResult_To_api_PodStatusResult(in *PodStatusResult, out *api.PodStatusResult, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { + return err + } + if err := Convert_v1_PodStatus_To_api_PodStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func autoConvert_api_PodStatusResult_To_v1_PodStatusResult(in *api.PodStatusResult, out *PodStatusResult, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { + return err + } + if err := Convert_api_PodStatus_To_v1_PodStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func autoConvert_v1_PodTemplate_To_api_PodTemplate(in *PodTemplate, out *api.PodTemplate, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { + return err + } + if err := Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + return nil +} + +func Convert_v1_PodTemplate_To_api_PodTemplate(in *PodTemplate, out *api.PodTemplate, s conversion.Scope) error { + return autoConvert_v1_PodTemplate_To_api_PodTemplate(in, out, s) +} + +func autoConvert_api_PodTemplate_To_v1_PodTemplate(in *api.PodTemplate, out 
*PodTemplate, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { + return err + } + if err := Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + return nil +} + +func Convert_api_PodTemplate_To_v1_PodTemplate(in *api.PodTemplate, out *PodTemplate, s conversion.Scope) error { + return autoConvert_api_PodTemplate_To_v1_PodTemplate(in, out, s) +} + +func autoConvert_v1_PodTemplateList_To_api_PodTemplateList(in *PodTemplateList, out *api.PodTemplateList, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { + return err + } + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]api.PodTemplate, len(*in)) + for i := range *in { + if err := Convert_v1_PodTemplate_To_api_PodTemplate(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_v1_PodTemplateList_To_api_PodTemplateList(in *PodTemplateList, out *api.PodTemplateList, s conversion.Scope) error { + return autoConvert_v1_PodTemplateList_To_api_PodTemplateList(in, out, s) +} + +func autoConvert_api_PodTemplateList_To_v1_PodTemplateList(in *api.PodTemplateList, out *PodTemplateList, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { + return err + } + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PodTemplate, len(*in)) + for i := range *in { + if err := Convert_api_PodTemplate_To_v1_PodTemplate(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_api_PodTemplateList_To_v1_PodTemplateList(in *api.PodTemplateList, out *PodTemplateList, s conversion.Scope) error { + return autoConvert_api_PodTemplateList_To_v1_PodTemplateList(in, out, s) +} + +func autoConvert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in *PodTemplateSpec, out *api.PodTemplateSpec, s conversion.Scope) error { if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { return err } - if err := Convert_v1_PodStatus_To_api_PodStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_v1_PodSpec_To_api_PodSpec(&in.Spec, &out.Spec, s); err != nil { return err } return nil } -func Convert_v1_PodStatusResult_To_api_PodStatusResult(in *PodStatusResult, out *api.PodStatusResult, s conversion.Scope) error { - return autoConvert_v1_PodStatusResult_To_api_PodStatusResult(in, out, s) -} - -func autoConvert_v1_PodTemplate_To_api_PodTemplate(in *PodTemplate, out *api.PodTemplate, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*PodTemplate))(in) - } - if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { +func autoConvert_api_PodTemplateSpec_To_v1_PodTemplateSpec(in *api.PodTemplateSpec, out *PodTemplateSpec, s conversion.Scope) error 
{ + if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { return err } - if err := Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + if err := Convert_api_PodSpec_To_v1_PodSpec(&in.Spec, &out.Spec, s); err != nil { return err } return nil } -func Convert_v1_PodTemplate_To_api_PodTemplate(in *PodTemplate, out *api.PodTemplate, s conversion.Scope) error { - return autoConvert_v1_PodTemplate_To_api_PodTemplate(in, out, s) +func autoConvert_v1_Preconditions_To_api_Preconditions(in *Preconditions, out *api.Preconditions, s conversion.Scope) error { + out.UID = in.UID + return nil } -func autoConvert_v1_PodTemplateList_To_api_PodTemplateList(in *PodTemplateList, out *api.PodTemplateList, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*PodTemplateList))(in) - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - out.Items = make([]api.PodTemplate, len(in.Items)) - for i := range in.Items { - if err := Convert_v1_PodTemplate_To_api_PodTemplate(&in.Items[i], &out.Items[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } +func Convert_v1_Preconditions_To_api_Preconditions(in *Preconditions, out *api.Preconditions, s conversion.Scope) error { + return autoConvert_v1_Preconditions_To_api_Preconditions(in, out, s) +} + +func autoConvert_api_Preconditions_To_v1_Preconditions(in *api.Preconditions, out *Preconditions, s conversion.Scope) error { + out.UID = in.UID return nil } -func Convert_v1_PodTemplateList_To_api_PodTemplateList(in *PodTemplateList, out *api.PodTemplateList, s conversion.Scope) error { - return autoConvert_v1_PodTemplateList_To_api_PodTemplateList(in, out, s) +func Convert_api_Preconditions_To_v1_Preconditions(in *api.Preconditions, out *Preconditions, s conversion.Scope) error { + return autoConvert_api_Preconditions_To_v1_Preconditions(in, out, s) } -func autoConvert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in *PodTemplateSpec, out *api.PodTemplateSpec, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*PodTemplateSpec))(in) - } - if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { +func autoConvert_v1_PreferredSchedulingTerm_To_api_PreferredSchedulingTerm(in *PreferredSchedulingTerm, out *api.PreferredSchedulingTerm, s conversion.Scope) error { + out.Weight = in.Weight + if err := Convert_v1_NodeSelectorTerm_To_api_NodeSelectorTerm(&in.Preference, &out.Preference, s); err != nil { return err } - if err := Convert_v1_PodSpec_To_api_PodSpec(&in.Spec, &out.Spec, s); err != nil { + return nil +} + +func Convert_v1_PreferredSchedulingTerm_To_api_PreferredSchedulingTerm(in *PreferredSchedulingTerm, out *api.PreferredSchedulingTerm, s conversion.Scope) error { + return autoConvert_v1_PreferredSchedulingTerm_To_api_PreferredSchedulingTerm(in, out, s) +} + +func autoConvert_api_PreferredSchedulingTerm_To_v1_PreferredSchedulingTerm(in *api.PreferredSchedulingTerm, out *PreferredSchedulingTerm, s conversion.Scope) error { + out.Weight = in.Weight + if err := Convert_api_NodeSelectorTerm_To_v1_NodeSelectorTerm(&in.Preference, &out.Preference, s); err != nil { return err } return nil } -func Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in *PodTemplateSpec, out 
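Functions like Convert_v1_Preconditions_To_api_Preconditions above are only useful once registered, so a scheme can dispatch on the (source, destination) type pair instead of walking struct fields reflectively on every call. A minimal registry in that spirit; the scheme type and its add/convert methods are invented stand-ins, not the real runtime.Scheme API:

package main

import (
	"fmt"
	"reflect"
)

type conversionFunc func(in, out interface{}) error

// scheme stores typed converters under their (in, out) type pair, so each
// conversion is one map lookup plus a direct call.
type scheme struct {
	funcs map[[2]reflect.Type]conversionFunc
}

func newScheme() *scheme {
	return &scheme{funcs: map[[2]reflect.Type]conversionFunc{}}
}

func (s *scheme) add(in, out interface{}, fn conversionFunc) {
	s.funcs[[2]reflect.Type{reflect.TypeOf(in), reflect.TypeOf(out)}] = fn
}

func (s *scheme) convert(in, out interface{}) error {
	fn, ok := s.funcs[[2]reflect.Type{reflect.TypeOf(in), reflect.TypeOf(out)}]
	if !ok {
		return fmt.Errorf("no conversion registered from %T to %T", in, out)
	}
	return fn(in, out)
}

type v1Preconditions struct{ UID string }
type apiPreconditions struct{ UID string }

func main() {
	s := newScheme()
	s.add(&v1Preconditions{}, &apiPreconditions{}, func(in, out interface{}) error {
		out.(*apiPreconditions).UID = in.(*v1Preconditions).UID
		return nil
	})
	var out apiPreconditions
	if err := s.convert(&v1Preconditions{UID: "abc-123"}, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.UID) // abc-123
}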
*api.PodTemplateSpec, s conversion.Scope) error { - return autoConvert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in, out, s) +func Convert_api_PreferredSchedulingTerm_To_v1_PreferredSchedulingTerm(in *api.PreferredSchedulingTerm, out *PreferredSchedulingTerm, s conversion.Scope) error { + return autoConvert_api_PreferredSchedulingTerm_To_v1_PreferredSchedulingTerm(in, out, s) } func autoConvert_v1_Probe_To_api_Probe(in *Probe, out *api.Probe, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*Probe))(in) - } + SetDefaults_Probe(in) if err := Convert_v1_Handler_To_api_Handler(&in.Handler, &out.Handler, s); err != nil { return err } - out.InitialDelaySeconds = int(in.InitialDelaySeconds) - out.TimeoutSeconds = int(in.TimeoutSeconds) - out.PeriodSeconds = int(in.PeriodSeconds) - out.SuccessThreshold = int(in.SuccessThreshold) - out.FailureThreshold = int(in.FailureThreshold) + out.InitialDelaySeconds = in.InitialDelaySeconds + out.TimeoutSeconds = in.TimeoutSeconds + out.PeriodSeconds = in.PeriodSeconds + out.SuccessThreshold = in.SuccessThreshold + out.FailureThreshold = in.FailureThreshold return nil } @@ -5711,27 +5067,57 @@ func Convert_v1_Probe_To_api_Probe(in *Probe, out *api.Probe, s conversion.Scope return autoConvert_v1_Probe_To_api_Probe(in, out, s) } -func autoConvert_v1_RBDVolumeSource_To_api_RBDVolumeSource(in *RBDVolumeSource, out *api.RBDVolumeSource, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*RBDVolumeSource))(in) +func autoConvert_api_Probe_To_v1_Probe(in *api.Probe, out *Probe, s conversion.Scope) error { + if err := Convert_api_Handler_To_v1_Handler(&in.Handler, &out.Handler, s); err != nil { + return err } - if in.CephMonitors != nil { - out.CephMonitors = make([]string, len(in.CephMonitors)) - for i := range in.CephMonitors { - out.CephMonitors[i] = in.CephMonitors[i] + out.InitialDelaySeconds = in.InitialDelaySeconds + out.TimeoutSeconds = in.TimeoutSeconds + out.PeriodSeconds = in.PeriodSeconds + out.SuccessThreshold = in.SuccessThreshold + out.FailureThreshold = in.FailureThreshold + return nil +} + +func Convert_api_Probe_To_v1_Probe(in *api.Probe, out *Probe, s conversion.Scope) error { + return autoConvert_api_Probe_To_v1_Probe(in, out, s) +} + +func autoConvert_v1_RBDVolumeSource_To_api_RBDVolumeSource(in *RBDVolumeSource, out *api.RBDVolumeSource, s conversion.Scope) error { + out.CephMonitors = in.CephMonitors + out.RBDImage = in.RBDImage + out.FSType = in.FSType + out.RBDPool = in.RBDPool + out.RadosUser = in.RadosUser + out.Keyring = in.Keyring + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(api.LocalObjectReference) + if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(*in, *out, s); err != nil { + return err } } else { - out.CephMonitors = nil + out.SecretRef = nil } + out.ReadOnly = in.ReadOnly + return nil +} + +func Convert_v1_RBDVolumeSource_To_api_RBDVolumeSource(in *RBDVolumeSource, out *api.RBDVolumeSource, s conversion.Scope) error { + return autoConvert_v1_RBDVolumeSource_To_api_RBDVolumeSource(in, out, s) +} + +func autoConvert_api_RBDVolumeSource_To_v1_RBDVolumeSource(in *api.RBDVolumeSource, out *RBDVolumeSource, s conversion.Scope) error { + out.CephMonitors = in.CephMonitors out.RBDImage = in.RBDImage out.FSType = in.FSType out.RBDPool = in.RBDPool out.RadosUser = in.RadosUser out.Keyring = in.Keyring - // unable to generate simple 
pointer conversion for v1.LocalObjectReference -> api.LocalObjectReference if in.SecretRef != nil { - out.SecretRef = new(api.LocalObjectReference) - if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(in.SecretRef, out.SecretRef, s); err != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(LocalObjectReference) + if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(*in, *out, s); err != nil { return err } } else { @@ -5741,19 +5127,19 @@ func autoConvert_v1_RBDVolumeSource_To_api_RBDVolumeSource(in *RBDVolumeSource, return nil } -func Convert_v1_RBDVolumeSource_To_api_RBDVolumeSource(in *RBDVolumeSource, out *api.RBDVolumeSource, s conversion.Scope) error { - return autoConvert_v1_RBDVolumeSource_To_api_RBDVolumeSource(in, out, s) +func Convert_api_RBDVolumeSource_To_v1_RBDVolumeSource(in *api.RBDVolumeSource, out *RBDVolumeSource, s conversion.Scope) error { + return autoConvert_api_RBDVolumeSource_To_v1_RBDVolumeSource(in, out, s) } func autoConvert_v1_RangeAllocation_To_api_RangeAllocation(in *RangeAllocation, out *api.RangeAllocation, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*RangeAllocation))(in) + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err } if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { return err } out.Range = in.Range - if err := conversion.ByteSliceCopy(&in.Data, &out.Data, s); err != nil { + if err := conversion.Convert_Slice_byte_To_Slice_byte(&in.Data, &out.Data, s); err != nil { return err } return nil @@ -5763,9 +5149,28 @@ func Convert_v1_RangeAllocation_To_api_RangeAllocation(in *RangeAllocation, out return autoConvert_v1_RangeAllocation_To_api_RangeAllocation(in, out, s) } +func autoConvert_api_RangeAllocation_To_v1_RangeAllocation(in *api.RangeAllocation, out *RangeAllocation, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { + return err + } + out.Range = in.Range + if err := conversion.Convert_Slice_byte_To_Slice_byte(&in.Data, &out.Data, s); err != nil { + return err + } + return nil +} + +func Convert_api_RangeAllocation_To_v1_RangeAllocation(in *api.RangeAllocation, out *RangeAllocation, s conversion.Scope) error { + return autoConvert_api_RangeAllocation_To_v1_RangeAllocation(in, out, s) +} + func autoConvert_v1_ReplicationController_To_api_ReplicationController(in *ReplicationController, out *api.ReplicationController, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*ReplicationController))(in) + SetDefaults_ReplicationController(in) + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err } if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { return err @@ -5783,17 +5188,38 @@ func Convert_v1_ReplicationController_To_api_ReplicationController(in *Replicati return autoConvert_v1_ReplicationController_To_api_ReplicationController(in, out, s) } +func autoConvert_api_ReplicationController_To_v1_ReplicationController(in *api.ReplicationController, out *ReplicationController, s conversion.Scope) error { + 
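The RangeAllocation hunk above renames conversion.ByteSliceCopy to conversion.Convert_Slice_byte_To_Slice_byte, bringing []byte in line with the Convert_<A>_To_<B> naming used everywhere else in this file. Assuming the helper still deep-copies (which the "Copy" in the old name suggests), its behavior is roughly:

package main

import "fmt"

// convertSliceByte models what the renamed helper is assumed to do: deep-copy
// the bytes so the destination never aliases the source buffer, and preserve
// the nil/non-nil distinction.
func convertSliceByte(in *[]byte, out *[]byte) error {
	if *in == nil {
		*out = nil
		return nil
	}
	*out = make([]byte, len(*in))
	copy(*out, *in)
	return nil
}

func main() {
	src := []byte("range-data")
	var dst []byte
	if err := convertSliceByte(&src, &dst); err != nil {
		panic(err)
	}
	src[0] = 'R'
	fmt.Println(string(dst)) // range-data: unchanged by later writes to src
}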
if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { + return err + } + if err := Convert_api_ReplicationControllerSpec_To_v1_ReplicationControllerSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_api_ReplicationControllerStatus_To_v1_ReplicationControllerStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_api_ReplicationController_To_v1_ReplicationController(in *api.ReplicationController, out *ReplicationController, s conversion.Scope) error { + return autoConvert_api_ReplicationController_To_v1_ReplicationController(in, out, s) +} + func autoConvert_v1_ReplicationControllerList_To_api_ReplicationControllerList(in *ReplicationControllerList, out *api.ReplicationControllerList, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*ReplicationControllerList))(in) + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err } if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { return err } if in.Items != nil { - out.Items = make([]api.ReplicationController, len(in.Items)) - for i := range in.Items { - if err := Convert_v1_ReplicationController_To_api_ReplicationController(&in.Items[i], &out.Items[i], s); err != nil { + in, out := &in.Items, &out.Items + *out = make([]api.ReplicationController, len(*in)) + for i := range *in { + if err := Convert_v1_ReplicationController_To_api_ReplicationController(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -5807,37 +5233,34 @@ func Convert_v1_ReplicationControllerList_To_api_ReplicationControllerList(in *R return autoConvert_v1_ReplicationControllerList_To_api_ReplicationControllerList(in, out, s) } -func autoConvert_v1_ReplicationControllerSpec_To_api_ReplicationControllerSpec(in *ReplicationControllerSpec, out *api.ReplicationControllerSpec, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*ReplicationControllerSpec))(in) +func autoConvert_api_ReplicationControllerList_To_v1_ReplicationControllerList(in *api.ReplicationControllerList, out *ReplicationControllerList, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err } - // in.Replicas has no peer in out - if in.Selector != nil { - out.Selector = make(map[string]string) - for key, val := range in.Selector { - out.Selector[key] = val - } - } else { - out.Selector = nil + if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { + return err } - // unable to generate simple pointer conversion for v1.PodTemplateSpec -> api.PodTemplateSpec - if in.Template != nil { - out.Template = new(api.PodTemplateSpec) - if err := Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in.Template, out.Template, s); err != nil { - return err + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ReplicationController, len(*in)) + for i := range *in { + if err := Convert_api_ReplicationController_To_v1_ReplicationController(&(*in)[i], &(*out)[i], s); err != nil { + return err + } } } else { - out.Template = nil + 
out.Items = nil } return nil } +func Convert_api_ReplicationControllerList_To_v1_ReplicationControllerList(in *api.ReplicationControllerList, out *ReplicationControllerList, s conversion.Scope) error { + return autoConvert_api_ReplicationControllerList_To_v1_ReplicationControllerList(in, out, s) +} + func autoConvert_v1_ReplicationControllerStatus_To_api_ReplicationControllerStatus(in *ReplicationControllerStatus, out *api.ReplicationControllerStatus, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*ReplicationControllerStatus))(in) - } - out.Replicas = int(in.Replicas) - out.FullyLabeledReplicas = int(in.FullyLabeledReplicas) + out.Replicas = in.Replicas + out.FullyLabeledReplicas = in.FullyLabeledReplicas out.ObservedGeneration = in.ObservedGeneration return nil } @@ -5846,9 +5269,46 @@ func Convert_v1_ReplicationControllerStatus_To_api_ReplicationControllerStatus(i return autoConvert_v1_ReplicationControllerStatus_To_api_ReplicationControllerStatus(in, out, s) } +func autoConvert_api_ReplicationControllerStatus_To_v1_ReplicationControllerStatus(in *api.ReplicationControllerStatus, out *ReplicationControllerStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + out.FullyLabeledReplicas = in.FullyLabeledReplicas + out.ObservedGeneration = in.ObservedGeneration + return nil +} + +func Convert_api_ReplicationControllerStatus_To_v1_ReplicationControllerStatus(in *api.ReplicationControllerStatus, out *ReplicationControllerStatus, s conversion.Scope) error { + return autoConvert_api_ReplicationControllerStatus_To_v1_ReplicationControllerStatus(in, out, s) +} + +func autoConvert_v1_ResourceFieldSelector_To_api_ResourceFieldSelector(in *ResourceFieldSelector, out *api.ResourceFieldSelector, s conversion.Scope) error { + out.ContainerName = in.ContainerName + out.Resource = in.Resource + if err := api.Convert_resource_Quantity_To_resource_Quantity(&in.Divisor, &out.Divisor, s); err != nil { + return err + } + return nil +} + +func Convert_v1_ResourceFieldSelector_To_api_ResourceFieldSelector(in *ResourceFieldSelector, out *api.ResourceFieldSelector, s conversion.Scope) error { + return autoConvert_v1_ResourceFieldSelector_To_api_ResourceFieldSelector(in, out, s) +} + +func autoConvert_api_ResourceFieldSelector_To_v1_ResourceFieldSelector(in *api.ResourceFieldSelector, out *ResourceFieldSelector, s conversion.Scope) error { + out.ContainerName = in.ContainerName + out.Resource = in.Resource + if err := api.Convert_resource_Quantity_To_resource_Quantity(&in.Divisor, &out.Divisor, s); err != nil { + return err + } + return nil +} + +func Convert_api_ResourceFieldSelector_To_v1_ResourceFieldSelector(in *api.ResourceFieldSelector, out *ResourceFieldSelector, s conversion.Scope) error { + return autoConvert_api_ResourceFieldSelector_To_v1_ResourceFieldSelector(in, out, s) +} + func autoConvert_v1_ResourceQuota_To_api_ResourceQuota(in *ResourceQuota, out *api.ResourceQuota, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*ResourceQuota))(in) + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err } if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { return err @@ -5866,17 +5326,38 @@ func Convert_v1_ResourceQuota_To_api_ResourceQuota(in *ResourceQuota, out *api.R return autoConvert_v1_ResourceQuota_To_api_ResourceQuota(in, 
out, s) } +func autoConvert_api_ResourceQuota_To_v1_ResourceQuota(in *api.ResourceQuota, out *ResourceQuota, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { + return err + } + if err := Convert_api_ResourceQuotaSpec_To_v1_ResourceQuotaSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_api_ResourceQuotaStatus_To_v1_ResourceQuotaStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_api_ResourceQuota_To_v1_ResourceQuota(in *api.ResourceQuota, out *ResourceQuota, s conversion.Scope) error { + return autoConvert_api_ResourceQuota_To_v1_ResourceQuota(in, out, s) +} + func autoConvert_v1_ResourceQuotaList_To_api_ResourceQuotaList(in *ResourceQuotaList, out *api.ResourceQuotaList, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*ResourceQuotaList))(in) + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err } if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { return err } if in.Items != nil { - out.Items = make([]api.ResourceQuota, len(in.Items)) - for i := range in.Items { - if err := Convert_v1_ResourceQuota_To_api_ResourceQuota(&in.Items[i], &out.Items[i], s); err != nil { + in, out := &in.Items, &out.Items + *out = make([]api.ResourceQuota, len(*in)) + for i := range *in { + if err := Convert_v1_ResourceQuota_To_api_ResourceQuota(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -5890,17 +5371,40 @@ func Convert_v1_ResourceQuotaList_To_api_ResourceQuotaList(in *ResourceQuotaList return autoConvert_v1_ResourceQuotaList_To_api_ResourceQuotaList(in, out, s) } -func autoConvert_v1_ResourceQuotaSpec_To_api_ResourceQuotaSpec(in *ResourceQuotaSpec, out *api.ResourceQuotaSpec, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*ResourceQuotaSpec))(in) +func autoConvert_api_ResourceQuotaList_To_v1_ResourceQuotaList(in *api.ResourceQuotaList, out *ResourceQuotaList, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { + return err + } + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ResourceQuota, len(*in)) + for i := range *in { + if err := Convert_api_ResourceQuota_To_v1_ResourceQuota(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil } - if err := s.Convert(&in.Hard, &out.Hard, 0); err != nil { + return nil +} + +func Convert_api_ResourceQuotaList_To_v1_ResourceQuotaList(in *api.ResourceQuotaList, out *ResourceQuotaList, s conversion.Scope) error { + return autoConvert_api_ResourceQuotaList_To_v1_ResourceQuotaList(in, out, s) +} + +func autoConvert_v1_ResourceQuotaSpec_To_api_ResourceQuotaSpec(in *ResourceQuotaSpec, out *api.ResourceQuotaSpec, s conversion.Scope) error { + if err := Convert_v1_ResourceList_To_api_ResourceList(&in.Hard, &out.Hard, s); err != nil { return err } if in.Scopes != nil { - out.Scopes = make([]api.ResourceQuotaScope, 
len(in.Scopes)) - for i := range in.Scopes { - out.Scopes[i] = api.ResourceQuotaScope(in.Scopes[i]) + in, out := &in.Scopes, &out.Scopes + *out = make([]api.ResourceQuotaScope, len(*in)) + for i := range *in { + (*out)[i] = api.ResourceQuotaScope((*in)[i]) } } else { out.Scopes = nil @@ -5912,14 +5416,41 @@ func Convert_v1_ResourceQuotaSpec_To_api_ResourceQuotaSpec(in *ResourceQuotaSpec return autoConvert_v1_ResourceQuotaSpec_To_api_ResourceQuotaSpec(in, out, s) } -func autoConvert_v1_ResourceQuotaStatus_To_api_ResourceQuotaStatus(in *ResourceQuotaStatus, out *api.ResourceQuotaStatus, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*ResourceQuotaStatus))(in) +func autoConvert_api_ResourceQuotaSpec_To_v1_ResourceQuotaSpec(in *api.ResourceQuotaSpec, out *ResourceQuotaSpec, s conversion.Scope) error { + if in.Hard != nil { + in, out := &in.Hard, &out.Hard + *out = make(ResourceList, len(*in)) + for key, val := range *in { + newVal := new(resource.Quantity) + if err := api.Convert_resource_Quantity_To_resource_Quantity(&val, newVal, s); err != nil { + return err + } + (*out)[ResourceName(key)] = *newVal + } + } else { + out.Hard = nil + } + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make([]ResourceQuotaScope, len(*in)) + for i := range *in { + (*out)[i] = ResourceQuotaScope((*in)[i]) + } + } else { + out.Scopes = nil } - if err := s.Convert(&in.Hard, &out.Hard, 0); err != nil { + return nil +} + +func Convert_api_ResourceQuotaSpec_To_v1_ResourceQuotaSpec(in *api.ResourceQuotaSpec, out *ResourceQuotaSpec, s conversion.Scope) error { + return autoConvert_api_ResourceQuotaSpec_To_v1_ResourceQuotaSpec(in, out, s) +} + +func autoConvert_v1_ResourceQuotaStatus_To_api_ResourceQuotaStatus(in *ResourceQuotaStatus, out *api.ResourceQuotaStatus, s conversion.Scope) error { + if err := Convert_v1_ResourceList_To_api_ResourceList(&in.Hard, &out.Hard, s); err != nil { return err } - if err := s.Convert(&in.Used, &out.Used, 0); err != nil { + if err := Convert_v1_ResourceList_To_api_ResourceList(&in.Used, &out.Used, s); err != nil { return err } return nil @@ -5929,27 +5460,89 @@ func Convert_v1_ResourceQuotaStatus_To_api_ResourceQuotaStatus(in *ResourceQuota return autoConvert_v1_ResourceQuotaStatus_To_api_ResourceQuotaStatus(in, out, s) } +func autoConvert_api_ResourceQuotaStatus_To_v1_ResourceQuotaStatus(in *api.ResourceQuotaStatus, out *ResourceQuotaStatus, s conversion.Scope) error { + if in.Hard != nil { + in, out := &in.Hard, &out.Hard + *out = make(ResourceList, len(*in)) + for key, val := range *in { + newVal := new(resource.Quantity) + if err := api.Convert_resource_Quantity_To_resource_Quantity(&val, newVal, s); err != nil { + return err + } + (*out)[ResourceName(key)] = *newVal + } + } else { + out.Hard = nil + } + if in.Used != nil { + in, out := &in.Used, &out.Used + *out = make(ResourceList, len(*in)) + for key, val := range *in { + newVal := new(resource.Quantity) + if err := api.Convert_resource_Quantity_To_resource_Quantity(&val, newVal, s); err != nil { + return err + } + (*out)[ResourceName(key)] = *newVal + } + } else { + out.Used = nil + } + return nil +} + +func Convert_api_ResourceQuotaStatus_To_v1_ResourceQuotaStatus(in *api.ResourceQuotaStatus, out *ResourceQuotaStatus, s conversion.Scope) error { + return autoConvert_api_ResourceQuotaStatus_To_v1_ResourceQuotaStatus(in, out, s) +} + func autoConvert_v1_ResourceRequirements_To_api_ResourceRequirements(in *ResourceRequirements, out 
*api.ResourceRequirements, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*ResourceRequirements))(in) + if err := Convert_v1_ResourceList_To_api_ResourceList(&in.Limits, &out.Limits, s); err != nil { + return err } - if err := s.Convert(&in.Limits, &out.Limits, 0); err != nil { + if err := Convert_v1_ResourceList_To_api_ResourceList(&in.Requests, &out.Requests, s); err != nil { return err } - if err := s.Convert(&in.Requests, &out.Requests, 0); err != nil { - return err + return nil +} + +func Convert_v1_ResourceRequirements_To_api_ResourceRequirements(in *ResourceRequirements, out *api.ResourceRequirements, s conversion.Scope) error { + return autoConvert_v1_ResourceRequirements_To_api_ResourceRequirements(in, out, s) +} + +func autoConvert_api_ResourceRequirements_To_v1_ResourceRequirements(in *api.ResourceRequirements, out *ResourceRequirements, s conversion.Scope) error { + if in.Limits != nil { + in, out := &in.Limits, &out.Limits + *out = make(ResourceList, len(*in)) + for key, val := range *in { + newVal := new(resource.Quantity) + if err := api.Convert_resource_Quantity_To_resource_Quantity(&val, newVal, s); err != nil { + return err + } + (*out)[ResourceName(key)] = *newVal + } + } else { + out.Limits = nil + } + if in.Requests != nil { + in, out := &in.Requests, &out.Requests + *out = make(ResourceList, len(*in)) + for key, val := range *in { + newVal := new(resource.Quantity) + if err := api.Convert_resource_Quantity_To_resource_Quantity(&val, newVal, s); err != nil { + return err + } + (*out)[ResourceName(key)] = *newVal + } + } else { + out.Requests = nil } return nil } -func Convert_v1_ResourceRequirements_To_api_ResourceRequirements(in *ResourceRequirements, out *api.ResourceRequirements, s conversion.Scope) error { - return autoConvert_v1_ResourceRequirements_To_api_ResourceRequirements(in, out, s) +func Convert_api_ResourceRequirements_To_v1_ResourceRequirements(in *api.ResourceRequirements, out *ResourceRequirements, s conversion.Scope) error { + return autoConvert_api_ResourceRequirements_To_v1_ResourceRequirements(in, out, s) } func autoConvert_v1_SELinuxOptions_To_api_SELinuxOptions(in *SELinuxOptions, out *api.SELinuxOptions, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*SELinuxOptions))(in) - } out.User = in.User out.Role = in.Role out.Type = in.Type @@ -5961,25 +5554,27 @@ func Convert_v1_SELinuxOptions_To_api_SELinuxOptions(in *SELinuxOptions, out *ap return autoConvert_v1_SELinuxOptions_To_api_SELinuxOptions(in, out, s) } +func autoConvert_api_SELinuxOptions_To_v1_SELinuxOptions(in *api.SELinuxOptions, out *SELinuxOptions, s conversion.Scope) error { + out.User = in.User + out.Role = in.Role + out.Type = in.Type + out.Level = in.Level + return nil +} + +func Convert_api_SELinuxOptions_To_v1_SELinuxOptions(in *api.SELinuxOptions, out *SELinuxOptions, s conversion.Scope) error { + return autoConvert_api_SELinuxOptions_To_v1_SELinuxOptions(in, out, s) +} + func autoConvert_v1_Secret_To_api_Secret(in *Secret, out *api.Secret, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*Secret))(in) + SetDefaults_Secret(in) + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err } if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { 
return err } - if in.Data != nil { - out.Data = make(map[string][]uint8) - for key, val := range in.Data { - newVal := []uint8{} - if err := conversion.ByteSliceCopy(&val, &newVal, s); err != nil { - return err - } - out.Data[key] = newVal - } - } else { - out.Data = nil - } + out.Data = in.Data out.Type = api.SecretType(in.Type) return nil } @@ -5988,10 +5583,23 @@ func Convert_v1_Secret_To_api_Secret(in *Secret, out *api.Secret, s conversion.S return autoConvert_v1_Secret_To_api_Secret(in, out, s) } -func autoConvert_v1_SecretKeySelector_To_api_SecretKeySelector(in *SecretKeySelector, out *api.SecretKeySelector, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*SecretKeySelector))(in) +func autoConvert_api_Secret_To_v1_Secret(in *api.Secret, out *Secret, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { + return err } + out.Data = in.Data + out.Type = SecretType(in.Type) + return nil +} + +func Convert_api_Secret_To_v1_Secret(in *api.Secret, out *Secret, s conversion.Scope) error { + return autoConvert_api_Secret_To_v1_Secret(in, out, s) +} + +func autoConvert_v1_SecretKeySelector_To_api_SecretKeySelector(in *SecretKeySelector, out *api.SecretKeySelector, s conversion.Scope) error { if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { return err } @@ -6003,17 +5611,30 @@ func Convert_v1_SecretKeySelector_To_api_SecretKeySelector(in *SecretKeySelector return autoConvert_v1_SecretKeySelector_To_api_SecretKeySelector(in, out, s) } +func autoConvert_api_SecretKeySelector_To_v1_SecretKeySelector(in *api.SecretKeySelector, out *SecretKeySelector, s conversion.Scope) error { + if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Key = in.Key + return nil +} + +func Convert_api_SecretKeySelector_To_v1_SecretKeySelector(in *api.SecretKeySelector, out *SecretKeySelector, s conversion.Scope) error { + return autoConvert_api_SecretKeySelector_To_v1_SecretKeySelector(in, out, s) +} + func autoConvert_v1_SecretList_To_api_SecretList(in *SecretList, out *api.SecretList, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*SecretList))(in) + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err } if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { return err } if in.Items != nil { - out.Items = make([]api.Secret, len(in.Items)) - for i := range in.Items { - if err := Convert_v1_Secret_To_api_Secret(&in.Items[i], &out.Items[i], s); err != nil { + in, out := &in.Items, &out.Items + *out = make([]api.Secret, len(*in)) + for i := range *in { + if err := Convert_v1_Secret_To_api_Secret(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -6027,11 +5648,44 @@ func Convert_v1_SecretList_To_api_SecretList(in *SecretList, out *api.SecretList return autoConvert_v1_SecretList_To_api_SecretList(in, out, s) } -func autoConvert_v1_SecretVolumeSource_To_api_SecretVolumeSource(in *SecretVolumeSource, out *api.SecretVolumeSource, s 
conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*SecretVolumeSource))(in) +func autoConvert_api_SecretList_To_v1_SecretList(in *api.SecretList, out *SecretList, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { + return err } + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Secret, len(*in)) + for i := range *in { + if err := Convert_api_Secret_To_v1_Secret(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_api_SecretList_To_v1_SecretList(in *api.SecretList, out *SecretList, s conversion.Scope) error { + return autoConvert_api_SecretList_To_v1_SecretList(in, out, s) +} + +func autoConvert_v1_SecretVolumeSource_To_api_SecretVolumeSource(in *SecretVolumeSource, out *api.SecretVolumeSource, s conversion.Scope) error { out.SecretName = in.SecretName + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]api.KeyToPath, len(*in)) + for i := range *in { + if err := Convert_v1_KeyToPath_To_api_KeyToPath(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } return nil } @@ -6039,62 +5693,89 @@ func Convert_v1_SecretVolumeSource_To_api_SecretVolumeSource(in *SecretVolumeSou return autoConvert_v1_SecretVolumeSource_To_api_SecretVolumeSource(in, out, s) } -func autoConvert_v1_SecurityContext_To_api_SecurityContext(in *SecurityContext, out *api.SecurityContext, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*SecurityContext))(in) +func autoConvert_api_SecretVolumeSource_To_v1_SecretVolumeSource(in *api.SecretVolumeSource, out *SecretVolumeSource, s conversion.Scope) error { + out.SecretName = in.SecretName + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KeyToPath, len(*in)) + for i := range *in { + if err := Convert_api_KeyToPath_To_v1_KeyToPath(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil } - // unable to generate simple pointer conversion for v1.Capabilities -> api.Capabilities + return nil +} + +func Convert_api_SecretVolumeSource_To_v1_SecretVolumeSource(in *api.SecretVolumeSource, out *SecretVolumeSource, s conversion.Scope) error { + return autoConvert_api_SecretVolumeSource_To_v1_SecretVolumeSource(in, out, s) +} + +func autoConvert_v1_SecurityContext_To_api_SecurityContext(in *SecurityContext, out *api.SecurityContext, s conversion.Scope) error { if in.Capabilities != nil { - out.Capabilities = new(api.Capabilities) - if err := Convert_v1_Capabilities_To_api_Capabilities(in.Capabilities, out.Capabilities, s); err != nil { + in, out := &in.Capabilities, &out.Capabilities + *out = new(api.Capabilities) + if err := Convert_v1_Capabilities_To_api_Capabilities(*in, *out, s); err != nil { return err } } else { out.Capabilities = nil } - if in.Privileged != nil { - out.Privileged = new(bool) - *out.Privileged = *in.Privileged - } else { - out.Privileged = nil - } - // unable to generate simple pointer conversion for v1.SELinuxOptions -> api.SELinuxOptions + out.Privileged = in.Privileged if in.SELinuxOptions != nil { - out.SELinuxOptions = new(api.SELinuxOptions) - if err := 
Convert_v1_SELinuxOptions_To_api_SELinuxOptions(in.SELinuxOptions, out.SELinuxOptions, s); err != nil { + in, out := &in.SELinuxOptions, &out.SELinuxOptions + *out = new(api.SELinuxOptions) + if err := Convert_v1_SELinuxOptions_To_api_SELinuxOptions(*in, *out, s); err != nil { return err } } else { out.SELinuxOptions = nil } - if in.RunAsUser != nil { - out.RunAsUser = new(int64) - *out.RunAsUser = *in.RunAsUser - } else { - out.RunAsUser = nil - } - if in.RunAsNonRoot != nil { - out.RunAsNonRoot = new(bool) - *out.RunAsNonRoot = *in.RunAsNonRoot + out.RunAsUser = in.RunAsUser + out.RunAsNonRoot = in.RunAsNonRoot + out.ReadOnlyRootFilesystem = in.ReadOnlyRootFilesystem + return nil +} + +func Convert_v1_SecurityContext_To_api_SecurityContext(in *SecurityContext, out *api.SecurityContext, s conversion.Scope) error { + return autoConvert_v1_SecurityContext_To_api_SecurityContext(in, out, s) +} + +func autoConvert_api_SecurityContext_To_v1_SecurityContext(in *api.SecurityContext, out *SecurityContext, s conversion.Scope) error { + if in.Capabilities != nil { + in, out := &in.Capabilities, &out.Capabilities + *out = new(Capabilities) + if err := Convert_api_Capabilities_To_v1_Capabilities(*in, *out, s); err != nil { + return err + } } else { - out.RunAsNonRoot = nil + out.Capabilities = nil } - if in.ReadOnlyRootFilesystem != nil { - out.ReadOnlyRootFilesystem = new(bool) - *out.ReadOnlyRootFilesystem = *in.ReadOnlyRootFilesystem + out.Privileged = in.Privileged + if in.SELinuxOptions != nil { + in, out := &in.SELinuxOptions, &out.SELinuxOptions + *out = new(SELinuxOptions) + if err := Convert_api_SELinuxOptions_To_v1_SELinuxOptions(*in, *out, s); err != nil { + return err + } } else { - out.ReadOnlyRootFilesystem = nil + out.SELinuxOptions = nil } + out.RunAsUser = in.RunAsUser + out.RunAsNonRoot = in.RunAsNonRoot + out.ReadOnlyRootFilesystem = in.ReadOnlyRootFilesystem return nil } -func Convert_v1_SecurityContext_To_api_SecurityContext(in *SecurityContext, out *api.SecurityContext, s conversion.Scope) error { - return autoConvert_v1_SecurityContext_To_api_SecurityContext(in, out, s) +func Convert_api_SecurityContext_To_v1_SecurityContext(in *api.SecurityContext, out *SecurityContext, s conversion.Scope) error { + return autoConvert_api_SecurityContext_To_v1_SecurityContext(in, out, s) } func autoConvert_v1_SerializedReference_To_api_SerializedReference(in *SerializedReference, out *api.SerializedReference, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*SerializedReference))(in) + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err } if err := Convert_v1_ObjectReference_To_api_ObjectReference(&in.Reference, &out.Reference, s); err != nil { return err @@ -6106,9 +5787,23 @@ func Convert_v1_SerializedReference_To_api_SerializedReference(in *SerializedRef return autoConvert_v1_SerializedReference_To_api_SerializedReference(in, out, s) } +func autoConvert_api_SerializedReference_To_v1_SerializedReference(in *api.SerializedReference, out *SerializedReference, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := Convert_api_ObjectReference_To_v1_ObjectReference(&in.Reference, &out.Reference, s); err != nil { + return err + } + return nil +} + +func Convert_api_SerializedReference_To_v1_SerializedReference(in 
*api.SerializedReference, out *SerializedReference, s conversion.Scope) error { + return autoConvert_api_SerializedReference_To_v1_SerializedReference(in, out, s) +} + func autoConvert_v1_Service_To_api_Service(in *Service, out *api.Service, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*Service))(in) + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err } if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { return err @@ -6126,17 +5821,38 @@ func Convert_v1_Service_To_api_Service(in *Service, out *api.Service, s conversi return autoConvert_v1_Service_To_api_Service(in, out, s) } +func autoConvert_api_Service_To_v1_Service(in *api.Service, out *Service, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { + return err + } + if err := Convert_api_ServiceSpec_To_v1_ServiceSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_api_ServiceStatus_To_v1_ServiceStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_api_Service_To_v1_Service(in *api.Service, out *Service, s conversion.Scope) error { + return autoConvert_api_Service_To_v1_Service(in, out, s) +} + func autoConvert_v1_ServiceAccount_To_api_ServiceAccount(in *ServiceAccount, out *api.ServiceAccount, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*ServiceAccount))(in) + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err } if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { return err } if in.Secrets != nil { - out.Secrets = make([]api.ObjectReference, len(in.Secrets)) - for i := range in.Secrets { - if err := Convert_v1_ObjectReference_To_api_ObjectReference(&in.Secrets[i], &out.Secrets[i], s); err != nil { + in, out := &in.Secrets, &out.Secrets + *out = make([]api.ObjectReference, len(*in)) + for i := range *in { + if err := Convert_v1_ObjectReference_To_api_ObjectReference(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -6144,9 +5860,10 @@ func autoConvert_v1_ServiceAccount_To_api_ServiceAccount(in *ServiceAccount, out out.Secrets = nil } if in.ImagePullSecrets != nil { - out.ImagePullSecrets = make([]api.LocalObjectReference, len(in.ImagePullSecrets)) - for i := range in.ImagePullSecrets { - if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&in.ImagePullSecrets[i], &out.ImagePullSecrets[i], s); err != nil { + in, out := &in.ImagePullSecrets, &out.ImagePullSecrets + *out = make([]api.LocalObjectReference, len(*in)) + for i := range *in { + if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -6160,17 +5877,54 @@ func Convert_v1_ServiceAccount_To_api_ServiceAccount(in *ServiceAccount, out *ap return autoConvert_v1_ServiceAccount_To_api_ServiceAccount(in, out, s) } +func autoConvert_api_ServiceAccount_To_v1_ServiceAccount(in *api.ServiceAccount, out *ServiceAccount, s conversion.Scope) error { + if err := 
api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { + return err + } + if in.Secrets != nil { + in, out := &in.Secrets, &out.Secrets + *out = make([]ObjectReference, len(*in)) + for i := range *in { + if err := Convert_api_ObjectReference_To_v1_ObjectReference(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Secrets = nil + } + if in.ImagePullSecrets != nil { + in, out := &in.ImagePullSecrets, &out.ImagePullSecrets + *out = make([]LocalObjectReference, len(*in)) + for i := range *in { + if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.ImagePullSecrets = nil + } + return nil +} + +func Convert_api_ServiceAccount_To_v1_ServiceAccount(in *api.ServiceAccount, out *ServiceAccount, s conversion.Scope) error { + return autoConvert_api_ServiceAccount_To_v1_ServiceAccount(in, out, s) +} + func autoConvert_v1_ServiceAccountList_To_api_ServiceAccountList(in *ServiceAccountList, out *api.ServiceAccountList, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*ServiceAccountList))(in) + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err } if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { return err } if in.Items != nil { - out.Items = make([]api.ServiceAccount, len(in.Items)) - for i := range in.Items { - if err := Convert_v1_ServiceAccount_To_api_ServiceAccount(&in.Items[i], &out.Items[i], s); err != nil { + in, out := &in.Items, &out.Items + *out = make([]api.ServiceAccount, len(*in)) + for i := range *in { + if err := Convert_v1_ServiceAccount_To_api_ServiceAccount(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -6184,17 +5938,43 @@ func Convert_v1_ServiceAccountList_To_api_ServiceAccountList(in *ServiceAccountL return autoConvert_v1_ServiceAccountList_To_api_ServiceAccountList(in, out, s) } +func autoConvert_api_ServiceAccountList_To_v1_ServiceAccountList(in *api.ServiceAccountList, out *ServiceAccountList, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { + return err + } + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ServiceAccount, len(*in)) + for i := range *in { + if err := Convert_api_ServiceAccount_To_v1_ServiceAccount(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_api_ServiceAccountList_To_v1_ServiceAccountList(in *api.ServiceAccountList, out *ServiceAccountList, s conversion.Scope) error { + return autoConvert_api_ServiceAccountList_To_v1_ServiceAccountList(in, out, s) +} + func autoConvert_v1_ServiceList_To_api_ServiceList(in *ServiceList, out *api.ServiceList, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*ServiceList))(in) + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err } if err := 
api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { return err } if in.Items != nil { - out.Items = make([]api.Service, len(in.Items)) - for i := range in.Items { - if err := Convert_v1_Service_To_api_Service(&in.Items[i], &out.Items[i], s); err != nil { + in, out := &in.Items, &out.Items + *out = make([]api.Service, len(*in)) + for i := range *in { + if err := Convert_v1_Service_To_api_Service(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -6208,17 +5988,39 @@ func Convert_v1_ServiceList_To_api_ServiceList(in *ServiceList, out *api.Service return autoConvert_v1_ServiceList_To_api_ServiceList(in, out, s) } -func autoConvert_v1_ServicePort_To_api_ServicePort(in *ServicePort, out *api.ServicePort, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*ServicePort))(in) +func autoConvert_api_ServiceList_To_v1_ServiceList(in *api.ServiceList, out *ServiceList, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { + return err + } + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Service, len(*in)) + for i := range *in { + if err := Convert_api_Service_To_v1_Service(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil } + return nil +} + +func Convert_api_ServiceList_To_v1_ServiceList(in *api.ServiceList, out *ServiceList, s conversion.Scope) error { + return autoConvert_api_ServiceList_To_v1_ServiceList(in, out, s) +} + +func autoConvert_v1_ServicePort_To_api_ServicePort(in *ServicePort, out *api.ServicePort, s conversion.Scope) error { out.Name = in.Name out.Protocol = api.Protocol(in.Protocol) - out.Port = int(in.Port) + out.Port = in.Port if err := api.Convert_intstr_IntOrString_To_intstr_IntOrString(&in.TargetPort, &out.TargetPort, s); err != nil { return err } - out.NodePort = int(in.NodePort) + out.NodePort = in.NodePort return nil } @@ -6226,60 +6028,91 @@ func Convert_v1_ServicePort_To_api_ServicePort(in *ServicePort, out *api.Service return autoConvert_v1_ServicePort_To_api_ServicePort(in, out, s) } +func autoConvert_api_ServicePort_To_v1_ServicePort(in *api.ServicePort, out *ServicePort, s conversion.Scope) error { + out.Name = in.Name + out.Protocol = Protocol(in.Protocol) + out.Port = in.Port + if err := api.Convert_intstr_IntOrString_To_intstr_IntOrString(&in.TargetPort, &out.TargetPort, s); err != nil { + return err + } + out.NodePort = in.NodePort + return nil +} + +func Convert_api_ServicePort_To_v1_ServicePort(in *api.ServicePort, out *ServicePort, s conversion.Scope) error { + return autoConvert_api_ServicePort_To_v1_ServicePort(in, out, s) +} + func autoConvert_v1_ServiceProxyOptions_To_api_ServiceProxyOptions(in *ServiceProxyOptions, out *api.ServiceProxyOptions, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*ServiceProxyOptions))(in) + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + out.Path = in.Path + return nil +} + +func Convert_v1_ServiceProxyOptions_To_api_ServiceProxyOptions(in *ServiceProxyOptions, out *api.ServiceProxyOptions, s conversion.Scope) error { + return 
autoConvert_v1_ServiceProxyOptions_To_api_ServiceProxyOptions(in, out, s) +} + +func autoConvert_api_ServiceProxyOptions_To_v1_ServiceProxyOptions(in *api.ServiceProxyOptions, out *ServiceProxyOptions, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err } out.Path = in.Path return nil } -func Convert_v1_ServiceProxyOptions_To_api_ServiceProxyOptions(in *ServiceProxyOptions, out *api.ServiceProxyOptions, s conversion.Scope) error { - return autoConvert_v1_ServiceProxyOptions_To_api_ServiceProxyOptions(in, out, s) +func Convert_api_ServiceProxyOptions_To_v1_ServiceProxyOptions(in *api.ServiceProxyOptions, out *ServiceProxyOptions, s conversion.Scope) error { + return autoConvert_api_ServiceProxyOptions_To_v1_ServiceProxyOptions(in, out, s) } func autoConvert_v1_ServiceSpec_To_api_ServiceSpec(in *ServiceSpec, out *api.ServiceSpec, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*ServiceSpec))(in) - } + SetDefaults_ServiceSpec(in) if in.Ports != nil { - out.Ports = make([]api.ServicePort, len(in.Ports)) - for i := range in.Ports { - if err := Convert_v1_ServicePort_To_api_ServicePort(&in.Ports[i], &out.Ports[i], s); err != nil { + in, out := &in.Ports, &out.Ports + *out = make([]api.ServicePort, len(*in)) + for i := range *in { + if err := Convert_v1_ServicePort_To_api_ServicePort(&(*in)[i], &(*out)[i], s); err != nil { return err } } } else { out.Ports = nil } - if in.Selector != nil { - out.Selector = make(map[string]string) - for key, val := range in.Selector { - out.Selector[key] = val - } - } else { - out.Selector = nil - } + out.Selector = in.Selector out.ClusterIP = in.ClusterIP out.Type = api.ServiceType(in.Type) - if in.ExternalIPs != nil { - out.ExternalIPs = make([]string, len(in.ExternalIPs)) - for i := range in.ExternalIPs { - out.ExternalIPs[i] = in.ExternalIPs[i] + out.ExternalIPs = in.ExternalIPs + out.SessionAffinity = api.ServiceAffinity(in.SessionAffinity) + out.LoadBalancerIP = in.LoadBalancerIP + out.LoadBalancerSourceRanges = in.LoadBalancerSourceRanges + return nil +} + +func autoConvert_api_ServiceSpec_To_v1_ServiceSpec(in *api.ServiceSpec, out *ServiceSpec, s conversion.Scope) error { + out.Type = ServiceType(in.Type) + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]ServicePort, len(*in)) + for i := range *in { + if err := Convert_api_ServicePort_To_v1_ServicePort(&(*in)[i], &(*out)[i], s); err != nil { + return err + } } } else { - out.ExternalIPs = nil + out.Ports = nil } - // in.DeprecatedPublicIPs has no peer in out - out.SessionAffinity = api.ServiceAffinity(in.SessionAffinity) + out.Selector = in.Selector + out.ClusterIP = in.ClusterIP + out.ExternalIPs = in.ExternalIPs out.LoadBalancerIP = in.LoadBalancerIP + out.SessionAffinity = ServiceAffinity(in.SessionAffinity) + out.LoadBalancerSourceRanges = in.LoadBalancerSourceRanges return nil } func autoConvert_v1_ServiceStatus_To_api_ServiceStatus(in *ServiceStatus, out *api.ServiceStatus, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*ServiceStatus))(in) - } if err := Convert_v1_LoadBalancerStatus_To_api_LoadBalancerStatus(&in.LoadBalancer, &out.LoadBalancer, s); err != nil { return err } @@ -6290,10 +6123,18 @@ func Convert_v1_ServiceStatus_To_api_ServiceStatus(in *ServiceStatus, out *api.S return 
autoConvert_v1_ServiceStatus_To_api_ServiceStatus(in, out, s) } -func autoConvert_v1_TCPSocketAction_To_api_TCPSocketAction(in *TCPSocketAction, out *api.TCPSocketAction, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*TCPSocketAction))(in) +func autoConvert_api_ServiceStatus_To_v1_ServiceStatus(in *api.ServiceStatus, out *ServiceStatus, s conversion.Scope) error { + if err := Convert_api_LoadBalancerStatus_To_v1_LoadBalancerStatus(&in.LoadBalancer, &out.LoadBalancer, s); err != nil { + return err } + return nil +} + +func Convert_api_ServiceStatus_To_v1_ServiceStatus(in *api.ServiceStatus, out *ServiceStatus, s conversion.Scope) error { + return autoConvert_api_ServiceStatus_To_v1_ServiceStatus(in, out, s) +} + +func autoConvert_v1_TCPSocketAction_To_api_TCPSocketAction(in *TCPSocketAction, out *api.TCPSocketAction, s conversion.Scope) error { if err := api.Convert_intstr_IntOrString_To_intstr_IntOrString(&in.Port, &out.Port, s); err != nil { return err } @@ -6304,10 +6145,65 @@ func Convert_v1_TCPSocketAction_To_api_TCPSocketAction(in *TCPSocketAction, out return autoConvert_v1_TCPSocketAction_To_api_TCPSocketAction(in, out, s) } -func autoConvert_v1_Volume_To_api_Volume(in *Volume, out *api.Volume, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*Volume))(in) +func autoConvert_api_TCPSocketAction_To_v1_TCPSocketAction(in *api.TCPSocketAction, out *TCPSocketAction, s conversion.Scope) error { + if err := api.Convert_intstr_IntOrString_To_intstr_IntOrString(&in.Port, &out.Port, s); err != nil { + return err } + return nil +} + +func Convert_api_TCPSocketAction_To_v1_TCPSocketAction(in *api.TCPSocketAction, out *TCPSocketAction, s conversion.Scope) error { + return autoConvert_api_TCPSocketAction_To_v1_TCPSocketAction(in, out, s) +} + +func autoConvert_v1_Taint_To_api_Taint(in *Taint, out *api.Taint, s conversion.Scope) error { + out.Key = in.Key + out.Value = in.Value + out.Effect = api.TaintEffect(in.Effect) + return nil +} + +func Convert_v1_Taint_To_api_Taint(in *Taint, out *api.Taint, s conversion.Scope) error { + return autoConvert_v1_Taint_To_api_Taint(in, out, s) +} + +func autoConvert_api_Taint_To_v1_Taint(in *api.Taint, out *Taint, s conversion.Scope) error { + out.Key = in.Key + out.Value = in.Value + out.Effect = TaintEffect(in.Effect) + return nil +} + +func Convert_api_Taint_To_v1_Taint(in *api.Taint, out *Taint, s conversion.Scope) error { + return autoConvert_api_Taint_To_v1_Taint(in, out, s) +} + +func autoConvert_v1_Toleration_To_api_Toleration(in *Toleration, out *api.Toleration, s conversion.Scope) error { + out.Key = in.Key + out.Operator = api.TolerationOperator(in.Operator) + out.Value = in.Value + out.Effect = api.TaintEffect(in.Effect) + return nil +} + +func Convert_v1_Toleration_To_api_Toleration(in *Toleration, out *api.Toleration, s conversion.Scope) error { + return autoConvert_v1_Toleration_To_api_Toleration(in, out, s) +} + +func autoConvert_api_Toleration_To_v1_Toleration(in *api.Toleration, out *Toleration, s conversion.Scope) error { + out.Key = in.Key + out.Operator = TolerationOperator(in.Operator) + out.Value = in.Value + out.Effect = TaintEffect(in.Effect) + return nil +} + +func Convert_api_Toleration_To_v1_Toleration(in *api.Toleration, out *Toleration, s conversion.Scope) error { + return autoConvert_api_Toleration_To_v1_Toleration(in, out, s) +} + +func autoConvert_v1_Volume_To_api_Volume(in 
*Volume, out *api.Volume, s conversion.Scope) error { + SetDefaults_Volume(in) out.Name = in.Name if err := Convert_v1_VolumeSource_To_api_VolumeSource(&in.VolumeSource, &out.VolumeSource, s); err != nil { return err @@ -6319,13 +6215,23 @@ func Convert_v1_Volume_To_api_Volume(in *Volume, out *api.Volume, s conversion.S return autoConvert_v1_Volume_To_api_Volume(in, out, s) } -func autoConvert_v1_VolumeMount_To_api_VolumeMount(in *VolumeMount, out *api.VolumeMount, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*VolumeMount))(in) +func autoConvert_api_Volume_To_v1_Volume(in *api.Volume, out *Volume, s conversion.Scope) error { + out.Name = in.Name + if err := Convert_api_VolumeSource_To_v1_VolumeSource(&in.VolumeSource, &out.VolumeSource, s); err != nil { + return err } + return nil +} + +func Convert_api_Volume_To_v1_Volume(in *api.Volume, out *Volume, s conversion.Scope) error { + return autoConvert_api_Volume_To_v1_Volume(in, out, s) +} + +func autoConvert_v1_VolumeMount_To_api_VolumeMount(in *VolumeMount, out *api.VolumeMount, s conversion.Scope) error { out.Name = in.Name out.ReadOnly = in.ReadOnly out.MountPath = in.MountPath + out.SubPath = in.SubPath return nil } @@ -6333,181 +6239,199 @@ func Convert_v1_VolumeMount_To_api_VolumeMount(in *VolumeMount, out *api.VolumeM return autoConvert_v1_VolumeMount_To_api_VolumeMount(in, out, s) } +func autoConvert_api_VolumeMount_To_v1_VolumeMount(in *api.VolumeMount, out *VolumeMount, s conversion.Scope) error { + out.Name = in.Name + out.ReadOnly = in.ReadOnly + out.MountPath = in.MountPath + out.SubPath = in.SubPath + return nil +} + +func Convert_api_VolumeMount_To_v1_VolumeMount(in *api.VolumeMount, out *VolumeMount, s conversion.Scope) error { + return autoConvert_api_VolumeMount_To_v1_VolumeMount(in, out, s) +} + func autoConvert_v1_VolumeSource_To_api_VolumeSource(in *VolumeSource, out *api.VolumeSource, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*VolumeSource))(in) - } - // unable to generate simple pointer conversion for v1.HostPathVolumeSource -> api.HostPathVolumeSource if in.HostPath != nil { - out.HostPath = new(api.HostPathVolumeSource) - if err := Convert_v1_HostPathVolumeSource_To_api_HostPathVolumeSource(in.HostPath, out.HostPath, s); err != nil { + in, out := &in.HostPath, &out.HostPath + *out = new(api.HostPathVolumeSource) + if err := Convert_v1_HostPathVolumeSource_To_api_HostPathVolumeSource(*in, *out, s); err != nil { return err } } else { out.HostPath = nil } - // unable to generate simple pointer conversion for v1.EmptyDirVolumeSource -> api.EmptyDirVolumeSource if in.EmptyDir != nil { - out.EmptyDir = new(api.EmptyDirVolumeSource) - if err := Convert_v1_EmptyDirVolumeSource_To_api_EmptyDirVolumeSource(in.EmptyDir, out.EmptyDir, s); err != nil { + in, out := &in.EmptyDir, &out.EmptyDir + *out = new(api.EmptyDirVolumeSource) + if err := Convert_v1_EmptyDirVolumeSource_To_api_EmptyDirVolumeSource(*in, *out, s); err != nil { return err } } else { out.EmptyDir = nil } - // unable to generate simple pointer conversion for v1.GCEPersistentDiskVolumeSource -> api.GCEPersistentDiskVolumeSource if in.GCEPersistentDisk != nil { - out.GCEPersistentDisk = new(api.GCEPersistentDiskVolumeSource) - if err := Convert_v1_GCEPersistentDiskVolumeSource_To_api_GCEPersistentDiskVolumeSource(in.GCEPersistentDisk, out.GCEPersistentDisk, s); err != nil { + in, out := 
&in.GCEPersistentDisk, &out.GCEPersistentDisk + *out = new(api.GCEPersistentDiskVolumeSource) + if err := Convert_v1_GCEPersistentDiskVolumeSource_To_api_GCEPersistentDiskVolumeSource(*in, *out, s); err != nil { return err } } else { out.GCEPersistentDisk = nil } - // unable to generate simple pointer conversion for v1.AWSElasticBlockStoreVolumeSource -> api.AWSElasticBlockStoreVolumeSource if in.AWSElasticBlockStore != nil { - out.AWSElasticBlockStore = new(api.AWSElasticBlockStoreVolumeSource) - if err := Convert_v1_AWSElasticBlockStoreVolumeSource_To_api_AWSElasticBlockStoreVolumeSource(in.AWSElasticBlockStore, out.AWSElasticBlockStore, s); err != nil { + in, out := &in.AWSElasticBlockStore, &out.AWSElasticBlockStore + *out = new(api.AWSElasticBlockStoreVolumeSource) + if err := Convert_v1_AWSElasticBlockStoreVolumeSource_To_api_AWSElasticBlockStoreVolumeSource(*in, *out, s); err != nil { return err } } else { out.AWSElasticBlockStore = nil } - // unable to generate simple pointer conversion for v1.GitRepoVolumeSource -> api.GitRepoVolumeSource if in.GitRepo != nil { - out.GitRepo = new(api.GitRepoVolumeSource) - if err := Convert_v1_GitRepoVolumeSource_To_api_GitRepoVolumeSource(in.GitRepo, out.GitRepo, s); err != nil { + in, out := &in.GitRepo, &out.GitRepo + *out = new(api.GitRepoVolumeSource) + if err := Convert_v1_GitRepoVolumeSource_To_api_GitRepoVolumeSource(*in, *out, s); err != nil { return err } } else { out.GitRepo = nil } - // unable to generate simple pointer conversion for v1.SecretVolumeSource -> api.SecretVolumeSource if in.Secret != nil { - out.Secret = new(api.SecretVolumeSource) - if err := Convert_v1_SecretVolumeSource_To_api_SecretVolumeSource(in.Secret, out.Secret, s); err != nil { + in, out := &in.Secret, &out.Secret + *out = new(api.SecretVolumeSource) + if err := Convert_v1_SecretVolumeSource_To_api_SecretVolumeSource(*in, *out, s); err != nil { return err } } else { out.Secret = nil } - // unable to generate simple pointer conversion for v1.NFSVolumeSource -> api.NFSVolumeSource if in.NFS != nil { - out.NFS = new(api.NFSVolumeSource) - if err := Convert_v1_NFSVolumeSource_To_api_NFSVolumeSource(in.NFS, out.NFS, s); err != nil { + in, out := &in.NFS, &out.NFS + *out = new(api.NFSVolumeSource) + if err := Convert_v1_NFSVolumeSource_To_api_NFSVolumeSource(*in, *out, s); err != nil { return err } } else { out.NFS = nil } - // unable to generate simple pointer conversion for v1.ISCSIVolumeSource -> api.ISCSIVolumeSource if in.ISCSI != nil { - out.ISCSI = new(api.ISCSIVolumeSource) - if err := Convert_v1_ISCSIVolumeSource_To_api_ISCSIVolumeSource(in.ISCSI, out.ISCSI, s); err != nil { + in, out := &in.ISCSI, &out.ISCSI + *out = new(api.ISCSIVolumeSource) + if err := Convert_v1_ISCSIVolumeSource_To_api_ISCSIVolumeSource(*in, *out, s); err != nil { return err } } else { out.ISCSI = nil } - // unable to generate simple pointer conversion for v1.GlusterfsVolumeSource -> api.GlusterfsVolumeSource if in.Glusterfs != nil { - out.Glusterfs = new(api.GlusterfsVolumeSource) - if err := Convert_v1_GlusterfsVolumeSource_To_api_GlusterfsVolumeSource(in.Glusterfs, out.Glusterfs, s); err != nil { + in, out := &in.Glusterfs, &out.Glusterfs + *out = new(api.GlusterfsVolumeSource) + if err := Convert_v1_GlusterfsVolumeSource_To_api_GlusterfsVolumeSource(*in, *out, s); err != nil { return err } } else { out.Glusterfs = nil } - // unable to generate simple pointer conversion for v1.PersistentVolumeClaimVolumeSource -> api.PersistentVolumeClaimVolumeSource if in.PersistentVolumeClaim != 
nil { - out.PersistentVolumeClaim = new(api.PersistentVolumeClaimVolumeSource) - if err := Convert_v1_PersistentVolumeClaimVolumeSource_To_api_PersistentVolumeClaimVolumeSource(in.PersistentVolumeClaim, out.PersistentVolumeClaim, s); err != nil { + in, out := &in.PersistentVolumeClaim, &out.PersistentVolumeClaim + *out = new(api.PersistentVolumeClaimVolumeSource) + if err := Convert_v1_PersistentVolumeClaimVolumeSource_To_api_PersistentVolumeClaimVolumeSource(*in, *out, s); err != nil { return err } } else { out.PersistentVolumeClaim = nil } - // unable to generate simple pointer conversion for v1.RBDVolumeSource -> api.RBDVolumeSource if in.RBD != nil { - out.RBD = new(api.RBDVolumeSource) - if err := Convert_v1_RBDVolumeSource_To_api_RBDVolumeSource(in.RBD, out.RBD, s); err != nil { + in, out := &in.RBD, &out.RBD + *out = new(api.RBDVolumeSource) + if err := Convert_v1_RBDVolumeSource_To_api_RBDVolumeSource(*in, *out, s); err != nil { return err } } else { out.RBD = nil } - // unable to generate simple pointer conversion for v1.FlexVolumeSource -> api.FlexVolumeSource if in.FlexVolume != nil { - out.FlexVolume = new(api.FlexVolumeSource) - if err := Convert_v1_FlexVolumeSource_To_api_FlexVolumeSource(in.FlexVolume, out.FlexVolume, s); err != nil { + in, out := &in.FlexVolume, &out.FlexVolume + *out = new(api.FlexVolumeSource) + if err := Convert_v1_FlexVolumeSource_To_api_FlexVolumeSource(*in, *out, s); err != nil { return err } } else { out.FlexVolume = nil } - // unable to generate simple pointer conversion for v1.CinderVolumeSource -> api.CinderVolumeSource if in.Cinder != nil { - out.Cinder = new(api.CinderVolumeSource) - if err := Convert_v1_CinderVolumeSource_To_api_CinderVolumeSource(in.Cinder, out.Cinder, s); err != nil { + in, out := &in.Cinder, &out.Cinder + *out = new(api.CinderVolumeSource) + if err := Convert_v1_CinderVolumeSource_To_api_CinderVolumeSource(*in, *out, s); err != nil { return err } } else { out.Cinder = nil } - // unable to generate simple pointer conversion for v1.CephFSVolumeSource -> api.CephFSVolumeSource if in.CephFS != nil { - out.CephFS = new(api.CephFSVolumeSource) - if err := Convert_v1_CephFSVolumeSource_To_api_CephFSVolumeSource(in.CephFS, out.CephFS, s); err != nil { + in, out := &in.CephFS, &out.CephFS + *out = new(api.CephFSVolumeSource) + if err := Convert_v1_CephFSVolumeSource_To_api_CephFSVolumeSource(*in, *out, s); err != nil { return err } } else { out.CephFS = nil } - // unable to generate simple pointer conversion for v1.FlockerVolumeSource -> api.FlockerVolumeSource if in.Flocker != nil { - out.Flocker = new(api.FlockerVolumeSource) - if err := Convert_v1_FlockerVolumeSource_To_api_FlockerVolumeSource(in.Flocker, out.Flocker, s); err != nil { + in, out := &in.Flocker, &out.Flocker + *out = new(api.FlockerVolumeSource) + if err := Convert_v1_FlockerVolumeSource_To_api_FlockerVolumeSource(*in, *out, s); err != nil { return err } } else { out.Flocker = nil } - // unable to generate simple pointer conversion for v1.DownwardAPIVolumeSource -> api.DownwardAPIVolumeSource if in.DownwardAPI != nil { - out.DownwardAPI = new(api.DownwardAPIVolumeSource) - if err := Convert_v1_DownwardAPIVolumeSource_To_api_DownwardAPIVolumeSource(in.DownwardAPI, out.DownwardAPI, s); err != nil { + in, out := &in.DownwardAPI, &out.DownwardAPI + *out = new(api.DownwardAPIVolumeSource) + if err := Convert_v1_DownwardAPIVolumeSource_To_api_DownwardAPIVolumeSource(*in, *out, s); err != nil { return err } } else { out.DownwardAPI = nil } - // unable to generate simple 
pointer conversion for v1.FCVolumeSource -> api.FCVolumeSource if in.FC != nil { - out.FC = new(api.FCVolumeSource) - if err := Convert_v1_FCVolumeSource_To_api_FCVolumeSource(in.FC, out.FC, s); err != nil { + in, out := &in.FC, &out.FC + *out = new(api.FCVolumeSource) + if err := Convert_v1_FCVolumeSource_To_api_FCVolumeSource(*in, *out, s); err != nil { return err } } else { out.FC = nil } - // unable to generate simple pointer conversion for v1.AzureFileVolumeSource -> api.AzureFileVolumeSource if in.AzureFile != nil { - out.AzureFile = new(api.AzureFileVolumeSource) - if err := Convert_v1_AzureFileVolumeSource_To_api_AzureFileVolumeSource(in.AzureFile, out.AzureFile, s); err != nil { + in, out := &in.AzureFile, &out.AzureFile + *out = new(api.AzureFileVolumeSource) + if err := Convert_v1_AzureFileVolumeSource_To_api_AzureFileVolumeSource(*in, *out, s); err != nil { return err } } else { out.AzureFile = nil } - // unable to generate simple pointer conversion for v1.ConfigMapVolumeSource -> api.ConfigMapVolumeSource if in.ConfigMap != nil { - out.ConfigMap = new(api.ConfigMapVolumeSource) - if err := Convert_v1_ConfigMapVolumeSource_To_api_ConfigMapVolumeSource(in.ConfigMap, out.ConfigMap, s); err != nil { + in, out := &in.ConfigMap, &out.ConfigMap + *out = new(api.ConfigMapVolumeSource) + if err := Convert_v1_ConfigMapVolumeSource_To_api_ConfigMapVolumeSource(*in, *out, s); err != nil { return err } } else { out.ConfigMap = nil } + if in.VsphereVolume != nil { + in, out := &in.VsphereVolume, &out.VsphereVolume + *out = new(api.VsphereVirtualDiskVolumeSource) + if err := Convert_v1_VsphereVirtualDiskVolumeSource_To_api_VsphereVirtualDiskVolumeSource(*in, *out, s); err != nil { + return err + } + } else { + out.VsphereVolume = nil + } return nil } @@ -6515,273 +6439,234 @@ func Convert_v1_VolumeSource_To_api_VolumeSource(in *VolumeSource, out *api.Volu return autoConvert_v1_VolumeSource_To_api_VolumeSource(in, out, s) } -func init() { - err := api.Scheme.AddGeneratedConversionFuncs( - autoConvert_api_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource, - autoConvert_api_AzureFileVolumeSource_To_v1_AzureFileVolumeSource, - autoConvert_api_Binding_To_v1_Binding, - autoConvert_api_Capabilities_To_v1_Capabilities, - autoConvert_api_CephFSVolumeSource_To_v1_CephFSVolumeSource, - autoConvert_api_CinderVolumeSource_To_v1_CinderVolumeSource, - autoConvert_api_ComponentCondition_To_v1_ComponentCondition, - autoConvert_api_ComponentStatusList_To_v1_ComponentStatusList, - autoConvert_api_ComponentStatus_To_v1_ComponentStatus, - autoConvert_api_ConfigMapKeySelector_To_v1_ConfigMapKeySelector, - autoConvert_api_ConfigMapList_To_v1_ConfigMapList, - autoConvert_api_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource, - autoConvert_api_ConfigMap_To_v1_ConfigMap, - autoConvert_api_ContainerImage_To_v1_ContainerImage, - autoConvert_api_ContainerPort_To_v1_ContainerPort, - autoConvert_api_ContainerStateRunning_To_v1_ContainerStateRunning, - autoConvert_api_ContainerStateTerminated_To_v1_ContainerStateTerminated, - autoConvert_api_ContainerStateWaiting_To_v1_ContainerStateWaiting, - autoConvert_api_ContainerState_To_v1_ContainerState, - autoConvert_api_ContainerStatus_To_v1_ContainerStatus, - autoConvert_api_Container_To_v1_Container, - autoConvert_api_DaemonEndpoint_To_v1_DaemonEndpoint, - autoConvert_api_DeleteOptions_To_v1_DeleteOptions, - autoConvert_api_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile, - autoConvert_api_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource, - 
autoConvert_api_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource, - autoConvert_api_EndpointAddress_To_v1_EndpointAddress, - autoConvert_api_EndpointPort_To_v1_EndpointPort, - autoConvert_api_EndpointSubset_To_v1_EndpointSubset, - autoConvert_api_EndpointsList_To_v1_EndpointsList, - autoConvert_api_Endpoints_To_v1_Endpoints, - autoConvert_api_EnvVarSource_To_v1_EnvVarSource, - autoConvert_api_EnvVar_To_v1_EnvVar, - autoConvert_api_EventList_To_v1_EventList, - autoConvert_api_EventSource_To_v1_EventSource, - autoConvert_api_Event_To_v1_Event, - autoConvert_api_ExecAction_To_v1_ExecAction, - autoConvert_api_FCVolumeSource_To_v1_FCVolumeSource, - autoConvert_api_FlexVolumeSource_To_v1_FlexVolumeSource, - autoConvert_api_FlockerVolumeSource_To_v1_FlockerVolumeSource, - autoConvert_api_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource, - autoConvert_api_GitRepoVolumeSource_To_v1_GitRepoVolumeSource, - autoConvert_api_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource, - autoConvert_api_HTTPGetAction_To_v1_HTTPGetAction, - autoConvert_api_HTTPHeader_To_v1_HTTPHeader, - autoConvert_api_Handler_To_v1_Handler, - autoConvert_api_HostPathVolumeSource_To_v1_HostPathVolumeSource, - autoConvert_api_ISCSIVolumeSource_To_v1_ISCSIVolumeSource, - autoConvert_api_KeyToPath_To_v1_KeyToPath, - autoConvert_api_Lifecycle_To_v1_Lifecycle, - autoConvert_api_LimitRangeItem_To_v1_LimitRangeItem, - autoConvert_api_LimitRangeList_To_v1_LimitRangeList, - autoConvert_api_LimitRangeSpec_To_v1_LimitRangeSpec, - autoConvert_api_LimitRange_To_v1_LimitRange, - autoConvert_api_ListOptions_To_v1_ListOptions, - autoConvert_api_List_To_v1_List, - autoConvert_api_LoadBalancerIngress_To_v1_LoadBalancerIngress, - autoConvert_api_LoadBalancerStatus_To_v1_LoadBalancerStatus, - autoConvert_api_LocalObjectReference_To_v1_LocalObjectReference, - autoConvert_api_NFSVolumeSource_To_v1_NFSVolumeSource, - autoConvert_api_NamespaceList_To_v1_NamespaceList, - autoConvert_api_NamespaceSpec_To_v1_NamespaceSpec, - autoConvert_api_NamespaceStatus_To_v1_NamespaceStatus, - autoConvert_api_Namespace_To_v1_Namespace, - autoConvert_api_NodeAddress_To_v1_NodeAddress, - autoConvert_api_NodeCondition_To_v1_NodeCondition, - autoConvert_api_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints, - autoConvert_api_NodeList_To_v1_NodeList, - autoConvert_api_NodeProxyOptions_To_v1_NodeProxyOptions, - autoConvert_api_NodeSpec_To_v1_NodeSpec, - autoConvert_api_NodeStatus_To_v1_NodeStatus, - autoConvert_api_NodeSystemInfo_To_v1_NodeSystemInfo, - autoConvert_api_Node_To_v1_Node, - autoConvert_api_ObjectFieldSelector_To_v1_ObjectFieldSelector, - autoConvert_api_ObjectMeta_To_v1_ObjectMeta, - autoConvert_api_ObjectReference_To_v1_ObjectReference, - autoConvert_api_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList, - autoConvert_api_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec, - autoConvert_api_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus, - autoConvert_api_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource, - autoConvert_api_PersistentVolumeClaim_To_v1_PersistentVolumeClaim, - autoConvert_api_PersistentVolumeList_To_v1_PersistentVolumeList, - autoConvert_api_PersistentVolumeSource_To_v1_PersistentVolumeSource, - autoConvert_api_PersistentVolumeSpec_To_v1_PersistentVolumeSpec, - autoConvert_api_PersistentVolumeStatus_To_v1_PersistentVolumeStatus, - autoConvert_api_PersistentVolume_To_v1_PersistentVolume, - autoConvert_api_PodAttachOptions_To_v1_PodAttachOptions, - 
autoConvert_api_PodCondition_To_v1_PodCondition, - autoConvert_api_PodExecOptions_To_v1_PodExecOptions, - autoConvert_api_PodList_To_v1_PodList, - autoConvert_api_PodLogOptions_To_v1_PodLogOptions, - autoConvert_api_PodProxyOptions_To_v1_PodProxyOptions, - autoConvert_api_PodSpec_To_v1_PodSpec, - autoConvert_api_PodStatusResult_To_v1_PodStatusResult, - autoConvert_api_PodStatus_To_v1_PodStatus, - autoConvert_api_PodTemplateList_To_v1_PodTemplateList, - autoConvert_api_PodTemplateSpec_To_v1_PodTemplateSpec, - autoConvert_api_PodTemplate_To_v1_PodTemplate, - autoConvert_api_Pod_To_v1_Pod, - autoConvert_api_Probe_To_v1_Probe, - autoConvert_api_RBDVolumeSource_To_v1_RBDVolumeSource, - autoConvert_api_RangeAllocation_To_v1_RangeAllocation, - autoConvert_api_ReplicationControllerList_To_v1_ReplicationControllerList, - autoConvert_api_ReplicationControllerSpec_To_v1_ReplicationControllerSpec, - autoConvert_api_ReplicationControllerStatus_To_v1_ReplicationControllerStatus, - autoConvert_api_ReplicationController_To_v1_ReplicationController, - autoConvert_api_ResourceQuotaList_To_v1_ResourceQuotaList, - autoConvert_api_ResourceQuotaSpec_To_v1_ResourceQuotaSpec, - autoConvert_api_ResourceQuotaStatus_To_v1_ResourceQuotaStatus, - autoConvert_api_ResourceQuota_To_v1_ResourceQuota, - autoConvert_api_ResourceRequirements_To_v1_ResourceRequirements, - autoConvert_api_SELinuxOptions_To_v1_SELinuxOptions, - autoConvert_api_SecretKeySelector_To_v1_SecretKeySelector, - autoConvert_api_SecretList_To_v1_SecretList, - autoConvert_api_SecretVolumeSource_To_v1_SecretVolumeSource, - autoConvert_api_Secret_To_v1_Secret, - autoConvert_api_SecurityContext_To_v1_SecurityContext, - autoConvert_api_SerializedReference_To_v1_SerializedReference, - autoConvert_api_ServiceAccountList_To_v1_ServiceAccountList, - autoConvert_api_ServiceAccount_To_v1_ServiceAccount, - autoConvert_api_ServiceList_To_v1_ServiceList, - autoConvert_api_ServicePort_To_v1_ServicePort, - autoConvert_api_ServiceProxyOptions_To_v1_ServiceProxyOptions, - autoConvert_api_ServiceSpec_To_v1_ServiceSpec, - autoConvert_api_ServiceStatus_To_v1_ServiceStatus, - autoConvert_api_Service_To_v1_Service, - autoConvert_api_TCPSocketAction_To_v1_TCPSocketAction, - autoConvert_api_VolumeMount_To_v1_VolumeMount, - autoConvert_api_VolumeSource_To_v1_VolumeSource, - autoConvert_api_Volume_To_v1_Volume, - autoConvert_unversioned_ExportOptions_To_v1_ExportOptions, - autoConvert_v1_AWSElasticBlockStoreVolumeSource_To_api_AWSElasticBlockStoreVolumeSource, - autoConvert_v1_AzureFileVolumeSource_To_api_AzureFileVolumeSource, - autoConvert_v1_Binding_To_api_Binding, - autoConvert_v1_Capabilities_To_api_Capabilities, - autoConvert_v1_CephFSVolumeSource_To_api_CephFSVolumeSource, - autoConvert_v1_CinderVolumeSource_To_api_CinderVolumeSource, - autoConvert_v1_ComponentCondition_To_api_ComponentCondition, - autoConvert_v1_ComponentStatusList_To_api_ComponentStatusList, - autoConvert_v1_ComponentStatus_To_api_ComponentStatus, - autoConvert_v1_ConfigMapKeySelector_To_api_ConfigMapKeySelector, - autoConvert_v1_ConfigMapList_To_api_ConfigMapList, - autoConvert_v1_ConfigMapVolumeSource_To_api_ConfigMapVolumeSource, - autoConvert_v1_ConfigMap_To_api_ConfigMap, - autoConvert_v1_ContainerImage_To_api_ContainerImage, - autoConvert_v1_ContainerPort_To_api_ContainerPort, - autoConvert_v1_ContainerStateRunning_To_api_ContainerStateRunning, - autoConvert_v1_ContainerStateTerminated_To_api_ContainerStateTerminated, - autoConvert_v1_ContainerStateWaiting_To_api_ContainerStateWaiting, - 
autoConvert_v1_ContainerState_To_api_ContainerState, - autoConvert_v1_ContainerStatus_To_api_ContainerStatus, - autoConvert_v1_Container_To_api_Container, - autoConvert_v1_DaemonEndpoint_To_api_DaemonEndpoint, - autoConvert_v1_DeleteOptions_To_api_DeleteOptions, - autoConvert_v1_DownwardAPIVolumeFile_To_api_DownwardAPIVolumeFile, - autoConvert_v1_DownwardAPIVolumeSource_To_api_DownwardAPIVolumeSource, - autoConvert_v1_EmptyDirVolumeSource_To_api_EmptyDirVolumeSource, - autoConvert_v1_EndpointAddress_To_api_EndpointAddress, - autoConvert_v1_EndpointPort_To_api_EndpointPort, - autoConvert_v1_EndpointSubset_To_api_EndpointSubset, - autoConvert_v1_EndpointsList_To_api_EndpointsList, - autoConvert_v1_Endpoints_To_api_Endpoints, - autoConvert_v1_EnvVarSource_To_api_EnvVarSource, - autoConvert_v1_EnvVar_To_api_EnvVar, - autoConvert_v1_EventList_To_api_EventList, - autoConvert_v1_EventSource_To_api_EventSource, - autoConvert_v1_Event_To_api_Event, - autoConvert_v1_ExecAction_To_api_ExecAction, - autoConvert_v1_ExportOptions_To_unversioned_ExportOptions, - autoConvert_v1_FCVolumeSource_To_api_FCVolumeSource, - autoConvert_v1_FlexVolumeSource_To_api_FlexVolumeSource, - autoConvert_v1_FlockerVolumeSource_To_api_FlockerVolumeSource, - autoConvert_v1_GCEPersistentDiskVolumeSource_To_api_GCEPersistentDiskVolumeSource, - autoConvert_v1_GitRepoVolumeSource_To_api_GitRepoVolumeSource, - autoConvert_v1_GlusterfsVolumeSource_To_api_GlusterfsVolumeSource, - autoConvert_v1_HTTPGetAction_To_api_HTTPGetAction, - autoConvert_v1_HTTPHeader_To_api_HTTPHeader, - autoConvert_v1_Handler_To_api_Handler, - autoConvert_v1_HostPathVolumeSource_To_api_HostPathVolumeSource, - autoConvert_v1_ISCSIVolumeSource_To_api_ISCSIVolumeSource, - autoConvert_v1_KeyToPath_To_api_KeyToPath, - autoConvert_v1_Lifecycle_To_api_Lifecycle, - autoConvert_v1_LimitRangeItem_To_api_LimitRangeItem, - autoConvert_v1_LimitRangeList_To_api_LimitRangeList, - autoConvert_v1_LimitRangeSpec_To_api_LimitRangeSpec, - autoConvert_v1_LimitRange_To_api_LimitRange, - autoConvert_v1_ListOptions_To_api_ListOptions, - autoConvert_v1_List_To_api_List, - autoConvert_v1_LoadBalancerIngress_To_api_LoadBalancerIngress, - autoConvert_v1_LoadBalancerStatus_To_api_LoadBalancerStatus, - autoConvert_v1_LocalObjectReference_To_api_LocalObjectReference, - autoConvert_v1_NFSVolumeSource_To_api_NFSVolumeSource, - autoConvert_v1_NamespaceList_To_api_NamespaceList, - autoConvert_v1_NamespaceSpec_To_api_NamespaceSpec, - autoConvert_v1_NamespaceStatus_To_api_NamespaceStatus, - autoConvert_v1_Namespace_To_api_Namespace, - autoConvert_v1_NodeAddress_To_api_NodeAddress, - autoConvert_v1_NodeCondition_To_api_NodeCondition, - autoConvert_v1_NodeDaemonEndpoints_To_api_NodeDaemonEndpoints, - autoConvert_v1_NodeList_To_api_NodeList, - autoConvert_v1_NodeProxyOptions_To_api_NodeProxyOptions, - autoConvert_v1_NodeSpec_To_api_NodeSpec, - autoConvert_v1_NodeStatus_To_api_NodeStatus, - autoConvert_v1_NodeSystemInfo_To_api_NodeSystemInfo, - autoConvert_v1_Node_To_api_Node, - autoConvert_v1_ObjectFieldSelector_To_api_ObjectFieldSelector, - autoConvert_v1_ObjectMeta_To_api_ObjectMeta, - autoConvert_v1_ObjectReference_To_api_ObjectReference, - autoConvert_v1_PersistentVolumeClaimList_To_api_PersistentVolumeClaimList, - autoConvert_v1_PersistentVolumeClaimSpec_To_api_PersistentVolumeClaimSpec, - autoConvert_v1_PersistentVolumeClaimStatus_To_api_PersistentVolumeClaimStatus, - autoConvert_v1_PersistentVolumeClaimVolumeSource_To_api_PersistentVolumeClaimVolumeSource, - 
autoConvert_v1_PersistentVolumeClaim_To_api_PersistentVolumeClaim, - autoConvert_v1_PersistentVolumeList_To_api_PersistentVolumeList, - autoConvert_v1_PersistentVolumeSource_To_api_PersistentVolumeSource, - autoConvert_v1_PersistentVolumeSpec_To_api_PersistentVolumeSpec, - autoConvert_v1_PersistentVolumeStatus_To_api_PersistentVolumeStatus, - autoConvert_v1_PersistentVolume_To_api_PersistentVolume, - autoConvert_v1_PodAttachOptions_To_api_PodAttachOptions, - autoConvert_v1_PodCondition_To_api_PodCondition, - autoConvert_v1_PodExecOptions_To_api_PodExecOptions, - autoConvert_v1_PodList_To_api_PodList, - autoConvert_v1_PodLogOptions_To_api_PodLogOptions, - autoConvert_v1_PodProxyOptions_To_api_PodProxyOptions, - autoConvert_v1_PodSpec_To_api_PodSpec, - autoConvert_v1_PodStatusResult_To_api_PodStatusResult, - autoConvert_v1_PodStatus_To_api_PodStatus, - autoConvert_v1_PodTemplateList_To_api_PodTemplateList, - autoConvert_v1_PodTemplateSpec_To_api_PodTemplateSpec, - autoConvert_v1_PodTemplate_To_api_PodTemplate, - autoConvert_v1_Pod_To_api_Pod, - autoConvert_v1_Probe_To_api_Probe, - autoConvert_v1_RBDVolumeSource_To_api_RBDVolumeSource, - autoConvert_v1_RangeAllocation_To_api_RangeAllocation, - autoConvert_v1_ReplicationControllerList_To_api_ReplicationControllerList, - autoConvert_v1_ReplicationControllerSpec_To_api_ReplicationControllerSpec, - autoConvert_v1_ReplicationControllerStatus_To_api_ReplicationControllerStatus, - autoConvert_v1_ReplicationController_To_api_ReplicationController, - autoConvert_v1_ResourceQuotaList_To_api_ResourceQuotaList, - autoConvert_v1_ResourceQuotaSpec_To_api_ResourceQuotaSpec, - autoConvert_v1_ResourceQuotaStatus_To_api_ResourceQuotaStatus, - autoConvert_v1_ResourceQuota_To_api_ResourceQuota, - autoConvert_v1_ResourceRequirements_To_api_ResourceRequirements, - autoConvert_v1_SELinuxOptions_To_api_SELinuxOptions, - autoConvert_v1_SecretKeySelector_To_api_SecretKeySelector, - autoConvert_v1_SecretList_To_api_SecretList, - autoConvert_v1_SecretVolumeSource_To_api_SecretVolumeSource, - autoConvert_v1_Secret_To_api_Secret, - autoConvert_v1_SecurityContext_To_api_SecurityContext, - autoConvert_v1_SerializedReference_To_api_SerializedReference, - autoConvert_v1_ServiceAccountList_To_api_ServiceAccountList, - autoConvert_v1_ServiceAccount_To_api_ServiceAccount, - autoConvert_v1_ServiceList_To_api_ServiceList, - autoConvert_v1_ServicePort_To_api_ServicePort, - autoConvert_v1_ServiceProxyOptions_To_api_ServiceProxyOptions, - autoConvert_v1_ServiceSpec_To_api_ServiceSpec, - autoConvert_v1_ServiceStatus_To_api_ServiceStatus, - autoConvert_v1_Service_To_api_Service, - autoConvert_v1_TCPSocketAction_To_api_TCPSocketAction, - autoConvert_v1_VolumeMount_To_api_VolumeMount, - autoConvert_v1_VolumeSource_To_api_VolumeSource, - autoConvert_v1_Volume_To_api_Volume, - ) - if err != nil { - // If one of the conversion functions is malformed, detect it immediately. 
- panic(err) +func autoConvert_api_VolumeSource_To_v1_VolumeSource(in *api.VolumeSource, out *VolumeSource, s conversion.Scope) error { + if in.HostPath != nil { + in, out := &in.HostPath, &out.HostPath + *out = new(HostPathVolumeSource) + if err := Convert_api_HostPathVolumeSource_To_v1_HostPathVolumeSource(*in, *out, s); err != nil { + return err + } + } else { + out.HostPath = nil + } + if in.EmptyDir != nil { + in, out := &in.EmptyDir, &out.EmptyDir + *out = new(EmptyDirVolumeSource) + if err := Convert_api_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource(*in, *out, s); err != nil { + return err + } + } else { + out.EmptyDir = nil + } + if in.GCEPersistentDisk != nil { + in, out := &in.GCEPersistentDisk, &out.GCEPersistentDisk + *out = new(GCEPersistentDiskVolumeSource) + if err := Convert_api_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource(*in, *out, s); err != nil { + return err + } + } else { + out.GCEPersistentDisk = nil + } + if in.AWSElasticBlockStore != nil { + in, out := &in.AWSElasticBlockStore, &out.AWSElasticBlockStore + *out = new(AWSElasticBlockStoreVolumeSource) + if err := Convert_api_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource(*in, *out, s); err != nil { + return err + } + } else { + out.AWSElasticBlockStore = nil + } + if in.GitRepo != nil { + in, out := &in.GitRepo, &out.GitRepo + *out = new(GitRepoVolumeSource) + if err := Convert_api_GitRepoVolumeSource_To_v1_GitRepoVolumeSource(*in, *out, s); err != nil { + return err + } + } else { + out.GitRepo = nil + } + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = new(SecretVolumeSource) + if err := Convert_api_SecretVolumeSource_To_v1_SecretVolumeSource(*in, *out, s); err != nil { + return err + } + } else { + out.Secret = nil + } + if in.NFS != nil { + in, out := &in.NFS, &out.NFS + *out = new(NFSVolumeSource) + if err := Convert_api_NFSVolumeSource_To_v1_NFSVolumeSource(*in, *out, s); err != nil { + return err + } + } else { + out.NFS = nil + } + if in.ISCSI != nil { + in, out := &in.ISCSI, &out.ISCSI + *out = new(ISCSIVolumeSource) + if err := Convert_api_ISCSIVolumeSource_To_v1_ISCSIVolumeSource(*in, *out, s); err != nil { + return err + } + } else { + out.ISCSI = nil + } + if in.Glusterfs != nil { + in, out := &in.Glusterfs, &out.Glusterfs + *out = new(GlusterfsVolumeSource) + if err := Convert_api_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource(*in, *out, s); err != nil { + return err + } + } else { + out.Glusterfs = nil + } + if in.PersistentVolumeClaim != nil { + in, out := &in.PersistentVolumeClaim, &out.PersistentVolumeClaim + *out = new(PersistentVolumeClaimVolumeSource) + if err := Convert_api_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource(*in, *out, s); err != nil { + return err + } + } else { + out.PersistentVolumeClaim = nil + } + if in.RBD != nil { + in, out := &in.RBD, &out.RBD + *out = new(RBDVolumeSource) + if err := Convert_api_RBDVolumeSource_To_v1_RBDVolumeSource(*in, *out, s); err != nil { + return err + } + } else { + out.RBD = nil + } + if in.FlexVolume != nil { + in, out := &in.FlexVolume, &out.FlexVolume + *out = new(FlexVolumeSource) + if err := Convert_api_FlexVolumeSource_To_v1_FlexVolumeSource(*in, *out, s); err != nil { + return err + } + } else { + out.FlexVolume = nil + } + if in.Cinder != nil { + in, out := &in.Cinder, &out.Cinder + *out = new(CinderVolumeSource) + if err := Convert_api_CinderVolumeSource_To_v1_CinderVolumeSource(*in, *out, s); err != nil { + return err + } + } else { + 
out.Cinder = nil + } + if in.CephFS != nil { + in, out := &in.CephFS, &out.CephFS + *out = new(CephFSVolumeSource) + if err := Convert_api_CephFSVolumeSource_To_v1_CephFSVolumeSource(*in, *out, s); err != nil { + return err + } + } else { + out.CephFS = nil + } + if in.Flocker != nil { + in, out := &in.Flocker, &out.Flocker + *out = new(FlockerVolumeSource) + if err := Convert_api_FlockerVolumeSource_To_v1_FlockerVolumeSource(*in, *out, s); err != nil { + return err + } + } else { + out.Flocker = nil + } + if in.DownwardAPI != nil { + in, out := &in.DownwardAPI, &out.DownwardAPI + *out = new(DownwardAPIVolumeSource) + if err := Convert_api_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource(*in, *out, s); err != nil { + return err + } + } else { + out.DownwardAPI = nil + } + if in.FC != nil { + in, out := &in.FC, &out.FC + *out = new(FCVolumeSource) + if err := Convert_api_FCVolumeSource_To_v1_FCVolumeSource(*in, *out, s); err != nil { + return err + } + } else { + out.FC = nil + } + if in.AzureFile != nil { + in, out := &in.AzureFile, &out.AzureFile + *out = new(AzureFileVolumeSource) + if err := Convert_api_AzureFileVolumeSource_To_v1_AzureFileVolumeSource(*in, *out, s); err != nil { + return err + } + } else { + out.AzureFile = nil + } + if in.ConfigMap != nil { + in, out := &in.ConfigMap, &out.ConfigMap + *out = new(ConfigMapVolumeSource) + if err := Convert_api_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource(*in, *out, s); err != nil { + return err + } + } else { + out.ConfigMap = nil + } + if in.VsphereVolume != nil { + in, out := &in.VsphereVolume, &out.VsphereVolume + *out = new(VsphereVirtualDiskVolumeSource) + if err := Convert_api_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource(*in, *out, s); err != nil { + return err + } + } else { + out.VsphereVolume = nil + } + return nil +} + +func Convert_api_VolumeSource_To_v1_VolumeSource(in *api.VolumeSource, out *VolumeSource, s conversion.Scope) error { + return autoConvert_api_VolumeSource_To_v1_VolumeSource(in, out, s) +} + +func autoConvert_v1_VsphereVirtualDiskVolumeSource_To_api_VsphereVirtualDiskVolumeSource(in *VsphereVirtualDiskVolumeSource, out *api.VsphereVirtualDiskVolumeSource, s conversion.Scope) error { + out.VolumePath = in.VolumePath + out.FSType = in.FSType + return nil +} + +func Convert_v1_VsphereVirtualDiskVolumeSource_To_api_VsphereVirtualDiskVolumeSource(in *VsphereVirtualDiskVolumeSource, out *api.VsphereVirtualDiskVolumeSource, s conversion.Scope) error { + return autoConvert_v1_VsphereVirtualDiskVolumeSource_To_api_VsphereVirtualDiskVolumeSource(in, out, s) +} + +func autoConvert_api_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource(in *api.VsphereVirtualDiskVolumeSource, out *VsphereVirtualDiskVolumeSource, s conversion.Scope) error { + out.VolumePath = in.VolumePath + out.FSType = in.FSType + return nil +} + +func Convert_api_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource(in *api.VsphereVirtualDiskVolumeSource, out *VsphereVirtualDiskVolumeSource, s conversion.Scope) error { + return autoConvert_api_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource(in, out, s) +} + +func autoConvert_v1_WeightedPodAffinityTerm_To_api_WeightedPodAffinityTerm(in *WeightedPodAffinityTerm, out *api.WeightedPodAffinityTerm, s conversion.Scope) error { + out.Weight = int(in.Weight) + if err := Convert_v1_PodAffinityTerm_To_api_PodAffinityTerm(&in.PodAffinityTerm, &out.PodAffinityTerm, s); err != nil { + return err + } + return nil +} + +func 
Convert_v1_WeightedPodAffinityTerm_To_api_WeightedPodAffinityTerm(in *WeightedPodAffinityTerm, out *api.WeightedPodAffinityTerm, s conversion.Scope) error { + return autoConvert_v1_WeightedPodAffinityTerm_To_api_WeightedPodAffinityTerm(in, out, s) +} + +func autoConvert_api_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm(in *api.WeightedPodAffinityTerm, out *WeightedPodAffinityTerm, s conversion.Scope) error { + out.Weight = int32(in.Weight) + if err := Convert_api_PodAffinityTerm_To_v1_PodAffinityTerm(&in.PodAffinityTerm, &out.PodAffinityTerm, s); err != nil { + return err } + return nil +} + +func Convert_api_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm(in *api.WeightedPodAffinityTerm, out *WeightedPodAffinityTerm, s conversion.Scope) error { + return autoConvert_api_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm(in, out, s) } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/v1/conversion_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/v1/conversion_test.go new file mode 100644 index 000000000000..205777cd28e3 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/v1/conversion_test.go @@ -0,0 +1,217 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1_test + +import ( + "net/url" + "reflect" + "testing" + "time" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/resource" + "k8s.io/kubernetes/pkg/api/unversioned" + versioned "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/util/diff" +) + +func TestPodLogOptions(t *testing.T) { + sinceSeconds := int64(1) + sinceTime := unversioned.NewTime(time.Date(2000, 1, 1, 12, 34, 56, 0, time.UTC).Local()) + tailLines := int64(2) + limitBytes := int64(3) + + versionedLogOptions := &versioned.PodLogOptions{ + Container: "mycontainer", + Follow: true, + Previous: true, + SinceSeconds: &sinceSeconds, + SinceTime: &sinceTime, + Timestamps: true, + TailLines: &tailLines, + LimitBytes: &limitBytes, + } + unversionedLogOptions := &api.PodLogOptions{ + Container: "mycontainer", + Follow: true, + Previous: true, + SinceSeconds: &sinceSeconds, + SinceTime: &sinceTime, + Timestamps: true, + TailLines: &tailLines, + LimitBytes: &limitBytes, + } + expectedParameters := url.Values{ + "container": {"mycontainer"}, + "follow": {"true"}, + "previous": {"true"}, + "sinceSeconds": {"1"}, + "sinceTime": {"2000-01-01T12:34:56Z"}, + "timestamps": {"true"}, + "tailLines": {"2"}, + "limitBytes": {"3"}, + } + + codec := runtime.NewParameterCodec(api.Scheme) + + // unversioned -> query params + { + actualParameters, err := codec.EncodeParameters(unversionedLogOptions, versioned.SchemeGroupVersion) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(actualParameters, expectedParameters) { + t.Fatalf("Expected\n%#v\ngot\n%#v", expectedParameters, actualParameters) + } + } + + // versioned -> query params + { + actualParameters, err := codec.EncodeParameters(versionedLogOptions, versioned.SchemeGroupVersion) 
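For reviewers tracing how the regenerated converters above get exercised: they are plain functions registered on the scheme, so callers normally go through api.Scheme.Convert rather than invoking them by name — exactly as the tests in this file do for PodSpec. A minimal sketch, assuming the generated functions are registered with the scheme as before (the registration block removed from this file's init() is emitted elsewhere by the generator); the HostPath value is illustrative only:

	// Round-trip a v1.VolumeSource into the internal type using the
	// generated converters above. Scheme.Convert looks up the registered
	// Convert_v1_VolumeSource_To_api_VolumeSource function.
	src := &versioned.VolumeSource{
		HostPath: &versioned.HostPathVolumeSource{Path: "/tmp/data"}, // hypothetical path
	}
	dst := &api.VolumeSource{}
	if err := api.Scheme.Convert(src, dst); err != nil {
		panic(err) // a malformed converter surfaces here at call time
	}
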
+ if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(actualParameters, expectedParameters) { + t.Fatalf("Expected\n%#v\ngot\n%#v", expectedParameters, actualParameters) + } + } + + // query params -> versioned + { + convertedLogOptions := &versioned.PodLogOptions{} + err := codec.DecodeParameters(expectedParameters, versioned.SchemeGroupVersion, convertedLogOptions) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(convertedLogOptions, versionedLogOptions) { + t.Fatalf("Unexpected deserialization:\n%s", diff.ObjectGoPrintSideBySide(versionedLogOptions, convertedLogOptions)) + } + } + + // query params -> unversioned + { + convertedLogOptions := &api.PodLogOptions{} + err := codec.DecodeParameters(expectedParameters, versioned.SchemeGroupVersion, convertedLogOptions) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(convertedLogOptions, unversionedLogOptions) { + t.Fatalf("Unexpected deserialization:\n%s", diff.ObjectGoPrintSideBySide(unversionedLogOptions, convertedLogOptions)) + } + } +} + +// TestPodSpecConversion tests that ServiceAccount is an alias for +// ServiceAccountName. +func TestPodSpecConversion(t *testing.T) { + name, other := "foo", "bar" + + // Test internal -> v1. Should have both alias (DeprecatedServiceAccount) + // and new field (ServiceAccountName). + i := &api.PodSpec{ + ServiceAccountName: name, + } + v := versioned.PodSpec{} + if err := api.Scheme.Convert(i, &v); err != nil { + t.Fatalf("unexpected error: %v", err) + } + if v.ServiceAccountName != name { + t.Fatalf("want v1.ServiceAccountName %q, got %q", name, v.ServiceAccountName) + } + if v.DeprecatedServiceAccount != name { + t.Fatalf("want v1.DeprecatedServiceAccount %q, got %q", name, v.DeprecatedServiceAccount) + } + + // Test v1 -> internal. Either DeprecatedServiceAccount, ServiceAccountName, + // or both should translate to ServiceAccountName. ServiceAccountName wins + // if both are set. + testCases := []*versioned.PodSpec{ + // New + {ServiceAccountName: name}, + // Alias + {DeprecatedServiceAccount: name}, + // Both: same + {ServiceAccountName: name, DeprecatedServiceAccount: name}, + // Both: different + {ServiceAccountName: name, DeprecatedServiceAccount: other}, + } + for k, v := range testCases { + got := api.PodSpec{} + err := api.Scheme.Convert(v, &got) + if err != nil { + t.Fatalf("unexpected error for case %d: %v", k, err) + } + if got.ServiceAccountName != name { + t.Fatalf("want api.ServiceAccountName %q, got %q", name, got.ServiceAccountName) + } + } +} + +func TestResourceListConversion(t *testing.T) { + bigMilliQuantity := resource.NewQuantity(resource.MaxMilliValue, resource.DecimalSI) + bigMilliQuantity.Add(resource.MustParse("12345m")) + + tests := []struct { + input versioned.ResourceList + expected api.ResourceList + }{ + { // No changes necessary. + input: versioned.ResourceList{ + versioned.ResourceMemory: resource.MustParse("30M"), + versioned.ResourceCPU: resource.MustParse("100m"), + versioned.ResourceStorage: resource.MustParse("1G"), + }, + expected: api.ResourceList{ + api.ResourceMemory: resource.MustParse("30M"), + api.ResourceCPU: resource.MustParse("100m"), + api.ResourceStorage: resource.MustParse("1G"), + }, + }, + { // Nano-scale values should be rounded up to milli-scale. 
+ input: versioned.ResourceList{ + versioned.ResourceCPU: resource.MustParse("3.000023m"), + versioned.ResourceMemory: resource.MustParse("500.000050m"), + }, + expected: api.ResourceList{ + api.ResourceCPU: resource.MustParse("4m"), + api.ResourceMemory: resource.MustParse("501m"), + }, + }, + { // Large values should still be accurate. + input: versioned.ResourceList{ + versioned.ResourceCPU: *bigMilliQuantity.Copy(), + versioned.ResourceStorage: *bigMilliQuantity.Copy(), + }, + expected: api.ResourceList{ + api.ResourceCPU: *bigMilliQuantity.Copy(), + api.ResourceStorage: *bigMilliQuantity.Copy(), + }, + }, + } + + for i, test := range tests { + output := api.ResourceList{} + err := api.Scheme.Convert(&test.input, &output) + if err != nil { + t.Fatalf("unexpected error for case %d: %v", i, err) + } + if !api.Semantic.DeepEqual(test.expected, output) { + t.Errorf("unexpected conversion for case %d: Expected %+v; Got %+v", i, test.expected, output) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/v1/deep_copy_generated.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/v1/deep_copy_generated.go index 1252bf4e6e57..6f2b1e15331c 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/v1/deep_copy_generated.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/v1/deep_copy_generated.go @@ -1,5 +1,7 @@ +// +build !ignore_autogenerated + /* -Copyright 2015 The Kubernetes Authors All rights reserved. +Copyright 2016 The Kubernetes Authors All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,98 +16,250 @@ See the License for the specific language governing permissions and limitations under the License. */ -// DO NOT EDIT. THIS FILE IS AUTO-GENERATED BY $KUBEROOT/hack/update-generated-deep-copies.sh. +// This file was autogenerated by deepcopy-gen. Do not edit it manually! 
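The deep-copy counterpart of the conversion rewrite follows. The renamed DeepCopy_v1_* functions below are exported, plain functions; they are registered on api.Scheme in init() but can also be called directly. A minimal sketch, not taken from this patch — passing a nil *conversion.Cloner is an assumption that only holds for functions, like this one, whose body never touches the cloner; when in doubt, obtain one via conversion.NewCloner():

	// Deep-copy a Capabilities value; out gets freshly allocated slices,
	// so mutating out.Add afterwards leaves in.Add untouched.
	in := versioned.Capabilities{Add: []versioned.Capability{"NET_ADMIN"}}
	out := versioned.Capabilities{}
	if err := versioned.DeepCopy_v1_Capabilities(in, &out, nil); err != nil {
		panic(err)
	}
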
package v1 import ( - time "time" - api "k8s.io/kubernetes/pkg/api" resource "k8s.io/kubernetes/pkg/api/resource" unversioned "k8s.io/kubernetes/pkg/api/unversioned" conversion "k8s.io/kubernetes/pkg/conversion" runtime "k8s.io/kubernetes/pkg/runtime" + types "k8s.io/kubernetes/pkg/types" intstr "k8s.io/kubernetes/pkg/util/intstr" - inf "speter.net/go/exp/math/dec/inf" ) -func deepCopy_resource_Quantity(in resource.Quantity, out *resource.Quantity, c *conversion.Cloner) error { - if in.Amount != nil { - if newVal, err := c.DeepCopy(in.Amount); err != nil { - return err - } else { - out.Amount = newVal.(*inf.Dec) - } - } else { - out.Amount = nil +func init() { + if err := api.Scheme.AddGeneratedDeepCopyFuncs( + DeepCopy_v1_AWSElasticBlockStoreVolumeSource, + DeepCopy_v1_Affinity, + DeepCopy_v1_AzureFileVolumeSource, + DeepCopy_v1_Binding, + DeepCopy_v1_Capabilities, + DeepCopy_v1_CephFSVolumeSource, + DeepCopy_v1_CinderVolumeSource, + DeepCopy_v1_ComponentCondition, + DeepCopy_v1_ComponentStatus, + DeepCopy_v1_ComponentStatusList, + DeepCopy_v1_ConfigMap, + DeepCopy_v1_ConfigMapKeySelector, + DeepCopy_v1_ConfigMapList, + DeepCopy_v1_ConfigMapVolumeSource, + DeepCopy_v1_Container, + DeepCopy_v1_ContainerImage, + DeepCopy_v1_ContainerPort, + DeepCopy_v1_ContainerState, + DeepCopy_v1_ContainerStateRunning, + DeepCopy_v1_ContainerStateTerminated, + DeepCopy_v1_ContainerStateWaiting, + DeepCopy_v1_ContainerStatus, + DeepCopy_v1_DaemonEndpoint, + DeepCopy_v1_DeleteOptions, + DeepCopy_v1_DownwardAPIVolumeFile, + DeepCopy_v1_DownwardAPIVolumeSource, + DeepCopy_v1_EmptyDirVolumeSource, + DeepCopy_v1_EndpointAddress, + DeepCopy_v1_EndpointPort, + DeepCopy_v1_EndpointSubset, + DeepCopy_v1_Endpoints, + DeepCopy_v1_EndpointsList, + DeepCopy_v1_EnvVar, + DeepCopy_v1_EnvVarSource, + DeepCopy_v1_Event, + DeepCopy_v1_EventList, + DeepCopy_v1_EventSource, + DeepCopy_v1_ExecAction, + DeepCopy_v1_ExportOptions, + DeepCopy_v1_FCVolumeSource, + DeepCopy_v1_FlexVolumeSource, + DeepCopy_v1_FlockerVolumeSource, + DeepCopy_v1_GCEPersistentDiskVolumeSource, + DeepCopy_v1_GitRepoVolumeSource, + DeepCopy_v1_GlusterfsVolumeSource, + DeepCopy_v1_HTTPGetAction, + DeepCopy_v1_HTTPHeader, + DeepCopy_v1_Handler, + DeepCopy_v1_HostPathVolumeSource, + DeepCopy_v1_ISCSIVolumeSource, + DeepCopy_v1_KeyToPath, + DeepCopy_v1_Lifecycle, + DeepCopy_v1_LimitRange, + DeepCopy_v1_LimitRangeItem, + DeepCopy_v1_LimitRangeList, + DeepCopy_v1_LimitRangeSpec, + DeepCopy_v1_List, + DeepCopy_v1_ListOptions, + DeepCopy_v1_LoadBalancerIngress, + DeepCopy_v1_LoadBalancerStatus, + DeepCopy_v1_LocalObjectReference, + DeepCopy_v1_NFSVolumeSource, + DeepCopy_v1_Namespace, + DeepCopy_v1_NamespaceList, + DeepCopy_v1_NamespaceSpec, + DeepCopy_v1_NamespaceStatus, + DeepCopy_v1_Node, + DeepCopy_v1_NodeAddress, + DeepCopy_v1_NodeAffinity, + DeepCopy_v1_NodeCondition, + DeepCopy_v1_NodeDaemonEndpoints, + DeepCopy_v1_NodeList, + DeepCopy_v1_NodeProxyOptions, + DeepCopy_v1_NodeSelector, + DeepCopy_v1_NodeSelectorRequirement, + DeepCopy_v1_NodeSelectorTerm, + DeepCopy_v1_NodeSpec, + DeepCopy_v1_NodeStatus, + DeepCopy_v1_NodeSystemInfo, + DeepCopy_v1_ObjectFieldSelector, + DeepCopy_v1_ObjectMeta, + DeepCopy_v1_ObjectReference, + DeepCopy_v1_OwnerReference, + DeepCopy_v1_PersistentVolume, + DeepCopy_v1_PersistentVolumeClaim, + DeepCopy_v1_PersistentVolumeClaimList, + DeepCopy_v1_PersistentVolumeClaimSpec, + DeepCopy_v1_PersistentVolumeClaimStatus, + DeepCopy_v1_PersistentVolumeClaimVolumeSource, + DeepCopy_v1_PersistentVolumeList, + 
DeepCopy_v1_PersistentVolumeSource, + DeepCopy_v1_PersistentVolumeSpec, + DeepCopy_v1_PersistentVolumeStatus, + DeepCopy_v1_Pod, + DeepCopy_v1_PodAffinity, + DeepCopy_v1_PodAffinityTerm, + DeepCopy_v1_PodAntiAffinity, + DeepCopy_v1_PodAttachOptions, + DeepCopy_v1_PodCondition, + DeepCopy_v1_PodExecOptions, + DeepCopy_v1_PodList, + DeepCopy_v1_PodLogOptions, + DeepCopy_v1_PodProxyOptions, + DeepCopy_v1_PodSecurityContext, + DeepCopy_v1_PodSpec, + DeepCopy_v1_PodStatus, + DeepCopy_v1_PodStatusResult, + DeepCopy_v1_PodTemplate, + DeepCopy_v1_PodTemplateList, + DeepCopy_v1_PodTemplateSpec, + DeepCopy_v1_Preconditions, + DeepCopy_v1_PreferredSchedulingTerm, + DeepCopy_v1_Probe, + DeepCopy_v1_RBDVolumeSource, + DeepCopy_v1_RangeAllocation, + DeepCopy_v1_ReplicationController, + DeepCopy_v1_ReplicationControllerList, + DeepCopy_v1_ReplicationControllerSpec, + DeepCopy_v1_ReplicationControllerStatus, + DeepCopy_v1_ResourceFieldSelector, + DeepCopy_v1_ResourceQuota, + DeepCopy_v1_ResourceQuotaList, + DeepCopy_v1_ResourceQuotaSpec, + DeepCopy_v1_ResourceQuotaStatus, + DeepCopy_v1_ResourceRequirements, + DeepCopy_v1_SELinuxOptions, + DeepCopy_v1_Secret, + DeepCopy_v1_SecretKeySelector, + DeepCopy_v1_SecretList, + DeepCopy_v1_SecretVolumeSource, + DeepCopy_v1_SecurityContext, + DeepCopy_v1_SerializedReference, + DeepCopy_v1_Service, + DeepCopy_v1_ServiceAccount, + DeepCopy_v1_ServiceAccountList, + DeepCopy_v1_ServiceList, + DeepCopy_v1_ServicePort, + DeepCopy_v1_ServiceProxyOptions, + DeepCopy_v1_ServiceSpec, + DeepCopy_v1_ServiceStatus, + DeepCopy_v1_TCPSocketAction, + DeepCopy_v1_Taint, + DeepCopy_v1_Toleration, + DeepCopy_v1_Volume, + DeepCopy_v1_VolumeMount, + DeepCopy_v1_VolumeSource, + DeepCopy_v1_VsphereVirtualDiskVolumeSource, + DeepCopy_v1_WeightedPodAffinityTerm, + ); err != nil { + // if one of the deep copy functions is malformed, detect it immediately. 
+ panic(err) } - out.Format = in.Format - return nil } -func deepCopy_unversioned_ListMeta(in unversioned.ListMeta, out *unversioned.ListMeta, c *conversion.Cloner) error { - out.SelfLink = in.SelfLink - out.ResourceVersion = in.ResourceVersion +func DeepCopy_v1_AWSElasticBlockStoreVolumeSource(in AWSElasticBlockStoreVolumeSource, out *AWSElasticBlockStoreVolumeSource, c *conversion.Cloner) error { + out.VolumeID = in.VolumeID + out.FSType = in.FSType + out.Partition = in.Partition + out.ReadOnly = in.ReadOnly return nil } -func deepCopy_unversioned_Time(in unversioned.Time, out *unversioned.Time, c *conversion.Cloner) error { - if newVal, err := c.DeepCopy(in.Time); err != nil { - return err +func DeepCopy_v1_Affinity(in Affinity, out *Affinity, c *conversion.Cloner) error { + if in.NodeAffinity != nil { + in, out := in.NodeAffinity, &out.NodeAffinity + *out = new(NodeAffinity) + if err := DeepCopy_v1_NodeAffinity(*in, *out, c); err != nil { + return err + } } else { - out.Time = newVal.(time.Time) + out.NodeAffinity = nil + } + if in.PodAffinity != nil { + in, out := in.PodAffinity, &out.PodAffinity + *out = new(PodAffinity) + if err := DeepCopy_v1_PodAffinity(*in, *out, c); err != nil { + return err + } + } else { + out.PodAffinity = nil + } + if in.PodAntiAffinity != nil { + in, out := in.PodAntiAffinity, &out.PodAntiAffinity + *out = new(PodAntiAffinity) + if err := DeepCopy_v1_PodAntiAffinity(*in, *out, c); err != nil { + return err + } + } else { + out.PodAntiAffinity = nil } return nil } -func deepCopy_unversioned_TypeMeta(in unversioned.TypeMeta, out *unversioned.TypeMeta, c *conversion.Cloner) error { - out.Kind = in.Kind - out.APIVersion = in.APIVersion - return nil -} - -func deepCopy_v1_AWSElasticBlockStoreVolumeSource(in AWSElasticBlockStoreVolumeSource, out *AWSElasticBlockStoreVolumeSource, c *conversion.Cloner) error { - out.VolumeID = in.VolumeID - out.FSType = in.FSType - out.Partition = in.Partition - out.ReadOnly = in.ReadOnly - return nil -} - -func deepCopy_v1_AzureFileVolumeSource(in AzureFileVolumeSource, out *AzureFileVolumeSource, c *conversion.Cloner) error { +func DeepCopy_v1_AzureFileVolumeSource(in AzureFileVolumeSource, out *AzureFileVolumeSource, c *conversion.Cloner) error { out.SecretName = in.SecretName out.ShareName = in.ShareName out.ReadOnly = in.ReadOnly return nil } -func deepCopy_v1_Binding(in Binding, out *Binding, c *conversion.Cloner) error { - if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { +func DeepCopy_v1_Binding(in Binding, out *Binding, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } - if err := deepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + if err := DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { return err } - if err := deepCopy_v1_ObjectReference(in.Target, &out.Target, c); err != nil { + if err := DeepCopy_v1_ObjectReference(in.Target, &out.Target, c); err != nil { return err } return nil } -func deepCopy_v1_Capabilities(in Capabilities, out *Capabilities, c *conversion.Cloner) error { +func DeepCopy_v1_Capabilities(in Capabilities, out *Capabilities, c *conversion.Cloner) error { if in.Add != nil { - out.Add = make([]Capability, len(in.Add)) - for i := range in.Add { - out.Add[i] = in.Add[i] + in, out := in.Add, &out.Add + *out = make([]Capability, len(in)) + for i := range in { + (*out)[i] = in[i] } } else { out.Add = nil } if in.Drop != nil { - out.Drop 
= make([]Capability, len(in.Drop)) - for i := range in.Drop { - out.Drop[i] = in.Drop[i] + in, out := in.Drop, &out.Drop + *out = make([]Capability, len(in)) + for i := range in { + (*out)[i] = in[i] } } else { out.Drop = nil @@ -113,12 +267,11 @@ func deepCopy_v1_Capabilities(in Capabilities, out *Capabilities, c *conversion. return nil } -func deepCopy_v1_CephFSVolumeSource(in CephFSVolumeSource, out *CephFSVolumeSource, c *conversion.Cloner) error { +func DeepCopy_v1_CephFSVolumeSource(in CephFSVolumeSource, out *CephFSVolumeSource, c *conversion.Cloner) error { if in.Monitors != nil { - out.Monitors = make([]string, len(in.Monitors)) - for i := range in.Monitors { - out.Monitors[i] = in.Monitors[i] - } + in, out := in.Monitors, &out.Monitors + *out = make([]string, len(in)) + copy(*out, in) } else { out.Monitors = nil } @@ -126,8 +279,9 @@ func deepCopy_v1_CephFSVolumeSource(in CephFSVolumeSource, out *CephFSVolumeSour out.User = in.User out.SecretFile = in.SecretFile if in.SecretRef != nil { - out.SecretRef = new(LocalObjectReference) - if err := deepCopy_v1_LocalObjectReference(*in.SecretRef, out.SecretRef, c); err != nil { + in, out := in.SecretRef, &out.SecretRef + *out = new(LocalObjectReference) + if err := DeepCopy_v1_LocalObjectReference(*in, *out, c); err != nil { return err } } else { @@ -137,14 +291,14 @@ func deepCopy_v1_CephFSVolumeSource(in CephFSVolumeSource, out *CephFSVolumeSour return nil } -func deepCopy_v1_CinderVolumeSource(in CinderVolumeSource, out *CinderVolumeSource, c *conversion.Cloner) error { +func DeepCopy_v1_CinderVolumeSource(in CinderVolumeSource, out *CinderVolumeSource, c *conversion.Cloner) error { out.VolumeID = in.VolumeID out.FSType = in.FSType out.ReadOnly = in.ReadOnly return nil } -func deepCopy_v1_ComponentCondition(in ComponentCondition, out *ComponentCondition, c *conversion.Cloner) error { +func DeepCopy_v1_ComponentCondition(in ComponentCondition, out *ComponentCondition, c *conversion.Cloner) error { out.Type = in.Type out.Status = in.Status out.Message = in.Message @@ -152,17 +306,18 @@ func deepCopy_v1_ComponentCondition(in ComponentCondition, out *ComponentConditi return nil } -func deepCopy_v1_ComponentStatus(in ComponentStatus, out *ComponentStatus, c *conversion.Cloner) error { - if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { +func DeepCopy_v1_ComponentStatus(in ComponentStatus, out *ComponentStatus, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } - if err := deepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + if err := DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { return err } if in.Conditions != nil { - out.Conditions = make([]ComponentCondition, len(in.Conditions)) - for i := range in.Conditions { - if err := deepCopy_v1_ComponentCondition(in.Conditions[i], &out.Conditions[i], c); err != nil { + in, out := in.Conditions, &out.Conditions + *out = make([]ComponentCondition, len(in)) + for i := range in { + if err := DeepCopy_v1_ComponentCondition(in[i], &(*out)[i], c); err != nil { return err } } @@ -172,17 +327,18 @@ func deepCopy_v1_ComponentStatus(in ComponentStatus, out *ComponentStatus, c *co return nil } -func deepCopy_v1_ComponentStatusList(in ComponentStatusList, out *ComponentStatusList, c *conversion.Cloner) error { - if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { +func DeepCopy_v1_ComponentStatusList(in 
ComponentStatusList, out *ComponentStatusList, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } - if err := deepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { return err } if in.Items != nil { - out.Items = make([]ComponentStatus, len(in.Items)) - for i := range in.Items { - if err := deepCopy_v1_ComponentStatus(in.Items[i], &out.Items[i], c); err != nil { + in, out := in.Items, &out.Items + *out = make([]ComponentStatus, len(in)) + for i := range in { + if err := DeepCopy_v1_ComponentStatus(in[i], &(*out)[i], c); err != nil { return err } } @@ -192,17 +348,18 @@ func deepCopy_v1_ComponentStatusList(in ComponentStatusList, out *ComponentStatu return nil } -func deepCopy_v1_ConfigMap(in ConfigMap, out *ConfigMap, c *conversion.Cloner) error { - if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { +func DeepCopy_v1_ConfigMap(in ConfigMap, out *ConfigMap, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } - if err := deepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + if err := DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { return err } if in.Data != nil { - out.Data = make(map[string]string) - for key, val := range in.Data { - out.Data[key] = val + in, out := in.Data, &out.Data + *out = make(map[string]string) + for key, val := range in { + (*out)[key] = val } } else { out.Data = nil @@ -210,25 +367,26 @@ func deepCopy_v1_ConfigMap(in ConfigMap, out *ConfigMap, c *conversion.Cloner) e return nil } -func deepCopy_v1_ConfigMapKeySelector(in ConfigMapKeySelector, out *ConfigMapKeySelector, c *conversion.Cloner) error { - if err := deepCopy_v1_LocalObjectReference(in.LocalObjectReference, &out.LocalObjectReference, c); err != nil { +func DeepCopy_v1_ConfigMapKeySelector(in ConfigMapKeySelector, out *ConfigMapKeySelector, c *conversion.Cloner) error { + if err := DeepCopy_v1_LocalObjectReference(in.LocalObjectReference, &out.LocalObjectReference, c); err != nil { return err } out.Key = in.Key return nil } -func deepCopy_v1_ConfigMapList(in ConfigMapList, out *ConfigMapList, c *conversion.Cloner) error { - if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { +func DeepCopy_v1_ConfigMapList(in ConfigMapList, out *ConfigMapList, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } - if err := deepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { return err } if in.Items != nil { - out.Items = make([]ConfigMap, len(in.Items)) - for i := range in.Items { - if err := deepCopy_v1_ConfigMap(in.Items[i], &out.Items[i], c); err != nil { + in, out := in.Items, &out.Items + *out = make([]ConfigMap, len(in)) + for i := range in { + if err := DeepCopy_v1_ConfigMap(in[i], &(*out)[i], c); err != nil { return err } } @@ -238,14 +396,15 @@ func deepCopy_v1_ConfigMapList(in ConfigMapList, out *ConfigMapList, c *conversi return nil } -func deepCopy_v1_ConfigMapVolumeSource(in ConfigMapVolumeSource, out *ConfigMapVolumeSource, c *conversion.Cloner) error { - if err := 
deepCopy_v1_LocalObjectReference(in.LocalObjectReference, &out.LocalObjectReference, c); err != nil { +func DeepCopy_v1_ConfigMapVolumeSource(in ConfigMapVolumeSource, out *ConfigMapVolumeSource, c *conversion.Cloner) error { + if err := DeepCopy_v1_LocalObjectReference(in.LocalObjectReference, &out.LocalObjectReference, c); err != nil { return err } if in.Items != nil { - out.Items = make([]KeyToPath, len(in.Items)) - for i := range in.Items { - if err := deepCopy_v1_KeyToPath(in.Items[i], &out.Items[i], c); err != nil { + in, out := in.Items, &out.Items + *out = make([]KeyToPath, len(in)) + for i := range in { + if err := DeepCopy_v1_KeyToPath(in[i], &(*out)[i], c); err != nil { return err } } @@ -255,30 +414,29 @@ func deepCopy_v1_ConfigMapVolumeSource(in ConfigMapVolumeSource, out *ConfigMapV return nil } -func deepCopy_v1_Container(in Container, out *Container, c *conversion.Cloner) error { +func DeepCopy_v1_Container(in Container, out *Container, c *conversion.Cloner) error { out.Name = in.Name out.Image = in.Image if in.Command != nil { - out.Command = make([]string, len(in.Command)) - for i := range in.Command { - out.Command[i] = in.Command[i] - } + in, out := in.Command, &out.Command + *out = make([]string, len(in)) + copy(*out, in) } else { out.Command = nil } if in.Args != nil { - out.Args = make([]string, len(in.Args)) - for i := range in.Args { - out.Args[i] = in.Args[i] - } + in, out := in.Args, &out.Args + *out = make([]string, len(in)) + copy(*out, in) } else { out.Args = nil } out.WorkingDir = in.WorkingDir if in.Ports != nil { - out.Ports = make([]ContainerPort, len(in.Ports)) - for i := range in.Ports { - if err := deepCopy_v1_ContainerPort(in.Ports[i], &out.Ports[i], c); err != nil { + in, out := in.Ports, &out.Ports + *out = make([]ContainerPort, len(in)) + for i := range in { + if err := DeepCopy_v1_ContainerPort(in[i], &(*out)[i], c); err != nil { return err } } @@ -286,22 +444,24 @@ func deepCopy_v1_Container(in Container, out *Container, c *conversion.Cloner) e out.Ports = nil } if in.Env != nil { - out.Env = make([]EnvVar, len(in.Env)) - for i := range in.Env { - if err := deepCopy_v1_EnvVar(in.Env[i], &out.Env[i], c); err != nil { + in, out := in.Env, &out.Env + *out = make([]EnvVar, len(in)) + for i := range in { + if err := DeepCopy_v1_EnvVar(in[i], &(*out)[i], c); err != nil { return err } } } else { out.Env = nil } - if err := deepCopy_v1_ResourceRequirements(in.Resources, &out.Resources, c); err != nil { + if err := DeepCopy_v1_ResourceRequirements(in.Resources, &out.Resources, c); err != nil { return err } if in.VolumeMounts != nil { - out.VolumeMounts = make([]VolumeMount, len(in.VolumeMounts)) - for i := range in.VolumeMounts { - if err := deepCopy_v1_VolumeMount(in.VolumeMounts[i], &out.VolumeMounts[i], c); err != nil { + in, out := in.VolumeMounts, &out.VolumeMounts + *out = make([]VolumeMount, len(in)) + for i := range in { + if err := DeepCopy_v1_VolumeMount(in[i], &(*out)[i], c); err != nil { return err } } @@ -309,24 +469,27 @@ func deepCopy_v1_Container(in Container, out *Container, c *conversion.Cloner) e out.VolumeMounts = nil } if in.LivenessProbe != nil { - out.LivenessProbe = new(Probe) - if err := deepCopy_v1_Probe(*in.LivenessProbe, out.LivenessProbe, c); err != nil { + in, out := in.LivenessProbe, &out.LivenessProbe + *out = new(Probe) + if err := DeepCopy_v1_Probe(*in, *out, c); err != nil { return err } } else { out.LivenessProbe = nil } if in.ReadinessProbe != nil { - out.ReadinessProbe = new(Probe) - if err := 
deepCopy_v1_Probe(*in.ReadinessProbe, out.ReadinessProbe, c); err != nil { + in, out := in.ReadinessProbe, &out.ReadinessProbe + *out = new(Probe) + if err := DeepCopy_v1_Probe(*in, *out, c); err != nil { return err } } else { out.ReadinessProbe = nil } if in.Lifecycle != nil { - out.Lifecycle = new(Lifecycle) - if err := deepCopy_v1_Lifecycle(*in.Lifecycle, out.Lifecycle, c); err != nil { + in, out := in.Lifecycle, &out.Lifecycle + *out = new(Lifecycle) + if err := DeepCopy_v1_Lifecycle(*in, *out, c); err != nil { return err } } else { @@ -335,8 +498,9 @@ func deepCopy_v1_Container(in Container, out *Container, c *conversion.Cloner) e out.TerminationMessagePath = in.TerminationMessagePath out.ImagePullPolicy = in.ImagePullPolicy if in.SecurityContext != nil { - out.SecurityContext = new(SecurityContext) - if err := deepCopy_v1_SecurityContext(*in.SecurityContext, out.SecurityContext, c); err != nil { + in, out := in.SecurityContext, &out.SecurityContext + *out = new(SecurityContext) + if err := DeepCopy_v1_SecurityContext(*in, *out, c); err != nil { return err } } else { @@ -348,12 +512,11 @@ func deepCopy_v1_Container(in Container, out *Container, c *conversion.Cloner) e return nil } -func deepCopy_v1_ContainerImage(in ContainerImage, out *ContainerImage, c *conversion.Cloner) error { +func DeepCopy_v1_ContainerImage(in ContainerImage, out *ContainerImage, c *conversion.Cloner) error { if in.Names != nil { - out.Names = make([]string, len(in.Names)) - for i := range in.Names { - out.Names[i] = in.Names[i] - } + in, out := in.Names, &out.Names + *out = make([]string, len(in)) + copy(*out, in) } else { out.Names = nil } @@ -361,7 +524,7 @@ func deepCopy_v1_ContainerImage(in ContainerImage, out *ContainerImage, c *conve return nil } -func deepCopy_v1_ContainerPort(in ContainerPort, out *ContainerPort, c *conversion.Cloner) error { +func DeepCopy_v1_ContainerPort(in ContainerPort, out *ContainerPort, c *conversion.Cloner) error { out.Name = in.Name out.HostPort = in.HostPort out.ContainerPort = in.ContainerPort @@ -370,26 +533,29 @@ func deepCopy_v1_ContainerPort(in ContainerPort, out *ContainerPort, c *conversi return nil } -func deepCopy_v1_ContainerState(in ContainerState, out *ContainerState, c *conversion.Cloner) error { +func DeepCopy_v1_ContainerState(in ContainerState, out *ContainerState, c *conversion.Cloner) error { if in.Waiting != nil { - out.Waiting = new(ContainerStateWaiting) - if err := deepCopy_v1_ContainerStateWaiting(*in.Waiting, out.Waiting, c); err != nil { + in, out := in.Waiting, &out.Waiting + *out = new(ContainerStateWaiting) + if err := DeepCopy_v1_ContainerStateWaiting(*in, *out, c); err != nil { return err } } else { out.Waiting = nil } if in.Running != nil { - out.Running = new(ContainerStateRunning) - if err := deepCopy_v1_ContainerStateRunning(*in.Running, out.Running, c); err != nil { + in, out := in.Running, &out.Running + *out = new(ContainerStateRunning) + if err := DeepCopy_v1_ContainerStateRunning(*in, *out, c); err != nil { return err } } else { out.Running = nil } if in.Terminated != nil { - out.Terminated = new(ContainerStateTerminated) - if err := deepCopy_v1_ContainerStateTerminated(*in.Terminated, out.Terminated, c); err != nil { + in, out := in.Terminated, &out.Terminated + *out = new(ContainerStateTerminated) + if err := DeepCopy_v1_ContainerStateTerminated(*in, *out, c); err != nil { return err } } else { @@ -398,40 +564,40 @@ func deepCopy_v1_ContainerState(in ContainerState, out *ContainerState, c *conve return nil } -func 
deepCopy_v1_ContainerStateRunning(in ContainerStateRunning, out *ContainerStateRunning, c *conversion.Cloner) error { - if err := deepCopy_unversioned_Time(in.StartedAt, &out.StartedAt, c); err != nil { +func DeepCopy_v1_ContainerStateRunning(in ContainerStateRunning, out *ContainerStateRunning, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_Time(in.StartedAt, &out.StartedAt, c); err != nil { return err } return nil } -func deepCopy_v1_ContainerStateTerminated(in ContainerStateTerminated, out *ContainerStateTerminated, c *conversion.Cloner) error { +func DeepCopy_v1_ContainerStateTerminated(in ContainerStateTerminated, out *ContainerStateTerminated, c *conversion.Cloner) error { out.ExitCode = in.ExitCode out.Signal = in.Signal out.Reason = in.Reason out.Message = in.Message - if err := deepCopy_unversioned_Time(in.StartedAt, &out.StartedAt, c); err != nil { + if err := unversioned.DeepCopy_unversioned_Time(in.StartedAt, &out.StartedAt, c); err != nil { return err } - if err := deepCopy_unversioned_Time(in.FinishedAt, &out.FinishedAt, c); err != nil { + if err := unversioned.DeepCopy_unversioned_Time(in.FinishedAt, &out.FinishedAt, c); err != nil { return err } out.ContainerID = in.ContainerID return nil } -func deepCopy_v1_ContainerStateWaiting(in ContainerStateWaiting, out *ContainerStateWaiting, c *conversion.Cloner) error { +func DeepCopy_v1_ContainerStateWaiting(in ContainerStateWaiting, out *ContainerStateWaiting, c *conversion.Cloner) error { out.Reason = in.Reason out.Message = in.Message return nil } -func deepCopy_v1_ContainerStatus(in ContainerStatus, out *ContainerStatus, c *conversion.Cloner) error { +func DeepCopy_v1_ContainerStatus(in ContainerStatus, out *ContainerStatus, c *conversion.Cloner) error { out.Name = in.Name - if err := deepCopy_v1_ContainerState(in.State, &out.State, c); err != nil { + if err := DeepCopy_v1_ContainerState(in.State, &out.State, c); err != nil { return err } - if err := deepCopy_v1_ContainerState(in.LastTerminationState, &out.LastTerminationState, c); err != nil { + if err := DeepCopy_v1_ContainerState(in.LastTerminationState, &out.LastTerminationState, c); err != nil { return err } out.Ready = in.Ready @@ -442,37 +608,70 @@ func deepCopy_v1_ContainerStatus(in ContainerStatus, out *ContainerStatus, c *co return nil } -func deepCopy_v1_DaemonEndpoint(in DaemonEndpoint, out *DaemonEndpoint, c *conversion.Cloner) error { +func DeepCopy_v1_DaemonEndpoint(in DaemonEndpoint, out *DaemonEndpoint, c *conversion.Cloner) error { out.Port = in.Port return nil } -func deepCopy_v1_DeleteOptions(in DeleteOptions, out *DeleteOptions, c *conversion.Cloner) error { - if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { +func DeepCopy_v1_DeleteOptions(in DeleteOptions, out *DeleteOptions, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } if in.GracePeriodSeconds != nil { - out.GracePeriodSeconds = new(int64) - *out.GracePeriodSeconds = *in.GracePeriodSeconds + in, out := in.GracePeriodSeconds, &out.GracePeriodSeconds + *out = new(int64) + **out = *in } else { out.GracePeriodSeconds = nil } + if in.Preconditions != nil { + in, out := in.Preconditions, &out.Preconditions + *out = new(Preconditions) + if err := DeepCopy_v1_Preconditions(*in, *out, c); err != nil { + return err + } + } else { + out.Preconditions = nil + } + if in.OrphanDependents != nil { + in, out := in.OrphanDependents, &out.OrphanDependents + *out = 
new(bool) + **out = *in + } else { + out.OrphanDependents = nil + } return nil } -func deepCopy_v1_DownwardAPIVolumeFile(in DownwardAPIVolumeFile, out *DownwardAPIVolumeFile, c *conversion.Cloner) error { +func DeepCopy_v1_DownwardAPIVolumeFile(in DownwardAPIVolumeFile, out *DownwardAPIVolumeFile, c *conversion.Cloner) error { out.Path = in.Path - if err := deepCopy_v1_ObjectFieldSelector(in.FieldRef, &out.FieldRef, c); err != nil { - return err + if in.FieldRef != nil { + in, out := in.FieldRef, &out.FieldRef + *out = new(ObjectFieldSelector) + if err := DeepCopy_v1_ObjectFieldSelector(*in, *out, c); err != nil { + return err + } + } else { + out.FieldRef = nil + } + if in.ResourceFieldRef != nil { + in, out := in.ResourceFieldRef, &out.ResourceFieldRef + *out = new(ResourceFieldSelector) + if err := DeepCopy_v1_ResourceFieldSelector(*in, *out, c); err != nil { + return err + } + } else { + out.ResourceFieldRef = nil } return nil } -func deepCopy_v1_DownwardAPIVolumeSource(in DownwardAPIVolumeSource, out *DownwardAPIVolumeSource, c *conversion.Cloner) error { +func DeepCopy_v1_DownwardAPIVolumeSource(in DownwardAPIVolumeSource, out *DownwardAPIVolumeSource, c *conversion.Cloner) error { if in.Items != nil { - out.Items = make([]DownwardAPIVolumeFile, len(in.Items)) - for i := range in.Items { - if err := deepCopy_v1_DownwardAPIVolumeFile(in.Items[i], &out.Items[i], c); err != nil { + in, out := in.Items, &out.Items + *out = make([]DownwardAPIVolumeFile, len(in)) + for i := range in { + if err := DeepCopy_v1_DownwardAPIVolumeFile(in[i], &(*out)[i], c); err != nil { return err } } @@ -482,16 +681,18 @@ func deepCopy_v1_DownwardAPIVolumeSource(in DownwardAPIVolumeSource, out *Downwa return nil } -func deepCopy_v1_EmptyDirVolumeSource(in EmptyDirVolumeSource, out *EmptyDirVolumeSource, c *conversion.Cloner) error { +func DeepCopy_v1_EmptyDirVolumeSource(in EmptyDirVolumeSource, out *EmptyDirVolumeSource, c *conversion.Cloner) error { out.Medium = in.Medium return nil } -func deepCopy_v1_EndpointAddress(in EndpointAddress, out *EndpointAddress, c *conversion.Cloner) error { +func DeepCopy_v1_EndpointAddress(in EndpointAddress, out *EndpointAddress, c *conversion.Cloner) error { out.IP = in.IP + out.Hostname = in.Hostname if in.TargetRef != nil { - out.TargetRef = new(ObjectReference) - if err := deepCopy_v1_ObjectReference(*in.TargetRef, out.TargetRef, c); err != nil { + in, out := in.TargetRef, &out.TargetRef + *out = new(ObjectReference) + if err := DeepCopy_v1_ObjectReference(*in, *out, c); err != nil { return err } } else { @@ -500,18 +701,19 @@ func deepCopy_v1_EndpointAddress(in EndpointAddress, out *EndpointAddress, c *co return nil } -func deepCopy_v1_EndpointPort(in EndpointPort, out *EndpointPort, c *conversion.Cloner) error { +func DeepCopy_v1_EndpointPort(in EndpointPort, out *EndpointPort, c *conversion.Cloner) error { out.Name = in.Name out.Port = in.Port out.Protocol = in.Protocol return nil } -func deepCopy_v1_EndpointSubset(in EndpointSubset, out *EndpointSubset, c *conversion.Cloner) error { +func DeepCopy_v1_EndpointSubset(in EndpointSubset, out *EndpointSubset, c *conversion.Cloner) error { if in.Addresses != nil { - out.Addresses = make([]EndpointAddress, len(in.Addresses)) - for i := range in.Addresses { - if err := deepCopy_v1_EndpointAddress(in.Addresses[i], &out.Addresses[i], c); err != nil { + in, out := in.Addresses, &out.Addresses + *out = make([]EndpointAddress, len(in)) + for i := range in { + if err := DeepCopy_v1_EndpointAddress(in[i], &(*out)[i], c); err != nil 
{ return err } } @@ -519,9 +721,10 @@ func deepCopy_v1_EndpointSubset(in EndpointSubset, out *EndpointSubset, c *conve out.Addresses = nil } if in.NotReadyAddresses != nil { - out.NotReadyAddresses = make([]EndpointAddress, len(in.NotReadyAddresses)) - for i := range in.NotReadyAddresses { - if err := deepCopy_v1_EndpointAddress(in.NotReadyAddresses[i], &out.NotReadyAddresses[i], c); err != nil { + in, out := in.NotReadyAddresses, &out.NotReadyAddresses + *out = make([]EndpointAddress, len(in)) + for i := range in { + if err := DeepCopy_v1_EndpointAddress(in[i], &(*out)[i], c); err != nil { return err } } @@ -529,9 +732,10 @@ func deepCopy_v1_EndpointSubset(in EndpointSubset, out *EndpointSubset, c *conve out.NotReadyAddresses = nil } if in.Ports != nil { - out.Ports = make([]EndpointPort, len(in.Ports)) - for i := range in.Ports { - if err := deepCopy_v1_EndpointPort(in.Ports[i], &out.Ports[i], c); err != nil { + in, out := in.Ports, &out.Ports + *out = make([]EndpointPort, len(in)) + for i := range in { + if err := DeepCopy_v1_EndpointPort(in[i], &(*out)[i], c); err != nil { return err } } @@ -541,17 +745,18 @@ func deepCopy_v1_EndpointSubset(in EndpointSubset, out *EndpointSubset, c *conve return nil } -func deepCopy_v1_Endpoints(in Endpoints, out *Endpoints, c *conversion.Cloner) error { - if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { +func DeepCopy_v1_Endpoints(in Endpoints, out *Endpoints, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } - if err := deepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + if err := DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { return err } if in.Subsets != nil { - out.Subsets = make([]EndpointSubset, len(in.Subsets)) - for i := range in.Subsets { - if err := deepCopy_v1_EndpointSubset(in.Subsets[i], &out.Subsets[i], c); err != nil { + in, out := in.Subsets, &out.Subsets + *out = make([]EndpointSubset, len(in)) + for i := range in { + if err := DeepCopy_v1_EndpointSubset(in[i], &(*out)[i], c); err != nil { return err } } @@ -561,17 +766,18 @@ func deepCopy_v1_Endpoints(in Endpoints, out *Endpoints, c *conversion.Cloner) e return nil } -func deepCopy_v1_EndpointsList(in EndpointsList, out *EndpointsList, c *conversion.Cloner) error { - if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { +func DeepCopy_v1_EndpointsList(in EndpointsList, out *EndpointsList, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } - if err := deepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { return err } if in.Items != nil { - out.Items = make([]Endpoints, len(in.Items)) - for i := range in.Items { - if err := deepCopy_v1_Endpoints(in.Items[i], &out.Items[i], c); err != nil { + in, out := in.Items, &out.Items + *out = make([]Endpoints, len(in)) + for i := range in { + if err := DeepCopy_v1_Endpoints(in[i], &(*out)[i], c); err != nil { return err } } @@ -581,12 +787,13 @@ func deepCopy_v1_EndpointsList(in EndpointsList, out *EndpointsList, c *conversi return nil } -func deepCopy_v1_EnvVar(in EnvVar, out *EnvVar, c *conversion.Cloner) error { +func DeepCopy_v1_EnvVar(in EnvVar, out *EnvVar, c *conversion.Cloner) error { out.Name = in.Name out.Value = in.Value if 
in.ValueFrom != nil { - out.ValueFrom = new(EnvVarSource) - if err := deepCopy_v1_EnvVarSource(*in.ValueFrom, out.ValueFrom, c); err != nil { + in, out := in.ValueFrom, &out.ValueFrom + *out = new(EnvVarSource) + if err := DeepCopy_v1_EnvVarSource(*in, *out, c); err != nil { return err } } else { @@ -595,26 +802,38 @@ func deepCopy_v1_EnvVar(in EnvVar, out *EnvVar, c *conversion.Cloner) error { return nil } -func deepCopy_v1_EnvVarSource(in EnvVarSource, out *EnvVarSource, c *conversion.Cloner) error { +func DeepCopy_v1_EnvVarSource(in EnvVarSource, out *EnvVarSource, c *conversion.Cloner) error { if in.FieldRef != nil { - out.FieldRef = new(ObjectFieldSelector) - if err := deepCopy_v1_ObjectFieldSelector(*in.FieldRef, out.FieldRef, c); err != nil { + in, out := in.FieldRef, &out.FieldRef + *out = new(ObjectFieldSelector) + if err := DeepCopy_v1_ObjectFieldSelector(*in, *out, c); err != nil { return err } } else { out.FieldRef = nil } + if in.ResourceFieldRef != nil { + in, out := in.ResourceFieldRef, &out.ResourceFieldRef + *out = new(ResourceFieldSelector) + if err := DeepCopy_v1_ResourceFieldSelector(*in, *out, c); err != nil { + return err + } + } else { + out.ResourceFieldRef = nil + } if in.ConfigMapKeyRef != nil { - out.ConfigMapKeyRef = new(ConfigMapKeySelector) - if err := deepCopy_v1_ConfigMapKeySelector(*in.ConfigMapKeyRef, out.ConfigMapKeyRef, c); err != nil { + in, out := in.ConfigMapKeyRef, &out.ConfigMapKeyRef + *out = new(ConfigMapKeySelector) + if err := DeepCopy_v1_ConfigMapKeySelector(*in, *out, c); err != nil { return err } } else { out.ConfigMapKeyRef = nil } if in.SecretKeyRef != nil { - out.SecretKeyRef = new(SecretKeySelector) - if err := deepCopy_v1_SecretKeySelector(*in.SecretKeyRef, out.SecretKeyRef, c); err != nil { + in, out := in.SecretKeyRef, &out.SecretKeyRef + *out = new(SecretKeySelector) + if err := DeepCopy_v1_SecretKeySelector(*in, *out, c); err != nil { return err } } else { @@ -623,25 +842,25 @@ func deepCopy_v1_EnvVarSource(in EnvVarSource, out *EnvVarSource, c *conversion. 
return nil } -func deepCopy_v1_Event(in Event, out *Event, c *conversion.Cloner) error { - if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { +func DeepCopy_v1_Event(in Event, out *Event, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } - if err := deepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + if err := DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { return err } - if err := deepCopy_v1_ObjectReference(in.InvolvedObject, &out.InvolvedObject, c); err != nil { + if err := DeepCopy_v1_ObjectReference(in.InvolvedObject, &out.InvolvedObject, c); err != nil { return err } out.Reason = in.Reason out.Message = in.Message - if err := deepCopy_v1_EventSource(in.Source, &out.Source, c); err != nil { + if err := DeepCopy_v1_EventSource(in.Source, &out.Source, c); err != nil { return err } - if err := deepCopy_unversioned_Time(in.FirstTimestamp, &out.FirstTimestamp, c); err != nil { + if err := unversioned.DeepCopy_unversioned_Time(in.FirstTimestamp, &out.FirstTimestamp, c); err != nil { return err } - if err := deepCopy_unversioned_Time(in.LastTimestamp, &out.LastTimestamp, c); err != nil { + if err := unversioned.DeepCopy_unversioned_Time(in.LastTimestamp, &out.LastTimestamp, c); err != nil { return err } out.Count = in.Count @@ -649,17 +868,18 @@ func deepCopy_v1_Event(in Event, out *Event, c *conversion.Cloner) error { return nil } -func deepCopy_v1_EventList(in EventList, out *EventList, c *conversion.Cloner) error { - if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { +func DeepCopy_v1_EventList(in EventList, out *EventList, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } - if err := deepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { return err } if in.Items != nil { - out.Items = make([]Event, len(in.Items)) - for i := range in.Items { - if err := deepCopy_v1_Event(in.Items[i], &out.Items[i], c); err != nil { + in, out := in.Items, &out.Items + *out = make([]Event, len(in)) + for i := range in { + if err := DeepCopy_v1_Event(in[i], &(*out)[i], c); err != nil { return err } } @@ -669,26 +889,25 @@ func deepCopy_v1_EventList(in EventList, out *EventList, c *conversion.Cloner) e return nil } -func deepCopy_v1_EventSource(in EventSource, out *EventSource, c *conversion.Cloner) error { +func DeepCopy_v1_EventSource(in EventSource, out *EventSource, c *conversion.Cloner) error { out.Component = in.Component out.Host = in.Host return nil } -func deepCopy_v1_ExecAction(in ExecAction, out *ExecAction, c *conversion.Cloner) error { +func DeepCopy_v1_ExecAction(in ExecAction, out *ExecAction, c *conversion.Cloner) error { if in.Command != nil { - out.Command = make([]string, len(in.Command)) - for i := range in.Command { - out.Command[i] = in.Command[i] - } + in, out := in.Command, &out.Command + *out = make([]string, len(in)) + copy(*out, in) } else { out.Command = nil } return nil } -func deepCopy_v1_ExportOptions(in ExportOptions, out *ExportOptions, c *conversion.Cloner) error { - if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { +func DeepCopy_v1_ExportOptions(in ExportOptions, out *ExportOptions, c *conversion.Cloner) error { + if err := 
unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } out.Export = in.Export @@ -696,18 +915,18 @@ func deepCopy_v1_ExportOptions(in ExportOptions, out *ExportOptions, c *conversi return nil } -func deepCopy_v1_FCVolumeSource(in FCVolumeSource, out *FCVolumeSource, c *conversion.Cloner) error { +func DeepCopy_v1_FCVolumeSource(in FCVolumeSource, out *FCVolumeSource, c *conversion.Cloner) error { if in.TargetWWNs != nil { - out.TargetWWNs = make([]string, len(in.TargetWWNs)) - for i := range in.TargetWWNs { - out.TargetWWNs[i] = in.TargetWWNs[i] - } + in, out := in.TargetWWNs, &out.TargetWWNs + *out = make([]string, len(in)) + copy(*out, in) } else { out.TargetWWNs = nil } if in.Lun != nil { - out.Lun = new(int32) - *out.Lun = *in.Lun + in, out := in.Lun, &out.Lun + *out = new(int32) + **out = *in } else { out.Lun = nil } @@ -716,12 +935,13 @@ func deepCopy_v1_FCVolumeSource(in FCVolumeSource, out *FCVolumeSource, c *conve return nil } -func deepCopy_v1_FlexVolumeSource(in FlexVolumeSource, out *FlexVolumeSource, c *conversion.Cloner) error { +func DeepCopy_v1_FlexVolumeSource(in FlexVolumeSource, out *FlexVolumeSource, c *conversion.Cloner) error { out.Driver = in.Driver out.FSType = in.FSType if in.SecretRef != nil { - out.SecretRef = new(LocalObjectReference) - if err := deepCopy_v1_LocalObjectReference(*in.SecretRef, out.SecretRef, c); err != nil { + in, out := in.SecretRef, &out.SecretRef + *out = new(LocalObjectReference) + if err := DeepCopy_v1_LocalObjectReference(*in, *out, c); err != nil { return err } } else { @@ -729,9 +949,10 @@ func deepCopy_v1_FlexVolumeSource(in FlexVolumeSource, out *FlexVolumeSource, c } out.ReadOnly = in.ReadOnly if in.Options != nil { - out.Options = make(map[string]string) - for key, val := range in.Options { - out.Options[key] = val + in, out := in.Options, &out.Options + *out = make(map[string]string) + for key, val := range in { + (*out)[key] = val } } else { out.Options = nil @@ -739,12 +960,12 @@ func deepCopy_v1_FlexVolumeSource(in FlexVolumeSource, out *FlexVolumeSource, c return nil } -func deepCopy_v1_FlockerVolumeSource(in FlockerVolumeSource, out *FlockerVolumeSource, c *conversion.Cloner) error { +func DeepCopy_v1_FlockerVolumeSource(in FlockerVolumeSource, out *FlockerVolumeSource, c *conversion.Cloner) error { out.DatasetName = in.DatasetName return nil } -func deepCopy_v1_GCEPersistentDiskVolumeSource(in GCEPersistentDiskVolumeSource, out *GCEPersistentDiskVolumeSource, c *conversion.Cloner) error { +func DeepCopy_v1_GCEPersistentDiskVolumeSource(in GCEPersistentDiskVolumeSource, out *GCEPersistentDiskVolumeSource, c *conversion.Cloner) error { out.PDName = in.PDName out.FSType = in.FSType out.Partition = in.Partition @@ -752,31 +973,32 @@ func deepCopy_v1_GCEPersistentDiskVolumeSource(in GCEPersistentDiskVolumeSource, return nil } -func deepCopy_v1_GitRepoVolumeSource(in GitRepoVolumeSource, out *GitRepoVolumeSource, c *conversion.Cloner) error { +func DeepCopy_v1_GitRepoVolumeSource(in GitRepoVolumeSource, out *GitRepoVolumeSource, c *conversion.Cloner) error { out.Repository = in.Repository out.Revision = in.Revision out.Directory = in.Directory return nil } -func deepCopy_v1_GlusterfsVolumeSource(in GlusterfsVolumeSource, out *GlusterfsVolumeSource, c *conversion.Cloner) error { +func DeepCopy_v1_GlusterfsVolumeSource(in GlusterfsVolumeSource, out *GlusterfsVolumeSource, c *conversion.Cloner) error { out.EndpointsName = in.EndpointsName out.Path = in.Path out.ReadOnly = in.ReadOnly return 
nil } -func deepCopy_v1_HTTPGetAction(in HTTPGetAction, out *HTTPGetAction, c *conversion.Cloner) error { +func DeepCopy_v1_HTTPGetAction(in HTTPGetAction, out *HTTPGetAction, c *conversion.Cloner) error { out.Path = in.Path - if err := deepCopy_intstr_IntOrString(in.Port, &out.Port, c); err != nil { + if err := intstr.DeepCopy_intstr_IntOrString(in.Port, &out.Port, c); err != nil { return err } out.Host = in.Host out.Scheme = in.Scheme if in.HTTPHeaders != nil { - out.HTTPHeaders = make([]HTTPHeader, len(in.HTTPHeaders)) - for i := range in.HTTPHeaders { - if err := deepCopy_v1_HTTPHeader(in.HTTPHeaders[i], &out.HTTPHeaders[i], c); err != nil { + in, out := in.HTTPHeaders, &out.HTTPHeaders + *out = make([]HTTPHeader, len(in)) + for i := range in { + if err := DeepCopy_v1_HTTPHeader(in[i], &(*out)[i], c); err != nil { return err } } @@ -786,32 +1008,35 @@ func deepCopy_v1_HTTPGetAction(in HTTPGetAction, out *HTTPGetAction, c *conversi return nil } -func deepCopy_v1_HTTPHeader(in HTTPHeader, out *HTTPHeader, c *conversion.Cloner) error { +func DeepCopy_v1_HTTPHeader(in HTTPHeader, out *HTTPHeader, c *conversion.Cloner) error { out.Name = in.Name out.Value = in.Value return nil } -func deepCopy_v1_Handler(in Handler, out *Handler, c *conversion.Cloner) error { +func DeepCopy_v1_Handler(in Handler, out *Handler, c *conversion.Cloner) error { if in.Exec != nil { - out.Exec = new(ExecAction) - if err := deepCopy_v1_ExecAction(*in.Exec, out.Exec, c); err != nil { + in, out := in.Exec, &out.Exec + *out = new(ExecAction) + if err := DeepCopy_v1_ExecAction(*in, *out, c); err != nil { return err } } else { out.Exec = nil } if in.HTTPGet != nil { - out.HTTPGet = new(HTTPGetAction) - if err := deepCopy_v1_HTTPGetAction(*in.HTTPGet, out.HTTPGet, c); err != nil { + in, out := in.HTTPGet, &out.HTTPGet + *out = new(HTTPGetAction) + if err := DeepCopy_v1_HTTPGetAction(*in, *out, c); err != nil { return err } } else { out.HTTPGet = nil } if in.TCPSocket != nil { - out.TCPSocket = new(TCPSocketAction) - if err := deepCopy_v1_TCPSocketAction(*in.TCPSocket, out.TCPSocket, c); err != nil { + in, out := in.TCPSocket, &out.TCPSocket + *out = new(TCPSocketAction) + if err := DeepCopy_v1_TCPSocketAction(*in, *out, c); err != nil { return err } } else { @@ -820,12 +1045,12 @@ func deepCopy_v1_Handler(in Handler, out *Handler, c *conversion.Cloner) error { return nil } -func deepCopy_v1_HostPathVolumeSource(in HostPathVolumeSource, out *HostPathVolumeSource, c *conversion.Cloner) error { +func DeepCopy_v1_HostPathVolumeSource(in HostPathVolumeSource, out *HostPathVolumeSource, c *conversion.Cloner) error { out.Path = in.Path return nil } -func deepCopy_v1_ISCSIVolumeSource(in ISCSIVolumeSource, out *ISCSIVolumeSource, c *conversion.Cloner) error { +func DeepCopy_v1_ISCSIVolumeSource(in ISCSIVolumeSource, out *ISCSIVolumeSource, c *conversion.Cloner) error { out.TargetPortal = in.TargetPortal out.IQN = in.IQN out.Lun = in.Lun @@ -835,24 +1060,26 @@ func deepCopy_v1_ISCSIVolumeSource(in ISCSIVolumeSource, out *ISCSIVolumeSource, return nil } -func deepCopy_v1_KeyToPath(in KeyToPath, out *KeyToPath, c *conversion.Cloner) error { +func DeepCopy_v1_KeyToPath(in KeyToPath, out *KeyToPath, c *conversion.Cloner) error { out.Key = in.Key out.Path = in.Path return nil } -func deepCopy_v1_Lifecycle(in Lifecycle, out *Lifecycle, c *conversion.Cloner) error { +func DeepCopy_v1_Lifecycle(in Lifecycle, out *Lifecycle, c *conversion.Cloner) error { if in.PostStart != nil { - out.PostStart = new(Handler) - if err := 
deepCopy_v1_Handler(*in.PostStart, out.PostStart, c); err != nil { + in, out := in.PostStart, &out.PostStart + *out = new(Handler) + if err := DeepCopy_v1_Handler(*in, *out, c); err != nil { return err } } else { out.PostStart = nil } if in.PreStop != nil { - out.PreStop = new(Handler) - if err := deepCopy_v1_Handler(*in.PreStop, out.PreStop, c); err != nil { + in, out := in.PreStop, &out.PreStop + *out = new(Handler) + if err := DeepCopy_v1_Handler(*in, *out, c); err != nil { return err } } else { @@ -861,77 +1088,82 @@ func deepCopy_v1_Lifecycle(in Lifecycle, out *Lifecycle, c *conversion.Cloner) e return nil } -func deepCopy_v1_LimitRange(in LimitRange, out *LimitRange, c *conversion.Cloner) error { - if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { +func DeepCopy_v1_LimitRange(in LimitRange, out *LimitRange, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } - if err := deepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + if err := DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { return err } - if err := deepCopy_v1_LimitRangeSpec(in.Spec, &out.Spec, c); err != nil { + if err := DeepCopy_v1_LimitRangeSpec(in.Spec, &out.Spec, c); err != nil { return err } return nil } -func deepCopy_v1_LimitRangeItem(in LimitRangeItem, out *LimitRangeItem, c *conversion.Cloner) error { +func DeepCopy_v1_LimitRangeItem(in LimitRangeItem, out *LimitRangeItem, c *conversion.Cloner) error { out.Type = in.Type if in.Max != nil { - out.Max = make(ResourceList) - for key, val := range in.Max { + in, out := in.Max, &out.Max + *out = make(ResourceList) + for key, val := range in { newVal := new(resource.Quantity) - if err := deepCopy_resource_Quantity(val, newVal, c); err != nil { + if err := resource.DeepCopy_resource_Quantity(val, newVal, c); err != nil { return err } - out.Max[key] = *newVal + (*out)[key] = *newVal } } else { out.Max = nil } if in.Min != nil { - out.Min = make(ResourceList) - for key, val := range in.Min { + in, out := in.Min, &out.Min + *out = make(ResourceList) + for key, val := range in { newVal := new(resource.Quantity) - if err := deepCopy_resource_Quantity(val, newVal, c); err != nil { + if err := resource.DeepCopy_resource_Quantity(val, newVal, c); err != nil { return err } - out.Min[key] = *newVal + (*out)[key] = *newVal } } else { out.Min = nil } if in.Default != nil { - out.Default = make(ResourceList) - for key, val := range in.Default { + in, out := in.Default, &out.Default + *out = make(ResourceList) + for key, val := range in { newVal := new(resource.Quantity) - if err := deepCopy_resource_Quantity(val, newVal, c); err != nil { + if err := resource.DeepCopy_resource_Quantity(val, newVal, c); err != nil { return err } - out.Default[key] = *newVal + (*out)[key] = *newVal } } else { out.Default = nil } if in.DefaultRequest != nil { - out.DefaultRequest = make(ResourceList) - for key, val := range in.DefaultRequest { + in, out := in.DefaultRequest, &out.DefaultRequest + *out = make(ResourceList) + for key, val := range in { newVal := new(resource.Quantity) - if err := deepCopy_resource_Quantity(val, newVal, c); err != nil { + if err := resource.DeepCopy_resource_Quantity(val, newVal, c); err != nil { return err } - out.DefaultRequest[key] = *newVal + (*out)[key] = *newVal } } else { out.DefaultRequest = nil } if in.MaxLimitRequestRatio != nil { - out.MaxLimitRequestRatio = make(ResourceList) - for key, val := range 
in.MaxLimitRequestRatio { + in, out := in.MaxLimitRequestRatio, &out.MaxLimitRequestRatio + *out = make(ResourceList) + for key, val := range in { newVal := new(resource.Quantity) - if err := deepCopy_resource_Quantity(val, newVal, c); err != nil { + if err := resource.DeepCopy_resource_Quantity(val, newVal, c); err != nil { return err } - out.MaxLimitRequestRatio[key] = *newVal + (*out)[key] = *newVal } } else { out.MaxLimitRequestRatio = nil @@ -939,17 +1171,18 @@ func deepCopy_v1_LimitRangeItem(in LimitRangeItem, out *LimitRangeItem, c *conve return nil } -func deepCopy_v1_LimitRangeList(in LimitRangeList, out *LimitRangeList, c *conversion.Cloner) error { - if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { +func DeepCopy_v1_LimitRangeList(in LimitRangeList, out *LimitRangeList, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } - if err := deepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { return err } if in.Items != nil { - out.Items = make([]LimitRange, len(in.Items)) - for i := range in.Items { - if err := deepCopy_v1_LimitRange(in.Items[i], &out.Items[i], c); err != nil { + in, out := in.Items, &out.Items + *out = make([]LimitRange, len(in)) + for i := range in { + if err := DeepCopy_v1_LimitRange(in[i], &(*out)[i], c); err != nil { return err } } @@ -959,11 +1192,12 @@ func deepCopy_v1_LimitRangeList(in LimitRangeList, out *LimitRangeList, c *conve return nil } -func deepCopy_v1_LimitRangeSpec(in LimitRangeSpec, out *LimitRangeSpec, c *conversion.Cloner) error { +func DeepCopy_v1_LimitRangeSpec(in LimitRangeSpec, out *LimitRangeSpec, c *conversion.Cloner) error { if in.Limits != nil { - out.Limits = make([]LimitRangeItem, len(in.Limits)) - for i := range in.Limits { - if err := deepCopy_v1_LimitRangeItem(in.Limits[i], &out.Limits[i], c); err != nil { + in, out := in.Limits, &out.Limits + *out = make([]LimitRangeItem, len(in)) + for i := range in { + if err := DeepCopy_v1_LimitRangeItem(in[i], &(*out)[i], c); err != nil { return err } } @@ -973,17 +1207,18 @@ func deepCopy_v1_LimitRangeSpec(in LimitRangeSpec, out *LimitRangeSpec, c *conve return nil } -func deepCopy_v1_List(in List, out *List, c *conversion.Cloner) error { - if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { +func DeepCopy_v1_List(in List, out *List, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } - if err := deepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { return err } if in.Items != nil { - out.Items = make([]runtime.RawExtension, len(in.Items)) - for i := range in.Items { - if err := deepCopy_runtime_RawExtension(in.Items[i], &out.Items[i], c); err != nil { + in, out := in.Items, &out.Items + *out = make([]runtime.RawExtension, len(in)) + for i := range in { + if err := runtime.DeepCopy_runtime_RawExtension(in[i], &(*out)[i], c); err != nil { return err } } @@ -993,8 +1228,8 @@ func deepCopy_v1_List(in List, out *List, c *conversion.Cloner) error { return nil } -func deepCopy_v1_ListOptions(in ListOptions, out *ListOptions, c *conversion.Cloner) error { - if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, 
&out.TypeMeta, c); err != nil { +func DeepCopy_v1_ListOptions(in ListOptions, out *ListOptions, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } out.LabelSelector = in.LabelSelector @@ -1002,25 +1237,27 @@ func deepCopy_v1_ListOptions(in ListOptions, out *ListOptions, c *conversion.Clo out.Watch = in.Watch out.ResourceVersion = in.ResourceVersion if in.TimeoutSeconds != nil { - out.TimeoutSeconds = new(int64) - *out.TimeoutSeconds = *in.TimeoutSeconds + in, out := in.TimeoutSeconds, &out.TimeoutSeconds + *out = new(int64) + **out = *in } else { out.TimeoutSeconds = nil } return nil } -func deepCopy_v1_LoadBalancerIngress(in LoadBalancerIngress, out *LoadBalancerIngress, c *conversion.Cloner) error { +func DeepCopy_v1_LoadBalancerIngress(in LoadBalancerIngress, out *LoadBalancerIngress, c *conversion.Cloner) error { out.IP = in.IP out.Hostname = in.Hostname return nil } -func deepCopy_v1_LoadBalancerStatus(in LoadBalancerStatus, out *LoadBalancerStatus, c *conversion.Cloner) error { +func DeepCopy_v1_LoadBalancerStatus(in LoadBalancerStatus, out *LoadBalancerStatus, c *conversion.Cloner) error { if in.Ingress != nil { - out.Ingress = make([]LoadBalancerIngress, len(in.Ingress)) - for i := range in.Ingress { - if err := deepCopy_v1_LoadBalancerIngress(in.Ingress[i], &out.Ingress[i], c); err != nil { + in, out := in.Ingress, &out.Ingress + *out = make([]LoadBalancerIngress, len(in)) + for i := range in { + if err := DeepCopy_v1_LoadBalancerIngress(in[i], &(*out)[i], c); err != nil { return err } } @@ -1030,45 +1267,46 @@ func deepCopy_v1_LoadBalancerStatus(in LoadBalancerStatus, out *LoadBalancerStat return nil } -func deepCopy_v1_LocalObjectReference(in LocalObjectReference, out *LocalObjectReference, c *conversion.Cloner) error { +func DeepCopy_v1_LocalObjectReference(in LocalObjectReference, out *LocalObjectReference, c *conversion.Cloner) error { out.Name = in.Name return nil } -func deepCopy_v1_NFSVolumeSource(in NFSVolumeSource, out *NFSVolumeSource, c *conversion.Cloner) error { +func DeepCopy_v1_NFSVolumeSource(in NFSVolumeSource, out *NFSVolumeSource, c *conversion.Cloner) error { out.Server = in.Server out.Path = in.Path out.ReadOnly = in.ReadOnly return nil } -func deepCopy_v1_Namespace(in Namespace, out *Namespace, c *conversion.Cloner) error { - if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { +func DeepCopy_v1_Namespace(in Namespace, out *Namespace, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } - if err := deepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + if err := DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { return err } - if err := deepCopy_v1_NamespaceSpec(in.Spec, &out.Spec, c); err != nil { + if err := DeepCopy_v1_NamespaceSpec(in.Spec, &out.Spec, c); err != nil { return err } - if err := deepCopy_v1_NamespaceStatus(in.Status, &out.Status, c); err != nil { + if err := DeepCopy_v1_NamespaceStatus(in.Status, &out.Status, c); err != nil { return err } return nil } -func deepCopy_v1_NamespaceList(in NamespaceList, out *NamespaceList, c *conversion.Cloner) error { - if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { +func DeepCopy_v1_NamespaceList(in NamespaceList, out *NamespaceList, c *conversion.Cloner) error { + if err := 
unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } - if err := deepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { return err } if in.Items != nil { - out.Items = make([]Namespace, len(in.Items)) - for i := range in.Items { - if err := deepCopy_v1_Namespace(in.Items[i], &out.Items[i], c); err != nil { + in, out := in.Items, &out.Items + *out = make([]Namespace, len(in)) + for i := range in { + if err := DeepCopy_v1_Namespace(in[i], &(*out)[i], c); err != nil { return err } } @@ -1078,11 +1316,12 @@ func deepCopy_v1_NamespaceList(in NamespaceList, out *NamespaceList, c *conversi return nil } -func deepCopy_v1_NamespaceSpec(in NamespaceSpec, out *NamespaceSpec, c *conversion.Cloner) error { +func DeepCopy_v1_NamespaceSpec(in NamespaceSpec, out *NamespaceSpec, c *conversion.Cloner) error { if in.Finalizers != nil { - out.Finalizers = make([]FinalizerName, len(in.Finalizers)) - for i := range in.Finalizers { - out.Finalizers[i] = in.Finalizers[i] + in, out := in.Finalizers, &out.Finalizers + *out = make([]FinalizerName, len(in)) + for i := range in { + (*out)[i] = in[i] } } else { out.Finalizers = nil @@ -1090,40 +1329,64 @@ func deepCopy_v1_NamespaceSpec(in NamespaceSpec, out *NamespaceSpec, c *conversi return nil } -func deepCopy_v1_NamespaceStatus(in NamespaceStatus, out *NamespaceStatus, c *conversion.Cloner) error { +func DeepCopy_v1_NamespaceStatus(in NamespaceStatus, out *NamespaceStatus, c *conversion.Cloner) error { out.Phase = in.Phase return nil } -func deepCopy_v1_Node(in Node, out *Node, c *conversion.Cloner) error { - if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { +func DeepCopy_v1_Node(in Node, out *Node, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } - if err := deepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + if err := DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { return err } - if err := deepCopy_v1_NodeSpec(in.Spec, &out.Spec, c); err != nil { + if err := DeepCopy_v1_NodeSpec(in.Spec, &out.Spec, c); err != nil { return err } - if err := deepCopy_v1_NodeStatus(in.Status, &out.Status, c); err != nil { + if err := DeepCopy_v1_NodeStatus(in.Status, &out.Status, c); err != nil { return err } return nil } -func deepCopy_v1_NodeAddress(in NodeAddress, out *NodeAddress, c *conversion.Cloner) error { +func DeepCopy_v1_NodeAddress(in NodeAddress, out *NodeAddress, c *conversion.Cloner) error { out.Type = in.Type out.Address = in.Address return nil } -func deepCopy_v1_NodeCondition(in NodeCondition, out *NodeCondition, c *conversion.Cloner) error { +func DeepCopy_v1_NodeAffinity(in NodeAffinity, out *NodeAffinity, c *conversion.Cloner) error { + if in.RequiredDuringSchedulingIgnoredDuringExecution != nil { + in, out := in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution + *out = new(NodeSelector) + if err := DeepCopy_v1_NodeSelector(*in, *out, c); err != nil { + return err + } + } else { + out.RequiredDuringSchedulingIgnoredDuringExecution = nil + } + if in.PreferredDuringSchedulingIgnoredDuringExecution != nil { + in, out := in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution + *out = make([]PreferredSchedulingTerm, len(in)) + for i 
:= range in { + if err := DeepCopy_v1_PreferredSchedulingTerm(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.PreferredDuringSchedulingIgnoredDuringExecution = nil + } + return nil +} + +func DeepCopy_v1_NodeCondition(in NodeCondition, out *NodeCondition, c *conversion.Cloner) error { out.Type = in.Type out.Status = in.Status - if err := deepCopy_unversioned_Time(in.LastHeartbeatTime, &out.LastHeartbeatTime, c); err != nil { + if err := unversioned.DeepCopy_unversioned_Time(in.LastHeartbeatTime, &out.LastHeartbeatTime, c); err != nil { return err } - if err := deepCopy_unversioned_Time(in.LastTransitionTime, &out.LastTransitionTime, c); err != nil { + if err := unversioned.DeepCopy_unversioned_Time(in.LastTransitionTime, &out.LastTransitionTime, c); err != nil { return err } out.Reason = in.Reason @@ -1131,24 +1394,25 @@ func deepCopy_v1_NodeCondition(in NodeCondition, out *NodeCondition, c *conversi return nil } -func deepCopy_v1_NodeDaemonEndpoints(in NodeDaemonEndpoints, out *NodeDaemonEndpoints, c *conversion.Cloner) error { - if err := deepCopy_v1_DaemonEndpoint(in.KubeletEndpoint, &out.KubeletEndpoint, c); err != nil { +func DeepCopy_v1_NodeDaemonEndpoints(in NodeDaemonEndpoints, out *NodeDaemonEndpoints, c *conversion.Cloner) error { + if err := DeepCopy_v1_DaemonEndpoint(in.KubeletEndpoint, &out.KubeletEndpoint, c); err != nil { return err } return nil } -func deepCopy_v1_NodeList(in NodeList, out *NodeList, c *conversion.Cloner) error { - if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { +func DeepCopy_v1_NodeList(in NodeList, out *NodeList, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } - if err := deepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { return err } if in.Items != nil { - out.Items = make([]Node, len(in.Items)) - for i := range in.Items { - if err := deepCopy_v1_Node(in.Items[i], &out.Items[i], c); err != nil { + in, out := in.Items, &out.Items + *out = make([]Node, len(in)) + for i := range in { + if err := DeepCopy_v1_Node(in[i], &(*out)[i], c); err != nil { return err } } @@ -1158,15 +1422,58 @@ func deepCopy_v1_NodeList(in NodeList, out *NodeList, c *conversion.Cloner) erro return nil } -func deepCopy_v1_NodeProxyOptions(in NodeProxyOptions, out *NodeProxyOptions, c *conversion.Cloner) error { - if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { +func DeepCopy_v1_NodeProxyOptions(in NodeProxyOptions, out *NodeProxyOptions, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } out.Path = in.Path return nil } -func deepCopy_v1_NodeSpec(in NodeSpec, out *NodeSpec, c *conversion.Cloner) error { +func DeepCopy_v1_NodeSelector(in NodeSelector, out *NodeSelector, c *conversion.Cloner) error { + if in.NodeSelectorTerms != nil { + in, out := in.NodeSelectorTerms, &out.NodeSelectorTerms + *out = make([]NodeSelectorTerm, len(in)) + for i := range in { + if err := DeepCopy_v1_NodeSelectorTerm(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.NodeSelectorTerms = nil + } + return nil +} + +func DeepCopy_v1_NodeSelectorRequirement(in NodeSelectorRequirement, out *NodeSelectorRequirement, c *conversion.Cloner) error { + out.Key = in.Key + out.Operator = 
in.Operator + if in.Values != nil { + in, out := in.Values, &out.Values + *out = make([]string, len(in)) + copy(*out, in) + } else { + out.Values = nil + } + return nil +} + +func DeepCopy_v1_NodeSelectorTerm(in NodeSelectorTerm, out *NodeSelectorTerm, c *conversion.Cloner) error { + if in.MatchExpressions != nil { + in, out := in.MatchExpressions, &out.MatchExpressions + *out = make([]NodeSelectorRequirement, len(in)) + for i := range in { + if err := DeepCopy_v1_NodeSelectorRequirement(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.MatchExpressions = nil + } + return nil +} + +func DeepCopy_v1_NodeSpec(in NodeSpec, out *NodeSpec, c *conversion.Cloner) error { out.PodCIDR = in.PodCIDR out.ExternalID = in.ExternalID out.ProviderID = in.ProviderID @@ -1174,36 +1481,39 @@ func deepCopy_v1_NodeSpec(in NodeSpec, out *NodeSpec, c *conversion.Cloner) erro return nil } -func deepCopy_v1_NodeStatus(in NodeStatus, out *NodeStatus, c *conversion.Cloner) error { +func DeepCopy_v1_NodeStatus(in NodeStatus, out *NodeStatus, c *conversion.Cloner) error { if in.Capacity != nil { - out.Capacity = make(ResourceList) - for key, val := range in.Capacity { + in, out := in.Capacity, &out.Capacity + *out = make(ResourceList) + for key, val := range in { newVal := new(resource.Quantity) - if err := deepCopy_resource_Quantity(val, newVal, c); err != nil { + if err := resource.DeepCopy_resource_Quantity(val, newVal, c); err != nil { return err } - out.Capacity[key] = *newVal + (*out)[key] = *newVal } } else { out.Capacity = nil } if in.Allocatable != nil { - out.Allocatable = make(ResourceList) - for key, val := range in.Allocatable { + in, out := in.Allocatable, &out.Allocatable + *out = make(ResourceList) + for key, val := range in { newVal := new(resource.Quantity) - if err := deepCopy_resource_Quantity(val, newVal, c); err != nil { + if err := resource.DeepCopy_resource_Quantity(val, newVal, c); err != nil { return err } - out.Allocatable[key] = *newVal + (*out)[key] = *newVal } } else { out.Allocatable = nil } out.Phase = in.Phase if in.Conditions != nil { - out.Conditions = make([]NodeCondition, len(in.Conditions)) - for i := range in.Conditions { - if err := deepCopy_v1_NodeCondition(in.Conditions[i], &out.Conditions[i], c); err != nil { + in, out := in.Conditions, &out.Conditions + *out = make([]NodeCondition, len(in)) + for i := range in { + if err := DeepCopy_v1_NodeCondition(in[i], &(*out)[i], c); err != nil { return err } } @@ -1211,25 +1521,27 @@ func deepCopy_v1_NodeStatus(in NodeStatus, out *NodeStatus, c *conversion.Cloner out.Conditions = nil } if in.Addresses != nil { - out.Addresses = make([]NodeAddress, len(in.Addresses)) - for i := range in.Addresses { - if err := deepCopy_v1_NodeAddress(in.Addresses[i], &out.Addresses[i], c); err != nil { + in, out := in.Addresses, &out.Addresses + *out = make([]NodeAddress, len(in)) + for i := range in { + if err := DeepCopy_v1_NodeAddress(in[i], &(*out)[i], c); err != nil { return err } } } else { out.Addresses = nil } - if err := deepCopy_v1_NodeDaemonEndpoints(in.DaemonEndpoints, &out.DaemonEndpoints, c); err != nil { + if err := DeepCopy_v1_NodeDaemonEndpoints(in.DaemonEndpoints, &out.DaemonEndpoints, c); err != nil { return err } - if err := deepCopy_v1_NodeSystemInfo(in.NodeInfo, &out.NodeInfo, c); err != nil { + if err := DeepCopy_v1_NodeSystemInfo(in.NodeInfo, &out.NodeInfo, c); err != nil { return err } if in.Images != nil { - out.Images = make([]ContainerImage, len(in.Images)) - for i := range in.Images { - if err := 
deepCopy_v1_ContainerImage(in.Images[i], &out.Images[i], c); err != nil { + in, out := in.Images, &out.Images + *out = make([]ContainerImage, len(in)) + for i := range in { + if err := DeepCopy_v1_ContainerImage(in[i], &(*out)[i], c); err != nil { return err } } @@ -1239,7 +1551,7 @@ func deepCopy_v1_NodeStatus(in NodeStatus, out *NodeStatus, c *conversion.Cloner return nil } -func deepCopy_v1_NodeSystemInfo(in NodeSystemInfo, out *NodeSystemInfo, c *conversion.Cloner) error { +func DeepCopy_v1_NodeSystemInfo(in NodeSystemInfo, out *NodeSystemInfo, c *conversion.Cloner) error { out.MachineID = in.MachineID out.SystemUUID = in.SystemUUID out.BootID = in.BootID @@ -1248,16 +1560,18 @@ func deepCopy_v1_NodeSystemInfo(in NodeSystemInfo, out *NodeSystemInfo, c *conve out.ContainerRuntimeVersion = in.ContainerRuntimeVersion out.KubeletVersion = in.KubeletVersion out.KubeProxyVersion = in.KubeProxyVersion + out.OperatingSystem = in.OperatingSystem + out.Architecture = in.Architecture return nil } -func deepCopy_v1_ObjectFieldSelector(in ObjectFieldSelector, out *ObjectFieldSelector, c *conversion.Cloner) error { +func DeepCopy_v1_ObjectFieldSelector(in ObjectFieldSelector, out *ObjectFieldSelector, c *conversion.Cloner) error { out.APIVersion = in.APIVersion out.FieldPath = in.FieldPath return nil } -func deepCopy_v1_ObjectMeta(in ObjectMeta, out *ObjectMeta, c *conversion.Cloner) error { +func DeepCopy_v1_ObjectMeta(in ObjectMeta, out *ObjectMeta, c *conversion.Cloner) error { out.Name = in.Name out.GenerateName = in.GenerateName out.Namespace = in.Namespace @@ -1265,43 +1579,65 @@ func deepCopy_v1_ObjectMeta(in ObjectMeta, out *ObjectMeta, c *conversion.Cloner out.UID = in.UID out.ResourceVersion = in.ResourceVersion out.Generation = in.Generation - if err := deepCopy_unversioned_Time(in.CreationTimestamp, &out.CreationTimestamp, c); err != nil { + if err := unversioned.DeepCopy_unversioned_Time(in.CreationTimestamp, &out.CreationTimestamp, c); err != nil { return err } if in.DeletionTimestamp != nil { - out.DeletionTimestamp = new(unversioned.Time) - if err := deepCopy_unversioned_Time(*in.DeletionTimestamp, out.DeletionTimestamp, c); err != nil { + in, out := in.DeletionTimestamp, &out.DeletionTimestamp + *out = new(unversioned.Time) + if err := unversioned.DeepCopy_unversioned_Time(*in, *out, c); err != nil { return err } } else { out.DeletionTimestamp = nil } if in.DeletionGracePeriodSeconds != nil { - out.DeletionGracePeriodSeconds = new(int64) - *out.DeletionGracePeriodSeconds = *in.DeletionGracePeriodSeconds + in, out := in.DeletionGracePeriodSeconds, &out.DeletionGracePeriodSeconds + *out = new(int64) + **out = *in } else { out.DeletionGracePeriodSeconds = nil } if in.Labels != nil { - out.Labels = make(map[string]string) - for key, val := range in.Labels { - out.Labels[key] = val + in, out := in.Labels, &out.Labels + *out = make(map[string]string) + for key, val := range in { + (*out)[key] = val } } else { out.Labels = nil } if in.Annotations != nil { - out.Annotations = make(map[string]string) - for key, val := range in.Annotations { - out.Annotations[key] = val + in, out := in.Annotations, &out.Annotations + *out = make(map[string]string) + for key, val := range in { + (*out)[key] = val } } else { out.Annotations = nil } + if in.OwnerReferences != nil { + in, out := in.OwnerReferences, &out.OwnerReferences + *out = make([]OwnerReference, len(in)) + for i := range in { + if err := DeepCopy_v1_OwnerReference(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + 
out.OwnerReferences = nil + } + if in.Finalizers != nil { + in, out := in.Finalizers, &out.Finalizers + *out = make([]string, len(in)) + copy(*out, in) + } else { + out.Finalizers = nil + } return nil } -func deepCopy_v1_ObjectReference(in ObjectReference, out *ObjectReference, c *conversion.Cloner) error { +func DeepCopy_v1_ObjectReference(in ObjectReference, out *ObjectReference, c *conversion.Cloner) error { out.Kind = in.Kind out.Namespace = in.Namespace out.Name = in.Name @@ -1312,49 +1648,58 @@ func deepCopy_v1_ObjectReference(in ObjectReference, out *ObjectReference, c *co return nil } -func deepCopy_v1_PersistentVolume(in PersistentVolume, out *PersistentVolume, c *conversion.Cloner) error { - if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { +func DeepCopy_v1_OwnerReference(in OwnerReference, out *OwnerReference, c *conversion.Cloner) error { + out.APIVersion = in.APIVersion + out.Kind = in.Kind + out.Name = in.Name + out.UID = in.UID + return nil +} + +func DeepCopy_v1_PersistentVolume(in PersistentVolume, out *PersistentVolume, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } - if err := deepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + if err := DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { return err } - if err := deepCopy_v1_PersistentVolumeSpec(in.Spec, &out.Spec, c); err != nil { + if err := DeepCopy_v1_PersistentVolumeSpec(in.Spec, &out.Spec, c); err != nil { return err } - if err := deepCopy_v1_PersistentVolumeStatus(in.Status, &out.Status, c); err != nil { + if err := DeepCopy_v1_PersistentVolumeStatus(in.Status, &out.Status, c); err != nil { return err } return nil } -func deepCopy_v1_PersistentVolumeClaim(in PersistentVolumeClaim, out *PersistentVolumeClaim, c *conversion.Cloner) error { - if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { +func DeepCopy_v1_PersistentVolumeClaim(in PersistentVolumeClaim, out *PersistentVolumeClaim, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } - if err := deepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + if err := DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { return err } - if err := deepCopy_v1_PersistentVolumeClaimSpec(in.Spec, &out.Spec, c); err != nil { + if err := DeepCopy_v1_PersistentVolumeClaimSpec(in.Spec, &out.Spec, c); err != nil { return err } - if err := deepCopy_v1_PersistentVolumeClaimStatus(in.Status, &out.Status, c); err != nil { + if err := DeepCopy_v1_PersistentVolumeClaimStatus(in.Status, &out.Status, c); err != nil { return err } return nil } -func deepCopy_v1_PersistentVolumeClaimList(in PersistentVolumeClaimList, out *PersistentVolumeClaimList, c *conversion.Cloner) error { - if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { +func DeepCopy_v1_PersistentVolumeClaimList(in PersistentVolumeClaimList, out *PersistentVolumeClaimList, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } - if err := deepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { return err } if in.Items != nil { - out.Items = make([]PersistentVolumeClaim, 
len(in.Items)) - for i := range in.Items { - if err := deepCopy_v1_PersistentVolumeClaim(in.Items[i], &out.Items[i], c); err != nil { + in, out := in.Items, &out.Items + *out = make([]PersistentVolumeClaim, len(in)) + for i := range in { + if err := DeepCopy_v1_PersistentVolumeClaim(in[i], &(*out)[i], c); err != nil { return err } } @@ -1364,40 +1709,43 @@ func deepCopy_v1_PersistentVolumeClaimList(in PersistentVolumeClaimList, out *Pe return nil } -func deepCopy_v1_PersistentVolumeClaimSpec(in PersistentVolumeClaimSpec, out *PersistentVolumeClaimSpec, c *conversion.Cloner) error { +func DeepCopy_v1_PersistentVolumeClaimSpec(in PersistentVolumeClaimSpec, out *PersistentVolumeClaimSpec, c *conversion.Cloner) error { if in.AccessModes != nil { - out.AccessModes = make([]PersistentVolumeAccessMode, len(in.AccessModes)) - for i := range in.AccessModes { - out.AccessModes[i] = in.AccessModes[i] + in, out := in.AccessModes, &out.AccessModes + *out = make([]PersistentVolumeAccessMode, len(in)) + for i := range in { + (*out)[i] = in[i] } } else { out.AccessModes = nil } - if err := deepCopy_v1_ResourceRequirements(in.Resources, &out.Resources, c); err != nil { + if err := DeepCopy_v1_ResourceRequirements(in.Resources, &out.Resources, c); err != nil { return err } out.VolumeName = in.VolumeName return nil } -func deepCopy_v1_PersistentVolumeClaimStatus(in PersistentVolumeClaimStatus, out *PersistentVolumeClaimStatus, c *conversion.Cloner) error { +func DeepCopy_v1_PersistentVolumeClaimStatus(in PersistentVolumeClaimStatus, out *PersistentVolumeClaimStatus, c *conversion.Cloner) error { out.Phase = in.Phase if in.AccessModes != nil { - out.AccessModes = make([]PersistentVolumeAccessMode, len(in.AccessModes)) - for i := range in.AccessModes { - out.AccessModes[i] = in.AccessModes[i] + in, out := in.AccessModes, &out.AccessModes + *out = make([]PersistentVolumeAccessMode, len(in)) + for i := range in { + (*out)[i] = in[i] } } else { out.AccessModes = nil } if in.Capacity != nil { - out.Capacity = make(ResourceList) - for key, val := range in.Capacity { + in, out := in.Capacity, &out.Capacity + *out = make(ResourceList) + for key, val := range in { newVal := new(resource.Quantity) - if err := deepCopy_resource_Quantity(val, newVal, c); err != nil { + if err := resource.DeepCopy_resource_Quantity(val, newVal, c); err != nil { return err } - out.Capacity[key] = *newVal + (*out)[key] = *newVal } } else { out.Capacity = nil @@ -1405,23 +1753,24 @@ func deepCopy_v1_PersistentVolumeClaimStatus(in PersistentVolumeClaimStatus, out return nil } -func deepCopy_v1_PersistentVolumeClaimVolumeSource(in PersistentVolumeClaimVolumeSource, out *PersistentVolumeClaimVolumeSource, c *conversion.Cloner) error { +func DeepCopy_v1_PersistentVolumeClaimVolumeSource(in PersistentVolumeClaimVolumeSource, out *PersistentVolumeClaimVolumeSource, c *conversion.Cloner) error { out.ClaimName = in.ClaimName out.ReadOnly = in.ReadOnly return nil } -func deepCopy_v1_PersistentVolumeList(in PersistentVolumeList, out *PersistentVolumeList, c *conversion.Cloner) error { - if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { +func DeepCopy_v1_PersistentVolumeList(in PersistentVolumeList, out *PersistentVolumeList, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } - if err := deepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, 
&out.ListMeta, c); err != nil { return err } if in.Items != nil { - out.Items = make([]PersistentVolume, len(in.Items)) - for i := range in.Items { - if err := deepCopy_v1_PersistentVolume(in.Items[i], &out.Items[i], c); err != nil { + in, out := in.Items, &out.Items + *out = make([]PersistentVolume, len(in)) + for i := range in { + if err := DeepCopy_v1_PersistentVolume(in[i], &(*out)[i], c); err != nil { return err } } @@ -1431,141 +1780,166 @@ func deepCopy_v1_PersistentVolumeList(in PersistentVolumeList, out *PersistentVo return nil } -func deepCopy_v1_PersistentVolumeSource(in PersistentVolumeSource, out *PersistentVolumeSource, c *conversion.Cloner) error { +func DeepCopy_v1_PersistentVolumeSource(in PersistentVolumeSource, out *PersistentVolumeSource, c *conversion.Cloner) error { if in.GCEPersistentDisk != nil { - out.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) - if err := deepCopy_v1_GCEPersistentDiskVolumeSource(*in.GCEPersistentDisk, out.GCEPersistentDisk, c); err != nil { + in, out := in.GCEPersistentDisk, &out.GCEPersistentDisk + *out = new(GCEPersistentDiskVolumeSource) + if err := DeepCopy_v1_GCEPersistentDiskVolumeSource(*in, *out, c); err != nil { return err } } else { out.GCEPersistentDisk = nil } if in.AWSElasticBlockStore != nil { - out.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) - if err := deepCopy_v1_AWSElasticBlockStoreVolumeSource(*in.AWSElasticBlockStore, out.AWSElasticBlockStore, c); err != nil { + in, out := in.AWSElasticBlockStore, &out.AWSElasticBlockStore + *out = new(AWSElasticBlockStoreVolumeSource) + if err := DeepCopy_v1_AWSElasticBlockStoreVolumeSource(*in, *out, c); err != nil { return err } } else { out.AWSElasticBlockStore = nil } if in.HostPath != nil { - out.HostPath = new(HostPathVolumeSource) - if err := deepCopy_v1_HostPathVolumeSource(*in.HostPath, out.HostPath, c); err != nil { + in, out := in.HostPath, &out.HostPath + *out = new(HostPathVolumeSource) + if err := DeepCopy_v1_HostPathVolumeSource(*in, *out, c); err != nil { return err } } else { out.HostPath = nil } if in.Glusterfs != nil { - out.Glusterfs = new(GlusterfsVolumeSource) - if err := deepCopy_v1_GlusterfsVolumeSource(*in.Glusterfs, out.Glusterfs, c); err != nil { + in, out := in.Glusterfs, &out.Glusterfs + *out = new(GlusterfsVolumeSource) + if err := DeepCopy_v1_GlusterfsVolumeSource(*in, *out, c); err != nil { return err } } else { out.Glusterfs = nil } if in.NFS != nil { - out.NFS = new(NFSVolumeSource) - if err := deepCopy_v1_NFSVolumeSource(*in.NFS, out.NFS, c); err != nil { + in, out := in.NFS, &out.NFS + *out = new(NFSVolumeSource) + if err := DeepCopy_v1_NFSVolumeSource(*in, *out, c); err != nil { return err } } else { out.NFS = nil } if in.RBD != nil { - out.RBD = new(RBDVolumeSource) - if err := deepCopy_v1_RBDVolumeSource(*in.RBD, out.RBD, c); err != nil { + in, out := in.RBD, &out.RBD + *out = new(RBDVolumeSource) + if err := DeepCopy_v1_RBDVolumeSource(*in, *out, c); err != nil { return err } } else { out.RBD = nil } if in.ISCSI != nil { - out.ISCSI = new(ISCSIVolumeSource) - if err := deepCopy_v1_ISCSIVolumeSource(*in.ISCSI, out.ISCSI, c); err != nil { + in, out := in.ISCSI, &out.ISCSI + *out = new(ISCSIVolumeSource) + if err := DeepCopy_v1_ISCSIVolumeSource(*in, *out, c); err != nil { return err } } else { out.ISCSI = nil } if in.Cinder != nil { - out.Cinder = new(CinderVolumeSource) - if err := deepCopy_v1_CinderVolumeSource(*in.Cinder, out.Cinder, c); err != nil { + in, out := in.Cinder, &out.Cinder + *out = new(CinderVolumeSource) + if 
err := DeepCopy_v1_CinderVolumeSource(*in, *out, c); err != nil { return err } } else { out.Cinder = nil } if in.CephFS != nil { - out.CephFS = new(CephFSVolumeSource) - if err := deepCopy_v1_CephFSVolumeSource(*in.CephFS, out.CephFS, c); err != nil { + in, out := in.CephFS, &out.CephFS + *out = new(CephFSVolumeSource) + if err := DeepCopy_v1_CephFSVolumeSource(*in, *out, c); err != nil { return err } } else { out.CephFS = nil } if in.FC != nil { - out.FC = new(FCVolumeSource) - if err := deepCopy_v1_FCVolumeSource(*in.FC, out.FC, c); err != nil { + in, out := in.FC, &out.FC + *out = new(FCVolumeSource) + if err := DeepCopy_v1_FCVolumeSource(*in, *out, c); err != nil { return err } } else { out.FC = nil } if in.Flocker != nil { - out.Flocker = new(FlockerVolumeSource) - if err := deepCopy_v1_FlockerVolumeSource(*in.Flocker, out.Flocker, c); err != nil { + in, out := in.Flocker, &out.Flocker + *out = new(FlockerVolumeSource) + if err := DeepCopy_v1_FlockerVolumeSource(*in, *out, c); err != nil { return err } } else { out.Flocker = nil } if in.FlexVolume != nil { - out.FlexVolume = new(FlexVolumeSource) - if err := deepCopy_v1_FlexVolumeSource(*in.FlexVolume, out.FlexVolume, c); err != nil { + in, out := in.FlexVolume, &out.FlexVolume + *out = new(FlexVolumeSource) + if err := DeepCopy_v1_FlexVolumeSource(*in, *out, c); err != nil { return err } } else { out.FlexVolume = nil } if in.AzureFile != nil { - out.AzureFile = new(AzureFileVolumeSource) - if err := deepCopy_v1_AzureFileVolumeSource(*in.AzureFile, out.AzureFile, c); err != nil { + in, out := in.AzureFile, &out.AzureFile + *out = new(AzureFileVolumeSource) + if err := DeepCopy_v1_AzureFileVolumeSource(*in, *out, c); err != nil { return err } } else { out.AzureFile = nil } + if in.VsphereVolume != nil { + in, out := in.VsphereVolume, &out.VsphereVolume + *out = new(VsphereVirtualDiskVolumeSource) + if err := DeepCopy_v1_VsphereVirtualDiskVolumeSource(*in, *out, c); err != nil { + return err + } + } else { + out.VsphereVolume = nil + } return nil } -func deepCopy_v1_PersistentVolumeSpec(in PersistentVolumeSpec, out *PersistentVolumeSpec, c *conversion.Cloner) error { +func DeepCopy_v1_PersistentVolumeSpec(in PersistentVolumeSpec, out *PersistentVolumeSpec, c *conversion.Cloner) error { if in.Capacity != nil { - out.Capacity = make(ResourceList) - for key, val := range in.Capacity { + in, out := in.Capacity, &out.Capacity + *out = make(ResourceList) + for key, val := range in { newVal := new(resource.Quantity) - if err := deepCopy_resource_Quantity(val, newVal, c); err != nil { + if err := resource.DeepCopy_resource_Quantity(val, newVal, c); err != nil { return err } - out.Capacity[key] = *newVal + (*out)[key] = *newVal } } else { out.Capacity = nil } - if err := deepCopy_v1_PersistentVolumeSource(in.PersistentVolumeSource, &out.PersistentVolumeSource, c); err != nil { + if err := DeepCopy_v1_PersistentVolumeSource(in.PersistentVolumeSource, &out.PersistentVolumeSource, c); err != nil { return err } if in.AccessModes != nil { - out.AccessModes = make([]PersistentVolumeAccessMode, len(in.AccessModes)) - for i := range in.AccessModes { - out.AccessModes[i] = in.AccessModes[i] + in, out := in.AccessModes, &out.AccessModes + *out = make([]PersistentVolumeAccessMode, len(in)) + for i := range in { + (*out)[i] = in[i] } } else { out.AccessModes = nil } if in.ClaimRef != nil { - out.ClaimRef = new(ObjectReference) - if err := deepCopy_v1_ObjectReference(*in.ClaimRef, out.ClaimRef, c); err != nil { + in, out := in.ClaimRef, &out.ClaimRef + *out 
= new(ObjectReference) + if err := DeepCopy_v1_ObjectReference(*in, *out, c); err != nil { return err } } else { @@ -1575,31 +1949,104 @@ func deepCopy_v1_PersistentVolumeSpec(in PersistentVolumeSpec, out *PersistentVo return nil } -func deepCopy_v1_PersistentVolumeStatus(in PersistentVolumeStatus, out *PersistentVolumeStatus, c *conversion.Cloner) error { +func DeepCopy_v1_PersistentVolumeStatus(in PersistentVolumeStatus, out *PersistentVolumeStatus, c *conversion.Cloner) error { out.Phase = in.Phase out.Message = in.Message out.Reason = in.Reason return nil } -func deepCopy_v1_Pod(in Pod, out *Pod, c *conversion.Cloner) error { - if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { +func DeepCopy_v1_Pod(in Pod, out *Pod, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } - if err := deepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + if err := DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { return err } - if err := deepCopy_v1_PodSpec(in.Spec, &out.Spec, c); err != nil { + if err := DeepCopy_v1_PodSpec(in.Spec, &out.Spec, c); err != nil { return err } - if err := deepCopy_v1_PodStatus(in.Status, &out.Status, c); err != nil { + if err := DeepCopy_v1_PodStatus(in.Status, &out.Status, c); err != nil { return err } return nil } -func deepCopy_v1_PodAttachOptions(in PodAttachOptions, out *PodAttachOptions, c *conversion.Cloner) error { - if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { +func DeepCopy_v1_PodAffinity(in PodAffinity, out *PodAffinity, c *conversion.Cloner) error { + if in.RequiredDuringSchedulingIgnoredDuringExecution != nil { + in, out := in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution + *out = make([]PodAffinityTerm, len(in)) + for i := range in { + if err := DeepCopy_v1_PodAffinityTerm(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.RequiredDuringSchedulingIgnoredDuringExecution = nil + } + if in.PreferredDuringSchedulingIgnoredDuringExecution != nil { + in, out := in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution + *out = make([]WeightedPodAffinityTerm, len(in)) + for i := range in { + if err := DeepCopy_v1_WeightedPodAffinityTerm(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.PreferredDuringSchedulingIgnoredDuringExecution = nil + } + return nil +} + +func DeepCopy_v1_PodAffinityTerm(in PodAffinityTerm, out *PodAffinityTerm, c *conversion.Cloner) error { + if in.LabelSelector != nil { + in, out := in.LabelSelector, &out.LabelSelector + *out = new(unversioned.LabelSelector) + if err := unversioned.DeepCopy_unversioned_LabelSelector(*in, *out, c); err != nil { + return err + } + } else { + out.LabelSelector = nil + } + if in.Namespaces != nil { + in, out := in.Namespaces, &out.Namespaces + *out = make([]string, len(in)) + copy(*out, in) + } else { + out.Namespaces = nil + } + out.TopologyKey = in.TopologyKey + return nil +} + +func DeepCopy_v1_PodAntiAffinity(in PodAntiAffinity, out *PodAntiAffinity, c *conversion.Cloner) error { + if in.RequiredDuringSchedulingIgnoredDuringExecution != nil { + in, out := in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution + *out = make([]PodAffinityTerm, len(in)) + for i := range in { + if err := DeepCopy_v1_PodAffinityTerm(in[i], 
&(*out)[i], c); err != nil { + return err + } + } + } else { + out.RequiredDuringSchedulingIgnoredDuringExecution = nil + } + if in.PreferredDuringSchedulingIgnoredDuringExecution != nil { + in, out := in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution + *out = make([]WeightedPodAffinityTerm, len(in)) + for i := range in { + if err := DeepCopy_v1_WeightedPodAffinityTerm(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.PreferredDuringSchedulingIgnoredDuringExecution = nil + } + return nil +} + +func DeepCopy_v1_PodAttachOptions(in PodAttachOptions, out *PodAttachOptions, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } out.Stdin = in.Stdin @@ -1610,13 +2057,13 @@ func deepCopy_v1_PodAttachOptions(in PodAttachOptions, out *PodAttachOptions, c return nil } -func deepCopy_v1_PodCondition(in PodCondition, out *PodCondition, c *conversion.Cloner) error { +func DeepCopy_v1_PodCondition(in PodCondition, out *PodCondition, c *conversion.Cloner) error { out.Type = in.Type out.Status = in.Status - if err := deepCopy_unversioned_Time(in.LastProbeTime, &out.LastProbeTime, c); err != nil { + if err := unversioned.DeepCopy_unversioned_Time(in.LastProbeTime, &out.LastProbeTime, c); err != nil { return err } - if err := deepCopy_unversioned_Time(in.LastTransitionTime, &out.LastTransitionTime, c); err != nil { + if err := unversioned.DeepCopy_unversioned_Time(in.LastTransitionTime, &out.LastTransitionTime, c); err != nil { return err } out.Reason = in.Reason @@ -1624,8 +2071,8 @@ func deepCopy_v1_PodCondition(in PodCondition, out *PodCondition, c *conversion. return nil } -func deepCopy_v1_PodExecOptions(in PodExecOptions, out *PodExecOptions, c *conversion.Cloner) error { - if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { +func DeepCopy_v1_PodExecOptions(in PodExecOptions, out *PodExecOptions, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } out.Stdin = in.Stdin @@ -1634,27 +2081,27 @@ func deepCopy_v1_PodExecOptions(in PodExecOptions, out *PodExecOptions, c *conve out.TTY = in.TTY out.Container = in.Container if in.Command != nil { - out.Command = make([]string, len(in.Command)) - for i := range in.Command { - out.Command[i] = in.Command[i] - } + in, out := in.Command, &out.Command + *out = make([]string, len(in)) + copy(*out, in) } else { out.Command = nil } return nil } -func deepCopy_v1_PodList(in PodList, out *PodList, c *conversion.Cloner) error { - if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { +func DeepCopy_v1_PodList(in PodList, out *PodList, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } - if err := deepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { return err } if in.Items != nil { - out.Items = make([]Pod, len(in.Items)) - for i := range in.Items { - if err := deepCopy_v1_Pod(in.Items[i], &out.Items[i], c); err != nil { + in, out := in.Items, &out.Items + *out = make([]Pod, len(in)) + for i := range in { + if err := DeepCopy_v1_Pod(in[i], &(*out)[i], c); err != nil { return err } } @@ -1664,22 +2111,24 @@ func deepCopy_v1_PodList(in PodList, out 
*PodList, c *conversion.Cloner) error { return nil } -func deepCopy_v1_PodLogOptions(in PodLogOptions, out *PodLogOptions, c *conversion.Cloner) error { - if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { +func DeepCopy_v1_PodLogOptions(in PodLogOptions, out *PodLogOptions, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } out.Container = in.Container out.Follow = in.Follow out.Previous = in.Previous if in.SinceSeconds != nil { - out.SinceSeconds = new(int64) - *out.SinceSeconds = *in.SinceSeconds + in, out := in.SinceSeconds, &out.SinceSeconds + *out = new(int64) + **out = *in } else { out.SinceSeconds = nil } if in.SinceTime != nil { - out.SinceTime = new(unversioned.Time) - if err := deepCopy_unversioned_Time(*in.SinceTime, out.SinceTime, c); err != nil { + in, out := in.SinceTime, &out.SinceTime + *out = new(unversioned.Time) + if err := unversioned.DeepCopy_unversioned_Time(*in, *out, c); err != nil { return err } } else { @@ -1687,81 +2136,99 @@ func deepCopy_v1_PodLogOptions(in PodLogOptions, out *PodLogOptions, c *conversi } out.Timestamps = in.Timestamps if in.TailLines != nil { - out.TailLines = new(int64) - *out.TailLines = *in.TailLines + in, out := in.TailLines, &out.TailLines + *out = new(int64) + **out = *in } else { out.TailLines = nil } if in.LimitBytes != nil { - out.LimitBytes = new(int64) - *out.LimitBytes = *in.LimitBytes + in, out := in.LimitBytes, &out.LimitBytes + *out = new(int64) + **out = *in } else { out.LimitBytes = nil } return nil } -func deepCopy_v1_PodProxyOptions(in PodProxyOptions, out *PodProxyOptions, c *conversion.Cloner) error { - if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { +func DeepCopy_v1_PodProxyOptions(in PodProxyOptions, out *PodProxyOptions, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } out.Path = in.Path return nil } -func deepCopy_v1_PodSecurityContext(in PodSecurityContext, out *PodSecurityContext, c *conversion.Cloner) error { +func DeepCopy_v1_PodSecurityContext(in PodSecurityContext, out *PodSecurityContext, c *conversion.Cloner) error { if in.SELinuxOptions != nil { - out.SELinuxOptions = new(SELinuxOptions) - if err := deepCopy_v1_SELinuxOptions(*in.SELinuxOptions, out.SELinuxOptions, c); err != nil { + in, out := in.SELinuxOptions, &out.SELinuxOptions + *out = new(SELinuxOptions) + if err := DeepCopy_v1_SELinuxOptions(*in, *out, c); err != nil { return err } } else { out.SELinuxOptions = nil } if in.RunAsUser != nil { - out.RunAsUser = new(int64) - *out.RunAsUser = *in.RunAsUser + in, out := in.RunAsUser, &out.RunAsUser + *out = new(int64) + **out = *in } else { out.RunAsUser = nil } if in.RunAsNonRoot != nil { - out.RunAsNonRoot = new(bool) - *out.RunAsNonRoot = *in.RunAsNonRoot + in, out := in.RunAsNonRoot, &out.RunAsNonRoot + *out = new(bool) + **out = *in } else { out.RunAsNonRoot = nil } if in.SupplementalGroups != nil { - out.SupplementalGroups = make([]int64, len(in.SupplementalGroups)) - for i := range in.SupplementalGroups { - out.SupplementalGroups[i] = in.SupplementalGroups[i] - } + in, out := in.SupplementalGroups, &out.SupplementalGroups + *out = make([]int64, len(in)) + copy(*out, in) } else { out.SupplementalGroups = nil } if in.FSGroup != nil { - out.FSGroup = new(int64) - *out.FSGroup = *in.FSGroup + in, out := in.FSGroup, &out.FSGroup + *out = new(int64) + 
**out = *in } else { out.FSGroup = nil } return nil } -func deepCopy_v1_PodSpec(in PodSpec, out *PodSpec, c *conversion.Cloner) error { +func DeepCopy_v1_PodSpec(in PodSpec, out *PodSpec, c *conversion.Cloner) error { if in.Volumes != nil { - out.Volumes = make([]Volume, len(in.Volumes)) - for i := range in.Volumes { - if err := deepCopy_v1_Volume(in.Volumes[i], &out.Volumes[i], c); err != nil { + in, out := in.Volumes, &out.Volumes + *out = make([]Volume, len(in)) + for i := range in { + if err := DeepCopy_v1_Volume(in[i], &(*out)[i], c); err != nil { return err } } } else { out.Volumes = nil } + if in.InitContainers != nil { + in, out := in.InitContainers, &out.InitContainers + *out = make([]Container, len(in)) + for i := range in { + if err := DeepCopy_v1_Container(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.InitContainers = nil + } if in.Containers != nil { - out.Containers = make([]Container, len(in.Containers)) - for i := range in.Containers { - if err := deepCopy_v1_Container(in.Containers[i], &out.Containers[i], c); err != nil { + in, out := in.Containers, &out.Containers + *out = make([]Container, len(in)) + for i := range in { + if err := DeepCopy_v1_Container(in[i], &(*out)[i], c); err != nil { return err } } @@ -1770,22 +2237,25 @@ func deepCopy_v1_PodSpec(in PodSpec, out *PodSpec, c *conversion.Cloner) error { } out.RestartPolicy = in.RestartPolicy if in.TerminationGracePeriodSeconds != nil { - out.TerminationGracePeriodSeconds = new(int64) - *out.TerminationGracePeriodSeconds = *in.TerminationGracePeriodSeconds + in, out := in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds + *out = new(int64) + **out = *in } else { out.TerminationGracePeriodSeconds = nil } if in.ActiveDeadlineSeconds != nil { - out.ActiveDeadlineSeconds = new(int64) - *out.ActiveDeadlineSeconds = *in.ActiveDeadlineSeconds + in, out := in.ActiveDeadlineSeconds, &out.ActiveDeadlineSeconds + *out = new(int64) + **out = *in } else { out.ActiveDeadlineSeconds = nil } out.DNSPolicy = in.DNSPolicy if in.NodeSelector != nil { - out.NodeSelector = make(map[string]string) - for key, val := range in.NodeSelector { - out.NodeSelector[key] = val + in, out := in.NodeSelector, &out.NodeSelector + *out = make(map[string]string) + for key, val := range in { + (*out)[key] = val } } else { out.NodeSelector = nil @@ -1797,32 +2267,37 @@ func deepCopy_v1_PodSpec(in PodSpec, out *PodSpec, c *conversion.Cloner) error { out.HostPID = in.HostPID out.HostIPC = in.HostIPC if in.SecurityContext != nil { - out.SecurityContext = new(PodSecurityContext) - if err := deepCopy_v1_PodSecurityContext(*in.SecurityContext, out.SecurityContext, c); err != nil { + in, out := in.SecurityContext, &out.SecurityContext + *out = new(PodSecurityContext) + if err := DeepCopy_v1_PodSecurityContext(*in, *out, c); err != nil { return err } } else { out.SecurityContext = nil } if in.ImagePullSecrets != nil { - out.ImagePullSecrets = make([]LocalObjectReference, len(in.ImagePullSecrets)) - for i := range in.ImagePullSecrets { - if err := deepCopy_v1_LocalObjectReference(in.ImagePullSecrets[i], &out.ImagePullSecrets[i], c); err != nil { + in, out := in.ImagePullSecrets, &out.ImagePullSecrets + *out = make([]LocalObjectReference, len(in)) + for i := range in { + if err := DeepCopy_v1_LocalObjectReference(in[i], &(*out)[i], c); err != nil { return err } } } else { out.ImagePullSecrets = nil } + out.Hostname = in.Hostname + out.Subdomain = in.Subdomain return nil } -func deepCopy_v1_PodStatus(in PodStatus, out 
*PodStatus, c *conversion.Cloner) error { +func DeepCopy_v1_PodStatus(in PodStatus, out *PodStatus, c *conversion.Cloner) error { out.Phase = in.Phase if in.Conditions != nil { - out.Conditions = make([]PodCondition, len(in.Conditions)) - for i := range in.Conditions { - if err := deepCopy_v1_PodCondition(in.Conditions[i], &out.Conditions[i], c); err != nil { + in, out := in.Conditions, &out.Conditions + *out = make([]PodCondition, len(in)) + for i := range in { + if err := DeepCopy_v1_PodCondition(in[i], &(*out)[i], c); err != nil { return err } } @@ -1834,17 +2309,30 @@ func deepCopy_v1_PodStatus(in PodStatus, out *PodStatus, c *conversion.Cloner) e out.HostIP = in.HostIP out.PodIP = in.PodIP if in.StartTime != nil { - out.StartTime = new(unversioned.Time) - if err := deepCopy_unversioned_Time(*in.StartTime, out.StartTime, c); err != nil { + in, out := in.StartTime, &out.StartTime + *out = new(unversioned.Time) + if err := unversioned.DeepCopy_unversioned_Time(*in, *out, c); err != nil { return err } } else { out.StartTime = nil } + if in.InitContainerStatuses != nil { + in, out := in.InitContainerStatuses, &out.InitContainerStatuses + *out = make([]ContainerStatus, len(in)) + for i := range in { + if err := DeepCopy_v1_ContainerStatus(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.InitContainerStatuses = nil + } if in.ContainerStatuses != nil { - out.ContainerStatuses = make([]ContainerStatus, len(in.ContainerStatuses)) - for i := range in.ContainerStatuses { - if err := deepCopy_v1_ContainerStatus(in.ContainerStatuses[i], &out.ContainerStatuses[i], c); err != nil { + in, out := in.ContainerStatuses, &out.ContainerStatuses + *out = make([]ContainerStatus, len(in)) + for i := range in { + if err := DeepCopy_v1_ContainerStatus(in[i], &(*out)[i], c); err != nil { return err } } @@ -1854,43 +2342,44 @@ func deepCopy_v1_PodStatus(in PodStatus, out *PodStatus, c *conversion.Cloner) e return nil } -func deepCopy_v1_PodStatusResult(in PodStatusResult, out *PodStatusResult, c *conversion.Cloner) error { - if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { +func DeepCopy_v1_PodStatusResult(in PodStatusResult, out *PodStatusResult, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } - if err := deepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + if err := DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { return err } - if err := deepCopy_v1_PodStatus(in.Status, &out.Status, c); err != nil { + if err := DeepCopy_v1_PodStatus(in.Status, &out.Status, c); err != nil { return err } return nil } -func deepCopy_v1_PodTemplate(in PodTemplate, out *PodTemplate, c *conversion.Cloner) error { - if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { +func DeepCopy_v1_PodTemplate(in PodTemplate, out *PodTemplate, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } - if err := deepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + if err := DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { return err } - if err := deepCopy_v1_PodTemplateSpec(in.Template, &out.Template, c); err != nil { + if err := DeepCopy_v1_PodTemplateSpec(in.Template, &out.Template, c); err != nil { return err } return nil } -func deepCopy_v1_PodTemplateList(in PodTemplateList, out 
*PodTemplateList, c *conversion.Cloner) error { - if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { +func DeepCopy_v1_PodTemplateList(in PodTemplateList, out *PodTemplateList, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } - if err := deepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { return err } if in.Items != nil { - out.Items = make([]PodTemplate, len(in.Items)) - for i := range in.Items { - if err := deepCopy_v1_PodTemplate(in.Items[i], &out.Items[i], c); err != nil { + in, out := in.Items, &out.Items + *out = make([]PodTemplate, len(in)) + for i := range in { + if err := DeepCopy_v1_PodTemplate(in[i], &(*out)[i], c); err != nil { return err } } @@ -1900,18 +2389,41 @@ func deepCopy_v1_PodTemplateList(in PodTemplateList, out *PodTemplateList, c *co return nil } -func deepCopy_v1_PodTemplateSpec(in PodTemplateSpec, out *PodTemplateSpec, c *conversion.Cloner) error { - if err := deepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { +func DeepCopy_v1_PodTemplateSpec(in PodTemplateSpec, out *PodTemplateSpec, c *conversion.Cloner) error { + if err := DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { return err } - if err := deepCopy_v1_PodSpec(in.Spec, &out.Spec, c); err != nil { + if err := DeepCopy_v1_PodSpec(in.Spec, &out.Spec, c); err != nil { return err } return nil } -func deepCopy_v1_Probe(in Probe, out *Probe, c *conversion.Cloner) error { - if err := deepCopy_v1_Handler(in.Handler, &out.Handler, c); err != nil { +func DeepCopy_v1_Preconditions(in Preconditions, out *Preconditions, c *conversion.Cloner) error { + if in.UID != nil { + in, out := in.UID, &out.UID + *out = new(types.UID) + if newVal, err := c.DeepCopy(*in); err != nil { + return err + } else { + **out = newVal.(types.UID) + } + } else { + out.UID = nil + } + return nil +} + +func DeepCopy_v1_PreferredSchedulingTerm(in PreferredSchedulingTerm, out *PreferredSchedulingTerm, c *conversion.Cloner) error { + out.Weight = in.Weight + if err := DeepCopy_v1_NodeSelectorTerm(in.Preference, &out.Preference, c); err != nil { + return err + } + return nil +} + +func DeepCopy_v1_Probe(in Probe, out *Probe, c *conversion.Cloner) error { + if err := DeepCopy_v1_Handler(in.Handler, &out.Handler, c); err != nil { return err } out.InitialDelaySeconds = in.InitialDelaySeconds @@ -1922,12 +2434,11 @@ func deepCopy_v1_Probe(in Probe, out *Probe, c *conversion.Cloner) error { return nil } -func deepCopy_v1_RBDVolumeSource(in RBDVolumeSource, out *RBDVolumeSource, c *conversion.Cloner) error { +func DeepCopy_v1_RBDVolumeSource(in RBDVolumeSource, out *RBDVolumeSource, c *conversion.Cloner) error { if in.CephMonitors != nil { - out.CephMonitors = make([]string, len(in.CephMonitors)) - for i := range in.CephMonitors { - out.CephMonitors[i] = in.CephMonitors[i] - } + in, out := in.CephMonitors, &out.CephMonitors + *out = make([]string, len(in)) + copy(*out, in) } else { out.CephMonitors = nil } @@ -1937,8 +2448,9 @@ func deepCopy_v1_RBDVolumeSource(in RBDVolumeSource, out *RBDVolumeSource, c *co out.RadosUser = in.RadosUser out.Keyring = in.Keyring if in.SecretRef != nil { - out.SecretRef = new(LocalObjectReference) - if err := deepCopy_v1_LocalObjectReference(*in.SecretRef, out.SecretRef, c); err != nil { + in, out := in.SecretRef, &out.SecretRef + *out = 
new(LocalObjectReference) + if err := DeepCopy_v1_LocalObjectReference(*in, *out, c); err != nil { return err } } else { @@ -1948,52 +2460,52 @@ func deepCopy_v1_RBDVolumeSource(in RBDVolumeSource, out *RBDVolumeSource, c *co return nil } -func deepCopy_v1_RangeAllocation(in RangeAllocation, out *RangeAllocation, c *conversion.Cloner) error { - if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { +func DeepCopy_v1_RangeAllocation(in RangeAllocation, out *RangeAllocation, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } - if err := deepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + if err := DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { return err } out.Range = in.Range if in.Data != nil { - out.Data = make([]uint8, len(in.Data)) - for i := range in.Data { - out.Data[i] = in.Data[i] - } + in, out := in.Data, &out.Data + *out = make([]byte, len(in)) + copy(*out, in) } else { out.Data = nil } return nil } -func deepCopy_v1_ReplicationController(in ReplicationController, out *ReplicationController, c *conversion.Cloner) error { - if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { +func DeepCopy_v1_ReplicationController(in ReplicationController, out *ReplicationController, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } - if err := deepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + if err := DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { return err } - if err := deepCopy_v1_ReplicationControllerSpec(in.Spec, &out.Spec, c); err != nil { + if err := DeepCopy_v1_ReplicationControllerSpec(in.Spec, &out.Spec, c); err != nil { return err } - if err := deepCopy_v1_ReplicationControllerStatus(in.Status, &out.Status, c); err != nil { + if err := DeepCopy_v1_ReplicationControllerStatus(in.Status, &out.Status, c); err != nil { return err } return nil } -func deepCopy_v1_ReplicationControllerList(in ReplicationControllerList, out *ReplicationControllerList, c *conversion.Cloner) error { - if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { +func DeepCopy_v1_ReplicationControllerList(in ReplicationControllerList, out *ReplicationControllerList, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } - if err := deepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { return err } if in.Items != nil { - out.Items = make([]ReplicationController, len(in.Items)) - for i := range in.Items { - if err := deepCopy_v1_ReplicationController(in.Items[i], &out.Items[i], c); err != nil { + in, out := in.Items, &out.Items + *out = make([]ReplicationController, len(in)) + for i := range in { + if err := DeepCopy_v1_ReplicationController(in[i], &(*out)[i], c); err != nil { return err } } @@ -2003,24 +2515,27 @@ func deepCopy_v1_ReplicationControllerList(in ReplicationControllerList, out *Re return nil } -func deepCopy_v1_ReplicationControllerSpec(in ReplicationControllerSpec, out *ReplicationControllerSpec, c *conversion.Cloner) error { +func DeepCopy_v1_ReplicationControllerSpec(in ReplicationControllerSpec, out *ReplicationControllerSpec, c 
*conversion.Cloner) error { if in.Replicas != nil { - out.Replicas = new(int32) - *out.Replicas = *in.Replicas + in, out := in.Replicas, &out.Replicas + *out = new(int32) + **out = *in } else { out.Replicas = nil } if in.Selector != nil { - out.Selector = make(map[string]string) - for key, val := range in.Selector { - out.Selector[key] = val + in, out := in.Selector, &out.Selector + *out = make(map[string]string) + for key, val := range in { + (*out)[key] = val } } else { out.Selector = nil } if in.Template != nil { - out.Template = new(PodTemplateSpec) - if err := deepCopy_v1_PodTemplateSpec(*in.Template, out.Template, c); err != nil { + in, out := in.Template, &out.Template + *out = new(PodTemplateSpec) + if err := DeepCopy_v1_PodTemplateSpec(*in, *out, c); err != nil { return err } } else { @@ -2029,40 +2544,50 @@ func deepCopy_v1_ReplicationControllerSpec(in ReplicationControllerSpec, out *Re return nil } -func deepCopy_v1_ReplicationControllerStatus(in ReplicationControllerStatus, out *ReplicationControllerStatus, c *conversion.Cloner) error { +func DeepCopy_v1_ReplicationControllerStatus(in ReplicationControllerStatus, out *ReplicationControllerStatus, c *conversion.Cloner) error { out.Replicas = in.Replicas out.FullyLabeledReplicas = in.FullyLabeledReplicas out.ObservedGeneration = in.ObservedGeneration return nil } -func deepCopy_v1_ResourceQuota(in ResourceQuota, out *ResourceQuota, c *conversion.Cloner) error { - if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { +func DeepCopy_v1_ResourceFieldSelector(in ResourceFieldSelector, out *ResourceFieldSelector, c *conversion.Cloner) error { + out.ContainerName = in.ContainerName + out.Resource = in.Resource + if err := resource.DeepCopy_resource_Quantity(in.Divisor, &out.Divisor, c); err != nil { return err } - if err := deepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + return nil +} + +func DeepCopy_v1_ResourceQuota(in ResourceQuota, out *ResourceQuota, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } - if err := deepCopy_v1_ResourceQuotaSpec(in.Spec, &out.Spec, c); err != nil { + if err := DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { return err } - if err := deepCopy_v1_ResourceQuotaStatus(in.Status, &out.Status, c); err != nil { + if err := DeepCopy_v1_ResourceQuotaSpec(in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_v1_ResourceQuotaStatus(in.Status, &out.Status, c); err != nil { return err } return nil } -func deepCopy_v1_ResourceQuotaList(in ResourceQuotaList, out *ResourceQuotaList, c *conversion.Cloner) error { - if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { +func DeepCopy_v1_ResourceQuotaList(in ResourceQuotaList, out *ResourceQuotaList, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } - if err := deepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { return err } if in.Items != nil { - out.Items = make([]ResourceQuota, len(in.Items)) - for i := range in.Items { - if err := deepCopy_v1_ResourceQuota(in.Items[i], &out.Items[i], c); err != nil { + in, out := in.Items, &out.Items + *out = make([]ResourceQuota, len(in)) + for i := range in { + if err := 
DeepCopy_v1_ResourceQuota(in[i], &(*out)[i], c); err != nil { return err } } @@ -2072,23 +2597,25 @@ func deepCopy_v1_ResourceQuotaList(in ResourceQuotaList, out *ResourceQuotaList, return nil } -func deepCopy_v1_ResourceQuotaSpec(in ResourceQuotaSpec, out *ResourceQuotaSpec, c *conversion.Cloner) error { +func DeepCopy_v1_ResourceQuotaSpec(in ResourceQuotaSpec, out *ResourceQuotaSpec, c *conversion.Cloner) error { if in.Hard != nil { - out.Hard = make(ResourceList) - for key, val := range in.Hard { + in, out := in.Hard, &out.Hard + *out = make(ResourceList) + for key, val := range in { newVal := new(resource.Quantity) - if err := deepCopy_resource_Quantity(val, newVal, c); err != nil { + if err := resource.DeepCopy_resource_Quantity(val, newVal, c); err != nil { return err } - out.Hard[key] = *newVal + (*out)[key] = *newVal } } else { out.Hard = nil } if in.Scopes != nil { - out.Scopes = make([]ResourceQuotaScope, len(in.Scopes)) - for i := range in.Scopes { - out.Scopes[i] = in.Scopes[i] + in, out := in.Scopes, &out.Scopes + *out = make([]ResourceQuotaScope, len(in)) + for i := range in { + (*out)[i] = in[i] } } else { out.Scopes = nil @@ -2096,27 +2623,29 @@ func deepCopy_v1_ResourceQuotaSpec(in ResourceQuotaSpec, out *ResourceQuotaSpec, return nil } -func deepCopy_v1_ResourceQuotaStatus(in ResourceQuotaStatus, out *ResourceQuotaStatus, c *conversion.Cloner) error { +func DeepCopy_v1_ResourceQuotaStatus(in ResourceQuotaStatus, out *ResourceQuotaStatus, c *conversion.Cloner) error { if in.Hard != nil { - out.Hard = make(ResourceList) - for key, val := range in.Hard { + in, out := in.Hard, &out.Hard + *out = make(ResourceList) + for key, val := range in { newVal := new(resource.Quantity) - if err := deepCopy_resource_Quantity(val, newVal, c); err != nil { + if err := resource.DeepCopy_resource_Quantity(val, newVal, c); err != nil { return err } - out.Hard[key] = *newVal + (*out)[key] = *newVal } } else { out.Hard = nil } if in.Used != nil { - out.Used = make(ResourceList) - for key, val := range in.Used { + in, out := in.Used, &out.Used + *out = make(ResourceList) + for key, val := range in { newVal := new(resource.Quantity) - if err := deepCopy_resource_Quantity(val, newVal, c); err != nil { + if err := resource.DeepCopy_resource_Quantity(val, newVal, c); err != nil { return err } - out.Used[key] = *newVal + (*out)[key] = *newVal } } else { out.Used = nil @@ -2124,27 +2653,29 @@ func deepCopy_v1_ResourceQuotaStatus(in ResourceQuotaStatus, out *ResourceQuotaS return nil } -func deepCopy_v1_ResourceRequirements(in ResourceRequirements, out *ResourceRequirements, c *conversion.Cloner) error { +func DeepCopy_v1_ResourceRequirements(in ResourceRequirements, out *ResourceRequirements, c *conversion.Cloner) error { if in.Limits != nil { - out.Limits = make(ResourceList) - for key, val := range in.Limits { + in, out := in.Limits, &out.Limits + *out = make(ResourceList) + for key, val := range in { newVal := new(resource.Quantity) - if err := deepCopy_resource_Quantity(val, newVal, c); err != nil { + if err := resource.DeepCopy_resource_Quantity(val, newVal, c); err != nil { return err } - out.Limits[key] = *newVal + (*out)[key] = *newVal } } else { out.Limits = nil } if in.Requests != nil { - out.Requests = make(ResourceList) - for key, val := range in.Requests { + in, out := in.Requests, &out.Requests + *out = make(ResourceList) + for key, val := range in { newVal := new(resource.Quantity) - if err := deepCopy_resource_Quantity(val, newVal, c); err != nil { + if err := 
resource.DeepCopy_resource_Quantity(val, newVal, c); err != nil { return err } - out.Requests[key] = *newVal + (*out)[key] = *newVal } } else { out.Requests = nil @@ -2152,7 +2683,7 @@ func deepCopy_v1_ResourceRequirements(in ResourceRequirements, out *ResourceRequ return nil } -func deepCopy_v1_SELinuxOptions(in SELinuxOptions, out *SELinuxOptions, c *conversion.Cloner) error { +func DeepCopy_v1_SELinuxOptions(in SELinuxOptions, out *SELinuxOptions, c *conversion.Cloner) error { out.User = in.User out.Role = in.Role out.Type = in.Type @@ -2160,20 +2691,21 @@ func deepCopy_v1_SELinuxOptions(in SELinuxOptions, out *SELinuxOptions, c *conve return nil } -func deepCopy_v1_Secret(in Secret, out *Secret, c *conversion.Cloner) error { - if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { +func DeepCopy_v1_Secret(in Secret, out *Secret, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } - if err := deepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + if err := DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { return err } if in.Data != nil { - out.Data = make(map[string][]uint8) - for key, val := range in.Data { + in, out := in.Data, &out.Data + *out = make(map[string][]byte) + for key, val := range in { if newVal, err := c.DeepCopy(val); err != nil { return err } else { - out.Data[key] = newVal.([]uint8) + (*out)[key] = newVal.([]byte) } } } else { @@ -2183,25 +2715,26 @@ func deepCopy_v1_Secret(in Secret, out *Secret, c *conversion.Cloner) error { return nil } -func deepCopy_v1_SecretKeySelector(in SecretKeySelector, out *SecretKeySelector, c *conversion.Cloner) error { - if err := deepCopy_v1_LocalObjectReference(in.LocalObjectReference, &out.LocalObjectReference, c); err != nil { +func DeepCopy_v1_SecretKeySelector(in SecretKeySelector, out *SecretKeySelector, c *conversion.Cloner) error { + if err := DeepCopy_v1_LocalObjectReference(in.LocalObjectReference, &out.LocalObjectReference, c); err != nil { return err } out.Key = in.Key return nil } -func deepCopy_v1_SecretList(in SecretList, out *SecretList, c *conversion.Cloner) error { - if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { +func DeepCopy_v1_SecretList(in SecretList, out *SecretList, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } - if err := deepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { return err } if in.Items != nil { - out.Items = make([]Secret, len(in.Items)) - for i := range in.Items { - if err := deepCopy_v1_Secret(in.Items[i], &out.Items[i], c); err != nil { + in, out := in.Items, &out.Items + *out = make([]Secret, len(in)) + for i := range in { + if err := DeepCopy_v1_Secret(in[i], &(*out)[i], c); err != nil { return err } } @@ -2211,92 +2744,110 @@ func deepCopy_v1_SecretList(in SecretList, out *SecretList, c *conversion.Cloner return nil } -func deepCopy_v1_SecretVolumeSource(in SecretVolumeSource, out *SecretVolumeSource, c *conversion.Cloner) error { +func DeepCopy_v1_SecretVolumeSource(in SecretVolumeSource, out *SecretVolumeSource, c *conversion.Cloner) error { out.SecretName = in.SecretName + if in.Items != nil { + in, out := in.Items, &out.Items + *out = make([]KeyToPath, len(in)) + for i := range in 
{ + if err := DeepCopy_v1_KeyToPath(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Items = nil + } return nil } -func deepCopy_v1_SecurityContext(in SecurityContext, out *SecurityContext, c *conversion.Cloner) error { +func DeepCopy_v1_SecurityContext(in SecurityContext, out *SecurityContext, c *conversion.Cloner) error { if in.Capabilities != nil { - out.Capabilities = new(Capabilities) - if err := deepCopy_v1_Capabilities(*in.Capabilities, out.Capabilities, c); err != nil { + in, out := in.Capabilities, &out.Capabilities + *out = new(Capabilities) + if err := DeepCopy_v1_Capabilities(*in, *out, c); err != nil { return err } } else { out.Capabilities = nil } if in.Privileged != nil { - out.Privileged = new(bool) - *out.Privileged = *in.Privileged + in, out := in.Privileged, &out.Privileged + *out = new(bool) + **out = *in } else { out.Privileged = nil } if in.SELinuxOptions != nil { - out.SELinuxOptions = new(SELinuxOptions) - if err := deepCopy_v1_SELinuxOptions(*in.SELinuxOptions, out.SELinuxOptions, c); err != nil { + in, out := in.SELinuxOptions, &out.SELinuxOptions + *out = new(SELinuxOptions) + if err := DeepCopy_v1_SELinuxOptions(*in, *out, c); err != nil { return err } } else { out.SELinuxOptions = nil } if in.RunAsUser != nil { - out.RunAsUser = new(int64) - *out.RunAsUser = *in.RunAsUser + in, out := in.RunAsUser, &out.RunAsUser + *out = new(int64) + **out = *in } else { out.RunAsUser = nil } if in.RunAsNonRoot != nil { - out.RunAsNonRoot = new(bool) - *out.RunAsNonRoot = *in.RunAsNonRoot + in, out := in.RunAsNonRoot, &out.RunAsNonRoot + *out = new(bool) + **out = *in } else { out.RunAsNonRoot = nil } if in.ReadOnlyRootFilesystem != nil { - out.ReadOnlyRootFilesystem = new(bool) - *out.ReadOnlyRootFilesystem = *in.ReadOnlyRootFilesystem + in, out := in.ReadOnlyRootFilesystem, &out.ReadOnlyRootFilesystem + *out = new(bool) + **out = *in } else { out.ReadOnlyRootFilesystem = nil } return nil } -func deepCopy_v1_SerializedReference(in SerializedReference, out *SerializedReference, c *conversion.Cloner) error { - if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { +func DeepCopy_v1_SerializedReference(in SerializedReference, out *SerializedReference, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } - if err := deepCopy_v1_ObjectReference(in.Reference, &out.Reference, c); err != nil { + if err := DeepCopy_v1_ObjectReference(in.Reference, &out.Reference, c); err != nil { return err } return nil } -func deepCopy_v1_Service(in Service, out *Service, c *conversion.Cloner) error { - if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { +func DeepCopy_v1_Service(in Service, out *Service, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } - if err := deepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + if err := DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { return err } - if err := deepCopy_v1_ServiceSpec(in.Spec, &out.Spec, c); err != nil { + if err := DeepCopy_v1_ServiceSpec(in.Spec, &out.Spec, c); err != nil { return err } - if err := deepCopy_v1_ServiceStatus(in.Status, &out.Status, c); err != nil { + if err := DeepCopy_v1_ServiceStatus(in.Status, &out.Status, c); err != nil { return err } return nil } -func deepCopy_v1_ServiceAccount(in ServiceAccount, out 
*ServiceAccount, c *conversion.Cloner) error { - if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { +func DeepCopy_v1_ServiceAccount(in ServiceAccount, out *ServiceAccount, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } - if err := deepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + if err := DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { return err } if in.Secrets != nil { - out.Secrets = make([]ObjectReference, len(in.Secrets)) - for i := range in.Secrets { - if err := deepCopy_v1_ObjectReference(in.Secrets[i], &out.Secrets[i], c); err != nil { + in, out := in.Secrets, &out.Secrets + *out = make([]ObjectReference, len(in)) + for i := range in { + if err := DeepCopy_v1_ObjectReference(in[i], &(*out)[i], c); err != nil { return err } } @@ -2304,9 +2855,10 @@ func deepCopy_v1_ServiceAccount(in ServiceAccount, out *ServiceAccount, c *conve out.Secrets = nil } if in.ImagePullSecrets != nil { - out.ImagePullSecrets = make([]LocalObjectReference, len(in.ImagePullSecrets)) - for i := range in.ImagePullSecrets { - if err := deepCopy_v1_LocalObjectReference(in.ImagePullSecrets[i], &out.ImagePullSecrets[i], c); err != nil { + in, out := in.ImagePullSecrets, &out.ImagePullSecrets + *out = make([]LocalObjectReference, len(in)) + for i := range in { + if err := DeepCopy_v1_LocalObjectReference(in[i], &(*out)[i], c); err != nil { return err } } @@ -2316,17 +2868,18 @@ func deepCopy_v1_ServiceAccount(in ServiceAccount, out *ServiceAccount, c *conve return nil } -func deepCopy_v1_ServiceAccountList(in ServiceAccountList, out *ServiceAccountList, c *conversion.Cloner) error { - if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { +func DeepCopy_v1_ServiceAccountList(in ServiceAccountList, out *ServiceAccountList, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } - if err := deepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { return err } if in.Items != nil { - out.Items = make([]ServiceAccount, len(in.Items)) - for i := range in.Items { - if err := deepCopy_v1_ServiceAccount(in.Items[i], &out.Items[i], c); err != nil { + in, out := in.Items, &out.Items + *out = make([]ServiceAccount, len(in)) + for i := range in { + if err := DeepCopy_v1_ServiceAccount(in[i], &(*out)[i], c); err != nil { return err } } @@ -2336,17 +2889,18 @@ func deepCopy_v1_ServiceAccountList(in ServiceAccountList, out *ServiceAccountLi return nil } -func deepCopy_v1_ServiceList(in ServiceList, out *ServiceList, c *conversion.Cloner) error { - if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { +func DeepCopy_v1_ServiceList(in ServiceList, out *ServiceList, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } - if err := deepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { return err } if in.Items != nil { - out.Items = make([]Service, len(in.Items)) - for i := range in.Items { - if err := deepCopy_v1_Service(in.Items[i], &out.Items[i], c); err != nil { + in, out := in.Items, 
&out.Items + *out = make([]Service, len(in)) + for i := range in { + if err := DeepCopy_v1_Service(in[i], &(*out)[i], c); err != nil { return err } } @@ -2356,30 +2910,31 @@ func deepCopy_v1_ServiceList(in ServiceList, out *ServiceList, c *conversion.Clo return nil } -func deepCopy_v1_ServicePort(in ServicePort, out *ServicePort, c *conversion.Cloner) error { +func DeepCopy_v1_ServicePort(in ServicePort, out *ServicePort, c *conversion.Cloner) error { out.Name = in.Name out.Protocol = in.Protocol out.Port = in.Port - if err := deepCopy_intstr_IntOrString(in.TargetPort, &out.TargetPort, c); err != nil { + if err := intstr.DeepCopy_intstr_IntOrString(in.TargetPort, &out.TargetPort, c); err != nil { return err } out.NodePort = in.NodePort return nil } -func deepCopy_v1_ServiceProxyOptions(in ServiceProxyOptions, out *ServiceProxyOptions, c *conversion.Cloner) error { - if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { +func DeepCopy_v1_ServiceProxyOptions(in ServiceProxyOptions, out *ServiceProxyOptions, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } out.Path = in.Path return nil } -func deepCopy_v1_ServiceSpec(in ServiceSpec, out *ServiceSpec, c *conversion.Cloner) error { +func DeepCopy_v1_ServiceSpec(in ServiceSpec, out *ServiceSpec, c *conversion.Cloner) error { if in.Ports != nil { - out.Ports = make([]ServicePort, len(in.Ports)) - for i := range in.Ports { - if err := deepCopy_v1_ServicePort(in.Ports[i], &out.Ports[i], c); err != nil { + in, out := in.Ports, &out.Ports + *out = make([]ServicePort, len(in)) + for i := range in { + if err := DeepCopy_v1_ServicePort(in[i], &(*out)[i], c); err != nil { return err } } @@ -2387,9 +2942,10 @@ func deepCopy_v1_ServiceSpec(in ServiceSpec, out *ServiceSpec, c *conversion.Clo out.Ports = nil } if in.Selector != nil { - out.Selector = make(map[string]string) - for key, val := range in.Selector { - out.Selector[key] = val + in, out := in.Selector, &out.Selector + *out = make(map[string]string) + for key, val := range in { + (*out)[key] = val } } else { out.Selector = nil @@ -2397,380 +2953,270 @@ func deepCopy_v1_ServiceSpec(in ServiceSpec, out *ServiceSpec, c *conversion.Clo out.ClusterIP = in.ClusterIP out.Type = in.Type if in.ExternalIPs != nil { - out.ExternalIPs = make([]string, len(in.ExternalIPs)) - for i := range in.ExternalIPs { - out.ExternalIPs[i] = in.ExternalIPs[i] - } + in, out := in.ExternalIPs, &out.ExternalIPs + *out = make([]string, len(in)) + copy(*out, in) } else { out.ExternalIPs = nil } if in.DeprecatedPublicIPs != nil { - out.DeprecatedPublicIPs = make([]string, len(in.DeprecatedPublicIPs)) - for i := range in.DeprecatedPublicIPs { - out.DeprecatedPublicIPs[i] = in.DeprecatedPublicIPs[i] - } + in, out := in.DeprecatedPublicIPs, &out.DeprecatedPublicIPs + *out = make([]string, len(in)) + copy(*out, in) } else { out.DeprecatedPublicIPs = nil } out.SessionAffinity = in.SessionAffinity out.LoadBalancerIP = in.LoadBalancerIP + if in.LoadBalancerSourceRanges != nil { + in, out := in.LoadBalancerSourceRanges, &out.LoadBalancerSourceRanges + *out = make([]string, len(in)) + copy(*out, in) + } else { + out.LoadBalancerSourceRanges = nil + } return nil } -func deepCopy_v1_ServiceStatus(in ServiceStatus, out *ServiceStatus, c *conversion.Cloner) error { - if err := deepCopy_v1_LoadBalancerStatus(in.LoadBalancer, &out.LoadBalancer, c); err != nil { +func DeepCopy_v1_ServiceStatus(in ServiceStatus, out 
*ServiceStatus, c *conversion.Cloner) error { + if err := DeepCopy_v1_LoadBalancerStatus(in.LoadBalancer, &out.LoadBalancer, c); err != nil { return err } return nil } -func deepCopy_v1_TCPSocketAction(in TCPSocketAction, out *TCPSocketAction, c *conversion.Cloner) error { - if err := deepCopy_intstr_IntOrString(in.Port, &out.Port, c); err != nil { +func DeepCopy_v1_TCPSocketAction(in TCPSocketAction, out *TCPSocketAction, c *conversion.Cloner) error { + if err := intstr.DeepCopy_intstr_IntOrString(in.Port, &out.Port, c); err != nil { return err } return nil } -func deepCopy_v1_Volume(in Volume, out *Volume, c *conversion.Cloner) error { +func DeepCopy_v1_Taint(in Taint, out *Taint, c *conversion.Cloner) error { + out.Key = in.Key + out.Value = in.Value + out.Effect = in.Effect + return nil +} + +func DeepCopy_v1_Toleration(in Toleration, out *Toleration, c *conversion.Cloner) error { + out.Key = in.Key + out.Operator = in.Operator + out.Value = in.Value + out.Effect = in.Effect + return nil +} + +func DeepCopy_v1_Volume(in Volume, out *Volume, c *conversion.Cloner) error { out.Name = in.Name - if err := deepCopy_v1_VolumeSource(in.VolumeSource, &out.VolumeSource, c); err != nil { + if err := DeepCopy_v1_VolumeSource(in.VolumeSource, &out.VolumeSource, c); err != nil { return err } return nil } -func deepCopy_v1_VolumeMount(in VolumeMount, out *VolumeMount, c *conversion.Cloner) error { +func DeepCopy_v1_VolumeMount(in VolumeMount, out *VolumeMount, c *conversion.Cloner) error { out.Name = in.Name out.ReadOnly = in.ReadOnly out.MountPath = in.MountPath + out.SubPath = in.SubPath return nil } -func deepCopy_v1_VolumeSource(in VolumeSource, out *VolumeSource, c *conversion.Cloner) error { +func DeepCopy_v1_VolumeSource(in VolumeSource, out *VolumeSource, c *conversion.Cloner) error { if in.HostPath != nil { - out.HostPath = new(HostPathVolumeSource) - if err := deepCopy_v1_HostPathVolumeSource(*in.HostPath, out.HostPath, c); err != nil { + in, out := in.HostPath, &out.HostPath + *out = new(HostPathVolumeSource) + if err := DeepCopy_v1_HostPathVolumeSource(*in, *out, c); err != nil { return err } } else { out.HostPath = nil } if in.EmptyDir != nil { - out.EmptyDir = new(EmptyDirVolumeSource) - if err := deepCopy_v1_EmptyDirVolumeSource(*in.EmptyDir, out.EmptyDir, c); err != nil { + in, out := in.EmptyDir, &out.EmptyDir + *out = new(EmptyDirVolumeSource) + if err := DeepCopy_v1_EmptyDirVolumeSource(*in, *out, c); err != nil { return err } } else { out.EmptyDir = nil } if in.GCEPersistentDisk != nil { - out.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) - if err := deepCopy_v1_GCEPersistentDiskVolumeSource(*in.GCEPersistentDisk, out.GCEPersistentDisk, c); err != nil { + in, out := in.GCEPersistentDisk, &out.GCEPersistentDisk + *out = new(GCEPersistentDiskVolumeSource) + if err := DeepCopy_v1_GCEPersistentDiskVolumeSource(*in, *out, c); err != nil { return err } } else { out.GCEPersistentDisk = nil } if in.AWSElasticBlockStore != nil { - out.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) - if err := deepCopy_v1_AWSElasticBlockStoreVolumeSource(*in.AWSElasticBlockStore, out.AWSElasticBlockStore, c); err != nil { + in, out := in.AWSElasticBlockStore, &out.AWSElasticBlockStore + *out = new(AWSElasticBlockStoreVolumeSource) + if err := DeepCopy_v1_AWSElasticBlockStoreVolumeSource(*in, *out, c); err != nil { return err } } else { out.AWSElasticBlockStore = nil } if in.GitRepo != nil { - out.GitRepo = new(GitRepoVolumeSource) - if err := 
deepCopy_v1_GitRepoVolumeSource(*in.GitRepo, out.GitRepo, c); err != nil { + in, out := in.GitRepo, &out.GitRepo + *out = new(GitRepoVolumeSource) + if err := DeepCopy_v1_GitRepoVolumeSource(*in, *out, c); err != nil { return err } } else { out.GitRepo = nil } if in.Secret != nil { - out.Secret = new(SecretVolumeSource) - if err := deepCopy_v1_SecretVolumeSource(*in.Secret, out.Secret, c); err != nil { + in, out := in.Secret, &out.Secret + *out = new(SecretVolumeSource) + if err := DeepCopy_v1_SecretVolumeSource(*in, *out, c); err != nil { return err } } else { out.Secret = nil } if in.NFS != nil { - out.NFS = new(NFSVolumeSource) - if err := deepCopy_v1_NFSVolumeSource(*in.NFS, out.NFS, c); err != nil { + in, out := in.NFS, &out.NFS + *out = new(NFSVolumeSource) + if err := DeepCopy_v1_NFSVolumeSource(*in, *out, c); err != nil { return err } } else { out.NFS = nil } if in.ISCSI != nil { - out.ISCSI = new(ISCSIVolumeSource) - if err := deepCopy_v1_ISCSIVolumeSource(*in.ISCSI, out.ISCSI, c); err != nil { + in, out := in.ISCSI, &out.ISCSI + *out = new(ISCSIVolumeSource) + if err := DeepCopy_v1_ISCSIVolumeSource(*in, *out, c); err != nil { return err } } else { out.ISCSI = nil } if in.Glusterfs != nil { - out.Glusterfs = new(GlusterfsVolumeSource) - if err := deepCopy_v1_GlusterfsVolumeSource(*in.Glusterfs, out.Glusterfs, c); err != nil { + in, out := in.Glusterfs, &out.Glusterfs + *out = new(GlusterfsVolumeSource) + if err := DeepCopy_v1_GlusterfsVolumeSource(*in, *out, c); err != nil { return err } } else { out.Glusterfs = nil } if in.PersistentVolumeClaim != nil { - out.PersistentVolumeClaim = new(PersistentVolumeClaimVolumeSource) - if err := deepCopy_v1_PersistentVolumeClaimVolumeSource(*in.PersistentVolumeClaim, out.PersistentVolumeClaim, c); err != nil { + in, out := in.PersistentVolumeClaim, &out.PersistentVolumeClaim + *out = new(PersistentVolumeClaimVolumeSource) + if err := DeepCopy_v1_PersistentVolumeClaimVolumeSource(*in, *out, c); err != nil { return err } } else { out.PersistentVolumeClaim = nil } if in.RBD != nil { - out.RBD = new(RBDVolumeSource) - if err := deepCopy_v1_RBDVolumeSource(*in.RBD, out.RBD, c); err != nil { + in, out := in.RBD, &out.RBD + *out = new(RBDVolumeSource) + if err := DeepCopy_v1_RBDVolumeSource(*in, *out, c); err != nil { return err } } else { out.RBD = nil } if in.FlexVolume != nil { - out.FlexVolume = new(FlexVolumeSource) - if err := deepCopy_v1_FlexVolumeSource(*in.FlexVolume, out.FlexVolume, c); err != nil { + in, out := in.FlexVolume, &out.FlexVolume + *out = new(FlexVolumeSource) + if err := DeepCopy_v1_FlexVolumeSource(*in, *out, c); err != nil { return err } } else { out.FlexVolume = nil } if in.Cinder != nil { - out.Cinder = new(CinderVolumeSource) - if err := deepCopy_v1_CinderVolumeSource(*in.Cinder, out.Cinder, c); err != nil { + in, out := in.Cinder, &out.Cinder + *out = new(CinderVolumeSource) + if err := DeepCopy_v1_CinderVolumeSource(*in, *out, c); err != nil { return err } } else { out.Cinder = nil } if in.CephFS != nil { - out.CephFS = new(CephFSVolumeSource) - if err := deepCopy_v1_CephFSVolumeSource(*in.CephFS, out.CephFS, c); err != nil { + in, out := in.CephFS, &out.CephFS + *out = new(CephFSVolumeSource) + if err := DeepCopy_v1_CephFSVolumeSource(*in, *out, c); err != nil { return err } } else { out.CephFS = nil } if in.Flocker != nil { - out.Flocker = new(FlockerVolumeSource) - if err := deepCopy_v1_FlockerVolumeSource(*in.Flocker, out.Flocker, c); err != nil { + in, out := in.Flocker, &out.Flocker + *out = 
new(FlockerVolumeSource) + if err := DeepCopy_v1_FlockerVolumeSource(*in, *out, c); err != nil { return err } } else { out.Flocker = nil } if in.DownwardAPI != nil { - out.DownwardAPI = new(DownwardAPIVolumeSource) - if err := deepCopy_v1_DownwardAPIVolumeSource(*in.DownwardAPI, out.DownwardAPI, c); err != nil { + in, out := in.DownwardAPI, &out.DownwardAPI + *out = new(DownwardAPIVolumeSource) + if err := DeepCopy_v1_DownwardAPIVolumeSource(*in, *out, c); err != nil { return err } } else { out.DownwardAPI = nil } if in.FC != nil { - out.FC = new(FCVolumeSource) - if err := deepCopy_v1_FCVolumeSource(*in.FC, out.FC, c); err != nil { + in, out := in.FC, &out.FC + *out = new(FCVolumeSource) + if err := DeepCopy_v1_FCVolumeSource(*in, *out, c); err != nil { return err } } else { out.FC = nil } if in.AzureFile != nil { - out.AzureFile = new(AzureFileVolumeSource) - if err := deepCopy_v1_AzureFileVolumeSource(*in.AzureFile, out.AzureFile, c); err != nil { + in, out := in.AzureFile, &out.AzureFile + *out = new(AzureFileVolumeSource) + if err := DeepCopy_v1_AzureFileVolumeSource(*in, *out, c); err != nil { return err } } else { out.AzureFile = nil } if in.ConfigMap != nil { - out.ConfigMap = new(ConfigMapVolumeSource) - if err := deepCopy_v1_ConfigMapVolumeSource(*in.ConfigMap, out.ConfigMap, c); err != nil { + in, out := in.ConfigMap, &out.ConfigMap + *out = new(ConfigMapVolumeSource) + if err := DeepCopy_v1_ConfigMapVolumeSource(*in, *out, c); err != nil { return err } } else { out.ConfigMap = nil } - return nil -} - -func deepCopy_runtime_RawExtension(in runtime.RawExtension, out *runtime.RawExtension, c *conversion.Cloner) error { - if in.RawJSON != nil { - out.RawJSON = make([]uint8, len(in.RawJSON)) - for i := range in.RawJSON { - out.RawJSON[i] = in.RawJSON[i] + if in.VsphereVolume != nil { + in, out := in.VsphereVolume, &out.VsphereVolume + *out = new(VsphereVirtualDiskVolumeSource) + if err := DeepCopy_v1_VsphereVirtualDiskVolumeSource(*in, *out, c); err != nil { + return err } } else { - out.RawJSON = nil - } - if newVal, err := c.DeepCopy(in.Object); err != nil { - return err - } else if newVal == nil { - out.Object = nil - } else { - out.Object = newVal.(runtime.Object) + out.VsphereVolume = nil } return nil } -func deepCopy_intstr_IntOrString(in intstr.IntOrString, out *intstr.IntOrString, c *conversion.Cloner) error { - out.Type = in.Type - out.IntVal = in.IntVal - out.StrVal = in.StrVal +func DeepCopy_v1_VsphereVirtualDiskVolumeSource(in VsphereVirtualDiskVolumeSource, out *VsphereVirtualDiskVolumeSource, c *conversion.Cloner) error { + out.VolumePath = in.VolumePath + out.FSType = in.FSType return nil } -func init() { - err := api.Scheme.AddGeneratedDeepCopyFuncs( - deepCopy_resource_Quantity, - deepCopy_unversioned_ListMeta, - deepCopy_unversioned_Time, - deepCopy_unversioned_TypeMeta, - deepCopy_v1_AWSElasticBlockStoreVolumeSource, - deepCopy_v1_AzureFileVolumeSource, - deepCopy_v1_Binding, - deepCopy_v1_Capabilities, - deepCopy_v1_CephFSVolumeSource, - deepCopy_v1_CinderVolumeSource, - deepCopy_v1_ComponentCondition, - deepCopy_v1_ComponentStatus, - deepCopy_v1_ComponentStatusList, - deepCopy_v1_ConfigMap, - deepCopy_v1_ConfigMapKeySelector, - deepCopy_v1_ConfigMapList, - deepCopy_v1_ConfigMapVolumeSource, - deepCopy_v1_Container, - deepCopy_v1_ContainerImage, - deepCopy_v1_ContainerPort, - deepCopy_v1_ContainerState, - deepCopy_v1_ContainerStateRunning, - deepCopy_v1_ContainerStateTerminated, - deepCopy_v1_ContainerStateWaiting, - deepCopy_v1_ContainerStatus, - 
deepCopy_v1_DaemonEndpoint,
-		deepCopy_v1_DeleteOptions,
-		deepCopy_v1_DownwardAPIVolumeFile,
-		deepCopy_v1_DownwardAPIVolumeSource,
-		deepCopy_v1_EmptyDirVolumeSource,
-		deepCopy_v1_EndpointAddress,
-		deepCopy_v1_EndpointPort,
-		deepCopy_v1_EndpointSubset,
-		deepCopy_v1_Endpoints,
-		deepCopy_v1_EndpointsList,
-		deepCopy_v1_EnvVar,
-		deepCopy_v1_EnvVarSource,
-		deepCopy_v1_Event,
-		deepCopy_v1_EventList,
-		deepCopy_v1_EventSource,
-		deepCopy_v1_ExecAction,
-		deepCopy_v1_ExportOptions,
-		deepCopy_v1_FCVolumeSource,
-		deepCopy_v1_FlexVolumeSource,
-		deepCopy_v1_FlockerVolumeSource,
-		deepCopy_v1_GCEPersistentDiskVolumeSource,
-		deepCopy_v1_GitRepoVolumeSource,
-		deepCopy_v1_GlusterfsVolumeSource,
-		deepCopy_v1_HTTPGetAction,
-		deepCopy_v1_HTTPHeader,
-		deepCopy_v1_Handler,
-		deepCopy_v1_HostPathVolumeSource,
-		deepCopy_v1_ISCSIVolumeSource,
-		deepCopy_v1_KeyToPath,
-		deepCopy_v1_Lifecycle,
-		deepCopy_v1_LimitRange,
-		deepCopy_v1_LimitRangeItem,
-		deepCopy_v1_LimitRangeList,
-		deepCopy_v1_LimitRangeSpec,
-		deepCopy_v1_List,
-		deepCopy_v1_ListOptions,
-		deepCopy_v1_LoadBalancerIngress,
-		deepCopy_v1_LoadBalancerStatus,
-		deepCopy_v1_LocalObjectReference,
-		deepCopy_v1_NFSVolumeSource,
-		deepCopy_v1_Namespace,
-		deepCopy_v1_NamespaceList,
-		deepCopy_v1_NamespaceSpec,
-		deepCopy_v1_NamespaceStatus,
-		deepCopy_v1_Node,
-		deepCopy_v1_NodeAddress,
-		deepCopy_v1_NodeCondition,
-		deepCopy_v1_NodeDaemonEndpoints,
-		deepCopy_v1_NodeList,
-		deepCopy_v1_NodeProxyOptions,
-		deepCopy_v1_NodeSpec,
-		deepCopy_v1_NodeStatus,
-		deepCopy_v1_NodeSystemInfo,
-		deepCopy_v1_ObjectFieldSelector,
-		deepCopy_v1_ObjectMeta,
-		deepCopy_v1_ObjectReference,
-		deepCopy_v1_PersistentVolume,
-		deepCopy_v1_PersistentVolumeClaim,
-		deepCopy_v1_PersistentVolumeClaimList,
-		deepCopy_v1_PersistentVolumeClaimSpec,
-		deepCopy_v1_PersistentVolumeClaimStatus,
-		deepCopy_v1_PersistentVolumeClaimVolumeSource,
-		deepCopy_v1_PersistentVolumeList,
-		deepCopy_v1_PersistentVolumeSource,
-		deepCopy_v1_PersistentVolumeSpec,
-		deepCopy_v1_PersistentVolumeStatus,
-		deepCopy_v1_Pod,
-		deepCopy_v1_PodAttachOptions,
-		deepCopy_v1_PodCondition,
-		deepCopy_v1_PodExecOptions,
-		deepCopy_v1_PodList,
-		deepCopy_v1_PodLogOptions,
-		deepCopy_v1_PodProxyOptions,
-		deepCopy_v1_PodSecurityContext,
-		deepCopy_v1_PodSpec,
-		deepCopy_v1_PodStatus,
-		deepCopy_v1_PodStatusResult,
-		deepCopy_v1_PodTemplate,
-		deepCopy_v1_PodTemplateList,
-		deepCopy_v1_PodTemplateSpec,
-		deepCopy_v1_Probe,
-		deepCopy_v1_RBDVolumeSource,
-		deepCopy_v1_RangeAllocation,
-		deepCopy_v1_ReplicationController,
-		deepCopy_v1_ReplicationControllerList,
-		deepCopy_v1_ReplicationControllerSpec,
-		deepCopy_v1_ReplicationControllerStatus,
-		deepCopy_v1_ResourceQuota,
-		deepCopy_v1_ResourceQuotaList,
-		deepCopy_v1_ResourceQuotaSpec,
-		deepCopy_v1_ResourceQuotaStatus,
-		deepCopy_v1_ResourceRequirements,
-		deepCopy_v1_SELinuxOptions,
-		deepCopy_v1_Secret,
-		deepCopy_v1_SecretKeySelector,
-		deepCopy_v1_SecretList,
-		deepCopy_v1_SecretVolumeSource,
-		deepCopy_v1_SecurityContext,
-		deepCopy_v1_SerializedReference,
-		deepCopy_v1_Service,
-		deepCopy_v1_ServiceAccount,
-		deepCopy_v1_ServiceAccountList,
-		deepCopy_v1_ServiceList,
-		deepCopy_v1_ServicePort,
-		deepCopy_v1_ServiceProxyOptions,
-		deepCopy_v1_ServiceSpec,
-		deepCopy_v1_ServiceStatus,
-		deepCopy_v1_TCPSocketAction,
-		deepCopy_v1_Volume,
-		deepCopy_v1_VolumeMount,
-		deepCopy_v1_VolumeSource,
-		deepCopy_runtime_RawExtension,
-		deepCopy_intstr_IntOrString,
-	)
-	if err != nil {
-		// if one of the deep copy functions is malformed, detect it immediately.
-		panic(err)
-	}
-}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/v1/defaults.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/v1/defaults.go
index 2dac9940da98..938df8ec8f58 100644
--- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/v1/defaults.go
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/v1/defaults.go
@@ -25,230 +25,255 @@ import (
 func addDefaultingFuncs(scheme *runtime.Scheme) {
 	scheme.AddDefaultingFuncs(
-		func(obj *PodExecOptions) {
-			obj.Stdout = true
-			obj.Stderr = true
-		},
-		func(obj *PodAttachOptions) {
-			obj.Stdout = true
-			obj.Stderr = true
-		},
-		func(obj *ReplicationController) {
-			var labels map[string]string
-			if obj.Spec.Template != nil {
-				labels = obj.Spec.Template.Labels
-			}
-			// TODO: support templates defined elsewhere when we support them in the API
-			if labels != nil {
-				if len(obj.Spec.Selector) == 0 {
-					obj.Spec.Selector = labels
-				}
-				if len(obj.Labels) == 0 {
-					obj.Labels = labels
-				}
-			}
-			if obj.Spec.Replicas == nil {
-				obj.Spec.Replicas = new(int32)
-				*obj.Spec.Replicas = 1
-			}
-		},
-		func(obj *Volume) {
-			if util.AllPtrFieldsNil(&obj.VolumeSource) {
-				obj.VolumeSource = VolumeSource{
-					EmptyDir: &EmptyDirVolumeSource{},
-				}
-			}
-		},
-		func(obj *ContainerPort) {
-			if obj.Protocol == "" {
-				obj.Protocol = ProtocolTCP
-			}
-		},
-		func(obj *Container) {
-			if obj.ImagePullPolicy == "" {
-				_, tag := parsers.ParseImageName(obj.Image)
-				// Check image tag
+		SetDefaults_PodExecOptions,
+		SetDefaults_PodAttachOptions,
+		SetDefaults_ReplicationController,
+		SetDefaults_Volume,
+		SetDefaults_ContainerPort,
+		SetDefaults_Container,
+		SetDefaults_ServiceSpec,
+		SetDefaults_Pod,
+		SetDefaults_PodSpec,
+		SetDefaults_Probe,
+		SetDefaults_Secret,
+		SetDefaults_PersistentVolume,
+		SetDefaults_PersistentVolumeClaim,
+		SetDefaults_ISCSIVolumeSource,
+		SetDefaults_Endpoints,
+		SetDefaults_HTTPGetAction,
+		SetDefaults_NamespaceStatus,
+		SetDefaults_Node,
+		SetDefaults_NodeStatus,
+		SetDefaults_ObjectFieldSelector,
+		SetDefaults_LimitRangeItem,
+		SetDefaults_ConfigMap,
+	)
+}
-			if tag == "latest" {
-				obj.ImagePullPolicy = PullAlways
-			} else {
-				obj.ImagePullPolicy = PullIfNotPresent
-			}
-		}
-			if obj.TerminationMessagePath == "" {
-				obj.TerminationMessagePath = TerminationMessagePathDefault
-			}
-		},
-		func(obj *ServiceSpec) {
-			if obj.SessionAffinity == "" {
-				obj.SessionAffinity = ServiceAffinityNone
-			}
-			if obj.Type == "" {
-				obj.Type = ServiceTypeClusterIP
-			}
-			for i := range obj.Ports {
-				sp := &obj.Ports[i]
-				if sp.Protocol == "" {
-					sp.Protocol = ProtocolTCP
-				}
-				if sp.TargetPort == intstr.FromInt(0) || sp.TargetPort == intstr.FromString("") {
-					sp.TargetPort = intstr.FromInt(int(sp.Port))
-				}
-			}
-		},
-		func(obj *Pod) {
-			// If limits are specified, but requests are not, default requests to limits
-			// This is done here rather than a more specific defaulting pass on ResourceRequirements
-			// because we only want this defaulting semantic to take place on a Pod and not a PodTemplate
-			for i := range obj.Spec.Containers {
-				// set requests to limits if requests are not specified, but limits are
-				if obj.Spec.Containers[i].Resources.Limits != nil {
-					if obj.Spec.Containers[i].Resources.Requests == nil {
-						obj.Spec.Containers[i].Resources.Requests = make(ResourceList)
-					}
-					for key, value := range obj.Spec.Containers[i].Resources.Limits {
-						if _, exists := obj.Spec.Containers[i].Resources.Requests[key]; !exists {
-							obj.Spec.Containers[i].Resources.Requests[key] = *(value.Copy())
-						}
-					}
-				}
-			}
-		},
-		func(obj *PodSpec) {
-			if obj.DNSPolicy == "" {
-				obj.DNSPolicy = DNSClusterFirst
-			}
-			if obj.RestartPolicy == "" {
-				obj.RestartPolicy = RestartPolicyAlways
-			}
-			if obj.HostNetwork {
-				defaultHostNetworkPorts(&obj.Containers)
-			}
-			if obj.SecurityContext == nil {
-				obj.SecurityContext = &PodSecurityContext{}
-			}
-			if obj.TerminationGracePeriodSeconds == nil {
-				period := int64(DefaultTerminationGracePeriodSeconds)
-				obj.TerminationGracePeriodSeconds = &period
-			}
-		},
-		func(obj *Probe) {
-			if obj.TimeoutSeconds == 0 {
-				obj.TimeoutSeconds = 1
-			}
-			if obj.PeriodSeconds == 0 {
-				obj.PeriodSeconds = 10
-			}
-			if obj.SuccessThreshold == 0 {
-				obj.SuccessThreshold = 1
-			}
-			if obj.FailureThreshold == 0 {
-				obj.FailureThreshold = 3
-			}
-		},
-		func(obj *Secret) {
-			if obj.Type == "" {
-				obj.Type = SecretTypeOpaque
-			}
-		},
-		func(obj *PersistentVolume) {
-			if obj.Status.Phase == "" {
-				obj.Status.Phase = VolumePending
-			}
-			if obj.Spec.PersistentVolumeReclaimPolicy == "" {
-				obj.Spec.PersistentVolumeReclaimPolicy = PersistentVolumeReclaimRetain
-			}
-		},
-		func(obj *PersistentVolumeClaim) {
-			if obj.Status.Phase == "" {
-				obj.Status.Phase = ClaimPending
-			}
-		},
-		func(obj *ISCSIVolumeSource) {
-			if obj.ISCSIInterface == "" {
-				obj.ISCSIInterface = "default"
-			}
-		},
-		func(obj *Endpoints) {
-			for i := range obj.Subsets {
-				ss := &obj.Subsets[i]
-				for i := range ss.Ports {
-					ep := &ss.Ports[i]
-					if ep.Protocol == "" {
-						ep.Protocol = ProtocolTCP
-					}
-				}
-			}
-		},
-		func(obj *HTTPGetAction) {
-			if obj.Path == "" {
-				obj.Path = "/"
-			}
-			if obj.Scheme == "" {
-				obj.Scheme = URISchemeHTTP
-			}
-		},
-		func(obj *NamespaceStatus) {
-			if obj.Phase == "" {
-				obj.Phase = NamespaceActive
-			}
-		},
-		func(obj *Node) {
-			if obj.Spec.ExternalID == "" {
-				obj.Spec.ExternalID = obj.Name
-			}
-		},
-		func(obj *NodeStatus) {
-			if obj.Allocatable == nil && obj.Capacity != nil {
-				obj.Allocatable = make(ResourceList, len(obj.Capacity))
-				for key, value := range obj.Capacity {
-					obj.Allocatable[key] = *(value.Copy())
+func SetDefaults_PodExecOptions(obj *PodExecOptions) {
+	obj.Stdout = true
+	obj.Stderr = true
+}
+func SetDefaults_PodAttachOptions(obj *PodAttachOptions) {
+	obj.Stdout = true
+	obj.Stderr = true
+}
+func SetDefaults_ReplicationController(obj *ReplicationController) {
+	var labels map[string]string
+	if obj.Spec.Template != nil {
+		labels = obj.Spec.Template.Labels
+	}
+	// TODO: support templates defined elsewhere when we support them in the API
+	if labels != nil {
+		if len(obj.Spec.Selector) == 0 {
+			obj.Spec.Selector = labels
+		}
+		if len(obj.Labels) == 0 {
+			obj.Labels = labels
+		}
+	}
+	if obj.Spec.Replicas == nil {
+		obj.Spec.Replicas = new(int32)
+		*obj.Spec.Replicas = 1
+	}
+}
+func SetDefaults_Volume(obj *Volume) {
+	if util.AllPtrFieldsNil(&obj.VolumeSource) {
+		obj.VolumeSource = VolumeSource{
+			EmptyDir: &EmptyDirVolumeSource{},
+		}
+	}
+}
+func SetDefaults_ContainerPort(obj *ContainerPort) {
+	if obj.Protocol == "" {
+		obj.Protocol = ProtocolTCP
+	}
+}
+func SetDefaults_Container(obj *Container) {
+	if obj.ImagePullPolicy == "" {
+		// Ignore error and assume it has been validated elsewhere
+		_, tag, _, _ := parsers.ParseImageName(obj.Image)
+
+		// Check image tag
+
+		if tag == "latest" {
+			obj.ImagePullPolicy = PullAlways
+		} else {
+			obj.ImagePullPolicy = PullIfNotPresent
+		}
+	}
+	if obj.TerminationMessagePath == "" {
+		obj.TerminationMessagePath = TerminationMessagePathDefault
+	}
+}
+func SetDefaults_ServiceSpec(obj *ServiceSpec) {
+	if obj.SessionAffinity == "" {
+		obj.SessionAffinity = ServiceAffinityNone
+	}
+	if obj.Type == "" {
+		obj.Type = ServiceTypeClusterIP
+	}
+	for i := range obj.Ports {
+		sp := &obj.Ports[i]
+		if sp.Protocol == "" {
+			sp.Protocol = ProtocolTCP
+		}
+		if sp.TargetPort == intstr.FromInt(0) || sp.TargetPort == intstr.FromString("") {
+			sp.TargetPort = intstr.FromInt(int(sp.Port))
+		}
+	}
+}
+func SetDefaults_Pod(obj *Pod) {
+	// If limits are specified, but requests are not, default requests to limits
+	// This is done here rather than a more specific defaulting pass on ResourceRequirements
+	// because we only want this defaulting semantic to take place on a Pod and not a PodTemplate
+	for i := range obj.Spec.Containers {
+		// set requests to limits if requests are not specified, but limits are
+		if obj.Spec.Containers[i].Resources.Limits != nil {
+			if obj.Spec.Containers[i].Resources.Requests == nil {
+				obj.Spec.Containers[i].Resources.Requests = make(ResourceList)
+			}
+			for key, value := range obj.Spec.Containers[i].Resources.Limits {
+				if _, exists := obj.Spec.Containers[i].Resources.Requests[key]; !exists {
+					obj.Spec.Containers[i].Resources.Requests[key] = *(value.Copy())
 				}
-				obj.Allocatable = obj.Capacity
 			}
-		},
-		func(obj *ObjectFieldSelector) {
-			if obj.APIVersion == "" {
-				obj.APIVersion = "v1"
+		}
+	}
+}
+func SetDefaults_PodSpec(obj *PodSpec) {
+	if obj.DNSPolicy == "" {
+		obj.DNSPolicy = DNSClusterFirst
+	}
+	if obj.RestartPolicy == "" {
+		obj.RestartPolicy = RestartPolicyAlways
+	}
+	if obj.HostNetwork {
+		defaultHostNetworkPorts(&obj.Containers)
+	}
+	if obj.SecurityContext == nil {
+		obj.SecurityContext = &PodSecurityContext{}
+	}
+	if obj.TerminationGracePeriodSeconds == nil {
+		period := int64(DefaultTerminationGracePeriodSeconds)
+		obj.TerminationGracePeriodSeconds = &period
+	}
+}
+func SetDefaults_Probe(obj *Probe) {
+	if obj.TimeoutSeconds == 0 {
+		obj.TimeoutSeconds = 1
+	}
+	if obj.PeriodSeconds == 0 {
+		obj.PeriodSeconds = 10
+	}
+	if obj.SuccessThreshold == 0 {
+		obj.SuccessThreshold = 1
+	}
+	if obj.FailureThreshold == 0 {
+		obj.FailureThreshold = 3
+	}
+}
+func SetDefaults_Secret(obj *Secret) {
+	if obj.Type == "" {
+		obj.Type = SecretTypeOpaque
+	}
+}
+func SetDefaults_PersistentVolume(obj *PersistentVolume) {
+	if obj.Status.Phase == "" {
+		obj.Status.Phase = VolumePending
+	}
+	if obj.Spec.PersistentVolumeReclaimPolicy == "" {
+		obj.Spec.PersistentVolumeReclaimPolicy = PersistentVolumeReclaimRetain
+	}
+}
+func SetDefaults_PersistentVolumeClaim(obj *PersistentVolumeClaim) {
+	if obj.Status.Phase == "" {
+		obj.Status.Phase = ClaimPending
+	}
+}
+func SetDefaults_ISCSIVolumeSource(obj *ISCSIVolumeSource) {
+	if obj.ISCSIInterface == "" {
+		obj.ISCSIInterface = "default"
+	}
+}
+func SetDefaults_Endpoints(obj *Endpoints) {
+	for i := range obj.Subsets {
+		ss := &obj.Subsets[i]
+		for i := range ss.Ports {
+			ep := &ss.Ports[i]
+			if ep.Protocol == "" {
+				ep.Protocol = ProtocolTCP
+			}
+		}
+	}
+}
+func SetDefaults_HTTPGetAction(obj *HTTPGetAction) {
+	if obj.Path == "" {
+		obj.Path = "/"
+	}
+	if obj.Scheme == "" {
+		obj.Scheme = URISchemeHTTP
+	}
+}
+func SetDefaults_NamespaceStatus(obj *NamespaceStatus) {
+	if obj.Phase == "" {
+		obj.Phase =
NamespaceActive + } +} +func SetDefaults_Node(obj *Node) { + if obj.Spec.ExternalID == "" { + obj.Spec.ExternalID = obj.Name + } +} +func SetDefaults_NodeStatus(obj *NodeStatus) { + if obj.Allocatable == nil && obj.Capacity != nil { + obj.Allocatable = make(ResourceList, len(obj.Capacity)) + for key, value := range obj.Capacity { + obj.Allocatable[key] = *(value.Copy()) + } + obj.Allocatable = obj.Capacity + } +} +func SetDefaults_ObjectFieldSelector(obj *ObjectFieldSelector) { + if obj.APIVersion == "" { + obj.APIVersion = "v1" + } +} +func SetDefaults_LimitRangeItem(obj *LimitRangeItem) { + // for container limits, we apply default values + if obj.Type == LimitTypeContainer { - if obj.Default == nil { - obj.Default = make(ResourceList) - } - if obj.DefaultRequest == nil { - obj.DefaultRequest = make(ResourceList) - } + if obj.Default == nil { + obj.Default = make(ResourceList) + } + if obj.DefaultRequest == nil { + obj.DefaultRequest = make(ResourceList) + } - // If a default limit is unspecified, but the max is specified, default the limit to the max - for key, value := range obj.Max { - if _, exists := obj.Default[key]; !exists { - obj.Default[key] = *(value.Copy()) - } - } - // If a default limit is specified, but the default request is not, default request to limit - for key, value := range obj.Default { - if _, exists := obj.DefaultRequest[key]; !exists { - obj.DefaultRequest[key] = *(value.Copy()) - } - } - // If a default request is not specified, but the min is provided, default request to the min - for key, value := range obj.Min { - if _, exists := obj.DefaultRequest[key]; !exists { - obj.DefaultRequest[key] = *(value.Copy()) - } - } + // If a default limit is unspecified, but the max is specified, default the limit to the max + for key, value := range obj.Max { + if _, exists := obj.Default[key]; !exists { + obj.Default[key] = *(value.Copy()) + } + } + // If a default limit is specified, but the default request is not, default request to limit + for key, value := range obj.Default { + if _, exists := obj.DefaultRequest[key]; !exists { + obj.DefaultRequest[key] = *(value.Copy()) } - }, - func(obj *ConfigMap) { - if obj.Data == nil { - obj.Data = make(map[string]string) + } + // If a default request is not specified, but the min is provided, default request to the min + for key, value := range obj.Min { + if _, exists := obj.DefaultRequest[key]; !exists { + obj.DefaultRequest[key] = *(value.Copy()) } - }, - ) + } + } +} +func SetDefaults_ConfigMap(obj *ConfigMap) { + if obj.Data == nil { + obj.Data = make(map[string]string) + } } // With host networking default all container ports to host ports. diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/v1/defaults_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/v1/defaults_test.go new file mode 100644 index 000000000000..b1a78e758e37 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/v1/defaults_test.go @@ -0,0 +1,645 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1_test + +import ( + "reflect" + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/resource" + versioned "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/util/intstr" +) + +func roundTrip(t *testing.T, obj runtime.Object) runtime.Object { + codec := api.Codecs.LegacyCodec(versioned.SchemeGroupVersion) + data, err := runtime.Encode(codec, obj) + if err != nil { + t.Errorf("%v\n %#v", err, obj) + return nil + } + obj2, err := runtime.Decode(codec, data) + if err != nil { + t.Errorf("%v\nData: %s\nSource: %#v", err, string(data), obj) + return nil + } + obj3 := reflect.New(reflect.TypeOf(obj).Elem()).Interface().(runtime.Object) + err = api.Scheme.Convert(obj2, obj3) + if err != nil { + t.Errorf("%v\nSource: %#v", err, obj2) + return nil + } + return obj3 +} + +func TestSetDefaultReplicationController(t *testing.T) { + tests := []struct { + rc *versioned.ReplicationController + expectLabels bool + expectSelector bool + }{ + { + rc: &versioned.ReplicationController{ + Spec: versioned.ReplicationControllerSpec{ + Template: &versioned.PodTemplateSpec{ + ObjectMeta: versioned.ObjectMeta{ + Labels: map[string]string{ + "foo": "bar", + }, + }, + }, + }, + }, + expectLabels: true, + expectSelector: true, + }, + { + rc: &versioned.ReplicationController{ + ObjectMeta: versioned.ObjectMeta{ + Labels: map[string]string{ + "bar": "foo", + }, + }, + Spec: versioned.ReplicationControllerSpec{ + Template: &versioned.PodTemplateSpec{ + ObjectMeta: versioned.ObjectMeta{ + Labels: map[string]string{ + "foo": "bar", + }, + }, + }, + }, + }, + expectLabels: false, + expectSelector: true, + }, + { + rc: &versioned.ReplicationController{ + ObjectMeta: versioned.ObjectMeta{ + Labels: map[string]string{ + "bar": "foo", + }, + }, + Spec: versioned.ReplicationControllerSpec{ + Selector: map[string]string{ + "some": "other", + }, + Template: &versioned.PodTemplateSpec{ + ObjectMeta: versioned.ObjectMeta{ + Labels: map[string]string{ + "foo": "bar", + }, + }, + }, + }, + }, + expectLabels: false, + expectSelector: false, + }, + { + rc: &versioned.ReplicationController{ + Spec: versioned.ReplicationControllerSpec{ + Selector: map[string]string{ + "some": "other", + }, + Template: &versioned.PodTemplateSpec{ + ObjectMeta: versioned.ObjectMeta{ + Labels: map[string]string{ + "foo": "bar", + }, + }, + }, + }, + }, + expectLabels: true, + expectSelector: false, + }, + } + + for _, test := range tests { + rc := test.rc + obj2 := roundTrip(t, runtime.Object(rc)) + rc2, ok := obj2.(*versioned.ReplicationController) + if !ok { + t.Errorf("unexpected object: %v", rc2) + t.FailNow() + } + if test.expectSelector != reflect.DeepEqual(rc2.Spec.Selector, rc2.Spec.Template.Labels) { + if test.expectSelector { + t.Errorf("expected: %v, got: %v", rc2.Spec.Template.Labels, rc2.Spec.Selector) + } else { + t.Errorf("unexpected equality: %v", rc.Spec.Selector) + } + } + if test.expectLabels != reflect.DeepEqual(rc2.Labels, rc2.Spec.Template.Labels) { + if test.expectLabels { + t.Errorf("expected: %v, got: %v", rc2.Spec.Template.Labels, rc2.Labels) + } else { + t.Errorf("unexpected equality: %v", rc.Labels) + } + } + } +} + +func newInt(val int32) *int32 { + p := new(int32) + *p = val + return p +} + +func TestSetDefaultReplicationControllerReplicas(t *testing.T) { + tests := []struct { + rc versioned.ReplicationController + expectReplicas int32 + }{ + { + rc: 
versioned.ReplicationController{ + Spec: versioned.ReplicationControllerSpec{ + Template: &versioned.PodTemplateSpec{ + ObjectMeta: versioned.ObjectMeta{ + Labels: map[string]string{ + "foo": "bar", + }, + }, + }, + }, + }, + expectReplicas: 1, + }, + { + rc: versioned.ReplicationController{ + Spec: versioned.ReplicationControllerSpec{ + Replicas: newInt(0), + Template: &versioned.PodTemplateSpec{ + ObjectMeta: versioned.ObjectMeta{ + Labels: map[string]string{ + "foo": "bar", + }, + }, + }, + }, + }, + expectReplicas: 0, + }, + { + rc: versioned.ReplicationController{ + Spec: versioned.ReplicationControllerSpec{ + Replicas: newInt(3), + Template: &versioned.PodTemplateSpec{ + ObjectMeta: versioned.ObjectMeta{ + Labels: map[string]string{ + "foo": "bar", + }, + }, + }, + }, + }, + expectReplicas: 3, + }, + } + + for _, test := range tests { + rc := &test.rc + obj2 := roundTrip(t, runtime.Object(rc)) + rc2, ok := obj2.(*versioned.ReplicationController) + if !ok { + t.Errorf("unexpected object: %v", rc2) + t.FailNow() + } + if rc2.Spec.Replicas == nil { + t.Errorf("unexpected nil Replicas") + } else if test.expectReplicas != *rc2.Spec.Replicas { + t.Errorf("expected: %d replicas, got: %d", test.expectReplicas, *rc2.Spec.Replicas) + } + } +} + +func TestSetDefaultService(t *testing.T) { + svc := &versioned.Service{} + obj2 := roundTrip(t, runtime.Object(svc)) + svc2 := obj2.(*versioned.Service) + if svc2.Spec.SessionAffinity != versioned.ServiceAffinityNone { + t.Errorf("Expected default session affinity type:%s, got: %s", versioned.ServiceAffinityNone, svc2.Spec.SessionAffinity) + } + if svc2.Spec.Type != versioned.ServiceTypeClusterIP { + t.Errorf("Expected default type:%s, got: %s", versioned.ServiceTypeClusterIP, svc2.Spec.Type) + } +} + +func TestSetDefaultSecret(t *testing.T) { + s := &versioned.Secret{} + obj2 := roundTrip(t, runtime.Object(s)) + s2 := obj2.(*versioned.Secret) + + if s2.Type != versioned.SecretTypeOpaque { + t.Errorf("Expected secret type %v, got %v", versioned.SecretTypeOpaque, s2.Type) + } +} + +func TestSetDefaultPersistentVolume(t *testing.T) { + pv := &versioned.PersistentVolume{} + obj2 := roundTrip(t, runtime.Object(pv)) + pv2 := obj2.(*versioned.PersistentVolume) + + if pv2.Status.Phase != versioned.VolumePending { + t.Errorf("Expected volume phase %v, got %v", versioned.VolumePending, pv2.Status.Phase) + } + if pv2.Spec.PersistentVolumeReclaimPolicy != versioned.PersistentVolumeReclaimRetain { + t.Errorf("Expected pv reclaim policy %v, got %v", versioned.PersistentVolumeReclaimRetain, pv2.Spec.PersistentVolumeReclaimPolicy) + } +} + +func TestSetDefaultPersistentVolumeClaim(t *testing.T) { + pvc := &versioned.PersistentVolumeClaim{} + obj2 := roundTrip(t, runtime.Object(pvc)) + pvc2 := obj2.(*versioned.PersistentVolumeClaim) + + if pvc2.Status.Phase != versioned.ClaimPending { + t.Errorf("Expected claim phase %v, got %v", versioned.ClaimPending, pvc2.Status.Phase) + } +} + +func TestSetDefaulEndpointsProtocol(t *testing.T) { + in := &versioned.Endpoints{Subsets: []versioned.EndpointSubset{ + {Ports: []versioned.EndpointPort{{}, {Protocol: "UDP"}, {}}}, + }} + obj := roundTrip(t, runtime.Object(in)) + out := obj.(*versioned.Endpoints) + + for i := range out.Subsets { + for j := range out.Subsets[i].Ports { + if in.Subsets[i].Ports[j].Protocol == "" { + if out.Subsets[i].Ports[j].Protocol != versioned.ProtocolTCP { + t.Errorf("Expected protocol %s, got %s", versioned.ProtocolTCP, out.Subsets[i].Ports[j].Protocol) + } + } else { + if 
out.Subsets[i].Ports[j].Protocol != in.Subsets[i].Ports[j].Protocol { + t.Errorf("Expected protocol %s, got %s", in.Subsets[i].Ports[j].Protocol, out.Subsets[i].Ports[j].Protocol) + } + } + } + } +} + +func TestSetDefaulServiceTargetPort(t *testing.T) { + in := &versioned.Service{Spec: versioned.ServiceSpec{Ports: []versioned.ServicePort{{Port: 1234}}}} + obj := roundTrip(t, runtime.Object(in)) + out := obj.(*versioned.Service) + if out.Spec.Ports[0].TargetPort != intstr.FromInt(1234) { + t.Errorf("Expected TargetPort to be defaulted, got %v", out.Spec.Ports[0].TargetPort) + } + + in = &versioned.Service{Spec: versioned.ServiceSpec{Ports: []versioned.ServicePort{{Port: 1234, TargetPort: intstr.FromInt(5678)}}}} + obj = roundTrip(t, runtime.Object(in)) + out = obj.(*versioned.Service) + if out.Spec.Ports[0].TargetPort != intstr.FromInt(5678) { + t.Errorf("Expected TargetPort to be unchanged, got %v", out.Spec.Ports[0].TargetPort) + } +} + +func TestSetDefaultServicePort(t *testing.T) { + // Unchanged if set. + in := &versioned.Service{Spec: versioned.ServiceSpec{ + Ports: []versioned.ServicePort{ + {Protocol: "UDP", Port: 9376, TargetPort: intstr.FromString("p")}, + {Protocol: "UDP", Port: 8675, TargetPort: intstr.FromInt(309)}, + }, + }} + out := roundTrip(t, runtime.Object(in)).(*versioned.Service) + if out.Spec.Ports[0].Protocol != versioned.ProtocolUDP { + t.Errorf("Expected protocol %s, got %s", versioned.ProtocolUDP, out.Spec.Ports[0].Protocol) + } + if out.Spec.Ports[0].TargetPort != intstr.FromString("p") { + t.Errorf("Expected port %v, got %v", in.Spec.Ports[0].Port, out.Spec.Ports[0].TargetPort) + } + if out.Spec.Ports[1].Protocol != versioned.ProtocolUDP { + t.Errorf("Expected protocol %s, got %s", versioned.ProtocolUDP, out.Spec.Ports[1].Protocol) + } + if out.Spec.Ports[1].TargetPort != intstr.FromInt(309) { + t.Errorf("Expected port %v, got %v", in.Spec.Ports[1].Port, out.Spec.Ports[1].TargetPort) + } + + // Defaulted. 
+ in = &versioned.Service{Spec: versioned.ServiceSpec{ + Ports: []versioned.ServicePort{ + {Protocol: "", Port: 9376, TargetPort: intstr.FromString("")}, + {Protocol: "", Port: 8675, TargetPort: intstr.FromInt(0)}, + }, + }} + out = roundTrip(t, runtime.Object(in)).(*versioned.Service) + if out.Spec.Ports[0].Protocol != versioned.ProtocolTCP { + t.Errorf("Expected protocol %s, got %s", versioned.ProtocolTCP, out.Spec.Ports[0].Protocol) + } + if out.Spec.Ports[0].TargetPort != intstr.FromInt(int(in.Spec.Ports[0].Port)) { + t.Errorf("Expected port %v, got %v", in.Spec.Ports[0].Port, out.Spec.Ports[0].TargetPort) + } + if out.Spec.Ports[1].Protocol != versioned.ProtocolTCP { + t.Errorf("Expected protocol %s, got %s", versioned.ProtocolTCP, out.Spec.Ports[1].Protocol) + } + if out.Spec.Ports[1].TargetPort != intstr.FromInt(int(in.Spec.Ports[1].Port)) { + t.Errorf("Expected port %v, got %v", in.Spec.Ports[1].Port, out.Spec.Ports[1].TargetPort) + } +} + +func TestSetDefaultNamespace(t *testing.T) { + s := &versioned.Namespace{} + obj2 := roundTrip(t, runtime.Object(s)) + s2 := obj2.(*versioned.Namespace) + + if s2.Status.Phase != versioned.NamespaceActive { + t.Errorf("Expected phase %v, got %v", versioned.NamespaceActive, s2.Status.Phase) + } +} + +func TestSetDefaultPodSpecHostNetwork(t *testing.T) { + portNum := int32(8080) + s := versioned.PodSpec{} + s.HostNetwork = true + s.Containers = []versioned.Container{ + { + Ports: []versioned.ContainerPort{ + { + ContainerPort: portNum, + }, + }, + }, + } + pod := &versioned.Pod{ + Spec: s, + } + obj2 := roundTrip(t, runtime.Object(pod)) + pod2 := obj2.(*versioned.Pod) + s2 := pod2.Spec + + hostPortNum := s2.Containers[0].Ports[0].HostPort + if hostPortNum != portNum { + t.Errorf("Expected container port to be defaulted, was made %d instead of %d", hostPortNum, portNum) + } +} + +func TestSetDefaultNodeExternalID(t *testing.T) { + name := "node0" + n := &versioned.Node{} + n.Name = name + obj2 := roundTrip(t, runtime.Object(n)) + n2 := obj2.(*versioned.Node) + if n2.Spec.ExternalID != name { + t.Errorf("Expected default External ID: %s, got: %s", name, n2.Spec.ExternalID) + } + if n2.Spec.ProviderID != "" { + t.Errorf("Expected empty default Cloud Provider ID, got: %s", n2.Spec.ProviderID) + } +} + +func TestSetDefaultNodeStatusAllocatable(t *testing.T) { + capacity := versioned.ResourceList{ + versioned.ResourceCPU: resource.MustParse("1000m"), + versioned.ResourceMemory: resource.MustParse("10G"), + } + allocatable := versioned.ResourceList{ + versioned.ResourceCPU: resource.MustParse("500m"), + versioned.ResourceMemory: resource.MustParse("5G"), + } + tests := []struct { + capacity versioned.ResourceList + allocatable versioned.ResourceList + expectedAllocatable versioned.ResourceList + }{{ // Everything set, no defaulting. + capacity: capacity, + allocatable: allocatable, + expectedAllocatable: allocatable, + }, { // Allocatable set, no defaulting. + capacity: nil, + allocatable: allocatable, + expectedAllocatable: allocatable, + }, { // Capacity set, allocatable defaults to capacity. + capacity: capacity, + allocatable: nil, + expectedAllocatable: capacity, + }, { // Nothing set, allocatable "defaults" to capacity. 
+ capacity: nil, + allocatable: nil, + expectedAllocatable: nil, + }} + + copyResourceList := func(rl versioned.ResourceList) versioned.ResourceList { + if rl == nil { + return nil + } + copy := make(versioned.ResourceList, len(rl)) + for k, v := range rl { + copy[k] = *v.Copy() + } + return copy + } + + resourceListsEqual := func(a versioned.ResourceList, b versioned.ResourceList) bool { + if len(a) != len(b) { + return false + } + for k, v := range a { + vb, found := b[k] + if !found { + return false + } + if v.Cmp(vb) != 0 { + return false + } + } + return true + } + + for i, testcase := range tests { + node := versioned.Node{ + Status: versioned.NodeStatus{ + Capacity: copyResourceList(testcase.capacity), + Allocatable: copyResourceList(testcase.allocatable), + }, + } + node2 := roundTrip(t, runtime.Object(&node)).(*versioned.Node) + actual := node2.Status.Allocatable + expected := testcase.expectedAllocatable + if !resourceListsEqual(expected, actual) { + t.Errorf("[%d] Expected NodeStatus.Allocatable: %+v; Got: %+v", i, expected, actual) + } + } +} + +func TestSetDefaultObjectFieldSelectorAPIVersion(t *testing.T) { + s := versioned.PodSpec{ + Containers: []versioned.Container{ + { + Env: []versioned.EnvVar{ + { + ValueFrom: &versioned.EnvVarSource{ + FieldRef: &versioned.ObjectFieldSelector{}, + }, + }, + }, + }, + }, + } + pod := &versioned.Pod{ + Spec: s, + } + obj2 := roundTrip(t, runtime.Object(pod)) + pod2 := obj2.(*versioned.Pod) + s2 := pod2.Spec + + apiVersion := s2.Containers[0].Env[0].ValueFrom.FieldRef.APIVersion + if apiVersion != "v1" { + t.Errorf("Expected default APIVersion v1, got: %v", apiVersion) + } +} + +func TestSetDefaultRequestsPod(t *testing.T) { + // verify we default if limits are specified + s := versioned.PodSpec{} + s.Containers = []versioned.Container{ + { + Resources: versioned.ResourceRequirements{ + Limits: versioned.ResourceList{ + versioned.ResourceCPU: resource.MustParse("100m"), + }, + }, + }, + } + pod := &versioned.Pod{ + Spec: s, + } + output := roundTrip(t, runtime.Object(pod)) + pod2 := output.(*versioned.Pod) + defaultRequest := pod2.Spec.Containers[0].Resources.Requests + requestValue := defaultRequest[versioned.ResourceCPU] + if requestValue.String() != "100m" { + t.Errorf("Expected request cpu: %s, got: %s", "100m", requestValue.String()) + } + + // verify we do nothing if no limits are specified + s = versioned.PodSpec{} + s.Containers = []versioned.Container{{}} + pod = &versioned.Pod{ + Spec: s, + } + output = roundTrip(t, runtime.Object(pod)) + pod2 = output.(*versioned.Pod) + defaultRequest = pod2.Spec.Containers[0].Resources.Requests + requestValue = defaultRequest[versioned.ResourceCPU] + if requestValue.String() != "0" { + t.Errorf("Expected 0 request value, got: %s", requestValue.String()) + } +} + +func TestDefaultRequestIsNotSetForReplicationController(t *testing.T) { + s := versioned.PodSpec{} + s.Containers = []versioned.Container{ + { + Resources: versioned.ResourceRequirements{ + Limits: versioned.ResourceList{ + versioned.ResourceCPU: resource.MustParse("100m"), + }, + }, + }, + } + rc := &versioned.ReplicationController{ + Spec: versioned.ReplicationControllerSpec{ + Replicas: newInt(3), + Template: &versioned.PodTemplateSpec{ + ObjectMeta: versioned.ObjectMeta{ + Labels: map[string]string{ + "foo": "bar", + }, + }, + Spec: s, + }, + }, + } + output := roundTrip(t, runtime.Object(rc)) + rc2 := output.(*versioned.ReplicationController) + defaultRequest := rc2.Spec.Template.Spec.Containers[0].Resources.Requests + 
requestValue := defaultRequest[versioned.ResourceCPU] + if requestValue.String() != "0" { + t.Errorf("Expected 0 request value, got: %s", requestValue.String()) + } +} + +func TestSetDefaultLimitRangeItem(t *testing.T) { + limitRange := &versioned.LimitRange{ + ObjectMeta: versioned.ObjectMeta{ + Name: "test-defaults", + }, + Spec: versioned.LimitRangeSpec{ + Limits: []versioned.LimitRangeItem{{ + Type: versioned.LimitTypeContainer, + Max: versioned.ResourceList{ + versioned.ResourceCPU: resource.MustParse("100m"), + }, + Min: versioned.ResourceList{ + versioned.ResourceMemory: resource.MustParse("100Mi"), + }, + Default: versioned.ResourceList{}, + DefaultRequest: versioned.ResourceList{}, + }}, + }, + } + + output := roundTrip(t, runtime.Object(limitRange)) + limitRange2 := output.(*versioned.LimitRange) + defaultLimit := limitRange2.Spec.Limits[0].Default + defaultRequest := limitRange2.Spec.Limits[0].DefaultRequest + + // verify that default cpu was set to the max + defaultValue := defaultLimit[versioned.ResourceCPU] + if defaultValue.String() != "100m" { + t.Errorf("Expected default cpu: %s, got: %s", "100m", defaultValue.String()) + } + // verify that default request was set to the limit + requestValue := defaultRequest[versioned.ResourceCPU] + if requestValue.String() != "100m" { + t.Errorf("Expected request cpu: %s, got: %s", "100m", requestValue.String()) + } + // verify that if a min is provided, it will be the default if no limit is specified + requestMinValue := defaultRequest[versioned.ResourceMemory] + if requestMinValue.String() != "100Mi" { + t.Errorf("Expected request memory: %s, got: %s", "100Mi", requestMinValue.String()) + } +} + +func TestSetDefaultProbe(t *testing.T) { + originalProbe := versioned.Probe{} + expectedProbe := versioned.Probe{ + InitialDelaySeconds: 0, + TimeoutSeconds: 1, + PeriodSeconds: 10, + SuccessThreshold: 1, + FailureThreshold: 3, + } + + pod := &versioned.Pod{ + Spec: versioned.PodSpec{ + Containers: []versioned.Container{{LivenessProbe: &originalProbe}}, + }, + } + + output := roundTrip(t, runtime.Object(pod)).(*versioned.Pod) + actualProbe := *output.Spec.Containers[0].LivenessProbe + if actualProbe != expectedProbe { + t.Errorf("Expected probe: %+v\ngot: %+v\n", expectedProbe, actualProbe) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/v1/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/v1/doc.go index 84569e9f2282..bf85d77a1695 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/v1/doc.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/v1/doc.go @@ -15,4 +15,5 @@ limitations under the License. */ // Package v1 is the v1 version of the API. +// +genconversion=true package v1 diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/v1/generated.pb.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/v1/generated.pb.go new file mode 100644 index 000000000000..c78a2c03504e --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/v1/generated.pb.go @@ -0,0 +1,34331 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/pkg/api/v1/generated.proto +// DO NOT EDIT! + +/* + Package v1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/pkg/api/v1/generated.proto + + It has these top-level messages: + AWSElasticBlockStoreVolumeSource + Affinity + AzureFileVolumeSource + Binding + Capabilities + CephFSVolumeSource + CinderVolumeSource + ComponentCondition + ComponentStatus + ComponentStatusList + ConfigMap + ConfigMapKeySelector + ConfigMapList + ConfigMapVolumeSource + Container + ContainerImage + ContainerPort + ContainerState + ContainerStateRunning + ContainerStateTerminated + ContainerStateWaiting + ContainerStatus + DaemonEndpoint + DeleteOptions + DownwardAPIVolumeFile + DownwardAPIVolumeSource + EmptyDirVolumeSource + EndpointAddress + EndpointPort + EndpointSubset + Endpoints + EndpointsList + EnvVar + EnvVarSource + Event + EventList + EventSource + ExecAction + ExportOptions + FCVolumeSource + FlexVolumeSource + FlockerVolumeSource + GCEPersistentDiskVolumeSource + GitRepoVolumeSource + GlusterfsVolumeSource + HTTPGetAction + HTTPHeader + Handler + HostPathVolumeSource + ISCSIVolumeSource + KeyToPath + Lifecycle + LimitRange + LimitRangeItem + LimitRangeList + LimitRangeSpec + List + ListOptions + LoadBalancerIngress + LoadBalancerStatus + LocalObjectReference + NFSVolumeSource + Namespace + NamespaceList + NamespaceSpec + NamespaceStatus + Node + NodeAddress + NodeAffinity + NodeCondition + NodeDaemonEndpoints + NodeList + NodeProxyOptions + NodeSelector + NodeSelectorRequirement + NodeSelectorTerm + NodeSpec + NodeStatus + NodeSystemInfo + ObjectFieldSelector + ObjectMeta + ObjectReference + OwnerReference + PersistentVolume + PersistentVolumeClaim + PersistentVolumeClaimList + PersistentVolumeClaimSpec + PersistentVolumeClaimStatus + PersistentVolumeClaimVolumeSource + PersistentVolumeList + PersistentVolumeSource + PersistentVolumeSpec + PersistentVolumeStatus + Pod + PodAffinity + PodAffinityTerm + PodAntiAffinity + PodAttachOptions + PodCondition + PodExecOptions + PodList + PodLogOptions + PodProxyOptions + PodSecurityContext + PodSpec + PodStatus + PodStatusResult + PodTemplate + PodTemplateList + PodTemplateSpec + Preconditions + PreferredSchedulingTerm + Probe + RBDVolumeSource + RangeAllocation + ReplicationController + ReplicationControllerList + ReplicationControllerSpec + ReplicationControllerStatus + ResourceFieldSelector + ResourceQuota + ResourceQuotaList + ResourceQuotaSpec + ResourceQuotaStatus + ResourceRequirements + SELinuxOptions + Secret + SecretKeySelector + SecretList + SecretVolumeSource + SecurityContext + SerializedReference + Service + ServiceAccount + ServiceAccountList + ServiceList + ServicePort + ServiceProxyOptions + ServiceSpec + ServiceStatus + TCPSocketAction + Taint + Toleration + Volume + VolumeMount + VolumeSource + VsphereVirtualDiskVolumeSource + WeightedPodAffinityTerm +*/ +package v1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import k8s_io_kubernetes_pkg_api_resource "k8s.io/kubernetes/pkg/api/resource" +import k8s_io_kubernetes_pkg_api_unversioned "k8s.io/kubernetes/pkg/api/unversioned" +import k8s_io_kubernetes_pkg_runtime "k8s.io/kubernetes/pkg/runtime" + +import k8s_io_kubernetes_pkg_types "k8s.io/kubernetes/pkg/types" + +import io "io" + +// Reference imports to suppress errors if they 
are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +func (m *AWSElasticBlockStoreVolumeSource) Reset() { *m = AWSElasticBlockStoreVolumeSource{} } +func (m *AWSElasticBlockStoreVolumeSource) String() string { return proto.CompactTextString(m) } +func (*AWSElasticBlockStoreVolumeSource) ProtoMessage() {} + +func (m *Affinity) Reset() { *m = Affinity{} } +func (m *Affinity) String() string { return proto.CompactTextString(m) } +func (*Affinity) ProtoMessage() {} + +func (m *AzureFileVolumeSource) Reset() { *m = AzureFileVolumeSource{} } +func (m *AzureFileVolumeSource) String() string { return proto.CompactTextString(m) } +func (*AzureFileVolumeSource) ProtoMessage() {} + +func (m *Binding) Reset() { *m = Binding{} } +func (m *Binding) String() string { return proto.CompactTextString(m) } +func (*Binding) ProtoMessage() {} + +func (m *Capabilities) Reset() { *m = Capabilities{} } +func (m *Capabilities) String() string { return proto.CompactTextString(m) } +func (*Capabilities) ProtoMessage() {} + +func (m *CephFSVolumeSource) Reset() { *m = CephFSVolumeSource{} } +func (m *CephFSVolumeSource) String() string { return proto.CompactTextString(m) } +func (*CephFSVolumeSource) ProtoMessage() {} + +func (m *CinderVolumeSource) Reset() { *m = CinderVolumeSource{} } +func (m *CinderVolumeSource) String() string { return proto.CompactTextString(m) } +func (*CinderVolumeSource) ProtoMessage() {} + +func (m *ComponentCondition) Reset() { *m = ComponentCondition{} } +func (m *ComponentCondition) String() string { return proto.CompactTextString(m) } +func (*ComponentCondition) ProtoMessage() {} + +func (m *ComponentStatus) Reset() { *m = ComponentStatus{} } +func (m *ComponentStatus) String() string { return proto.CompactTextString(m) } +func (*ComponentStatus) ProtoMessage() {} + +func (m *ComponentStatusList) Reset() { *m = ComponentStatusList{} } +func (m *ComponentStatusList) String() string { return proto.CompactTextString(m) } +func (*ComponentStatusList) ProtoMessage() {} + +func (m *ConfigMap) Reset() { *m = ConfigMap{} } +func (m *ConfigMap) String() string { return proto.CompactTextString(m) } +func (*ConfigMap) ProtoMessage() {} + +func (m *ConfigMapKeySelector) Reset() { *m = ConfigMapKeySelector{} } +func (m *ConfigMapKeySelector) String() string { return proto.CompactTextString(m) } +func (*ConfigMapKeySelector) ProtoMessage() {} + +func (m *ConfigMapList) Reset() { *m = ConfigMapList{} } +func (m *ConfigMapList) String() string { return proto.CompactTextString(m) } +func (*ConfigMapList) ProtoMessage() {} + +func (m *ConfigMapVolumeSource) Reset() { *m = ConfigMapVolumeSource{} } +func (m *ConfigMapVolumeSource) String() string { return proto.CompactTextString(m) } +func (*ConfigMapVolumeSource) ProtoMessage() {} + +func (m *Container) Reset() { *m = Container{} } +func (m *Container) String() string { return proto.CompactTextString(m) } +func (*Container) ProtoMessage() {} + +func (m *ContainerImage) Reset() { *m = ContainerImage{} } +func (m *ContainerImage) String() string { return proto.CompactTextString(m) } +func (*ContainerImage) ProtoMessage() {} + +func (m *ContainerPort) Reset() { *m = ContainerPort{} } +func (m *ContainerPort) String() string { return proto.CompactTextString(m) } +func (*ContainerPort) ProtoMessage() {} + +func (m *ContainerState) Reset() { *m = ContainerState{} } +func (m *ContainerState) String() string { return proto.CompactTextString(m) } +func (*ContainerState) ProtoMessage() {} + +func (m *ContainerStateRunning) 
Reset() { *m = ContainerStateRunning{} } +func (m *ContainerStateRunning) String() string { return proto.CompactTextString(m) } +func (*ContainerStateRunning) ProtoMessage() {} + +func (m *ContainerStateTerminated) Reset() { *m = ContainerStateTerminated{} } +func (m *ContainerStateTerminated) String() string { return proto.CompactTextString(m) } +func (*ContainerStateTerminated) ProtoMessage() {} + +func (m *ContainerStateWaiting) Reset() { *m = ContainerStateWaiting{} } +func (m *ContainerStateWaiting) String() string { return proto.CompactTextString(m) } +func (*ContainerStateWaiting) ProtoMessage() {} + +func (m *ContainerStatus) Reset() { *m = ContainerStatus{} } +func (m *ContainerStatus) String() string { return proto.CompactTextString(m) } +func (*ContainerStatus) ProtoMessage() {} + +func (m *DaemonEndpoint) Reset() { *m = DaemonEndpoint{} } +func (m *DaemonEndpoint) String() string { return proto.CompactTextString(m) } +func (*DaemonEndpoint) ProtoMessage() {} + +func (m *DeleteOptions) Reset() { *m = DeleteOptions{} } +func (m *DeleteOptions) String() string { return proto.CompactTextString(m) } +func (*DeleteOptions) ProtoMessage() {} + +func (m *DownwardAPIVolumeFile) Reset() { *m = DownwardAPIVolumeFile{} } +func (m *DownwardAPIVolumeFile) String() string { return proto.CompactTextString(m) } +func (*DownwardAPIVolumeFile) ProtoMessage() {} + +func (m *DownwardAPIVolumeSource) Reset() { *m = DownwardAPIVolumeSource{} } +func (m *DownwardAPIVolumeSource) String() string { return proto.CompactTextString(m) } +func (*DownwardAPIVolumeSource) ProtoMessage() {} + +func (m *EmptyDirVolumeSource) Reset() { *m = EmptyDirVolumeSource{} } +func (m *EmptyDirVolumeSource) String() string { return proto.CompactTextString(m) } +func (*EmptyDirVolumeSource) ProtoMessage() {} + +func (m *EndpointAddress) Reset() { *m = EndpointAddress{} } +func (m *EndpointAddress) String() string { return proto.CompactTextString(m) } +func (*EndpointAddress) ProtoMessage() {} + +func (m *EndpointPort) Reset() { *m = EndpointPort{} } +func (m *EndpointPort) String() string { return proto.CompactTextString(m) } +func (*EndpointPort) ProtoMessage() {} + +func (m *EndpointSubset) Reset() { *m = EndpointSubset{} } +func (m *EndpointSubset) String() string { return proto.CompactTextString(m) } +func (*EndpointSubset) ProtoMessage() {} + +func (m *Endpoints) Reset() { *m = Endpoints{} } +func (m *Endpoints) String() string { return proto.CompactTextString(m) } +func (*Endpoints) ProtoMessage() {} + +func (m *EndpointsList) Reset() { *m = EndpointsList{} } +func (m *EndpointsList) String() string { return proto.CompactTextString(m) } +func (*EndpointsList) ProtoMessage() {} + +func (m *EnvVar) Reset() { *m = EnvVar{} } +func (m *EnvVar) String() string { return proto.CompactTextString(m) } +func (*EnvVar) ProtoMessage() {} + +func (m *EnvVarSource) Reset() { *m = EnvVarSource{} } +func (m *EnvVarSource) String() string { return proto.CompactTextString(m) } +func (*EnvVarSource) ProtoMessage() {} + +func (m *Event) Reset() { *m = Event{} } +func (m *Event) String() string { return proto.CompactTextString(m) } +func (*Event) ProtoMessage() {} + +func (m *EventList) Reset() { *m = EventList{} } +func (m *EventList) String() string { return proto.CompactTextString(m) } +func (*EventList) ProtoMessage() {} + +func (m *EventSource) Reset() { *m = EventSource{} } +func (m *EventSource) String() string { return proto.CompactTextString(m) } +func (*EventSource) ProtoMessage() {} + +func (m *ExecAction) Reset() { *m = 
ExecAction{} } +func (m *ExecAction) String() string { return proto.CompactTextString(m) } +func (*ExecAction) ProtoMessage() {} + +func (m *ExportOptions) Reset() { *m = ExportOptions{} } +func (m *ExportOptions) String() string { return proto.CompactTextString(m) } +func (*ExportOptions) ProtoMessage() {} + +func (m *FCVolumeSource) Reset() { *m = FCVolumeSource{} } +func (m *FCVolumeSource) String() string { return proto.CompactTextString(m) } +func (*FCVolumeSource) ProtoMessage() {} + +func (m *FlexVolumeSource) Reset() { *m = FlexVolumeSource{} } +func (m *FlexVolumeSource) String() string { return proto.CompactTextString(m) } +func (*FlexVolumeSource) ProtoMessage() {} + +func (m *FlockerVolumeSource) Reset() { *m = FlockerVolumeSource{} } +func (m *FlockerVolumeSource) String() string { return proto.CompactTextString(m) } +func (*FlockerVolumeSource) ProtoMessage() {} + +func (m *GCEPersistentDiskVolumeSource) Reset() { *m = GCEPersistentDiskVolumeSource{} } +func (m *GCEPersistentDiskVolumeSource) String() string { return proto.CompactTextString(m) } +func (*GCEPersistentDiskVolumeSource) ProtoMessage() {} + +func (m *GitRepoVolumeSource) Reset() { *m = GitRepoVolumeSource{} } +func (m *GitRepoVolumeSource) String() string { return proto.CompactTextString(m) } +func (*GitRepoVolumeSource) ProtoMessage() {} + +func (m *GlusterfsVolumeSource) Reset() { *m = GlusterfsVolumeSource{} } +func (m *GlusterfsVolumeSource) String() string { return proto.CompactTextString(m) } +func (*GlusterfsVolumeSource) ProtoMessage() {} + +func (m *HTTPGetAction) Reset() { *m = HTTPGetAction{} } +func (m *HTTPGetAction) String() string { return proto.CompactTextString(m) } +func (*HTTPGetAction) ProtoMessage() {} + +func (m *HTTPHeader) Reset() { *m = HTTPHeader{} } +func (m *HTTPHeader) String() string { return proto.CompactTextString(m) } +func (*HTTPHeader) ProtoMessage() {} + +func (m *Handler) Reset() { *m = Handler{} } +func (m *Handler) String() string { return proto.CompactTextString(m) } +func (*Handler) ProtoMessage() {} + +func (m *HostPathVolumeSource) Reset() { *m = HostPathVolumeSource{} } +func (m *HostPathVolumeSource) String() string { return proto.CompactTextString(m) } +func (*HostPathVolumeSource) ProtoMessage() {} + +func (m *ISCSIVolumeSource) Reset() { *m = ISCSIVolumeSource{} } +func (m *ISCSIVolumeSource) String() string { return proto.CompactTextString(m) } +func (*ISCSIVolumeSource) ProtoMessage() {} + +func (m *KeyToPath) Reset() { *m = KeyToPath{} } +func (m *KeyToPath) String() string { return proto.CompactTextString(m) } +func (*KeyToPath) ProtoMessage() {} + +func (m *Lifecycle) Reset() { *m = Lifecycle{} } +func (m *Lifecycle) String() string { return proto.CompactTextString(m) } +func (*Lifecycle) ProtoMessage() {} + +func (m *LimitRange) Reset() { *m = LimitRange{} } +func (m *LimitRange) String() string { return proto.CompactTextString(m) } +func (*LimitRange) ProtoMessage() {} + +func (m *LimitRangeItem) Reset() { *m = LimitRangeItem{} } +func (m *LimitRangeItem) String() string { return proto.CompactTextString(m) } +func (*LimitRangeItem) ProtoMessage() {} + +func (m *LimitRangeList) Reset() { *m = LimitRangeList{} } +func (m *LimitRangeList) String() string { return proto.CompactTextString(m) } +func (*LimitRangeList) ProtoMessage() {} + +func (m *LimitRangeSpec) Reset() { *m = LimitRangeSpec{} } +func (m *LimitRangeSpec) String() string { return proto.CompactTextString(m) } +func (*LimitRangeSpec) ProtoMessage() {} + +func (m *List) Reset() { *m = List{} } +func 
(m *List) String() string { return proto.CompactTextString(m) } +func (*List) ProtoMessage() {} + +func (m *ListOptions) Reset() { *m = ListOptions{} } +func (m *ListOptions) String() string { return proto.CompactTextString(m) } +func (*ListOptions) ProtoMessage() {} + +func (m *LoadBalancerIngress) Reset() { *m = LoadBalancerIngress{} } +func (m *LoadBalancerIngress) String() string { return proto.CompactTextString(m) } +func (*LoadBalancerIngress) ProtoMessage() {} + +func (m *LoadBalancerStatus) Reset() { *m = LoadBalancerStatus{} } +func (m *LoadBalancerStatus) String() string { return proto.CompactTextString(m) } +func (*LoadBalancerStatus) ProtoMessage() {} + +func (m *LocalObjectReference) Reset() { *m = LocalObjectReference{} } +func (m *LocalObjectReference) String() string { return proto.CompactTextString(m) } +func (*LocalObjectReference) ProtoMessage() {} + +func (m *NFSVolumeSource) Reset() { *m = NFSVolumeSource{} } +func (m *NFSVolumeSource) String() string { return proto.CompactTextString(m) } +func (*NFSVolumeSource) ProtoMessage() {} + +func (m *Namespace) Reset() { *m = Namespace{} } +func (m *Namespace) String() string { return proto.CompactTextString(m) } +func (*Namespace) ProtoMessage() {} + +func (m *NamespaceList) Reset() { *m = NamespaceList{} } +func (m *NamespaceList) String() string { return proto.CompactTextString(m) } +func (*NamespaceList) ProtoMessage() {} + +func (m *NamespaceSpec) Reset() { *m = NamespaceSpec{} } +func (m *NamespaceSpec) String() string { return proto.CompactTextString(m) } +func (*NamespaceSpec) ProtoMessage() {} + +func (m *NamespaceStatus) Reset() { *m = NamespaceStatus{} } +func (m *NamespaceStatus) String() string { return proto.CompactTextString(m) } +func (*NamespaceStatus) ProtoMessage() {} + +func (m *Node) Reset() { *m = Node{} } +func (m *Node) String() string { return proto.CompactTextString(m) } +func (*Node) ProtoMessage() {} + +func (m *NodeAddress) Reset() { *m = NodeAddress{} } +func (m *NodeAddress) String() string { return proto.CompactTextString(m) } +func (*NodeAddress) ProtoMessage() {} + +func (m *NodeAffinity) Reset() { *m = NodeAffinity{} } +func (m *NodeAffinity) String() string { return proto.CompactTextString(m) } +func (*NodeAffinity) ProtoMessage() {} + +func (m *NodeCondition) Reset() { *m = NodeCondition{} } +func (m *NodeCondition) String() string { return proto.CompactTextString(m) } +func (*NodeCondition) ProtoMessage() {} + +func (m *NodeDaemonEndpoints) Reset() { *m = NodeDaemonEndpoints{} } +func (m *NodeDaemonEndpoints) String() string { return proto.CompactTextString(m) } +func (*NodeDaemonEndpoints) ProtoMessage() {} + +func (m *NodeList) Reset() { *m = NodeList{} } +func (m *NodeList) String() string { return proto.CompactTextString(m) } +func (*NodeList) ProtoMessage() {} + +func (m *NodeProxyOptions) Reset() { *m = NodeProxyOptions{} } +func (m *NodeProxyOptions) String() string { return proto.CompactTextString(m) } +func (*NodeProxyOptions) ProtoMessage() {} + +func (m *NodeSelector) Reset() { *m = NodeSelector{} } +func (m *NodeSelector) String() string { return proto.CompactTextString(m) } +func (*NodeSelector) ProtoMessage() {} + +func (m *NodeSelectorRequirement) Reset() { *m = NodeSelectorRequirement{} } +func (m *NodeSelectorRequirement) String() string { return proto.CompactTextString(m) } +func (*NodeSelectorRequirement) ProtoMessage() {} + +func (m *NodeSelectorTerm) Reset() { *m = NodeSelectorTerm{} } +func (m *NodeSelectorTerm) String() string { return proto.CompactTextString(m) } 
+func (*NodeSelectorTerm) ProtoMessage() {} + +func (m *NodeSpec) Reset() { *m = NodeSpec{} } +func (m *NodeSpec) String() string { return proto.CompactTextString(m) } +func (*NodeSpec) ProtoMessage() {} + +func (m *NodeStatus) Reset() { *m = NodeStatus{} } +func (m *NodeStatus) String() string { return proto.CompactTextString(m) } +func (*NodeStatus) ProtoMessage() {} + +func (m *NodeSystemInfo) Reset() { *m = NodeSystemInfo{} } +func (m *NodeSystemInfo) String() string { return proto.CompactTextString(m) } +func (*NodeSystemInfo) ProtoMessage() {} + +func (m *ObjectFieldSelector) Reset() { *m = ObjectFieldSelector{} } +func (m *ObjectFieldSelector) String() string { return proto.CompactTextString(m) } +func (*ObjectFieldSelector) ProtoMessage() {} + +func (m *ObjectMeta) Reset() { *m = ObjectMeta{} } +func (m *ObjectMeta) String() string { return proto.CompactTextString(m) } +func (*ObjectMeta) ProtoMessage() {} + +func (m *ObjectReference) Reset() { *m = ObjectReference{} } +func (m *ObjectReference) String() string { return proto.CompactTextString(m) } +func (*ObjectReference) ProtoMessage() {} + +func (m *OwnerReference) Reset() { *m = OwnerReference{} } +func (m *OwnerReference) String() string { return proto.CompactTextString(m) } +func (*OwnerReference) ProtoMessage() {} + +func (m *PersistentVolume) Reset() { *m = PersistentVolume{} } +func (m *PersistentVolume) String() string { return proto.CompactTextString(m) } +func (*PersistentVolume) ProtoMessage() {} + +func (m *PersistentVolumeClaim) Reset() { *m = PersistentVolumeClaim{} } +func (m *PersistentVolumeClaim) String() string { return proto.CompactTextString(m) } +func (*PersistentVolumeClaim) ProtoMessage() {} + +func (m *PersistentVolumeClaimList) Reset() { *m = PersistentVolumeClaimList{} } +func (m *PersistentVolumeClaimList) String() string { return proto.CompactTextString(m) } +func (*PersistentVolumeClaimList) ProtoMessage() {} + +func (m *PersistentVolumeClaimSpec) Reset() { *m = PersistentVolumeClaimSpec{} } +func (m *PersistentVolumeClaimSpec) String() string { return proto.CompactTextString(m) } +func (*PersistentVolumeClaimSpec) ProtoMessage() {} + +func (m *PersistentVolumeClaimStatus) Reset() { *m = PersistentVolumeClaimStatus{} } +func (m *PersistentVolumeClaimStatus) String() string { return proto.CompactTextString(m) } +func (*PersistentVolumeClaimStatus) ProtoMessage() {} + +func (m *PersistentVolumeClaimVolumeSource) Reset() { *m = PersistentVolumeClaimVolumeSource{} } +func (m *PersistentVolumeClaimVolumeSource) String() string { return proto.CompactTextString(m) } +func (*PersistentVolumeClaimVolumeSource) ProtoMessage() {} + +func (m *PersistentVolumeList) Reset() { *m = PersistentVolumeList{} } +func (m *PersistentVolumeList) String() string { return proto.CompactTextString(m) } +func (*PersistentVolumeList) ProtoMessage() {} + +func (m *PersistentVolumeSource) Reset() { *m = PersistentVolumeSource{} } +func (m *PersistentVolumeSource) String() string { return proto.CompactTextString(m) } +func (*PersistentVolumeSource) ProtoMessage() {} + +func (m *PersistentVolumeSpec) Reset() { *m = PersistentVolumeSpec{} } +func (m *PersistentVolumeSpec) String() string { return proto.CompactTextString(m) } +func (*PersistentVolumeSpec) ProtoMessage() {} + +func (m *PersistentVolumeStatus) Reset() { *m = PersistentVolumeStatus{} } +func (m *PersistentVolumeStatus) String() string { return proto.CompactTextString(m) } +func (*PersistentVolumeStatus) ProtoMessage() {} + +func (m *Pod) Reset() { *m = Pod{} } +func (m 
*Pod) String() string { return proto.CompactTextString(m) } +func (*Pod) ProtoMessage() {} + +func (m *PodAffinity) Reset() { *m = PodAffinity{} } +func (m *PodAffinity) String() string { return proto.CompactTextString(m) } +func (*PodAffinity) ProtoMessage() {} + +func (m *PodAffinityTerm) Reset() { *m = PodAffinityTerm{} } +func (m *PodAffinityTerm) String() string { return proto.CompactTextString(m) } +func (*PodAffinityTerm) ProtoMessage() {} + +func (m *PodAntiAffinity) Reset() { *m = PodAntiAffinity{} } +func (m *PodAntiAffinity) String() string { return proto.CompactTextString(m) } +func (*PodAntiAffinity) ProtoMessage() {} + +func (m *PodAttachOptions) Reset() { *m = PodAttachOptions{} } +func (m *PodAttachOptions) String() string { return proto.CompactTextString(m) } +func (*PodAttachOptions) ProtoMessage() {} + +func (m *PodCondition) Reset() { *m = PodCondition{} } +func (m *PodCondition) String() string { return proto.CompactTextString(m) } +func (*PodCondition) ProtoMessage() {} + +func (m *PodExecOptions) Reset() { *m = PodExecOptions{} } +func (m *PodExecOptions) String() string { return proto.CompactTextString(m) } +func (*PodExecOptions) ProtoMessage() {} + +func (m *PodList) Reset() { *m = PodList{} } +func (m *PodList) String() string { return proto.CompactTextString(m) } +func (*PodList) ProtoMessage() {} + +func (m *PodLogOptions) Reset() { *m = PodLogOptions{} } +func (m *PodLogOptions) String() string { return proto.CompactTextString(m) } +func (*PodLogOptions) ProtoMessage() {} + +func (m *PodProxyOptions) Reset() { *m = PodProxyOptions{} } +func (m *PodProxyOptions) String() string { return proto.CompactTextString(m) } +func (*PodProxyOptions) ProtoMessage() {} + +func (m *PodSecurityContext) Reset() { *m = PodSecurityContext{} } +func (m *PodSecurityContext) String() string { return proto.CompactTextString(m) } +func (*PodSecurityContext) ProtoMessage() {} + +func (m *PodSpec) Reset() { *m = PodSpec{} } +func (m *PodSpec) String() string { return proto.CompactTextString(m) } +func (*PodSpec) ProtoMessage() {} + +func (m *PodStatus) Reset() { *m = PodStatus{} } +func (m *PodStatus) String() string { return proto.CompactTextString(m) } +func (*PodStatus) ProtoMessage() {} + +func (m *PodStatusResult) Reset() { *m = PodStatusResult{} } +func (m *PodStatusResult) String() string { return proto.CompactTextString(m) } +func (*PodStatusResult) ProtoMessage() {} + +func (m *PodTemplate) Reset() { *m = PodTemplate{} } +func (m *PodTemplate) String() string { return proto.CompactTextString(m) } +func (*PodTemplate) ProtoMessage() {} + +func (m *PodTemplateList) Reset() { *m = PodTemplateList{} } +func (m *PodTemplateList) String() string { return proto.CompactTextString(m) } +func (*PodTemplateList) ProtoMessage() {} + +func (m *PodTemplateSpec) Reset() { *m = PodTemplateSpec{} } +func (m *PodTemplateSpec) String() string { return proto.CompactTextString(m) } +func (*PodTemplateSpec) ProtoMessage() {} + +func (m *Preconditions) Reset() { *m = Preconditions{} } +func (m *Preconditions) String() string { return proto.CompactTextString(m) } +func (*Preconditions) ProtoMessage() {} + +func (m *PreferredSchedulingTerm) Reset() { *m = PreferredSchedulingTerm{} } +func (m *PreferredSchedulingTerm) String() string { return proto.CompactTextString(m) } +func (*PreferredSchedulingTerm) ProtoMessage() {} + +func (m *Probe) Reset() { *m = Probe{} } +func (m *Probe) String() string { return proto.CompactTextString(m) } +func (*Probe) ProtoMessage() {} + +func (m *RBDVolumeSource) 
Reset() { *m = RBDVolumeSource{} } +func (m *RBDVolumeSource) String() string { return proto.CompactTextString(m) } +func (*RBDVolumeSource) ProtoMessage() {} + +func (m *RangeAllocation) Reset() { *m = RangeAllocation{} } +func (m *RangeAllocation) String() string { return proto.CompactTextString(m) } +func (*RangeAllocation) ProtoMessage() {} + +func (m *ReplicationController) Reset() { *m = ReplicationController{} } +func (m *ReplicationController) String() string { return proto.CompactTextString(m) } +func (*ReplicationController) ProtoMessage() {} + +func (m *ReplicationControllerList) Reset() { *m = ReplicationControllerList{} } +func (m *ReplicationControllerList) String() string { return proto.CompactTextString(m) } +func (*ReplicationControllerList) ProtoMessage() {} + +func (m *ReplicationControllerSpec) Reset() { *m = ReplicationControllerSpec{} } +func (m *ReplicationControllerSpec) String() string { return proto.CompactTextString(m) } +func (*ReplicationControllerSpec) ProtoMessage() {} + +func (m *ReplicationControllerStatus) Reset() { *m = ReplicationControllerStatus{} } +func (m *ReplicationControllerStatus) String() string { return proto.CompactTextString(m) } +func (*ReplicationControllerStatus) ProtoMessage() {} + +func (m *ResourceFieldSelector) Reset() { *m = ResourceFieldSelector{} } +func (m *ResourceFieldSelector) String() string { return proto.CompactTextString(m) } +func (*ResourceFieldSelector) ProtoMessage() {} + +func (m *ResourceQuota) Reset() { *m = ResourceQuota{} } +func (m *ResourceQuota) String() string { return proto.CompactTextString(m) } +func (*ResourceQuota) ProtoMessage() {} + +func (m *ResourceQuotaList) Reset() { *m = ResourceQuotaList{} } +func (m *ResourceQuotaList) String() string { return proto.CompactTextString(m) } +func (*ResourceQuotaList) ProtoMessage() {} + +func (m *ResourceQuotaSpec) Reset() { *m = ResourceQuotaSpec{} } +func (m *ResourceQuotaSpec) String() string { return proto.CompactTextString(m) } +func (*ResourceQuotaSpec) ProtoMessage() {} + +func (m *ResourceQuotaStatus) Reset() { *m = ResourceQuotaStatus{} } +func (m *ResourceQuotaStatus) String() string { return proto.CompactTextString(m) } +func (*ResourceQuotaStatus) ProtoMessage() {} + +func (m *ResourceRequirements) Reset() { *m = ResourceRequirements{} } +func (m *ResourceRequirements) String() string { return proto.CompactTextString(m) } +func (*ResourceRequirements) ProtoMessage() {} + +func (m *SELinuxOptions) Reset() { *m = SELinuxOptions{} } +func (m *SELinuxOptions) String() string { return proto.CompactTextString(m) } +func (*SELinuxOptions) ProtoMessage() {} + +func (m *Secret) Reset() { *m = Secret{} } +func (m *Secret) String() string { return proto.CompactTextString(m) } +func (*Secret) ProtoMessage() {} + +func (m *SecretKeySelector) Reset() { *m = SecretKeySelector{} } +func (m *SecretKeySelector) String() string { return proto.CompactTextString(m) } +func (*SecretKeySelector) ProtoMessage() {} + +func (m *SecretList) Reset() { *m = SecretList{} } +func (m *SecretList) String() string { return proto.CompactTextString(m) } +func (*SecretList) ProtoMessage() {} + +func (m *SecretVolumeSource) Reset() { *m = SecretVolumeSource{} } +func (m *SecretVolumeSource) String() string { return proto.CompactTextString(m) } +func (*SecretVolumeSource) ProtoMessage() {} + +func (m *SecurityContext) Reset() { *m = SecurityContext{} } +func (m *SecurityContext) String() string { return proto.CompactTextString(m) } +func (*SecurityContext) ProtoMessage() {} + +func (m 
*SerializedReference) Reset() { *m = SerializedReference{} } +func (m *SerializedReference) String() string { return proto.CompactTextString(m) } +func (*SerializedReference) ProtoMessage() {} + +func (m *Service) Reset() { *m = Service{} } +func (m *Service) String() string { return proto.CompactTextString(m) } +func (*Service) ProtoMessage() {} + +func (m *ServiceAccount) Reset() { *m = ServiceAccount{} } +func (m *ServiceAccount) String() string { return proto.CompactTextString(m) } +func (*ServiceAccount) ProtoMessage() {} + +func (m *ServiceAccountList) Reset() { *m = ServiceAccountList{} } +func (m *ServiceAccountList) String() string { return proto.CompactTextString(m) } +func (*ServiceAccountList) ProtoMessage() {} + +func (m *ServiceList) Reset() { *m = ServiceList{} } +func (m *ServiceList) String() string { return proto.CompactTextString(m) } +func (*ServiceList) ProtoMessage() {} + +func (m *ServicePort) Reset() { *m = ServicePort{} } +func (m *ServicePort) String() string { return proto.CompactTextString(m) } +func (*ServicePort) ProtoMessage() {} + +func (m *ServiceProxyOptions) Reset() { *m = ServiceProxyOptions{} } +func (m *ServiceProxyOptions) String() string { return proto.CompactTextString(m) } +func (*ServiceProxyOptions) ProtoMessage() {} + +func (m *ServiceSpec) Reset() { *m = ServiceSpec{} } +func (m *ServiceSpec) String() string { return proto.CompactTextString(m) } +func (*ServiceSpec) ProtoMessage() {} + +func (m *ServiceStatus) Reset() { *m = ServiceStatus{} } +func (m *ServiceStatus) String() string { return proto.CompactTextString(m) } +func (*ServiceStatus) ProtoMessage() {} + +func (m *TCPSocketAction) Reset() { *m = TCPSocketAction{} } +func (m *TCPSocketAction) String() string { return proto.CompactTextString(m) } +func (*TCPSocketAction) ProtoMessage() {} + +func (m *Taint) Reset() { *m = Taint{} } +func (m *Taint) String() string { return proto.CompactTextString(m) } +func (*Taint) ProtoMessage() {} + +func (m *Toleration) Reset() { *m = Toleration{} } +func (m *Toleration) String() string { return proto.CompactTextString(m) } +func (*Toleration) ProtoMessage() {} + +func (m *Volume) Reset() { *m = Volume{} } +func (m *Volume) String() string { return proto.CompactTextString(m) } +func (*Volume) ProtoMessage() {} + +func (m *VolumeMount) Reset() { *m = VolumeMount{} } +func (m *VolumeMount) String() string { return proto.CompactTextString(m) } +func (*VolumeMount) ProtoMessage() {} + +func (m *VolumeSource) Reset() { *m = VolumeSource{} } +func (m *VolumeSource) String() string { return proto.CompactTextString(m) } +func (*VolumeSource) ProtoMessage() {} + +func (m *VsphereVirtualDiskVolumeSource) Reset() { *m = VsphereVirtualDiskVolumeSource{} } +func (m *VsphereVirtualDiskVolumeSource) String() string { return proto.CompactTextString(m) } +func (*VsphereVirtualDiskVolumeSource) ProtoMessage() {} + +func (m *WeightedPodAffinityTerm) Reset() { *m = WeightedPodAffinityTerm{} } +func (m *WeightedPodAffinityTerm) String() string { return proto.CompactTextString(m) } +func (*WeightedPodAffinityTerm) ProtoMessage() {} + +func init() { + proto.RegisterType((*AWSElasticBlockStoreVolumeSource)(nil), "k8s.io.kubernetes.pkg.api.v1.AWSElasticBlockStoreVolumeSource") + proto.RegisterType((*Affinity)(nil), "k8s.io.kubernetes.pkg.api.v1.Affinity") + proto.RegisterType((*AzureFileVolumeSource)(nil), "k8s.io.kubernetes.pkg.api.v1.AzureFileVolumeSource") + proto.RegisterType((*Binding)(nil), "k8s.io.kubernetes.pkg.api.v1.Binding") + 
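+	// Editorial note (comment added in review, not generated output): each
+	// RegisterType call below records the Go type in the gogo/protobuf type
+	// registry under its fully qualified proto message name, so the Go type
+	// for "k8s.io.kubernetes.pkg.api.v1.<Name>" can later be resolved by
+	// name (e.g. via proto.MessageType) when decoding or reflecting.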
proto.RegisterType((*Capabilities)(nil), "k8s.io.kubernetes.pkg.api.v1.Capabilities") + proto.RegisterType((*CephFSVolumeSource)(nil), "k8s.io.kubernetes.pkg.api.v1.CephFSVolumeSource") + proto.RegisterType((*CinderVolumeSource)(nil), "k8s.io.kubernetes.pkg.api.v1.CinderVolumeSource") + proto.RegisterType((*ComponentCondition)(nil), "k8s.io.kubernetes.pkg.api.v1.ComponentCondition") + proto.RegisterType((*ComponentStatus)(nil), "k8s.io.kubernetes.pkg.api.v1.ComponentStatus") + proto.RegisterType((*ComponentStatusList)(nil), "k8s.io.kubernetes.pkg.api.v1.ComponentStatusList") + proto.RegisterType((*ConfigMap)(nil), "k8s.io.kubernetes.pkg.api.v1.ConfigMap") + proto.RegisterType((*ConfigMapKeySelector)(nil), "k8s.io.kubernetes.pkg.api.v1.ConfigMapKeySelector") + proto.RegisterType((*ConfigMapList)(nil), "k8s.io.kubernetes.pkg.api.v1.ConfigMapList") + proto.RegisterType((*ConfigMapVolumeSource)(nil), "k8s.io.kubernetes.pkg.api.v1.ConfigMapVolumeSource") + proto.RegisterType((*Container)(nil), "k8s.io.kubernetes.pkg.api.v1.Container") + proto.RegisterType((*ContainerImage)(nil), "k8s.io.kubernetes.pkg.api.v1.ContainerImage") + proto.RegisterType((*ContainerPort)(nil), "k8s.io.kubernetes.pkg.api.v1.ContainerPort") + proto.RegisterType((*ContainerState)(nil), "k8s.io.kubernetes.pkg.api.v1.ContainerState") + proto.RegisterType((*ContainerStateRunning)(nil), "k8s.io.kubernetes.pkg.api.v1.ContainerStateRunning") + proto.RegisterType((*ContainerStateTerminated)(nil), "k8s.io.kubernetes.pkg.api.v1.ContainerStateTerminated") + proto.RegisterType((*ContainerStateWaiting)(nil), "k8s.io.kubernetes.pkg.api.v1.ContainerStateWaiting") + proto.RegisterType((*ContainerStatus)(nil), "k8s.io.kubernetes.pkg.api.v1.ContainerStatus") + proto.RegisterType((*DaemonEndpoint)(nil), "k8s.io.kubernetes.pkg.api.v1.DaemonEndpoint") + proto.RegisterType((*DeleteOptions)(nil), "k8s.io.kubernetes.pkg.api.v1.DeleteOptions") + proto.RegisterType((*DownwardAPIVolumeFile)(nil), "k8s.io.kubernetes.pkg.api.v1.DownwardAPIVolumeFile") + proto.RegisterType((*DownwardAPIVolumeSource)(nil), "k8s.io.kubernetes.pkg.api.v1.DownwardAPIVolumeSource") + proto.RegisterType((*EmptyDirVolumeSource)(nil), "k8s.io.kubernetes.pkg.api.v1.EmptyDirVolumeSource") + proto.RegisterType((*EndpointAddress)(nil), "k8s.io.kubernetes.pkg.api.v1.EndpointAddress") + proto.RegisterType((*EndpointPort)(nil), "k8s.io.kubernetes.pkg.api.v1.EndpointPort") + proto.RegisterType((*EndpointSubset)(nil), "k8s.io.kubernetes.pkg.api.v1.EndpointSubset") + proto.RegisterType((*Endpoints)(nil), "k8s.io.kubernetes.pkg.api.v1.Endpoints") + proto.RegisterType((*EndpointsList)(nil), "k8s.io.kubernetes.pkg.api.v1.EndpointsList") + proto.RegisterType((*EnvVar)(nil), "k8s.io.kubernetes.pkg.api.v1.EnvVar") + proto.RegisterType((*EnvVarSource)(nil), "k8s.io.kubernetes.pkg.api.v1.EnvVarSource") + proto.RegisterType((*Event)(nil), "k8s.io.kubernetes.pkg.api.v1.Event") + proto.RegisterType((*EventList)(nil), "k8s.io.kubernetes.pkg.api.v1.EventList") + proto.RegisterType((*EventSource)(nil), "k8s.io.kubernetes.pkg.api.v1.EventSource") + proto.RegisterType((*ExecAction)(nil), "k8s.io.kubernetes.pkg.api.v1.ExecAction") + proto.RegisterType((*ExportOptions)(nil), "k8s.io.kubernetes.pkg.api.v1.ExportOptions") + proto.RegisterType((*FCVolumeSource)(nil), "k8s.io.kubernetes.pkg.api.v1.FCVolumeSource") + proto.RegisterType((*FlexVolumeSource)(nil), "k8s.io.kubernetes.pkg.api.v1.FlexVolumeSource") + proto.RegisterType((*FlockerVolumeSource)(nil), 
"k8s.io.kubernetes.pkg.api.v1.FlockerVolumeSource") + proto.RegisterType((*GCEPersistentDiskVolumeSource)(nil), "k8s.io.kubernetes.pkg.api.v1.GCEPersistentDiskVolumeSource") + proto.RegisterType((*GitRepoVolumeSource)(nil), "k8s.io.kubernetes.pkg.api.v1.GitRepoVolumeSource") + proto.RegisterType((*GlusterfsVolumeSource)(nil), "k8s.io.kubernetes.pkg.api.v1.GlusterfsVolumeSource") + proto.RegisterType((*HTTPGetAction)(nil), "k8s.io.kubernetes.pkg.api.v1.HTTPGetAction") + proto.RegisterType((*HTTPHeader)(nil), "k8s.io.kubernetes.pkg.api.v1.HTTPHeader") + proto.RegisterType((*Handler)(nil), "k8s.io.kubernetes.pkg.api.v1.Handler") + proto.RegisterType((*HostPathVolumeSource)(nil), "k8s.io.kubernetes.pkg.api.v1.HostPathVolumeSource") + proto.RegisterType((*ISCSIVolumeSource)(nil), "k8s.io.kubernetes.pkg.api.v1.ISCSIVolumeSource") + proto.RegisterType((*KeyToPath)(nil), "k8s.io.kubernetes.pkg.api.v1.KeyToPath") + proto.RegisterType((*Lifecycle)(nil), "k8s.io.kubernetes.pkg.api.v1.Lifecycle") + proto.RegisterType((*LimitRange)(nil), "k8s.io.kubernetes.pkg.api.v1.LimitRange") + proto.RegisterType((*LimitRangeItem)(nil), "k8s.io.kubernetes.pkg.api.v1.LimitRangeItem") + proto.RegisterType((*LimitRangeList)(nil), "k8s.io.kubernetes.pkg.api.v1.LimitRangeList") + proto.RegisterType((*LimitRangeSpec)(nil), "k8s.io.kubernetes.pkg.api.v1.LimitRangeSpec") + proto.RegisterType((*List)(nil), "k8s.io.kubernetes.pkg.api.v1.List") + proto.RegisterType((*ListOptions)(nil), "k8s.io.kubernetes.pkg.api.v1.ListOptions") + proto.RegisterType((*LoadBalancerIngress)(nil), "k8s.io.kubernetes.pkg.api.v1.LoadBalancerIngress") + proto.RegisterType((*LoadBalancerStatus)(nil), "k8s.io.kubernetes.pkg.api.v1.LoadBalancerStatus") + proto.RegisterType((*LocalObjectReference)(nil), "k8s.io.kubernetes.pkg.api.v1.LocalObjectReference") + proto.RegisterType((*NFSVolumeSource)(nil), "k8s.io.kubernetes.pkg.api.v1.NFSVolumeSource") + proto.RegisterType((*Namespace)(nil), "k8s.io.kubernetes.pkg.api.v1.Namespace") + proto.RegisterType((*NamespaceList)(nil), "k8s.io.kubernetes.pkg.api.v1.NamespaceList") + proto.RegisterType((*NamespaceSpec)(nil), "k8s.io.kubernetes.pkg.api.v1.NamespaceSpec") + proto.RegisterType((*NamespaceStatus)(nil), "k8s.io.kubernetes.pkg.api.v1.NamespaceStatus") + proto.RegisterType((*Node)(nil), "k8s.io.kubernetes.pkg.api.v1.Node") + proto.RegisterType((*NodeAddress)(nil), "k8s.io.kubernetes.pkg.api.v1.NodeAddress") + proto.RegisterType((*NodeAffinity)(nil), "k8s.io.kubernetes.pkg.api.v1.NodeAffinity") + proto.RegisterType((*NodeCondition)(nil), "k8s.io.kubernetes.pkg.api.v1.NodeCondition") + proto.RegisterType((*NodeDaemonEndpoints)(nil), "k8s.io.kubernetes.pkg.api.v1.NodeDaemonEndpoints") + proto.RegisterType((*NodeList)(nil), "k8s.io.kubernetes.pkg.api.v1.NodeList") + proto.RegisterType((*NodeProxyOptions)(nil), "k8s.io.kubernetes.pkg.api.v1.NodeProxyOptions") + proto.RegisterType((*NodeSelector)(nil), "k8s.io.kubernetes.pkg.api.v1.NodeSelector") + proto.RegisterType((*NodeSelectorRequirement)(nil), "k8s.io.kubernetes.pkg.api.v1.NodeSelectorRequirement") + proto.RegisterType((*NodeSelectorTerm)(nil), "k8s.io.kubernetes.pkg.api.v1.NodeSelectorTerm") + proto.RegisterType((*NodeSpec)(nil), "k8s.io.kubernetes.pkg.api.v1.NodeSpec") + proto.RegisterType((*NodeStatus)(nil), "k8s.io.kubernetes.pkg.api.v1.NodeStatus") + proto.RegisterType((*NodeSystemInfo)(nil), "k8s.io.kubernetes.pkg.api.v1.NodeSystemInfo") + proto.RegisterType((*ObjectFieldSelector)(nil), "k8s.io.kubernetes.pkg.api.v1.ObjectFieldSelector") + 
proto.RegisterType((*ObjectMeta)(nil), "k8s.io.kubernetes.pkg.api.v1.ObjectMeta") + proto.RegisterType((*ObjectReference)(nil), "k8s.io.kubernetes.pkg.api.v1.ObjectReference") + proto.RegisterType((*OwnerReference)(nil), "k8s.io.kubernetes.pkg.api.v1.OwnerReference") + proto.RegisterType((*PersistentVolume)(nil), "k8s.io.kubernetes.pkg.api.v1.PersistentVolume") + proto.RegisterType((*PersistentVolumeClaim)(nil), "k8s.io.kubernetes.pkg.api.v1.PersistentVolumeClaim") + proto.RegisterType((*PersistentVolumeClaimList)(nil), "k8s.io.kubernetes.pkg.api.v1.PersistentVolumeClaimList") + proto.RegisterType((*PersistentVolumeClaimSpec)(nil), "k8s.io.kubernetes.pkg.api.v1.PersistentVolumeClaimSpec") + proto.RegisterType((*PersistentVolumeClaimStatus)(nil), "k8s.io.kubernetes.pkg.api.v1.PersistentVolumeClaimStatus") + proto.RegisterType((*PersistentVolumeClaimVolumeSource)(nil), "k8s.io.kubernetes.pkg.api.v1.PersistentVolumeClaimVolumeSource") + proto.RegisterType((*PersistentVolumeList)(nil), "k8s.io.kubernetes.pkg.api.v1.PersistentVolumeList") + proto.RegisterType((*PersistentVolumeSource)(nil), "k8s.io.kubernetes.pkg.api.v1.PersistentVolumeSource") + proto.RegisterType((*PersistentVolumeSpec)(nil), "k8s.io.kubernetes.pkg.api.v1.PersistentVolumeSpec") + proto.RegisterType((*PersistentVolumeStatus)(nil), "k8s.io.kubernetes.pkg.api.v1.PersistentVolumeStatus") + proto.RegisterType((*Pod)(nil), "k8s.io.kubernetes.pkg.api.v1.Pod") + proto.RegisterType((*PodAffinity)(nil), "k8s.io.kubernetes.pkg.api.v1.PodAffinity") + proto.RegisterType((*PodAffinityTerm)(nil), "k8s.io.kubernetes.pkg.api.v1.PodAffinityTerm") + proto.RegisterType((*PodAntiAffinity)(nil), "k8s.io.kubernetes.pkg.api.v1.PodAntiAffinity") + proto.RegisterType((*PodAttachOptions)(nil), "k8s.io.kubernetes.pkg.api.v1.PodAttachOptions") + proto.RegisterType((*PodCondition)(nil), "k8s.io.kubernetes.pkg.api.v1.PodCondition") + proto.RegisterType((*PodExecOptions)(nil), "k8s.io.kubernetes.pkg.api.v1.PodExecOptions") + proto.RegisterType((*PodList)(nil), "k8s.io.kubernetes.pkg.api.v1.PodList") + proto.RegisterType((*PodLogOptions)(nil), "k8s.io.kubernetes.pkg.api.v1.PodLogOptions") + proto.RegisterType((*PodProxyOptions)(nil), "k8s.io.kubernetes.pkg.api.v1.PodProxyOptions") + proto.RegisterType((*PodSecurityContext)(nil), "k8s.io.kubernetes.pkg.api.v1.PodSecurityContext") + proto.RegisterType((*PodSpec)(nil), "k8s.io.kubernetes.pkg.api.v1.PodSpec") + proto.RegisterType((*PodStatus)(nil), "k8s.io.kubernetes.pkg.api.v1.PodStatus") + proto.RegisterType((*PodStatusResult)(nil), "k8s.io.kubernetes.pkg.api.v1.PodStatusResult") + proto.RegisterType((*PodTemplate)(nil), "k8s.io.kubernetes.pkg.api.v1.PodTemplate") + proto.RegisterType((*PodTemplateList)(nil), "k8s.io.kubernetes.pkg.api.v1.PodTemplateList") + proto.RegisterType((*PodTemplateSpec)(nil), "k8s.io.kubernetes.pkg.api.v1.PodTemplateSpec") + proto.RegisterType((*Preconditions)(nil), "k8s.io.kubernetes.pkg.api.v1.Preconditions") + proto.RegisterType((*PreferredSchedulingTerm)(nil), "k8s.io.kubernetes.pkg.api.v1.PreferredSchedulingTerm") + proto.RegisterType((*Probe)(nil), "k8s.io.kubernetes.pkg.api.v1.Probe") + proto.RegisterType((*RBDVolumeSource)(nil), "k8s.io.kubernetes.pkg.api.v1.RBDVolumeSource") + proto.RegisterType((*RangeAllocation)(nil), "k8s.io.kubernetes.pkg.api.v1.RangeAllocation") + proto.RegisterType((*ReplicationController)(nil), "k8s.io.kubernetes.pkg.api.v1.ReplicationController") + proto.RegisterType((*ReplicationControllerList)(nil), 
"k8s.io.kubernetes.pkg.api.v1.ReplicationControllerList") + proto.RegisterType((*ReplicationControllerSpec)(nil), "k8s.io.kubernetes.pkg.api.v1.ReplicationControllerSpec") + proto.RegisterType((*ReplicationControllerStatus)(nil), "k8s.io.kubernetes.pkg.api.v1.ReplicationControllerStatus") + proto.RegisterType((*ResourceFieldSelector)(nil), "k8s.io.kubernetes.pkg.api.v1.ResourceFieldSelector") + proto.RegisterType((*ResourceQuota)(nil), "k8s.io.kubernetes.pkg.api.v1.ResourceQuota") + proto.RegisterType((*ResourceQuotaList)(nil), "k8s.io.kubernetes.pkg.api.v1.ResourceQuotaList") + proto.RegisterType((*ResourceQuotaSpec)(nil), "k8s.io.kubernetes.pkg.api.v1.ResourceQuotaSpec") + proto.RegisterType((*ResourceQuotaStatus)(nil), "k8s.io.kubernetes.pkg.api.v1.ResourceQuotaStatus") + proto.RegisterType((*ResourceRequirements)(nil), "k8s.io.kubernetes.pkg.api.v1.ResourceRequirements") + proto.RegisterType((*SELinuxOptions)(nil), "k8s.io.kubernetes.pkg.api.v1.SELinuxOptions") + proto.RegisterType((*Secret)(nil), "k8s.io.kubernetes.pkg.api.v1.Secret") + proto.RegisterType((*SecretKeySelector)(nil), "k8s.io.kubernetes.pkg.api.v1.SecretKeySelector") + proto.RegisterType((*SecretList)(nil), "k8s.io.kubernetes.pkg.api.v1.SecretList") + proto.RegisterType((*SecretVolumeSource)(nil), "k8s.io.kubernetes.pkg.api.v1.SecretVolumeSource") + proto.RegisterType((*SecurityContext)(nil), "k8s.io.kubernetes.pkg.api.v1.SecurityContext") + proto.RegisterType((*SerializedReference)(nil), "k8s.io.kubernetes.pkg.api.v1.SerializedReference") + proto.RegisterType((*Service)(nil), "k8s.io.kubernetes.pkg.api.v1.Service") + proto.RegisterType((*ServiceAccount)(nil), "k8s.io.kubernetes.pkg.api.v1.ServiceAccount") + proto.RegisterType((*ServiceAccountList)(nil), "k8s.io.kubernetes.pkg.api.v1.ServiceAccountList") + proto.RegisterType((*ServiceList)(nil), "k8s.io.kubernetes.pkg.api.v1.ServiceList") + proto.RegisterType((*ServicePort)(nil), "k8s.io.kubernetes.pkg.api.v1.ServicePort") + proto.RegisterType((*ServiceProxyOptions)(nil), "k8s.io.kubernetes.pkg.api.v1.ServiceProxyOptions") + proto.RegisterType((*ServiceSpec)(nil), "k8s.io.kubernetes.pkg.api.v1.ServiceSpec") + proto.RegisterType((*ServiceStatus)(nil), "k8s.io.kubernetes.pkg.api.v1.ServiceStatus") + proto.RegisterType((*TCPSocketAction)(nil), "k8s.io.kubernetes.pkg.api.v1.TCPSocketAction") + proto.RegisterType((*Taint)(nil), "k8s.io.kubernetes.pkg.api.v1.Taint") + proto.RegisterType((*Toleration)(nil), "k8s.io.kubernetes.pkg.api.v1.Toleration") + proto.RegisterType((*Volume)(nil), "k8s.io.kubernetes.pkg.api.v1.Volume") + proto.RegisterType((*VolumeMount)(nil), "k8s.io.kubernetes.pkg.api.v1.VolumeMount") + proto.RegisterType((*VolumeSource)(nil), "k8s.io.kubernetes.pkg.api.v1.VolumeSource") + proto.RegisterType((*VsphereVirtualDiskVolumeSource)(nil), "k8s.io.kubernetes.pkg.api.v1.VsphereVirtualDiskVolumeSource") + proto.RegisterType((*WeightedPodAffinityTerm)(nil), "k8s.io.kubernetes.pkg.api.v1.WeightedPodAffinityTerm") +} +func (m *AWSElasticBlockStoreVolumeSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *AWSElasticBlockStoreVolumeSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.VolumeID))) + i += copy(data[i:], m.VolumeID) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.FSType))) + i += 
copy(data[i:], m.FSType) + data[i] = 0x18 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Partition)) + data[i] = 0x20 + i++ + if m.ReadOnly { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + return i, nil +} + +func (m *Affinity) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Affinity) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.NodeAffinity != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.NodeAffinity.Size())) + n1, err := m.NodeAffinity.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n1 + } + if m.PodAffinity != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.PodAffinity.Size())) + n2, err := m.PodAffinity.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n2 + } + if m.PodAntiAffinity != nil { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.PodAntiAffinity.Size())) + n3, err := m.PodAntiAffinity.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n3 + } + return i, nil +} + +func (m *AzureFileVolumeSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *AzureFileVolumeSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.SecretName))) + i += copy(data[i:], m.SecretName) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ShareName))) + i += copy(data[i:], m.ShareName) + data[i] = 0x18 + i++ + if m.ReadOnly { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + return i, nil +} + +func (m *Binding) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Binding) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n4, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n4 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Target.Size())) + n5, err := m.Target.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n5 + return i, nil +} + +func (m *Capabilities) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Capabilities) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Add) > 0 { + for _, s := range m.Add { + data[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + if len(m.Drop) > 0 { + for _, s := range m.Drop { + data[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + return i, nil +} + +func (m *CephFSVolumeSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + 
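+
+// Editorial sketch, added in review and not part of the generated output:
+// the Marshal/MarshalTo pairs in this file all hand-roll the protobuf wire
+// format. Each field begins with a key byte of fieldNumber<<3|wireType
+// (0xa is field 1, wire type 2 = length-delimited; 0x18 is field 3, wire
+// type 0 = varint); strings and nested messages follow the key with a
+// varint length prefix, and bools are a single 0 or 1 byte. Field numbers
+// above 15 need a multi-byte key, which is why Container's Stdin (field 16)
+// emits 0x80 followed by 0x1. A minimal, self-contained illustration, with
+// hypothetical names (putVarint, encodeDemo) that do not appear in this file:
+//
+//	// putVarint appends v in base-128 varint form, as
+//	// encodeVarintGenerated does in the generated code.
+//	func putVarint(buf []byte, v uint64) []byte {
+//		for v >= 1<<7 {
+//			buf = append(buf, byte(v&0x7f|0x80))
+//			v >>= 7
+//		}
+//		return append(buf, byte(v))
+//	}
+//
+//	// encodeDemo marshals a hypothetical message with a string field 1
+//	// and a bool field 2, mirroring the pattern of the generated code.
+//	func encodeDemo(name string, readOnly bool) []byte {
+//		buf := []byte{0xa}                      // key: field 1, length-delimited
+//		buf = putVarint(buf, uint64(len(name))) // varint length prefix
+//		buf = append(buf, name...)              // raw string bytes
+//		buf = append(buf, 0x10)                 // key: field 2, varint
+//		if readOnly {
+//			buf = append(buf, 1)
+//		} else {
+//			buf = append(buf, 0)
+//		}
+//		return buf // e.g. ("vol-1", true) => 0a 05 76 6f 6c 2d 31 10 01
+//	}
+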
+func (m *CephFSVolumeSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Monitors) > 0 { + for _, s := range m.Monitors { + data[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Path))) + i += copy(data[i:], m.Path) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.User))) + i += copy(data[i:], m.User) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.SecretFile))) + i += copy(data[i:], m.SecretFile) + if m.SecretRef != nil { + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(m.SecretRef.Size())) + n6, err := m.SecretRef.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n6 + } + data[i] = 0x30 + i++ + if m.ReadOnly { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + return i, nil +} + +func (m *CinderVolumeSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *CinderVolumeSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.VolumeID))) + i += copy(data[i:], m.VolumeID) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.FSType))) + i += copy(data[i:], m.FSType) + data[i] = 0x18 + i++ + if m.ReadOnly { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + return i, nil +} + +func (m *ComponentCondition) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ComponentCondition) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Type))) + i += copy(data[i:], m.Type) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Status))) + i += copy(data[i:], m.Status) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Message))) + i += copy(data[i:], m.Message) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Error))) + i += copy(data[i:], m.Error) + return i, nil +} + +func (m *ComponentStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ComponentStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n7, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n7 + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ComponentStatusList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ComponentStatusList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] 
= 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n8, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n8 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ConfigMap) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ConfigMap) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n9, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n9 + if len(m.Data) > 0 { + for k := range m.Data { + data[i] = 0x12 + i++ + v := m.Data[k] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(v))) + i += copy(data[i:], v) + } + } + return i, nil +} + +func (m *ConfigMapKeySelector) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ConfigMapKeySelector) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.LocalObjectReference.Size())) + n10, err := m.LocalObjectReference.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n10 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Key))) + i += copy(data[i:], m.Key) + return i, nil +} + +func (m *ConfigMapList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ConfigMapList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n11, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n11 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ConfigMapVolumeSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ConfigMapVolumeSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.LocalObjectReference.Size())) + n12, err := m.LocalObjectReference.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n12 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m 
*Container) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Container) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Image))) + i += copy(data[i:], m.Image) + if len(m.Command) > 0 { + for _, s := range m.Command { + data[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + if len(m.Args) > 0 { + for _, s := range m.Args { + data[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.WorkingDir))) + i += copy(data[i:], m.WorkingDir) + if len(m.Ports) > 0 { + for _, msg := range m.Ports { + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Env) > 0 { + for _, msg := range m.Env { + data[i] = 0x3a + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + data[i] = 0x42 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Resources.Size())) + n13, err := m.Resources.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n13 + if len(m.VolumeMounts) > 0 { + for _, msg := range m.VolumeMounts { + data[i] = 0x4a + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.LivenessProbe != nil { + data[i] = 0x52 + i++ + i = encodeVarintGenerated(data, i, uint64(m.LivenessProbe.Size())) + n14, err := m.LivenessProbe.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n14 + } + if m.ReadinessProbe != nil { + data[i] = 0x5a + i++ + i = encodeVarintGenerated(data, i, uint64(m.ReadinessProbe.Size())) + n15, err := m.ReadinessProbe.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n15 + } + if m.Lifecycle != nil { + data[i] = 0x62 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Lifecycle.Size())) + n16, err := m.Lifecycle.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n16 + } + data[i] = 0x6a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.TerminationMessagePath))) + i += copy(data[i:], m.TerminationMessagePath) + data[i] = 0x72 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ImagePullPolicy))) + i += copy(data[i:], m.ImagePullPolicy) + if m.SecurityContext != nil { + data[i] = 0x7a + i++ + i = encodeVarintGenerated(data, i, uint64(m.SecurityContext.Size())) + n17, err := m.SecurityContext.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n17 + } + data[i] = 0x80 + i++ + data[i] = 0x1 + i++ + if m.Stdin { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x88 + i++ + data[i] = 0x1 + i++ + if m.StdinOnce { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x90 + i++ + data[i] = 0x1 + i++ + if m.TTY { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + return i, nil +} + +func (m *ContainerImage) Marshal() (data []byte, err 
error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ContainerImage) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Names) > 0 { + for _, s := range m.Names { + data[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(m.SizeBytes)) + return i, nil +} + +func (m *ContainerPort) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ContainerPort) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(m.HostPort)) + data[i] = 0x18 + i++ + i = encodeVarintGenerated(data, i, uint64(m.ContainerPort)) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Protocol))) + i += copy(data[i:], m.Protocol) + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.HostIP))) + i += copy(data[i:], m.HostIP) + return i, nil +} + +func (m *ContainerState) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ContainerState) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Waiting != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.Waiting.Size())) + n18, err := m.Waiting.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n18 + } + if m.Running != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Running.Size())) + n19, err := m.Running.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n19 + } + if m.Terminated != nil { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Terminated.Size())) + n20, err := m.Terminated.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n20 + } + return i, nil +} + +func (m *ContainerStateRunning) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ContainerStateRunning) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.StartedAt.Size())) + n21, err := m.StartedAt.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n21 + return i, nil +} + +func (m *ContainerStateTerminated) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ContainerStateTerminated) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(m.ExitCode)) + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Signal)) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) + i += copy(data[i:], m.Reason) + data[i] = 0x22 + i++ + i = 
encodeVarintGenerated(data, i, uint64(len(m.Message))) + i += copy(data[i:], m.Message) + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(m.StartedAt.Size())) + n22, err := m.StartedAt.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n22 + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(m.FinishedAt.Size())) + n23, err := m.FinishedAt.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n23 + data[i] = 0x3a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ContainerID))) + i += copy(data[i:], m.ContainerID) + return i, nil +} + +func (m *ContainerStateWaiting) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ContainerStateWaiting) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) + i += copy(data[i:], m.Reason) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Message))) + i += copy(data[i:], m.Message) + return i, nil +} + +func (m *ContainerStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ContainerStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.State.Size())) + n24, err := m.State.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n24 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.LastTerminationState.Size())) + n25, err := m.LastTerminationState.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n25 + data[i] = 0x20 + i++ + if m.Ready { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x28 + i++ + i = encodeVarintGenerated(data, i, uint64(m.RestartCount)) + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Image))) + i += copy(data[i:], m.Image) + data[i] = 0x3a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ImageID))) + i += copy(data[i:], m.ImageID) + data[i] = 0x42 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ContainerID))) + i += copy(data[i:], m.ContainerID) + return i, nil +} + +func (m *DaemonEndpoint) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *DaemonEndpoint) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Port)) + return i, nil +} + +func (m *DeleteOptions) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *DeleteOptions) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.GracePeriodSeconds != nil { + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.GracePeriodSeconds)) + } + if m.Preconditions != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Preconditions.Size())) + n26, err := m.Preconditions.MarshalTo(data[i:]) + if 
err != nil { + return 0, err + } + i += n26 + } + if m.OrphanDependents != nil { + data[i] = 0x18 + i++ + if *m.OrphanDependents { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + } + return i, nil +} + +func (m *DownwardAPIVolumeFile) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *DownwardAPIVolumeFile) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Path))) + i += copy(data[i:], m.Path) + if m.FieldRef != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.FieldRef.Size())) + n27, err := m.FieldRef.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n27 + } + if m.ResourceFieldRef != nil { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.ResourceFieldRef.Size())) + n28, err := m.ResourceFieldRef.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n28 + } + return i, nil +} + +func (m *DownwardAPIVolumeSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *DownwardAPIVolumeSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *EmptyDirVolumeSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *EmptyDirVolumeSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Medium))) + i += copy(data[i:], m.Medium) + return i, nil +} + +func (m *EndpointAddress) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *EndpointAddress) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.IP))) + i += copy(data[i:], m.IP) + if m.TargetRef != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.TargetRef.Size())) + n29, err := m.TargetRef.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n29 + } + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Hostname))) + i += copy(data[i:], m.Hostname) + return i, nil +} + +func (m *EndpointPort) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *EndpointPort) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Port)) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Protocol))) + i += copy(data[i:], m.Protocol) + return i, nil +} + +func 
(m *EndpointSubset) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *EndpointSubset) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Addresses) > 0 { + for _, msg := range m.Addresses { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.NotReadyAddresses) > 0 { + for _, msg := range m.NotReadyAddresses { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Ports) > 0 { + for _, msg := range m.Ports { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *Endpoints) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Endpoints) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n30, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n30 + if len(m.Subsets) > 0 { + for _, msg := range m.Subsets { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *EndpointsList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *EndpointsList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n31, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n31 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *EnvVar) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *EnvVar) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Value))) + i += copy(data[i:], m.Value) + if m.ValueFrom != nil { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.ValueFrom.Size())) + n32, err := m.ValueFrom.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n32 + } + return i, nil +} + +func (m *EnvVarSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *EnvVarSource) MarshalTo(data []byte) (int, error) { + var i int + _ = 
i + var l int + _ = l + if m.FieldRef != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.FieldRef.Size())) + n33, err := m.FieldRef.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n33 + } + if m.ResourceFieldRef != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.ResourceFieldRef.Size())) + n34, err := m.ResourceFieldRef.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n34 + } + if m.ConfigMapKeyRef != nil { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.ConfigMapKeyRef.Size())) + n35, err := m.ConfigMapKeyRef.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n35 + } + if m.SecretKeyRef != nil { + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(m.SecretKeyRef.Size())) + n36, err := m.SecretKeyRef.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n36 + } + return i, nil +} + +func (m *Event) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Event) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n37, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n37 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.InvolvedObject.Size())) + n38, err := m.InvolvedObject.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n38 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) + i += copy(data[i:], m.Reason) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Message))) + i += copy(data[i:], m.Message) + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Source.Size())) + n39, err := m.Source.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n39 + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(m.FirstTimestamp.Size())) + n40, err := m.FirstTimestamp.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n40 + data[i] = 0x3a + i++ + i = encodeVarintGenerated(data, i, uint64(m.LastTimestamp.Size())) + n41, err := m.LastTimestamp.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n41 + data[i] = 0x40 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Count)) + data[i] = 0x4a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Type))) + i += copy(data[i:], m.Type) + return i, nil +} + +func (m *EventList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *EventList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n42, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n42 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *EventSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func 
(m *EventSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Component))) + i += copy(data[i:], m.Component) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Host))) + i += copy(data[i:], m.Host) + return i, nil +} + +func (m *ExecAction) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ExecAction) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Command) > 0 { + for _, s := range m.Command { + data[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + return i, nil +} + +func (m *ExportOptions) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ExportOptions) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + if m.Export { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x10 + i++ + if m.Exact { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + return i, nil +} + +func (m *FCVolumeSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *FCVolumeSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.TargetWWNs) > 0 { + for _, s := range m.TargetWWNs { + data[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + if m.Lun != nil { + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.Lun)) + } + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.FSType))) + i += copy(data[i:], m.FSType) + data[i] = 0x20 + i++ + if m.ReadOnly { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + return i, nil +} + +func (m *FlexVolumeSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *FlexVolumeSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Driver))) + i += copy(data[i:], m.Driver) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.FSType))) + i += copy(data[i:], m.FSType) + if m.SecretRef != nil { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.SecretRef.Size())) + n43, err := m.SecretRef.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n43 + } + data[i] = 0x20 + i++ + if m.ReadOnly { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + if len(m.Options) > 0 { + for k := range m.Options { + data[i] = 0x2a + i++ + v := m.Options[k] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, 
uint64(len(v))) + i += copy(data[i:], v) + } + } + return i, nil +} + +func (m *FlockerVolumeSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *FlockerVolumeSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.DatasetName))) + i += copy(data[i:], m.DatasetName) + return i, nil +} + +func (m *GCEPersistentDiskVolumeSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *GCEPersistentDiskVolumeSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.PDName))) + i += copy(data[i:], m.PDName) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.FSType))) + i += copy(data[i:], m.FSType) + data[i] = 0x18 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Partition)) + data[i] = 0x20 + i++ + if m.ReadOnly { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + return i, nil +} + +func (m *GitRepoVolumeSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *GitRepoVolumeSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Repository))) + i += copy(data[i:], m.Repository) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Revision))) + i += copy(data[i:], m.Revision) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Directory))) + i += copy(data[i:], m.Directory) + return i, nil +} + +func (m *GlusterfsVolumeSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *GlusterfsVolumeSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.EndpointsName))) + i += copy(data[i:], m.EndpointsName) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Path))) + i += copy(data[i:], m.Path) + data[i] = 0x18 + i++ + if m.ReadOnly { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + return i, nil +} + +func (m *HTTPGetAction) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *HTTPGetAction) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Path))) + i += copy(data[i:], m.Path) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Port.Size())) + n44, err := m.Port.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n44 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Host))) + i += copy(data[i:], m.Host) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Scheme))) + i += copy(data[i:], m.Scheme) + if len(m.HTTPHeaders) > 0 { + for _, msg := range 
m.HTTPHeaders { + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *HTTPHeader) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *HTTPHeader) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Value))) + i += copy(data[i:], m.Value) + return i, nil +} + +func (m *Handler) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Handler) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Exec != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.Exec.Size())) + n45, err := m.Exec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n45 + } + if m.HTTPGet != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.HTTPGet.Size())) + n46, err := m.HTTPGet.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n46 + } + if m.TCPSocket != nil { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.TCPSocket.Size())) + n47, err := m.TCPSocket.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n47 + } + return i, nil +} + +func (m *HostPathVolumeSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *HostPathVolumeSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Path))) + i += copy(data[i:], m.Path) + return i, nil +} + +func (m *ISCSIVolumeSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ISCSIVolumeSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.TargetPortal))) + i += copy(data[i:], m.TargetPortal) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.IQN))) + i += copy(data[i:], m.IQN) + data[i] = 0x18 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Lun)) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ISCSIInterface))) + i += copy(data[i:], m.ISCSIInterface) + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.FSType))) + i += copy(data[i:], m.FSType) + data[i] = 0x30 + i++ + if m.ReadOnly { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + return i, nil +} + +func (m *KeyToPath) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *KeyToPath) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Key))) + i += copy(data[i:], m.Key) + data[i] = 
0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Path))) + i += copy(data[i:], m.Path) + return i, nil +} + +func (m *Lifecycle) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Lifecycle) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.PostStart != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.PostStart.Size())) + n48, err := m.PostStart.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n48 + } + if m.PreStop != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.PreStop.Size())) + n49, err := m.PreStop.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n49 + } + return i, nil +} + +func (m *LimitRange) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *LimitRange) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n50, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n50 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n51, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n51 + return i, nil +} + +func (m *LimitRangeItem) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *LimitRangeItem) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Type))) + i += copy(data[i:], m.Type) + if len(m.Max) > 0 { + for k := range m.Max { + data[i] = 0x12 + i++ + v := m.Max[k] + msgSize := (&v).Size() + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64((&v).Size())) + n52, err := (&v).MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n52 + } + } + if len(m.Min) > 0 { + for k := range m.Min { + data[i] = 0x1a + i++ + v := m.Min[k] + msgSize := (&v).Size() + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64((&v).Size())) + n53, err := (&v).MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n53 + } + } + if len(m.Default) > 0 { + for k := range m.Default { + data[i] = 0x22 + i++ + v := m.Default[k] + msgSize := (&v).Size() + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64((&v).Size())) + n54, err := (&v).MarshalTo(data[i:]) + if 
err != nil { + return 0, err + } + i += n54 + } + } + if len(m.DefaultRequest) > 0 { + for k := range m.DefaultRequest { + data[i] = 0x2a + i++ + v := m.DefaultRequest[k] + msgSize := (&v).Size() + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64((&v).Size())) + n55, err := (&v).MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n55 + } + } + if len(m.MaxLimitRequestRatio) > 0 { + for k := range m.MaxLimitRequestRatio { + data[i] = 0x32 + i++ + v := m.MaxLimitRequestRatio[k] + msgSize := (&v).Size() + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64((&v).Size())) + n56, err := (&v).MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n56 + } + } + return i, nil +} + +func (m *LimitRangeList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *LimitRangeList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n57, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n57 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *LimitRangeSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *LimitRangeSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Limits) > 0 { + for _, msg := range m.Limits { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *List) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *List) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n58, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n58 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ListOptions) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ListOptions) MarshalTo(data []byte) (int, error) { + var i int + _ = i + 
var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.LabelSelector))) + i += copy(data[i:], m.LabelSelector) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.FieldSelector))) + i += copy(data[i:], m.FieldSelector) + data[i] = 0x18 + i++ + if m.Watch { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ResourceVersion))) + i += copy(data[i:], m.ResourceVersion) + if m.TimeoutSeconds != nil { + data[i] = 0x28 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.TimeoutSeconds)) + } + return i, nil +} + +func (m *LoadBalancerIngress) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *LoadBalancerIngress) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.IP))) + i += copy(data[i:], m.IP) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Hostname))) + i += copy(data[i:], m.Hostname) + return i, nil +} + +func (m *LoadBalancerStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *LoadBalancerStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Ingress) > 0 { + for _, msg := range m.Ingress { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *LocalObjectReference) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *LocalObjectReference) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + return i, nil +} + +func (m *NFSVolumeSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *NFSVolumeSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Server))) + i += copy(data[i:], m.Server) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Path))) + i += copy(data[i:], m.Path) + data[i] = 0x18 + i++ + if m.ReadOnly { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + return i, nil +} + +func (m *Namespace) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Namespace) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n59, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n59 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n60, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n60 + data[i] = 
0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n61, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n61 + return i, nil +} + +func (m *NamespaceList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *NamespaceList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n62, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n62 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *NamespaceSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *NamespaceSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Finalizers) > 0 { + for _, s := range m.Finalizers { + data[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + return i, nil +} + +func (m *NamespaceStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *NamespaceStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Phase))) + i += copy(data[i:], m.Phase) + return i, nil +} + +func (m *Node) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Node) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n63, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n63 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n64, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n64 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n65, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n65 + return i, nil +} + +func (m *NodeAddress) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *NodeAddress) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Type))) + i += copy(data[i:], m.Type) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Address))) + i += copy(data[i:], m.Address) + return i, nil +} + +func (m *NodeAffinity) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func 
(m *NodeAffinity) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.RequiredDuringSchedulingIgnoredDuringExecution != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.RequiredDuringSchedulingIgnoredDuringExecution.Size())) + n66, err := m.RequiredDuringSchedulingIgnoredDuringExecution.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n66 + } + if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { + for _, msg := range m.PreferredDuringSchedulingIgnoredDuringExecution { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *NodeCondition) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *NodeCondition) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Type))) + i += copy(data[i:], m.Type) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Status))) + i += copy(data[i:], m.Status) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.LastHeartbeatTime.Size())) + n67, err := m.LastHeartbeatTime.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n67 + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(m.LastTransitionTime.Size())) + n68, err := m.LastTransitionTime.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n68 + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) + i += copy(data[i:], m.Reason) + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Message))) + i += copy(data[i:], m.Message) + return i, nil +} + +func (m *NodeDaemonEndpoints) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *NodeDaemonEndpoints) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.KubeletEndpoint.Size())) + n69, err := m.KubeletEndpoint.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n69 + return i, nil +} + +func (m *NodeList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *NodeList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n70, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n70 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *NodeProxyOptions) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *NodeProxyOptions) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = 
encodeVarintGenerated(data, i, uint64(len(m.Path))) + i += copy(data[i:], m.Path) + return i, nil +} + +func (m *NodeSelector) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *NodeSelector) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.NodeSelectorTerms) > 0 { + for _, msg := range m.NodeSelectorTerms { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *NodeSelectorRequirement) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *NodeSelectorRequirement) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Key))) + i += copy(data[i:], m.Key) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Operator))) + i += copy(data[i:], m.Operator) + if len(m.Values) > 0 { + for _, s := range m.Values { + data[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + return i, nil +} + +func (m *NodeSelectorTerm) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *NodeSelectorTerm) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.MatchExpressions) > 0 { + for _, msg := range m.MatchExpressions { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *NodeSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *NodeSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.PodCIDR))) + i += copy(data[i:], m.PodCIDR) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ExternalID))) + i += copy(data[i:], m.ExternalID) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ProviderID))) + i += copy(data[i:], m.ProviderID) + data[i] = 0x20 + i++ + if m.Unschedulable { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + return i, nil +} + +func (m *NodeStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *NodeStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Capacity) > 0 { + for k := range m.Capacity { + data[i] = 0xa + i++ + v := m.Capacity[k] + msgSize := (&v).Size() + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 
+ i++ + i = encodeVarintGenerated(data, i, uint64((&v).Size())) + n71, err := (&v).MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n71 + } + } + if len(m.Allocatable) > 0 { + for k := range m.Allocatable { + data[i] = 0x12 + i++ + v := m.Allocatable[k] + msgSize := (&v).Size() + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64((&v).Size())) + n72, err := (&v).MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n72 + } + } + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Phase))) + i += copy(data[i:], m.Phase) + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Addresses) > 0 { + for _, msg := range m.Addresses { + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(m.DaemonEndpoints.Size())) + n73, err := m.DaemonEndpoints.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n73 + data[i] = 0x3a + i++ + i = encodeVarintGenerated(data, i, uint64(m.NodeInfo.Size())) + n74, err := m.NodeInfo.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n74 + if len(m.Images) > 0 { + for _, msg := range m.Images { + data[i] = 0x42 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *NodeSystemInfo) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *NodeSystemInfo) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.MachineID))) + i += copy(data[i:], m.MachineID) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.SystemUUID))) + i += copy(data[i:], m.SystemUUID) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.BootID))) + i += copy(data[i:], m.BootID) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.KernelVersion))) + i += copy(data[i:], m.KernelVersion) + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.OSImage))) + i += copy(data[i:], m.OSImage) + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ContainerRuntimeVersion))) + i += copy(data[i:], m.ContainerRuntimeVersion) + data[i] = 0x3a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.KubeletVersion))) + i += copy(data[i:], m.KubeletVersion) + data[i] = 0x42 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.KubeProxyVersion))) + i += copy(data[i:], m.KubeProxyVersion) + data[i] = 0x4a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.OperatingSystem))) + i += copy(data[i:], m.OperatingSystem) + data[i] = 0x52 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Architecture))) + i += copy(data[i:], m.Architecture) + return i, nil +} + 
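For readers tracing the generated MarshalTo methods above, here is a minimal standalone sketch (not part of the vendored generated.pb.go) of the protobuf wire-format pattern they all repeat: a tag byte of field_number<<3 | wire_type, a base-128 varint length prefix for strings and nested messages, and a single 0/1 byte for bools. The encodeVarint helper and the field numbers below are illustrative stand-ins for encodeVarintGenerated and the real field layout, assuming only the Go standard library.

package main

import "fmt"

// encodeVarint mirrors the role of encodeVarintGenerated: it appends v
// in base-128 varint form at data[offset:], low 7 bits first with a
// continuation bit, and returns the new write offset.
func encodeVarint(data []byte, offset int, v uint64) int {
	for v >= 1<<7 {
		data[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	data[offset] = uint8(v)
	return offset + 1
}

func main() {
	// A string field with field number 1 is wire type 2
	// (length-delimited): tag byte 0xa (1<<3 | 2), then the varint
	// length, then the raw bytes -- the same three steps as the
	// len/copy sequences in the generated code.
	name := "web"
	data := make([]byte, 2+len(name))
	i := 0
	data[i] = 0xa
	i++
	i = encodeVarint(data, i, uint64(len(name)))
	i += copy(data[i:], name)
	fmt.Printf("% x\n", data[:i]) // 0a 03 77 65 62

	// A bool field with field number 2 is wire type 0 (varint): tag
	// byte 0x10 (2<<3 | 0) followed by a single 0 or 1 byte, matching
	// the if m.ReadOnly { data[i] = 1 } else { data[i] = 0 } blocks.
	readOnly := true
	buf := make([]byte, 2)
	j := 0
	buf[j] = 0x10
	j++
	if readOnly {
		buf[j] = 1
	} else {
		buf[j] = 0
	}
	j++
	fmt.Printf("% x\n", buf[:j]) // 10 01
}

Two details of the generated code follow from this pattern: for repeated string fields it inlines the same varint loop directly (the `for l >= 1<<7` blocks above) instead of calling the helper, and each Marshal() first calls m.Size() so the output buffer is allocated exactly once and MarshalTo never reallocates.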
+func (m *ObjectFieldSelector) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ObjectFieldSelector) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.APIVersion))) + i += copy(data[i:], m.APIVersion) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.FieldPath))) + i += copy(data[i:], m.FieldPath) + return i, nil +} + +func (m *ObjectMeta) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ObjectMeta) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.GenerateName))) + i += copy(data[i:], m.GenerateName) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Namespace))) + i += copy(data[i:], m.Namespace) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.SelfLink))) + i += copy(data[i:], m.SelfLink) + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.UID))) + i += copy(data[i:], m.UID) + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ResourceVersion))) + i += copy(data[i:], m.ResourceVersion) + data[i] = 0x38 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Generation)) + data[i] = 0x42 + i++ + i = encodeVarintGenerated(data, i, uint64(m.CreationTimestamp.Size())) + n75, err := m.CreationTimestamp.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n75 + if m.DeletionTimestamp != nil { + data[i] = 0x4a + i++ + i = encodeVarintGenerated(data, i, uint64(m.DeletionTimestamp.Size())) + n76, err := m.DeletionTimestamp.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n76 + } + if m.DeletionGracePeriodSeconds != nil { + data[i] = 0x50 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.DeletionGracePeriodSeconds)) + } + if len(m.Labels) > 0 { + for k := range m.Labels { + data[i] = 0x5a + i++ + v := m.Labels[k] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(v))) + i += copy(data[i:], v) + } + } + if len(m.Annotations) > 0 { + for k := range m.Annotations { + data[i] = 0x62 + i++ + v := m.Annotations[k] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(v))) + i += copy(data[i:], v) + } + } + if len(m.OwnerReferences) > 0 { + for _, msg := range m.OwnerReferences { + data[i] = 0x6a + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Finalizers) > 0 { + for _, s := range m.Finalizers { + data[i] = 0x72 + i++ + l = len(s) + for 
l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + return i, nil +} + +func (m *ObjectReference) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ObjectReference) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Kind))) + i += copy(data[i:], m.Kind) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Namespace))) + i += copy(data[i:], m.Namespace) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.UID))) + i += copy(data[i:], m.UID) + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.APIVersion))) + i += copy(data[i:], m.APIVersion) + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ResourceVersion))) + i += copy(data[i:], m.ResourceVersion) + data[i] = 0x3a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.FieldPath))) + i += copy(data[i:], m.FieldPath) + return i, nil +} + +func (m *OwnerReference) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *OwnerReference) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Kind))) + i += copy(data[i:], m.Kind) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.UID))) + i += copy(data[i:], m.UID) + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.APIVersion))) + i += copy(data[i:], m.APIVersion) + return i, nil +} + +func (m *PersistentVolume) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PersistentVolume) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n77, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n77 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n78, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n78 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n79, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n79 + return i, nil +} + +func (m *PersistentVolumeClaim) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PersistentVolumeClaim) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n80, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n80 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, 
uint64(m.Spec.Size())) + n81, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n81 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n82, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n82 + return i, nil +} + +func (m *PersistentVolumeClaimList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PersistentVolumeClaimList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n83, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n83 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *PersistentVolumeClaimSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PersistentVolumeClaimSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.AccessModes) > 0 { + for _, s := range m.AccessModes { + data[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Resources.Size())) + n84, err := m.Resources.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n84 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.VolumeName))) + i += copy(data[i:], m.VolumeName) + return i, nil +} + +func (m *PersistentVolumeClaimStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PersistentVolumeClaimStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Phase))) + i += copy(data[i:], m.Phase) + if len(m.AccessModes) > 0 { + for _, s := range m.AccessModes { + data[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + if len(m.Capacity) > 0 { + for k := range m.Capacity { + data[i] = 0x1a + i++ + v := m.Capacity[k] + msgSize := (&v).Size() + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64((&v).Size())) + n85, err := (&v).MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n85 + } + } + return i, nil +} + +func (m *PersistentVolumeClaimVolumeSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PersistentVolumeClaimVolumeSource) MarshalTo(data []byte) (int, error) { 
+ var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ClaimName))) + i += copy(data[i:], m.ClaimName) + data[i] = 0x10 + i++ + if m.ReadOnly { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + return i, nil +} + +func (m *PersistentVolumeList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PersistentVolumeList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n86, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n86 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *PersistentVolumeSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PersistentVolumeSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.GCEPersistentDisk != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.GCEPersistentDisk.Size())) + n87, err := m.GCEPersistentDisk.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n87 + } + if m.AWSElasticBlockStore != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.AWSElasticBlockStore.Size())) + n88, err := m.AWSElasticBlockStore.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n88 + } + if m.HostPath != nil { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.HostPath.Size())) + n89, err := m.HostPath.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n89 + } + if m.Glusterfs != nil { + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Glusterfs.Size())) + n90, err := m.Glusterfs.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n90 + } + if m.NFS != nil { + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(m.NFS.Size())) + n91, err := m.NFS.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n91 + } + if m.RBD != nil { + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(m.RBD.Size())) + n92, err := m.RBD.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n92 + } + if m.ISCSI != nil { + data[i] = 0x3a + i++ + i = encodeVarintGenerated(data, i, uint64(m.ISCSI.Size())) + n93, err := m.ISCSI.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n93 + } + if m.Cinder != nil { + data[i] = 0x42 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Cinder.Size())) + n94, err := m.Cinder.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n94 + } + if m.CephFS != nil { + data[i] = 0x4a + i++ + i = encodeVarintGenerated(data, i, uint64(m.CephFS.Size())) + n95, err := m.CephFS.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n95 + } + if m.FC != nil { + data[i] = 0x52 + i++ + i = encodeVarintGenerated(data, i, uint64(m.FC.Size())) + n96, err := m.FC.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n96 + } + if m.Flocker != nil { + data[i] = 0x5a + i++ + i = encodeVarintGenerated(data, i, 
uint64(m.Flocker.Size())) + n97, err := m.Flocker.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n97 + } + if m.FlexVolume != nil { + data[i] = 0x62 + i++ + i = encodeVarintGenerated(data, i, uint64(m.FlexVolume.Size())) + n98, err := m.FlexVolume.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n98 + } + if m.AzureFile != nil { + data[i] = 0x6a + i++ + i = encodeVarintGenerated(data, i, uint64(m.AzureFile.Size())) + n99, err := m.AzureFile.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n99 + } + if m.VsphereVolume != nil { + data[i] = 0x72 + i++ + i = encodeVarintGenerated(data, i, uint64(m.VsphereVolume.Size())) + n100, err := m.VsphereVolume.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n100 + } + return i, nil +} + +func (m *PersistentVolumeSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PersistentVolumeSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Capacity) > 0 { + for k := range m.Capacity { + data[i] = 0xa + i++ + v := m.Capacity[k] + msgSize := (&v).Size() + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64((&v).Size())) + n101, err := (&v).MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n101 + } + } + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.PersistentVolumeSource.Size())) + n102, err := m.PersistentVolumeSource.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n102 + if len(m.AccessModes) > 0 { + for _, s := range m.AccessModes { + data[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + if m.ClaimRef != nil { + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(m.ClaimRef.Size())) + n103, err := m.ClaimRef.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n103 + } + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.PersistentVolumeReclaimPolicy))) + i += copy(data[i:], m.PersistentVolumeReclaimPolicy) + return i, nil +} + +func (m *PersistentVolumeStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PersistentVolumeStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Phase))) + i += copy(data[i:], m.Phase) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Message))) + i += copy(data[i:], m.Message) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) + i += copy(data[i:], m.Reason) + return i, nil +} + +func (m *Pod) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Pod) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = 
encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n104, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n104 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n105, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n105 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n106, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n106 + return i, nil +} + +func (m *PodAffinity) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodAffinity) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.RequiredDuringSchedulingIgnoredDuringExecution) > 0 { + for _, msg := range m.RequiredDuringSchedulingIgnoredDuringExecution { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { + for _, msg := range m.PreferredDuringSchedulingIgnoredDuringExecution { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *PodAffinityTerm) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodAffinityTerm) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.LabelSelector != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.LabelSelector.Size())) + n107, err := m.LabelSelector.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n107 + } + if len(m.Namespaces) > 0 { + for _, s := range m.Namespaces { + data[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.TopologyKey))) + i += copy(data[i:], m.TopologyKey) + return i, nil +} + +func (m *PodAntiAffinity) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodAntiAffinity) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.RequiredDuringSchedulingIgnoredDuringExecution) > 0 { + for _, msg := range m.RequiredDuringSchedulingIgnoredDuringExecution { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { + for _, msg := range m.PreferredDuringSchedulingIgnoredDuringExecution { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *PodAttachOptions) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return 
data[:n], nil +} + +func (m *PodAttachOptions) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + if m.Stdin { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x10 + i++ + if m.Stdout { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x18 + i++ + if m.Stderr { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x20 + i++ + if m.TTY { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Container))) + i += copy(data[i:], m.Container) + return i, nil +} + +func (m *PodCondition) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodCondition) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Type))) + i += copy(data[i:], m.Type) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Status))) + i += copy(data[i:], m.Status) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.LastProbeTime.Size())) + n108, err := m.LastProbeTime.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n108 + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(m.LastTransitionTime.Size())) + n109, err := m.LastTransitionTime.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n109 + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) + i += copy(data[i:], m.Reason) + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Message))) + i += copy(data[i:], m.Message) + return i, nil +} + +func (m *PodExecOptions) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodExecOptions) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + if m.Stdin { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x10 + i++ + if m.Stdout { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x18 + i++ + if m.Stderr { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x20 + i++ + if m.TTY { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Container))) + i += copy(data[i:], m.Container) + if len(m.Command) > 0 { + for _, s := range m.Command { + data[i] = 0x32 + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + return i, nil +} + +func (m *PodList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n110, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n110 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil 
+} + +func (m *PodLogOptions) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodLogOptions) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Container))) + i += copy(data[i:], m.Container) + data[i] = 0x10 + i++ + if m.Follow { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x18 + i++ + if m.Previous { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + if m.SinceSeconds != nil { + data[i] = 0x20 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.SinceSeconds)) + } + if m.SinceTime != nil { + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(m.SinceTime.Size())) + n111, err := m.SinceTime.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n111 + } + data[i] = 0x30 + i++ + if m.Timestamps { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + if m.TailLines != nil { + data[i] = 0x38 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.TailLines)) + } + if m.LimitBytes != nil { + data[i] = 0x40 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.LimitBytes)) + } + return i, nil +} + +func (m *PodProxyOptions) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodProxyOptions) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Path))) + i += copy(data[i:], m.Path) + return i, nil +} + +func (m *PodSecurityContext) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodSecurityContext) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.SELinuxOptions != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.SELinuxOptions.Size())) + n112, err := m.SELinuxOptions.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n112 + } + if m.RunAsUser != nil { + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.RunAsUser)) + } + if m.RunAsNonRoot != nil { + data[i] = 0x18 + i++ + if *m.RunAsNonRoot { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + } + if len(m.SupplementalGroups) > 0 { + for _, num := range m.SupplementalGroups { + data[i] = 0x20 + i++ + i = encodeVarintGenerated(data, i, uint64(num)) + } + } + if m.FSGroup != nil { + data[i] = 0x28 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.FSGroup)) + } + return i, nil +} + +func (m *PodSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Volumes) > 0 { + for _, msg := range m.Volumes { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Containers) > 0 { + for _, msg := range m.Containers { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err 
+ } + i += n + } + } + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.RestartPolicy))) + i += copy(data[i:], m.RestartPolicy) + if m.TerminationGracePeriodSeconds != nil { + data[i] = 0x20 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.TerminationGracePeriodSeconds)) + } + if m.ActiveDeadlineSeconds != nil { + data[i] = 0x28 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.ActiveDeadlineSeconds)) + } + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.DNSPolicy))) + i += copy(data[i:], m.DNSPolicy) + if len(m.NodeSelector) > 0 { + for k := range m.NodeSelector { + data[i] = 0x3a + i++ + v := m.NodeSelector[k] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(v))) + i += copy(data[i:], v) + } + } + data[i] = 0x42 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ServiceAccountName))) + i += copy(data[i:], m.ServiceAccountName) + data[i] = 0x4a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.DeprecatedServiceAccount))) + i += copy(data[i:], m.DeprecatedServiceAccount) + data[i] = 0x52 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.NodeName))) + i += copy(data[i:], m.NodeName) + data[i] = 0x58 + i++ + if m.HostNetwork { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x60 + i++ + if m.HostPID { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x68 + i++ + if m.HostIPC { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + if m.SecurityContext != nil { + data[i] = 0x72 + i++ + i = encodeVarintGenerated(data, i, uint64(m.SecurityContext.Size())) + n113, err := m.SecurityContext.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n113 + } + if len(m.ImagePullSecrets) > 0 { + for _, msg := range m.ImagePullSecrets { + data[i] = 0x7a + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + data[i] = 0x82 + i++ + data[i] = 0x1 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Hostname))) + i += copy(data[i:], m.Hostname) + data[i] = 0x8a + i++ + data[i] = 0x1 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Subdomain))) + i += copy(data[i:], m.Subdomain) + return i, nil +} + +func (m *PodStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Phase))) + i += copy(data[i:], m.Phase) + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Message))) + i += copy(data[i:], m.Message) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) + i += copy(data[i:], m.Reason) + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.HostIP))) + i += copy(data[i:], m.HostIP) + data[i] = 0x32 + i++ + i = 
encodeVarintGenerated(data, i, uint64(len(m.PodIP))) + i += copy(data[i:], m.PodIP) + if m.StartTime != nil { + data[i] = 0x3a + i++ + i = encodeVarintGenerated(data, i, uint64(m.StartTime.Size())) + n114, err := m.StartTime.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n114 + } + if len(m.ContainerStatuses) > 0 { + for _, msg := range m.ContainerStatuses { + data[i] = 0x42 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *PodStatusResult) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodStatusResult) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n115, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n115 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n116, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n116 + return i, nil +} + +func (m *PodTemplate) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodTemplate) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n117, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n117 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Template.Size())) + n118, err := m.Template.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n118 + return i, nil +} + +func (m *PodTemplateList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodTemplateList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n119, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n119 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *PodTemplateSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodTemplateSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n120, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n120 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n121, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n121 + return i, nil +} + +func (m *Preconditions) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err 
!= nil { + return nil, err + } + return data[:n], nil +} + +func (m *Preconditions) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.UID != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(*m.UID))) + i += copy(data[i:], *m.UID) + } + return i, nil +} + +func (m *PreferredSchedulingTerm) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PreferredSchedulingTerm) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Weight)) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Preference.Size())) + n122, err := m.Preference.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n122 + return i, nil +} + +func (m *Probe) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Probe) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.Handler.Size())) + n123, err := m.Handler.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n123 + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(m.InitialDelaySeconds)) + data[i] = 0x18 + i++ + i = encodeVarintGenerated(data, i, uint64(m.TimeoutSeconds)) + data[i] = 0x20 + i++ + i = encodeVarintGenerated(data, i, uint64(m.PeriodSeconds)) + data[i] = 0x28 + i++ + i = encodeVarintGenerated(data, i, uint64(m.SuccessThreshold)) + data[i] = 0x30 + i++ + i = encodeVarintGenerated(data, i, uint64(m.FailureThreshold)) + return i, nil +} + +func (m *RBDVolumeSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *RBDVolumeSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.CephMonitors) > 0 { + for _, s := range m.CephMonitors { + data[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.RBDImage))) + i += copy(data[i:], m.RBDImage) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.FSType))) + i += copy(data[i:], m.FSType) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.RBDPool))) + i += copy(data[i:], m.RBDPool) + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.RadosUser))) + i += copy(data[i:], m.RadosUser) + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Keyring))) + i += copy(data[i:], m.Keyring) + if m.SecretRef != nil { + data[i] = 0x3a + i++ + i = encodeVarintGenerated(data, i, uint64(m.SecretRef.Size())) + n124, err := m.SecretRef.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n124 + } + data[i] = 0x40 + i++ + if m.ReadOnly { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + return i, nil +} + +func (m *RangeAllocation) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m 
*RangeAllocation) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n125, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n125 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Range))) + i += copy(data[i:], m.Range) + if m.Data != nil { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Data))) + i += copy(data[i:], m.Data) + } + return i, nil +} + +func (m *ReplicationController) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ReplicationController) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n126, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n126 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n127, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n127 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n128, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n128 + return i, nil +} + +func (m *ReplicationControllerList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ReplicationControllerList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n129, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n129 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ReplicationControllerSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ReplicationControllerSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Replicas != nil { + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.Replicas)) + } + if len(m.Selector) > 0 { + for k := range m.Selector { + data[i] = 0x12 + i++ + v := m.Selector[k] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(v))) + i += copy(data[i:], v) + } + } + if m.Template != nil { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Template.Size())) + n130, err := m.Template.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n130 + } + return i, nil +} + +func (m *ReplicationControllerStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], 
nil +} + +func (m *ReplicationControllerStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Replicas)) + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(m.FullyLabeledReplicas)) + data[i] = 0x18 + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObservedGeneration)) + return i, nil +} + +func (m *ResourceFieldSelector) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ResourceFieldSelector) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ContainerName))) + i += copy(data[i:], m.ContainerName) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Resource))) + i += copy(data[i:], m.Resource) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Divisor.Size())) + n131, err := m.Divisor.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n131 + return i, nil +} + +func (m *ResourceQuota) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ResourceQuota) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n132, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n132 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n133, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n133 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n134, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n134 + return i, nil +} + +func (m *ResourceQuotaList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ResourceQuotaList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n135, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n135 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ResourceQuotaSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ResourceQuotaSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Hard) > 0 { + for k := range m.Hard { + data[i] = 0xa + i++ + v := m.Hard[k] + msgSize := (&v).Size() + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, 
uint64((&v).Size())) + n136, err := (&v).MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n136 + } + } + if len(m.Scopes) > 0 { + for _, s := range m.Scopes { + data[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + return i, nil +} + +func (m *ResourceQuotaStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ResourceQuotaStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Hard) > 0 { + for k := range m.Hard { + data[i] = 0xa + i++ + v := m.Hard[k] + msgSize := (&v).Size() + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64((&v).Size())) + n137, err := (&v).MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n137 + } + } + if len(m.Used) > 0 { + for k := range m.Used { + data[i] = 0x12 + i++ + v := m.Used[k] + msgSize := (&v).Size() + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64((&v).Size())) + n138, err := (&v).MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n138 + } + } + return i, nil +} + +func (m *ResourceRequirements) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ResourceRequirements) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Limits) > 0 { + for k := range m.Limits { + data[i] = 0xa + i++ + v := m.Limits[k] + msgSize := (&v).Size() + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64((&v).Size())) + n139, err := (&v).MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n139 + } + } + if len(m.Requests) > 0 { + for k := range m.Requests { + data[i] = 0x12 + i++ + v := m.Requests[k] + msgSize := (&v).Size() + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64((&v).Size())) + n140, err := (&v).MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n140 + } + } + return i, nil +} + +func (m *SELinuxOptions) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *SELinuxOptions) MarshalTo(data []byte) (int, error) { + var i int + _ 
= i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.User))) + i += copy(data[i:], m.User) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Role))) + i += copy(data[i:], m.Role) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Type))) + i += copy(data[i:], m.Type) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Level))) + i += copy(data[i:], m.Level) + return i, nil +} + +func (m *Secret) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Secret) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n141, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n141 + if len(m.Data) > 0 { + for k := range m.Data { + data[i] = 0x12 + i++ + v := m.Data[k] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(v))) + i += copy(data[i:], v) + } + } + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Type))) + i += copy(data[i:], m.Type) + return i, nil +} + +func (m *SecretKeySelector) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *SecretKeySelector) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.LocalObjectReference.Size())) + n142, err := m.LocalObjectReference.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n142 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Key))) + i += copy(data[i:], m.Key) + return i, nil +} + +func (m *SecretList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *SecretList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n143, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n143 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *SecretVolumeSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *SecretVolumeSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.SecretName))) + i += copy(data[i:], m.SecretName) + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := 
msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *SecurityContext) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *SecurityContext) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Capabilities != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.Capabilities.Size())) + n144, err := m.Capabilities.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n144 + } + if m.Privileged != nil { + data[i] = 0x10 + i++ + if *m.Privileged { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + } + if m.SELinuxOptions != nil { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.SELinuxOptions.Size())) + n145, err := m.SELinuxOptions.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n145 + } + if m.RunAsUser != nil { + data[i] = 0x20 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.RunAsUser)) + } + if m.RunAsNonRoot != nil { + data[i] = 0x28 + i++ + if *m.RunAsNonRoot { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + } + if m.ReadOnlyRootFilesystem != nil { + data[i] = 0x30 + i++ + if *m.ReadOnlyRootFilesystem { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + } + return i, nil +} + +func (m *SerializedReference) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *SerializedReference) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.Reference.Size())) + n146, err := m.Reference.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n146 + return i, nil +} + +func (m *Service) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Service) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n147, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n147 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n148, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n148 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n149, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n149 + return i, nil +} + +func (m *ServiceAccount) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ServiceAccount) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n150, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n150 + if len(m.Secrets) > 0 { + for _, msg := range m.Secrets { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.ImagePullSecrets) > 0 { + 
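+ // Editor's note (illustrative comment, not generator output): repeated
+ // message fields such as ImagePullSecrets (field 3, hence the 0x1a tag
+ // in the loop below) have no packed form; the generator emits one tag
+ // byte, one varint length prefix, and one payload per element, exactly
+ // mirroring the Secrets loop above. Any error from a nested MarshalTo
+ // aborts the whole encode with a zero byte count.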
for _, msg := range m.ImagePullSecrets { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ServiceAccountList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ServiceAccountList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n151, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n151 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ServiceList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ServiceList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n152, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n152 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ServicePort) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ServicePort) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Protocol))) + i += copy(data[i:], m.Protocol) + data[i] = 0x18 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Port)) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(m.TargetPort.Size())) + n153, err := m.TargetPort.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n153 + data[i] = 0x28 + i++ + i = encodeVarintGenerated(data, i, uint64(m.NodePort)) + return i, nil +} + +func (m *ServiceProxyOptions) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ServiceProxyOptions) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Path))) + i += copy(data[i:], m.Path) + return i, nil +} + +func (m *ServiceSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ServiceSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Ports) > 0 { + for _, msg := range m.Ports { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if 
err != nil { + return 0, err + } + i += n + } + } + if len(m.Selector) > 0 { + for k := range m.Selector { + data[i] = 0x12 + i++ + v := m.Selector[k] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(v))) + i += copy(data[i:], v) + } + } + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ClusterIP))) + i += copy(data[i:], m.ClusterIP) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Type))) + i += copy(data[i:], m.Type) + if len(m.ExternalIPs) > 0 { + for _, s := range m.ExternalIPs { + data[i] = 0x2a + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + if len(m.DeprecatedPublicIPs) > 0 { + for _, s := range m.DeprecatedPublicIPs { + data[i] = 0x32 + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + data[i] = 0x3a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.SessionAffinity))) + i += copy(data[i:], m.SessionAffinity) + data[i] = 0x42 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.LoadBalancerIP))) + i += copy(data[i:], m.LoadBalancerIP) + if len(m.LoadBalancerSourceRanges) > 0 { + for _, s := range m.LoadBalancerSourceRanges { + data[i] = 0x4a + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + return i, nil +} + +func (m *ServiceStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ServiceStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.LoadBalancer.Size())) + n154, err := m.LoadBalancer.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n154 + return i, nil +} + +func (m *TCPSocketAction) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *TCPSocketAction) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.Port.Size())) + n155, err := m.Port.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n155 + return i, nil +} + +func (m *Taint) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Taint) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Key))) + i += copy(data[i:], m.Key) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Value))) + i += copy(data[i:], m.Value) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Effect))) + i += copy(data[i:], m.Effect) + return i, nil +} + +func (m *Toleration) Marshal() (data []byte, err error) { + size := m.Size() 
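+ // Editor's note: every Marshal in this file follows the same two-pass
+ // contract. Size() computes the exact encoded length, the buffer is
+ // allocated once below, and MarshalTo fills it with no reallocation.
+ // Because the index i is never bounds-checked, Size and MarshalTo must
+ // account for fields identically; a mismatch would panic on a short
+ // buffer. A hedged caller-side sketch (the values are illustrative):
+ //
+ //	t := Toleration{Key: "dedicated", Operator: "Equal", Value: "gpu"}
+ //	buf, err := t.Marshal() // len(buf) == t.Size()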
+ data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Toleration) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Key))) + i += copy(data[i:], m.Key) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Operator))) + i += copy(data[i:], m.Operator) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Value))) + i += copy(data[i:], m.Value) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Effect))) + i += copy(data[i:], m.Effect) + return i, nil +} + +func (m *Volume) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Volume) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.VolumeSource.Size())) + n156, err := m.VolumeSource.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n156 + return i, nil +} + +func (m *VolumeMount) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *VolumeMount) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + data[i] = 0x10 + i++ + if m.ReadOnly { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.MountPath))) + i += copy(data[i:], m.MountPath) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.SubPath))) + i += copy(data[i:], m.SubPath) + return i, nil +} + +func (m *VolumeSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *VolumeSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.HostPath != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.HostPath.Size())) + n157, err := m.HostPath.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n157 + } + if m.EmptyDir != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.EmptyDir.Size())) + n158, err := m.EmptyDir.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n158 + } + if m.GCEPersistentDisk != nil { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.GCEPersistentDisk.Size())) + n159, err := m.GCEPersistentDisk.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n159 + } + if m.AWSElasticBlockStore != nil { + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(m.AWSElasticBlockStore.Size())) + n160, err := m.AWSElasticBlockStore.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n160 + } + if m.GitRepo != nil { + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(m.GitRepo.Size())) + n161, err := m.GitRepo.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n161 + } + if m.Secret != nil { + data[i] = 0x32 + 
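+ // Editor's note: the byte written above is a protobuf tag,
+ // (field_number << 3) | wire_type, where wire type 2 means
+ // length-delimited. So 0xa is field 1, 0x12 is field 2, and this 0x32
+ // is field 6 (Secret). Field numbers 16 and up no longer fit in one
+ // byte, which is why DownwardAPI (field 16) further down is written as
+ // the two-byte varint tag 0x82 0x1:
+ //
+ //	// uint64(16)<<3 | 2 == 130, varint-encoded as 0x82, 0x01
+ //	i = encodeVarintGenerated(data, i, uint64(16)<<3|2)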
i++ + i = encodeVarintGenerated(data, i, uint64(m.Secret.Size())) + n162, err := m.Secret.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n162 + } + if m.NFS != nil { + data[i] = 0x3a + i++ + i = encodeVarintGenerated(data, i, uint64(m.NFS.Size())) + n163, err := m.NFS.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n163 + } + if m.ISCSI != nil { + data[i] = 0x42 + i++ + i = encodeVarintGenerated(data, i, uint64(m.ISCSI.Size())) + n164, err := m.ISCSI.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n164 + } + if m.Glusterfs != nil { + data[i] = 0x4a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Glusterfs.Size())) + n165, err := m.Glusterfs.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n165 + } + if m.PersistentVolumeClaim != nil { + data[i] = 0x52 + i++ + i = encodeVarintGenerated(data, i, uint64(m.PersistentVolumeClaim.Size())) + n166, err := m.PersistentVolumeClaim.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n166 + } + if m.RBD != nil { + data[i] = 0x5a + i++ + i = encodeVarintGenerated(data, i, uint64(m.RBD.Size())) + n167, err := m.RBD.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n167 + } + if m.FlexVolume != nil { + data[i] = 0x62 + i++ + i = encodeVarintGenerated(data, i, uint64(m.FlexVolume.Size())) + n168, err := m.FlexVolume.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n168 + } + if m.Cinder != nil { + data[i] = 0x6a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Cinder.Size())) + n169, err := m.Cinder.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n169 + } + if m.CephFS != nil { + data[i] = 0x72 + i++ + i = encodeVarintGenerated(data, i, uint64(m.CephFS.Size())) + n170, err := m.CephFS.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n170 + } + if m.Flocker != nil { + data[i] = 0x7a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Flocker.Size())) + n171, err := m.Flocker.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n171 + } + if m.DownwardAPI != nil { + data[i] = 0x82 + i++ + data[i] = 0x1 + i++ + i = encodeVarintGenerated(data, i, uint64(m.DownwardAPI.Size())) + n172, err := m.DownwardAPI.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n172 + } + if m.FC != nil { + data[i] = 0x8a + i++ + data[i] = 0x1 + i++ + i = encodeVarintGenerated(data, i, uint64(m.FC.Size())) + n173, err := m.FC.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n173 + } + if m.AzureFile != nil { + data[i] = 0x92 + i++ + data[i] = 0x1 + i++ + i = encodeVarintGenerated(data, i, uint64(m.AzureFile.Size())) + n174, err := m.AzureFile.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n174 + } + if m.ConfigMap != nil { + data[i] = 0x9a + i++ + data[i] = 0x1 + i++ + i = encodeVarintGenerated(data, i, uint64(m.ConfigMap.Size())) + n175, err := m.ConfigMap.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n175 + } + if m.VsphereVolume != nil { + data[i] = 0xa2 + i++ + data[i] = 0x1 + i++ + i = encodeVarintGenerated(data, i, uint64(m.VsphereVolume.Size())) + n176, err := m.VsphereVolume.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n176 + } + return i, nil +} + +func (m *VsphereVirtualDiskVolumeSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *VsphereVirtualDiskVolumeSource) MarshalTo(data 
[]byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.VolumePath))) + i += copy(data[i:], m.VolumePath) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.FSType))) + i += copy(data[i:], m.FSType) + return i, nil +} + +func (m *WeightedPodAffinityTerm) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *WeightedPodAffinityTerm) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Weight)) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.PodAffinityTerm.Size())) + n177, err := m.PodAffinityTerm.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n177 + return i, nil +} + +func encodeFixed64Generated(data []byte, offset int, v uint64) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + data[offset+4] = uint8(v >> 32) + data[offset+5] = uint8(v >> 40) + data[offset+6] = uint8(v >> 48) + data[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(data []byte, offset int, v uint32) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(data []byte, offset int, v uint64) int { + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return offset + 1 +} +func (m *AWSElasticBlockStoreVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.VolumeID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Partition)) + n += 2 + return n +} + +func (m *Affinity) Size() (n int) { + var l int + _ = l + if m.NodeAffinity != nil { + l = m.NodeAffinity.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.PodAffinity != nil { + l = m.PodAffinity.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.PodAntiAffinity != nil { + l = m.PodAntiAffinity.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *AzureFileVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.SecretName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ShareName) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n +} + +func (m *Binding) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Target.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Capabilities) Size() (n int) { + var l int + _ = l + if len(m.Add) > 0 { + for _, s := range m.Add { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Drop) > 0 { + for _, s := range m.Drop { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *CephFSVolumeSource) Size() (n int) { + var l int + _ = l + if len(m.Monitors) > 0 { + for _, s := range m.Monitors { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.User) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.SecretFile) + n += 1 + l + sovGenerated(uint64(l)) + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + 
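+ // Editor's note: sovGenerated(x) is the varint byte count of x (one
+ // byte per 7 bits), so a length-delimited field with a one-byte tag
+ // costs 1 + l + sovGenerated(uint64(l)): tag, payload, length prefix.
+ // The "n += 2" just above is the bool case (ReadOnly): one tag byte
+ // plus one data byte. A plausible sketch of the helper, mirroring the
+ // loop in encodeVarintGenerated:
+ //
+ //	func sovGenerated(x uint64) (n int) {
+ //		for {
+ //			n++
+ //			x >>= 7
+ //			if x == 0 {
+ //				return n
+ //			}
+ //		}
+ //	}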
return n +} + +func (m *CinderVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.VolumeID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n +} + +func (m *ComponentCondition) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Error) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ComponentStatus) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ComponentStatusList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ConfigMap) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Data) > 0 { + for k, v := range m.Data { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *ConfigMapKeySelector) Size() (n int) { + var l int + _ = l + l = m.LocalObjectReference.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ConfigMapList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ConfigMapVolumeSource) Size() (n int) { + var l int + _ = l + l = m.LocalObjectReference.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *Container) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Image) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Command) > 0 { + for _, s := range m.Command { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Args) > 0 { + for _, s := range m.Args { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.WorkingDir) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Env) > 0 { + for _, e := range m.Env { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.Resources.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.VolumeMounts) > 0 { + for _, e := range m.VolumeMounts { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.LivenessProbe != nil { + l = m.LivenessProbe.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ReadinessProbe != nil { + l = m.ReadinessProbe.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Lifecycle != nil { + l = m.Lifecycle.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.TerminationMessagePath) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ImagePullPolicy) + n += 1 + l + sovGenerated(uint64(l)) + if 
m.SecurityContext != nil { + l = m.SecurityContext.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 3 + n += 3 + n += 3 + return n +} + +func (m *ContainerImage) Size() (n int) { + var l int + _ = l + if len(m.Names) > 0 { + for _, s := range m.Names { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 1 + sovGenerated(uint64(m.SizeBytes)) + return n +} + +func (m *ContainerPort) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.HostPort)) + n += 1 + sovGenerated(uint64(m.ContainerPort)) + l = len(m.Protocol) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.HostIP) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ContainerState) Size() (n int) { + var l int + _ = l + if m.Waiting != nil { + l = m.Waiting.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Running != nil { + l = m.Running.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Terminated != nil { + l = m.Terminated.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ContainerStateRunning) Size() (n int) { + var l int + _ = l + l = m.StartedAt.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ContainerStateTerminated) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.ExitCode)) + n += 1 + sovGenerated(uint64(m.Signal)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = m.StartedAt.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.FinishedAt.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ContainerID) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ContainerStateWaiting) Size() (n int) { + var l int + _ = l + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ContainerStatus) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.State.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTerminationState.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + n += 1 + sovGenerated(uint64(m.RestartCount)) + l = len(m.Image) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ImageID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ContainerID) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DaemonEndpoint) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Port)) + return n +} + +func (m *DeleteOptions) Size() (n int) { + var l int + _ = l + if m.GracePeriodSeconds != nil { + n += 1 + sovGenerated(uint64(*m.GracePeriodSeconds)) + } + if m.Preconditions != nil { + l = m.Preconditions.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.OrphanDependents != nil { + n += 2 + } + return n +} + +func (m *DownwardAPIVolumeFile) Size() (n int) { + var l int + _ = l + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + if m.FieldRef != nil { + l = m.FieldRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ResourceFieldRef != nil { + l = m.ResourceFieldRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *DownwardAPIVolumeSource) Size() (n int) { + var l int + _ = l + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *EmptyDirVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.Medium) + n += 1 + l + sovGenerated(uint64(l)) + 
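+ // Editor's note: the three bare "n += 3" lines in Container.Size above
+ // correspond to its trailing bools (Stdin, StdinOnce, TTY in the v1
+ // API): their field numbers are 16 or higher, so each costs a two-byte
+ // tag plus one data byte. Optional fields are pointers (see
+ // DeleteOptions above) and are counted only when non-nil, so absent
+ // fields occupy zero bytes on the wire.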
return n +} + +func (m *EndpointAddress) Size() (n int) { + var l int + _ = l + l = len(m.IP) + n += 1 + l + sovGenerated(uint64(l)) + if m.TargetRef != nil { + l = m.TargetRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Hostname) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *EndpointPort) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Port)) + l = len(m.Protocol) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *EndpointSubset) Size() (n int) { + var l int + _ = l + if len(m.Addresses) > 0 { + for _, e := range m.Addresses { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.NotReadyAddresses) > 0 { + for _, e := range m.NotReadyAddresses { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *Endpoints) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Subsets) > 0 { + for _, e := range m.Subsets { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *EndpointsList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *EnvVar) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Value) + n += 1 + l + sovGenerated(uint64(l)) + if m.ValueFrom != nil { + l = m.ValueFrom.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *EnvVarSource) Size() (n int) { + var l int + _ = l + if m.FieldRef != nil { + l = m.FieldRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ResourceFieldRef != nil { + l = m.ResourceFieldRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ConfigMapKeyRef != nil { + l = m.ConfigMapKeyRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.SecretKeyRef != nil { + l = m.SecretKeyRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *Event) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.InvolvedObject.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Source.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.FirstTimestamp.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTimestamp.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Count)) + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *EventList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *EventSource) Size() (n int) { + var l int + _ = l + l = len(m.Component) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Host) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ExecAction) Size() (n int) { + var l int + _ = l + if len(m.Command) > 0 { + for _, s := range m.Command { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m 
*ExportOptions) Size() (n int) { + var l int + _ = l + n += 2 + n += 2 + return n +} + +func (m *FCVolumeSource) Size() (n int) { + var l int + _ = l + if len(m.TargetWWNs) > 0 { + for _, s := range m.TargetWWNs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Lun != nil { + n += 1 + sovGenerated(uint64(*m.Lun)) + } + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n +} + +func (m *FlexVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.Driver) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + if len(m.Options) > 0 { + for k, v := range m.Options { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *FlockerVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.DatasetName) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *GCEPersistentDiskVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.PDName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Partition)) + n += 2 + return n +} + +func (m *GitRepoVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.Repository) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Revision) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Directory) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *GlusterfsVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.EndpointsName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n +} + +func (m *HTTPGetAction) Size() (n int) { + var l int + _ = l + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Port.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Host) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Scheme) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.HTTPHeaders) > 0 { + for _, e := range m.HTTPHeaders { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *HTTPHeader) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Value) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Handler) Size() (n int) { + var l int + _ = l + if m.Exec != nil { + l = m.Exec.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.HTTPGet != nil { + l = m.HTTPGet.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.TCPSocket != nil { + l = m.TCPSocket.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *HostPathVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ISCSIVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.TargetPortal) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.IQN) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Lun)) + l = len(m.ISCSIInterface) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n +} + +func (m *KeyToPath) Size() (n int) { + var l int + _ = l + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func 
(m *Lifecycle) Size() (n int) { + var l int + _ = l + if m.PostStart != nil { + l = m.PostStart.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.PreStop != nil { + l = m.PreStop.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *LimitRange) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *LimitRangeItem) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Max) > 0 { + for k, v := range m.Max { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Min) > 0 { + for k, v := range m.Min { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Default) > 0 { + for k, v := range m.Default { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.DefaultRequest) > 0 { + for k, v := range m.DefaultRequest { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.MaxLimitRequestRatio) > 0 { + for k, v := range m.MaxLimitRequestRatio { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *LimitRangeList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *LimitRangeSpec) Size() (n int) { + var l int + _ = l + if len(m.Limits) > 0 { + for _, e := range m.Limits { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *List) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ListOptions) Size() (n int) { + var l int + _ = l + l = len(m.LabelSelector) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FieldSelector) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + l = len(m.ResourceVersion) + n += 1 + l + sovGenerated(uint64(l)) + if m.TimeoutSeconds != nil { + n += 1 + sovGenerated(uint64(*m.TimeoutSeconds)) + } + return n +} + +func (m *LoadBalancerIngress) Size() (n int) { + var l int + _ = l + l = len(m.IP) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Hostname) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *LoadBalancerStatus) Size() (n int) { + var l int + _ = l + if len(m.Ingress) > 0 { + for _, e := range m.Ingress { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *LocalObjectReference) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *NFSVolumeSource) Size() (n int) { + var l int + _ = l 
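+ // Editor's note: the "var l int; _ = l" preamble is generator
+ // boilerplate; l is declared in every Size method, and the blank
+ // assignment silences the unused-variable error in messages that have
+ // no length-delimited fields. Note that non-nullable strings such as
+ // Server and Path below are counted (and marshalled) even when empty,
+ // costing two bytes each for tag plus zero length, unlike encoders
+ // that skip default values.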
+ l = len(m.Server) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n +} + +func (m *Namespace) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *NamespaceList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *NamespaceSpec) Size() (n int) { + var l int + _ = l + if len(m.Finalizers) > 0 { + for _, s := range m.Finalizers { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *NamespaceStatus) Size() (n int) { + var l int + _ = l + l = len(m.Phase) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Node) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *NodeAddress) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Address) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *NodeAffinity) Size() (n int) { + var l int + _ = l + if m.RequiredDuringSchedulingIgnoredDuringExecution != nil { + l = m.RequiredDuringSchedulingIgnoredDuringExecution.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { + for _, e := range m.PreferredDuringSchedulingIgnoredDuringExecution { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *NodeCondition) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastHeartbeatTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *NodeDaemonEndpoints) Size() (n int) { + var l int + _ = l + l = m.KubeletEndpoint.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *NodeList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *NodeProxyOptions) Size() (n int) { + var l int + _ = l + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *NodeSelector) Size() (n int) { + var l int + _ = l + if len(m.NodeSelectorTerms) > 0 { + for _, e := range m.NodeSelectorTerms { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *NodeSelectorRequirement) Size() (n int) { + var l int + _ = l + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Operator) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Values) > 0 { + for _, s := range m.Values { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *NodeSelectorTerm) Size() (n int) { + var l int + _ = l + if len(m.MatchExpressions) > 0 { + for _, e := range 
m.MatchExpressions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *NodeSpec) Size() (n int) { + var l int + _ = l + l = len(m.PodCIDR) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ExternalID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ProviderID) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n +} + +func (m *NodeStatus) Size() (n int) { + var l int + _ = l + if len(m.Capacity) > 0 { + for k, v := range m.Capacity { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Allocatable) > 0 { + for k, v := range m.Allocatable { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + l = len(m.Phase) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Addresses) > 0 { + for _, e := range m.Addresses { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.DaemonEndpoints.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.NodeInfo.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Images) > 0 { + for _, e := range m.Images { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *NodeSystemInfo) Size() (n int) { + var l int + _ = l + l = len(m.MachineID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.SystemUUID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.BootID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.KernelVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.OSImage) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ContainerRuntimeVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.KubeletVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.KubeProxyVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.OperatingSystem) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Architecture) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ObjectFieldSelector) Size() (n int) { + var l int + _ = l + l = len(m.APIVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FieldPath) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ObjectMeta) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.GenerateName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.SelfLink) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ResourceVersion) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Generation)) + l = m.CreationTimestamp.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.DeletionTimestamp != nil { + l = m.DeletionTimestamp.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.DeletionGracePeriodSeconds != nil { + n += 1 + sovGenerated(uint64(*m.DeletionGracePeriodSeconds)) + } + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Annotations) > 0 { + for k, v := range m.Annotations { + _ = k + _ = v + 
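+ // Editorial sketch (not generator output): proto3 maps encode as repeated
+ // MapEntry messages whose key is field 1 and value is field 2. mapEntrySize
+ // below therefore sums key tag + key length varint + key bytes plus value
+ // tag + value length varint + value bytes; the entry is then framed once
+ // more with the outer map field's tag and its own length varint.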
mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.OwnerReferences) > 0 { + for _, e := range m.OwnerReferences { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Finalizers) > 0 { + for _, s := range m.Finalizers { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ObjectReference) Size() (n int) { + var l int + _ = l + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.APIVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ResourceVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FieldPath) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *OwnerReference) Size() (n int) { + var l int + _ = l + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.APIVersion) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PersistentVolume) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PersistentVolumeClaim) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PersistentVolumeClaimList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PersistentVolumeClaimSpec) Size() (n int) { + var l int + _ = l + if len(m.AccessModes) > 0 { + for _, s := range m.AccessModes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.Resources.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.VolumeName) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PersistentVolumeClaimStatus) Size() (n int) { + var l int + _ = l + l = len(m.Phase) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.AccessModes) > 0 { + for _, s := range m.AccessModes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Capacity) > 0 { + for k, v := range m.Capacity { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *PersistentVolumeClaimVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.ClaimName) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n +} + +func (m *PersistentVolumeList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PersistentVolumeSource) Size() (n int) { + var l int + _ = l + if m.GCEPersistentDisk != nil { + l = m.GCEPersistentDisk.Size() + n += 1 + l + sovGenerated(uint64(l)) + 
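+ // Editorial sketch (not generator output): the members of this volume-source
+ // union are optional message fields held as pointers, so only the branches
+ // that are non-nil contribute any bytes; an unset member costs nothing on
+ // the wire.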
} + if m.AWSElasticBlockStore != nil { + l = m.AWSElasticBlockStore.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.HostPath != nil { + l = m.HostPath.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Glusterfs != nil { + l = m.Glusterfs.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NFS != nil { + l = m.NFS.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.RBD != nil { + l = m.RBD.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ISCSI != nil { + l = m.ISCSI.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Cinder != nil { + l = m.Cinder.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.CephFS != nil { + l = m.CephFS.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.FC != nil { + l = m.FC.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Flocker != nil { + l = m.Flocker.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.FlexVolume != nil { + l = m.FlexVolume.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.AzureFile != nil { + l = m.AzureFile.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.VsphereVolume != nil { + l = m.VsphereVolume.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *PersistentVolumeSpec) Size() (n int) { + var l int + _ = l + if len(m.Capacity) > 0 { + for k, v := range m.Capacity { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + l = m.PersistentVolumeSource.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.AccessModes) > 0 { + for _, s := range m.AccessModes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.ClaimRef != nil { + l = m.ClaimRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.PersistentVolumeReclaimPolicy) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PersistentVolumeStatus) Size() (n int) { + var l int + _ = l + l = len(m.Phase) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Pod) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodAffinity) Size() (n int) { + var l int + _ = l + if len(m.RequiredDuringSchedulingIgnoredDuringExecution) > 0 { + for _, e := range m.RequiredDuringSchedulingIgnoredDuringExecution { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { + for _, e := range m.PreferredDuringSchedulingIgnoredDuringExecution { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodAffinityTerm) Size() (n int) { + var l int + _ = l + if m.LabelSelector != nil { + l = m.LabelSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Namespaces) > 0 { + for _, s := range m.Namespaces { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.TopologyKey) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodAntiAffinity) Size() (n int) { + var l int + _ = l + if len(m.RequiredDuringSchedulingIgnoredDuringExecution) > 0 { + for _, e := range m.RequiredDuringSchedulingIgnoredDuringExecution { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if 
len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { + for _, e := range m.PreferredDuringSchedulingIgnoredDuringExecution { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodAttachOptions) Size() (n int) { + var l int + _ = l + n += 2 + n += 2 + n += 2 + n += 2 + l = len(m.Container) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodCondition) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastProbeTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodExecOptions) Size() (n int) { + var l int + _ = l + n += 2 + n += 2 + n += 2 + n += 2 + l = len(m.Container) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Command) > 0 { + for _, s := range m.Command { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodLogOptions) Size() (n int) { + var l int + _ = l + l = len(m.Container) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + n += 2 + if m.SinceSeconds != nil { + n += 1 + sovGenerated(uint64(*m.SinceSeconds)) + } + if m.SinceTime != nil { + l = m.SinceTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + if m.TailLines != nil { + n += 1 + sovGenerated(uint64(*m.TailLines)) + } + if m.LimitBytes != nil { + n += 1 + sovGenerated(uint64(*m.LimitBytes)) + } + return n +} + +func (m *PodProxyOptions) Size() (n int) { + var l int + _ = l + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodSecurityContext) Size() (n int) { + var l int + _ = l + if m.SELinuxOptions != nil { + l = m.SELinuxOptions.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.RunAsUser != nil { + n += 1 + sovGenerated(uint64(*m.RunAsUser)) + } + if m.RunAsNonRoot != nil { + n += 2 + } + if len(m.SupplementalGroups) > 0 { + for _, e := range m.SupplementalGroups { + n += 1 + sovGenerated(uint64(e)) + } + } + if m.FSGroup != nil { + n += 1 + sovGenerated(uint64(*m.FSGroup)) + } + return n +} + +func (m *PodSpec) Size() (n int) { + var l int + _ = l + if len(m.Volumes) > 0 { + for _, e := range m.Volumes { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Containers) > 0 { + for _, e := range m.Containers { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.RestartPolicy) + n += 1 + l + sovGenerated(uint64(l)) + if m.TerminationGracePeriodSeconds != nil { + n += 1 + sovGenerated(uint64(*m.TerminationGracePeriodSeconds)) + } + if m.ActiveDeadlineSeconds != nil { + n += 1 + sovGenerated(uint64(*m.ActiveDeadlineSeconds)) + } + l = len(m.DNSPolicy) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.NodeSelector) > 0 { + for k, v := range m.NodeSelector { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + l = len(m.ServiceAccountName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DeprecatedServiceAccount) + n += 1 + l + 
sovGenerated(uint64(l)) + l = len(m.NodeName) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + n += 2 + n += 2 + if m.SecurityContext != nil { + l = m.SecurityContext.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.ImagePullSecrets) > 0 { + for _, e := range m.ImagePullSecrets { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.Hostname) + n += 2 + l + sovGenerated(uint64(l)) + l = len(m.Subdomain) + n += 2 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodStatus) Size() (n int) { + var l int + _ = l + l = len(m.Phase) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.HostIP) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.PodIP) + n += 1 + l + sovGenerated(uint64(l)) + if m.StartTime != nil { + l = m.StartTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.ContainerStatuses) > 0 { + for _, e := range m.ContainerStatuses { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodStatusResult) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodTemplate) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodTemplateList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodTemplateSpec) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Preconditions) Size() (n int) { + var l int + _ = l + if m.UID != nil { + l = len(*m.UID) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *PreferredSchedulingTerm) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Weight)) + l = m.Preference.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Probe) Size() (n int) { + var l int + _ = l + l = m.Handler.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.InitialDelaySeconds)) + n += 1 + sovGenerated(uint64(m.TimeoutSeconds)) + n += 1 + sovGenerated(uint64(m.PeriodSeconds)) + n += 1 + sovGenerated(uint64(m.SuccessThreshold)) + n += 1 + sovGenerated(uint64(m.FailureThreshold)) + return n +} + +func (m *RBDVolumeSource) Size() (n int) { + var l int + _ = l + if len(m.CephMonitors) > 0 { + for _, s := range m.CephMonitors { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.RBDImage) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.RBDPool) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.RadosUser) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Keyring) + n += 1 + l + sovGenerated(uint64(l)) + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + return n +} + +func (m *RangeAllocation) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + 
l + sovGenerated(uint64(l)) + l = len(m.Range) + n += 1 + l + sovGenerated(uint64(l)) + if m.Data != nil { + l = len(m.Data) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ReplicationController) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ReplicationControllerList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ReplicationControllerSpec) Size() (n int) { + var l int + _ = l + if m.Replicas != nil { + n += 1 + sovGenerated(uint64(*m.Replicas)) + } + if len(m.Selector) > 0 { + for k, v := range m.Selector { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.Template != nil { + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ReplicationControllerStatus) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Replicas)) + n += 1 + sovGenerated(uint64(m.FullyLabeledReplicas)) + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + return n +} + +func (m *ResourceFieldSelector) Size() (n int) { + var l int + _ = l + l = len(m.ContainerName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Resource) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Divisor.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceQuota) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceQuotaList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ResourceQuotaSpec) Size() (n int) { + var l int + _ = l + if len(m.Hard) > 0 { + for k, v := range m.Hard { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Scopes) > 0 { + for _, s := range m.Scopes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ResourceQuotaStatus) Size() (n int) { + var l int + _ = l + if len(m.Hard) > 0 { + for k, v := range m.Hard { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Used) > 0 { + for k, v := range m.Used { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *ResourceRequirements) Size() (n int) { + var l int + _ = l + if len(m.Limits) > 0 { + for k, v := range m.Limits { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += 
mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Requests) > 0 { + for k, v := range m.Requests { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *SELinuxOptions) Size() (n int) { + var l int + _ = l + l = len(m.User) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Role) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Level) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Secret) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Data) > 0 { + for k, v := range m.Data { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SecretKeySelector) Size() (n int) { + var l int + _ = l + l = m.LocalObjectReference.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SecretList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *SecretVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.SecretName) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *SecurityContext) Size() (n int) { + var l int + _ = l + if m.Capabilities != nil { + l = m.Capabilities.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Privileged != nil { + n += 2 + } + if m.SELinuxOptions != nil { + l = m.SELinuxOptions.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.RunAsUser != nil { + n += 1 + sovGenerated(uint64(*m.RunAsUser)) + } + if m.RunAsNonRoot != nil { + n += 2 + } + if m.ReadOnlyRootFilesystem != nil { + n += 2 + } + return n +} + +func (m *SerializedReference) Size() (n int) { + var l int + _ = l + l = m.Reference.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Service) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ServiceAccount) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Secrets) > 0 { + for _, e := range m.Secrets { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.ImagePullSecrets) > 0 { + for _, e := range m.ImagePullSecrets { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ServiceAccountList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ServiceList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = 
e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ServicePort) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Protocol) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Port)) + l = m.TargetPort.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.NodePort)) + return n +} + +func (m *ServiceProxyOptions) Size() (n int) { + var l int + _ = l + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ServiceSpec) Size() (n int) { + var l int + _ = l + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Selector) > 0 { + for k, v := range m.Selector { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + l = len(m.ClusterIP) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.ExternalIPs) > 0 { + for _, s := range m.ExternalIPs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.DeprecatedPublicIPs) > 0 { + for _, s := range m.DeprecatedPublicIPs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.SessionAffinity) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.LoadBalancerIP) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.LoadBalancerSourceRanges) > 0 { + for _, s := range m.LoadBalancerSourceRanges { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ServiceStatus) Size() (n int) { + var l int + _ = l + l = m.LoadBalancer.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *TCPSocketAction) Size() (n int) { + var l int + _ = l + l = m.Port.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Taint) Size() (n int) { + var l int + _ = l + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Value) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Effect) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Toleration) Size() (n int) { + var l int + _ = l + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Operator) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Value) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Effect) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Volume) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.VolumeSource.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *VolumeMount) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + l = len(m.MountPath) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.SubPath) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *VolumeSource) Size() (n int) { + var l int + _ = l + if m.HostPath != nil { + l = m.HostPath.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.EmptyDir != nil { + l = m.EmptyDir.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.GCEPersistentDisk != nil { + l = m.GCEPersistentDisk.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.AWSElasticBlockStore != nil { + l = m.AWSElasticBlockStore.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.GitRepo != nil { + l = m.GitRepo.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Secret != nil { + l 
= m.Secret.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NFS != nil { + l = m.NFS.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ISCSI != nil { + l = m.ISCSI.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Glusterfs != nil { + l = m.Glusterfs.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.PersistentVolumeClaim != nil { + l = m.PersistentVolumeClaim.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.RBD != nil { + l = m.RBD.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.FlexVolume != nil { + l = m.FlexVolume.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Cinder != nil { + l = m.Cinder.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.CephFS != nil { + l = m.CephFS.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Flocker != nil { + l = m.Flocker.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.DownwardAPI != nil { + l = m.DownwardAPI.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.FC != nil { + l = m.FC.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.AzureFile != nil { + l = m.AzureFile.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.ConfigMap != nil { + l = m.ConfigMap.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.VsphereVolume != nil { + l = m.VsphereVolume.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *VsphereVirtualDiskVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.VolumePath) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *WeightedPodAffinityTerm) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Weight)) + l = m.PodAffinityTerm.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *AWSElasticBlockStoreVolumeSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AWSElasticBlockStoreVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AWSElasticBlockStoreVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeID = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 
7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Partition", wireType) + } + m.Partition = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Partition |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Affinity) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Affinity: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Affinity: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeAffinity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodeAffinity == nil { + m.NodeAffinity = &NodeAffinity{} + } + if err := m.NodeAffinity.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodAffinity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PodAffinity == nil { + m.PodAffinity = &PodAffinity{} + } + if err := 
m.PodAffinity.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodAntiAffinity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PodAntiAffinity == nil { + m.PodAntiAffinity = &PodAntiAffinity{} + } + if err := m.PodAntiAffinity.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AzureFileVolumeSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AzureFileVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AzureFileVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SecretName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ShareName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ShareName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + 
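+ // Editorial sketch (not generator output): unrecognized field numbers fall
+ // through to skipGenerated, which consumes just the wire type's worth of
+ // bytes and reports how many were skipped, so messages written by newer
+ // schemas still decode; iNdEx was rewound to preIndex above so the skip
+ // re-reads the whole tag.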
skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Binding) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Binding: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Binding: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Target.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Capabilities) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Capabilities: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Capabilities: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Add", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << 
shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Add = append(m.Add, Capability(data[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Drop", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Drop = append(m.Drop, Capability(data[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CephFSVolumeSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CephFSVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CephFSVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Monitors", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Monitors = append(m.Monitors, string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.User = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretFile", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SecretFile = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretRef == nil { + m.SecretRef = &LocalObjectReference{} + } + if err := m.SecretRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CinderVolumeSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CinderVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CinderVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeID = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ComponentCondition) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ComponentCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ComponentCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = ComponentConditionType(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = ConditionStatus(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 
{ + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Error = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ComponentStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ComponentStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ComponentStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, ComponentCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + 
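+ // Editorial sketch (not generator output): repeated message fields are
+ // decoded by appending a zero value and then unmarshaling into the slice's
+ // new last element in place, as in the Conditions case above, which avoids
+ // a temporary copy per item.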
return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ComponentStatusList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ComponentStatusList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ComponentStatusList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ComponentStatus{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConfigMap) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfigMap: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfigMap: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
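+ // Editorial sketch (not generator output): reaching this branch means ten
+ // varint bytes were consumed without a terminating byte (7 payload bits per
+ // byte, so shift has reached 64); the value cannot fit in a uint64 and
+ // decoding fails with an overflow error instead of silently wrapping.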
return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(data[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + if m.Data == nil { + m.Data = make(map[string]string) + } + m.Data[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConfigMapKeySelector) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := 
int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfigMapKeySelector: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfigMapKeySelector: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LocalObjectReference.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConfigMapList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfigMapList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfigMapList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b 
< 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ConfigMap{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConfigMapVolumeSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfigMapVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfigMapVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LocalObjectReference.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, KeyToPath{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Container) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Container: wiretype end group for non-group") + } + if 
fieldNum <= 0 { + return fmt.Errorf("proto: Container: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Image = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Command = append(m.Command, string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Args", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Args = append(m.Args, string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WorkingDir", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.WorkingDir = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } 
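+ // Repeated message fields such as Ports arrive as one length-delimited entry + // per element; running out of bytes mid-varint means the message was truncated.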
+ if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ports = append(m.Ports, ContainerPort{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Env = append(m.Env, EnvVar{}) + if err := m.Env[len(m.Env)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Resources.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeMounts", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeMounts = append(m.VolumeMounts, VolumeMount{}) + if err := m.VolumeMounts[len(m.VolumeMounts)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LivenessProbe", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LivenessProbe == nil { + m.LivenessProbe = &Probe{} + } + if err := m.LivenessProbe.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadinessProbe", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + 
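+ // A negative msglen means the length varint overflowed int; treat the payload as corrupt.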
return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ReadinessProbe == nil { + m.ReadinessProbe = &Probe{} + } + if err := m.ReadinessProbe.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Lifecycle", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Lifecycle == nil { + m.Lifecycle = &Lifecycle{} + } + if err := m.Lifecycle.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TerminationMessagePath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TerminationMessagePath = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImagePullPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ImagePullPolicy = PullPolicy(data[iNdEx:postIndex]) + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecurityContext", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecurityContext == nil { + m.SecurityContext = &SecurityContext{} + } + if err := m.SecurityContext.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 16: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Stdin = bool(v != 0) + case 17: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StdinOnce", wireType) + } + var v int + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.StdinOnce = bool(v != 0) + case 18: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TTY", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.TTY = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerImage) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerImage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerImage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Names", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Names = append(m.Names, string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SizeBytes", wireType) + } + m.SizeBytes = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.SizeBytes |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerPort) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerPort: wiretype end group for non-group") + } 
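+ // The tag is (fieldNumber << 3) | wireType; field numbers start at 1, so a + // decoded fieldNum of zero or less is invalid.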
+ if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerPort: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HostPort", wireType) + } + m.HostPort = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.HostPort |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerPort", wireType) + } + m.ContainerPort = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.ContainerPort |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Protocol = Protocol(data[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HostIP", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.HostIP = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerState) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 
3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Waiting", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Waiting == nil { + m.Waiting = &ContainerStateWaiting{} + } + if err := m.Waiting.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Running", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Running == nil { + m.Running = &ContainerStateRunning{} + } + if err := m.Running.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Terminated", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Terminated == nil { + m.Terminated = &ContainerStateTerminated{} + } + if err := m.Terminated.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerStateRunning) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerStateRunning: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerStateRunning: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StartedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 
{ + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.StartedAt.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerStateTerminated) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerStateTerminated: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerStateTerminated: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExitCode", wireType) + } + m.ExitCode = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.ExitCode |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Signal", wireType) + } + m.Signal = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Signal |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { 
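+ // Embedded messages such as StartedAt are always wire type 2 (length-delimited); + // anything else here is malformed.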
+ return fmt.Errorf("proto: wrong wireType = %d for field StartedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.StartedAt.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FinishedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.FinishedAt.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerID = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerStateWaiting) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerStateWaiting: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerStateWaiting: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if 
wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.State.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTerminationState", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTerminationState.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType 
!= 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Ready", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Ready = bool(v != 0) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RestartCount", wireType) + } + m.RestartCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.RestartCount |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Image = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImageID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ImageID = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerID = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DaemonEndpoint) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DaemonEndpoint: wiretype end group for non-group") + } + if 
fieldNum <= 0 { + return fmt.Errorf("proto: DaemonEndpoint: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + m.Port = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Port |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeleteOptions) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeleteOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeleteOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field GracePeriodSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.GracePeriodSeconds = &v + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Preconditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Preconditions == nil { + m.Preconditions = &Preconditions{} + } + if err := m.Preconditions.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OrphanDependents", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.OrphanDependents = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DownwardAPIVolumeFile) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + 
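+ // Accumulate the next tag varint: field number and wire type packed into one integer.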
if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DownwardAPIVolumeFile: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DownwardAPIVolumeFile: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FieldRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.FieldRef == nil { + m.FieldRef = &ObjectFieldSelector{} + } + if err := m.FieldRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceFieldRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ResourceFieldRef == nil { + m.ResourceFieldRef = &ResourceFieldSelector{} + } + if err := m.ResourceFieldRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DownwardAPIVolumeSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DownwardAPIVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DownwardAPIVolumeSource: illegal tag %d (wire 
type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, DownwardAPIVolumeFile{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EmptyDirVolumeSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EmptyDirVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EmptyDirVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Medium", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Medium = StorageMedium(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EndpointAddress) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EndpointAddress: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EndpointAddress: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) + } + var stringLen uint64 
+ for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IP = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TargetRef == nil { + m.TargetRef = &ObjectReference{} + } + if err := m.TargetRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hostname = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EndpointPort) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EndpointPort: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EndpointPort: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", 
wireType) + } + m.Port = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Port |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Protocol = Protocol(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EndpointSubset) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EndpointSubset: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EndpointSubset: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Addresses = append(m.Addresses, EndpointAddress{}) + if err := m.Addresses[len(m.Addresses)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NotReadyAddresses", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NotReadyAddresses = append(m.NotReadyAddresses, EndpointAddress{}) + if err := m.NotReadyAddresses[len(m.NotReadyAddresses)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
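// Editor's note (not part of the vendored diff): the shift-by-7 loops that
// recur throughout these Unmarshal methods all decode protobuf base-128
// varints. A minimal standalone sketch, assuming only "fmt" and "io", with
// the helper name decodeVarint invented here for illustration:
//
//    // decodeVarint reads one varint at data[i]; it returns the value and
//    // the index of the first unread byte.
//    func decodeVarint(data []byte, i int) (v uint64, next int, err error) {
//        for shift := uint(0); ; shift += 7 {
//            if shift >= 64 {
//                return 0, 0, fmt.Errorf("varint overflows uint64")
//            }
//            if i >= len(data) {
//                return 0, 0, io.ErrUnexpectedEOF
//            }
//            b := data[i]
//            i++
//            v |= uint64(b&0x7F) << shift // low 7 bits carry payload
//            if b < 0x80 {                // high bit clear: final byte
//                return v, i, nil
//            }
//        }
//    }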
return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ports = append(m.Ports, EndpointPort{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Endpoints) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Endpoints: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Endpoints: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subsets", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subsets = append(m.Subsets, EndpointSubset{}) + if err := m.Subsets[len(m.Subsets)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EndpointsList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType 
:= int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EndpointsList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EndpointsList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Endpoints{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EnvVar) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EnvVar: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EnvVar: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + 
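// Editor's note (illustrative aside): wire type 2 fields — strings, bytes,
// and embedded messages — are length-prefixed: one varint gives the byte
// count, then the payload follows. The generated code inlines that pattern
// at every string field; a hedged sketch in terms of the hypothetical
// decodeVarint above:
//
//    func decodeString(data []byte, i int) (s string, next int, err error) {
//        n, j, err := decodeVarint(data, i) // payload length
//        if err != nil {
//            return "", 0, err
//        }
//        if n > uint64(len(data)-j) { // same role as the postIndex > l check
//            return "", 0, io.ErrUnexpectedEOF
//        }
//        return string(data[j : j+int(n)]), j + int(n), nil
//    }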
} + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ValueFrom", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ValueFrom == nil { + m.ValueFrom = &EnvVarSource{} + } + if err := m.ValueFrom.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EnvVarSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EnvVarSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EnvVarSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FieldRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.FieldRef == nil { + m.FieldRef = &ObjectFieldSelector{} + } + if err := m.FieldRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceFieldRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ResourceFieldRef == nil { + m.ResourceFieldRef = &ResourceFieldSelector{} + } + if err := m.ResourceFieldRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConfigMapKeyRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
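// Editor's note (illustrative aside): embedded messages such as FieldRef,
// ResourceFieldRef, and the key selectors below reuse the same length
// framing as strings: decode a length varint, then hand exactly that
// sub-slice to the child type's Unmarshal, allocating the pointer lazily
// on first occurrence. Schematically, using the hypothetical helper:
//
//    n, j, err := decodeVarint(data, i) // embedded message byte length
//    if err != nil { ... }
//    if m.FieldRef == nil {
//        m.FieldRef = &ObjectFieldSelector{} // allocate on first sight
//    }
//    err = m.FieldRef.Unmarshal(data[j : j+int(n)]) // recurse on sub-slice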
return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ConfigMapKeyRef == nil { + m.ConfigMapKeyRef = &ConfigMapKeySelector{} + } + if err := m.ConfigMapKeyRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretKeyRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretKeyRef == nil { + m.SecretKeyRef = &SecretKeySelector{} + } + if err := m.SecretKeyRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Event) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Event: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Event: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InvolvedObject", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.InvolvedObject.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for 
field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Source.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FirstTimestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.FirstTimestamp.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTimestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTimestamp.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) + } + m.Count = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Count |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + 
} + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EventList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Event{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EventSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + 
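// Editor's note (illustrative aside): fields this version of a type does
// not recognize fall into the default case, which calls skipGenerated to
// step over the payload keyed on its wire type — that is what keeps older
// decoders forward-compatible with newer fields. A simplified sketch that
// ignores the deprecated group wire types 3 and 4 and omits bounds checks:
//
//    func skipField(data []byte, i int, wireType int) (int, error) {
//        switch wireType {
//        case 0: // varint: consume bytes until the high bit is clear
//            _, next, err := decodeVarint(data, i)
//            return next, err
//        case 1: // fixed64
//            return i + 8, nil
//        case 2: // length-delimited: length varint, then that many bytes
//            n, next, err := decodeVarint(data, i)
//            if err != nil {
//                return 0, err
//            }
//            return next + int(n), nil
//        case 5: // fixed32
//            return i + 4, nil
//        }
//        return 0, fmt.Errorf("unsupported wire type %d", wireType)
//    }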
wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Component", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Component = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Host = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExecAction) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExecAction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExecAction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Command = append(m.Command, string(data[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExportOptions) Unmarshal(data 
[]byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExportOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExportOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Export", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Export = bool(v != 0) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Exact", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Exact = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FCVolumeSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FCVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FCVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetWWNs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TargetWWNs = append(m.TargetWWNs, string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Lun", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Lun = &v + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d 
for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FlexVolumeSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FlexVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FlexVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Driver = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= 
(int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretRef == nil { + m.SecretRef = &LocalObjectReference{} + } + if err := m.SecretRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(data[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + if m.Options == nil { + m.Options = make(map[string]string) + } + m.Options[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FlockerVolumeSource) Unmarshal(data []byte) error { 
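// Editor's note (illustrative aside): the Options block just above decodes
// a protobuf map<string, string>. On the wire a map field is a repeated
// entry message whose key is field 1 and whose value is field 2, so each
// Options entry looks like:
//
//    <entry length varint>            // Options itself is wire type 2
//        0x0A <key len> <key bytes>   // tag 1<<3|2, then the key string
//        0x12 <val len> <val bytes>   // tag 2<<3|2, then the value string
//
// and the decoder creates m.Options with make(map[string]string) lazily
// before the first insert, exactly as the generated code does.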
+ l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FlockerVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FlockerVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DatasetName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DatasetName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GCEPersistentDiskVolumeSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GCEPersistentDiskVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GCEPersistentDiskVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PDName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PDName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + 
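// Editor's note (illustrative aside): conversions like
// string(data[iNdEx:postIndex]) copy the bytes, so a decoded message never
// aliases the input buffer and the caller may reuse it afterwards. A
// hedged usage sketch (raw is a hypothetical wire-format []byte):
//
//    var pd GCEPersistentDiskVolumeSource
//    if err := pd.Unmarshal(raw); err != nil {
//        return err
//    }
//    // raw may now be recycled; pd owns independent copies of its strings.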
m.FSType = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Partition", wireType) + } + m.Partition = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Partition |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GitRepoVolumeSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GitRepoVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GitRepoVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Repository", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Repository = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Revision = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Directory", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := 
int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Directory = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GlusterfsVolumeSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GlusterfsVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GlusterfsVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EndpointsName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EndpointsName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HTTPGetAction) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HTTPGetAction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HTTPGetAction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Port.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Host = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scheme", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Scheme = URIScheme(data[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HTTPHeaders", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.HTTPHeaders = append(m.HTTPHeaders, HTTPHeader{}) + if err := 
m.HTTPHeaders[len(m.HTTPHeaders)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HTTPHeader) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HTTPHeader: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HTTPHeader: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Handler) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Handler: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Handler: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Exec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 
64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Exec == nil { + m.Exec = &ExecAction{} + } + if err := m.Exec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HTTPGet", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.HTTPGet == nil { + m.HTTPGet = &HTTPGetAction{} + } + if err := m.HTTPGet.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TCPSocket", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TCPSocket == nil { + m.TCPSocket = &TCPSocketAction{} + } + if err := m.TCPSocket.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HostPathVolumeSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HostPathVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HostPathVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, 
err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ISCSIVolumeSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ISCSIVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ISCSIVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetPortal", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TargetPortal = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IQN", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IQN = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Lun", wireType) + } + m.Lun = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Lun |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ISCSIInterface", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ISCSIInterface = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + 
b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *KeyToPath) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: KeyToPath: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: KeyToPath: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Lifecycle) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } 
+ if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Lifecycle: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Lifecycle: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PostStart", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PostStart == nil { + m.PostStart = &Handler{} + } + if err := m.PostStart.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreStop", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PreStop == nil { + m.PreStop = &Handler{} + } + if err := m.PreStop.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LimitRange) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LimitRange: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LimitRange: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + 
for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LimitRangeItem) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LimitRangeItem: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LimitRangeItem: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = LimitType(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := ResourceName(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift 
:= uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_kubernetes_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + if m.Max == nil { + m.Max = make(ResourceList) + } + m.Max[ResourceName(mapkey)] = *mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Min", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := ResourceName(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_kubernetes_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + if m.Min == nil { + m.Min = make(ResourceList) + } + m.Min[ResourceName(mapkey)] = *mapvalue + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Default", wireType) + } + var 
msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := ResourceName(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_kubernetes_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + if m.Default == nil { + m.Default = make(ResourceList) + } + m.Default[ResourceName(mapkey)] = *mapvalue + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DefaultRequest", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + 
intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := ResourceName(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_kubernetes_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + if m.DefaultRequest == nil { + m.DefaultRequest = make(ResourceList) + } + m.DefaultRequest[ResourceName(mapkey)] = *mapvalue + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxLimitRequestRatio", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := ResourceName(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_kubernetes_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { + return err + } 
+ iNdEx = postmsgIndex + if m.MaxLimitRequestRatio == nil { + m.MaxLimitRequestRatio = make(ResourceList) + } + m.MaxLimitRequestRatio[ResourceName(mapkey)] = *mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LimitRangeList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LimitRangeList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LimitRangeList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, LimitRange{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LimitRangeSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LimitRangeSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LimitRangeSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Limits", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Limits = append(m.Limits, LimitRangeItem{}) + if err := m.Limits[len(m.Limits)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *List) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: List: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: List: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, k8s_io_kubernetes_pkg_runtime.RawExtension{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListOptions) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LabelSelector", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LabelSelector = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FieldSelector", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FieldSelector = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Watch", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Watch = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceVersion = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.TimeoutSeconds = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LoadBalancerIngress) Unmarshal(data []byte) error { + l := len(data) 
+ iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LoadBalancerIngress: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LoadBalancerIngress: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IP = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hostname = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LoadBalancerStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LoadBalancerStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LoadBalancerStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ingress", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ingress = append(m.Ingress, LoadBalancerIngress{}) + if err := 
m.Ingress[len(m.Ingress)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LocalObjectReference) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LocalObjectReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LocalObjectReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NFSVolumeSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NFSVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NFSVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Server", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Server = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 
+ for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Namespace) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Namespace: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Namespace: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated 
+ } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NamespaceList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NamespaceList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NamespaceList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Namespace{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NamespaceSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NamespaceSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NamespaceSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Finalizers", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Finalizers = append(m.Finalizers, FinalizerName(data[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NamespaceStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NamespaceStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NamespaceStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Phase = NamespacePhase(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Node) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Node: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Node: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeAddress) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeAddress: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeAddress: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = NodeAddressType(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) 
& 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Address = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeAffinity) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeAffinity: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeAffinity: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequiredDuringSchedulingIgnoredDuringExecution", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RequiredDuringSchedulingIgnoredDuringExecution == nil { + m.RequiredDuringSchedulingIgnoredDuringExecution = &NodeSelector{} + } + if err := m.RequiredDuringSchedulingIgnoredDuringExecution.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreferredDuringSchedulingIgnoredDuringExecution", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PreferredDuringSchedulingIgnoredDuringExecution = append(m.PreferredDuringSchedulingIgnoredDuringExecution, PreferredSchedulingTerm{}) + if err := m.PreferredDuringSchedulingIgnoredDuringExecution[len(m.PreferredDuringSchedulingIgnoredDuringExecution)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeCondition) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 
64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = NodeConditionType(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = ConditionStatus(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastHeartbeatTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastHeartbeatTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex 
:= iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeDaemonEndpoints) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeDaemonEndpoints: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeDaemonEndpoints: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KubeletEndpoint", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.KubeletEndpoint.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var 
msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Node{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeProxyOptions) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeProxyOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeProxyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeSelector) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire 
>> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeSelector: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeSelector: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeSelectorTerms", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeSelectorTerms = append(m.NodeSelectorTerms, NodeSelectorTerm{}) + if err := m.NodeSelectorTerms[len(m.NodeSelectorTerms)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeSelectorRequirement) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeSelectorRequirement: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeSelectorRequirement: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Operator", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Operator = NodeSelectorOperator(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Values = append(m.Values, string(data[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeSelectorTerm) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeSelectorTerm: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeSelectorTerm: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchExpressions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MatchExpressions = append(m.MatchExpressions, NodeSelectorRequirement{}) + if err := m.MatchExpressions[len(m.MatchExpressions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodCIDR", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << 
shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PodCIDR = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExternalID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExternalID = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProviderID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProviderID = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Unschedulable", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Unschedulable = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := ResourceName(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_kubernetes_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + if m.Capacity == nil { + m.Capacity = make(ResourceList) + } + m.Capacity[ResourceName(mapkey)] = *mapvalue + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Allocatable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := ResourceName(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int 
+ for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_kubernetes_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + if m.Allocatable == nil { + m.Allocatable = make(ResourceList) + } + m.Allocatable[ResourceName(mapkey)] = *mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Phase = NodePhase(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, NodeCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Addresses = append(m.Addresses, NodeAddress{}) + if err := m.Addresses[len(m.Addresses)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DaemonEndpoints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.DaemonEndpoints.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeInfo", wireType) + 
} + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.NodeInfo.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Images", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Images = append(m.Images, ContainerImage{}) + if err := m.Images[len(m.Images)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeSystemInfo) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeSystemInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeSystemInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MachineID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MachineID = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SystemUUID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SystemUUID = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong 
wireType = %d for field BootID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.BootID = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KernelVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.KernelVersion = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OSImage", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OSImage = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerRuntimeVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerRuntimeVersion = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KubeletVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.KubeletVersion = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KubeProxyVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + 
break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.KubeProxyVersion = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OperatingSystem", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OperatingSystem = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Architecture", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Architecture = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ObjectFieldSelector) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ObjectFieldSelector: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ObjectFieldSelector: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIVersion = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FieldPath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ 
+ stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FieldPath = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ObjectMeta) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ObjectMeta: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ObjectMeta: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GenerateName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.GenerateName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SelfLink", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ 
+ stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SelfLink = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = k8s_io_kubernetes_pkg_types.UID(data[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceVersion = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Generation", wireType) + } + m.Generation = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Generation |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CreationTimestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.CreationTimestamp.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeletionTimestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DeletionTimestamp == nil { + m.DeletionTimestamp = &k8s_io_kubernetes_pkg_api_unversioned.Time{} + } + if err := m.DeletionTimestamp.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DeletionGracePeriodSeconds", wireType) + } + var v int64 
+ for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.DeletionGracePeriodSeconds = &v + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(data[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + if m.Labels == nil { + m.Labels = make(map[string]string) + } + m.Labels[mapkey] = mapvalue + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(data[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + if m.Annotations == nil { + m.Annotations = make(map[string]string) + } + m.Annotations[mapkey] = mapvalue + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OwnerReferences", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OwnerReferences = append(m.OwnerReferences, OwnerReference{}) + if err := m.OwnerReferences[len(m.OwnerReferences)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Finalizers", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Finalizers = append(m.Finalizers, string(data[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ObjectReference) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b 
:= data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ObjectReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ObjectReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = k8s_io_kubernetes_pkg_types.UID(data[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIVersion = 
string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceVersion = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FieldPath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FieldPath = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OwnerReference) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OwnerReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OwnerReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + 
m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = k8s_io_kubernetes_pkg_types.UID(data[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIVersion = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PersistentVolume) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PersistentVolume: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PersistentVolume: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := 
m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PersistentVolumeClaim) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PersistentVolumeClaim: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PersistentVolumeClaim: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); 
err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PersistentVolumeClaimList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PersistentVolumeClaimList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PersistentVolumeClaimList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, PersistentVolumeClaim{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PersistentVolumeClaimSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PersistentVolumeClaimSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PersistentVolumeClaimSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AccessModes", 
wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AccessModes = append(m.AccessModes, PersistentVolumeAccessMode(data[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Resources.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PersistentVolumeClaimStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PersistentVolumeClaimStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PersistentVolumeClaimStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Phase = PersistentVolumeClaimPhase(data[iNdEx:postIndex]) + iNdEx = postIndex 
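+ // Phase is a named string type, so the raw bytes convert directly to
+ // PersistentVolumeClaimPhase. Case 2 below appends one AccessModes
+ // entry per occurrence of the field, and case 3 decodes the Capacity
+ // map as a series of key/value pseudo-messages, one map entry per
+ // length-delimited field occurrence.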
+ case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AccessModes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AccessModes = append(m.AccessModes, PersistentVolumeAccessMode(data[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := ResourceName(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_kubernetes_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + if m.Capacity == nil { + m.Capacity = make(ResourceList) + } + m.Capacity[ResourceName(mapkey)] = *mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PersistentVolumeClaimVolumeSource) Unmarshal(data []byte) error { + l := len(data) + 
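+	// Every generated Unmarshal follows the same shape: an outer loop
+	// reads a varint tag, splits it into fieldNum (tag >> 3) and
+	// wireType (tag & 0x7), dispatches on the field number, and falls
+	// back to skipGenerated for unknown fields so newer messages still
+	// parse. Example: a tag byte 0x0A is fieldNum 1, wireType 2, i.e.
+	// the length-delimited ClaimName field handled in case 1 below.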
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := data[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: PersistentVolumeClaimVolumeSource: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: PersistentVolumeClaimVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ClaimName", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := data[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ClaimName = string(data[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			// bool fields arrive as wire type 0 varints; any non-zero value is true
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := data[iNdEx]
+				iNdEx++
+				v |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.ReadOnly = bool(v != 0)
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(data[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *PersistentVolumeList) Unmarshal(data []byte) error {
+	l := len(data)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := data[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: PersistentVolumeList: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: PersistentVolumeList: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := data[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			// repeated message field: each occurrence appends one PersistentVolume to Items
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, PersistentVolume{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PersistentVolumeSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PersistentVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GCEPersistentDisk", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.GCEPersistentDisk == nil { + m.GCEPersistentDisk = &GCEPersistentDiskVolumeSource{} + } + if err := m.GCEPersistentDisk.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AWSElasticBlockStore", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AWSElasticBlockStore == nil { + m.AWSElasticBlockStore = &AWSElasticBlockStoreVolumeSource{} + } + if err := m.AWSElasticBlockStore.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HostPath", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.HostPath == nil { + 
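+ // PersistentVolumeSource is a union-style struct: every volume type
+ // is a nil-able pointer, allocated lazily here and in the cases
+ // below only when its field actually appears on the wire.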
m.HostPath = &HostPathVolumeSource{} + } + if err := m.HostPath.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Glusterfs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Glusterfs == nil { + m.Glusterfs = &GlusterfsVolumeSource{} + } + if err := m.Glusterfs.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NFS", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NFS == nil { + m.NFS = &NFSVolumeSource{} + } + if err := m.NFS.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RBD", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RBD == nil { + m.RBD = &RBDVolumeSource{} + } + if err := m.RBD.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ISCSI", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ISCSI == nil { + m.ISCSI = &ISCSIVolumeSource{} + } + if err := m.ISCSI.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cinder", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Cinder == nil { + m.Cinder = &CinderVolumeSource{} + } + if err := m.Cinder.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if 
wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CephFS", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CephFS == nil { + m.CephFS = &CephFSVolumeSource{} + } + if err := m.CephFS.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FC", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.FC == nil { + m.FC = &FCVolumeSource{} + } + if err := m.FC.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Flocker", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Flocker == nil { + m.Flocker = &FlockerVolumeSource{} + } + if err := m.Flocker.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FlexVolume", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.FlexVolume == nil { + m.FlexVolume = &FlexVolumeSource{} + } + if err := m.FlexVolume.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AzureFile", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AzureFile == nil { + m.AzureFile = &AzureFileVolumeSource{} + } + if err := m.AzureFile.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VsphereVolume", wireType) + } + var msglen int + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.VsphereVolume == nil { + m.VsphereVolume = &VsphereVirtualDiskVolumeSource{} + } + if err := m.VsphereVolume.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PersistentVolumeSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PersistentVolumeSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PersistentVolumeSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := ResourceName(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if 
mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_kubernetes_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + if m.Capacity == nil { + m.Capacity = make(ResourceList) + } + m.Capacity[ResourceName(mapkey)] = *mapvalue + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PersistentVolumeSource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.PersistentVolumeSource.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AccessModes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AccessModes = append(m.AccessModes, PersistentVolumeAccessMode(data[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClaimRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ClaimRef == nil { + m.ClaimRef = &ObjectReference{} + } + if err := m.ClaimRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PersistentVolumeReclaimPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PersistentVolumeReclaimPolicy = PersistentVolumeReclaimPolicy(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return 
nil +} +func (m *PersistentVolumeStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PersistentVolumeStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PersistentVolumeStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Phase = PersistentVolumePhase(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Pod) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Pod: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Pod: illegal 
tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodAffinity) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodAffinity: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodAffinity: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequiredDuringSchedulingIgnoredDuringExecution", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RequiredDuringSchedulingIgnoredDuringExecution = 
append(m.RequiredDuringSchedulingIgnoredDuringExecution, PodAffinityTerm{}) + if err := m.RequiredDuringSchedulingIgnoredDuringExecution[len(m.RequiredDuringSchedulingIgnoredDuringExecution)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreferredDuringSchedulingIgnoredDuringExecution", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PreferredDuringSchedulingIgnoredDuringExecution = append(m.PreferredDuringSchedulingIgnoredDuringExecution, WeightedPodAffinityTerm{}) + if err := m.PreferredDuringSchedulingIgnoredDuringExecution[len(m.PreferredDuringSchedulingIgnoredDuringExecution)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodAffinityTerm) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodAffinityTerm: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodAffinityTerm: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LabelSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LabelSelector == nil { + m.LabelSelector = &k8s_io_kubernetes_pkg_api_unversioned.LabelSelector{} + } + if err := m.LabelSelector.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespaces", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespaces = append(m.Namespaces, 
string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TopologyKey", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TopologyKey = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodAntiAffinity) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodAntiAffinity: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodAntiAffinity: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequiredDuringSchedulingIgnoredDuringExecution", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RequiredDuringSchedulingIgnoredDuringExecution = append(m.RequiredDuringSchedulingIgnoredDuringExecution, PodAffinityTerm{}) + if err := m.RequiredDuringSchedulingIgnoredDuringExecution[len(m.RequiredDuringSchedulingIgnoredDuringExecution)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreferredDuringSchedulingIgnoredDuringExecution", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PreferredDuringSchedulingIgnoredDuringExecution = append(m.PreferredDuringSchedulingIgnoredDuringExecution, WeightedPodAffinityTerm{}) + if err := m.PreferredDuringSchedulingIgnoredDuringExecution[len(m.PreferredDuringSchedulingIgnoredDuringExecution)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := 
skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodAttachOptions) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodAttachOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodAttachOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Stdin = bool(v != 0) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Stdout", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Stdout = bool(v != 0) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Stderr", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Stderr = bool(v != 0) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TTY", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.TTY = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Container = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodCondition) Unmarshal(data []byte) error { + l := len(data) + 
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := data[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: PodCondition: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: PodCondition: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			// Type is a named string type, decoded like any length-delimited string
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := data[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Type = PodConditionType(data[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := data[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Status = ConditionStatus(data[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			// embedded message: delegate to LastProbeTime's own Unmarshal over a bounded sub-slice
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field LastProbeTime", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := data[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.LastProbeTime.Unmarshal(data[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := data[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.LastTransitionTime.Unmarshal(data[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := data[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
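+			// converting the decoded length to int (and rejecting negatives) guards
+			// against lengths that would overflow a signed int, e.g. on 32-bit
+			// platforms, before the length is used to slice the buffer below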
+ intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodExecOptions) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodExecOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodExecOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Stdin = bool(v != 0) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Stdout", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Stdout = bool(v != 0) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Stderr", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Stderr = bool(v != 0) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TTY", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.TTY = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Container", 
wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Container = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Command = append(m.Command, string(data[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Pod{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + 
if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodLogOptions) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodLogOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodLogOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Container = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Follow", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Follow = bool(v != 0) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Previous", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Previous = bool(v != 0) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SinceSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.SinceSeconds = &v + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SinceTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SinceTime == nil { + m.SinceTime = &k8s_io_kubernetes_pkg_api_unversioned.Time{} + } + if err := m.SinceTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field 
Timestamps", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Timestamps = bool(v != 0) + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TailLines", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.TailLines = &v + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LimitBytes", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.LimitBytes = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodProxyOptions) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodProxyOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodProxyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodSecurityContext) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return 
fmt.Errorf("proto: PodSecurityContext: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodSecurityContext: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SELinuxOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SELinuxOptions == nil { + m.SELinuxOptions = &SELinuxOptions{} + } + if err := m.SELinuxOptions.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RunAsUser", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.RunAsUser = &v + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RunAsNonRoot", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.RunAsNonRoot = &b + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SupplementalGroups", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.SupplementalGroups = append(m.SupplementalGroups, v) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FSGroup", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.FSGroup = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if 
wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Volumes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Volumes = append(m.Volumes, Volume{}) + if err := m.Volumes[len(m.Volumes)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Containers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Containers = append(m.Containers, Container{}) + if err := m.Containers[len(m.Containers)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RestartPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RestartPolicy = RestartPolicy(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TerminationGracePeriodSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.TerminationGracePeriodSeconds = &v + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ActiveDeadlineSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ActiveDeadlineSeconds = &v + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DNSPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DNSPolicy = DNSPolicy(data[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(data[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + if m.NodeSelector == nil { + m.NodeSelector = make(map[string]string) + } + m.NodeSelector[mapkey] = mapvalue + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceAccountName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceAccountName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedServiceAccount", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := 
iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DeprecatedServiceAccount = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HostNetwork", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.HostNetwork = bool(v != 0) + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HostPID", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.HostPID = bool(v != 0) + case 13: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HostIPC", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.HostIPC = bool(v != 0) + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecurityContext", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecurityContext == nil { + m.SecurityContext = &PodSecurityContext{} + } + if err := m.SecurityContext.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImagePullSecrets", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ImagePullSecrets = append(m.ImagePullSecrets, LocalObjectReference{}) + if err := m.ImagePullSecrets[len(m.ImagePullSecrets)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) + } + 
var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hostname = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 17: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subdomain", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subdomain = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Phase = PodPhase(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, PodCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: 
wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HostIP", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.HostIP = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodIP", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PodIP = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StartTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.StartTime == nil { + m.StartTime = &k8s_io_kubernetes_pkg_api_unversioned.Time{} + } + if err := m.StartTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerStatuses", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 
0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerStatuses = append(m.ContainerStatuses, ContainerStatus{}) + if err := m.ContainerStatuses[len(m.ContainerStatuses)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodStatusResult) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodStatusResult: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodStatusResult: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodTemplate) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodTemplate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return 
fmt.Errorf("proto: PodTemplate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Template.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodTemplateList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodTemplateList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodTemplateList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, PodTemplate{}) + if 
err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodTemplateSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodTemplateSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodTemplateSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Preconditions) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Preconditions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Preconditions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + 
if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := k8s_io_kubernetes_pkg_types.UID(data[iNdEx:postIndex]) + m.UID = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PreferredSchedulingTerm) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PreferredSchedulingTerm: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PreferredSchedulingTerm: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Weight", wireType) + } + m.Weight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Weight |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Preference", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Preference.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Probe) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Probe: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Probe: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + 
return fmt.Errorf("proto: wrong wireType = %d for field Handler", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Handler.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InitialDelaySeconds", wireType) + } + m.InitialDelaySeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.InitialDelaySeconds |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) + } + m.TimeoutSeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.TimeoutSeconds |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PeriodSeconds", wireType) + } + m.PeriodSeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.PeriodSeconds |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SuccessThreshold", wireType) + } + m.SuccessThreshold = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.SuccessThreshold |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FailureThreshold", wireType) + } + m.FailureThreshold = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.FailureThreshold |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RBDVolumeSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RBDVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RBDVolumeSource: 
illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CephMonitors", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CephMonitors = append(m.CephMonitors, string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RBDImage", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RBDImage = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RBDPool", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RBDPool = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RadosUser", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RadosUser = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyring", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyring = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretRef == nil { + m.SecretRef = &LocalObjectReference{} + } + if err := m.SecretRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RangeAllocation) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RangeAllocation: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RangeAllocation: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Range", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + 
return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Range = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], data[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicationController) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicationController: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicationController: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + 
msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicationControllerList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicationControllerList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicationControllerList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ReplicationController{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicationControllerSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicationControllerSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicationControllerSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch 
fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Replicas = &v + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(data[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + if m.Selector == nil { + m.Selector = make(map[string]string) + } + m.Selector[mapkey] = mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Template == nil { + m.Template = &PodTemplateSpec{} + } + if err := m.Template.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := 
skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicationControllerStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicationControllerStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicationControllerStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Replicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FullyLabeledReplicas", wireType) + } + m.FullyLabeledReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.FullyLabeledReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.ObservedGeneration |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceFieldSelector) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceFieldSelector: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceFieldSelector: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + 
iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resource = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Divisor", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Divisor.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceQuota) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceQuota: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceQuota: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= 
(int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceQuotaList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceQuotaList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceQuotaList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ResourceQuota{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + 
return nil +} +func (m *ResourceQuotaSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceQuotaSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceQuotaSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hard", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := ResourceName(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_kubernetes_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + if m.Hard == nil { + m.Hard = make(ResourceList) + } + m.Hard[ResourceName(mapkey)] = *mapvalue + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scopes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := 
int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Scopes = append(m.Scopes, ResourceQuotaScope(data[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceQuotaStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceQuotaStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceQuotaStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hard", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := ResourceName(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_kubernetes_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != 
nil { + return err + } + iNdEx = postmsgIndex + if m.Hard == nil { + m.Hard = make(ResourceList) + } + m.Hard[ResourceName(mapkey)] = *mapvalue + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Used", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := ResourceName(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_kubernetes_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + if m.Used == nil { + m.Used = make(ResourceList) + } + m.Used[ResourceName(mapkey)] = *mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceRequirements) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceRequirements: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceRequirements: 
illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Limits", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := ResourceName(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_kubernetes_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + if m.Limits == nil { + m.Limits = make(ResourceList) + } + m.Limits[ResourceName(mapkey)] = *mapvalue + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b 
< 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := ResourceName(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_kubernetes_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + if m.Requests == nil { + m.Requests = make(ResourceList) + } + m.Requests[ResourceName(mapkey)] = *mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SELinuxOptions) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SELinuxOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SELinuxOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.User = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + 
intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Role = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Level", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Level = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Secret) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Secret: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Secret: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapbyteLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + mapbyteLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intMapbyteLen := int(mapbyteLen) + if intMapbyteLen < 0 { + return ErrInvalidLengthGenerated + } + postbytesIndex := iNdEx + intMapbyteLen + if postbytesIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := make([]byte, mapbyteLen) + copy(mapvalue, data[iNdEx:postbytesIndex]) + iNdEx = postbytesIndex + if m.Data == nil { + m.Data = make(map[string][]byte) + } + m.Data[mapkey] = mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = SecretType(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SecretKeySelector) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SecretKeySelector: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SecretKeySelector: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
LocalObjectReference", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LocalObjectReference.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SecretList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SecretList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SecretList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Secret{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) 
+ if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SecretVolumeSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SecretVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SecretVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SecretName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, KeyToPath{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SecurityContext) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SecurityContext: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SecurityContext: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Capabilities", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Capabilities == nil { + m.Capabilities = &Capabilities{} + } + if err := m.Capabilities.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Privileged", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Privileged = &b + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SELinuxOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SELinuxOptions == nil { + m.SELinuxOptions = &SELinuxOptions{} + } + if err := m.SELinuxOptions.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RunAsUser", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.RunAsUser = &v + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RunAsNonRoot", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.RunAsNonRoot = &b + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnlyRootFilesystem", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.ReadOnlyRootFilesystem = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SerializedReference) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := 
int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SerializedReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SerializedReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reference", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Reference.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Service) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Service: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Service: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } 
+ postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceAccount) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceAccount: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceAccount: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secrets", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Secrets = append(m.Secrets, ObjectReference{}) + if err := m.Secrets[len(m.Secrets)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImagePullSecrets", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ImagePullSecrets = append(m.ImagePullSecrets, LocalObjectReference{}) + if err := m.ImagePullSecrets[len(m.ImagePullSecrets)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return 
io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceAccountList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceAccountList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceAccountList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ServiceAccount{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Service{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServicePort) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServicePort: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServicePort: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Protocol = Protocol(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + m.Port = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Port |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetPort", 
wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.TargetPort.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NodePort", wireType) + } + m.NodePort = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.NodePort |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceProxyOptions) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceProxyOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceProxyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch 
fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ports = append(m.Ports, ServicePort{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(data[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + if m.Selector == nil { + m.Selector = make(map[string]string) + } + m.Selector[mapkey] = mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterIP", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClusterIP = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = ServiceType(data[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExternalIPs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExternalIPs = append(m.ExternalIPs, string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedPublicIPs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DeprecatedPublicIPs = append(m.DeprecatedPublicIPs, string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SessionAffinity", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SessionAffinity = ServiceAffinity(data[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancerIP", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LoadBalancerIP = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong 
wireType = %d for field LoadBalancerSourceRanges", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LoadBalancerSourceRanges = append(m.LoadBalancerSourceRanges, string(data[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancer", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LoadBalancer.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TCPSocketAction) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TCPSocketAction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TCPSocketAction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Port.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Taint) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Taint: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Taint: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Effect", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Effect = TaintEffect(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + 
} + return nil +} +func (m *Toleration) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Toleration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Toleration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Operator", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Operator = TolerationOperator(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Effect", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Effect = TaintEffect(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + 
return nil +} +func (m *Volume) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Volume: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Volume: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeSource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.VolumeSource.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeMount) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeMount: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeMount: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) 
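The m.Name assignment just above is one instance of the generic wire-type-2 (length-delimited) pattern used for every string and embedded-message field in this file: decode the key varint, split it into field number (wire >> 3) and wire type (wire & 0x7), decode a byte-length varint, reject negative lengths, bounds-check postIndex, then slice. A hedged sketch of that sequence, with illustrative helper names (decodeVarint is the same loop sketched in the earlier aside):

package main

import (
	"errors"
	"fmt"
)

// decodeVarint: base-128 varint decoding, as in the earlier sketch.
func decodeVarint(data []byte) (v uint64, n int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errors.New("integer overflow")
		}
		if n >= len(data) {
			return 0, 0, errors.New("unexpected EOF")
		}
		b := data[n]
		n++
		v |= (uint64(b) & 0x7F) << shift
		if b < 0x80 {
			return v, n, nil
		}
	}
}

// decodeStringField reads one key and one length-delimited payload,
// mirroring the generated guards: wrong wire type, negative length
// (the ErrInvalidLengthGenerated analogue), and an end index that
// runs past the buffer.
func decodeStringField(data []byte) (fieldNum int32, s string, err error) {
	key, n, err := decodeVarint(data)
	if err != nil {
		return 0, "", err
	}
	fieldNum = int32(key >> 3) // upper bits: field number
	if wt := int(key & 0x7); wt != 2 { // lower 3 bits: wire type
		return 0, "", fmt.Errorf("expected wire type 2, got %d", wt)
	}
	length, m, err := decodeVarint(data[n:])
	if err != nil {
		return 0, "", err
	}
	intLen := int(length)
	if intLen < 0 {
		return 0, "", errors.New("negative length")
	}
	postIndex := n + m + intLen
	if postIndex > len(data) {
		return 0, "", errors.New("unexpected EOF")
	}
	return fieldNum, string(data[n+m : postIndex]), nil
}

func main() {
	// Field 1, wire type 2: key byte 0x0A = (1 << 3) | 2; then length 3, "foo".
	f, s, _ := decodeStringField([]byte{0x0A, 0x03, 'f', 'o', 'o'})
	fmt.Println(f, s) // 1 foo
}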
+ iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MountPath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MountPath = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SubPath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SubPath = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HostPath", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.HostPath == nil { + m.HostPath = &HostPathVolumeSource{} + } + if err := m.HostPath.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EmptyDir", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 
{ + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.EmptyDir == nil { + m.EmptyDir = &EmptyDirVolumeSource{} + } + if err := m.EmptyDir.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GCEPersistentDisk", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.GCEPersistentDisk == nil { + m.GCEPersistentDisk = &GCEPersistentDiskVolumeSource{} + } + if err := m.GCEPersistentDisk.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AWSElasticBlockStore", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AWSElasticBlockStore == nil { + m.AWSElasticBlockStore = &AWSElasticBlockStoreVolumeSource{} + } + if err := m.AWSElasticBlockStore.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GitRepo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.GitRepo == nil { + m.GitRepo = &GitRepoVolumeSource{} + } + if err := m.GitRepo.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Secret == nil { + m.Secret = &SecretVolumeSource{} + } + if err := m.Secret.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NFS", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift 
>= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NFS == nil { + m.NFS = &NFSVolumeSource{} + } + if err := m.NFS.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ISCSI", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ISCSI == nil { + m.ISCSI = &ISCSIVolumeSource{} + } + if err := m.ISCSI.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Glusterfs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Glusterfs == nil { + m.Glusterfs = &GlusterfsVolumeSource{} + } + if err := m.Glusterfs.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PersistentVolumeClaim", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PersistentVolumeClaim == nil { + m.PersistentVolumeClaim = &PersistentVolumeClaimVolumeSource{} + } + if err := m.PersistentVolumeClaim.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RBD", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RBD == nil { + m.RBD = &RBDVolumeSource{} + } + if err := m.RBD.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FlexVolume", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.FlexVolume == nil { + m.FlexVolume = &FlexVolumeSource{} + } + if err := m.FlexVolume.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cinder", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Cinder == nil { + m.Cinder = &CinderVolumeSource{} + } + if err := m.Cinder.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CephFS", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CephFS == nil { + m.CephFS = &CephFSVolumeSource{} + } + if err := m.CephFS.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Flocker", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Flocker == nil { + m.Flocker = &FlockerVolumeSource{} + } + if err := m.Flocker.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DownwardAPI", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DownwardAPI == nil { + m.DownwardAPI = &DownwardAPIVolumeSource{} + } + if err := m.DownwardAPI.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 17: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FC", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + 
break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.FC == nil { + m.FC = &FCVolumeSource{} + } + if err := m.FC.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 18: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AzureFile", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AzureFile == nil { + m.AzureFile = &AzureFileVolumeSource{} + } + if err := m.AzureFile.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 19: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConfigMap", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ConfigMap == nil { + m.ConfigMap = &ConfigMapVolumeSource{} + } + if err := m.ConfigMap.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 20: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VsphereVolume", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.VsphereVolume == nil { + m.VsphereVolume = &VsphereVirtualDiskVolumeSource{} + } + if err := m.VsphereVolume.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VsphereVirtualDiskVolumeSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VsphereVirtualDiskVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VsphereVirtualDiskVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong 
wireType = %d for field VolumePath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumePath = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WeightedPodAffinityTerm) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WeightedPodAffinityTerm: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WeightedPodAffinityTerm: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Weight", wireType) + } + m.Weight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Weight |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodAffinityTerm", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.PodAffinityTerm.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return 
io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(data []byte) (n int, err error) { + l := len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if data[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(data[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/v1/generated.proto b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/v1/generated.proto new file mode 100644 index 000000000000..bd4dbe7ab7b2 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/v1/generated.proto @@ -0,0 +1,2901 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.kubernetes.pkg.api.v1; + +import "k8s.io/kubernetes/pkg/api/resource/generated.proto"; +import "k8s.io/kubernetes/pkg/api/unversioned/generated.proto"; +import "k8s.io/kubernetes/pkg/runtime/generated.proto"; +import "k8s.io/kubernetes/pkg/util/intstr/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1"; + +// Represents a Persistent Disk resource in AWS. +// +// An AWS EBS disk must exist before mounting to a container. 
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/v1/generated.proto b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/v1/generated.proto new file mode 100644 index 000000000000..bd4dbe7ab7b2 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/v1/generated.proto @@ -0,0 +1,2901 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.kubernetes.pkg.api.v1; + +import "k8s.io/kubernetes/pkg/api/resource/generated.proto"; +import "k8s.io/kubernetes/pkg/api/unversioned/generated.proto"; +import "k8s.io/kubernetes/pkg/runtime/generated.proto"; +import "k8s.io/kubernetes/pkg/util/intstr/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1"; + +// Represents a Persistent Disk resource in AWS. +// +// An AWS EBS disk must exist before mounting to a container. The disk +// must also be in the same AWS zone as the kubelet. An AWS EBS disk +// can only be mounted as read/write once. AWS EBS volumes support +// ownership management and SELinux relabeling. +message AWSElasticBlockStoreVolumeSource { + // Unique ID of the persistent disk resource in AWS (Amazon EBS volume). + // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#awselasticblockstore + optional string volumeID = 1; + + // Filesystem type of the volume that you want to mount. + // Tip: Ensure that the filesystem type is supported by the host operating system. + // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#awselasticblockstore + // TODO: how do we prevent errors in the filesystem from compromising the machine + optional string fsType = 2; + + // The partition in the volume that you want to mount. + // If omitted, the default is to mount by volume name. + // Examples: For volume /dev/sda1, you specify the partition as "1". + // Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + optional int32 partition = 3; + + // Specify "true" to force the ReadOnly setting in VolumeMounts to "true". + // If omitted, the default is "false". + // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#awselasticblockstore + optional bool readOnly = 4; +} + +// Affinity is a group of affinity scheduling rules. +message Affinity { + // Describes node affinity scheduling rules for the pod. + optional NodeAffinity nodeAffinity = 1; + + // Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). + optional PodAffinity podAffinity = 2; + + // Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). + optional PodAntiAffinity podAntiAffinity = 3; +} + +// AzureFile represents an Azure File Service mount on the host and bind mount to the pod. +message AzureFileVolumeSource { + // The name of the secret that contains Azure Storage Account Name and Key. + optional string secretName = 1; + + // Share Name + optional string shareName = 2; + + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + optional bool readOnly = 3; +} + +// Binding ties one object to another. +// For example, a pod is bound to a node by a scheduler. +message Binding { + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + optional ObjectMeta metadata = 1; + + // The target object that you want to bind to the standard object. + optional ObjectReference target = 2; +} + +// Adds and removes POSIX capabilities from running containers. +message Capabilities { + // Added capabilities + repeated string add = 1; + + // Removed capabilities + repeated string drop = 2; +} + +// Represents a Ceph Filesystem mount that lasts the lifetime of a pod. +// Cephfs volumes do not support ownership management or SELinux relabeling.
+message CephFSVolumeSource { + // Required: Monitors is a collection of Ceph monitors + // More info: http://releases.k8s.io/HEAD/examples/cephfs/README.md#how-to-use-it + repeated string monitors = 1; + + // Optional: Used as the mounted root, rather than the full Ceph tree, default is / + optional string path = 2; + + // Optional: User is the rados user name, default is admin + // More info: http://releases.k8s.io/HEAD/examples/cephfs/README.md#how-to-use-it + optional string user = 3; + + // Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret + // More info: http://releases.k8s.io/HEAD/examples/cephfs/README.md#how-to-use-it + optional string secretFile = 4; + + // Optional: SecretRef is reference to the authentication secret for User, default is empty. + // More info: http://releases.k8s.io/HEAD/examples/cephfs/README.md#how-to-use-it + optional LocalObjectReference secretRef = 5; + + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // More info: http://releases.k8s.io/HEAD/examples/cephfs/README.md#how-to-use-it + optional bool readOnly = 6; +} + +// Represents a Cinder volume resource in OpenStack. +// A Cinder volume must exist before mounting to a container. +// The volume must also be in the same region as the kubelet. +// Cinder volumes support ownership management and SELinux relabeling. +message CinderVolumeSource { + // Volume id used to identify the volume in Cinder. + // More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + optional string volumeID = 1; + + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + optional string fsType = 2; + + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + optional bool readOnly = 3; +} + +// Information about the condition of a component. +message ComponentCondition { + // Type of condition for a component. + // Valid value: "Healthy" + optional string type = 1; + + // Status of the condition for a component. + // Valid values for "Healthy": "True", "False", or "Unknown". + optional string status = 2; + + // Message about the condition for a component. + // For example, information about a health check. + optional string message = 3; + + // Condition error code for a component. + // For example, a health check error code. + optional string error = 4; +} + +// ComponentStatus (and ComponentStatusList) holds the cluster validation info. +message ComponentStatus { + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + optional ObjectMeta metadata = 1; + + // List of component conditions observed + repeated ComponentCondition conditions = 2; +} + +// Status of all the conditions for the component as a list of ComponentStatus objects. +message ComponentStatusList { + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; + + // List of ComponentStatus objects. + repeated ComponentStatus items = 2; +} + +// ConfigMap holds configuration data for pods to consume.
+message ConfigMap { + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + optional ObjectMeta metadata = 1; + + // Data contains the configuration data. + // Each key must be a valid DNS_SUBDOMAIN with an optional leading dot. + map<string, string> data = 2; +} + +// Selects a key from a ConfigMap. +message ConfigMapKeySelector { + // The ConfigMap to select from. + optional LocalObjectReference localObjectReference = 1; + + // The key to select. + optional string key = 2; +} + +// ConfigMapList is a resource containing a list of ConfigMap objects. +message ConfigMapList { + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; + + // Items is the list of ConfigMaps. + repeated ConfigMap items = 2; +} + +// Adapts a ConfigMap into a volume. +// +// The contents of the target ConfigMap's Data field will be presented in a +// volume as files using the keys in the Data field as the file names, unless +// the items element is populated with specific mappings of keys to paths. +// ConfigMap volumes support ownership management and SELinux relabeling. +message ConfigMapVolumeSource { + optional LocalObjectReference localObjectReference = 1; + + // If unspecified, each key-value pair in the Data field of the referenced + // ConfigMap will be projected into the volume as a file whose name is the + // key and content is the value. If specified, the listed keys will be + // projected into the specified paths, and unlisted keys will not be + // present. If a key is specified which is not present in the ConfigMap, + // the volume setup will error. Paths must be relative and may not contain + // the '..' path or start with '..'. + repeated KeyToPath items = 2; +} + +// A single application container that you want to run within a pod. +message Container { + // Name of the container specified as a DNS_LABEL. + // Each container in a pod must have a unique name (DNS_LABEL). + // Cannot be updated. + optional string name = 1; + + // Docker image name. + // More info: http://releases.k8s.io/HEAD/docs/user-guide/images.md + optional string image = 2; + + // Entrypoint array. Not executed within a shell. + // The docker image's ENTRYPOINT is used if this is not provided. + // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax + // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, + // regardless of whether the variable exists or not. + // Cannot be updated. + // More info: http://releases.k8s.io/HEAD/docs/user-guide/containers.md#containers-and-commands + repeated string command = 3; + + // Arguments to the entrypoint. + // The docker image's CMD is used if this is not provided. + // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax + // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, + // regardless of whether the variable exists or not. + // Cannot be updated. + // More info: http://releases.k8s.io/HEAD/docs/user-guide/containers.md#containers-and-commands + repeated string args = 4; + + // Container's working directory.
+ // If not specified, the container runtime's default will be used, which + // might be configured in the container image. + // Cannot be updated. + optional string workingDir = 5; + + // List of ports to expose from the container. Exposing a port here gives + // the system additional information about the network connections a + // container uses, but is primarily informational. Not specifying a port here + // DOES NOT prevent that port from being exposed. Any port which is + // listening on the default "0.0.0.0" address inside a container will be + // accessible from the network. + // Cannot be updated. + repeated ContainerPort ports = 6; + + // List of environment variables to set in the container. + // Cannot be updated. + repeated EnvVar env = 7; + + // Compute Resources required by this container. + // Cannot be updated. + // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#resources + optional ResourceRequirements resources = 8; + + // Pod volumes to mount into the container's filesystem. + // Cannot be updated. + repeated VolumeMount volumeMounts = 9; + + // Periodic probe of container liveness. + // Container will be restarted if the probe fails. + // Cannot be updated. + // More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#container-probes + optional Probe livenessProbe = 10; + + // Periodic probe of container service readiness. + // Container will be removed from service endpoints if the probe fails. + // Cannot be updated. + // More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#container-probes + optional Probe readinessProbe = 11; + + // Actions that the management system should take in response to container lifecycle events. + // Cannot be updated. + optional Lifecycle lifecycle = 12; + + // Optional: Path at which the file to which the container's termination message + // will be written is mounted into the container's filesystem. + // Message written is intended to be a brief final status, such as an assertion failure message. + // Defaults to /dev/termination-log. + // Cannot be updated. + optional string terminationMessagePath = 13; + + // Image pull policy. + // One of Always, Never, IfNotPresent. + // Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + // Cannot be updated. + // More info: http://releases.k8s.io/HEAD/docs/user-guide/images.md#updating-images + optional string imagePullPolicy = 14; + + // Security options the pod should run with. + // More info: http://releases.k8s.io/HEAD/docs/design/security_context.md + optional SecurityContext securityContext = 15; + + // Whether this container should allocate a buffer for stdin in the container runtime. If this + // is not set, reads from stdin in the container will always result in EOF. + // Default is false. + optional bool stdin = 16; + + // Whether the container runtime should close the stdin channel after it has been opened by + // a single attach. When stdin is true the stdin stream will remain open across multiple attach + // sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + // first client attaches to stdin, and then remains open and accepts data until the client disconnects, + // at which time stdin is closed and remains closed until the container is restarted. If this + // flag is false, a container process that reads from stdin will never receive an EOF.
+ // Default is false. + optional bool stdinOnce = 17; + + // Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + // Default is false. + optional bool tty = 18; +} + +// Describes a container image +message ContainerImage { + // Names by which this image is known. + // e.g. ["gcr.io/google_containers/hyperkube:v1.0.7", "dockerhub.io/google_containers/hyperkube:v1.0.7"] + repeated string names = 1; + + // The size of the image in bytes. + optional int64 sizeBytes = 2; +} + +// ContainerPort represents a network port in a single container. +message ContainerPort { + // If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + // named port in a pod must have a unique name. Name for the port that can be + // referred to by services. + optional string name = 1; + + // Number of the port to expose on the host. + // If specified, this must be a valid port number, 0 < x < 65536. + // If HostNetwork is specified, this must match ContainerPort. + // Most containers do not need this. + optional int32 hostPort = 2; + + // Number of the port to expose on the pod's IP address. + // This must be a valid port number, 0 < x < 65536. + optional int32 containerPort = 3; + + // Protocol for port. Must be UDP or TCP. + // Defaults to "TCP". + optional string protocol = 4; + + // What host IP to bind the external port to. + optional string hostIP = 5; +} + +// ContainerState holds a possible state of a container. +// Only one of its members may be specified. +// If none of them is specified, the default one is ContainerStateWaiting. +message ContainerState { + // Details about a waiting container + optional ContainerStateWaiting waiting = 1; + + // Details about a running container + optional ContainerStateRunning running = 2; + + // Details about a terminated container + optional ContainerStateTerminated terminated = 3; +} + +// ContainerStateRunning is a running state of a container. +message ContainerStateRunning { + // Time at which the container was last (re-)started + optional k8s.io.kubernetes.pkg.api.unversioned.Time startedAt = 1; +} + +// ContainerStateTerminated is a terminated state of a container. +message ContainerStateTerminated { + // Exit status from the last termination of the container + optional int32 exitCode = 1; + + // Signal from the last termination of the container + optional int32 signal = 2; + + // (brief) reason from the last termination of the container + optional string reason = 3; + + // Message regarding the last termination of the container + optional string message = 4; + + // Time at which previous execution of the container started + optional k8s.io.kubernetes.pkg.api.unversioned.Time startedAt = 5; + + // Time at which the container last terminated + optional k8s.io.kubernetes.pkg.api.unversioned.Time finishedAt = 6; + + // Container's ID in the format 'docker://<container_id>' + optional string containerID = 7; +} + +// ContainerStateWaiting is a waiting state of a container. +message ContainerStateWaiting { + // (brief) reason the container is not yet running. + optional string reason = 1; + + // Message regarding why the container is not yet running. + optional string message = 2; +} + +// ContainerStatus contains details for the current status of this container. +message ContainerStatus { + // This must be a DNS_LABEL. Each container in a pod must have a unique name. + // Cannot be updated. + optional string name = 1; + + // Details about the container's current condition.
+ optional ContainerState state = 2; + + // Details about the container's last termination condition. + optional ContainerState lastState = 3; + + // Specifies whether the container has passed its readiness probe. + optional bool ready = 4; + + // The number of times the container has been restarted, currently based on + // the number of dead containers that have not yet been removed. + // Note that this is calculated from dead containers. But those containers are subject to + // garbage collection. This value will get capped at 5 by GC. + optional int32 restartCount = 5; + + // The image the container is running. + // More info: http://releases.k8s.io/HEAD/docs/user-guide/images.md + // TODO(dchen1107): Which image the container is running with? + optional string image = 6; + + // ImageID of the container's image. + optional string imageID = 7; + + // Container's ID in the format 'docker://<container_id>'. + // More info: http://releases.k8s.io/HEAD/docs/user-guide/container-environment.md#container-information + optional string containerID = 8; +} + +// DaemonEndpoint contains information about a single Daemon endpoint. +message DaemonEndpoint { + // Port number of the given endpoint. + optional int32 Port = 1; +} + +// DeleteOptions may be provided when deleting an API object +message DeleteOptions { + // The duration in seconds before the object should be deleted. Value must be a non-negative integer. + // The value zero indicates delete immediately. If this value is nil, the default grace period for the + // specified type will be used. + // Defaults to a per object value if not specified. Zero means delete immediately. + optional int64 gracePeriodSeconds = 1; + + // Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be + // returned. + optional Preconditions preconditions = 2; + + // Should the dependent objects be orphaned. If true/false, the "orphan" + // finalizer will be added to/removed from the object's finalizers list. + optional bool orphanDependents = 3; +} + +// DownwardAPIVolumeFile represents information to create the file containing the pod field +message DownwardAPIVolumeFile { + // Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..' + optional string path = 1; + + // Required: Selects a field of the pod: only annotations, labels, name and namespace are supported. + optional ObjectFieldSelector fieldRef = 2; + + // Selects a resource of the container: only resources limits and requests + // (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + optional ResourceFieldSelector resourceFieldRef = 3; +} + +// DownwardAPIVolumeSource represents a volume containing downward API info. +// Downward API volumes support ownership management and SELinux relabeling. +message DownwardAPIVolumeSource { + // Items is a list of downward API volume file + repeated DownwardAPIVolumeFile items = 1; +} + +// Represents an empty directory for a pod. +// Empty directory volumes support ownership management and SELinux relabeling. +message EmptyDirVolumeSource { + // What type of storage medium should back this directory. + // The default is "" which means to use the node's default medium. + // Must be an empty string (default) or Memory.
+ // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#emptydir + optional string medium = 1; +} + +// EndpointAddress is a tuple that describes a single IP address. +message EndpointAddress { + // The IP of this endpoint. + // May not be loopback (127.0.0.0/8), link-local (169.254.0.0/16), + // or link-local multicast (224.0.0.0/24). + // IPv6 is also accepted but not fully supported on all platforms. Also, certain + // kubernetes components, like kube-proxy, are not IPv6 ready. + // TODO: This should allow hostname or IP, See #4447. + optional string ip = 1; + + // The Hostname of this endpoint + optional string hostname = 3; + + // Reference to object providing the endpoint. + optional ObjectReference targetRef = 2; +} + +// EndpointPort is a tuple that describes a single port. +message EndpointPort { + // The name of this port (corresponds to ServicePort.Name). + // Must be a DNS_LABEL. + // Optional only if one port is defined. + optional string name = 1; + + // The port number of the endpoint. + optional int32 port = 2; + + // The IP protocol for this port. + // Must be UDP or TCP. + // Default is TCP. + optional string protocol = 3; +} + +// EndpointSubset is a group of addresses with a common set of ports. The +// expanded set of endpoints is the Cartesian product of Addresses x Ports. +// For example, given: +// { +// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}], +// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}] +// } +// The resulting set of endpoints can be viewed as: +// a: [ 10.10.1.1:8675, 10.10.2.2:8675 ], +// b: [ 10.10.1.1:309, 10.10.2.2:309 ] +message EndpointSubset { + // IP addresses which offer the related ports that are marked as ready. These endpoints + // should be considered safe for load balancers and clients to utilize. + repeated EndpointAddress addresses = 1; + + // IP addresses which offer the related ports but are not currently marked as ready + // because they have not yet finished starting, have recently failed a readiness check, + // or have recently failed a liveness check. + repeated EndpointAddress notReadyAddresses = 2; + + // Port numbers available on the related IP addresses. + repeated EndpointPort ports = 3; +} + +// Endpoints is a collection of endpoints that implement the actual service. Example: +// Name: "mysvc", +// Subsets: [ +// { +// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}], +// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}] +// }, +// { +// Addresses: [{"ip": "10.10.3.3"}], +// Ports: [{"name": "a", "port": 93}, {"name": "b", "port": 76}] +// }, +// ] +message Endpoints { + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + optional ObjectMeta metadata = 1; + + // The set of all endpoints is the union of all subsets. Addresses are placed into + // subsets according to the IPs they share. A single address with multiple ports, + // some of which are ready and some of which are not (because they come from + // different containers) will result in the address being displayed in different + // subsets for the different ports. No address will appear in both Addresses and + // NotReadyAddresses in the same subset. + // Sets of addresses and ports that comprise a service. + repeated EndpointSubset subsets = 2; +}
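The Cartesian-product expansion described in the EndpointSubset comment above is simple enough to state in a few lines of Go. A sketch with illustrative stand-in types (not the real client-go structs):

package main

import "fmt"

// subset is a stand-in for EndpointSubset with ready addresses and ports.
type subset struct {
	addresses []string
	ports     []int
}

// expand returns the Cartesian product Addresses x Ports of one subset:
// every address paired with every port.
func expand(s subset) []string {
	var eps []string
	for _, a := range s.addresses {
		for _, p := range s.ports {
			eps = append(eps, fmt.Sprintf("%s:%d", a, p))
		}
	}
	return eps
}

func main() {
	s := subset{
		addresses: []string{"10.10.1.1", "10.10.2.2"},
		ports:     []int{8675, 309},
	}
	fmt.Println(expand(s))
	// [10.10.1.1:8675 10.10.1.1:309 10.10.2.2:8675 10.10.2.2:309]
}

This is also why the Endpoints comment above groups addresses into subsets by the ports they share: the product form stays compact instead of enumerating every address/port pair.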
+// EndpointsList is a list of endpoints. +message EndpointsList { + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; + + // List of endpoints. + repeated Endpoints items = 2; +} + +// EnvVar represents an environment variable present in a Container. +message EnvVar { + // Name of the environment variable. Must be a C_IDENTIFIER. + optional string name = 1; + + // Variable references $(VAR_NAME) are expanded + // using the previously defined environment variables in the container and + // any service environment variables. If a variable cannot be resolved, + // the reference in the input string will be unchanged. The $(VAR_NAME) + // syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped + // references will never be expanded, regardless of whether the variable + // exists or not. (A toy sketch of this expansion appears after EventList below.) + // Defaults to "". + optional string value = 2; + + // Source for the environment variable's value. Cannot be used if value is not empty. + optional EnvVarSource valueFrom = 3; +} + +// EnvVarSource represents a source for the value of an EnvVar. +message EnvVarSource { + // Selects a field of the pod; only name and namespace are supported. + optional ObjectFieldSelector fieldRef = 1; + + // Selects a resource of the container: only resources limits and requests + // (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + optional ResourceFieldSelector resourceFieldRef = 2; + + // Selects a key of a ConfigMap. + optional ConfigMapKeySelector configMapKeyRef = 3; + + // Selects a key of a secret in the pod's namespace + optional SecretKeySelector secretKeyRef = 4; +} + +// Event is a report of an event somewhere in the cluster. +// TODO: Decide whether to store these separately or with the object they apply to. +message Event { + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + optional ObjectMeta metadata = 1; + + // The object that this event is about. + optional ObjectReference involvedObject = 2; + + // This should be a short, machine understandable string that gives the reason + // for the transition into the object's current status. + // TODO: provide exact specification for format. + optional string reason = 3; + + // A human-readable description of the status of this operation. + // TODO: decide on maximum length. + optional string message = 4; + + // The component reporting this event. Should be a short machine understandable string. + optional EventSource source = 5; + + // The time at which the event was first recorded. (Time of server receipt is in TypeMeta.) + optional k8s.io.kubernetes.pkg.api.unversioned.Time firstTimestamp = 6; + + // The time at which the most recent occurrence of this event was recorded. + optional k8s.io.kubernetes.pkg.api.unversioned.Time lastTimestamp = 7; + + // The number of times this event has occurred. + optional int32 count = 8; + + // Type of this event (Normal, Warning), new types could be added in the future + optional string type = 9; +} + +// EventList is a list of events. +message EventList { + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; + + // List of events + repeated Event items = 2; +}
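The $(VAR_NAME) expansion rules in the EnvVar comment above (expand known variables, leave unresolved references unchanged, treat $$ as an escape) can be sketched in a few lines. A simplified illustration, not Kubernetes' actual expansion code:

package main

import (
	"fmt"
	"regexp"
)

// ref matches either the $$ escape or a $(VAR_NAME) reference.
var ref = regexp.MustCompile(`\$\$|\$\(([A-Za-z_][A-Za-z0-9_]*)\)`)

// expandEnv resolves $(VAR_NAME) references against vars, leaving
// unresolved references unchanged and collapsing $$ to a literal $.
func expandEnv(s string, vars map[string]string) string {
	return ref.ReplaceAllStringFunc(s, func(m string) string {
		if m == "$$" {
			return "$" // escaped dollar; never expanded
		}
		name := m[2 : len(m)-1] // strip the $( and ) delimiters
		if v, ok := vars[name]; ok {
			return v
		}
		return m // unresolved reference stays as-is
	})
}

func main() {
	vars := map[string]string{"HOST": "db"}
	fmt.Println(expandEnv("$(HOST):$(PORT) and $$(HOST)", vars))
	// db:$(PORT) and $(HOST)
}

Note the escape behavior: because $$ is consumed before $( is considered, $$(HOST) comes out as the literal text $(HOST) rather than being expanded.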
+// EventSource contains information for an event. +message EventSource { + // Component from which the event is generated. + optional string component = 1; + + // Host name on which the event is generated. + optional string host = 2; +} + +// ExecAction describes a "run in container" action. +message ExecAction { + // Command is the command line to execute inside the container, the working directory for the + // command is root ('/') in the container's filesystem. The command is simply exec'd, it is + // not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + // a shell, you need to explicitly call out to that shell. + // Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + repeated string command = 1; +} + +// ExportOptions is the query options to the standard REST get call. +message ExportOptions { + // Should this value be exported. Export strips fields that a user can not specify. + optional bool export = 1; + + // Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace' + optional bool exact = 2; +} + +// Represents a Fibre Channel volume. +// Fibre Channel volumes can only be mounted as read/write once. +// Fibre Channel volumes support ownership management and SELinux relabeling. +message FCVolumeSource { + // Required: FC target world wide names (WWNs) + repeated string targetWWNs = 1; + + // Required: FC target lun number + optional int32 lun = 2; + + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // TODO: how do we prevent errors in the filesystem from compromising the machine + optional string fsType = 3; + + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + optional bool readOnly = 4; +} + +// FlexVolume represents a generic volume resource that is +// provisioned/attached using an exec based plugin. This is an alpha feature and may change in the future. +message FlexVolumeSource { + // Driver is the name of the driver to use for this volume. + optional string driver = 1; + + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. + optional string fsType = 2; + + // Optional: SecretRef is reference to the secret object containing + // sensitive information to pass to the plugin scripts. This may be + // empty if no secret object is specified. If the secret object + // contains more than one secret, all secrets are passed to the plugin + // scripts. + optional LocalObjectReference secretRef = 3; + + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + optional bool readOnly = 4; + + // Optional: Extra command options if any. + map<string, string> options = 5; +} + +// Represents a Flocker volume mounted by the Flocker agent. +// Flocker volumes do not support ownership management or SELinux relabeling. +message FlockerVolumeSource { + // Required: the volume name. This is going to be stored on metadata -> name on the payload for Flocker + optional string datasetName = 1; +} + +// Represents a Persistent Disk resource in Google Compute Engine. +// +// A GCE PD must exist before mounting to a container. The disk must +// also be in the same GCE project and zone as the kubelet. A GCE PD +// can only be mounted as read/write once or read-only many times. GCE +// PDs support ownership management and SELinux relabeling.
+message GCEPersistentDiskVolumeSource { + // Unique name of the PD resource in GCE. Used to identify the disk in GCE. + // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#gcepersistentdisk + optional string pdName = 1; + + // Filesystem type of the volume that you want to mount. + // Tip: Ensure that the filesystem type is supported by the host operating system. + // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#gcepersistentdisk + // TODO: how do we prevent errors in the filesystem from compromising the machine + optional string fsType = 2; + + // The partition in the volume that you want to mount. + // If omitted, the default is to mount by volume name. + // Examples: For volume /dev/sda1, you specify the partition as "1". + // Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#gcepersistentdisk + optional int32 partition = 3; + + // ReadOnly here will force the ReadOnly setting in VolumeMounts. + // Defaults to false. + // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#gcepersistentdisk + optional bool readOnly = 4; +} + +// Represents a volume that is populated with the contents of a git repository. +// Git repo volumes do not support ownership management. +// Git repo volumes support SELinux relabeling. +message GitRepoVolumeSource { + // Repository URL + optional string repository = 1; + + // Commit hash for the specified revision. + optional string revision = 2; + + // Target directory name. + // Must not contain or start with '..'. If '.' is supplied, the volume directory will be the + // git repository. Otherwise, if specified, the volume will contain the git repository in + // the subdirectory with the given name. + optional string directory = 3; +} + +// Represents a Glusterfs mount that lasts the lifetime of a pod. +// Glusterfs volumes do not support ownership management or SELinux relabeling. +message GlusterfsVolumeSource { + // EndpointsName is the endpoint name that details Glusterfs topology. + // More info: http://releases.k8s.io/HEAD/examples/glusterfs/README.md#create-a-pod + optional string endpoints = 1; + + // Path is the Glusterfs volume path. + // More info: http://releases.k8s.io/HEAD/examples/glusterfs/README.md#create-a-pod + optional string path = 2; + + // ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. + // Defaults to false. + // More info: http://releases.k8s.io/HEAD/examples/glusterfs/README.md#create-a-pod + optional bool readOnly = 3; +} + +// HTTPGetAction describes an action based on HTTP Get requests. +message HTTPGetAction { + // Path to access on the HTTP server. + optional string path = 1; + + // Name or number of the port to access on the container. + // Number must be in the range 1 to 65535. + // Name must be an IANA_SVC_NAME. + optional k8s.io.kubernetes.pkg.util.intstr.IntOrString port = 2; + + // Host name to connect to, defaults to the pod IP. You probably want to set + // "Host" in httpHeaders instead. + optional string host = 3; + + // Scheme to use for connecting to the host. + // Defaults to HTTP. + optional string scheme = 4; + + // Custom headers to set in the request. HTTP allows repeated headers. 
+ repeated HTTPHeader httpHeaders = 5; +} + +// HTTPHeader describes a custom header to be used in HTTP probes +message HTTPHeader { + // The header field name + optional string name = 1; + + // The header field value + optional string value = 2; +} + +// Handler defines a specific action that should be taken +// TODO: pass structured data to these actions, and document that data here. +message Handler { + // One and only one of the following should be specified. + // Exec specifies the action to take. + optional ExecAction exec = 1; + + // HTTPGet specifies the http request to perform. + optional HTTPGetAction httpGet = 2; + + // TCPSocket specifies an action involving a TCP port. + // TCP hooks not yet supported + // TODO: implement a realistic TCP lifecycle hook + optional TCPSocketAction tcpSocket = 3; +} + +// Represents a host path mapped into a pod. +// Host path volumes do not support ownership management or SELinux relabeling. +message HostPathVolumeSource { + // Path of the directory on the host. + // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#hostpath + optional string path = 1; +} + +// Represents an ISCSI disk. +// ISCSI volumes can only be mounted as read/write once. +// ISCSI volumes support ownership management and SELinux relabeling. +message ISCSIVolumeSource { + // iSCSI target portal. The portal is either an IP or ip_addr:port if the port + // is other than default (typically TCP ports 860 and 3260). + optional string targetPortal = 1; + + // Target iSCSI Qualified Name. + optional string iqn = 2; + + // iSCSI target lun number. + optional int32 lun = 3; + + // Optional: Defaults to 'default' (tcp). iSCSI interface name that uses an iSCSI transport. + optional string iscsiInterface = 4; + + // Filesystem type of the volume that you want to mount. + // Tip: Ensure that the filesystem type is supported by the host operating system. + // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#iscsi + // TODO: how do we prevent errors in the filesystem from compromising the machine + optional string fsType = 5; + + // ReadOnly here will force the ReadOnly setting in VolumeMounts. + // Defaults to false. + optional bool readOnly = 6; +} + +// Maps a string key to a path within a volume. +message KeyToPath { + // The key to project. + optional string key = 1; + + // The relative path of the file to map the key to. + // May not be an absolute path. + // May not contain the path element '..'. + // May not start with the string '..'. + optional string path = 2; +} + +// Lifecycle describes actions that the management system should take in response to container lifecycle +// events. For the PostStart and PreStop lifecycle handlers, management of the container blocks +// until the action is complete, unless the container process fails, in which case the handler is aborted. +message Lifecycle { + // PostStart is called immediately after a container is created. If the handler fails, + // the container is terminated and restarted according to its restart policy. + // Other management of the container blocks until the hook completes. + // More info: http://releases.k8s.io/HEAD/docs/user-guide/container-environment.md#hook-details + optional Handler postStart = 1; + + // PreStop is called immediately before a container is terminated. + // The container is terminated after the handler completes. + // The reason for termination is passed to the handler. 
+ // Regardless of the outcome of the handler, the container is eventually terminated. + // Other management of the container blocks until the hook completes. + // More info: http://releases.k8s.io/HEAD/docs/user-guide/container-environment.md#hook-details + optional Handler preStop = 2; +} + +// LimitRange sets resource usage limits for each kind of resource in a Namespace. +message LimitRange { + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + optional ObjectMeta metadata = 1; + + // Spec defines the limits enforced. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + optional LimitRangeSpec spec = 2; +} + +// LimitRangeItem defines a min/max usage limit for any resource that matches on kind. +message LimitRangeItem { + // Type of resource that this limit applies to. + optional string type = 1; + + // Max usage constraints on this kind by resource name. + map<string, k8s.io.kubernetes.pkg.api.resource.Quantity> max = 2; + + // Min usage constraints on this kind by resource name. + map<string, k8s.io.kubernetes.pkg.api.resource.Quantity> min = 3; + + // Default resource requirement limit value by resource name if resource limit is omitted. + map<string, k8s.io.kubernetes.pkg.api.resource.Quantity> default = 4; + + // DefaultRequest is the default resource requirement request value by resource name if resource request is omitted. + map<string, k8s.io.kubernetes.pkg.api.resource.Quantity> defaultRequest = 5; + + // If MaxLimitRequestRatio is specified, the named resource must have a request and limit that are both non-zero, + // and the limit divided by the request must be less than or equal to the enumerated value; + // this represents the max burst for the named resource. + map<string, k8s.io.kubernetes.pkg.api.resource.Quantity> maxLimitRequestRatio = 6; +} + +// LimitRangeList is a list of LimitRange items. +message LimitRangeList { + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; + + // Items is a list of LimitRange objects. + // More info: http://releases.k8s.io/HEAD/docs/design/admission_control_limit_range.md + repeated LimitRange items = 2; +} + +// LimitRangeSpec defines a min/max usage limit for resources that match on kind. +message LimitRangeSpec { + // Limits is the list of LimitRangeItem objects that are enforced. + repeated LimitRangeItem limits = 1; +} + +// List holds a list of objects, which may not be known by the server. +message List { + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; + + // List of objects + repeated k8s.io.kubernetes.pkg.runtime.RawExtension items = 2; +} + +// ListOptions is the query options to a standard REST list call. +message ListOptions { + // A selector to restrict the list of returned objects by their labels. + // Defaults to everything. + optional string labelSelector = 1; + + // A selector to restrict the list of returned objects by their fields. + // Defaults to everything. + optional string fieldSelector = 2; + + // Watch for changes to the described resources and return them as a stream of + // add, update, and remove notifications. Specify resourceVersion. + optional bool watch = 3; + + // When specified with a watch call, shows changes that occur after that particular version of a resource. + // Defaults to changes from the beginning of history. + optional string resourceVersion = 4; + + // Timeout for the list/watch call.
+ optional int64 timeoutSeconds = 5; +} + +// LoadBalancerIngress represents the status of a load-balancer ingress point: +// traffic intended for the service should be sent to an ingress point. +message LoadBalancerIngress { + // IP is set for load-balancer ingress points that are IP based + // (typically GCE or OpenStack load-balancers) + optional string ip = 1; + + // Hostname is set for load-balancer ingress points that are DNS based + // (typically AWS load-balancers) + optional string hostname = 2; +} + +// LoadBalancerStatus represents the status of a load-balancer. +message LoadBalancerStatus { + // Ingress is a list containing ingress points for the load-balancer. + // Traffic intended for the service should be sent to these ingress points. + repeated LoadBalancerIngress ingress = 1; +} + +// LocalObjectReference contains enough information to let you locate the +// referenced object inside the same namespace. +message LocalObjectReference { + // Name of the referent. + // More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#names + // TODO: Add other useful fields. apiVersion, kind, uid? + optional string name = 1; +} + +// Represents an NFS mount that lasts the lifetime of a pod. +// NFS volumes do not support ownership management or SELinux relabeling. +message NFSVolumeSource { + // Server is the hostname or IP address of the NFS server. + // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#nfs + optional string server = 1; + + // Path that is exported by the NFS server. + // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#nfs + optional string path = 2; + + // ReadOnly here will force + // the NFS export to be mounted with read-only permissions. + // Defaults to false. + // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#nfs + optional bool readOnly = 3; +} + +// Namespace provides a scope for Names. +// Use of multiple namespaces is optional. +message Namespace { + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + optional ObjectMeta metadata = 1; + + // Spec defines the behavior of the Namespace. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + optional NamespaceSpec spec = 2; + + // Status describes the current status of a Namespace. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + optional NamespaceStatus status = 3; +} + +// NamespaceList is a list of Namespaces. +message NamespaceList { + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; + + // Items is the list of Namespace objects in the list. + // More info: http://releases.k8s.io/HEAD/docs/user-guide/namespaces.md + repeated Namespace items = 2; +} + +// NamespaceSpec describes the attributes on a Namespace. +message NamespaceSpec { + // Finalizers is an opaque list of values that must be empty to permanently remove the object from storage. + // More info: http://releases.k8s.io/HEAD/docs/design/namespaces.md#finalizers + repeated string finalizers = 1; +} + +// NamespaceStatus is information about the current status of a Namespace. +message NamespaceStatus { + // Phase is the current lifecycle phase of the namespace.
+ // More info: http://releases.k8s.io/HEAD/docs/design/namespaces.md#phases + optional string phase = 1; +} + +// Node is a worker node in Kubernetes, formerly known as a minion. +// Each node will have a unique identifier in the cache (i.e. in etcd). +message Node { + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + optional ObjectMeta metadata = 1; + + // Spec defines the behavior of a node. + // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + optional NodeSpec spec = 2; + + // Most recently observed status of the node. + // Populated by the system. + // Read-only. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + optional NodeStatus status = 3; +} + +// NodeAddress contains information for the node's address. +message NodeAddress { + // Node address type, one of Hostname, ExternalIP or InternalIP. + optional string type = 1; + + // The node address. + optional string address = 2; +} + +// Node affinity is a group of node affinity scheduling rules. +message NodeAffinity { + // If the affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. + // If the affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. due to an update), the system + // may or may not try to eventually evict the pod from its node. + optional NodeSelector requiredDuringSchedulingIgnoredDuringExecution = 1; + + // The scheduler will prefer to schedule pods to nodes that satisfy + // the affinity expressions specified by this field, but it may choose + // a node that violates one or more of the expressions. The node that is + // most preferred is the one with the greatest sum of weights, i.e. + // for each node that meets all of the scheduling requirements (resource + // request, requiredDuringScheduling affinity expressions, etc.), + // compute a sum by iterating through the elements of this field and adding + // "weight" to the sum if the node matches the corresponding matchExpressions; the + // node(s) with the highest sum are the most preferred. + // (A small scoring sketch appears after NodeDaemonEndpoints below.) + repeated PreferredSchedulingTerm preferredDuringSchedulingIgnoredDuringExecution = 2; +} + +// NodeCondition contains condition information for a node. +message NodeCondition { + // Type of node condition. + optional string type = 1; + + // Status of the condition, one of True, False, Unknown. + optional string status = 2; + + // Last time we got an update on a given condition. + optional k8s.io.kubernetes.pkg.api.unversioned.Time lastHeartbeatTime = 3; + + // Last time the condition transitioned from one status to another. + optional k8s.io.kubernetes.pkg.api.unversioned.Time lastTransitionTime = 4; + + // (brief) reason for the condition's last transition. + optional string reason = 5; + + // Human readable message indicating details about last transition. + optional string message = 6; +} + +// NodeDaemonEndpoints lists ports opened by daemons running on the Node. +message NodeDaemonEndpoints { + // Endpoint on which Kubelet is listening. + optional DaemonEndpoint kubeletEndpoint = 1; +}
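The preferred-scheduling rule in the NodeAffinity comment above is a weighted sum: for each feasible node, add the weight of every term whose matchExpressions the node satisfies, and prefer the node(s) with the highest total. A toy Go sketch with illustrative stand-in types (not the real scheduler code):

package main

import "fmt"

// term stands in for PreferredSchedulingTerm: a weight plus a predicate
// over a node's labels (the matchExpressions part, reduced to a func).
type term struct {
	weight  int32
	matches func(labels map[string]string) bool
}

// score sums the weights of all terms the node's labels satisfy; among
// nodes that meet the hard requirements, the highest score is preferred.
func score(labels map[string]string, terms []term) int32 {
	var sum int32
	for _, t := range terms {
		if t.matches(labels) {
			sum += t.weight
		}
	}
	return sum
}

func main() {
	terms := []term{
		{weight: 80, matches: func(l map[string]string) bool { return l["disktype"] == "ssd" }},
		{weight: 20, matches: func(l map[string]string) bool { return l["zone"] == "us-east-1a" }},
	}
	fmt.Println(score(map[string]string{"disktype": "ssd"}, terms))                       // 80
	fmt.Println(score(map[string]string{"disktype": "ssd", "zone": "us-east-1a"}, terms)) // 100
}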
+// NodeList is the whole list of all Nodes which have been registered with the master. +message NodeList { + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; + + // List of nodes + repeated Node items = 2; +} + +// NodeProxyOptions is the query options to a Node's proxy call. +message NodeProxyOptions { + // Path is the URL path to use for the current proxy request to node. + optional string path = 1; +} + +// A node selector represents the union of the results of one or more label queries +// over a set of nodes; that is, it represents the OR of the selectors represented +// by the node selector terms. +message NodeSelector { + // Required. A list of node selector terms. The terms are ORed. + repeated NodeSelectorTerm nodeSelectorTerms = 1; +} + +// A node selector requirement is a selector that contains values, a key, and an operator +// that relates the key and values. +message NodeSelectorRequirement { + // The label key that the selector applies to. + optional string key = 1; + + // Represents a key's relationship to a set of values. + // Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. + optional string operator = 2; + + // An array of string values. If the operator is In or NotIn, + // the values array must be non-empty. If the operator is Exists or DoesNotExist, + // the values array must be empty. If the operator is Gt or Lt, the values + // array must have a single element, which will be interpreted as an integer. + // This array is replaced during a strategic merge patch. + repeated string values = 3; +} + +// A null or empty node selector term matches no objects. +message NodeSelectorTerm { + // Required. A list of node selector requirements. The requirements are ANDed. + repeated NodeSelectorRequirement matchExpressions = 1; +} + +// NodeSpec describes the attributes that a node is created with. +message NodeSpec { + // PodCIDR represents the pod IP range assigned to the node. + optional string podCIDR = 1; + + // External ID of the node assigned by some machine database (e.g. a cloud provider). + // Deprecated. + optional string externalID = 2; + + // ID of the node assigned by the cloud provider in the format: <ProviderName>://<ProviderSpecificNodeID> + optional string providerID = 3; + + // Unschedulable controls node schedulability of new pods. By default, node is schedulable. + // More info: http://releases.k8s.io/HEAD/docs/admin/node.md#manual-node-administration + optional bool unschedulable = 4; +} + +// NodeStatus is information about the current status of a node. +message NodeStatus { + // Capacity represents the total resources of a node. + // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#capacity for more details. + map<string, k8s.io.kubernetes.pkg.api.resource.Quantity> capacity = 1; + + // Allocatable represents the resources of a node that are available for scheduling. + // Defaults to Capacity. + map<string, k8s.io.kubernetes.pkg.api.resource.Quantity> allocatable = 2; + + // NodePhase is the recently observed lifecycle phase of the node. + // More info: http://releases.k8s.io/HEAD/docs/admin/node.md#node-phase + optional string phase = 3; + + // Conditions is an array of current observed node conditions. + // More info: http://releases.k8s.io/HEAD/docs/admin/node.md#node-condition + repeated NodeCondition conditions = 4; + + // List of addresses reachable to the node. + // Queried from cloud provider, if available. + // More info: http://releases.k8s.io/HEAD/docs/admin/node.md#node-addresses + repeated NodeAddress addresses = 5; + + // Endpoints of daemons running on the Node.
+ optional NodeDaemonEndpoints daemonEndpoints = 6; + + // Set of ids/uuids to uniquely identify the node. + // More info: http://releases.k8s.io/HEAD/docs/admin/node.md#node-info + optional NodeSystemInfo nodeInfo = 7; + + // List of container images on this node + repeated ContainerImage images = 8; +} + +// NodeSystemInfo is a set of ids/uuids to uniquely identify the node. +message NodeSystemInfo { + // Machine ID reported by the node. + optional string machineID = 1; + + // System UUID reported by the node. + optional string systemUUID = 2; + + // Boot ID reported by the node. + optional string bootID = 3; + + // Kernel Version reported by the node from 'uname -r' (e.g. 3.16.0-0.bpo.4-amd64). + optional string kernelVersion = 4; + + // OS Image reported by the node from /etc/os-release (e.g. Debian GNU/Linux 7 (wheezy)). + optional string osImage = 5; + + // ContainerRuntime Version reported by the node through runtime remote API (e.g. docker://1.5.0). + optional string containerRuntimeVersion = 6; + + // Kubelet Version reported by the node. + optional string kubeletVersion = 7; + + // KubeProxy Version reported by the node. + optional string kubeProxyVersion = 8; + + // The Operating System reported by the node + optional string operatingSystem = 9; + + // The Architecture reported by the node + optional string architecture = 10; +} + +// ObjectFieldSelector selects an APIVersioned field of an object. +message ObjectFieldSelector { + // Version of the schema the FieldPath is written in terms of, defaults to "v1". + optional string apiVersion = 1; + + // Path of the field to select in the specified API version. + optional string fieldPath = 2; +} + +// ObjectMeta is metadata that all persisted resources must have, which includes all objects +// users must create. +message ObjectMeta { + // Name must be unique within a namespace. Is required when creating resources, although + // some resources may allow a client to request the generation of an appropriate name + // automatically. Name is primarily intended for creation idempotence and configuration + // definition. + // Cannot be updated. + // More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#names + optional string name = 1; + + // GenerateName is an optional prefix, used by the server, to generate a unique + // name ONLY IF the Name field has not been provided. + // If this field is used, the name returned to the client will be different + // than the name passed. This value will also be combined with a unique suffix. + // The provided value has the same validation rules as the Name field, + // and may be truncated by the length of the suffix required to make the value + // unique on the server. + // + // If this field is specified and the generated name exists, the server will + // NOT return a 409 - instead, it will either return 201 Created or 500 with Reason + // ServerTimeout indicating a unique name could not be found in the time allotted, and the client + // should retry (optionally after the time indicated in the Retry-After header). + // + // Applied only if Name is not specified. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#idempotency + optional string generateName = 2; + + // Namespace defines the space within which each name must be unique. An empty namespace is + // equivalent to the "default" namespace, but "default" is the canonical representation. + // Not all objects are required to be scoped to a namespace - the value of this field for + // those objects will be empty.
+ // + // Must be a DNS_LABEL. + // Cannot be updated. + // More info: http://releases.k8s.io/HEAD/docs/user-guide/namespaces.md + optional string namespace = 3; + + // SelfLink is a URL representing this object. + // Populated by the system. + // Read-only. + optional string selfLink = 4; + + // UID is the unique in time and space value for this object. It is typically generated by + // the server on successful creation of a resource and is not allowed to change on PUT + // operations. + // + // Populated by the system. + // Read-only. + // More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#uids + optional string uid = 5; + + // An opaque value that represents the internal version of this object that can + // be used by clients to determine when objects have changed. May be used for optimistic + // concurrency, change detection, and the watch operation on a resource or set of resources. + // Clients must treat these values as opaque and pass them unmodified back to the server. + // They may only be valid for a particular resource or set of resources. + // + // Populated by the system. + // Read-only. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#concurrency-control-and-consistency + optional string resourceVersion = 6; + + // A sequence number representing a specific generation of the desired state. + // Populated by the system. Read-only. + optional int64 generation = 7; + + // CreationTimestamp is a timestamp representing the server time when this object was + // created. It is not guaranteed to be set in happens-before order across separate operations. + // Clients may not set this value. It is represented in RFC3339 form and is in UTC. + // + // Populated by the system. + // Read-only. + // Null for lists. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + optional k8s.io.kubernetes.pkg.api.unversioned.Time creationTimestamp = 8; + + // DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This + // field is set by the server when a graceful deletion is requested by the user, and is not + // directly settable by a client. The resource will be deleted (no longer visible from + // resource lists, and not reachable by name) after the time in this field. Once set, this + // value may not be unset or be set further into the future, although it may be shortened + // or the resource may be deleted prior to this time. For example, a user may request that + // a pod be deleted in 30 seconds. The Kubelet will react by sending a graceful termination + // signal to the containers in the pod. Once the resource is deleted in the API, the Kubelet + // will send a hard termination signal to the container. + // If not set, graceful deletion of the object has not been requested. + // + // Populated by the system when a graceful deletion is requested. + // Read-only. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + optional k8s.io.kubernetes.pkg.api.unversioned.Time deletionTimestamp = 9; + + // Number of seconds allowed for this object to gracefully terminate before + // it will be removed from the system. Only set when deletionTimestamp is also set. + // May only be shortened. + // Read-only. + optional int64 deletionGracePeriodSeconds = 10; + + // Map of string keys and values that can be used to organize and categorize + // (scope and select) objects.
+  // Map of string keys and values that can be used to organize and categorize
+  // (scope and select) objects. May match selectors of replication controllers
+  // and services.
+  // More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md
+  // TODO: replace map[string]string with labels.LabelSet type
+  map<string, string> labels = 11;
+
+  // Annotations is an unstructured key value map stored with a resource that may be
+  // set by external tools to store and retrieve arbitrary metadata. They are not
+  // queryable and should be preserved when modifying objects.
+  // More info: http://releases.k8s.io/HEAD/docs/user-guide/annotations.md
+  map<string, string> annotations = 12;
+
+  // List of objects depended on by this object. If ALL objects in the list have
+  // been deleted, this object will be garbage collected.
+  repeated OwnerReference ownerReferences = 13;
+
+  // Must be empty before the object is deleted from the registry. Each entry
+  // is an identifier for the responsible component that will remove the entry
+  // from the list. If the deletionTimestamp of the object is non-nil, entries
+  // in this list can only be removed.
+  repeated string finalizers = 14;
+}
+
+// ObjectReference contains enough information to let you inspect or modify the referred object.
+message ObjectReference {
+  // Kind of the referent.
+  // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
+  optional string kind = 1;
+
+  // Namespace of the referent.
+  // More info: http://releases.k8s.io/HEAD/docs/user-guide/namespaces.md
+  optional string namespace = 2;
+
+  // Name of the referent.
+  // More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#names
+  optional string name = 3;
+
+  // UID of the referent.
+  // More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#uids
+  optional string uid = 4;
+
+  // API version of the referent.
+  optional string apiVersion = 5;
+
+  // Specific resourceVersion to which this reference is made, if any.
+  // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#concurrency-control-and-consistency
+  optional string resourceVersion = 6;
+
+  // If referring to a piece of an object instead of an entire object, this string
+  // should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].
+  // For example, if the object reference is to a container within a pod, this would take on a value like:
+  // "spec.containers{name}" (where "name" refers to the name of the container that triggered
+  // the event) or if no container name is specified "spec.containers[2]" (container with
+  // index 2 in this pod). This syntax is chosen only to have some well-defined way of
+  // referencing a part of an object.
+  // TODO: this design is not final and this field is subject to change in the future.
+  optional string fieldPath = 7;
+}
+
+// OwnerReference contains enough information to let you identify an owning
+// object. Currently, an owning object must be in the same namespace, so there
+// is no namespace field.
+message OwnerReference {
+  // API version of the referent.
+  optional string apiVersion = 5;
+
+  // Kind of the referent.
+  // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
+  optional string kind = 1;
+
+  // Name of the referent.
+  // More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#names
+  optional string name = 3;
+
+  // UID of the referent.
+  // More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#uids
+  optional string uid = 4;
+}
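+
+// Illustrative sketch (editor's note, not part of the generated file): with the
+// generated Go types, an ObjectReference pointing at one container of a pod might
+// be built as follows; the container name "nginx" is an assumption.
+//
+//   ref := v1.ObjectReference{
+//       Kind:      "Pod",
+//       Namespace: "default",
+//       Name:      "web-0",
+//       FieldPath: "spec.containers{nginx}",
+//   }
+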
+// PersistentVolume (PV) is a storage resource provisioned by an administrator.
+// It is analogous to a node.
+// More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md
+message PersistentVolume {
+  // Standard object's metadata.
+  // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+  optional ObjectMeta metadata = 1;
+
+  // Spec defines a specification of a persistent volume owned by the cluster.
+  // Provisioned by an administrator.
+  // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#persistent-volumes
+  optional PersistentVolumeSpec spec = 2;
+
+  // Status represents the current information/status for the persistent volume.
+  // Populated by the system.
+  // Read-only.
+  // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#persistent-volumes
+  optional PersistentVolumeStatus status = 3;
+}
+
+// PersistentVolumeClaim is a user's request for and claim to a persistent volume
+message PersistentVolumeClaim {
+  // Standard object's metadata.
+  // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+  optional ObjectMeta metadata = 1;
+
+  // Spec defines the desired characteristics of a volume requested by a pod author.
+  // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#persistentvolumeclaims
+  optional PersistentVolumeClaimSpec spec = 2;
+
+  // Status represents the current information/status of a persistent volume claim.
+  // Read-only.
+  // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#persistentvolumeclaims
+  optional PersistentVolumeClaimStatus status = 3;
+}
+
+// PersistentVolumeClaimList is a list of PersistentVolumeClaim items.
+message PersistentVolumeClaimList {
+  // Standard list metadata.
+  // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
+  optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1;
+
+  // A list of persistent volume claims.
+  // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#persistentvolumeclaims
+  repeated PersistentVolumeClaim items = 2;
+}
+
+// PersistentVolumeClaimSpec describes the common attributes of storage devices
+// and allows a Source for provider-specific attributes
+message PersistentVolumeClaimSpec {
+  // AccessModes contains the desired access modes the volume should have.
+  // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#access-modes-1
+  repeated string accessModes = 1;
+
+  // Resources represents the minimum resources the volume should have.
+  // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#resources
+  optional ResourceRequirements resources = 2;
+
+  // VolumeName is the binding reference to the PersistentVolume backing this claim.
+  optional string volumeName = 3;
+}
+
+// PersistentVolumeClaimStatus is the current status of a persistent volume claim.
+message PersistentVolumeClaimStatus {
+  // Phase represents the current phase of PersistentVolumeClaim.
+  optional string phase = 1;
+
+  // AccessModes contains the actual access modes the volume backing the PVC has.
+  // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#access-modes-1
+  repeated string accessModes = 2;
+
+  // Represents the actual resources of the underlying volume.
+  map<string, k8s.io.kubernetes.pkg.api.resource.Quantity> capacity = 3;
+}
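+
+// Illustrative sketch (editor's note, not part of the generated file): with the
+// generated Go types and the resource package, a claim for 8Gi of ReadWriteOnce
+// storage might look like this; the claim name "data" is an assumption.
+//
+//   claim := v1.PersistentVolumeClaim{
+//       ObjectMeta: v1.ObjectMeta{Name: "data"},
+//       Spec: v1.PersistentVolumeClaimSpec{
+//           AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
+//           Resources: v1.ResourceRequirements{
+//               Requests: v1.ResourceList{
+//                   v1.ResourceStorage: resource.MustParse("8Gi"),
+//               },
+//           },
+//       },
+//   }
+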
+// PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace.
+// This volume finds the bound PV and mounts that volume for the pod. A
+// PersistentVolumeClaimVolumeSource is, essentially, a wrapper around another
+// type of volume that is owned by someone else (the system).
+message PersistentVolumeClaimVolumeSource {
+  // ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume.
+  // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#persistentvolumeclaims
+  optional string claimName = 1;
+
+  // Will force the ReadOnly setting in VolumeMounts.
+  // Default false.
+  optional bool readOnly = 2;
+}
+
+// PersistentVolumeList is a list of PersistentVolume items.
+message PersistentVolumeList {
+  // Standard list metadata.
+  // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
+  optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1;
+
+  // List of persistent volumes.
+  // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md
+  repeated PersistentVolume items = 2;
+}
+
+// PersistentVolumeSource is similar to VolumeSource but meant for the
+// administrator who creates PVs. Exactly one of its members must be set.
+message PersistentVolumeSource {
+  // GCEPersistentDisk represents a GCE Disk resource that is attached to a
+  // kubelet's host machine and then exposed to the pod. Provisioned by an admin.
+  // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#gcepersistentdisk
+  optional GCEPersistentDiskVolumeSource gcePersistentDisk = 1;
+
+  // AWSElasticBlockStore represents an AWS Disk resource that is attached to a
+  // kubelet's host machine and then exposed to the pod.
+  // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#awselasticblockstore
+  optional AWSElasticBlockStoreVolumeSource awsElasticBlockStore = 2;
+
+  // HostPath represents a directory on the host.
+  // Provisioned by a developer or tester.
+  // This is useful for single-node development and testing only!
+  // On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster.
+  // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#hostpath
+  optional HostPathVolumeSource hostPath = 3;
+
+  // Glusterfs represents a Glusterfs volume that is attached to a host and
+  // exposed to the pod. Provisioned by an admin.
+  // More info: http://releases.k8s.io/HEAD/examples/glusterfs/README.md
+  optional GlusterfsVolumeSource glusterfs = 4;
+
+  // NFS represents an NFS mount on the host. Provisioned by an admin.
+  // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#nfs
+  optional NFSVolumeSource nfs = 5;
+
+  // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime.
+  // More info: http://releases.k8s.io/HEAD/examples/rbd/README.md
+  optional RBDVolumeSource rbd = 6;
+
+  // ISCSI represents an ISCSI Disk resource that is attached to a
+  // kubelet's host machine and then exposed to the pod. Provisioned by an admin.
+  optional ISCSIVolumeSource iscsi = 7;
+
+  // Cinder represents a Cinder volume attached and mounted on the kubelet's host machine.
+  // More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
+  optional CinderVolumeSource cinder = 8;
+
+  // CephFS represents a Ceph FS mount on the host that shares a pod's lifetime.
+  optional CephFSVolumeSource cephfs = 9;
+
+  // FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.
+  optional FCVolumeSource fc = 10;
+
+  // Flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service running.
+  optional FlockerVolumeSource flocker = 11;
+
+  // FlexVolume represents a generic volume resource that is
+  // provisioned/attached using an exec based plugin. This is an
+  // alpha feature and may change in the future.
+  optional FlexVolumeSource flexVolume = 12;
+
+  // AzureFile represents an Azure File Service mount on the host and bind mount to the pod.
+  optional AzureFileVolumeSource azureFile = 13;
+
+  // VsphereVolume represents a vSphere volume attached and mounted on the kubelet's host machine.
+  optional VsphereVirtualDiskVolumeSource vsphereVolume = 14;
+}
+
+// PersistentVolumeSpec is the specification of a persistent volume.
+message PersistentVolumeSpec {
+  // A description of the persistent volume's resources and capacity.
+  // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#capacity
+  map<string, k8s.io.kubernetes.pkg.api.resource.Quantity> capacity = 1;
+
+  // The actual volume backing the persistent volume.
+  optional PersistentVolumeSource persistentVolumeSource = 2;
+
+  // AccessModes contains all ways the volume can be mounted.
+  // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#access-modes
+  repeated string accessModes = 3;
+
+  // ClaimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim.
+  // Expected to be non-nil when bound.
+  // claim.VolumeName is the authoritative bind between PV and PVC.
+  // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#binding
+  optional ObjectReference claimRef = 4;
+
+  // What happens to a persistent volume when released from its claim.
+  // Valid options are Retain (default) and Recycle.
+  // Recycling must be supported by the volume plugin underlying this persistent volume.
+  // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#recycling-policy
+  optional string persistentVolumeReclaimPolicy = 5;
+}
+
+// PersistentVolumeStatus is the current status of a persistent volume.
+message PersistentVolumeStatus {
+  // Phase indicates if a volume is available, bound to a claim, or released by a claim.
+  // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#phase
+  optional string phase = 1;
+
+  // A human-readable message indicating details about why the volume is in this state.
+  optional string message = 2;
+
+  // Reason is a brief CamelCase string that describes any failure and is meant
+  // for machine parsing and tidy display in the CLI.
+  optional string reason = 3;
+}
+
+// Pod is a collection of containers that can run on a host. This resource is created
+// by clients and scheduled onto hosts.
+message Pod {
+  // Standard object's metadata.
+  // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+  optional ObjectMeta metadata = 1;
+
+  // Specification of the desired behavior of the pod.
+  // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+  optional PodSpec spec = 2;
+
+  // Most recently observed status of the pod.
+  // This data may not be up to date.
+  // Populated by the system.
+  // Read-only.
+  // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+  optional PodStatus status = 3;
+}
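+
+// Illustrative sketch (editor's note, not part of the generated file): an
+// administrator-provisioned NFS volume expressed with the generated Go types;
+// the server address and export path are assumptions.
+//
+//   pv := v1.PersistentVolume{
+//       ObjectMeta: v1.ObjectMeta{Name: "pv0001"},
+//       Spec: v1.PersistentVolumeSpec{
+//           Capacity: v1.ResourceList{
+//               v1.ResourceStorage: resource.MustParse("10Gi"),
+//           },
+//           AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteMany},
+//           PersistentVolumeSource: v1.PersistentVolumeSource{
+//               NFS: &v1.NFSVolumeSource{Server: "10.0.0.2", Path: "/exports/pv0001"},
+//           },
+//           PersistentVolumeReclaimPolicy: v1.PersistentVolumeReclaimRetain,
+//       },
+//   }
+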
+// Pod affinity is a group of inter pod affinity scheduling rules.
+message PodAffinity {
+  // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
+  // If the affinity requirements specified by this field are not met at
+  // scheduling time, the pod will not be scheduled onto the node.
+  // If the affinity requirements specified by this field cease to be met
+  // at some point during pod execution (e.g. due to a pod label update), the
+  // system will try to eventually evict the pod from its node.
+  // When there are multiple elements, the lists of nodes corresponding to each
+  // podAffinityTerm are intersected, i.e. all terms must be satisfied.
+  // RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"`
+  // If the affinity requirements specified by this field are not met at
+  // scheduling time, the pod will not be scheduled onto the node.
+  // If the affinity requirements specified by this field cease to be met
+  // at some point during pod execution (e.g. due to a pod label update), the
+  // system may or may not try to eventually evict the pod from its node.
+  // When there are multiple elements, the lists of nodes corresponding to each
+  // podAffinityTerm are intersected, i.e. all terms must be satisfied.
+  repeated PodAffinityTerm requiredDuringSchedulingIgnoredDuringExecution = 1;
+
+  // The scheduler will prefer to schedule pods to nodes that satisfy
+  // the affinity expressions specified by this field, but it may choose
+  // a node that violates one or more of the expressions. The node that is
+  // most preferred is the one with the greatest sum of weights, i.e.
+  // for each node that meets all of the scheduling requirements (resource
+  // request, requiredDuringScheduling affinity expressions, etc.),
+  // compute a sum by iterating through the elements of this field and adding
+  // "weight" to the sum if the node has pods that match the corresponding podAffinityTerm; the
+  // node(s) with the highest sum are the most preferred.
+  repeated WeightedPodAffinityTerm preferredDuringSchedulingIgnoredDuringExecution = 2;
+}
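+
+// Illustrative sketch (editor's note, not part of the generated file): a preferred
+// co-location rule expressed with the generated Go types; the label values and
+// topology key are assumptions.
+//
+//   affinity := v1.PodAffinity{
+//       PreferredDuringSchedulingIgnoredDuringExecution: []v1.WeightedPodAffinityTerm{{
+//           Weight: 50,
+//           PodAffinityTerm: v1.PodAffinityTerm{
+//               LabelSelector: &unversioned.LabelSelector{
+//                   MatchLabels: map[string]string{"app": "cache"},
+//               },
+//               TopologyKey: "kubernetes.io/hostname",
+//           },
+//       }},
+//   }
+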
+// Defines a set of pods (namely those matching the labelSelector
+// relative to the given namespace(s)) that this pod should be
+// co-located (affinity) or not co-located (anti-affinity) with,
+// where co-located is defined as running on a node whose value of
+// the label with key <topologyKey> matches that of any node on which
+// a pod of the set of pods is running
+message PodAffinityTerm {
+  // A label query over a set of resources, in this case pods.
+  optional k8s.io.kubernetes.pkg.api.unversioned.LabelSelector labelSelector = 1;
+
+  // namespaces specifies which namespaces the labelSelector applies to (matches against);
+  // nil list means "this pod's namespace," empty list means "all namespaces"
+  // The json tag here is not "omitempty" since we need to distinguish nil and empty.
+  // See https://golang.org/pkg/encoding/json/#Marshal for more details.
+  repeated string namespaces = 2;
+
+  // This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+  // the labelSelector in the specified namespaces, where co-located is defined as running on a node
+  // whose value of the label with key topologyKey matches that of any node on which any of the
+  // selected pods is running.
+  // For PreferredDuringScheduling pod anti-affinity, empty topologyKey is interpreted as "all topologies"
+  // ("all topologies" here means all the topologyKeys indicated by scheduler command-line argument --failure-domains);
+  // for affinity and for RequiredDuringScheduling pod anti-affinity, empty topologyKey is not allowed.
+  optional string topologyKey = 3;
+}
+
+// Pod anti affinity is a group of inter pod anti affinity scheduling rules.
+message PodAntiAffinity {
+  // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
+  // If the anti-affinity requirements specified by this field are not met at
+  // scheduling time, the pod will not be scheduled onto the node.
+  // If the anti-affinity requirements specified by this field cease to be met
+  // at some point during pod execution (e.g. due to a pod label update), the
+  // system will try to eventually evict the pod from its node.
+  // When there are multiple elements, the lists of nodes corresponding to each
+  // podAffinityTerm are intersected, i.e. all terms must be satisfied.
+  // RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"`
+  // If the anti-affinity requirements specified by this field are not met at
+  // scheduling time, the pod will not be scheduled onto the node.
+  // If the anti-affinity requirements specified by this field cease to be met
+  // at some point during pod execution (e.g. due to a pod label update), the
+  // system may or may not try to eventually evict the pod from its node.
+  // When there are multiple elements, the lists of nodes corresponding to each
+  // podAffinityTerm are intersected, i.e. all terms must be satisfied.
+  repeated PodAffinityTerm requiredDuringSchedulingIgnoredDuringExecution = 1;
+
+  // The scheduler will prefer to schedule pods to nodes that satisfy
+  // the anti-affinity expressions specified by this field, but it may choose
+  // a node that violates one or more of the expressions. The node that is
+  // most preferred is the one with the greatest sum of weights, i.e.
+  // for each node that meets all of the scheduling requirements (resource
+  // request, requiredDuringScheduling anti-affinity expressions, etc.),
+  // compute a sum by iterating through the elements of this field and adding
+  // "weight" to the sum if the node has pods that match the corresponding podAffinityTerm; the
+  // node(s) with the highest sum are the most preferred.
+  repeated WeightedPodAffinityTerm preferredDuringSchedulingIgnoredDuringExecution = 2;
+}
+
+// PodAttachOptions holds the query options to a Pod's remote attach call.
+// ---
+// TODO: merge w/ PodExecOptions below for stdin, stdout, etc
+// and also when we cut V2, we should export a "StreamOptions" or somesuch that contains Stdin, Stdout, Stderr and TTY
+message PodAttachOptions {
+  // Stdin if true, redirects the standard input stream of the pod for this call.
+  // Defaults to false.
+  optional bool stdin = 1;
+
+  // Stdout if true indicates that stdout is to be redirected for the attach call.
+  // Defaults to true.
+  optional bool stdout = 2;
+
+  // Stderr if true indicates that stderr is to be redirected for the attach call.
+  // Defaults to true.
+  optional bool stderr = 3;
+
+  // TTY if true indicates that a tty will be allocated for the attach call.
+  // This is passed through the container runtime so the tty
+  // is allocated on the worker node by the container runtime.
+  // Defaults to false.
+  optional bool tty = 4;
+
+  // The container in which to execute the command.
+  // Defaults to only container if there is only one container in the pod.
+  optional string container = 5;
+}
+
+// PodCondition contains details for the current condition of this pod.
+message PodCondition {
+  // Type is the type of the condition.
+  // Currently only Ready.
+  // More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#pod-conditions
+  optional string type = 1;
+
+  // Status is the status of the condition.
+  // Can be True, False, Unknown.
+  // More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#pod-conditions
+  optional string status = 2;
+
+  // Last time we probed the condition.
+  optional k8s.io.kubernetes.pkg.api.unversioned.Time lastProbeTime = 3;
+
+  // Last time the condition transitioned from one status to another.
+  optional k8s.io.kubernetes.pkg.api.unversioned.Time lastTransitionTime = 4;
+
+  // Unique, one-word, CamelCase reason for the condition's last transition.
+  optional string reason = 5;
+
+  // Human-readable message indicating details about last transition.
+  optional string message = 6;
+}
+
+// PodExecOptions holds the query options to a Pod's remote exec call.
+// ---
+// TODO: This is largely identical to PodAttachOptions above, make sure they stay in sync and see about merging
+// and also when we cut V2, we should export a "StreamOptions" or somesuch that contains Stdin, Stdout, Stderr and TTY
+message PodExecOptions {
+  // Redirect the standard input stream of the pod for this call.
+  // Defaults to false.
+  optional bool stdin = 1;
+
+  // Redirect the standard output stream of the pod for this call.
+  // Defaults to true.
+  optional bool stdout = 2;
+
+  // Redirect the standard error stream of the pod for this call.
+  // Defaults to true.
+  optional bool stderr = 3;
+
+  // TTY if true indicates that a tty will be allocated for the exec call.
+  // Defaults to false.
+  optional bool tty = 4;
+
+  // Container in which to execute the command.
+  // Defaults to only container if there is only one container in the pod.
+  optional string container = 5;
+
+  // Command is the remote command to execute. argv array. Not executed within a shell.
+  repeated string command = 6;
+}
+
+// PodList is a list of Pods.
+message PodList {
+  // Standard list metadata.
+  // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
+  optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1;
+
+  // List of pods.
+  // More info: http://releases.k8s.io/HEAD/docs/user-guide/pods.md
+  repeated Pod items = 2;
+}
+
+// PodLogOptions holds the query options for a Pod's logs REST call.
+message PodLogOptions {
+  // The container for which to stream logs. Defaults to only container if there is one container in the pod.
+  optional string container = 1;
+
+  // Follow the log stream of the pod. Defaults to false.
+  optional bool follow = 2;
+
+  // Return previous terminated container logs. Defaults to false.
+  optional bool previous = 3;
+
+  // A relative time in seconds before the current time from which to show logs. If this value
+  // precedes the time a pod was started, only logs since the pod start will be returned.
+  // If this value is in the future, no logs will be returned.
+  // Only one of sinceSeconds or sinceTime may be specified.
+  optional int64 sinceSeconds = 4;
+
+  // An RFC3339 timestamp from which to show logs. If this value
+  // precedes the time a pod was started, only logs since the pod start will be returned.
+  // If this value is in the future, no logs will be returned.
+  // Only one of sinceSeconds or sinceTime may be specified.
+  optional k8s.io.kubernetes.pkg.api.unversioned.Time sinceTime = 5;
+
+  // If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line
+  // of log output. Defaults to false.
+  optional bool timestamps = 6;
+
+  // If set, the number of lines from the end of the logs to show. If not specified,
+  // logs are shown from the creation of the container or from sinceSeconds or sinceTime.
+  optional int64 tailLines = 7;
+
+  // If set, the number of bytes to read from the server before terminating the
+  // log output. This may not display a complete final line of logging, and may return
+  // slightly more or slightly less than the specified limit.
+  optional int64 limitBytes = 8;
+}
+
+// PodProxyOptions holds the query options to a Pod's proxy call.
+message PodProxyOptions {
+  // Path is the URL path to use for the current proxy request to the pod.
+  optional string path = 1;
+}
+
+// PodSecurityContext holds pod-level security attributes and common container settings.
+// Some fields are also present in container.securityContext. Field values of
+// container.securityContext take precedence over field values of PodSecurityContext.
+message PodSecurityContext {
+  // The SELinux context to be applied to all containers.
+  // If unspecified, the container runtime will allocate a random SELinux context for each
+  // container. May also be set in SecurityContext. If set in
+  // both SecurityContext and PodSecurityContext, the value specified in SecurityContext
+  // takes precedence for that container.
+  optional SELinuxOptions seLinuxOptions = 1;
+
+  // The UID to run the entrypoint of the container process.
+  // Defaults to the user specified in image metadata if unspecified.
+  // May also be set in SecurityContext. If set in both SecurityContext and
+  // PodSecurityContext, the value specified in SecurityContext takes precedence
+  // for that container.
+  optional int64 runAsUser = 2;
+
+  // Indicates that the container must run as a non-root user.
+  // If true, the Kubelet will validate the image at runtime to ensure that it
+  // does not run as UID 0 (root) and fail to start the container if it does.
+  // If unset or false, no such validation will be performed.
+  // May also be set in SecurityContext. If set in both SecurityContext and
+  // PodSecurityContext, the value specified in SecurityContext takes precedence.
+  optional bool runAsNonRoot = 3;
+
+  // A list of groups applied to the first process run in each container, in addition
+  // to the container's primary GID. If unspecified, no groups will be added to
+  // any container.
+  repeated int64 supplementalGroups = 4;
+
+  // A special supplemental group that applies to all containers in a pod.
+  // Some volume types allow the Kubelet to change the ownership of that volume
+  // to be owned by the pod:
+  //
+  // 1. The owning GID will be the FSGroup
+  // 2. The setgid bit is set (new files created in the volume will be owned by FSGroup)
+  // 3. The permission bits are OR'd with rw-rw----
+  //
+  // If unset, the Kubelet will not modify the ownership and permissions of any volume.
+  optional int64 fsGroup = 5;
+}
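+
+// Illustrative sketch (editor's note, not part of the generated file): a pod-level
+// security context using the generated Go types; the particular IDs are assumptions.
+//
+//   var uid, fsGroup int64 = 1000, 2000
+//   nonRoot := true
+//   sc := v1.PodSecurityContext{
+//       RunAsUser:          &uid,
+//       RunAsNonRoot:       &nonRoot,
+//       SupplementalGroups: []int64{3000},
+//       FSGroup:            &fsGroup, // volumes that support it become group-owned by 2000
+//   }
+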
+// PodSpec is a description of a pod.
+message PodSpec {
+  // List of volumes that can be mounted by containers belonging to the pod.
+  // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md
+  repeated Volume volumes = 1;
+
+  // List of containers belonging to the pod.
+  // Containers cannot currently be added or removed.
+  // There must be at least one container in a Pod.
+  // Cannot be updated.
+  // More info: http://releases.k8s.io/HEAD/docs/user-guide/containers.md
+  repeated Container containers = 2;
+
+  // Restart policy for all containers within the pod.
+  // One of Always, OnFailure, Never.
+  // Defaults to Always.
+  // More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#restartpolicy
+  optional string restartPolicy = 3;
+
+  // Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request.
+  // Value must be a non-negative integer. The value zero indicates delete immediately.
+  // If this value is nil, the default grace period will be used instead.
+  // The grace period is the duration in seconds between when the processes running in the pod
+  // are sent a termination signal and when the processes are forcibly halted with a kill signal.
+  // Set this value longer than the expected cleanup time for your process.
+  // Defaults to 30 seconds.
+  optional int64 terminationGracePeriodSeconds = 4;
+
+  // Optional duration in seconds the pod may be active on the node relative to
+  // StartTime before the system will actively try to mark it failed and kill associated containers.
+  // Value must be a positive integer.
+  optional int64 activeDeadlineSeconds = 5;
+
+  // Set DNS policy for containers within the pod.
+  // One of 'ClusterFirst' or 'Default'.
+  // Defaults to "ClusterFirst".
+  optional string dnsPolicy = 6;
+
+  // NodeSelector is a selector which must be true for the pod to fit on a node.
+  // Selector which must match a node's labels for the pod to be scheduled on that node.
+  // More info: http://releases.k8s.io/HEAD/docs/user-guide/node-selection/README.md
+  map<string, string> nodeSelector = 7;
+
+  // ServiceAccountName is the name of the ServiceAccount to use to run this pod.
+  // More info: http://releases.k8s.io/HEAD/docs/design/service_accounts.md
+  optional string serviceAccountName = 8;
+
+  // DeprecatedServiceAccount is a deprecated alias for ServiceAccountName.
+  // Deprecated: Use serviceAccountName instead.
+  optional string serviceAccount = 9;
+
+  // NodeName is a request to schedule this pod onto a specific node. If it is non-empty,
+  // the scheduler simply schedules this pod onto that node, assuming that it fits resource
+  // requirements.
+  optional string nodeName = 10;
+
+  // Host networking requested for this pod. Use the host's network namespace.
+  // If this option is set, the ports that will be used must be specified.
+  // Defaults to false.
+  optional bool hostNetwork = 11;
+
+  // Use the host's pid namespace.
+  // Optional: Defaults to false.
+  optional bool hostPID = 12;
+
+  // Use the host's ipc namespace.
+  // Optional: Defaults to false.
+  optional bool hostIPC = 13;
+
+  // SecurityContext holds pod-level security attributes and common container settings.
+  // Optional: Defaults to empty. See type description for default values of each field.
+  optional PodSecurityContext securityContext = 14;
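+
+  // Illustrative sketch (editor's note, not part of the generated file): a minimal
+  // spec with one container, built with the generated Go types; the image and names
+  // are assumptions.
+  //
+  //   spec := v1.PodSpec{
+  //       Containers: []v1.Container{
+  //           {Name: "web", Image: "nginx:1.9"},
+  //       },
+  //       RestartPolicy: v1.RestartPolicyAlways,
+  //       NodeSelector:  map[string]string{"disktype": "ssd"},
+  //   }
+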
+  // ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec.
+  // If specified, these secrets will be passed to individual puller implementations for them to use. For example,
+  // in the case of docker, only DockerConfig type secrets are honored.
+  // More info: http://releases.k8s.io/HEAD/docs/user-guide/images.md#specifying-imagepullsecrets-on-a-pod
+  repeated LocalObjectReference imagePullSecrets = 15;
+
+  // Specifies the hostname of the Pod.
+  // If not specified, the pod's hostname will be set to a system-defined value.
+  optional string hostname = 16;
+
+  // If specified, the fully qualified Pod hostname will be
+  // "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>".
+  // If not specified, the pod will not have a domain name at all.
+  optional string subdomain = 17;
+}
+
+// PodStatus represents information about the status of a pod. Status may trail the actual
+// state of a system.
+message PodStatus {
+  // Current condition of the pod.
+  // More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#pod-phase
+  optional string phase = 1;
+
+  // Current service state of pod.
+  // More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#pod-conditions
+  repeated PodCondition conditions = 2;
+
+  // A human readable message indicating details about why the pod is in this condition.
+  optional string message = 3;
+
+  // A brief CamelCase message indicating details about why the pod is in this state.
+  // e.g. 'OutOfDisk'
+  optional string reason = 4;
+
+  // IP address of the host to which the pod is assigned. Empty if not yet scheduled.
+  optional string hostIP = 5;
+
+  // IP address allocated to the pod. Routable at least within the cluster.
+  // Empty if not yet allocated.
+  optional string podIP = 6;
+
+  // RFC 3339 date and time at which the object was acknowledged by the Kubelet.
+  // This is before the Kubelet pulled the container image(s) for the pod.
+  optional k8s.io.kubernetes.pkg.api.unversioned.Time startTime = 7;
+
+  // The list has one entry per container in the manifest. Each entry is currently the output
+  // of `docker inspect`.
+  // More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#container-statuses
+  repeated ContainerStatus containerStatuses = 8;
+}
+
+// PodStatusResult is a wrapper for PodStatus returned by the kubelet that can be encoded/decoded.
+message PodStatusResult {
+  // Standard object's metadata.
+  // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+  optional ObjectMeta metadata = 1;
+
+  // Most recently observed status of the pod.
+  // This data may not be up to date.
+  // Populated by the system.
+  // Read-only.
+  // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+  optional PodStatus status = 2;
+}
+
+// PodTemplate describes a template for creating copies of a predefined pod.
+message PodTemplate {
+  // Standard object's metadata.
+  // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+  optional ObjectMeta metadata = 1;
+
+  // Template defines the pods that will be created from this pod template.
+  // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+  optional PodTemplateSpec template = 2;
+}
+
+// PodTemplateList is a list of PodTemplates.
+message PodTemplateList {
+  // Standard list metadata.
+  // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
+  optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1;
+
+  // List of pod templates
+  repeated PodTemplate items = 2;
+}
+
+// PodTemplateSpec describes the data a pod should have when created from a template
+message PodTemplateSpec {
+  // Standard object's metadata.
+  // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+  optional ObjectMeta metadata = 1;
+
+  // Specification of the desired behavior of the pod.
+  // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+  optional PodSpec spec = 2;
+}
+
+// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out.
+message Preconditions {
+  // Specifies the target UID.
+  optional string uid = 1;
+}
+
+// An empty preferred scheduling term matches all objects with implicit weight 0
+// (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
+message PreferredSchedulingTerm {
+  // Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.
+  optional int32 weight = 1;
+
+  // A node selector term, associated with the corresponding weight.
+  optional NodeSelectorTerm preference = 2;
+}
+
+// Probe describes a health check to be performed against a container to determine whether it is
+// alive or ready to receive traffic.
+message Probe {
+  // The action taken to determine the health of a container
+  optional Handler handler = 1;
+
+  // Number of seconds after the container has started before liveness probes are initiated.
+  // More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#container-probes
+  optional int32 initialDelaySeconds = 2;
+
+  // Number of seconds after which the probe times out.
+  // Defaults to 1 second. Minimum value is 1.
+  // More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#container-probes
+  optional int32 timeoutSeconds = 3;
+
+  // How often (in seconds) to perform the probe.
+  // Defaults to 10 seconds. Minimum value is 1.
+  optional int32 periodSeconds = 4;
+
+  // Minimum consecutive successes for the probe to be considered successful after having failed.
+  // Defaults to 1. Must be 1 for liveness. Minimum value is 1.
+  optional int32 successThreshold = 5;
+
+  // Minimum consecutive failures for the probe to be considered failed after having succeeded.
+  // Defaults to 3. Minimum value is 1.
+  optional int32 failureThreshold = 6;
+}
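+
+// Illustrative sketch (editor's note, not part of the generated file): an HTTP
+// liveness probe built with the generated Go types; the path and port are
+// assumptions.
+//
+//   probe := v1.Probe{
+//       Handler: v1.Handler{
+//           HTTPGet: &v1.HTTPGetAction{Path: "/healthz", Port: intstr.FromInt(8080)},
+//       },
+//       InitialDelaySeconds: 15,
+//       TimeoutSeconds:      1,
+//       PeriodSeconds:       10,
+//       FailureThreshold:    3,
+//   }
+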
+// Represents a Rados Block Device mount that lasts the lifetime of a pod.
+// RBD volumes support ownership management and SELinux relabeling.
+message RBDVolumeSource {
+  // A collection of Ceph monitors.
+  // More info: http://releases.k8s.io/HEAD/examples/rbd/README.md#how-to-use-it
+  repeated string monitors = 1;
+
+  // The rados image name.
+  // More info: http://releases.k8s.io/HEAD/examples/rbd/README.md#how-to-use-it
+  optional string image = 2;
+
+  // Filesystem type of the volume that you want to mount.
+  // Tip: Ensure that the filesystem type is supported by the host operating system.
+  // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+  // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#rbd
+  // TODO: how do we prevent errors in the filesystem from compromising the machine
+  optional string fsType = 3;
+
+  // The rados pool name.
+  // Default is rbd.
+  // More info: http://releases.k8s.io/HEAD/examples/rbd/README.md#how-to-use-it.
+  optional string pool = 4;
+
+  // The rados user name.
+  // Default is admin.
+  // More info: http://releases.k8s.io/HEAD/examples/rbd/README.md#how-to-use-it
+  optional string user = 5;
+
+  // Keyring is the path to the key ring for RBDUser.
+  // Default is /etc/ceph/keyring.
+  // More info: http://releases.k8s.io/HEAD/examples/rbd/README.md#how-to-use-it
+  optional string keyring = 6;
+
+  // SecretRef is the name of the authentication secret for RBDUser. If provided,
+  // it overrides keyring.
+  // Default is empty.
+  // More info: http://releases.k8s.io/HEAD/examples/rbd/README.md#how-to-use-it
+  optional LocalObjectReference secretRef = 7;
+
+  // ReadOnly here will force the ReadOnly setting in VolumeMounts.
+  // Defaults to false.
+  // More info: http://releases.k8s.io/HEAD/examples/rbd/README.md#how-to-use-it
+  optional bool readOnly = 8;
+}
+
+// RangeAllocation is not a public type.
+message RangeAllocation {
+  // Standard object's metadata.
+  // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+  optional ObjectMeta metadata = 1;
+
+  // Range is a string that identifies the range represented by 'data'.
+  optional string range = 2;
+
+  // Data is a bit array containing all allocated addresses in the previous segment.
+  optional bytes data = 3;
+}
+
+// ReplicationController represents the configuration of a replication controller.
+message ReplicationController {
+  // If the Labels of a ReplicationController are empty, they are defaulted to
+  // be the same as the Pod(s) that the replication controller manages.
+  // Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+  optional ObjectMeta metadata = 1;
+
+  // Spec defines the specification of the desired behavior of the replication controller.
+  // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+  optional ReplicationControllerSpec spec = 2;
+
+  // Status is the most recently observed status of the replication controller.
+  // This data may be out of date by some window of time.
+  // Populated by the system.
+  // Read-only.
+  // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+  optional ReplicationControllerStatus status = 3;
+}
+
+// ReplicationControllerList is a collection of replication controllers.
+message ReplicationControllerList {
+  // Standard list metadata.
+  // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
+  optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1;
+
+  // List of replication controllers.
+  // More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md
+  repeated ReplicationController items = 2;
+}
+
+// ReplicationControllerSpec is the specification of a replication controller.
+message ReplicationControllerSpec {
+  // Replicas is the number of desired replicas.
+  // This is a pointer to distinguish between explicit zero and unspecified.
+  // Defaults to 1.
+  // More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#what-is-a-replication-controller
+  optional int32 replicas = 1;
+
+  // Selector is a label query over pods that should match the Replicas count.
+  // If Selector is empty, it is defaulted to the labels present on the Pod template.
+  // Label keys and values that must match in order to be controlled by this replication
+  // controller, if empty defaulted to labels on Pod template.
+  // More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors
+  map<string, string> selector = 2;
+
+  // Template is the object that describes the pod that will be created if
+  // insufficient replicas are detected. This takes precedence over a TemplateRef.
+  // More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#pod-template
+  optional PodTemplateSpec template = 3;
+}
+
+// ReplicationControllerStatus represents the current status of a replication
+// controller.
+message ReplicationControllerStatus {
+  // Replicas is the most recently observed number of replicas.
+  // More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#what-is-a-replication-controller
+  optional int32 replicas = 1;
+
+  // The number of pods that have labels matching the labels of the pod template of the replication controller.
+  optional int32 fullyLabeledReplicas = 2;
+
+  // ObservedGeneration reflects the generation of the most recently observed replication controller.
+  optional int64 observedGeneration = 3;
+}
+
+// ResourceFieldSelector represents container resources (cpu, memory) and their output format
+message ResourceFieldSelector {
+  // Container name: required for volumes, optional for env vars
+  optional string containerName = 1;
+
+  // Required: resource to select
+  optional string resource = 2;
+
+  // Specifies the output format of the exposed resources, defaults to "1"
+  optional k8s.io.kubernetes.pkg.api.resource.Quantity divisor = 3;
+}
+
+// ResourceQuota sets aggregate quota restrictions enforced per namespace
+message ResourceQuota {
+  // Standard object's metadata.
+  // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+  optional ObjectMeta metadata = 1;
+
+  // Spec defines the desired quota.
+  // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+  optional ResourceQuotaSpec spec = 2;
+
+  // Status defines the actual enforced quota and its current usage.
+  // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+  optional ResourceQuotaStatus status = 3;
+}
+
+// ResourceQuotaList is a list of ResourceQuota items.
+message ResourceQuotaList {
+  // Standard list metadata.
+  // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
+  optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1;
+
+  // Items is a list of ResourceQuota objects.
+  // More info: http://releases.k8s.io/HEAD/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota
+  repeated ResourceQuota items = 2;
+}
+
+// ResourceQuotaSpec defines the desired hard limits to enforce for Quota.
+message ResourceQuotaSpec {
+  // Hard is the set of desired hard limits for each named resource.
+  // More info: http://releases.k8s.io/HEAD/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota
+  map<string, k8s.io.kubernetes.pkg.api.resource.Quantity> hard = 1;
+
+  // A collection of filters that must match each object tracked by a quota.
+  // If not specified, the quota matches all objects.
+  repeated string scopes = 2;
+}
+
+// ResourceQuotaStatus defines the enforced hard limits and observed use.
+message ResourceQuotaStatus {
+  // Hard is the set of enforced hard limits for each named resource.
+  // More info: http://releases.k8s.io/HEAD/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota
+  map<string, k8s.io.kubernetes.pkg.api.resource.Quantity> hard = 1;
+
+  // Used is the current observed total usage of the resource in the namespace.
+  map<string, k8s.io.kubernetes.pkg.api.resource.Quantity> used = 2;
+}
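+
+// Illustrative sketch (editor's note, not part of the generated file): a quota
+// limiting a namespace to 10 pods and 4 CPUs, using the generated Go types; the
+// quota name is an assumption.
+//
+//   quota := v1.ResourceQuota{
+//       ObjectMeta: v1.ObjectMeta{Name: "compute-quota"},
+//       Spec: v1.ResourceQuotaSpec{
+//           Hard: v1.ResourceList{
+//               v1.ResourcePods: resource.MustParse("10"),
+//               v1.ResourceCPU:  resource.MustParse("4"),
+//           },
+//       },
+//   }
+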
+// ResourceRequirements describes the compute resource requirements.
+message ResourceRequirements {
+  // Limits describes the maximum amount of compute resources allowed.
+  // More info: http://releases.k8s.io/HEAD/docs/design/resources.md#resource-specifications
+  map<string, k8s.io.kubernetes.pkg.api.resource.Quantity> limits = 1;
+
+  // Requests describes the minimum amount of compute resources required.
+  // If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+  // otherwise to an implementation-defined value.
+  // More info: http://releases.k8s.io/HEAD/docs/design/resources.md#resource-specifications
+  map<string, k8s.io.kubernetes.pkg.api.resource.Quantity> requests = 2;
+}
+
+// SELinuxOptions are the labels to be applied to the container
+message SELinuxOptions {
+  // User is a SELinux user label that applies to the container.
+  optional string user = 1;
+
+  // Role is a SELinux role label that applies to the container.
+  optional string role = 2;
+
+  // Type is a SELinux type label that applies to the container.
+  optional string type = 3;
+
+  // Level is a SELinux level label that applies to the container.
+  optional string level = 4;
+}
+
+// Secret holds secret data of a certain type. The total bytes of the values in
+// the Data field must be less than MaxSecretSize bytes.
+message Secret {
+  // Standard object's metadata.
+  // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+  optional ObjectMeta metadata = 1;
+
+  // Data contains the secret data. Each key must be a valid DNS_SUBDOMAIN
+  // or a leading dot followed by a valid DNS_SUBDOMAIN.
+  // The serialized form of the secret data is a base64 encoded string,
+  // representing the arbitrary (possibly non-string) data value here.
+  // Described in https://tools.ietf.org/html/rfc4648#section-4
+  map<string, bytes> data = 2;
+
+  // Used to facilitate programmatic handling of secret data.
+  optional string type = 3;
+}
+
+// SecretKeySelector selects a key of a Secret.
+message SecretKeySelector {
+  // The name of the secret in the pod's namespace to select from.
+  optional LocalObjectReference localObjectReference = 1;
+
+  // The key of the secret to select from. Must be a valid secret key.
+  optional string key = 2;
+}
+
+// SecretList is a list of Secret.
+message SecretList {
+  // Standard list metadata.
+  // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
+  optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1;
+
+  // Items is a list of secret objects.
+  // More info: http://releases.k8s.io/HEAD/docs/user-guide/secrets.md
+  repeated Secret items = 2;
+}
+
+// Adapts a Secret into a volume.
+//
+// The contents of the target Secret's Data field will be presented in a volume
+// as files using the keys in the Data field as the file names.
+// Secret volumes support ownership management and SELinux relabeling.
+message SecretVolumeSource {
+  // Name of the secret in the pod's namespace to use.
+  // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#secrets
+  optional string secretName = 1;
+
+  // If unspecified, each key-value pair in the Data field of the referenced
+  // Secret will be projected into the volume as a file whose name is the
+  // key and content is the value. If specified, the listed keys will be
+  // projected into the specified paths, and unlisted keys will not be
+  // present. If a key is specified which is not present in the Secret,
+  // the volume setup will error. Paths must be relative and may not contain
+  // the '..' path or start with '..'.
+  repeated KeyToPath items = 2;
+}
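+
+// Illustrative sketch (editor's note, not part of the generated file): a Secret
+// with one key, using the generated Go types; on the wire the value is base64
+// encoded, in Go it is raw bytes. The secret name and key are assumptions.
+//
+//   secret := v1.Secret{
+//       ObjectMeta: v1.ObjectMeta{Name: "db-credentials"},
+//       Type:       v1.SecretTypeOpaque,
+//       Data:       map[string][]byte{"password": []byte("s3cr3t")},
+//   }
+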
+// SecurityContext holds security configuration that will be applied to a container.
+// Some fields are present in both SecurityContext and PodSecurityContext. When both
+// are set, the values in SecurityContext take precedence.
+message SecurityContext {
+  // The capabilities to add/drop when running containers.
+  // Defaults to the default set of capabilities granted by the container runtime.
+  optional Capabilities capabilities = 1;
+
+  // Run container in privileged mode.
+  // Processes in privileged containers are essentially equivalent to root on the host.
+  // Defaults to false.
+  optional bool privileged = 2;
+
+  // The SELinux context to be applied to the container.
+  // If unspecified, the container runtime will allocate a random SELinux context for each
+  // container. May also be set in PodSecurityContext. If set in both SecurityContext and
+  // PodSecurityContext, the value specified in SecurityContext takes precedence.
+  optional SELinuxOptions seLinuxOptions = 3;
+
+  // The UID to run the entrypoint of the container process.
+  // Defaults to the user specified in image metadata if unspecified.
+  // May also be set in PodSecurityContext. If set in both SecurityContext and
+  // PodSecurityContext, the value specified in SecurityContext takes precedence.
+  optional int64 runAsUser = 4;
+
+  // Indicates that the container must run as a non-root user.
+  // If true, the Kubelet will validate the image at runtime to ensure that it
+  // does not run as UID 0 (root) and fail to start the container if it does.
+  // If unset or false, no such validation will be performed.
+  // May also be set in PodSecurityContext. If set in both SecurityContext and
+  // PodSecurityContext, the value specified in SecurityContext takes precedence.
+  optional bool runAsNonRoot = 5;
+
+  // Whether this container has a read-only root filesystem.
+  // Default is false.
+  optional bool readOnlyRootFilesystem = 6;
+}
+
+// SerializedReference is a reference to a serialized object.
+message SerializedReference {
+  // The reference to an object in the system.
+  optional ObjectReference reference = 1;
+}
+
+// Service is a named abstraction of a software service (for example, mysql) consisting of a local port
+// (for example 3306) that the proxy listens on, and the selector that determines which pods
+// will answer requests sent through the proxy.
+message Service {
+  // Standard object's metadata.
+  // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+  optional ObjectMeta metadata = 1;
+
+  // Spec defines the behavior of a service.
+  // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+  optional ServiceSpec spec = 2;
+
+  // Most recently observed status of the service.
+  // Populated by the system.
+  // Read-only.
+  // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+  optional ServiceStatus status = 3;
+}
+
+// ServiceAccount binds together:
+// * a name, understood by users, and perhaps by peripheral systems, for an identity
+// * a principal that can be authenticated and authorized
+// * a set of secrets
+message ServiceAccount {
+  // Standard object's metadata.
+  // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+  optional ObjectMeta metadata = 1;
+
+  // Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount.
+  // More info: http://releases.k8s.io/HEAD/docs/user-guide/secrets.md
+  repeated ObjectReference secrets = 2;
+
+  // ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images
+  // in pods that reference this ServiceAccount.
+  // ImagePullSecrets are distinct from Secrets because Secrets
+  // can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet.
+  // More info: http://releases.k8s.io/HEAD/docs/user-guide/secrets.md#manually-specifying-an-imagepullsecret
+  repeated LocalObjectReference imagePullSecrets = 3;
+}
+
+// ServiceAccountList is a list of ServiceAccount objects
+message ServiceAccountList {
+  // Standard list metadata.
+  // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
+  optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1;
+
+  // List of ServiceAccounts.
+  // More info: http://releases.k8s.io/HEAD/docs/design/service_accounts.md#service-accounts
+  repeated ServiceAccount items = 2;
+}
+
+// ServiceList holds a list of services.
+message ServiceList {
+  // Standard list metadata.
+  // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
+  optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1;
+
+  // List of services
+  repeated Service items = 2;
+}
+
+// ServicePort contains information on a service's port.
+message ServicePort {
+  // The name of this port within the service. This must be a DNS_LABEL.
+  // All ports within a ServiceSpec must have unique names. This maps to
+  // the 'Name' field in EndpointPort objects.
+  // Optional if only one ServicePort is defined on this service.
+  optional string name = 1;
+
+  // The IP protocol for this port. Supports "TCP" and "UDP".
+  // Default is TCP.
+  optional string protocol = 2;
+
+  // The port that will be exposed by this service.
+  optional int32 port = 3;
+
+  // Number or name of the port to access on the pods targeted by the service.
+  // Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
+  // If this is a string, it will be looked up as a named port in the
+  // target Pod's container ports. If this is not specified, the value
+  // of the 'port' field is used (an identity map).
+  // This field is ignored for services with clusterIP=None, and should be
+  // omitted or set equal to the 'port' field.
+  // More info: http://releases.k8s.io/HEAD/docs/user-guide/services.md#defining-a-service
+  optional k8s.io.kubernetes.pkg.util.intstr.IntOrString targetPort = 4;
+
+  // The port on each node on which this service is exposed when type=NodePort or LoadBalancer.
+  // Usually assigned by the system. If specified, it will be allocated to the service
+  // if unused or else creation of the service will fail.
+  // Default is to auto-allocate a port if the ServiceType of this Service requires one.
+  // More info: http://releases.k8s.io/HEAD/docs/user-guide/services.md#type--nodeport
+  optional int32 nodePort = 5;
+}
+
+// ServiceProxyOptions holds the query options to a Service's proxy call.
+message ServiceProxyOptions {
+  // Path is the part of URLs that include service endpoints, suffixes,
+  // and parameters to use for the current proxy request to service.
+  // For example, the whole request URL is
+  // http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy.
+  // Path is _search?q=user:kimchy.
+  optional string path = 1;
+}
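+
+// Illustrative sketch (editor's note, not part of the generated file): a port that
+// forwards service port 80 to the named container port "http" on the selected pods,
+// using the generated Go types; the names are assumptions.
+//
+//   port := v1.ServicePort{
+//       Name:       "web",
+//       Protocol:   v1.ProtocolTCP,
+//       Port:       80,
+//       TargetPort: intstr.FromString("http"),
+//   }
+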
+// ServiceSpec describes the attributes that a user creates on a service.
+message ServiceSpec {
+  // The list of ports that are exposed by this service.
+  // More info: http://releases.k8s.io/HEAD/docs/user-guide/services.md#virtual-ips-and-service-proxies
+  repeated ServicePort ports = 1;
+
+  // This service will route traffic to pods having labels matching this selector.
+  // Label keys and values that must match in order to receive traffic for this service.
+  // If empty, all pods are selected; if not specified, endpoints must be specified manually.
+  // More info: http://releases.k8s.io/HEAD/docs/user-guide/services.md#overview
+  map<string, string> selector = 2;
+
+  // ClusterIP is usually assigned by the master and is the IP address of the service.
+  // If specified, it will be allocated to the service if it is unused
+  // or else creation of the service will fail.
+  // Valid values are None, empty string (""), or a valid IP address.
+  // 'None' can be specified for a headless service when proxying is not required.
+  // Cannot be updated.
+  // More info: http://releases.k8s.io/HEAD/docs/user-guide/services.md#virtual-ips-and-service-proxies
+  optional string clusterIP = 3;
+
+  // Type of exposed service. Must be ClusterIP, NodePort, or LoadBalancer.
+  // Defaults to ClusterIP.
+  // More info: http://releases.k8s.io/HEAD/docs/user-guide/services.md#external-services
+  optional string type = 4;
+
+  // externalIPs is a list of IP addresses for which nodes in the cluster
+  // will also accept traffic for this service. These IPs are not managed by
+  // Kubernetes. The user is responsible for ensuring that traffic arrives
+  // at a node with this IP. A common example is external load-balancers
+  // that are not part of the Kubernetes system. A previous form of this
+  // functionality exists as the deprecatedPublicIPs field. When using this
+  // field, callers should also clear the deprecatedPublicIPs field.
+  repeated string externalIPs = 5;
+
+  // deprecatedPublicIPs is deprecated and replaced by the externalIPs field
+  // with almost the exact same semantics. This field is retained in the v1
+  // API for compatibility until at least 8/20/2016. It will be removed from
+  // any new API revisions. If both deprecatedPublicIPs *and* externalIPs are
+  // set, deprecatedPublicIPs is used.
+  // +genconversion=false
+  repeated string deprecatedPublicIPs = 6;
+
+  // Supports "ClientIP" and "None". Used to maintain session affinity.
+  // Enable client IP based session affinity.
+  // Must be ClientIP or None.
+  // Defaults to None.
+  // More info: http://releases.k8s.io/HEAD/docs/user-guide/services.md#virtual-ips-and-service-proxies
+  optional string sessionAffinity = 7;
+
+  // Only applies to Service Type: LoadBalancer.
+  // LoadBalancer will get created with the IP specified in this field.
+  // This feature depends on whether the underlying cloud-provider supports specifying
+  // the loadBalancerIP when a load balancer is created.
+  // This field will be ignored if the cloud-provider does not support the feature.
+  optional string loadBalancerIP = 8;
+
+  // If specified and supported by the platform, traffic through the cloud-provider
+  // load-balancer will be restricted to the specified client IPs. This field will be
+  // ignored if the cloud-provider does not support the feature.
+  // More info: http://releases.k8s.io/HEAD/docs/user-guide/services-firewalls.md
+  repeated string loadBalancerSourceRanges = 9;
+}
+
+// ServiceStatus represents the current status of a service.
+message ServiceStatus {
+  // LoadBalancer contains the current status of the load-balancer,
+  // if one is present.
+  optional LoadBalancerStatus loadBalancer = 1;
+}
+
+// TCPSocketAction describes an action based on opening a socket.
+message TCPSocketAction {
+  // Number or name of the port to access on the container.
+  // Number must be in the range 1 to 65535.
+  // Name must be an IANA_SVC_NAME.
+  optional k8s.io.kubernetes.pkg.util.intstr.IntOrString port = 1;
+}
+
+// The node this Taint is attached to has the effect "effect" on
+// any pod that does not tolerate the Taint.
+message Taint {
+  // Required. The taint key to be applied to a node.
+  optional string key = 1;
+
+  // Required. The taint value corresponding to the taint key.
+  optional string value = 2;
+
+  // Required. The effect of the taint on pods
+  // that do not tolerate the taint.
+  // Valid effects are NoSchedule and PreferNoSchedule.
+  optional string effect = 3;
+}
+
+// The pod this Toleration is attached to tolerates any taint that matches
+// the triple <key,value,effect> using the matching operator <operator>.
+message Toleration {
+  // Required. Key is the taint key that the toleration applies to.
+  optional string key = 1;
+
+  // operator represents a key's relationship to the value.
+  // Valid operators are Exists and Equal. Defaults to Equal.
+  // Exists is equivalent to wildcard for value, so that a pod can
+  // tolerate all taints of a particular category.
+  optional string operator = 2;
+
+  // Value is the taint value the toleration matches to.
+  // If the operator is Exists, the value should be empty, otherwise just a regular string.
+  optional string value = 3;
+
+  // Effect indicates the taint effect to match. Empty means match all taint effects.
+  // When specified, allowed values are NoSchedule and PreferNoSchedule.
+  optional string effect = 4;
+}
+
+// Volume represents a named volume in a pod that may be accessed by any container in the pod.
+message Volume {
+  // Volume's name.
+  // Must be a DNS_LABEL and unique within the pod.
+  // More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#names
+  optional string name = 1;
+
+  // VolumeSource represents the location and type of the mounted volume.
+  // If not specified, the Volume is implied to be an EmptyDir.
+  // This implied behavior is deprecated and will be removed in a future version.
+  optional VolumeSource volumeSource = 2;
+}
+
+// VolumeMount describes a mounting of a Volume within a container.
+message VolumeMount {
+  // This must match the Name of a Volume.
+  optional string name = 1;
+
+  // Mounted read-only if true, read-write otherwise (false or unspecified).
+  // Defaults to false.
+  optional bool readOnly = 2;
+
+  // Path within the container at which the volume should be mounted. Must
+  // not contain ':'.
+  optional string mountPath = 3;
+
+  // Path within the volume from which the container's volume should be mounted.
+  // Defaults to "" (volume's root).
+  optional string subPath = 4;
+}
+
+// Represents the source of a volume to mount.
+// Only one of its members may be specified.
+message VolumeSource {
+  // HostPath represents a pre-existing file or directory on the host
+  // machine that is directly exposed to the container. This is generally
+  // used for system agents or other privileged things that are allowed
+  // to see the host machine. Most containers will NOT need this.
+  // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#hostpath
+  // ---
+  // TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not
+  // mount host directories as read/write.
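+  // A hedged, editor-added sketch (not upstream text): because exactly one
+  // member of VolumeSource may be set, a host-path volume in a pod spec
+  // looks like the following with the vendored v1 types:
+  //
+  //     vol := v1.Volume{
+  //         Name: "host-logs", // hypothetical DNS_LABEL name
+  //         VolumeSource: v1.VolumeSource{
+  //             HostPath: &v1.HostPathVolumeSource{Path: "/var/log"},
+  //         },
+  //     }
+  //
+  // with every other member left nil.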
+  optional HostPathVolumeSource hostPath = 1;
+
+  // EmptyDir represents a temporary directory that shares a pod's lifetime.
+  // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#emptydir
+  optional EmptyDirVolumeSource emptyDir = 2;
+
+  // GCEPersistentDisk represents a GCE Disk resource that is attached to a
+  // kubelet's host machine and then exposed to the pod.
+  // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#gcepersistentdisk
+  optional GCEPersistentDiskVolumeSource gcePersistentDisk = 3;
+
+  // AWSElasticBlockStore represents an AWS Disk resource that is attached to a
+  // kubelet's host machine and then exposed to the pod.
+  // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#awselasticblockstore
+  optional AWSElasticBlockStoreVolumeSource awsElasticBlockStore = 4;
+
+  // GitRepo represents a git repository at a particular revision.
+  optional GitRepoVolumeSource gitRepo = 5;
+
+  // Secret represents a secret that should populate this volume.
+  // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#secrets
+  optional SecretVolumeSource secret = 6;
+
+  // NFS represents an NFS mount on the host that shares a pod's lifetime.
+  // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#nfs
+  optional NFSVolumeSource nfs = 7;
+
+  // ISCSI represents an ISCSI Disk resource that is attached to a
+  // kubelet's host machine and then exposed to the pod.
+  // More info: http://releases.k8s.io/HEAD/examples/iscsi/README.md
+  optional ISCSIVolumeSource iscsi = 8;
+
+  // Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.
+  // More info: http://releases.k8s.io/HEAD/examples/glusterfs/README.md
+  optional GlusterfsVolumeSource glusterfs = 9;
+
+  // PersistentVolumeClaimVolumeSource represents a reference to a
+  // PersistentVolumeClaim in the same namespace.
+  // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#persistentvolumeclaims
+  optional PersistentVolumeClaimVolumeSource persistentVolumeClaim = 10;
+
+  // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime.
+  // More info: http://releases.k8s.io/HEAD/examples/rbd/README.md
+  optional RBDVolumeSource rbd = 11;
+
+  // FlexVolume represents a generic volume resource that is
+  // provisioned/attached using an exec based plugin. This is an
+  // alpha feature and may change in the future.
+  optional FlexVolumeSource flexVolume = 12;
+
+  // Cinder represents a Cinder volume attached and mounted on the kubelet's host machine.
+  // More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
+  optional CinderVolumeSource cinder = 13;
+
+  // CephFS represents a Ceph FS mount on the host that shares a pod's lifetime.
+  optional CephFSVolumeSource cephfs = 14;
+
+  // Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service running.
+  optional FlockerVolumeSource flocker = 15;
+
+  // DownwardAPI represents downward API about the pod that should populate this volume.
+  optional DownwardAPIVolumeSource downwardAPI = 16;
+
+  // FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.
+  optional FCVolumeSource fc = 17;
+
+  // AzureFile represents an Azure File Service mount on the host and bind mount to the pod.
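+  // A hedged, editor-added sketch (not upstream text): assuming the
+  // vendored v1 types, an Azure File source names the share to mount and
+  // the secret holding the storage-account credentials:
+  //
+  //     az := &v1.AzureFileVolumeSource{
+  //         SecretName: "azure-secret", // hypothetical secret name
+  //         ShareName:  "logs-share",   // hypothetical share name
+  //         ReadOnly:   true,
+  //     }
+  //
+  // It is then set as the single populated member of a VolumeSource.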
+  optional AzureFileVolumeSource azureFile = 18;
+
+  // ConfigMap represents a configMap that should populate this volume.
+  optional ConfigMapVolumeSource configMap = 19;
+
+  // VsphereVolume represents a vSphere volume attached and mounted on the kubelet's host machine.
+  optional VsphereVirtualDiskVolumeSource vsphereVolume = 20;
+}
+
+// Represents a vSphere volume resource.
+message VsphereVirtualDiskVolumeSource {
+  // Path that identifies the vSphere volume vmdk.
+  optional string volumePath = 1;
+
+  // Filesystem type to mount.
+  // Must be a filesystem type supported by the host operating system.
+  // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+  optional string fsType = 2;
+}
+
+// The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s).
+message WeightedPodAffinityTerm {
+  // weight associated with matching the corresponding podAffinityTerm,
+  // in the range 1-100.
+  optional int32 weight = 1;
+
+  // Required. A pod affinity term, associated with the corresponding weight.
+  optional PodAffinityTerm podAffinityTerm = 2;
+}
+
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/v1/register.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/v1/register.go
index 760836e37a43..1a8342c63ac5 100644
--- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/v1/register.go
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/v1/register.go
@@ -19,6 +19,7 @@ package v1
 import (
 	"k8s.io/kubernetes/pkg/api/unversioned"
 	"k8s.io/kubernetes/pkg/runtime"
+	versionedwatch "k8s.io/kubernetes/pkg/watch/versioned"
 )

 // GroupName is the group name used in this package
@@ -87,6 +88,9 @@ func addKnownTypes(scheme *runtime.Scheme) {
 	// Add common types
 	scheme.AddKnownTypes(SchemeGroupVersion, &unversioned.Status{})
+
+	// Add the watch version that applies
+	versionedwatch.AddToGroupVersion(scheme, SchemeGroupVersion)
 }

 func (obj *Pod) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/v1/types.generated.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/v1/types.generated.go
index 1dc4938a8927..4e66fdb43c28 100644
--- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/v1/types.generated.go
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/v1/types.generated.go
@@ -1,5 +1,5 @@
 /*
-Copyright 2015 The Kubernetes Authors All rights reserved.
+Copyright 2016 The Kubernetes Authors All rights reserved.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
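// Editor's note on the register.go hunk above (not upstream text):
// AddToGroupVersion registers the versioned watch Event type so that
// watch responses for this group/version round-trip through the scheme.
// A minimal sketch of the same pattern, assuming only the vendored
// packages that appear in this diff:
//
//     scheme := runtime.NewScheme()
//     scheme.AddKnownTypes(SchemeGroupVersion, &unversioned.Status{})
//     versionedwatch.AddToGroupVersion(scheme, SchemeGroupVersion)
//
// After this call, watch payloads encode with this group's apiVersion.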
@@ -27,12 +27,11 @@ import ( codec1978 "github.com/ugorji/go/codec" pkg3_resource "k8s.io/kubernetes/pkg/api/resource" pkg2_unversioned "k8s.io/kubernetes/pkg/api/unversioned" - pkg6_runtime "k8s.io/kubernetes/pkg/runtime" + pkg5_runtime "k8s.io/kubernetes/pkg/runtime" pkg1_types "k8s.io/kubernetes/pkg/types" - pkg5_intstr "k8s.io/kubernetes/pkg/util/intstr" + pkg4_intstr "k8s.io/kubernetes/pkg/util/intstr" "reflect" "runtime" - pkg4_inf "speter.net/go/exp/math/dec/inf" time "time" ) @@ -68,12 +67,11 @@ func init() { if false { // reference the types, but skip this branch at build/run time var v0 pkg3_resource.Quantity var v1 pkg2_unversioned.Time - var v2 pkg6_runtime.RawExtension + var v2 pkg5_runtime.RawExtension var v3 pkg1_types.UID - var v4 pkg5_intstr.IntOrString - var v5 pkg4_inf.Dec - var v6 time.Time - _, _, _, _, _, _, _ = v0, v1, v2, v3, v4, v5, v6 + var v4 pkg4_intstr.IntOrString + var v5 time.Time + _, _, _, _, _, _ = v0, v1, v2, v3, v4, v5 } } @@ -91,7 +89,7 @@ func (x *ObjectMeta) CodecEncodeSelf(e *codec1978.Encoder) { } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [12]bool + var yyq2 [14]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false yyq2[0] = x.Name != "" @@ -106,9 +104,11 @@ func (x *ObjectMeta) CodecEncodeSelf(e *codec1978.Encoder) { yyq2[9] = x.DeletionGracePeriodSeconds != nil yyq2[10] = len(x.Labels) != 0 yyq2[11] = len(x.Annotations) != 0 + yyq2[12] = len(x.OwnerReferences) != 0 + yyq2[13] = len(x.Finalizers) != 0 var yynn2 int if yyr2 || yy2arr2 { - r.EncodeArrayStart(12) + r.EncodeArrayStart(14) } else { yynn2 = 0 for _, b := range yyq2 { @@ -477,6 +477,72 @@ func (x *ObjectMeta) CodecEncodeSelf(e *codec1978.Encoder) { } } } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[12] { + if x.OwnerReferences == nil { + r.EncodeNil() + } else { + yym44 := z.EncBinary() + _ = yym44 + if false { + } else { + h.encSliceOwnerReference(([]OwnerReference)(x.OwnerReferences), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[12] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("ownerReferences")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.OwnerReferences == nil { + r.EncodeNil() + } else { + yym45 := z.EncBinary() + _ = yym45 + if false { + } else { + h.encSliceOwnerReference(([]OwnerReference)(x.OwnerReferences), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[13] { + if x.Finalizers == nil { + r.EncodeNil() + } else { + yym47 := z.EncBinary() + _ = yym47 + if false { + } else { + z.F.EncSliceStringV(x.Finalizers, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[13] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("finalizers")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Finalizers == nil { + r.EncodeNil() + } else { + yym48 := z.EncBinary() + _ = yym48 + if false { + } else { + z.F.EncSliceStringV(x.Finalizers, false, e) + } + } + } + } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { @@ -658,6 +724,30 @@ func (x *ObjectMeta) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { z.F.DecMapStringStringX(yyv19, false, d) } } + case "ownerReferences": + if r.TryDecodeAsNil() { + x.OwnerReferences = nil + } else { + yyv21 := &x.OwnerReferences + yym22 := z.DecBinary() + _ = 
yym22 + if false { + } else { + h.decSliceOwnerReference((*[]OwnerReference)(yyv21), d) + } + } + case "finalizers": + if r.TryDecodeAsNil() { + x.Finalizers = nil + } else { + yyv23 := &x.Finalizers + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + z.F.DecSliceStringX(yyv23, false, d) + } + } default: z.DecStructFieldNotFound(-1, yys3) } // end switch yys3 @@ -669,16 +759,16 @@ func (x *ObjectMeta) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj21 int - var yyb21 bool - var yyhl21 bool = l >= 0 - yyj21++ - if yyhl21 { - yyb21 = yyj21 > l + var yyj25 int + var yyb25 bool + var yyhl25 bool = l >= 0 + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l } else { - yyb21 = r.CheckBreak() + yyb25 = r.CheckBreak() } - if yyb21 { + if yyb25 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -688,13 +778,13 @@ func (x *ObjectMeta) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } else { x.Name = string(r.DecodeString()) } - yyj21++ - if yyhl21 { - yyb21 = yyj21 > l + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l } else { - yyb21 = r.CheckBreak() + yyb25 = r.CheckBreak() } - if yyb21 { + if yyb25 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -704,13 +794,13 @@ func (x *ObjectMeta) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } else { x.GenerateName = string(r.DecodeString()) } - yyj21++ - if yyhl21 { - yyb21 = yyj21 > l + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l } else { - yyb21 = r.CheckBreak() + yyb25 = r.CheckBreak() } - if yyb21 { + if yyb25 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -720,13 +810,13 @@ func (x *ObjectMeta) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } else { x.Namespace = string(r.DecodeString()) } - yyj21++ - if yyhl21 { - yyb21 = yyj21 > l + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l } else { - yyb21 = r.CheckBreak() + yyb25 = r.CheckBreak() } - if yyb21 { + if yyb25 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -736,13 +826,13 @@ func (x *ObjectMeta) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } else { x.SelfLink = string(r.DecodeString()) } - yyj21++ - if yyhl21 { - yyb21 = yyj21 > l + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l } else { - yyb21 = r.CheckBreak() + yyb25 = r.CheckBreak() } - if yyb21 { + if yyb25 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -752,13 +842,13 @@ func (x *ObjectMeta) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } else { x.UID = pkg1_types.UID(r.DecodeString()) } - yyj21++ - if yyhl21 { - yyb21 = yyj21 > l + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l } else { - yyb21 = r.CheckBreak() + yyb25 = r.CheckBreak() } - if yyb21 { + if yyb25 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -768,13 +858,13 @@ func (x *ObjectMeta) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } else { x.ResourceVersion = string(r.DecodeString()) } - yyj21++ - if yyhl21 { - yyb21 = yyj21 > l + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l } else { - yyb21 = r.CheckBreak() + yyb25 = r.CheckBreak() } - if yyb21 { + if yyb25 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -784,13 +874,13 @@ func (x *ObjectMeta) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } else { x.Generation = int64(r.DecodeInt(64)) } - yyj21++ - if yyhl21 { - yyb21 = yyj21 > l + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l } else { - yyb21 = r.CheckBreak() + yyb25 = 
r.CheckBreak() } - if yyb21 { + if yyb25 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -798,26 +888,26 @@ func (x *ObjectMeta) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.CreationTimestamp = pkg2_unversioned.Time{} } else { - yyv29 := &x.CreationTimestamp - yym30 := z.DecBinary() - _ = yym30 + yyv33 := &x.CreationTimestamp + yym34 := z.DecBinary() + _ = yym34 if false { - } else if z.HasExtensions() && z.DecExt(yyv29) { - } else if yym30 { - z.DecBinaryUnmarshal(yyv29) - } else if !yym30 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv29) + } else if z.HasExtensions() && z.DecExt(yyv33) { + } else if yym34 { + z.DecBinaryUnmarshal(yyv33) + } else if !yym34 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv33) } else { - z.DecFallback(yyv29, false) + z.DecFallback(yyv33, false) } } - yyj21++ - if yyhl21 { - yyb21 = yyj21 > l + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l } else { - yyb21 = r.CheckBreak() + yyb25 = r.CheckBreak() } - if yyb21 { + if yyb25 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -830,25 +920,25 @@ func (x *ObjectMeta) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.DeletionTimestamp == nil { x.DeletionTimestamp = new(pkg2_unversioned.Time) } - yym32 := z.DecBinary() - _ = yym32 + yym36 := z.DecBinary() + _ = yym36 if false { } else if z.HasExtensions() && z.DecExt(x.DeletionTimestamp) { - } else if yym32 { + } else if yym36 { z.DecBinaryUnmarshal(x.DeletionTimestamp) - } else if !yym32 && z.IsJSONHandle() { + } else if !yym36 && z.IsJSONHandle() { z.DecJSONUnmarshal(x.DeletionTimestamp) } else { z.DecFallback(x.DeletionTimestamp, false) } } - yyj21++ - if yyhl21 { - yyb21 = yyj21 > l + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l } else { - yyb21 = r.CheckBreak() + yyb25 = r.CheckBreak() } - if yyb21 { + if yyb25 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -861,20 +951,20 @@ func (x *ObjectMeta) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.DeletionGracePeriodSeconds == nil { x.DeletionGracePeriodSeconds = new(int64) } - yym34 := z.DecBinary() - _ = yym34 + yym38 := z.DecBinary() + _ = yym38 if false { } else { *((*int64)(x.DeletionGracePeriodSeconds)) = int64(r.DecodeInt(64)) } } - yyj21++ - if yyhl21 { - yyb21 = yyj21 > l + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l } else { - yyb21 = r.CheckBreak() + yyb25 = r.CheckBreak() } - if yyb21 { + if yyb25 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -882,21 +972,21 @@ func (x *ObjectMeta) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Labels = nil } else { - yyv35 := &x.Labels - yym36 := z.DecBinary() - _ = yym36 + yyv39 := &x.Labels + yym40 := z.DecBinary() + _ = yym40 if false { } else { - z.F.DecMapStringStringX(yyv35, false, d) + z.F.DecMapStringStringX(yyv39, false, d) } } - yyj21++ - if yyhl21 { - yyb21 = yyj21 > l + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l } else { - yyb21 = r.CheckBreak() + yyb25 = r.CheckBreak() } - if yyb21 { + if yyb25 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -904,26 +994,70 @@ func (x *ObjectMeta) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Annotations = nil } else { - yyv37 := &x.Annotations - yym38 := z.DecBinary() - _ = yym38 + yyv41 := &x.Annotations + yym42 := z.DecBinary() + _ = yym42 + if false { + } else { + z.F.DecMapStringStringX(yyv41, false, d) + } + } + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l + } else { + yyb25 = 
r.CheckBreak() + } + if yyb25 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.OwnerReferences = nil + } else { + yyv43 := &x.OwnerReferences + yym44 := z.DecBinary() + _ = yym44 + if false { + } else { + h.decSliceOwnerReference((*[]OwnerReference)(yyv43), d) + } + } + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l + } else { + yyb25 = r.CheckBreak() + } + if yyb25 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Finalizers = nil + } else { + yyv45 := &x.Finalizers + yym46 := z.DecBinary() + _ = yym46 if false { } else { - z.F.DecMapStringStringX(yyv37, false, d) + z.F.DecSliceStringX(yyv45, false, d) } } for { - yyj21++ - if yyhl21 { - yyb21 = yyj21 > l + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l } else { - yyb21 = r.CheckBreak() + yyb25 = r.CheckBreak() } - if yyb21 { + if yyb25 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj21-1, "") + z.DecStructFieldNotFound(yyj25-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -942,7 +1076,7 @@ func (x *Volume) CodecEncodeSelf(e *codec1978.Encoder) { } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [20]bool + var yyq2 [21]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false yyq2[1] = x.VolumeSource.HostPath != nil && x.HostPath != nil @@ -964,9 +1098,10 @@ func (x *Volume) CodecEncodeSelf(e *codec1978.Encoder) { yyq2[17] = x.VolumeSource.FC != nil && x.FC != nil yyq2[18] = x.VolumeSource.AzureFile != nil && x.AzureFile != nil yyq2[19] = x.VolumeSource.ConfigMap != nil && x.ConfigMap != nil + yyq2[20] = x.VolumeSource.VsphereVolume != nil && x.VsphereVolume != nil var yynn2 int if yyr2 || yy2arr2 { - r.EncodeArrayStart(20) + r.EncodeArrayStart(21) } else { yynn2 = 1 for _, b := range yyq2 { @@ -1699,6 +1834,43 @@ func (x *Volume) CodecEncodeSelf(e *codec1978.Encoder) { } } } + var yyn63 bool + if x.VolumeSource.VsphereVolume == nil { + yyn63 = true + goto LABEL63 + } + LABEL63: + if yyr2 || yy2arr2 { + if yyn63 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[20] { + if x.VsphereVolume == nil { + r.EncodeNil() + } else { + x.VsphereVolume.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[20] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("vsphereVolume")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn63 { + r.EncodeNil() + } else { + if x.VsphereVolume == nil { + r.EncodeNil() + } else { + x.VsphereVolume.CodecEncodeSelf(e) + } + } + } + } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { @@ -2032,6 +2204,20 @@ func (x *Volume) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } x.ConfigMap.CodecDecodeSelf(d) } + case "vsphereVolume": + if x.VolumeSource.VsphereVolume == nil { + x.VolumeSource.VsphereVolume = new(VsphereVirtualDiskVolumeSource) + } + if r.TryDecodeAsNil() { + if x.VsphereVolume != nil { + x.VsphereVolume = nil + } + } else { + if x.VsphereVolume == nil { + x.VsphereVolume = new(VsphereVirtualDiskVolumeSource) + } + x.VsphereVolume.CodecDecodeSelf(d) + } default: z.DecStructFieldNotFound(-1, yys3) } // end switch yys3 @@ -2043,16 +2229,16 @@ func (x *Volume) 
codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj24 int - var yyb24 bool - var yyhl24 bool = l >= 0 - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l + var yyj25 int + var yyb25 bool + var yyhl25 bool = l >= 0 + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l } else { - yyb24 = r.CheckBreak() + yyb25 = r.CheckBreak() } - if yyb24 { + if yyb25 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2065,13 +2251,13 @@ func (x *Volume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.VolumeSource.HostPath == nil { x.VolumeSource.HostPath = new(HostPathVolumeSource) } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l } else { - yyb24 = r.CheckBreak() + yyb25 = r.CheckBreak() } - if yyb24 { + if yyb25 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2089,13 +2275,13 @@ func (x *Volume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.VolumeSource.EmptyDir == nil { x.VolumeSource.EmptyDir = new(EmptyDirVolumeSource) } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l } else { - yyb24 = r.CheckBreak() + yyb25 = r.CheckBreak() } - if yyb24 { + if yyb25 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2113,13 +2299,13 @@ func (x *Volume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.VolumeSource.GCEPersistentDisk == nil { x.VolumeSource.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l } else { - yyb24 = r.CheckBreak() + yyb25 = r.CheckBreak() } - if yyb24 { + if yyb25 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2137,13 +2323,13 @@ func (x *Volume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.VolumeSource.AWSElasticBlockStore == nil { x.VolumeSource.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l } else { - yyb24 = r.CheckBreak() + yyb25 = r.CheckBreak() } - if yyb24 { + if yyb25 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2161,13 +2347,13 @@ func (x *Volume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.VolumeSource.GitRepo == nil { x.VolumeSource.GitRepo = new(GitRepoVolumeSource) } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l } else { - yyb24 = r.CheckBreak() + yyb25 = r.CheckBreak() } - if yyb24 { + if yyb25 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2185,13 +2371,13 @@ func (x *Volume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.VolumeSource.Secret == nil { x.VolumeSource.Secret = new(SecretVolumeSource) } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l } else { - yyb24 = r.CheckBreak() + yyb25 = r.CheckBreak() } - if yyb24 { + if yyb25 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2209,13 +2395,13 @@ func (x *Volume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.VolumeSource.NFS == nil { x.VolumeSource.NFS = new(NFSVolumeSource) } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l } else { - yyb24 = r.CheckBreak() + yyb25 = r.CheckBreak() } - if yyb24 { + if yyb25 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ 
-2233,13 +2419,13 @@ func (x *Volume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.VolumeSource.ISCSI == nil { x.VolumeSource.ISCSI = new(ISCSIVolumeSource) } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l } else { - yyb24 = r.CheckBreak() + yyb25 = r.CheckBreak() } - if yyb24 { + if yyb25 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2257,13 +2443,13 @@ func (x *Volume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.VolumeSource.Glusterfs == nil { x.VolumeSource.Glusterfs = new(GlusterfsVolumeSource) } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l } else { - yyb24 = r.CheckBreak() + yyb25 = r.CheckBreak() } - if yyb24 { + if yyb25 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2281,13 +2467,13 @@ func (x *Volume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.VolumeSource.PersistentVolumeClaim == nil { x.VolumeSource.PersistentVolumeClaim = new(PersistentVolumeClaimVolumeSource) } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l } else { - yyb24 = r.CheckBreak() + yyb25 = r.CheckBreak() } - if yyb24 { + if yyb25 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2305,13 +2491,13 @@ func (x *Volume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.VolumeSource.RBD == nil { x.VolumeSource.RBD = new(RBDVolumeSource) } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l } else { - yyb24 = r.CheckBreak() + yyb25 = r.CheckBreak() } - if yyb24 { + if yyb25 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2329,13 +2515,13 @@ func (x *Volume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.VolumeSource.FlexVolume == nil { x.VolumeSource.FlexVolume = new(FlexVolumeSource) } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l } else { - yyb24 = r.CheckBreak() + yyb25 = r.CheckBreak() } - if yyb24 { + if yyb25 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2353,13 +2539,13 @@ func (x *Volume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.VolumeSource.Cinder == nil { x.VolumeSource.Cinder = new(CinderVolumeSource) } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l } else { - yyb24 = r.CheckBreak() + yyb25 = r.CheckBreak() } - if yyb24 { + if yyb25 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2377,13 +2563,13 @@ func (x *Volume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.VolumeSource.CephFS == nil { x.VolumeSource.CephFS = new(CephFSVolumeSource) } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l } else { - yyb24 = r.CheckBreak() + yyb25 = r.CheckBreak() } - if yyb24 { + if yyb25 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2401,13 +2587,13 @@ func (x *Volume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.VolumeSource.Flocker == nil { x.VolumeSource.Flocker = new(FlockerVolumeSource) } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l } else { - yyb24 = r.CheckBreak() + yyb25 = r.CheckBreak() } - if yyb24 { + if yyb25 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2425,13 +2611,13 @@ func (x *Volume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if 
x.VolumeSource.DownwardAPI == nil { x.VolumeSource.DownwardAPI = new(DownwardAPIVolumeSource) } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l } else { - yyb24 = r.CheckBreak() + yyb25 = r.CheckBreak() } - if yyb24 { + if yyb25 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2449,13 +2635,13 @@ func (x *Volume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.VolumeSource.FC == nil { x.VolumeSource.FC = new(FCVolumeSource) } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l } else { - yyb24 = r.CheckBreak() + yyb25 = r.CheckBreak() } - if yyb24 { + if yyb25 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2473,13 +2659,13 @@ func (x *Volume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.VolumeSource.AzureFile == nil { x.VolumeSource.AzureFile = new(AzureFileVolumeSource) } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l } else { - yyb24 = r.CheckBreak() + yyb25 = r.CheckBreak() } - if yyb24 { + if yyb25 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2497,13 +2683,13 @@ func (x *Volume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.VolumeSource.ConfigMap == nil { x.VolumeSource.ConfigMap = new(ConfigMapVolumeSource) } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l } else { - yyb24 = r.CheckBreak() + yyb25 = r.CheckBreak() } - if yyb24 { + if yyb25 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2518,18 +2704,42 @@ func (x *Volume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.ConfigMap.CodecDecodeSelf(d) } + if x.VolumeSource.VsphereVolume == nil { + x.VolumeSource.VsphereVolume = new(VsphereVirtualDiskVolumeSource) + } + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l + } else { + yyb25 = r.CheckBreak() + } + if yyb25 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.VsphereVolume != nil { + x.VsphereVolume = nil + } + } else { + if x.VsphereVolume == nil { + x.VsphereVolume = new(VsphereVirtualDiskVolumeSource) + } + x.VsphereVolume.CodecDecodeSelf(d) + } for { - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l + yyj25++ + if yyhl25 { + yyb25 = yyj25 > l } else { - yyb24 = r.CheckBreak() + yyb25 = r.CheckBreak() } - if yyb24 { + if yyb25 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj24-1, "") + z.DecStructFieldNotFound(yyj25-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -2548,7 +2758,7 @@ func (x *VolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [19]bool + var yyq2 [20]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false yyq2[0] = x.HostPath != nil @@ -2570,9 +2780,10 @@ func (x *VolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { yyq2[16] = x.FC != nil yyq2[17] = x.AzureFile != nil yyq2[18] = x.ConfigMap != nil + yyq2[19] = x.VsphereVolume != nil var yynn2 int if yyr2 || yy2arr2 { - r.EncodeArrayStart(19) + r.EncodeArrayStart(20) } else { yynn2 = 0 for _, b := range yyq2 { @@ -3020,6 +3231,29 @@ func (x *VolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { } } } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[19] { + if x.VsphereVolume == 
nil { + r.EncodeNil() + } else { + x.VsphereVolume.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[19] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("vsphereVolume")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.VsphereVolume == nil { + r.EncodeNil() + } else { + x.VsphereVolume.CodecEncodeSelf(e) + } + } + } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { @@ -3290,6 +3524,17 @@ func (x *VolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } x.ConfigMap.CodecDecodeSelf(d) } + case "vsphereVolume": + if r.TryDecodeAsNil() { + if x.VsphereVolume != nil { + x.VsphereVolume = nil + } + } else { + if x.VsphereVolume == nil { + x.VsphereVolume = new(VsphereVirtualDiskVolumeSource) + } + x.VsphereVolume.CodecDecodeSelf(d) + } default: z.DecStructFieldNotFound(-1, yys3) } // end switch yys3 @@ -3301,16 +3546,16 @@ func (x *VolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj23 int - var yyb23 bool - var yyhl23 bool = l >= 0 - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l + var yyj24 int + var yyb24 bool + var yyhl24 bool = l >= 0 + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l } else { - yyb23 = r.CheckBreak() + yyb24 = r.CheckBreak() } - if yyb23 { + if yyb24 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3325,13 +3570,13 @@ func (x *VolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.HostPath.CodecDecodeSelf(d) } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l } else { - yyb23 = r.CheckBreak() + yyb24 = r.CheckBreak() } - if yyb23 { + if yyb24 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3346,13 +3591,13 @@ func (x *VolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.EmptyDir.CodecDecodeSelf(d) } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l } else { - yyb23 = r.CheckBreak() + yyb24 = r.CheckBreak() } - if yyb23 { + if yyb24 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3367,13 +3612,13 @@ func (x *VolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.GCEPersistentDisk.CodecDecodeSelf(d) } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l } else { - yyb23 = r.CheckBreak() + yyb24 = r.CheckBreak() } - if yyb23 { + if yyb24 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3388,13 +3633,13 @@ func (x *VolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.AWSElasticBlockStore.CodecDecodeSelf(d) } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l } else { - yyb23 = r.CheckBreak() + yyb24 = r.CheckBreak() } - if yyb23 { + if yyb24 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3409,13 +3654,13 @@ func (x *VolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.GitRepo.CodecDecodeSelf(d) } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l } else { - yyb23 = r.CheckBreak() + yyb24 = r.CheckBreak() } - if yyb23 { + if yyb24 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3430,13 +3675,13 @@ func (x *VolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } 
x.Secret.CodecDecodeSelf(d) } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l } else { - yyb23 = r.CheckBreak() + yyb24 = r.CheckBreak() } - if yyb23 { + if yyb24 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3451,13 +3696,13 @@ func (x *VolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.NFS.CodecDecodeSelf(d) } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l } else { - yyb23 = r.CheckBreak() + yyb24 = r.CheckBreak() } - if yyb23 { + if yyb24 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3472,13 +3717,13 @@ func (x *VolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.ISCSI.CodecDecodeSelf(d) } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l } else { - yyb23 = r.CheckBreak() + yyb24 = r.CheckBreak() } - if yyb23 { + if yyb24 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3493,13 +3738,13 @@ func (x *VolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.Glusterfs.CodecDecodeSelf(d) } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l } else { - yyb23 = r.CheckBreak() + yyb24 = r.CheckBreak() } - if yyb23 { + if yyb24 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3514,13 +3759,13 @@ func (x *VolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.PersistentVolumeClaim.CodecDecodeSelf(d) } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l } else { - yyb23 = r.CheckBreak() + yyb24 = r.CheckBreak() } - if yyb23 { + if yyb24 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3535,13 +3780,13 @@ func (x *VolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.RBD.CodecDecodeSelf(d) } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l } else { - yyb23 = r.CheckBreak() + yyb24 = r.CheckBreak() } - if yyb23 { + if yyb24 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3556,13 +3801,13 @@ func (x *VolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.FlexVolume.CodecDecodeSelf(d) } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l } else { - yyb23 = r.CheckBreak() + yyb24 = r.CheckBreak() } - if yyb23 { + if yyb24 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3577,13 +3822,13 @@ func (x *VolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.Cinder.CodecDecodeSelf(d) } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l } else { - yyb23 = r.CheckBreak() + yyb24 = r.CheckBreak() } - if yyb23 { + if yyb24 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3598,13 +3843,13 @@ func (x *VolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.CephFS.CodecDecodeSelf(d) } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l } else { - yyb23 = r.CheckBreak() + yyb24 = r.CheckBreak() } - if yyb23 { + if yyb24 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3619,13 +3864,13 @@ func (x *VolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.Flocker.CodecDecodeSelf(d) } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l } else { - yyb23 = 
r.CheckBreak() + yyb24 = r.CheckBreak() } - if yyb23 { + if yyb24 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3640,13 +3885,13 @@ func (x *VolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.DownwardAPI.CodecDecodeSelf(d) } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l } else { - yyb23 = r.CheckBreak() + yyb24 = r.CheckBreak() } - if yyb23 { + if yyb24 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3661,13 +3906,13 @@ func (x *VolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.FC.CodecDecodeSelf(d) } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l } else { - yyb23 = r.CheckBreak() + yyb24 = r.CheckBreak() } - if yyb23 { + if yyb24 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3682,13 +3927,13 @@ func (x *VolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.AzureFile.CodecDecodeSelf(d) } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l } else { - yyb23 = r.CheckBreak() + yyb24 = r.CheckBreak() } - if yyb23 { + if yyb24 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3703,18 +3948,39 @@ func (x *VolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.ConfigMap.CodecDecodeSelf(d) } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.VsphereVolume != nil { + x.VsphereVolume = nil + } + } else { + if x.VsphereVolume == nil { + x.VsphereVolume = new(VsphereVirtualDiskVolumeSource) + } + x.VsphereVolume.CodecDecodeSelf(d) + } for { - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l } else { - yyb23 = r.CheckBreak() + yyb24 = r.CheckBreak() } - if yyb23 { + if yyb24 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj23-1, "") + z.DecStructFieldNotFound(yyj24-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -3943,7 +4209,7 @@ func (x *PersistentVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [13]bool + var yyq2 [14]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false yyq2[0] = x.GCEPersistentDisk != nil @@ -3959,9 +4225,10 @@ func (x *PersistentVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { yyq2[10] = x.Flocker != nil yyq2[11] = x.FlexVolume != nil yyq2[12] = x.AzureFile != nil + yyq2[13] = x.VsphereVolume != nil var yynn2 int if yyr2 || yy2arr2 { - r.EncodeArrayStart(13) + r.EncodeArrayStart(14) } else { yynn2 = 0 for _, b := range yyq2 { @@ -4271,6 +4538,29 @@ func (x *PersistentVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { } } } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[13] { + if x.VsphereVolume == nil { + r.EncodeNil() + } else { + x.VsphereVolume.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[13] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("vsphereVolume")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.VsphereVolume == nil { + r.EncodeNil() + } else { + 
x.VsphereVolume.CodecEncodeSelf(e) + } + } + } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { @@ -4475,6 +4765,17 @@ func (x *PersistentVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Deco } x.AzureFile.CodecDecodeSelf(d) } + case "vsphereVolume": + if r.TryDecodeAsNil() { + if x.VsphereVolume != nil { + x.VsphereVolume = nil + } + } else { + if x.VsphereVolume == nil { + x.VsphereVolume = new(VsphereVirtualDiskVolumeSource) + } + x.VsphereVolume.CodecDecodeSelf(d) + } default: z.DecStructFieldNotFound(-1, yys3) } // end switch yys3 @@ -4486,16 +4787,16 @@ func (x *PersistentVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.De var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj17 int - var yyb17 bool - var yyhl17 bool = l >= 0 - yyj17++ - if yyhl17 { - yyb17 = yyj17 > l + var yyj18 int + var yyb18 bool + var yyhl18 bool = l >= 0 + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb17 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb17 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4510,13 +4811,13 @@ func (x *PersistentVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.De } x.GCEPersistentDisk.CodecDecodeSelf(d) } - yyj17++ - if yyhl17 { - yyb17 = yyj17 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb17 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb17 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4531,13 +4832,13 @@ func (x *PersistentVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.De } x.AWSElasticBlockStore.CodecDecodeSelf(d) } - yyj17++ - if yyhl17 { - yyb17 = yyj17 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb17 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb17 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4552,13 +4853,13 @@ func (x *PersistentVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.De } x.HostPath.CodecDecodeSelf(d) } - yyj17++ - if yyhl17 { - yyb17 = yyj17 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb17 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb17 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4573,13 +4874,13 @@ func (x *PersistentVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.De } x.Glusterfs.CodecDecodeSelf(d) } - yyj17++ - if yyhl17 { - yyb17 = yyj17 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb17 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb17 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4594,13 +4895,13 @@ func (x *PersistentVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.De } x.NFS.CodecDecodeSelf(d) } - yyj17++ - if yyhl17 { - yyb17 = yyj17 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb17 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb17 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4615,13 +4916,13 @@ func (x *PersistentVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.De } x.RBD.CodecDecodeSelf(d) } - yyj17++ - if yyhl17 { - yyb17 = yyj17 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb17 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb17 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4636,13 +4937,13 @@ func (x *PersistentVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.De } 
x.ISCSI.CodecDecodeSelf(d) } - yyj17++ - if yyhl17 { - yyb17 = yyj17 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb17 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb17 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4657,13 +4958,13 @@ func (x *PersistentVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.De } x.Cinder.CodecDecodeSelf(d) } - yyj17++ - if yyhl17 { - yyb17 = yyj17 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb17 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb17 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4678,13 +4979,13 @@ func (x *PersistentVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.De } x.CephFS.CodecDecodeSelf(d) } - yyj17++ - if yyhl17 { - yyb17 = yyj17 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb17 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb17 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4699,13 +5000,13 @@ func (x *PersistentVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.De } x.FC.CodecDecodeSelf(d) } - yyj17++ - if yyhl17 { - yyb17 = yyj17 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb17 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb17 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4720,13 +5021,13 @@ func (x *PersistentVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.De } x.Flocker.CodecDecodeSelf(d) } - yyj17++ - if yyhl17 { - yyb17 = yyj17 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb17 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb17 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4741,13 +5042,13 @@ func (x *PersistentVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.De } x.FlexVolume.CodecDecodeSelf(d) } - yyj17++ - if yyhl17 { - yyb17 = yyj17 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb17 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb17 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4762,18 +5063,39 @@ func (x *PersistentVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.De } x.AzureFile.CodecDecodeSelf(d) } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.VsphereVolume != nil { + x.VsphereVolume = nil + } + } else { + if x.VsphereVolume == nil { + x.VsphereVolume = new(VsphereVirtualDiskVolumeSource) + } + x.VsphereVolume.CodecDecodeSelf(d) + } for { - yyj17++ - if yyhl17 { - yyb17 = yyj17 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb17 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb17 { + if yyb18 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj17-1, "") + z.DecStructFieldNotFound(yyj18-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -5135,7 +5457,7 @@ func (x *PersistentVolumeSpec) CodecEncodeSelf(e *codec1978.Encoder) { } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [17]bool + var yyq2 [18]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false yyq2[0] = len(x.Capacity) != 0 @@ -5155,9 +5477,10 @@ func (x *PersistentVolumeSpec) CodecEncodeSelf(e *codec1978.Encoder) 
{ yyq2[14] = x.PersistentVolumeSource.Flocker != nil && x.Flocker != nil yyq2[15] = x.PersistentVolumeSource.FlexVolume != nil && x.FlexVolume != nil yyq2[16] = x.PersistentVolumeSource.AzureFile != nil && x.AzureFile != nil + yyq2[17] = x.PersistentVolumeSource.VsphereVolume != nil && x.VsphereVolume != nil var yynn2 int if yyr2 || yy2arr2 { - r.EncodeArrayStart(17) + r.EncodeArrayStart(18) } else { yynn2 = 0 for _, b := range yyq2 { @@ -5743,6 +6066,43 @@ func (x *PersistentVolumeSpec) CodecEncodeSelf(e *codec1978.Encoder) { } } } + var yyn54 bool + if x.PersistentVolumeSource.VsphereVolume == nil { + yyn54 = true + goto LABEL54 + } + LABEL54: + if yyr2 || yy2arr2 { + if yyn54 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[17] { + if x.VsphereVolume == nil { + r.EncodeNil() + } else { + x.VsphereVolume.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[17] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("vsphereVolume")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn54 { + r.EncodeNil() + } else { + if x.VsphereVolume == nil { + r.EncodeNil() + } else { + x.VsphereVolume.CodecEncodeSelf(e) + } + } + } + } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { @@ -6022,6 +6382,20 @@ func (x *PersistentVolumeSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decode } x.AzureFile.CodecDecodeSelf(d) } + case "vsphereVolume": + if x.PersistentVolumeSource.VsphereVolume == nil { + x.PersistentVolumeSource.VsphereVolume = new(VsphereVirtualDiskVolumeSource) + } + if r.TryDecodeAsNil() { + if x.VsphereVolume != nil { + x.VsphereVolume = nil + } + } else { + if x.VsphereVolume == nil { + x.VsphereVolume = new(VsphereVirtualDiskVolumeSource) + } + x.VsphereVolume.CodecDecodeSelf(d) + } default: z.DecStructFieldNotFound(-1, yys3) } // end switch yys3 @@ -6033,16 +6407,16 @@ func (x *PersistentVolumeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Deco var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj22 int - var yyb22 bool - var yyhl22 bool = l >= 0 - yyj22++ - if yyhl22 { - yyb22 = yyj22 > l + var yyj23 int + var yyb23 bool + var yyhl23 bool = l >= 0 + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l } else { - yyb22 = r.CheckBreak() + yyb23 = r.CheckBreak() } - if yyb22 { + if yyb23 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -6050,16 +6424,16 @@ func (x *PersistentVolumeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Deco if r.TryDecodeAsNil() { x.Capacity = nil } else { - yyv23 := &x.Capacity - yyv23.CodecDecodeSelf(d) + yyv24 := &x.Capacity + yyv24.CodecDecodeSelf(d) } - yyj22++ - if yyhl22 { - yyb22 = yyj22 > l + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l } else { - yyb22 = r.CheckBreak() + yyb23 = r.CheckBreak() } - if yyb22 { + if yyb23 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -6067,21 +6441,21 @@ func (x *PersistentVolumeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Deco if r.TryDecodeAsNil() { x.AccessModes = nil } else { - yyv24 := &x.AccessModes - yym25 := z.DecBinary() - _ = yym25 + yyv25 := &x.AccessModes + yym26 := z.DecBinary() + _ = yym26 if false { } else { - h.decSlicePersistentVolumeAccessMode((*[]PersistentVolumeAccessMode)(yyv24), d) + h.decSlicePersistentVolumeAccessMode((*[]PersistentVolumeAccessMode)(yyv25), d) } } - yyj22++ - if yyhl22 { - yyb22 = yyj22 > l + yyj23++ + if yyhl23 { 
+ yyb23 = yyj23 > l } else { - yyb22 = r.CheckBreak() + yyb23 = r.CheckBreak() } - if yyb22 { + if yyb23 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -6096,13 +6470,13 @@ func (x *PersistentVolumeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Deco } x.ClaimRef.CodecDecodeSelf(d) } - yyj22++ - if yyhl22 { - yyb22 = yyj22 > l + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l } else { - yyb22 = r.CheckBreak() + yyb23 = r.CheckBreak() } - if yyb22 { + if yyb23 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -6115,13 +6489,13 @@ func (x *PersistentVolumeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Deco if x.PersistentVolumeSource.GCEPersistentDisk == nil { x.PersistentVolumeSource.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) } - yyj22++ - if yyhl22 { - yyb22 = yyj22 > l + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l } else { - yyb22 = r.CheckBreak() + yyb23 = r.CheckBreak() } - if yyb22 { + if yyb23 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -6139,13 +6513,13 @@ func (x *PersistentVolumeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Deco if x.PersistentVolumeSource.AWSElasticBlockStore == nil { x.PersistentVolumeSource.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) } - yyj22++ - if yyhl22 { - yyb22 = yyj22 > l + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l } else { - yyb22 = r.CheckBreak() + yyb23 = r.CheckBreak() } - if yyb22 { + if yyb23 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -6163,13 +6537,13 @@ func (x *PersistentVolumeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Deco if x.PersistentVolumeSource.HostPath == nil { x.PersistentVolumeSource.HostPath = new(HostPathVolumeSource) } - yyj22++ - if yyhl22 { - yyb22 = yyj22 > l + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l } else { - yyb22 = r.CheckBreak() + yyb23 = r.CheckBreak() } - if yyb22 { + if yyb23 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -6187,13 +6561,13 @@ func (x *PersistentVolumeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Deco if x.PersistentVolumeSource.Glusterfs == nil { x.PersistentVolumeSource.Glusterfs = new(GlusterfsVolumeSource) } - yyj22++ - if yyhl22 { - yyb22 = yyj22 > l + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l } else { - yyb22 = r.CheckBreak() + yyb23 = r.CheckBreak() } - if yyb22 { + if yyb23 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -6211,13 +6585,13 @@ func (x *PersistentVolumeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Deco if x.PersistentVolumeSource.NFS == nil { x.PersistentVolumeSource.NFS = new(NFSVolumeSource) } - yyj22++ - if yyhl22 { - yyb22 = yyj22 > l + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l } else { - yyb22 = r.CheckBreak() + yyb23 = r.CheckBreak() } - if yyb22 { + if yyb23 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -6235,13 +6609,13 @@ func (x *PersistentVolumeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Deco if x.PersistentVolumeSource.RBD == nil { x.PersistentVolumeSource.RBD = new(RBDVolumeSource) } - yyj22++ - if yyhl22 { - yyb22 = yyj22 > l + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l } else { - yyb22 = r.CheckBreak() + yyb23 = r.CheckBreak() } - if yyb22 { + if yyb23 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -6259,13 +6633,13 @@ func (x *PersistentVolumeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Deco if x.PersistentVolumeSource.ISCSI == nil { x.PersistentVolumeSource.ISCSI = new(ISCSIVolumeSource) } - 
yyj22++ - if yyhl22 { - yyb22 = yyj22 > l + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l } else { - yyb22 = r.CheckBreak() + yyb23 = r.CheckBreak() } - if yyb22 { + if yyb23 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -6283,13 +6657,13 @@ func (x *PersistentVolumeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Deco if x.PersistentVolumeSource.Cinder == nil { x.PersistentVolumeSource.Cinder = new(CinderVolumeSource) } - yyj22++ - if yyhl22 { - yyb22 = yyj22 > l + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l } else { - yyb22 = r.CheckBreak() + yyb23 = r.CheckBreak() } - if yyb22 { + if yyb23 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -6307,13 +6681,13 @@ func (x *PersistentVolumeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Deco if x.PersistentVolumeSource.CephFS == nil { x.PersistentVolumeSource.CephFS = new(CephFSVolumeSource) } - yyj22++ - if yyhl22 { - yyb22 = yyj22 > l + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l } else { - yyb22 = r.CheckBreak() + yyb23 = r.CheckBreak() } - if yyb22 { + if yyb23 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -6331,13 +6705,13 @@ func (x *PersistentVolumeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Deco if x.PersistentVolumeSource.FC == nil { x.PersistentVolumeSource.FC = new(FCVolumeSource) } - yyj22++ - if yyhl22 { - yyb22 = yyj22 > l + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l } else { - yyb22 = r.CheckBreak() + yyb23 = r.CheckBreak() } - if yyb22 { + if yyb23 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -6355,13 +6729,13 @@ func (x *PersistentVolumeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Deco if x.PersistentVolumeSource.Flocker == nil { x.PersistentVolumeSource.Flocker = new(FlockerVolumeSource) } - yyj22++ - if yyhl22 { - yyb22 = yyj22 > l + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l } else { - yyb22 = r.CheckBreak() + yyb23 = r.CheckBreak() } - if yyb22 { + if yyb23 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -6379,13 +6753,13 @@ func (x *PersistentVolumeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Deco if x.PersistentVolumeSource.FlexVolume == nil { x.PersistentVolumeSource.FlexVolume = new(FlexVolumeSource) } - yyj22++ - if yyhl22 { - yyb22 = yyj22 > l + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l } else { - yyb22 = r.CheckBreak() + yyb23 = r.CheckBreak() } - if yyb22 { + if yyb23 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -6403,13 +6777,13 @@ func (x *PersistentVolumeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Deco if x.PersistentVolumeSource.AzureFile == nil { x.PersistentVolumeSource.AzureFile = new(AzureFileVolumeSource) } - yyj22++ - if yyhl22 { - yyb22 = yyj22 > l + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l } else { - yyb22 = r.CheckBreak() + yyb23 = r.CheckBreak() } - if yyb22 { + if yyb23 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -6424,18 +6798,42 @@ func (x *PersistentVolumeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Deco } x.AzureFile.CodecDecodeSelf(d) } + if x.PersistentVolumeSource.VsphereVolume == nil { + x.PersistentVolumeSource.VsphereVolume = new(VsphereVirtualDiskVolumeSource) + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.VsphereVolume != nil { + x.VsphereVolume = nil + } + 
} else { + if x.VsphereVolume == nil { + x.VsphereVolume = new(VsphereVirtualDiskVolumeSource) + } + x.VsphereVolume.CodecDecodeSelf(d) + } for { - yyj22++ - if yyhl22 { - yyb22 = yyj22 > l + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l } else { - yyb22 = r.CheckBreak() + yyb23 = r.CheckBreak() } - if yyb22 { + if yyb23 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj22-1, "") + z.DecStructFieldNotFound(yyj23-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -11615,13 +12013,14 @@ func (x *SecretVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool + var yyq2 [2]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false yyq2[0] = x.SecretName != "" + yyq2[1] = len(x.Items) != 0 var yynn2 int if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) + r.EncodeArrayStart(2) } else { yynn2 = 0 for _, b := range yyq2 { @@ -11657,6 +12056,39 @@ func (x *SecretVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { } } } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Items == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + h.encSliceKeyToPath(([]KeyToPath)(x.Items), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + h.encSliceKeyToPath(([]KeyToPath)(x.Items), e) + } + } + } + } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { @@ -11724,6 +12156,18 @@ func (x *SecretVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) } else { x.SecretName = string(r.DecodeString()) } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv5 := &x.Items + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + h.decSliceKeyToPath((*[]KeyToPath)(yyv5), d) + } + } default: z.DecStructFieldNotFound(-1, yys3) } // end switch yys3 @@ -11735,16 +12179,16 @@ func (x *SecretVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decode var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj5 int - var yyb5 bool - var yyhl5 bool = l >= 0 - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb5 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb5 { + if yyb7 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -11754,18 +12198,40 @@ func (x *SecretVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decode } else { x.SecretName = string(r.DecodeString()) } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv9 := &x.Items + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + h.decSliceKeyToPath((*[]KeyToPath)(yyv9), d) + } + } for { - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb5 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb5 { + if yyb7 { break } 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj5-1, "") + z.DecStructFieldNotFound(yyj7-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -13009,7 +13475,7 @@ func (x *AzureFileVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Dec z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *ConfigMapVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *VsphereVirtualDiskVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -13026,13 +13492,12 @@ func (x *ConfigMapVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { var yyq2 [2]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false - yyq2[0] = len(x.Items) != 0 - yyq2[1] = x.Name != "" + yyq2[1] = x.FSType != "" var yynn2 int if yyr2 || yy2arr2 { r.EncodeArrayStart(2) } else { - yynn2 = 0 + yynn2 = 1 for _, b := range yyq2 { if b { yynn2++ @@ -13043,35 +13508,21 @@ func (x *ConfigMapVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Items == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - h.encSliceKeyToPath(([]KeyToPath)(x.Items), e) - } - } + yym4 := z.EncBinary() + _ = yym4 + if false { } else { - r.EncodeNil() + r.EncodeString(codecSelferC_UTF81234, string(x.VolumePath)) } } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - h.encSliceKeyToPath(([]KeyToPath)(x.Items), e) - } - } + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("volumePath")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.VolumePath)) } } if yyr2 || yy2arr2 { @@ -13081,7 +13532,7 @@ func (x *ConfigMapVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { _ = yym7 if false { } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) } } else { r.EncodeString(codecSelferC_UTF81234, "") @@ -13089,13 +13540,13 @@ func (x *ConfigMapVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { } else { if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) + r.EncodeString(codecSelferC_UTF81234, string("fsType")) z.EncSendContainerState(codecSelfer_containerMapValue1234) yym8 := z.EncBinary() _ = yym8 if false { } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) } } } @@ -13108,7 +13559,7 @@ func (x *ConfigMapVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *ConfigMapVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *VsphereVirtualDiskVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -13138,7 +13589,7 @@ func (x *ConfigMapVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *ConfigMapVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *VsphereVirtualDiskVolumeSource) 
codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -13160,23 +13611,17 @@ func (x *ConfigMapVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decod yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) switch yys3 { - case "items": + case "volumePath": if r.TryDecodeAsNil() { - x.Items = nil + x.VolumePath = "" } else { - yyv4 := &x.Items - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - h.decSliceKeyToPath((*[]KeyToPath)(yyv4), d) - } + x.VolumePath = string(r.DecodeString()) } - case "name": + case "fsType": if r.TryDecodeAsNil() { - x.Name = "" + x.FSType = "" } else { - x.Name = string(r.DecodeString()) + x.FSType = string(r.DecodeString()) } default: z.DecStructFieldNotFound(-1, yys3) @@ -13185,68 +13630,62 @@ func (x *ConfigMapVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decod z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *ConfigMapVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *VsphereVirtualDiskVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - yyb7 = r.CheckBreak() + yyb6 = r.CheckBreak() } - if yyb7 { + if yyb6 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Items = nil + x.VolumePath = "" } else { - yyv8 := &x.Items - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - h.decSliceKeyToPath((*[]KeyToPath)(yyv8), d) - } + x.VolumePath = string(r.DecodeString()) } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - yyb7 = r.CheckBreak() + yyb6 = r.CheckBreak() } - if yyb7 { + if yyb6 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Name = "" + x.FSType = "" } else { - x.Name = string(r.DecodeString()) + x.FSType = string(r.DecodeString()) } for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - yyb7 = r.CheckBreak() + yyb6 = r.CheckBreak() } - if yyb7 { + if yyb6 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") + z.DecStructFieldNotFound(yyj6-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *KeyToPath) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *ConfigMapVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -13263,11 +13702,13 @@ func (x *KeyToPath) CodecEncodeSelf(e *codec1978.Encoder) { var yyq2 [2]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false + yyq2[0] = len(x.Items) != 0 + yyq2[1] = x.Name != "" var yynn2 int if yyr2 || yy2arr2 { r.EncodeArrayStart(2) } else { - yynn2 = 2 + yynn2 = 0 for _, b := range yyq2 { if b { yynn2++ @@ -13278,40 +13719,60 @@ func (x *KeyToPath) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { + if yyq2[0] { + if x.Items == nil { + r.EncodeNil() + } else { + 
yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSliceKeyToPath(([]KeyToPath)(x.Items), e) + } + } } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Key)) + r.EncodeNil() } } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("key")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Key)) + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSliceKeyToPath(([]KeyToPath)(x.Items), e) + } + } } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + r.EncodeString(codecSelferC_UTF81234, "") } } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("path")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } } } if yyr2 || yy2arr2 { @@ -13323,7 +13784,222 @@ func (x *KeyToPath) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *KeyToPath) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *ConfigMapVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ConfigMapVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv4 := &x.Items + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + 
h.decSliceKeyToPath((*[]KeyToPath)(yyv4), d) + } + } + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + x.Name = string(r.DecodeString()) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ConfigMapVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv8 := &x.Items + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + h.decSliceKeyToPath((*[]KeyToPath)(yyv8), d) + } + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + x.Name = string(r.DecodeString()) + } + for { + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj7-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *KeyToPath) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Key)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("key")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Key)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("path")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *KeyToPath) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -13807,13 +14483,14 @@ func (x 
*VolumeMount) CodecEncodeSelf(e *codec1978.Encoder) { } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool + var yyq2 [4]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false yyq2[1] = x.ReadOnly != false + yyq2[3] = x.SubPath != "" var yynn2 int if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) + r.EncodeArrayStart(4) } else { yynn2 = 2 for _, b := range yyq2 { @@ -13887,6 +14564,31 @@ func (x *VolumeMount) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, string(x.MountPath)) } } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.SubPath)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("subPath")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.SubPath)) + } + } + } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { @@ -13966,6 +14668,12 @@ func (x *VolumeMount) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } else { x.MountPath = string(r.DecodeString()) } + case "subPath": + if r.TryDecodeAsNil() { + x.SubPath = "" + } else { + x.SubPath = string(r.DecodeString()) + } default: z.DecStructFieldNotFound(-1, yys3) } // end switch yys3 @@ -13977,16 +14685,16 @@ func (x *VolumeMount) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb7 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb7 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -13996,13 +14704,13 @@ func (x *VolumeMount) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } else { x.Name = string(r.DecodeString()) } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb7 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb7 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -14012,13 +14720,13 @@ func (x *VolumeMount) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } else { x.ReadOnly = bool(r.DecodeBool()) } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb7 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb7 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -14028,18 +14736,34 @@ func (x *VolumeMount) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } else { x.MountPath = string(r.DecodeString()) } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.SubPath = "" + } else { + x.SubPath = string(r.DecodeString()) + } for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb7 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb7 { + if yyb8 { break } 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") + z.DecStructFieldNotFound(yyj8-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -14324,15 +15048,16 @@ func (x *EnvVarSource) CodecEncodeSelf(e *codec1978.Encoder) { } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool + var yyq2 [4]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false yyq2[0] = x.FieldRef != nil - yyq2[1] = x.ConfigMapKeyRef != nil - yyq2[2] = x.SecretKeyRef != nil + yyq2[1] = x.ResourceFieldRef != nil + yyq2[2] = x.ConfigMapKeyRef != nil + yyq2[3] = x.SecretKeyRef != nil var yynn2 int if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) + r.EncodeArrayStart(4) } else { yynn2 = 0 for _, b := range yyq2 { @@ -14369,6 +15094,29 @@ func (x *EnvVarSource) CodecEncodeSelf(e *codec1978.Encoder) { if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if yyq2[1] { + if x.ResourceFieldRef == nil { + r.EncodeNil() + } else { + x.ResourceFieldRef.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("resourceFieldRef")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ResourceFieldRef == nil { + r.EncodeNil() + } else { + x.ResourceFieldRef.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { if x.ConfigMapKeyRef == nil { r.EncodeNil() } else { @@ -14378,7 +15126,7 @@ func (x *EnvVarSource) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq2[1] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("configMapKeyRef")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -14391,7 +15139,7 @@ func (x *EnvVarSource) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { + if yyq2[3] { if x.SecretKeyRef == nil { r.EncodeNil() } else { @@ -14401,7 +15149,7 @@ func (x *EnvVarSource) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq2[2] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("secretKeyRef")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -14484,6 +15232,17 @@ func (x *EnvVarSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } x.FieldRef.CodecDecodeSelf(d) } + case "resourceFieldRef": + if r.TryDecodeAsNil() { + if x.ResourceFieldRef != nil { + x.ResourceFieldRef = nil + } + } else { + if x.ResourceFieldRef == nil { + x.ResourceFieldRef = new(ResourceFieldSelector) + } + x.ResourceFieldRef.CodecDecodeSelf(d) + } case "configMapKeyRef": if r.TryDecodeAsNil() { if x.ConfigMapKeyRef != nil { @@ -14517,16 +15276,16 @@ func (x *EnvVarSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb7 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb7 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -14541,13 +15300,34 @@ func (x *EnvVarSource) 
codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.FieldRef.CodecDecodeSelf(d) } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb7 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb7 { + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ResourceFieldRef != nil { + x.ResourceFieldRef = nil + } + } else { + if x.ResourceFieldRef == nil { + x.ResourceFieldRef = new(ResourceFieldSelector) + } + x.ResourceFieldRef.CodecDecodeSelf(d) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -14562,13 +15342,13 @@ func (x *EnvVarSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.ConfigMapKeyRef.CodecDecodeSelf(d) } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb7 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb7 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -14584,17 +15364,17 @@ func (x *EnvVarSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { x.SecretKeyRef.CodecDecodeSelf(d) } for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb7 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb7 { + if yyb8 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") + z.DecStructFieldNotFound(yyj8-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -14809,7 +15589,7 @@ func (x *ObjectFieldSelector) codecDecodeSelfFromArray(l int, d *codec1978.Decod z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *ConfigMapKeySelector) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *ResourceFieldSelector) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -14823,13 +15603,14 @@ func (x *ConfigMapKeySelector) CodecEncodeSelf(e *codec1978.Encoder) { } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool + var yyq2 [3]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false - yyq2[1] = x.Name != "" + yyq2[0] = x.ContainerName != "" + yyq2[2] = true var yynn2 int if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) + r.EncodeArrayStart(3) } else { yynn2 = 1 for _, b := range yyq2 { @@ -14842,45 +15623,78 @@ func (x *ConfigMapKeySelector) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ContainerName)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("containerName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ContainerName)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 if false { } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Key)) + r.EncodeString(codecSelferC_UTF81234, 
string(x.Resource)) } } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("key")) + r.EncodeString(codecSelferC_UTF81234, string("resource")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 + yym8 := z.EncBinary() + _ = yym8 if false { } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Key)) + r.EncodeString(codecSelferC_UTF81234, string(x.Resource)) } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 + if yyq2[2] { + yy10 := &x.Divisor + yym11 := z.EncBinary() + _ = yym11 if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else if !yym11 && z.IsJSONHandle() { + z.EncJSONMarshal(yy10) } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + z.EncFallback(yy10) } } else { - r.EncodeString(codecSelferC_UTF81234, "") + r.EncodeNil() } } else { - if yyq2[1] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) + r.EncodeString(codecSelferC_UTF81234, string("divisor")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 + yy12 := &x.Divisor + yym13 := z.EncBinary() + _ = yym13 if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else if !yym13 && z.IsJSONHandle() { + z.EncJSONMarshal(yy12) } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + z.EncFallback(yy12) } } } @@ -14893,7 +15707,7 @@ func (x *ConfigMapKeySelector) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *ConfigMapKeySelector) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *ResourceFieldSelector) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -14923,7 +15737,7 @@ func (x *ConfigMapKeySelector) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *ConfigMapKeySelector) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *ResourceFieldSelector) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -14945,17 +15759,32 @@ func (x *ConfigMapKeySelector) codecDecodeSelfFromMap(l int, d *codec1978.Decode yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) switch yys3 { - case "key": + case "containerName": if r.TryDecodeAsNil() { - x.Key = "" + x.ContainerName = "" } else { - x.Key = string(r.DecodeString()) + x.ContainerName = string(r.DecodeString()) } - case "name": + case "resource": if r.TryDecodeAsNil() { - x.Name = "" + x.Resource = "" } else { - x.Name = string(r.DecodeString()) + x.Resource = string(r.DecodeString()) + } + case "divisor": + if r.TryDecodeAsNil() { + x.Divisor = pkg3_resource.Quantity{} + } else { + yyv6 := &x.Divisor + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(yyv6) { + } else if !yym7 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv6) + } else { + z.DecFallback(yyv6, false) + } } default: z.DecStructFieldNotFound(-1, yys3) @@ -14964,62 +15793,87 @@ func (x *ConfigMapKeySelector) codecDecodeSelfFromMap(l int, d *codec1978.Decode z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *ConfigMapKeySelector) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *ResourceFieldSelector) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := 
codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb6 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb6 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Key = "" + x.ContainerName = "" } else { - x.Key = string(r.DecodeString()) + x.ContainerName = string(r.DecodeString()) } - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb6 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb6 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Name = "" + x.Resource = "" } else { - x.Name = string(r.DecodeString()) + x.Resource = string(r.DecodeString()) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Divisor = pkg3_resource.Quantity{} + } else { + yyv11 := &x.Divisor + yym12 := z.DecBinary() + _ = yym12 + if false { + } else if z.HasExtensions() && z.DecExt(yyv11) { + } else if !yym12 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv11) + } else { + z.DecFallback(yyv11, false) + } } for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb6 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb6 { + if yyb8 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") + z.DecStructFieldNotFound(yyj8-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *SecretKeySelector) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *ConfigMapKeySelector) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -15103,7 +15957,7 @@ func (x *SecretKeySelector) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *SecretKeySelector) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *ConfigMapKeySelector) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -15133,7 +15987,7 @@ func (x *SecretKeySelector) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *SecretKeySelector) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *ConfigMapKeySelector) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -15174,7 +16028,7 @@ func (x *SecretKeySelector) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *SecretKeySelector) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *ConfigMapKeySelector) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -15229,7 +16083,7 @@ func (x *SecretKeySelector) codecDecodeSelfFromArray(l int, d *codec1978.Decoder z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *HTTPHeader) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *SecretKeySelector) CodecEncodeSelf(e *codec1978.Encoder) { var h 
codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -15246,11 +16100,12 @@ func (x *HTTPHeader) CodecEncodeSelf(e *codec1978.Encoder) { var yyq2 [2]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false + yyq2[1] = x.Name != "" var yynn2 int if yyr2 || yy2arr2 { r.EncodeArrayStart(2) } else { - yynn2 = 2 + yynn2 = 1 for _, b := range yyq2 { if b { yynn2++ @@ -15265,36 +16120,42 @@ func (x *HTTPHeader) CodecEncodeSelf(e *codec1978.Encoder) { _ = yym4 if false { } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + r.EncodeString(codecSelferC_UTF81234, string(x.Key)) } } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) + r.EncodeString(codecSelferC_UTF81234, string("key")) z.EncSendContainerState(codecSelfer_containerMapValue1234) yym5 := z.EncBinary() _ = yym5 if false { } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + r.EncodeString(codecSelferC_UTF81234, string(x.Key)) } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Value)) + r.EncodeString(codecSelferC_UTF81234, "") } } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("value")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Value)) + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } } } if yyr2 || yy2arr2 { @@ -15306,7 +16167,210 @@ func (x *HTTPHeader) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *HTTPHeader) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *SecretKeySelector) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *SecretKeySelector) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + 
switch yys3 { + case "key": + if r.TryDecodeAsNil() { + x.Key = "" + } else { + x.Key = string(r.DecodeString()) + } + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + x.Name = string(r.DecodeString()) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *SecretKeySelector) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Key = "" + } else { + x.Key = string(r.DecodeString()) + } + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + x.Name = string(r.DecodeString()) + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *HTTPHeader) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Value)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("value")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Value)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *HTTPHeader) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -15660,7 +16724,7 @@ func (x *HTTPGetAction) 
codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } case "port": if r.TryDecodeAsNil() { - x.Port = pkg5_intstr.IntOrString{} + x.Port = pkg4_intstr.IntOrString{} } else { yyv5 := &x.Port yym6 := z.DecBinary() @@ -15739,7 +16803,7 @@ func (x *HTTPGetAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Port = pkg5_intstr.IntOrString{} + x.Port = pkg4_intstr.IntOrString{} } else { yyv13 := &x.Port yym14 := z.DecBinary() @@ -15968,7 +17032,7 @@ func (x *TCPSocketAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { switch yys3 { case "port": if r.TryDecodeAsNil() { - x.Port = pkg5_intstr.IntOrString{} + x.Port = pkg4_intstr.IntOrString{} } else { yyv4 := &x.Port yym5 := z.DecBinary() @@ -16007,7 +17071,7 @@ func (x *TCPSocketAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Port = pkg5_intstr.IntOrString{} + x.Port = pkg4_intstr.IntOrString{} } else { yyv7 := &x.Port yym8 := z.DecBinary() @@ -21879,13 +22943,15 @@ func (x *Affinity) CodecEncodeSelf(e *codec1978.Encoder) { } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool + var yyq2 [3]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false yyq2[0] = x.NodeAffinity != nil + yyq2[1] = x.PodAffinity != nil + yyq2[2] = x.PodAntiAffinity != nil var yynn2 int if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) + r.EncodeArrayStart(3) } else { yynn2 = 0 for _, b := range yyq2 { @@ -21919,6 +22985,52 @@ func (x *Affinity) CodecEncodeSelf(e *codec1978.Encoder) { } } } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.PodAffinity == nil { + r.EncodeNil() + } else { + x.PodAffinity.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("podAffinity")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.PodAffinity == nil { + r.EncodeNil() + } else { + x.PodAffinity.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.PodAntiAffinity == nil { + r.EncodeNil() + } else { + x.PodAntiAffinity.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("podAntiAffinity")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.PodAntiAffinity == nil { + r.EncodeNil() + } else { + x.PodAntiAffinity.CodecEncodeSelf(e) + } + } + } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { @@ -21991,6 +23103,28 @@ func (x *Affinity) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } x.NodeAffinity.CodecDecodeSelf(d) } + case "podAffinity": + if r.TryDecodeAsNil() { + if x.PodAffinity != nil { + x.PodAffinity = nil + } + } else { + if x.PodAffinity == nil { + x.PodAffinity = new(PodAffinity) + } + x.PodAffinity.CodecDecodeSelf(d) + } + case "podAntiAffinity": + if r.TryDecodeAsNil() { + if x.PodAntiAffinity != nil { + x.PodAntiAffinity = nil + } + } else { + if x.PodAntiAffinity == nil { + x.PodAntiAffinity = new(PodAntiAffinity) + } + x.PodAntiAffinity.CodecDecodeSelf(d) + } default: z.DecStructFieldNotFound(-1, yys3) } // end switch yys3 @@ -22002,16 
+23136,16 @@ func (x *Affinity) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj5 int - var yyb5 bool - var yyhl5 bool = l >= 0 - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb5 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb5 { + if yyb7 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -22026,23 +23160,65 @@ func (x *Affinity) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.NodeAffinity.CodecDecodeSelf(d) } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.PodAffinity != nil { + x.PodAffinity = nil + } + } else { + if x.PodAffinity == nil { + x.PodAffinity = new(PodAffinity) + } + x.PodAffinity.CodecDecodeSelf(d) + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.PodAntiAffinity != nil { + x.PodAntiAffinity = nil + } + } else { + if x.PodAntiAffinity == nil { + x.PodAntiAffinity = new(PodAntiAffinity) + } + x.PodAntiAffinity.CodecDecodeSelf(d) + } for { - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb5 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb5 { + if yyb7 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj5-1, "") + z.DecStructFieldNotFound(yyj7-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *NodeAffinity) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *PodAffinity) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -22059,7 +23235,7 @@ func (x *NodeAffinity) CodecEncodeSelf(e *codec1978.Encoder) { var yyq2 [2]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false - yyq2[0] = x.RequiredDuringSchedulingIgnoredDuringExecution != nil + yyq2[0] = len(x.RequiredDuringSchedulingIgnoredDuringExecution) != 0 yyq2[1] = len(x.PreferredDuringSchedulingIgnoredDuringExecution) != 0 var yynn2 int if yyr2 || yy2arr2 { @@ -22080,7 +23256,12 @@ func (x *NodeAffinity) CodecEncodeSelf(e *codec1978.Encoder) { if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { r.EncodeNil() } else { - x.RequiredDuringSchedulingIgnoredDuringExecution.CodecEncodeSelf(e) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSlicePodAffinityTerm(([]PodAffinityTerm)(x.RequiredDuringSchedulingIgnoredDuringExecution), e) + } } } else { r.EncodeNil() @@ -22093,7 +23274,12 @@ func (x *NodeAffinity) CodecEncodeSelf(e *codec1978.Encoder) { if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { r.EncodeNil() } else { - x.RequiredDuringSchedulingIgnoredDuringExecution.CodecEncodeSelf(e) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSlicePodAffinityTerm(([]PodAffinityTerm)(x.RequiredDuringSchedulingIgnoredDuringExecution), e) + } } } } @@ -22107,7 +23293,7 @@ func (x *NodeAffinity) CodecEncodeSelf(e *codec1978.Encoder) { _ = yym7 if false { } else { - 
h.encSlicePreferredSchedulingTerm(([]PreferredSchedulingTerm)(x.PreferredDuringSchedulingIgnoredDuringExecution), e) + h.encSliceWeightedPodAffinityTerm(([]WeightedPodAffinityTerm)(x.PreferredDuringSchedulingIgnoredDuringExecution), e) } } } else { @@ -22125,7 +23311,7 @@ func (x *NodeAffinity) CodecEncodeSelf(e *codec1978.Encoder) { _ = yym8 if false { } else { - h.encSlicePreferredSchedulingTerm(([]PreferredSchedulingTerm)(x.PreferredDuringSchedulingIgnoredDuringExecution), e) + h.encSliceWeightedPodAffinityTerm(([]WeightedPodAffinityTerm)(x.PreferredDuringSchedulingIgnoredDuringExecution), e) } } } @@ -22139,7 +23325,1018 @@ func (x *NodeAffinity) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *NodeAffinity) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *PodAffinity) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodAffinity) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "requiredDuringSchedulingIgnoredDuringExecution": + if r.TryDecodeAsNil() { + x.RequiredDuringSchedulingIgnoredDuringExecution = nil + } else { + yyv4 := &x.RequiredDuringSchedulingIgnoredDuringExecution + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSlicePodAffinityTerm((*[]PodAffinityTerm)(yyv4), d) + } + } + case "preferredDuringSchedulingIgnoredDuringExecution": + if r.TryDecodeAsNil() { + x.PreferredDuringSchedulingIgnoredDuringExecution = nil + } else { + yyv6 := &x.PreferredDuringSchedulingIgnoredDuringExecution + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + h.decSliceWeightedPodAffinityTerm((*[]WeightedPodAffinityTerm)(yyv6), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodAffinity) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.RequiredDuringSchedulingIgnoredDuringExecution = nil + } else { + yyv9 := 
&x.RequiredDuringSchedulingIgnoredDuringExecution + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + h.decSlicePodAffinityTerm((*[]PodAffinityTerm)(yyv9), d) + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.PreferredDuringSchedulingIgnoredDuringExecution = nil + } else { + yyv11 := &x.PreferredDuringSchedulingIgnoredDuringExecution + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + h.decSliceWeightedPodAffinityTerm((*[]WeightedPodAffinityTerm)(yyv11), d) + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PodAntiAffinity) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.RequiredDuringSchedulingIgnoredDuringExecution) != 0 + yyq2[1] = len(x.PreferredDuringSchedulingIgnoredDuringExecution) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSlicePodAffinityTerm(([]PodAffinityTerm)(x.RequiredDuringSchedulingIgnoredDuringExecution), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("requiredDuringSchedulingIgnoredDuringExecution")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSlicePodAffinityTerm(([]PodAffinityTerm)(x.RequiredDuringSchedulingIgnoredDuringExecution), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.PreferredDuringSchedulingIgnoredDuringExecution == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + h.encSliceWeightedPodAffinityTerm(([]WeightedPodAffinityTerm)(x.PreferredDuringSchedulingIgnoredDuringExecution), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("preferredDuringSchedulingIgnoredDuringExecution")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.PreferredDuringSchedulingIgnoredDuringExecution == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + 
h.encSliceWeightedPodAffinityTerm(([]WeightedPodAffinityTerm)(x.PreferredDuringSchedulingIgnoredDuringExecution), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodAntiAffinity) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodAntiAffinity) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "requiredDuringSchedulingIgnoredDuringExecution": + if r.TryDecodeAsNil() { + x.RequiredDuringSchedulingIgnoredDuringExecution = nil + } else { + yyv4 := &x.RequiredDuringSchedulingIgnoredDuringExecution + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSlicePodAffinityTerm((*[]PodAffinityTerm)(yyv4), d) + } + } + case "preferredDuringSchedulingIgnoredDuringExecution": + if r.TryDecodeAsNil() { + x.PreferredDuringSchedulingIgnoredDuringExecution = nil + } else { + yyv6 := &x.PreferredDuringSchedulingIgnoredDuringExecution + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + h.decSliceWeightedPodAffinityTerm((*[]WeightedPodAffinityTerm)(yyv6), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodAntiAffinity) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.RequiredDuringSchedulingIgnoredDuringExecution = nil + } else { + yyv9 := &x.RequiredDuringSchedulingIgnoredDuringExecution + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + h.decSlicePodAffinityTerm((*[]PodAffinityTerm)(yyv9), d) + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + 
x.PreferredDuringSchedulingIgnoredDuringExecution = nil + } else { + yyv11 := &x.PreferredDuringSchedulingIgnoredDuringExecution + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + h.decSliceWeightedPodAffinityTerm((*[]WeightedPodAffinityTerm)(yyv11), d) + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *WeightedPodAffinityTerm) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeInt(int64(x.Weight)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("weight")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(x.Weight)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy7 := &x.PodAffinityTerm + yy7.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("podAffinityTerm")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy9 := &x.PodAffinityTerm + yy9.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *WeightedPodAffinityTerm) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *WeightedPodAffinityTerm) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + 
z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "weight": + if r.TryDecodeAsNil() { + x.Weight = 0 + } else { + x.Weight = int32(r.DecodeInt(32)) + } + case "podAffinityTerm": + if r.TryDecodeAsNil() { + x.PodAffinityTerm = PodAffinityTerm{} + } else { + yyv5 := &x.PodAffinityTerm + yyv5.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *WeightedPodAffinityTerm) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Weight = 0 + } else { + x.Weight = int32(r.DecodeInt(32)) + } + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.PodAffinityTerm = PodAffinityTerm{} + } else { + yyv8 := &x.PodAffinityTerm + yyv8.CodecDecodeSelf(d) + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PodAffinityTerm) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.LabelSelector != nil + yyq2[2] = x.TopologyKey != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.LabelSelector == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else if z.HasExtensions() && z.EncExt(x.LabelSelector) { + } else { + z.EncFallback(x.LabelSelector) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("labelSelector")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.LabelSelector == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.EncExt(x.LabelSelector) { + } else { + z.EncFallback(x.LabelSelector) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Namespaces == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + z.F.EncSliceStringV(x.Namespaces, false, e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, 
string("namespaces")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Namespaces == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + z.F.EncSliceStringV(x.Namespaces, false, e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.TopologyKey)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("topologyKey")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.TopologyKey)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodAffinityTerm) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodAffinityTerm) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "labelSelector": + if r.TryDecodeAsNil() { + if x.LabelSelector != nil { + x.LabelSelector = nil + } + } else { + if x.LabelSelector == nil { + x.LabelSelector = new(pkg2_unversioned.LabelSelector) + } + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(x.LabelSelector) { + } else { + z.DecFallback(x.LabelSelector, false) + } + } + case "namespaces": + if r.TryDecodeAsNil() { + x.Namespaces = nil + } else { + yyv6 := &x.Namespaces + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + z.F.DecSliceStringX(yyv6, false, d) + } + } + case "topologyKey": + if r.TryDecodeAsNil() { + x.TopologyKey = "" + } else { + x.TopologyKey = string(r.DecodeString()) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodAffinityTerm) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj9 int + var yyb9 bool + var yyhl9 bool = l >= 0 + yyj9++ + if yyhl9 { + 
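// Editor's sketch (not part of the patch) of the yyq2/yynn2 pattern in
// the PodAffinityTerm encoder above: optional fields are flagged in
// yyq2, the map form counts and writes only the flagged fields (yynn2
// starts at 1 for the always-written namespaces slot), while the array
// form (StructToArray) writes every position, emitting zero values for
// absent fields. The field names below are hypothetical analogues.
package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

type term struct {
	LabelSelector map[string]string `codec:"labelSelector,omitempty"`
	Namespaces    []string          `codec:"namespaces"`
	TopologyKey   string            `codec:"topologyKey,omitempty"`
}

func encode(h codec.Handle, v interface{}) string {
	var b []byte
	if err := codec.NewEncoderBytes(&b, h).Encode(v); err != nil {
		panic(err)
	}
	return string(b)
}

func main() {
	t := term{Namespaces: []string{"default"}}

	jh := new(codec.JsonHandle)
	fmt.Println(encode(jh, t)) // {"namespaces":["default"]} — empty optionals omitted

	ah := new(codec.JsonHandle)
	ah.StructToArray = true
	fmt.Println(encode(ah, t)) // [null,["default"],""] — positional, nothing omitted
}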
yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.LabelSelector != nil { + x.LabelSelector = nil + } + } else { + if x.LabelSelector == nil { + x.LabelSelector = new(pkg2_unversioned.LabelSelector) + } + yym11 := z.DecBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.DecExt(x.LabelSelector) { + } else { + z.DecFallback(x.LabelSelector, false) + } + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Namespaces = nil + } else { + yyv12 := &x.Namespaces + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + z.F.DecSliceStringX(yyv12, false, d) + } + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.TopologyKey = "" + } else { + x.TopologyKey = string(r.DecodeString()) + } + for { + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj9-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *NodeAffinity) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.RequiredDuringSchedulingIgnoredDuringExecution != nil + yyq2[1] = len(x.PreferredDuringSchedulingIgnoredDuringExecution) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { + r.EncodeNil() + } else { + x.RequiredDuringSchedulingIgnoredDuringExecution.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("requiredDuringSchedulingIgnoredDuringExecution")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { + r.EncodeNil() + } else { + x.RequiredDuringSchedulingIgnoredDuringExecution.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.PreferredDuringSchedulingIgnoredDuringExecution == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + h.encSlicePreferredSchedulingTerm(([]PreferredSchedulingTerm)(x.PreferredDuringSchedulingIgnoredDuringExecution), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + 
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("preferredDuringSchedulingIgnoredDuringExecution")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.PreferredDuringSchedulingIgnoredDuringExecution == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + h.encSlicePreferredSchedulingTerm(([]PreferredSchedulingTerm)(x.PreferredDuringSchedulingIgnoredDuringExecution), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *NodeAffinity) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -22191,28 +24388,461 @@ func (x *NodeAffinity) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) switch yys3 { - case "requiredDuringSchedulingIgnoredDuringExecution": + case "requiredDuringSchedulingIgnoredDuringExecution": + if r.TryDecodeAsNil() { + if x.RequiredDuringSchedulingIgnoredDuringExecution != nil { + x.RequiredDuringSchedulingIgnoredDuringExecution = nil + } + } else { + if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { + x.RequiredDuringSchedulingIgnoredDuringExecution = new(NodeSelector) + } + x.RequiredDuringSchedulingIgnoredDuringExecution.CodecDecodeSelf(d) + } + case "preferredDuringSchedulingIgnoredDuringExecution": + if r.TryDecodeAsNil() { + x.PreferredDuringSchedulingIgnoredDuringExecution = nil + } else { + yyv5 := &x.PreferredDuringSchedulingIgnoredDuringExecution + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + h.decSlicePreferredSchedulingTerm((*[]PreferredSchedulingTerm)(yyv5), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *NodeAffinity) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.RequiredDuringSchedulingIgnoredDuringExecution != nil { + x.RequiredDuringSchedulingIgnoredDuringExecution = nil + } + } else { + if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { + x.RequiredDuringSchedulingIgnoredDuringExecution = new(NodeSelector) + } + x.RequiredDuringSchedulingIgnoredDuringExecution.CodecDecodeSelf(d) + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.PreferredDuringSchedulingIgnoredDuringExecution = nil + } else { + yyv9 := &x.PreferredDuringSchedulingIgnoredDuringExecution + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + h.decSlicePreferredSchedulingTerm((*[]PreferredSchedulingTerm)(yyv9), d) + } + } + for { + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + 
z.DecStructFieldNotFound(yyj7-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PreferredSchedulingTerm) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeInt(int64(x.Weight)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("weight")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(x.Weight)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy7 := &x.Preference + yy7.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("preference")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy9 := &x.Preference + yy9.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PreferredSchedulingTerm) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PreferredSchedulingTerm) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "weight": + if r.TryDecodeAsNil() { + x.Weight = 0 + } else { + x.Weight = int32(r.DecodeInt(32)) + } + case "preference": + if r.TryDecodeAsNil() { + x.Preference = NodeSelectorTerm{} + } else { + yyv5 := &x.Preference + yyv5.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + 
z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PreferredSchedulingTerm) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Weight = 0 + } else { + x.Weight = int32(r.DecodeInt(32)) + } + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Preference = NodeSelectorTerm{} + } else { + yyv8 := &x.Preference + yyv8.CodecDecodeSelf(d) + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *Taint) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.Value != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Key)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("key")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Key)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Value)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("value")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Value)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + x.Effect.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("effect")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Effect.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Taint) 
CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Taint) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "key": if r.TryDecodeAsNil() { - if x.RequiredDuringSchedulingIgnoredDuringExecution != nil { - x.RequiredDuringSchedulingIgnoredDuringExecution = nil - } + x.Key = "" } else { - if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { - x.RequiredDuringSchedulingIgnoredDuringExecution = new(NodeSelector) - } - x.RequiredDuringSchedulingIgnoredDuringExecution.CodecDecodeSelf(d) + x.Key = string(r.DecodeString()) } - case "preferredDuringSchedulingIgnoredDuringExecution": + case "value": if r.TryDecodeAsNil() { - x.PreferredDuringSchedulingIgnoredDuringExecution = nil + x.Value = "" } else { - yyv5 := &x.PreferredDuringSchedulingIgnoredDuringExecution - yym6 := z.DecBinary() - _ = yym6 - if false { - } else { - h.decSlicePreferredSchedulingTerm((*[]PreferredSchedulingTerm)(yyv5), d) - } + x.Value = string(r.DecodeString()) + } + case "effect": + if r.TryDecodeAsNil() { + x.Effect = "" + } else { + x.Effect = TaintEffect(r.DecodeString()) } default: z.DecStructFieldNotFound(-1, yys3) @@ -22221,7 +24851,7 @@ func (x *NodeAffinity) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *NodeAffinity) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *Taint) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -22240,14 +24870,9 @@ func (x *NodeAffinity) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - if x.RequiredDuringSchedulingIgnoredDuringExecution != nil { - x.RequiredDuringSchedulingIgnoredDuringExecution = nil - } + x.Key = "" } else { - if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { - x.RequiredDuringSchedulingIgnoredDuringExecution = new(NodeSelector) - } - x.RequiredDuringSchedulingIgnoredDuringExecution.CodecDecodeSelf(d) + x.Key = string(r.DecodeString()) } yyj7++ if yyhl7 { @@ -22261,15 +24886,25 @@ func (x *NodeAffinity) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - 
x.PreferredDuringSchedulingIgnoredDuringExecution = nil + x.Value = "" } else { - yyv9 := &x.PreferredDuringSchedulingIgnoredDuringExecution - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - h.decSlicePreferredSchedulingTerm((*[]PreferredSchedulingTerm)(yyv9), d) - } + x.Value = string(r.DecodeString()) + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Effect = "" + } else { + x.Effect = TaintEffect(r.DecodeString()) } for { yyj7++ @@ -22287,7 +24922,33 @@ func (x *NodeAffinity) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *PreferredSchedulingTerm) CodecEncodeSelf(e *codec1978.Encoder) { +func (x TaintEffect) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *TaintEffect) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *Toleration) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -22301,14 +24962,18 @@ func (x *PreferredSchedulingTerm) CodecEncodeSelf(e *codec1978.Encoder) { } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool + var yyq2 [4]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false + yyq2[0] = x.Key != "" + yyq2[1] = x.Operator != "" + yyq2[2] = x.Value != "" + yyq2[3] = x.Effect != "" var yynn2 int if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) + r.EncodeArrayStart(4) } else { - yynn2 = 2 + yynn2 = 0 for _, b := range yyq2 { if b { yynn2++ @@ -22319,33 +24984,83 @@ func (x *PreferredSchedulingTerm) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Key)) + } } else { - r.EncodeInt(int64(x.Weight)) + r.EncodeString(codecSelferC_UTF81234, "") } } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("weight")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("key")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Key)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + x.Operator.CodecEncodeSelf(e) } else { - r.EncodeInt(int64(x.Weight)) + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("operator")) + 
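// Editor's sketch (not part of the patch): TaintEffect above is a
// string alias, so its codec methods reduce to a cast — encode writes
// the bare string and decode assigns through *((*string)(x)). The same
// pattern covers TolerationOperator further down. taintEffect here is a
// hypothetical stand-in.
package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

type taintEffect string

func main() {
	jh := new(codec.JsonHandle)

	var b []byte
	if err := codec.NewEncoderBytes(&b, jh).Encode(taintEffect("NoSchedule")); err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // "NoSchedule"

	var e taintEffect
	if err := codec.NewDecoderBytes(b, jh).Decode(&e); err != nil {
		panic(err)
	}
	fmt.Println(e == "NoSchedule") // true
}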
z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Operator.CodecEncodeSelf(e) } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy7 := &x.Preference - yy7.CodecEncodeSelf(e) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Value)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("preference")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy9 := &x.Preference - yy9.CodecEncodeSelf(e) + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("value")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Value)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + x.Effect.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("effect")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Effect.CodecEncodeSelf(e) + } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) @@ -22356,7 +25071,7 @@ func (x *PreferredSchedulingTerm) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *PreferredSchedulingTerm) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *Toleration) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -22386,7 +25101,7 @@ func (x *PreferredSchedulingTerm) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *PreferredSchedulingTerm) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *Toleration) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -22408,18 +25123,29 @@ func (x *PreferredSchedulingTerm) codecDecodeSelfFromMap(l int, d *codec1978.Dec yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) switch yys3 { - case "weight": + case "key": if r.TryDecodeAsNil() { - x.Weight = 0 + x.Key = "" } else { - x.Weight = int(r.DecodeInt(codecSelferBitsize1234)) + x.Key = string(r.DecodeString()) } - case "preference": + case "operator": if r.TryDecodeAsNil() { - x.Preference = NodeSelectorTerm{} + x.Operator = "" } else { - yyv5 := &x.Preference - yyv5.CodecDecodeSelf(d) + x.Operator = TolerationOperator(r.DecodeString()) + } + case "value": + if r.TryDecodeAsNil() { + x.Value = "" + } else { + x.Value = string(r.DecodeString()) + } + case "effect": + if r.TryDecodeAsNil() { + x.Effect = "" + } else { + x.Effect = TaintEffect(r.DecodeString()) } default: z.DecStructFieldNotFound(-1, yys3) @@ -22428,62 +25154,119 @@ func (x *PreferredSchedulingTerm) codecDecodeSelfFromMap(l int, d *codec1978.Dec z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *PreferredSchedulingTerm) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *Toleration) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l + 
var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb6 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb6 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Weight = 0 + x.Key = "" + } else { + x.Key = string(r.DecodeString()) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - x.Weight = int(r.DecodeInt(codecSelferBitsize1234)) + yyb8 = r.CheckBreak() } - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Operator = "" } else { - yyb6 = r.CheckBreak() + x.Operator = TolerationOperator(r.DecodeString()) } - if yyb6 { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Preference = NodeSelectorTerm{} + x.Value = "" } else { - yyv8 := &x.Preference - yyv8.CodecDecodeSelf(d) + x.Value = string(r.DecodeString()) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Effect = "" + } else { + x.Effect = TaintEffect(r.DecodeString()) } for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb6 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb6 { + if yyb8 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") + z.DecStructFieldNotFound(yyj8-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } +func (x TolerationOperator) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *TolerationOperator) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + func (x *PodSpec) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) @@ -22498,7 +25281,7 @@ func (x *PodSpec) CodecEncodeSelf(e *codec1978.Encoder) { } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [15]bool + var yyq2 [17]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false yyq2[0] = len(x.Volumes) != 0 @@ -22515,9 +25298,11 @@ func (x *PodSpec) CodecEncodeSelf(e *codec1978.Encoder) { yyq2[12] = x.HostIPC != false yyq2[13] = x.SecurityContext != nil yyq2[14] = len(x.ImagePullSecrets) != 0 + yyq2[15] = x.Hostname != "" + yyq2[16] = x.Subdomain != "" var yynn2 int if yyr2 || yy2arr2 { - r.EncodeArrayStart(15) + r.EncodeArrayStart(17) } else { yynn2 = 1 for _, b := range yyq2 { @@ -22927,6 +25712,56 @@ func (x *PodSpec) CodecEncodeSelf(e *codec1978.Encoder) { } } } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if 
yyq2[15] { + yym53 := z.EncBinary() + _ = yym53 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Hostname)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[15] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("hostname")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym54 := z.EncBinary() + _ = yym54 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Hostname)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[16] { + yym56 := z.EncBinary() + _ = yym56 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Subdomain)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[16] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("subdomain")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym57 := z.EncBinary() + _ = yym57 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Subdomain)) + } + } + } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { @@ -23127,6 +25962,18 @@ func (x *PodSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { h.decSliceLocalObjectReference((*[]LocalObjectReference)(yyv23), d) } } + case "hostname": + if r.TryDecodeAsNil() { + x.Hostname = "" + } else { + x.Hostname = string(r.DecodeString()) + } + case "subdomain": + if r.TryDecodeAsNil() { + x.Subdomain = "" + } else { + x.Subdomain = string(r.DecodeString()) + } default: z.DecStructFieldNotFound(-1, yys3) } // end switch yys3 @@ -23138,16 +25985,16 @@ func (x *PodSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj25 int - var yyb25 bool - var yyhl25 bool = l >= 0 - yyj25++ - if yyhl25 { - yyb25 = yyj25 > l + var yyj27 int + var yyb27 bool + var yyhl27 bool = l >= 0 + yyj27++ + if yyhl27 { + yyb27 = yyj27 > l } else { - yyb25 = r.CheckBreak() + yyb27 = r.CheckBreak() } - if yyb25 { + if yyb27 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -23155,21 +26002,21 @@ func (x *PodSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Volumes = nil } else { - yyv26 := &x.Volumes - yym27 := z.DecBinary() - _ = yym27 + yyv28 := &x.Volumes + yym29 := z.DecBinary() + _ = yym29 if false { } else { - h.decSliceVolume((*[]Volume)(yyv26), d) + h.decSliceVolume((*[]Volume)(yyv28), d) } } - yyj25++ - if yyhl25 { - yyb25 = yyj25 > l + yyj27++ + if yyhl27 { + yyb27 = yyj27 > l } else { - yyb25 = r.CheckBreak() + yyb27 = r.CheckBreak() } - if yyb25 { + if yyb27 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -23177,21 +26024,21 @@ func (x *PodSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Containers = nil } else { - yyv28 := &x.Containers - yym29 := z.DecBinary() - _ = yym29 + yyv30 := &x.Containers + yym31 := z.DecBinary() + _ = yym31 if false { } else { - h.decSliceContainer((*[]Container)(yyv28), d) + h.decSliceContainer((*[]Container)(yyv30), d) } } - yyj25++ - if yyhl25 { - yyb25 = yyj25 > l + yyj27++ + if yyhl27 { + yyb27 = yyj27 > l } else { - yyb25 = r.CheckBreak() + yyb27 = r.CheckBreak() } - if yyb25 { + if yyb27 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -23201,13 +26048,13 @@ func 
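// Editor's sketch (not part of the patch): PodSpec's positional array
// grows from 15 to 17 slots for the new hostname and subdomain fields,
// which is why every yyj25/yyb25 local below is mechanically renumbered
// to yyj27/yyb27. The array decoder returns early once the incoming
// array is exhausted, so shorter payloads written before the new fields
// existed still decode, leaving the trailing fields at zero values.
// miniSpec is a hypothetical three-field analogue, assuming the
// reflection decoder mirrors that early return.
package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

type miniSpec struct {
	NodeName  string `codec:"nodeName"`
	Hostname  string `codec:"hostname"`  // newly added slot
	Subdomain string `codec:"subdomain"` // newly added slot
}

func main() {
	jh := new(codec.JsonHandle)
	var s miniSpec
	// A payload written when only nodeName existed.
	if err := codec.NewDecoderBytes([]byte(`["node-1"]`), jh).Decode(&s); err != nil {
		panic(err)
	}
	fmt.Printf("%q %q %q\n", s.NodeName, s.Hostname, s.Subdomain) // "node-1" "" ""
}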
(x *PodSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } else { x.RestartPolicy = RestartPolicy(r.DecodeString()) } - yyj25++ - if yyhl25 { - yyb25 = yyj25 > l + yyj27++ + if yyhl27 { + yyb27 = yyj27 > l } else { - yyb25 = r.CheckBreak() + yyb27 = r.CheckBreak() } - if yyb25 { + if yyb27 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -23220,20 +26067,20 @@ func (x *PodSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.TerminationGracePeriodSeconds == nil { x.TerminationGracePeriodSeconds = new(int64) } - yym32 := z.DecBinary() - _ = yym32 + yym34 := z.DecBinary() + _ = yym34 if false { } else { *((*int64)(x.TerminationGracePeriodSeconds)) = int64(r.DecodeInt(64)) } } - yyj25++ - if yyhl25 { - yyb25 = yyj25 > l + yyj27++ + if yyhl27 { + yyb27 = yyj27 > l } else { - yyb25 = r.CheckBreak() + yyb27 = r.CheckBreak() } - if yyb25 { + if yyb27 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -23246,20 +26093,20 @@ func (x *PodSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.ActiveDeadlineSeconds == nil { x.ActiveDeadlineSeconds = new(int64) } - yym34 := z.DecBinary() - _ = yym34 + yym36 := z.DecBinary() + _ = yym36 if false { } else { *((*int64)(x.ActiveDeadlineSeconds)) = int64(r.DecodeInt(64)) } } - yyj25++ - if yyhl25 { - yyb25 = yyj25 > l + yyj27++ + if yyhl27 { + yyb27 = yyj27 > l } else { - yyb25 = r.CheckBreak() + yyb27 = r.CheckBreak() } - if yyb25 { + if yyb27 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -23269,13 +26116,13 @@ func (x *PodSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } else { x.DNSPolicy = DNSPolicy(r.DecodeString()) } - yyj25++ - if yyhl25 { - yyb25 = yyj25 > l + yyj27++ + if yyhl27 { + yyb27 = yyj27 > l } else { - yyb25 = r.CheckBreak() + yyb27 = r.CheckBreak() } - if yyb25 { + if yyb27 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -23283,21 +26130,21 @@ func (x *PodSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.NodeSelector = nil } else { - yyv36 := &x.NodeSelector - yym37 := z.DecBinary() - _ = yym37 + yyv38 := &x.NodeSelector + yym39 := z.DecBinary() + _ = yym39 if false { } else { - z.F.DecMapStringStringX(yyv36, false, d) + z.F.DecMapStringStringX(yyv38, false, d) } } - yyj25++ - if yyhl25 { - yyb25 = yyj25 > l + yyj27++ + if yyhl27 { + yyb27 = yyj27 > l } else { - yyb25 = r.CheckBreak() + yyb27 = r.CheckBreak() } - if yyb25 { + if yyb27 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -23307,13 +26154,13 @@ func (x *PodSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } else { x.ServiceAccountName = string(r.DecodeString()) } - yyj25++ - if yyhl25 { - yyb25 = yyj25 > l + yyj27++ + if yyhl27 { + yyb27 = yyj27 > l } else { - yyb25 = r.CheckBreak() + yyb27 = r.CheckBreak() } - if yyb25 { + if yyb27 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -23323,13 +26170,13 @@ func (x *PodSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } else { x.DeprecatedServiceAccount = string(r.DecodeString()) } - yyj25++ - if yyhl25 { - yyb25 = yyj25 > l + yyj27++ + if yyhl27 { + yyb27 = yyj27 > l } else { - yyb25 = r.CheckBreak() + yyb27 = r.CheckBreak() } - if yyb25 { + if yyb27 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -23339,13 +26186,13 @@ func (x *PodSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } else { x.NodeName = string(r.DecodeString()) } - yyj25++ - 
if yyhl25 { - yyb25 = yyj25 > l + yyj27++ + if yyhl27 { + yyb27 = yyj27 > l } else { - yyb25 = r.CheckBreak() + yyb27 = r.CheckBreak() } - if yyb25 { + if yyb27 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -23355,13 +26202,13 @@ func (x *PodSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } else { x.HostNetwork = bool(r.DecodeBool()) } - yyj25++ - if yyhl25 { - yyb25 = yyj25 > l + yyj27++ + if yyhl27 { + yyb27 = yyj27 > l } else { - yyb25 = r.CheckBreak() + yyb27 = r.CheckBreak() } - if yyb25 { + if yyb27 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -23371,13 +26218,13 @@ func (x *PodSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } else { x.HostPID = bool(r.DecodeBool()) } - yyj25++ - if yyhl25 { - yyb25 = yyj25 > l + yyj27++ + if yyhl27 { + yyb27 = yyj27 > l } else { - yyb25 = r.CheckBreak() + yyb27 = r.CheckBreak() } - if yyb25 { + if yyb27 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -23387,13 +26234,13 @@ func (x *PodSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } else { x.HostIPC = bool(r.DecodeBool()) } - yyj25++ - if yyhl25 { - yyb25 = yyj25 > l + yyj27++ + if yyhl27 { + yyb27 = yyj27 > l } else { - yyb25 = r.CheckBreak() + yyb27 = r.CheckBreak() } - if yyb25 { + if yyb27 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -23408,13 +26255,13 @@ func (x *PodSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.SecurityContext.CodecDecodeSelf(d) } - yyj25++ - if yyhl25 { - yyb25 = yyj25 > l + yyj27++ + if yyhl27 { + yyb27 = yyj27 > l } else { - yyb25 = r.CheckBreak() + yyb27 = r.CheckBreak() } - if yyb25 { + if yyb27 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -23422,26 +26269,58 @@ func (x *PodSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.ImagePullSecrets = nil } else { - yyv45 := &x.ImagePullSecrets - yym46 := z.DecBinary() - _ = yym46 + yyv47 := &x.ImagePullSecrets + yym48 := z.DecBinary() + _ = yym48 if false { } else { - h.decSliceLocalObjectReference((*[]LocalObjectReference)(yyv45), d) + h.decSliceLocalObjectReference((*[]LocalObjectReference)(yyv47), d) } } + yyj27++ + if yyhl27 { + yyb27 = yyj27 > l + } else { + yyb27 = r.CheckBreak() + } + if yyb27 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Hostname = "" + } else { + x.Hostname = string(r.DecodeString()) + } + yyj27++ + if yyhl27 { + yyb27 = yyj27 > l + } else { + yyb27 = r.CheckBreak() + } + if yyb27 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Subdomain = "" + } else { + x.Subdomain = string(r.DecodeString()) + } for { - yyj25++ - if yyhl25 { - yyb25 = yyj25 > l + yyj27++ + if yyhl27 { + yyb27 = yyj27 > l } else { - yyb25 = r.CheckBreak() + yyb27 = r.CheckBreak() } - if yyb25 { + if yyb27 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj25-1, "") + z.DecStructFieldNotFound(yyj27-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -28249,7 +31128,7 @@ func (x *ServiceSpec) CodecEncodeSelf(e *codec1978.Encoder) { } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [8]bool + var yyq2 [9]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false 
yyq2[1] = len(x.Selector) != 0 @@ -28259,9 +31138,10 @@ func (x *ServiceSpec) CodecEncodeSelf(e *codec1978.Encoder) { yyq2[5] = len(x.DeprecatedPublicIPs) != 0 yyq2[6] = x.SessionAffinity != "" yyq2[7] = x.LoadBalancerIP != "" + yyq2[8] = len(x.LoadBalancerSourceRanges) != 0 var yynn2 int if yyr2 || yy2arr2 { - r.EncodeArrayStart(8) + r.EncodeArrayStart(9) } else { yynn2 = 1 for _, b := range yyq2 { @@ -28478,6 +31358,39 @@ func (x *ServiceSpec) CodecEncodeSelf(e *codec1978.Encoder) { } } } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[8] { + if x.LoadBalancerSourceRanges == nil { + r.EncodeNil() + } else { + yym28 := z.EncBinary() + _ = yym28 + if false { + } else { + z.F.EncSliceStringV(x.LoadBalancerSourceRanges, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[8] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("loadBalancerSourceRanges")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.LoadBalancerSourceRanges == nil { + r.EncodeNil() + } else { + yym29 := z.EncBinary() + _ = yym29 + if false { + } else { + z.F.EncSliceStringV(x.LoadBalancerSourceRanges, false, e) + } + } + } + } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { @@ -28611,6 +31524,18 @@ func (x *ServiceSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } else { x.LoadBalancerIP = string(r.DecodeString()) } + case "loadBalancerSourceRanges": + if r.TryDecodeAsNil() { + x.LoadBalancerSourceRanges = nil + } else { + yyv16 := &x.LoadBalancerSourceRanges + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + z.F.DecSliceStringX(yyv16, false, d) + } + } default: z.DecStructFieldNotFound(-1, yys3) } // end switch yys3 @@ -28622,16 +31547,16 @@ func (x *ServiceSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj16 int - var yyb16 bool - var yyhl16 bool = l >= 0 - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l + var yyj18 int + var yyb18 bool + var yyhl18 bool = l >= 0 + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb16 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb16 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -28639,21 +31564,21 @@ func (x *ServiceSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Ports = nil } else { - yyv17 := &x.Ports - yym18 := z.DecBinary() - _ = yym18 + yyv19 := &x.Ports + yym20 := z.DecBinary() + _ = yym20 if false { } else { - h.decSliceServicePort((*[]ServicePort)(yyv17), d) + h.decSliceServicePort((*[]ServicePort)(yyv19), d) } } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb16 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb16 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -28661,21 +31586,21 @@ func (x *ServiceSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Selector = nil } else { - yyv19 := &x.Selector - yym20 := z.DecBinary() - _ = yym20 + yyv21 := &x.Selector + yym22 := z.DecBinary() + _ = yym22 if false { } else { - z.F.DecMapStringStringX(yyv19, false, d) + z.F.DecMapStringStringX(yyv21, false, d) } } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb16 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb16 { + if yyb18 { 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -28685,13 +31610,13 @@ func (x *ServiceSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } else { x.ClusterIP = string(r.DecodeString()) } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb16 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb16 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -28701,13 +31626,13 @@ func (x *ServiceSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } else { x.Type = ServiceType(r.DecodeString()) } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb16 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb16 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -28715,21 +31640,21 @@ func (x *ServiceSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.ExternalIPs = nil } else { - yyv23 := &x.ExternalIPs - yym24 := z.DecBinary() - _ = yym24 + yyv25 := &x.ExternalIPs + yym26 := z.DecBinary() + _ = yym26 if false { } else { - z.F.DecSliceStringX(yyv23, false, d) + z.F.DecSliceStringX(yyv25, false, d) } } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb16 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb16 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -28737,21 +31662,21 @@ func (x *ServiceSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.DeprecatedPublicIPs = nil } else { - yyv25 := &x.DeprecatedPublicIPs - yym26 := z.DecBinary() - _ = yym26 + yyv27 := &x.DeprecatedPublicIPs + yym28 := z.DecBinary() + _ = yym28 if false { } else { - z.F.DecSliceStringX(yyv25, false, d) + z.F.DecSliceStringX(yyv27, false, d) } } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb16 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb16 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -28761,13 +31686,13 @@ func (x *ServiceSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } else { x.SessionAffinity = ServiceAffinity(r.DecodeString()) } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb16 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb16 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -28777,18 +31702,40 @@ func (x *ServiceSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } else { x.LoadBalancerIP = string(r.DecodeString()) } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.LoadBalancerSourceRanges = nil + } else { + yyv31 := &x.LoadBalancerSourceRanges + yym32 := z.DecBinary() + _ = yym32 + if false { + } else { + z.F.DecSliceStringX(yyv31, false, d) + } + } for { - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb16 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb16 { + if yyb18 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj16-1, "") + z.DecStructFieldNotFound(yyj18-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) 
} @@ -29025,7 +31972,7 @@ func (x *ServicePort) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } case "targetPort": if r.TryDecodeAsNil() { - x.TargetPort = pkg5_intstr.IntOrString{} + x.TargetPort = pkg4_intstr.IntOrString{} } else { yyv7 := &x.TargetPort yym8 := z.DecBinary() @@ -29118,7 +32065,7 @@ func (x *ServicePort) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.TargetPort = pkg5_intstr.IntOrString{} + x.TargetPort = pkg4_intstr.IntOrString{} } else { yyv14 := &x.TargetPort yym15 := z.DecBinary() @@ -31248,13 +34195,14 @@ func (x *EndpointAddress) CodecEncodeSelf(e *codec1978.Encoder) { } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool + var yyq2 [3]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false - yyq2[1] = x.TargetRef != nil + yyq2[1] = x.Hostname != "" + yyq2[2] = x.TargetRef != nil var yynn2 int if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) + r.EncodeArrayStart(3) } else { yynn2 = 1 for _, b := range yyq2 { @@ -31287,6 +34235,31 @@ func (x *EndpointAddress) CodecEncodeSelf(e *codec1978.Encoder) { if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Hostname)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("hostname")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Hostname)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { if x.TargetRef == nil { r.EncodeNil() } else { @@ -31296,7 +34269,7 @@ func (x *EndpointAddress) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq2[1] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("targetRef")) z.EncSendContainerState(codecSelfer_containerMapValue1234) @@ -31374,6 +34347,12 @@ func (x *EndpointAddress) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } else { x.IP = string(r.DecodeString()) } + case "hostname": + if r.TryDecodeAsNil() { + x.Hostname = "" + } else { + x.Hostname = string(r.DecodeString()) + } case "targetRef": if r.TryDecodeAsNil() { if x.TargetRef != nil { @@ -31396,16 +34375,16 @@ func (x *EndpointAddress) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb6 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb6 { + if yyb7 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -31415,13 +34394,29 @@ func (x *EndpointAddress) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) } else { x.IP = string(r.DecodeString()) } - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb6 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb6 { + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Hostname = "" + } else { + x.Hostname = string(r.DecodeString()) + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -31437,17 +34432,17 @@ func (x *EndpointAddress) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) x.TargetRef.CodecDecodeSelf(d) } for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb6 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb6 { + if yyb7 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") + z.DecStructFieldNotFound(yyj7-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -32696,14 +35691,14 @@ func (x *NodeSystemInfo) CodecEncodeSelf(e *codec1978.Encoder) { } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [8]bool + var yyq2 [10]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false var yynn2 int if yyr2 || yy2arr2 { - r.EncodeArrayStart(8) + r.EncodeArrayStart(10) } else { - yynn2 = 8 + yynn2 = 10 for _, b := range yyq2 { if b { yynn2++ @@ -32864,6 +35859,44 @@ func (x *NodeSystemInfo) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, string(x.KubeProxyVersion)) } } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym28 := z.EncBinary() + _ = yym28 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.OperatingSystem)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("operatingSystem")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym29 := z.EncBinary() + _ = yym29 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.OperatingSystem)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym31 := z.EncBinary() + _ = yym31 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Architecture)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("architecture")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym32 := z.EncBinary() + _ = yym32 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Architecture)) + } + } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { @@ -32973,6 +36006,18 @@ func (x *NodeSystemInfo) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } else { x.KubeProxyVersion = string(r.DecodeString()) } + case "operatingSystem": + if r.TryDecodeAsNil() { + x.OperatingSystem = "" + } else { + x.OperatingSystem = string(r.DecodeString()) + } + case "architecture": + if r.TryDecodeAsNil() { + x.Architecture = "" + } else { + x.Architecture = string(r.DecodeString()) + } default: z.DecStructFieldNotFound(-1, yys3) } // end switch yys3 @@ -32984,16 +36029,16 @@ func (x *NodeSystemInfo) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l + var yyj14 int + var yyb14 bool + var yyhl14 bool = l >= 0 + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb12 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if 
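EndpointAddress gains an optional hostname slot between ip and targetRef, which is why its presence bitmap grows from [2]bool to [3]bool and TargetRef's guard moves from yyq2[1] to yyq2[2]. The implied struct, as a sketch (ObjectReference is defined elsewhere in this package):

type EndpointAddress struct {
	IP string `json:"ip"`
	// New: serialized only when non-empty (yyq2[1] = x.Hostname != "").
	Hostname  string           `json:"hostname,omitempty"`
	TargetRef *ObjectReference `json:"targetRef,omitempty"`
}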
yyb12 { + if yyb14 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -33003,13 +36048,13 @@ func (x *NodeSystemInfo) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } else { x.MachineID = string(r.DecodeString()) } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb12 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb12 { + if yyb14 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -33019,13 +36064,13 @@ func (x *NodeSystemInfo) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } else { x.SystemUUID = string(r.DecodeString()) } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb12 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb12 { + if yyb14 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -33035,13 +36080,13 @@ func (x *NodeSystemInfo) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } else { x.BootID = string(r.DecodeString()) } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb12 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb12 { + if yyb14 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -33051,13 +36096,13 @@ func (x *NodeSystemInfo) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } else { x.KernelVersion = string(r.DecodeString()) } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb12 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb12 { + if yyb14 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -33067,13 +36112,13 @@ func (x *NodeSystemInfo) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } else { x.OSImage = string(r.DecodeString()) } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb12 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb12 { + if yyb14 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -33083,13 +36128,13 @@ func (x *NodeSystemInfo) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } else { x.ContainerRuntimeVersion = string(r.DecodeString()) } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb12 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb12 { + if yyb14 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -33099,13 +36144,13 @@ func (x *NodeSystemInfo) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } else { x.KubeletVersion = string(r.DecodeString()) } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb12 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb12 { + if yyb14 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -33115,18 +36160,50 @@ func (x *NodeSystemInfo) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } else { x.KubeProxyVersion = string(r.DecodeString()) } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.OperatingSystem = "" + } else { + x.OperatingSystem = string(r.DecodeString()) + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + 
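NodeSystemInfo picks up two string fields that the encoder writes unconditionally — no yyq2 presence guard, so they behave as required rather than omitempty — which accounts for EncodeArrayStart(8) becoming EncodeArrayStart(10); the array-decode branch for architecture follows below. A sketch of the struct tail, tags taken from the generated map keys:

type NodeSystemInfo struct {
	// ... MachineID, SystemUUID, BootID, KernelVersion, OSImage,
	// ContainerRuntimeVersion, KubeletVersion, KubeProxyVersion ...

	// New: reported by the kubelet; always encoded.
	OperatingSystem string `json:"operatingSystem"`
	Architecture    string `json:"architecture"`
}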
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Architecture = "" + } else { + x.Architecture = string(r.DecodeString()) + } for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb12 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb12 { + if yyb14 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") + z.DecStructFieldNotFound(yyj14-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -33155,11 +36232,12 @@ func (x *NodeStatus) CodecEncodeSelf(e *codec1978.Encoder) { yyq2[4] = len(x.Addresses) != 0 yyq2[5] = true yyq2[6] = true + yyq2[7] = len(x.Images) != 0 var yynn2 int if yyr2 || yy2arr2 { r.EncodeArrayStart(8) } else { - yynn2 = 1 + yynn2 = 0 for _, b := range yyq2 { if b { yynn2++ @@ -33331,28 +36409,34 @@ func (x *NodeStatus) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Images == nil { - r.EncodeNil() - } else { - yym29 := z.EncBinary() - _ = yym29 - if false { + if yyq2[7] { + if x.Images == nil { + r.EncodeNil() } else { - h.encSliceContainerImage(([]ContainerImage)(x.Images), e) + yym29 := z.EncBinary() + _ = yym29 + if false { + } else { + h.encSliceContainerImage(([]ContainerImage)(x.Images), e) + } } + } else { + r.EncodeNil() } } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("images")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Images == nil { - r.EncodeNil() - } else { - yym30 := z.EncBinary() - _ = yym30 - if false { + if yyq2[7] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("images")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Images == nil { + r.EncodeNil() } else { - h.encSliceContainerImage(([]ContainerImage)(x.Images), e) + yym30 := z.EncBinary() + _ = yym30 + if false { + } else { + h.encSliceContainerImage(([]ContainerImage)(x.Images), e) + } } } } @@ -36735,7 +39819,7 @@ func (x *Binding) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *DeleteOptions) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *Preconditions) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -36749,16 +39833,15 @@ func (x *DeleteOptions) CodecEncodeSelf(e *codec1978.Encoder) { } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool + var yyq2 [1]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false - yyq2[1] = x.Kind != "" - yyq2[2] = x.APIVersion != "" + yyq2[0] = x.UID != nil var yynn2 int if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) + r.EncodeArrayStart(1) } else { - yynn2 = 1 + yynn2 = 0 for _, b := range yyq2 { if b { yynn2++ @@ -36769,38 +39852,309 @@ func (x *DeleteOptions) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.GracePeriodSeconds == nil { + if yyq2[0] { + if x.UID == nil { + r.EncodeNil() + } else { + yy4 := *x.UID + yym5 := z.EncBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.EncExt(yy4) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(yy4)) + } + } + } else { 
r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("uid")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.UID == nil { + r.EncodeNil() + } else { + yy6 := *x.UID + yym7 := z.EncBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.EncExt(yy6) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(yy6)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Preconditions) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Preconditions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "uid": + if r.TryDecodeAsNil() { + if x.UID != nil { + x.UID = nil + } + } else { + if x.UID == nil { + x.UID = new(pkg1_types.UID) + } + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(x.UID) { } else { - yy4 := *x.GracePeriodSeconds - yym5 := z.EncBinary() - _ = yym5 - if false { + *((*string)(x.UID)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Preconditions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.UID != nil { + x.UID = nil + } + } else { + if x.UID == nil { + x.UID = new(pkg1_types.UID) + } + yym8 := z.DecBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.DecExt(x.UID) { + } else { + *((*string)(x.UID)) = r.DecodeString() + } + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) 
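Preconditions is a new single-field type emitted ahead of DeleteOptions (the hunk header shows it taking over the spot where DeleteOptions used to start). The pointer-plus-omitempty shape mirrors yyq2[0] = x.UID != nil in the encoder, and pkg1_types is the generated alias for k8s.io/kubernetes/pkg/types. A sketch:

import "k8s.io/kubernetes/pkg/types"

// Preconditions as implied by the codec above: one optional UID that
// the target object must match for the operation to proceed.
type Preconditions struct {
	UID *types.UID `json:"uid,omitempty"`
}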
+} + +func (x *DeleteOptions) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.GracePeriodSeconds != nil + yyq2[1] = x.Preconditions != nil + yyq2[2] = x.OrphanDependents != nil + yyq2[3] = x.Kind != "" + yyq2[4] = x.APIVersion != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.GracePeriodSeconds == nil { + r.EncodeNil() } else { - r.EncodeInt(int64(yy4)) + yy4 := *x.GracePeriodSeconds + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(yy4)) + } } + } else { + r.EncodeNil() } } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("gracePeriodSeconds")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.GracePeriodSeconds == nil { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("gracePeriodSeconds")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.GracePeriodSeconds == nil { + r.EncodeNil() + } else { + yy6 := *x.GracePeriodSeconds + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeInt(int64(yy6)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Preconditions == nil { + r.EncodeNil() + } else { + x.Preconditions.CodecEncodeSelf(e) + } + } else { r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("preconditions")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Preconditions == nil { + r.EncodeNil() + } else { + x.Preconditions.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.OrphanDependents == nil { + r.EncodeNil() + } else { + yy12 := *x.OrphanDependents + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeBool(bool(yy12)) + } + } } else { - yy6 := *x.GracePeriodSeconds - yym7 := z.EncBinary() - _ = yym7 - if false { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("orphanDependents")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.OrphanDependents == nil { + r.EncodeNil() } else { - r.EncodeInt(int64(yy6)) + yy14 := *x.OrphanDependents + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + r.EncodeBool(bool(yy14)) + } } } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym9 := z.EncBinary() - _ = yym9 + if yyq2[3] { + yym17 := z.EncBinary() + _ = yym17 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -36809,12 +40163,12 @@ func (x *DeleteOptions) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2[1] { 
+ if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym10 := z.EncBinary() - _ = yym10 + yym18 := z.EncBinary() + _ = yym18 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -36823,9 +40177,9 @@ func (x *DeleteOptions) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 + if yyq2[4] { + yym20 := z.EncBinary() + _ = yym20 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -36834,12 +40188,12 @@ func (x *DeleteOptions) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2[2] { + if yyq2[4] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 + yym21 := z.EncBinary() + _ = yym21 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -36923,6 +40277,33 @@ func (x *DeleteOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { *((*int64)(x.GracePeriodSeconds)) = int64(r.DecodeInt(64)) } } + case "preconditions": + if r.TryDecodeAsNil() { + if x.Preconditions != nil { + x.Preconditions = nil + } + } else { + if x.Preconditions == nil { + x.Preconditions = new(Preconditions) + } + x.Preconditions.CodecDecodeSelf(d) + } + case "orphanDependents": + if r.TryDecodeAsNil() { + if x.OrphanDependents != nil { + x.OrphanDependents = nil + } + } else { + if x.OrphanDependents == nil { + x.OrphanDependents = new(bool) + } + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + *((*bool)(x.OrphanDependents)) = r.DecodeBool() + } + } case "kind": if r.TryDecodeAsNil() { x.Kind = "" @@ -36946,16 +40327,16 @@ func (x *DeleteOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l + var yyj11 int + var yyb11 bool + var yyhl11 bool = l >= 0 + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l } else { - yyb8 = r.CheckBreak() + yyb11 = r.CheckBreak() } - if yyb8 { + if yyb11 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -36968,20 +40349,67 @@ func (x *DeleteOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if x.GracePeriodSeconds == nil { x.GracePeriodSeconds = new(int64) } - yym10 := z.DecBinary() - _ = yym10 + yym13 := z.DecBinary() + _ = yym13 if false { } else { *((*int64)(x.GracePeriodSeconds)) = int64(r.DecodeInt(64)) } } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l } else { - yyb8 = r.CheckBreak() + yyb11 = r.CheckBreak() } - if yyb8 { + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Preconditions != nil { + x.Preconditions = nil + } + } else { + if x.Preconditions == nil { + x.Preconditions = new(Preconditions) + } + x.Preconditions.CodecDecodeSelf(d) + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.OrphanDependents != nil { + x.OrphanDependents = nil + } + } else { + if x.OrphanDependents == nil { + x.OrphanDependents = new(bool) + } + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*bool)(x.OrphanDependents)) = r.DecodeBool() + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -36991,13 +40419,13 @@ func (x *DeleteOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } else { x.Kind = string(r.DecodeString()) } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l } else { - yyb8 = r.CheckBreak() + yyb11 = r.CheckBreak() } - if yyb8 { + if yyb11 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -37008,17 +40436,17 @@ func (x *DeleteOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { x.APIVersion = string(r.DecodeString()) } for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l } else { - yyb8 = r.CheckBreak() + yyb11 = r.CheckBreak() } - if yyb8 { + if yyb11 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") + z.DecStructFieldNotFound(yyj11-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -40053,7 +43481,272 @@ func (x *NodeProxyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *ServiceProxyOptions) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *ServiceProxyOptions) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Path != "" + yyq2[1] = x.Kind != "" + yyq2[2] = x.APIVersion != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("path")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + 
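DeleteOptions grows from three encoded slots to five: the new preconditions and orphanDependents sit between gracePeriodSeconds and the kind/apiVersion pair, pushing those guards from yyq2[1]/yyq2[2] to yyq2[3]/yyq2[4]. The implied shape, sketched (in the real type, Kind and APIVersion presumably come from an embedded TypeMeta; they are shown flat here to match the decoded keys):

type DeleteOptions struct {
	GracePeriodSeconds *int64         `json:"gracePeriodSeconds,omitempty"`
	Preconditions      *Preconditions `json:"preconditions,omitempty"`
	// New: when set, asks the server to orphan (true) or garbage-collect
	// dependents; decoded above as an optional *bool.
	OrphanDependents *bool  `json:"orphanDependents,omitempty"`
	Kind             string `json:"kind,omitempty"`
	APIVersion       string `json:"apiVersion,omitempty"`
}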
r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ServiceProxyOptions) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ServiceProxyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "path": + if r.TryDecodeAsNil() { + x.Path = "" + } else { + x.Path = string(r.DecodeString()) + } + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ServiceProxyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Path = "" + } else { + x.Path = string(r.DecodeString()) + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = 
"" + } else { + x.Kind = string(r.DecodeString()) + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + for { + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj7-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *OwnerReference) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -40067,17 +43760,14 @@ func (x *ServiceProxyOptions) CodecEncodeSelf(e *codec1978.Encoder) { } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool + var yyq2 [4]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false - yyq2[0] = x.Path != "" - yyq2[1] = x.Kind != "" - yyq2[2] = x.APIVersion != "" var yynn2 int if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) + r.EncodeArrayStart(4) } else { - yynn2 = 0 + yynn2 = 4 for _, b := range yyq2 { if b { yynn2++ @@ -40088,77 +43778,80 @@ func (x *ServiceProxyOptions) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } + yym4 := z.EncBinary() + _ = yym4 + if false { } else { - r.EncodeString(codecSelferC_UTF81234, "") + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("path")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } + yym7 := z.EncBinary() + _ = yym7 + if false { } else { - r.EncodeString(codecSelferC_UTF81234, "") + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - 
} else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } + yym10 := z.EncBinary() + _ = yym10 + if false { } else { - r.EncodeString(codecSelferC_UTF81234, "") + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) } } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(x.UID) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.UID)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("uid")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else if z.HasExtensions() && z.EncExt(x.UID) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.UID)) } } if yyr2 || yy2arr2 { @@ -40170,7 +43863,7 @@ func (x *ServiceProxyOptions) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *ServiceProxyOptions) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *OwnerReference) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -40200,7 +43893,7 @@ func (x *ServiceProxyOptions) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *ServiceProxyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *OwnerReference) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -40222,11 +43915,11 @@ func (x *ServiceProxyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) switch yys3 { - case "path": + case "apiVersion": if r.TryDecodeAsNil() { - x.Path = "" + x.APIVersion = "" } else { - x.Path = string(r.DecodeString()) + x.APIVersion = string(r.DecodeString()) } case "kind": if r.TryDecodeAsNil() { @@ -40234,11 +43927,17 @@ func (x *ServiceProxyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder } else { x.Kind = string(r.DecodeString()) } - case "apiVersion": + case "name": if r.TryDecodeAsNil() { - x.APIVersion = "" + x.Name = "" } else { - x.APIVersion = string(r.DecodeString()) + x.Name = string(r.DecodeString()) + } + case "uid": + if r.TryDecodeAsNil() { + x.UID = "" + } else { + x.UID = pkg1_types.UID(r.DecodeString()) } default: z.DecStructFieldNotFound(-1, yys3) @@ -40247,36 +43946,36 @@ func (x *ServiceProxyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *ServiceProxyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *OwnerReference) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - 
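OwnerReference is new in this revision; unlike the optional-heavy types above, all four of its fields are encoded unconditionally (yynn2 = 4, no yyq2 guards), so none are omitempty. A sketch, with types.UID matching the pkg1_types alias used in the decoder:

import "k8s.io/kubernetes/pkg/types"

// OwnerReference as implied by the codec: a pointer from a dependent
// object back to its owning object, all fields required.
type OwnerReference struct {
	APIVersion string    `json:"apiVersion"`
	Kind       string    `json:"kind"`
	Name       string    `json:"name"`
	UID        types.UID `json:"uid"`
}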
if yyhl7 { - yyb7 = yyj7 > l + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb7 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb7 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Path = "" + x.APIVersion = "" } else { - x.Path = string(r.DecodeString()) + x.APIVersion = string(r.DecodeString()) } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb7 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb7 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -40286,34 +43985,50 @@ func (x *ServiceProxyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decod } else { x.Kind = string(r.DecodeString()) } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb7 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb7 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.APIVersion = "" + x.Name = "" } else { - x.APIVersion = string(r.DecodeString()) + x.Name = string(r.DecodeString()) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.UID = "" + } else { + x.UID = pkg1_types.UID(r.DecodeString()) } for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb7 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb7 { + if yyb8 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") + z.DecStructFieldNotFound(yyj8-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -42522,7 +46237,7 @@ func (x *List) CodecEncodeSelf(e *codec1978.Encoder) { _ = yym9 if false { } else { - h.encSliceruntime_RawExtension(([]pkg6_runtime.RawExtension)(x.Items), e) + h.encSliceruntime_RawExtension(([]pkg5_runtime.RawExtension)(x.Items), e) } } } else { @@ -42536,7 +46251,7 @@ func (x *List) CodecEncodeSelf(e *codec1978.Encoder) { _ = yym10 if false { } else { - h.encSliceruntime_RawExtension(([]pkg6_runtime.RawExtension)(x.Items), e) + h.encSliceruntime_RawExtension(([]pkg5_runtime.RawExtension)(x.Items), e) } } } @@ -42673,7 +46388,7 @@ func (x *List) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { _ = yym7 if false { } else { - h.decSliceruntime_RawExtension((*[]pkg6_runtime.RawExtension)(yyv6), d) + h.decSliceruntime_RawExtension((*[]pkg5_runtime.RawExtension)(yyv6), d) } } case "kind": @@ -42744,7 +46459,7 @@ func (x *List) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { _ = yym14 if false { } else { - h.decSliceruntime_RawExtension((*[]pkg6_runtime.RawExtension)(yyv13), d) + h.decSliceruntime_RawExtension((*[]pkg5_runtime.RawExtension)(yyv13), d) } } yyj10++ @@ -46294,14 +50009,13 @@ func (x *ConfigMapList) CodecEncodeSelf(e *codec1978.Encoder) { _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false yyq2[0] = true - yyq2[1] = len(x.Items) != 0 yyq2[2] = x.Kind != "" yyq2[3] = x.APIVersion != "" var yynn2 int if yyr2 || yy2arr2 { r.EncodeArrayStart(4) } else { - yynn2 = 0 + yynn2 = 1 for _, b := range yyq2 { if b { yynn2++ @@ -46341,34 +50055,28 @@ func (x *ConfigMapList) 
CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Items == nil { - r.EncodeNil() + if x.Items == nil { + r.EncodeNil() + } else { + yym9 := z.EncBinary() + _ = yym9 + if false { } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSliceConfigMap(([]ConfigMap)(x.Items), e) - } + h.encSliceConfigMap(([]ConfigMap)(x.Items), e) } - } else { - r.EncodeNil() } } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSliceConfigMap(([]ConfigMap)(x.Items), e) - } + h.encSliceConfigMap(([]ConfigMap)(x.Items), e) } } } @@ -47806,14 +51514,16 @@ func (x *DownwardAPIVolumeFile) CodecEncodeSelf(e *codec1978.Encoder) { } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool + var yyq2 [3]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false + yyq2[1] = x.FieldRef != nil + yyq2[2] = x.ResourceFieldRef != nil var yynn2 int if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) + r.EncodeArrayStart(3) } else { - yynn2 = 2 + yynn2 = 1 for _, b := range yyq2 { if b { yynn2++ @@ -47843,14 +51553,49 @@ func (x *DownwardAPIVolumeFile) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy7 := &x.FieldRef - yy7.CodecEncodeSelf(e) + if yyq2[1] { + if x.FieldRef == nil { + r.EncodeNil() + } else { + x.FieldRef.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fieldRef")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy9 := &x.FieldRef - yy9.CodecEncodeSelf(e) + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("fieldRef")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.FieldRef == nil { + r.EncodeNil() + } else { + x.FieldRef.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.ResourceFieldRef == nil { + r.EncodeNil() + } else { + x.ResourceFieldRef.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("resourceFieldRef")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ResourceFieldRef == nil { + r.EncodeNil() + } else { + x.ResourceFieldRef.CodecEncodeSelf(e) + } + } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) @@ -47921,10 +51666,25 @@ func (x *DownwardAPIVolumeFile) codecDecodeSelfFromMap(l int, d *codec1978.Decod } case "fieldRef": if r.TryDecodeAsNil() { - x.FieldRef = ObjectFieldSelector{} + if x.FieldRef != nil { + x.FieldRef = nil + } } else { - yyv5 := &x.FieldRef - yyv5.CodecDecodeSelf(d) + if x.FieldRef == nil { + x.FieldRef = new(ObjectFieldSelector) + } + 
x.FieldRef.CodecDecodeSelf(d) + } + case "resourceFieldRef": + if r.TryDecodeAsNil() { + if x.ResourceFieldRef != nil { + x.ResourceFieldRef = nil + } + } else { + if x.ResourceFieldRef == nil { + x.ResourceFieldRef = new(ResourceFieldSelector) + } + x.ResourceFieldRef.CodecDecodeSelf(d) } default: z.DecStructFieldNotFound(-1, yys3) @@ -47937,16 +51697,16 @@ func (x *DownwardAPIVolumeFile) codecDecodeSelfFromArray(l int, d *codec1978.Dec var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb6 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb6 { + if yyb7 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -47956,35 +51716,60 @@ func (x *DownwardAPIVolumeFile) codecDecodeSelfFromArray(l int, d *codec1978.Dec } else { x.Path = string(r.DecodeString()) } - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb6 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb6 { + if yyb7 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.FieldRef = ObjectFieldSelector{} + if x.FieldRef != nil { + x.FieldRef = nil + } } else { - yyv8 := &x.FieldRef - yyv8.CodecDecodeSelf(d) + if x.FieldRef == nil { + x.FieldRef = new(ObjectFieldSelector) + } + x.FieldRef.CodecDecodeSelf(d) + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ResourceFieldRef != nil { + x.ResourceFieldRef = nil + } + } else { + if x.ResourceFieldRef == nil { + x.ResourceFieldRef = new(ResourceFieldSelector) + } + x.ResourceFieldRef.CodecDecodeSelf(d) } for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb6 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb6 { + if yyb7 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") + z.DecStructFieldNotFound(yyj7-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -49208,6 +52993,125 @@ func (x *RangeAllocation) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } +func (x codecSelfer1234) encSliceOwnerReference(v []OwnerReference, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceOwnerReference(v *[]OwnerReference, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []OwnerReference{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + 
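DownwardAPIVolumeFile's fieldRef changes from an inline ObjectFieldSelector value to an optional pointer, alongside a new optional resourceFieldRef — hence the nil-check blocks replacing the old ObjectFieldSelector{} reset. A sketch of the implied shape (both selector types are defined elsewhere in this package):

type DownwardAPIVolumeFile struct {
	Path string `json:"path"`
	// Now optional pointers; in practice one or the other is set.
	FieldRef         *ObjectFieldSelector   `json:"fieldRef,omitempty"`
	ResourceFieldRef *ResourceFieldSelector `json:"resourceFieldRef,omitempty"`
}

The encSliceOwnerReference/decSliceOwnerReference helpers that begin here are the generated slice plumbing for the new OwnerReference type, presumably feeding an owner-references field on ObjectMeta elsewhere in this diff.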
yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 64) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]OwnerReference, yyrl1) + } + } else { + yyv1 = make([]OwnerReference, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = OwnerReference{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, OwnerReference{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = OwnerReference{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, OwnerReference{}) // var yyz1 OwnerReference + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = OwnerReference{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []OwnerReference{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + func (x codecSelfer1234) encSlicePersistentVolumeAccessMode(v []PersistentVolumeAccessMode, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) @@ -49357,7 +53261,7 @@ func (x codecSelfer1234) decSlicePersistentVolume(v *[]PersistentVolume, d *code yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 400) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 456) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] @@ -49476,7 +53380,7 @@ func (x codecSelfer1234) decSlicePersistentVolumeClaim(v *[]PersistentVolumeClai yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 296) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 344) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] @@ -50181,7 +54085,7 @@ func (x codecSelfer1234) decSliceVolumeMount(v *[]VolumeMount, d *codec1978.Deco yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 40) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 56) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] @@ -50499,6 +54403,244 @@ func (x codecSelfer1234) decSliceNodeSelectorRequirement(v *[]NodeSelectorRequir } } +func (x codecSelfer1234) encSlicePodAffinityTerm(v []PodAffinityTerm, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSlicePodAffinityTerm(v *[]PodAffinityTerm, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []PodAffinityTerm{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } 
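Two more slice codecs follow for pod-affinity types. The DecInferLen constants threaded through this section (e.g. 400 → 456 for PersistentVolume, 48 for PodAffinityTerm, 56 for WeightedPodAffinityTerm) are per-element byte-size hints go-codec uses to cap the initial slice allocation, so the bumps simply track the structs growing. The element types themselves are not shown in these hunks; a hedged sketch of their likely shape, with field names and widths assumed from the upstream API of this era:

type PodAffinityTerm struct {
	LabelSelector *LabelSelector `json:"labelSelector,omitempty"` // selector type assumed
	Namespaces    []string       `json:"namespaces,omitempty"`
	TopologyKey   string         `json:"topologyKey,omitempty"`
}

type WeightedPodAffinityTerm struct {
	Weight          int32           `json:"weight"` // 1-100; exact integer width assumed
	PodAffinityTerm PodAffinityTerm `json:"podAffinityTerm"`
}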
+ } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 48) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]PodAffinityTerm, yyrl1) + } + } else { + yyv1 = make([]PodAffinityTerm, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = PodAffinityTerm{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, PodAffinityTerm{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = PodAffinityTerm{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, PodAffinityTerm{}) // var yyz1 PodAffinityTerm + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = PodAffinityTerm{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []PodAffinityTerm{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceWeightedPodAffinityTerm(v []WeightedPodAffinityTerm, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceWeightedPodAffinityTerm(v *[]WeightedPodAffinityTerm, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []WeightedPodAffinityTerm{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 56) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]WeightedPodAffinityTerm, yyrl1) + } + } else { + yyv1 = make([]WeightedPodAffinityTerm, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = WeightedPodAffinityTerm{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, WeightedPodAffinityTerm{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = WeightedPodAffinityTerm{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; 
!r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, WeightedPodAffinityTerm{}) // var yyz1 WeightedPodAffinityTerm + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = WeightedPodAffinityTerm{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []WeightedPodAffinityTerm{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + func (x codecSelfer1234) encSlicePreferredSchedulingTerm(v []PreferredSchedulingTerm, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) @@ -50657,7 +54799,7 @@ func (x codecSelfer1234) decSliceVolume(v *[]Volume, d *codec1978.Decoder) { yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 168) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 176) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] @@ -51252,7 +55394,7 @@ func (x codecSelfer1234) decSlicePod(v *[]Pod, d *codec1978.Decoder) { yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 520) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 648) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] @@ -51371,7 +55513,7 @@ func (x codecSelfer1234) decSlicePodTemplate(v *[]PodTemplate, d *codec1978.Deco yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 544) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 696) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] @@ -51490,7 +55632,7 @@ func (x codecSelfer1234) decSliceReplicationController(v *[]ReplicationControlle yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 232) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 280) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] @@ -51847,7 +55989,7 @@ func (x codecSelfer1234) decSliceService(v *[]Service, d *codec1978.Decoder) { yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 360) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 432) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] @@ -52085,7 +56227,7 @@ func (x codecSelfer1234) decSliceServiceAccount(v *[]ServiceAccount, d *codec197 yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 240) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 288) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] @@ -52323,7 +56465,7 @@ func (x codecSelfer1234) decSliceEndpointAddress(v *[]EndpointAddress, d *codec1 yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 24) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 40) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] @@ -52561,7 +56703,7 @@ func (x codecSelfer1234) decSliceEndpoints(v *[]Endpoints, d *codec1978.Decoder) yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 216) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 264) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] @@ -53030,7 +57172,7 @@ func (x codecSelfer1234) decResourceList(v 
*ResourceList, d *codec1978.Decoder) yyl1 := r.ReadMapStart() yybh1 := z.DecBasicHandle() if yyv1 == nil { - yyrl1, _ := z.DecInferLen(yyl1, yybh1.MaxInitLen, 40) + yyrl1, _ := z.DecInferLen(yyl1, yybh1.MaxInitLen, 72) yyv1 = make(map[ResourceName]pkg3_resource.Quantity, yyrl1) *v = yyv1 } @@ -53151,7 +57293,7 @@ func (x codecSelfer1234) decSliceNode(v *[]Node, d *codec1978.Decoder) { yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 488) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 568) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] @@ -53380,7 +57522,7 @@ func (x codecSelfer1234) decSliceNamespace(v *[]Namespace, d *codec1978.Decoder) yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 232) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 280) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] @@ -53499,7 +57641,7 @@ func (x codecSelfer1234) decSliceEvent(v *[]Event, d *codec1978.Decoder) { yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 440) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 488) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] @@ -53579,7 +57721,7 @@ func (x codecSelfer1234) decSliceEvent(v *[]Event, d *codec1978.Decoder) { } } -func (x codecSelfer1234) encSliceruntime_RawExtension(v []pkg6_runtime.RawExtension, e *codec1978.Encoder) { +func (x codecSelfer1234) encSliceruntime_RawExtension(v []pkg5_runtime.RawExtension, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -53600,7 +57742,7 @@ func (x codecSelfer1234) encSliceruntime_RawExtension(v []pkg6_runtime.RawExtens z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) decSliceruntime_RawExtension(v *[]pkg6_runtime.RawExtension, d *codec1978.Decoder) { +func (x codecSelfer1234) decSliceruntime_RawExtension(v *[]pkg5_runtime.RawExtension, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -53611,7 +57753,7 @@ func (x codecSelfer1234) decSliceruntime_RawExtension(v *[]pkg6_runtime.RawExten _ = yyc1 if yyl1 == 0 { if yyv1 == nil { - yyv1 = []pkg6_runtime.RawExtension{} + yyv1 = []pkg5_runtime.RawExtension{} yyc1 = true } else if len(yyv1) != 0 { yyv1 = yyv1[:0] @@ -53631,10 +57773,10 @@ func (x codecSelfer1234) decSliceruntime_RawExtension(v *[]pkg6_runtime.RawExten if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] } else { - yyv1 = make([]pkg6_runtime.RawExtension, yyrl1) + yyv1 = make([]pkg5_runtime.RawExtension, yyrl1) } } else { - yyv1 = make([]pkg6_runtime.RawExtension, yyrl1) + yyv1 = make([]pkg5_runtime.RawExtension, yyrl1) } yyc1 = true yyrr1 = len(yyv1) @@ -53649,7 +57791,7 @@ func (x codecSelfer1234) decSliceruntime_RawExtension(v *[]pkg6_runtime.RawExten for ; yyj1 < yyrr1; yyj1++ { yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = pkg6_runtime.RawExtension{} + yyv1[yyj1] = pkg5_runtime.RawExtension{} } else { yyv2 := &yyv1[yyj1] yym3 := z.DecBinary() @@ -53666,10 +57808,10 @@ func (x codecSelfer1234) decSliceruntime_RawExtension(v *[]pkg6_runtime.RawExten } if yyrt1 { for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, pkg6_runtime.RawExtension{}) + yyv1 = append(yyv1, pkg5_runtime.RawExtension{}) yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = pkg6_runtime.RawExtension{} + yyv1[yyj1] = 
pkg5_runtime.RawExtension{} } else { yyv4 := &yyv1[yyj1] yym5 := z.DecBinary() @@ -53691,13 +57833,13 @@ func (x codecSelfer1234) decSliceruntime_RawExtension(v *[]pkg6_runtime.RawExten for ; !r.CheckBreak(); yyj1++ { if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, pkg6_runtime.RawExtension{}) // var yyz1 pkg6_runtime.RawExtension + yyv1 = append(yyv1, pkg5_runtime.RawExtension{}) // var yyz1 pkg5_runtime.RawExtension yyc1 = true } yyh1.ElemContainerState(yyj1) if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv1[yyj1] = pkg6_runtime.RawExtension{} + yyv1[yyj1] = pkg5_runtime.RawExtension{} } else { yyv6 := &yyv1[yyj1] yym7 := z.DecBinary() @@ -53720,7 +57862,7 @@ func (x codecSelfer1234) decSliceruntime_RawExtension(v *[]pkg6_runtime.RawExten yyv1 = yyv1[:yyj1] yyc1 = true } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []pkg6_runtime.RawExtension{} + yyv1 = []pkg5_runtime.RawExtension{} yyc1 = true } } @@ -53888,7 +58030,7 @@ func (x codecSelfer1234) decSliceLimitRange(v *[]LimitRange, d *codec1978.Decode yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 216) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 264) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] @@ -54117,7 +58259,7 @@ func (x codecSelfer1234) decSliceResourceQuota(v *[]ResourceQuota, d *codec1978. yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 240) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 288) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] @@ -54464,7 +58606,7 @@ func (x codecSelfer1234) decSliceSecret(v *[]Secret, d *codec1978.Decoder) { yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 216) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 264) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] @@ -54583,7 +58725,7 @@ func (x codecSelfer1234) decSliceConfigMap(v *[]ConfigMap, d *codec1978.Decoder) yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 200) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 248) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] @@ -54821,7 +58963,7 @@ func (x codecSelfer1234) decSliceComponentStatus(v *[]ComponentStatus, d *codec1 yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 216) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 264) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] @@ -54940,7 +59082,7 @@ func (x codecSelfer1234) decSliceDownwardAPIVolumeFile(v *[]DownwardAPIVolumeFil yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 48) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 32) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/v1/types.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/v1/types.go index 12ad83191044..382c5513fbef 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/v1/types.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/v1/types.go @@ -71,8 +71,8 @@ type ObjectMeta struct { // automatically. Name is primarily intended for creation idempotence and configuration // definition. // Cannot be updated. 
- // More info: http://releases.k8s.io/release-1.2/docs/user-guide/identifiers.md#names - Name string `json:"name,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#names + Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` // GenerateName is an optional prefix, used by the server, to generate a unique // name ONLY IF the Name field has not been provided. @@ -88,8 +88,8 @@ type ObjectMeta struct { // should retry (optionally after the time indicated in the Retry-After header). // // Applied only if Name is not specified. - // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#idempotency - GenerateName string `json:"generateName,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#idempotency + GenerateName string `json:"generateName,omitempty" protobuf:"bytes,2,opt,name=generateName"` // Namespace defines the space within each name must be unique. An empty namespace is // equivalent to the "default" namespace, but "default" is the canonical representation. @@ -98,13 +98,13 @@ type ObjectMeta struct { // // Must be a DNS_LABEL. // Cannot be updated. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/namespaces.md - Namespace string `json:"namespace,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/namespaces.md + Namespace string `json:"namespace,omitempty" protobuf:"bytes,3,opt,name=namespace"` // SelfLink is a URL representing this object. // Populated by the system. // Read-only. - SelfLink string `json:"selfLink,omitempty"` + SelfLink string `json:"selfLink,omitempty" protobuf:"bytes,4,opt,name=selfLink"` // UID is the unique in time and space value for this object. It is typically generated by // the server on successful creation of a resource and is not allowed to change on PUT @@ -112,8 +112,8 @@ type ObjectMeta struct { // // Populated by the system. // Read-only. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/identifiers.md#uids - UID types.UID `json:"uid,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#uids + UID types.UID `json:"uid,omitempty" protobuf:"bytes,5,opt,name=uid,casttype=k8s.io/kubernetes/pkg/types.UID"` // An opaque value that represents the internal version of this object that can // be used by clients to determine when objects have changed. May be used for optimistic @@ -124,12 +124,12 @@ type ObjectMeta struct { // Populated by the system. // Read-only. // Value must be treated as opaque by clients and . - // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#concurrency-control-and-consistency - ResourceVersion string `json:"resourceVersion,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#concurrency-control-and-consistency + ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,6,opt,name=resourceVersion"` // A sequence number representing a specific generation of the desired state. // Populated by the system. Read-only. - Generation int64 `json:"generation,omitempty"` + Generation int64 `json:"generation,omitempty" protobuf:"varint,7,opt,name=generation"` // CreationTimestamp is a timestamp representing the server time when this object was // created. It is not guaranteed to be set in happens-before order across separate operations. @@ -138,8 +138,8 @@ type ObjectMeta struct { // Populated by the system. // Read-only. // Null for lists. 
- // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata - CreationTimestamp unversioned.Time `json:"creationTimestamp,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + CreationTimestamp unversioned.Time `json:"creationTimestamp,omitempty" protobuf:"bytes,8,opt,name=creationTimestamp"` // DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This // field is set by the server when a graceful deletion is requested by the user, and is not @@ -154,27 +154,37 @@ type ObjectMeta struct { // // Populated by the system when a graceful deletion is requested. // Read-only. - // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata - DeletionTimestamp *unversioned.Time `json:"deletionTimestamp,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + DeletionTimestamp *unversioned.Time `json:"deletionTimestamp,omitempty" protobuf:"bytes,9,opt,name=deletionTimestamp"` // Number of seconds allowed for this object to gracefully terminate before // it will be removed from the system. Only set when deletionTimestamp is also set. // May only be shortened. // Read-only. - DeletionGracePeriodSeconds *int64 `json:"deletionGracePeriodSeconds,omitempty"` + DeletionGracePeriodSeconds *int64 `json:"deletionGracePeriodSeconds,omitempty" protobuf:"varint,10,opt,name=deletionGracePeriodSeconds"` // Map of string keys and values that can be used to organize and categorize // (scope and select) objects. May match selectors of replication controllers // and services. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/labels.md + // More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md // TODO: replace map[string]string with labels.LabelSet type - Labels map[string]string `json:"labels,omitempty"` + Labels map[string]string `json:"labels,omitempty" protobuf:"bytes,11,rep,name=labels"` // Annotations is an unstructured key value map stored with a resource that may be // set by external tools to store and retrieve arbitrary metadata. They are not // queryable and should be preserved when modifying objects. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/annotations.md - Annotations map[string]string `json:"annotations,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/annotations.md + Annotations map[string]string `json:"annotations,omitempty" protobuf:"bytes,12,rep,name=annotations"` + + // List of objects depended by this object. If ALL objects in the list have + // been deleted, this object will be garbage collected. + OwnerReferences []OwnerReference `json:"ownerReferences,omitempty" patchStrategy:"merge" patchMergeKey:"uid" protobuf:"bytes,13,rep,name=ownerReferences"` + + // Must be empty before the object is deleted from the registry. Each entry + // is an identifier for the responsible component that will remove the entry + // from the list. If the deletionTimestamp of the object is non-nil, entries + // in this list can only be removed. + Finalizers []string `json:"finalizers,omitempty" patchStrategy:"merge" protobuf:"bytes,14,rep,name=finalizers"` } const ( @@ -188,12 +198,12 @@ const ( type Volume struct { // Volume's name. // Must be a DNS_LABEL and unique within the pod. 
- // More info: http://releases.k8s.io/release-1.2/docs/user-guide/identifiers.md#names - Name string `json:"name"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#names + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` // VolumeSource represents the location and type of the mounted volume. // If not specified, the Volume is implied to be an EmptyDir. // This implied behavior is deprecated and will be removed in a future version. - VolumeSource `json:",inline"` + VolumeSource `json:",inline" protobuf:"bytes,2,opt,name=volumeSource"` } // Represents the source of a volume to mount. @@ -203,66 +213,68 @@ type VolumeSource struct { // machine that is directly exposed to the container. This is generally // used for system agents or other privileged things that are allowed // to see the host machine. Most containers will NOT need this. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/volumes.md#hostpath + // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#hostpath // --- // TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not // mount host directories as read/write. - HostPath *HostPathVolumeSource `json:"hostPath,omitempty"` + HostPath *HostPathVolumeSource `json:"hostPath,omitempty" protobuf:"bytes,1,opt,name=hostPath"` // EmptyDir represents a temporary directory that shares a pod's lifetime. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/volumes.md#emptydir - EmptyDir *EmptyDirVolumeSource `json:"emptyDir,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#emptydir + EmptyDir *EmptyDirVolumeSource `json:"emptyDir,omitempty" protobuf:"bytes,2,opt,name=emptyDir"` // GCEPersistentDisk represents a GCE Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/volumes.md#gcepersistentdisk - GCEPersistentDisk *GCEPersistentDiskVolumeSource `json:"gcePersistentDisk,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#gcepersistentdisk + GCEPersistentDisk *GCEPersistentDiskVolumeSource `json:"gcePersistentDisk,omitempty" protobuf:"bytes,3,opt,name=gcePersistentDisk"` // AWSElasticBlockStore represents an AWS Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/volumes.md#awselasticblockstore - AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource `json:"awsElasticBlockStore,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#awselasticblockstore + AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource `json:"awsElasticBlockStore,omitempty" protobuf:"bytes,4,opt,name=awsElasticBlockStore"` // GitRepo represents a git repository at a particular revision. - GitRepo *GitRepoVolumeSource `json:"gitRepo,omitempty"` + GitRepo *GitRepoVolumeSource `json:"gitRepo,omitempty" protobuf:"bytes,5,opt,name=gitRepo"` // Secret represents a secret that should populate this volume. 
- // More info: http://releases.k8s.io/release-1.2/docs/user-guide/volumes.md#secrets - Secret *SecretVolumeSource `json:"secret,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#secrets + Secret *SecretVolumeSource `json:"secret,omitempty" protobuf:"bytes,6,opt,name=secret"` // NFS represents an NFS mount on the host that shares a pod's lifetime - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/volumes.md#nfs - NFS *NFSVolumeSource `json:"nfs,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#nfs + NFS *NFSVolumeSource `json:"nfs,omitempty" protobuf:"bytes,7,opt,name=nfs"` // ISCSI represents an ISCSI Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. - // More info: http://releases.k8s.io/release-1.2/examples/iscsi/README.md - ISCSI *ISCSIVolumeSource `json:"iscsi,omitempty"` + // More info: http://releases.k8s.io/HEAD/examples/iscsi/README.md + ISCSI *ISCSIVolumeSource `json:"iscsi,omitempty" protobuf:"bytes,8,opt,name=iscsi"` // Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. - // More info: http://releases.k8s.io/release-1.2/examples/glusterfs/README.md - Glusterfs *GlusterfsVolumeSource `json:"glusterfs,omitempty"` + // More info: http://releases.k8s.io/HEAD/examples/glusterfs/README.md + Glusterfs *GlusterfsVolumeSource `json:"glusterfs,omitempty" protobuf:"bytes,9,opt,name=glusterfs"` // PersistentVolumeClaimVolumeSource represents a reference to a // PersistentVolumeClaim in the same namespace. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/persistent-volumes.md#persistentvolumeclaims - PersistentVolumeClaim *PersistentVolumeClaimVolumeSource `json:"persistentVolumeClaim,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#persistentvolumeclaims + PersistentVolumeClaim *PersistentVolumeClaimVolumeSource `json:"persistentVolumeClaim,omitempty" protobuf:"bytes,10,opt,name=persistentVolumeClaim"` // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. - // More info: http://releases.k8s.io/release-1.2/examples/rbd/README.md - RBD *RBDVolumeSource `json:"rbd,omitempty"` + // More info: http://releases.k8s.io/HEAD/examples/rbd/README.md + RBD *RBDVolumeSource `json:"rbd,omitempty" protobuf:"bytes,11,opt,name=rbd"` // FlexVolume represents a generic volume resource that is // provisioned/attached using a exec based plugin. This is an // alpha feature and may change in future. - FlexVolume *FlexVolumeSource `json:"flexVolume,omitempty"` + FlexVolume *FlexVolumeSource `json:"flexVolume,omitempty" protobuf:"bytes,12,opt,name=flexVolume"` // Cinder represents a cinder volume attached and mounted on kubelets host machine - // More info: http://releases.k8s.io/release-1.2/examples/mysql-cinder-pd/README.md - Cinder *CinderVolumeSource `json:"cinder,omitempty"` + // More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + Cinder *CinderVolumeSource `json:"cinder,omitempty" protobuf:"bytes,13,opt,name=cinder"` // CephFS represents a Ceph FS mount on the host that shares a pod's lifetime - CephFS *CephFSVolumeSource `json:"cephfs,omitempty"` + CephFS *CephFSVolumeSource `json:"cephfs,omitempty" protobuf:"bytes,14,opt,name=cephfs"` // Flocker represents a Flocker volume attached to a kubelet's host machine. 
This depends on the Flocker control service being running - Flocker *FlockerVolumeSource `json:"flocker,omitempty"` + Flocker *FlockerVolumeSource `json:"flocker,omitempty" protobuf:"bytes,15,opt,name=flocker"` // DownwardAPI represents downward API about the pod that should populate this volume - DownwardAPI *DownwardAPIVolumeSource `json:"downwardAPI,omitempty"` + DownwardAPI *DownwardAPIVolumeSource `json:"downwardAPI,omitempty" protobuf:"bytes,16,opt,name=downwardAPI"` // FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. - FC *FCVolumeSource `json:"fc,omitempty"` + FC *FCVolumeSource `json:"fc,omitempty" protobuf:"bytes,17,opt,name=fc"` // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. - AzureFile *AzureFileVolumeSource `json:"azureFile,omitempty"` + AzureFile *AzureFileVolumeSource `json:"azureFile,omitempty" protobuf:"bytes,18,opt,name=azureFile"` // ConfigMap represents a configMap that should populate this volume - ConfigMap *ConfigMapVolumeSource `json:"configMap,omitempty"` + ConfigMap *ConfigMapVolumeSource `json:"configMap,omitempty" protobuf:"bytes,19,opt,name=configMap"` + // VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine + VsphereVolume *VsphereVirtualDiskVolumeSource `json:"vsphereVolume,omitempty" protobuf:"bytes,20,opt,name=vsphereVolume"` } // PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace. @@ -271,11 +283,11 @@ type VolumeSource struct { // type of volume that is owned by someone else (the system). type PersistentVolumeClaimVolumeSource struct { // ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/persistent-volumes.md#persistentvolumeclaims - ClaimName string `json:"claimName"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#persistentvolumeclaims + ClaimName string `json:"claimName" protobuf:"bytes,1,opt,name=claimName"` // Will force the ReadOnly setting in VolumeMounts. // Default false. - ReadOnly bool `json:"readOnly,omitempty"` + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,2,opt,name=readOnly"` } // PersistentVolumeSource is similar to VolumeSource but meant for the @@ -283,91 +295,93 @@ type PersistentVolumeClaimVolumeSource struct { type PersistentVolumeSource struct { // GCEPersistentDisk represents a GCE Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. Provisioned by an admin. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/volumes.md#gcepersistentdisk - GCEPersistentDisk *GCEPersistentDiskVolumeSource `json:"gcePersistentDisk,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#gcepersistentdisk + GCEPersistentDisk *GCEPersistentDiskVolumeSource `json:"gcePersistentDisk,omitempty" protobuf:"bytes,1,opt,name=gcePersistentDisk"` // AWSElasticBlockStore represents an AWS Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. 
- // More info: http://releases.k8s.io/release-1.2/docs/user-guide/volumes.md#awselasticblockstore - AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource `json:"awsElasticBlockStore,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#awselasticblockstore + AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource `json:"awsElasticBlockStore,omitempty" protobuf:"bytes,2,opt,name=awsElasticBlockStore"` // HostPath represents a directory on the host. // Provisioned by a developer or tester. // This is useful for single-node development and testing only! // On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/volumes.md#hostpath - HostPath *HostPathVolumeSource `json:"hostPath,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#hostpath + HostPath *HostPathVolumeSource `json:"hostPath,omitempty" protobuf:"bytes,3,opt,name=hostPath"` // Glusterfs represents a Glusterfs volume that is attached to a host and // exposed to the pod. Provisioned by an admin. - // More info: http://releases.k8s.io/release-1.2/examples/glusterfs/README.md - Glusterfs *GlusterfsVolumeSource `json:"glusterfs,omitempty"` + // More info: http://releases.k8s.io/HEAD/examples/glusterfs/README.md + Glusterfs *GlusterfsVolumeSource `json:"glusterfs,omitempty" protobuf:"bytes,4,opt,name=glusterfs"` // NFS represents an NFS mount on the host. Provisioned by an admin. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/volumes.md#nfs - NFS *NFSVolumeSource `json:"nfs,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#nfs + NFS *NFSVolumeSource `json:"nfs,omitempty" protobuf:"bytes,5,opt,name=nfs"` // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. - // More info: http://releases.k8s.io/release-1.2/examples/rbd/README.md - RBD *RBDVolumeSource `json:"rbd,omitempty"` + // More info: http://releases.k8s.io/HEAD/examples/rbd/README.md + RBD *RBDVolumeSource `json:"rbd,omitempty" protobuf:"bytes,6,opt,name=rbd"` // ISCSI represents an ISCSI Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. Provisioned by an admin. - ISCSI *ISCSIVolumeSource `json:"iscsi,omitempty"` + ISCSI *ISCSIVolumeSource `json:"iscsi,omitempty" protobuf:"bytes,7,opt,name=iscsi"` // Cinder represents a cinder volume attached and mounted on kubelets host machine - // More info: http://releases.k8s.io/release-1.2/examples/mysql-cinder-pd/README.md - Cinder *CinderVolumeSource `json:"cinder,omitempty"` + // More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + Cinder *CinderVolumeSource `json:"cinder,omitempty" protobuf:"bytes,8,opt,name=cinder"` // CephFS represents a Ceph FS mount on the host that shares a pod's lifetime - CephFS *CephFSVolumeSource `json:"cephfs,omitempty"` + CephFS *CephFSVolumeSource `json:"cephfs,omitempty" protobuf:"bytes,9,opt,name=cephfs"` // FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. - FC *FCVolumeSource `json:"fc,omitempty"` + FC *FCVolumeSource `json:"fc,omitempty" protobuf:"bytes,10,opt,name=fc"` // Flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. 
This depends on the Flocker control service being running - Flocker *FlockerVolumeSource `json:"flocker,omitempty"` + Flocker *FlockerVolumeSource `json:"flocker,omitempty" protobuf:"bytes,11,opt,name=flocker"` // FlexVolume represents a generic volume resource that is // provisioned/attached using a exec based plugin. This is an // alpha feature and may change in future. - FlexVolume *FlexVolumeSource `json:"flexVolume,omitempty"` + FlexVolume *FlexVolumeSource `json:"flexVolume,omitempty" protobuf:"bytes,12,opt,name=flexVolume"` // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. - AzureFile *AzureFileVolumeSource `json:"azureFile,omitempty"` + AzureFile *AzureFileVolumeSource `json:"azureFile,omitempty" protobuf:"bytes,13,opt,name=azureFile"` + // VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine + VsphereVolume *VsphereVirtualDiskVolumeSource `json:"vsphereVolume,omitempty" protobuf:"bytes,14,opt,name=vsphereVolume"` } // +genclient=true,nonNamespaced=true // PersistentVolume (PV) is a storage resource provisioned by an administrator. // It is analogous to a node. -// More info: http://releases.k8s.io/release-1.2/docs/user-guide/persistent-volumes.md +// More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md type PersistentVolume struct { unversioned.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata - ObjectMeta `json:"metadata,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec defines a specification of a persistent volume owned by the cluster. // Provisioned by an administrator. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/persistent-volumes.md#persistent-volumes - Spec PersistentVolumeSpec `json:"spec,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#persistent-volumes + Spec PersistentVolumeSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // Status represents the current information/status for the persistent volume. // Populated by the system. // Read-only. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/persistent-volumes.md#persistent-volumes - Status PersistentVolumeStatus `json:"status,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#persistent-volumes + Status PersistentVolumeStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } // PersistentVolumeSpec is the specification of a persistent volume. type PersistentVolumeSpec struct { // A description of the persistent volume's resources and capacity. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/persistent-volumes.md#capacity - Capacity ResourceList `json:"capacity,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#capacity + Capacity ResourceList `json:"capacity,omitempty" protobuf:"bytes,1,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"` // The actual volume backing the persistent volume. - PersistentVolumeSource `json:",inline"` + PersistentVolumeSource `json:",inline" protobuf:"bytes,2,opt,name=persistentVolumeSource"` // AccessModes contains all ways the volume can be mounted. 
- // More info: http://releases.k8s.io/release-1.2/docs/user-guide/persistent-volumes.md#access-modes - AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#access-modes + AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty" protobuf:"bytes,3,rep,name=accessModes,casttype=PersistentVolumeAccessMode"` // ClaimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim. // Expected to be non-nil when bound. // claim.VolumeName is the authoritative bind between PV and PVC. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/persistent-volumes.md#binding - ClaimRef *ObjectReference `json:"claimRef,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#binding + ClaimRef *ObjectReference `json:"claimRef,omitempty" protobuf:"bytes,4,opt,name=claimRef"` // What happens to a persistent volume when released from its claim. // Valid options are Retain (default) and Recycle. // Recyling must be supported by the volume plugin underlying this persistent volume. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/persistent-volumes.md#recycling-policy - PersistentVolumeReclaimPolicy PersistentVolumeReclaimPolicy `json:"persistentVolumeReclaimPolicy,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#recycling-policy + PersistentVolumeReclaimPolicy PersistentVolumeReclaimPolicy `json:"persistentVolumeReclaimPolicy,omitempty" protobuf:"bytes,5,opt,name=persistentVolumeReclaimPolicy,casttype=PersistentVolumeReclaimPolicy"` } // PersistentVolumeReclaimPolicy describes a policy for end-of-life maintenance of persistent volumes. @@ -380,7 +394,7 @@ const ( // PersistentVolumeReclaimDelete means the volume will be deleted from Kubernetes on release from its claim. // The volume plugin must support Deletion. PersistentVolumeReclaimDelete PersistentVolumeReclaimPolicy = "Delete" - // PersistentVolumeReclaimRetain means the volume will left in its current phase (Released) for manual reclamation by the administrator. + // PersistentVolumeReclaimRetain means the volume will be left in its current phase (Released) for manual reclamation by the administrator. // The default policy is Retain. PersistentVolumeReclaimRetain PersistentVolumeReclaimPolicy = "Retain" ) @@ -388,76 +402,76 @@ const ( // PersistentVolumeStatus is the current status of a persistent volume. type PersistentVolumeStatus struct { // Phase indicates if a volume is available, bound to a claim, or released by a claim. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/persistent-volumes.md#phase - Phase PersistentVolumePhase `json:"phase,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#phase + Phase PersistentVolumePhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=PersistentVolumePhase"` // A human-readable message indicating details about why the volume is in this state. - Message string `json:"message,omitempty"` + Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"` // Reason is a brief CamelCase string that describes any failure and is meant // for machine parsing and tidy display in the CLI. - Reason string `json:"reason,omitempty"` + Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"` } // PersistentVolumeList is a list of PersistentVolume items. 
type PersistentVolumeList struct { unversioned.TypeMeta `json:",inline"` // Standard list metadata. - // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#types-kinds - unversioned.ListMeta `json:"metadata,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of persistent volumes. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/persistent-volumes.md - Items []PersistentVolume `json:"items"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md + Items []PersistentVolume `json:"items" protobuf:"bytes,2,rep,name=items"` } // PersistentVolumeClaim is a user's request for and claim to a persistent volume type PersistentVolumeClaim struct { unversioned.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata - ObjectMeta `json:"metadata,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec defines the desired characteristics of a volume requested by a pod author. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/persistent-volumes.md#persistentvolumeclaims - Spec PersistentVolumeClaimSpec `json:"spec,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#persistentvolumeclaims + Spec PersistentVolumeClaimSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // Status represents the current information/status of a persistent volume claim. // Read-only. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/persistent-volumes.md#persistentvolumeclaims - Status PersistentVolumeClaimStatus `json:"status,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#persistentvolumeclaims + Status PersistentVolumeClaimStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } // PersistentVolumeClaimList is a list of PersistentVolumeClaim items. type PersistentVolumeClaimList struct { unversioned.TypeMeta `json:",inline"` // Standard list metadata. - // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#types-kinds - unversioned.ListMeta `json:"metadata,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // A list of persistent volume claims. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/persistent-volumes.md#persistentvolumeclaims - Items []PersistentVolumeClaim `json:"items"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#persistentvolumeclaims + Items []PersistentVolumeClaim `json:"items" protobuf:"bytes,2,rep,name=items"` } // PersistentVolumeClaimSpec describes the common attributes of storage devices // and allows a Source for provider-specific attributes type PersistentVolumeClaimSpec struct { // AccessModes contains the desired access modes the volume should have. 
- // More info: http://releases.k8s.io/release-1.2/docs/user-guide/persistent-volumes.md#access-modes-1 - AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#access-modes-1 + AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty" protobuf:"bytes,1,rep,name=accessModes,casttype=PersistentVolumeAccessMode"` // Resources represents the minimum resources the volume should have. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/persistent-volumes.md#resources - Resources ResourceRequirements `json:"resources,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#resources + Resources ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,2,opt,name=resources"` // VolumeName is the binding reference to the PersistentVolume backing this claim. - VolumeName string `json:"volumeName,omitempty"` + VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,3,opt,name=volumeName"` } // PersistentVolumeClaimStatus is the current status of a persistent volume claim. type PersistentVolumeClaimStatus struct { // Phase represents the current phase of PersistentVolumeClaim. - Phase PersistentVolumeClaimPhase `json:"phase,omitempty"` + Phase PersistentVolumeClaimPhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=PersistentVolumeClaimPhase"` // AccessModes contains the actual access modes the volume backing the PVC has. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/persistent-volumes.md#access-modes-1 - AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#access-modes-1 + AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty" protobuf:"bytes,2,rep,name=accessModes,casttype=PersistentVolumeAccessMode"` // Represents the actual resources of the underlying volume. - Capacity ResourceList `json:"capacity,omitempty"` + Capacity ResourceList `json:"capacity,omitempty" protobuf:"bytes,3,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"` } type PersistentVolumeAccessMode string @@ -496,14 +510,18 @@ const ( ClaimPending PersistentVolumeClaimPhase = "Pending" // used for PersistentVolumeClaims that are bound ClaimBound PersistentVolumeClaimPhase = "Bound" + // used for PersistentVolumeClaims that lost their underlying + // PersistentVolume. The claim was bound to a PersistentVolume and this + // volume does not exist any longer and all data on it was lost. + ClaimLost PersistentVolumeClaimPhase = "Lost" ) // Represents a host path mapped into a pod. // Host path volumes do not support ownership management or SELinux relabeling. type HostPathVolumeSource struct { // Path of the directory on the host. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/volumes.md#hostpath - Path string `json:"path"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#hostpath + Path string `json:"path" protobuf:"bytes,1,opt,name=path"` } // Represents an empty directory for a pod. @@ -512,63 +530,63 @@ type EmptyDirVolumeSource struct { // What type of storage medium should back this directory. // The default is "" which means to use the node's default medium. // Must be an empty string (default) or Memory. 
- // More info: http://releases.k8s.io/release-1.2/docs/user-guide/volumes.md#emptydir - Medium StorageMedium `json:"medium,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#emptydir + Medium StorageMedium `json:"medium,omitempty" protobuf:"bytes,1,opt,name=medium,casttype=StorageMedium"` } // Represents a Glusterfs mount that lasts the lifetime of a pod. // Glusterfs volumes do not support ownership management or SELinux relabeling. type GlusterfsVolumeSource struct { // EndpointsName is the endpoint name that details Glusterfs topology. - // More info: http://releases.k8s.io/release-1.2/examples/glusterfs/README.md#create-a-pod - EndpointsName string `json:"endpoints"` + // More info: http://releases.k8s.io/HEAD/examples/glusterfs/README.md#create-a-pod + EndpointsName string `json:"endpoints" protobuf:"bytes,1,opt,name=endpoints"` // Path is the Glusterfs volume path. - // More info: http://releases.k8s.io/release-1.2/examples/glusterfs/README.md#create-a-pod - Path string `json:"path"` + // More info: http://releases.k8s.io/HEAD/examples/glusterfs/README.md#create-a-pod + Path string `json:"path" protobuf:"bytes,2,opt,name=path"` // ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. // Defaults to false. - // More info: http://releases.k8s.io/release-1.2/examples/glusterfs/README.md#create-a-pod - ReadOnly bool `json:"readOnly,omitempty"` + // More info: http://releases.k8s.io/HEAD/examples/glusterfs/README.md#create-a-pod + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` } // Represents a Rados Block Device mount that lasts the lifetime of a pod. // RBD volumes support ownership management and SELinux relabeling. type RBDVolumeSource struct { // A collection of Ceph monitors. - // More info: http://releases.k8s.io/release-1.2/examples/rbd/README.md#how-to-use-it - CephMonitors []string `json:"monitors"` + // More info: http://releases.k8s.io/HEAD/examples/rbd/README.md#how-to-use-it + CephMonitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"` // The rados image name. - // More info: http://releases.k8s.io/release-1.2/examples/rbd/README.md#how-to-use-it - RBDImage string `json:"image"` + // More info: http://releases.k8s.io/HEAD/examples/rbd/README.md#how-to-use-it + RBDImage string `json:"image" protobuf:"bytes,2,opt,name=image"` // Filesystem type of the volume that you want to mount. // Tip: Ensure that the filesystem type is supported by the host operating system. // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/volumes.md#rbd + // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#rbd // TODO: how do we prevent errors in the filesystem from compromising the machine - FSType string `json:"fsType,omitempty"` + FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"` // The rados pool name. // Default is rbd. - // More info: http://releases.k8s.io/release-1.2/examples/rbd/README.md#how-to-use-it. - RBDPool string `json:"pool"` + // More info: http://releases.k8s.io/HEAD/examples/rbd/README.md#how-to-use-it. + RBDPool string `json:"pool" protobuf:"bytes,4,opt,name=pool"` // The rados user name. // Default is admin. 
- // More info: http://releases.k8s.io/release-1.2/examples/rbd/README.md#how-to-use-it - RadosUser string `json:"user"` + // More info: http://releases.k8s.io/HEAD/examples/rbd/README.md#how-to-use-it + RadosUser string `json:"user" protobuf:"bytes,5,opt,name=user"` // Keyring is the path to key ring for RBDUser. // Default is /etc/ceph/keyring. - // More info: http://releases.k8s.io/release-1.2/examples/rbd/README.md#how-to-use-it - Keyring string `json:"keyring"` + // More info: http://releases.k8s.io/HEAD/examples/rbd/README.md#how-to-use-it + Keyring string `json:"keyring" protobuf:"bytes,6,opt,name=keyring"` // SecretRef is name of the authentication secret for RBDUser. If provided // overrides keyring. // Default is empty. - // More info: http://releases.k8s.io/release-1.2/examples/rbd/README.md#how-to-use-it - SecretRef *LocalObjectReference `json:"secretRef"` + // More info: http://releases.k8s.io/HEAD/examples/rbd/README.md#how-to-use-it + SecretRef *LocalObjectReference `json:"secretRef" protobuf:"bytes,7,opt,name=secretRef"` // ReadOnly here will force the ReadOnly setting in VolumeMounts. // Defaults to false. - // More info: http://releases.k8s.io/release-1.2/examples/rbd/README.md#how-to-use-it - ReadOnly bool `json:"readOnly,omitempty"` + // More info: http://releases.k8s.io/HEAD/examples/rbd/README.md#how-to-use-it + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,8,opt,name=readOnly"` } // Represents a cinder volume resource in Openstack. @@ -577,47 +595,47 @@ type RBDVolumeSource struct { // Cinder volumes support ownership management and SELinux relabeling. type CinderVolumeSource struct { // volume id used to identify the volume in cinder - // More info: http://releases.k8s.io/release-1.2/examples/mysql-cinder-pd/README.md - VolumeID string `json:"volumeID"` + // More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + VolumeID string `json:"volumeID" protobuf:"bytes,1,opt,name=volumeID"` // Filesystem type to mount. // Must be a filesystem type supported by the host operating system. // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // More info: http://releases.k8s.io/release-1.2/examples/mysql-cinder-pd/README.md - FSType string `json:"fsType,omitempty"` + // More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"` // Optional: Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. - // More info: http://releases.k8s.io/release-1.2/examples/mysql-cinder-pd/README.md - ReadOnly bool `json:"readOnly,omitempty"` + // More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` } // Represents a Ceph Filesystem mount that lasts the lifetime of a pod // Cephfs volumes do not support ownership management or SELinux relabeling. 
type CephFSVolumeSource struct { // Required: Monitors is a collection of Ceph monitors - // More info: http://releases.k8s.io/release-1.2/examples/cephfs/README.md#how-to-use-it - Monitors []string `json:"monitors"` + // More info: http://releases.k8s.io/HEAD/examples/cephfs/README.md#how-to-use-it + Monitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"` // Optional: Used as the mounted root, rather than the full Ceph tree, default is / - Path string `json:"path,omitempty"` + Path string `json:"path,omitempty" protobuf:"bytes,2,opt,name=path"` // Optional: User is the rados user name, default is admin - // More info: http://releases.k8s.io/release-1.2/examples/cephfs/README.md#how-to-use-it - User string `json:"user,omitempty"` + // More info: http://releases.k8s.io/HEAD/examples/cephfs/README.md#how-to-use-it + User string `json:"user,omitempty" protobuf:"bytes,3,opt,name=user"` // Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret - // More info: http://releases.k8s.io/release-1.2/examples/cephfs/README.md#how-to-use-it - SecretFile string `json:"secretFile,omitempty"` + // More info: http://releases.k8s.io/HEAD/examples/cephfs/README.md#how-to-use-it + SecretFile string `json:"secretFile,omitempty" protobuf:"bytes,4,opt,name=secretFile"` // Optional: SecretRef is reference to the authentication secret for User, default is empty. - // More info: http://releases.k8s.io/release-1.2/examples/cephfs/README.md#how-to-use-it - SecretRef *LocalObjectReference `json:"secretRef,omitempty"` + // More info: http://releases.k8s.io/HEAD/examples/cephfs/README.md#how-to-use-it + SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,5,opt,name=secretRef"` // Optional: Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. - // More info: http://releases.k8s.io/release-1.2/examples/cephfs/README.md#how-to-use-it - ReadOnly bool `json:"readOnly,omitempty"` + // More info: http://releases.k8s.io/HEAD/examples/cephfs/README.md#how-to-use-it + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,6,opt,name=readOnly"` } // Represents a Flocker volume mounted by the Flocker agent. // Flocker volumes do not support ownership management or SELinux relabeling. type FlockerVolumeSource struct { // Required: the volume name. This is going to be store on metadata -> name on the payload for Flocker - DatasetName string `json:"datasetName"` + DatasetName string `json:"datasetName" protobuf:"bytes,1,opt,name=datasetName"` } // StorageMedium defines ways that storage can be allocated to a volume. @@ -646,42 +664,46 @@ const ( // PDs support ownership management and SELinux relabeling. type GCEPersistentDiskVolumeSource struct { // Unique name of the PD resource in GCE. Used to identify the disk in GCE. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/volumes.md#gcepersistentdisk - PDName string `json:"pdName"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#gcepersistentdisk + PDName string `json:"pdName" protobuf:"bytes,1,opt,name=pdName"` // Filesystem type of the volume that you want to mount. // Tip: Ensure that the filesystem type is supported by the host operating system. // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. 
- // More info: http://releases.k8s.io/release-1.2/docs/user-guide/volumes.md#gcepersistentdisk + // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#gcepersistentdisk // TODO: how do we prevent errors in the filesystem from compromising the machine - FSType string `json:"fsType,omitempty"` + FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"` // The partition in the volume that you want to mount. // If omitted, the default is to mount by volume name. // Examples: For volume /dev/sda1, you specify the partition as "1". // Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/volumes.md#gcepersistentdisk - Partition int32 `json:"partition,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#gcepersistentdisk + Partition int32 `json:"partition,omitempty" protobuf:"varint,3,opt,name=partition"` // ReadOnly here will force the ReadOnly setting in VolumeMounts. // Defaults to false. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/volumes.md#gcepersistentdisk - ReadOnly bool `json:"readOnly,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#gcepersistentdisk + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"` } // FlexVolume represents a generic volume resource that is // provisioned/attached using a exec based plugin. This is an alpha feature and may change in future. type FlexVolumeSource struct { // Driver is the name of the driver to use for this volume. - Driver string `json:"driver"` + Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"` // Filesystem type to mount. // Must be a filesystem type supported by the host operating system. // Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. - FSType string `json:"fsType,omitempty"` - // Optional: SecretRef is reference to the authentication secret for User, default is empty. - SecretRef *LocalObjectReference `json:"secretRef,omitempty"` + FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"` + // Optional: SecretRef is reference to the secret object containing + // sensitive information to pass to the plugin scripts. This may be + // empty if no secret object is specified. If the secret object + // contains more than one secret, all secrets are passed to the plugin + // scripts. + SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,3,opt,name=secretRef"` // Optional: Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. - ReadOnly bool `json:"readOnly,omitempty"` + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"` // Optional: Extra command options if any. - Options map[string]string `json:"options,omitempty"` + Options map[string]string `json:"options,omitempty" protobuf:"bytes,5,rep,name=options"` } // Represents a Persistent Disk resource in AWS. @@ -692,23 +714,23 @@ type FlexVolumeSource struct { // ownership management and SELinux relabeling. type AWSElasticBlockStoreVolumeSource struct { // Unique ID of the persistent disk resource in AWS (Amazon EBS volume). 
- // More info: http://releases.k8s.io/release-1.2/docs/user-guide/volumes.md#awselasticblockstore - VolumeID string `json:"volumeID"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#awselasticblockstore + VolumeID string `json:"volumeID" protobuf:"bytes,1,opt,name=volumeID"` // Filesystem type of the volume that you want to mount. // Tip: Ensure that the filesystem type is supported by the host operating system. // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/volumes.md#awselasticblockstore + // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#awselasticblockstore // TODO: how do we prevent errors in the filesystem from compromising the machine - FSType string `json:"fsType,omitempty"` + FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"` // The partition in the volume that you want to mount. // If omitted, the default is to mount by volume name. // Examples: For volume /dev/sda1, you specify the partition as "1". // Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). - Partition int32 `json:"partition,omitempty"` + Partition int32 `json:"partition,omitempty" protobuf:"varint,3,opt,name=partition"` // Specify "true" to force and set the ReadOnly property in VolumeMounts to "true". // If omitted, the default is "false". - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/volumes.md#awselasticblockstore - ReadOnly bool `json:"readOnly,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#awselasticblockstore + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"` } // Represents a volume that is populated with the contents of a git repository. @@ -716,14 +738,14 @@ type AWSElasticBlockStoreVolumeSource struct { // Git repo volumes support SELinux relabeling. type GitRepoVolumeSource struct { // Repository URL - Repository string `json:"repository"` + Repository string `json:"repository" protobuf:"bytes,1,opt,name=repository"` // Commit hash for the specified revision. - Revision string `json:"revision,omitempty"` + Revision string `json:"revision,omitempty" protobuf:"bytes,2,opt,name=revision"` // Target directory name. // Must not contain or start with '..'. If '.' is supplied, the volume directory will be the // git repository. Otherwise, if specified, the volume will contain the git repository in // the subdirectory with the given name. - Directory string `json:"directory,omitempty"` + Directory string `json:"directory,omitempty" protobuf:"bytes,3,opt,name=directory"` } // Adapts a Secret into a volume. @@ -733,26 +755,34 @@ type GitRepoVolumeSource struct { // Secret volumes support ownership management and SELinux relabeling. type SecretVolumeSource struct { // Name of the secret in the pod's namespace to use. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/volumes.md#secrets - SecretName string `json:"secretName,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#secrets + SecretName string `json:"secretName,omitempty" protobuf:"bytes,1,opt,name=secretName"` + // If unspecified, each key-value pair in the Data field of the referenced + // Secret will be projected into the volume as a file whose name is the + // key and content is the value. If specified, the listed keys will be + // projected into the specified paths, and unlisted keys will not be + // present. 
If a key is specified which is not present in the Secret, + // the volume setup will error. Paths must be relative and may not contain + // the '..' path or start with '..'. + Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"` } // Represents an NFS mount that lasts the lifetime of a pod. // NFS volumes do not support ownership management or SELinux relabeling. type NFSVolumeSource struct { // Server is the hostname or IP address of the NFS server. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/volumes.md#nfs - Server string `json:"server"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#nfs + Server string `json:"server" protobuf:"bytes,1,opt,name=server"` // Path that is exported by the NFS server. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/volumes.md#nfs - Path string `json:"path"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#nfs + Path string `json:"path" protobuf:"bytes,2,opt,name=path"` // ReadOnly here will force // the NFS export to be mounted with read-only permissions. // Defaults to false. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/volumes.md#nfs - ReadOnly bool `json:"readOnly,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#nfs + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` } // Represents an ISCSI disk. @@ -761,22 +791,22 @@ type NFSVolumeSource struct { type ISCSIVolumeSource struct { // iSCSI target portal. The portal is either an IP or ip_addr:port if the port // is other than default (typically TCP ports 860 and 3260). - TargetPortal string `json:"targetPortal"` + TargetPortal string `json:"targetPortal" protobuf:"bytes,1,opt,name=targetPortal"` // Target iSCSI Qualified Name. - IQN string `json:"iqn"` + IQN string `json:"iqn" protobuf:"bytes,2,opt,name=iqn"` // iSCSI target lun number. - Lun int32 `json:"lun"` + Lun int32 `json:"lun" protobuf:"varint,3,opt,name=lun"` // Optional: Defaults to 'default' (tcp). iSCSI interface name that uses an iSCSI transport. - ISCSIInterface string `json:"iscsiInterface,omitempty"` + ISCSIInterface string `json:"iscsiInterface,omitempty" protobuf:"bytes,4,opt,name=iscsiInterface"` // Filesystem type of the volume that you want to mount. // Tip: Ensure that the filesystem type is supported by the host operating system. // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/volumes.md#iscsi + // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#iscsi // TODO: how do we prevent errors in the filesystem from compromising the machine - FSType string `json:"fsType,omitempty"` + FSType string `json:"fsType,omitempty" protobuf:"bytes,5,opt,name=fsType"` // ReadOnly here will force the ReadOnly setting in VolumeMounts. // Defaults to false. - ReadOnly bool `json:"readOnly,omitempty"` + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,6,opt,name=readOnly"` } // Represents a Fibre Channel volume. @@ -784,28 +814,38 @@ type ISCSIVolumeSource struct { // Fibre Channel volumes support ownership management and SELinux relabeling. 
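+ //
+ // A minimal sketch of projecting selected Secret keys to paths via Items
+ // (secret and key names are hypothetical):
+ //
+ //	var exampleSecretVolume = SecretVolumeSource{
+ //		SecretName: "app-credentials",
+ //		Items: []KeyToPath{
+ //			// Only "username" is projected; unlisted keys are omitted,
+ //			// and a key missing from the Secret fails volume setup.
+ //			{Key: "username", Path: "creds/username.txt"},
+ //		},
+ //	}
+ //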
type FCVolumeSource struct { // Required: FC target world wide names (WWNs) - TargetWWNs []string `json:"targetWWNs"` + TargetWWNs []string `json:"targetWWNs" protobuf:"bytes,1,rep,name=targetWWNs"` // Required: FC target lun number - Lun *int32 `json:"lun"` + Lun *int32 `json:"lun" protobuf:"varint,2,opt,name=lun"` // Filesystem type to mount. // Must be a filesystem type supported by the host operating system. // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. // TODO: how do we prevent errors in the filesystem from compromising the machine - FSType string `json:"fsType,omitempty"` + FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"` // Optional: Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. - ReadOnly bool `json:"readOnly,omitempty"` + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"` } // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. type AzureFileVolumeSource struct { // the name of secret that contains Azure Storage Account Name and Key - SecretName string `json:"secretName"` + SecretName string `json:"secretName" protobuf:"bytes,1,opt,name=secretName"` // Share Name - ShareName string `json:"shareName"` + ShareName string `json:"shareName" protobuf:"bytes,2,opt,name=shareName"` // Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. - ReadOnly bool `json:"readOnly,omitempty"` + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` +} + +// Represents a vSphere volume resource. +type VsphereVirtualDiskVolumeSource struct { + // Path that identifies vSphere volume vmdk + VolumePath string `json:"volumePath" protobuf:"bytes,1,opt,name=volumePath"` + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"` } // Adapts a ConfigMap into a volume. @@ -815,7 +855,7 @@ type AzureFileVolumeSource struct { // the items element is populated with specific mappings of keys to paths. // ConfigMap volumes support ownership management and SELinux relabeling. type ConfigMapVolumeSource struct { - LocalObjectReference `json:",inline"` + LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"` // If unspecified, each key-value pair in the Data field of the referenced // ConfigMap will be projected into the volume as a file whose name is the // key and content is the value. If specified, the listed keys will be @@ -823,19 +863,19 @@ type ConfigMapVolumeSource struct { // present. If a key is specified which is not present in the ConfigMap, // the volume setup will error. Paths must be relative and may not contain // the '..' path or start with '..'. - Items []KeyToPath `json:"items,omitempty"` + Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"` } // Maps a string key to a path within a volume. type KeyToPath struct { // The key to project. - Key string `json:"key"` + Key string `json:"key" protobuf:"bytes,1,opt,name=key"` // The relative path of the file to map the key to. // May not be an absolute path. // May not contain the path element '..'. // May not start with the string '..'. 
- Path string `json:"path"` + Path string `json:"path" protobuf:"bytes,2,opt,name=path"` } // ContainerPort represents a network port in a single container. @@ -843,38 +883,41 @@ type ContainerPort struct { // If specified, this must be an IANA_SVC_NAME and unique within the pod. Each // named port in a pod must have a unique name. Name for the port that can be // referred to by services. - Name string `json:"name,omitempty"` + Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` // Number of port to expose on the host. // If specified, this must be a valid port number, 0 < x < 65536. // If HostNetwork is specified, this must match ContainerPort. // Most containers do not need this. - HostPort int32 `json:"hostPort,omitempty"` + HostPort int32 `json:"hostPort,omitempty" protobuf:"varint,2,opt,name=hostPort"` // Number of port to expose on the pod's IP address. // This must be a valid port number, 0 < x < 65536. - ContainerPort int32 `json:"containerPort"` + ContainerPort int32 `json:"containerPort" protobuf:"varint,3,opt,name=containerPort"` // Protocol for port. Must be UDP or TCP. // Defaults to "TCP". - Protocol Protocol `json:"protocol,omitempty"` + Protocol Protocol `json:"protocol,omitempty" protobuf:"bytes,4,opt,name=protocol,casttype=Protocol"` // What host IP to bind the external port to. - HostIP string `json:"hostIP,omitempty"` + HostIP string `json:"hostIP,omitempty" protobuf:"bytes,5,opt,name=hostIP"` } // VolumeMount describes a mounting of a Volume within a container. type VolumeMount struct { // This must match the Name of a Volume. - Name string `json:"name"` + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` // Mounted read-only if true, read-write otherwise (false or unspecified). // Defaults to false. - ReadOnly bool `json:"readOnly,omitempty"` + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,2,opt,name=readOnly"` // Path within the container at which the volume should be mounted. Must // not contain ':'. - MountPath string `json:"mountPath"` + MountPath string `json:"mountPath" protobuf:"bytes,3,opt,name=mountPath"` + // Path within the volume from which the container's volume should be mounted. + // Defaults to "" (volume's root). + SubPath string `json:"subPath,omitempty" protobuf:"bytes,4,opt,name=subPath"` } // EnvVar represents an environment variable present in a Container. type EnvVar struct { // Name of the environment variable. Must be a C_IDENTIFIER. - Name string `json:"name"` + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` // Optional: no more than one of the following may be specified. @@ -886,69 +929,82 @@ type EnvVar struct { // references will never be expanded, regardless of whether the variable // exists or not. // Defaults to "". - Value string `json:"value,omitempty"` + Value string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"` // Source for the environment variable's value. Cannot be used if value is not empty. - ValueFrom *EnvVarSource `json:"valueFrom,omitempty"` + ValueFrom *EnvVarSource `json:"valueFrom,omitempty" protobuf:"bytes,3,opt,name=valueFrom"` } // EnvVarSource represents a source for the value of an EnvVar. type EnvVarSource struct { // Selects a field of the pod; only name and namespace are supported. 
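+ //
+ // A minimal sketch of mounting only a subdirectory of a volume via SubPath
+ // (names are hypothetical):
+ //
+ //	var exampleMount = VolumeMount{
+ //		Name:      "config", // must match a Volume name in the pod
+ //		MountPath: "/etc/app",
+ //		SubPath:   "prod", // mounts <volume root>/prod instead of the volume's root
+ //		ReadOnly:  true,
+ //	}
+ //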
- FieldRef *ObjectFieldSelector `json:"fieldRef,omitempty"` + FieldRef *ObjectFieldSelector `json:"fieldRef,omitempty" protobuf:"bytes,1,opt,name=fieldRef"` + // Selects a resource of the container: only resources limits and requests + // (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + ResourceFieldRef *ResourceFieldSelector `json:"resourceFieldRef,omitempty" protobuf:"bytes,2,opt,name=resourceFieldRef"` // Selects a key of a ConfigMap. - ConfigMapKeyRef *ConfigMapKeySelector `json:"configMapKeyRef,omitempty"` + ConfigMapKeyRef *ConfigMapKeySelector `json:"configMapKeyRef,omitempty" protobuf:"bytes,3,opt,name=configMapKeyRef"` // Selects a key of a secret in the pod's namespace - SecretKeyRef *SecretKeySelector `json:"secretKeyRef,omitempty"` + SecretKeyRef *SecretKeySelector `json:"secretKeyRef,omitempty" protobuf:"bytes,4,opt,name=secretKeyRef"` } // ObjectFieldSelector selects an APIVersioned field of an object. type ObjectFieldSelector struct { // Version of the schema the FieldPath is written in terms of, defaults to "v1". - APIVersion string `json:"apiVersion,omitempty"` + APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,1,opt,name=apiVersion"` // Path of the field to select in the specified API version. - FieldPath string `json:"fieldPath"` + FieldPath string `json:"fieldPath" protobuf:"bytes,2,opt,name=fieldPath"` +} + +// ResourceFieldSelector represents container resources (cpu, memory) and their output format +type ResourceFieldSelector struct { + // Container name: required for volumes, optional for env vars + ContainerName string `json:"containerName,omitempty" protobuf:"bytes,1,opt,name=containerName"` + // Required: resource to select + Resource string `json:"resource" protobuf:"bytes,2,opt,name=resource"` + // Specifies the output format of the exposed resources, defaults to "1" + Divisor resource.Quantity `json:"divisor,omitempty" protobuf:"bytes,3,opt,name=divisor"` } // Selects a key from a ConfigMap. type ConfigMapKeySelector struct { // The ConfigMap to select from. - LocalObjectReference `json:",inline"` + LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"` // The key to select. - Key string `json:"key"` + Key string `json:"key" protobuf:"bytes,2,opt,name=key"` } // SecretKeySelector selects a key of a Secret. type SecretKeySelector struct { // The name of the secret in the pod's namespace to select from. - LocalObjectReference `json:",inline"` + LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"` // The key of the secret to select from. Must be a valid secret key. - Key string `json:"key"` + Key string `json:"key" protobuf:"bytes,2,opt,name=key"` } // HTTPHeader describes a custom header to be used in HTTP probes type HTTPHeader struct { // The header field name - Name string `json:"name"` + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` // The header field value - Value string `json:"value"` + Value string `json:"value" protobuf:"bytes,2,opt,name=value"` } // HTTPGetAction describes an action based on HTTP Get requests. type HTTPGetAction struct { // Path to access on the HTTP server. - Path string `json:"path,omitempty"` + Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"` // Name or number of the port to access on the container. // Number must be in the range 1 to 65535. // Name must be an IANA_SVC_NAME. 
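+ //
+ // A minimal sketch of exposing a container's memory limit as an environment
+ // variable through ResourceFieldRef (names are hypothetical; Divisor is left
+ // at its default of "1"):
+ //
+ //	var exampleEnv = EnvVar{
+ //		Name: "MEM_LIMIT",
+ //		ValueFrom: &EnvVarSource{
+ //			ResourceFieldRef: &ResourceFieldSelector{
+ //				ContainerName: "app", // optional for env vars, required for volumes
+ //				Resource:      "limits.memory",
+ //			},
+ //		},
+ //	}
+ //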
- Port intstr.IntOrString `json:"port"` + Port intstr.IntOrString `json:"port" protobuf:"bytes,2,opt,name=port"` // Host name to connect to, defaults to the pod IP. You probably want to set // "Host" in httpHeaders instead. - Host string `json:"host,omitempty"` + Host string `json:"host,omitempty" protobuf:"bytes,3,opt,name=host"` // Scheme to use for connecting to the host. // Defaults to HTTP. - Scheme URIScheme `json:"scheme,omitempty"` + Scheme URIScheme `json:"scheme,omitempty" protobuf:"bytes,4,opt,name=scheme,casttype=URIScheme"` // Custom headers to set in the request. HTTP allows repeated headers. - HTTPHeaders []HTTPHeader `json:"httpHeaders,omitempty"` + HTTPHeaders []HTTPHeader `json:"httpHeaders,omitempty" protobuf:"bytes,5,rep,name=httpHeaders"` } // URIScheme identifies the scheme used for connection to a host for Get actions @@ -966,7 +1022,7 @@ type TCPSocketAction struct { // Number or name of the port to access on the container. // Number must be in the range 1 to 65535. // Name must be an IANA_SVC_NAME. - Port intstr.IntOrString `json:"port"` + Port intstr.IntOrString `json:"port" protobuf:"bytes,1,opt,name=port"` } // ExecAction describes a "run in container" action. @@ -976,30 +1032,30 @@ type ExecAction struct { // not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use // a shell, you need to explicitly call out to that shell. // Exit status of 0 is treated as live/healthy and non-zero is unhealthy. - Command []string `json:"command,omitempty"` + Command []string `json:"command,omitempty" protobuf:"bytes,1,rep,name=command"` } // Probe describes a health check to be performed against a container to determine whether it is // alive or ready to receive traffic. type Probe struct { // The action taken to determine the health of a container - Handler `json:",inline"` + Handler `json:",inline" protobuf:"bytes,1,opt,name=handler"` // Number of seconds after the container has started before liveness probes are initiated. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/pod-states.md#container-probes - InitialDelaySeconds int32 `json:"initialDelaySeconds,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#container-probes + InitialDelaySeconds int32 `json:"initialDelaySeconds,omitempty" protobuf:"varint,2,opt,name=initialDelaySeconds"` // Number of seconds after which the probe times out. // Defaults to 1 second. Minimum value is 1. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/pod-states.md#container-probes - TimeoutSeconds int32 `json:"timeoutSeconds,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#container-probes + TimeoutSeconds int32 `json:"timeoutSeconds,omitempty" protobuf:"varint,3,opt,name=timeoutSeconds"` // How often (in seconds) to perform the probe. // Default to 10 seconds. Minimum value is 1. - PeriodSeconds int32 `json:"periodSeconds,omitempty"` + PeriodSeconds int32 `json:"periodSeconds,omitempty" protobuf:"varint,4,opt,name=periodSeconds"` // Minimum consecutive successes for the probe to be considered successful after having failed. // Defaults to 1. Must be 1 for liveness. Minimum value is 1. - SuccessThreshold int32 `json:"successThreshold,omitempty"` + SuccessThreshold int32 `json:"successThreshold,omitempty" protobuf:"varint,5,opt,name=successThreshold"` // Minimum consecutive failures for the probe to be considered failed after having succeeded. // Defaults to 3. Minimum value is 1. 
- FailureThreshold int32 `json:"failureThreshold,omitempty"` + FailureThreshold int32 `json:"failureThreshold,omitempty" protobuf:"varint,6,opt,name=failureThreshold"` } // PullPolicy describes a policy for if/when to pull a container image @@ -1020,21 +1076,21 @@ type Capability string // Adds and removes POSIX capabilities from running containers. type Capabilities struct { // Added capabilities - Add []Capability `json:"add,omitempty"` + Add []Capability `json:"add,omitempty" protobuf:"bytes,1,rep,name=add,casttype=Capability"` // Removed capabilities - Drop []Capability `json:"drop,omitempty"` + Drop []Capability `json:"drop,omitempty" protobuf:"bytes,2,rep,name=drop,casttype=Capability"` } // ResourceRequirements describes the compute resource requirements. type ResourceRequirements struct { // Limits describes the maximum amount of compute resources allowed. - // More info: http://releases.k8s.io/release-1.2/docs/design/resources.md#resource-specifications - Limits ResourceList `json:"limits,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/design/resources.md#resource-specifications + Limits ResourceList `json:"limits,omitempty" protobuf:"bytes,1,rep,name=limits,casttype=ResourceList,castkey=ResourceName"` // Requests describes the minimum amount of compute resources required. // If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, // otherwise to an implementation-defined value. - // More info: http://releases.k8s.io/release-1.2/docs/design/resources.md#resource-specifications - Requests ResourceList `json:"requests,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/design/resources.md#resource-specifications + Requests ResourceList `json:"requests,omitempty" protobuf:"bytes,2,rep,name=requests,casttype=ResourceList,castkey=ResourceName"` } const ( @@ -1047,10 +1103,10 @@ type Container struct { // Name of the container specified as a DNS_LABEL. // Each container in a pod must have a unique name (DNS_LABEL). // Cannot be updated. - Name string `json:"name"` + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` // Docker image name. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/images.md - Image string `json:"image,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/images.md + Image string `json:"image,omitempty" protobuf:"bytes,2,opt,name=image"` // Entrypoint array. Not executed within a shell. // The docker image's ENTRYPOINT is used if this is not provided. // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable @@ -1058,8 +1114,8 @@ type Container struct { // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, // regardless of whether the variable exists or not. // Cannot be updated. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/containers.md#containers-and-commands - Command []string `json:"command,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/containers.md#containers-and-commands + Command []string `json:"command,omitempty" protobuf:"bytes,3,rep,name=command"` // Arguments to the entrypoint. // The docker image's CMD is used if this is not provided. // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable @@ -1067,13 +1123,13 @@ type Container struct { // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, // regardless of whether the variable exists or not. 
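+ //
+ // A minimal sketch of an HTTP liveness probe (path and timings are
+ // hypothetical); Handler is the embedded field, so its members are set inline:
+ //
+ //	var exampleProbe = Probe{
+ //		Handler: Handler{
+ //			HTTPGet: &HTTPGetAction{
+ //				Path: "/healthz",
+ //				Port: intstr.FromInt(8080),
+ //			},
+ //		},
+ //		InitialDelaySeconds: 15, // let the process boot before probing
+ //		TimeoutSeconds:      1,
+ //		PeriodSeconds:       10,
+ //		FailureThreshold:    3, // three consecutive failures mark the container unhealthy
+ //	}
+ //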
// Cannot be updated. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/containers.md#containers-and-commands - Args []string `json:"args,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/containers.md#containers-and-commands + Args []string `json:"args,omitempty" protobuf:"bytes,4,rep,name=args"` // Container's working directory. // If not specified, the container runtime's default will be used, which // might be configured in the container image. // Cannot be updated. - WorkingDir string `json:"workingDir,omitempty"` + WorkingDir string `json:"workingDir,omitempty" protobuf:"bytes,5,opt,name=workingDir"` // List of ports to expose from the container. Exposing a port here gives // the system additional information about the network connections a // container uses, but is primarily informational. Not specifying a port here @@ -1081,45 +1137,45 @@ type Container struct { // listening on the default "0.0.0.0" address inside a container will be // accessible from the network. // Cannot be updated. - Ports []ContainerPort `json:"ports,omitempty" patchStrategy:"merge" patchMergeKey:"containerPort"` + Ports []ContainerPort `json:"ports,omitempty" patchStrategy:"merge" patchMergeKey:"containerPort" protobuf:"bytes,6,rep,name=ports"` // List of environment variables to set in the container. // Cannot be updated. - Env []EnvVar `json:"env,omitempty" patchStrategy:"merge" patchMergeKey:"name"` + Env []EnvVar `json:"env,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=env"` // Compute Resources required by this container. // Cannot be updated. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/persistent-volumes.md#resources - Resources ResourceRequirements `json:"resources,omitempty"` - // Pod volumes to mount into the container's filesyste. + // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#resources + Resources ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"` + // Pod volumes to mount into the container's filesystem. // Cannot be updated. - VolumeMounts []VolumeMount `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"name"` + VolumeMounts []VolumeMount `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,9,rep,name=volumeMounts"` // Periodic probe of container liveness. // Container will be restarted if the probe fails. // Cannot be updated. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/pod-states.md#container-probes - LivenessProbe *Probe `json:"livenessProbe,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#container-probes + LivenessProbe *Probe `json:"livenessProbe,omitempty" protobuf:"bytes,10,opt,name=livenessProbe"` // Periodic probe of container service readiness. // Container will be removed from service endpoints if the probe fails. // Cannot be updated. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/pod-states.md#container-probes - ReadinessProbe *Probe `json:"readinessProbe,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#container-probes + ReadinessProbe *Probe `json:"readinessProbe,omitempty" protobuf:"bytes,11,opt,name=readinessProbe"` // Actions that the management system should take in response to container lifecycle events. // Cannot be updated.
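+ //
+ // A minimal sketch of $(VAR_NAME) expansion in Command/Args (values are
+ // hypothetical): $(LOG_DIR) is expanded from the container's environment,
+ // while the escaped $$(HOME) is passed through literally as $(HOME):
+ //
+ //	var exampleContainer = Container{
+ //		Name:    "app",
+ //		Image:   "busybox",
+ //		Command: []string{"sh", "-c"},
+ //		Args:    []string{"echo $(LOG_DIR) $$(HOME)"},
+ //		Env:     []EnvVar{{Name: "LOG_DIR", Value: "/var/log/app"}},
+ //	}
+ //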
- Lifecycle *Lifecycle `json:"lifecycle,omitempty"` + Lifecycle *Lifecycle `json:"lifecycle,omitempty" protobuf:"bytes,12,opt,name=lifecycle"` // Optional: Path at which the file to which the container's termination message // will be written is mounted into the container's filesystem. // Message written is intended to be brief final status, such as an assertion failure message. // Defaults to /dev/termination-log. // Cannot be updated. - TerminationMessagePath string `json:"terminationMessagePath,omitempty"` + TerminationMessagePath string `json:"terminationMessagePath,omitempty" protobuf:"bytes,13,opt,name=terminationMessagePath"` // Image pull policy. // One of Always, Never, IfNotPresent. // Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. // Cannot be updated. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/images.md#updating-images - ImagePullPolicy PullPolicy `json:"imagePullPolicy,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/images.md#updating-images + ImagePullPolicy PullPolicy `json:"imagePullPolicy,omitempty" protobuf:"bytes,14,opt,name=imagePullPolicy,casttype=PullPolicy"` // Security options the pod should run with. - // More info: http://releases.k8s.io/release-1.2/docs/design/security_context.md - SecurityContext *SecurityContext `json:"securityContext,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/design/security_context.md + SecurityContext *SecurityContext `json:"securityContext,omitempty" protobuf:"bytes,15,opt,name=securityContext"` // Variables for interactive containers, these have very specialized use-cases (e.g. debugging) // and shouldn't be used for general purpose containers. @@ -1127,7 +1183,7 @@ type Container struct { // Whether this container should allocate a buffer for stdin in the container runtime. If this // is not set, reads from stdin in the container will always result in EOF. // Default is false. - Stdin bool `json:"stdin,omitempty"` + Stdin bool `json:"stdin,omitempty" protobuf:"varint,16,opt,name=stdin"` // Whether the container runtime should close the stdin channel after it has been opened by // a single attach. When stdin is true the stdin stream will remain open across multiple attach // sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the @@ -1135,10 +1191,10 @@ type Container struct { // at which time stdin is closed and remains closed until the container is restarted. If this // flag is false, a container process that reads from stdin will never receive an EOF. // Default is false - StdinOnce bool `json:"stdinOnce,omitempty"` + StdinOnce bool `json:"stdinOnce,omitempty" protobuf:"varint,17,opt,name=stdinOnce"` // Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. // Default is false. - TTY bool `json:"tty,omitempty"` + TTY bool `json:"tty,omitempty" protobuf:"varint,18,opt,name=tty"` } // Handler defines a specific action that should be taken @@ -1146,13 +1202,13 @@ type Container struct { type Handler struct { // One and only one of the following should be specified. // Exec specifies the action to take. - Exec *ExecAction `json:"exec,omitempty"` + Exec *ExecAction `json:"exec,omitempty" protobuf:"bytes,1,opt,name=exec"` // HTTPGet specifies the http request to perform. - HTTPGet *HTTPGetAction `json:"httpGet,omitempty"` + HTTPGet *HTTPGetAction `json:"httpGet,omitempty" protobuf:"bytes,2,opt,name=httpGet"` // TCPSocket specifies an action involving a TCP port.
// TCP hooks not yet supported // TODO: implement a realistic TCP lifecycle hook - TCPSocket *TCPSocketAction `json:"tcpSocket,omitempty"` + TCPSocket *TCPSocketAction `json:"tcpSocket,omitempty" protobuf:"bytes,3,opt,name=tcpSocket"` } // Lifecycle describes actions that the management system should take in response to container lifecycle @@ -1162,15 +1218,15 @@ type Lifecycle struct { // PostStart is called immediately after a container is created. If the handler fails, // the container is terminated and restarted according to its restart policy. // Other management of the container blocks until the hook completes. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/container-environment.md#hook-details - PostStart *Handler `json:"postStart,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/container-environment.md#hook-details + PostStart *Handler `json:"postStart,omitempty" protobuf:"bytes,1,opt,name=postStart"` // PreStop is called immediately before a container is terminated. // The container is terminated after the handler completes. // The reason for termination is passed to the handler. // Regardless of the outcome of the handler, the container is eventually terminated. // Other management of the container blocks until the hook completes. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/container-environment.md#hook-details - PreStop *Handler `json:"preStop,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/container-environment.md#hook-details + PreStop *Handler `json:"preStop,omitempty" protobuf:"bytes,2,opt,name=preStop"` } type ConditionStatus string @@ -1188,33 +1244,33 @@ const ( // ContainerStateWaiting is a waiting state of a container. type ContainerStateWaiting struct { // (brief) reason the container is not yet running. - Reason string `json:"reason,omitempty"` + Reason string `json:"reason,omitempty" protobuf:"bytes,1,opt,name=reason"` // Message regarding why the container is not yet running. - Message string `json:"message,omitempty"` + Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"` } // ContainerStateRunning is a running state of a container. type ContainerStateRunning struct { // Time at which the container was last (re-)started - StartedAt unversioned.Time `json:"startedAt,omitempty"` + StartedAt unversioned.Time `json:"startedAt,omitempty" protobuf:"bytes,1,opt,name=startedAt"` } // ContainerStateTerminated is a terminated state of a container. 
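+ //
+ // A minimal sketch of a PreStop hook that drains connections before the
+ // container receives its termination signal (command is hypothetical):
+ //
+ //	var exampleLifecycle = Lifecycle{
+ //		PreStop: &Handler{
+ //			Exec: &ExecAction{
+ //				// Not run in a shell; invoke one explicitly for shell syntax.
+ //				Command: []string{"/bin/sh", "-c", "/usr/local/bin/drain && sleep 5"},
+ //			},
+ //		},
+ //	}
+ //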
type ContainerStateTerminated struct { // Exit status from the last termination of the container - ExitCode int32 `json:"exitCode"` + ExitCode int32 `json:"exitCode" protobuf:"varint,1,opt,name=exitCode"` // Signal from the last termination of the container - Signal int32 `json:"signal,omitempty"` + Signal int32 `json:"signal,omitempty" protobuf:"varint,2,opt,name=signal"` // (brief) reason from the last termination of the container - Reason string `json:"reason,omitempty"` + Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"` // Message regarding the last termination of the container - Message string `json:"message,omitempty"` + Message string `json:"message,omitempty" protobuf:"bytes,4,opt,name=message"` // Time at which previous execution of the container started - StartedAt unversioned.Time `json:"startedAt,omitempty"` + StartedAt unversioned.Time `json:"startedAt,omitempty" protobuf:"bytes,5,opt,name=startedAt"` // Time at which the container last terminated - FinishedAt unversioned.Time `json:"finishedAt,omitempty"` + FinishedAt unversioned.Time `json:"finishedAt,omitempty" protobuf:"bytes,6,opt,name=finishedAt"` // Container's ID in the format 'docker://<container_id>' - ContainerID string `json:"containerID,omitempty"` + ContainerID string `json:"containerID,omitempty" protobuf:"bytes,7,opt,name=containerID"` } // ContainerState holds a possible state of container. @@ -1222,38 +1278,38 @@ type ContainerStateTerminated struct { // If none of them is specified, the default one is ContainerStateWaiting. type ContainerState struct { // Details about a waiting container - Waiting *ContainerStateWaiting `json:"waiting,omitempty"` + Waiting *ContainerStateWaiting `json:"waiting,omitempty" protobuf:"bytes,1,opt,name=waiting"` // Details about a running container - Running *ContainerStateRunning `json:"running,omitempty"` + Running *ContainerStateRunning `json:"running,omitempty" protobuf:"bytes,2,opt,name=running"` // Details about a terminated container - Terminated *ContainerStateTerminated `json:"terminated,omitempty"` + Terminated *ContainerStateTerminated `json:"terminated,omitempty" protobuf:"bytes,3,opt,name=terminated"` } // ContainerStatus contains details for the current status of this container. type ContainerStatus struct { // This must be a DNS_LABEL. Each container in a pod must have a unique name. // Cannot be updated. - Name string `json:"name"` + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` // Details about the container's current condition. - State ContainerState `json:"state,omitempty"` + State ContainerState `json:"state,omitempty" protobuf:"bytes,2,opt,name=state"` // Details about the container's last termination condition. - LastTerminationState ContainerState `json:"lastState,omitempty"` + LastTerminationState ContainerState `json:"lastState,omitempty" protobuf:"bytes,3,opt,name=lastState"` // Specifies whether the container has passed its readiness probe. - Ready bool `json:"ready"` + Ready bool `json:"ready" protobuf:"varint,4,opt,name=ready"` // The number of times the container has been restarted, currently based on // the number of dead containers that have not yet been removed. // Note that this is calculated from dead containers. But those containers are subject to // garbage collection. This value will get capped at 5 by GC. - RestartCount int32 `json:"restartCount"` + RestartCount int32 `json:"restartCount" protobuf:"varint,5,opt,name=restartCount"` // The image the container is running.
- // More info: http://releases.k8s.io/release-1.2/docs/user-guide/images.md + // More info: http://releases.k8s.io/HEAD/docs/user-guide/images.md // TODO(dchen1107): Which image the container is running with? - Image string `json:"image"` + Image string `json:"image" protobuf:"bytes,6,opt,name=image"` // ImageID of the container's image. - ImageID string `json:"imageID"` + ImageID string `json:"imageID" protobuf:"bytes,7,opt,name=imageID"` // Container's ID in the format 'docker://<container_id>'. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/container-environment.md#container-information - ContainerID string `json:"containerID,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/container-environment.md#container-information + ContainerID string `json:"containerID,omitempty" protobuf:"bytes,8,opt,name=containerID"` } // PodPhase is a label for the condition of a pod at the current time. @@ -1284,6 +1340,8 @@ type PodConditionType string // These are valid conditions of pod. const ( + // PodScheduled represents status of the scheduling process for this pod. + PodScheduled PodConditionType = "PodScheduled" // PodReady means the pod is able to service requests and should be added to the // load balancing pools of all matching services. PodReady PodConditionType = "Ready" @@ -1293,20 +1351,20 @@ const ( type PodCondition struct { // Type is the type of the condition. // Currently only Ready. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/pod-states.md#pod-conditions - Type PodConditionType `json:"type"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#pod-conditions + Type PodConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=PodConditionType"` // Status is the status of the condition. // Can be True, False, Unknown. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/pod-states.md#pod-conditions - Status ConditionStatus `json:"status"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#pod-conditions + Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"` // Last time we probed the condition. - LastProbeTime unversioned.Time `json:"lastProbeTime,omitempty"` + LastProbeTime unversioned.Time `json:"lastProbeTime,omitempty" protobuf:"bytes,3,opt,name=lastProbeTime"` // Last time the condition transitioned from one status to another. - LastTransitionTime unversioned.Time `json:"lastTransitionTime,omitempty"` + LastTransitionTime unversioned.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"` // Unique, one-word, CamelCase reason for the condition's last transition. - Reason string `json:"reason,omitempty"` + Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"` // Human-readable message indicating details about last transition. - Message string `json:"message,omitempty"` + Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"` } // RestartPolicy describes how the container should be restarted. @@ -1342,29 +1400,29 @@ const ( // by the node selector terms. type NodeSelector struct { //Required. A list of node selector terms. The terms are ORed. - NodeSelectorTerms []NodeSelectorTerm `json:"nodeSelectorTerms"` + NodeSelectorTerms []NodeSelectorTerm `json:"nodeSelectorTerms" protobuf:"bytes,1,rep,name=nodeSelectorTerms"` } // A null or empty node selector term matches no objects. type NodeSelectorTerm struct { //Required.
A list of node selector requirements. The requirements are ANDed. - MatchExpressions []NodeSelectorRequirement `json:"matchExpressions"` + MatchExpressions []NodeSelectorRequirement `json:"matchExpressions" protobuf:"bytes,1,rep,name=matchExpressions"` } // A node selector requirement is a selector that contains values, a key, and an operator // that relates the key and values. type NodeSelectorRequirement struct { // The label key that the selector applies to. - Key string `json:"key" patchStrategy:"merge" patchMergeKey:"key"` + Key string `json:"key" patchStrategy:"merge" patchMergeKey:"key" protobuf:"bytes,1,opt,name=key"` // Represents a key's relationship to a set of values. // Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. - Operator NodeSelectorOperator `json:"operator"` + Operator NodeSelectorOperator `json:"operator" protobuf:"bytes,2,opt,name=operator,casttype=NodeSelectorOperator"` // An array of string values. If the operator is In or NotIn, // the values array must be non-empty. If the operator is Exists or DoesNotExist, // the values array must be empty. If the operator is Gt or Lt, the values // array must have a single element, which will be interpreted as an integer. // This array is replaced during a strategic merge patch. - Values []string `json:"values,omitempty"` + Values []string `json:"values,omitempty" protobuf:"bytes,3,rep,name=values"` } // A node selector operator is the set of operators that can be used in @@ -1380,11 +1438,109 @@ const ( NodeSelectorOpLt NodeSelectorOperator = "Lt" ) -// Affinity is a group of affinity scheduling rules, currently -// only node affinity, but in the future also inter-pod affinity. +// Affinity is a group of affinity scheduling rules. type Affinity struct { // Describes node affinity scheduling rules for the pod. - NodeAffinity *NodeAffinity `json:"nodeAffinity,omitempty"` + NodeAffinity *NodeAffinity `json:"nodeAffinity,omitempty" protobuf:"bytes,1,opt,name=nodeAffinity"` + // Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). + PodAffinity *PodAffinity `json:"podAffinity,omitempty" protobuf:"bytes,2,opt,name=podAffinity"` + // Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). + PodAntiAffinity *PodAntiAffinity `json:"podAntiAffinity,omitempty" protobuf:"bytes,3,opt,name=podAntiAffinity"` +} + +// Pod affinity is a group of inter pod affinity scheduling rules. +type PodAffinity struct { + // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. + // If the affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. + // If the affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. due to a pod label update), the + // system will try to eventually evict the pod from its node. + // When there are multiple elements, the lists of nodes corresponding to each + // podAffinityTerm are intersected, i.e. all terms must be satisfied. + // RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"` + // If the affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. + // If the affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g.
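+ //
+ // A minimal sketch of a NodeSelector (labels are hypothetical): the two terms
+ // are ORed, while requirements inside a term are ANDed, so this matches nodes
+ // that are either (ssd AND amd64) or labeled gpu=true:
+ //
+ //	var exampleNodeSelector = NodeSelector{
+ //		NodeSelectorTerms: []NodeSelectorTerm{
+ //			{MatchExpressions: []NodeSelectorRequirement{
+ //				{Key: "disktype", Operator: NodeSelectorOpIn, Values: []string{"ssd"}},
+ //				{Key: "kubernetes.io/arch", Operator: NodeSelectorOpIn, Values: []string{"amd64"}},
+ //			}},
+ //			{MatchExpressions: []NodeSelectorRequirement{
+ //				{Key: "gpu", Operator: NodeSelectorOpIn, Values: []string{"true"}},
+ //			}},
+ //		},
+ //	}
+ //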
due to a pod label update), the + // system may or may not try to eventually evict the pod from its node. + // When there are multiple elements, the lists of nodes corresponding to each + // podAffinityTerm are intersected, i.e. all terms must be satisfied. + RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,1,rep,name=requiredDuringSchedulingIgnoredDuringExecution"` + // The scheduler will prefer to schedule pods to nodes that satisfy + // the affinity expressions specified by this field, but it may choose + // a node that violates one or more of the expressions. The node that is + // most preferred is the one with the greatest sum of weights, i.e. + // for each node that meets all of the scheduling requirements (resource + // request, requiredDuringScheduling affinity expressions, etc.), + // compute a sum by iterating through the elements of this field and adding + // "weight" to the sum if the node has pods which match the corresponding podAffinityTerm; the + // node(s) with the highest sum are the most preferred. + PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,2,rep,name=preferredDuringSchedulingIgnoredDuringExecution"` +} + +// Pod anti affinity is a group of inter pod anti affinity scheduling rules. +type PodAntiAffinity struct { + // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. + // If the anti-affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. + // If the anti-affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. due to a pod label update), the + // system will try to eventually evict the pod from its node. + // When there are multiple elements, the lists of nodes corresponding to each + // podAffinityTerm are intersected, i.e. all terms must be satisfied. + // RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"` + // If the anti-affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. + // If the anti-affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. due to a pod label update), the + // system may or may not try to eventually evict the pod from its node. + // When there are multiple elements, the lists of nodes corresponding to each + // podAffinityTerm are intersected, i.e. all terms must be satisfied. + RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,1,rep,name=requiredDuringSchedulingIgnoredDuringExecution"` + // The scheduler will prefer to schedule pods to nodes that satisfy + // the anti-affinity expressions specified by this field, but it may choose + // a node that violates one or more of the expressions. The node that is + // most preferred is the one with the greatest sum of weights, i.e.
+ // for each node that meets all of the scheduling requirements (resource + // request, requiredDuringScheduling anti-affinity expressions, etc.), + // compute a sum by iterating through the elements of this field and adding + // "weight" to the sum if the node has pods which match the corresponding podAffinityTerm; the + // node(s) with the highest sum are the most preferred. + PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,2,rep,name=preferredDuringSchedulingIgnoredDuringExecution"` +} + +// The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) +type WeightedPodAffinityTerm struct { + // weight associated with matching the corresponding podAffinityTerm, + // in the range 1-100. + Weight int32 `json:"weight" protobuf:"varint,1,opt,name=weight"` + // Required. A pod affinity term, associated with the corresponding weight. + PodAffinityTerm PodAffinityTerm `json:"podAffinityTerm" protobuf:"bytes,2,opt,name=podAffinityTerm"` +} + +// Defines a set of pods (namely those matching the labelSelector +// relative to the given namespace(s)) that this pod should be +// co-located (affinity) or not co-located (anti-affinity) with, +// where co-located is defined as running on a node whose value of +// the label with key <topologyKey> matches that of any node on which +// a pod of the set of pods is running +type PodAffinityTerm struct { + // A label query over a set of resources, in this case pods. + LabelSelector *unversioned.LabelSelector `json:"labelSelector,omitempty" protobuf:"bytes,1,opt,name=labelSelector"` + // namespaces specifies which namespaces the labelSelector applies to (matches against); + // nil list means "this pod's namespace," empty list means "all namespaces" + // The json tag here is not "omitempty" since we need to distinguish nil and empty. + // See https://golang.org/pkg/encoding/json/#Marshal for more details. + Namespaces []string `json:"namespaces" protobuf:"bytes,2,rep,name=namespaces"` + // This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + // the labelSelector in the specified namespaces, where co-located is defined as running on a node + // whose value of the label with key topologyKey matches that of any node on which any of the + // selected pods is running. + // For PreferredDuringScheduling pod anti-affinity, empty topologyKey is interpreted as "all topologies" + // ("all topologies" here means all the topologyKeys indicated by scheduler command-line argument --failure-domains); + // for affinity and for RequiredDuringScheduling pod anti-affinity, empty topologyKey is not allowed. + TopologyKey string `json:"topologyKey,omitempty" protobuf:"bytes,3,opt,name=topologyKey"` } // Node affinity is a group of node affinity scheduling rules. @@ -1402,7 +1558,7 @@ type NodeAffinity struct { // If the affinity requirements specified by this field cease to be met // at some point during pod execution (e.g. due to an update), the system // may or may not try to eventually evict the pod from its node.
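+ //
+ // A minimal sketch of a preferred ("soft") anti-affinity term (labels are
+ // hypothetical): spreading replicas of app=web across zones is preferred,
+ // but a violating node can still be chosen if nothing better exists:
+ //
+ //	var exampleAntiAffinity = PodAntiAffinity{
+ //		PreferredDuringSchedulingIgnoredDuringExecution: []WeightedPodAffinityTerm{{
+ //			Weight: 100, // maximum influence on the per-node score sum
+ //			PodAffinityTerm: PodAffinityTerm{
+ //				LabelSelector: &unversioned.LabelSelector{
+ //					MatchLabels: map[string]string{"app": "web"},
+ //				},
+ //				TopologyKey: "failure-domain.beta.kubernetes.io/zone",
+ //			},
+ //		}},
+ //	}
+ //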
- RequiredDuringSchedulingIgnoredDuringExecution *NodeSelector `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty"` + RequiredDuringSchedulingIgnoredDuringExecution *NodeSelector `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,1,opt,name=requiredDuringSchedulingIgnoredDuringExecution"` // The scheduler will prefer to schedule pods to nodes that satisfy // the affinity expressions specified by this field, but it may choose // a node that violates one or more of the expressions. The node that is @@ -1412,34 +1568,126 @@ type NodeAffinity struct { // compute a sum by iterating through the elements of this field and adding // "weight" to the sum if the node matches the corresponding matchExpressions; the // node(s) with the highest sum are the most preferred. - PreferredDuringSchedulingIgnoredDuringExecution []PreferredSchedulingTerm `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty"` + PreferredDuringSchedulingIgnoredDuringExecution []PreferredSchedulingTerm `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,2,rep,name=preferredDuringSchedulingIgnoredDuringExecution"` } // An empty preferred scheduling term matches all objects with implicit weight 0 // (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). type PreferredSchedulingTerm struct { // Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. - Weight int `json:"weight"` + Weight int32 `json:"weight" protobuf:"varint,1,opt,name=weight"` // A node selector term, associated with the corresponding weight. - Preference NodeSelectorTerm `json:"preference"` + Preference NodeSelectorTerm `json:"preference" protobuf:"bytes,2,opt,name=preference"` } +// The node this Taint is attached to has the effect "effect" on +// any pod that does not tolerate the Taint. +type Taint struct { + // Required. The taint key to be applied to a node. + Key string `json:"key" patchStrategy:"merge" patchMergeKey:"key" protobuf:"bytes,1,opt,name=key"` + // Required. The taint value corresponding to the taint key. + Value string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"` + // Required. The effect of the taint on pods + // that do not tolerate the taint. + // Valid effects are NoSchedule and PreferNoSchedule. + Effect TaintEffect `json:"effect" protobuf:"bytes,3,opt,name=effect,casttype=TaintEffect"` +} + +type TaintEffect string + +const ( + // Do not allow new pods to schedule onto the node unless they tolerate the taint, + // but allow all pods submitted to Kubelet without going through the scheduler + // to start, and allow all already-running pods to continue running. + // Enforced by the scheduler. + TaintEffectNoSchedule TaintEffect = "NoSchedule" + // Like TaintEffectNoSchedule, but the scheduler tries not to schedule + // new pods onto the node, rather than prohibiting new pods from scheduling + // onto the node entirely. Enforced by the scheduler. + TaintEffectPreferNoSchedule TaintEffect = "PreferNoSchedule" + // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. + // Do not allow new pods to schedule onto the node unless they tolerate the taint, + // do not allow pods to start on Kubelet unless they tolerate the taint, + // but allow all already-running pods to continue running. + // Enforced by the scheduler and Kubelet. + // TaintEffectNoScheduleNoAdmit TaintEffect = "NoScheduleNoAdmit" + // NOT YET IMPLEMENTED.
TODO: Uncomment field once it is implemented. + // Do not allow new pods to schedule onto the node unless they tolerate the taint, + // do not allow pods to start on Kubelet unless they tolerate the taint, + // and evict any already-running pods that do not tolerate the taint. + // Enforced by the scheduler and Kubelet. + // TaintEffectNoScheduleNoAdmitNoExecute = "NoScheduleNoAdmitNoExecute" +) + +// The pod this Toleration is attached to tolerates any taint that matches +// the triple <key,value,effect> using the matching operator <operator>. +type Toleration struct { + // Required. Key is the taint key that the toleration applies to. + Key string `json:"key,omitempty" patchStrategy:"merge" patchMergeKey:"key" protobuf:"bytes,1,opt,name=key"` + // operator represents a key's relationship to the value. + // Valid operators are Exists and Equal. Defaults to Equal. + // Exists is equivalent to wildcard for value, so that a pod can + // tolerate all taints of a particular category. + Operator TolerationOperator `json:"operator,omitempty" protobuf:"bytes,2,opt,name=operator,casttype=TolerationOperator"` + // Value is the taint value the toleration matches to. + // If the operator is Exists, the value should be empty, otherwise just a regular string. + Value string `json:"value,omitempty" protobuf:"bytes,3,opt,name=value"` + // Effect indicates the taint effect to match. Empty means match all taint effects. + // When specified, allowed values are NoSchedule and PreferNoSchedule. + Effect TaintEffect `json:"effect,omitempty" protobuf:"bytes,4,opt,name=effect,casttype=TaintEffect"` + // TODO: For forgiveness (#1574), we'd eventually add at least a grace period + // here, and possibly an occurrence threshold and period. +} + +// A toleration operator is the set of operators that can be used in a toleration. +type TolerationOperator string + +const ( + TolerationOpExists TolerationOperator = "Exists" + TolerationOpEqual TolerationOperator = "Equal" +) + +const ( + // This annotation key will be used to contain an array of v1 JSON encoded Containers + // for init containers. The annotation will be placed into the internal type and cleared. + PodInitContainersAnnotationKey = "pod.alpha.kubernetes.io/init-containers" + // This annotation key will be used to contain an array of v1 JSON encoded + // ContainerStatuses for init containers. The annotation will be placed into the internal + // type and cleared. + PodInitContainerStatusesAnnotationKey = "pod.alpha.kubernetes.io/init-container-statuses" +) + // PodSpec is a description of a pod. type PodSpec struct { // List of volumes that can be mounted by containers belonging to the pod. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/volumes.md - Volumes []Volume `json:"volumes,omitempty" patchStrategy:"merge" patchMergeKey:"name"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md + Volumes []Volume `json:"volumes,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,1,rep,name=volumes"` + // List of initialization containers belonging to the pod. + // Init containers are executed in order prior to containers being started. If any + // init container fails, the pod is considered to have failed and is handled according + // to its restartPolicy. The name for an init container or normal container must be + // unique among all containers. + // Init containers may not have Lifecycle actions, Readiness probes, or Liveness probes.
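+ //
+ // A minimal sketch of a taint and a toleration that matches it (key and value
+ // are hypothetical): with the Equal operator, key, value, and effect must all
+ // line up for the taint to be tolerated:
+ //
+ //	var exampleTaint = Taint{
+ //		Key:    "dedicated",
+ //		Value:  "gpu",
+ //		Effect: TaintEffectNoSchedule,
+ //	}
+ //	var exampleToleration = Toleration{
+ //		Key:      "dedicated",
+ //		Operator: TolerationOpEqual,
+ //		Value:    "gpu",
+ //		Effect:   TaintEffectNoSchedule, // an empty Effect would match all effects
+ //	}
+ //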
+ // The resourceRequirements of an init container are taken into account during scheduling + // by finding the highest request/limit for each resource type, and then using the max of + // that value or the sum of the normal containers. Limits are applied to init containers + // in a similar fashion. + // Init containers cannot currently be added or removed. + // Init containers are in alpha state and may change without notice. + // Cannot be updated. + // More info: http://releases.k8s.io/HEAD/docs/user-guide/containers.md + InitContainers []Container `json:"-" patchStrategy:"merge" patchMergeKey:"name"` // List of containers belonging to the pod. // Containers cannot currently be added or removed. // There must be at least one container in a Pod. // Cannot be updated. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/containers.md - Containers []Container `json:"containers" patchStrategy:"merge" patchMergeKey:"name"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/containers.md + Containers []Container `json:"containers" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=containers"` // Restart policy for all containers within the pod. // One of Always, OnFailure, Never. // Default to Always. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/pod-states.md#restartpolicy - RestartPolicy RestartPolicy `json:"restartPolicy,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#restartpolicy + RestartPolicy RestartPolicy `json:"restartPolicy,omitempty" protobuf:"bytes,3,opt,name=restartPolicy,casttype=RestartPolicy"` // Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. // Value must be non-negative integer. The value zero indicates delete immediately. // If this value is nil, the default grace period will be used instead. @@ -1447,49 +1695,55 @@ type PodSpec struct { // a termination signal and the time when the processes are forcibly halted with a kill signal. // Set this value longer than the expected cleanup time for your process. // Defaults to 30 seconds. - TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty"` + TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty" protobuf:"varint,4,opt,name=terminationGracePeriodSeconds"` // Optional duration in seconds the pod may be active on the node relative to // StartTime before the system will actively try to mark it failed and kill associated containers. // Value must be a positive integer. - ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty"` + ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty" protobuf:"varint,5,opt,name=activeDeadlineSeconds"` // Set DNS policy for containers within the pod. // One of 'ClusterFirst' or 'Default'. // Defaults to "ClusterFirst". - DNSPolicy DNSPolicy `json:"dnsPolicy,omitempty"` + DNSPolicy DNSPolicy `json:"dnsPolicy,omitempty" protobuf:"bytes,6,opt,name=dnsPolicy,casttype=DNSPolicy"` // NodeSelector is a selector which must be true for the pod to fit on a node. // Selector which must match a node's labels for the pod to be scheduled on that node.
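+ //
+ // A worked sketch of the init-container resource rule above (numbers are
+ // hypothetical): with init containers requesting 200m and 500m CPU (they run
+ // one at a time) and two normal containers requesting 300m each (they run
+ // concurrently), the pod's effective CPU request is
+ // max(max(200m, 500m), 300m + 300m) = 600m.
+ //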
// NodeSelector is a selector which must be true for the pod to fit on a node. // Selector which must match a node's labels for the pod to be scheduled on that node. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/node-selection/README.md - NodeSelector map[string]string `json:"nodeSelector,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/node-selection/README.md + NodeSelector map[string]string `json:"nodeSelector,omitempty" protobuf:"bytes,7,rep,name=nodeSelector"` // ServiceAccountName is the name of the ServiceAccount to use to run this pod. - // More info: http://releases.k8s.io/release-1.2/docs/design/service_accounts.md - ServiceAccountName string `json:"serviceAccountName,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/design/service_accounts.md + ServiceAccountName string `json:"serviceAccountName,omitempty" protobuf:"bytes,8,opt,name=serviceAccountName"` // DeprecatedServiceAccount is a deprecated alias for ServiceAccountName. // Deprecated: Use serviceAccountName instead. - DeprecatedServiceAccount string `json:"serviceAccount,omitempty"` + DeprecatedServiceAccount string `json:"serviceAccount,omitempty" protobuf:"bytes,9,opt,name=serviceAccount"` // NodeName is a request to schedule this pod onto a specific node. If it is non-empty, // the scheduler simply schedules this pod onto that node, assuming that it fits resource // requirements. - NodeName string `json:"nodeName,omitempty"` + NodeName string `json:"nodeName,omitempty" protobuf:"bytes,10,opt,name=nodeName"` // Host networking requested for this pod. Use the host's network namespace. // If this option is set, the ports that will be used must be specified. // Default to false. - HostNetwork bool `json:"hostNetwork,omitempty"` + HostNetwork bool `json:"hostNetwork,omitempty" protobuf:"varint,11,opt,name=hostNetwork"` // Use the host's pid namespace. // Optional: Default to false. - HostPID bool `json:"hostPID,omitempty"` + HostPID bool `json:"hostPID,omitempty" protobuf:"varint,12,opt,name=hostPID"` // Use the host's ipc namespace. // Optional: Default to false. - HostIPC bool `json:"hostIPC,omitempty"` + HostIPC bool `json:"hostIPC,omitempty" protobuf:"varint,13,opt,name=hostIPC"` // SecurityContext holds pod-level security attributes and common container settings. // Optional: Defaults to empty. See type description for default values of each field. - SecurityContext *PodSecurityContext `json:"securityContext,omitempty"` + SecurityContext *PodSecurityContext `json:"securityContext,omitempty" protobuf:"bytes,14,opt,name=securityContext"` // ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. // If specified, these secrets will be passed to individual puller implementations for them to use. For example, // in the case of docker, only DockerConfig type secrets are honored. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/images.md#specifying-imagepullsecrets-on-a-pod - ImagePullSecrets []LocalObjectReference `json:"imagePullSecrets,omitempty" patchStrategy:"merge" patchMergeKey:"name"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/images.md#specifying-imagepullsecrets-on-a-pod + ImagePullSecrets []LocalObjectReference `json:"imagePullSecrets,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,15,rep,name=imagePullSecrets"` + // Specifies the hostname of the Pod. + // If not specified, the pod's hostname will be set to a system-defined value. + Hostname string `json:"hostname,omitempty" protobuf:"bytes,16,opt,name=hostname"` + // If specified, the fully qualified Pod hostname will be "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>".
+ // If not specified, the pod will not have a domainname at all. + Subdomain string `json:"subdomain,omitempty" protobuf:"bytes,17,opt,name=subdomain"` } // PodSecurityContext holds pod-level security attributes and common container settings. @@ -1501,24 +1755,24 @@ type PodSecurityContext struct { // container. May also be set in SecurityContext. If set in // both SecurityContext and PodSecurityContext, the value specified in SecurityContext // takes precedence for that container. - SELinuxOptions *SELinuxOptions `json:"seLinuxOptions,omitempty"` + SELinuxOptions *SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,1,opt,name=seLinuxOptions"` // The UID to run the entrypoint of the container process. // Defaults to user specified in image metadata if unspecified. // May also be set in SecurityContext. If set in both SecurityContext and // PodSecurityContext, the value specified in SecurityContext takes precedence // for that container. - RunAsUser *int64 `json:"runAsUser,omitempty"` + RunAsUser *int64 `json:"runAsUser,omitempty" protobuf:"varint,2,opt,name=runAsUser"` // Indicates that the container must run as a non-root user. // If true, the Kubelet will validate the image at runtime to ensure that it // does not run as UID 0 (root) and fail to start the container if it does. // If unset or false, no such validation will be performed. // May also be set in SecurityContext. If set in both SecurityContext and // PodSecurityContext, the value specified in SecurityContext takes precedence. - RunAsNonRoot *bool `json:"runAsNonRoot,omitempty"` + RunAsNonRoot *bool `json:"runAsNonRoot,omitempty" protobuf:"varint,3,opt,name=runAsNonRoot"` // A list of groups applied to the first process run in each container, in addition // to the container's primary GID. If unspecified, no groups will be added to // any container. - SupplementalGroups []int64 `json:"supplementalGroups,omitempty"` + SupplementalGroups []int64 `json:"supplementalGroups,omitempty" protobuf:"varint,4,rep,name=supplementalGroups"` // A special supplemental group that applies to all containers in a pod. // Some volume types allow the Kubelet to change the ownership of that volume // to be owned by the pod: @@ -1528,52 +1782,58 @@ type PodSecurityContext struct { // 3. The permission bits are OR'd with rw-rw---- // // If unset, the Kubelet will not modify the ownership and permissions of any volume. - FSGroup *int64 `json:"fsGroup,omitempty"` + FSGroup *int64 `json:"fsGroup,omitempty" protobuf:"varint,5,opt,name=fsGroup"` } // PodStatus represents information about the status of a pod. Status may trail the actual // state of a system. type PodStatus struct { // Current condition of the pod. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/pod-states.md#pod-phase - Phase PodPhase `json:"phase,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#pod-phase + Phase PodPhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=PodPhase"` // Current service state of pod. 
- // More info: http://releases.k8s.io/release-1.2/docs/user-guide/pod-states.md#pod-conditions - Conditions []PodCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#pod-conditions + Conditions []PodCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"` // A human readable message indicating details about why the pod is in this condition. - Message string `json:"message,omitempty"` + Message string `json:"message,omitempty" protobuf:"bytes,3,opt,name=message"` // A brief CamelCase message indicating details about why the pod is in this state. // e.g. 'OutOfDisk' - Reason string `json:"reason,omitempty"` + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` // IP address of the host to which the pod is assigned. Empty if not yet scheduled. - HostIP string `json:"hostIP,omitempty"` + HostIP string `json:"hostIP,omitempty" protobuf:"bytes,5,opt,name=hostIP"` // IP address allocated to the pod. Routable at least within the cluster. // Empty if not yet allocated. - PodIP string `json:"podIP,omitempty"` + PodIP string `json:"podIP,omitempty" protobuf:"bytes,6,opt,name=podIP"` // RFC 3339 date and time at which the object was acknowledged by the Kubelet. // This is before the Kubelet pulled the container image(s) for the pod. - StartTime *unversioned.Time `json:"startTime,omitempty"` - + StartTime *unversioned.Time `json:"startTime,omitempty" protobuf:"bytes,7,opt,name=startTime"` + + // The list has one entry per init container in the manifest. The most recent successful + // init container will have ready = true, the most recently started container will have + // startTime set. + // Init containers are in alpha state and may change without notice. + // More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#container-statuses + InitContainerStatuses []ContainerStatus `json:"-"` // The list has one entry per container in the manifest. Each entry is currently the output // of `docker inspect`. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/pod-states.md#container-statuses - ContainerStatuses []ContainerStatus `json:"containerStatuses,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#container-statuses + ContainerStatuses []ContainerStatus `json:"containerStatuses,omitempty" protobuf:"bytes,8,rep,name=containerStatuses"` } // PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encoded/decoded type PodStatusResult struct { unversioned.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata - ObjectMeta `json:"metadata,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Most recently observed status of the pod. // This data may not be up to date. // Populated by the system. // Read-only.
- // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#spec-and-status - Status PodStatus `json:"status,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + Status PodStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"` } // +genclient=true @@ -1583,42 +1843,42 @@ type PodStatusResult struct { type Pod struct { unversioned.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata - ObjectMeta `json:"metadata,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Specification of the desired behavior of the pod. - // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#spec-and-status - Spec PodSpec `json:"spec,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + Spec PodSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // Most recently observed status of the pod. // This data may not be up to date. // Populated by the system. // Read-only. - // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#spec-and-status - Status PodStatus `json:"status,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + Status PodStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } // PodList is a list of Pods. type PodList struct { unversioned.TypeMeta `json:",inline"` // Standard list metadata. - // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#types-kinds - unversioned.ListMeta `json:"metadata,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of pods. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/pods.md - Items []Pod `json:"items"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/pods.md + Items []Pod `json:"items" protobuf:"bytes,2,rep,name=items"` } // PodTemplateSpec describes the data a pod should have when created from a template type PodTemplateSpec struct { // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata - ObjectMeta `json:"metadata,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Specification of the desired behavior of the pod. - // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#spec-and-status - Spec PodSpec `json:"spec,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + Spec PodSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` } // +genclient=true @@ -1627,23 +1887,23 @@ type PodTemplateSpec struct { type PodTemplate struct { unversioned.TypeMeta `json:",inline"` // Standard object's metadata. 
- // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata - ObjectMeta `json:"metadata,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Template defines the pods that will be created from this pod template. - // http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#spec-and-status - Template PodTemplateSpec `json:"template,omitempty"` + // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + Template PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,2,opt,name=template"` } // PodTemplateList is a list of PodTemplates. type PodTemplateList struct { unversioned.TypeMeta `json:",inline"` // Standard list metadata. - // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#types-kinds - unversioned.ListMeta `json:"metadata,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of pod templates. - Items []PodTemplate `json:"items"` + Items []PodTemplate `json:"items" protobuf:"bytes,2,rep,name=items"` } // ReplicationControllerSpec is the specification of a replication controller. @@ -1651,15 +1911,15 @@ type ReplicationControllerSpec struct { // Replicas is the number of desired replicas. // This is a pointer to distinguish between explicit zero and unspecified. // Defaults to 1. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/replication-controller.md#what-is-a-replication-controller - Replicas *int32 `json:"replicas,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#what-is-a-replication-controller + Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` // Selector is a label query over pods that should match the Replicas count. // If Selector is empty, it is defaulted to the labels present on the Pod template. // Label keys and values that must match in order to be controlled by this replication // controller, if empty defaulted to labels on Pod template. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/labels.md#label-selectors - Selector map[string]string `json:"selector,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors + Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"` // TemplateRef is a reference to an object that describes the pod that will be created if // insufficient replicas are detected. @@ -1668,22 +1928,22 @@ type ReplicationControllerSpec struct { // Template is the object that describes the pod that will be created if // insufficient replicas are detected. This takes precedence over a TemplateRef. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/replication-controller.md#pod-template - Template *PodTemplateSpec `json:"template,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#pod-template + Template *PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,3,opt,name=template"` } // ReplicationControllerStatus represents the current status of a replication // controller. type ReplicationControllerStatus struct { // Replicas is the most recently observed number of replicas.
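The spec's Replicas field above illustrates a recurring convention in these types: an optional numeric field is declared as a pointer so that "unset" (nil, take the default) stays distinguishable from an explicit zero (scale to nothing) after JSON round-tripping. A tiny defaulting helper showing how a consumer might resolve it; the helper name is illustrative, not from the Kubernetes source:

// defaultReplicas resolves the documented *int32 convention: nil means
// "unspecified, default to 1", while an explicit 0 is a genuine request
// for zero pods and must be preserved.
func defaultReplicas(replicas *int32) int32 {
	if replicas == nil {
		return 1
	}
	return *replicas
}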
- // More info: http://releases.k8s.io/release-1.2/docs/user-guide/replication-controller.md#what-is-a-replication-controller - Replicas int32 `json:"replicas"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#what-is-a-replication-controller + Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"` // The number of pods that have labels matching the labels of the pod template of the replication controller. - FullyLabeledReplicas int32 `json:"fullyLabeledReplicas,omitempty"` + FullyLabeledReplicas int32 `json:"fullyLabeledReplicas,omitempty" protobuf:"varint,2,opt,name=fullyLabeledReplicas"` // ObservedGeneration reflects the generation of the most recently observed replication controller. - ObservedGeneration int64 `json:"observedGeneration,omitempty"` + ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,3,opt,name=observedGeneration"` } // +genclient=true @@ -1694,31 +1954,31 @@ type ReplicationController struct { // If the Labels of a ReplicationController are empty, they are defaulted to // be the same as the Pod(s) that the replication controller manages. - // Standard object's metadata. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata - ObjectMeta `json:"metadata,omitempty"` + // Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec defines the specification of the desired behavior of the replication controller. - // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#spec-and-status - Spec ReplicationControllerSpec `json:"spec,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + Spec ReplicationControllerSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // Status is the most recently observed status of the replication controller. // This data may be out of date by some window of time. // Populated by the system. // Read-only. - // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#spec-and-status - Status ReplicationControllerStatus `json:"status,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + Status ReplicationControllerStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } // ReplicationControllerList is a collection of replication controllers. type ReplicationControllerList struct { unversioned.TypeMeta `json:",inline"` // Standard list metadata. - // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#types-kinds - unversioned.ListMeta `json:"metadata,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of replication controllers. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/replication-controller.md - Items []ReplicationController `json:"items"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md + Items []ReplicationController `json:"items" protobuf:"bytes,2,rep,name=items"` } // Session Affinity Type string @@ -1754,14 +2014,14 @@ const ( type ServiceStatus struct { // LoadBalancer contains the current status of the load-balancer, // if one is present. 
- LoadBalancer LoadBalancerStatus `json:"loadBalancer,omitempty"` + LoadBalancer LoadBalancerStatus `json:"loadBalancer,omitempty" protobuf:"bytes,1,opt,name=loadBalancer"` } // LoadBalancerStatus represents the status of a load-balancer. type LoadBalancerStatus struct { // Ingress is a list containing ingress points for the load-balancer. // Traffic intended for the service should be sent to these ingress points. - Ingress []LoadBalancerIngress `json:"ingress,omitempty"` + Ingress []LoadBalancerIngress `json:"ingress,omitempty" protobuf:"bytes,1,rep,name=ingress"` } // LoadBalancerIngress represents the status of a load-balancer ingress point: @@ -1769,24 +2029,24 @@ type LoadBalancerStatus struct { type LoadBalancerIngress struct { // IP is set for load-balancer ingress points that are IP based // (typically GCE or OpenStack load-balancers) - IP string `json:"ip,omitempty"` + IP string `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"` // Hostname is set for load-balancer ingress points that are DNS based // (typically AWS load-balancers) - Hostname string `json:"hostname,omitempty"` + Hostname string `json:"hostname,omitempty" protobuf:"bytes,2,opt,name=hostname"` } // ServiceSpec describes the attributes that a user creates on a service. type ServiceSpec struct { // The list of ports that are exposed by this service. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/services.md#virtual-ips-and-service-proxies - Ports []ServicePort `json:"ports"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/services.md#virtual-ips-and-service-proxies + Ports []ServicePort `json:"ports" patchStrategy:"merge" patchMergeKey:"port" protobuf:"bytes,1,rep,name=ports"` // This service will route traffic to pods having labels matching this selector. // Label keys and values that must match in order to receive traffic for this service. // If empty, all pods are selected; if not specified, endpoints must be manually specified. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/services.md#overview - Selector map[string]string `json:"selector,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/services.md#overview + Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"` // ClusterIP is usually assigned by the master and is the IP address of the service. // If specified, it will be allocated to the service if it is unused @@ -1794,13 +2054,13 @@ type ServiceSpec struct { // Valid values are None, empty string (""), or a valid IP address. // 'None' can be specified for a headless service when proxying is not required. // Cannot be updated. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/services.md#virtual-ips-and-service-proxies - ClusterIP string `json:"clusterIP,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/services.md#virtual-ips-and-service-proxies + ClusterIP string `json:"clusterIP,omitempty" protobuf:"bytes,3,opt,name=clusterIP"` // Type of exposed service. Must be ClusterIP, NodePort, or LoadBalancer. // Defaults to ClusterIP.
- // More info: http://releases.k8s.io/release-1.2/docs/user-guide/services.md#external-services - Type ServiceType `json:"type,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/services.md#external-services + Type ServiceType `json:"type,omitempty" protobuf:"bytes,4,opt,name=type,casttype=ServiceType"` // externalIPs is a list of IP addresses for which nodes in the cluster // will also accept traffic for this service. These IPs are not managed by @@ -1809,28 +2069,35 @@ type ServiceSpec struct { // that are not part of the Kubernetes system. A previous form of this // functionality exists as the deprecatedPublicIPs field. When using this // field, callers should also clear the deprecatedPublicIPs field. - ExternalIPs []string `json:"externalIPs,omitempty"` + ExternalIPs []string `json:"externalIPs,omitempty" protobuf:"bytes,5,rep,name=externalIPs"` // deprecatedPublicIPs is deprecated and replaced by the externalIPs field // with almost the exact same semantics. This field is retained in the v1 // API for compatibility until at least 8/20/2016. It will be removed from // any new API revisions. If both deprecatedPublicIPs *and* externalIPs are // set, deprecatedPublicIPs is used. - DeprecatedPublicIPs []string `json:"deprecatedPublicIPs,omitempty"` + // +genconversion=false + DeprecatedPublicIPs []string `json:"deprecatedPublicIPs,omitempty" protobuf:"bytes,6,rep,name=deprecatedPublicIPs"` // Supports "ClientIP" and "None". Used to maintain session affinity. // Enable client IP based session affinity. // Must be ClientIP or None. // Defaults to None. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/services.md#virtual-ips-and-service-proxies - SessionAffinity ServiceAffinity `json:"sessionAffinity,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/services.md#virtual-ips-and-service-proxies + SessionAffinity ServiceAffinity `json:"sessionAffinity,omitempty" protobuf:"bytes,7,opt,name=sessionAffinity,casttype=ServiceAffinity"` // Only applies to Service Type: LoadBalancer // LoadBalancer will get created with the IP specified in this field. // This feature depends on whether the underlying cloud-provider supports specifying // the loadBalancerIP when a load balancer is created. // This field will be ignored if the cloud-provider does not support the feature. - LoadBalancerIP string `json:"loadBalancerIP,omitempty"` + LoadBalancerIP string `json:"loadBalancerIP,omitempty" protobuf:"bytes,8,opt,name=loadBalancerIP"` + + // If specified and supported by the platform, traffic through the cloud-provider + // load-balancer will be restricted to the specified client IPs. This field will be ignored if the + // cloud-provider does not support the feature. + // More info: http://releases.k8s.io/HEAD/docs/user-guide/services-firewalls.md + LoadBalancerSourceRanges []string `json:"loadBalancerSourceRanges,omitempty" protobuf:"bytes,9,opt,name=loadBalancerSourceRanges"` } // ServicePort contains information on service's port. @@ -1839,14 +2106,14 @@ type ServicePort struct { // All ports within a ServiceSpec must have unique names. This maps to // the 'Name' field in EndpointPort objects. // Optional if only one ServicePort is defined on this service. - Name string `json:"name,omitempty"` + Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` // The IP protocol for this port. Supports "TCP" and "UDP". // Default is TCP.
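The loadBalancerSourceRanges field added above is only a list of CIDR strings; enforcement is delegated to the cloud provider. A sketch of the kind of client-IP filter an implementation might apply, using only the standard net package; treating an empty list as "allow all" is an assumption of this sketch, not a documented guarantee:

import "net"

// ipAllowed reports whether clientIP falls inside any of the configured
// source ranges (CIDR strings such as "10.0.0.0/8"). Malformed entries are
// skipped here; a real implementation would reject them during validation.
func ipAllowed(clientIP string, sourceRanges []string) bool {
	if len(sourceRanges) == 0 {
		return true // assumed: no ranges configured means unrestricted
	}
	ip := net.ParseIP(clientIP)
	if ip == nil {
		return false
	}
	for _, cidr := range sourceRanges {
		if _, ipnet, err := net.ParseCIDR(cidr); err == nil && ipnet.Contains(ip) {
			return true
		}
	}
	return false
}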
- Protocol Protocol `json:"protocol,omitempty"` + Protocol Protocol `json:"protocol,omitempty" protobuf:"bytes,2,opt,name=protocol,casttype=Protocol"` // The port that will be exposed by this service. - Port int32 `json:"port"` + Port int32 `json:"port" protobuf:"varint,3,opt,name=port"` // Number or name of the port to access on the pods targeted by the service. // Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. @@ -1855,15 +2122,15 @@ type ServicePort struct { // of the 'port' field is used (an identity map). // This field is ignored for services with clusterIP=None, and should be // omitted or set equal to the 'port' field. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/services.md#defining-a-service - TargetPort intstr.IntOrString `json:"targetPort,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/services.md#defining-a-service + TargetPort intstr.IntOrString `json:"targetPort,omitempty" protobuf:"bytes,4,opt,name=targetPort"` // The port on each node on which this service is exposed when type=NodePort or LoadBalancer. // Usually assigned by the system. If specified, it will be allocated to the service // if unused or else creation of the service will fail. // Default is to auto-allocate a port if the ServiceType of this Service requires one. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/services.md#type--nodeport - NodePort int32 `json:"nodePort,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/services.md#type--nodeport + NodePort int32 `json:"nodePort,omitempty" protobuf:"varint,5,opt,name=nodePort"` } // +genclient=true @@ -1874,18 +2141,18 @@ type ServicePort struct { type Service struct { unversioned.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata - ObjectMeta `json:"metadata,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec defines the behavior of a service. - // http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#spec-and-status - Spec ServiceSpec `json:"spec,omitempty"` + // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + Spec ServiceSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // Most recently observed status of the service. // Populated by the system. // Read-only. - // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#spec-and-status - Status ServiceStatus `json:"status,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + Status ServiceStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } const ( @@ -1898,11 +2165,11 @@ const ( type ServiceList struct { unversioned.TypeMeta `json:",inline"` // Standard list metadata. 
- // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#types-kinds - unversioned.ListMeta `json:"metadata,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of services - Items []Service `json:"items"` + Items []Service `json:"items" protobuf:"bytes,2,rep,name=items"` } // +genclient=true @@ -1914,30 +2181,30 @@ type ServiceList struct { type ServiceAccount struct { unversioned.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata - ObjectMeta `json:"metadata,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/secrets.md - Secrets []ObjectReference `json:"secrets,omitempty" patchStrategy:"merge" patchMergeKey:"name"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/secrets.md + Secrets []ObjectReference `json:"secrets,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=secrets"` // ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images // in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets // can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/secrets.md#manually-specifying-an-imagepullsecret - ImagePullSecrets []LocalObjectReference `json:"imagePullSecrets,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/secrets.md#manually-specifying-an-imagepullsecret + ImagePullSecrets []LocalObjectReference `json:"imagePullSecrets,omitempty" protobuf:"bytes,3,rep,name=imagePullSecrets"` } // ServiceAccountList is a list of ServiceAccount objects type ServiceAccountList struct { unversioned.TypeMeta `json:",inline"` // Standard list metadata. - // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#types-kinds - unversioned.ListMeta `json:"metadata,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of ServiceAccounts. - // More info: http://releases.k8s.io/release-1.2/docs/design/service_accounts.md#service-accounts - Items []ServiceAccount `json:"items"` + // More info: http://releases.k8s.io/HEAD/docs/design/service_accounts.md#service-accounts + Items []ServiceAccount `json:"items" protobuf:"bytes,2,rep,name=items"` } // +genclient=true @@ -1957,8 +2224,8 @@ type ServiceAccountList struct { type Endpoints struct { unversioned.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata - ObjectMeta `json:"metadata,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // The set of all endpoints is the union of all subsets. Addresses are placed into // subsets according to the IPs they share. 
A single address with multiple ports, @@ -1967,7 +2234,7 @@ type Endpoints struct { // subsets for the different ports. No address will appear in both Addresses and // NotReadyAddresses in the same subset. // Sets of addresses and ports that comprise a service. - Subsets []EndpointSubset `json:"subsets"` + Subsets []EndpointSubset `json:"subsets" protobuf:"bytes,2,rep,name=subsets"` } // EndpointSubset is a group of addresses with a common set of ports. The @@ -1983,13 +2250,13 @@ type Endpoints struct { type EndpointSubset struct { // IP addresses which offer the related ports that are marked as ready. These endpoints // should be considered safe for load balancers and clients to utilize. - Addresses []EndpointAddress `json:"addresses,omitempty"` + Addresses []EndpointAddress `json:"addresses,omitempty" protobuf:"bytes,1,rep,name=addresses"` // IP addresses which offer the related ports but are not currently marked as ready // because they have not yet finished starting, have recently failed a readiness check, // or have recently failed a liveness check. - NotReadyAddresses []EndpointAddress `json:"notReadyAddresses,omitempty"` + NotReadyAddresses []EndpointAddress `json:"notReadyAddresses,omitempty" protobuf:"bytes,2,rep,name=notReadyAddresses"` // Port numbers available on the related IP addresses. - Ports []EndpointPort `json:"ports,omitempty"` + Ports []EndpointPort `json:"ports,omitempty" protobuf:"bytes,3,rep,name=ports"` } // EndpointAddress is a tuple that describes a single IP address. @@ -1997,11 +2264,14 @@ type EndpointAddress struct { // The IP of this endpoint. // May not be loopback (127.0.0.0/8), link-local (169.254.0.0/16), // or link-local multicast (224.0.0.0/24). + // IPv6 is also accepted but not fully supported on all platforms. Also, certain + // Kubernetes components, like kube-proxy, are not IPv6 ready. // TODO: This should allow hostname or IP, See #4447. - IP string `json:"ip"` - + IP string `json:"ip" protobuf:"bytes,1,opt,name=ip"` + // The Hostname of this endpoint. + Hostname string `json:"hostname,omitempty" protobuf:"bytes,3,opt,name=hostname"` // Reference to object providing the endpoint. - TargetRef *ObjectReference `json:"targetRef,omitempty"` + TargetRef *ObjectReference `json:"targetRef,omitempty" protobuf:"bytes,2,opt,name=targetRef"` } // EndpointPort is a tuple that describes a single port. @@ -2009,108 +2279,118 @@ type EndpointPort struct { // The name of this port (corresponds to ServicePort.Name). // Must be a DNS_LABEL. // Optional only if one port is defined. - Name string `json:"name,omitempty"` + Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` // The port number of the endpoint. - Port int32 `json:"port"` + Port int32 `json:"port" protobuf:"varint,2,opt,name=port"` // The IP protocol for this port. // Must be UDP or TCP. // Default is TCP. - Protocol Protocol `json:"protocol,omitempty"` + Protocol Protocol `json:"protocol,omitempty" protobuf:"bytes,3,opt,name=protocol,casttype=Protocol"` }
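The EndpointSubset type above groups addresses with the ports they share, so the expanded endpoint set is the Cartesian product of each subset's addresses with its ports. A self-contained sketch of that expansion into dialable "ip:port" strings, with simplified stand-in types:

import "fmt"

type subsetSketch struct {
	Addresses []string // ready IPs, mirroring the Addresses field above
	Ports     []int32
}

// expandSubsets flattens subsets into "ip:port" pairs: within one subset,
// every ready address is paired with every port.
func expandSubsets(subsets []subsetSketch) []string {
	var out []string
	for _, s := range subsets {
		for _, addr := range s.Addresses {
			for _, port := range s.Ports {
				out = append(out, fmt.Sprintf("%s:%d", addr, port))
			}
		}
	}
	return out
}

Two addresses sharing two ports expand to four endpoints, which is why the comments above require addresses with different port sets to be placed in different subsets.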
// EndpointsList is a list of endpoints. type EndpointsList struct { unversioned.TypeMeta `json:",inline"` // Standard list metadata. - // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#types-kinds - unversioned.ListMeta `json:"metadata,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of endpoints. - Items []Endpoints `json:"items"` + Items []Endpoints `json:"items" protobuf:"bytes,2,rep,name=items"` } // NodeSpec describes the attributes that a node is created with. type NodeSpec struct { // PodCIDR represents the pod IP range assigned to the node. - PodCIDR string `json:"podCIDR,omitempty"` + PodCIDR string `json:"podCIDR,omitempty" protobuf:"bytes,1,opt,name=podCIDR"` // External ID of the node assigned by some machine database (e.g. a cloud provider). // Deprecated. - ExternalID string `json:"externalID,omitempty"` + ExternalID string `json:"externalID,omitempty" protobuf:"bytes,2,opt,name=externalID"` // ID of the node assigned by the cloud provider in the format: <ProviderName>://<ProviderSpecificNodeID> - ProviderID string `json:"providerID,omitempty"` + ProviderID string `json:"providerID,omitempty" protobuf:"bytes,3,opt,name=providerID"` // Unschedulable controls node schedulability of new pods. By default, node is schedulable. - // More info: http://releases.k8s.io/release-1.2/docs/admin/node.md#manual-node-administration"` - Unschedulable bool `json:"unschedulable,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/admin/node.md#manual-node-administration + Unschedulable bool `json:"unschedulable,omitempty" protobuf:"varint,4,opt,name=unschedulable"` } // DaemonEndpoint contains information about a single Daemon endpoint. type DaemonEndpoint struct { + /* + The port tag was not properly in quotes in earlier releases, so it must be + uppercased for backwards compat (since it was falling back to var name of + 'Port'). + */ + // Port number of the given endpoint. - Port int32 `json:port` + Port int32 `json:"Port" protobuf:"varint,1,opt,name=Port"` } // NodeDaemonEndpoints lists ports opened by daemons running on the Node. type NodeDaemonEndpoints struct { // Endpoint on which Kubelet is listening. - KubeletEndpoint DaemonEndpoint `json:"kubeletEndpoint,omitempty"` + KubeletEndpoint DaemonEndpoint `json:"kubeletEndpoint,omitempty" protobuf:"bytes,1,opt,name=kubeletEndpoint"` }
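The DaemonEndpoint comment above records a subtle encoding/json gotcha worth spelling out: the old tag `json:port` is missing its quotes, so it is not a well-formed struct tag, the json package ignores it, and marshaling falls back to the exported Go field name "Port". The corrected tag therefore has to stay capitalized to keep the wire format stable. A small runnable demonstration:

package main

import (
	"encoding/json"
	"fmt"
)

// brokenTag mimics the old field: the malformed tag is invisible to
// encoding/json, which falls back to the Go field name as the JSON key.
type brokenTag struct {
	Port int32 `json:port`
}

// fixedTag mimics the corrected field: the quoted tag pins the same key.
type fixedTag struct {
	Port int32 `json:"Port"`
}

func main() {
	a, _ := json.Marshal(brokenTag{Port: 10250})
	b, _ := json.Marshal(fixedTag{Port: 10250})
	fmt.Println(string(a)) // {"Port":10250}
	fmt.Println(string(b)) // {"Port":10250}, identical on the wire
}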
// NodeSystemInfo is a set of ids/uuids to uniquely identify the node. type NodeSystemInfo struct { // Machine ID reported by the node. - MachineID string `json:"machineID"` + MachineID string `json:"machineID" protobuf:"bytes,1,opt,name=machineID"` // System UUID reported by the node. - SystemUUID string `json:"systemUUID"` + SystemUUID string `json:"systemUUID" protobuf:"bytes,2,opt,name=systemUUID"` // Boot ID reported by the node. - BootID string `json:"bootID"` + BootID string `json:"bootID" protobuf:"bytes,3,opt,name=bootID"` // Kernel Version reported by the node from 'uname -r' (e.g. 3.16.0-0.bpo.4-amd64). - KernelVersion string `json:"kernelVersion"` + KernelVersion string `json:"kernelVersion" protobuf:"bytes,4,opt,name=kernelVersion"` // OS Image reported by the node from /etc/os-release (e.g. Debian GNU/Linux 7 (wheezy)). - OSImage string `json:"osImage"` + OSImage string `json:"osImage" protobuf:"bytes,5,opt,name=osImage"` // ContainerRuntime Version reported by the node through runtime remote API (e.g. docker://1.5.0). - ContainerRuntimeVersion string `json:"containerRuntimeVersion"` + ContainerRuntimeVersion string `json:"containerRuntimeVersion" protobuf:"bytes,6,opt,name=containerRuntimeVersion"` // Kubelet Version reported by the node. - KubeletVersion string `json:"kubeletVersion"` + KubeletVersion string `json:"kubeletVersion" protobuf:"bytes,7,opt,name=kubeletVersion"` // KubeProxy Version reported by the node. - KubeProxyVersion string `json:"kubeProxyVersion"` + KubeProxyVersion string `json:"kubeProxyVersion" protobuf:"bytes,8,opt,name=kubeProxyVersion"` + // The Operating System reported by the node. + OperatingSystem string `json:"operatingSystem" protobuf:"bytes,9,opt,name=operatingSystem"` + // The Architecture reported by the node. + Architecture string `json:"architecture" protobuf:"bytes,10,opt,name=architecture"` } // NodeStatus is information about the current status of a node. type NodeStatus struct { // Capacity represents the total resources of a node. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/persistent-volumes.md#capacity for more details. - Capacity ResourceList `json:"capacity,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#capacity for more details. + Capacity ResourceList `json:"capacity,omitempty" protobuf:"bytes,1,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"` // Allocatable represents the resources of a node that are available for scheduling. // Defaults to Capacity. - Allocatable ResourceList `json:"allocatable,omitempty"` + Allocatable ResourceList `json:"allocatable,omitempty" protobuf:"bytes,2,rep,name=allocatable,casttype=ResourceList,castkey=ResourceName"` // NodePhase is the recently observed lifecycle phase of the node. - // More info: http://releases.k8s.io/release-1.2/docs/admin/node.md#node-phase - Phase NodePhase `json:"phase,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/admin/node.md#node-phase + Phase NodePhase `json:"phase,omitempty" protobuf:"bytes,3,opt,name=phase,casttype=NodePhase"` // Conditions is an array of current observed node conditions. - // More info: http://releases.k8s.io/release-1.2/docs/admin/node.md#node-condition - Conditions []NodeCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` + // More info: http://releases.k8s.io/HEAD/docs/admin/node.md#node-condition + Conditions []NodeCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,4,rep,name=conditions"` // List of addresses reachable to the node. // Queried from cloud provider, if available. - // More info: http://releases.k8s.io/release-1.2/docs/admin/node.md#node-addresses - Addresses []NodeAddress `json:"addresses,omitempty" patchStrategy:"merge" patchMergeKey:"type"` + // More info: http://releases.k8s.io/HEAD/docs/admin/node.md#node-addresses + Addresses []NodeAddress `json:"addresses,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,5,rep,name=addresses"` // Endpoints of daemons running on the Node. - DaemonEndpoints NodeDaemonEndpoints `json:"daemonEndpoints,omitempty"` + DaemonEndpoints NodeDaemonEndpoints `json:"daemonEndpoints,omitempty" protobuf:"bytes,6,opt,name=daemonEndpoints"` // Set of ids/uuids to uniquely identify the node. - // More info: http://releases.k8s.io/release-1.2/docs/admin/node.md#node-info - NodeInfo NodeSystemInfo `json:"nodeInfo,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/admin/node.md#node-info + NodeInfo NodeSystemInfo `json:"nodeInfo,omitempty" protobuf:"bytes,7,opt,name=nodeInfo"` // List of container images on this node. - Images []ContainerImage `json:"images",omitempty` + Images []ContainerImage `json:"images,omitempty" protobuf:"bytes,8,rep,name=images"` } // Describes a container image. type ContainerImage struct { // Names by which this image is known. // e.g.
["gcr.io/google_containers/hyperkube:v1.0.7", "dockerhub.io/google_containers/hyperkube:v1.0.7"] - Names []string `json:"names"` + Names []string `json:"names" protobuf:"bytes,1,rep,name=names"` // The size of the image in bytes. - SizeBytes int64 `json:"sizeBytes,omitempty"` + SizeBytes int64 `json:"sizeBytes,omitempty" protobuf:"varint,2,opt,name=sizeBytes"` } type NodePhase string @@ -2136,22 +2416,26 @@ const ( // NodeOutOfDisk means the kubelet will not accept new pods due to insufficient free disk // space on the node. NodeOutOfDisk NodeConditionType = "OutOfDisk" + // NodeMemoryPressure means the kubelet is under pressure due to insufficient available memory. + NodeMemoryPressure NodeConditionType = "MemoryPressure" + // NodeNetworkUnavailable means that network for the node is not correctly configured. + NodeNetworkUnavailable NodeConditionType = "NetworkUnavailable" ) // NodeCondition contains condition infromation for a node. type NodeCondition struct { // Type of node condition. - Type NodeConditionType `json:"type"` + Type NodeConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=NodeConditionType"` // Status of the condition, one of True, False, Unknown. - Status ConditionStatus `json:"status"` + Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"` // Last time we got an update on a given condition. - LastHeartbeatTime unversioned.Time `json:"lastHeartbeatTime,omitempty"` + LastHeartbeatTime unversioned.Time `json:"lastHeartbeatTime,omitempty" protobuf:"bytes,3,opt,name=lastHeartbeatTime"` // Last time the condition transit from one status to another. - LastTransitionTime unversioned.Time `json:"lastTransitionTime,omitempty"` + LastTransitionTime unversioned.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"` // (brief) reason for the condition's last transition. - Reason string `json:"reason,omitempty"` + Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"` // Human readable message indicating details about last transition. - Message string `json:"message,omitempty"` + Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"` } type NodeAddressType string @@ -2166,14 +2450,19 @@ const ( // NodeAddress contains information for the node's address. type NodeAddress struct { // Node address type, one of Hostname, ExternalIP or InternalIP. - Type NodeAddressType `json:"type"` + Type NodeAddressType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=NodeAddressType"` // The node address. - Address string `json:"address"` + Address string `json:"address" protobuf:"bytes,2,opt,name=address"` } // ResourceName is the name identifying various resources in a ResourceList. type ResourceName string +// Resource names must be not more than 63 characters, consisting of upper- or lower-case alphanumeric characters, +// with the -, _, and . characters allowed anywhere, except the first or last character. +// The default convention, matching that for annotations, is to use lower-case names, with dashes, rather than +// camel case, separating compound words. +// Fully-qualified resource typenames are constructed from a DNS-style subdomain, followed by a slash `/` and a name. const ( // CPU, in cores. (500m = .5 cores) ResourceCPU ResourceName = "cpu" @@ -2181,6 +2470,9 @@ const ( ResourceMemory ResourceName = "memory" // Volume size, in bytes (e,g. 5Gi = 5GiB = 5 * 1024 * 1024 * 1024) ResourceStorage ResourceName = "storage" + // NVIDIA GPU, in devices. 
Alpha, might change: although fractional and allowing values >1, only one whole device per node is assigned. + ResourceNvidiaGPU ResourceName = "alpha.kubernetes.io/nvidia-gpu" + // Number of Pods that may be running on this Node: see ResourcePods ) // ResourceList is a set of (resource name, quantity) pairs. @@ -2193,29 +2485,29 @@ type ResourceList map[ResourceName]resource.Quantity type Node struct { unversioned.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata - ObjectMeta `json:"metadata,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec defines the behavior of a node. - // http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#spec-and-status - Spec NodeSpec `json:"spec,omitempty"` + // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + Spec NodeSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // Most recently observed status of the node. // Populated by the system. // Read-only. - // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#spec-and-status - Status NodeStatus `json:"status,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + Status NodeStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } // NodeList is the whole list of all Nodes which have been registered with master. type NodeList struct { unversioned.TypeMeta `json:",inline"` // Standard list metadata. - // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#types-kinds - unversioned.ListMeta `json:"metadata,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of nodes - Items []Node `json:"items"` + Items []Node `json:"items" protobuf:"bytes,2,rep,name=items"` } type FinalizerName string @@ -2228,15 +2520,15 @@ const ( // NamespaceSpec describes the attributes on a Namespace. type NamespaceSpec struct { // Finalizers is an opaque list of values that must be empty to permanently remove object from storage. - // More info: http://releases.k8s.io/release-1.2/docs/design/namespaces.md#finalizers - Finalizers []FinalizerName `json:"finalizers,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/design/namespaces.md#finalizers + Finalizers []FinalizerName `json:"finalizers,omitempty" protobuf:"bytes,1,rep,name=finalizers,casttype=FinalizerName"` } // NamespaceStatus is information about the current status of a Namespace. type NamespaceStatus struct { // Phase is the current lifecycle phase of the namespace. - // More info: http://releases.k8s.io/release-1.2/docs/design/namespaces.md#phases - Phase NamespacePhase `json:"phase,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/design/namespaces.md#phases + Phase NamespacePhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=NamespacePhase"` } type NamespacePhase string @@ -2256,28 +2548,28 @@ const ( type Namespace struct { unversioned.TypeMeta `json:",inline"` // Standard object's metadata. 
- // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata - ObjectMeta `json:"metadata,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec defines the behavior of the Namespace. - // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#spec-and-status - Spec NamespaceSpec `json:"spec,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + Spec NamespaceSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // Status describes the current status of a Namespace. - // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#spec-and-status - Status NamespaceStatus `json:"status,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + Status NamespaceStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } // NamespaceList is a list of Namespaces. type NamespaceList struct { unversioned.TypeMeta `json:",inline"` // Standard list metadata. - // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#types-kinds - unversioned.ListMeta `json:"metadata,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Items is the list of Namespace objects in the list. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/namespaces.md - Items []Namespace `json:"items"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/namespaces.md + Items []Namespace `json:"items" protobuf:"bytes,2,rep,name=items"` } // Binding ties one object to another. @@ -2285,11 +2577,17 @@ type NamespaceList struct { type Binding struct { unversioned.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata - ObjectMeta `json:"metadata,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // The target object that you want to bind to the standard object. - Target ObjectReference `json:"target"` + Target ObjectReference `json:"target" protobuf:"bytes,2,opt,name=target"` +} + +// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out. +type Preconditions struct { + // Specifies the target UID. + UID *types.UID `json:"uid,omitempty" protobuf:"bytes,1,opt,name=uid,casttype=k8s.io/kubernetes/pkg/types.UID"` } // DeleteOptions may be provided when deleting an API object @@ -2300,7 +2598,15 @@ type DeleteOptions struct { // The value zero indicates delete immediately. If this value is nil, the default grace period for the // specified type will be used. // Defaults to a per-object value if not specified. Zero means delete immediately. - GracePeriodSeconds *int64 `json:"gracePeriodSeconds"` + GracePeriodSeconds *int64 `json:"gracePeriodSeconds,omitempty" protobuf:"varint,1,opt,name=gracePeriodSeconds"` + + // Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be + // returned. + Preconditions *Preconditions `json:"preconditions,omitempty" protobuf:"bytes,2,opt,name=preconditions"` +
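The Preconditions type above turns delete into a compare-and-delete: per the comment on DeleteOptions, a precondition that cannot be satisfied must produce a 409 Conflict rather than a deletion. A minimal sketch of that server-side check, with a plain string standing in for types.UID and the function name chosen purely for illustration:

// checkDeletePrecondition applies the documented contract: a nil
// precondition always passes, while a set UID must equal the stored
// object's UID or the delete is rejected with 409 Conflict.
func checkDeletePrecondition(preconditionUID *string, storedUID string) (status int, ok bool) {
	if preconditionUID == nil || *preconditionUID == storedUID {
		return 200, true
	}
	return 409, false // http.StatusConflict
}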
+ // Should the dependent objects be orphaned. If true/false, the "orphan" + // finalizer will be added to/removed from the object's finalizers list. + OrphanDependents *bool `json:"orphanDependents,omitempty" protobuf:"varint,3,opt,name=orphanDependents"` } // ExportOptions is the query options to the standard REST get call. @@ -2308,9 +2614,9 @@ type ExportOptions struct { unversioned.TypeMeta `json:",inline"` // Should this value be exported. Export strips fields that a user cannot specify. - Export bool `json:"export"` + Export bool `json:"export" protobuf:"varint,1,opt,name=export"` // Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace' - Exact bool `json:"exact"` + Exact bool `json:"exact" protobuf:"varint,2,opt,name=exact"` } // ListOptions is the query options to a standard REST list call. @@ -2319,18 +2625,18 @@ type ListOptions struct { // A selector to restrict the list of returned objects by their labels. // Defaults to everything. - LabelSelector string `json:"labelSelector,omitempty"` + LabelSelector string `json:"labelSelector,omitempty" protobuf:"bytes,1,opt,name=labelSelector"` // A selector to restrict the list of returned objects by their fields. // Defaults to everything. - FieldSelector string `json:"fieldSelector,omitempty"` + FieldSelector string `json:"fieldSelector,omitempty" protobuf:"bytes,2,opt,name=fieldSelector"` // Watch for changes to the described resources and return them as a stream of // add, update, and remove notifications. Specify resourceVersion. - Watch bool `json:"watch,omitempty"` + Watch bool `json:"watch,omitempty" protobuf:"varint,3,opt,name=watch"` // When specified with a watch call, shows changes that occur after that particular version of a resource. // Defaults to changes from the beginning of history. - ResourceVersion string `json:"resourceVersion,omitempty"` + ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,4,opt,name=resourceVersion"` // Timeout for the list/watch call. - TimeoutSeconds *int64 `json:"timeoutSeconds,omitempty"` + TimeoutSeconds *int64 `json:"timeoutSeconds,omitempty" protobuf:"varint,5,opt,name=timeoutSeconds"` } // PodLogOptions is the query options for a Pod's logs REST call. @@ -2338,31 +2644,31 @@ type PodLogOptions struct { unversioned.TypeMeta `json:",inline"` // The container for which to stream logs. Defaults to only container if there is one container in the pod. - Container string `json:"container,omitempty"` + Container string `json:"container,omitempty" protobuf:"bytes,1,opt,name=container"` // Follow the log stream of the pod. Defaults to false. - Follow bool `json:"follow,omitempty"` + Follow bool `json:"follow,omitempty" protobuf:"varint,2,opt,name=follow"` // Return previous terminated container logs. Defaults to false. - Previous bool `json:"previous,omitempty"` + Previous bool `json:"previous,omitempty" protobuf:"varint,3,opt,name=previous"` // A relative time in seconds before the current time from which to show logs. If this value // precedes the time a pod was started, only logs since the pod start will be returned. // If this value is in the future, no logs will be returned. // Only one of sinceSeconds or sinceTime may be specified. - SinceSeconds *int64 `json:"sinceSeconds,omitempty"` + SinceSeconds *int64 `json:"sinceSeconds,omitempty" protobuf:"varint,4,opt,name=sinceSeconds"` // An RFC3339 timestamp from which to show logs. If this value // precedes the time a pod was started, only logs since the pod start will be returned.
// If this value is in the future, no logs will be returned. // Only one of sinceSeconds or sinceTime may be specified. - SinceTime *unversioned.Time `json:"sinceTime,omitempty"` + SinceTime *unversioned.Time `json:"sinceTime,omitempty" protobuf:"bytes,5,opt,name=sinceTime"` // If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line // of log output. Defaults to false. - Timestamps bool `json:"timestamps,omitempty"` + Timestamps bool `json:"timestamps,omitempty" protobuf:"varint,6,opt,name=timestamps"` // If set, the number of lines from the end of the logs to show. If not specified, // logs are shown from the creation of the container or sinceSeconds or sinceTime - TailLines *int64 `json:"tailLines,omitempty"` + TailLines *int64 `json:"tailLines,omitempty" protobuf:"varint,7,opt,name=tailLines"` // If set, the number of bytes to read from the server before terminating the // log output. This may not display a complete final line of logging, and may return // slightly more or slightly less than the specified limit. - LimitBytes *int64 `json:"limitBytes,omitempty"` + LimitBytes *int64 `json:"limitBytes,omitempty" protobuf:"varint,8,opt,name=limitBytes"` } // PodAttachOptions is the query options to a Pod's remote attach call. @@ -2374,25 +2680,25 @@ type PodAttachOptions struct { // Stdin if true, redirects the standard input stream of the pod for this call. // Defaults to false. - Stdin bool `json:"stdin,omitempty"` + Stdin bool `json:"stdin,omitempty" protobuf:"varint,1,opt,name=stdin"` // Stdout if true indicates that stdout is to be redirected for the attach call. // Defaults to true. - Stdout bool `json:"stdout,omitempty"` + Stdout bool `json:"stdout,omitempty" protobuf:"varint,2,opt,name=stdout"` // Stderr if true indicates that stderr is to be redirected for the attach call. // Defaults to true. - Stderr bool `json:"stderr,omitempty"` + Stderr bool `json:"stderr,omitempty" protobuf:"varint,3,opt,name=stderr"` // TTY if true indicates that a tty will be allocated for the attach call. // This is passed through the container runtime so the tty // is allocated on the worker node by the container runtime. // Defaults to false. - TTY bool `json:"tty,omitempty"` + TTY bool `json:"tty,omitempty" protobuf:"varint,4,opt,name=tty"` // The container in which to execute the command. // Defaults to only container if there is only one container in the pod. - Container string `json:"container,omitempty"` + Container string `json:"container,omitempty" protobuf:"bytes,5,opt,name=container"` } // PodExecOptions is the query options to a Pod's remote exec call. @@ -2404,26 +2710,26 @@ type PodExecOptions struct { // Redirect the standard input stream of the pod for this call. // Defaults to false. - Stdin bool `json:"stdin,omitempty"` + Stdin bool `json:"stdin,omitempty" protobuf:"varint,1,opt,name=stdin"` // Redirect the standard output stream of the pod for this call. // Defaults to true. - Stdout bool `json:"stdout,omitempty"` + Stdout bool `json:"stdout,omitempty" protobuf:"varint,2,opt,name=stdout"` // Redirect the standard error stream of the pod for this call. // Defaults to true. - Stderr bool `json:"stderr,omitempty"` + Stderr bool `json:"stderr,omitempty" protobuf:"varint,3,opt,name=stderr"` // TTY if true indicates that a tty will be allocated for the exec call. // Defaults to false. - TTY bool `json:"tty,omitempty"` + TTY bool `json:"tty,omitempty" protobuf:"varint,4,opt,name=tty"` // Container in which to execute the command. 
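For the log options above, a sketch (vendored import path assumed) showing the pointer-valued knobs; note that only one of SinceSeconds or SinceTime may be set:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"
)

func main() {
	tail := int64(100)
	since := int64(3600)
	opts := v1.PodLogOptions{
		Container:    "app",  // needed only when the pod runs several containers
		Follow:       true,   // keep the log stream open
		Timestamps:   true,   // prefix every line with an RFC3339 timestamp
		TailLines:    &tail,  // last 100 lines only
		SinceSeconds: &since, // logs from the past hour; mutually exclusive with SinceTime
	}
	fmt.Printf("%+v\n", opts)
}
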
// Defaults to only container if there is only one container in the pod. - Container string `json:"container,omitempty"` + Container string `json:"container,omitempty" protobuf:"bytes,5,opt,name=container"` // Command is the remote command to execute. argv array. Not executed within a shell. - Command []string `json:"command"` + Command []string `json:"command" protobuf:"bytes,6,rep,name=command"` } // PodProxyOptions is the query options to a Pod's proxy call. @@ -2431,7 +2737,7 @@ type PodProxyOptions struct { unversioned.TypeMeta `json:",inline"` // Path is the URL path to use for the current proxy request to pod. - Path string `json:"path,omitempty"` + Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"` } // NodeProxyOptions is the query options to a Node's proxy call. @@ -2439,7 +2745,7 @@ type NodeProxyOptions struct { unversioned.TypeMeta `json:",inline"` // Path is the URL path to use for the current proxy request to node. - Path string `json:"path,omitempty"` + Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"` } // ServiceProxyOptions is the query options to a Service's proxy call. @@ -2451,28 +2757,45 @@ type ServiceProxyOptions struct { // For example, the whole request URL is // http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. // Path is _search?q=user:kimchy. - Path string `json:"path,omitempty"` + Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"` +} + +// OwnerReference contains enough information to let you identify an owning +// object. Currently, an owning object must be in the same namespace, so there +// is no namespace field. +type OwnerReference struct { + // API version of the referent. + APIVersion string `json:"apiVersion" protobuf:"bytes,5,opt,name=apiVersion"` + // Kind of the referent. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` + // Name of the referent. + // More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#names + Name string `json:"name" protobuf:"bytes,3,opt,name=name"` + // UID of the referent. + // More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#uids + UID types.UID `json:"uid" protobuf:"bytes,4,opt,name=uid,casttype=k8s.io/kubernetes/pkg/types.UID"` } // ObjectReference contains enough information to let you inspect or modify the referred object. type ObjectReference struct { // Kind of the referent. - // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#types-kinds - Kind string `json:"kind,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + Kind string `json:"kind,omitempty" protobuf:"bytes,1,opt,name=kind"` // Namespace of the referent. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/namespaces.md - Namespace string `json:"namespace,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/namespaces.md + Namespace string `json:"namespace,omitempty" protobuf:"bytes,2,opt,name=namespace"` // Name of the referent. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/identifiers.md#names - Name string `json:"name,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#names + Name string `json:"name,omitempty" protobuf:"bytes,3,opt,name=name"` // UID of the referent. 
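The new OwnerReference type above in use, as a sketch with hypothetical owner details and assumed vendored import paths:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/types"
)

func main() {
	ref := v1.OwnerReference{
		APIVersion: "v1",
		Kind:       "ReplicationController",
		Name:       "frontend",                         // hypothetical owner name
		UID:        types.UID("placeholder-owner-uid"), // must match the live owner's UID
	}
	// Owners are always in the dependent's namespace, hence no Namespace field.
	fmt.Printf("%+v\n", ref)
}
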
- // More info: http://releases.k8s.io/release-1.2/docs/user-guide/identifiers.md#uids - UID types.UID `json:"uid,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#uids + UID types.UID `json:"uid,omitempty" protobuf:"bytes,4,opt,name=uid,casttype=k8s.io/kubernetes/pkg/types.UID"` // API version of the referent. - APIVersion string `json:"apiVersion,omitempty"` + APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,5,opt,name=apiVersion"` // Specific resourceVersion to which this reference is made, if any. - // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#concurrency-control-and-consistency - ResourceVersion string `json:"resourceVersion,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#concurrency-control-and-consistency + ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,6,opt,name=resourceVersion"` // If referring to a piece of an object instead of an entire object, this string // should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. @@ -2482,31 +2805,31 @@ type ObjectReference struct { // index 2 in this pod). This syntax is chosen only to have some well-defined way of // referencing a part of an object. // TODO: this design is not final and this field is subject to change in the future. - FieldPath string `json:"fieldPath,omitempty"` + FieldPath string `json:"fieldPath,omitempty" protobuf:"bytes,7,opt,name=fieldPath"` } // LocalObjectReference contains enough information to let you locate the // referenced object inside the same namespace. type LocalObjectReference struct { // Name of the referent. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/identifiers.md#names + // More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#names // TODO: Add other useful fields. apiVersion, kind, uid? - Name string `json:"name,omitempty"` + Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` } // SerializedReference is a reference to serialized object. type SerializedReference struct { unversioned.TypeMeta `json:",inline"` // The reference to an object in the system. - Reference ObjectReference `json:"reference,omitempty"` + Reference ObjectReference `json:"reference,omitempty" protobuf:"bytes,1,opt,name=reference"` } // EventSource contains information for an event. type EventSource struct { // Component from which the event is generated. - Component string `json:"component,omitempty"` + Component string `json:"component,omitempty" protobuf:"bytes,1,opt,name=component"` // Host name on which the event is generated. - Host string `json:"host,omitempty"` + Host string `json:"host,omitempty" protobuf:"bytes,2,opt,name=host"` } // Valid values for event types (new types could be added in future) @@ -2524,57 +2847,57 @@ const ( type Event struct { unversioned.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata - ObjectMeta `json:"metadata"` + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"` // The object that this event is about. 
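A sketch of the fieldPath convention described above, with hypothetical names (vendored import path assumed):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"
)

func main() {
	ref := v1.ObjectReference{
		Kind:      "Pod",
		Namespace: "default",
		Name:      "mypod", // hypothetical
		// Refers to the container at index 2 of the pod's spec.
		FieldPath: "spec.containers[2]",
	}
	fmt.Printf("%+v\n", ref)
}
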
- InvolvedObject ObjectReference `json:"involvedObject"` + InvolvedObject ObjectReference `json:"involvedObject" protobuf:"bytes,2,opt,name=involvedObject"` // This should be a short, machine understandable string that gives the reason // for the transition into the object's current status. // TODO: provide exact specification for format. - Reason string `json:"reason,omitempty"` + Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"` // A human-readable description of the status of this operation. // TODO: decide on maximum length. - Message string `json:"message,omitempty"` + Message string `json:"message,omitempty" protobuf:"bytes,4,opt,name=message"` // The component reporting this event. Should be a short machine understandable string. - Source EventSource `json:"source,omitempty"` + Source EventSource `json:"source,omitempty" protobuf:"bytes,5,opt,name=source"` // The time at which the event was first recorded. (Time of server receipt is in TypeMeta.) - FirstTimestamp unversioned.Time `json:"firstTimestamp,omitempty"` + FirstTimestamp unversioned.Time `json:"firstTimestamp,omitempty" protobuf:"bytes,6,opt,name=firstTimestamp"` // The time at which the most recent occurrence of this event was recorded. - LastTimestamp unversioned.Time `json:"lastTimestamp,omitempty"` + LastTimestamp unversioned.Time `json:"lastTimestamp,omitempty" protobuf:"bytes,7,opt,name=lastTimestamp"` // The number of times this event has occurred. - Count int32 `json:"count,omitempty"` + Count int32 `json:"count,omitempty" protobuf:"varint,8,opt,name=count"` // Type of this event (Normal, Warning), new types could be added in the future - Type string `json:"type,omitempty"` + Type string `json:"type,omitempty" protobuf:"bytes,9,opt,name=type"` } // EventList is a list of events. type EventList struct { unversioned.TypeMeta `json:",inline"` // Standard list metadata. - // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#types-kinds - unversioned.ListMeta `json:"metadata,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of events - Items []Event `json:"items"` + Items []Event `json:"items" protobuf:"bytes,2,rep,name=items"` } // List holds a list of objects, which may not be known by the server. type List struct { unversioned.TypeMeta `json:",inline"` // Standard list metadata. - // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#types-kinds - unversioned.ListMeta `json:"metadata,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of objects - Items []runtime.RawExtension `json:"items"` + Items []runtime.RawExtension `json:"items" protobuf:"bytes,2,rep,name=items"` } // LimitType is a type of object that is limited @@ -2590,23 +2913,23 @@ const ( // LimitRangeItem defines a min/max usage limit for any resource that matches on kind. type LimitRangeItem struct { // Type of resource that this limit applies to. - Type LimitType `json:"type,omitempty"` + Type LimitType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=LimitType"` // Max usage constraints on this kind by resource name. 
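A sketch assembling the Event fields above; the vendored imports (including unversioned.Now for timestamps) are assumed, and the names and message are hypothetical:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/unversioned"
	"k8s.io/kubernetes/pkg/api/v1"
)

func main() {
	now := unversioned.Now()
	ev := v1.Event{
		InvolvedObject: v1.ObjectReference{Kind: "Pod", Namespace: "default", Name: "mypod"},
		Reason:         "Scheduled", // short, machine-understandable
		Message:        "Successfully assigned mypod to node-1",
		Source:         v1.EventSource{Component: "scheduler", Host: "node-1"},
		FirstTimestamp: now,
		LastTimestamp:  now,
		Count:          1,
		Type:           "Normal", // Normal or Warning
	}
	fmt.Printf("%s: %s\n", ev.Reason, ev.Message)
}
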
- Max ResourceList `json:"max,omitempty"` + Max ResourceList `json:"max,omitempty" protobuf:"bytes,2,rep,name=max,casttype=ResourceList,castkey=ResourceName"` // Min usage constraints on this kind by resource name. - Min ResourceList `json:"min,omitempty"` + Min ResourceList `json:"min,omitempty" protobuf:"bytes,3,rep,name=min,casttype=ResourceList,castkey=ResourceName"` // Default resource requirement limit value by resource name if resource limit is omitted. - Default ResourceList `json:"default,omitempty"` + Default ResourceList `json:"default,omitempty" protobuf:"bytes,4,rep,name=default,casttype=ResourceList,castkey=ResourceName"` // DefaultRequest is the default resource requirement request value by resource name if resource request is omitted. - DefaultRequest ResourceList `json:"defaultRequest,omitempty"` + DefaultRequest ResourceList `json:"defaultRequest,omitempty" protobuf:"bytes,5,rep,name=defaultRequest,casttype=ResourceList,castkey=ResourceName"` // MaxLimitRequestRatio if specified, the named resource must have a request and limit that are both non-zero where limit divided by request is less than or equal to the enumerated value; this represents the max burst for the named resource. - MaxLimitRequestRatio ResourceList `json:"maxLimitRequestRatio,omitempty"` + MaxLimitRequestRatio ResourceList `json:"maxLimitRequestRatio,omitempty" protobuf:"bytes,6,rep,name=maxLimitRequestRatio,casttype=ResourceList,castkey=ResourceName"` } // LimitRangeSpec defines a min/max usage limit for resources that match on kind. type LimitRangeSpec struct { // Limits is the list of LimitRangeItem objects that are enforced. - Limits []LimitRangeItem `json:"limits"` + Limits []LimitRangeItem `json:"limits" protobuf:"bytes,1,rep,name=limits"` } // +genclient=true @@ -2615,24 +2938,24 @@ type LimitRangeSpec struct { type LimitRange struct { unversioned.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata - ObjectMeta `json:"metadata,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec defines the limits enforced. - // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#spec-and-status - Spec LimitRangeSpec `json:"spec,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + Spec LimitRangeSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` } // LimitRangeList is a list of LimitRange items. type LimitRangeList struct { unversioned.TypeMeta `json:",inline"` // Standard list metadata. - // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#types-kinds - unversioned.ListMeta `json:"metadata,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Items is a list of LimitRange objects. 
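A sketch of a LimitRangeItem using the min/max/default fields above, assuming the vendored resource package for quantities and the Container limit type:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/resource"
	"k8s.io/kubernetes/pkg/api/v1"
)

func main() {
	item := v1.LimitRangeItem{
		Type:    v1.LimitTypeContainer,
		Max:     v1.ResourceList{v1.ResourceCPU: resource.MustParse("2")},    // hard ceiling
		Min:     v1.ResourceList{v1.ResourceCPU: resource.MustParse("100m")}, // hard floor
		Default: v1.ResourceList{v1.ResourceCPU: resource.MustParse("500m")}, // applied when a limit is omitted
	}
	spec := v1.LimitRangeSpec{Limits: []v1.LimitRangeItem{item}}
	fmt.Printf("%+v\n", spec)
}
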
- // More info: http://releases.k8s.io/release-1.2/docs/design/admission_control_limit_range.md - Items []LimitRange `json:"items"` + // More info: http://releases.k8s.io/HEAD/docs/design/admission_control_limit_range.md + Items []LimitRange `json:"items" protobuf:"bytes,2,rep,name=items"` } // The following identify resource constants for Kubernetes object types @@ -2651,14 +2974,18 @@ const ( ResourceConfigMaps ResourceName = "configmaps" // ResourcePersistentVolumeClaims, number ResourcePersistentVolumeClaims ResourceName = "persistentvolumeclaims" + // ResourceServicesNodePorts, number + ResourceServicesNodePorts ResourceName = "services.nodeports" + // ResourceServicesLoadBalancers, number + ResourceServicesLoadBalancers ResourceName = "services.loadbalancers" // CPU request, in cores. (500m = .5 cores) - ResourceCPURequest ResourceName = "cpu.request" - // CPU limit, in cores. (500m = .5 cores) - ResourceCPULimit ResourceName = "cpu.limit" + ResourceRequestsCPU ResourceName = "requests.cpu" // Memory request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) - ResourceMemoryRequest ResourceName = "memory.request" + ResourceRequestsMemory ResourceName = "requests.memory" + // CPU limit, in cores. (500m = .5 cores) + ResourceLimitsCPU ResourceName = "limits.cpu" // Memory limit, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) - ResourceMemoryLimit ResourceName = "memory.limit" + ResourceLimitsMemory ResourceName = "limits.memory" ) // A ResourceQuotaScope defines a filter that must match each object tracked by a quota @@ -2678,20 +3005,20 @@ const ( // ResourceQuotaSpec defines the desired hard limits to enforce for Quota. type ResourceQuotaSpec struct { // Hard is the set of desired hard limits for each named resource. - // More info: http://releases.k8s.io/release-1.2/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota - Hard ResourceList `json:"hard,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota + Hard ResourceList `json:"hard,omitempty" protobuf:"bytes,1,rep,name=hard,casttype=ResourceList,castkey=ResourceName"` // A collection of filters that must match each object tracked by a quota. // If not specified, the quota matches all objects. - Scopes []ResourceQuotaScope `json:"scopes,omitempty"` + Scopes []ResourceQuotaScope `json:"scopes,omitempty" protobuf:"bytes,2,rep,name=scopes,casttype=ResourceQuotaScope"` } // ResourceQuotaStatus defines the enforced hard limits and observed use. type ResourceQuotaStatus struct { // Hard is the set of enforced hard limits for each named resource. - // More info: http://releases.k8s.io/release-1.2/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota - Hard ResourceList `json:"hard,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota + Hard ResourceList `json:"hard,omitempty" protobuf:"bytes,1,rep,name=hard,casttype=ResourceList,castkey=ResourceName"` // Used is the current observed total usage of the resource in the namespace. - Used ResourceList `json:"used,omitempty"` + Used ResourceList `json:"used,omitempty" protobuf:"bytes,2,rep,name=used,casttype=ResourceList,castkey=ResourceName"` } // +genclient=true @@ -2700,28 +3027,28 @@ type ResourceQuotaStatus struct { type ResourceQuota struct { unversioned.TypeMeta `json:",inline"` // Standard object's metadata. 
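The renamed quota constants above (requests.cpu, limits.memory, and friends) in a sketch; the vendored imports and the NotTerminating scope constant (from the elided const block) are assumptions:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/resource"
	"k8s.io/kubernetes/pkg/api/v1"
)

func main() {
	spec := v1.ResourceQuotaSpec{
		Hard: v1.ResourceList{
			v1.ResourceRequestsCPU:  resource.MustParse("10"),   // was cpu.request
			v1.ResourceLimitsMemory: resource.MustParse("32Gi"), // was memory.limit
			v1.ResourcePods:         resource.MustParse("50"),
		},
		// Only objects matching every listed scope are tracked by the quota.
		Scopes: []v1.ResourceQuotaScope{v1.ResourceQuotaScopeNotTerminating},
	}
	fmt.Printf("%+v\n", spec.Hard)
}
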
- // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata - ObjectMeta `json:"metadata,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec defines the desired quota. - // http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#spec-and-status - Spec ResourceQuotaSpec `json:"spec,omitempty"` + // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + Spec ResourceQuotaSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // Status defines the actual enforced quota and its current usage. - // http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#spec-and-status - Status ResourceQuotaStatus `json:"status,omitempty"` + // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + Status ResourceQuotaStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } // ResourceQuotaList is a list of ResourceQuota items. type ResourceQuotaList struct { unversioned.TypeMeta `json:",inline"` // Standard list metadata. - // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#types-kinds - unversioned.ListMeta `json:"metadata,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Items is a list of ResourceQuota objects. - // More info: http://releases.k8s.io/release-1.2/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota - Items []ResourceQuota `json:"items"` + // More info: http://releases.k8s.io/HEAD/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota + Items []ResourceQuota `json:"items" protobuf:"bytes,2,rep,name=items"` } // +genclient=true @@ -2731,18 +3058,18 @@ type ResourceQuotaList struct { type Secret struct { unversioned.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata - ObjectMeta `json:"metadata,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Data contains the secret data. Each key must be a valid DNS_SUBDOMAIN // or leading dot followed by valid DNS_SUBDOMAIN. // The serialized form of the secret data is a base64 encoded string, // representing the arbitrary (possibly non-string) data value here. // Described in https://tools.ietf.org/html/rfc4648#section-4 - Data map[string][]byte `json:"data,omitempty"` + Data map[string][]byte `json:"data,omitempty" protobuf:"bytes,2,rep,name=data"` // Used to facilitate programmatic handling of secret data. - Type SecretType `json:"type,omitempty"` + Type SecretType `json:"type,omitempty" protobuf:"bytes,3,opt,name=type,casttype=SecretType"` } const MaxSecretSize = 1 * 1024 * 1024 @@ -2803,12 +3130,12 @@ const ( type SecretList struct { unversioned.TypeMeta `json:",inline"` // Standard list metadata. - // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#types-kinds - unversioned.ListMeta `json:"metadata,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Items is a list of secret objects. 
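A sketch of the Secret shape above: in Go the Data values are raw bytes, and clients sending JSON supply base64 (RFC 4648) strings instead. Vendored import assumed; the values are hypothetical:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"
)

func main() {
	s := v1.Secret{
		Data: map[string][]byte{ // keys must be valid DNS subdomains
			"username": []byte("admin"),   // hypothetical
			"password": []byte("hunter2"), // hypothetical
		},
		Type: v1.SecretTypeOpaque,
	}
	fmt.Printf("keys=%d type=%s\n", len(s.Data), s.Type)
}
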
- // More info: http://releases.k8s.io/release-1.2/docs/user-guide/secrets.md - Items []Secret `json:"items"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/secrets.md + Items []Secret `json:"items" protobuf:"bytes,2,rep,name=items"` } // +genclient=true @@ -2817,23 +3144,23 @@ type SecretList struct { type ConfigMap struct { unversioned.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata - ObjectMeta `json:"metadata,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Data contains the configuration data. // Each key must be a valid DNS_SUBDOMAIN with an optional leading dot. - Data map[string]string `json:"data,omitempty"` + Data map[string]string `json:"data,omitempty" protobuf:"bytes,2,rep,name=data"` } // ConfigMapList is a resource containing a list of ConfigMap objects. type ConfigMapList struct { unversioned.TypeMeta `json:",inline"` - // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata - unversioned.ListMeta `json:"metadata,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Items is the list of ConfigMaps. - Items []ConfigMap `json:"items,omitempty"` + Items []ConfigMap `json:"items" protobuf:"bytes,2,rep,name=items"` } // Type and constants for component health validation. @@ -2848,16 +3175,16 @@ const ( type ComponentCondition struct { // Type of condition for a component. // Valid value: "Healthy" - Type ComponentConditionType `json:"type"` + Type ComponentConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=ComponentConditionType"` // Status of the condition for a component. // Valid values for "Healthy": "True", "False", or "Unknown". - Status ConditionStatus `json:"status"` + Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"` // Message about the condition for a component. // For example, information about a health check. - Message string `json:"message,omitempty"` + Message string `json:"message,omitempty" protobuf:"bytes,3,opt,name=message"` // Condition error code for a component. // For example, a health check error code. - Error string `json:"error,omitempty"` + Error string `json:"error,omitempty" protobuf:"bytes,4,opt,name=error"` } // +genclient=true,nonNamespaced=true @@ -2866,37 +3193,40 @@ type ComponentCondition struct { type ComponentStatus struct { unversioned.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata - ObjectMeta `json:"metadata,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of component conditions observed - Conditions []ComponentCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` + Conditions []ComponentCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"` } // Status of all the conditions for the component as a list of ComponentStatus objects. type ComponentStatusList struct { unversioned.TypeMeta `json:",inline"` // Standard list metadata. 
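And the analogous ConfigMap sketch for the type above, with hypothetical keys (vendored import assumed):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"
)

func main() {
	cm := v1.ConfigMap{
		// Keys must be valid DNS subdomains, optionally with a leading dot.
		Data: map[string]string{
			"log.level":   "debug",
			"db.endpoint": "db.example.com:5432",
		},
	}
	fmt.Printf("%d entries\n", len(cm.Data))
}
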
- // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#types-kinds - unversioned.ListMeta `json:"metadata,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of ComponentStatus objects. - Items []ComponentStatus `json:"items"` + Items []ComponentStatus `json:"items" protobuf:"bytes,2,rep,name=items"` } // DownwardAPIVolumeSource represents a volume containing downward API info. // Downward API volumes support ownership management and SELinux relabeling. type DownwardAPIVolumeSource struct { // Items is a list of downward API volume file - Items []DownwardAPIVolumeFile `json:"items,omitempty"` + Items []DownwardAPIVolumeFile `json:"items,omitempty" protobuf:"bytes,1,rep,name=items"` } // DownwardAPIVolumeFile represents information to create the file containing the pod field type DownwardAPIVolumeFile struct { // Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..' - Path string `json:"path"` + Path string `json:"path" protobuf:"bytes,1,opt,name=path"` // Required: Selects a field of the pod: only annotations, labels, name and namespace are supported. - FieldRef ObjectFieldSelector `json:"fieldRef"` + FieldRef *ObjectFieldSelector `json:"fieldRef,omitempty" protobuf:"bytes,2,opt,name=fieldRef"` + // Selects a resource of the container: only resources limits and requests + // (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + ResourceFieldRef *ResourceFieldSelector `json:"resourceFieldRef,omitempty" protobuf:"bytes,3,opt,name=resourceFieldRef"` } // SecurityContext holds security configuration that will be applied to a container. @@ -2905,56 +3235,56 @@ type DownwardAPIVolumeFile struct { type SecurityContext struct { // The capabilities to add/drop when running containers. // Defaults to the default set of capabilities granted by the container runtime. - Capabilities *Capabilities `json:"capabilities,omitempty"` + Capabilities *Capabilities `json:"capabilities,omitempty" protobuf:"bytes,1,opt,name=capabilities"` // Run container in privileged mode. // Processes in privileged containers are essentially equivalent to root on the host. // Defaults to false. - Privileged *bool `json:"privileged,omitempty"` + Privileged *bool `json:"privileged,omitempty" protobuf:"varint,2,opt,name=privileged"` // The SELinux context to be applied to the container. // If unspecified, the container runtime will allocate a random SELinux context for each // container. May also be set in PodSecurityContext. If set in both SecurityContext and // PodSecurityContext, the value specified in SecurityContext takes precedence. - SELinuxOptions *SELinuxOptions `json:"seLinuxOptions,omitempty"` + SELinuxOptions *SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,3,opt,name=seLinuxOptions"` // The UID to run the entrypoint of the container process. // Defaults to user specified in image metadata if unspecified. // May also be set in PodSecurityContext. If set in both SecurityContext and // PodSecurityContext, the value specified in SecurityContext takes precedence. - RunAsUser *int64 `json:"runAsUser,omitempty"` + RunAsUser *int64 `json:"runAsUser,omitempty" protobuf:"varint,4,opt,name=runAsUser"` // Indicates that the container must run as a non-root user. 
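A sketch contrasting the now-optional fieldRef with the new resourceFieldRef in the downward API file type above (vendored import assumed; the container name is hypothetical):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"
)

func main() {
	files := []v1.DownwardAPIVolumeFile{
		{
			Path:     "labels",
			FieldRef: &v1.ObjectFieldSelector{FieldPath: "metadata.labels"}, // pointer: now optional
		},
		{
			Path: "cpu_limit",
			// New in this patch: project a container resource limit or request into a file.
			ResourceFieldRef: &v1.ResourceFieldSelector{
				ContainerName: "app", // hypothetical
				Resource:      "limits.cpu",
			},
		},
	}
	fmt.Printf("%+v\n", files)
}
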
// If true, the Kubelet will validate the image at runtime to ensure that it // does not run as UID 0 (root) and fail to start the container if it does. // If unset or false, no such validation will be performed. // May also be set in PodSecurityContext. If set in both SecurityContext and // PodSecurityContext, the value specified in SecurityContext takes precedence. - RunAsNonRoot *bool `json:"runAsNonRoot,omitempty"` + RunAsNonRoot *bool `json:"runAsNonRoot,omitempty" protobuf:"varint,5,opt,name=runAsNonRoot"` // Whether this container has a read-only root filesystem. // Default is false. - ReadOnlyRootFilesystem *bool `json:"readOnlyRootFilesystem,omitempty"` + ReadOnlyRootFilesystem *bool `json:"readOnlyRootFilesystem,omitempty" protobuf:"varint,6,opt,name=readOnlyRootFilesystem"` } // SELinuxOptions are the labels to be applied to the container type SELinuxOptions struct { // User is a SELinux user label that applies to the container. - User string `json:"user,omitempty"` + User string `json:"user,omitempty" protobuf:"bytes,1,opt,name=user"` // Role is a SELinux role label that applies to the container. - Role string `json:"role,omitempty"` + Role string `json:"role,omitempty" protobuf:"bytes,2,opt,name=role"` // Type is a SELinux type label that applies to the container. - Type string `json:"type,omitempty"` + Type string `json:"type,omitempty" protobuf:"bytes,3,opt,name=type"` // Level is SELinux level label that applies to the container. - Level string `json:"level,omitempty"` + Level string `json:"level,omitempty" protobuf:"bytes,4,opt,name=level"` } // RangeAllocation is not a public type. type RangeAllocation struct { unversioned.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata - ObjectMeta `json:"metadata,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Range is string that identifies the range represented by 'data'. - Range string `json:"range"` + Range string `json:"range" protobuf:"bytes,2,opt,name=range"` // Data is a bit array containing all allocated addresses in the previous segment. - Data []byte `json:"data"` + Data []byte `json:"data" protobuf:"bytes,3,opt,name=data"` } const ( diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/v1/types_swagger_doc_generated.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/v1/types_swagger_doc_generated.go index f1878b5481a9..c18b5e7e4b26 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/v1/types_swagger_doc_generated.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/v1/types_swagger_doc_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2015 The Kubernetes Authors All rights reserved. +Copyright 2016 The Kubernetes Authors All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -29,10 +29,10 @@ package v1 // AUTO-GENERATED FUNCTIONS START HERE var map_AWSElasticBlockStoreVolumeSource = map[string]string{ "": "Represents a Persistent Disk resource in AWS.\n\nAn AWS EBS disk must exist before mounting to a container. The disk must also be in the same AWS zone as the kubelet. An AWS EBS disk can only be mounted as read/write once. AWS EBS volumes support ownership management and SELinux relabeling.", - "volumeID": "Unique ID of the persistent disk resource in AWS (Amazon EBS volume). 
More info: http://releases.k8s.io/release-1.2/docs/user-guide/volumes.md#awselasticblockstore", - "fsType": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: http://releases.k8s.io/release-1.2/docs/user-guide/volumes.md#awselasticblockstore", + "volumeID": "Unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#awselasticblockstore", + "fsType": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#awselasticblockstore", "partition": "The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty).", - "readOnly": "Specify \"true\" to force and set the ReadOnly property in VolumeMounts to \"true\". If omitted, the default is \"false\". More info: http://releases.k8s.io/release-1.2/docs/user-guide/volumes.md#awselasticblockstore", + "readOnly": "Specify \"true\" to force and set the ReadOnly property in VolumeMounts to \"true\". If omitted, the default is \"false\". More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#awselasticblockstore", } func (AWSElasticBlockStoreVolumeSource) SwaggerDoc() map[string]string { @@ -40,8 +40,10 @@ func (AWSElasticBlockStoreVolumeSource) SwaggerDoc() map[string]string { } var map_Affinity = map[string]string{ - "": "Affinity is a group of affinity scheduling rules, currently only node affinity, but in the future also inter-pod affinity.", - "nodeAffinity": "Describes node affinity scheduling rules for the pod.", + "": "Affinity is a group of affinity scheduling rules.", + "nodeAffinity": "Describes node affinity scheduling rules for the pod.", + "podAffinity": "Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).", + "podAntiAffinity": "Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).", } func (Affinity) SwaggerDoc() map[string]string { @@ -61,7 +63,7 @@ func (AzureFileVolumeSource) SwaggerDoc() map[string]string { var map_Binding = map[string]string{ "": "Binding ties one object to another. For example, a pod is bound to a node by a scheduler.", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. 
More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", "target": "The target object that you want to bind to the standard object.", } @@ -81,12 +83,12 @@ func (Capabilities) SwaggerDoc() map[string]string { var map_CephFSVolumeSource = map[string]string{ "": "Represents a Ceph Filesystem mount that lasts the lifetime of a pod Cephfs volumes do not support ownership management or SELinux relabeling.", - "monitors": "Required: Monitors is a collection of Ceph monitors More info: http://releases.k8s.io/release-1.2/examples/cephfs/README.md#how-to-use-it", + "monitors": "Required: Monitors is a collection of Ceph monitors More info: http://releases.k8s.io/HEAD/examples/cephfs/README.md#how-to-use-it", "path": "Optional: Used as the mounted root, rather than the full Ceph tree, default is /", - "user": "Optional: User is the rados user name, default is admin More info: http://releases.k8s.io/release-1.2/examples/cephfs/README.md#how-to-use-it", - "secretFile": "Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: http://releases.k8s.io/release-1.2/examples/cephfs/README.md#how-to-use-it", - "secretRef": "Optional: SecretRef is reference to the authentication secret for User, default is empty. More info: http://releases.k8s.io/release-1.2/examples/cephfs/README.md#how-to-use-it", - "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: http://releases.k8s.io/release-1.2/examples/cephfs/README.md#how-to-use-it", + "user": "Optional: User is the rados user name, default is admin More info: http://releases.k8s.io/HEAD/examples/cephfs/README.md#how-to-use-it", + "secretFile": "Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: http://releases.k8s.io/HEAD/examples/cephfs/README.md#how-to-use-it", + "secretRef": "Optional: SecretRef is reference to the authentication secret for User, default is empty. More info: http://releases.k8s.io/HEAD/examples/cephfs/README.md#how-to-use-it", + "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: http://releases.k8s.io/HEAD/examples/cephfs/README.md#how-to-use-it", } func (CephFSVolumeSource) SwaggerDoc() map[string]string { @@ -95,9 +97,9 @@ func (CephFSVolumeSource) SwaggerDoc() map[string]string { var map_CinderVolumeSource = map[string]string{ "": "Represents a cinder volume resource in Openstack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet. Cinder volumes support ownership management and SELinux relabeling.", - "volumeID": "volume id used to identify the volume in cinder More info: http://releases.k8s.io/release-1.2/examples/mysql-cinder-pd/README.md", - "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: http://releases.k8s.io/release-1.2/examples/mysql-cinder-pd/README.md", - "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: http://releases.k8s.io/release-1.2/examples/mysql-cinder-pd/README.md", + "volumeID": "volume id used to identify the volume in cinder More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", + "fsType": "Filesystem type to mount. 
Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", + "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", } func (CinderVolumeSource) SwaggerDoc() map[string]string { @@ -118,7 +120,7 @@ func (ComponentCondition) SwaggerDoc() map[string]string { var map_ComponentStatus = map[string]string{ "": "ComponentStatus (and ComponentStatusList) holds the cluster validation info.", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", "conditions": "List of component conditions observed", } @@ -128,7 +130,7 @@ func (ComponentStatus) SwaggerDoc() map[string]string { var map_ComponentStatusList = map[string]string{ "": "Status of all the conditions for the component as a list of ComponentStatus objects.", - "metadata": "Standard list metadata. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", "items": "List of ComponentStatus objects.", } @@ -138,7 +140,7 @@ func (ComponentStatusList) SwaggerDoc() map[string]string { var map_ConfigMap = map[string]string{ "": "ConfigMap holds configuration data for pods to consume.", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", "data": "Data contains the configuration data. Each key must be a valid DNS_SUBDOMAIN with an optional leading dot.", } @@ -157,7 +159,7 @@ func (ConfigMapKeySelector) SwaggerDoc() map[string]string { var map_ConfigMapList = map[string]string{ "": "ConfigMapList is a resource containing a list of ConfigMap objects.", - "metadata": "More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata", + "metadata": "More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", "items": "Items is the list of ConfigMaps.", } @@ -177,20 +179,20 @@ func (ConfigMapVolumeSource) SwaggerDoc() map[string]string { var map_Container = map[string]string{ "": "A single application container that you want to run within a pod.", "name": "Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.", - "image": "Docker image name. More info: http://releases.k8s.io/release-1.2/docs/user-guide/images.md", - "command": "Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. 
More info: http://releases.k8s.io/release-1.2/docs/user-guide/containers.md#containers-and-commands", - "args": "Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: http://releases.k8s.io/release-1.2/docs/user-guide/containers.md#containers-and-commands", + "image": "Docker image name. More info: http://releases.k8s.io/HEAD/docs/user-guide/images.md", + "command": "Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: http://releases.k8s.io/HEAD/docs/user-guide/containers.md#containers-and-commands", + "args": "Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: http://releases.k8s.io/HEAD/docs/user-guide/containers.md#containers-and-commands", "workingDir": "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.", "ports": "List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Cannot be updated.", "env": "List of environment variables to set in the container. Cannot be updated.", - "resources": "Compute Resources required by this container. Cannot be updated. More info: http://releases.k8s.io/release-1.2/docs/user-guide/persistent-volumes.md#resources", - "volumeMounts": "Pod volumes to mount into the container's filesyste. Cannot be updated.", - "livenessProbe": "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: http://releases.k8s.io/release-1.2/docs/user-guide/pod-states.md#container-probes", - "readinessProbe": "Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: http://releases.k8s.io/release-1.2/docs/user-guide/pod-states.md#container-probes", + "resources": "Compute Resources required by this container. Cannot be updated. More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#resources", + "volumeMounts": "Pod volumes to mount into the container's filesystem. 
Cannot be updated.", + "livenessProbe": "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#container-probes", + "readinessProbe": "Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#container-probes", "lifecycle": "Actions that the management system should take in response to container lifecycle events. Cannot be updated.", "terminationMessagePath": "Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Defaults to /dev/termination-log. Cannot be updated.", - "imagePullPolicy": "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: http://releases.k8s.io/release-1.2/docs/user-guide/images.md#updating-images", - "securityContext": "Security options the pod should run with. More info: http://releases.k8s.io/release-1.2/docs/design/security_context.md", + "imagePullPolicy": "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: http://releases.k8s.io/HEAD/docs/user-guide/images.md#updating-images", + "securityContext": "Security options the pod should run with. More info: http://releases.k8s.io/HEAD/docs/design/security_context.md", "stdin": "Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.", "stdinOnce": "Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false", "tty": "Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.", @@ -275,9 +277,9 @@ var map_ContainerStatus = map[string]string{ "lastState": "Details about the container's last termination condition.", "ready": "Specifies whether the container has passed its readiness probe.", "restartCount": "The number of times the container has been restarted, currently based on the number of dead containers that have not yet been removed. Note that this is calculated from dead containers. But those containers are subject to garbage collection. This value will get capped at 5 by GC.", - "image": "The image the container is running. More info: http://releases.k8s.io/release-1.2/docs/user-guide/images.md", + "image": "The image the container is running. More info: http://releases.k8s.io/HEAD/docs/user-guide/images.md", "imageID": "ImageID of the container's image.", - "containerID": "Container's ID in the format 'docker://'. 
More info: http://releases.k8s.io/release-1.2/docs/user-guide/container-environment.md#container-information", + "containerID": "Container's ID in the format 'docker://'. More info: http://releases.k8s.io/HEAD/docs/user-guide/container-environment.md#container-information", } func (ContainerStatus) SwaggerDoc() map[string]string { @@ -296,6 +298,8 @@ func (DaemonEndpoint) SwaggerDoc() map[string]string { var map_DeleteOptions = map[string]string{ "": "DeleteOptions may be provided when deleting an API object", "gracePeriodSeconds": "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.", + "preconditions": "Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be returned.", + "orphanDependents": "Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list.", } func (DeleteOptions) SwaggerDoc() map[string]string { @@ -303,9 +307,10 @@ func (DeleteOptions) SwaggerDoc() map[string]string { } var map_DownwardAPIVolumeFile = map[string]string{ - "": "DownwardAPIVolumeFile represents information to create the file containing the pod field", - "path": "Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..'", - "fieldRef": "Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.", + "": "DownwardAPIVolumeFile represents information to create the file containing the pod field", + "path": "Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..'", + "fieldRef": "Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.", + "resourceFieldRef": "Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.", } func (DownwardAPIVolumeFile) SwaggerDoc() map[string]string { @@ -323,7 +328,7 @@ func (DownwardAPIVolumeSource) SwaggerDoc() map[string]string { var map_EmptyDirVolumeSource = map[string]string{ "": "Represents an empty directory for a pod. Empty directory volumes support ownership management and SELinux relabeling.", - "medium": "What type of storage medium should back this directory. The default is \"\" which means to use the node's default medium. Must be an empty string (default) or Memory. More info: http://releases.k8s.io/release-1.2/docs/user-guide/volumes.md#emptydir", + "medium": "What type of storage medium should back this directory. The default is \"\" which means to use the node's default medium. Must be an empty string (default) or Memory. More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#emptydir", } func (EmptyDirVolumeSource) SwaggerDoc() map[string]string { @@ -332,7 +337,8 @@ func (EmptyDirVolumeSource) SwaggerDoc() map[string]string { var map_EndpointAddress = map[string]string{ "": "EndpointAddress is a tuple that describes single IP address.", - "ip": "The IP of this endpoint. 
May not be loopback (127.0.0.0/8), link-local (169.254.0.0/16), or link-local multicast ((224.0.0.0/24).", + "ip": "The IP of this endpoint. May not be loopback (127.0.0.0/8), link-local (169.254.0.0/16), or link-local multicast ((224.0.0.0/24). IPv6 is also accepted but not fully supported on all platforms. Also, certain kubernetes components, like kube-proxy, are not IPv6 ready.", + "hostname": "The Hostname of this endpoint", "targetRef": "Reference to object providing the endpoint.", } @@ -364,7 +370,7 @@ func (EndpointSubset) SwaggerDoc() map[string]string { var map_Endpoints = map[string]string{ "": "Endpoints is a collection of endpoints that implement the actual service. Example:\n Name: \"mysvc\",\n Subsets: [\n {\n Addresses: [{\"ip\": \"10.10.1.1\"}, {\"ip\": \"10.10.2.2\"}],\n Ports: [{\"name\": \"a\", \"port\": 8675}, {\"name\": \"b\", \"port\": 309}]\n },\n {\n Addresses: [{\"ip\": \"10.10.3.3\"}],\n Ports: [{\"name\": \"a\", \"port\": 93}, {\"name\": \"b\", \"port\": 76}]\n },\n ]", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", "subsets": "The set of all endpoints is the union of all subsets. Addresses are placed into subsets according to the IPs they share. A single address with multiple ports, some of which are ready and some of which are not (because they come from different containers) will result in the address being displayed in different subsets for the different ports. No address will appear in both Addresses and NotReadyAddresses in the same subset. Sets of addresses and ports that comprise a service.", } @@ -374,7 +380,7 @@ func (Endpoints) SwaggerDoc() map[string]string { var map_EndpointsList = map[string]string{ "": "EndpointsList is a list of endpoints.", - "metadata": "Standard list metadata. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", "items": "List of endpoints.", } @@ -394,10 +400,11 @@ func (EnvVar) SwaggerDoc() map[string]string { } var map_EnvVarSource = map[string]string{ - "": "EnvVarSource represents a source for the value of an EnvVar.", - "fieldRef": "Selects a field of the pod; only name and namespace are supported.", - "configMapKeyRef": "Selects a key of a ConfigMap.", - "secretKeyRef": "Selects a key of a secret in the pod's namespace", + "": "EnvVarSource represents a source for the value of an EnvVar.", + "fieldRef": "Selects a field of the pod; only name and namespace are supported.", + "resourceFieldRef": "Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.", + "configMapKeyRef": "Selects a key of a ConfigMap.", + "secretKeyRef": "Selects a key of a secret in the pod's namespace", } func (EnvVarSource) SwaggerDoc() map[string]string { @@ -406,7 +413,7 @@ func (EnvVarSource) SwaggerDoc() map[string]string { var map_Event = map[string]string{ "": "Event is a report of an event somewhere in the cluster.", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. 
More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", "involvedObject": "The object that this event is about.", "reason": "This should be a short, machine understandable string that gives the reason for the transition into the object's current status.", "message": "A human-readable description of the status of this operation.", @@ -423,7 +430,7 @@ func (Event) SwaggerDoc() map[string]string { var map_EventList = map[string]string{ "": "EventList is a list of events.", - "metadata": "Standard list metadata. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", "items": "List of events", } @@ -476,7 +483,7 @@ var map_FlexVolumeSource = map[string]string{ "": "FlexVolume represents a generic volume resource that is provisioned/attached using a exec based plugin. This is an alpha feature and may change in future.", "driver": "Driver is the name of the driver to use for this volume.", "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". The default filesystem depends on FlexVolume script.", - "secretRef": "Optional: SecretRef is reference to the authentication secret for User, default is empty.", + "secretRef": "Optional: SecretRef is reference to the secret object containing sensitive information to pass to the plugin scripts. This may be empty if no secret object is specified. If the secret object contains more than one secret, all secrets are passed to the plugin scripts.", "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", "options": "Optional: Extra command options if any.", } @@ -496,10 +503,10 @@ func (FlockerVolumeSource) SwaggerDoc() map[string]string { var map_GCEPersistentDiskVolumeSource = map[string]string{ "": "Represents a Persistent Disk resource in Google Compute Engine.\n\nA GCE PD must exist before mounting to a container. The disk must also be in the same GCE project and zone as the kubelet. A GCE PD can only be mounted as read/write once or read-only many times. GCE PDs support ownership management and SELinux relabeling.", - "pdName": "Unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: http://releases.k8s.io/release-1.2/docs/user-guide/volumes.md#gcepersistentdisk", - "fsType": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: http://releases.k8s.io/release-1.2/docs/user-guide/volumes.md#gcepersistentdisk", - "partition": "The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty). More info: http://releases.k8s.io/release-1.2/docs/user-guide/volumes.md#gcepersistentdisk", - "readOnly": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: http://releases.k8s.io/release-1.2/docs/user-guide/volumes.md#gcepersistentdisk", + "pdName": "Unique name of the PD resource in GCE. Used to identify the disk in GCE. 
More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#gcepersistentdisk", + "fsType": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#gcepersistentdisk", + "partition": "The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty). More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#gcepersistentdisk", + "readOnly": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#gcepersistentdisk", } func (GCEPersistentDiskVolumeSource) SwaggerDoc() map[string]string { @@ -519,9 +526,9 @@ func (GitRepoVolumeSource) SwaggerDoc() map[string]string { var map_GlusterfsVolumeSource = map[string]string{ "": "Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling.", - "endpoints": "EndpointsName is the endpoint name that details Glusterfs topology. More info: http://releases.k8s.io/release-1.2/examples/glusterfs/README.md#create-a-pod", - "path": "Path is the Glusterfs volume path. More info: http://releases.k8s.io/release-1.2/examples/glusterfs/README.md#create-a-pod", - "readOnly": "ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: http://releases.k8s.io/release-1.2/examples/glusterfs/README.md#create-a-pod", + "endpoints": "EndpointsName is the endpoint name that details Glusterfs topology. More info: http://releases.k8s.io/HEAD/examples/glusterfs/README.md#create-a-pod", + "path": "Path is the Glusterfs volume path. More info: http://releases.k8s.io/HEAD/examples/glusterfs/README.md#create-a-pod", + "readOnly": "ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: http://releases.k8s.io/HEAD/examples/glusterfs/README.md#create-a-pod", } func (GlusterfsVolumeSource) SwaggerDoc() map[string]string { @@ -564,7 +571,7 @@ func (Handler) SwaggerDoc() map[string]string { var map_HostPathVolumeSource = map[string]string{ "": "Represents a host path mapped into a pod. Host path volumes do not support ownership management or SELinux relabeling.", - "path": "Path of the directory on the host. More info: http://releases.k8s.io/release-1.2/docs/user-guide/volumes.md#hostpath", + "path": "Path of the directory on the host. More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#hostpath", } func (HostPathVolumeSource) SwaggerDoc() map[string]string { @@ -577,7 +584,7 @@ var map_ISCSIVolumeSource = map[string]string{ "iqn": "Target iSCSI Qualified Name.", "lun": "iSCSI target lun number.", "iscsiInterface": "Optional: Defaults to 'default' (tcp). iSCSI interface name that uses an iSCSI transport.", - "fsType": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. 
More info: http://releases.k8s.io/release-1.2/docs/user-guide/volumes.md#iscsi", + "fsType": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#iscsi", "readOnly": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false.", } @@ -597,8 +604,8 @@ func (KeyToPath) SwaggerDoc() map[string]string { var map_Lifecycle = map[string]string{ "": "Lifecycle describes actions that the management system should take in response to container lifecycle events. For the PostStart and PreStop lifecycle handlers, management of the container blocks until the action is complete, unless the container process fails, in which case the handler is aborted.", - "postStart": "PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: http://releases.k8s.io/release-1.2/docs/user-guide/container-environment.md#hook-details", - "preStop": "PreStop is called immediately before a container is terminated. The container is terminated after the handler completes. The reason for termination is passed to the handler. Regardless of the outcome of the handler, the container is eventually terminated. Other management of the container blocks until the hook completes. More info: http://releases.k8s.io/release-1.2/docs/user-guide/container-environment.md#hook-details", + "postStart": "PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: http://releases.k8s.io/HEAD/docs/user-guide/container-environment.md#hook-details", + "preStop": "PreStop is called immediately before a container is terminated. The container is terminated after the handler completes. The reason for termination is passed to the handler. Regardless of the outcome of the handler, the container is eventually terminated. Other management of the container blocks until the hook completes. More info: http://releases.k8s.io/HEAD/docs/user-guide/container-environment.md#hook-details", } func (Lifecycle) SwaggerDoc() map[string]string { @@ -607,8 +614,8 @@ func (Lifecycle) SwaggerDoc() map[string]string { var map_LimitRange = map[string]string{ "": "LimitRange sets resource usage limits for each kind of resource in a Namespace.", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata", - "spec": "Spec defines the limits enforced. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "spec": "Spec defines the limits enforced. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", } func (LimitRange) SwaggerDoc() map[string]string { @@ -631,8 +638,8 @@ func (LimitRangeItem) SwaggerDoc() map[string]string { var map_LimitRangeList = map[string]string{ "": "LimitRangeList is a list of LimitRange items.", - "metadata": "Standard list metadata. 
More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#types-kinds", - "items": "Items is a list of LimitRange objects. More info: http://releases.k8s.io/release-1.2/docs/design/admission_control_limit_range.md", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", + "items": "Items is a list of LimitRange objects. More info: http://releases.k8s.io/HEAD/docs/design/admission_control_limit_range.md", } func (LimitRangeList) SwaggerDoc() map[string]string { @@ -650,7 +657,7 @@ func (LimitRangeSpec) SwaggerDoc() map[string]string { var map_List = map[string]string{ "": "List holds a list of objects, which may not be known by the server.", - "metadata": "Standard list metadata. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", "items": "List of objects", } @@ -692,7 +699,7 @@ func (LoadBalancerStatus) SwaggerDoc() map[string]string { var map_LocalObjectReference = map[string]string{ "": "LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace.", - "name": "Name of the referent. More info: http://releases.k8s.io/release-1.2/docs/user-guide/identifiers.md#names", + "name": "Name of the referent. More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#names", } func (LocalObjectReference) SwaggerDoc() map[string]string { @@ -701,9 +708,9 @@ func (LocalObjectReference) SwaggerDoc() map[string]string { var map_NFSVolumeSource = map[string]string{ "": "Represents an NFS mount that lasts the lifetime of a pod. NFS volumes do not support ownership management or SELinux relabeling.", - "server": "Server is the hostname or IP address of the NFS server. More info: http://releases.k8s.io/release-1.2/docs/user-guide/volumes.md#nfs", - "path": "Path that is exported by the NFS server. More info: http://releases.k8s.io/release-1.2/docs/user-guide/volumes.md#nfs", - "readOnly": "ReadOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: http://releases.k8s.io/release-1.2/docs/user-guide/volumes.md#nfs", + "server": "Server is the hostname or IP address of the NFS server. More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#nfs", + "path": "Path that is exported by the NFS server. More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#nfs", + "readOnly": "ReadOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#nfs", } func (NFSVolumeSource) SwaggerDoc() map[string]string { @@ -712,9 +719,9 @@ func (NFSVolumeSource) SwaggerDoc() map[string]string { var map_Namespace = map[string]string{ "": "Namespace provides a scope for Names. Use of multiple namespaces is optional.", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata", - "spec": "Spec defines the behavior of the Namespace. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#spec-and-status", - "status": "Status describes the current status of a Namespace. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. 
More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "spec": "Spec defines the behavior of the Namespace. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", + "status": "Status describes the current status of a Namespace. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", } func (Namespace) SwaggerDoc() map[string]string { @@ -723,8 +730,8 @@ func (Namespace) SwaggerDoc() map[string]string { var map_NamespaceList = map[string]string{ "": "NamespaceList is a list of Namespaces.", - "metadata": "Standard list metadata. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#types-kinds", - "items": "Items is the list of Namespace objects in the list. More info: http://releases.k8s.io/release-1.2/docs/user-guide/namespaces.md", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", + "items": "Items is the list of Namespace objects in the list. More info: http://releases.k8s.io/HEAD/docs/user-guide/namespaces.md", } func (NamespaceList) SwaggerDoc() map[string]string { @@ -733,7 +740,7 @@ func (NamespaceList) SwaggerDoc() map[string]string { var map_NamespaceSpec = map[string]string{ "": "NamespaceSpec describes the attributes on a Namespace.", - "finalizers": "Finalizers is an opaque list of values that must be empty to permanently remove object from storage. More info: http://releases.k8s.io/release-1.2/docs/design/namespaces.md#finalizers", + "finalizers": "Finalizers is an opaque list of values that must be empty to permanently remove object from storage. More info: http://releases.k8s.io/HEAD/docs/design/namespaces.md#finalizers", } func (NamespaceSpec) SwaggerDoc() map[string]string { @@ -742,7 +749,7 @@ func (NamespaceSpec) SwaggerDoc() map[string]string { var map_NamespaceStatus = map[string]string{ "": "NamespaceStatus is information about the current status of a Namespace.", - "phase": "Phase is the current lifecycle phase of the namespace. More info: http://releases.k8s.io/release-1.2/docs/design/namespaces.md#phases", + "phase": "Phase is the current lifecycle phase of the namespace. More info: http://releases.k8s.io/HEAD/docs/design/namespaces.md#phases", } func (NamespaceStatus) SwaggerDoc() map[string]string { @@ -751,9 +758,9 @@ func (NamespaceStatus) SwaggerDoc() map[string]string { var map_Node = map[string]string{ "": "Node is a worker node in Kubernetes, formerly known as minion. Each node will have a unique identifier in the cache (i.e. in etcd).", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata", - "spec": "Spec defines the behavior of a node. http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#spec-and-status", - "status": "Most recently observed status of the node. Populated by the system. Read-only. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "spec": "Spec defines the behavior of a node. http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", + "status": "Most recently observed status of the node. Populated by the system. Read-only. 
More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", } func (Node) SwaggerDoc() map[string]string { @@ -805,7 +812,7 @@ func (NodeDaemonEndpoints) SwaggerDoc() map[string]string { var map_NodeList = map[string]string{ "": "NodeList is the whole list of all Nodes which have been registered with master.", - "metadata": "Standard list metadata. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", "items": "List of nodes", } @@ -856,7 +863,7 @@ var map_NodeSpec = map[string]string{ "podCIDR": "PodCIDR represents the pod IP range assigned to the node.", "externalID": "External ID of the node assigned by some machine database (e.g. a cloud provider). Deprecated.", "providerID": "ID of the node assigned by the cloud provider in the format: ://", - "unschedulable": "Unschedulable controls node schedulability of new pods. By default, node is schedulable. More info: http://releases.k8s.io/release-1.2/docs/admin/node.md#manual-node-administration\"`", + "unschedulable": "Unschedulable controls node schedulability of new pods. By default, node is schedulable. More info: http://releases.k8s.io/HEAD/docs/admin/node.md#manual-node-administration\"`", } func (NodeSpec) SwaggerDoc() map[string]string { @@ -865,13 +872,13 @@ func (NodeSpec) SwaggerDoc() map[string]string { var map_NodeStatus = map[string]string{ "": "NodeStatus is information about the current status of a node.", - "capacity": "Capacity represents the total resources of a node. More info: http://releases.k8s.io/release-1.2/docs/user-guide/persistent-volumes.md#capacity for more details.", + "capacity": "Capacity represents the total resources of a node. More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#capacity for more details.", "allocatable": "Allocatable represents the resources of a node that are available for scheduling. Defaults to Capacity.", - "phase": "NodePhase is the recently observed lifecycle phase of the node. More info: http://releases.k8s.io/release-1.2/docs/admin/node.md#node-phase", - "conditions": "Conditions is an array of current observed node conditions. More info: http://releases.k8s.io/release-1.2/docs/admin/node.md#node-condition", - "addresses": "List of addresses reachable to the node. Queried from cloud provider, if available. More info: http://releases.k8s.io/release-1.2/docs/admin/node.md#node-addresses", + "phase": "NodePhase is the recently observed lifecycle phase of the node. More info: http://releases.k8s.io/HEAD/docs/admin/node.md#node-phase", + "conditions": "Conditions is an array of current observed node conditions. More info: http://releases.k8s.io/HEAD/docs/admin/node.md#node-condition", + "addresses": "List of addresses reachable to the node. Queried from cloud provider, if available. More info: http://releases.k8s.io/HEAD/docs/admin/node.md#node-addresses", "daemonEndpoints": "Endpoints of daemons running on the Node.", - "nodeInfo": "Set of ids/uuids to uniquely identify the node. More info: http://releases.k8s.io/release-1.2/docs/admin/node.md#node-info", + "nodeInfo": "Set of ids/uuids to uniquely identify the node. 
More info: http://releases.k8s.io/HEAD/docs/admin/node.md#node-info", "images": "List of container images on this node", } @@ -889,6 +896,8 @@ var map_NodeSystemInfo = map[string]string{ "containerRuntimeVersion": "ContainerRuntime Version reported by the node through runtime remote API (e.g. docker://1.5.0).", "kubeletVersion": "Kubelet Version reported by the node.", "kubeProxyVersion": "KubeProxy Version reported by the node.", + "operatingSystem": "The Operating System reported by the node", + "architecture": "The Architecture reported by the node", } func (NodeSystemInfo) SwaggerDoc() map[string]string { @@ -907,18 +916,20 @@ func (ObjectFieldSelector) SwaggerDoc() map[string]string { var map_ObjectMeta = map[string]string{ "": "ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create.", - "name": "Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://releases.k8s.io/release-1.2/docs/user-guide/identifiers.md#names", - "generateName": "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#idempotency", - "namespace": "Namespace defines the space within each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: http://releases.k8s.io/release-1.2/docs/user-guide/namespaces.md", + "name": "Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#names", + "generateName": "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. 
The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#idempotency", + "namespace": "Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: http://releases.k8s.io/HEAD/docs/user-guide/namespaces.md", "selfLink": "SelfLink is a URL representing this object. Populated by the system. Read-only.", - "uid": "UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. More info: http://releases.k8s.io/release-1.2/docs/user-guide/identifiers.md#uids", - "resourceVersion": "An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#concurrency-control-and-consistency", + "uid": "UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#uids", + "resourceVersion": "An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and pass them unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#concurrency-control-and-consistency", "generation": "A sequence number representing a specific generation of the desired state. Populated by the system. Read-only.", - "creationTimestamp": "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. 
More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata", - "deletionTimestamp": "DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource will be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field. Once set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. Once the resource is deleted in the API, the Kubelet will send a hard termination signal to the container. If not set, graceful deletion of the object has not been requested.\n\nPopulated by the system when a graceful deletion is requested. Read-only. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata", + "creationTimestamp": "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "deletionTimestamp": "DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource will be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field. Once set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. Once the resource is deleted in the API, the Kubelet will send a hard termination signal to the container. If not set, graceful deletion of the object has not been requested.\n\nPopulated by the system when a graceful deletion is requested. Read-only. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", "deletionGracePeriodSeconds": "Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only.", - "labels": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://releases.k8s.io/release-1.2/docs/user-guide/labels.md", - "annotations": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://releases.k8s.io/release-1.2/docs/user-guide/annotations.md", + "labels": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md", + "annotations": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://releases.k8s.io/HEAD/docs/user-guide/annotations.md", + "ownerReferences": "List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected.", + "finalizers": "Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed.", } func (ObjectMeta) SwaggerDoc() map[string]string { @@ -927,12 +938,12 @@ func (ObjectMeta) SwaggerDoc() map[string]string { var map_ObjectReference = map[string]string{ "": "ObjectReference contains enough information to let you inspect or modify the referred object.", - "kind": "Kind of the referent. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#types-kinds", - "namespace": "Namespace of the referent. More info: http://releases.k8s.io/release-1.2/docs/user-guide/namespaces.md", - "name": "Name of the referent. More info: http://releases.k8s.io/release-1.2/docs/user-guide/identifiers.md#names", - "uid": "UID of the referent. More info: http://releases.k8s.io/release-1.2/docs/user-guide/identifiers.md#uids", + "kind": "Kind of the referent. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", + "namespace": "Namespace of the referent. More info: http://releases.k8s.io/HEAD/docs/user-guide/namespaces.md", + "name": "Name of the referent. More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#names", + "uid": "UID of the referent. More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#uids", "apiVersion": "API version of the referent.", - "resourceVersion": "Specific resourceVersion to which this reference is made, if any. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#concurrency-control-and-consistency", + "resourceVersion": "Specific resourceVersion to which this reference is made, if any. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#concurrency-control-and-consistency", "fieldPath": "If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: \"spec.containers{name}\" (where \"name\" refers to the name of the container that triggered the event) or if no container name is specified \"spec.containers[2]\" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object.", } @@ -940,11 +951,23 @@ func (ObjectReference) SwaggerDoc() map[string]string { return map_ObjectReference } +var map_OwnerReference = map[string]string{ + "": "OwnerReference contains enough information to let you identify an owning object. Currently, an owning object must be in the same namespace, so there is no namespace field.", + "apiVersion": "API version of the referent.", + "kind": "Kind of the referent. 
More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", + "name": "Name of the referent. More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#names", + "uid": "UID of the referent. More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#uids", +} + +func (OwnerReference) SwaggerDoc() map[string]string { + return map_OwnerReference +} + var map_PersistentVolume = map[string]string{ - "": "PersistentVolume (PV) is a storage resource provisioned by an administrator. It is analogous to a node. More info: http://releases.k8s.io/release-1.2/docs/user-guide/persistent-volumes.md", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata", - "spec": "Spec defines a specification of a persistent volume owned by the cluster. Provisioned by an administrator. More info: http://releases.k8s.io/release-1.2/docs/user-guide/persistent-volumes.md#persistent-volumes", - "status": "Status represents the current information/status for the persistent volume. Populated by the system. Read-only. More info: http://releases.k8s.io/release-1.2/docs/user-guide/persistent-volumes.md#persistent-volumes", + "": "PersistentVolume (PV) is a storage resource provisioned by an administrator. It is analogous to a node. More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "spec": "Spec defines a specification of a persistent volume owned by the cluster. Provisioned by an administrator. More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#persistent-volumes", + "status": "Status represents the current information/status for the persistent volume. Populated by the system. Read-only. More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#persistent-volumes", } func (PersistentVolume) SwaggerDoc() map[string]string { @@ -953,9 +976,9 @@ func (PersistentVolume) SwaggerDoc() map[string]string { var map_PersistentVolumeClaim = map[string]string{ "": "PersistentVolumeClaim is a user's request for and claim to a persistent volume", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata", - "spec": "Spec defines the desired characteristics of a volume requested by a pod author. More info: http://releases.k8s.io/release-1.2/docs/user-guide/persistent-volumes.md#persistentvolumeclaims", - "status": "Status represents the current information/status of a persistent volume claim. Read-only. More info: http://releases.k8s.io/release-1.2/docs/user-guide/persistent-volumes.md#persistentvolumeclaims", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "spec": "Spec defines the desired characteristics of a volume requested by a pod author. More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#persistentvolumeclaims", + "status": "Status represents the current information/status of a persistent volume claim. Read-only. 
More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#persistentvolumeclaims", } func (PersistentVolumeClaim) SwaggerDoc() map[string]string { @@ -964,8 +987,8 @@ func (PersistentVolumeClaim) SwaggerDoc() map[string]string { var map_PersistentVolumeClaimList = map[string]string{ "": "PersistentVolumeClaimList is a list of PersistentVolumeClaim items.", - "metadata": "Standard list metadata. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#types-kinds", - "items": "A list of persistent volume claims. More info: http://releases.k8s.io/release-1.2/docs/user-guide/persistent-volumes.md#persistentvolumeclaims", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", + "items": "A list of persistent volume claims. More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#persistentvolumeclaims", } func (PersistentVolumeClaimList) SwaggerDoc() map[string]string { @@ -974,8 +997,8 @@ func (PersistentVolumeClaimList) SwaggerDoc() map[string]string { var map_PersistentVolumeClaimSpec = map[string]string{ "": "PersistentVolumeClaimSpec describes the common attributes of storage devices and allows a Source for provider-specific attributes", - "accessModes": "AccessModes contains the desired access modes the volume should have. More info: http://releases.k8s.io/release-1.2/docs/user-guide/persistent-volumes.md#access-modes-1", - "resources": "Resources represents the minimum resources the volume should have. More info: http://releases.k8s.io/release-1.2/docs/user-guide/persistent-volumes.md#resources", + "accessModes": "AccessModes contains the desired access modes the volume should have. More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#access-modes-1", + "resources": "Resources represents the minimum resources the volume should have. More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#resources", "volumeName": "VolumeName is the binding reference to the PersistentVolume backing this claim.", } @@ -986,7 +1009,7 @@ func (PersistentVolumeClaimSpec) SwaggerDoc() map[string]string { var map_PersistentVolumeClaimStatus = map[string]string{ "": "PersistentVolumeClaimStatus is the current status of a persistent volume claim.", "phase": "Phase represents the current phase of PersistentVolumeClaim.", - "accessModes": "AccessModes contains the actual access modes the volume backing the PVC has. More info: http://releases.k8s.io/release-1.2/docs/user-guide/persistent-volumes.md#access-modes-1", + "accessModes": "AccessModes contains the actual access modes the volume backing the PVC has. More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#access-modes-1", "capacity": "Represents the actual resources of the underlying volume.", } @@ -996,7 +1019,7 @@ func (PersistentVolumeClaimStatus) SwaggerDoc() map[string]string { var map_PersistentVolumeClaimVolumeSource = map[string]string{ "": "PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace. This volume finds the bound PV and mounts that volume for the pod. A PersistentVolumeClaimVolumeSource is, essentially, a wrapper around another type of volume that is owned by someone else (the system).", - "claimName": "ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. 
More info: http://releases.k8s.io/release-1.2/docs/user-guide/persistent-volumes.md#persistentvolumeclaims", + "claimName": "ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#persistentvolumeclaims", "readOnly": "Will force the ReadOnly setting in VolumeMounts. Default false.", } @@ -1006,8 +1029,8 @@ func (PersistentVolumeClaimVolumeSource) SwaggerDoc() map[string]string { var map_PersistentVolumeList = map[string]string{ "": "PersistentVolumeList is a list of PersistentVolume items.", - "metadata": "Standard list metadata. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#types-kinds", - "items": "List of persistent volumes. More info: http://releases.k8s.io/release-1.2/docs/user-guide/persistent-volumes.md", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", + "items": "List of persistent volumes. More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md", } func (PersistentVolumeList) SwaggerDoc() map[string]string { @@ -1016,19 +1039,20 @@ func (PersistentVolumeList) SwaggerDoc() map[string]string { var map_PersistentVolumeSource = map[string]string{ "": "PersistentVolumeSource is similar to VolumeSource but meant for the administrator who creates PVs. Exactly one of its members must be set.", - "gcePersistentDisk": "GCEPersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin. More info: http://releases.k8s.io/release-1.2/docs/user-guide/volumes.md#gcepersistentdisk", - "awsElasticBlockStore": "AWSElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: http://releases.k8s.io/release-1.2/docs/user-guide/volumes.md#awselasticblockstore", - "hostPath": "HostPath represents a directory on the host. Provisioned by a developer or tester. This is useful for single-node development and testing only! On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster. More info: http://releases.k8s.io/release-1.2/docs/user-guide/volumes.md#hostpath", - "glusterfs": "Glusterfs represents a Glusterfs volume that is attached to a host and exposed to the pod. Provisioned by an admin. More info: http://releases.k8s.io/release-1.2/examples/glusterfs/README.md", - "nfs": "NFS represents an NFS mount on the host. Provisioned by an admin. More info: http://releases.k8s.io/release-1.2/docs/user-guide/volumes.md#nfs", - "rbd": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: http://releases.k8s.io/release-1.2/examples/rbd/README.md", + "gcePersistentDisk": "GCEPersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin. More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#gcepersistentdisk", + "awsElasticBlockStore": "AWSElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#awselasticblockstore", + "hostPath": "HostPath represents a directory on the host. Provisioned by a developer or tester. This is useful for single-node development and testing only! 
On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster. More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#hostpath", + "glusterfs": "Glusterfs represents a Glusterfs volume that is attached to a host and exposed to the pod. Provisioned by an admin. More info: http://releases.k8s.io/HEAD/examples/glusterfs/README.md", + "nfs": "NFS represents an NFS mount on the host. Provisioned by an admin. More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#nfs", + "rbd": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: http://releases.k8s.io/HEAD/examples/rbd/README.md", "iscsi": "ISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin.", - "cinder": "Cinder represents a cinder volume attached and mounted on kubelets host machine More info: http://releases.k8s.io/release-1.2/examples/mysql-cinder-pd/README.md", + "cinder": "Cinder represents a cinder volume attached and mounted on the kubelet's host machine. More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", "cephfs": "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime", "fc": "FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.", "flocker": "Flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running", "flexVolume": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. This is an alpha feature and may change in the future.", "azureFile": "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.", + "vsphereVolume": "VsphereVolume represents a vSphere volume attached and mounted on the kubelet's host machine", } func (PersistentVolumeSource) SwaggerDoc() map[string]string { @@ -1037,10 +1061,10 @@ func (PersistentVolumeSource) SwaggerDoc() map[string]string { var map_PersistentVolumeSpec = map[string]string{ "": "PersistentVolumeSpec is the specification of a persistent volume.", - "capacity": "A description of the persistent volume's resources and capacity. More info: http://releases.k8s.io/release-1.2/docs/user-guide/persistent-volumes.md#capacity", - "accessModes": "AccessModes contains all ways the volume can be mounted. 
More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#access-modes", + "claimRef": "ClaimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim. Expected to be non-nil when bound. claim.VolumeName is the authoritative bind between PV and PVC. More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#binding", + "persistentVolumeReclaimPolicy": "What happens to a persistent volume when released from its claim. Valid options are Retain (default) and Recycle. Recycling must be supported by the volume plugin underlying this persistent volume. More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#recycling-policy", } func (PersistentVolumeSpec) SwaggerDoc() map[string]string { @@ -1049,7 +1073,7 @@ func (PersistentVolumeSpec) SwaggerDoc() map[string]string { var map_PersistentVolumeStatus = map[string]string{ "": "PersistentVolumeStatus is the current status of a persistent volume.", - "phase": "Phase indicates if a volume is available, bound to a claim, or released by a claim. More info: http://releases.k8s.io/release-1.2/docs/user-guide/persistent-volumes.md#phase", + "phase": "Phase indicates if a volume is available, bound to a claim, or released by a claim. More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#phase", "message": "A human-readable message indicating details about why the volume is in this state.", "reason": "Reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI.", } @@ -1060,15 +1084,46 @@ func (PersistentVolumeStatus) SwaggerDoc() map[string]string { var map_Pod = map[string]string{ "": "Pod is a collection of containers that can run on a host. This resource is created by clients and scheduled onto hosts.", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata", - "spec": "Specification of the desired behavior of the pod. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#spec-and-status", - "status": "Most recently observed status of the pod. This data may not be up to date. Populated by the system. Read-only. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "spec": "Specification of the desired behavior of the pod. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", + "status": "Most recently observed status of the pod. This data may not be up to date. Populated by the system. Read-only. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", } func (Pod) SwaggerDoc() map[string]string { return map_Pod } +var map_PodAffinity = map[string]string{ + "": "Pod affinity is a group of inter pod affinity scheduling rules.", + "requiredDuringSchedulingIgnoredDuringExecution": "NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system will try to eventually evict the pod from its node. 
When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:\"requiredDuringSchedulingRequiredDuringExecution,omitempty\"` If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.", + "preferredDuringSchedulingIgnoredDuringExecution": "The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node has pods which match the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.", +} + +func (PodAffinity) SwaggerDoc() map[string]string { + return map_PodAffinity +} + +var map_PodAffinityTerm = map[string]string{ + "": "Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which a pod of the set of pods is running", + "labelSelector": "A label query over a set of resources, in this case pods.", + "namespaces": "namespaces specifies which namespaces the labelSelector applies to (matches against); nil list means \"this pod's namespace,\" empty list means \"all namespaces\". The json tag here is not \"omitempty\" since we need to distinguish nil and empty. See https://golang.org/pkg/encoding/json/#Marshal for more details.", + "topologyKey": "This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. For PreferredDuringScheduling pod anti-affinity, empty topologyKey is interpreted as \"all topologies\" (\"all topologies\" here means all the topologyKeys indicated by scheduler command-line argument --failure-domains); for affinity and for RequiredDuringScheduling pod anti-affinity, empty topologyKey is not allowed.", +} + +func (PodAffinityTerm) SwaggerDoc() map[string]string { + return map_PodAffinityTerm +} + +var map_PodAntiAffinity = map[string]string{ + "": "Pod anti affinity is a group of inter pod anti affinity scheduling rules.", + "requiredDuringSchedulingIgnoredDuringExecution": "NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. 
If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system will try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:\"requiredDuringSchedulingRequiredDuringExecution,omitempty\"` If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.", + "preferredDuringSchedulingIgnoredDuringExecution": "The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node has pods which match the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.", +} + +func (PodAntiAffinity) SwaggerDoc() map[string]string { + return map_PodAntiAffinity +} + var map_PodAttachOptions = map[string]string{ "": "PodAttachOptions is the query options to a Pod's remote attach call.", "stdin": "Stdin if true, redirects the standard input stream of the pod for this call. Defaults to false.", @@ -1084,8 +1139,8 @@ func (PodAttachOptions) SwaggerDoc() map[string]string { var map_PodCondition = map[string]string{ "": "PodCondition contains details for the current condition of this pod.", - "type": "Type is the type of the condition. Currently only Ready. More info: http://releases.k8s.io/release-1.2/docs/user-guide/pod-states.md#pod-conditions", - "status": "Status is the status of the condition. Can be True, False, Unknown. More info: http://releases.k8s.io/release-1.2/docs/user-guide/pod-states.md#pod-conditions", + "type": "Type is the type of the condition. Currently only Ready. More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#pod-conditions", + "status": "Status is the status of the condition. Can be True, False, Unknown. More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#pod-conditions", "lastProbeTime": "Last time we probed the condition.", "lastTransitionTime": "Last time the condition transitioned from one status to another.", "reason": "Unique, one-word, CamelCase reason for the condition's last transition.", @@ -1112,8 +1167,8 @@ func (PodExecOptions) SwaggerDoc() map[string]string { var map_PodList = map[string]string{ "": "PodList is a list of Pods.", - "metadata": "Standard list metadata. 
More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", + "items": "List of pods. More info: http://releases.k8s.io/HEAD/docs/user-guide/pods.md", } func (PodList) SwaggerDoc() map[string]string { @@ -1160,21 +1215,23 @@ func (PodSecurityContext) SwaggerDoc() map[string]string { var map_PodSpec = map[string]string{ "": "PodSpec is a description of a pod.", - "volumes": "List of volumes that can be mounted by containers belonging to the pod. More info: http://releases.k8s.io/release-1.2/docs/user-guide/volumes.md", - "containers": "List of containers belonging to the pod. Containers cannot currently be added or removed. There must be at least one container in a Pod. Cannot be updated. More info: http://releases.k8s.io/release-1.2/docs/user-guide/containers.md", - "restartPolicy": "Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: http://releases.k8s.io/release-1.2/docs/user-guide/pod-states.md#restartpolicy", + "volumes": "List of volumes that can be mounted by containers belonging to the pod. More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md", + "containers": "List of containers belonging to the pod. Containers cannot currently be added or removed. There must be at least one container in a Pod. Cannot be updated. More info: http://releases.k8s.io/HEAD/docs/user-guide/containers.md", + "restartPolicy": "Restart policy for all containers within the pod. One of Always, OnFailure, Never. Defaults to Always. More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#restartpolicy", "terminationGracePeriodSeconds": "Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be a non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds.", "activeDeadlineSeconds": "Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer.", "dnsPolicy": "Set DNS policy for containers within the pod. One of 'ClusterFirst' or 'Default'. Defaults to \"ClusterFirst\".", - "nodeSelector": "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: http://releases.k8s.io/release-1.2/docs/user-guide/node-selection/README.md", - "serviceAccountName": "ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: http://releases.k8s.io/release-1.2/docs/design/service_accounts.md", + "nodeSelector": "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: http://releases.k8s.io/HEAD/docs/user-guide/node-selection/README.md", + "serviceAccountName": "ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: http://releases.k8s.io/HEAD/docs/design/service_accounts.md", "serviceAccount": "DeprecatedServiceAccount is a deprecated alias for ServiceAccountName.
Deprecated: Use serviceAccountName instead.", "nodeName": "NodeName is a request to schedule this pod onto a specific node. If it is non-empty, the scheduler simply schedules this pod onto that node, assuming that it fits resource requirements.", "hostNetwork": "Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Default to false.", "hostPID": "Use the host's pid namespace. Optional: Default to false.", "hostIPC": "Use the host's ipc namespace. Optional: Default to false.", "securityContext": "SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field.", - "imagePullSecrets": "ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: http://releases.k8s.io/release-1.2/docs/user-guide/images.md#specifying-imagepullsecrets-on-a-pod", + "imagePullSecrets": "ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: http://releases.k8s.io/HEAD/docs/user-guide/images.md#specifying-imagepullsecrets-on-a-pod", + "hostname": "Specifies the hostname of the Pod. If not specified, the pod's hostname will be set to a system-defined value.", + "subdomain": "If specified, the fully qualified Pod hostname will be \"<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>\". If not specified, the pod will not have a domain name at all.", } func (PodSpec) SwaggerDoc() map[string]string { @@ -1183,14 +1240,14 @@ func (PodSpec) SwaggerDoc() map[string]string { var map_PodStatus = map[string]string{ "": "PodStatus represents information about the status of a pod. Status may trail the actual state of a system.", - "phase": "Current condition of the pod. More info: http://releases.k8s.io/release-1.2/docs/user-guide/pod-states.md#pod-phase", - "conditions": "Current service state of pod. More info: http://releases.k8s.io/release-1.2/docs/user-guide/pod-states.md#pod-conditions", + "phase": "Current condition of the pod. More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#pod-phase", + "conditions": "Current service state of pod. More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#pod-conditions", "message": "A human readable message indicating details about why the pod is in this condition.", "reason": "A brief CamelCase message indicating details about why the pod is in this state. e.g. 'OutOfDisk'", "hostIP": "IP address of the host to which the pod is assigned. Empty if not yet scheduled.", "podIP": "IP address allocated to the pod. Routable at least within the cluster. Empty if not yet allocated.", "startTime": "RFC 3339 date and time at which the object was acknowledged by the Kubelet. This is before the Kubelet pulled the container image(s) for the pod.", - "containerStatuses": "The list has one entry per container in the manifest. Each entry is currently the output of `docker inspect`.
More info: http://releases.k8s.io/release-1.2/docs/user-guide/pod-states.md#container-statuses", + "containerStatuses": "The list has one entry per container in the manifest. Each entry is currently the output of `docker inspect`. More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#container-statuses", } func (PodStatus) SwaggerDoc() map[string]string { @@ -1199,8 +1256,8 @@ func (PodStatus) SwaggerDoc() map[string]string { var map_PodStatusResult = map[string]string{ "": "PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encoded/decoded", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata", - "status": "Most recently observed status of the pod. This data may not be up to date. Populated by the system. Read-only. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "status": "Most recently observed status of the pod. This data may not be up to date. Populated by the system. Read-only. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", } func (PodStatusResult) SwaggerDoc() map[string]string { @@ -1209,8 +1266,8 @@ func (PodStatusResult) SwaggerDoc() map[string]string { var map_PodTemplate = map[string]string{ "": "PodTemplate describes a template for creating copies of a predefined pod.", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata", - "template": "Template defines the pods that will be created from this pod template. http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "template": "Template defines the pods that will be created from this pod template. http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", } func (PodTemplate) SwaggerDoc() map[string]string { @@ -1219,7 +1276,7 @@ func (PodTemplate) SwaggerDoc() map[string]string { var map_PodTemplateList = map[string]string{ "": "PodTemplateList is a list of PodTemplates.", - "metadata": "Standard list metadata. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", "items": "List of pod templates", } @@ -1229,14 +1286,23 @@ func (PodTemplateList) SwaggerDoc() map[string]string { var map_PodTemplateSpec = map[string]string{ "": "PodTemplateSpec describes the data a pod should have when created from a template", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata", - "spec": "Specification of the desired behavior of the pod. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "spec": "Specification of the desired behavior of the pod.
More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", } func (PodTemplateSpec) SwaggerDoc() map[string]string { return map_PodTemplateSpec } +var map_Preconditions = map[string]string{ + "": "Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out.", + "uid": "Specifies the target UID.", +} + +func (Preconditions) SwaggerDoc() map[string]string { + return map_Preconditions +} + var map_PreferredSchedulingTerm = map[string]string{ "": "An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).", "weight": "Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.", @@ -1249,8 +1315,8 @@ func (PreferredSchedulingTerm) SwaggerDoc() map[string]string { var map_Probe = map[string]string{ "": "Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic.", - "initialDelaySeconds": "Number of seconds after the container has started before liveness probes are initiated. More info: http://releases.k8s.io/release-1.2/docs/user-guide/pod-states.md#container-probes", - "timeoutSeconds": "Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: http://releases.k8s.io/release-1.2/docs/user-guide/pod-states.md#container-probes", + "initialDelaySeconds": "Number of seconds after the container has started before liveness probes are initiated. More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#container-probes", + "timeoutSeconds": "Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#container-probes", "periodSeconds": "How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.", "successThreshold": "Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1.", "failureThreshold": "Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.", @@ -1262,14 +1328,14 @@ func (Probe) SwaggerDoc() map[string]string { var map_RBDVolumeSource = map[string]string{ "": "Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volumes support ownership management and SELinux relabeling.", - "monitors": "A collection of Ceph monitors. More info: http://releases.k8s.io/release-1.2/examples/rbd/README.md#how-to-use-it", - "image": "The rados image name. More info: http://releases.k8s.io/release-1.2/examples/rbd/README.md#how-to-use-it", - "fsType": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: http://releases.k8s.io/release-1.2/docs/user-guide/volumes.md#rbd", - "pool": "The rados pool name. Default is rbd. More info: http://releases.k8s.io/release-1.2/examples/rbd/README.md#how-to-use-it.", - "user": "The rados user name. Default is admin. More info: http://releases.k8s.io/release-1.2/examples/rbd/README.md#how-to-use-it", - "keyring": "Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. 
More info: http://releases.k8s.io/release-1.2/examples/rbd/README.md#how-to-use-it", - "secretRef": "SecretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is empty. More info: http://releases.k8s.io/release-1.2/examples/rbd/README.md#how-to-use-it", - "readOnly": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: http://releases.k8s.io/release-1.2/examples/rbd/README.md#how-to-use-it", + "monitors": "A collection of Ceph monitors. More info: http://releases.k8s.io/HEAD/examples/rbd/README.md#how-to-use-it", + "image": "The rados image name. More info: http://releases.k8s.io/HEAD/examples/rbd/README.md#how-to-use-it", + "fsType": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#rbd", + "pool": "The rados pool name. Default is rbd. More info: http://releases.k8s.io/HEAD/examples/rbd/README.md#how-to-use-it.", + "user": "The rados user name. Default is admin. More info: http://releases.k8s.io/HEAD/examples/rbd/README.md#how-to-use-it", + "keyring": "Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: http://releases.k8s.io/HEAD/examples/rbd/README.md#how-to-use-it", + "secretRef": "SecretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is empty. More info: http://releases.k8s.io/HEAD/examples/rbd/README.md#how-to-use-it", + "readOnly": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: http://releases.k8s.io/HEAD/examples/rbd/README.md#how-to-use-it", } func (RBDVolumeSource) SwaggerDoc() map[string]string { @@ -1278,7 +1344,7 @@ func (RBDVolumeSource) SwaggerDoc() map[string]string { var map_RangeAllocation = map[string]string{ "": "RangeAllocation is not a public type.", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", "range": "Range is string that identifies the range represented by 'data'.", "data": "Data is a bit array containing all allocated addresses in the previous segment.", } @@ -1289,9 +1355,9 @@ func (RangeAllocation) SwaggerDoc() map[string]string { var map_ReplicationController = map[string]string{ "": "ReplicationController represents the configuration of a replication controller.", - "metadata": "If the Labels of a ReplicationController are empty, they are defaulted to be the same as the Pod(s) that the replication controller manages. Standard object's metadata. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata", - "spec": "Spec defines the specification of the desired behavior of the replication controller. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#spec-and-status", - "status": "Status is the most recently observed status of the replication controller. This data may be out of date by some window of time. Populated by the system. Read-only. 
More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#spec-and-status", + "metadata": "If the Labels of a ReplicationController are empty, they are defaulted to be the same as the Pod(s) that the replication controller manages. Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "spec": "Spec defines the specification of the desired behavior of the replication controller. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", + "status": "Status is the most recently observed status of the replication controller. This data may be out of date by some window of time. Populated by the system. Read-only. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", } func (ReplicationController) SwaggerDoc() map[string]string { @@ -1300,8 +1366,8 @@ func (ReplicationController) SwaggerDoc() map[string]string { var map_ReplicationControllerList = map[string]string{ "": "ReplicationControllerList is a collection of replication controllers.", - "metadata": "Standard list metadata. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#types-kinds", - "items": "List of replication controllers. More info: http://releases.k8s.io/release-1.2/docs/user-guide/replication-controller.md", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", + "items": "List of replication controllers. More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md", } func (ReplicationControllerList) SwaggerDoc() map[string]string { @@ -1310,9 +1376,9 @@ func (ReplicationControllerList) SwaggerDoc() map[string]string { var map_ReplicationControllerSpec = map[string]string{ "": "ReplicationControllerSpec is the specification of a replication controller.", - "replicas": "Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: http://releases.k8s.io/release-1.2/docs/user-guide/replication-controller.md#what-is-a-replication-controller", - "selector": "Selector is a label query over pods that should match the Replicas count. If Selector is empty, it is defaulted to the labels present on the Pod template. Label keys and values that must match in order to be controlled by this replication controller, if empty defaulted to labels on Pod template. More info: http://releases.k8s.io/release-1.2/docs/user-guide/labels.md#label-selectors", - "template": "Template is the object that describes the pod that will be created if insufficient replicas are detected. This takes precedence over a TemplateRef. More info: http://releases.k8s.io/release-1.2/docs/user-guide/replication-controller.md#pod-template", + "replicas": "Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#what-is-a-replication-controller", + "selector": "Selector is a label query over pods that should match the Replicas count. If Selector is empty, it is defaulted to the labels present on the Pod template. Label keys and values that must match in order to be controlled by this replication controller, if empty defaulted to labels on Pod template. 
More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors", + "template": "Template is the object that describes the pod that will be created if insufficient replicas are detected. This takes precedence over a TemplateRef. More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#pod-template", } func (ReplicationControllerSpec) SwaggerDoc() map[string]string { @@ -1321,7 +1387,7 @@ func (ReplicationControllerSpec) SwaggerDoc() map[string]string { var map_ReplicationControllerStatus = map[string]string{ "": "ReplicationControllerStatus represents the current status of a replication controller.", - "replicas": "Replicas is the most recently oberved number of replicas. More info: http://releases.k8s.io/release-1.2/docs/user-guide/replication-controller.md#what-is-a-replication-controller", + "replicas": "Replicas is the most recently observed number of replicas. More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#what-is-a-replication-controller", "fullyLabeledReplicas": "The number of pods that have labels matching the labels of the pod template of the replication controller.", "observedGeneration": "ObservedGeneration reflects the generation of the most recently observed replication controller.", } @@ -1330,11 +1396,22 @@ func (ReplicationControllerStatus) SwaggerDoc() map[string]string { return map_ReplicationControllerStatus } +var map_ResourceFieldSelector = map[string]string{ + "": "ResourceFieldSelector represents container resources (cpu, memory) and their output format", + "containerName": "Container name: required for volumes, optional for env vars", + "resource": "Required: resource to select", + "divisor": "Specifies the output format of the exposed resources, defaults to \"1\"", +} + +func (ResourceFieldSelector) SwaggerDoc() map[string]string { + return map_ResourceFieldSelector +} + var map_ResourceQuota = map[string]string{ "": "ResourceQuota sets aggregate quota restrictions enforced per namespace", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata", - "spec": "Spec defines the desired quota. http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#spec-and-status", - "status": "Status defines the actual enforced quota and its current usage. http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "spec": "Spec defines the desired quota. http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", + "status": "Status defines the actual enforced quota and its current usage. http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", } func (ResourceQuota) SwaggerDoc() map[string]string { @@ -1343,8 +1420,8 @@ func (ResourceQuota) SwaggerDoc() map[string]string { var map_ResourceQuotaList = map[string]string{ "": "ResourceQuotaList is a list of ResourceQuota items.", - "metadata": "Standard list metadata.
More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", + "items": "Items is a list of ResourceQuota objects. More info: http://releases.k8s.io/HEAD/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota", } func (ResourceQuotaList) SwaggerDoc() map[string]string { @@ -1353,7 +1430,7 @@ func (ResourceQuotaList) SwaggerDoc() map[string]string { var map_ResourceQuotaSpec = map[string]string{ "": "ResourceQuotaSpec defines the desired hard limits to enforce for Quota.", - "hard": "Hard is the set of desired hard limits for each named resource. More info: http://releases.k8s.io/release-1.2/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota", + "hard": "Hard is the set of desired hard limits for each named resource. More info: http://releases.k8s.io/HEAD/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota", "scopes": "A collection of filters that must match each object tracked by a quota. If not specified, the quota matches all objects.", } @@ -1363,7 +1440,7 @@ func (ResourceQuotaSpec) SwaggerDoc() map[string]string { var map_ResourceQuotaStatus = map[string]string{ "": "ResourceQuotaStatus defines the enforced hard limits and observed use.", - "hard": "Hard is the set of enforced hard limits for each named resource. More info: http://releases.k8s.io/release-1.2/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota", + "hard": "Hard is the set of enforced hard limits for each named resource. More info: http://releases.k8s.io/HEAD/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota", "used": "Used is the current observed total usage of the resource in the namespace.", } @@ -1373,8 +1450,8 @@ func (ResourceQuotaStatus) SwaggerDoc() map[string]string { var map_ResourceRequirements = map[string]string{ "": "ResourceRequirements describes the compute resource requirements.", - "limits": "Limits describes the maximum amount of compute resources allowed. More info: http://releases.k8s.io/release-1.2/docs/design/resources.md#resource-specifications", - "requests": "Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: http://releases.k8s.io/release-1.2/docs/design/resources.md#resource-specifications", + "limits": "Limits describes the maximum amount of compute resources allowed. More info: http://releases.k8s.io/HEAD/docs/design/resources.md#resource-specifications", + "requests": "Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: http://releases.k8s.io/HEAD/docs/design/resources.md#resource-specifications", } func (ResourceRequirements) SwaggerDoc() map[string]string { @@ -1395,7 +1472,7 @@ func (SELinuxOptions) SwaggerDoc() map[string]string { var map_Secret = map[string]string{ "": "Secret holds secret data of a certain type. The total bytes of the values in the Data field must be less than MaxSecretSize bytes.", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. 
More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", "data": "Data contains the secret data. Each key must be a valid DNS_SUBDOMAIN or leading dot followed by valid DNS_SUBDOMAIN. The serialized form of the secret data is a base64 encoded string, representing the arbitrary (possibly non-string) data value here. Described in https://tools.ietf.org/html/rfc4648#section-4", "type": "Used to facilitate programmatic handling of secret data.", } @@ -1415,8 +1492,8 @@ func (SecretKeySelector) SwaggerDoc() map[string]string { var map_SecretList = map[string]string{ "": "SecretList is a list of Secret.", - "metadata": "Standard list metadata. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#types-kinds", - "items": "Items is a list of secret objects. More info: http://releases.k8s.io/release-1.2/docs/user-guide/secrets.md", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", + "items": "Items is a list of secret objects. More info: http://releases.k8s.io/HEAD/docs/user-guide/secrets.md", } func (SecretList) SwaggerDoc() map[string]string { @@ -1425,7 +1502,8 @@ func (SecretList) SwaggerDoc() map[string]string { var map_SecretVolumeSource = map[string]string{ "": "Adapts a Secret into a volume.\n\nThe contents of the target Secret's Data field will be presented in a volume as files using the keys in the Data field as the file names. Secret volumes support ownership management and SELinux relabeling.", - "secretName": "Name of the secret in the pod's namespace to use. More info: http://releases.k8s.io/release-1.2/docs/user-guide/volumes.md#secrets", + "secretName": "Name of the secret in the pod's namespace to use. More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#secrets", + "items": "If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error. Paths must be relative and may not contain the '..' path or start with '..'.", } func (SecretVolumeSource) SwaggerDoc() map[string]string { @@ -1457,9 +1535,9 @@ func (SerializedReference) SwaggerDoc() map[string]string { var map_Service = map[string]string{ "": "Service is a named abstraction of software service (for example, mysql) consisting of local port (for example 3306) that the proxy listens on, and the selector that determines which pods will answer requests sent through the proxy.", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata", - "spec": "Spec defines the behavior of a service. http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#spec-and-status", - "status": "Most recently observed status of the service. Populated by the system. Read-only. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "spec": "Spec defines the behavior of a service. http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", + "status": "Most recently observed status of the service. Populated by the system. Read-only. 
More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", } func (Service) SwaggerDoc() map[string]string { @@ -1468,9 +1546,9 @@ func (Service) SwaggerDoc() map[string]string { var map_ServiceAccount = map[string]string{ "": "ServiceAccount binds together: * a name, understood by users, and perhaps by peripheral systems, for an identity * a principal that can be authenticated and authorized * a set of secrets", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata", - "secrets": "Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount. More info: http://releases.k8s.io/release-1.2/docs/user-guide/secrets.md", - "imagePullSecrets": "ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. More info: http://releases.k8s.io/release-1.2/docs/user-guide/secrets.md#manually-specifying-an-imagepullsecret", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "secrets": "Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount. More info: http://releases.k8s.io/HEAD/docs/user-guide/secrets.md", + "imagePullSecrets": "ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. More info: http://releases.k8s.io/HEAD/docs/user-guide/secrets.md#manually-specifying-an-imagepullsecret", } func (ServiceAccount) SwaggerDoc() map[string]string { @@ -1479,8 +1557,8 @@ func (ServiceAccount) SwaggerDoc() map[string]string { var map_ServiceAccountList = map[string]string{ "": "ServiceAccountList is a list of ServiceAccount objects", - "metadata": "Standard list metadata. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#types-kinds", - "items": "List of ServiceAccounts. More info: http://releases.k8s.io/release-1.2/docs/design/service_accounts.md#service-accounts", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", + "items": "List of ServiceAccounts. More info: http://releases.k8s.io/HEAD/docs/design/service_accounts.md#service-accounts", } func (ServiceAccountList) SwaggerDoc() map[string]string { @@ -1489,7 +1567,7 @@ func (ServiceAccountList) SwaggerDoc() map[string]string { var map_ServiceList = map[string]string{ "": "ServiceList holds a list of services.", - "metadata": "Standard list metadata. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", "items": "List of services", } @@ -1502,8 +1580,8 @@ var map_ServicePort = map[string]string{ "name": "The name of this port within the service. This must be a DNS_LABEL. All ports within a ServiceSpec must have unique names. This maps to the 'Name' field in EndpointPort objects. Optional if only one ServicePort is defined on this service.", "protocol": "The IP protocol for this port. Supports \"TCP\" and \"UDP\". 
Default is TCP.", "port": "The port that will be exposed by this service.", - "targetPort": "Number or name of the port to access on the pods targeted by the service. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. If this is a string, it will be looked up as a named port in the target Pod's container ports. If this is not specified, the value of the 'port' field is used (an identity map). This field is ignored for services with clusterIP=None, and should be omitted or set equal to the 'port' field. More info: http://releases.k8s.io/release-1.2/docs/user-guide/services.md#defining-a-service", - "nodePort": "The port on each node on which this service is exposed when type=NodePort or LoadBalancer. Usually assigned by the system. If specified, it will be allocated to the service if unused or else creation of the service will fail. Default is to auto-allocate a port if the ServiceType of this Service requires one. More info: http://releases.k8s.io/release-1.2/docs/user-guide/services.md#type--nodeport", + "targetPort": "Number or name of the port to access on the pods targeted by the service. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. If this is a string, it will be looked up as a named port in the target Pod's container ports. If this is not specified, the value of the 'port' field is used (an identity map). This field is ignored for services with clusterIP=None, and should be omitted or set equal to the 'port' field. More info: http://releases.k8s.io/HEAD/docs/user-guide/services.md#defining-a-service", + "nodePort": "The port on each node on which this service is exposed when type=NodePort or LoadBalancer. Usually assigned by the system. If specified, it will be allocated to the service if unused or else creation of the service will fail. Default is to auto-allocate a port if the ServiceType of this Service requires one. More info: http://releases.k8s.io/HEAD/docs/user-guide/services.md#type--nodeport", } func (ServicePort) SwaggerDoc() map[string]string { @@ -1520,15 +1598,16 @@ func (ServiceProxyOptions) SwaggerDoc() map[string]string { } var map_ServiceSpec = map[string]string{ - "": "ServiceSpec describes the attributes that a user creates on a service.", - "ports": "The list of ports that are exposed by this service. More info: http://releases.k8s.io/release-1.2/docs/user-guide/services.md#virtual-ips-and-service-proxies", - "selector": "This service will route traffic to pods having labels matching this selector. Label keys and values that must match in order to receive traffic for this service. If empty, all pods are selected, if not specified, endpoints must be manually specified. More info: http://releases.k8s.io/release-1.2/docs/user-guide/services.md#overview", - "clusterIP": "ClusterIP is usually assigned by the master and is the IP address of the service. If specified, it will be allocated to the service if it is unused or else creation of the service will fail. Valid values are None, empty string (\"\"), or a valid IP address. 'None' can be specified for a headless service when proxying is not required. Cannot be updated. More info: http://releases.k8s.io/release-1.2/docs/user-guide/services.md#virtual-ips-and-service-proxies", - "type": "Type of exposed service. Must be ClusterIP, NodePort, or LoadBalancer. Defaults to ClusterIP. 
More info: http://releases.k8s.io/release-1.2/docs/user-guide/services.md#external-services", - "externalIPs": "externalIPs is a list of IP addresses for which nodes in the cluster will also accept traffic for this service. These IPs are not managed by Kubernetes. The user is responsible for ensuring that traffic arrives at a node with this IP. A common example is external load-balancers that are not part of the Kubernetes system. A previous form of this functionality exists as the deprecatedPublicIPs field. When using this field, callers should also clear the deprecatedPublicIPs field.", - "deprecatedPublicIPs": "deprecatedPublicIPs is deprecated and replaced by the externalIPs field with almost the exact same semantics. This field is retained in the v1 API for compatibility until at least 8/20/2016. It will be removed from any new API revisions. If both deprecatedPublicIPs *and* externalIPs are set, deprecatedPublicIPs is used.", - "sessionAffinity": "Supports \"ClientIP\" and \"None\". Used to maintain session affinity. Enable client IP based session affinity. Must be ClientIP or None. Defaults to None. More info: http://releases.k8s.io/release-1.2/docs/user-guide/services.md#virtual-ips-and-service-proxies", - "loadBalancerIP": "Only applies to Service Type: LoadBalancer LoadBalancer will get created with the IP specified in this field. This feature depends on whether the underlying cloud-provider supports specifying the loadBalancerIP when a load balancer is created. This field will be ignored if the cloud-provider does not support the feature.", + "": "ServiceSpec describes the attributes that a user creates on a service.", + "ports": "The list of ports that are exposed by this service. More info: http://releases.k8s.io/HEAD/docs/user-guide/services.md#virtual-ips-and-service-proxies", + "selector": "This service will route traffic to pods having labels matching this selector. Label keys and values that must match in order to receive traffic for this service. If empty, all pods are selected, if not specified, endpoints must be manually specified. More info: http://releases.k8s.io/HEAD/docs/user-guide/services.md#overview", + "clusterIP": "ClusterIP is usually assigned by the master and is the IP address of the service. If specified, it will be allocated to the service if it is unused or else creation of the service will fail. Valid values are None, empty string (\"\"), or a valid IP address. 'None' can be specified for a headless service when proxying is not required. Cannot be updated. More info: http://releases.k8s.io/HEAD/docs/user-guide/services.md#virtual-ips-and-service-proxies", + "type": "Type of exposed service. Must be ClusterIP, NodePort, or LoadBalancer. Defaults to ClusterIP. More info: http://releases.k8s.io/HEAD/docs/user-guide/services.md#external-services", + "externalIPs": "externalIPs is a list of IP addresses for which nodes in the cluster will also accept traffic for this service. These IPs are not managed by Kubernetes. The user is responsible for ensuring that traffic arrives at a node with this IP. A common example is external load-balancers that are not part of the Kubernetes system. A previous form of this functionality exists as the deprecatedPublicIPs field. When using this field, callers should also clear the deprecatedPublicIPs field.", + "deprecatedPublicIPs": "deprecatedPublicIPs is deprecated and replaced by the externalIPs field with almost the exact same semantics. This field is retained in the v1 API for compatibility until at least 8/20/2016. 
It will be removed from any new API revisions. If both deprecatedPublicIPs *and* externalIPs are set, deprecatedPublicIPs is used.", + "sessionAffinity": "Supports \"ClientIP\" and \"None\". Used to maintain session affinity. Enable client IP based session affinity. Must be ClientIP or None. Defaults to None. More info: http://releases.k8s.io/HEAD/docs/user-guide/services.md#virtual-ips-and-service-proxies", + "loadBalancerIP": "Only applies to Service Type: LoadBalancer. LoadBalancer will get created with the IP specified in this field. This feature depends on whether the underlying cloud-provider supports specifying the loadBalancerIP when a load balancer is created. This field will be ignored if the cloud-provider does not support the feature.", + "loadBalancerSourceRanges": "If specified and supported by the platform, this will restrict traffic through the cloud-provider load-balancer to the specified client IPs. This field will be ignored if the cloud-provider does not support the feature. More info: http://releases.k8s.io/HEAD/docs/user-guide/services-firewalls.md", } func (ServiceSpec) SwaggerDoc() map[string]string { @@ -1553,9 +1632,32 @@ func (TCPSocketAction) SwaggerDoc() map[string]string { return map_TCPSocketAction } +var map_Taint = map[string]string{ + "": "The node this Taint is attached to has the effect \"effect\" on any pod that does not tolerate the Taint.", + "key": "Required. The taint key to be applied to a node.", + "value": "Required. The taint value corresponding to the taint key.", + "effect": "Required. The effect of the taint on pods that do not tolerate the taint. Valid effects are NoSchedule and PreferNoSchedule.", +} + +func (Taint) SwaggerDoc() map[string]string { + return map_Taint +} + +var map_Toleration = map[string]string{ + "": "The pod this Toleration is attached to tolerates any taint that matches the triple <key,value,effect> using the matching operator <operator>.", + "key": "Required. Key is the taint key that the toleration applies to.", + "operator": "operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.", + "value": "Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.", + "effect": "Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule and PreferNoSchedule.", +} + +func (Toleration) SwaggerDoc() map[string]string { + return map_Toleration +} + var map_Volume = map[string]string{ "": "Volume represents a named volume in a pod that may be accessed by any container in the pod.", - "name": "Volume's name. Must be a DNS_LABEL and unique within the pod. More info: http://releases.k8s.io/release-1.2/docs/user-guide/identifiers.md#names", + "name": "Volume's name. Must be a DNS_LABEL and unique within the pod. More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#names", } func (Volume) SwaggerDoc() map[string]string { @@ -1567,6 +1669,7 @@ var map_VolumeMount = map[string]string{ "name": "This must match the Name of a Volume.", "readOnly": "Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false.", "mountPath": "Path within the container at which the volume should be mounted.
Must not contain ':'.", + "subPath": "Path within the volume from which the container's volume should be mounted. Defaults to \"\" (volume's root).", } func (VolumeMount) SwaggerDoc() map[string]string { @@ -1575,29 +1678,50 @@ func (VolumeMount) SwaggerDoc() map[string]string { var map_VolumeSource = map[string]string{ "": "Represents the source of a volume to mount. Only one of its members may be specified.", - "hostPath": "HostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: http://releases.k8s.io/release-1.2/docs/user-guide/volumes.md#hostpath", - "emptyDir": "EmptyDir represents a temporary directory that shares a pod's lifetime. More info: http://releases.k8s.io/release-1.2/docs/user-guide/volumes.md#emptydir", - "gcePersistentDisk": "GCEPersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: http://releases.k8s.io/release-1.2/docs/user-guide/volumes.md#gcepersistentdisk", - "awsElasticBlockStore": "AWSElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: http://releases.k8s.io/release-1.2/docs/user-guide/volumes.md#awselasticblockstore", + "hostPath": "HostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#hostpath", + "emptyDir": "EmptyDir represents a temporary directory that shares a pod's lifetime. More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#emptydir", + "gcePersistentDisk": "GCEPersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#gcepersistentdisk", + "awsElasticBlockStore": "AWSElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#awselasticblockstore", "gitRepo": "GitRepo represents a git repository at a particular revision.", - "secret": "Secret represents a secret that should populate this volume. More info: http://releases.k8s.io/release-1.2/docs/user-guide/volumes.md#secrets", - "nfs": "NFS represents an NFS mount on the host that shares a pod's lifetime More info: http://releases.k8s.io/release-1.2/docs/user-guide/volumes.md#nfs", - "iscsi": "ISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: http://releases.k8s.io/release-1.2/examples/iscsi/README.md", - "glusterfs": "Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: http://releases.k8s.io/release-1.2/examples/glusterfs/README.md", - "persistentVolumeClaim": "PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: http://releases.k8s.io/release-1.2/docs/user-guide/persistent-volumes.md#persistentvolumeclaims", - "rbd": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. 
More info: http://releases.k8s.io/release-1.2/examples/rbd/README.md", - "flexVolume": "FlexVolume represents a generic volume resource that is provisioned/attached using a exec based plugin. This is an alpha feature and may change in future.", - "cinder": "Cinder represents a cinder volume attached and mounted on kubelets host machine More info: http://releases.k8s.io/release-1.2/examples/mysql-cinder-pd/README.md", - "cephfs": "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime", - "flocker": "Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running", - "downwardAPI": "DownwardAPI represents downward API about the pod that should populate this volume", - "fc": "FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.", - "azureFile": "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.", - "configMap": "ConfigMap represents a configMap that should populate this volume", + "secret": "Secret represents a secret that should populate this volume. More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#secrets", + "nfs": "NFS represents an NFS mount on the host that shares a pod's lifetime. More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#nfs", + "iscsi": "ISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: http://releases.k8s.io/HEAD/examples/iscsi/README.md", + "glusterfs": "Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: http://releases.k8s.io/HEAD/examples/glusterfs/README.md", + "persistentVolumeClaim": "PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#persistentvolumeclaims", + "rbd": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: http://releases.k8s.io/HEAD/examples/rbd/README.md", + "flexVolume": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. This is an alpha feature and may change in future.", + "cinder": "Cinder represents a cinder volume attached and mounted on kubelet's host machine. More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", + "cephfs": "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime", + "flocker": "Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running", + "downwardAPI": "DownwardAPI represents downward API about the pod that should populate this volume", + "fc": "FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.", + "azureFile": "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.", + "configMap": "ConfigMap represents a configMap that should populate this volume", + "vsphereVolume": "VsphereVolume represents a vSphere volume attached and mounted on kubelet's host machine", } func (VolumeSource) SwaggerDoc() map[string]string { return map_VolumeSource } +var map_VsphereVirtualDiskVolumeSource = map[string]string{ + "": "Represents a vSphere volume resource.", + "volumePath": "Path that identifies vSphere volume vmdk", + "fsType": "Filesystem type to mount.
Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", +} + +func (VsphereVirtualDiskVolumeSource) SwaggerDoc() map[string]string { + return map_VsphereVirtualDiskVolumeSource +} + +var map_WeightedPodAffinityTerm = map[string]string{ + "": "The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)", + "weight": "weight associated with matching the corresponding podAffinityTerm, in the range 1-100.", + "podAffinityTerm": "Required. A pod affinity term, associated with the corresponding weight.", +} + +func (WeightedPodAffinityTerm) SwaggerDoc() map[string]string { + return map_WeightedPodAffinityTerm +} + // AUTO-GENERATED FUNCTIONS END HERE diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/validation/events.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/validation/events.go index d1a89c45c6f0..1182429582ef 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/validation/events.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/validation/events.go @@ -37,8 +37,8 @@ func ValidateEvent(event *api.Event) field.ErrorList { event.Namespace != event.InvolvedObject.Namespace { allErrs = append(allErrs, field.Invalid(field.NewPath("involvedObject", "namespace"), event.InvolvedObject.Namespace, "does not match involvedObject")) } - if !validation.IsDNS1123Subdomain(event.Namespace) { - allErrs = append(allErrs, field.Invalid(field.NewPath("namespace"), event.Namespace, "")) + for _, msg := range validation.IsDNS1123Subdomain(event.Namespace) { + allErrs = append(allErrs, field.Invalid(field.NewPath("namespace"), event.Namespace, msg)) } return allErrs } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/validation/events_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/validation/events_test.go new file mode 100644 index 000000000000..a2910d064676 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/validation/events_test.go @@ -0,0 +1,60 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package validation + +import ( + "testing" + + "k8s.io/kubernetes/pkg/api" +) + +func TestValidateEvent(t *testing.T) { + table := []struct { + *api.Event + valid bool + }{ + { + &api.Event{ + ObjectMeta: api.ObjectMeta{ + Name: "test1", + Namespace: "foo", + }, + InvolvedObject: api.ObjectReference{ + Namespace: "bar", + }, + }, + false, + }, { + &api.Event{ + ObjectMeta: api.ObjectMeta{ + Name: "test1", + Namespace: "aoeu-_-aoeu", + }, + InvolvedObject: api.ObjectReference{ + Namespace: "aoeu-_-aoeu", + }, + }, + false, + }, + } + + for _, item := range table { + if e, a := item.valid, len(ValidateEvent(item.Event)) == 0; e != a { + t.Errorf("%v: expected %v, got %v", item.Event.Name, e, a) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/validation/name.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/validation/name.go index e36775b601d5..cf2eb8bb2990 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/validation/name.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/validation/name.go @@ -28,36 +28,36 @@ var NameMayNotBe = []string{".", ".."} var NameMayNotContain = []string{"/", "%"} // IsValidPathSegmentName validates the name can be safely encoded as a path segment -func IsValidPathSegmentName(name string) (bool, string) { +func IsValidPathSegmentName(name string) []string { for _, illegalName := range NameMayNotBe { if name == illegalName { - return false, fmt.Sprintf(`name may not be %q`, illegalName) + return []string{fmt.Sprintf(`may not be '%s'`, illegalName)} } } for _, illegalContent := range NameMayNotContain { if strings.Contains(name, illegalContent) { - return false, fmt.Sprintf(`name may not contain %q`, illegalContent) + return []string{fmt.Sprintf(`may not contain '%s'`, illegalContent)} } } - return true, "" + return nil } // IsValidPathSegmentPrefix validates the name can be used as a prefix for a name which will be encoded as a path segment // It does not check for exact matches with disallowed names, since an arbitrary suffix might make the name valid -func IsValidPathSegmentPrefix(name string) (bool, string) { +func IsValidPathSegmentPrefix(name string) []string { for _, illegalContent := range NameMayNotContain { if strings.Contains(name, illegalContent) { - return false, fmt.Sprintf(`name may not contain %q`, illegalContent) + return []string{fmt.Sprintf(`may not contain '%s'`, illegalContent)} } } - return true, "" + return nil } // ValidatePathSegmentName validates the name can be safely encoded as a path segment -func ValidatePathSegmentName(name string, prefix bool) (bool, string) { +func ValidatePathSegmentName(name string, prefix bool) []string { if prefix { return IsValidPathSegmentPrefix(name) } else { diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/validation/name_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/validation/name_test.go new file mode 100644 index 000000000000..f952a960e771 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/validation/name_test.go @@ -0,0 +1,132 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package validation + +import ( + "strings" + "testing" +) + +func TestValidatePathSegmentName(t *testing.T) { + testcases := map[string]struct { + Name string + Prefix bool + ExpectedMsg string + }{ + "empty": { + Name: "", + Prefix: false, + ExpectedMsg: "", + }, + "empty,prefix": { + Name: "", + Prefix: true, + ExpectedMsg: "", + }, + + "valid": { + Name: "foo.bar.baz", + Prefix: false, + ExpectedMsg: "", + }, + "valid,prefix": { + Name: "foo.bar.baz", + Prefix: true, + ExpectedMsg: "", + }, + + // Make sure mixed case, non DNS subdomain characters are tolerated + "valid complex": { + Name: "sha256:ABCDEF012345@ABCDEF012345", + Prefix: false, + ExpectedMsg: "", + }, + // Make sure non-ascii characters are tolerated + "valid extended charset": { + Name: "Iñtërnâtiônàlizætiøn", + Prefix: false, + ExpectedMsg: "", + }, + + "dot": { + Name: ".", + Prefix: false, + ExpectedMsg: ".", + }, + "dot leading": { + Name: ".test", + Prefix: false, + ExpectedMsg: "", + }, + "dot,prefix": { + Name: ".", + Prefix: true, + ExpectedMsg: "", + }, + + "dot dot": { + Name: "..", + Prefix: false, + ExpectedMsg: "..", + }, + "dot dot leading": { + Name: "..test", + Prefix: false, + ExpectedMsg: "", + }, + "dot dot,prefix": { + Name: "..", + Prefix: true, + ExpectedMsg: "", + }, + + "slash": { + Name: "foo/bar", + Prefix: false, + ExpectedMsg: "/", + }, + "slash,prefix": { + Name: "foo/bar", + Prefix: true, + ExpectedMsg: "/", + }, + + "percent": { + Name: "foo%bar", + Prefix: false, + ExpectedMsg: "%", + }, + "percent,prefix": { + Name: "foo%bar", + Prefix: true, + ExpectedMsg: "%", + }, + } + + for k, tc := range testcases { + msgs := ValidatePathSegmentName(tc.Name, tc.Prefix) + if len(tc.ExpectedMsg) == 0 && len(msgs) > 0 { + t.Errorf("%s: expected no message, got %v", k, msgs) + } + if len(tc.ExpectedMsg) > 0 && len(msgs) == 0 { + t.Errorf("%s: expected error message, got none", k) + } + if len(tc.ExpectedMsg) > 0 && !strings.Contains(msgs[0], tc.ExpectedMsg) { + t.Errorf("%s: expected message containing %q, got %v", k, tc.ExpectedMsg, msgs[0]) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/validation/schema.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/validation/schema.go index 6c55bc47faa5..c52a0b6d7154 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/validation/schema.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/validation/schema.go @@ -26,6 +26,7 @@ import ( "github.com/emicklei/go-restful/swagger" "github.com/golang/glog" apiutil "k8s.io/kubernetes/pkg/api/util" + "k8s.io/kubernetes/pkg/runtime" utilerrors "k8s.io/kubernetes/pkg/util/errors" "k8s.io/kubernetes/pkg/util/yaml" ) @@ -62,15 +63,17 @@ type NullSchema struct{} func (NullSchema) ValidateBytes(data []byte) error { return nil } type SwaggerSchema struct { - api swagger.ApiDeclaration + api swagger.ApiDeclaration + delegate Schema // For delegating to other api groups } -func NewSwaggerSchemaFromBytes(data []byte) (Schema, error) { +func NewSwaggerSchemaFromBytes(data []byte, factory Schema) (Schema, error) { schema := &SwaggerSchema{} err := json.Unmarshal(data, &schema.api) if err != 
nil { return nil, err } + schema.delegate = factory return schema, nil } @@ -78,11 +81,15 @@ func NewSwaggerSchemaFromBytes(data []byte) (Schema, error) { // It returns nil if every item is ok. // Otherwise it returns an error list containing the errors of every item. func (s *SwaggerSchema) validateList(obj map[string]interface{}) []error { - allErrs := []error{} items, exists := obj["items"] if !exists { - return append(allErrs, fmt.Errorf("no items field in %#v", obj)) + return []error{fmt.Errorf("no items field in %#v", obj)} } + return s.validateItems(items) +} + +func (s *SwaggerSchema) validateItems(items interface{}) []error { + allErrs := []error{} itemList, ok := items.([]interface{}) if !ok { return append(allErrs, fmt.Errorf("items isn't a slice")) @@ -125,6 +132,7 @@ func (s *SwaggerSchema) validateList(obj map[string]interface{}) []error { allErrs = append(allErrs, errs...) } } + return allErrs } @@ -171,6 +179,25 @@ func (s *SwaggerSchema) ValidateObject(obj interface{}, fieldName, typeName stri allErrs := []error{} models := s.api.Models model, ok := models.At(typeName) + + // Verify the api version matches. This is required for nested types with differing api versions because + // s.api only has schema for 1 api version (the parent object type's version). + // e.g. an extensions/v1beta1 Template embedding a /v1 Service requires the schema for the extensions/v1beta1 + // api to delegate to the schema for the /v1 api. + // Only do this for !ok objects so that cross ApiVersion vendored types take precedence. + if !ok && s.delegate != nil { + fields, mapOk := obj.(map[string]interface{}) + if !mapOk { + return append(allErrs, fmt.Errorf("field %s: expected object of type map[string]interface{}, but the actual type is %T", fieldName, obj)) + } + if delegated, err := s.delegateIfDifferentApiVersion(runtime.Unstructured{Object: fields}); delegated { + if err != nil { + allErrs = append(allErrs, err) + } + return allErrs + } + } + if !ok { return append(allErrs, TypeNotFoundError(typeName)) } @@ -194,6 +221,17 @@ func (s *SwaggerSchema) ValidateObject(obj interface{}, fieldName, typeName stri } for key, value := range fields { details, ok := properties.At(key) + + // Special case for runtime.RawExtension and runtime.Object because they always fail to validate + // This is because the actual values will be of some sub-type (e.g. Deployment) not the expected + // super-type (RawExtension) + if s.isGenericArray(details) { + errs := s.validateItems(value) + if len(errs) > 0 { + allErrs = append(allErrs, errs...) + } + continue + } if !ok { allErrs = append(allErrs, fmt.Errorf("found invalid field %s for %s", key, typeName)) continue @@ -219,6 +257,42 @@ func (s *SwaggerSchema) ValidateObject(obj interface{}, fieldName, typeName stri return allErrs } +// delegateIfDifferentApiVersion delegates the validation of an object if its ApiGroup does not match the +// current SwaggerSchema. +// First return value is true if the validation was delegated (by a different ApiGroup SwaggerSchema). +// Second return value is the result of the delegated validation if performed.
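+//
+// Editorial sketch (not part of the vendored change; names here are illustrative): the
+// delegate is typically another Schema that dispatches on apiVersion, e.g.
+//
+//	router := &apiVersionRouter{} // hypothetical Schema that routes ValidateBytes by apiVersion
+//	v1Schema, _ := NewSwaggerSchemaFromBytes(v1Spec, router)
+//	extSchema, _ := NewSwaggerSchemaFromBytes(extensionsSpec, router)
+//
+// With that wiring, an extensions/v1beta1 object embedded in a /v1 List is handed back
+// through the delegate to extSchema instead of failing against the /v1 models.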
+func (s *SwaggerSchema) delegateIfDifferentApiVersion(obj runtime.Unstructured) (bool, error) { + // Never delegate objects in the same ApiVersion or we will get infinite recursion + if !s.isDifferentApiVersion(obj) { + return false, nil + } + + // Convert the object back into bytes so that we can pass it to the ValidateBytes function + m, err := json.Marshal(obj.Object) + if err != nil { + return true, err + } + + // Delegate validation of this object to the correct SwaggerSchema for its ApiGroup + return true, s.delegate.ValidateBytes(m) +} + +// isDifferentApiVersion Returns true if obj lives in a different ApiVersion than the SwaggerSchema does. +// The SwaggerSchema will not be able to process objects in different ApiVersions unless they are vendored. +func (s *SwaggerSchema) isDifferentApiVersion(obj runtime.Unstructured) bool { + groupVersion := obj.GetAPIVersion() + return len(groupVersion) > 0 && s.api.ApiVersion != groupVersion +} + +// isGenericArray Returns true if p is an array of generic Objects - either RawExtension or Object. +func (s *SwaggerSchema) isGenericArray(p swagger.ModelProperty) bool { + return p.DataTypeFields.Type != nil && + *p.DataTypeFields.Type == "array" && + p.Items != nil && + p.Items.Ref != nil && + (*p.Items.Ref == "runtime.RawExtension" || *p.Items.Ref == "runtime.Object") +} + // This matches type name in the swagger spec, such as "v1.Binding". var versionRegexp = regexp.MustCompile(`^v.+\..*`) @@ -282,6 +356,9 @@ func (s *SwaggerSchema) validateField(value interface{}, fieldName, fieldType st if _, ok := value.(bool); !ok { return append(allErrs, NewInvalidTypeError(reflect.Bool, reflect.TypeOf(value).Kind(), fieldName)) } + // API servers before release 1.3 produce swagger spec with `type: "any"` as the fallback type, while newer servers produce spec with `type: "object"`. + // We have both here so that kubectl can work with both old and new api servers. + case "object": case "any": default: return append(allErrs, fmt.Errorf("unexpected type: %v", fieldType)) diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/validation/schema_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/validation/schema_test.go new file mode 100644 index 000000000000..3499acab2598 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/validation/schema_test.go @@ -0,0 +1,309 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package validation + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "math/rand" + "strings" + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/testapi" + apitesting "k8s.io/kubernetes/pkg/api/testing" + "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/pkg/runtime" + k8syaml "k8s.io/kubernetes/pkg/util/yaml" + + "github.com/ghodss/yaml" +) + +func readPod(filename string) ([]byte, error) { + data, err := ioutil.ReadFile("testdata/" + testapi.Default.GroupVersion().Version + "/" + filename) + if err != nil { + return nil, err + } + return data, nil +} + +func readSwaggerFile() ([]byte, error) { + return readSwaggerApiFile(testapi.Default) +} + +func readSwaggerApiFile(group testapi.TestGroup) ([]byte, error) { + // TODO: Figure out a better way of finding these files + var pathToSwaggerSpec string + if group.GroupVersion().Group == "" { + pathToSwaggerSpec = "../../../api/swagger-spec/" + group.GroupVersion().Version + ".json" + } else { + pathToSwaggerSpec = "../../../api/swagger-spec/" + group.GroupVersion().Group + "_" + group.GroupVersion().Version + ".json" + } + + return ioutil.ReadFile(pathToSwaggerSpec) +} + +// Mock delegating Schema. Not a full fake impl. +type Factory struct { + defaultSchema Schema + extensionsSchema Schema +} + +var _ Schema = &Factory{} + +// TODO: Consider using a mocking library instead or fully fleshing this out into a fake impl and putting it in some +// generally available location +func (f *Factory) ValidateBytes(data []byte) error { + var obj interface{} + out, err := k8syaml.ToJSON(data) + if err != nil { + return err + } + data = out + if err := json.Unmarshal(data, &obj); err != nil { + return err + } + fields, ok := obj.(map[string]interface{}) + if !ok { + return fmt.Errorf("error in unmarshaling data %s", string(data)) + } + // Note: This only supports the 2 api versions we expect from the test it is currently supporting. 
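+ // For example (illustrative), a v1 List whose items mix a v1 Pod with an
+ // extensions/v1beta1 Deployment has each item routed to the matching schema below.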
+ groupVersion := fields["apiVersion"] + switch groupVersion { + case "v1": + return f.defaultSchema.ValidateBytes(data) + case "extensions/v1beta1": + return f.extensionsSchema.ValidateBytes(data) + default: + return fmt.Errorf("Unsupported API version %s", groupVersion) + } +} + +func loadSchemaForTest() (Schema, error) { + data, err := readSwaggerFile() + if err != nil { + return nil, err + } + return NewSwaggerSchemaFromBytes(data, nil) +} + +func loadSchemaForTestWithFactory(group testapi.TestGroup, factory Schema) (Schema, error) { + data, err := readSwaggerApiFile(group) + if err != nil { + return nil, err + } + return NewSwaggerSchemaFromBytes(data, factory) +} + +func NewFactory() (*Factory, error) { + f := &Factory{} + defaultSchema, err := loadSchemaForTestWithFactory(testapi.Default, f) + if err != nil { + return nil, err + } + f.defaultSchema = defaultSchema + extensionSchema, err := loadSchemaForTestWithFactory(testapi.Extensions, f) + if err != nil { + return nil, err + } + f.extensionsSchema = extensionSchema + return f, nil +} + +func TestLoad(t *testing.T) { + _, err := loadSchemaForTest() + if err != nil { + t.Errorf("Failed to load: %v", err) + } +} + +func TestValidateOk(t *testing.T) { + schema, err := loadSchemaForTest() + if err != nil { + t.Errorf("Failed to load: %v", err) + } + tests := []struct { + obj runtime.Object + typeName string + }{ + {obj: &api.Pod{}}, + {obj: &api.Service{}}, + {obj: &api.ReplicationController{}}, + } + + seed := rand.Int63() + apiObjectFuzzer := apitesting.FuzzerFor(nil, testapi.Default.InternalGroupVersion(), rand.NewSource(seed)) + for i := 0; i < 5; i++ { + for _, test := range tests { + testObj := test.obj + apiObjectFuzzer.Fuzz(testObj) + data, err := runtime.Encode(testapi.Default.Codec(), testObj) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + err = schema.ValidateBytes(data) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + } + } +} + +func TestValidateDifferentApiVersions(t *testing.T) { + schema, err := loadSchemaForTest() + if err != nil { + t.Errorf("Failed to load: %v", err) + } + + pod := &api.Pod{} + pod.APIVersion = "v1" + pod.Kind = "Pod" + + deployment := &extensions.Deployment{} + deployment.APIVersion = "extensions/v1beta1" + deployment.Kind = "Deployment" + + list := &api.List{} + list.APIVersion = "v1" + list.Kind = "List" + list.Items = []runtime.Object{pod, deployment} + bytes, err := json.Marshal(list) + if err != nil { + t.Error(err) + } + err = schema.ValidateBytes(bytes) + if err == nil { + t.Error(fmt.Errorf("Expected error when validating different api version and no delegate exists.")) + } + f, err := NewFactory() + if err != nil { + t.Error(fmt.Errorf("Failed to create Schema factory %v.", err)) + } + err = f.ValidateBytes(bytes) + if err != nil { + t.Error(fmt.Errorf("Failed to validate object with multiple ApiGroups: %v.", err)) + } +} + +func TestInvalid(t *testing.T) { + schema, err := loadSchemaForTest() + if err != nil { + t.Errorf("Failed to load: %v", err) + } + tests := []string{ + "invalidPod1.json", // command is a string, instead of []string. + "invalidPod2.json", // hostPort is of type string, instead of int. + "invalidPod3.json", // volumes is not an array of objects. + "invalidPod.yaml", // command is a string, instead of []string.
+ } + for _, test := range tests { + pod, err := readPod(test) + if err != nil { + t.Errorf("could not read file: %s, err: %v", test, err) + } + err = schema.ValidateBytes(pod) + if err == nil { + t.Errorf("unexpected non-error, err: %s for pod: %s", err, pod) + } + } +} + +func TestValid(t *testing.T) { + schema, err := loadSchemaForTest() + if err != nil { + t.Errorf("Failed to load: %v", err) + } + tests := []string{ + "validPod.yaml", + } + for _, test := range tests { + pod, err := readPod(test) + if err != nil { + t.Errorf("could not read file: %s, err: %v", test, err) + } + err = schema.ValidateBytes(pod) + if err != nil { + t.Errorf("unexpected error %s, for pod %s", err, pod) + } + } +} + +func TestVersionRegex(t *testing.T) { + testCases := []struct { + typeName string + match bool + }{ + { + typeName: "v1.Binding", + match: true, + }, + { + typeName: "v1beta1.Binding", + match: true, + }, + { + typeName: "Binding", + match: false, + }, + } + for _, test := range testCases { + if versionRegexp.MatchString(test.typeName) && !test.match { + t.Errorf("unexpected error: expect %s not to match the regular expression", test.typeName) + } + if !versionRegexp.MatchString(test.typeName) && test.match { + t.Errorf("unexpected error: expect %s to match the regular expression", test.typeName) + } + } +} + +// Tests that validation works fine when spec contains "type": "any" instead of "type": "object" +// Ref: https://github.com/kubernetes/kubernetes/issues/24309 +func TestTypeOAny(t *testing.T) { + data, err := readSwaggerFile() + if err != nil { + t.Errorf("failed to read swagger file: %v", err) + } + // Replace type: "object" in the spec with type: "any" and verify that the validation still passes. + newData := strings.Replace(string(data), `"type": "object"`, `"type": "any"`, -1) + schema, err := NewSwaggerSchemaFromBytes([]byte(newData), nil) + if err != nil { + t.Errorf("Failed to load: %v", err) + } + tests := []string{ + "validPod.yaml", + } + for _, test := range tests { + podBytes, err := readPod(test) + if err != nil { + t.Errorf("could not read file: %s, err: %v", test, err) + } + // Verify that pod has at least one label (labels are type "any") + var pod api.Pod + err = yaml.Unmarshal(podBytes, &pod) + if err != nil { + t.Errorf("error in unmarshalling pod: %v", err) + } + if len(pod.Labels) == 0 { + t.Errorf("invalid test input: the pod should have at least one label") + } + err = schema.ValidateBytes(podBytes) + if err != nil { + t.Errorf("unexpected error %s, for pod %s", err, string(podBytes)) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/validation/testdata/v1/invalidPod.yaml b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/validation/testdata/v1/invalidPod.yaml new file mode 100644 index 000000000000..9557c55ff597 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/validation/testdata/v1/invalidPod.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: Pod +metadata: + labels: + name: redis-master + name: name +spec: + containers: + - args: "this is a bad command" + image: gcr.io/fake_project/fake_image:fake_tag + name: master diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/validation/testdata/v1/invalidPod1.json b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/validation/testdata/v1/invalidPod1.json new file mode 100644 index 000000000000..d935742d77c4 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/validation/testdata/v1/invalidPod1.json @@ -0,0 +1,19 @@ +{ + "kind": "Pod", + "apiVersion": "v1", +
"metadata": { + "name": "name", + "labels": { + "name": "redis-master" + } + }, + "spec": { + "containers": [ + { + "name": "master", + "image": "gcr.io/fake_project/fake_image:fake_tag", + "args": "this is a bad command" + } + ] + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/validation/testdata/v1/invalidPod2.json b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/validation/testdata/v1/invalidPod2.json new file mode 100644 index 000000000000..56e8f93bab9a --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/validation/testdata/v1/invalidPod2.json @@ -0,0 +1,35 @@ +{ + "kind": "Pod", + "apiVersion": "v1", + "metadata": { + "name": "apache-php", + "labels": { + "name": "apache-php" + } + }, + "spec": { + "volumes": [{ + "name": "shared-disk" + }], + "containers": [ + { + "name": "apache-php", + "image": "gcr.io/fake_project/fake_image:fake_tag", + "ports": [ + { + "name": "apache", + "hostPort": "13380", + "containerPort": 80, + "protocol": "TCP" + } + ], + "volumeMounts": [ + { + "name": "shared-disk", + "mountPath": "/var/www/html" + } + ] + } + ] + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/validation/testdata/v1/invalidPod3.json b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/validation/testdata/v1/invalidPod3.json new file mode 100644 index 000000000000..4d99181dc07b --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/validation/testdata/v1/invalidPod3.json @@ -0,0 +1,35 @@ +{ + "kind": "Pod", + "apiVersion": "v1", + "metadata": { + "name": "apache-php", + "labels": { + "name": "apache-php" + } + }, + "spec": { + "volumes": [ + "name": "shared-disk" + ], + "containers": [ + { + "name": "apache-php", + "image": "gcr.io/fake_project/fake_image:fake_tag", + "ports": [ + { + "name": "apache", + "hostPort": 13380, + "containerPort": 80, + "protocol": "TCP" + } + ], + "volumeMounts": [ + { + "name": "shared-disk", + "mountPath": "/var/www/html" + } + ] + } + ] + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/validation/testdata/v1/validPod.yaml b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/validation/testdata/v1/validPod.yaml new file mode 100644 index 000000000000..3849ba7a1f08 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/validation/testdata/v1/validPod.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Pod +metadata: + labels: + name: redis-master + name: name +spec: + containers: + - args: + - this + - is + - an + - ok + - command + image: gcr.io/fake_project/fake_image:fake_tag + name: master diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/validation/validation.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/validation/validation.go index 3d4d906c5713..1144cefb249a 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/validation/validation.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/validation/validation.go @@ -33,6 +33,9 @@ import ( utilpod "k8s.io/kubernetes/pkg/api/pod" "k8s.io/kubernetes/pkg/api/resource" apiservice "k8s.io/kubernetes/pkg/api/service" + "k8s.io/kubernetes/pkg/api/unversioned" + unversionedvalidation "k8s.io/kubernetes/pkg/api/unversioned/validation" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/capabilities" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/util/intstr" @@ -55,11 +58,6 @@ func InclusiveRangeErrorMsg(lo, hi int) string { return fmt.Sprintf(`must be between %d and %d, inclusive`, lo, hi) } -var labelValueErrorMsg string = fmt.Sprintf(`must have at most %d characters, matching regex %s: 
e.g. "MyValue" or ""`, validation.LabelValueMaxLength, validation.LabelValueFmt) -var qualifiedNameErrorMsg string = fmt.Sprintf(`must be a qualified name (at most %d characters, matching regex %s), with an optional DNS subdomain prefix (at most %d characters, matching regex %s) and slash (/): e.g. "MyName" or "example.com/MyName"`, validation.QualifiedNameMaxLength, validation.QualifiedNameFmt, validation.DNS1123SubdomainMaxLength, validation.DNS1123SubdomainFmt) -var DNSSubdomainErrorMsg string = fmt.Sprintf(`must be a DNS subdomain (at most %d characters, matching regex %s): e.g. "example.com"`, validation.DNS1123SubdomainMaxLength, validation.DNS1123SubdomainFmt) -var DNS1123LabelErrorMsg string = fmt.Sprintf(`must be a DNS label (at most %d characters, matching regex %s): e.g. "my-name"`, validation.DNS1123LabelMaxLength, validation.DNS1123LabelFmt) -var DNS952LabelErrorMsg string = fmt.Sprintf(`must be a DNS 952 label (at most %d characters, matching regex %s): e.g. "my-name"`, validation.DNS952LabelMaxLength, validation.DNS952LabelFmt) var pdPartitionErrorMsg string = InclusiveRangeErrorMsg(1, 255) var PortRangeErrorMsg string = InclusiveRangeErrorMsg(1, 65535) var IdRangeErrorMsg string = InclusiveRangeErrorMsg(0, math.MaxInt32) @@ -67,24 +65,9 @@ var PortNameErrorMsg string = fmt.Sprintf(`must be an IANA_SVC_NAME (at most 15 const totalAnnotationSizeLimitB int = 256 * (1 << 10) // 256 kB -func ValidateLabelName(labelName string, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if !validation.IsQualifiedName(labelName) { - allErrs = append(allErrs, field.Invalid(fldPath, labelName, qualifiedNameErrorMsg)) - } - return allErrs -} - -// ValidateLabels validates that a set of labels are correctly defined. -func ValidateLabels(labels map[string]string, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - for k, v := range labels { - allErrs = append(allErrs, ValidateLabelName(k, fldPath)...) - if !validation.IsValidLabelValue(v) { - allErrs = append(allErrs, field.Invalid(fldPath, v, labelValueErrorMsg)) - } - } - return allErrs +// BannedOwners is a blacklist of objects that are not allowed to be owners. +var BannedOwners = map[unversioned.GroupVersionKind]struct{}{ + v1.SchemeGroupVersion.WithKind("Event"): {}, } // ValidateHasLabel requires that api.ObjectMeta has a Label with key and expectedValue @@ -106,8 +89,8 @@ func ValidateAnnotations(annotations map[string]string, fldPath *field.Path) fie allErrs := field.ErrorList{} var totalSize int64 for k, v := range annotations { - if !validation.IsQualifiedName(strings.ToLower(k)) { - allErrs = append(allErrs, field.Invalid(fldPath, k, qualifiedNameErrorMsg)) + for _, msg := range validation.IsQualifiedName(strings.ToLower(k)) { + allErrs = append(allErrs, field.Invalid(fldPath, k, msg)) } totalSize += (int64)(len(k)) + (int64)(len(v)) } @@ -123,12 +106,20 @@ func ValidatePodSpecificAnnotations(annotations map[string]string, fldPath *fiel allErrs = append(allErrs, ValidateAffinityInPodAnnotations(annotations, fldPath)...) } - if hostname, exists := annotations[utilpod.PodHostnameAnnotation]; exists && !validation.IsDNS1123Label(hostname) { - allErrs = append(allErrs, field.Invalid(fldPath, utilpod.PodHostnameAnnotation, DNS1123LabelErrorMsg)) + if annotations[api.TolerationsAnnotationKey] != "" { + allErrs = append(allErrs, ValidateTolerationsInPodAnnotations(annotations, fldPath)...)
} - if subdomain, exists := annotations[utilpod.PodSubdomainAnnotation]; exists && !validation.IsDNS1123Label(subdomain) { - allErrs = append(allErrs, field.Invalid(fldPath, utilpod.PodSubdomainAnnotation, DNS1123LabelErrorMsg)) + if hostname, exists := annotations[utilpod.PodHostnameAnnotation]; exists { + for _, msg := range validation.IsDNS1123Label(hostname) { + allErrs = append(allErrs, field.Invalid(fldPath, utilpod.PodHostnameAnnotation, msg)) + } + } + + if subdomain, exists := annotations[utilpod.PodSubdomainAnnotation]; exists { + for _, msg := range validation.IsDNS1123Label(subdomain) { + allErrs = append(allErrs, field.Invalid(fldPath, utilpod.PodSubdomainAnnotation, msg)) + } } return allErrs @@ -145,10 +136,42 @@ func ValidateEndpointsSpecificAnnotations(annotations map[string]string, fldPath return allErrs } +func validateOwnerReference(ownerReference api.OwnerReference, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + gvk := unversioned.FromAPIVersionAndKind(ownerReference.APIVersion, ownerReference.Kind) + // gvk.Group is empty for the legacy group. + if len(gvk.Version) == 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("apiVersion"), ownerReference.APIVersion, "version must not be empty")) + } + if len(gvk.Kind) == 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("kind"), ownerReference.Kind, "kind must not be empty")) + } + if len(ownerReference.Name) == 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), ownerReference.Name, "name must not be empty")) + } + if len(ownerReference.UID) == 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("uid"), ownerReference.UID, "uid must not be empty")) + } + if _, ok := BannedOwners[gvk]; ok { + allErrs = append(allErrs, field.Invalid(fldPath, ownerReference, fmt.Sprintf("%s is disallowed from being an owner", gvk))) + } + return allErrs +} + +func ValidateOwnerReferences(ownerReferences []api.OwnerReference, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + for _, ref := range ownerReferences { + allErrs = append(allErrs, validateOwnerReference(ref, fldPath)...) + } + return allErrs +} + // ValidateNameFunc validates that the provided name is valid for a given resource type. -// Not all resources have the same validation rules for names. Prefix is true if the -// name will have a value appended to it. -type ValidateNameFunc func(name string, prefix bool) (bool, string) +// Not all resources have the same validation rules for names. Prefix is true +// if the name will have a value appended to it. If the name is not valid, +// this returns a list of descriptions of individual characteristics of the +// value that were not valid. Otherwise this returns an empty list or nil. +type ValidateNameFunc func(name string, prefix bool) []string // maskTrailingDash replaces the final character of a string with a subdomain safe // value if is a dash. @@ -162,106 +185,77 @@ func maskTrailingDash(name string) string { // ValidatePodName can be used to check whether the given pod name is valid. // Prefix indicates this name will be used as part of generation, in which case // trailing dashes are allowed. -func ValidatePodName(name string, prefix bool) (bool, string) { - return NameIsDNSSubdomain(name, prefix) -} +var ValidatePodName = NameIsDNSSubdomain // ValidateReplicationControllerName can be used to check whether the given replication // controller name is valid. 
// Prefix indicates this name will be used as part of generation, in which case // trailing dashes are allowed. -func ValidateReplicationControllerName(name string, prefix bool) (bool, string) { - return NameIsDNSSubdomain(name, prefix) -} +var ValidateReplicationControllerName = NameIsDNSSubdomain // ValidateServiceName can be used to check whether the given service name is valid. // Prefix indicates this name will be used as part of generation, in which case // trailing dashes are allowed. -func ValidateServiceName(name string, prefix bool) (bool, string) { - return NameIsDNS952Label(name, prefix) -} +var ValidateServiceName = NameIsDNS952Label // ValidateNodeName can be used to check whether the given node name is valid. // Prefix indicates this name will be used as part of generation, in which case // trailing dashes are allowed. -func ValidateNodeName(name string, prefix bool) (bool, string) { - return NameIsDNSSubdomain(name, prefix) -} +var ValidateNodeName = NameIsDNSSubdomain // ValidateNamespaceName can be used to check whether the given namespace name is valid. // Prefix indicates this name will be used as part of generation, in which case // trailing dashes are allowed. -func ValidateNamespaceName(name string, prefix bool) (bool, string) { - return NameIsDNSLabel(name, prefix) -} +var ValidateNamespaceName = NameIsDNSLabel // ValidateLimitRangeName can be used to check whether the given limit range name is valid. // Prefix indicates this name will be used as part of generation, in which case // trailing dashes are allowed. -func ValidateLimitRangeName(name string, prefix bool) (bool, string) { - return NameIsDNSSubdomain(name, prefix) -} +var ValidateLimitRangeName = NameIsDNSSubdomain // ValidateResourceQuotaName can be used to check whether the given // resource quota name is valid. // Prefix indicates this name will be used as part of generation, in which case // trailing dashes are allowed. -func ValidateResourceQuotaName(name string, prefix bool) (bool, string) { - return NameIsDNSSubdomain(name, prefix) -} +var ValidateResourceQuotaName = NameIsDNSSubdomain // ValidateSecretName can be used to check whether the given secret name is valid. // Prefix indicates this name will be used as part of generation, in which case // trailing dashes are allowed. -func ValidateSecretName(name string, prefix bool) (bool, string) { - return NameIsDNSSubdomain(name, prefix) -} +var ValidateSecretName = NameIsDNSSubdomain // ValidateServiceAccountName can be used to check whether the given service account name is valid. // Prefix indicates this name will be used as part of generation, in which case // trailing dashes are allowed. -func ValidateServiceAccountName(name string, prefix bool) (bool, string) { - return NameIsDNSSubdomain(name, prefix) -} +var ValidateServiceAccountName = NameIsDNSSubdomain // ValidateEndpointsName can be used to check whether the given endpoints name is valid. // Prefix indicates this name will be used as part of generation, in which case // trailing dashes are allowed. -func ValidateEndpointsName(name string, prefix bool) (bool, string) { - return NameIsDNSSubdomain(name, prefix) -} +var ValidateEndpointsName = NameIsDNSSubdomain // NameIsDNSSubdomain is a ValidateNameFunc for names that must be a DNS subdomain. 
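+// For example (illustrative only): with prefix=true, a generate-name stem such as
+// "my-controller-" has its trailing dash masked before the DNS-1123 subdomain rules run,
+// so NameIsDNSSubdomain("my-controller-", true) returns nil, while the same name with
+// prefix=false reports the trailing dash as invalid.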
-func NameIsDNSSubdomain(name string, prefix bool) (bool, string) { +func NameIsDNSSubdomain(name string, prefix bool) []string { if prefix { name = maskTrailingDash(name) } - if validation.IsDNS1123Subdomain(name) { - return true, "" - } - return false, DNSSubdomainErrorMsg + return validation.IsDNS1123Subdomain(name) } // NameIsDNSLabel is a ValidateNameFunc for names that must be a DNS 1123 label. -func NameIsDNSLabel(name string, prefix bool) (bool, string) { +func NameIsDNSLabel(name string, prefix bool) []string { if prefix { name = maskTrailingDash(name) } - if validation.IsDNS1123Label(name) { - return true, "" - } - return false, DNS1123LabelErrorMsg + return validation.IsDNS1123Label(name) } // NameIsDNS952Label is a ValidateNameFunc for names that must be a DNS 952 label. -func NameIsDNS952Label(name string, prefix bool) (bool, string) { +func NameIsDNS952Label(name string, prefix bool) []string { if prefix { name = maskTrailingDash(name) } - if validation.IsDNS952Label(name) { - return true, "" - } - return false, DNS952LabelErrorMsg + return validation.IsDNS952Label(name) } // Validates that given value is not negative. @@ -298,8 +292,8 @@ func ValidateObjectMeta(meta *api.ObjectMeta, requiresNamespace bool, nameFn Val allErrs := field.ErrorList{} if len(meta.GenerateName) != 0 { - if ok, qualifier := nameFn(meta.GenerateName, true); !ok { - allErrs = append(allErrs, field.Invalid(fldPath.Child("generateName"), meta.GenerateName, qualifier)) + for _, msg := range nameFn(meta.GenerateName, true) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("generateName"), meta.GenerateName, msg)) } } // If the generated name validates, but the calculated value does not, it's a problem with generation, and we @@ -308,15 +302,17 @@ func ValidateObjectMeta(meta *api.ObjectMeta, requiresNamespace bool, nameFn Val if len(meta.Name) == 0 { allErrs = append(allErrs, field.Required(fldPath.Child("name"), "name or generateName is required")) } else { - if ok, qualifier := nameFn(meta.Name, false); !ok { - allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), meta.Name, qualifier)) + for _, msg := range nameFn(meta.Name, false) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), meta.Name, msg)) } } if requiresNamespace { if len(meta.Namespace) == 0 { allErrs = append(allErrs, field.Required(fldPath.Child("namespace"), "")) - } else if ok, _ := ValidateNamespaceName(meta.Namespace, false); !ok { - allErrs = append(allErrs, field.Invalid(fldPath.Child("namespace"), meta.Namespace, DNS1123LabelErrorMsg)) + } else { + for _, msg := range ValidateNamespaceName(meta.Namespace, false) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("namespace"), meta.Namespace, msg)) + } } } else { if len(meta.Namespace) != 0 { @@ -324,9 +320,12 @@ func ValidateObjectMeta(meta *api.ObjectMeta, requiresNamespace bool, nameFn Val } } allErrs = append(allErrs, ValidateNonnegativeField(meta.Generation, fldPath.Child("generation"))...) - allErrs = append(allErrs, ValidateLabels(meta.Labels, fldPath.Child("labels"))...) + allErrs = append(allErrs, unversionedvalidation.ValidateLabels(meta.Labels, fldPath.Child("labels"))...) allErrs = append(allErrs, ValidateAnnotations(meta.Annotations, fldPath.Child("annotations"))...) - + allErrs = append(allErrs, ValidateOwnerReferences(meta.OwnerReferences, fldPath.Child("ownerReferences"))...) + for _, finalizer := range meta.Finalizers { + allErrs = append(allErrs, validateFinalizerName(finalizer, fldPath.Child("finalizers"))...) 
+ } return allErrs } @@ -360,9 +359,12 @@ func ValidateObjectMetaUpdate(newMeta, oldMeta *api.ObjectMeta, fldPath *field.P } // TODO: needs to check if newMeta==nil && oldMeta !=nil after the repair logic is removed. - if newMeta.DeletionGracePeriodSeconds != nil && oldMeta.DeletionGracePeriodSeconds != nil && *newMeta.DeletionGracePeriodSeconds != *oldMeta.DeletionGracePeriodSeconds { + if newMeta.DeletionGracePeriodSeconds != nil && (oldMeta.DeletionGracePeriodSeconds == nil || *newMeta.DeletionGracePeriodSeconds != *oldMeta.DeletionGracePeriodSeconds) { allErrs = append(allErrs, field.Invalid(fldPath.Child("deletionGracePeriodSeconds"), newMeta.DeletionGracePeriodSeconds, "field is immutable; may only be changed via deletion")) } + if newMeta.DeletionTimestamp != nil && (oldMeta.DeletionTimestamp == nil || !newMeta.DeletionTimestamp.Equal(*oldMeta.DeletionTimestamp)) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("deletionTimestamp"), newMeta.DeletionTimestamp, "field is immutable; may only be changed via deletion")) + } // Reject updates that don't specify a resource version if len(newMeta.ResourceVersion) == 0 { @@ -374,8 +376,9 @@ func ValidateObjectMetaUpdate(newMeta, oldMeta *api.ObjectMeta, fldPath *field.P allErrs = append(allErrs, ValidateImmutableField(newMeta.UID, oldMeta.UID, fldPath.Child("uid"))...) allErrs = append(allErrs, ValidateImmutableField(newMeta.CreationTimestamp, oldMeta.CreationTimestamp, fldPath.Child("creationTimestamp"))...) - allErrs = append(allErrs, ValidateLabels(newMeta.Labels, fldPath.Child("labels"))...) + allErrs = append(allErrs, unversionedvalidation.ValidateLabels(newMeta.Labels, fldPath.Child("labels"))...) allErrs = append(allErrs, ValidateAnnotations(newMeta.Annotations, fldPath.Child("annotations"))...) + allErrs = append(allErrs, ValidateOwnerReferences(newMeta.OwnerReferences, fldPath.Child("ownerReferences"))...) return allErrs } @@ -389,8 +392,10 @@ func validateVolumes(volumes []api.Volume, fldPath *field.Path) (sets.String, fi el := validateVolumeSource(&vol.VolumeSource, idxPath) if len(vol.Name) == 0 { el = append(el, field.Required(idxPath.Child("name"), "")) - } else if !validation.IsDNS1123Label(vol.Name) { - el = append(el, field.Invalid(idxPath.Child("name"), vol.Name, DNS1123LabelErrorMsg)) + } else if msgs := validation.IsDNS1123Label(vol.Name); len(msgs) != 0 { + for i := range msgs { + el = append(el, field.Invalid(idxPath.Child("name"), vol.Name, msgs[i])) + } } else if allNames.Has(vol.Name) { el = append(el, field.Duplicate(idxPath.Child("name"), vol.Name)) } @@ -551,6 +556,14 @@ func validateVolumeSource(source *api.VolumeSource, fldPath *field.Path) field.E numVolumes++ allErrs = append(allErrs, validateAzureFile(source.AzureFile, fldPath.Child("azureFile"))...) } + if source.VsphereVolume != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("vsphereVolume"), "may not specify more than 1 volume type")) + } else { + numVolumes++ + allErrs = append(allErrs, validateVsphereVolumeSource(source.VsphereVolume, fldPath.Child("vsphereVolume"))...) + } + } if numVolumes == 0 { allErrs = append(allErrs, field.Required(fldPath, "must specify a volume type")) } @@ -698,7 +711,38 @@ func validateDownwardAPIVolumeSource(downwardAPIVolume *api.DownwardAPIVolumeSou allErrs = append(allErrs, field.Required(fldPath.Child("path"), "")) } allErrs = append(allErrs, validateVolumeSourcePath(downwardAPIVolumeFile.Path, fldPath.Child("path"))...) 
- allErrs = append(allErrs, validateObjectFieldSelector(&downwardAPIVolumeFile.FieldRef, &validDownwardAPIFieldPathExpressions, fldPath.Child("fieldRef"))...) + if downwardAPIVolumeFile.FieldRef != nil { + allErrs = append(allErrs, validateObjectFieldSelector(downwardAPIVolumeFile.FieldRef, &validDownwardAPIFieldPathExpressions, fldPath.Child("fieldRef"))...) + if downwardAPIVolumeFile.ResourceFieldRef != nil { + allErrs = append(allErrs, field.Invalid(fldPath, "resource", "fieldRef and resourceFieldRef can not be specified simultaneously")) + } + } else if downwardAPIVolumeFile.ResourceFieldRef != nil { + allErrs = append(allErrs, validateContainerResourceFieldSelector(downwardAPIVolumeFile.ResourceFieldRef, &validContainerResourceFieldPathExpressions, fldPath.Child("resourceFieldRef"), true)...) + } else { + allErrs = append(allErrs, field.Required(fldPath, "one of fieldRef and resourceFieldRef is required")) + } + } + return allErrs +} + +// This validation makes sure targetPath: +// 1. is not an absolute path +// 2. does not start with '../' +// 3. does not contain '/../' +// 4. does not end with '/..' +func validateSubPath(targetPath string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if path.IsAbs(targetPath) { + allErrs = append(allErrs, field.Invalid(fldPath, targetPath, "must be a relative path")) + } + if strings.HasPrefix(targetPath, "../") { + allErrs = append(allErrs, field.Invalid(fldPath, targetPath, "must not start with '../'")) + } + if strings.Contains(targetPath, "/../") { + allErrs = append(allErrs, field.Invalid(fldPath, targetPath, "must not contain '/../'")) + } + if strings.HasSuffix(targetPath, "/..") { + allErrs = append(allErrs, field.Invalid(fldPath, targetPath, "must not end with '/..'")) } return allErrs } @@ -772,10 +816,18 @@ func validateAzureFile(azure *api.AzureFileVolumeSource, fldPath *field.Path) fi return allErrs } -func ValidatePersistentVolumeName(name string, prefix bool) (bool, string) { - return NameIsDNSSubdomain(name, prefix) +func validateVsphereVolumeSource(cd *api.VsphereVirtualDiskVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(cd.VolumePath) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("volumePath"), "")) + } + return allErrs } +// ValidatePersistentVolumeName checks that a name is appropriate for a +// PersistentVolume object. +var ValidatePersistentVolumeName = NameIsDNSSubdomain + var supportedAccessModes = sets.NewString(string(api.ReadWriteOnce), string(api.ReadOnlyMany), string(api.ReadWriteMany)) func ValidatePersistentVolume(pv *api.PersistentVolume) field.ErrorList { @@ -900,6 +952,14 @@ func ValidatePersistentVolume(pv *api.PersistentVolume) field.ErrorList { numVolumes++ allErrs = append(allErrs, validateAzureFile(pv.Spec.AzureFile, specPath.Child("azureFile"))...) } + if pv.Spec.VsphereVolume != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(specPath.Child("vsphereVolume"), "may not specify more than 1 volume type")) + } else { + numVolumes++ + allErrs = append(allErrs, validateVsphereVolumeSource(pv.Spec.VsphereVolume, specPath.Child("vsphereVolume"))...)
+ } + } if numVolumes == 0 { allErrs = append(allErrs, field.Required(specPath, "must specify a volume type")) } @@ -991,10 +1051,10 @@ func validateContainerPorts(ports []api.ContainerPort, fldPath *field.Path) fiel } if port.ContainerPort == 0 { allErrs = append(allErrs, field.Invalid(idxPath.Child("containerPort"), port.ContainerPort, PortRangeErrorMsg)) - } else if !validation.IsValidPortNum(port.ContainerPort) { + } else if !validation.IsValidPortNum(int(port.ContainerPort)) { allErrs = append(allErrs, field.Invalid(idxPath.Child("containerPort"), port.ContainerPort, PortRangeErrorMsg)) } - if port.HostPort != 0 && !validation.IsValidPortNum(port.HostPort) { + if port.HostPort != 0 && !validation.IsValidPortNum(int(port.HostPort)) { allErrs = append(allErrs, field.Invalid(idxPath.Child("hostPort"), port.HostPort, PortRangeErrorMsg)) } if len(port.Protocol) == 0 { @@ -1022,6 +1082,7 @@ func validateEnv(vars []api.EnvVar, fldPath *field.Path) field.ErrorList { } var validFieldPathExpressionsEnv = sets.NewString("metadata.name", "metadata.namespace", "status.podIP") +var validContainerResourceFieldPathExpressions = sets.NewString("limits.cpu", "limits.memory", "requests.cpu", "requests.memory") func validateEnvVarValueFrom(ev api.EnvVar, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} @@ -1036,6 +1097,10 @@ func validateEnvVarValueFrom(ev api.EnvVar, fldPath *field.Path) field.ErrorList numSources++ allErrs = append(allErrs, validateObjectFieldSelector(ev.ValueFrom.FieldRef, &validFieldPathExpressionsEnv, fldPath.Child("fieldRef"))...) } + if ev.ValueFrom.ResourceFieldRef != nil { + numSources++ + allErrs = append(allErrs, validateContainerResourceFieldSelector(ev.ValueFrom.ResourceFieldRef, &validContainerResourceFieldPathExpressions, fldPath.Child("resourceFieldRef"), false)...) + } if ev.ValueFrom.ConfigMapKeyRef != nil { numSources++ allErrs = append(allErrs, validateConfigMapKeySelector(ev.ValueFrom.ConfigMapKeyRef, fldPath.Child("configMapKeyRef"))...) @@ -1075,6 +1140,42 @@ func validateObjectFieldSelector(fs *api.ObjectFieldSelector, expressions *sets. return allErrs } +func validateContainerResourceFieldSelector(fs *api.ResourceFieldSelector, expressions *sets.String, fldPath *field.Path, volume bool) field.ErrorList { + allErrs := field.ErrorList{} + + if volume && len(fs.ContainerName) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("containerName"), "")) + } else if len(fs.Resource) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("resource"), "")) + } else if !expressions.Has(fs.Resource) { + allErrs = append(allErrs, field.NotSupported(fldPath.Child("resource"), fs.Resource, expressions.List())) + } + allErrs = append(allErrs, validateContainerResourceDivisor(fs.Resource, fs.Divisor, fldPath)...) 
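+ // Illustrative note (not part of the upstream change): when set, the divisor scales the
+ // exposed value, e.g. resource "limits.memory" with divisor "1Mi" surfaces the limit in
+ // mebibytes; validateContainerResourceDivisor restricts which divisors are accepted.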
+ return allErrs +} + +var validContainerResourceDivisorForCPU = sets.NewString("1m", "1") +var validContainerResourceDivisorForMemory = sets.NewString("1m", "1", "1k", "1M", "1G", "1T", "1P", "1E", "1Ki", "1Mi", "1Gi", "1Ti", "1Pi", "1Ei") + +func validateContainerResourceDivisor(rName string, divisor resource.Quantity, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + unsetDivisor := resource.Quantity{} + if unsetDivisor.Cmp(divisor) == 0 { + return allErrs + } + switch rName { + case "limits.cpu", "requests.cpu": + if !validContainerResourceDivisorForCPU.Has(divisor.String()) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("divisor"), rName, fmt.Sprintf("only divisor's values 1m and 1 are supported with the cpu resource"))) + } + case "limits.memory", "requests.memory": + if !validContainerResourceDivisorForMemory.Has(divisor.String()) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("divisor"), rName, fmt.Sprintf("only divisor's values 1m, 1, 1k, 1M, 1G, 1T, 1P, 1E, 1Ki, 1Mi, 1Gi, 1Ti, 1Pi, 1Ei are supported with the memory resource"))) + } + } + return allErrs +} + func validateConfigMapKeySelector(s *api.ConfigMapKeySelector, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} @@ -1107,6 +1208,7 @@ func validateSecretKeySelector(s *api.SecretKeySelector, fldPath *field.Path) fi func validateVolumeMounts(mounts []api.VolumeMount, volumes sets.String, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} + mountpoints := sets.NewString() for i, mnt := range mounts { idxPath := fldPath.Index(i) @@ -1120,6 +1222,13 @@ func validateVolumeMounts(mounts []api.VolumeMount, volumes sets.String, fldPath } else if strings.Contains(mnt.MountPath, ":") { allErrs = append(allErrs, field.Invalid(idxPath.Child("mountPath"), mnt.MountPath, "must not contain ':'")) } + if mountpoints.Has(mnt.MountPath) { + allErrs = append(allErrs, field.Invalid(idxPath.Child("mountPath"), mnt.MountPath, "must be unique")) + } + mountpoints.Insert(mnt.MountPath) + if len(mnt.SubPath) > 0 { + allErrs = append(allErrs, validateSubPath(mnt.SubPath, fldPath.Child("subPath"))...) + } } return allErrs } @@ -1273,6 +1382,37 @@ func validatePullPolicy(policy api.PullPolicy, fldPath *field.Path) field.ErrorL return allErrors } +func validateInitContainers(containers, otherContainers []api.Container, volumes sets.String, fldPath *field.Path) field.ErrorList { + var allErrs field.ErrorList + if len(containers) > 0 { + allErrs = append(allErrs, validateContainers(containers, volumes, fldPath)...) 
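+ // (Editorial note: init containers are first run through the shared container
+ // rules above; the checks that follow add init-specific restrictions such as
+ // unique names across both lists and no lifecycle hooks or probes.)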
+ } + + allNames := sets.String{} + for _, ctr := range otherContainers { + allNames.Insert(ctr.Name) + } + for i, ctr := range containers { + idxPath := fldPath.Index(i) + if allNames.Has(ctr.Name) { + allErrs = append(allErrs, field.Duplicate(idxPath.Child("name"), ctr.Name)) + } + if len(ctr.Name) > 0 { + allNames.Insert(ctr.Name) + } + if ctr.Lifecycle != nil { + allErrs = append(allErrs, field.Invalid(idxPath.Child("lifecycle"), ctr.Lifecycle, "must not be set for init containers")) + } + if ctr.LivenessProbe != nil { + allErrs = append(allErrs, field.Invalid(idxPath.Child("livenessProbe"), ctr.LivenessProbe, "must not be set for init containers")) + } + if ctr.ReadinessProbe != nil { + allErrs = append(allErrs, field.Invalid(idxPath.Child("readinessProbe"), ctr.ReadinessProbe, "must not be set for init containers")) + } + } + return allErrs +} + func validateContainers(containers []api.Container, volumes sets.String, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} @@ -1285,8 +1425,10 @@ func validateContainers(containers []api.Container, volumes sets.String, fldPath idxPath := fldPath.Index(i) if len(ctr.Name) == 0 { allErrs = append(allErrs, field.Required(idxPath.Child("name"), "")) - } else if !validation.IsDNS1123Label(ctr.Name) { - allErrs = append(allErrs, field.Invalid(idxPath.Child("name"), ctr.Name, DNS1123LabelErrorMsg)) + } else if msgs := validation.IsDNS1123Label(ctr.Name); len(msgs) != 0 { + for i := range msgs { + allErrs = append(allErrs, field.Invalid(idxPath.Child("name"), ctr.Name, msgs[i])) + } } else if allNames.Has(ctr.Name) { allErrs = append(allErrs, field.Duplicate(idxPath.Child("name"), ctr.Name)) } else { @@ -1379,6 +1521,60 @@ func validateImagePullSecrets(imagePullSecrets []api.LocalObjectReference, fldPa return allErrors } +func validateTaintEffect(effect *api.TaintEffect, allowEmpty bool, fldPath *field.Path) field.ErrorList { + if !allowEmpty && len(*effect) == 0 { + return field.ErrorList{field.Required(fldPath, "")} + } + + allErrors := field.ErrorList{} + switch *effect { + // TODO: Replace next line with subsequent commented-out line when implement TaintEffectNoScheduleNoAdmit, TaintEffectNoScheduleNoAdmitNoExecute. + case api.TaintEffectNoSchedule, api.TaintEffectPreferNoSchedule: + // case api.TaintEffectNoSchedule, api.TaintEffectPreferNoSchedule, api.TaintEffectNoScheduleNoAdmit, api.TaintEffectNoScheduleNoAdmitNoExecute: + default: + validValues := []string{ + string(api.TaintEffectNoSchedule), + string(api.TaintEffectPreferNoSchedule), + // TODO: Uncomment this block when implement TaintEffectNoScheduleNoAdmit, TaintEffectNoScheduleNoAdmitNoExecute. + // string(api.TaintEffectNoScheduleNoAdmit), + // string(api.TaintEffectNoScheduleNoAdmitNoExecute), + } + allErrors = append(allErrors, field.NotSupported(fldPath, effect, validValues)) + } + return allErrors +} + +// validateTolerations tests if given tolerations have valid data. +func validateTolerations(tolerations []api.Toleration, fldPath *field.Path) field.ErrorList { + allErrors := field.ErrorList{} + for i, toleration := range tolerations { + idxPath := fldPath.Index(i) + // validate the toleration key + allErrors = append(allErrors, unversionedvalidation.ValidateLabelName(toleration.Key, idxPath.Child("key"))...) 
+ + // validate toleration operator and value + switch toleration.Operator { + case api.TolerationOpEqual, "": + if errs := validation.IsValidLabelValue(toleration.Value); len(errs) != 0 { + allErrors = append(allErrors, field.Invalid(idxPath.Child("operator"), toleration.Value, strings.Join(errs, ";"))) + } + case api.TolerationOpExists: + if len(toleration.Value) > 0 { + allErrors = append(allErrors, field.Invalid(idxPath.Child("operator"), toleration, "value must be empty when `operator` is 'Exists'")) + } + default: + validValues := []string{string(api.TolerationOpEqual), string(api.TolerationOpExists)} + allErrors = append(allErrors, field.NotSupported(idxPath.Child("operator"), toleration.Operator, validValues)) + } + + // validate toleration effect + if len(toleration.Effect) > 0 { + allErrors = append(allErrors, validateTaintEffect(&toleration.Effect, true, idxPath.Child("effect"))...) + } + } + return allErrors +} + // ValidatePod tests if required fields in the pod are set. func ValidatePod(pod *api.Pod) field.ErrorList { fldPath := field.NewPath("metadata") @@ -1398,19 +1594,20 @@ func ValidatePodSpec(spec *api.PodSpec, fldPath *field.Path) field.ErrorList { allVolumes, vErrs := validateVolumes(spec.Volumes, fldPath.Child("volumes")) allErrs = append(allErrs, vErrs...) allErrs = append(allErrs, validateContainers(spec.Containers, allVolumes, fldPath.Child("containers"))...) + allErrs = append(allErrs, validateInitContainers(spec.InitContainers, spec.Containers, allVolumes, fldPath.Child("initContainers"))...) allErrs = append(allErrs, validateRestartPolicy(&spec.RestartPolicy, fldPath.Child("restartPolicy"))...) allErrs = append(allErrs, validateDNSPolicy(&spec.DNSPolicy, fldPath.Child("dnsPolicy"))...) - allErrs = append(allErrs, ValidateLabels(spec.NodeSelector, fldPath.Child("nodeSelector"))...) + allErrs = append(allErrs, unversionedvalidation.ValidateLabels(spec.NodeSelector, fldPath.Child("nodeSelector"))...) allErrs = append(allErrs, ValidatePodSecurityContext(spec.SecurityContext, spec, fldPath, fldPath.Child("securityContext"))...) allErrs = append(allErrs, validateImagePullSecrets(spec.ImagePullSecrets, fldPath.Child("imagePullSecrets"))...) 
if len(spec.ServiceAccountName) > 0 { - if ok, msg := ValidateServiceAccountName(spec.ServiceAccountName, false); !ok { + for _, msg := range ValidateServiceAccountName(spec.ServiceAccountName, false) { allErrs = append(allErrs, field.Invalid(fldPath.Child("serviceAccountName"), spec.ServiceAccountName, msg)) } } if len(spec.NodeName) > 0 { - if ok, msg := ValidateNodeName(spec.NodeName, false); !ok { + for _, msg := range ValidateNodeName(spec.NodeName, false) { allErrs = append(allErrs, field.Invalid(fldPath.Child("nodeName"), spec.NodeName, msg)) } } @@ -1420,6 +1617,19 @@ func ValidatePodSpec(spec *api.PodSpec, fldPath *field.Path) field.ErrorList { allErrs = append(allErrs, field.Invalid(fldPath.Child("activeDeadlineSeconds"), spec.ActiveDeadlineSeconds, "must be greater than 0")) } } + + if len(spec.Hostname) > 0 { + for _, msg := range validation.IsDNS1123Label(spec.Hostname) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("hostname"), spec.Hostname, msg)) + } + } + + if len(spec.Subdomain) > 0 { + for _, msg := range validation.IsDNS1123Label(spec.Subdomain) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("subdomain"), spec.Subdomain, msg)) + } + } + return allErrs } @@ -1443,7 +1653,7 @@ func ValidateNodeSelectorRequirement(rq api.NodeSelectorRequirement, fldPath *fi default: allErrs = append(allErrs, field.Invalid(fldPath.Child("operator"), rq.Operator, "not a valid selector operator")) } - allErrs = append(allErrs, ValidateLabelName(rq.Key, fldPath.Child("key"))...) + allErrs = append(allErrs, unversionedvalidation.ValidateLabelName(rq.Key, fldPath.Child("key"))...) return allErrs } @@ -1490,6 +1700,87 @@ func ValidatePreferredSchedulingTerms(terms []api.PreferredSchedulingTerm, fldPa return allErrs } +// validatePodAffinityTerm tests that the specified podAffinityTerm fields have valid data +func validatePodAffinityTerm(podAffinityTerm api.PodAffinityTerm, allowEmptyTopologyKey bool, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(podAffinityTerm.LabelSelector, fldPath.Child("matchExpressions"))...) + for _, name := range podAffinityTerm.Namespaces { + for _, msg := range ValidateNamespaceName(name, false) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("namespace"), name, msg)) + } + } + if !allowEmptyTopologyKey && len(podAffinityTerm.TopologyKey) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("topologyKey"), "can only be empty for PreferredDuringScheduling pod anti affinity")) + } + if len(podAffinityTerm.TopologyKey) != 0 { + allErrs = append(allErrs, unversionedvalidation.ValidateLabelName(podAffinityTerm.TopologyKey, fldPath.Child("topologyKey"))...) + } + return allErrs +} + +// validatePodAffinityTerms tests that the specified podAffinityTerms fields have valid data +func validatePodAffinityTerms(podAffinityTerms []api.PodAffinityTerm, allowEmptyTopologyKey bool, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + for i, podAffinityTerm := range podAffinityTerms { + allErrs = append(allErrs, validatePodAffinityTerm(podAffinityTerm, allowEmptyTopologyKey, fldPath.Index(i))...) 
+ } + return allErrs +} + +// validateWeightedPodAffinityTerms tests that the specified weightedPodAffinityTerms fields have valid data +func validateWeightedPodAffinityTerms(weightedPodAffinityTerms []api.WeightedPodAffinityTerm, allowEmptyTopologyKey bool, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + for j, weightedTerm := range weightedPodAffinityTerms { + if weightedTerm.Weight <= 0 || weightedTerm.Weight > 100 { + allErrs = append(allErrs, field.Invalid(fldPath.Index(j).Child("weight"), weightedTerm.Weight, "must be in the range 1-100")) + } + allErrs = append(allErrs, validatePodAffinityTerm(weightedTerm.PodAffinityTerm, allowEmptyTopologyKey, fldPath.Index(j).Child("podAffinityTerm"))...) + } + return allErrs +} + +// validatePodAntiAffinity tests that the specified podAntiAffinity fields have valid data +func validatePodAntiAffinity(podAntiAffinity *api.PodAntiAffinity, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + // TODO:Uncomment below code once RequiredDuringSchedulingRequiredDuringExecution is implemented. + // if podAntiAffinity.RequiredDuringSchedulingRequiredDuringExecution != nil { + // allErrs = append(allErrs, validatePodAffinityTerms(podAntiAffinity.RequiredDuringSchedulingRequiredDuringExecution, false, + // fldPath.Child("requiredDuringSchedulingRequiredDuringExecution"))...) + //} + if podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil { + // empty topologyKey is not allowed for hard pod anti-affinity + allErrs = append(allErrs, validatePodAffinityTerms(podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution, false, + fldPath.Child("requiredDuringSchedulingIgnoredDuringExecution"))...) + } + if podAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution != nil { + // empty topologyKey is allowed for soft pod anti-affinity + allErrs = append(allErrs, validateWeightedPodAffinityTerms(podAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution, true, + fldPath.Child("preferredDuringSchedulingIgnoredDuringExecution"))...) + } + return allErrs +} + +// validatePodAffinity tests that the specified podAffinity fields have valid data +func validatePodAffinity(podAffinity *api.PodAffinity, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + // TODO:Uncomment below code once RequiredDuringSchedulingRequiredDuringExecution is implemented. + // if podAffinity.RequiredDuringSchedulingRequiredDuringExecution != nil { + // allErrs = append(allErrs, validatePodAffinityTerms(podAffinity.RequiredDuringSchedulingRequiredDuringExecution, false, + // fldPath.Child("requiredDuringSchedulingRequiredDuringExecution"))...) + //} + if podAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil { + // empty topologyKey is not allowed for hard pod affinity + allErrs = append(allErrs, validatePodAffinityTerms(podAffinity.RequiredDuringSchedulingIgnoredDuringExecution, false, + fldPath.Child("requiredDuringSchedulingIgnoredDuringExecution"))...) + } + if podAffinity.PreferredDuringSchedulingIgnoredDuringExecution != nil { + // empty topologyKey is not allowed for soft pod affinity + allErrs = append(allErrs, validateWeightedPodAffinityTerms(podAffinity.PreferredDuringSchedulingIgnoredDuringExecution, false, + fldPath.Child("preferredDuringSchedulingIgnoredDuringExecution"))...) 
+ } + return allErrs +} + // ValidateAffinityInPodAnnotations tests that the serialized Affinity in Pod.Annotations has valid data func ValidateAffinityInPodAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} @@ -1500,23 +1791,45 @@ func ValidateAffinityInPodAnnotations(annotations map[string]string, fldPath *fi return allErrs } + affinityFldPath := fldPath.Child(api.AffinityAnnotationKey) if affinity.NodeAffinity != nil { na := affinity.NodeAffinity - + naFldPath := affinityFldPath.Child("nodeAffinity") // TODO: Uncomment the next three lines once RequiredDuringSchedulingRequiredDuringExecution is implemented. // if na.RequiredDuringSchedulingRequiredDuringExecution != nil { - // allErrs = append(allErrs, ValidateNodeSelector(na.RequiredDuringSchedulingRequiredDuringExecution, fldPath.Child("requiredDuringSchedulingRequiredDuringExecution"))...) + // allErrs = append(allErrs, ValidateNodeSelector(na.RequiredDuringSchedulingRequiredDuringExecution, naFldPath.Child("requiredDuringSchedulingRequiredDuringExecution"))...) // } if na.RequiredDuringSchedulingIgnoredDuringExecution != nil { - allErrs = append(allErrs, ValidateNodeSelector(na.RequiredDuringSchedulingIgnoredDuringExecution, fldPath.Child("requiredDuringSchedulingIgnoredDuringExecution"))...) + allErrs = append(allErrs, ValidateNodeSelector(na.RequiredDuringSchedulingIgnoredDuringExecution, naFldPath.Child("requiredDuringSchedulingIgnoredDuringExecution"))...) } if len(na.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { - allErrs = append(allErrs, ValidatePreferredSchedulingTerms(na.PreferredDuringSchedulingIgnoredDuringExecution, fldPath.Child("preferredDuringSchedulingIgnoredDuringExecution"))...) - + allErrs = append(allErrs, ValidatePreferredSchedulingTerms(na.PreferredDuringSchedulingIgnoredDuringExecution, naFldPath.Child("preferredDuringSchedulingIgnoredDuringExecution"))...) } } + if affinity.PodAffinity != nil { + allErrs = append(allErrs, validatePodAffinity(affinity.PodAffinity, affinityFldPath.Child("podAffinity"))...) + } + if affinity.PodAntiAffinity != nil { + allErrs = append(allErrs, validatePodAntiAffinity(affinity.PodAntiAffinity, affinityFldPath.Child("podAntiAffinity"))...) + } + + return allErrs +} + +// ValidateTolerationsInPodAnnotations tests that the serialized tolerations in Pod.Annotations has valid data +func ValidateTolerationsInPodAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + tolerations, err := api.GetTolerationsFromPodAnnotations(annotations) + if err != nil { + allErrs = append(allErrs, field.Invalid(fldPath, api.TolerationsAnnotationKey, err.Error())) + return allErrs + } + if len(tolerations) > 0 { + allErrs = append(allErrs, validateTolerations(tolerations, fldPath.Child(api.TolerationsAnnotationKey))...) + } return allErrs } @@ -1692,7 +2005,7 @@ func ValidateService(service *api.Service) field.ErrorList { } if service.Spec.Selector != nil { - allErrs = append(allErrs, ValidateLabels(service.Spec.Selector, specPath.Child("selector"))...) + allErrs = append(allErrs, unversionedvalidation.ValidateLabels(service.Spec.Selector, specPath.Child("selector"))...) 
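A few lines further into this ValidateService hunk, the old annotation-only source-range check is replaced by one that accepts either spec.LoadBalancerSourceRanges or the legacy annotation, rejects both unless the service type is LoadBalancer, and expects a value like 10.240.0.0/24,10.250.0.0/24. As a rough illustration of the comma-separated CIDR parsing being validated, here is a standalone sketch built only on net.ParseCIDR; parseSourceRanges is a hypothetical helper, not the actual apiservice.GetLoadBalancerSourceRanges, whose exact behavior is defined elsewhere in the tree:

package main

import (
	"fmt"
	"net"
	"strings"
)

// parseSourceRanges parses a comma-separated CIDR list and fails on the
// first entry that is not a valid range. (Illustrative only.)
func parseSourceRanges(spec string) ([]*net.IPNet, error) {
	var nets []*net.IPNet
	for _, s := range strings.Split(spec, ",") {
		s = strings.TrimSpace(s) // tolerate "10.0.0.0/8, 192.168.0.0/16"
		_, ipnet, err := net.ParseCIDR(s)
		if err != nil {
			return nil, fmt.Errorf("%q is not a valid CIDR: %v", s, err)
		}
		nets = append(nets, ipnet)
	}
	return nets, nil
}

func main() {
	if _, err := parseSourceRanges("10.240.0.0/24,10.250.0.0/24"); err != nil {
		fmt.Println("unexpected:", err)
	}
	if _, err := parseSourceRanges("10.240.0.0/24,not-a-cidr"); err != nil {
		fmt.Println("rejected as expected:", err)
	}
}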
} if len(service.Spec.SessionAffinity) == 0 { @@ -1767,12 +2080,26 @@ func ValidateService(service *api.Service) field.ErrorList { nodePorts[key] = true } - _, err := apiservice.GetLoadBalancerSourceRanges(service.Annotations) - if err != nil { - v := service.Annotations[apiservice.AnnotationLoadBalancerSourceRangesKey] - allErrs = append(allErrs, field.Invalid(field.NewPath("metadata", "annotations").Key(apiservice.AnnotationLoadBalancerSourceRangesKey), v, "must be a comma separated list of CIDRs e.g. 192.168.0.0/16,10.0.0.0/8")) + // Validate SourceRange field and annotation + _, ok := service.Annotations[apiservice.AnnotationLoadBalancerSourceRangesKey] + if len(service.Spec.LoadBalancerSourceRanges) > 0 || ok { + var fieldPath *field.Path + var val string + if len(service.Spec.LoadBalancerSourceRanges) > 0 { + fieldPath = specPath.Child("LoadBalancerSourceRanges") + val = fmt.Sprintf("%v", service.Spec.LoadBalancerSourceRanges) + } else { + fieldPath = field.NewPath("metadata", "annotations").Key(apiservice.AnnotationLoadBalancerSourceRangesKey) + val = service.Annotations[apiservice.AnnotationLoadBalancerSourceRangesKey] + } + if service.Spec.Type != api.ServiceTypeLoadBalancer { + allErrs = append(allErrs, field.Invalid(fieldPath, "", "may only be used when `type` is 'LoadBalancer'")) + } + _, err := apiservice.GetLoadBalancerSourceRanges(service) + if err != nil { + allErrs = append(allErrs, field.Invalid(fieldPath, val, "must be a list of IP ranges. For example, 10.240.0.0/24,10.250.0.0/24")) + } } - return allErrs } @@ -1782,8 +2109,10 @@ func validateServicePort(sp *api.ServicePort, requireName, isHeadlessService boo if requireName && len(sp.Name) == 0 { allErrs = append(allErrs, field.Required(fldPath.Child("name"), "")) } else if len(sp.Name) != 0 { - if !validation.IsDNS1123Label(sp.Name) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), sp.Name, DNS1123LabelErrorMsg)) + if msgs := validation.IsDNS1123Label(sp.Name); len(msgs) != 0 { + for i := range msgs { + allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), sp.Name, msgs[i])) + } } else if allNames.Has(sp.Name) { allErrs = append(allErrs, field.Duplicate(fldPath.Child("name"), sp.Name)) } else { @@ -1791,7 +2120,7 @@ func validateServicePort(sp *api.ServicePort, requireName, isHeadlessService boo } } - if !validation.IsValidPortNum(sp.Port) { + if !validation.IsValidPortNum(int(sp.Port)) { allErrs = append(allErrs, field.Invalid(fldPath.Child("port"), sp.Port, PortRangeErrorMsg)) } @@ -1874,7 +2203,7 @@ func ValidateNonEmptySelector(selectorMap map[string]string, fldPath *field.Path } // Validates the given template and ensures that it is in accordance with the desired selector and replicas. -func ValidatePodTemplateSpecForRC(template *api.PodTemplateSpec, selectorMap map[string]string, replicas int, fldPath *field.Path) field.ErrorList { +func ValidatePodTemplateSpecForRC(template *api.PodTemplateSpec, selectorMap map[string]string, replicas int32, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if template == nil { allErrs = append(allErrs, field.Required(fldPath, "")) @@ -1911,7 +2240,7 @@ func ValidateReplicationControllerSpec(spec *api.ReplicationControllerSpec, fldP // ValidatePodTemplateSpec validates the spec of a pod template func ValidatePodTemplateSpec(spec *api.PodTemplateSpec, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} - allErrs = append(allErrs, ValidateLabels(spec.Labels, fldPath.Child("labels"))...)
+ allErrs = append(allErrs, unversionedvalidation.ValidateLabels(spec.Labels, fldPath.Child("labels"))...) allErrs = append(allErrs, ValidateAnnotations(spec.Annotations, fldPath.Child("annotations"))...) allErrs = append(allErrs, ValidatePodSpecificAnnotations(spec.Annotations, fldPath.Child("annotations"))...) allErrs = append(allErrs, ValidatePodSpec(&spec.Spec, fldPath.Child("spec"))...) @@ -1933,9 +2262,51 @@ func ValidateReadOnlyPersistentDisks(volumes []api.Volume, fldPath *field.Path) return allErrs } +// validateTaints tests if given taints have valid data. +func validateTaints(taints []api.Taint, fldPath *field.Path) field.ErrorList { + allErrors := field.ErrorList{} + for i, currTaint := range taints { + idxPath := fldPath.Index(i) + // validate the taint key + allErrors = append(allErrors, unversionedvalidation.ValidateLabelName(currTaint.Key, idxPath.Child("key"))...) + // validate the taint value + if errs := validation.IsValidLabelValue(currTaint.Value); len(errs) != 0 { + allErrors = append(allErrors, field.Invalid(idxPath.Child("value"), currTaint.Value, strings.Join(errs, ";"))) + } + // validate the taint effect + allErrors = append(allErrors, validateTaintEffect(&currTaint.Effect, false, idxPath.Child("effect"))...) + } + return allErrors +} + +// ValidateTaintsInNodeAnnotations tests that the serialized taints in Node.Annotations has valid data +func ValidateTaintsInNodeAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + taints, err := api.GetTaintsFromNodeAnnotations(annotations) + if err != nil { + allErrs = append(allErrs, field.Invalid(fldPath, api.TaintsAnnotationKey, err.Error())) + return allErrs + } + if len(taints) > 0 { + allErrs = append(allErrs, validateTaints(taints, fldPath.Child(api.TaintsAnnotationKey))...) + } + + return allErrs +} + +func ValidateNodeSpecificAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList { + if annotations[api.TaintsAnnotationKey] != "" { + return ValidateTaintsInNodeAnnotations(annotations, fldPath) + } + return field.ErrorList{} +} + // ValidateNode tests if required fields in the node are set. func ValidateNode(node *api.Node) field.ErrorList { - allErrs := ValidateObjectMeta(&node.ObjectMeta, false, ValidateNodeName, field.NewPath("metadata")) + fldPath := field.NewPath("metadata") + allErrs := ValidateObjectMeta(&node.ObjectMeta, false, ValidateNodeName, fldPath) + allErrs = append(allErrs, ValidateNodeSpecificAnnotations(node.ObjectMeta.Annotations, fldPath.Child("annotations"))...) // Only validate spec. All status fields are optional and can be updated later. @@ -1950,7 +2321,9 @@ func ValidateNode(node *api.Node) field.ErrorList { // ValidateNodeUpdate tests to make sure a node update can be applied. Modifies oldNode. func ValidateNodeUpdate(node, oldNode *api.Node) field.ErrorList { - allErrs := ValidateObjectMetaUpdate(&node.ObjectMeta, &oldNode.ObjectMeta, field.NewPath("metadata")) + fldPath := field.NewPath("metadata") + allErrs := ValidateObjectMetaUpdate(&node.ObjectMeta, &oldNode.ObjectMeta, fldPath) + allErrs = append(allErrs, ValidateNodeSpecificAnnotations(node.ObjectMeta.Annotations, fldPath.Child("annotations"))...) // TODO: Enable the code once we have better api object.status update model. Currently, // anyone can update node status. @@ -1998,8 +2371,11 @@ func ValidateNodeUpdate(node, oldNode *api.Node) field.ErrorList { // Refer to docs/design/resources.md for more details. 
func validateResourceName(value string, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} - if !validation.IsQualifiedName(value) { - return append(allErrs, field.Invalid(fldPath, value, qualifiedNameErrorMsg)) + for _, msg := range validation.IsQualifiedName(value) { + allErrs = append(allErrs, field.Invalid(fldPath, value, msg)) + } + if len(allErrs) != 0 { + return allErrs } if len(strings.Split(value, "/")) == 1 { @@ -2038,8 +2414,11 @@ func validateResourceQuotaResourceName(value string, fldPath *field.Path) field. // Validate limit range types func validateLimitRangeTypeName(value string, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} - if !validation.IsQualifiedName(value) { - return append(allErrs, field.Invalid(fldPath, value, qualifiedNameErrorMsg)) + for _, msg := range validation.IsQualifiedName(value) { + allErrs = append(allErrs, field.Invalid(fldPath, value, msg)) + } + if len(allErrs) != 0 { + return allErrs } if len(strings.Split(value, "/")) == 1 { @@ -2270,7 +2649,7 @@ func ValidateSecret(secret *api.Secret) field.ErrorList { if _, exists := secret.Data[api.TLSPrivateKeyKey]; !exists { allErrs = append(allErrs, field.Required(dataPath.Key(api.TLSPrivateKeyKey), "")) } - // TODO: Verify that the key matches the cert. + // TODO: Verify that the key matches the cert. default: // no-op } @@ -2295,9 +2674,7 @@ func ValidateSecretUpdate(newSecret, oldSecret *api.Secret) field.ErrorList { // ValidateConfigMapName can be used to check whether the given ConfigMap name is valid. // Prefix indicates this name will be used as part of generation, in which case // trailing dashes are allowed. -func ValidateConfigMapName(name string, prefix bool) (bool, string) { - return NameIsDNSSubdomain(name, prefix) -} +var ValidateConfigMapName = NameIsDNSSubdomain // ValidateConfigMap tests whether required fields in the ConfigMap are set. func ValidateConfigMap(cfg *api.ConfigMap) field.ErrorList { @@ -2339,6 +2716,7 @@ func validateBasicResource(quantity resource.Quantity, fldPath *field.Path) fiel func ValidateResourceRequirements(requirements *api.ResourceRequirements, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} limPath := fldPath.Child("limits") + reqPath := fldPath.Child("requests") for resourceName, quantity := range requirements.Limits { fldPath := limPath.Key(string(resourceName)) // Validate resource name. @@ -2349,12 +2727,14 @@ func ValidateResourceRequirements(requirements *api.ResourceRequirements, fldPat // Check that request <= limit. requestQuantity, exists := requirements.Requests[resourceName] if exists { - if quantity.Cmp(requestQuantity) < 0 { + // For GPUs, require that no request be set. + if resourceName == api.ResourceNvidiaGPU { + allErrs = append(allErrs, field.Invalid(reqPath, requestQuantity.String(), "cannot be set")) + } else if quantity.Cmp(requestQuantity) < 0 { allErrs = append(allErrs, field.Invalid(fldPath, quantity.String(), "must be greater than or equal to request")) } } } - reqPath := fldPath.Child("requests") for resourceName, quantity := range requirements.Requests { fldPath := reqPath.Key(string(resourceName)) // Validate resource name. 
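A recurring shape across these hunks is the migration of validators from a (bool, message) return to a []string of messages, with each call site looping over the slice and bailing out only after recording every message (validateResourceName and validateLimitRangeTypeName above, and validateFinalizerName below, all gain the same loop-then-early-return form). A compact sketch of both halves of that contract; isQualifiedName here is a simplified, hypothetical stand-in for validation.IsQualifiedName, not its real rules:

package main

import (
	"fmt"
	"regexp"
	"strings"
)

var nameFmt = regexp.MustCompile(`^[A-Za-z0-9]([-A-Za-z0-9_.]*[A-Za-z0-9])?$`)

// isQualifiedName is a hypothetical stand-in for validation.IsQualifiedName:
// it returns every violation as a message instead of a single bool.
func isQualifiedName(v string) []string {
	var msgs []string
	if len(v) > 253 {
		msgs = append(msgs, "must be no more than 253 characters")
	}
	name := v
	if i := strings.LastIndex(v, "/"); i != -1 {
		name = v[i+1:] // ignore the prefix for this sketch
	}
	if !nameFmt.MatchString(name) {
		msgs = append(msgs, "must consist of alphanumerics, '-', '_' or '.'")
	}
	return msgs
}

// validateName shows the caller-side loop the diff introduces:
// append one error per message, then return early if any were found.
func validateName(v string) []string {
	var allErrs []string
	for _, msg := range isQualifiedName(v) {
		allErrs = append(allErrs, fmt.Sprintf("name: Invalid value: %q: %s", v, msg))
	}
	if len(allErrs) != 0 {
		return allErrs
	}
	// ...further, name-specific checks would follow here...
	return allErrs
}

func main() {
	fmt.Println(validateName("example.com/valid-name")) // []
	fmt.Println(validateName("bad name!"))              // one message
}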
@@ -2506,8 +2886,11 @@ func ValidateNamespace(namespace *api.Namespace) field.ErrorList { // Validate finalizer names func validateFinalizerName(stringValue string, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} - if !validation.IsQualifiedName(stringValue) { - return append(allErrs, field.Invalid(fldPath, stringValue, qualifiedNameErrorMsg)) + for _, msg := range validation.IsQualifiedName(stringValue) { + allErrs = append(allErrs, field.Invalid(fldPath, stringValue, msg)) + } + if len(allErrs) != 0 { + return allErrs } if len(strings.Split(stringValue, "/")) == 1 { @@ -2584,6 +2967,9 @@ func validateEndpointSubsets(subsets []api.EndpointSubset, fldPath *field.Path) for addr := range ss.Addresses { allErrs = append(allErrs, validateEndpointAddress(&ss.Addresses[addr], idxPath.Child("addresses").Index(addr))...) } + for addr := range ss.NotReadyAddresses { + allErrs = append(allErrs, validateEndpointAddress(&ss.NotReadyAddresses[addr], idxPath.Child("notReadyAddresses").Index(addr))...) + } for port := range ss.Ports { allErrs = append(allErrs, validateEndpointPort(&ss.Ports[port], len(ss.Ports) > 1, idxPath.Child("ports").Index(port))...) } @@ -2594,8 +2980,15 @@ func validateEndpointSubsets(subsets []api.EndpointSubset, fldPath *field.Path) func validateEndpointAddress(address *api.EndpointAddress, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} - if !validation.IsValidIPv4(address.IP) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("ip"), address.IP, "must be a valid IPv4 address")) + if !validation.IsValidIP(address.IP) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("ip"), address.IP, "must be a valid IP address")) + } + if len(address.Hostname) > 0 { + for _, msg := range validation.IsDNS1123Label(address.Hostname) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("hostname"), address.Hostname, msg)) + } + } + if len(allErrs) > 0 { return allErrs } return validateIpIsNotLinkLocalOrLoopback(address.IP, fldPath.Child("ip")) @@ -2627,11 +3020,11 @@ func validateEndpointPort(port *api.EndpointPort, requireName bool, fldPath *fie if requireName && len(port.Name) == 0 { allErrs = append(allErrs, field.Required(fldPath.Child("name"), "")) } else if len(port.Name) != 0 { - if !validation.IsDNS1123Label(port.Name) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), port.Name, DNS1123LabelErrorMsg)) + for _, msg := range validation.IsDNS1123Label(port.Name) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), port.Name, msg)) } } - if !validation.IsValidPortNum(port.Port) { + if !validation.IsValidPortNum(int(port.Port)) { allErrs = append(allErrs, field.Invalid(fldPath.Child("port"), port.Port, PortRangeErrorMsg)) } if len(port.Protocol) == 0 { @@ -2702,8 +3095,8 @@ func ValidateLoadBalancerStatus(status *api.LoadBalancerStatus, fldPath *field.P } } if len(ingress.Hostname) > 0 { - if valid, errMsg := NameIsDNSSubdomain(ingress.Hostname, false); !valid { - allErrs = append(allErrs, field.Invalid(idxPath.Child("hostname"), ingress.Hostname, errMsg)) + for _, msg := range validation.IsDNS1123Subdomain(ingress.Hostname) { + allErrs = append(allErrs, field.Invalid(idxPath.Child("hostname"), ingress.Hostname, msg)) } if isIP := (net.ParseIP(ingress.Hostname) != nil); isIP { allErrs = append(allErrs, field.Invalid(idxPath.Child("hostname"), ingress.Hostname, "must be a DNS name, not an IP address")) @@ -2724,7 +3117,7 @@ func isValidHostnamesMap(serializedPodHostNames string) bool { } for ip, 
hostRecord := range podHostNames { - if !validation.IsDNS1123Label(hostRecord.HostName) { + if len(validation.IsDNS1123Label(hostRecord.HostName)) != 0 { return false } if net.ParseIP(ip) == nil { diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/validation/validation_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/validation/validation_test.go new file mode 100644 index 000000000000..9e7bbdb23de5 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/validation/validation_test.go @@ -0,0 +1,6127 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package validation + +import ( + "math/rand" + "reflect" + "strings" + "testing" + "time" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/resource" + "k8s.io/kubernetes/pkg/api/service" + "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/capabilities" + "k8s.io/kubernetes/pkg/util/intstr" + "k8s.io/kubernetes/pkg/util/sets" + "k8s.io/kubernetes/pkg/util/validation/field" +) + +func expectPrefix(t *testing.T, prefix string, errs field.ErrorList) { + for i := range errs { + if f, p := errs[i].Field, prefix; !strings.HasPrefix(f, p) { + t.Errorf("expected prefix '%s' for field '%s' (%v)", p, f, errs[i]) + } + } +} + +// Ensure custom name functions are allowed +func TestValidateObjectMetaCustomName(t *testing.T) { + errs := ValidateObjectMeta( + &api.ObjectMeta{Name: "test", GenerateName: "foo"}, + false, + func(s string, prefix bool) []string { + if s == "test" { + return nil + } + return []string{"name-gen"} + }, + field.NewPath("field")) + if len(errs) != 1 { + t.Fatalf("unexpected errors: %v", errs) + } + if !strings.Contains(errs[0].Error(), "name-gen") { + t.Errorf("unexpected error message: %v", errs) + } +} + +// Ensure namespace names follow dns label format +func TestValidateObjectMetaNamespaces(t *testing.T) { + errs := ValidateObjectMeta( + &api.ObjectMeta{Name: "test", Namespace: "foo.bar"}, + true, + func(s string, prefix bool) []string { + return nil + }, + field.NewPath("field")) + if len(errs) != 1 { + t.Fatalf("unexpected errors: %v", errs) + } + if !strings.Contains(errs[0].Error(), `Invalid value: "foo.bar"`) { + t.Errorf("unexpected error message: %v", errs) + } + maxLength := 63 + letters := []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789") + b := make([]rune, maxLength+1) + for i := range b { + b[i] = letters[rand.Intn(len(letters))] + } + errs = ValidateObjectMeta( + &api.ObjectMeta{Name: "test", Namespace: string(b)}, + true, + func(s string, prefix bool) []string { + return nil + }, + field.NewPath("field")) + if len(errs) != 2 { + t.Fatalf("unexpected errors: %v", errs) + } + if !strings.Contains(errs[0].Error(), "Invalid value") || !strings.Contains(errs[1].Error(), "Invalid value") { + t.Errorf("unexpected error message: %v", errs) + } +} + +func TestValidateObjectMetaOwnerReferences(t *testing.T) { + testCases := []struct { + ownerReferences 
[]api.OwnerReference + expectError bool + }{ + { + []api.OwnerReference{ + { + APIVersion: "thirdpartyVersion", + Kind: "thirdpartyKind", + Name: "name", + UID: "1", + }, + }, + false, + }, + { + // event shouldn't be set as an owner + []api.OwnerReference{ + { + APIVersion: "v1", + Kind: "Event", + Name: "name", + UID: "1", + }, + }, + true, + }, + } + + for _, tc := range testCases { + errs := ValidateObjectMeta( + &api.ObjectMeta{Name: "test", Namespace: "test", OwnerReferences: tc.ownerReferences}, + true, + func(s string, prefix bool) []string { + return nil + }, + field.NewPath("field")) + if len(errs) != 0 && !tc.expectError { + t.Errorf("unexpected error: %v", errs) + } + if len(errs) == 0 && tc.expectError { + t.Errorf("expect error") + } + if len(errs) != 0 && !strings.Contains(errs[0].Error(), "is disallowed from being an owner") { + t.Errorf("unexpected error message: %v", errs) + } + } +} + +func TestValidateObjectMetaUpdateIgnoresCreationTimestamp(t *testing.T) { + if errs := ValidateObjectMetaUpdate( + &api.ObjectMeta{Name: "test", ResourceVersion: "1"}, + &api.ObjectMeta{Name: "test", ResourceVersion: "1", CreationTimestamp: unversioned.NewTime(time.Unix(10, 0))}, + field.NewPath("field"), + ); len(errs) != 0 { + t.Fatalf("unexpected errors: %v", errs) + } + if errs := ValidateObjectMetaUpdate( + &api.ObjectMeta{Name: "test", ResourceVersion: "1", CreationTimestamp: unversioned.NewTime(time.Unix(10, 0))}, + &api.ObjectMeta{Name: "test", ResourceVersion: "1"}, + field.NewPath("field"), + ); len(errs) != 0 { + t.Fatalf("unexpected errors: %v", errs) + } + if errs := ValidateObjectMetaUpdate( + &api.ObjectMeta{Name: "test", ResourceVersion: "1", CreationTimestamp: unversioned.NewTime(time.Unix(10, 0))}, + &api.ObjectMeta{Name: "test", ResourceVersion: "1", CreationTimestamp: unversioned.NewTime(time.Unix(11, 0))}, + field.NewPath("field"), + ); len(errs) != 0 { + t.Fatalf("unexpected errors: %v", errs) + } +} + +func TestValidateObjectMetaUpdatePreventsDeletionFieldMutation(t *testing.T) { + now := unversioned.NewTime(time.Unix(1000, 0).UTC()) + later := unversioned.NewTime(time.Unix(2000, 0).UTC()) + gracePeriodShort := int64(30) + gracePeriodLong := int64(40) + + testcases := map[string]struct { + Old api.ObjectMeta + New api.ObjectMeta + ExpectedNew api.ObjectMeta + ExpectedErrs []string + }{ + "valid without deletion fields": { + Old: api.ObjectMeta{Name: "test", ResourceVersion: "1"}, + New: api.ObjectMeta{Name: "test", ResourceVersion: "1"}, + ExpectedNew: api.ObjectMeta{Name: "test", ResourceVersion: "1"}, + ExpectedErrs: []string{}, + }, + "valid with deletion fields": { + Old: api.ObjectMeta{Name: "test", ResourceVersion: "1", DeletionTimestamp: &now, DeletionGracePeriodSeconds: &gracePeriodShort}, + New: api.ObjectMeta{Name: "test", ResourceVersion: "1", DeletionTimestamp: &now, DeletionGracePeriodSeconds: &gracePeriodShort}, + ExpectedNew: api.ObjectMeta{Name: "test", ResourceVersion: "1", DeletionTimestamp: &now, DeletionGracePeriodSeconds: &gracePeriodShort}, + ExpectedErrs: []string{}, + }, + + "invalid set deletionTimestamp": { + Old: api.ObjectMeta{Name: "test", ResourceVersion: "1"}, + New: api.ObjectMeta{Name: "test", ResourceVersion: "1", DeletionTimestamp: &now}, + ExpectedNew: api.ObjectMeta{Name: "test", ResourceVersion: "1", DeletionTimestamp: &now}, + ExpectedErrs: []string{"field.deletionTimestamp: Invalid value: \"1970-01-01T00:16:40Z\": field is immutable; may only be changed via deletion"}, + }, + "invalid clear deletionTimestamp": { + Old: 
api.ObjectMeta{Name: "test", ResourceVersion: "1", DeletionTimestamp: &now}, + New: api.ObjectMeta{Name: "test", ResourceVersion: "1"}, + ExpectedNew: api.ObjectMeta{Name: "test", ResourceVersion: "1", DeletionTimestamp: &now}, + ExpectedErrs: []string{}, // no errors, validation copies the old value + }, + "invalid change deletionTimestamp": { + Old: api.ObjectMeta{Name: "test", ResourceVersion: "1", DeletionTimestamp: &now}, + New: api.ObjectMeta{Name: "test", ResourceVersion: "1", DeletionTimestamp: &later}, + ExpectedNew: api.ObjectMeta{Name: "test", ResourceVersion: "1", DeletionTimestamp: &now}, + ExpectedErrs: []string{}, // no errors, validation copies the old value + }, + + "invalid set deletionGracePeriodSeconds": { + Old: api.ObjectMeta{Name: "test", ResourceVersion: "1"}, + New: api.ObjectMeta{Name: "test", ResourceVersion: "1", DeletionGracePeriodSeconds: &gracePeriodShort}, + ExpectedNew: api.ObjectMeta{Name: "test", ResourceVersion: "1", DeletionGracePeriodSeconds: &gracePeriodShort}, + ExpectedErrs: []string{"field.deletionGracePeriodSeconds: Invalid value: 30: field is immutable; may only be changed via deletion"}, + }, + "invalid clear deletionGracePeriodSeconds": { + Old: api.ObjectMeta{Name: "test", ResourceVersion: "1", DeletionGracePeriodSeconds: &gracePeriodShort}, + New: api.ObjectMeta{Name: "test", ResourceVersion: "1"}, + ExpectedNew: api.ObjectMeta{Name: "test", ResourceVersion: "1", DeletionGracePeriodSeconds: &gracePeriodShort}, + ExpectedErrs: []string{}, // no errors, validation copies the old value + }, + "invalid change deletionGracePeriodSeconds": { + Old: api.ObjectMeta{Name: "test", ResourceVersion: "1", DeletionGracePeriodSeconds: &gracePeriodShort}, + New: api.ObjectMeta{Name: "test", ResourceVersion: "1", DeletionGracePeriodSeconds: &gracePeriodLong}, + ExpectedNew: api.ObjectMeta{Name: "test", ResourceVersion: "1", DeletionGracePeriodSeconds: &gracePeriodLong}, + ExpectedErrs: []string{"field.deletionGracePeriodSeconds: Invalid value: 40: field is immutable; may only be changed via deletion"}, + }, + } + + for k, tc := range testcases { + errs := ValidateObjectMetaUpdate(&tc.New, &tc.Old, field.NewPath("field")) + if len(errs) != len(tc.ExpectedErrs) { + t.Logf("%s: Expected: %#v", k, tc.ExpectedErrs) + t.Logf("%s: Got: %#v", k, errs) + t.Errorf("%s: expected %d errors, got %d", k, len(tc.ExpectedErrs), len(errs)) + continue + } + for i := range errs { + if errs[i].Error() != tc.ExpectedErrs[i] { + t.Errorf("%s: error #%d: expected %q, got %q", k, i, tc.ExpectedErrs[i], errs[i].Error()) + } + } + if !reflect.DeepEqual(tc.New, tc.ExpectedNew) { + t.Errorf("%s: Expected after validation:\n%#v\ngot\n%#v", k, tc.ExpectedNew, tc.New) + } + } +} + +// Ensure trailing slash is allowed in generate name +func TestValidateObjectMetaTrimsTrailingSlash(t *testing.T) { + errs := ValidateObjectMeta( + &api.ObjectMeta{Name: "test", GenerateName: "foo-"}, + false, + NameIsDNSSubdomain, + field.NewPath("field")) + if len(errs) != 0 { + t.Fatalf("unexpected errors: %v", errs) + } +} + +func TestValidateAnnotations(t *testing.T) { + successCases := []map[string]string{ + {"simple": "bar"}, + {"now-with-dashes": "bar"}, + {"1-starts-with-num": "bar"}, + {"1234": "bar"}, + {"simple/simple": "bar"}, + {"now-with-dashes/simple": "bar"}, + {"now-with-dashes/now-with-dashes": "bar"}, + {"now.with.dots/simple": "bar"}, + {"now-with.dashes-and.dots/simple": "bar"}, + {"1-num.2-num/3-num": "bar"}, + {"1234/5678": "bar"}, + {"1.2.3.4/5678": "bar"}, + {"UpperCase123": "bar"}, + 
{"a": strings.Repeat("b", totalAnnotationSizeLimitB-1)}, + { + "a": strings.Repeat("b", totalAnnotationSizeLimitB/2-1), + "c": strings.Repeat("d", totalAnnotationSizeLimitB/2-1), + }, + } + for i := range successCases { + errs := ValidateAnnotations(successCases[i], field.NewPath("field")) + if len(errs) != 0 { + t.Errorf("case[%d] expected success, got %#v", i, errs) + } + } + + nameErrorCases := []struct { + annotations map[string]string + expect string + }{ + {map[string]string{"nospecialchars^=@": "bar"}, "must match the regex"}, + {map[string]string{"cantendwithadash-": "bar"}, "must match the regex"}, + {map[string]string{"only/one/slash": "bar"}, "must match the regex"}, + {map[string]string{strings.Repeat("a", 254): "bar"}, "must be no more than"}, + } + for i := range nameErrorCases { + errs := ValidateAnnotations(nameErrorCases[i].annotations, field.NewPath("field")) + if len(errs) != 1 { + t.Errorf("case[%d]: expected failure", i) + } else { + if !strings.Contains(errs[0].Detail, nameErrorCases[i].expect) { + t.Errorf("case[%d]: error details do not include %q: %q", i, nameErrorCases[i].expect, errs[0].Detail) + } + } + } + totalSizeErrorCases := []map[string]string{ + {"a": strings.Repeat("b", totalAnnotationSizeLimitB)}, + { + "a": strings.Repeat("b", totalAnnotationSizeLimitB/2), + "c": strings.Repeat("d", totalAnnotationSizeLimitB/2), + }, + } + for i := range totalSizeErrorCases { + errs := ValidateAnnotations(totalSizeErrorCases[i], field.NewPath("field")) + if len(errs) != 1 { + t.Errorf("case[%d] expected failure", i) + } + } +} + +func testVolume(name string, namespace string, spec api.PersistentVolumeSpec) *api.PersistentVolume { + objMeta := api.ObjectMeta{Name: name} + if namespace != "" { + objMeta.Namespace = namespace + } + + return &api.PersistentVolume{ + ObjectMeta: objMeta, + Spec: spec, + } +} + +func TestValidatePersistentVolumes(t *testing.T) { + scenarios := map[string]struct { + isExpectedFailure bool + volume *api.PersistentVolume + }{ + "good-volume": { + isExpectedFailure: false, + volume: testVolume("foo", "", api.PersistentVolumeSpec{ + Capacity: api.ResourceList{ + api.ResourceName(api.ResourceStorage): resource.MustParse("10G"), + }, + AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce}, + PersistentVolumeSource: api.PersistentVolumeSource{ + HostPath: &api.HostPathVolumeSource{Path: "/foo"}, + }, + }), + }, + "invalid-accessmode": { + isExpectedFailure: true, + volume: testVolume("foo", "", api.PersistentVolumeSpec{ + Capacity: api.ResourceList{ + api.ResourceName(api.ResourceStorage): resource.MustParse("10G"), + }, + AccessModes: []api.PersistentVolumeAccessMode{"fakemode"}, + PersistentVolumeSource: api.PersistentVolumeSource{ + HostPath: &api.HostPathVolumeSource{Path: "/foo"}, + }, + }), + }, + "unexpected-namespace": { + isExpectedFailure: true, + volume: testVolume("foo", "unexpected-namespace", api.PersistentVolumeSpec{ + Capacity: api.ResourceList{ + api.ResourceName(api.ResourceStorage): resource.MustParse("10G"), + }, + AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce}, + PersistentVolumeSource: api.PersistentVolumeSource{ + HostPath: &api.HostPathVolumeSource{Path: "/foo"}, + }, + }), + }, + "bad-name": { + isExpectedFailure: true, + volume: testVolume("123*Bad(Name", "unexpected-namespace", api.PersistentVolumeSpec{ + Capacity: api.ResourceList{ + api.ResourceName(api.ResourceStorage): resource.MustParse("10G"), + }, + AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce}, + 
PersistentVolumeSource: api.PersistentVolumeSource{ + HostPath: &api.HostPathVolumeSource{Path: "/foo"}, + }, + }), + }, + "missing-name": { + isExpectedFailure: true, + volume: testVolume("", "", api.PersistentVolumeSpec{ + Capacity: api.ResourceList{ + api.ResourceName(api.ResourceStorage): resource.MustParse("10G"), + }, + AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce}, + }), + }, + "missing-capacity": { + isExpectedFailure: true, + volume: testVolume("foo", "", api.PersistentVolumeSpec{}), + }, + "missing-accessmodes": { + isExpectedFailure: true, + volume: testVolume("goodname", "missing-accessmodes", api.PersistentVolumeSpec{ + Capacity: api.ResourceList{ + api.ResourceName(api.ResourceStorage): resource.MustParse("10G"), + }, + PersistentVolumeSource: api.PersistentVolumeSource{ + HostPath: &api.HostPathVolumeSource{Path: "/foo"}, + }, + }), + }, + "too-many-sources": { + isExpectedFailure: true, + volume: testVolume("", "", api.PersistentVolumeSpec{ + Capacity: api.ResourceList{ + api.ResourceName(api.ResourceStorage): resource.MustParse("5G"), + }, + PersistentVolumeSource: api.PersistentVolumeSource{ + HostPath: &api.HostPathVolumeSource{Path: "/foo"}, + GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{PDName: "foo", FSType: "ext4"}, + }, + }), + }, + } + + for name, scenario := range scenarios { + errs := ValidatePersistentVolume(scenario.volume) + if len(errs) == 0 && scenario.isExpectedFailure { + t.Errorf("Unexpected success for scenario: %s", name) + } + if len(errs) > 0 && !scenario.isExpectedFailure { + t.Errorf("Unexpected failure for scenario: %s - %+v", name, errs) + } + } + +} + +func testVolumeClaim(name string, namespace string, spec api.PersistentVolumeClaimSpec) *api.PersistentVolumeClaim { + return &api.PersistentVolumeClaim{ + ObjectMeta: api.ObjectMeta{Name: name, Namespace: namespace}, + Spec: spec, + } +} + +func TestValidatePersistentVolumeClaim(t *testing.T) { + scenarios := map[string]struct { + isExpectedFailure bool + claim *api.PersistentVolumeClaim + }{ + "good-claim": { + isExpectedFailure: false, + claim: testVolumeClaim("foo", "ns", api.PersistentVolumeClaimSpec{ + AccessModes: []api.PersistentVolumeAccessMode{ + api.ReadWriteOnce, + api.ReadOnlyMany, + }, + Resources: api.ResourceRequirements{ + Requests: api.ResourceList{ + api.ResourceName(api.ResourceStorage): resource.MustParse("10G"), + }, + }, + }), + }, + "invalid-accessmode": { + isExpectedFailure: true, + claim: testVolumeClaim("foo", "ns", api.PersistentVolumeClaimSpec{ + AccessModes: []api.PersistentVolumeAccessMode{"fakemode"}, + Resources: api.ResourceRequirements{ + Requests: api.ResourceList{ + api.ResourceName(api.ResourceStorage): resource.MustParse("10G"), + }, + }, + }), + }, + "missing-namespace": { + isExpectedFailure: true, + claim: testVolumeClaim("foo", "", api.PersistentVolumeClaimSpec{ + AccessModes: []api.PersistentVolumeAccessMode{ + api.ReadWriteOnce, + api.ReadOnlyMany, + }, + Resources: api.ResourceRequirements{ + Requests: api.ResourceList{ + api.ResourceName(api.ResourceStorage): resource.MustParse("10G"), + }, + }, + }), + }, + "no-access-modes": { + isExpectedFailure: true, + claim: testVolumeClaim("foo", "ns", api.PersistentVolumeClaimSpec{ + Resources: api.ResourceRequirements{ + Requests: api.ResourceList{ + api.ResourceName(api.ResourceStorage): resource.MustParse("10G"), + }, + }, + }), + }, + "no-resource-requests": { + isExpectedFailure: true, + claim: testVolumeClaim("foo", "ns", api.PersistentVolumeClaimSpec{ + AccessModes: 
[]api.PersistentVolumeAccessMode{ + api.ReadWriteOnce, + }, + }), + }, + "invalid-resource-requests": { + isExpectedFailure: true, + claim: testVolumeClaim("foo", "ns", api.PersistentVolumeClaimSpec{ + AccessModes: []api.PersistentVolumeAccessMode{ + api.ReadWriteOnce, + }, + Resources: api.ResourceRequirements{ + Requests: api.ResourceList{ + api.ResourceName(api.ResourceMemory): resource.MustParse("10G"), + }, + }, + }), + }, + } + + for name, scenario := range scenarios { + errs := ValidatePersistentVolumeClaim(scenario.claim) + if len(errs) == 0 && scenario.isExpectedFailure { + t.Errorf("Unexpected success for scenario: %s", name) + } + if len(errs) > 0 && !scenario.isExpectedFailure { + t.Errorf("Unexpected failure for scenario: %s - %+v", name, errs) + } + } +} + +func TestValidatePersistentVolumeClaimUpdate(t *testing.T) { + validClaim := testVolumeClaim("foo", "ns", api.PersistentVolumeClaimSpec{ + AccessModes: []api.PersistentVolumeAccessMode{ + api.ReadWriteOnce, + api.ReadOnlyMany, + }, + Resources: api.ResourceRequirements{ + Requests: api.ResourceList{ + api.ResourceName(api.ResourceStorage): resource.MustParse("10G"), + }, + }, + }) + validUpdateClaim := testVolumeClaim("foo", "ns", api.PersistentVolumeClaimSpec{ + AccessModes: []api.PersistentVolumeAccessMode{ + api.ReadWriteOnce, + api.ReadOnlyMany, + }, + Resources: api.ResourceRequirements{ + Requests: api.ResourceList{ + api.ResourceName(api.ResourceStorage): resource.MustParse("10G"), + }, + }, + VolumeName: "volume", + }) + invalidUpdateClaimResources := testVolumeClaim("foo", "ns", api.PersistentVolumeClaimSpec{ + AccessModes: []api.PersistentVolumeAccessMode{ + api.ReadWriteOnce, + api.ReadOnlyMany, + }, + Resources: api.ResourceRequirements{ + Requests: api.ResourceList{ + api.ResourceName(api.ResourceStorage): resource.MustParse("20G"), + }, + }, + VolumeName: "volume", + }) + invalidUpdateClaimAccessModes := testVolumeClaim("foo", "ns", api.PersistentVolumeClaimSpec{ + AccessModes: []api.PersistentVolumeAccessMode{ + api.ReadWriteOnce, + }, + Resources: api.ResourceRequirements{ + Requests: api.ResourceList{ + api.ResourceName(api.ResourceStorage): resource.MustParse("10G"), + }, + }, + VolumeName: "volume", + }) + scenarios := map[string]struct { + isExpectedFailure bool + oldClaim *api.PersistentVolumeClaim + newClaim *api.PersistentVolumeClaim + }{ + "valid-update": { + isExpectedFailure: false, + oldClaim: validClaim, + newClaim: validUpdateClaim, + }, + "invalid-update-change-resources-on-bound-claim": { + isExpectedFailure: true, + oldClaim: validUpdateClaim, + newClaim: invalidUpdateClaimResources, + }, + "invalid-update-change-access-modes-on-bound-claim": { + isExpectedFailure: true, + oldClaim: validUpdateClaim, + newClaim: invalidUpdateClaimAccessModes, + }, + } + + for name, scenario := range scenarios { + // ensure we have a resource version specified for updates + scenario.oldClaim.ResourceVersion = "1" + scenario.newClaim.ResourceVersion = "1" + errs := ValidatePersistentVolumeClaimUpdate(scenario.newClaim, scenario.oldClaim) + if len(errs) == 0 && scenario.isExpectedFailure { + t.Errorf("Unexpected success for scenario: %s", name) + } + if len(errs) > 0 && !scenario.isExpectedFailure { + t.Errorf("Unexpected failure for scenario: %s - %+v", name, errs) + } + } +} + +func TestValidateVolumes(t *testing.T) { + lun := int32(1) + successCase := []api.Volume{ + {Name: "abc", VolumeSource: api.VolumeSource{HostPath: &api.HostPathVolumeSource{Path: "/mnt/path1"}}}, + {Name: "123", VolumeSource: 
api.VolumeSource{HostPath: &api.HostPathVolumeSource{Path: "/mnt/path2"}}}, + {Name: "abc-123", VolumeSource: api.VolumeSource{HostPath: &api.HostPathVolumeSource{Path: "/mnt/path3"}}}, + {Name: "empty", VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{}}}, + {Name: "gcepd", VolumeSource: api.VolumeSource{GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{PDName: "my-PD", FSType: "ext4", Partition: 1, ReadOnly: false}}}, + {Name: "awsebs", VolumeSource: api.VolumeSource{AWSElasticBlockStore: &api.AWSElasticBlockStoreVolumeSource{VolumeID: "my-PD", FSType: "ext4", Partition: 1, ReadOnly: false}}}, + {Name: "gitrepo", VolumeSource: api.VolumeSource{GitRepo: &api.GitRepoVolumeSource{Repository: "my-repo", Revision: "hashstring", Directory: "target"}}}, + {Name: "gitrepodot", VolumeSource: api.VolumeSource{GitRepo: &api.GitRepoVolumeSource{Repository: "my-repo", Directory: "."}}}, + {Name: "iscsidisk", VolumeSource: api.VolumeSource{ISCSI: &api.ISCSIVolumeSource{TargetPortal: "127.0.0.1", IQN: "iqn.2015-02.example.com:test", Lun: 1, FSType: "ext4", ReadOnly: false}}}, + {Name: "secret", VolumeSource: api.VolumeSource{Secret: &api.SecretVolumeSource{SecretName: "my-secret"}}}, + {Name: "glusterfs", VolumeSource: api.VolumeSource{Glusterfs: &api.GlusterfsVolumeSource{EndpointsName: "host1", Path: "path", ReadOnly: false}}}, + {Name: "flocker", VolumeSource: api.VolumeSource{Flocker: &api.FlockerVolumeSource{DatasetName: "datasetName"}}}, + {Name: "rbd", VolumeSource: api.VolumeSource{RBD: &api.RBDVolumeSource{CephMonitors: []string{"foo"}, RBDImage: "bar", FSType: "ext4"}}}, + {Name: "cinder", VolumeSource: api.VolumeSource{Cinder: &api.CinderVolumeSource{VolumeID: "29ea5088-4f60-4757-962e-dba678767887", FSType: "ext4", ReadOnly: false}}}, + {Name: "cephfs", VolumeSource: api.VolumeSource{CephFS: &api.CephFSVolumeSource{Monitors: []string{"foo"}}}}, + {Name: "downwardapi", VolumeSource: api.VolumeSource{DownwardAPI: &api.DownwardAPIVolumeSource{Items: []api.DownwardAPIVolumeFile{ + {Path: "labels", FieldRef: &api.ObjectFieldSelector{ + APIVersion: "v1", + FieldPath: "metadata.labels"}}, + {Path: "annotations", FieldRef: &api.ObjectFieldSelector{ + APIVersion: "v1", + FieldPath: "metadata.annotations"}}, + {Path: "namespace", FieldRef: &api.ObjectFieldSelector{ + APIVersion: "v1", + FieldPath: "metadata.namespace"}}, + {Path: "name", FieldRef: &api.ObjectFieldSelector{ + APIVersion: "v1", + FieldPath: "metadata.name"}}, + {Path: "path/withslash/andslash", FieldRef: &api.ObjectFieldSelector{ + APIVersion: "v1", + FieldPath: "metadata.labels"}}, + {Path: "path/./withdot", FieldRef: &api.ObjectFieldSelector{ + APIVersion: "v1", + FieldPath: "metadata.labels"}}, + {Path: "path/with..dot", FieldRef: &api.ObjectFieldSelector{ + APIVersion: "v1", + FieldPath: "metadata.labels"}}, + {Path: "second-level-dirent-can-have/..dot", FieldRef: &api.ObjectFieldSelector{ + APIVersion: "v1", + FieldPath: "metadata.labels"}}, + {Path: "cpu_limit", ResourceFieldRef: &api.ResourceFieldSelector{ + ContainerName: "test-container", + Resource: "limits.cpu"}}, + {Path: "cpu_request", ResourceFieldRef: &api.ResourceFieldSelector{ + ContainerName: "test-container", + Resource: "requests.cpu"}}, + {Path: "memory_limit", ResourceFieldRef: &api.ResourceFieldSelector{ + ContainerName: "test-container", + Resource: "limits.memory"}}, + {Path: "memory_request", ResourceFieldRef: &api.ResourceFieldSelector{ + ContainerName: "test-container", + Resource: "requests.memory"}}, + }}}}, + {Name: "fc", 
VolumeSource: api.VolumeSource{FC: &api.FCVolumeSource{TargetWWNs: []string{"some_wwn"}, Lun: &lun, FSType: "ext4", ReadOnly: false}}}, + {Name: "flexvolume", VolumeSource: api.VolumeSource{FlexVolume: &api.FlexVolumeSource{Driver: "kubernetes.io/blue", FSType: "ext4"}}}, + {Name: "azure", VolumeSource: api.VolumeSource{AzureFile: &api.AzureFileVolumeSource{SecretName: "key", ShareName: "share", ReadOnly: false}}}, + } + names, errs := validateVolumes(successCase, field.NewPath("field")) + if len(errs) != 0 { + t.Errorf("expected success: %v", errs) + } + if len(names) != len(successCase) || !names.HasAll("abc", "123", "abc-123", "empty", "gcepd", "gitrepo", "secret", "iscsidisk", "cinder", "cephfs", "flexvolume", "fc") { + t.Errorf("wrong names result: %v", names) + } + emptyVS := api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{}} + emptyPortal := api.VolumeSource{ISCSI: &api.ISCSIVolumeSource{TargetPortal: "", IQN: "iqn.2015-02.example.com:test", Lun: 1, FSType: "ext4", ReadOnly: false}} + emptyIQN := api.VolumeSource{ISCSI: &api.ISCSIVolumeSource{TargetPortal: "127.0.0.1", IQN: "", Lun: 1, FSType: "ext4", ReadOnly: false}} + emptyHosts := api.VolumeSource{Glusterfs: &api.GlusterfsVolumeSource{EndpointsName: "", Path: "path", ReadOnly: false}} + emptyPath := api.VolumeSource{Glusterfs: &api.GlusterfsVolumeSource{EndpointsName: "host", Path: "", ReadOnly: false}} + emptyName := api.VolumeSource{Flocker: &api.FlockerVolumeSource{DatasetName: ""}} + emptyMon := api.VolumeSource{RBD: &api.RBDVolumeSource{CephMonitors: []string{}, RBDImage: "bar", FSType: "ext4"}} + emptyImage := api.VolumeSource{RBD: &api.RBDVolumeSource{CephMonitors: []string{"foo"}, RBDImage: "", FSType: "ext4"}} + emptyCephFSMon := api.VolumeSource{CephFS: &api.CephFSVolumeSource{Monitors: []string{}}} + startsWithDots := api.VolumeSource{GitRepo: &api.GitRepoVolumeSource{Repository: "foo", Directory: "..dots/bar"}} + containsDots := api.VolumeSource{GitRepo: &api.GitRepoVolumeSource{Repository: "foo", Directory: "dots/../bar"}} + absPath := api.VolumeSource{GitRepo: &api.GitRepoVolumeSource{Repository: "foo", Directory: "/abstarget"}} + emptyPathName := api.VolumeSource{DownwardAPI: &api.DownwardAPIVolumeSource{Items: []api.DownwardAPIVolumeFile{{Path: "", + FieldRef: &api.ObjectFieldSelector{ + APIVersion: "v1", + FieldPath: "metadata.labels"}}}, + }} + absolutePathName := api.VolumeSource{DownwardAPI: &api.DownwardAPIVolumeSource{Items: []api.DownwardAPIVolumeFile{{Path: "/absolutepath", + FieldRef: &api.ObjectFieldSelector{ + APIVersion: "v1", + FieldPath: "metadata.labels"}}}, + }} + dotDotInPath := api.VolumeSource{DownwardAPI: &api.DownwardAPIVolumeSource{Items: []api.DownwardAPIVolumeFile{{Path: "../../passwd", + FieldRef: &api.ObjectFieldSelector{ + APIVersion: "v1", + FieldPath: "metadata.labels"}}}, + }} + dotDotPathName := api.VolumeSource{DownwardAPI: &api.DownwardAPIVolumeSource{Items: []api.DownwardAPIVolumeFile{{Path: "..badFileName", + FieldRef: &api.ObjectFieldSelector{ + APIVersion: "v1", + FieldPath: "metadata.labels"}}}, + }} + dotDotFirstLevelDirent := api.VolumeSource{DownwardAPI: &api.DownwardAPIVolumeSource{Items: []api.DownwardAPIVolumeFile{{Path: "..badDirName/goodFileName", + FieldRef: &api.ObjectFieldSelector{ + APIVersion: "v1", + FieldPath: "metadata.labels"}}}, + }} + fieldRefandResourceFieldRef := api.VolumeSource{DownwardAPI: &api.DownwardAPIVolumeSource{Items: []api.DownwardAPIVolumeFile{{Path: "test", + FieldRef: &api.ObjectFieldSelector{ + APIVersion: "v1", + FieldPath: 
"metadata.labels"}, + ResourceFieldRef: &api.ResourceFieldSelector{ + ContainerName: "test-container", + Resource: "requests.memory"}}}, + }} + zeroWWN := api.VolumeSource{FC: &api.FCVolumeSource{TargetWWNs: []string{}, Lun: &lun, FSType: "ext4", ReadOnly: false}} + emptyLun := api.VolumeSource{FC: &api.FCVolumeSource{TargetWWNs: []string{"wwn"}, Lun: nil, FSType: "ext4", ReadOnly: false}} + slashInName := api.VolumeSource{Flocker: &api.FlockerVolumeSource{DatasetName: "foo/bar"}} + emptyAzureSecret := api.VolumeSource{AzureFile: &api.AzureFileVolumeSource{SecretName: "", ShareName: "share", ReadOnly: false}} + emptyAzureShare := api.VolumeSource{AzureFile: &api.AzureFileVolumeSource{SecretName: "name", ShareName: "", ReadOnly: false}} + errorCases := map[string]struct { + V []api.Volume + T field.ErrorType + F string + D string + }{ + "zero-length name": { + []api.Volume{{Name: "", VolumeSource: emptyVS}}, + field.ErrorTypeRequired, + "name", "", + }, + "name > 63 characters": { + []api.Volume{{Name: strings.Repeat("a", 64), VolumeSource: emptyVS}}, + field.ErrorTypeInvalid, + "name", "must be no more than", + }, + "name not a DNS label": { + []api.Volume{{Name: "a.b.c", VolumeSource: emptyVS}}, + field.ErrorTypeInvalid, + "name", "must match the regex", + }, + "name not unique": { + []api.Volume{{Name: "abc", VolumeSource: emptyVS}, {Name: "abc", VolumeSource: emptyVS}}, + field.ErrorTypeDuplicate, + "[1].name", "", + }, + "empty portal": { + []api.Volume{{Name: "badportal", VolumeSource: emptyPortal}}, + field.ErrorTypeRequired, + "iscsi.targetPortal", "", + }, + "empty iqn": { + []api.Volume{{Name: "badiqn", VolumeSource: emptyIQN}}, + field.ErrorTypeRequired, + "iscsi.iqn", "", + }, + "empty hosts": { + []api.Volume{{Name: "badhost", VolumeSource: emptyHosts}}, + field.ErrorTypeRequired, + "glusterfs.endpoints", "", + }, + "empty path": { + []api.Volume{{Name: "badpath", VolumeSource: emptyPath}}, + field.ErrorTypeRequired, + "glusterfs.path", "", + }, + "empty datasetName": { + []api.Volume{{Name: "badname", VolumeSource: emptyName}}, + field.ErrorTypeRequired, + "flocker.datasetName", "", + }, + "empty mon": { + []api.Volume{{Name: "badmon", VolumeSource: emptyMon}}, + field.ErrorTypeRequired, + "rbd.monitors", "", + }, + "empty image": { + []api.Volume{{Name: "badimage", VolumeSource: emptyImage}}, + field.ErrorTypeRequired, + "rbd.image", "", + }, + "empty cephfs mon": { + []api.Volume{{Name: "badmon", VolumeSource: emptyCephFSMon}}, + field.ErrorTypeRequired, + "cephfs.monitors", "", + }, + "empty metatada path": { + []api.Volume{{Name: "emptyname", VolumeSource: emptyPathName}}, + field.ErrorTypeRequired, + "downwardAPI.path", "", + }, + "absolute path": { + []api.Volume{{Name: "absolutepath", VolumeSource: absolutePathName}}, + field.ErrorTypeInvalid, + "downwardAPI.path", "", + }, + "dot dot path": { + []api.Volume{{Name: "dotdotpath", VolumeSource: dotDotInPath}}, + field.ErrorTypeInvalid, + "downwardAPI.path", `must not contain '..'`, + }, + "dot dot file name": { + []api.Volume{{Name: "dotdotfilename", VolumeSource: dotDotPathName}}, + field.ErrorTypeInvalid, + "downwardAPI.path", `must not start with '..'`, + }, + "dot dot first level dirent": { + []api.Volume{{Name: "dotdotdirfilename", VolumeSource: dotDotFirstLevelDirent}}, + field.ErrorTypeInvalid, + "downwardAPI.path", `must not start with '..'`, + }, + "empty wwn": { + []api.Volume{{Name: "badimage", VolumeSource: zeroWWN}}, + field.ErrorTypeRequired, + "fc.targetWWNs", "", + }, + "empty lun": { + []api.Volume{{Name: 
"badimage", VolumeSource: emptyLun}}, + field.ErrorTypeRequired, + "fc.lun", "", + }, + "slash in datasetName": { + []api.Volume{{Name: "slashinname", VolumeSource: slashInName}}, + field.ErrorTypeInvalid, + "flocker.datasetName", "must not contain '/'", + }, + "starts with '..'": { + []api.Volume{{Name: "badprefix", VolumeSource: startsWithDots}}, + field.ErrorTypeInvalid, + "gitRepo.directory", `must not start with '..'`, + }, + "contains '..'": { + []api.Volume{{Name: "containsdots", VolumeSource: containsDots}}, + field.ErrorTypeInvalid, + "gitRepo.directory", `must not contain '..'`, + }, + "absolute target": { + []api.Volume{{Name: "absolutetarget", VolumeSource: absPath}}, + field.ErrorTypeInvalid, + "gitRepo.directory", "", + }, + "empty secret": { + []api.Volume{{Name: "emptyaccount", VolumeSource: emptyAzureSecret}}, + field.ErrorTypeRequired, + "azureFile.secretName", "", + }, + "empty share": { + []api.Volume{{Name: "emptyaccount", VolumeSource: emptyAzureShare}}, + field.ErrorTypeRequired, + "azureFile.shareName", "", + }, + "fieldRef and ResourceFieldRef together": { + []api.Volume{{Name: "testvolume", VolumeSource: fieldRefandResourceFieldRef}}, + field.ErrorTypeInvalid, + "downwardAPI", "fieldRef and resourceFieldRef can not be specified simultaneously", + }, + } + for k, v := range errorCases { + _, errs := validateVolumes(v.V, field.NewPath("field")) + if len(errs) == 0 { + t.Errorf("expected failure %s for %v", k, v.V) + continue + } + for i := range errs { + if errs[i].Type != v.T { + t.Errorf("%s: expected error to have type %q: %q", k, v.T, errs[i].Type) + } + if !strings.Contains(errs[i].Field, v.F) { + t.Errorf("%s: expected error field %q: %q", k, v.F, errs[i].Field) + } + if !strings.Contains(errs[i].Detail, v.D) { + t.Errorf("%s: expected error detail %q, got %q", k, v.D, errs[i].Detail) + } + } + } +} + +func TestValidatePorts(t *testing.T) { + successCase := []api.ContainerPort{ + {Name: "abc", ContainerPort: 80, HostPort: 80, Protocol: "TCP"}, + {Name: "easy", ContainerPort: 82, Protocol: "TCP"}, + {Name: "as", ContainerPort: 83, Protocol: "UDP"}, + {Name: "do-re-me", ContainerPort: 84, Protocol: "UDP"}, + {ContainerPort: 85, Protocol: "TCP"}, + } + if errs := validateContainerPorts(successCase, field.NewPath("field")); len(errs) != 0 { + t.Errorf("expected success: %v", errs) + } + + nonCanonicalCase := []api.ContainerPort{ + {ContainerPort: 80, Protocol: "TCP"}, + } + if errs := validateContainerPorts(nonCanonicalCase, field.NewPath("field")); len(errs) != 0 { + t.Errorf("expected success: %v", errs) + } + + errorCases := map[string]struct { + P []api.ContainerPort + T field.ErrorType + F string + D string + }{ + "name > 15 characters": { + []api.ContainerPort{{Name: strings.Repeat("a", 16), ContainerPort: 80, Protocol: "TCP"}}, + field.ErrorTypeInvalid, + "name", PortNameErrorMsg, + }, + "name not a IANA svc name ": { + []api.ContainerPort{{Name: "a.b.c", ContainerPort: 80, Protocol: "TCP"}}, + field.ErrorTypeInvalid, + "name", PortNameErrorMsg, + }, + "name not a IANA svc name (i.e. 
a number)": { + []api.ContainerPort{{Name: "80", ContainerPort: 80, Protocol: "TCP"}}, + field.ErrorTypeInvalid, + "name", PortNameErrorMsg, + }, + "name not unique": { + []api.ContainerPort{ + {Name: "abc", ContainerPort: 80, Protocol: "TCP"}, + {Name: "abc", ContainerPort: 81, Protocol: "TCP"}, + }, + field.ErrorTypeDuplicate, + "[1].name", "", + }, + "zero container port": { + []api.ContainerPort{{ContainerPort: 0, Protocol: "TCP"}}, + field.ErrorTypeInvalid, + "containerPort", PortRangeErrorMsg, + }, + "invalid container port": { + []api.ContainerPort{{ContainerPort: 65536, Protocol: "TCP"}}, + field.ErrorTypeInvalid, + "containerPort", PortRangeErrorMsg, + }, + "invalid host port": { + []api.ContainerPort{{ContainerPort: 80, HostPort: 65536, Protocol: "TCP"}}, + field.ErrorTypeInvalid, + "hostPort", PortRangeErrorMsg, + }, + "invalid protocol case": { + []api.ContainerPort{{ContainerPort: 80, Protocol: "tcp"}}, + field.ErrorTypeNotSupported, + "protocol", "supported values: TCP, UDP", + }, + "invalid protocol": { + []api.ContainerPort{{ContainerPort: 80, Protocol: "ICMP"}}, + field.ErrorTypeNotSupported, + "protocol", "supported values: TCP, UDP", + }, + "protocol required": { + []api.ContainerPort{{Name: "abc", ContainerPort: 80}}, + field.ErrorTypeRequired, + "protocol", "", + }, + } + for k, v := range errorCases { + errs := validateContainerPorts(v.P, field.NewPath("field")) + if len(errs) == 0 { + t.Errorf("expected failure for %s", k) + } + for i := range errs { + if errs[i].Type != v.T { + t.Errorf("%s: expected error to have type %q: %q", k, v.T, errs[i].Type) + } + if !strings.Contains(errs[i].Field, v.F) { + t.Errorf("%s: expected error field %q: %q", k, v.F, errs[i].Field) + } + if !strings.Contains(errs[i].Detail, v.D) { + t.Errorf("%s: expected error detail %q, got %q", k, v.D, errs[i].Detail) + } + } + } +} + +func TestValidateEnv(t *testing.T) { + successCase := []api.EnvVar{ + {Name: "abc", Value: "value"}, + {Name: "ABC", Value: "value"}, + {Name: "AbC_123", Value: "value"}, + {Name: "abc", Value: ""}, + { + Name: "abc", + ValueFrom: &api.EnvVarSource{ + FieldRef: &api.ObjectFieldSelector{ + APIVersion: testapi.Default.GroupVersion().String(), + FieldPath: "metadata.name", + }, + }, + }, + { + Name: "secret_value", + ValueFrom: &api.EnvVarSource{ + SecretKeyRef: &api.SecretKeySelector{ + LocalObjectReference: api.LocalObjectReference{ + Name: "some-secret", + }, + Key: "secret-key", + }, + }, + }, + { + Name: "ENV_VAR_1", + ValueFrom: &api.EnvVarSource{ + ConfigMapKeyRef: &api.ConfigMapKeySelector{ + LocalObjectReference: api.LocalObjectReference{ + Name: "some-config-map", + }, + Key: "some-key", + }, + }, + }, + } + if errs := validateEnv(successCase, field.NewPath("field")); len(errs) != 0 { + t.Errorf("expected success: %v", errs) + } + + errorCases := []struct { + name string + envs []api.EnvVar + expectedError string + }{ + { + name: "zero-length name", + envs: []api.EnvVar{{Name: ""}}, + expectedError: "[0].name: Required value", + }, + { + name: "name not a C identifier", + envs: []api.EnvVar{{Name: "a.b.c"}}, + expectedError: `[0].name: Invalid value: "a.b.c": must be a C identifier (matching regex [A-Za-z_][A-Za-z0-9_]*): e.g. 
"my_name" or "MyName"`, + }, + { + name: "value and valueFrom specified", + envs: []api.EnvVar{{ + Name: "abc", + Value: "foo", + ValueFrom: &api.EnvVarSource{ + FieldRef: &api.ObjectFieldSelector{ + APIVersion: testapi.Default.GroupVersion().String(), + FieldPath: "metadata.name", + }, + }, + }}, + expectedError: "[0].valueFrom: Invalid value: \"\": may not be specified when `value` is not empty", + }, + { + name: "valueFrom.fieldRef and valueFrom.secretKeyRef specified", + envs: []api.EnvVar{{ + Name: "abc", + ValueFrom: &api.EnvVarSource{ + FieldRef: &api.ObjectFieldSelector{ + APIVersion: testapi.Default.GroupVersion().String(), + FieldPath: "metadata.name", + }, + SecretKeyRef: &api.SecretKeySelector{ + LocalObjectReference: api.LocalObjectReference{ + Name: "a-secret", + }, + Key: "a-key", + }, + }, + }}, + expectedError: "[0].valueFrom: Invalid value: \"\": may not have more than one field specified at a time", + }, + { + name: "valueFrom.fieldRef and valueFrom.configMapKeyRef set", + envs: []api.EnvVar{{ + Name: "some_var_name", + ValueFrom: &api.EnvVarSource{ + FieldRef: &api.ObjectFieldSelector{ + APIVersion: testapi.Default.GroupVersion().String(), + FieldPath: "metadata.name", + }, + ConfigMapKeyRef: &api.ConfigMapKeySelector{ + LocalObjectReference: api.LocalObjectReference{ + Name: "some-config-map", + }, + Key: "some-key", + }, + }, + }}, + expectedError: `[0].valueFrom: Invalid value: "": may not have more than one field specified at a time`, + }, + { + name: "valueFrom.fieldRef and valueFrom.secretKeyRef specified", + envs: []api.EnvVar{{ + Name: "abc", + ValueFrom: &api.EnvVarSource{ + FieldRef: &api.ObjectFieldSelector{ + APIVersion: testapi.Default.GroupVersion().String(), + FieldPath: "metadata.name", + }, + SecretKeyRef: &api.SecretKeySelector{ + LocalObjectReference: api.LocalObjectReference{ + Name: "a-secret", + }, + Key: "a-key", + }, + ConfigMapKeyRef: &api.ConfigMapKeySelector{ + LocalObjectReference: api.LocalObjectReference{ + Name: "some-config-map", + }, + Key: "some-key", + }, + }, + }}, + expectedError: `[0].valueFrom: Invalid value: "": may not have more than one field specified at a time`, + }, + { + name: "missing FieldPath on ObjectFieldSelector", + envs: []api.EnvVar{{ + Name: "abc", + ValueFrom: &api.EnvVarSource{ + FieldRef: &api.ObjectFieldSelector{ + APIVersion: testapi.Default.GroupVersion().String(), + }, + }, + }}, + expectedError: `[0].valueFrom.fieldRef.fieldPath: Required value`, + }, + { + name: "missing APIVersion on ObjectFieldSelector", + envs: []api.EnvVar{{ + Name: "abc", + ValueFrom: &api.EnvVarSource{ + FieldRef: &api.ObjectFieldSelector{ + FieldPath: "metadata.name", + }, + }, + }}, + expectedError: `[0].valueFrom.fieldRef.apiVersion: Required value`, + }, + { + name: "invalid fieldPath", + envs: []api.EnvVar{{ + Name: "abc", + ValueFrom: &api.EnvVarSource{ + FieldRef: &api.ObjectFieldSelector{ + FieldPath: "metadata.whoops", + APIVersion: testapi.Default.GroupVersion().String(), + }, + }, + }}, + expectedError: `[0].valueFrom.fieldRef.fieldPath: Invalid value: "metadata.whoops": error converting fieldPath`, + }, + { + name: "invalid fieldPath labels", + envs: []api.EnvVar{{ + Name: "labels", + ValueFrom: &api.EnvVarSource{ + FieldRef: &api.ObjectFieldSelector{ + FieldPath: "metadata.labels", + APIVersion: "v1", + }, + }, + }}, + expectedError: `[0].valueFrom.fieldRef.fieldPath: Unsupported value: "metadata.labels": supported values: metadata.name, metadata.namespace, status.podIP`, + }, + { + name: "invalid fieldPath annotations", 
+ envs: []api.EnvVar{{ + Name: "abc", + ValueFrom: &api.EnvVarSource{ + FieldRef: &api.ObjectFieldSelector{ + FieldPath: "metadata.annotations", + APIVersion: "v1", + }, + }, + }}, + expectedError: `[0].valueFrom.fieldRef.fieldPath: Unsupported value: "metadata.annotations": supported values: metadata.name, metadata.namespace, status.podIP`, + }, + { + name: "unsupported fieldPath", + envs: []api.EnvVar{{ + Name: "abc", + ValueFrom: &api.EnvVarSource{ + FieldRef: &api.ObjectFieldSelector{ + FieldPath: "status.phase", + APIVersion: testapi.Default.GroupVersion().String(), + }, + }, + }}, + expectedError: `valueFrom.fieldRef.fieldPath: Unsupported value: "status.phase": supported values: metadata.name, metadata.namespace, status.podIP`, + }, + } + for _, tc := range errorCases { + if errs := validateEnv(tc.envs, field.NewPath("field")); len(errs) == 0 { + t.Errorf("expected failure for %s", tc.name) + } else { + for i := range errs { + str := errs[i].Error() + if str != "" && !strings.Contains(str, tc.expectedError) { + t.Errorf("%s: expected error detail either empty or %q, got %q", tc.name, tc.expectedError, str) + } + } + } + } +} + +func TestValidateVolumeMounts(t *testing.T) { + volumes := sets.NewString("abc", "123", "abc-123") + + successCase := []api.VolumeMount{ + {Name: "abc", MountPath: "/foo"}, + {Name: "123", MountPath: "/bar"}, + {Name: "abc-123", MountPath: "/baz"}, + {Name: "abc-123", MountPath: "/baa", SubPath: ""}, + {Name: "abc-123", MountPath: "/bab", SubPath: "baz"}, + {Name: "abc-123", MountPath: "/bac", SubPath: ".baz"}, + {Name: "abc-123", MountPath: "/bad", SubPath: "..baz"}, + } + if errs := validateVolumeMounts(successCase, volumes, field.NewPath("field")); len(errs) != 0 { + t.Errorf("expected success: %v", errs) + } + + errorCases := map[string][]api.VolumeMount{ + "empty name": {{Name: "", MountPath: "/foo"}}, + "name not found": {{Name: "zzz", MountPath: "/foo"}}, + "empty mountpath": {{Name: "abc", MountPath: ""}}, + "colon mountpath": {{Name: "abc", MountPath: "foo:bar"}}, + "mountpath collision": {{Name: "abc", MountPath: "/path/a"}, {Name: "123", MountPath: "/path/a"}}, + "absolute subpath": {{Name: "abc", MountPath: "/bar", SubPath: "/baz"}}, + "subpath in ..": {{Name: "abc", MountPath: "/bar", SubPath: "../baz"}}, + "subpath contains ..": {{Name: "abc", MountPath: "/bar", SubPath: "baz/../bat"}}, + "subpath ends in ..": {{Name: "abc", MountPath: "/bar", SubPath: "./.."}}, + } + for k, v := range errorCases { + if errs := validateVolumeMounts(v, volumes, field.NewPath("field")); len(errs) == 0 { + t.Errorf("expected failure for %s", k) + } + } +} + +func TestValidateProbe(t *testing.T) { + handler := api.Handler{Exec: &api.ExecAction{Command: []string{"echo"}}} + // These fields must be positive.
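+ // A minimal sketch of the reflection pattern used below (assuming integer struct fields): reflect.ValueOf(probe).Elem().FieldByName(name) returns an addressable Value for the named field, and SetInt stores a signed integer into it, so a single loop can exercise every positive-only field without a hand-written case per field, e.g. reflect.ValueOf(probe).Elem().FieldByName("TimeoutSeconds").SetInt(10).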
+ positiveFields := [...]string{"InitialDelaySeconds", "TimeoutSeconds", "PeriodSeconds", "SuccessThreshold", "FailureThreshold"} + successCases := []*api.Probe{nil} + for _, field := range positiveFields { + probe := &api.Probe{Handler: handler} + reflect.ValueOf(probe).Elem().FieldByName(field).SetInt(10) + successCases = append(successCases, probe) + } + + for _, p := range successCases { + if errs := validateProbe(p, field.NewPath("field")); len(errs) != 0 { + t.Errorf("expected success: %v", errs) + } + } + + errorCases := []*api.Probe{{TimeoutSeconds: 10, InitialDelaySeconds: 10}} + for _, field := range positiveFields { + probe := &api.Probe{Handler: handler} + reflect.ValueOf(probe).Elem().FieldByName(field).SetInt(-10) + errorCases = append(errorCases, probe) + } + for _, p := range errorCases { + if errs := validateProbe(p, field.NewPath("field")); len(errs) == 0 { + t.Errorf("expected failure for %v", p) + } + } +} + +func TestValidateHandler(t *testing.T) { + successCases := []api.Handler{ + {Exec: &api.ExecAction{Command: []string{"echo"}}}, + {HTTPGet: &api.HTTPGetAction{Path: "/", Port: intstr.FromInt(1), Host: "", Scheme: "HTTP"}}, + {HTTPGet: &api.HTTPGetAction{Path: "/foo", Port: intstr.FromInt(65535), Host: "host", Scheme: "HTTP"}}, + {HTTPGet: &api.HTTPGetAction{Path: "/", Port: intstr.FromString("port"), Host: "", Scheme: "HTTP"}}, + {HTTPGet: &api.HTTPGetAction{Path: "/", Port: intstr.FromString("port"), Host: "", Scheme: "HTTP", HTTPHeaders: []api.HTTPHeader{{"Host", "foo.example.com"}}}}, + {HTTPGet: &api.HTTPGetAction{Path: "/", Port: intstr.FromString("port"), Host: "", Scheme: "HTTP", HTTPHeaders: []api.HTTPHeader{{"X-Forwarded-For", "1.2.3.4"}, {"X-Forwarded-For", "5.6.7.8"}}}}, + } + for _, h := range successCases { + if errs := validateHandler(&h, field.NewPath("field")); len(errs) != 0 { + t.Errorf("expected success: %v", errs) + } + } + + errorCases := []api.Handler{ + {}, + {Exec: &api.ExecAction{Command: []string{}}}, + {HTTPGet: &api.HTTPGetAction{Path: "", Port: intstr.FromInt(0), Host: ""}}, + {HTTPGet: &api.HTTPGetAction{Path: "/foo", Port: intstr.FromInt(65536), Host: "host"}}, + {HTTPGet: &api.HTTPGetAction{Path: "", Port: intstr.FromString(""), Host: ""}}, + {HTTPGet: &api.HTTPGetAction{Path: "/", Port: intstr.FromString("port"), Host: "", Scheme: "HTTP", HTTPHeaders: []api.HTTPHeader{{"Host:", "foo.example.com"}}}}, + {HTTPGet: &api.HTTPGetAction{Path: "/", Port: intstr.FromString("port"), Host: "", Scheme: "HTTP", HTTPHeaders: []api.HTTPHeader{{"X_Forwarded_For", "foo.example.com"}}}}, + } + for _, h := range errorCases { + if errs := validateHandler(&h, field.NewPath("field")); len(errs) == 0 { + t.Errorf("expected failure for %#v", h) + } + } +} + +func TestValidatePullPolicy(t *testing.T) { + type T struct { + Container api.Container + ExpectedPolicy api.PullPolicy + } + testCases := map[string]T{ + "NotPresent1": { + api.Container{Name: "abc", Image: "image:latest", ImagePullPolicy: "IfNotPresent"}, + api.PullIfNotPresent, + }, + "NotPresent2": { + api.Container{Name: "abc1", Image: "image", ImagePullPolicy: "IfNotPresent"}, + api.PullIfNotPresent, + }, + "Always1": { + api.Container{Name: "123", Image: "image:latest", ImagePullPolicy: "Always"}, + api.PullAlways, + }, + "Always2": { + api.Container{Name: "1234", Image: "image", ImagePullPolicy: "Always"}, + api.PullAlways, + }, + "Never1": { + api.Container{Name: "abc-123", Image: "image:latest", ImagePullPolicy: "Never"}, + api.PullNever, + }, + "Never2": { + api.Container{Name: "abc-1234", 
Image: "image", ImagePullPolicy: "Never"}, + api.PullNever, + }, + } + for k, v := range testCases { + ctr := &v.Container + errs := validatePullPolicy(ctr.ImagePullPolicy, field.NewPath("field")) + if len(errs) != 0 { + t.Errorf("case[%s] expected success, got %#v", k, errs) + } + if ctr.ImagePullPolicy != v.ExpectedPolicy { + t.Errorf("case[%s] expected policy %v, got %v", k, v.ExpectedPolicy, ctr.ImagePullPolicy) + } + } +} + +func getResourceLimits(cpu, memory string) api.ResourceList { + res := api.ResourceList{} + res[api.ResourceCPU] = resource.MustParse(cpu) + res[api.ResourceMemory] = resource.MustParse(memory) + return res +} + +func TestValidateContainers(t *testing.T) { + volumes := sets.String{} + capabilities.SetForTests(capabilities.Capabilities{ + AllowPrivileged: true, + }) + + successCase := []api.Container{ + {Name: "abc", Image: "image", ImagePullPolicy: "IfNotPresent"}, + {Name: "123", Image: "image", ImagePullPolicy: "IfNotPresent"}, + {Name: "abc-123", Image: "image", ImagePullPolicy: "IfNotPresent"}, + { + Name: "life-123", + Image: "image", + Lifecycle: &api.Lifecycle{ + PreStop: &api.Handler{ + Exec: &api.ExecAction{Command: []string{"ls", "-l"}}, + }, + }, + ImagePullPolicy: "IfNotPresent", + }, + { + Name: "resources-test", + Image: "image", + Resources: api.ResourceRequirements{ + Limits: api.ResourceList{ + api.ResourceName(api.ResourceCPU): resource.MustParse("10"), + api.ResourceName(api.ResourceMemory): resource.MustParse("10G"), + api.ResourceName("my.org/resource"): resource.MustParse("10m"), + }, + }, + ImagePullPolicy: "IfNotPresent", + }, + { + Name: "resources-test-with-gpu", + Image: "image", + Resources: api.ResourceRequirements{ + Requests: api.ResourceList{ + api.ResourceName(api.ResourceCPU): resource.MustParse("10"), + api.ResourceName(api.ResourceMemory): resource.MustParse("10G"), + }, + Limits: api.ResourceList{ + api.ResourceName(api.ResourceCPU): resource.MustParse("10"), + api.ResourceName(api.ResourceMemory): resource.MustParse("10G"), + api.ResourceName(api.ResourceNvidiaGPU): resource.MustParse("1"), + }, + }, + ImagePullPolicy: "IfNotPresent", + }, + { + Name: "resources-request-limit-simple", + Image: "image", + Resources: api.ResourceRequirements{ + Requests: api.ResourceList{ + api.ResourceName(api.ResourceCPU): resource.MustParse("8"), + }, + Limits: api.ResourceList{ + api.ResourceName(api.ResourceCPU): resource.MustParse("10"), + }, + }, + ImagePullPolicy: "IfNotPresent", + }, + { + Name: "resources-request-limit-edge", + Image: "image", + Resources: api.ResourceRequirements{ + Requests: api.ResourceList{ + api.ResourceName(api.ResourceCPU): resource.MustParse("10"), + api.ResourceName(api.ResourceMemory): resource.MustParse("10G"), + api.ResourceName("my.org/resource"): resource.MustParse("10m"), + }, + Limits: api.ResourceList{ + api.ResourceName(api.ResourceCPU): resource.MustParse("10"), + api.ResourceName(api.ResourceMemory): resource.MustParse("10G"), + api.ResourceName("my.org/resource"): resource.MustParse("10m"), + }, + }, + ImagePullPolicy: "IfNotPresent", + }, + { + Name: "resources-request-limit-partials", + Image: "image", + Resources: api.ResourceRequirements{ + Requests: api.ResourceList{ + api.ResourceName(api.ResourceCPU): resource.MustParse("9.5"), + api.ResourceName(api.ResourceMemory): resource.MustParse("10G"), + }, + Limits: api.ResourceList{ + api.ResourceName(api.ResourceCPU): resource.MustParse("10"), + api.ResourceName("my.org/resource"): resource.MustParse("10m"), + }, + }, + ImagePullPolicy: 
"IfNotPresent", + }, + { + Name: "resources-request", + Image: "image", + Resources: api.ResourceRequirements{ + Requests: api.ResourceList{ + api.ResourceName(api.ResourceCPU): resource.MustParse("9.5"), + api.ResourceName(api.ResourceMemory): resource.MustParse("10G"), + }, + }, + ImagePullPolicy: "IfNotPresent", + }, + { + Name: "same-host-port-different-protocol", + Image: "image", + Ports: []api.ContainerPort{ + {ContainerPort: 80, HostPort: 80, Protocol: "TCP"}, + {ContainerPort: 80, HostPort: 80, Protocol: "UDP"}, + }, + ImagePullPolicy: "IfNotPresent", + }, + {Name: "abc-1234", Image: "image", ImagePullPolicy: "IfNotPresent", SecurityContext: fakeValidSecurityContext(true)}, + } + if errs := validateContainers(successCase, volumes, field.NewPath("field")); len(errs) != 0 { + t.Errorf("expected success: %v", errs) + } + + capabilities.SetForTests(capabilities.Capabilities{ + AllowPrivileged: false, + }) + errorCases := map[string][]api.Container{ + "zero-length name": {{Name: "", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + "name > 63 characters": {{Name: strings.Repeat("a", 64), Image: "image", ImagePullPolicy: "IfNotPresent"}}, + "name not a DNS label": {{Name: "a.b.c", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + "name not unique": { + {Name: "abc", Image: "image", ImagePullPolicy: "IfNotPresent"}, + {Name: "abc", Image: "image", ImagePullPolicy: "IfNotPresent"}, + }, + "zero-length image": {{Name: "abc", Image: "", ImagePullPolicy: "IfNotPresent"}}, + "host port not unique": { + {Name: "abc", Image: "image", Ports: []api.ContainerPort{{ContainerPort: 80, HostPort: 80, Protocol: "TCP"}}, + ImagePullPolicy: "IfNotPresent"}, + {Name: "def", Image: "image", Ports: []api.ContainerPort{{ContainerPort: 81, HostPort: 80, Protocol: "TCP"}}, + ImagePullPolicy: "IfNotPresent"}, + }, + "invalid env var name": { + {Name: "abc", Image: "image", Env: []api.EnvVar{{Name: "ev.1"}}, ImagePullPolicy: "IfNotPresent"}, + }, + "unknown volume name": { + {Name: "abc", Image: "image", VolumeMounts: []api.VolumeMount{{Name: "anything", MountPath: "/foo"}}, + ImagePullPolicy: "IfNotPresent"}, + }, + "invalid lifecycle, no exec command.": { + { + Name: "life-123", + Image: "image", + Lifecycle: &api.Lifecycle{ + PreStop: &api.Handler{ + Exec: &api.ExecAction{}, + }, + }, + ImagePullPolicy: "IfNotPresent", + }, + }, + "invalid lifecycle, no http path.": { + { + Name: "life-123", + Image: "image", + Lifecycle: &api.Lifecycle{ + PreStop: &api.Handler{ + HTTPGet: &api.HTTPGetAction{}, + }, + }, + ImagePullPolicy: "IfNotPresent", + }, + }, + "invalid lifecycle, no tcp socket port.": { + { + Name: "life-123", + Image: "image", + Lifecycle: &api.Lifecycle{ + PreStop: &api.Handler{ + TCPSocket: &api.TCPSocketAction{}, + }, + }, + ImagePullPolicy: "IfNotPresent", + }, + }, + "invalid lifecycle, zero tcp socket port.": { + { + Name: "life-123", + Image: "image", + Lifecycle: &api.Lifecycle{ + PreStop: &api.Handler{ + TCPSocket: &api.TCPSocketAction{ + Port: intstr.FromInt(0), + }, + }, + }, + ImagePullPolicy: "IfNotPresent", + }, + }, + "invalid lifecycle, no action.": { + { + Name: "life-123", + Image: "image", + Lifecycle: &api.Lifecycle{ + PreStop: &api.Handler{}, + }, + ImagePullPolicy: "IfNotPresent", + }, + }, + "invalid liveness probe, no tcp socket port.": { + { + Name: "life-123", + Image: "image", + LivenessProbe: &api.Probe{ + Handler: api.Handler{ + TCPSocket: &api.TCPSocketAction{}, + }, + }, + ImagePullPolicy: "IfNotPresent", + }, + }, + "invalid liveness probe, no action.": { + { + 
Name: "life-123", + Image: "image", + LivenessProbe: &api.Probe{ + Handler: api.Handler{}, + }, + ImagePullPolicy: "IfNotPresent", + }, + }, + "privilege disabled": { + {Name: "abc", Image: "image", SecurityContext: fakeValidSecurityContext(true)}, + }, + "invalid compute resource": { + { + Name: "abc-123", + Image: "image", + Resources: api.ResourceRequirements{ + Limits: api.ResourceList{ + "disk": resource.MustParse("10G"), + }, + }, + ImagePullPolicy: "IfNotPresent", + }, + }, + "Resource CPU invalid": { + { + Name: "abc-123", + Image: "image", + Resources: api.ResourceRequirements{ + Limits: getResourceLimits("-10", "0"), + }, + ImagePullPolicy: "IfNotPresent", + }, + }, + "Resource Requests CPU invalid": { + { + Name: "abc-123", + Image: "image", + Resources: api.ResourceRequirements{ + Requests: getResourceLimits("-10", "0"), + }, + ImagePullPolicy: "IfNotPresent", + }, + }, + "Resource Memory invalid": { + { + Name: "abc-123", + Image: "image", + Resources: api.ResourceRequirements{ + Limits: getResourceLimits("0", "-10"), + }, + ImagePullPolicy: "IfNotPresent", + }, + }, + "Resource can only have GPU limit": { + { + Name: "resources-request-limit-edge", + Image: "image", + Resources: api.ResourceRequirements{ + Requests: api.ResourceList{ + api.ResourceName(api.ResourceCPU): resource.MustParse("10"), + api.ResourceName(api.ResourceMemory): resource.MustParse("10G"), + api.ResourceName(api.ResourceNvidiaGPU): resource.MustParse("1"), + }, + Limits: api.ResourceList{ + api.ResourceName(api.ResourceCPU): resource.MustParse("10"), + api.ResourceName(api.ResourceMemory): resource.MustParse("10G"), + api.ResourceName(api.ResourceNvidiaGPU): resource.MustParse("1"), + }, + }, + ImagePullPolicy: "IfNotPresent", + }, + }, + "Request limit simple invalid": { + { + Name: "abc-123", + Image: "image", + Resources: api.ResourceRequirements{ + Limits: getResourceLimits("5", "3"), + Requests: getResourceLimits("6", "3"), + }, + ImagePullPolicy: "IfNotPresent", + }, + }, + "Request limit multiple invalid": { + { + Name: "abc-123", + Image: "image", + Resources: api.ResourceRequirements{ + Limits: getResourceLimits("5", "3"), + Requests: getResourceLimits("6", "4"), + }, + ImagePullPolicy: "IfNotPresent", + }, + }, + } + for k, v := range errorCases { + if errs := validateContainers(v, volumes, field.NewPath("field")); len(errs) == 0 { + t.Errorf("expected failure for %s", k) + } + } +} + +func TestValidateRestartPolicy(t *testing.T) { + successCases := []api.RestartPolicy{ + api.RestartPolicyAlways, + api.RestartPolicyOnFailure, + api.RestartPolicyNever, + } + for _, policy := range successCases { + if errs := validateRestartPolicy(&policy, field.NewPath("field")); len(errs) != 0 { + t.Errorf("expected success: %v", errs) + } + } + + errorCases := []api.RestartPolicy{"", "newpolicy"} + + for k, policy := range errorCases { + if errs := validateRestartPolicy(&policy, field.NewPath("field")); len(errs) == 0 { + t.Errorf("expected failure for %d", k) + } + } +} + +func TestValidateDNSPolicy(t *testing.T) { + successCases := []api.DNSPolicy{api.DNSClusterFirst, api.DNSDefault, api.DNSPolicy(api.DNSClusterFirst)} + for _, policy := range successCases { + if errs := validateDNSPolicy(&policy, field.NewPath("field")); len(errs) != 0 { + t.Errorf("expected success: %v", errs) + } + } + + errorCases := []api.DNSPolicy{api.DNSPolicy("invalid")} + for _, policy := range errorCases { + if errs := validateDNSPolicy(&policy, field.NewPath("field")); len(errs) == 0 { + t.Errorf("expected failure for %v", policy) 
+ } + } +} + +func TestValidatePodSpec(t *testing.T) { + activeDeadlineSeconds := int64(30) + minID := int64(0) + maxID := int64(2147483647) + successCases := []api.PodSpec{ + { // Populate basic fields, leave defaults for most. + Volumes: []api.Volume{{Name: "vol", VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{}}}}, + Containers: []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + }, + { // Populate all fields. + Volumes: []api.Volume{ + {Name: "vol", VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{}}}, + }, + Containers: []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + RestartPolicy: api.RestartPolicyAlways, + NodeSelector: map[string]string{ + "key": "value", + }, + NodeName: "foobar", + DNSPolicy: api.DNSClusterFirst, + ActiveDeadlineSeconds: &activeDeadlineSeconds, + ServiceAccountName: "acct", + }, + { // Populate HostNetwork. + Containers: []api.Container{ + {Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent", Ports: []api.ContainerPort{ + {HostPort: 8080, ContainerPort: 8080, Protocol: "TCP"}}, + }, + }, + SecurityContext: &api.PodSecurityContext{ + HostNetwork: true, + }, + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + }, + { // Populate RunAsUser SupplementalGroups FSGroup with minID 0 + Containers: []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + SecurityContext: &api.PodSecurityContext{ + SupplementalGroups: []int64{minID}, + RunAsUser: &minID, + FSGroup: &minID, + }, + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + }, + { // Populate RunAsUser SupplementalGroups FSGroup with maxID 2147483647 + Containers: []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + SecurityContext: &api.PodSecurityContext{ + SupplementalGroups: []int64{maxID}, + RunAsUser: &maxID, + FSGroup: &maxID, + }, + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + }, + { // Populate HostIPC. + SecurityContext: &api.PodSecurityContext{ + HostIPC: true, + }, + Volumes: []api.Volume{{Name: "vol", VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{}}}}, + Containers: []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + }, + { // Populate HostPID. + SecurityContext: &api.PodSecurityContext{ + HostPID: true, + }, + Volumes: []api.Volume{{Name: "vol", VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{}}}}, + Containers: []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + }, + { // Populate Affinity. 
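+ // At this API version affinity is expressed through the api.AffinityAnnotationKey annotation rather than a PodSpec field (see TestValidatePod below), so this case has no extra spec fields to set.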
+ Volumes: []api.Volume{{Name: "vol", VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{}}}}, + Containers: []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + }, + } + for i := range successCases { + if errs := ValidatePodSpec(&successCases[i], field.NewPath("field")); len(errs) != 0 { + t.Errorf("expected success: %v", errs) + } + } + + activeDeadlineSeconds = int64(0) + minID = int64(-1) + maxID = int64(2147483648) + failureCases := map[string]api.PodSpec{ + "bad volume": { + Volumes: []api.Volume{{}}, + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + Containers: []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + }, + "no containers": { + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + }, + "bad container": { + Containers: []api.Container{{}}, + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + }, + "bad DNS policy": { + DNSPolicy: api.DNSPolicy("invalid"), + RestartPolicy: api.RestartPolicyAlways, + Containers: []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + }, + "bad service account name": { + Containers: []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + ServiceAccountName: "invalidName", + }, + "bad restart policy": { + RestartPolicy: "UnknowPolicy", + DNSPolicy: api.DNSClusterFirst, + Containers: []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + }, + "with hostNetwork hostPort not equal to containerPort": { + Containers: []api.Container{ + {Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent", Ports: []api.ContainerPort{ + {HostPort: 8080, ContainerPort: 2600, Protocol: "TCP"}}, + }, + }, + SecurityContext: &api.PodSecurityContext{ + HostNetwork: true, + }, + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + }, + "bad supplementalGroups larger than math.MaxInt32": { + Containers: []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + SecurityContext: &api.PodSecurityContext{ + HostNetwork: false, + SupplementalGroups: []int64{maxID, 1234}, + }, + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + }, + "bad supplementalGroups less than 0": { + Containers: []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + SecurityContext: &api.PodSecurityContext{ + HostNetwork: false, + SupplementalGroups: []int64{minID, 1234}, + }, + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + }, + "bad runAsUser larger than math.MaxInt32": { + Containers: []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + SecurityContext: &api.PodSecurityContext{ + HostNetwork: false, + RunAsUser: &maxID, + }, + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + }, + "bad runAsUser less than 0": { + Containers: []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + SecurityContext: &api.PodSecurityContext{ + HostNetwork: false, + RunAsUser: &minID, + }, + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + }, + "bad fsGroup larger than math.MaxInt32": { + Containers: []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + SecurityContext:
&api.PodSecurityContext{ + HostNetwork: false, + FSGroup: &maxID, + }, + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + }, + "bad fsGroup less than 0": { + Containers: []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + SecurityContext: &api.PodSecurityContext{ + HostNetwork: false, + FSGroup: &minID, + }, + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + }, + "bad-active-deadline-seconds": { + Volumes: []api.Volume{ + {Name: "vol", VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{}}}, + }, + Containers: []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + RestartPolicy: api.RestartPolicyAlways, + NodeSelector: map[string]string{ + "key": "value", + }, + NodeName: "foobar", + DNSPolicy: api.DNSClusterFirst, + ActiveDeadlineSeconds: &activeDeadlineSeconds, + }, + "bad nodeName": { + NodeName: "node name", + Volumes: []api.Volume{{Name: "vol", VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{}}}}, + Containers: []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + }, + } + for k, v := range failureCases { + if errs := ValidatePodSpec(&v, field.NewPath("field")); len(errs) == 0 { + t.Errorf("expected failure for %q", k) + } + } +} + +func TestValidatePod(t *testing.T) { + successCases := []api.Pod{ + { // Basic fields. + ObjectMeta: api.ObjectMeta{Name: "123", Namespace: "ns"}, + Spec: api.PodSpec{ + Volumes: []api.Volume{{Name: "vol", VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{}}}}, + Containers: []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + }, + }, + { // Just about everything. + ObjectMeta: api.ObjectMeta{Name: "abc.123.do-re-mi", Namespace: "ns"}, + Spec: api.PodSpec{ + Volumes: []api.Volume{ + {Name: "vol", VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{}}}, + }, + Containers: []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + NodeSelector: map[string]string{ + "key": "value", + }, + NodeName: "foobar", + }, + }, + { // Serialized affinity requirements in annotations. 
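+ // The annotation value is a JSON-serialized affinity struct; a minimal node-affinity example of the assumed shape, mirroring the blob below: {"nodeAffinity": {"requiredDuringSchedulingIgnoredDuringExecution": {"nodeSelectorTerms": [{"matchExpressions": [{"key": "k", "operator": "Exists"}]}]}}}.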
+ ObjectMeta: api.ObjectMeta{ + Name: "123", + Namespace: "ns", + // TODO: Uncomment and move this block into Annotations map once + // RequiredDuringSchedulingRequiredDuringExecution is implemented + // "requiredDuringSchedulingRequiredDuringExecution": { + // "nodeSelectorTerms": [{ + // "matchExpressions": [{ + // "key": "key1", + // "operator": "Exists" + // }] + // }] + // }, + Annotations: map[string]string{ + api.AffinityAnnotationKey: ` + {"nodeAffinity": { + "requiredDuringSchedulingIgnoredDuringExecution": { + "nodeSelectorTerms": [{ + "matchExpressions": [{ + "key": "key2", + "operator": "In", + "values": ["value1", "value2"] + }] + }] + }, + "preferredDuringSchedulingIgnoredDuringExecution": [ + { + "weight": 10, + "preference": {"matchExpressions": [ + { + "key": "foo", + "operator": "In", "values": ["bar"] + } + ]} + } + ] + }}`, + }, + }, + Spec: api.PodSpec{ + Containers: []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + }, + }, + { // Serialized pod affinity in affinity requirements in annotations. + ObjectMeta: api.ObjectMeta{ + Name: "123", + Namespace: "ns", + // TODO: Uncomment and move this block into Annotations map once + // RequiredDuringSchedulingRequiredDuringExecution is implemented + // "requiredDuringSchedulingRequiredDuringExecution": [{ + // "labelSelector": { + // "matchExpressions": [{ + // "key": "key2", + // "operator": "In", + // "values": ["value1", "value2"] + // }] + // }, + // "namespaces":["ns"], + // "topologyKey": "zone" + // }] + Annotations: map[string]string{ + api.AffinityAnnotationKey: ` + {"podAffinity": { + "requiredDuringSchedulingIgnoredDuringExecution": [{ + "labelSelector": { + "matchExpressions": [{ + "key": "key2", + "operator": "In", + "values": ["value1", "value2"] + }] + }, + "topologyKey": "zone", + "namespaces": ["ns"] + }], + "preferredDuringSchedulingIgnoredDuringExecution": [{ + "weight": 10, + "podAffinityTerm": { + "labelSelector": { + "matchExpressions": [{ + "key": "key2", + "operator": "NotIn", + "values": ["value1", "value2"] + }] + }, + "namespaces": ["ns"], + "topologyKey": "region" + } + }] + }}`, + }, + }, + Spec: api.PodSpec{ + Containers: []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + }, + }, + { // Serialized pod anti affinity with different Label Operators in affinity requirements in annotations. 
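+ // The label selector operators accepted in these annotations are In, NotIn, Exists and DoesNotExist; Exists and DoesNotExist must omit "values", which the error cases further down ("WrongOp", Exists with values) exercise.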
+ ObjectMeta: api.ObjectMeta{ + Name: "123", + Namespace: "ns", + // TODO: Uncomment and move this block into Annotations map once + // RequiredDuringSchedulingRequiredDuringExecution is implemented + // "requiredDuringSchedulingRequiredDuringExecution": [{ + // "labelSelector": { + // "matchExpressions": [{ + // "key": "key2", + // "operator": "In", + // "values": ["value1", "value2"] + // }] + // }, + // "namespaces":["ns"], + // "topologyKey": "zone" + // }] + Annotations: map[string]string{ + api.AffinityAnnotationKey: ` + {"podAntiAffinity": { + "requiredDuringSchedulingIgnoredDuringExecution": [{ + "labelSelector": { + "matchExpressions": [{ + "key": "key2", + "operator": "Exists" + }] + }, + "topologyKey": "zone", + "namespaces": ["ns"] + }], + "preferredDuringSchedulingIgnoredDuringExecution": [{ + "weight": 10, + "podAffinityTerm": { + "labelSelector": { + "matchExpressions": [{ + "key": "key2", + "operator": "DoesNotExist" + }] + }, + "namespaces": ["ns"], + "topologyKey": "region" + } + }] + }}`, + }, + }, + Spec: api.PodSpec{ + Containers: []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + }, + }, + { // populate tolerations equal operator in annotations. + ObjectMeta: api.ObjectMeta{ + Name: "123", + Namespace: "ns", + Annotations: map[string]string{ + api.TolerationsAnnotationKey: ` + [{ + "key": "foo", + "operator": "Equal", + "value": "bar", + "effect": "NoSchedule" + }]`, + }, + }, + Spec: api.PodSpec{ + Containers: []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + }, + }, + { // populate tolerations exists operator in annotations. 
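+ // Tolerations ride in the api.TolerationsAnnotationKey annotation as a JSON array, e.g. [{"key": "foo", "operator": "Exists", "effect": "NoSchedule"}]; with operator "Exists" the "value" field must stay empty, as the error cases below assert.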
+ ObjectMeta: api.ObjectMeta{ + Name: "123", + Namespace: "ns", + Annotations: map[string]string{ + api.TolerationsAnnotationKey: ` + [{ + "key": "foo", + "operator": "Exists", + "effect": "NoSchedule" + }]`, + }, + }, + Spec: api.PodSpec{ + Containers: []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + }, + }, + { // empty operator is ok for toleration + ObjectMeta: api.ObjectMeta{ + Name: "123", + Namespace: "ns", + Annotations: map[string]string{ + api.TolerationsAnnotationKey: ` + [{ + "key": "foo", + "value": "bar", + "effect": "NoSchedule" + }]`, + }, + }, + Spec: api.PodSpec{ + Containers: []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + }, + }, + { // empty effect is ok for toleration + ObjectMeta: api.ObjectMeta{ + Name: "123", + Namespace: "ns", + Annotations: map[string]string{ + api.TolerationsAnnotationKey: ` + [{ + "key": "foo", + "operator": "Equal", + "value": "bar" + }]`, + }, + }, + Spec: api.PodSpec{ + Containers: []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + }, + }, + } + for _, pod := range successCases { + if errs := ValidatePod(&pod); len(errs) != 0 { + t.Errorf("expected success: %v", errs) + } + } + + errorCases := map[string]api.Pod{ + "bad name": { + ObjectMeta: api.ObjectMeta{Name: "", Namespace: "ns"}, + Spec: api.PodSpec{ + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + Containers: []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + }, + }, + "bad namespace": { + ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: ""}, + Spec: api.PodSpec{ + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + Containers: []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + }, + }, + "bad spec": { + ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: "ns"}, + Spec: api.PodSpec{ + Containers: []api.Container{{}}, + }, + }, + "bad label": { + ObjectMeta: api.ObjectMeta{ + Name: "abc", + Namespace: "ns", + Labels: map[string]string{ + "NoUppercaseOrSpecialCharsLike=Equals": "bar", + }, + }, + Spec: api.PodSpec{ + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + Containers: []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + }, + }, + "invalid json of node affinity in pod annotations": { + ObjectMeta: api.ObjectMeta{ + Name: "123", + Namespace: "ns", + Annotations: map[string]string{ + api.AffinityAnnotationKey: ` + {"nodeAffinity": { + "requiredDuringSchedulingIgnoredDuringExecution": { + "nodeSelectorTerms": [{ + `, + }, + }, + Spec: api.PodSpec{ + Containers: []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + }, + }, + "invalid node selector requirement in node affinity in pod annotations, operator can't be null": { + ObjectMeta: api.ObjectMeta{ + Name: "123", + Namespace: "ns", + Annotations: map[string]string{ + api.AffinityAnnotationKey: ` + {"nodeAffinity": {"requiredDuringSchedulingIgnoredDuringExecution": { + "nodeSelectorTerms": [{ + "matchExpressions": [{ + "key": "key1" + }] + }] + }}}`, + }, + }, + Spec: api.PodSpec{ + Containers: []api.Container{{Name: "ctr", Image:
"image", ImagePullPolicy: "IfNotPresent"}}, + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + }, + }, + "invalid preferredSchedulingTerm in node affinity in pod annotations, weight should be in range 1-100": { + ObjectMeta: api.ObjectMeta{ + Name: "123", + Namespace: "ns", + Annotations: map[string]string{ + api.AffinityAnnotationKey: ` + {"nodeAffinity": {"preferredDuringSchedulingIgnoredDuringExecution": [ + { + "weight": 199, + "preference": {"matchExpressions": [ + { + "key": "foo", + "operator": "In", + "values": ["bar"] + } + ]} + } + ]}}`, + }, + }, + Spec: api.PodSpec{ + Containers: []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + }, + }, + "invalid requiredDuringSchedulingIgnoredDuringExecution node selector, nodeSelectorTerms must have at least one term": { + ObjectMeta: api.ObjectMeta{ + Name: "123", + Namespace: "ns", + Annotations: map[string]string{ + api.AffinityAnnotationKey: ` + {"nodeAffinity": { + "requiredDuringSchedulingIgnoredDuringExecution": { + "nodeSelectorTerms": [] + }, + }}`, + }, + }, + Spec: api.PodSpec{ + Containers: []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + }, + }, + "invalid requiredDuringSchedulingIgnoredDuringExecution node selector term, matchExpressions must have at least one node selector requirement": { + ObjectMeta: api.ObjectMeta{ + Name: "123", + Namespace: "ns", + Annotations: map[string]string{ + api.AffinityAnnotationKey: ` + {"nodeAffinity": { + "requiredDuringSchedulingIgnoredDuringExecution": { + "nodeSelectorTerms": [{ + "matchExpressions": [] + }] + }, + }}`, + }, + }, + Spec: api.PodSpec{ + Containers: []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + }, + }, + "invalid weight in preferredDuringSchedulingIgnoredDuringExecution in pod affinity annotations, weight should be in range 1-100": { + ObjectMeta: api.ObjectMeta{ + Name: "123", + Namespace: "ns", + Annotations: map[string]string{ + api.AffinityAnnotationKey: ` + {"podAffinity": {"preferredDuringSchedulingIgnoredDuringExecution": [{ + "weight": 109, + "podAffinityTerm": + { + "labelSelector": { + "matchExpressions": [{ + "key": "key2", + "operator": "NotIn", + "values": ["value1", "value2"] + }] + }, + "namespaces": ["ns"], + "topologyKey": "region" + } + }]}}`, + }, + }, + Spec: api.PodSpec{ + Containers: []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + }, + }, + "invalid labelSelector in preferredDuringSchedulingIgnoredDuringExecution in podaffinity annotations, values should be empty if the operator is Exists": { + ObjectMeta: api.ObjectMeta{ + Name: "123", + Namespace: "ns", + Annotations: map[string]string{ + api.AffinityAnnotationKey: ` + {"podAffinity": {"preferredDuringSchedulingIgnoredDuringExecution": [{ + "weight": 10, + "podAffinityTerm": + { + "labelSelector": { + "matchExpressions": [{ + "key": "key2", + "operator": "Exists", + "values": ["value1", "value2"] + }] + }, + "namespaces": ["ns"], + "topologyKey": "region" + } + }]}}`, + }, + }, + Spec: api.PodSpec{ + Containers: []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + RestartPolicy: api.RestartPolicyAlways, + 
DNSPolicy: api.DNSClusterFirst, + }, + }, + "invalid namespace in preferredDuringSchedulingIgnoredDuringExecution in podaffinity annotations, namespace should be valid": { + ObjectMeta: api.ObjectMeta{ + Name: "123", + Namespace: "ns", + Annotations: map[string]string{ + api.AffinityAnnotationKey: ` + {"podAffinity": {"preferredDuringSchedulingIgnoredDuringExecution": [{ + "weight": 10, + "podAffinityTerm": + { + "labelSelector": { + "matchExpressions": [{ + "key": "key2", + "operator": "Exists", + "values": ["value1", "value2"] + }] + }, + "namespaces": ["INVALID_NAMESPACE"], + "topologyKey": "region" + } + }]}}`, + }, + }, + Spec: api.PodSpec{ + Containers: []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + }, + }, + "invalid labelOperator in preferredDuringSchedulingIgnoredDuringExecution in podantiaffinity annotations, labelOperator should be proper": { + ObjectMeta: api.ObjectMeta{ + Name: "123", + Namespace: "ns", + Annotations: map[string]string{ + api.AffinityAnnotationKey: ` + {"podAntiAffinity": {"preferredDuringSchedulingIgnoredDuringExecution": [{ + "weight": 10, + "podAffinityTerm": + { + "labelSelector": { + "matchExpressions": [{ + "key": "key2", + "operator": "WrongOp", + "values": ["value1", "value2"] + }] + }, + "namespaces": ["ns"], + "topologyKey": "region" + } + }]}}`, + }, + }, + Spec: api.PodSpec{ + Containers: []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + }, + }, + "invalid pod affinity, empty topologyKey is not allowed for hard pod affinity": { + ObjectMeta: api.ObjectMeta{ + Name: "123", + Namespace: "ns", + Annotations: map[string]string{ + api.AffinityAnnotationKey: ` + {"podAffinity": {"requiredDuringSchedulingIgnoredDuringExecution": [{ + "weight": 10, + "podAffinityTerm": + { + "labelSelector": { + "matchExpressions": [{ + "key": "key2", + "operator": "In", + "values": ["value1", "value2"] + }] + }, + "namespaces": ["ns"], + "topologyKey": "" + } + }]}}`, + }, + }, + Spec: api.PodSpec{ + Containers: []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + }, + }, + "invalid pod anti-affinity, empty topologyKey is not allowed for hard pod anti-affinity": { + ObjectMeta: api.ObjectMeta{ + Name: "123", + Namespace: "ns", + Annotations: map[string]string{ + api.AffinityAnnotationKey: ` + {"podAntiAffinity": {"requiredDuringSchedulingIgnoredDuringExecution": [{ + "weight": 10, + "podAffinityTerm": + { + "labelSelector": { + "matchExpressions": [{ + "key": "key2", + "operator": "In", + "values": ["value1", "value2"] + }] + }, + "namespaces": ["ns"], + "topologyKey": "" + } + }]}}`, + }, + }, + Spec: api.PodSpec{ + Containers: []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + }, + }, + "invalid pod affinity, empty topologyKey is not allowed for soft pod affinity": { + ObjectMeta: api.ObjectMeta{ + Name: "123", + Namespace: "ns", + Annotations: map[string]string{ + api.AffinityAnnotationKey: ` + {"podAffinity": {"preferredDuringSchedulingIgnoredDuringExecution": [{ + "weight": 10, + "podAffinityTerm": + { + "labelSelector": { + "matchExpressions": [{ + "key": "key2", + "operator": "In", + "values": ["value1", "value2"] + }] + }, +
"namespaces": ["ns"], + "topologyKey": "" + } + }]}}`, + }, + }, + Spec: api.PodSpec{ + Containers: []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + }, + }, + "invalid toleration key": { + ObjectMeta: api.ObjectMeta{ + Name: "123", + Namespace: "ns", + Annotations: map[string]string{ + api.TolerationsAnnotationKey: ` + [{ + "key": "nospecialchars^=@", + "operator": "Equal", + "value": "bar", + "effect": "NoSchedule" + }]`, + }, + }, + Spec: api.PodSpec{ + Containers: []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + }, + }, + "invalid toleration operator": { + ObjectMeta: api.ObjectMeta{ + Name: "123", + Namespace: "ns", + Annotations: map[string]string{ + api.TolerationsAnnotationKey: ` + [{ + "key": "foo", + "operator": "In", + "value": "bar", + "effect": "NoSchedule" + }]`, + }, + }, + Spec: api.PodSpec{ + Containers: []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + }, + }, + "value must be empty when `operator` is 'Exists'": { + ObjectMeta: api.ObjectMeta{ + Name: "123", + Namespace: "ns", + Annotations: map[string]string{ + api.TolerationsAnnotationKey: ` + [{ + "key": "foo", + "operator": "Exists", + "value": "bar", + "effect": "NoSchedule" + }]`, + }, + }, + Spec: api.PodSpec{ + Containers: []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + }, + }, + } + for k, v := range errorCases { + if errs := ValidatePod(&v); len(errs) == 0 { + t.Errorf("expected failure for %q", k) + } + } +} + +func TestValidatePodUpdate(t *testing.T) { + var ( + activeDeadlineSecondsZero = int64(0) + activeDeadlineSecondsNegative = int64(-30) + activeDeadlineSecondsPositive = int64(30) + activeDeadlineSecondsLarger = int64(31) + + now = unversioned.Now() + grace = int64(30) + grace2 = int64(31) + ) + + tests := []struct { + a api.Pod + b api.Pod + isValid bool + test string + }{ + {api.Pod{}, api.Pod{}, true, "nothing"}, + { + api.Pod{ + ObjectMeta: api.ObjectMeta{Name: "foo"}, + }, + api.Pod{ + ObjectMeta: api.ObjectMeta{Name: "bar"}, + }, + false, + "ids", + }, + { + api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Labels: map[string]string{ + "foo": "bar", + }, + }, + }, + api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Labels: map[string]string{ + "bar": "foo", + }, + }, + }, + true, + "labels", + }, + { + api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Annotations: map[string]string{ + "foo": "bar", + }, + }, + }, + api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Annotations: map[string]string{ + "bar": "foo", + }, + }, + }, + true, + "annotations", + }, + { + api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + }, + Spec: api.PodSpec{ + Containers: []api.Container{ + { + Image: "foo:V1", + }, + }, + }, + }, + api.Pod{ + ObjectMeta: api.ObjectMeta{Name: "foo"}, + Spec: api.PodSpec{ + Containers: []api.Container{ + { + Image: "foo:V2", + }, + { + Image: "bar:V2", + }, + }, + }, + }, + false, + "more containers", + }, + { + api.Pod{ + ObjectMeta: api.ObjectMeta{Name: "foo"}, + Spec: api.PodSpec{Containers: []api.Container{{Image: "foo:V1"}}}, + }, + api.Pod{ + ObjectMeta: api.ObjectMeta{Name: "foo", DeletionTimestamp: &now}, + Spec: 
api.PodSpec{Containers: []api.Container{{Image: "foo:V1"}}}, + }, + true, + "deletion timestamp filled out", + }, + { + api.Pod{ + ObjectMeta: api.ObjectMeta{Name: "foo", DeletionTimestamp: &now, DeletionGracePeriodSeconds: &grace}, + Spec: api.PodSpec{Containers: []api.Container{{Image: "foo:V1"}}}, + }, + api.Pod{ + ObjectMeta: api.ObjectMeta{Name: "foo", DeletionTimestamp: &now, DeletionGracePeriodSeconds: &grace2}, + Spec: api.PodSpec{Containers: []api.Container{{Image: "foo:V1"}}}, + }, + false, + "deletion grace period seconds changed", + }, + { + api.Pod{ + ObjectMeta: api.ObjectMeta{Name: "foo"}, + Spec: api.PodSpec{ + Containers: []api.Container{ + { + Image: "foo:V1", + }, + }, + }, + }, + api.Pod{ + ObjectMeta: api.ObjectMeta{Name: "foo"}, + Spec: api.PodSpec{ + Containers: []api.Container{ + { + Image: "foo:V2", + }, + }, + }, + }, + true, + "image change", + }, + { + api.Pod{ + ObjectMeta: api.ObjectMeta{Name: "foo"}, + Spec: api.PodSpec{ + Containers: []api.Container{ + {}, + }, + }, + }, + api.Pod{ + ObjectMeta: api.ObjectMeta{Name: "foo"}, + Spec: api.PodSpec{ + Containers: []api.Container{ + { + Image: "foo:V2", + }, + }, + }, + }, + false, + "image change to empty", + }, + { + api.Pod{ + Spec: api.PodSpec{}, + }, + api.Pod{ + Spec: api.PodSpec{}, + }, + true, + "activeDeadlineSeconds no change, nil", + }, + { + api.Pod{ + Spec: api.PodSpec{ + ActiveDeadlineSeconds: &activeDeadlineSecondsPositive, + }, + }, + api.Pod{ + Spec: api.PodSpec{ + ActiveDeadlineSeconds: &activeDeadlineSecondsPositive, + }, + }, + true, + "activeDeadlineSeconds no change, set", + }, + { + api.Pod{ + Spec: api.PodSpec{ + ActiveDeadlineSeconds: &activeDeadlineSecondsPositive, + }, + }, + api.Pod{}, + true, + "activeDeadlineSeconds change to positive from nil", + }, + { + api.Pod{ + Spec: api.PodSpec{ + ActiveDeadlineSeconds: &activeDeadlineSecondsPositive, + }, + }, + api.Pod{ + Spec: api.PodSpec{ + ActiveDeadlineSeconds: &activeDeadlineSecondsLarger, + }, + }, + true, + "activeDeadlineSeconds change to smaller positive", + }, + { + api.Pod{ + Spec: api.PodSpec{ + ActiveDeadlineSeconds: &activeDeadlineSecondsLarger, + }, + }, + api.Pod{ + Spec: api.PodSpec{ + ActiveDeadlineSeconds: &activeDeadlineSecondsPositive, + }, + }, + false, + "activeDeadlineSeconds change to larger positive", + }, + + { + api.Pod{ + Spec: api.PodSpec{ + ActiveDeadlineSeconds: &activeDeadlineSecondsNegative, + }, + }, + api.Pod{}, + false, + "activeDeadlineSeconds change to negative from nil", + }, + { + api.Pod{ + Spec: api.PodSpec{ + ActiveDeadlineSeconds: &activeDeadlineSecondsNegative, + }, + }, + api.Pod{ + Spec: api.PodSpec{ + ActiveDeadlineSeconds: &activeDeadlineSecondsPositive, + }, + }, + false, + "activeDeadlineSeconds change to negative from positive", + }, + { + api.Pod{ + Spec: api.PodSpec{ + ActiveDeadlineSeconds: &activeDeadlineSecondsZero, + }, + }, + api.Pod{ + Spec: api.PodSpec{ + ActiveDeadlineSeconds: &activeDeadlineSecondsPositive, + }, + }, + true, + "activeDeadlineSeconds change to zero from positive", + }, + { + api.Pod{ + Spec: api.PodSpec{ + ActiveDeadlineSeconds: &activeDeadlineSecondsZero, + }, + }, + api.Pod{}, + true, + "activeDeadlineSeconds change to zero from nil", + }, + { + api.Pod{}, + api.Pod{ + Spec: api.PodSpec{ + ActiveDeadlineSeconds: &activeDeadlineSecondsPositive, + }, + }, + false, + "activeDeadlineSeconds change to nil from positive", + }, + + { + api.Pod{ + ObjectMeta: api.ObjectMeta{Name: "foo"}, + Spec: api.PodSpec{ + Containers: []api.Container{ + { + Image: "foo:V1", +
Resources: api.ResourceRequirements{ + Limits: getResourceLimits("100m", "0"), + }, + }, + }, + }, + }, + api.Pod{ + ObjectMeta: api.ObjectMeta{Name: "foo"}, + Spec: api.PodSpec{ + Containers: []api.Container{ + { + Image: "foo:V2", + Resources: api.ResourceRequirements{ + Limits: getResourceLimits("1000m", "0"), + }, + }, + }, + }, + }, + false, + "cpu change", + }, + { + api.Pod{ + ObjectMeta: api.ObjectMeta{Name: "foo"}, + Spec: api.PodSpec{ + Containers: []api.Container{ + { + Image: "foo:V1", + Ports: []api.ContainerPort{ + {HostPort: 8080, ContainerPort: 80}, + }, + }, + }, + }, + }, + api.Pod{ + ObjectMeta: api.ObjectMeta{Name: "foo"}, + Spec: api.PodSpec{ + Containers: []api.Container{ + { + Image: "foo:V2", + Ports: []api.ContainerPort{ + {HostPort: 8000, ContainerPort: 80}, + }, + }, + }, + }, + }, + false, + "port change", + }, + { + api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Labels: map[string]string{ + "foo": "bar", + }, + }, + }, + api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Labels: map[string]string{ + "Bar": "foo", + }, + }, + }, + true, + "bad label change", + }, + } + + for _, test := range tests { + test.a.ObjectMeta.ResourceVersion = "1" + test.b.ObjectMeta.ResourceVersion = "1" + errs := ValidatePodUpdate(&test.a, &test.b) + if test.isValid { + if len(errs) != 0 { + t.Errorf("unexpected invalid: %s (%+v)\nA: %+v\nB: %+v", test.test, errs, test.a, test.b) + } + } else { + if len(errs) == 0 { + t.Errorf("unexpected valid: %s\nA: %+v\nB: %+v", test.test, test.a, test.b) + } + } + } +} + +func makeValidService() api.Service { + return api.Service{ + ObjectMeta: api.ObjectMeta{ + Name: "valid", + Namespace: "valid", + Labels: map[string]string{}, + Annotations: map[string]string{}, + ResourceVersion: "1", + }, + Spec: api.ServiceSpec{ + Selector: map[string]string{"key": "val"}, + SessionAffinity: "None", + Type: api.ServiceTypeClusterIP, + Ports: []api.ServicePort{{Name: "p", Protocol: "TCP", Port: 8675, TargetPort: intstr.FromInt(8675)}}, + }, + } +} + +func TestValidateService(t *testing.T) { + testCases := []struct { + name string + tweakSvc func(svc *api.Service) // given a basic valid service, each test case can customize it + numErrs int + }{ + { + name: "missing namespace", + tweakSvc: func(s *api.Service) { + s.Namespace = "" + }, + numErrs: 1, + }, + { + name: "invalid namespace", + tweakSvc: func(s *api.Service) { + s.Namespace = "-123" + }, + numErrs: 1, + }, + { + name: "missing name", + tweakSvc: func(s *api.Service) { + s.Name = "" + }, + numErrs: 1, + }, + { + name: "invalid name", + tweakSvc: func(s *api.Service) { + s.Name = "-123" + }, + numErrs: 1, + }, + { + name: "too long name", + tweakSvc: func(s *api.Service) { + s.Name = strings.Repeat("a", 25) + }, + numErrs: 1, + }, + { + name: "invalid generateName", + tweakSvc: func(s *api.Service) { + s.GenerateName = "-123" + }, + numErrs: 1, + }, + { + name: "too long generateName", + tweakSvc: func(s *api.Service) { + s.GenerateName = strings.Repeat("a", 25) + }, + numErrs: 1, + }, + { + name: "invalid label", + tweakSvc: func(s *api.Service) { + s.Labels["NoUppercaseOrSpecialCharsLike=Equals"] = "bar" + }, + numErrs: 1, + }, + { + name: "invalid annotation", + tweakSvc: func(s *api.Service) { + s.Annotations["NoSpecialCharsLike=Equals"] = "bar" + }, + numErrs: 1, + }, + { + name: "nil selector", + tweakSvc: func(s *api.Service) { + s.Spec.Selector = nil + }, + numErrs: 0, + }, + { + name: "invalid selector", + tweakSvc: func(s *api.Service) { + 
s.Spec.Selector["NoSpecialCharsLike=Equals"] = "bar" + }, + numErrs: 1, + }, + { + name: "missing session affinity", + tweakSvc: func(s *api.Service) { + s.Spec.SessionAffinity = "" + }, + numErrs: 1, + }, + { + name: "missing type", + tweakSvc: func(s *api.Service) { + s.Spec.Type = "" + }, + numErrs: 1, + }, + { + name: "missing ports", + tweakSvc: func(s *api.Service) { + s.Spec.Ports = nil + }, + numErrs: 1, + }, + { + name: "missing ports but headless", + tweakSvc: func(s *api.Service) { + s.Spec.Ports = nil + s.Spec.ClusterIP = api.ClusterIPNone + }, + numErrs: 0, + }, + { + name: "empty port[0] name", + tweakSvc: func(s *api.Service) { + s.Spec.Ports[0].Name = "" + }, + numErrs: 0, + }, + { + name: "empty port[1] name", + tweakSvc: func(s *api.Service) { + s.Spec.Ports = append(s.Spec.Ports, api.ServicePort{Name: "", Protocol: "TCP", Port: 12345, TargetPort: intstr.FromInt(12345)}) + }, + numErrs: 1, + }, + { + name: "empty multi-port port[0] name", + tweakSvc: func(s *api.Service) { + s.Spec.Ports[0].Name = "" + s.Spec.Ports = append(s.Spec.Ports, api.ServicePort{Name: "p", Protocol: "TCP", Port: 12345, TargetPort: intstr.FromInt(12345)}) + }, + numErrs: 1, + }, + { + name: "invalid port name", + tweakSvc: func(s *api.Service) { + s.Spec.Ports[0].Name = "INVALID" + }, + numErrs: 1, + }, + { + name: "missing protocol", + tweakSvc: func(s *api.Service) { + s.Spec.Ports[0].Protocol = "" + }, + numErrs: 1, + }, + { + name: "invalid protocol", + tweakSvc: func(s *api.Service) { + s.Spec.Ports[0].Protocol = "INVALID" + }, + numErrs: 1, + }, + { + name: "invalid cluster ip", + tweakSvc: func(s *api.Service) { + s.Spec.ClusterIP = "invalid" + }, + numErrs: 1, + }, + { + name: "missing port", + tweakSvc: func(s *api.Service) { + s.Spec.Ports[0].Port = 0 + }, + numErrs: 1, + }, + { + name: "invalid port", + tweakSvc: func(s *api.Service) { + s.Spec.Ports[0].Port = 65536 + }, + numErrs: 1, + }, + { + name: "invalid TargetPort int", + tweakSvc: func(s *api.Service) { + s.Spec.Ports[0].TargetPort = intstr.FromInt(65536) + }, + numErrs: 1, + }, + { + name: "valid port headless", + tweakSvc: func(s *api.Service) { + s.Spec.Ports[0].Port = 11722 + s.Spec.Ports[0].TargetPort = intstr.FromInt(11722) + s.Spec.ClusterIP = api.ClusterIPNone + }, + numErrs: 0, + }, + { + name: "invalid port headless 1", + tweakSvc: func(s *api.Service) { + s.Spec.Ports[0].Port = 11722 + s.Spec.Ports[0].TargetPort = intstr.FromInt(11721) + s.Spec.ClusterIP = api.ClusterIPNone + }, + // in the v1 API, targetPorts on headless services were tolerated. + // once we have version-specific validation, we can reject this on newer API versions, but until then, we have to tolerate it for compatibility. + // numErrs: 1, + numErrs: 0, + }, + { + name: "invalid port headless 2", + tweakSvc: func(s *api.Service) { + s.Spec.Ports[0].Port = 11722 + s.Spec.Ports[0].TargetPort = intstr.FromString("target") + s.Spec.ClusterIP = api.ClusterIPNone + }, + // in the v1 API, targetPorts on headless services were tolerated. + // once we have version-specific validation, we can reject this on newer API versions, but until then, we have to tolerate it for compatibility. 
+ // numErrs: 1, + numErrs: 0, + }, + { + name: "invalid publicIPs localhost", + tweakSvc: func(s *api.Service) { + s.Spec.ExternalIPs = []string{"127.0.0.1"} + }, + numErrs: 1, + }, + { + name: "invalid publicIPs", + tweakSvc: func(s *api.Service) { + s.Spec.ExternalIPs = []string{"0.0.0.0"} + }, + numErrs: 1, + }, + { + name: "invalid publicIPs host", + tweakSvc: func(s *api.Service) { + s.Spec.ExternalIPs = []string{"myhost.mydomain"} + }, + numErrs: 1, + }, + { + name: "dup port name", + tweakSvc: func(s *api.Service) { + s.Spec.Ports[0].Name = "p" + s.Spec.Ports = append(s.Spec.Ports, api.ServicePort{Name: "p", Port: 12345, Protocol: "TCP", TargetPort: intstr.FromInt(12345)}) + }, + numErrs: 1, + }, + { + name: "valid load balancer protocol UDP 1", + tweakSvc: func(s *api.Service) { + s.Spec.Type = api.ServiceTypeLoadBalancer + s.Spec.Ports[0].Protocol = "UDP" + }, + numErrs: 0, + }, + { + name: "valid load balancer protocol UDP 2", + tweakSvc: func(s *api.Service) { + s.Spec.Type = api.ServiceTypeLoadBalancer + s.Spec.Ports[0] = api.ServicePort{Name: "q", Port: 12345, Protocol: "UDP", TargetPort: intstr.FromInt(12345)} + }, + numErrs: 0, + }, + { + name: "invalid load balancer with mix protocol", + tweakSvc: func(s *api.Service) { + s.Spec.Type = api.ServiceTypeLoadBalancer + s.Spec.Ports = append(s.Spec.Ports, api.ServicePort{Name: "q", Port: 12345, Protocol: "UDP", TargetPort: intstr.FromInt(12345)}) + }, + numErrs: 1, + }, + { + name: "valid 1", + tweakSvc: func(s *api.Service) { + // do nothing + }, + numErrs: 0, + }, + { + name: "valid 2", + tweakSvc: func(s *api.Service) { + s.Spec.Ports[0].Protocol = "UDP" + s.Spec.Ports[0].TargetPort = intstr.FromInt(12345) + }, + numErrs: 0, + }, + { + name: "valid 3", + tweakSvc: func(s *api.Service) { + s.Spec.Ports[0].TargetPort = intstr.FromString("http") + }, + numErrs: 0, + }, + { + name: "valid cluster ip - none ", + tweakSvc: func(s *api.Service) { + s.Spec.ClusterIP = "None" + }, + numErrs: 0, + }, + { + name: "valid cluster ip - empty", + tweakSvc: func(s *api.Service) { + s.Spec.ClusterIP = "" + s.Spec.Ports[0].TargetPort = intstr.FromString("http") + }, + numErrs: 0, + }, + { + name: "valid type - cluster", + tweakSvc: func(s *api.Service) { + s.Spec.Type = api.ServiceTypeClusterIP + }, + numErrs: 0, + }, + { + name: "valid type - loadbalancer", + tweakSvc: func(s *api.Service) { + s.Spec.Type = api.ServiceTypeLoadBalancer + }, + numErrs: 0, + }, + { + name: "valid type loadbalancer 2 ports", + tweakSvc: func(s *api.Service) { + s.Spec.Type = api.ServiceTypeLoadBalancer + s.Spec.Ports = append(s.Spec.Ports, api.ServicePort{Name: "q", Port: 12345, Protocol: "TCP", TargetPort: intstr.FromInt(12345)}) + }, + numErrs: 0, + }, + { + name: "valid external load balancer 2 ports", + tweakSvc: func(s *api.Service) { + s.Spec.Type = api.ServiceTypeLoadBalancer + s.Spec.Ports = append(s.Spec.Ports, api.ServicePort{Name: "q", Port: 12345, Protocol: "TCP", TargetPort: intstr.FromInt(12345)}) + }, + numErrs: 0, + }, + { + name: "duplicate nodeports", + tweakSvc: func(s *api.Service) { + s.Spec.Type = api.ServiceTypeNodePort + s.Spec.Ports = append(s.Spec.Ports, api.ServicePort{Name: "q", Port: 1, Protocol: "TCP", NodePort: 1, TargetPort: intstr.FromInt(1)}) + s.Spec.Ports = append(s.Spec.Ports, api.ServicePort{Name: "r", Port: 2, Protocol: "TCP", NodePort: 1, TargetPort: intstr.FromInt(2)}) + }, + numErrs: 1, + }, + { + name: "duplicate nodeports (different protocols)", + tweakSvc: func(s *api.Service) { + s.Spec.Type = api.ServiceTypeNodePort 
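+ // NodePort collisions are keyed on (port, protocol): reusing nodePort 1 on UDP after TCP is accepted here (numErrs: 0), while the previous case, which reuses it on TCP, expects an error.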
+				s.Spec.Ports = append(s.Spec.Ports, api.ServicePort{Name: "q", Port: 1, Protocol: "TCP", NodePort: 1, TargetPort: intstr.FromInt(1)})
+				s.Spec.Ports = append(s.Spec.Ports, api.ServicePort{Name: "r", Port: 2, Protocol: "UDP", NodePort: 1, TargetPort: intstr.FromInt(2)})
+			},
+			numErrs: 0,
+		},
+		{
+			name: "valid type - cluster",
+			tweakSvc: func(s *api.Service) {
+				s.Spec.Type = api.ServiceTypeClusterIP
+			},
+			numErrs: 0,
+		},
+		{
+			name: "valid type - nodeport",
+			tweakSvc: func(s *api.Service) {
+				s.Spec.Type = api.ServiceTypeNodePort
+			},
+			numErrs: 0,
+		},
+		{
+			name: "valid type - loadbalancer",
+			tweakSvc: func(s *api.Service) {
+				s.Spec.Type = api.ServiceTypeLoadBalancer
+			},
+			numErrs: 0,
+		},
+		{
+			name: "valid type loadbalancer 2 ports",
+			tweakSvc: func(s *api.Service) {
+				s.Spec.Type = api.ServiceTypeLoadBalancer
+				s.Spec.Ports = append(s.Spec.Ports, api.ServicePort{Name: "q", Port: 12345, Protocol: "TCP", TargetPort: intstr.FromInt(12345)})
+			},
+			numErrs: 0,
+		},
+		{
+			name: "valid type loadbalancer with NodePort",
+			tweakSvc: func(s *api.Service) {
+				s.Spec.Type = api.ServiceTypeLoadBalancer
+				s.Spec.Ports = append(s.Spec.Ports, api.ServicePort{Name: "q", Port: 12345, Protocol: "TCP", NodePort: 12345, TargetPort: intstr.FromInt(12345)})
+			},
+			numErrs: 0,
+		},
+		{
+			name: "valid type=NodePort service with NodePort",
+			tweakSvc: func(s *api.Service) {
+				s.Spec.Type = api.ServiceTypeNodePort
+				s.Spec.Ports = append(s.Spec.Ports, api.ServicePort{Name: "q", Port: 12345, Protocol: "TCP", NodePort: 12345, TargetPort: intstr.FromInt(12345)})
+			},
+			numErrs: 0,
+		},
+		{
+			name: "valid type=NodePort service without NodePort",
+			tweakSvc: func(s *api.Service) {
+				s.Spec.Type = api.ServiceTypeNodePort
+				s.Spec.Ports = append(s.Spec.Ports, api.ServicePort{Name: "q", Port: 12345, Protocol: "TCP", TargetPort: intstr.FromInt(12345)})
+			},
+			numErrs: 0,
+		},
+		{
+			name: "valid cluster service without NodePort",
+			tweakSvc: func(s *api.Service) {
+				s.Spec.Type = api.ServiceTypeClusterIP
+				s.Spec.Ports = append(s.Spec.Ports, api.ServicePort{Name: "q", Port: 12345, Protocol: "TCP", TargetPort: intstr.FromInt(12345)})
+			},
+			numErrs: 0,
+		},
+		{
+			name: "invalid cluster service with NodePort",
+			tweakSvc: func(s *api.Service) {
+				s.Spec.Type = api.ServiceTypeClusterIP
+				s.Spec.Ports = append(s.Spec.Ports, api.ServicePort{Name: "q", Port: 12345, Protocol: "TCP", NodePort: 12345, TargetPort: intstr.FromInt(12345)})
+			},
+			numErrs: 1,
+		},
+		{
+			name: "invalid public service with duplicate NodePort",
+			tweakSvc: func(s *api.Service) {
+				s.Spec.Type = api.ServiceTypeNodePort
+				s.Spec.Ports = append(s.Spec.Ports, api.ServicePort{Name: "p1", Port: 1, Protocol: "TCP", NodePort: 1, TargetPort: intstr.FromInt(1)})
+				s.Spec.Ports = append(s.Spec.Ports, api.ServicePort{Name: "p2", Port: 2, Protocol: "TCP", NodePort: 1, TargetPort: intstr.FromInt(2)})
+			},
+			numErrs: 1,
+		},
+		{
+			name: "valid type=LoadBalancer",
+			tweakSvc: func(s *api.Service) {
+				s.Spec.Type = api.ServiceTypeLoadBalancer
+				s.Spec.Ports = append(s.Spec.Ports, api.ServicePort{Name: "q", Port: 12345, Protocol: "TCP", TargetPort: intstr.FromInt(12345)})
+			},
+			numErrs: 0,
+		},
+		{
+			// For now we open firewalls, and it's insecure if we open 10250 (the
+			// kubelet port); remove this case when we have better protections in
+			// place.
+ name: "invalid port type=LoadBalancer", + tweakSvc: func(s *api.Service) { + s.Spec.Type = api.ServiceTypeLoadBalancer + s.Spec.Ports = append(s.Spec.Ports, api.ServicePort{Name: "kubelet", Port: 10250, Protocol: "TCP", TargetPort: intstr.FromInt(12345)}) + }, + numErrs: 1, + }, + { + name: "valid LoadBalancer source range annotation", + tweakSvc: func(s *api.Service) { + s.Spec.Type = api.ServiceTypeLoadBalancer + s.Annotations[service.AnnotationLoadBalancerSourceRangesKey] = "1.2.3.4/8, 5.6.7.8/16" + }, + numErrs: 0, + }, + { + name: "empty LoadBalancer source range annotation", + tweakSvc: func(s *api.Service) { + s.Spec.Type = api.ServiceTypeLoadBalancer + s.Annotations[service.AnnotationLoadBalancerSourceRangesKey] = "" + }, + numErrs: 0, + }, + { + name: "invalid LoadBalancer source range annotation (hostname)", + tweakSvc: func(s *api.Service) { + s.Annotations[service.AnnotationLoadBalancerSourceRangesKey] = "foo.bar" + }, + numErrs: 2, + }, + { + name: "invalid LoadBalancer source range annotation (invalid CIDR)", + tweakSvc: func(s *api.Service) { + s.Spec.Type = api.ServiceTypeLoadBalancer + s.Annotations[service.AnnotationLoadBalancerSourceRangesKey] = "1.2.3.4/33" + }, + numErrs: 1, + }, + { + name: "invalid source range for non LoadBalancer type service", + tweakSvc: func(s *api.Service) { + s.Spec.LoadBalancerSourceRanges = []string{"1.2.3.4/8", "5.6.7.8/16"} + }, + numErrs: 1, + }, + { + name: "valid LoadBalancer source range", + tweakSvc: func(s *api.Service) { + s.Spec.Type = api.ServiceTypeLoadBalancer + s.Spec.LoadBalancerSourceRanges = []string{"1.2.3.4/8", "5.6.7.8/16"} + }, + numErrs: 0, + }, + { + name: "empty LoadBalancer source range", + tweakSvc: func(s *api.Service) { + s.Spec.Type = api.ServiceTypeLoadBalancer + s.Spec.LoadBalancerSourceRanges = []string{" "} + }, + numErrs: 1, + }, + { + name: "invalid LoadBalancer source range", + tweakSvc: func(s *api.Service) { + s.Spec.Type = api.ServiceTypeLoadBalancer + s.Spec.LoadBalancerSourceRanges = []string{"foo.bar"} + }, + numErrs: 1, + }, + } + + for _, tc := range testCases { + svc := makeValidService() + tc.tweakSvc(&svc) + errs := ValidateService(&svc) + if len(errs) != tc.numErrs { + t.Errorf("Unexpected error list for case %q: %v", tc.name, errs.ToAggregate()) + } + } +} + +func TestValidateReplicationControllerStatusUpdate(t *testing.T) { + validSelector := map[string]string{"a": "b"} + validPodTemplate := api.PodTemplate{ + Template: api.PodTemplateSpec{ + ObjectMeta: api.ObjectMeta{ + Labels: validSelector, + }, + Spec: api.PodSpec{ + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + Containers: []api.Container{{Name: "abc", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + }, + }, + } + type rcUpdateTest struct { + old api.ReplicationController + update api.ReplicationController + } + successCases := []rcUpdateTest{ + { + old: api.ReplicationController{ + ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + Spec: api.ReplicationControllerSpec{ + Selector: validSelector, + Template: &validPodTemplate.Template, + }, + Status: api.ReplicationControllerStatus{ + Replicas: 2, + }, + }, + update: api.ReplicationController{ + ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + Spec: api.ReplicationControllerSpec{ + Replicas: 3, + Selector: validSelector, + Template: &validPodTemplate.Template, + }, + Status: api.ReplicationControllerStatus{ + Replicas: 4, + }, + }, + }, + } + for _, successCase := range successCases { + 
successCase.old.ObjectMeta.ResourceVersion = "1" + successCase.update.ObjectMeta.ResourceVersion = "1" + if errs := ValidateReplicationControllerStatusUpdate(&successCase.update, &successCase.old); len(errs) != 0 { + t.Errorf("expected success: %v", errs) + } + } + errorCases := map[string]rcUpdateTest{ + "negative replicas": { + old: api.ReplicationController{ + ObjectMeta: api.ObjectMeta{Name: "", Namespace: api.NamespaceDefault}, + Spec: api.ReplicationControllerSpec{ + Selector: validSelector, + Template: &validPodTemplate.Template, + }, + Status: api.ReplicationControllerStatus{ + Replicas: 3, + }, + }, + update: api.ReplicationController{ + ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + Spec: api.ReplicationControllerSpec{ + Replicas: 2, + Selector: validSelector, + Template: &validPodTemplate.Template, + }, + Status: api.ReplicationControllerStatus{ + Replicas: -3, + }, + }, + }, + } + for testName, errorCase := range errorCases { + if errs := ValidateReplicationControllerStatusUpdate(&errorCase.update, &errorCase.old); len(errs) == 0 { + t.Errorf("expected failure: %s", testName) + } + } + +} + +func TestValidateReplicationControllerUpdate(t *testing.T) { + validSelector := map[string]string{"a": "b"} + validPodTemplate := api.PodTemplate{ + Template: api.PodTemplateSpec{ + ObjectMeta: api.ObjectMeta{ + Labels: validSelector, + }, + Spec: api.PodSpec{ + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + Containers: []api.Container{{Name: "abc", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + }, + }, + } + readWriteVolumePodTemplate := api.PodTemplate{ + Template: api.PodTemplateSpec{ + ObjectMeta: api.ObjectMeta{ + Labels: validSelector, + }, + Spec: api.PodSpec{ + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + Containers: []api.Container{{Name: "abc", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + Volumes: []api.Volume{{Name: "gcepd", VolumeSource: api.VolumeSource{GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{PDName: "my-PD", FSType: "ext4", Partition: 1, ReadOnly: false}}}}, + }, + }, + } + invalidSelector := map[string]string{"NoUppercaseOrSpecialCharsLike=Equals": "b"} + invalidPodTemplate := api.PodTemplate{ + Template: api.PodTemplateSpec{ + Spec: api.PodSpec{ + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + }, + ObjectMeta: api.ObjectMeta{ + Labels: invalidSelector, + }, + }, + } + type rcUpdateTest struct { + old api.ReplicationController + update api.ReplicationController + } + successCases := []rcUpdateTest{ + { + old: api.ReplicationController{ + ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + Spec: api.ReplicationControllerSpec{ + Selector: validSelector, + Template: &validPodTemplate.Template, + }, + }, + update: api.ReplicationController{ + ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + Spec: api.ReplicationControllerSpec{ + Replicas: 3, + Selector: validSelector, + Template: &validPodTemplate.Template, + }, + }, + }, + { + old: api.ReplicationController{ + ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + Spec: api.ReplicationControllerSpec{ + Selector: validSelector, + Template: &validPodTemplate.Template, + }, + }, + update: api.ReplicationController{ + ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + Spec: api.ReplicationControllerSpec{ + Replicas: 1, + Selector: validSelector, + Template: &readWriteVolumePodTemplate.Template, 
+ }, + }, + }, + } + for _, successCase := range successCases { + successCase.old.ObjectMeta.ResourceVersion = "1" + successCase.update.ObjectMeta.ResourceVersion = "1" + if errs := ValidateReplicationControllerUpdate(&successCase.update, &successCase.old); len(errs) != 0 { + t.Errorf("expected success: %v", errs) + } + } + errorCases := map[string]rcUpdateTest{ + "more than one read/write": { + old: api.ReplicationController{ + ObjectMeta: api.ObjectMeta{Name: "", Namespace: api.NamespaceDefault}, + Spec: api.ReplicationControllerSpec{ + Selector: validSelector, + Template: &validPodTemplate.Template, + }, + }, + update: api.ReplicationController{ + ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + Spec: api.ReplicationControllerSpec{ + Replicas: 2, + Selector: validSelector, + Template: &readWriteVolumePodTemplate.Template, + }, + }, + }, + "invalid selector": { + old: api.ReplicationController{ + ObjectMeta: api.ObjectMeta{Name: "", Namespace: api.NamespaceDefault}, + Spec: api.ReplicationControllerSpec{ + Selector: validSelector, + Template: &validPodTemplate.Template, + }, + }, + update: api.ReplicationController{ + ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + Spec: api.ReplicationControllerSpec{ + Replicas: 2, + Selector: invalidSelector, + Template: &validPodTemplate.Template, + }, + }, + }, + "invalid pod": { + old: api.ReplicationController{ + ObjectMeta: api.ObjectMeta{Name: "", Namespace: api.NamespaceDefault}, + Spec: api.ReplicationControllerSpec{ + Selector: validSelector, + Template: &validPodTemplate.Template, + }, + }, + update: api.ReplicationController{ + ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + Spec: api.ReplicationControllerSpec{ + Replicas: 2, + Selector: validSelector, + Template: &invalidPodTemplate.Template, + }, + }, + }, + "negative replicas": { + old: api.ReplicationController{ + ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + Spec: api.ReplicationControllerSpec{ + Selector: validSelector, + Template: &validPodTemplate.Template, + }, + }, + update: api.ReplicationController{ + ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + Spec: api.ReplicationControllerSpec{ + Replicas: -1, + Selector: validSelector, + Template: &validPodTemplate.Template, + }, + }, + }, + } + for testName, errorCase := range errorCases { + if errs := ValidateReplicationControllerUpdate(&errorCase.update, &errorCase.old); len(errs) == 0 { + t.Errorf("expected failure: %s", testName) + } + } +} + +func TestValidateReplicationController(t *testing.T) { + validSelector := map[string]string{"a": "b"} + validPodTemplate := api.PodTemplate{ + Template: api.PodTemplateSpec{ + ObjectMeta: api.ObjectMeta{ + Labels: validSelector, + }, + Spec: api.PodSpec{ + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + Containers: []api.Container{{Name: "abc", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + }, + }, + } + readWriteVolumePodTemplate := api.PodTemplate{ + Template: api.PodTemplateSpec{ + ObjectMeta: api.ObjectMeta{ + Labels: validSelector, + }, + Spec: api.PodSpec{ + Volumes: []api.Volume{{Name: "gcepd", VolumeSource: api.VolumeSource{GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{PDName: "my-PD", FSType: "ext4", Partition: 1, ReadOnly: false}}}}, + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + Containers: []api.Container{{Name: "abc", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + 
}, + }, + } + invalidSelector := map[string]string{"NoUppercaseOrSpecialCharsLike=Equals": "b"} + invalidPodTemplate := api.PodTemplate{ + Template: api.PodTemplateSpec{ + Spec: api.PodSpec{ + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + }, + ObjectMeta: api.ObjectMeta{ + Labels: invalidSelector, + }, + }, + } + successCases := []api.ReplicationController{ + { + ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + Spec: api.ReplicationControllerSpec{ + Selector: validSelector, + Template: &validPodTemplate.Template, + }, + }, + { + ObjectMeta: api.ObjectMeta{Name: "abc-123", Namespace: api.NamespaceDefault}, + Spec: api.ReplicationControllerSpec{ + Selector: validSelector, + Template: &validPodTemplate.Template, + }, + }, + { + ObjectMeta: api.ObjectMeta{Name: "abc-123", Namespace: api.NamespaceDefault}, + Spec: api.ReplicationControllerSpec{ + Replicas: 1, + Selector: validSelector, + Template: &readWriteVolumePodTemplate.Template, + }, + }, + } + for _, successCase := range successCases { + if errs := ValidateReplicationController(&successCase); len(errs) != 0 { + t.Errorf("expected success: %v", errs) + } + } + + errorCases := map[string]api.ReplicationController{ + "zero-length ID": { + ObjectMeta: api.ObjectMeta{Name: "", Namespace: api.NamespaceDefault}, + Spec: api.ReplicationControllerSpec{ + Selector: validSelector, + Template: &validPodTemplate.Template, + }, + }, + "missing-namespace": { + ObjectMeta: api.ObjectMeta{Name: "abc-123"}, + Spec: api.ReplicationControllerSpec{ + Selector: validSelector, + Template: &validPodTemplate.Template, + }, + }, + "empty selector": { + ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + Spec: api.ReplicationControllerSpec{ + Template: &validPodTemplate.Template, + }, + }, + "selector_doesnt_match": { + ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + Spec: api.ReplicationControllerSpec{ + Selector: map[string]string{"foo": "bar"}, + Template: &validPodTemplate.Template, + }, + }, + "invalid manifest": { + ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + Spec: api.ReplicationControllerSpec{ + Selector: validSelector, + }, + }, + "read-write persistent disk with > 1 pod": { + ObjectMeta: api.ObjectMeta{Name: "abc"}, + Spec: api.ReplicationControllerSpec{ + Replicas: 2, + Selector: validSelector, + Template: &readWriteVolumePodTemplate.Template, + }, + }, + "negative_replicas": { + ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + Spec: api.ReplicationControllerSpec{ + Replicas: -1, + Selector: validSelector, + }, + }, + "invalid_label": { + ObjectMeta: api.ObjectMeta{ + Name: "abc-123", + Namespace: api.NamespaceDefault, + Labels: map[string]string{ + "NoUppercaseOrSpecialCharsLike=Equals": "bar", + }, + }, + Spec: api.ReplicationControllerSpec{ + Selector: validSelector, + Template: &validPodTemplate.Template, + }, + }, + "invalid_label 2": { + ObjectMeta: api.ObjectMeta{ + Name: "abc-123", + Namespace: api.NamespaceDefault, + Labels: map[string]string{ + "NoUppercaseOrSpecialCharsLike=Equals": "bar", + }, + }, + Spec: api.ReplicationControllerSpec{ + Template: &invalidPodTemplate.Template, + }, + }, + "invalid_annotation": { + ObjectMeta: api.ObjectMeta{ + Name: "abc-123", + Namespace: api.NamespaceDefault, + Annotations: map[string]string{ + "NoUppercaseOrSpecialCharsLike=Equals": "bar", + }, + }, + Spec: api.ReplicationControllerSpec{ + Selector: validSelector, + Template: 
&validPodTemplate.Template, + }, + }, + "invalid restart policy 1": { + ObjectMeta: api.ObjectMeta{ + Name: "abc-123", + Namespace: api.NamespaceDefault, + }, + Spec: api.ReplicationControllerSpec{ + Selector: validSelector, + Template: &api.PodTemplateSpec{ + Spec: api.PodSpec{ + RestartPolicy: api.RestartPolicyOnFailure, + DNSPolicy: api.DNSClusterFirst, + Containers: []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + }, + ObjectMeta: api.ObjectMeta{ + Labels: validSelector, + }, + }, + }, + }, + "invalid restart policy 2": { + ObjectMeta: api.ObjectMeta{ + Name: "abc-123", + Namespace: api.NamespaceDefault, + }, + Spec: api.ReplicationControllerSpec{ + Selector: validSelector, + Template: &api.PodTemplateSpec{ + Spec: api.PodSpec{ + RestartPolicy: api.RestartPolicyNever, + DNSPolicy: api.DNSClusterFirst, + Containers: []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + }, + ObjectMeta: api.ObjectMeta{ + Labels: validSelector, + }, + }, + }, + }, + } + for k, v := range errorCases { + errs := ValidateReplicationController(&v) + if len(errs) == 0 { + t.Errorf("expected failure for %s", k) + } + for i := range errs { + field := errs[i].Field + if !strings.HasPrefix(field, "spec.template.") && + field != "metadata.name" && + field != "metadata.namespace" && + field != "spec.selector" && + field != "spec.template" && + field != "GCEPersistentDisk.ReadOnly" && + field != "spec.replicas" && + field != "spec.template.labels" && + field != "metadata.annotations" && + field != "metadata.labels" && + field != "status.replicas" { + t.Errorf("%s: missing prefix for: %v", k, errs[i]) + } + } + } +} + +func TestValidateNode(t *testing.T) { + validSelector := map[string]string{"a": "b"} + invalidSelector := map[string]string{"NoUppercaseOrSpecialCharsLike=Equals": "b"} + successCases := []api.Node{ + { + ObjectMeta: api.ObjectMeta{ + Name: "abc", + Labels: validSelector, + }, + Status: api.NodeStatus{ + Addresses: []api.NodeAddress{ + {Type: api.NodeLegacyHostIP, Address: "something"}, + }, + Capacity: api.ResourceList{ + api.ResourceName(api.ResourceCPU): resource.MustParse("10"), + api.ResourceName(api.ResourceMemory): resource.MustParse("10G"), + api.ResourceName("my.org/gpu"): resource.MustParse("10"), + }, + }, + Spec: api.NodeSpec{ + ExternalID: "external", + }, + }, + { + ObjectMeta: api.ObjectMeta{ + Name: "abc", + }, + Status: api.NodeStatus{ + Addresses: []api.NodeAddress{ + {Type: api.NodeLegacyHostIP, Address: "something"}, + }, + Capacity: api.ResourceList{ + api.ResourceName(api.ResourceCPU): resource.MustParse("10"), + api.ResourceName(api.ResourceMemory): resource.MustParse("0"), + }, + }, + Spec: api.NodeSpec{ + ExternalID: "external", + }, + }, + { + ObjectMeta: api.ObjectMeta{ + Name: "dedicated-node1", + // Add a valid taint to a node + Annotations: map[string]string{ + api.TaintsAnnotationKey: ` + [{ + "key": "GPU", + "value": "true", + "effect": "NoSchedule" + }]`, + }, + }, + Status: api.NodeStatus{ + Addresses: []api.NodeAddress{ + {Type: api.NodeLegacyHostIP, Address: "something"}, + }, + Capacity: api.ResourceList{ + api.ResourceName(api.ResourceCPU): resource.MustParse("10"), + api.ResourceName(api.ResourceMemory): resource.MustParse("0"), + }, + }, + Spec: api.NodeSpec{ + ExternalID: "external", + }, + }, + } + for _, successCase := range successCases { + if errs := ValidateNode(&successCase); len(errs) != 0 { + t.Errorf("expected success: %v", errs) + } + } + + errorCases := map[string]api.Node{ + "zero-length 
Name": { + ObjectMeta: api.ObjectMeta{ + Name: "", + Labels: validSelector, + }, + Status: api.NodeStatus{ + Addresses: []api.NodeAddress{}, + Capacity: api.ResourceList{ + api.ResourceName(api.ResourceCPU): resource.MustParse("10"), + api.ResourceName(api.ResourceMemory): resource.MustParse("10G"), + }, + }, + Spec: api.NodeSpec{ + ExternalID: "external", + }, + }, + "invalid-labels": { + ObjectMeta: api.ObjectMeta{ + Name: "abc-123", + Labels: invalidSelector, + }, + Status: api.NodeStatus{ + Capacity: api.ResourceList{ + api.ResourceName(api.ResourceCPU): resource.MustParse("10"), + api.ResourceName(api.ResourceMemory): resource.MustParse("10G"), + }, + }, + Spec: api.NodeSpec{ + ExternalID: "external", + }, + }, + "missing-external-id": { + ObjectMeta: api.ObjectMeta{ + Name: "abc-123", + Labels: validSelector, + }, + Status: api.NodeStatus{ + Capacity: api.ResourceList{ + api.ResourceName(api.ResourceCPU): resource.MustParse("10"), + api.ResourceName(api.ResourceMemory): resource.MustParse("10G"), + }, + }, + }, + "missing-taint-key": { + + ObjectMeta: api.ObjectMeta{ + Name: "dedicated-node1", + // Add a taint with an empty key to a node + Annotations: map[string]string{ + api.TaintsAnnotationKey: ` + [{ + "key": "", + "value": "special-user-1", + "effect": "NoSchedule" + }]`, + }, + }, + Spec: api.NodeSpec{ + ExternalID: "external", + }, + }, + "bad-taint-key": { + ObjectMeta: api.ObjectMeta{ + Name: "dedicated-node1", + // Add a taint with an empty key to a node + Annotations: map[string]string{ + api.TaintsAnnotationKey: ` + [{ + "key": "NoUppercaseOrSpecialCharsLike=Equals", + "value": "special-user-1", + "effect": "NoSchedule" + }]`, + }, + }, + Spec: api.NodeSpec{ + ExternalID: "external", + }, + }, + "bad-taint-value": { + ObjectMeta: api.ObjectMeta{ + Name: "dedicated-node2", + Annotations: map[string]string{ + api.TaintsAnnotationKey: ` + [{ + "key": "dedicated", + "value": "some\\bad\\value", + "effect": "NoSchedule" + }]`, + }, + }, + Status: api.NodeStatus{ + Addresses: []api.NodeAddress{ + {Type: api.NodeLegacyHostIP, Address: "something"}, + }, + Capacity: api.ResourceList{ + api.ResourceName(api.ResourceCPU): resource.MustParse("10"), + api.ResourceName(api.ResourceMemory): resource.MustParse("0"), + }, + }, + // Add a taint with an empty value to a node + Spec: api.NodeSpec{ + ExternalID: "external", + }, + }, + "missing-taint-effect": { + ObjectMeta: api.ObjectMeta{ + Name: "dedicated-node3", + // Add a taint with an empty effect to a node + Annotations: map[string]string{ + api.TaintsAnnotationKey: ` + [{ + "key": "dedicated", + "value": "special-user-3", + "effect": "" + }]`, + }, + }, + Status: api.NodeStatus{ + Addresses: []api.NodeAddress{ + {Type: api.NodeLegacyHostIP, Address: "something"}, + }, + Capacity: api.ResourceList{ + api.ResourceName(api.ResourceCPU): resource.MustParse("10"), + api.ResourceName(api.ResourceMemory): resource.MustParse("0"), + }, + }, + Spec: api.NodeSpec{ + ExternalID: "external", + }, + }, + "invalide-taint-effect": { + ObjectMeta: api.ObjectMeta{ + Name: "dedicated-node3", + // Add a taint with an empty effect to a node + Annotations: map[string]string{ + api.TaintsAnnotationKey: ` + [{ + "key": "dedicated", + "value": "special-user-3", + "effect": "NoExecute" + }]`, + }, + }, + Status: api.NodeStatus{ + Addresses: []api.NodeAddress{ + {Type: api.NodeLegacyHostIP, Address: "something"}, + }, + Capacity: api.ResourceList{ + api.ResourceName(api.ResourceCPU): resource.MustParse("10"), + api.ResourceName(api.ResourceMemory): 
resource.MustParse("0"), + }, + }, + Spec: api.NodeSpec{ + ExternalID: "external", + }, + }, + } + for k, v := range errorCases { + errs := ValidateNode(&v) + if len(errs) == 0 { + t.Errorf("expected failure for %s", k) + } + for i := range errs { + field := errs[i].Field + expectedFields := map[string]bool{ + "metadata.name": true, + "metadata.labels": true, + "metadata.annotations": true, + "metadata.namespace": true, + "spec.externalID": true, + "metadata.annotations.scheduler.alpha.kubernetes.io/taints[0].key": true, + "metadata.annotations.scheduler.alpha.kubernetes.io/taints[0].value": true, + "metadata.annotations.scheduler.alpha.kubernetes.io/taints[0].effect": true, + } + if val, ok := expectedFields[field]; ok { + if !val { + t.Errorf("%s: missing prefix for: %v", k, errs[i]) + } + } + } + } +} + +func TestValidateNodeUpdate(t *testing.T) { + tests := []struct { + oldNode api.Node + node api.Node + valid bool + }{ + {api.Node{}, api.Node{}, true}, + {api.Node{ + ObjectMeta: api.ObjectMeta{ + Name: "foo"}}, + api.Node{ + ObjectMeta: api.ObjectMeta{ + Name: "bar"}, + }, false}, + {api.Node{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Labels: map[string]string{"foo": "bar"}, + }, + }, api.Node{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Labels: map[string]string{"foo": "baz"}, + }, + }, true}, + {api.Node{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + }, + }, api.Node{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Labels: map[string]string{"foo": "baz"}, + }, + }, true}, + {api.Node{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Labels: map[string]string{"bar": "foo"}, + }, + }, api.Node{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Labels: map[string]string{"foo": "baz"}, + }, + }, true}, + {api.Node{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + }, + Spec: api.NodeSpec{ + PodCIDR: "", + }, + }, api.Node{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + }, + Spec: api.NodeSpec{ + PodCIDR: "192.168.0.0/16", + }, + }, true}, + {api.Node{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + }, + Spec: api.NodeSpec{ + PodCIDR: "192.123.0.0/16", + }, + }, api.Node{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + }, + Spec: api.NodeSpec{ + PodCIDR: "192.168.0.0/16", + }, + }, false}, + {api.Node{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + }, + Status: api.NodeStatus{ + Capacity: api.ResourceList{ + api.ResourceCPU: resource.MustParse("10000"), + api.ResourceMemory: resource.MustParse("100"), + }, + }, + }, api.Node{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + }, + Status: api.NodeStatus{ + Capacity: api.ResourceList{ + api.ResourceCPU: resource.MustParse("100"), + api.ResourceMemory: resource.MustParse("10000"), + }, + }, + }, true}, + {api.Node{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Labels: map[string]string{"bar": "foo"}, + }, + Status: api.NodeStatus{ + Capacity: api.ResourceList{ + api.ResourceCPU: resource.MustParse("10000"), + api.ResourceMemory: resource.MustParse("100"), + }, + }, + }, api.Node{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Labels: map[string]string{"bar": "fooobaz"}, + }, + Status: api.NodeStatus{ + Capacity: api.ResourceList{ + api.ResourceCPU: resource.MustParse("100"), + api.ResourceMemory: resource.MustParse("10000"), + }, + }, + }, true}, + {api.Node{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Labels: map[string]string{"bar": "foo"}, + }, + Status: api.NodeStatus{ + Addresses: []api.NodeAddress{ + {Type: api.NodeLegacyHostIP, Address: "1.2.3.4"}, + }, + }, + }, api.Node{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Labels: 
map[string]string{"bar": "fooobaz"}, + }, + }, true}, + {api.Node{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Labels: map[string]string{"foo": "baz"}, + }, + }, api.Node{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Labels: map[string]string{"Foo": "baz"}, + }, + }, true}, + {api.Node{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + }, + Spec: api.NodeSpec{ + Unschedulable: false, + }, + }, api.Node{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + }, + Spec: api.NodeSpec{ + Unschedulable: true, + }, + }, true}, + {api.Node{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + }, + Spec: api.NodeSpec{ + Unschedulable: false, + }, + }, api.Node{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + }, + Status: api.NodeStatus{ + Addresses: []api.NodeAddress{ + {Type: api.NodeExternalIP, Address: "1.1.1.1"}, + {Type: api.NodeExternalIP, Address: "1.1.1.1"}, + }, + }, + }, false}, + {api.Node{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + }, + Spec: api.NodeSpec{ + Unschedulable: false, + }, + }, api.Node{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + }, + Status: api.NodeStatus{ + Addresses: []api.NodeAddress{ + {Type: api.NodeExternalIP, Address: "1.1.1.1"}, + {Type: api.NodeInternalIP, Address: "10.1.1.1"}, + }, + }, + }, true}, + } + for i, test := range tests { + test.oldNode.ObjectMeta.ResourceVersion = "1" + test.node.ObjectMeta.ResourceVersion = "1" + errs := ValidateNodeUpdate(&test.node, &test.oldNode) + if test.valid && len(errs) > 0 { + t.Errorf("%d: Unexpected error: %v", i, errs) + t.Logf("%#v vs %#v", test.oldNode.ObjectMeta, test.node.ObjectMeta) + } + if !test.valid && len(errs) == 0 { + t.Errorf("%d: Unexpected non-error", i) + } + } +} + +func TestValidateServiceUpdate(t *testing.T) { + testCases := []struct { + name string + tweakSvc func(oldSvc, newSvc *api.Service) // given basic valid services, each test case can customize them + numErrs int + }{ + { + name: "no change", + tweakSvc: func(oldSvc, newSvc *api.Service) { + // do nothing + }, + numErrs: 0, + }, + { + name: "change name", + tweakSvc: func(oldSvc, newSvc *api.Service) { + newSvc.Name += "2" + }, + numErrs: 1, + }, + { + name: "change namespace", + tweakSvc: func(oldSvc, newSvc *api.Service) { + newSvc.Namespace += "2" + }, + numErrs: 1, + }, + { + name: "change label valid", + tweakSvc: func(oldSvc, newSvc *api.Service) { + newSvc.Labels["key"] = "other-value" + }, + numErrs: 0, + }, + { + name: "add label", + tweakSvc: func(oldSvc, newSvc *api.Service) { + newSvc.Labels["key2"] = "value2" + }, + numErrs: 0, + }, + { + name: "change cluster IP", + tweakSvc: func(oldSvc, newSvc *api.Service) { + oldSvc.Spec.ClusterIP = "1.2.3.4" + newSvc.Spec.ClusterIP = "8.6.7.5" + }, + numErrs: 1, + }, + { + name: "remove cluster IP", + tweakSvc: func(oldSvc, newSvc *api.Service) { + oldSvc.Spec.ClusterIP = "1.2.3.4" + newSvc.Spec.ClusterIP = "" + }, + numErrs: 1, + }, + { + name: "change affinity", + tweakSvc: func(oldSvc, newSvc *api.Service) { + newSvc.Spec.SessionAffinity = "ClientIP" + }, + numErrs: 0, + }, + { + name: "remove affinity", + tweakSvc: func(oldSvc, newSvc *api.Service) { + newSvc.Spec.SessionAffinity = "" + }, + numErrs: 1, + }, + { + name: "change type", + tweakSvc: func(oldSvc, newSvc *api.Service) { + newSvc.Spec.Type = api.ServiceTypeLoadBalancer + }, + numErrs: 0, + }, + { + name: "remove type", + tweakSvc: func(oldSvc, newSvc *api.Service) { + newSvc.Spec.Type = "" + }, + numErrs: 1, + }, + { + name: "change type -> nodeport", + tweakSvc: func(oldSvc, newSvc *api.Service) { + newSvc.Spec.Type = 
api.ServiceTypeNodePort
+			},
+			numErrs: 0,
+		},
+	}
+
+	for _, tc := range testCases {
+		oldSvc := makeValidService()
+		newSvc := makeValidService()
+		tc.tweakSvc(&oldSvc, &newSvc)
+		errs := ValidateServiceUpdate(&newSvc, &oldSvc)
+		if len(errs) != tc.numErrs {
+			t.Errorf("Unexpected error list for case %q: %v", tc.name, errs.ToAggregate())
+		}
+	}
+}
+
+func TestValidateResourceNames(t *testing.T) {
+	table := []struct {
+		input   string
+		success bool
+		expect  string
+	}{
+		{"memory", true, ""},
+		{"cpu", true, ""},
+		{"network", false, ""},
+		{"disk", false, ""},
+		{"", false, ""},
+		{".", false, ""},
+		{"..", false, ""},
+		{"my.favorite.app.co/12345", true, ""},
+		{"my.favorite.app.co/_12345", false, ""},
+		{"my.favorite.app.co/12345_", false, ""},
+		{"kubernetes.io/..", false, ""},
+		{"kubernetes.io/" + strings.Repeat("a", 63), true, ""},
+		{"kubernetes.io/" + strings.Repeat("a", 64), false, ""},
+		{"kubernetes.io//", false, ""},
+		{"kubernetes.io", false, ""},
+		{"kubernetes.io/will/not/work/", false, ""},
+	}
+	for k, item := range table {
+		err := validateResourceName(item.input, field.NewPath("field"))
+		if len(err) != 0 && item.success {
+			t.Errorf("expected no failure for input %q", item.input)
+		} else if len(err) == 0 && !item.success {
+			t.Errorf("expected failure for input %q", item.input)
+		}
+		// check any returned error details against the expected substring
+		for i := range err {
+			detail := err[i].Detail
+			if detail != "" && !strings.Contains(detail, item.expect) {
+				t.Errorf("%d: expected error detail either empty or %s, got %s", k, item.expect, detail)
+			}
+		}
+	}
+}
+
+func getResourceList(cpu, memory string) api.ResourceList {
+	res := api.ResourceList{}
+	if cpu != "" {
+		res[api.ResourceCPU] = resource.MustParse(cpu)
+	}
+	if memory != "" {
+		res[api.ResourceMemory] = resource.MustParse(memory)
+	}
+	return res
+}
+
+func getStorageResourceList(storage string) api.ResourceList {
+	res := api.ResourceList{}
+	if storage != "" {
+		res[api.ResourceStorage] = resource.MustParse(storage)
+	}
+	return res
+}
+
+func TestValidateLimitRange(t *testing.T) {
+	successCases := []struct {
+		name string
+		spec api.LimitRangeSpec
+	}{
+		{
+			name: "all-fields-valid",
+			spec: api.LimitRangeSpec{
+				Limits: []api.LimitRangeItem{
+					{
+						Type:                 api.LimitTypePod,
+						Max:                  getResourceList("100m", "10000Mi"),
+						Min:                  getResourceList("5m", "100Mi"),
+						MaxLimitRequestRatio: getResourceList("10", ""),
+					},
+					{
+						Type:                 api.LimitTypeContainer,
+						Max:                  getResourceList("100m", "10000Mi"),
+						Min:                  getResourceList("5m", "100Mi"),
+						Default:              getResourceList("50m", "500Mi"),
+						DefaultRequest:       getResourceList("10m", "200Mi"),
+						MaxLimitRequestRatio: getResourceList("10", ""),
+					},
+				},
+			},
+		},
+		{
+			name: "all-fields-valid-big-numbers",
+			spec: api.LimitRangeSpec{
+				Limits: []api.LimitRangeItem{
+					{
+						Type:                 api.LimitTypeContainer,
+						Max:                  getResourceList("100m", "10000T"),
+						Min:                  getResourceList("5m", "100Mi"),
+						Default:              getResourceList("50m", "500Mi"),
+						DefaultRequest:       getResourceList("10m", "200Mi"),
+						MaxLimitRequestRatio: getResourceList("10", ""),
+					},
+				},
+			},
+		},
+		{
+			name: "thirdparty-fields-all-valid-standard-container-resources",
+			spec: api.LimitRangeSpec{
+				Limits: []api.LimitRangeItem{
+					{
+						Type:                 "thirdparty.com/foo",
+						Max:                  getResourceList("100m", "10000T"),
+						Min:                  getResourceList("5m", "100Mi"),
+						Default:              getResourceList("50m", "500Mi"),
+						DefaultRequest:       getResourceList("10m", "200Mi"),
+						MaxLimitRequestRatio: getResourceList("10", ""),
+					},
+				},
+			},
+		},
+		{
+			name: "thirdparty-fields-all-valid-storage-resources",
+			spec: api.LimitRangeSpec{
+				
Limits: []api.LimitRangeItem{ + { + Type: "thirdparty.com/foo", + Max: getStorageResourceList("10000T"), + Min: getStorageResourceList("100Mi"), + Default: getStorageResourceList("500Mi"), + DefaultRequest: getStorageResourceList("200Mi"), + MaxLimitRequestRatio: getStorageResourceList(""), + }, + }, + }, + }, + } + + for _, successCase := range successCases { + limitRange := &api.LimitRange{ObjectMeta: api.ObjectMeta{Name: successCase.name, Namespace: "foo"}, Spec: successCase.spec} + if errs := ValidateLimitRange(limitRange); len(errs) != 0 { + t.Errorf("Case %v, unexpected error: %v", successCase.name, errs) + } + } + + errorCases := map[string]struct { + R api.LimitRange + D string + }{ + "zero-length-name": { + api.LimitRange{ObjectMeta: api.ObjectMeta{Name: "", Namespace: "foo"}, Spec: api.LimitRangeSpec{}}, + "name or generateName is required", + }, + "zero-length-namespace": { + api.LimitRange{ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: ""}, Spec: api.LimitRangeSpec{}}, + "", + }, + "invalid-name": { + api.LimitRange{ObjectMeta: api.ObjectMeta{Name: "^Invalid", Namespace: "foo"}, Spec: api.LimitRangeSpec{}}, + "must match the regex", + }, + "invalid-namespace": { + api.LimitRange{ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: "^Invalid"}, Spec: api.LimitRangeSpec{}}, + "must match the regex", + }, + "duplicate-limit-type": { + api.LimitRange{ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: "foo"}, Spec: api.LimitRangeSpec{ + Limits: []api.LimitRangeItem{ + { + Type: api.LimitTypePod, + Max: getResourceList("100m", "10000m"), + Min: getResourceList("0m", "100m"), + }, + { + Type: api.LimitTypePod, + Min: getResourceList("0m", "100m"), + }, + }, + }}, + "", + }, + "default-limit-type-pod": { + api.LimitRange{ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: "foo"}, Spec: api.LimitRangeSpec{ + Limits: []api.LimitRangeItem{ + { + Type: api.LimitTypePod, + Max: getResourceList("100m", "10000m"), + Min: getResourceList("0m", "100m"), + Default: getResourceList("10m", "100m"), + }, + }, + }}, + "may not be specified when `type` is 'Pod'", + }, + "default-request-limit-type-pod": { + api.LimitRange{ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: "foo"}, Spec: api.LimitRangeSpec{ + Limits: []api.LimitRangeItem{ + { + Type: api.LimitTypePod, + Max: getResourceList("100m", "10000m"), + Min: getResourceList("0m", "100m"), + DefaultRequest: getResourceList("10m", "100m"), + }, + }, + }}, + "may not be specified when `type` is 'Pod'", + }, + "min value 100m is greater than max value 10m": { + api.LimitRange{ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: "foo"}, Spec: api.LimitRangeSpec{ + Limits: []api.LimitRangeItem{ + { + Type: api.LimitTypePod, + Max: getResourceList("10m", ""), + Min: getResourceList("100m", ""), + }, + }, + }}, + "min value 100m is greater than max value 10m", + }, + "invalid spec default outside range": { + api.LimitRange{ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: "foo"}, Spec: api.LimitRangeSpec{ + Limits: []api.LimitRangeItem{ + { + Type: api.LimitTypeContainer, + Max: getResourceList("1", ""), + Min: getResourceList("100m", ""), + Default: getResourceList("2000m", ""), + }, + }, + }}, + "default value 2 is greater than max value 1", + }, + "invalid spec defaultrequest outside range": { + api.LimitRange{ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: "foo"}, Spec: api.LimitRangeSpec{ + Limits: []api.LimitRangeItem{ + { + Type: api.LimitTypeContainer, + Max: getResourceList("1", ""), + Min: getResourceList("100m", ""), + DefaultRequest: 
getResourceList("2000m", ""), + }, + }, + }}, + "default request value 2 is greater than max value 1", + }, + "invalid spec defaultrequest more than default": { + api.LimitRange{ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: "foo"}, Spec: api.LimitRangeSpec{ + Limits: []api.LimitRangeItem{ + { + Type: api.LimitTypeContainer, + Max: getResourceList("2", ""), + Min: getResourceList("100m", ""), + Default: getResourceList("500m", ""), + DefaultRequest: getResourceList("800m", ""), + }, + }, + }}, + "default request value 800m is greater than default limit value 500m", + }, + "invalid spec maxLimitRequestRatio less than 1": { + api.LimitRange{ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: "foo"}, Spec: api.LimitRangeSpec{ + Limits: []api.LimitRangeItem{ + { + Type: api.LimitTypePod, + MaxLimitRequestRatio: getResourceList("800m", ""), + }, + }, + }}, + "ratio 800m is less than 1", + }, + "invalid spec maxLimitRequestRatio greater than max/min": { + api.LimitRange{ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: "foo"}, Spec: api.LimitRangeSpec{ + Limits: []api.LimitRangeItem{ + { + Type: api.LimitTypeContainer, + Max: getResourceList("", "2Gi"), + Min: getResourceList("", "512Mi"), + MaxLimitRequestRatio: getResourceList("", "10"), + }, + }, + }}, + "ratio 10 is greater than max/min = 4.000000", + }, + "invalid non standard limit type": { + api.LimitRange{ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: "foo"}, Spec: api.LimitRangeSpec{ + Limits: []api.LimitRangeItem{ + { + Type: "foo", + Max: getStorageResourceList("10000T"), + Min: getStorageResourceList("100Mi"), + Default: getStorageResourceList("500Mi"), + DefaultRequest: getStorageResourceList("200Mi"), + MaxLimitRequestRatio: getStorageResourceList(""), + }, + }, + }}, + "must be a standard limit type or fully qualified", + }, + } + + for k, v := range errorCases { + errs := ValidateLimitRange(&v.R) + if len(errs) == 0 { + t.Errorf("expected failure for %s", k) + } + for i := range errs { + detail := errs[i].Detail + if !strings.Contains(detail, v.D) { + t.Errorf("[%s]: expected error detail either empty or %q, got %q", k, v.D, detail) + } + } + } + +} + +func TestValidateResourceQuota(t *testing.T) { + spec := api.ResourceQuotaSpec{ + Hard: api.ResourceList{ + api.ResourceCPU: resource.MustParse("100"), + api.ResourceMemory: resource.MustParse("10000"), + api.ResourceRequestsCPU: resource.MustParse("100"), + api.ResourceRequestsMemory: resource.MustParse("10000"), + api.ResourceLimitsCPU: resource.MustParse("100"), + api.ResourceLimitsMemory: resource.MustParse("10000"), + api.ResourcePods: resource.MustParse("10"), + api.ResourceServices: resource.MustParse("0"), + api.ResourceReplicationControllers: resource.MustParse("10"), + api.ResourceQuotas: resource.MustParse("10"), + api.ResourceConfigMaps: resource.MustParse("10"), + api.ResourceSecrets: resource.MustParse("10"), + }, + } + + terminatingSpec := api.ResourceQuotaSpec{ + Hard: api.ResourceList{ + api.ResourceCPU: resource.MustParse("100"), + api.ResourceLimitsCPU: resource.MustParse("200"), + }, + Scopes: []api.ResourceQuotaScope{api.ResourceQuotaScopeTerminating}, + } + + nonTerminatingSpec := api.ResourceQuotaSpec{ + Hard: api.ResourceList{ + api.ResourceCPU: resource.MustParse("100"), + }, + Scopes: []api.ResourceQuotaScope{api.ResourceQuotaScopeNotTerminating}, + } + + bestEffortSpec := api.ResourceQuotaSpec{ + Hard: api.ResourceList{ + api.ResourcePods: resource.MustParse("100"), + }, + Scopes: []api.ResourceQuotaScope{api.ResourceQuotaScopeBestEffort}, + } + + 
nonBestEffortSpec := api.ResourceQuotaSpec{ + Hard: api.ResourceList{ + api.ResourceCPU: resource.MustParse("100"), + }, + Scopes: []api.ResourceQuotaScope{api.ResourceQuotaScopeNotBestEffort}, + } + + // storage is not yet supported as a quota tracked resource + invalidQuotaResourceSpec := api.ResourceQuotaSpec{ + Hard: api.ResourceList{ + api.ResourceStorage: resource.MustParse("10"), + }, + } + + negativeSpec := api.ResourceQuotaSpec{ + Hard: api.ResourceList{ + api.ResourceCPU: resource.MustParse("-100"), + api.ResourceMemory: resource.MustParse("-10000"), + api.ResourcePods: resource.MustParse("-10"), + api.ResourceServices: resource.MustParse("-10"), + api.ResourceReplicationControllers: resource.MustParse("-10"), + api.ResourceQuotas: resource.MustParse("-10"), + api.ResourceConfigMaps: resource.MustParse("-10"), + api.ResourceSecrets: resource.MustParse("-10"), + }, + } + + fractionalComputeSpec := api.ResourceQuotaSpec{ + Hard: api.ResourceList{ + api.ResourceCPU: resource.MustParse("100m"), + }, + } + + fractionalPodSpec := api.ResourceQuotaSpec{ + Hard: api.ResourceList{ + api.ResourcePods: resource.MustParse(".1"), + api.ResourceServices: resource.MustParse(".5"), + api.ResourceReplicationControllers: resource.MustParse("1.25"), + api.ResourceQuotas: resource.MustParse("2.5"), + }, + } + + invalidTerminatingScopePairsSpec := api.ResourceQuotaSpec{ + Hard: api.ResourceList{ + api.ResourceCPU: resource.MustParse("100"), + }, + Scopes: []api.ResourceQuotaScope{api.ResourceQuotaScopeTerminating, api.ResourceQuotaScopeNotTerminating}, + } + + invalidBestEffortScopePairsSpec := api.ResourceQuotaSpec{ + Hard: api.ResourceList{ + api.ResourcePods: resource.MustParse("100"), + }, + Scopes: []api.ResourceQuotaScope{api.ResourceQuotaScopeBestEffort, api.ResourceQuotaScopeNotBestEffort}, + } + + invalidScopeNameSpec := api.ResourceQuotaSpec{ + Hard: api.ResourceList{ + api.ResourceCPU: resource.MustParse("100"), + }, + Scopes: []api.ResourceQuotaScope{api.ResourceQuotaScope("foo")}, + } + + successCases := []api.ResourceQuota{ + { + ObjectMeta: api.ObjectMeta{ + Name: "abc", + Namespace: "foo", + }, + Spec: spec, + }, + { + ObjectMeta: api.ObjectMeta{ + Name: "abc", + Namespace: "foo", + }, + Spec: fractionalComputeSpec, + }, + { + ObjectMeta: api.ObjectMeta{ + Name: "abc", + Namespace: "foo", + }, + Spec: terminatingSpec, + }, + { + ObjectMeta: api.ObjectMeta{ + Name: "abc", + Namespace: "foo", + }, + Spec: nonTerminatingSpec, + }, + { + ObjectMeta: api.ObjectMeta{ + Name: "abc", + Namespace: "foo", + }, + Spec: bestEffortSpec, + }, + { + ObjectMeta: api.ObjectMeta{ + Name: "abc", + Namespace: "foo", + }, + Spec: nonBestEffortSpec, + }, + } + + for _, successCase := range successCases { + if errs := ValidateResourceQuota(&successCase); len(errs) != 0 { + t.Errorf("expected success: %v", errs) + } + } + + errorCases := map[string]struct { + R api.ResourceQuota + D string + }{ + "zero-length Name": { + api.ResourceQuota{ObjectMeta: api.ObjectMeta{Name: "", Namespace: "foo"}, Spec: spec}, + "name or generateName is required", + }, + "zero-length Namespace": { + api.ResourceQuota{ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: ""}, Spec: spec}, + "", + }, + "invalid Name": { + api.ResourceQuota{ObjectMeta: api.ObjectMeta{Name: "^Invalid", Namespace: "foo"}, Spec: spec}, + "must match the regex", + }, + "invalid Namespace": { + api.ResourceQuota{ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: "^Invalid"}, Spec: spec}, + "must match the regex", + }, + "negative-limits": { + 
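// hard limits must be non-negative quantities
+			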
api.ResourceQuota{ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: "foo"}, Spec: negativeSpec}, + isNegativeErrorMsg, + }, + "fractional-api-resource": { + api.ResourceQuota{ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: "foo"}, Spec: fractionalPodSpec}, + isNotIntegerErrorMsg, + }, + "invalid-quota-resource": { + api.ResourceQuota{ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: "foo"}, Spec: invalidQuotaResourceSpec}, + isInvalidQuotaResource, + }, + "invalid-quota-terminating-pair": { + api.ResourceQuota{ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: "foo"}, Spec: invalidTerminatingScopePairsSpec}, + "conflicting scopes", + }, + "invalid-quota-besteffort-pair": { + api.ResourceQuota{ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: "foo"}, Spec: invalidBestEffortScopePairsSpec}, + "conflicting scopes", + }, + "invalid-quota-scope-name": { + api.ResourceQuota{ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: "foo"}, Spec: invalidScopeNameSpec}, + "unsupported scope", + }, + } + for k, v := range errorCases { + errs := ValidateResourceQuota(&v.R) + if len(errs) == 0 { + t.Errorf("expected failure for %s", k) + } + for i := range errs { + if !strings.Contains(errs[i].Detail, v.D) { + t.Errorf("[%s]: expected error detail either empty or %s, got %s", k, v.D, errs[i].Detail) + } + } + } +} + +func TestValidateNamespace(t *testing.T) { + validLabels := map[string]string{"a": "b"} + invalidLabels := map[string]string{"NoUppercaseOrSpecialCharsLike=Equals": "b"} + successCases := []api.Namespace{ + { + ObjectMeta: api.ObjectMeta{Name: "abc", Labels: validLabels}, + }, + { + ObjectMeta: api.ObjectMeta{Name: "abc-123"}, + Spec: api.NamespaceSpec{ + Finalizers: []api.FinalizerName{"example.com/something", "example.com/other"}, + }, + }, + } + for _, successCase := range successCases { + if errs := ValidateNamespace(&successCase); len(errs) != 0 { + t.Errorf("expected success: %v", errs) + } + } + errorCases := map[string]struct { + R api.Namespace + D string + }{ + "zero-length name": { + api.Namespace{ObjectMeta: api.ObjectMeta{Name: ""}}, + "", + }, + "defined-namespace": { + api.Namespace{ObjectMeta: api.ObjectMeta{Name: "abc-123", Namespace: "makesnosense"}}, + "", + }, + "invalid-labels": { + api.Namespace{ObjectMeta: api.ObjectMeta{Name: "abc", Labels: invalidLabels}}, + "", + }, + } + for k, v := range errorCases { + errs := ValidateNamespace(&v.R) + if len(errs) == 0 { + t.Errorf("expected failure for %s", k) + } + } +} + +func TestValidateNamespaceFinalizeUpdate(t *testing.T) { + tests := []struct { + oldNamespace api.Namespace + namespace api.Namespace + valid bool + }{ + {api.Namespace{}, api.Namespace{}, true}, + {api.Namespace{ + ObjectMeta: api.ObjectMeta{ + Name: "foo"}}, + api.Namespace{ + ObjectMeta: api.ObjectMeta{ + Name: "foo"}, + Spec: api.NamespaceSpec{ + Finalizers: []api.FinalizerName{"Foo"}, + }, + }, false}, + {api.Namespace{ + ObjectMeta: api.ObjectMeta{ + Name: "foo"}, + Spec: api.NamespaceSpec{ + Finalizers: []api.FinalizerName{"foo.com/bar"}, + }, + }, + api.Namespace{ + ObjectMeta: api.ObjectMeta{ + Name: "foo"}, + Spec: api.NamespaceSpec{ + Finalizers: []api.FinalizerName{"foo.com/bar", "what.com/bar"}, + }, + }, true}, + {api.Namespace{ + ObjectMeta: api.ObjectMeta{ + Name: "fooemptyfinalizer"}, + Spec: api.NamespaceSpec{ + Finalizers: []api.FinalizerName{"foo.com/bar"}, + }, + }, + api.Namespace{ + ObjectMeta: api.ObjectMeta{ + Name: "fooemptyfinalizer"}, + Spec: api.NamespaceSpec{ + Finalizers: []api.FinalizerName{"", "foo.com/bar", "what.com/bar"}, 
+ }, + }, false}, + } + for i, test := range tests { + test.namespace.ObjectMeta.ResourceVersion = "1" + test.oldNamespace.ObjectMeta.ResourceVersion = "1" + errs := ValidateNamespaceFinalizeUpdate(&test.namespace, &test.oldNamespace) + if test.valid && len(errs) > 0 { + t.Errorf("%d: Unexpected error: %v", i, errs) + t.Logf("%#v vs %#v", test.oldNamespace, test.namespace) + } + if !test.valid && len(errs) == 0 { + t.Errorf("%d: Unexpected non-error", i) + } + } +} + +func TestValidateNamespaceStatusUpdate(t *testing.T) { + now := unversioned.Now() + + tests := []struct { + oldNamespace api.Namespace + namespace api.Namespace + valid bool + }{ + {api.Namespace{}, api.Namespace{ + Status: api.NamespaceStatus{ + Phase: api.NamespaceActive, + }, + }, true}, + // Cannot set deletionTimestamp via status update + {api.Namespace{ + ObjectMeta: api.ObjectMeta{ + Name: "foo"}}, + api.Namespace{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + DeletionTimestamp: &now}, + Status: api.NamespaceStatus{ + Phase: api.NamespaceTerminating, + }, + }, false}, + // Can update phase via status update + {api.Namespace{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + DeletionTimestamp: &now}}, + api.Namespace{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + DeletionTimestamp: &now}, + Status: api.NamespaceStatus{ + Phase: api.NamespaceTerminating, + }, + }, true}, + {api.Namespace{ + ObjectMeta: api.ObjectMeta{ + Name: "foo"}}, + api.Namespace{ + ObjectMeta: api.ObjectMeta{ + Name: "foo"}, + Status: api.NamespaceStatus{ + Phase: api.NamespaceTerminating, + }, + }, false}, + {api.Namespace{ + ObjectMeta: api.ObjectMeta{ + Name: "foo"}}, + api.Namespace{ + ObjectMeta: api.ObjectMeta{ + Name: "bar"}, + Status: api.NamespaceStatus{ + Phase: api.NamespaceTerminating, + }, + }, false}, + } + for i, test := range tests { + test.namespace.ObjectMeta.ResourceVersion = "1" + test.oldNamespace.ObjectMeta.ResourceVersion = "1" + errs := ValidateNamespaceStatusUpdate(&test.namespace, &test.oldNamespace) + if test.valid && len(errs) > 0 { + t.Errorf("%d: Unexpected error: %v", i, errs) + t.Logf("%#v vs %#v", test.oldNamespace.ObjectMeta, test.namespace.ObjectMeta) + } + if !test.valid && len(errs) == 0 { + t.Errorf("%d: Unexpected non-error", i) + } + } +} + +func TestValidateNamespaceUpdate(t *testing.T) { + tests := []struct { + oldNamespace api.Namespace + namespace api.Namespace + valid bool + }{ + {api.Namespace{}, api.Namespace{}, true}, + {api.Namespace{ + ObjectMeta: api.ObjectMeta{ + Name: "foo1"}}, + api.Namespace{ + ObjectMeta: api.ObjectMeta{ + Name: "bar1"}, + }, false}, + {api.Namespace{ + ObjectMeta: api.ObjectMeta{ + Name: "foo2", + Labels: map[string]string{"foo": "bar"}, + }, + }, api.Namespace{ + ObjectMeta: api.ObjectMeta{ + Name: "foo2", + Labels: map[string]string{"foo": "baz"}, + }, + }, true}, + {api.Namespace{ + ObjectMeta: api.ObjectMeta{ + Name: "foo3", + }, + }, api.Namespace{ + ObjectMeta: api.ObjectMeta{ + Name: "foo3", + Labels: map[string]string{"foo": "baz"}, + }, + }, true}, + {api.Namespace{ + ObjectMeta: api.ObjectMeta{ + Name: "foo4", + Labels: map[string]string{"bar": "foo"}, + }, + }, api.Namespace{ + ObjectMeta: api.ObjectMeta{ + Name: "foo4", + Labels: map[string]string{"foo": "baz"}, + }, + }, true}, + {api.Namespace{ + ObjectMeta: api.ObjectMeta{ + Name: "foo5", + Labels: map[string]string{"foo": "baz"}, + }, + }, api.Namespace{ + ObjectMeta: api.ObjectMeta{ + Name: "foo5", + Labels: map[string]string{"Foo": "baz"}, + }, + }, true}, + {api.Namespace{ + ObjectMeta: api.ObjectMeta{ + 
Name: "foo6", + Labels: map[string]string{"foo": "baz"}, + }, + }, api.Namespace{ + ObjectMeta: api.ObjectMeta{ + Name: "foo6", + Labels: map[string]string{"Foo": "baz"}, + }, + Spec: api.NamespaceSpec{ + Finalizers: []api.FinalizerName{"kubernetes"}, + }, + Status: api.NamespaceStatus{ + Phase: api.NamespaceTerminating, + }, + }, true}, + } + for i, test := range tests { + test.namespace.ObjectMeta.ResourceVersion = "1" + test.oldNamespace.ObjectMeta.ResourceVersion = "1" + errs := ValidateNamespaceUpdate(&test.namespace, &test.oldNamespace) + if test.valid && len(errs) > 0 { + t.Errorf("%d: Unexpected error: %v", i, errs) + t.Logf("%#v vs %#v", test.oldNamespace.ObjectMeta, test.namespace.ObjectMeta) + } + if !test.valid && len(errs) == 0 { + t.Errorf("%d: Unexpected non-error", i) + } + } +} + +func TestValidateSecret(t *testing.T) { + // Opaque secret validation + validSecret := func() api.Secret { + return api.Secret{ + ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: "bar"}, + Data: map[string][]byte{ + "data-1": []byte("bar"), + }, + } + } + + var ( + emptyName = validSecret() + invalidName = validSecret() + emptyNs = validSecret() + invalidNs = validSecret() + overMaxSize = validSecret() + invalidKey = validSecret() + leadingDotKey = validSecret() + dotKey = validSecret() + doubleDotKey = validSecret() + ) + + emptyName.Name = "" + invalidName.Name = "NoUppercaseOrSpecialCharsLike=Equals" + emptyNs.Namespace = "" + invalidNs.Namespace = "NoUppercaseOrSpecialCharsLike=Equals" + overMaxSize.Data = map[string][]byte{ + "over": make([]byte, api.MaxSecretSize+1), + } + invalidKey.Data["a..b"] = []byte("whoops") + leadingDotKey.Data[".key"] = []byte("bar") + dotKey.Data["."] = []byte("bar") + doubleDotKey.Data[".."] = []byte("bar") + + // kubernetes.io/service-account-token secret validation + validServiceAccountTokenSecret := func() api.Secret { + return api.Secret{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Namespace: "bar", + Annotations: map[string]string{ + api.ServiceAccountNameKey: "foo", + }, + }, + Type: api.SecretTypeServiceAccountToken, + Data: map[string][]byte{ + "data-1": []byte("bar"), + }, + } + } + + var ( + emptyTokenAnnotation = validServiceAccountTokenSecret() + missingTokenAnnotation = validServiceAccountTokenSecret() + missingTokenAnnotations = validServiceAccountTokenSecret() + ) + emptyTokenAnnotation.Annotations[api.ServiceAccountNameKey] = "" + delete(missingTokenAnnotation.Annotations, api.ServiceAccountNameKey) + missingTokenAnnotations.Annotations = nil + + tests := map[string]struct { + secret api.Secret + valid bool + }{ + "valid": {validSecret(), true}, + "empty name": {emptyName, false}, + "invalid name": {invalidName, false}, + "empty namespace": {emptyNs, false}, + "invalid namespace": {invalidNs, false}, + "over max size": {overMaxSize, false}, + "invalid key": {invalidKey, false}, + "valid service-account-token secret": {validServiceAccountTokenSecret(), true}, + "empty service-account-token annotation": {emptyTokenAnnotation, false}, + "missing service-account-token annotation": {missingTokenAnnotation, false}, + "missing service-account-token annotations": {missingTokenAnnotations, false}, + "leading dot key": {leadingDotKey, true}, + "dot key": {dotKey, false}, + "double dot key": {doubleDotKey, false}, + } + + for name, tc := range tests { + errs := ValidateSecret(&tc.secret) + if tc.valid && len(errs) > 0 { + t.Errorf("%v: Unexpected error: %v", name, errs) + } + if !tc.valid && len(errs) == 0 { + t.Errorf("%v: Unexpected non-error", name) 
+ } + } +} + +func TestValidateDockerConfigSecret(t *testing.T) { + validDockerSecret := func() api.Secret { + return api.Secret{ + ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: "bar"}, + Type: api.SecretTypeDockercfg, + Data: map[string][]byte{ + api.DockerConfigKey: []byte(`{"https://index.docker.io/v1/": {"auth": "Y2x1ZWRyb29sZXIwMDAxOnBhc3N3b3Jk","email": "fake@example.com"}}`), + }, + } + } + validDockerSecret2 := func() api.Secret { + return api.Secret{ + ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: "bar"}, + Type: api.SecretTypeDockerConfigJson, + Data: map[string][]byte{ + api.DockerConfigJsonKey: []byte(`{"auths":{"https://index.docker.io/v1/": {"auth": "Y2x1ZWRyb29sZXIwMDAxOnBhc3N3b3Jk","email": "fake@example.com"}}}`), + }, + } + } + + var ( + missingDockerConfigKey = validDockerSecret() + emptyDockerConfigKey = validDockerSecret() + invalidDockerConfigKey = validDockerSecret() + missingDockerConfigKey2 = validDockerSecret2() + emptyDockerConfigKey2 = validDockerSecret2() + invalidDockerConfigKey2 = validDockerSecret2() + ) + + delete(missingDockerConfigKey.Data, api.DockerConfigKey) + emptyDockerConfigKey.Data[api.DockerConfigKey] = []byte("") + invalidDockerConfigKey.Data[api.DockerConfigKey] = []byte("bad") + delete(missingDockerConfigKey2.Data, api.DockerConfigJsonKey) + emptyDockerConfigKey2.Data[api.DockerConfigJsonKey] = []byte("") + invalidDockerConfigKey2.Data[api.DockerConfigJsonKey] = []byte("bad") + + tests := map[string]struct { + secret api.Secret + valid bool + }{ + "valid dockercfg": {validDockerSecret(), true}, + "missing dockercfg": {missingDockerConfigKey, false}, + "empty dockercfg": {emptyDockerConfigKey, false}, + "invalid dockercfg": {invalidDockerConfigKey, false}, + "valid config.json": {validDockerSecret2(), true}, + "missing config.json": {missingDockerConfigKey2, false}, + "empty config.json": {emptyDockerConfigKey2, false}, + "invalid config.json": {invalidDockerConfigKey2, false}, + } + + for name, tc := range tests { + errs := ValidateSecret(&tc.secret) + if tc.valid && len(errs) > 0 { + t.Errorf("%v: Unexpected error: %v", name, errs) + } + if !tc.valid && len(errs) == 0 { + t.Errorf("%v: Unexpected non-error", name) + } + } +} + +func TestValidateBasicAuthSecret(t *testing.T) { + validBasicAuthSecret := func() api.Secret { + return api.Secret{ + ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: "bar"}, + Type: api.SecretTypeBasicAuth, + Data: map[string][]byte{ + api.BasicAuthUsernameKey: []byte("username"), + api.BasicAuthPasswordKey: []byte("password"), + }, + } + } + + var ( + missingBasicAuthUsernamePasswordKeys = validBasicAuthSecret() + // invalidBasicAuthUsernamePasswordKey = validBasicAuthSecret() + // emptyBasicAuthUsernameKey = validBasicAuthSecret() + // emptyBasicAuthPasswordKey = validBasicAuthSecret() + ) + + delete(missingBasicAuthUsernamePasswordKeys.Data, api.BasicAuthUsernameKey) + delete(missingBasicAuthUsernamePasswordKeys.Data, api.BasicAuthPasswordKey) + + // invalidBasicAuthUsernamePasswordKey.Data[api.BasicAuthUsernameKey] = []byte("bad") + // invalidBasicAuthUsernamePasswordKey.Data[api.BasicAuthPasswordKey] = []byte("bad") + + // emptyBasicAuthUsernameKey.Data[api.BasicAuthUsernameKey] = []byte("") + // emptyBasicAuthPasswordKey.Data[api.BasicAuthPasswordKey] = []byte("") + + tests := map[string]struct { + secret api.Secret + valid bool + }{ + "valid": {validBasicAuthSecret(), true}, + "missing username and password": {missingBasicAuthUsernamePasswordKeys, false}, + // "invalid username and password": 
{invalidBasicAuthUsernamePasswordKey, false}, + // "empty username": {emptyBasicAuthUsernameKey, false}, + // "empty password": {emptyBasicAuthPasswordKey, false}, + } + + for name, tc := range tests { + errs := ValidateSecret(&tc.secret) + if tc.valid && len(errs) > 0 { + t.Errorf("%v: Unexpected error: %v", name, errs) + } + if !tc.valid && len(errs) == 0 { + t.Errorf("%v: Unexpected non-error", name) + } + } +} + +func TestValidateSSHAuthSecret(t *testing.T) { + validSSHAuthSecret := func() api.Secret { + return api.Secret{ + ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: "bar"}, + Type: api.SecretTypeSSHAuth, + Data: map[string][]byte{ + api.SSHAuthPrivateKey: []byte("foo-bar-baz"), + }, + } + } + + missingSSHAuthPrivateKey := validSSHAuthSecret() + + delete(missingSSHAuthPrivateKey.Data, api.SSHAuthPrivateKey) + + tests := map[string]struct { + secret api.Secret + valid bool + }{ + "valid": {validSSHAuthSecret(), true}, + "missing private key": {missingSSHAuthPrivateKey, false}, + } + + for name, tc := range tests { + errs := ValidateSecret(&tc.secret) + if tc.valid && len(errs) > 0 { + t.Errorf("%v: Unexpected error: %v", name, errs) + } + if !tc.valid && len(errs) == 0 { + t.Errorf("%v: Unexpected non-error", name) + } + } +} + +func TestValidateEndpoints(t *testing.T) { + successCases := map[string]api.Endpoints{ + "simple endpoint": { + ObjectMeta: api.ObjectMeta{Name: "mysvc", Namespace: "namespace"}, + Subsets: []api.EndpointSubset{ + { + Addresses: []api.EndpointAddress{{IP: "10.10.1.1"}, {IP: "10.10.2.2"}}, + Ports: []api.EndpointPort{{Name: "a", Port: 8675, Protocol: "TCP"}, {Name: "b", Port: 309, Protocol: "TCP"}}, + }, + { + Addresses: []api.EndpointAddress{{IP: "10.10.3.3"}}, + Ports: []api.EndpointPort{{Name: "a", Port: 93, Protocol: "TCP"}, {Name: "b", Port: 76, Protocol: "TCP"}}, + }, + }, + }, + "empty subsets": { + ObjectMeta: api.ObjectMeta{Name: "mysvc", Namespace: "namespace"}, + }, + "no name required for singleton port": { + ObjectMeta: api.ObjectMeta{Name: "mysvc", Namespace: "namespace"}, + Subsets: []api.EndpointSubset{ + { + Addresses: []api.EndpointAddress{{IP: "10.10.1.1"}}, + Ports: []api.EndpointPort{{Port: 8675, Protocol: "TCP"}}, + }, + }, + }, + } + + for k, v := range successCases { + if errs := ValidateEndpoints(&v); len(errs) != 0 { + t.Errorf("Expected success for %s, got %v", k, errs) + } + } + + errorCases := map[string]struct { + endpoints api.Endpoints + errorType field.ErrorType + errorDetail string + }{ + "missing namespace": { + endpoints: api.Endpoints{ObjectMeta: api.ObjectMeta{Name: "mysvc"}}, + errorType: "FieldValueRequired", + }, + "missing name": { + endpoints: api.Endpoints{ObjectMeta: api.ObjectMeta{Namespace: "namespace"}}, + errorType: "FieldValueRequired", + }, + "invalid namespace": { + endpoints: api.Endpoints{ObjectMeta: api.ObjectMeta{Name: "mysvc", Namespace: "no@#invalid.;chars\"allowed"}}, + errorType: "FieldValueInvalid", + errorDetail: "must match the regex", + }, + "invalid name": { + endpoints: api.Endpoints{ObjectMeta: api.ObjectMeta{Name: "-_Invliad^&Characters", Namespace: "namespace"}}, + errorType: "FieldValueInvalid", + errorDetail: "must match the regex", + }, + "empty addresses": { + endpoints: api.Endpoints{ + ObjectMeta: api.ObjectMeta{Name: "mysvc", Namespace: "namespace"}, + Subsets: []api.EndpointSubset{ + { + Ports: []api.EndpointPort{{Name: "a", Port: 93, Protocol: "TCP"}}, + }, + }, + }, + errorType: "FieldValueRequired", + }, + "empty ports": { + endpoints: api.Endpoints{ + ObjectMeta: 
api.ObjectMeta{Name: "mysvc", Namespace: "namespace"}, + Subsets: []api.EndpointSubset{ + { + Addresses: []api.EndpointAddress{{IP: "10.10.3.3"}}, + }, + }, + }, + errorType: "FieldValueRequired", + }, + "invalid IP": { + endpoints: api.Endpoints{ + ObjectMeta: api.ObjectMeta{Name: "mysvc", Namespace: "namespace"}, + Subsets: []api.EndpointSubset{ + { + Addresses: []api.EndpointAddress{{IP: "[2001:0db8:85a3:0042:1000:8a2e:0370:7334]"}}, + Ports: []api.EndpointPort{{Name: "a", Port: 93, Protocol: "TCP"}}, + }, + }, + }, + errorType: "FieldValueInvalid", + errorDetail: "must be a valid IP address", + }, + "Multiple ports, one without name": { + endpoints: api.Endpoints{ + ObjectMeta: api.ObjectMeta{Name: "mysvc", Namespace: "namespace"}, + Subsets: []api.EndpointSubset{ + { + Addresses: []api.EndpointAddress{{IP: "10.10.1.1"}}, + Ports: []api.EndpointPort{{Port: 8675, Protocol: "TCP"}, {Name: "b", Port: 309, Protocol: "TCP"}}, + }, + }, + }, + errorType: "FieldValueRequired", + }, + "Invalid port number": { + endpoints: api.Endpoints{ + ObjectMeta: api.ObjectMeta{Name: "mysvc", Namespace: "namespace"}, + Subsets: []api.EndpointSubset{ + { + Addresses: []api.EndpointAddress{{IP: "10.10.1.1"}}, + Ports: []api.EndpointPort{{Name: "a", Port: 66000, Protocol: "TCP"}}, + }, + }, + }, + errorType: "FieldValueInvalid", + errorDetail: PortRangeErrorMsg, + }, + "Invalid protocol": { + endpoints: api.Endpoints{ + ObjectMeta: api.ObjectMeta{Name: "mysvc", Namespace: "namespace"}, + Subsets: []api.EndpointSubset{ + { + Addresses: []api.EndpointAddress{{IP: "10.10.1.1"}}, + Ports: []api.EndpointPort{{Name: "a", Port: 93, Protocol: "Protocol"}}, + }, + }, + }, + errorType: "FieldValueNotSupported", + }, + "Address missing IP": { + endpoints: api.Endpoints{ + ObjectMeta: api.ObjectMeta{Name: "mysvc", Namespace: "namespace"}, + Subsets: []api.EndpointSubset{ + { + Addresses: []api.EndpointAddress{{}}, + Ports: []api.EndpointPort{{Name: "a", Port: 93, Protocol: "TCP"}}, + }, + }, + }, + errorType: "FieldValueInvalid", + errorDetail: "must be a valid IP address", + }, + "Port missing number": { + endpoints: api.Endpoints{ + ObjectMeta: api.ObjectMeta{Name: "mysvc", Namespace: "namespace"}, + Subsets: []api.EndpointSubset{ + { + Addresses: []api.EndpointAddress{{IP: "10.10.1.1"}}, + Ports: []api.EndpointPort{{Name: "a", Protocol: "TCP"}}, + }, + }, + }, + errorType: "FieldValueInvalid", + errorDetail: PortRangeErrorMsg, + }, + "Port missing protocol": { + endpoints: api.Endpoints{ + ObjectMeta: api.ObjectMeta{Name: "mysvc", Namespace: "namespace"}, + Subsets: []api.EndpointSubset{ + { + Addresses: []api.EndpointAddress{{IP: "10.10.1.1"}}, + Ports: []api.EndpointPort{{Name: "a", Port: 93}}, + }, + }, + }, + errorType: "FieldValueRequired", + }, + "Address is loopback": { + endpoints: api.Endpoints{ + ObjectMeta: api.ObjectMeta{Name: "mysvc", Namespace: "namespace"}, + Subsets: []api.EndpointSubset{ + { + Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}}, + Ports: []api.EndpointPort{{Name: "p", Port: 93, Protocol: "TCP"}}, + }, + }, + }, + errorType: "FieldValueInvalid", + errorDetail: "loopback", + }, + "Address is link-local": { + endpoints: api.Endpoints{ + ObjectMeta: api.ObjectMeta{Name: "mysvc", Namespace: "namespace"}, + Subsets: []api.EndpointSubset{ + { + Addresses: []api.EndpointAddress{{IP: "169.254.169.254"}}, + Ports: []api.EndpointPort{{Name: "p", Port: 93, Protocol: "TCP"}}, + }, + }, + }, + errorType: "FieldValueInvalid", + errorDetail: "link-local", + }, + "Address is link-local multicast": { + 
endpoints: api.Endpoints{
+				ObjectMeta: api.ObjectMeta{Name: "mysvc", Namespace: "namespace"},
+				Subsets: []api.EndpointSubset{
+					{
+						Addresses: []api.EndpointAddress{{IP: "224.0.0.1"}},
+						Ports:     []api.EndpointPort{{Name: "p", Port: 93, Protocol: "TCP"}},
+					},
+				},
+			},
+			errorType:   "FieldValueInvalid",
+			errorDetail: "link-local multicast",
+		},
+	}
+
+	for k, v := range errorCases {
+		if errs := ValidateEndpoints(&v.endpoints); len(errs) == 0 || errs[0].Type != v.errorType || !strings.Contains(errs[0].Detail, v.errorDetail) {
+			t.Errorf("[%s] Expected error type %s with detail %q, got %v", k, v.errorType, v.errorDetail, errs)
+		}
+	}
+}
+
+func TestValidateTLSSecret(t *testing.T) {
+	successCases := map[string]api.Secret{
+		"empty certificate chain": {
+			ObjectMeta: api.ObjectMeta{Name: "tls-cert", Namespace: "namespace"},
+			Data: map[string][]byte{
+				api.TLSCertKey:       []byte("public key"),
+				api.TLSPrivateKeyKey: []byte("private key"),
+			},
+		},
+	}
+	for k, v := range successCases {
+		if errs := ValidateSecret(&v); len(errs) != 0 {
+			t.Errorf("Expected success for %s, got %v", k, errs)
+		}
+	}
+	errorCases := map[string]struct {
+		secrets     api.Secret
+		errorType   field.ErrorType
+		errorDetail string
+	}{
+		"missing public key": {
+			secrets: api.Secret{
+				ObjectMeta: api.ObjectMeta{Name: "tls-cert"},
+				Data: map[string][]byte{
+					// only the private key is present, so the cert is what's missing
+					api.TLSPrivateKeyKey: []byte("private key"),
+				},
+			},
+			errorType: "FieldValueRequired",
+		},
+		"missing private key": {
+			secrets: api.Secret{
+				ObjectMeta: api.ObjectMeta{Name: "tls-cert"},
+				Data: map[string][]byte{
+					api.TLSCertKey: []byte("public key"),
+				},
+			},
+			errorType: "FieldValueRequired",
+		},
+	}
+	for k, v := range errorCases {
+		if errs := ValidateSecret(&v.secrets); len(errs) == 0 || errs[0].Type != v.errorType || !strings.Contains(errs[0].Detail, v.errorDetail) {
+			t.Errorf("[%s] Expected error type %s with detail %q, got %v", k, v.errorType, v.errorDetail, errs)
+		}
+	}
+}
+
+func TestValidateSecurityContext(t *testing.T) {
+	priv := false
+	var runAsUser int64 = 1
+	fullValidSC := func() *api.SecurityContext {
+		return &api.SecurityContext{
+			Privileged: &priv,
+			Capabilities: &api.Capabilities{
+				Add:  []api.Capability{"foo"},
+				Drop: []api.Capability{"bar"},
+			},
+			SELinuxOptions: &api.SELinuxOptions{
+				User:  "user",
+				Role:  "role",
+				Type:  "type",
+				Level: "level",
+			},
+			RunAsUser: &runAsUser,
+		}
+	}
+
+	// setup data
+	allSettings := fullValidSC()
+	noCaps := fullValidSC()
+	noCaps.Capabilities = nil
+
+	noSELinux := fullValidSC()
+	noSELinux.SELinuxOptions = nil
+
+	noPrivRequest := fullValidSC()
+	noPrivRequest.Privileged = nil
+
+	noRunAsUser := fullValidSC()
+	noRunAsUser.RunAsUser = nil
+
+	successCases := map[string]struct {
+		sc *api.SecurityContext
+	}{
+		"all settings":    {allSettings},
+		"no capabilities": {noCaps},
+		"no selinux":      {noSELinux},
+		"no priv request": {noPrivRequest},
+		"no run as user":  {noRunAsUser},
+	}
+	for k, v := range successCases {
+		if errs := ValidateSecurityContext(v.sc, field.NewPath("field")); len(errs) != 0 {
+			t.Errorf("[%s] Expected success, got %v", k, errs)
+		}
+	}
+
+	privRequestWithGlobalDeny := fullValidSC()
+	requestPrivileged := true
+	privRequestWithGlobalDeny.Privileged = &requestPrivileged
+
+	negativeRunAsUser := fullValidSC()
+	var negativeUser int64 = -1
+	negativeRunAsUser.RunAsUser = &negativeUser
+
+	errorCases := map[string]struct {
+		sc          *api.SecurityContext
+		errorType   field.ErrorType
+		errorDetail string
+	}{
+		"request privileged when capabilities forbids": {
+			sc:
privRequestWithGlobalDeny, + errorType: "FieldValueForbidden", + errorDetail: "disallowed by policy", + }, + "negative RunAsUser": { + sc: negativeRunAsUser, + errorType: "FieldValueInvalid", + errorDetail: isNegativeErrorMsg, + }, + } + for k, v := range errorCases { + if errs := ValidateSecurityContext(v.sc, field.NewPath("field")); len(errs) == 0 || errs[0].Type != v.errorType || !strings.Contains(errs[0].Detail, v.errorDetail) { + t.Errorf("[%s] Expected error type %q with detail %q, got %v", k, v.errorType, v.errorDetail, errs) + } + } +} + +func fakeValidSecurityContext(priv bool) *api.SecurityContext { + return &api.SecurityContext{ + Privileged: &priv, + } +} + +func TestValidPodLogOptions(t *testing.T) { + now := unversioned.Now() + negative := int64(-1) + zero := int64(0) + positive := int64(1) + tests := []struct { + opt api.PodLogOptions + errs int + }{ + {api.PodLogOptions{}, 0}, + {api.PodLogOptions{Previous: true}, 0}, + {api.PodLogOptions{Follow: true}, 0}, + {api.PodLogOptions{TailLines: &zero}, 0}, + {api.PodLogOptions{TailLines: &negative}, 1}, + {api.PodLogOptions{TailLines: &positive}, 0}, + {api.PodLogOptions{LimitBytes: &zero}, 1}, + {api.PodLogOptions{LimitBytes: &negative}, 1}, + {api.PodLogOptions{LimitBytes: &positive}, 0}, + {api.PodLogOptions{SinceSeconds: &negative}, 1}, + {api.PodLogOptions{SinceSeconds: &positive}, 0}, + {api.PodLogOptions{SinceSeconds: &zero}, 1}, + {api.PodLogOptions{SinceTime: &now}, 0}, + } + for i, test := range tests { + errs := ValidatePodLogOptions(&test.opt) + if test.errs != len(errs) { + t.Errorf("%d: Unexpected errors: %v", i, errs) + } + } +} + +func TestValidateConfigMap(t *testing.T) { + newConfigMap := func(name, namespace string, data map[string]string) api.ConfigMap { + return api.ConfigMap{ + ObjectMeta: api.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Data: data, + } + } + + var ( + validConfigMap = newConfigMap("validname", "validns", map[string]string{"key": "value"}) + maxKeyLength = newConfigMap("validname", "validns", map[string]string{strings.Repeat("a", 253): "value"}) + + emptyName = newConfigMap("", "validns", nil) + invalidName = newConfigMap("NoUppercaseOrSpecialCharsLike=Equals", "validns", nil) + emptyNs = newConfigMap("validname", "", nil) + invalidNs = newConfigMap("validname", "NoUppercaseOrSpecialCharsLike=Equals", nil) + invalidKey = newConfigMap("validname", "validns", map[string]string{"a..b": "value"}) + leadingDotKey = newConfigMap("validname", "validns", map[string]string{".ab": "value"}) + dotKey = newConfigMap("validname", "validns", map[string]string{".": "value"}) + doubleDotKey = newConfigMap("validname", "validns", map[string]string{"..": "value"}) + overMaxKeyLength = newConfigMap("validname", "validns", map[string]string{strings.Repeat("a", 254): "value"}) + overMaxSize = newConfigMap("validname", "validns", map[string]string{"key": strings.Repeat("a", api.MaxSecretSize+1)}) + ) + + tests := map[string]struct { + cfg api.ConfigMap + isValid bool + }{ + "valid": {validConfigMap, true}, + "max key length": {maxKeyLength, true}, + "leading dot key": {leadingDotKey, true}, + "empty name": {emptyName, false}, + "invalid name": {invalidName, false}, + "invalid key": {invalidKey, false}, + "empty namespace": {emptyNs, false}, + "invalid namespace": {invalidNs, false}, + "dot key": {dotKey, false}, + "double dot key": {doubleDotKey, false}, + "over max key length": {overMaxKeyLength, false}, + "over max size": {overMaxSize, false}, + } + + for name, tc := range tests { + errs := 
ValidateConfigMap(&tc.cfg) + if tc.isValid && len(errs) > 0 { + t.Errorf("%v: unexpected error: %v", name, errs) + } + if !tc.isValid && len(errs) == 0 { + t.Errorf("%v: unexpected non-error", name) + } + } +} + +func TestValidateConfigMapUpdate(t *testing.T) { + newConfigMap := func(version, name, namespace string, data map[string]string) api.ConfigMap { + return api.ConfigMap{ + ObjectMeta: api.ObjectMeta{ + Name: name, + Namespace: namespace, + ResourceVersion: version, + }, + Data: data, + } + } + + var ( + validConfigMap = newConfigMap("1", "validname", "validns", map[string]string{"key": "value"}) + noVersion = newConfigMap("", "validname", "validns", map[string]string{"key": "value"}) + ) + + cases := []struct { + name string + newCfg api.ConfigMap + oldCfg api.ConfigMap + isValid bool + }{ + { + name: "valid", + newCfg: validConfigMap, + oldCfg: validConfigMap, + isValid: true, + }, + { + name: "invalid", + newCfg: noVersion, + oldCfg: validConfigMap, + isValid: false, + }, + } + + for _, tc := range cases { + errs := ValidateConfigMapUpdate(&tc.newCfg, &tc.oldCfg) + if tc.isValid && len(errs) > 0 { + t.Errorf("%v: unexpected error: %v", tc.name, errs) + } + if !tc.isValid && len(errs) == 0 { + t.Errorf("%v: unexpected non-error", tc.name) + } + } +} + +func TestValidateHasLabel(t *testing.T) { + successCase := api.ObjectMeta{ + Name: "123", + Namespace: "ns", + Labels: map[string]string{ + "other": "blah", + "foo": "bar", + }, + } + if errs := ValidateHasLabel(successCase, field.NewPath("field"), "foo", "bar"); len(errs) != 0 { + t.Errorf("expected success: %v", errs) + } + + missingCase := api.ObjectMeta{ + Name: "123", + Namespace: "ns", + Labels: map[string]string{ + "other": "blah", + }, + } + if errs := ValidateHasLabel(missingCase, field.NewPath("field"), "foo", "bar"); len(errs) == 0 { + t.Errorf("expected failure") + } + + wrongValueCase := api.ObjectMeta{ + Name: "123", + Namespace: "ns", + Labels: map[string]string{ + "other": "blah", + "foo": "notbar", + }, + } + if errs := ValidateHasLabel(wrongValueCase, field.NewPath("field"), "foo", "bar"); len(errs) == 0 { + t.Errorf("expected failure") + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apimachinery/registered/registered.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apimachinery/registered/registered.go index 8903fd3a5ce1..c418de3b05b4 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apimachinery/registered/registered.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apimachinery/registered/registered.go @@ -35,6 +35,10 @@ var ( // registeredGroupVersions stores all API group versions for which RegisterGroup is called. registeredVersions = map[unversioned.GroupVersion]struct{}{} + // thirdPartyGroupVersions are API versions which are dynamically + // registered (and unregistered) via API calls to the apiserver + thirdPartyGroupVersions []unversioned.GroupVersion + // enabledVersions represents all enabled API versions. It should be a // subset of registeredVersions. Please call EnableVersions() to add // enabled versions. @@ -158,6 +162,60 @@ func IsRegistered(group string) bool { return found } +// IsRegisteredVersion returns if a version is registered. +func IsRegisteredVersion(v unversioned.GroupVersion) bool { + _, found := registeredVersions[v] + return found +} + +// RegisteredGroupVersions returns all registered group versions. 
+func RegisteredGroupVersions() []unversioned.GroupVersion {
+	ret := []unversioned.GroupVersion{}
+	for groupVersion := range registeredVersions {
+		ret = append(ret, groupVersion)
+	}
+	return ret
+}
+
+// IsThirdPartyAPIGroupVersion returns true if the api version is a user-registered group/version.
+func IsThirdPartyAPIGroupVersion(gv unversioned.GroupVersion) bool {
+	for ix := range thirdPartyGroupVersions {
+		if thirdPartyGroupVersions[ix] == gv {
+			return true
+		}
+	}
+	return false
+}
+
+// AddThirdPartyAPIGroupVersions sets the list of third party versions,
+// registers them in the API machinery and enables them.
+// Skips GroupVersions that are already registered.
+// Returns the list of GroupVersions that were skipped.
+func AddThirdPartyAPIGroupVersions(gvs ...unversioned.GroupVersion) []unversioned.GroupVersion {
+	filteredGVs := []unversioned.GroupVersion{}
+	skippedGVs := []unversioned.GroupVersion{}
+	for ix := range gvs {
+		if !IsRegisteredVersion(gvs[ix]) {
+			filteredGVs = append(filteredGVs, gvs[ix])
+		} else {
+			glog.V(3).Infof("Skipping %s, because it's already registered", gvs[ix].String())
+			skippedGVs = append(skippedGVs, gvs[ix])
+		}
+	}
+	if len(filteredGVs) == 0 {
+		return skippedGVs
+	}
+	RegisterVersions(filteredGVs)
+	EnableVersions(filteredGVs...)
+	// Size the replacement slice to the filtered set; sizing it to gvs would
+	// leave zero-valued GroupVersions behind for every skipped entry.
+	next := make([]unversioned.GroupVersion, len(filteredGVs))
+	for ix := range filteredGVs {
+		next[ix] = filteredGVs[ix]
+	}
+	thirdPartyGroupVersions = next
+
+	return skippedGVs
+}
+
 // TODO: This is an expedient function, because we don't check if a Group is
 // supported throughout the code base. We will abandon this function and
 // checking the error returned by the Group() function.
@@ -181,9 +239,13 @@ func GroupOrDie(group string) *apimachinery.GroupMeta {
 // all other groups alphabetical.
 func RESTMapper(versionPatterns ...unversioned.GroupVersion) meta.RESTMapper {
 	unionMapper := meta.MultiRESTMapper{}
+	unionedGroups := sets.NewString()
 	for enabledVersion := range enabledVersions {
-		groupMeta := groupMetaMap[enabledVersion.Group]
-		unionMapper = append(unionMapper, groupMeta.RESTMapper)
+		if !unionedGroups.Has(enabledVersion.Group) {
+			unionedGroups.Insert(enabledVersion.Group)
+			groupMeta := groupMetaMap[enabledVersion.Group]
+			unionMapper = append(unionMapper, groupMeta.RESTMapper)
+		}
 	}
 
 	if len(versionPatterns) != 0 {
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apimachinery/registered/registered_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apimachinery/registered/registered_test.go
new file mode 100644
index 000000000000..001b65e9226b
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apimachinery/registered/registered_test.go
@@ -0,0 +1,68 @@
+/*
+Copyright 2015 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package registered + +import ( + "testing" + + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/apimachinery" +) + +func TestAllPreferredGroupVersions(t *testing.T) { + testCases := []struct { + groupMetas []apimachinery.GroupMeta + expect string + }{ + { + groupMetas: []apimachinery.GroupMeta{ + { + GroupVersion: unversioned.GroupVersion{Group: "group1", Version: "v1"}, + }, + { + GroupVersion: unversioned.GroupVersion{Group: "group2", Version: "v2"}, + }, + { + GroupVersion: unversioned.GroupVersion{Group: "", Version: "v1"}, + }, + }, + expect: "group1/v1,group2/v2,v1", + }, + { + groupMetas: []apimachinery.GroupMeta{ + { + GroupVersion: unversioned.GroupVersion{Group: "", Version: "v1"}, + }, + }, + expect: "v1", + }, + { + groupMetas: []apimachinery.GroupMeta{}, + expect: "", + }, + } + for _, testCase := range testCases { + for _, groupMeta := range testCase.groupMetas { + RegisterGroup(groupMeta) + } + output := AllPreferredGroupVersions() + if testCase.expect != output { + t.Errorf("Error. expect: %s, got: %s", testCase.expect, output) + } + reset() + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/OWNERS b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/OWNERS new file mode 100644 index 000000000000..d28472e0fd99 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/OWNERS @@ -0,0 +1,6 @@ +assignees: + - bgrant0607 + - erictune + - lavalamp + - smarterclayton + - thockin diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/abac/latest/latest.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/abac/latest/latest.go new file mode 100644 index 000000000000..2618fc4fc248 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/abac/latest/latest.go @@ -0,0 +1,23 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package latest + +import ( + _ "k8s.io/kubernetes/pkg/apis/abac" + _ "k8s.io/kubernetes/pkg/apis/abac/v0" + _ "k8s.io/kubernetes/pkg/apis/abac/v1beta1" +) diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/abac/register.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/abac/register.go new file mode 100644 index 000000000000..c555d5aa979b --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/abac/register.go @@ -0,0 +1,40 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package abac + +import ( + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/runtime/serializer" +) + +// Group is the API group for abac +const Group = "abac.authorization.kubernetes.io" + +// Scheme is the default instance of runtime.Scheme to which types in the abac API group are registered. +var Scheme = runtime.NewScheme() + +// Codecs provides access to encoding and decoding for the scheme +var Codecs = serializer.NewCodecFactory(Scheme) + +func init() { + Scheme.AddKnownTypes(unversioned.GroupVersion{Group: Group, Version: runtime.APIVersionInternal}, + &Policy{}, + ) +} + +func (obj *Policy) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/abac/types.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/abac/types.go new file mode 100644 index 000000000000..024c7ee2417a --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/abac/types.go @@ -0,0 +1,70 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package abac + +import "k8s.io/kubernetes/pkg/api/unversioned" + +// Policy contains a single ABAC policy rule +type Policy struct { + unversioned.TypeMeta + + // Spec describes the policy rule + Spec PolicySpec +} + +// PolicySpec contains the attributes for a policy rule +type PolicySpec struct { + + // User is the username this rule applies to. + // Either user or group is required to match the request. + // "*" matches all users. + User string + + // Group is the group this rule applies to. + // Either user or group is required to match the request. + // "*" matches all groups. + Group string + + // Readonly matches readonly requests when true, and all requests when false + Readonly bool + + // APIGroup is the name of an API group. APIGroup, Resource, and Namespace are required to match resource requests. + // "*" matches all API groups + APIGroup string + + // Resource is the name of a resource. APIGroup, Resource, and Namespace are required to match resource requests. + // "*" matches all resources + Resource string + + // Namespace is the name of a namespace. APIGroup, Resource, and Namespace are required to match resource requests. + // "*" matches all namespaces (including unnamespaced requests) + Namespace string + + // NonResourcePath matches non-resource request paths. + // "*" matches all paths + // "/foo/*" matches all subpaths of foo + NonResourcePath string + + // TODO: "expires" string in RFC3339 format. + + // TODO: want a way to allow some users to restart containers of a pod but + // not delete or modify it. + + // TODO: want a way to allow a controller to create a pod based only on a + // certain podTemplates. 
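+
+	// For illustration only (the user and namespace names below are
+	// hypothetical, not defaults of this package): a rule granting user
+	// "alice" read-only access to pods in the namespace "projectCaribou"
+	// corresponds to
+	//
+	//	PolicySpec{User: "alice", Namespace: "projectCaribou", Resource: "pods", APIGroup: "*", Readonly: true}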
+ +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/abac/v0/conversion.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/abac/v0/conversion.go new file mode 100644 index 000000000000..c0fda4bd5555 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/abac/v0/conversion.go @@ -0,0 +1,58 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v0 + +import ( + api "k8s.io/kubernetes/pkg/apis/abac" + "k8s.io/kubernetes/pkg/conversion" +) + +func init() { + api.Scheme.AddConversionFuncs( + func(in *Policy, out *api.Policy, s conversion.Scope) error { + // Begin by copying all fields + out.Spec.User = in.User + out.Spec.Group = in.Group + out.Spec.Namespace = in.Namespace + out.Spec.Resource = in.Resource + out.Spec.Readonly = in.Readonly + + // In v0, unspecified user and group matches all subjects + if len(in.User) == 0 && len(in.Group) == 0 { + out.Spec.User = "*" + } + + // In v0, leaving namespace empty matches all namespaces + if len(in.Namespace) == 0 { + out.Spec.Namespace = "*" + } + // In v0, leaving resource empty matches all resources + if len(in.Resource) == 0 { + out.Spec.Resource = "*" + } + // Any rule in v0 should match all API groups + out.Spec.APIGroup = "*" + + // In v0, leaving namespace and resource blank allows non-resource paths + if len(in.Namespace) == 0 && len(in.Resource) == 0 { + out.Spec.NonResourcePath = "*" + } + + return nil + }, + ) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/abac/v0/conversion_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/abac/v0/conversion_test.go new file mode 100644 index 000000000000..ffdbd398d972 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/abac/v0/conversion_test.go @@ -0,0 +1,77 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v0_test + +import ( + "reflect" + "testing" + + api "k8s.io/kubernetes/pkg/apis/abac" + "k8s.io/kubernetes/pkg/apis/abac/v0" +) + +func TestConversion(t *testing.T) { + testcases := map[string]struct { + old *v0.Policy + expected *api.Policy + }{ + // a completely empty policy rule allows everything to all users + "empty": { + old: &v0.Policy{}, + expected: &api.Policy{Spec: api.PolicySpec{User: "*", Readonly: false, NonResourcePath: "*", Namespace: "*", Resource: "*", APIGroup: "*"}}, + }, + + // specifying a user is preserved + "user": { + old: &v0.Policy{User: "bob"}, + expected: &api.Policy{Spec: api.PolicySpec{User: "bob", Readonly: false, NonResourcePath: "*", Namespace: "*", Resource: "*", APIGroup: "*"}}, + }, + + // specifying a group is preserved (and no longer matches all users) + "group": { + old: &v0.Policy{Group: "mygroup"}, + expected: &api.Policy{Spec: api.PolicySpec{Group: "mygroup", Readonly: false, NonResourcePath: "*", Namespace: "*", Resource: "*", APIGroup: "*"}}, + }, + + // specifying a namespace removes the * match on non-resource path + "namespace": { + old: &v0.Policy{Namespace: "myns"}, + expected: &api.Policy{Spec: api.PolicySpec{User: "*", Readonly: false, NonResourcePath: "", Namespace: "myns", Resource: "*", APIGroup: "*"}}, + }, + + // specifying a resource removes the * match on non-resource path + "resource": { + old: &v0.Policy{Resource: "myresource"}, + expected: &api.Policy{Spec: api.PolicySpec{User: "*", Readonly: false, NonResourcePath: "", Namespace: "*", Resource: "myresource", APIGroup: "*"}}, + }, + + // specifying a namespace+resource removes the * match on non-resource path + "namespace+resource": { + old: &v0.Policy{Namespace: "myns", Resource: "myresource"}, + expected: &api.Policy{Spec: api.PolicySpec{User: "*", Readonly: false, NonResourcePath: "", Namespace: "myns", Resource: "myresource", APIGroup: "*"}}, + }, + } + for k, tc := range testcases { + internal := &api.Policy{} + if err := api.Scheme.Convert(tc.old, internal); err != nil { + t.Errorf("%s: unexpected error: %v", k, err) + } + if !reflect.DeepEqual(internal, tc.expected) { + t.Errorf("%s: expected\n\t%#v, got \n\t%#v", k, tc.expected, internal) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/abac/v0/register.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/abac/v0/register.go new file mode 100644 index 000000000000..d5338045a984 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/abac/v0/register.go @@ -0,0 +1,33 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v0 + +import ( + "k8s.io/kubernetes/pkg/api/unversioned" + api "k8s.io/kubernetes/pkg/apis/abac" +) + +// GroupVersion is the API group and version for abac v0 +var GroupVersion = unversioned.GroupVersion{Group: api.Group, Version: "v0"} + +func init() { + api.Scheme.AddKnownTypes(GroupVersion, + &Policy{}, + ) +} + +func (obj *Policy) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/abac/v0/types.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/abac/v0/types.go new file mode 100644 index 000000000000..58bb569f40bb --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/abac/v0/types.go @@ -0,0 +1,45 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v0 + +import "k8s.io/kubernetes/pkg/api/unversioned" + +// Policy contains a single ABAC policy rule +type Policy struct { + unversioned.TypeMeta `json:",inline"` + + // User is the username this rule applies to. + // Either user or group is required to match the request. + // "*" matches all users. + User string `json:"user,omitempty"` + + // Group is the group this rule applies to. + // Either user or group is required to match the request. + // "*" matches all groups. + Group string `json:"group,omitempty"` + + // Readonly matches readonly requests when true, and all requests when false + Readonly bool `json:"readonly,omitempty"` + + // Resource is the name of a resource + // "*" matches all resources + Resource string `json:"resource,omitempty"` + + // Namespace is the name of a namespace + // "*" matches all namespaces (including unnamespaced requests) + Namespace string `json:"namespace,omitempty"` +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/abac/v1beta1/register.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/abac/v1beta1/register.go new file mode 100644 index 000000000000..95fd6b3ef18b --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/abac/v1beta1/register.go @@ -0,0 +1,33 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + "k8s.io/kubernetes/pkg/api/unversioned" + api "k8s.io/kubernetes/pkg/apis/abac" +) + +// GroupVersion is the API group and version for abac v1beta1 +var GroupVersion = unversioned.GroupVersion{Group: api.Group, Version: "v1beta1"} + +func init() { + api.Scheme.AddKnownTypes(GroupVersion, + &Policy{}, + ) +} + +func (obj *Policy) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/abac/v1beta1/types.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/abac/v1beta1/types.go new file mode 100644 index 000000000000..7ce61ac4ac5d --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/abac/v1beta1/types.go @@ -0,0 +1,60 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import "k8s.io/kubernetes/pkg/api/unversioned" + +// Policy contains a single ABAC policy rule +type Policy struct { + unversioned.TypeMeta `json:",inline"` + + // Spec describes the policy rule + Spec PolicySpec `json:"spec"` +} + +// PolicySpec contains the attributes for a policy rule +type PolicySpec struct { + // User is the username this rule applies to. + // Either user or group is required to match the request. + // "*" matches all users. + User string `json:"user,omitempty"` + + // Group is the group this rule applies to. + // Either user or group is required to match the request. + // "*" matches all groups. + Group string `json:"group,omitempty"` + + // Readonly matches readonly requests when true, and all requests when false + Readonly bool `json:"readonly,omitempty"` + + // APIGroup is the name of an API group. APIGroup, Resource, and Namespace are required to match resource requests. + // "*" matches all API groups + APIGroup string `json:"apiGroup,omitempty"` + + // Resource is the name of a resource. APIGroup, Resource, and Namespace are required to match resource requests. + // "*" matches all resources + Resource string `json:"resource,omitempty"` + + // Namespace is the name of a namespace. APIGroup, Resource, and Namespace are required to match resource requests. + // "*" matches all namespaces (including unnamespaced requests) + Namespace string `json:"namespace,omitempty"` + + // NonResourcePath matches non-resource request paths. + // "*" matches all paths + // "/foo/*" matches all subpaths of foo + NonResourcePath string `json:"nonResourcePath,omitempty"` +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/apps/deep_copy_generated.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/apps/deep_copy_generated.go new file mode 100644 index 000000000000..5a2135c6a990 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/apps/deep_copy_generated.go @@ -0,0 +1,117 @@ +// +build !ignore_autogenerated + +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package apps + +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + conversion "k8s.io/kubernetes/pkg/conversion" +) + +func init() { + if err := api.Scheme.AddGeneratedDeepCopyFuncs( + DeepCopy_apps_PetSet, + DeepCopy_apps_PetSetList, + DeepCopy_apps_PetSetSpec, + DeepCopy_apps_PetSetStatus, + ); err != nil { + // if one of the deep copy functions is malformed, detect it immediately. + panic(err) + } +} + +func DeepCopy_apps_PetSet(in PetSet, out *PetSet, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + return err + } + if err := DeepCopy_apps_PetSetSpec(in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_apps_PetSetStatus(in.Status, &out.Status, c); err != nil { + return err + } + return nil +} + +func DeepCopy_apps_PetSetList(in PetSetList, out *PetSetList, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + return err + } + if in.Items != nil { + in, out := in.Items, &out.Items + *out = make([]PetSet, len(in)) + for i := range in { + if err := DeepCopy_apps_PetSet(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func DeepCopy_apps_PetSetSpec(in PetSetSpec, out *PetSetSpec, c *conversion.Cloner) error { + out.Replicas = in.Replicas + if in.Selector != nil { + in, out := in.Selector, &out.Selector + *out = new(unversioned.LabelSelector) + if err := unversioned.DeepCopy_unversioned_LabelSelector(*in, *out, c); err != nil { + return err + } + } else { + out.Selector = nil + } + if err := api.DeepCopy_api_PodTemplateSpec(in.Template, &out.Template, c); err != nil { + return err + } + if in.VolumeClaimTemplates != nil { + in, out := in.VolumeClaimTemplates, &out.VolumeClaimTemplates + *out = make([]api.PersistentVolumeClaim, len(in)) + for i := range in { + if err := api.DeepCopy_api_PersistentVolumeClaim(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.VolumeClaimTemplates = nil + } + out.ServiceName = in.ServiceName + return nil +} + +func DeepCopy_apps_PetSetStatus(in PetSetStatus, out *PetSetStatus, c *conversion.Cloner) error { + if in.ObservedGeneration != nil { + in, out := in.ObservedGeneration, &out.ObservedGeneration + *out = new(int64) + **out = *in + } else { + out.ObservedGeneration = nil + } + out.Replicas = in.Replicas + return nil +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/apps/install/install.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/apps/install/install.go new file mode 100644 index 000000000000..b4d9011d37fd --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/apps/install/install.go @@ -0,0 +1,125 @@ +/* +Copyright 2016 
The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package install installs the apps API group, making it available as +// an option to all of the API encoding/decoding machinery. +package install + +import ( + "fmt" + + "github.com/golang/glog" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/meta" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/apimachinery" + "k8s.io/kubernetes/pkg/apimachinery/registered" + "k8s.io/kubernetes/pkg/apis/apps" + "k8s.io/kubernetes/pkg/apis/apps/v1alpha1" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/util/sets" +) + +const importPrefix = "k8s.io/kubernetes/pkg/apis/apps" + +var accessor = meta.NewAccessor() + +// availableVersions lists all known external versions for this group from most preferred to least preferred +var availableVersions = []unversioned.GroupVersion{v1alpha1.SchemeGroupVersion} + +func init() { + registered.RegisterVersions(availableVersions) + externalVersions := []unversioned.GroupVersion{} + for _, v := range availableVersions { + if registered.IsAllowedVersion(v) { + externalVersions = append(externalVersions, v) + } + } + if len(externalVersions) == 0 { + glog.V(4).Infof("No version is registered for group %v", apps.GroupName) + return + } + + if err := registered.EnableVersions(externalVersions...); err != nil { + glog.V(4).Infof("%v", err) + return + } + if err := enableVersions(externalVersions); err != nil { + glog.V(4).Infof("%v", err) + return + } +} + +func enableVersions(externalVersions []unversioned.GroupVersion) error { + addVersionsToScheme(externalVersions...) + preferredExternalVersion := externalVersions[0] + + groupMeta := apimachinery.GroupMeta{ + GroupVersion: preferredExternalVersion, + GroupVersions: externalVersions, + RESTMapper: newRESTMapper(externalVersions), + SelfLinker: runtime.SelfLinker(accessor), + InterfacesFor: interfacesFor, + } + + if err := registered.RegisterGroup(groupMeta); err != nil { + return err + } + api.RegisterRESTMapper(groupMeta.RESTMapper) + return nil +} + +func newRESTMapper(externalVersions []unversioned.GroupVersion) meta.RESTMapper { + // the list of kinds that are scoped at the root of the api hierarchy + // if a kind is not enumerated here, it is assumed to have a namespace scope + rootScoped := sets.NewString() + + ignoredKinds := sets.NewString() + + return api.NewDefaultRESTMapper(externalVersions, interfacesFor, importPrefix, ignoredKinds, rootScoped) +} + +// interfacesFor returns the default Codec and ResourceVersioner for a given version +// string, or an error if the version is not known. 
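+//
+// A minimal sketch of an in-package call site (illustrative; assumes the apps
+// group was installed via this package's init):
+//
+//	ifaces, err := interfacesFor(v1alpha1.SchemeGroupVersion)
+//	if err != nil {
+//		// unsupported version: the error lists the group's valid versions
+//	}
+//	_ = ifaces.ObjectConvertor  // backed by api.Scheme
+//	_ = ifaces.MetadataAccessor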
+func interfacesFor(version unversioned.GroupVersion) (*meta.VersionInterfaces, error) { + switch version { + case v1alpha1.SchemeGroupVersion: + return &meta.VersionInterfaces{ + ObjectConvertor: api.Scheme, + MetadataAccessor: accessor, + }, nil + default: + g, _ := registered.Group(apps.GroupName) + return nil, fmt.Errorf("unsupported storage version: %s (valid: %v)", version, g.GroupVersions) + } +} + +func addVersionsToScheme(externalVersions ...unversioned.GroupVersion) { + // add the internal version to Scheme + apps.AddToScheme(api.Scheme) + // add the enabled external versions to Scheme + for _, v := range externalVersions { + if !registered.IsEnabledVersion(v) { + glog.Errorf("Version %s is not enabled, so it will not be added to the Scheme.", v) + continue + } + switch v { + case v1alpha1.SchemeGroupVersion: + v1alpha1.AddToScheme(api.Scheme) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/apps/register.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/apps/register.go new file mode 100644 index 000000000000..5192389bf277 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/apps/register.go @@ -0,0 +1,57 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package apps + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/runtime" +) + +func AddToScheme(scheme *runtime.Scheme) { + // Add the API to Scheme. + addKnownTypes(scheme) +} + +// GroupName is the group name use in this package +const GroupName = "apps" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} + +// Kind takes an unqualified kind and returns back a Group qualified GroupKind +func Kind(kind string) unversioned.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns back a Group qualified GroupResource +func Resource(resource string) unversioned.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) { + // TODO this will get cleaned up with the scheme types are fixed + scheme.AddKnownTypes(SchemeGroupVersion, + &PetSet{}, + &PetSetList{}, + &api.ListOptions{}, + ) +} + +func (obj *PetSet) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } +func (obj *PetSetList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/apps/types.generated.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/apps/types.generated.go new file mode 100644 index 000000000000..9903f8e13573 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/apps/types.generated.go @@ -0,0 +1,1634 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// ************************************************************ +// DO NOT EDIT. +// THIS FILE IS AUTO-GENERATED BY codecgen. +// ************************************************************ + +package apps + +import ( + "errors" + "fmt" + codec1978 "github.com/ugorji/go/codec" + pkg2_api "k8s.io/kubernetes/pkg/api" + pkg4_resource "k8s.io/kubernetes/pkg/api/resource" + pkg1_unversioned "k8s.io/kubernetes/pkg/api/unversioned" + pkg3_types "k8s.io/kubernetes/pkg/types" + pkg5_intstr "k8s.io/kubernetes/pkg/util/intstr" + "reflect" + "runtime" + time "time" +) + +const ( + // ----- content types ---- + codecSelferC_UTF81234 = 1 + codecSelferC_RAW1234 = 0 + // ----- value types used ---- + codecSelferValueTypeArray1234 = 10 + codecSelferValueTypeMap1234 = 9 + // ----- containerStateValues ---- + codecSelfer_containerMapKey1234 = 2 + codecSelfer_containerMapValue1234 = 3 + codecSelfer_containerMapEnd1234 = 4 + codecSelfer_containerArrayElem1234 = 6 + codecSelfer_containerArrayEnd1234 = 7 +) + +var ( + codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) + codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) +) + +type codecSelfer1234 struct{} + +func init() { + if codec1978.GenVersion != 5 { + _, file, _, _ := runtime.Caller(0) + err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. 
Re-generate file: %v", + 5, codec1978.GenVersion, file) + panic(err) + } + if false { // reference the types, but skip this branch at build/run time + var v0 pkg2_api.ObjectMeta + var v1 pkg4_resource.Quantity + var v2 pkg1_unversioned.TypeMeta + var v3 pkg3_types.UID + var v4 pkg5_intstr.IntOrString + var v5 time.Time + _, _, _, _, _, _ = v0, v1, v2, v3, v4, v5 + } +} + +func (x *PetSet) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = true + yyq2[1] = true + yyq2[2] = true + yyq2[3] = x.Kind != "" + yyq2[4] = x.APIVersion != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yy4 := &x.ObjectMeta + yy4.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.ObjectMeta + yy6.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yy9 := &x.Spec + yy9.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy11 := &x.Spec + yy11.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy14 := &x.Status + yy14.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy16 := &x.Status + yy16.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym23 := z.EncBinary() + _ = yym23 + 
if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PetSet) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PetSet) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_api.ObjectMeta{} + } else { + yyv4 := &x.ObjectMeta + yyv4.CodecDecodeSelf(d) + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = PetSetSpec{} + } else { + yyv5 := &x.Spec + yyv5.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = PetSetStatus{} + } else { + yyv6 := &x.Status + yyv6.CodecDecodeSelf(d) + } + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PetSet) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj9 int + var yyb9 bool + var yyhl9 bool = l >= 0 + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_api.ObjectMeta{} + } else { + yyv10 := &x.ObjectMeta + yyv10.CodecDecodeSelf(d) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = PetSetSpec{} + } else { + yyv11 := &x.Spec + yyv11.CodecDecodeSelf(d) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = PetSetStatus{} + } else { + yyv12 := &x.Status + yyv12.CodecDecodeSelf(d) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + for { + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj9-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PetSetSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Replicas != 0 + yyq2[1] = x.Selector != nil + yyq2[3] = len(x.VolumeClaimTemplates) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeInt(int64(x.Replicas)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("replicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(x.Replicas)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Selector == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.EncExt(x.Selector) { + } else { + z.EncFallback(x.Selector) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("selector")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Selector == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.EncExt(x.Selector) { + } else { + z.EncFallback(x.Selector) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy10 := &x.Template + yy10.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("template")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.Template + yy12.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + 
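+		// yyq2 is the per-field presence bitmap for the map form: Replicas,
+		// Selector and VolumeClaimTemplates are written only when set, while
+		// Template and ServiceName are required and always emitted (hence the
+		// map-length counter yynn2 starts at 2 above).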
z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.VolumeClaimTemplates == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceapi_PersistentVolumeClaim(([]pkg2_api.PersistentVolumeClaim)(x.VolumeClaimTemplates), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("volumeClaimTemplates")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.VolumeClaimTemplates == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceapi_PersistentVolumeClaim(([]pkg2_api.PersistentVolumeClaim)(x.VolumeClaimTemplates), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym18 := z.EncBinary() + _ = yym18 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ServiceName)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("serviceName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ServiceName)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PetSetSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PetSetSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "replicas": + if r.TryDecodeAsNil() { + x.Replicas = 0 + } else { + x.Replicas = int(r.DecodeInt(codecSelferBitsize1234)) + } + case "selector": + if r.TryDecodeAsNil() { + if x.Selector != nil { + x.Selector = nil + } + } else { + if x.Selector == nil { + x.Selector = new(pkg1_unversioned.LabelSelector) + } + yym6 := z.DecBinary() + _ = yym6 + if false { + } else if z.HasExtensions() && z.DecExt(x.Selector) { + } else { + z.DecFallback(x.Selector, false) + } + } + case "template": + if r.TryDecodeAsNil() { + x.Template = pkg2_api.PodTemplateSpec{} + } else { + yyv7 := &x.Template + yyv7.CodecDecodeSelf(d) + } + case "volumeClaimTemplates": + if 
r.TryDecodeAsNil() { + x.VolumeClaimTemplates = nil + } else { + yyv8 := &x.VolumeClaimTemplates + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + h.decSliceapi_PersistentVolumeClaim((*[]pkg2_api.PersistentVolumeClaim)(yyv8), d) + } + } + case "serviceName": + if r.TryDecodeAsNil() { + x.ServiceName = "" + } else { + x.ServiceName = string(r.DecodeString()) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PetSetSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj11 int + var yyb11 bool + var yyhl11 bool = l >= 0 + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Replicas = 0 + } else { + x.Replicas = int(r.DecodeInt(codecSelferBitsize1234)) + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Selector != nil { + x.Selector = nil + } + } else { + if x.Selector == nil { + x.Selector = new(pkg1_unversioned.LabelSelector) + } + yym14 := z.DecBinary() + _ = yym14 + if false { + } else if z.HasExtensions() && z.DecExt(x.Selector) { + } else { + z.DecFallback(x.Selector, false) + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Template = pkg2_api.PodTemplateSpec{} + } else { + yyv15 := &x.Template + yyv15.CodecDecodeSelf(d) + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.VolumeClaimTemplates = nil + } else { + yyv16 := &x.VolumeClaimTemplates + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + h.decSliceapi_PersistentVolumeClaim((*[]pkg2_api.PersistentVolumeClaim)(yyv16), d) + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ServiceName = "" + } else { + x.ServiceName = string(r.DecodeString()) + } + for { + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj11-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PetSetStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, 
yy2arr2 + const yyr2 bool = false + yyq2[0] = x.ObservedGeneration != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.ObservedGeneration == nil { + r.EncodeNil() + } else { + yy4 := *x.ObservedGeneration + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(yy4)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("observedGeneration")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ObservedGeneration == nil { + r.EncodeNil() + } else { + yy6 := *x.ObservedGeneration + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeInt(int64(yy6)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym9 := z.EncBinary() + _ = yym9 + if false { + } else { + r.EncodeInt(int64(x.Replicas)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("replicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(x.Replicas)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PetSetStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PetSetStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "observedGeneration": + if r.TryDecodeAsNil() { + if x.ObservedGeneration != nil { + x.ObservedGeneration = nil + } + } else { + if x.ObservedGeneration == nil { + x.ObservedGeneration = new(int64) + } + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int64)(x.ObservedGeneration)) = int64(r.DecodeInt(64)) + } + } + case "replicas": + if r.TryDecodeAsNil() { + x.Replicas = 0 + } else { + x.Replicas = int(r.DecodeInt(codecSelferBitsize1234)) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end 
for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PetSetStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ObservedGeneration != nil { + x.ObservedGeneration = nil + } + } else { + if x.ObservedGeneration == nil { + x.ObservedGeneration = new(int64) + } + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*int64)(x.ObservedGeneration)) = int64(r.DecodeInt(64)) + } + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Replicas = 0 + } else { + x.Replicas = int(r.DecodeInt(codecSelferBitsize1234)) + } + for { + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj7-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PetSetList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = true + yyq2[2] = x.Kind != "" + yyq2[3] = x.APIVersion != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yy4 := &x.ListMeta + yym5 := z.EncBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.EncExt(yy4) { + } else { + z.EncFallback(yy4) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.ListMeta + yym7 := z.EncBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.EncExt(yy6) { + } else { + z.EncFallback(yy6) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym9 := z.EncBinary() + _ = yym9 + if false { + } else { + h.encSlicePetSet(([]PetSet)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + h.encSlicePetSet(([]PetSet)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym12 := z.EncBinary() + _ = yym12 + if false { 
+ } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PetSetList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PetSetList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_unversioned.ListMeta{} + } else { + yyv4 := &x.ListMeta + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(yyv4) { + } else { + z.DecFallback(yyv4, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv6 := &x.Items + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + h.decSlicePetSet((*[]PetSet)(yyv6), d) + } + } + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PetSetList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) 
{ + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_unversioned.ListMeta{} + } else { + yyv11 := &x.ListMeta + yym12 := z.DecBinary() + _ = yym12 + if false { + } else if z.HasExtensions() && z.DecExt(yyv11) { + } else { + z.DecFallback(yyv11, false) + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv13 := &x.Items + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + h.decSlicePetSet((*[]PetSet)(yyv13), d) + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) encSliceapi_PersistentVolumeClaim(v []pkg2_api.PersistentVolumeClaim, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceapi_PersistentVolumeClaim(v *[]pkg2_api.PersistentVolumeClaim, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []pkg2_api.PersistentVolumeClaim{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 344) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]pkg2_api.PersistentVolumeClaim, yyrl1) + } + } else { + yyv1 = make([]pkg2_api.PersistentVolumeClaim, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if 
r.TryDecodeAsNil() { + yyv1[yyj1] = pkg2_api.PersistentVolumeClaim{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, pkg2_api.PersistentVolumeClaim{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = pkg2_api.PersistentVolumeClaim{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, pkg2_api.PersistentVolumeClaim{}) // var yyz1 pkg2_api.PersistentVolumeClaim + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = pkg2_api.PersistentVolumeClaim{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []pkg2_api.PersistentVolumeClaim{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSlicePetSet(v []PetSet, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSlicePetSet(v *[]PetSet, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []PetSet{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 744) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]PetSet, yyrl1) + } + } else { + yyv1 = make([]PetSet, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = PetSet{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, PetSet{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = PetSet{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, PetSet{}) // var yyz1 PetSet + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = PetSet{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []PetSet{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/apps/types.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/apps/types.go new file mode 100644 index 000000000000..f140cab96e58 --- 
/dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/apps/types.go @@ -0,0 +1,94 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package apps + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/unversioned" +) + +// PetSet represents a set of pods with consistent identities. +// Identities are defined as: +// - Network: A single stable DNS and hostname. +// - Storage: As many VolumeClaims as requested. +// The PetSet guarantees that a given network identity will always +// map to the same storage identity. PetSet is currently in alpha and +// subject to change without notice. +type PetSet struct { + unversioned.TypeMeta `json:",inline"` + api.ObjectMeta `json:"metadata,omitempty"` + + // Spec defines the desired identities of pets in this set. + Spec PetSetSpec `json:"spec,omitempty"` + + // Status is the current status of Pets in this PetSet. This data + // may be out of date by some window of time. + Status PetSetStatus `json:"status,omitempty"` +} + +// A PetSetSpec is the specification of a PetSet. +type PetSetSpec struct { + // Replicas is the desired number of replicas of the given Template. + // These are replicas in the sense that they are instantiations of the + // same Template, but individual replicas also have a consistent identity. + // If unspecified, defaults to 1. + // TODO: Consider a rename of this field. + Replicas int `json:"replicas,omitempty"` + + // Selector is a label query over pods that should match the replica count. + // If empty, defaulted to labels on the pod template. + // More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors + Selector *unversioned.LabelSelector `json:"selector,omitempty"` + + // Template is the object that describes the pod that will be created if + // insufficient replicas are detected. Each pod stamped out by the PetSet + // will fulfill this Template, but have a unique identity from the rest + // of the PetSet. + Template api.PodTemplateSpec `json:"template"` + + // VolumeClaimTemplates is a list of claims that pets are allowed to reference. + // The PetSet controller is responsible for mapping network identities to + // claims in a way that maintains the identity of a pet. Every claim in + // this list must have at least one matching (by name) volumeMount in one + // container in the template. A claim in this list takes precedence over + // any volumes in the template with the same name. + // TODO: Define the behavior if a claim already exists with the same name. + VolumeClaimTemplates []api.PersistentVolumeClaim `json:"volumeClaimTemplates,omitempty"` + + // ServiceName is the name of the service that governs this PetSet. + // This service must exist before the PetSet, and is responsible for + // the network identity of the set. Pets get DNS/hostnames that follow the + // pattern: pet-specific-string.serviceName.default.svc.cluster.local + // where "pet-specific-string" is managed by the PetSet controller.
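+// For example (names here are purely illustrative): a pet whose
+// pet-specific-string is "web-0", governed by a service named "nginx" in the
+// "default" namespace, would be addressable as
+// "web-0.nginx.default.svc.cluster.local".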
+ ServiceName string `json:"serviceName"` +} + +// PetSetStatus represents the current state of a PetSet. +type PetSetStatus struct { + // ObservedGeneration is the most recent generation observed by this PetSet. + ObservedGeneration *int64 `json:"observedGeneration,omitempty"` + + // Replicas is the number of actual replicas. + Replicas int `json:"replicas"` +} + +// PetSetList is a collection of PetSets. +type PetSetList struct { + unversioned.TypeMeta `json:",inline"` + unversioned.ListMeta `json:"metadata,omitempty"` + Items []PetSet `json:"items"` +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/conversion.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/conversion.go new file mode 100644 index 000000000000..48f1f2b4d327 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/conversion.go @@ -0,0 +1,118 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "fmt" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/unversioned" + v1 "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/apis/apps" + "k8s.io/kubernetes/pkg/conversion" + "k8s.io/kubernetes/pkg/runtime" +) + +func addConversionFuncs(scheme *runtime.Scheme) { + // Add non-generated conversion functions to handle the *int32 -> int + // conversion. A pointer is useful in the versioned type so we can default + // it, but a plain int is more convenient in the internal type. These + // functions are the same as the autogenerated ones in every other way. + err := scheme.AddConversionFuncs( + Convert_v1alpha1_PetSetSpec_To_apps_PetSetSpec, + Convert_apps_PetSetSpec_To_v1alpha1_PetSetSpec, + ) + if err != nil { + // If one of the conversion functions is malformed, detect it immediately. + panic(err) + } + + err = api.Scheme.AddFieldLabelConversionFunc("apps/v1alpha1", "PetSet", + func(label, value string) (string, string, error) { + switch label { + case "metadata.name", "metadata.namespace", "status.successful": + return label, value, nil + default: + return "", "", fmt.Errorf("field label not supported: %s", label) + } + }) + if err != nil { + // If the field label conversion function is malformed, detect it immediately.
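+		// Field label conversions translate fieldSelector queries between API
+		// versions; only the labels listed above may be used to filter PetSets.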
+ panic(err) + } +} + +func Convert_v1alpha1_PetSetSpec_To_apps_PetSetSpec(in *PetSetSpec, out *apps.PetSetSpec, s conversion.Scope) error { + if in.Replicas != nil { + out.Replicas = int(*in.Replicas) + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(unversioned.LabelSelector) + if err := s.Convert(*in, *out, 0); err != nil { + return err + } + } else { + out.Selector = nil + } + if err := s.Convert(&in.Template, &out.Template, 0); err != nil { + return err + } + if in.VolumeClaimTemplates != nil { + in, out := &in.VolumeClaimTemplates, &out.VolumeClaimTemplates + *out = make([]api.PersistentVolumeClaim, len(*in)) + for i := range *in { + if err := s.Convert(&(*in)[i], &(*out)[i], 0); err != nil { + return err + } + } + } else { + out.VolumeClaimTemplates = nil + } + out.ServiceName = in.ServiceName + return nil +} + +func Convert_apps_PetSetSpec_To_v1alpha1_PetSetSpec(in *apps.PetSetSpec, out *PetSetSpec, s conversion.Scope) error { + out.Replicas = new(int32) + *out.Replicas = int32(in.Replicas) + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(unversioned.LabelSelector) + if err := s.Convert(*in, *out, 0); err != nil { + return err + } + } else { + out.Selector = nil + } + if err := s.Convert(&in.Template, &out.Template, 0); err != nil { + return err + } + if in.VolumeClaimTemplates != nil { + in, out := &in.VolumeClaimTemplates, &out.VolumeClaimTemplates + *out = make([]v1.PersistentVolumeClaim, len(*in)) + for i := range *in { + if err := s.Convert(&(*in)[i], &(*out)[i], 0); err != nil { + return err + } + } + } else { + out.VolumeClaimTemplates = nil + } + out.ServiceName = in.ServiceName + return nil +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/conversion_generated.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/conversion_generated.go new file mode 100644 index 000000000000..cfd6ce44b275 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/conversion_generated.go @@ -0,0 +1,156 @@ +// +build !ignore_autogenerated + +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by conversion-gen. Do not edit it manually! + +package v1alpha1 + +import ( + api "k8s.io/kubernetes/pkg/api" + apps "k8s.io/kubernetes/pkg/apis/apps" + conversion "k8s.io/kubernetes/pkg/conversion" +) + +func init() { + if err := api.Scheme.AddGeneratedConversionFuncs( + Convert_v1alpha1_PetSet_To_apps_PetSet, + Convert_apps_PetSet_To_v1alpha1_PetSet, + Convert_v1alpha1_PetSetList_To_apps_PetSetList, + Convert_apps_PetSetList_To_v1alpha1_PetSetList, + Convert_v1alpha1_PetSetSpec_To_apps_PetSetSpec, + Convert_apps_PetSetSpec_To_v1alpha1_PetSetSpec, + Convert_v1alpha1_PetSetStatus_To_apps_PetSetStatus, + Convert_apps_PetSetStatus_To_v1alpha1_PetSetStatus, + ); err != nil { + // if one of the conversion functions is malformed, detect it immediately. 
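+	// Note: the PetSetSpec conversions registered here are the hand-written
+	// versions from conversion.go, since conversion-gen cannot bridge the
+	// versioned *int32 Replicas and the internal int field on its own.
+	// SetDefaults_PetSet, invoked at the top of the generated PetSet
+	// conversion below, fills a nil Replicas with 1 before that bridge runs.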
+ panic(err) + } +} + +func autoConvert_v1alpha1_PetSet_To_apps_PetSet(in *PetSet, out *apps.PetSet, s conversion.Scope) error { + SetDefaults_PetSet(in) + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + // TODO: Inefficient conversion - can we improve it? + if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { + return err + } + if err := Convert_v1alpha1_PetSetSpec_To_apps_PetSetSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1alpha1_PetSetStatus_To_apps_PetSetStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v1alpha1_PetSet_To_apps_PetSet(in *PetSet, out *apps.PetSet, s conversion.Scope) error { + return autoConvert_v1alpha1_PetSet_To_apps_PetSet(in, out, s) +} + +func autoConvert_apps_PetSet_To_v1alpha1_PetSet(in *apps.PetSet, out *PetSet, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + // TODO: Inefficient conversion - can we improve it? + if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { + return err + } + if err := Convert_apps_PetSetSpec_To_v1alpha1_PetSetSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_apps_PetSetStatus_To_v1alpha1_PetSetStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_apps_PetSet_To_v1alpha1_PetSet(in *apps.PetSet, out *PetSet, s conversion.Scope) error { + return autoConvert_apps_PetSet_To_v1alpha1_PetSet(in, out, s) +} + +func autoConvert_v1alpha1_PetSetList_To_apps_PetSetList(in *PetSetList, out *apps.PetSetList, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { + return err + } + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]apps.PetSet, len(*in)) + for i := range *in { + if err := Convert_v1alpha1_PetSet_To_apps_PetSet(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_v1alpha1_PetSetList_To_apps_PetSetList(in *PetSetList, out *apps.PetSetList, s conversion.Scope) error { + return autoConvert_v1alpha1_PetSetList_To_apps_PetSetList(in, out, s) +} + +func autoConvert_apps_PetSetList_To_v1alpha1_PetSetList(in *apps.PetSetList, out *PetSetList, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { + return err + } + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PetSet, len(*in)) + for i := range *in { + if err := Convert_apps_PetSet_To_v1alpha1_PetSet(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_apps_PetSetList_To_v1alpha1_PetSetList(in *apps.PetSetList, out *PetSetList, s conversion.Scope) error { + return autoConvert_apps_PetSetList_To_v1alpha1_PetSetList(in, out, s) +} + +func autoConvert_v1alpha1_PetSetStatus_To_apps_PetSetStatus(in *PetSetStatus, out *apps.PetSetStatus, s conversion.Scope) error { + out.ObservedGeneration = 
in.ObservedGeneration + out.Replicas = int(in.Replicas) + return nil +} + +func Convert_v1alpha1_PetSetStatus_To_apps_PetSetStatus(in *PetSetStatus, out *apps.PetSetStatus, s conversion.Scope) error { + return autoConvert_v1alpha1_PetSetStatus_To_apps_PetSetStatus(in, out, s) +} + +func autoConvert_apps_PetSetStatus_To_v1alpha1_PetSetStatus(in *apps.PetSetStatus, out *PetSetStatus, s conversion.Scope) error { + out.ObservedGeneration = in.ObservedGeneration + out.Replicas = int32(in.Replicas) + return nil +} + +func Convert_apps_PetSetStatus_To_v1alpha1_PetSetStatus(in *apps.PetSetStatus, out *PetSetStatus, s conversion.Scope) error { + return autoConvert_apps_PetSetStatus_To_v1alpha1_PetSetStatus(in, out, s) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/deep_copy_generated.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/deep_copy_generated.go new file mode 100644 index 000000000000..6e51cacbc25c --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/deep_copy_generated.go @@ -0,0 +1,124 @@ +// +build !ignore_autogenerated + +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package v1alpha1 + +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + v1 "k8s.io/kubernetes/pkg/api/v1" + conversion "k8s.io/kubernetes/pkg/conversion" +) + +func init() { + if err := api.Scheme.AddGeneratedDeepCopyFuncs( + DeepCopy_v1alpha1_PetSet, + DeepCopy_v1alpha1_PetSetList, + DeepCopy_v1alpha1_PetSetSpec, + DeepCopy_v1alpha1_PetSetStatus, + ); err != nil { + // if one of the deep copy functions is malformed, detect it immediately. 
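+	// The deep copy functions below allocate fresh memory for pointer fields
+	// (Replicas, Selector, ObservedGeneration), so a copy never shares
+	// mutable state with its source.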
+ panic(err) + } +} + +func DeepCopy_v1alpha1_PetSet(in PetSet, out *PetSet, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + return err + } + if err := DeepCopy_v1alpha1_PetSetSpec(in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_v1alpha1_PetSetStatus(in.Status, &out.Status, c); err != nil { + return err + } + return nil +} + +func DeepCopy_v1alpha1_PetSetList(in PetSetList, out *PetSetList, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + return err + } + if in.Items != nil { + in, out := in.Items, &out.Items + *out = make([]PetSet, len(in)) + for i := range in { + if err := DeepCopy_v1alpha1_PetSet(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func DeepCopy_v1alpha1_PetSetSpec(in PetSetSpec, out *PetSetSpec, c *conversion.Cloner) error { + if in.Replicas != nil { + in, out := in.Replicas, &out.Replicas + *out = new(int32) + **out = *in + } else { + out.Replicas = nil + } + if in.Selector != nil { + in, out := in.Selector, &out.Selector + *out = new(unversioned.LabelSelector) + if err := unversioned.DeepCopy_unversioned_LabelSelector(*in, *out, c); err != nil { + return err + } + } else { + out.Selector = nil + } + if err := v1.DeepCopy_v1_PodTemplateSpec(in.Template, &out.Template, c); err != nil { + return err + } + if in.VolumeClaimTemplates != nil { + in, out := in.VolumeClaimTemplates, &out.VolumeClaimTemplates + *out = make([]v1.PersistentVolumeClaim, len(in)) + for i := range in { + if err := v1.DeepCopy_v1_PersistentVolumeClaim(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.VolumeClaimTemplates = nil + } + out.ServiceName = in.ServiceName + return nil +} + +func DeepCopy_v1alpha1_PetSetStatus(in PetSetStatus, out *PetSetStatus, c *conversion.Cloner) error { + if in.ObservedGeneration != nil { + in, out := in.ObservedGeneration, &out.ObservedGeneration + *out = new(int64) + **out = *in + } else { + out.ObservedGeneration = nil + } + out.Replicas = in.Replicas + return nil +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/defaults.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/defaults.go new file mode 100644 index 000000000000..e41028138afd --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/defaults.go @@ -0,0 +1,46 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/runtime" +) + +func addDefaultingFuncs(scheme *runtime.Scheme) { + scheme.AddDefaultingFuncs( + SetDefaults_PetSet, + ) +} + +func SetDefaults_PetSet(obj *PetSet) { + labels := obj.Spec.Template.Labels + if labels != nil { + if obj.Spec.Selector == nil { + obj.Spec.Selector = &unversioned.LabelSelector{ + MatchLabels: labels, + } + } + if len(obj.Labels) == 0 { + obj.Labels = labels + } + } + if obj.Spec.Replicas == nil { + obj.Spec.Replicas = new(int32) + *obj.Spec.Replicas = 1 + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/doc.go new file mode 100644 index 000000000000..65a03a2093dc --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +genconversion=true +package v1alpha1 diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/generated.pb.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/generated.pb.go new file mode 100644 index 000000000000..88f1bcd40735 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/generated.pb.go @@ -0,0 +1,969 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/pkg/apis/apps/v1alpha1/generated.proto +// DO NOT EDIT! + +/* + Package v1alpha1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/pkg/apis/apps/v1alpha1/generated.proto + + It has these top-level messages: + PetSet + PetSetList + PetSetSpec + PetSetStatus +*/ +package v1alpha1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import k8s_io_kubernetes_pkg_api_unversioned "k8s.io/kubernetes/pkg/api/unversioned" +import k8s_io_kubernetes_pkg_api_v1 "k8s.io/kubernetes/pkg/api/v1" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. 
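+
+// A minimal usage sketch (editorial illustration, not generated code; the
+// "nginx" value is hypothetical): the Marshal/Unmarshal methods defined
+// below round-trip a PetSet through its protobuf wire form.
+//
+//	in := PetSet{Spec: PetSetSpec{ServiceName: "nginx"}}
+//	data, err := in.Marshal()
+//	if err != nil {
+//		// handle the error
+//	}
+//	var out PetSet
+//	if err := out.Unmarshal(data); err != nil {
+//		// handle the error
+//	}
+//	// out.Spec.ServiceName == "nginx" again
+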
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +func (m *PetSet) Reset() { *m = PetSet{} } +func (m *PetSet) String() string { return proto.CompactTextString(m) } +func (*PetSet) ProtoMessage() {} + +func (m *PetSetList) Reset() { *m = PetSetList{} } +func (m *PetSetList) String() string { return proto.CompactTextString(m) } +func (*PetSetList) ProtoMessage() {} + +func (m *PetSetSpec) Reset() { *m = PetSetSpec{} } +func (m *PetSetSpec) String() string { return proto.CompactTextString(m) } +func (*PetSetSpec) ProtoMessage() {} + +func (m *PetSetStatus) Reset() { *m = PetSetStatus{} } +func (m *PetSetStatus) String() string { return proto.CompactTextString(m) } +func (*PetSetStatus) ProtoMessage() {} + +func init() { + proto.RegisterType((*PetSet)(nil), "k8s.io.kubernetes.pkg.apis.apps.v1alpha1.PetSet") + proto.RegisterType((*PetSetList)(nil), "k8s.io.kubernetes.pkg.apis.apps.v1alpha1.PetSetList") + proto.RegisterType((*PetSetSpec)(nil), "k8s.io.kubernetes.pkg.apis.apps.v1alpha1.PetSetSpec") + proto.RegisterType((*PetSetStatus)(nil), "k8s.io.kubernetes.pkg.apis.apps.v1alpha1.PetSetStatus") +} +func (m *PetSet) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PetSet) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n1 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n2, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n2 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n3, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n3 + return i, nil +} + +func (m *PetSetList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PetSetList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n4, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n4 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *PetSetSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PetSetSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Replicas != nil { + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.Replicas)) + } + if m.Selector != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Selector.Size())) + n5, err := m.Selector.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n5 + } + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Template.Size())) + n6, err := m.Template.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n6 + if len(m.VolumeClaimTemplates) > 0 { + for _, msg := 
range m.VolumeClaimTemplates { + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ServiceName))) + i += copy(data[i:], m.ServiceName) + return i, nil +} + +func (m *PetSetStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PetSetStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ObservedGeneration != nil { + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.ObservedGeneration)) + } + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Replicas)) + return i, nil +} + +func encodeFixed64Generated(data []byte, offset int, v uint64) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + data[offset+4] = uint8(v >> 32) + data[offset+5] = uint8(v >> 40) + data[offset+6] = uint8(v >> 48) + data[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(data []byte, offset int, v uint32) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(data []byte, offset int, v uint64) int { + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return offset + 1 +} +func (m *PetSet) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PetSetList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PetSetSpec) Size() (n int) { + var l int + _ = l + if m.Replicas != nil { + n += 1 + sovGenerated(uint64(*m.Replicas)) + } + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.VolumeClaimTemplates) > 0 { + for _, e := range m.VolumeClaimTemplates { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.ServiceName) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PetSetStatus) Size() (n int) { + var l int + _ = l + if m.ObservedGeneration != nil { + n += 1 + sovGenerated(uint64(*m.ObservedGeneration)) + } + n += 1 + sovGenerated(uint64(m.Replicas)) + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *PetSet) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 
0x7) + if wireType == 4 { + return fmt.Errorf("proto: PetSet: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PetSet: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PetSetList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PetSetList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PetSetList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + 
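// An aside on the wire format that every Marshal, Unmarshal, and Size method
// in this generated file hand-rolls: proto2 prefixes each field with a key
// varint (fieldNumber<<3 | wireType; the 0xa, 0x12, 0x1a literals above are
// fields 1-3 with wire type 2), and each varint carries 7 payload bits per
// byte with the high bit set on all but the last byte. A minimal,
// self-contained sketch of that scheme — illustrative names, not part of the
// vendored file:
package main

import "fmt"

// putUvarint mirrors encodeVarintGenerated: 7 bits per byte, 0x80 continuation.
func putUvarint(buf []byte, v uint64) []byte {
	for v >= 1<<7 {
		buf = append(buf, byte(v&0x7f|0x80))
		v >>= 7
	}
	return append(buf, byte(v))
}

// getUvarint mirrors the shift/accumulate loops in the Unmarshal methods,
// returning the decoded value and the number of bytes consumed.
func getUvarint(buf []byte) (v uint64, n int) {
	for shift := uint(0); ; shift += 7 {
		b := buf[n]
		n++
		v |= uint64(b&0x7f) << shift
		if b < 0x80 {
			return v, n
		}
	}
}

func main() {
	key := putUvarint(nil, 2<<3|2)  // field 2, wire type 2 (length-delimited)
	fmt.Printf("key=%#x\n", key[0]) // key=0x12, the tag written before Spec/Items
	v, n := getUvarint(putUvarint(nil, 300))
	fmt.Println(v, n) // 300 2
}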
if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, PetSet{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PetSetSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PetSetSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PetSetSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Replicas = &v + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &k8s_io_kubernetes_pkg_api_unversioned.LabelSelector{} + } + if err := m.Selector.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Template.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = 
postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeClaimTemplates", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeClaimTemplates = append(m.VolumeClaimTemplates, k8s_io_kubernetes_pkg_api_v1.PersistentVolumeClaim{}) + if err := m.VolumeClaimTemplates[len(m.VolumeClaimTemplates)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PetSetStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PetSetStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PetSetStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ObservedGeneration = &v + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Replicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return 
nil +} +func skipGenerated(data []byte) (n int, err error) { + l := len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if data[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(data[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/generated.proto b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/generated.proto new file mode 100644 index 000000000000..6cb15bf3fed9 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/generated.proto @@ -0,0 +1,102 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.kubernetes.pkg.apis.apps.v1alpha1; + +import "k8s.io/kubernetes/pkg/api/resource/generated.proto"; +import "k8s.io/kubernetes/pkg/api/unversioned/generated.proto"; +import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; +import "k8s.io/kubernetes/pkg/util/intstr/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1alpha1"; + +// PetSet represents a set of pods with consistent identities. +// Identities are defined as: +// - Network: A single stable DNS and hostname. 
+// - Storage: As many VolumeClaims as requested. +// The PetSet guarantees that a given network identity will always +// map to the same storage identity. PetSet is currently in alpha +// and subject to change without notice. +message PetSet { + optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; + + // Spec defines the desired identities of pets in this set. + optional PetSetSpec spec = 2; + + // Status is the current status of Pets in this PetSet. This data + // may be out of date by some window of time. + optional PetSetStatus status = 3; +} + +// PetSetList is a collection of PetSets. +message PetSetList { + optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; + + repeated PetSet items = 2; +} + +// A PetSetSpec is the specification of a PetSet. +message PetSetSpec { + // Replicas is the desired number of replicas of the given Template. + // These are replicas in the sense that they are instantiations of the + // same Template, but individual replicas also have a consistent identity. + // If unspecified, defaults to 1. + // TODO: Consider a rename of this field. + optional int32 replicas = 1; + + // Selector is a label query over pods that should match the replica count. + // If empty, defaulted to labels on the pod template. + // More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors + optional k8s.io.kubernetes.pkg.api.unversioned.LabelSelector selector = 2; + + // Template is the object that describes the pod that will be created if + // insufficient replicas are detected. Each pod stamped out by the PetSet + // will fulfill this Template, but have a unique identity from the rest + // of the PetSet. + optional k8s.io.kubernetes.pkg.api.v1.PodTemplateSpec template = 3; + + // VolumeClaimTemplates is a list of claims that pets are allowed to reference. + // The PetSet controller is responsible for mapping network identities to + // claims in a way that maintains the identity of a pet. Every claim in + // this list must have at least one matching (by name) volumeMount in one + // container in the template. A claim in this list takes precedence over + // any volumes in the template, with the same name. + // TODO: Define the behavior if a claim already exists with the same name. + repeated k8s.io.kubernetes.pkg.api.v1.PersistentVolumeClaim volumeClaimTemplates = 4; + + // ServiceName is the name of the service that governs this PetSet. + // This service must exist before the PetSet, and is responsible for + // the network identity of the set. Pets get DNS/hostnames that follow the + // pattern: pet-specific-string.serviceName.default.svc.cluster.local + // where "pet-specific-string" is managed by the PetSet controller. + optional string serviceName = 5; +} + +// PetSetStatus represents the current state of a PetSet. +message PetSetStatus { + // most recent generation observed by this autoscaler. + optional int64 observedGeneration = 1; + + // Replicas is the number of actual replicas. + optional int32 replicas = 2; +} + diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/register.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/register.go new file mode 100644 index 000000000000..e069807754b8 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/register.go @@ -0,0 +1,49 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/runtime" + versionedwatch "k8s.io/kubernetes/pkg/watch/versioned" +) + +// GroupName is the group name use in this package +const GroupName = "apps" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: "v1alpha1"} + +func AddToScheme(scheme *runtime.Scheme) { + addKnownTypes(scheme) + addDefaultingFuncs(scheme) + addConversionFuncs(scheme) +} + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) { + scheme.AddKnownTypes(SchemeGroupVersion, + &PetSet{}, + &PetSetList{}, + &v1.ListOptions{}, + ) + versionedwatch.AddToGroupVersion(scheme, SchemeGroupVersion) +} + +func (obj *PetSet) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } +func (obj *PetSetList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/types.generated.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/types.generated.go new file mode 100644 index 000000000000..a548f53133b4 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/types.generated.go @@ -0,0 +1,1664 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// ************************************************************ +// DO NOT EDIT. +// THIS FILE IS AUTO-GENERATED BY codecgen. 
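// register.go above wires these v1alpha1 types into a runtime.Scheme via
// AddToScheme. A sketch of the intended consumer-side call pattern —
// hypothetical example code, assuming runtime.NewScheme, Scheme.Recognizes,
// and GroupVersion.WithKind from the same vendored Kubernetes packages:
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/apis/apps/v1alpha1"
	"k8s.io/kubernetes/pkg/runtime"
)

func main() {
	scheme := runtime.NewScheme()
	// Registers PetSet, PetSetList, and v1.ListOptions under apps/v1alpha1
	// and installs the package's defaulting and conversion funcs.
	v1alpha1.AddToScheme(scheme)

	gvk := v1alpha1.SchemeGroupVersion.WithKind("PetSet")
	fmt.Println(scheme.Recognizes(gvk)) // true once registration has run
}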
+// ************************************************************ + +package v1alpha1 + +import ( + "errors" + "fmt" + codec1978 "github.com/ugorji/go/codec" + pkg4_resource "k8s.io/kubernetes/pkg/api/resource" + pkg1_unversioned "k8s.io/kubernetes/pkg/api/unversioned" + pkg2_v1 "k8s.io/kubernetes/pkg/api/v1" + pkg3_types "k8s.io/kubernetes/pkg/types" + pkg5_intstr "k8s.io/kubernetes/pkg/util/intstr" + "reflect" + "runtime" + time "time" +) + +const ( + // ----- content types ---- + codecSelferC_UTF81234 = 1 + codecSelferC_RAW1234 = 0 + // ----- value types used ---- + codecSelferValueTypeArray1234 = 10 + codecSelferValueTypeMap1234 = 9 + // ----- containerStateValues ---- + codecSelfer_containerMapKey1234 = 2 + codecSelfer_containerMapValue1234 = 3 + codecSelfer_containerMapEnd1234 = 4 + codecSelfer_containerArrayElem1234 = 6 + codecSelfer_containerArrayEnd1234 = 7 +) + +var ( + codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) + codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) +) + +type codecSelfer1234 struct{} + +func init() { + if codec1978.GenVersion != 5 { + _, file, _, _ := runtime.Caller(0) + err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v", + 5, codec1978.GenVersion, file) + panic(err) + } + if false { // reference the types, but skip this branch at build/run time + var v0 pkg4_resource.Quantity + var v1 pkg1_unversioned.TypeMeta + var v2 pkg2_v1.ObjectMeta + var v3 pkg3_types.UID + var v4 pkg5_intstr.IntOrString + var v5 time.Time + _, _, _, _, _, _ = v0, v1, v2, v3, v4, v5 + } +} + +func (x *PetSet) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = true + yyq2[1] = true + yyq2[2] = true + yyq2[3] = x.Kind != "" + yyq2[4] = x.APIVersion != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yy4 := &x.ObjectMeta + yy4.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.ObjectMeta + yy6.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yy9 := &x.Spec + yy9.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy11 := &x.Spec + yy11.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy14 := &x.Status + yy14.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, 
string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy16 := &x.Status + yy16.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PetSet) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PetSet) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv4 := &x.ObjectMeta + yyv4.CodecDecodeSelf(d) + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = PetSetSpec{} + } else { + yyv5 := &x.Spec + yyv5.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = PetSetStatus{} + } else { + yyv6 := &x.Status + yyv6.CodecDecodeSelf(d) + } + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + default: + 
z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PetSet) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj9 int + var yyb9 bool + var yyhl9 bool = l >= 0 + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv10 := &x.ObjectMeta + yyv10.CodecDecodeSelf(d) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = PetSetSpec{} + } else { + yyv11 := &x.Spec + yyv11.CodecDecodeSelf(d) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = PetSetStatus{} + } else { + yyv12 := &x.Status + yyv12.CodecDecodeSelf(d) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + for { + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj9-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PetSetSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Replicas != nil + yyq2[1] = x.Selector != nil + yyq2[3] = len(x.VolumeClaimTemplates) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Replicas == nil { + r.EncodeNil() + } else { + yy4 := *x.Replicas + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(yy4)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("replicas")) + 
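// Each CodecEncodeSelf in this file carries two parallel branches: a map
// encoding keyed by the JSON field names (as in the "metadata"/"spec" keys
// above) and a bare array encoding, selected by the handle's StructToArray
// option. A small illustration of the knob those twin branches serve, using
// the ugorji codec API directly — example code, not generated output:
package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

type spec struct {
	Replicas    int    `codec:"replicas"`
	ServiceName string `codec:"serviceName"`
}

func encode(h *codec.JsonHandle, v interface{}) string {
	var out []byte
	if err := codec.NewEncoderBytes(&out, h).Encode(v); err != nil {
		panic(err)
	}
	return string(out)
}

func main() {
	var h codec.JsonHandle
	fmt.Println(encode(&h, spec{1, "web"})) // {"replicas":1,"serviceName":"web"}

	h.StructToArray = true // flips generated encoders onto their array branch
	fmt.Println(encode(&h, spec{1, "web"})) // [1,"web"]
}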
z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Replicas == nil { + r.EncodeNil() + } else { + yy6 := *x.Replicas + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeInt(int64(yy6)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Selector == nil { + r.EncodeNil() + } else { + yym9 := z.EncBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.EncExt(x.Selector) { + } else { + z.EncFallback(x.Selector) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("selector")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Selector == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.EncExt(x.Selector) { + } else { + z.EncFallback(x.Selector) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy12 := &x.Template + yy12.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("template")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy14 := &x.Template + yy14.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.VolumeClaimTemplates == nil { + r.EncodeNil() + } else { + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + h.encSlicev1_PersistentVolumeClaim(([]pkg2_v1.PersistentVolumeClaim)(x.VolumeClaimTemplates), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("volumeClaimTemplates")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.VolumeClaimTemplates == nil { + r.EncodeNil() + } else { + yym18 := z.EncBinary() + _ = yym18 + if false { + } else { + h.encSlicev1_PersistentVolumeClaim(([]pkg2_v1.PersistentVolumeClaim)(x.VolumeClaimTemplates), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ServiceName)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("serviceName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym21 := z.EncBinary() + _ = yym21 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ServiceName)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PetSetSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + 
x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PetSetSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "replicas": + if r.TryDecodeAsNil() { + if x.Replicas != nil { + x.Replicas = nil + } + } else { + if x.Replicas == nil { + x.Replicas = new(int32) + } + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int32)(x.Replicas)) = int32(r.DecodeInt(32)) + } + } + case "selector": + if r.TryDecodeAsNil() { + if x.Selector != nil { + x.Selector = nil + } + } else { + if x.Selector == nil { + x.Selector = new(pkg1_unversioned.LabelSelector) + } + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(x.Selector) { + } else { + z.DecFallback(x.Selector, false) + } + } + case "template": + if r.TryDecodeAsNil() { + x.Template = pkg2_v1.PodTemplateSpec{} + } else { + yyv8 := &x.Template + yyv8.CodecDecodeSelf(d) + } + case "volumeClaimTemplates": + if r.TryDecodeAsNil() { + x.VolumeClaimTemplates = nil + } else { + yyv9 := &x.VolumeClaimTemplates + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + h.decSlicev1_PersistentVolumeClaim((*[]pkg2_v1.PersistentVolumeClaim)(yyv9), d) + } + } + case "serviceName": + if r.TryDecodeAsNil() { + x.ServiceName = "" + } else { + x.ServiceName = string(r.DecodeString()) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PetSetSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Replicas != nil { + x.Replicas = nil + } + } else { + if x.Replicas == nil { + x.Replicas = new(int32) + } + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*int32)(x.Replicas)) = int32(r.DecodeInt(32)) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Selector != nil { + x.Selector = nil + } + } else { + if x.Selector == nil { + x.Selector = new(pkg1_unversioned.LabelSelector) + } + yym16 := z.DecBinary() + _ = yym16 + if false { + } else if z.HasExtensions() && z.DecExt(x.Selector) { + } else { + z.DecFallback(x.Selector, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + 
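// The decode paths here preserve a distinction the API relies on: a nil
// Replicas means "unspecified, defaults to 1" (per the field's doc comment),
// while an explicit 0 is an actual replica count of zero — hence the *int32
// and the new(int32) allocations rather than a plain int32. A tiny standalone
// illustration of why the pointer matters, using plain encoding/json
// (example code only):
package main

import (
	"encoding/json"
	"fmt"
)

type spec struct {
	Replicas *int32 `json:"replicas,omitempty"`
}

func main() {
	zero := int32(0)
	unset, _ := json.Marshal(spec{})                   // {}
	explicit, _ := json.Marshal(spec{Replicas: &zero}) // {"replicas":0}
	fmt.Println(string(unset), string(explicit))
}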
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Template = pkg2_v1.PodTemplateSpec{} + } else { + yyv17 := &x.Template + yyv17.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.VolumeClaimTemplates = nil + } else { + yyv18 := &x.VolumeClaimTemplates + yym19 := z.DecBinary() + _ = yym19 + if false { + } else { + h.decSlicev1_PersistentVolumeClaim((*[]pkg2_v1.PersistentVolumeClaim)(yyv18), d) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ServiceName = "" + } else { + x.ServiceName = string(r.DecodeString()) + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PetSetStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.ObservedGeneration != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.ObservedGeneration == nil { + r.EncodeNil() + } else { + yy4 := *x.ObservedGeneration + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(yy4)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("observedGeneration")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ObservedGeneration == nil { + r.EncodeNil() + } else { + yy6 := *x.ObservedGeneration + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeInt(int64(yy6)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym9 := z.EncBinary() + _ = yym9 + if false { + } else { + r.EncodeInt(int64(x.Replicas)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("replicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(x.Replicas)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PetSetStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else 
if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PetSetStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "observedGeneration": + if r.TryDecodeAsNil() { + if x.ObservedGeneration != nil { + x.ObservedGeneration = nil + } + } else { + if x.ObservedGeneration == nil { + x.ObservedGeneration = new(int64) + } + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int64)(x.ObservedGeneration)) = int64(r.DecodeInt(64)) + } + } + case "replicas": + if r.TryDecodeAsNil() { + x.Replicas = 0 + } else { + x.Replicas = int32(r.DecodeInt(32)) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PetSetStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ObservedGeneration != nil { + x.ObservedGeneration = nil + } + } else { + if x.ObservedGeneration == nil { + x.ObservedGeneration = new(int64) + } + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*int64)(x.ObservedGeneration)) = int64(r.DecodeInt(64)) + } + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Replicas = 0 + } else { + x.Replicas = int32(r.DecodeInt(32)) + } + for { + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj7-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PetSetList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, 
yy2arr2 + const yyr2 bool = false + yyq2[0] = true + yyq2[2] = x.Kind != "" + yyq2[3] = x.APIVersion != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yy4 := &x.ListMeta + yym5 := z.EncBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.EncExt(yy4) { + } else { + z.EncFallback(yy4) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.ListMeta + yym7 := z.EncBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.EncExt(yy6) { + } else { + z.EncFallback(yy6) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym9 := z.EncBinary() + _ = yym9 + if false { + } else { + h.encSlicePetSet(([]PetSet)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + h.encSlicePetSet(([]PetSet)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym12 := z.EncBinary() + _ = yym12 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PetSetList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PetSetList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_unversioned.ListMeta{} + } else { + yyv4 := &x.ListMeta + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(yyv4) { + } else { + z.DecFallback(yyv4, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv6 := &x.Items + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + h.decSlicePetSet((*[]PetSet)(yyv6), d) + } + } + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PetSetList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_unversioned.ListMeta{} + } else { + yyv11 := &x.ListMeta + yym12 := z.DecBinary() + _ = yym12 + if false { + } else if z.HasExtensions() && z.DecExt(yyv11) { + } else { + z.DecFallback(yyv11, false) + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv13 := &x.Items + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + h.decSlicePetSet((*[]PetSet)(yyv13), d) + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + 
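// The slice decoders below (decSlicev1_PersistentVolumeClaim, decSlicePetSet)
// never trust a claimed element count outright: DecInferLen caps the initial
// allocation against a byte budget using a per-element size estimate (the 344
// and 768 literals), then falls back to append if more elements really
// arrive. A standalone sketch of that guard — illustrative, not the library's
// exact algorithm:
package main

import "fmt"

// inferLen picks a safe initial length for a claimed count, given a byte
// budget and an estimated element size; capped=true tells the caller it must
// grow by append beyond the returned length.
func inferLen(claimed, maxBytes, unitSize int) (n int, capped bool) {
	if maxBytes <= 0 {
		maxBytes = 1 << 20 // assumed fallback budget for this sketch
	}
	if limit := maxBytes / unitSize; claimed > limit {
		return limit, true
	}
	return claimed, false
}

func main() {
	fmt.Println(inferLen(10, 1<<20, 768))    // 10 false
	fmt.Println(inferLen(1<<30, 1<<20, 768)) // 1365 true
}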
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) encSlicev1_PersistentVolumeClaim(v []pkg2_v1.PersistentVolumeClaim, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSlicev1_PersistentVolumeClaim(v *[]pkg2_v1.PersistentVolumeClaim, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []pkg2_v1.PersistentVolumeClaim{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 344) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]pkg2_v1.PersistentVolumeClaim, yyrl1) + } + } else { + yyv1 = make([]pkg2_v1.PersistentVolumeClaim, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = pkg2_v1.PersistentVolumeClaim{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, pkg2_v1.PersistentVolumeClaim{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = pkg2_v1.PersistentVolumeClaim{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, pkg2_v1.PersistentVolumeClaim{}) // var yyz1 pkg2_v1.PersistentVolumeClaim + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = pkg2_v1.PersistentVolumeClaim{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []pkg2_v1.PersistentVolumeClaim{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSlicePetSet(v []PetSet, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSlicePetSet(v *[]PetSet, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []PetSet{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if 
yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 768) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]PetSet, yyrl1) + } + } else { + yyv1 = make([]PetSet, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = PetSet{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, PetSet{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = PetSet{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, PetSet{}) // var yyz1 PetSet + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = PetSet{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []PetSet{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/types.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/types.go new file mode 100644 index 000000000000..5306483ab4b5 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/types.go @@ -0,0 +1,94 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/api/v1" +) + +// PetSet represents a set of pods with consistent identities. +// Identities are defined as: +// - Network: A single stable DNS and hostname. +// - Storage: As many VolumeClaims as requested. +// The PetSet guarantees that a given network identity will always +// map to the same storage identity. PetSet is currently in alpha +// and subject to change without notice. +type PetSet struct { + unversioned.TypeMeta `json:",inline"` + v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec defines the desired identities of pets in this set. + Spec PetSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Status is the current status of Pets in this PetSet. This data + // may be out of date by some window of time. + Status PetSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// A PetSetSpec is the specification of a PetSet. +type PetSetSpec struct { + // Replicas is the desired number of replicas of the given Template. 
+ // These are replicas in the sense that they are instantiations of the + // same Template, but individual replicas also have a consistent identity. + // If unspecified, defaults to 1. + // TODO: Consider a rename of this field. + Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` + + // Selector is a label query over pods that should match the replica count. + // If empty, it defaults to the labels on the pod template. + // More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors + Selector *unversioned.LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"` + + // Template is the object that describes the pod that will be created if + // insufficient replicas are detected. Each pod stamped out by the PetSet + // will fulfill this Template, but have a unique identity from the rest + // of the PetSet. + Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,3,opt,name=template"` + + // VolumeClaimTemplates is a list of claims that pets are allowed to reference. + // The PetSet controller is responsible for mapping network identities to + // claims in a way that maintains the identity of a pet. Every claim in + // this list must have at least one matching (by name) volumeMount in one + // container in the template. A claim in this list takes precedence over + // any volume in the template with the same name. + // TODO: Define the behavior if a claim already exists with the same name. + VolumeClaimTemplates []v1.PersistentVolumeClaim `json:"volumeClaimTemplates,omitempty" protobuf:"bytes,4,rep,name=volumeClaimTemplates"` + + // ServiceName is the name of the service that governs this PetSet. + // This service must exist before the PetSet, and is responsible for + // the network identity of the set. Pets get DNS/hostnames that follow the + // pattern: pet-specific-string.serviceName.default.svc.cluster.local + // where "pet-specific-string" is managed by the PetSet controller. + ServiceName string `json:"serviceName" protobuf:"bytes,5,opt,name=serviceName"` +} + +// PetSetStatus represents the current state of a PetSet. +type PetSetStatus struct { + // ObservedGeneration is the most recent generation observed by this PetSet. + ObservedGeneration *int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` + + // Replicas is the number of actual replicas. + Replicas int32 `json:"replicas" protobuf:"varint,2,opt,name=replicas"` +} + +// PetSetList is a collection of PetSets. +type PetSetList struct { + unversioned.TypeMeta `json:",inline"` + unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + Items []PetSet `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/types_swagger_doc_generated.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/types_swagger_doc_generated.go new file mode 100644 index 000000000000..6306b48123f6 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/types_swagger_doc_generated.go @@ -0,0 +1,71 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License.
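A minimal sketch of populating the v1alpha1 types defined above, assuming the vendored import paths in this diff resolve; the int32Ptr helper and all literal values are illustrative assumptions, not part of the vendored code:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/unversioned"
	"k8s.io/kubernetes/pkg/api/v1"
	apps "k8s.io/kubernetes/pkg/apis/apps/v1alpha1"
)

// int32Ptr exists because Replicas is *int32 (helper assumed for the example).
func int32Ptr(i int32) *int32 { return &i }

func main() {
	podLabels := map[string]string{"app": "web"}
	ps := apps.PetSet{
		ObjectMeta: v1.ObjectMeta{Name: "web", Namespace: "default"},
		Spec: apps.PetSetSpec{
			Replicas:    int32Ptr(3),
			Selector:    &unversioned.LabelSelector{MatchLabels: podLabels},
			ServiceName: "web", // governing service; must exist before the PetSet
			Template: v1.PodTemplateSpec{
				ObjectMeta: v1.ObjectMeta{Labels: podLabels},
				Spec: v1.PodSpec{
					Containers: []v1.Container{{Name: "web", Image: "nginx"}},
				},
			},
		},
	}
	// Per the ServiceName doc above, each pet then resolves under
	// <pet-specific-string>.web.default.svc.cluster.local.
	fmt.Printf("PetSet %q requests %d pets\n", ps.Name, *ps.Spec.Replicas)
}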
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_PetSet = map[string]string{ + "": "PetSet represents a set of pods with consistent identities. Identities are defined as:\n - Network: A single stable DNS and hostname.\n - Storage: As many VolumeClaims as requested.\nThe PetSet guarantees that a given network identity will always map to the same storage identity. PetSet is currently in alpha and subject to change without notice.", + "spec": "Spec defines the desired identities of pets in this set.", + "status": "Status is the current status of Pets in this PetSet. This data may be out of date by some window of time.", +} + +func (PetSet) SwaggerDoc() map[string]string { + return map_PetSet +} + +var map_PetSetList = map[string]string{ + "": "PetSetList is a collection of PetSets.", +} + +func (PetSetList) SwaggerDoc() map[string]string { + return map_PetSetList +} + +var map_PetSetSpec = map[string]string{ + "": "A PetSetSpec is the specification of a PetSet.", + "replicas": "Replicas is the desired number of replicas of the given Template. These are replicas in the sense that they are instantiations of the same Template, but individual replicas also have a consistent identity. If unspecified, defaults to 1.", + "selector": "Selector is a label query over pods that should match the replica count. If empty, it defaults to the labels on the pod template. More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors", + "template": "Template is the object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the PetSet will fulfill this Template, but have a unique identity from the rest of the PetSet.", + "volumeClaimTemplates": "VolumeClaimTemplates is a list of claims that pets are allowed to reference. The PetSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pet. Every claim in this list must have at least one matching (by name) volumeMount in one container in the template. A claim in this list takes precedence over any volume in the template with the same name.", + "serviceName": "ServiceName is the name of the service that governs this PetSet. This service must exist before the PetSet, and is responsible for the network identity of the set.
Pets get DNS/hostnames that follow the pattern: pet-specific-string.serviceName.default.svc.cluster.local where \"pet-specific-string\" is managed by the PetSet controller.", +} + +func (PetSetSpec) SwaggerDoc() map[string]string { + return map_PetSetSpec +} + +var map_PetSetStatus = map[string]string{ + "": "PetSetStatus represents the current state of a PetSet.", + "observedGeneration": "ObservedGeneration is the most recent generation observed by this PetSet.", + "replicas": "Replicas is the number of actual replicas.", +} + +func (PetSetStatus) SwaggerDoc() map[string]string { + return map_PetSetStatus +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/apps/validation/validation.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/apps/validation/validation.go new file mode 100644 index 000000000000..acbdfedfe1eb --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/apps/validation/validation.go @@ -0,0 +1,128 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package validation + +import ( + "reflect" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/unversioned" + unversionedvalidation "k8s.io/kubernetes/pkg/api/unversioned/validation" + apivalidation "k8s.io/kubernetes/pkg/api/validation" + "k8s.io/kubernetes/pkg/apis/apps" + "k8s.io/kubernetes/pkg/labels" + "k8s.io/kubernetes/pkg/util/validation/field" +) + +// ValidatePetSetName can be used to check whether the given PetSet name is valid. +// Prefix indicates this name will be used as part of generation, in which case +// trailing dashes are allowed. +func ValidatePetSetName(name string, prefix bool) []string { + // TODO: Validate that there's room for the suffix inserted by the pets. + // Currently this is just "-index". In the future we may allow a user + // specified list of suffixes and we need to validate the longest one. + return apivalidation.NameIsDNSSubdomain(name, prefix) +} + +// Validates the given template and ensures that it is in accordance with the desired selector. +func ValidatePodTemplateSpecForPetSet(template *api.PodTemplateSpec, selector labels.Selector, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if template == nil { + allErrs = append(allErrs, field.Required(fldPath, "")) + } else { + if !selector.Empty() { + // Verify that the PetSet selector matches the labels in template. + labels := labels.Set(template.Labels) + if !selector.Matches(labels) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("metadata", "labels"), template.Labels, "`selector` does not match template `labels`")) + } + } + // TODO: Add validation for PodSpec, currently this will check volumes, which we know will + // fail. We should really check that the union of the given volumes and volumeClaims match + // volume mounts in the containers. + // allErrs = append(allErrs, apivalidation.ValidatePodTemplateSpec(template, fldPath)...)
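// For illustration (a hand-written sketch, not vendored code): the selector
// match performed above is a subset test, so a selector requiring app=web
// matches template labels {app: web, tier: frontend} but not {app: db}:
//
//   sel, _ := unversioned.LabelSelectorAsSelector(
//       &unversioned.LabelSelector{MatchLabels: map[string]string{"app": "web"}})
//   sel.Matches(labels.Set{"app": "web", "tier": "frontend"}) // true
//   sel.Matches(labels.Set{"app": "db"})                      // false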
+ allErrs = append(allErrs, unversionedvalidation.ValidateLabels(template.Labels, fldPath.Child("labels"))...) + allErrs = append(allErrs, apivalidation.ValidateAnnotations(template.Annotations, fldPath.Child("annotations"))...) + allErrs = append(allErrs, apivalidation.ValidatePodSpecificAnnotations(template.Annotations, fldPath.Child("annotations"))...) + } + return allErrs +} + +// ValidatePetSetSpec tests if required fields in the PetSet spec are set. +func ValidatePetSetSpec(spec *apps.PetSetSpec, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(spec.Replicas), fldPath.Child("replicas"))...) + if spec.Selector == nil { + allErrs = append(allErrs, field.Required(fldPath.Child("selector"), "")) + } else { + allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(spec.Selector, fldPath.Child("selector"))...) + if len(spec.Selector.MatchLabels)+len(spec.Selector.MatchExpressions) == 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("selector"), spec.Selector, "empty selector is not valid for petset.")) + } + } + + selector, err := unversioned.LabelSelectorAsSelector(spec.Selector) + if err != nil { + allErrs = append(allErrs, field.Invalid(fldPath.Child("selector"), spec.Selector, "")) + } else { + allErrs = append(allErrs, ValidatePodTemplateSpecForPetSet(&spec.Template, selector, fldPath.Child("template"))...) + } + + if spec.Template.Spec.RestartPolicy != api.RestartPolicyAlways { + allErrs = append(allErrs, field.NotSupported(fldPath.Child("template", "spec", "restartPolicy"), spec.Template.Spec.RestartPolicy, []string{string(api.RestartPolicyAlways)})) + } + + return allErrs +} + +// ValidatePetSet validates a PetSet. +func ValidatePetSet(petSet *apps.PetSet) field.ErrorList { + allErrs := apivalidation.ValidateObjectMeta(&petSet.ObjectMeta, true, ValidatePetSetName, field.NewPath("metadata")) + allErrs = append(allErrs, ValidatePetSetSpec(&petSet.Spec, field.NewPath("spec"))...) + return allErrs +} + +// ValidatePetSetUpdate tests if required fields in the PetSet are set. +func ValidatePetSetUpdate(petSet, oldPetSet *apps.PetSet) field.ErrorList { + allErrs := field.ErrorList{} + + // TODO: For now we're taking the safe route and disallowing all updates to spec except for Spec.Replicas. + // Enable on a case by case basis. + restoreReplicas := petSet.Spec.Replicas + petSet.Spec.Replicas = oldPetSet.Spec.Replicas + + // The generation changes for this update + restoreGeneration := petSet.Generation + petSet.Generation = oldPetSet.Generation + + if !reflect.DeepEqual(petSet, oldPetSet) { + allErrs = append(allErrs, field.Forbidden(field.NewPath("spec"), "updates to petset spec for fields other than 'replicas' are forbidden.")) + } + petSet.Spec.Replicas = restoreReplicas + petSet.Generation = restoreGeneration + allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(petSet.Spec.Replicas), field.NewPath("spec", "replicas"))...) + return allErrs +} + +// ValidatePetSetStatusUpdate tests if required fields in the PetSet are set. +func ValidatePetSetStatusUpdate(petSet, oldPetSet *apps.PetSet) field.ErrorList { + allErrs := field.ErrorList{} + allErrs = append(allErrs, apivalidation.ValidateObjectMetaUpdate(&petSet.ObjectMeta, &oldPetSet.ObjectMeta, field.NewPath("metadata"))...) + // TODO: Validate status. 
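// The spec-update validator above relies on a restore-and-compare idiom: the
// mutable fields (Spec.Replicas and the ObjectMeta Generation) are temporarily
// overwritten with their old values so that a single reflect.DeepEqual call
// flags any other change, then restored. Condensed sketch, with hypothetical
// newPS/oldPS names, for illustration only:
//
//   saved := newPS.Spec.Replicas
//   newPS.Spec.Replicas = oldPS.Spec.Replicas
//   if !reflect.DeepEqual(newPS, oldPS) {
//       // a field other than replicas changed: forbidden
//   }
//   newPS.Spec.Replicas = saved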
+ return allErrs +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/apps/validation/validation_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/apps/validation/validation_test.go new file mode 100644 index 000000000000..851ea895fc1a --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/apps/validation/validation_test.go @@ -0,0 +1,379 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package validation + +import ( + "strings" + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/apis/apps" +) + +func TestValidatePetSet(t *testing.T) { + validLabels := map[string]string{"a": "b"} + validPodTemplate := api.PodTemplate{ + Template: api.PodTemplateSpec{ + ObjectMeta: api.ObjectMeta{ + Labels: validLabels, + }, + Spec: api.PodSpec{ + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + Containers: []api.Container{{Name: "abc", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + }, + }, + } + invalidLabels := map[string]string{"NoUppercaseOrSpecialCharsLike=Equals": "b"} + invalidPodTemplate := api.PodTemplate{ + Template: api.PodTemplateSpec{ + Spec: api.PodSpec{ + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + }, + ObjectMeta: api.ObjectMeta{ + Labels: invalidLabels, + }, + }, + } + successCases := []apps.PetSet{ + { + ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + Spec: apps.PetSetSpec{ + Selector: &unversioned.LabelSelector{MatchLabels: validLabels}, + Template: validPodTemplate.Template, + }, + }, + { + ObjectMeta: api.ObjectMeta{Name: "abc-123", Namespace: api.NamespaceDefault}, + Spec: apps.PetSetSpec{ + Selector: &unversioned.LabelSelector{MatchLabels: validLabels}, + Template: validPodTemplate.Template, + }, + }, + } + for _, successCase := range successCases { + if errs := ValidatePetSet(&successCase); len(errs) != 0 { + t.Errorf("expected success: %v", errs) + } + } + + errorCases := map[string]apps.PetSet{ + "zero-length ID": { + ObjectMeta: api.ObjectMeta{Name: "", Namespace: api.NamespaceDefault}, + Spec: apps.PetSetSpec{ + Selector: &unversioned.LabelSelector{MatchLabels: validLabels}, + Template: validPodTemplate.Template, + }, + }, + "missing-namespace": { + ObjectMeta: api.ObjectMeta{Name: "abc-123"}, + Spec: apps.PetSetSpec{ + Selector: &unversioned.LabelSelector{MatchLabels: validLabels}, + Template: validPodTemplate.Template, + }, + }, + "empty selector": { + ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + Spec: apps.PetSetSpec{ + Template: validPodTemplate.Template, + }, + }, + "selector_doesnt_match": { + ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + Spec: apps.PetSetSpec{ + Selector: &unversioned.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}, + Template: validPodTemplate.Template, + }, + }, + "invalid manifest": { + ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: 
api.NamespaceDefault}, + Spec: apps.PetSetSpec{ + Selector: &unversioned.LabelSelector{MatchLabels: validLabels}, + }, + }, + "negative_replicas": { + ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + Spec: apps.PetSetSpec{ + Replicas: -1, + Selector: &unversioned.LabelSelector{MatchLabels: validLabels}, + }, + }, + "invalid_label": { + ObjectMeta: api.ObjectMeta{ + Name: "abc-123", + Namespace: api.NamespaceDefault, + Labels: map[string]string{ + "NoUppercaseOrSpecialCharsLike=Equals": "bar", + }, + }, + Spec: apps.PetSetSpec{ + Selector: &unversioned.LabelSelector{MatchLabels: validLabels}, + Template: validPodTemplate.Template, + }, + }, + "invalid_label 2": { + ObjectMeta: api.ObjectMeta{ + Name: "abc-123", + Namespace: api.NamespaceDefault, + Labels: map[string]string{ + "NoUppercaseOrSpecialCharsLike=Equals": "bar", + }, + }, + Spec: apps.PetSetSpec{ + Template: invalidPodTemplate.Template, + }, + }, + "invalid_annotation": { + ObjectMeta: api.ObjectMeta{ + Name: "abc-123", + Namespace: api.NamespaceDefault, + Annotations: map[string]string{ + "NoUppercaseOrSpecialCharsLike=Equals": "bar", + }, + }, + Spec: apps.PetSetSpec{ + Selector: &unversioned.LabelSelector{MatchLabels: validLabels}, + Template: validPodTemplate.Template, + }, + }, + "invalid restart policy 1": { + ObjectMeta: api.ObjectMeta{ + Name: "abc-123", + Namespace: api.NamespaceDefault, + }, + Spec: apps.PetSetSpec{ + Selector: &unversioned.LabelSelector{MatchLabels: validLabels}, + Template: api.PodTemplateSpec{ + Spec: api.PodSpec{ + RestartPolicy: api.RestartPolicyOnFailure, + DNSPolicy: api.DNSClusterFirst, + Containers: []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + }, + ObjectMeta: api.ObjectMeta{ + Labels: validLabels, + }, + }, + }, + }, + "invalid restart policy 2": { + ObjectMeta: api.ObjectMeta{ + Name: "abc-123", + Namespace: api.NamespaceDefault, + }, + Spec: apps.PetSetSpec{ + Selector: &unversioned.LabelSelector{MatchLabels: validLabels}, + Template: api.PodTemplateSpec{ + Spec: api.PodSpec{ + RestartPolicy: api.RestartPolicyNever, + DNSPolicy: api.DNSClusterFirst, + Containers: []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + }, + ObjectMeta: api.ObjectMeta{ + Labels: validLabels, + }, + }, + }, + }, + } + for k, v := range errorCases { + errs := ValidatePetSet(&v) + if len(errs) == 0 { + t.Errorf("expected failure for %s", k) + } + for i := range errs { + field := errs[i].Field + if !strings.HasPrefix(field, "spec.template.") && + field != "metadata.name" && + field != "metadata.namespace" && + field != "spec.selector" && + field != "spec.template" && + field != "GCEPersistentDisk.ReadOnly" && + field != "spec.replicas" && + field != "spec.template.labels" && + field != "metadata.annotations" && + field != "metadata.labels" && + field != "status.replicas" { + t.Errorf("%s: missing prefix for: %v", k, errs[i]) + } + } + } +} + +func TestValidatePetSetUpdate(t *testing.T) { + validLabels := map[string]string{"a": "b"} + validPodTemplate := api.PodTemplate{ + Template: api.PodTemplateSpec{ + ObjectMeta: api.ObjectMeta{ + Labels: validLabels, + }, + Spec: api.PodSpec{ + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + Containers: []api.Container{{Name: "abc", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + }, + }, + } + readWriteVolumePodTemplate := api.PodTemplate{ + Template: api.PodTemplateSpec{ + ObjectMeta: api.ObjectMeta{ + Labels: validLabels, + }, + Spec: api.PodSpec{ + 
RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + Containers: []api.Container{{Name: "abc", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + Volumes: []api.Volume{{Name: "gcepd", VolumeSource: api.VolumeSource{GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{PDName: "my-PD", FSType: "ext4", Partition: 1, ReadOnly: false}}}}, + }, + }, + } + invalidLabels := map[string]string{"NoUppercaseOrSpecialCharsLike=Equals": "b"} + invalidPodTemplate := api.PodTemplate{ + Template: api.PodTemplateSpec{ + Spec: api.PodSpec{ + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + }, + ObjectMeta: api.ObjectMeta{ + Labels: invalidLabels, + }, + }, + } + type psUpdateTest struct { + old apps.PetSet + update apps.PetSet + } + successCases := []psUpdateTest{ + { + old: apps.PetSet{ + ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + Spec: apps.PetSetSpec{ + Selector: &unversioned.LabelSelector{MatchLabels: validLabels}, + Template: validPodTemplate.Template, + }, + }, + update: apps.PetSet{ + ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + Spec: apps.PetSetSpec{ + Replicas: 3, + Selector: &unversioned.LabelSelector{MatchLabels: validLabels}, + Template: validPodTemplate.Template, + }, + }, + }, + } + for _, successCase := range successCases { + successCase.old.ObjectMeta.ResourceVersion = "1" + successCase.update.ObjectMeta.ResourceVersion = "1" + if errs := ValidatePetSetUpdate(&successCase.update, &successCase.old); len(errs) != 0 { + t.Errorf("expected success: %v", errs) + } + } + errorCases := map[string]psUpdateTest{ + "more than one read/write": { + old: apps.PetSet{ + ObjectMeta: api.ObjectMeta{Name: "", Namespace: api.NamespaceDefault}, + Spec: apps.PetSetSpec{ + Selector: &unversioned.LabelSelector{MatchLabels: validLabels}, + Template: validPodTemplate.Template, + }, + }, + update: apps.PetSet{ + ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + Spec: apps.PetSetSpec{ + Replicas: 2, + Selector: &unversioned.LabelSelector{MatchLabels: validLabels}, + Template: readWriteVolumePodTemplate.Template, + }, + }, + }, + "updates to a field other than spec.Replicas": { + old: apps.PetSet{ + ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + Spec: apps.PetSetSpec{ + Selector: &unversioned.LabelSelector{MatchLabels: validLabels}, + Template: validPodTemplate.Template, + }, + }, + update: apps.PetSet{ + ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + Spec: apps.PetSetSpec{ + Replicas: 1, + Selector: &unversioned.LabelSelector{MatchLabels: validLabels}, + Template: readWriteVolumePodTemplate.Template, + }, + }, + }, + "invalid selector": { + old: apps.PetSet{ + ObjectMeta: api.ObjectMeta{Name: "", Namespace: api.NamespaceDefault}, + Spec: apps.PetSetSpec{ + Selector: &unversioned.LabelSelector{MatchLabels: validLabels}, + Template: validPodTemplate.Template, + }, + }, + update: apps.PetSet{ + ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + Spec: apps.PetSetSpec{ + Replicas: 2, + Selector: &unversioned.LabelSelector{MatchLabels: invalidLabels}, + Template: validPodTemplate.Template, + }, + }, + }, + "invalid pod": { + old: apps.PetSet{ + ObjectMeta: api.ObjectMeta{Name: "", Namespace: api.NamespaceDefault}, + Spec: apps.PetSetSpec{ + Selector: &unversioned.LabelSelector{MatchLabels: validLabels}, + Template: validPodTemplate.Template, + }, + }, + update: apps.PetSet{ + ObjectMeta: 
api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + Spec: apps.PetSetSpec{ + Replicas: 2, + Selector: &unversioned.LabelSelector{MatchLabels: validLabels}, + Template: invalidPodTemplate.Template, + }, + }, + }, + "negative replicas": { + old: apps.PetSet{ + ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + Spec: apps.PetSetSpec{ + Selector: &unversioned.LabelSelector{MatchLabels: validLabels}, + Template: validPodTemplate.Template, + }, + }, + update: apps.PetSet{ + ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + Spec: apps.PetSetSpec{ + Replicas: -1, + Selector: &unversioned.LabelSelector{MatchLabels: validLabels}, + Template: validPodTemplate.Template, + }, + }, + }, + } + for testName, errorCase := range errorCases { + if errs := ValidatePetSetUpdate(&errorCase.update, &errorCase.old); len(errs) == 0 { + t.Errorf("expected failure: %s", testName) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/deep_copy_generated.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/deep_copy_generated.go new file mode 100644 index 000000000000..75ac7281ed24 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/deep_copy_generated.go @@ -0,0 +1,91 @@ +// +build !ignore_autogenerated + +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package authentication + +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + conversion "k8s.io/kubernetes/pkg/conversion" +) + +func init() { + if err := api.Scheme.AddGeneratedDeepCopyFuncs( + DeepCopy_authenticationk8sio_TokenReview, + DeepCopy_authenticationk8sio_TokenReviewSpec, + DeepCopy_authenticationk8sio_TokenReviewStatus, + DeepCopy_authenticationk8sio_UserInfo, + ); err != nil { + // if one of the deep copy functions is malformed, detect it immediately. 
+ panic(err) + } +} + +func DeepCopy_authenticationk8sio_TokenReview(in TokenReview, out *TokenReview, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := DeepCopy_authenticationk8sio_TokenReviewSpec(in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_authenticationk8sio_TokenReviewStatus(in.Status, &out.Status, c); err != nil { + return err + } + return nil +} + +func DeepCopy_authenticationk8sio_TokenReviewSpec(in TokenReviewSpec, out *TokenReviewSpec, c *conversion.Cloner) error { + out.Token = in.Token + return nil +} + +func DeepCopy_authenticationk8sio_TokenReviewStatus(in TokenReviewStatus, out *TokenReviewStatus, c *conversion.Cloner) error { + out.Authenticated = in.Authenticated + if err := DeepCopy_authenticationk8sio_UserInfo(in.User, &out.User, c); err != nil { + return err + } + return nil +} + +func DeepCopy_authenticationk8sio_UserInfo(in UserInfo, out *UserInfo, c *conversion.Cloner) error { + out.Username = in.Username + out.UID = in.UID + if in.Groups != nil { + in, out := in.Groups, &out.Groups + *out = make([]string, len(in)) + copy(*out, in) + } else { + out.Groups = nil + } + if in.Extra != nil { + in, out := in.Extra, &out.Extra + *out = make(map[string][]string) + for key, val := range in { + if newVal, err := c.DeepCopy(val); err != nil { + return err + } else { + (*out)[key] = newVal.([]string) + } + } + } else { + out.Extra = nil + } + return nil +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/install/install.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/install/install.go new file mode 100644 index 000000000000..29447d217781 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/install/install.go @@ -0,0 +1,123 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package install installs the authentication.k8s.io API group, making it available as +// an option to all of the API encoding/decoding machinery.
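// Consumers typically pull this package in only for its registration side
// effects; a blank import is the usual (illustrative) form:
//
//	import _ "k8s.io/kubernetes/pkg/apis/authentication.k8s.io/install"
//
// after which the group's enabled versions are available on the shared
// api.Scheme.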
+package install + +import ( + "fmt" + + "github.com/golang/glog" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/meta" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/apimachinery" + "k8s.io/kubernetes/pkg/apimachinery/registered" + "k8s.io/kubernetes/pkg/apis/authentication.k8s.io" + "k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/util/sets" +) + +const importPrefix = "k8s.io/kubernetes/pkg/apis/authentication.k8s.io" + +var accessor = meta.NewAccessor() + +// availableVersions lists all known external versions for this group from most preferred to least preferred +var availableVersions = []unversioned.GroupVersion{v1beta1.SchemeGroupVersion} + +func init() { + registered.RegisterVersions(availableVersions) + externalVersions := []unversioned.GroupVersion{} + for _, v := range availableVersions { + if registered.IsAllowedVersion(v) { + externalVersions = append(externalVersions, v) + } + } + if len(externalVersions) == 0 { + glog.V(4).Infof("No version is registered for group %v", authentication.GroupName) + return + } + + if err := registered.EnableVersions(externalVersions...); err != nil { + glog.V(4).Infof("%v", err) + return + } + if err := enableVersions(externalVersions); err != nil { + glog.V(4).Infof("%v", err) + return + } +} + +// TODO: enableVersions should be centralized rather than spread in each API +// group. +// We can combine registered.RegisterVersions, registered.EnableVersions and +// registered.RegisterGroup once we have moved enableVersions there. +func enableVersions(externalVersions []unversioned.GroupVersion) error { + addVersionsToScheme(externalVersions...) + preferredExternalVersion := externalVersions[0] + + groupMeta := apimachinery.GroupMeta{ + GroupVersion: preferredExternalVersion, + GroupVersions: externalVersions, + RESTMapper: newRESTMapper(externalVersions), + SelfLinker: runtime.SelfLinker(accessor), + InterfacesFor: interfacesFor, + } + + if err := registered.RegisterGroup(groupMeta); err != nil { + return err + } + api.RegisterRESTMapper(groupMeta.RESTMapper) + return nil +} + +func addVersionsToScheme(externalVersions ...unversioned.GroupVersion) { + // add the internal version to Scheme + authentication.AddToScheme(api.Scheme) + // add the enabled external versions to Scheme + for _, v := range externalVersions { + if !registered.IsEnabledVersion(v) { + glog.Errorf("Version %s is not enabled, so it will not be added to the Scheme.", v) + continue + } + switch v { + case v1beta1.SchemeGroupVersion: + v1beta1.AddToScheme(api.Scheme) + } + } +} + +func newRESTMapper(externalVersions []unversioned.GroupVersion) meta.RESTMapper { + rootScoped := sets.NewString("TokenReview") + ignoredKinds := sets.NewString() + return api.NewDefaultRESTMapper(externalVersions, interfacesFor, importPrefix, ignoredKinds, rootScoped) +} + +func interfacesFor(version unversioned.GroupVersion) (*meta.VersionInterfaces, error) { + switch version { + case v1beta1.SchemeGroupVersion: + return &meta.VersionInterfaces{ + ObjectConvertor: api.Scheme, + MetadataAccessor: accessor, + }, nil + default: + g, _ := registered.Group(authentication.GroupName) + return nil, fmt.Errorf("unsupported storage version: %s (valid: %v)", version, g.GroupVersions) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/register.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/register.go new file mode 100644 index 
000000000000..4dda3140fc25 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/register.go @@ -0,0 +1,50 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package authentication + +import ( + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/runtime" +) + +// GroupName is the group name used in this package +const GroupName = "authentication.k8s.io" + +// SchemeGroupVersion is the group version used to register these objects +var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} + +// Kind takes an unqualified kind and returns back a Group qualified GroupKind +func Kind(kind string) unversioned.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns back a Group qualified GroupResource +func Resource(resource string) unversioned.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +func AddToScheme(scheme *runtime.Scheme) { + addKnownTypes(scheme) +} + +func addKnownTypes(scheme *runtime.Scheme) { + scheme.AddKnownTypes(SchemeGroupVersion, + &TokenReview{}, + ) +} + +func (obj *TokenReview) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/types.generated.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/types.generated.go new file mode 100644 index 000000000000..b3b72d653b1b --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/types.generated.go @@ -0,0 +1,1265 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// ************************************************************ +// DO NOT EDIT. +// THIS FILE IS AUTO-GENERATED BY codecgen.
+// ************************************************************ + +package authentication + +import ( + "errors" + "fmt" + codec1978 "github.com/ugorji/go/codec" + pkg1_unversioned "k8s.io/kubernetes/pkg/api/unversioned" + "reflect" + "runtime" +) + +const ( + // ----- content types ---- + codecSelferC_UTF81234 = 1 + codecSelferC_RAW1234 = 0 + // ----- value types used ---- + codecSelferValueTypeArray1234 = 10 + codecSelferValueTypeMap1234 = 9 + // ----- containerStateValues ---- + codecSelfer_containerMapKey1234 = 2 + codecSelfer_containerMapValue1234 = 3 + codecSelfer_containerMapEnd1234 = 4 + codecSelfer_containerArrayElem1234 = 6 + codecSelfer_containerArrayEnd1234 = 7 +) + +var ( + codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) + codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) +) + +type codecSelfer1234 struct{} + +func init() { + if codec1978.GenVersion != 5 { + _, file, _, _ := runtime.Caller(0) + err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v", + 5, codec1978.GenVersion, file) + panic(err) + } + if false { // reference the types, but skip this branch at build/run time + var v0 pkg1_unversioned.TypeMeta + _ = v0 + } +} + +func (x *TokenReview) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[2] = x.Kind != "" + yyq2[3] = x.APIVersion != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy4 := &x.Spec + yy4.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("Spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.Spec + yy6.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy9 := &x.Status + yy9.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("Status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy11 := &x.Status + yy11.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + 
r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym18 := z.EncBinary() + _ = yym18 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *TokenReview) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *TokenReview) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "Spec": + if r.TryDecodeAsNil() { + x.Spec = TokenReviewSpec{} + } else { + yyv4 := &x.Spec + yyv4.CodecDecodeSelf(d) + } + case "Status": + if r.TryDecodeAsNil() { + x.Status = TokenReviewStatus{} + } else { + yyv5 := &x.Status + yyv5.CodecDecodeSelf(d) + } + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *TokenReview) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = TokenReviewSpec{} + } else { + yyv9 := &x.Spec + yyv9.CodecDecodeSelf(d) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = TokenReviewStatus{} + } else { + yyv10 := &x.Status + yyv10.CodecDecodeSelf(d) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = 
r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *TokenReviewSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Token)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("Token")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Token)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *TokenReviewSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *TokenReviewSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) 
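// At this point one map key has been decoded into yys3 and the decoder has
// been advanced to the corresponding value; the switch below dispatches on
// the field name. The generated protocol, roughly (hand-written sketch,
// not codecgen output):
//
//   for each key/value pair in the encoded map {
//       key := readKey()
//       switch key {
//       case "Token": obj.Token = readString()
//       default:      skipValue() // unknown fields are swallowed
//       }
//   }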
+ switch yys3 { + case "Token": + if r.TryDecodeAsNil() { + x.Token = "" + } else { + x.Token = string(r.DecodeString()) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *TokenReviewSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj5 int + var yyb5 bool + var yyhl5 bool = l >= 0 + yyj5++ + if yyhl5 { + yyb5 = yyj5 > l + } else { + yyb5 = r.CheckBreak() + } + if yyb5 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Token = "" + } else { + x.Token = string(r.DecodeString()) + } + for { + yyj5++ + if yyhl5 { + yyb5 = yyj5 > l + } else { + yyb5 = r.CheckBreak() + } + if yyb5 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj5-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *TokenReviewStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeBool(bool(x.Authenticated)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("Authenticated")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeBool(bool(x.Authenticated)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy7 := &x.User + yy7.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("User")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy9 := &x.User + yy9.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *TokenReviewStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x 
*TokenReviewStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "Authenticated": + if r.TryDecodeAsNil() { + x.Authenticated = false + } else { + x.Authenticated = bool(r.DecodeBool()) + } + case "User": + if r.TryDecodeAsNil() { + x.User = UserInfo{} + } else { + yyv5 := &x.User + yyv5.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *TokenReviewStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Authenticated = false + } else { + x.Authenticated = bool(r.DecodeBool()) + } + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.User = UserInfo{} + } else { + yyv8 := &x.User + yyv8.CodecDecodeSelf(d) + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *UserInfo) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 4 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Username)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("Username")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Username)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.UID)) + } + } else { + 
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("UID")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.UID)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Groups == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + z.F.EncSliceStringV(x.Groups, false, e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("Groups")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Groups == nil { + r.EncodeNil() + } else { + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + z.F.EncSliceStringV(x.Groups, false, e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Extra == nil { + r.EncodeNil() + } else { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + h.encMapstringSlicestring((map[string][]string)(x.Extra), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("Extra")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Extra == nil { + r.EncodeNil() + } else { + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + h.encMapstringSlicestring((map[string][]string)(x.Extra), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *UserInfo) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *UserInfo) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "Username": + if r.TryDecodeAsNil() { + x.Username = "" + } else { + x.Username = string(r.DecodeString()) + } + case "UID": + if r.TryDecodeAsNil() { + x.UID = "" + } else { + x.UID = string(r.DecodeString()) + } + case "Groups": + if r.TryDecodeAsNil() { + x.Groups = nil + } else { + yyv6 := &x.Groups + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + z.F.DecSliceStringX(yyv6, false, d) + } + } + case "Extra": + if r.TryDecodeAsNil() { + x.Extra = nil + } else 
{ + yyv8 := &x.Extra + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + h.decMapstringSlicestring((*map[string][]string)(yyv8), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *UserInfo) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Username = "" + } else { + x.Username = string(r.DecodeString()) + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.UID = "" + } else { + x.UID = string(r.DecodeString()) + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Groups = nil + } else { + yyv13 := &x.Groups + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + z.F.DecSliceStringX(yyv13, false, d) + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Extra = nil + } else { + yyv15 := &x.Extra + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + h.decMapstringSlicestring((*map[string][]string)(yyv15), d) + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) encMapstringSlicestring(v map[string][]string, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeMapStart(len(v)) + for yyk1, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + yym2 := z.EncBinary() + _ = yym2 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(yyk1)) + } + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyv1 == nil { + r.EncodeNil() + } else { + yym3 := z.EncBinary() + _ = yym3 + if false { + } else { + z.F.EncSliceStringV(yyv1, false, e) + } + } + } + z.EncSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x codecSelfer1234) decMapstringSlicestring(v *map[string][]string, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyl1 := r.ReadMapStart() + yybh1 := z.DecBasicHandle() + if yyv1 == nil { + yyrl1, _ := z.DecInferLen(yyl1, yybh1.MaxInitLen, 40) + yyv1 = make(map[string][]string, yyrl1) + *v = yyv1 + } + var yymk1 string + var yymv1 []string + var yymg1 bool + if yybh1.MapValueReset { + yymg1 = true + } + if yyl1 > 0 { + for yyj1 := 0; yyj1 < yyl1; 
yyj1++ { + z.DecSendContainerState(codecSelfer_containerMapKey1234) + if r.TryDecodeAsNil() { + yymk1 = "" + } else { + yymk1 = string(r.DecodeString()) + } + + if yymg1 { + yymv1 = yyv1[yymk1] + } else { + yymv1 = nil + } + z.DecSendContainerState(codecSelfer_containerMapValue1234) + if r.TryDecodeAsNil() { + yymv1 = nil + } else { + yyv3 := &yymv1 + yym4 := z.DecBinary() + _ = yym4 + if false { + } else { + z.F.DecSliceStringX(yyv3, false, d) + } + } + + if yyv1 != nil { + yyv1[yymk1] = yymv1 + } + } + } else if yyl1 < 0 { + for yyj1 := 0; !r.CheckBreak(); yyj1++ { + z.DecSendContainerState(codecSelfer_containerMapKey1234) + if r.TryDecodeAsNil() { + yymk1 = "" + } else { + yymk1 = string(r.DecodeString()) + } + + if yymg1 { + yymv1 = yyv1[yymk1] + } else { + yymv1 = nil + } + z.DecSendContainerState(codecSelfer_containerMapValue1234) + if r.TryDecodeAsNil() { + yymv1 = nil + } else { + yyv6 := &yymv1 + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + z.F.DecSliceStringX(yyv6, false, d) + } + } + + if yyv1 != nil { + yyv1[yymk1] = yymv1 + } + } + } // else len==0: TODO: Should we clear map entries? + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x codecSelfer1234) encSlicestring(v []string, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym2 := z.EncBinary() + _ = yym2 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(yyv1)) + } + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSlicestring(v *[]string, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []string{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]string, yyrl1) + } + } else { + yyv1 = make([]string, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv1[yyj1] = string(r.DecodeString()) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, "") + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv1[yyj1] = string(r.DecodeString()) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, "") // var yyz1 string + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv1[yyj1] = string(r.DecodeString()) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []string{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/types.go 
b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/types.go new file mode 100644 index 000000000000..02ec0d2b2b7a --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/types.go @@ -0,0 +1,61 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package authentication + +import ( + "k8s.io/kubernetes/pkg/api/unversioned" +) + +// TokenReview attempts to authenticate a token to a known user. +type TokenReview struct { + unversioned.TypeMeta + + // Spec holds information about the request being evaluated + Spec TokenReviewSpec + + // Status is filled in by the server and indicates whether the request can be authenticated. + Status TokenReviewStatus +} + +// TokenReviewSpec is a description of the token authentication request. +type TokenReviewSpec struct { + // Token is the opaque bearer token. + Token string +} + +// TokenReviewStatus is the result of the token authentication request. +type TokenReviewStatus struct { + // Authenticated indicates that the token was associated with a known user. + Authenticated bool + // User is the UserInfo associated with the provided token. + User UserInfo +} + +// UserInfo holds the information about the user needed to implement the +// user.Info interface. +type UserInfo struct { + // The name that uniquely identifies this user among all active users. + Username string + // A unique value that identifies this user across time. If this user is + // deleted and another user by the same name is added, they will have + // different UIDs. + UID string + // The names of groups this user is a part of. + Groups []string + // Any additional information provided by the authenticator. + Extra map[string][]string +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/conversion.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/conversion.go new file mode 100644 index 000000000000..6a8545d13409 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/conversion.go @@ -0,0 +1,30 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + "k8s.io/kubernetes/pkg/runtime" +) + +func addConversionFuncs(scheme *runtime.Scheme) { + // Add non-generated conversion functions + err := scheme.AddConversionFuncs() + if err != nil { + // If one of the conversion functions is malformed, detect it immediately. + panic(err) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/conversion_generated.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/conversion_generated.go new file mode 100644 index 000000000000..9972f82ed100 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/conversion_generated.go @@ -0,0 +1,143 @@ +// +build !ignore_autogenerated + +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by conversion-gen. Do not edit it manually! + +package v1beta1 + +import ( + api "k8s.io/kubernetes/pkg/api" + authentication_k8s_io "k8s.io/kubernetes/pkg/apis/authentication.k8s.io" + conversion "k8s.io/kubernetes/pkg/conversion" +) + +func init() { + if err := api.Scheme.AddGeneratedConversionFuncs( + Convert_v1beta1_TokenReview_To_authenticationk8sio_TokenReview, + Convert_authenticationk8sio_TokenReview_To_v1beta1_TokenReview, + Convert_v1beta1_TokenReviewSpec_To_authenticationk8sio_TokenReviewSpec, + Convert_authenticationk8sio_TokenReviewSpec_To_v1beta1_TokenReviewSpec, + Convert_v1beta1_TokenReviewStatus_To_authenticationk8sio_TokenReviewStatus, + Convert_authenticationk8sio_TokenReviewStatus_To_v1beta1_TokenReviewStatus, + Convert_v1beta1_UserInfo_To_authenticationk8sio_UserInfo, + Convert_authenticationk8sio_UserInfo_To_v1beta1_UserInfo, + ); err != nil { + // if one of the conversion functions is malformed, detect it immediately. 
+ panic(err) + } +} + +func autoConvert_v1beta1_TokenReview_To_authenticationk8sio_TokenReview(in *TokenReview, out *authentication_k8s_io.TokenReview, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := Convert_v1beta1_TokenReviewSpec_To_authenticationk8sio_TokenReviewSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1beta1_TokenReviewStatus_To_authenticationk8sio_TokenReviewStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v1beta1_TokenReview_To_authenticationk8sio_TokenReview(in *TokenReview, out *authentication_k8s_io.TokenReview, s conversion.Scope) error { + return autoConvert_v1beta1_TokenReview_To_authenticationk8sio_TokenReview(in, out, s) +} + +func autoConvert_authenticationk8sio_TokenReview_To_v1beta1_TokenReview(in *authentication_k8s_io.TokenReview, out *TokenReview, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := Convert_authenticationk8sio_TokenReviewSpec_To_v1beta1_TokenReviewSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_authenticationk8sio_TokenReviewStatus_To_v1beta1_TokenReviewStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_authenticationk8sio_TokenReview_To_v1beta1_TokenReview(in *authentication_k8s_io.TokenReview, out *TokenReview, s conversion.Scope) error { + return autoConvert_authenticationk8sio_TokenReview_To_v1beta1_TokenReview(in, out, s) +} + +func autoConvert_v1beta1_TokenReviewSpec_To_authenticationk8sio_TokenReviewSpec(in *TokenReviewSpec, out *authentication_k8s_io.TokenReviewSpec, s conversion.Scope) error { + out.Token = in.Token + return nil +} + +func Convert_v1beta1_TokenReviewSpec_To_authenticationk8sio_TokenReviewSpec(in *TokenReviewSpec, out *authentication_k8s_io.TokenReviewSpec, s conversion.Scope) error { + return autoConvert_v1beta1_TokenReviewSpec_To_authenticationk8sio_TokenReviewSpec(in, out, s) +} + +func autoConvert_authenticationk8sio_TokenReviewSpec_To_v1beta1_TokenReviewSpec(in *authentication_k8s_io.TokenReviewSpec, out *TokenReviewSpec, s conversion.Scope) error { + out.Token = in.Token + return nil +} + +func Convert_authenticationk8sio_TokenReviewSpec_To_v1beta1_TokenReviewSpec(in *authentication_k8s_io.TokenReviewSpec, out *TokenReviewSpec, s conversion.Scope) error { + return autoConvert_authenticationk8sio_TokenReviewSpec_To_v1beta1_TokenReviewSpec(in, out, s) +} + +func autoConvert_v1beta1_TokenReviewStatus_To_authenticationk8sio_TokenReviewStatus(in *TokenReviewStatus, out *authentication_k8s_io.TokenReviewStatus, s conversion.Scope) error { + out.Authenticated = in.Authenticated + if err := Convert_v1beta1_UserInfo_To_authenticationk8sio_UserInfo(&in.User, &out.User, s); err != nil { + return err + } + return nil +} + +func Convert_v1beta1_TokenReviewStatus_To_authenticationk8sio_TokenReviewStatus(in *TokenReviewStatus, out *authentication_k8s_io.TokenReviewStatus, s conversion.Scope) error { + return autoConvert_v1beta1_TokenReviewStatus_To_authenticationk8sio_TokenReviewStatus(in, out, s) +} + +func autoConvert_authenticationk8sio_TokenReviewStatus_To_v1beta1_TokenReviewStatus(in *authentication_k8s_io.TokenReviewStatus, out *TokenReviewStatus, s conversion.Scope) error { + out.Authenticated = in.Authenticated + if err := 
Convert_authenticationk8sio_UserInfo_To_v1beta1_UserInfo(&in.User, &out.User, s); err != nil { + return err + } + return nil +} + +func Convert_authenticationk8sio_TokenReviewStatus_To_v1beta1_TokenReviewStatus(in *authentication_k8s_io.TokenReviewStatus, out *TokenReviewStatus, s conversion.Scope) error { + return autoConvert_authenticationk8sio_TokenReviewStatus_To_v1beta1_TokenReviewStatus(in, out, s) +} + +func autoConvert_v1beta1_UserInfo_To_authenticationk8sio_UserInfo(in *UserInfo, out *authentication_k8s_io.UserInfo, s conversion.Scope) error { + out.Username = in.Username + out.UID = in.UID + out.Groups = in.Groups + out.Extra = in.Extra + return nil +} + +func Convert_v1beta1_UserInfo_To_authenticationk8sio_UserInfo(in *UserInfo, out *authentication_k8s_io.UserInfo, s conversion.Scope) error { + return autoConvert_v1beta1_UserInfo_To_authenticationk8sio_UserInfo(in, out, s) +} + +func autoConvert_authenticationk8sio_UserInfo_To_v1beta1_UserInfo(in *authentication_k8s_io.UserInfo, out *UserInfo, s conversion.Scope) error { + out.Username = in.Username + out.UID = in.UID + out.Groups = in.Groups + out.Extra = in.Extra + return nil +} + +func Convert_authenticationk8sio_UserInfo_To_v1beta1_UserInfo(in *authentication_k8s_io.UserInfo, out *UserInfo, s conversion.Scope) error { + return autoConvert_authenticationk8sio_UserInfo_To_v1beta1_UserInfo(in, out, s) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/deep_copy_generated.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/deep_copy_generated.go new file mode 100644 index 000000000000..e44dfc86bf39 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/deep_copy_generated.go @@ -0,0 +1,91 @@ +// +build !ignore_autogenerated + +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package v1beta1 + +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + conversion "k8s.io/kubernetes/pkg/conversion" +) + +func init() { + if err := api.Scheme.AddGeneratedDeepCopyFuncs( + DeepCopy_v1beta1_TokenReview, + DeepCopy_v1beta1_TokenReviewSpec, + DeepCopy_v1beta1_TokenReviewStatus, + DeepCopy_v1beta1_UserInfo, + ); err != nil { + // if one of the deep copy functions is malformed, detect it immediately. 
+ panic(err) + } +} + +func DeepCopy_v1beta1_TokenReview(in TokenReview, out *TokenReview, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := DeepCopy_v1beta1_TokenReviewSpec(in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_v1beta1_TokenReviewStatus(in.Status, &out.Status, c); err != nil { + return err + } + return nil +} + +func DeepCopy_v1beta1_TokenReviewSpec(in TokenReviewSpec, out *TokenReviewSpec, c *conversion.Cloner) error { + out.Token = in.Token + return nil +} + +func DeepCopy_v1beta1_TokenReviewStatus(in TokenReviewStatus, out *TokenReviewStatus, c *conversion.Cloner) error { + out.Authenticated = in.Authenticated + if err := DeepCopy_v1beta1_UserInfo(in.User, &out.User, c); err != nil { + return err + } + return nil +} + +func DeepCopy_v1beta1_UserInfo(in UserInfo, out *UserInfo, c *conversion.Cloner) error { + out.Username = in.Username + out.UID = in.UID + if in.Groups != nil { + in, out := in.Groups, &out.Groups + *out = make([]string, len(in)) + copy(*out, in) + } else { + out.Groups = nil + } + if in.Extra != nil { + in, out := in.Extra, &out.Extra + *out = make(map[string][]string) + for key, val := range in { + if newVal, err := c.DeepCopy(val); err != nil { + return err + } else { + (*out)[key] = newVal.([]string) + } + } + } else { + out.Extra = nil + } + return nil +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/defaults.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/defaults.go new file mode 100644 index 000000000000..0f3732e36dcb --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/defaults.go @@ -0,0 +1,25 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "k8s.io/kubernetes/pkg/runtime" +) + +func addDefaultingFuncs(scheme *runtime.Scheme) { + scheme.AddDefaultingFuncs() +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/doc.go new file mode 100644 index 000000000000..cfdb87c53d84 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// +genconversion=true +package v1beta1 diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/register.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/register.go new file mode 100644 index 000000000000..e183299c0b5f --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/register.go @@ -0,0 +1,44 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/runtime" +) + +// GroupName is the group name used in this package +const GroupName = "authentication.k8s.io" + +// SchemeGroupVersion is the group version used to register these objects +var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: "v1beta1"} + +func AddToScheme(scheme *runtime.Scheme) { + // Add the API to Scheme. + addKnownTypes(scheme) + addDefaultingFuncs(scheme) + addConversionFuncs(scheme) +} + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) { + scheme.AddKnownTypes(SchemeGroupVersion, + &TokenReview{}, + ) +} + +func (obj *TokenReview) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/types.generated.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/types.generated.go new file mode 100644 index 000000000000..62d287ff57a4 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/types.generated.go @@ -0,0 +1,1321 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// ************************************************************ +// DO NOT EDIT. +// THIS FILE IS AUTO-GENERATED BY codecgen. 
+// ************************************************************ + +package v1beta1 + +import ( + "errors" + "fmt" + codec1978 "github.com/ugorji/go/codec" + pkg1_unversioned "k8s.io/kubernetes/pkg/api/unversioned" + "reflect" + "runtime" +) + +const ( + // ----- content types ---- + codecSelferC_UTF81234 = 1 + codecSelferC_RAW1234 = 0 + // ----- value types used ---- + codecSelferValueTypeArray1234 = 10 + codecSelferValueTypeMap1234 = 9 + // ----- containerStateValues ---- + codecSelfer_containerMapKey1234 = 2 + codecSelfer_containerMapValue1234 = 3 + codecSelfer_containerMapEnd1234 = 4 + codecSelfer_containerArrayElem1234 = 6 + codecSelfer_containerArrayEnd1234 = 7 +) + +var ( + codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) + codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) +) + +type codecSelfer1234 struct{} + +func init() { + if codec1978.GenVersion != 5 { + _, file, _, _ := runtime.Caller(0) + err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v", + 5, codec1978.GenVersion, file) + panic(err) + } + if false { // reference the types, but skip this branch at build/run time + var v0 pkg1_unversioned.TypeMeta + _ = v0 + } +} + +func (x *TokenReview) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = true + yyq2[2] = x.Kind != "" + yyq2[3] = x.APIVersion != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy4 := &x.Spec + yy4.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.Spec + yy6.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yy9 := &x.Status + yy9.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy11 := &x.Status + yy11.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + 
r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym18 := z.EncBinary() + _ = yym18 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *TokenReview) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *TokenReview) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "spec": + if r.TryDecodeAsNil() { + x.Spec = TokenReviewSpec{} + } else { + yyv4 := &x.Spec + yyv4.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = TokenReviewStatus{} + } else { + yyv5 := &x.Status + yyv5.CodecDecodeSelf(d) + } + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *TokenReview) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = TokenReviewSpec{} + } else { + yyv9 := &x.Spec + yyv9.CodecDecodeSelf(d) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = TokenReviewStatus{} + } else { + yyv10 := &x.Status + 
yyv10.CodecDecodeSelf(d) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *TokenReviewSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Token != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Token)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("token")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Token)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *TokenReviewSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *TokenReviewSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { 
+ break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "token": + if r.TryDecodeAsNil() { + x.Token = "" + } else { + x.Token = string(r.DecodeString()) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *TokenReviewSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj5 int + var yyb5 bool + var yyhl5 bool = l >= 0 + yyj5++ + if yyhl5 { + yyb5 = yyj5 > l + } else { + yyb5 = r.CheckBreak() + } + if yyb5 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Token = "" + } else { + x.Token = string(r.DecodeString()) + } + for { + yyj5++ + if yyhl5 { + yyb5 = yyj5 > l + } else { + yyb5 = r.CheckBreak() + } + if yyb5 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj5-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *TokenReviewStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Authenticated != false + yyq2[1] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeBool(bool(x.Authenticated)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("authenticated")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeBool(bool(x.Authenticated)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yy7 := &x.User + yy7.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("user")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy9 := &x.User + yy9.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *TokenReviewStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + 
z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *TokenReviewStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "authenticated": + if r.TryDecodeAsNil() { + x.Authenticated = false + } else { + x.Authenticated = bool(r.DecodeBool()) + } + case "user": + if r.TryDecodeAsNil() { + x.User = UserInfo{} + } else { + yyv5 := &x.User + yyv5.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *TokenReviewStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Authenticated = false + } else { + x.Authenticated = bool(r.DecodeBool()) + } + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.User = UserInfo{} + } else { + yyv8 := &x.User + yyv8.CodecDecodeSelf(d) + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *UserInfo) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Username != "" + yyq2[1] = x.UID != "" + yyq2[2] = len(x.Groups) != 0 + yyq2[3] = len(x.Extra) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Username)) + } 
+ } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("username")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Username)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.UID)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("uid")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.UID)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.Groups == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + z.F.EncSliceStringV(x.Groups, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("groups")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Groups == nil { + r.EncodeNil() + } else { + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + z.F.EncSliceStringV(x.Groups, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.Extra == nil { + r.EncodeNil() + } else { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + h.encMapstringSlicestring((map[string][]string)(x.Extra), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("extra")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Extra == nil { + r.EncodeNil() + } else { + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + h.encMapstringSlicestring((map[string][]string)(x.Extra), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *UserInfo) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *UserInfo) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + 
var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "username": + if r.TryDecodeAsNil() { + x.Username = "" + } else { + x.Username = string(r.DecodeString()) + } + case "uid": + if r.TryDecodeAsNil() { + x.UID = "" + } else { + x.UID = string(r.DecodeString()) + } + case "groups": + if r.TryDecodeAsNil() { + x.Groups = nil + } else { + yyv6 := &x.Groups + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + z.F.DecSliceStringX(yyv6, false, d) + } + } + case "extra": + if r.TryDecodeAsNil() { + x.Extra = nil + } else { + yyv8 := &x.Extra + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + h.decMapstringSlicestring((*map[string][]string)(yyv8), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *UserInfo) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Username = "" + } else { + x.Username = string(r.DecodeString()) + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.UID = "" + } else { + x.UID = string(r.DecodeString()) + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Groups = nil + } else { + yyv13 := &x.Groups + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + z.F.DecSliceStringX(yyv13, false, d) + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Extra = nil + } else { + yyv15 := &x.Extra + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + h.decMapstringSlicestring((*map[string][]string)(yyv15), d) + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) encMapstringSlicestring(v map[string][]string, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeMapStart(len(v)) + for yyk1, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + yym2 := z.EncBinary() + _ = yym2 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, 
string(yyk1)) + } + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyv1 == nil { + r.EncodeNil() + } else { + yym3 := z.EncBinary() + _ = yym3 + if false { + } else { + z.F.EncSliceStringV(yyv1, false, e) + } + } + } + z.EncSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x codecSelfer1234) decMapstringSlicestring(v *map[string][]string, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyl1 := r.ReadMapStart() + yybh1 := z.DecBasicHandle() + if yyv1 == nil { + yyrl1, _ := z.DecInferLen(yyl1, yybh1.MaxInitLen, 40) + yyv1 = make(map[string][]string, yyrl1) + *v = yyv1 + } + var yymk1 string + var yymv1 []string + var yymg1 bool + if yybh1.MapValueReset { + yymg1 = true + } + if yyl1 > 0 { + for yyj1 := 0; yyj1 < yyl1; yyj1++ { + z.DecSendContainerState(codecSelfer_containerMapKey1234) + if r.TryDecodeAsNil() { + yymk1 = "" + } else { + yymk1 = string(r.DecodeString()) + } + + if yymg1 { + yymv1 = yyv1[yymk1] + } else { + yymv1 = nil + } + z.DecSendContainerState(codecSelfer_containerMapValue1234) + if r.TryDecodeAsNil() { + yymv1 = nil + } else { + yyv3 := &yymv1 + yym4 := z.DecBinary() + _ = yym4 + if false { + } else { + z.F.DecSliceStringX(yyv3, false, d) + } + } + + if yyv1 != nil { + yyv1[yymk1] = yymv1 + } + } + } else if yyl1 < 0 { + for yyj1 := 0; !r.CheckBreak(); yyj1++ { + z.DecSendContainerState(codecSelfer_containerMapKey1234) + if r.TryDecodeAsNil() { + yymk1 = "" + } else { + yymk1 = string(r.DecodeString()) + } + + if yymg1 { + yymv1 = yyv1[yymk1] + } else { + yymv1 = nil + } + z.DecSendContainerState(codecSelfer_containerMapValue1234) + if r.TryDecodeAsNil() { + yymv1 = nil + } else { + yyv6 := &yymv1 + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + z.F.DecSliceStringX(yyv6, false, d) + } + } + + if yyv1 != nil { + yyv1[yymk1] = yymv1 + } + } + } // else len==0: TODO: Should we clear map entries? 
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x codecSelfer1234) encSlicestring(v []string, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym2 := z.EncBinary() + _ = yym2 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(yyv1)) + } + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSlicestring(v *[]string, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []string{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]string, yyrl1) + } + } else { + yyv1 = make([]string, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv1[yyj1] = string(r.DecodeString()) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, "") + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv1[yyj1] = string(r.DecodeString()) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, "") // var yyz1 string + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv1[yyj1] = string(r.DecodeString()) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []string{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/types.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/types.go new file mode 100644 index 000000000000..fc136877a9ef --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/types.go @@ -0,0 +1,63 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "k8s.io/kubernetes/pkg/api/unversioned" +) + +// TokenReview attempts to authenticate a token to a known user. +// Note: TokenReview requests may be cached by the webhook token authenticator +// plugin in the kube-apiserver. 
+type TokenReview struct { + unversioned.TypeMeta `json:",inline"` + + // Spec holds information about the request being evaluated + Spec TokenReviewSpec `json:"spec"` + + // Status is filled in by the server and indicates whether the request can be authenticated. + Status TokenReviewStatus `json:"status,omitempty"` +} + +// TokenReviewSpec is a description of the token authentication request. +type TokenReviewSpec struct { + // Token is the opaque bearer token. + Token string `json:"token,omitempty"` +} + +// TokenReviewStatus is the result of the token authentication request. +type TokenReviewStatus struct { + // Authenticated indicates that the token was associated with a known user. + Authenticated bool `json:"authenticated,omitempty"` + // User is the UserInfo associated with the provided token. + User UserInfo `json:"user,omitempty"` +} + +// UserInfo holds the information about the user needed to implement the +// user.Info interface. +type UserInfo struct { + // The name that uniquely identifies this user among all active users. + Username string `json:"username,omitempty"` + // A unique value that identifies this user across time. If this user is + // deleted and another user by the same name is added, they will have + // different UIDs. + UID string `json:"uid,omitempty"` + // The names of groups this user is a part of. + Groups []string `json:"groups,omitempty"` + // Any additional information provided by the authenticator. + Extra map[string][]string `json:"extra,omitempty"` +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/authorization/deep_copy_generated.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/authorization/deep_copy_generated.go index 034a94d1e168..bc40fb33ea3d 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/authorization/deep_copy_generated.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/authorization/deep_copy_generated.go @@ -1,5 +1,7 @@ +// +build !ignore_autogenerated + /* -Copyright 2015 The Kubernetes Authors All rights reserved. +Copyright 2016 The Kubernetes Authors All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,16 +16,155 @@ See the License for the specific language governing permissions and limitations under the License. */ -// DO NOT EDIT. THIS FILE IS AUTO-GENERATED BY $KUBEROOT/hack/update-generated-deep-copies.sh. +// This file was autogenerated by deepcopy-gen. Do not edit it manually! package authorization -import api "k8s.io/kubernetes/pkg/api" +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + conversion "k8s.io/kubernetes/pkg/conversion" +) func init() { - err := api.Scheme.AddGeneratedDeepCopyFuncs() - if err != nil { + if err := api.Scheme.AddGeneratedDeepCopyFuncs( + DeepCopy_authorization_LocalSubjectAccessReview, + DeepCopy_authorization_NonResourceAttributes, + DeepCopy_authorization_ResourceAttributes, + DeepCopy_authorization_SelfSubjectAccessReview, + DeepCopy_authorization_SelfSubjectAccessReviewSpec, + DeepCopy_authorization_SubjectAccessReview, + DeepCopy_authorization_SubjectAccessReviewSpec, + DeepCopy_authorization_SubjectAccessReviewStatus, + ); err != nil { // if one of the deep copy functions is malformed, detect it immediately. 
panic(err) } } + +func DeepCopy_authorization_LocalSubjectAccessReview(in LocalSubjectAccessReview, out *LocalSubjectAccessReview, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := DeepCopy_authorization_SubjectAccessReviewSpec(in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_authorization_SubjectAccessReviewStatus(in.Status, &out.Status, c); err != nil { + return err + } + return nil +} + +func DeepCopy_authorization_NonResourceAttributes(in NonResourceAttributes, out *NonResourceAttributes, c *conversion.Cloner) error { + out.Path = in.Path + out.Verb = in.Verb + return nil +} + +func DeepCopy_authorization_ResourceAttributes(in ResourceAttributes, out *ResourceAttributes, c *conversion.Cloner) error { + out.Namespace = in.Namespace + out.Verb = in.Verb + out.Group = in.Group + out.Version = in.Version + out.Resource = in.Resource + out.Subresource = in.Subresource + out.Name = in.Name + return nil +} + +func DeepCopy_authorization_SelfSubjectAccessReview(in SelfSubjectAccessReview, out *SelfSubjectAccessReview, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := DeepCopy_authorization_SelfSubjectAccessReviewSpec(in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_authorization_SubjectAccessReviewStatus(in.Status, &out.Status, c); err != nil { + return err + } + return nil +} + +func DeepCopy_authorization_SelfSubjectAccessReviewSpec(in SelfSubjectAccessReviewSpec, out *SelfSubjectAccessReviewSpec, c *conversion.Cloner) error { + if in.ResourceAttributes != nil { + in, out := in.ResourceAttributes, &out.ResourceAttributes + *out = new(ResourceAttributes) + if err := DeepCopy_authorization_ResourceAttributes(*in, *out, c); err != nil { + return err + } + } else { + out.ResourceAttributes = nil + } + if in.NonResourceAttributes != nil { + in, out := in.NonResourceAttributes, &out.NonResourceAttributes + *out = new(NonResourceAttributes) + if err := DeepCopy_authorization_NonResourceAttributes(*in, *out, c); err != nil { + return err + } + } else { + out.NonResourceAttributes = nil + } + return nil +} + +func DeepCopy_authorization_SubjectAccessReview(in SubjectAccessReview, out *SubjectAccessReview, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := DeepCopy_authorization_SubjectAccessReviewSpec(in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_authorization_SubjectAccessReviewStatus(in.Status, &out.Status, c); err != nil { + return err + } + return nil +} + +func DeepCopy_authorization_SubjectAccessReviewSpec(in SubjectAccessReviewSpec, out *SubjectAccessReviewSpec, c *conversion.Cloner) error { + if in.ResourceAttributes != nil { + in, out := in.ResourceAttributes, &out.ResourceAttributes + *out = new(ResourceAttributes) + if err := DeepCopy_authorization_ResourceAttributes(*in, *out, c); err != nil { + return err + } + } else { + out.ResourceAttributes = nil + } + if in.NonResourceAttributes != nil { + in, out := in.NonResourceAttributes, &out.NonResourceAttributes + *out = new(NonResourceAttributes) + if err := DeepCopy_authorization_NonResourceAttributes(*in, *out, c); err != nil { + return err + } + } else { + out.NonResourceAttributes = nil + } + out.User = in.User + if in.Groups != nil { + in, 
out := in.Groups, &out.Groups + *out = make([]string, len(in)) + copy(*out, in) + } else { + out.Groups = nil + } + if in.Extra != nil { + in, out := in.Extra, &out.Extra + *out = make(map[string][]string) + for key, val := range in { + if newVal, err := c.DeepCopy(val); err != nil { + return err + } else { + (*out)[key] = newVal.([]string) + } + } + } else { + out.Extra = nil + } + return nil +} + +func DeepCopy_authorization_SubjectAccessReviewStatus(in SubjectAccessReviewStatus, out *SubjectAccessReviewStatus, c *conversion.Cloner) error { + out.Allowed = in.Allowed + out.Reason = in.Reason + return nil +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/authorization/types.generated.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/authorization/types.generated.go index 9f00bf014d77..1c071376cac4 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/authorization/types.generated.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/authorization/types.generated.go @@ -1,5 +1,5 @@ /* -Copyright 2015 The Kubernetes Authors All rights reserved. +Copyright 2016 The Kubernetes Authors All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -1551,14 +1551,14 @@ func (x *SubjectAccessReviewSpec) CodecEncodeSelf(e *codec1978.Encoder) { } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool + var yyq2 [5]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false var yynn2 int if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) + r.EncodeArrayStart(5) } else { - yynn2 = 4 + yynn2 = 5 for _, b := range yyq2 { if b { yynn2++ @@ -1647,6 +1647,33 @@ func (x *SubjectAccessReviewSpec) CodecEncodeSelf(e *codec1978.Encoder) { } } } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Extra == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encMapstringSlicestring((map[string][]string)(x.Extra), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("Extra")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Extra == nil { + r.EncodeNil() + } else { + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + h.encMapstringSlicestring((map[string][]string)(x.Extra), e) + } + } + } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { @@ -1748,6 +1775,18 @@ func (x *SubjectAccessReviewSpec) codecDecodeSelfFromMap(l int, d *codec1978.Dec z.F.DecSliceStringX(yyv7, false, d) } } + case "Extra": + if r.TryDecodeAsNil() { + x.Extra = nil + } else { + yyv9 := &x.Extra + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + h.decMapstringSlicestring((*map[string][]string)(yyv9), d) + } + } default: z.DecStructFieldNotFound(-1, yys3) } // end switch yys3 @@ -1759,16 +1798,16 @@ func (x *SubjectAccessReviewSpec) codecDecodeSelfFromArray(l int, d *codec1978.D var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l + var yyj11 int + var yyb11 bool + var yyhl11 bool = l >= 0 + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l } else { - yyb9 = r.CheckBreak() + yyb11 = r.CheckBreak() } - if yyb9 { + if yyb11 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -1783,13 +1822,13 @@ func (x 
*SubjectAccessReviewSpec) codecDecodeSelfFromArray(l int, d *codec1978.D } x.ResourceAttributes.CodecDecodeSelf(d) } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l } else { - yyb9 = r.CheckBreak() + yyb11 = r.CheckBreak() } - if yyb9 { + if yyb11 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -1804,13 +1843,13 @@ func (x *SubjectAccessReviewSpec) codecDecodeSelfFromArray(l int, d *codec1978.D } x.NonResourceAttributes.CodecDecodeSelf(d) } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l } else { - yyb9 = r.CheckBreak() + yyb11 = r.CheckBreak() } - if yyb9 { + if yyb11 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -1820,13 +1859,13 @@ func (x *SubjectAccessReviewSpec) codecDecodeSelfFromArray(l int, d *codec1978.D } else { x.User = string(r.DecodeString()) } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l } else { - yyb9 = r.CheckBreak() + yyb11 = r.CheckBreak() } - if yyb9 { + if yyb11 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -1834,26 +1873,48 @@ func (x *SubjectAccessReviewSpec) codecDecodeSelfFromArray(l int, d *codec1978.D if r.TryDecodeAsNil() { x.Groups = nil } else { - yyv13 := &x.Groups - yym14 := z.DecBinary() - _ = yym14 + yyv15 := &x.Groups + yym16 := z.DecBinary() + _ = yym16 if false { } else { - z.F.DecSliceStringX(yyv13, false, d) + z.F.DecSliceStringX(yyv15, false, d) + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Extra = nil + } else { + yyv17 := &x.Extra + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + h.decMapstringSlicestring((*map[string][]string)(yyv17), d) } } for { - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l } else { - yyb9 = r.CheckBreak() + yyb11 = r.CheckBreak() } - if yyb9 { + if yyb11 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") + z.DecStructFieldNotFound(yyj11-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -2279,3 +2340,231 @@ func (x *SubjectAccessReviewStatus) codecDecodeSelfFromArray(l int, d *codec1978 } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } + +func (x codecSelfer1234) encMapstringSlicestring(v map[string][]string, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeMapStart(len(v)) + for yyk1, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + yym2 := z.EncBinary() + _ = yym2 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(yyk1)) + } + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyv1 == nil { + r.EncodeNil() + } else { + yym3 := z.EncBinary() + _ = yym3 + if false { + } else { + z.F.EncSliceStringV(yyv1, false, e) + } + } + } + z.EncSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x codecSelfer1234) decMapstringSlicestring(v *map[string][]string, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyl1 := r.ReadMapStart() + yybh1 := z.DecBasicHandle() + if yyv1 == nil { + yyrl1, _ := z.DecInferLen(yyl1, yybh1.MaxInitLen, 40) + yyv1 = make(map[string][]string, 
yyrl1) + *v = yyv1 + } + var yymk1 string + var yymv1 []string + var yymg1 bool + if yybh1.MapValueReset { + yymg1 = true + } + if yyl1 > 0 { + for yyj1 := 0; yyj1 < yyl1; yyj1++ { + z.DecSendContainerState(codecSelfer_containerMapKey1234) + if r.TryDecodeAsNil() { + yymk1 = "" + } else { + yymk1 = string(r.DecodeString()) + } + + if yymg1 { + yymv1 = yyv1[yymk1] + } else { + yymv1 = nil + } + z.DecSendContainerState(codecSelfer_containerMapValue1234) + if r.TryDecodeAsNil() { + yymv1 = nil + } else { + yyv3 := &yymv1 + yym4 := z.DecBinary() + _ = yym4 + if false { + } else { + z.F.DecSliceStringX(yyv3, false, d) + } + } + + if yyv1 != nil { + yyv1[yymk1] = yymv1 + } + } + } else if yyl1 < 0 { + for yyj1 := 0; !r.CheckBreak(); yyj1++ { + z.DecSendContainerState(codecSelfer_containerMapKey1234) + if r.TryDecodeAsNil() { + yymk1 = "" + } else { + yymk1 = string(r.DecodeString()) + } + + if yymg1 { + yymv1 = yyv1[yymk1] + } else { + yymv1 = nil + } + z.DecSendContainerState(codecSelfer_containerMapValue1234) + if r.TryDecodeAsNil() { + yymv1 = nil + } else { + yyv6 := &yymv1 + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + z.F.DecSliceStringX(yyv6, false, d) + } + } + + if yyv1 != nil { + yyv1[yymk1] = yymv1 + } + } + } // else len==0: TODO: Should we clear map entries? + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x codecSelfer1234) encSlicestring(v []string, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym2 := z.EncBinary() + _ = yym2 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(yyv1)) + } + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSlicestring(v *[]string, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []string{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]string, yyrl1) + } + } else { + yyv1 = make([]string, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv1[yyj1] = string(r.DecodeString()) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, "") + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv1[yyj1] = string(r.DecodeString()) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, "") // var yyz1 string + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv1[yyj1] = string(r.DecodeString()) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []string{} + yyc1 = true + } + } + 
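The generated decMapstringSlicestring helper above (and the matching string-slice helper around it) follow go-codec's decode-into-existing-storage contract: reuse the caller's map or slice when one is supplied, and allocate lazily, with DecInferLen capping the initial size, when the target is nil. The following is a rough hand-written analogue of that contract using encoding/json instead of go-codec; decodeExtra and all values here are local to the sketch.

    package main

    import (
        "encoding/json"
        "fmt"
    )

    func decodeExtra(data []byte, v *map[string][]string) error {
        if *v == nil {
            // Mirror the generated code's lazy make(...) when the target is nil;
            // the real helper additionally caps the initial size via DecInferLen.
            *v = make(map[string][]string)
        }
        // json.Unmarshal likewise reuses a non-nil map, overwriting matching keys.
        return json.Unmarshal(data, v)
    }

    func main() {
        var extra map[string][]string
        if err := decodeExtra([]byte(`{"scopes":["read","write"]}`), &extra); err != nil {
            panic(err)
        }
        fmt.Println(extra) // map[scopes:[read write]]
    }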
yyh1.End() + if yyc1 { + *v = yyv1 + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/authorization/types.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/authorization/types.go index 5feb0e5468d3..8cfdfbe9775f 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/authorization/types.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/authorization/types.go @@ -101,6 +101,9 @@ type SubjectAccessReviewSpec struct { User string // Groups is the groups you're testing for. Groups []string + // Extra corresponds to the user.Info.GetExtra() method from the authenticator. Since that is input to the authorizer + // it needs a reflection here. + Extra map[string][]string } // SelfSubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAttributes diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/conversion_generated.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/conversion_generated.go index 2a0daee203c4..a475c0fd0a73 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/conversion_generated.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/conversion_generated.go @@ -1,5 +1,7 @@ +// +build !ignore_autogenerated + /* -Copyright 2015 The Kubernetes Authors All rights reserved. +Copyright 2016 The Kubernetes Authors All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,16 +16,318 @@ See the License for the specific language governing permissions and limitations under the License. */ -// DO NOT EDIT. THIS FILE IS AUTO-GENERATED BY $KUBEROOT/hack/update-generated-conversions.sh +// This file was autogenerated by conversion-gen. Do not edit it manually! package v1beta1 -import api "k8s.io/kubernetes/pkg/api" +import ( + api "k8s.io/kubernetes/pkg/api" + authorization "k8s.io/kubernetes/pkg/apis/authorization" + conversion "k8s.io/kubernetes/pkg/conversion" +) func init() { - err := api.Scheme.AddGeneratedConversionFuncs() - if err != nil { - // If one of the conversion functions is malformed, detect it immediately. 
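The new Extra field on the internal SubjectAccessReviewSpec, added just above, forwards whatever the authenticator exposed through user.Info.GetExtra(), so authorizers can act on attributes that are neither the username nor a group. Below is a hedged sketch of an authorizer-side check; the struct is a stand-in whose field names match the vendored type, and the "scopes" key and its values are hypothetical.

    package main

    import "fmt"

    // subjectAccessReviewSpec is a local stand-in for the vendored spec type.
    type subjectAccessReviewSpec struct {
        User   string
        Groups []string
        Extra  map[string][]string
    }

    // hasExtraValue is a hypothetical helper showing how an authorizer could
    // gate a decision on an authenticator-provided attribute.
    func hasExtraValue(spec subjectAccessReviewSpec, key, want string) bool {
        for _, v := range spec.Extra[key] {
            if v == want {
                return true
            }
        }
        return false
    }

    func main() {
        spec := subjectAccessReviewSpec{
            User:  "jane",
            Extra: map[string][]string{"scopes": {"openid", "profile"}},
        }
        fmt.Println(hasExtraValue(spec, "scopes", "openid")) // true
    }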
+ if err := api.Scheme.AddGeneratedConversionFuncs( + Convert_v1beta1_LocalSubjectAccessReview_To_authorization_LocalSubjectAccessReview, + Convert_authorization_LocalSubjectAccessReview_To_v1beta1_LocalSubjectAccessReview, + Convert_v1beta1_NonResourceAttributes_To_authorization_NonResourceAttributes, + Convert_authorization_NonResourceAttributes_To_v1beta1_NonResourceAttributes, + Convert_v1beta1_ResourceAttributes_To_authorization_ResourceAttributes, + Convert_authorization_ResourceAttributes_To_v1beta1_ResourceAttributes, + Convert_v1beta1_SelfSubjectAccessReview_To_authorization_SelfSubjectAccessReview, + Convert_authorization_SelfSubjectAccessReview_To_v1beta1_SelfSubjectAccessReview, + Convert_v1beta1_SelfSubjectAccessReviewSpec_To_authorization_SelfSubjectAccessReviewSpec, + Convert_authorization_SelfSubjectAccessReviewSpec_To_v1beta1_SelfSubjectAccessReviewSpec, + Convert_v1beta1_SubjectAccessReview_To_authorization_SubjectAccessReview, + Convert_authorization_SubjectAccessReview_To_v1beta1_SubjectAccessReview, + Convert_v1beta1_SubjectAccessReviewSpec_To_authorization_SubjectAccessReviewSpec, + Convert_authorization_SubjectAccessReviewSpec_To_v1beta1_SubjectAccessReviewSpec, + Convert_v1beta1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus, + Convert_authorization_SubjectAccessReviewStatus_To_v1beta1_SubjectAccessReviewStatus, + ); err != nil { + // if one of the conversion functions is malformed, detect it immediately. panic(err) } } + +func autoConvert_v1beta1_LocalSubjectAccessReview_To_authorization_LocalSubjectAccessReview(in *LocalSubjectAccessReview, out *authorization.LocalSubjectAccessReview, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := Convert_v1beta1_SubjectAccessReviewSpec_To_authorization_SubjectAccessReviewSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1beta1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v1beta1_LocalSubjectAccessReview_To_authorization_LocalSubjectAccessReview(in *LocalSubjectAccessReview, out *authorization.LocalSubjectAccessReview, s conversion.Scope) error { + return autoConvert_v1beta1_LocalSubjectAccessReview_To_authorization_LocalSubjectAccessReview(in, out, s) +} + +func autoConvert_authorization_LocalSubjectAccessReview_To_v1beta1_LocalSubjectAccessReview(in *authorization.LocalSubjectAccessReview, out *LocalSubjectAccessReview, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := Convert_authorization_SubjectAccessReviewSpec_To_v1beta1_SubjectAccessReviewSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_authorization_SubjectAccessReviewStatus_To_v1beta1_SubjectAccessReviewStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_authorization_LocalSubjectAccessReview_To_v1beta1_LocalSubjectAccessReview(in *authorization.LocalSubjectAccessReview, out *LocalSubjectAccessReview, s conversion.Scope) error { + return autoConvert_authorization_LocalSubjectAccessReview_To_v1beta1_LocalSubjectAccessReview(in, out, s) +} + +func autoConvert_v1beta1_NonResourceAttributes_To_authorization_NonResourceAttributes(in *NonResourceAttributes, out 
*authorization.NonResourceAttributes, s conversion.Scope) error { + out.Path = in.Path + out.Verb = in.Verb + return nil +} + +func Convert_v1beta1_NonResourceAttributes_To_authorization_NonResourceAttributes(in *NonResourceAttributes, out *authorization.NonResourceAttributes, s conversion.Scope) error { + return autoConvert_v1beta1_NonResourceAttributes_To_authorization_NonResourceAttributes(in, out, s) +} + +func autoConvert_authorization_NonResourceAttributes_To_v1beta1_NonResourceAttributes(in *authorization.NonResourceAttributes, out *NonResourceAttributes, s conversion.Scope) error { + out.Path = in.Path + out.Verb = in.Verb + return nil +} + +func Convert_authorization_NonResourceAttributes_To_v1beta1_NonResourceAttributes(in *authorization.NonResourceAttributes, out *NonResourceAttributes, s conversion.Scope) error { + return autoConvert_authorization_NonResourceAttributes_To_v1beta1_NonResourceAttributes(in, out, s) +} + +func autoConvert_v1beta1_ResourceAttributes_To_authorization_ResourceAttributes(in *ResourceAttributes, out *authorization.ResourceAttributes, s conversion.Scope) error { + out.Namespace = in.Namespace + out.Verb = in.Verb + out.Group = in.Group + out.Version = in.Version + out.Resource = in.Resource + out.Subresource = in.Subresource + out.Name = in.Name + return nil +} + +func Convert_v1beta1_ResourceAttributes_To_authorization_ResourceAttributes(in *ResourceAttributes, out *authorization.ResourceAttributes, s conversion.Scope) error { + return autoConvert_v1beta1_ResourceAttributes_To_authorization_ResourceAttributes(in, out, s) +} + +func autoConvert_authorization_ResourceAttributes_To_v1beta1_ResourceAttributes(in *authorization.ResourceAttributes, out *ResourceAttributes, s conversion.Scope) error { + out.Namespace = in.Namespace + out.Verb = in.Verb + out.Group = in.Group + out.Version = in.Version + out.Resource = in.Resource + out.Subresource = in.Subresource + out.Name = in.Name + return nil +} + +func Convert_authorization_ResourceAttributes_To_v1beta1_ResourceAttributes(in *authorization.ResourceAttributes, out *ResourceAttributes, s conversion.Scope) error { + return autoConvert_authorization_ResourceAttributes_To_v1beta1_ResourceAttributes(in, out, s) +} + +func autoConvert_v1beta1_SelfSubjectAccessReview_To_authorization_SelfSubjectAccessReview(in *SelfSubjectAccessReview, out *authorization.SelfSubjectAccessReview, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := Convert_v1beta1_SelfSubjectAccessReviewSpec_To_authorization_SelfSubjectAccessReviewSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1beta1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v1beta1_SelfSubjectAccessReview_To_authorization_SelfSubjectAccessReview(in *SelfSubjectAccessReview, out *authorization.SelfSubjectAccessReview, s conversion.Scope) error { + return autoConvert_v1beta1_SelfSubjectAccessReview_To_authorization_SelfSubjectAccessReview(in, out, s) +} + +func autoConvert_authorization_SelfSubjectAccessReview_To_v1beta1_SelfSubjectAccessReview(in *authorization.SelfSubjectAccessReview, out *SelfSubjectAccessReview, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := 
Convert_authorization_SelfSubjectAccessReviewSpec_To_v1beta1_SelfSubjectAccessReviewSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_authorization_SubjectAccessReviewStatus_To_v1beta1_SubjectAccessReviewStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_authorization_SelfSubjectAccessReview_To_v1beta1_SelfSubjectAccessReview(in *authorization.SelfSubjectAccessReview, out *SelfSubjectAccessReview, s conversion.Scope) error { + return autoConvert_authorization_SelfSubjectAccessReview_To_v1beta1_SelfSubjectAccessReview(in, out, s) +} + +func autoConvert_v1beta1_SelfSubjectAccessReviewSpec_To_authorization_SelfSubjectAccessReviewSpec(in *SelfSubjectAccessReviewSpec, out *authorization.SelfSubjectAccessReviewSpec, s conversion.Scope) error { + if in.ResourceAttributes != nil { + in, out := &in.ResourceAttributes, &out.ResourceAttributes + *out = new(authorization.ResourceAttributes) + if err := Convert_v1beta1_ResourceAttributes_To_authorization_ResourceAttributes(*in, *out, s); err != nil { + return err + } + } else { + out.ResourceAttributes = nil + } + if in.NonResourceAttributes != nil { + in, out := &in.NonResourceAttributes, &out.NonResourceAttributes + *out = new(authorization.NonResourceAttributes) + if err := Convert_v1beta1_NonResourceAttributes_To_authorization_NonResourceAttributes(*in, *out, s); err != nil { + return err + } + } else { + out.NonResourceAttributes = nil + } + return nil +} + +func Convert_v1beta1_SelfSubjectAccessReviewSpec_To_authorization_SelfSubjectAccessReviewSpec(in *SelfSubjectAccessReviewSpec, out *authorization.SelfSubjectAccessReviewSpec, s conversion.Scope) error { + return autoConvert_v1beta1_SelfSubjectAccessReviewSpec_To_authorization_SelfSubjectAccessReviewSpec(in, out, s) +} + +func autoConvert_authorization_SelfSubjectAccessReviewSpec_To_v1beta1_SelfSubjectAccessReviewSpec(in *authorization.SelfSubjectAccessReviewSpec, out *SelfSubjectAccessReviewSpec, s conversion.Scope) error { + if in.ResourceAttributes != nil { + in, out := &in.ResourceAttributes, &out.ResourceAttributes + *out = new(ResourceAttributes) + if err := Convert_authorization_ResourceAttributes_To_v1beta1_ResourceAttributes(*in, *out, s); err != nil { + return err + } + } else { + out.ResourceAttributes = nil + } + if in.NonResourceAttributes != nil { + in, out := &in.NonResourceAttributes, &out.NonResourceAttributes + *out = new(NonResourceAttributes) + if err := Convert_authorization_NonResourceAttributes_To_v1beta1_NonResourceAttributes(*in, *out, s); err != nil { + return err + } + } else { + out.NonResourceAttributes = nil + } + return nil +} + +func Convert_authorization_SelfSubjectAccessReviewSpec_To_v1beta1_SelfSubjectAccessReviewSpec(in *authorization.SelfSubjectAccessReviewSpec, out *SelfSubjectAccessReviewSpec, s conversion.Scope) error { + return autoConvert_authorization_SelfSubjectAccessReviewSpec_To_v1beta1_SelfSubjectAccessReviewSpec(in, out, s) +} + +func autoConvert_v1beta1_SubjectAccessReview_To_authorization_SubjectAccessReview(in *SubjectAccessReview, out *authorization.SubjectAccessReview, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := Convert_v1beta1_SubjectAccessReviewSpec_To_authorization_SubjectAccessReviewSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := 
Convert_v1beta1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v1beta1_SubjectAccessReview_To_authorization_SubjectAccessReview(in *SubjectAccessReview, out *authorization.SubjectAccessReview, s conversion.Scope) error { + return autoConvert_v1beta1_SubjectAccessReview_To_authorization_SubjectAccessReview(in, out, s) +} + +func autoConvert_authorization_SubjectAccessReview_To_v1beta1_SubjectAccessReview(in *authorization.SubjectAccessReview, out *SubjectAccessReview, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := Convert_authorization_SubjectAccessReviewSpec_To_v1beta1_SubjectAccessReviewSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_authorization_SubjectAccessReviewStatus_To_v1beta1_SubjectAccessReviewStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_authorization_SubjectAccessReview_To_v1beta1_SubjectAccessReview(in *authorization.SubjectAccessReview, out *SubjectAccessReview, s conversion.Scope) error { + return autoConvert_authorization_SubjectAccessReview_To_v1beta1_SubjectAccessReview(in, out, s) +} + +func autoConvert_v1beta1_SubjectAccessReviewSpec_To_authorization_SubjectAccessReviewSpec(in *SubjectAccessReviewSpec, out *authorization.SubjectAccessReviewSpec, s conversion.Scope) error { + if in.ResourceAttributes != nil { + in, out := &in.ResourceAttributes, &out.ResourceAttributes + *out = new(authorization.ResourceAttributes) + if err := Convert_v1beta1_ResourceAttributes_To_authorization_ResourceAttributes(*in, *out, s); err != nil { + return err + } + } else { + out.ResourceAttributes = nil + } + if in.NonResourceAttributes != nil { + in, out := &in.NonResourceAttributes, &out.NonResourceAttributes + *out = new(authorization.NonResourceAttributes) + if err := Convert_v1beta1_NonResourceAttributes_To_authorization_NonResourceAttributes(*in, *out, s); err != nil { + return err + } + } else { + out.NonResourceAttributes = nil + } + out.User = in.User + out.Groups = in.Groups + out.Extra = in.Extra + return nil +} + +func Convert_v1beta1_SubjectAccessReviewSpec_To_authorization_SubjectAccessReviewSpec(in *SubjectAccessReviewSpec, out *authorization.SubjectAccessReviewSpec, s conversion.Scope) error { + return autoConvert_v1beta1_SubjectAccessReviewSpec_To_authorization_SubjectAccessReviewSpec(in, out, s) +} + +func autoConvert_authorization_SubjectAccessReviewSpec_To_v1beta1_SubjectAccessReviewSpec(in *authorization.SubjectAccessReviewSpec, out *SubjectAccessReviewSpec, s conversion.Scope) error { + if in.ResourceAttributes != nil { + in, out := &in.ResourceAttributes, &out.ResourceAttributes + *out = new(ResourceAttributes) + if err := Convert_authorization_ResourceAttributes_To_v1beta1_ResourceAttributes(*in, *out, s); err != nil { + return err + } + } else { + out.ResourceAttributes = nil + } + if in.NonResourceAttributes != nil { + in, out := &in.NonResourceAttributes, &out.NonResourceAttributes + *out = new(NonResourceAttributes) + if err := Convert_authorization_NonResourceAttributes_To_v1beta1_NonResourceAttributes(*in, *out, s); err != nil { + return err + } + } else { + out.NonResourceAttributes = nil + } + out.User = in.User + out.Groups = in.Groups + out.Extra = in.Extra + return nil +} + +func 
Convert_authorization_SubjectAccessReviewSpec_To_v1beta1_SubjectAccessReviewSpec(in *authorization.SubjectAccessReviewSpec, out *SubjectAccessReviewSpec, s conversion.Scope) error { + return autoConvert_authorization_SubjectAccessReviewSpec_To_v1beta1_SubjectAccessReviewSpec(in, out, s) +} + +func autoConvert_v1beta1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus(in *SubjectAccessReviewStatus, out *authorization.SubjectAccessReviewStatus, s conversion.Scope) error { + out.Allowed = in.Allowed + out.Reason = in.Reason + return nil +} + +func Convert_v1beta1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus(in *SubjectAccessReviewStatus, out *authorization.SubjectAccessReviewStatus, s conversion.Scope) error { + return autoConvert_v1beta1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus(in, out, s) +} + +func autoConvert_authorization_SubjectAccessReviewStatus_To_v1beta1_SubjectAccessReviewStatus(in *authorization.SubjectAccessReviewStatus, out *SubjectAccessReviewStatus, s conversion.Scope) error { + out.Allowed = in.Allowed + out.Reason = in.Reason + return nil +} + +func Convert_authorization_SubjectAccessReviewStatus_To_v1beta1_SubjectAccessReviewStatus(in *authorization.SubjectAccessReviewStatus, out *SubjectAccessReviewStatus, s conversion.Scope) error { + return autoConvert_authorization_SubjectAccessReviewStatus_To_v1beta1_SubjectAccessReviewStatus(in, out, s) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/deep_copy_generated.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/deep_copy_generated.go index ccf024c8bffe..94a35650cb45 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/deep_copy_generated.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/deep_copy_generated.go @@ -1,5 +1,7 @@ +// +build !ignore_autogenerated + /* -Copyright 2015 The Kubernetes Authors All rights reserved. +Copyright 2016 The Kubernetes Authors All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,16 +16,155 @@ See the License for the specific language governing permissions and limitations under the License. */ -// DO NOT EDIT. THIS FILE IS AUTO-GENERATED BY $KUBEROOT/hack/update-generated-deep-copies.sh. +// This file was autogenerated by deepcopy-gen. Do not edit it manually! package v1beta1 -import api "k8s.io/kubernetes/pkg/api" +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + conversion "k8s.io/kubernetes/pkg/conversion" +) func init() { - err := api.Scheme.AddGeneratedDeepCopyFuncs() - if err != nil { + if err := api.Scheme.AddGeneratedDeepCopyFuncs( + DeepCopy_v1beta1_LocalSubjectAccessReview, + DeepCopy_v1beta1_NonResourceAttributes, + DeepCopy_v1beta1_ResourceAttributes, + DeepCopy_v1beta1_SelfSubjectAccessReview, + DeepCopy_v1beta1_SelfSubjectAccessReviewSpec, + DeepCopy_v1beta1_SubjectAccessReview, + DeepCopy_v1beta1_SubjectAccessReviewSpec, + DeepCopy_v1beta1_SubjectAccessReviewStatus, + ); err != nil { // if one of the deep copy functions is malformed, detect it immediately. 
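Each conversion above is emitted as a pair: an autoConvert_* function holding the mechanical field-by-field body, and a thin public Convert_* wrapper. The convention lets a hand-written conversion take over the public name while still delegating the boring copying to the generated body. The following is a toy reduction of the pattern; both types and functions are local stand-ins, not the vendored API.

    package main

    import "fmt"

    type v1beta1Status struct {
        Allowed bool
        Reason  string
    }

    type internalStatus struct {
        Allowed bool
        Reason  string
    }

    // The generator emits the field-by-field body under an autoConvert_* name...
    func autoConvertStatus(in *v1beta1Status, out *internalStatus) error {
        out.Allowed = in.Allowed
        out.Reason = in.Reason
        return nil
    }

    // ...plus a thin public Convert_* wrapper. A hand-written conversion can
    // own the public name and still delegate the mechanical copying here.
    func ConvertStatus(in *v1beta1Status, out *internalStatus) error {
        return autoConvertStatus(in, out)
    }

    func main() {
        var out internalStatus
        if err := ConvertStatus(&v1beta1Status{Allowed: true, Reason: "allowed by policy"}, &out); err != nil {
            panic(err)
        }
        fmt.Printf("%+v\n", out) // {Allowed:true Reason:allowed by policy}
    }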
panic(err) } } + +func DeepCopy_v1beta1_LocalSubjectAccessReview(in LocalSubjectAccessReview, out *LocalSubjectAccessReview, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := DeepCopy_v1beta1_SubjectAccessReviewSpec(in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_v1beta1_SubjectAccessReviewStatus(in.Status, &out.Status, c); err != nil { + return err + } + return nil +} + +func DeepCopy_v1beta1_NonResourceAttributes(in NonResourceAttributes, out *NonResourceAttributes, c *conversion.Cloner) error { + out.Path = in.Path + out.Verb = in.Verb + return nil +} + +func DeepCopy_v1beta1_ResourceAttributes(in ResourceAttributes, out *ResourceAttributes, c *conversion.Cloner) error { + out.Namespace = in.Namespace + out.Verb = in.Verb + out.Group = in.Group + out.Version = in.Version + out.Resource = in.Resource + out.Subresource = in.Subresource + out.Name = in.Name + return nil +} + +func DeepCopy_v1beta1_SelfSubjectAccessReview(in SelfSubjectAccessReview, out *SelfSubjectAccessReview, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := DeepCopy_v1beta1_SelfSubjectAccessReviewSpec(in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_v1beta1_SubjectAccessReviewStatus(in.Status, &out.Status, c); err != nil { + return err + } + return nil +} + +func DeepCopy_v1beta1_SelfSubjectAccessReviewSpec(in SelfSubjectAccessReviewSpec, out *SelfSubjectAccessReviewSpec, c *conversion.Cloner) error { + if in.ResourceAttributes != nil { + in, out := in.ResourceAttributes, &out.ResourceAttributes + *out = new(ResourceAttributes) + if err := DeepCopy_v1beta1_ResourceAttributes(*in, *out, c); err != nil { + return err + } + } else { + out.ResourceAttributes = nil + } + if in.NonResourceAttributes != nil { + in, out := in.NonResourceAttributes, &out.NonResourceAttributes + *out = new(NonResourceAttributes) + if err := DeepCopy_v1beta1_NonResourceAttributes(*in, *out, c); err != nil { + return err + } + } else { + out.NonResourceAttributes = nil + } + return nil +} + +func DeepCopy_v1beta1_SubjectAccessReview(in SubjectAccessReview, out *SubjectAccessReview, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := DeepCopy_v1beta1_SubjectAccessReviewSpec(in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_v1beta1_SubjectAccessReviewStatus(in.Status, &out.Status, c); err != nil { + return err + } + return nil +} + +func DeepCopy_v1beta1_SubjectAccessReviewSpec(in SubjectAccessReviewSpec, out *SubjectAccessReviewSpec, c *conversion.Cloner) error { + if in.ResourceAttributes != nil { + in, out := in.ResourceAttributes, &out.ResourceAttributes + *out = new(ResourceAttributes) + if err := DeepCopy_v1beta1_ResourceAttributes(*in, *out, c); err != nil { + return err + } + } else { + out.ResourceAttributes = nil + } + if in.NonResourceAttributes != nil { + in, out := in.NonResourceAttributes, &out.NonResourceAttributes + *out = new(NonResourceAttributes) + if err := DeepCopy_v1beta1_NonResourceAttributes(*in, *out, c); err != nil { + return err + } + } else { + out.NonResourceAttributes = nil + } + out.User = in.User + if in.Groups != nil { + in, out := in.Groups, &out.Groups + *out = make([]string, len(in)) + copy(*out, in) + } else { + 
out.Groups = nil + } + if in.Extra != nil { + in, out := in.Extra, &out.Extra + *out = make(map[string][]string) + for key, val := range in { + if newVal, err := c.DeepCopy(val); err != nil { + return err + } else { + (*out)[key] = newVal.([]string) + } + } + } else { + out.Extra = nil + } + return nil +} + +func DeepCopy_v1beta1_SubjectAccessReviewStatus(in SubjectAccessReviewStatus, out *SubjectAccessReviewStatus, c *conversion.Cloner) error { + out.Allowed = in.Allowed + out.Reason = in.Reason + return nil +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/doc.go new file mode 100644 index 000000000000..cfdb87c53d84 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +genconversion=true +package v1beta1 diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/types.generated.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/types.generated.go index 906fcb57788c..3b5e4fbe9322 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/types.generated.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/types.generated.go @@ -1,5 +1,5 @@ /* -Copyright 2015 The Kubernetes Authors All rights reserved. +Copyright 2016 The Kubernetes Authors All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
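For the Extra field, the generated deep copy above falls back to the generic c.DeepCopy plus a type assertion, because a map of slices cannot be copied with a plain assignment or copy(): each value slice needs its own backing array. A hand-rolled equivalent of what that fallback must produce:

    package main

    import "fmt"

    // deepCopyExtra copies a map[string][]string so that every value slice
    // gets its own backing array, matching what c.DeepCopy must produce.
    func deepCopyExtra(in map[string][]string) map[string][]string {
        if in == nil {
            return nil
        }
        out := make(map[string][]string, len(in))
        for k, v := range in {
            out[k] = append([]string(nil), v...) // copy, don't alias
        }
        return out
    }

    func main() {
        orig := map[string][]string{"scopes": {"read"}}
        cp := deepCopyExtra(orig)
        cp["scopes"][0] = "write"
        fmt.Println(orig["scopes"][0]) // still "read": the copy owns its slices
    }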
@@ -1635,16 +1635,17 @@ func (x *SubjectAccessReviewSpec) CodecEncodeSelf(e *codec1978.Encoder) { } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool + var yyq2 [5]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false yyq2[0] = x.ResourceAttributes != nil yyq2[1] = x.NonResourceAttributes != nil yyq2[2] = x.User != "" yyq2[3] = len(x.Groups) != 0 + yyq2[4] = len(x.Extra) != 0 var yynn2 int if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) + r.EncodeArrayStart(5) } else { yynn2 = 0 for _, b := range yyq2 { @@ -1759,6 +1760,39 @@ func (x *SubjectAccessReviewSpec) CodecEncodeSelf(e *codec1978.Encoder) { } } } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.Extra == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encMapstringSlicestring((map[string][]string)(x.Extra), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("extra")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Extra == nil { + r.EncodeNil() + } else { + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + h.encMapstringSlicestring((map[string][]string)(x.Extra), e) + } + } + } + } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { @@ -1860,6 +1894,18 @@ func (x *SubjectAccessReviewSpec) codecDecodeSelfFromMap(l int, d *codec1978.Dec z.F.DecSliceStringX(yyv7, false, d) } } + case "extra": + if r.TryDecodeAsNil() { + x.Extra = nil + } else { + yyv9 := &x.Extra + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + h.decMapstringSlicestring((*map[string][]string)(yyv9), d) + } + } default: z.DecStructFieldNotFound(-1, yys3) } // end switch yys3 @@ -1871,16 +1917,16 @@ func (x *SubjectAccessReviewSpec) codecDecodeSelfFromArray(l int, d *codec1978.D var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l + var yyj11 int + var yyb11 bool + var yyhl11 bool = l >= 0 + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l } else { - yyb9 = r.CheckBreak() + yyb11 = r.CheckBreak() } - if yyb9 { + if yyb11 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -1895,13 +1941,13 @@ func (x *SubjectAccessReviewSpec) codecDecodeSelfFromArray(l int, d *codec1978.D } x.ResourceAttributes.CodecDecodeSelf(d) } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l } else { - yyb9 = r.CheckBreak() + yyb11 = r.CheckBreak() } - if yyb9 { + if yyb11 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -1916,13 +1962,13 @@ func (x *SubjectAccessReviewSpec) codecDecodeSelfFromArray(l int, d *codec1978.D } x.NonResourceAttributes.CodecDecodeSelf(d) } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l } else { - yyb9 = r.CheckBreak() + yyb11 = r.CheckBreak() } - if yyb9 { + if yyb11 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -1932,13 +1978,13 @@ func (x *SubjectAccessReviewSpec) codecDecodeSelfFromArray(l int, d *codec1978.D } else { x.User = string(r.DecodeString()) } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l } else { - yyb9 = r.CheckBreak() + yyb11 = r.CheckBreak() } - if yyb9 { + if yyb11 { 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -1946,26 +1992,48 @@ func (x *SubjectAccessReviewSpec) codecDecodeSelfFromArray(l int, d *codec1978.D if r.TryDecodeAsNil() { x.Groups = nil } else { - yyv13 := &x.Groups - yym14 := z.DecBinary() - _ = yym14 + yyv15 := &x.Groups + yym16 := z.DecBinary() + _ = yym16 if false { } else { - z.F.DecSliceStringX(yyv13, false, d) + z.F.DecSliceStringX(yyv15, false, d) + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Extra = nil + } else { + yyv17 := &x.Extra + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + h.decMapstringSlicestring((*map[string][]string)(yyv17), d) } } for { - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l } else { - yyb9 = r.CheckBreak() + yyb11 = r.CheckBreak() } - if yyb9 { + if yyb11 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") + z.DecStructFieldNotFound(yyj11-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -2412,3 +2480,231 @@ func (x *SubjectAccessReviewStatus) codecDecodeSelfFromArray(l int, d *codec1978 } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } + +func (x codecSelfer1234) encMapstringSlicestring(v map[string][]string, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeMapStart(len(v)) + for yyk1, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + yym2 := z.EncBinary() + _ = yym2 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(yyk1)) + } + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyv1 == nil { + r.EncodeNil() + } else { + yym3 := z.EncBinary() + _ = yym3 + if false { + } else { + z.F.EncSliceStringV(yyv1, false, e) + } + } + } + z.EncSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x codecSelfer1234) decMapstringSlicestring(v *map[string][]string, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyl1 := r.ReadMapStart() + yybh1 := z.DecBasicHandle() + if yyv1 == nil { + yyrl1, _ := z.DecInferLen(yyl1, yybh1.MaxInitLen, 40) + yyv1 = make(map[string][]string, yyrl1) + *v = yyv1 + } + var yymk1 string + var yymv1 []string + var yymg1 bool + if yybh1.MapValueReset { + yymg1 = true + } + if yyl1 > 0 { + for yyj1 := 0; yyj1 < yyl1; yyj1++ { + z.DecSendContainerState(codecSelfer_containerMapKey1234) + if r.TryDecodeAsNil() { + yymk1 = "" + } else { + yymk1 = string(r.DecodeString()) + } + + if yymg1 { + yymv1 = yyv1[yymk1] + } else { + yymv1 = nil + } + z.DecSendContainerState(codecSelfer_containerMapValue1234) + if r.TryDecodeAsNil() { + yymv1 = nil + } else { + yyv3 := &yymv1 + yym4 := z.DecBinary() + _ = yym4 + if false { + } else { + z.F.DecSliceStringX(yyv3, false, d) + } + } + + if yyv1 != nil { + yyv1[yymk1] = yymv1 + } + } + } else if yyl1 < 0 { + for yyj1 := 0; !r.CheckBreak(); yyj1++ { + z.DecSendContainerState(codecSelfer_containerMapKey1234) + if r.TryDecodeAsNil() { + yymk1 = "" + } else { + yymk1 = string(r.DecodeString()) + } + + if yymg1 { + yymv1 = yyv1[yymk1] + } else { + yymv1 = nil + } + z.DecSendContainerState(codecSelfer_containerMapValue1234) + if r.TryDecodeAsNil() { + yymv1 = nil + } 
else { + yyv6 := &yymv1 + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + z.F.DecSliceStringX(yyv6, false, d) + } + } + + if yyv1 != nil { + yyv1[yymk1] = yymv1 + } + } + } // else len==0: TODO: Should we clear map entries? + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x codecSelfer1234) encSlicestring(v []string, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym2 := z.EncBinary() + _ = yym2 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(yyv1)) + } + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSlicestring(v *[]string, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []string{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]string, yyrl1) + } + } else { + yyv1 = make([]string, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv1[yyj1] = string(r.DecodeString()) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, "") + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv1[yyj1] = string(r.DecodeString()) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, "") // var yyz1 string + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv1[yyj1] = string(r.DecodeString()) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []string{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/types.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/types.go index 0a8cb69a74ad..27078e9fc0e6 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/types.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/types.go @@ -100,6 +100,9 @@ type SubjectAccessReviewSpec struct { User string `json:"user,omitempty"` // Groups is the groups you're testing for. Groups []string `json:"group,omitempty"` + // Extra corresponds to the user.Info.GetExtra() method from the authenticator. Since that is input to the authorizer + // it needs a reflection here. + Extra map[string][]string `json:"extra,omitempty"` } // SelfSubjectAccessReviewSpec is a description of the access request. 
Exactly one of ResourceAuthorizationAttributes diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/types_swagger_doc_generated.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/types_swagger_doc_generated.go index d9910ef144ea..d4c337db797e 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/types_swagger_doc_generated.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/types_swagger_doc_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2015 The Kubernetes Authors All rights reserved. +Copyright 2016 The Kubernetes Authors All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -98,6 +98,7 @@ var map_SubjectAccessReviewSpec = map[string]string{ "nonResourceAttributes": "NonResourceAttributes describes information for a non-resource access request", "user": "User is the user you're testing for. If you specify \"User\" but not \"Group\", then is it interpreted as \"What if User were not a member of any groups", "group": "Groups is the groups you're testing for.", + "extra": "Extra corresponds to the user.Info.GetExtra() method from the authenticator. Since that is input to the authorizer it needs a reflection here.", } func (SubjectAccessReviewSpec) SwaggerDoc() map[string]string { diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/authorization/validation/validation_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/authorization/validation/validation_test.go new file mode 100644 index 000000000000..c2776c404ed2 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/authorization/validation/validation_test.go @@ -0,0 +1,135 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package validation + +import ( + "strings" + "testing" + + authorizationapi "k8s.io/kubernetes/pkg/apis/authorization" + "k8s.io/kubernetes/pkg/util/validation/field" +) + +func TestValidateSARSpec(t *testing.T) { + successCases := []authorizationapi.SubjectAccessReviewSpec{ + {ResourceAttributes: &authorizationapi.ResourceAttributes{}, User: "me"}, + {NonResourceAttributes: &authorizationapi.NonResourceAttributes{}, Groups: []string{"my-group"}}, + } + for _, successCase := range successCases { + if errs := ValidateSubjectAccessReviewSpec(successCase, field.NewPath("spec")); len(errs) != 0 { + t.Errorf("expected success: %v", errs) + } + } + + errorCases := []struct { + name string + obj authorizationapi.SubjectAccessReviewSpec + msg string + }{ + { + name: "neither request", + obj: authorizationapi.SubjectAccessReviewSpec{User: "me"}, + msg: "exactly one of nonResourceAttributes or resourceAttributes must be specified", + }, + { + name: "both requests", + obj: authorizationapi.SubjectAccessReviewSpec{ + ResourceAttributes: &authorizationapi.ResourceAttributes{}, + NonResourceAttributes: &authorizationapi.NonResourceAttributes{}, + User: "me", + }, + msg: "cannot be specified in combination with resourceAttributes", + }, + { + name: "no subject", + obj: authorizationapi.SubjectAccessReviewSpec{ + ResourceAttributes: &authorizationapi.ResourceAttributes{}, + }, + msg: `spec.user: Invalid value: "": at least one of user or group must be specified`, + }, + } + + for _, c := range errorCases { + errs := ValidateSubjectAccessReviewSpec(c.obj, field.NewPath("spec")) + if len(errs) == 0 { + t.Errorf("%s: expected failure for %q", c.name, c.msg) + } else if !strings.Contains(errs[0].Error(), c.msg) { + t.Errorf("%s: unexpected error: %q, expected: %q", c.name, errs[0], c.msg) + } + + errs = ValidateSubjectAccessReview(&authorizationapi.SubjectAccessReview{Spec: c.obj}) + if len(errs) == 0 { + t.Errorf("%s: expected failure for %q", c.name, c.msg) + } else if !strings.Contains(errs[0].Error(), c.msg) { + t.Errorf("%s: unexpected error: %q, expected: %q", c.name, errs[0], c.msg) + } + errs = ValidateLocalSubjectAccessReview(&authorizationapi.LocalSubjectAccessReview{Spec: c.obj}) + if len(errs) == 0 { + t.Errorf("%s: expected failure for %q", c.name, c.msg) + } else if !strings.Contains(errs[0].Error(), c.msg) { + t.Errorf("%s: unexpected error: %q, expected: %q", c.name, errs[0], c.msg) + } + + } +} + +func TestValidateSelfSAR(t *testing.T) { + successCases := []authorizationapi.SelfSubjectAccessReviewSpec{ + {ResourceAttributes: &authorizationapi.ResourceAttributes{}}, + } + for _, successCase := range successCases { + if errs := ValidateSelfSubjectAccessReviewSpec(successCase, field.NewPath("spec")); len(errs) != 0 { + t.Errorf("expected success: %v", errs) + } + } + + errorCases := []struct { + name string + obj authorizationapi.SelfSubjectAccessReviewSpec + msg string + }{ + { + name: "neither request", + obj: authorizationapi.SelfSubjectAccessReviewSpec{}, + msg: "exactly one of nonResourceAttributes or resourceAttributes must be specified", + }, + { + name: "both requests", + obj: authorizationapi.SelfSubjectAccessReviewSpec{ + ResourceAttributes: &authorizationapi.ResourceAttributes{}, + NonResourceAttributes: &authorizationapi.NonResourceAttributes{}, + }, + msg: "cannot be specified in combination with resourceAttributes", + }, + } + + for _, c := range errorCases { + errs := ValidateSelfSubjectAccessReviewSpec(c.obj, field.NewPath("spec")) + if len(errs) == 0 { + t.Errorf("%s: 
expected failure for %q", c.name, c.msg) + } else if !strings.Contains(errs[0].Error(), c.msg) { + t.Errorf("%s: unexpected error: %q, expected: %q", c.name, errs[0], c.msg) + } + + errs = ValidateSelfSubjectAccessReview(&authorizationapi.SelfSubjectAccessReview{Spec: c.obj}) + if len(errs) == 0 { + t.Errorf("%s: expected failure for %q", c.name, c.msg) + } else if !strings.Contains(errs[0].Error(), c.msg) { + t.Errorf("%s: unexpected error: %q, expected: %q", c.name, errs[0], c.msg) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/autoscaling/deep_copy_generated.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/autoscaling/deep_copy_generated.go index 3ad4f2799695..d78bad7cbc39 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/autoscaling/deep_copy_generated.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/autoscaling/deep_copy_generated.go @@ -1,5 +1,7 @@ +// +build !ignore_autogenerated + /* -Copyright 2015 The Kubernetes Authors All rights reserved. +Copyright 2016 The Kubernetes Authors All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,16 +16,150 @@ See the License for the specific language governing permissions and limitations under the License. */ -// DO NOT EDIT. THIS FILE IS AUTO-GENERATED BY $KUBEROOT/hack/update-generated-deep-copies.sh. +// This file was autogenerated by deepcopy-gen. Do not edit it manually! package autoscaling -import api "k8s.io/kubernetes/pkg/api" +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + conversion "k8s.io/kubernetes/pkg/conversion" +) func init() { - err := api.Scheme.AddGeneratedDeepCopyFuncs() - if err != nil { + if err := api.Scheme.AddGeneratedDeepCopyFuncs( + DeepCopy_autoscaling_CrossVersionObjectReference, + DeepCopy_autoscaling_HorizontalPodAutoscaler, + DeepCopy_autoscaling_HorizontalPodAutoscalerList, + DeepCopy_autoscaling_HorizontalPodAutoscalerSpec, + DeepCopy_autoscaling_HorizontalPodAutoscalerStatus, + DeepCopy_autoscaling_Scale, + DeepCopy_autoscaling_ScaleSpec, + DeepCopy_autoscaling_ScaleStatus, + ); err != nil { // if one of the deep copy functions is malformed, detect it immediately. 
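The tests above pin down three rules for a SubjectAccessReviewSpec: resourceAttributes and nonResourceAttributes are mutually exclusive, at least one of them is required, and a subject (user or group) must be named; the self-review variant drops the subject rule. The implementation under test is not part of this diff, but the checks it must perform reduce to something like the following sketch, where plain errors stand in for the real field.ErrorList and the messages echo the ones the tests expect.

    package main

    import (
        "errors"
        "fmt"
    )

    type resourceAttributes struct{}
    type nonResourceAttributes struct{}

    type subjectAccessReviewSpec struct {
        ResourceAttributes    *resourceAttributes
        NonResourceAttributes *nonResourceAttributes
        User                  string
        Groups                []string
    }

    func validateSpec(s subjectAccessReviewSpec) error {
        switch {
        case s.ResourceAttributes != nil && s.NonResourceAttributes != nil:
            return errors.New("nonResourceAttributes cannot be specified in combination with resourceAttributes")
        case s.ResourceAttributes == nil && s.NonResourceAttributes == nil:
            return errors.New("exactly one of nonResourceAttributes or resourceAttributes must be specified")
        case s.User == "" && len(s.Groups) == 0:
            return errors.New("at least one of user or group must be specified")
        }
        return nil
    }

    func main() {
        ok := subjectAccessReviewSpec{ResourceAttributes: &resourceAttributes{}, User: "me"}
        fmt.Println(validateSpec(ok)) // <nil>
        fmt.Println(validateSpec(subjectAccessReviewSpec{User: "me"}))
    }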
panic(err) } } + +func DeepCopy_autoscaling_CrossVersionObjectReference(in CrossVersionObjectReference, out *CrossVersionObjectReference, c *conversion.Cloner) error { + out.Kind = in.Kind + out.Name = in.Name + out.APIVersion = in.APIVersion + return nil +} + +func DeepCopy_autoscaling_HorizontalPodAutoscaler(in HorizontalPodAutoscaler, out *HorizontalPodAutoscaler, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + return err + } + if err := DeepCopy_autoscaling_HorizontalPodAutoscalerSpec(in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_autoscaling_HorizontalPodAutoscalerStatus(in.Status, &out.Status, c); err != nil { + return err + } + return nil +} + +func DeepCopy_autoscaling_HorizontalPodAutoscalerList(in HorizontalPodAutoscalerList, out *HorizontalPodAutoscalerList, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + return err + } + if in.Items != nil { + in, out := in.Items, &out.Items + *out = make([]HorizontalPodAutoscaler, len(in)) + for i := range in { + if err := DeepCopy_autoscaling_HorizontalPodAutoscaler(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func DeepCopy_autoscaling_HorizontalPodAutoscalerSpec(in HorizontalPodAutoscalerSpec, out *HorizontalPodAutoscalerSpec, c *conversion.Cloner) error { + if err := DeepCopy_autoscaling_CrossVersionObjectReference(in.ScaleTargetRef, &out.ScaleTargetRef, c); err != nil { + return err + } + if in.MinReplicas != nil { + in, out := in.MinReplicas, &out.MinReplicas + *out = new(int32) + **out = *in + } else { + out.MinReplicas = nil + } + out.MaxReplicas = in.MaxReplicas + if in.TargetCPUUtilizationPercentage != nil { + in, out := in.TargetCPUUtilizationPercentage, &out.TargetCPUUtilizationPercentage + *out = new(int32) + **out = *in + } else { + out.TargetCPUUtilizationPercentage = nil + } + return nil +} + +func DeepCopy_autoscaling_HorizontalPodAutoscalerStatus(in HorizontalPodAutoscalerStatus, out *HorizontalPodAutoscalerStatus, c *conversion.Cloner) error { + if in.ObservedGeneration != nil { + in, out := in.ObservedGeneration, &out.ObservedGeneration + *out = new(int64) + **out = *in + } else { + out.ObservedGeneration = nil + } + if in.LastScaleTime != nil { + in, out := in.LastScaleTime, &out.LastScaleTime + *out = new(unversioned.Time) + if err := unversioned.DeepCopy_unversioned_Time(*in, *out, c); err != nil { + return err + } + } else { + out.LastScaleTime = nil + } + out.CurrentReplicas = in.CurrentReplicas + out.DesiredReplicas = in.DesiredReplicas + if in.CurrentCPUUtilizationPercentage != nil { + in, out := in.CurrentCPUUtilizationPercentage, &out.CurrentCPUUtilizationPercentage + *out = new(int32) + **out = *in + } else { + out.CurrentCPUUtilizationPercentage = nil + } + return nil +} + +func DeepCopy_autoscaling_Scale(in Scale, out *Scale, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + return err + } + if err := DeepCopy_autoscaling_ScaleSpec(in.Spec, &out.Spec, c); err 
!= nil { + return err + } + if err := DeepCopy_autoscaling_ScaleStatus(in.Status, &out.Status, c); err != nil { + return err + } + return nil +} + +func DeepCopy_autoscaling_ScaleSpec(in ScaleSpec, out *ScaleSpec, c *conversion.Cloner) error { + out.Replicas = in.Replicas + return nil +} + +func DeepCopy_autoscaling_ScaleStatus(in ScaleStatus, out *ScaleStatus, c *conversion.Cloner) error { + out.Replicas = in.Replicas + out.Selector = in.Selector + return nil +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/autoscaling/register.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/autoscaling/register.go index dfc86f24d2c7..7af721243073 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/autoscaling/register.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/autoscaling/register.go @@ -19,7 +19,6 @@ package autoscaling import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/runtime" ) @@ -48,10 +47,12 @@ func AddToScheme(scheme *runtime.Scheme) { func addKnownTypes(scheme *runtime.Scheme) { scheme.AddKnownTypes(SchemeGroupVersion, &Scale{}, - &extensions.HorizontalPodAutoscaler{}, - &extensions.HorizontalPodAutoscalerList{}, + &HorizontalPodAutoscaler{}, + &HorizontalPodAutoscalerList{}, &api.ListOptions{}, ) } -func (obj *Scale) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } +func (obj *Scale) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } +func (obj *HorizontalPodAutoscaler) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } +func (obj *HorizontalPodAutoscalerList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/autoscaling/types.generated.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/autoscaling/types.generated.go index 7302ebca6961..fdd05919003c 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/autoscaling/types.generated.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/autoscaling/types.generated.go @@ -1,5 +1,5 @@ /* -Copyright 2015 The Kubernetes Authors All rights reserved. +Copyright 2016 The Kubernetes Authors All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -535,7 +535,7 @@ func (x *ScaleSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Replicas = 0 } else { - x.Replicas = int(r.DecodeInt(codecSelferBitsize1234)) + x.Replicas = int32(r.DecodeInt(32)) } default: z.DecStructFieldNotFound(-1, yys3) @@ -565,7 +565,7 @@ func (x *ScaleSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Replicas = 0 } else { - x.Replicas = int(r.DecodeInt(codecSelferBitsize1234)) + x.Replicas = int32(r.DecodeInt(32)) } for { yyj5++ @@ -723,7 +723,7 @@ func (x *ScaleStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Replicas = 0 } else { - x.Replicas = int(r.DecodeInt(codecSelferBitsize1234)) + x.Replicas = int32(r.DecodeInt(32)) } case "selector": if r.TryDecodeAsNil() { @@ -759,7 +759,7 @@ func (x *ScaleStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Replicas = 0 } else { - x.Replicas = int(r.DecodeInt(codecSelferBitsize1234)) + x.Replicas = int32(r.DecodeInt(32)) } yyj6++ if yyhl6 { @@ -792,3 +792,1868 @@ func (x *ScaleStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } + +func (x *CrossVersionObjectReference) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[2] = x.APIVersion != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, 
string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *CrossVersionObjectReference) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *CrossVersionObjectReference) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + x.Name = string(r.DecodeString()) + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *CrossVersionObjectReference) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + x.Name = string(r.DecodeString()) + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + for { + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj7-1, "") + } + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *HorizontalPodAutoscalerSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.MinReplicas != nil + yyq2[3] = x.TargetCPUUtilizationPercentage != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy4 := &x.ScaleTargetRef + yy4.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("scaleTargetRef")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.ScaleTargetRef + yy6.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.MinReplicas == nil { + r.EncodeNil() + } else { + yy9 := *x.MinReplicas + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(yy9)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("minReplicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.MinReplicas == nil { + r.EncodeNil() + } else { + yy11 := *x.MinReplicas + yym12 := z.EncBinary() + _ = yym12 + if false { + } else { + r.EncodeInt(int64(yy11)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeInt(int64(x.MaxReplicas)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("maxReplicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + r.EncodeInt(int64(x.MaxReplicas)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.TargetCPUUtilizationPercentage == nil { + r.EncodeNil() + } else { + yy17 := *x.TargetCPUUtilizationPercentage + yym18 := z.EncBinary() + _ = yym18 + if false { + } else { + r.EncodeInt(int64(yy17)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("targetCPUUtilizationPercentage")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.TargetCPUUtilizationPercentage == nil { + r.EncodeNil() + } else { + yy19 := *x.TargetCPUUtilizationPercentage + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeInt(int64(yy19)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *HorizontalPodAutoscalerSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else 
if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *HorizontalPodAutoscalerSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "scaleTargetRef": + if r.TryDecodeAsNil() { + x.ScaleTargetRef = CrossVersionObjectReference{} + } else { + yyv4 := &x.ScaleTargetRef + yyv4.CodecDecodeSelf(d) + } + case "minReplicas": + if r.TryDecodeAsNil() { + if x.MinReplicas != nil { + x.MinReplicas = nil + } + } else { + if x.MinReplicas == nil { + x.MinReplicas = new(int32) + } + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + *((*int32)(x.MinReplicas)) = int32(r.DecodeInt(32)) + } + } + case "maxReplicas": + if r.TryDecodeAsNil() { + x.MaxReplicas = 0 + } else { + x.MaxReplicas = int32(r.DecodeInt(32)) + } + case "targetCPUUtilizationPercentage": + if r.TryDecodeAsNil() { + if x.TargetCPUUtilizationPercentage != nil { + x.TargetCPUUtilizationPercentage = nil + } + } else { + if x.TargetCPUUtilizationPercentage == nil { + x.TargetCPUUtilizationPercentage = new(int32) + } + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*int32)(x.TargetCPUUtilizationPercentage)) = int32(r.DecodeInt(32)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *HorizontalPodAutoscalerSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ScaleTargetRef = CrossVersionObjectReference{} + } else { + yyv11 := &x.ScaleTargetRef + yyv11.CodecDecodeSelf(d) + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.MinReplicas != nil { + x.MinReplicas = nil + } + } else { + if x.MinReplicas == nil { + x.MinReplicas = new(int32) + } + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*int32)(x.MinReplicas)) = int32(r.DecodeInt(32)) + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if 
yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.MaxReplicas = 0 + } else { + x.MaxReplicas = int32(r.DecodeInt(32)) + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.TargetCPUUtilizationPercentage != nil { + x.TargetCPUUtilizationPercentage = nil + } + } else { + if x.TargetCPUUtilizationPercentage == nil { + x.TargetCPUUtilizationPercentage = new(int32) + } + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*int32)(x.TargetCPUUtilizationPercentage)) = int32(r.DecodeInt(32)) + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *HorizontalPodAutoscalerStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.ObservedGeneration != nil + yyq2[1] = x.LastScaleTime != nil + yyq2[4] = x.CurrentCPUUtilizationPercentage != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.ObservedGeneration == nil { + r.EncodeNil() + } else { + yy4 := *x.ObservedGeneration + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(yy4)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("observedGeneration")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ObservedGeneration == nil { + r.EncodeNil() + } else { + yy6 := *x.ObservedGeneration + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeInt(int64(yy6)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.LastScaleTime == nil { + r.EncodeNil() + } else { + yym9 := z.EncBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.EncExt(x.LastScaleTime) { + } else if yym9 { + z.EncBinaryMarshal(x.LastScaleTime) + } else if !yym9 && z.IsJSONHandle() { + z.EncJSONMarshal(x.LastScaleTime) + } else { + z.EncFallback(x.LastScaleTime) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("lastScaleTime")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.LastScaleTime == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.EncExt(x.LastScaleTime) { + } else if yym10 { + 
z.EncBinaryMarshal(x.LastScaleTime) + } else if !yym10 && z.IsJSONHandle() { + z.EncJSONMarshal(x.LastScaleTime) + } else { + z.EncFallback(x.LastScaleTime) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym12 := z.EncBinary() + _ = yym12 + if false { + } else { + r.EncodeInt(int64(x.CurrentReplicas)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("currentReplicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeInt(int64(x.CurrentReplicas)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + r.EncodeInt(int64(x.DesiredReplicas)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("desiredReplicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeInt(int64(x.DesiredReplicas)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.CurrentCPUUtilizationPercentage == nil { + r.EncodeNil() + } else { + yy18 := *x.CurrentCPUUtilizationPercentage + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeInt(int64(yy18)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("currentCPUUtilizationPercentage")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.CurrentCPUUtilizationPercentage == nil { + r.EncodeNil() + } else { + yy20 := *x.CurrentCPUUtilizationPercentage + yym21 := z.EncBinary() + _ = yym21 + if false { + } else { + r.EncodeInt(int64(yy20)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *HorizontalPodAutoscalerStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *HorizontalPodAutoscalerStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case 
"observedGeneration": + if r.TryDecodeAsNil() { + if x.ObservedGeneration != nil { + x.ObservedGeneration = nil + } + } else { + if x.ObservedGeneration == nil { + x.ObservedGeneration = new(int64) + } + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int64)(x.ObservedGeneration)) = int64(r.DecodeInt(64)) + } + } + case "lastScaleTime": + if r.TryDecodeAsNil() { + if x.LastScaleTime != nil { + x.LastScaleTime = nil + } + } else { + if x.LastScaleTime == nil { + x.LastScaleTime = new(pkg1_unversioned.Time) + } + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(x.LastScaleTime) { + } else if yym7 { + z.DecBinaryUnmarshal(x.LastScaleTime) + } else if !yym7 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.LastScaleTime) + } else { + z.DecFallback(x.LastScaleTime, false) + } + } + case "currentReplicas": + if r.TryDecodeAsNil() { + x.CurrentReplicas = 0 + } else { + x.CurrentReplicas = int32(r.DecodeInt(32)) + } + case "desiredReplicas": + if r.TryDecodeAsNil() { + x.DesiredReplicas = 0 + } else { + x.DesiredReplicas = int32(r.DecodeInt(32)) + } + case "currentCPUUtilizationPercentage": + if r.TryDecodeAsNil() { + if x.CurrentCPUUtilizationPercentage != nil { + x.CurrentCPUUtilizationPercentage = nil + } + } else { + if x.CurrentCPUUtilizationPercentage == nil { + x.CurrentCPUUtilizationPercentage = new(int32) + } + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*int32)(x.CurrentCPUUtilizationPercentage)) = int32(r.DecodeInt(32)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *HorizontalPodAutoscalerStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ObservedGeneration != nil { + x.ObservedGeneration = nil + } + } else { + if x.ObservedGeneration == nil { + x.ObservedGeneration = new(int64) + } + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*int64)(x.ObservedGeneration)) = int64(r.DecodeInt(64)) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.LastScaleTime != nil { + x.LastScaleTime = nil + } + } else { + if x.LastScaleTime == nil { + x.LastScaleTime = new(pkg1_unversioned.Time) + } + yym16 := z.DecBinary() + _ = yym16 + if false { + } else if z.HasExtensions() && z.DecExt(x.LastScaleTime) { + } else if yym16 { + z.DecBinaryUnmarshal(x.LastScaleTime) + } else if !yym16 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.LastScaleTime) + } else { + z.DecFallback(x.LastScaleTime, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.CurrentReplicas = 0 + } else { + x.CurrentReplicas = int32(r.DecodeInt(32)) + } + yyj12++ + if yyhl12 { + 
yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.DesiredReplicas = 0 + } else { + x.DesiredReplicas = int32(r.DecodeInt(32)) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.CurrentCPUUtilizationPercentage != nil { + x.CurrentCPUUtilizationPercentage = nil + } + } else { + if x.CurrentCPUUtilizationPercentage == nil { + x.CurrentCPUUtilizationPercentage = new(int32) + } + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*int32)(x.CurrentCPUUtilizationPercentage)) = int32(r.DecodeInt(32)) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *HorizontalPodAutoscaler) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = true + yyq2[1] = true + yyq2[2] = true + yyq2[3] = x.Kind != "" + yyq2[4] = x.APIVersion != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yy4 := &x.ObjectMeta + yy4.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.ObjectMeta + yy6.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yy9 := &x.Spec + yy9.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy11 := &x.Spec + yy11.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy14 := &x.Status + yy14.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy16 := &x.Status + yy16.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else 
{ + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *HorizontalPodAutoscaler) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *HorizontalPodAutoscaler) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_api.ObjectMeta{} + } else { + yyv4 := &x.ObjectMeta + yyv4.CodecDecodeSelf(d) + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = HorizontalPodAutoscalerSpec{} + } else { + yyv5 := &x.Spec + yyv5.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = HorizontalPodAutoscalerStatus{} + } else { + yyv6 := &x.Status + yyv6.CodecDecodeSelf(d) + } + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *HorizontalPodAutoscaler) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj9 int + var yyb9 bool + var 
yyhl9 bool = l >= 0 + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_api.ObjectMeta{} + } else { + yyv10 := &x.ObjectMeta + yyv10.CodecDecodeSelf(d) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = HorizontalPodAutoscalerSpec{} + } else { + yyv11 := &x.Spec + yyv11.CodecDecodeSelf(d) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = HorizontalPodAutoscalerStatus{} + } else { + yyv12 := &x.Status + yyv12.CodecDecodeSelf(d) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + for { + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj9-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *HorizontalPodAutoscalerList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = true + yyq2[2] = x.Kind != "" + yyq2[3] = x.APIVersion != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yy4 := &x.ListMeta + yym5 := z.EncBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.EncExt(yy4) { + } else { + z.EncFallback(yy4) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.ListMeta + yym7 := z.EncBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.EncExt(yy6) { + } else { + z.EncFallback(yy6) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym9 := z.EncBinary() 
+ _ = yym9 + if false { + } else { + h.encSliceHorizontalPodAutoscaler(([]HorizontalPodAutoscaler)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + h.encSliceHorizontalPodAutoscaler(([]HorizontalPodAutoscaler)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym12 := z.EncBinary() + _ = yym12 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *HorizontalPodAutoscalerList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *HorizontalPodAutoscalerList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_unversioned.ListMeta{} + } else { + yyv4 := &x.ListMeta + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(yyv4) { + } else { + 
z.DecFallback(yyv4, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv6 := &x.Items + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + h.decSliceHorizontalPodAutoscaler((*[]HorizontalPodAutoscaler)(yyv6), d) + } + } + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *HorizontalPodAutoscalerList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_unversioned.ListMeta{} + } else { + yyv11 := &x.ListMeta + yym12 := z.DecBinary() + _ = yym12 + if false { + } else if z.HasExtensions() && z.DecExt(yyv11) { + } else { + z.DecFallback(yyv11, false) + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv13 := &x.Items + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + h.decSliceHorizontalPodAutoscaler((*[]HorizontalPodAutoscaler)(yyv13), d) + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) encSliceHorizontalPodAutoscaler(v []HorizontalPodAutoscaler, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceHorizontalPodAutoscaler(v *[]HorizontalPodAutoscaler, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []HorizontalPodAutoscaler{} + 
yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 344) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]HorizontalPodAutoscaler, yyrl1) + } + } else { + yyv1 = make([]HorizontalPodAutoscaler, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = HorizontalPodAutoscaler{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, HorizontalPodAutoscaler{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = HorizontalPodAutoscaler{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, HorizontalPodAutoscaler{}) // var yyz1 HorizontalPodAutoscaler + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = HorizontalPodAutoscaler{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []HorizontalPodAutoscaler{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/autoscaling/types.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/autoscaling/types.go index 99080a8a6606..3e60def9235d 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/autoscaling/types.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/autoscaling/types.go @@ -24,30 +24,97 @@ import ( // Scale represents a scaling request for a resource. type Scale struct { unversioned.TypeMeta `json:",inline"` - // Standard object metadata; More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata. + // Standard object metadata; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata. api.ObjectMeta `json:"metadata,omitempty"` - // defines the behavior of the scale. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#spec-and-status. + // defines the behavior of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. Spec ScaleSpec `json:"spec,omitempty"` - // current status of the scale. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#spec-and-status. Read-only. + // current status of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. Read-only. Status ScaleStatus `json:"status,omitempty"` } // ScaleSpec describes the attributes of a scale subresource. type ScaleSpec struct { // desired number of instances for the scaled object. - Replicas int `json:"replicas,omitempty"` + Replicas int32 `json:"replicas,omitempty"` } // ScaleStatus represents the current status of a scale subresource. type ScaleStatus struct { // actual number of observed instances of the scaled object. 
-	Replicas int `json:"replicas"`
+	Replicas int32 `json:"replicas"`

 	// label query over pods that should match the replicas count. This is the same
 	// as the label selector but in the string format to avoid introspection
 	// by clients. The string will be in the same format as the query-param syntax.
-	// More info: http://releases.k8s.io/release-1.2/docs/user-guide/labels.md#label-selectors
+	// More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors
 	Selector string `json:"selector,omitempty"`
 }
+
+// CrossVersionObjectReference contains enough information to let you identify the referred resource.
+type CrossVersionObjectReference struct {
+	// Kind of the referent; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
+	Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"`
+	// Name of the referent; More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#names
+	Name string `json:"name" protobuf:"bytes,2,opt,name=name"`
+	// API version of the referent
+	APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,3,opt,name=apiVersion"`
+}
+
+// specification of a horizontal pod autoscaler.
+type HorizontalPodAutoscalerSpec struct {
+	// reference to the scaled resource; the horizontal pod autoscaler will learn the current resource consumption
+	// and will set the desired number of pods by using its Scale subresource.
+	ScaleTargetRef CrossVersionObjectReference `json:"scaleTargetRef"`
+	// lower limit for the number of pods that can be set by the autoscaler, default 1.
+	MinReplicas *int32 `json:"minReplicas,omitempty"`
+	// upper limit for the number of pods that can be set by the autoscaler. It cannot be smaller than MinReplicas.
+	MaxReplicas int32 `json:"maxReplicas"`
+	// target average CPU utilization (represented as a percentage of requested CPU) over all the pods;
+	// if not specified, the default autoscaling policy will be used.
+	TargetCPUUtilizationPercentage *int32 `json:"targetCPUUtilizationPercentage,omitempty"`
+}
+
+// current status of a horizontal pod autoscaler.
+type HorizontalPodAutoscalerStatus struct {
+	// most recent generation observed by this autoscaler.
+	ObservedGeneration *int64 `json:"observedGeneration,omitempty"`
+
+	// last time the HorizontalPodAutoscaler scaled the number of pods;
+	// used by the autoscaler to control how often the number of pods is changed.
+	LastScaleTime *unversioned.Time `json:"lastScaleTime,omitempty"`
+
+	// current number of replicas of pods managed by this autoscaler.
+	CurrentReplicas int32 `json:"currentReplicas"`
+
+	// desired number of replicas of pods managed by this autoscaler.
+	DesiredReplicas int32 `json:"desiredReplicas"`
+
+	// current average CPU utilization over all pods, represented as a percentage of requested CPU,
+	// e.g. 70 means that an average pod is now using 70% of its requested CPU.
+	CurrentCPUUtilizationPercentage *int32 `json:"currentCPUUtilizationPercentage,omitempty"`
+}
+
+// +genclient=true
+
+// configuration of a horizontal pod autoscaler.
+type HorizontalPodAutoscaler struct {
+	unversioned.TypeMeta `json:",inline"`
+	api.ObjectMeta `json:"metadata,omitempty"`
+
+	// behaviour of the autoscaler. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status.
+	Spec HorizontalPodAutoscalerSpec `json:"spec,omitempty"`
+
+	// current information about the autoscaler.
+	Status HorizontalPodAutoscalerStatus `json:"status,omitempty"`
+}
+
+// list of horizontal pod autoscaler objects.
+type HorizontalPodAutoscalerList struct { + unversioned.TypeMeta `json:",inline"` + unversioned.ListMeta `json:"metadata,omitempty"` + + // list of horizontal pod autoscaler objects. + Items []HorizontalPodAutoscaler `json:"items"` +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/autoscaling/v1/conversion.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/autoscaling/v1/conversion.go deleted file mode 100644 index 286ce3fe8a4d..000000000000 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/autoscaling/v1/conversion.go +++ /dev/null @@ -1,101 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "reflect" - - "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/conversion" - "k8s.io/kubernetes/pkg/runtime" -) - -func addConversionFuncs(scheme *runtime.Scheme) { - // Add non-generated conversion functions - err := scheme.AddConversionFuncs( - Convert_extensions_SubresourceReference_To_v1_CrossVersionObjectReference, - Convert_v1_CrossVersionObjectReference_To_extensions_SubresourceReference, - Convert_extensions_HorizontalPodAutoscalerSpec_To_v1_HorizontalPodAutoscalerSpec, - Convert_v1_HorizontalPodAutoscalerSpec_To_extensions_HorizontalPodAutoscalerSpec, - ) - if err != nil { - // If one of the conversion functions is malformed, detect it immediately. 
- panic(err) - } -} - -func Convert_extensions_SubresourceReference_To_v1_CrossVersionObjectReference(in *extensions.SubresourceReference, out *CrossVersionObjectReference, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*extensions.SubresourceReference))(in) - } - out.Kind = in.Kind - out.Name = in.Name - out.APIVersion = in.APIVersion - return nil -} - -func Convert_v1_CrossVersionObjectReference_To_extensions_SubresourceReference(in *CrossVersionObjectReference, out *extensions.SubresourceReference, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*CrossVersionObjectReference))(in) - } - out.Kind = in.Kind - out.Name = in.Name - out.APIVersion = in.APIVersion - out.Subresource = "scale" - return nil -} - -func Convert_extensions_HorizontalPodAutoscalerSpec_To_v1_HorizontalPodAutoscalerSpec(in *extensions.HorizontalPodAutoscalerSpec, out *HorizontalPodAutoscalerSpec, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*extensions.HorizontalPodAutoscalerSpec))(in) - } - if err := Convert_extensions_SubresourceReference_To_v1_CrossVersionObjectReference(&in.ScaleRef, &out.ScaleTargetRef, s); err != nil { - return err - } - if in.MinReplicas != nil { - out.MinReplicas = new(int32) - *out.MinReplicas = int32(*in.MinReplicas) - } else { - out.MinReplicas = nil - } - out.MaxReplicas = int32(in.MaxReplicas) - if in.CPUUtilization != nil { - out.TargetCPUUtilizationPercentage = new(int32) - *out.TargetCPUUtilizationPercentage = int32(in.CPUUtilization.TargetPercentage) - } - return nil -} - -func Convert_v1_HorizontalPodAutoscalerSpec_To_extensions_HorizontalPodAutoscalerSpec(in *HorizontalPodAutoscalerSpec, out *extensions.HorizontalPodAutoscalerSpec, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*HorizontalPodAutoscalerSpec))(in) - } - if err := Convert_v1_CrossVersionObjectReference_To_extensions_SubresourceReference(&in.ScaleTargetRef, &out.ScaleRef, s); err != nil { - return err - } - if in.MinReplicas != nil { - out.MinReplicas = new(int) - *out.MinReplicas = int(*in.MinReplicas) - } else { - out.MinReplicas = nil - } - out.MaxReplicas = int(in.MaxReplicas) - if in.TargetCPUUtilizationPercentage != nil { - out.CPUUtilization = &extensions.CPUTargetUtilization{TargetPercentage: int(*in.TargetCPUUtilizationPercentage)} - } - return nil -} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/autoscaling/v1/conversion_generated.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/autoscaling/v1/conversion_generated.go index f55d38fd948d..11ca6a056d8a 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/autoscaling/v1/conversion_generated.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/autoscaling/v1/conversion_generated.go @@ -1,5 +1,7 @@ +// +build !ignore_autogenerated + /* -Copyright 2015 The Kubernetes Authors All rights reserved. +Copyright 2016 The Kubernetes Authors All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,200 +16,106 @@ See the License for the specific language governing permissions and limitations under the License. */ -// DO NOT EDIT. 
THIS FILE IS AUTO-GENERATED BY $KUBEROOT/hack/update-generated-conversions.sh +// This file was autogenerated by conversion-gen. Do not edit it manually! package v1 import ( - reflect "reflect" - api "k8s.io/kubernetes/pkg/api" - unversioned "k8s.io/kubernetes/pkg/api/unversioned" - v1 "k8s.io/kubernetes/pkg/api/v1" autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling" - extensions "k8s.io/kubernetes/pkg/apis/extensions" conversion "k8s.io/kubernetes/pkg/conversion" ) -func autoConvert_api_ObjectMeta_To_v1_ObjectMeta(in *api.ObjectMeta, out *v1.ObjectMeta, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.ObjectMeta))(in) +func init() { + if err := api.Scheme.AddGeneratedConversionFuncs( + Convert_v1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference, + Convert_autoscaling_CrossVersionObjectReference_To_v1_CrossVersionObjectReference, + Convert_v1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler, + Convert_autoscaling_HorizontalPodAutoscaler_To_v1_HorizontalPodAutoscaler, + Convert_v1_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList, + Convert_autoscaling_HorizontalPodAutoscalerList_To_v1_HorizontalPodAutoscalerList, + Convert_v1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec, + Convert_autoscaling_HorizontalPodAutoscalerSpec_To_v1_HorizontalPodAutoscalerSpec, + Convert_v1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus, + Convert_autoscaling_HorizontalPodAutoscalerStatus_To_v1_HorizontalPodAutoscalerStatus, + Convert_v1_Scale_To_autoscaling_Scale, + Convert_autoscaling_Scale_To_v1_Scale, + Convert_v1_ScaleSpec_To_autoscaling_ScaleSpec, + Convert_autoscaling_ScaleSpec_To_v1_ScaleSpec, + Convert_v1_ScaleStatus_To_autoscaling_ScaleStatus, + Convert_autoscaling_ScaleStatus_To_v1_ScaleStatus, + ); err != nil { + // if one of the conversion functions is malformed, detect it immediately. 
+ panic(err) } +} + +func autoConvert_v1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(in *CrossVersionObjectReference, out *autoscaling.CrossVersionObjectReference, s conversion.Scope) error { + out.Kind = in.Kind out.Name = in.Name - out.GenerateName = in.GenerateName - out.Namespace = in.Namespace - out.SelfLink = in.SelfLink - out.UID = in.UID - out.ResourceVersion = in.ResourceVersion - out.Generation = in.Generation - if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.CreationTimestamp, &out.CreationTimestamp, s); err != nil { - return err - } - // unable to generate simple pointer conversion for unversioned.Time -> unversioned.Time - if in.DeletionTimestamp != nil { - out.DeletionTimestamp = new(unversioned.Time) - if err := api.Convert_unversioned_Time_To_unversioned_Time(in.DeletionTimestamp, out.DeletionTimestamp, s); err != nil { - return err - } - } else { - out.DeletionTimestamp = nil - } - if in.DeletionGracePeriodSeconds != nil { - out.DeletionGracePeriodSeconds = new(int64) - *out.DeletionGracePeriodSeconds = *in.DeletionGracePeriodSeconds - } else { - out.DeletionGracePeriodSeconds = nil - } - if in.Labels != nil { - out.Labels = make(map[string]string) - for key, val := range in.Labels { - out.Labels[key] = val - } - } else { - out.Labels = nil - } - if in.Annotations != nil { - out.Annotations = make(map[string]string) - for key, val := range in.Annotations { - out.Annotations[key] = val - } - } else { - out.Annotations = nil - } + out.APIVersion = in.APIVersion return nil } -func Convert_api_ObjectMeta_To_v1_ObjectMeta(in *api.ObjectMeta, out *v1.ObjectMeta, s conversion.Scope) error { - return autoConvert_api_ObjectMeta_To_v1_ObjectMeta(in, out, s) +func Convert_v1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(in *CrossVersionObjectReference, out *autoscaling.CrossVersionObjectReference, s conversion.Scope) error { + return autoConvert_v1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(in, out, s) } -func autoConvert_v1_ObjectMeta_To_api_ObjectMeta(in *v1.ObjectMeta, out *api.ObjectMeta, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*v1.ObjectMeta))(in) - } +func autoConvert_autoscaling_CrossVersionObjectReference_To_v1_CrossVersionObjectReference(in *autoscaling.CrossVersionObjectReference, out *CrossVersionObjectReference, s conversion.Scope) error { + out.Kind = in.Kind out.Name = in.Name - out.GenerateName = in.GenerateName - out.Namespace = in.Namespace - out.SelfLink = in.SelfLink - out.UID = in.UID - out.ResourceVersion = in.ResourceVersion - out.Generation = in.Generation - if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.CreationTimestamp, &out.CreationTimestamp, s); err != nil { - return err - } - // unable to generate simple pointer conversion for unversioned.Time -> unversioned.Time - if in.DeletionTimestamp != nil { - out.DeletionTimestamp = new(unversioned.Time) - if err := api.Convert_unversioned_Time_To_unversioned_Time(in.DeletionTimestamp, out.DeletionTimestamp, s); err != nil { - return err - } - } else { - out.DeletionTimestamp = nil - } - if in.DeletionGracePeriodSeconds != nil { - out.DeletionGracePeriodSeconds = new(int64) - *out.DeletionGracePeriodSeconds = *in.DeletionGracePeriodSeconds - } else { - out.DeletionGracePeriodSeconds = nil - } - if in.Labels != nil { - out.Labels = make(map[string]string) - for key, val := range in.Labels { - out.Labels[key] = val - } - } 
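[Editor's aside, not part of the vendored patch: the init() block above registers each Convert_* pair with api.Scheme and panics at startup if any function has the wrong shape, and the v1-to-internal HorizontalPodAutoscaler conversion first runs SetDefaults_HorizontalPodAutoscaler on its input. A hedged usage sketch follows; it assumes the runtime Scheme in this snapshot exposes Convert(in, out interface{}) error, which is how these registered functions are normally invoked.]

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/apis/autoscaling"
	autoscalingv1 "k8s.io/kubernetes/pkg/apis/autoscaling/v1"
)

func main() {
	// A v1 object with MinReplicas deliberately left nil.
	in := autoscalingv1.HorizontalPodAutoscaler{}
	var out autoscaling.HorizontalPodAutoscaler
	// The generated function registered in init() handles this type pair;
	// the v1->internal direction also applies the defaulting pass.
	if err := api.Scheme.Convert(&in, &out); err != nil {
		fmt.Println("convert:", err)
		return
	}
	fmt.Println(*out.Spec.MinReplicas) // 1, filled in by SetDefaults_HorizontalPodAutoscaler
}

[End of aside; the diff resumes below.]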
else { - out.Labels = nil - } - if in.Annotations != nil { - out.Annotations = make(map[string]string) - for key, val := range in.Annotations { - out.Annotations[key] = val - } - } else { - out.Annotations = nil - } + out.APIVersion = in.APIVersion return nil } -func Convert_v1_ObjectMeta_To_api_ObjectMeta(in *v1.ObjectMeta, out *api.ObjectMeta, s conversion.Scope) error { - return autoConvert_v1_ObjectMeta_To_api_ObjectMeta(in, out, s) +func Convert_autoscaling_CrossVersionObjectReference_To_v1_CrossVersionObjectReference(in *autoscaling.CrossVersionObjectReference, out *CrossVersionObjectReference, s conversion.Scope) error { + return autoConvert_autoscaling_CrossVersionObjectReference_To_v1_CrossVersionObjectReference(in, out, s) } -func autoConvert_autoscaling_Scale_To_v1_Scale(in *autoscaling.Scale, out *Scale, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*autoscaling.Scale))(in) - } +func autoConvert_v1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(in *HorizontalPodAutoscaler, out *autoscaling.HorizontalPodAutoscaler, s conversion.Scope) error { + SetDefaults_HorizontalPodAutoscaler(in) if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } - if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { + // TODO: Inefficient conversion - can we improve it? + if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { return err } - if err := Convert_autoscaling_ScaleSpec_To_v1_ScaleSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_v1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_autoscaling_ScaleStatus_To_v1_ScaleStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_v1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -func Convert_autoscaling_Scale_To_v1_Scale(in *autoscaling.Scale, out *Scale, s conversion.Scope) error { - return autoConvert_autoscaling_Scale_To_v1_Scale(in, out, s) +func Convert_v1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(in *HorizontalPodAutoscaler, out *autoscaling.HorizontalPodAutoscaler, s conversion.Scope) error { + return autoConvert_v1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(in, out, s) } -func autoConvert_autoscaling_ScaleSpec_To_v1_ScaleSpec(in *autoscaling.ScaleSpec, out *ScaleSpec, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*autoscaling.ScaleSpec))(in) - } - out.Replicas = int32(in.Replicas) - return nil -} - -func Convert_autoscaling_ScaleSpec_To_v1_ScaleSpec(in *autoscaling.ScaleSpec, out *ScaleSpec, s conversion.Scope) error { - return autoConvert_autoscaling_ScaleSpec_To_v1_ScaleSpec(in, out, s) -} - -func autoConvert_autoscaling_ScaleStatus_To_v1_ScaleStatus(in *autoscaling.ScaleStatus, out *ScaleStatus, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*autoscaling.ScaleStatus))(in) - } - out.Replicas = int32(in.Replicas) - out.Selector = in.Selector - return nil -} - -func Convert_autoscaling_ScaleStatus_To_v1_ScaleStatus(in *autoscaling.ScaleStatus, out *ScaleStatus, s conversion.Scope) error { - return 
autoConvert_autoscaling_ScaleStatus_To_v1_ScaleStatus(in, out, s) -} - -func autoConvert_v1_HorizontalPodAutoscaler_To_extensions_HorizontalPodAutoscaler(in *HorizontalPodAutoscaler, out *extensions.HorizontalPodAutoscaler, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*HorizontalPodAutoscaler))(in) - } +func autoConvert_autoscaling_HorizontalPodAutoscaler_To_v1_HorizontalPodAutoscaler(in *autoscaling.HorizontalPodAutoscaler, out *HorizontalPodAutoscaler, s conversion.Scope) error { if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } - if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { + // TODO: Inefficient conversion - can we improve it? + if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { return err } - if err := Convert_v1_HorizontalPodAutoscalerSpec_To_extensions_HorizontalPodAutoscalerSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_autoscaling_HorizontalPodAutoscalerSpec_To_v1_HorizontalPodAutoscalerSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_v1_HorizontalPodAutoscalerStatus_To_extensions_HorizontalPodAutoscalerStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_autoscaling_HorizontalPodAutoscalerStatus_To_v1_HorizontalPodAutoscalerStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -func Convert_v1_HorizontalPodAutoscaler_To_extensions_HorizontalPodAutoscaler(in *HorizontalPodAutoscaler, out *extensions.HorizontalPodAutoscaler, s conversion.Scope) error { - return autoConvert_v1_HorizontalPodAutoscaler_To_extensions_HorizontalPodAutoscaler(in, out, s) +func Convert_autoscaling_HorizontalPodAutoscaler_To_v1_HorizontalPodAutoscaler(in *autoscaling.HorizontalPodAutoscaler, out *HorizontalPodAutoscaler, s conversion.Scope) error { + return autoConvert_autoscaling_HorizontalPodAutoscaler_To_v1_HorizontalPodAutoscaler(in, out, s) } -func autoConvert_v1_HorizontalPodAutoscalerList_To_extensions_HorizontalPodAutoscalerList(in *HorizontalPodAutoscalerList, out *extensions.HorizontalPodAutoscalerList, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*HorizontalPodAutoscalerList))(in) - } +func autoConvert_v1_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList(in *HorizontalPodAutoscalerList, out *autoscaling.HorizontalPodAutoscalerList, s conversion.Scope) error { if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } @@ -215,9 +123,10 @@ func autoConvert_v1_HorizontalPodAutoscalerList_To_extensions_HorizontalPodAutos return err } if in.Items != nil { - out.Items = make([]extensions.HorizontalPodAutoscaler, len(in.Items)) - for i := range in.Items { - if err := Convert_v1_HorizontalPodAutoscaler_To_extensions_HorizontalPodAutoscaler(&in.Items[i], &out.Items[i], s); err != nil { + in, out := &in.Items, &out.Items + *out = make([]autoscaling.HorizontalPodAutoscaler, len(*in)) + for i := range *in { + if err := Convert_v1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -227,229 +136,165 @@ func autoConvert_v1_HorizontalPodAutoscalerList_To_extensions_HorizontalPodAutos return nil } -func 
Convert_v1_HorizontalPodAutoscalerList_To_extensions_HorizontalPodAutoscalerList(in *HorizontalPodAutoscalerList, out *extensions.HorizontalPodAutoscalerList, s conversion.Scope) error { - return autoConvert_v1_HorizontalPodAutoscalerList_To_extensions_HorizontalPodAutoscalerList(in, out, s) -} - -func autoConvert_v1_HorizontalPodAutoscalerSpec_To_extensions_HorizontalPodAutoscalerSpec(in *HorizontalPodAutoscalerSpec, out *extensions.HorizontalPodAutoscalerSpec, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*HorizontalPodAutoscalerSpec))(in) - } - // in.ScaleTargetRef has no peer in out - if in.MinReplicas != nil { - out.MinReplicas = new(int) - *out.MinReplicas = int(*in.MinReplicas) - } else { - out.MinReplicas = nil - } - out.MaxReplicas = int(in.MaxReplicas) - // in.TargetCPUUtilizationPercentage has no peer in out - return nil +func Convert_v1_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList(in *HorizontalPodAutoscalerList, out *autoscaling.HorizontalPodAutoscalerList, s conversion.Scope) error { + return autoConvert_v1_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList(in, out, s) } -func autoConvert_v1_HorizontalPodAutoscalerStatus_To_extensions_HorizontalPodAutoscalerStatus(in *HorizontalPodAutoscalerStatus, out *extensions.HorizontalPodAutoscalerStatus, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*HorizontalPodAutoscalerStatus))(in) +func autoConvert_autoscaling_HorizontalPodAutoscalerList_To_v1_HorizontalPodAutoscalerList(in *autoscaling.HorizontalPodAutoscalerList, out *HorizontalPodAutoscalerList, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err } - if in.ObservedGeneration != nil { - out.ObservedGeneration = new(int64) - *out.ObservedGeneration = *in.ObservedGeneration - } else { - out.ObservedGeneration = nil + if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { + return err } - // unable to generate simple pointer conversion for unversioned.Time -> unversioned.Time - if in.LastScaleTime != nil { - out.LastScaleTime = new(unversioned.Time) - if err := api.Convert_unversioned_Time_To_unversioned_Time(in.LastScaleTime, out.LastScaleTime, s); err != nil { - return err + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]HorizontalPodAutoscaler, len(*in)) + for i := range *in { + if err := Convert_autoscaling_HorizontalPodAutoscaler_To_v1_HorizontalPodAutoscaler(&(*in)[i], &(*out)[i], s); err != nil { + return err + } } } else { - out.LastScaleTime = nil - } - out.CurrentReplicas = int(in.CurrentReplicas) - out.DesiredReplicas = int(in.DesiredReplicas) - if in.CurrentCPUUtilizationPercentage != nil { - out.CurrentCPUUtilizationPercentage = new(int) - *out.CurrentCPUUtilizationPercentage = int(*in.CurrentCPUUtilizationPercentage) - } else { - out.CurrentCPUUtilizationPercentage = nil + out.Items = nil } return nil } -func Convert_v1_HorizontalPodAutoscalerStatus_To_extensions_HorizontalPodAutoscalerStatus(in *HorizontalPodAutoscalerStatus, out *extensions.HorizontalPodAutoscalerStatus, s conversion.Scope) error { - return autoConvert_v1_HorizontalPodAutoscalerStatus_To_extensions_HorizontalPodAutoscalerStatus(in, out, s) +func 
Convert_autoscaling_HorizontalPodAutoscalerList_To_v1_HorizontalPodAutoscalerList(in *autoscaling.HorizontalPodAutoscalerList, out *HorizontalPodAutoscalerList, s conversion.Scope) error { + return autoConvert_autoscaling_HorizontalPodAutoscalerList_To_v1_HorizontalPodAutoscalerList(in, out, s) } -func autoConvert_v1_Scale_To_autoscaling_Scale(in *Scale, out *autoscaling.Scale, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*Scale))(in) - } - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } - if err := Convert_v1_ScaleSpec_To_autoscaling_ScaleSpec(&in.Spec, &out.Spec, s); err != nil { +func autoConvert_v1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec(in *HorizontalPodAutoscalerSpec, out *autoscaling.HorizontalPodAutoscalerSpec, s conversion.Scope) error { + if err := Convert_v1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(&in.ScaleTargetRef, &out.ScaleTargetRef, s); err != nil { return err } - if err := Convert_v1_ScaleStatus_To_autoscaling_ScaleStatus(&in.Status, &out.Status, s); err != nil { + out.MinReplicas = in.MinReplicas + out.MaxReplicas = in.MaxReplicas + out.TargetCPUUtilizationPercentage = in.TargetCPUUtilizationPercentage + return nil +} + +func Convert_v1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec(in *HorizontalPodAutoscalerSpec, out *autoscaling.HorizontalPodAutoscalerSpec, s conversion.Scope) error { + return autoConvert_v1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec(in, out, s) +} + +func autoConvert_autoscaling_HorizontalPodAutoscalerSpec_To_v1_HorizontalPodAutoscalerSpec(in *autoscaling.HorizontalPodAutoscalerSpec, out *HorizontalPodAutoscalerSpec, s conversion.Scope) error { + if err := Convert_autoscaling_CrossVersionObjectReference_To_v1_CrossVersionObjectReference(&in.ScaleTargetRef, &out.ScaleTargetRef, s); err != nil { return err } + out.MinReplicas = in.MinReplicas + out.MaxReplicas = in.MaxReplicas + out.TargetCPUUtilizationPercentage = in.TargetCPUUtilizationPercentage return nil } -func Convert_v1_Scale_To_autoscaling_Scale(in *Scale, out *autoscaling.Scale, s conversion.Scope) error { - return autoConvert_v1_Scale_To_autoscaling_Scale(in, out, s) +func Convert_autoscaling_HorizontalPodAutoscalerSpec_To_v1_HorizontalPodAutoscalerSpec(in *autoscaling.HorizontalPodAutoscalerSpec, out *HorizontalPodAutoscalerSpec, s conversion.Scope) error { + return autoConvert_autoscaling_HorizontalPodAutoscalerSpec_To_v1_HorizontalPodAutoscalerSpec(in, out, s) } -func autoConvert_v1_ScaleSpec_To_autoscaling_ScaleSpec(in *ScaleSpec, out *autoscaling.ScaleSpec, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*ScaleSpec))(in) - } - out.Replicas = int(in.Replicas) +func autoConvert_v1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(in *HorizontalPodAutoscalerStatus, out *autoscaling.HorizontalPodAutoscalerStatus, s conversion.Scope) error { + out.ObservedGeneration = in.ObservedGeneration + out.LastScaleTime = in.LastScaleTime + out.CurrentReplicas = in.CurrentReplicas + out.DesiredReplicas = in.DesiredReplicas + out.CurrentCPUUtilizationPercentage = in.CurrentCPUUtilizationPercentage return nil } -func 
Convert_v1_ScaleSpec_To_autoscaling_ScaleSpec(in *ScaleSpec, out *autoscaling.ScaleSpec, s conversion.Scope) error { - return autoConvert_v1_ScaleSpec_To_autoscaling_ScaleSpec(in, out, s) +func Convert_v1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(in *HorizontalPodAutoscalerStatus, out *autoscaling.HorizontalPodAutoscalerStatus, s conversion.Scope) error { + return autoConvert_v1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(in, out, s) } -func autoConvert_v1_ScaleStatus_To_autoscaling_ScaleStatus(in *ScaleStatus, out *autoscaling.ScaleStatus, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*ScaleStatus))(in) - } - out.Replicas = int(in.Replicas) - out.Selector = in.Selector +func autoConvert_autoscaling_HorizontalPodAutoscalerStatus_To_v1_HorizontalPodAutoscalerStatus(in *autoscaling.HorizontalPodAutoscalerStatus, out *HorizontalPodAutoscalerStatus, s conversion.Scope) error { + out.ObservedGeneration = in.ObservedGeneration + out.LastScaleTime = in.LastScaleTime + out.CurrentReplicas = in.CurrentReplicas + out.DesiredReplicas = in.DesiredReplicas + out.CurrentCPUUtilizationPercentage = in.CurrentCPUUtilizationPercentage return nil } -func Convert_v1_ScaleStatus_To_autoscaling_ScaleStatus(in *ScaleStatus, out *autoscaling.ScaleStatus, s conversion.Scope) error { - return autoConvert_v1_ScaleStatus_To_autoscaling_ScaleStatus(in, out, s) +func Convert_autoscaling_HorizontalPodAutoscalerStatus_To_v1_HorizontalPodAutoscalerStatus(in *autoscaling.HorizontalPodAutoscalerStatus, out *HorizontalPodAutoscalerStatus, s conversion.Scope) error { + return autoConvert_autoscaling_HorizontalPodAutoscalerStatus_To_v1_HorizontalPodAutoscalerStatus(in, out, s) } -func autoConvert_extensions_HorizontalPodAutoscaler_To_v1_HorizontalPodAutoscaler(in *extensions.HorizontalPodAutoscaler, out *HorizontalPodAutoscaler, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*extensions.HorizontalPodAutoscaler))(in) - } +func autoConvert_v1_Scale_To_autoscaling_Scale(in *Scale, out *autoscaling.Scale, s conversion.Scope) error { if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } - if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { + // TODO: Inefficient conversion - can we improve it? 
+ if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { return err } - if err := Convert_extensions_HorizontalPodAutoscalerSpec_To_v1_HorizontalPodAutoscalerSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_v1_ScaleSpec_To_autoscaling_ScaleSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_extensions_HorizontalPodAutoscalerStatus_To_v1_HorizontalPodAutoscalerStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_v1_ScaleStatus_To_autoscaling_ScaleStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -func Convert_extensions_HorizontalPodAutoscaler_To_v1_HorizontalPodAutoscaler(in *extensions.HorizontalPodAutoscaler, out *HorizontalPodAutoscaler, s conversion.Scope) error { - return autoConvert_extensions_HorizontalPodAutoscaler_To_v1_HorizontalPodAutoscaler(in, out, s) +func Convert_v1_Scale_To_autoscaling_Scale(in *Scale, out *autoscaling.Scale, s conversion.Scope) error { + return autoConvert_v1_Scale_To_autoscaling_Scale(in, out, s) } -func autoConvert_extensions_HorizontalPodAutoscalerList_To_v1_HorizontalPodAutoscalerList(in *extensions.HorizontalPodAutoscalerList, out *HorizontalPodAutoscalerList, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*extensions.HorizontalPodAutoscalerList))(in) - } +func autoConvert_autoscaling_Scale_To_v1_Scale(in *autoscaling.Scale, out *Scale, s conversion.Scope) error { if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { + // TODO: Inefficient conversion - can we improve it? 
+ if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { return err } - if in.Items != nil { - out.Items = make([]HorizontalPodAutoscaler, len(in.Items)) - for i := range in.Items { - if err := Convert_extensions_HorizontalPodAutoscaler_To_v1_HorizontalPodAutoscaler(&in.Items[i], &out.Items[i], s); err != nil { - return err - } - } - } else { - out.Items = nil + if err := Convert_autoscaling_ScaleSpec_To_v1_ScaleSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_autoscaling_ScaleStatus_To_v1_ScaleStatus(&in.Status, &out.Status, s); err != nil { + return err } return nil } -func Convert_extensions_HorizontalPodAutoscalerList_To_v1_HorizontalPodAutoscalerList(in *extensions.HorizontalPodAutoscalerList, out *HorizontalPodAutoscalerList, s conversion.Scope) error { - return autoConvert_extensions_HorizontalPodAutoscalerList_To_v1_HorizontalPodAutoscalerList(in, out, s) +func Convert_autoscaling_Scale_To_v1_Scale(in *autoscaling.Scale, out *Scale, s conversion.Scope) error { + return autoConvert_autoscaling_Scale_To_v1_Scale(in, out, s) } -func autoConvert_extensions_HorizontalPodAutoscalerSpec_To_v1_HorizontalPodAutoscalerSpec(in *extensions.HorizontalPodAutoscalerSpec, out *HorizontalPodAutoscalerSpec, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*extensions.HorizontalPodAutoscalerSpec))(in) - } - // in.ScaleRef has no peer in out - if in.MinReplicas != nil { - out.MinReplicas = new(int32) - *out.MinReplicas = int32(*in.MinReplicas) - } else { - out.MinReplicas = nil - } - out.MaxReplicas = int32(in.MaxReplicas) - // in.CPUUtilization has no peer in out +func autoConvert_v1_ScaleSpec_To_autoscaling_ScaleSpec(in *ScaleSpec, out *autoscaling.ScaleSpec, s conversion.Scope) error { + out.Replicas = in.Replicas return nil } -func autoConvert_extensions_HorizontalPodAutoscalerStatus_To_v1_HorizontalPodAutoscalerStatus(in *extensions.HorizontalPodAutoscalerStatus, out *HorizontalPodAutoscalerStatus, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*extensions.HorizontalPodAutoscalerStatus))(in) - } - if in.ObservedGeneration != nil { - out.ObservedGeneration = new(int64) - *out.ObservedGeneration = *in.ObservedGeneration - } else { - out.ObservedGeneration = nil - } - // unable to generate simple pointer conversion for unversioned.Time -> unversioned.Time - if in.LastScaleTime != nil { - out.LastScaleTime = new(unversioned.Time) - if err := api.Convert_unversioned_Time_To_unversioned_Time(in.LastScaleTime, out.LastScaleTime, s); err != nil { - return err - } - } else { - out.LastScaleTime = nil - } - out.CurrentReplicas = int32(in.CurrentReplicas) - out.DesiredReplicas = int32(in.DesiredReplicas) - if in.CurrentCPUUtilizationPercentage != nil { - out.CurrentCPUUtilizationPercentage = new(int32) - *out.CurrentCPUUtilizationPercentage = int32(*in.CurrentCPUUtilizationPercentage) - } else { - out.CurrentCPUUtilizationPercentage = nil - } +func Convert_v1_ScaleSpec_To_autoscaling_ScaleSpec(in *ScaleSpec, out *autoscaling.ScaleSpec, s conversion.Scope) error { + return autoConvert_v1_ScaleSpec_To_autoscaling_ScaleSpec(in, out, s) +} + +func autoConvert_autoscaling_ScaleSpec_To_v1_ScaleSpec(in *autoscaling.ScaleSpec, out *ScaleSpec, s conversion.Scope) error { + out.Replicas = in.Replicas + return nil +} + +func Convert_autoscaling_ScaleSpec_To_v1_ScaleSpec(in *autoscaling.ScaleSpec, out *ScaleSpec, s 
conversion.Scope) error { + return autoConvert_autoscaling_ScaleSpec_To_v1_ScaleSpec(in, out, s) +} + +func autoConvert_v1_ScaleStatus_To_autoscaling_ScaleStatus(in *ScaleStatus, out *autoscaling.ScaleStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + out.Selector = in.Selector return nil } -func Convert_extensions_HorizontalPodAutoscalerStatus_To_v1_HorizontalPodAutoscalerStatus(in *extensions.HorizontalPodAutoscalerStatus, out *HorizontalPodAutoscalerStatus, s conversion.Scope) error { - return autoConvert_extensions_HorizontalPodAutoscalerStatus_To_v1_HorizontalPodAutoscalerStatus(in, out, s) +func Convert_v1_ScaleStatus_To_autoscaling_ScaleStatus(in *ScaleStatus, out *autoscaling.ScaleStatus, s conversion.Scope) error { + return autoConvert_v1_ScaleStatus_To_autoscaling_ScaleStatus(in, out, s) +} + +func autoConvert_autoscaling_ScaleStatus_To_v1_ScaleStatus(in *autoscaling.ScaleStatus, out *ScaleStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + out.Selector = in.Selector + return nil } -func init() { - err := api.Scheme.AddGeneratedConversionFuncs( - autoConvert_api_ObjectMeta_To_v1_ObjectMeta, - autoConvert_autoscaling_ScaleSpec_To_v1_ScaleSpec, - autoConvert_autoscaling_ScaleStatus_To_v1_ScaleStatus, - autoConvert_autoscaling_Scale_To_v1_Scale, - autoConvert_extensions_HorizontalPodAutoscalerList_To_v1_HorizontalPodAutoscalerList, - autoConvert_extensions_HorizontalPodAutoscalerSpec_To_v1_HorizontalPodAutoscalerSpec, - autoConvert_extensions_HorizontalPodAutoscalerStatus_To_v1_HorizontalPodAutoscalerStatus, - autoConvert_extensions_HorizontalPodAutoscaler_To_v1_HorizontalPodAutoscaler, - autoConvert_v1_HorizontalPodAutoscalerList_To_extensions_HorizontalPodAutoscalerList, - autoConvert_v1_HorizontalPodAutoscalerSpec_To_extensions_HorizontalPodAutoscalerSpec, - autoConvert_v1_HorizontalPodAutoscalerStatus_To_extensions_HorizontalPodAutoscalerStatus, - autoConvert_v1_HorizontalPodAutoscaler_To_extensions_HorizontalPodAutoscaler, - autoConvert_v1_ObjectMeta_To_api_ObjectMeta, - autoConvert_v1_ScaleSpec_To_autoscaling_ScaleSpec, - autoConvert_v1_ScaleStatus_To_autoscaling_ScaleStatus, - autoConvert_v1_Scale_To_autoscaling_Scale, - ) - if err != nil { - // If one of the conversion functions is malformed, detect it immediately. - panic(err) - } +func Convert_autoscaling_ScaleStatus_To_v1_ScaleStatus(in *autoscaling.ScaleStatus, out *ScaleStatus, s conversion.Scope) error { + return autoConvert_autoscaling_ScaleStatus_To_v1_ScaleStatus(in, out, s) } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/autoscaling/v1/deep_copy_generated.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/autoscaling/v1/deep_copy_generated.go index a71749abb83a..6932ba638106 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/autoscaling/v1/deep_copy_generated.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/autoscaling/v1/deep_copy_generated.go @@ -1,5 +1,7 @@ +// +build !ignore_autogenerated + /* -Copyright 2015 The Kubernetes Authors All rights reserved. +Copyright 2016 The Kubernetes Authors All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,118 +16,68 @@ See the License for the specific language governing permissions and limitations under the License. */ -// DO NOT EDIT. THIS FILE IS AUTO-GENERATED BY $KUBEROOT/hack/update-generated-deep-copies.sh. +// This file was autogenerated by deepcopy-gen. Do not edit it manually! 
package v1 import ( - time "time" - api "k8s.io/kubernetes/pkg/api" unversioned "k8s.io/kubernetes/pkg/api/unversioned" - v1 "k8s.io/kubernetes/pkg/api/v1" + api_v1 "k8s.io/kubernetes/pkg/api/v1" conversion "k8s.io/kubernetes/pkg/conversion" ) -func deepCopy_unversioned_ListMeta(in unversioned.ListMeta, out *unversioned.ListMeta, c *conversion.Cloner) error { - out.SelfLink = in.SelfLink - out.ResourceVersion = in.ResourceVersion - return nil -} - -func deepCopy_unversioned_Time(in unversioned.Time, out *unversioned.Time, c *conversion.Cloner) error { - if newVal, err := c.DeepCopy(in.Time); err != nil { - return err - } else { - out.Time = newVal.(time.Time) - } - return nil -} - -func deepCopy_unversioned_TypeMeta(in unversioned.TypeMeta, out *unversioned.TypeMeta, c *conversion.Cloner) error { - out.Kind = in.Kind - out.APIVersion = in.APIVersion - return nil -} - -func deepCopy_v1_ObjectMeta(in v1.ObjectMeta, out *v1.ObjectMeta, c *conversion.Cloner) error { - out.Name = in.Name - out.GenerateName = in.GenerateName - out.Namespace = in.Namespace - out.SelfLink = in.SelfLink - out.UID = in.UID - out.ResourceVersion = in.ResourceVersion - out.Generation = in.Generation - if err := deepCopy_unversioned_Time(in.CreationTimestamp, &out.CreationTimestamp, c); err != nil { - return err - } - if in.DeletionTimestamp != nil { - out.DeletionTimestamp = new(unversioned.Time) - if err := deepCopy_unversioned_Time(*in.DeletionTimestamp, out.DeletionTimestamp, c); err != nil { - return err - } - } else { - out.DeletionTimestamp = nil - } - if in.DeletionGracePeriodSeconds != nil { - out.DeletionGracePeriodSeconds = new(int64) - *out.DeletionGracePeriodSeconds = *in.DeletionGracePeriodSeconds - } else { - out.DeletionGracePeriodSeconds = nil - } - if in.Labels != nil { - out.Labels = make(map[string]string) - for key, val := range in.Labels { - out.Labels[key] = val - } - } else { - out.Labels = nil - } - if in.Annotations != nil { - out.Annotations = make(map[string]string) - for key, val := range in.Annotations { - out.Annotations[key] = val - } - } else { - out.Annotations = nil +func init() { + if err := api.Scheme.AddGeneratedDeepCopyFuncs( + DeepCopy_v1_CrossVersionObjectReference, + DeepCopy_v1_HorizontalPodAutoscaler, + DeepCopy_v1_HorizontalPodAutoscalerList, + DeepCopy_v1_HorizontalPodAutoscalerSpec, + DeepCopy_v1_HorizontalPodAutoscalerStatus, + DeepCopy_v1_Scale, + DeepCopy_v1_ScaleSpec, + DeepCopy_v1_ScaleStatus, + ); err != nil { + // if one of the deep copy functions is malformed, detect it immediately. 
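[Editor's aside, not part of the vendored patch: the regenerated DeepCopy_* functions in this file copy pointer fields by allocating a fresh target and copying the pointed-to value, so the clone never aliases the source. A self-contained sketch of that exact pattern, using a stand-in struct rather than the vendored types:]

package main

import "fmt"

type spec struct {
	MinReplicas *int32
}

func deepCopySpec(in spec, out *spec) {
	if in.MinReplicas != nil {
		in, out := in.MinReplicas, &out.MinReplicas
		*out = new(int32) // fresh allocation: the copy must not share the source's pointer
		**out = *in
	} else {
		out.MinReplicas = nil
	}
}

func main() {
	n := int32(3)
	src := spec{MinReplicas: &n}
	var dst spec
	deepCopySpec(src, &dst)
	*src.MinReplicas = 99
	fmt.Println(*dst.MinReplicas) // still 3: mutating the source does not leak into the copy
}

[End of aside; the diff resumes below.]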
+ panic(err) } - return nil } -func deepCopy_v1_CrossVersionObjectReference(in CrossVersionObjectReference, out *CrossVersionObjectReference, c *conversion.Cloner) error { +func DeepCopy_v1_CrossVersionObjectReference(in CrossVersionObjectReference, out *CrossVersionObjectReference, c *conversion.Cloner) error { out.Kind = in.Kind out.Name = in.Name out.APIVersion = in.APIVersion return nil } -func deepCopy_v1_HorizontalPodAutoscaler(in HorizontalPodAutoscaler, out *HorizontalPodAutoscaler, c *conversion.Cloner) error { - if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { +func DeepCopy_v1_HorizontalPodAutoscaler(in HorizontalPodAutoscaler, out *HorizontalPodAutoscaler, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } - if err := deepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + if err := api_v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { return err } - if err := deepCopy_v1_HorizontalPodAutoscalerSpec(in.Spec, &out.Spec, c); err != nil { + if err := DeepCopy_v1_HorizontalPodAutoscalerSpec(in.Spec, &out.Spec, c); err != nil { return err } - if err := deepCopy_v1_HorizontalPodAutoscalerStatus(in.Status, &out.Status, c); err != nil { + if err := DeepCopy_v1_HorizontalPodAutoscalerStatus(in.Status, &out.Status, c); err != nil { return err } return nil } -func deepCopy_v1_HorizontalPodAutoscalerList(in HorizontalPodAutoscalerList, out *HorizontalPodAutoscalerList, c *conversion.Cloner) error { - if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { +func DeepCopy_v1_HorizontalPodAutoscalerList(in HorizontalPodAutoscalerList, out *HorizontalPodAutoscalerList, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } - if err := deepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { return err } if in.Items != nil { - out.Items = make([]HorizontalPodAutoscaler, len(in.Items)) - for i := range in.Items { - if err := deepCopy_v1_HorizontalPodAutoscaler(in.Items[i], &out.Items[i], c); err != nil { + in, out := in.Items, &out.Items + *out = make([]HorizontalPodAutoscaler, len(in)) + for i := range in { + if err := DeepCopy_v1_HorizontalPodAutoscaler(in[i], &(*out)[i], c); err != nil { return err } } @@ -135,36 +87,40 @@ func deepCopy_v1_HorizontalPodAutoscalerList(in HorizontalPodAutoscalerList, out return nil } -func deepCopy_v1_HorizontalPodAutoscalerSpec(in HorizontalPodAutoscalerSpec, out *HorizontalPodAutoscalerSpec, c *conversion.Cloner) error { - if err := deepCopy_v1_CrossVersionObjectReference(in.ScaleTargetRef, &out.ScaleTargetRef, c); err != nil { +func DeepCopy_v1_HorizontalPodAutoscalerSpec(in HorizontalPodAutoscalerSpec, out *HorizontalPodAutoscalerSpec, c *conversion.Cloner) error { + if err := DeepCopy_v1_CrossVersionObjectReference(in.ScaleTargetRef, &out.ScaleTargetRef, c); err != nil { return err } if in.MinReplicas != nil { - out.MinReplicas = new(int32) - *out.MinReplicas = *in.MinReplicas + in, out := in.MinReplicas, &out.MinReplicas + *out = new(int32) + **out = *in } else { out.MinReplicas = nil } out.MaxReplicas = in.MaxReplicas if in.TargetCPUUtilizationPercentage != nil { - out.TargetCPUUtilizationPercentage = new(int32) - *out.TargetCPUUtilizationPercentage = 
*in.TargetCPUUtilizationPercentage + in, out := in.TargetCPUUtilizationPercentage, &out.TargetCPUUtilizationPercentage + *out = new(int32) + **out = *in } else { out.TargetCPUUtilizationPercentage = nil } return nil } -func deepCopy_v1_HorizontalPodAutoscalerStatus(in HorizontalPodAutoscalerStatus, out *HorizontalPodAutoscalerStatus, c *conversion.Cloner) error { +func DeepCopy_v1_HorizontalPodAutoscalerStatus(in HorizontalPodAutoscalerStatus, out *HorizontalPodAutoscalerStatus, c *conversion.Cloner) error { if in.ObservedGeneration != nil { - out.ObservedGeneration = new(int64) - *out.ObservedGeneration = *in.ObservedGeneration + in, out := in.ObservedGeneration, &out.ObservedGeneration + *out = new(int64) + **out = *in } else { out.ObservedGeneration = nil } if in.LastScaleTime != nil { - out.LastScaleTime = new(unversioned.Time) - if err := deepCopy_unversioned_Time(*in.LastScaleTime, out.LastScaleTime, c); err != nil { + in, out := in.LastScaleTime, &out.LastScaleTime + *out = new(unversioned.Time) + if err := unversioned.DeepCopy_unversioned_Time(*in, *out, c); err != nil { return err } } else { @@ -173,58 +129,38 @@ func deepCopy_v1_HorizontalPodAutoscalerStatus(in HorizontalPodAutoscalerStatus, out.CurrentReplicas = in.CurrentReplicas out.DesiredReplicas = in.DesiredReplicas if in.CurrentCPUUtilizationPercentage != nil { - out.CurrentCPUUtilizationPercentage = new(int32) - *out.CurrentCPUUtilizationPercentage = *in.CurrentCPUUtilizationPercentage + in, out := in.CurrentCPUUtilizationPercentage, &out.CurrentCPUUtilizationPercentage + *out = new(int32) + **out = *in } else { out.CurrentCPUUtilizationPercentage = nil } return nil } -func deepCopy_v1_Scale(in Scale, out *Scale, c *conversion.Cloner) error { - if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { +func DeepCopy_v1_Scale(in Scale, out *Scale, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } - if err := deepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + if err := api_v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { return err } - if err := deepCopy_v1_ScaleSpec(in.Spec, &out.Spec, c); err != nil { + if err := DeepCopy_v1_ScaleSpec(in.Spec, &out.Spec, c); err != nil { return err } - if err := deepCopy_v1_ScaleStatus(in.Status, &out.Status, c); err != nil { + if err := DeepCopy_v1_ScaleStatus(in.Status, &out.Status, c); err != nil { return err } return nil } -func deepCopy_v1_ScaleSpec(in ScaleSpec, out *ScaleSpec, c *conversion.Cloner) error { +func DeepCopy_v1_ScaleSpec(in ScaleSpec, out *ScaleSpec, c *conversion.Cloner) error { out.Replicas = in.Replicas return nil } -func deepCopy_v1_ScaleStatus(in ScaleStatus, out *ScaleStatus, c *conversion.Cloner) error { +func DeepCopy_v1_ScaleStatus(in ScaleStatus, out *ScaleStatus, c *conversion.Cloner) error { out.Replicas = in.Replicas out.Selector = in.Selector return nil } - -func init() { - err := api.Scheme.AddGeneratedDeepCopyFuncs( - deepCopy_unversioned_ListMeta, - deepCopy_unversioned_Time, - deepCopy_unversioned_TypeMeta, - deepCopy_v1_ObjectMeta, - deepCopy_v1_CrossVersionObjectReference, - deepCopy_v1_HorizontalPodAutoscaler, - deepCopy_v1_HorizontalPodAutoscalerList, - deepCopy_v1_HorizontalPodAutoscalerSpec, - deepCopy_v1_HorizontalPodAutoscalerStatus, - deepCopy_v1_Scale, - deepCopy_v1_ScaleSpec, - deepCopy_v1_ScaleStatus, - ) - if err != nil { - // if one of the deep copy functions is malformed, 
detect it immediately. - panic(err) - } -} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/autoscaling/v1/defaults.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/autoscaling/v1/defaults.go index 2b5173dcd2d0..3fb24c46b786 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/autoscaling/v1/defaults.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/autoscaling/v1/defaults.go @@ -22,11 +22,13 @@ import ( func addDefaultingFuncs(scheme *runtime.Scheme) { scheme.AddDefaultingFuncs( - func(obj *HorizontalPodAutoscaler) { - if obj.Spec.MinReplicas == nil { - minReplicas := int32(1) - obj.Spec.MinReplicas = &minReplicas - } - }, + SetDefaults_HorizontalPodAutoscaler, ) } + +func SetDefaults_HorizontalPodAutoscaler(obj *HorizontalPodAutoscaler) { + if obj.Spec.MinReplicas == nil { + minReplicas := int32(1) + obj.Spec.MinReplicas = &minReplicas + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/autoscaling/v1/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/autoscaling/v1/doc.go new file mode 100644 index 000000000000..1c67cc3a94c4 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/autoscaling/v1/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +genconversion=true +package v1 diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/autoscaling/v1/generated.pb.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/autoscaling/v1/generated.pb.go new file mode 100644 index 000000000000..e90dd5d62c7f --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/autoscaling/v1/generated.pb.go @@ -0,0 +1,1612 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/pkg/apis/autoscaling/v1/generated.proto +// DO NOT EDIT! + +/* + Package v1 is a generated protocol buffer package. 
+ + It is generated from these files: + k8s.io/kubernetes/pkg/apis/autoscaling/v1/generated.proto + + It has these top-level messages: + CrossVersionObjectReference + HorizontalPodAutoscaler + HorizontalPodAutoscalerList + HorizontalPodAutoscalerSpec + HorizontalPodAutoscalerStatus + Scale + ScaleSpec + ScaleStatus +*/ +package v1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import k8s_io_kubernetes_pkg_api_unversioned "k8s.io/kubernetes/pkg/api/unversioned" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +func (m *CrossVersionObjectReference) Reset() { *m = CrossVersionObjectReference{} } +func (m *CrossVersionObjectReference) String() string { return proto.CompactTextString(m) } +func (*CrossVersionObjectReference) ProtoMessage() {} + +func (m *HorizontalPodAutoscaler) Reset() { *m = HorizontalPodAutoscaler{} } +func (m *HorizontalPodAutoscaler) String() string { return proto.CompactTextString(m) } +func (*HorizontalPodAutoscaler) ProtoMessage() {} + +func (m *HorizontalPodAutoscalerList) Reset() { *m = HorizontalPodAutoscalerList{} } +func (m *HorizontalPodAutoscalerList) String() string { return proto.CompactTextString(m) } +func (*HorizontalPodAutoscalerList) ProtoMessage() {} + +func (m *HorizontalPodAutoscalerSpec) Reset() { *m = HorizontalPodAutoscalerSpec{} } +func (m *HorizontalPodAutoscalerSpec) String() string { return proto.CompactTextString(m) } +func (*HorizontalPodAutoscalerSpec) ProtoMessage() {} + +func (m *HorizontalPodAutoscalerStatus) Reset() { *m = HorizontalPodAutoscalerStatus{} } +func (m *HorizontalPodAutoscalerStatus) String() string { return proto.CompactTextString(m) } +func (*HorizontalPodAutoscalerStatus) ProtoMessage() {} + +func (m *Scale) Reset() { *m = Scale{} } +func (m *Scale) String() string { return proto.CompactTextString(m) } +func (*Scale) ProtoMessage() {} + +func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } +func (m *ScaleSpec) String() string { return proto.CompactTextString(m) } +func (*ScaleSpec) ProtoMessage() {} + +func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } +func (m *ScaleStatus) String() string { return proto.CompactTextString(m) } +func (*ScaleStatus) ProtoMessage() {} + +func init() { + proto.RegisterType((*CrossVersionObjectReference)(nil), "k8s.io.kubernetes.pkg.apis.autoscaling.v1.CrossVersionObjectReference") + proto.RegisterType((*HorizontalPodAutoscaler)(nil), "k8s.io.kubernetes.pkg.apis.autoscaling.v1.HorizontalPodAutoscaler") + proto.RegisterType((*HorizontalPodAutoscalerList)(nil), "k8s.io.kubernetes.pkg.apis.autoscaling.v1.HorizontalPodAutoscalerList") + proto.RegisterType((*HorizontalPodAutoscalerSpec)(nil), "k8s.io.kubernetes.pkg.apis.autoscaling.v1.HorizontalPodAutoscalerSpec") + proto.RegisterType((*HorizontalPodAutoscalerStatus)(nil), "k8s.io.kubernetes.pkg.apis.autoscaling.v1.HorizontalPodAutoscalerStatus") + proto.RegisterType((*Scale)(nil), "k8s.io.kubernetes.pkg.apis.autoscaling.v1.Scale") + proto.RegisterType((*ScaleSpec)(nil), "k8s.io.kubernetes.pkg.apis.autoscaling.v1.ScaleSpec") + proto.RegisterType((*ScaleStatus)(nil), "k8s.io.kubernetes.pkg.apis.autoscaling.v1.ScaleStatus") +} +func (m *CrossVersionObjectReference) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *CrossVersionObjectReference) MarshalTo(data []byte) 
(int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Kind))) + i += copy(data[i:], m.Kind) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.APIVersion))) + i += copy(data[i:], m.APIVersion) + return i, nil +} + +func (m *HorizontalPodAutoscaler) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *HorizontalPodAutoscaler) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n1 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n2, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n2 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n3, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n3 + return i, nil +} + +func (m *HorizontalPodAutoscalerList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *HorizontalPodAutoscalerList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n4, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n4 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *HorizontalPodAutoscalerSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *HorizontalPodAutoscalerSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ScaleTargetRef.Size())) + n5, err := m.ScaleTargetRef.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n5 + if m.MinReplicas != nil { + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.MinReplicas)) + } + data[i] = 0x18 + i++ + i = encodeVarintGenerated(data, i, uint64(m.MaxReplicas)) + if m.TargetCPUUtilizationPercentage != nil { + data[i] = 0x20 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.TargetCPUUtilizationPercentage)) + } + return i, nil +} + +func (m *HorizontalPodAutoscalerStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *HorizontalPodAutoscalerStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ObservedGeneration != nil { + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.ObservedGeneration)) + } + if m.LastScaleTime != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, 
uint64(m.LastScaleTime.Size())) + n6, err := m.LastScaleTime.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n6 + } + data[i] = 0x18 + i++ + i = encodeVarintGenerated(data, i, uint64(m.CurrentReplicas)) + data[i] = 0x20 + i++ + i = encodeVarintGenerated(data, i, uint64(m.DesiredReplicas)) + if m.CurrentCPUUtilizationPercentage != nil { + data[i] = 0x28 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.CurrentCPUUtilizationPercentage)) + } + return i, nil +} + +func (m *Scale) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Scale) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n7, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n7 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n8, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n8 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n9, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n9 + return i, nil +} + +func (m *ScaleSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ScaleSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Replicas)) + return i, nil +} + +func (m *ScaleStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ScaleStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Replicas)) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Selector))) + i += copy(data[i:], m.Selector) + return i, nil +} + +func encodeFixed64Generated(data []byte, offset int, v uint64) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + data[offset+4] = uint8(v >> 32) + data[offset+5] = uint8(v >> 40) + data[offset+6] = uint8(v >> 48) + data[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(data []byte, offset int, v uint32) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(data []byte, offset int, v uint64) int { + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return offset + 1 +} +func (m *CrossVersionObjectReference) Size() (n int) { + var l int + _ = l + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.APIVersion) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *HorizontalPodAutoscaler) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) 
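[Editor's aside, not part of the vendored patch: encodeVarintGenerated and sovGenerated above implement the standard protobuf base-128 varint scheme, seven payload bits per byte with the high bit as a continuation flag. A standalone illustration of the same loop:]

package main

import "fmt"

func putVarint(buf []byte, offset int, v uint64) int {
	for v >= 1<<7 {
		buf[offset] = uint8(v&0x7f | 0x80) // low 7 bits, continuation bit set
		v >>= 7
		offset++
	}
	buf[offset] = uint8(v) // final byte, continuation bit clear
	return offset + 1
}

func main() {
	buf := make([]byte, 10)
	n := putVarint(buf, 0, 300)
	fmt.Printf("%x\n", buf[:n]) // ac02: 300 -> 0xAC (0b0101100 + continuation), then 0x02
}

[End of aside; the diff resumes below.]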
+ return n +} + +func (m *HorizontalPodAutoscalerList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *HorizontalPodAutoscalerSpec) Size() (n int) { + var l int + _ = l + l = m.ScaleTargetRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.MinReplicas != nil { + n += 1 + sovGenerated(uint64(*m.MinReplicas)) + } + n += 1 + sovGenerated(uint64(m.MaxReplicas)) + if m.TargetCPUUtilizationPercentage != nil { + n += 1 + sovGenerated(uint64(*m.TargetCPUUtilizationPercentage)) + } + return n +} + +func (m *HorizontalPodAutoscalerStatus) Size() (n int) { + var l int + _ = l + if m.ObservedGeneration != nil { + n += 1 + sovGenerated(uint64(*m.ObservedGeneration)) + } + if m.LastScaleTime != nil { + l = m.LastScaleTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 1 + sovGenerated(uint64(m.CurrentReplicas)) + n += 1 + sovGenerated(uint64(m.DesiredReplicas)) + if m.CurrentCPUUtilizationPercentage != nil { + n += 1 + sovGenerated(uint64(*m.CurrentCPUUtilizationPercentage)) + } + return n +} + +func (m *Scale) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ScaleSpec) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Replicas)) + return n +} + +func (m *ScaleStatus) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Replicas)) + l = len(m.Selector) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *CrossVersionObjectReference) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CrossVersionObjectReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CrossVersionObjectReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIVersion = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HorizontalPodAutoscaler) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HorizontalPodAutoscaler: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HorizontalPodAutoscaler: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HorizontalPodAutoscalerList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HorizontalPodAutoscalerList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HorizontalPodAutoscalerList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, HorizontalPodAutoscaler{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HorizontalPodAutoscalerSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := 
int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HorizontalPodAutoscalerSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HorizontalPodAutoscalerSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ScaleTargetRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ScaleTargetRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinReplicas", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.MinReplicas = &v + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxReplicas", wireType) + } + m.MaxReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.MaxReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetCPUUtilizationPercentage", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.TargetCPUUtilizationPercentage = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HorizontalPodAutoscalerStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HorizontalPodAutoscalerStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HorizontalPodAutoscalerStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + 
break + } + } + m.ObservedGeneration = &v + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastScaleTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LastScaleTime == nil { + m.LastScaleTime = &k8s_io_kubernetes_pkg_api_unversioned.Time{} + } + if err := m.LastScaleTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentReplicas", wireType) + } + m.CurrentReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.CurrentReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DesiredReplicas", wireType) + } + m.DesiredReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.DesiredReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentCPUUtilizationPercentage", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.CurrentCPUUtilizationPercentage = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Scale) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Scale: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Scale: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := 
m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ScaleSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ScaleSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ScaleSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Replicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ScaleStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ScaleStatus: wiretype end 
group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ScaleStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Replicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Selector = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(data []byte) (n int, err error) { + l := len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if data[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(data[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/autoscaling/v1/generated.proto 
b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/autoscaling/v1/generated.proto new file mode 100644 index 000000000000..7905f68a83d5 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/autoscaling/v1/generated.proto @@ -0,0 +1,131 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.kubernetes.pkg.apis.autoscaling.v1; + +import "k8s.io/kubernetes/pkg/api/resource/generated.proto"; +import "k8s.io/kubernetes/pkg/api/unversioned/generated.proto"; +import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; +import "k8s.io/kubernetes/pkg/util/intstr/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1"; + +// CrossVersionObjectReference contains enough information to let you identify the referred resource. +message CrossVersionObjectReference { + // Kind of the referent; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds" + optional string kind = 1; + + // Name of the referent; More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#names + optional string name = 2; + + // API version of the referent + optional string apiVersion = 3; +} + +// configuration of a horizontal pod autoscaler. +message HorizontalPodAutoscaler { + // Standard object metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; + + // behaviour of autoscaler. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. + optional HorizontalPodAutoscalerSpec spec = 2; + + // current information about the autoscaler. + optional HorizontalPodAutoscalerStatus status = 3; +} + +// list of horizontal pod autoscaler objects. +message HorizontalPodAutoscalerList { + // Standard list metadata. + optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; + + // list of horizontal pod autoscaler objects. + repeated HorizontalPodAutoscaler items = 2; +} + +// specification of a horizontal pod autoscaler. +message HorizontalPodAutoscalerSpec { + // reference to scaled resource; horizontal pod autoscaler will learn the current resource consumption + // and will set the desired number of pods by using its Scale subresource. + optional CrossVersionObjectReference scaleTargetRef = 1; + + // lower limit for the number of pods that can be set by the autoscaler, default 1. + optional int32 minReplicas = 2; + + // upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas. + optional int32 maxReplicas = 3; + + // target average CPU utilization (represented as a percentage of requested CPU) over all the pods; + // if not specified the default autoscaling policy will be used. 
+ optional int32 targetCPUUtilizationPercentage = 4; +} + +// current status of a horizontal pod autoscaler +message HorizontalPodAutoscalerStatus { + // most recent generation observed by this autoscaler. + optional int64 observedGeneration = 1; + + // last time the HorizontalPodAutoscaler scaled the number of pods; + // used by the autoscaler to control how often the number of pods is changed. + optional k8s.io.kubernetes.pkg.api.unversioned.Time lastScaleTime = 2; + + // current number of replicas of pods managed by this autoscaler. + optional int32 currentReplicas = 3; + + // desired number of replicas of pods managed by this autoscaler. + optional int32 desiredReplicas = 4; + + // current average CPU utilization over all pods, represented as a percentage of requested CPU, + // e.g. 70 means that an average pod is using now 70% of its requested CPU. + optional int32 currentCPUUtilizationPercentage = 5; +} + +// Scale represents a scaling request for a resource. +message Scale { + // Standard object metadata; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata. + optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; + + // defines the behavior of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. + optional ScaleSpec spec = 2; + + // current status of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. Read-only. + optional ScaleStatus status = 3; +} + +// ScaleSpec describes the attributes of a scale subresource. +message ScaleSpec { + // desired number of instances for the scaled object. + optional int32 replicas = 1; +} + +// ScaleStatus represents the current status of a scale subresource. +message ScaleStatus { + // actual number of observed instances of the scaled object. + optional int32 replicas = 1; + + // label query over pods that should match the replicas count. This is same + // as the label selector but in the string format to avoid introspection + // by clients. The string will be in the same format as the query-param syntax. + // More info about label selectors: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors + optional string selector = 2; +} + diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/autoscaling/v1/register.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/autoscaling/v1/register.go index 5af7611c57fd..dc3cff1576e7 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/autoscaling/v1/register.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/autoscaling/v1/register.go @@ -20,6 +20,7 @@ import ( "k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/runtime" + versionedwatch "k8s.io/kubernetes/pkg/watch/versioned" ) // GroupName is the group name use in this package @@ -31,7 +32,6 @@ var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: "v1 func AddToScheme(scheme *runtime.Scheme) { addKnownTypes(scheme) addDefaultingFuncs(scheme) - addConversionFuncs(scheme) } // Adds the list of known types to api.Scheme. 
@@ -42,6 +42,7 @@ func addKnownTypes(scheme *runtime.Scheme) { &Scale{}, &v1.ListOptions{}, ) + versionedwatch.AddToGroupVersion(scheme, SchemeGroupVersion) } func (obj *HorizontalPodAutoscaler) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/autoscaling/v1/types.generated.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/autoscaling/v1/types.generated.go index 1e5a195c22ee..d8401bb8bccb 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/autoscaling/v1/types.generated.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/autoscaling/v1/types.generated.go @@ -1,5 +1,5 @@ /* -Copyright 2015 The Kubernetes Authors All rights reserved. +Copyright 2016 The Kubernetes Authors All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -2578,7 +2578,7 @@ func (x codecSelfer1234) decSliceHorizontalPodAutoscaler(v *[]HorizontalPodAutos yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 296) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 344) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/autoscaling/v1/types.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/autoscaling/v1/types.go index 20b96728d22a..227e25703d59 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/autoscaling/v1/types.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/autoscaling/v1/types.go @@ -23,98 +23,100 @@ import ( // CrossVersionObjectReference contains enough information to let you identify the referred resource. type CrossVersionObjectReference struct { - // Kind of the referent; More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#types-kinds" - Kind string `json:"kind"` - // Name of the referent; More info: http://releases.k8s.io/release-1.2/docs/user-guide/identifiers.md#names - Name string `json:"name"` + // Kind of the referent; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds" + Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` + // Name of the referent; More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#names + Name string `json:"name" protobuf:"bytes,2,opt,name=name"` // API version of the referent - APIVersion string `json:"apiVersion,omitempty"` + APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,3,opt,name=apiVersion"` } // specification of a horizontal pod autoscaler. type HorizontalPodAutoscalerSpec struct { // reference to scaled resource; horizontal pod autoscaler will learn the current resource consumption // and will set the desired number of pods by using its Scale subresource. - ScaleTargetRef CrossVersionObjectReference `json:"scaleTargetRef"` + ScaleTargetRef CrossVersionObjectReference `json:"scaleTargetRef" protobuf:"bytes,1,opt,name=scaleTargetRef"` // lower limit for the number of pods that can be set by the autoscaler, default 1. - MinReplicas *int32 `json:"minReplicas,omitempty"` + MinReplicas *int32 `json:"minReplicas,omitempty" protobuf:"varint,2,opt,name=minReplicas"` // upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas. 
- MaxReplicas int32 `json:"maxReplicas"` + MaxReplicas int32 `json:"maxReplicas" protobuf:"varint,3,opt,name=maxReplicas"` // target average CPU utilization (represented as a percentage of requested CPU) over all the pods; // if not specified the default autoscaling policy will be used. - TargetCPUUtilizationPercentage *int32 `json:"targetCPUUtilizationPercentage,omitempty"` + TargetCPUUtilizationPercentage *int32 `json:"targetCPUUtilizationPercentage,omitempty" protobuf:"varint,4,opt,name=targetCPUUtilizationPercentage"` } // current status of a horizontal pod autoscaler type HorizontalPodAutoscalerStatus struct { // most recent generation observed by this autoscaler. - ObservedGeneration *int64 `json:"observedGeneration,omitempty"` + ObservedGeneration *int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` // last time the HorizontalPodAutoscaler scaled the number of pods; // used by the autoscaler to control how often the number of pods is changed. - LastScaleTime *unversioned.Time `json:"lastScaleTime,omitempty"` + LastScaleTime *unversioned.Time `json:"lastScaleTime,omitempty" protobuf:"bytes,2,opt,name=lastScaleTime"` // current number of replicas of pods managed by this autoscaler. - CurrentReplicas int32 `json:"currentReplicas"` + CurrentReplicas int32 `json:"currentReplicas" protobuf:"varint,3,opt,name=currentReplicas"` // desired number of replicas of pods managed by this autoscaler. - DesiredReplicas int32 `json:"desiredReplicas"` + DesiredReplicas int32 `json:"desiredReplicas" protobuf:"varint,4,opt,name=desiredReplicas"` // current average CPU utilization over all pods, represented as a percentage of requested CPU, // e.g. 70 means that an average pod is using now 70% of its requested CPU. - CurrentCPUUtilizationPercentage *int32 `json:"currentCPUUtilizationPercentage,omitempty"` + CurrentCPUUtilizationPercentage *int32 `json:"currentCPUUtilizationPercentage,omitempty" protobuf:"varint,5,opt,name=currentCPUUtilizationPercentage"` } +// +genclient=true + // configuration of a horizontal pod autoscaler. type HorizontalPodAutoscaler struct { unversioned.TypeMeta `json:",inline"` - // Standard object metadata. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata - v1.ObjectMeta `json:"metadata,omitempty"` + // Standard object metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // behaviour of autoscaler. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#spec-and-status. - Spec HorizontalPodAutoscalerSpec `json:"spec,omitempty"` + // behaviour of autoscaler. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. + Spec HorizontalPodAutoscalerSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // current information about the autoscaler. - Status HorizontalPodAutoscalerStatus `json:"status,omitempty"` + Status HorizontalPodAutoscalerStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } // list of horizontal pod autoscaler objects. type HorizontalPodAutoscalerList struct { unversioned.TypeMeta `json:",inline"` // Standard list metadata. - unversioned.ListMeta `json:"metadata,omitempty"` + unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // list of horizontal pod autoscaler objects. 
- Items []HorizontalPodAutoscaler `json:"items"` + Items []HorizontalPodAutoscaler `json:"items" protobuf:"bytes,2,rep,name=items"` } // Scale represents a scaling request for a resource. type Scale struct { unversioned.TypeMeta `json:",inline"` - // Standard object metadata; More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata. - v1.ObjectMeta `json:"metadata,omitempty"` + // Standard object metadata; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata. + v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // defines the behavior of the scale. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#spec-and-status. - Spec ScaleSpec `json:"spec,omitempty"` + // defines the behavior of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. + Spec ScaleSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - // current status of the scale. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#spec-and-status. Read-only. - Status ScaleStatus `json:"status,omitempty"` + // current status of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. Read-only. + Status ScaleStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } // ScaleSpec describes the attributes of a scale subresource. type ScaleSpec struct { // desired number of instances for the scaled object. - Replicas int32 `json:"replicas,omitempty"` + Replicas int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` } // ScaleStatus represents the current status of a scale subresource. type ScaleStatus struct { // actual number of observed instances of the scaled object. - Replicas int32 `json:"replicas"` + Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"` // label query over pods that should match the replicas count. This is same // as the label selector but in the string format to avoid introspection // by clients. The string will be in the same format as the query-param syntax. - // More info about label selectors: http://releases.k8s.io/release-1.2/docs/user-guide/labels.md#label-selectors - Selector string `json:"selector,omitempty"` + // More info about label selectors: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors + Selector string `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"` } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/autoscaling/v1/types_swagger_doc_generated.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/autoscaling/v1/types_swagger_doc_generated.go index 537dafba4897..6e63745709cf 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/autoscaling/v1/types_swagger_doc_generated.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/autoscaling/v1/types_swagger_doc_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2015 The Kubernetes Authors All rights reserved. +Copyright 2016 The Kubernetes Authors All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -29,8 +29,8 @@ package v1 // AUTO-GENERATED FUNCTIONS START HERE var map_CrossVersionObjectReference = map[string]string{ "": "CrossVersionObjectReference contains enough information to let you identify the referred resource.", - "kind": "Kind of the referent; More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#types-kinds\"", - "name": "Name of the referent; More info: http://releases.k8s.io/release-1.2/docs/user-guide/identifiers.md#names", + "kind": "Kind of the referent; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds\"", + "name": "Name of the referent; More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#names", "apiVersion": "API version of the referent", } @@ -40,8 +40,8 @@ func (CrossVersionObjectReference) SwaggerDoc() map[string]string { var map_HorizontalPodAutoscaler = map[string]string{ "": "configuration of a horizontal pod autoscaler.", - "metadata": "Standard object metadata. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata", - "spec": "behaviour of autoscaler. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#spec-and-status.", + "metadata": "Standard object metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "spec": "behaviour of autoscaler. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status.", "status": "current information about the autoscaler.", } @@ -86,9 +86,9 @@ func (HorizontalPodAutoscalerStatus) SwaggerDoc() map[string]string { var map_Scale = map[string]string{ "": "Scale represents a scaling request for a resource.", - "metadata": "Standard object metadata; More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata.", - "spec": "defines the behavior of the scale. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#spec-and-status.", - "status": "current status of the scale. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#spec-and-status. Read-only.", + "metadata": "Standard object metadata; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata.", + "spec": "defines the behavior of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status.", + "status": "current status of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. Read-only.", } func (Scale) SwaggerDoc() map[string]string { @@ -107,7 +107,7 @@ func (ScaleSpec) SwaggerDoc() map[string]string { var map_ScaleStatus = map[string]string{ "": "ScaleStatus represents the current status of a scale subresource.", "replicas": "actual number of observed instances of the scaled object.", - "selector": "label query over pods that should match the replicas count. This is same as the label selector but in the string format to avoid introspection by clients. The string will be in the same format as the query-param syntax. More info about label selectors: http://releases.k8s.io/release-1.2/docs/user-guide/labels.md#label-selectors", + "selector": "label query over pods that should match the replicas count. This is same as the label selector but in the string format to avoid introspection by clients. The string will be in the same format as the query-param syntax. 
More info about label selectors: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors", } func (ScaleStatus) SwaggerDoc() map[string]string { diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/autoscaling/validation/validation.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/autoscaling/validation/validation.go index 8b9a21a9de79..3432e3cb1c40 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/autoscaling/validation/validation.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/autoscaling/validation/validation.go @@ -17,8 +17,12 @@ limitations under the License. package validation import ( + "encoding/json" + apivalidation "k8s.io/kubernetes/pkg/api/validation" "k8s.io/kubernetes/pkg/apis/autoscaling" + "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/pkg/controller/podautoscaler" "k8s.io/kubernetes/pkg/util/validation/field" ) @@ -32,3 +36,93 @@ func ValidateScale(scale *autoscaling.Scale) field.ErrorList { return allErrs } + +// ValidateHorizontalPodAutoscalerName can be used to check whether the given autoscaler name is valid. +// Prefix indicates this name will be used as part of generation, in which case trailing dashes are allowed. +var ValidateHorizontalPodAutoscalerName = apivalidation.ValidateReplicationControllerName + +func validateHorizontalPodAutoscalerSpec(autoscaler autoscaling.HorizontalPodAutoscalerSpec, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if autoscaler.MinReplicas != nil && *autoscaler.MinReplicas < 1 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("minReplicas"), *autoscaler.MinReplicas, "must be greater than 0")) + } + if autoscaler.MaxReplicas < 1 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("maxReplicas"), autoscaler.MaxReplicas, "must be greater than 0")) + } + if autoscaler.MinReplicas != nil && autoscaler.MaxReplicas < *autoscaler.MinReplicas { + allErrs = append(allErrs, field.Invalid(fldPath.Child("maxReplicas"), autoscaler.MaxReplicas, "must be greater than or equal to `minReplicas`")) + } + if autoscaler.TargetCPUUtilizationPercentage != nil && *autoscaler.TargetCPUUtilizationPercentage < 1 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("targetCPUUtilizationPercentage"), autoscaler.TargetCPUUtilizationPercentage, "must be greater than 0")) + } + if refErrs := ValidateCrossVersionObjectReference(autoscaler.ScaleTargetRef, fldPath.Child("scaleTargetRef")); len(refErrs) > 0 { + allErrs = append(allErrs, refErrs...)
+ } + return allErrs +} + +func ValidateCrossVersionObjectReference(ref autoscaling.CrossVersionObjectReference, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(ref.Kind) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("kind"), "")) + } else { + for _, msg := range apivalidation.IsValidPathSegmentName(ref.Kind) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("kind"), ref.Kind, msg)) + } + } + + if len(ref.Name) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("name"), "")) + } else { + for _, msg := range apivalidation.IsValidPathSegmentName(ref.Name) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), ref.Name, msg)) + } + } + + return allErrs +} + +func validateHorizontalPodAutoscalerAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if annotationValue, found := annotations[podautoscaler.HpaCustomMetricsTargetAnnotationName]; found { + // Try to parse the annotation + var targetList extensions.CustomMetricTargetList + if err := json.Unmarshal([]byte(annotationValue), &targetList); err != nil { + allErrs = append(allErrs, field.Invalid(fldPath.Child("annotations"), annotations, "failed to parse custom metrics target annotation")) + } else { + if len(targetList.Items) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("annotations", "items"), "custom metrics target must not be empty")) + } + for _, target := range targetList.Items { + if target.Name == "" { + allErrs = append(allErrs, field.Required(fldPath.Child("annotations", "items", "name"), "missing custom metric target name")) + } + if target.TargetValue.MilliValue() <= 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("annotations", "items", "value"), target.TargetValue, "custom metric target value must be greater than 0")) + } + } + } + } + return allErrs +} + +func ValidateHorizontalPodAutoscaler(autoscaler *autoscaling.HorizontalPodAutoscaler) field.ErrorList { + allErrs := apivalidation.ValidateObjectMeta(&autoscaler.ObjectMeta, true, ValidateHorizontalPodAutoscalerName, field.NewPath("metadata")) + allErrs = append(allErrs, validateHorizontalPodAutoscalerSpec(autoscaler.Spec, field.NewPath("spec"))...) + allErrs = append(allErrs, validateHorizontalPodAutoscalerAnnotations(autoscaler.Annotations, field.NewPath("metadata"))...) + return allErrs +} + +func ValidateHorizontalPodAutoscalerUpdate(newAutoscaler, oldAutoscaler *autoscaling.HorizontalPodAutoscaler) field.ErrorList { + allErrs := apivalidation.ValidateObjectMetaUpdate(&newAutoscaler.ObjectMeta, &oldAutoscaler.ObjectMeta, field.NewPath("metadata")) + allErrs = append(allErrs, validateHorizontalPodAutoscalerSpec(newAutoscaler.Spec, field.NewPath("spec"))...) + return allErrs +} + +func ValidateHorizontalPodAutoscalerStatusUpdate(newAutoscaler, oldAutoscaler *autoscaling.HorizontalPodAutoscaler) field.ErrorList { + allErrs := apivalidation.ValidateObjectMetaUpdate(&newAutoscaler.ObjectMeta, &oldAutoscaler.ObjectMeta, field.NewPath("metadata")) + status := newAutoscaler.Status + allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.CurrentReplicas), field.NewPath("status", "currentReplicas"))...) + allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.DesiredReplicas), field.NewPath("status", "desiredReplicas"))...)
+ return allErrs +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/autoscaling/validation/validation_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/autoscaling/validation/validation_test.go new file mode 100644 index 000000000000..06d93c9eaa86 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/autoscaling/validation/validation_test.go @@ -0,0 +1,338 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package validation + +import ( + "strings" + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/autoscaling" + "k8s.io/kubernetes/pkg/controller/podautoscaler" +) + +func TestValidateScale(t *testing.T) { + successCases := []autoscaling.Scale{ + { + ObjectMeta: api.ObjectMeta{ + Name: "frontend", + Namespace: api.NamespaceDefault, + }, + Spec: autoscaling.ScaleSpec{ + Replicas: 1, + }, + }, + { + ObjectMeta: api.ObjectMeta{ + Name: "frontend", + Namespace: api.NamespaceDefault, + }, + Spec: autoscaling.ScaleSpec{ + Replicas: 10, + }, + }, + { + ObjectMeta: api.ObjectMeta{ + Name: "frontend", + Namespace: api.NamespaceDefault, + }, + Spec: autoscaling.ScaleSpec{ + Replicas: 0, + }, + }, + } + + for _, successCase := range successCases { + if errs := ValidateScale(&successCase); len(errs) != 0 { + t.Errorf("expected success: %v", errs) + } + } + + errorCases := []struct { + scale autoscaling.Scale + msg string + }{ + { + scale: autoscaling.Scale{ + ObjectMeta: api.ObjectMeta{ + Name: "frontend", + Namespace: api.NamespaceDefault, + }, + Spec: autoscaling.ScaleSpec{ + Replicas: -1, + }, + }, + msg: "must be greater than or equal to 0", + }, + } + + for _, c := range errorCases { + if errs := ValidateScale(&c.scale); len(errs) == 0 { + t.Errorf("expected failure for %s", c.msg) + } else if !strings.Contains(errs[0].Error(), c.msg) { + t.Errorf("unexpected error: %v, expected: %s", errs[0], c.msg) + } + } +} + +func TestValidateHorizontalPodAutoscaler(t *testing.T) { + successCases := []autoscaling.HorizontalPodAutoscaler{ + { + ObjectMeta: api.ObjectMeta{ + Name: "myautoscaler", + Namespace: api.NamespaceDefault, + }, + Spec: autoscaling.HorizontalPodAutoscalerSpec{ + ScaleTargetRef: autoscaling.CrossVersionObjectReference{ + Kind: "ReplicationController", + Name: "myrc", + }, + MinReplicas: newInt32(1), + MaxReplicas: 5, + TargetCPUUtilizationPercentage: newInt32(70), + }, + }, + { + ObjectMeta: api.ObjectMeta{ + Name: "myautoscaler", + Namespace: api.NamespaceDefault, + }, + Spec: autoscaling.HorizontalPodAutoscalerSpec{ + ScaleTargetRef: autoscaling.CrossVersionObjectReference{ + Kind: "ReplicationController", + Name: "myrc", + }, + MinReplicas: newInt32(1), + MaxReplicas: 5, + }, + }, + { + ObjectMeta: api.ObjectMeta{ + Name: "myautoscaler", + Namespace: api.NamespaceDefault, + Annotations: map[string]string{ + podautoscaler.HpaCustomMetricsTargetAnnotationName: "{\"items\":[{\"name\":\"qps\",\"value\":\"20\"}]}", + }, + }, + Spec: autoscaling.HorizontalPodAutoscalerSpec{ + 
ScaleTargetRef: autoscaling.CrossVersionObjectReference{ + Kind: "ReplicationController", + Name: "myrc", + }, + MinReplicas: newInt32(1), + MaxReplicas: 5, + }, + }, + } + for _, successCase := range successCases { + if errs := ValidateHorizontalPodAutoscaler(&successCase); len(errs) != 0 { + t.Errorf("expected success: %v", errs) + } + } + + errorCases := []struct { + horizontalPodAutoscaler autoscaling.HorizontalPodAutoscaler + msg string + }{ + { + horizontalPodAutoscaler: autoscaling.HorizontalPodAutoscaler{ + ObjectMeta: api.ObjectMeta{Name: "myautoscaler", Namespace: api.NamespaceDefault}, + Spec: autoscaling.HorizontalPodAutoscalerSpec{ + ScaleTargetRef: autoscaling.CrossVersionObjectReference{Name: "myrc"}, + MinReplicas: newInt32(1), + MaxReplicas: 5, + TargetCPUUtilizationPercentage: newInt32(70), + }, + }, + msg: "scaleTargetRef.kind: Required", + }, + { + horizontalPodAutoscaler: autoscaling.HorizontalPodAutoscaler{ + ObjectMeta: api.ObjectMeta{Name: "myautoscaler", Namespace: api.NamespaceDefault}, + Spec: autoscaling.HorizontalPodAutoscalerSpec{ + ScaleTargetRef: autoscaling.CrossVersionObjectReference{Kind: "..", Name: "myrc"}, + MinReplicas: newInt32(1), + MaxReplicas: 5, + TargetCPUUtilizationPercentage: newInt32(70), + }, + }, + msg: "scaleTargetRef.kind: Invalid", + }, + { + horizontalPodAutoscaler: autoscaling.HorizontalPodAutoscaler{ + ObjectMeta: api.ObjectMeta{Name: "myautoscaler", Namespace: api.NamespaceDefault}, + Spec: autoscaling.HorizontalPodAutoscalerSpec{ + ScaleTargetRef: autoscaling.CrossVersionObjectReference{Kind: "ReplicationController"}, + MinReplicas: newInt32(1), + MaxReplicas: 5, + TargetCPUUtilizationPercentage: newInt32(70), + }, + }, + msg: "scaleTargetRef.name: Required", + }, + { + horizontalPodAutoscaler: autoscaling.HorizontalPodAutoscaler{ + ObjectMeta: api.ObjectMeta{Name: "myautoscaler", Namespace: api.NamespaceDefault}, + Spec: autoscaling.HorizontalPodAutoscalerSpec{ + ScaleTargetRef: autoscaling.CrossVersionObjectReference{Kind: "ReplicationController", Name: ".."}, + MinReplicas: newInt32(1), + MaxReplicas: 5, + TargetCPUUtilizationPercentage: newInt32(70), + }, + }, + msg: "scaleTargetRef.name: Invalid", + }, + { + horizontalPodAutoscaler: autoscaling.HorizontalPodAutoscaler{ + ObjectMeta: api.ObjectMeta{ + Name: "myautoscaler", + Namespace: api.NamespaceDefault, + }, + Spec: autoscaling.HorizontalPodAutoscalerSpec{ + ScaleTargetRef: autoscaling.CrossVersionObjectReference{}, + MinReplicas: newInt32(-1), + MaxReplicas: 5, + }, + }, + msg: "must be greater than 0", + }, + { + horizontalPodAutoscaler: autoscaling.HorizontalPodAutoscaler{ + ObjectMeta: api.ObjectMeta{ + Name: "myautoscaler", + Namespace: api.NamespaceDefault, + }, + Spec: autoscaling.HorizontalPodAutoscalerSpec{ + ScaleTargetRef: autoscaling.CrossVersionObjectReference{}, + MinReplicas: newInt32(7), + MaxReplicas: 5, + }, + }, + msg: "must be greater than or equal to `minReplicas`", + }, + { + horizontalPodAutoscaler: autoscaling.HorizontalPodAutoscaler{ + ObjectMeta: api.ObjectMeta{ + Name: "myautoscaler", + Namespace: api.NamespaceDefault, + }, + Spec: autoscaling.HorizontalPodAutoscalerSpec{ + ScaleTargetRef: autoscaling.CrossVersionObjectReference{}, + MinReplicas: newInt32(1), + MaxReplicas: 5, + TargetCPUUtilizationPercentage: newInt32(-70), + }, + }, + msg: "must be greater than 0", + }, + { + horizontalPodAutoscaler: autoscaling.HorizontalPodAutoscaler{ + ObjectMeta: api.ObjectMeta{ + Name: "myautoscaler", + Namespace: api.NamespaceDefault, + Annotations: 
map[string]string{ + podautoscaler.HpaCustomMetricsTargetAnnotationName: "broken", + }, + }, + Spec: autoscaling.HorizontalPodAutoscalerSpec{ + ScaleTargetRef: autoscaling.CrossVersionObjectReference{ + Kind: "ReplicationController", + Name: "myrc", + }, + MinReplicas: newInt32(1), + MaxReplicas: 5, + }, + }, + msg: "failed to parse custom metrics target annotation", + }, + { + horizontalPodAutoscaler: autoscaling.HorizontalPodAutoscaler{ + ObjectMeta: api.ObjectMeta{ + Name: "myautoscaler", + Namespace: api.NamespaceDefault, + Annotations: map[string]string{ + podautoscaler.HpaCustomMetricsTargetAnnotationName: "{}", + }, + }, + Spec: autoscaling.HorizontalPodAutoscalerSpec{ + ScaleTargetRef: autoscaling.CrossVersionObjectReference{ + Kind: "ReplicationController", + Name: "myrc", + }, + MinReplicas: newInt32(1), + MaxReplicas: 5, + }, + }, + msg: "custom metrics target must not be empty", + }, + { + horizontalPodAutoscaler: autoscaling.HorizontalPodAutoscaler{ + ObjectMeta: api.ObjectMeta{ + Name: "myautoscaler", + Namespace: api.NamespaceDefault, + Annotations: map[string]string{ + podautoscaler.HpaCustomMetricsTargetAnnotationName: "{\"items\":[{\"value\":\"20\"}]}", + }, + }, + Spec: autoscaling.HorizontalPodAutoscalerSpec{ + ScaleTargetRef: autoscaling.CrossVersionObjectReference{ + Kind: "ReplicationController", + Name: "myrc", + }, + MinReplicas: newInt32(1), + MaxReplicas: 5, + }, + }, + msg: "missing custom metric target name", + }, + { + horizontalPodAutoscaler: autoscaling.HorizontalPodAutoscaler{ + ObjectMeta: api.ObjectMeta{ + Name: "myautoscaler", + Namespace: api.NamespaceDefault, + Annotations: map[string]string{ + podautoscaler.HpaCustomMetricsTargetAnnotationName: "{\"items\":[{\"name\":\"qps\",\"value\":\"0\"}]}", + }, + }, + Spec: autoscaling.HorizontalPodAutoscalerSpec{ + ScaleTargetRef: autoscaling.CrossVersionObjectReference{ + Kind: "ReplicationController", + Name: "myrc", + }, + MinReplicas: newInt32(1), + MaxReplicas: 5, + }, + }, + msg: "custom metric target value must be greater than 0", + }, + } + + for _, c := range errorCases { + errs := ValidateHorizontalPodAutoscaler(&c.horizontalPodAutoscaler) + if len(errs) == 0 { + t.Errorf("expected failure for %q", c.msg) + } else if !strings.Contains(errs[0].Error(), c.msg) { + t.Errorf("unexpected error: %q, expected: %q", errs[0], c.msg) + } + } +} + +func newInt32(val int32) *int32 { + p := new(int32) + *p = val + return p +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/deep_copy_generated.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/deep_copy_generated.go index 55d346a61932..31737e07b357 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/deep_copy_generated.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/deep_copy_generated.go @@ -1,5 +1,7 @@ +// +build !ignore_autogenerated + /* -Copyright 2015 The Kubernetes Authors All rights reserved. +Copyright 2016 The Kubernetes Authors All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,16 +16,263 @@ See the License for the specific language governing permissions and limitations under the License. */ -// DO NOT EDIT. THIS FILE IS AUTO-GENERATED BY $KUBEROOT/hack/update-generated-deep-copies.sh. +// This file was autogenerated by deepcopy-gen. Do not edit it manually! 
package batch -import api "k8s.io/kubernetes/pkg/api" +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + conversion "k8s.io/kubernetes/pkg/conversion" +) func init() { - err := api.Scheme.AddGeneratedDeepCopyFuncs() - if err != nil { + if err := api.Scheme.AddGeneratedDeepCopyFuncs( + DeepCopy_batch_Job, + DeepCopy_batch_JobCondition, + DeepCopy_batch_JobList, + DeepCopy_batch_JobSpec, + DeepCopy_batch_JobStatus, + DeepCopy_batch_JobTemplate, + DeepCopy_batch_JobTemplateSpec, + DeepCopy_batch_ScheduledJob, + DeepCopy_batch_ScheduledJobList, + DeepCopy_batch_ScheduledJobSpec, + DeepCopy_batch_ScheduledJobStatus, + ); err != nil { // if one of the deep copy functions is malformed, detect it immediately. panic(err) } } + +func DeepCopy_batch_Job(in Job, out *Job, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + return err + } + if err := DeepCopy_batch_JobSpec(in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_batch_JobStatus(in.Status, &out.Status, c); err != nil { + return err + } + return nil +} + +func DeepCopy_batch_JobCondition(in JobCondition, out *JobCondition, c *conversion.Cloner) error { + out.Type = in.Type + out.Status = in.Status + if err := unversioned.DeepCopy_unversioned_Time(in.LastProbeTime, &out.LastProbeTime, c); err != nil { + return err + } + if err := unversioned.DeepCopy_unversioned_Time(in.LastTransitionTime, &out.LastTransitionTime, c); err != nil { + return err + } + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +func DeepCopy_batch_JobList(in JobList, out *JobList, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + return err + } + if in.Items != nil { + in, out := in.Items, &out.Items + *out = make([]Job, len(in)) + for i := range in { + if err := DeepCopy_batch_Job(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func DeepCopy_batch_JobSpec(in JobSpec, out *JobSpec, c *conversion.Cloner) error { + if in.Parallelism != nil { + in, out := in.Parallelism, &out.Parallelism + *out = new(int32) + **out = *in + } else { + out.Parallelism = nil + } + if in.Completions != nil { + in, out := in.Completions, &out.Completions + *out = new(int32) + **out = *in + } else { + out.Completions = nil + } + if in.ActiveDeadlineSeconds != nil { + in, out := in.ActiveDeadlineSeconds, &out.ActiveDeadlineSeconds + *out = new(int64) + **out = *in + } else { + out.ActiveDeadlineSeconds = nil + } + if in.Selector != nil { + in, out := in.Selector, &out.Selector + *out = new(unversioned.LabelSelector) + if err := unversioned.DeepCopy_unversioned_LabelSelector(*in, *out, c); err != nil { + return err + } + } else { + out.Selector = nil + } + if in.ManualSelector != nil { + in, out := in.ManualSelector, &out.ManualSelector + *out = new(bool) + **out = *in + } else { + out.ManualSelector = nil + } + if err := api.DeepCopy_api_PodTemplateSpec(in.Template, &out.Template, c); err != nil { + return err + } + return nil +} + +func DeepCopy_batch_JobStatus(in JobStatus, out *JobStatus, c *conversion.Cloner) error { + if in.Conditions != nil { + in, out := in.Conditions, 
&out.Conditions + *out = make([]JobCondition, len(in)) + for i := range in { + if err := DeepCopy_batch_JobCondition(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Conditions = nil + } + if in.StartTime != nil { + in, out := in.StartTime, &out.StartTime + *out = new(unversioned.Time) + if err := unversioned.DeepCopy_unversioned_Time(*in, *out, c); err != nil { + return err + } + } else { + out.StartTime = nil + } + if in.CompletionTime != nil { + in, out := in.CompletionTime, &out.CompletionTime + *out = new(unversioned.Time) + if err := unversioned.DeepCopy_unversioned_Time(*in, *out, c); err != nil { + return err + } + } else { + out.CompletionTime = nil + } + out.Active = in.Active + out.Succeeded = in.Succeeded + out.Failed = in.Failed + return nil +} + +func DeepCopy_batch_JobTemplate(in JobTemplate, out *JobTemplate, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + return err + } + if err := DeepCopy_batch_JobTemplateSpec(in.Template, &out.Template, c); err != nil { + return err + } + return nil +} + +func DeepCopy_batch_JobTemplateSpec(in JobTemplateSpec, out *JobTemplateSpec, c *conversion.Cloner) error { + if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + return err + } + if err := DeepCopy_batch_JobSpec(in.Spec, &out.Spec, c); err != nil { + return err + } + return nil +} + +func DeepCopy_batch_ScheduledJob(in ScheduledJob, out *ScheduledJob, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + return err + } + if err := DeepCopy_batch_ScheduledJobSpec(in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_batch_ScheduledJobStatus(in.Status, &out.Status, c); err != nil { + return err + } + return nil +} + +func DeepCopy_batch_ScheduledJobList(in ScheduledJobList, out *ScheduledJobList, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + return err + } + if in.Items != nil { + in, out := in.Items, &out.Items + *out = make([]ScheduledJob, len(in)) + for i := range in { + if err := DeepCopy_batch_ScheduledJob(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func DeepCopy_batch_ScheduledJobSpec(in ScheduledJobSpec, out *ScheduledJobSpec, c *conversion.Cloner) error { + out.Schedule = in.Schedule + if in.StartingDeadlineSeconds != nil { + in, out := in.StartingDeadlineSeconds, &out.StartingDeadlineSeconds + *out = new(int64) + **out = *in + } else { + out.StartingDeadlineSeconds = nil + } + out.ConcurrencyPolicy = in.ConcurrencyPolicy + out.Suspend = in.Suspend + if err := DeepCopy_batch_JobTemplateSpec(in.JobTemplate, &out.JobTemplate, c); err != nil { + return err + } + return nil +} + +func DeepCopy_batch_ScheduledJobStatus(in ScheduledJobStatus, out *ScheduledJobStatus, c *conversion.Cloner) error { + if in.Active != nil { + in, out := in.Active, &out.Active + *out = make([]api.ObjectReference, len(in)) + for i := range in { + if err := api.DeepCopy_api_ObjectReference(in[i], 
&(*out)[i], c); err != nil { + return err + } + } + } else { + out.Active = nil + } + if in.LastScheduleTime != nil { + in, out := in.LastScheduleTime, &out.LastScheduleTime + *out = new(unversioned.Time) + if err := unversioned.DeepCopy_unversioned_Time(*in, *out, c); err != nil { + return err + } + } else { + out.LastScheduleTime = nil + } + return nil +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/install/install.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/install/install.go index 830020a93768..9d1a88603b8f 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/install/install.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/install/install.go @@ -30,6 +30,7 @@ import ( "k8s.io/kubernetes/pkg/apimachinery/registered" "k8s.io/kubernetes/pkg/apis/batch" "k8s.io/kubernetes/pkg/apis/batch/v1" + "k8s.io/kubernetes/pkg/apis/batch/v2alpha1" "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/util/sets" ) @@ -39,7 +40,7 @@ const importPrefix = "k8s.io/kubernetes/pkg/apis/batch" var accessor = meta.NewAccessor() // availableVersions lists all known external versions for this group from most preferred to least preferred -var availableVersions = []unversioned.GroupVersion{v1.SchemeGroupVersion} +var availableVersions = []unversioned.GroupVersion{v1.SchemeGroupVersion, v2alpha1.SchemeGroupVersion} func init() { registered.RegisterVersions(availableVersions) @@ -106,6 +107,11 @@ func interfacesFor(version unversioned.GroupVersion) (*meta.VersionInterfaces, e ObjectConvertor: api.Scheme, MetadataAccessor: accessor, }, nil + case v2alpha1.SchemeGroupVersion: + return &meta.VersionInterfaces{ + ObjectConvertor: api.Scheme, + MetadataAccessor: accessor, + }, nil default: g, _ := registered.Group(batch.GroupName) return nil, fmt.Errorf("unsupported storage version: %s (valid: %v)", version, g.GroupVersions) @@ -124,6 +130,8 @@ func addVersionsToScheme(externalVersions ...unversioned.GroupVersion) { switch v { case v1.SchemeGroupVersion: v1.AddToScheme(api.Scheme) + case v2alpha1.SchemeGroupVersion: + v2alpha1.AddToScheme(api.Scheme) } } } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/register.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/register.go index a302fe7514b8..8406f9ffbaed 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/register.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/register.go @@ -19,7 +19,6 @@ package batch import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/runtime" ) @@ -47,8 +46,17 @@ func AddToScheme(scheme *runtime.Scheme) { // Adds the list of known types to api.Scheme. 
func addKnownTypes(scheme *runtime.Scheme) { scheme.AddKnownTypes(SchemeGroupVersion, - &extensions.Job{}, - &extensions.JobList{}, + &Job{}, + &JobList{}, + &JobTemplate{}, + &ScheduledJob{}, + &ScheduledJobList{}, &api.ListOptions{}, ) } + +func (obj *Job) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } +func (obj *JobList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } +func (obj *JobTemplate) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } +func (obj *ScheduledJob) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } +func (obj *ScheduledJobList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/types.generated.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/types.generated.go new file mode 100644 index 000000000000..68605cc67ad0 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/types.generated.go @@ -0,0 +1,4641 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// ************************************************************ +// DO NOT EDIT. +// THIS FILE IS AUTO-GENERATED BY codecgen. +// ************************************************************ + +package batch + +import ( + "errors" + "fmt" + codec1978 "github.com/ugorji/go/codec" + pkg2_api "k8s.io/kubernetes/pkg/api" + pkg4_resource "k8s.io/kubernetes/pkg/api/resource" + pkg1_unversioned "k8s.io/kubernetes/pkg/api/unversioned" + pkg3_types "k8s.io/kubernetes/pkg/types" + pkg5_intstr "k8s.io/kubernetes/pkg/util/intstr" + "reflect" + "runtime" + time "time" +) + +const ( + // ----- content types ---- + codecSelferC_UTF81234 = 1 + codecSelferC_RAW1234 = 0 + // ----- value types used ---- + codecSelferValueTypeArray1234 = 10 + codecSelferValueTypeMap1234 = 9 + // ----- containerStateValues ---- + codecSelfer_containerMapKey1234 = 2 + codecSelfer_containerMapValue1234 = 3 + codecSelfer_containerMapEnd1234 = 4 + codecSelfer_containerArrayElem1234 = 6 + codecSelfer_containerArrayEnd1234 = 7 +) + +var ( + codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) + codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) +) + +type codecSelfer1234 struct{} + +func init() { + if codec1978.GenVersion != 5 { + _, file, _, _ := runtime.Caller(0) + err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. 
Re-generate file: %v", + 5, codec1978.GenVersion, file) + panic(err) + } + if false { // reference the types, but skip this branch at build/run time + var v0 pkg2_api.ObjectMeta + var v1 pkg4_resource.Quantity + var v2 pkg1_unversioned.TypeMeta + var v3 pkg3_types.UID + var v4 pkg5_intstr.IntOrString + var v5 time.Time + _, _, _, _, _, _ = v0, v1, v2, v3, v4, v5 + } +} + +func (x *Job) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = true + yyq2[1] = true + yyq2[2] = true + yyq2[3] = x.Kind != "" + yyq2[4] = x.APIVersion != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yy4 := &x.ObjectMeta + yy4.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.ObjectMeta + yy6.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yy9 := &x.Spec + yy9.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy11 := &x.Spec + yy11.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy14 := &x.Status + yy14.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy16 := &x.Status + yy16.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym23 := z.EncBinary() + _ = yym23 + if 
false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Job) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Job) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_api.ObjectMeta{} + } else { + yyv4 := &x.ObjectMeta + yyv4.CodecDecodeSelf(d) + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = JobSpec{} + } else { + yyv5 := &x.Spec + yyv5.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = JobStatus{} + } else { + yyv6 := &x.Status + yyv6.CodecDecodeSelf(d) + } + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Job) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj9 int + var yyb9 bool + var yyhl9 bool = l >= 0 + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_api.ObjectMeta{} + } else { + yyv10 := &x.ObjectMeta + yyv10.CodecDecodeSelf(d) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = JobSpec{} + } else { + yyv11 := &x.Spec + yyv11.CodecDecodeSelf(d) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if 
r.TryDecodeAsNil() { + x.Status = JobStatus{} + } else { + yyv12 := &x.Status + yyv12.CodecDecodeSelf(d) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + for { + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj9-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *JobList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = true + yyq2[2] = x.Kind != "" + yyq2[3] = x.APIVersion != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yy4 := &x.ListMeta + yym5 := z.EncBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.EncExt(yy4) { + } else { + z.EncFallback(yy4) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.ListMeta + yym7 := z.EncBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.EncExt(yy6) { + } else { + z.EncFallback(yy6) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym9 := z.EncBinary() + _ = yym9 + if false { + } else { + h.encSliceJob(([]Job)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + h.encSliceJob(([]Job)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym12 := z.EncBinary() + _ = yym12 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + 
r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *JobList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *JobList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_unversioned.ListMeta{} + } else { + yyv4 := &x.ListMeta + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(yyv4) { + } else { + z.DecFallback(yyv4, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv6 := &x.Items + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + h.decSliceJob((*[]Job)(yyv6), d) + } + } + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *JobList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + 
x.ListMeta = pkg1_unversioned.ListMeta{} + } else { + yyv11 := &x.ListMeta + yym12 := z.DecBinary() + _ = yym12 + if false { + } else if z.HasExtensions() && z.DecExt(yyv11) { + } else { + z.DecFallback(yyv11, false) + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv13 := &x.Items + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + h.decSliceJob((*[]Job)(yyv13), d) + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *JobTemplate) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = true + yyq2[1] = true + yyq2[2] = x.Kind != "" + yyq2[3] = x.APIVersion != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yy4 := &x.ObjectMeta + yy4.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.ObjectMeta + yy6.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yy9 := &x.Template + yy9.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("template")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy11 := &x.Template + yy11.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + 
r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym18 := z.EncBinary() + _ = yym18 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *JobTemplate) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *JobTemplate) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_api.ObjectMeta{} + } else { + yyv4 := &x.ObjectMeta + yyv4.CodecDecodeSelf(d) + } + case "template": + if r.TryDecodeAsNil() { + x.Template = JobTemplateSpec{} + } else { + yyv5 := &x.Template + yyv5.CodecDecodeSelf(d) + } + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *JobTemplate) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if 
r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_api.ObjectMeta{} + } else { + yyv9 := &x.ObjectMeta + yyv9.CodecDecodeSelf(d) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Template = JobTemplateSpec{} + } else { + yyv10 := &x.Template + yyv10.CodecDecodeSelf(d) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *JobTemplateSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = true + yyq2[1] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yy4 := &x.ObjectMeta + yy4.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.ObjectMeta + yy6.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yy9 := &x.Spec + yy9.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy11 := &x.Spec + yy11.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *JobTemplateSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else 
{ + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *JobTemplateSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_api.ObjectMeta{} + } else { + yyv4 := &x.ObjectMeta + yyv4.CodecDecodeSelf(d) + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = JobSpec{} + } else { + yyv5 := &x.Spec + yyv5.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *JobTemplateSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_api.ObjectMeta{} + } else { + yyv7 := &x.ObjectMeta + yyv7.CodecDecodeSelf(d) + } + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = JobSpec{} + } else { + yyv8 := &x.Spec + yyv8.CodecDecodeSelf(d) + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *JobSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [6]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Parallelism != nil + yyq2[1] = x.Completions != nil + yyq2[2] = x.ActiveDeadlineSeconds != nil + yyq2[3] = x.Selector != nil + yyq2[4] = x.ManualSelector != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(6) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Parallelism == nil { + r.EncodeNil() + } else { + yy4 := *x.Parallelism + yym5 := 
z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(yy4)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("parallelism")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Parallelism == nil { + r.EncodeNil() + } else { + yy6 := *x.Parallelism + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeInt(int64(yy6)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Completions == nil { + r.EncodeNil() + } else { + yy9 := *x.Completions + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(yy9)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("completions")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Completions == nil { + r.EncodeNil() + } else { + yy11 := *x.Completions + yym12 := z.EncBinary() + _ = yym12 + if false { + } else { + r.EncodeInt(int64(yy11)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.ActiveDeadlineSeconds == nil { + r.EncodeNil() + } else { + yy14 := *x.ActiveDeadlineSeconds + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + r.EncodeInt(int64(yy14)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("activeDeadlineSeconds")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ActiveDeadlineSeconds == nil { + r.EncodeNil() + } else { + yy16 := *x.ActiveDeadlineSeconds + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeInt(int64(yy16)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.Selector == nil { + r.EncodeNil() + } else { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else if z.HasExtensions() && z.EncExt(x.Selector) { + } else { + z.EncFallback(x.Selector) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("selector")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Selector == nil { + r.EncodeNil() + } else { + yym20 := z.EncBinary() + _ = yym20 + if false { + } else if z.HasExtensions() && z.EncExt(x.Selector) { + } else { + z.EncFallback(x.Selector) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.ManualSelector == nil { + r.EncodeNil() + } else { + yy22 := *x.ManualSelector + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeBool(bool(yy22)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("manualSelector")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ManualSelector == nil { + r.EncodeNil() + } else { + yy24 := *x.ManualSelector + yym25 := z.EncBinary() + _ = yym25 + if false { + } else { + r.EncodeBool(bool(yy24)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy27 := &x.Template + yy27.CodecEncodeSelf(e) 
+ } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("template")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy29 := &x.Template + yy29.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *JobSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *JobSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "parallelism": + if r.TryDecodeAsNil() { + if x.Parallelism != nil { + x.Parallelism = nil + } + } else { + if x.Parallelism == nil { + x.Parallelism = new(int32) + } + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int32)(x.Parallelism)) = int32(r.DecodeInt(32)) + } + } + case "completions": + if r.TryDecodeAsNil() { + if x.Completions != nil { + x.Completions = nil + } + } else { + if x.Completions == nil { + x.Completions = new(int32) + } + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*int32)(x.Completions)) = int32(r.DecodeInt(32)) + } + } + case "activeDeadlineSeconds": + if r.TryDecodeAsNil() { + if x.ActiveDeadlineSeconds != nil { + x.ActiveDeadlineSeconds = nil + } + } else { + if x.ActiveDeadlineSeconds == nil { + x.ActiveDeadlineSeconds = new(int64) + } + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*int64)(x.ActiveDeadlineSeconds)) = int64(r.DecodeInt(64)) + } + } + case "selector": + if r.TryDecodeAsNil() { + if x.Selector != nil { + x.Selector = nil + } + } else { + if x.Selector == nil { + x.Selector = new(pkg1_unversioned.LabelSelector) + } + yym11 := z.DecBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.DecExt(x.Selector) { + } else { + z.DecFallback(x.Selector, false) + } + } + case "manualSelector": + if r.TryDecodeAsNil() { + if x.ManualSelector != nil { + x.ManualSelector = nil + } + } else { + if x.ManualSelector == nil { + x.ManualSelector = new(bool) + } + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*bool)(x.ManualSelector)) = r.DecodeBool() + } + } + case "template": + if r.TryDecodeAsNil() { + x.Template = pkg2_api.PodTemplateSpec{} + } else { + yyv14 := &x.Template + yyv14.CodecDecodeSelf(d) + } + default: + 
z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *JobSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj15 int + var yyb15 bool + var yyhl15 bool = l >= 0 + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l + } else { + yyb15 = r.CheckBreak() + } + if yyb15 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Parallelism != nil { + x.Parallelism = nil + } + } else { + if x.Parallelism == nil { + x.Parallelism = new(int32) + } + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*int32)(x.Parallelism)) = int32(r.DecodeInt(32)) + } + } + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l + } else { + yyb15 = r.CheckBreak() + } + if yyb15 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Completions != nil { + x.Completions = nil + } + } else { + if x.Completions == nil { + x.Completions = new(int32) + } + yym19 := z.DecBinary() + _ = yym19 + if false { + } else { + *((*int32)(x.Completions)) = int32(r.DecodeInt(32)) + } + } + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l + } else { + yyb15 = r.CheckBreak() + } + if yyb15 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ActiveDeadlineSeconds != nil { + x.ActiveDeadlineSeconds = nil + } + } else { + if x.ActiveDeadlineSeconds == nil { + x.ActiveDeadlineSeconds = new(int64) + } + yym21 := z.DecBinary() + _ = yym21 + if false { + } else { + *((*int64)(x.ActiveDeadlineSeconds)) = int64(r.DecodeInt(64)) + } + } + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l + } else { + yyb15 = r.CheckBreak() + } + if yyb15 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Selector != nil { + x.Selector = nil + } + } else { + if x.Selector == nil { + x.Selector = new(pkg1_unversioned.LabelSelector) + } + yym23 := z.DecBinary() + _ = yym23 + if false { + } else if z.HasExtensions() && z.DecExt(x.Selector) { + } else { + z.DecFallback(x.Selector, false) + } + } + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l + } else { + yyb15 = r.CheckBreak() + } + if yyb15 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ManualSelector != nil { + x.ManualSelector = nil + } + } else { + if x.ManualSelector == nil { + x.ManualSelector = new(bool) + } + yym25 := z.DecBinary() + _ = yym25 + if false { + } else { + *((*bool)(x.ManualSelector)) = r.DecodeBool() + } + } + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l + } else { + yyb15 = r.CheckBreak() + } + if yyb15 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Template = pkg2_api.PodTemplateSpec{} + } else { + yyv26 := &x.Template + yyv26.CodecDecodeSelf(d) + } + for { + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l + } else { + yyb15 = r.CheckBreak() + } + if yyb15 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + 
z.DecStructFieldNotFound(yyj15-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *JobStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [6]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.Conditions) != 0 + yyq2[1] = x.StartTime != nil + yyq2[2] = x.CompletionTime != nil + yyq2[3] = x.Active != 0 + yyq2[4] = x.Succeeded != 0 + yyq2[5] = x.Failed != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(6) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Conditions == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSliceJobCondition(([]JobCondition)(x.Conditions), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("conditions")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Conditions == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSliceJobCondition(([]JobCondition)(x.Conditions), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.StartTime == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.EncExt(x.StartTime) { + } else if yym7 { + z.EncBinaryMarshal(x.StartTime) + } else if !yym7 && z.IsJSONHandle() { + z.EncJSONMarshal(x.StartTime) + } else { + z.EncFallback(x.StartTime) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("startTime")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.StartTime == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.EncExt(x.StartTime) { + } else if yym8 { + z.EncBinaryMarshal(x.StartTime) + } else if !yym8 && z.IsJSONHandle() { + z.EncJSONMarshal(x.StartTime) + } else { + z.EncFallback(x.StartTime) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.CompletionTime == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.EncExt(x.CompletionTime) { + } else if yym10 { + z.EncBinaryMarshal(x.CompletionTime) + } else if !yym10 && z.IsJSONHandle() { + z.EncJSONMarshal(x.CompletionTime) + } else { + z.EncFallback(x.CompletionTime) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("completionTime")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.CompletionTime == nil { + r.EncodeNil() + } else { + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(x.CompletionTime) { + } else if yym11 { + 
z.EncBinaryMarshal(x.CompletionTime) + } else if !yym11 && z.IsJSONHandle() { + z.EncJSONMarshal(x.CompletionTime) + } else { + z.EncFallback(x.CompletionTime) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeInt(int64(x.Active)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("active")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeInt(int64(x.Active)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeInt(int64(x.Succeeded)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("succeeded")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeInt(int64(x.Succeeded)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeInt(int64(x.Failed)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("failed")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeInt(int64(x.Failed)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *JobStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *JobStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "conditions": + if r.TryDecodeAsNil() { + x.Conditions = nil + } else { + yyv4 := &x.Conditions + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSliceJobCondition((*[]JobCondition)(yyv4), d) + } + } 
+ case "startTime": + if r.TryDecodeAsNil() { + if x.StartTime != nil { + x.StartTime = nil + } + } else { + if x.StartTime == nil { + x.StartTime = new(pkg1_unversioned.Time) + } + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(x.StartTime) { + } else if yym7 { + z.DecBinaryUnmarshal(x.StartTime) + } else if !yym7 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.StartTime) + } else { + z.DecFallback(x.StartTime, false) + } + } + case "completionTime": + if r.TryDecodeAsNil() { + if x.CompletionTime != nil { + x.CompletionTime = nil + } + } else { + if x.CompletionTime == nil { + x.CompletionTime = new(pkg1_unversioned.Time) + } + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(x.CompletionTime) { + } else if yym9 { + z.DecBinaryUnmarshal(x.CompletionTime) + } else if !yym9 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.CompletionTime) + } else { + z.DecFallback(x.CompletionTime, false) + } + } + case "active": + if r.TryDecodeAsNil() { + x.Active = 0 + } else { + x.Active = int32(r.DecodeInt(32)) + } + case "succeeded": + if r.TryDecodeAsNil() { + x.Succeeded = 0 + } else { + x.Succeeded = int32(r.DecodeInt(32)) + } + case "failed": + if r.TryDecodeAsNil() { + x.Failed = 0 + } else { + x.Failed = int32(r.DecodeInt(32)) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *JobStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj13 int + var yyb13 bool + var yyhl13 bool = l >= 0 + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Conditions = nil + } else { + yyv14 := &x.Conditions + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + h.decSliceJobCondition((*[]JobCondition)(yyv14), d) + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.StartTime != nil { + x.StartTime = nil + } + } else { + if x.StartTime == nil { + x.StartTime = new(pkg1_unversioned.Time) + } + yym17 := z.DecBinary() + _ = yym17 + if false { + } else if z.HasExtensions() && z.DecExt(x.StartTime) { + } else if yym17 { + z.DecBinaryUnmarshal(x.StartTime) + } else if !yym17 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.StartTime) + } else { + z.DecFallback(x.StartTime, false) + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.CompletionTime != nil { + x.CompletionTime = nil + } + } else { + if x.CompletionTime == nil { + x.CompletionTime = new(pkg1_unversioned.Time) + } + yym19 := z.DecBinary() + _ = yym19 + if false { + } else if z.HasExtensions() && z.DecExt(x.CompletionTime) { + } else if yym19 { + z.DecBinaryUnmarshal(x.CompletionTime) + } else if !yym19 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.CompletionTime) + } else { + z.DecFallback(x.CompletionTime, false) + } + } + 
yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Active = 0 + } else { + x.Active = int32(r.DecodeInt(32)) + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Succeeded = 0 + } else { + x.Succeeded = int32(r.DecodeInt(32)) + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Failed = 0 + } else { + x.Failed = int32(r.DecodeInt(32)) + } + for { + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj13-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x JobConditionType) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *JobConditionType) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *JobCondition) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [6]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[2] = true + yyq2[3] = true + yyq2[4] = x.Reason != "" + yyq2[5] = x.Message != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(6) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + x.Type.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("type")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Type.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yysf7 := &x.Status + yysf7.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yysf8 := &x.Status + yysf8.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.LastProbeTime + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else if 
yym11 { + z.EncBinaryMarshal(yy10) + } else if !yym11 && z.IsJSONHandle() { + z.EncJSONMarshal(yy10) + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("lastProbeTime")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.LastProbeTime + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else if yym13 { + z.EncBinaryMarshal(yy12) + } else if !yym13 && z.IsJSONHandle() { + z.EncJSONMarshal(yy12) + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.LastTransitionTime + yym16 := z.EncBinary() + _ = yym16 + if false { + } else if z.HasExtensions() && z.EncExt(yy15) { + } else if yym16 { + z.EncBinaryMarshal(yy15) + } else if !yym16 && z.IsJSONHandle() { + z.EncJSONMarshal(yy15) + } else { + z.EncFallback(yy15) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("lastTransitionTime")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.LastTransitionTime + yym18 := z.EncBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.EncExt(yy17) { + } else if yym18 { + z.EncBinaryMarshal(yy17) + } else if !yym18 && z.IsJSONHandle() { + z.EncJSONMarshal(yy17) + } else { + z.EncFallback(yy17) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("reason")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym21 := z.EncBinary() + _ = yym21 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("message")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym24 := z.EncBinary() + _ = yym24 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *JobCondition) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() 
+ if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *JobCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "type": + if r.TryDecodeAsNil() { + x.Type = "" + } else { + x.Type = JobConditionType(r.DecodeString()) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = "" + } else { + x.Status = pkg2_api.ConditionStatus(r.DecodeString()) + } + case "lastProbeTime": + if r.TryDecodeAsNil() { + x.LastProbeTime = pkg1_unversioned.Time{} + } else { + yyv6 := &x.LastProbeTime + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(yyv6) { + } else if yym7 { + z.DecBinaryUnmarshal(yyv6) + } else if !yym7 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv6) + } else { + z.DecFallback(yyv6, false) + } + } + case "lastTransitionTime": + if r.TryDecodeAsNil() { + x.LastTransitionTime = pkg1_unversioned.Time{} + } else { + yyv8 := &x.LastTransitionTime + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else if yym9 { + z.DecBinaryUnmarshal(yyv8) + } else if !yym9 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv8) + } else { + z.DecFallback(yyv8, false) + } + } + case "reason": + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + x.Reason = string(r.DecodeString()) + } + case "message": + if r.TryDecodeAsNil() { + x.Message = "" + } else { + x.Message = string(r.DecodeString()) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *JobCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Type = "" + } else { + x.Type = JobConditionType(r.DecodeString()) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = "" + } else { + x.Status = pkg2_api.ConditionStatus(r.DecodeString()) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.LastProbeTime = pkg1_unversioned.Time{} + } else { + yyv15 := &x.LastProbeTime + yym16 := z.DecBinary() + _ = yym16 + if false { + } else if 
z.HasExtensions() && z.DecExt(yyv15) { + } else if yym16 { + z.DecBinaryUnmarshal(yyv15) + } else if !yym16 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv15) + } else { + z.DecFallback(yyv15, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.LastTransitionTime = pkg1_unversioned.Time{} + } else { + yyv17 := &x.LastTransitionTime + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else if yym18 { + z.DecBinaryUnmarshal(yyv17) + } else if !yym18 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv17) + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + x.Reason = string(r.DecodeString()) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Message = "" + } else { + x.Message = string(r.DecodeString()) + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ScheduledJob) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = true + yyq2[1] = true + yyq2[2] = true + yyq2[3] = x.Kind != "" + yyq2[4] = x.APIVersion != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yy4 := &x.ObjectMeta + yy4.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.ObjectMeta + yy6.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yy9 := &x.Spec + yy9.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy11 := &x.Spec + yy11.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy14 := &x.Status + yy14.CodecEncodeSelf(e) + } else { + r.EncodeNil() 
+ } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy16 := &x.Status + yy16.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ScheduledJob) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ScheduledJob) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_api.ObjectMeta{} + } else { + yyv4 := &x.ObjectMeta + yyv4.CodecDecodeSelf(d) + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = ScheduledJobSpec{} + } else { + yyv5 := &x.Spec + yyv5.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = ScheduledJobStatus{} + } else { + yyv6 := &x.Status + yyv6.CodecDecodeSelf(d) + } + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = 
string(r.DecodeString()) + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ScheduledJob) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj9 int + var yyb9 bool + var yyhl9 bool = l >= 0 + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_api.ObjectMeta{} + } else { + yyv10 := &x.ObjectMeta + yyv10.CodecDecodeSelf(d) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = ScheduledJobSpec{} + } else { + yyv11 := &x.Spec + yyv11.CodecDecodeSelf(d) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = ScheduledJobStatus{} + } else { + yyv12 := &x.Status + yyv12.CodecDecodeSelf(d) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + for { + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj9-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ScheduledJobList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = true + yyq2[2] = x.Kind != "" + yyq2[3] = x.APIVersion != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yy4 := &x.ListMeta + yym5 := z.EncBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.EncExt(yy4) { + } else { + z.EncFallback(yy4) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + 
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.ListMeta + yym7 := z.EncBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.EncExt(yy6) { + } else { + z.EncFallback(yy6) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym9 := z.EncBinary() + _ = yym9 + if false { + } else { + h.encSliceScheduledJob(([]ScheduledJob)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + h.encSliceScheduledJob(([]ScheduledJob)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym12 := z.EncBinary() + _ = yym12 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ScheduledJobList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ScheduledJobList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + 
z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_unversioned.ListMeta{} + } else { + yyv4 := &x.ListMeta + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(yyv4) { + } else { + z.DecFallback(yyv4, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv6 := &x.Items + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + h.decSliceScheduledJob((*[]ScheduledJob)(yyv6), d) + } + } + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ScheduledJobList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_unversioned.ListMeta{} + } else { + yyv11 := &x.ListMeta + yym12 := z.DecBinary() + _ = yym12 + if false { + } else if z.HasExtensions() && z.DecExt(yyv11) { + } else { + z.DecFallback(yyv11, false) + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv13 := &x.Items + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + h.decSliceScheduledJob((*[]ScheduledJob)(yyv13), d) + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ScheduledJobSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + 
_, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.StartingDeadlineSeconds != nil + yyq2[2] = x.ConcurrencyPolicy != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 3 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Schedule)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("schedule")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Schedule)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.StartingDeadlineSeconds == nil { + r.EncodeNil() + } else { + yy7 := *x.StartingDeadlineSeconds + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeInt(int64(yy7)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("startingDeadlineSeconds")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.StartingDeadlineSeconds == nil { + r.EncodeNil() + } else { + yy9 := *x.StartingDeadlineSeconds + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(yy9)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + x.ConcurrencyPolicy.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("concurrencyPolicy")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.ConcurrencyPolicy.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + r.EncodeBool(bool(x.Suspend)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("suspend")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeBool(bool(x.Suspend)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy18 := &x.JobTemplate + yy18.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("jobTemplate")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy20 := &x.JobTemplate + yy20.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ScheduledJobSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + 
x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ScheduledJobSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "schedule": + if r.TryDecodeAsNil() { + x.Schedule = "" + } else { + x.Schedule = string(r.DecodeString()) + } + case "startingDeadlineSeconds": + if r.TryDecodeAsNil() { + if x.StartingDeadlineSeconds != nil { + x.StartingDeadlineSeconds = nil + } + } else { + if x.StartingDeadlineSeconds == nil { + x.StartingDeadlineSeconds = new(int64) + } + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + *((*int64)(x.StartingDeadlineSeconds)) = int64(r.DecodeInt(64)) + } + } + case "concurrencyPolicy": + if r.TryDecodeAsNil() { + x.ConcurrencyPolicy = "" + } else { + x.ConcurrencyPolicy = ConcurrencyPolicy(r.DecodeString()) + } + case "suspend": + if r.TryDecodeAsNil() { + x.Suspend = false + } else { + x.Suspend = bool(r.DecodeBool()) + } + case "jobTemplate": + if r.TryDecodeAsNil() { + x.JobTemplate = JobTemplateSpec{} + } else { + yyv9 := &x.JobTemplate + yyv9.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ScheduledJobSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Schedule = "" + } else { + x.Schedule = string(r.DecodeString()) + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.StartingDeadlineSeconds != nil { + x.StartingDeadlineSeconds = nil + } + } else { + if x.StartingDeadlineSeconds == nil { + x.StartingDeadlineSeconds = new(int64) + } + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*int64)(x.StartingDeadlineSeconds)) = int64(r.DecodeInt(64)) + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ConcurrencyPolicy = "" + } else { + x.ConcurrencyPolicy = ConcurrencyPolicy(r.DecodeString()) + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = 
r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Suspend = false + } else { + x.Suspend = bool(r.DecodeBool()) + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.JobTemplate = JobTemplateSpec{} + } else { + yyv16 := &x.JobTemplate + yyv16.CodecDecodeSelf(d) + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x ConcurrencyPolicy) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *ConcurrencyPolicy) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *ScheduledJobStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.Active) != 0 + yyq2[1] = x.LastScheduleTime != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Active == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSliceapi_ObjectReference(([]pkg2_api.ObjectReference)(x.Active), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("active")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Active == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSliceapi_ObjectReference(([]pkg2_api.ObjectReference)(x.Active), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.LastScheduleTime == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.EncExt(x.LastScheduleTime) { + } else if yym7 { + z.EncBinaryMarshal(x.LastScheduleTime) + } else if !yym7 && z.IsJSONHandle() { + z.EncJSONMarshal(x.LastScheduleTime) + } else { + z.EncFallback(x.LastScheduleTime) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) 
+ r.EncodeString(codecSelferC_UTF81234, string("lastScheduleTime")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.LastScheduleTime == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.EncExt(x.LastScheduleTime) { + } else if yym8 { + z.EncBinaryMarshal(x.LastScheduleTime) + } else if !yym8 && z.IsJSONHandle() { + z.EncJSONMarshal(x.LastScheduleTime) + } else { + z.EncFallback(x.LastScheduleTime) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ScheduledJobStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ScheduledJobStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "active": + if r.TryDecodeAsNil() { + x.Active = nil + } else { + yyv4 := &x.Active + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSliceapi_ObjectReference((*[]pkg2_api.ObjectReference)(yyv4), d) + } + } + case "lastScheduleTime": + if r.TryDecodeAsNil() { + if x.LastScheduleTime != nil { + x.LastScheduleTime = nil + } + } else { + if x.LastScheduleTime == nil { + x.LastScheduleTime = new(pkg1_unversioned.Time) + } + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(x.LastScheduleTime) { + } else if yym7 { + z.DecBinaryUnmarshal(x.LastScheduleTime) + } else if !yym7 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.LastScheduleTime) + } else { + z.DecFallback(x.LastScheduleTime, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ScheduledJobStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Active = nil + } else { + yyv9 := &x.Active + yym10 := z.DecBinary() + _ = yym10 + if false { 
+ } else { + h.decSliceapi_ObjectReference((*[]pkg2_api.ObjectReference)(yyv9), d) + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.LastScheduleTime != nil { + x.LastScheduleTime = nil + } + } else { + if x.LastScheduleTime == nil { + x.LastScheduleTime = new(pkg1_unversioned.Time) + } + yym12 := z.DecBinary() + _ = yym12 + if false { + } else if z.HasExtensions() && z.DecExt(x.LastScheduleTime) { + } else if yym12 { + z.DecBinaryUnmarshal(x.LastScheduleTime) + } else if !yym12 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.LastScheduleTime) + } else { + z.DecFallback(x.LastScheduleTime, false) + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) encSliceJob(v []Job, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceJob(v *[]Job, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []Job{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 768) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]Job, yyrl1) + } + } else { + yyv1 = make([]Job, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Job{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, Job{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Job{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, Job{}) // var yyz1 Job + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = Job{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []Job{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceJobCondition(v []JobCondition, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + 
r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceJobCondition(v *[]JobCondition, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []JobCondition{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 112) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]JobCondition, yyrl1) + } + } else { + yyv1 = make([]JobCondition, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = JobCondition{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, JobCondition{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = JobCondition{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, JobCondition{}) // var yyz1 JobCondition + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = JobCondition{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []JobCondition{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceScheduledJob(v []ScheduledJob, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceScheduledJob(v *[]ScheduledJob, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []ScheduledJob{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 1000) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]ScheduledJob, yyrl1) + } + } else { + yyv1 = make([]ScheduledJob, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true 
+ } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ScheduledJob{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, ScheduledJob{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ScheduledJob{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, ScheduledJob{}) // var yyz1 ScheduledJob + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = ScheduledJob{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []ScheduledJob{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceapi_ObjectReference(v []pkg2_api.ObjectReference, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceapi_ObjectReference(v *[]pkg2_api.ObjectReference, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []pkg2_api.ObjectReference{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 112) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]pkg2_api.ObjectReference, yyrl1) + } + } else { + yyv1 = make([]pkg2_api.ObjectReference, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = pkg2_api.ObjectReference{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, pkg2_api.ObjectReference{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = pkg2_api.ObjectReference{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, pkg2_api.ObjectReference{}) // var yyz1 pkg2_api.ObjectReference + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = pkg2_api.ObjectReference{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []pkg2_api.ObjectReference{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} diff 
--git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/types.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/types.go new file mode 100644 index 000000000000..756d4c9d6b1a --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/types.go @@ -0,0 +1,244 @@ +/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package batch
+
+import (
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/api/unversioned"
+)
+
+// +genclient=true
+
+// Job represents the configuration of a single job.
+type Job struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ api.ObjectMeta `json:"metadata,omitempty"`
+
+ // Spec is a structure defining the expected behavior of a job.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+ Spec JobSpec `json:"spec,omitempty"`
+
+ // Status is a structure describing the current status of a job.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+ Status JobStatus `json:"status,omitempty"`
+}
+
+// JobList is a collection of jobs.
+type JobList struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard list metadata
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ unversioned.ListMeta `json:"metadata,omitempty"`
+
+ // Items is the list of Job.
+ Items []Job `json:"items"`
+}
+
+// JobTemplate describes a template for creating copies of a predefined pod.
+type JobTemplate struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ api.ObjectMeta `json:"metadata,omitempty"`
+
+ // Template defines jobs that will be created from this template
+ // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+ Template JobTemplateSpec `json:"template,omitempty"`
+}
+
+// JobTemplateSpec describes the data a Job should have when created from a template.
+type JobTemplateSpec struct {
+ // Standard object's metadata of the jobs created from this template.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ api.ObjectMeta `json:"metadata,omitempty"`
+
+ // Specification of the desired behavior of the job.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+ Spec JobSpec `json:"spec,omitempty"`
+}
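For readers following this patch: the Job and JobSpec types defined just below are plain Go structs, so controllers and tests populate them directly. A minimal sketch, assuming the vendored packages above; the object name and the parallelism/completions values are illustrative assumptions, not part of this change:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/apis/batch"
)

func main() {
	parallelism := int32(2) // assumed: run at most 2 pods at a time
	completions := int32(5) // assumed: the job succeeds after 5 successful pods
	job := batch.Job{
		ObjectMeta: api.ObjectMeta{Name: "example-job"}, // hypothetical name
		Spec: batch.JobSpec{
			Parallelism: &parallelism,
			Completions: &completions,
			// Template would describe the pod to run; left zero-valued here.
			Template: api.PodTemplateSpec{},
		},
	}
	fmt.Println(*job.Spec.Parallelism, *job.Spec.Completions)
}

Both counts are pointers so that "unset" (nil) can be distinguished from an explicit zero, which is exactly the distinction the Completions comment below relies on.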
+
+// JobSpec describes what the job execution will look like.
+type JobSpec struct {
+
+ // Parallelism specifies the maximum desired number of pods the job should
+ // run at any given time. The actual number of pods running in steady state will
+ // be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism),
+ // i.e. when the work left to do is less than max parallelism.
+ Parallelism *int32 `json:"parallelism,omitempty"`
+
+ // Completions specifies the desired number of successfully finished pods the
+ // job should be run with. Setting to nil means that the success of any
+ // pod signals the success of all pods, and allows parallelism to have any positive
+ // value. Setting to 1 means that parallelism is limited to 1 and the success of that
+ // pod signals the success of the job.
+ Completions *int32 `json:"completions,omitempty"`
+
+ // Optional duration in seconds relative to the startTime that the job may be active
+ // before the system tries to terminate it; the value must be a positive integer.
+ ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty"`
+
+ // Selector is a label query over pods that should match the pod count.
+ // Normally, the system sets this field for you.
+ Selector *unversioned.LabelSelector `json:"selector,omitempty"`
+
+ // ManualSelector controls generation of pod labels and pod selectors.
+ // Leave `manualSelector` unset unless you are certain what you are doing.
+ // When false or unset, the system picks labels unique to this job
+ // and appends those labels to the pod template. When true,
+ // the user is responsible for picking unique labels and specifying
+ // the selector. Failure to pick a unique label may cause this
+ // and other jobs to not function correctly. However, you may see
+ // `manualSelector=true` in jobs that were created with the old `extensions/v1beta1`
+ // API.
+ ManualSelector *bool `json:"manualSelector,omitempty"`
+
+ // Template is the object that describes the pod that will be created when
+ // executing a job.
+ Template api.PodTemplateSpec `json:"template"`
+}
+
+// JobStatus represents the current state of a Job.
+type JobStatus struct {
+
+ // Conditions represent the latest available observations of an object's current state.
+ Conditions []JobCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"`
+
+ // StartTime represents the time when the job was acknowledged by the Job Manager.
+ // It is not guaranteed to be set in happens-before order across separate operations.
+ // It is represented in RFC3339 form and is in UTC.
+ StartTime *unversioned.Time `json:"startTime,omitempty"`
+
+ // CompletionTime represents the time when the job was completed. It is not guaranteed to
+ // be set in happens-before order across separate operations.
+ // It is represented in RFC3339 form and is in UTC.
+ CompletionTime *unversioned.Time `json:"completionTime,omitempty"`
+
+ // Active is the number of actively running pods.
+ Active int32 `json:"active,omitempty"`
+
+ // Succeeded is the number of pods which reached Phase Succeeded.
+ Succeeded int32 `json:"succeeded,omitempty"`
+
+ // Failed is the number of pods which reached Phase Failed.
+ Failed int32 `json:"failed,omitempty"`
+}
+
+type JobConditionType string
+
+// These are valid conditions of a job.
+const (
+ // JobComplete means the job has completed its execution.
+ JobComplete JobConditionType = "Complete"
+ // JobFailed means the job has failed its execution.
+ JobFailed JobConditionType = "Failed"
+)
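The JobConditionType constants above are typically consumed by scanning JobStatus.Conditions for a matching type with a True status. A small sketch of that pattern (not part of this patch), using the JobCondition type defined next and assuming api.ConditionTrue from the vendored api package:

package main

import (
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/apis/batch"
)

// isJobComplete reports whether the job carries a Complete condition set to True.
func isJobComplete(j *batch.Job) bool {
	for _, c := range j.Status.Conditions {
		if c.Type == batch.JobComplete && c.Status == api.ConditionTrue {
			return true
		}
	}
	return false
}

func main() {
	_ = isJobComplete(&batch.Job{}) // an empty job has no conditions, so this is false
}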
+// JobCondition describes the current state of a job.
+type JobCondition struct {
+ // Type of job condition, Complete or Failed.
+ Type JobConditionType `json:"type"`
+ // Status of the condition, one of True, False, Unknown.
+ Status api.ConditionStatus `json:"status"`
+ // Last time the condition was checked.
+ LastProbeTime unversioned.Time `json:"lastProbeTime,omitempty"`
+ // Last time the condition transitioned from one status to another.
+ LastTransitionTime unversioned.Time `json:"lastTransitionTime,omitempty"`
+ // (brief) reason for the condition's last transition.
+ Reason string `json:"reason,omitempty"`
+ // Human readable message indicating details about last transition.
+ Message string `json:"message,omitempty"`
+}
+
+// +genclient=true
+
+// ScheduledJob represents the configuration of a single scheduled job.
+type ScheduledJob struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ api.ObjectMeta `json:"metadata,omitempty"`
+
+ // Spec is a structure defining the expected behavior of a job, including the schedule.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+ Spec ScheduledJobSpec `json:"spec,omitempty"`
+
+ // Status is a structure describing the current status of a job.
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+ Status ScheduledJobStatus `json:"status,omitempty"`
+}
+
+// ScheduledJobList is a collection of scheduled jobs.
+type ScheduledJobList struct {
+ unversioned.TypeMeta `json:",inline"`
+ // Standard list metadata
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+ unversioned.ListMeta `json:"metadata,omitempty"`
+
+ // Items is the list of ScheduledJob.
+ Items []ScheduledJob `json:"items"`
+}
+
+// ScheduledJobSpec describes what the job execution will look like and when it will actually run.
+type ScheduledJobSpec struct {
+
+ // Schedule contains the schedule in Cron format, see https://en.wikipedia.org/wiki/Cron.
+ Schedule string `json:"schedule"`
+
+ // Optional deadline in seconds for starting the job if it misses scheduled
+ // time for any reason. Missed job executions will be counted as failed ones.
+ StartingDeadlineSeconds *int64 `json:"startingDeadlineSeconds,omitempty"`
+
+ // ConcurrencyPolicy specifies how to treat concurrent executions of a Job.
+ ConcurrencyPolicy ConcurrencyPolicy `json:"concurrencyPolicy,omitempty"`
+
+ // Suspend flag tells the controller to suspend subsequent executions; it does
+ // not apply to already started executions. Defaults to false.
+ Suspend bool `json:"suspend"`
+
+ // JobTemplate is the object that describes the job that will be created when
+ // executing a ScheduledJob.
+ JobTemplate JobTemplateSpec `json:"jobTemplate"`
+}
+
+// ConcurrencyPolicy describes how the job will be handled.
+// Only one of the following concurrency policies may be specified.
+// If none of the following policies is specified, the default one
+// is AllowConcurrent.
+type ConcurrencyPolicy string
+
+const (
+ // AllowConcurrent allows ScheduledJobs to run concurrently.
+ AllowConcurrent ConcurrencyPolicy = "Allow"
+
+ // ForbidConcurrent forbids concurrent runs, skipping the next run if the previous
+ // one hasn't finished yet.
+ ForbidConcurrent ConcurrencyPolicy = "Forbid"
+
+ // ReplaceConcurrent cancels the currently running job and replaces it with a new one.
+ ReplaceConcurrent ConcurrencyPolicy = "Replace"
+)
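Putting the schedule and concurrency fields together, a hypothetical ScheduledJob spec might be built like this (not part of this patch; the cron expression and deadline are illustrative assumptions):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/apis/batch"
)

func main() {
	deadline := int64(100) // assumed: count a run as failed if not started within 100s
	sj := batch.ScheduledJob{
		Spec: batch.ScheduledJobSpec{
			Schedule:                "0 * * * *", // hypothetical cron spec: once an hour
			StartingDeadlineSeconds: &deadline,
			// Skip a scheduled run while the previous one is still active.
			ConcurrencyPolicy: batch.ForbidConcurrent,
			// JobTemplate would describe the Job to create; left zero-valued here.
		},
	}
	fmt.Println(sj.Spec.Schedule, sj.Spec.ConcurrencyPolicy)
}

Leaving ConcurrencyPolicy empty falls back to AllowConcurrent, per the type's doc comment above.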
+// ScheduledJobStatus represents the current state of a ScheduledJob.
+type ScheduledJobStatus struct {
+ // Active holds pointers to currently running jobs.
+ Active []api.ObjectReference `json:"active,omitempty"`
+
+ // LastScheduleTime is the last time the job was successfully scheduled.
+ LastScheduleTime *unversioned.Time `json:"lastScheduleTime,omitempty"`
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/v1/conversion.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/v1/conversion.go index c1a77ee8e920..2d163c6e9a8e 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/v1/conversion.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/v1/conversion.go @@ -20,7 +20,9 @@ import ( "fmt" "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/unversioned" v1 "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/apis/batch" "k8s.io/kubernetes/pkg/conversion" "k8s.io/kubernetes/pkg/runtime" ) @@ -28,8 +30,8 @@ import ( func addConversionFuncs(scheme *runtime.Scheme) { // Add non-generated conversion functions err := scheme.AddConversionFuncs( - Convert_api_PodSpec_To_v1_PodSpec, - Convert_v1_PodSpec_To_api_PodSpec, + Convert_batch_JobSpec_To_v1_JobSpec, + Convert_v1_JobSpec_To_batch_JobSpec, ) if err != nil { // If one of the conversion functions is malformed, detect it immediately. @@ -51,13 +53,54 @@ func addConversionFuncs(scheme *runtime.Scheme) { } } -// The following two PodSpec conversions functions where copied from pkg/api/conversion.go -// for the generated functions to work properly. -// This should be fixed: https://github.com/kubernetes/kubernetes/issues/12977 -func Convert_api_PodSpec_To_v1_PodSpec(in *api.PodSpec, out *v1.PodSpec, s conversion.Scope) error { - return v1.Convert_api_PodSpec_To_v1_PodSpec(in, out, s) +func Convert_batch_JobSpec_To_v1_JobSpec(in *batch.JobSpec, out *JobSpec, s conversion.Scope) error { + out.Parallelism = in.Parallelism + out.Completions = in.Completions + out.ActiveDeadlineSeconds = in.ActiveDeadlineSeconds + // unable to generate simple pointer conversion for unversioned.LabelSelector -> v1.LabelSelector + if in.Selector != nil { + out.Selector = new(LabelSelector) + if err := Convert_unversioned_LabelSelector_To_v1_LabelSelector(in.Selector, out.Selector, s); err != nil { + return err + } + } else { + out.Selector = nil + } + if in.ManualSelector != nil { + out.ManualSelector = new(bool) + *out.ManualSelector = *in.ManualSelector + } else { + out.ManualSelector = nil + } + + if err := v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + return nil } -func Convert_v1_PodSpec_To_api_PodSpec(in *v1.PodSpec, out *api.PodSpec, s conversion.Scope) error { - return v1.Convert_v1_PodSpec_To_api_PodSpec(in, out, s) +func Convert_v1_JobSpec_To_batch_JobSpec(in *JobSpec, out *batch.JobSpec, s conversion.Scope) error { + out.Parallelism = in.Parallelism + out.Completions = in.Completions + out.ActiveDeadlineSeconds = in.ActiveDeadlineSeconds + // unable to generate simple pointer conversion for v1.LabelSelector -> unversioned.LabelSelector + if in.Selector != nil { + out.Selector = new(unversioned.LabelSelector) + if err := Convert_v1_LabelSelector_To_unversioned_LabelSelector(in.Selector, out.Selector, s); err != nil { + return err + } + } else { + out.Selector = nil + } + if in.ManualSelector != nil { + out.ManualSelector = new(bool) + *out.ManualSelector = *in.ManualSelector + } else { + out.ManualSelector = nil + } + + if err := v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + return nil }
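The handwritten JobSpec conversions above are registered with the scheme by addConversionFuncs, so callers normally convert whole objects through the scheme rather than invoking these functions directly. A rough sketch of that flow (not part of this patch; it assumes the two-argument Scheme.Convert signature of Kubernetes at this vintage, and the batchv1 alias is introduced here for illustration):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/apis/batch"
	batchv1 "k8s.io/kubernetes/pkg/apis/batch/v1"
)

func main() {
	versioned := &batchv1.Job{} // e.g. a freshly decoded v1 object
	internal := &batch.Job{}
	// api.Scheme routes through the registered conversion functions,
	// including Convert_v1_JobSpec_To_batch_JobSpec defined above.
	if err := api.Scheme.Convert(versioned, internal); err != nil {
		fmt.Println("conversion failed:", err)
	}
}

The generated file that follows registers its own functions against the same scheme in an init block, which is why the hand-written and generated halves compose transparently.

diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/v1/conversion_generated.go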
b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/v1/conversion_generated.go index 9a7b4876f60d..4bb13c498d5f 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/v1/conversion_generated.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/v1/conversion_generated.go @@ -1,5 +1,7 @@ +// +build !ignore_autogenerated + /* -Copyright 2015 The Kubernetes Authors All rights reserved. +Copyright 2016 The Kubernetes Authors All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,305 +16,156 @@ See the License for the specific language governing permissions and limitations under the License. */ -// DO NOT EDIT. THIS FILE IS AUTO-GENERATED BY $KUBEROOT/hack/update-generated-conversions.sh +// This file was autogenerated by conversion-gen. Do not edit it manually! package v1 import ( - reflect "reflect" - api "k8s.io/kubernetes/pkg/api" - resource "k8s.io/kubernetes/pkg/api/resource" unversioned "k8s.io/kubernetes/pkg/api/unversioned" - v1 "k8s.io/kubernetes/pkg/api/v1" - extensions "k8s.io/kubernetes/pkg/apis/extensions" + api_v1 "k8s.io/kubernetes/pkg/api/v1" + batch "k8s.io/kubernetes/pkg/apis/batch" conversion "k8s.io/kubernetes/pkg/conversion" ) -func autoConvert_api_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource(in *api.AWSElasticBlockStoreVolumeSource, out *v1.AWSElasticBlockStoreVolumeSource, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.AWSElasticBlockStoreVolumeSource))(in) +func init() { + if err := api.Scheme.AddGeneratedConversionFuncs( + Convert_v1_Job_To_batch_Job, + Convert_batch_Job_To_v1_Job, + Convert_v1_JobCondition_To_batch_JobCondition, + Convert_batch_JobCondition_To_v1_JobCondition, + Convert_v1_JobList_To_batch_JobList, + Convert_batch_JobList_To_v1_JobList, + Convert_v1_JobSpec_To_batch_JobSpec, + Convert_batch_JobSpec_To_v1_JobSpec, + Convert_v1_JobStatus_To_batch_JobStatus, + Convert_batch_JobStatus_To_v1_JobStatus, + Convert_v1_LabelSelector_To_unversioned_LabelSelector, + Convert_unversioned_LabelSelector_To_v1_LabelSelector, + Convert_v1_LabelSelectorRequirement_To_unversioned_LabelSelectorRequirement, + Convert_unversioned_LabelSelectorRequirement_To_v1_LabelSelectorRequirement, + ); err != nil { + // if one of the conversion functions is malformed, detect it immediately. 
+ panic(err) } - out.VolumeID = in.VolumeID - out.FSType = in.FSType - out.Partition = int32(in.Partition) - out.ReadOnly = in.ReadOnly - return nil -} - -func Convert_api_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource(in *api.AWSElasticBlockStoreVolumeSource, out *v1.AWSElasticBlockStoreVolumeSource, s conversion.Scope) error { - return autoConvert_api_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource(in, out, s) } -func autoConvert_api_AzureFileVolumeSource_To_v1_AzureFileVolumeSource(in *api.AzureFileVolumeSource, out *v1.AzureFileVolumeSource, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.AzureFileVolumeSource))(in) +func autoConvert_v1_Job_To_batch_Job(in *Job, out *batch.Job, s conversion.Scope) error { + SetDefaults_Job(in) + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err } - out.SecretName = in.SecretName - out.ShareName = in.ShareName - out.ReadOnly = in.ReadOnly - return nil -} - -func Convert_api_AzureFileVolumeSource_To_v1_AzureFileVolumeSource(in *api.AzureFileVolumeSource, out *v1.AzureFileVolumeSource, s conversion.Scope) error { - return autoConvert_api_AzureFileVolumeSource_To_v1_AzureFileVolumeSource(in, out, s) -} - -func autoConvert_api_Capabilities_To_v1_Capabilities(in *api.Capabilities, out *v1.Capabilities, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.Capabilities))(in) + // TODO: Inefficient conversion - can we improve it? + if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { + return err } - if in.Add != nil { - out.Add = make([]v1.Capability, len(in.Add)) - for i := range in.Add { - out.Add[i] = v1.Capability(in.Add[i]) - } - } else { - out.Add = nil + if err := Convert_v1_JobSpec_To_batch_JobSpec(&in.Spec, &out.Spec, s); err != nil { + return err } - if in.Drop != nil { - out.Drop = make([]v1.Capability, len(in.Drop)) - for i := range in.Drop { - out.Drop[i] = v1.Capability(in.Drop[i]) - } - } else { - out.Drop = nil + if err := Convert_v1_JobStatus_To_batch_JobStatus(&in.Status, &out.Status, s); err != nil { + return err } return nil } -func Convert_api_Capabilities_To_v1_Capabilities(in *api.Capabilities, out *v1.Capabilities, s conversion.Scope) error { - return autoConvert_api_Capabilities_To_v1_Capabilities(in, out, s) +func Convert_v1_Job_To_batch_Job(in *Job, out *batch.Job, s conversion.Scope) error { + return autoConvert_v1_Job_To_batch_Job(in, out, s) } -func autoConvert_api_CephFSVolumeSource_To_v1_CephFSVolumeSource(in *api.CephFSVolumeSource, out *v1.CephFSVolumeSource, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.CephFSVolumeSource))(in) +func autoConvert_batch_Job_To_v1_Job(in *batch.Job, out *Job, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err } - if in.Monitors != nil { - out.Monitors = make([]string, len(in.Monitors)) - for i := range in.Monitors { - out.Monitors[i] = in.Monitors[i] - } - } else { - out.Monitors = nil + // TODO: Inefficient conversion - can we improve it? 
+ if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { + return err } - out.Path = in.Path - out.User = in.User - out.SecretFile = in.SecretFile - // unable to generate simple pointer conversion for api.LocalObjectReference -> v1.LocalObjectReference - if in.SecretRef != nil { - out.SecretRef = new(v1.LocalObjectReference) - if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(in.SecretRef, out.SecretRef, s); err != nil { - return err - } - } else { - out.SecretRef = nil + if err := Convert_batch_JobSpec_To_v1_JobSpec(&in.Spec, &out.Spec, s); err != nil { + return err } - out.ReadOnly = in.ReadOnly - return nil -} - -func Convert_api_CephFSVolumeSource_To_v1_CephFSVolumeSource(in *api.CephFSVolumeSource, out *v1.CephFSVolumeSource, s conversion.Scope) error { - return autoConvert_api_CephFSVolumeSource_To_v1_CephFSVolumeSource(in, out, s) -} - -func autoConvert_api_CinderVolumeSource_To_v1_CinderVolumeSource(in *api.CinderVolumeSource, out *v1.CinderVolumeSource, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.CinderVolumeSource))(in) + if err := Convert_batch_JobStatus_To_v1_JobStatus(&in.Status, &out.Status, s); err != nil { + return err } - out.VolumeID = in.VolumeID - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly return nil } -func Convert_api_CinderVolumeSource_To_v1_CinderVolumeSource(in *api.CinderVolumeSource, out *v1.CinderVolumeSource, s conversion.Scope) error { - return autoConvert_api_CinderVolumeSource_To_v1_CinderVolumeSource(in, out, s) +func Convert_batch_Job_To_v1_Job(in *batch.Job, out *Job, s conversion.Scope) error { + return autoConvert_batch_Job_To_v1_Job(in, out, s) } -func autoConvert_api_ConfigMapKeySelector_To_v1_ConfigMapKeySelector(in *api.ConfigMapKeySelector, out *v1.ConfigMapKeySelector, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.ConfigMapKeySelector))(in) +func autoConvert_v1_JobCondition_To_batch_JobCondition(in *JobCondition, out *batch.JobCondition, s conversion.Scope) error { + out.Type = batch.JobConditionType(in.Type) + out.Status = api.ConditionStatus(in.Status) + if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastProbeTime, &out.LastProbeTime, s); err != nil { + return err } - if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastTransitionTime, &out.LastTransitionTime, s); err != nil { return err } - out.Key = in.Key + out.Reason = in.Reason + out.Message = in.Message return nil } -func Convert_api_ConfigMapKeySelector_To_v1_ConfigMapKeySelector(in *api.ConfigMapKeySelector, out *v1.ConfigMapKeySelector, s conversion.Scope) error { - return autoConvert_api_ConfigMapKeySelector_To_v1_ConfigMapKeySelector(in, out, s) +func Convert_v1_JobCondition_To_batch_JobCondition(in *JobCondition, out *batch.JobCondition, s conversion.Scope) error { + return autoConvert_v1_JobCondition_To_batch_JobCondition(in, out, s) } -func autoConvert_api_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource(in *api.ConfigMapVolumeSource, out *v1.ConfigMapVolumeSource, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.ConfigMapVolumeSource))(in) - } - if err := 
Convert_api_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { +func autoConvert_batch_JobCondition_To_v1_JobCondition(in *batch.JobCondition, out *JobCondition, s conversion.Scope) error { + out.Type = JobConditionType(in.Type) + out.Status = api_v1.ConditionStatus(in.Status) + if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastProbeTime, &out.LastProbeTime, s); err != nil { return err } - if in.Items != nil { - out.Items = make([]v1.KeyToPath, len(in.Items)) - for i := range in.Items { - if err := Convert_api_KeyToPath_To_v1_KeyToPath(&in.Items[i], &out.Items[i], s); err != nil { - return err - } - } - } else { - out.Items = nil + if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastTransitionTime, &out.LastTransitionTime, s); err != nil { + return err } + out.Reason = in.Reason + out.Message = in.Message return nil } -func Convert_api_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource(in *api.ConfigMapVolumeSource, out *v1.ConfigMapVolumeSource, s conversion.Scope) error { - return autoConvert_api_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource(in, out, s) +func Convert_batch_JobCondition_To_v1_JobCondition(in *batch.JobCondition, out *JobCondition, s conversion.Scope) error { + return autoConvert_batch_JobCondition_To_v1_JobCondition(in, out, s) } -func autoConvert_api_Container_To_v1_Container(in *api.Container, out *v1.Container, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.Container))(in) - } - out.Name = in.Name - out.Image = in.Image - if in.Command != nil { - out.Command = make([]string, len(in.Command)) - for i := range in.Command { - out.Command[i] = in.Command[i] - } - } else { - out.Command = nil - } - if in.Args != nil { - out.Args = make([]string, len(in.Args)) - for i := range in.Args { - out.Args[i] = in.Args[i] - } - } else { - out.Args = nil - } - out.WorkingDir = in.WorkingDir - if in.Ports != nil { - out.Ports = make([]v1.ContainerPort, len(in.Ports)) - for i := range in.Ports { - if err := Convert_api_ContainerPort_To_v1_ContainerPort(&in.Ports[i], &out.Ports[i], s); err != nil { - return err - } - } - } else { - out.Ports = nil - } - if in.Env != nil { - out.Env = make([]v1.EnvVar, len(in.Env)) - for i := range in.Env { - if err := Convert_api_EnvVar_To_v1_EnvVar(&in.Env[i], &out.Env[i], s); err != nil { - return err - } - } - } else { - out.Env = nil +func autoConvert_v1_JobList_To_batch_JobList(in *JobList, out *batch.JobList, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err } - if err := Convert_api_ResourceRequirements_To_v1_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil { + if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { return err } - if in.VolumeMounts != nil { - out.VolumeMounts = make([]v1.VolumeMount, len(in.VolumeMounts)) - for i := range in.VolumeMounts { - if err := Convert_api_VolumeMount_To_v1_VolumeMount(&in.VolumeMounts[i], &out.VolumeMounts[i], s); err != nil { + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]batch.Job, len(*in)) + for i := range *in { + if err := Convert_v1_Job_To_batch_Job(&(*in)[i], &(*out)[i], s); err != nil { return err } } } else { - out.VolumeMounts = nil - } - // unable to generate simple pointer conversion for api.Probe -> 
v1.Probe - if in.LivenessProbe != nil { - out.LivenessProbe = new(v1.Probe) - if err := Convert_api_Probe_To_v1_Probe(in.LivenessProbe, out.LivenessProbe, s); err != nil { - return err - } - } else { - out.LivenessProbe = nil - } - // unable to generate simple pointer conversion for api.Probe -> v1.Probe - if in.ReadinessProbe != nil { - out.ReadinessProbe = new(v1.Probe) - if err := Convert_api_Probe_To_v1_Probe(in.ReadinessProbe, out.ReadinessProbe, s); err != nil { - return err - } - } else { - out.ReadinessProbe = nil - } - // unable to generate simple pointer conversion for api.Lifecycle -> v1.Lifecycle - if in.Lifecycle != nil { - out.Lifecycle = new(v1.Lifecycle) - if err := Convert_api_Lifecycle_To_v1_Lifecycle(in.Lifecycle, out.Lifecycle, s); err != nil { - return err - } - } else { - out.Lifecycle = nil - } - out.TerminationMessagePath = in.TerminationMessagePath - out.ImagePullPolicy = v1.PullPolicy(in.ImagePullPolicy) - // unable to generate simple pointer conversion for api.SecurityContext -> v1.SecurityContext - if in.SecurityContext != nil { - out.SecurityContext = new(v1.SecurityContext) - if err := Convert_api_SecurityContext_To_v1_SecurityContext(in.SecurityContext, out.SecurityContext, s); err != nil { - return err - } - } else { - out.SecurityContext = nil - } - out.Stdin = in.Stdin - out.StdinOnce = in.StdinOnce - out.TTY = in.TTY - return nil -} - -func Convert_api_Container_To_v1_Container(in *api.Container, out *v1.Container, s conversion.Scope) error { - return autoConvert_api_Container_To_v1_Container(in, out, s) -} - -func autoConvert_api_ContainerPort_To_v1_ContainerPort(in *api.ContainerPort, out *v1.ContainerPort, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.ContainerPort))(in) + out.Items = nil } - out.Name = in.Name - out.HostPort = int32(in.HostPort) - out.ContainerPort = int32(in.ContainerPort) - out.Protocol = v1.Protocol(in.Protocol) - out.HostIP = in.HostIP return nil } -func Convert_api_ContainerPort_To_v1_ContainerPort(in *api.ContainerPort, out *v1.ContainerPort, s conversion.Scope) error { - return autoConvert_api_ContainerPort_To_v1_ContainerPort(in, out, s) +func Convert_v1_JobList_To_batch_JobList(in *JobList, out *batch.JobList, s conversion.Scope) error { + return autoConvert_v1_JobList_To_batch_JobList(in, out, s) } -func autoConvert_api_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile(in *api.DownwardAPIVolumeFile, out *v1.DownwardAPIVolumeFile, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.DownwardAPIVolumeFile))(in) - } - out.Path = in.Path - if err := Convert_api_ObjectFieldSelector_To_v1_ObjectFieldSelector(&in.FieldRef, &out.FieldRef, s); err != nil { +func autoConvert_batch_JobList_To_v1_JobList(in *batch.JobList, out *JobList, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } - return nil -} - -func Convert_api_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile(in *api.DownwardAPIVolumeFile, out *v1.DownwardAPIVolumeFile, s conversion.Scope) error { - return autoConvert_api_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile(in, out, s) -} - -func autoConvert_api_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource(in *api.DownwardAPIVolumeSource, out *v1.DownwardAPIVolumeSource, s conversion.Scope) error { - if defaulting, found := 
s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.DownwardAPIVolumeSource))(in) + if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { + return err } if in.Items != nil { - out.Items = make([]v1.DownwardAPIVolumeFile, len(in.Items)) - for i := range in.Items { - if err := Convert_api_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile(&in.Items[i], &out.Items[i], s); err != nil { + in, out := &in.Items, &out.Items + *out = make([]Job, len(*in)) + for i := range *in { + if err := Convert_batch_Job_To_v1_Job(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -322,2744 +175,156 @@ func autoConvert_api_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource(in *a return nil } -func Convert_api_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource(in *api.DownwardAPIVolumeSource, out *v1.DownwardAPIVolumeSource, s conversion.Scope) error { - return autoConvert_api_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource(in, out, s) -} - -func autoConvert_api_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource(in *api.EmptyDirVolumeSource, out *v1.EmptyDirVolumeSource, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.EmptyDirVolumeSource))(in) - } - out.Medium = v1.StorageMedium(in.Medium) - return nil -} - -func Convert_api_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource(in *api.EmptyDirVolumeSource, out *v1.EmptyDirVolumeSource, s conversion.Scope) error { - return autoConvert_api_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource(in, out, s) +func Convert_batch_JobList_To_v1_JobList(in *batch.JobList, out *JobList, s conversion.Scope) error { + return autoConvert_batch_JobList_To_v1_JobList(in, out, s) } -func autoConvert_api_EnvVar_To_v1_EnvVar(in *api.EnvVar, out *v1.EnvVar, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.EnvVar))(in) - } - out.Name = in.Name - out.Value = in.Value - // unable to generate simple pointer conversion for api.EnvVarSource -> v1.EnvVarSource - if in.ValueFrom != nil { - out.ValueFrom = new(v1.EnvVarSource) - if err := Convert_api_EnvVarSource_To_v1_EnvVarSource(in.ValueFrom, out.ValueFrom, s); err != nil { +func autoConvert_v1_JobSpec_To_batch_JobSpec(in *JobSpec, out *batch.JobSpec, s conversion.Scope) error { + out.Parallelism = in.Parallelism + out.Completions = in.Completions + out.ActiveDeadlineSeconds = in.ActiveDeadlineSeconds + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(unversioned.LabelSelector) + if err := Convert_v1_LabelSelector_To_unversioned_LabelSelector(*in, *out, s); err != nil { return err } } else { - out.ValueFrom = nil + out.Selector = nil + } + out.ManualSelector = in.ManualSelector + if err := api_v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err } return nil } -func Convert_api_EnvVar_To_v1_EnvVar(in *api.EnvVar, out *v1.EnvVar, s conversion.Scope) error { - return autoConvert_api_EnvVar_To_v1_EnvVar(in, out, s) -} - -func autoConvert_api_EnvVarSource_To_v1_EnvVarSource(in *api.EnvVarSource, out *v1.EnvVarSource, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.EnvVarSource))(in) - } - // unable to generate simple pointer conversion for api.ObjectFieldSelector -> v1.ObjectFieldSelector - if in.FieldRef != nil { - out.FieldRef 
= new(v1.ObjectFieldSelector) - if err := Convert_api_ObjectFieldSelector_To_v1_ObjectFieldSelector(in.FieldRef, out.FieldRef, s); err != nil { - return err - } - } else { - out.FieldRef = nil - } - // unable to generate simple pointer conversion for api.ConfigMapKeySelector -> v1.ConfigMapKeySelector - if in.ConfigMapKeyRef != nil { - out.ConfigMapKeyRef = new(v1.ConfigMapKeySelector) - if err := Convert_api_ConfigMapKeySelector_To_v1_ConfigMapKeySelector(in.ConfigMapKeyRef, out.ConfigMapKeyRef, s); err != nil { - return err - } - } else { - out.ConfigMapKeyRef = nil - } - // unable to generate simple pointer conversion for api.SecretKeySelector -> v1.SecretKeySelector - if in.SecretKeyRef != nil { - out.SecretKeyRef = new(v1.SecretKeySelector) - if err := Convert_api_SecretKeySelector_To_v1_SecretKeySelector(in.SecretKeyRef, out.SecretKeyRef, s); err != nil { +func autoConvert_batch_JobSpec_To_v1_JobSpec(in *batch.JobSpec, out *JobSpec, s conversion.Scope) error { + out.Parallelism = in.Parallelism + out.Completions = in.Completions + out.ActiveDeadlineSeconds = in.ActiveDeadlineSeconds + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(LabelSelector) + if err := Convert_unversioned_LabelSelector_To_v1_LabelSelector(*in, *out, s); err != nil { return err } } else { - out.SecretKeyRef = nil - } - return nil -} - -func Convert_api_EnvVarSource_To_v1_EnvVarSource(in *api.EnvVarSource, out *v1.EnvVarSource, s conversion.Scope) error { - return autoConvert_api_EnvVarSource_To_v1_EnvVarSource(in, out, s) -} - -func autoConvert_api_ExecAction_To_v1_ExecAction(in *api.ExecAction, out *v1.ExecAction, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.ExecAction))(in) + out.Selector = nil } - if in.Command != nil { - out.Command = make([]string, len(in.Command)) - for i := range in.Command { - out.Command[i] = in.Command[i] - } - } else { - out.Command = nil + out.ManualSelector = in.ManualSelector + if err := api_v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err } return nil } -func Convert_api_ExecAction_To_v1_ExecAction(in *api.ExecAction, out *v1.ExecAction, s conversion.Scope) error { - return autoConvert_api_ExecAction_To_v1_ExecAction(in, out, s) -} - -func autoConvert_api_FCVolumeSource_To_v1_FCVolumeSource(in *api.FCVolumeSource, out *v1.FCVolumeSource, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.FCVolumeSource))(in) - } - if in.TargetWWNs != nil { - out.TargetWWNs = make([]string, len(in.TargetWWNs)) - for i := range in.TargetWWNs { - out.TargetWWNs[i] = in.TargetWWNs[i] +func autoConvert_v1_JobStatus_To_batch_JobStatus(in *JobStatus, out *batch.JobStatus, s conversion.Scope) error { + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]batch.JobCondition, len(*in)) + for i := range *in { + if err := Convert_v1_JobCondition_To_batch_JobCondition(&(*in)[i], &(*out)[i], s); err != nil { + return err + } } } else { - out.TargetWWNs = nil - } - if in.Lun != nil { - out.Lun = new(int32) - *out.Lun = int32(*in.Lun) - } else { - out.Lun = nil + out.Conditions = nil } - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly + out.StartTime = in.StartTime + out.CompletionTime = in.CompletionTime + out.Active = in.Active + out.Succeeded = in.Succeeded + out.Failed = in.Failed return nil } -func 
Convert_api_FCVolumeSource_To_v1_FCVolumeSource(in *api.FCVolumeSource, out *v1.FCVolumeSource, s conversion.Scope) error { - return autoConvert_api_FCVolumeSource_To_v1_FCVolumeSource(in, out, s) +func Convert_v1_JobStatus_To_batch_JobStatus(in *JobStatus, out *batch.JobStatus, s conversion.Scope) error { + return autoConvert_v1_JobStatus_To_batch_JobStatus(in, out, s) } -func autoConvert_api_FlexVolumeSource_To_v1_FlexVolumeSource(in *api.FlexVolumeSource, out *v1.FlexVolumeSource, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.FlexVolumeSource))(in) - } - out.Driver = in.Driver - out.FSType = in.FSType - // unable to generate simple pointer conversion for api.LocalObjectReference -> v1.LocalObjectReference - if in.SecretRef != nil { - out.SecretRef = new(v1.LocalObjectReference) - if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(in.SecretRef, out.SecretRef, s); err != nil { - return err - } - } else { - out.SecretRef = nil - } - out.ReadOnly = in.ReadOnly - if in.Options != nil { - out.Options = make(map[string]string) - for key, val := range in.Options { - out.Options[key] = val +func autoConvert_batch_JobStatus_To_v1_JobStatus(in *batch.JobStatus, out *JobStatus, s conversion.Scope) error { + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]JobCondition, len(*in)) + for i := range *in { + if err := Convert_batch_JobCondition_To_v1_JobCondition(&(*in)[i], &(*out)[i], s); err != nil { + return err + } } } else { - out.Options = nil - } - return nil -} - -func Convert_api_FlexVolumeSource_To_v1_FlexVolumeSource(in *api.FlexVolumeSource, out *v1.FlexVolumeSource, s conversion.Scope) error { - return autoConvert_api_FlexVolumeSource_To_v1_FlexVolumeSource(in, out, s) -} - -func autoConvert_api_FlockerVolumeSource_To_v1_FlockerVolumeSource(in *api.FlockerVolumeSource, out *v1.FlockerVolumeSource, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.FlockerVolumeSource))(in) - } - out.DatasetName = in.DatasetName - return nil -} - -func Convert_api_FlockerVolumeSource_To_v1_FlockerVolumeSource(in *api.FlockerVolumeSource, out *v1.FlockerVolumeSource, s conversion.Scope) error { - return autoConvert_api_FlockerVolumeSource_To_v1_FlockerVolumeSource(in, out, s) -} - -func autoConvert_api_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource(in *api.GCEPersistentDiskVolumeSource, out *v1.GCEPersistentDiskVolumeSource, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.GCEPersistentDiskVolumeSource))(in) - } - out.PDName = in.PDName - out.FSType = in.FSType - out.Partition = int32(in.Partition) - out.ReadOnly = in.ReadOnly - return nil -} - -func Convert_api_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource(in *api.GCEPersistentDiskVolumeSource, out *v1.GCEPersistentDiskVolumeSource, s conversion.Scope) error { - return autoConvert_api_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource(in, out, s) -} - -func autoConvert_api_GitRepoVolumeSource_To_v1_GitRepoVolumeSource(in *api.GitRepoVolumeSource, out *v1.GitRepoVolumeSource, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.GitRepoVolumeSource))(in) - } - out.Repository = in.Repository - out.Revision = in.Revision - out.Directory = 
in.Directory - return nil -} - -func Convert_api_GitRepoVolumeSource_To_v1_GitRepoVolumeSource(in *api.GitRepoVolumeSource, out *v1.GitRepoVolumeSource, s conversion.Scope) error { - return autoConvert_api_GitRepoVolumeSource_To_v1_GitRepoVolumeSource(in, out, s) -} - -func autoConvert_api_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource(in *api.GlusterfsVolumeSource, out *v1.GlusterfsVolumeSource, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.GlusterfsVolumeSource))(in) + out.Conditions = nil } - out.EndpointsName = in.EndpointsName - out.Path = in.Path - out.ReadOnly = in.ReadOnly + out.StartTime = in.StartTime + out.CompletionTime = in.CompletionTime + out.Active = in.Active + out.Succeeded = in.Succeeded + out.Failed = in.Failed return nil } -func Convert_api_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource(in *api.GlusterfsVolumeSource, out *v1.GlusterfsVolumeSource, s conversion.Scope) error { - return autoConvert_api_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource(in, out, s) +func Convert_batch_JobStatus_To_v1_JobStatus(in *batch.JobStatus, out *JobStatus, s conversion.Scope) error { + return autoConvert_batch_JobStatus_To_v1_JobStatus(in, out, s) } -func autoConvert_api_HTTPGetAction_To_v1_HTTPGetAction(in *api.HTTPGetAction, out *v1.HTTPGetAction, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.HTTPGetAction))(in) - } - out.Path = in.Path - if err := api.Convert_intstr_IntOrString_To_intstr_IntOrString(&in.Port, &out.Port, s); err != nil { - return err - } - out.Host = in.Host - out.Scheme = v1.URIScheme(in.Scheme) - if in.HTTPHeaders != nil { - out.HTTPHeaders = make([]v1.HTTPHeader, len(in.HTTPHeaders)) - for i := range in.HTTPHeaders { - if err := Convert_api_HTTPHeader_To_v1_HTTPHeader(&in.HTTPHeaders[i], &out.HTTPHeaders[i], s); err != nil { +func autoConvert_v1_LabelSelector_To_unversioned_LabelSelector(in *LabelSelector, out *unversioned.LabelSelector, s conversion.Scope) error { + out.MatchLabels = in.MatchLabels + if in.MatchExpressions != nil { + in, out := &in.MatchExpressions, &out.MatchExpressions + *out = make([]unversioned.LabelSelectorRequirement, len(*in)) + for i := range *in { + if err := Convert_v1_LabelSelectorRequirement_To_unversioned_LabelSelectorRequirement(&(*in)[i], &(*out)[i], s); err != nil { return err } } } else { - out.HTTPHeaders = nil - } - return nil -} - -func Convert_api_HTTPGetAction_To_v1_HTTPGetAction(in *api.HTTPGetAction, out *v1.HTTPGetAction, s conversion.Scope) error { - return autoConvert_api_HTTPGetAction_To_v1_HTTPGetAction(in, out, s) -} - -func autoConvert_api_HTTPHeader_To_v1_HTTPHeader(in *api.HTTPHeader, out *v1.HTTPHeader, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.HTTPHeader))(in) + out.MatchExpressions = nil } - out.Name = in.Name - out.Value = in.Value return nil } -func Convert_api_HTTPHeader_To_v1_HTTPHeader(in *api.HTTPHeader, out *v1.HTTPHeader, s conversion.Scope) error { - return autoConvert_api_HTTPHeader_To_v1_HTTPHeader(in, out, s) +func Convert_v1_LabelSelector_To_unversioned_LabelSelector(in *LabelSelector, out *unversioned.LabelSelector, s conversion.Scope) error { + return autoConvert_v1_LabelSelector_To_unversioned_LabelSelector(in, out, s) } -func autoConvert_api_Handler_To_v1_Handler(in *api.Handler, out *v1.Handler, s conversion.Scope) error { - if 
defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.Handler))(in) - } - // unable to generate simple pointer conversion for api.ExecAction -> v1.ExecAction - if in.Exec != nil { - out.Exec = new(v1.ExecAction) - if err := Convert_api_ExecAction_To_v1_ExecAction(in.Exec, out.Exec, s); err != nil { - return err - } - } else { - out.Exec = nil - } - // unable to generate simple pointer conversion for api.HTTPGetAction -> v1.HTTPGetAction - if in.HTTPGet != nil { - out.HTTPGet = new(v1.HTTPGetAction) - if err := Convert_api_HTTPGetAction_To_v1_HTTPGetAction(in.HTTPGet, out.HTTPGet, s); err != nil { - return err - } - } else { - out.HTTPGet = nil - } - // unable to generate simple pointer conversion for api.TCPSocketAction -> v1.TCPSocketAction - if in.TCPSocket != nil { - out.TCPSocket = new(v1.TCPSocketAction) - if err := Convert_api_TCPSocketAction_To_v1_TCPSocketAction(in.TCPSocket, out.TCPSocket, s); err != nil { - return err +func autoConvert_unversioned_LabelSelector_To_v1_LabelSelector(in *unversioned.LabelSelector, out *LabelSelector, s conversion.Scope) error { + out.MatchLabels = in.MatchLabels + if in.MatchExpressions != nil { + in, out := &in.MatchExpressions, &out.MatchExpressions + *out = make([]LabelSelectorRequirement, len(*in)) + for i := range *in { + if err := Convert_unversioned_LabelSelectorRequirement_To_v1_LabelSelectorRequirement(&(*in)[i], &(*out)[i], s); err != nil { + return err + } } } else { - out.TCPSocket = nil - } - return nil -} - -func Convert_api_Handler_To_v1_Handler(in *api.Handler, out *v1.Handler, s conversion.Scope) error { - return autoConvert_api_Handler_To_v1_Handler(in, out, s) -} - -func autoConvert_api_HostPathVolumeSource_To_v1_HostPathVolumeSource(in *api.HostPathVolumeSource, out *v1.HostPathVolumeSource, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.HostPathVolumeSource))(in) - } - out.Path = in.Path - return nil -} - -func Convert_api_HostPathVolumeSource_To_v1_HostPathVolumeSource(in *api.HostPathVolumeSource, out *v1.HostPathVolumeSource, s conversion.Scope) error { - return autoConvert_api_HostPathVolumeSource_To_v1_HostPathVolumeSource(in, out, s) -} - -func autoConvert_api_ISCSIVolumeSource_To_v1_ISCSIVolumeSource(in *api.ISCSIVolumeSource, out *v1.ISCSIVolumeSource, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.ISCSIVolumeSource))(in) + out.MatchExpressions = nil } - out.TargetPortal = in.TargetPortal - out.IQN = in.IQN - out.Lun = int32(in.Lun) - out.ISCSIInterface = in.ISCSIInterface - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly return nil } -func Convert_api_ISCSIVolumeSource_To_v1_ISCSIVolumeSource(in *api.ISCSIVolumeSource, out *v1.ISCSIVolumeSource, s conversion.Scope) error { - return autoConvert_api_ISCSIVolumeSource_To_v1_ISCSIVolumeSource(in, out, s) +func Convert_unversioned_LabelSelector_To_v1_LabelSelector(in *unversioned.LabelSelector, out *LabelSelector, s conversion.Scope) error { + return autoConvert_unversioned_LabelSelector_To_v1_LabelSelector(in, out, s) } -func autoConvert_api_KeyToPath_To_v1_KeyToPath(in *api.KeyToPath, out *v1.KeyToPath, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.KeyToPath))(in) - } +func autoConvert_v1_LabelSelectorRequirement_To_unversioned_LabelSelectorRequirement(in *LabelSelectorRequirement, 
out *unversioned.LabelSelectorRequirement, s conversion.Scope) error { out.Key = in.Key - out.Path = in.Path - return nil -} - -func Convert_api_KeyToPath_To_v1_KeyToPath(in *api.KeyToPath, out *v1.KeyToPath, s conversion.Scope) error { - return autoConvert_api_KeyToPath_To_v1_KeyToPath(in, out, s) -} - -func autoConvert_api_Lifecycle_To_v1_Lifecycle(in *api.Lifecycle, out *v1.Lifecycle, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.Lifecycle))(in) - } - // unable to generate simple pointer conversion for api.Handler -> v1.Handler - if in.PostStart != nil { - out.PostStart = new(v1.Handler) - if err := Convert_api_Handler_To_v1_Handler(in.PostStart, out.PostStart, s); err != nil { - return err - } - } else { - out.PostStart = nil - } - // unable to generate simple pointer conversion for api.Handler -> v1.Handler - if in.PreStop != nil { - out.PreStop = new(v1.Handler) - if err := Convert_api_Handler_To_v1_Handler(in.PreStop, out.PreStop, s); err != nil { - return err - } - } else { - out.PreStop = nil - } - return nil -} - -func Convert_api_Lifecycle_To_v1_Lifecycle(in *api.Lifecycle, out *v1.Lifecycle, s conversion.Scope) error { - return autoConvert_api_Lifecycle_To_v1_Lifecycle(in, out, s) -} - -func autoConvert_api_LocalObjectReference_To_v1_LocalObjectReference(in *api.LocalObjectReference, out *v1.LocalObjectReference, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.LocalObjectReference))(in) - } - out.Name = in.Name + out.Operator = unversioned.LabelSelectorOperator(in.Operator) + out.Values = in.Values return nil } -func Convert_api_LocalObjectReference_To_v1_LocalObjectReference(in *api.LocalObjectReference, out *v1.LocalObjectReference, s conversion.Scope) error { - return autoConvert_api_LocalObjectReference_To_v1_LocalObjectReference(in, out, s) +func Convert_v1_LabelSelectorRequirement_To_unversioned_LabelSelectorRequirement(in *LabelSelectorRequirement, out *unversioned.LabelSelectorRequirement, s conversion.Scope) error { + return autoConvert_v1_LabelSelectorRequirement_To_unversioned_LabelSelectorRequirement(in, out, s) } -func autoConvert_api_NFSVolumeSource_To_v1_NFSVolumeSource(in *api.NFSVolumeSource, out *v1.NFSVolumeSource, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.NFSVolumeSource))(in) - } - out.Server = in.Server - out.Path = in.Path - out.ReadOnly = in.ReadOnly +func autoConvert_unversioned_LabelSelectorRequirement_To_v1_LabelSelectorRequirement(in *unversioned.LabelSelectorRequirement, out *LabelSelectorRequirement, s conversion.Scope) error { + out.Key = in.Key + out.Operator = LabelSelectorOperator(in.Operator) + out.Values = in.Values return nil } -func Convert_api_NFSVolumeSource_To_v1_NFSVolumeSource(in *api.NFSVolumeSource, out *v1.NFSVolumeSource, s conversion.Scope) error { - return autoConvert_api_NFSVolumeSource_To_v1_NFSVolumeSource(in, out, s) -} - -func autoConvert_api_ObjectFieldSelector_To_v1_ObjectFieldSelector(in *api.ObjectFieldSelector, out *v1.ObjectFieldSelector, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.ObjectFieldSelector))(in) - } - out.APIVersion = in.APIVersion - out.FieldPath = in.FieldPath - return nil -} - -func Convert_api_ObjectFieldSelector_To_v1_ObjectFieldSelector(in *api.ObjectFieldSelector, 
out *v1.ObjectFieldSelector, s conversion.Scope) error { - return autoConvert_api_ObjectFieldSelector_To_v1_ObjectFieldSelector(in, out, s) -} - -func autoConvert_api_ObjectMeta_To_v1_ObjectMeta(in *api.ObjectMeta, out *v1.ObjectMeta, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.ObjectMeta))(in) - } - out.Name = in.Name - out.GenerateName = in.GenerateName - out.Namespace = in.Namespace - out.SelfLink = in.SelfLink - out.UID = in.UID - out.ResourceVersion = in.ResourceVersion - out.Generation = in.Generation - if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.CreationTimestamp, &out.CreationTimestamp, s); err != nil { - return err - } - // unable to generate simple pointer conversion for unversioned.Time -> unversioned.Time - if in.DeletionTimestamp != nil { - out.DeletionTimestamp = new(unversioned.Time) - if err := api.Convert_unversioned_Time_To_unversioned_Time(in.DeletionTimestamp, out.DeletionTimestamp, s); err != nil { - return err - } - } else { - out.DeletionTimestamp = nil - } - if in.DeletionGracePeriodSeconds != nil { - out.DeletionGracePeriodSeconds = new(int64) - *out.DeletionGracePeriodSeconds = *in.DeletionGracePeriodSeconds - } else { - out.DeletionGracePeriodSeconds = nil - } - if in.Labels != nil { - out.Labels = make(map[string]string) - for key, val := range in.Labels { - out.Labels[key] = val - } - } else { - out.Labels = nil - } - if in.Annotations != nil { - out.Annotations = make(map[string]string) - for key, val := range in.Annotations { - out.Annotations[key] = val - } - } else { - out.Annotations = nil - } - return nil -} - -func Convert_api_ObjectMeta_To_v1_ObjectMeta(in *api.ObjectMeta, out *v1.ObjectMeta, s conversion.Scope) error { - return autoConvert_api_ObjectMeta_To_v1_ObjectMeta(in, out, s) -} - -func autoConvert_api_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource(in *api.PersistentVolumeClaimVolumeSource, out *v1.PersistentVolumeClaimVolumeSource, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.PersistentVolumeClaimVolumeSource))(in) - } - out.ClaimName = in.ClaimName - out.ReadOnly = in.ReadOnly - return nil -} - -func Convert_api_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource(in *api.PersistentVolumeClaimVolumeSource, out *v1.PersistentVolumeClaimVolumeSource, s conversion.Scope) error { - return autoConvert_api_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource(in, out, s) -} - -func autoConvert_api_PodSpec_To_v1_PodSpec(in *api.PodSpec, out *v1.PodSpec, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.PodSpec))(in) - } - if in.Volumes != nil { - out.Volumes = make([]v1.Volume, len(in.Volumes)) - for i := range in.Volumes { - if err := Convert_api_Volume_To_v1_Volume(&in.Volumes[i], &out.Volumes[i], s); err != nil { - return err - } - } - } else { - out.Volumes = nil - } - if in.Containers != nil { - out.Containers = make([]v1.Container, len(in.Containers)) - for i := range in.Containers { - if err := Convert_api_Container_To_v1_Container(&in.Containers[i], &out.Containers[i], s); err != nil { - return err - } - } - } else { - out.Containers = nil - } - out.RestartPolicy = v1.RestartPolicy(in.RestartPolicy) - if in.TerminationGracePeriodSeconds != nil { - out.TerminationGracePeriodSeconds = new(int64) - 
*out.TerminationGracePeriodSeconds = *in.TerminationGracePeriodSeconds - } else { - out.TerminationGracePeriodSeconds = nil - } - if in.ActiveDeadlineSeconds != nil { - out.ActiveDeadlineSeconds = new(int64) - *out.ActiveDeadlineSeconds = *in.ActiveDeadlineSeconds - } else { - out.ActiveDeadlineSeconds = nil - } - out.DNSPolicy = v1.DNSPolicy(in.DNSPolicy) - if in.NodeSelector != nil { - out.NodeSelector = make(map[string]string) - for key, val := range in.NodeSelector { - out.NodeSelector[key] = val - } - } else { - out.NodeSelector = nil - } - out.ServiceAccountName = in.ServiceAccountName - out.NodeName = in.NodeName - // unable to generate simple pointer conversion for api.PodSecurityContext -> v1.PodSecurityContext - if in.SecurityContext != nil { - if err := s.Convert(&in.SecurityContext, &out.SecurityContext, 0); err != nil { - return err - } - } else { - out.SecurityContext = nil - } - if in.ImagePullSecrets != nil { - out.ImagePullSecrets = make([]v1.LocalObjectReference, len(in.ImagePullSecrets)) - for i := range in.ImagePullSecrets { - if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(&in.ImagePullSecrets[i], &out.ImagePullSecrets[i], s); err != nil { - return err - } - } - } else { - out.ImagePullSecrets = nil - } - return nil -} - -func autoConvert_api_PodTemplateSpec_To_v1_PodTemplateSpec(in *api.PodTemplateSpec, out *v1.PodTemplateSpec, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.PodTemplateSpec))(in) - } - if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } - if err := Convert_api_PodSpec_To_v1_PodSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - return nil -} - -func Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(in *api.PodTemplateSpec, out *v1.PodTemplateSpec, s conversion.Scope) error { - return autoConvert_api_PodTemplateSpec_To_v1_PodTemplateSpec(in, out, s) -} - -func autoConvert_api_Probe_To_v1_Probe(in *api.Probe, out *v1.Probe, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.Probe))(in) - } - if err := Convert_api_Handler_To_v1_Handler(&in.Handler, &out.Handler, s); err != nil { - return err - } - out.InitialDelaySeconds = int32(in.InitialDelaySeconds) - out.TimeoutSeconds = int32(in.TimeoutSeconds) - out.PeriodSeconds = int32(in.PeriodSeconds) - out.SuccessThreshold = int32(in.SuccessThreshold) - out.FailureThreshold = int32(in.FailureThreshold) - return nil -} - -func Convert_api_Probe_To_v1_Probe(in *api.Probe, out *v1.Probe, s conversion.Scope) error { - return autoConvert_api_Probe_To_v1_Probe(in, out, s) -} - -func autoConvert_api_RBDVolumeSource_To_v1_RBDVolumeSource(in *api.RBDVolumeSource, out *v1.RBDVolumeSource, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.RBDVolumeSource))(in) - } - if in.CephMonitors != nil { - out.CephMonitors = make([]string, len(in.CephMonitors)) - for i := range in.CephMonitors { - out.CephMonitors[i] = in.CephMonitors[i] - } - } else { - out.CephMonitors = nil - } - out.RBDImage = in.RBDImage - out.FSType = in.FSType - out.RBDPool = in.RBDPool - out.RadosUser = in.RadosUser - out.Keyring = in.Keyring - // unable to generate simple pointer conversion for api.LocalObjectReference -> v1.LocalObjectReference - if in.SecretRef != nil { - out.SecretRef = new(v1.LocalObjectReference) - if 
-		if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(in.SecretRef, out.SecretRef, s); err != nil {
-			return err
-		}
-	} else {
-		out.SecretRef = nil
-	}
-	out.ReadOnly = in.ReadOnly
-	return nil
-}
-
-func Convert_api_RBDVolumeSource_To_v1_RBDVolumeSource(in *api.RBDVolumeSource, out *v1.RBDVolumeSource, s conversion.Scope) error {
-	return autoConvert_api_RBDVolumeSource_To_v1_RBDVolumeSource(in, out, s)
-}
-
-func autoConvert_api_ResourceRequirements_To_v1_ResourceRequirements(in *api.ResourceRequirements, out *v1.ResourceRequirements, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*api.ResourceRequirements))(in)
-	}
-	if in.Limits != nil {
-		out.Limits = make(v1.ResourceList)
-		for key, val := range in.Limits {
-			newVal := resource.Quantity{}
-			if err := api.Convert_resource_Quantity_To_resource_Quantity(&val, &newVal, s); err != nil {
-				return err
-			}
-			out.Limits[v1.ResourceName(key)] = newVal
-		}
-	} else {
-		out.Limits = nil
-	}
-	if in.Requests != nil {
-		out.Requests = make(v1.ResourceList)
-		for key, val := range in.Requests {
-			newVal := resource.Quantity{}
-			if err := api.Convert_resource_Quantity_To_resource_Quantity(&val, &newVal, s); err != nil {
-				return err
-			}
-			out.Requests[v1.ResourceName(key)] = newVal
-		}
-	} else {
-		out.Requests = nil
-	}
-	return nil
-}
-
-func Convert_api_ResourceRequirements_To_v1_ResourceRequirements(in *api.ResourceRequirements, out *v1.ResourceRequirements, s conversion.Scope) error {
-	return autoConvert_api_ResourceRequirements_To_v1_ResourceRequirements(in, out, s)
-}
-
-func autoConvert_api_SELinuxOptions_To_v1_SELinuxOptions(in *api.SELinuxOptions, out *v1.SELinuxOptions, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*api.SELinuxOptions))(in)
-	}
-	out.User = in.User
-	out.Role = in.Role
-	out.Type = in.Type
-	out.Level = in.Level
-	return nil
-}
-
-func Convert_api_SELinuxOptions_To_v1_SELinuxOptions(in *api.SELinuxOptions, out *v1.SELinuxOptions, s conversion.Scope) error {
-	return autoConvert_api_SELinuxOptions_To_v1_SELinuxOptions(in, out, s)
-}
-
-func autoConvert_api_SecretKeySelector_To_v1_SecretKeySelector(in *api.SecretKeySelector, out *v1.SecretKeySelector, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*api.SecretKeySelector))(in)
-	}
-	if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil {
-		return err
-	}
-	out.Key = in.Key
-	return nil
-}
-
-func Convert_api_SecretKeySelector_To_v1_SecretKeySelector(in *api.SecretKeySelector, out *v1.SecretKeySelector, s conversion.Scope) error {
-	return autoConvert_api_SecretKeySelector_To_v1_SecretKeySelector(in, out, s)
-}
-
-func autoConvert_api_SecretVolumeSource_To_v1_SecretVolumeSource(in *api.SecretVolumeSource, out *v1.SecretVolumeSource, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*api.SecretVolumeSource))(in)
-	}
-	out.SecretName = in.SecretName
-	return nil
-}
-
-func Convert_api_SecretVolumeSource_To_v1_SecretVolumeSource(in *api.SecretVolumeSource, out *v1.SecretVolumeSource, s conversion.Scope) error {
-	return autoConvert_api_SecretVolumeSource_To_v1_SecretVolumeSource(in, out, s)
-}
-
-func autoConvert_api_SecurityContext_To_v1_SecurityContext(in *api.SecurityContext, out *v1.SecurityContext, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*api.SecurityContext))(in)
-	}
-	// unable to generate simple pointer conversion for api.Capabilities -> v1.Capabilities
-	if in.Capabilities != nil {
-		out.Capabilities = new(v1.Capabilities)
-		if err := Convert_api_Capabilities_To_v1_Capabilities(in.Capabilities, out.Capabilities, s); err != nil {
-			return err
-		}
-	} else {
-		out.Capabilities = nil
-	}
-	if in.Privileged != nil {
-		out.Privileged = new(bool)
-		*out.Privileged = *in.Privileged
-	} else {
-		out.Privileged = nil
-	}
-	// unable to generate simple pointer conversion for api.SELinuxOptions -> v1.SELinuxOptions
-	if in.SELinuxOptions != nil {
-		out.SELinuxOptions = new(v1.SELinuxOptions)
-		if err := Convert_api_SELinuxOptions_To_v1_SELinuxOptions(in.SELinuxOptions, out.SELinuxOptions, s); err != nil {
-			return err
-		}
-	} else {
-		out.SELinuxOptions = nil
-	}
-	if in.RunAsUser != nil {
-		out.RunAsUser = new(int64)
-		*out.RunAsUser = *in.RunAsUser
-	} else {
-		out.RunAsUser = nil
-	}
-	if in.RunAsNonRoot != nil {
-		out.RunAsNonRoot = new(bool)
-		*out.RunAsNonRoot = *in.RunAsNonRoot
-	} else {
-		out.RunAsNonRoot = nil
-	}
-	if in.ReadOnlyRootFilesystem != nil {
-		out.ReadOnlyRootFilesystem = new(bool)
-		*out.ReadOnlyRootFilesystem = *in.ReadOnlyRootFilesystem
-	} else {
-		out.ReadOnlyRootFilesystem = nil
-	}
-	return nil
-}
-
-func Convert_api_SecurityContext_To_v1_SecurityContext(in *api.SecurityContext, out *v1.SecurityContext, s conversion.Scope) error {
-	return autoConvert_api_SecurityContext_To_v1_SecurityContext(in, out, s)
-}
-
-func autoConvert_api_TCPSocketAction_To_v1_TCPSocketAction(in *api.TCPSocketAction, out *v1.TCPSocketAction, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*api.TCPSocketAction))(in)
-	}
-	if err := api.Convert_intstr_IntOrString_To_intstr_IntOrString(&in.Port, &out.Port, s); err != nil {
-		return err
-	}
-	return nil
-}
-
-func Convert_api_TCPSocketAction_To_v1_TCPSocketAction(in *api.TCPSocketAction, out *v1.TCPSocketAction, s conversion.Scope) error {
-	return autoConvert_api_TCPSocketAction_To_v1_TCPSocketAction(in, out, s)
-}
-
-func autoConvert_api_Volume_To_v1_Volume(in *api.Volume, out *v1.Volume, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*api.Volume))(in)
-	}
-	out.Name = in.Name
-	if err := Convert_api_VolumeSource_To_v1_VolumeSource(&in.VolumeSource, &out.VolumeSource, s); err != nil {
-		return err
-	}
-	return nil
-}
-
-func Convert_api_Volume_To_v1_Volume(in *api.Volume, out *v1.Volume, s conversion.Scope) error {
-	return autoConvert_api_Volume_To_v1_Volume(in, out, s)
-}
-
-func autoConvert_api_VolumeMount_To_v1_VolumeMount(in *api.VolumeMount, out *v1.VolumeMount, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*api.VolumeMount))(in)
-	}
-	out.Name = in.Name
-	out.ReadOnly = in.ReadOnly
-	out.MountPath = in.MountPath
-	return nil
-}
-
-func Convert_api_VolumeMount_To_v1_VolumeMount(in *api.VolumeMount, out *v1.VolumeMount, s conversion.Scope) error {
-	return autoConvert_api_VolumeMount_To_v1_VolumeMount(in, out, s)
-}
-
-func autoConvert_api_VolumeSource_To_v1_VolumeSource(in *api.VolumeSource, out *v1.VolumeSource, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*api.VolumeSource))(in)
-	}
-	// unable to generate simple pointer conversion for api.HostPathVolumeSource -> v1.HostPathVolumeSource
-	if in.HostPath != nil {
-		out.HostPath = new(v1.HostPathVolumeSource)
-		if err := Convert_api_HostPathVolumeSource_To_v1_HostPathVolumeSource(in.HostPath, out.HostPath, s); err != nil {
-			return err
-		}
-	} else {
-		out.HostPath = nil
-	}
-	// unable to generate simple pointer conversion for api.EmptyDirVolumeSource -> v1.EmptyDirVolumeSource
-	if in.EmptyDir != nil {
-		out.EmptyDir = new(v1.EmptyDirVolumeSource)
-		if err := Convert_api_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource(in.EmptyDir, out.EmptyDir, s); err != nil {
-			return err
-		}
-	} else {
-		out.EmptyDir = nil
-	}
-	// unable to generate simple pointer conversion for api.GCEPersistentDiskVolumeSource -> v1.GCEPersistentDiskVolumeSource
-	if in.GCEPersistentDisk != nil {
-		out.GCEPersistentDisk = new(v1.GCEPersistentDiskVolumeSource)
-		if err := Convert_api_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource(in.GCEPersistentDisk, out.GCEPersistentDisk, s); err != nil {
-			return err
-		}
-	} else {
-		out.GCEPersistentDisk = nil
-	}
-	// unable to generate simple pointer conversion for api.AWSElasticBlockStoreVolumeSource -> v1.AWSElasticBlockStoreVolumeSource
-	if in.AWSElasticBlockStore != nil {
-		out.AWSElasticBlockStore = new(v1.AWSElasticBlockStoreVolumeSource)
-		if err := Convert_api_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource(in.AWSElasticBlockStore, out.AWSElasticBlockStore, s); err != nil {
-			return err
-		}
-	} else {
-		out.AWSElasticBlockStore = nil
-	}
-	// unable to generate simple pointer conversion for api.GitRepoVolumeSource -> v1.GitRepoVolumeSource
-	if in.GitRepo != nil {
-		out.GitRepo = new(v1.GitRepoVolumeSource)
-		if err := Convert_api_GitRepoVolumeSource_To_v1_GitRepoVolumeSource(in.GitRepo, out.GitRepo, s); err != nil {
-			return err
-		}
-	} else {
-		out.GitRepo = nil
-	}
-	// unable to generate simple pointer conversion for api.SecretVolumeSource -> v1.SecretVolumeSource
-	if in.Secret != nil {
-		out.Secret = new(v1.SecretVolumeSource)
-		if err := Convert_api_SecretVolumeSource_To_v1_SecretVolumeSource(in.Secret, out.Secret, s); err != nil {
-			return err
-		}
-	} else {
-		out.Secret = nil
-	}
-	// unable to generate simple pointer conversion for api.NFSVolumeSource -> v1.NFSVolumeSource
-	if in.NFS != nil {
-		out.NFS = new(v1.NFSVolumeSource)
-		if err := Convert_api_NFSVolumeSource_To_v1_NFSVolumeSource(in.NFS, out.NFS, s); err != nil {
-			return err
-		}
-	} else {
-		out.NFS = nil
-	}
-	// unable to generate simple pointer conversion for api.ISCSIVolumeSource -> v1.ISCSIVolumeSource
-	if in.ISCSI != nil {
-		out.ISCSI = new(v1.ISCSIVolumeSource)
-		if err := Convert_api_ISCSIVolumeSource_To_v1_ISCSIVolumeSource(in.ISCSI, out.ISCSI, s); err != nil {
-			return err
-		}
-	} else {
-		out.ISCSI = nil
-	}
-	// unable to generate simple pointer conversion for api.GlusterfsVolumeSource -> v1.GlusterfsVolumeSource
-	if in.Glusterfs != nil {
-		out.Glusterfs = new(v1.GlusterfsVolumeSource)
-		if err := Convert_api_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource(in.Glusterfs, out.Glusterfs, s); err != nil {
-			return err
-		}
-	} else {
-		out.Glusterfs = nil
-	}
-	// unable to generate simple pointer conversion for api.PersistentVolumeClaimVolumeSource -> v1.PersistentVolumeClaimVolumeSource
-	if in.PersistentVolumeClaim != nil {
-		out.PersistentVolumeClaim = new(v1.PersistentVolumeClaimVolumeSource)
-		if err := Convert_api_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource(in.PersistentVolumeClaim, out.PersistentVolumeClaim, s); err != nil {
-			return err
-		}
-	} else {
-		out.PersistentVolumeClaim = nil
-	}
-	// unable to generate simple pointer conversion for api.RBDVolumeSource -> v1.RBDVolumeSource
-	if in.RBD != nil {
-		out.RBD = new(v1.RBDVolumeSource)
-		if err := Convert_api_RBDVolumeSource_To_v1_RBDVolumeSource(in.RBD, out.RBD, s); err != nil {
-			return err
-		}
-	} else {
-		out.RBD = nil
-	}
-	// unable to generate simple pointer conversion for api.FlexVolumeSource -> v1.FlexVolumeSource
-	if in.FlexVolume != nil {
-		out.FlexVolume = new(v1.FlexVolumeSource)
-		if err := Convert_api_FlexVolumeSource_To_v1_FlexVolumeSource(in.FlexVolume, out.FlexVolume, s); err != nil {
-			return err
-		}
-	} else {
-		out.FlexVolume = nil
-	}
-	// unable to generate simple pointer conversion for api.CinderVolumeSource -> v1.CinderVolumeSource
-	if in.Cinder != nil {
-		out.Cinder = new(v1.CinderVolumeSource)
-		if err := Convert_api_CinderVolumeSource_To_v1_CinderVolumeSource(in.Cinder, out.Cinder, s); err != nil {
-			return err
-		}
-	} else {
-		out.Cinder = nil
-	}
-	// unable to generate simple pointer conversion for api.CephFSVolumeSource -> v1.CephFSVolumeSource
-	if in.CephFS != nil {
-		out.CephFS = new(v1.CephFSVolumeSource)
-		if err := Convert_api_CephFSVolumeSource_To_v1_CephFSVolumeSource(in.CephFS, out.CephFS, s); err != nil {
-			return err
-		}
-	} else {
-		out.CephFS = nil
-	}
-	// unable to generate simple pointer conversion for api.FlockerVolumeSource -> v1.FlockerVolumeSource
-	if in.Flocker != nil {
-		out.Flocker = new(v1.FlockerVolumeSource)
-		if err := Convert_api_FlockerVolumeSource_To_v1_FlockerVolumeSource(in.Flocker, out.Flocker, s); err != nil {
-			return err
-		}
-	} else {
-		out.Flocker = nil
-	}
-	// unable to generate simple pointer conversion for api.DownwardAPIVolumeSource -> v1.DownwardAPIVolumeSource
-	if in.DownwardAPI != nil {
-		out.DownwardAPI = new(v1.DownwardAPIVolumeSource)
-		if err := Convert_api_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource(in.DownwardAPI, out.DownwardAPI, s); err != nil {
-			return err
-		}
-	} else {
-		out.DownwardAPI = nil
-	}
-	// unable to generate simple pointer conversion for api.FCVolumeSource -> v1.FCVolumeSource
-	if in.FC != nil {
-		out.FC = new(v1.FCVolumeSource)
-		if err := Convert_api_FCVolumeSource_To_v1_FCVolumeSource(in.FC, out.FC, s); err != nil {
-			return err
-		}
-	} else {
-		out.FC = nil
-	}
-	// unable to generate simple pointer conversion for api.AzureFileVolumeSource -> v1.AzureFileVolumeSource
-	if in.AzureFile != nil {
-		out.AzureFile = new(v1.AzureFileVolumeSource)
-		if err := Convert_api_AzureFileVolumeSource_To_v1_AzureFileVolumeSource(in.AzureFile, out.AzureFile, s); err != nil {
-			return err
-		}
-	} else {
-		out.AzureFile = nil
-	}
-	// unable to generate simple pointer conversion for api.ConfigMapVolumeSource -> v1.ConfigMapVolumeSource
-	if in.ConfigMap != nil {
-		out.ConfigMap = new(v1.ConfigMapVolumeSource)
-		if err := Convert_api_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource(in.ConfigMap, out.ConfigMap, s); err != nil {
-			return err
-		}
-	} else {
-		out.ConfigMap = nil
-	}
-	return nil
-}
-
-func Convert_api_VolumeSource_To_v1_VolumeSource(in *api.VolumeSource, out *v1.VolumeSource, s conversion.Scope) error {
-	return autoConvert_api_VolumeSource_To_v1_VolumeSource(in, out, s)
-}
-
-func autoConvert_unversioned_LabelSelector_To_v1_LabelSelector(in *unversioned.LabelSelector, out *LabelSelector, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*unversioned.LabelSelector))(in)
-	}
-	if in.MatchLabels != nil {
-		out.MatchLabels = make(map[string]string)
-		for key, val := range in.MatchLabels {
-			out.MatchLabels[key] = val
-		}
-	} else {
-		out.MatchLabels = nil
-	}
-	if in.MatchExpressions != nil {
-		out.MatchExpressions = make([]LabelSelectorRequirement, len(in.MatchExpressions))
-		for i := range in.MatchExpressions {
-			if err := Convert_unversioned_LabelSelectorRequirement_To_v1_LabelSelectorRequirement(&in.MatchExpressions[i], &out.MatchExpressions[i], s); err != nil {
-				return err
-			}
-		}
-	} else {
-		out.MatchExpressions = nil
-	}
-	return nil
-}
-
-func Convert_unversioned_LabelSelector_To_v1_LabelSelector(in *unversioned.LabelSelector, out *LabelSelector, s conversion.Scope) error {
-	return autoConvert_unversioned_LabelSelector_To_v1_LabelSelector(in, out, s)
-}
-
-func autoConvert_unversioned_LabelSelectorRequirement_To_v1_LabelSelectorRequirement(in *unversioned.LabelSelectorRequirement, out *LabelSelectorRequirement, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*unversioned.LabelSelectorRequirement))(in)
-	}
-	out.Key = in.Key
-	out.Operator = LabelSelectorOperator(in.Operator)
-	if in.Values != nil {
-		out.Values = make([]string, len(in.Values))
-		for i := range in.Values {
-			out.Values[i] = in.Values[i]
-		}
-	} else {
-		out.Values = nil
-	}
-	return nil
-}
-
-func Convert_unversioned_LabelSelectorRequirement_To_v1_LabelSelectorRequirement(in *unversioned.LabelSelectorRequirement, out *LabelSelectorRequirement, s conversion.Scope) error {
-	return autoConvert_unversioned_LabelSelectorRequirement_To_v1_LabelSelectorRequirement(in, out, s)
-}
-
-func autoConvert_v1_AWSElasticBlockStoreVolumeSource_To_api_AWSElasticBlockStoreVolumeSource(in *v1.AWSElasticBlockStoreVolumeSource, out *api.AWSElasticBlockStoreVolumeSource, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.AWSElasticBlockStoreVolumeSource))(in)
-	}
-	out.VolumeID = in.VolumeID
-	out.FSType = in.FSType
-	out.Partition = int(in.Partition)
-	out.ReadOnly = in.ReadOnly
-	return nil
-}
-
-func Convert_v1_AWSElasticBlockStoreVolumeSource_To_api_AWSElasticBlockStoreVolumeSource(in *v1.AWSElasticBlockStoreVolumeSource, out *api.AWSElasticBlockStoreVolumeSource, s conversion.Scope) error {
-	return autoConvert_v1_AWSElasticBlockStoreVolumeSource_To_api_AWSElasticBlockStoreVolumeSource(in, out, s)
-}
-
-func autoConvert_v1_AzureFileVolumeSource_To_api_AzureFileVolumeSource(in *v1.AzureFileVolumeSource, out *api.AzureFileVolumeSource, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.AzureFileVolumeSource))(in)
-	}
-	out.SecretName = in.SecretName
-	out.ShareName = in.ShareName
-	out.ReadOnly = in.ReadOnly
-	return nil
-}
-
-func Convert_v1_AzureFileVolumeSource_To_api_AzureFileVolumeSource(in *v1.AzureFileVolumeSource, out *api.AzureFileVolumeSource, s conversion.Scope) error {
-	return autoConvert_v1_AzureFileVolumeSource_To_api_AzureFileVolumeSource(in, out, s)
-}
-
-func autoConvert_v1_Capabilities_To_api_Capabilities(in *v1.Capabilities, out *api.Capabilities, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.Capabilities))(in)
-	}
-	if in.Add != nil {
-		out.Add = make([]api.Capability, len(in.Add))
-		for i := range in.Add {
-			out.Add[i] = api.Capability(in.Add[i])
-		}
-	} else {
-		out.Add = nil
-	}
-	if in.Drop != nil {
-		out.Drop = make([]api.Capability, len(in.Drop))
-		for i := range in.Drop {
-			out.Drop[i] = api.Capability(in.Drop[i])
-		}
-	} else {
-		out.Drop = nil
-	}
-	return nil
-}
-
-func Convert_v1_Capabilities_To_api_Capabilities(in *v1.Capabilities, out *api.Capabilities, s conversion.Scope) error {
-	return autoConvert_v1_Capabilities_To_api_Capabilities(in, out, s)
-}
-
-func autoConvert_v1_CephFSVolumeSource_To_api_CephFSVolumeSource(in *v1.CephFSVolumeSource, out *api.CephFSVolumeSource, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.CephFSVolumeSource))(in)
-	}
-	if in.Monitors != nil {
-		out.Monitors = make([]string, len(in.Monitors))
-		for i := range in.Monitors {
-			out.Monitors[i] = in.Monitors[i]
-		}
-	} else {
-		out.Monitors = nil
-	}
-	out.Path = in.Path
-	out.User = in.User
-	out.SecretFile = in.SecretFile
-	// unable to generate simple pointer conversion for v1.LocalObjectReference -> api.LocalObjectReference
-	if in.SecretRef != nil {
-		out.SecretRef = new(api.LocalObjectReference)
-		if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(in.SecretRef, out.SecretRef, s); err != nil {
-			return err
-		}
-	} else {
-		out.SecretRef = nil
-	}
-	out.ReadOnly = in.ReadOnly
-	return nil
-}
-
-func Convert_v1_CephFSVolumeSource_To_api_CephFSVolumeSource(in *v1.CephFSVolumeSource, out *api.CephFSVolumeSource, s conversion.Scope) error {
-	return autoConvert_v1_CephFSVolumeSource_To_api_CephFSVolumeSource(in, out, s)
-}
-
-func autoConvert_v1_CinderVolumeSource_To_api_CinderVolumeSource(in *v1.CinderVolumeSource, out *api.CinderVolumeSource, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.CinderVolumeSource))(in)
-	}
-	out.VolumeID = in.VolumeID
-	out.FSType = in.FSType
-	out.ReadOnly = in.ReadOnly
-	return nil
-}
-
-func Convert_v1_CinderVolumeSource_To_api_CinderVolumeSource(in *v1.CinderVolumeSource, out *api.CinderVolumeSource, s conversion.Scope) error {
-	return autoConvert_v1_CinderVolumeSource_To_api_CinderVolumeSource(in, out, s)
-}
-
-func autoConvert_v1_ConfigMapKeySelector_To_api_ConfigMapKeySelector(in *v1.ConfigMapKeySelector, out *api.ConfigMapKeySelector, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.ConfigMapKeySelector))(in)
-	}
-	if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil {
-		return err
-	}
-	out.Key = in.Key
-	return nil
-}
-
-func Convert_v1_ConfigMapKeySelector_To_api_ConfigMapKeySelector(in *v1.ConfigMapKeySelector, out *api.ConfigMapKeySelector, s conversion.Scope) error {
-	return autoConvert_v1_ConfigMapKeySelector_To_api_ConfigMapKeySelector(in, out, s)
-}
-
-func autoConvert_v1_ConfigMapVolumeSource_To_api_ConfigMapVolumeSource(in *v1.ConfigMapVolumeSource, out *api.ConfigMapVolumeSource, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.ConfigMapVolumeSource))(in)
-	}
-	if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil {
-		return err
-	}
-	if in.Items != nil {
-		out.Items = make([]api.KeyToPath, len(in.Items))
-		for i := range in.Items {
-			if err := Convert_v1_KeyToPath_To_api_KeyToPath(&in.Items[i], &out.Items[i], s); err != nil {
-				return err
-			}
-		}
-	} else {
-		out.Items = nil
-	}
-	return nil
-}
-
-func Convert_v1_ConfigMapVolumeSource_To_api_ConfigMapVolumeSource(in *v1.ConfigMapVolumeSource, out *api.ConfigMapVolumeSource, s conversion.Scope) error {
-	return autoConvert_v1_ConfigMapVolumeSource_To_api_ConfigMapVolumeSource(in, out, s)
-}
-
-func autoConvert_v1_Container_To_api_Container(in *v1.Container, out *api.Container, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.Container))(in)
-	}
-	out.Name = in.Name
-	out.Image = in.Image
-	if in.Command != nil {
-		out.Command = make([]string, len(in.Command))
-		for i := range in.Command {
-			out.Command[i] = in.Command[i]
-		}
-	} else {
-		out.Command = nil
-	}
-	if in.Args != nil {
-		out.Args = make([]string, len(in.Args))
-		for i := range in.Args {
-			out.Args[i] = in.Args[i]
-		}
-	} else {
-		out.Args = nil
-	}
-	out.WorkingDir = in.WorkingDir
-	if in.Ports != nil {
-		out.Ports = make([]api.ContainerPort, len(in.Ports))
-		for i := range in.Ports {
-			if err := Convert_v1_ContainerPort_To_api_ContainerPort(&in.Ports[i], &out.Ports[i], s); err != nil {
-				return err
-			}
-		}
-	} else {
-		out.Ports = nil
-	}
-	if in.Env != nil {
-		out.Env = make([]api.EnvVar, len(in.Env))
-		for i := range in.Env {
-			if err := Convert_v1_EnvVar_To_api_EnvVar(&in.Env[i], &out.Env[i], s); err != nil {
-				return err
-			}
-		}
-	} else {
-		out.Env = nil
-	}
-	if err := Convert_v1_ResourceRequirements_To_api_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil {
-		return err
-	}
-	if in.VolumeMounts != nil {
-		out.VolumeMounts = make([]api.VolumeMount, len(in.VolumeMounts))
-		for i := range in.VolumeMounts {
-			if err := Convert_v1_VolumeMount_To_api_VolumeMount(&in.VolumeMounts[i], &out.VolumeMounts[i], s); err != nil {
-				return err
-			}
-		}
-	} else {
-		out.VolumeMounts = nil
-	}
-	// unable to generate simple pointer conversion for v1.Probe -> api.Probe
-	if in.LivenessProbe != nil {
-		out.LivenessProbe = new(api.Probe)
-		if err := Convert_v1_Probe_To_api_Probe(in.LivenessProbe, out.LivenessProbe, s); err != nil {
-			return err
-		}
-	} else {
-		out.LivenessProbe = nil
-	}
-	// unable to generate simple pointer conversion for v1.Probe -> api.Probe
-	if in.ReadinessProbe != nil {
-		out.ReadinessProbe = new(api.Probe)
-		if err := Convert_v1_Probe_To_api_Probe(in.ReadinessProbe, out.ReadinessProbe, s); err != nil {
-			return err
-		}
-	} else {
-		out.ReadinessProbe = nil
-	}
-	// unable to generate simple pointer conversion for v1.Lifecycle -> api.Lifecycle
-	if in.Lifecycle != nil {
-		out.Lifecycle = new(api.Lifecycle)
-		if err := Convert_v1_Lifecycle_To_api_Lifecycle(in.Lifecycle, out.Lifecycle, s); err != nil {
-			return err
-		}
-	} else {
-		out.Lifecycle = nil
-	}
-	out.TerminationMessagePath = in.TerminationMessagePath
-	out.ImagePullPolicy = api.PullPolicy(in.ImagePullPolicy)
-	// unable to generate simple pointer conversion for v1.SecurityContext -> api.SecurityContext
-	if in.SecurityContext != nil {
-		out.SecurityContext = new(api.SecurityContext)
-		if err := Convert_v1_SecurityContext_To_api_SecurityContext(in.SecurityContext, out.SecurityContext, s); err != nil {
-			return err
-		}
-	} else {
-		out.SecurityContext = nil
-	}
-	out.Stdin = in.Stdin
-	out.StdinOnce = in.StdinOnce
-	out.TTY = in.TTY
-	return nil
-}
-
-func Convert_v1_Container_To_api_Container(in *v1.Container, out *api.Container, s conversion.Scope) error {
-	return autoConvert_v1_Container_To_api_Container(in, out, s)
-}
-
-func autoConvert_v1_ContainerPort_To_api_ContainerPort(in *v1.ContainerPort, out *api.ContainerPort, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.ContainerPort))(in)
-	}
-	out.Name = in.Name
-	out.HostPort = int(in.HostPort)
-	out.ContainerPort = int(in.ContainerPort)
-	out.Protocol = api.Protocol(in.Protocol)
-	out.HostIP = in.HostIP
-	return nil
-}
-
-func Convert_v1_ContainerPort_To_api_ContainerPort(in *v1.ContainerPort, out *api.ContainerPort, s conversion.Scope) error {
-	return autoConvert_v1_ContainerPort_To_api_ContainerPort(in, out, s)
-}
-
-func autoConvert_v1_DownwardAPIVolumeFile_To_api_DownwardAPIVolumeFile(in *v1.DownwardAPIVolumeFile, out *api.DownwardAPIVolumeFile, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.DownwardAPIVolumeFile))(in)
-	}
-	out.Path = in.Path
-	if err := Convert_v1_ObjectFieldSelector_To_api_ObjectFieldSelector(&in.FieldRef, &out.FieldRef, s); err != nil {
-		return err
-	}
-	return nil
-}
-
-func Convert_v1_DownwardAPIVolumeFile_To_api_DownwardAPIVolumeFile(in *v1.DownwardAPIVolumeFile, out *api.DownwardAPIVolumeFile, s conversion.Scope) error {
-	return autoConvert_v1_DownwardAPIVolumeFile_To_api_DownwardAPIVolumeFile(in, out, s)
-}
-
-func autoConvert_v1_DownwardAPIVolumeSource_To_api_DownwardAPIVolumeSource(in *v1.DownwardAPIVolumeSource, out *api.DownwardAPIVolumeSource, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.DownwardAPIVolumeSource))(in)
-	}
-	if in.Items != nil {
-		out.Items = make([]api.DownwardAPIVolumeFile, len(in.Items))
-		for i := range in.Items {
-			if err := Convert_v1_DownwardAPIVolumeFile_To_api_DownwardAPIVolumeFile(&in.Items[i], &out.Items[i], s); err != nil {
-				return err
-			}
-		}
-	} else {
-		out.Items = nil
-	}
-	return nil
-}
-
-func Convert_v1_DownwardAPIVolumeSource_To_api_DownwardAPIVolumeSource(in *v1.DownwardAPIVolumeSource, out *api.DownwardAPIVolumeSource, s conversion.Scope) error {
-	return autoConvert_v1_DownwardAPIVolumeSource_To_api_DownwardAPIVolumeSource(in, out, s)
-}
-
-func autoConvert_v1_EmptyDirVolumeSource_To_api_EmptyDirVolumeSource(in *v1.EmptyDirVolumeSource, out *api.EmptyDirVolumeSource, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.EmptyDirVolumeSource))(in)
-	}
-	out.Medium = api.StorageMedium(in.Medium)
-	return nil
-}
-
-func Convert_v1_EmptyDirVolumeSource_To_api_EmptyDirVolumeSource(in *v1.EmptyDirVolumeSource, out *api.EmptyDirVolumeSource, s conversion.Scope) error {
-	return autoConvert_v1_EmptyDirVolumeSource_To_api_EmptyDirVolumeSource(in, out, s)
-}
-
-func autoConvert_v1_EnvVar_To_api_EnvVar(in *v1.EnvVar, out *api.EnvVar, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.EnvVar))(in)
-	}
-	out.Name = in.Name
-	out.Value = in.Value
-	// unable to generate simple pointer conversion for v1.EnvVarSource -> api.EnvVarSource
-	if in.ValueFrom != nil {
-		out.ValueFrom = new(api.EnvVarSource)
-		if err := Convert_v1_EnvVarSource_To_api_EnvVarSource(in.ValueFrom, out.ValueFrom, s); err != nil {
-			return err
-		}
-	} else {
-		out.ValueFrom = nil
-	}
-	return nil
-}
-
-func Convert_v1_EnvVar_To_api_EnvVar(in *v1.EnvVar, out *api.EnvVar, s conversion.Scope) error {
-	return autoConvert_v1_EnvVar_To_api_EnvVar(in, out, s)
-}
-
-func autoConvert_v1_EnvVarSource_To_api_EnvVarSource(in *v1.EnvVarSource, out *api.EnvVarSource, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.EnvVarSource))(in)
-	}
-	// unable to generate simple pointer conversion for v1.ObjectFieldSelector -> api.ObjectFieldSelector
-	if in.FieldRef != nil {
-		out.FieldRef = new(api.ObjectFieldSelector)
-		if err := Convert_v1_ObjectFieldSelector_To_api_ObjectFieldSelector(in.FieldRef, out.FieldRef, s); err != nil {
-			return err
-		}
-	} else {
-		out.FieldRef = nil
-	}
-	// unable to generate simple pointer conversion for v1.ConfigMapKeySelector -> api.ConfigMapKeySelector
-	if in.ConfigMapKeyRef != nil {
-		out.ConfigMapKeyRef = new(api.ConfigMapKeySelector)
-		if err := Convert_v1_ConfigMapKeySelector_To_api_ConfigMapKeySelector(in.ConfigMapKeyRef, out.ConfigMapKeyRef, s); err != nil {
-			return err
-		}
-	} else {
-		out.ConfigMapKeyRef = nil
-	}
-	// unable to generate simple pointer conversion for v1.SecretKeySelector -> api.SecretKeySelector
-	if in.SecretKeyRef != nil {
-		out.SecretKeyRef = new(api.SecretKeySelector)
-		if err := Convert_v1_SecretKeySelector_To_api_SecretKeySelector(in.SecretKeyRef, out.SecretKeyRef, s); err != nil {
-			return err
-		}
-	} else {
-		out.SecretKeyRef = nil
-	}
-	return nil
-}
-
-func Convert_v1_EnvVarSource_To_api_EnvVarSource(in *v1.EnvVarSource, out *api.EnvVarSource, s conversion.Scope) error {
-	return autoConvert_v1_EnvVarSource_To_api_EnvVarSource(in, out, s)
-}
-
-func autoConvert_v1_ExecAction_To_api_ExecAction(in *v1.ExecAction, out *api.ExecAction, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.ExecAction))(in)
-	}
-	if in.Command != nil {
-		out.Command = make([]string, len(in.Command))
-		for i := range in.Command {
-			out.Command[i] = in.Command[i]
-		}
-	} else {
-		out.Command = nil
-	}
-	return nil
-}
-
-func Convert_v1_ExecAction_To_api_ExecAction(in *v1.ExecAction, out *api.ExecAction, s conversion.Scope) error {
-	return autoConvert_v1_ExecAction_To_api_ExecAction(in, out, s)
-}
-
-func autoConvert_v1_FCVolumeSource_To_api_FCVolumeSource(in *v1.FCVolumeSource, out *api.FCVolumeSource, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.FCVolumeSource))(in)
-	}
-	if in.TargetWWNs != nil {
-		out.TargetWWNs = make([]string, len(in.TargetWWNs))
-		for i := range in.TargetWWNs {
-			out.TargetWWNs[i] = in.TargetWWNs[i]
-		}
-	} else {
-		out.TargetWWNs = nil
-	}
-	if in.Lun != nil {
-		out.Lun = new(int)
-		*out.Lun = int(*in.Lun)
-	} else {
-		out.Lun = nil
-	}
-	out.FSType = in.FSType
-	out.ReadOnly = in.ReadOnly
-	return nil
-}
-
-func Convert_v1_FCVolumeSource_To_api_FCVolumeSource(in *v1.FCVolumeSource, out *api.FCVolumeSource, s conversion.Scope) error {
-	return autoConvert_v1_FCVolumeSource_To_api_FCVolumeSource(in, out, s)
-}
-
-func autoConvert_v1_FlexVolumeSource_To_api_FlexVolumeSource(in *v1.FlexVolumeSource, out *api.FlexVolumeSource, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.FlexVolumeSource))(in)
-	}
-	out.Driver = in.Driver
-	out.FSType = in.FSType
-	// unable to generate simple pointer conversion for v1.LocalObjectReference -> api.LocalObjectReference
-	if in.SecretRef != nil {
-		out.SecretRef = new(api.LocalObjectReference)
-		if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(in.SecretRef, out.SecretRef, s); err != nil {
-			return err
-		}
-	} else {
-		out.SecretRef = nil
-	}
-	out.ReadOnly = in.ReadOnly
-	if in.Options != nil {
-		out.Options = make(map[string]string)
-		for key, val := range in.Options {
-			out.Options[key] = val
-		}
-	} else {
-		out.Options = nil
-	}
-	return nil
-}
-
-func Convert_v1_FlexVolumeSource_To_api_FlexVolumeSource(in *v1.FlexVolumeSource, out *api.FlexVolumeSource, s conversion.Scope) error {
-	return autoConvert_v1_FlexVolumeSource_To_api_FlexVolumeSource(in, out, s)
-}
-
-func autoConvert_v1_FlockerVolumeSource_To_api_FlockerVolumeSource(in *v1.FlockerVolumeSource, out *api.FlockerVolumeSource, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.FlockerVolumeSource))(in)
-	}
-	out.DatasetName = in.DatasetName
-	return nil
-}
-
-func Convert_v1_FlockerVolumeSource_To_api_FlockerVolumeSource(in *v1.FlockerVolumeSource, out *api.FlockerVolumeSource, s conversion.Scope) error {
-	return autoConvert_v1_FlockerVolumeSource_To_api_FlockerVolumeSource(in, out, s)
-}
-
-func autoConvert_v1_GCEPersistentDiskVolumeSource_To_api_GCEPersistentDiskVolumeSource(in *v1.GCEPersistentDiskVolumeSource, out *api.GCEPersistentDiskVolumeSource, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.GCEPersistentDiskVolumeSource))(in)
-	}
-	out.PDName = in.PDName
-	out.FSType = in.FSType
-	out.Partition = int(in.Partition)
-	out.ReadOnly = in.ReadOnly
-	return nil
-}
-
-func Convert_v1_GCEPersistentDiskVolumeSource_To_api_GCEPersistentDiskVolumeSource(in *v1.GCEPersistentDiskVolumeSource, out *api.GCEPersistentDiskVolumeSource, s conversion.Scope) error {
-	return autoConvert_v1_GCEPersistentDiskVolumeSource_To_api_GCEPersistentDiskVolumeSource(in, out, s)
-}
-
-func autoConvert_v1_GitRepoVolumeSource_To_api_GitRepoVolumeSource(in *v1.GitRepoVolumeSource, out *api.GitRepoVolumeSource, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.GitRepoVolumeSource))(in)
-	}
-	out.Repository = in.Repository
-	out.Revision = in.Revision
-	out.Directory = in.Directory
-	return nil
-}
-
-func Convert_v1_GitRepoVolumeSource_To_api_GitRepoVolumeSource(in *v1.GitRepoVolumeSource, out *api.GitRepoVolumeSource, s conversion.Scope) error {
-	return autoConvert_v1_GitRepoVolumeSource_To_api_GitRepoVolumeSource(in, out, s)
-}
-
-func autoConvert_v1_GlusterfsVolumeSource_To_api_GlusterfsVolumeSource(in *v1.GlusterfsVolumeSource, out *api.GlusterfsVolumeSource, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.GlusterfsVolumeSource))(in)
-	}
-	out.EndpointsName = in.EndpointsName
-	out.Path = in.Path
-	out.ReadOnly = in.ReadOnly
-	return nil
-}
-
-func Convert_v1_GlusterfsVolumeSource_To_api_GlusterfsVolumeSource(in *v1.GlusterfsVolumeSource, out *api.GlusterfsVolumeSource, s conversion.Scope) error {
-	return autoConvert_v1_GlusterfsVolumeSource_To_api_GlusterfsVolumeSource(in, out, s)
-}
-
-func autoConvert_v1_HTTPGetAction_To_api_HTTPGetAction(in *v1.HTTPGetAction, out *api.HTTPGetAction, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.HTTPGetAction))(in)
-	}
-	out.Path = in.Path
-	if err := api.Convert_intstr_IntOrString_To_intstr_IntOrString(&in.Port, &out.Port, s); err != nil {
-		return err
-	}
-	out.Host = in.Host
-	out.Scheme = api.URIScheme(in.Scheme)
-	if in.HTTPHeaders != nil {
-		out.HTTPHeaders = make([]api.HTTPHeader, len(in.HTTPHeaders))
-		for i := range in.HTTPHeaders {
-			if err := Convert_v1_HTTPHeader_To_api_HTTPHeader(&in.HTTPHeaders[i], &out.HTTPHeaders[i], s); err != nil {
-				return err
-			}
-		}
-	} else {
-		out.HTTPHeaders = nil
-	}
-	return nil
-}
-
-func Convert_v1_HTTPGetAction_To_api_HTTPGetAction(in *v1.HTTPGetAction, out *api.HTTPGetAction, s conversion.Scope) error {
-	return autoConvert_v1_HTTPGetAction_To_api_HTTPGetAction(in, out, s)
-}
-
-func autoConvert_v1_HTTPHeader_To_api_HTTPHeader(in *v1.HTTPHeader, out *api.HTTPHeader, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.HTTPHeader))(in)
-	}
-	out.Name = in.Name
-	out.Value = in.Value
-	return nil
-}
-
-func Convert_v1_HTTPHeader_To_api_HTTPHeader(in *v1.HTTPHeader, out *api.HTTPHeader, s conversion.Scope) error {
-	return autoConvert_v1_HTTPHeader_To_api_HTTPHeader(in, out, s)
-}
-
-func autoConvert_v1_Handler_To_api_Handler(in *v1.Handler, out *api.Handler, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.Handler))(in)
-	}
-	// unable to generate simple pointer conversion for v1.ExecAction -> api.ExecAction
-	if in.Exec != nil {
-		out.Exec = new(api.ExecAction)
-		if err := Convert_v1_ExecAction_To_api_ExecAction(in.Exec, out.Exec, s); err != nil {
-			return err
-		}
-	} else {
-		out.Exec = nil
-	}
-	// unable to generate simple pointer conversion for v1.HTTPGetAction -> api.HTTPGetAction
-	if in.HTTPGet != nil {
-		out.HTTPGet = new(api.HTTPGetAction)
-		if err := Convert_v1_HTTPGetAction_To_api_HTTPGetAction(in.HTTPGet, out.HTTPGet, s); err != nil {
-			return err
-		}
-	} else {
-		out.HTTPGet = nil
-	}
-	// unable to generate simple pointer conversion for v1.TCPSocketAction -> api.TCPSocketAction
-	if in.TCPSocket != nil {
-		out.TCPSocket = new(api.TCPSocketAction)
-		if err := Convert_v1_TCPSocketAction_To_api_TCPSocketAction(in.TCPSocket, out.TCPSocket, s); err != nil {
-			return err
-		}
-	} else {
-		out.TCPSocket = nil
-	}
-	return nil
-}
-
-func Convert_v1_Handler_To_api_Handler(in *v1.Handler, out *api.Handler, s conversion.Scope) error {
-	return autoConvert_v1_Handler_To_api_Handler(in, out, s)
-}
-
-func autoConvert_v1_HostPathVolumeSource_To_api_HostPathVolumeSource(in *v1.HostPathVolumeSource, out *api.HostPathVolumeSource, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.HostPathVolumeSource))(in)
-	}
-	out.Path = in.Path
-	return nil
-}
-
-func Convert_v1_HostPathVolumeSource_To_api_HostPathVolumeSource(in *v1.HostPathVolumeSource, out *api.HostPathVolumeSource, s conversion.Scope) error {
-	return autoConvert_v1_HostPathVolumeSource_To_api_HostPathVolumeSource(in, out, s)
-}
-
-func autoConvert_v1_ISCSIVolumeSource_To_api_ISCSIVolumeSource(in *v1.ISCSIVolumeSource, out *api.ISCSIVolumeSource, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.ISCSIVolumeSource))(in)
-	}
-	out.TargetPortal = in.TargetPortal
-	out.IQN = in.IQN
-	out.Lun = int(in.Lun)
-	out.ISCSIInterface = in.ISCSIInterface
-	out.FSType = in.FSType
-	out.ReadOnly = in.ReadOnly
-	return nil
-}
-
-func Convert_v1_ISCSIVolumeSource_To_api_ISCSIVolumeSource(in *v1.ISCSIVolumeSource, out *api.ISCSIVolumeSource, s conversion.Scope) error {
-	return autoConvert_v1_ISCSIVolumeSource_To_api_ISCSIVolumeSource(in, out, s)
-}
-
-func autoConvert_v1_KeyToPath_To_api_KeyToPath(in *v1.KeyToPath, out *api.KeyToPath, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.KeyToPath))(in)
-	}
-	out.Key = in.Key
-	out.Path = in.Path
-	return nil
-}
-
-func Convert_v1_KeyToPath_To_api_KeyToPath(in *v1.KeyToPath, out *api.KeyToPath, s conversion.Scope) error {
-	return autoConvert_v1_KeyToPath_To_api_KeyToPath(in, out, s)
-}
-
-func autoConvert_v1_Lifecycle_To_api_Lifecycle(in *v1.Lifecycle, out *api.Lifecycle, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.Lifecycle))(in)
-	}
-	// unable to generate simple pointer conversion for v1.Handler -> api.Handler
-	if in.PostStart != nil {
-		out.PostStart = new(api.Handler)
-		if err := Convert_v1_Handler_To_api_Handler(in.PostStart, out.PostStart, s); err != nil {
-			return err
-		}
-	} else {
-		out.PostStart = nil
-	}
-	// unable to generate simple pointer conversion for v1.Handler -> api.Handler
-	if in.PreStop != nil {
-		out.PreStop = new(api.Handler)
-		if err := Convert_v1_Handler_To_api_Handler(in.PreStop, out.PreStop, s); err != nil {
-			return err
-		}
-	} else {
-		out.PreStop = nil
-	}
-	return nil
-}
-
-func Convert_v1_Lifecycle_To_api_Lifecycle(in *v1.Lifecycle, out *api.Lifecycle, s conversion.Scope) error {
-	return autoConvert_v1_Lifecycle_To_api_Lifecycle(in, out, s)
-}
-
-func autoConvert_v1_LocalObjectReference_To_api_LocalObjectReference(in *v1.LocalObjectReference, out *api.LocalObjectReference, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.LocalObjectReference))(in)
-	}
-	out.Name = in.Name
-	return nil
-}
-
-func Convert_v1_LocalObjectReference_To_api_LocalObjectReference(in *v1.LocalObjectReference, out *api.LocalObjectReference, s conversion.Scope) error {
-	return autoConvert_v1_LocalObjectReference_To_api_LocalObjectReference(in, out, s)
-}
-
-func autoConvert_v1_NFSVolumeSource_To_api_NFSVolumeSource(in *v1.NFSVolumeSource, out *api.NFSVolumeSource, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.NFSVolumeSource))(in)
-	}
-	out.Server = in.Server
-	out.Path = in.Path
-	out.ReadOnly = in.ReadOnly
-	return nil
-}
-
-func Convert_v1_NFSVolumeSource_To_api_NFSVolumeSource(in *v1.NFSVolumeSource, out *api.NFSVolumeSource, s conversion.Scope) error {
-	return autoConvert_v1_NFSVolumeSource_To_api_NFSVolumeSource(in, out, s)
-}
-
-func autoConvert_v1_ObjectFieldSelector_To_api_ObjectFieldSelector(in *v1.ObjectFieldSelector, out *api.ObjectFieldSelector, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.ObjectFieldSelector))(in)
-	}
-	out.APIVersion = in.APIVersion
-	out.FieldPath = in.FieldPath
-	return nil
-}
-
-func Convert_v1_ObjectFieldSelector_To_api_ObjectFieldSelector(in *v1.ObjectFieldSelector, out *api.ObjectFieldSelector, s conversion.Scope) error {
-	return autoConvert_v1_ObjectFieldSelector_To_api_ObjectFieldSelector(in, out, s)
-}
-
-func autoConvert_v1_ObjectMeta_To_api_ObjectMeta(in *v1.ObjectMeta, out *api.ObjectMeta, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.ObjectMeta))(in)
-	}
-	out.Name = in.Name
-	out.GenerateName = in.GenerateName
-	out.Namespace = in.Namespace
-	out.SelfLink = in.SelfLink
-	out.UID = in.UID
-	out.ResourceVersion = in.ResourceVersion
-	out.Generation = in.Generation
-	if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.CreationTimestamp, &out.CreationTimestamp, s); err != nil {
-		return err
-	}
-	// unable to generate simple pointer conversion for unversioned.Time -> unversioned.Time
-	if in.DeletionTimestamp != nil {
-		out.DeletionTimestamp = new(unversioned.Time)
-		if err := api.Convert_unversioned_Time_To_unversioned_Time(in.DeletionTimestamp, out.DeletionTimestamp, s); err != nil {
-			return err
-		}
-	} else {
-		out.DeletionTimestamp = nil
-	}
-	if in.DeletionGracePeriodSeconds != nil {
-		out.DeletionGracePeriodSeconds = new(int64)
-		*out.DeletionGracePeriodSeconds = *in.DeletionGracePeriodSeconds
-	} else {
-		out.DeletionGracePeriodSeconds = nil
-	}
-	if in.Labels != nil {
-		out.Labels = make(map[string]string)
-		for key, val := range in.Labels {
-			out.Labels[key] = val
-		}
-	} else {
-		out.Labels = nil
-	}
-	if in.Annotations != nil {
-		out.Annotations = make(map[string]string)
-		for key, val := range in.Annotations {
-			out.Annotations[key] = val
-		}
-	} else {
-		out.Annotations = nil
-	}
-	return nil
-}
-
-func Convert_v1_ObjectMeta_To_api_ObjectMeta(in *v1.ObjectMeta, out *api.ObjectMeta, s conversion.Scope) error {
-	return autoConvert_v1_ObjectMeta_To_api_ObjectMeta(in, out, s)
-}
-
-func autoConvert_v1_PersistentVolumeClaimVolumeSource_To_api_PersistentVolumeClaimVolumeSource(in *v1.PersistentVolumeClaimVolumeSource, out *api.PersistentVolumeClaimVolumeSource, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.PersistentVolumeClaimVolumeSource))(in)
-	}
-	out.ClaimName = in.ClaimName
-	out.ReadOnly = in.ReadOnly
-	return nil
-}
-
-func Convert_v1_PersistentVolumeClaimVolumeSource_To_api_PersistentVolumeClaimVolumeSource(in *v1.PersistentVolumeClaimVolumeSource, out *api.PersistentVolumeClaimVolumeSource, s conversion.Scope) error {
-	return autoConvert_v1_PersistentVolumeClaimVolumeSource_To_api_PersistentVolumeClaimVolumeSource(in, out, s)
-}
-
-func autoConvert_v1_PodSpec_To_api_PodSpec(in *v1.PodSpec, out *api.PodSpec, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.PodSpec))(in)
-	}
-	if in.Volumes != nil {
-		out.Volumes = make([]api.Volume, len(in.Volumes))
-		for i := range in.Volumes {
-			if err := Convert_v1_Volume_To_api_Volume(&in.Volumes[i], &out.Volumes[i], s); err != nil {
-				return err
-			}
-		}
-	} else {
-		out.Volumes = nil
-	}
-	if in.Containers != nil {
-		out.Containers = make([]api.Container, len(in.Containers))
-		for i := range in.Containers {
-			if err := Convert_v1_Container_To_api_Container(&in.Containers[i], &out.Containers[i], s); err != nil {
-				return err
-			}
-		}
-	} else {
-		out.Containers = nil
-	}
-	out.RestartPolicy = api.RestartPolicy(in.RestartPolicy)
-	if in.TerminationGracePeriodSeconds != nil {
-		out.TerminationGracePeriodSeconds = new(int64)
-		*out.TerminationGracePeriodSeconds = *in.TerminationGracePeriodSeconds
-	} else {
-		out.TerminationGracePeriodSeconds = nil
-	}
-	if in.ActiveDeadlineSeconds != nil {
-		out.ActiveDeadlineSeconds = new(int64)
-		*out.ActiveDeadlineSeconds = *in.ActiveDeadlineSeconds
-	} else {
-		out.ActiveDeadlineSeconds = nil
-	}
-	out.DNSPolicy = api.DNSPolicy(in.DNSPolicy)
-	if in.NodeSelector != nil {
-		out.NodeSelector = make(map[string]string)
-		for key, val := range in.NodeSelector {
-			out.NodeSelector[key] = val
-		}
-	} else {
-		out.NodeSelector = nil
-	}
-	out.ServiceAccountName = in.ServiceAccountName
-	// in.DeprecatedServiceAccount has no peer in out
-	out.NodeName = in.NodeName
-	// in.HostNetwork has no peer in out
-	// in.HostPID has no peer in out
-	// in.HostIPC has no peer in out
-	// unable to generate simple pointer conversion for v1.PodSecurityContext -> api.PodSecurityContext
-	if in.SecurityContext != nil {
-		if err := s.Convert(&in.SecurityContext, &out.SecurityContext, 0); err != nil {
-			return err
-		}
-	} else {
-		out.SecurityContext = nil
-	}
-	if in.ImagePullSecrets != nil {
-		out.ImagePullSecrets = make([]api.LocalObjectReference, len(in.ImagePullSecrets))
-		for i := range in.ImagePullSecrets {
-			if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&in.ImagePullSecrets[i], &out.ImagePullSecrets[i], s); err != nil {
-				return err
-			}
-		}
-	} else {
-		out.ImagePullSecrets = nil
-	}
-	return nil
-}
-
-func autoConvert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in *v1.PodTemplateSpec, out *api.PodTemplateSpec, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.PodTemplateSpec))(in)
-	}
-	if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil {
-		return err
-	}
-	if err := Convert_v1_PodSpec_To_api_PodSpec(&in.Spec, &out.Spec, s); err != nil {
-		return err
-	}
-	return nil
-}
-
-func Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in *v1.PodTemplateSpec, out *api.PodTemplateSpec, s conversion.Scope) error {
-	return autoConvert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in, out, s)
-}
-
-func autoConvert_v1_Probe_To_api_Probe(in *v1.Probe, out *api.Probe, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.Probe))(in)
-	}
-	if err := Convert_v1_Handler_To_api_Handler(&in.Handler, &out.Handler, s); err != nil {
-		return err
-	}
-	out.InitialDelaySeconds = int(in.InitialDelaySeconds)
-	out.TimeoutSeconds = int(in.TimeoutSeconds)
-	out.PeriodSeconds = int(in.PeriodSeconds)
-	out.SuccessThreshold = int(in.SuccessThreshold)
-	out.FailureThreshold = int(in.FailureThreshold)
-	return nil
-}
-
-func Convert_v1_Probe_To_api_Probe(in *v1.Probe, out *api.Probe, s conversion.Scope) error {
-	return autoConvert_v1_Probe_To_api_Probe(in, out, s)
-}
-
-func autoConvert_v1_RBDVolumeSource_To_api_RBDVolumeSource(in *v1.RBDVolumeSource, out *api.RBDVolumeSource, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.RBDVolumeSource))(in)
-	}
-	if in.CephMonitors != nil {
-		out.CephMonitors = make([]string, len(in.CephMonitors))
-		for i := range in.CephMonitors {
-			out.CephMonitors[i] = in.CephMonitors[i]
-		}
-	} else {
-		out.CephMonitors = nil
-	}
-	out.RBDImage = in.RBDImage
-	out.FSType = in.FSType
-	out.RBDPool = in.RBDPool
-	out.RadosUser = in.RadosUser
-	out.Keyring = in.Keyring
-	// unable to generate simple pointer conversion for v1.LocalObjectReference -> api.LocalObjectReference
-	if in.SecretRef != nil {
-		out.SecretRef = new(api.LocalObjectReference)
-		if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(in.SecretRef, out.SecretRef, s); err != nil {
-			return err
-		}
-	} else {
-		out.SecretRef = nil
-	}
-	out.ReadOnly = in.ReadOnly
-	return nil
-}
-
-func Convert_v1_RBDVolumeSource_To_api_RBDVolumeSource(in *v1.RBDVolumeSource, out *api.RBDVolumeSource, s conversion.Scope) error {
-	return autoConvert_v1_RBDVolumeSource_To_api_RBDVolumeSource(in, out, s)
-}
-
-func autoConvert_v1_ResourceRequirements_To_api_ResourceRequirements(in *v1.ResourceRequirements, out *api.ResourceRequirements, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.ResourceRequirements))(in)
-	}
-	if in.Limits != nil {
-		out.Limits = make(api.ResourceList)
-		for key, val := range in.Limits {
-			newVal := resource.Quantity{}
-			if err := api.Convert_resource_Quantity_To_resource_Quantity(&val, &newVal, s); err != nil {
-				return err
-			}
-			out.Limits[api.ResourceName(key)] = newVal
-		}
-	} else {
-		out.Limits = nil
-	}
-	if in.Requests != nil {
-		out.Requests = make(api.ResourceList)
-		for key, val := range in.Requests {
-			newVal := resource.Quantity{}
-			if err := api.Convert_resource_Quantity_To_resource_Quantity(&val, &newVal, s); err != nil {
-				return err
-			}
-			out.Requests[api.ResourceName(key)] = newVal
-		}
-	} else {
-		out.Requests = nil
-	}
-	return nil
-}
-
-func Convert_v1_ResourceRequirements_To_api_ResourceRequirements(in *v1.ResourceRequirements, out *api.ResourceRequirements, s conversion.Scope) error {
-	return autoConvert_v1_ResourceRequirements_To_api_ResourceRequirements(in, out, s)
-}
-
-func autoConvert_v1_SELinuxOptions_To_api_SELinuxOptions(in *v1.SELinuxOptions, out *api.SELinuxOptions, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.SELinuxOptions))(in)
-	}
-	out.User = in.User
-	out.Role = in.Role
-	out.Type = in.Type
-	out.Level = in.Level
-	return nil
-}
-
-func Convert_v1_SELinuxOptions_To_api_SELinuxOptions(in *v1.SELinuxOptions, out *api.SELinuxOptions, s conversion.Scope) error {
-	return autoConvert_v1_SELinuxOptions_To_api_SELinuxOptions(in, out, s)
-}
-
-func autoConvert_v1_SecretKeySelector_To_api_SecretKeySelector(in *v1.SecretKeySelector, out *api.SecretKeySelector, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.SecretKeySelector))(in)
-	}
-	if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil {
-		return err
-	}
-	out.Key = in.Key
-	return nil
-}
-
-func Convert_v1_SecretKeySelector_To_api_SecretKeySelector(in *v1.SecretKeySelector, out *api.SecretKeySelector, s conversion.Scope) error {
-	return autoConvert_v1_SecretKeySelector_To_api_SecretKeySelector(in, out, s)
-}
-
-func autoConvert_v1_SecretVolumeSource_To_api_SecretVolumeSource(in *v1.SecretVolumeSource, out *api.SecretVolumeSource, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.SecretVolumeSource))(in)
-	}
-	out.SecretName = in.SecretName
-	return nil
-}
-
-func Convert_v1_SecretVolumeSource_To_api_SecretVolumeSource(in *v1.SecretVolumeSource, out *api.SecretVolumeSource, s conversion.Scope) error {
-	return autoConvert_v1_SecretVolumeSource_To_api_SecretVolumeSource(in, out, s)
-}
-
-func autoConvert_v1_SecurityContext_To_api_SecurityContext(in *v1.SecurityContext, out *api.SecurityContext, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.SecurityContext))(in)
-	}
-	// unable to generate simple pointer conversion for v1.Capabilities -> api.Capabilities
-	if in.Capabilities != nil {
-		out.Capabilities = new(api.Capabilities)
-		if err := Convert_v1_Capabilities_To_api_Capabilities(in.Capabilities, out.Capabilities, s); err != nil {
-			return err
-		}
-	} else {
-		out.Capabilities = nil
-	}
-	if in.Privileged != nil {
-		out.Privileged = new(bool)
-		*out.Privileged = *in.Privileged
-	} else {
-		out.Privileged = nil
-	}
-	// unable to generate simple pointer conversion for v1.SELinuxOptions -> api.SELinuxOptions
-	if in.SELinuxOptions != nil {
-		out.SELinuxOptions = new(api.SELinuxOptions)
-		if err := Convert_v1_SELinuxOptions_To_api_SELinuxOptions(in.SELinuxOptions, out.SELinuxOptions, s); err != nil {
-			return err
-		}
-	} else {
-		out.SELinuxOptions = nil
-	}
-	if in.RunAsUser != nil {
-		out.RunAsUser = new(int64)
-		*out.RunAsUser = *in.RunAsUser
-	} else {
-		out.RunAsUser = nil
-	}
-	if in.RunAsNonRoot != nil {
-		out.RunAsNonRoot = new(bool)
-		*out.RunAsNonRoot = *in.RunAsNonRoot
-	} else {
-		out.RunAsNonRoot = nil
-	}
-	if in.ReadOnlyRootFilesystem != nil {
-		out.ReadOnlyRootFilesystem = new(bool)
-		*out.ReadOnlyRootFilesystem = *in.ReadOnlyRootFilesystem
-	} else {
-		out.ReadOnlyRootFilesystem = nil
-	}
-	return nil
-}
-
-func Convert_v1_SecurityContext_To_api_SecurityContext(in *v1.SecurityContext, out *api.SecurityContext, s conversion.Scope) error {
-	return autoConvert_v1_SecurityContext_To_api_SecurityContext(in, out, s)
-}
-
-func autoConvert_v1_TCPSocketAction_To_api_TCPSocketAction(in *v1.TCPSocketAction, out *api.TCPSocketAction, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.TCPSocketAction))(in)
-	}
-	if err := api.Convert_intstr_IntOrString_To_intstr_IntOrString(&in.Port, &out.Port, s); err != nil {
-		return err
-	}
-	return nil
-}
-
-func Convert_v1_TCPSocketAction_To_api_TCPSocketAction(in *v1.TCPSocketAction, out *api.TCPSocketAction, s conversion.Scope) error {
-	return autoConvert_v1_TCPSocketAction_To_api_TCPSocketAction(in, out, s)
-}
-
-func autoConvert_v1_Volume_To_api_Volume(in *v1.Volume, out *api.Volume, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.Volume))(in)
-	}
-	out.Name = in.Name
-	if err := Convert_v1_VolumeSource_To_api_VolumeSource(&in.VolumeSource, &out.VolumeSource, s); err != nil {
-		return err
-	}
-	return nil
-}
-
-func Convert_v1_Volume_To_api_Volume(in *v1.Volume, out *api.Volume, s conversion.Scope) error {
-	return autoConvert_v1_Volume_To_api_Volume(in, out, s)
-}
-
-func autoConvert_v1_VolumeMount_To_api_VolumeMount(in *v1.VolumeMount, out *api.VolumeMount, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.VolumeMount))(in)
-	}
-	out.Name = in.Name
-	out.ReadOnly = in.ReadOnly
-	out.MountPath = in.MountPath
-	return nil
-}
-
-func Convert_v1_VolumeMount_To_api_VolumeMount(in *v1.VolumeMount, out *api.VolumeMount, s conversion.Scope) error {
-	return autoConvert_v1_VolumeMount_To_api_VolumeMount(in, out, s)
-}
-
-func autoConvert_v1_VolumeSource_To_api_VolumeSource(in *v1.VolumeSource, out *api.VolumeSource, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.VolumeSource))(in)
-	}
-	// unable to generate simple pointer conversion for v1.HostPathVolumeSource -> api.HostPathVolumeSource
-	if in.HostPath != nil {
-		out.HostPath = new(api.HostPathVolumeSource)
-		if err := Convert_v1_HostPathVolumeSource_To_api_HostPathVolumeSource(in.HostPath, out.HostPath, s); err != nil {
-			return err
-		}
-	} else {
-		out.HostPath = nil
-	}
-	// unable to generate simple pointer conversion for v1.EmptyDirVolumeSource -> api.EmptyDirVolumeSource
-	if in.EmptyDir != nil {
-		out.EmptyDir = new(api.EmptyDirVolumeSource)
-		if err := Convert_v1_EmptyDirVolumeSource_To_api_EmptyDirVolumeSource(in.EmptyDir, out.EmptyDir, s); err != nil {
-			return err
-		}
-	} else {
-		out.EmptyDir = nil
-	}
-	// unable to generate simple pointer conversion for v1.GCEPersistentDiskVolumeSource -> api.GCEPersistentDiskVolumeSource
-	if in.GCEPersistentDisk != nil {
-		out.GCEPersistentDisk = new(api.GCEPersistentDiskVolumeSource)
-		if err := Convert_v1_GCEPersistentDiskVolumeSource_To_api_GCEPersistentDiskVolumeSource(in.GCEPersistentDisk, out.GCEPersistentDisk, s); err != nil {
-			return err
-		}
-	} else {
-		out.GCEPersistentDisk = nil
-	}
-	// unable to generate simple pointer conversion for v1.AWSElasticBlockStoreVolumeSource -> api.AWSElasticBlockStoreVolumeSource
-	if in.AWSElasticBlockStore != nil {
-		out.AWSElasticBlockStore = new(api.AWSElasticBlockStoreVolumeSource)
-		if err := Convert_v1_AWSElasticBlockStoreVolumeSource_To_api_AWSElasticBlockStoreVolumeSource(in.AWSElasticBlockStore, out.AWSElasticBlockStore, s); err != nil {
-			return err
-		}
-	} else {
-		out.AWSElasticBlockStore = nil
-	}
-	// unable to generate simple pointer conversion for v1.GitRepoVolumeSource -> api.GitRepoVolumeSource
-	if in.GitRepo != nil {
-		out.GitRepo = new(api.GitRepoVolumeSource)
-		if err := Convert_v1_GitRepoVolumeSource_To_api_GitRepoVolumeSource(in.GitRepo, out.GitRepo, s); err != nil {
-			return err
-		}
-	} else {
-		out.GitRepo = nil
-	}
-	// unable to generate simple pointer conversion for v1.SecretVolumeSource -> api.SecretVolumeSource
-	if in.Secret != nil {
-		out.Secret = new(api.SecretVolumeSource)
-		if err := Convert_v1_SecretVolumeSource_To_api_SecretVolumeSource(in.Secret, out.Secret, s); err != nil {
-			return err
-		}
-	} else {
-		out.Secret = nil
-	}
-	// unable to generate simple pointer conversion for v1.NFSVolumeSource -> api.NFSVolumeSource
-	if in.NFS != nil {
-		out.NFS = new(api.NFSVolumeSource)
-		if err := Convert_v1_NFSVolumeSource_To_api_NFSVolumeSource(in.NFS, out.NFS, s); err != nil {
-			return err
-		}
-	} else {
-		out.NFS = nil
-	}
-	// unable to generate simple pointer conversion for v1.ISCSIVolumeSource -> api.ISCSIVolumeSource
-	if in.ISCSI != nil {
-		out.ISCSI = new(api.ISCSIVolumeSource)
-		if err := Convert_v1_ISCSIVolumeSource_To_api_ISCSIVolumeSource(in.ISCSI, out.ISCSI, s); err != nil {
-			return err
-		}
-	} else {
-		out.ISCSI = nil
-	}
-	// unable to generate simple pointer conversion for v1.GlusterfsVolumeSource -> api.GlusterfsVolumeSource
-	if in.Glusterfs != nil {
-		out.Glusterfs = new(api.GlusterfsVolumeSource)
-		if err := Convert_v1_GlusterfsVolumeSource_To_api_GlusterfsVolumeSource(in.Glusterfs, out.Glusterfs, s); err != nil {
-			return err
-		}
-	} else {
nil - } - // unable to generate simple pointer conversion for v1.PersistentVolumeClaimVolumeSource -> api.PersistentVolumeClaimVolumeSource - if in.PersistentVolumeClaim != nil { - out.PersistentVolumeClaim = new(api.PersistentVolumeClaimVolumeSource) - if err := Convert_v1_PersistentVolumeClaimVolumeSource_To_api_PersistentVolumeClaimVolumeSource(in.PersistentVolumeClaim, out.PersistentVolumeClaim, s); err != nil { - return err - } - } else { - out.PersistentVolumeClaim = nil - } - // unable to generate simple pointer conversion for v1.RBDVolumeSource -> api.RBDVolumeSource - if in.RBD != nil { - out.RBD = new(api.RBDVolumeSource) - if err := Convert_v1_RBDVolumeSource_To_api_RBDVolumeSource(in.RBD, out.RBD, s); err != nil { - return err - } - } else { - out.RBD = nil - } - // unable to generate simple pointer conversion for v1.FlexVolumeSource -> api.FlexVolumeSource - if in.FlexVolume != nil { - out.FlexVolume = new(api.FlexVolumeSource) - if err := Convert_v1_FlexVolumeSource_To_api_FlexVolumeSource(in.FlexVolume, out.FlexVolume, s); err != nil { - return err - } - } else { - out.FlexVolume = nil - } - // unable to generate simple pointer conversion for v1.CinderVolumeSource -> api.CinderVolumeSource - if in.Cinder != nil { - out.Cinder = new(api.CinderVolumeSource) - if err := Convert_v1_CinderVolumeSource_To_api_CinderVolumeSource(in.Cinder, out.Cinder, s); err != nil { - return err - } - } else { - out.Cinder = nil - } - // unable to generate simple pointer conversion for v1.CephFSVolumeSource -> api.CephFSVolumeSource - if in.CephFS != nil { - out.CephFS = new(api.CephFSVolumeSource) - if err := Convert_v1_CephFSVolumeSource_To_api_CephFSVolumeSource(in.CephFS, out.CephFS, s); err != nil { - return err - } - } else { - out.CephFS = nil - } - // unable to generate simple pointer conversion for v1.FlockerVolumeSource -> api.FlockerVolumeSource - if in.Flocker != nil { - out.Flocker = new(api.FlockerVolumeSource) - if err := Convert_v1_FlockerVolumeSource_To_api_FlockerVolumeSource(in.Flocker, out.Flocker, s); err != nil { - return err - } - } else { - out.Flocker = nil - } - // unable to generate simple pointer conversion for v1.DownwardAPIVolumeSource -> api.DownwardAPIVolumeSource - if in.DownwardAPI != nil { - out.DownwardAPI = new(api.DownwardAPIVolumeSource) - if err := Convert_v1_DownwardAPIVolumeSource_To_api_DownwardAPIVolumeSource(in.DownwardAPI, out.DownwardAPI, s); err != nil { - return err - } - } else { - out.DownwardAPI = nil - } - // unable to generate simple pointer conversion for v1.FCVolumeSource -> api.FCVolumeSource - if in.FC != nil { - out.FC = new(api.FCVolumeSource) - if err := Convert_v1_FCVolumeSource_To_api_FCVolumeSource(in.FC, out.FC, s); err != nil { - return err - } - } else { - out.FC = nil - } - // unable to generate simple pointer conversion for v1.AzureFileVolumeSource -> api.AzureFileVolumeSource - if in.AzureFile != nil { - out.AzureFile = new(api.AzureFileVolumeSource) - if err := Convert_v1_AzureFileVolumeSource_To_api_AzureFileVolumeSource(in.AzureFile, out.AzureFile, s); err != nil { - return err - } - } else { - out.AzureFile = nil - } - // unable to generate simple pointer conversion for v1.ConfigMapVolumeSource -> api.ConfigMapVolumeSource - if in.ConfigMap != nil { - out.ConfigMap = new(api.ConfigMapVolumeSource) - if err := Convert_v1_ConfigMapVolumeSource_To_api_ConfigMapVolumeSource(in.ConfigMap, out.ConfigMap, s); err != nil { - return err - } - } else { - out.ConfigMap = nil - } - return nil -} - -func 
Convert_v1_VolumeSource_To_api_VolumeSource(in *v1.VolumeSource, out *api.VolumeSource, s conversion.Scope) error { - return autoConvert_v1_VolumeSource_To_api_VolumeSource(in, out, s) -} - -func autoConvert_v1_Job_To_extensions_Job(in *Job, out *extensions.Job, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*Job))(in) - } - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } - if err := Convert_v1_JobSpec_To_extensions_JobSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1_JobStatus_To_extensions_JobStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_v1_Job_To_extensions_Job(in *Job, out *extensions.Job, s conversion.Scope) error { - return autoConvert_v1_Job_To_extensions_Job(in, out, s) -} - -func autoConvert_v1_JobCondition_To_extensions_JobCondition(in *JobCondition, out *extensions.JobCondition, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*JobCondition))(in) - } - out.Type = extensions.JobConditionType(in.Type) - out.Status = api.ConditionStatus(in.Status) - if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastProbeTime, &out.LastProbeTime, s); err != nil { - return err - } - if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastTransitionTime, &out.LastTransitionTime, s); err != nil { - return err - } - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -func Convert_v1_JobCondition_To_extensions_JobCondition(in *JobCondition, out *extensions.JobCondition, s conversion.Scope) error { - return autoConvert_v1_JobCondition_To_extensions_JobCondition(in, out, s) -} - -func autoConvert_v1_JobList_To_extensions_JobList(in *JobList, out *extensions.JobList, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*JobList))(in) - } - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - out.Items = make([]extensions.Job, len(in.Items)) - for i := range in.Items { - if err := Convert_v1_Job_To_extensions_Job(&in.Items[i], &out.Items[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_v1_JobList_To_extensions_JobList(in *JobList, out *extensions.JobList, s conversion.Scope) error { - return autoConvert_v1_JobList_To_extensions_JobList(in, out, s) -} - -func autoConvert_v1_JobSpec_To_extensions_JobSpec(in *JobSpec, out *extensions.JobSpec, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*JobSpec))(in) - } - if in.Parallelism != nil { - out.Parallelism = new(int) - *out.Parallelism = int(*in.Parallelism) - } else { - out.Parallelism = nil - } - if in.Completions != nil { - out.Completions = new(int) - *out.Completions = int(*in.Completions) - } else { - out.Completions = nil - } - if in.ActiveDeadlineSeconds != nil { - out.ActiveDeadlineSeconds = new(int64) - *out.ActiveDeadlineSeconds = 
*in.ActiveDeadlineSeconds - } else { - out.ActiveDeadlineSeconds = nil - } - // unable to generate simple pointer conversion for v1.LabelSelector -> unversioned.LabelSelector - if in.Selector != nil { - out.Selector = new(unversioned.LabelSelector) - if err := Convert_v1_LabelSelector_To_unversioned_LabelSelector(in.Selector, out.Selector, s); err != nil { - return err - } - } else { - out.Selector = nil - } - if in.ManualSelector != nil { - out.ManualSelector = new(bool) - *out.ManualSelector = *in.ManualSelector - } else { - out.ManualSelector = nil - } - if err := Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { - return err - } - return nil -} - -func Convert_v1_JobSpec_To_extensions_JobSpec(in *JobSpec, out *extensions.JobSpec, s conversion.Scope) error { - return autoConvert_v1_JobSpec_To_extensions_JobSpec(in, out, s) -} - -func autoConvert_v1_JobStatus_To_extensions_JobStatus(in *JobStatus, out *extensions.JobStatus, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*JobStatus))(in) - } - if in.Conditions != nil { - out.Conditions = make([]extensions.JobCondition, len(in.Conditions)) - for i := range in.Conditions { - if err := Convert_v1_JobCondition_To_extensions_JobCondition(&in.Conditions[i], &out.Conditions[i], s); err != nil { - return err - } - } - } else { - out.Conditions = nil - } - // unable to generate simple pointer conversion for unversioned.Time -> unversioned.Time - if in.StartTime != nil { - out.StartTime = new(unversioned.Time) - if err := api.Convert_unversioned_Time_To_unversioned_Time(in.StartTime, out.StartTime, s); err != nil { - return err - } - } else { - out.StartTime = nil - } - // unable to generate simple pointer conversion for unversioned.Time -> unversioned.Time - if in.CompletionTime != nil { - out.CompletionTime = new(unversioned.Time) - if err := api.Convert_unversioned_Time_To_unversioned_Time(in.CompletionTime, out.CompletionTime, s); err != nil { - return err - } - } else { - out.CompletionTime = nil - } - out.Active = int(in.Active) - out.Succeeded = int(in.Succeeded) - out.Failed = int(in.Failed) - return nil -} - -func Convert_v1_JobStatus_To_extensions_JobStatus(in *JobStatus, out *extensions.JobStatus, s conversion.Scope) error { - return autoConvert_v1_JobStatus_To_extensions_JobStatus(in, out, s) -} - -func autoConvert_v1_LabelSelector_To_unversioned_LabelSelector(in *LabelSelector, out *unversioned.LabelSelector, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*LabelSelector))(in) - } - if in.MatchLabels != nil { - out.MatchLabels = make(map[string]string) - for key, val := range in.MatchLabels { - out.MatchLabels[key] = val - } - } else { - out.MatchLabels = nil - } - if in.MatchExpressions != nil { - out.MatchExpressions = make([]unversioned.LabelSelectorRequirement, len(in.MatchExpressions)) - for i := range in.MatchExpressions { - if err := Convert_v1_LabelSelectorRequirement_To_unversioned_LabelSelectorRequirement(&in.MatchExpressions[i], &out.MatchExpressions[i], s); err != nil { - return err - } - } - } else { - out.MatchExpressions = nil - } - return nil -} - -func Convert_v1_LabelSelector_To_unversioned_LabelSelector(in *LabelSelector, out *unversioned.LabelSelector, s conversion.Scope) error { - return autoConvert_v1_LabelSelector_To_unversioned_LabelSelector(in, out, s) -} - -func 
autoConvert_v1_LabelSelectorRequirement_To_unversioned_LabelSelectorRequirement(in *LabelSelectorRequirement, out *unversioned.LabelSelectorRequirement, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*LabelSelectorRequirement))(in) - } - out.Key = in.Key - out.Operator = unversioned.LabelSelectorOperator(in.Operator) - if in.Values != nil { - out.Values = make([]string, len(in.Values)) - for i := range in.Values { - out.Values[i] = in.Values[i] - } - } else { - out.Values = nil - } - return nil -} - -func Convert_v1_LabelSelectorRequirement_To_unversioned_LabelSelectorRequirement(in *LabelSelectorRequirement, out *unversioned.LabelSelectorRequirement, s conversion.Scope) error { - return autoConvert_v1_LabelSelectorRequirement_To_unversioned_LabelSelectorRequirement(in, out, s) -} - -func autoConvert_extensions_Job_To_v1_Job(in *extensions.Job, out *Job, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*extensions.Job))(in) - } - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } - if err := Convert_extensions_JobSpec_To_v1_JobSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_extensions_JobStatus_To_v1_JobStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_extensions_Job_To_v1_Job(in *extensions.Job, out *Job, s conversion.Scope) error { - return autoConvert_extensions_Job_To_v1_Job(in, out, s) -} - -func autoConvert_extensions_JobCondition_To_v1_JobCondition(in *extensions.JobCondition, out *JobCondition, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*extensions.JobCondition))(in) - } - out.Type = JobConditionType(in.Type) - out.Status = v1.ConditionStatus(in.Status) - if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastProbeTime, &out.LastProbeTime, s); err != nil { - return err - } - if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastTransitionTime, &out.LastTransitionTime, s); err != nil { - return err - } - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -func Convert_extensions_JobCondition_To_v1_JobCondition(in *extensions.JobCondition, out *JobCondition, s conversion.Scope) error { - return autoConvert_extensions_JobCondition_To_v1_JobCondition(in, out, s) -} - -func autoConvert_extensions_JobList_To_v1_JobList(in *extensions.JobList, out *JobList, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*extensions.JobList))(in) - } - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - out.Items = make([]Job, len(in.Items)) - for i := range in.Items { - if err := Convert_extensions_Job_To_v1_Job(&in.Items[i], &out.Items[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_extensions_JobList_To_v1_JobList(in *extensions.JobList, out *JobList, s conversion.Scope) error { - return 
autoConvert_extensions_JobList_To_v1_JobList(in, out, s) -} - -func autoConvert_extensions_JobSpec_To_v1_JobSpec(in *extensions.JobSpec, out *JobSpec, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*extensions.JobSpec))(in) - } - if in.Parallelism != nil { - out.Parallelism = new(int32) - *out.Parallelism = int32(*in.Parallelism) - } else { - out.Parallelism = nil - } - if in.Completions != nil { - out.Completions = new(int32) - *out.Completions = int32(*in.Completions) - } else { - out.Completions = nil - } - if in.ActiveDeadlineSeconds != nil { - out.ActiveDeadlineSeconds = new(int64) - *out.ActiveDeadlineSeconds = *in.ActiveDeadlineSeconds - } else { - out.ActiveDeadlineSeconds = nil - } - // unable to generate simple pointer conversion for unversioned.LabelSelector -> v1.LabelSelector - if in.Selector != nil { - out.Selector = new(LabelSelector) - if err := Convert_unversioned_LabelSelector_To_v1_LabelSelector(in.Selector, out.Selector, s); err != nil { - return err - } - } else { - out.Selector = nil - } - if in.ManualSelector != nil { - out.ManualSelector = new(bool) - *out.ManualSelector = *in.ManualSelector - } else { - out.ManualSelector = nil - } - if err := Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { - return err - } - return nil -} - -func Convert_extensions_JobSpec_To_v1_JobSpec(in *extensions.JobSpec, out *JobSpec, s conversion.Scope) error { - return autoConvert_extensions_JobSpec_To_v1_JobSpec(in, out, s) -} - -func autoConvert_extensions_JobStatus_To_v1_JobStatus(in *extensions.JobStatus, out *JobStatus, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*extensions.JobStatus))(in) - } - if in.Conditions != nil { - out.Conditions = make([]JobCondition, len(in.Conditions)) - for i := range in.Conditions { - if err := Convert_extensions_JobCondition_To_v1_JobCondition(&in.Conditions[i], &out.Conditions[i], s); err != nil { - return err - } - } - } else { - out.Conditions = nil - } - // unable to generate simple pointer conversion for unversioned.Time -> unversioned.Time - if in.StartTime != nil { - out.StartTime = new(unversioned.Time) - if err := api.Convert_unversioned_Time_To_unversioned_Time(in.StartTime, out.StartTime, s); err != nil { - return err - } - } else { - out.StartTime = nil - } - // unable to generate simple pointer conversion for unversioned.Time -> unversioned.Time - if in.CompletionTime != nil { - out.CompletionTime = new(unversioned.Time) - if err := api.Convert_unversioned_Time_To_unversioned_Time(in.CompletionTime, out.CompletionTime, s); err != nil { - return err - } - } else { - out.CompletionTime = nil - } - out.Active = int32(in.Active) - out.Succeeded = int32(in.Succeeded) - out.Failed = int32(in.Failed) - return nil -} - -func Convert_extensions_JobStatus_To_v1_JobStatus(in *extensions.JobStatus, out *JobStatus, s conversion.Scope) error { - return autoConvert_extensions_JobStatus_To_v1_JobStatus(in, out, s) -} - -func init() { - err := api.Scheme.AddGeneratedConversionFuncs( - autoConvert_api_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource, - autoConvert_api_AzureFileVolumeSource_To_v1_AzureFileVolumeSource, - autoConvert_api_Capabilities_To_v1_Capabilities, - autoConvert_api_CephFSVolumeSource_To_v1_CephFSVolumeSource, - autoConvert_api_CinderVolumeSource_To_v1_CinderVolumeSource, - 
autoConvert_api_ConfigMapKeySelector_To_v1_ConfigMapKeySelector, - autoConvert_api_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource, - autoConvert_api_ContainerPort_To_v1_ContainerPort, - autoConvert_api_Container_To_v1_Container, - autoConvert_api_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile, - autoConvert_api_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource, - autoConvert_api_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource, - autoConvert_api_EnvVarSource_To_v1_EnvVarSource, - autoConvert_api_EnvVar_To_v1_EnvVar, - autoConvert_api_ExecAction_To_v1_ExecAction, - autoConvert_api_FCVolumeSource_To_v1_FCVolumeSource, - autoConvert_api_FlexVolumeSource_To_v1_FlexVolumeSource, - autoConvert_api_FlockerVolumeSource_To_v1_FlockerVolumeSource, - autoConvert_api_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource, - autoConvert_api_GitRepoVolumeSource_To_v1_GitRepoVolumeSource, - autoConvert_api_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource, - autoConvert_api_HTTPGetAction_To_v1_HTTPGetAction, - autoConvert_api_HTTPHeader_To_v1_HTTPHeader, - autoConvert_api_Handler_To_v1_Handler, - autoConvert_api_HostPathVolumeSource_To_v1_HostPathVolumeSource, - autoConvert_api_ISCSIVolumeSource_To_v1_ISCSIVolumeSource, - autoConvert_api_KeyToPath_To_v1_KeyToPath, - autoConvert_api_Lifecycle_To_v1_Lifecycle, - autoConvert_api_LocalObjectReference_To_v1_LocalObjectReference, - autoConvert_api_NFSVolumeSource_To_v1_NFSVolumeSource, - autoConvert_api_ObjectFieldSelector_To_v1_ObjectFieldSelector, - autoConvert_api_ObjectMeta_To_v1_ObjectMeta, - autoConvert_api_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource, - autoConvert_api_PodSpec_To_v1_PodSpec, - autoConvert_api_PodTemplateSpec_To_v1_PodTemplateSpec, - autoConvert_api_Probe_To_v1_Probe, - autoConvert_api_RBDVolumeSource_To_v1_RBDVolumeSource, - autoConvert_api_ResourceRequirements_To_v1_ResourceRequirements, - autoConvert_api_SELinuxOptions_To_v1_SELinuxOptions, - autoConvert_api_SecretKeySelector_To_v1_SecretKeySelector, - autoConvert_api_SecretVolumeSource_To_v1_SecretVolumeSource, - autoConvert_api_SecurityContext_To_v1_SecurityContext, - autoConvert_api_TCPSocketAction_To_v1_TCPSocketAction, - autoConvert_api_VolumeMount_To_v1_VolumeMount, - autoConvert_api_VolumeSource_To_v1_VolumeSource, - autoConvert_api_Volume_To_v1_Volume, - autoConvert_extensions_JobCondition_To_v1_JobCondition, - autoConvert_extensions_JobList_To_v1_JobList, - autoConvert_extensions_JobSpec_To_v1_JobSpec, - autoConvert_extensions_JobStatus_To_v1_JobStatus, - autoConvert_extensions_Job_To_v1_Job, - autoConvert_unversioned_LabelSelectorRequirement_To_v1_LabelSelectorRequirement, - autoConvert_unversioned_LabelSelector_To_v1_LabelSelector, - autoConvert_v1_AWSElasticBlockStoreVolumeSource_To_api_AWSElasticBlockStoreVolumeSource, - autoConvert_v1_AzureFileVolumeSource_To_api_AzureFileVolumeSource, - autoConvert_v1_Capabilities_To_api_Capabilities, - autoConvert_v1_CephFSVolumeSource_To_api_CephFSVolumeSource, - autoConvert_v1_CinderVolumeSource_To_api_CinderVolumeSource, - autoConvert_v1_ConfigMapKeySelector_To_api_ConfigMapKeySelector, - autoConvert_v1_ConfigMapVolumeSource_To_api_ConfigMapVolumeSource, - autoConvert_v1_ContainerPort_To_api_ContainerPort, - autoConvert_v1_Container_To_api_Container, - autoConvert_v1_DownwardAPIVolumeFile_To_api_DownwardAPIVolumeFile, - autoConvert_v1_DownwardAPIVolumeSource_To_api_DownwardAPIVolumeSource, - autoConvert_v1_EmptyDirVolumeSource_To_api_EmptyDirVolumeSource, - 
autoConvert_v1_EnvVarSource_To_api_EnvVarSource, - autoConvert_v1_EnvVar_To_api_EnvVar, - autoConvert_v1_ExecAction_To_api_ExecAction, - autoConvert_v1_FCVolumeSource_To_api_FCVolumeSource, - autoConvert_v1_FlexVolumeSource_To_api_FlexVolumeSource, - autoConvert_v1_FlockerVolumeSource_To_api_FlockerVolumeSource, - autoConvert_v1_GCEPersistentDiskVolumeSource_To_api_GCEPersistentDiskVolumeSource, - autoConvert_v1_GitRepoVolumeSource_To_api_GitRepoVolumeSource, - autoConvert_v1_GlusterfsVolumeSource_To_api_GlusterfsVolumeSource, - autoConvert_v1_HTTPGetAction_To_api_HTTPGetAction, - autoConvert_v1_HTTPHeader_To_api_HTTPHeader, - autoConvert_v1_Handler_To_api_Handler, - autoConvert_v1_HostPathVolumeSource_To_api_HostPathVolumeSource, - autoConvert_v1_ISCSIVolumeSource_To_api_ISCSIVolumeSource, - autoConvert_v1_JobCondition_To_extensions_JobCondition, - autoConvert_v1_JobList_To_extensions_JobList, - autoConvert_v1_JobSpec_To_extensions_JobSpec, - autoConvert_v1_JobStatus_To_extensions_JobStatus, - autoConvert_v1_Job_To_extensions_Job, - autoConvert_v1_KeyToPath_To_api_KeyToPath, - autoConvert_v1_LabelSelectorRequirement_To_unversioned_LabelSelectorRequirement, - autoConvert_v1_LabelSelector_To_unversioned_LabelSelector, - autoConvert_v1_Lifecycle_To_api_Lifecycle, - autoConvert_v1_LocalObjectReference_To_api_LocalObjectReference, - autoConvert_v1_NFSVolumeSource_To_api_NFSVolumeSource, - autoConvert_v1_ObjectFieldSelector_To_api_ObjectFieldSelector, - autoConvert_v1_ObjectMeta_To_api_ObjectMeta, - autoConvert_v1_PersistentVolumeClaimVolumeSource_To_api_PersistentVolumeClaimVolumeSource, - autoConvert_v1_PodSpec_To_api_PodSpec, - autoConvert_v1_PodTemplateSpec_To_api_PodTemplateSpec, - autoConvert_v1_Probe_To_api_Probe, - autoConvert_v1_RBDVolumeSource_To_api_RBDVolumeSource, - autoConvert_v1_ResourceRequirements_To_api_ResourceRequirements, - autoConvert_v1_SELinuxOptions_To_api_SELinuxOptions, - autoConvert_v1_SecretKeySelector_To_api_SecretKeySelector, - autoConvert_v1_SecretVolumeSource_To_api_SecretVolumeSource, - autoConvert_v1_SecurityContext_To_api_SecurityContext, - autoConvert_v1_TCPSocketAction_To_api_TCPSocketAction, - autoConvert_v1_VolumeMount_To_api_VolumeMount, - autoConvert_v1_VolumeSource_To_api_VolumeSource, - autoConvert_v1_Volume_To_api_Volume, - ) - if err != nil { - // If one of the conversion functions is malformed, detect it immediately. - panic(err) - } +func Convert_unversioned_LabelSelectorRequirement_To_v1_LabelSelectorRequirement(in *unversioned.LabelSelectorRequirement, out *LabelSelectorRequirement, s conversion.Scope) error { + return autoConvert_unversioned_LabelSelectorRequirement_To_v1_LabelSelectorRequirement(in, out, s) } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/v1/deep_copy_generated.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/v1/deep_copy_generated.go index 3ddbd6d2b7a2..c2a50b4ee181 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/v1/deep_copy_generated.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/v1/deep_copy_generated.go @@ -1,5 +1,7 @@ +// +build !ignore_autogenerated + /* -Copyright 2015 The Kubernetes Authors All rights reserved. +Copyright 2016 The Kubernetes Authors All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,1014 +16,55 @@ See the License for the specific language governing permissions and limitations under the License. */ -// DO NOT EDIT. 
THIS FILE IS AUTO-GENERATED BY $KUBEROOT/hack/update-generated-deep-copies.sh. +// This file was autogenerated by deepcopy-gen. Do not edit it manually! package v1 import ( - time "time" api "k8s.io/kubernetes/pkg/api" - resource "k8s.io/kubernetes/pkg/api/resource" unversioned "k8s.io/kubernetes/pkg/api/unversioned" - v1 "k8s.io/kubernetes/pkg/api/v1" + api_v1 "k8s.io/kubernetes/pkg/api/v1" conversion "k8s.io/kubernetes/pkg/conversion" - intstr "k8s.io/kubernetes/pkg/util/intstr" - inf "speter.net/go/exp/math/dec/inf" ) -func deepCopy_resource_Quantity(in resource.Quantity, out *resource.Quantity, c *conversion.Cloner) error { - if in.Amount != nil { - if newVal, err := c.DeepCopy(in.Amount); err != nil { - return err - } else { - out.Amount = newVal.(*inf.Dec) - } - } else { - out.Amount = nil - } - out.Format = in.Format - return nil -} - -func deepCopy_unversioned_ListMeta(in unversioned.ListMeta, out *unversioned.ListMeta, c *conversion.Cloner) error { - out.SelfLink = in.SelfLink - out.ResourceVersion = in.ResourceVersion - return nil -} - -func deepCopy_unversioned_Time(in unversioned.Time, out *unversioned.Time, c *conversion.Cloner) error { - if newVal, err := c.DeepCopy(in.Time); err != nil { - return err - } else { - out.Time = newVal.(time.Time) - } - return nil -} - -func deepCopy_unversioned_TypeMeta(in unversioned.TypeMeta, out *unversioned.TypeMeta, c *conversion.Cloner) error { - out.Kind = in.Kind - out.APIVersion = in.APIVersion - return nil -} - -func deepCopy_v1_AWSElasticBlockStoreVolumeSource(in v1.AWSElasticBlockStoreVolumeSource, out *v1.AWSElasticBlockStoreVolumeSource, c *conversion.Cloner) error { - out.VolumeID = in.VolumeID - out.FSType = in.FSType - out.Partition = in.Partition - out.ReadOnly = in.ReadOnly - return nil -} - -func deepCopy_v1_AzureFileVolumeSource(in v1.AzureFileVolumeSource, out *v1.AzureFileVolumeSource, c *conversion.Cloner) error { - out.SecretName = in.SecretName - out.ShareName = in.ShareName - out.ReadOnly = in.ReadOnly - return nil -} - -func deepCopy_v1_Capabilities(in v1.Capabilities, out *v1.Capabilities, c *conversion.Cloner) error { - if in.Add != nil { - out.Add = make([]v1.Capability, len(in.Add)) - for i := range in.Add { - out.Add[i] = in.Add[i] - } - } else { - out.Add = nil - } - if in.Drop != nil { - out.Drop = make([]v1.Capability, len(in.Drop)) - for i := range in.Drop { - out.Drop[i] = in.Drop[i] - } - } else { - out.Drop = nil - } - return nil -} - -func deepCopy_v1_CephFSVolumeSource(in v1.CephFSVolumeSource, out *v1.CephFSVolumeSource, c *conversion.Cloner) error { - if in.Monitors != nil { - out.Monitors = make([]string, len(in.Monitors)) - for i := range in.Monitors { - out.Monitors[i] = in.Monitors[i] - } - } else { - out.Monitors = nil - } - out.Path = in.Path - out.User = in.User - out.SecretFile = in.SecretFile - if in.SecretRef != nil { - out.SecretRef = new(v1.LocalObjectReference) - if err := deepCopy_v1_LocalObjectReference(*in.SecretRef, out.SecretRef, c); err != nil { - return err - } - } else { - out.SecretRef = nil - } - out.ReadOnly = in.ReadOnly - return nil -} - -func deepCopy_v1_CinderVolumeSource(in v1.CinderVolumeSource, out *v1.CinderVolumeSource, c *conversion.Cloner) error { - out.VolumeID = in.VolumeID - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly - return nil -} - -func deepCopy_v1_ConfigMapKeySelector(in v1.ConfigMapKeySelector, out *v1.ConfigMapKeySelector, c *conversion.Cloner) error { - if err := deepCopy_v1_LocalObjectReference(in.LocalObjectReference, &out.LocalObjectReference,
c); err != nil { - return err - } - out.Key = in.Key - return nil -} - -func deepCopy_v1_ConfigMapVolumeSource(in v1.ConfigMapVolumeSource, out *v1.ConfigMapVolumeSource, c *conversion.Cloner) error { - if err := deepCopy_v1_LocalObjectReference(in.LocalObjectReference, &out.LocalObjectReference, c); err != nil { - return err - } - if in.Items != nil { - out.Items = make([]v1.KeyToPath, len(in.Items)) - for i := range in.Items { - if err := deepCopy_v1_KeyToPath(in.Items[i], &out.Items[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func deepCopy_v1_Container(in v1.Container, out *v1.Container, c *conversion.Cloner) error { - out.Name = in.Name - out.Image = in.Image - if in.Command != nil { - out.Command = make([]string, len(in.Command)) - for i := range in.Command { - out.Command[i] = in.Command[i] - } - } else { - out.Command = nil - } - if in.Args != nil { - out.Args = make([]string, len(in.Args)) - for i := range in.Args { - out.Args[i] = in.Args[i] - } - } else { - out.Args = nil - } - out.WorkingDir = in.WorkingDir - if in.Ports != nil { - out.Ports = make([]v1.ContainerPort, len(in.Ports)) - for i := range in.Ports { - if err := deepCopy_v1_ContainerPort(in.Ports[i], &out.Ports[i], c); err != nil { - return err - } - } - } else { - out.Ports = nil - } - if in.Env != nil { - out.Env = make([]v1.EnvVar, len(in.Env)) - for i := range in.Env { - if err := deepCopy_v1_EnvVar(in.Env[i], &out.Env[i], c); err != nil { - return err - } - } - } else { - out.Env = nil - } - if err := deepCopy_v1_ResourceRequirements(in.Resources, &out.Resources, c); err != nil { - return err - } - if in.VolumeMounts != nil { - out.VolumeMounts = make([]v1.VolumeMount, len(in.VolumeMounts)) - for i := range in.VolumeMounts { - if err := deepCopy_v1_VolumeMount(in.VolumeMounts[i], &out.VolumeMounts[i], c); err != nil { - return err - } - } - } else { - out.VolumeMounts = nil - } - if in.LivenessProbe != nil { - out.LivenessProbe = new(v1.Probe) - if err := deepCopy_v1_Probe(*in.LivenessProbe, out.LivenessProbe, c); err != nil { - return err - } - } else { - out.LivenessProbe = nil - } - if in.ReadinessProbe != nil { - out.ReadinessProbe = new(v1.Probe) - if err := deepCopy_v1_Probe(*in.ReadinessProbe, out.ReadinessProbe, c); err != nil { - return err - } - } else { - out.ReadinessProbe = nil - } - if in.Lifecycle != nil { - out.Lifecycle = new(v1.Lifecycle) - if err := deepCopy_v1_Lifecycle(*in.Lifecycle, out.Lifecycle, c); err != nil { - return err - } - } else { - out.Lifecycle = nil - } - out.TerminationMessagePath = in.TerminationMessagePath - out.ImagePullPolicy = in.ImagePullPolicy - if in.SecurityContext != nil { - out.SecurityContext = new(v1.SecurityContext) - if err := deepCopy_v1_SecurityContext(*in.SecurityContext, out.SecurityContext, c); err != nil { - return err - } - } else { - out.SecurityContext = nil - } - out.Stdin = in.Stdin - out.StdinOnce = in.StdinOnce - out.TTY = in.TTY - return nil -} - -func deepCopy_v1_ContainerPort(in v1.ContainerPort, out *v1.ContainerPort, c *conversion.Cloner) error { - out.Name = in.Name - out.HostPort = in.HostPort - out.ContainerPort = in.ContainerPort - out.Protocol = in.Protocol - out.HostIP = in.HostIP - return nil -} - -func deepCopy_v1_DownwardAPIVolumeFile(in v1.DownwardAPIVolumeFile, out *v1.DownwardAPIVolumeFile, c *conversion.Cloner) error { - out.Path = in.Path - if err := deepCopy_v1_ObjectFieldSelector(in.FieldRef, &out.FieldRef, c); err != nil { - return err - } - return nil -} - -func 
deepCopy_v1_DownwardAPIVolumeSource(in v1.DownwardAPIVolumeSource, out *v1.DownwardAPIVolumeSource, c *conversion.Cloner) error { - if in.Items != nil { - out.Items = make([]v1.DownwardAPIVolumeFile, len(in.Items)) - for i := range in.Items { - if err := deepCopy_v1_DownwardAPIVolumeFile(in.Items[i], &out.Items[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func deepCopy_v1_EmptyDirVolumeSource(in v1.EmptyDirVolumeSource, out *v1.EmptyDirVolumeSource, c *conversion.Cloner) error { - out.Medium = in.Medium - return nil -} - -func deepCopy_v1_EnvVar(in v1.EnvVar, out *v1.EnvVar, c *conversion.Cloner) error { - out.Name = in.Name - out.Value = in.Value - if in.ValueFrom != nil { - out.ValueFrom = new(v1.EnvVarSource) - if err := deepCopy_v1_EnvVarSource(*in.ValueFrom, out.ValueFrom, c); err != nil { - return err - } - } else { - out.ValueFrom = nil - } - return nil -} - -func deepCopy_v1_EnvVarSource(in v1.EnvVarSource, out *v1.EnvVarSource, c *conversion.Cloner) error { - if in.FieldRef != nil { - out.FieldRef = new(v1.ObjectFieldSelector) - if err := deepCopy_v1_ObjectFieldSelector(*in.FieldRef, out.FieldRef, c); err != nil { - return err - } - } else { - out.FieldRef = nil - } - if in.ConfigMapKeyRef != nil { - out.ConfigMapKeyRef = new(v1.ConfigMapKeySelector) - if err := deepCopy_v1_ConfigMapKeySelector(*in.ConfigMapKeyRef, out.ConfigMapKeyRef, c); err != nil { - return err - } - } else { - out.ConfigMapKeyRef = nil - } - if in.SecretKeyRef != nil { - out.SecretKeyRef = new(v1.SecretKeySelector) - if err := deepCopy_v1_SecretKeySelector(*in.SecretKeyRef, out.SecretKeyRef, c); err != nil { - return err - } - } else { - out.SecretKeyRef = nil - } - return nil -} - -func deepCopy_v1_ExecAction(in v1.ExecAction, out *v1.ExecAction, c *conversion.Cloner) error { - if in.Command != nil { - out.Command = make([]string, len(in.Command)) - for i := range in.Command { - out.Command[i] = in.Command[i] - } - } else { - out.Command = nil - } - return nil -} - -func deepCopy_v1_FCVolumeSource(in v1.FCVolumeSource, out *v1.FCVolumeSource, c *conversion.Cloner) error { - if in.TargetWWNs != nil { - out.TargetWWNs = make([]string, len(in.TargetWWNs)) - for i := range in.TargetWWNs { - out.TargetWWNs[i] = in.TargetWWNs[i] - } - } else { - out.TargetWWNs = nil - } - if in.Lun != nil { - out.Lun = new(int32) - *out.Lun = *in.Lun - } else { - out.Lun = nil - } - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly - return nil -} - -func deepCopy_v1_FlexVolumeSource(in v1.FlexVolumeSource, out *v1.FlexVolumeSource, c *conversion.Cloner) error { - out.Driver = in.Driver - out.FSType = in.FSType - if in.SecretRef != nil { - out.SecretRef = new(v1.LocalObjectReference) - if err := deepCopy_v1_LocalObjectReference(*in.SecretRef, out.SecretRef, c); err != nil { - return err - } - } else { - out.SecretRef = nil - } - out.ReadOnly = in.ReadOnly - if in.Options != nil { - out.Options = make(map[string]string) - for key, val := range in.Options { - out.Options[key] = val - } - } else { - out.Options = nil - } - return nil -} - -func deepCopy_v1_FlockerVolumeSource(in v1.FlockerVolumeSource, out *v1.FlockerVolumeSource, c *conversion.Cloner) error { - out.DatasetName = in.DatasetName - return nil -} - -func deepCopy_v1_GCEPersistentDiskVolumeSource(in v1.GCEPersistentDiskVolumeSource, out *v1.GCEPersistentDiskVolumeSource, c *conversion.Cloner) error { - out.PDName = in.PDName - out.FSType = in.FSType - out.Partition = in.Partition - out.ReadOnly = in.ReadOnly - return nil 
-} - -func deepCopy_v1_GitRepoVolumeSource(in v1.GitRepoVolumeSource, out *v1.GitRepoVolumeSource, c *conversion.Cloner) error { - out.Repository = in.Repository - out.Revision = in.Revision - out.Directory = in.Directory - return nil -} - -func deepCopy_v1_GlusterfsVolumeSource(in v1.GlusterfsVolumeSource, out *v1.GlusterfsVolumeSource, c *conversion.Cloner) error { - out.EndpointsName = in.EndpointsName - out.Path = in.Path - out.ReadOnly = in.ReadOnly - return nil -} - -func deepCopy_v1_HTTPGetAction(in v1.HTTPGetAction, out *v1.HTTPGetAction, c *conversion.Cloner) error { - out.Path = in.Path - if err := deepCopy_intstr_IntOrString(in.Port, &out.Port, c); err != nil { - return err - } - out.Host = in.Host - out.Scheme = in.Scheme - if in.HTTPHeaders != nil { - out.HTTPHeaders = make([]v1.HTTPHeader, len(in.HTTPHeaders)) - for i := range in.HTTPHeaders { - if err := deepCopy_v1_HTTPHeader(in.HTTPHeaders[i], &out.HTTPHeaders[i], c); err != nil { - return err - } - } - } else { - out.HTTPHeaders = nil - } - return nil -} - -func deepCopy_v1_HTTPHeader(in v1.HTTPHeader, out *v1.HTTPHeader, c *conversion.Cloner) error { - out.Name = in.Name - out.Value = in.Value - return nil -} - -func deepCopy_v1_Handler(in v1.Handler, out *v1.Handler, c *conversion.Cloner) error { - if in.Exec != nil { - out.Exec = new(v1.ExecAction) - if err := deepCopy_v1_ExecAction(*in.Exec, out.Exec, c); err != nil { - return err - } - } else { - out.Exec = nil - } - if in.HTTPGet != nil { - out.HTTPGet = new(v1.HTTPGetAction) - if err := deepCopy_v1_HTTPGetAction(*in.HTTPGet, out.HTTPGet, c); err != nil { - return err - } - } else { - out.HTTPGet = nil - } - if in.TCPSocket != nil { - out.TCPSocket = new(v1.TCPSocketAction) - if err := deepCopy_v1_TCPSocketAction(*in.TCPSocket, out.TCPSocket, c); err != nil { - return err - } - } else { - out.TCPSocket = nil - } - return nil -} - -func deepCopy_v1_HostPathVolumeSource(in v1.HostPathVolumeSource, out *v1.HostPathVolumeSource, c *conversion.Cloner) error { - out.Path = in.Path - return nil -} - -func deepCopy_v1_ISCSIVolumeSource(in v1.ISCSIVolumeSource, out *v1.ISCSIVolumeSource, c *conversion.Cloner) error { - out.TargetPortal = in.TargetPortal - out.IQN = in.IQN - out.Lun = in.Lun - out.ISCSIInterface = in.ISCSIInterface - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly - return nil -} - -func deepCopy_v1_KeyToPath(in v1.KeyToPath, out *v1.KeyToPath, c *conversion.Cloner) error { - out.Key = in.Key - out.Path = in.Path - return nil -} - -func deepCopy_v1_Lifecycle(in v1.Lifecycle, out *v1.Lifecycle, c *conversion.Cloner) error { - if in.PostStart != nil { - out.PostStart = new(v1.Handler) - if err := deepCopy_v1_Handler(*in.PostStart, out.PostStart, c); err != nil { - return err - } - } else { - out.PostStart = nil - } - if in.PreStop != nil { - out.PreStop = new(v1.Handler) - if err := deepCopy_v1_Handler(*in.PreStop, out.PreStop, c); err != nil { - return err - } - } else { - out.PreStop = nil - } - return nil -} - -func deepCopy_v1_LocalObjectReference(in v1.LocalObjectReference, out *v1.LocalObjectReference, c *conversion.Cloner) error { - out.Name = in.Name - return nil -} - -func deepCopy_v1_NFSVolumeSource(in v1.NFSVolumeSource, out *v1.NFSVolumeSource, c *conversion.Cloner) error { - out.Server = in.Server - out.Path = in.Path - out.ReadOnly = in.ReadOnly - return nil -} - -func deepCopy_v1_ObjectFieldSelector(in v1.ObjectFieldSelector, out *v1.ObjectFieldSelector, c *conversion.Cloner) error { - out.APIVersion = in.APIVersion - out.FieldPath = 
in.FieldPath - return nil -} - -func deepCopy_v1_ObjectMeta(in v1.ObjectMeta, out *v1.ObjectMeta, c *conversion.Cloner) error { - out.Name = in.Name - out.GenerateName = in.GenerateName - out.Namespace = in.Namespace - out.SelfLink = in.SelfLink - out.UID = in.UID - out.ResourceVersion = in.ResourceVersion - out.Generation = in.Generation - if err := deepCopy_unversioned_Time(in.CreationTimestamp, &out.CreationTimestamp, c); err != nil { - return err - } - if in.DeletionTimestamp != nil { - out.DeletionTimestamp = new(unversioned.Time) - if err := deepCopy_unversioned_Time(*in.DeletionTimestamp, out.DeletionTimestamp, c); err != nil { - return err - } - } else { - out.DeletionTimestamp = nil - } - if in.DeletionGracePeriodSeconds != nil { - out.DeletionGracePeriodSeconds = new(int64) - *out.DeletionGracePeriodSeconds = *in.DeletionGracePeriodSeconds - } else { - out.DeletionGracePeriodSeconds = nil - } - if in.Labels != nil { - out.Labels = make(map[string]string) - for key, val := range in.Labels { - out.Labels[key] = val - } - } else { - out.Labels = nil - } - if in.Annotations != nil { - out.Annotations = make(map[string]string) - for key, val := range in.Annotations { - out.Annotations[key] = val - } - } else { - out.Annotations = nil - } - return nil -} - -func deepCopy_v1_PersistentVolumeClaimVolumeSource(in v1.PersistentVolumeClaimVolumeSource, out *v1.PersistentVolumeClaimVolumeSource, c *conversion.Cloner) error { - out.ClaimName = in.ClaimName - out.ReadOnly = in.ReadOnly - return nil -} - -func deepCopy_v1_PodSecurityContext(in v1.PodSecurityContext, out *v1.PodSecurityContext, c *conversion.Cloner) error { - if in.SELinuxOptions != nil { - out.SELinuxOptions = new(v1.SELinuxOptions) - if err := deepCopy_v1_SELinuxOptions(*in.SELinuxOptions, out.SELinuxOptions, c); err != nil { - return err - } - } else { - out.SELinuxOptions = nil - } - if in.RunAsUser != nil { - out.RunAsUser = new(int64) - *out.RunAsUser = *in.RunAsUser - } else { - out.RunAsUser = nil - } - if in.RunAsNonRoot != nil { - out.RunAsNonRoot = new(bool) - *out.RunAsNonRoot = *in.RunAsNonRoot - } else { - out.RunAsNonRoot = nil - } - if in.SupplementalGroups != nil { - out.SupplementalGroups = make([]int64, len(in.SupplementalGroups)) - for i := range in.SupplementalGroups { - out.SupplementalGroups[i] = in.SupplementalGroups[i] - } - } else { - out.SupplementalGroups = nil - } - if in.FSGroup != nil { - out.FSGroup = new(int64) - *out.FSGroup = *in.FSGroup - } else { - out.FSGroup = nil - } - return nil -} - -func deepCopy_v1_PodSpec(in v1.PodSpec, out *v1.PodSpec, c *conversion.Cloner) error { - if in.Volumes != nil { - out.Volumes = make([]v1.Volume, len(in.Volumes)) - for i := range in.Volumes { - if err := deepCopy_v1_Volume(in.Volumes[i], &out.Volumes[i], c); err != nil { - return err - } - } - } else { - out.Volumes = nil - } - if in.Containers != nil { - out.Containers = make([]v1.Container, len(in.Containers)) - for i := range in.Containers { - if err := deepCopy_v1_Container(in.Containers[i], &out.Containers[i], c); err != nil { - return err - } - } - } else { - out.Containers = nil - } - out.RestartPolicy = in.RestartPolicy - if in.TerminationGracePeriodSeconds != nil { - out.TerminationGracePeriodSeconds = new(int64) - *out.TerminationGracePeriodSeconds = *in.TerminationGracePeriodSeconds - } else { - out.TerminationGracePeriodSeconds = nil - } - if in.ActiveDeadlineSeconds != nil { - out.ActiveDeadlineSeconds = new(int64) - *out.ActiveDeadlineSeconds = *in.ActiveDeadlineSeconds - } else { - 
out.ActiveDeadlineSeconds = nil - } - out.DNSPolicy = in.DNSPolicy - if in.NodeSelector != nil { - out.NodeSelector = make(map[string]string) - for key, val := range in.NodeSelector { - out.NodeSelector[key] = val - } - } else { - out.NodeSelector = nil - } - out.ServiceAccountName = in.ServiceAccountName - out.DeprecatedServiceAccount = in.DeprecatedServiceAccount - out.NodeName = in.NodeName - out.HostNetwork = in.HostNetwork - out.HostPID = in.HostPID - out.HostIPC = in.HostIPC - if in.SecurityContext != nil { - out.SecurityContext = new(v1.PodSecurityContext) - if err := deepCopy_v1_PodSecurityContext(*in.SecurityContext, out.SecurityContext, c); err != nil { - return err - } - } else { - out.SecurityContext = nil - } - if in.ImagePullSecrets != nil { - out.ImagePullSecrets = make([]v1.LocalObjectReference, len(in.ImagePullSecrets)) - for i := range in.ImagePullSecrets { - if err := deepCopy_v1_LocalObjectReference(in.ImagePullSecrets[i], &out.ImagePullSecrets[i], c); err != nil { - return err - } - } - } else { - out.ImagePullSecrets = nil - } - return nil -} - -func deepCopy_v1_PodTemplateSpec(in v1.PodTemplateSpec, out *v1.PodTemplateSpec, c *conversion.Cloner) error { - if err := deepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if err := deepCopy_v1_PodSpec(in.Spec, &out.Spec, c); err != nil { - return err - } - return nil -} - -func deepCopy_v1_Probe(in v1.Probe, out *v1.Probe, c *conversion.Cloner) error { - if err := deepCopy_v1_Handler(in.Handler, &out.Handler, c); err != nil { - return err - } - out.InitialDelaySeconds = in.InitialDelaySeconds - out.TimeoutSeconds = in.TimeoutSeconds - out.PeriodSeconds = in.PeriodSeconds - out.SuccessThreshold = in.SuccessThreshold - out.FailureThreshold = in.FailureThreshold - return nil -} - -func deepCopy_v1_RBDVolumeSource(in v1.RBDVolumeSource, out *v1.RBDVolumeSource, c *conversion.Cloner) error { - if in.CephMonitors != nil { - out.CephMonitors = make([]string, len(in.CephMonitors)) - for i := range in.CephMonitors { - out.CephMonitors[i] = in.CephMonitors[i] - } - } else { - out.CephMonitors = nil - } - out.RBDImage = in.RBDImage - out.FSType = in.FSType - out.RBDPool = in.RBDPool - out.RadosUser = in.RadosUser - out.Keyring = in.Keyring - if in.SecretRef != nil { - out.SecretRef = new(v1.LocalObjectReference) - if err := deepCopy_v1_LocalObjectReference(*in.SecretRef, out.SecretRef, c); err != nil { - return err - } - } else { - out.SecretRef = nil - } - out.ReadOnly = in.ReadOnly - return nil -} - -func deepCopy_v1_ResourceRequirements(in v1.ResourceRequirements, out *v1.ResourceRequirements, c *conversion.Cloner) error { - if in.Limits != nil { - out.Limits = make(v1.ResourceList) - for key, val := range in.Limits { - newVal := new(resource.Quantity) - if err := deepCopy_resource_Quantity(val, newVal, c); err != nil { - return err - } - out.Limits[key] = *newVal - } - } else { - out.Limits = nil - } - if in.Requests != nil { - out.Requests = make(v1.ResourceList) - for key, val := range in.Requests { - newVal := new(resource.Quantity) - if err := deepCopy_resource_Quantity(val, newVal, c); err != nil { - return err - } - out.Requests[key] = *newVal - } - } else { - out.Requests = nil - } - return nil -} - -func deepCopy_v1_SELinuxOptions(in v1.SELinuxOptions, out *v1.SELinuxOptions, c *conversion.Cloner) error { - out.User = in.User - out.Role = in.Role - out.Type = in.Type - out.Level = in.Level - return nil -} - -func deepCopy_v1_SecretKeySelector(in v1.SecretKeySelector, out 
*v1.SecretKeySelector, c *conversion.Cloner) error { - if err := deepCopy_v1_LocalObjectReference(in.LocalObjectReference, &out.LocalObjectReference, c); err != nil { - return err - } - out.Key = in.Key - return nil -} - -func deepCopy_v1_SecretVolumeSource(in v1.SecretVolumeSource, out *v1.SecretVolumeSource, c *conversion.Cloner) error { - out.SecretName = in.SecretName - return nil -} - -func deepCopy_v1_SecurityContext(in v1.SecurityContext, out *v1.SecurityContext, c *conversion.Cloner) error { - if in.Capabilities != nil { - out.Capabilities = new(v1.Capabilities) - if err := deepCopy_v1_Capabilities(*in.Capabilities, out.Capabilities, c); err != nil { - return err - } - } else { - out.Capabilities = nil - } - if in.Privileged != nil { - out.Privileged = new(bool) - *out.Privileged = *in.Privileged - } else { - out.Privileged = nil - } - if in.SELinuxOptions != nil { - out.SELinuxOptions = new(v1.SELinuxOptions) - if err := deepCopy_v1_SELinuxOptions(*in.SELinuxOptions, out.SELinuxOptions, c); err != nil { - return err - } - } else { - out.SELinuxOptions = nil - } - if in.RunAsUser != nil { - out.RunAsUser = new(int64) - *out.RunAsUser = *in.RunAsUser - } else { - out.RunAsUser = nil - } - if in.RunAsNonRoot != nil { - out.RunAsNonRoot = new(bool) - *out.RunAsNonRoot = *in.RunAsNonRoot - } else { - out.RunAsNonRoot = nil - } - if in.ReadOnlyRootFilesystem != nil { - out.ReadOnlyRootFilesystem = new(bool) - *out.ReadOnlyRootFilesystem = *in.ReadOnlyRootFilesystem - } else { - out.ReadOnlyRootFilesystem = nil - } - return nil -} - -func deepCopy_v1_TCPSocketAction(in v1.TCPSocketAction, out *v1.TCPSocketAction, c *conversion.Cloner) error { - if err := deepCopy_intstr_IntOrString(in.Port, &out.Port, c); err != nil { - return err - } - return nil -} - -func deepCopy_v1_Volume(in v1.Volume, out *v1.Volume, c *conversion.Cloner) error { - out.Name = in.Name - if err := deepCopy_v1_VolumeSource(in.VolumeSource, &out.VolumeSource, c); err != nil { - return err - } - return nil -} - -func deepCopy_v1_VolumeMount(in v1.VolumeMount, out *v1.VolumeMount, c *conversion.Cloner) error { - out.Name = in.Name - out.ReadOnly = in.ReadOnly - out.MountPath = in.MountPath - return nil -} - -func deepCopy_v1_VolumeSource(in v1.VolumeSource, out *v1.VolumeSource, c *conversion.Cloner) error { - if in.HostPath != nil { - out.HostPath = new(v1.HostPathVolumeSource) - if err := deepCopy_v1_HostPathVolumeSource(*in.HostPath, out.HostPath, c); err != nil { - return err - } - } else { - out.HostPath = nil - } - if in.EmptyDir != nil { - out.EmptyDir = new(v1.EmptyDirVolumeSource) - if err := deepCopy_v1_EmptyDirVolumeSource(*in.EmptyDir, out.EmptyDir, c); err != nil { - return err - } - } else { - out.EmptyDir = nil - } - if in.GCEPersistentDisk != nil { - out.GCEPersistentDisk = new(v1.GCEPersistentDiskVolumeSource) - if err := deepCopy_v1_GCEPersistentDiskVolumeSource(*in.GCEPersistentDisk, out.GCEPersistentDisk, c); err != nil { - return err - } - } else { - out.GCEPersistentDisk = nil - } - if in.AWSElasticBlockStore != nil { - out.AWSElasticBlockStore = new(v1.AWSElasticBlockStoreVolumeSource) - if err := deepCopy_v1_AWSElasticBlockStoreVolumeSource(*in.AWSElasticBlockStore, out.AWSElasticBlockStore, c); err != nil { - return err - } - } else { - out.AWSElasticBlockStore = nil - } - if in.GitRepo != nil { - out.GitRepo = new(v1.GitRepoVolumeSource) - if err := deepCopy_v1_GitRepoVolumeSource(*in.GitRepo, out.GitRepo, c); err != nil { - return err - } - } else { - out.GitRepo = nil - } - if in.Secret != nil 
{ - out.Secret = new(v1.SecretVolumeSource) - if err := deepCopy_v1_SecretVolumeSource(*in.Secret, out.Secret, c); err != nil { - return err - } - } else { - out.Secret = nil - } - if in.NFS != nil { - out.NFS = new(v1.NFSVolumeSource) - if err := deepCopy_v1_NFSVolumeSource(*in.NFS, out.NFS, c); err != nil { - return err - } - } else { - out.NFS = nil - } - if in.ISCSI != nil { - out.ISCSI = new(v1.ISCSIVolumeSource) - if err := deepCopy_v1_ISCSIVolumeSource(*in.ISCSI, out.ISCSI, c); err != nil { - return err - } - } else { - out.ISCSI = nil - } - if in.Glusterfs != nil { - out.Glusterfs = new(v1.GlusterfsVolumeSource) - if err := deepCopy_v1_GlusterfsVolumeSource(*in.Glusterfs, out.Glusterfs, c); err != nil { - return err - } - } else { - out.Glusterfs = nil - } - if in.PersistentVolumeClaim != nil { - out.PersistentVolumeClaim = new(v1.PersistentVolumeClaimVolumeSource) - if err := deepCopy_v1_PersistentVolumeClaimVolumeSource(*in.PersistentVolumeClaim, out.PersistentVolumeClaim, c); err != nil { - return err - } - } else { - out.PersistentVolumeClaim = nil - } - if in.RBD != nil { - out.RBD = new(v1.RBDVolumeSource) - if err := deepCopy_v1_RBDVolumeSource(*in.RBD, out.RBD, c); err != nil { - return err - } - } else { - out.RBD = nil - } - if in.FlexVolume != nil { - out.FlexVolume = new(v1.FlexVolumeSource) - if err := deepCopy_v1_FlexVolumeSource(*in.FlexVolume, out.FlexVolume, c); err != nil { - return err - } - } else { - out.FlexVolume = nil - } - if in.Cinder != nil { - out.Cinder = new(v1.CinderVolumeSource) - if err := deepCopy_v1_CinderVolumeSource(*in.Cinder, out.Cinder, c); err != nil { - return err - } - } else { - out.Cinder = nil - } - if in.CephFS != nil { - out.CephFS = new(v1.CephFSVolumeSource) - if err := deepCopy_v1_CephFSVolumeSource(*in.CephFS, out.CephFS, c); err != nil { - return err - } - } else { - out.CephFS = nil - } - if in.Flocker != nil { - out.Flocker = new(v1.FlockerVolumeSource) - if err := deepCopy_v1_FlockerVolumeSource(*in.Flocker, out.Flocker, c); err != nil { - return err - } - } else { - out.Flocker = nil - } - if in.DownwardAPI != nil { - out.DownwardAPI = new(v1.DownwardAPIVolumeSource) - if err := deepCopy_v1_DownwardAPIVolumeSource(*in.DownwardAPI, out.DownwardAPI, c); err != nil { - return err - } - } else { - out.DownwardAPI = nil - } - if in.FC != nil { - out.FC = new(v1.FCVolumeSource) - if err := deepCopy_v1_FCVolumeSource(*in.FC, out.FC, c); err != nil { - return err - } - } else { - out.FC = nil - } - if in.AzureFile != nil { - out.AzureFile = new(v1.AzureFileVolumeSource) - if err := deepCopy_v1_AzureFileVolumeSource(*in.AzureFile, out.AzureFile, c); err != nil { - return err - } - } else { - out.AzureFile = nil - } - if in.ConfigMap != nil { - out.ConfigMap = new(v1.ConfigMapVolumeSource) - if err := deepCopy_v1_ConfigMapVolumeSource(*in.ConfigMap, out.ConfigMap, c); err != nil { - return err - } - } else { - out.ConfigMap = nil +func init() { + if err := api.Scheme.AddGeneratedDeepCopyFuncs( + DeepCopy_v1_Job, + DeepCopy_v1_JobCondition, + DeepCopy_v1_JobList, + DeepCopy_v1_JobSpec, + DeepCopy_v1_JobStatus, + DeepCopy_v1_LabelSelector, + DeepCopy_v1_LabelSelectorRequirement, + ); err != nil { + // if one of the deep copy functions is malformed, detect it immediately. 
+ panic(err) } - return nil } -func deepCopy_v1_Job(in Job, out *Job, c *conversion.Cloner) error { - if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { +func DeepCopy_v1_Job(in Job, out *Job, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } - if err := deepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + if err := api_v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { return err } - if err := deepCopy_v1_JobSpec(in.Spec, &out.Spec, c); err != nil { + if err := DeepCopy_v1_JobSpec(in.Spec, &out.Spec, c); err != nil { return err } - if err := deepCopy_v1_JobStatus(in.Status, &out.Status, c); err != nil { + if err := DeepCopy_v1_JobStatus(in.Status, &out.Status, c); err != nil { return err } return nil } -func deepCopy_v1_JobCondition(in JobCondition, out *JobCondition, c *conversion.Cloner) error { +func DeepCopy_v1_JobCondition(in JobCondition, out *JobCondition, c *conversion.Cloner) error { out.Type = in.Type out.Status = in.Status - if err := deepCopy_unversioned_Time(in.LastProbeTime, &out.LastProbeTime, c); err != nil { + if err := unversioned.DeepCopy_unversioned_Time(in.LastProbeTime, &out.LastProbeTime, c); err != nil { return err } - if err := deepCopy_unversioned_Time(in.LastTransitionTime, &out.LastTransitionTime, c); err != nil { + if err := unversioned.DeepCopy_unversioned_Time(in.LastTransitionTime, &out.LastTransitionTime, c); err != nil { return err } out.Reason = in.Reason @@ -1029,17 +72,18 @@ func deepCopy_v1_JobCondition(in JobCondition, out *JobCondition, c *conversion. return nil } -func deepCopy_v1_JobList(in JobList, out *JobList, c *conversion.Cloner) error { - if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { +func DeepCopy_v1_JobList(in JobList, out *JobList, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } - if err := deepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { return err } if in.Items != nil { - out.Items = make([]Job, len(in.Items)) - for i := range in.Items { - if err := deepCopy_v1_Job(in.Items[i], &out.Items[i], c); err != nil { + in, out := in.Items, &out.Items + *out = make([]Job, len(in)) + for i := range in { + if err := DeepCopy_v1_Job(in[i], &(*out)[i], c); err != nil { return err } } @@ -1049,50 +93,56 @@ func deepCopy_v1_JobList(in JobList, out *JobList, c *conversion.Cloner) error { return nil } -func deepCopy_v1_JobSpec(in JobSpec, out *JobSpec, c *conversion.Cloner) error { +func DeepCopy_v1_JobSpec(in JobSpec, out *JobSpec, c *conversion.Cloner) error { if in.Parallelism != nil { - out.Parallelism = new(int32) - *out.Parallelism = *in.Parallelism + in, out := in.Parallelism, &out.Parallelism + *out = new(int32) + **out = *in } else { out.Parallelism = nil } if in.Completions != nil { - out.Completions = new(int32) - *out.Completions = *in.Completions + in, out := in.Completions, &out.Completions + *out = new(int32) + **out = *in } else { out.Completions = nil } if in.ActiveDeadlineSeconds != nil { - out.ActiveDeadlineSeconds = new(int64) - *out.ActiveDeadlineSeconds = *in.ActiveDeadlineSeconds + in, out := in.ActiveDeadlineSeconds, &out.ActiveDeadlineSeconds + *out = new(int64) + **out = *in } else { 
out.ActiveDeadlineSeconds = nil } if in.Selector != nil { - out.Selector = new(LabelSelector) - if err := deepCopy_v1_LabelSelector(*in.Selector, out.Selector, c); err != nil { + in, out := in.Selector, &out.Selector + *out = new(LabelSelector) + if err := DeepCopy_v1_LabelSelector(*in, *out, c); err != nil { return err } } else { out.Selector = nil } if in.ManualSelector != nil { - out.ManualSelector = new(bool) - *out.ManualSelector = *in.ManualSelector + in, out := in.ManualSelector, &out.ManualSelector + *out = new(bool) + **out = *in } else { out.ManualSelector = nil } - if err := deepCopy_v1_PodTemplateSpec(in.Template, &out.Template, c); err != nil { + if err := api_v1.DeepCopy_v1_PodTemplateSpec(in.Template, &out.Template, c); err != nil { return err } return nil } -func deepCopy_v1_JobStatus(in JobStatus, out *JobStatus, c *conversion.Cloner) error { +func DeepCopy_v1_JobStatus(in JobStatus, out *JobStatus, c *conversion.Cloner) error { if in.Conditions != nil { - out.Conditions = make([]JobCondition, len(in.Conditions)) - for i := range in.Conditions { - if err := deepCopy_v1_JobCondition(in.Conditions[i], &out.Conditions[i], c); err != nil { + in, out := in.Conditions, &out.Conditions + *out = make([]JobCondition, len(in)) + for i := range in { + if err := DeepCopy_v1_JobCondition(in[i], &(*out)[i], c); err != nil { return err } } @@ -1100,16 +150,18 @@ func deepCopy_v1_JobStatus(in JobStatus, out *JobStatus, c *conversion.Cloner) e out.Conditions = nil } if in.StartTime != nil { - out.StartTime = new(unversioned.Time) - if err := deepCopy_unversioned_Time(*in.StartTime, out.StartTime, c); err != nil { + in, out := in.StartTime, &out.StartTime + *out = new(unversioned.Time) + if err := unversioned.DeepCopy_unversioned_Time(*in, *out, c); err != nil { return err } } else { out.StartTime = nil } if in.CompletionTime != nil { - out.CompletionTime = new(unversioned.Time) - if err := deepCopy_unversioned_Time(*in.CompletionTime, out.CompletionTime, c); err != nil { + in, out := in.CompletionTime, &out.CompletionTime + *out = new(unversioned.Time) + if err := unversioned.DeepCopy_unversioned_Time(*in, *out, c); err != nil { return err } } else { @@ -1121,19 +173,21 @@ func deepCopy_v1_JobStatus(in JobStatus, out *JobStatus, c *conversion.Cloner) e return nil } -func deepCopy_v1_LabelSelector(in LabelSelector, out *LabelSelector, c *conversion.Cloner) error { +func DeepCopy_v1_LabelSelector(in LabelSelector, out *LabelSelector, c *conversion.Cloner) error { if in.MatchLabels != nil { - out.MatchLabels = make(map[string]string) - for key, val := range in.MatchLabels { - out.MatchLabels[key] = val + in, out := in.MatchLabels, &out.MatchLabels + *out = make(map[string]string) + for key, val := range in { + (*out)[key] = val } } else { out.MatchLabels = nil } if in.MatchExpressions != nil { - out.MatchExpressions = make([]LabelSelectorRequirement, len(in.MatchExpressions)) - for i := range in.MatchExpressions { - if err := deepCopy_v1_LabelSelectorRequirement(in.MatchExpressions[i], &out.MatchExpressions[i], c); err != nil { + in, out := in.MatchExpressions, &out.MatchExpressions + *out = make([]LabelSelectorRequirement, len(in)) + for i := range in { + if err := DeepCopy_v1_LabelSelectorRequirement(in[i], &(*out)[i], c); err != nil { return err } } @@ -1143,91 +197,15 @@ func deepCopy_v1_LabelSelector(in LabelSelector, out *LabelSelector, c *conversi return nil } -func deepCopy_v1_LabelSelectorRequirement(in LabelSelectorRequirement, out *LabelSelectorRequirement, c *conversion.Cloner) 
error { +func DeepCopy_v1_LabelSelectorRequirement(in LabelSelectorRequirement, out *LabelSelectorRequirement, c *conversion.Cloner) error { out.Key = in.Key out.Operator = in.Operator if in.Values != nil { - out.Values = make([]string, len(in.Values)) - for i := range in.Values { - out.Values[i] = in.Values[i] - } + in, out := in.Values, &out.Values + *out = make([]string, len(in)) + copy(*out, in) } else { out.Values = nil } return nil } - -func deepCopy_intstr_IntOrString(in intstr.IntOrString, out *intstr.IntOrString, c *conversion.Cloner) error { - out.Type = in.Type - out.IntVal = in.IntVal - out.StrVal = in.StrVal - return nil -} - -func init() { - err := api.Scheme.AddGeneratedDeepCopyFuncs( - deepCopy_resource_Quantity, - deepCopy_unversioned_ListMeta, - deepCopy_unversioned_Time, - deepCopy_unversioned_TypeMeta, - deepCopy_v1_AWSElasticBlockStoreVolumeSource, - deepCopy_v1_AzureFileVolumeSource, - deepCopy_v1_Capabilities, - deepCopy_v1_CephFSVolumeSource, - deepCopy_v1_CinderVolumeSource, - deepCopy_v1_ConfigMapKeySelector, - deepCopy_v1_ConfigMapVolumeSource, - deepCopy_v1_Container, - deepCopy_v1_ContainerPort, - deepCopy_v1_DownwardAPIVolumeFile, - deepCopy_v1_DownwardAPIVolumeSource, - deepCopy_v1_EmptyDirVolumeSource, - deepCopy_v1_EnvVar, - deepCopy_v1_EnvVarSource, - deepCopy_v1_ExecAction, - deepCopy_v1_FCVolumeSource, - deepCopy_v1_FlexVolumeSource, - deepCopy_v1_FlockerVolumeSource, - deepCopy_v1_GCEPersistentDiskVolumeSource, - deepCopy_v1_GitRepoVolumeSource, - deepCopy_v1_GlusterfsVolumeSource, - deepCopy_v1_HTTPGetAction, - deepCopy_v1_HTTPHeader, - deepCopy_v1_Handler, - deepCopy_v1_HostPathVolumeSource, - deepCopy_v1_ISCSIVolumeSource, - deepCopy_v1_KeyToPath, - deepCopy_v1_Lifecycle, - deepCopy_v1_LocalObjectReference, - deepCopy_v1_NFSVolumeSource, - deepCopy_v1_ObjectFieldSelector, - deepCopy_v1_ObjectMeta, - deepCopy_v1_PersistentVolumeClaimVolumeSource, - deepCopy_v1_PodSecurityContext, - deepCopy_v1_PodSpec, - deepCopy_v1_PodTemplateSpec, - deepCopy_v1_Probe, - deepCopy_v1_RBDVolumeSource, - deepCopy_v1_ResourceRequirements, - deepCopy_v1_SELinuxOptions, - deepCopy_v1_SecretKeySelector, - deepCopy_v1_SecretVolumeSource, - deepCopy_v1_SecurityContext, - deepCopy_v1_TCPSocketAction, - deepCopy_v1_Volume, - deepCopy_v1_VolumeMount, - deepCopy_v1_VolumeSource, - deepCopy_v1_Job, - deepCopy_v1_JobCondition, - deepCopy_v1_JobList, - deepCopy_v1_JobSpec, - deepCopy_v1_JobStatus, - deepCopy_v1_LabelSelector, - deepCopy_v1_LabelSelectorRequirement, - deepCopy_intstr_IntOrString, - ) - if err != nil { - // if one of the deep copy functions is malformed, detect it immediately. - panic(err) - } -} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/v1/defaults.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/v1/defaults.go index 759ab0fb6de3..81aa90c1d095 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/v1/defaults.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/v1/defaults.go @@ -22,19 +22,21 @@ import ( func addDefaultingFuncs(scheme *runtime.Scheme) { scheme.AddDefaultingFuncs( - func(obj *Job) { - // For a non-parallel job, you can leave both `.spec.completions` and - // `.spec.parallelism` unset. When both are unset, both are defaulted to 1. 
-		if obj.Spec.Completions == nil && obj.Spec.Parallelism == nil {
-			obj.Spec.Completions = new(int32)
-			*obj.Spec.Completions = 1
-			obj.Spec.Parallelism = new(int32)
-			*obj.Spec.Parallelism = 1
-		}
-		if obj.Spec.Parallelism == nil {
-			obj.Spec.Parallelism = new(int32)
-			*obj.Spec.Parallelism = 1
-		}
-	},
+		SetDefaults_Job,
 	)
 }
+
+func SetDefaults_Job(obj *Job) {
+	// For a non-parallel job, you can leave both `.spec.completions` and
+	// `.spec.parallelism` unset. When both are unset, both are defaulted to 1.
+	if obj.Spec.Completions == nil && obj.Spec.Parallelism == nil {
+		obj.Spec.Completions = new(int32)
+		*obj.Spec.Completions = 1
+		obj.Spec.Parallelism = new(int32)
+		*obj.Spec.Parallelism = 1
+	}
+	if obj.Spec.Parallelism == nil {
+		obj.Spec.Parallelism = new(int32)
+		*obj.Spec.Parallelism = 1
+	}
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/v1/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/v1/doc.go
new file mode 100644
index 000000000000..1c67cc3a94c4
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/v1/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +genconversion=true
+package v1
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/v1/generated.pb.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/v1/generated.pb.go
new file mode 100644
index 000000000000..95646919d8c0
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/v1/generated.pb.go
@@ -0,0 +1,1901 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by protoc-gen-gogo.
+// source: k8s.io/kubernetes/pkg/apis/batch/v1/generated.proto
+// DO NOT EDIT!
+
+/*
+	Package v1 is a generated protocol buffer package.
+
+	It is generated from these files:
+		k8s.io/kubernetes/pkg/apis/batch/v1/generated.proto
+
+	It has these top-level messages:
+		Job
+		JobCondition
+		JobList
+		JobSpec
+		JobStatus
+		LabelSelector
+		LabelSelectorRequirement
+*/
+package v1
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+import k8s_io_kubernetes_pkg_api_unversioned "k8s.io/kubernetes/pkg/api/unversioned"
+import k8s_io_kubernetes_pkg_api_v1 "k8s.io/kubernetes/pkg/api/v1"
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
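To make the defaulting rules above concrete, here is a minimal usage sketch. It is not part of this diff; it assumes the vendored import path resolves as in this Godeps tree and only exercises the SetDefaults_Job function added above.

// Hypothetical usage of SetDefaults_Job; the batchv1 alias and the main
// wrapper are illustrative, not part of the vendored code.
package main

import (
	"fmt"

	batchv1 "k8s.io/kubernetes/pkg/apis/batch/v1"
)

func main() {
	// Non-parallel job: Completions and Parallelism are both nil,
	// so both are defaulted to 1.
	job := &batchv1.Job{}
	batchv1.SetDefaults_Job(job)
	fmt.Println(*job.Spec.Completions, *job.Spec.Parallelism) // 1 1

	// Completions set, Parallelism unset: only Parallelism is defaulted.
	two := int32(2)
	job2 := &batchv1.Job{Spec: batchv1.JobSpec{Completions: &two}}
	batchv1.SetDefaults_Job(job2)
	fmt.Println(*job2.Spec.Completions, *job2.Spec.Parallelism) // 2 1
}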
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +func (m *Job) Reset() { *m = Job{} } +func (m *Job) String() string { return proto.CompactTextString(m) } +func (*Job) ProtoMessage() {} + +func (m *JobCondition) Reset() { *m = JobCondition{} } +func (m *JobCondition) String() string { return proto.CompactTextString(m) } +func (*JobCondition) ProtoMessage() {} + +func (m *JobList) Reset() { *m = JobList{} } +func (m *JobList) String() string { return proto.CompactTextString(m) } +func (*JobList) ProtoMessage() {} + +func (m *JobSpec) Reset() { *m = JobSpec{} } +func (m *JobSpec) String() string { return proto.CompactTextString(m) } +func (*JobSpec) ProtoMessage() {} + +func (m *JobStatus) Reset() { *m = JobStatus{} } +func (m *JobStatus) String() string { return proto.CompactTextString(m) } +func (*JobStatus) ProtoMessage() {} + +func (m *LabelSelector) Reset() { *m = LabelSelector{} } +func (m *LabelSelector) String() string { return proto.CompactTextString(m) } +func (*LabelSelector) ProtoMessage() {} + +func (m *LabelSelectorRequirement) Reset() { *m = LabelSelectorRequirement{} } +func (m *LabelSelectorRequirement) String() string { return proto.CompactTextString(m) } +func (*LabelSelectorRequirement) ProtoMessage() {} + +func init() { + proto.RegisterType((*Job)(nil), "k8s.io.kubernetes.pkg.apis.batch.v1.Job") + proto.RegisterType((*JobCondition)(nil), "k8s.io.kubernetes.pkg.apis.batch.v1.JobCondition") + proto.RegisterType((*JobList)(nil), "k8s.io.kubernetes.pkg.apis.batch.v1.JobList") + proto.RegisterType((*JobSpec)(nil), "k8s.io.kubernetes.pkg.apis.batch.v1.JobSpec") + proto.RegisterType((*JobStatus)(nil), "k8s.io.kubernetes.pkg.apis.batch.v1.JobStatus") + proto.RegisterType((*LabelSelector)(nil), "k8s.io.kubernetes.pkg.apis.batch.v1.LabelSelector") + proto.RegisterType((*LabelSelectorRequirement)(nil), "k8s.io.kubernetes.pkg.apis.batch.v1.LabelSelectorRequirement") +} +func (m *Job) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Job) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n1 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n2, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n2 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n3, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n3 + return i, nil +} + +func (m *JobCondition) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *JobCondition) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Type))) + i += copy(data[i:], m.Type) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Status))) + i += copy(data[i:], m.Status) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.LastProbeTime.Size())) + n4, err := m.LastProbeTime.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n4 + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, 
uint64(m.LastTransitionTime.Size())) + n5, err := m.LastTransitionTime.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n5 + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) + i += copy(data[i:], m.Reason) + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Message))) + i += copy(data[i:], m.Message) + return i, nil +} + +func (m *JobList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *JobList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n6, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n6 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *JobSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *JobSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Parallelism != nil { + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.Parallelism)) + } + if m.Completions != nil { + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.Completions)) + } + if m.ActiveDeadlineSeconds != nil { + data[i] = 0x18 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.ActiveDeadlineSeconds)) + } + if m.Selector != nil { + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Selector.Size())) + n7, err := m.Selector.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n7 + } + if m.ManualSelector != nil { + data[i] = 0x28 + i++ + if *m.ManualSelector { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + } + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Template.Size())) + n8, err := m.Template.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n8 + return i, nil +} + +func (m *JobStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *JobStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.StartTime != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.StartTime.Size())) + n9, err := m.StartTime.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n9 + } + if m.CompletionTime != nil { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.CompletionTime.Size())) + n10, err := m.CompletionTime.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n10 + } + data[i] = 0x20 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Active)) + data[i] = 0x28 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Succeeded)) + data[i] = 0x30 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Failed)) + return i, nil +} 
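Every MarshalTo method above follows the same protobuf wire pattern: a one-byte key encoding (fieldNumber << 3) | wireType, which is where literals such as 0xa (field 1, length-delimited) and 0x12 (field 2, length-delimited) come from, followed by a base-128 varint for the value or length. A self-contained sketch of those two primitives, mirroring the logic of encodeVarintGenerated and sovGenerated defined later in this file; putVarint and varintSize are local illustrative names:

// Standalone illustration of the varint primitives used by the
// generated marshalers above.
package main

import "fmt"

// putVarint appends v in base-128 varint form: low 7 bits per byte,
// continuation bit set on every byte except the last.
func putVarint(data []byte, v uint64) []byte {
	for v >= 1<<7 {
		data = append(data, uint8(v&0x7f|0x80))
		v >>= 7
	}
	return append(data, uint8(v))
}

// varintSize reports how many bytes putVarint would emit for v.
func varintSize(v uint64) int {
	n := 1
	for v >= 1<<7 {
		v >>= 7
		n++
	}
	return n
}

func main() {
	fmt.Printf("%#x\n", 1<<3|2)      // 0xa: key for field 1, wire type 2
	fmt.Println(putVarint(nil, 300)) // [172 2]
	fmt.Println(varintSize(300))     // 2
}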
+ +func (m *LabelSelector) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *LabelSelector) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.MatchLabels) > 0 { + for k := range m.MatchLabels { + data[i] = 0xa + i++ + v := m.MatchLabels[k] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(v))) + i += copy(data[i:], v) + } + } + if len(m.MatchExpressions) > 0 { + for _, msg := range m.MatchExpressions { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *LabelSelectorRequirement) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *LabelSelectorRequirement) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Key))) + i += copy(data[i:], m.Key) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Operator))) + i += copy(data[i:], m.Operator) + if len(m.Values) > 0 { + for _, s := range m.Values { + data[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + return i, nil +} + +func encodeFixed64Generated(data []byte, offset int, v uint64) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + data[offset+4] = uint8(v >> 32) + data[offset+5] = uint8(v >> 40) + data[offset+6] = uint8(v >> 48) + data[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(data []byte, offset int, v uint32) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(data []byte, offset int, v uint64) int { + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return offset + 1 +} +func (m *Job) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *JobCondition) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastProbeTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *JobList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + 
sovGenerated(uint64(l)) + } + } + return n +} + +func (m *JobSpec) Size() (n int) { + var l int + _ = l + if m.Parallelism != nil { + n += 1 + sovGenerated(uint64(*m.Parallelism)) + } + if m.Completions != nil { + n += 1 + sovGenerated(uint64(*m.Completions)) + } + if m.ActiveDeadlineSeconds != nil { + n += 1 + sovGenerated(uint64(*m.ActiveDeadlineSeconds)) + } + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ManualSelector != nil { + n += 2 + } + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *JobStatus) Size() (n int) { + var l int + _ = l + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.StartTime != nil { + l = m.StartTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.CompletionTime != nil { + l = m.CompletionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 1 + sovGenerated(uint64(m.Active)) + n += 1 + sovGenerated(uint64(m.Succeeded)) + n += 1 + sovGenerated(uint64(m.Failed)) + return n +} + +func (m *LabelSelector) Size() (n int) { + var l int + _ = l + if len(m.MatchLabels) > 0 { + for k, v := range m.MatchLabels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.MatchExpressions) > 0 { + for _, e := range m.MatchExpressions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *LabelSelectorRequirement) Size() (n int) { + var l int + _ = l + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Operator) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Values) > 0 { + for _, s := range m.Values { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Job) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Job: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Job: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JobCondition) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JobCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JobCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = JobConditionType(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_kubernetes_pkg_api_v1.ConditionStatus(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastProbeTime", wireType) + } + var msglen int + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastProbeTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JobList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JobList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JobList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 
{ + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Job{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JobSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JobSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JobSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Parallelism", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Parallelism = &v + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Completions", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Completions = &v + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ActiveDeadlineSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ActiveDeadlineSeconds = &v + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l 
{ + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &LabelSelector{} + } + if err := m.Selector.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ManualSelector", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.ManualSelector = &b + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Template.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JobStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JobStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JobStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, JobCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StartTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } 
+ if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.StartTime == nil { + m.StartTime = &k8s_io_kubernetes_pkg_api_unversioned.Time{} + } + if err := m.StartTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CompletionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CompletionTime == nil { + m.CompletionTime = &k8s_io_kubernetes_pkg_api_unversioned.Time{} + } + if err := m.CompletionTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Active", wireType) + } + m.Active = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Active |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Succeeded", wireType) + } + m.Succeeded = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Succeeded |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Failed", wireType) + } + m.Failed = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Failed |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LabelSelector) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LabelSelector: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LabelSelector: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchLabels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 
0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(data[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + if m.MatchLabels == nil { + m.MatchLabels = make(map[string]string) + } + m.MatchLabels[mapkey] = mapvalue + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchExpressions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MatchExpressions = append(m.MatchExpressions, LabelSelectorRequirement{}) + if err := m.MatchExpressions[len(m.MatchExpressions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LabelSelectorRequirement) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + 
fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LabelSelectorRequirement: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LabelSelectorRequirement: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Operator", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Operator = LabelSelectorOperator(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Values = append(m.Values, string(data[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(data []byte) (n int, err error) { + l := len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if data[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + 
+			iNdEx += length
+			if length < 0 {
+				return 0, ErrInvalidLengthGenerated
+			}
+			return iNdEx, nil
+		case 3:
+			for {
+				var innerWire uint64
+				var start int = iNdEx
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return 0, ErrIntOverflowGenerated
+					}
+					if iNdEx >= l {
+						return 0, io.ErrUnexpectedEOF
+					}
+					b := data[iNdEx]
+					iNdEx++
+					innerWire |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				innerWireType := int(innerWire & 0x7)
+				if innerWireType == 4 {
+					break
+				}
+				next, err := skipGenerated(data[start:])
+				if err != nil {
+					return 0, err
+				}
+				iNdEx = start + next
+			}
+			return iNdEx, nil
+		case 4:
+			return iNdEx, nil
+		case 5:
+			iNdEx += 4
+			return iNdEx, nil
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+	}
+	panic("unreachable")
+}
+
+var (
+	ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowGenerated   = fmt.Errorf("proto: integer overflow")
+)
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/v1/generated.proto b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/v1/generated.proto
new file mode 100644
index 000000000000..5c01754cea3c
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/v1/generated.proto
@@ -0,0 +1,177 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = 'proto2';
+
+package k8s.io.kubernetes.pkg.apis.batch.v1;
+
+import "k8s.io/kubernetes/pkg/api/resource/generated.proto";
+import "k8s.io/kubernetes/pkg/api/unversioned/generated.proto";
+import "k8s.io/kubernetes/pkg/api/v1/generated.proto";
+import "k8s.io/kubernetes/pkg/util/intstr/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "v1";
+
+// Job represents the configuration of a single job.
+message Job {
+  // Standard object's metadata.
+  // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+  optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1;
+
+  // Spec is a structure defining the expected behavior of a job.
+  // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+  optional JobSpec spec = 2;
+
+  // Status is a structure describing current status of a job.
+  // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+  optional JobStatus status = 3;
+}
+
+// JobCondition describes current state of a job.
+message JobCondition {
+  // Type of job condition, Complete or Failed.
+  optional string type = 1;
+
+  // Status of the condition, one of True, False, Unknown.
+  optional string status = 2;
+
+  // Last time the condition was checked.
+  optional k8s.io.kubernetes.pkg.api.unversioned.Time lastProbeTime = 3;
+
+  // Last time the condition transitioned from one status to another.
+  optional k8s.io.kubernetes.pkg.api.unversioned.Time lastTransitionTime = 4;
+
+  // (brief) reason for the condition's last transition.
+  optional string reason = 5;
+
+  // Human-readable message indicating details about last transition.
+  optional string message = 6;
+}
+
+// JobList is a collection of jobs.
+message JobList {
+  // Standard list metadata
+  // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+  optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1;
+
+  // Items is the list of Job.
+  repeated Job items = 2;
+}
+
+// JobSpec describes how the job execution will look.
+message JobSpec {
+  // Parallelism specifies the maximum desired number of pods the job should
+  // run at any given time. The actual number of pods running in steady state will
+  // be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism),
+  // i.e. when the work left to do is less than max parallelism.
+  // More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md
+  optional int32 parallelism = 1;
+
+  // Completions specifies the desired number of successfully finished pods the
+  // job should be run with. Setting to nil means that the success of any
+  // pod signals the success of all pods, and allows parallelism to have any positive
+  // value. Setting to 1 means that parallelism is limited to 1 and the success of that
+  // pod signals the success of the job.
+  // More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md
+  optional int32 completions = 2;
+
+  // Optional duration in seconds relative to the startTime that the job may be active
+  // before the system tries to terminate it; the value must be a positive integer.
+  optional int64 activeDeadlineSeconds = 3;
+
+  // Selector is a label query over pods that should match the pod count.
+  // Normally, the system sets this field for you.
+  // More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors
+  optional LabelSelector selector = 4;
+
+  // ManualSelector controls generation of pod labels and pod selectors.
+  // Leave `manualSelector` unset unless you are certain what you are doing.
+  // When false or unset, the system picks labels unique to this job
+  // and appends those labels to the pod template. When true,
+  // the user is responsible for picking unique labels and specifying
+  // the selector. Failure to pick a unique label may cause this
+  // and other jobs to not function correctly. However, you may see
+  // `manualSelector=true` in jobs that were created with the old `extensions/v1beta1`
+  // API.
+  // More info: http://releases.k8s.io/HEAD/docs/design/selector-generation.md
+  optional bool manualSelector = 5;
+
+  // Template is the object that describes the pod that will be created when
+  // executing a job.
+  // More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md
+  optional k8s.io.kubernetes.pkg.api.v1.PodTemplateSpec template = 6;
+}
+
+// JobStatus represents the current state of a Job.
+message JobStatus {
+  // Conditions represent the latest available observations of an object's current state.
+  // More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md
+  repeated JobCondition conditions = 1;
+
+  // StartTime represents time when the job was acknowledged by the Job Manager.
+  // It is not guaranteed to be set in happens-before order across separate operations.
+  // It is represented in RFC3339 form and is in UTC.
+  optional k8s.io.kubernetes.pkg.api.unversioned.Time startTime = 2;
+
+  // CompletionTime represents time when the job was completed. It is not guaranteed to
+  // be set in happens-before order across separate operations.
+  // It is represented in RFC3339 form and is in UTC.
+  optional k8s.io.kubernetes.pkg.api.unversioned.Time completionTime = 3;
+
+  // Active is the number of actively running pods.
+  optional int32 active = 4;
+
+  // Succeeded is the number of pods which reached Phase Succeeded.
+  optional int32 succeeded = 5;
+
+  // Failed is the number of pods which reached Phase Failed.
+  optional int32 failed = 6;
+}
+
+// A label selector is a label query over a set of resources. The results of matchLabels and
+// matchExpressions are ANDed. An empty label selector matches all objects. A null
+// label selector matches no objects.
+message LabelSelector {
+  // matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+  // map is equivalent to an element of matchExpressions, whose key field is "key", the
+  // operator is "In", and the values array contains only "value". The requirements are ANDed.
+  map<string, string> matchLabels = 1;
+
+  // matchExpressions is a list of label selector requirements. The requirements are ANDed.
+  repeated LabelSelectorRequirement matchExpressions = 2;
+}
+
+// A label selector requirement is a selector that contains values, a key, and an operator that
+// relates the key and values.
+message LabelSelectorRequirement {
+  // key is the label key that the selector applies to.
+  optional string key = 1;
+
+  // operator represents a key's relationship to a set of values.
+  // Valid operators are In, NotIn, Exists and DoesNotExist.
+  optional string operator = 2;
+
+  // values is an array of string values. If the operator is In or NotIn,
+  // the values array must be non-empty. If the operator is Exists or DoesNotExist,
+  // the values array must be empty. This array is replaced during a strategic
+  // merge patch.
+  repeated string values = 3;
+}
+
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/v1/register.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/v1/register.go
index 49f7376a339e..a8c5e484c17f 100644
--- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/v1/register.go
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/v1/register.go
@@ -20,6 +20,7 @@ import (
 	"k8s.io/kubernetes/pkg/api/unversioned"
 	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/runtime"
+	versionedwatch "k8s.io/kubernetes/pkg/watch/versioned"
 )
 
 // GroupName is the group name used in this package
@@ -41,6 +42,7 @@ func addKnownTypes(scheme *runtime.Scheme) {
 		&JobList{},
 		&v1.ListOptions{},
 	)
+	versionedwatch.AddToGroupVersion(scheme, SchemeGroupVersion)
 }
 
 func (obj *Job) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/v1/types.generated.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/v1/types.generated.go
index 2c07db2168e8..58d5f6f54c77 100644
--- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/v1/types.generated.go
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/v1/types.generated.go
@@ -1,5 +1,5 @@
 /*
-Copyright 2015 The Kubernetes Authors All rights reserved.
+Copyright 2016 The Kubernetes Authors All rights reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -29,10 +29,9 @@ import (
 	pkg1_unversioned "k8s.io/kubernetes/pkg/api/unversioned"
 	pkg2_v1 "k8s.io/kubernetes/pkg/api/v1"
 	pkg3_types "k8s.io/kubernetes/pkg/types"
-	pkg6_intstr "k8s.io/kubernetes/pkg/util/intstr"
+	pkg5_intstr "k8s.io/kubernetes/pkg/util/intstr"
 	"reflect"
 	"runtime"
-	pkg5_inf "speter.net/go/exp/math/dec/inf"
 	time "time"
 )
 
@@ -70,10 +69,9 @@ func init() {
 		var v1 pkg1_unversioned.TypeMeta
 		var v2 pkg2_v1.ObjectMeta
 		var v3 pkg3_types.UID
-		var v4 pkg6_intstr.IntOrString
-		var v5 pkg5_inf.Dec
-		var v6 time.Time
-		_, _, _, _, _, _, _ = v0, v1, v2, v3, v4, v5, v6
+		var v4 pkg5_intstr.IntOrString
+		var v5 time.Time
+		_, _, _, _, _, _ = v0, v1, v2, v3, v4, v5
 	}
 }
 
@@ -2867,7 +2865,7 @@ func (x codecSelfer1234) decSliceJob(v *[]Job, d *codec1978.Decoder) {
 
 		yyrg1 := len(yyv1) > 0
 		yyv21 := yyv1
-		yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 640)
+		yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 792)
 		if yyrt1 {
 			if yyrl1 <= cap(yyv1) {
 				yyv1 = yyv1[:yyrl1]
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/v1/types.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/v1/types.go
index 214ffe6d3d51..47ed77524e1d 100644
--- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/v1/types.go
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/v1/types.go
@@ -21,31 +21,33 @@ import (
 	"k8s.io/kubernetes/pkg/api/v1"
 )
 
+// +genclient=true
+
 // Job represents the configuration of a single job.
 type Job struct {
 	unversioned.TypeMeta `json:",inline"`
 	// Standard object's metadata.
-	// More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata
-	v1.ObjectMeta `json:"metadata,omitempty"`
+	// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+	v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
 
 	// Spec is a structure defining the expected behavior of a job.
-	// More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#spec-and-status
-	Spec JobSpec `json:"spec,omitempty"`
+	// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+	Spec JobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
 
 	// Status is a structure describing current status of a job.
-	// More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#spec-and-status
-	Status JobStatus `json:"status,omitempty"`
+	// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+	Status JobStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
 }
 
 // JobList is a collection of jobs.
 type JobList struct {
 	unversioned.TypeMeta `json:",inline"`
 	// Standard list metadata
-	// More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata
-	unversioned.ListMeta `json:"metadata,omitempty"`
+	// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+	unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
 
 	// Items is the list of Job.
-	Items []Job `json:"items"`
+	Items []Job `json:"items" protobuf:"bytes,2,rep,name=items"`
 }
 
 // JobSpec describes how the job execution will look.
@@ -55,25 +57,25 @@ type JobSpec struct {
 	// Parallelism specifies the maximum desired number of pods the job should
 	// run at any given time. The actual number of pods running in steady state will
 	// be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism),
 	// i.e. when the work left to do is less than max parallelism.
- // More info: http://releases.k8s.io/release-1.2/docs/user-guide/jobs.md - Parallelism *int32 `json:"parallelism,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md + Parallelism *int32 `json:"parallelism,omitempty" protobuf:"varint,1,opt,name=parallelism"` // Completions specifies the desired number of successfully finished pods the // job should be run with. Setting to nil means that the success of any // pod signals the success of all pods, and allows parallelism to have any positive // value. Setting to 1 means that parallelism is limited to 1 and the success of that // pod signals the success of the job. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/jobs.md - Completions *int32 `json:"completions,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md + Completions *int32 `json:"completions,omitempty" protobuf:"varint,2,opt,name=completions"` // Optional duration in seconds relative to the startTime that the job may be active // before the system tries to terminate it; value must be a positive integer - ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty"` + ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty" protobuf:"varint,3,opt,name=activeDeadlineSeconds"` // Selector is a label query over pods that should match the pod count. // Normally, the system sets this field for you. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/labels.md#label-selectors - Selector *LabelSelector `json:"selector,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors + Selector *LabelSelector `json:"selector,omitempty" protobuf:"bytes,4,opt,name=selector"` // ManualSelector controls generation of pod labels and pod selectors. // Leave `manualSelector` unset unless you are certain what you are doing. @@ -84,40 +86,40 @@ type JobSpec struct { // and other jobs to not function correctly. However, you may see // `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` // API. - // More info: http://releases.k8s.io/release-1.2/docs/design/selector-generation.md - ManualSelector *bool `json:"manualSelector,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/design/selector-generation.md + ManualSelector *bool `json:"manualSelector,omitempty" protobuf:"varint,5,opt,name=manualSelector"` // Template is the object that describes the pod that will be created when // executing a job. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/jobs.md - Template v1.PodTemplateSpec `json:"template"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md + Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,6,opt,name=template"` } // JobStatus represents the current state of a Job. type JobStatus struct { // Conditions represent the latest available observations of an object's current state. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/jobs.md - Conditions []JobCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md + Conditions []JobCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` // StartTime represents the time when the job was acknowledged by the Job Manager. // It is not guaranteed to be set in happens-before order across separate operations. // It is represented in RFC3339 form and is in UTC.
- StartTime *unversioned.Time `json:"startTime,omitempty"` + StartTime *unversioned.Time `json:"startTime,omitempty" protobuf:"bytes,2,opt,name=startTime"` // CompletionTime represents the time when the job was completed. It is not guaranteed to // be set in happens-before order across separate operations. // It is represented in RFC3339 form and is in UTC. - CompletionTime *unversioned.Time `json:"completionTime,omitempty"` + CompletionTime *unversioned.Time `json:"completionTime,omitempty" protobuf:"bytes,3,opt,name=completionTime"` // Active is the number of actively running pods. - Active int32 `json:"active,omitempty"` + Active int32 `json:"active,omitempty" protobuf:"varint,4,opt,name=active"` // Succeeded is the number of pods which reached Phase Succeeded. - Succeeded int32 `json:"succeeded,omitempty"` + Succeeded int32 `json:"succeeded,omitempty" protobuf:"varint,5,opt,name=succeeded"` // Failed is the number of pods which reached Phase Failed. - Failed int32 `json:"failed,omitempty"` + Failed int32 `json:"failed,omitempty" protobuf:"varint,6,opt,name=failed"` } type JobConditionType string @@ -133,17 +135,17 @@ const ( // JobCondition describes current state of a job. type JobCondition struct { // Type of job condition, Complete or Failed. - Type JobConditionType `json:"type"` + Type JobConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=JobConditionType"` // Status of the condition, one of True, False, Unknown. - Status v1.ConditionStatus `json:"status"` + Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/kubernetes/pkg/api/v1.ConditionStatus"` // Last time the condition was checked. - LastProbeTime unversioned.Time `json:"lastProbeTime,omitempty"` + LastProbeTime unversioned.Time `json:"lastProbeTime,omitempty" protobuf:"bytes,3,opt,name=lastProbeTime"` // Last time the condition transitioned from one status to another. - LastTransitionTime unversioned.Time `json:"lastTransitionTime,omitempty"` + LastTransitionTime unversioned.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"` // (brief) reason for the condition's last transition. - Reason string `json:"reason,omitempty"` + Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"` // Human readable message indicating details about last transition. - Message string `json:"message,omitempty"` + Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"` } // A label selector is a label query over a set of resources. The result of matchLabels and @@ -153,24 +155,24 @@ type LabelSelector struct { // matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels // map is equivalent to an element of matchExpressions, whose key field is "key", the // operator is "In", and the values array contains only "value". The requirements are ANDed. - MatchLabels map[string]string `json:"matchLabels,omitempty"` + MatchLabels map[string]string `json:"matchLabels,omitempty" protobuf:"bytes,1,rep,name=matchLabels"` // matchExpressions is a list of label selector requirements. The requirements are ANDed. - MatchExpressions []LabelSelectorRequirement `json:"matchExpressions,omitempty"` + MatchExpressions []LabelSelectorRequirement `json:"matchExpressions,omitempty" protobuf:"bytes,2,rep,name=matchExpressions"` } // A label selector requirement is a selector that contains values, a key, and an operator that // relates the key and values.
type LabelSelectorRequirement struct { // key is the label key that the selector applies to. - Key string `json:"key" patchStrategy:"merge" patchMergeKey:"key"` + Key string `json:"key" patchStrategy:"merge" patchMergeKey:"key" protobuf:"bytes,1,opt,name=key"` // operator represents a key's relationship to a set of values. // Valid operators are In, NotIn, Exists and DoesNotExist. - Operator LabelSelectorOperator `json:"operator"` + Operator LabelSelectorOperator `json:"operator" protobuf:"bytes,2,opt,name=operator,casttype=LabelSelectorOperator"` // values is an array of string values. If the operator is In or NotIn, // the values array must be non-empty. If the operator is Exists or DoesNotExist, // the values array must be empty. This array is replaced during a strategic // merge patch. - Values []string `json:"values,omitempty"` + Values []string `json:"values,omitempty" protobuf:"bytes,3,rep,name=values"` } // A label selector operator is the set of operators that can be used in a selector requirement. diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/v1/types_swagger_doc_generated.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/v1/types_swagger_doc_generated.go index 32e278eac4df..3e758b0096a1 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/v1/types_swagger_doc_generated.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/v1/types_swagger_doc_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2015 The Kubernetes Authors All rights reserved. +Copyright 2016 The Kubernetes Authors All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -29,9 +29,9 @@ package v1 // AUTO-GENERATED FUNCTIONS START HERE var map_Job = map[string]string{ "": "Job represents the configuration of a single job.", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata", - "spec": "Spec is a structure defining the expected behavior of a job. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#spec-and-status", - "status": "Status is a structure describing current status of a job. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "spec": "Spec is a structure defining the expected behavior of a job. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", + "status": "Status is a structure describing current status of a job. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", } func (Job) SwaggerDoc() map[string]string { @@ -54,7 +54,7 @@ func (JobCondition) SwaggerDoc() map[string]string { var map_JobList = map[string]string{ "": "JobList is a collection of jobs.", - "metadata": "Standard list metadata More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", "items": "Items is the list of Job.", } @@ -64,12 +64,12 @@ func (JobList) SwaggerDoc() map[string]string { var map_JobSpec = map[string]string{ "": "JobSpec describes what the job execution will look like.", - "parallelism": "Parallelism specifies the maximum desired number of pods the job should run at any given time.
The actual number of pods running in steady state will be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), i.e. when the work left to do is less than max parallelism. More info: http://releases.k8s.io/release-1.2/docs/user-guide/jobs.md", - "completions": "Completions specifies the desired number of successfully finished pods the job should be run with. Setting to nil means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: http://releases.k8s.io/release-1.2/docs/user-guide/jobs.md", + "parallelism": "Parallelism specifies the maximum desired number of pods the job should run at any given time. The actual number of pods running in steady state will be less than this number when ((.spec.completions - .status.succeeded) < .spec.parallelism), i.e. when the work left to do is less than max parallelism. More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md", + "completions": "Completions specifies the desired number of successfully finished pods the job should be run with. Setting to nil means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md", "activeDeadlineSeconds": "Optional duration in seconds relative to the startTime that the job may be active before the system tries to terminate it; value must be a positive integer", - "selector": "Selector is a label query over pods that should match the pod count. Normally, the system sets this field for you. More info: http://releases.k8s.io/release-1.2/docs/user-guide/labels.md#label-selectors", - "manualSelector": "ManualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. More info: http://releases.k8s.io/release-1.2/docs/design/selector-generation.md", + "selector": "Selector is a label query over pods that should match the pod count. Normally, the system sets this field for you. More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors", + "manualSelector": "ManualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system picks labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, you may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API.
More info: http://releases.k8s.io/HEAD/docs/design/selector-generation.md", + "template": "Template is the object that describes the pod that will be created when executing a job. More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md", } func (JobSpec) SwaggerDoc() map[string]string { @@ -78,7 +78,7 @@ func (JobSpec) SwaggerDoc() map[string]string { var map_JobStatus = map[string]string{ "": "JobStatus represents the current state of a Job.", - "conditions": "Conditions represent the latest available observations of an object's current state. More info: http://releases.k8s.io/release-1.2/docs/user-guide/jobs.md", + "conditions": "Conditions represent the latest available observations of an object's current state. More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md", "startTime": "StartTime represents time when the job was acknowledged by the Job Manager. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC.", "completionTime": "CompletionTime represents time when the job was completed. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC.", "active": "Active is the number of actively running pods.", diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/conversion.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/conversion.go new file mode 100644 index 000000000000..4714fda0fb3e --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/conversion.go @@ -0,0 +1,116 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v2alpha1 + +import ( + "fmt" + "reflect" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/unversioned" + v1 "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/apis/batch" + "k8s.io/kubernetes/pkg/conversion" + "k8s.io/kubernetes/pkg/runtime" +) + +func addConversionFuncs(scheme *runtime.Scheme) { + // Add non-generated conversion functions + err := scheme.AddConversionFuncs( + Convert_batch_JobSpec_To_v2alpha1_JobSpec, + Convert_v2alpha1_JobSpec_To_batch_JobSpec, + ) + if err != nil { + // If one of the conversion functions is malformed, detect it immediately. + panic(err) + } + + // Add field label conversions for kinds having selectable nothing but ObjectMeta fields. + for _, kind := range []string{"Job", "JobTemplate", "ScheduledJob"} { + err = api.Scheme.AddFieldLabelConversionFunc("batch/v2alpha1", kind, + func(label, value string) (string, string, error) { + switch label { + case "metadata.name", "metadata.namespace", "status.successful": + return label, value, nil + default: + return "", "", fmt.Errorf("field label not supported: %s", label) + } + }) + } + if err != nil { + // If one of the conversion functions is malformed, detect it immediately. 
+ panic(err) + } +} + +func Convert_batch_JobSpec_To_v2alpha1_JobSpec(in *batch.JobSpec, out *JobSpec, s conversion.Scope) error { + if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { + defaulting.(func(*batch.JobSpec))(in) + } + out.Parallelism = in.Parallelism + out.Completions = in.Completions + out.ActiveDeadlineSeconds = in.ActiveDeadlineSeconds + // unable to generate simple pointer conversion for unversioned.LabelSelector -> v1.LabelSelector + if in.Selector != nil { + out.Selector = new(LabelSelector) + if err := Convert_unversioned_LabelSelector_To_v2alpha1_LabelSelector(in.Selector, out.Selector, s); err != nil { + return err + } + } else { + out.Selector = nil + } + if in.ManualSelector != nil { + out.ManualSelector = new(bool) + *out.ManualSelector = *in.ManualSelector + } else { + out.ManualSelector = nil + } + + if err := v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + return nil +} + +func Convert_v2alpha1_JobSpec_To_batch_JobSpec(in *JobSpec, out *batch.JobSpec, s conversion.Scope) error { + if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { + defaulting.(func(*JobSpec))(in) + } + out.Parallelism = in.Parallelism + out.Completions = in.Completions + out.ActiveDeadlineSeconds = in.ActiveDeadlineSeconds + // unable to generate simple pointer conversion for unversioned.LabelSelector -> v1.LabelSelector + if in.Selector != nil { + out.Selector = new(unversioned.LabelSelector) + if err := Convert_v2alpha1_LabelSelector_To_unversioned_LabelSelector(in.Selector, out.Selector, s); err != nil { + return err + } + } else { + out.Selector = nil + } + if in.ManualSelector != nil { + out.ManualSelector = new(bool) + *out.ManualSelector = *in.ManualSelector + } else { + out.ManualSelector = nil + } + + if err := v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + return nil +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/conversion_generated.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/conversion_generated.go new file mode 100644 index 000000000000..c411875e37f4 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/conversion_generated.go @@ -0,0 +1,573 @@ +// +build !ignore_autogenerated + +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by conversion-gen. Do not edit it manually! 
+ +package v2alpha1 + +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + v1 "k8s.io/kubernetes/pkg/api/v1" + batch "k8s.io/kubernetes/pkg/apis/batch" + conversion "k8s.io/kubernetes/pkg/conversion" +) + +func init() { + if err := api.Scheme.AddGeneratedConversionFuncs( + Convert_v2alpha1_Job_To_batch_Job, + Convert_batch_Job_To_v2alpha1_Job, + Convert_v2alpha1_JobCondition_To_batch_JobCondition, + Convert_batch_JobCondition_To_v2alpha1_JobCondition, + Convert_v2alpha1_JobList_To_batch_JobList, + Convert_batch_JobList_To_v2alpha1_JobList, + Convert_v2alpha1_JobSpec_To_batch_JobSpec, + Convert_batch_JobSpec_To_v2alpha1_JobSpec, + Convert_v2alpha1_JobStatus_To_batch_JobStatus, + Convert_batch_JobStatus_To_v2alpha1_JobStatus, + Convert_v2alpha1_JobTemplate_To_batch_JobTemplate, + Convert_batch_JobTemplate_To_v2alpha1_JobTemplate, + Convert_v2alpha1_JobTemplateSpec_To_batch_JobTemplateSpec, + Convert_batch_JobTemplateSpec_To_v2alpha1_JobTemplateSpec, + Convert_v2alpha1_LabelSelector_To_unversioned_LabelSelector, + Convert_unversioned_LabelSelector_To_v2alpha1_LabelSelector, + Convert_v2alpha1_LabelSelectorRequirement_To_unversioned_LabelSelectorRequirement, + Convert_unversioned_LabelSelectorRequirement_To_v2alpha1_LabelSelectorRequirement, + Convert_v2alpha1_ScheduledJob_To_batch_ScheduledJob, + Convert_batch_ScheduledJob_To_v2alpha1_ScheduledJob, + Convert_v2alpha1_ScheduledJobList_To_batch_ScheduledJobList, + Convert_batch_ScheduledJobList_To_v2alpha1_ScheduledJobList, + Convert_v2alpha1_ScheduledJobSpec_To_batch_ScheduledJobSpec, + Convert_batch_ScheduledJobSpec_To_v2alpha1_ScheduledJobSpec, + Convert_v2alpha1_ScheduledJobStatus_To_batch_ScheduledJobStatus, + Convert_batch_ScheduledJobStatus_To_v2alpha1_ScheduledJobStatus, + ); err != nil { + // if one of the conversion functions is malformed, detect it immediately. + panic(err) + } +} + +func autoConvert_v2alpha1_Job_To_batch_Job(in *Job, out *batch.Job, s conversion.Scope) error { + SetDefaults_Job(in) + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + // TODO: Inefficient conversion - can we improve it? + if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { + return err + } + if err := Convert_v2alpha1_JobSpec_To_batch_JobSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v2alpha1_JobStatus_To_batch_JobStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v2alpha1_Job_To_batch_Job(in *Job, out *batch.Job, s conversion.Scope) error { + return autoConvert_v2alpha1_Job_To_batch_Job(in, out, s) +} + +func autoConvert_batch_Job_To_v2alpha1_Job(in *batch.Job, out *Job, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + // TODO: Inefficient conversion - can we improve it? 
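+ // (The s.Convert call below presumably goes through the generic, reflection-based converter, since no generated helper covers ObjectMeta here; that slower path is what the TODO above is flagging.)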
+ if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { + return err + } + if err := Convert_batch_JobSpec_To_v2alpha1_JobSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_batch_JobStatus_To_v2alpha1_JobStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_batch_Job_To_v2alpha1_Job(in *batch.Job, out *Job, s conversion.Scope) error { + return autoConvert_batch_Job_To_v2alpha1_Job(in, out, s) +} + +func autoConvert_v2alpha1_JobCondition_To_batch_JobCondition(in *JobCondition, out *batch.JobCondition, s conversion.Scope) error { + out.Type = batch.JobConditionType(in.Type) + out.Status = api.ConditionStatus(in.Status) + if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastProbeTime, &out.LastProbeTime, s); err != nil { + return err + } + if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastTransitionTime, &out.LastTransitionTime, s); err != nil { + return err + } + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +func Convert_v2alpha1_JobCondition_To_batch_JobCondition(in *JobCondition, out *batch.JobCondition, s conversion.Scope) error { + return autoConvert_v2alpha1_JobCondition_To_batch_JobCondition(in, out, s) +} + +func autoConvert_batch_JobCondition_To_v2alpha1_JobCondition(in *batch.JobCondition, out *JobCondition, s conversion.Scope) error { + out.Type = JobConditionType(in.Type) + out.Status = v1.ConditionStatus(in.Status) + if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastProbeTime, &out.LastProbeTime, s); err != nil { + return err + } + if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastTransitionTime, &out.LastTransitionTime, s); err != nil { + return err + } + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +func Convert_batch_JobCondition_To_v2alpha1_JobCondition(in *batch.JobCondition, out *JobCondition, s conversion.Scope) error { + return autoConvert_batch_JobCondition_To_v2alpha1_JobCondition(in, out, s) +} + +func autoConvert_v2alpha1_JobList_To_batch_JobList(in *JobList, out *batch.JobList, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { + return err + } + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]batch.Job, len(*in)) + for i := range *in { + if err := Convert_v2alpha1_Job_To_batch_Job(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_v2alpha1_JobList_To_batch_JobList(in *JobList, out *batch.JobList, s conversion.Scope) error { + return autoConvert_v2alpha1_JobList_To_batch_JobList(in, out, s) +} + +func autoConvert_batch_JobList_To_v2alpha1_JobList(in *batch.JobList, out *JobList, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { + return err + } + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Job, len(*in)) + for i := range *in { + if err := Convert_batch_Job_To_v2alpha1_Job(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func 
Convert_batch_JobList_To_v2alpha1_JobList(in *batch.JobList, out *JobList, s conversion.Scope) error { + return autoConvert_batch_JobList_To_v2alpha1_JobList(in, out, s) +} + +func autoConvert_v2alpha1_JobSpec_To_batch_JobSpec(in *JobSpec, out *batch.JobSpec, s conversion.Scope) error { + out.Parallelism = in.Parallelism + out.Completions = in.Completions + out.ActiveDeadlineSeconds = in.ActiveDeadlineSeconds + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(unversioned.LabelSelector) + if err := Convert_v2alpha1_LabelSelector_To_unversioned_LabelSelector(*in, *out, s); err != nil { + return err + } + } else { + out.Selector = nil + } + out.ManualSelector = in.ManualSelector + if err := v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + return nil +} + +func autoConvert_batch_JobSpec_To_v2alpha1_JobSpec(in *batch.JobSpec, out *JobSpec, s conversion.Scope) error { + out.Parallelism = in.Parallelism + out.Completions = in.Completions + out.ActiveDeadlineSeconds = in.ActiveDeadlineSeconds + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(LabelSelector) + if err := Convert_unversioned_LabelSelector_To_v2alpha1_LabelSelector(*in, *out, s); err != nil { + return err + } + } else { + out.Selector = nil + } + out.ManualSelector = in.ManualSelector + if err := v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + return nil +} + +func autoConvert_v2alpha1_JobStatus_To_batch_JobStatus(in *JobStatus, out *batch.JobStatus, s conversion.Scope) error { + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]batch.JobCondition, len(*in)) + for i := range *in { + if err := Convert_v2alpha1_JobCondition_To_batch_JobCondition(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Conditions = nil + } + out.StartTime = in.StartTime + out.CompletionTime = in.CompletionTime + out.Active = in.Active + out.Succeeded = in.Succeeded + out.Failed = in.Failed + return nil +} + +func Convert_v2alpha1_JobStatus_To_batch_JobStatus(in *JobStatus, out *batch.JobStatus, s conversion.Scope) error { + return autoConvert_v2alpha1_JobStatus_To_batch_JobStatus(in, out, s) +} + +func autoConvert_batch_JobStatus_To_v2alpha1_JobStatus(in *batch.JobStatus, out *JobStatus, s conversion.Scope) error { + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]JobCondition, len(*in)) + for i := range *in { + if err := Convert_batch_JobCondition_To_v2alpha1_JobCondition(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Conditions = nil + } + out.StartTime = in.StartTime + out.CompletionTime = in.CompletionTime + out.Active = in.Active + out.Succeeded = in.Succeeded + out.Failed = in.Failed + return nil +} + +func Convert_batch_JobStatus_To_v2alpha1_JobStatus(in *batch.JobStatus, out *JobStatus, s conversion.Scope) error { + return autoConvert_batch_JobStatus_To_v2alpha1_JobStatus(in, out, s) +} + +func autoConvert_v2alpha1_JobTemplate_To_batch_JobTemplate(in *JobTemplate, out *batch.JobTemplate, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + // TODO: Inefficient conversion - can we improve it? 
+ if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { + return err + } + if err := Convert_v2alpha1_JobTemplateSpec_To_batch_JobTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + return nil +} + +func Convert_v2alpha1_JobTemplate_To_batch_JobTemplate(in *JobTemplate, out *batch.JobTemplate, s conversion.Scope) error { + return autoConvert_v2alpha1_JobTemplate_To_batch_JobTemplate(in, out, s) +} + +func autoConvert_batch_JobTemplate_To_v2alpha1_JobTemplate(in *batch.JobTemplate, out *JobTemplate, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + // TODO: Inefficient conversion - can we improve it? + if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { + return err + } + if err := Convert_batch_JobTemplateSpec_To_v2alpha1_JobTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + return nil +} + +func Convert_batch_JobTemplate_To_v2alpha1_JobTemplate(in *batch.JobTemplate, out *JobTemplate, s conversion.Scope) error { + return autoConvert_batch_JobTemplate_To_v2alpha1_JobTemplate(in, out, s) +} + +func autoConvert_v2alpha1_JobTemplateSpec_To_batch_JobTemplateSpec(in *JobTemplateSpec, out *batch.JobTemplateSpec, s conversion.Scope) error { + // TODO: Inefficient conversion - can we improve it? + if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { + return err + } + if err := Convert_v2alpha1_JobSpec_To_batch_JobSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + return nil +} + +func Convert_v2alpha1_JobTemplateSpec_To_batch_JobTemplateSpec(in *JobTemplateSpec, out *batch.JobTemplateSpec, s conversion.Scope) error { + return autoConvert_v2alpha1_JobTemplateSpec_To_batch_JobTemplateSpec(in, out, s) +} + +func autoConvert_batch_JobTemplateSpec_To_v2alpha1_JobTemplateSpec(in *batch.JobTemplateSpec, out *JobTemplateSpec, s conversion.Scope) error { + // TODO: Inefficient conversion - can we improve it? 
+ if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { + return err + } + if err := Convert_batch_JobSpec_To_v2alpha1_JobSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + return nil +} + +func Convert_batch_JobTemplateSpec_To_v2alpha1_JobTemplateSpec(in *batch.JobTemplateSpec, out *JobTemplateSpec, s conversion.Scope) error { + return autoConvert_batch_JobTemplateSpec_To_v2alpha1_JobTemplateSpec(in, out, s) +} + +func autoConvert_v2alpha1_LabelSelector_To_unversioned_LabelSelector(in *LabelSelector, out *unversioned.LabelSelector, s conversion.Scope) error { + out.MatchLabels = in.MatchLabels + if in.MatchExpressions != nil { + in, out := &in.MatchExpressions, &out.MatchExpressions + *out = make([]unversioned.LabelSelectorRequirement, len(*in)) + for i := range *in { + if err := Convert_v2alpha1_LabelSelectorRequirement_To_unversioned_LabelSelectorRequirement(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.MatchExpressions = nil + } + return nil +} + +func Convert_v2alpha1_LabelSelector_To_unversioned_LabelSelector(in *LabelSelector, out *unversioned.LabelSelector, s conversion.Scope) error { + return autoConvert_v2alpha1_LabelSelector_To_unversioned_LabelSelector(in, out, s) +} + +func autoConvert_unversioned_LabelSelector_To_v2alpha1_LabelSelector(in *unversioned.LabelSelector, out *LabelSelector, s conversion.Scope) error { + out.MatchLabels = in.MatchLabels + if in.MatchExpressions != nil { + in, out := &in.MatchExpressions, &out.MatchExpressions + *out = make([]LabelSelectorRequirement, len(*in)) + for i := range *in { + if err := Convert_unversioned_LabelSelectorRequirement_To_v2alpha1_LabelSelectorRequirement(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.MatchExpressions = nil + } + return nil +} + +func Convert_unversioned_LabelSelector_To_v2alpha1_LabelSelector(in *unversioned.LabelSelector, out *LabelSelector, s conversion.Scope) error { + return autoConvert_unversioned_LabelSelector_To_v2alpha1_LabelSelector(in, out, s) +} + +func autoConvert_v2alpha1_LabelSelectorRequirement_To_unversioned_LabelSelectorRequirement(in *LabelSelectorRequirement, out *unversioned.LabelSelectorRequirement, s conversion.Scope) error { + out.Key = in.Key + out.Operator = unversioned.LabelSelectorOperator(in.Operator) + out.Values = in.Values + return nil +} + +func Convert_v2alpha1_LabelSelectorRequirement_To_unversioned_LabelSelectorRequirement(in *LabelSelectorRequirement, out *unversioned.LabelSelectorRequirement, s conversion.Scope) error { + return autoConvert_v2alpha1_LabelSelectorRequirement_To_unversioned_LabelSelectorRequirement(in, out, s) +} + +func autoConvert_unversioned_LabelSelectorRequirement_To_v2alpha1_LabelSelectorRequirement(in *unversioned.LabelSelectorRequirement, out *LabelSelectorRequirement, s conversion.Scope) error { + out.Key = in.Key + out.Operator = LabelSelectorOperator(in.Operator) + out.Values = in.Values + return nil +} + +func Convert_unversioned_LabelSelectorRequirement_To_v2alpha1_LabelSelectorRequirement(in *unversioned.LabelSelectorRequirement, out *LabelSelectorRequirement, s conversion.Scope) error { + return autoConvert_unversioned_LabelSelectorRequirement_To_v2alpha1_LabelSelectorRequirement(in, out, s) +} + +func autoConvert_v2alpha1_ScheduledJob_To_batch_ScheduledJob(in *ScheduledJob, out *batch.ScheduledJob, s conversion.Scope) error { + SetDefaults_ScheduledJob(in) + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); 
err != nil { + return err + } + // TODO: Inefficient conversion - can we improve it? + if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { + return err + } + if err := Convert_v2alpha1_ScheduledJobSpec_To_batch_ScheduledJobSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v2alpha1_ScheduledJobStatus_To_batch_ScheduledJobStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v2alpha1_ScheduledJob_To_batch_ScheduledJob(in *ScheduledJob, out *batch.ScheduledJob, s conversion.Scope) error { + return autoConvert_v2alpha1_ScheduledJob_To_batch_ScheduledJob(in, out, s) +} + +func autoConvert_batch_ScheduledJob_To_v2alpha1_ScheduledJob(in *batch.ScheduledJob, out *ScheduledJob, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + // TODO: Inefficient conversion - can we improve it? + if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { + return err + } + if err := Convert_batch_ScheduledJobSpec_To_v2alpha1_ScheduledJobSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_batch_ScheduledJobStatus_To_v2alpha1_ScheduledJobStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_batch_ScheduledJob_To_v2alpha1_ScheduledJob(in *batch.ScheduledJob, out *ScheduledJob, s conversion.Scope) error { + return autoConvert_batch_ScheduledJob_To_v2alpha1_ScheduledJob(in, out, s) +} + +func autoConvert_v2alpha1_ScheduledJobList_To_batch_ScheduledJobList(in *ScheduledJobList, out *batch.ScheduledJobList, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { + return err + } + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]batch.ScheduledJob, len(*in)) + for i := range *in { + if err := Convert_v2alpha1_ScheduledJob_To_batch_ScheduledJob(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_v2alpha1_ScheduledJobList_To_batch_ScheduledJobList(in *ScheduledJobList, out *batch.ScheduledJobList, s conversion.Scope) error { + return autoConvert_v2alpha1_ScheduledJobList_To_batch_ScheduledJobList(in, out, s) +} + +func autoConvert_batch_ScheduledJobList_To_v2alpha1_ScheduledJobList(in *batch.ScheduledJobList, out *ScheduledJobList, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { + return err + } + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ScheduledJob, len(*in)) + for i := range *in { + if err := Convert_batch_ScheduledJob_To_v2alpha1_ScheduledJob(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_batch_ScheduledJobList_To_v2alpha1_ScheduledJobList(in *batch.ScheduledJobList, out *ScheduledJobList, s conversion.Scope) error { + return autoConvert_batch_ScheduledJobList_To_v2alpha1_ScheduledJobList(in, out, s) +} + +func autoConvert_v2alpha1_ScheduledJobSpec_To_batch_ScheduledJobSpec(in *ScheduledJobSpec, 
out *batch.ScheduledJobSpec, s conversion.Scope) error { + out.Schedule = in.Schedule + out.StartingDeadlineSeconds = in.StartingDeadlineSeconds + out.ConcurrencyPolicy = batch.ConcurrencyPolicy(in.ConcurrencyPolicy) + out.Suspend = in.Suspend + if err := Convert_v2alpha1_JobTemplateSpec_To_batch_JobTemplateSpec(&in.JobTemplate, &out.JobTemplate, s); err != nil { + return err + } + return nil +} + +func Convert_v2alpha1_ScheduledJobSpec_To_batch_ScheduledJobSpec(in *ScheduledJobSpec, out *batch.ScheduledJobSpec, s conversion.Scope) error { + return autoConvert_v2alpha1_ScheduledJobSpec_To_batch_ScheduledJobSpec(in, out, s) +} + +func autoConvert_batch_ScheduledJobSpec_To_v2alpha1_ScheduledJobSpec(in *batch.ScheduledJobSpec, out *ScheduledJobSpec, s conversion.Scope) error { + out.Schedule = in.Schedule + out.StartingDeadlineSeconds = in.StartingDeadlineSeconds + out.ConcurrencyPolicy = ConcurrencyPolicy(in.ConcurrencyPolicy) + out.Suspend = in.Suspend + if err := Convert_batch_JobTemplateSpec_To_v2alpha1_JobTemplateSpec(&in.JobTemplate, &out.JobTemplate, s); err != nil { + return err + } + return nil +} + +func Convert_batch_ScheduledJobSpec_To_v2alpha1_ScheduledJobSpec(in *batch.ScheduledJobSpec, out *ScheduledJobSpec, s conversion.Scope) error { + return autoConvert_batch_ScheduledJobSpec_To_v2alpha1_ScheduledJobSpec(in, out, s) +} + +func autoConvert_v2alpha1_ScheduledJobStatus_To_batch_ScheduledJobStatus(in *ScheduledJobStatus, out *batch.ScheduledJobStatus, s conversion.Scope) error { + if in.Active != nil { + in, out := &in.Active, &out.Active + *out = make([]api.ObjectReference, len(*in)) + for i := range *in { + // TODO: Inefficient conversion - can we improve it? + if err := s.Convert(&(*in)[i], &(*out)[i], 0); err != nil { + return err + } + } + } else { + out.Active = nil + } + out.LastScheduleTime = in.LastScheduleTime + return nil +} + +func Convert_v2alpha1_ScheduledJobStatus_To_batch_ScheduledJobStatus(in *ScheduledJobStatus, out *batch.ScheduledJobStatus, s conversion.Scope) error { + return autoConvert_v2alpha1_ScheduledJobStatus_To_batch_ScheduledJobStatus(in, out, s) +} + +func autoConvert_batch_ScheduledJobStatus_To_v2alpha1_ScheduledJobStatus(in *batch.ScheduledJobStatus, out *ScheduledJobStatus, s conversion.Scope) error { + if in.Active != nil { + in, out := &in.Active, &out.Active + *out = make([]v1.ObjectReference, len(*in)) + for i := range *in { + // TODO: Inefficient conversion - can we improve it? + if err := s.Convert(&(*in)[i], &(*out)[i], 0); err != nil { + return err + } + } + } else { + out.Active = nil + } + out.LastScheduleTime = in.LastScheduleTime + return nil +} + +func Convert_batch_ScheduledJobStatus_To_v2alpha1_ScheduledJobStatus(in *batch.ScheduledJobStatus, out *ScheduledJobStatus, s conversion.Scope) error { + return autoConvert_batch_ScheduledJobStatus_To_v2alpha1_ScheduledJobStatus(in, out, s) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/deep_copy_generated.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/deep_copy_generated.go new file mode 100644 index 000000000000..8e0eb343a5e1 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/deep_copy_generated.go @@ -0,0 +1,318 @@ +// +build !ignore_autogenerated + +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package v2alpha1 + +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + v1 "k8s.io/kubernetes/pkg/api/v1" + conversion "k8s.io/kubernetes/pkg/conversion" +) + +func init() { + if err := api.Scheme.AddGeneratedDeepCopyFuncs( + DeepCopy_v2alpha1_Job, + DeepCopy_v2alpha1_JobCondition, + DeepCopy_v2alpha1_JobList, + DeepCopy_v2alpha1_JobSpec, + DeepCopy_v2alpha1_JobStatus, + DeepCopy_v2alpha1_JobTemplate, + DeepCopy_v2alpha1_JobTemplateSpec, + DeepCopy_v2alpha1_LabelSelector, + DeepCopy_v2alpha1_LabelSelectorRequirement, + DeepCopy_v2alpha1_ScheduledJob, + DeepCopy_v2alpha1_ScheduledJobList, + DeepCopy_v2alpha1_ScheduledJobSpec, + DeepCopy_v2alpha1_ScheduledJobStatus, + ); err != nil { + // if one of the deep copy functions is malformed, detect it immediately. + panic(err) + } +} + +func DeepCopy_v2alpha1_Job(in Job, out *Job, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + return err + } + if err := DeepCopy_v2alpha1_JobSpec(in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_v2alpha1_JobStatus(in.Status, &out.Status, c); err != nil { + return err + } + return nil +} + +func DeepCopy_v2alpha1_JobCondition(in JobCondition, out *JobCondition, c *conversion.Cloner) error { + out.Type = in.Type + out.Status = in.Status + if err := unversioned.DeepCopy_unversioned_Time(in.LastProbeTime, &out.LastProbeTime, c); err != nil { + return err + } + if err := unversioned.DeepCopy_unversioned_Time(in.LastTransitionTime, &out.LastTransitionTime, c); err != nil { + return err + } + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +func DeepCopy_v2alpha1_JobList(in JobList, out *JobList, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + return err + } + if in.Items != nil { + in, out := in.Items, &out.Items + *out = make([]Job, len(in)) + for i := range in { + if err := DeepCopy_v2alpha1_Job(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func DeepCopy_v2alpha1_JobSpec(in JobSpec, out *JobSpec, c *conversion.Cloner) error { + if in.Parallelism != nil { + in, out := in.Parallelism, &out.Parallelism + *out = new(int32) + **out = *in + } else { + out.Parallelism = nil + } + if in.Completions != nil { + in, out := in.Completions, &out.Completions + *out = new(int32) + **out = *in + } else { + out.Completions = nil + } + if in.ActiveDeadlineSeconds != nil { + in, out := in.ActiveDeadlineSeconds, &out.ActiveDeadlineSeconds + *out = new(int64) + **out = *in + } else { + out.ActiveDeadlineSeconds = nil + } + if in.Selector != nil { + in, out := in.Selector, &out.Selector + *out = new(LabelSelector) + if err := 
DeepCopy_v2alpha1_LabelSelector(*in, *out, c); err != nil { + return err + } + } else { + out.Selector = nil + } + if in.ManualSelector != nil { + in, out := in.ManualSelector, &out.ManualSelector + *out = new(bool) + **out = *in + } else { + out.ManualSelector = nil + } + if err := v1.DeepCopy_v1_PodTemplateSpec(in.Template, &out.Template, c); err != nil { + return err + } + return nil +} + +func DeepCopy_v2alpha1_JobStatus(in JobStatus, out *JobStatus, c *conversion.Cloner) error { + if in.Conditions != nil { + in, out := in.Conditions, &out.Conditions + *out = make([]JobCondition, len(in)) + for i := range in { + if err := DeepCopy_v2alpha1_JobCondition(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Conditions = nil + } + if in.StartTime != nil { + in, out := in.StartTime, &out.StartTime + *out = new(unversioned.Time) + if err := unversioned.DeepCopy_unversioned_Time(*in, *out, c); err != nil { + return err + } + } else { + out.StartTime = nil + } + if in.CompletionTime != nil { + in, out := in.CompletionTime, &out.CompletionTime + *out = new(unversioned.Time) + if err := unversioned.DeepCopy_unversioned_Time(*in, *out, c); err != nil { + return err + } + } else { + out.CompletionTime = nil + } + out.Active = in.Active + out.Succeeded = in.Succeeded + out.Failed = in.Failed + return nil +} + +func DeepCopy_v2alpha1_JobTemplate(in JobTemplate, out *JobTemplate, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + return err + } + if err := DeepCopy_v2alpha1_JobTemplateSpec(in.Template, &out.Template, c); err != nil { + return err + } + return nil +} + +func DeepCopy_v2alpha1_JobTemplateSpec(in JobTemplateSpec, out *JobTemplateSpec, c *conversion.Cloner) error { + if err := v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + return err + } + if err := DeepCopy_v2alpha1_JobSpec(in.Spec, &out.Spec, c); err != nil { + return err + } + return nil +} + +func DeepCopy_v2alpha1_LabelSelector(in LabelSelector, out *LabelSelector, c *conversion.Cloner) error { + if in.MatchLabels != nil { + in, out := in.MatchLabels, &out.MatchLabels + *out = make(map[string]string) + for key, val := range in { + (*out)[key] = val + } + } else { + out.MatchLabels = nil + } + if in.MatchExpressions != nil { + in, out := in.MatchExpressions, &out.MatchExpressions + *out = make([]LabelSelectorRequirement, len(in)) + for i := range in { + if err := DeepCopy_v2alpha1_LabelSelectorRequirement(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.MatchExpressions = nil + } + return nil +} + +func DeepCopy_v2alpha1_LabelSelectorRequirement(in LabelSelectorRequirement, out *LabelSelectorRequirement, c *conversion.Cloner) error { + out.Key = in.Key + out.Operator = in.Operator + if in.Values != nil { + in, out := in.Values, &out.Values + *out = make([]string, len(in)) + copy(*out, in) + } else { + out.Values = nil + } + return nil +} + +func DeepCopy_v2alpha1_ScheduledJob(in ScheduledJob, out *ScheduledJob, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + return err + } + if err := DeepCopy_v2alpha1_ScheduledJobSpec(in.Spec, &out.Spec, c); err != nil { + return err + } + if err := 
DeepCopy_v2alpha1_ScheduledJobStatus(in.Status, &out.Status, c); err != nil { + return err + } + return nil +} + +func DeepCopy_v2alpha1_ScheduledJobList(in ScheduledJobList, out *ScheduledJobList, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + return err + } + if in.Items != nil { + in, out := in.Items, &out.Items + *out = make([]ScheduledJob, len(in)) + for i := range in { + if err := DeepCopy_v2alpha1_ScheduledJob(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func DeepCopy_v2alpha1_ScheduledJobSpec(in ScheduledJobSpec, out *ScheduledJobSpec, c *conversion.Cloner) error { + out.Schedule = in.Schedule + if in.StartingDeadlineSeconds != nil { + in, out := in.StartingDeadlineSeconds, &out.StartingDeadlineSeconds + *out = new(int64) + **out = *in + } else { + out.StartingDeadlineSeconds = nil + } + out.ConcurrencyPolicy = in.ConcurrencyPolicy + out.Suspend = in.Suspend + if err := DeepCopy_v2alpha1_JobTemplateSpec(in.JobTemplate, &out.JobTemplate, c); err != nil { + return err + } + return nil +} + +func DeepCopy_v2alpha1_ScheduledJobStatus(in ScheduledJobStatus, out *ScheduledJobStatus, c *conversion.Cloner) error { + if in.Active != nil { + in, out := in.Active, &out.Active + *out = make([]v1.ObjectReference, len(in)) + for i := range in { + if err := v1.DeepCopy_v1_ObjectReference(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Active = nil + } + if in.LastScheduleTime != nil { + in, out := in.LastScheduleTime, &out.LastScheduleTime + *out = new(unversioned.Time) + if err := unversioned.DeepCopy_unversioned_Time(*in, *out, c); err != nil { + return err + } + } else { + out.LastScheduleTime = nil + } + return nil +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/defaults.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/defaults.go new file mode 100644 index 000000000000..72da797c77f7 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/defaults.go @@ -0,0 +1,49 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v2alpha1 + +import ( + "k8s.io/kubernetes/pkg/runtime" +) + +func addDefaultingFuncs(scheme *runtime.Scheme) { + scheme.AddDefaultingFuncs( + SetDefaults_Job, + SetDefaults_ScheduledJob, + ) +} + +func SetDefaults_Job(obj *Job) { + // For a non-parallel job, you can leave both `.spec.completions` and + // `.spec.parallelism` unset. When both are unset, both are defaulted to 1. 
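+ // A Job submitted with an empty spec therefore becomes the classic non-parallel case: completions=1, parallelism=1.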
+ if obj.Spec.Completions == nil && obj.Spec.Parallelism == nil { + obj.Spec.Completions = new(int32) + *obj.Spec.Completions = 1 + obj.Spec.Parallelism = new(int32) + *obj.Spec.Parallelism = 1 + } + if obj.Spec.Parallelism == nil { + obj.Spec.Parallelism = new(int32) + *obj.Spec.Parallelism = 1 + } +} + +func SetDefaults_ScheduledJob(obj *ScheduledJob) { + if obj.Spec.ConcurrencyPolicy == "" { + obj.Spec.ConcurrencyPolicy = AllowConcurrent + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/doc.go new file mode 100644 index 000000000000..0e6b67b5894e --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +genconversion=true +package v2alpha1 diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/generated.pb.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/generated.pb.go new file mode 100644 index 000000000000..a642fa691c7a --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/generated.pb.go @@ -0,0 +1,3013 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/pkg/apis/batch/v2alpha1/generated.proto +// DO NOT EDIT! + +/* + Package v2alpha1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/pkg/apis/batch/v2alpha1/generated.proto + + It has these top-level messages: + Job + JobCondition + JobList + JobSpec + JobStatus + JobTemplate + JobTemplateSpec + LabelSelector + LabelSelectorRequirement + ScheduledJob + ScheduledJobList + ScheduledJobSpec + ScheduledJobStatus +*/ +package v2alpha1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import k8s_io_kubernetes_pkg_api_unversioned "k8s.io/kubernetes/pkg/api/unversioned" +import k8s_io_kubernetes_pkg_api_v1 "k8s.io/kubernetes/pkg/api/v1" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. 
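+// (Go rejects unused imports at compile time, so the blank-identifier assignments below keep the imports above legal even when the generated code does not reference them directly.)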
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +func (m *Job) Reset() { *m = Job{} } +func (m *Job) String() string { return proto.CompactTextString(m) } +func (*Job) ProtoMessage() {} + +func (m *JobCondition) Reset() { *m = JobCondition{} } +func (m *JobCondition) String() string { return proto.CompactTextString(m) } +func (*JobCondition) ProtoMessage() {} + +func (m *JobList) Reset() { *m = JobList{} } +func (m *JobList) String() string { return proto.CompactTextString(m) } +func (*JobList) ProtoMessage() {} + +func (m *JobSpec) Reset() { *m = JobSpec{} } +func (m *JobSpec) String() string { return proto.CompactTextString(m) } +func (*JobSpec) ProtoMessage() {} + +func (m *JobStatus) Reset() { *m = JobStatus{} } +func (m *JobStatus) String() string { return proto.CompactTextString(m) } +func (*JobStatus) ProtoMessage() {} + +func (m *JobTemplate) Reset() { *m = JobTemplate{} } +func (m *JobTemplate) String() string { return proto.CompactTextString(m) } +func (*JobTemplate) ProtoMessage() {} + +func (m *JobTemplateSpec) Reset() { *m = JobTemplateSpec{} } +func (m *JobTemplateSpec) String() string { return proto.CompactTextString(m) } +func (*JobTemplateSpec) ProtoMessage() {} + +func (m *LabelSelector) Reset() { *m = LabelSelector{} } +func (m *LabelSelector) String() string { return proto.CompactTextString(m) } +func (*LabelSelector) ProtoMessage() {} + +func (m *LabelSelectorRequirement) Reset() { *m = LabelSelectorRequirement{} } +func (m *LabelSelectorRequirement) String() string { return proto.CompactTextString(m) } +func (*LabelSelectorRequirement) ProtoMessage() {} + +func (m *ScheduledJob) Reset() { *m = ScheduledJob{} } +func (m *ScheduledJob) String() string { return proto.CompactTextString(m) } +func (*ScheduledJob) ProtoMessage() {} + +func (m *ScheduledJobList) Reset() { *m = ScheduledJobList{} } +func (m *ScheduledJobList) String() string { return proto.CompactTextString(m) } +func (*ScheduledJobList) ProtoMessage() {} + +func (m *ScheduledJobSpec) Reset() { *m = ScheduledJobSpec{} } +func (m *ScheduledJobSpec) String() string { return proto.CompactTextString(m) } +func (*ScheduledJobSpec) ProtoMessage() {} + +func (m *ScheduledJobStatus) Reset() { *m = ScheduledJobStatus{} } +func (m *ScheduledJobStatus) String() string { return proto.CompactTextString(m) } +func (*ScheduledJobStatus) ProtoMessage() {} + +func init() { + proto.RegisterType((*Job)(nil), "k8s.io.kubernetes.pkg.apis.batch.v2alpha1.Job") + proto.RegisterType((*JobCondition)(nil), "k8s.io.kubernetes.pkg.apis.batch.v2alpha1.JobCondition") + proto.RegisterType((*JobList)(nil), "k8s.io.kubernetes.pkg.apis.batch.v2alpha1.JobList") + proto.RegisterType((*JobSpec)(nil), "k8s.io.kubernetes.pkg.apis.batch.v2alpha1.JobSpec") + proto.RegisterType((*JobStatus)(nil), "k8s.io.kubernetes.pkg.apis.batch.v2alpha1.JobStatus") + proto.RegisterType((*JobTemplate)(nil), "k8s.io.kubernetes.pkg.apis.batch.v2alpha1.JobTemplate") + proto.RegisterType((*JobTemplateSpec)(nil), "k8s.io.kubernetes.pkg.apis.batch.v2alpha1.JobTemplateSpec") + proto.RegisterType((*LabelSelector)(nil), "k8s.io.kubernetes.pkg.apis.batch.v2alpha1.LabelSelector") + proto.RegisterType((*LabelSelectorRequirement)(nil), "k8s.io.kubernetes.pkg.apis.batch.v2alpha1.LabelSelectorRequirement") + proto.RegisterType((*ScheduledJob)(nil), "k8s.io.kubernetes.pkg.apis.batch.v2alpha1.ScheduledJob") + proto.RegisterType((*ScheduledJobList)(nil), "k8s.io.kubernetes.pkg.apis.batch.v2alpha1.ScheduledJobList") + proto.RegisterType((*ScheduledJobSpec)(nil), 
"k8s.io.kubernetes.pkg.apis.batch.v2alpha1.ScheduledJobSpec") + proto.RegisterType((*ScheduledJobStatus)(nil), "k8s.io.kubernetes.pkg.apis.batch.v2alpha1.ScheduledJobStatus") +} +func (m *Job) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Job) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n1 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n2, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n2 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n3, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n3 + return i, nil +} + +func (m *JobCondition) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *JobCondition) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Type))) + i += copy(data[i:], m.Type) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Status))) + i += copy(data[i:], m.Status) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.LastProbeTime.Size())) + n4, err := m.LastProbeTime.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n4 + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(m.LastTransitionTime.Size())) + n5, err := m.LastTransitionTime.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n5 + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) + i += copy(data[i:], m.Reason) + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Message))) + i += copy(data[i:], m.Message) + return i, nil +} + +func (m *JobList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *JobList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n6, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n6 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *JobSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *JobSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Parallelism != nil { + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.Parallelism)) + } + if m.Completions != nil { + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.Completions)) + } + if m.ActiveDeadlineSeconds != nil { + data[i] = 0x18 + i++ + i = encodeVarintGenerated(data, i, 
uint64(*m.ActiveDeadlineSeconds)) + } + if m.Selector != nil { + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Selector.Size())) + n7, err := m.Selector.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n7 + } + if m.ManualSelector != nil { + data[i] = 0x28 + i++ + if *m.ManualSelector { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + } + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Template.Size())) + n8, err := m.Template.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n8 + return i, nil +} + +func (m *JobStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *JobStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.StartTime != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.StartTime.Size())) + n9, err := m.StartTime.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n9 + } + if m.CompletionTime != nil { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.CompletionTime.Size())) + n10, err := m.CompletionTime.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n10 + } + data[i] = 0x20 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Active)) + data[i] = 0x28 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Succeeded)) + data[i] = 0x30 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Failed)) + return i, nil +} + +func (m *JobTemplate) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *JobTemplate) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n11, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n11 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Template.Size())) + n12, err := m.Template.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n12 + return i, nil +} + +func (m *JobTemplateSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *JobTemplateSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n13, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n13 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n14, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n14 + return i, nil +} + +func (m *LabelSelector) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *LabelSelector) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.MatchLabels) > 0 { + for k := range 
m.MatchLabels { + data[i] = 0xa + i++ + v := m.MatchLabels[k] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(v))) + i += copy(data[i:], v) + } + } + if len(m.MatchExpressions) > 0 { + for _, msg := range m.MatchExpressions { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *LabelSelectorRequirement) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *LabelSelectorRequirement) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Key))) + i += copy(data[i:], m.Key) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Operator))) + i += copy(data[i:], m.Operator) + if len(m.Values) > 0 { + for _, s := range m.Values { + data[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + return i, nil +} + +func (m *ScheduledJob) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ScheduledJob) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n15, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n15 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n16, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n16 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n17, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n17 + return i, nil +} + +func (m *ScheduledJobList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ScheduledJobList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n18, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n18 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ScheduledJobSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ScheduledJobSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Schedule))) + i += copy(data[i:], m.Schedule) + if 
m.StartingDeadlineSeconds != nil { + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.StartingDeadlineSeconds)) + } + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ConcurrencyPolicy))) + i += copy(data[i:], m.ConcurrencyPolicy) + data[i] = 0x20 + i++ + if m.Suspend { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(m.JobTemplate.Size())) + n19, err := m.JobTemplate.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n19 + return i, nil +} + +func (m *ScheduledJobStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ScheduledJobStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Active) > 0 { + for _, msg := range m.Active { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.LastScheduleTime != nil { + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(m.LastScheduleTime.Size())) + n20, err := m.LastScheduleTime.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n20 + } + return i, nil +} + +func encodeFixed64Generated(data []byte, offset int, v uint64) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + data[offset+4] = uint8(v >> 32) + data[offset+5] = uint8(v >> 40) + data[offset+6] = uint8(v >> 48) + data[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(data []byte, offset int, v uint32) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(data []byte, offset int, v uint64) int { + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return offset + 1 +} +func (m *Job) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *JobCondition) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastProbeTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *JobList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *JobSpec) Size() (n int) { + var l int + _ = l + if m.Parallelism != nil { + n += 1 + sovGenerated(uint64(*m.Parallelism)) + } + if m.Completions != nil { + n += 1 + sovGenerated(uint64(*m.Completions)) + } + if m.ActiveDeadlineSeconds != nil { + n += 1 + sovGenerated(uint64(*m.ActiveDeadlineSeconds)) + } + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ManualSelector != nil { + n += 2 + 
} + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *JobStatus) Size() (n int) { + var l int + _ = l + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.StartTime != nil { + l = m.StartTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.CompletionTime != nil { + l = m.CompletionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 1 + sovGenerated(uint64(m.Active)) + n += 1 + sovGenerated(uint64(m.Succeeded)) + n += 1 + sovGenerated(uint64(m.Failed)) + return n +} + +func (m *JobTemplate) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *JobTemplateSpec) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *LabelSelector) Size() (n int) { + var l int + _ = l + if len(m.MatchLabels) > 0 { + for k, v := range m.MatchLabels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.MatchExpressions) > 0 { + for _, e := range m.MatchExpressions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *LabelSelectorRequirement) Size() (n int) { + var l int + _ = l + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Operator) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Values) > 0 { + for _, s := range m.Values { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ScheduledJob) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ScheduledJobList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ScheduledJobSpec) Size() (n int) { + var l int + _ = l + l = len(m.Schedule) + n += 1 + l + sovGenerated(uint64(l)) + if m.StartingDeadlineSeconds != nil { + n += 1 + sovGenerated(uint64(*m.StartingDeadlineSeconds)) + } + l = len(m.ConcurrencyPolicy) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + l = m.JobTemplate.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ScheduledJobStatus) Size() (n int) { + var l int + _ = l + if len(m.Active) > 0 { + for _, e := range m.Active { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.LastScheduleTime != nil { + l = m.LastScheduleTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Job) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ 
+ wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Job: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Job: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JobCondition) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JobCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JobCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 
0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = JobConditionType(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_kubernetes_pkg_api_v1.ConditionStatus(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastProbeTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastProbeTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + 
skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JobList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JobList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JobList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Job{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JobSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JobSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JobSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Parallelism", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= 
(int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Parallelism = &v + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Completions", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Completions = &v + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ActiveDeadlineSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ActiveDeadlineSeconds = &v + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &LabelSelector{} + } + if err := m.Selector.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ManualSelector", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.ManualSelector = &b + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Template.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JobStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JobStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JobStatus: illegal tag %d (wire 
type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, JobCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StartTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.StartTime == nil { + m.StartTime = &k8s_io_kubernetes_pkg_api_unversioned.Time{} + } + if err := m.StartTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CompletionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CompletionTime == nil { + m.CompletionTime = &k8s_io_kubernetes_pkg_api_unversioned.Time{} + } + if err := m.CompletionTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Active", wireType) + } + m.Active = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Active |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Succeeded", wireType) + } + m.Succeeded = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Succeeded |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Failed", wireType) + } + m.Failed = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Failed |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return 
io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JobTemplate) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JobTemplate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JobTemplate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Template.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JobTemplateSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JobTemplateSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JobTemplateSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + 
return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LabelSelector) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LabelSelector: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LabelSelector: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchLabels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + 
if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(data[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + if m.MatchLabels == nil { + m.MatchLabels = make(map[string]string) + } + m.MatchLabels[mapkey] = mapvalue + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchExpressions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MatchExpressions = append(m.MatchExpressions, LabelSelectorRequirement{}) + if err := m.MatchExpressions[len(m.MatchExpressions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LabelSelectorRequirement) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LabelSelectorRequirement: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LabelSelectorRequirement: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Operator", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + 
intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Operator = LabelSelectorOperator(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Values = append(m.Values, string(data[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ScheduledJob) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ScheduledJob: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ScheduledJob: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ScheduledJobList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ScheduledJobList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ScheduledJobList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ScheduledJob{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ScheduledJobSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ScheduledJobSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ScheduledJobSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
Schedule", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Schedule = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StartingDeadlineSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.StartingDeadlineSeconds = &v + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConcurrencyPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ConcurrencyPolicy = ConcurrencyPolicy(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Suspend", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Suspend = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field JobTemplate", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.JobTemplate.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ScheduledJobStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ScheduledJobStatus: wiretype end group for non-group") + } + if fieldNum <= 0 
{ + return fmt.Errorf("proto: ScheduledJobStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Active", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Active = append(m.Active, k8s_io_kubernetes_pkg_api_v1.ObjectReference{}) + if err := m.Active[len(m.Active)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastScheduleTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LastScheduleTime == nil { + m.LastScheduleTime = &k8s_io_kubernetes_pkg_api_unversioned.Time{} + } + if err := m.LastScheduleTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(data []byte) (n int, err error) { + l := len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if data[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(data[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx 
+= 4
+		return iNdEx, nil
+	default:
+		return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+	}
+	}
+	panic("unreachable")
+}
+
+var (
+	ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow")
+)
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/generated.proto b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/generated.proto
new file mode 100644
index 000000000000..816045a732c3
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/generated.proto
@@ -0,0 +1,255 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = 'proto2';
+
+package k8s.io.kubernetes.pkg.apis.batch.v2alpha1;
+
+import "k8s.io/kubernetes/pkg/api/resource/generated.proto";
+import "k8s.io/kubernetes/pkg/api/unversioned/generated.proto";
+import "k8s.io/kubernetes/pkg/api/v1/generated.proto";
+import "k8s.io/kubernetes/pkg/runtime/generated.proto";
+import "k8s.io/kubernetes/pkg/util/intstr/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "v2alpha1";
+
+// Job represents the configuration of a single job.
+message Job {
+  // Standard object's metadata.
+  // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+  optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1;
+
+  // Spec is a structure defining the expected behavior of a job.
+  // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+  optional JobSpec spec = 2;
+
+  // Status is a structure describing the current status of a job.
+  // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+  optional JobStatus status = 3;
+}
+
+// JobCondition describes the current state of a job.
+message JobCondition {
+  // Type of job condition, Complete or Failed.
+  optional string type = 1;
+
+  // Status of the condition, one of True, False, Unknown.
+  optional string status = 2;
+
+  // Last time the condition was checked.
+  optional k8s.io.kubernetes.pkg.api.unversioned.Time lastProbeTime = 3;
+
+  // Last time the condition transitioned from one status to another.
+  optional k8s.io.kubernetes.pkg.api.unversioned.Time lastTransitionTime = 4;
+
+  // (brief) reason for the condition's last transition.
+  optional string reason = 5;
+
+  // Human readable message indicating details about the last transition.
+  optional string message = 6;
+}
+
+// JobList is a collection of jobs.
+message JobList {
+  // Standard list metadata.
+  // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+  optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1;
+
+  // Items is the list of Job.
+  repeated Job items = 2;
+}
+
+// JobSpec describes what the job execution will look like.
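+//
+// A worked illustration of the fields below (an editorial sketch; the numbers
+// are hypothetical): with completions = 5 and parallelism = 2, at most two
+// pods run concurrently. Once .status.succeeded reaches 4, only (5 - 4) = 1
+// completion remains, so steady-state parallelism drops to a single pod.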
+message JobSpec {
+  // Parallelism specifies the maximum desired number of pods the job should
+  // run at any given time. The actual number of pods running in steady state will
+  // be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism),
+  // i.e. when the work left to do is less than max parallelism.
+  // More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md
+  optional int32 parallelism = 1;
+
+  // Completions specifies the desired number of successfully finished pods the
+  // job should be run with. Setting to nil means that the success of any
+  // pod signals the success of all pods, and allows parallelism to have any positive
+  // value. Setting to 1 means that parallelism is limited to 1 and the success of that
+  // pod signals the success of the job.
+  // More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md
+  optional int32 completions = 2;
+
+  // Optional duration in seconds relative to the startTime that the job may be active
+  // before the system tries to terminate it; the value must be a positive integer.
+  optional int64 activeDeadlineSeconds = 3;
+
+  // Selector is a label query over pods that should match the pod count.
+  // Normally, the system sets this field for you.
+  // More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors
+  optional LabelSelector selector = 4;
+
+  // ManualSelector controls generation of pod labels and pod selectors.
+  // Leave `manualSelector` unset unless you are certain what you are doing.
+  // When false or unset, the system picks labels unique to this job
+  // and appends those labels to the pod template. When true,
+  // the user is responsible for picking unique labels and specifying
+  // the selector. Failure to pick a unique label may cause this
+  // and other jobs to not function correctly. However, you may see
+  // `manualSelector=true` in jobs that were created with the old `extensions/v1beta1`
+  // API.
+  // More info: http://releases.k8s.io/HEAD/docs/design/selector-generation.md
+  optional bool manualSelector = 5;
+
+  // Template is the object that describes the pod that will be created when
+  // executing a job.
+  // More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md
+  optional k8s.io.kubernetes.pkg.api.v1.PodTemplateSpec template = 6;
+}
+
+// JobStatus represents the current state of a Job.
+message JobStatus {
+  // Conditions represent the latest available observations of an object's current state.
+  // More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md
+  repeated JobCondition conditions = 1;
+
+  // StartTime represents the time when the job was acknowledged by the Job Manager.
+  // It is not guaranteed to be set in happens-before order across separate operations.
+  // It is represented in RFC3339 form and is in UTC.
+  optional k8s.io.kubernetes.pkg.api.unversioned.Time startTime = 2;
+
+  // CompletionTime represents the time when the job was completed. It is not guaranteed to
+  // be set in happens-before order across separate operations.
+  // It is represented in RFC3339 form and is in UTC.
+  optional k8s.io.kubernetes.pkg.api.unversioned.Time completionTime = 3;
+
+  // Active is the number of actively running pods.
+  optional int32 active = 4;
+
+  // Succeeded is the number of pods which reached Phase Succeeded.
+  optional int32 succeeded = 5;
+
+  // Failed is the number of pods which reached Phase Failed.
+  optional int32 failed = 6;
+}
+
+// JobTemplate describes a template for creating copies of a predefined pod.
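+//
+// For orientation (an editorial note): ScheduledJobSpec further below embeds
+// a JobTemplateSpec in its jobTemplate field, so every scheduled execution is
+// stamped out from the same predefined template.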
+message JobTemplate {
+  // Standard object's metadata.
+  // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+  optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1;
+
+  // Template defines jobs that will be created from this template.
+  // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+  optional JobTemplateSpec template = 2;
+}
+
+// JobTemplateSpec describes the data a Job should have when created from a template.
+message JobTemplateSpec {
+  // Standard object's metadata of the jobs created from this template.
+  // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+  optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1;
+
+  // Specification of the desired behavior of the job.
+  // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+  optional JobSpec spec = 2;
+}
+
+// A label selector is a label query over a set of resources. The results of matchLabels and
+// matchExpressions are ANDed. An empty label selector matches all objects. A null
+// label selector matches no objects.
+message LabelSelector {
+  // matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+  // map is equivalent to an element of matchExpressions, whose key field is "key", the
+  // operator is "In", and the values array contains only "value". The requirements are ANDed.
+  map<string, string> matchLabels = 1;
+
+  // matchExpressions is a list of label selector requirements. The requirements are ANDed.
+  repeated LabelSelectorRequirement matchExpressions = 2;
+}
+
+// A label selector requirement is a selector that contains values, a key, and an operator that
+// relates the key and values.
+message LabelSelectorRequirement {
+  // key is the label key that the selector applies to.
+  optional string key = 1;
+
+  // operator represents a key's relationship to a set of values.
+  // Valid operators are In, NotIn, Exists and DoesNotExist.
+  optional string operator = 2;
+
+  // values is an array of string values. If the operator is In or NotIn,
+  // the values array must be non-empty. If the operator is Exists or DoesNotExist,
+  // the values array must be empty. This array is replaced during a strategic
+  // merge patch.
+  repeated string values = 3;
+}
+
+// ScheduledJob represents the configuration of a single scheduled job.
+message ScheduledJob {
+  // Standard object's metadata.
+  // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+  optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1;
+
+  // Spec is a structure defining the expected behavior of a job, including the schedule.
+  // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+  optional ScheduledJobSpec spec = 2;
+
+  // Status is a structure describing the current status of a job.
+  // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+  optional ScheduledJobStatus status = 3;
+}
+
+// ScheduledJobList is a collection of scheduled jobs.
+message ScheduledJobList {
+  // Standard list metadata.
+  // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+  optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1;
+
+  // Items is the list of ScheduledJob.
+  repeated ScheduledJob items = 2;
+}
+
+// ScheduledJobSpec describes what the job execution will look like and when it will actually run.
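+//
+// For example (an editorial sketch; the cron string is hypothetical):
+// schedule = "0 */2 * * *" fires at minute zero of every second hour. If the
+// concurrencyPolicy forbids overlap and the previous run is still active at
+// that moment, the new execution is skipped rather than started concurrently.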
+message ScheduledJobSpec {
+  // Schedule contains the schedule in Cron format, see https://en.wikipedia.org/wiki/Cron.
+  optional string schedule = 1;
+
+  // Optional deadline in seconds for starting the job if it misses its scheduled
+  // time for any reason. Missed job executions will be counted as failed.
+  optional int64 startingDeadlineSeconds = 2;
+
+  // ConcurrencyPolicy specifies how to treat concurrent executions of a Job.
+  optional string concurrencyPolicy = 3;
+
+  // The Suspend flag tells the controller to suspend subsequent executions; it does
+  // not apply to already started executions. Defaults to false.
+  optional bool suspend = 4;
+
+  // JobTemplate is the object that describes the job that will be created when
+  // executing a ScheduledJob.
+  optional JobTemplateSpec jobTemplate = 5;
+}
+
+// ScheduledJobStatus represents the current state of a ScheduledJob.
+message ScheduledJobStatus {
+  // Active holds pointers to currently running jobs.
+  repeated k8s.io.kubernetes.pkg.api.v1.ObjectReference active = 1;
+
+  // LastScheduleTime records the last time the job was successfully scheduled.
+  optional k8s.io.kubernetes.pkg.api.unversioned.Time lastScheduleTime = 4;
+}
+
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/register.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/register.go
new file mode 100644
index 000000000000..3d9dcb83f6cc
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/register.go
@@ -0,0 +1,55 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v2alpha1
+
+import (
+	"k8s.io/kubernetes/pkg/api/unversioned"
+	"k8s.io/kubernetes/pkg/api/v1"
+	"k8s.io/kubernetes/pkg/runtime"
+	versionedwatch "k8s.io/kubernetes/pkg/watch/versioned"
+)
+
+// GroupName is the group name used in this package.
+const GroupName = "batch"
+
+// SchemeGroupVersion is the group version used to register these objects.
+var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: "v2alpha1"}
+
+func AddToScheme(scheme *runtime.Scheme) {
+	addKnownTypes(scheme)
+	addDefaultingFuncs(scheme)
+	addConversionFuncs(scheme)
+}
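+
+// A usage sketch (editorial; only AddToScheme and runtime.NewScheme are taken
+// from the surrounding sources):
+//
+//	scheme := runtime.NewScheme()
+//	AddToScheme(scheme)
+//	// scheme can now create, recognize, and convert the batch/v2alpha1
+//	// types registered below.
+
+// Adds the list of known types to api.Scheme.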
+func addKnownTypes(scheme *runtime.Scheme) { + scheme.AddKnownTypes(SchemeGroupVersion, + &Job{}, + &JobList{}, + &JobTemplate{}, + &ScheduledJob{}, + &ScheduledJobList{}, + &v1.ListOptions{}, + ) + versionedwatch.AddToGroupVersion(scheme, SchemeGroupVersion) +} + +func (obj *Job) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } +func (obj *JobList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } +func (obj *JobTemplate) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } +func (obj *ScheduledJob) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } +func (obj *ScheduledJobList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/types.generated.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/types.generated.go new file mode 100644 index 000000000000..7b64c81d2488 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/types.generated.go @@ -0,0 +1,5280 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// ************************************************************ +// DO NOT EDIT. +// THIS FILE IS AUTO-GENERATED BY codecgen. +// ************************************************************ + +package v2alpha1 + +import ( + "errors" + "fmt" + codec1978 "github.com/ugorji/go/codec" + pkg4_resource "k8s.io/kubernetes/pkg/api/resource" + pkg1_unversioned "k8s.io/kubernetes/pkg/api/unversioned" + pkg2_v1 "k8s.io/kubernetes/pkg/api/v1" + pkg3_types "k8s.io/kubernetes/pkg/types" + pkg5_intstr "k8s.io/kubernetes/pkg/util/intstr" + "reflect" + "runtime" + time "time" +) + +const ( + // ----- content types ---- + codecSelferC_UTF81234 = 1 + codecSelferC_RAW1234 = 0 + // ----- value types used ---- + codecSelferValueTypeArray1234 = 10 + codecSelferValueTypeMap1234 = 9 + // ----- containerStateValues ---- + codecSelfer_containerMapKey1234 = 2 + codecSelfer_containerMapValue1234 = 3 + codecSelfer_containerMapEnd1234 = 4 + codecSelfer_containerArrayElem1234 = 6 + codecSelfer_containerArrayEnd1234 = 7 +) + +var ( + codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) + codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) +) + +type codecSelfer1234 struct{} + +func init() { + if codec1978.GenVersion != 5 { + _, file, _, _ := runtime.Caller(0) + err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. 
Re-generate file: %v", + 5, codec1978.GenVersion, file) + panic(err) + } + if false { // reference the types, but skip this branch at build/run time + var v0 pkg4_resource.Quantity + var v1 pkg1_unversioned.TypeMeta + var v2 pkg2_v1.ObjectMeta + var v3 pkg3_types.UID + var v4 pkg5_intstr.IntOrString + var v5 time.Time + _, _, _, _, _, _ = v0, v1, v2, v3, v4, v5 + } +} + +func (x *Job) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = true + yyq2[1] = true + yyq2[2] = true + yyq2[3] = x.Kind != "" + yyq2[4] = x.APIVersion != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yy4 := &x.ObjectMeta + yy4.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.ObjectMeta + yy6.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yy9 := &x.Spec + yy9.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy11 := &x.Spec + yy11.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy14 := &x.Status + yy14.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy16 := &x.Status + yy16.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym23 := z.EncBinary() + _ = yym23 + if 
false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Job) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Job) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv4 := &x.ObjectMeta + yyv4.CodecDecodeSelf(d) + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = JobSpec{} + } else { + yyv5 := &x.Spec + yyv5.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = JobStatus{} + } else { + yyv6 := &x.Status + yyv6.CodecDecodeSelf(d) + } + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Job) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj9 int + var yyb9 bool + var yyhl9 bool = l >= 0 + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv10 := &x.ObjectMeta + yyv10.CodecDecodeSelf(d) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = JobSpec{} + } else { + yyv11 := &x.Spec + yyv11.CodecDecodeSelf(d) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if 
r.TryDecodeAsNil() { + x.Status = JobStatus{} + } else { + yyv12 := &x.Status + yyv12.CodecDecodeSelf(d) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + for { + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj9-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *JobList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = true + yyq2[2] = x.Kind != "" + yyq2[3] = x.APIVersion != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yy4 := &x.ListMeta + yym5 := z.EncBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.EncExt(yy4) { + } else { + z.EncFallback(yy4) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.ListMeta + yym7 := z.EncBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.EncExt(yy6) { + } else { + z.EncFallback(yy6) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym9 := z.EncBinary() + _ = yym9 + if false { + } else { + h.encSliceJob(([]Job)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + h.encSliceJob(([]Job)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym12 := z.EncBinary() + _ = yym12 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + 
r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *JobList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *JobList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_unversioned.ListMeta{} + } else { + yyv4 := &x.ListMeta + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(yyv4) { + } else { + z.DecFallback(yyv4, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv6 := &x.Items + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + h.decSliceJob((*[]Job)(yyv6), d) + } + } + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *JobList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + 
x.ListMeta = pkg1_unversioned.ListMeta{} + } else { + yyv11 := &x.ListMeta + yym12 := z.DecBinary() + _ = yym12 + if false { + } else if z.HasExtensions() && z.DecExt(yyv11) { + } else { + z.DecFallback(yyv11, false) + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv13 := &x.Items + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + h.decSliceJob((*[]Job)(yyv13), d) + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *JobTemplate) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = true + yyq2[1] = true + yyq2[2] = x.Kind != "" + yyq2[3] = x.APIVersion != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yy4 := &x.ObjectMeta + yy4.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.ObjectMeta + yy6.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yy9 := &x.Template + yy9.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("template")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy11 := &x.Template + yy11.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + 
r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym18 := z.EncBinary() + _ = yym18 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *JobTemplate) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *JobTemplate) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv4 := &x.ObjectMeta + yyv4.CodecDecodeSelf(d) + } + case "template": + if r.TryDecodeAsNil() { + x.Template = JobTemplateSpec{} + } else { + yyv5 := &x.Template + yyv5.CodecDecodeSelf(d) + } + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *JobTemplate) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if 
r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv9 := &x.ObjectMeta + yyv9.CodecDecodeSelf(d) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Template = JobTemplateSpec{} + } else { + yyv10 := &x.Template + yyv10.CodecDecodeSelf(d) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *JobTemplateSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = true + yyq2[1] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yy4 := &x.ObjectMeta + yy4.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.ObjectMeta + yy6.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yy9 := &x.Spec + yy9.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy11 := &x.Spec + yy11.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *JobTemplateSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else 
{ + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *JobTemplateSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv4 := &x.ObjectMeta + yyv4.CodecDecodeSelf(d) + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = JobSpec{} + } else { + yyv5 := &x.Spec + yyv5.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *JobTemplateSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv7 := &x.ObjectMeta + yyv7.CodecDecodeSelf(d) + } + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = JobSpec{} + } else { + yyv8 := &x.Spec + yyv8.CodecDecodeSelf(d) + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *JobSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [6]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Parallelism != nil + yyq2[1] = x.Completions != nil + yyq2[2] = x.ActiveDeadlineSeconds != nil + yyq2[3] = x.Selector != nil + yyq2[4] = x.ManualSelector != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(6) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Parallelism == nil { + r.EncodeNil() + } else { + yy4 := *x.Parallelism + yym5 := 
z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(yy4)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("parallelism")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Parallelism == nil { + r.EncodeNil() + } else { + yy6 := *x.Parallelism + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeInt(int64(yy6)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Completions == nil { + r.EncodeNil() + } else { + yy9 := *x.Completions + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(yy9)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("completions")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Completions == nil { + r.EncodeNil() + } else { + yy11 := *x.Completions + yym12 := z.EncBinary() + _ = yym12 + if false { + } else { + r.EncodeInt(int64(yy11)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.ActiveDeadlineSeconds == nil { + r.EncodeNil() + } else { + yy14 := *x.ActiveDeadlineSeconds + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + r.EncodeInt(int64(yy14)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("activeDeadlineSeconds")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ActiveDeadlineSeconds == nil { + r.EncodeNil() + } else { + yy16 := *x.ActiveDeadlineSeconds + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeInt(int64(yy16)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.Selector == nil { + r.EncodeNil() + } else { + x.Selector.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("selector")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Selector == nil { + r.EncodeNil() + } else { + x.Selector.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.ManualSelector == nil { + r.EncodeNil() + } else { + yy22 := *x.ManualSelector + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeBool(bool(yy22)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("manualSelector")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ManualSelector == nil { + r.EncodeNil() + } else { + yy24 := *x.ManualSelector + yym25 := z.EncBinary() + _ = yym25 + if false { + } else { + r.EncodeBool(bool(yy24)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy27 := &x.Template + yy27.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("template")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy29 := &x.Template + 
yy29.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *JobSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *JobSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "parallelism": + if r.TryDecodeAsNil() { + if x.Parallelism != nil { + x.Parallelism = nil + } + } else { + if x.Parallelism == nil { + x.Parallelism = new(int32) + } + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int32)(x.Parallelism)) = int32(r.DecodeInt(32)) + } + } + case "completions": + if r.TryDecodeAsNil() { + if x.Completions != nil { + x.Completions = nil + } + } else { + if x.Completions == nil { + x.Completions = new(int32) + } + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*int32)(x.Completions)) = int32(r.DecodeInt(32)) + } + } + case "activeDeadlineSeconds": + if r.TryDecodeAsNil() { + if x.ActiveDeadlineSeconds != nil { + x.ActiveDeadlineSeconds = nil + } + } else { + if x.ActiveDeadlineSeconds == nil { + x.ActiveDeadlineSeconds = new(int64) + } + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*int64)(x.ActiveDeadlineSeconds)) = int64(r.DecodeInt(64)) + } + } + case "selector": + if r.TryDecodeAsNil() { + if x.Selector != nil { + x.Selector = nil + } + } else { + if x.Selector == nil { + x.Selector = new(LabelSelector) + } + x.Selector.CodecDecodeSelf(d) + } + case "manualSelector": + if r.TryDecodeAsNil() { + if x.ManualSelector != nil { + x.ManualSelector = nil + } + } else { + if x.ManualSelector == nil { + x.ManualSelector = new(bool) + } + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*bool)(x.ManualSelector)) = r.DecodeBool() + } + } + case "template": + if r.TryDecodeAsNil() { + x.Template = pkg2_v1.PodTemplateSpec{} + } else { + yyv13 := &x.Template + yyv13.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *JobSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj14 int + var yyb14 bool + var yyhl14 bool = l >= 0 + yyj14++ + if 
yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Parallelism != nil { + x.Parallelism = nil + } + } else { + if x.Parallelism == nil { + x.Parallelism = new(int32) + } + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*int32)(x.Parallelism)) = int32(r.DecodeInt(32)) + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Completions != nil { + x.Completions = nil + } + } else { + if x.Completions == nil { + x.Completions = new(int32) + } + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*int32)(x.Completions)) = int32(r.DecodeInt(32)) + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ActiveDeadlineSeconds != nil { + x.ActiveDeadlineSeconds = nil + } + } else { + if x.ActiveDeadlineSeconds == nil { + x.ActiveDeadlineSeconds = new(int64) + } + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*int64)(x.ActiveDeadlineSeconds)) = int64(r.DecodeInt(64)) + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Selector != nil { + x.Selector = nil + } + } else { + if x.Selector == nil { + x.Selector = new(LabelSelector) + } + x.Selector.CodecDecodeSelf(d) + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ManualSelector != nil { + x.ManualSelector = nil + } + } else { + if x.ManualSelector == nil { + x.ManualSelector = new(bool) + } + yym23 := z.DecBinary() + _ = yym23 + if false { + } else { + *((*bool)(x.ManualSelector)) = r.DecodeBool() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Template = pkg2_v1.PodTemplateSpec{} + } else { + yyv24 := &x.Template + yyv24.CodecDecodeSelf(d) + } + for { + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj14-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *JobStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [6]bool + _, _, _ = yysep2, yyq2, 
yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.Conditions) != 0 + yyq2[1] = x.StartTime != nil + yyq2[2] = x.CompletionTime != nil + yyq2[3] = x.Active != 0 + yyq2[4] = x.Succeeded != 0 + yyq2[5] = x.Failed != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(6) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Conditions == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSliceJobCondition(([]JobCondition)(x.Conditions), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("conditions")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Conditions == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSliceJobCondition(([]JobCondition)(x.Conditions), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.StartTime == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.EncExt(x.StartTime) { + } else if yym7 { + z.EncBinaryMarshal(x.StartTime) + } else if !yym7 && z.IsJSONHandle() { + z.EncJSONMarshal(x.StartTime) + } else { + z.EncFallback(x.StartTime) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("startTime")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.StartTime == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.EncExt(x.StartTime) { + } else if yym8 { + z.EncBinaryMarshal(x.StartTime) + } else if !yym8 && z.IsJSONHandle() { + z.EncJSONMarshal(x.StartTime) + } else { + z.EncFallback(x.StartTime) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.CompletionTime == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.EncExt(x.CompletionTime) { + } else if yym10 { + z.EncBinaryMarshal(x.CompletionTime) + } else if !yym10 && z.IsJSONHandle() { + z.EncJSONMarshal(x.CompletionTime) + } else { + z.EncFallback(x.CompletionTime) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("completionTime")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.CompletionTime == nil { + r.EncodeNil() + } else { + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(x.CompletionTime) { + } else if yym11 { + z.EncBinaryMarshal(x.CompletionTime) + } else if !yym11 && z.IsJSONHandle() { + z.EncJSONMarshal(x.CompletionTime) + } else { + z.EncFallback(x.CompletionTime) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeInt(int64(x.Active)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, 
string("active")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeInt(int64(x.Active)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeInt(int64(x.Succeeded)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("succeeded")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeInt(int64(x.Succeeded)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeInt(int64(x.Failed)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("failed")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeInt(int64(x.Failed)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *JobStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *JobStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "conditions": + if r.TryDecodeAsNil() { + x.Conditions = nil + } else { + yyv4 := &x.Conditions + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSliceJobCondition((*[]JobCondition)(yyv4), d) + } + } + case "startTime": + if r.TryDecodeAsNil() { + if x.StartTime != nil { + x.StartTime = nil + } + } else { + if x.StartTime == nil { + x.StartTime = new(pkg1_unversioned.Time) + } + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(x.StartTime) { + } else if yym7 { + z.DecBinaryUnmarshal(x.StartTime) + } else if !yym7 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.StartTime) + } else { + z.DecFallback(x.StartTime, false) + } + } + case "completionTime": + if r.TryDecodeAsNil() { + if 
x.CompletionTime != nil { + x.CompletionTime = nil + } + } else { + if x.CompletionTime == nil { + x.CompletionTime = new(pkg1_unversioned.Time) + } + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(x.CompletionTime) { + } else if yym9 { + z.DecBinaryUnmarshal(x.CompletionTime) + } else if !yym9 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.CompletionTime) + } else { + z.DecFallback(x.CompletionTime, false) + } + } + case "active": + if r.TryDecodeAsNil() { + x.Active = 0 + } else { + x.Active = int32(r.DecodeInt(32)) + } + case "succeeded": + if r.TryDecodeAsNil() { + x.Succeeded = 0 + } else { + x.Succeeded = int32(r.DecodeInt(32)) + } + case "failed": + if r.TryDecodeAsNil() { + x.Failed = 0 + } else { + x.Failed = int32(r.DecodeInt(32)) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *JobStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj13 int + var yyb13 bool + var yyhl13 bool = l >= 0 + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Conditions = nil + } else { + yyv14 := &x.Conditions + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + h.decSliceJobCondition((*[]JobCondition)(yyv14), d) + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.StartTime != nil { + x.StartTime = nil + } + } else { + if x.StartTime == nil { + x.StartTime = new(pkg1_unversioned.Time) + } + yym17 := z.DecBinary() + _ = yym17 + if false { + } else if z.HasExtensions() && z.DecExt(x.StartTime) { + } else if yym17 { + z.DecBinaryUnmarshal(x.StartTime) + } else if !yym17 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.StartTime) + } else { + z.DecFallback(x.StartTime, false) + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.CompletionTime != nil { + x.CompletionTime = nil + } + } else { + if x.CompletionTime == nil { + x.CompletionTime = new(pkg1_unversioned.Time) + } + yym19 := z.DecBinary() + _ = yym19 + if false { + } else if z.HasExtensions() && z.DecExt(x.CompletionTime) { + } else if yym19 { + z.DecBinaryUnmarshal(x.CompletionTime) + } else if !yym19 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.CompletionTime) + } else { + z.DecFallback(x.CompletionTime, false) + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Active = 0 + } else { + x.Active = int32(r.DecodeInt(32)) + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + 
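+	// NOTE: positional array decoding — yyj13 counts elements consumed and
+	// yyb13 flips to true once the container is exhausted (yyj13 > l when the
+	// length was known up front, r.CheckBreak() for indefinite-length streams);
+	// fields are consumed in declared struct order.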
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Succeeded = 0 + } else { + x.Succeeded = int32(r.DecodeInt(32)) + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Failed = 0 + } else { + x.Failed = int32(r.DecodeInt(32)) + } + for { + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj13-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x JobConditionType) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *JobConditionType) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *JobCondition) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [6]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[2] = true + yyq2[3] = true + yyq2[4] = x.Reason != "" + yyq2[5] = x.Message != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(6) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + x.Type.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("type")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Type.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yysf7 := &x.Status + yysf7.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yysf8 := &x.Status + yysf8.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.LastProbeTime + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else if yym11 { + z.EncBinaryMarshal(yy10) + } else if !yym11 && z.IsJSONHandle() { + z.EncJSONMarshal(yy10) + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("lastProbeTime")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.LastProbeTime + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && 
z.EncExt(yy12) { + } else if yym13 { + z.EncBinaryMarshal(yy12) + } else if !yym13 && z.IsJSONHandle() { + z.EncJSONMarshal(yy12) + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.LastTransitionTime + yym16 := z.EncBinary() + _ = yym16 + if false { + } else if z.HasExtensions() && z.EncExt(yy15) { + } else if yym16 { + z.EncBinaryMarshal(yy15) + } else if !yym16 && z.IsJSONHandle() { + z.EncJSONMarshal(yy15) + } else { + z.EncFallback(yy15) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("lastTransitionTime")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.LastTransitionTime + yym18 := z.EncBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.EncExt(yy17) { + } else if yym18 { + z.EncBinaryMarshal(yy17) + } else if !yym18 && z.IsJSONHandle() { + z.EncJSONMarshal(yy17) + } else { + z.EncFallback(yy17) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("reason")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym21 := z.EncBinary() + _ = yym21 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("message")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym24 := z.EncBinary() + _ = yym24 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *JobCondition) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *JobCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for 
yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "type": + if r.TryDecodeAsNil() { + x.Type = "" + } else { + x.Type = JobConditionType(r.DecodeString()) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = "" + } else { + x.Status = pkg2_v1.ConditionStatus(r.DecodeString()) + } + case "lastProbeTime": + if r.TryDecodeAsNil() { + x.LastProbeTime = pkg1_unversioned.Time{} + } else { + yyv6 := &x.LastProbeTime + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(yyv6) { + } else if yym7 { + z.DecBinaryUnmarshal(yyv6) + } else if !yym7 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv6) + } else { + z.DecFallback(yyv6, false) + } + } + case "lastTransitionTime": + if r.TryDecodeAsNil() { + x.LastTransitionTime = pkg1_unversioned.Time{} + } else { + yyv8 := &x.LastTransitionTime + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else if yym9 { + z.DecBinaryUnmarshal(yyv8) + } else if !yym9 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv8) + } else { + z.DecFallback(yyv8, false) + } + } + case "reason": + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + x.Reason = string(r.DecodeString()) + } + case "message": + if r.TryDecodeAsNil() { + x.Message = "" + } else { + x.Message = string(r.DecodeString()) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *JobCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Type = "" + } else { + x.Type = JobConditionType(r.DecodeString()) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = "" + } else { + x.Status = pkg2_v1.ConditionStatus(r.DecodeString()) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.LastProbeTime = pkg1_unversioned.Time{} + } else { + yyv15 := &x.LastProbeTime + yym16 := z.DecBinary() + _ = yym16 + if false { + } else if z.HasExtensions() && z.DecExt(yyv15) { + } else if yym16 { + z.DecBinaryUnmarshal(yyv15) + } else if !yym16 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv15) + } else { + z.DecFallback(yyv15, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.LastTransitionTime = 
pkg1_unversioned.Time{} + } else { + yyv17 := &x.LastTransitionTime + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else if yym18 { + z.DecBinaryUnmarshal(yyv17) + } else if !yym18 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv17) + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + x.Reason = string(r.DecodeString()) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Message = "" + } else { + x.Message = string(r.DecodeString()) + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ScheduledJob) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = true + yyq2[1] = true + yyq2[2] = true + yyq2[3] = x.Kind != "" + yyq2[4] = x.APIVersion != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yy4 := &x.ObjectMeta + yy4.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.ObjectMeta + yy6.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yy9 := &x.Spec + yy9.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy11 := &x.Spec + yy11.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy14 := &x.Status + yy14.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy16 := &x.Status + yy16.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else 
{ + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ScheduledJob) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ScheduledJob) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv4 := &x.ObjectMeta + yyv4.CodecDecodeSelf(d) + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = ScheduledJobSpec{} + } else { + yyv5 := &x.Spec + yyv5.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = ScheduledJobStatus{} + } else { + yyv6 := &x.Status + yyv6.CodecDecodeSelf(d) + } + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ScheduledJob) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj9 int + var yyb9 bool + 
var yyhl9 bool = l >= 0 + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv10 := &x.ObjectMeta + yyv10.CodecDecodeSelf(d) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = ScheduledJobSpec{} + } else { + yyv11 := &x.Spec + yyv11.CodecDecodeSelf(d) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = ScheduledJobStatus{} + } else { + yyv12 := &x.Status + yyv12.CodecDecodeSelf(d) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + for { + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj9-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ScheduledJobList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = true + yyq2[2] = x.Kind != "" + yyq2[3] = x.APIVersion != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yy4 := &x.ListMeta + yym5 := z.EncBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.EncExt(yy4) { + } else { + z.EncFallback(yy4) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.ListMeta + yym7 := z.EncBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.EncExt(yy6) { + } else { + z.EncFallback(yy6) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym9 := z.EncBinary() + _ = yym9 + if false { + } 
else { + h.encSliceScheduledJob(([]ScheduledJob)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + h.encSliceScheduledJob(([]ScheduledJob)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym12 := z.EncBinary() + _ = yym12 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ScheduledJobList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ScheduledJobList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_unversioned.ListMeta{} + } else { + yyv4 := &x.ListMeta + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(yyv4) { + } else { + z.DecFallback(yyv4, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + 
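+	// NOTE: an explicit nil in the stream clears Items; otherwise decoding is
+	// delegated to the generated slice helper decSliceScheduledJob.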
} else { + yyv6 := &x.Items + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + h.decSliceScheduledJob((*[]ScheduledJob)(yyv6), d) + } + } + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ScheduledJobList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_unversioned.ListMeta{} + } else { + yyv11 := &x.ListMeta + yym12 := z.DecBinary() + _ = yym12 + if false { + } else if z.HasExtensions() && z.DecExt(yyv11) { + } else { + z.DecFallback(yyv11, false) + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv13 := &x.Items + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + h.decSliceScheduledJob((*[]ScheduledJob)(yyv13), d) + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ScheduledJobSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.StartingDeadlineSeconds != nil + yyq2[2] = x.ConcurrencyPolicy != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 3 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Schedule)) + } + } 
else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("schedule")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Schedule)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.StartingDeadlineSeconds == nil { + r.EncodeNil() + } else { + yy7 := *x.StartingDeadlineSeconds + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeInt(int64(yy7)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("startingDeadlineSeconds")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.StartingDeadlineSeconds == nil { + r.EncodeNil() + } else { + yy9 := *x.StartingDeadlineSeconds + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(yy9)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + x.ConcurrencyPolicy.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("concurrencyPolicy")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.ConcurrencyPolicy.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + r.EncodeBool(bool(x.Suspend)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("suspend")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeBool(bool(x.Suspend)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy18 := &x.JobTemplate + yy18.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("jobTemplate")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy20 := &x.JobTemplate + yy20.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ScheduledJobSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ScheduledJobSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default 
slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "schedule": + if r.TryDecodeAsNil() { + x.Schedule = "" + } else { + x.Schedule = string(r.DecodeString()) + } + case "startingDeadlineSeconds": + if r.TryDecodeAsNil() { + if x.StartingDeadlineSeconds != nil { + x.StartingDeadlineSeconds = nil + } + } else { + if x.StartingDeadlineSeconds == nil { + x.StartingDeadlineSeconds = new(int64) + } + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + *((*int64)(x.StartingDeadlineSeconds)) = int64(r.DecodeInt(64)) + } + } + case "concurrencyPolicy": + if r.TryDecodeAsNil() { + x.ConcurrencyPolicy = "" + } else { + x.ConcurrencyPolicy = ConcurrencyPolicy(r.DecodeString()) + } + case "suspend": + if r.TryDecodeAsNil() { + x.Suspend = false + } else { + x.Suspend = bool(r.DecodeBool()) + } + case "jobTemplate": + if r.TryDecodeAsNil() { + x.JobTemplate = JobTemplateSpec{} + } else { + yyv9 := &x.JobTemplate + yyv9.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ScheduledJobSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Schedule = "" + } else { + x.Schedule = string(r.DecodeString()) + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.StartingDeadlineSeconds != nil { + x.StartingDeadlineSeconds = nil + } + } else { + if x.StartingDeadlineSeconds == nil { + x.StartingDeadlineSeconds = new(int64) + } + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*int64)(x.StartingDeadlineSeconds)) = int64(r.DecodeInt(64)) + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ConcurrencyPolicy = "" + } else { + x.ConcurrencyPolicy = ConcurrencyPolicy(r.DecodeString()) + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Suspend = false + } else { + x.Suspend = bool(r.DecodeBool()) + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.JobTemplate = 
JobTemplateSpec{} + } else { + yyv16 := &x.JobTemplate + yyv16.CodecDecodeSelf(d) + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x ConcurrencyPolicy) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *ConcurrencyPolicy) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *ScheduledJobStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.Active) != 0 + yyq2[1] = x.LastScheduleTime != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Active == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSlicev1_ObjectReference(([]pkg2_v1.ObjectReference)(x.Active), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("active")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Active == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSlicev1_ObjectReference(([]pkg2_v1.ObjectReference)(x.Active), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.LastScheduleTime == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.EncExt(x.LastScheduleTime) { + } else if yym7 { + z.EncBinaryMarshal(x.LastScheduleTime) + } else if !yym7 && z.IsJSONHandle() { + z.EncJSONMarshal(x.LastScheduleTime) + } else { + z.EncFallback(x.LastScheduleTime) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("lastScheduleTime")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.LastScheduleTime == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.EncExt(x.LastScheduleTime) { + } else if yym8 { + z.EncBinaryMarshal(x.LastScheduleTime) + } else if !yym8 && z.IsJSONHandle() { + z.EncJSONMarshal(x.LastScheduleTime) + } else { + z.EncFallback(x.LastScheduleTime) + } + } + } + } + if yyr2 || yy2arr2 { + 
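+	// NOTE: close whichever container was opened above — array when the
+	// handle's StructToArray option is set (yy2arr2), map otherwise.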
z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ScheduledJobStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ScheduledJobStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "active": + if r.TryDecodeAsNil() { + x.Active = nil + } else { + yyv4 := &x.Active + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSlicev1_ObjectReference((*[]pkg2_v1.ObjectReference)(yyv4), d) + } + } + case "lastScheduleTime": + if r.TryDecodeAsNil() { + if x.LastScheduleTime != nil { + x.LastScheduleTime = nil + } + } else { + if x.LastScheduleTime == nil { + x.LastScheduleTime = new(pkg1_unversioned.Time) + } + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(x.LastScheduleTime) { + } else if yym7 { + z.DecBinaryUnmarshal(x.LastScheduleTime) + } else if !yym7 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.LastScheduleTime) + } else { + z.DecFallback(x.LastScheduleTime, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ScheduledJobStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Active = nil + } else { + yyv9 := &x.Active + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + h.decSlicev1_ObjectReference((*[]pkg2_v1.ObjectReference)(yyv9), d) + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.LastScheduleTime != nil { + x.LastScheduleTime = nil + } + } else { + if x.LastScheduleTime == nil { + x.LastScheduleTime = new(pkg1_unversioned.Time) + } + yym12 := z.DecBinary() + 
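+	// NOTE: yym12 is true for binary handles, so unversioned.Time decodes via
+	// its BinaryUnmarshaler; JSON handles use its JSON unmarshaler; anything
+	// else falls back to reflection-based decoding.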
_ = yym12 + if false { + } else if z.HasExtensions() && z.DecExt(x.LastScheduleTime) { + } else if yym12 { + z.DecBinaryUnmarshal(x.LastScheduleTime) + } else if !yym12 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.LastScheduleTime) + } else { + z.DecFallback(x.LastScheduleTime, false) + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *LabelSelector) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.MatchLabels) != 0 + yyq2[1] = len(x.MatchExpressions) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.MatchLabels == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + z.F.EncMapStringStringV(x.MatchLabels, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("matchLabels")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.MatchLabels == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + z.F.EncMapStringStringV(x.MatchLabels, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.MatchExpressions == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + h.encSliceLabelSelectorRequirement(([]LabelSelectorRequirement)(x.MatchExpressions), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("matchExpressions")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.MatchExpressions == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + h.encSliceLabelSelectorRequirement(([]LabelSelectorRequirement)(x.MatchExpressions), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *LabelSelector) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + 
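+	// NOTE: a zero-length array still has to emit its end-of-container event
+	// before returning, so the per-element decoder is skipped entirely.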
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *LabelSelector) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "matchLabels": + if r.TryDecodeAsNil() { + x.MatchLabels = nil + } else { + yyv4 := &x.MatchLabels + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + z.F.DecMapStringStringX(yyv4, false, d) + } + } + case "matchExpressions": + if r.TryDecodeAsNil() { + x.MatchExpressions = nil + } else { + yyv6 := &x.MatchExpressions + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + h.decSliceLabelSelectorRequirement((*[]LabelSelectorRequirement)(yyv6), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *LabelSelector) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.MatchLabels = nil + } else { + yyv9 := &x.MatchLabels + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + z.F.DecMapStringStringX(yyv9, false, d) + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.MatchExpressions = nil + } else { + yyv11 := &x.MatchExpressions + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + h.decSliceLabelSelectorRequirement((*[]LabelSelectorRequirement)(yyv11), d) + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *LabelSelectorRequirement) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[2] = len(x.Values) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + 
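+	// NOTE: in array form, key and operator are always written positionally;
+	// values (yyq2[2]) collapses to an encoded nil when the slice is empty.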
z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Key)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("key")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Key)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + x.Operator.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("operator")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Operator.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.Values == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + z.F.EncSliceStringV(x.Values, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("values")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Values == nil { + r.EncodeNil() + } else { + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + z.F.EncSliceStringV(x.Values, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *LabelSelectorRequirement) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *LabelSelectorRequirement) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "key": + if r.TryDecodeAsNil() { + x.Key = "" + } else { + x.Key = string(r.DecodeString()) + } + case "operator": + if r.TryDecodeAsNil() { + x.Operator = "" + } else { + x.Operator = LabelSelectorOperator(r.DecodeString()) + } + case "values": + if r.TryDecodeAsNil() { + x.Values = nil + } else { + yyv6 := &x.Values + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + z.F.DecSliceStringX(yyv6, false, d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end 
switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *LabelSelectorRequirement) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Key = "" + } else { + x.Key = string(r.DecodeString()) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Operator = "" + } else { + x.Operator = LabelSelectorOperator(r.DecodeString()) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Values = nil + } else { + yyv11 := &x.Values + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + z.F.DecSliceStringX(yyv11, false, d) + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x LabelSelectorOperator) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *LabelSelectorOperator) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x codecSelfer1234) encSliceJob(v []Job, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceJob(v *[]Job, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []Job{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 792) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]Job, yyrl1) + } + } else { + yyv1 = make([]Job, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != 
len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Job{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, Job{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Job{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, Job{}) // var yyz1 Job + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = Job{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []Job{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceJobCondition(v []JobCondition, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceJobCondition(v *[]JobCondition, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []JobCondition{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 112) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]JobCondition, yyrl1) + } + } else { + yyv1 = make([]JobCondition, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = JobCondition{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, JobCondition{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = JobCondition{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, JobCondition{}) // var yyz1 JobCondition + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = JobCondition{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []JobCondition{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceScheduledJob(v []ScheduledJob, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + 
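+	// Like every generated slice encoder in this file: write the array header
+	// with the element count, signal element container state before each entry
+	// so the handle can emit separators, hand each element to its generated
+	// CodecEncodeSelf, then close the array container.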
r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceScheduledJob(v *[]ScheduledJob, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []ScheduledJob{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 1024) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]ScheduledJob, yyrl1) + } + } else { + yyv1 = make([]ScheduledJob, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ScheduledJob{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, ScheduledJob{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ScheduledJob{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, ScheduledJob{}) // var yyz1 ScheduledJob + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = ScheduledJob{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []ScheduledJob{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSlicev1_ObjectReference(v []pkg2_v1.ObjectReference, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSlicev1_ObjectReference(v *[]pkg2_v1.ObjectReference, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []pkg2_v1.ObjectReference{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 112) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]pkg2_v1.ObjectReference, yyrl1) + } + } else { + yyv1 = make([]pkg2_v1.ObjectReference, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } 
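+			// The allocation above is capped by DecInferLen, which combines
+			// the handle's MaxInitLen with the per-element size hint (112
+			// bytes here), so a hostile length prefix cannot force a huge
+			// up-front make; if the inferred length was truncated (yyrt1),
+			// the remaining elements are appended one at a time below.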
+ } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = pkg2_v1.ObjectReference{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, pkg2_v1.ObjectReference{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = pkg2_v1.ObjectReference{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, pkg2_v1.ObjectReference{}) // var yyz1 pkg2_v1.ObjectReference + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = pkg2_v1.ObjectReference{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []pkg2_v1.ObjectReference{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceLabelSelectorRequirement(v []LabelSelectorRequirement, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceLabelSelectorRequirement(v *[]LabelSelectorRequirement, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []LabelSelectorRequirement{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 56) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]LabelSelectorRequirement, yyrl1) + } + } else { + yyv1 = make([]LabelSelectorRequirement, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = LabelSelectorRequirement{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, LabelSelectorRequirement{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = LabelSelectorRequirement{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, LabelSelectorRequirement{}) // var yyz1 LabelSelectorRequirement + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = LabelSelectorRequirement{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = 
true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []LabelSelectorRequirement{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/types.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/types.go new file mode 100644 index 000000000000..f33dfc1ed6d0 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/types.go @@ -0,0 +1,283 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v2alpha1 + +import ( + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/api/v1" +) + +// Job represents the configuration of a single job. +type Job struct { + unversioned.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec is a structure defining the expected behavior of a job. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + Spec JobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Status is a structure describing current status of a job. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + Status JobStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// JobList is a collection of jobs. +type JobList struct { + unversioned.TypeMeta `json:",inline"` + // Standard list metadata + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of Job. + Items []Job `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// JobTemplate describes a template for creating copies of a predefined pod. +type JobTemplate struct { + unversioned.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Template defines jobs that will be created from this template + // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + Template JobTemplateSpec `json:"template,omitempty" protobuf:"bytes,2,opt,name=template"` +} + +// JobTemplateSpec describes the data a Job should have when created from a template +type JobTemplateSpec struct { + // Standard object's metadata of the jobs created from this template. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Specification of the desired behavior of the job. 
+	// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+	Spec JobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+}
+
+// JobSpec describes how the job execution will look.
+type JobSpec struct {
+
+	// Parallelism specifies the maximum desired number of pods the job should
+	// run at any given time. The actual number of pods running in steady state will
+	// be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism),
+	// i.e. when the work left to do is less than max parallelism.
+	// More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md
+	Parallelism *int32 `json:"parallelism,omitempty" protobuf:"varint,1,opt,name=parallelism"`
+
+	// Completions specifies the desired number of successfully finished pods the
+	// job should be run with. Setting to nil means that the success of any
+	// pod signals the success of all pods, and allows parallelism to have any positive
+	// value. Setting to 1 means that parallelism is limited to 1 and the success of that
+	// pod signals the success of the job.
+	// More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md
+	Completions *int32 `json:"completions,omitempty" protobuf:"varint,2,opt,name=completions"`
+
+	// Optional duration in seconds relative to the startTime that the job may be active
+	// before the system tries to terminate it; value must be a positive integer.
+	ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty" protobuf:"varint,3,opt,name=activeDeadlineSeconds"`
+
+	// Selector is a label query over pods that should match the pod count.
+	// Normally, the system sets this field for you.
+	// More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors
+	Selector *LabelSelector `json:"selector,omitempty" protobuf:"bytes,4,opt,name=selector"`
+
+	// ManualSelector controls generation of pod labels and pod selectors.
+	// Leave `manualSelector` unset unless you are certain what you are doing.
+	// When false or unset, the system picks labels unique to this job
+	// and appends those labels to the pod template. When true,
+	// the user is responsible for picking unique labels and specifying
+	// the selector. Failure to pick a unique label may cause this
+	// and other jobs to not function correctly. However, you may see
+	// `manualSelector=true` in jobs that were created with the old `extensions/v1beta1`
+	// API.
+	// More info: http://releases.k8s.io/HEAD/docs/design/selector-generation.md
+	ManualSelector *bool `json:"manualSelector,omitempty" protobuf:"varint,5,opt,name=manualSelector"`
+
+	// Template is the object that describes the pod that will be created when
+	// executing a job.
+	// More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md
+	Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,6,opt,name=template"`
+}
+
+// JobStatus represents the current state of a Job.
+type JobStatus struct {
+
+	// Conditions represent the latest available observations of an object's current state.
+	// More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md
+	Conditions []JobCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"`
+
+	// StartTime represents the time when the job was acknowledged by the Job Manager.
+	// It is not guaranteed to be set in happens-before order across separate operations.
+	// It is represented in RFC3339 form and is in UTC.
+	StartTime *unversioned.Time `json:"startTime,omitempty" protobuf:"bytes,2,opt,name=startTime"`
+
+	// CompletionTime represents the time when the job was completed. It is not guaranteed to
+	// be set in happens-before order across separate operations.
+	// It is represented in RFC3339 form and is in UTC.
+	CompletionTime *unversioned.Time `json:"completionTime,omitempty" protobuf:"bytes,3,opt,name=completionTime"`
+
+	// Active is the number of actively running pods.
+	Active int32 `json:"active,omitempty" protobuf:"varint,4,opt,name=active"`
+
+	// Succeeded is the number of pods which reached Phase Succeeded.
+	Succeeded int32 `json:"succeeded,omitempty" protobuf:"varint,5,opt,name=succeeded"`
+
+	// Failed is the number of pods which reached Phase Failed.
+	Failed int32 `json:"failed,omitempty" protobuf:"varint,6,opt,name=failed"`
+}
+
+type JobConditionType string
+
+// These are valid conditions of a job.
+const (
+	// JobComplete means the job has completed its execution.
+	JobComplete JobConditionType = "Complete"
+	// JobFailed means the job has failed its execution.
+	JobFailed JobConditionType = "Failed"
+)
+
+// JobCondition describes current state of a job.
+type JobCondition struct {
+	// Type of job condition, Complete or Failed.
+	Type JobConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=JobConditionType"`
+	// Status of the condition, one of True, False, Unknown.
+	Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/kubernetes/pkg/api/v1.ConditionStatus"`
+	// Last time the condition was checked.
+	LastProbeTime unversioned.Time `json:"lastProbeTime,omitempty" protobuf:"bytes,3,opt,name=lastProbeTime"`
+	// Last time the condition transitioned from one status to another.
+	LastTransitionTime unversioned.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"`
+	// (brief) reason for the condition's last transition.
+	Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"`
+	// Human readable message indicating details about last transition.
+	Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"`
+}
+
+// ScheduledJob represents the configuration of a single scheduled job.
+type ScheduledJob struct {
+	unversioned.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+	v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Spec is a structure defining the expected behavior of a job, including the schedule.
+	// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+	Spec ScheduledJobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+
+	// Status is a structure describing current status of a job.
+	// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+	Status ScheduledJobStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// ScheduledJobList is a collection of scheduled jobs.
+type ScheduledJobList struct {
+	unversioned.TypeMeta `json:",inline"`
+	// Standard list metadata
+	// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+	unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Items is the list of ScheduledJob.
+	Items []ScheduledJob `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// ScheduledJobSpec describes how the job execution will look and when it will actually run.
+type ScheduledJobSpec struct {
+
+	// Schedule contains the schedule in Cron format, see https://en.wikipedia.org/wiki/Cron.
+	Schedule string `json:"schedule" protobuf:"bytes,1,opt,name=schedule"`
+
+	// Optional deadline in seconds for starting the job if it misses its scheduled
+	// time for any reason. Missed job executions will be counted as failed ones.
+	StartingDeadlineSeconds *int64 `json:"startingDeadlineSeconds,omitempty" protobuf:"varint,2,opt,name=startingDeadlineSeconds"`
+
+	// ConcurrencyPolicy specifies how to treat concurrent executions of a Job.
+	ConcurrencyPolicy ConcurrencyPolicy `json:"concurrencyPolicy,omitempty" protobuf:"bytes,3,opt,name=concurrencyPolicy,casttype=ConcurrencyPolicy"`
+
+	// Suspend flag tells the controller to suspend subsequent executions; it does
+	// not apply to already started executions. Defaults to false.
+	Suspend bool `json:"suspend" protobuf:"varint,4,opt,name=suspend"`
+
+	// JobTemplate is the object that describes the job that will be created when
+	// executing a ScheduledJob.
+	JobTemplate JobTemplateSpec `json:"jobTemplate" protobuf:"bytes,5,opt,name=jobTemplate"`
+}
+
+// ConcurrencyPolicy describes how the job will be handled.
+// Only one of the following concurrent policies may be specified.
+// If none of the following policies is specified, the default one
+// is AllowConcurrent.
+type ConcurrencyPolicy string
+
+const (
+	// AllowConcurrent allows ScheduledJobs to run concurrently.
+	AllowConcurrent ConcurrencyPolicy = "Allow"
+
+	// ForbidConcurrent forbids concurrent runs, skipping the next run if the
+	// previous one hasn't finished yet.
+	ForbidConcurrent ConcurrencyPolicy = "Forbid"
+
+	// ReplaceConcurrent cancels the currently running job and replaces it with a new one.
+	ReplaceConcurrent ConcurrencyPolicy = "Replace"
+)
+
+// ScheduledJobStatus represents the current state of a ScheduledJob.
+type ScheduledJobStatus struct {
+	// Active holds pointers to currently running jobs.
+	Active []v1.ObjectReference `json:"active,omitempty" protobuf:"bytes,1,rep,name=active"`
+
+	// LastScheduleTime records the last time the job was successfully scheduled.
+	LastScheduleTime *unversioned.Time `json:"lastScheduleTime,omitempty" protobuf:"bytes,4,opt,name=lastScheduleTime"`
+}
+
+// A label selector is a label query over a set of resources. The result of matchLabels and
+// matchExpressions are ANDed. An empty label selector matches all objects. A null
+// label selector matches no objects.
+type LabelSelector struct {
+	// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+	// map is equivalent to an element of matchExpressions, whose key field is "key", the
+	// operator is "In", and the values array contains only "value". The requirements are ANDed.
+	MatchLabels map[string]string `json:"matchLabels,omitempty" protobuf:"bytes,1,rep,name=matchLabels"`
+	// matchExpressions is a list of label selector requirements. The requirements are ANDed.
+	MatchExpressions []LabelSelectorRequirement `json:"matchExpressions,omitempty" protobuf:"bytes,2,rep,name=matchExpressions"`
+}
+
+// A label selector requirement is a selector that contains values, a key, and an operator that
+// relates the key and values.
+type LabelSelectorRequirement struct {
+	// key is the label key that the selector applies to.
+	Key string `json:"key" patchStrategy:"merge" patchMergeKey:"key" protobuf:"bytes,1,opt,name=key"`
+	// operator represents a key's relationship to a set of values.
+	// Valid operators are In, NotIn, Exists and DoesNotExist.
+	Operator LabelSelectorOperator `json:"operator" protobuf:"bytes,2,opt,name=operator,casttype=LabelSelectorOperator"`
+	// values is an array of string values. If the operator is In or NotIn,
+	// the values array must be non-empty. If the operator is Exists or DoesNotExist,
+	// the values array must be empty. This array is replaced during a strategic
+	// merge patch.
+	Values []string `json:"values,omitempty" protobuf:"bytes,3,rep,name=values"`
+}
+
+// A label selector operator is the set of operators that can be used in a selector requirement.
+type LabelSelectorOperator string
+
+const (
+	LabelSelectorOpIn           LabelSelectorOperator = "In"
+	LabelSelectorOpNotIn        LabelSelectorOperator = "NotIn"
+	LabelSelectorOpExists       LabelSelectorOperator = "Exists"
+	LabelSelectorOpDoesNotExist LabelSelectorOperator = "DoesNotExist"
+)
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/types_swagger_doc_generated.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/types_swagger_doc_generated.go
new file mode 100644
index 000000000000..7f0e3b19975c
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/types_swagger_doc_generated.go
@@ -0,0 +1,178 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v2alpha1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple line or blocks that you want to ignore use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-generated-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE
+var map_Job = map[string]string{
+	"":         "Job represents the configuration of a single job.",
+	"metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata",
+	"spec":     "Spec is a structure defining the expected behavior of a job. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status",
+	"status":   "Status is a structure describing current status of a job. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status",
+}
+
+func (Job) SwaggerDoc() map[string]string {
+	return map_Job
+}
+
+var map_JobCondition = map[string]string{
+	"":                   "JobCondition describes current state of a job.",
+	"type":               "Type of job condition, Complete or Failed.",
+	"status":             "Status of the condition, one of True, False, Unknown.",
+	"lastProbeTime":      "Last time the condition was checked.",
+	"lastTransitionTime": "Last time the condition transitioned from one status to another.",
+	"reason":             "(brief) reason for the condition's last transition.",
+	"message":            "Human readable message indicating details about last transition.",
+}
+
+func (JobCondition) SwaggerDoc() map[string]string {
+	return map_JobCondition
+}
+
+var map_JobList = map[string]string{
+	"":         "JobList is a collection of jobs.",
+	"metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata",
+	"items":    "Items is the list of Job.",
+}
+
+func (JobList) SwaggerDoc() map[string]string {
+	return map_JobList
+}
+
+var map_JobSpec = map[string]string{
+	"":                      "JobSpec describes how the job execution will look.",
+	"parallelism":           "Parallelism specifies the maximum desired number of pods the job should run at any given time. The actual number of pods running in steady state will be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), i.e. when the work left to do is less than max parallelism. More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md",
+	"completions":           "Completions specifies the desired number of successfully finished pods the job should be run with. Setting to nil means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md",
+	"activeDeadlineSeconds": "Optional duration in seconds relative to the startTime that the job may be active before the system tries to terminate it; value must be a positive integer.",
+	"selector":              "Selector is a label query over pods that should match the pod count. Normally, the system sets this field for you. More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors",
+	"manualSelector":        "ManualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system picks labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, you may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. More info: http://releases.k8s.io/HEAD/docs/design/selector-generation.md",
+	"template":              "Template is the object that describes the pod that will be created when executing a job. More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md",
+}
+
+func (JobSpec) SwaggerDoc() map[string]string {
+	return map_JobSpec
+}
+
+var map_JobStatus = map[string]string{
+	"":               "JobStatus represents the current state of a Job.",
+	"conditions":     "Conditions represent the latest available observations of an object's current state. More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md",
+	"startTime":      "StartTime represents the time when the job was acknowledged by the Job Manager. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC.",
+	"completionTime": "CompletionTime represents the time when the job was completed. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC.",
+	"active":         "Active is the number of actively running pods.",
+	"succeeded":      "Succeeded is the number of pods which reached Phase Succeeded.",
+	"failed":         "Failed is the number of pods which reached Phase Failed.",
+}
+
+func (JobStatus) SwaggerDoc() map[string]string {
+	return map_JobStatus
+}
+
+var map_JobTemplate = map[string]string{
+	"":         "JobTemplate describes a template for creating copies of a predefined pod.",
+	"metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata",
+	"template": "Template defines jobs that will be created from this template http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status",
+}
+
+func (JobTemplate) SwaggerDoc() map[string]string {
+	return map_JobTemplate
+}
+
+var map_JobTemplateSpec = map[string]string{
+	"":         "JobTemplateSpec describes the data a Job should have when created from a template",
+	"metadata": "Standard object's metadata of the jobs created from this template. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata",
+	"spec":     "Specification of the desired behavior of the job. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status",
+}
+
+func (JobTemplateSpec) SwaggerDoc() map[string]string {
+	return map_JobTemplateSpec
+}
+
+var map_LabelSelector = map[string]string{
+	"":                 "A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects.",
+	"matchLabels":      "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed.",
+	"matchExpressions": "matchExpressions is a list of label selector requirements. The requirements are ANDed.",
+}
+
+func (LabelSelector) SwaggerDoc() map[string]string {
+	return map_LabelSelector
+}
+
+var map_LabelSelectorRequirement = map[string]string{
+	"":         "A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.",
+	"key":      "key is the label key that the selector applies to.",
+	"operator": "operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.",
+	"values":   "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.",
+}
+
+func (LabelSelectorRequirement) SwaggerDoc() map[string]string {
+	return map_LabelSelectorRequirement
+}
+
+var map_ScheduledJob = map[string]string{
+	"":         "ScheduledJob represents the configuration of a single scheduled job.",
+	"metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata",
+	"spec":     "Spec is a structure defining the expected behavior of a job, including the schedule. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status",
+	"status":   "Status is a structure describing current status of a job. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status",
+}
+
+func (ScheduledJob) SwaggerDoc() map[string]string {
+	return map_ScheduledJob
+}
+
+var map_ScheduledJobList = map[string]string{
+	"":         "ScheduledJobList is a collection of scheduled jobs.",
+	"metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata",
+	"items":    "Items is the list of ScheduledJob.",
+}
+
+func (ScheduledJobList) SwaggerDoc() map[string]string {
+	return map_ScheduledJobList
+}
+
+var map_ScheduledJobSpec = map[string]string{
+	"":                        "ScheduledJobSpec describes how the job execution will look and when it will actually run.",
+	"schedule":                "Schedule contains the schedule in Cron format, see https://en.wikipedia.org/wiki/Cron.",
+	"startingDeadlineSeconds": "Optional deadline in seconds for starting the job if it misses its scheduled time for any reason. Missed job executions will be counted as failed ones.",
+	"concurrencyPolicy":       "ConcurrencyPolicy specifies how to treat concurrent executions of a Job.",
+	"suspend":                 "Suspend flag tells the controller to suspend subsequent executions; it does not apply to already started executions. Defaults to false.",
+	"jobTemplate":             "JobTemplate is the object that describes the job that will be created when executing a ScheduledJob.",
+}
+
+func (ScheduledJobSpec) SwaggerDoc() map[string]string {
+	return map_ScheduledJobSpec
+}
+
+var map_ScheduledJobStatus = map[string]string{
+	"":                 "ScheduledJobStatus represents the current state of a ScheduledJob.",
+	"active":           "Active holds pointers to currently running jobs.",
+	"lastScheduleTime": "LastScheduleTime records the last time the job was successfully scheduled.",
+}
+
+func (ScheduledJobStatus) SwaggerDoc() map[string]string {
+	return map_ScheduledJobStatus
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/validation/validation.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/validation/validation.go
new file mode 100644
index 000000000000..ca1ea5b9e958
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/validation/validation.go
@@ -0,0 +1,215 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package validation
+
+import (
+	"github.com/robfig/cron"
+
+	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/api/unversioned"
+	unversionedvalidation "k8s.io/kubernetes/pkg/api/unversioned/validation"
+	apivalidation "k8s.io/kubernetes/pkg/api/validation"
+	"k8s.io/kubernetes/pkg/apis/batch"
+	"k8s.io/kubernetes/pkg/labels"
+	"k8s.io/kubernetes/pkg/util/validation/field"
+)
+
+// TODO: generalize for other controller objects that will follow the same pattern, such as ReplicaSet and DaemonSet, and
+// move to new location. Replace batch.Job with an interface.
+//
+// ValidateGeneratedSelector validates that the generated selector on a controller object matches the controller object
+// metadata, and that the labels on the pod template are as generated.
+func ValidateGeneratedSelector(obj *batch.Job) field.ErrorList {
+	allErrs := field.ErrorList{}
+	if obj.Spec.ManualSelector != nil && *obj.Spec.ManualSelector {
+		return allErrs
+	}
+
+	if obj.Spec.Selector == nil {
+		return allErrs // This case should already have been checked in caller. No need for more errors.
+	}
+
+	// If somehow uid was unset then we would get "controller-uid=" as the selector
+	// which is bad.
+	if obj.ObjectMeta.UID == "" {
+		allErrs = append(allErrs, field.Required(field.NewPath("metadata").Child("uid"), ""))
+	}
+
+	// If selector generation was requested, then expected labels must be
+	// present on pod template, and must match the job's uid and name. The
+	// generated (not-manual) selectors/labels ensure no overlap with other
+	// controllers. The manual mode allows orphaning, adoption,
+	// backward-compatibility, and experimentation with new
+	// labeling/selection schemes. Automatic selector generation should
+	// have placed certain labels on the pod, but this could have failed if
+	// the user added conflicting labels. Validate that the expected
+	// generated ones are there.
+
+	allErrs = append(allErrs, apivalidation.ValidateHasLabel(obj.Spec.Template.ObjectMeta, field.NewPath("spec").Child("template").Child("metadata"), "controller-uid", string(obj.UID))...)
+	allErrs = append(allErrs, apivalidation.ValidateHasLabel(obj.Spec.Template.ObjectMeta, field.NewPath("spec").Child("template").Child("metadata"), "job-name", string(obj.Name))...)
+	expectedLabels := make(map[string]string)
+	expectedLabels["controller-uid"] = string(obj.UID)
+	expectedLabels["job-name"] = string(obj.Name)
+	// Whether manually or automatically generated, the selector of the job must match the pods it will produce.
+	if selector, err := unversioned.LabelSelectorAsSelector(obj.Spec.Selector); err == nil {
+		if !selector.Matches(labels.Set(expectedLabels)) {
+			allErrs = append(allErrs, field.Invalid(field.NewPath("spec").Child("selector"), obj.Spec.Selector, "`selector` not auto-generated"))
+		}
+	}
+
+	return allErrs
+}
+
+func ValidateJob(job *batch.Job) field.ErrorList {
+	// Jobs and rcs have the same name validation
+	allErrs := apivalidation.ValidateObjectMeta(&job.ObjectMeta, true, apivalidation.ValidateReplicationControllerName, field.NewPath("metadata"))
+	allErrs = append(allErrs, ValidateGeneratedSelector(job)...)
+	allErrs = append(allErrs, ValidateJobSpec(&job.Spec, field.NewPath("spec"))...)
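+	// Illustrative only (names and values assumed): a job that passes the
+	// generated-selector checks above looks like
+	//
+	//	job.UID = "1a2b3c"
+	//	job.Name = "pi"
+	//	job.Spec.Template.Labels = map[string]string{
+	//		"controller-uid": "1a2b3c",
+	//		"job-name":       "pi",
+	//	}
+	//	job.Spec.Selector = &unversioned.LabelSelector{MatchLabels: job.Spec.Template.Labels}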
+ return allErrs +} + +func ValidateJobSpec(spec *batch.JobSpec, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if spec.Parallelism != nil { + allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(*spec.Parallelism), fldPath.Child("parallelism"))...) + } + if spec.Completions != nil { + allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(*spec.Completions), fldPath.Child("completions"))...) + } + if spec.ActiveDeadlineSeconds != nil { + allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(*spec.ActiveDeadlineSeconds), fldPath.Child("activeDeadlineSeconds"))...) + } + if spec.Selector == nil { + allErrs = append(allErrs, field.Required(fldPath.Child("selector"), "")) + } else { + allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(spec.Selector, fldPath.Child("selector"))...) + } + + // Whether manually or automatically generated, the selector of the job must match the pods it will produce. + if selector, err := unversioned.LabelSelectorAsSelector(spec.Selector); err == nil { + labels := labels.Set(spec.Template.Labels) + if !selector.Matches(labels) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("template", "metadata", "labels"), spec.Template.Labels, "`selector` does not match template `labels`")) + } + } + + allErrs = append(allErrs, apivalidation.ValidatePodTemplateSpec(&spec.Template, fldPath.Child("template"))...) + if spec.Template.Spec.RestartPolicy != api.RestartPolicyOnFailure && + spec.Template.Spec.RestartPolicy != api.RestartPolicyNever { + allErrs = append(allErrs, field.NotSupported(fldPath.Child("template", "spec", "restartPolicy"), + spec.Template.Spec.RestartPolicy, []string{string(api.RestartPolicyOnFailure), string(api.RestartPolicyNever)})) + } + return allErrs +} + +func ValidateJobStatus(status *batch.JobStatus, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.Active), fldPath.Child("active"))...) + allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.Succeeded), fldPath.Child("succeeded"))...) + allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.Failed), fldPath.Child("failed"))...) + return allErrs +} + +func ValidateJobUpdate(job, oldJob *batch.Job) field.ErrorList { + allErrs := apivalidation.ValidateObjectMetaUpdate(&oldJob.ObjectMeta, &job.ObjectMeta, field.NewPath("metadata")) + allErrs = append(allErrs, ValidateJobSpecUpdate(job.Spec, oldJob.Spec, field.NewPath("spec"))...) + return allErrs +} + +func ValidateJobUpdateStatus(job, oldJob *batch.Job) field.ErrorList { + allErrs := apivalidation.ValidateObjectMetaUpdate(&oldJob.ObjectMeta, &job.ObjectMeta, field.NewPath("metadata")) + allErrs = append(allErrs, ValidateJobStatusUpdate(job.Status, oldJob.Status)...) + return allErrs +} + +func ValidateJobSpecUpdate(spec, oldSpec batch.JobSpec, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + allErrs = append(allErrs, ValidateJobSpec(&spec, fldPath)...) + allErrs = append(allErrs, apivalidation.ValidateImmutableField(spec.Completions, oldSpec.Completions, fldPath.Child("completions"))...) + allErrs = append(allErrs, apivalidation.ValidateImmutableField(spec.Selector, oldSpec.Selector, fldPath.Child("selector"))...) + allErrs = append(allErrs, apivalidation.ValidateImmutableField(spec.Template, oldSpec.Template, fldPath.Child("template"))...) 
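+	// The immutable-field checks above reject any change to completions,
+	// selector, or template; spec fields not listed here (e.g. parallelism)
+	// may still change on update.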
+ return allErrs +} + +func ValidateJobStatusUpdate(status, oldStatus batch.JobStatus) field.ErrorList { + allErrs := field.ErrorList{} + allErrs = append(allErrs, ValidateJobStatus(&status, field.NewPath("status"))...) + return allErrs +} + +func ValidateScheduledJob(scheduledJob *batch.ScheduledJob) field.ErrorList { + // ScheduledJobs and rcs have the same name validation + allErrs := apivalidation.ValidateObjectMeta(&scheduledJob.ObjectMeta, true, apivalidation.ValidateReplicationControllerName, field.NewPath("metadata")) + allErrs = append(allErrs, ValidateScheduledJobSpec(&scheduledJob.Spec, field.NewPath("spec"))...) + return allErrs +} + +func ValidateScheduledJobSpec(spec *batch.ScheduledJobSpec, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if len(spec.Schedule) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("schedule"), "")) + } else { + allErrs = append(allErrs, validateScheduleFormat(spec.Schedule, fldPath.Child("schedule"))...) + } + if spec.StartingDeadlineSeconds != nil { + allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(*spec.StartingDeadlineSeconds), fldPath.Child("startingDeadlineSeconds"))...) + } + allErrs = append(allErrs, validateConcurrencyPolicy(&spec.ConcurrencyPolicy, fldPath.Child("concurrencyPolicy"))...) + allErrs = append(allErrs, ValidateJobTemplateSpec(&spec.JobTemplate, fldPath.Child("jobTemplate"))...) + + return allErrs +} + +func validateConcurrencyPolicy(concurrencyPolicy *batch.ConcurrencyPolicy, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + switch *concurrencyPolicy { + case batch.AllowConcurrent, batch.ForbidConcurrent, batch.ReplaceConcurrent: + break + case "": + allErrs = append(allErrs, field.Required(fldPath, "")) + default: + validValues := []string{string(batch.AllowConcurrent), string(batch.ForbidConcurrent), string(batch.ReplaceConcurrent)} + allErrs = append(allErrs, field.NotSupported(fldPath, *concurrencyPolicy, validValues)) + } + + return allErrs +} + +func validateScheduleFormat(schedule string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + _, err := cron.Parse(schedule) + if err != nil { + allErrs = append(allErrs, field.Invalid(fldPath, schedule, err.Error())) + } + + return allErrs +} + +func ValidateJobTemplate(job *batch.JobTemplate) field.ErrorList { + // this method should be identical to ValidateJob + allErrs := apivalidation.ValidateObjectMeta(&job.ObjectMeta, true, apivalidation.ValidateReplicationControllerName, field.NewPath("metadata")) + allErrs = append(allErrs, ValidateJobTemplateSpec(&job.Template, field.NewPath("template"))...) + return allErrs +} + +func ValidateJobTemplateSpec(spec *batch.JobTemplateSpec, fldPath *field.Path) field.ErrorList { + // this method should be identical to ValidateJob + allErrs := ValidateJobSpec(&spec.Spec, fldPath.Child("spec")) + return allErrs +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/validation/validation_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/validation/validation_test.go new file mode 100644 index 000000000000..2f5fb534db23 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/batch/validation/validation_test.go @@ -0,0 +1,581 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package validation + +import ( + "strings" + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/apis/batch" + "k8s.io/kubernetes/pkg/types" +) + +func getValidManualSelector() *unversioned.LabelSelector { + return &unversioned.LabelSelector{ + MatchLabels: map[string]string{"a": "b"}, + } +} + +func getValidPodTemplateSpecForManual(selector *unversioned.LabelSelector) api.PodTemplateSpec { + return api.PodTemplateSpec{ + ObjectMeta: api.ObjectMeta{ + Labels: selector.MatchLabels, + }, + Spec: api.PodSpec{ + RestartPolicy: api.RestartPolicyOnFailure, + DNSPolicy: api.DNSClusterFirst, + Containers: []api.Container{{Name: "abc", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + }, + } +} + +func getValidGeneratedSelector() *unversioned.LabelSelector { + return &unversioned.LabelSelector{ + MatchLabels: map[string]string{"controller-uid": "1a2b3c", "job-name": "myjob"}, + } +} + +func getValidPodTemplateSpecForGenerated(selector *unversioned.LabelSelector) api.PodTemplateSpec { + return api.PodTemplateSpec{ + ObjectMeta: api.ObjectMeta{ + Labels: selector.MatchLabels, + }, + Spec: api.PodSpec{ + RestartPolicy: api.RestartPolicyOnFailure, + DNSPolicy: api.DNSClusterFirst, + Containers: []api.Container{{Name: "abc", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + }, + } +} + +func TestValidateJob(t *testing.T) { + validManualSelector := getValidManualSelector() + validPodTemplateSpecForManual := getValidPodTemplateSpecForManual(validManualSelector) + validGeneratedSelector := getValidGeneratedSelector() + validPodTemplateSpecForGenerated := getValidPodTemplateSpecForGenerated(validGeneratedSelector) + + successCases := map[string]batch.Job{ + "manual selector": { + ObjectMeta: api.ObjectMeta{ + Name: "myjob", + Namespace: api.NamespaceDefault, + UID: types.UID("1a2b3c"), + }, + Spec: batch.JobSpec{ + Selector: validManualSelector, + ManualSelector: newBool(true), + Template: validPodTemplateSpecForManual, + }, + }, + "generated selector": { + ObjectMeta: api.ObjectMeta{ + Name: "myjob", + Namespace: api.NamespaceDefault, + UID: types.UID("1a2b3c"), + }, + Spec: batch.JobSpec{ + Selector: validGeneratedSelector, + Template: validPodTemplateSpecForGenerated, + }, + }, + } + for k, v := range successCases { + if errs := ValidateJob(&v); len(errs) != 0 { + t.Errorf("expected success for %s: %v", k, errs) + } + } + negative := int32(-1) + negative64 := int64(-1) + errorCases := map[string]batch.Job{ + "spec.parallelism:must be greater than or equal to 0": { + ObjectMeta: api.ObjectMeta{ + Name: "myjob", + Namespace: api.NamespaceDefault, + UID: types.UID("1a2b3c"), + }, + Spec: batch.JobSpec{ + Parallelism: &negative, + Selector: validGeneratedSelector, + Template: validPodTemplateSpecForGenerated, + }, + }, + "spec.completions:must be greater than or equal to 0": { + ObjectMeta: api.ObjectMeta{ + Name: "myjob", + Namespace: api.NamespaceDefault, + UID: types.UID("1a2b3c"), + }, + Spec: batch.JobSpec{ + Completions: &negative, + Selector: validGeneratedSelector, + Template: validPodTemplateSpecForGenerated, + }, + }, + 
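+		// As with the cases above, each key encodes
+		// "<error field>:<expected error fragment>"; the loop after this map
+		// splits the key on ':' and checks the first returned error's Field
+		// and message against the two halves.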
"spec.activeDeadlineSeconds:must be greater than or equal to 0": { + ObjectMeta: api.ObjectMeta{ + Name: "myjob", + Namespace: api.NamespaceDefault, + UID: types.UID("1a2b3c"), + }, + Spec: batch.JobSpec{ + ActiveDeadlineSeconds: &negative64, + Selector: validGeneratedSelector, + Template: validPodTemplateSpecForGenerated, + }, + }, + "spec.selector:Required value": { + ObjectMeta: api.ObjectMeta{ + Name: "myjob", + Namespace: api.NamespaceDefault, + UID: types.UID("1a2b3c"), + }, + Spec: batch.JobSpec{ + Template: validPodTemplateSpecForGenerated, + }, + }, + "spec.template.metadata.labels: Invalid value: {\"y\":\"z\"}: `selector` does not match template `labels`": { + ObjectMeta: api.ObjectMeta{ + Name: "myjob", + Namespace: api.NamespaceDefault, + UID: types.UID("1a2b3c"), + }, + Spec: batch.JobSpec{ + Selector: validManualSelector, + ManualSelector: newBool(true), + Template: api.PodTemplateSpec{ + ObjectMeta: api.ObjectMeta{ + Labels: map[string]string{"y": "z"}, + }, + Spec: api.PodSpec{ + RestartPolicy: api.RestartPolicyOnFailure, + DNSPolicy: api.DNSClusterFirst, + Containers: []api.Container{{Name: "abc", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + }, + }, + }, + }, + "spec.template.metadata.labels: Invalid value: {\"controller-uid\":\"4d5e6f\"}: `selector` does not match template `labels`": { + ObjectMeta: api.ObjectMeta{ + Name: "myjob", + Namespace: api.NamespaceDefault, + UID: types.UID("1a2b3c"), + }, + Spec: batch.JobSpec{ + Selector: validManualSelector, + ManualSelector: newBool(true), + Template: api.PodTemplateSpec{ + ObjectMeta: api.ObjectMeta{ + Labels: map[string]string{"controller-uid": "4d5e6f"}, + }, + Spec: api.PodSpec{ + RestartPolicy: api.RestartPolicyOnFailure, + DNSPolicy: api.DNSClusterFirst, + Containers: []api.Container{{Name: "abc", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + }, + }, + }, + }, + "spec.template.spec.restartPolicy: Unsupported value": { + ObjectMeta: api.ObjectMeta{ + Name: "myjob", + Namespace: api.NamespaceDefault, + UID: types.UID("1a2b3c"), + }, + Spec: batch.JobSpec{ + Selector: validManualSelector, + ManualSelector: newBool(true), + Template: api.PodTemplateSpec{ + ObjectMeta: api.ObjectMeta{ + Labels: validManualSelector.MatchLabels, + }, + Spec: api.PodSpec{ + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + Containers: []api.Container{{Name: "abc", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + }, + }, + }, + }, + } + + for k, v := range errorCases { + errs := ValidateJob(&v) + if len(errs) == 0 { + t.Errorf("expected failure for %s", k) + } else { + s := strings.Split(k, ":") + err := errs[0] + if err.Field != s[0] || !strings.Contains(err.Error(), s[1]) { + t.Errorf("unexpected error: %v, expected: %s", err, k) + } + } + } +} + +func TestValidateJobUpdateStatus(t *testing.T) { + type testcase struct { + old batch.Job + update batch.Job + } + + successCases := []testcase{ + { + old: batch.Job{ + ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + Status: batch.JobStatus{ + Active: 1, + Succeeded: 2, + Failed: 3, + }, + }, + update: batch.Job{ + ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + Status: batch.JobStatus{ + Active: 1, + Succeeded: 1, + Failed: 3, + }, + }, + }, + } + + for _, successCase := range successCases { + successCase.old.ObjectMeta.ResourceVersion = "1" + successCase.update.ObjectMeta.ResourceVersion = "1" + if errs := ValidateJobUpdateStatus(&successCase.update, &successCase.old); len(errs) != 0 { + 
t.Errorf("expected success: %v", errs) + } + } + + errorCases := map[string]testcase{ + "[status.active: Invalid value: -1: must be greater than or equal to 0, status.succeeded: Invalid value: -2: must be greater than or equal to 0]": { + old: batch.Job{ + ObjectMeta: api.ObjectMeta{ + Name: "abc", + Namespace: api.NamespaceDefault, + ResourceVersion: "10", + }, + Status: batch.JobStatus{ + Active: 1, + Succeeded: 2, + Failed: 3, + }, + }, + update: batch.Job{ + ObjectMeta: api.ObjectMeta{ + Name: "abc", + Namespace: api.NamespaceDefault, + ResourceVersion: "10", + }, + Status: batch.JobStatus{ + Active: -1, + Succeeded: -2, + Failed: 3, + }, + }, + }, + } + + for testName, errorCase := range errorCases { + errs := ValidateJobUpdateStatus(&errorCase.update, &errorCase.old) + if len(errs) == 0 { + t.Errorf("expected failure: %s", testName) + continue + } + if errs.ToAggregate().Error() != testName { + t.Errorf("expected '%s' got '%s'", errs.ToAggregate().Error(), testName) + } + } +} + +func TestValidateScheduledJob(t *testing.T) { + validManualSelector := getValidManualSelector() + validGeneratedSelector := getValidGeneratedSelector() + validPodTemplateSpec := getValidPodTemplateSpecForGenerated(validGeneratedSelector) + + successCases := map[string]batch.ScheduledJob{ + "basic scheduled job": { + ObjectMeta: api.ObjectMeta{ + Name: "myscheduledjob", + Namespace: api.NamespaceDefault, + UID: types.UID("1a2b3c"), + }, + Spec: batch.ScheduledJobSpec{ + Schedule: "* * * * * ?", + ConcurrencyPolicy: batch.AllowConcurrent, + JobTemplate: batch.JobTemplateSpec{ + Spec: batch.JobSpec{ + Selector: validGeneratedSelector, + Template: validPodTemplateSpec, + }, + }, + }, + }, + } + for k, v := range successCases { + if errs := ValidateScheduledJob(&v); len(errs) != 0 { + t.Errorf("expected success for %s: %v", k, errs) + } + } + + negative := int32(-1) + negative64 := int64(-1) + + errorCases := map[string]batch.ScheduledJob{ + "spec.schedule: Invalid value": { + ObjectMeta: api.ObjectMeta{ + Name: "myscheduledjob", + Namespace: api.NamespaceDefault, + UID: types.UID("1a2b3c"), + }, + Spec: batch.ScheduledJobSpec{ + Schedule: "error", + ConcurrencyPolicy: batch.AllowConcurrent, + JobTemplate: batch.JobTemplateSpec{ + Spec: batch.JobSpec{ + Selector: validGeneratedSelector, + Template: validPodTemplateSpec, + }, + }, + }, + }, + "spec.schedule: Required value": { + ObjectMeta: api.ObjectMeta{ + Name: "myscheduledjob", + Namespace: api.NamespaceDefault, + UID: types.UID("1a2b3c"), + }, + Spec: batch.ScheduledJobSpec{ + Schedule: "", + ConcurrencyPolicy: batch.AllowConcurrent, + JobTemplate: batch.JobTemplateSpec{ + Spec: batch.JobSpec{ + Selector: validGeneratedSelector, + Template: validPodTemplateSpec, + }, + }, + }, + }, + "spec.startingDeadlineSeconds:must be greater than or equal to 0": { + ObjectMeta: api.ObjectMeta{ + Name: "myscheduledjob", + Namespace: api.NamespaceDefault, + UID: types.UID("1a2b3c"), + }, + Spec: batch.ScheduledJobSpec{ + Schedule: "* * * * * ?", + ConcurrencyPolicy: batch.AllowConcurrent, + StartingDeadlineSeconds: &negative64, + JobTemplate: batch.JobTemplateSpec{ + Spec: batch.JobSpec{ + Selector: validGeneratedSelector, + Template: validPodTemplateSpec, + }, + }, + }, + }, + "spec.concurrencyPolicy: Required value": { + ObjectMeta: api.ObjectMeta{ + Name: "myscheduledjob", + Namespace: api.NamespaceDefault, + UID: types.UID("1a2b3c"), + }, + Spec: batch.ScheduledJobSpec{ + Schedule: "* * * * * ?", + JobTemplate: batch.JobTemplateSpec{ + Spec: batch.JobSpec{ + Selector: 
validGeneratedSelector, + Template: validPodTemplateSpec, + }, + }, + }, + }, + "spec.jobTemplate.spec.parallelism:must be greater than or equal to 0": { + ObjectMeta: api.ObjectMeta{ + Name: "myscheduledjob", + Namespace: api.NamespaceDefault, + UID: types.UID("1a2b3c"), + }, + Spec: batch.ScheduledJobSpec{ + Schedule: "* * * * * ?", + ConcurrencyPolicy: batch.AllowConcurrent, + JobTemplate: batch.JobTemplateSpec{ + Spec: batch.JobSpec{ + Selector: validGeneratedSelector, + Parallelism: &negative, + Template: validPodTemplateSpec, + }, + }, + }, + }, + "spec.jobTemplate.spec.completions:must be greater than or equal to 0": { + ObjectMeta: api.ObjectMeta{ + Name: "myscheduledjob", + Namespace: api.NamespaceDefault, + UID: types.UID("1a2b3c"), + }, + Spec: batch.ScheduledJobSpec{ + Schedule: "* * * * * ?", + ConcurrencyPolicy: batch.AllowConcurrent, + JobTemplate: batch.JobTemplateSpec{ + + Spec: batch.JobSpec{ + Completions: &negative, + Selector: validGeneratedSelector, + Template: validPodTemplateSpec, + }, + }, + }, + }, + "spec.jobTemplate.spec.activeDeadlineSeconds:must be greater than or equal to 0": { + ObjectMeta: api.ObjectMeta{ + Name: "myscheduledjob", + Namespace: api.NamespaceDefault, + UID: types.UID("1a2b3c"), + }, + Spec: batch.ScheduledJobSpec{ + Schedule: "* * * * * ?", + ConcurrencyPolicy: batch.AllowConcurrent, + JobTemplate: batch.JobTemplateSpec{ + Spec: batch.JobSpec{ + ActiveDeadlineSeconds: &negative64, + Selector: validGeneratedSelector, + Template: validPodTemplateSpec, + }, + }, + }, + }, + "spec.jobTemplate.spec.selector:Required value": { + ObjectMeta: api.ObjectMeta{ + Name: "myscheduledjob", + Namespace: api.NamespaceDefault, + UID: types.UID("1a2b3c"), + }, + Spec: batch.ScheduledJobSpec{ + Schedule: "* * * * * ?", + ConcurrencyPolicy: batch.AllowConcurrent, + JobTemplate: batch.JobTemplateSpec{ + Spec: batch.JobSpec{ + Template: validPodTemplateSpec, + }, + }, + }, + }, + "spec.jobTemplate.spec.template.metadata.labels: Invalid value: {\"y\":\"z\"}: `selector` does not match template `labels`": { + ObjectMeta: api.ObjectMeta{ + Name: "myscheduledjob", + Namespace: api.NamespaceDefault, + UID: types.UID("1a2b3c"), + }, + Spec: batch.ScheduledJobSpec{ + Schedule: "* * * * * ?", + ConcurrencyPolicy: batch.AllowConcurrent, + JobTemplate: batch.JobTemplateSpec{ + Spec: batch.JobSpec{ + Selector: validManualSelector, + ManualSelector: newBool(true), + Template: api.PodTemplateSpec{ + ObjectMeta: api.ObjectMeta{ + Labels: map[string]string{"y": "z"}, + }, + Spec: api.PodSpec{ + RestartPolicy: api.RestartPolicyOnFailure, + DNSPolicy: api.DNSClusterFirst, + Containers: []api.Container{{Name: "abc", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + }, + }, + }, + }, + }, + }, + "spec.jobTemplate.spec.template.metadata.labels: Invalid value: {\"controller-uid\":\"4d5e6f\"}: `selector` does not match template `labels`": { + ObjectMeta: api.ObjectMeta{ + Name: "myscheduledjob", + Namespace: api.NamespaceDefault, + UID: types.UID("1a2b3c"), + }, + Spec: batch.ScheduledJobSpec{ + Schedule: "* * * * * ?", + ConcurrencyPolicy: batch.AllowConcurrent, + JobTemplate: batch.JobTemplateSpec{ + Spec: batch.JobSpec{ + Selector: validManualSelector, + ManualSelector: newBool(true), + Template: api.PodTemplateSpec{ + ObjectMeta: api.ObjectMeta{ + Labels: map[string]string{"controller-uid": "4d5e6f"}, + }, + Spec: api.PodSpec{ + RestartPolicy: api.RestartPolicyOnFailure, + DNSPolicy: api.DNSClusterFirst, + Containers: []api.Container{{Name: "abc", Image: "image", ImagePullPolicy: 
"IfNotPresent"}}, + }, + }, + }, + }, + }, + }, + "spec.jobTemplate.spec.template.spec.restartPolicy: Unsupported value": { + ObjectMeta: api.ObjectMeta{ + Name: "myscheduledjob", + Namespace: api.NamespaceDefault, + UID: types.UID("1a2b3c"), + }, + Spec: batch.ScheduledJobSpec{ + Schedule: "* * * * * ?", + ConcurrencyPolicy: batch.AllowConcurrent, + JobTemplate: batch.JobTemplateSpec{ + Spec: batch.JobSpec{ + Selector: validManualSelector, + ManualSelector: newBool(true), + Template: api.PodTemplateSpec{ + ObjectMeta: api.ObjectMeta{ + Labels: validManualSelector.MatchLabels, + }, + Spec: api.PodSpec{ + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + Containers: []api.Container{{Name: "abc", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + }, + }, + }, + }, + }, + }, + } + + for k, v := range errorCases { + errs := ValidateScheduledJob(&v) + if len(errs) == 0 { + t.Errorf("expected failure for %s", k) + } else { + s := strings.Split(k, ":") + err := errs[0] + if err.Field != s[0] || !strings.Contains(err.Error(), s[1]) { + t.Errorf("unexpected error: %v, expected: %s", err, k) + } + } + } +} + +func newBool(val bool) *bool { + p := new(bool) + *p = val + return p +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/componentconfig/deep_copy_generated.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/componentconfig/deep_copy_generated.go index c9a7b31e2f01..ee6514fcf05d 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/componentconfig/deep_copy_generated.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/componentconfig/deep_copy_generated.go @@ -1,5 +1,7 @@ +// +build !ignore_autogenerated + /* -Copyright 2015 The Kubernetes Authors All rights reserved. +Copyright 2016 The Kubernetes Authors All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,16 +16,353 @@ See the License for the specific language governing permissions and limitations under the License. */ -// DO NOT EDIT. THIS FILE IS AUTO-GENERATED BY $KUBEROOT/hack/update-generated-deep-copies.sh. +// This file was autogenerated by deepcopy-gen. Do not edit it manually! package componentconfig -import api "k8s.io/kubernetes/pkg/api" +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + conversion "k8s.io/kubernetes/pkg/conversion" +) func init() { - err := api.Scheme.AddGeneratedDeepCopyFuncs() - if err != nil { + if err := api.Scheme.AddGeneratedDeepCopyFuncs( + DeepCopy_componentconfig_IPVar, + DeepCopy_componentconfig_KubeControllerManagerConfiguration, + DeepCopy_componentconfig_KubeProxyConfiguration, + DeepCopy_componentconfig_KubeSchedulerConfiguration, + DeepCopy_componentconfig_KubeletConfiguration, + DeepCopy_componentconfig_LeaderElectionConfiguration, + DeepCopy_componentconfig_PersistentVolumeRecyclerConfiguration, + DeepCopy_componentconfig_PortRangeVar, + DeepCopy_componentconfig_VolumeConfiguration, + ); err != nil { // if one of the deep copy functions is malformed, detect it immediately. 
panic(err) } } + +func DeepCopy_componentconfig_IPVar(in IPVar, out *IPVar, c *conversion.Cloner) error { + if in.Val != nil { + in, out := in.Val, &out.Val + *out = new(string) + **out = *in + } else { + out.Val = nil + } + return nil +} + +func DeepCopy_componentconfig_KubeControllerManagerConfiguration(in KubeControllerManagerConfiguration, out *KubeControllerManagerConfiguration, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + out.Port = in.Port + out.Address = in.Address + out.CloudProvider = in.CloudProvider + out.CloudConfigFile = in.CloudConfigFile + out.ConcurrentEndpointSyncs = in.ConcurrentEndpointSyncs + out.ConcurrentRSSyncs = in.ConcurrentRSSyncs + out.ConcurrentRCSyncs = in.ConcurrentRCSyncs + out.ConcurrentResourceQuotaSyncs = in.ConcurrentResourceQuotaSyncs + out.ConcurrentDeploymentSyncs = in.ConcurrentDeploymentSyncs + out.ConcurrentDaemonSetSyncs = in.ConcurrentDaemonSetSyncs + out.ConcurrentJobSyncs = in.ConcurrentJobSyncs + out.ConcurrentNamespaceSyncs = in.ConcurrentNamespaceSyncs + out.LookupCacheSizeForRC = in.LookupCacheSizeForRC + out.LookupCacheSizeForRS = in.LookupCacheSizeForRS + out.LookupCacheSizeForDaemonSet = in.LookupCacheSizeForDaemonSet + if err := unversioned.DeepCopy_unversioned_Duration(in.ServiceSyncPeriod, &out.ServiceSyncPeriod, c); err != nil { + return err + } + if err := unversioned.DeepCopy_unversioned_Duration(in.NodeSyncPeriod, &out.NodeSyncPeriod, c); err != nil { + return err + } + if err := unversioned.DeepCopy_unversioned_Duration(in.ResourceQuotaSyncPeriod, &out.ResourceQuotaSyncPeriod, c); err != nil { + return err + } + if err := unversioned.DeepCopy_unversioned_Duration(in.NamespaceSyncPeriod, &out.NamespaceSyncPeriod, c); err != nil { + return err + } + if err := unversioned.DeepCopy_unversioned_Duration(in.PVClaimBinderSyncPeriod, &out.PVClaimBinderSyncPeriod, c); err != nil { + return err + } + if err := unversioned.DeepCopy_unversioned_Duration(in.MinResyncPeriod, &out.MinResyncPeriod, c); err != nil { + return err + } + out.TerminatedPodGCThreshold = in.TerminatedPodGCThreshold + if err := unversioned.DeepCopy_unversioned_Duration(in.HorizontalPodAutoscalerSyncPeriod, &out.HorizontalPodAutoscalerSyncPeriod, c); err != nil { + return err + } + if err := unversioned.DeepCopy_unversioned_Duration(in.DeploymentControllerSyncPeriod, &out.DeploymentControllerSyncPeriod, c); err != nil { + return err + } + if err := unversioned.DeepCopy_unversioned_Duration(in.PodEvictionTimeout, &out.PodEvictionTimeout, c); err != nil { + return err + } + out.DeletingPodsQps = in.DeletingPodsQps + out.DeletingPodsBurst = in.DeletingPodsBurst + if err := unversioned.DeepCopy_unversioned_Duration(in.NodeMonitorGracePeriod, &out.NodeMonitorGracePeriod, c); err != nil { + return err + } + out.RegisterRetryCount = in.RegisterRetryCount + if err := unversioned.DeepCopy_unversioned_Duration(in.NodeStartupGracePeriod, &out.NodeStartupGracePeriod, c); err != nil { + return err + } + if err := unversioned.DeepCopy_unversioned_Duration(in.NodeMonitorPeriod, &out.NodeMonitorPeriod, c); err != nil { + return err + } + out.ServiceAccountKeyFile = in.ServiceAccountKeyFile + out.EnableProfiling = in.EnableProfiling + out.ClusterName = in.ClusterName + out.ClusterCIDR = in.ClusterCIDR + out.ServiceCIDR = in.ServiceCIDR + out.NodeCIDRMaskSize = in.NodeCIDRMaskSize + out.AllocateNodeCIDRs = in.AllocateNodeCIDRs + out.ConfigureCloudRoutes = in.ConfigureCloudRoutes + 
out.RootCAFile = in.RootCAFile + out.ContentType = in.ContentType + out.KubeAPIQPS = in.KubeAPIQPS + out.KubeAPIBurst = in.KubeAPIBurst + if err := DeepCopy_componentconfig_LeaderElectionConfiguration(in.LeaderElection, &out.LeaderElection, c); err != nil { + return err + } + if err := DeepCopy_componentconfig_VolumeConfiguration(in.VolumeConfiguration, &out.VolumeConfiguration, c); err != nil { + return err + } + if err := unversioned.DeepCopy_unversioned_Duration(in.ControllerStartInterval, &out.ControllerStartInterval, c); err != nil { + return err + } + out.EnableGarbageCollector = in.EnableGarbageCollector + return nil +} + +func DeepCopy_componentconfig_KubeProxyConfiguration(in KubeProxyConfiguration, out *KubeProxyConfiguration, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + out.BindAddress = in.BindAddress + out.ClusterCIDR = in.ClusterCIDR + out.HealthzBindAddress = in.HealthzBindAddress + out.HealthzPort = in.HealthzPort + out.HostnameOverride = in.HostnameOverride + if in.IPTablesMasqueradeBit != nil { + in, out := in.IPTablesMasqueradeBit, &out.IPTablesMasqueradeBit + *out = new(int32) + **out = *in + } else { + out.IPTablesMasqueradeBit = nil + } + if err := unversioned.DeepCopy_unversioned_Duration(in.IPTablesSyncPeriod, &out.IPTablesSyncPeriod, c); err != nil { + return err + } + out.KubeconfigPath = in.KubeconfigPath + out.MasqueradeAll = in.MasqueradeAll + out.Master = in.Master + if in.OOMScoreAdj != nil { + in, out := in.OOMScoreAdj, &out.OOMScoreAdj + *out = new(int32) + **out = *in + } else { + out.OOMScoreAdj = nil + } + out.Mode = in.Mode + out.PortRange = in.PortRange + out.ResourceContainer = in.ResourceContainer + if err := unversioned.DeepCopy_unversioned_Duration(in.UDPIdleTimeout, &out.UDPIdleTimeout, c); err != nil { + return err + } + out.ConntrackMax = in.ConntrackMax + if err := unversioned.DeepCopy_unversioned_Duration(in.ConntrackTCPEstablishedTimeout, &out.ConntrackTCPEstablishedTimeout, c); err != nil { + return err + } + return nil +} + +func DeepCopy_componentconfig_KubeSchedulerConfiguration(in KubeSchedulerConfiguration, out *KubeSchedulerConfiguration, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + out.Port = in.Port + out.Address = in.Address + out.AlgorithmProvider = in.AlgorithmProvider + out.PolicyConfigFile = in.PolicyConfigFile + out.EnableProfiling = in.EnableProfiling + out.ContentType = in.ContentType + out.KubeAPIQPS = in.KubeAPIQPS + out.KubeAPIBurst = in.KubeAPIBurst + out.SchedulerName = in.SchedulerName + out.HardPodAffinitySymmetricWeight = in.HardPodAffinitySymmetricWeight + out.FailureDomains = in.FailureDomains + if err := DeepCopy_componentconfig_LeaderElectionConfiguration(in.LeaderElection, &out.LeaderElection, c); err != nil { + return err + } + return nil +} + +func DeepCopy_componentconfig_KubeletConfiguration(in KubeletConfiguration, out *KubeletConfiguration, c *conversion.Cloner) error { + out.Config = in.Config + if err := unversioned.DeepCopy_unversioned_Duration(in.SyncFrequency, &out.SyncFrequency, c); err != nil { + return err + } + if err := unversioned.DeepCopy_unversioned_Duration(in.FileCheckFrequency, &out.FileCheckFrequency, c); err != nil { + return err + } + if err := unversioned.DeepCopy_unversioned_Duration(in.HTTPCheckFrequency, &out.HTTPCheckFrequency, c); err != nil { + return err + } + out.ManifestURL = 
in.ManifestURL + out.ManifestURLHeader = in.ManifestURLHeader + out.EnableServer = in.EnableServer + out.Address = in.Address + out.Port = in.Port + out.ReadOnlyPort = in.ReadOnlyPort + out.TLSCertFile = in.TLSCertFile + out.TLSPrivateKeyFile = in.TLSPrivateKeyFile + out.CertDirectory = in.CertDirectory + out.HostnameOverride = in.HostnameOverride + out.PodInfraContainerImage = in.PodInfraContainerImage + out.DockerEndpoint = in.DockerEndpoint + out.RootDirectory = in.RootDirectory + out.SeccompProfileRoot = in.SeccompProfileRoot + out.AllowPrivileged = in.AllowPrivileged + out.HostNetworkSources = in.HostNetworkSources + out.HostPIDSources = in.HostPIDSources + out.HostIPCSources = in.HostIPCSources + out.RegistryPullQPS = in.RegistryPullQPS + out.RegistryBurst = in.RegistryBurst + out.EventRecordQPS = in.EventRecordQPS + out.EventBurst = in.EventBurst + out.EnableDebuggingHandlers = in.EnableDebuggingHandlers + if err := unversioned.DeepCopy_unversioned_Duration(in.MinimumGCAge, &out.MinimumGCAge, c); err != nil { + return err + } + out.MaxPerPodContainerCount = in.MaxPerPodContainerCount + out.MaxContainerCount = in.MaxContainerCount + out.CAdvisorPort = in.CAdvisorPort + out.HealthzPort = in.HealthzPort + out.HealthzBindAddress = in.HealthzBindAddress + out.OOMScoreAdj = in.OOMScoreAdj + out.RegisterNode = in.RegisterNode + out.ClusterDomain = in.ClusterDomain + out.MasterServiceNamespace = in.MasterServiceNamespace + out.ClusterDNS = in.ClusterDNS + if err := unversioned.DeepCopy_unversioned_Duration(in.StreamingConnectionIdleTimeout, &out.StreamingConnectionIdleTimeout, c); err != nil { + return err + } + if err := unversioned.DeepCopy_unversioned_Duration(in.NodeStatusUpdateFrequency, &out.NodeStatusUpdateFrequency, c); err != nil { + return err + } + if err := unversioned.DeepCopy_unversioned_Duration(in.ImageMinimumGCAge, &out.ImageMinimumGCAge, c); err != nil { + return err + } + out.ImageGCHighThresholdPercent = in.ImageGCHighThresholdPercent + out.ImageGCLowThresholdPercent = in.ImageGCLowThresholdPercent + out.LowDiskSpaceThresholdMB = in.LowDiskSpaceThresholdMB + if err := unversioned.DeepCopy_unversioned_Duration(in.VolumeStatsAggPeriod, &out.VolumeStatsAggPeriod, c); err != nil { + return err + } + out.NetworkPluginName = in.NetworkPluginName + out.NetworkPluginDir = in.NetworkPluginDir + out.VolumePluginDir = in.VolumePluginDir + out.CloudProvider = in.CloudProvider + out.CloudConfigFile = in.CloudConfigFile + out.KubeletCgroups = in.KubeletCgroups + out.RuntimeCgroups = in.RuntimeCgroups + out.SystemCgroups = in.SystemCgroups + out.CgroupRoot = in.CgroupRoot + out.ContainerRuntime = in.ContainerRuntime + out.RktPath = in.RktPath + out.RktAPIEndpoint = in.RktAPIEndpoint + out.RktStage1Image = in.RktStage1Image + out.LockFilePath = in.LockFilePath + out.ExitOnLockContention = in.ExitOnLockContention + out.ConfigureCBR0 = in.ConfigureCBR0 + out.HairpinMode = in.HairpinMode + out.BabysitDaemons = in.BabysitDaemons + out.MaxPods = in.MaxPods + out.NvidiaGPUs = in.NvidiaGPUs + out.DockerExecHandlerName = in.DockerExecHandlerName + out.PodCIDR = in.PodCIDR + out.ResolverConfig = in.ResolverConfig + out.CPUCFSQuota = in.CPUCFSQuota + out.Containerized = in.Containerized + out.MaxOpenFiles = in.MaxOpenFiles + out.ReconcileCIDR = in.ReconcileCIDR + out.RegisterSchedulable = in.RegisterSchedulable + out.ContentType = in.ContentType + out.KubeAPIQPS = in.KubeAPIQPS + out.KubeAPIBurst = in.KubeAPIBurst + out.SerializeImagePulls = in.SerializeImagePulls + out.ExperimentalFlannelOverlay 
= in.ExperimentalFlannelOverlay + if err := unversioned.DeepCopy_unversioned_Duration(in.OutOfDiskTransitionFrequency, &out.OutOfDiskTransitionFrequency, c); err != nil { + return err + } + out.NodeIP = in.NodeIP + if in.NodeLabels != nil { + in, out := in.NodeLabels, &out.NodeLabels + *out = make(map[string]string) + for key, val := range in { + (*out)[key] = val + } + } else { + out.NodeLabels = nil + } + out.NonMasqueradeCIDR = in.NonMasqueradeCIDR + out.EnableCustomMetrics = in.EnableCustomMetrics + out.EvictionHard = in.EvictionHard + out.EvictionSoft = in.EvictionSoft + out.EvictionSoftGracePeriod = in.EvictionSoftGracePeriod + if err := unversioned.DeepCopy_unversioned_Duration(in.EvictionPressureTransitionPeriod, &out.EvictionPressureTransitionPeriod, c); err != nil { + return err + } + out.EvictionMaxPodGracePeriod = in.EvictionMaxPodGracePeriod + out.PodsPerCore = in.PodsPerCore + return nil +} + +func DeepCopy_componentconfig_LeaderElectionConfiguration(in LeaderElectionConfiguration, out *LeaderElectionConfiguration, c *conversion.Cloner) error { + out.LeaderElect = in.LeaderElect + if err := unversioned.DeepCopy_unversioned_Duration(in.LeaseDuration, &out.LeaseDuration, c); err != nil { + return err + } + if err := unversioned.DeepCopy_unversioned_Duration(in.RenewDeadline, &out.RenewDeadline, c); err != nil { + return err + } + if err := unversioned.DeepCopy_unversioned_Duration(in.RetryPeriod, &out.RetryPeriod, c); err != nil { + return err + } + return nil +} + +func DeepCopy_componentconfig_PersistentVolumeRecyclerConfiguration(in PersistentVolumeRecyclerConfiguration, out *PersistentVolumeRecyclerConfiguration, c *conversion.Cloner) error { + out.MaximumRetry = in.MaximumRetry + out.MinimumTimeoutNFS = in.MinimumTimeoutNFS + out.PodTemplateFilePathNFS = in.PodTemplateFilePathNFS + out.IncrementTimeoutNFS = in.IncrementTimeoutNFS + out.PodTemplateFilePathHostPath = in.PodTemplateFilePathHostPath + out.MinimumTimeoutHostPath = in.MinimumTimeoutHostPath + out.IncrementTimeoutHostPath = in.IncrementTimeoutHostPath + return nil +} + +func DeepCopy_componentconfig_PortRangeVar(in PortRangeVar, out *PortRangeVar, c *conversion.Cloner) error { + if in.Val != nil { + in, out := in.Val, &out.Val + *out = new(string) + **out = *in + } else { + out.Val = nil + } + return nil +} + +func DeepCopy_componentconfig_VolumeConfiguration(in VolumeConfiguration, out *VolumeConfiguration, c *conversion.Cloner) error { + out.EnableHostPathProvisioning = in.EnableHostPathProvisioning + if err := DeepCopy_componentconfig_PersistentVolumeRecyclerConfiguration(in.PersistentVolumeRecyclerConfiguration, &out.PersistentVolumeRecyclerConfiguration, c); err != nil { + return err + } + out.FlexVolumePluginDir = in.FlexVolumePluginDir + return nil +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/componentconfig/helpers_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/componentconfig/helpers_test.go new file mode 100644 index 000000000000..7aece8257f10 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/componentconfig/helpers_test.go @@ -0,0 +1,71 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package componentconfig + +import ( + "strings" + "testing" + + "github.com/spf13/pflag" +) + +func TestIPVar(t *testing.T) { + defaultIP := "0.0.0.0" + cases := []struct { + argc string + expectErr bool + expectVal string + }{ + + { + argc: "blah --ip=1.2.3.4", + expectVal: "1.2.3.4", + }, + { + argc: "blah --ip=1.2.3.4a", + expectErr: true, + expectVal: defaultIP, + }, + } + for _, c := range cases { + fs := pflag.NewFlagSet("blah", pflag.PanicOnError) + ip := defaultIP + fs.Var(IPVar{&ip}, "ip", "the ip") + + var err error + func() { + defer func() { + if r := recover(); r != nil { + err = r.(error) + } + }() + fs.Parse(strings.Split(c.argc, " ")) + }() + + if c.expectErr && err == nil { + t.Errorf("did not observe an expected error") + continue + } + if !c.expectErr && err != nil { + t.Errorf("observed an unexpected error") + continue + } + if c.expectVal != ip { + t.Errorf("unexpected ip: expected %q, saw %q", c.expectVal, ip) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/componentconfig/install/install_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/componentconfig/install/install_test.go new file mode 100644 index 000000000000..940e17567387 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/componentconfig/install/install_test.go @@ -0,0 +1,89 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package install + +import ( + "encoding/json" + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/apimachinery/registered" + "k8s.io/kubernetes/pkg/apis/componentconfig" + "k8s.io/kubernetes/pkg/runtime" +) + +func TestCodec(t *testing.T) { + daemonSet := componentconfig.KubeProxyConfiguration{} + // We do want to use package registered rather than testapi here, because we + // want to test if the package install and package registered work as expected. 
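+	// The encode below goes through the group's legacy codec, which stamps
+	// apiVersion and kind into the serialized bytes; unmarshalling the raw
+	// JSON back into the internal struct then surfaces those TypeMeta fields
+	// for the assertions that follow.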
+ data, err := runtime.Encode(api.Codecs.LegacyCodec(registered.GroupOrDie(componentconfig.GroupName).GroupVersion), &daemonSet) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + other := componentconfig.KubeProxyConfiguration{} + if err := json.Unmarshal(data, &other); err != nil { + t.Fatalf("unexpected error: %v", err) + } + if other.APIVersion != registered.GroupOrDie(componentconfig.GroupName).GroupVersion.String() || other.Kind != "KubeProxyConfiguration" { + t.Errorf("unexpected unmarshalled object %#v", other) + } +} + +func TestInterfacesFor(t *testing.T) { + if _, err := registered.GroupOrDie(componentconfig.GroupName).InterfacesFor(componentconfig.SchemeGroupVersion); err == nil { + t.Fatalf("unexpected non-error: %v", err) + } + for i, version := range registered.GroupOrDie(componentconfig.GroupName).GroupVersions { + if vi, err := registered.GroupOrDie(componentconfig.GroupName).InterfacesFor(version); err != nil || vi == nil { + t.Fatalf("%d: unexpected result: %v", i, err) + } + } +} + +func TestRESTMapper(t *testing.T) { + gv := unversioned.GroupVersion{Group: componentconfig.GroupName, Version: "v1alpha1"} + proxyGVK := gv.WithKind("KubeProxyConfiguration") + + if gvk, err := registered.GroupOrDie(componentconfig.GroupName).RESTMapper.KindFor(gv.WithResource("kubeproxyconfiguration")); err != nil || gvk != proxyGVK { + t.Errorf("unexpected version mapping: %v %v", gvk, err) + } + + if m, err := registered.GroupOrDie(componentconfig.GroupName).RESTMapper.RESTMapping(proxyGVK.GroupKind(), ""); err != nil || m.GroupVersionKind != proxyGVK || m.Resource != "kubeproxyconfigurations" { + t.Errorf("unexpected version mapping: %#v %v", m, err) + } + + for _, version := range registered.GroupOrDie(componentconfig.GroupName).GroupVersions { + mapping, err := registered.GroupOrDie(componentconfig.GroupName).RESTMapper.RESTMapping(proxyGVK.GroupKind(), version.Version) + if err != nil { + t.Errorf("unexpected error: %v", err) + continue + } + + if mapping.Resource != "kubeproxyconfigurations" { + t.Errorf("incorrect resource name: %#v", mapping) + } + if mapping.GroupVersionKind.GroupVersion() != version { + t.Errorf("incorrect groupVersion: %v", mapping) + } + + interfaces, _ := registered.GroupOrDie(componentconfig.GroupName).InterfacesFor(version) + if mapping.ObjectConvertor != interfaces.ObjectConvertor { + t.Errorf("unexpected: %#v, expected: %#v", mapping, interfaces) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/componentconfig/types.generated.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/componentconfig/types.generated.go index f3f54626b3e8..53a6c1d36aff 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/componentconfig/types.generated.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/componentconfig/types.generated.go @@ -1,5 +1,5 @@ /* -Copyright 2015 The Kubernetes Authors All rights reserved. +Copyright 2016 The Kubernetes Authors All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
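The hunks below are mechanical codecgen output, regenerated after
KubeProxyConfiguration gained a clusterCIDR field (the encoded field count
grows from 18 to 19 and every yym/yy temporary is renumbered) and after
several integer fields were narrowed from int to int32, which is why
int(r.DecodeInt(codecSelferBitsize1234)) becomes int32(r.DecodeInt(32)).
A minimal sketch of the source-level change assumed to drive the
regeneration; the exact layout of types.go is an assumption, but the field
names and types match the keys visible in the hunks:

	type KubeProxyConfiguration struct {
		unversioned.TypeMeta

		BindAddress           string `json:"bindAddress"`
		ClusterCIDR           string `json:"clusterCIDR"` // new field
		HealthzBindAddress    string `json:"healthzBindAddress"`
		HealthzPort           int32  `json:"healthzPort"` // was int
		HostnameOverride      string `json:"hostnameOverride"`
		IPTablesMasqueradeBit *int32 `json:"iptablesMasqueradeBit"` // was *int
		// ...
		OOMScoreAdj  *int32 `json:"oomScoreAdj"`  // was *int
		ConntrackMax int32  `json:"conntrackMax"` // was int
		// ... remaining fields unchanged ...
	}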
@@ -81,16 +81,16 @@ func (x *KubeProxyConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [18]bool + var yyq2 [19]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false - yyq2[16] = x.Kind != "" - yyq2[17] = x.APIVersion != "" + yyq2[17] = x.Kind != "" + yyq2[18] = x.APIVersion != "" var yynn2 int if yyr2 || yy2arr2 { - r.EncodeArrayStart(18) + r.EncodeArrayStart(19) } else { - yynn2 = 16 + yynn2 = 17 for _, b := range yyq2 { if b { yynn2++ @@ -124,17 +124,17 @@ func (x *KubeProxyConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { _ = yym7 if false { } else { - r.EncodeString(codecSelferC_UTF81234, string(x.HealthzBindAddress)) + r.EncodeString(codecSelferC_UTF81234, string(x.ClusterCIDR)) } } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("healthzBindAddress")) + r.EncodeString(codecSelferC_UTF81234, string("clusterCIDR")) z.EncSendContainerState(codecSelfer_containerMapValue1234) yym8 := z.EncBinary() _ = yym8 if false { } else { - r.EncodeString(codecSelferC_UTF81234, string(x.HealthzBindAddress)) + r.EncodeString(codecSelferC_UTF81234, string(x.ClusterCIDR)) } } if yyr2 || yy2arr2 { @@ -143,17 +143,17 @@ func (x *KubeProxyConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { _ = yym10 if false { } else { - r.EncodeInt(int64(x.HealthzPort)) + r.EncodeString(codecSelferC_UTF81234, string(x.HealthzBindAddress)) } } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("healthzPort")) + r.EncodeString(codecSelferC_UTF81234, string("healthzBindAddress")) z.EncSendContainerState(codecSelfer_containerMapValue1234) yym11 := z.EncBinary() _ = yym11 if false { } else { - r.EncodeInt(int64(x.HealthzPort)) + r.EncodeString(codecSelferC_UTF81234, string(x.HealthzBindAddress)) } } if yyr2 || yy2arr2 { @@ -162,15 +162,34 @@ func (x *KubeProxyConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { _ = yym13 if false { } else { - r.EncodeString(codecSelferC_UTF81234, string(x.HostnameOverride)) + r.EncodeInt(int64(x.HealthzPort)) } } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostnameOverride")) + r.EncodeString(codecSelferC_UTF81234, string("healthzPort")) z.EncSendContainerState(codecSelfer_containerMapValue1234) yym14 := z.EncBinary() _ = yym14 if false { + } else { + r.EncodeInt(int64(x.HealthzPort)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.HostnameOverride)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("hostnameOverride")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym17 := z.EncBinary() + _ = yym17 + if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.HostnameOverride)) } @@ -180,12 +199,12 @@ func (x *KubeProxyConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { if x.IPTablesMasqueradeBit == nil { r.EncodeNil() } else { - yy16 := *x.IPTablesMasqueradeBit - yym17 := z.EncBinary() - _ = yym17 + yy19 := *x.IPTablesMasqueradeBit + yym20 := z.EncBinary() + _ = yym20 if false { } else { - r.EncodeInt(int64(yy16)) + r.EncodeInt(int64(yy19)) } } } else { @@ -195,46 +214,46 @@ func (x *KubeProxyConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { 
if x.IPTablesMasqueradeBit == nil { r.EncodeNil() } else { - yy18 := *x.IPTablesMasqueradeBit - yym19 := z.EncBinary() - _ = yym19 + yy21 := *x.IPTablesMasqueradeBit + yym22 := z.EncBinary() + _ = yym22 if false { } else { - r.EncodeInt(int64(yy18)) + r.EncodeInt(int64(yy21)) } } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy21 := &x.IPTablesSyncPeriod - yym22 := z.EncBinary() - _ = yym22 + yy24 := &x.IPTablesSyncPeriod + yym25 := z.EncBinary() + _ = yym25 if false { - } else if z.HasExtensions() && z.EncExt(yy21) { - } else if !yym22 && z.IsJSONHandle() { - z.EncJSONMarshal(yy21) + } else if z.HasExtensions() && z.EncExt(yy24) { + } else if !yym25 && z.IsJSONHandle() { + z.EncJSONMarshal(yy24) } else { - z.EncFallback(yy21) + z.EncFallback(yy24) } } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("iptablesSyncPeriodSeconds")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy23 := &x.IPTablesSyncPeriod - yym24 := z.EncBinary() - _ = yym24 + yy26 := &x.IPTablesSyncPeriod + yym27 := z.EncBinary() + _ = yym27 if false { - } else if z.HasExtensions() && z.EncExt(yy23) { - } else if !yym24 && z.IsJSONHandle() { - z.EncJSONMarshal(yy23) + } else if z.HasExtensions() && z.EncExt(yy26) { + } else if !yym27 && z.IsJSONHandle() { + z.EncJSONMarshal(yy26) } else { - z.EncFallback(yy23) + z.EncFallback(yy26) } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym26 := z.EncBinary() - _ = yym26 + yym29 := z.EncBinary() + _ = yym29 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.KubeconfigPath)) @@ -243,8 +262,8 @@ func (x *KubeProxyConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kubeconfigPath")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym27 := z.EncBinary() - _ = yym27 + yym30 := z.EncBinary() + _ = yym30 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.KubeconfigPath)) @@ -252,8 +271,8 @@ func (x *KubeProxyConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym29 := z.EncBinary() - _ = yym29 + yym32 := z.EncBinary() + _ = yym32 if false { } else { r.EncodeBool(bool(x.MasqueradeAll)) @@ -262,8 +281,8 @@ func (x *KubeProxyConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("masqueradeAll")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym30 := z.EncBinary() - _ = yym30 + yym33 := z.EncBinary() + _ = yym33 if false { } else { r.EncodeBool(bool(x.MasqueradeAll)) @@ -271,8 +290,8 @@ func (x *KubeProxyConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym32 := z.EncBinary() - _ = yym32 + yym35 := z.EncBinary() + _ = yym35 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Master)) @@ -281,8 +300,8 @@ func (x *KubeProxyConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("master")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym33 := z.EncBinary() - _ = yym33 + yym36 := z.EncBinary() + _ = yym36 if false { } else { r.EncodeString(codecSelferC_UTF81234, 
string(x.Master)) @@ -293,12 +312,12 @@ func (x *KubeProxyConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { if x.OOMScoreAdj == nil { r.EncodeNil() } else { - yy35 := *x.OOMScoreAdj - yym36 := z.EncBinary() - _ = yym36 + yy38 := *x.OOMScoreAdj + yym39 := z.EncBinary() + _ = yym39 if false { } else { - r.EncodeInt(int64(yy35)) + r.EncodeInt(int64(yy38)) } } } else { @@ -308,12 +327,12 @@ func (x *KubeProxyConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { if x.OOMScoreAdj == nil { r.EncodeNil() } else { - yy37 := *x.OOMScoreAdj - yym38 := z.EncBinary() - _ = yym38 + yy40 := *x.OOMScoreAdj + yym41 := z.EncBinary() + _ = yym41 if false { } else { - r.EncodeInt(int64(yy37)) + r.EncodeInt(int64(yy40)) } } } @@ -328,8 +347,8 @@ func (x *KubeProxyConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym43 := z.EncBinary() - _ = yym43 + yym46 := z.EncBinary() + _ = yym46 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.PortRange)) @@ -338,8 +357,8 @@ func (x *KubeProxyConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("portRange")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym44 := z.EncBinary() - _ = yym44 + yym47 := z.EncBinary() + _ = yym47 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.PortRange)) @@ -347,8 +366,8 @@ func (x *KubeProxyConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym46 := z.EncBinary() - _ = yym46 + yym49 := z.EncBinary() + _ = yym49 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.ResourceContainer)) @@ -357,8 +376,8 @@ func (x *KubeProxyConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kubeletCgroups")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym47 := z.EncBinary() - _ = yym47 + yym50 := z.EncBinary() + _ = yym50 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.ResourceContainer)) @@ -366,35 +385,35 @@ func (x *KubeProxyConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy49 := &x.UDPIdleTimeout - yym50 := z.EncBinary() - _ = yym50 + yy52 := &x.UDPIdleTimeout + yym53 := z.EncBinary() + _ = yym53 if false { - } else if z.HasExtensions() && z.EncExt(yy49) { - } else if !yym50 && z.IsJSONHandle() { - z.EncJSONMarshal(yy49) + } else if z.HasExtensions() && z.EncExt(yy52) { + } else if !yym53 && z.IsJSONHandle() { + z.EncJSONMarshal(yy52) } else { - z.EncFallback(yy49) + z.EncFallback(yy52) } } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("udpTimeoutMilliseconds")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy51 := &x.UDPIdleTimeout - yym52 := z.EncBinary() - _ = yym52 + yy54 := &x.UDPIdleTimeout + yym55 := z.EncBinary() + _ = yym55 if false { - } else if z.HasExtensions() && z.EncExt(yy51) { - } else if !yym52 && z.IsJSONHandle() { - z.EncJSONMarshal(yy51) + } else if z.HasExtensions() && z.EncExt(yy54) { + } else if !yym55 && z.IsJSONHandle() { + z.EncJSONMarshal(yy54) } else { - z.EncFallback(yy51) + z.EncFallback(yy54) } } if yyr2 || yy2arr2 { 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym54 := z.EncBinary() - _ = yym54 + yym57 := z.EncBinary() + _ = yym57 if false { } else { r.EncodeInt(int64(x.ConntrackMax)) @@ -403,8 +422,8 @@ func (x *KubeProxyConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("conntrackMax")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym55 := z.EncBinary() - _ = yym55 + yym58 := z.EncBinary() + _ = yym58 if false { } else { r.EncodeInt(int64(x.ConntrackMax)) @@ -412,36 +431,36 @@ func (x *KubeProxyConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy57 := &x.ConntrackTCPEstablishedTimeout - yym58 := z.EncBinary() - _ = yym58 + yy60 := &x.ConntrackTCPEstablishedTimeout + yym61 := z.EncBinary() + _ = yym61 if false { - } else if z.HasExtensions() && z.EncExt(yy57) { - } else if !yym58 && z.IsJSONHandle() { - z.EncJSONMarshal(yy57) + } else if z.HasExtensions() && z.EncExt(yy60) { + } else if !yym61 && z.IsJSONHandle() { + z.EncJSONMarshal(yy60) } else { - z.EncFallback(yy57) + z.EncFallback(yy60) } } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("conntrackTCPEstablishedTimeout")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy59 := &x.ConntrackTCPEstablishedTimeout - yym60 := z.EncBinary() - _ = yym60 + yy62 := &x.ConntrackTCPEstablishedTimeout + yym63 := z.EncBinary() + _ = yym63 if false { - } else if z.HasExtensions() && z.EncExt(yy59) { - } else if !yym60 && z.IsJSONHandle() { - z.EncJSONMarshal(yy59) + } else if z.HasExtensions() && z.EncExt(yy62) { + } else if !yym63 && z.IsJSONHandle() { + z.EncJSONMarshal(yy62) } else { - z.EncFallback(yy59) + z.EncFallback(yy62) } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[16] { - yym62 := z.EncBinary() - _ = yym62 + if yyq2[17] { + yym65 := z.EncBinary() + _ = yym65 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -450,12 +469,12 @@ func (x *KubeProxyConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2[16] { + if yyq2[17] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym63 := z.EncBinary() - _ = yym63 + yym66 := z.EncBinary() + _ = yym66 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -464,9 +483,9 @@ func (x *KubeProxyConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[17] { - yym65 := z.EncBinary() - _ = yym65 + if yyq2[18] { + yym68 := z.EncBinary() + _ = yym68 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -475,12 +494,12 @@ func (x *KubeProxyConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2[17] { + if yyq2[18] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym66 := z.EncBinary() - _ = yym66 + yym69 := z.EncBinary() + _ = yym69 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -554,6 +573,12 @@ func (x 
*KubeProxyConfiguration) codecDecodeSelfFromMap(l int, d *codec1978.Deco } else { x.BindAddress = string(r.DecodeString()) } + case "clusterCIDR": + if r.TryDecodeAsNil() { + x.ClusterCIDR = "" + } else { + x.ClusterCIDR = string(r.DecodeString()) + } case "healthzBindAddress": if r.TryDecodeAsNil() { x.HealthzBindAddress = "" @@ -564,7 +589,7 @@ func (x *KubeProxyConfiguration) codecDecodeSelfFromMap(l int, d *codec1978.Deco if r.TryDecodeAsNil() { x.HealthzPort = 0 } else { - x.HealthzPort = int(r.DecodeInt(codecSelferBitsize1234)) + x.HealthzPort = int32(r.DecodeInt(32)) } case "hostnameOverride": if r.TryDecodeAsNil() { @@ -579,28 +604,28 @@ func (x *KubeProxyConfiguration) codecDecodeSelfFromMap(l int, d *codec1978.Deco } } else { if x.IPTablesMasqueradeBit == nil { - x.IPTablesMasqueradeBit = new(int) + x.IPTablesMasqueradeBit = new(int32) } - yym9 := z.DecBinary() - _ = yym9 + yym10 := z.DecBinary() + _ = yym10 if false { } else { - *((*int)(x.IPTablesMasqueradeBit)) = int(r.DecodeInt(codecSelferBitsize1234)) + *((*int32)(x.IPTablesMasqueradeBit)) = int32(r.DecodeInt(32)) } } case "iptablesSyncPeriodSeconds": if r.TryDecodeAsNil() { x.IPTablesSyncPeriod = pkg1_unversioned.Duration{} } else { - yyv10 := &x.IPTablesSyncPeriod - yym11 := z.DecBinary() - _ = yym11 + yyv11 := &x.IPTablesSyncPeriod + yym12 := z.DecBinary() + _ = yym12 if false { - } else if z.HasExtensions() && z.DecExt(yyv10) { - } else if !yym11 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv10) + } else if z.HasExtensions() && z.DecExt(yyv11) { + } else if !yym12 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv11) } else { - z.DecFallback(yyv10, false) + z.DecFallback(yyv11, false) } } case "kubeconfigPath": @@ -628,13 +653,13 @@ func (x *KubeProxyConfiguration) codecDecodeSelfFromMap(l int, d *codec1978.Deco } } else { if x.OOMScoreAdj == nil { - x.OOMScoreAdj = new(int) + x.OOMScoreAdj = new(int32) } - yym16 := z.DecBinary() - _ = yym16 + yym17 := z.DecBinary() + _ = yym17 if false { } else { - *((*int)(x.OOMScoreAdj)) = int(r.DecodeInt(codecSelferBitsize1234)) + *((*int32)(x.OOMScoreAdj)) = int32(r.DecodeInt(32)) } } case "mode": @@ -659,36 +684,36 @@ func (x *KubeProxyConfiguration) codecDecodeSelfFromMap(l int, d *codec1978.Deco if r.TryDecodeAsNil() { x.UDPIdleTimeout = pkg1_unversioned.Duration{} } else { - yyv20 := &x.UDPIdleTimeout - yym21 := z.DecBinary() - _ = yym21 + yyv21 := &x.UDPIdleTimeout + yym22 := z.DecBinary() + _ = yym22 if false { - } else if z.HasExtensions() && z.DecExt(yyv20) { - } else if !yym21 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv20) + } else if z.HasExtensions() && z.DecExt(yyv21) { + } else if !yym22 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv21) } else { - z.DecFallback(yyv20, false) + z.DecFallback(yyv21, false) } } case "conntrackMax": if r.TryDecodeAsNil() { x.ConntrackMax = 0 } else { - x.ConntrackMax = int(r.DecodeInt(codecSelferBitsize1234)) + x.ConntrackMax = int32(r.DecodeInt(32)) } case "conntrackTCPEstablishedTimeout": if r.TryDecodeAsNil() { x.ConntrackTCPEstablishedTimeout = pkg1_unversioned.Duration{} } else { - yyv23 := &x.ConntrackTCPEstablishedTimeout - yym24 := z.DecBinary() - _ = yym24 + yyv24 := &x.ConntrackTCPEstablishedTimeout + yym25 := z.DecBinary() + _ = yym25 if false { - } else if z.HasExtensions() && z.DecExt(yyv23) { - } else if !yym24 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv23) + } else if z.HasExtensions() && z.DecExt(yyv24) { + } else if !yym25 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv24) } else { - z.DecFallback(yyv23, false) + 
z.DecFallback(yyv24, false) } } case "kind": @@ -714,16 +739,16 @@ func (x *KubeProxyConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.De var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj27 int - var yyb27 bool - var yyhl27 bool = l >= 0 - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l + var yyj28 int + var yyb28 bool + var yyhl28 bool = l >= 0 + yyj28++ + if yyhl28 { + yyb28 = yyj28 > l } else { - yyb27 = r.CheckBreak() + yyb28 = r.CheckBreak() } - if yyb27 { + if yyb28 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -733,13 +758,29 @@ func (x *KubeProxyConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.De } else { x.BindAddress = string(r.DecodeString()) } - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l + yyj28++ + if yyhl28 { + yyb28 = yyj28 > l + } else { + yyb28 = r.CheckBreak() + } + if yyb28 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ClusterCIDR = "" + } else { + x.ClusterCIDR = string(r.DecodeString()) + } + yyj28++ + if yyhl28 { + yyb28 = yyj28 > l } else { - yyb27 = r.CheckBreak() + yyb28 = r.CheckBreak() } - if yyb27 { + if yyb28 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -749,13 +790,13 @@ func (x *KubeProxyConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.De } else { x.HealthzBindAddress = string(r.DecodeString()) } - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l + yyj28++ + if yyhl28 { + yyb28 = yyj28 > l } else { - yyb27 = r.CheckBreak() + yyb28 = r.CheckBreak() } - if yyb27 { + if yyb28 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -763,15 +804,15 @@ func (x *KubeProxyConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.De if r.TryDecodeAsNil() { x.HealthzPort = 0 } else { - x.HealthzPort = int(r.DecodeInt(codecSelferBitsize1234)) + x.HealthzPort = int32(r.DecodeInt(32)) } - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l + yyj28++ + if yyhl28 { + yyb28 = yyj28 > l } else { - yyb27 = r.CheckBreak() + yyb28 = r.CheckBreak() } - if yyb27 { + if yyb28 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -781,13 +822,13 @@ func (x *KubeProxyConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.De } else { x.HostnameOverride = string(r.DecodeString()) } - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l + yyj28++ + if yyhl28 { + yyb28 = yyj28 > l } else { - yyb27 = r.CheckBreak() + yyb28 = r.CheckBreak() } - if yyb27 { + if yyb28 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -798,22 +839,22 @@ func (x *KubeProxyConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.De } } else { if x.IPTablesMasqueradeBit == nil { - x.IPTablesMasqueradeBit = new(int) + x.IPTablesMasqueradeBit = new(int32) } - yym33 := z.DecBinary() - _ = yym33 + yym35 := z.DecBinary() + _ = yym35 if false { } else { - *((*int)(x.IPTablesMasqueradeBit)) = int(r.DecodeInt(codecSelferBitsize1234)) + *((*int32)(x.IPTablesMasqueradeBit)) = int32(r.DecodeInt(32)) } } - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l + yyj28++ + if yyhl28 { + yyb28 = yyj28 > l } else { - yyb27 = r.CheckBreak() + yyb28 = r.CheckBreak() } - if yyb27 { + if yyb28 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -821,24 +862,24 @@ func (x *KubeProxyConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.De if r.TryDecodeAsNil() { x.IPTablesSyncPeriod = pkg1_unversioned.Duration{} } else { - yyv34 := 
&x.IPTablesSyncPeriod - yym35 := z.DecBinary() - _ = yym35 + yyv36 := &x.IPTablesSyncPeriod + yym37 := z.DecBinary() + _ = yym37 if false { - } else if z.HasExtensions() && z.DecExt(yyv34) { - } else if !yym35 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv34) + } else if z.HasExtensions() && z.DecExt(yyv36) { + } else if !yym37 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv36) } else { - z.DecFallback(yyv34, false) + z.DecFallback(yyv36, false) } } - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l + yyj28++ + if yyhl28 { + yyb28 = yyj28 > l } else { - yyb27 = r.CheckBreak() + yyb28 = r.CheckBreak() } - if yyb27 { + if yyb28 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -848,13 +889,13 @@ func (x *KubeProxyConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.De } else { x.KubeconfigPath = string(r.DecodeString()) } - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l + yyj28++ + if yyhl28 { + yyb28 = yyj28 > l } else { - yyb27 = r.CheckBreak() + yyb28 = r.CheckBreak() } - if yyb27 { + if yyb28 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -864,13 +905,13 @@ func (x *KubeProxyConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.De } else { x.MasqueradeAll = bool(r.DecodeBool()) } - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l + yyj28++ + if yyhl28 { + yyb28 = yyj28 > l } else { - yyb27 = r.CheckBreak() + yyb28 = r.CheckBreak() } - if yyb27 { + if yyb28 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -880,13 +921,13 @@ func (x *KubeProxyConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.De } else { x.Master = string(r.DecodeString()) } - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l + yyj28++ + if yyhl28 { + yyb28 = yyj28 > l } else { - yyb27 = r.CheckBreak() + yyb28 = r.CheckBreak() } - if yyb27 { + if yyb28 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -897,22 +938,22 @@ func (x *KubeProxyConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.De } } else { if x.OOMScoreAdj == nil { - x.OOMScoreAdj = new(int) + x.OOMScoreAdj = new(int32) } - yym40 := z.DecBinary() - _ = yym40 + yym42 := z.DecBinary() + _ = yym42 if false { } else { - *((*int)(x.OOMScoreAdj)) = int(r.DecodeInt(codecSelferBitsize1234)) + *((*int32)(x.OOMScoreAdj)) = int32(r.DecodeInt(32)) } } - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l + yyj28++ + if yyhl28 { + yyb28 = yyj28 > l } else { - yyb27 = r.CheckBreak() + yyb28 = r.CheckBreak() } - if yyb27 { + if yyb28 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -922,13 +963,13 @@ func (x *KubeProxyConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.De } else { x.Mode = ProxyMode(r.DecodeString()) } - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l + yyj28++ + if yyhl28 { + yyb28 = yyj28 > l } else { - yyb27 = r.CheckBreak() + yyb28 = r.CheckBreak() } - if yyb27 { + if yyb28 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -938,13 +979,13 @@ func (x *KubeProxyConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.De } else { x.PortRange = string(r.DecodeString()) } - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l + yyj28++ + if yyhl28 { + yyb28 = yyj28 > l } else { - yyb27 = r.CheckBreak() + yyb28 = r.CheckBreak() } - if yyb27 { + if yyb28 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -954,13 +995,13 @@ func (x *KubeProxyConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.De } else { x.ResourceContainer = string(r.DecodeString()) } - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l + yyj28++ 
+ if yyhl28 { + yyb28 = yyj28 > l } else { - yyb27 = r.CheckBreak() + yyb28 = r.CheckBreak() } - if yyb27 { + if yyb28 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -968,24 +1009,24 @@ func (x *KubeProxyConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.De if r.TryDecodeAsNil() { x.UDPIdleTimeout = pkg1_unversioned.Duration{} } else { - yyv44 := &x.UDPIdleTimeout - yym45 := z.DecBinary() - _ = yym45 + yyv46 := &x.UDPIdleTimeout + yym47 := z.DecBinary() + _ = yym47 if false { - } else if z.HasExtensions() && z.DecExt(yyv44) { - } else if !yym45 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv44) + } else if z.HasExtensions() && z.DecExt(yyv46) { + } else if !yym47 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv46) } else { - z.DecFallback(yyv44, false) + z.DecFallback(yyv46, false) } } - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l + yyj28++ + if yyhl28 { + yyb28 = yyj28 > l } else { - yyb27 = r.CheckBreak() + yyb28 = r.CheckBreak() } - if yyb27 { + if yyb28 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -993,15 +1034,15 @@ func (x *KubeProxyConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.De if r.TryDecodeAsNil() { x.ConntrackMax = 0 } else { - x.ConntrackMax = int(r.DecodeInt(codecSelferBitsize1234)) + x.ConntrackMax = int32(r.DecodeInt(32)) } - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l + yyj28++ + if yyhl28 { + yyb28 = yyj28 > l } else { - yyb27 = r.CheckBreak() + yyb28 = r.CheckBreak() } - if yyb27 { + if yyb28 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -1009,24 +1050,24 @@ func (x *KubeProxyConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.De if r.TryDecodeAsNil() { x.ConntrackTCPEstablishedTimeout = pkg1_unversioned.Duration{} } else { - yyv47 := &x.ConntrackTCPEstablishedTimeout - yym48 := z.DecBinary() - _ = yym48 + yyv49 := &x.ConntrackTCPEstablishedTimeout + yym50 := z.DecBinary() + _ = yym50 if false { - } else if z.HasExtensions() && z.DecExt(yyv47) { - } else if !yym48 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv47) + } else if z.HasExtensions() && z.DecExt(yyv49) { + } else if !yym50 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv49) } else { - z.DecFallback(yyv47, false) + z.DecFallback(yyv49, false) } } - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l + yyj28++ + if yyhl28 { + yyb28 = yyj28 > l } else { - yyb27 = r.CheckBreak() + yyb28 = r.CheckBreak() } - if yyb27 { + if yyb28 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -1036,13 +1077,13 @@ func (x *KubeProxyConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.De } else { x.Kind = string(r.DecodeString()) } - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l + yyj28++ + if yyhl28 { + yyb28 = yyj28 > l } else { - yyb27 = r.CheckBreak() + yyb28 = r.CheckBreak() } - if yyb27 { + if yyb28 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -1053,17 +1094,17 @@ func (x *KubeProxyConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.De x.APIVersion = string(r.DecodeString()) } for { - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l + yyj28++ + if yyhl28 { + yyb28 = yyj28 > l } else { - yyb27 = r.CheckBreak() + yyb28 = r.CheckBreak() } - if yyb27 { + if yyb28 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj27-1, "") + z.DecStructFieldNotFound(yyj28-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -1134,24 +1175,30 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } else { yysep2 := 
!z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [78]bool + var yyq2 [89]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false - yyq2[47] = x.CloudProvider != "" - yyq2[48] = x.CloudConfigFile != "" - yyq2[49] = x.KubeletCgroups != "" - yyq2[50] = x.RuntimeCgroups != "" - yyq2[51] = x.SystemCgroups != "" - yyq2[52] = x.CgroupRoot != "" - yyq2[54] = x.RktPath != "" - yyq2[56] = x.RktStage1Image != "" - yyq2[73] = true - yyq2[74] = x.NodeIP != "" + yyq2[48] = x.CloudProvider != "" + yyq2[49] = x.CloudConfigFile != "" + yyq2[50] = x.KubeletCgroups != "" + yyq2[51] = x.RuntimeCgroups != "" + yyq2[52] = x.SystemCgroups != "" + yyq2[53] = x.CgroupRoot != "" + yyq2[55] = x.RktPath != "" + yyq2[56] = x.RktAPIEndpoint != "" + yyq2[57] = x.RktStage1Image != "" + yyq2[78] = true + yyq2[79] = x.NodeIP != "" + yyq2[83] = x.EvictionHard != "" + yyq2[84] = x.EvictionSoft != "" + yyq2[85] = x.EvictionSoftGracePeriod != "" + yyq2[86] = true + yyq2[87] = x.EvictionMaxPodGracePeriod != 0 var yynn2 int if yyr2 || yy2arr2 { - r.EncodeArrayStart(78) + r.EncodeArrayStart(89) } else { - yynn2 = 68 + yynn2 = 73 for _, b := range yyq2 { if b { yynn2++ @@ -1513,17 +1560,17 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { _ = yym61 if false { } else { - r.EncodeBool(bool(x.AllowPrivileged)) + r.EncodeString(codecSelferC_UTF81234, string(x.SeccompProfileRoot)) } } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("allowPrivileged")) + r.EncodeString(codecSelferC_UTF81234, string("seccompProfileRoot")) z.EncSendContainerState(codecSelfer_containerMapValue1234) yym62 := z.EncBinary() _ = yym62 if false { } else { - r.EncodeBool(bool(x.AllowPrivileged)) + r.EncodeString(codecSelferC_UTF81234, string(x.SeccompProfileRoot)) } } if yyr2 || yy2arr2 { @@ -1532,17 +1579,17 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { _ = yym64 if false { } else { - r.EncodeString(codecSelferC_UTF81234, string(x.HostNetworkSources)) + r.EncodeBool(bool(x.AllowPrivileged)) } } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostNetworkSources")) + r.EncodeString(codecSelferC_UTF81234, string("allowPrivileged")) z.EncSendContainerState(codecSelfer_containerMapValue1234) yym65 := z.EncBinary() _ = yym65 if false { } else { - r.EncodeString(codecSelferC_UTF81234, string(x.HostNetworkSources)) + r.EncodeBool(bool(x.AllowPrivileged)) } } if yyr2 || yy2arr2 { @@ -1551,17 +1598,17 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { _ = yym67 if false { } else { - r.EncodeString(codecSelferC_UTF81234, string(x.HostPIDSources)) + r.EncodeString(codecSelferC_UTF81234, string(x.HostNetworkSources)) } } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostPIDSources")) + r.EncodeString(codecSelferC_UTF81234, string("hostNetworkSources")) z.EncSendContainerState(codecSelfer_containerMapValue1234) yym68 := z.EncBinary() _ = yym68 if false { } else { - r.EncodeString(codecSelferC_UTF81234, string(x.HostPIDSources)) + r.EncodeString(codecSelferC_UTF81234, string(x.HostNetworkSources)) } } if yyr2 || yy2arr2 { @@ -1570,17 +1617,17 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { _ = yym70 if false { } else { - r.EncodeString(codecSelferC_UTF81234, string(x.HostIPCSources)) + r.EncodeString(codecSelferC_UTF81234, string(x.HostPIDSources)) } } else { 
z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostIPCSources")) + r.EncodeString(codecSelferC_UTF81234, string("hostPIDSources")) z.EncSendContainerState(codecSelfer_containerMapValue1234) yym71 := z.EncBinary() _ = yym71 if false { } else { - r.EncodeString(codecSelferC_UTF81234, string(x.HostIPCSources)) + r.EncodeString(codecSelferC_UTF81234, string(x.HostPIDSources)) } } if yyr2 || yy2arr2 { @@ -1589,17 +1636,17 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { _ = yym73 if false { } else { - r.EncodeFloat64(float64(x.RegistryPullQPS)) + r.EncodeString(codecSelferC_UTF81234, string(x.HostIPCSources)) } } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("registryPullQPS")) + r.EncodeString(codecSelferC_UTF81234, string("hostIPCSources")) z.EncSendContainerState(codecSelfer_containerMapValue1234) yym74 := z.EncBinary() _ = yym74 if false { } else { - r.EncodeFloat64(float64(x.RegistryPullQPS)) + r.EncodeString(codecSelferC_UTF81234, string(x.HostIPCSources)) } } if yyr2 || yy2arr2 { @@ -1608,17 +1655,17 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { _ = yym76 if false { } else { - r.EncodeInt(int64(x.RegistryBurst)) + r.EncodeFloat64(float64(x.RegistryPullQPS)) } } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("registryBurst")) + r.EncodeString(codecSelferC_UTF81234, string("registryPullQPS")) z.EncSendContainerState(codecSelfer_containerMapValue1234) yym77 := z.EncBinary() _ = yym77 if false { } else { - r.EncodeInt(int64(x.RegistryBurst)) + r.EncodeFloat64(float64(x.RegistryPullQPS)) } } if yyr2 || yy2arr2 { @@ -1627,17 +1674,17 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { _ = yym79 if false { } else { - r.EncodeFloat32(float32(x.EventRecordQPS)) + r.EncodeInt(int64(x.RegistryBurst)) } } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("eventRecordQPS")) + r.EncodeString(codecSelferC_UTF81234, string("registryBurst")) z.EncSendContainerState(codecSelfer_containerMapValue1234) yym80 := z.EncBinary() _ = yym80 if false { } else { - r.EncodeFloat32(float32(x.EventRecordQPS)) + r.EncodeInt(int64(x.RegistryBurst)) } } if yyr2 || yy2arr2 { @@ -1646,17 +1693,17 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { _ = yym82 if false { } else { - r.EncodeInt(int64(x.EventBurst)) + r.EncodeFloat32(float32(x.EventRecordQPS)) } } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("eventBurst")) + r.EncodeString(codecSelferC_UTF81234, string("eventRecordQPS")) z.EncSendContainerState(codecSelfer_containerMapValue1234) yym83 := z.EncBinary() _ = yym83 if false { } else { - r.EncodeInt(int64(x.EventBurst)) + r.EncodeFloat32(float32(x.EventRecordQPS)) } } if yyr2 || yy2arr2 { @@ -1665,63 +1712,63 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { _ = yym85 if false { } else { - r.EncodeBool(bool(x.EnableDebuggingHandlers)) + r.EncodeInt(int64(x.EventBurst)) } } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("enableDebuggingHandlers")) + r.EncodeString(codecSelferC_UTF81234, string("eventBurst")) z.EncSendContainerState(codecSelfer_containerMapValue1234) yym86 := z.EncBinary() _ = yym86 if false { } else { - 
r.EncodeBool(bool(x.EnableDebuggingHandlers)) + r.EncodeInt(int64(x.EventBurst)) } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy88 := &x.MinimumGCAge - yym89 := z.EncBinary() - _ = yym89 + yym88 := z.EncBinary() + _ = yym88 if false { - } else if z.HasExtensions() && z.EncExt(yy88) { - } else if !yym89 && z.IsJSONHandle() { - z.EncJSONMarshal(yy88) } else { - z.EncFallback(yy88) + r.EncodeBool(bool(x.EnableDebuggingHandlers)) } } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("minimumGCAge")) + r.EncodeString(codecSelferC_UTF81234, string("enableDebuggingHandlers")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy90 := &x.MinimumGCAge - yym91 := z.EncBinary() - _ = yym91 + yym89 := z.EncBinary() + _ = yym89 if false { - } else if z.HasExtensions() && z.EncExt(yy90) { - } else if !yym91 && z.IsJSONHandle() { - z.EncJSONMarshal(yy90) } else { - z.EncFallback(yy90) + r.EncodeBool(bool(x.EnableDebuggingHandlers)) } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym93 := z.EncBinary() - _ = yym93 + yy91 := &x.MinimumGCAge + yym92 := z.EncBinary() + _ = yym92 if false { + } else if z.HasExtensions() && z.EncExt(yy91) { + } else if !yym92 && z.IsJSONHandle() { + z.EncJSONMarshal(yy91) } else { - r.EncodeInt(int64(x.MaxPerPodContainerCount)) + z.EncFallback(yy91) } } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("maxPerPodContainerCount")) + r.EncodeString(codecSelferC_UTF81234, string("minimumGCAge")) z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy93 := &x.MinimumGCAge yym94 := z.EncBinary() _ = yym94 if false { + } else if z.HasExtensions() && z.EncExt(yy93) { + } else if !yym94 && z.IsJSONHandle() { + z.EncJSONMarshal(yy93) } else { - r.EncodeInt(int64(x.MaxPerPodContainerCount)) + z.EncFallback(yy93) } } if yyr2 || yy2arr2 { @@ -1730,17 +1777,17 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { _ = yym96 if false { } else { - r.EncodeInt(int64(x.MaxContainerCount)) + r.EncodeInt(int64(x.MaxPerPodContainerCount)) } } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("maxContainerCount")) + r.EncodeString(codecSelferC_UTF81234, string("maxPerPodContainerCount")) z.EncSendContainerState(codecSelfer_containerMapValue1234) yym97 := z.EncBinary() _ = yym97 if false { } else { - r.EncodeInt(int64(x.MaxContainerCount)) + r.EncodeInt(int64(x.MaxPerPodContainerCount)) } } if yyr2 || yy2arr2 { @@ -1749,17 +1796,17 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { _ = yym99 if false { } else { - r.EncodeUint(uint64(x.CAdvisorPort)) + r.EncodeInt(int64(x.MaxContainerCount)) } } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("cAdvisorPort")) + r.EncodeString(codecSelferC_UTF81234, string("maxContainerCount")) z.EncSendContainerState(codecSelfer_containerMapValue1234) yym100 := z.EncBinary() _ = yym100 if false { } else { - r.EncodeUint(uint64(x.CAdvisorPort)) + r.EncodeInt(int64(x.MaxContainerCount)) } } if yyr2 || yy2arr2 { @@ -1768,17 +1815,17 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { _ = yym102 if false { } else { - r.EncodeInt(int64(x.HealthzPort)) + r.EncodeUint(uint64(x.CAdvisorPort)) } } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) - 
r.EncodeString(codecSelferC_UTF81234, string("healthzPort")) + r.EncodeString(codecSelferC_UTF81234, string("cAdvisorPort")) z.EncSendContainerState(codecSelfer_containerMapValue1234) yym103 := z.EncBinary() _ = yym103 if false { } else { - r.EncodeInt(int64(x.HealthzPort)) + r.EncodeUint(uint64(x.CAdvisorPort)) } } if yyr2 || yy2arr2 { @@ -1787,17 +1834,17 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { _ = yym105 if false { } else { - r.EncodeString(codecSelferC_UTF81234, string(x.HealthzBindAddress)) + r.EncodeInt(int64(x.HealthzPort)) } } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("healthzBindAddress")) + r.EncodeString(codecSelferC_UTF81234, string("healthzPort")) z.EncSendContainerState(codecSelfer_containerMapValue1234) yym106 := z.EncBinary() _ = yym106 if false { } else { - r.EncodeString(codecSelferC_UTF81234, string(x.HealthzBindAddress)) + r.EncodeInt(int64(x.HealthzPort)) } } if yyr2 || yy2arr2 { @@ -1806,17 +1853,17 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { _ = yym108 if false { } else { - r.EncodeInt(int64(x.OOMScoreAdj)) + r.EncodeString(codecSelferC_UTF81234, string(x.HealthzBindAddress)) } } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("oomScoreAdj")) + r.EncodeString(codecSelferC_UTF81234, string("healthzBindAddress")) z.EncSendContainerState(codecSelfer_containerMapValue1234) yym109 := z.EncBinary() _ = yym109 if false { } else { - r.EncodeInt(int64(x.OOMScoreAdj)) + r.EncodeString(codecSelferC_UTF81234, string(x.HealthzBindAddress)) } } if yyr2 || yy2arr2 { @@ -1825,17 +1872,17 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { _ = yym111 if false { } else { - r.EncodeBool(bool(x.RegisterNode)) + r.EncodeInt(int64(x.OOMScoreAdj)) } } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("registerNode")) + r.EncodeString(codecSelferC_UTF81234, string("oomScoreAdj")) z.EncSendContainerState(codecSelfer_containerMapValue1234) yym112 := z.EncBinary() _ = yym112 if false { } else { - r.EncodeBool(bool(x.RegisterNode)) + r.EncodeInt(int64(x.OOMScoreAdj)) } } if yyr2 || yy2arr2 { @@ -1844,17 +1891,17 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { _ = yym114 if false { } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ClusterDomain)) + r.EncodeBool(bool(x.RegisterNode)) } } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("clusterDomain")) + r.EncodeString(codecSelferC_UTF81234, string("registerNode")) z.EncSendContainerState(codecSelfer_containerMapValue1234) yym115 := z.EncBinary() _ = yym115 if false { } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ClusterDomain)) + r.EncodeBool(bool(x.RegisterNode)) } } if yyr2 || yy2arr2 { @@ -1863,17 +1910,17 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { _ = yym117 if false { } else { - r.EncodeString(codecSelferC_UTF81234, string(x.MasterServiceNamespace)) + r.EncodeString(codecSelferC_UTF81234, string(x.ClusterDomain)) } } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("masterServiceNamespace")) + r.EncodeString(codecSelferC_UTF81234, string("clusterDomain")) z.EncSendContainerState(codecSelfer_containerMapValue1234) yym118 := z.EncBinary() _ = yym118 if false { } else { - 
r.EncodeString(codecSelferC_UTF81234, string(x.MasterServiceNamespace)) + r.EncodeString(codecSelferC_UTF81234, string(x.ClusterDomain)) } } if yyr2 || yy2arr2 { @@ -1882,49 +1929,55 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { _ = yym120 if false { } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ClusterDNS)) + r.EncodeString(codecSelferC_UTF81234, string(x.MasterServiceNamespace)) } } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("clusterDNS")) + r.EncodeString(codecSelferC_UTF81234, string("masterServiceNamespace")) z.EncSendContainerState(codecSelfer_containerMapValue1234) yym121 := z.EncBinary() _ = yym121 if false { } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ClusterDNS)) + r.EncodeString(codecSelferC_UTF81234, string(x.MasterServiceNamespace)) } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy123 := &x.StreamingConnectionIdleTimeout - yym124 := z.EncBinary() - _ = yym124 + yym123 := z.EncBinary() + _ = yym123 if false { - } else if z.HasExtensions() && z.EncExt(yy123) { - } else if !yym124 && z.IsJSONHandle() { - z.EncJSONMarshal(yy123) } else { - z.EncFallback(yy123) + r.EncodeString(codecSelferC_UTF81234, string(x.ClusterDNS)) } } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("streamingConnectionIdleTimeout")) + r.EncodeString(codecSelferC_UTF81234, string("clusterDNS")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy125 := &x.StreamingConnectionIdleTimeout - yym126 := z.EncBinary() - _ = yym126 + yym124 := z.EncBinary() + _ = yym124 if false { - } else if z.HasExtensions() && z.EncExt(yy125) { - } else if !yym126 && z.IsJSONHandle() { - z.EncJSONMarshal(yy125) } else { - z.EncFallback(yy125) + r.EncodeString(codecSelferC_UTF81234, string(x.ClusterDNS)) } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy128 := &x.NodeStatusUpdateFrequency + yy126 := &x.StreamingConnectionIdleTimeout + yym127 := z.EncBinary() + _ = yym127 + if false { + } else if z.HasExtensions() && z.EncExt(yy126) { + } else if !yym127 && z.IsJSONHandle() { + z.EncJSONMarshal(yy126) + } else { + z.EncFallback(yy126) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("streamingConnectionIdleTimeout")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy128 := &x.StreamingConnectionIdleTimeout yym129 := z.EncBinary() _ = yym129 if false { @@ -1934,24 +1987,24 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } else { z.EncFallback(yy128) } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy131 := &x.NodeStatusUpdateFrequency + yym132 := z.EncBinary() + _ = yym132 + if false { + } else if z.HasExtensions() && z.EncExt(yy131) { + } else if !yym132 && z.IsJSONHandle() { + z.EncJSONMarshal(yy131) + } else { + z.EncFallback(yy131) + } } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("nodeStatusUpdateFrequency")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy130 := &x.NodeStatusUpdateFrequency - yym131 := z.EncBinary() - _ = yym131 - if false { - } else if z.HasExtensions() && z.EncExt(yy130) { - } else if !yym131 && z.IsJSONHandle() { - z.EncJSONMarshal(yy130) - } else { - z.EncFallback(yy130) - } - } - if yyr2 || yy2arr2 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy133 := &x.ImageMinimumGCAge + yy133 := &x.NodeStatusUpdateFrequency yym134 := z.EncBinary() _ = yym134 if false { @@ -1961,25 +2014,38 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } else { z.EncFallback(yy133) } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy136 := &x.ImageMinimumGCAge + yym137 := z.EncBinary() + _ = yym137 + if false { + } else if z.HasExtensions() && z.EncExt(yy136) { + } else if !yym137 && z.IsJSONHandle() { + z.EncJSONMarshal(yy136) + } else { + z.EncFallback(yy136) + } } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("imageMinimumGCAge")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy135 := &x.ImageMinimumGCAge - yym136 := z.EncBinary() - _ = yym136 + yy138 := &x.ImageMinimumGCAge + yym139 := z.EncBinary() + _ = yym139 if false { - } else if z.HasExtensions() && z.EncExt(yy135) { - } else if !yym136 && z.IsJSONHandle() { - z.EncJSONMarshal(yy135) + } else if z.HasExtensions() && z.EncExt(yy138) { + } else if !yym139 && z.IsJSONHandle() { + z.EncJSONMarshal(yy138) } else { - z.EncFallback(yy135) + z.EncFallback(yy138) } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym138 := z.EncBinary() - _ = yym138 + yym141 := z.EncBinary() + _ = yym141 if false { } else { r.EncodeInt(int64(x.ImageGCHighThresholdPercent)) @@ -1988,8 +2054,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("imageGCHighThresholdPercent")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym139 := z.EncBinary() - _ = yym139 + yym142 := z.EncBinary() + _ = yym142 if false { } else { r.EncodeInt(int64(x.ImageGCHighThresholdPercent)) @@ -1997,8 +2063,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym141 := z.EncBinary() - _ = yym141 + yym144 := z.EncBinary() + _ = yym144 if false { } else { r.EncodeInt(int64(x.ImageGCLowThresholdPercent)) @@ -2007,8 +2073,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("imageGCLowThresholdPercent")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym142 := z.EncBinary() - _ = yym142 + yym145 := z.EncBinary() + _ = yym145 if false { } else { r.EncodeInt(int64(x.ImageGCLowThresholdPercent)) @@ -2016,8 +2082,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym144 := z.EncBinary() - _ = yym144 + yym147 := z.EncBinary() + _ = yym147 if false { } else { r.EncodeInt(int64(x.LowDiskSpaceThresholdMB)) @@ -2026,8 +2092,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("lowDiskSpaceThresholdMB")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym145 := z.EncBinary() - _ = yym145 + yym148 := z.EncBinary() + _ = yym148 if false { } else { r.EncodeInt(int64(x.LowDiskSpaceThresholdMB)) @@ -2035,35 +2101,35 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || 
yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy147 := &x.VolumeStatsAggPeriod - yym148 := z.EncBinary() - _ = yym148 + yy150 := &x.VolumeStatsAggPeriod + yym151 := z.EncBinary() + _ = yym151 if false { - } else if z.HasExtensions() && z.EncExt(yy147) { - } else if !yym148 && z.IsJSONHandle() { - z.EncJSONMarshal(yy147) + } else if z.HasExtensions() && z.EncExt(yy150) { + } else if !yym151 && z.IsJSONHandle() { + z.EncJSONMarshal(yy150) } else { - z.EncFallback(yy147) + z.EncFallback(yy150) } } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("VolumeStatsAggPeriod")) + r.EncodeString(codecSelferC_UTF81234, string("volumeStatsAggPeriod")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy149 := &x.VolumeStatsAggPeriod - yym150 := z.EncBinary() - _ = yym150 + yy152 := &x.VolumeStatsAggPeriod + yym153 := z.EncBinary() + _ = yym153 if false { - } else if z.HasExtensions() && z.EncExt(yy149) { - } else if !yym150 && z.IsJSONHandle() { - z.EncJSONMarshal(yy149) + } else if z.HasExtensions() && z.EncExt(yy152) { + } else if !yym153 && z.IsJSONHandle() { + z.EncJSONMarshal(yy152) } else { - z.EncFallback(yy149) + z.EncFallback(yy152) } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym152 := z.EncBinary() - _ = yym152 + yym155 := z.EncBinary() + _ = yym155 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.NetworkPluginName)) @@ -2072,8 +2138,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("networkPluginName")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym153 := z.EncBinary() - _ = yym153 + yym156 := z.EncBinary() + _ = yym156 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.NetworkPluginName)) @@ -2081,8 +2147,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym155 := z.EncBinary() - _ = yym155 + yym158 := z.EncBinary() + _ = yym158 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.NetworkPluginDir)) @@ -2091,8 +2157,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("networkPluginDir")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym156 := z.EncBinary() - _ = yym156 + yym159 := z.EncBinary() + _ = yym159 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.NetworkPluginDir)) @@ -2100,8 +2166,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym158 := z.EncBinary() - _ = yym158 + yym161 := z.EncBinary() + _ = yym161 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.VolumePluginDir)) @@ -2110,8 +2176,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("volumePluginDir")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym159 := z.EncBinary() - _ = yym159 + yym162 := z.EncBinary() + _ = yym162 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.VolumePluginDir)) @@ -2119,9 +2185,9 @@ func (x *KubeletConfiguration) 
CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[47] { - yym161 := z.EncBinary() - _ = yym161 + if yyq2[48] { + yym164 := z.EncBinary() + _ = yym164 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.CloudProvider)) @@ -2130,12 +2196,12 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2[47] { + if yyq2[48] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("cloudProvider")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym162 := z.EncBinary() - _ = yym162 + yym165 := z.EncBinary() + _ = yym165 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.CloudProvider)) @@ -2144,9 +2210,9 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[48] { - yym164 := z.EncBinary() - _ = yym164 + if yyq2[49] { + yym167 := z.EncBinary() + _ = yym167 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.CloudConfigFile)) @@ -2155,12 +2221,12 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2[48] { + if yyq2[49] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("cloudConfigFile")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym165 := z.EncBinary() - _ = yym165 + yym168 := z.EncBinary() + _ = yym168 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.CloudConfigFile)) @@ -2169,9 +2235,9 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[49] { - yym167 := z.EncBinary() - _ = yym167 + if yyq2[50] { + yym170 := z.EncBinary() + _ = yym170 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.KubeletCgroups)) @@ -2180,12 +2246,12 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2[49] { + if yyq2[50] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kubeletCgroups")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym168 := z.EncBinary() - _ = yym168 + yym171 := z.EncBinary() + _ = yym171 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.KubeletCgroups)) @@ -2194,9 +2260,9 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[50] { - yym170 := z.EncBinary() - _ = yym170 + if yyq2[51] { + yym173 := z.EncBinary() + _ = yym173 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.RuntimeCgroups)) @@ -2205,12 +2271,12 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2[50] { + if yyq2[51] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("runtimeCgroups")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym171 := z.EncBinary() - _ = yym171 + yym174 := z.EncBinary() + _ = yym174 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.RuntimeCgroups)) @@ -2219,9 +2285,9 @@ func (x 
*KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[51] { - yym173 := z.EncBinary() - _ = yym173 + if yyq2[52] { + yym176 := z.EncBinary() + _ = yym176 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.SystemCgroups)) @@ -2230,12 +2296,12 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2[51] { + if yyq2[52] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("systemContainer")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym174 := z.EncBinary() - _ = yym174 + yym177 := z.EncBinary() + _ = yym177 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.SystemCgroups)) @@ -2244,9 +2310,9 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[52] { - yym176 := z.EncBinary() - _ = yym176 + if yyq2[53] { + yym179 := z.EncBinary() + _ = yym179 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.CgroupRoot)) @@ -2255,12 +2321,12 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2[52] { + if yyq2[53] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("cgroupRoot")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym177 := z.EncBinary() - _ = yym177 + yym180 := z.EncBinary() + _ = yym180 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.CgroupRoot)) @@ -2269,8 +2335,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym179 := z.EncBinary() - _ = yym179 + yym182 := z.EncBinary() + _ = yym182 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.ContainerRuntime)) @@ -2279,8 +2345,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("containerRuntime")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym180 := z.EncBinary() - _ = yym180 + yym183 := z.EncBinary() + _ = yym183 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.ContainerRuntime)) @@ -2288,9 +2354,9 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[54] { - yym182 := z.EncBinary() - _ = yym182 + if yyq2[55] { + yym185 := z.EncBinary() + _ = yym185 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.RktPath)) @@ -2299,12 +2365,12 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2[54] { + if yyq2[55] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("rktPath")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym183 := z.EncBinary() - _ = yym183 + yym186 := z.EncBinary() + _ = yym186 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.RktPath)) @@ -2313,28 +2379,34 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym185 := z.EncBinary() - _ = yym185 - if false { + if yyq2[56] { + yym188 := z.EncBinary() + _ = yym188 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.RktAPIEndpoint)) + } } else { - r.EncodeString(codecSelferC_UTF81234, string(x.LockFilePath)) + r.EncodeString(codecSelferC_UTF81234, "") } } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lockFilePath")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym186 := z.EncBinary() - _ = yym186 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.LockFilePath)) + if yyq2[56] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("rktAPIEndpoint")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym189 := z.EncBinary() + _ = yym189 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.RktAPIEndpoint)) + } } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[56] { - yym188 := z.EncBinary() - _ = yym188 + if yyq2[57] { + yym191 := z.EncBinary() + _ = yym191 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.RktStage1Image)) @@ -2343,12 +2415,12 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2[56] { + if yyq2[57] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("rktStage1Image")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym189 := z.EncBinary() - _ = yym189 + yym192 := z.EncBinary() + _ = yym192 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.RktStage1Image)) @@ -2357,8 +2429,46 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym191 := z.EncBinary() - _ = yym191 + yym194 := z.EncBinary() + _ = yym194 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.LockFilePath)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("lockFilePath")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym195 := z.EncBinary() + _ = yym195 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.LockFilePath)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym197 := z.EncBinary() + _ = yym197 + if false { + } else { + r.EncodeBool(bool(x.ExitOnLockContention)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("exitOnLockContention")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym198 := z.EncBinary() + _ = yym198 + if false { + } else { + r.EncodeBool(bool(x.ExitOnLockContention)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym200 := z.EncBinary() + _ = yym200 if false { } else { r.EncodeBool(bool(x.ConfigureCBR0)) @@ -2367,8 +2477,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("configureCbr0")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym192 := z.EncBinary() - _ = yym192 + yym201 := z.EncBinary() + _ = 
yym201 if false { } else { r.EncodeBool(bool(x.ConfigureCBR0)) @@ -2376,8 +2486,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym194 := z.EncBinary() - _ = yym194 + yym203 := z.EncBinary() + _ = yym203 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.HairpinMode)) @@ -2386,8 +2496,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("hairpinMode")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym195 := z.EncBinary() - _ = yym195 + yym204 := z.EncBinary() + _ = yym204 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.HairpinMode)) @@ -2395,8 +2505,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym197 := z.EncBinary() - _ = yym197 + yym206 := z.EncBinary() + _ = yym206 if false { } else { r.EncodeBool(bool(x.BabysitDaemons)) @@ -2405,8 +2515,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("babysitDaemons")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym198 := z.EncBinary() - _ = yym198 + yym207 := z.EncBinary() + _ = yym207 if false { } else { r.EncodeBool(bool(x.BabysitDaemons)) @@ -2414,8 +2524,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym200 := z.EncBinary() - _ = yym200 + yym209 := z.EncBinary() + _ = yym209 if false { } else { r.EncodeInt(int64(x.MaxPods)) @@ -2424,8 +2534,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("maxPods")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym201 := z.EncBinary() - _ = yym201 + yym210 := z.EncBinary() + _ = yym210 if false { } else { r.EncodeInt(int64(x.MaxPods)) @@ -2433,8 +2543,27 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym203 := z.EncBinary() - _ = yym203 + yym212 := z.EncBinary() + _ = yym212 + if false { + } else { + r.EncodeInt(int64(x.NvidiaGPUs)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("nvidiaGPUs")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym213 := z.EncBinary() + _ = yym213 + if false { + } else { + r.EncodeInt(int64(x.NvidiaGPUs)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym215 := z.EncBinary() + _ = yym215 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.DockerExecHandlerName)) @@ -2443,8 +2572,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("dockerExecHandlerName")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym204 := z.EncBinary() - _ = yym204 + yym216 := z.EncBinary() + _ = yym216 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.DockerExecHandlerName)) @@ -2452,8 +2581,8 @@ func (x 
*KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym206 := z.EncBinary() - _ = yym206 + yym218 := z.EncBinary() + _ = yym218 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.PodCIDR)) @@ -2462,8 +2591,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("podCIDR")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym207 := z.EncBinary() - _ = yym207 + yym219 := z.EncBinary() + _ = yym219 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.PodCIDR)) @@ -2471,8 +2600,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym209 := z.EncBinary() - _ = yym209 + yym221 := z.EncBinary() + _ = yym221 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.ResolverConfig)) @@ -2481,8 +2610,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("resolvConf")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym210 := z.EncBinary() - _ = yym210 + yym222 := z.EncBinary() + _ = yym222 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.ResolverConfig)) @@ -2490,8 +2619,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym212 := z.EncBinary() - _ = yym212 + yym224 := z.EncBinary() + _ = yym224 if false { } else { r.EncodeBool(bool(x.CPUCFSQuota)) @@ -2499,9 +2628,9 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("cpuCFSQuota")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym213 := z.EncBinary() - _ = yym213 + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym225 := z.EncBinary() + _ = yym225 if false { } else { r.EncodeBool(bool(x.CPUCFSQuota)) @@ -2509,8 +2638,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym215 := z.EncBinary() - _ = yym215 + yym227 := z.EncBinary() + _ = yym227 if false { } else { r.EncodeBool(bool(x.Containerized)) @@ -2519,8 +2648,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("containerized")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym216 := z.EncBinary() - _ = yym216 + yym228 := z.EncBinary() + _ = yym228 if false { } else { r.EncodeBool(bool(x.Containerized)) @@ -2528,8 +2657,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym218 := z.EncBinary() - _ = yym218 + yym230 := z.EncBinary() + _ = yym230 if false { } else { r.EncodeUint(uint64(x.MaxOpenFiles)) @@ -2538,8 +2667,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("maxOpenFiles")) 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym219 := z.EncBinary() - _ = yym219 + yym231 := z.EncBinary() + _ = yym231 if false { } else { r.EncodeUint(uint64(x.MaxOpenFiles)) @@ -2547,8 +2676,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym221 := z.EncBinary() - _ = yym221 + yym233 := z.EncBinary() + _ = yym233 if false { } else { r.EncodeBool(bool(x.ReconcileCIDR)) @@ -2557,8 +2686,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("reconcileCIDR")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym222 := z.EncBinary() - _ = yym222 + yym234 := z.EncBinary() + _ = yym234 if false { } else { r.EncodeBool(bool(x.ReconcileCIDR)) @@ -2566,8 +2695,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym224 := z.EncBinary() - _ = yym224 + yym236 := z.EncBinary() + _ = yym236 if false { } else { r.EncodeBool(bool(x.RegisterSchedulable)) @@ -2576,8 +2705,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("registerSchedulable")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym225 := z.EncBinary() - _ = yym225 + yym237 := z.EncBinary() + _ = yym237 if false { } else { r.EncodeBool(bool(x.RegisterSchedulable)) @@ -2585,8 +2714,27 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym227 := z.EncBinary() - _ = yym227 + yym239 := z.EncBinary() + _ = yym239 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ContentType)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("contentType")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym240 := z.EncBinary() + _ = yym240 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ContentType)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym242 := z.EncBinary() + _ = yym242 if false { } else { r.EncodeFloat32(float32(x.KubeAPIQPS)) @@ -2595,8 +2743,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kubeAPIQPS")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym228 := z.EncBinary() - _ = yym228 + yym243 := z.EncBinary() + _ = yym243 if false { } else { r.EncodeFloat32(float32(x.KubeAPIQPS)) @@ -2604,8 +2752,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym230 := z.EncBinary() - _ = yym230 + yym245 := z.EncBinary() + _ = yym245 if false { } else { r.EncodeInt(int64(x.KubeAPIBurst)) @@ -2614,8 +2762,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kubeAPIBurst")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym231 := z.EncBinary() - _ = yym231 + yym246 := z.EncBinary() + 
_ = yym246 if false { } else { r.EncodeInt(int64(x.KubeAPIBurst)) @@ -2623,8 +2771,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym233 := z.EncBinary() - _ = yym233 + yym248 := z.EncBinary() + _ = yym248 if false { } else { r.EncodeBool(bool(x.SerializeImagePulls)) @@ -2633,8 +2781,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("serializeImagePulls")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym234 := z.EncBinary() - _ = yym234 + yym249 := z.EncBinary() + _ = yym249 if false { } else { r.EncodeBool(bool(x.SerializeImagePulls)) @@ -2642,8 +2790,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym236 := z.EncBinary() - _ = yym236 + yym251 := z.EncBinary() + _ = yym251 if false { } else { r.EncodeBool(bool(x.ExperimentalFlannelOverlay)) @@ -2652,8 +2800,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("experimentalFlannelOverlay")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym237 := z.EncBinary() - _ = yym237 + yym252 := z.EncBinary() + _ = yym252 if false { } else { r.EncodeBool(bool(x.ExperimentalFlannelOverlay)) @@ -2661,42 +2809,42 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[73] { - yy239 := &x.OutOfDiskTransitionFrequency - yym240 := z.EncBinary() - _ = yym240 + if yyq2[78] { + yy254 := &x.OutOfDiskTransitionFrequency + yym255 := z.EncBinary() + _ = yym255 if false { - } else if z.HasExtensions() && z.EncExt(yy239) { - } else if !yym240 && z.IsJSONHandle() { - z.EncJSONMarshal(yy239) + } else if z.HasExtensions() && z.EncExt(yy254) { + } else if !yym255 && z.IsJSONHandle() { + z.EncJSONMarshal(yy254) } else { - z.EncFallback(yy239) + z.EncFallback(yy254) } } else { r.EncodeNil() } } else { - if yyq2[73] { + if yyq2[78] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("outOfDiskTransitionFrequency")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy241 := &x.OutOfDiskTransitionFrequency - yym242 := z.EncBinary() - _ = yym242 + yy256 := &x.OutOfDiskTransitionFrequency + yym257 := z.EncBinary() + _ = yym257 if false { - } else if z.HasExtensions() && z.EncExt(yy241) { - } else if !yym242 && z.IsJSONHandle() { - z.EncJSONMarshal(yy241) + } else if z.HasExtensions() && z.EncExt(yy256) { + } else if !yym257 && z.IsJSONHandle() { + z.EncJSONMarshal(yy256) } else { - z.EncFallback(yy241) + z.EncFallback(yy256) } } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[74] { - yym244 := z.EncBinary() - _ = yym244 + if yyq2[79] { + yym259 := z.EncBinary() + _ = yym259 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.NodeIP)) @@ -2705,12 +2853,12 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2[74] { + if yyq2[79] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("nodeIP")) 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym245 := z.EncBinary() - _ = yym245 + yym260 := z.EncBinary() + _ = yym260 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.NodeIP)) @@ -2722,8 +2870,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { if x.NodeLabels == nil { r.EncodeNil() } else { - yym247 := z.EncBinary() - _ = yym247 + yym262 := z.EncBinary() + _ = yym262 if false { } else { z.F.EncMapStringStringV(x.NodeLabels, false, e) @@ -2736,8 +2884,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { if x.NodeLabels == nil { r.EncodeNil() } else { - yym248 := z.EncBinary() - _ = yym248 + yym263 := z.EncBinary() + _ = yym263 if false { } else { z.F.EncMapStringStringV(x.NodeLabels, false, e) @@ -2746,8 +2894,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym250 := z.EncBinary() - _ = yym250 + yym265 := z.EncBinary() + _ = yym265 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.NonMasqueradeCIDR)) @@ -2756,8 +2904,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("nonMasqueradeCIDR")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym251 := z.EncBinary() - _ = yym251 + yym266 := z.EncBinary() + _ = yym266 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.NonMasqueradeCIDR)) @@ -2765,8 +2913,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym253 := z.EncBinary() - _ = yym253 + yym268 := z.EncBinary() + _ = yym268 if false { } else { r.EncodeBool(bool(x.EnableCustomMetrics)) @@ -2775,13 +2923,165 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("enableCustomMetrics")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym254 := z.EncBinary() - _ = yym254 + yym269 := z.EncBinary() + _ = yym269 if false { } else { r.EncodeBool(bool(x.EnableCustomMetrics)) } } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[83] { + yym271 := z.EncBinary() + _ = yym271 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.EvictionHard)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[83] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("evictionHard")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym272 := z.EncBinary() + _ = yym272 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.EvictionHard)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[84] { + yym274 := z.EncBinary() + _ = yym274 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.EvictionSoft)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[84] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("evictionSoft")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym275 := z.EncBinary() + _ = yym275 + if false { + } else { + 
r.EncodeString(codecSelferC_UTF81234, string(x.EvictionSoft)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[85] { + yym277 := z.EncBinary() + _ = yym277 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.EvictionSoftGracePeriod)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[85] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("evictionSoftGracePeriod")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym278 := z.EncBinary() + _ = yym278 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.EvictionSoftGracePeriod)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[86] { + yy280 := &x.EvictionPressureTransitionPeriod + yym281 := z.EncBinary() + _ = yym281 + if false { + } else if z.HasExtensions() && z.EncExt(yy280) { + } else if !yym281 && z.IsJSONHandle() { + z.EncJSONMarshal(yy280) + } else { + z.EncFallback(yy280) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[86] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("evictionPressureTransitionPeriod")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy282 := &x.EvictionPressureTransitionPeriod + yym283 := z.EncBinary() + _ = yym283 + if false { + } else if z.HasExtensions() && z.EncExt(yy282) { + } else if !yym283 && z.IsJSONHandle() { + z.EncJSONMarshal(yy282) + } else { + z.EncFallback(yy282) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[87] { + yym285 := z.EncBinary() + _ = yym285 + if false { + } else { + r.EncodeInt(int64(x.EvictionMaxPodGracePeriod)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[87] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("evictionMaxPodGracePeriod")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym286 := z.EncBinary() + _ = yym286 + if false { + } else { + r.EncodeInt(int64(x.EvictionMaxPodGracePeriod)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym288 := z.EncBinary() + _ = yym288 + if false { + } else { + r.EncodeInt(int64(x.PodsPerCore)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("podsPerCore")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym289 := z.EncBinary() + _ = yym289 + if false { + } else { + r.EncodeInt(int64(x.PodsPerCore)) + } + } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { @@ -2972,6 +3272,12 @@ func (x *KubeletConfiguration) codecDecodeSelfFromMap(l int, d *codec1978.Decode } else { x.RootDirectory = string(r.DecodeString()) } + case "seccompProfileRoot": + if r.TryDecodeAsNil() { + x.SeccompProfileRoot = "" + } else { + x.SeccompProfileRoot = string(r.DecodeString()) + } case "allowPrivileged": if r.TryDecodeAsNil() { x.AllowPrivileged = false @@ -3006,7 +3312,7 @@ func (x *KubeletConfiguration) codecDecodeSelfFromMap(l int, d *codec1978.Decode if r.TryDecodeAsNil() { x.RegistryBurst = 0 } else { - x.RegistryBurst = int(r.DecodeInt(codecSelferBitsize1234)) + x.RegistryBurst = int32(r.DecodeInt(32)) } case "eventRecordQPS": if r.TryDecodeAsNil() { @@ -3018,7 +3324,7 @@ func (x *KubeletConfiguration) 
codecDecodeSelfFromMap(l int, d *codec1978.Decode if r.TryDecodeAsNil() { x.EventBurst = 0 } else { - x.EventBurst = int(r.DecodeInt(codecSelferBitsize1234)) + x.EventBurst = int32(r.DecodeInt(32)) } case "enableDebuggingHandlers": if r.TryDecodeAsNil() { @@ -3030,28 +3336,28 @@ func (x *KubeletConfiguration) codecDecodeSelfFromMap(l int, d *codec1978.Decode if r.TryDecodeAsNil() { x.MinimumGCAge = pkg1_unversioned.Duration{} } else { - yyv33 := &x.MinimumGCAge - yym34 := z.DecBinary() - _ = yym34 + yyv34 := &x.MinimumGCAge + yym35 := z.DecBinary() + _ = yym35 if false { - } else if z.HasExtensions() && z.DecExt(yyv33) { - } else if !yym34 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv33) + } else if z.HasExtensions() && z.DecExt(yyv34) { + } else if !yym35 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv34) } else { - z.DecFallback(yyv33, false) + z.DecFallback(yyv34, false) } } case "maxPerPodContainerCount": if r.TryDecodeAsNil() { x.MaxPerPodContainerCount = 0 } else { - x.MaxPerPodContainerCount = int(r.DecodeInt(codecSelferBitsize1234)) + x.MaxPerPodContainerCount = int32(r.DecodeInt(32)) } case "maxContainerCount": if r.TryDecodeAsNil() { x.MaxContainerCount = 0 } else { - x.MaxContainerCount = int(r.DecodeInt(codecSelferBitsize1234)) + x.MaxContainerCount = int32(r.DecodeInt(32)) } case "cAdvisorPort": if r.TryDecodeAsNil() { @@ -3063,7 +3369,7 @@ func (x *KubeletConfiguration) codecDecodeSelfFromMap(l int, d *codec1978.Decode if r.TryDecodeAsNil() { x.HealthzPort = 0 } else { - x.HealthzPort = int(r.DecodeInt(codecSelferBitsize1234)) + x.HealthzPort = int32(r.DecodeInt(32)) } case "healthzBindAddress": if r.TryDecodeAsNil() { @@ -3075,7 +3381,7 @@ func (x *KubeletConfiguration) codecDecodeSelfFromMap(l int, d *codec1978.Decode if r.TryDecodeAsNil() { x.OOMScoreAdj = 0 } else { - x.OOMScoreAdj = int(r.DecodeInt(codecSelferBitsize1234)) + x.OOMScoreAdj = int32(r.DecodeInt(32)) } case "registerNode": if r.TryDecodeAsNil() { @@ -3105,78 +3411,78 @@ func (x *KubeletConfiguration) codecDecodeSelfFromMap(l int, d *codec1978.Decode if r.TryDecodeAsNil() { x.StreamingConnectionIdleTimeout = pkg1_unversioned.Duration{} } else { - yyv45 := &x.StreamingConnectionIdleTimeout - yym46 := z.DecBinary() - _ = yym46 + yyv46 := &x.StreamingConnectionIdleTimeout + yym47 := z.DecBinary() + _ = yym47 if false { - } else if z.HasExtensions() && z.DecExt(yyv45) { - } else if !yym46 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv45) + } else if z.HasExtensions() && z.DecExt(yyv46) { + } else if !yym47 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv46) } else { - z.DecFallback(yyv45, false) + z.DecFallback(yyv46, false) } } case "nodeStatusUpdateFrequency": if r.TryDecodeAsNil() { x.NodeStatusUpdateFrequency = pkg1_unversioned.Duration{} } else { - yyv47 := &x.NodeStatusUpdateFrequency - yym48 := z.DecBinary() - _ = yym48 + yyv48 := &x.NodeStatusUpdateFrequency + yym49 := z.DecBinary() + _ = yym49 if false { - } else if z.HasExtensions() && z.DecExt(yyv47) { - } else if !yym48 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv47) + } else if z.HasExtensions() && z.DecExt(yyv48) { + } else if !yym49 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv48) } else { - z.DecFallback(yyv47, false) + z.DecFallback(yyv48, false) } } case "imageMinimumGCAge": if r.TryDecodeAsNil() { x.ImageMinimumGCAge = pkg1_unversioned.Duration{} } else { - yyv49 := &x.ImageMinimumGCAge - yym50 := z.DecBinary() - _ = yym50 + yyv50 := &x.ImageMinimumGCAge + yym51 := z.DecBinary() + _ = yym51 if false { - } else if z.HasExtensions() && 
z.DecExt(yyv49) { - } else if !yym50 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv49) + } else if z.HasExtensions() && z.DecExt(yyv50) { + } else if !yym51 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv50) } else { - z.DecFallback(yyv49, false) + z.DecFallback(yyv50, false) } } case "imageGCHighThresholdPercent": if r.TryDecodeAsNil() { x.ImageGCHighThresholdPercent = 0 } else { - x.ImageGCHighThresholdPercent = int(r.DecodeInt(codecSelferBitsize1234)) + x.ImageGCHighThresholdPercent = int32(r.DecodeInt(32)) } case "imageGCLowThresholdPercent": if r.TryDecodeAsNil() { x.ImageGCLowThresholdPercent = 0 } else { - x.ImageGCLowThresholdPercent = int(r.DecodeInt(codecSelferBitsize1234)) + x.ImageGCLowThresholdPercent = int32(r.DecodeInt(32)) } case "lowDiskSpaceThresholdMB": if r.TryDecodeAsNil() { x.LowDiskSpaceThresholdMB = 0 } else { - x.LowDiskSpaceThresholdMB = int(r.DecodeInt(codecSelferBitsize1234)) + x.LowDiskSpaceThresholdMB = int32(r.DecodeInt(32)) } - case "VolumeStatsAggPeriod": + case "volumeStatsAggPeriod": if r.TryDecodeAsNil() { x.VolumeStatsAggPeriod = pkg1_unversioned.Duration{} } else { - yyv54 := &x.VolumeStatsAggPeriod - yym55 := z.DecBinary() - _ = yym55 + yyv55 := &x.VolumeStatsAggPeriod + yym56 := z.DecBinary() + _ = yym56 if false { - } else if z.HasExtensions() && z.DecExt(yyv54) { - } else if !yym55 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv54) + } else if z.HasExtensions() && z.DecExt(yyv55) { + } else if !yym56 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv55) } else { - z.DecFallback(yyv54, false) + z.DecFallback(yyv55, false) } } case "networkPluginName": @@ -3245,11 +3551,11 @@ func (x *KubeletConfiguration) codecDecodeSelfFromMap(l int, d *codec1978.Decode } else { x.RktPath = string(r.DecodeString()) } - case "lockFilePath": + case "rktAPIEndpoint": if r.TryDecodeAsNil() { - x.LockFilePath = "" + x.RktAPIEndpoint = "" } else { - x.LockFilePath = string(r.DecodeString()) + x.RktAPIEndpoint = string(r.DecodeString()) } case "rktStage1Image": if r.TryDecodeAsNil() { @@ -3257,6 +3563,18 @@ func (x *KubeletConfiguration) codecDecodeSelfFromMap(l int, d *codec1978.Decode } else { x.RktStage1Image = string(r.DecodeString()) } + case "lockFilePath": + if r.TryDecodeAsNil() { + x.LockFilePath = "" + } else { + x.LockFilePath = string(r.DecodeString()) + } + case "exitOnLockContention": + if r.TryDecodeAsNil() { + x.ExitOnLockContention = false + } else { + x.ExitOnLockContention = bool(r.DecodeBool()) + } case "configureCbr0": if r.TryDecodeAsNil() { x.ConfigureCBR0 = false @@ -3279,7 +3597,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromMap(l int, d *codec1978.Decode if r.TryDecodeAsNil() { x.MaxPods = 0 } else { - x.MaxPods = int(r.DecodeInt(codecSelferBitsize1234)) + x.MaxPods = int32(r.DecodeInt(32)) + } + case "nvidiaGPUs": + if r.TryDecodeAsNil() { + x.NvidiaGPUs = 0 + } else { + x.NvidiaGPUs = int32(r.DecodeInt(32)) } case "dockerExecHandlerName": if r.TryDecodeAsNil() { @@ -3329,6 +3653,12 @@ func (x *KubeletConfiguration) codecDecodeSelfFromMap(l int, d *codec1978.Decode } else { x.RegisterSchedulable = bool(r.DecodeBool()) } + case "contentType": + if r.TryDecodeAsNil() { + x.ContentType = "" + } else { + x.ContentType = string(r.DecodeString()) + } case "kubeAPIQPS": if r.TryDecodeAsNil() { x.KubeAPIQPS = 0 @@ -3339,7 +3669,7 @@ func (x *KubeletConfiguration) codecDecodeSelfFromMap(l int, d *codec1978.Decode if r.TryDecodeAsNil() { x.KubeAPIBurst = 0 } else { - x.KubeAPIBurst = int(r.DecodeInt(codecSelferBitsize1234)) + x.KubeAPIBurst = 
int32(r.DecodeInt(32)) } case "serializeImagePulls": if r.TryDecodeAsNil() { @@ -3357,15 +3687,15 @@ func (x *KubeletConfiguration) codecDecodeSelfFromMap(l int, d *codec1978.Decode if r.TryDecodeAsNil() { x.OutOfDiskTransitionFrequency = pkg1_unversioned.Duration{} } else { - yyv85 := &x.OutOfDiskTransitionFrequency - yym86 := z.DecBinary() - _ = yym86 + yyv90 := &x.OutOfDiskTransitionFrequency + yym91 := z.DecBinary() + _ = yym91 if false { - } else if z.HasExtensions() && z.DecExt(yyv85) { - } else if !yym86 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv85) + } else if z.HasExtensions() && z.DecExt(yyv90) { + } else if !yym91 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv90) } else { - z.DecFallback(yyv85, false) + z.DecFallback(yyv90, false) } } case "nodeIP": @@ -3378,12 +3708,12 @@ func (x *KubeletConfiguration) codecDecodeSelfFromMap(l int, d *codec1978.Decode if r.TryDecodeAsNil() { x.NodeLabels = nil } else { - yyv88 := &x.NodeLabels - yym89 := z.DecBinary() - _ = yym89 + yyv93 := &x.NodeLabels + yym94 := z.DecBinary() + _ = yym94 if false { } else { - z.F.DecMapStringStringX(yyv88, false, d) + z.F.DecMapStringStringX(yyv93, false, d) } } case "nonMasqueradeCIDR": @@ -3398,6 +3728,51 @@ func (x *KubeletConfiguration) codecDecodeSelfFromMap(l int, d *codec1978.Decode } else { x.EnableCustomMetrics = bool(r.DecodeBool()) } + case "evictionHard": + if r.TryDecodeAsNil() { + x.EvictionHard = "" + } else { + x.EvictionHard = string(r.DecodeString()) + } + case "evictionSoft": + if r.TryDecodeAsNil() { + x.EvictionSoft = "" + } else { + x.EvictionSoft = string(r.DecodeString()) + } + case "evictionSoftGracePeriod": + if r.TryDecodeAsNil() { + x.EvictionSoftGracePeriod = "" + } else { + x.EvictionSoftGracePeriod = string(r.DecodeString()) + } + case "evictionPressureTransitionPeriod": + if r.TryDecodeAsNil() { + x.EvictionPressureTransitionPeriod = pkg1_unversioned.Duration{} + } else { + yyv100 := &x.EvictionPressureTransitionPeriod + yym101 := z.DecBinary() + _ = yym101 + if false { + } else if z.HasExtensions() && z.DecExt(yyv100) { + } else if !yym101 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv100) + } else { + z.DecFallback(yyv100, false) + } + } + case "evictionMaxPodGracePeriod": + if r.TryDecodeAsNil() { + x.EvictionMaxPodGracePeriod = 0 + } else { + x.EvictionMaxPodGracePeriod = int32(r.DecodeInt(32)) + } + case "podsPerCore": + if r.TryDecodeAsNil() { + x.PodsPerCore = 0 + } else { + x.PodsPerCore = int32(r.DecodeInt(32)) + } default: z.DecStructFieldNotFound(-1, yys3) } // end switch yys3 @@ -3409,16 +3784,16 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj92 int - var yyb92 bool - var yyhl92 bool = l >= 0 - yyj92++ - if yyhl92 { - yyb92 = yyj92 > l + var yyj104 int + var yyb104 bool + var yyhl104 bool = l >= 0 + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l } else { - yyb92 = r.CheckBreak() + yyb104 = r.CheckBreak() } - if yyb92 { + if yyb104 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3428,13 +3803,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.Config = string(r.DecodeString()) } - yyj92++ - if yyhl92 { - yyb92 = yyj92 > l + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l } else { - yyb92 = r.CheckBreak() + yyb104 = r.CheckBreak() } - if yyb92 { + if yyb104 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3442,24 +3817,24 @@ func (x 
*KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco if r.TryDecodeAsNil() { x.SyncFrequency = pkg1_unversioned.Duration{} } else { - yyv94 := &x.SyncFrequency - yym95 := z.DecBinary() - _ = yym95 + yyv106 := &x.SyncFrequency + yym107 := z.DecBinary() + _ = yym107 if false { - } else if z.HasExtensions() && z.DecExt(yyv94) { - } else if !yym95 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv94) + } else if z.HasExtensions() && z.DecExt(yyv106) { + } else if !yym107 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv106) } else { - z.DecFallback(yyv94, false) + z.DecFallback(yyv106, false) } } - yyj92++ - if yyhl92 { - yyb92 = yyj92 > l + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l } else { - yyb92 = r.CheckBreak() + yyb104 = r.CheckBreak() } - if yyb92 { + if yyb104 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3467,24 +3842,24 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco if r.TryDecodeAsNil() { x.FileCheckFrequency = pkg1_unversioned.Duration{} } else { - yyv96 := &x.FileCheckFrequency - yym97 := z.DecBinary() - _ = yym97 + yyv108 := &x.FileCheckFrequency + yym109 := z.DecBinary() + _ = yym109 if false { - } else if z.HasExtensions() && z.DecExt(yyv96) { - } else if !yym97 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv96) + } else if z.HasExtensions() && z.DecExt(yyv108) { + } else if !yym109 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv108) } else { - z.DecFallback(yyv96, false) + z.DecFallback(yyv108, false) } } - yyj92++ - if yyhl92 { - yyb92 = yyj92 > l + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l } else { - yyb92 = r.CheckBreak() + yyb104 = r.CheckBreak() } - if yyb92 { + if yyb104 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3492,24 +3867,24 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco if r.TryDecodeAsNil() { x.HTTPCheckFrequency = pkg1_unversioned.Duration{} } else { - yyv98 := &x.HTTPCheckFrequency - yym99 := z.DecBinary() - _ = yym99 + yyv110 := &x.HTTPCheckFrequency + yym111 := z.DecBinary() + _ = yym111 if false { - } else if z.HasExtensions() && z.DecExt(yyv98) { - } else if !yym99 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv98) + } else if z.HasExtensions() && z.DecExt(yyv110) { + } else if !yym111 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv110) } else { - z.DecFallback(yyv98, false) + z.DecFallback(yyv110, false) } } - yyj92++ - if yyhl92 { - yyb92 = yyj92 > l + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l } else { - yyb92 = r.CheckBreak() + yyb104 = r.CheckBreak() } - if yyb92 { + if yyb104 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3519,13 +3894,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.ManifestURL = string(r.DecodeString()) } - yyj92++ - if yyhl92 { - yyb92 = yyj92 > l + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l } else { - yyb92 = r.CheckBreak() + yyb104 = r.CheckBreak() } - if yyb92 { + if yyb104 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3535,13 +3910,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.ManifestURLHeader = string(r.DecodeString()) } - yyj92++ - if yyhl92 { - yyb92 = yyj92 > l + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l } else { - yyb92 = r.CheckBreak() + yyb104 = r.CheckBreak() } - if yyb92 { + if yyb104 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3551,13 +3926,13 @@ func (x *KubeletConfiguration) 
codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.EnableServer = bool(r.DecodeBool()) } - yyj92++ - if yyhl92 { - yyb92 = yyj92 > l + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l } else { - yyb92 = r.CheckBreak() + yyb104 = r.CheckBreak() } - if yyb92 { + if yyb104 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3567,13 +3942,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.Address = string(r.DecodeString()) } - yyj92++ - if yyhl92 { - yyb92 = yyj92 > l + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l } else { - yyb92 = r.CheckBreak() + yyb104 = r.CheckBreak() } - if yyb92 { + if yyb104 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3583,13 +3958,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.Port = uint(r.DecodeUint(codecSelferBitsize1234)) } - yyj92++ - if yyhl92 { - yyb92 = yyj92 > l + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l } else { - yyb92 = r.CheckBreak() + yyb104 = r.CheckBreak() } - if yyb92 { + if yyb104 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3599,13 +3974,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.ReadOnlyPort = uint(r.DecodeUint(codecSelferBitsize1234)) } - yyj92++ - if yyhl92 { - yyb92 = yyj92 > l + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l } else { - yyb92 = r.CheckBreak() + yyb104 = r.CheckBreak() } - if yyb92 { + if yyb104 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3615,13 +3990,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.TLSCertFile = string(r.DecodeString()) } - yyj92++ - if yyhl92 { - yyb92 = yyj92 > l + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l } else { - yyb92 = r.CheckBreak() + yyb104 = r.CheckBreak() } - if yyb92 { + if yyb104 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3631,13 +4006,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.TLSPrivateKeyFile = string(r.DecodeString()) } - yyj92++ - if yyhl92 { - yyb92 = yyj92 > l + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l } else { - yyb92 = r.CheckBreak() + yyb104 = r.CheckBreak() } - if yyb92 { + if yyb104 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3647,13 +4022,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.CertDirectory = string(r.DecodeString()) } - yyj92++ - if yyhl92 { - yyb92 = yyj92 > l + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l } else { - yyb92 = r.CheckBreak() + yyb104 = r.CheckBreak() } - if yyb92 { + if yyb104 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3663,13 +4038,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.HostnameOverride = string(r.DecodeString()) } - yyj92++ - if yyhl92 { - yyb92 = yyj92 > l + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l } else { - yyb92 = r.CheckBreak() + yyb104 = r.CheckBreak() } - if yyb92 { + if yyb104 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3679,13 +4054,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.PodInfraContainerImage = string(r.DecodeString()) } - yyj92++ - if yyhl92 { - yyb92 = yyj92 > l + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l } else { - yyb92 = r.CheckBreak() + yyb104 = r.CheckBreak() } - 
if yyb92 { + if yyb104 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3695,13 +4070,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.DockerEndpoint = string(r.DecodeString()) } - yyj92++ - if yyhl92 { - yyb92 = yyj92 > l + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l } else { - yyb92 = r.CheckBreak() + yyb104 = r.CheckBreak() } - if yyb92 { + if yyb104 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3711,13 +4086,29 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.RootDirectory = string(r.DecodeString()) } - yyj92++ - if yyhl92 { - yyb92 = yyj92 > l + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l + } else { + yyb104 = r.CheckBreak() + } + if yyb104 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.SeccompProfileRoot = "" + } else { + x.SeccompProfileRoot = string(r.DecodeString()) + } + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l } else { - yyb92 = r.CheckBreak() + yyb104 = r.CheckBreak() } - if yyb92 { + if yyb104 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3727,13 +4118,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.AllowPrivileged = bool(r.DecodeBool()) } - yyj92++ - if yyhl92 { - yyb92 = yyj92 > l + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l } else { - yyb92 = r.CheckBreak() + yyb104 = r.CheckBreak() } - if yyb92 { + if yyb104 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3743,13 +4134,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.HostNetworkSources = string(r.DecodeString()) } - yyj92++ - if yyhl92 { - yyb92 = yyj92 > l + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l } else { - yyb92 = r.CheckBreak() + yyb104 = r.CheckBreak() } - if yyb92 { + if yyb104 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3759,13 +4150,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.HostPIDSources = string(r.DecodeString()) } - yyj92++ - if yyhl92 { - yyb92 = yyj92 > l + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l } else { - yyb92 = r.CheckBreak() + yyb104 = r.CheckBreak() } - if yyb92 { + if yyb104 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3775,13 +4166,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.HostIPCSources = string(r.DecodeString()) } - yyj92++ - if yyhl92 { - yyb92 = yyj92 > l + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l } else { - yyb92 = r.CheckBreak() + yyb104 = r.CheckBreak() } - if yyb92 { + if yyb104 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3791,13 +4182,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.RegistryPullQPS = float64(r.DecodeFloat(false)) } - yyj92++ - if yyhl92 { - yyb92 = yyj92 > l + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l } else { - yyb92 = r.CheckBreak() + yyb104 = r.CheckBreak() } - if yyb92 { + if yyb104 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3805,15 +4196,15 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco if r.TryDecodeAsNil() { x.RegistryBurst = 0 } else { - x.RegistryBurst = int(r.DecodeInt(codecSelferBitsize1234)) + 
x.RegistryBurst = int32(r.DecodeInt(32)) } - yyj92++ - if yyhl92 { - yyb92 = yyj92 > l + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l } else { - yyb92 = r.CheckBreak() + yyb104 = r.CheckBreak() } - if yyb92 { + if yyb104 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3823,13 +4214,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.EventRecordQPS = float32(r.DecodeFloat(true)) } - yyj92++ - if yyhl92 { - yyb92 = yyj92 > l + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l } else { - yyb92 = r.CheckBreak() + yyb104 = r.CheckBreak() } - if yyb92 { + if yyb104 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3837,15 +4228,15 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco if r.TryDecodeAsNil() { x.EventBurst = 0 } else { - x.EventBurst = int(r.DecodeInt(codecSelferBitsize1234)) + x.EventBurst = int32(r.DecodeInt(32)) } - yyj92++ - if yyhl92 { - yyb92 = yyj92 > l + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l } else { - yyb92 = r.CheckBreak() + yyb104 = r.CheckBreak() } - if yyb92 { + if yyb104 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3855,13 +4246,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.EnableDebuggingHandlers = bool(r.DecodeBool()) } - yyj92++ - if yyhl92 { - yyb92 = yyj92 > l + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l } else { - yyb92 = r.CheckBreak() + yyb104 = r.CheckBreak() } - if yyb92 { + if yyb104 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3869,24 +4260,24 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco if r.TryDecodeAsNil() { x.MinimumGCAge = pkg1_unversioned.Duration{} } else { - yyv122 := &x.MinimumGCAge - yym123 := z.DecBinary() - _ = yym123 + yyv135 := &x.MinimumGCAge + yym136 := z.DecBinary() + _ = yym136 if false { - } else if z.HasExtensions() && z.DecExt(yyv122) { - } else if !yym123 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv122) + } else if z.HasExtensions() && z.DecExt(yyv135) { + } else if !yym136 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv135) } else { - z.DecFallback(yyv122, false) + z.DecFallback(yyv135, false) } } - yyj92++ - if yyhl92 { - yyb92 = yyj92 > l + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l } else { - yyb92 = r.CheckBreak() + yyb104 = r.CheckBreak() } - if yyb92 { + if yyb104 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3894,15 +4285,15 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco if r.TryDecodeAsNil() { x.MaxPerPodContainerCount = 0 } else { - x.MaxPerPodContainerCount = int(r.DecodeInt(codecSelferBitsize1234)) + x.MaxPerPodContainerCount = int32(r.DecodeInt(32)) } - yyj92++ - if yyhl92 { - yyb92 = yyj92 > l + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l } else { - yyb92 = r.CheckBreak() + yyb104 = r.CheckBreak() } - if yyb92 { + if yyb104 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3910,15 +4301,15 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco if r.TryDecodeAsNil() { x.MaxContainerCount = 0 } else { - x.MaxContainerCount = int(r.DecodeInt(codecSelferBitsize1234)) + x.MaxContainerCount = int32(r.DecodeInt(32)) } - yyj92++ - if yyhl92 { - yyb92 = yyj92 > l + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l } else { - yyb92 = r.CheckBreak() + yyb104 = r.CheckBreak() } - if yyb92 { + if yyb104 { 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3928,13 +4319,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.CAdvisorPort = uint(r.DecodeUint(codecSelferBitsize1234)) } - yyj92++ - if yyhl92 { - yyb92 = yyj92 > l + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l } else { - yyb92 = r.CheckBreak() + yyb104 = r.CheckBreak() } - if yyb92 { + if yyb104 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3942,15 +4333,15 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco if r.TryDecodeAsNil() { x.HealthzPort = 0 } else { - x.HealthzPort = int(r.DecodeInt(codecSelferBitsize1234)) + x.HealthzPort = int32(r.DecodeInt(32)) } - yyj92++ - if yyhl92 { - yyb92 = yyj92 > l + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l } else { - yyb92 = r.CheckBreak() + yyb104 = r.CheckBreak() } - if yyb92 { + if yyb104 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3960,13 +4351,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.HealthzBindAddress = string(r.DecodeString()) } - yyj92++ - if yyhl92 { - yyb92 = yyj92 > l + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l } else { - yyb92 = r.CheckBreak() + yyb104 = r.CheckBreak() } - if yyb92 { + if yyb104 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3974,15 +4365,15 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco if r.TryDecodeAsNil() { x.OOMScoreAdj = 0 } else { - x.OOMScoreAdj = int(r.DecodeInt(codecSelferBitsize1234)) + x.OOMScoreAdj = int32(r.DecodeInt(32)) } - yyj92++ - if yyhl92 { - yyb92 = yyj92 > l + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l } else { - yyb92 = r.CheckBreak() + yyb104 = r.CheckBreak() } - if yyb92 { + if yyb104 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -3992,13 +4383,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.RegisterNode = bool(r.DecodeBool()) } - yyj92++ - if yyhl92 { - yyb92 = yyj92 > l + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l } else { - yyb92 = r.CheckBreak() + yyb104 = r.CheckBreak() } - if yyb92 { + if yyb104 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4008,13 +4399,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.ClusterDomain = string(r.DecodeString()) } - yyj92++ - if yyhl92 { - yyb92 = yyj92 > l + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l } else { - yyb92 = r.CheckBreak() + yyb104 = r.CheckBreak() } - if yyb92 { + if yyb104 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4024,13 +4415,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.MasterServiceNamespace = string(r.DecodeString()) } - yyj92++ - if yyhl92 { - yyb92 = yyj92 > l + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l } else { - yyb92 = r.CheckBreak() + yyb104 = r.CheckBreak() } - if yyb92 { + if yyb104 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4040,13 +4431,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.ClusterDNS = string(r.DecodeString()) } - yyj92++ - if yyhl92 { - yyb92 = yyj92 > l + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l } else { - yyb92 = r.CheckBreak() + yyb104 = r.CheckBreak() } - if yyb92 { + if yyb104 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } 
@@ -4054,24 +4445,24 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco if r.TryDecodeAsNil() { x.StreamingConnectionIdleTimeout = pkg1_unversioned.Duration{} } else { - yyv134 := &x.StreamingConnectionIdleTimeout - yym135 := z.DecBinary() - _ = yym135 + yyv147 := &x.StreamingConnectionIdleTimeout + yym148 := z.DecBinary() + _ = yym148 if false { - } else if z.HasExtensions() && z.DecExt(yyv134) { - } else if !yym135 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv134) + } else if z.HasExtensions() && z.DecExt(yyv147) { + } else if !yym148 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv147) } else { - z.DecFallback(yyv134, false) + z.DecFallback(yyv147, false) } } - yyj92++ - if yyhl92 { - yyb92 = yyj92 > l + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l } else { - yyb92 = r.CheckBreak() + yyb104 = r.CheckBreak() } - if yyb92 { + if yyb104 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4079,24 +4470,24 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco if r.TryDecodeAsNil() { x.NodeStatusUpdateFrequency = pkg1_unversioned.Duration{} } else { - yyv136 := &x.NodeStatusUpdateFrequency - yym137 := z.DecBinary() - _ = yym137 + yyv149 := &x.NodeStatusUpdateFrequency + yym150 := z.DecBinary() + _ = yym150 if false { - } else if z.HasExtensions() && z.DecExt(yyv136) { - } else if !yym137 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv136) + } else if z.HasExtensions() && z.DecExt(yyv149) { + } else if !yym150 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv149) } else { - z.DecFallback(yyv136, false) + z.DecFallback(yyv149, false) } } - yyj92++ - if yyhl92 { - yyb92 = yyj92 > l + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l } else { - yyb92 = r.CheckBreak() + yyb104 = r.CheckBreak() } - if yyb92 { + if yyb104 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4104,24 +4495,24 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco if r.TryDecodeAsNil() { x.ImageMinimumGCAge = pkg1_unversioned.Duration{} } else { - yyv138 := &x.ImageMinimumGCAge - yym139 := z.DecBinary() - _ = yym139 + yyv151 := &x.ImageMinimumGCAge + yym152 := z.DecBinary() + _ = yym152 if false { - } else if z.HasExtensions() && z.DecExt(yyv138) { - } else if !yym139 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv138) + } else if z.HasExtensions() && z.DecExt(yyv151) { + } else if !yym152 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv151) } else { - z.DecFallback(yyv138, false) + z.DecFallback(yyv151, false) } } - yyj92++ - if yyhl92 { - yyb92 = yyj92 > l + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l } else { - yyb92 = r.CheckBreak() + yyb104 = r.CheckBreak() } - if yyb92 { + if yyb104 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4129,15 +4520,15 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco if r.TryDecodeAsNil() { x.ImageGCHighThresholdPercent = 0 } else { - x.ImageGCHighThresholdPercent = int(r.DecodeInt(codecSelferBitsize1234)) + x.ImageGCHighThresholdPercent = int32(r.DecodeInt(32)) } - yyj92++ - if yyhl92 { - yyb92 = yyj92 > l + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l } else { - yyb92 = r.CheckBreak() + yyb104 = r.CheckBreak() } - if yyb92 { + if yyb104 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4145,15 +4536,15 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco if r.TryDecodeAsNil() { x.ImageGCLowThresholdPercent = 0 } else { - 
x.ImageGCLowThresholdPercent = int(r.DecodeInt(codecSelferBitsize1234)) + x.ImageGCLowThresholdPercent = int32(r.DecodeInt(32)) } - yyj92++ - if yyhl92 { - yyb92 = yyj92 > l + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l } else { - yyb92 = r.CheckBreak() + yyb104 = r.CheckBreak() } - if yyb92 { + if yyb104 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4161,15 +4552,15 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco if r.TryDecodeAsNil() { x.LowDiskSpaceThresholdMB = 0 } else { - x.LowDiskSpaceThresholdMB = int(r.DecodeInt(codecSelferBitsize1234)) + x.LowDiskSpaceThresholdMB = int32(r.DecodeInt(32)) } - yyj92++ - if yyhl92 { - yyb92 = yyj92 > l + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l } else { - yyb92 = r.CheckBreak() + yyb104 = r.CheckBreak() } - if yyb92 { + if yyb104 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4177,24 +4568,24 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco if r.TryDecodeAsNil() { x.VolumeStatsAggPeriod = pkg1_unversioned.Duration{} } else { - yyv143 := &x.VolumeStatsAggPeriod - yym144 := z.DecBinary() - _ = yym144 + yyv156 := &x.VolumeStatsAggPeriod + yym157 := z.DecBinary() + _ = yym157 if false { - } else if z.HasExtensions() && z.DecExt(yyv143) { - } else if !yym144 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv143) + } else if z.HasExtensions() && z.DecExt(yyv156) { + } else if !yym157 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv156) } else { - z.DecFallback(yyv143, false) + z.DecFallback(yyv156, false) } } - yyj92++ - if yyhl92 { - yyb92 = yyj92 > l + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l } else { - yyb92 = r.CheckBreak() + yyb104 = r.CheckBreak() } - if yyb92 { + if yyb104 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4204,13 +4595,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.NetworkPluginName = string(r.DecodeString()) } - yyj92++ - if yyhl92 { - yyb92 = yyj92 > l + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l } else { - yyb92 = r.CheckBreak() + yyb104 = r.CheckBreak() } - if yyb92 { + if yyb104 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4220,13 +4611,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.NetworkPluginDir = string(r.DecodeString()) } - yyj92++ - if yyhl92 { - yyb92 = yyj92 > l + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l } else { - yyb92 = r.CheckBreak() + yyb104 = r.CheckBreak() } - if yyb92 { + if yyb104 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4236,13 +4627,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.VolumePluginDir = string(r.DecodeString()) } - yyj92++ - if yyhl92 { - yyb92 = yyj92 > l + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l } else { - yyb92 = r.CheckBreak() + yyb104 = r.CheckBreak() } - if yyb92 { + if yyb104 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4252,13 +4643,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.CloudProvider = string(r.DecodeString()) } - yyj92++ - if yyhl92 { - yyb92 = yyj92 > l + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l } else { - yyb92 = r.CheckBreak() + yyb104 = r.CheckBreak() } - if yyb92 { + if yyb104 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4268,13 +4659,13 @@ func (x *KubeletConfiguration) 
codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.CloudConfigFile = string(r.DecodeString()) } - yyj92++ - if yyhl92 { - yyb92 = yyj92 > l + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l } else { - yyb92 = r.CheckBreak() + yyb104 = r.CheckBreak() } - if yyb92 { + if yyb104 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4284,13 +4675,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.KubeletCgroups = string(r.DecodeString()) } - yyj92++ - if yyhl92 { - yyb92 = yyj92 > l + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l } else { - yyb92 = r.CheckBreak() + yyb104 = r.CheckBreak() } - if yyb92 { + if yyb104 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4300,13 +4691,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.RuntimeCgroups = string(r.DecodeString()) } - yyj92++ - if yyhl92 { - yyb92 = yyj92 > l + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l } else { - yyb92 = r.CheckBreak() + yyb104 = r.CheckBreak() } - if yyb92 { + if yyb104 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4316,13 +4707,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.SystemCgroups = string(r.DecodeString()) } - yyj92++ - if yyhl92 { - yyb92 = yyj92 > l + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l } else { - yyb92 = r.CheckBreak() + yyb104 = r.CheckBreak() } - if yyb92 { + if yyb104 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4332,45 +4723,77 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.CgroupRoot = string(r.DecodeString()) } - yyj92++ - if yyhl92 { - yyb92 = yyj92 > l + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l + } else { + yyb104 = r.CheckBreak() + } + if yyb104 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ContainerRuntime = "" + } else { + x.ContainerRuntime = string(r.DecodeString()) + } + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l + } else { + yyb104 = r.CheckBreak() + } + if yyb104 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.RktPath = "" + } else { + x.RktPath = string(r.DecodeString()) + } + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l } else { - yyb92 = r.CheckBreak() + yyb104 = r.CheckBreak() } - if yyb92 { + if yyb104 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ContainerRuntime = "" + x.RktAPIEndpoint = "" } else { - x.ContainerRuntime = string(r.DecodeString()) + x.RktAPIEndpoint = string(r.DecodeString()) } - yyj92++ - if yyhl92 { - yyb92 = yyj92 > l + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l } else { - yyb92 = r.CheckBreak() + yyb104 = r.CheckBreak() } - if yyb92 { + if yyb104 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.RktPath = "" + x.RktStage1Image = "" } else { - x.RktPath = string(r.DecodeString()) + x.RktStage1Image = string(r.DecodeString()) } - yyj92++ - if yyhl92 { - yyb92 = yyj92 > l + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l } else { - yyb92 = r.CheckBreak() + yyb104 = r.CheckBreak() } - if yyb92 { + 
if yyb104 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4380,29 +4803,29 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.LockFilePath = string(r.DecodeString()) } - yyj92++ - if yyhl92 { - yyb92 = yyj92 > l + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l } else { - yyb92 = r.CheckBreak() + yyb104 = r.CheckBreak() } - if yyb92 { + if yyb104 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.RktStage1Image = "" + x.ExitOnLockContention = false } else { - x.RktStage1Image = string(r.DecodeString()) + x.ExitOnLockContention = bool(r.DecodeBool()) } - yyj92++ - if yyhl92 { - yyb92 = yyj92 > l + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l } else { - yyb92 = r.CheckBreak() + yyb104 = r.CheckBreak() } - if yyb92 { + if yyb104 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4412,13 +4835,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.ConfigureCBR0 = bool(r.DecodeBool()) } - yyj92++ - if yyhl92 { - yyb92 = yyj92 > l + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l } else { - yyb92 = r.CheckBreak() + yyb104 = r.CheckBreak() } - if yyb92 { + if yyb104 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4428,13 +4851,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.HairpinMode = string(r.DecodeString()) } - yyj92++ - if yyhl92 { - yyb92 = yyj92 > l + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l } else { - yyb92 = r.CheckBreak() + yyb104 = r.CheckBreak() } - if yyb92 { + if yyb104 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4444,13 +4867,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.BabysitDaemons = bool(r.DecodeBool()) } - yyj92++ - if yyhl92 { - yyb92 = yyj92 > l + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l } else { - yyb92 = r.CheckBreak() + yyb104 = r.CheckBreak() } - if yyb92 { + if yyb104 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4458,15 +4881,31 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco if r.TryDecodeAsNil() { x.MaxPods = 0 } else { - x.MaxPods = int(r.DecodeInt(codecSelferBitsize1234)) + x.MaxPods = int32(r.DecodeInt(32)) + } + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l + } else { + yyb104 = r.CheckBreak() + } + if yyb104 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.NvidiaGPUs = 0 + } else { + x.NvidiaGPUs = int32(r.DecodeInt(32)) } - yyj92++ - if yyhl92 { - yyb92 = yyj92 > l + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l } else { - yyb92 = r.CheckBreak() + yyb104 = r.CheckBreak() } - if yyb92 { + if yyb104 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4476,13 +4915,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.DockerExecHandlerName = string(r.DecodeString()) } - yyj92++ - if yyhl92 { - yyb92 = yyj92 > l + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l } else { - yyb92 = r.CheckBreak() + yyb104 = r.CheckBreak() } - if yyb92 { + if yyb104 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4492,13 +4931,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } 
else { x.PodCIDR = string(r.DecodeString()) } - yyj92++ - if yyhl92 { - yyb92 = yyj92 > l + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l } else { - yyb92 = r.CheckBreak() + yyb104 = r.CheckBreak() } - if yyb92 { + if yyb104 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4508,13 +4947,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.ResolverConfig = string(r.DecodeString()) } - yyj92++ - if yyhl92 { - yyb92 = yyj92 > l + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l } else { - yyb92 = r.CheckBreak() + yyb104 = r.CheckBreak() } - if yyb92 { + if yyb104 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4524,13 +4963,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.CPUCFSQuota = bool(r.DecodeBool()) } - yyj92++ - if yyhl92 { - yyb92 = yyj92 > l + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l } else { - yyb92 = r.CheckBreak() + yyb104 = r.CheckBreak() } - if yyb92 { + if yyb104 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4540,13 +4979,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.Containerized = bool(r.DecodeBool()) } - yyj92++ - if yyhl92 { - yyb92 = yyj92 > l + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l } else { - yyb92 = r.CheckBreak() + yyb104 = r.CheckBreak() } - if yyb92 { + if yyb104 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4556,13 +4995,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.MaxOpenFiles = uint64(r.DecodeUint(64)) } - yyj92++ - if yyhl92 { - yyb92 = yyj92 > l + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l } else { - yyb92 = r.CheckBreak() + yyb104 = r.CheckBreak() } - if yyb92 { + if yyb104 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4572,13 +5011,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.ReconcileCIDR = bool(r.DecodeBool()) } - yyj92++ - if yyhl92 { - yyb92 = yyj92 > l + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l } else { - yyb92 = r.CheckBreak() + yyb104 = r.CheckBreak() } - if yyb92 { + if yyb104 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4588,13 +5027,29 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.RegisterSchedulable = bool(r.DecodeBool()) } - yyj92++ - if yyhl92 { - yyb92 = yyj92 > l + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l + } else { + yyb104 = r.CheckBreak() + } + if yyb104 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ContentType = "" + } else { + x.ContentType = string(r.DecodeString()) + } + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l } else { - yyb92 = r.CheckBreak() + yyb104 = r.CheckBreak() } - if yyb92 { + if yyb104 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4604,13 +5059,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.KubeAPIQPS = float32(r.DecodeFloat(true)) } - yyj92++ - if yyhl92 { - yyb92 = yyj92 > l + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l } else { - yyb92 = r.CheckBreak() + yyb104 = r.CheckBreak() } - if yyb92 { + if yyb104 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4618,15 +5073,15 @@ func (x *KubeletConfiguration) 
codecDecodeSelfFromArray(l int, d *codec1978.Deco if r.TryDecodeAsNil() { x.KubeAPIBurst = 0 } else { - x.KubeAPIBurst = int(r.DecodeInt(codecSelferBitsize1234)) + x.KubeAPIBurst = int32(r.DecodeInt(32)) } - yyj92++ - if yyhl92 { - yyb92 = yyj92 > l + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l } else { - yyb92 = r.CheckBreak() + yyb104 = r.CheckBreak() } - if yyb92 { + if yyb104 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4636,13 +5091,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.SerializeImagePulls = bool(r.DecodeBool()) } - yyj92++ - if yyhl92 { - yyb92 = yyj92 > l + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l } else { - yyb92 = r.CheckBreak() + yyb104 = r.CheckBreak() } - if yyb92 { + if yyb104 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4652,13 +5107,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.ExperimentalFlannelOverlay = bool(r.DecodeBool()) } - yyj92++ - if yyhl92 { - yyb92 = yyj92 > l + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l } else { - yyb92 = r.CheckBreak() + yyb104 = r.CheckBreak() } - if yyb92 { + if yyb104 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4666,24 +5121,24 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco if r.TryDecodeAsNil() { x.OutOfDiskTransitionFrequency = pkg1_unversioned.Duration{} } else { - yyv174 := &x.OutOfDiskTransitionFrequency - yym175 := z.DecBinary() - _ = yym175 + yyv191 := &x.OutOfDiskTransitionFrequency + yym192 := z.DecBinary() + _ = yym192 if false { - } else if z.HasExtensions() && z.DecExt(yyv174) { - } else if !yym175 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv174) + } else if z.HasExtensions() && z.DecExt(yyv191) { + } else if !yym192 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv191) } else { - z.DecFallback(yyv174, false) + z.DecFallback(yyv191, false) } } - yyj92++ - if yyhl92 { - yyb92 = yyj92 > l + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l } else { - yyb92 = r.CheckBreak() + yyb104 = r.CheckBreak() } - if yyb92 { + if yyb104 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4693,13 +5148,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.NodeIP = string(r.DecodeString()) } - yyj92++ - if yyhl92 { - yyb92 = yyj92 > l + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l } else { - yyb92 = r.CheckBreak() + yyb104 = r.CheckBreak() } - if yyb92 { + if yyb104 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4707,21 +5162,21 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco if r.TryDecodeAsNil() { x.NodeLabels = nil } else { - yyv177 := &x.NodeLabels - yym178 := z.DecBinary() - _ = yym178 + yyv194 := &x.NodeLabels + yym195 := z.DecBinary() + _ = yym195 if false { } else { - z.F.DecMapStringStringX(yyv177, false, d) + z.F.DecMapStringStringX(yyv194, false, d) } } - yyj92++ - if yyhl92 { - yyb92 = yyj92 > l + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l } else { - yyb92 = r.CheckBreak() + yyb104 = r.CheckBreak() } - if yyb92 { + if yyb104 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4731,13 +5186,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.NonMasqueradeCIDR = string(r.DecodeString()) } - yyj92++ - if yyhl92 { - yyb92 = yyj92 > l + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l } else { - yyb92 = 
r.CheckBreak() + yyb104 = r.CheckBreak() } - if yyb92 { + if yyb104 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4747,18 +5202,123 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.EnableCustomMetrics = bool(r.DecodeBool()) } + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l + } else { + yyb104 = r.CheckBreak() + } + if yyb104 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.EvictionHard = "" + } else { + x.EvictionHard = string(r.DecodeString()) + } + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l + } else { + yyb104 = r.CheckBreak() + } + if yyb104 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.EvictionSoft = "" + } else { + x.EvictionSoft = string(r.DecodeString()) + } + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l + } else { + yyb104 = r.CheckBreak() + } + if yyb104 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.EvictionSoftGracePeriod = "" + } else { + x.EvictionSoftGracePeriod = string(r.DecodeString()) + } + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l + } else { + yyb104 = r.CheckBreak() + } + if yyb104 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.EvictionPressureTransitionPeriod = pkg1_unversioned.Duration{} + } else { + yyv201 := &x.EvictionPressureTransitionPeriod + yym202 := z.DecBinary() + _ = yym202 + if false { + } else if z.HasExtensions() && z.DecExt(yyv201) { + } else if !yym202 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv201) + } else { + z.DecFallback(yyv201, false) + } + } + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l + } else { + yyb104 = r.CheckBreak() + } + if yyb104 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.EvictionMaxPodGracePeriod = 0 + } else { + x.EvictionMaxPodGracePeriod = int32(r.DecodeInt(32)) + } + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l + } else { + yyb104 = r.CheckBreak() + } + if yyb104 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.PodsPerCore = 0 + } else { + x.PodsPerCore = int32(r.DecodeInt(32)) + } for { - yyj92++ - if yyhl92 { - yyb92 = yyj92 > l + yyj104++ + if yyhl104 { + yyb104 = yyj104 > l } else { - yyb92 = r.CheckBreak() + yyb104 = r.CheckBreak() } - if yyb92 { + if yyb104 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj92-1, "") + z.DecStructFieldNotFound(yyj104-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -4777,16 +5337,16 @@ func (x *KubeSchedulerConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [11]bool + var yyq2 [14]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false - yyq2[9] = x.Kind != "" - yyq2[10] = x.APIVersion != "" + yyq2[12] = x.Kind != "" + yyq2[13] = x.APIVersion != "" var yynn2 int if yyr2 || yy2arr2 { - r.EncodeArrayStart(11) + 
r.EncodeArrayStart(14) } else { - yynn2 = 9 + yynn2 = 12 for _, b := range yyq2 { if b { yynn2++ @@ -4896,17 +5456,17 @@ func (x *KubeSchedulerConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { _ = yym19 if false { } else { - r.EncodeFloat32(float32(x.KubeAPIQPS)) + r.EncodeString(codecSelferC_UTF81234, string(x.ContentType)) } } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kubeAPIQPS")) + r.EncodeString(codecSelferC_UTF81234, string("contentType")) z.EncSendContainerState(codecSelfer_containerMapValue1234) yym20 := z.EncBinary() _ = yym20 if false { } else { - r.EncodeFloat32(float32(x.KubeAPIQPS)) + r.EncodeString(codecSelferC_UTF81234, string(x.ContentType)) } } if yyr2 || yy2arr2 { @@ -4915,17 +5475,17 @@ func (x *KubeSchedulerConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { _ = yym22 if false { } else { - r.EncodeInt(int64(x.KubeAPIBurst)) + r.EncodeFloat32(float32(x.KubeAPIQPS)) } } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kubeAPIBurst")) + r.EncodeString(codecSelferC_UTF81234, string("kubeAPIQPS")) z.EncSendContainerState(codecSelfer_containerMapValue1234) yym23 := z.EncBinary() _ = yym23 if false { } else { - r.EncodeInt(int64(x.KubeAPIBurst)) + r.EncodeFloat32(float32(x.KubeAPIQPS)) } } if yyr2 || yy2arr2 { @@ -4934,35 +5494,92 @@ func (x *KubeSchedulerConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { _ = yym25 if false { } else { - r.EncodeString(codecSelferC_UTF81234, string(x.SchedulerName)) + r.EncodeInt(int64(x.KubeAPIBurst)) } } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("schedulerName")) + r.EncodeString(codecSelferC_UTF81234, string("kubeAPIBurst")) z.EncSendContainerState(codecSelfer_containerMapValue1234) yym26 := z.EncBinary() _ = yym26 if false { + } else { + r.EncodeInt(int64(x.KubeAPIBurst)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym28 := z.EncBinary() + _ = yym28 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.SchedulerName)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("schedulerName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym29 := z.EncBinary() + _ = yym29 + if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.SchedulerName)) } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy28 := &x.LeaderElection - yy28.CodecEncodeSelf(e) + yym31 := z.EncBinary() + _ = yym31 + if false { + } else { + r.EncodeInt(int64(x.HardPodAffinitySymmetricWeight)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("hardPodAffinitySymmetricWeight")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym32 := z.EncBinary() + _ = yym32 + if false { + } else { + r.EncodeInt(int64(x.HardPodAffinitySymmetricWeight)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym34 := z.EncBinary() + _ = yym34 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FailureDomains)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("failureDomains")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym35 := z.EncBinary() + _ = yym35 + 
if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FailureDomains)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy37 := &x.LeaderElection + yy37.CodecEncodeSelf(e) } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("leaderElection")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy30 := &x.LeaderElection - yy30.CodecEncodeSelf(e) + yy39 := &x.LeaderElection + yy39.CodecEncodeSelf(e) } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[9] { - yym33 := z.EncBinary() - _ = yym33 + if yyq2[12] { + yym42 := z.EncBinary() + _ = yym42 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -4971,12 +5588,12 @@ func (x *KubeSchedulerConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2[9] { + if yyq2[12] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym34 := z.EncBinary() - _ = yym34 + yym43 := z.EncBinary() + _ = yym43 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -4985,9 +5602,9 @@ func (x *KubeSchedulerConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[10] { - yym36 := z.EncBinary() - _ = yym36 + if yyq2[13] { + yym45 := z.EncBinary() + _ = yym45 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -4996,12 +5613,12 @@ func (x *KubeSchedulerConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2[10] { + if yyq2[13] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym37 := z.EncBinary() - _ = yym37 + yym46 := z.EncBinary() + _ = yym46 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -5073,7 +5690,7 @@ func (x *KubeSchedulerConfiguration) codecDecodeSelfFromMap(l int, d *codec1978. if r.TryDecodeAsNil() { x.Port = 0 } else { - x.Port = int(r.DecodeInt(codecSelferBitsize1234)) + x.Port = int32(r.DecodeInt(32)) } case "address": if r.TryDecodeAsNil() { @@ -5099,6 +5716,12 @@ func (x *KubeSchedulerConfiguration) codecDecodeSelfFromMap(l int, d *codec1978. } else { x.EnableProfiling = bool(r.DecodeBool()) } + case "contentType": + if r.TryDecodeAsNil() { + x.ContentType = "" + } else { + x.ContentType = string(r.DecodeString()) + } case "kubeAPIQPS": if r.TryDecodeAsNil() { x.KubeAPIQPS = 0 @@ -5109,7 +5732,7 @@ func (x *KubeSchedulerConfiguration) codecDecodeSelfFromMap(l int, d *codec1978. if r.TryDecodeAsNil() { x.KubeAPIBurst = 0 } else { - x.KubeAPIBurst = int(r.DecodeInt(codecSelferBitsize1234)) + x.KubeAPIBurst = int32(r.DecodeInt(32)) } case "schedulerName": if r.TryDecodeAsNil() { @@ -5117,12 +5740,24 @@ func (x *KubeSchedulerConfiguration) codecDecodeSelfFromMap(l int, d *codec1978. 
} else { x.SchedulerName = string(r.DecodeString()) } + case "hardPodAffinitySymmetricWeight": + if r.TryDecodeAsNil() { + x.HardPodAffinitySymmetricWeight = 0 + } else { + x.HardPodAffinitySymmetricWeight = int(r.DecodeInt(codecSelferBitsize1234)) + } + case "failureDomains": + if r.TryDecodeAsNil() { + x.FailureDomains = "" + } else { + x.FailureDomains = string(r.DecodeString()) + } case "leaderElection": if r.TryDecodeAsNil() { x.LeaderElection = LeaderElectionConfiguration{} } else { - yyv12 := &x.LeaderElection - yyv12.CodecDecodeSelf(d) + yyv15 := &x.LeaderElection + yyv15.CodecDecodeSelf(d) } case "kind": if r.TryDecodeAsNil() { @@ -5147,16 +5782,16 @@ func (x *KubeSchedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec197 var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj15 int - var yyb15 bool - var yyhl15 bool = l >= 0 - yyj15++ - if yyhl15 { - yyb15 = yyj15 > l + var yyj18 int + var yyb18 bool + var yyhl18 bool = l >= 0 + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb15 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb15 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5164,15 +5799,15 @@ func (x *KubeSchedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec197 if r.TryDecodeAsNil() { x.Port = 0 } else { - x.Port = int(r.DecodeInt(codecSelferBitsize1234)) + x.Port = int32(r.DecodeInt(32)) } - yyj15++ - if yyhl15 { - yyb15 = yyj15 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb15 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb15 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5182,13 +5817,13 @@ func (x *KubeSchedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec197 } else { x.Address = string(r.DecodeString()) } - yyj15++ - if yyhl15 { - yyb15 = yyj15 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb15 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb15 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5198,13 +5833,13 @@ func (x *KubeSchedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec197 } else { x.AlgorithmProvider = string(r.DecodeString()) } - yyj15++ - if yyhl15 { - yyb15 = yyj15 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb15 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb15 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5214,13 +5849,13 @@ func (x *KubeSchedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec197 } else { x.PolicyConfigFile = string(r.DecodeString()) } - yyj15++ - if yyhl15 { - yyb15 = yyj15 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb15 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb15 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5230,13 +5865,29 @@ func (x *KubeSchedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec197 } else { x.EnableProfiling = bool(r.DecodeBool()) } - yyj15++ - if yyhl15 { - yyb15 = yyj15 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ContentType = "" + } else { + x.ContentType = string(r.DecodeString()) + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb15 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb15 { + if yyb18 { 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5246,13 +5897,13 @@ func (x *KubeSchedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec197 } else { x.KubeAPIQPS = float32(r.DecodeFloat(true)) } - yyj15++ - if yyhl15 { - yyb15 = yyj15 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb15 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb15 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5260,15 +5911,15 @@ func (x *KubeSchedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec197 if r.TryDecodeAsNil() { x.KubeAPIBurst = 0 } else { - x.KubeAPIBurst = int(r.DecodeInt(codecSelferBitsize1234)) + x.KubeAPIBurst = int32(r.DecodeInt(32)) } - yyj15++ - if yyhl15 { - yyb15 = yyj15 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb15 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb15 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5278,13 +5929,45 @@ func (x *KubeSchedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec197 } else { x.SchedulerName = string(r.DecodeString()) } - yyj15++ - if yyhl15 { - yyb15 = yyj15 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.HardPodAffinitySymmetricWeight = 0 + } else { + x.HardPodAffinitySymmetricWeight = int(r.DecodeInt(codecSelferBitsize1234)) + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.FailureDomains = "" + } else { + x.FailureDomains = string(r.DecodeString()) + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb15 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb15 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5292,16 +5975,16 @@ func (x *KubeSchedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec197 if r.TryDecodeAsNil() { x.LeaderElection = LeaderElectionConfiguration{} } else { - yyv24 := &x.LeaderElection - yyv24.CodecDecodeSelf(d) + yyv30 := &x.LeaderElection + yyv30.CodecDecodeSelf(d) } - yyj15++ - if yyhl15 { - yyb15 = yyj15 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb15 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb15 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5311,13 +5994,13 @@ func (x *KubeSchedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec197 } else { x.Kind = string(r.DecodeString()) } - yyj15++ - if yyhl15 { - yyb15 = yyj15 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb15 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb15 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5328,17 +6011,17 @@ func (x *KubeSchedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec197 x.APIVersion = string(r.DecodeString()) } for { - yyj15++ - if yyhl15 { - yyb15 = yyj15 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb15 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb15 { + if yyb18 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj15-1, "") + z.DecStructFieldNotFound(yyj18-1, "") } 
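An aside on the shape of this generated code: every struct in this file can be written either as a map keyed by field name or, when the handle's StructToArray option is set, as a positional array (the yy2arr2 branch); the decode side mirrors this with codecDecodeSelfFromMap and codecDecodeSelfFromArray, where yyj18 is the running element index and yyb18 the end-of-array flag. That is also why every generated local is renumbered in these hunks (yyj15 becomes yyj18, yyv24 becomes yyv30, and so on): codecgen numbers its temporaries sequentially, so inserting contentType, hardPodAffinitySymmetricWeight, and failureDomains shifts every later identifier, and in the array form it also shifts the wire position of every later field. Below is a minimal sketch of the two container forms using the public ugorji codec API; the struct is illustrative, not a type from this diff.

package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

// miniConfig is an illustrative stand-in for the generated config structs.
type miniConfig struct {
	Port    int32  `codec:"port"`
	Address string `codec:"address"`
}

func main() {
	v := miniConfig{Port: 10251, Address: "0.0.0.0"}

	// Default: encode as a map keyed by field name.
	var asMap []byte
	mh := codec.JsonHandle{}
	if err := codec.NewEncoderBytes(&asMap, &mh).Encode(v); err != nil {
		panic(err)
	}
	fmt.Println(string(asMap)) // {"port":10251,"address":"0.0.0.0"}

	// StructToArray: encode positionally, no keys (the yy2arr2 branch).
	var asArray []byte
	ah := codec.JsonHandle{}
	ah.StructToArray = true
	if err := codec.NewEncoderBytes(&asArray, &ah).Encode(v); err != nil {
		panic(err)
	}
	fmt.Println(string(asArray)) // [10251,"0.0.0.0"]
}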
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -5720,16 +6403,16 @@ func (x *KubeControllerManagerConfiguration) CodecEncodeSelf(e *codec1978.Encode } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [43]bool + var yyq2 [49]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false - yyq2[41] = x.Kind != "" - yyq2[42] = x.APIVersion != "" + yyq2[47] = x.Kind != "" + yyq2[48] = x.APIVersion != "" var yynn2 int if yyr2 || yy2arr2 { - r.EncodeArrayStart(43) + r.EncodeArrayStart(49) } else { - yynn2 = 41 + yynn2 = 47 for _, b := range yyq2 { if b { yynn2++ @@ -6439,108 +7122,184 @@ func (x *KubeControllerManagerConfiguration) CodecEncodeSelf(e *codec1978.Encode _ = yym122 if false { } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ServiceAccountKeyFile)) + r.EncodeString(codecSelferC_UTF81234, string(x.ServiceAccountKeyFile)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym124 := z.EncBinary() + _ = yym124 + if false { + } else { + r.EncodeBool(bool(x.EnableProfiling)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("enableProfiling")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym125 := z.EncBinary() + _ = yym125 + if false { + } else { + r.EncodeBool(bool(x.EnableProfiling)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym127 := z.EncBinary() + _ = yym127 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ClusterName)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("clusterName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym128 := z.EncBinary() + _ = yym128 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ClusterName)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym130 := z.EncBinary() + _ = yym130 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ClusterCIDR)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("clusterCIDR")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym131 := z.EncBinary() + _ = yym131 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ClusterCIDR)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym133 := z.EncBinary() + _ = yym133 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ServiceCIDR)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("serviceCIDR")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym134 := z.EncBinary() + _ = yym134 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ServiceCIDR)) } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym124 := z.EncBinary() - _ = yym124 + yym136 := z.EncBinary() + _ = yym136 if false { } else { - r.EncodeBool(bool(x.EnableProfiling)) + r.EncodeInt(int64(x.NodeCIDRMaskSize)) } } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("enableProfiling")) + r.EncodeString(codecSelferC_UTF81234, string("nodeCIDRMaskSize")) 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym125 := z.EncBinary() - _ = yym125 + yym137 := z.EncBinary() + _ = yym137 if false { } else { - r.EncodeBool(bool(x.EnableProfiling)) + r.EncodeInt(int64(x.NodeCIDRMaskSize)) } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym127 := z.EncBinary() - _ = yym127 + yym139 := z.EncBinary() + _ = yym139 if false { } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ClusterName)) + r.EncodeBool(bool(x.AllocateNodeCIDRs)) } } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("clusterName")) + r.EncodeString(codecSelferC_UTF81234, string("allocateNodeCIDRs")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym128 := z.EncBinary() - _ = yym128 + yym140 := z.EncBinary() + _ = yym140 if false { } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ClusterName)) + r.EncodeBool(bool(x.AllocateNodeCIDRs)) } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym130 := z.EncBinary() - _ = yym130 + yym142 := z.EncBinary() + _ = yym142 if false { } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ClusterCIDR)) + r.EncodeBool(bool(x.ConfigureCloudRoutes)) } } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("clusterCIDR")) + r.EncodeString(codecSelferC_UTF81234, string("configureCloudRoutes")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym131 := z.EncBinary() - _ = yym131 + yym143 := z.EncBinary() + _ = yym143 if false { } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ClusterCIDR)) + r.EncodeBool(bool(x.ConfigureCloudRoutes)) } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym133 := z.EncBinary() - _ = yym133 + yym145 := z.EncBinary() + _ = yym145 if false { } else { - r.EncodeBool(bool(x.AllocateNodeCIDRs)) + r.EncodeString(codecSelferC_UTF81234, string(x.RootCAFile)) } } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("allocateNodeCIDRs")) + r.EncodeString(codecSelferC_UTF81234, string("rootCAFile")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym134 := z.EncBinary() - _ = yym134 + yym146 := z.EncBinary() + _ = yym146 if false { } else { - r.EncodeBool(bool(x.AllocateNodeCIDRs)) + r.EncodeString(codecSelferC_UTF81234, string(x.RootCAFile)) } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym136 := z.EncBinary() - _ = yym136 + yym148 := z.EncBinary() + _ = yym148 if false { } else { - r.EncodeString(codecSelferC_UTF81234, string(x.RootCAFile)) + r.EncodeString(codecSelferC_UTF81234, string(x.ContentType)) } } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("rootCAFile")) + r.EncodeString(codecSelferC_UTF81234, string("contentType")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym137 := z.EncBinary() - _ = yym137 + yym149 := z.EncBinary() + _ = yym149 if false { } else { - r.EncodeString(codecSelferC_UTF81234, string(x.RootCAFile)) + r.EncodeString(codecSelferC_UTF81234, string(x.ContentType)) } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym139 := z.EncBinary() - _ = yym139 + yym151 := z.EncBinary() + _ = yym151 if false { } else { r.EncodeFloat32(float32(x.KubeAPIQPS)) @@ -6549,8 +7308,8 @@ func (x 
*KubeControllerManagerConfiguration) CodecEncodeSelf(e *codec1978.Encode z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kubeAPIQPS")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym140 := z.EncBinary() - _ = yym140 + yym152 := z.EncBinary() + _ = yym152 if false { } else { r.EncodeFloat32(float32(x.KubeAPIQPS)) @@ -6558,8 +7317,8 @@ func (x *KubeControllerManagerConfiguration) CodecEncodeSelf(e *codec1978.Encode } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym142 := z.EncBinary() - _ = yym142 + yym154 := z.EncBinary() + _ = yym154 if false { } else { r.EncodeInt(int64(x.KubeAPIBurst)) @@ -6568,8 +7327,8 @@ func (x *KubeControllerManagerConfiguration) CodecEncodeSelf(e *codec1978.Encode z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kubeAPIBurst")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym143 := z.EncBinary() - _ = yym143 + yym155 := z.EncBinary() + _ = yym155 if false { } else { r.EncodeInt(int64(x.KubeAPIBurst)) @@ -6577,31 +7336,77 @@ func (x *KubeControllerManagerConfiguration) CodecEncodeSelf(e *codec1978.Encode } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy145 := &x.LeaderElection - yy145.CodecEncodeSelf(e) + yy157 := &x.LeaderElection + yy157.CodecEncodeSelf(e) } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("leaderElection")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy147 := &x.LeaderElection - yy147.CodecEncodeSelf(e) + yy159 := &x.LeaderElection + yy159.CodecEncodeSelf(e) } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy150 := &x.VolumeConfiguration - yy150.CodecEncodeSelf(e) + yy162 := &x.VolumeConfiguration + yy162.CodecEncodeSelf(e) } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("volumeConfiguration")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy152 := &x.VolumeConfiguration - yy152.CodecEncodeSelf(e) + yy164 := &x.VolumeConfiguration + yy164.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy167 := &x.ControllerStartInterval + yym168 := z.EncBinary() + _ = yym168 + if false { + } else if z.HasExtensions() && z.EncExt(yy167) { + } else if !yym168 && z.IsJSONHandle() { + z.EncJSONMarshal(yy167) + } else { + z.EncFallback(yy167) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("controllerStartInterval")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy169 := &x.ControllerStartInterval + yym170 := z.EncBinary() + _ = yym170 + if false { + } else if z.HasExtensions() && z.EncExt(yy169) { + } else if !yym170 && z.IsJSONHandle() { + z.EncJSONMarshal(yy169) + } else { + z.EncFallback(yy169) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym172 := z.EncBinary() + _ = yym172 + if false { + } else { + r.EncodeBool(bool(x.EnableGarbageCollector)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("enableGarbageCollector")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym173 := z.EncBinary() + _ = yym173 + if false { + } else { + r.EncodeBool(bool(x.EnableGarbageCollector)) + } } 
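The controllerStartInterval stanza above is the first new field in this struct that is neither a primitive nor a nested config: it is an unversioned.Duration, so the generated encoder walks a three-way ladder — a registered codec extension (z.EncExt), the type's own json.Marshaler when the handle is JSON (z.EncJSONMarshal), or the reflection fallback (z.EncFallback) — and the decoder mirrors it with DecExt, DecJSONUnmarshal, and DecFallback. For the JSON branch to fire, the wrapper only needs to satisfy json.Marshaler and json.Unmarshaler. A rough sketch of the shape such a wrapper typically has, paraphrased rather than copied from the Kubernetes source:

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// Duration wraps time.Duration so it round-trips through JSON as a
// human-readable string like "2m0s" instead of a nanosecond count.
type Duration struct {
	time.Duration
}

func (d Duration) MarshalJSON() ([]byte, error) {
	return json.Marshal(d.Duration.String()) // e.g. "2m0s"
}

func (d *Duration) UnmarshalJSON(b []byte) error {
	var s string
	if err := json.Unmarshal(b, &s); err != nil {
		return err
	}
	parsed, err := time.ParseDuration(s)
	if err != nil {
		return err
	}
	d.Duration = parsed
	return nil
}

func main() {
	out, _ := json.Marshal(Duration{2 * time.Minute})
	fmt.Println(string(out)) // "2m0s"
}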
if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[41] { - yym155 := z.EncBinary() - _ = yym155 + if yyq2[47] { + yym175 := z.EncBinary() + _ = yym175 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -6610,12 +7415,12 @@ func (x *KubeControllerManagerConfiguration) CodecEncodeSelf(e *codec1978.Encode r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2[41] { + if yyq2[47] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym156 := z.EncBinary() - _ = yym156 + yym176 := z.EncBinary() + _ = yym176 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -6624,9 +7429,9 @@ func (x *KubeControllerManagerConfiguration) CodecEncodeSelf(e *codec1978.Encode } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[42] { - yym158 := z.EncBinary() - _ = yym158 + if yyq2[48] { + yym178 := z.EncBinary() + _ = yym178 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -6635,12 +7440,12 @@ func (x *KubeControllerManagerConfiguration) CodecEncodeSelf(e *codec1978.Encode r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2[42] { + if yyq2[48] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym159 := z.EncBinary() - _ = yym159 + yym179 := z.EncBinary() + _ = yym179 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -6712,7 +7517,7 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromMap(l int, d *co if r.TryDecodeAsNil() { x.Port = 0 } else { - x.Port = int(r.DecodeInt(codecSelferBitsize1234)) + x.Port = int32(r.DecodeInt(32)) } case "address": if r.TryDecodeAsNil() { @@ -6736,67 +7541,67 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromMap(l int, d *co if r.TryDecodeAsNil() { x.ConcurrentEndpointSyncs = 0 } else { - x.ConcurrentEndpointSyncs = int(r.DecodeInt(codecSelferBitsize1234)) + x.ConcurrentEndpointSyncs = int32(r.DecodeInt(32)) } case "concurrentRSSyncs": if r.TryDecodeAsNil() { x.ConcurrentRSSyncs = 0 } else { - x.ConcurrentRSSyncs = int(r.DecodeInt(codecSelferBitsize1234)) + x.ConcurrentRSSyncs = int32(r.DecodeInt(32)) } case "concurrentRCSyncs": if r.TryDecodeAsNil() { x.ConcurrentRCSyncs = 0 } else { - x.ConcurrentRCSyncs = int(r.DecodeInt(codecSelferBitsize1234)) + x.ConcurrentRCSyncs = int32(r.DecodeInt(32)) } case "concurrentResourceQuotaSyncs": if r.TryDecodeAsNil() { x.ConcurrentResourceQuotaSyncs = 0 } else { - x.ConcurrentResourceQuotaSyncs = int(r.DecodeInt(codecSelferBitsize1234)) + x.ConcurrentResourceQuotaSyncs = int32(r.DecodeInt(32)) } case "concurrentDeploymentSyncs": if r.TryDecodeAsNil() { x.ConcurrentDeploymentSyncs = 0 } else { - x.ConcurrentDeploymentSyncs = int(r.DecodeInt(codecSelferBitsize1234)) + x.ConcurrentDeploymentSyncs = int32(r.DecodeInt(32)) } case "concurrentDaemonSetSyncs": if r.TryDecodeAsNil() { x.ConcurrentDaemonSetSyncs = 0 } else { - x.ConcurrentDaemonSetSyncs = int(r.DecodeInt(codecSelferBitsize1234)) + x.ConcurrentDaemonSetSyncs = int32(r.DecodeInt(32)) } case "concurrentJobSyncs": if r.TryDecodeAsNil() { x.ConcurrentJobSyncs = 0 } else { - x.ConcurrentJobSyncs = int(r.DecodeInt(codecSelferBitsize1234)) + x.ConcurrentJobSyncs = int32(r.DecodeInt(32)) } case 
"concurrentNamespaceSyncs": if r.TryDecodeAsNil() { x.ConcurrentNamespaceSyncs = 0 } else { - x.ConcurrentNamespaceSyncs = int(r.DecodeInt(codecSelferBitsize1234)) + x.ConcurrentNamespaceSyncs = int32(r.DecodeInt(32)) } case "lookupCacheSizeForRC": if r.TryDecodeAsNil() { x.LookupCacheSizeForRC = 0 } else { - x.LookupCacheSizeForRC = int(r.DecodeInt(codecSelferBitsize1234)) + x.LookupCacheSizeForRC = int32(r.DecodeInt(32)) } case "lookupCacheSizeForRS": if r.TryDecodeAsNil() { x.LookupCacheSizeForRS = 0 } else { - x.LookupCacheSizeForRS = int(r.DecodeInt(codecSelferBitsize1234)) + x.LookupCacheSizeForRS = int32(r.DecodeInt(32)) } case "lookupCacheSizeForDaemonSet": if r.TryDecodeAsNil() { x.LookupCacheSizeForDaemonSet = 0 } else { - x.LookupCacheSizeForDaemonSet = int(r.DecodeInt(codecSelferBitsize1234)) + x.LookupCacheSizeForDaemonSet = int32(r.DecodeInt(32)) } case "serviceSyncPeriod": if r.TryDecodeAsNil() { @@ -6892,7 +7697,7 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromMap(l int, d *co if r.TryDecodeAsNil() { x.TerminatedPodGCThreshold = 0 } else { - x.TerminatedPodGCThreshold = int(r.DecodeInt(codecSelferBitsize1234)) + x.TerminatedPodGCThreshold = int32(r.DecodeInt(32)) } case "horizontalPodAutoscalerSyncPeriod": if r.TryDecodeAsNil() { @@ -6949,7 +7754,7 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromMap(l int, d *co if r.TryDecodeAsNil() { x.DeletingPodsBurst = 0 } else { - x.DeletingPodsBurst = int(r.DecodeInt(codecSelferBitsize1234)) + x.DeletingPodsBurst = int32(r.DecodeInt(32)) } case "nodeMonitorGracePeriod": if r.TryDecodeAsNil() { @@ -6970,7 +7775,7 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromMap(l int, d *co if r.TryDecodeAsNil() { x.RegisterRetryCount = 0 } else { - x.RegisterRetryCount = int(r.DecodeInt(codecSelferBitsize1234)) + x.RegisterRetryCount = int32(r.DecodeInt(32)) } case "nodeStartupGracePeriod": if r.TryDecodeAsNil() { @@ -7026,18 +7831,42 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromMap(l int, d *co } else { x.ClusterCIDR = string(r.DecodeString()) } + case "serviceCIDR": + if r.TryDecodeAsNil() { + x.ServiceCIDR = "" + } else { + x.ServiceCIDR = string(r.DecodeString()) + } + case "nodeCIDRMaskSize": + if r.TryDecodeAsNil() { + x.NodeCIDRMaskSize = 0 + } else { + x.NodeCIDRMaskSize = int32(r.DecodeInt(32)) + } case "allocateNodeCIDRs": if r.TryDecodeAsNil() { x.AllocateNodeCIDRs = false } else { x.AllocateNodeCIDRs = bool(r.DecodeBool()) } + case "configureCloudRoutes": + if r.TryDecodeAsNil() { + x.ConfigureCloudRoutes = false + } else { + x.ConfigureCloudRoutes = bool(r.DecodeBool()) + } case "rootCAFile": if r.TryDecodeAsNil() { x.RootCAFile = "" } else { x.RootCAFile = string(r.DecodeString()) } + case "contentType": + if r.TryDecodeAsNil() { + x.ContentType = "" + } else { + x.ContentType = string(r.DecodeString()) + } case "kubeAPIQPS": if r.TryDecodeAsNil() { x.KubeAPIQPS = 0 @@ -7048,21 +7877,42 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromMap(l int, d *co if r.TryDecodeAsNil() { x.KubeAPIBurst = 0 } else { - x.KubeAPIBurst = int(r.DecodeInt(codecSelferBitsize1234)) + x.KubeAPIBurst = int32(r.DecodeInt(32)) } case "leaderElection": if r.TryDecodeAsNil() { x.LeaderElection = LeaderElectionConfiguration{} } else { - yyv55 := &x.LeaderElection - yyv55.CodecDecodeSelf(d) + yyv59 := &x.LeaderElection + yyv59.CodecDecodeSelf(d) } case "volumeConfiguration": if r.TryDecodeAsNil() { x.VolumeConfiguration = VolumeConfiguration{} } else { - yyv56 := 
&x.VolumeConfiguration - yyv56.CodecDecodeSelf(d) + yyv60 := &x.VolumeConfiguration + yyv60.CodecDecodeSelf(d) + } + case "controllerStartInterval": + if r.TryDecodeAsNil() { + x.ControllerStartInterval = pkg1_unversioned.Duration{} + } else { + yyv61 := &x.ControllerStartInterval + yym62 := z.DecBinary() + _ = yym62 + if false { + } else if z.HasExtensions() && z.DecExt(yyv61) { + } else if !yym62 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv61) + } else { + z.DecFallback(yyv61, false) + } + } + case "enableGarbageCollector": + if r.TryDecodeAsNil() { + x.EnableGarbageCollector = false + } else { + x.EnableGarbageCollector = bool(r.DecodeBool()) } case "kind": if r.TryDecodeAsNil() { @@ -7087,16 +7937,16 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj59 int - var yyb59 bool - var yyhl59 bool = l >= 0 - yyj59++ - if yyhl59 { - yyb59 = yyj59 > l + var yyj66 int + var yyb66 bool + var yyhl66 bool = l >= 0 + yyj66++ + if yyhl66 { + yyb66 = yyj66 > l } else { - yyb59 = r.CheckBreak() + yyb66 = r.CheckBreak() } - if yyb59 { + if yyb66 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -7104,15 +7954,15 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * if r.TryDecodeAsNil() { x.Port = 0 } else { - x.Port = int(r.DecodeInt(codecSelferBitsize1234)) + x.Port = int32(r.DecodeInt(32)) } - yyj59++ - if yyhl59 { - yyb59 = yyj59 > l + yyj66++ + if yyhl66 { + yyb66 = yyj66 > l } else { - yyb59 = r.CheckBreak() + yyb66 = r.CheckBreak() } - if yyb59 { + if yyb66 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -7122,13 +7972,13 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * } else { x.Address = string(r.DecodeString()) } - yyj59++ - if yyhl59 { - yyb59 = yyj59 > l + yyj66++ + if yyhl66 { + yyb66 = yyj66 > l } else { - yyb59 = r.CheckBreak() + yyb66 = r.CheckBreak() } - if yyb59 { + if yyb66 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -7138,13 +7988,13 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * } else { x.CloudProvider = string(r.DecodeString()) } - yyj59++ - if yyhl59 { - yyb59 = yyj59 > l + yyj66++ + if yyhl66 { + yyb66 = yyj66 > l } else { - yyb59 = r.CheckBreak() + yyb66 = r.CheckBreak() } - if yyb59 { + if yyb66 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -7154,13 +8004,13 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * } else { x.CloudConfigFile = string(r.DecodeString()) } - yyj59++ - if yyhl59 { - yyb59 = yyj59 > l + yyj66++ + if yyhl66 { + yyb66 = yyj66 > l } else { - yyb59 = r.CheckBreak() + yyb66 = r.CheckBreak() } - if yyb59 { + if yyb66 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -7168,15 +8018,15 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * if r.TryDecodeAsNil() { x.ConcurrentEndpointSyncs = 0 } else { - x.ConcurrentEndpointSyncs = int(r.DecodeInt(codecSelferBitsize1234)) + x.ConcurrentEndpointSyncs = int32(r.DecodeInt(32)) } - yyj59++ - if yyhl59 { - yyb59 = yyj59 > l + yyj66++ + if yyhl66 { + yyb66 = yyj66 > l } else { - yyb59 = r.CheckBreak() + yyb66 = r.CheckBreak() } - if yyb59 { + if yyb66 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -7184,15 +8034,15 @@ func (x *KubeControllerManagerConfiguration) 
codecDecodeSelfFromArray(l int, d * if r.TryDecodeAsNil() { x.ConcurrentRSSyncs = 0 } else { - x.ConcurrentRSSyncs = int(r.DecodeInt(codecSelferBitsize1234)) + x.ConcurrentRSSyncs = int32(r.DecodeInt(32)) } - yyj59++ - if yyhl59 { - yyb59 = yyj59 > l + yyj66++ + if yyhl66 { + yyb66 = yyj66 > l } else { - yyb59 = r.CheckBreak() + yyb66 = r.CheckBreak() } - if yyb59 { + if yyb66 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -7200,15 +8050,15 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * if r.TryDecodeAsNil() { x.ConcurrentRCSyncs = 0 } else { - x.ConcurrentRCSyncs = int(r.DecodeInt(codecSelferBitsize1234)) + x.ConcurrentRCSyncs = int32(r.DecodeInt(32)) } - yyj59++ - if yyhl59 { - yyb59 = yyj59 > l + yyj66++ + if yyhl66 { + yyb66 = yyj66 > l } else { - yyb59 = r.CheckBreak() + yyb66 = r.CheckBreak() } - if yyb59 { + if yyb66 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -7216,15 +8066,15 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * if r.TryDecodeAsNil() { x.ConcurrentResourceQuotaSyncs = 0 } else { - x.ConcurrentResourceQuotaSyncs = int(r.DecodeInt(codecSelferBitsize1234)) + x.ConcurrentResourceQuotaSyncs = int32(r.DecodeInt(32)) } - yyj59++ - if yyhl59 { - yyb59 = yyj59 > l + yyj66++ + if yyhl66 { + yyb66 = yyj66 > l } else { - yyb59 = r.CheckBreak() + yyb66 = r.CheckBreak() } - if yyb59 { + if yyb66 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -7232,15 +8082,15 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * if r.TryDecodeAsNil() { x.ConcurrentDeploymentSyncs = 0 } else { - x.ConcurrentDeploymentSyncs = int(r.DecodeInt(codecSelferBitsize1234)) + x.ConcurrentDeploymentSyncs = int32(r.DecodeInt(32)) } - yyj59++ - if yyhl59 { - yyb59 = yyj59 > l + yyj66++ + if yyhl66 { + yyb66 = yyj66 > l } else { - yyb59 = r.CheckBreak() + yyb66 = r.CheckBreak() } - if yyb59 { + if yyb66 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -7248,15 +8098,15 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * if r.TryDecodeAsNil() { x.ConcurrentDaemonSetSyncs = 0 } else { - x.ConcurrentDaemonSetSyncs = int(r.DecodeInt(codecSelferBitsize1234)) + x.ConcurrentDaemonSetSyncs = int32(r.DecodeInt(32)) } - yyj59++ - if yyhl59 { - yyb59 = yyj59 > l + yyj66++ + if yyhl66 { + yyb66 = yyj66 > l } else { - yyb59 = r.CheckBreak() + yyb66 = r.CheckBreak() } - if yyb59 { + if yyb66 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -7264,15 +8114,15 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * if r.TryDecodeAsNil() { x.ConcurrentJobSyncs = 0 } else { - x.ConcurrentJobSyncs = int(r.DecodeInt(codecSelferBitsize1234)) + x.ConcurrentJobSyncs = int32(r.DecodeInt(32)) } - yyj59++ - if yyhl59 { - yyb59 = yyj59 > l + yyj66++ + if yyhl66 { + yyb66 = yyj66 > l } else { - yyb59 = r.CheckBreak() + yyb66 = r.CheckBreak() } - if yyb59 { + if yyb66 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -7280,15 +8130,15 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * if r.TryDecodeAsNil() { x.ConcurrentNamespaceSyncs = 0 } else { - x.ConcurrentNamespaceSyncs = int(r.DecodeInt(codecSelferBitsize1234)) + x.ConcurrentNamespaceSyncs = int32(r.DecodeInt(32)) } - yyj59++ - if yyhl59 { - yyb59 = yyj59 > l + yyj66++ + if yyhl66 { + yyb66 = yyj66 > l } else { - yyb59 = r.CheckBreak() + yyb66 = 
r.CheckBreak() } - if yyb59 { + if yyb66 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -7296,15 +8146,15 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * if r.TryDecodeAsNil() { x.LookupCacheSizeForRC = 0 } else { - x.LookupCacheSizeForRC = int(r.DecodeInt(codecSelferBitsize1234)) + x.LookupCacheSizeForRC = int32(r.DecodeInt(32)) } - yyj59++ - if yyhl59 { - yyb59 = yyj59 > l + yyj66++ + if yyhl66 { + yyb66 = yyj66 > l } else { - yyb59 = r.CheckBreak() + yyb66 = r.CheckBreak() } - if yyb59 { + if yyb66 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -7312,15 +8162,15 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * if r.TryDecodeAsNil() { x.LookupCacheSizeForRS = 0 } else { - x.LookupCacheSizeForRS = int(r.DecodeInt(codecSelferBitsize1234)) + x.LookupCacheSizeForRS = int32(r.DecodeInt(32)) } - yyj59++ - if yyhl59 { - yyb59 = yyj59 > l + yyj66++ + if yyhl66 { + yyb66 = yyj66 > l } else { - yyb59 = r.CheckBreak() + yyb66 = r.CheckBreak() } - if yyb59 { + if yyb66 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -7328,15 +8178,15 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * if r.TryDecodeAsNil() { x.LookupCacheSizeForDaemonSet = 0 } else { - x.LookupCacheSizeForDaemonSet = int(r.DecodeInt(codecSelferBitsize1234)) + x.LookupCacheSizeForDaemonSet = int32(r.DecodeInt(32)) } - yyj59++ - if yyhl59 { - yyb59 = yyj59 > l + yyj66++ + if yyhl66 { + yyb66 = yyj66 > l } else { - yyb59 = r.CheckBreak() + yyb66 = r.CheckBreak() } - if yyb59 { + if yyb66 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -7344,24 +8194,24 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * if r.TryDecodeAsNil() { x.ServiceSyncPeriod = pkg1_unversioned.Duration{} } else { - yyv75 := &x.ServiceSyncPeriod - yym76 := z.DecBinary() - _ = yym76 + yyv82 := &x.ServiceSyncPeriod + yym83 := z.DecBinary() + _ = yym83 if false { - } else if z.HasExtensions() && z.DecExt(yyv75) { - } else if !yym76 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv75) + } else if z.HasExtensions() && z.DecExt(yyv82) { + } else if !yym83 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv82) } else { - z.DecFallback(yyv75, false) + z.DecFallback(yyv82, false) } } - yyj59++ - if yyhl59 { - yyb59 = yyj59 > l + yyj66++ + if yyhl66 { + yyb66 = yyj66 > l } else { - yyb59 = r.CheckBreak() + yyb66 = r.CheckBreak() } - if yyb59 { + if yyb66 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -7369,24 +8219,24 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * if r.TryDecodeAsNil() { x.NodeSyncPeriod = pkg1_unversioned.Duration{} } else { - yyv77 := &x.NodeSyncPeriod - yym78 := z.DecBinary() - _ = yym78 + yyv84 := &x.NodeSyncPeriod + yym85 := z.DecBinary() + _ = yym85 if false { - } else if z.HasExtensions() && z.DecExt(yyv77) { - } else if !yym78 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv77) + } else if z.HasExtensions() && z.DecExt(yyv84) { + } else if !yym85 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv84) } else { - z.DecFallback(yyv77, false) + z.DecFallback(yyv84, false) } } - yyj59++ - if yyhl59 { - yyb59 = yyj59 > l + yyj66++ + if yyhl66 { + yyb66 = yyj66 > l } else { - yyb59 = r.CheckBreak() + yyb66 = r.CheckBreak() } - if yyb59 { + if yyb66 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -7394,24 +8244,24 @@ func (x 
*KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * if r.TryDecodeAsNil() { x.ResourceQuotaSyncPeriod = pkg1_unversioned.Duration{} } else { - yyv79 := &x.ResourceQuotaSyncPeriod - yym80 := z.DecBinary() - _ = yym80 + yyv86 := &x.ResourceQuotaSyncPeriod + yym87 := z.DecBinary() + _ = yym87 if false { - } else if z.HasExtensions() && z.DecExt(yyv79) { - } else if !yym80 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv79) + } else if z.HasExtensions() && z.DecExt(yyv86) { + } else if !yym87 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv86) } else { - z.DecFallback(yyv79, false) + z.DecFallback(yyv86, false) } } - yyj59++ - if yyhl59 { - yyb59 = yyj59 > l + yyj66++ + if yyhl66 { + yyb66 = yyj66 > l } else { - yyb59 = r.CheckBreak() + yyb66 = r.CheckBreak() } - if yyb59 { + if yyb66 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -7419,24 +8269,24 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * if r.TryDecodeAsNil() { x.NamespaceSyncPeriod = pkg1_unversioned.Duration{} } else { - yyv81 := &x.NamespaceSyncPeriod - yym82 := z.DecBinary() - _ = yym82 + yyv88 := &x.NamespaceSyncPeriod + yym89 := z.DecBinary() + _ = yym89 if false { - } else if z.HasExtensions() && z.DecExt(yyv81) { - } else if !yym82 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv81) + } else if z.HasExtensions() && z.DecExt(yyv88) { + } else if !yym89 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv88) } else { - z.DecFallback(yyv81, false) + z.DecFallback(yyv88, false) } } - yyj59++ - if yyhl59 { - yyb59 = yyj59 > l + yyj66++ + if yyhl66 { + yyb66 = yyj66 > l } else { - yyb59 = r.CheckBreak() + yyb66 = r.CheckBreak() } - if yyb59 { + if yyb66 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -7444,24 +8294,24 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * if r.TryDecodeAsNil() { x.PVClaimBinderSyncPeriod = pkg1_unversioned.Duration{} } else { - yyv83 := &x.PVClaimBinderSyncPeriod - yym84 := z.DecBinary() - _ = yym84 + yyv90 := &x.PVClaimBinderSyncPeriod + yym91 := z.DecBinary() + _ = yym91 if false { - } else if z.HasExtensions() && z.DecExt(yyv83) { - } else if !yym84 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv83) + } else if z.HasExtensions() && z.DecExt(yyv90) { + } else if !yym91 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv90) } else { - z.DecFallback(yyv83, false) + z.DecFallback(yyv90, false) } } - yyj59++ - if yyhl59 { - yyb59 = yyj59 > l + yyj66++ + if yyhl66 { + yyb66 = yyj66 > l } else { - yyb59 = r.CheckBreak() + yyb66 = r.CheckBreak() } - if yyb59 { + if yyb66 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -7469,24 +8319,24 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * if r.TryDecodeAsNil() { x.MinResyncPeriod = pkg1_unversioned.Duration{} } else { - yyv85 := &x.MinResyncPeriod - yym86 := z.DecBinary() - _ = yym86 + yyv92 := &x.MinResyncPeriod + yym93 := z.DecBinary() + _ = yym93 if false { - } else if z.HasExtensions() && z.DecExt(yyv85) { - } else if !yym86 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv85) + } else if z.HasExtensions() && z.DecExt(yyv92) { + } else if !yym93 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv92) } else { - z.DecFallback(yyv85, false) + z.DecFallback(yyv92, false) } } - yyj59++ - if yyhl59 { - yyb59 = yyj59 > l + yyj66++ + if yyhl66 { + yyb66 = yyj66 > l } else { - yyb59 = r.CheckBreak() + yyb66 = r.CheckBreak() } - if yyb59 { + if yyb66 { 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -7494,15 +8344,15 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * if r.TryDecodeAsNil() { x.TerminatedPodGCThreshold = 0 } else { - x.TerminatedPodGCThreshold = int(r.DecodeInt(codecSelferBitsize1234)) + x.TerminatedPodGCThreshold = int32(r.DecodeInt(32)) } - yyj59++ - if yyhl59 { - yyb59 = yyj59 > l + yyj66++ + if yyhl66 { + yyb66 = yyj66 > l } else { - yyb59 = r.CheckBreak() + yyb66 = r.CheckBreak() } - if yyb59 { + if yyb66 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -7510,24 +8360,24 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * if r.TryDecodeAsNil() { x.HorizontalPodAutoscalerSyncPeriod = pkg1_unversioned.Duration{} } else { - yyv88 := &x.HorizontalPodAutoscalerSyncPeriod - yym89 := z.DecBinary() - _ = yym89 + yyv95 := &x.HorizontalPodAutoscalerSyncPeriod + yym96 := z.DecBinary() + _ = yym96 if false { - } else if z.HasExtensions() && z.DecExt(yyv88) { - } else if !yym89 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv88) + } else if z.HasExtensions() && z.DecExt(yyv95) { + } else if !yym96 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv95) } else { - z.DecFallback(yyv88, false) + z.DecFallback(yyv95, false) } } - yyj59++ - if yyhl59 { - yyb59 = yyj59 > l + yyj66++ + if yyhl66 { + yyb66 = yyj66 > l } else { - yyb59 = r.CheckBreak() + yyb66 = r.CheckBreak() } - if yyb59 { + if yyb66 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -7535,24 +8385,24 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * if r.TryDecodeAsNil() { x.DeploymentControllerSyncPeriod = pkg1_unversioned.Duration{} } else { - yyv90 := &x.DeploymentControllerSyncPeriod - yym91 := z.DecBinary() - _ = yym91 + yyv97 := &x.DeploymentControllerSyncPeriod + yym98 := z.DecBinary() + _ = yym98 if false { - } else if z.HasExtensions() && z.DecExt(yyv90) { - } else if !yym91 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv90) + } else if z.HasExtensions() && z.DecExt(yyv97) { + } else if !yym98 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv97) } else { - z.DecFallback(yyv90, false) + z.DecFallback(yyv97, false) } } - yyj59++ - if yyhl59 { - yyb59 = yyj59 > l + yyj66++ + if yyhl66 { + yyb66 = yyj66 > l } else { - yyb59 = r.CheckBreak() + yyb66 = r.CheckBreak() } - if yyb59 { + if yyb66 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -7560,24 +8410,24 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * if r.TryDecodeAsNil() { x.PodEvictionTimeout = pkg1_unversioned.Duration{} } else { - yyv92 := &x.PodEvictionTimeout - yym93 := z.DecBinary() - _ = yym93 + yyv99 := &x.PodEvictionTimeout + yym100 := z.DecBinary() + _ = yym100 if false { - } else if z.HasExtensions() && z.DecExt(yyv92) { - } else if !yym93 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv92) + } else if z.HasExtensions() && z.DecExt(yyv99) { + } else if !yym100 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv99) } else { - z.DecFallback(yyv92, false) + z.DecFallback(yyv99, false) } } - yyj59++ - if yyhl59 { - yyb59 = yyj59 > l + yyj66++ + if yyhl66 { + yyb66 = yyj66 > l } else { - yyb59 = r.CheckBreak() + yyb66 = r.CheckBreak() } - if yyb59 { + if yyb66 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -7587,13 +8437,13 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * } else { x.DeletingPodsQps = float32(r.DecodeFloat(true)) } - yyj59++ 
- if yyhl59 { - yyb59 = yyj59 > l + yyj66++ + if yyhl66 { + yyb66 = yyj66 > l } else { - yyb59 = r.CheckBreak() + yyb66 = r.CheckBreak() } - if yyb59 { + if yyb66 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -7601,15 +8451,15 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * if r.TryDecodeAsNil() { x.DeletingPodsBurst = 0 } else { - x.DeletingPodsBurst = int(r.DecodeInt(codecSelferBitsize1234)) + x.DeletingPodsBurst = int32(r.DecodeInt(32)) } - yyj59++ - if yyhl59 { - yyb59 = yyj59 > l + yyj66++ + if yyhl66 { + yyb66 = yyj66 > l } else { - yyb59 = r.CheckBreak() + yyb66 = r.CheckBreak() } - if yyb59 { + if yyb66 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -7617,24 +8467,24 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * if r.TryDecodeAsNil() { x.NodeMonitorGracePeriod = pkg1_unversioned.Duration{} } else { - yyv96 := &x.NodeMonitorGracePeriod - yym97 := z.DecBinary() - _ = yym97 + yyv103 := &x.NodeMonitorGracePeriod + yym104 := z.DecBinary() + _ = yym104 if false { - } else if z.HasExtensions() && z.DecExt(yyv96) { - } else if !yym97 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv96) + } else if z.HasExtensions() && z.DecExt(yyv103) { + } else if !yym104 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv103) } else { - z.DecFallback(yyv96, false) + z.DecFallback(yyv103, false) } } - yyj59++ - if yyhl59 { - yyb59 = yyj59 > l + yyj66++ + if yyhl66 { + yyb66 = yyj66 > l } else { - yyb59 = r.CheckBreak() + yyb66 = r.CheckBreak() } - if yyb59 { + if yyb66 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -7642,15 +8492,15 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * if r.TryDecodeAsNil() { x.RegisterRetryCount = 0 } else { - x.RegisterRetryCount = int(r.DecodeInt(codecSelferBitsize1234)) + x.RegisterRetryCount = int32(r.DecodeInt(32)) } - yyj59++ - if yyhl59 { - yyb59 = yyj59 > l + yyj66++ + if yyhl66 { + yyb66 = yyj66 > l } else { - yyb59 = r.CheckBreak() + yyb66 = r.CheckBreak() } - if yyb59 { + if yyb66 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -7658,24 +8508,24 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * if r.TryDecodeAsNil() { x.NodeStartupGracePeriod = pkg1_unversioned.Duration{} } else { - yyv99 := &x.NodeStartupGracePeriod - yym100 := z.DecBinary() - _ = yym100 + yyv106 := &x.NodeStartupGracePeriod + yym107 := z.DecBinary() + _ = yym107 if false { - } else if z.HasExtensions() && z.DecExt(yyv99) { - } else if !yym100 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv99) + } else if z.HasExtensions() && z.DecExt(yyv106) { + } else if !yym107 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv106) } else { - z.DecFallback(yyv99, false) + z.DecFallback(yyv106, false) } } - yyj59++ - if yyhl59 { - yyb59 = yyj59 > l + yyj66++ + if yyhl66 { + yyb66 = yyj66 > l } else { - yyb59 = r.CheckBreak() + yyb66 = r.CheckBreak() } - if yyb59 { + if yyb66 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -7683,24 +8533,24 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * if r.TryDecodeAsNil() { x.NodeMonitorPeriod = pkg1_unversioned.Duration{} } else { - yyv101 := &x.NodeMonitorPeriod - yym102 := z.DecBinary() - _ = yym102 + yyv108 := &x.NodeMonitorPeriod + yym109 := z.DecBinary() + _ = yym109 if false { - } else if z.HasExtensions() && z.DecExt(yyv101) { - } else if !yym102 && z.IsJSONHandle() { - 
z.DecJSONUnmarshal(yyv101) + } else if z.HasExtensions() && z.DecExt(yyv108) { + } else if !yym109 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv108) } else { - z.DecFallback(yyv101, false) + z.DecFallback(yyv108, false) } } - yyj59++ - if yyhl59 { - yyb59 = yyj59 > l + yyj66++ + if yyhl66 { + yyb66 = yyj66 > l } else { - yyb59 = r.CheckBreak() + yyb66 = r.CheckBreak() } - if yyb59 { + if yyb66 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -7710,13 +8560,13 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * } else { x.ServiceAccountKeyFile = string(r.DecodeString()) } - yyj59++ - if yyhl59 { - yyb59 = yyj59 > l + yyj66++ + if yyhl66 { + yyb66 = yyj66 > l } else { - yyb59 = r.CheckBreak() + yyb66 = r.CheckBreak() } - if yyb59 { + if yyb66 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -7726,13 +8576,13 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * } else { x.EnableProfiling = bool(r.DecodeBool()) } - yyj59++ - if yyhl59 { - yyb59 = yyj59 > l + yyj66++ + if yyhl66 { + yyb66 = yyj66 > l } else { - yyb59 = r.CheckBreak() + yyb66 = r.CheckBreak() } - if yyb59 { + if yyb66 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -7742,13 +8592,13 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * } else { x.ClusterName = string(r.DecodeString()) } - yyj59++ - if yyhl59 { - yyb59 = yyj59 > l + yyj66++ + if yyhl66 { + yyb66 = yyj66 > l } else { - yyb59 = r.CheckBreak() + yyb66 = r.CheckBreak() } - if yyb59 { + if yyb66 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -7758,13 +8608,45 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * } else { x.ClusterCIDR = string(r.DecodeString()) } - yyj59++ - if yyhl59 { - yyb59 = yyj59 > l + yyj66++ + if yyhl66 { + yyb66 = yyj66 > l + } else { + yyb66 = r.CheckBreak() + } + if yyb66 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ServiceCIDR = "" + } else { + x.ServiceCIDR = string(r.DecodeString()) + } + yyj66++ + if yyhl66 { + yyb66 = yyj66 > l + } else { + yyb66 = r.CheckBreak() + } + if yyb66 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.NodeCIDRMaskSize = 0 + } else { + x.NodeCIDRMaskSize = int32(r.DecodeInt(32)) + } + yyj66++ + if yyhl66 { + yyb66 = yyj66 > l } else { - yyb59 = r.CheckBreak() + yyb66 = r.CheckBreak() } - if yyb59 { + if yyb66 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -7774,13 +8656,29 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * } else { x.AllocateNodeCIDRs = bool(r.DecodeBool()) } - yyj59++ - if yyhl59 { - yyb59 = yyj59 > l + yyj66++ + if yyhl66 { + yyb66 = yyj66 > l + } else { + yyb66 = r.CheckBreak() + } + if yyb66 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ConfigureCloudRoutes = false + } else { + x.ConfigureCloudRoutes = bool(r.DecodeBool()) + } + yyj66++ + if yyhl66 { + yyb66 = yyj66 > l } else { - yyb59 = r.CheckBreak() + yyb66 = r.CheckBreak() } - if yyb59 { + if yyb66 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -7790,13 +8688,29 @@ func (x 
*KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * } else { x.RootCAFile = string(r.DecodeString()) } - yyj59++ - if yyhl59 { - yyb59 = yyj59 > l + yyj66++ + if yyhl66 { + yyb66 = yyj66 > l + } else { + yyb66 = r.CheckBreak() + } + if yyb66 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ContentType = "" + } else { + x.ContentType = string(r.DecodeString()) + } + yyj66++ + if yyhl66 { + yyb66 = yyj66 > l } else { - yyb59 = r.CheckBreak() + yyb66 = r.CheckBreak() } - if yyb59 { + if yyb66 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -7806,13 +8720,13 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * } else { x.KubeAPIQPS = float32(r.DecodeFloat(true)) } - yyj59++ - if yyhl59 { - yyb59 = yyj59 > l + yyj66++ + if yyhl66 { + yyb66 = yyj66 > l } else { - yyb59 = r.CheckBreak() + yyb66 = r.CheckBreak() } - if yyb59 { + if yyb66 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -7820,15 +8734,15 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * if r.TryDecodeAsNil() { x.KubeAPIBurst = 0 } else { - x.KubeAPIBurst = int(r.DecodeInt(codecSelferBitsize1234)) + x.KubeAPIBurst = int32(r.DecodeInt(32)) } - yyj59++ - if yyhl59 { - yyb59 = yyj59 > l + yyj66++ + if yyhl66 { + yyb66 = yyj66 > l } else { - yyb59 = r.CheckBreak() + yyb66 = r.CheckBreak() } - if yyb59 { + if yyb66 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -7836,16 +8750,16 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * if r.TryDecodeAsNil() { x.LeaderElection = LeaderElectionConfiguration{} } else { - yyv111 := &x.LeaderElection - yyv111.CodecDecodeSelf(d) + yyv122 := &x.LeaderElection + yyv122.CodecDecodeSelf(d) } - yyj59++ - if yyhl59 { - yyb59 = yyj59 > l + yyj66++ + if yyhl66 { + yyb66 = yyj66 > l } else { - yyb59 = r.CheckBreak() + yyb66 = r.CheckBreak() } - if yyb59 { + if yyb66 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -7853,16 +8767,57 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * if r.TryDecodeAsNil() { x.VolumeConfiguration = VolumeConfiguration{} } else { - yyv112 := &x.VolumeConfiguration - yyv112.CodecDecodeSelf(d) + yyv123 := &x.VolumeConfiguration + yyv123.CodecDecodeSelf(d) + } + yyj66++ + if yyhl66 { + yyb66 = yyj66 > l + } else { + yyb66 = r.CheckBreak() + } + if yyb66 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ControllerStartInterval = pkg1_unversioned.Duration{} + } else { + yyv124 := &x.ControllerStartInterval + yym125 := z.DecBinary() + _ = yym125 + if false { + } else if z.HasExtensions() && z.DecExt(yyv124) { + } else if !yym125 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv124) + } else { + z.DecFallback(yyv124, false) + } + } + yyj66++ + if yyhl66 { + yyb66 = yyj66 > l + } else { + yyb66 = r.CheckBreak() + } + if yyb66 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.EnableGarbageCollector = false + } else { + x.EnableGarbageCollector = bool(r.DecodeBool()) } - yyj59++ - if yyhl59 { - yyb59 = yyj59 > l + yyj66++ + if yyhl66 { + yyb66 = yyj66 > l } else { - yyb59 = r.CheckBreak() + yyb66 = 
r.CheckBreak() } - if yyb59 { + if yyb66 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -7872,13 +8827,13 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * } else { x.Kind = string(r.DecodeString()) } - yyj59++ - if yyhl59 { - yyb59 = yyj59 > l + yyj66++ + if yyhl66 { + yyb66 = yyj66 > l } else { - yyb59 = r.CheckBreak() + yyb66 = r.CheckBreak() } - if yyb59 { + if yyb66 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -7889,17 +8844,17 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * x.APIVersion = string(r.DecodeString()) } for { - yyj59++ - if yyhl59 { - yyb59 = yyj59 > l + yyj66++ + if yyhl66 { + yyb66 = yyj66 > l } else { - yyb59 = r.CheckBreak() + yyb66 = r.CheckBreak() } - if yyb59 { + if yyb66 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj59-1, "") + z.DecStructFieldNotFound(yyj66-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -7918,14 +8873,14 @@ func (x *VolumeConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool + var yyq2 [3]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false var yynn2 int if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) + r.EncodeArrayStart(3) } else { - yynn2 = 2 + yynn2 = 3 for _, b := range yyq2 { if b { yynn2++ @@ -7964,6 +8919,25 @@ func (x *VolumeConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { yy9 := &x.PersistentVolumeRecyclerConfiguration yy9.CodecEncodeSelf(e) } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym12 := z.EncBinary() + _ = yym12 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FlexVolumePluginDir)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("flexVolumePluginDir")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FlexVolumePluginDir)) + } + } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { @@ -8038,6 +9012,12 @@ func (x *VolumeConfiguration) codecDecodeSelfFromMap(l int, d *codec1978.Decoder yyv5 := &x.PersistentVolumeRecyclerConfiguration yyv5.CodecDecodeSelf(d) } + case "flexVolumePluginDir": + if r.TryDecodeAsNil() { + x.FlexVolumePluginDir = "" + } else { + x.FlexVolumePluginDir = string(r.DecodeString()) + } default: z.DecStructFieldNotFound(-1, yys3) } // end switch yys3 @@ -8049,16 +9029,16 @@ func (x *VolumeConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Decod var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb6 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb6 { + if yyb7 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -8068,13 +9048,13 @@ func (x *VolumeConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Decod } else { x.EnableHostPathProvisioning = bool(r.DecodeBool()) } - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb6 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb6 { + if yyb7 { 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -8082,21 +9062,37 @@ func (x *VolumeConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Decod if r.TryDecodeAsNil() { x.PersistentVolumeRecyclerConfiguration = PersistentVolumeRecyclerConfiguration{} } else { - yyv8 := &x.PersistentVolumeRecyclerConfiguration - yyv8.CodecDecodeSelf(d) + yyv9 := &x.PersistentVolumeRecyclerConfiguration + yyv9.CodecDecodeSelf(d) + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.FlexVolumePluginDir = "" + } else { + x.FlexVolumePluginDir = string(r.DecodeString()) } for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb6 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb6 { + if yyb7 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") + z.DecStructFieldNotFound(yyj7-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -8329,13 +9325,13 @@ func (x *PersistentVolumeRecyclerConfiguration) codecDecodeSelfFromMap(l int, d if r.TryDecodeAsNil() { x.MaximumRetry = 0 } else { - x.MaximumRetry = int(r.DecodeInt(codecSelferBitsize1234)) + x.MaximumRetry = int32(r.DecodeInt(32)) } case "minimumTimeoutNFS": if r.TryDecodeAsNil() { x.MinimumTimeoutNFS = 0 } else { - x.MinimumTimeoutNFS = int(r.DecodeInt(codecSelferBitsize1234)) + x.MinimumTimeoutNFS = int32(r.DecodeInt(32)) } case "podTemplateFilePathNFS": if r.TryDecodeAsNil() { @@ -8347,7 +9343,7 @@ func (x *PersistentVolumeRecyclerConfiguration) codecDecodeSelfFromMap(l int, d if r.TryDecodeAsNil() { x.IncrementTimeoutNFS = 0 } else { - x.IncrementTimeoutNFS = int(r.DecodeInt(codecSelferBitsize1234)) + x.IncrementTimeoutNFS = int32(r.DecodeInt(32)) } case "podTemplateFilePathHostPath": if r.TryDecodeAsNil() { @@ -8359,13 +9355,13 @@ func (x *PersistentVolumeRecyclerConfiguration) codecDecodeSelfFromMap(l int, d if r.TryDecodeAsNil() { x.MinimumTimeoutHostPath = 0 } else { - x.MinimumTimeoutHostPath = int(r.DecodeInt(codecSelferBitsize1234)) + x.MinimumTimeoutHostPath = int32(r.DecodeInt(32)) } case "incrementTimeoutHostPath": if r.TryDecodeAsNil() { x.IncrementTimeoutHostPath = 0 } else { - x.IncrementTimeoutHostPath = int(r.DecodeInt(codecSelferBitsize1234)) + x.IncrementTimeoutHostPath = int32(r.DecodeInt(32)) } default: z.DecStructFieldNotFound(-1, yys3) @@ -8395,7 +9391,7 @@ func (x *PersistentVolumeRecyclerConfiguration) codecDecodeSelfFromArray(l int, if r.TryDecodeAsNil() { x.MaximumRetry = 0 } else { - x.MaximumRetry = int(r.DecodeInt(codecSelferBitsize1234)) + x.MaximumRetry = int32(r.DecodeInt(32)) } yyj11++ if yyhl11 { @@ -8411,7 +9407,7 @@ func (x *PersistentVolumeRecyclerConfiguration) codecDecodeSelfFromArray(l int, if r.TryDecodeAsNil() { x.MinimumTimeoutNFS = 0 } else { - x.MinimumTimeoutNFS = int(r.DecodeInt(codecSelferBitsize1234)) + x.MinimumTimeoutNFS = int32(r.DecodeInt(32)) } yyj11++ if yyhl11 { @@ -8443,7 +9439,7 @@ func (x *PersistentVolumeRecyclerConfiguration) codecDecodeSelfFromArray(l int, if r.TryDecodeAsNil() { x.IncrementTimeoutNFS = 0 } else { - x.IncrementTimeoutNFS = int(r.DecodeInt(codecSelferBitsize1234)) + x.IncrementTimeoutNFS = int32(r.DecodeInt(32)) } yyj11++ if yyhl11 { @@ -8475,7 +9471,7 @@ func (x *PersistentVolumeRecyclerConfiguration) codecDecodeSelfFromArray(l int, if 
r.TryDecodeAsNil() { x.MinimumTimeoutHostPath = 0 } else { - x.MinimumTimeoutHostPath = int(r.DecodeInt(codecSelferBitsize1234)) + x.MinimumTimeoutHostPath = int32(r.DecodeInt(32)) } yyj11++ if yyhl11 { @@ -8491,7 +9487,7 @@ func (x *PersistentVolumeRecyclerConfiguration) codecDecodeSelfFromArray(l int, if r.TryDecodeAsNil() { x.IncrementTimeoutHostPath = 0 } else { - x.IncrementTimeoutHostPath = int(r.DecodeInt(codecSelferBitsize1234)) + x.IncrementTimeoutHostPath = int32(r.DecodeInt(32)) } for { yyj11++ diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/componentconfig/types.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/componentconfig/types.go index fa52286da2b9..3d2b7c769cb3 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/componentconfig/types.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/componentconfig/types.go @@ -24,16 +24,20 @@ type KubeProxyConfiguration struct { // bindAddress is the IP address for the proxy server to serve on (set to 0.0.0.0 // for all interfaces) BindAddress string `json:"bindAddress"` + // clusterCIDR is the CIDR range of the pods in the cluster. It is used to + // bridge traffic coming from outside of the cluster. If not provided, + // no off-cluster bridging will be performed. + ClusterCIDR string `json:"clusterCIDR"` // healthzBindAddress is the IP address for the health check server to serve on, // defaulting to 127.0.0.1 (set to 0.0.0.0 for all interfaces) HealthzBindAddress string `json:"healthzBindAddress"` // healthzPort is the port to bind the health check server. Use 0 to disable. - HealthzPort int `json:"healthzPort"` + HealthzPort int32 `json:"healthzPort"` // hostnameOverride, if non-empty, will be used as the identity instead of the actual hostname. HostnameOverride string `json:"hostnameOverride"` // iptablesMasqueradeBit is the bit of the iptables fwmark space to use for SNAT if using // the pure iptables proxy mode. Values must be within the range [0, 31]. - IPTablesMasqueradeBit *int `json:"iptablesMasqueradeBit"` + IPTablesMasqueradeBit *int32 `json:"iptablesMasqueradeBit"` // iptablesSyncPeriod is the period that iptables rules are refreshed (e.g. '5s', '1m', // '2h22m'). Must be greater than 0. IPTablesSyncPeriod unversioned.Duration `json:"iptablesSyncPeriodSeconds"` @@ -46,7 +50,7 @@ type KubeProxyConfiguration struct { Master string `json:"master"` // oomScoreAdj is the oom-score-adj value for kube-proxy process. Values must be within // the range [-1000, 1000] - OOMScoreAdj *int `json:"oomScoreAdj"` + OOMScoreAdj *int32 `json:"oomScoreAdj"` // mode specifies which proxy mode to use. Mode ProxyMode `json:"mode"` // portRange is the range of host ports (beginPort-endPort, inclusive) that may be consumed @@ -59,7 +63,7 @@ type KubeProxyConfiguration struct { // Must be greater than 0. Only applicable for proxyMode=userspace. UDPIdleTimeout unversioned.Duration `json:"udpTimeoutMilliseconds"` // conntrackMax is the maximum number of NAT connections to track (0 to leave as-is)") - ConntrackMax int `json:"conntrackMax"` + ConntrackMax int32 `json:"conntrackMax"` // conntrackTCPEstablishedTimeout is how long an idle UDP connection will be kept open // (e.g. '250ms', '2s'). Must be greater than 0. Only applicable for proxyMode is Userspace ConntrackTCPEstablishedTimeout unversioned.Duration `json:"conntrackTCPEstablishedTimeout"` @@ -147,6 +151,8 @@ type KubeletConfiguration struct { // rootDirectory is the directory path to place kubelet files (volume // mounts,etc). 
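One detail worth calling out from the KubeProxyConfiguration hunk above: oomScoreAdj and iptablesMasqueradeBit keep their pointer shape (*int to *int32) rather than flattening to plain int32. A nil pointer lets defaulting code distinguish "the user never set this" from an explicit 0, which is a legal value for both fields. Here is a sketch of that pattern with a pared-down stand-in type; the concrete default values are illustrative assumptions, not taken from this diff.

package main

import "fmt"

// proxyConfig stands in for KubeProxyConfiguration; only the pointer
// fields matter for this example.
type proxyConfig struct {
	OOMScoreAdj           *int32
	IPTablesMasqueradeBit *int32
}

func applyDefaults(cfg *proxyConfig) {
	if cfg.OOMScoreAdj == nil { // nil means "unset", distinct from an explicit 0
		v := int32(-999) // assumed default, for illustration only
		cfg.OOMScoreAdj = &v
	}
	if cfg.IPTablesMasqueradeBit == nil {
		bit := int32(14) // assumed default, for illustration only
		cfg.IPTablesMasqueradeBit = &bit
	}
}

func main() {
	zero := int32(0)
	cfg := proxyConfig{OOMScoreAdj: &zero} // user explicitly asked for 0
	applyDefaults(&cfg)
	fmt.Println(*cfg.OOMScoreAdj, *cfg.IPTablesMasqueradeBit) // 0 14
}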
RootDirectory string `json:"rootDirectory"` + // seccompProfileRoot is the directory path for seccomp profiles. + SeccompProfileRoot string `json:"seccompProfileRoot"` // allowPrivileged enables containers to request privileged mode. // Defaults to false. AllowPrivileged bool `json:"allowPrivileged"` @@ -165,14 +171,14 @@ type KubeletConfiguration struct { // registryBurst is the maximum size of bursty pulls, temporarily allows // pulls to burst to this number, while still not exceeding registryQps. // Only used if registryQps > 0. - RegistryBurst int `json:"registryBurst"` + RegistryBurst int32 `json:"registryBurst"` // eventRecordQPS is the maximum event creations per second. If 0, there // is no limit enforced. EventRecordQPS float32 `json:"eventRecordQPS"` // eventBurst is the maximum size of bursty event records, temporarily // allows event records to burst to this number, while still not exceeding // event-qps. Only used if eventQps > 0. - EventBurst int `json:"eventBurst"` + EventBurst int32 `json:"eventBurst"` // enableDebuggingHandlers enables server endpoints for log collection // and local running of containers and commands EnableDebuggingHandlers bool `json:"enableDebuggingHandlers"` @@ -181,20 +187,20 @@ type KubeletConfiguration struct { MinimumGCAge unversioned.Duration `json:"minimumGCAge"` // maxPerPodContainerCount is the maximum number of old instances to // retain per container. Each container takes up some disk space. - MaxPerPodContainerCount int `json:"maxPerPodContainerCount"` + MaxPerPodContainerCount int32 `json:"maxPerPodContainerCount"` // maxContainerCount is the maximum number of old instances of containers // to retain globally. Each container takes up some disk space. - MaxContainerCount int `json:"maxContainerCount"` + MaxContainerCount int32 `json:"maxContainerCount"` // cAdvisorPort is the port of the localhost cAdvisor endpoint CAdvisorPort uint `json:"cAdvisorPort"` // healthzPort is the port of the localhost healthz endpoint - HealthzPort int `json:"healthzPort"` + HealthzPort int32 `json:"healthzPort"` // healthzBindAddress is the IP address for the healthz server to serve // on. HealthzBindAddress string `json:"healthzBindAddress"` // oomScoreAdj is the oom-score-adj value for kubelet process. Values // must be within the range [-1000, 1000]. - OOMScoreAdj int `json:"oomScoreAdj"` + OOMScoreAdj int32 `json:"oomScoreAdj"` // registerNode enables automatic registration with the apiserver. RegisterNode bool `json:"registerNode"` // clusterDomain is the DNS domain for this cluster. If set, kubelet will @@ -220,17 +226,17 @@ type KubeletConfiguration struct { ImageMinimumGCAge unversioned.Duration `json:"imageMinimumGCAge"` // imageGCHighThresholdPercent is the percent of disk usage after which // image garbage collection is always run. - ImageGCHighThresholdPercent int `json:"imageGCHighThresholdPercent"` + ImageGCHighThresholdPercent int32 `json:"imageGCHighThresholdPercent"` // imageGCLowThresholdPercent is the percent of disk usage before which // image garbage collection is never run. Lowest disk usage to garbage // collect to. - ImageGCLowThresholdPercent int `json:"imageGCLowThresholdPercent"` + ImageGCLowThresholdPercent int32 `json:"imageGCLowThresholdPercent"` // lowDiskSpaceThresholdMB is the absolute free disk space, in MB, to // maintain. When disk space falls below this threshold, new pods would // be rejected.
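(Editorial aside, not part of the patch.) On the two image GC thresholds just shown: collection is forced once disk usage crosses imageGCHighThresholdPercent, and it then frees images until usage is back at imageGCLowThresholdPercent. A toy sketch of that interaction, a hypothetical helper rather than the kubelet's actual garbage collector:

package main

import "fmt"

// bytesToFree illustrates the high/low watermark semantics: no forced GC
// below the high threshold; above it, free enough image data to bring
// usage back down to the low threshold.
func bytesToFree(capacityBytes, usedBytes int64, lowPct, highPct int32) int64 {
	usagePct := int32(usedBytes * 100 / capacityBytes)
	if usagePct < highPct {
		return 0
	}
	target := capacityBytes * int64(lowPct) / 100
	return usedBytes - target
}

func main() {
	// A 100Gi disk at 92% usage with low=80, high=90 reclaims 12Gi of images.
	fmt.Println(bytesToFree(100<<30, 92<<30, 80, 90)>>30, "Gi") // 12 Gi
}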
- LowDiskSpaceThresholdMB int `json:"lowDiskSpaceThresholdMB"` + LowDiskSpaceThresholdMB int32 `json:"lowDiskSpaceThresholdMB"` // How frequently to calculate and cache volume disk usage for all pods - VolumeStatsAggPeriod unversioned.Duration `json:volumeStatsAggPeriod` + VolumeStatsAggPeriod unversioned.Duration `json:"volumeStatsAggPeriod"` // networkPluginName is the name of the network plugin to be invoked for // various events in kubelet/pod lifecycle NetworkPluginName string `json:"networkPluginName"` @@ -257,16 +263,23 @@ type KubeletConfiguration struct { CgroupRoot string `json:"cgroupRoot,omitempty"` // containerRuntime is the container runtime to use. ContainerRuntime string `json:"containerRuntime"` - // rktPath is hte path of rkt binary. Leave empty to use the first rkt in + // rktPath is the path of rkt binary. Leave empty to use the first rkt in // $PATH. RktPath string `json:"rktPath,omitempty"` + // rktApiEndpoint is the endpoint of the rkt API service to communicate with. + RktAPIEndpoint string `json:"rktAPIEndpoint,omitempty"` + // rktStage1Image is the image to use as stage1. Local paths and + // http/https URLs are supported. + RktStage1Image string `json:"rktStage1Image,omitempty"` // lockFilePath is the path that kubelet will use as a lock file. // It uses this file as a lock to synchronize with other kubelet processes // that may be running. LockFilePath string `json:"lockFilePath"` - // rktStage1Image is the image to use as stage1. Local paths and - // http/https URLs are supported. - RktStage1Image string `json:"rktStage1Image,omitempty"` + // ExitOnLockContention is a flag that signifies to the kubelet that it is running + // in "bootstrap" mode. This requires that 'LockFilePath' has been set. + // This will cause the kubelet to listen to inotify events on the lock file, + // releasing it and exiting when another process tries to open that file. + ExitOnLockContention bool `json:"exitOnLockContention"` // configureCBR0 enables the kubelet to configure cbr0 based on // Node.Spec.PodCIDR. ConfigureCBR0 bool `json:"configureCbr0"` @@ -283,7 +296,9 @@ type KubeletConfiguration struct { // The node has babysitter process monitoring docker and kubelet. BabysitDaemons bool `json:"babysitDaemons"` // maxPods is the number of pods that can run on this Kubelet. - MaxPods int `json:"maxPods"` + MaxPods int32 `json:"maxPods"` + // nvidiaGPUs is the number of NVIDIA GPU devices on this node. + NvidiaGPUs int32 `json:"nvidiaGPUs"` // dockerExecHandlerName is the handler to use when executing a command // in a container. Valid values are 'native' and 'nsenter'. Defaults to // 'native'. @@ -307,11 +322,13 @@ type KubeletConfiguration struct { // registerSchedulable tells the kubelet to register the node as // schedulable. No-op if register-node is false. RegisterSchedulable bool `json:"registerSchedulable"` + // contentType is contentType of requests sent to apiserver. + ContentType string `json:"contentType"` // kubeAPIQPS is the QPS to use while talking with kubernetes apiserver KubeAPIQPS float32 `json:"kubeAPIQPS"` // kubeAPIBurst is the burst to allow while talking with kubernetes // apiserver - KubeAPIBurst int `json:"kubeAPIBurst"` + KubeAPIBurst int32 `json:"kubeAPIBurst"` // serializeImagePulls when enabled, tells the Kubelet to pull images one // at a time. We recommend *not* changing the default value on nodes that // run docker daemon with version < 1.9 or an Aufs storage backend.
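(Editorial aside, not part of the patch.) The kubeAPIQPS/kubeAPIBurst pair above follows the usual token-bucket shape: QPS is the steady-state refill rate, burst the bucket size. A sketch of those semantics using golang.org/x/time/rate as a stand-in; the Kubernetes client uses its own throttling package, so this only illustrates the knobs, not the real code path:

package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	kubeAPIQPS, kubeAPIBurst := 5.0, 10
	lim := rate.NewLimiter(rate.Limit(kubeAPIQPS), kubeAPIBurst)

	start := time.Now()
	for i := 0; i < 20; i++ {
		// the first 10 calls drain the burst instantly; the remaining 10
		// are released at roughly 5 per second
		if err := lim.Wait(context.Background()); err != nil {
			panic(err)
		}
	}
	fmt.Printf("20 requests in %v (burst 10, then 5/s)\n", time.Since(start).Round(time.Second))
}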
@@ -333,13 +350,25 @@ type KubeletConfiguration struct { NonMasqueradeCIDR string `json:"nonMasqueradeCIDR"` // enable gathering custom metrics. EnableCustomMetrics bool `json:"enableCustomMetrics"` + // Comma-delimited list of hard eviction expressions. For example, 'memory.available<300Mi'. + EvictionHard string `json:"evictionHard,omitempty"` + // Comma-delimited list of soft eviction expressions. For example, 'memory.available<300Mi'. + EvictionSoft string `json:"evictionSoft,omitempty"` + // Comma-delimited list of grace periods for each soft eviction signal. For example, 'memory.available=30s'. + EvictionSoftGracePeriod string `json:"evictionSoftGracePeriod,omitempty"` + // Duration for which the kubelet has to wait before transitioning out of an eviction pressure condition. + EvictionPressureTransitionPeriod unversioned.Duration `json:"evictionPressureTransitionPeriod,omitempty"` + // Maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met. + EvictionMaxPodGracePeriod int32 `json:"evictionMaxPodGracePeriod,omitempty"` + // Maximum number of pods per core. Cannot exceed MaxPods. + PodsPerCore int32 `json:"podsPerCore"` } type KubeSchedulerConfiguration struct { unversioned.TypeMeta // port is the port that the scheduler's http service runs on. - Port int `json:"port"` + Port int32 `json:"port"` // address is the IP address to serve on. Address string `json:"address"` // algorithmProvider is the scheduling algorithm provider to use. @@ -348,14 +377,22 @@ type KubeSchedulerConfiguration struct { PolicyConfigFile string `json:"policyConfigFile"` // enableProfiling enables profiling via web interface. EnableProfiling bool `json:"enableProfiling"` + // contentType is contentType of requests sent to apiserver. + ContentType string `json:"contentType"` // kubeAPIQPS is the QPS to use while talking with kubernetes apiserver. KubeAPIQPS float32 `json:"kubeAPIQPS"` // kubeAPIBurst is the QPS burst to use while talking with kubernetes apiserver. - KubeAPIBurst int `json:"kubeAPIBurst"` + KubeAPIBurst int32 `json:"kubeAPIBurst"` // schedulerName is the name of the scheduler, used to select which pods // will be processed by this scheduler, based on pod's annotation with // key 'scheduler.alpha.kubernetes.io/name'. SchedulerName string `json:"schedulerName"` + // RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule + // corresponding to every RequiredDuringScheduling affinity rule. + // HardPodAffinitySymmetricWeight represents the weight of implicit PreferredDuringScheduling affinity rule, in the range 0-100. + HardPodAffinitySymmetricWeight int `json:"hardPodAffinitySymmetricWeight"` + // Indicate the "all topologies" set for empty topologyKey when it's used for PreferredDuringScheduling pod anti-affinity. + FailureDomains string `json:"failureDomains"` // leaderElection defines the configuration of leader election client. LeaderElection LeaderElectionConfiguration `json:"leaderElection"` } @@ -389,7 +426,7 @@ type KubeControllerManagerConfiguration struct { unversioned.TypeMeta // port is the port that the controller-manager's http service runs on. - Port int `json:"port"` + Port int32 `json:"port"` // address is the IP address to serve on (set to 0.0.0.0 for all interfaces). Address string `json:"address"` // cloudProvider is the provider for cloud services.
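(Editorial aside, not part of the patch.) The eviction fields introduced above are flat comma-delimited strings rather than structured config. Below is a hypothetical parser for the hard-threshold form documented in the comments ('signal<quantity', e.g. 'memory.available<300Mi'); the kubelet's real parser lives elsewhere and additionally validates signal names and resource quantities:

package main

import (
	"fmt"
	"strings"
)

// parseEvictionHard splits "memory.available<300Mi,nodefs.available<1Gi"
// into a signal -> threshold map.
func parseEvictionHard(s string) (map[string]string, error) {
	out := map[string]string{}
	if s == "" {
		return out, nil
	}
	for _, expr := range strings.Split(s, ",") {
		parts := strings.SplitN(expr, "<", 2)
		if len(parts) != 2 || parts[0] == "" || parts[1] == "" {
			return nil, fmt.Errorf("invalid eviction expression %q", expr)
		}
		out[parts[0]] = parts[1]
	}
	return out, nil
}

func main() {
	m, err := parseEvictionHard("memory.available<300Mi,nodefs.available<1Gi")
	fmt.Println(m, err) // map[memory.available:300Mi nodefs.available:1Gi] <nil>
}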
@@ -399,43 +436,43 @@ type KubeControllerManagerConfiguration struct { // concurrentEndpointSyncs is the number of endpoint syncing operations // that will be done concurrently. Larger number = faster endpoint updating, // but more CPU (and network) load. - ConcurrentEndpointSyncs int `json:"concurrentEndpointSyncs"` + ConcurrentEndpointSyncs int32 `json:"concurrentEndpointSyncs"` // concurrentRSSyncs is the number of replica sets that are allowed to sync // concurrently. Larger number = more responsive replica management, but more // CPU (and network) load. - ConcurrentRSSyncs int `json:"concurrentRSSyncs"` + ConcurrentRSSyncs int32 `json:"concurrentRSSyncs"` // concurrentRCSyncs is the number of replication controllers that are // allowed to sync concurrently. Larger number = more responsive replica // management, but more CPU (and network) load. - ConcurrentRCSyncs int `json:"concurrentRCSyncs"` + ConcurrentRCSyncs int32 `json:"concurrentRCSyncs"` // concurrentResourceQuotaSyncs is the number of resource quotas that are // allowed to sync concurrently. Larger number = more responsive quota // management, but more CPU (and network) load. - ConcurrentResourceQuotaSyncs int `json:"concurrentResourceQuotaSyncs"` + ConcurrentResourceQuotaSyncs int32 `json:"concurrentResourceQuotaSyncs"` // concurrentDeploymentSyncs is the number of deployment objects that are // allowed to sync concurrently. Larger number = more responsive deployments, // but more CPU (and network) load. - ConcurrentDeploymentSyncs int `json:"concurrentDeploymentSyncs"` + ConcurrentDeploymentSyncs int32 `json:"concurrentDeploymentSyncs"` // concurrentDaemonSetSyncs is the number of daemonset objects that are // allowed to sync concurrently. Larger number = more responsive daemonset, // but more CPU (and network) load. - ConcurrentDaemonSetSyncs int `json:"concurrentDaemonSetSyncs"` + ConcurrentDaemonSetSyncs int32 `json:"concurrentDaemonSetSyncs"` // concurrentJobSyncs is the number of job objects that are // allowed to sync concurrently. Larger number = more responsive jobs, // but more CPU (and network) load. - ConcurrentJobSyncs int `json:"concurrentJobSyncs"` + ConcurrentJobSyncs int32 `json:"concurrentJobSyncs"` // concurrentNamespaceSyncs is the number of namespace objects that are // allowed to sync concurrently. - ConcurrentNamespaceSyncs int `json:"concurrentNamespaceSyncs"` + ConcurrentNamespaceSyncs int32 `json:"concurrentNamespaceSyncs"` // lookupCacheSizeForRC is the size of lookup cache for replication controllers. // Larger number = more responsive replica management, but more MEM load. - LookupCacheSizeForRC int `json:"lookupCacheSizeForRC"` + LookupCacheSizeForRC int32 `json:"lookupCacheSizeForRC"` // lookupCacheSizeForRS is the size of lookup cache for replica sets. // Larger number = more responsive replica management, but more MEM load. - LookupCacheSizeForRS int `json:"lookupCacheSizeForRS"` + LookupCacheSizeForRS int32 `json:"lookupCacheSizeForRS"` // lookupCacheSizeForDaemonSet is the size of lookup cache for daemonsets. // Larger number = more responsive daemonset, but more MEM load. - LookupCacheSizeForDaemonSet int `json:"lookupCacheSizeForDaemonSet"` + LookupCacheSizeForDaemonSet int32 `json:"lookupCacheSizeForDaemonSet"` // serviceSyncPeriod is the period for syncing services with their external // load balancers.
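(Editorial aside, not part of the patch.) All of the concurrent*Syncs knobs above share one pattern: N workers draining a single work queue, trading CPU and network load for sync latency. An illustrative sketch of that pattern only, not controller-manager code; syncEndpoints is a hypothetical name:

package main

import (
	"fmt"
	"sync"
)

func main() {
	concurrentEndpointSyncs := int32(5) // the knob from the struct above

	queue := make(chan string, 20)
	for i := 0; i < 20; i++ {
		queue <- fmt.Sprintf("default/endpoints-%d", i)
	}
	close(queue)

	var wg sync.WaitGroup
	for i := int32(0); i < concurrentEndpointSyncs; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for key := range queue {
				_ = key // a real worker would call syncEndpoints(key) here
			}
		}()
	}
	wg.Wait()
	fmt.Println("queue drained by", concurrentEndpointSyncs, "workers")
}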
ServiceSyncPeriod unversioned.Duration `json:"serviceSyncPeriod"` @@ -458,7 +495,7 @@ type KubeControllerManagerConfiguration struct { // terminatedPodGCThreshold is the number of terminated pods that can exist // before the terminated pod garbage collector starts deleting terminated pods. // If <= 0, the terminated pod garbage collector is disabled. - TerminatedPodGCThreshold int `json:"terminatedPodGCThreshold"` + TerminatedPodGCThreshold int32 `json:"terminatedPodGCThreshold"` // horizontalPodAutoscalerSyncPeriod is the period for syncing the number of // pods in horizontal pod autoscaler. HorizontalPodAutoscalerSyncPeriod unversioned.Duration `json:"horizontalPodAutoscalerSyncPeriod"` @@ -471,7 +508,7 @@ type KubeControllerManagerConfiguration struct { DeletingPodsQps float32 `json:"deletingPodsQps"` // deletingPodsBurst is the number of nodes on which pods are deleted in bursts in // case of node failure. For more details look into RateLimiter. - DeletingPodsBurst int `json:"deletingPodsBurst"` + DeletingPodsBurst int32 `json:"deletingPodsBurst"` // nodeMonitorGracePeriod is the amount of time which we allow a running node to be // unresponsive before marking it unhealthy. Must be N times more than kubelet's // nodeStatusUpdateFrequency, where N means number of retries allowed for kubelet // to post node status. NodeMonitorGracePeriod unversioned.Duration `json:"nodeMonitorGracePeriod"` // registerRetryCount is the number of retries for initial node registration. // Retry interval equals node-sync-period. - RegisterRetryCount int `json:"registerRetryCount"` + RegisterRetryCount int32 `json:"registerRetryCount"` // nodeStartupGracePeriod is the amount of time which we allow starting a node to // be unresponsive before marking it unhealthy. NodeStartupGracePeriod unversioned.Duration `json:"nodeStartupGracePeriod"` @@ -494,20 +531,35 @@ type KubeControllerManagerConfiguration struct { ClusterName string `json:"clusterName"` // clusterCIDR is CIDR Range for Pods in cluster. ClusterCIDR string `json:"clusterCIDR"` - // allocateNodeCIDRs enables CIDRs for Pods to be allocated and set on the - // cloud provider. + // serviceCIDR is CIDR Range for Services in cluster. + ServiceCIDR string `json:"serviceCIDR"` + // NodeCIDRMaskSize is the mask size for node cidr in cluster. + NodeCIDRMaskSize int32 `json:"nodeCIDRMaskSize"` + // allocateNodeCIDRs enables CIDRs for Pods to be allocated and, if + // ConfigureCloudRoutes is true, to be set on the cloud provider. AllocateNodeCIDRs bool `json:"allocateNodeCIDRs"` + // configureCloudRoutes enables CIDRs allocated with allocateNodeCIDRs + // to be configured on the cloud provider. + ConfigureCloudRoutes bool `json:"configureCloudRoutes"` // rootCAFile is the root certificate authority that will be included in service // account's token secret. This must be a valid PEM-encoded CA bundle. RootCAFile string `json:"rootCAFile"` + // contentType is contentType of requests sent to apiserver. + ContentType string `json:"contentType"` // kubeAPIQPS is the QPS to use while talking with kubernetes apiserver. KubeAPIQPS float32 `json:"kubeAPIQPS"` // kubeAPIBurst is the burst to use while talking with kubernetes apiserver. - KubeAPIBurst int `json:"kubeAPIBurst"` + KubeAPIBurst int32 `json:"kubeAPIBurst"` // leaderElection defines the configuration of leader election client. LeaderElection LeaderElectionConfiguration `json:"leaderElection"` - // vloumeConfiguration holds configuration for volume related features.
+ // volumeConfiguration holds configuration for volume related features. VolumeConfiguration VolumeConfiguration `json:"volumeConfiguration"` + // How long to wait between starting controller managers + ControllerStartInterval unversioned.Duration `json:"controllerStartInterval"` + // enables the generic garbage collector. MUST be synced with the + // corresponding flag of the kube-apiserver. WARNING: the generic garbage + // collector is an alpha feature. + EnableGarbageCollector bool `json:"enableGarbageCollector"` } // VolumeConfiguration contains *all* enumerated flags meant to configure all volume @@ -523,21 +575,24 @@ type VolumeConfiguration struct { EnableHostPathProvisioning bool `json:"enableHostPathProvisioning"` // persistentVolumeRecyclerConfiguration holds configuration for persistent volume plugins. PersistentVolumeRecyclerConfiguration PersistentVolumeRecyclerConfiguration `json:"persitentVolumeRecyclerConfiguration"` + // flexVolumePluginDir is the full path of the directory in which the flex + // volume plugin should search for additional third party volume plugins. + FlexVolumePluginDir string `json:"flexVolumePluginDir"` } type PersistentVolumeRecyclerConfiguration struct { // maximumRetry is the number of retries the PV recycler will execute on failure to recycle // PV. - MaximumRetry int `json:"maximumRetry"` + MaximumRetry int32 `json:"maximumRetry"` // minimumTimeoutNFS is the minimum ActiveDeadlineSeconds to use for an NFS Recycler // pod. - MinimumTimeoutNFS int `json:"minimumTimeoutNFS"` + MinimumTimeoutNFS int32 `json:"minimumTimeoutNFS"` // podTemplateFilePathNFS is the file path to a pod definition used as a template for // NFS persistent volume recycling PodTemplateFilePathNFS string `json:"podTemplateFilePathNFS"` // incrementTimeoutNFS is the increment of time added per Gi to ActiveDeadlineSeconds // for an NFS scrubber pod. - IncrementTimeoutNFS int `json:"incrementTimeoutNFS"` + IncrementTimeoutNFS int32 `json:"incrementTimeoutNFS"` // podTemplateFilePathHostPath is the file path to a pod definition used as a template for // HostPath persistent volume recycling. This is for development and testing only and // will not work in a multi-node cluster. @@ -545,9 +600,9 @@ type PersistentVolumeRecyclerConfiguration struct { // minimumTimeoutHostPath is the minimum ActiveDeadlineSeconds to use for a HostPath // Recycler pod. This is for development and testing only and will not work in a multi-node // cluster. - MinimumTimeoutHostPath int `json:"minimumTimeoutHostPath"` + MinimumTimeoutHostPath int32 `json:"minimumTimeoutHostPath"` // incrementTimeoutHostPath is the increment of time added per Gi to ActiveDeadlineSeconds // for a HostPath scrubber pod. This is for development and testing only and will not work // in a multi-node cluster. - IncrementTimeoutHostPath int `json:"incrementTimeoutHostPath"` + IncrementTimeoutHostPath int32 `json:"incrementTimeoutHostPath"` } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/conversion_generated.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/conversion_generated.go index b4bff5c816df..fd607760bfc4 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/conversion_generated.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/conversion_generated.go @@ -1,5 +1,7 @@ +// +build !ignore_autogenerated + /* -Copyright 2015 The Kubernetes Authors All rights reserved.
+Copyright 2016 The Kubernetes Authors All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,177 +16,105 @@ See the License for the specific language governing permissions and limitations under the License. */ -// DO NOT EDIT. THIS FILE IS AUTO-GENERATED BY $KUBEROOT/hack/update-generated-conversions.sh +// This file was autogenerated by conversion-gen. Do not edit it manually! package v1alpha1 import ( - reflect "reflect" - api "k8s.io/kubernetes/pkg/api" componentconfig "k8s.io/kubernetes/pkg/apis/componentconfig" conversion "k8s.io/kubernetes/pkg/conversion" ) -func autoConvert_componentconfig_KubeProxyConfiguration_To_v1alpha1_KubeProxyConfiguration(in *componentconfig.KubeProxyConfiguration, out *KubeProxyConfiguration, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*componentconfig.KubeProxyConfiguration))(in) +func init() { + if err := api.Scheme.AddGeneratedConversionFuncs( + Convert_v1alpha1_KubeProxyConfiguration_To_componentconfig_KubeProxyConfiguration, + Convert_componentconfig_KubeProxyConfiguration_To_v1alpha1_KubeProxyConfiguration, + Convert_v1alpha1_KubeSchedulerConfiguration_To_componentconfig_KubeSchedulerConfiguration, + Convert_componentconfig_KubeSchedulerConfiguration_To_v1alpha1_KubeSchedulerConfiguration, + Convert_v1alpha1_LeaderElectionConfiguration_To_componentconfig_LeaderElectionConfiguration, + Convert_componentconfig_LeaderElectionConfiguration_To_v1alpha1_LeaderElectionConfiguration, + ); err != nil { + // if one of the conversion functions is malformed, detect it immediately. + panic(err) } +} + +func autoConvert_v1alpha1_KubeProxyConfiguration_To_componentconfig_KubeProxyConfiguration(in *KubeProxyConfiguration, out *componentconfig.KubeProxyConfiguration, s conversion.Scope) error { + SetDefaults_KubeProxyConfiguration(in) if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } out.BindAddress = in.BindAddress + out.ClusterCIDR = in.ClusterCIDR out.HealthzBindAddress = in.HealthzBindAddress - out.HealthzPort = int32(in.HealthzPort) + out.HealthzPort = in.HealthzPort out.HostnameOverride = in.HostnameOverride - if in.IPTablesMasqueradeBit != nil { - out.IPTablesMasqueradeBit = new(int32) - *out.IPTablesMasqueradeBit = int32(*in.IPTablesMasqueradeBit) - } else { - out.IPTablesMasqueradeBit = nil - } - if err := s.Convert(&in.IPTablesSyncPeriod, &out.IPTablesSyncPeriod, 0); err != nil { - return err - } + out.IPTablesMasqueradeBit = in.IPTablesMasqueradeBit + out.IPTablesSyncPeriod = in.IPTablesSyncPeriod out.KubeconfigPath = in.KubeconfigPath out.MasqueradeAll = in.MasqueradeAll out.Master = in.Master - if in.OOMScoreAdj != nil { - out.OOMScoreAdj = new(int32) - *out.OOMScoreAdj = int32(*in.OOMScoreAdj) - } else { - out.OOMScoreAdj = nil - } - out.Mode = ProxyMode(in.Mode) + out.OOMScoreAdj = in.OOMScoreAdj + out.Mode = componentconfig.ProxyMode(in.Mode) out.PortRange = in.PortRange out.ResourceContainer = in.ResourceContainer - if err := s.Convert(&in.UDPIdleTimeout, &out.UDPIdleTimeout, 0); err != nil { - return err - } - out.ConntrackMax = int32(in.ConntrackMax) - if err := s.Convert(&in.ConntrackTCPEstablishedTimeout, &out.ConntrackTCPEstablishedTimeout, 0); err != nil { - return err - } - return nil -} - -func Convert_componentconfig_KubeProxyConfiguration_To_v1alpha1_KubeProxyConfiguration(in 
*componentconfig.KubeProxyConfiguration, out *KubeProxyConfiguration, s conversion.Scope) error { - return autoConvert_componentconfig_KubeProxyConfiguration_To_v1alpha1_KubeProxyConfiguration(in, out, s) -} - -func autoConvert_componentconfig_KubeSchedulerConfiguration_To_v1alpha1_KubeSchedulerConfiguration(in *componentconfig.KubeSchedulerConfiguration, out *KubeSchedulerConfiguration, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*componentconfig.KubeSchedulerConfiguration))(in) - } - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - out.Port = in.Port - out.Address = in.Address - out.AlgorithmProvider = in.AlgorithmProvider - out.PolicyConfigFile = in.PolicyConfigFile - if err := api.Convert_bool_To_bool_ref(&in.EnableProfiling, &out.EnableProfiling, s); err != nil { - return err - } - out.KubeAPIQPS = in.KubeAPIQPS - out.KubeAPIBurst = in.KubeAPIBurst - out.SchedulerName = in.SchedulerName - if err := Convert_componentconfig_LeaderElectionConfiguration_To_v1alpha1_LeaderElectionConfiguration(&in.LeaderElection, &out.LeaderElection, s); err != nil { - return err - } - return nil -} - -func Convert_componentconfig_KubeSchedulerConfiguration_To_v1alpha1_KubeSchedulerConfiguration(in *componentconfig.KubeSchedulerConfiguration, out *KubeSchedulerConfiguration, s conversion.Scope) error { - return autoConvert_componentconfig_KubeSchedulerConfiguration_To_v1alpha1_KubeSchedulerConfiguration(in, out, s) -} - -func autoConvert_componentconfig_LeaderElectionConfiguration_To_v1alpha1_LeaderElectionConfiguration(in *componentconfig.LeaderElectionConfiguration, out *LeaderElectionConfiguration, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*componentconfig.LeaderElectionConfiguration))(in) - } - if err := api.Convert_bool_To_bool_ref(&in.LeaderElect, &out.LeaderElect, s); err != nil { - return err - } - if err := s.Convert(&in.LeaseDuration, &out.LeaseDuration, 0); err != nil { - return err - } - if err := s.Convert(&in.RenewDeadline, &out.RenewDeadline, 0); err != nil { - return err - } - if err := s.Convert(&in.RetryPeriod, &out.RetryPeriod, 0); err != nil { - return err - } + out.UDPIdleTimeout = in.UDPIdleTimeout + out.ConntrackMax = in.ConntrackMax + out.ConntrackTCPEstablishedTimeout = in.ConntrackTCPEstablishedTimeout return nil } -func Convert_componentconfig_LeaderElectionConfiguration_To_v1alpha1_LeaderElectionConfiguration(in *componentconfig.LeaderElectionConfiguration, out *LeaderElectionConfiguration, s conversion.Scope) error { - return autoConvert_componentconfig_LeaderElectionConfiguration_To_v1alpha1_LeaderElectionConfiguration(in, out, s) +func Convert_v1alpha1_KubeProxyConfiguration_To_componentconfig_KubeProxyConfiguration(in *KubeProxyConfiguration, out *componentconfig.KubeProxyConfiguration, s conversion.Scope) error { + return autoConvert_v1alpha1_KubeProxyConfiguration_To_componentconfig_KubeProxyConfiguration(in, out, s) } -func autoConvert_v1alpha1_KubeProxyConfiguration_To_componentconfig_KubeProxyConfiguration(in *KubeProxyConfiguration, out *componentconfig.KubeProxyConfiguration, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*KubeProxyConfiguration))(in) - } +func autoConvert_componentconfig_KubeProxyConfiguration_To_v1alpha1_KubeProxyConfiguration(in 
*componentconfig.KubeProxyConfiguration, out *KubeProxyConfiguration, s conversion.Scope) error { if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } out.BindAddress = in.BindAddress + out.ClusterCIDR = in.ClusterCIDR out.HealthzBindAddress = in.HealthzBindAddress - out.HealthzPort = int(in.HealthzPort) + out.HealthzPort = in.HealthzPort out.HostnameOverride = in.HostnameOverride - if in.IPTablesMasqueradeBit != nil { - out.IPTablesMasqueradeBit = new(int) - *out.IPTablesMasqueradeBit = int(*in.IPTablesMasqueradeBit) - } else { - out.IPTablesMasqueradeBit = nil - } - if err := s.Convert(&in.IPTablesSyncPeriod, &out.IPTablesSyncPeriod, 0); err != nil { - return err - } + out.IPTablesMasqueradeBit = in.IPTablesMasqueradeBit + out.IPTablesSyncPeriod = in.IPTablesSyncPeriod out.KubeconfigPath = in.KubeconfigPath out.MasqueradeAll = in.MasqueradeAll out.Master = in.Master - if in.OOMScoreAdj != nil { - out.OOMScoreAdj = new(int) - *out.OOMScoreAdj = int(*in.OOMScoreAdj) - } else { - out.OOMScoreAdj = nil - } - out.Mode = componentconfig.ProxyMode(in.Mode) + out.OOMScoreAdj = in.OOMScoreAdj + out.Mode = ProxyMode(in.Mode) out.PortRange = in.PortRange out.ResourceContainer = in.ResourceContainer - if err := s.Convert(&in.UDPIdleTimeout, &out.UDPIdleTimeout, 0); err != nil { - return err - } - out.ConntrackMax = int(in.ConntrackMax) - if err := s.Convert(&in.ConntrackTCPEstablishedTimeout, &out.ConntrackTCPEstablishedTimeout, 0); err != nil { - return err - } + out.UDPIdleTimeout = in.UDPIdleTimeout + out.ConntrackMax = in.ConntrackMax + out.ConntrackTCPEstablishedTimeout = in.ConntrackTCPEstablishedTimeout return nil } -func Convert_v1alpha1_KubeProxyConfiguration_To_componentconfig_KubeProxyConfiguration(in *KubeProxyConfiguration, out *componentconfig.KubeProxyConfiguration, s conversion.Scope) error { - return autoConvert_v1alpha1_KubeProxyConfiguration_To_componentconfig_KubeProxyConfiguration(in, out, s) +func Convert_componentconfig_KubeProxyConfiguration_To_v1alpha1_KubeProxyConfiguration(in *componentconfig.KubeProxyConfiguration, out *KubeProxyConfiguration, s conversion.Scope) error { + return autoConvert_componentconfig_KubeProxyConfiguration_To_v1alpha1_KubeProxyConfiguration(in, out, s) } func autoConvert_v1alpha1_KubeSchedulerConfiguration_To_componentconfig_KubeSchedulerConfiguration(in *KubeSchedulerConfiguration, out *componentconfig.KubeSchedulerConfiguration, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*KubeSchedulerConfiguration))(in) - } + SetDefaults_KubeSchedulerConfiguration(in) if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } - out.Port = in.Port + out.Port = int32(in.Port) out.Address = in.Address out.AlgorithmProvider = in.AlgorithmProvider out.PolicyConfigFile = in.PolicyConfigFile - if err := api.Convert_bool_ref_To_bool(&in.EnableProfiling, &out.EnableProfiling, s); err != nil { + if err := api.Convert_Pointer_bool_To_bool(&in.EnableProfiling, &out.EnableProfiling, s); err != nil { return err } + out.ContentType = in.ContentType out.KubeAPIQPS = in.KubeAPIQPS - out.KubeAPIBurst = in.KubeAPIBurst + out.KubeAPIBurst = int32(in.KubeAPIBurst) out.SchedulerName = in.SchedulerName + out.HardPodAffinitySymmetricWeight = in.HardPodAffinitySymmetricWeight + out.FailureDomains = in.FailureDomains if err := 
Convert_v1alpha1_LeaderElectionConfiguration_To_componentconfig_LeaderElectionConfiguration(&in.LeaderElection, &out.LeaderElection, s); err != nil { return err } @@ -195,22 +125,41 @@ func Convert_v1alpha1_KubeSchedulerConfiguration_To_componentconfig_KubeSchedule return autoConvert_v1alpha1_KubeSchedulerConfiguration_To_componentconfig_KubeSchedulerConfiguration(in, out, s) } -func autoConvert_v1alpha1_LeaderElectionConfiguration_To_componentconfig_LeaderElectionConfiguration(in *LeaderElectionConfiguration, out *componentconfig.LeaderElectionConfiguration, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*LeaderElectionConfiguration))(in) - } - if err := api.Convert_bool_ref_To_bool(&in.LeaderElect, &out.LeaderElect, s); err != nil { +func autoConvert_componentconfig_KubeSchedulerConfiguration_To_v1alpha1_KubeSchedulerConfiguration(in *componentconfig.KubeSchedulerConfiguration, out *KubeSchedulerConfiguration, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } - if err := s.Convert(&in.LeaseDuration, &out.LeaseDuration, 0); err != nil { + out.Port = int(in.Port) + out.Address = in.Address + out.AlgorithmProvider = in.AlgorithmProvider + out.PolicyConfigFile = in.PolicyConfigFile + if err := api.Convert_bool_To_Pointer_bool(&in.EnableProfiling, &out.EnableProfiling, s); err != nil { return err } - if err := s.Convert(&in.RenewDeadline, &out.RenewDeadline, 0); err != nil { + out.ContentType = in.ContentType + out.KubeAPIQPS = in.KubeAPIQPS + out.KubeAPIBurst = int(in.KubeAPIBurst) + out.SchedulerName = in.SchedulerName + out.HardPodAffinitySymmetricWeight = in.HardPodAffinitySymmetricWeight + out.FailureDomains = in.FailureDomains + if err := Convert_componentconfig_LeaderElectionConfiguration_To_v1alpha1_LeaderElectionConfiguration(&in.LeaderElection, &out.LeaderElection, s); err != nil { return err } - if err := s.Convert(&in.RetryPeriod, &out.RetryPeriod, 0); err != nil { + return nil +} + +func Convert_componentconfig_KubeSchedulerConfiguration_To_v1alpha1_KubeSchedulerConfiguration(in *componentconfig.KubeSchedulerConfiguration, out *KubeSchedulerConfiguration, s conversion.Scope) error { + return autoConvert_componentconfig_KubeSchedulerConfiguration_To_v1alpha1_KubeSchedulerConfiguration(in, out, s) +} + +func autoConvert_v1alpha1_LeaderElectionConfiguration_To_componentconfig_LeaderElectionConfiguration(in *LeaderElectionConfiguration, out *componentconfig.LeaderElectionConfiguration, s conversion.Scope) error { + SetDefaults_LeaderElectionConfiguration(in) + if err := api.Convert_Pointer_bool_To_bool(&in.LeaderElect, &out.LeaderElect, s); err != nil { return err } + out.LeaseDuration = in.LeaseDuration + out.RenewDeadline = in.RenewDeadline + out.RetryPeriod = in.RetryPeriod return nil } @@ -218,17 +167,16 @@ func Convert_v1alpha1_LeaderElectionConfiguration_To_componentconfig_LeaderElect return autoConvert_v1alpha1_LeaderElectionConfiguration_To_componentconfig_LeaderElectionConfiguration(in, out, s) } -func init() { - err := api.Scheme.AddGeneratedConversionFuncs( - autoConvert_componentconfig_KubeProxyConfiguration_To_v1alpha1_KubeProxyConfiguration, - autoConvert_componentconfig_KubeSchedulerConfiguration_To_v1alpha1_KubeSchedulerConfiguration, - autoConvert_componentconfig_LeaderElectionConfiguration_To_v1alpha1_LeaderElectionConfiguration, - 
autoConvert_v1alpha1_KubeProxyConfiguration_To_componentconfig_KubeProxyConfiguration, - autoConvert_v1alpha1_KubeSchedulerConfiguration_To_componentconfig_KubeSchedulerConfiguration, - autoConvert_v1alpha1_LeaderElectionConfiguration_To_componentconfig_LeaderElectionConfiguration, - ) - if err != nil { - // If one of the conversion functions is malformed, detect it immediately. - panic(err) +func autoConvert_componentconfig_LeaderElectionConfiguration_To_v1alpha1_LeaderElectionConfiguration(in *componentconfig.LeaderElectionConfiguration, out *LeaderElectionConfiguration, s conversion.Scope) error { + if err := api.Convert_bool_To_Pointer_bool(&in.LeaderElect, &out.LeaderElect, s); err != nil { + return err } + out.LeaseDuration = in.LeaseDuration + out.RenewDeadline = in.RenewDeadline + out.RetryPeriod = in.RetryPeriod + return nil +} + +func Convert_componentconfig_LeaderElectionConfiguration_To_v1alpha1_LeaderElectionConfiguration(in *componentconfig.LeaderElectionConfiguration, out *LeaderElectionConfiguration, s conversion.Scope) error { + return autoConvert_componentconfig_LeaderElectionConfiguration_To_v1alpha1_LeaderElectionConfiguration(in, out, s) } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/deep_copy_generated.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/deep_copy_generated.go index 928491342600..f4f9fc932285 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/deep_copy_generated.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/deep_copy_generated.go @@ -1,5 +1,7 @@ +// +build !ignore_autogenerated + /* -Copyright 2015 The Kubernetes Authors All rights reserved. +Copyright 2016 The Kubernetes Authors All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +16,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// DO NOT EDIT. THIS FILE IS AUTO-GENERATED BY $KUBEROOT/hack/update-generated-deep-copies.sh. +// This file was autogenerated by deepcopy-gen. Do not edit it manually! package v1alpha1 @@ -24,58 +26,61 @@ import ( conversion "k8s.io/kubernetes/pkg/conversion" ) -func deepCopy_unversioned_Duration(in unversioned.Duration, out *unversioned.Duration, c *conversion.Cloner) error { - out.Duration = in.Duration - return nil -} - -func deepCopy_unversioned_TypeMeta(in unversioned.TypeMeta, out *unversioned.TypeMeta, c *conversion.Cloner) error { - out.Kind = in.Kind - out.APIVersion = in.APIVersion - return nil +func init() { + if err := api.Scheme.AddGeneratedDeepCopyFuncs( + DeepCopy_v1alpha1_KubeProxyConfiguration, + DeepCopy_v1alpha1_KubeSchedulerConfiguration, + DeepCopy_v1alpha1_LeaderElectionConfiguration, + ); err != nil { + // if one of the deep copy functions is malformed, detect it immediately. 
+ panic(err) + } } -func deepCopy_v1alpha1_KubeProxyConfiguration(in KubeProxyConfiguration, out *KubeProxyConfiguration, c *conversion.Cloner) error { - if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { +func DeepCopy_v1alpha1_KubeProxyConfiguration(in KubeProxyConfiguration, out *KubeProxyConfiguration, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } out.BindAddress = in.BindAddress + out.ClusterCIDR = in.ClusterCIDR out.HealthzBindAddress = in.HealthzBindAddress out.HealthzPort = in.HealthzPort out.HostnameOverride = in.HostnameOverride if in.IPTablesMasqueradeBit != nil { - out.IPTablesMasqueradeBit = new(int32) - *out.IPTablesMasqueradeBit = *in.IPTablesMasqueradeBit + in, out := in.IPTablesMasqueradeBit, &out.IPTablesMasqueradeBit + *out = new(int32) + **out = *in } else { out.IPTablesMasqueradeBit = nil } - if err := deepCopy_unversioned_Duration(in.IPTablesSyncPeriod, &out.IPTablesSyncPeriod, c); err != nil { + if err := unversioned.DeepCopy_unversioned_Duration(in.IPTablesSyncPeriod, &out.IPTablesSyncPeriod, c); err != nil { return err } out.KubeconfigPath = in.KubeconfigPath out.MasqueradeAll = in.MasqueradeAll out.Master = in.Master if in.OOMScoreAdj != nil { - out.OOMScoreAdj = new(int32) - *out.OOMScoreAdj = *in.OOMScoreAdj + in, out := in.OOMScoreAdj, &out.OOMScoreAdj + *out = new(int32) + **out = *in } else { out.OOMScoreAdj = nil } out.Mode = in.Mode out.PortRange = in.PortRange out.ResourceContainer = in.ResourceContainer - if err := deepCopy_unversioned_Duration(in.UDPIdleTimeout, &out.UDPIdleTimeout, c); err != nil { + if err := unversioned.DeepCopy_unversioned_Duration(in.UDPIdleTimeout, &out.UDPIdleTimeout, c); err != nil { return err } out.ConntrackMax = in.ConntrackMax - if err := deepCopy_unversioned_Duration(in.ConntrackTCPEstablishedTimeout, &out.ConntrackTCPEstablishedTimeout, c); err != nil { + if err := unversioned.DeepCopy_unversioned_Duration(in.ConntrackTCPEstablishedTimeout, &out.ConntrackTCPEstablishedTimeout, c); err != nil { return err } return nil } -func deepCopy_v1alpha1_KubeSchedulerConfiguration(in KubeSchedulerConfiguration, out *KubeSchedulerConfiguration, c *conversion.Cloner) error { - if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { +func DeepCopy_v1alpha1_KubeSchedulerConfiguration(in KubeSchedulerConfiguration, out *KubeSchedulerConfiguration, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } out.Port = in.Port @@ -83,49 +88,40 @@ func deepCopy_v1alpha1_KubeSchedulerConfiguration(in KubeSchedulerConfiguration, out.AlgorithmProvider = in.AlgorithmProvider out.PolicyConfigFile = in.PolicyConfigFile if in.EnableProfiling != nil { - out.EnableProfiling = new(bool) - *out.EnableProfiling = *in.EnableProfiling + in, out := in.EnableProfiling, &out.EnableProfiling + *out = new(bool) + **out = *in } else { out.EnableProfiling = nil } + out.ContentType = in.ContentType out.KubeAPIQPS = in.KubeAPIQPS out.KubeAPIBurst = in.KubeAPIBurst out.SchedulerName = in.SchedulerName - if err := deepCopy_v1alpha1_LeaderElectionConfiguration(in.LeaderElection, &out.LeaderElection, c); err != nil { + out.HardPodAffinitySymmetricWeight = in.HardPodAffinitySymmetricWeight + out.FailureDomains = in.FailureDomains + if err := DeepCopy_v1alpha1_LeaderElectionConfiguration(in.LeaderElection, &out.LeaderElection, c); err 
!= nil { return err } return nil } -func deepCopy_v1alpha1_LeaderElectionConfiguration(in LeaderElectionConfiguration, out *LeaderElectionConfiguration, c *conversion.Cloner) error { +func DeepCopy_v1alpha1_LeaderElectionConfiguration(in LeaderElectionConfiguration, out *LeaderElectionConfiguration, c *conversion.Cloner) error { if in.LeaderElect != nil { - out.LeaderElect = new(bool) - *out.LeaderElect = *in.LeaderElect + in, out := in.LeaderElect, &out.LeaderElect + *out = new(bool) + **out = *in } else { out.LeaderElect = nil } - if err := deepCopy_unversioned_Duration(in.LeaseDuration, &out.LeaseDuration, c); err != nil { + if err := unversioned.DeepCopy_unversioned_Duration(in.LeaseDuration, &out.LeaseDuration, c); err != nil { return err } - if err := deepCopy_unversioned_Duration(in.RenewDeadline, &out.RenewDeadline, c); err != nil { + if err := unversioned.DeepCopy_unversioned_Duration(in.RenewDeadline, &out.RenewDeadline, c); err != nil { return err } - if err := deepCopy_unversioned_Duration(in.RetryPeriod, &out.RetryPeriod, c); err != nil { + if err := unversioned.DeepCopy_unversioned_Duration(in.RetryPeriod, &out.RetryPeriod, c); err != nil { return err } return nil } - -func init() { - err := api.Scheme.AddGeneratedDeepCopyFuncs( - deepCopy_unversioned_Duration, - deepCopy_unversioned_TypeMeta, - deepCopy_v1alpha1_KubeProxyConfiguration, - deepCopy_v1alpha1_KubeSchedulerConfiguration, - deepCopy_v1alpha1_LeaderElectionConfiguration, - ) - if err != nil { - // if one of the deep copy functions is malformed, detect it immediately. - panic(err) - } -} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/defaults.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/defaults.go index af35c7d3884d..bab6bb3e32f8 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/defaults.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/defaults.go @@ -28,72 +28,87 @@ import ( func addDefaultingFuncs(scheme *runtime.Scheme) { scheme.AddDefaultingFuncs( - func(obj *KubeProxyConfiguration) { - if obj.BindAddress == "" { - obj.BindAddress = "0.0.0.0" - } - if obj.HealthzPort == 0 { - obj.HealthzPort = 10249 - } - if obj.HealthzBindAddress == "" { - obj.HealthzBindAddress = "127.0.0.1" - } - if obj.OOMScoreAdj == nil { - temp := int32(qos.KubeProxyOOMScoreAdj) - obj.OOMScoreAdj = &temp - } - if obj.ResourceContainer == "" { - obj.ResourceContainer = "/kube-proxy" - } - if obj.IPTablesSyncPeriod.Duration == 0 { - obj.IPTablesSyncPeriod = unversioned.Duration{30 * time.Second} - } - zero := unversioned.Duration{} - if obj.UDPIdleTimeout == zero { - obj.UDPIdleTimeout = unversioned.Duration{250 * time.Millisecond} - } - if obj.ConntrackMax == 0 { - obj.ConntrackMax = 256 * 1024 // 4x default (64k) - } - if obj.IPTablesMasqueradeBit == nil { - temp := int32(14) - obj.IPTablesMasqueradeBit = &temp - } - if obj.ConntrackTCPEstablishedTimeout == zero { - obj.ConntrackTCPEstablishedTimeout = unversioned.Duration{Duration: 24 * time.Hour} // 1 day (1/5 default) - } - }, - func(obj *KubeSchedulerConfiguration) { - if obj.Port == 0 { - obj.Port = ports.SchedulerPort - } - if obj.Address == "" { - obj.Address = "0.0.0.0" - } - if obj.AlgorithmProvider == "" { - obj.AlgorithmProvider = "DefaultProvider" - } - if obj.KubeAPIQPS == 0 { - obj.KubeAPIQPS = 50.0 - } - if obj.KubeAPIBurst == 0 { - obj.KubeAPIBurst = 100 - } - if obj.SchedulerName == "" { - obj.SchedulerName = 
api.DefaultSchedulerName - } - }, - func(obj *LeaderElectionConfiguration) { - zero := unversioned.Duration{} - if obj.LeaseDuration == zero { - obj.LeaseDuration = unversioned.Duration{15 * time.Second} - } - if obj.RenewDeadline == zero { - obj.RenewDeadline = unversioned.Duration{10 * time.Second} - } - if obj.RetryPeriod == zero { - obj.RetryPeriod = unversioned.Duration{2 * time.Second} - } - }, + SetDefaults_KubeProxyConfiguration, + SetDefaults_KubeSchedulerConfiguration, + SetDefaults_LeaderElectionConfiguration, ) } + +func SetDefaults_KubeProxyConfiguration(obj *KubeProxyConfiguration) { + if obj.BindAddress == "" { + obj.BindAddress = "0.0.0.0" + } + if obj.HealthzPort == 0 { + obj.HealthzPort = 10249 + } + if obj.HealthzBindAddress == "" { + obj.HealthzBindAddress = "127.0.0.1" + } + if obj.OOMScoreAdj == nil { + temp := int32(qos.KubeProxyOOMScoreAdj) + obj.OOMScoreAdj = &temp + } + if obj.ResourceContainer == "" { + obj.ResourceContainer = "/kube-proxy" + } + if obj.IPTablesSyncPeriod.Duration == 0 { + obj.IPTablesSyncPeriod = unversioned.Duration{Duration: 30 * time.Second} + } + zero := unversioned.Duration{} + if obj.UDPIdleTimeout == zero { + obj.UDPIdleTimeout = unversioned.Duration{Duration: 250 * time.Millisecond} + } + if obj.ConntrackMax == 0 { + obj.ConntrackMax = 256 * 1024 // 4x default (64k) + } + if obj.IPTablesMasqueradeBit == nil { + temp := int32(14) + obj.IPTablesMasqueradeBit = &temp + } + if obj.ConntrackTCPEstablishedTimeout == zero { + obj.ConntrackTCPEstablishedTimeout = unversioned.Duration{Duration: 24 * time.Hour} // 1 day (1/5 default) + } +} + +func SetDefaults_KubeSchedulerConfiguration(obj *KubeSchedulerConfiguration) { + if obj.Port == 0 { + obj.Port = ports.SchedulerPort + } + if obj.Address == "" { + obj.Address = "0.0.0.0" + } + if obj.AlgorithmProvider == "" { + obj.AlgorithmProvider = "DefaultProvider" + } + if obj.ContentType == "" { + obj.ContentType = "application/vnd.kubernetes.protobuf" + } + if obj.KubeAPIQPS == 0 { + obj.KubeAPIQPS = 50.0 + } + if obj.KubeAPIBurst == 0 { + obj.KubeAPIBurst = 100 + } + if obj.SchedulerName == "" { + obj.SchedulerName = api.DefaultSchedulerName + } + if obj.HardPodAffinitySymmetricWeight == 0 { + obj.HardPodAffinitySymmetricWeight = api.DefaultHardPodAffinitySymmetricWeight + } + if obj.FailureDomains == "" { + obj.FailureDomains = api.DefaultFailureDomains + } +} + +func SetDefaults_LeaderElectionConfiguration(obj *LeaderElectionConfiguration) { + zero := unversioned.Duration{} + if obj.LeaseDuration == zero { + obj.LeaseDuration = unversioned.Duration{Duration: 15 * time.Second} + } + if obj.RenewDeadline == zero { + obj.RenewDeadline = unversioned.Duration{Duration: 10 * time.Second} + } + if obj.RetryPeriod == zero { + obj.RetryPeriod = unversioned.Duration{Duration: 2 * time.Second} + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/doc.go new file mode 100644 index 000000000000..65a03a2093dc --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +genconversion=true +package v1alpha1 diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/types.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/types.go index 7210541ddf10..2ae65d87da1d 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/types.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/types.go @@ -24,6 +24,10 @@ type KubeProxyConfiguration struct { // bindAddress is the IP address for the proxy server to serve on (set to 0.0.0.0 // for all interfaces) BindAddress string `json:"bindAddress"` + // clusterCIDR is the CIDR range of the pods in the cluster. It is used to + // bridge traffic coming from outside of the cluster. If not provided, + // no off-cluster bridging will be performed. + ClusterCIDR string `json:"clusterCIDR"` // healthzBindAddress is the IP address for the health check server to serve on, // defaulting to 127.0.0.1 (set to 0.0.0.0 for all interfaces) HealthzBindAddress string `json:"healthzBindAddress"` @@ -91,6 +95,8 @@ type KubeSchedulerConfiguration struct { PolicyConfigFile string `json:"policyConfigFile"` // enableProfiling enables profiling via web interface. EnableProfiling *bool `json:"enableProfiling"` + // contentType is contentType of requests sent to apiserver. + ContentType string `json:"contentType"` // kubeAPIQPS is the QPS to use while talking with kubernetes apiserver. KubeAPIQPS float32 `json:"kubeAPIQPS"` // kubeAPIBurst is the QPS burst to use while talking with kubernetes apiserver. @@ -99,6 +105,12 @@ type KubeSchedulerConfiguration struct { // will be processed by this scheduler, based on pod's annotation with // key 'scheduler.alpha.kubernetes.io/name'. SchedulerName string `json:"schedulerName"` + // RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule + // corresponding to every RequiredDuringScheduling affinity rule. + // HardPodAffinitySymmetricWeight represents the weight of implicit PreferredDuringScheduling affinity rule, in the range 0-100. + HardPodAffinitySymmetricWeight int `json:"hardPodAffinitySymmetricWeight"` + // Indicate the "all topologies" set for empty topologyKey when it's used for PreferredDuringScheduling pod anti-affinity. + FailureDomains string `json:"failureDomains"` // leaderElection defines the configuration of leader election client. LeaderElection LeaderElectionConfiguration `json:"leaderElection"` } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/extensions/deep_copy_generated.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/extensions/deep_copy_generated.go index ff5898cfe0d9..0d3654117873 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/extensions/deep_copy_generated.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/extensions/deep_copy_generated.go @@ -1,5 +1,7 @@ +// +build !ignore_autogenerated + /* -Copyright 2015 The Kubernetes Authors All rights reserved. +Copyright 2016 The Kubernetes Authors All rights reserved. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,16 +16,947 @@ See the License for the specific language governing permissions and limitations under the License. */ -// DO NOT EDIT. THIS FILE IS AUTO-GENERATED BY $KUBEROOT/hack/update-generated-deep-copies.sh. +// This file was autogenerated by deepcopy-gen. Do not edit it manually! package extensions -import api "k8s.io/kubernetes/pkg/api" +import ( + api "k8s.io/kubernetes/pkg/api" + resource "k8s.io/kubernetes/pkg/api/resource" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + conversion "k8s.io/kubernetes/pkg/conversion" + intstr "k8s.io/kubernetes/pkg/util/intstr" +) func init() { - err := api.Scheme.AddGeneratedDeepCopyFuncs() - if err != nil { + if err := api.Scheme.AddGeneratedDeepCopyFuncs( + DeepCopy_extensions_APIVersion, + DeepCopy_extensions_CustomMetricCurrentStatus, + DeepCopy_extensions_CustomMetricCurrentStatusList, + DeepCopy_extensions_CustomMetricTarget, + DeepCopy_extensions_CustomMetricTargetList, + DeepCopy_extensions_DaemonSet, + DeepCopy_extensions_DaemonSetList, + DeepCopy_extensions_DaemonSetSpec, + DeepCopy_extensions_DaemonSetStatus, + DeepCopy_extensions_Deployment, + DeepCopy_extensions_DeploymentList, + DeepCopy_extensions_DeploymentRollback, + DeepCopy_extensions_DeploymentSpec, + DeepCopy_extensions_DeploymentStatus, + DeepCopy_extensions_DeploymentStrategy, + DeepCopy_extensions_FSGroupStrategyOptions, + DeepCopy_extensions_HTTPIngressPath, + DeepCopy_extensions_HTTPIngressRuleValue, + DeepCopy_extensions_HostPortRange, + DeepCopy_extensions_IDRange, + DeepCopy_extensions_Ingress, + DeepCopy_extensions_IngressBackend, + DeepCopy_extensions_IngressList, + DeepCopy_extensions_IngressRule, + DeepCopy_extensions_IngressRuleValue, + DeepCopy_extensions_IngressSpec, + DeepCopy_extensions_IngressStatus, + DeepCopy_extensions_IngressTLS, + DeepCopy_extensions_NetworkPolicy, + DeepCopy_extensions_NetworkPolicyIngressRule, + DeepCopy_extensions_NetworkPolicyList, + DeepCopy_extensions_NetworkPolicyPeer, + DeepCopy_extensions_NetworkPolicyPort, + DeepCopy_extensions_NetworkPolicySpec, + DeepCopy_extensions_PodSecurityPolicy, + DeepCopy_extensions_PodSecurityPolicyList, + DeepCopy_extensions_PodSecurityPolicySpec, + DeepCopy_extensions_ReplicaSet, + DeepCopy_extensions_ReplicaSetList, + DeepCopy_extensions_ReplicaSetSpec, + DeepCopy_extensions_ReplicaSetStatus, + DeepCopy_extensions_ReplicationControllerDummy, + DeepCopy_extensions_RollbackConfig, + DeepCopy_extensions_RollingUpdateDeployment, + DeepCopy_extensions_RunAsUserStrategyOptions, + DeepCopy_extensions_SELinuxStrategyOptions, + DeepCopy_extensions_Scale, + DeepCopy_extensions_ScaleSpec, + DeepCopy_extensions_ScaleStatus, + DeepCopy_extensions_SupplementalGroupsStrategyOptions, + DeepCopy_extensions_ThirdPartyResource, + DeepCopy_extensions_ThirdPartyResourceData, + DeepCopy_extensions_ThirdPartyResourceDataList, + DeepCopy_extensions_ThirdPartyResourceList, + ); err != nil { // if one of the deep copy functions is malformed, detect it immediately. 
panic(err) } } + +func DeepCopy_extensions_APIVersion(in APIVersion, out *APIVersion, c *conversion.Cloner) error { + out.Name = in.Name + return nil +} + +func DeepCopy_extensions_CustomMetricCurrentStatus(in CustomMetricCurrentStatus, out *CustomMetricCurrentStatus, c *conversion.Cloner) error { + out.Name = in.Name + if err := resource.DeepCopy_resource_Quantity(in.CurrentValue, &out.CurrentValue, c); err != nil { + return err + } + return nil +} + +func DeepCopy_extensions_CustomMetricCurrentStatusList(in CustomMetricCurrentStatusList, out *CustomMetricCurrentStatusList, c *conversion.Cloner) error { + if in.Items != nil { + in, out := in.Items, &out.Items + *out = make([]CustomMetricCurrentStatus, len(in)) + for i := range in { + if err := DeepCopy_extensions_CustomMetricCurrentStatus(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func DeepCopy_extensions_CustomMetricTarget(in CustomMetricTarget, out *CustomMetricTarget, c *conversion.Cloner) error { + out.Name = in.Name + if err := resource.DeepCopy_resource_Quantity(in.TargetValue, &out.TargetValue, c); err != nil { + return err + } + return nil +} + +func DeepCopy_extensions_CustomMetricTargetList(in CustomMetricTargetList, out *CustomMetricTargetList, c *conversion.Cloner) error { + if in.Items != nil { + in, out := in.Items, &out.Items + *out = make([]CustomMetricTarget, len(in)) + for i := range in { + if err := DeepCopy_extensions_CustomMetricTarget(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func DeepCopy_extensions_DaemonSet(in DaemonSet, out *DaemonSet, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + return err + } + if err := DeepCopy_extensions_DaemonSetSpec(in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_extensions_DaemonSetStatus(in.Status, &out.Status, c); err != nil { + return err + } + return nil +} + +func DeepCopy_extensions_DaemonSetList(in DaemonSetList, out *DaemonSetList, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + return err + } + if in.Items != nil { + in, out := in.Items, &out.Items + *out = make([]DaemonSet, len(in)) + for i := range in { + if err := DeepCopy_extensions_DaemonSet(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func DeepCopy_extensions_DaemonSetSpec(in DaemonSetSpec, out *DaemonSetSpec, c *conversion.Cloner) error { + if in.Selector != nil { + in, out := in.Selector, &out.Selector + *out = new(unversioned.LabelSelector) + if err := unversioned.DeepCopy_unversioned_LabelSelector(*in, *out, c); err != nil { + return err + } + } else { + out.Selector = nil + } + if err := api.DeepCopy_api_PodTemplateSpec(in.Template, &out.Template, c); err != nil { + return err + } + return nil +} + +func DeepCopy_extensions_DaemonSetStatus(in DaemonSetStatus, out *DaemonSetStatus, c *conversion.Cloner) error { + out.CurrentNumberScheduled = in.CurrentNumberScheduled + out.NumberMisscheduled = in.NumberMisscheduled + out.DesiredNumberScheduled = in.DesiredNumberScheduled + return nil +} + +func 
DeepCopy_extensions_Deployment(in Deployment, out *Deployment, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + return err + } + if err := DeepCopy_extensions_DeploymentSpec(in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_extensions_DeploymentStatus(in.Status, &out.Status, c); err != nil { + return err + } + return nil +} + +func DeepCopy_extensions_DeploymentList(in DeploymentList, out *DeploymentList, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + return err + } + if in.Items != nil { + in, out := in.Items, &out.Items + *out = make([]Deployment, len(in)) + for i := range in { + if err := DeepCopy_extensions_Deployment(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func DeepCopy_extensions_DeploymentRollback(in DeploymentRollback, out *DeploymentRollback, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + out.Name = in.Name + if in.UpdatedAnnotations != nil { + in, out := in.UpdatedAnnotations, &out.UpdatedAnnotations + *out = make(map[string]string) + for key, val := range in { + (*out)[key] = val + } + } else { + out.UpdatedAnnotations = nil + } + if err := DeepCopy_extensions_RollbackConfig(in.RollbackTo, &out.RollbackTo, c); err != nil { + return err + } + return nil +} + +func DeepCopy_extensions_DeploymentSpec(in DeploymentSpec, out *DeploymentSpec, c *conversion.Cloner) error { + out.Replicas = in.Replicas + if in.Selector != nil { + in, out := in.Selector, &out.Selector + *out = new(unversioned.LabelSelector) + if err := unversioned.DeepCopy_unversioned_LabelSelector(*in, *out, c); err != nil { + return err + } + } else { + out.Selector = nil + } + if err := api.DeepCopy_api_PodTemplateSpec(in.Template, &out.Template, c); err != nil { + return err + } + if err := DeepCopy_extensions_DeploymentStrategy(in.Strategy, &out.Strategy, c); err != nil { + return err + } + out.MinReadySeconds = in.MinReadySeconds + if in.RevisionHistoryLimit != nil { + in, out := in.RevisionHistoryLimit, &out.RevisionHistoryLimit + *out = new(int32) + **out = *in + } else { + out.RevisionHistoryLimit = nil + } + out.Paused = in.Paused + if in.RollbackTo != nil { + in, out := in.RollbackTo, &out.RollbackTo + *out = new(RollbackConfig) + if err := DeepCopy_extensions_RollbackConfig(*in, *out, c); err != nil { + return err + } + } else { + out.RollbackTo = nil + } + return nil +} + +func DeepCopy_extensions_DeploymentStatus(in DeploymentStatus, out *DeploymentStatus, c *conversion.Cloner) error { + out.ObservedGeneration = in.ObservedGeneration + out.Replicas = in.Replicas + out.UpdatedReplicas = in.UpdatedReplicas + out.AvailableReplicas = in.AvailableReplicas + out.UnavailableReplicas = in.UnavailableReplicas + return nil +} + +func DeepCopy_extensions_DeploymentStrategy(in DeploymentStrategy, out *DeploymentStrategy, c *conversion.Cloner) error { + out.Type = in.Type + if in.RollingUpdate != nil { + in, out := in.RollingUpdate, &out.RollingUpdate + *out = new(RollingUpdateDeployment) + if err := DeepCopy_extensions_RollingUpdateDeployment(*in, 
*out, c); err != nil { + return err + } + } else { + out.RollingUpdate = nil + } + return nil +} + +func DeepCopy_extensions_FSGroupStrategyOptions(in FSGroupStrategyOptions, out *FSGroupStrategyOptions, c *conversion.Cloner) error { + out.Rule = in.Rule + if in.Ranges != nil { + in, out := in.Ranges, &out.Ranges + *out = make([]IDRange, len(in)) + for i := range in { + if err := DeepCopy_extensions_IDRange(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Ranges = nil + } + return nil +} + +func DeepCopy_extensions_HTTPIngressPath(in HTTPIngressPath, out *HTTPIngressPath, c *conversion.Cloner) error { + out.Path = in.Path + if err := DeepCopy_extensions_IngressBackend(in.Backend, &out.Backend, c); err != nil { + return err + } + return nil +} + +func DeepCopy_extensions_HTTPIngressRuleValue(in HTTPIngressRuleValue, out *HTTPIngressRuleValue, c *conversion.Cloner) error { + if in.Paths != nil { + in, out := in.Paths, &out.Paths + *out = make([]HTTPIngressPath, len(in)) + for i := range in { + if err := DeepCopy_extensions_HTTPIngressPath(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Paths = nil + } + return nil +} + +func DeepCopy_extensions_HostPortRange(in HostPortRange, out *HostPortRange, c *conversion.Cloner) error { + out.Min = in.Min + out.Max = in.Max + return nil +} + +func DeepCopy_extensions_IDRange(in IDRange, out *IDRange, c *conversion.Cloner) error { + out.Min = in.Min + out.Max = in.Max + return nil +} + +func DeepCopy_extensions_Ingress(in Ingress, out *Ingress, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + return err + } + if err := DeepCopy_extensions_IngressSpec(in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_extensions_IngressStatus(in.Status, &out.Status, c); err != nil { + return err + } + return nil +} + +func DeepCopy_extensions_IngressBackend(in IngressBackend, out *IngressBackend, c *conversion.Cloner) error { + out.ServiceName = in.ServiceName + if err := intstr.DeepCopy_intstr_IntOrString(in.ServicePort, &out.ServicePort, c); err != nil { + return err + } + return nil +} + +func DeepCopy_extensions_IngressList(in IngressList, out *IngressList, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + return err + } + if in.Items != nil { + in, out := in.Items, &out.Items + *out = make([]Ingress, len(in)) + for i := range in { + if err := DeepCopy_extensions_Ingress(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func DeepCopy_extensions_IngressRule(in IngressRule, out *IngressRule, c *conversion.Cloner) error { + out.Host = in.Host + if err := DeepCopy_extensions_IngressRuleValue(in.IngressRuleValue, &out.IngressRuleValue, c); err != nil { + return err + } + return nil +} + +func DeepCopy_extensions_IngressRuleValue(in IngressRuleValue, out *IngressRuleValue, c *conversion.Cloner) error { + if in.HTTP != nil { + in, out := in.HTTP, &out.HTTP + *out = new(HTTPIngressRuleValue) + if err := DeepCopy_extensions_HTTPIngressRuleValue(*in, *out, c); err != nil { + return err + } + } else { + out.HTTP = nil + } + return nil +} + +func 
DeepCopy_extensions_IngressSpec(in IngressSpec, out *IngressSpec, c *conversion.Cloner) error { + if in.Backend != nil { + in, out := in.Backend, &out.Backend + *out = new(IngressBackend) + if err := DeepCopy_extensions_IngressBackend(*in, *out, c); err != nil { + return err + } + } else { + out.Backend = nil + } + if in.TLS != nil { + in, out := in.TLS, &out.TLS + *out = make([]IngressTLS, len(in)) + for i := range in { + if err := DeepCopy_extensions_IngressTLS(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.TLS = nil + } + if in.Rules != nil { + in, out := in.Rules, &out.Rules + *out = make([]IngressRule, len(in)) + for i := range in { + if err := DeepCopy_extensions_IngressRule(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Rules = nil + } + return nil +} + +func DeepCopy_extensions_IngressStatus(in IngressStatus, out *IngressStatus, c *conversion.Cloner) error { + if err := api.DeepCopy_api_LoadBalancerStatus(in.LoadBalancer, &out.LoadBalancer, c); err != nil { + return err + } + return nil +} + +func DeepCopy_extensions_IngressTLS(in IngressTLS, out *IngressTLS, c *conversion.Cloner) error { + if in.Hosts != nil { + in, out := in.Hosts, &out.Hosts + *out = make([]string, len(in)) + copy(*out, in) + } else { + out.Hosts = nil + } + out.SecretName = in.SecretName + return nil +} + +func DeepCopy_extensions_NetworkPolicy(in NetworkPolicy, out *NetworkPolicy, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + return err + } + if err := DeepCopy_extensions_NetworkPolicySpec(in.Spec, &out.Spec, c); err != nil { + return err + } + return nil +} + +func DeepCopy_extensions_NetworkPolicyIngressRule(in NetworkPolicyIngressRule, out *NetworkPolicyIngressRule, c *conversion.Cloner) error { + if in.Ports != nil { + in, out := in.Ports, &out.Ports + *out = make([]NetworkPolicyPort, len(in)) + for i := range in { + if err := DeepCopy_extensions_NetworkPolicyPort(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Ports = nil + } + if in.From != nil { + in, out := in.From, &out.From + *out = make([]NetworkPolicyPeer, len(in)) + for i := range in { + if err := DeepCopy_extensions_NetworkPolicyPeer(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.From = nil + } + return nil +} + +func DeepCopy_extensions_NetworkPolicyList(in NetworkPolicyList, out *NetworkPolicyList, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + return err + } + if in.Items != nil { + in, out := in.Items, &out.Items + *out = make([]NetworkPolicy, len(in)) + for i := range in { + if err := DeepCopy_extensions_NetworkPolicy(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func DeepCopy_extensions_NetworkPolicyPeer(in NetworkPolicyPeer, out *NetworkPolicyPeer, c *conversion.Cloner) error { + if in.PodSelector != nil { + in, out := in.PodSelector, &out.PodSelector + *out = new(unversioned.LabelSelector) + if err := unversioned.DeepCopy_unversioned_LabelSelector(*in, *out, c); err != nil { + return err + } + } else { + out.PodSelector = nil + } + if in.NamespaceSelector != nil { + in, out := 
in.NamespaceSelector, &out.NamespaceSelector + *out = new(unversioned.LabelSelector) + if err := unversioned.DeepCopy_unversioned_LabelSelector(*in, *out, c); err != nil { + return err + } + } else { + out.NamespaceSelector = nil + } + return nil +} + +func DeepCopy_extensions_NetworkPolicyPort(in NetworkPolicyPort, out *NetworkPolicyPort, c *conversion.Cloner) error { + if in.Protocol != nil { + in, out := in.Protocol, &out.Protocol + *out = new(api.Protocol) + if newVal, err := c.DeepCopy(*in); err != nil { + return err + } else { + **out = newVal.(api.Protocol) + } + } else { + out.Protocol = nil + } + if in.Port != nil { + in, out := in.Port, &out.Port + *out = new(intstr.IntOrString) + if err := intstr.DeepCopy_intstr_IntOrString(*in, *out, c); err != nil { + return err + } + } else { + out.Port = nil + } + return nil +} + +func DeepCopy_extensions_NetworkPolicySpec(in NetworkPolicySpec, out *NetworkPolicySpec, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_LabelSelector(in.PodSelector, &out.PodSelector, c); err != nil { + return err + } + if in.Ingress != nil { + in, out := in.Ingress, &out.Ingress + *out = make([]NetworkPolicyIngressRule, len(in)) + for i := range in { + if err := DeepCopy_extensions_NetworkPolicyIngressRule(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Ingress = nil + } + return nil +} + +func DeepCopy_extensions_PodSecurityPolicy(in PodSecurityPolicy, out *PodSecurityPolicy, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + return err + } + if err := DeepCopy_extensions_PodSecurityPolicySpec(in.Spec, &out.Spec, c); err != nil { + return err + } + return nil +} + +func DeepCopy_extensions_PodSecurityPolicyList(in PodSecurityPolicyList, out *PodSecurityPolicyList, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + return err + } + if in.Items != nil { + in, out := in.Items, &out.Items + *out = make([]PodSecurityPolicy, len(in)) + for i := range in { + if err := DeepCopy_extensions_PodSecurityPolicy(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func DeepCopy_extensions_PodSecurityPolicySpec(in PodSecurityPolicySpec, out *PodSecurityPolicySpec, c *conversion.Cloner) error { + out.Privileged = in.Privileged + if in.DefaultAddCapabilities != nil { + in, out := in.DefaultAddCapabilities, &out.DefaultAddCapabilities + *out = make([]api.Capability, len(in)) + for i := range in { + (*out)[i] = in[i] + } + } else { + out.DefaultAddCapabilities = nil + } + if in.RequiredDropCapabilities != nil { + in, out := in.RequiredDropCapabilities, &out.RequiredDropCapabilities + *out = make([]api.Capability, len(in)) + for i := range in { + (*out)[i] = in[i] + } + } else { + out.RequiredDropCapabilities = nil + } + if in.AllowedCapabilities != nil { + in, out := in.AllowedCapabilities, &out.AllowedCapabilities + *out = make([]api.Capability, len(in)) + for i := range in { + (*out)[i] = in[i] + } + } else { + out.AllowedCapabilities = nil + } + if in.Volumes != nil { + in, out := in.Volumes, &out.Volumes + *out = make([]FSType, len(in)) + for i := range in { + (*out)[i] = in[i] + } + } else { + 
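// Generated convention throughout this file: a nil input slice or map is + // copied as nil rather than allocated empty, so a re-encoded copy keeps the + // unset-vs-empty distinction. +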
out.Volumes = nil + } + out.HostNetwork = in.HostNetwork + if in.HostPorts != nil { + in, out := in.HostPorts, &out.HostPorts + *out = make([]HostPortRange, len(in)) + for i := range in { + if err := DeepCopy_extensions_HostPortRange(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.HostPorts = nil + } + out.HostPID = in.HostPID + out.HostIPC = in.HostIPC + if err := DeepCopy_extensions_SELinuxStrategyOptions(in.SELinux, &out.SELinux, c); err != nil { + return err + } + if err := DeepCopy_extensions_RunAsUserStrategyOptions(in.RunAsUser, &out.RunAsUser, c); err != nil { + return err + } + if err := DeepCopy_extensions_SupplementalGroupsStrategyOptions(in.SupplementalGroups, &out.SupplementalGroups, c); err != nil { + return err + } + if err := DeepCopy_extensions_FSGroupStrategyOptions(in.FSGroup, &out.FSGroup, c); err != nil { + return err + } + out.ReadOnlyRootFilesystem = in.ReadOnlyRootFilesystem + return nil +} + +func DeepCopy_extensions_ReplicaSet(in ReplicaSet, out *ReplicaSet, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + return err + } + if err := DeepCopy_extensions_ReplicaSetSpec(in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_extensions_ReplicaSetStatus(in.Status, &out.Status, c); err != nil { + return err + } + return nil +} + +func DeepCopy_extensions_ReplicaSetList(in ReplicaSetList, out *ReplicaSetList, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + return err + } + if in.Items != nil { + in, out := in.Items, &out.Items + *out = make([]ReplicaSet, len(in)) + for i := range in { + if err := DeepCopy_extensions_ReplicaSet(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func DeepCopy_extensions_ReplicaSetSpec(in ReplicaSetSpec, out *ReplicaSetSpec, c *conversion.Cloner) error { + out.Replicas = in.Replicas + if in.Selector != nil { + in, out := in.Selector, &out.Selector + *out = new(unversioned.LabelSelector) + if err := unversioned.DeepCopy_unversioned_LabelSelector(*in, *out, c); err != nil { + return err + } + } else { + out.Selector = nil + } + if err := api.DeepCopy_api_PodTemplateSpec(in.Template, &out.Template, c); err != nil { + return err + } + return nil +} + +func DeepCopy_extensions_ReplicaSetStatus(in ReplicaSetStatus, out *ReplicaSetStatus, c *conversion.Cloner) error { + out.Replicas = in.Replicas + out.FullyLabeledReplicas = in.FullyLabeledReplicas + out.ObservedGeneration = in.ObservedGeneration + return nil +} + +func DeepCopy_extensions_ReplicationControllerDummy(in ReplicationControllerDummy, out *ReplicationControllerDummy, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + return nil +} + +func DeepCopy_extensions_RollbackConfig(in RollbackConfig, out *RollbackConfig, c *conversion.Cloner) error { + out.Revision = in.Revision + return nil +} + +func DeepCopy_extensions_RollingUpdateDeployment(in RollingUpdateDeployment, out *RollingUpdateDeployment, c *conversion.Cloner) error { + if err := intstr.DeepCopy_intstr_IntOrString(in.MaxUnavailable, &out.MaxUnavailable, 
c); err != nil { + return err + } + if err := intstr.DeepCopy_intstr_IntOrString(in.MaxSurge, &out.MaxSurge, c); err != nil { + return err + } + return nil +} + +func DeepCopy_extensions_RunAsUserStrategyOptions(in RunAsUserStrategyOptions, out *RunAsUserStrategyOptions, c *conversion.Cloner) error { + out.Rule = in.Rule + if in.Ranges != nil { + in, out := in.Ranges, &out.Ranges + *out = make([]IDRange, len(in)) + for i := range in { + if err := DeepCopy_extensions_IDRange(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Ranges = nil + } + return nil +} + +func DeepCopy_extensions_SELinuxStrategyOptions(in SELinuxStrategyOptions, out *SELinuxStrategyOptions, c *conversion.Cloner) error { + out.Rule = in.Rule + if in.SELinuxOptions != nil { + in, out := in.SELinuxOptions, &out.SELinuxOptions + *out = new(api.SELinuxOptions) + if err := api.DeepCopy_api_SELinuxOptions(*in, *out, c); err != nil { + return err + } + } else { + out.SELinuxOptions = nil + } + return nil +} + +func DeepCopy_extensions_Scale(in Scale, out *Scale, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + return err + } + if err := DeepCopy_extensions_ScaleSpec(in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_extensions_ScaleStatus(in.Status, &out.Status, c); err != nil { + return err + } + return nil +} + +func DeepCopy_extensions_ScaleSpec(in ScaleSpec, out *ScaleSpec, c *conversion.Cloner) error { + out.Replicas = in.Replicas + return nil +} + +func DeepCopy_extensions_ScaleStatus(in ScaleStatus, out *ScaleStatus, c *conversion.Cloner) error { + out.Replicas = in.Replicas + if in.Selector != nil { + in, out := in.Selector, &out.Selector + *out = new(unversioned.LabelSelector) + if err := unversioned.DeepCopy_unversioned_LabelSelector(*in, *out, c); err != nil { + return err + } + } else { + out.Selector = nil + } + return nil +} + +func DeepCopy_extensions_SupplementalGroupsStrategyOptions(in SupplementalGroupsStrategyOptions, out *SupplementalGroupsStrategyOptions, c *conversion.Cloner) error { + out.Rule = in.Rule + if in.Ranges != nil { + in, out := in.Ranges, &out.Ranges + *out = make([]IDRange, len(in)) + for i := range in { + if err := DeepCopy_extensions_IDRange(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Ranges = nil + } + return nil +} + +func DeepCopy_extensions_ThirdPartyResource(in ThirdPartyResource, out *ThirdPartyResource, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + return err + } + out.Description = in.Description + if in.Versions != nil { + in, out := in.Versions, &out.Versions + *out = make([]APIVersion, len(in)) + for i := range in { + if err := DeepCopy_extensions_APIVersion(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Versions = nil + } + return nil +} + +func DeepCopy_extensions_ThirdPartyResourceData(in ThirdPartyResourceData, out *ThirdPartyResourceData, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + return err + } + if in.Data 
!= nil { + in, out := in.Data, &out.Data + *out = make([]byte, len(in)) + copy(*out, in) + } else { + out.Data = nil + } + return nil +} + +func DeepCopy_extensions_ThirdPartyResourceDataList(in ThirdPartyResourceDataList, out *ThirdPartyResourceDataList, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + return err + } + if in.Items != nil { + in, out := in.Items, &out.Items + *out = make([]ThirdPartyResourceData, len(in)) + for i := range in { + if err := DeepCopy_extensions_ThirdPartyResourceData(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func DeepCopy_extensions_ThirdPartyResourceList(in ThirdPartyResourceList, out *ThirdPartyResourceList, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + return err + } + if in.Items != nil { + in, out := in.Items, &out.Items + *out = make([]ThirdPartyResource, len(in)) + for i := range in { + if err := DeepCopy_extensions_ThirdPartyResource(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/extensions/install/install.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/extensions/install/install.go index 90d5c507335b..449127084b32 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/extensions/install/install.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/extensions/install/install.go @@ -92,6 +92,7 @@ func newRESTMapper(externalVersions []unversioned.GroupVersion) meta.RESTMapper // if a kind is not enumerated here, it is assumed to have a namespace scope rootScoped := sets.NewString( "PodSecurityPolicy", + "ThirdPartyResource", ) ignoredKinds := sets.NewString() diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/extensions/install/install_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/extensions/install/install_test.go new file mode 100644 index 000000000000..3252462a1d55 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/extensions/install/install_test.go @@ -0,0 +1,118 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package install + +import ( + "encoding/json" + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/meta" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/apimachinery/registered" + "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" + "k8s.io/kubernetes/pkg/runtime" +) + +func TestResourceVersioner(t *testing.T) { + accessor := meta.NewAccessor() + daemonSet := extensions.DaemonSet{ObjectMeta: api.ObjectMeta{ResourceVersion: "10"}} + version, err := accessor.ResourceVersion(&daemonSet) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if version != "10" { + t.Errorf("unexpected version %v", version) + } + + daemonSetList := extensions.DaemonSetList{ListMeta: unversioned.ListMeta{ResourceVersion: "10"}} + version, err = accessor.ResourceVersion(&daemonSetList) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if version != "10" { + t.Errorf("unexpected version %v", version) + } +} + +func TestCodec(t *testing.T) { + daemonSet := extensions.DaemonSet{} + // We do want to use package registered rather than testapi here, because we + // want to test if the package install and package registered work as expected. + data, err := runtime.Encode(api.Codecs.LegacyCodec(registered.GroupOrDie(extensions.GroupName).GroupVersion), &daemonSet) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + other := extensions.DaemonSet{} + if err := json.Unmarshal(data, &other); err != nil { + t.Fatalf("unexpected error: %v", err) + } + if other.APIVersion != registered.GroupOrDie(extensions.GroupName).GroupVersion.String() || other.Kind != "DaemonSet" { + t.Errorf("unexpected unmarshalled object %#v", other) + } +} + +func TestInterfacesFor(t *testing.T) { + if _, err := registered.GroupOrDie(extensions.GroupName).InterfacesFor(extensions.SchemeGroupVersion); err == nil { + t.Fatalf("unexpected non-error: %v", err) + } + for i, version := range registered.GroupOrDie(extensions.GroupName).GroupVersions { + if vi, err := registered.GroupOrDie(extensions.GroupName).InterfacesFor(version); err != nil || vi == nil { + t.Fatalf("%d: unexpected result: %v", i, err) + } + } +} + +func TestRESTMapper(t *testing.T) { + gv := v1beta1.SchemeGroupVersion + daemonSetGVK := gv.WithKind("DaemonSet") + + if gvk, err := registered.GroupOrDie(extensions.GroupName).RESTMapper.KindFor(gv.WithResource("daemonsets")); err != nil || gvk != daemonSetGVK { + t.Errorf("unexpected version mapping: %v %v", gvk, err) + } + + if m, err := registered.GroupOrDie(extensions.GroupName).RESTMapper.RESTMapping(daemonSetGVK.GroupKind(), ""); err != nil || m.GroupVersionKind != daemonSetGVK || m.Resource != "daemonsets" { + t.Errorf("unexpected version mapping: %#v %v", m, err) + } + + for _, version := range registered.GroupOrDie(extensions.GroupName).GroupVersions { + mapping, err := registered.GroupOrDie(extensions.GroupName).RESTMapper.RESTMapping(daemonSetGVK.GroupKind(), version.Version) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + + if mapping.Resource != "daemonsets" { + t.Errorf("incorrect resource name: %#v", mapping) + } + if mapping.GroupVersionKind.GroupVersion() != version { + t.Errorf("incorrect groupVersion: %v", mapping) + } + + interfaces, _ := registered.GroupOrDie(extensions.GroupName).InterfacesFor(version) + if mapping.ObjectConvertor != interfaces.ObjectConvertor { + t.Errorf("unexpected: %#v, expected: %#v", mapping, interfaces) + } + + rc := &extensions.DaemonSet{ObjectMeta: api.ObjectMeta{Name: "foo"}} + name, err := mapping.MetadataAccessor.Name(rc) +
if err != nil { + t.Errorf("unexpected error: %v", err) + } + if name != "foo" { + t.Errorf("unable to retrieve object meta with: %v", mapping.MetadataAccessor) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/extensions/register.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/extensions/register.go index 057940d1612a..3264ae658aef 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/extensions/register.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/extensions/register.go @@ -19,6 +19,8 @@ package extensions import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/apis/autoscaling" + "k8s.io/kubernetes/pkg/apis/batch" "k8s.io/kubernetes/pkg/runtime" ) @@ -50,10 +52,11 @@ func addKnownTypes(scheme *runtime.Scheme) { &Deployment{}, &DeploymentList{}, &DeploymentRollback{}, - &HorizontalPodAutoscaler{}, - &HorizontalPodAutoscalerList{}, - &Job{}, - &JobList{}, + &autoscaling.HorizontalPodAutoscaler{}, + &autoscaling.HorizontalPodAutoscalerList{}, + &batch.Job{}, + &batch.JobList{}, + &batch.JobTemplate{}, &ReplicationControllerDummy{}, &Scale{}, &ThirdPartyResource{}, @@ -70,27 +73,27 @@ func addKnownTypes(scheme *runtime.Scheme) { &api.ExportOptions{}, &PodSecurityPolicy{}, &PodSecurityPolicyList{}, + &NetworkPolicy{}, + &NetworkPolicyList{}, ) } -func (obj *Deployment) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } -func (obj *DeploymentList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } -func (obj *DeploymentRollback) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } -func (obj *HorizontalPodAutoscaler) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } -func (obj *HorizontalPodAutoscalerList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } -func (obj *Job) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } -func (obj *JobList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } -func (obj *ReplicationControllerDummy) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } -func (obj *Scale) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } -func (obj *ThirdPartyResource) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } -func (obj *ThirdPartyResourceList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } -func (obj *DaemonSet) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } -func (obj *DaemonSetList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } -func (obj *ThirdPartyResourceData) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } -func (obj *ThirdPartyResourceDataList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } -func (obj *Ingress) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } -func (obj *IngressList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } -func (obj *ReplicaSet) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } -func (obj *ReplicaSetList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } -func (obj *PodSecurityPolicy) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } -func (obj *PodSecurityPolicyList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } +func (obj *Deployment) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } +func (obj *DeploymentList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } +func (obj *DeploymentRollback) GetObjectKind() 
unversioned.ObjectKind { return &obj.TypeMeta } +func (obj *ReplicationControllerDummy) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } +func (obj *Scale) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } +func (obj *ThirdPartyResource) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } +func (obj *ThirdPartyResourceList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } +func (obj *DaemonSet) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } +func (obj *DaemonSetList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } +func (obj *ThirdPartyResourceData) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } +func (obj *ThirdPartyResourceDataList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } +func (obj *Ingress) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } +func (obj *IngressList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } +func (obj *ReplicaSet) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } +func (obj *ReplicaSetList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } +func (obj *PodSecurityPolicy) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } +func (obj *PodSecurityPolicyList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } +func (obj *NetworkPolicy) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } +func (obj *NetworkPolicyList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/extensions/types.generated.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/extensions/types.generated.go index 2df949897669..eec90fb0a2cd 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/extensions/types.generated.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/extensions/types.generated.go @@ -1,5 +1,5 @@ /* -Copyright 2015 The Kubernetes Authors All rights reserved. +Copyright 2016 The Kubernetes Authors All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -29,10 +29,9 @@ import ( pkg4_resource "k8s.io/kubernetes/pkg/api/resource" pkg1_unversioned "k8s.io/kubernetes/pkg/api/unversioned" pkg3_types "k8s.io/kubernetes/pkg/types" - pkg6_intstr "k8s.io/kubernetes/pkg/util/intstr" + pkg5_intstr "k8s.io/kubernetes/pkg/util/intstr" "reflect" "runtime" - pkg5_inf "speter.net/go/exp/math/dec/inf" time "time" ) @@ -70,10 +69,9 @@ func init() { var v1 pkg4_resource.Quantity var v2 pkg1_unversioned.LabelSelector var v3 pkg3_types.UID - var v4 pkg6_intstr.IntOrString - var v5 pkg5_inf.Dec - var v6 time.Time - _, _, _, _, _, _, _ = v0, v1, v2, v3, v4, v5, v6 + var v4 pkg5_intstr.IntOrString + var v5 time.Time + _, _, _, _, _, _ = v0, v1, v2, v3, v4, v5 } } @@ -198,7 +196,7 @@ func (x *ScaleSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Replicas = 0 } else { - x.Replicas = int(r.DecodeInt(codecSelferBitsize1234)) + x.Replicas = int32(r.DecodeInt(32)) } default: z.DecStructFieldNotFound(-1, yys3) @@ -228,7 +226,7 @@ func (x *ScaleSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Replicas = 0 } else { - x.Replicas = int(r.DecodeInt(codecSelferBitsize1234)) + x.Replicas = int32(r.DecodeInt(32)) } for { yyj5++ @@ -396,7 +394,7 @@ func (x *ScaleStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Replicas = 0 } else { - x.Replicas = int(r.DecodeInt(codecSelferBitsize1234)) + x.Replicas = int32(r.DecodeInt(32)) } case "selector": if r.TryDecodeAsNil() { @@ -443,7 +441,7 @@ func (x *ScaleStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Replicas = 0 } else { - x.Replicas = int(r.DecodeInt(codecSelferBitsize1234)) + x.Replicas = int32(r.DecodeInt(32)) } yyj7++ if yyhl7 { @@ -1048,7 +1046,7 @@ func (x *ReplicationControllerDummy) codecDecodeSelfFromArray(l int, d *codec197 z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *SubresourceReference) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *CustomMetricTarget) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -1062,18 +1060,14 @@ func (x *SubresourceReference) CodecEncodeSelf(e *codec1978.Encoder) { } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool + var yyq2 [2]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.Name != "" - yyq2[2] = x.APIVersion != "" - yyq2[3] = x.Subresource != "" var yynn2 int if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) + r.EncodeArrayStart(2) } else { - yynn2 = 0 + yynn2 = 2 for _, b := range yyq2 { if b { yynn2++ @@ -1084,102 +1078,48 @@ func (x *SubresourceReference) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } + yym4 := z.EncBinary() + _ = yym4 + if false { } else { - r.EncodeString(codecSelferC_UTF81234, "") + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) } } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } + yy7 := &x.TargetValue + yym8 := z.EncBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.EncExt(yy7) { + } else if !yym8 && z.IsJSONHandle() { + z.EncJSONMarshal(yy7) } else { - r.EncodeString(codecSelferC_UTF81234, "") + z.EncFallback(yy7) } } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Subresource)) - } + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("value")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy9 := &x.TargetValue + yym10 := z.EncBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.EncExt(yy9) { + } else if !yym10 && z.IsJSONHandle() { + z.EncJSONMarshal(yy9) } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("subresource")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Subresource)) - } + z.EncFallback(yy9) } } if yyr2 || yy2arr2 { @@ -1191,7 +1131,7 @@ func (x *SubresourceReference) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *SubresourceReference) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *CustomMetricTarget) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -1221,7 +1161,7 @@ func (x *SubresourceReference) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *SubresourceReference) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *CustomMetricTarget) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -1243,29 +1183,26 @@ func (x *SubresourceReference) codecDecodeSelfFromMap(l int, d *codec1978.Decode yys3 := string(yys3Slc) 
z.DecSendContainerState(codecSelfer_containerMapValue1234) switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } case "name": if r.TryDecodeAsNil() { x.Name = "" } else { x.Name = string(r.DecodeString()) } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "subresource": + case "value": if r.TryDecodeAsNil() { - x.Subresource = "" + x.TargetValue = pkg4_resource.Quantity{} } else { - x.Subresource = string(r.DecodeString()) + yyv5 := &x.TargetValue + yym6 := z.DecBinary() + _ = yym6 + if false { + } else if z.HasExtensions() && z.DecExt(yyv5) { + } else if !yym6 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv5) + } else { + z.DecFallback(yyv5, false) + } } default: z.DecStructFieldNotFound(-1, yys3) @@ -1274,36 +1211,20 @@ func (x *SubresourceReference) codecDecodeSelfFromMap(l int, d *codec1978.Decode z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *SubresourceReference) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *CustomMetricTarget) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb8 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb8 { + if yyb7 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -1313,55 +1234,48 @@ func (x *SubresourceReference) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.Name = string(r.DecodeString()) } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb8 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb8 { + if yyb7 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Subresource = "" + x.TargetValue = pkg4_resource.Quantity{} } else { - x.Subresource = string(r.DecodeString()) + yyv9 := &x.TargetValue + yym10 := z.DecBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.DecExt(yyv9) { + } else if !yym10 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv9) + } else { + z.DecFallback(yyv9, false) + } } for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb8 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb8 { + if yyb7 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") + z.DecStructFieldNotFound(yyj7-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *CPUTargetUtilization) CodecEncodeSelf(e *codec1978.Encoder) { +func (x 
*CustomMetricTargetList) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -1393,21 +1307,29 @@ func (x *CPUTargetUtilization) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { + if x.Items == nil { + r.EncodeNil() } else { - r.EncodeInt(int64(x.TargetPercentage)) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSliceCustomMetricTarget(([]CustomMetricTarget)(x.Items), e) + } } } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("targetPercentage")) + r.EncodeString(codecSelferC_UTF81234, string("items")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { + if x.Items == nil { + r.EncodeNil() } else { - r.EncodeInt(int64(x.TargetPercentage)) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSliceCustomMetricTarget(([]CustomMetricTarget)(x.Items), e) + } } } if yyr2 || yy2arr2 { @@ -1419,7 +1341,7 @@ func (x *CPUTargetUtilization) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *CPUTargetUtilization) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *CustomMetricTargetList) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -1449,7 +1371,7 @@ func (x *CPUTargetUtilization) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *CPUTargetUtilization) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *CustomMetricTargetList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -1471,12 +1393,18 @@ func (x *CPUTargetUtilization) codecDecodeSelfFromMap(l int, d *codec1978.Decode yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) switch yys3 { - case "targetPercentage": + case "items": if r.TryDecodeAsNil() { - x.TargetPercentage = 0 + x.Items = nil } else { - x.TargetPercentage = int(r.DecodeInt(codecSelferBitsize1234)) - } + yyv4 := &x.Items + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSliceCustomMetricTarget((*[]CustomMetricTarget)(yyv4), d) + } + } default: z.DecStructFieldNotFound(-1, yys3) } // end switch yys3 @@ -1484,46 +1412,52 @@ func (x *CPUTargetUtilization) codecDecodeSelfFromMap(l int, d *codec1978.Decode z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *CPUTargetUtilization) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *CustomMetricTargetList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj5 int - var yyb5 bool - var yyhl5 bool = l >= 0 - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - yyb5 = r.CheckBreak() + yyb6 = r.CheckBreak() } - if yyb5 { + if yyb6 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.TargetPercentage = 0 + x.Items = nil } else { - x.TargetPercentage = int(r.DecodeInt(codecSelferBitsize1234)) + yyv7 := &x.Items + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + h.decSliceCustomMetricTarget((*[]CustomMetricTarget)(yyv7), d) + } } for { - yyj5++ - if yyhl5 { - yyb5 = 
yyj5 > l + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - yyb5 = r.CheckBreak() + yyb6 = r.CheckBreak() } - if yyb5 { + if yyb6 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj5-1, "") + z.DecStructFieldNotFound(yyj6-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *CustomMetricTarget) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *CustomMetricCurrentStatus) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -1574,7 +1508,7 @@ func (x *CustomMetricTarget) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy7 := &x.TargetValue + yy7 := &x.CurrentValue yym8 := z.EncBinary() _ = yym8 if false { @@ -1588,7 +1522,7 @@ func (x *CustomMetricTarget) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("value")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy9 := &x.TargetValue + yy9 := &x.CurrentValue yym10 := z.EncBinary() _ = yym10 if false { @@ -1608,7 +1542,7 @@ func (x *CustomMetricTarget) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *CustomMetricTarget) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *CustomMetricCurrentStatus) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -1638,7 +1572,7 @@ func (x *CustomMetricTarget) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *CustomMetricTarget) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *CustomMetricCurrentStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -1668,9 +1602,9 @@ func (x *CustomMetricTarget) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) } case "value": if r.TryDecodeAsNil() { - x.TargetValue = pkg4_resource.Quantity{} + x.CurrentValue = pkg4_resource.Quantity{} } else { - yyv5 := &x.TargetValue + yyv5 := &x.CurrentValue yym6 := z.DecBinary() _ = yym6 if false { @@ -1688,7 +1622,7 @@ func (x *CustomMetricTarget) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *CustomMetricTarget) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *CustomMetricCurrentStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -1723,9 +1657,9 @@ func (x *CustomMetricTarget) codecDecodeSelfFromArray(l int, d *codec1978.Decode } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.TargetValue = pkg4_resource.Quantity{} + x.CurrentValue = pkg4_resource.Quantity{} } else { - yyv9 := &x.TargetValue + yyv9 := &x.CurrentValue yym10 := z.DecBinary() _ = yym10 if false { @@ -1752,7 +1686,7 @@ func (x *CustomMetricTarget) codecDecodeSelfFromArray(l int, d *codec1978.Decode z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *CustomMetricTargetList) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *CustomMetricCurrentStatusList) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -1791,7 +1725,7 @@ func (x *CustomMetricTargetList) CodecEncodeSelf(e *codec1978.Encoder) { _ = yym4 if false { } else { - 
h.encSliceCustomMetricTarget(([]CustomMetricTarget)(x.Items), e) + h.encSliceCustomMetricCurrentStatus(([]CustomMetricCurrentStatus)(x.Items), e) } } } else { @@ -1805,7 +1739,7 @@ func (x *CustomMetricTargetList) CodecEncodeSelf(e *codec1978.Encoder) { _ = yym5 if false { } else { - h.encSliceCustomMetricTarget(([]CustomMetricTarget)(x.Items), e) + h.encSliceCustomMetricCurrentStatus(([]CustomMetricCurrentStatus)(x.Items), e) } } } @@ -1818,7 +1752,7 @@ func (x *CustomMetricTargetList) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *CustomMetricTargetList) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *CustomMetricCurrentStatusList) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -1848,7 +1782,7 @@ func (x *CustomMetricTargetList) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *CustomMetricTargetList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *CustomMetricCurrentStatusList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -1879,7 +1813,7 @@ func (x *CustomMetricTargetList) codecDecodeSelfFromMap(l int, d *codec1978.Deco _ = yym5 if false { } else { - h.decSliceCustomMetricTarget((*[]CustomMetricTarget)(yyv4), d) + h.decSliceCustomMetricCurrentStatus((*[]CustomMetricCurrentStatus)(yyv4), d) } } default: @@ -1889,7 +1823,7 @@ func (x *CustomMetricTargetList) codecDecodeSelfFromMap(l int, d *codec1978.Deco z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *CustomMetricTargetList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *CustomMetricCurrentStatusList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -1915,7 +1849,7 @@ func (x *CustomMetricTargetList) codecDecodeSelfFromArray(l int, d *codec1978.De _ = yym8 if false { } else { - h.decSliceCustomMetricTarget((*[]CustomMetricTarget)(yyv7), d) + h.decSliceCustomMetricCurrentStatus((*[]CustomMetricCurrentStatus)(yyv7), d) } } for { @@ -1934,7 +1868,7 @@ func (x *CustomMetricTargetList) codecDecodeSelfFromArray(l int, d *codec1978.De z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *CustomMetricCurrentStatus) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *ThirdPartyResource) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -1948,14 +1882,19 @@ func (x *CustomMetricCurrentStatus) CodecEncodeSelf(e *codec1978.Encoder) { } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool + var yyq2 [5]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false + yyq2[0] = true + yyq2[1] = x.Description != "" + yyq2[2] = len(x.Versions) != 0 + yyq2[3] = x.Kind != "" + yyq2[4] = x.APIVersion != "" var yynn2 int if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) + r.EncodeArrayStart(5) } else { - yynn2 = 2 + yynn2 = 0 for _, b := range yyq2 { if b { yynn2++ @@ -1966,48 +1905,127 @@ func (x *CustomMetricCurrentStatus) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { + if yyq2[0] { + yy4 := &x.ObjectMeta + yy4.CodecEncodeSelf(e) } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + r.EncodeNil() } } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - 
r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.ObjectMeta + yy6.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym9 := z.EncBinary() + _ = yym9 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Description)) + } } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("description")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Description)) + } } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy7 := &x.CurrentValue - yym8 := z.EncBinary() - _ = yym8 - if false { - } else if z.HasExtensions() && z.EncExt(yy7) { - } else if !yym8 && z.IsJSONHandle() { - z.EncJSONMarshal(yy7) + if yyq2[2] { + if x.Versions == nil { + r.EncodeNil() + } else { + yym12 := z.EncBinary() + _ = yym12 + if false { + } else { + h.encSliceAPIVersion(([]APIVersion)(x.Versions), e) + } + } } else { - z.EncFallback(yy7) + r.EncodeNil() } } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("value")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy9 := &x.CurrentValue - yym10 := z.EncBinary() - _ = yym10 - if false { - } else if z.HasExtensions() && z.EncExt(yy9) { - } else if !yym10 && z.IsJSONHandle() { - z.EncJSONMarshal(yy9) + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("versions")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Versions == nil { + r.EncodeNil() + } else { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + h.encSliceAPIVersion(([]APIVersion)(x.Versions), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } } else { - z.EncFallback(yy9) + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym18 := z.EncBinary() + _ = yym18 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + 
r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } } } if yyr2 || yy2arr2 { @@ -2019,7 +2037,7 @@ func (x *CustomMetricCurrentStatus) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *CustomMetricCurrentStatus) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *ThirdPartyResource) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -2049,7 +2067,7 @@ func (x *CustomMetricCurrentStatus) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *CustomMetricCurrentStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *ThirdPartyResource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -2071,27 +2089,43 @@ func (x *CustomMetricCurrentStatus) codecDecodeSelfFromMap(l int, d *codec1978.D yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) switch yys3 { - case "name": + case "metadata": if r.TryDecodeAsNil() { - x.Name = "" + x.ObjectMeta = pkg2_api.ObjectMeta{} } else { - x.Name = string(r.DecodeString()) + yyv4 := &x.ObjectMeta + yyv4.CodecDecodeSelf(d) } - case "value": + case "description": if r.TryDecodeAsNil() { - x.CurrentValue = pkg4_resource.Quantity{} + x.Description = "" } else { - yyv5 := &x.CurrentValue - yym6 := z.DecBinary() - _ = yym6 - if false { - } else if z.HasExtensions() && z.DecExt(yyv5) { - } else if !yym6 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv5) - } else { - z.DecFallback(yyv5, false) + x.Description = string(r.DecodeString()) + } + case "versions": + if r.TryDecodeAsNil() { + x.Versions = nil + } else { + yyv6 := &x.Versions + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + h.decSliceAPIVersion((*[]APIVersion)(yyv6), d) } } + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } default: z.DecStructFieldNotFound(-1, yys3) } // end switch yys3 @@ -2099,71 +2133,117 @@ func (x *CustomMetricCurrentStatus) codecDecodeSelfFromMap(l int, d *codec1978.D z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *CustomMetricCurrentStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *ThirdPartyResource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb7 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb7 { + if yyb10 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Name = "" + x.ObjectMeta = pkg2_api.ObjectMeta{} } else { - x.Name = string(r.DecodeString()) + yyv11 := &x.ObjectMeta + yyv11.CodecDecodeSelf(d) } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb7 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb7 { + if yyb10 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.CurrentValue = pkg4_resource.Quantity{} + x.Description = "" } else { - yyv9 := &x.CurrentValue - yym10 := 
z.DecBinary() - _ = yym10 + x.Description = string(r.DecodeString()) + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Versions = nil + } else { + yyv13 := &x.Versions + yym14 := z.DecBinary() + _ = yym14 if false { - } else if z.HasExtensions() && z.DecExt(yyv9) { - } else if !yym10 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv9) } else { - z.DecFallback(yyv9, false) + h.decSliceAPIVersion((*[]APIVersion)(yyv13), d) } } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb7 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb7 { + if yyb10 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") + z.DecStructFieldNotFound(yyj10-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *CustomMetricCurrentStatusList) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *ThirdPartyResourceList) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -2177,12 +2257,15 @@ func (x *CustomMetricCurrentStatusList) CodecEncodeSelf(e *codec1978.Encoder) { } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool + var yyq2 [4]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false + yyq2[0] = true + yyq2[2] = x.Kind != "" + yyq2[3] = x.APIVersion != "" var yynn2 int if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) + r.EncodeArrayStart(4) } else { yynn2 = 1 for _, b := range yyq2 { @@ -2193,16 +2276,45 @@ func (x *CustomMetricCurrentStatusList) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeMapStart(yynn2) yynn2 = 0 } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yy4 := &x.ListMeta + yym5 := z.EncBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.EncExt(yy4) { + } else { + z.EncFallback(yy4) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.ListMeta + yym7 := z.EncBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.EncExt(yy6) { + } else { + z.EncFallback(yy6) + } + } + } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if x.Items == nil { r.EncodeNil() } else { - yym4 := z.EncBinary() - _ = yym4 + yym9 := z.EncBinary() + _ = yym9 if false { } else { - h.encSliceCustomMetricCurrentStatus(([]CustomMetricCurrentStatus)(x.Items), e) + h.encSliceThirdPartyResource(([]ThirdPartyResource)(x.Items), e) } 
} } else { @@ -2212,11 +2324,61 @@ func (x *CustomMetricCurrentStatusList) CodecEncodeSelf(e *codec1978.Encoder) { if x.Items == nil { r.EncodeNil() } else { - yym5 := z.EncBinary() - _ = yym5 + yym10 := z.EncBinary() + _ = yym10 if false { } else { - h.encSliceCustomMetricCurrentStatus(([]CustomMetricCurrentStatus)(x.Items), e) + h.encSliceThirdPartyResource(([]ThirdPartyResource)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym12 := z.EncBinary() + _ = yym12 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } @@ -2229,7 +2391,7 @@ func (x *CustomMetricCurrentStatusList) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *CustomMetricCurrentStatusList) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *ThirdPartyResourceList) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -2259,7 +2421,7 @@ func (x *CustomMetricCurrentStatusList) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *CustomMetricCurrentStatusList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *ThirdPartyResourceList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -2281,39 +2443,87 @@ func (x *CustomMetricCurrentStatusList) codecDecodeSelfFromMap(l int, d *codec19 yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) switch yys3 { - case "items": + case "metadata": if r.TryDecodeAsNil() { - x.Items = nil + x.ListMeta = pkg1_unversioned.ListMeta{} } else { - yyv4 := &x.Items + yyv4 := &x.ListMeta yym5 := z.DecBinary() _ = yym5 if false { + } else if z.HasExtensions() && z.DecExt(yyv4) { } else { - h.decSliceCustomMetricCurrentStatus((*[]CustomMetricCurrentStatus)(yyv4), d) + z.DecFallback(yyv4, false) } } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *CustomMetricCurrentStatusList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv6 := &x.Items + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + 
h.decSliceThirdPartyResource((*[]ThirdPartyResource)(yyv6), d) + } + } + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ThirdPartyResourceList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb6 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb6 { + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_unversioned.ListMeta{} + } else { + yyv11 := &x.ListMeta + yym12 := z.DecBinary() + _ = yym12 + if false { + } else if z.HasExtensions() && z.DecExt(yyv11) { + } else { + z.DecFallback(yyv11, false) + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -2321,31 +2531,63 @@ func (x *CustomMetricCurrentStatusList) codecDecodeSelfFromArray(l int, d *codec if r.TryDecodeAsNil() { x.Items = nil } else { - yyv7 := &x.Items - yym8 := z.DecBinary() - _ = yym8 + yyv13 := &x.Items + yym14 := z.DecBinary() + _ = yym14 if false { } else { - h.decSliceCustomMetricCurrentStatus((*[]CustomMetricCurrentStatus)(yyv7), d) + h.decSliceThirdPartyResource((*[]ThirdPartyResource)(yyv13), d) } } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb6 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb6 { + if yyb10 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") + z.DecStructFieldNotFound(yyj10-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *HorizontalPodAutoscalerSpec) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *APIVersion) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -2359,16 +2601,15 @@ func (x *HorizontalPodAutoscalerSpec) CodecEncodeSelf(e *codec1978.Encoder) { } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool + var yyq2 [1]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false - yyq2[1] = x.MinReplicas != nil - yyq2[3] = x.CPUUtilization != nil + yyq2[0] = x.Name != "" var yynn2 int if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) + r.EncodeArrayStart(1) } else { - yynn2 = 2 + yynn2 = 0 
for _, b := range yyq2 { if b { yynn2++ @@ -2379,89 +2620,26 @@ func (x *HorizontalPodAutoscalerSpec) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4 := &x.ScaleRef - yy4.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("scaleRef")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ScaleRef - yy6.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.MinReplicas == nil { - r.EncodeNil() - } else { - yy9 := *x.MinReplicas - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeInt(int64(yy9)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("minReplicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.MinReplicas == nil { - r.EncodeNil() - } else { - yy11 := *x.MinReplicas - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeInt(int64(yy11)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeInt(int64(x.MaxReplicas)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("maxReplicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeInt(int64(x.MaxReplicas)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - if x.CPUUtilization == nil { - r.EncodeNil() + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { } else { - x.CPUUtilization.CodecEncodeSelf(e) + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) } } else { - r.EncodeNil() + r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2[3] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("cpuUtilization")) + r.EncodeString(codecSelferC_UTF81234, string("name")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.CPUUtilization == nil { - r.EncodeNil() + yym5 := z.EncBinary() + _ = yym5 + if false { } else { - x.CPUUtilization.CodecEncodeSelf(e) + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) } } } @@ -2474,7 +2652,7 @@ func (x *HorizontalPodAutoscalerSpec) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *HorizontalPodAutoscalerSpec) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *APIVersion) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -2504,7 +2682,7 @@ func (x *HorizontalPodAutoscalerSpec) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *HorizontalPodAutoscalerSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *APIVersion) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -2526,45 +2704,11 @@ func (x *HorizontalPodAutoscalerSpec) codecDecodeSelfFromMap(l int, d *codec1978 yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) switch yys3 { - case "scaleRef": - if r.TryDecodeAsNil() { - x.ScaleRef = SubresourceReference{} - } else { - yyv4 := &x.ScaleRef 
- yyv4.CodecDecodeSelf(d) - } - case "minReplicas": - if r.TryDecodeAsNil() { - if x.MinReplicas != nil { - x.MinReplicas = nil - } - } else { - if x.MinReplicas == nil { - x.MinReplicas = new(int) - } - yym6 := z.DecBinary() - _ = yym6 - if false { - } else { - *((*int)(x.MinReplicas)) = int(r.DecodeInt(codecSelferBitsize1234)) - } - } - case "maxReplicas": - if r.TryDecodeAsNil() { - x.MaxReplicas = 0 - } else { - x.MaxReplicas = int(r.DecodeInt(codecSelferBitsize1234)) - } - case "cpuUtilization": + case "name": if r.TryDecodeAsNil() { - if x.CPUUtilization != nil { - x.CPUUtilization = nil - } + x.Name = "" } else { - if x.CPUUtilization == nil { - x.CPUUtilization = new(CPUTargetUtilization) - } - x.CPUUtilization.CodecDecodeSelf(d) + x.Name = string(r.DecodeString()) } default: z.DecStructFieldNotFound(-1, yys3) @@ -2573,110 +2717,46 @@ func (x *HorizontalPodAutoscalerSpec) codecDecodeSelfFromMap(l int, d *codec1978 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *HorizontalPodAutoscalerSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *APIVersion) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l + var yyj5 int + var yyb5 bool + var yyhl5 bool = l >= 0 + yyj5++ + if yyhl5 { + yyb5 = yyj5 > l } else { - yyb9 = r.CheckBreak() + yyb5 = r.CheckBreak() } - if yyb9 { + if yyb5 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ScaleRef = SubresourceReference{} - } else { - yyv10 := &x.ScaleRef - yyv10.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l + x.Name = "" } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.MinReplicas != nil { - x.MinReplicas = nil - } - } else { - if x.MinReplicas == nil { - x.MinReplicas = new(int) - } - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - *((*int)(x.MinReplicas)) = int(r.DecodeInt(codecSelferBitsize1234)) - } - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.MaxReplicas = 0 - } else { - x.MaxReplicas = int(r.DecodeInt(codecSelferBitsize1234)) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.CPUUtilization != nil { - x.CPUUtilization = nil - } - } else { - if x.CPUUtilization == nil { - x.CPUUtilization = new(CPUTargetUtilization) - } - x.CPUUtilization.CodecDecodeSelf(d) + x.Name = string(r.DecodeString()) } for { - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l + yyj5++ + if yyhl5 { + yyb5 = yyj5 > l } else { - yyb9 = r.CheckBreak() + yyb5 = r.CheckBreak() } - if yyb9 { + if yyb5 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") + z.DecStructFieldNotFound(yyj5-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *HorizontalPodAutoscalerStatus) 
CodecEncodeSelf(e *codec1978.Encoder) { +func (x *ThirdPartyResourceData) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -2690,17 +2770,18 @@ func (x *HorizontalPodAutoscalerStatus) CodecEncodeSelf(e *codec1978.Encoder) { } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool + var yyq2 [4]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false - yyq2[0] = x.ObservedGeneration != nil - yyq2[1] = x.LastScaleTime != nil - yyq2[4] = x.CurrentCPUUtilizationPercentage != nil + yyq2[0] = true + yyq2[1] = len(x.Data) != 0 + yyq2[2] = x.Kind != "" + yyq2[3] = x.APIVersion != "" var yynn2 int if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) + r.EncodeArrayStart(4) } else { - yynn2 = 2 + yynn2 = 0 for _, b := range yyq2 { if b { yynn2++ @@ -2712,54 +2793,31 @@ func (x *HorizontalPodAutoscalerStatus) CodecEncodeSelf(e *codec1978.Encoder) { if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if yyq2[0] { - if x.ObservedGeneration == nil { - r.EncodeNil() - } else { - yy4 := *x.ObservedGeneration - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(yy4)) - } - } + yy4 := &x.ObjectMeta + yy4.CodecEncodeSelf(e) } else { r.EncodeNil() } } else { if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("observedGeneration")) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ObservedGeneration == nil { - r.EncodeNil() - } else { - yy6 := *x.ObservedGeneration - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeInt(int64(yy6)) - } - } + yy6 := &x.ObjectMeta + yy6.CodecEncodeSelf(e) } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if yyq2[1] { - if x.LastScaleTime == nil { + if x.Data == nil { r.EncodeNil() } else { yym9 := z.EncBinary() _ = yym9 if false { - } else if z.HasExtensions() && z.EncExt(x.LastScaleTime) { - } else if yym9 { - z.EncBinaryMarshal(x.LastScaleTime) - } else if !yym9 && z.IsJSONHandle() { - z.EncJSONMarshal(x.LastScaleTime) } else { - z.EncFallback(x.LastScaleTime) + r.EncodeStringBytes(codecSelferC_RAW1234, []byte(x.Data)) } } } else { @@ -2768,95 +2826,67 @@ func (x *HorizontalPodAutoscalerStatus) CodecEncodeSelf(e *codec1978.Encoder) { } else { if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lastScaleTime")) + r.EncodeString(codecSelferC_UTF81234, string("data")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.LastScaleTime == nil { + if x.Data == nil { r.EncodeNil() } else { yym10 := z.EncBinary() _ = yym10 if false { - } else if z.HasExtensions() && z.EncExt(x.LastScaleTime) { - } else if yym10 { - z.EncBinaryMarshal(x.LastScaleTime) - } else if !yym10 && z.IsJSONHandle() { - z.EncJSONMarshal(x.LastScaleTime) } else { - z.EncFallback(x.LastScaleTime) + r.EncodeStringBytes(codecSelferC_RAW1234, []byte(x.Data)) } } } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeInt(int64(x.CurrentReplicas)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("currentReplicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false 
{ - } else { - r.EncodeInt(int64(x.CurrentReplicas)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym15 := z.EncBinary() - _ = yym15 - if false { + if yyq2[2] { + yym12 := z.EncBinary() + _ = yym12 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } } else { - r.EncodeInt(int64(x.DesiredReplicas)) + r.EncodeString(codecSelferC_UTF81234, "") } } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("desiredReplicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeInt(int64(x.DesiredReplicas)) + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - if x.CurrentCPUUtilizationPercentage == nil { - r.EncodeNil() + if yyq2[3] { + yym15 := z.EncBinary() + _ = yym15 + if false { } else { - yy18 := *x.CurrentCPUUtilizationPercentage - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeInt(int64(yy18)) - } + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } else { - r.EncodeNil() + r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2[4] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("currentCPUUtilizationPercentage")) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.CurrentCPUUtilizationPercentage == nil { - r.EncodeNil() + yym16 := z.EncBinary() + _ = yym16 + if false { } else { - yy20 := *x.CurrentCPUUtilizationPercentage - yym21 := z.EncBinary() - _ = yym21 - if false { - } else { - r.EncodeInt(int64(yy20)) - } + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } @@ -2869,7 +2899,7 @@ func (x *HorizontalPodAutoscalerStatus) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *HorizontalPodAutoscalerStatus) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *ThirdPartyResourceData) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -2899,7 +2929,7 @@ func (x *HorizontalPodAutoscalerStatus) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *HorizontalPodAutoscalerStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *ThirdPartyResourceData) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -2921,70 +2951,36 @@ func (x *HorizontalPodAutoscalerStatus) codecDecodeSelfFromMap(l int, d *codec19 yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) switch yys3 { - case "observedGeneration": + case "metadata": if r.TryDecodeAsNil() { - if x.ObservedGeneration != nil { - x.ObservedGeneration = nil - } + x.ObjectMeta = pkg2_api.ObjectMeta{} } else { - if x.ObservedGeneration == nil { - x.ObservedGeneration = new(int64) - } - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*int64)(x.ObservedGeneration)) = int64(r.DecodeInt(64)) - } + yyv4 := &x.ObjectMeta + yyv4.CodecDecodeSelf(d) } - case "lastScaleTime": + case 
"data": if r.TryDecodeAsNil() { - if x.LastScaleTime != nil { - x.LastScaleTime = nil - } + x.Data = nil } else { - if x.LastScaleTime == nil { - x.LastScaleTime = new(pkg1_unversioned.Time) - } - yym7 := z.DecBinary() - _ = yym7 + yyv5 := &x.Data + yym6 := z.DecBinary() + _ = yym6 if false { - } else if z.HasExtensions() && z.DecExt(x.LastScaleTime) { - } else if yym7 { - z.DecBinaryUnmarshal(x.LastScaleTime) - } else if !yym7 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.LastScaleTime) } else { - z.DecFallback(x.LastScaleTime, false) + *yyv5 = r.DecodeBytes(*(*[]byte)(yyv5), false, false) } } - case "currentReplicas": - if r.TryDecodeAsNil() { - x.CurrentReplicas = 0 - } else { - x.CurrentReplicas = int(r.DecodeInt(codecSelferBitsize1234)) - } - case "desiredReplicas": + case "kind": if r.TryDecodeAsNil() { - x.DesiredReplicas = 0 + x.Kind = "" } else { - x.DesiredReplicas = int(r.DecodeInt(codecSelferBitsize1234)) + x.Kind = string(r.DecodeString()) } - case "currentCPUUtilizationPercentage": + case "apiVersion": if r.TryDecodeAsNil() { - if x.CurrentCPUUtilizationPercentage != nil { - x.CurrentCPUUtilizationPercentage = nil - } + x.APIVersion = "" } else { - if x.CurrentCPUUtilizationPercentage == nil { - x.CurrentCPUUtilizationPercentage = new(int) - } - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - *((*int)(x.CurrentCPUUtilizationPercentage)) = int(r.DecodeInt(codecSelferBitsize1234)) - } + x.APIVersion = string(r.DecodeString()) } default: z.DecStructFieldNotFound(-1, yys3) @@ -2993,145 +2989,101 @@ func (x *HorizontalPodAutoscalerStatus) codecDecodeSelfFromMap(l int, d *codec19 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *HorizontalPodAutoscalerStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *ThirdPartyResourceData) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l + var yyj9 int + var yyb9 bool + var yyhl9 bool = l >= 0 + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l } else { - yyb12 = r.CheckBreak() + yyb9 = r.CheckBreak() } - if yyb12 { + if yyb9 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - if x.ObservedGeneration != nil { - x.ObservedGeneration = nil - } + x.ObjectMeta = pkg2_api.ObjectMeta{} } else { - if x.ObservedGeneration == nil { - x.ObservedGeneration = new(int64) - } - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*int64)(x.ObservedGeneration)) = int64(r.DecodeInt(64)) - } + yyv10 := &x.ObjectMeta + yyv10.CodecDecodeSelf(d) } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l } else { - yyb12 = r.CheckBreak() + yyb9 = r.CheckBreak() } - if yyb12 { + if yyb9 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - if x.LastScaleTime != nil { - x.LastScaleTime = nil - } + x.Data = nil } else { - if x.LastScaleTime == nil { - x.LastScaleTime = new(pkg1_unversioned.Time) - } - yym16 := z.DecBinary() - _ = yym16 + yyv11 := &x.Data + yym12 := z.DecBinary() + _ = yym12 if false { - } else if z.HasExtensions() && z.DecExt(x.LastScaleTime) { - } else if yym16 { - z.DecBinaryUnmarshal(x.LastScaleTime) - } else if !yym16 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.LastScaleTime) } else 
{ - z.DecFallback(x.LastScaleTime, false) + *yyv11 = r.DecodeBytes(*(*[]byte)(yyv11), false, false) } } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.CurrentReplicas = 0 - } else { - x.CurrentReplicas = int(r.DecodeInt(codecSelferBitsize1234)) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l } else { - yyb12 = r.CheckBreak() + yyb9 = r.CheckBreak() } - if yyb12 { + if yyb9 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.DesiredReplicas = 0 + x.Kind = "" } else { - x.DesiredReplicas = int(r.DecodeInt(codecSelferBitsize1234)) + x.Kind = string(r.DecodeString()) } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l } else { - yyb12 = r.CheckBreak() + yyb9 = r.CheckBreak() } - if yyb12 { + if yyb9 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - if x.CurrentCPUUtilizationPercentage != nil { - x.CurrentCPUUtilizationPercentage = nil - } + x.APIVersion = "" } else { - if x.CurrentCPUUtilizationPercentage == nil { - x.CurrentCPUUtilizationPercentage = new(int) - } - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - *((*int)(x.CurrentCPUUtilizationPercentage)) = int(r.DecodeInt(codecSelferBitsize1234)) - } + x.APIVersion = string(r.DecodeString()) } for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l } else { - yyb12 = r.CheckBreak() + yyb9 = r.CheckBreak() } - if yyb12 { + if yyb9 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") + z.DecStructFieldNotFound(yyj9-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *HorizontalPodAutoscaler) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *Deployment) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -3276,7 +3228,7 @@ func (x *HorizontalPodAutoscaler) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *HorizontalPodAutoscaler) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *Deployment) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -3306,7 +3258,7 @@ func (x *HorizontalPodAutoscaler) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *HorizontalPodAutoscaler) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *Deployment) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -3337,14 +3289,14 @@ func (x *HorizontalPodAutoscaler) codecDecodeSelfFromMap(l int, d *codec1978.Dec } case "spec": if r.TryDecodeAsNil() { - x.Spec = HorizontalPodAutoscalerSpec{} + x.Spec = DeploymentSpec{} } else { yyv5 := &x.Spec yyv5.CodecDecodeSelf(d) } case "status": if r.TryDecodeAsNil() { - x.Status = HorizontalPodAutoscalerStatus{} + x.Status = DeploymentStatus{} } else { yyv6 := &x.Status yyv6.CodecDecodeSelf(d) @@ -3368,7 +3320,7 @@ func (x *HorizontalPodAutoscaler) codecDecodeSelfFromMap(l int, d *codec1978.Dec z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x 
*HorizontalPodAutoscaler) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *Deployment) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -3404,7 +3356,7 @@ func (x *HorizontalPodAutoscaler) codecDecodeSelfFromArray(l int, d *codec1978.D } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Spec = HorizontalPodAutoscalerSpec{} + x.Spec = DeploymentSpec{} } else { yyv11 := &x.Spec yyv11.CodecDecodeSelf(d) @@ -3421,7 +3373,7 @@ func (x *HorizontalPodAutoscaler) codecDecodeSelfFromArray(l int, d *codec1978.D } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Status = HorizontalPodAutoscalerStatus{} + x.Status = DeploymentStatus{} } else { yyv12 := &x.Status yyv12.CodecDecodeSelf(d) @@ -3474,7 +3426,7 @@ func (x *HorizontalPodAutoscaler) codecDecodeSelfFromArray(l int, d *codec1978.D z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *HorizontalPodAutoscalerList) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *DeploymentSpec) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -3488,15 +3440,19 @@ func (x *HorizontalPodAutoscalerList) CodecEncodeSelf(e *codec1978.Encoder) { } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool + var yyq2 [8]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false - yyq2[0] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" + yyq2[0] = x.Replicas != 0 + yyq2[1] = x.Selector != nil + yyq2[3] = true + yyq2[4] = x.MinReadySeconds != 0 + yyq2[5] = x.RevisionHistoryLimit != nil + yyq2[6] = x.Paused != false + yyq2[7] = x.RollbackTo != nil var yynn2 int if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) + r.EncodeArrayStart(8) } else { yynn2 = 1 for _, b := range yyq2 { @@ -3510,106 +3466,196 @@ func (x *HorizontalPodAutoscalerList) CodecEncodeSelf(e *codec1978.Encoder) { if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if yyq2[0] { - yy4 := &x.ListMeta - yym5 := z.EncBinary() - _ = yym5 + yym4 := z.EncBinary() + _ = yym4 if false { - } else if z.HasExtensions() && z.EncExt(yy4) { } else { - z.EncFallback(yy4) + r.EncodeInt(int64(x.Replicas)) } } else { - r.EncodeNil() + r.EncodeInt(0) } } else { if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) + r.EncodeString(codecSelferC_UTF81234, string("replicas")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ListMeta - yym7 := z.EncBinary() - _ = yym7 + yym5 := z.EncBinary() + _ = yym5 if false { - } else if z.HasExtensions() && z.EncExt(yy6) { } else { - z.EncFallback(yy6) + r.EncodeInt(int64(x.Replicas)) } } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() + if yyq2[1] { + if x.Selector == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.EncExt(x.Selector) { + } else { + z.EncFallback(x.Selector) + } + } } else { - yym9 := z.EncBinary() - _ = yym9 - if false { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("selector")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Selector == nil { + r.EncodeNil() } else { - 
h.encSliceHorizontalPodAutoscaler(([]HorizontalPodAutoscaler)(x.Items), e) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.EncExt(x.Selector) { + } else { + z.EncFallback(x.Selector) + } } } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy10 := &x.Template + yy10.CodecEncodeSelf(e) } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) + r.EncodeString(codecSelferC_UTF81234, string("template")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { + yy12 := &x.Template + yy12.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.Strategy + yy15.CodecEncodeSelf(e) + } else { r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("strategy")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Strategy + yy17.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeInt(int64(x.MinReadySeconds)) + } } else { - yym10 := z.EncBinary() - _ = yym10 + r.EncodeInt(0) + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("minReadySeconds")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym21 := z.EncBinary() + _ = yym21 if false { } else { - h.encSliceHorizontalPodAutoscaler(([]HorizontalPodAutoscaler)(x.Items), e) + r.EncodeInt(int64(x.MinReadySeconds)) } } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { + if yyq2[5] { + if x.RevisionHistoryLimit == nil { + r.EncodeNil() } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + yy23 := *x.RevisionHistoryLimit + yym24 := z.EncBinary() + _ = yym24 + if false { + } else { + r.EncodeInt(int64(yy23)) + } } } else { - r.EncodeString(codecSelferC_UTF81234, "") + r.EncodeNil() } } else { - if yyq2[2] { + if yyq2[5] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) + r.EncodeString(codecSelferC_UTF81234, string("revisionHistoryLimit")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { + if x.RevisionHistoryLimit == nil { + r.EncodeNil() } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + yy25 := *x.RevisionHistoryLimit + yym26 := z.EncBinary() + _ = yym26 + if false { + } else { + r.EncodeInt(int64(yy25)) + } } } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 + if yyq2[6] { + yym28 := z.EncBinary() + _ = yym28 if false { } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + r.EncodeBool(bool(x.Paused)) } } else { - r.EncodeString(codecSelferC_UTF81234, "") + r.EncodeBool(false) } } else { - if yyq2[3] { + if yyq2[6] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + r.EncodeString(codecSelferC_UTF81234, string("paused")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 + yym29 := 
z.EncBinary() + _ = yym29 if false { } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + r.EncodeBool(bool(x.Paused)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[7] { + if x.RollbackTo == nil { + r.EncodeNil() + } else { + x.RollbackTo.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[7] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("rollbackTo")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.RollbackTo == nil { + r.EncodeNil() + } else { + x.RollbackTo.CodecEncodeSelf(e) } } } @@ -3622,7 +3668,7 @@ func (x *HorizontalPodAutoscalerList) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *HorizontalPodAutoscalerList) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *DeploymentSpec) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -3652,7 +3698,7 @@ func (x *HorizontalPodAutoscalerList) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *HorizontalPodAutoscalerList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *DeploymentSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -3674,151 +3720,269 @@ func (x *HorizontalPodAutoscalerList) codecDecodeSelfFromMap(l int, d *codec1978 yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) switch yys3 { - case "metadata": + case "replicas": if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} + x.Replicas = 0 } else { - yyv4 := &x.ListMeta - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else { - z.DecFallback(yyv4, false) - } + x.Replicas = int32(r.DecodeInt(32)) } - case "items": + case "selector": if r.TryDecodeAsNil() { - x.Items = nil + if x.Selector != nil { + x.Selector = nil + } } else { - yyv6 := &x.Items - yym7 := z.DecBinary() - _ = yym7 + if x.Selector == nil { + x.Selector = new(pkg1_unversioned.LabelSelector) + } + yym6 := z.DecBinary() + _ = yym6 if false { + } else if z.HasExtensions() && z.DecExt(x.Selector) { } else { - h.decSliceHorizontalPodAutoscaler((*[]HorizontalPodAutoscaler)(yyv6), d) + z.DecFallback(x.Selector, false) } } - case "kind": + case "template": if r.TryDecodeAsNil() { - x.Kind = "" + x.Template = pkg2_api.PodTemplateSpec{} } else { - x.Kind = string(r.DecodeString()) + yyv7 := &x.Template + yyv7.CodecDecodeSelf(d) } - case "apiVersion": + case "strategy": if r.TryDecodeAsNil() { - x.APIVersion = "" + x.Strategy = DeploymentStrategy{} } else { - x.APIVersion = string(r.DecodeString()) + yyv8 := &x.Strategy + yyv8.CodecDecodeSelf(d) } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *HorizontalPodAutoscalerList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l + case "minReadySeconds": + if r.TryDecodeAsNil() { + x.MinReadySeconds = 0 + } else { + x.MinReadySeconds = int32(r.DecodeInt(32)) + } + case "revisionHistoryLimit": + if r.TryDecodeAsNil() { + if x.RevisionHistoryLimit != nil { + x.RevisionHistoryLimit = nil + } + } else { + if 
x.RevisionHistoryLimit == nil { + x.RevisionHistoryLimit = new(int32) + } + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*int32)(x.RevisionHistoryLimit)) = int32(r.DecodeInt(32)) + } + } + case "paused": + if r.TryDecodeAsNil() { + x.Paused = false + } else { + x.Paused = bool(r.DecodeBool()) + } + case "rollbackTo": + if r.TryDecodeAsNil() { + if x.RollbackTo != nil { + x.RollbackTo = nil + } + } else { + if x.RollbackTo == nil { + x.RollbackTo = new(RollbackConfig) + } + x.RollbackTo.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *DeploymentSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj14 int + var yyb14 bool + var yyhl14 bool = l >= 0 + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb10 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb10 { + if yyb14 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} + x.Replicas = 0 } else { - yyv11 := &x.ListMeta - yym12 := z.DecBinary() - _ = yym12 + x.Replicas = int32(r.DecodeInt(32)) + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Selector != nil { + x.Selector = nil + } + } else { + if x.Selector == nil { + x.Selector = new(pkg1_unversioned.LabelSelector) + } + yym17 := z.DecBinary() + _ = yym17 if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { + } else if z.HasExtensions() && z.DecExt(x.Selector) { } else { - z.DecFallback(yyv11, false) + z.DecFallback(x.Selector, false) } } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb10 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb10 { + if yyb14 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Items = nil + x.Template = pkg2_api.PodTemplateSpec{} } else { - yyv13 := &x.Items - yym14 := z.DecBinary() - _ = yym14 + yyv18 := &x.Template + yyv18.CodecDecodeSelf(d) + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Strategy = DeploymentStrategy{} + } else { + yyv19 := &x.Strategy + yyv19.CodecDecodeSelf(d) + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.MinReadySeconds = 0 + } else { + x.MinReadySeconds = int32(r.DecodeInt(32)) + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.RevisionHistoryLimit != nil { + x.RevisionHistoryLimit = nil + } + } else { + if 
x.RevisionHistoryLimit == nil { + x.RevisionHistoryLimit = new(int32) + } + yym22 := z.DecBinary() + _ = yym22 if false { } else { - h.decSliceHorizontalPodAutoscaler((*[]HorizontalPodAutoscaler)(yyv13), d) + *((*int32)(x.RevisionHistoryLimit)) = int32(r.DecodeInt(32)) } } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb10 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb10 { + if yyb14 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Kind = "" + x.Paused = false } else { - x.Kind = string(r.DecodeString()) + x.Paused = bool(r.DecodeBool()) } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb10 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb10 { + if yyb14 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.APIVersion = "" + if x.RollbackTo != nil { + x.RollbackTo = nil + } } else { - x.APIVersion = string(r.DecodeString()) + if x.RollbackTo == nil { + x.RollbackTo = new(RollbackConfig) + } + x.RollbackTo.CodecDecodeSelf(d) } for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l } else { - yyb10 = r.CheckBreak() + yyb14 = r.CheckBreak() } - if yyb10 { + if yyb14 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") + z.DecStructFieldNotFound(yyj14-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *ThirdPartyResource) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *DeploymentRollback) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -3835,16 +3999,14 @@ func (x *ThirdPartyResource) CodecEncodeSelf(e *codec1978.Encoder) { var yyq2 [5]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false - yyq2[0] = true - yyq2[1] = x.Description != "" - yyq2[2] = len(x.Versions) != 0 + yyq2[1] = len(x.UpdatedAnnotations) != 0 yyq2[3] = x.Kind != "" yyq2[4] = x.APIVersion != "" var yynn2 int if yyr2 || yy2arr2 { r.EncodeArrayStart(5) } else { - yynn2 = 0 + yynn2 = 2 for _, b := range yyq2 { if b { yynn2++ @@ -3855,78 +4017,66 @@ func (x *ThirdPartyResource) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) + yym4 := z.EncBinary() + _ = yym4 + if false { } else { - r.EncodeNil() + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) } } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if yyq2[1] { - yym9 := z.EncBinary() - _ = yym9 - if false { + if x.UpdatedAnnotations == nil { + r.EncodeNil() } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Description)) + yym7 := 
z.EncBinary() + _ = yym7 + if false { + } else { + z.F.EncMapStringStringV(x.UpdatedAnnotations, false, e) + } } } else { - r.EncodeString(codecSelferC_UTF81234, "") + r.EncodeNil() } } else { if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("description")) + r.EncodeString(codecSelferC_UTF81234, string("updatedAnnotations")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Description)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.Versions == nil { + if x.UpdatedAnnotations == nil { r.EncodeNil() } else { - yym12 := z.EncBinary() - _ = yym12 + yym8 := z.EncBinary() + _ = yym8 if false { } else { - h.encSliceAPIVersion(([]APIVersion)(x.Versions), e) + z.F.EncMapStringStringV(x.UpdatedAnnotations, false, e) } } - } else { - r.EncodeNil() } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy10 := &x.RollbackTo + yy10.CodecEncodeSelf(e) } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("versions")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Versions == nil { - r.EncodeNil() - } else { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - h.encSliceAPIVersion(([]APIVersion)(x.Versions), e) - } - } - } + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("rollbackTo")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.RollbackTo + yy12.CodecEncodeSelf(e) } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) @@ -3987,7 +4137,7 @@ func (x *ThirdPartyResource) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *ThirdPartyResource) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *DeploymentRollback) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -4017,7 +4167,7 @@ func (x *ThirdPartyResource) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *ThirdPartyResource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *DeploymentRollback) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -4039,31 +4189,31 @@ func (x *ThirdPartyResource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_api.ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "description": + case "name": if r.TryDecodeAsNil() { - x.Description = "" + x.Name = "" } else { - x.Description = string(r.DecodeString()) + x.Name = string(r.DecodeString()) } - case "versions": + case "updatedAnnotations": if r.TryDecodeAsNil() { - x.Versions = nil + x.UpdatedAnnotations = nil } else { - yyv6 := &x.Versions - yym7 := z.DecBinary() - _ = yym7 + yyv5 := &x.UpdatedAnnotations + yym6 := z.DecBinary() + _ = yym6 if false { } else { - h.decSliceAPIVersion((*[]APIVersion)(yyv6), d) + z.F.DecMapStringStringX(yyv5, false, d) } } + case "rollbackTo": + if r.TryDecodeAsNil() { + x.RollbackTo = RollbackConfig{} + } else { + yyv7 := &x.RollbackTo + yyv7.CodecDecodeSelf(d) + } 
case "kind": if r.TryDecodeAsNil() { x.Kind = "" @@ -4083,7 +4233,7 @@ func (x *ThirdPartyResource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *ThirdPartyResource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *DeploymentRollback) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -4102,10 +4252,9 @@ func (x *ThirdPartyResource) codecDecodeSelfFromArray(l int, d *codec1978.Decode } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_api.ObjectMeta{} + x.Name = "" } else { - yyv11 := &x.ObjectMeta - yyv11.CodecDecodeSelf(d) + x.Name = string(r.DecodeString()) } yyj10++ if yyhl10 { @@ -4119,9 +4268,15 @@ func (x *ThirdPartyResource) codecDecodeSelfFromArray(l int, d *codec1978.Decode } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Description = "" + x.UpdatedAnnotations = nil } else { - x.Description = string(r.DecodeString()) + yyv12 := &x.UpdatedAnnotations + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + z.F.DecMapStringStringX(yyv12, false, d) + } } yyj10++ if yyhl10 { @@ -4135,15 +4290,10 @@ func (x *ThirdPartyResource) codecDecodeSelfFromArray(l int, d *codec1978.Decode } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Versions = nil + x.RollbackTo = RollbackConfig{} } else { - yyv13 := &x.Versions - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSliceAPIVersion((*[]APIVersion)(yyv13), d) - } + yyv14 := &x.RollbackTo + yyv14.CodecDecodeSelf(d) } yyj10++ if yyhl10 { @@ -4193,7 +4343,7 @@ func (x *ThirdPartyResource) codecDecodeSelfFromArray(l int, d *codec1978.Decode z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *ThirdPartyResourceList) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *RollbackConfig) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -4207,17 +4357,15 @@ func (x *ThirdPartyResourceList) CodecEncodeSelf(e *codec1978.Encoder) { } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool + var yyq2 [1]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false - yyq2[0] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" + yyq2[0] = x.Revision != 0 var yynn2 int if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) + r.EncodeArrayStart(1) } else { - yynn2 = 1 + yynn2 = 0 for _, b := range yyq2 { if b { yynn2++ @@ -4229,106 +4377,25 @@ func (x *ThirdPartyResourceList) CodecEncodeSelf(e *codec1978.Encoder) { if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if yyq2[0] { - yy4 := &x.ListMeta - yym5 := z.EncBinary() - _ = yym5 + yym4 := z.EncBinary() + _ = yym4 if false { - } else if z.HasExtensions() && z.EncExt(yy4) { } else { - z.EncFallback(yy4) + r.EncodeInt(int64(x.Revision)) } } else { - r.EncodeNil() + r.EncodeInt(0) } } else { if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ListMeta - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else { - z.EncFallback(yy6) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if 
x.Items == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSliceThirdPartyResource(([]ThirdPartyResource)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSliceThirdPartyResource(([]ThirdPartyResource)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + r.EncodeString(codecSelferC_UTF81234, string("revision")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 + yym5 := z.EncBinary() + _ = yym5 if false { } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + r.EncodeInt(int64(x.Revision)) } } } @@ -4341,7 +4408,7 @@ func (x *ThirdPartyResourceList) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *ThirdPartyResourceList) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *RollbackConfig) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -4371,7 +4438,7 @@ func (x *ThirdPartyResourceList) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *ThirdPartyResourceList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *RollbackConfig) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -4393,42 +4460,11 @@ func (x *ThirdPartyResourceList) codecDecodeSelfFromMap(l int, d *codec1978.Deco yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv4 := &x.ListMeta - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else { - z.DecFallback(yyv4, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv6 := &x.Items - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceThirdPartyResource((*[]ThirdPartyResource)(yyv6), d) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": + case "revision": if r.TryDecodeAsNil() { - x.APIVersion = "" + x.Revision = 0 } else { - x.APIVersion = string(r.DecodeString()) + x.Revision = 
int64(r.DecodeInt(64)) } default: z.DecStructFieldNotFound(-1, yys3) @@ -4437,107 +4473,46 @@ func (x *ThirdPartyResourceList) codecDecodeSelfFromMap(l int, d *codec1978.Deco z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *ThirdPartyResourceList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *RollbackConfig) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv11 := &x.ListMeta - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else { - z.DecFallback(yyv11, false) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv13 := &x.Items - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSliceThirdPartyResource((*[]ThirdPartyResource)(yyv13), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l + var yyj5 int + var yyb5 bool + var yyhl5 bool = l >= 0 + yyj5++ + if yyhl5 { + yyb5 = yyj5 > l } else { - yyb10 = r.CheckBreak() + yyb5 = r.CheckBreak() } - if yyb10 { + if yyb5 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.APIVersion = "" + x.Revision = 0 } else { - x.APIVersion = string(r.DecodeString()) + x.Revision = int64(r.DecodeInt(64)) } for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l + yyj5++ + if yyhl5 { + yyb5 = yyj5 > l } else { - yyb10 = r.CheckBreak() + yyb5 = r.CheckBreak() } - if yyb10 { + if yyb5 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") + z.DecStructFieldNotFound(yyj5-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *APIVersion) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *DeploymentStrategy) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -4554,8 +4529,8 @@ func (x *APIVersion) CodecEncodeSelf(e *codec1978.Encoder) { var yyq2 [2]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false - yyq2[0] = x.Name != "" - yyq2[1] = x.APIGroup != "" + yyq2[0] = x.Type != "" + yyq2[1] = x.RollingUpdate != nil var yynn2 int if yyr2 || yy2arr2 { r.EncodeArrayStart(2) @@ -4572,50 +4547,38 @@ func (x *APIVersion) CodecEncodeSelf(e *codec1978.Encoder) { if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } + x.Type.CodecEncodeSelf(e) } 
else { r.EncodeString(codecSelferC_UTF81234, "") } } else { if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) + r.EncodeString(codecSelferC_UTF81234, string("type")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } + x.Type.CodecEncodeSelf(e) } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { + if x.RollingUpdate == nil { + r.EncodeNil() } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIGroup)) + x.RollingUpdate.CodecEncodeSelf(e) } } else { - r.EncodeString(codecSelferC_UTF81234, "") + r.EncodeNil() } } else { if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiGroup")) + r.EncodeString(codecSelferC_UTF81234, string("rollingUpdate")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { + if x.RollingUpdate == nil { + r.EncodeNil() } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIGroup)) + x.RollingUpdate.CodecEncodeSelf(e) } } } @@ -4628,7 +4591,7 @@ func (x *APIVersion) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *APIVersion) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *DeploymentStrategy) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -4658,7 +4621,7 @@ func (x *APIVersion) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *APIVersion) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *DeploymentStrategy) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -4680,17 +4643,22 @@ func (x *APIVersion) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) switch yys3 { - case "name": + case "type": if r.TryDecodeAsNil() { - x.Name = "" + x.Type = "" } else { - x.Name = string(r.DecodeString()) + x.Type = DeploymentStrategyType(r.DecodeString()) } - case "apiGroup": + case "rollingUpdate": if r.TryDecodeAsNil() { - x.APIGroup = "" + if x.RollingUpdate != nil { + x.RollingUpdate = nil + } } else { - x.APIGroup = string(r.DecodeString()) + if x.RollingUpdate == nil { + x.RollingUpdate = new(RollingUpdateDeployment) + } + x.RollingUpdate.CodecDecodeSelf(d) } default: z.DecStructFieldNotFound(-1, yys3) @@ -4699,7 +4667,7 @@ func (x *APIVersion) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *APIVersion) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *DeploymentStrategy) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -4718,9 +4686,9 @@ func (x *APIVersion) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Name = "" + x.Type = "" } else { - x.Name = string(r.DecodeString()) + x.Type = DeploymentStrategyType(r.DecodeString()) } yyj6++ if yyhl6 { @@ -4734,9 +4702,14 @@ func (x *APIVersion) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if 
r.TryDecodeAsNil() { - x.APIGroup = "" + if x.RollingUpdate != nil { + x.RollingUpdate = nil + } } else { - x.APIGroup = string(r.DecodeString()) + if x.RollingUpdate == nil { + x.RollingUpdate = new(RollingUpdateDeployment) + } + x.RollingUpdate.CodecDecodeSelf(d) } for { yyj6++ @@ -4754,7 +4727,33 @@ func (x *APIVersion) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *ThirdPartyResourceData) CodecEncodeSelf(e *codec1978.Encoder) { +func (x DeploymentStrategyType) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *DeploymentStrategyType) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *RollingUpdateDeployment) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -4768,16 +4767,14 @@ func (x *ThirdPartyResourceData) CodecEncodeSelf(e *codec1978.Encoder) { } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool + var yyq2 [2]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false yyq2[0] = true - yyq2[1] = len(x.Data) != 0 - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" + yyq2[1] = true var yynn2 int if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) + r.EncodeArrayStart(2) } else { yynn2 = 0 for _, b := range yyq2 { @@ -4791,32 +4788,48 @@ func (x *ThirdPartyResourceData) CodecEncodeSelf(e *codec1978.Encoder) { if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if yyq2[0] { - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) + yy4 := &x.MaxUnavailable + yym5 := z.EncBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.EncExt(yy4) { + } else if !yym5 && z.IsJSONHandle() { + z.EncJSONMarshal(yy4) + } else { + z.EncFallback(yy4) + } } else { r.EncodeNil() } } else { if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) + r.EncodeString(codecSelferC_UTF81234, string("maxUnavailable")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) + yy6 := &x.MaxUnavailable + yym7 := z.EncBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.EncExt(yy6) { + } else if !yym7 && z.IsJSONHandle() { + z.EncJSONMarshal(yy6) + } else { + z.EncFallback(yy6) + } } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if yyq2[1] { - if x.Data == nil { - r.EncodeNil() + yy9 := &x.MaxSurge + yym10 := z.EncBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.EncExt(yy9) { + } else if !yym10 && z.IsJSONHandle() { + z.EncJSONMarshal(yy9) } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - r.EncodeStringBytes(codecSelferC_RAW1234, []byte(x.Data)) - } + z.EncFallback(yy9) } } else { r.EncodeNil() @@ -4824,67 +4837,17 @@ func (x *ThirdPartyResourceData) CodecEncodeSelf(e *codec1978.Encoder) { } else { if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, 
string("data")) + r.EncodeString(codecSelferC_UTF81234, string("maxSurge")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Data == nil { - r.EncodeNil() + yy11 := &x.MaxSurge + yym12 := z.EncBinary() + _ = yym12 + if false { + } else if z.HasExtensions() && z.EncExt(yy11) { + } else if !yym12 && z.IsJSONHandle() { + z.EncJSONMarshal(yy11) } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeStringBytes(codecSelferC_RAW1234, []byte(x.Data)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + z.EncFallback(yy11) } } } @@ -4897,7 +4860,7 @@ func (x *ThirdPartyResourceData) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *ThirdPartyResourceData) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *RollingUpdateDeployment) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -4927,7 +4890,7 @@ func (x *ThirdPartyResourceData) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *ThirdPartyResourceData) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *RollingUpdateDeployment) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -4949,36 +4912,35 @@ func (x *ThirdPartyResourceData) codecDecodeSelfFromMap(l int, d *codec1978.Deco yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_api.ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "data": + case "maxUnavailable": if r.TryDecodeAsNil() { - x.Data = nil + x.MaxUnavailable = pkg5_intstr.IntOrString{} } else { - yyv5 := &x.Data - yym6 := z.DecBinary() - _ = yym6 + yyv4 := &x.MaxUnavailable + yym5 := z.DecBinary() + _ = yym5 if false { + } else if z.HasExtensions() && z.DecExt(yyv4) { + } else if !yym5 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv4) } else { - *yyv5 = r.DecodeBytes(*(*[]byte)(yyv5), false, false) + z.DecFallback(yyv4, false) } } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": + case "maxSurge": if r.TryDecodeAsNil() { - x.APIVersion = "" + x.MaxSurge = pkg5_intstr.IntOrString{} } else { - x.APIVersion = string(r.DecodeString()) + 
yyv6 := &x.MaxSurge + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(yyv6) { + } else if !yym7 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv6) + } else { + z.DecFallback(yyv6, false) + } } default: z.DecStructFieldNotFound(-1, yys3) @@ -4987,101 +4949,80 @@ func (x *ThirdPartyResourceData) codecDecodeSelfFromMap(l int, d *codec1978.Deco z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *ThirdPartyResourceData) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *RollingUpdateDeployment) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb9 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb9 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_api.ObjectMeta{} + x.MaxUnavailable = pkg5_intstr.IntOrString{} } else { - yyv10 := &x.ObjectMeta - yyv10.CodecDecodeSelf(d) + yyv9 := &x.MaxUnavailable + yym10 := z.DecBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.DecExt(yyv9) { + } else if !yym10 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv9) + } else { + z.DecFallback(yyv9, false) + } } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb9 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb9 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Data = nil + x.MaxSurge = pkg5_intstr.IntOrString{} } else { - yyv11 := &x.Data + yyv11 := &x.MaxSurge yym12 := z.DecBinary() _ = yym12 if false { + } else if z.HasExtensions() && z.DecExt(yyv11) { + } else if !yym12 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv11) } else { - *yyv11 = r.DecodeBytes(*(*[]byte)(yyv11), false, false) + z.DecFallback(yyv11, false) } } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } for { - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb9 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb9 { + if yyb8 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") + z.DecStructFieldNotFound(yyj8-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *Deployment) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *DeploymentStatus) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -5098,11 +5039,11 @@ func (x *Deployment) CodecEncodeSelf(e *codec1978.Encoder) { var yyq2 [5]bool _, _, _ = 
yysep2, yyq2, yy2arr2 const yyr2 bool = false - yyq2[0] = true - yyq2[1] = true - yyq2[2] = true - yyq2[3] = x.Kind != "" - yyq2[4] = x.APIVersion != "" + yyq2[0] = x.ObservedGeneration != 0 + yyq2[1] = x.Replicas != 0 + yyq2[2] = x.UpdatedReplicas != 0 + yyq2[3] = x.AvailableReplicas != 0 + yyq2[4] = x.UnavailableReplicas != 0 var yynn2 int if yyr2 || yy2arr2 { r.EncodeArrayStart(5) @@ -5119,101 +5060,125 @@ func (x *Deployment) CodecEncodeSelf(e *codec1978.Encoder) { if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if yyq2[0] { - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeInt(int64(x.ObservedGeneration)) + } } else { - r.EncodeNil() + r.EncodeInt(0) } } else { if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) + r.EncodeString(codecSelferC_UTF81234, string("observedGeneration")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(x.ObservedGeneration)) + } } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if yyq2[1] { - yy9 := &x.Spec - yy9.CodecEncodeSelf(e) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeInt(int64(x.Replicas)) + } } else { - r.EncodeNil() + r.EncodeInt(0) } } else { if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) + r.EncodeString(codecSelferC_UTF81234, string("replicas")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.Spec - yy11.CodecEncodeSelf(e) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeInt(int64(x.Replicas)) + } } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if yyq2[2] { - yy14 := &x.Status - yy14.CodecEncodeSelf(e) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(x.UpdatedReplicas)) + } } else { - r.EncodeNil() + r.EncodeInt(0) } } else { if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) + r.EncodeString(codecSelferC_UTF81234, string("updatedReplicas")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy16 := &x.Status - yy16.CodecEncodeSelf(e) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeInt(int64(x.UpdatedReplicas)) + } } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if yyq2[3] { - yym19 := z.EncBinary() - _ = yym19 + yym13 := z.EncBinary() + _ = yym13 if false { } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + r.EncodeInt(int64(x.AvailableReplicas)) } } else { - r.EncodeString(codecSelferC_UTF81234, "") + r.EncodeInt(0) } } else { if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) + r.EncodeString(codecSelferC_UTF81234, string("availableReplicas")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 + yym14 := z.EncBinary() + _ = yym14 if false { } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + r.EncodeInt(int64(x.AvailableReplicas)) } } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if yyq2[4] { - yym22 := z.EncBinary() - _ = yym22 + yym16 := z.EncBinary() + _ 
= yym16 if false { } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + r.EncodeInt(int64(x.UnavailableReplicas)) } } else { - r.EncodeString(codecSelferC_UTF81234, "") + r.EncodeInt(0) } } else { if yyq2[4] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + r.EncodeString(codecSelferC_UTF81234, string("unavailableReplicas")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym23 := z.EncBinary() - _ = yym23 + yym17 := z.EncBinary() + _ = yym17 if false { } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + r.EncodeInt(int64(x.UnavailableReplicas)) } } } @@ -5226,7 +5191,7 @@ func (x *Deployment) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *Deployment) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *DeploymentStatus) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -5256,7 +5221,7 @@ func (x *Deployment) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *Deployment) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *DeploymentStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -5278,38 +5243,35 @@ func (x *Deployment) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) switch yys3 { - case "metadata": + case "observedGeneration": if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_api.ObjectMeta{} + x.ObservedGeneration = 0 } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) + x.ObservedGeneration = int64(r.DecodeInt(64)) } - case "spec": + case "replicas": if r.TryDecodeAsNil() { - x.Spec = DeploymentSpec{} + x.Replicas = 0 } else { - yyv5 := &x.Spec - yyv5.CodecDecodeSelf(d) + x.Replicas = int32(r.DecodeInt(32)) } - case "status": + case "updatedReplicas": if r.TryDecodeAsNil() { - x.Status = DeploymentStatus{} + x.UpdatedReplicas = 0 } else { - yyv6 := &x.Status - yyv6.CodecDecodeSelf(d) + x.UpdatedReplicas = int32(r.DecodeInt(32)) } - case "kind": + case "availableReplicas": if r.TryDecodeAsNil() { - x.Kind = "" + x.AvailableReplicas = 0 } else { - x.Kind = string(r.DecodeString()) + x.AvailableReplicas = int32(r.DecodeInt(32)) } - case "apiVersion": + case "unavailableReplicas": if r.TryDecodeAsNil() { - x.APIVersion = "" + x.UnavailableReplicas = 0 } else { - x.APIVersion = string(r.DecodeString()) + x.UnavailableReplicas = int32(r.DecodeInt(32)) } default: z.DecStructFieldNotFound(-1, yys3) @@ -5318,7 +5280,7 @@ func (x *Deployment) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *Deployment) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *DeploymentStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -5337,10 +5299,9 @@ func (x *Deployment) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_api.ObjectMeta{} + x.ObservedGeneration = 0 } else { - yyv10 := &x.ObjectMeta - yyv10.CodecDecodeSelf(d) + x.ObservedGeneration = int64(r.DecodeInt(64)) } yyj9++ if yyhl9 { @@ -5354,10 +5315,9 @@ func (x *Deployment) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Spec = DeploymentSpec{} + x.Replicas = 0 } else { - yyv11 := &x.Spec - yyv11.CodecDecodeSelf(d) + x.Replicas = int32(r.DecodeInt(32)) } yyj9++ if yyhl9 { @@ -5371,10 +5331,9 @@ func (x *Deployment) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Status = DeploymentStatus{} + x.UpdatedReplicas = 0 } else { - yyv12 := &x.Status - yyv12.CodecDecodeSelf(d) + x.UpdatedReplicas = int32(r.DecodeInt(32)) } yyj9++ if yyhl9 { @@ -5388,9 +5347,9 @@ func (x *Deployment) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Kind = "" + x.AvailableReplicas = 0 } else { - x.Kind = string(r.DecodeString()) + x.AvailableReplicas = int32(r.DecodeInt(32)) } yyj9++ if yyhl9 { @@ -5404,9 +5363,9 @@ func (x *Deployment) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.APIVersion = "" + x.UnavailableReplicas = 0 } else { - x.APIVersion = string(r.DecodeString()) + x.UnavailableReplicas = int32(r.DecodeInt(32)) } for { yyj9++ @@ -5424,7 +5383,7 @@ func (x *Deployment) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *DeploymentSpec) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *DeploymentList) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -5438,19 +5397,15 @@ func (x *DeploymentSpec) CodecEncodeSelf(e *codec1978.Encoder) { } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [8]bool + var yyq2 [4]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false - yyq2[0] = x.Replicas != 0 - yyq2[1] = x.Selector != nil - yyq2[3] = true - yyq2[4] = x.MinReadySeconds != 0 - yyq2[5] = x.RevisionHistoryLimit != nil - yyq2[6] = x.Paused != false - yyq2[7] = x.RollbackTo != nil + yyq2[0] = true + yyq2[2] = x.Kind != "" + yyq2[3] = x.APIVersion != "" var yynn2 int if yyr2 || yy2arr2 { - r.EncodeArrayStart(8) + r.EncodeArrayStart(4) } else { yynn2 = 1 for _, b := range yyq2 { @@ -5464,196 +5419,106 @@ func (x *DeploymentSpec) CodecEncodeSelf(e *codec1978.Encoder) { if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 + yy4 := &x.ListMeta + yym5 := z.EncBinary() + _ = yym5 if false { + } else if z.HasExtensions() && z.EncExt(yy4) { } else { - r.EncodeInt(int64(x.Replicas)) + z.EncFallback(yy4) } } else { - r.EncodeInt(0) + r.EncodeNil() } } else { if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("replicas")) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 + yy6 := &x.ListMeta + yym7 := z.EncBinary() + _ = yym7 if false { + } else if z.HasExtensions() && z.EncExt(yy6) { } else { - r.EncodeInt(int64(x.Replicas)) + z.EncFallback(yy6) } } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Selector == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(x.Selector) { - } else { - z.EncFallback(x.Selector) - } 
- } - } else { + if x.Items == nil { r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("selector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Selector == nil { - r.EncodeNil() + } else { + yym9 := z.EncBinary() + _ = yym9 + if false { } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else if z.HasExtensions() && z.EncExt(x.Selector) { - } else { - z.EncFallback(x.Selector) - } + h.encSliceDeployment(([]Deployment)(x.Items), e) } } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy10 := &x.Template - yy10.CodecEncodeSelf(e) } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("template")) + r.EncodeString(codecSelferC_UTF81234, string("items")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.Template - yy12.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yy15 := &x.Strategy - yy15.CodecEncodeSelf(e) - } else { + if x.Items == nil { r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("strategy")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy17 := &x.Strategy - yy17.CodecEncodeSelf(e) + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + h.encSliceDeployment(([]Deployment)(x.Items), e) + } } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym20 := z.EncBinary() - _ = yym20 + if yyq2[2] { + yym12 := z.EncBinary() + _ = yym12 if false { } else { - r.EncodeInt(int64(x.MinReadySeconds)) + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } else { - r.EncodeInt(0) + r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2[4] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("minReadySeconds")) + r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym21 := z.EncBinary() - _ = yym21 + yym13 := z.EncBinary() + _ = yym13 if false { } else { - r.EncodeInt(int64(x.MinReadySeconds)) + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - if x.RevisionHistoryLimit == nil { - r.EncodeNil() + if yyq2[3] { + yym15 := z.EncBinary() + _ = yym15 + if false { } else { - yy23 := *x.RevisionHistoryLimit - yym24 := z.EncBinary() - _ = yym24 - if false { - } else { - r.EncodeInt(int64(yy23)) - } + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } else { - r.EncodeNil() + r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2[5] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("revisionHistoryLimit")) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.RevisionHistoryLimit == nil { - r.EncodeNil() + yym16 := z.EncBinary() + _ = yym16 + if false { } else { - yy25 := *x.RevisionHistoryLimit - yym26 := z.EncBinary() - _ = yym26 - if false { - } else { - r.EncodeInt(int64(yy25)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if 
yyq2[6] { - yym28 := z.EncBinary() - _ = yym28 - if false { - } else { - r.EncodeBool(bool(x.Paused)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("paused")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym29 := z.EncBinary() - _ = yym29 - if false { - } else { - r.EncodeBool(bool(x.Paused)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[7] { - if x.RollbackTo == nil { - r.EncodeNil() - } else { - x.RollbackTo.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[7] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("rollbackTo")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.RollbackTo == nil { - r.EncodeNil() - } else { - x.RollbackTo.CodecEncodeSelf(e) + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } @@ -5666,7 +5531,7 @@ func (x *DeploymentSpec) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *DeploymentSpec) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *DeploymentList) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -5696,7 +5561,7 @@ func (x *DeploymentSpec) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *DeploymentSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *DeploymentList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -5718,81 +5583,42 @@ func (x *DeploymentSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) switch yys3 { - case "replicas": - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - x.Replicas = int(r.DecodeInt(codecSelferBitsize1234)) - } - case "selector": + case "metadata": if r.TryDecodeAsNil() { - if x.Selector != nil { - x.Selector = nil - } + x.ListMeta = pkg1_unversioned.ListMeta{} } else { - if x.Selector == nil { - x.Selector = new(pkg1_unversioned.LabelSelector) - } - yym6 := z.DecBinary() - _ = yym6 + yyv4 := &x.ListMeta + yym5 := z.DecBinary() + _ = yym5 if false { - } else if z.HasExtensions() && z.DecExt(x.Selector) { + } else if z.HasExtensions() && z.DecExt(yyv4) { } else { - z.DecFallback(x.Selector, false) + z.DecFallback(yyv4, false) } } - case "template": - if r.TryDecodeAsNil() { - x.Template = pkg2_api.PodTemplateSpec{} - } else { - yyv7 := &x.Template - yyv7.CodecDecodeSelf(d) - } - case "strategy": - if r.TryDecodeAsNil() { - x.Strategy = DeploymentStrategy{} - } else { - yyv8 := &x.Strategy - yyv8.CodecDecodeSelf(d) - } - case "minReadySeconds": - if r.TryDecodeAsNil() { - x.MinReadySeconds = 0 - } else { - x.MinReadySeconds = int(r.DecodeInt(codecSelferBitsize1234)) - } - case "revisionHistoryLimit": + case "items": if r.TryDecodeAsNil() { - if x.RevisionHistoryLimit != nil { - x.RevisionHistoryLimit = nil - } + x.Items = nil } else { - if x.RevisionHistoryLimit == nil { - x.RevisionHistoryLimit = new(int) - } - yym11 := z.DecBinary() - _ = yym11 + yyv6 := &x.Items + yym7 := z.DecBinary() + _ = yym7 if false { } else { - *((*int)(x.RevisionHistoryLimit)) = int(r.DecodeInt(codecSelferBitsize1234)) + h.decSliceDeployment((*[]Deployment)(yyv6), d) } } - case "paused": + case "kind": if r.TryDecodeAsNil() { - x.Paused = false + x.Kind = "" 
} else { - x.Paused = bool(r.DecodeBool()) + x.Kind = string(r.DecodeString()) } - case "rollbackTo": + case "apiVersion": if r.TryDecodeAsNil() { - if x.RollbackTo != nil { - x.RollbackTo = nil - } + x.APIVersion = "" } else { - if x.RollbackTo == nil { - x.RollbackTo = new(RollbackConfig) - } - x.RollbackTo.CodecDecodeSelf(d) + x.APIVersion = string(r.DecodeString()) } default: z.DecStructFieldNotFound(-1, yys3) @@ -5801,186 +5627,107 @@ func (x *DeploymentSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *DeploymentSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *DeploymentList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj14 int - var yyb14 bool - var yyhl14 bool = l >= 0 - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - x.Replicas = int(r.DecodeInt(codecSelferBitsize1234)) - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb14 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb14 { + if yyb10 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - if x.Selector != nil { - x.Selector = nil - } + x.ListMeta = pkg1_unversioned.ListMeta{} } else { - if x.Selector == nil { - x.Selector = new(pkg1_unversioned.LabelSelector) - } - yym17 := z.DecBinary() - _ = yym17 + yyv11 := &x.ListMeta + yym12 := z.DecBinary() + _ = yym12 if false { - } else if z.HasExtensions() && z.DecExt(x.Selector) { + } else if z.HasExtensions() && z.DecExt(yyv11) { } else { - z.DecFallback(x.Selector, false) + z.DecFallback(yyv11, false) } } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb14 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb14 { + if yyb10 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Template = pkg2_api.PodTemplateSpec{} + x.Items = nil } else { - yyv18 := &x.Template - yyv18.CodecDecodeSelf(d) + yyv13 := &x.Items + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + h.decSliceDeployment((*[]Deployment)(yyv13), d) + } } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb14 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb14 { + if yyb10 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Strategy = DeploymentStrategy{} + x.Kind = "" } else { - yyv19 := &x.Strategy - yyv19.CodecDecodeSelf(d) + x.Kind = string(r.DecodeString()) } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb14 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb14 { + if yyb10 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.MinReadySeconds = 0 - } else { - x.MinReadySeconds = 
int(r.DecodeInt(codecSelferBitsize1234)) - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l + x.APIVersion = "" } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return + x.APIVersion = string(r.DecodeString()) } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.RevisionHistoryLimit != nil { - x.RevisionHistoryLimit = nil - } - } else { - if x.RevisionHistoryLimit == nil { - x.RevisionHistoryLimit = new(int) - } - yym22 := z.DecBinary() - _ = yym22 - if false { + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - *((*int)(x.RevisionHistoryLimit)) = int(r.DecodeInt(codecSelferBitsize1234)) + yyb10 = r.CheckBreak() } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Paused = false - } else { - x.Paused = bool(r.DecodeBool()) - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.RollbackTo != nil { - x.RollbackTo = nil - } - } else { - if x.RollbackTo == nil { - x.RollbackTo = new(RollbackConfig) - } - x.RollbackTo.CodecDecodeSelf(d) - } - for { - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj14-1, "") + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *DeploymentRollback) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *DaemonSetSpec) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -5994,17 +5741,15 @@ func (x *DeploymentRollback) CodecEncodeSelf(e *codec1978.Encoder) { } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool + var yyq2 [2]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false - yyq2[1] = len(x.UpdatedAnnotations) != 0 - yyq2[3] = x.Kind != "" - yyq2[4] = x.APIVersion != "" + yyq2[0] = x.Selector != nil var yynn2 int if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) + r.EncodeArrayStart(2) } else { - yynn2 = 2 + yynn2 = 1 for _, b := range yyq2 { if b { yynn2++ @@ -6015,116 +5760,49 @@ func (x *DeploymentRollback) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.UpdatedAnnotations == nil { + if yyq2[0] { + if x.Selector == nil { r.EncodeNil() } else { - yym7 := z.EncBinary() - _ = yym7 + yym4 := z.EncBinary() + _ = yym4 if false { 
+ } else if z.HasExtensions() && z.EncExt(x.Selector) { } else { - z.F.EncMapStringStringV(x.UpdatedAnnotations, false, e) + z.EncFallback(x.Selector) } } } else { r.EncodeNil() } } else { - if yyq2[1] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("updatedAnnotations")) + r.EncodeString(codecSelferC_UTF81234, string("selector")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.UpdatedAnnotations == nil { + if x.Selector == nil { r.EncodeNil() } else { - yym8 := z.EncBinary() - _ = yym8 + yym5 := z.EncBinary() + _ = yym5 if false { + } else if z.HasExtensions() && z.EncExt(x.Selector) { } else { - z.F.EncMapStringStringV(x.UpdatedAnnotations, false, e) + z.EncFallback(x.Selector) } } } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy10 := &x.RollbackTo - yy10.CodecEncodeSelf(e) + yy7 := &x.Template + yy7.CodecEncodeSelf(e) } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("rollbackTo")) + r.EncodeString(codecSelferC_UTF81234, string("template")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.RollbackTo - yy12.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym18 := z.EncBinary() - _ = yym18 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } + yy9 := &x.Template + yy9.CodecEncodeSelf(e) } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) @@ -6135,7 +5813,7 @@ func (x *DeploymentRollback) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *DeploymentRollback) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *DaemonSetSpec) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -6165,7 +5843,7 @@ func (x *DeploymentRollback) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *DeploymentRollback) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *DaemonSetSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -6187,42 +5865,29 @@ func (x *DeploymentRollback) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) switch yys3 { - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = 
string(r.DecodeString()) - } - case "updatedAnnotations": + case "selector": if r.TryDecodeAsNil() { - x.UpdatedAnnotations = nil + if x.Selector != nil { + x.Selector = nil + } } else { - yyv5 := &x.UpdatedAnnotations - yym6 := z.DecBinary() - _ = yym6 + if x.Selector == nil { + x.Selector = new(pkg1_unversioned.LabelSelector) + } + yym5 := z.DecBinary() + _ = yym5 if false { + } else if z.HasExtensions() && z.DecExt(x.Selector) { } else { - z.F.DecMapStringStringX(yyv5, false, d) + z.DecFallback(x.Selector, false) } } - case "rollbackTo": - if r.TryDecodeAsNil() { - x.RollbackTo = RollbackConfig{} - } else { - yyv7 := &x.RollbackTo - yyv7.CodecDecodeSelf(d) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": + case "template": if r.TryDecodeAsNil() { - x.APIVersion = "" + x.Template = pkg2_api.PodTemplateSpec{} } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.Template + yyv6.CodecDecodeSelf(d) } default: z.DecStructFieldNotFound(-1, yys3) @@ -6231,117 +5896,74 @@ func (x *DeploymentRollback) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *DeploymentRollback) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *DaemonSetSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb10 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb10 { + if yyb7 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Name = "" + if x.Selector != nil { + x.Selector = nil + } } else { - x.Name = string(r.DecodeString()) + if x.Selector == nil { + x.Selector = new(pkg1_unversioned.LabelSelector) + } + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(x.Selector) { + } else { + z.DecFallback(x.Selector, false) + } } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb10 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb10 { + if yyb7 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.UpdatedAnnotations = nil + x.Template = pkg2_api.PodTemplateSpec{} } else { - yyv12 := &x.UpdatedAnnotations - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - z.F.DecMapStringStringX(yyv12, false, d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.RollbackTo = RollbackConfig{} - } else { - yyv14 := &x.RollbackTo - yyv14.CodecDecodeSelf(d) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } 
- if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) + yyv10 := &x.Template + yyv10.CodecDecodeSelf(d) } for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb10 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb10 { + if yyb7 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") + z.DecStructFieldNotFound(yyj7-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *RollbackConfig) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *DaemonSetStatus) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -6355,15 +5977,14 @@ func (x *RollbackConfig) CodecEncodeSelf(e *codec1978.Encoder) { } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool + var yyq2 [3]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false - yyq2[0] = x.Revision != 0 var yynn2 int if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) + r.EncodeArrayStart(3) } else { - yynn2 = 0 + yynn2 = 3 for _, b := range yyq2 { if b { yynn2++ @@ -6374,27 +5995,59 @@ func (x *RollbackConfig) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeInt(int64(x.Revision)) - } + yym4 := z.EncBinary() + _ = yym4 + if false { } else { - r.EncodeInt(0) + r.EncodeInt(int64(x.CurrentNumberScheduled)) } } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("revision")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(x.Revision)) - } + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("currentNumberScheduled")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(x.CurrentNumberScheduled)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeInt(int64(x.NumberMisscheduled)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("numberMisscheduled")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeInt(int64(x.NumberMisscheduled)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(x.DesiredNumberScheduled)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("desiredNumberScheduled")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeInt(int64(x.DesiredNumberScheduled)) } } if yyr2 || yy2arr2 { @@ -6406,7 +6059,7 @@ func (x *RollbackConfig) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *RollbackConfig) CodecDecodeSelf(d *codec1978.Decoder) { +func 
(x *DaemonSetStatus) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -6436,7 +6089,7 @@ func (x *RollbackConfig) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *RollbackConfig) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *DaemonSetStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -6458,11 +6111,23 @@ func (x *RollbackConfig) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) switch yys3 { - case "revision": + case "currentNumberScheduled": if r.TryDecodeAsNil() { - x.Revision = 0 + x.CurrentNumberScheduled = 0 } else { - x.Revision = int64(r.DecodeInt(64)) + x.CurrentNumberScheduled = int32(r.DecodeInt(32)) + } + case "numberMisscheduled": + if r.TryDecodeAsNil() { + x.NumberMisscheduled = 0 + } else { + x.NumberMisscheduled = int32(r.DecodeInt(32)) + } + case "desiredNumberScheduled": + if r.TryDecodeAsNil() { + x.DesiredNumberScheduled = 0 + } else { + x.DesiredNumberScheduled = int32(r.DecodeInt(32)) } default: z.DecStructFieldNotFound(-1, yys3) @@ -6471,46 +6136,78 @@ func (x *RollbackConfig) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *RollbackConfig) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *DaemonSetStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj5 int - var yyb5 bool - var yyhl5 bool = l >= 0 - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb5 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb5 { + if yyb7 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Revision = 0 + x.CurrentNumberScheduled = 0 } else { - x.Revision = int64(r.DecodeInt(64)) + x.CurrentNumberScheduled = int32(r.DecodeInt(32)) + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.NumberMisscheduled = 0 + } else { + x.NumberMisscheduled = int32(r.DecodeInt(32)) + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.DesiredNumberScheduled = 0 + } else { + x.DesiredNumberScheduled = int32(r.DecodeInt(32)) } for { - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb5 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb5 { + if yyb7 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj5-1, "") + z.DecStructFieldNotFound(yyj7-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *DeploymentStrategy) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *DaemonSet) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -6524,14 +6221,17 @@ func (x 
*DeploymentStrategy) CodecEncodeSelf(e *codec1978.Encoder) {
 	} else {
 		yysep2 := !z.EncBinary()
 		yy2arr2 := z.EncBasicHandle().StructToArray
-		var yyq2 [2]bool
+		var yyq2 [5]bool
 		_, _, _ = yysep2, yyq2, yy2arr2
 		const yyr2 bool = false
-		yyq2[0] = x.Type != ""
-		yyq2[1] = x.RollingUpdate != nil
+		yyq2[0] = true
+		yyq2[1] = true
+		yyq2[2] = true
+		yyq2[3] = x.Kind != ""
+		yyq2[4] = x.APIVersion != ""
 		var yynn2 int
 		if yyr2 || yy2arr2 {
-			r.EncodeArrayStart(2)
+			r.EncodeArrayStart(5)
 		} else {
 			yynn2 = 0
 			for _, b := range yyq2 {
@@ -6545,38 +6245,101 @@ func (x *DeploymentStrategy) CodecEncodeSelf(e *codec1978.Encoder) {
 		if yyr2 || yy2arr2 {
 			z.EncSendContainerState(codecSelfer_containerArrayElem1234)
 			if yyq2[0] {
-				x.Type.CodecEncodeSelf(e)
+				yy4 := &x.ObjectMeta
+				yy4.CodecEncodeSelf(e)
 			} else {
-				r.EncodeString(codecSelferC_UTF81234, "")
+				r.EncodeNil()
 			}
 		} else {
 			if yyq2[0] {
 				z.EncSendContainerState(codecSelfer_containerMapKey1234)
-				r.EncodeString(codecSelferC_UTF81234, string("type"))
+				r.EncodeString(codecSelferC_UTF81234, string("metadata"))
 				z.EncSendContainerState(codecSelfer_containerMapValue1234)
-				x.Type.CodecEncodeSelf(e)
+				yy6 := &x.ObjectMeta
+				yy6.CodecEncodeSelf(e)
 			}
 		}
 		if yyr2 || yy2arr2 {
 			z.EncSendContainerState(codecSelfer_containerArrayElem1234)
 			if yyq2[1] {
-				if x.RollingUpdate == nil {
-					r.EncodeNil()
-				} else {
-					x.RollingUpdate.CodecEncodeSelf(e)
-				}
+				yy9 := &x.Spec
+				yy9.CodecEncodeSelf(e)
 			} else {
 				r.EncodeNil()
 			}
 		} else {
 			if yyq2[1] {
 				z.EncSendContainerState(codecSelfer_containerMapKey1234)
-				r.EncodeString(codecSelferC_UTF81234, string("rollingUpdate"))
+				r.EncodeString(codecSelferC_UTF81234, string("spec"))
 				z.EncSendContainerState(codecSelfer_containerMapValue1234)
-				if x.RollingUpdate == nil {
-					r.EncodeNil()
-				} else {
-					x.RollingUpdate.CodecEncodeSelf(e)
+				yy11 := &x.Spec
+				yy11.CodecEncodeSelf(e)
+			}
+		}
+		if yyr2 || yy2arr2 {
+			z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+			if yyq2[2] {
+				yy14 := &x.Status
+				yy14.CodecEncodeSelf(e)
+			} else {
+				r.EncodeNil()
+			}
+		} else {
+			if yyq2[2] {
+				z.EncSendContainerState(codecSelfer_containerMapKey1234)
+				r.EncodeString(codecSelferC_UTF81234, string("status"))
+				z.EncSendContainerState(codecSelfer_containerMapValue1234)
+				yy16 := &x.Status
+				yy16.CodecEncodeSelf(e)
+			}
+		}
+		if yyr2 || yy2arr2 {
+			z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+			if yyq2[3] {
+				yym19 := z.EncBinary()
+				_ = yym19
+				if false {
+				} else {
+					r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+				}
+			} else {
+				r.EncodeString(codecSelferC_UTF81234, "")
+			}
+		} else {
+			if yyq2[3] {
+				z.EncSendContainerState(codecSelfer_containerMapKey1234)
+				r.EncodeString(codecSelferC_UTF81234, string("kind"))
+				z.EncSendContainerState(codecSelfer_containerMapValue1234)
+				yym20 := z.EncBinary()
+				_ = yym20
+				if false {
+				} else {
+					r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+				}
+			}
+		}
+		if yyr2 || yy2arr2 {
+			z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+			if yyq2[4] {
+				yym22 := z.EncBinary()
+				_ = yym22
+				if false {
+				} else {
+					r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+				}
+			} else {
+				r.EncodeString(codecSelferC_UTF81234, "")
+			}
+		} else {
+			if yyq2[4] {
+				z.EncSendContainerState(codecSelfer_containerMapKey1234)
+				r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+				z.EncSendContainerState(codecSelfer_containerMapValue1234)
+				yym23 := z.EncBinary()
+				_ = yym23
+				if false {
+				} else {
+					r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
 				}
 			}
 		}
@@ -6589,7 +6352,7 @@
func (x *DeploymentStrategy) CodecEncodeSelf(e *codec1978.Encoder) {
 	}
 }
 
-func (x *DeploymentStrategy) CodecDecodeSelf(d *codec1978.Decoder) {
+func (x *DaemonSet) CodecDecodeSelf(d *codec1978.Decoder) {
 	var h codecSelfer1234
 	z, r := codec1978.GenHelperDecoder(d)
 	_, _, _ = h, z, r
@@ -6619,7 +6382,7 @@ func (x *DeploymentStrategy) CodecDecodeSelf(d *codec1978.Decoder) {
 	}
 }
 
-func (x *DeploymentStrategy) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+func (x *DaemonSet) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
 	var h codecSelfer1234
 	z, r := codec1978.GenHelperDecoder(d)
 	_, _, _ = h, z, r
@@ -6641,22 +6404,38 @@ func (x *DeploymentStrategy) codecDecodeSelfFromMap(l int, d *codec1978.Decoder)
 		yys3 := string(yys3Slc)
 		z.DecSendContainerState(codecSelfer_containerMapValue1234)
 		switch yys3 {
-		case "type":
+		case "metadata":
 			if r.TryDecodeAsNil() {
-				x.Type = ""
+				x.ObjectMeta = pkg2_api.ObjectMeta{}
 			} else {
-				x.Type = DeploymentStrategyType(r.DecodeString())
+				yyv4 := &x.ObjectMeta
+				yyv4.CodecDecodeSelf(d)
 			}
-		case "rollingUpdate":
+		case "spec":
 			if r.TryDecodeAsNil() {
-				if x.RollingUpdate != nil {
-					x.RollingUpdate = nil
-				}
+				x.Spec = DaemonSetSpec{}
 			} else {
-				if x.RollingUpdate == nil {
-					x.RollingUpdate = new(RollingUpdateDeployment)
-				}
-				x.RollingUpdate.CodecDecodeSelf(d)
+				yyv5 := &x.Spec
+				yyv5.CodecDecodeSelf(d)
+			}
+		case "status":
+			if r.TryDecodeAsNil() {
+				x.Status = DaemonSetStatus{}
+			} else {
+				yyv6 := &x.Status
+				yyv6.CodecDecodeSelf(d)
+			}
+		case "kind":
+			if r.TryDecodeAsNil() {
+				x.Kind = ""
+			} else {
+				x.Kind = string(r.DecodeString())
+			}
+		case "apiVersion":
+			if r.TryDecodeAsNil() {
+				x.APIVersion = ""
+			} else {
+				x.APIVersion = string(r.DecodeString())
 			}
 		default:
 			z.DecStructFieldNotFound(-1, yys3)
@@ -6665,93 +6444,113 @@ func (x *DeploymentStrategy) codecDecodeSelfFromMap(l int, d *codec1978.Decoder)
 	z.DecSendContainerState(codecSelfer_containerMapEnd1234)
 }
 
-func (x *DeploymentStrategy) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+func (x *DaemonSet) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
 	var h codecSelfer1234
 	z, r := codec1978.GenHelperDecoder(d)
 	_, _, _ = h, z, r
-	var yyj6 int
-	var yyb6 bool
-	var yyhl6 bool = l >= 0
-	yyj6++
-	if yyhl6 {
-		yyb6 = yyj6 > l
+	var yyj9 int
+	var yyb9 bool
+	var yyhl9 bool = l >= 0
+	yyj9++
+	if yyhl9 {
+		yyb9 = yyj9 > l
 	} else {
-		yyb6 = r.CheckBreak()
+		yyb9 = r.CheckBreak()
 	}
-	if yyb6 {
+	if yyb9 {
 		z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
 		return
 	}
 	z.DecSendContainerState(codecSelfer_containerArrayElem1234)
 	if r.TryDecodeAsNil() {
-		x.Type = ""
+		x.ObjectMeta = pkg2_api.ObjectMeta{}
 	} else {
-		x.Type = DeploymentStrategyType(r.DecodeString())
+		yyv10 := &x.ObjectMeta
+		yyv10.CodecDecodeSelf(d)
 	}
-	yyj6++
-	if yyhl6 {
-		yyb6 = yyj6 > l
+	yyj9++
+	if yyhl9 {
+		yyb9 = yyj9 > l
 	} else {
-		yyb6 = r.CheckBreak()
+		yyb9 = r.CheckBreak()
 	}
-	if yyb6 {
+	if yyb9 {
 		z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
 		return
 	}
 	z.DecSendContainerState(codecSelfer_containerArrayElem1234)
 	if r.TryDecodeAsNil() {
-		if x.RollingUpdate != nil {
-			x.RollingUpdate = nil
-		}
+		x.Spec = DaemonSetSpec{}
 	} else {
-		if x.RollingUpdate == nil {
-			x.RollingUpdate = new(RollingUpdateDeployment)
-		}
-		x.RollingUpdate.CodecDecodeSelf(d)
+		yyv11 := &x.Spec
+		yyv11.CodecDecodeSelf(d)
+	}
+	yyj9++
+	if yyhl9 {
+		yyb9 = yyj9 > l
+	} else {
+		yyb9 = r.CheckBreak()
+	}
+	if yyb9 {
+		z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+		return
+	}
+
 	z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+	if r.TryDecodeAsNil() {
+		x.Status = DaemonSetStatus{}
+	} else {
+		yyv12 := &x.Status
+		yyv12.CodecDecodeSelf(d)
+	}
+	yyj9++
+	if yyhl9 {
+		yyb9 = yyj9 > l
+	} else {
+		yyb9 = r.CheckBreak()
+	}
+	if yyb9 {
+		z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+		return
+	}
+	z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+	if r.TryDecodeAsNil() {
+		x.Kind = ""
+	} else {
+		x.Kind = string(r.DecodeString())
+	}
+	yyj9++
+	if yyhl9 {
+		yyb9 = yyj9 > l
+	} else {
+		yyb9 = r.CheckBreak()
+	}
+	if yyb9 {
+		z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+		return
+	}
+	z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+	if r.TryDecodeAsNil() {
+		x.APIVersion = ""
+	} else {
+		x.APIVersion = string(r.DecodeString())
	}
 	for {
-		yyj6++
-		if yyhl6 {
-			yyb6 = yyj6 > l
+		yyj9++
+		if yyhl9 {
+			yyb9 = yyj9 > l
 		} else {
-			yyb6 = r.CheckBreak()
+			yyb9 = r.CheckBreak()
 		}
-		if yyb6 {
+		if yyb9 {
 			break
 		}
 		z.DecSendContainerState(codecSelfer_containerArrayElem1234)
-		z.DecStructFieldNotFound(yyj6-1, "")
+		z.DecStructFieldNotFound(yyj9-1, "")
 	}
 	z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
 }
 
-func (x DeploymentStrategyType) CodecEncodeSelf(e *codec1978.Encoder) {
-	var h codecSelfer1234
-	z, r := codec1978.GenHelperEncoder(e)
-	_, _, _ = h, z, r
-	yym1 := z.EncBinary()
-	_ = yym1
-	if false {
-	} else if z.HasExtensions() && z.EncExt(x) {
-	} else {
-		r.EncodeString(codecSelferC_UTF81234, string(x))
-	}
-}
-
-func (x *DeploymentStrategyType) CodecDecodeSelf(d *codec1978.Decoder) {
-	var h codecSelfer1234
-	z, r := codec1978.GenHelperDecoder(d)
-	_, _, _ = h, z, r
-	yym1 := z.DecBinary()
-	_ = yym1
-	if false {
-	} else if z.HasExtensions() && z.DecExt(x) {
-	} else {
-		*((*string)(x)) = r.DecodeString()
-	}
-}
-
-func (x *RollingUpdateDeployment) CodecEncodeSelf(e *codec1978.Encoder) {
+func (x *DaemonSetList) CodecEncodeSelf(e *codec1978.Encoder) {
 	var h codecSelfer1234
 	z, r := codec1978.GenHelperEncoder(e)
 	_, _, _ = h, z, r
@@ -6765,16 +6564,17 @@ func (x *RollingUpdateDeployment) CodecEncodeSelf(e *codec1978.Encoder) {
 	} else {
 		yysep2 := !z.EncBinary()
 		yy2arr2 := z.EncBasicHandle().StructToArray
-		var yyq2 [2]bool
+		var yyq2 [4]bool
 		_, _, _ = yysep2, yyq2, yy2arr2
 		const yyr2 bool = false
 		yyq2[0] = true
-		yyq2[1] = true
+		yyq2[2] = x.Kind != ""
+		yyq2[3] = x.APIVersion != ""
 		var yynn2 int
 		if yyr2 || yy2arr2 {
-			r.EncodeArrayStart(2)
+			r.EncodeArrayStart(4)
 		} else {
-			yynn2 = 0
+			yynn2 = 1
 			for _, b := range yyq2 {
 				if b {
 					yynn2++
@@ -6786,13 +6586,11 @@ func (x *RollingUpdateDeployment) CodecEncodeSelf(e *codec1978.Encoder) {
 		if yyr2 || yy2arr2 {
 			z.EncSendContainerState(codecSelfer_containerArrayElem1234)
 			if yyq2[0] {
-				yy4 := &x.MaxUnavailable
+				yy4 := &x.ListMeta
 				yym5 := z.EncBinary()
 				_ = yym5
 				if false {
 				} else if z.HasExtensions() && z.EncExt(yy4) {
-				} else if !yym5 && z.IsJSONHandle() {
-					z.EncJSONMarshal(yy4)
 				} else {
 					z.EncFallback(yy4)
 				}
@@ -6802,15 +6600,13 @@ func (x *RollingUpdateDeployment) CodecEncodeSelf(e *codec1978.Encoder) {
 		} else {
 			if yyq2[0] {
 				z.EncSendContainerState(codecSelfer_containerMapKey1234)
-				r.EncodeString(codecSelferC_UTF81234, string("maxUnavailable"))
+				r.EncodeString(codecSelferC_UTF81234, string("metadata"))
 				z.EncSendContainerState(codecSelfer_containerMapValue1234)
-				yy6 := &x.MaxUnavailable
+				yy6 := &x.ListMeta
 				yym7 := z.EncBinary()
 				_ = yym7
 				if false {
 				} else if z.HasExtensions() && z.EncExt(yy6) {
-				} else if !yym7 && z.IsJSONHandle() {
-					z.EncJSONMarshal(yy6)
 				} else {
 					z.EncFallback(yy6)
 				}
@@ -6818,34 +6614,78 @@ func (x *RollingUpdateDeployment) CodecEncodeSelf(e *codec1978.Encoder) {
 		}
 		if yyr2 || yy2arr2 {
 			z.EncSendContainerState(codecSelfer_containerArrayElem1234)
-			if yyq2[1] {
-				yy9 := &x.MaxSurge
+			if x.Items == nil {
+				r.EncodeNil()
+			} else {
+				yym9 := z.EncBinary()
+				_ = yym9
+				if false {
+				} else {
+					h.encSliceDaemonSet(([]DaemonSet)(x.Items), e)
+				}
+			}
+		} else {
+			z.EncSendContainerState(codecSelfer_containerMapKey1234)
+			r.EncodeString(codecSelferC_UTF81234, string("items"))
+			z.EncSendContainerState(codecSelfer_containerMapValue1234)
+			if x.Items == nil {
+				r.EncodeNil()
+			} else {
 				yym10 := z.EncBinary()
 				_ = yym10
 				if false {
-				} else if z.HasExtensions() && z.EncExt(yy9) {
-				} else if !yym10 && z.IsJSONHandle() {
-					z.EncJSONMarshal(yy9)
 				} else {
-					z.EncFallback(yy9)
+					h.encSliceDaemonSet(([]DaemonSet)(x.Items), e)
+				}
+			}
+		}
+		if yyr2 || yy2arr2 {
+			z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+			if yyq2[2] {
+				yym12 := z.EncBinary()
+				_ = yym12
+				if false {
+				} else {
+					r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
 				}
 			} else {
-				r.EncodeNil()
+				r.EncodeString(codecSelferC_UTF81234, "")
 			}
 		} else {
-			if yyq2[1] {
+			if yyq2[2] {
 				z.EncSendContainerState(codecSelfer_containerMapKey1234)
-				r.EncodeString(codecSelferC_UTF81234, string("maxSurge"))
+				r.EncodeString(codecSelferC_UTF81234, string("kind"))
 				z.EncSendContainerState(codecSelfer_containerMapValue1234)
-				yy11 := &x.MaxSurge
-				yym12 := z.EncBinary()
-				_ = yym12
+				yym13 := z.EncBinary()
+				_ = yym13
 				if false {
-				} else if z.HasExtensions() && z.EncExt(yy11) {
-				} else if !yym12 && z.IsJSONHandle() {
-					z.EncJSONMarshal(yy11)
 				} else {
-					z.EncFallback(yy11)
+					r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+				}
+			}
+		}
+		if yyr2 || yy2arr2 {
+			z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+			if yyq2[3] {
+				yym15 := z.EncBinary()
+				_ = yym15
+				if false {
+				} else {
+					r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+				}
+			} else {
+				r.EncodeString(codecSelferC_UTF81234, "")
+			}
+		} else {
+			if yyq2[3] {
+				z.EncSendContainerState(codecSelfer_containerMapKey1234)
+				r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+				z.EncSendContainerState(codecSelfer_containerMapValue1234)
+				yym16 := z.EncBinary()
+				_ = yym16
+				if false {
+				} else {
+					r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
				}
 			}
 		}
@@ -6858,7 +6698,7 @@ func (x *RollingUpdateDeployment) CodecEncodeSelf(e *codec1978.Encoder) {
 	}
 }
 
-func (x *RollingUpdateDeployment) CodecDecodeSelf(d *codec1978.Decoder) {
+func (x *DaemonSetList) CodecDecodeSelf(d *codec1978.Decoder) {
 	var h codecSelfer1234
 	z, r := codec1978.GenHelperDecoder(d)
 	_, _, _ = h, z, r
@@ -6888,7 +6728,7 @@ func (x *RollingUpdateDeployment) CodecDecodeSelf(d *codec1978.Decoder) {
 	}
 }
 
-func (x *RollingUpdateDeployment) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+func (x *DaemonSetList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
 	var h codecSelfer1234
 	z, r := codec1978.GenHelperDecoder(d)
 	_, _, _ = h, z, r
@@ -6910,36 +6750,43 @@ func (x *RollingUpdateDeployment) codecDecodeSelfFromMap(l int, d *codec1978.Dec
 		yys3 := string(yys3Slc)
 		z.DecSendContainerState(codecSelfer_containerMapValue1234)
 		switch yys3 {
-		case "maxUnavailable":
+		case "metadata":
 			if r.TryDecodeAsNil() {
-				x.MaxUnavailable = pkg6_intstr.IntOrString{}
+				x.ListMeta = pkg1_unversioned.ListMeta{}
 			} else {
-				yyv4 := &x.MaxUnavailable
+				yyv4 := &x.ListMeta
 				yym5 := z.DecBinary()
 				_ = yym5
 				if false {
 				} else if
z.HasExtensions() && z.DecExt(yyv4) {
-				} else if !yym5 && z.IsJSONHandle() {
-					z.DecJSONUnmarshal(yyv4)
 				} else {
 					z.DecFallback(yyv4, false)
 				}
 			}
-		case "maxSurge":
+		case "items":
 			if r.TryDecodeAsNil() {
-				x.MaxSurge = pkg6_intstr.IntOrString{}
+				x.Items = nil
 			} else {
-				yyv6 := &x.MaxSurge
+				yyv6 := &x.Items
 				yym7 := z.DecBinary()
 				_ = yym7
 				if false {
-				} else if z.HasExtensions() && z.DecExt(yyv6) {
-				} else if !yym7 && z.IsJSONHandle() {
-					z.DecJSONUnmarshal(yyv6)
 				} else {
-					z.DecFallback(yyv6, false)
+					h.decSliceDaemonSet((*[]DaemonSet)(yyv6), d)
 				}
 			}
+		case "kind":
+			if r.TryDecodeAsNil() {
+				x.Kind = ""
+			} else {
+				x.Kind = string(r.DecodeString())
+			}
+		case "apiVersion":
+			if r.TryDecodeAsNil() {
+				x.APIVersion = ""
+			} else {
+				x.APIVersion = string(r.DecodeString())
+			}
 		default:
 			z.DecStructFieldNotFound(-1, yys3)
 		} // end switch yys3
@@ -6947,80 +6794,107 @@ func (x *RollingUpdateDeployment) codecDecodeSelfFromMap(l int, d *codec1978.Dec
 	z.DecSendContainerState(codecSelfer_containerMapEnd1234)
 }
 
-func (x *RollingUpdateDeployment) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+func (x *DaemonSetList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
 	var h codecSelfer1234
 	z, r := codec1978.GenHelperDecoder(d)
 	_, _, _ = h, z, r
-	var yyj8 int
-	var yyb8 bool
-	var yyhl8 bool = l >= 0
-	yyj8++
-	if yyhl8 {
-		yyb8 = yyj8 > l
+	var yyj10 int
+	var yyb10 bool
+	var yyhl10 bool = l >= 0
+	yyj10++
+	if yyhl10 {
		yyb10 = yyj10 > l
 	} else {
-		yyb8 = r.CheckBreak()
+		yyb10 = r.CheckBreak()
 	}
-	if yyb8 {
+	if yyb10 {
 		z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
 		return
 	}
 	z.DecSendContainerState(codecSelfer_containerArrayElem1234)
 	if r.TryDecodeAsNil() {
-		x.MaxUnavailable = pkg6_intstr.IntOrString{}
+		x.ListMeta = pkg1_unversioned.ListMeta{}
 	} else {
-		yyv9 := &x.MaxUnavailable
-		yym10 := z.DecBinary()
-		_ = yym10
+		yyv11 := &x.ListMeta
+		yym12 := z.DecBinary()
+		_ = yym12
 		if false {
-		} else if z.HasExtensions() && z.DecExt(yyv9) {
-		} else if !yym10 && z.IsJSONHandle() {
-			z.DecJSONUnmarshal(yyv9)
+		} else if z.HasExtensions() && z.DecExt(yyv11) {
 		} else {
-			z.DecFallback(yyv9, false)
+			z.DecFallback(yyv11, false)
 		}
 	}
-	yyj8++
-	if yyhl8 {
-		yyb8 = yyj8 > l
+	yyj10++
+	if yyhl10 {
+		yyb10 = yyj10 > l
 	} else {
-		yyb8 = r.CheckBreak()
+		yyb10 = r.CheckBreak()
 	}
-	if yyb8 {
+	if yyb10 {
 		z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
 		return
 	}
 	z.DecSendContainerState(codecSelfer_containerArrayElem1234)
 	if r.TryDecodeAsNil() {
-		x.MaxSurge = pkg6_intstr.IntOrString{}
+		x.Items = nil
 	} else {
-		yyv11 := &x.MaxSurge
-		yym12 := z.DecBinary()
-		_ = yym12
+		yyv13 := &x.Items
+		yym14 := z.DecBinary()
+		_ = yym14
 		if false {
-		} else if z.HasExtensions() && z.DecExt(yyv11) {
-		} else if !yym12 && z.IsJSONHandle() {
-			z.DecJSONUnmarshal(yyv11)
 		} else {
-			z.DecFallback(yyv11, false)
+			h.decSliceDaemonSet((*[]DaemonSet)(yyv13), d)
 		}
 	}
+	yyj10++
+	if yyhl10 {
+		yyb10 = yyj10 > l
+	} else {
+		yyb10 = r.CheckBreak()
+	}
+	if yyb10 {
+		z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+		return
+	}
+	z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+	if r.TryDecodeAsNil() {
+		x.Kind = ""
+	} else {
+		x.Kind = string(r.DecodeString())
+	}
+	yyj10++
+	if yyhl10 {
+		yyb10 = yyj10 > l
+	} else {
+		yyb10 = r.CheckBreak()
+	}
+	if yyb10 {
+		z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+		return
+	}
+	z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+	if r.TryDecodeAsNil() {
+		x.APIVersion = ""
+	} else {
+		x.APIVersion =
string(r.DecodeString())
+	}
 	for {
-		yyj8++
-		if yyhl8 {
-			yyb8 = yyj8 > l
+		yyj10++
+		if yyhl10 {
+			yyb10 = yyj10 > l
 		} else {
-			yyb8 = r.CheckBreak()
+			yyb10 = r.CheckBreak()
 		}
-		if yyb8 {
+		if yyb10 {
 			break
 		}
 		z.DecSendContainerState(codecSelfer_containerArrayElem1234)
-		z.DecStructFieldNotFound(yyj8-1, "")
+		z.DecStructFieldNotFound(yyj10-1, "")
 	}
 	z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
 }
 
-func (x *DeploymentStatus) CodecEncodeSelf(e *codec1978.Encoder) {
+func (x *ThirdPartyResourceDataList) CodecEncodeSelf(e *codec1978.Encoder) {
 	var h codecSelfer1234
 	z, r := codec1978.GenHelperEncoder(e)
 	_, _, _ = h, z, r
@@ -7034,19 +6908,17 @@ func (x *DeploymentStatus) CodecEncodeSelf(e *codec1978.Encoder) {
 	} else {
 		yysep2 := !z.EncBinary()
 		yy2arr2 := z.EncBasicHandle().StructToArray
-		var yyq2 [5]bool
+		var yyq2 [4]bool
 		_, _, _ = yysep2, yyq2, yy2arr2
 		const yyr2 bool = false
-		yyq2[0] = x.ObservedGeneration != 0
-		yyq2[1] = x.Replicas != 0
-		yyq2[2] = x.UpdatedReplicas != 0
-		yyq2[3] = x.AvailableReplicas != 0
-		yyq2[4] = x.UnavailableReplicas != 0
+		yyq2[0] = true
+		yyq2[2] = x.Kind != ""
+		yyq2[3] = x.APIVersion != ""
 		var yynn2 int
 		if yyr2 || yy2arr2 {
-			r.EncodeArrayStart(5)
+			r.EncodeArrayStart(4)
 		} else {
-			yynn2 = 0
+			yynn2 = 1
 			for _, b := range yyq2 {
 				if b {
 					yynn2++
@@ -7058,125 +6930,106 @@ func (x *DeploymentStatus) CodecEncodeSelf(e *codec1978.Encoder) {
 		if yyr2 || yy2arr2 {
 			z.EncSendContainerState(codecSelfer_containerArrayElem1234)
 			if yyq2[0] {
-				yym4 := z.EncBinary()
-				_ = yym4
+				yy4 := &x.ListMeta
+				yym5 := z.EncBinary()
+				_ = yym5
 				if false {
+				} else if z.HasExtensions() && z.EncExt(yy4) {
 				} else {
-					r.EncodeInt(int64(x.ObservedGeneration))
+					z.EncFallback(yy4)
 				}
 			} else {
-				r.EncodeInt(0)
+				r.EncodeNil()
 			}
 		} else {
 			if yyq2[0] {
 				z.EncSendContainerState(codecSelfer_containerMapKey1234)
-				r.EncodeString(codecSelferC_UTF81234, string("observedGeneration"))
+				r.EncodeString(codecSelferC_UTF81234, string("metadata"))
 				z.EncSendContainerState(codecSelfer_containerMapValue1234)
-				yym5 := z.EncBinary()
-				_ = yym5
+				yy6 := &x.ListMeta
+				yym7 := z.EncBinary()
+				_ = yym7
 				if false {
+				} else if z.HasExtensions() && z.EncExt(yy6) {
 				} else {
-					r.EncodeInt(int64(x.ObservedGeneration))
+					z.EncFallback(yy6)
 				}
 			}
 		}
 		if yyr2 || yy2arr2 {
 			z.EncSendContainerState(codecSelfer_containerArrayElem1234)
-			if yyq2[1] {
-				yym7 := z.EncBinary()
-				_ = yym7
+			if x.Items == nil {
+				r.EncodeNil()
+			} else {
+				yym9 := z.EncBinary()
+				_ = yym9
 				if false {
 				} else {
-					r.EncodeInt(int64(x.Replicas))
+					h.encSliceThirdPartyResourceData(([]ThirdPartyResourceData)(x.Items), e)
 				}
-			} else {
-				r.EncodeInt(0)
 			}
 		} else {
-			if yyq2[1] {
-				z.EncSendContainerState(codecSelfer_containerMapKey1234)
-				r.EncodeString(codecSelferC_UTF81234, string("replicas"))
-				z.EncSendContainerState(codecSelfer_containerMapValue1234)
-				yym8 := z.EncBinary()
-				_ = yym8
+			z.EncSendContainerState(codecSelfer_containerMapKey1234)
+			r.EncodeString(codecSelferC_UTF81234, string("items"))
+			z.EncSendContainerState(codecSelfer_containerMapValue1234)
+			if x.Items == nil {
+				r.EncodeNil()
+			} else {
+				yym10 := z.EncBinary()
+				_ = yym10
 				if false {
 				} else {
-					r.EncodeInt(int64(x.Replicas))
+					h.encSliceThirdPartyResourceData(([]ThirdPartyResourceData)(x.Items), e)
 				}
 			}
 		}
 		if yyr2 || yy2arr2 {
 			z.EncSendContainerState(codecSelfer_containerArrayElem1234)
 			if yyq2[2] {
-				yym10 := z.EncBinary()
-				_ = yym10
+				yym12 := z.EncBinary()
+				_ = yym12
 				if false {
 				} else {
-					r.EncodeInt(int64(x.UpdatedReplicas))
+					r.EncodeString(codecSelferC_UTF81234,
string(x.Kind))
 				}
 			} else {
-				r.EncodeInt(0)
+				r.EncodeString(codecSelferC_UTF81234, "")
 			}
 		} else {
 			if yyq2[2] {
 				z.EncSendContainerState(codecSelfer_containerMapKey1234)
-				r.EncodeString(codecSelferC_UTF81234, string("updatedReplicas"))
+				r.EncodeString(codecSelferC_UTF81234, string("kind"))
 				z.EncSendContainerState(codecSelfer_containerMapValue1234)
-				yym11 := z.EncBinary()
-				_ = yym11
+				yym13 := z.EncBinary()
+				_ = yym13
 				if false {
 				} else {
-					r.EncodeInt(int64(x.UpdatedReplicas))
+					r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
 				}
 			}
 		}
 		if yyr2 || yy2arr2 {
 			z.EncSendContainerState(codecSelfer_containerArrayElem1234)
 			if yyq2[3] {
-				yym13 := z.EncBinary()
-				_ = yym13
+				yym15 := z.EncBinary()
+				_ = yym15
 				if false {
 				} else {
-					r.EncodeInt(int64(x.AvailableReplicas))
+					r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
 				}
 			} else {
-				r.EncodeInt(0)
+				r.EncodeString(codecSelferC_UTF81234, "")
 			}
 		} else {
 			if yyq2[3] {
 				z.EncSendContainerState(codecSelfer_containerMapKey1234)
-				r.EncodeString(codecSelferC_UTF81234, string("availableReplicas"))
+				r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
 				z.EncSendContainerState(codecSelfer_containerMapValue1234)
-				yym14 := z.EncBinary()
-				_ = yym14
-				if false {
-				} else {
-					r.EncodeInt(int64(x.AvailableReplicas))
-				}
-			}
-		}
-		if yyr2 || yy2arr2 {
-			z.EncSendContainerState(codecSelfer_containerArrayElem1234)
-			if yyq2[4] {
 				yym16 := z.EncBinary()
 				_ = yym16
 				if false {
 				} else {
-					r.EncodeInt(int64(x.UnavailableReplicas))
-				}
-			} else {
-				r.EncodeInt(0)
-			}
-		} else {
-			if yyq2[4] {
-				z.EncSendContainerState(codecSelfer_containerMapKey1234)
-				r.EncodeString(codecSelferC_UTF81234, string("unavailableReplicas"))
-				z.EncSendContainerState(codecSelfer_containerMapValue1234)
-				yym17 := z.EncBinary()
-				_ = yym17
-				if false {
-				} else {
-					r.EncodeInt(int64(x.UnavailableReplicas))
+					r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
 				}
 			}
 		}
@@ -7189,7 +7042,7 @@ func (x *DeploymentStatus) CodecEncodeSelf(e *codec1978.Encoder) {
 	}
 }
 
-func (x *DeploymentStatus) CodecDecodeSelf(d *codec1978.Decoder) {
+func (x *ThirdPartyResourceDataList) CodecDecodeSelf(d *codec1978.Decoder) {
 	var h codecSelfer1234
 	z, r := codec1978.GenHelperDecoder(d)
 	_, _, _ = h, z, r
@@ -7219,7 +7072,7 @@ func (x *DeploymentStatus) CodecDecodeSelf(d *codec1978.Decoder) {
 	}
 }
 
-func (x *DeploymentStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+func (x *ThirdPartyResourceDataList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
 	var h codecSelfer1234
 	z, r := codec1978.GenHelperDecoder(d)
 	_, _, _ = h, z, r
@@ -7241,35 +7094,42 @@ func (x *DeploymentStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
 		yys3 := string(yys3Slc)
 		z.DecSendContainerState(codecSelfer_containerMapValue1234)
 		switch yys3 {
-		case "observedGeneration":
-			if r.TryDecodeAsNil() {
-				x.ObservedGeneration = 0
-			} else {
-				x.ObservedGeneration = int64(r.DecodeInt(64))
-			}
-		case "replicas":
+		case "metadata":
 			if r.TryDecodeAsNil() {
-				x.Replicas = 0
+				x.ListMeta = pkg1_unversioned.ListMeta{}
 			} else {
-				x.Replicas = int(r.DecodeInt(codecSelferBitsize1234))
+				yyv4 := &x.ListMeta
+				yym5 := z.DecBinary()
+				_ = yym5
+				if false {
+				} else if z.HasExtensions() && z.DecExt(yyv4) {
+				} else {
+					z.DecFallback(yyv4, false)
+				}
 			}
-		case "updatedReplicas":
+		case "items":
 			if r.TryDecodeAsNil() {
-				x.UpdatedReplicas = 0
+				x.Items = nil
 			} else {
-				x.UpdatedReplicas = int(r.DecodeInt(codecSelferBitsize1234))
+				yyv6 := &x.Items
+				yym7 := z.DecBinary()
+				_ = yym7
+				if false {
+				} else {
+
h.decSliceThirdPartyResourceData((*[]ThirdPartyResourceData)(yyv6), d)
+				}
 			}
-		case "availableReplicas":
+		case "kind":
 			if r.TryDecodeAsNil() {
-				x.AvailableReplicas = 0
+				x.Kind = ""
 			} else {
-				x.AvailableReplicas = int(r.DecodeInt(codecSelferBitsize1234))
+				x.Kind = string(r.DecodeString())
 			}
-		case "unavailableReplicas":
+		case "apiVersion":
 			if r.TryDecodeAsNil() {
-				x.UnavailableReplicas = 0
+				x.APIVersion = ""
 			} else {
-				x.UnavailableReplicas = int(r.DecodeInt(codecSelferBitsize1234))
+				x.APIVersion = string(r.DecodeString())
 			}
 		default:
 			z.DecStructFieldNotFound(-1, yys3)
@@ -7278,110 +7138,107 @@ func (x *DeploymentStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
 	z.DecSendContainerState(codecSelfer_containerMapEnd1234)
 }
 
-func (x *DeploymentStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+func (x *ThirdPartyResourceDataList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
 	var h codecSelfer1234
 	z, r := codec1978.GenHelperDecoder(d)
 	_, _, _ = h, z, r
-	var yyj9 int
-	var yyb9 bool
-	var yyhl9 bool = l >= 0
-	yyj9++
-	if yyhl9 {
-		yyb9 = yyj9 > l
-	} else {
-		yyb9 = r.CheckBreak()
-	}
-	if yyb9 {
-		z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
-		return
-	}
-	z.DecSendContainerState(codecSelfer_containerArrayElem1234)
-	if r.TryDecodeAsNil() {
-		x.ObservedGeneration = 0
-	} else {
-		x.ObservedGeneration = int64(r.DecodeInt(64))
-	}
-	yyj9++
-	if yyhl9 {
-		yyb9 = yyj9 > l
+	var yyj10 int
+	var yyb10 bool
+	var yyhl10 bool = l >= 0
+	yyj10++
+	if yyhl10 {
+		yyb10 = yyj10 > l
 	} else {
-		yyb9 = r.CheckBreak()
+		yyb10 = r.CheckBreak()
 	}
-	if yyb9 {
+	if yyb10 {
 		z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
 		return
 	}
 	z.DecSendContainerState(codecSelfer_containerArrayElem1234)
 	if r.TryDecodeAsNil() {
-		x.Replicas = 0
+		x.ListMeta = pkg1_unversioned.ListMeta{}
 	} else {
-		x.Replicas = int(r.DecodeInt(codecSelferBitsize1234))
+		yyv11 := &x.ListMeta
+		yym12 := z.DecBinary()
+		_ = yym12
+		if false {
+		} else if z.HasExtensions() && z.DecExt(yyv11) {
+		} else {
+			z.DecFallback(yyv11, false)
+		}
 	}
-	yyj9++
-	if yyhl9 {
-		yyb9 = yyj9 > l
+	yyj10++
+	if yyhl10 {
+		yyb10 = yyj10 > l
 	} else {
-		yyb9 = r.CheckBreak()
+		yyb10 = r.CheckBreak()
 	}
-	if yyb9 {
+	if yyb10 {
 		z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
 		return
 	}
 	z.DecSendContainerState(codecSelfer_containerArrayElem1234)
 	if r.TryDecodeAsNil() {
-		x.UpdatedReplicas = 0
+		x.Items = nil
 	} else {
-		x.UpdatedReplicas = int(r.DecodeInt(codecSelferBitsize1234))
+		yyv13 := &x.Items
+		yym14 := z.DecBinary()
+		_ = yym14
+		if false {
+		} else {
+			h.decSliceThirdPartyResourceData((*[]ThirdPartyResourceData)(yyv13), d)
+		}
 	}
-	yyj9++
-	if yyhl9 {
-		yyb9 = yyj9 > l
+	yyj10++
+	if yyhl10 {
+		yyb10 = yyj10 > l
 	} else {
-		yyb9 = r.CheckBreak()
+		yyb10 = r.CheckBreak()
 	}
-	if yyb9 {
+	if yyb10 {
 		z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
 		return
 	}
 	z.DecSendContainerState(codecSelfer_containerArrayElem1234)
 	if r.TryDecodeAsNil() {
-		x.AvailableReplicas = 0
+		x.Kind = ""
 	} else {
-		x.AvailableReplicas = int(r.DecodeInt(codecSelferBitsize1234))
+		x.Kind = string(r.DecodeString())
 	}
-	yyj9++
-	if yyhl9 {
-		yyb9 = yyj9 > l
+	yyj10++
+	if yyhl10 {
+		yyb10 = yyj10 > l
 	} else {
-		yyb9 = r.CheckBreak()
+		yyb10 = r.CheckBreak()
 	}
-	if yyb9 {
+	if yyb10 {
 		z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
 		return
 	}
 	z.DecSendContainerState(codecSelfer_containerArrayElem1234)
 	if r.TryDecodeAsNil() {
-		x.UnavailableReplicas = 0
+		x.APIVersion = ""
 	} else {
-		x.UnavailableReplicas =
int(r.DecodeInt(codecSelferBitsize1234))
+		x.APIVersion = string(r.DecodeString())
 	}
 	for {
-		yyj9++
-		if yyhl9 {
-			yyb9 = yyj9 > l
+		yyj10++
+		if yyhl10 {
+			yyb10 = yyj10 > l
 		} else {
-			yyb9 = r.CheckBreak()
+			yyb10 = r.CheckBreak()
 		}
-		if yyb9 {
+		if yyb10 {
 			break
 		}
 		z.DecSendContainerState(codecSelfer_containerArrayElem1234)
-		z.DecStructFieldNotFound(yyj9-1, "")
+		z.DecStructFieldNotFound(yyj10-1, "")
 	}
 	z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
 }
 
-func (x *DeploymentList) CodecEncodeSelf(e *codec1978.Encoder) {
+func (x *Ingress) CodecEncodeSelf(e *codec1978.Encoder) {
 	var h codecSelfer1234
 	z, r := codec1978.GenHelperEncoder(e)
 	_, _, _ = h, z, r
@@ -7395,17 +7252,19 @@ func (x *DeploymentList) CodecEncodeSelf(e *codec1978.Encoder) {
 	} else {
 		yysep2 := !z.EncBinary()
 		yy2arr2 := z.EncBasicHandle().StructToArray
-		var yyq2 [4]bool
+		var yyq2 [5]bool
 		_, _, _ = yysep2, yyq2, yy2arr2
 		const yyr2 bool = false
 		yyq2[0] = true
-		yyq2[2] = x.Kind != ""
-		yyq2[3] = x.APIVersion != ""
+		yyq2[1] = true
+		yyq2[2] = true
+		yyq2[3] = x.Kind != ""
+		yyq2[4] = x.APIVersion != ""
 		var yynn2 int
 		if yyr2 || yy2arr2 {
-			r.EncodeArrayStart(4)
+			r.EncodeArrayStart(5)
 		} else {
-			yynn2 = 1
+			yynn2 = 0
 			for _, b := range yyq2 {
 				if b {
 					yynn2++
@@ -7417,14 +7276,8 @@ func (x *DeploymentList) CodecEncodeSelf(e *codec1978.Encoder) {
 		if yyr2 || yy2arr2 {
 			z.EncSendContainerState(codecSelfer_containerArrayElem1234)
 			if yyq2[0] {
-				yy4 := &x.ListMeta
-				yym5 := z.EncBinary()
-				_ = yym5
-				if false {
-				} else if z.HasExtensions() && z.EncExt(yy4) {
-				} else {
-					z.EncFallback(yy4)
-				}
+				yy4 := &x.ObjectMeta
+				yy4.CodecEncodeSelf(e)
 			} else {
 				r.EncodeNil()
 			}
@@ -7433,48 +7286,49 @@ func (x *DeploymentList) CodecEncodeSelf(e *codec1978.Encoder) {
 				z.EncSendContainerState(codecSelfer_containerMapKey1234)
 				r.EncodeString(codecSelferC_UTF81234, string("metadata"))
 				z.EncSendContainerState(codecSelfer_containerMapValue1234)
-				yy6 := &x.ListMeta
-				yym7 := z.EncBinary()
-				_ = yym7
-				if false {
-				} else if z.HasExtensions() && z.EncExt(yy6) {
-				} else {
-					z.EncFallback(yy6)
-				}
+				yy6 := &x.ObjectMeta
+				yy6.CodecEncodeSelf(e)
 			}
 		}
 		if yyr2 || yy2arr2 {
 			z.EncSendContainerState(codecSelfer_containerArrayElem1234)
-			if x.Items == nil {
-				r.EncodeNil()
+			if yyq2[1] {
+				yy9 := &x.Spec
+				yy9.CodecEncodeSelf(e)
 			} else {
-				yym9 := z.EncBinary()
-				_ = yym9
-				if false {
-				} else {
-					h.encSliceDeployment(([]Deployment)(x.Items), e)
-				}
+				r.EncodeNil()
 			}
 		} else {
-			z.EncSendContainerState(codecSelfer_containerMapKey1234)
-			r.EncodeString(codecSelferC_UTF81234, string("items"))
-			z.EncSendContainerState(codecSelfer_containerMapValue1234)
-			if x.Items == nil {
-				r.EncodeNil()
-			} else {
-				yym10 := z.EncBinary()
-				_ = yym10
-				if false {
-				} else {
-					h.encSliceDeployment(([]Deployment)(x.Items), e)
-				}
+			if yyq2[1] {
+				z.EncSendContainerState(codecSelfer_containerMapKey1234)
+				r.EncodeString(codecSelferC_UTF81234, string("spec"))
+				z.EncSendContainerState(codecSelfer_containerMapValue1234)
+				yy11 := &x.Spec
+				yy11.CodecEncodeSelf(e)
 			}
 		}
 		if yyr2 || yy2arr2 {
 			z.EncSendContainerState(codecSelfer_containerArrayElem1234)
 			if yyq2[2] {
-				yym12 := z.EncBinary()
-				_ = yym12
+				yy14 := &x.Status
+				yy14.CodecEncodeSelf(e)
+			} else {
+				r.EncodeNil()
+			}
+		} else {
+			if yyq2[2] {
+				z.EncSendContainerState(codecSelfer_containerMapKey1234)
+				r.EncodeString(codecSelferC_UTF81234, string("status"))
+				z.EncSendContainerState(codecSelfer_containerMapValue1234)
+				yy16 := &x.Status
+				yy16.CodecEncodeSelf(e)
+			}
+		}
+		if yyr2 || yy2arr2 {
+
			z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+			if yyq2[3] {
+				yym19 := z.EncBinary()
+				_ = yym19
 				if false {
 				} else {
 					r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
@@ -7483,12 +7337,12 @@ func (x *DeploymentList) CodecEncodeSelf(e *codec1978.Encoder) {
 				r.EncodeString(codecSelferC_UTF81234, "")
 			}
 		} else {
-			if yyq2[2] {
+			if yyq2[3] {
 				z.EncSendContainerState(codecSelfer_containerMapKey1234)
 				r.EncodeString(codecSelferC_UTF81234, string("kind"))
 				z.EncSendContainerState(codecSelfer_containerMapValue1234)
-				yym13 := z.EncBinary()
-				_ = yym13
+				yym20 := z.EncBinary()
+				_ = yym20
 				if false {
 				} else {
 					r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
@@ -7497,9 +7351,9 @@ func (x *DeploymentList) CodecEncodeSelf(e *codec1978.Encoder) {
 		}
 		if yyr2 || yy2arr2 {
 			z.EncSendContainerState(codecSelfer_containerArrayElem1234)
-			if yyq2[3] {
-				yym15 := z.EncBinary()
-				_ = yym15
+			if yyq2[4] {
+				yym22 := z.EncBinary()
+				_ = yym22
 				if false {
 				} else {
 					r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
@@ -7508,12 +7362,12 @@ func (x *DeploymentList) CodecEncodeSelf(e *codec1978.Encoder) {
 				r.EncodeString(codecSelferC_UTF81234, "")
 			}
 		} else {
-			if yyq2[3] {
+			if yyq2[4] {
 				z.EncSendContainerState(codecSelfer_containerMapKey1234)
 				r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
 				z.EncSendContainerState(codecSelfer_containerMapValue1234)
-				yym16 := z.EncBinary()
-				_ = yym16
+				yym23 := z.EncBinary()
+				_ = yym23
 				if false {
 				} else {
 					r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
@@ -7529,7 +7383,7 @@ func (x *DeploymentList) CodecEncodeSelf(e *codec1978.Encoder) {
 	}
 }
 
-func (x *DeploymentList) CodecDecodeSelf(d *codec1978.Decoder) {
+func (x *Ingress) CodecDecodeSelf(d *codec1978.Decoder) {
 	var h codecSelfer1234
 	z, r := codec1978.GenHelperDecoder(d)
 	_, _, _ = h, z, r
@@ -7559,7 +7413,7 @@ func (x *DeploymentList) CodecDecodeSelf(d *codec1978.Decoder) {
 	}
 }
 
-func (x *DeploymentList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+func (x *Ingress) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
 	var h codecSelfer1234
 	z, r := codec1978.GenHelperDecoder(d)
 	_, _, _ = h, z, r
@@ -7583,28 +7437,24 @@ func (x *DeploymentList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
 		switch yys3 {
 		case "metadata":
 			if r.TryDecodeAsNil() {
-				x.ListMeta = pkg1_unversioned.ListMeta{}
+				x.ObjectMeta = pkg2_api.ObjectMeta{}
 			} else {
-				yyv4 := &x.ListMeta
-				yym5 := z.DecBinary()
-				_ = yym5
-				if false {
-				} else if z.HasExtensions() && z.DecExt(yyv4) {
-				} else {
-					z.DecFallback(yyv4, false)
-				}
+				yyv4 := &x.ObjectMeta
+				yyv4.CodecDecodeSelf(d)
 			}
-		case "items":
+		case "spec":
 			if r.TryDecodeAsNil() {
-				x.Items = nil
+				x.Spec = IngressSpec{}
 			} else {
-				yyv6 := &x.Items
-				yym7 := z.DecBinary()
-				_ = yym7
-				if false {
-				} else {
-					h.decSliceDeployment((*[]Deployment)(yyv6), d)
-				}
+				yyv5 := &x.Spec
+				yyv5.CodecDecodeSelf(d)
+			}
+		case "status":
+			if r.TryDecodeAsNil() {
+				x.Status = IngressStatus{}
+			} else {
+				yyv6 := &x.Status
+				yyv6.CodecDecodeSelf(d)
 			}
 		case "kind":
 			if r.TryDecodeAsNil() {
@@ -7625,65 +7475,71 @@ func (x *DeploymentList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
 	z.DecSendContainerState(codecSelfer_containerMapEnd1234)
 }
 
-func (x *DeploymentList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+func (x *Ingress) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
 	var h codecSelfer1234
 	z, r := codec1978.GenHelperDecoder(d)
 	_, _, _ = h, z, r
-	var yyj10 int
-	var yyb10 bool
-	var yyhl10 bool = l >= 0
-	yyj10++
-	if yyhl10 {
-		yyb10
= yyj10 > l
+	var yyj9 int
+	var yyb9 bool
+	var yyhl9 bool = l >= 0
+	yyj9++
+	if yyhl9 {
+		yyb9 = yyj9 > l
 	} else {
-		yyb10 = r.CheckBreak()
+		yyb9 = r.CheckBreak()
 	}
-	if yyb10 {
+	if yyb9 {
 		z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
 		return
 	}
 	z.DecSendContainerState(codecSelfer_containerArrayElem1234)
 	if r.TryDecodeAsNil() {
-		x.ListMeta = pkg1_unversioned.ListMeta{}
+		x.ObjectMeta = pkg2_api.ObjectMeta{}
 	} else {
-		yyv11 := &x.ListMeta
-		yym12 := z.DecBinary()
-		_ = yym12
-		if false {
-		} else if z.HasExtensions() && z.DecExt(yyv11) {
-		} else {
-			z.DecFallback(yyv11, false)
-		}
+		yyv10 := &x.ObjectMeta
+		yyv10.CodecDecodeSelf(d)
 	}
-	yyj10++
-	if yyhl10 {
-		yyb10 = yyj10 > l
+	yyj9++
+	if yyhl9 {
+		yyb9 = yyj9 > l
 	} else {
-		yyb10 = r.CheckBreak()
+		yyb9 = r.CheckBreak()
 	}
-	if yyb10 {
+	if yyb9 {
 		z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
 		return
 	}
 	z.DecSendContainerState(codecSelfer_containerArrayElem1234)
 	if r.TryDecodeAsNil() {
-		x.Items = nil
+		x.Spec = IngressSpec{}
 	} else {
-		yyv13 := &x.Items
-		yym14 := z.DecBinary()
-		_ = yym14
-		if false {
-		} else {
-			h.decSliceDeployment((*[]Deployment)(yyv13), d)
-		}
+		yyv11 := &x.Spec
+		yyv11.CodecDecodeSelf(d)
 	}
-	yyj10++
-	if yyhl10 {
-		yyb10 = yyj10 > l
+	yyj9++
+	if yyhl9 {
+		yyb9 = yyj9 > l
 	} else {
-		yyb10 = r.CheckBreak()
+		yyb9 = r.CheckBreak()
 	}
-	if yyb10 {
+	if yyb9 {
+		z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+		return
+	}
+	z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+	if r.TryDecodeAsNil() {
+		x.Status = IngressStatus{}
+	} else {
+		yyv12 := &x.Status
+		yyv12.CodecDecodeSelf(d)
+	}
+	yyj9++
+	if yyhl9 {
+		yyb9 = yyj9 > l
+	} else {
+		yyb9 = r.CheckBreak()
+	}
+	if yyb9 {
 		z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
 		return
 	}
@@ -7693,13 +7549,13 @@ func (x *DeploymentList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
 	} else {
 		x.Kind = string(r.DecodeString())
 	}
-	yyj10++
-	if yyhl10 {
-		yyb10 = yyj10 > l
+	yyj9++
+	if yyhl9 {
+		yyb9 = yyj9 > l
 	} else {
-		yyb10 = r.CheckBreak()
+		yyb9 = r.CheckBreak()
 	}
-	if yyb10 {
+	if yyb9 {
 		z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
 		return
 	}
@@ -7710,22 +7566,22 @@ func (x *DeploymentList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
 		x.APIVersion = string(r.DecodeString())
 	}
 	for {
-		yyj10++
-		if yyhl10 {
-			yyb10 = yyj10 > l
+		yyj9++
+		if yyhl9 {
+			yyb9 = yyj9 > l
 		} else {
-			yyb10 = r.CheckBreak()
+			yyb9 = r.CheckBreak()
 		}
-		if yyb10 {
+		if yyb9 {
 			break
 		}
 		z.DecSendContainerState(codecSelfer_containerArrayElem1234)
-		z.DecStructFieldNotFound(yyj10-1, "")
+		z.DecStructFieldNotFound(yyj9-1, "")
 	}
 	z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
 }
 
-func (x *DaemonSetSpec) CodecEncodeSelf(e *codec1978.Encoder) {
+func (x *IngressList) CodecEncodeSelf(e *codec1978.Encoder) {
 	var h codecSelfer1234
 	z, r := codec1978.GenHelperEncoder(e)
 	_, _, _ = h, z, r
@@ -7739,13 +7595,15 @@ func (x *DaemonSetSpec) CodecEncodeSelf(e *codec1978.Encoder) {
 	} else {
 		yysep2 := !z.EncBinary()
 		yy2arr2 := z.EncBasicHandle().StructToArray
-		var yyq2 [2]bool
+		var yyq2 [4]bool
 		_, _, _ = yysep2, yyq2, yy2arr2
 		const yyr2 bool = false
-		yyq2[0] = x.Selector != nil
+		yyq2[0] = true
+		yyq2[2] = x.Kind != ""
+		yyq2[3] = x.APIVersion != ""
 		var yynn2 int
 		if yyr2 || yy2arr2 {
-			r.EncodeArrayStart(2)
+			r.EncodeArrayStart(4)
 		} else {
 			yynn2 = 1
 			for _, b := range yyq2 {
@@ -7759,16 +7617,13 @@ func (x *DaemonSetSpec) CodecEncodeSelf(e *codec1978.Encoder) {
 		if yyr2 || yy2arr2 {
			z.EncSendContainerState(codecSelfer_containerArrayElem1234)
 			if yyq2[0] {
-				if x.Selector == nil {
-					r.EncodeNil()
+				yy4 := &x.ListMeta
+				yym5 := z.EncBinary()
+				_ = yym5
+				if false {
+				} else if z.HasExtensions() && z.EncExt(yy4) {
 				} else {
-					yym4 := z.EncBinary()
-					_ = yym4
-					if false {
-					} else if z.HasExtensions() && z.EncExt(x.Selector) {
-					} else {
-						z.EncFallback(x.Selector)
-					}
+					z.EncFallback(yy4)
 				}
 			} else {
 				r.EncodeNil()
@@ -7776,31 +7631,94 @@ func (x *DaemonSetSpec) CodecEncodeSelf(e *codec1978.Encoder) {
 		} else {
 			if yyq2[0] {
 				z.EncSendContainerState(codecSelfer_containerMapKey1234)
-				r.EncodeString(codecSelferC_UTF81234, string("selector"))
+				r.EncodeString(codecSelferC_UTF81234, string("metadata"))
 				z.EncSendContainerState(codecSelfer_containerMapValue1234)
-				if x.Selector == nil {
-					r.EncodeNil()
+				yy6 := &x.ListMeta
+				yym7 := z.EncBinary()
+				_ = yym7
+				if false {
+				} else if z.HasExtensions() && z.EncExt(yy6) {
 				} else {
-					yym5 := z.EncBinary()
-					_ = yym5
-					if false {
-					} else if z.HasExtensions() && z.EncExt(x.Selector) {
-					} else {
-						z.EncFallback(x.Selector)
-					}
+					z.EncFallback(yy6)
 				}
 			}
 		}
 		if yyr2 || yy2arr2 {
 			z.EncSendContainerState(codecSelfer_containerArrayElem1234)
-			yy7 := &x.Template
-			yy7.CodecEncodeSelf(e)
+			if x.Items == nil {
+				r.EncodeNil()
+			} else {
+				yym9 := z.EncBinary()
+				_ = yym9
+				if false {
+				} else {
+					h.encSliceIngress(([]Ingress)(x.Items), e)
+				}
+			}
 		} else {
 			z.EncSendContainerState(codecSelfer_containerMapKey1234)
-			r.EncodeString(codecSelferC_UTF81234, string("template"))
+			r.EncodeString(codecSelferC_UTF81234, string("items"))
 			z.EncSendContainerState(codecSelfer_containerMapValue1234)
-			yy9 := &x.Template
-			yy9.CodecEncodeSelf(e)
+			if x.Items == nil {
+				r.EncodeNil()
+			} else {
+				yym10 := z.EncBinary()
+				_ = yym10
+				if false {
+				} else {
+					h.encSliceIngress(([]Ingress)(x.Items), e)
+				}
+			}
+		}
+		if yyr2 || yy2arr2 {
+			z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+			if yyq2[2] {
+				yym12 := z.EncBinary()
+				_ = yym12
+				if false {
+				} else {
+					r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+				}
+			} else {
+				r.EncodeString(codecSelferC_UTF81234, "")
+			}
+		} else {
+			if yyq2[2] {
+				z.EncSendContainerState(codecSelfer_containerMapKey1234)
+				r.EncodeString(codecSelferC_UTF81234, string("kind"))
+				z.EncSendContainerState(codecSelfer_containerMapValue1234)
+				yym13 := z.EncBinary()
+				_ = yym13
+				if false {
+				} else {
+					r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+				}
+			}
+		}
+		if yyr2 || yy2arr2 {
+			z.EncSendContainerState(codecSelfer_containerArrayElem1234)
+			if yyq2[3] {
+				yym15 := z.EncBinary()
+				_ = yym15
+				if false {
+				} else {
+					r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+				}
+			} else {
+				r.EncodeString(codecSelferC_UTF81234, "")
+			}
+		} else {
+			if yyq2[3] {
+				z.EncSendContainerState(codecSelfer_containerMapKey1234)
+				r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
+				z.EncSendContainerState(codecSelfer_containerMapValue1234)
+				yym16 := z.EncBinary()
+				_ = yym16
+				if false {
+				} else {
+					r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+				}
+			}
 		}
 		if yyr2 || yy2arr2 {
 			z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
@@ -7811,7 +7729,7 @@ func (x *DaemonSetSpec) CodecEncodeSelf(e *codec1978.Encoder) {
 	}
 }
 
-func (x *DaemonSetSpec) CodecDecodeSelf(d *codec1978.Decoder) {
+func (x *IngressList) CodecDecodeSelf(d *codec1978.Decoder) {
 	var h codecSelfer1234
 	z, r := codec1978.GenHelperDecoder(d)
 	_, _, _ = h, z, r
@@ -7841,7 +7759,7 @@ func (x *DaemonSetSpec) CodecDecodeSelf(d
*codec1978.Decoder) {
 	}
 }
 
-func (x *DaemonSetSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+func (x *IngressList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
 	var h codecSelfer1234
 	z, r := codec1978.GenHelperDecoder(d)
 	_, _, _ = h, z, r
@@ -7863,29 +7781,42 @@ func (x *DaemonSetSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
 		yys3 := string(yys3Slc)
 		z.DecSendContainerState(codecSelfer_containerMapValue1234)
 		switch yys3 {
-		case "selector":
+		case "metadata":
 			if r.TryDecodeAsNil() {
-				if x.Selector != nil {
-					x.Selector = nil
-				}
+				x.ListMeta = pkg1_unversioned.ListMeta{}
 			} else {
-				if x.Selector == nil {
-					x.Selector = new(pkg1_unversioned.LabelSelector)
-				}
+				yyv4 := &x.ListMeta
 				yym5 := z.DecBinary()
 				_ = yym5
 				if false {
-				} else if z.HasExtensions() && z.DecExt(x.Selector) {
+				} else if z.HasExtensions() && z.DecExt(yyv4) {
 				} else {
-					z.DecFallback(x.Selector, false)
+					z.DecFallback(yyv4, false)
 				}
 			}
-		case "template":
+		case "items":
 			if r.TryDecodeAsNil() {
-				x.Template = pkg2_api.PodTemplateSpec{}
+				x.Items = nil
 			} else {
-				yyv6 := &x.Template
-				yyv6.CodecDecodeSelf(d)
+				yyv6 := &x.Items
+				yym7 := z.DecBinary()
+				_ = yym7
+				if false {
+				} else {
+					h.decSliceIngress((*[]Ingress)(yyv6), d)
+				}
+			}
+		case "kind":
+			if r.TryDecodeAsNil() {
+				x.Kind = ""
+			} else {
+				x.Kind = string(r.DecodeString())
+			}
+		case "apiVersion":
+			if r.TryDecodeAsNil() {
+				x.APIVersion = ""
+			} else {
+				x.APIVersion = string(r.DecodeString())
 			}
 		default:
 			z.DecStructFieldNotFound(-1, yys3)
@@ -7894,74 +7825,107 @@ func (x *DaemonSetSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
 	z.DecSendContainerState(codecSelfer_containerMapEnd1234)
 }
 
-func (x *DaemonSetSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+func (x *IngressList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
 	var h codecSelfer1234
 	z, r := codec1978.GenHelperDecoder(d)
 	_, _, _ = h, z, r
-	var yyj7 int
-	var yyb7 bool
-	var yyhl7 bool = l >= 0
-	yyj7++
-	if yyhl7 {
-		yyb7 = yyj7 > l
+	var yyj10 int
+	var yyb10 bool
+	var yyhl10 bool = l >= 0
+	yyj10++
+	if yyhl10 {
+		yyb10 = yyj10 > l
 	} else {
-		yyb7 = r.CheckBreak()
+		yyb10 = r.CheckBreak()
 	}
-	if yyb7 {
+	if yyb10 {
 		z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
 		return
 	}
 	z.DecSendContainerState(codecSelfer_containerArrayElem1234)
 	if r.TryDecodeAsNil() {
-		if x.Selector != nil {
-			x.Selector = nil
-		}
+		x.ListMeta = pkg1_unversioned.ListMeta{}
 	} else {
-		if x.Selector == nil {
-			x.Selector = new(pkg1_unversioned.LabelSelector)
+		yyv11 := &x.ListMeta
+		yym12 := z.DecBinary()
+		_ = yym12
+		if false {
+		} else if z.HasExtensions() && z.DecExt(yyv11) {
+		} else {
+			z.DecFallback(yyv11, false)
 		}
-		yym9 := z.DecBinary()
-		_ = yym9
+	}
+	yyj10++
+	if yyhl10 {
+		yyb10 = yyj10 > l
+	} else {
+		yyb10 = r.CheckBreak()
+	}
+	if yyb10 {
+		z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+		return
+	}
+	z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+	if r.TryDecodeAsNil() {
+		x.Items = nil
+	} else {
+		yyv13 := &x.Items
+		yym14 := z.DecBinary()
+		_ = yym14
 		if false {
-		} else if z.HasExtensions() && z.DecExt(x.Selector) {
 		} else {
-			z.DecFallback(x.Selector, false)
+			h.decSliceIngress((*[]Ingress)(yyv13), d)
 		}
 	}
-	yyj7++
-	if yyhl7 {
-		yyb7 = yyj7 > l
+	yyj10++
+	if yyhl10 {
+		yyb10 = yyj10 > l
 	} else {
-		yyb7 = r.CheckBreak()
+		yyb10 = r.CheckBreak()
 	}
-	if yyb7 {
+	if yyb10 {
 		z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
 		return
 	}
 	z.DecSendContainerState(codecSelfer_containerArrayElem1234)
 	if
r.TryDecodeAsNil() {
-		x.Template = pkg2_api.PodTemplateSpec{}
+		x.Kind = ""
 	} else {
-		yyv10 := &x.Template
-		yyv10.CodecDecodeSelf(d)
+		x.Kind = string(r.DecodeString())
+	}
+	yyj10++
+	if yyhl10 {
+		yyb10 = yyj10 > l
+	} else {
+		yyb10 = r.CheckBreak()
+	}
+	if yyb10 {
+		z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
+		return
+	}
+	z.DecSendContainerState(codecSelfer_containerArrayElem1234)
+	if r.TryDecodeAsNil() {
+		x.APIVersion = ""
+	} else {
+		x.APIVersion = string(r.DecodeString())
 	}
 	for {
-		yyj7++
-		if yyhl7 {
-			yyb7 = yyj7 > l
+		yyj10++
+		if yyhl10 {
+			yyb10 = yyj10 > l
 		} else {
-			yyb7 = r.CheckBreak()
+			yyb10 = r.CheckBreak()
 		}
-		if yyb7 {
+		if yyb10 {
 			break
 		}
 		z.DecSendContainerState(codecSelfer_containerArrayElem1234)
-		z.DecStructFieldNotFound(yyj7-1, "")
+		z.DecStructFieldNotFound(yyj10-1, "")
 	}
 	z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
 }
 
-func (x *DaemonSetStatus) CodecEncodeSelf(e *codec1978.Encoder) {
+func (x *IngressSpec) CodecEncodeSelf(e *codec1978.Encoder) {
 	var h codecSelfer1234
 	z, r := codec1978.GenHelperEncoder(e)
 	_, _, _ = h, z, r
@@ -7978,11 +7942,14 @@ func (x *DaemonSetStatus) CodecEncodeSelf(e *codec1978.Encoder) {
 		var yyq2 [3]bool
 		_, _, _ = yysep2, yyq2, yy2arr2
 		const yyr2 bool = false
+		yyq2[0] = x.Backend != nil
+		yyq2[1] = len(x.TLS) != 0
+		yyq2[2] = len(x.Rules) != 0
 		var yynn2 int
 		if yyr2 || yy2arr2 {
 			r.EncodeArrayStart(3)
 		} else {
-			yynn2 = 3
+			yynn2 = 0
 			for _, b := range yyq2 {
 				if b {
 					yynn2++
@@ -7993,59 +7960,91 @@ func (x *DaemonSetStatus) CodecEncodeSelf(e *codec1978.Encoder) {
 		}
 		if yyr2 || yy2arr2 {
 			z.EncSendContainerState(codecSelfer_containerArrayElem1234)
-			yym4 := z.EncBinary()
-			_ = yym4
-			if false {
+			if yyq2[0] {
+				if x.Backend == nil {
+					r.EncodeNil()
+				} else {
+					x.Backend.CodecEncodeSelf(e)
+				}
 			} else {
-				r.EncodeInt(int64(x.CurrentNumberScheduled))
+				r.EncodeNil()
 			}
 		} else {
-			z.EncSendContainerState(codecSelfer_containerMapKey1234)
-			r.EncodeString(codecSelferC_UTF81234, string("currentNumberScheduled"))
-			z.EncSendContainerState(codecSelfer_containerMapValue1234)
-			yym5 := z.EncBinary()
-			_ = yym5
-			if false {
-			} else {
-				r.EncodeInt(int64(x.CurrentNumberScheduled))
+			if yyq2[0] {
+				z.EncSendContainerState(codecSelfer_containerMapKey1234)
+				r.EncodeString(codecSelferC_UTF81234, string("backend"))
+				z.EncSendContainerState(codecSelfer_containerMapValue1234)
+				if x.Backend == nil {
+					r.EncodeNil()
+				} else {
+					x.Backend.CodecEncodeSelf(e)
+				}
			}
 		}
 		if yyr2 || yy2arr2 {
 			z.EncSendContainerState(codecSelfer_containerArrayElem1234)
-			yym7 := z.EncBinary()
-			_ = yym7
-			if false {
+			if yyq2[1] {
+				if x.TLS == nil {
+					r.EncodeNil()
+				} else {
+					yym7 := z.EncBinary()
+					_ = yym7
+					if false {
+					} else {
+						h.encSliceIngressTLS(([]IngressTLS)(x.TLS), e)
+					}
+				}
 			} else {
-				r.EncodeInt(int64(x.NumberMisscheduled))
+				r.EncodeNil()
 			}
 		} else {
-			z.EncSendContainerState(codecSelfer_containerMapKey1234)
-			r.EncodeString(codecSelferC_UTF81234, string("numberMisscheduled"))
-			z.EncSendContainerState(codecSelfer_containerMapValue1234)
-			yym8 := z.EncBinary()
-			_ = yym8
-			if false {
-			} else {
-				r.EncodeInt(int64(x.NumberMisscheduled))
+			if yyq2[1] {
+				z.EncSendContainerState(codecSelfer_containerMapKey1234)
+				r.EncodeString(codecSelferC_UTF81234, string("tls"))
+				z.EncSendContainerState(codecSelfer_containerMapValue1234)
+				if x.TLS == nil {
+					r.EncodeNil()
+				} else {
+					yym8 := z.EncBinary()
+					_ = yym8
+					if false {
+					} else {
+						h.encSliceIngressTLS(([]IngressTLS)(x.TLS), e)
+					}
+				}
+			}
 		}
 		if yyr2 || yy2arr2 {
			z.EncSendContainerState(codecSelfer_containerArrayElem1234)
-			yym10 := z.EncBinary()
-			_ = yym10
-			if false {
+			if yyq2[2] {
+				if x.Rules == nil {
+					r.EncodeNil()
+				} else {
+					yym10 := z.EncBinary()
+					_ = yym10
+					if false {
+					} else {
+						h.encSliceIngressRule(([]IngressRule)(x.Rules), e)
+					}
+				}
 			} else {
-				r.EncodeInt(int64(x.DesiredNumberScheduled))
+				r.EncodeNil()
 			}
 		} else {
-			z.EncSendContainerState(codecSelfer_containerMapKey1234)
-			r.EncodeString(codecSelferC_UTF81234, string("desiredNumberScheduled"))
-			z.EncSendContainerState(codecSelfer_containerMapValue1234)
-			yym11 := z.EncBinary()
-			_ = yym11
-			if false {
-			} else {
-				r.EncodeInt(int64(x.DesiredNumberScheduled))
+			if yyq2[2] {
+				z.EncSendContainerState(codecSelfer_containerMapKey1234)
+				r.EncodeString(codecSelferC_UTF81234, string("rules"))
+				z.EncSendContainerState(codecSelfer_containerMapValue1234)
+				if x.Rules == nil {
+					r.EncodeNil()
+				} else {
+					yym11 := z.EncBinary()
+					_ = yym11
+					if false {
+					} else {
+						h.encSliceIngressRule(([]IngressRule)(x.Rules), e)
+					}
+				}
			}
 		}
 		if yyr2 || yy2arr2 {
@@ -8057,7 +8056,7 @@ func (x *DaemonSetStatus) CodecEncodeSelf(e *codec1978.Encoder) {
 	}
 }
 
-func (x *DaemonSetStatus) CodecDecodeSelf(d *codec1978.Decoder) {
+func (x *IngressSpec) CodecDecodeSelf(d *codec1978.Decoder) {
 	var h codecSelfer1234
 	z, r := codec1978.GenHelperDecoder(d)
 	_, _, _ = h, z, r
@@ -8087,7 +8086,7 @@ func (x *DaemonSetStatus) CodecDecodeSelf(d *codec1978.Decoder) {
 	}
 }
 
-func (x *DaemonSetStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+func (x *IngressSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
 	var h codecSelfer1234
 	z, r := codec1978.GenHelperDecoder(d)
 	_, _, _ = h, z, r
@@ -8109,23 +8108,40 @@ func (x *DaemonSetStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
 		yys3 := string(yys3Slc)
 		z.DecSendContainerState(codecSelfer_containerMapValue1234)
 		switch yys3 {
-		case "currentNumberScheduled":
+		case "backend":
 			if r.TryDecodeAsNil() {
-				x.CurrentNumberScheduled = 0
+				if x.Backend != nil {
+					x.Backend = nil
+				}
 			} else {
-				x.CurrentNumberScheduled = int(r.DecodeInt(codecSelferBitsize1234))
+				if x.Backend == nil {
+					x.Backend = new(IngressBackend)
+				}
+				x.Backend.CodecDecodeSelf(d)
 			}
-		case "numberMisscheduled":
+		case "tls":
 			if r.TryDecodeAsNil() {
-				x.NumberMisscheduled = 0
+				x.TLS = nil
 			} else {
-				x.NumberMisscheduled = int(r.DecodeInt(codecSelferBitsize1234))
+				yyv5 := &x.TLS
+				yym6 := z.DecBinary()
+				_ = yym6
+				if false {
+				} else {
+					h.decSliceIngressTLS((*[]IngressTLS)(yyv5), d)
+				}
 			}
-		case "desiredNumberScheduled":
+		case "rules":
 			if r.TryDecodeAsNil() {
-				x.DesiredNumberScheduled = 0
+				x.Rules = nil
 			} else {
-				x.DesiredNumberScheduled = int(r.DecodeInt(codecSelferBitsize1234))
+				yyv7 := &x.Rules
+				yym8 := z.DecBinary()
+				_ = yym8
+				if false {
+				} else {
+					h.decSliceIngressRule((*[]IngressRule)(yyv7), d)
+				}
 			}
 		default:
 			z.DecStructFieldNotFound(-1, yys3)
@@ -8134,78 +8150,95 @@ func (x *DaemonSetStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
 	z.DecSendContainerState(codecSelfer_containerMapEnd1234)
 }
 
-func (x *DaemonSetStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+func (x *IngressSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
 	var h codecSelfer1234
 	z, r := codec1978.GenHelperDecoder(d)
 	_, _, _ = h, z, r
-	var yyj7 int
-	var yyb7 bool
-	var yyhl7 bool = l >= 0
-	yyj7++
-	if yyhl7 {
-		yyb7 = yyj7 > l
+	var yyj9 int
+	var yyb9 bool
+	var yyhl9 bool = l >= 0
+	yyj9++
+	if yyhl9 {
+		yyb9 = yyj9 > l
 	} else {
-		yyb7 = r.CheckBreak()
+		yyb9 =
r.CheckBreak()
 	}
-	if yyb7 {
+	if yyb9 {
 		z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
 		return
 	}
 	z.DecSendContainerState(codecSelfer_containerArrayElem1234)
 	if r.TryDecodeAsNil() {
-		x.CurrentNumberScheduled = 0
+		if x.Backend != nil {
+			x.Backend = nil
+		}
 	} else {
-		x.CurrentNumberScheduled = int(r.DecodeInt(codecSelferBitsize1234))
+		if x.Backend == nil {
+			x.Backend = new(IngressBackend)
+		}
+		x.Backend.CodecDecodeSelf(d)
 	}
-	yyj7++
-	if yyhl7 {
-		yyb7 = yyj7 > l
+	yyj9++
+	if yyhl9 {
+		yyb9 = yyj9 > l
 	} else {
-		yyb7 = r.CheckBreak()
+		yyb9 = r.CheckBreak()
 	}
-	if yyb7 {
+	if yyb9 {
 		z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
 		return
 	}
 	z.DecSendContainerState(codecSelfer_containerArrayElem1234)
 	if r.TryDecodeAsNil() {
-		x.NumberMisscheduled = 0
+		x.TLS = nil
 	} else {
-		x.NumberMisscheduled = int(r.DecodeInt(codecSelferBitsize1234))
+		yyv11 := &x.TLS
+		yym12 := z.DecBinary()
+		_ = yym12
+		if false {
+		} else {
+			h.decSliceIngressTLS((*[]IngressTLS)(yyv11), d)
+		}
 	}
-	yyj7++
-	if yyhl7 {
-		yyb7 = yyj7 > l
+	yyj9++
+	if yyhl9 {
+		yyb9 = yyj9 > l
 	} else {
-		yyb7 = r.CheckBreak()
+		yyb9 = r.CheckBreak()
 	}
-	if yyb7 {
+	if yyb9 {
 		z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
 		return
 	}
 	z.DecSendContainerState(codecSelfer_containerArrayElem1234)
 	if r.TryDecodeAsNil() {
-		x.DesiredNumberScheduled = 0
+		x.Rules = nil
 	} else {
-		x.DesiredNumberScheduled = int(r.DecodeInt(codecSelferBitsize1234))
+		yyv13 := &x.Rules
+		yym14 := z.DecBinary()
+		_ = yym14
+		if false {
+		} else {
+			h.decSliceIngressRule((*[]IngressRule)(yyv13), d)
+		}
 	}
 	for {
-		yyj7++
-		if yyhl7 {
-			yyb7 = yyj7 > l
+		yyj9++
+		if yyhl9 {
+			yyb9 = yyj9 > l
 		} else {
-			yyb7 = r.CheckBreak()
+			yyb9 = r.CheckBreak()
 		}
-		if yyb7 {
+		if yyb9 {
 			break
 		}
 		z.DecSendContainerState(codecSelfer_containerArrayElem1234)
-		z.DecStructFieldNotFound(yyj7-1, "")
+		z.DecStructFieldNotFound(yyj9-1, "")
 	}
 	z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
 }
 
-func (x *DaemonSet) CodecEncodeSelf(e *codec1978.Encoder) {
+func (x *IngressTLS) CodecEncodeSelf(e *codec1978.Encoder) {
 	var h codecSelfer1234
 	z, r := codec1978.GenHelperEncoder(e)
 	_, _, _ = h, z, r
@@ -8219,17 +8252,14 @@ func (x *DaemonSet) CodecEncodeSelf(e *codec1978.Encoder) {
 	} else {
 		yysep2 := !z.EncBinary()
 		yy2arr2 := z.EncBasicHandle().StructToArray
-		var yyq2 [5]bool
+		var yyq2 [2]bool
 		_, _, _ = yysep2, yyq2, yy2arr2
 		const yyr2 bool = false
-		yyq2[0] = true
-		yyq2[1] = true
-		yyq2[2] = true
-		yyq2[3] = x.Kind != ""
-		yyq2[4] = x.APIVersion != ""
+		yyq2[0] = len(x.Hosts) != 0
+		yyq2[1] = x.SecretName != ""
 		var yynn2 int
 		if yyr2 || yy2arr2 {
-			r.EncodeArrayStart(5)
+			r.EncodeArrayStart(2)
 		} else {
 			yynn2 = 0
 			for _, b := range yyq2 {
@@ -8243,101 +8273,58 @@ func (x *DaemonSet) CodecEncodeSelf(e *codec1978.Encoder) {
 		if yyr2 || yy2arr2 {
 			z.EncSendContainerState(codecSelfer_containerArrayElem1234)
 			if yyq2[0] {
-				yy4 := &x.ObjectMeta
-				yy4.CodecEncodeSelf(e)
+				if x.Hosts == nil {
+					r.EncodeNil()
+				} else {
+					yym4 := z.EncBinary()
+					_ = yym4
+					if false {
+					} else {
+						z.F.EncSliceStringV(x.Hosts, false, e)
+					}
+				}
 			} else {
 				r.EncodeNil()
 			}
 		} else {
 			if yyq2[0] {
 				z.EncSendContainerState(codecSelfer_containerMapKey1234)
-				r.EncodeString(codecSelferC_UTF81234, string("metadata"))
+				r.EncodeString(codecSelferC_UTF81234, string("hosts"))
 				z.EncSendContainerState(codecSelfer_containerMapValue1234)
-				yy6 := &x.ObjectMeta
-				yy6.CodecEncodeSelf(e)
+				if x.Hosts == nil {
+					r.EncodeNil()
+				} else {
+					yym5 := z.EncBinary()
+					_ = yym5
+					if false {
+					} else {

						z.F.EncSliceStringV(x.Hosts, false, e)
+					}
+				}
 			}
 		}
 		if yyr2 || yy2arr2 {
 			z.EncSendContainerState(codecSelfer_containerArrayElem1234)
 			if yyq2[1] {
-				yy9 := &x.Spec
-				yy9.CodecEncodeSelf(e)
-			} else {
-				r.EncodeNil()
-			}
-		} else {
-			if yyq2[1] {
-				z.EncSendContainerState(codecSelfer_containerMapKey1234)
-				r.EncodeString(codecSelferC_UTF81234, string("spec"))
-				z.EncSendContainerState(codecSelfer_containerMapValue1234)
-				yy11 := &x.Spec
-				yy11.CodecEncodeSelf(e)
-			}
-		}
-		if yyr2 || yy2arr2 {
-			z.EncSendContainerState(codecSelfer_containerArrayElem1234)
-			if yyq2[2] {
-				yy14 := &x.Status
-				yy14.CodecEncodeSelf(e)
-			} else {
-				r.EncodeNil()
-			}
-		} else {
-			if yyq2[2] {
-				z.EncSendContainerState(codecSelfer_containerMapKey1234)
-				r.EncodeString(codecSelferC_UTF81234, string("status"))
-				z.EncSendContainerState(codecSelfer_containerMapValue1234)
-				yy16 := &x.Status
-				yy16.CodecEncodeSelf(e)
-			}
-		}
-		if yyr2 || yy2arr2 {
-			z.EncSendContainerState(codecSelfer_containerArrayElem1234)
-			if yyq2[3] {
-				yym19 := z.EncBinary()
-				_ = yym19
+				yym7 := z.EncBinary()
+				_ = yym7
 				if false {
 				} else {
-					r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
+					r.EncodeString(codecSelferC_UTF81234, string(x.SecretName))
 				}
 			} else {
 				r.EncodeString(codecSelferC_UTF81234, "")
 			}
 		} else {
-			if yyq2[3] {
+			if yyq2[1] {
 				z.EncSendContainerState(codecSelfer_containerMapKey1234)
-				r.EncodeString(codecSelferC_UTF81234, string("kind"))
+				r.EncodeString(codecSelferC_UTF81234, string("secretName"))
 				z.EncSendContainerState(codecSelfer_containerMapValue1234)
-				yym20 := z.EncBinary()
-				_ = yym20
+				yym8 := z.EncBinary()
+				_ = yym8
 				if false {
 				} else {
-					r.EncodeString(codecSelferC_UTF81234, string(x.Kind))
-				}
-			}
-		}
-		if yyr2 || yy2arr2 {
-			z.EncSendContainerState(codecSelfer_containerArrayElem1234)
-			if yyq2[4] {
-				yym22 := z.EncBinary()
-				_ = yym22
-				if false {
-				} else {
-					r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
-				}
-			} else {
-				r.EncodeString(codecSelferC_UTF81234, "")
-			}
-		} else {
-			if yyq2[4] {
-				z.EncSendContainerState(codecSelfer_containerMapKey1234)
-				r.EncodeString(codecSelferC_UTF81234, string("apiVersion"))
-				z.EncSendContainerState(codecSelfer_containerMapValue1234)
-				yym23 := z.EncBinary()
-				_ = yym23
-				if false {
-				} else {
-					r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion))
+					r.EncodeString(codecSelferC_UTF81234, string(x.SecretName))
 				}
 			}
 		}
@@ -8350,7 +8337,7 @@ func (x *DaemonSet) CodecEncodeSelf(e *codec1978.Encoder) {
 	}
 }
 
-func (x *DaemonSet) CodecDecodeSelf(d *codec1978.Decoder) {
+func (x *IngressTLS) CodecDecodeSelf(d *codec1978.Decoder) {
 	var h codecSelfer1234
 	z, r := codec1978.GenHelperDecoder(d)
 	_, _, _ = h, z, r
@@ -8380,7 +8367,7 @@ func (x *DaemonSet) CodecDecodeSelf(d *codec1978.Decoder) {
 	}
 }
 
-func (x *DaemonSet) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+func (x *IngressTLS) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
 	var h codecSelfer1234
 	z, r := codec1978.GenHelperDecoder(d)
 	_, _, _ = h, z, r
@@ -8402,38 +8389,23 @@ func (x *DaemonSet) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
 		yys3 := string(yys3Slc)
 		z.DecSendContainerState(codecSelfer_containerMapValue1234)
 		switch yys3 {
-		case "metadata":
-			if r.TryDecodeAsNil() {
-				x.ObjectMeta = pkg2_api.ObjectMeta{}
-			} else {
-				yyv4 := &x.ObjectMeta
-				yyv4.CodecDecodeSelf(d)
-			}
-		case "spec":
-			if r.TryDecodeAsNil() {
-				x.Spec = DaemonSetSpec{}
-			} else {
-				yyv5 := &x.Spec
-				yyv5.CodecDecodeSelf(d)
-			}
-		case "status":
-			if r.TryDecodeAsNil() {
-				x.Status = DaemonSetStatus{}
-			} else {
-				yyv6
:= &x.Status - yyv6.CodecDecodeSelf(d) - } - case "kind": + case "hosts": if r.TryDecodeAsNil() { - x.Kind = "" + x.Hosts = nil } else { - x.Kind = string(r.DecodeString()) + yyv4 := &x.Hosts + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + z.F.DecSliceStringX(yyv4, false, d) + } } - case "apiVersion": + case "secretName": if r.TryDecodeAsNil() { - x.APIVersion = "" + x.SecretName = "" } else { - x.APIVersion = string(r.DecodeString()) + x.SecretName = string(r.DecodeString()) } default: z.DecStructFieldNotFound(-1, yys3) @@ -8442,113 +8414,68 @@ func (x *DaemonSet) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *DaemonSet) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *IngressTLS) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_api.ObjectMeta{} - } else { - yyv10 := &x.ObjectMeta - yyv10.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = DaemonSetSpec{} - } else { - yyv11 := &x.Spec - yyv11.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = DaemonSetStatus{} - } else { - yyv12 := &x.Status - yyv12.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb9 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb9 { + if yyb7 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Kind = "" + x.Hosts = nil } else { - x.Kind = string(r.DecodeString()) + yyv8 := &x.Hosts + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + z.F.DecSliceStringX(yyv8, false, d) + } } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb9 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb9 { + if yyb7 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.APIVersion = "" + x.SecretName = "" } else { - x.APIVersion = string(r.DecodeString()) + x.SecretName = string(r.DecodeString()) } for { - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb9 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb9 { + if yyb7 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") + z.DecStructFieldNotFound(yyj7-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *DaemonSetList) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *IngressStatus) CodecEncodeSelf(e 
*codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -8562,17 +8489,15 @@ func (x *DaemonSetList) CodecEncodeSelf(e *codec1978.Encoder) { } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool + var yyq2 [1]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false yyq2[0] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" var yynn2 int if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) + r.EncodeArrayStart(1) } else { - yynn2 = 1 + yynn2 = 0 for _, b := range yyq2 { if b { yynn2++ @@ -8584,119 +8509,30 @@ func (x *DaemonSetList) CodecEncodeSelf(e *codec1978.Encoder) { if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if yyq2[0] { - yy4 := &x.ListMeta - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else { - z.EncFallback(yy4) - } + yy4 := &x.LoadBalancer + yy4.CodecEncodeSelf(e) } else { r.EncodeNil() } } else { if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) + r.EncodeString(codecSelferC_UTF81234, string("loadBalancer")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ListMeta - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else { - z.EncFallback(yy6) - } + yy6 := &x.LoadBalancer + yy6.CodecEncodeSelf(e) } } if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSliceDaemonSet(([]DaemonSet)(x.Items), e) - } - } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSliceDaemonSet(([]DaemonSet)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) + 
z.EncSendContainerState(codecSelfer_containerMapEnd1234) } } } } -func (x *DaemonSetList) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *IngressStatus) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -8726,7 +8562,7 @@ func (x *DaemonSetList) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *DaemonSetList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *IngressStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -8748,42 +8584,12 @@ func (x *DaemonSetList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv4 := &x.ListMeta - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else { - z.DecFallback(yyv4, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv6 := &x.Items - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceDaemonSet((*[]DaemonSet)(yyv6), d) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": + case "loadBalancer": if r.TryDecodeAsNil() { - x.APIVersion = "" + x.LoadBalancer = pkg2_api.LoadBalancerStatus{} } else { - x.APIVersion = string(r.DecodeString()) + yyv4 := &x.LoadBalancer + yyv4.CodecDecodeSelf(d) } default: z.DecStructFieldNotFound(-1, yys3) @@ -8792,107 +8598,47 @@ func (x *DaemonSetList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *DaemonSetList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *IngressStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv11 := &x.ListMeta - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else { - z.DecFallback(yyv11, false) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv13 := &x.Items - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSliceDaemonSet((*[]DaemonSet)(yyv13), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l + var yyj5 int + var yyb5 bool + var yyhl5 bool = l >= 0 + yyj5++ + if yyhl5 { + yyb5 = yyj5 > l } else { - yyb10 = r.CheckBreak() + yyb5 = r.CheckBreak() } 
- if yyb10 { + if yyb5 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.APIVersion = "" + x.LoadBalancer = pkg2_api.LoadBalancerStatus{} } else { - x.APIVersion = string(r.DecodeString()) + yyv6 := &x.LoadBalancer + yyv6.CodecDecodeSelf(d) } for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l + yyj5++ + if yyhl5 { + yyb5 = yyj5 > l } else { - yyb10 = r.CheckBreak() + yyb5 = r.CheckBreak() } - if yyb10 { + if yyb5 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") + z.DecStructFieldNotFound(yyj5-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *ThirdPartyResourceDataList) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *IngressRule) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -8906,17 +8652,16 @@ func (x *ThirdPartyResourceDataList) CodecEncodeSelf(e *codec1978.Encoder) { } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool + var yyq2 [2]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false - yyq2[0] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" + yyq2[0] = x.Host != "" + yyq2[1] = x.IngressRuleValue.HTTP != nil && x.HTTP != nil var yynn2 int if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) + r.EncodeArrayStart(2) } else { - yynn2 = 1 + yynn2 = 0 for _, b := range yyq2 { if b { yynn2++ @@ -8928,111 +8673,67 @@ func (x *ThirdPartyResourceDataList) CodecEncodeSelf(e *codec1978.Encoder) { if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if yyq2[0] { - yy4 := &x.ListMeta - yym5 := z.EncBinary() - _ = yym5 + yym4 := z.EncBinary() + _ = yym4 if false { - } else if z.HasExtensions() && z.EncExt(yy4) { } else { - z.EncFallback(yy4) + r.EncodeString(codecSelferC_UTF81234, string(x.Host)) } } else { - r.EncodeNil() + r.EncodeString(codecSelferC_UTF81234, "") } } else { if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) + r.EncodeString(codecSelferC_UTF81234, string("host")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ListMeta - yym7 := z.EncBinary() - _ = yym7 + yym5 := z.EncBinary() + _ = yym5 if false { - } else if z.HasExtensions() && z.EncExt(yy6) { } else { - z.EncFallback(yy6) + r.EncodeString(codecSelferC_UTF81234, string(x.Host)) } } } + var yyn6 bool + if x.IngressRuleValue.HTTP == nil { + yyn6 = true + goto LABEL6 + } + LABEL6: if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { + if yyn6 { r.EncodeNil() } else { - yym9 := z.EncBinary() - _ = yym9 - if false { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.HTTP == nil { + r.EncodeNil() + } else { + x.HTTP.CodecEncodeSelf(e) + } } else { - h.encSliceThirdPartyResourceData(([]ThirdPartyResourceData)(x.Items), e) + r.EncodeNil() } } } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("http")) + 
z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn6 { + r.EncodeNil() } else { - h.encSliceThirdPartyResourceData(([]ThirdPartyResourceData)(x.Items), e) + if x.HTTP == nil { + r.EncodeNil() + } else { + x.HTTP.CodecEncodeSelf(e) + } } } } if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) } @@ -9040,7 +8741,7 @@ func (x *ThirdPartyResourceDataList) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *ThirdPartyResourceDataList) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *IngressRule) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -9070,7 +8771,7 @@ func (x *ThirdPartyResourceDataList) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *ThirdPartyResourceDataList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *IngressRule) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -9092,42 +8793,25 @@ func (x *ThirdPartyResourceDataList) codecDecodeSelfFromMap(l int, d *codec1978. 
yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv4 := &x.ListMeta - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else { - z.DecFallback(yyv4, false) - } - } - case "items": + case "host": if r.TryDecodeAsNil() { - x.Items = nil + x.Host = "" } else { - yyv6 := &x.Items - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceThirdPartyResourceData((*[]ThirdPartyResourceData)(yyv6), d) - } + x.Host = string(r.DecodeString()) } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) + case "http": + if x.IngressRuleValue.HTTP == nil { + x.IngressRuleValue.HTTP = new(HTTPIngressRuleValue) } - case "apiVersion": if r.TryDecodeAsNil() { - x.APIVersion = "" + if x.HTTP != nil { + x.HTTP = nil + } } else { - x.APIVersion = string(r.DecodeString()) + if x.HTTP == nil { + x.HTTP = new(HTTPIngressRuleValue) + } + x.HTTP.CodecDecodeSelf(d) } default: z.DecStructFieldNotFound(-1, yys3) @@ -9136,107 +8820,70 @@ func (x *ThirdPartyResourceDataList) codecDecodeSelfFromMap(l int, d *codec1978. z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *ThirdPartyResourceDataList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *IngressRule) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv11 := &x.ListMeta - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else { - z.DecFallback(yyv11, false) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - yyb10 = r.CheckBreak() + yyb6 = r.CheckBreak() } - if yyb10 { + if yyb6 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv13 := &x.Items - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSliceThirdPartyResourceData((*[]ThirdPartyResourceData)(yyv13), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l + x.Host = "" } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return + x.Host = string(r.DecodeString()) } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) + if x.IngressRuleValue.HTTP == nil { + x.IngressRuleValue.HTTP = new(HTTPIngressRuleValue) } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - yyb10 = r.CheckBreak() + yyb6 = r.CheckBreak() } - if yyb10 { + if yyb6 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.APIVersion = "" + if x.HTTP != nil { + x.HTTP = nil + } } else { - x.APIVersion = 
string(r.DecodeString()) + if x.HTTP == nil { + x.HTTP = new(HTTPIngressRuleValue) + } + x.HTTP.CodecDecodeSelf(d) } for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - yyb10 = r.CheckBreak() + yyb6 = r.CheckBreak() } - if yyb10 { + if yyb6 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") + z.DecStructFieldNotFound(yyj6-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *Job) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *IngressRuleValue) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -9250,17 +8897,13 @@ func (x *Job) CodecEncodeSelf(e *codec1978.Encoder) { } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool + var yyq2 [1]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false - yyq2[0] = true - yyq2[1] = true - yyq2[2] = true - yyq2[3] = x.Kind != "" - yyq2[4] = x.APIVersion != "" + yyq2[0] = x.HTTP != nil var yynn2 int if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) + r.EncodeArrayStart(1) } else { yynn2 = 0 for _, b := range yyq2 { @@ -9274,127 +8917,49 @@ func (x *Job) CodecEncodeSelf(e *codec1978.Encoder) { if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if yyq2[0] { - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) + if x.HTTP == nil { + r.EncodeNil() + } else { + x.HTTP.CodecEncodeSelf(e) + } } else { r.EncodeNil() } } else { if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) + r.EncodeString(codecSelferC_UTF81234, string("http")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) + if x.HTTP == nil { + r.EncodeNil() + } else { + x.HTTP.CodecEncodeSelf(e) + } } } if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yy9 := &x.Spec - yy9.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.Spec - yy11.CodecEncodeSelf(e) - } + z.EncSendContainerState(codecSelfer_containerMapEnd1234) } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy14 := &x.Status - yy14.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy16 := &x.Status - yy16.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Job) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } + } +} + +func (x *IngressRuleValue) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { x.codecDecodeSelfFromMap(yyl2, d) } @@ -9411,7 +8976,7 @@ func (x *Job) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *Job) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *IngressRuleValue) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -9433,38 +8998,16 @@ func (x *Job) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_api.ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = JobSpec{} - } else { - yyv5 := &x.Spec - yyv5.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = JobStatus{} - } else { - yyv6 := &x.Status - yyv6.CodecDecodeSelf(d) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": + case "http": if r.TryDecodeAsNil() { - x.APIVersion = "" + if x.HTTP != nil { + x.HTTP = nil + } } else { - x.APIVersion = string(r.DecodeString()) + if x.HTTP == nil { + x.HTTP = new(HTTPIngressRuleValue) + } + x.HTTP.CodecDecodeSelf(d) } default: z.DecStructFieldNotFound(-1, yys3) @@ -9473,113 +9016,51 @@ func (x *Job) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *Job) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *IngressRuleValue) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_api.ObjectMeta{} - } else { - yyv10 := &x.ObjectMeta - yyv10.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = JobSpec{} - } else { - yyv11 := &x.Spec - yyv11.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = JobStatus{} - } else { - yyv12 := &x.Status - yyv12.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l + var yyj5 int + var yyb5 bool + var yyhl5 bool = l >= 0 + yyj5++ + if yyhl5 { + yyb5 = yyj5 > l } else { - yyb9 = r.CheckBreak() + yyb5 = r.CheckBreak() } - if yyb9 { + if yyb5 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.APIVersion = "" + if x.HTTP != nil { + x.HTTP = nil + } } else { - x.APIVersion = string(r.DecodeString()) + if x.HTTP == nil { + x.HTTP = new(HTTPIngressRuleValue) + } + x.HTTP.CodecDecodeSelf(d) } for { - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l + yyj5++ + if yyhl5 { + yyb5 = yyj5 > l } else { - yyb9 = r.CheckBreak() + yyb5 = r.CheckBreak() } - if yyb9 { + if yyb5 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") + z.DecStructFieldNotFound(yyj5-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *JobList) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *HTTPIngressRuleValue) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -9593,15 +9074,12 @@ func (x *JobList) CodecEncodeSelf(e *codec1978.Encoder) { } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool + var yyq2 [1]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false - yyq2[0] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" var yynn2 int if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) + r.EncodeArrayStart(1) } else { yynn2 = 1 for _, b := range yyq2 { @@ -9614,112 +9092,33 @@ func (x *JobList) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ListMeta + if x.Paths == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSliceHTTPIngressPath(([]HTTPIngressPath)(x.Paths), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("paths")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Paths == nil { + r.EncodeNil() + } else { yym5 := z.EncBinary() _ = yym5 if false { - } else if 
z.HasExtensions() && z.EncExt(yy4) { } else { - z.EncFallback(yy4) + h.encSliceHTTPIngressPath(([]HTTPIngressPath)(x.Paths), e) } - } else { - r.EncodeNil() } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ListMeta - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else { - z.EncFallback(yy6) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSliceJob(([]Job)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSliceJob(([]Job)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) } @@ -9727,7 +9126,7 @@ func (x *JobList) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *JobList) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *HTTPIngressRuleValue) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -9757,7 +9156,7 @@ func (x *JobList) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *JobList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *HTTPIngressRuleValue) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -9779,43 +9178,18 @@ func (x *JobList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) switch yys3 { - case "metadata": + case "paths": if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} + x.Paths = nil } else { - yyv4 := &x.ListMeta + yyv4 
:= &x.Paths yym5 := z.DecBinary() _ = yym5 if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else { - z.DecFallback(yyv4, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv6 := &x.Items - yym7 := z.DecBinary() - _ = yym7 - if false { } else { - h.decSliceJob((*[]Job)(yyv6), d) + h.decSliceHTTPIngressPath((*[]HTTPIngressPath)(yyv4), d) } } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } default: z.DecStructFieldNotFound(-1, yys3) } // end switch yys3 @@ -9823,107 +9197,52 @@ func (x *JobList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *JobList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *HTTPIngressRuleValue) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv11 := &x.ListMeta - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else { - z.DecFallback(yyv11, false) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - yyb10 = r.CheckBreak() + yyb6 = r.CheckBreak() } - if yyb10 { + if yyb6 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Items = nil + x.Paths = nil } else { - yyv13 := &x.Items - yym14 := z.DecBinary() - _ = yym14 + yyv7 := &x.Paths + yym8 := z.DecBinary() + _ = yym8 if false { } else { - h.decSliceJob((*[]Job)(yyv13), d) + h.decSliceHTTPIngressPath((*[]HTTPIngressPath)(yyv7), d) } } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - yyb10 = r.CheckBreak() + yyb6 = r.CheckBreak() } - if yyb10 { + if yyb6 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") + z.DecStructFieldNotFound(yyj6-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *JobSpec) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *HTTPIngressPath) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -9937,17 +9256,13 @@ 
func (x *JobSpec) CodecEncodeSelf(e *codec1978.Encoder) { } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [6]bool + var yyq2 [2]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false - yyq2[0] = x.Parallelism != nil - yyq2[1] = x.Completions != nil - yyq2[2] = x.ActiveDeadlineSeconds != nil - yyq2[3] = x.Selector != nil - yyq2[4] = x.ManualSelector != nil + yyq2[0] = x.Path != "" var yynn2 int if yyr2 || yy2arr2 { - r.EncodeArrayStart(6) + r.EncodeArrayStart(2) } else { yynn2 = 1 for _, b := range yyq2 { @@ -9961,188 +9276,38 @@ func (x *JobSpec) CodecEncodeSelf(e *codec1978.Encoder) { if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if yyq2[0] { - if x.Parallelism == nil { - r.EncodeNil() + yym4 := z.EncBinary() + _ = yym4 + if false { } else { - yy4 := *x.Parallelism - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(yy4)) - } + r.EncodeString(codecSelferC_UTF81234, string(x.Path)) } } else { - r.EncodeNil() + r.EncodeString(codecSelferC_UTF81234, "") } } else { if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("parallelism")) + r.EncodeString(codecSelferC_UTF81234, string("path")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Parallelism == nil { - r.EncodeNil() + yym5 := z.EncBinary() + _ = yym5 + if false { } else { - yy6 := *x.Parallelism - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeInt(int64(yy6)) - } + r.EncodeString(codecSelferC_UTF81234, string(x.Path)) } } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Completions == nil { - r.EncodeNil() - } else { - yy9 := *x.Completions - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeInt(int64(yy9)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("completions")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Completions == nil { - r.EncodeNil() - } else { - yy11 := *x.Completions - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeInt(int64(yy11)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.ActiveDeadlineSeconds == nil { - r.EncodeNil() - } else { - yy14 := *x.ActiveDeadlineSeconds - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeInt(int64(yy14)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("activeDeadlineSeconds")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ActiveDeadlineSeconds == nil { - r.EncodeNil() - } else { - yy16 := *x.ActiveDeadlineSeconds - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeInt(int64(yy16)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - if x.Selector == nil { - r.EncodeNil() - } else { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else if z.HasExtensions() && z.EncExt(x.Selector) { - } else { - z.EncFallback(x.Selector) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("selector")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Selector == nil { - r.EncodeNil() - } else { - yym20 := z.EncBinary() - _ = yym20 - if false { - } else if z.HasExtensions() && z.EncExt(x.Selector) { - } else { - z.EncFallback(x.Selector) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - if x.ManualSelector == nil { - r.EncodeNil() - } else { - yy22 := *x.ManualSelector - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeBool(bool(yy22)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("manualSelector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ManualSelector == nil { - r.EncodeNil() - } else { - yy24 := *x.ManualSelector - yym25 := z.EncBinary() - _ = yym25 - if false { - } else { - r.EncodeBool(bool(yy24)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy27 := &x.Template - yy27.CodecEncodeSelf(e) + yy7 := &x.Backend + yy7.CodecEncodeSelf(e) } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("template")) + r.EncodeString(codecSelferC_UTF81234, string("backend")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy29 := &x.Template - yy29.CodecEncodeSelf(e) + yy9 := &x.Backend + yy9.CodecEncodeSelf(e) } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) @@ -10153,7 +9318,7 @@ func (x *JobSpec) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *JobSpec) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *HTTPIngressPath) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -10183,7 +9348,7 @@ func (x *JobSpec) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *JobSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *HTTPIngressPath) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -10205,93 +9370,18 @@ func (x *JobSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) switch yys3 { - case "parallelism": - if r.TryDecodeAsNil() { - if x.Parallelism != nil { - x.Parallelism = nil - } - } else { - if x.Parallelism == nil { - x.Parallelism = new(int) - } - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*int)(x.Parallelism)) = int(r.DecodeInt(codecSelferBitsize1234)) - } - } - case "completions": - if r.TryDecodeAsNil() { - if x.Completions != nil { - x.Completions = nil - } - } else { - if x.Completions == nil { - x.Completions = new(int) - } - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*int)(x.Completions)) = int(r.DecodeInt(codecSelferBitsize1234)) - } - } - case "activeDeadlineSeconds": - if r.TryDecodeAsNil() { - if x.ActiveDeadlineSeconds != nil { - x.ActiveDeadlineSeconds = nil - } - } else { - if x.ActiveDeadlineSeconds == nil { - x.ActiveDeadlineSeconds = new(int64) - } - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*int64)(x.ActiveDeadlineSeconds)) = int64(r.DecodeInt(64)) - } - } - case "selector": - if r.TryDecodeAsNil() { - if x.Selector != nil { - x.Selector = nil - } - } else { - if x.Selector == nil { - x.Selector = new(pkg1_unversioned.LabelSelector) - } - yym11 := 
z.DecBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.DecExt(x.Selector) { - } else { - z.DecFallback(x.Selector, false) - } - } - case "manualSelector": + case "path": if r.TryDecodeAsNil() { - if x.ManualSelector != nil { - x.ManualSelector = nil - } + x.Path = "" } else { - if x.ManualSelector == nil { - x.ManualSelector = new(bool) - } - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - *((*bool)(x.ManualSelector)) = r.DecodeBool() - } + x.Path = string(r.DecodeString()) } - case "template": + case "backend": if r.TryDecodeAsNil() { - x.Template = pkg2_api.PodTemplateSpec{} + x.Backend = IngressBackend{} } else { - yyv14 := &x.Template - yyv14.CodecDecodeSelf(d) + yyv5 := &x.Backend + yyv5.CodecDecodeSelf(d) } default: z.DecStructFieldNotFound(-1, yys3) @@ -10300,205 +9390,84 @@ func (x *JobSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *JobSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *HTTPIngressPath) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj15 int - var yyb15 bool - var yyhl15 bool = l >= 0 - yyj15++ - if yyhl15 { - yyb15 = yyj15 > l + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - yyb15 = r.CheckBreak() + yyb6 = r.CheckBreak() } - if yyb15 { + if yyb6 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - if x.Parallelism != nil { - x.Parallelism = nil - } + x.Path = "" } else { - if x.Parallelism == nil { - x.Parallelism = new(int) - } - yym17 := z.DecBinary() - _ = yym17 - if false { - } else { - *((*int)(x.Parallelism)) = int(r.DecodeInt(codecSelferBitsize1234)) - } + x.Path = string(r.DecodeString()) } - yyj15++ - if yyhl15 { - yyb15 = yyj15 > l + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - yyb15 = r.CheckBreak() + yyb6 = r.CheckBreak() } - if yyb15 { + if yyb6 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - if x.Completions != nil { - x.Completions = nil - } - } else { - if x.Completions == nil { - x.Completions = new(int) - } - yym19 := z.DecBinary() - _ = yym19 - if false { - } else { - *((*int)(x.Completions)) = int(r.DecodeInt(codecSelferBitsize1234)) - } - } - yyj15++ - if yyhl15 { - yyb15 = yyj15 > l + x.Backend = IngressBackend{} } else { - yyb15 = r.CheckBreak() - } - if yyb15 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return + yyv8 := &x.Backend + yyv8.CodecDecodeSelf(d) } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ActiveDeadlineSeconds != nil { - x.ActiveDeadlineSeconds = nil - } - } else { - if x.ActiveDeadlineSeconds == nil { - x.ActiveDeadlineSeconds = new(int64) - } - yym21 := z.DecBinary() - _ = yym21 - if false { + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - *((*int64)(x.ActiveDeadlineSeconds)) = int64(r.DecodeInt(64)) + yyb6 = r.CheckBreak() } - } - yyj15++ - if yyhl15 { - yyb15 = yyj15 > l - } else { - yyb15 = r.CheckBreak() - } - if yyb15 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Selector != nil { - x.Selector = nil + if yyb6 { + break } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *IngressBackend) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() } else { - if x.Selector == nil { - x.Selector = new(pkg1_unversioned.LabelSelector) - } - yym23 := z.DecBinary() - _ = yym23 + yym1 := z.EncBinary() + _ = yym1 if false { - } else if z.HasExtensions() && z.DecExt(x.Selector) { - } else { - z.DecFallback(x.Selector, false) - } - } - yyj15++ - if yyhl15 { - yyb15 = yyj15 > l - } else { - yyb15 = r.CheckBreak() - } - if yyb15 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ManualSelector != nil { - x.ManualSelector = nil - } - } else { - if x.ManualSelector == nil { - x.ManualSelector = new(bool) - } - yym25 := z.DecBinary() - _ = yym25 - if false { - } else { - *((*bool)(x.ManualSelector)) = r.DecodeBool() - } - } - yyj15++ - if yyhl15 { - yyb15 = yyj15 > l - } else { - yyb15 = r.CheckBreak() - } - if yyb15 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Template = pkg2_api.PodTemplateSpec{} - } else { - yyv26 := &x.Template - yyv26.CodecDecodeSelf(d) - } - for { - yyj15++ - if yyhl15 { - yyb15 = yyj15 > l - } else { - yyb15 = r.CheckBreak() - } - if yyb15 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj15-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *JobStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { + } else if z.HasExtensions() && z.EncExt(x) { } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [6]bool + var yyq2 [2]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false - yyq2[0] = len(x.Conditions) != 0 - yyq2[1] = x.StartTime != nil - yyq2[2] = x.CompletionTime != nil - yyq2[3] = x.Active != 0 - yyq2[4] = x.Succeeded != 0 - yyq2[5] = x.Failed != 0 var yynn2 int if yyr2 || yy2arr2 { - r.EncodeArrayStart(6) + r.EncodeArrayStart(2) } else { - yynn2 = 0 + yynn2 = 2 for _, b := range yyq2 { if b { yynn2++ @@ -10509,196 +9478,48 @@ func (x *JobStatus) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Conditions == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - h.encSliceJobCondition(([]JobCondition)(x.Conditions), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("conditions")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Conditions == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - h.encSliceJobCondition(([]JobCondition)(x.Conditions), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.StartTime == 
nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(x.StartTime) { - } else if yym7 { - z.EncBinaryMarshal(x.StartTime) - } else if !yym7 && z.IsJSONHandle() { - z.EncJSONMarshal(x.StartTime) - } else { - z.EncFallback(x.StartTime) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("startTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.StartTime == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else if z.HasExtensions() && z.EncExt(x.StartTime) { - } else if yym8 { - z.EncBinaryMarshal(x.StartTime) - } else if !yym8 && z.IsJSONHandle() { - z.EncJSONMarshal(x.StartTime) - } else { - z.EncFallback(x.StartTime) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.CompletionTime == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else if z.HasExtensions() && z.EncExt(x.CompletionTime) { - } else if yym10 { - z.EncBinaryMarshal(x.CompletionTime) - } else if !yym10 && z.IsJSONHandle() { - z.EncJSONMarshal(x.CompletionTime) - } else { - z.EncFallback(x.CompletionTime) - } - } + yym4 := z.EncBinary() + _ = yym4 + if false { } else { - r.EncodeNil() + r.EncodeString(codecSelferC_UTF81234, string(x.ServiceName)) } } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("completionTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.CompletionTime == nil { - r.EncodeNil() - } else { - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(x.CompletionTime) { - } else if yym11 { - z.EncBinaryMarshal(x.CompletionTime) - } else if !yym11 && z.IsJSONHandle() { - z.EncJSONMarshal(x.CompletionTime) - } else { - z.EncFallback(x.CompletionTime) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeInt(int64(x.Active)) - } + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("serviceName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { } else { - r.EncodeInt(0) - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("active")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeInt(int64(x.Active)) - } + r.EncodeString(codecSelferC_UTF81234, string(x.ServiceName)) } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeInt(int64(x.Succeeded)) - } + yy7 := &x.ServicePort + yym8 := z.EncBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.EncExt(yy7) { + } else if !yym8 && z.IsJSONHandle() { + z.EncJSONMarshal(yy7) } else { - r.EncodeInt(0) + z.EncFallback(yy7) } } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("succeeded")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym17 := z.EncBinary() - _ = 
yym17 - if false { - } else { - r.EncodeInt(int64(x.Succeeded)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeInt(int64(x.Failed)) - } + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("servicePort")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy9 := &x.ServicePort + yym10 := z.EncBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.EncExt(yy9) { + } else if !yym10 && z.IsJSONHandle() { + z.EncJSONMarshal(yy9) } else { - r.EncodeInt(0) - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("failed")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeInt(int64(x.Failed)) - } + z.EncFallback(yy9) } } if yyr2 || yy2arr2 { @@ -10710,7 +9531,7 @@ func (x *JobStatus) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *JobStatus) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *IngressBackend) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -10740,7 +9561,7 @@ func (x *JobStatus) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *JobStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *IngressBackend) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -10762,78 +9583,27 @@ func (x *JobStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) switch yys3 { - case "conditions": - if r.TryDecodeAsNil() { - x.Conditions = nil - } else { - yyv4 := &x.Conditions - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - h.decSliceJobCondition((*[]JobCondition)(yyv4), d) - } - } - case "startTime": + case "serviceName": if r.TryDecodeAsNil() { - if x.StartTime != nil { - x.StartTime = nil - } + x.ServiceName = "" } else { - if x.StartTime == nil { - x.StartTime = new(pkg1_unversioned.Time) - } - yym7 := z.DecBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.DecExt(x.StartTime) { - } else if yym7 { - z.DecBinaryUnmarshal(x.StartTime) - } else if !yym7 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.StartTime) - } else { - z.DecFallback(x.StartTime, false) - } + x.ServiceName = string(r.DecodeString()) } - case "completionTime": + case "servicePort": if r.TryDecodeAsNil() { - if x.CompletionTime != nil { - x.CompletionTime = nil - } + x.ServicePort = pkg5_intstr.IntOrString{} } else { - if x.CompletionTime == nil { - x.CompletionTime = new(pkg1_unversioned.Time) - } - yym9 := z.DecBinary() - _ = yym9 + yyv5 := &x.ServicePort + yym6 := z.DecBinary() + _ = yym6 if false { - } else if z.HasExtensions() && z.DecExt(x.CompletionTime) { - } else if yym9 { - z.DecBinaryUnmarshal(x.CompletionTime) - } else if !yym9 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.CompletionTime) + } else if z.HasExtensions() && z.DecExt(yyv5) { + } else if !yym6 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv5) } else { - z.DecFallback(x.CompletionTime, false) + z.DecFallback(yyv5, false) } } - case "active": - if r.TryDecodeAsNil() { - x.Active = 0 - } else { - x.Active = int(r.DecodeInt(codecSelferBitsize1234)) - } - case "succeeded": - if r.TryDecodeAsNil() { - 
x.Succeeded = 0 - } else { - x.Succeeded = int(r.DecodeInt(codecSelferBitsize1234)) - } - case "failed": - if r.TryDecodeAsNil() { - x.Failed = 0 - } else { - x.Failed = int(r.DecodeInt(codecSelferBitsize1234)) - } default: z.DecStructFieldNotFound(-1, yys3) } // end switch yys3 @@ -10841,188 +9611,71 @@ func (x *JobStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *JobStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *IngressBackend) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj13 int - var yyb13 bool - var yyhl13 bool = l >= 0 - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb13 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb13 { + if yyb7 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Conditions = nil + x.ServiceName = "" } else { - yyv14 := &x.Conditions - yym15 := z.DecBinary() - _ = yym15 - if false { - } else { - h.decSliceJobCondition((*[]JobCondition)(yyv14), d) - } - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.StartTime != nil { - x.StartTime = nil - } - } else { - if x.StartTime == nil { - x.StartTime = new(pkg1_unversioned.Time) - } - yym17 := z.DecBinary() - _ = yym17 - if false { - } else if z.HasExtensions() && z.DecExt(x.StartTime) { - } else if yym17 { - z.DecBinaryUnmarshal(x.StartTime) - } else if !yym17 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.StartTime) - } else { - z.DecFallback(x.StartTime, false) - } + x.ServiceName = string(r.DecodeString()) } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb13 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb13 { + if yyb7 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - if x.CompletionTime != nil { - x.CompletionTime = nil - } + x.ServicePort = pkg5_intstr.IntOrString{} } else { - if x.CompletionTime == nil { - x.CompletionTime = new(pkg1_unversioned.Time) - } - yym19 := z.DecBinary() - _ = yym19 + yyv9 := &x.ServicePort + yym10 := z.DecBinary() + _ = yym10 if false { - } else if z.HasExtensions() && z.DecExt(x.CompletionTime) { - } else if yym19 { - z.DecBinaryUnmarshal(x.CompletionTime) - } else if !yym19 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.CompletionTime) + } else if z.HasExtensions() && z.DecExt(yyv9) { + } else if !yym10 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv9) } else { - z.DecFallback(x.CompletionTime, false) + z.DecFallback(yyv9, false) } } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Active = 0 - } else { - x.Active = int(r.DecodeInt(codecSelferBitsize1234)) - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Succeeded = 0 - } else { - x.Succeeded = int(r.DecodeInt(codecSelferBitsize1234)) - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Failed = 0 - } else { - x.Failed = int(r.DecodeInt(codecSelferBitsize1234)) - } for { - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb13 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb13 { + if yyb7 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj13-1, "") + z.DecStructFieldNotFound(yyj7-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x JobConditionType) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *JobConditionType) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *JobCondition) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *ReplicaSet) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -11036,18 +9689,19 @@ func (x *JobCondition) CodecEncodeSelf(e *codec1978.Encoder) { } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [6]bool + var yyq2 [5]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false + yyq2[0] = true + yyq2[1] = true yyq2[2] = true - yyq2[3] = true - yyq2[4] = x.Reason != "" - yyq2[5] = x.Message != "" + yyq2[3] = x.Kind != "" + yyq2[4] = x.APIVersion != "" var yynn2 int if yyr2 || yy2arr2 { - r.EncodeArrayStart(6) + r.EncodeArrayStart(5) } else { - yynn2 = 2 + yynn2 = 0 for _, b := range yyq2 { if b { yynn2++ @@ -11058,106 +9712,88 @@ func (x *JobCondition) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Type.CodecEncodeSelf(e) + if yyq2[0] { + yy4 := &x.ObjectMeta + yy4.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("type")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Type.CodecEncodeSelf(e) + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.ObjectMeta + yy6.CodecEncodeSelf(e) + } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yysf7 := &x.Status - yysf7.CodecEncodeSelf(e) + if yyq2[1] { + yy9 := &x.Spec + yy9.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - yysf8 := &x.Status - yysf8.CodecEncodeSelf(e) + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy11 := &x.Spec + yy11.CodecEncodeSelf(e) + } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if yyq2[2] { - yy10 := &x.LastProbeTime - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else if yym11 { - z.EncBinaryMarshal(yy10) - } else if !yym11 && z.IsJSONHandle() { - z.EncJSONMarshal(yy10) - } else { - z.EncFallback(yy10) - } + yy14 := &x.Status + yy14.CodecEncodeSelf(e) } else { r.EncodeNil() } } else { if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lastProbeTime")) + r.EncodeString(codecSelferC_UTF81234, string("status")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.LastProbeTime - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else if yym13 { - z.EncBinaryMarshal(yy12) - } else if !yym13 && z.IsJSONHandle() { - z.EncJSONMarshal(yy12) - } else { - z.EncFallback(yy12) - } + yy16 := &x.Status + yy16.CodecEncodeSelf(e) } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if yyq2[3] { - yy15 := &x.LastTransitionTime - yym16 := z.EncBinary() - _ = yym16 + yym19 := z.EncBinary() + _ = yym19 if false { - } else if z.HasExtensions() && z.EncExt(yy15) { - } else if yym16 { - z.EncBinaryMarshal(yy15) - } else if !yym16 && z.IsJSONHandle() { - z.EncJSONMarshal(yy15) } else { - z.EncFallback(yy15) + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } else { - r.EncodeNil() + r.EncodeString(codecSelferC_UTF81234, "") } } else { if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lastTransitionTime")) + r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy17 := &x.LastTransitionTime - yym18 := z.EncBinary() - _ = yym18 + yym20 := z.EncBinary() + _ = yym20 if false { - } else if z.HasExtensions() && z.EncExt(yy17) { - } else if yym18 { - z.EncBinaryMarshal(yy17) - } else if !yym18 && z.IsJSONHandle() { - z.EncJSONMarshal(yy17) } else { - z.EncFallback(yy17) + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if yyq2[4] { - yym20 := z.EncBinary() - _ = yym20 + yym22 := z.EncBinary() + _ = yym22 if false { } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } else { r.EncodeString(codecSelferC_UTF81234, "") @@ -11165,38 +9801,13 @@ func (x *JobCondition) CodecEncodeSelf(e *codec1978.Encoder) { } else { if yyq2[4] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("reason")) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym21 := z.EncBinary() - _ = yym21 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { yym23 := z.EncBinary() _ = yym23 if false { } else { - 
r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("message")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym24 := z.EncBinary() - _ = yym24 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } @@ -11209,7 +9820,7 @@ func (x *JobCondition) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *JobCondition) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *ReplicaSet) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -11239,7 +9850,7 @@ func (x *JobCondition) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *JobCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *ReplicaSet) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -11261,63 +9872,38 @@ func (x *JobCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) switch yys3 { - case "type": - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = JobConditionType(r.DecodeString()) - } - case "status": + case "metadata": if r.TryDecodeAsNil() { - x.Status = "" + x.ObjectMeta = pkg2_api.ObjectMeta{} } else { - x.Status = pkg2_api.ConditionStatus(r.DecodeString()) + yyv4 := &x.ObjectMeta + yyv4.CodecDecodeSelf(d) } - case "lastProbeTime": + case "spec": if r.TryDecodeAsNil() { - x.LastProbeTime = pkg1_unversioned.Time{} + x.Spec = ReplicaSetSpec{} } else { - yyv6 := &x.LastProbeTime - yym7 := z.DecBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.DecExt(yyv6) { - } else if yym7 { - z.DecBinaryUnmarshal(yyv6) - } else if !yym7 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv6) - } else { - z.DecFallback(yyv6, false) - } + yyv5 := &x.Spec + yyv5.CodecDecodeSelf(d) } - case "lastTransitionTime": + case "status": if r.TryDecodeAsNil() { - x.LastTransitionTime = pkg1_unversioned.Time{} + x.Status = ReplicaSetStatus{} } else { - yyv8 := &x.LastTransitionTime - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else if yym9 { - z.DecBinaryUnmarshal(yyv8) - } else if !yym9 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv8) - } else { - z.DecFallback(yyv8, false) - } + yyv6 := &x.Status + yyv6.CodecDecodeSelf(d) } - case "reason": + case "kind": if r.TryDecodeAsNil() { - x.Reason = "" + x.Kind = "" } else { - x.Reason = string(r.DecodeString()) + x.Kind = string(r.DecodeString()) } - case "message": + case "apiVersion": if r.TryDecodeAsNil() { - x.Message = "" + x.APIVersion = "" } else { - x.Message = string(r.DecodeString()) + x.APIVersion = string(r.DecodeString()) } default: z.DecStructFieldNotFound(-1, yys3) @@ -11326,148 +9912,113 @@ func (x *JobCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *JobCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *ReplicaSet) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > 
l + var yyj9 int + var yyb9 bool + var yyhl9 bool = l >= 0 + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l } else { - yyb12 = r.CheckBreak() + yyb9 = r.CheckBreak() } - if yyb12 { + if yyb9 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Type = "" + x.ObjectMeta = pkg2_api.ObjectMeta{} } else { - x.Type = JobConditionType(r.DecodeString()) + yyv10 := &x.ObjectMeta + yyv10.CodecDecodeSelf(d) } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l } else { - yyb12 = r.CheckBreak() + yyb9 = r.CheckBreak() } - if yyb12 { + if yyb9 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Status = "" + x.Spec = ReplicaSetSpec{} } else { - x.Status = pkg2_api.ConditionStatus(r.DecodeString()) + yyv11 := &x.Spec + yyv11.CodecDecodeSelf(d) } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l } else { - yyb12 = r.CheckBreak() + yyb9 = r.CheckBreak() } - if yyb12 { + if yyb9 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.LastProbeTime = pkg1_unversioned.Time{} + x.Status = ReplicaSetStatus{} } else { - yyv15 := &x.LastProbeTime - yym16 := z.DecBinary() - _ = yym16 - if false { - } else if z.HasExtensions() && z.DecExt(yyv15) { - } else if yym16 { - z.DecBinaryUnmarshal(yyv15) - } else if !yym16 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv15) - } else { - z.DecFallback(yyv15, false) - } + yyv12 := &x.Status + yyv12.CodecDecodeSelf(d) } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LastTransitionTime = pkg1_unversioned.Time{} - } else { - yyv17 := &x.LastTransitionTime - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else if yym18 { - z.DecBinaryUnmarshal(yyv17) - } else if !yym18 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv17) - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l } else { - yyb12 = r.CheckBreak() + yyb9 = r.CheckBreak() } - if yyb12 { + if yyb9 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Reason = "" + x.Kind = "" } else { - x.Reason = string(r.DecodeString()) + x.Kind = string(r.DecodeString()) } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l } else { - yyb12 = r.CheckBreak() + yyb9 = r.CheckBreak() } - if yyb12 { + if yyb9 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Message = "" + x.APIVersion = "" } else { - x.Message = string(r.DecodeString()) + x.APIVersion = string(r.DecodeString()) } for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l } else { - yyb12 = r.CheckBreak() + yyb9 = r.CheckBreak() } - if yyb12 { + if yyb9 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") + z.DecStructFieldNotFound(yyj9-1, "") 
} z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *Ingress) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *ReplicaSetList) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -11481,19 +10032,17 @@ func (x *Ingress) CodecEncodeSelf(e *codec1978.Encoder) { } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool + var yyq2 [4]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false yyq2[0] = true - yyq2[1] = true - yyq2[2] = true - yyq2[3] = x.Kind != "" - yyq2[4] = x.APIVersion != "" + yyq2[2] = x.Kind != "" + yyq2[3] = x.APIVersion != "" var yynn2 int if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) + r.EncodeArrayStart(4) } else { - yynn2 = 0 + yynn2 = 1 for _, b := range yyq2 { if b { yynn2++ @@ -11505,8 +10054,14 @@ func (x *Ingress) CodecEncodeSelf(e *codec1978.Encoder) { if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if yyq2[0] { - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) + yy4 := &x.ListMeta + yym5 := z.EncBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.EncExt(yy4) { + } else { + z.EncFallback(yy4) + } } else { r.EncodeNil() } @@ -11515,49 +10070,48 @@ func (x *Ingress) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("metadata")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) + yy6 := &x.ListMeta + yym7 := z.EncBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.EncExt(yy6) { + } else { + z.EncFallback(yy6) + } } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yy9 := &x.Spec - yy9.CodecEncodeSelf(e) - } else { + if x.Items == nil { r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.Spec - yy11.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy14 := &x.Status - yy14.CodecEncodeSelf(e) } else { - r.EncodeNil() + yym9 := z.EncBinary() + _ = yym9 + if false { + } else { + h.encSliceReplicaSet(([]ReplicaSet)(x.Items), e) + } } } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy16 := &x.Status - yy16.CodecEncodeSelf(e) + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + h.encSliceReplicaSet(([]ReplicaSet)(x.Items), e) + } } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym19 := z.EncBinary() - _ = yym19 + if yyq2[2] { + yym12 := z.EncBinary() + _ = yym12 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -11566,12 +10120,12 @@ func (x *Ingress) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2[3] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, 
string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 + yym13 := z.EncBinary() + _ = yym13 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -11580,9 +10134,9 @@ func (x *Ingress) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym22 := z.EncBinary() - _ = yym22 + if yyq2[3] { + yym15 := z.EncBinary() + _ = yym15 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -11591,12 +10145,12 @@ func (x *Ingress) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2[4] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym23 := z.EncBinary() - _ = yym23 + yym16 := z.EncBinary() + _ = yym16 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -11612,7 +10166,7 @@ func (x *Ingress) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *Ingress) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *ReplicaSetList) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -11642,7 +10196,7 @@ func (x *Ingress) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *Ingress) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *ReplicaSetList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -11666,24 +10220,28 @@ func (x *Ingress) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { switch yys3 { case "metadata": if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_api.ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = IngressSpec{} + x.ListMeta = pkg1_unversioned.ListMeta{} } else { - yyv5 := &x.Spec - yyv5.CodecDecodeSelf(d) + yyv4 := &x.ListMeta + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(yyv4) { + } else { + z.DecFallback(yyv4, false) + } } - case "status": + case "items": if r.TryDecodeAsNil() { - x.Status = IngressStatus{} + x.Items = nil } else { - yyv6 := &x.Status - yyv6.CodecDecodeSelf(d) + yyv6 := &x.Items + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + h.decSliceReplicaSet((*[]ReplicaSet)(yyv6), d) + } } case "kind": if r.TryDecodeAsNil() { @@ -11704,113 +10262,107 @@ func (x *Ingress) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *Ingress) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *ReplicaSetList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb9 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb9 { + if yyb10 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_api.ObjectMeta{} + x.ListMeta = pkg1_unversioned.ListMeta{} } else { - yyv10 := &x.ObjectMeta - 
yyv10.CodecDecodeSelf(d) + yyv11 := &x.ListMeta + yym12 := z.DecBinary() + _ = yym12 + if false { + } else if z.HasExtensions() && z.DecExt(yyv11) { + } else { + z.DecFallback(yyv11, false) + } } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb9 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb9 { + if yyb10 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Spec = IngressSpec{} + x.Items = nil } else { - yyv11 := &x.Spec - yyv11.CodecDecodeSelf(d) + yyv13 := &x.Items + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + h.decSliceReplicaSet((*[]ReplicaSet)(yyv13), d) + } } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb9 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb9 { + if yyb10 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Status = IngressStatus{} + x.Kind = "" } else { - yyv12 := &x.Status - yyv12.CodecDecodeSelf(d) + x.Kind = string(r.DecodeString()) } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb9 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb9 { + if yyb10 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Kind = "" + x.APIVersion = "" } else { - x.Kind = string(r.DecodeString()) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) + x.APIVersion = string(r.DecodeString()) } for { - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb9 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb9 { + if yyb10 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") + z.DecStructFieldNotFound(yyj10-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *IngressList) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *ReplicaSetSpec) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -11824,15 +10376,14 @@ func (x *IngressList) CodecEncodeSelf(e *codec1978.Encoder) { } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool + var yyq2 [3]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false - yyq2[0] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" + yyq2[1] = x.Selector != nil + yyq2[2] = true var yynn2 int if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) + r.EncodeArrayStart(3) } else { yynn2 = 1 for _, b := range yyq2 { @@ -11845,108 +10396,73 @@ func (x *IngressList) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ListMeta - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else { - z.EncFallback(yy4) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - 
r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ListMeta - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else { - z.EncFallback(yy6) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() + yym4 := z.EncBinary() + _ = yym4 + if false { } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSliceIngress(([]Ingress)(x.Items), e) - } + r.EncodeInt(int64(x.Replicas)) } } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) + r.EncodeString(codecSelferC_UTF81234, string("replicas")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() + yym5 := z.EncBinary() + _ = yym5 + if false { } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSliceIngress(([]Ingress)(x.Items), e) - } + r.EncodeInt(int64(x.Replicas)) } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { + if yyq2[1] { + if x.Selector == nil { + r.EncodeNil() } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.EncExt(x.Selector) { + } else { + z.EncFallback(x.Selector) + } } } else { - r.EncodeString(codecSelferC_UTF81234, "") + r.EncodeNil() } } else { - if yyq2[2] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) + r.EncodeString(codecSelferC_UTF81234, string("selector")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { + if x.Selector == nil { + r.EncodeNil() } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.EncExt(x.Selector) { + } else { + z.EncFallback(x.Selector) + } } } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } + if yyq2[2] { + yy10 := &x.Template + yy10.CodecEncodeSelf(e) } else { - r.EncodeString(codecSelferC_UTF81234, "") + r.EncodeNil() } } else { - if yyq2[3] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + r.EncodeString(codecSelferC_UTF81234, string("template")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } + yy12 := &x.Template + yy12.CodecEncodeSelf(e) } } if yyr2 || yy2arr2 { @@ -11958,7 +10474,7 @@ func (x *IngressList) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *IngressList) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *ReplicaSetSpec) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -11988,7 +10504,7 @@ func (x *IngressList) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *IngressList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *ReplicaSetSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 
z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -12010,42 +10526,35 @@ func (x *IngressList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) switch yys3 { - case "metadata": + case "replicas": if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} + x.Replicas = 0 } else { - yyv4 := &x.ListMeta - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else { - z.DecFallback(yyv4, false) - } + x.Replicas = int32(r.DecodeInt(32)) } - case "items": + case "selector": if r.TryDecodeAsNil() { - x.Items = nil + if x.Selector != nil { + x.Selector = nil + } } else { - yyv6 := &x.Items - yym7 := z.DecBinary() - _ = yym7 + if x.Selector == nil { + x.Selector = new(pkg1_unversioned.LabelSelector) + } + yym6 := z.DecBinary() + _ = yym6 if false { + } else if z.HasExtensions() && z.DecExt(x.Selector) { } else { - h.decSliceIngress((*[]Ingress)(yyv6), d) + z.DecFallback(x.Selector, false) } } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": + case "template": if r.TryDecodeAsNil() { - x.APIVersion = "" + x.Template = pkg2_api.PodTemplateSpec{} } else { - x.APIVersion = string(r.DecodeString()) + yyv7 := &x.Template + yyv7.CodecDecodeSelf(d) } default: z.DecStructFieldNotFound(-1, yys3) @@ -12054,107 +10563,90 @@ func (x *IngressList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *IngressList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *ReplicaSetSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb10 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb10 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} + x.Replicas = 0 } else { - yyv11 := &x.ListMeta - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else { - z.DecFallback(yyv11, false) - } + x.Replicas = int32(r.DecodeInt(32)) } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb10 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb10 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Items = nil + if x.Selector != nil { + x.Selector = nil + } } else { - yyv13 := &x.Items - yym14 := z.DecBinary() - _ = yym14 + if x.Selector == nil { + x.Selector = new(pkg1_unversioned.LabelSelector) + } + yym11 := z.DecBinary() + _ = yym11 if false { + } else if z.HasExtensions() && z.DecExt(x.Selector) { } else { - h.decSliceIngress((*[]Ingress)(yyv13), d) + z.DecFallback(x.Selector, false) } } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb10 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb10 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" + x.Template = pkg2_api.PodTemplateSpec{} } else { - x.APIVersion = string(r.DecodeString()) + yyv12 := &x.Template + yyv12.CodecDecodeSelf(d) } for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb10 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb10 { + if yyb8 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") + z.DecStructFieldNotFound(yyj8-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *IngressSpec) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *ReplicaSetStatus) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -12171,14 +10663,13 @@ func (x *IngressSpec) CodecEncodeSelf(e *codec1978.Encoder) { var yyq2 [3]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false - yyq2[0] = x.Backend != nil - yyq2[1] = len(x.TLS) != 0 - yyq2[2] = len(x.Rules) != 0 + yyq2[1] = x.FullyLabeledReplicas != 0 + yyq2[2] = x.ObservedGeneration != 0 var yynn2 int if yyr2 || yy2arr2 { r.EncodeArrayStart(3) } else { - yynn2 = 0 + yynn2 = 1 for _, b := range yyq2 { if b { yynn2++ @@ -12189,90 +10680,70 @@ func (x *IngressSpec) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Backend == nil { - r.EncodeNil() - } else { - x.Backend.CodecEncodeSelf(e) - } + yym4 := z.EncBinary() + _ = yym4 + if false { } else { - r.EncodeNil() + r.EncodeInt(int64(x.Replicas)) } } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("backend")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Backend == nil { - r.EncodeNil() - } else { - x.Backend.CodecEncodeSelf(e) - } + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("replicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(x.Replicas)) } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if yyq2[1] { - if x.TLS == nil { - r.EncodeNil() + yym7 := z.EncBinary() + _ = yym7 + if false { } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - h.encSliceIngressTLS(([]IngressTLS)(x.TLS), e) - } + r.EncodeInt(int64(x.FullyLabeledReplicas)) } } else { - r.EncodeNil() + r.EncodeInt(0) } } else { if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("tls")) + r.EncodeString(codecSelferC_UTF81234, string("fullyLabeledReplicas")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.TLS == nil { - r.EncodeNil() + yym8 := z.EncBinary() + _ = yym8 + if false { } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - h.encSliceIngressTLS(([]IngressTLS)(x.TLS), e) - } + r.EncodeInt(int64(x.FullyLabeledReplicas)) } } } if yyr2 || yy2arr2 { 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) if yyq2[2] { - if x.Rules == nil { - r.EncodeNil() + yym10 := z.EncBinary() + _ = yym10 + if false { } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSliceIngressRule(([]IngressRule)(x.Rules), e) - } + r.EncodeInt(int64(x.ObservedGeneration)) } } else { - r.EncodeNil() + r.EncodeInt(0) } } else { if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("rules")) + r.EncodeString(codecSelferC_UTF81234, string("observedGeneration")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Rules == nil { - r.EncodeNil() + yym11 := z.EncBinary() + _ = yym11 + if false { } else { - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - h.encSliceIngressRule(([]IngressRule)(x.Rules), e) - } + r.EncodeInt(int64(x.ObservedGeneration)) } } } @@ -12285,7 +10756,7 @@ func (x *IngressSpec) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *IngressSpec) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *ReplicaSetStatus) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -12315,7 +10786,7 @@ func (x *IngressSpec) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *IngressSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *ReplicaSetStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -12337,40 +10808,23 @@ func (x *IngressSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) switch yys3 { - case "backend": + case "replicas": if r.TryDecodeAsNil() { - if x.Backend != nil { - x.Backend = nil - } + x.Replicas = 0 } else { - if x.Backend == nil { - x.Backend = new(IngressBackend) - } - x.Backend.CodecDecodeSelf(d) + x.Replicas = int32(r.DecodeInt(32)) } - case "tls": + case "fullyLabeledReplicas": if r.TryDecodeAsNil() { - x.TLS = nil + x.FullyLabeledReplicas = 0 } else { - yyv5 := &x.TLS - yym6 := z.DecBinary() - _ = yym6 - if false { - } else { - h.decSliceIngressTLS((*[]IngressTLS)(yyv5), d) - } + x.FullyLabeledReplicas = int32(r.DecodeInt(32)) } - case "rules": + case "observedGeneration": if r.TryDecodeAsNil() { - x.Rules = nil + x.ObservedGeneration = 0 } else { - yyv7 := &x.Rules - yym8 := z.DecBinary() - _ = yym8 - if false { - } else { - h.decSliceIngressRule((*[]IngressRule)(yyv7), d) - } + x.ObservedGeneration = int64(r.DecodeInt(64)) } default: z.DecStructFieldNotFound(-1, yys3) @@ -12379,95 +10833,78 @@ func (x *IngressSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *IngressSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *ReplicaSetStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb9 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb9 { + if yyb7 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - if x.Backend != nil { - x.Backend = nil - } + x.Replicas = 0 } else { - if 
x.Backend == nil { - x.Backend = new(IngressBackend) - } - x.Backend.CodecDecodeSelf(d) + x.Replicas = int32(r.DecodeInt(32)) } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb9 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb9 { + if yyb7 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.TLS = nil + x.FullyLabeledReplicas = 0 } else { - yyv11 := &x.TLS - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - h.decSliceIngressTLS((*[]IngressTLS)(yyv11), d) - } + x.FullyLabeledReplicas = int32(r.DecodeInt(32)) } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb9 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb9 { + if yyb7 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Rules = nil + x.ObservedGeneration = 0 } else { - yyv13 := &x.Rules - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSliceIngressRule((*[]IngressRule)(yyv13), d) - } + x.ObservedGeneration = int64(r.DecodeInt(64)) } for { - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb9 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb9 { + if yyb7 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") + z.DecStructFieldNotFound(yyj7-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *IngressTLS) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *PodSecurityPolicy) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -12481,14 +10918,16 @@ func (x *IngressTLS) CodecEncodeSelf(e *codec1978.Encoder) { } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool + var yyq2 [4]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false - yyq2[0] = len(x.Hosts) != 0 - yyq2[1] = x.SecretName != "" + yyq2[0] = true + yyq2[1] = true + yyq2[2] = x.Kind != "" + yyq2[3] = x.APIVersion != "" var yynn2 int if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) + r.EncodeArrayStart(4) } else { yynn2 = 0 for _, b := range yyq2 { @@ -12502,58 +10941,84 @@ func (x *IngressTLS) CodecEncodeSelf(e *codec1978.Encoder) { if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if yyq2[0] { - if x.Hosts == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - z.F.EncSliceStringV(x.Hosts, false, e) - } - } + yy4 := &x.ObjectMeta + yy4.CodecEncodeSelf(e) } else { r.EncodeNil() } } else { if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hosts")) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Hosts == nil { - r.EncodeNil() + yy6 := &x.ObjectMeta + yy6.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yy9 := &x.Spec + yy9.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy11 := &x.Spec + yy11.CodecEncodeSelf(e) + } + } + if yyr2 
|| yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym14 := z.EncBinary() + _ = yym14 + if false { } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - z.F.EncSliceStringV(x.Hosts, false, e) - } + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 + if yyq2[3] { + yym17 := z.EncBinary() + _ = yym17 if false { } else { - r.EncodeString(codecSelferC_UTF81234, string(x.SecretName)) + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } else { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2[1] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("secretName")) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 + yym18 := z.EncBinary() + _ = yym18 if false { } else { - r.EncodeString(codecSelferC_UTF81234, string(x.SecretName)) + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } @@ -12566,7 +11031,7 @@ func (x *IngressTLS) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *IngressTLS) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *PodSecurityPolicy) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -12596,7 +11061,7 @@ func (x *IngressTLS) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *IngressTLS) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *PodSecurityPolicy) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -12618,23 +11083,31 @@ func (x *IngressTLS) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) switch yys3 { - case "hosts": + case "metadata": if r.TryDecodeAsNil() { - x.Hosts = nil + x.ObjectMeta = pkg2_api.ObjectMeta{} } else { - yyv4 := &x.Hosts - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - z.F.DecSliceStringX(yyv4, false, d) - } + yyv4 := &x.ObjectMeta + yyv4.CodecDecodeSelf(d) } - case "secretName": + case "spec": if r.TryDecodeAsNil() { - x.SecretName = "" + x.Spec = PodSecurityPolicySpec{} } else { - x.SecretName = string(r.DecodeString()) + yyv5 := &x.Spec + yyv5.CodecDecodeSelf(d) + } + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) } default: z.DecStructFieldNotFound(-1, yys3) @@ -12643,68 +11116,96 @@ func (x *IngressTLS) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *IngressTLS) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *PodSecurityPolicy) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := 
codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb7 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb7 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Hosts = nil + x.ObjectMeta = pkg2_api.ObjectMeta{} } else { - yyv8 := &x.Hosts - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - z.F.DecSliceStringX(yyv8, false, d) - } + yyv9 := &x.ObjectMeta + yyv9.CodecDecodeSelf(d) } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb7 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb7 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.SecretName = "" + x.Spec = PodSecurityPolicySpec{} } else { - x.SecretName = string(r.DecodeString()) + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) } for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb7 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb7 { + if yyb8 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") + z.DecStructFieldNotFound(yyj8-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *IngressStatus) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *PodSecurityPolicySpec) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -12718,15 +11219,24 @@ func (x *IngressStatus) CodecEncodeSelf(e *codec1978.Encoder) { } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool + var yyq2 [14]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false - yyq2[0] = true + yyq2[0] = x.Privileged != false + yyq2[1] = len(x.DefaultAddCapabilities) != 0 + yyq2[2] = len(x.RequiredDropCapabilities) != 0 + yyq2[3] = len(x.AllowedCapabilities) != 0 + yyq2[4] = len(x.Volumes) != 0 + yyq2[5] = x.HostNetwork != false + yyq2[6] = len(x.HostPorts) != 0 + yyq2[7] = x.HostPID != false + yyq2[8] = x.HostIPC != false + yyq2[13] = x.ReadOnlyRootFilesystem != false var yynn2 int if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) + r.EncodeArrayStart(14) } else { - yynn2 = 0 + yynn2 = 4 for _, b := range yyq2 { if b { yynn2++ @@ -12738,431 +11248,334 @@ func (x *IngressStatus) CodecEncodeSelf(e *codec1978.Encoder) { if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if yyq2[0] { - yy4 := &x.LoadBalancer - yy4.CodecEncodeSelf(e) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + 
r.EncodeBool(bool(x.Privileged)) + } } else { - r.EncodeNil() + r.EncodeBool(false) } } else { if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("loadBalancer")) + r.EncodeString(codecSelferC_UTF81234, string("privileged")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.LoadBalancer - yy6.CodecEncodeSelf(e) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeBool(bool(x.Privileged)) + } } } if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.DefaultAddCapabilities == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + h.encSliceapi_Capability(([]pkg2_api.Capability)(x.DefaultAddCapabilities), e) + } + } + } else { + r.EncodeNil() + } } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("defaultAddCapabilities")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.DefaultAddCapabilities == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + h.encSliceapi_Capability(([]pkg2_api.Capability)(x.DefaultAddCapabilities), e) + } + } + } } - } - } -} - -func (x *IngressStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.RequiredDropCapabilities == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + h.encSliceapi_Capability(([]pkg2_api.Capability)(x.RequiredDropCapabilities), e) + } + } + } else { + r.EncodeNil() + } } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *IngressStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("requiredDropCapabilities")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.RequiredDropCapabilities == nil { + r.EncodeNil() + } else { + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + h.encSliceapi_Capability(([]pkg2_api.Capability)(x.RequiredDropCapabilities), e) + } + } + } } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - 
z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "loadBalancer": - if r.TryDecodeAsNil() { - x.LoadBalancer = pkg2_api.LoadBalancerStatus{} + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.AllowedCapabilities == nil { + r.EncodeNil() + } else { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + h.encSliceapi_Capability(([]pkg2_api.Capability)(x.AllowedCapabilities), e) + } + } + } else { + r.EncodeNil() + } } else { - yyv4 := &x.LoadBalancer - yyv4.CodecDecodeSelf(d) + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("allowedCapabilities")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.AllowedCapabilities == nil { + r.EncodeNil() + } else { + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + h.encSliceapi_Capability(([]pkg2_api.Capability)(x.AllowedCapabilities), e) + } + } + } } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *IngressStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj5 int - var yyb5 bool - var yyhl5 bool = l >= 0 - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l - } else { - yyb5 = r.CheckBreak() - } - if yyb5 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LoadBalancer = pkg2_api.LoadBalancerStatus{} - } else { - yyv6 := &x.LoadBalancer - yyv6.CodecDecodeSelf(d) - } - for { - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l - } else { - yyb5 = r.CheckBreak() - } - if yyb5 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj5-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *IngressRule) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Host != "" - yyq2[1] = x.IngressRuleValue.HTTP != nil && x.HTTP != nil - var yynn2 int if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.Volumes == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceFSType(([]FSType)(x.Volumes), e) + } + } + } else { + r.EncodeNil() + } } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("volumes")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Volumes == nil { + r.EncodeNil() + } else { + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + h.encSliceFSType(([]FSType)(x.Volumes), e) + } } } - r.EncodeMapStart(yynn2) - yynn2 = 0 } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 + if yyq2[5] { + yym19 := z.EncBinary() + _ = yym19 if false { } 
else { - r.EncodeString(codecSelferC_UTF81234, string(x.Host)) + r.EncodeBool(bool(x.HostNetwork)) } } else { - r.EncodeString(codecSelferC_UTF81234, "") + r.EncodeBool(false) } } else { - if yyq2[0] { + if yyq2[5] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("host")) + r.EncodeString(codecSelferC_UTF81234, string("hostNetwork")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 + yym20 := z.EncBinary() + _ = yym20 if false { } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Host)) + r.EncodeBool(bool(x.HostNetwork)) } } } - var yyn6 bool - if x.IngressRuleValue.HTTP == nil { - yyn6 = true - goto LABEL6 - } - LABEL6: if yyr2 || yy2arr2 { - if yyn6 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.HTTP == nil { - r.EncodeNil() + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + if x.HostPorts == nil { + r.EncodeNil() + } else { + yym22 := z.EncBinary() + _ = yym22 + if false { } else { - x.HTTP.CodecEncodeSelf(e) + h.encSliceHostPortRange(([]HostPortRange)(x.HostPorts), e) } - } else { - r.EncodeNil() } + } else { + r.EncodeNil() } } else { - if yyq2[1] { + if yyq2[6] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("http")) + r.EncodeString(codecSelferC_UTF81234, string("hostPorts")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn6 { + if x.HostPorts == nil { r.EncodeNil() } else { - if x.HTTP == nil { - r.EncodeNil() + yym23 := z.EncBinary() + _ = yym23 + if false { } else { - x.HTTP.CodecEncodeSelf(e) + h.encSliceHostPortRange(([]HostPortRange)(x.HostPorts), e) } } } } if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[7] { + yym25 := z.EncBinary() + _ = yym25 + if false { + } else { + r.EncodeBool(bool(x.HostPID)) + } + } else { + r.EncodeBool(false) + } } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) + if yyq2[7] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("hostPID")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym26 := z.EncBinary() + _ = yym26 + if false { + } else { + r.EncodeBool(bool(x.HostPID)) + } + } } - } - } -} - -func (x *IngressRule) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[8] { + yym28 := z.EncBinary() + _ = yym28 + if false { + } else { + r.EncodeBool(bool(x.HostIPC)) + } + } else { + r.EncodeBool(false) + } } else { - x.codecDecodeSelfFromMap(yyl2, d) + if yyq2[8] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("hostIPC")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym29 := z.EncBinary() + _ = yym29 + if false { + } else { + r.EncodeBool(bool(x.HostIPC)) + } + } } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy31 := &x.SELinux + yy31.CodecEncodeSelf(e) } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *IngressRule) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("seLinux")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy33 := &x.SELinux + yy33.CodecEncodeSelf(e) } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "host": - if r.TryDecodeAsNil() { - x.Host = "" + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy36 := &x.RunAsUser + yy36.CodecEncodeSelf(e) } else { - x.Host = string(r.DecodeString()) - } - case "http": - if x.IngressRuleValue.HTTP == nil { - x.IngressRuleValue.HTTP = new(HTTPIngressRuleValue) + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("runAsUser")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy38 := &x.RunAsUser + yy38.CodecEncodeSelf(e) } - if r.TryDecodeAsNil() { - if x.HTTP != nil { - x.HTTP = nil - } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy41 := &x.SupplementalGroups + yy41.CodecEncodeSelf(e) } else { - if x.HTTP == nil { - x.HTTP = new(HTTPIngressRuleValue) - } - x.HTTP.CodecDecodeSelf(d) + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("supplementalGroups")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy43 := &x.SupplementalGroups + yy43.CodecEncodeSelf(e) } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *IngressRule) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Host = "" - } else { - x.Host = string(r.DecodeString()) - } - if x.IngressRuleValue.HTTP == nil { - x.IngressRuleValue.HTTP = new(HTTPIngressRuleValue) - } - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.HTTP != nil { - x.HTTP = nil - } - } else { - if x.HTTP == nil { - x.HTTP = new(HTTPIngressRuleValue) - } - x.HTTP.CodecDecodeSelf(d) - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - 
if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *IngressRuleValue) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.HTTP != nil - var yynn2 int if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy46 := &x.FSGroup + yy46.CodecEncodeSelf(e) } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("fsGroup")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy48 := &x.FSGroup + yy48.CodecEncodeSelf(e) } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.HTTP == nil { - r.EncodeNil() + if yyq2[13] { + yym51 := z.EncBinary() + _ = yym51 + if false { } else { - x.HTTP.CodecEncodeSelf(e) + r.EncodeBool(bool(x.ReadOnlyRootFilesystem)) } } else { - r.EncodeNil() + r.EncodeBool(false) } } else { - if yyq2[0] { + if yyq2[13] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("http")) + r.EncodeString(codecSelferC_UTF81234, string("readOnlyRootFilesystem")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.HTTP == nil { - r.EncodeNil() + yym52 := z.EncBinary() + _ = yym52 + if false { } else { - x.HTTP.CodecEncodeSelf(e) + r.EncodeBool(bool(x.ReadOnlyRootFilesystem)) } } } @@ -13175,7 +11588,7 @@ func (x *IngressRuleValue) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *IngressRuleValue) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *PodSecurityPolicySpec) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -13205,7 +11618,7 @@ func (x *IngressRuleValue) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *IngressRuleValue) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *PodSecurityPolicySpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -13227,16 +11640,123 @@ func (x *IngressRuleValue) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) switch yys3 { - case "http": + case "privileged": if r.TryDecodeAsNil() { - if x.HTTP != nil { - x.HTTP = nil + x.Privileged = false + } else { + x.Privileged = bool(r.DecodeBool()) + } + case "defaultAddCapabilities": + if r.TryDecodeAsNil() { + x.DefaultAddCapabilities = nil + } else { + yyv5 := &x.DefaultAddCapabilities + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + h.decSliceapi_Capability((*[]pkg2_api.Capability)(yyv5), d) } + } + case "requiredDropCapabilities": + if r.TryDecodeAsNil() { + x.RequiredDropCapabilities = nil } else { - if x.HTTP == nil { - x.HTTP = new(HTTPIngressRuleValue) + yyv7 := &x.RequiredDropCapabilities + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + 
h.decSliceapi_Capability((*[]pkg2_api.Capability)(yyv7), d) } - x.HTTP.CodecDecodeSelf(d) + } + case "allowedCapabilities": + if r.TryDecodeAsNil() { + x.AllowedCapabilities = nil + } else { + yyv9 := &x.AllowedCapabilities + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + h.decSliceapi_Capability((*[]pkg2_api.Capability)(yyv9), d) + } + } + case "volumes": + if r.TryDecodeAsNil() { + x.Volumes = nil + } else { + yyv11 := &x.Volumes + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + h.decSliceFSType((*[]FSType)(yyv11), d) + } + } + case "hostNetwork": + if r.TryDecodeAsNil() { + x.HostNetwork = false + } else { + x.HostNetwork = bool(r.DecodeBool()) + } + case "hostPorts": + if r.TryDecodeAsNil() { + x.HostPorts = nil + } else { + yyv14 := &x.HostPorts + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + h.decSliceHostPortRange((*[]HostPortRange)(yyv14), d) + } + } + case "hostPID": + if r.TryDecodeAsNil() { + x.HostPID = false + } else { + x.HostPID = bool(r.DecodeBool()) + } + case "hostIPC": + if r.TryDecodeAsNil() { + x.HostIPC = false + } else { + x.HostIPC = bool(r.DecodeBool()) + } + case "seLinux": + if r.TryDecodeAsNil() { + x.SELinux = SELinuxStrategyOptions{} + } else { + yyv18 := &x.SELinux + yyv18.CodecDecodeSelf(d) + } + case "runAsUser": + if r.TryDecodeAsNil() { + x.RunAsUser = RunAsUserStrategyOptions{} + } else { + yyv19 := &x.RunAsUser + yyv19.CodecDecodeSelf(d) + } + case "supplementalGroups": + if r.TryDecodeAsNil() { + x.SupplementalGroups = SupplementalGroupsStrategyOptions{} + } else { + yyv20 := &x.SupplementalGroups + yyv20.CodecDecodeSelf(d) + } + case "fsGroup": + if r.TryDecodeAsNil() { + x.FSGroup = FSGroupStrategyOptions{} + } else { + yyv21 := &x.FSGroup + yyv21.CodecDecodeSelf(d) + } + case "readOnlyRootFilesystem": + if r.TryDecodeAsNil() { + x.ReadOnlyRootFilesystem = false + } else { + x.ReadOnlyRootFilesystem = bool(r.DecodeBool()) } default: z.DecStructFieldNotFound(-1, yys3) @@ -13245,233 +11765,288 @@ func (x *IngressRuleValue) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *IngressRuleValue) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *PodSecurityPolicySpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj5 int - var yyb5 bool - var yyhl5 bool = l >= 0 - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l + var yyj23 int + var yyb23 bool + var yyhl23 bool = l >= 0 + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l } else { - yyb5 = r.CheckBreak() + yyb23 = r.CheckBreak() } - if yyb5 { + if yyb23 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - if x.HTTP != nil { - x.HTTP = nil - } + x.Privileged = false } else { - if x.HTTP == nil { - x.HTTP = new(HTTPIngressRuleValue) - } - x.HTTP.CodecDecodeSelf(d) + x.Privileged = bool(r.DecodeBool()) } - for { - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.DefaultAddCapabilities = nil + } else { + yyv25 := &x.DefaultAddCapabilities + yym26 := z.DecBinary() + _ = yym26 + if false { } else { - yyb5 = r.CheckBreak() + 
h.decSliceapi_Capability((*[]pkg2_api.Capability)(yyv25), d) } - if yyb5 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj5-1, "") } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *HTTPIngressRuleValue) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l } else { - yym1 := z.EncBinary() - _ = yym1 + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.RequiredDropCapabilities = nil + } else { + yyv27 := &x.RequiredDropCapabilities + yym28 := z.DecBinary() + _ = yym28 if false { - } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Paths == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - h.encSliceHTTPIngressPath(([]HTTPIngressPath)(x.Paths), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("paths")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Paths == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - h.encSliceHTTPIngressPath(([]HTTPIngressPath)(x.Paths), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } + h.decSliceapi_Capability((*[]pkg2_api.Capability)(yyv27), d) } } -} - -func (x *HTTPIngressRuleValue) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.AllowedCapabilities = nil + } else { + yyv29 := &x.AllowedCapabilities + yym30 := z.DecBinary() + _ = yym30 + if false { } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + h.decSliceapi_Capability((*[]pkg2_api.Capability)(yyv29), d) } } -} - -func (x *HTTPIngressRuleValue) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice 
to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Volumes = nil + } else { + yyv31 := &x.Volumes + yym32 := z.DecBinary() + _ = yym32 + if false { } else { - if r.CheckBreak() { - break - } + h.decSliceFSType((*[]FSType)(yyv31), d) } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "paths": - if r.TryDecodeAsNil() { - x.Paths = nil - } else { - yyv4 := &x.Paths - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - h.decSliceHTTPIngressPath((*[]HTTPIngressPath)(yyv4), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *HTTPIngressRuleValue) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l } else { - yyb6 = r.CheckBreak() + yyb23 = r.CheckBreak() } - if yyb6 { + if yyb23 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Paths = nil + x.HostNetwork = false } else { - yyv7 := &x.Paths - yym8 := z.DecBinary() - _ = yym8 + x.HostNetwork = bool(r.DecodeBool()) + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.HostPorts = nil + } else { + yyv34 := &x.HostPorts + yym35 := z.DecBinary() + _ = yym35 if false { } else { - h.decSliceHTTPIngressPath((*[]HTTPIngressPath)(yyv7), d) + h.decSliceHostPortRange((*[]HostPortRange)(yyv34), d) } } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.HostPID = false + } else { + x.HostPID = bool(r.DecodeBool()) + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.HostIPC = false + } else { + x.HostIPC = bool(r.DecodeBool()) + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.SELinux = SELinuxStrategyOptions{} + } else { + yyv38 := &x.SELinux + yyv38.CodecDecodeSelf(d) + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.RunAsUser = RunAsUserStrategyOptions{} + } else { + yyv39 := &x.RunAsUser + yyv39.CodecDecodeSelf(d) + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.SupplementalGroups = SupplementalGroupsStrategyOptions{} + } else { + yyv40 := &x.SupplementalGroups + yyv40.CodecDecodeSelf(d) + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.FSGroup = FSGroupStrategyOptions{} + } else { + yyv41 := &x.FSGroup + yyv41.CodecDecodeSelf(d) + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ReadOnlyRootFilesystem = false + } else { + x.ReadOnlyRootFilesystem = bool(r.DecodeBool()) + } for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l } else { - yyb6 = r.CheckBreak() + yyb23 = r.CheckBreak() } - if yyb6 { + if yyb23 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") + z.DecStructFieldNotFound(yyj23-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *HTTPIngressPath) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *HostPortRange) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -13488,12 +12063,11 @@ func (x *HTTPIngressPath) CodecEncodeSelf(e *codec1978.Encoder) { var yyq2 [2]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false - yyq2[0] = x.Path != "" var yynn2 int if yyr2 || yy2arr2 { r.EncodeArrayStart(2) } else { - yynn2 = 1 + yynn2 = 2 for _, b := range yyq2 { if b { yynn2++ @@ -13504,39 +12078,41 @@ func (x *HTTPIngressPath) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } + yym4 := z.EncBinary() + _ = yym4 + if false { } else { - r.EncodeString(codecSelferC_UTF81234, "") + r.EncodeInt(int64(x.Min)) } } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("path")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("min")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(x.Min)) } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy7 := &x.Backend - yy7.CodecEncodeSelf(e) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeInt(int64(x.Max)) + } } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) - 
r.EncodeString(codecSelferC_UTF81234, string("backend")) + r.EncodeString(codecSelferC_UTF81234, string("max")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy9 := &x.Backend - yy9.CodecEncodeSelf(e) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeInt(int64(x.Max)) + } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) @@ -13547,7 +12123,7 @@ func (x *HTTPIngressPath) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *HTTPIngressPath) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *HostPortRange) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -13577,7 +12153,7 @@ func (x *HTTPIngressPath) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *HTTPIngressPath) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *HostPortRange) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -13599,18 +12175,17 @@ func (x *HTTPIngressPath) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) switch yys3 { - case "path": + case "min": if r.TryDecodeAsNil() { - x.Path = "" + x.Min = 0 } else { - x.Path = string(r.DecodeString()) + x.Min = int(r.DecodeInt(codecSelferBitsize1234)) } - case "backend": + case "max": if r.TryDecodeAsNil() { - x.Backend = IngressBackend{} + x.Max = 0 } else { - yyv5 := &x.Backend - yyv5.CodecDecodeSelf(d) + x.Max = int(r.DecodeInt(codecSelferBitsize1234)) } default: z.DecStructFieldNotFound(-1, yys3) @@ -13619,7 +12194,7 @@ func (x *HTTPIngressPath) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *HTTPIngressPath) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *HostPortRange) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -13638,9 +12213,9 @@ func (x *HTTPIngressPath) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Path = "" + x.Min = 0 } else { - x.Path = string(r.DecodeString()) + x.Min = int(r.DecodeInt(codecSelferBitsize1234)) } yyj6++ if yyhl6 { @@ -13654,28 +12229,53 @@ func (x *HTTPIngressPath) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Backend = IngressBackend{} + x.Max = 0 + } else { + x.Max = int(r.DecodeInt(codecSelferBitsize1234)) + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x FSType) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *FSType) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { } else { - yyv8 := &x.Backend - 
yyv8.CodecDecodeSelf(d) - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") + *((*string)(x)) = r.DecodeString() } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *IngressBackend) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *SELinuxStrategyOptions) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -13692,11 +12292,12 @@ func (x *IngressBackend) CodecEncodeSelf(e *codec1978.Encoder) { var yyq2 [2]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false + yyq2[1] = x.SELinuxOptions != nil var yynn2 int if yyr2 || yy2arr2 { r.EncodeArrayStart(2) } else { - yynn2 = 2 + yynn2 = 1 for _, b := range yyq2 { if b { yynn2++ @@ -13707,48 +12308,34 @@ func (x *IngressBackend) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ServiceName)) - } + x.Rule.CodecEncodeSelf(e) } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("serviceName")) + r.EncodeString(codecSelferC_UTF81234, string("rule")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ServiceName)) - } + x.Rule.CodecEncodeSelf(e) } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy7 := &x.ServicePort - yym8 := z.EncBinary() - _ = yym8 - if false { - } else if z.HasExtensions() && z.EncExt(yy7) { - } else if !yym8 && z.IsJSONHandle() { - z.EncJSONMarshal(yy7) + if yyq2[1] { + if x.SELinuxOptions == nil { + r.EncodeNil() + } else { + x.SELinuxOptions.CodecEncodeSelf(e) + } } else { - z.EncFallback(yy7) + r.EncodeNil() } } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("servicePort")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy9 := &x.ServicePort - yym10 := z.EncBinary() - _ = yym10 - if false { - } else if z.HasExtensions() && z.EncExt(yy9) { - } else if !yym10 && z.IsJSONHandle() { - z.EncJSONMarshal(yy9) - } else { - z.EncFallback(yy9) + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("seLinuxOptions")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.SELinuxOptions == nil { + r.EncodeNil() + } else { + x.SELinuxOptions.CodecEncodeSelf(e) + } } } if yyr2 || yy2arr2 { @@ -13760,7 +12347,7 @@ func (x *IngressBackend) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *IngressBackend) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *SELinuxStrategyOptions) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -13790,7 +12377,7 @@ func (x *IngressBackend) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *IngressBackend) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *SELinuxStrategyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -13812,26 +12399,22 @@ func (x *IngressBackend) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { yys3 := 
string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) switch yys3 { - case "serviceName": + case "rule": if r.TryDecodeAsNil() { - x.ServiceName = "" + x.Rule = "" } else { - x.ServiceName = string(r.DecodeString()) + x.Rule = SELinuxStrategy(r.DecodeString()) } - case "servicePort": + case "seLinuxOptions": if r.TryDecodeAsNil() { - x.ServicePort = pkg6_intstr.IntOrString{} + if x.SELinuxOptions != nil { + x.SELinuxOptions = nil + } } else { - yyv5 := &x.ServicePort - yym6 := z.DecBinary() - _ = yym6 - if false { - } else if z.HasExtensions() && z.DecExt(yyv5) { - } else if !yym6 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv5) - } else { - z.DecFallback(yyv5, false) + if x.SELinuxOptions == nil { + x.SELinuxOptions = new(pkg2_api.SELinuxOptions) } + x.SELinuxOptions.CodecDecodeSelf(d) } default: z.DecStructFieldNotFound(-1, yys3) @@ -13840,71 +12423,93 @@ func (x *IngressBackend) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *IngressBackend) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *SELinuxStrategyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - yyb7 = r.CheckBreak() + yyb6 = r.CheckBreak() } - if yyb7 { + if yyb6 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ServiceName = "" + x.Rule = "" } else { - x.ServiceName = string(r.DecodeString()) + x.Rule = SELinuxStrategy(r.DecodeString()) } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - yyb7 = r.CheckBreak() + yyb6 = r.CheckBreak() } - if yyb7 { + if yyb6 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ServicePort = pkg6_intstr.IntOrString{} + if x.SELinuxOptions != nil { + x.SELinuxOptions = nil + } } else { - yyv9 := &x.ServicePort - yym10 := z.DecBinary() - _ = yym10 - if false { - } else if z.HasExtensions() && z.DecExt(yyv9) { - } else if !yym10 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv9) - } else { - z.DecFallback(yyv9, false) + if x.SELinuxOptions == nil { + x.SELinuxOptions = new(pkg2_api.SELinuxOptions) } + x.SELinuxOptions.CodecDecodeSelf(d) } for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - yyb7 = r.CheckBreak() + yyb6 = r.CheckBreak() } - if yyb7 { + if yyb6 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") + z.DecStructFieldNotFound(yyj6-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *ReplicaSet) CodecEncodeSelf(e *codec1978.Encoder) { +func (x SELinuxStrategy) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *SELinuxStrategy) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := 
z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *RunAsUserStrategyOptions) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -13918,125 +12523,62 @@ func (x *ReplicaSet) CodecEncodeSelf(e *codec1978.Encoder) { } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool + var yyq2 [2]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false - yyq2[0] = true - yyq2[1] = true - yyq2[2] = true - yyq2[3] = x.Kind != "" - yyq2[4] = x.APIVersion != "" + yyq2[1] = len(x.Ranges) != 0 var yynn2 int if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) + r.EncodeArrayStart(2) } else { - yynn2 = 0 + yynn2 = 1 for _, b := range yyq2 { if b { yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yy9 := &x.Spec - yy9.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.Spec - yy11.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy14 := &x.Status - yy14.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy16 := &x.Status - yy16.CodecEncodeSelf(e) + } } + r.EncodeMapStart(yynn2) + yynn2 = 0 } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } + x.Rule.CodecEncodeSelf(e) } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("rule")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Rule.CodecEncodeSelf(e) } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym22 := z.EncBinary() - _ = yym22 - if false { + if yyq2[1] { + if x.Ranges == nil { + r.EncodeNil() } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + h.encSliceIDRange(([]IDRange)(x.Ranges), e) + } } } else { - r.EncodeString(codecSelferC_UTF81234, "") + r.EncodeNil() } } else { - if yyq2[4] { + 
if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + r.EncodeString(codecSelferC_UTF81234, string("ranges")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym23 := z.EncBinary() - _ = yym23 - if false { + if x.Ranges == nil { + r.EncodeNil() } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + h.encSliceIDRange(([]IDRange)(x.Ranges), e) + } } } } @@ -14049,7 +12591,7 @@ func (x *ReplicaSet) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *ReplicaSet) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *RunAsUserStrategyOptions) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -14079,7 +12621,7 @@ func (x *ReplicaSet) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *ReplicaSet) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *RunAsUserStrategyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -14101,38 +12643,23 @@ func (x *ReplicaSet) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_api.ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = ReplicaSetSpec{} - } else { - yyv5 := &x.Spec - yyv5.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = ReplicaSetStatus{} - } else { - yyv6 := &x.Status - yyv6.CodecDecodeSelf(d) - } - case "kind": + case "rule": if r.TryDecodeAsNil() { - x.Kind = "" + x.Rule = "" } else { - x.Kind = string(r.DecodeString()) + x.Rule = RunAsUserStrategy(r.DecodeString()) } - case "apiVersion": + case "ranges": if r.TryDecodeAsNil() { - x.APIVersion = "" + x.Ranges = nil } else { - x.APIVersion = string(r.DecodeString()) + yyv5 := &x.Ranges + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + h.decSliceIDRange((*[]IDRange)(yyv5), d) + } } default: z.DecStructFieldNotFound(-1, yys3) @@ -14141,113 +12668,68 @@ func (x *ReplicaSet) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *ReplicaSet) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *RunAsUserStrategyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_api.ObjectMeta{} - } else { - yyv10 := &x.ObjectMeta - yyv10.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = ReplicaSetSpec{} - } else { - yyv11 := &x.Spec - yyv11.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = ReplicaSetStatus{} - } else { - yyv12 := &x.Status - yyv12.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb9 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb9 { + if yyb7 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Kind = "" + x.Rule = "" } else { - x.Kind = string(r.DecodeString()) + x.Rule = RunAsUserStrategy(r.DecodeString()) } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb9 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb9 { + if yyb7 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.APIVersion = "" + x.Ranges = nil } else { - x.APIVersion = string(r.DecodeString()) + yyv9 := &x.Ranges + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + h.decSliceIDRange((*[]IDRange)(yyv9), d) + } } for { - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb9 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb9 { + if yyb7 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") + z.DecStructFieldNotFound(yyj7-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *ReplicaSetList) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *IDRange) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -14261,129 +12743,58 @@ func (x *ReplicaSetList) CodecEncodeSelf(e *codec1978.Encoder) { } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool + var yyq2 [2]bool _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ListMeta - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else { - z.EncFallback(yy4) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ListMeta - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else { - z.EncFallback(yy6) + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ } } + r.EncodeMapStart(yynn2) + yynn2 = 0 } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() + yym4 := z.EncBinary() + _ = yym4 + if false { } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSliceReplicaSet(([]ReplicaSet)(x.Items), e) - } + r.EncodeInt(int64(x.Min)) } } else 
{ z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) + r.EncodeString(codecSelferC_UTF81234, string("min")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() + yym5 := z.EncBinary() + _ = yym5 + if false { } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSliceReplicaSet(([]ReplicaSet)(x.Items), e) - } + r.EncodeInt(int64(x.Min)) } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } + yym7 := z.EncBinary() + _ = yym7 + if false { } else { - r.EncodeString(codecSelferC_UTF81234, "") + r.EncodeInt(int64(x.Max)) } } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("max")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } + r.EncodeInt(int64(x.Max)) } } if yyr2 || yy2arr2 { @@ -14395,7 +12806,7 @@ func (x *ReplicaSetList) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *ReplicaSetList) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *IDRange) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -14425,7 +12836,7 @@ func (x *ReplicaSetList) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *ReplicaSetList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *IDRange) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -14447,42 +12858,17 @@ func (x *ReplicaSetList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv4 := &x.ListMeta - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else { - z.DecFallback(yyv4, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv6 := &x.Items - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceReplicaSet((*[]ReplicaSet)(yyv6), d) - } - } - case "kind": + case "min": if r.TryDecodeAsNil() { - x.Kind = "" + x.Min = 0 } else { - x.Kind = string(r.DecodeString()) + x.Min = int64(r.DecodeInt(64)) } - case "apiVersion": + case "max": if r.TryDecodeAsNil() 
{ - x.APIVersion = "" + x.Max = 0 } else { - x.APIVersion = string(r.DecodeString()) + x.Max = int64(r.DecodeInt(64)) } default: z.DecStructFieldNotFound(-1, yys3) @@ -14491,107 +12877,88 @@ func (x *ReplicaSetList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *ReplicaSetList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *IDRange) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv11 := &x.ListMeta - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else { - z.DecFallback(yyv11, false) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv13 := &x.Items - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSliceReplicaSet((*[]ReplicaSet)(yyv13), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - yyb10 = r.CheckBreak() + yyb6 = r.CheckBreak() } - if yyb10 { + if yyb6 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Kind = "" + x.Min = 0 } else { - x.Kind = string(r.DecodeString()) + x.Min = int64(r.DecodeInt(64)) } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - yyb10 = r.CheckBreak() + yyb6 = r.CheckBreak() } - if yyb10 { + if yyb6 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.APIVersion = "" + x.Max = 0 } else { - x.APIVersion = string(r.DecodeString()) + x.Max = int64(r.DecodeInt(64)) } for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l } else { - yyb10 = r.CheckBreak() + yyb6 = r.CheckBreak() } - if yyb10 { + if yyb6 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") + z.DecStructFieldNotFound(yyj6-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *ReplicaSetSpec) CodecEncodeSelf(e *codec1978.Encoder) { +func (x RunAsUserStrategy) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *RunAsUserStrategy) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x 
*FSGroupStrategyOptions) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -14605,16 +12972,16 @@ func (x *ReplicaSetSpec) CodecEncodeSelf(e *codec1978.Encoder) { } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool + var yyq2 [2]bool _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.Selector != nil - yyq2[2] = true + const yyr2 bool = false + yyq2[0] = x.Rule != "" + yyq2[1] = len(x.Ranges) != 0 var yynn2 int if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) + r.EncodeArrayStart(2) } else { - yynn2 = 1 + yynn2 = 0 for _, b := range yyq2 { if b { yynn2++ @@ -14625,35 +12992,30 @@ func (x *ReplicaSetSpec) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { + if yyq2[0] { + x.Rule.CodecEncodeSelf(e) } else { - r.EncodeInt(int64(x.Replicas)) + r.EncodeString(codecSelferC_UTF81234, "") } } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("replicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("rule")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Rule.CodecEncodeSelf(e) } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if yyq2[1] { - if x.Selector == nil { + if x.Ranges == nil { r.EncodeNil() } else { yym7 := z.EncBinary() _ = yym7 if false { - } else if z.HasExtensions() && z.EncExt(x.Selector) { } else { - z.EncFallback(x.Selector) + h.encSliceIDRange(([]IDRange)(x.Ranges), e) } } } else { @@ -14662,38 +13024,20 @@ func (x *ReplicaSetSpec) CodecEncodeSelf(e *codec1978.Encoder) { } else { if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("selector")) + r.EncodeString(codecSelferC_UTF81234, string("ranges")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Selector == nil { + if x.Ranges == nil { r.EncodeNil() } else { yym8 := z.EncBinary() _ = yym8 if false { - } else if z.HasExtensions() && z.EncExt(x.Selector) { } else { - z.EncFallback(x.Selector) + h.encSliceIDRange(([]IDRange)(x.Ranges), e) } } } } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.Template - yy10.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("template")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.Template - yy12.CodecEncodeSelf(e) - } - } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { @@ -14703,7 +13047,7 @@ func (x *ReplicaSetSpec) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *ReplicaSetSpec) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *FSGroupStrategyOptions) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -14733,7 +13077,7 @@ func (x *ReplicaSetSpec) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *ReplicaSetSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x 
*FSGroupStrategyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -14755,36 +13099,24 @@ func (x *ReplicaSetSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) switch yys3 { - case "replicas": + case "rule": if r.TryDecodeAsNil() { - x.Replicas = 0 + x.Rule = "" } else { - x.Replicas = int(r.DecodeInt(codecSelferBitsize1234)) + x.Rule = FSGroupStrategyType(r.DecodeString()) } - case "selector": + case "ranges": if r.TryDecodeAsNil() { - if x.Selector != nil { - x.Selector = nil - } + x.Ranges = nil } else { - if x.Selector == nil { - x.Selector = new(pkg1_unversioned.LabelSelector) - } + yyv5 := &x.Ranges yym6 := z.DecBinary() _ = yym6 if false { - } else if z.HasExtensions() && z.DecExt(x.Selector) { } else { - z.DecFallback(x.Selector, false) + h.decSliceIDRange((*[]IDRange)(yyv5), d) } } - case "template": - if r.TryDecodeAsNil() { - x.Template = pkg2_api.PodTemplateSpec{} - } else { - yyv7 := &x.Template - yyv7.CodecDecodeSelf(d) - } default: z.DecStructFieldNotFound(-1, yys3) } // end switch yys3 @@ -14792,90 +13124,94 @@ func (x *ReplicaSetSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *ReplicaSetSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *FSGroupStrategyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb8 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb8 { + if yyb7 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Replicas = 0 + x.Rule = "" } else { - x.Replicas = int(r.DecodeInt(codecSelferBitsize1234)) + x.Rule = FSGroupStrategyType(r.DecodeString()) } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb8 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb8 { + if yyb7 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - if x.Selector != nil { - x.Selector = nil - } + x.Ranges = nil } else { - if x.Selector == nil { - x.Selector = new(pkg1_unversioned.LabelSelector) - } - yym11 := z.DecBinary() - _ = yym11 + yyv9 := &x.Ranges + yym10 := z.DecBinary() + _ = yym10 if false { - } else if z.HasExtensions() && z.DecExt(x.Selector) { } else { - z.DecFallback(x.Selector, false) + h.decSliceIDRange((*[]IDRange)(yyv9), d) } } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Template = pkg2_api.PodTemplateSpec{} - } else { - yyv12 := &x.Template - yyv12.CodecDecodeSelf(d) - } for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb8 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb8 { + if yyb7 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") + 
z.DecStructFieldNotFound(yyj7-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *ReplicaSetStatus) CodecEncodeSelf(e *codec1978.Encoder) { +func (x FSGroupStrategyType) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *FSGroupStrategyType) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *SupplementalGroupsStrategyOptions) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -14889,16 +13225,16 @@ func (x *ReplicaSetStatus) CodecEncodeSelf(e *codec1978.Encoder) { } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool + var yyq2 [2]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false - yyq2[1] = x.FullyLabeledReplicas != 0 - yyq2[2] = x.ObservedGeneration != 0 + yyq2[0] = x.Rule != "" + yyq2[1] = len(x.Ranges) != 0 var yynn2 int if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) + r.EncodeArrayStart(2) } else { - yynn2 = 1 + yynn2 = 0 for _, b := range yyq2 { if b { yynn2++ @@ -14909,70 +13245,49 @@ func (x *ReplicaSetStatus) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("replicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeInt(int64(x.FullyLabeledReplicas)) - } + if yyq2[0] { + x.Rule.CodecEncodeSelf(e) } else { - r.EncodeInt(0) + r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2[1] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fullyLabeledReplicas")) + r.EncodeString(codecSelferC_UTF81234, string("rule")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeInt(int64(x.FullyLabeledReplicas)) - } + x.Rule.CodecEncodeSelf(e) } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { + if yyq2[1] { + if x.Ranges == nil { + r.EncodeNil() } else { - r.EncodeInt(int64(x.ObservedGeneration)) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + h.encSliceIDRange(([]IDRange)(x.Ranges), e) + } } } else { - r.EncodeInt(0) + r.EncodeNil() } } else { - if yyq2[2] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("observedGeneration")) + r.EncodeString(codecSelferC_UTF81234, string("ranges")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if 
false { + if x.Ranges == nil { + r.EncodeNil() } else { - r.EncodeInt(int64(x.ObservedGeneration)) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + h.encSliceIDRange(([]IDRange)(x.Ranges), e) + } } } } @@ -14985,7 +13300,7 @@ func (x *ReplicaSetStatus) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *ReplicaSetStatus) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *SupplementalGroupsStrategyOptions) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -15015,7 +13330,7 @@ func (x *ReplicaSetStatus) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *ReplicaSetStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *SupplementalGroupsStrategyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -15037,23 +13352,23 @@ func (x *ReplicaSetStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) switch yys3 { - case "replicas": - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - x.Replicas = int(r.DecodeInt(codecSelferBitsize1234)) - } - case "fullyLabeledReplicas": + case "rule": if r.TryDecodeAsNil() { - x.FullyLabeledReplicas = 0 + x.Rule = "" } else { - x.FullyLabeledReplicas = int(r.DecodeInt(codecSelferBitsize1234)) + x.Rule = SupplementalGroupsStrategyType(r.DecodeString()) } - case "observedGeneration": + case "ranges": if r.TryDecodeAsNil() { - x.ObservedGeneration = 0 + x.Ranges = nil } else { - x.ObservedGeneration = int64(r.DecodeInt(64)) + yyv5 := &x.Ranges + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + h.decSliceIDRange((*[]IDRange)(yyv5), d) + } } default: z.DecStructFieldNotFound(-1, yys3) @@ -15062,7 +13377,7 @@ func (x *ReplicaSetStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *ReplicaSetStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *SupplementalGroupsStrategyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -15081,25 +13396,9 @@ func (x *ReplicaSetStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - x.Replicas = int(r.DecodeInt(codecSelferBitsize1234)) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.FullyLabeledReplicas = 0 + x.Rule = "" } else { - x.FullyLabeledReplicas = int(r.DecodeInt(codecSelferBitsize1234)) + x.Rule = SupplementalGroupsStrategyType(r.DecodeString()) } yyj7++ if yyhl7 { @@ -15113,9 +13412,15 @@ func (x *ReplicaSetStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ObservedGeneration = 0 + x.Ranges = nil } else { - x.ObservedGeneration = int64(r.DecodeInt(64)) + yyv9 := &x.Ranges + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + h.decSliceIDRange((*[]IDRange)(yyv9), d) + } } for { yyj7++ @@ -15133,7 +13438,33 @@ func (x *ReplicaSetStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *PodSecurityPolicy) CodecEncodeSelf(e *codec1978.Encoder) { +func (x SupplementalGroupsStrategyType) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *SupplementalGroupsStrategyType) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *PodSecurityPolicyList) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -15151,14 +13482,13 @@ func (x *PodSecurityPolicy) CodecEncodeSelf(e *codec1978.Encoder) { _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false yyq2[0] = true - yyq2[1] = true yyq2[2] = x.Kind != "" yyq2[3] = x.APIVersion != "" var yynn2 int if yyr2 || yy2arr2 { r.EncodeArrayStart(4) } else { - yynn2 = 0 + yynn2 = 1 for _, b := range yyq2 { if b { yynn2++ @@ -15170,8 +13500,14 @@ func (x *PodSecurityPolicy) CodecEncodeSelf(e *codec1978.Encoder) { if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if yyq2[0] { - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) + yy4 := &x.ListMeta + yym5 := z.EncBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.EncExt(yy4) { + } else { + z.EncFallback(yy4) + } } else { r.EncodeNil() } @@ -15180,32 +13516,48 @@ func (x *PodSecurityPolicy) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("metadata")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) + yy6 := &x.ListMeta + yym7 := z.EncBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.EncExt(yy6) { + } else { + z.EncFallback(yy6) + } } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yy9 := &x.Spec - yy9.CodecEncodeSelf(e) - } else { + if x.Items == nil { r.EncodeNil() + } else { + yym9 := z.EncBinary() + _ = yym9 + if false { + } else { + h.encSlicePodSecurityPolicy(([]PodSecurityPolicy)(x.Items), e) + } } } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.Spec - yy11.CodecEncodeSelf(e) + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + h.encSlicePodSecurityPolicy(([]PodSecurityPolicy)(x.Items), e) + } } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if yyq2[2] { - yym14 := z.EncBinary() - _ = yym14 + yym12 := z.EncBinary() + _ = yym12 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -15218,8 +13570,8 @@ func (x *PodSecurityPolicy) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, 
string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym15 := z.EncBinary() - _ = yym15 + yym13 := z.EncBinary() + _ = yym13 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -15229,8 +13581,8 @@ func (x *PodSecurityPolicy) CodecEncodeSelf(e *codec1978.Encoder) { if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if yyq2[3] { - yym17 := z.EncBinary() - _ = yym17 + yym15 := z.EncBinary() + _ = yym15 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -15243,8 +13595,8 @@ func (x *PodSecurityPolicy) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym18 := z.EncBinary() - _ = yym18 + yym16 := z.EncBinary() + _ = yym16 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -15260,7 +13612,7 @@ func (x *PodSecurityPolicy) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *PodSecurityPolicy) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *PodSecurityPolicyList) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -15290,7 +13642,7 @@ func (x *PodSecurityPolicy) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *PodSecurityPolicy) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *PodSecurityPolicyList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -15314,17 +13666,28 @@ func (x *PodSecurityPolicy) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) switch yys3 { case "metadata": if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_api.ObjectMeta{} + x.ListMeta = pkg1_unversioned.ListMeta{} } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) + yyv4 := &x.ListMeta + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(yyv4) { + } else { + z.DecFallback(yyv4, false) + } } - case "spec": + case "items": if r.TryDecodeAsNil() { - x.Spec = PodSecurityPolicySpec{} + x.Items = nil } else { - yyv5 := &x.Spec - yyv5.CodecDecodeSelf(d) + yyv6 := &x.Items + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + h.decSlicePodSecurityPolicy((*[]PodSecurityPolicy)(yyv6), d) + } } case "kind": if r.TryDecodeAsNil() { @@ -15345,54 +13708,65 @@ func (x *PodSecurityPolicy) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *PodSecurityPolicy) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *PodSecurityPolicyList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb8 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb8 { + if yyb10 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_api.ObjectMeta{} + x.ListMeta = pkg1_unversioned.ListMeta{} } else { - yyv9 := &x.ObjectMeta - yyv9.CodecDecodeSelf(d) + yyv11 := &x.ListMeta + yym12 := z.DecBinary() + _ = yym12 + if false { + } else if z.HasExtensions() 
&& z.DecExt(yyv11) { + } else { + z.DecFallback(yyv11, false) + } } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb8 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb8 { + if yyb10 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Spec = PodSecurityPolicySpec{} + x.Items = nil } else { - yyv10 := &x.Spec - yyv10.CodecDecodeSelf(d) + yyv13 := &x.Items + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + h.decSlicePodSecurityPolicy((*[]PodSecurityPolicy)(yyv13), d) + } } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb8 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb8 { + if yyb10 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -15402,13 +13776,13 @@ func (x *PodSecurityPolicy) codecDecodeSelfFromArray(l int, d *codec1978.Decoder } else { x.Kind = string(r.DecodeString()) } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb8 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb8 { + if yyb10 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -15419,22 +13793,22 @@ func (x *PodSecurityPolicy) codecDecodeSelfFromArray(l int, d *codec1978.Decoder x.APIVersion = string(r.DecodeString()) } for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb8 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb8 { + if yyb10 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") + z.DecStructFieldNotFound(yyj10-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *PodSecurityPolicySpec) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *NetworkPolicy) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -15448,21 +13822,16 @@ func (x *PodSecurityPolicySpec) CodecEncodeSelf(e *codec1978.Encoder) { } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [9]bool + var yyq2 [4]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false - yyq2[0] = x.Privileged != false - yyq2[1] = len(x.Capabilities) != 0 - yyq2[2] = len(x.Volumes) != 0 - yyq2[3] = x.HostNetwork != false - yyq2[4] = len(x.HostPorts) != 0 - yyq2[5] = x.HostPID != false - yyq2[6] = x.HostIPC != false - yyq2[7] = true - yyq2[8] = true + yyq2[0] = true + yyq2[1] = true + yyq2[2] = x.Kind != "" + yyq2[3] = x.APIVersion != "" var yynn2 int if yyr2 || yy2arr2 { - r.EncodeArrayStart(9) + r.EncodeArrayStart(4) } else { yynn2 = 0 for _, b := range yyq2 { @@ -15476,236 +13845,87 @@ func (x *PodSecurityPolicySpec) CodecEncodeSelf(e *codec1978.Encoder) { if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeBool(bool(x.Privileged)) - } + yy4 := &x.ObjectMeta + yy4.CodecEncodeSelf(e) } else { - r.EncodeBool(false) + r.EncodeNil() } } else { if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("privileged")) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeBool(bool(x.Privileged)) - } + yy6 := &x.ObjectMeta + 
yy6.CodecEncodeSelf(e) } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if yyq2[1] { - if x.Capabilities == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - h.encSliceapi_Capability(([]pkg2_api.Capability)(x.Capabilities), e) - } - } + yy9 := &x.Spec + yy9.CodecEncodeSelf(e) } else { r.EncodeNil() } } else { if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("capabilities")) + r.EncodeString(codecSelferC_UTF81234, string("spec")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Capabilities == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - h.encSliceapi_Capability(([]pkg2_api.Capability)(x.Capabilities), e) - } - } + yy11 := &x.Spec + yy11.CodecEncodeSelf(e) } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if yyq2[2] { - if x.Volumes == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSliceFSType(([]FSType)(x.Volumes), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("volumes")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Volumes == nil { - r.EncodeNil() - } else { - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - h.encSliceFSType(([]FSType)(x.Volumes), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeBool(bool(x.HostNetwork)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostNetwork")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) yym14 := z.EncBinary() _ = yym14 if false { } else { - r.EncodeBool(bool(x.HostNetwork)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - if x.HostPorts == nil { - r.EncodeNil() - } else { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - h.encSliceHostPortRange(([]HostPortRange)(x.HostPorts), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostPorts")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.HostPorts == nil { - r.EncodeNil() - } else { - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - h.encSliceHostPortRange(([]HostPortRange)(x.HostPorts), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeBool(bool(x.HostPID)) + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } else { - r.EncodeBool(false) + r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2[5] { + if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostPID")) + r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 + yym15 := z.EncBinary() + _ = yym15 if false { } else { - r.EncodeBool(bool(x.HostPID)) + 
r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[6] { - yym22 := z.EncBinary() - _ = yym22 + if yyq2[3] { + yym17 := z.EncBinary() + _ = yym17 if false { } else { - r.EncodeBool(bool(x.HostIPC)) + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } else { - r.EncodeBool(false) + r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2[6] { + if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostIPC")) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym23 := z.EncBinary() - _ = yym23 + yym18 := z.EncBinary() + _ = yym18 if false { } else { - r.EncodeBool(bool(x.HostIPC)) + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[7] { - yy25 := &x.SELinux - yy25.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[7] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("seLinux")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy27 := &x.SELinux - yy27.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[8] { - yy30 := &x.RunAsUser - yy30.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[8] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("runAsUser")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy32 := &x.RunAsUser - yy32.CodecEncodeSelf(e) - } - } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { @@ -15715,7 +13935,7 @@ func (x *PodSecurityPolicySpec) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *PodSecurityPolicySpec) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *NetworkPolicy) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -15745,101 +13965,53 @@ func (x *PodSecurityPolicySpec) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *PodSecurityPolicySpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *NetworkPolicy) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r var yys3Slc = z.DecScratchBuffer() // default slice to decode into _ = yys3Slc var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "privileged": - if r.TryDecodeAsNil() { - x.Privileged = false - } else { - x.Privileged = bool(r.DecodeBool()) - } - case "capabilities": - if r.TryDecodeAsNil() { - x.Capabilities = nil - } else { - yyv5 := &x.Capabilities - yym6 := z.DecBinary() - _ = yym6 - if false { - } else { - h.decSliceapi_Capability((*[]pkg2_api.Capability)(yyv5), d) - } - } - case "volumes": - if r.TryDecodeAsNil() { - x.Volumes = nil - } else { - yyv7 := &x.Volumes - yym8 := z.DecBinary() - _ = yym8 - if false { - } else { - h.decSliceFSType((*[]FSType)(yyv7), d) - } - } - case 
"hostNetwork": - if r.TryDecodeAsNil() { - x.HostNetwork = false - } else { - x.HostNetwork = bool(r.DecodeBool()) - } - case "hostPorts": - if r.TryDecodeAsNil() { - x.HostPorts = nil - } else { - yyv10 := &x.HostPorts - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - h.decSliceHostPortRange((*[]HostPortRange)(yyv10), d) - } + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break } - case "hostPID": + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "metadata": if r.TryDecodeAsNil() { - x.HostPID = false + x.ObjectMeta = pkg2_api.ObjectMeta{} } else { - x.HostPID = bool(r.DecodeBool()) + yyv4 := &x.ObjectMeta + yyv4.CodecDecodeSelf(d) } - case "hostIPC": + case "spec": if r.TryDecodeAsNil() { - x.HostIPC = false + x.Spec = NetworkPolicySpec{} } else { - x.HostIPC = bool(r.DecodeBool()) + yyv5 := &x.Spec + yyv5.CodecDecodeSelf(d) } - case "seLinux": + case "kind": if r.TryDecodeAsNil() { - x.SELinux = SELinuxStrategyOptions{} + x.Kind = "" } else { - yyv14 := &x.SELinux - yyv14.CodecDecodeSelf(d) + x.Kind = string(r.DecodeString()) } - case "runAsUser": + case "apiVersion": if r.TryDecodeAsNil() { - x.RunAsUser = RunAsUserStrategyOptions{} + x.APIVersion = "" } else { - yyv15 := &x.RunAsUser - yyv15.CodecDecodeSelf(d) + x.APIVersion = string(r.DecodeString()) } default: z.DecStructFieldNotFound(-1, yys3) @@ -15848,194 +14020,96 @@ func (x *PodSecurityPolicySpec) codecDecodeSelfFromMap(l int, d *codec1978.Decod z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *PodSecurityPolicySpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *NetworkPolicy) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj16 int - var yyb16 bool - var yyhl16 bool = l >= 0 - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - } else { - yyb16 = r.CheckBreak() - } - if yyb16 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Privileged = false - } else { - x.Privileged = bool(r.DecodeBool()) - } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - } else { - yyb16 = r.CheckBreak() - } - if yyb16 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Capabilities = nil - } else { - yyv18 := &x.Capabilities - yym19 := z.DecBinary() - _ = yym19 - if false { - } else { - h.decSliceapi_Capability((*[]pkg2_api.Capability)(yyv18), d) - } - } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - } else { - yyb16 = r.CheckBreak() - } - if yyb16 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Volumes = nil - } else { - yyv20 := &x.Volumes - yym21 := z.DecBinary() - _ = yym21 - if false { - } else { - h.decSliceFSType((*[]FSType)(yyv20), d) - } - } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - } else { - yyb16 = r.CheckBreak() - } - if yyb16 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.HostNetwork = false - } else { - 
x.HostNetwork = bool(r.DecodeBool()) - } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - } else { - yyb16 = r.CheckBreak() - } - if yyb16 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.HostPorts = nil - } else { - yyv23 := &x.HostPorts - yym24 := z.DecBinary() - _ = yym24 - if false { - } else { - h.decSliceHostPortRange((*[]HostPortRange)(yyv23), d) - } - } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb16 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb16 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.HostPID = false + x.ObjectMeta = pkg2_api.ObjectMeta{} } else { - x.HostPID = bool(r.DecodeBool()) + yyv9 := &x.ObjectMeta + yyv9.CodecDecodeSelf(d) } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb16 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb16 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.HostIPC = false + x.Spec = NetworkPolicySpec{} } else { - x.HostIPC = bool(r.DecodeBool()) + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb16 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb16 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.SELinux = SELinuxStrategyOptions{} + x.Kind = "" } else { - yyv27 := &x.SELinux - yyv27.CodecDecodeSelf(d) + x.Kind = string(r.DecodeString()) } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb16 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb16 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.RunAsUser = RunAsUserStrategyOptions{} + x.APIVersion = "" } else { - yyv28 := &x.RunAsUser - yyv28.CodecDecodeSelf(d) + x.APIVersion = string(r.DecodeString()) } for { - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb16 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb16 { + if yyb8 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj16-1, "") + z.DecStructFieldNotFound(yyj8-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *HostPortRange) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *NetworkPolicySpec) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -16052,11 +14126,12 @@ func (x *HostPortRange) CodecEncodeSelf(e *codec1978.Encoder) { var yyq2 [2]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false + yyq2[1] = len(x.Ingress) != 0 var yynn2 int if yyr2 || yy2arr2 { r.EncodeArrayStart(2) } else { - yynn2 = 2 + yynn2 = 1 for _, b := range yyq2 { if b { yynn2++ @@ -16067,40 +14142,58 @@ func (x *HostPortRange) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() 
- _ = yym4 + yy4 := &x.PodSelector + yym5 := z.EncBinary() + _ = yym5 if false { + } else if z.HasExtensions() && z.EncExt(yy4) { } else { - r.EncodeInt(int64(x.Min)) + z.EncFallback(yy4) } } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("min")) + r.EncodeString(codecSelferC_UTF81234, string("podSelector")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 + yy6 := &x.PodSelector + yym7 := z.EncBinary() + _ = yym7 if false { + } else if z.HasExtensions() && z.EncExt(yy6) { } else { - r.EncodeInt(int64(x.Min)) + z.EncFallback(yy6) } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { + if yyq2[1] { + if x.Ingress == nil { + r.EncodeNil() + } else { + yym9 := z.EncBinary() + _ = yym9 + if false { + } else { + h.encSliceNetworkPolicyIngressRule(([]NetworkPolicyIngressRule)(x.Ingress), e) + } + } } else { - r.EncodeInt(int64(x.Max)) + r.EncodeNil() } } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("max")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeInt(int64(x.Max)) + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("ingress")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Ingress == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + h.encSliceNetworkPolicyIngressRule(([]NetworkPolicyIngressRule)(x.Ingress), e) + } + } } } if yyr2 || yy2arr2 { @@ -16112,7 +14205,7 @@ func (x *HostPortRange) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *HostPortRange) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *NetworkPolicySpec) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -16142,7 +14235,7 @@ func (x *HostPortRange) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *HostPortRange) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *NetworkPolicySpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -16164,17 +14257,30 @@ func (x *HostPortRange) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) switch yys3 { - case "min": + case "podSelector": if r.TryDecodeAsNil() { - x.Min = 0 + x.PodSelector = pkg1_unversioned.LabelSelector{} } else { - x.Min = int(r.DecodeInt(codecSelferBitsize1234)) + yyv4 := &x.PodSelector + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(yyv4) { + } else { + z.DecFallback(yyv4, false) + } } - case "max": + case "ingress": if r.TryDecodeAsNil() { - x.Max = 0 + x.Ingress = nil } else { - x.Max = int(r.DecodeInt(codecSelferBitsize1234)) + yyv6 := &x.Ingress + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + h.decSliceNetworkPolicyIngressRule((*[]NetworkPolicyIngressRule)(yyv6), d) + } } default: z.DecStructFieldNotFound(-1, yys3) @@ -16183,88 +14289,75 @@ func (x *HostPortRange) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *HostPortRange) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *NetworkPolicySpec) 
codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb6 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb6 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Min = 0 + x.PodSelector = pkg1_unversioned.LabelSelector{} } else { - x.Min = int(r.DecodeInt(codecSelferBitsize1234)) + yyv9 := &x.PodSelector + yym10 := z.DecBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.DecExt(yyv9) { + } else { + z.DecFallback(yyv9, false) + } } - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb6 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb6 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Max = 0 + x.Ingress = nil } else { - x.Max = int(r.DecodeInt(codecSelferBitsize1234)) + yyv11 := &x.Ingress + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + h.decSliceNetworkPolicyIngressRule((*[]NetworkPolicyIngressRule)(yyv11), d) + } } for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb6 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb6 { + if yyb8 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") + z.DecStructFieldNotFound(yyj8-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x FSType) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *FSType) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *SELinuxStrategyOptions) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *NetworkPolicyIngressRule) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -16281,12 +14374,13 @@ func (x *SELinuxStrategyOptions) CodecEncodeSelf(e *codec1978.Encoder) { var yyq2 [2]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false - yyq2[1] = x.SELinuxOptions != nil + yyq2[0] = len(x.Ports) != 0 + yyq2[1] = len(x.From) != 0 var yynn2 int if yyr2 || yy2arr2 { r.EncodeArrayStart(2) } else { - yynn2 = 1 + yynn2 = 0 for _, b := range yyq2 { if b { yynn2++ @@ -16297,20 +14391,49 @@ func (x *SELinuxStrategyOptions) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Rule.CodecEncodeSelf(e) + if yyq2[0] { + if x.Ports == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSliceNetworkPolicyPort(([]NetworkPolicyPort)(x.Ports), e) + } + } + } else { + r.EncodeNil() + } } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - 
r.EncodeString(codecSelferC_UTF81234, string("rule")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Rule.CodecEncodeSelf(e) + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("ports")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Ports == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSliceNetworkPolicyPort(([]NetworkPolicyPort)(x.Ports), e) + } + } + } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if yyq2[1] { - if x.SELinuxOptions == nil { + if x.From == nil { r.EncodeNil() } else { - x.SELinuxOptions.CodecEncodeSelf(e) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + h.encSliceNetworkPolicyPeer(([]NetworkPolicyPeer)(x.From), e) + } } } else { r.EncodeNil() @@ -16318,12 +14441,17 @@ func (x *SELinuxStrategyOptions) CodecEncodeSelf(e *codec1978.Encoder) { } else { if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("seLinuxOptions")) + r.EncodeString(codecSelferC_UTF81234, string("from")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.SELinuxOptions == nil { + if x.From == nil { r.EncodeNil() } else { - x.SELinuxOptions.CodecEncodeSelf(e) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + h.encSliceNetworkPolicyPeer(([]NetworkPolicyPeer)(x.From), e) + } } } } @@ -16336,7 +14464,7 @@ func (x *SELinuxStrategyOptions) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *SELinuxStrategyOptions) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *NetworkPolicyIngressRule) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -16366,7 +14494,7 @@ func (x *SELinuxStrategyOptions) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *SELinuxStrategyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *NetworkPolicyIngressRule) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -16388,22 +14516,29 @@ func (x *SELinuxStrategyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Deco yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) switch yys3 { - case "rule": + case "ports": if r.TryDecodeAsNil() { - x.Rule = "" + x.Ports = nil } else { - x.Rule = SELinuxStrategy(r.DecodeString()) + yyv4 := &x.Ports + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSliceNetworkPolicyPort((*[]NetworkPolicyPort)(yyv4), d) + } } - case "seLinuxOptions": + case "from": if r.TryDecodeAsNil() { - if x.SELinuxOptions != nil { - x.SELinuxOptions = nil - } + x.From = nil } else { - if x.SELinuxOptions == nil { - x.SELinuxOptions = new(pkg2_api.SELinuxOptions) + yyv6 := &x.From + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + h.decSliceNetworkPolicyPeer((*[]NetworkPolicyPeer)(yyv6), d) } - x.SELinuxOptions.CodecDecodeSelf(d) } default: z.DecStructFieldNotFound(-1, yys3) @@ -16412,93 +14547,74 @@ func (x *SELinuxStrategyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Deco z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *SELinuxStrategyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *NetworkPolicyIngressRule) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj6 
int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb6 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb6 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Rule = "" + x.Ports = nil } else { - x.Rule = SELinuxStrategy(r.DecodeString()) + yyv9 := &x.Ports + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + h.decSliceNetworkPolicyPort((*[]NetworkPolicyPort)(yyv9), d) + } } - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb6 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb6 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - if x.SELinuxOptions != nil { - x.SELinuxOptions = nil - } + x.From = nil } else { - if x.SELinuxOptions == nil { - x.SELinuxOptions = new(pkg2_api.SELinuxOptions) + yyv11 := &x.From + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + h.decSliceNetworkPolicyPeer((*[]NetworkPolicyPeer)(yyv11), d) } - x.SELinuxOptions.CodecDecodeSelf(d) } for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb6 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb6 { + if yyb8 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") + z.DecStructFieldNotFound(yyj8-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x SELinuxStrategy) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *SELinuxStrategy) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *RunAsUserStrategyOptions) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *NetworkPolicyPort) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -16515,12 +14631,13 @@ func (x *RunAsUserStrategyOptions) CodecEncodeSelf(e *codec1978.Encoder) { var yyq2 [2]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false - yyq2[1] = len(x.Ranges) != 0 + yyq2[0] = x.Protocol != nil + yyq2[1] = x.Port != nil var yynn2 int if yyr2 || yy2arr2 { r.EncodeArrayStart(2) } else { - yynn2 = 1 + yynn2 = 0 for _, b := range yyq2 { if b { yynn2++ @@ -16531,24 +14648,45 @@ func (x *RunAsUserStrategyOptions) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Rule.CodecEncodeSelf(e) + if yyq2[0] { + if x.Protocol == nil { + r.EncodeNil() + } else { + yy4 := *x.Protocol + yysf5 := &yy4 + yysf5.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("rule")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Rule.CodecEncodeSelf(e) + if yyq2[0] { + 
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("protocol")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Protocol == nil { + r.EncodeNil() + } else { + yy6 := *x.Protocol + yysf7 := &yy6 + yysf7.CodecEncodeSelf(e) + } + } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if yyq2[1] { - if x.Ranges == nil { + if x.Port == nil { r.EncodeNil() } else { - yym7 := z.EncBinary() - _ = yym7 + yym9 := z.EncBinary() + _ = yym9 if false { + } else if z.HasExtensions() && z.EncExt(x.Port) { + } else if !yym9 && z.IsJSONHandle() { + z.EncJSONMarshal(x.Port) } else { - h.encSliceIDRange(([]IDRange)(x.Ranges), e) + z.EncFallback(x.Port) } } } else { @@ -16557,16 +14695,19 @@ func (x *RunAsUserStrategyOptions) CodecEncodeSelf(e *codec1978.Encoder) { } else { if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("ranges")) + r.EncodeString(codecSelferC_UTF81234, string("port")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Ranges == nil { + if x.Port == nil { r.EncodeNil() } else { - yym8 := z.EncBinary() - _ = yym8 + yym10 := z.EncBinary() + _ = yym10 if false { + } else if z.HasExtensions() && z.EncExt(x.Port) { + } else if !yym10 && z.IsJSONHandle() { + z.EncJSONMarshal(x.Port) } else { - h.encSliceIDRange(([]IDRange)(x.Ranges), e) + z.EncFallback(x.Port) } } } @@ -16580,7 +14721,7 @@ func (x *RunAsUserStrategyOptions) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *RunAsUserStrategyOptions) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *NetworkPolicyPort) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -16610,7 +14751,7 @@ func (x *RunAsUserStrategyOptions) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *RunAsUserStrategyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *NetworkPolicyPort) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -16632,22 +14773,34 @@ func (x *RunAsUserStrategyOptions) codecDecodeSelfFromMap(l int, d *codec1978.De yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) switch yys3 { - case "rule": + case "protocol": if r.TryDecodeAsNil() { - x.Rule = "" + if x.Protocol != nil { + x.Protocol = nil + } } else { - x.Rule = RunAsUserStrategy(r.DecodeString()) + if x.Protocol == nil { + x.Protocol = new(pkg2_api.Protocol) + } + x.Protocol.CodecDecodeSelf(d) } - case "ranges": + case "port": if r.TryDecodeAsNil() { - x.Ranges = nil + if x.Port != nil { + x.Port = nil + } } else { - yyv5 := &x.Ranges + if x.Port == nil { + x.Port = new(pkg5_intstr.IntOrString) + } yym6 := z.DecBinary() _ = yym6 if false { + } else if z.HasExtensions() && z.DecExt(x.Port) { + } else if !yym6 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.Port) } else { - h.decSliceIDRange((*[]IDRange)(yyv5), d) + z.DecFallback(x.Port, false) } } default: @@ -16657,7 +14810,7 @@ func (x *RunAsUserStrategyOptions) codecDecodeSelfFromMap(l int, d *codec1978.De z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *RunAsUserStrategyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *NetworkPolicyPort) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -16676,9 +14829,14 @@ func (x 
*RunAsUserStrategyOptions) codecDecodeSelfFromArray(l int, d *codec1978. } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Rule = "" + if x.Protocol != nil { + x.Protocol = nil + } } else { - x.Rule = RunAsUserStrategy(r.DecodeString()) + if x.Protocol == nil { + x.Protocol = new(pkg2_api.Protocol) + } + x.Protocol.CodecDecodeSelf(d) } yyj7++ if yyhl7 { @@ -16692,14 +14850,21 @@ func (x *RunAsUserStrategyOptions) codecDecodeSelfFromArray(l int, d *codec1978. } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Ranges = nil + if x.Port != nil { + x.Port = nil + } } else { - yyv9 := &x.Ranges + if x.Port == nil { + x.Port = new(pkg5_intstr.IntOrString) + } yym10 := z.DecBinary() _ = yym10 if false { + } else if z.HasExtensions() && z.DecExt(x.Port) { + } else if !yym10 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.Port) } else { - h.decSliceIDRange((*[]IDRange)(yyv9), d) + z.DecFallback(x.Port, false) } } for { @@ -16718,7 +14883,7 @@ func (x *RunAsUserStrategyOptions) codecDecodeSelfFromArray(l int, d *codec1978. z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *IDRange) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *NetworkPolicyPeer) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -16735,11 +14900,13 @@ func (x *IDRange) CodecEncodeSelf(e *codec1978.Encoder) { var yyq2 [2]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false + yyq2[0] = x.PodSelector != nil + yyq2[1] = x.NamespaceSelector != nil var yynn2 int if yyr2 || yy2arr2 { r.EncodeArrayStart(2) } else { - yynn2 = 2 + yynn2 = 0 for _, b := range yyq2 { if b { yynn2++ @@ -16750,40 +14917,72 @@ func (x *IDRange) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { + if yyq2[0] { + if x.PodSelector == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else if z.HasExtensions() && z.EncExt(x.PodSelector) { + } else { + z.EncFallback(x.PodSelector) + } + } } else { - r.EncodeInt(int64(x.Min)) + r.EncodeNil() } } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("min")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(x.Min)) + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("podSelector")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.PodSelector == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.EncExt(x.PodSelector) { + } else { + z.EncFallback(x.PodSelector) + } + } } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { + if yyq2[1] { + if x.NamespaceSelector == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.EncExt(x.NamespaceSelector) { + } else { + z.EncFallback(x.NamespaceSelector) + } + } } else { - r.EncodeInt(int64(x.Max)) + r.EncodeNil() } } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("max")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = 
yym8 - if false { - } else { - r.EncodeInt(int64(x.Max)) + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("namespaceSelector")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.NamespaceSelector == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.EncExt(x.NamespaceSelector) { + } else { + z.EncFallback(x.NamespaceSelector) + } + } } } if yyr2 || yy2arr2 { @@ -16795,7 +14994,7 @@ func (x *IDRange) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *IDRange) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *NetworkPolicyPeer) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -16825,7 +15024,7 @@ func (x *IDRange) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *IDRange) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *NetworkPolicyPeer) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -16847,17 +15046,39 @@ func (x *IDRange) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) switch yys3 { - case "min": + case "podSelector": if r.TryDecodeAsNil() { - x.Min = 0 + if x.PodSelector != nil { + x.PodSelector = nil + } } else { - x.Min = int64(r.DecodeInt(64)) + if x.PodSelector == nil { + x.PodSelector = new(pkg1_unversioned.LabelSelector) + } + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(x.PodSelector) { + } else { + z.DecFallback(x.PodSelector, false) + } } - case "max": + case "namespaceSelector": if r.TryDecodeAsNil() { - x.Max = 0 + if x.NamespaceSelector != nil { + x.NamespaceSelector = nil + } } else { - x.Max = int64(r.DecodeInt(64)) + if x.NamespaceSelector == nil { + x.NamespaceSelector = new(pkg1_unversioned.LabelSelector) + } + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(x.NamespaceSelector) { + } else { + z.DecFallback(x.NamespaceSelector, false) + } } default: z.DecStructFieldNotFound(-1, yys3) @@ -16866,88 +15087,84 @@ func (x *IDRange) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *IDRange) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *NetworkPolicyPeer) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb6 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb6 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Min = 0 + if x.PodSelector != nil { + x.PodSelector = nil + } } else { - x.Min = int64(r.DecodeInt(64)) + if x.PodSelector == nil { + x.PodSelector = new(pkg1_unversioned.LabelSelector) + } + yym10 := z.DecBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.DecExt(x.PodSelector) { + } else { + z.DecFallback(x.PodSelector, false) + } } - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb6 = r.CheckBreak() + yyb8 = 
r.CheckBreak() } - if yyb6 { + if yyb8 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Max = 0 + if x.NamespaceSelector != nil { + x.NamespaceSelector = nil + } } else { - x.Max = int64(r.DecodeInt(64)) + if x.NamespaceSelector == nil { + x.NamespaceSelector = new(pkg1_unversioned.LabelSelector) + } + yym12 := z.DecBinary() + _ = yym12 + if false { + } else if z.HasExtensions() && z.DecExt(x.NamespaceSelector) { + } else { + z.DecFallback(x.NamespaceSelector, false) + } } for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb6 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb6 { + if yyb8 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") + z.DecStructFieldNotFound(yyj8-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x RunAsUserStrategy) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *RunAsUserStrategy) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *PodSecurityPolicyList) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *NetworkPolicyList) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -17018,7 +15235,7 @@ func (x *PodSecurityPolicyList) CodecEncodeSelf(e *codec1978.Encoder) { _ = yym9 if false { } else { - h.encSlicePodSecurityPolicy(([]PodSecurityPolicy)(x.Items), e) + h.encSliceNetworkPolicy(([]NetworkPolicy)(x.Items), e) } } } else { @@ -17032,7 +15249,7 @@ func (x *PodSecurityPolicyList) CodecEncodeSelf(e *codec1978.Encoder) { _ = yym10 if false { } else { - h.encSlicePodSecurityPolicy(([]PodSecurityPolicy)(x.Items), e) + h.encSliceNetworkPolicy(([]NetworkPolicy)(x.Items), e) } } } @@ -17095,7 +15312,7 @@ func (x *PodSecurityPolicyList) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *PodSecurityPolicyList) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *NetworkPolicyList) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -17125,7 +15342,7 @@ func (x *PodSecurityPolicyList) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *PodSecurityPolicyList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *NetworkPolicyList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -17169,7 +15386,7 @@ func (x *PodSecurityPolicyList) codecDecodeSelfFromMap(l int, d *codec1978.Decod _ = yym7 if false { } else { - h.decSlicePodSecurityPolicy((*[]PodSecurityPolicy)(yyv6), d) + h.decSliceNetworkPolicy((*[]NetworkPolicy)(yyv6), d) } } case "kind": @@ -17191,7 +15408,7 @@ func (x *PodSecurityPolicyList) codecDecodeSelfFromMap(l int, d *codec1978.Decod z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *PodSecurityPolicyList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *NetworkPolicyList) 
codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -17240,7 +15457,7 @@ func (x *PodSecurityPolicyList) codecDecodeSelfFromArray(l int, d *codec1978.Dec _ = yym14 if false { } else { - h.decSlicePodSecurityPolicy((*[]PodSecurityPolicy)(yyv13), d) + h.decSliceNetworkPolicy((*[]NetworkPolicy)(yyv13), d) } } yyj10++ @@ -17273,25 +15490,144 @@ func (x *PodSecurityPolicyList) codecDecodeSelfFromArray(l int, d *codec1978.Dec if r.TryDecodeAsNil() { x.APIVersion = "" } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() + x.APIVersion = string(r.DecodeString()) + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) encSliceCustomMetricTarget(v []CustomMetricTarget, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceCustomMetricTarget(v *[]CustomMetricTarget, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []CustomMetricTarget{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 72) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]CustomMetricTarget, yyrl1) + } + } else { + yyv1 = make([]CustomMetricTarget, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = CustomMetricTarget{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, CustomMetricTarget{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = CustomMetricTarget{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, CustomMetricTarget{}) // var yyz1 CustomMetricTarget + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = CustomMetricTarget{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + } - if yyb10 { - break + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []CustomMetricTarget{} + yyc1 = true } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") 
} - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + yyh1.End() + if yyc1 { + *v = yyv1 + } } -func (x codecSelfer1234) encSliceCustomMetricTarget(v []CustomMetricTarget, e *codec1978.Encoder) { +func (x codecSelfer1234) encSliceCustomMetricCurrentStatus(v []CustomMetricCurrentStatus, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -17304,7 +15640,7 @@ func (x codecSelfer1234) encSliceCustomMetricTarget(v []CustomMetricTarget, e *c z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) decSliceCustomMetricTarget(v *[]CustomMetricTarget, d *codec1978.Decoder) { +func (x codecSelfer1234) decSliceCustomMetricCurrentStatus(v *[]CustomMetricCurrentStatus, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -17315,7 +15651,7 @@ func (x codecSelfer1234) decSliceCustomMetricTarget(v *[]CustomMetricTarget, d * _ = yyc1 if yyl1 == 0 { if yyv1 == nil { - yyv1 = []CustomMetricTarget{} + yyv1 = []CustomMetricCurrentStatus{} yyc1 = true } else if len(yyv1) != 0 { yyv1 = yyv1[:0] @@ -17330,15 +15666,15 @@ func (x codecSelfer1234) decSliceCustomMetricTarget(v *[]CustomMetricTarget, d * yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 40) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 72) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] } else { - yyv1 = make([]CustomMetricTarget, yyrl1) + yyv1 = make([]CustomMetricCurrentStatus, yyrl1) } } else { - yyv1 = make([]CustomMetricTarget, yyrl1) + yyv1 = make([]CustomMetricCurrentStatus, yyrl1) } yyc1 = true yyrr1 = len(yyv1) @@ -17353,7 +15689,7 @@ func (x codecSelfer1234) decSliceCustomMetricTarget(v *[]CustomMetricTarget, d * for ; yyj1 < yyrr1; yyj1++ { yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = CustomMetricTarget{} + yyv1[yyj1] = CustomMetricCurrentStatus{} } else { yyv2 := &yyv1[yyj1] yyv2.CodecDecodeSelf(d) @@ -17362,10 +15698,10 @@ func (x codecSelfer1234) decSliceCustomMetricTarget(v *[]CustomMetricTarget, d * } if yyrt1 { for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, CustomMetricTarget{}) + yyv1 = append(yyv1, CustomMetricCurrentStatus{}) yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = CustomMetricTarget{} + yyv1[yyj1] = CustomMetricCurrentStatus{} } else { yyv3 := &yyv1[yyj1] yyv3.CodecDecodeSelf(d) @@ -17379,13 +15715,13 @@ func (x codecSelfer1234) decSliceCustomMetricTarget(v *[]CustomMetricTarget, d * for ; !r.CheckBreak(); yyj1++ { if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, CustomMetricTarget{}) // var yyz1 CustomMetricTarget + yyv1 = append(yyv1, CustomMetricCurrentStatus{}) // var yyz1 CustomMetricCurrentStatus yyc1 = true } yyh1.ElemContainerState(yyj1) if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv1[yyj1] = CustomMetricTarget{} + yyv1[yyj1] = CustomMetricCurrentStatus{} } else { yyv4 := &yyv1[yyj1] yyv4.CodecDecodeSelf(d) @@ -17400,7 +15736,7 @@ func (x codecSelfer1234) decSliceCustomMetricTarget(v *[]CustomMetricTarget, d * yyv1 = yyv1[:yyj1] yyc1 = true } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []CustomMetricTarget{} + yyv1 = []CustomMetricCurrentStatus{} yyc1 = true } } @@ -17410,7 +15746,7 @@ func (x codecSelfer1234) decSliceCustomMetricTarget(v *[]CustomMetricTarget, d * } } -func (x codecSelfer1234) encSliceCustomMetricCurrentStatus(v []CustomMetricCurrentStatus, e *codec1978.Encoder) { +func (x codecSelfer1234) encSliceAPIVersion(v 
[]APIVersion, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -17423,7 +15759,7 @@ func (x codecSelfer1234) encSliceCustomMetricCurrentStatus(v []CustomMetricCurre z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) decSliceCustomMetricCurrentStatus(v *[]CustomMetricCurrentStatus, d *codec1978.Decoder) { +func (x codecSelfer1234) decSliceAPIVersion(v *[]APIVersion, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -17434,7 +15770,7 @@ func (x codecSelfer1234) decSliceCustomMetricCurrentStatus(v *[]CustomMetricCurr _ = yyc1 if yyl1 == 0 { if yyv1 == nil { - yyv1 = []CustomMetricCurrentStatus{} + yyv1 = []APIVersion{} yyc1 = true } else if len(yyv1) != 0 { yyv1 = yyv1[:0] @@ -17449,15 +15785,15 @@ func (x codecSelfer1234) decSliceCustomMetricCurrentStatus(v *[]CustomMetricCurr yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 40) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] } else { - yyv1 = make([]CustomMetricCurrentStatus, yyrl1) + yyv1 = make([]APIVersion, yyrl1) } } else { - yyv1 = make([]CustomMetricCurrentStatus, yyrl1) + yyv1 = make([]APIVersion, yyrl1) } yyc1 = true yyrr1 = len(yyv1) @@ -17472,7 +15808,7 @@ func (x codecSelfer1234) decSliceCustomMetricCurrentStatus(v *[]CustomMetricCurr for ; yyj1 < yyrr1; yyj1++ { yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = CustomMetricCurrentStatus{} + yyv1[yyj1] = APIVersion{} } else { yyv2 := &yyv1[yyj1] yyv2.CodecDecodeSelf(d) @@ -17481,10 +15817,10 @@ func (x codecSelfer1234) decSliceCustomMetricCurrentStatus(v *[]CustomMetricCurr } if yyrt1 { for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, CustomMetricCurrentStatus{}) + yyv1 = append(yyv1, APIVersion{}) yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = CustomMetricCurrentStatus{} + yyv1[yyj1] = APIVersion{} } else { yyv3 := &yyv1[yyj1] yyv3.CodecDecodeSelf(d) @@ -17498,13 +15834,13 @@ func (x codecSelfer1234) decSliceCustomMetricCurrentStatus(v *[]CustomMetricCurr for ; !r.CheckBreak(); yyj1++ { if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, CustomMetricCurrentStatus{}) // var yyz1 CustomMetricCurrentStatus + yyv1 = append(yyv1, APIVersion{}) // var yyz1 APIVersion yyc1 = true } yyh1.ElemContainerState(yyj1) if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv1[yyj1] = CustomMetricCurrentStatus{} + yyv1[yyj1] = APIVersion{} } else { yyv4 := &yyv1[yyj1] yyv4.CodecDecodeSelf(d) @@ -17519,7 +15855,7 @@ func (x codecSelfer1234) decSliceCustomMetricCurrentStatus(v *[]CustomMetricCurr yyv1 = yyv1[:yyj1] yyc1 = true } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []CustomMetricCurrentStatus{} + yyv1 = []APIVersion{} yyc1 = true } } @@ -17529,7 +15865,7 @@ func (x codecSelfer1234) decSliceCustomMetricCurrentStatus(v *[]CustomMetricCurr } } -func (x codecSelfer1234) encSliceHorizontalPodAutoscaler(v []HorizontalPodAutoscaler, e *codec1978.Encoder) { +func (x codecSelfer1234) encSliceThirdPartyResource(v []ThirdPartyResource, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -17542,7 +15878,7 @@ func (x codecSelfer1234) encSliceHorizontalPodAutoscaler(v []HorizontalPodAutosc z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) decSliceHorizontalPodAutoscaler(v *[]HorizontalPodAutoscaler, d 
*codec1978.Decoder) { +func (x codecSelfer1234) decSliceThirdPartyResource(v *[]ThirdPartyResource, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -17553,7 +15889,7 @@ func (x codecSelfer1234) decSliceHorizontalPodAutoscaler(v *[]HorizontalPodAutos _ = yyc1 if yyl1 == 0 { if yyv1 == nil { - yyv1 = []HorizontalPodAutoscaler{} + yyv1 = []ThirdPartyResource{} yyc1 = true } else if len(yyv1) != 0 { yyv1 = yyv1[:0] @@ -17568,15 +15904,15 @@ func (x codecSelfer1234) decSliceHorizontalPodAutoscaler(v *[]HorizontalPodAutos yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 320) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 280) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] } else { - yyv1 = make([]HorizontalPodAutoscaler, yyrl1) + yyv1 = make([]ThirdPartyResource, yyrl1) } } else { - yyv1 = make([]HorizontalPodAutoscaler, yyrl1) + yyv1 = make([]ThirdPartyResource, yyrl1) } yyc1 = true yyrr1 = len(yyv1) @@ -17591,7 +15927,7 @@ func (x codecSelfer1234) decSliceHorizontalPodAutoscaler(v *[]HorizontalPodAutos for ; yyj1 < yyrr1; yyj1++ { yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = HorizontalPodAutoscaler{} + yyv1[yyj1] = ThirdPartyResource{} } else { yyv2 := &yyv1[yyj1] yyv2.CodecDecodeSelf(d) @@ -17600,10 +15936,10 @@ func (x codecSelfer1234) decSliceHorizontalPodAutoscaler(v *[]HorizontalPodAutos } if yyrt1 { for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, HorizontalPodAutoscaler{}) + yyv1 = append(yyv1, ThirdPartyResource{}) yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = HorizontalPodAutoscaler{} + yyv1[yyj1] = ThirdPartyResource{} } else { yyv3 := &yyv1[yyj1] yyv3.CodecDecodeSelf(d) @@ -17617,13 +15953,13 @@ func (x codecSelfer1234) decSliceHorizontalPodAutoscaler(v *[]HorizontalPodAutos for ; !r.CheckBreak(); yyj1++ { if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, HorizontalPodAutoscaler{}) // var yyz1 HorizontalPodAutoscaler + yyv1 = append(yyv1, ThirdPartyResource{}) // var yyz1 ThirdPartyResource yyc1 = true } yyh1.ElemContainerState(yyj1) if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv1[yyj1] = HorizontalPodAutoscaler{} + yyv1[yyj1] = ThirdPartyResource{} } else { yyv4 := &yyv1[yyj1] yyv4.CodecDecodeSelf(d) @@ -17638,7 +15974,7 @@ func (x codecSelfer1234) decSliceHorizontalPodAutoscaler(v *[]HorizontalPodAutos yyv1 = yyv1[:yyj1] yyc1 = true } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []HorizontalPodAutoscaler{} + yyv1 = []ThirdPartyResource{} yyc1 = true } } @@ -17648,7 +15984,7 @@ func (x codecSelfer1234) decSliceHorizontalPodAutoscaler(v *[]HorizontalPodAutos } } -func (x codecSelfer1234) encSliceAPIVersion(v []APIVersion, e *codec1978.Encoder) { +func (x codecSelfer1234) encSliceDeployment(v []Deployment, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -17661,7 +15997,7 @@ func (x codecSelfer1234) encSliceAPIVersion(v []APIVersion, e *codec1978.Encoder z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) decSliceAPIVersion(v *[]APIVersion, d *codec1978.Decoder) { +func (x codecSelfer1234) decSliceDeployment(v *[]Deployment, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -17672,7 +16008,7 @@ func (x codecSelfer1234) decSliceAPIVersion(v *[]APIVersion, d *codec1978.Decode _ = yyc1 if yyl1 == 0 { if yyv1 == nil { - yyv1 = []APIVersion{} + yyv1 = 
[]Deployment{} yyc1 = true } else if len(yyv1) != 0 { yyv1 = yyv1[:0] @@ -17687,15 +16023,15 @@ func (x codecSelfer1234) decSliceAPIVersion(v *[]APIVersion, d *codec1978.Decode yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 32) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 768) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] } else { - yyv1 = make([]APIVersion, yyrl1) + yyv1 = make([]Deployment, yyrl1) } } else { - yyv1 = make([]APIVersion, yyrl1) + yyv1 = make([]Deployment, yyrl1) } yyc1 = true yyrr1 = len(yyv1) @@ -17710,7 +16046,7 @@ func (x codecSelfer1234) decSliceAPIVersion(v *[]APIVersion, d *codec1978.Decode for ; yyj1 < yyrr1; yyj1++ { yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = APIVersion{} + yyv1[yyj1] = Deployment{} } else { yyv2 := &yyv1[yyj1] yyv2.CodecDecodeSelf(d) @@ -17719,10 +16055,10 @@ func (x codecSelfer1234) decSliceAPIVersion(v *[]APIVersion, d *codec1978.Decode } if yyrt1 { for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, APIVersion{}) + yyv1 = append(yyv1, Deployment{}) yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = APIVersion{} + yyv1[yyj1] = Deployment{} } else { yyv3 := &yyv1[yyj1] yyv3.CodecDecodeSelf(d) @@ -17736,13 +16072,13 @@ func (x codecSelfer1234) decSliceAPIVersion(v *[]APIVersion, d *codec1978.Decode for ; !r.CheckBreak(); yyj1++ { if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, APIVersion{}) // var yyz1 APIVersion + yyv1 = append(yyv1, Deployment{}) // var yyz1 Deployment yyc1 = true } yyh1.ElemContainerState(yyj1) if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv1[yyj1] = APIVersion{} + yyv1[yyj1] = Deployment{} } else { yyv4 := &yyv1[yyj1] yyv4.CodecDecodeSelf(d) @@ -17757,7 +16093,7 @@ func (x codecSelfer1234) decSliceAPIVersion(v *[]APIVersion, d *codec1978.Decode yyv1 = yyv1[:yyj1] yyc1 = true } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []APIVersion{} + yyv1 = []Deployment{} yyc1 = true } } @@ -17767,7 +16103,7 @@ func (x codecSelfer1234) decSliceAPIVersion(v *[]APIVersion, d *codec1978.Decode } } -func (x codecSelfer1234) encSliceThirdPartyResource(v []ThirdPartyResource, e *codec1978.Encoder) { +func (x codecSelfer1234) encSliceDaemonSet(v []DaemonSet, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -17780,7 +16116,7 @@ func (x codecSelfer1234) encSliceThirdPartyResource(v []ThirdPartyResource, e *c z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) decSliceThirdPartyResource(v *[]ThirdPartyResource, d *codec1978.Decoder) { +func (x codecSelfer1234) decSliceDaemonSet(v *[]DaemonSet, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -17791,7 +16127,7 @@ func (x codecSelfer1234) decSliceThirdPartyResource(v *[]ThirdPartyResource, d * _ = yyc1 if yyl1 == 0 { if yyv1 == nil { - yyv1 = []ThirdPartyResource{} + yyv1 = []DaemonSet{} yyc1 = true } else if len(yyv1) != 0 { yyv1 = yyv1[:0] @@ -17806,15 +16142,15 @@ func (x codecSelfer1234) decSliceThirdPartyResource(v *[]ThirdPartyResource, d * yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 232) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 696) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] } else { - yyv1 = make([]ThirdPartyResource, yyrl1) + yyv1 = make([]DaemonSet, yyrl1) } } else { - yyv1 = make([]ThirdPartyResource, yyrl1) + yyv1 = 
make([]DaemonSet, yyrl1) } yyc1 = true yyrr1 = len(yyv1) @@ -17829,7 +16165,7 @@ func (x codecSelfer1234) decSliceThirdPartyResource(v *[]ThirdPartyResource, d * for ; yyj1 < yyrr1; yyj1++ { yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = ThirdPartyResource{} + yyv1[yyj1] = DaemonSet{} } else { yyv2 := &yyv1[yyj1] yyv2.CodecDecodeSelf(d) @@ -17838,10 +16174,10 @@ func (x codecSelfer1234) decSliceThirdPartyResource(v *[]ThirdPartyResource, d * } if yyrt1 { for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, ThirdPartyResource{}) + yyv1 = append(yyv1, DaemonSet{}) yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = ThirdPartyResource{} + yyv1[yyj1] = DaemonSet{} } else { yyv3 := &yyv1[yyj1] yyv3.CodecDecodeSelf(d) @@ -17855,13 +16191,13 @@ func (x codecSelfer1234) decSliceThirdPartyResource(v *[]ThirdPartyResource, d * for ; !r.CheckBreak(); yyj1++ { if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, ThirdPartyResource{}) // var yyz1 ThirdPartyResource + yyv1 = append(yyv1, DaemonSet{}) // var yyz1 DaemonSet yyc1 = true } yyh1.ElemContainerState(yyj1) if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv1[yyj1] = ThirdPartyResource{} + yyv1[yyj1] = DaemonSet{} } else { yyv4 := &yyv1[yyj1] yyv4.CodecDecodeSelf(d) @@ -17876,7 +16212,7 @@ func (x codecSelfer1234) decSliceThirdPartyResource(v *[]ThirdPartyResource, d * yyv1 = yyv1[:yyj1] yyc1 = true } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []ThirdPartyResource{} + yyv1 = []DaemonSet{} yyc1 = true } } @@ -17886,7 +16222,7 @@ func (x codecSelfer1234) decSliceThirdPartyResource(v *[]ThirdPartyResource, d * } } -func (x codecSelfer1234) encSliceDeployment(v []Deployment, e *codec1978.Encoder) { +func (x codecSelfer1234) encSliceThirdPartyResourceData(v []ThirdPartyResourceData, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -17899,7 +16235,7 @@ func (x codecSelfer1234) encSliceDeployment(v []Deployment, e *codec1978.Encoder z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) decSliceDeployment(v *[]Deployment, d *codec1978.Decoder) { +func (x codecSelfer1234) decSliceThirdPartyResourceData(v *[]ThirdPartyResourceData, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -17910,7 +16246,7 @@ func (x codecSelfer1234) decSliceDeployment(v *[]Deployment, d *codec1978.Decode _ = yyc1 if yyl1 == 0 { if yyv1 == nil { - yyv1 = []Deployment{} + yyv1 = []ThirdPartyResourceData{} yyc1 = true } else if len(yyv1) != 0 { yyv1 = yyv1[:0] @@ -17925,15 +16261,15 @@ func (x codecSelfer1234) decSliceDeployment(v *[]Deployment, d *codec1978.Decode yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 632) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 264) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] } else { - yyv1 = make([]Deployment, yyrl1) + yyv1 = make([]ThirdPartyResourceData, yyrl1) } } else { - yyv1 = make([]Deployment, yyrl1) + yyv1 = make([]ThirdPartyResourceData, yyrl1) } yyc1 = true yyrr1 = len(yyv1) @@ -17948,7 +16284,7 @@ func (x codecSelfer1234) decSliceDeployment(v *[]Deployment, d *codec1978.Decode for ; yyj1 < yyrr1; yyj1++ { yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = Deployment{} + yyv1[yyj1] = ThirdPartyResourceData{} } else { yyv2 := &yyv1[yyj1] yyv2.CodecDecodeSelf(d) @@ -17957,10 +16293,10 @@ func (x codecSelfer1234) decSliceDeployment(v *[]Deployment, d 
*codec1978.Decode } if yyrt1 { for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, Deployment{}) + yyv1 = append(yyv1, ThirdPartyResourceData{}) yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = Deployment{} + yyv1[yyj1] = ThirdPartyResourceData{} } else { yyv3 := &yyv1[yyj1] yyv3.CodecDecodeSelf(d) @@ -17974,13 +16310,13 @@ func (x codecSelfer1234) decSliceDeployment(v *[]Deployment, d *codec1978.Decode for ; !r.CheckBreak(); yyj1++ { if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, Deployment{}) // var yyz1 Deployment + yyv1 = append(yyv1, ThirdPartyResourceData{}) // var yyz1 ThirdPartyResourceData yyc1 = true } yyh1.ElemContainerState(yyj1) if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv1[yyj1] = Deployment{} + yyv1[yyj1] = ThirdPartyResourceData{} } else { yyv4 := &yyv1[yyj1] yyv4.CodecDecodeSelf(d) @@ -17995,7 +16331,7 @@ func (x codecSelfer1234) decSliceDeployment(v *[]Deployment, d *codec1978.Decode yyv1 = yyv1[:yyj1] yyc1 = true } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []Deployment{} + yyv1 = []ThirdPartyResourceData{} yyc1 = true } } @@ -18005,7 +16341,7 @@ func (x codecSelfer1234) decSliceDeployment(v *[]Deployment, d *codec1978.Decode } } -func (x codecSelfer1234) encSliceDaemonSet(v []DaemonSet, e *codec1978.Encoder) { +func (x codecSelfer1234) encSliceIngress(v []Ingress, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -18018,7 +16354,7 @@ func (x codecSelfer1234) encSliceDaemonSet(v []DaemonSet, e *codec1978.Encoder) z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) decSliceDaemonSet(v *[]DaemonSet, d *codec1978.Decoder) { +func (x codecSelfer1234) decSliceIngress(v *[]Ingress, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -18029,7 +16365,7 @@ func (x codecSelfer1234) decSliceDaemonSet(v *[]DaemonSet, d *codec1978.Decoder) _ = yyc1 if yyl1 == 0 { if yyv1 == nil { - yyv1 = []DaemonSet{} + yyv1 = []Ingress{} yyc1 = true } else if len(yyv1) != 0 { yyv1 = yyv1[:0] @@ -18044,15 +16380,15 @@ func (x codecSelfer1234) decSliceDaemonSet(v *[]DaemonSet, d *codec1978.Decoder) yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 552) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 320) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] } else { - yyv1 = make([]DaemonSet, yyrl1) + yyv1 = make([]Ingress, yyrl1) } } else { - yyv1 = make([]DaemonSet, yyrl1) + yyv1 = make([]Ingress, yyrl1) } yyc1 = true yyrr1 = len(yyv1) @@ -18067,7 +16403,7 @@ func (x codecSelfer1234) decSliceDaemonSet(v *[]DaemonSet, d *codec1978.Decoder) for ; yyj1 < yyrr1; yyj1++ { yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = DaemonSet{} + yyv1[yyj1] = Ingress{} } else { yyv2 := &yyv1[yyj1] yyv2.CodecDecodeSelf(d) @@ -18076,10 +16412,10 @@ func (x codecSelfer1234) decSliceDaemonSet(v *[]DaemonSet, d *codec1978.Decoder) } if yyrt1 { for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, DaemonSet{}) + yyv1 = append(yyv1, Ingress{}) yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = DaemonSet{} + yyv1[yyj1] = Ingress{} } else { yyv3 := &yyv1[yyj1] yyv3.CodecDecodeSelf(d) @@ -18093,13 +16429,13 @@ func (x codecSelfer1234) decSliceDaemonSet(v *[]DaemonSet, d *codec1978.Decoder) for ; !r.CheckBreak(); yyj1++ { if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, DaemonSet{}) // var yyz1 DaemonSet + yyv1 = append(yyv1, Ingress{}) // var yyz1 Ingress 
yyc1 = true } yyh1.ElemContainerState(yyj1) if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv1[yyj1] = DaemonSet{} + yyv1[yyj1] = Ingress{} } else { yyv4 := &yyv1[yyj1] yyv4.CodecDecodeSelf(d) @@ -18114,7 +16450,7 @@ func (x codecSelfer1234) decSliceDaemonSet(v *[]DaemonSet, d *codec1978.Decoder) yyv1 = yyv1[:yyj1] yyc1 = true } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []DaemonSet{} + yyv1 = []Ingress{} yyc1 = true } } @@ -18124,7 +16460,7 @@ func (x codecSelfer1234) decSliceDaemonSet(v *[]DaemonSet, d *codec1978.Decoder) } } -func (x codecSelfer1234) encSliceThirdPartyResourceData(v []ThirdPartyResourceData, e *codec1978.Encoder) { +func (x codecSelfer1234) encSliceIngressTLS(v []IngressTLS, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -18137,7 +16473,7 @@ func (x codecSelfer1234) encSliceThirdPartyResourceData(v []ThirdPartyResourceDa z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) decSliceThirdPartyResourceData(v *[]ThirdPartyResourceData, d *codec1978.Decoder) { +func (x codecSelfer1234) decSliceIngressTLS(v *[]IngressTLS, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -18148,7 +16484,7 @@ func (x codecSelfer1234) decSliceThirdPartyResourceData(v *[]ThirdPartyResourceD _ = yyc1 if yyl1 == 0 { if yyv1 == nil { - yyv1 = []ThirdPartyResourceData{} + yyv1 = []IngressTLS{} yyc1 = true } else if len(yyv1) != 0 { yyv1 = yyv1[:0] @@ -18163,15 +16499,15 @@ func (x codecSelfer1234) decSliceThirdPartyResourceData(v *[]ThirdPartyResourceD yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 216) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 40) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] } else { - yyv1 = make([]ThirdPartyResourceData, yyrl1) + yyv1 = make([]IngressTLS, yyrl1) } } else { - yyv1 = make([]ThirdPartyResourceData, yyrl1) + yyv1 = make([]IngressTLS, yyrl1) } yyc1 = true yyrr1 = len(yyv1) @@ -18186,7 +16522,7 @@ func (x codecSelfer1234) decSliceThirdPartyResourceData(v *[]ThirdPartyResourceD for ; yyj1 < yyrr1; yyj1++ { yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = ThirdPartyResourceData{} + yyv1[yyj1] = IngressTLS{} } else { yyv2 := &yyv1[yyj1] yyv2.CodecDecodeSelf(d) @@ -18195,10 +16531,10 @@ func (x codecSelfer1234) decSliceThirdPartyResourceData(v *[]ThirdPartyResourceD } if yyrt1 { for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, ThirdPartyResourceData{}) + yyv1 = append(yyv1, IngressTLS{}) yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = ThirdPartyResourceData{} + yyv1[yyj1] = IngressTLS{} } else { yyv3 := &yyv1[yyj1] yyv3.CodecDecodeSelf(d) @@ -18212,13 +16548,13 @@ func (x codecSelfer1234) decSliceThirdPartyResourceData(v *[]ThirdPartyResourceD for ; !r.CheckBreak(); yyj1++ { if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, ThirdPartyResourceData{}) // var yyz1 ThirdPartyResourceData + yyv1 = append(yyv1, IngressTLS{}) // var yyz1 IngressTLS yyc1 = true } yyh1.ElemContainerState(yyj1) if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv1[yyj1] = ThirdPartyResourceData{} + yyv1[yyj1] = IngressTLS{} } else { yyv4 := &yyv1[yyj1] yyv4.CodecDecodeSelf(d) @@ -18233,7 +16569,7 @@ func (x codecSelfer1234) decSliceThirdPartyResourceData(v *[]ThirdPartyResourceD yyv1 = yyv1[:yyj1] yyc1 = true } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []ThirdPartyResourceData{} + yyv1 = []IngressTLS{} yyc1 = true } } @@ 
-18243,7 +16579,7 @@ func (x codecSelfer1234) decSliceThirdPartyResourceData(v *[]ThirdPartyResourceD } } -func (x codecSelfer1234) encSliceJob(v []Job, e *codec1978.Encoder) { +func (x codecSelfer1234) encSliceIngressRule(v []IngressRule, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -18256,7 +16592,7 @@ func (x codecSelfer1234) encSliceJob(v []Job, e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) decSliceJob(v *[]Job, d *codec1978.Decoder) { +func (x codecSelfer1234) decSliceIngressRule(v *[]IngressRule, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -18267,7 +16603,7 @@ func (x codecSelfer1234) decSliceJob(v *[]Job, d *codec1978.Decoder) { _ = yyc1 if yyl1 == 0 { if yyv1 == nil { - yyv1 = []Job{} + yyv1 = []IngressRule{} yyc1 = true } else if len(yyv1) != 0 { yyv1 = yyv1[:0] @@ -18282,15 +16618,15 @@ func (x codecSelfer1234) decSliceJob(v *[]Job, d *codec1978.Decoder) { yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 624) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 24) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] } else { - yyv1 = make([]Job, yyrl1) + yyv1 = make([]IngressRule, yyrl1) } } else { - yyv1 = make([]Job, yyrl1) + yyv1 = make([]IngressRule, yyrl1) } yyc1 = true yyrr1 = len(yyv1) @@ -18305,7 +16641,7 @@ func (x codecSelfer1234) decSliceJob(v *[]Job, d *codec1978.Decoder) { for ; yyj1 < yyrr1; yyj1++ { yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = Job{} + yyv1[yyj1] = IngressRule{} } else { yyv2 := &yyv1[yyj1] yyv2.CodecDecodeSelf(d) @@ -18314,10 +16650,10 @@ func (x codecSelfer1234) decSliceJob(v *[]Job, d *codec1978.Decoder) { } if yyrt1 { for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, Job{}) + yyv1 = append(yyv1, IngressRule{}) yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = Job{} + yyv1[yyj1] = IngressRule{} } else { yyv3 := &yyv1[yyj1] yyv3.CodecDecodeSelf(d) @@ -18331,13 +16667,13 @@ func (x codecSelfer1234) decSliceJob(v *[]Job, d *codec1978.Decoder) { for ; !r.CheckBreak(); yyj1++ { if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, Job{}) // var yyz1 Job + yyv1 = append(yyv1, IngressRule{}) // var yyz1 IngressRule yyc1 = true } yyh1.ElemContainerState(yyj1) if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv1[yyj1] = Job{} + yyv1[yyj1] = IngressRule{} } else { yyv4 := &yyv1[yyj1] yyv4.CodecDecodeSelf(d) @@ -18352,7 +16688,7 @@ func (x codecSelfer1234) decSliceJob(v *[]Job, d *codec1978.Decoder) { yyv1 = yyv1[:yyj1] yyc1 = true } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []Job{} + yyv1 = []IngressRule{} yyc1 = true } } @@ -18362,7 +16698,7 @@ func (x codecSelfer1234) decSliceJob(v *[]Job, d *codec1978.Decoder) { } } -func (x codecSelfer1234) encSliceJobCondition(v []JobCondition, e *codec1978.Encoder) { +func (x codecSelfer1234) encSliceHTTPIngressPath(v []HTTPIngressPath, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -18375,7 +16711,7 @@ func (x codecSelfer1234) encSliceJobCondition(v []JobCondition, e *codec1978.Enc z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) decSliceJobCondition(v *[]JobCondition, d *codec1978.Decoder) { +func (x codecSelfer1234) decSliceHTTPIngressPath(v *[]HTTPIngressPath, d *codec1978.Decoder) { var h codecSelfer1234 z, r := 
codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -18386,7 +16722,7 @@ func (x codecSelfer1234) decSliceJobCondition(v *[]JobCondition, d *codec1978.De _ = yyc1 if yyl1 == 0 { if yyv1 == nil { - yyv1 = []JobCondition{} + yyv1 = []HTTPIngressPath{} yyc1 = true } else if len(yyv1) != 0 { yyv1 = yyv1[:0] @@ -18401,15 +16737,15 @@ func (x codecSelfer1234) decSliceJobCondition(v *[]JobCondition, d *codec1978.De yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 112) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 64) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] } else { - yyv1 = make([]JobCondition, yyrl1) + yyv1 = make([]HTTPIngressPath, yyrl1) } } else { - yyv1 = make([]JobCondition, yyrl1) + yyv1 = make([]HTTPIngressPath, yyrl1) } yyc1 = true yyrr1 = len(yyv1) @@ -18424,7 +16760,7 @@ func (x codecSelfer1234) decSliceJobCondition(v *[]JobCondition, d *codec1978.De for ; yyj1 < yyrr1; yyj1++ { yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = JobCondition{} + yyv1[yyj1] = HTTPIngressPath{} } else { yyv2 := &yyv1[yyj1] yyv2.CodecDecodeSelf(d) @@ -18433,10 +16769,10 @@ func (x codecSelfer1234) decSliceJobCondition(v *[]JobCondition, d *codec1978.De } if yyrt1 { for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, JobCondition{}) + yyv1 = append(yyv1, HTTPIngressPath{}) yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = JobCondition{} + yyv1[yyj1] = HTTPIngressPath{} } else { yyv3 := &yyv1[yyj1] yyv3.CodecDecodeSelf(d) @@ -18450,13 +16786,13 @@ func (x codecSelfer1234) decSliceJobCondition(v *[]JobCondition, d *codec1978.De for ; !r.CheckBreak(); yyj1++ { if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, JobCondition{}) // var yyz1 JobCondition + yyv1 = append(yyv1, HTTPIngressPath{}) // var yyz1 HTTPIngressPath yyc1 = true } yyh1.ElemContainerState(yyj1) if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv1[yyj1] = JobCondition{} + yyv1[yyj1] = HTTPIngressPath{} } else { yyv4 := &yyv1[yyj1] yyv4.CodecDecodeSelf(d) @@ -18471,7 +16807,7 @@ func (x codecSelfer1234) decSliceJobCondition(v *[]JobCondition, d *codec1978.De yyv1 = yyv1[:yyj1] yyc1 = true } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []JobCondition{} + yyv1 = []HTTPIngressPath{} yyc1 = true } } @@ -18481,7 +16817,7 @@ func (x codecSelfer1234) decSliceJobCondition(v *[]JobCondition, d *codec1978.De } } -func (x codecSelfer1234) encSliceIngress(v []Ingress, e *codec1978.Encoder) { +func (x codecSelfer1234) encSliceReplicaSet(v []ReplicaSet, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -18494,7 +16830,7 @@ func (x codecSelfer1234) encSliceIngress(v []Ingress, e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) decSliceIngress(v *[]Ingress, d *codec1978.Decoder) { +func (x codecSelfer1234) decSliceReplicaSet(v *[]ReplicaSet, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -18505,7 +16841,7 @@ func (x codecSelfer1234) decSliceIngress(v *[]Ingress, d *codec1978.Decoder) { _ = yyc1 if yyl1 == 0 { if yyv1 == nil { - yyv1 = []Ingress{} + yyv1 = []ReplicaSet{} yyc1 = true } else if len(yyv1) != 0 { yyv1 = yyv1[:0] @@ -18520,15 +16856,15 @@ func (x codecSelfer1234) decSliceIngress(v *[]Ingress, d *codec1978.Decoder) { yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 272) + yyrl1, yyrt1 = z.DecInferLen(yyl1, 
z.DecBasicHandle().MaxInitLen, 704) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] } else { - yyv1 = make([]Ingress, yyrl1) + yyv1 = make([]ReplicaSet, yyrl1) } } else { - yyv1 = make([]Ingress, yyrl1) + yyv1 = make([]ReplicaSet, yyrl1) } yyc1 = true yyrr1 = len(yyv1) @@ -18543,7 +16879,7 @@ func (x codecSelfer1234) decSliceIngress(v *[]Ingress, d *codec1978.Decoder) { for ; yyj1 < yyrr1; yyj1++ { yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = Ingress{} + yyv1[yyj1] = ReplicaSet{} } else { yyv2 := &yyv1[yyj1] yyv2.CodecDecodeSelf(d) @@ -18552,10 +16888,10 @@ func (x codecSelfer1234) decSliceIngress(v *[]Ingress, d *codec1978.Decoder) { } if yyrt1 { for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, Ingress{}) + yyv1 = append(yyv1, ReplicaSet{}) yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = Ingress{} + yyv1[yyj1] = ReplicaSet{} } else { yyv3 := &yyv1[yyj1] yyv3.CodecDecodeSelf(d) @@ -18569,13 +16905,13 @@ func (x codecSelfer1234) decSliceIngress(v *[]Ingress, d *codec1978.Decoder) { for ; !r.CheckBreak(); yyj1++ { if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, Ingress{}) // var yyz1 Ingress + yyv1 = append(yyv1, ReplicaSet{}) // var yyz1 ReplicaSet yyc1 = true } yyh1.ElemContainerState(yyj1) if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv1[yyj1] = Ingress{} + yyv1[yyj1] = ReplicaSet{} } else { yyv4 := &yyv1[yyj1] yyv4.CodecDecodeSelf(d) @@ -18590,7 +16926,7 @@ func (x codecSelfer1234) decSliceIngress(v *[]Ingress, d *codec1978.Decoder) { yyv1 = yyv1[:yyj1] yyc1 = true } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []Ingress{} + yyv1 = []ReplicaSet{} yyc1 = true } } @@ -18600,20 +16936,20 @@ func (x codecSelfer1234) decSliceIngress(v *[]Ingress, d *codec1978.Decoder) { } } -func (x codecSelfer1234) encSliceIngressTLS(v []IngressTLS, e *codec1978.Encoder) { +func (x codecSelfer1234) encSliceapi_Capability(v []pkg2_api.Capability, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) + yysf2 := &yyv1 + yysf2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) decSliceIngressTLS(v *[]IngressTLS, d *codec1978.Decoder) { +func (x codecSelfer1234) decSliceapi_Capability(v *[]pkg2_api.Capability, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -18624,7 +16960,7 @@ func (x codecSelfer1234) decSliceIngressTLS(v *[]IngressTLS, d *codec1978.Decode _ = yyc1 if yyl1 == 0 { if yyv1 == nil { - yyv1 = []IngressTLS{} + yyv1 = []pkg2_api.Capability{} yyc1 = true } else if len(yyv1) != 0 { yyv1 = yyv1[:0] @@ -18637,23 +16973,18 @@ func (x codecSelfer1234) decSliceIngressTLS(v *[]IngressTLS, d *codec1978.Decode yyrr1 = yyl1 // len(yyv1) if yyl1 > cap(yyv1) { - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 40) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] } else { - yyv1 = make([]IngressTLS, yyrl1) + yyv1 = make([]pkg2_api.Capability, yyrl1) } } else { - yyv1 = make([]IngressTLS, yyrl1) + yyv1 = make([]pkg2_api.Capability, yyrl1) } yyc1 = true yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } } else if yyl1 != len(yyv1) { yyv1 = yyv1[:yyl1] yyc1 = true @@ -18662,22 +16993,20 @@ func (x codecSelfer1234) 
decSliceIngressTLS(v *[]IngressTLS, d *codec1978.Decode for ; yyj1 < yyrr1; yyj1++ { yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = IngressTLS{} + yyv1[yyj1] = "" } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) + yyv1[yyj1] = pkg2_api.Capability(r.DecodeString()) } } if yyrt1 { for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, IngressTLS{}) + yyv1 = append(yyv1, "") yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = IngressTLS{} + yyv1[yyj1] = "" } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) + yyv1[yyj1] = pkg2_api.Capability(r.DecodeString()) } } @@ -18688,16 +17017,15 @@ func (x codecSelfer1234) decSliceIngressTLS(v *[]IngressTLS, d *codec1978.Decode for ; !r.CheckBreak(); yyj1++ { if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, IngressTLS{}) // var yyz1 IngressTLS + yyv1 = append(yyv1, "") // var yyz1 pkg2_api.Capability yyc1 = true } yyh1.ElemContainerState(yyj1) if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv1[yyj1] = IngressTLS{} + yyv1[yyj1] = "" } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) + yyv1[yyj1] = pkg2_api.Capability(r.DecodeString()) } } else { @@ -18709,7 +17037,7 @@ func (x codecSelfer1234) decSliceIngressTLS(v *[]IngressTLS, d *codec1978.Decode yyv1 = yyv1[:yyj1] yyc1 = true } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []IngressTLS{} + yyv1 = []pkg2_api.Capability{} yyc1 = true } } @@ -18719,20 +17047,19 @@ func (x codecSelfer1234) decSliceIngressTLS(v *[]IngressTLS, d *codec1978.Decode } } -func (x codecSelfer1234) encSliceIngressRule(v []IngressRule, e *codec1978.Encoder) { +func (x codecSelfer1234) encSliceFSType(v []FSType, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) + yyv1.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) decSliceIngressRule(v *[]IngressRule, d *codec1978.Decoder) { +func (x codecSelfer1234) decSliceFSType(v *[]FSType, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -18743,7 +17070,7 @@ func (x codecSelfer1234) decSliceIngressRule(v *[]IngressRule, d *codec1978.Deco _ = yyc1 if yyl1 == 0 { if yyv1 == nil { - yyv1 = []IngressRule{} + yyv1 = []FSType{} yyc1 = true } else if len(yyv1) != 0 { yyv1 = yyv1[:0] @@ -18756,23 +17083,18 @@ func (x codecSelfer1234) decSliceIngressRule(v *[]IngressRule, d *codec1978.Deco yyrr1 = yyl1 // len(yyv1) if yyl1 > cap(yyv1) { - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 24) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] } else { - yyv1 = make([]IngressRule, yyrl1) + yyv1 = make([]FSType, yyrl1) } } else { - yyv1 = make([]IngressRule, yyrl1) + yyv1 = make([]FSType, yyrl1) } yyc1 = true yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } } else if yyl1 != len(yyv1) { yyv1 = yyv1[:yyl1] yyc1 = true @@ -18781,22 +17103,20 @@ func (x codecSelfer1234) decSliceIngressRule(v *[]IngressRule, d *codec1978.Deco for ; yyj1 < yyrr1; yyj1++ { yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = IngressRule{} + yyv1[yyj1] = "" } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) + yyv1[yyj1] = FSType(r.DecodeString()) } } if yyrt1 { for ; yyj1 < yyl1; yyj1++ { - yyv1 = 
append(yyv1, IngressRule{}) + yyv1 = append(yyv1, "") yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = IngressRule{} + yyv1[yyj1] = "" } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) + yyv1[yyj1] = FSType(r.DecodeString()) } } @@ -18807,16 +17127,15 @@ func (x codecSelfer1234) decSliceIngressRule(v *[]IngressRule, d *codec1978.Deco for ; !r.CheckBreak(); yyj1++ { if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, IngressRule{}) // var yyz1 IngressRule + yyv1 = append(yyv1, "") // var yyz1 FSType yyc1 = true } yyh1.ElemContainerState(yyj1) if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv1[yyj1] = IngressRule{} + yyv1[yyj1] = "" } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) + yyv1[yyj1] = FSType(r.DecodeString()) } } else { @@ -18828,7 +17147,7 @@ func (x codecSelfer1234) decSliceIngressRule(v *[]IngressRule, d *codec1978.Deco yyv1 = yyv1[:yyj1] yyc1 = true } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []IngressRule{} + yyv1 = []FSType{} yyc1 = true } } @@ -18838,7 +17157,7 @@ func (x codecSelfer1234) decSliceIngressRule(v *[]IngressRule, d *codec1978.Deco } } -func (x codecSelfer1234) encSliceHTTPIngressPath(v []HTTPIngressPath, e *codec1978.Encoder) { +func (x codecSelfer1234) encSliceHostPortRange(v []HostPortRange, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -18851,7 +17170,7 @@ func (x codecSelfer1234) encSliceHTTPIngressPath(v []HTTPIngressPath, e *codec19 z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) decSliceHTTPIngressPath(v *[]HTTPIngressPath, d *codec1978.Decoder) { +func (x codecSelfer1234) decSliceHostPortRange(v *[]HostPortRange, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -18862,7 +17181,7 @@ func (x codecSelfer1234) decSliceHTTPIngressPath(v *[]HTTPIngressPath, d *codec1 _ = yyc1 if yyl1 == 0 { if yyv1 == nil { - yyv1 = []HTTPIngressPath{} + yyv1 = []HostPortRange{} yyc1 = true } else if len(yyv1) != 0 { yyv1 = yyv1[:0] @@ -18877,15 +17196,15 @@ func (x codecSelfer1234) decSliceHTTPIngressPath(v *[]HTTPIngressPath, d *codec1 yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 64) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] } else { - yyv1 = make([]HTTPIngressPath, yyrl1) + yyv1 = make([]HostPortRange, yyrl1) } } else { - yyv1 = make([]HTTPIngressPath, yyrl1) + yyv1 = make([]HostPortRange, yyrl1) } yyc1 = true yyrr1 = len(yyv1) @@ -18900,7 +17219,7 @@ func (x codecSelfer1234) decSliceHTTPIngressPath(v *[]HTTPIngressPath, d *codec1 for ; yyj1 < yyrr1; yyj1++ { yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = HTTPIngressPath{} + yyv1[yyj1] = HostPortRange{} } else { yyv2 := &yyv1[yyj1] yyv2.CodecDecodeSelf(d) @@ -18909,10 +17228,10 @@ func (x codecSelfer1234) decSliceHTTPIngressPath(v *[]HTTPIngressPath, d *codec1 } if yyrt1 { for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, HTTPIngressPath{}) + yyv1 = append(yyv1, HostPortRange{}) yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = HTTPIngressPath{} + yyv1[yyj1] = HostPortRange{} } else { yyv3 := &yyv1[yyj1] yyv3.CodecDecodeSelf(d) @@ -18926,13 +17245,13 @@ func (x codecSelfer1234) decSliceHTTPIngressPath(v *[]HTTPIngressPath, d *codec1 for ; !r.CheckBreak(); yyj1++ { if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, HTTPIngressPath{}) // var yyz1 HTTPIngressPath + 
yyv1 = append(yyv1, HostPortRange{}) // var yyz1 HostPortRange yyc1 = true } yyh1.ElemContainerState(yyj1) if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv1[yyj1] = HTTPIngressPath{} + yyv1[yyj1] = HostPortRange{} } else { yyv4 := &yyv1[yyj1] yyv4.CodecDecodeSelf(d) @@ -18947,7 +17266,7 @@ func (x codecSelfer1234) decSliceHTTPIngressPath(v *[]HTTPIngressPath, d *codec1 yyv1 = yyv1[:yyj1] yyc1 = true } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []HTTPIngressPath{} + yyv1 = []HostPortRange{} yyc1 = true } } @@ -18957,7 +17276,7 @@ func (x codecSelfer1234) decSliceHTTPIngressPath(v *[]HTTPIngressPath, d *codec1 } } -func (x codecSelfer1234) encSliceReplicaSet(v []ReplicaSet, e *codec1978.Encoder) { +func (x codecSelfer1234) encSliceIDRange(v []IDRange, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -18970,7 +17289,7 @@ func (x codecSelfer1234) encSliceReplicaSet(v []ReplicaSet, e *codec1978.Encoder z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) decSliceReplicaSet(v *[]ReplicaSet, d *codec1978.Decoder) { +func (x codecSelfer1234) decSliceIDRange(v *[]IDRange, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -18981,7 +17300,7 @@ func (x codecSelfer1234) decSliceReplicaSet(v *[]ReplicaSet, d *codec1978.Decode _ = yyc1 if yyl1 == 0 { if yyv1 == nil { - yyv1 = []ReplicaSet{} + yyv1 = []IDRange{} yyc1 = true } else if len(yyv1) != 0 { yyv1 = yyv1[:0] @@ -18996,15 +17315,15 @@ func (x codecSelfer1234) decSliceReplicaSet(v *[]ReplicaSet, d *codec1978.Decode yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 560) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] } else { - yyv1 = make([]ReplicaSet, yyrl1) + yyv1 = make([]IDRange, yyrl1) } } else { - yyv1 = make([]ReplicaSet, yyrl1) + yyv1 = make([]IDRange, yyrl1) } yyc1 = true yyrr1 = len(yyv1) @@ -19019,7 +17338,7 @@ func (x codecSelfer1234) decSliceReplicaSet(v *[]ReplicaSet, d *codec1978.Decode for ; yyj1 < yyrr1; yyj1++ { yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = ReplicaSet{} + yyv1[yyj1] = IDRange{} } else { yyv2 := &yyv1[yyj1] yyv2.CodecDecodeSelf(d) @@ -19028,10 +17347,10 @@ func (x codecSelfer1234) decSliceReplicaSet(v *[]ReplicaSet, d *codec1978.Decode } if yyrt1 { for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, ReplicaSet{}) + yyv1 = append(yyv1, IDRange{}) yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = ReplicaSet{} + yyv1[yyj1] = IDRange{} } else { yyv3 := &yyv1[yyj1] yyv3.CodecDecodeSelf(d) @@ -19045,13 +17364,13 @@ func (x codecSelfer1234) decSliceReplicaSet(v *[]ReplicaSet, d *codec1978.Decode for ; !r.CheckBreak(); yyj1++ { if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, ReplicaSet{}) // var yyz1 ReplicaSet + yyv1 = append(yyv1, IDRange{}) // var yyz1 IDRange yyc1 = true } yyh1.ElemContainerState(yyj1) if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv1[yyj1] = ReplicaSet{} + yyv1[yyj1] = IDRange{} } else { yyv4 := &yyv1[yyj1] yyv4.CodecDecodeSelf(d) @@ -19066,7 +17385,7 @@ func (x codecSelfer1234) decSliceReplicaSet(v *[]ReplicaSet, d *codec1978.Decode yyv1 = yyv1[:yyj1] yyc1 = true } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []ReplicaSet{} + yyv1 = []IDRange{} yyc1 = true } } @@ -19076,20 +17395,20 @@ func (x codecSelfer1234) decSliceReplicaSet(v *[]ReplicaSet, d *codec1978.Decode } } -func (x 
codecSelfer1234) encSliceapi_Capability(v []pkg2_api.Capability, e *codec1978.Encoder) { +func (x codecSelfer1234) encSlicePodSecurityPolicy(v []PodSecurityPolicy, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yysf2 := &yyv1 - yysf2.CodecEncodeSelf(e) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) decSliceapi_Capability(v *[]pkg2_api.Capability, d *codec1978.Decoder) { +func (x codecSelfer1234) decSlicePodSecurityPolicy(v *[]PodSecurityPolicy, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -19100,7 +17419,7 @@ func (x codecSelfer1234) decSliceapi_Capability(v *[]pkg2_api.Capability, d *cod _ = yyc1 if yyl1 == 0 { if yyv1 == nil { - yyv1 = []pkg2_api.Capability{} + yyv1 = []PodSecurityPolicy{} yyc1 = true } else if len(yyv1) != 0 { yyv1 = yyv1[:0] @@ -19113,18 +17432,23 @@ func (x codecSelfer1234) decSliceapi_Capability(v *[]pkg2_api.Capability, d *cod yyrr1 = yyl1 // len(yyv1) if yyl1 > cap(yyv1) { - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 536) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] } else { - yyv1 = make([]pkg2_api.Capability, yyrl1) + yyv1 = make([]PodSecurityPolicy, yyrl1) } } else { - yyv1 = make([]pkg2_api.Capability, yyrl1) + yyv1 = make([]PodSecurityPolicy, yyrl1) } yyc1 = true yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } } else if yyl1 != len(yyv1) { yyv1 = yyv1[:yyl1] yyc1 = true @@ -19133,20 +17457,22 @@ func (x codecSelfer1234) decSliceapi_Capability(v *[]pkg2_api.Capability, d *cod for ; yyj1 < yyrr1; yyj1++ { yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = "" + yyv1[yyj1] = PodSecurityPolicy{} } else { - yyv1[yyj1] = pkg2_api.Capability(r.DecodeString()) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } if yyrt1 { for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, "") + yyv1 = append(yyv1, PodSecurityPolicy{}) yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = "" + yyv1[yyj1] = PodSecurityPolicy{} } else { - yyv1[yyj1] = pkg2_api.Capability(r.DecodeString()) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } @@ -19157,15 +17483,16 @@ func (x codecSelfer1234) decSliceapi_Capability(v *[]pkg2_api.Capability, d *cod for ; !r.CheckBreak(); yyj1++ { if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, "") // var yyz1 pkg2_api.Capability + yyv1 = append(yyv1, PodSecurityPolicy{}) // var yyz1 PodSecurityPolicy yyc1 = true } yyh1.ElemContainerState(yyj1) if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv1[yyj1] = "" + yyv1[yyj1] = PodSecurityPolicy{} } else { - yyv1[yyj1] = pkg2_api.Capability(r.DecodeString()) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -19177,7 +17504,7 @@ func (x codecSelfer1234) decSliceapi_Capability(v *[]pkg2_api.Capability, d *cod yyv1 = yyv1[:yyj1] yyc1 = true } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []pkg2_api.Capability{} + yyv1 = []PodSecurityPolicy{} yyc1 = true } } @@ -19187,19 +17514,20 @@ func (x codecSelfer1234) decSliceapi_Capability(v *[]pkg2_api.Capability, d *cod } } -func (x codecSelfer1234) encSliceFSType(v []FSType, e *codec1978.Encoder) { +func (x codecSelfer1234) encSliceNetworkPolicyIngressRule(v 
[]NetworkPolicyIngressRule, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yyv1.CodecEncodeSelf(e) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) decSliceFSType(v *[]FSType, d *codec1978.Decoder) { +func (x codecSelfer1234) decSliceNetworkPolicyIngressRule(v *[]NetworkPolicyIngressRule, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -19210,7 +17538,7 @@ func (x codecSelfer1234) decSliceFSType(v *[]FSType, d *codec1978.Decoder) { _ = yyc1 if yyl1 == 0 { if yyv1 == nil { - yyv1 = []FSType{} + yyv1 = []NetworkPolicyIngressRule{} yyc1 = true } else if len(yyv1) != 0 { yyv1 = yyv1[:0] @@ -19223,18 +17551,23 @@ func (x codecSelfer1234) decSliceFSType(v *[]FSType, d *codec1978.Decoder) { yyrr1 = yyl1 // len(yyv1) if yyl1 > cap(yyv1) { - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 48) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] } else { - yyv1 = make([]FSType, yyrl1) + yyv1 = make([]NetworkPolicyIngressRule, yyrl1) } } else { - yyv1 = make([]FSType, yyrl1) + yyv1 = make([]NetworkPolicyIngressRule, yyrl1) } yyc1 = true yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } } else if yyl1 != len(yyv1) { yyv1 = yyv1[:yyl1] yyc1 = true @@ -19243,20 +17576,22 @@ func (x codecSelfer1234) decSliceFSType(v *[]FSType, d *codec1978.Decoder) { for ; yyj1 < yyrr1; yyj1++ { yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = "" + yyv1[yyj1] = NetworkPolicyIngressRule{} } else { - yyv1[yyj1] = FSType(r.DecodeString()) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } if yyrt1 { for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, "") + yyv1 = append(yyv1, NetworkPolicyIngressRule{}) yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = "" + yyv1[yyj1] = NetworkPolicyIngressRule{} } else { - yyv1[yyj1] = FSType(r.DecodeString()) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } @@ -19267,15 +17602,16 @@ func (x codecSelfer1234) decSliceFSType(v *[]FSType, d *codec1978.Decoder) { for ; !r.CheckBreak(); yyj1++ { if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, "") // var yyz1 FSType + yyv1 = append(yyv1, NetworkPolicyIngressRule{}) // var yyz1 NetworkPolicyIngressRule yyc1 = true } yyh1.ElemContainerState(yyj1) if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv1[yyj1] = "" + yyv1[yyj1] = NetworkPolicyIngressRule{} } else { - yyv1[yyj1] = FSType(r.DecodeString()) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -19287,7 +17623,7 @@ func (x codecSelfer1234) decSliceFSType(v *[]FSType, d *codec1978.Decoder) { yyv1 = yyv1[:yyj1] yyc1 = true } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []FSType{} + yyv1 = []NetworkPolicyIngressRule{} yyc1 = true } } @@ -19297,7 +17633,7 @@ func (x codecSelfer1234) decSliceFSType(v *[]FSType, d *codec1978.Decoder) { } } -func (x codecSelfer1234) encSliceHostPortRange(v []HostPortRange, e *codec1978.Encoder) { +func (x codecSelfer1234) encSliceNetworkPolicyPort(v []NetworkPolicyPort, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -19310,7 +17646,7 @@ func (x codecSelfer1234) encSliceHostPortRange(v []HostPortRange, e *codec1978.E 
z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) decSliceHostPortRange(v *[]HostPortRange, d *codec1978.Decoder) { +func (x codecSelfer1234) decSliceNetworkPolicyPort(v *[]NetworkPolicyPort, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -19321,7 +17657,7 @@ func (x codecSelfer1234) decSliceHostPortRange(v *[]HostPortRange, d *codec1978. _ = yyc1 if yyl1 == 0 { if yyv1 == nil { - yyv1 = []HostPortRange{} + yyv1 = []NetworkPolicyPort{} yyc1 = true } else if len(yyv1) != 0 { yyv1 = yyv1[:0] @@ -19341,10 +17677,10 @@ func (x codecSelfer1234) decSliceHostPortRange(v *[]HostPortRange, d *codec1978. if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] } else { - yyv1 = make([]HostPortRange, yyrl1) + yyv1 = make([]NetworkPolicyPort, yyrl1) } } else { - yyv1 = make([]HostPortRange, yyrl1) + yyv1 = make([]NetworkPolicyPort, yyrl1) } yyc1 = true yyrr1 = len(yyv1) @@ -19359,7 +17695,7 @@ func (x codecSelfer1234) decSliceHostPortRange(v *[]HostPortRange, d *codec1978. for ; yyj1 < yyrr1; yyj1++ { yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = HostPortRange{} + yyv1[yyj1] = NetworkPolicyPort{} } else { yyv2 := &yyv1[yyj1] yyv2.CodecDecodeSelf(d) @@ -19368,10 +17704,10 @@ func (x codecSelfer1234) decSliceHostPortRange(v *[]HostPortRange, d *codec1978. } if yyrt1 { for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, HostPortRange{}) + yyv1 = append(yyv1, NetworkPolicyPort{}) yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = HostPortRange{} + yyv1[yyj1] = NetworkPolicyPort{} } else { yyv3 := &yyv1[yyj1] yyv3.CodecDecodeSelf(d) @@ -19385,13 +17721,13 @@ func (x codecSelfer1234) decSliceHostPortRange(v *[]HostPortRange, d *codec1978. for ; !r.CheckBreak(); yyj1++ { if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, HostPortRange{}) // var yyz1 HostPortRange + yyv1 = append(yyv1, NetworkPolicyPort{}) // var yyz1 NetworkPolicyPort yyc1 = true } yyh1.ElemContainerState(yyj1) if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv1[yyj1] = HostPortRange{} + yyv1[yyj1] = NetworkPolicyPort{} } else { yyv4 := &yyv1[yyj1] yyv4.CodecDecodeSelf(d) @@ -19406,7 +17742,7 @@ func (x codecSelfer1234) decSliceHostPortRange(v *[]HostPortRange, d *codec1978. yyv1 = yyv1[:yyj1] yyc1 = true } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []HostPortRange{} + yyv1 = []NetworkPolicyPort{} yyc1 = true } } @@ -19416,7 +17752,7 @@ func (x codecSelfer1234) decSliceHostPortRange(v *[]HostPortRange, d *codec1978. 
} } -func (x codecSelfer1234) encSliceIDRange(v []IDRange, e *codec1978.Encoder) { +func (x codecSelfer1234) encSliceNetworkPolicyPeer(v []NetworkPolicyPeer, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -19429,7 +17765,7 @@ func (x codecSelfer1234) encSliceIDRange(v []IDRange, e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) decSliceIDRange(v *[]IDRange, d *codec1978.Decoder) { +func (x codecSelfer1234) decSliceNetworkPolicyPeer(v *[]NetworkPolicyPeer, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -19440,7 +17776,7 @@ func (x codecSelfer1234) decSliceIDRange(v *[]IDRange, d *codec1978.Decoder) { _ = yyc1 if yyl1 == 0 { if yyv1 == nil { - yyv1 = []IDRange{} + yyv1 = []NetworkPolicyPeer{} yyc1 = true } else if len(yyv1) != 0 { yyv1 = yyv1[:0] @@ -19460,10 +17796,10 @@ func (x codecSelfer1234) decSliceIDRange(v *[]IDRange, d *codec1978.Decoder) { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] } else { - yyv1 = make([]IDRange, yyrl1) + yyv1 = make([]NetworkPolicyPeer, yyrl1) } } else { - yyv1 = make([]IDRange, yyrl1) + yyv1 = make([]NetworkPolicyPeer, yyrl1) } yyc1 = true yyrr1 = len(yyv1) @@ -19478,7 +17814,7 @@ func (x codecSelfer1234) decSliceIDRange(v *[]IDRange, d *codec1978.Decoder) { for ; yyj1 < yyrr1; yyj1++ { yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = IDRange{} + yyv1[yyj1] = NetworkPolicyPeer{} } else { yyv2 := &yyv1[yyj1] yyv2.CodecDecodeSelf(d) @@ -19487,10 +17823,10 @@ func (x codecSelfer1234) decSliceIDRange(v *[]IDRange, d *codec1978.Decoder) { } if yyrt1 { for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, IDRange{}) + yyv1 = append(yyv1, NetworkPolicyPeer{}) yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = IDRange{} + yyv1[yyj1] = NetworkPolicyPeer{} } else { yyv3 := &yyv1[yyj1] yyv3.CodecDecodeSelf(d) @@ -19504,13 +17840,13 @@ func (x codecSelfer1234) decSliceIDRange(v *[]IDRange, d *codec1978.Decoder) { for ; !r.CheckBreak(); yyj1++ { if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, IDRange{}) // var yyz1 IDRange + yyv1 = append(yyv1, NetworkPolicyPeer{}) // var yyz1 NetworkPolicyPeer yyc1 = true } yyh1.ElemContainerState(yyj1) if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv1[yyj1] = IDRange{} + yyv1[yyj1] = NetworkPolicyPeer{} } else { yyv4 := &yyv1[yyj1] yyv4.CodecDecodeSelf(d) @@ -19525,7 +17861,7 @@ func (x codecSelfer1234) decSliceIDRange(v *[]IDRange, d *codec1978.Decoder) { yyv1 = yyv1[:yyj1] yyc1 = true } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []IDRange{} + yyv1 = []NetworkPolicyPeer{} yyc1 = true } } @@ -19535,7 +17871,7 @@ func (x codecSelfer1234) decSliceIDRange(v *[]IDRange, d *codec1978.Decoder) { } } -func (x codecSelfer1234) encSlicePodSecurityPolicy(v []PodSecurityPolicy, e *codec1978.Encoder) { +func (x codecSelfer1234) encSliceNetworkPolicy(v []NetworkPolicy, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -19548,7 +17884,7 @@ func (x codecSelfer1234) encSlicePodSecurityPolicy(v []PodSecurityPolicy, e *cod z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) decSlicePodSecurityPolicy(v *[]PodSecurityPolicy, d *codec1978.Decoder) { +func (x codecSelfer1234) decSliceNetworkPolicy(v *[]NetworkPolicy, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -19559,7 +17895,7 @@ func (x 
codecSelfer1234) decSlicePodSecurityPolicy(v *[]PodSecurityPolicy, d *co _ = yyc1 if yyl1 == 0 { if yyv1 == nil { - yyv1 = []PodSecurityPolicy{} + yyv1 = []NetworkPolicy{} yyc1 = true } else if len(yyv1) != 0 { yyv1 = yyv1[:0] @@ -19574,15 +17910,15 @@ func (x codecSelfer1234) decSlicePodSecurityPolicy(v *[]PodSecurityPolicy, d *co yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 352) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 296) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] } else { - yyv1 = make([]PodSecurityPolicy, yyrl1) + yyv1 = make([]NetworkPolicy, yyrl1) } } else { - yyv1 = make([]PodSecurityPolicy, yyrl1) + yyv1 = make([]NetworkPolicy, yyrl1) } yyc1 = true yyrr1 = len(yyv1) @@ -19597,7 +17933,7 @@ func (x codecSelfer1234) decSlicePodSecurityPolicy(v *[]PodSecurityPolicy, d *co for ; yyj1 < yyrr1; yyj1++ { yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = PodSecurityPolicy{} + yyv1[yyj1] = NetworkPolicy{} } else { yyv2 := &yyv1[yyj1] yyv2.CodecDecodeSelf(d) @@ -19606,10 +17942,10 @@ func (x codecSelfer1234) decSlicePodSecurityPolicy(v *[]PodSecurityPolicy, d *co } if yyrt1 { for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, PodSecurityPolicy{}) + yyv1 = append(yyv1, NetworkPolicy{}) yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = PodSecurityPolicy{} + yyv1[yyj1] = NetworkPolicy{} } else { yyv3 := &yyv1[yyj1] yyv3.CodecDecodeSelf(d) @@ -19623,13 +17959,13 @@ func (x codecSelfer1234) decSlicePodSecurityPolicy(v *[]PodSecurityPolicy, d *co for ; !r.CheckBreak(); yyj1++ { if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, PodSecurityPolicy{}) // var yyz1 PodSecurityPolicy + yyv1 = append(yyv1, NetworkPolicy{}) // var yyz1 NetworkPolicy yyc1 = true } yyh1.ElemContainerState(yyj1) if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv1[yyj1] = PodSecurityPolicy{} + yyv1[yyj1] = NetworkPolicy{} } else { yyv4 := &yyv1[yyj1] yyv4.CodecDecodeSelf(d) @@ -19644,7 +17980,7 @@ func (x codecSelfer1234) decSlicePodSecurityPolicy(v *[]PodSecurityPolicy, d *co yyv1 = yyv1[:yyj1] yyc1 = true } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []PodSecurityPolicy{} + yyv1 = []NetworkPolicy{} yyc1 = true } } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/extensions/types.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/extensions/types.go index f8a736bdf3de..8b1b6ef31727 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/extensions/types.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/extensions/types.go @@ -38,16 +38,16 @@ import ( // describes the attributes of a scale subresource type ScaleSpec struct { // desired number of instances for the scaled object. - Replicas int `json:"replicas,omitempty"` + Replicas int32 `json:"replicas,omitempty"` } // represents the current status of a scale subresource. type ScaleStatus struct { // actual number of observed instances of the scaled object. - Replicas int `json:"replicas"` + Replicas int32 `json:"replicas"` // label query over pods that should match the replicas count. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/labels.md#label-selectors + // More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors Selector *unversioned.LabelSelector `json:"selector,omitempty"` } @@ -56,13 +56,13 @@ type ScaleStatus struct { // represents a scaling request for a resource. 
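The hunks above retarget the generated codec slice helpers and switch the scale subresource's replica counts from int to int32; fixed-width integers keep the wire format stable across architectures. A minimal sketch of the pointer round-trip this enables, using hypothetical stand-in types (the real conversions, e.g. Convert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec, appear later in this diff):

package main

import "fmt"

// Stand-ins for a versioned spec (pointer so "unset" differs from an
// explicit 0) and an internal spec (plain value).
type versionedSpec struct {
	Replicas *int32
}

type internalSpec struct {
	Replicas int32
}

// toInternal mirrors the shape of the v1beta1 -> extensions direction:
// a nil pointer leaves the internal field at its zero default.
func toInternal(in *versionedSpec, out *internalSpec) {
	if in.Replicas != nil {
		out.Replicas = *in.Replicas
	}
}

// toVersioned mirrors the reverse direction: the value is always
// materialized into a fresh pointer.
func toVersioned(in *internalSpec, out *versionedSpec) {
	out.Replicas = new(int32)
	*out.Replicas = in.Replicas
}

func main() {
	n := int32(3)
	var internal internalSpec
	toInternal(&versionedSpec{Replicas: &n}, &internal)
	var back versionedSpec
	toVersioned(&internal, &back)
	fmt.Println(internal.Replicas, *back.Replicas) // 3 3
}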
type Scale struct { unversioned.TypeMeta `json:",inline"` - // Standard object metadata; More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata. + // Standard object metadata; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata. api.ObjectMeta `json:"metadata,omitempty"` - // defines the behavior of the scale. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#spec-and-status. + // defines the behavior of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. Spec ScaleSpec `json:"spec,omitempty"` - // current status of the scale. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#spec-and-status. Read-only. + // current status of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. Read-only. Status ScaleStatus `json:"status,omitempty"` } @@ -71,24 +71,6 @@ type ReplicationControllerDummy struct { unversioned.TypeMeta `json:",inline"` } -// SubresourceReference contains enough information to let you inspect or modify the referred subresource. -type SubresourceReference struct { - // Kind of the referent; More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#types-kinds" - Kind string `json:"kind,omitempty"` - // Name of the referent; More info: http://releases.k8s.io/release-1.2/docs/user-guide/identifiers.md#names - Name string `json:"name,omitempty"` - // API version of the referent - APIVersion string `json:"apiVersion,omitempty"` - // Subresource name of the referent - Subresource string `json:"subresource,omitempty"` -} - -type CPUTargetUtilization struct { - // fraction of the requested CPU that should be utilized/used, - // e.g. 70 means that 70% of the requested CPU should be in use. - TargetPercentage int `json:"targetPercentage"` -} - // Alpha-level support for Custom Metrics in HPA (as annotations). type CustomMetricTarget struct { // Custom Metric name. @@ -112,64 +94,7 @@ type CustomMetricCurrentStatusList struct { Items []CustomMetricCurrentStatus `json:"items"` } -// specification of a horizontal pod autoscaler. -type HorizontalPodAutoscalerSpec struct { - // reference to Scale subresource; horizontal pod autoscaler will learn the current resource consumption from its status, - // and will set the desired number of pods by modifying its spec. - ScaleRef SubresourceReference `json:"scaleRef"` - // lower limit for the number of pods that can be set by the autoscaler, default 1. - MinReplicas *int `json:"minReplicas,omitempty"` - // upper limit for the number of pods that can be set by the autoscaler. It cannot be smaller than MinReplicas. - MaxReplicas int `json:"maxReplicas"` - // target average CPU utilization (represented as a percentage of requested CPU) over all the pods; - // if not specified it defaults to the target CPU utilization at 80% of the requested resources. - CPUUtilization *CPUTargetUtilization `json:"cpuUtilization,omitempty"` -} - -// current status of a horizontal pod autoscaler -type HorizontalPodAutoscalerStatus struct { - // most recent generation observed by this autoscaler. - ObservedGeneration *int64 `json:"observedGeneration,omitempty"` - - // last time the HorizontalPodAutoscaler scaled the number of pods; - // used by the autoscaler to control how often the number of pods is changed. - LastScaleTime *unversioned.Time `json:"lastScaleTime,omitempty"` - - // current number of replicas of pods managed by this autoscaler. 
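The HorizontalPodAutoscaler types being removed in this hunk relocate to the autoscaling API group (the conversion functions later in this diff map SubresourceReference to autoscaling.CrossVersionObjectReference and back). The deleted CPUUtilization.TargetPercentage field feeds a proportional scaling rule; a sketch of that idea, assuming a simple average and ignoring the real controller's damping and bounds:

package main

import (
	"fmt"
	"math"
)

// desiredReplicas illustrates the utilization ratio suggested by the
// deleted fields: scale the pod count so per-pod CPU usage approaches the
// target percentage. Illustrative only, not the controller's actual code.
func desiredReplicas(current, currentUtilPct, targetUtilPct int32) int32 {
	ratio := float64(currentUtilPct) / float64(targetUtilPct)
	return int32(math.Ceil(float64(current) * ratio))
}

func main() {
	// 4 pods averaging 90% usage against an 80% target -> 5 pods.
	fmt.Println(desiredReplicas(4, 90, 80))
}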
- CurrentReplicas int `json:"currentReplicas"` - - // desired number of replicas of pods managed by this autoscaler. - DesiredReplicas int `json:"desiredReplicas"` - - // current average CPU utilization over all pods, represented as a percentage of requested CPU, - // e.g. 70 means that an average pod is using now 70% of its requested CPU. - CurrentCPUUtilizationPercentage *int `json:"currentCPUUtilizationPercentage,omitempty"` -} - -// +genclient=true - -// configuration of a horizontal pod autoscaler. -type HorizontalPodAutoscaler struct { - unversioned.TypeMeta `json:",inline"` - api.ObjectMeta `json:"metadata,omitempty"` - - // behaviour of autoscaler. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#spec-and-status. - Spec HorizontalPodAutoscalerSpec `json:"spec,omitempty"` - - // current information about the autoscaler. - Status HorizontalPodAutoscalerStatus `json:"status,omitempty"` -} - -// list of horizontal pod autoscaler objects. -type HorizontalPodAutoscalerList struct { - unversioned.TypeMeta `json:",inline"` - unversioned.ListMeta `json:"metadata,omitempty"` - - // list of horizontal pod autoscaler objects. - Items []HorizontalPodAutoscaler `json:"items"` -} - -// +genclient=true +// +genclient=true,nonNamespaced=true // A ThirdPartyResource is a generic representation of a resource, it is used by add-ons and plugins to add new resource // types to the API. It consists of one or more Versions of the api. @@ -201,9 +126,6 @@ type ThirdPartyResourceList struct { type APIVersion struct { // Name of this version (e.g. 'v1'). Name string `json:"name,omitempty"` - - // The API group to add this object into, default 'experimental'. - APIGroup string `json:"apiGroup,omitempty"` } // An internal object, used for versioned storage in etcd. Not exposed to the end user. @@ -232,7 +154,7 @@ type Deployment struct { type DeploymentSpec struct { // Number of desired pods. This is a pointer to distinguish between explicit // zero and not specified. Defaults to 1. - Replicas int `json:"replicas,omitempty"` + Replicas int32 `json:"replicas,omitempty"` // Label selector for pods. Existing ReplicaSets whose pods are // selected by this will be the ones affected by this deployment. @@ -247,11 +169,11 @@ type DeploymentSpec struct { // Minimum number of seconds for which a newly created pod should be ready // without any of its container crashing, for it to be considered available. // Defaults to 0 (pod will be considered available as soon as it is ready) - MinReadySeconds int `json:"minReadySeconds,omitempty"` + MinReadySeconds int32 `json:"minReadySeconds,omitempty"` // The number of old ReplicaSets to retain to allow rollback. // This is a pointer to distinguish between explicit zero and not specified. - RevisionHistoryLimit *int `json:"revisionHistoryLimit,omitempty"` + RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty"` // Indicates that the deployment is paused and will not be processed by the // deployment controller. @@ -337,16 +259,16 @@ type DeploymentStatus struct { ObservedGeneration int64 `json:"observedGeneration,omitempty"` // Total number of non-terminated pods targeted by this deployment (their labels match the selector). - Replicas int `json:"replicas,omitempty"` + Replicas int32 `json:"replicas,omitempty"` // Total number of non-terminated pods targeted by this deployment that have the desired template spec. 
- UpdatedReplicas int `json:"updatedReplicas,omitempty"` + UpdatedReplicas int32 `json:"updatedReplicas,omitempty"` // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment. - AvailableReplicas int `json:"availableReplicas,omitempty"` + AvailableReplicas int32 `json:"availableReplicas,omitempty"` // Total number of unavailable pods targeted by this deployment. - UnavailableReplicas int `json:"unavailableReplicas,omitempty"` + UnavailableReplicas int32 `json:"unavailableReplicas,omitempty"` } type DeploymentList struct { @@ -408,14 +330,14 @@ type DaemonSetSpec struct { // Selector is a label query over pods that are managed by the daemon set. // Must match in order to be controlled. // If empty, defaulted to labels on Pod template. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/labels.md#label-selectors + // More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors Selector *unversioned.LabelSelector `json:"selector,omitempty"` // Template is the object that describes the pod that will be created. // The DaemonSet will create exactly one copy of this pod on every node // that matches the template's node selector (or on every node if no node // selector is specified). - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/replication-controller.md#pod-template + // More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#pod-template Template api.PodTemplateSpec `json:"template"` // TODO(madhusudancs): Uncomment while implementing DaemonSet updates. @@ -445,15 +367,15 @@ const ( type DaemonSetStatus struct { // CurrentNumberScheduled is the number of nodes that are running at least 1 // daemon pod and are supposed to run the daemon pod. - CurrentNumberScheduled int `json:"currentNumberScheduled"` + CurrentNumberScheduled int32 `json:"currentNumberScheduled"` // NumberMisscheduled is the number of nodes that are running the daemon pod, but are // not supposed to run the daemon pod. - NumberMisscheduled int `json:"numberMisscheduled"` + NumberMisscheduled int32 `json:"numberMisscheduled"` // DesiredNumberScheduled is the total number of nodes that should be running the daemon // pod (including nodes correctly running the daemon pod). - DesiredNumberScheduled int `json:"desiredNumberScheduled"` + DesiredNumberScheduled int32 `json:"desiredNumberScheduled"` } // +genclient=true @@ -462,18 +384,18 @@ type DaemonSetStatus struct { type DaemonSet struct { unversioned.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata api.ObjectMeta `json:"metadata,omitempty"` // Spec defines the desired behavior of this daemon set. - // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#spec-and-status + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status Spec DaemonSetSpec `json:"spec,omitempty"` // Status is the current status of this daemon set. This data may be // out of date by some window of time. // Populated by the system. // Read-only. 
- // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#spec-and-status + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status Status DaemonSetStatus `json:"status,omitempty"` } @@ -481,7 +403,7 @@ type DaemonSet struct { type DaemonSetList struct { unversioned.TypeMeta `json:",inline"` // Standard list metadata. - // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata unversioned.ListMeta `json:"metadata,omitempty"` // Items is a list of daemon sets. @@ -491,7 +413,7 @@ type DaemonSetList struct { type ThirdPartyResourceDataList struct { unversioned.TypeMeta `json:",inline"` // Standard list metadata - // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata unversioned.ListMeta `json:"metadata,omitempty"` // Items is a list of third party objects Items []ThirdPartyResourceData `json:"items"` @@ -499,127 +421,6 @@ type ThirdPartyResourceDataList struct { // +genclient=true -// Job represents the configuration of a single job. -type Job struct { - unversioned.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata - api.ObjectMeta `json:"metadata,omitempty"` - - // Spec is a structure defining the expected behavior of a job. - // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#spec-and-status - Spec JobSpec `json:"spec,omitempty"` - - // Status is a structure describing current status of a job. - // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#spec-and-status - Status JobStatus `json:"status,omitempty"` -} - -// JobList is a collection of jobs. -type JobList struct { - unversioned.TypeMeta `json:",inline"` - // Standard list metadata - // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata - unversioned.ListMeta `json:"metadata,omitempty"` - - // Items is the list of Job. - Items []Job `json:"items"` -} - -// JobSpec describes how the job execution will look like. -type JobSpec struct { - - // Parallelism specifies the maximum desired number of pods the job should - // run at any given time. The actual number of pods running in steady state will - // be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), - // i.e. when the work left to do is less than max parallelism. - Parallelism *int `json:"parallelism,omitempty"` - - // Completions specifies the desired number of successfully finished pods the - // job should be run with. Setting to nil means that the success of any - // pod signals the success of all pods, and allows parallelism to have any positive - // value. Setting to 1 means that parallelism is limited to 1 and the success of that - // pod signals the success of the job. - Completions *int `json:"completions,omitempty"` - - // Optional duration in seconds relative to the startTime that the job may be active - // before the system tries to terminate it; value must be positive integer - ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty"` - - // Selector is a label query over pods that should match the pod count. - // Normally, the system sets this field for you. 
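The Job types deleted in this hunk now live in the batch API group (the conversion.go changes later in this diff translate between the two). The Parallelism and Completions comments above encode the completion rule; a minimal sketch of that rule, with the controller machinery elided:

package main

import "fmt"

// jobComplete follows the Completions comment above: nil means any single
// pod success completes the job; otherwise the succeeded count must reach
// *Completions. A sketch, not the job controller's actual logic.
func jobComplete(completions *int32, succeeded int32) bool {
	if completions == nil {
		return succeeded > 0
	}
	return succeeded >= *completions
}

func main() {
	two := int32(2)
	fmt.Println(jobComplete(nil, 1))  // true: any success finishes the job
	fmt.Println(jobComplete(&two, 1)) // false: two successes required
}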
- Selector *unversioned.LabelSelector `json:"selector,omitempty"` - - // ManualSelector controls generation of pod labels and pod selectors. - // Leave `manualSelector` unset unless you are certain what you are doing. - // When false or unset, the system pick labels unique to this job - // and appends those labels to the pod template. When true, - // the user is responsible for picking unique labels and specifying - // the selector. Failure to pick a unique label may cause this - // and other jobs to not function correctly. However, You may see - // `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` - // API. - ManualSelector *bool `json:"manualSelector,omitempty"` - - // Template is the object that describes the pod that will be created when - // executing a job. - Template api.PodTemplateSpec `json:"template"` -} - -// JobStatus represents the current state of a Job. -type JobStatus struct { - - // Conditions represent the latest available observations of an object's current state. - Conditions []JobCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` - - // StartTime represents time when the job was acknowledged by the Job Manager. - // It is not guaranteed to be set in happens-before order across separate operations. - // It is represented in RFC3339 form and is in UTC. - StartTime *unversioned.Time `json:"startTime,omitempty"` - - // CompletionTime represents time when the job was completed. It is not guaranteed to - // be set in happens-before order across separate operations. - // It is represented in RFC3339 form and is in UTC. - CompletionTime *unversioned.Time `json:"completionTime,omitempty"` - - // Active is the number of actively running pods. - Active int `json:"active,omitempty"` - - // Succeeded is the number of pods which reached Phase Succeeded. - Succeeded int `json:"succeeded,omitempty"` - - // Failed is the number of pods which reached Phase Failed. - Failed int `json:"failed,omitempty"` -} - -type JobConditionType string - -// These are valid conditions of a job. -const ( - // JobComplete means the job has completed its execution. - JobComplete JobConditionType = "Complete" - // JobFailed means the job has failed its execution. - JobFailed JobConditionType = "Failed" -) - -// JobCondition describes current state of a job. -type JobCondition struct { - // Type of job condition, Complete or Failed. - Type JobConditionType `json:"type"` - // Status of the condition, one of True, False, Unknown. - Status api.ConditionStatus `json:"status"` - // Last time the condition was checked. - LastProbeTime unversioned.Time `json:"lastProbeTime,omitempty"` - // Last time the condition transit from one status to another. - LastTransitionTime unversioned.Time `json:"lastTransitionTime,omitempty"` - // (brief) reason for the condition's last transition. - Reason string `json:"reason,omitempty"` - // Human readable message indicating details about last transition. - Message string `json:"message,omitempty"` -} - -// +genclient=true - // Ingress is a collection of rules that allow inbound connections to reach the // endpoints defined by a backend. An Ingress can be configured to give services // externally-reachable urls, load balance traffic, terminate SSL, offer name @@ -627,15 +428,15 @@ type JobCondition struct { type Ingress struct { unversioned.TypeMeta `json:",inline"` // Standard object's metadata. 
- // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata api.ObjectMeta `json:"metadata,omitempty"` // Spec is the desired state of the Ingress. - // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#spec-and-status + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status Spec IngressSpec `json:"spec,omitempty"` // Status is the current state of the Ingress. - // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#spec-and-status + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status Status IngressStatus `json:"status,omitempty"` } @@ -643,7 +444,7 @@ type Ingress struct { type IngressList struct { unversioned.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata unversioned.ListMeta `json:"metadata,omitempty"` // Items is the list of Ingress. @@ -658,10 +459,11 @@ type IngressSpec struct { // specify a global default. Backend *IngressBackend `json:"backend,omitempty"` - // TLS is the TLS configuration. Currently the Ingress only supports a single TLS - // port, 443, and assumes TLS termination. If multiple members of this - // list specify different hosts, they will be multiplexed on the same - // port according to the hostname specified through the SNI TLS extension. + // TLS configuration. Currently the Ingress only supports a single TLS + // port, 443. If multiple members of this list specify different hosts, they + // will be multiplexed on the same port according to the hostname specified + // through the SNI TLS extension, if the ingress controller fulfilling the + // ingress supports SNI. TLS []IngressTLS `json:"tls,omitempty"` // A list of host rules used to configure the Ingress. If unspecified, or @@ -797,12 +599,12 @@ type ReplicaSetList struct { // a Template set. type ReplicaSetSpec struct { // Replicas is the number of desired replicas. - Replicas int `json:"replicas"` + Replicas int32 `json:"replicas"` // Selector is a label query over pods that should match the replica count. // Must match in order to be controlled. // If empty, defaulted to labels on pod template. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/labels.md#label-selectors + // More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors Selector *unversioned.LabelSelector `json:"selector,omitempty"` // Template is the object that describes the pod that will be created if @@ -813,15 +615,17 @@ type ReplicaSetSpec struct { // ReplicaSetStatus represents the current status of a ReplicaSet. type ReplicaSetStatus struct { // Replicas is the number of actual replicas. - Replicas int `json:"replicas"` + Replicas int32 `json:"replicas"` // The number of pods that have labels matching the labels of the pod template of the replicaset. - FullyLabeledReplicas int `json:"fullyLabeledReplicas,omitempty"` + FullyLabeledReplicas int32 `json:"fullyLabeledReplicas,omitempty"` // ObservedGeneration is the most recent generation observed by the controller. 
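The rewritten IngressSpec.TLS comment above relies on SNI to serve several hosts from the single TLS port 443. A standard-library sketch of how a controller might multiplex certificates by SNI hostname; the certByHost store is hypothetical, and this is not any ingress controller's actual code:

package main

import (
	"crypto/tls"
	"fmt"
)

// certByHost stands in for whatever certificate store a controller keeps.
var certByHost = map[string]*tls.Certificate{}

// getCertificate picks a certificate from the hostname the client sent in
// its TLS ClientHello (the SNI extension), so one port can serve many hosts.
func getCertificate(hello *tls.ClientHelloInfo) (*tls.Certificate, error) {
	if cert, ok := certByHost[hello.ServerName]; ok {
		return cert, nil
	}
	return nil, fmt.Errorf("no certificate for host %q", hello.ServerName)
}

func main() {
	cfg := &tls.Config{GetCertificate: getCertificate}
	_ = cfg // hand cfg to tls.NewListener or http.Server.TLSConfig
}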
ObservedGeneration int64 `json:"observedGeneration,omitempty"` } +// +genclient=true,nonNamespaced=true + // PodSecurityPolicy governs the ability to make requests that affect the SecurityContext // that will be applied to a pod and container. type PodSecurityPolicy struct { @@ -836,8 +640,17 @@ type PodSecurityPolicy struct { type PodSecurityPolicySpec struct { // Privileged determines if a pod can request to be run as privileged. Privileged bool `json:"privileged,omitempty"` - // Capabilities is a list of capabilities that can be added. - Capabilities []api.Capability `json:"capabilities,omitempty"` + // DefaultAddCapabilities is the default set of capabilities that will be added to the container + // unless the pod spec specifically drops the capability. You may not list a capability in both + // DefaultAddCapabilities and RequiredDropCapabilities. + DefaultAddCapabilities []api.Capability `json:"defaultAddCapabilities,omitempty"` + // RequiredDropCapabilities are the capabilities that will be dropped from the container. These + // are required to be dropped and cannot be added. + RequiredDropCapabilities []api.Capability `json:"requiredDropCapabilities,omitempty"` + // AllowedCapabilities is a list of capabilities that can be requested to add to the container. + // Capabilities in this field may be added at the pod author's discretion. + // You must not list a capability in both AllowedCapabilities and RequiredDropCapabilities. + AllowedCapabilities []api.Capability `json:"allowedCapabilities,omitempty"` // Volumes is a white list of allowed volume plugins. Empty indicates that all plugins // may be used. Volumes []FSType `json:"volumes,omitempty"` @@ -850,9 +663,19 @@ type PodSecurityPolicySpec struct { // HostIPC determines if the policy allows the use of HostIPC in the pod spec. HostIPC bool `json:"hostIPC,omitempty"` // SELinux is the strategy that will dictate the allowable labels that may be set. - SELinux SELinuxStrategyOptions `json:"seLinux,omitempty"` + SELinux SELinuxStrategyOptions `json:"seLinux"` // RunAsUser is the strategy that will dictate the allowable RunAsUser values that may be set. - RunAsUser RunAsUserStrategyOptions `json:"runAsUser,omitempty"` + RunAsUser RunAsUserStrategyOptions `json:"runAsUser"` + // SupplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. + SupplementalGroups SupplementalGroupsStrategyOptions `json:"supplementalGroups"` + // FSGroup is the strategy that will dictate what fs group is used by the SecurityContext. + FSGroup FSGroupStrategyOptions `json:"fsGroup"` + // ReadOnlyRootFilesystem when set to true will force containers to run with a read only root file + // system. If the container specifically requests to run with a non-read only root file system + // the PSP should deny the pod. + // If set to false the container may run with a read only root file system if it wishes but it + // will not be forced to.
+ ReadOnlyRootFilesystem bool `json:"readOnlyRootFilesystem,omitempty"` } // HostPortRange defines a range of host ports that will be enabled by a policy @@ -868,6 +691,9 @@ type HostPortRange struct { type FSType string var ( + AzureFile FSType = "azureFile" + Flocker FSType = "flocker" + FlexVolume FSType = "flexVolume" HostPath FSType = "hostPath" EmptyDir FSType = "emptyDir" GCEPersistentDisk FSType = "gcePersistentDisk" @@ -883,6 +709,9 @@ var ( CephFS FSType = "cephFS" DownwardAPI FSType = "downwardAPI" FC FSType = "fc" + ConfigMap FSType = "configMap" + VsphereVolume FSType = "vsphereVolume" + All FSType = "*" ) // SELinuxStrategyOptions defines the strategy type and any options used to create the strategy. @@ -890,7 +719,7 @@ type SELinuxStrategyOptions struct { // Rule is the strategy that will dictate the allowable labels that may be set. Rule SELinuxStrategy `json:"rule"` // seLinuxOptions required to run as; required for MustRunAs - // More info: http://releases.k8s.io/release-1.2/docs/design/security_context.md#security-context + // More info: http://releases.k8s.io/HEAD/docs/design/security_context.md#security-context SELinuxOptions *api.SELinuxOptions `json:"seLinuxOptions,omitempty"` } @@ -934,6 +763,46 @@ const ( RunAsUserStrategyRunAsAny RunAsUserStrategy = "RunAsAny" ) +// FSGroupStrategyOptions defines the strategy type and options used to create the strategy. +type FSGroupStrategyOptions struct { + // Rule is the strategy that will dictate what FSGroup is used in the SecurityContext. + Rule FSGroupStrategyType `json:"rule,omitempty"` + // Ranges are the allowed ranges of fs groups. If you would like to force a single + // fs group then supply a single range with the same start and end. + Ranges []IDRange `json:"ranges,omitempty"` +} + +// FSGroupStrategyType denotes strategy types for generating FSGroup values for a +// SecurityContext. +type FSGroupStrategyType string + +const ( + // container must have FSGroup of X applied. + FSGroupStrategyMustRunAs FSGroupStrategyType = "MustRunAs" + // container may make requests for any FSGroup labels. + FSGroupStrategyRunAsAny FSGroupStrategyType = "RunAsAny" +) + +// SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy. +type SupplementalGroupsStrategyOptions struct { + // Rule is the strategy that will dictate what supplemental groups are used in the SecurityContext. + Rule SupplementalGroupsStrategyType `json:"rule,omitempty"` + // Ranges are the allowed ranges of supplemental groups. If you would like to force a single + // supplemental group then supply a single range with the same start and end. + Ranges []IDRange `json:"ranges,omitempty"` +} + +// SupplementalGroupsStrategyType denotes strategy types for determining valid supplemental +// groups for a SecurityContext. +type SupplementalGroupsStrategyType string + +const ( + // container must run as a particular gid. + SupplementalGroupsStrategyMustRunAs SupplementalGroupsStrategyType = "MustRunAs" + // container may make requests for any gid. + SupplementalGroupsStrategyRunAsAny SupplementalGroupsStrategyType = "RunAsAny" +) + // PodSecurityPolicyList is a list of PodSecurityPolicy objects.
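The FSGroup and SupplementalGroups strategies added above reduce to a membership test against IDRange values. A self-contained sketch of that test; the idRange Min/Max field names are assumed from context, since IDRange itself is defined elsewhere in this file:

package main

import "fmt"

// Local stand-ins for the policy types above.
type idRange struct{ Min, Max int64 }

type groupStrategy struct {
	Rule   string // "MustRunAs" or "RunAsAny"
	Ranges []idRange
}

// allows sketches the documented semantics: RunAsAny accepts any gid,
// MustRunAs requires membership in at least one range.
func (s groupStrategy) allows(gid int64) bool {
	if s.Rule == "RunAsAny" {
		return true
	}
	for _, r := range s.Ranges {
		if gid >= r.Min && gid <= r.Max {
			return true
		}
	}
	return false
}

func main() {
	// A single-value range pins the group, as the Ranges comments suggest.
	s := groupStrategy{Rule: "MustRunAs", Ranges: []idRange{{Min: 1000, Max: 1000}}}
	fmt.Println(s.allows(1000), s.allows(1001)) // true false
}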
type PodSecurityPolicyList struct { unversioned.TypeMeta `json:",inline"` @@ -941,3 +810,89 @@ type PodSecurityPolicyList struct { Items []PodSecurityPolicy `json:"items"` } + +type NetworkPolicy struct { + unversioned.TypeMeta `json:",inline"` + api.ObjectMeta `json:"metadata,omitempty"` + + // Specification of the desired behavior for this NetworkPolicy. + Spec NetworkPolicySpec `json:"spec,omitempty"` +} + +type NetworkPolicySpec struct { + // Selects the pods to which this NetworkPolicy object applies. The array of ingress rules + // is applied to any pods selected by this field. Multiple network policies can select the + // same set of pods. In this case, the ingress rules for each are combined additively. + // This field is NOT optional and follows standard label selector semantics. + // An empty podSelector matches all pods in this namespace. + PodSelector unversioned.LabelSelector `json:"podSelector"` + + // List of ingress rules to be applied to the selected pods. + // Traffic is allowed to a pod if namespace.networkPolicy.ingress.isolation is undefined and cluster policy allows it, + // OR if the traffic source is the pod's local node, + // OR if the traffic matches at least one ingress rule across all of the NetworkPolicy + // objects whose podSelector matches the pod. + // If this field is empty then this NetworkPolicy does not affect ingress isolation. + // If this field is present and contains at least one rule, this policy allows any traffic + // which matches at least one of the ingress rules in this list. + Ingress []NetworkPolicyIngressRule `json:"ingress,omitempty"` +} + +// This NetworkPolicyIngressRule matches traffic if and only if the traffic matches both ports AND from. +type NetworkPolicyIngressRule struct { + // List of ports which should be made accessible on the pods selected for this rule. + // Each item in this list is combined using a logical OR. + // If this field is not provided, this rule matches all ports (traffic not restricted by port). + // If this field is empty, this rule matches no ports (no traffic matches). + // If this field is present and contains at least one item, then this rule allows traffic + // only if the traffic matches at least one port in the list. + // TODO: Update this to be a pointer to slice as soon as auto-generation supports it. + Ports []NetworkPolicyPort `json:"ports,omitempty"` + + // List of sources which should be able to access the pods selected for this rule. + // Items in this list are combined using a logical OR operation. + // If this field is not provided, this rule matches all sources (traffic not restricted by source). + // If this field is empty, this rule matches no sources (no traffic matches). + // If this field is present and contains at least one item, this rule allows traffic only if the + // traffic matches at least one item in the from list. + // TODO: Update this to be a pointer to slice as soon as auto-generation supports it. + From []NetworkPolicyPeer `json:"from,omitempty"` +} + +type NetworkPolicyPort struct { + // Optional. The protocol (TCP or UDP) which traffic must match. + // If not specified, this field defaults to TCP. + Protocol *api.Protocol `json:"protocol,omitempty"` + + // If specified, the port on the given protocol. This can + // either be a numerical or named port on a pod. If this field is not provided, + // this matches all port names and numbers. + // If present, only traffic on the specified protocol AND port + // will be matched.
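The NetworkPolicyIngressRule comments above pin down the matching semantics: rules combine with OR across the list, ports and from must both match within one rule, and nil versus empty slices mean "match all" versus "match none". A simplified sketch of those semantics, with label selectors and named ports replaced by plain values:

package main

import "fmt"

// rule is a simplified stand-in for NetworkPolicyIngressRule.
type rule struct {
	Ports []int32  // nil: all ports; empty: no ports
	From  []string // nil: all sources; empty: no sources
}

func matchPorts(r rule, port int32) bool {
	if r.Ports == nil {
		return true
	}
	for _, p := range r.Ports {
		if p == port {
			return true
		}
	}
	return false
}

func matchFrom(r rule, src string) bool {
	if r.From == nil {
		return true
	}
	for _, f := range r.From {
		if f == src {
			return true
		}
	}
	return false
}

// allowed mirrors the documented behavior: OR across rules, AND between
// ports and from inside a single rule.
func allowed(rules []rule, src string, port int32) bool {
	for _, r := range rules {
		if matchPorts(r, port) && matchFrom(r, src) {
			return true
		}
	}
	return false
}

func main() {
	rules := []rule{{Ports: []int32{80}, From: []string{"frontend"}}}
	fmt.Println(allowed(rules, "frontend", 80)) // true
	fmt.Println(allowed(rules, "frontend", 22)) // false: port must also match
}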
+ Port *intstr.IntOrString `json:"port,omitempty"` +} + +type NetworkPolicyPeer struct { + // Exactly one of the following must be specified. + + // This is a label selector which selects Pods in this namespace. + // This field follows standard label selector semantics. + // If not provided, this selector selects no pods. + // If present but empty, this selector selects all pods in this namespace. + PodSelector *unversioned.LabelSelector `json:"podSelector,omitempty"` + + // Selects Namespaces using cluster-scoped labels. This + // matches all pods in all namespaces selected by this label selector. + // This field follows standard label selector semantics. + // If omitted, this selector selects no namespaces. + // If present but empty, this selector selects all namespaces. + NamespaceSelector *unversioned.LabelSelector `json:"namespaceSelector,omitempty"` +} + +// NetworkPolicyList is a list of NetworkPolicy objects. +type NetworkPolicyList struct { + unversioned.TypeMeta `json:",inline"` + unversioned.ListMeta `json:"metadata,omitempty"` + + Items []NetworkPolicy `json:"items"` +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/conversion.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/conversion.go index 5f4841cb4314..ad5c91c90e50 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/conversion.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/conversion.go @@ -18,11 +18,12 @@ package v1beta1 import ( "fmt" - "reflect" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/unversioned" v1 "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/apis/autoscaling" + "k8s.io/kubernetes/pkg/apis/batch" "k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/conversion" "k8s.io/kubernetes/pkg/runtime" @@ -32,8 +33,6 @@ import ( func addConversionFuncs(scheme *runtime.Scheme) { // Add non-generated conversion functions err := scheme.AddConversionFuncs( - Convert_api_PodSpec_To_v1_PodSpec, - Convert_v1_PodSpec_To_api_PodSpec, Convert_extensions_ScaleStatus_To_v1beta1_ScaleStatus, Convert_v1beta1_ScaleStatus_To_extensions_ScaleStatus, Convert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec, @@ -44,8 +43,14 @@ func addConversionFuncs(scheme *runtime.Scheme) { Convert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment, Convert_extensions_ReplicaSetSpec_To_v1beta1_ReplicaSetSpec, Convert_v1beta1_ReplicaSetSpec_To_extensions_ReplicaSetSpec, - Convert_extensions_JobSpec_To_v1beta1_JobSpec, - Convert_v1beta1_JobSpec_To_extensions_JobSpec, + // autoscaling + Convert_autoscaling_CrossVersionObjectReference_To_v1beta1_SubresourceReference, + Convert_v1beta1_SubresourceReference_To_autoscaling_CrossVersionObjectReference, + Convert_autoscaling_HorizontalPodAutoscalerSpec_To_v1beta1_HorizontalPodAutoscalerSpec, + Convert_v1beta1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec, + // batch + Convert_batch_JobSpec_To_v1beta1_JobSpec, + Convert_v1beta1_JobSpec_To_batch_JobSpec, ) if err != nil { // If one of the conversion functions is malformed, detect it immediately. @@ -84,21 +89,7 @@ func addConversionFuncs(scheme *runtime.Scheme) { } } -// The following two PodSpec conversions functions where copied from pkg/api/conversion.go -// for the generated functions to work properly.
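addConversionFuncs above registers hand-written conversions with the scheme and panics on a malformed function, so registration bugs surface at startup rather than at conversion time. A toy sketch of that registration shape, keyed by (in, out) pointer types; the miniScheme type is hypothetical and omits conversion.Scope and error returns for brevity:

package main

import (
	"fmt"
	"reflect"
)

// miniScheme imitates the registry idea behind scheme.AddConversionFuncs.
type miniScheme struct {
	funcs map[[2]reflect.Type]reflect.Value
}

// addConversionFuncs validates each function's shape up front, which is
// why the caller above can safely panic on the returned error.
func (s *miniScheme) addConversionFuncs(fns ...interface{}) error {
	for _, fn := range fns {
		v := reflect.ValueOf(fn)
		t := v.Type()
		if t.Kind() != reflect.Func || t.NumIn() != 2 ||
			t.In(0).Kind() != reflect.Ptr || t.In(1).Kind() != reflect.Ptr {
			return fmt.Errorf("conversion function %v is malformed", t)
		}
		s.funcs[[2]reflect.Type{t.In(0), t.In(1)}] = v
	}
	return nil
}

func main() {
	s := &miniScheme{funcs: map[[2]reflect.Type]reflect.Value{}}
	type a struct{ N int32 }
	type b struct{ N int32 }
	err := s.addConversionFuncs(func(in *a, out *b) { out.N = in.N })
	fmt.Println(err, len(s.funcs)) // <nil> 1
}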
-// This should be fixed: https://github.com/kubernetes/kubernetes/issues/12977 -func Convert_api_PodSpec_To_v1_PodSpec(in *api.PodSpec, out *v1.PodSpec, s conversion.Scope) error { - return v1.Convert_api_PodSpec_To_v1_PodSpec(in, out, s) -} - -func Convert_v1_PodSpec_To_api_PodSpec(in *v1.PodSpec, out *api.PodSpec, s conversion.Scope) error { - return v1.Convert_v1_PodSpec_To_api_PodSpec(in, out, s) -} - func Convert_extensions_ScaleStatus_To_v1beta1_ScaleStatus(in *extensions.ScaleStatus, out *ScaleStatus, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*extensions.ScaleStatus))(in) - } out.Replicas = int32(in.Replicas) out.Selector = nil @@ -118,10 +109,7 @@ func Convert_extensions_ScaleStatus_To_v1beta1_ScaleStatus(in *extensions.ScaleS } func Convert_v1beta1_ScaleStatus_To_extensions_ScaleStatus(in *ScaleStatus, out *extensions.ScaleStatus, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*ScaleStatus))(in) - } - out.Replicas = int(in.Replicas) + out.Replicas = in.Replicas // Normally when 2 fields map to the same internal value we favor the old field, since // old clients can't be expected to know about new fields but clients that know about the @@ -148,11 +136,7 @@ func Convert_v1beta1_ScaleStatus_To_extensions_ScaleStatus(in *ScaleStatus, out } func Convert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec(in *extensions.DeploymentSpec, out *DeploymentSpec, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*extensions.DeploymentSpec))(in) - } - out.Replicas = new(int32) - *out.Replicas = int32(in.Replicas) + out.Replicas = &in.Replicas if in.Selector != nil { out.Selector = new(LabelSelector) if err := Convert_unversioned_LabelSelector_To_v1beta1_LabelSelector(in.Selector, out.Selector, s); err != nil { @@ -183,11 +167,8 @@ func Convert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec(in *extensions. 
} func Convert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec(in *DeploymentSpec, out *extensions.DeploymentSpec, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*DeploymentSpec))(in) - } if in.Replicas != nil { - out.Replicas = int(*in.Replicas) + out.Replicas = *in.Replicas } if in.Selector != nil { @@ -204,11 +185,8 @@ func Convert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec(in *DeploymentS if err := Convert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { return err } - if in.RevisionHistoryLimit != nil { - out.RevisionHistoryLimit = new(int) - *out.RevisionHistoryLimit = int(*in.RevisionHistoryLimit) - } - out.MinReadySeconds = int(in.MinReadySeconds) + out.RevisionHistoryLimit = in.RevisionHistoryLimit + out.MinReadySeconds = in.MinReadySeconds out.Paused = in.Paused if in.RollbackTo != nil { out.RollbackTo = new(extensions.RollbackConfig) @@ -220,9 +198,6 @@ func Convert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec(in *DeploymentS } func Convert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy(in *extensions.DeploymentStrategy, out *DeploymentStrategy, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*extensions.DeploymentStrategy))(in) - } out.Type = DeploymentStrategyType(in.Type) if in.RollingUpdate != nil { out.RollingUpdate = new(RollingUpdateDeployment) @@ -236,9 +211,6 @@ func Convert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy(in *ext } func Convert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy(in *DeploymentStrategy, out *extensions.DeploymentStrategy, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*DeploymentStrategy))(in) - } out.Type = extensions.DeploymentStrategyType(in.Type) if in.RollingUpdate != nil { out.RollingUpdate = new(extensions.RollingUpdateDeployment) @@ -252,9 +224,6 @@ func Convert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy(in *Dep } func Convert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(in *extensions.RollingUpdateDeployment, out *RollingUpdateDeployment, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*extensions.RollingUpdateDeployment))(in) - } if out.MaxUnavailable == nil { out.MaxUnavailable = &intstr.IntOrString{} } @@ -271,9 +240,6 @@ func Convert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployme } func Convert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(in *RollingUpdateDeployment, out *extensions.RollingUpdateDeployment, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*RollingUpdateDeployment))(in) - } if err := s.Convert(in.MaxUnavailable, &out.MaxUnavailable, 0); err != nil { return err } @@ -284,9 +250,6 @@ func Convert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployme } func Convert_extensions_ReplicaSetSpec_To_v1beta1_ReplicaSetSpec(in *extensions.ReplicaSetSpec, out *ReplicaSetSpec, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*extensions.ReplicaSetSpec))(in) - } out.Replicas = new(int32) *out.Replicas = int32(in.Replicas) if in.Selector != nil { @@ -305,11 
+268,8 @@ func Convert_extensions_ReplicaSetSpec_To_v1beta1_ReplicaSetSpec(in *extensions. } func Convert_v1beta1_ReplicaSetSpec_To_extensions_ReplicaSetSpec(in *ReplicaSetSpec, out *extensions.ReplicaSetSpec, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*ReplicaSetSpec))(in) - } if in.Replicas != nil { - out.Replicas = int(*in.Replicas) + out.Replicas = *in.Replicas } if in.Selector != nil { out.Selector = new(unversioned.LabelSelector) @@ -325,28 +285,10 @@ func Convert_v1beta1_ReplicaSetSpec_To_extensions_ReplicaSetSpec(in *ReplicaSetS return nil } -func Convert_extensions_JobSpec_To_v1beta1_JobSpec(in *extensions.JobSpec, out *JobSpec, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*extensions.JobSpec))(in) - } - if in.Parallelism != nil { - out.Parallelism = new(int32) - *out.Parallelism = int32(*in.Parallelism) - } else { - out.Parallelism = nil - } - if in.Completions != nil { - out.Completions = new(int32) - *out.Completions = int32(*in.Completions) - } else { - out.Completions = nil - } - if in.ActiveDeadlineSeconds != nil { - out.ActiveDeadlineSeconds = new(int64) - *out.ActiveDeadlineSeconds = *in.ActiveDeadlineSeconds - } else { - out.ActiveDeadlineSeconds = nil - } +func Convert_batch_JobSpec_To_v1beta1_JobSpec(in *batch.JobSpec, out *JobSpec, s conversion.Scope) error { + out.Parallelism = in.Parallelism + out.Completions = in.Completions + out.ActiveDeadlineSeconds = in.ActiveDeadlineSeconds // unable to generate simple pointer conversion for unversioned.LabelSelector -> v1beta1.LabelSelector if in.Selector != nil { out.Selector = new(LabelSelector) @@ -371,34 +313,16 @@ func Convert_extensions_JobSpec_To_v1beta1_JobSpec(in *extensions.JobSpec, out * } // END non-standard conversion - if err := Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + if err := v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } return nil } -func Convert_v1beta1_JobSpec_To_extensions_JobSpec(in *JobSpec, out *extensions.JobSpec, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*JobSpec))(in) - } - if in.Parallelism != nil { - out.Parallelism = new(int) - *out.Parallelism = int(*in.Parallelism) - } else { - out.Parallelism = nil - } - if in.Completions != nil { - out.Completions = new(int) - *out.Completions = int(*in.Completions) - } else { - out.Completions = nil - } - if in.ActiveDeadlineSeconds != nil { - out.ActiveDeadlineSeconds = new(int64) - *out.ActiveDeadlineSeconds = *in.ActiveDeadlineSeconds - } else { - out.ActiveDeadlineSeconds = nil - } +func Convert_v1beta1_JobSpec_To_batch_JobSpec(in *JobSpec, out *batch.JobSpec, s conversion.Scope) error { + out.Parallelism = in.Parallelism + out.Completions = in.Completions + out.ActiveDeadlineSeconds = in.ActiveDeadlineSeconds // unable to generate simple pointer conversion for v1beta1.LabelSelector -> unversioned.LabelSelector if in.Selector != nil { out.Selector = new(unversioned.LabelSelector) @@ -423,8 +347,58 @@ func Convert_v1beta1_JobSpec_To_extensions_JobSpec(in *JobSpec, out *extensions. 
} // END non-standard conversion - if err := Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + if err := v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + return nil +} + +func Convert_autoscaling_CrossVersionObjectReference_To_v1beta1_SubresourceReference(in *autoscaling.CrossVersionObjectReference, out *SubresourceReference, s conversion.Scope) error { + out.Kind = in.Kind + out.Name = in.Name + out.APIVersion = in.APIVersion + out.Subresource = "scale" + return nil +} + +func Convert_v1beta1_SubresourceReference_To_autoscaling_CrossVersionObjectReference(in *SubresourceReference, out *autoscaling.CrossVersionObjectReference, s conversion.Scope) error { + out.Kind = in.Kind + out.Name = in.Name + out.APIVersion = in.APIVersion + return nil +} + +func Convert_autoscaling_HorizontalPodAutoscalerSpec_To_v1beta1_HorizontalPodAutoscalerSpec(in *autoscaling.HorizontalPodAutoscalerSpec, out *HorizontalPodAutoscalerSpec, s conversion.Scope) error { + if err := Convert_autoscaling_CrossVersionObjectReference_To_v1beta1_SubresourceReference(&in.ScaleTargetRef, &out.ScaleRef, s); err != nil { + return err + } + if in.MinReplicas != nil { + out.MinReplicas = new(int32) + *out.MinReplicas = *in.MinReplicas + } else { + out.MinReplicas = nil + } + out.MaxReplicas = in.MaxReplicas + if in.TargetCPUUtilizationPercentage != nil { + out.CPUUtilization = &CPUTargetUtilization{TargetPercentage: *in.TargetCPUUtilizationPercentage} + } + return nil +} + +func Convert_v1beta1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec(in *HorizontalPodAutoscalerSpec, out *autoscaling.HorizontalPodAutoscalerSpec, s conversion.Scope) error { + if err := Convert_v1beta1_SubresourceReference_To_autoscaling_CrossVersionObjectReference(&in.ScaleRef, &out.ScaleTargetRef, s); err != nil { return err } + if in.MinReplicas != nil { + out.MinReplicas = new(int32) + *out.MinReplicas = int32(*in.MinReplicas) + } else { + out.MinReplicas = nil + } + out.MaxReplicas = int32(in.MaxReplicas) + if in.CPUUtilization != nil { + out.TargetCPUUtilizationPercentage = new(int32) + *out.TargetCPUUtilizationPercentage = int32(in.CPUUtilization.TargetPercentage) + } return nil } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/conversion_generated.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/conversion_generated.go index 26ba97ece0cb..445394f16a59 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/conversion_generated.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/conversion_generated.go @@ -1,5 +1,7 @@ +// +build !ignore_autogenerated + /* -Copyright 2015 The Kubernetes Authors All rights reserved. +Copyright 2016 The Kubernetes Authors All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,149 +16,225 @@ See the License for the specific language governing permissions and limitations under the License. */ -// DO NOT EDIT. THIS FILE IS AUTO-GENERATED BY $KUBEROOT/hack/update-generated-conversions.sh +// This file was autogenerated by conversion-gen. Do not edit it manually! 
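The regenerated file below follows conversion-gen's two-layer layout: an unexported autoConvert_* function holds the mechanical field copies, an exported Convert_* wrapper delegates to it (and can be replaced by hand-written code when a field needs special handling), and init() registers every wrapper with the scheme. A sketch of that layout with hypothetical Widget types:

package main

import "fmt"

// Hypothetical versioned and internal types standing in for the pairs that
// conversion-gen walks.
type versionedWidget struct{ Name string }
type internalWidget struct{ Name string }

// autoConvert_* carries the mechanically generated field copies.
func autoConvert_v1beta1_Widget_To_internal_Widget(in *versionedWidget, out *internalWidget) error {
	out.Name = in.Name
	return nil
}

// The exported Convert_* wrapper is what gets registered; hand-written
// conversions shadow the auto version when needed.
func Convert_v1beta1_Widget_To_internal_Widget(in *versionedWidget, out *internalWidget) error {
	return autoConvert_v1beta1_Widget_To_internal_Widget(in, out)
}

func main() {
	var out internalWidget
	if err := Convert_v1beta1_Widget_To_internal_Widget(&versionedWidget{Name: "np"}, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.Name) // np
}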
package v1beta1 import ( - reflect "reflect" - api "k8s.io/kubernetes/pkg/api" - resource "k8s.io/kubernetes/pkg/api/resource" unversioned "k8s.io/kubernetes/pkg/api/unversioned" v1 "k8s.io/kubernetes/pkg/api/v1" + autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling" + batch "k8s.io/kubernetes/pkg/apis/batch" extensions "k8s.io/kubernetes/pkg/apis/extensions" conversion "k8s.io/kubernetes/pkg/conversion" ) -func autoConvert_api_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource(in *api.AWSElasticBlockStoreVolumeSource, out *v1.AWSElasticBlockStoreVolumeSource, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.AWSElasticBlockStoreVolumeSource))(in) +func init() { + if err := api.Scheme.AddGeneratedConversionFuncs( + Convert_v1beta1_APIVersion_To_extensions_APIVersion, + Convert_extensions_APIVersion_To_v1beta1_APIVersion, + Convert_v1beta1_CustomMetricCurrentStatus_To_extensions_CustomMetricCurrentStatus, + Convert_extensions_CustomMetricCurrentStatus_To_v1beta1_CustomMetricCurrentStatus, + Convert_v1beta1_CustomMetricCurrentStatusList_To_extensions_CustomMetricCurrentStatusList, + Convert_extensions_CustomMetricCurrentStatusList_To_v1beta1_CustomMetricCurrentStatusList, + Convert_v1beta1_CustomMetricTarget_To_extensions_CustomMetricTarget, + Convert_extensions_CustomMetricTarget_To_v1beta1_CustomMetricTarget, + Convert_v1beta1_CustomMetricTargetList_To_extensions_CustomMetricTargetList, + Convert_extensions_CustomMetricTargetList_To_v1beta1_CustomMetricTargetList, + Convert_v1beta1_DaemonSet_To_extensions_DaemonSet, + Convert_extensions_DaemonSet_To_v1beta1_DaemonSet, + Convert_v1beta1_DaemonSetList_To_extensions_DaemonSetList, + Convert_extensions_DaemonSetList_To_v1beta1_DaemonSetList, + Convert_v1beta1_DaemonSetSpec_To_extensions_DaemonSetSpec, + Convert_extensions_DaemonSetSpec_To_v1beta1_DaemonSetSpec, + Convert_v1beta1_DaemonSetStatus_To_extensions_DaemonSetStatus, + Convert_extensions_DaemonSetStatus_To_v1beta1_DaemonSetStatus, + Convert_v1beta1_Deployment_To_extensions_Deployment, + Convert_extensions_Deployment_To_v1beta1_Deployment, + Convert_v1beta1_DeploymentList_To_extensions_DeploymentList, + Convert_extensions_DeploymentList_To_v1beta1_DeploymentList, + Convert_v1beta1_DeploymentRollback_To_extensions_DeploymentRollback, + Convert_extensions_DeploymentRollback_To_v1beta1_DeploymentRollback, + Convert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec, + Convert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec, + Convert_v1beta1_DeploymentStatus_To_extensions_DeploymentStatus, + Convert_extensions_DeploymentStatus_To_v1beta1_DeploymentStatus, + Convert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy, + Convert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy, + Convert_v1beta1_FSGroupStrategyOptions_To_extensions_FSGroupStrategyOptions, + Convert_extensions_FSGroupStrategyOptions_To_v1beta1_FSGroupStrategyOptions, + Convert_v1beta1_HTTPIngressPath_To_extensions_HTTPIngressPath, + Convert_extensions_HTTPIngressPath_To_v1beta1_HTTPIngressPath, + Convert_v1beta1_HTTPIngressRuleValue_To_extensions_HTTPIngressRuleValue, + Convert_extensions_HTTPIngressRuleValue_To_v1beta1_HTTPIngressRuleValue, + Convert_v1beta1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler, + Convert_autoscaling_HorizontalPodAutoscaler_To_v1beta1_HorizontalPodAutoscaler, + Convert_v1beta1_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList, + 
Convert_autoscaling_HorizontalPodAutoscalerList_To_v1beta1_HorizontalPodAutoscalerList, + Convert_v1beta1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec, + Convert_autoscaling_HorizontalPodAutoscalerSpec_To_v1beta1_HorizontalPodAutoscalerSpec, + Convert_v1beta1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus, + Convert_autoscaling_HorizontalPodAutoscalerStatus_To_v1beta1_HorizontalPodAutoscalerStatus, + Convert_v1beta1_HostPortRange_To_extensions_HostPortRange, + Convert_extensions_HostPortRange_To_v1beta1_HostPortRange, + Convert_v1beta1_IDRange_To_extensions_IDRange, + Convert_extensions_IDRange_To_v1beta1_IDRange, + Convert_v1beta1_Ingress_To_extensions_Ingress, + Convert_extensions_Ingress_To_v1beta1_Ingress, + Convert_v1beta1_IngressBackend_To_extensions_IngressBackend, + Convert_extensions_IngressBackend_To_v1beta1_IngressBackend, + Convert_v1beta1_IngressList_To_extensions_IngressList, + Convert_extensions_IngressList_To_v1beta1_IngressList, + Convert_v1beta1_IngressRule_To_extensions_IngressRule, + Convert_extensions_IngressRule_To_v1beta1_IngressRule, + Convert_v1beta1_IngressRuleValue_To_extensions_IngressRuleValue, + Convert_extensions_IngressRuleValue_To_v1beta1_IngressRuleValue, + Convert_v1beta1_IngressSpec_To_extensions_IngressSpec, + Convert_extensions_IngressSpec_To_v1beta1_IngressSpec, + Convert_v1beta1_IngressStatus_To_extensions_IngressStatus, + Convert_extensions_IngressStatus_To_v1beta1_IngressStatus, + Convert_v1beta1_IngressTLS_To_extensions_IngressTLS, + Convert_extensions_IngressTLS_To_v1beta1_IngressTLS, + Convert_v1beta1_Job_To_batch_Job, + Convert_batch_Job_To_v1beta1_Job, + Convert_v1beta1_JobCondition_To_batch_JobCondition, + Convert_batch_JobCondition_To_v1beta1_JobCondition, + Convert_v1beta1_JobList_To_batch_JobList, + Convert_batch_JobList_To_v1beta1_JobList, + Convert_v1beta1_JobSpec_To_batch_JobSpec, + Convert_batch_JobSpec_To_v1beta1_JobSpec, + Convert_v1beta1_JobStatus_To_batch_JobStatus, + Convert_batch_JobStatus_To_v1beta1_JobStatus, + Convert_v1beta1_LabelSelector_To_unversioned_LabelSelector, + Convert_unversioned_LabelSelector_To_v1beta1_LabelSelector, + Convert_v1beta1_LabelSelectorRequirement_To_unversioned_LabelSelectorRequirement, + Convert_unversioned_LabelSelectorRequirement_To_v1beta1_LabelSelectorRequirement, + Convert_v1beta1_NetworkPolicy_To_extensions_NetworkPolicy, + Convert_extensions_NetworkPolicy_To_v1beta1_NetworkPolicy, + Convert_v1beta1_NetworkPolicyIngressRule_To_extensions_NetworkPolicyIngressRule, + Convert_extensions_NetworkPolicyIngressRule_To_v1beta1_NetworkPolicyIngressRule, + Convert_v1beta1_NetworkPolicyList_To_extensions_NetworkPolicyList, + Convert_extensions_NetworkPolicyList_To_v1beta1_NetworkPolicyList, + Convert_v1beta1_NetworkPolicyPeer_To_extensions_NetworkPolicyPeer, + Convert_extensions_NetworkPolicyPeer_To_v1beta1_NetworkPolicyPeer, + Convert_v1beta1_NetworkPolicyPort_To_extensions_NetworkPolicyPort, + Convert_extensions_NetworkPolicyPort_To_v1beta1_NetworkPolicyPort, + Convert_v1beta1_NetworkPolicySpec_To_extensions_NetworkPolicySpec, + Convert_extensions_NetworkPolicySpec_To_v1beta1_NetworkPolicySpec, + Convert_v1beta1_PodSecurityPolicy_To_extensions_PodSecurityPolicy, + Convert_extensions_PodSecurityPolicy_To_v1beta1_PodSecurityPolicy, + Convert_v1beta1_PodSecurityPolicyList_To_extensions_PodSecurityPolicyList, + Convert_extensions_PodSecurityPolicyList_To_v1beta1_PodSecurityPolicyList, + 
Convert_v1beta1_PodSecurityPolicySpec_To_extensions_PodSecurityPolicySpec, + Convert_extensions_PodSecurityPolicySpec_To_v1beta1_PodSecurityPolicySpec, + Convert_v1beta1_ReplicaSet_To_extensions_ReplicaSet, + Convert_extensions_ReplicaSet_To_v1beta1_ReplicaSet, + Convert_v1beta1_ReplicaSetList_To_extensions_ReplicaSetList, + Convert_extensions_ReplicaSetList_To_v1beta1_ReplicaSetList, + Convert_v1beta1_ReplicaSetSpec_To_extensions_ReplicaSetSpec, + Convert_extensions_ReplicaSetSpec_To_v1beta1_ReplicaSetSpec, + Convert_v1beta1_ReplicaSetStatus_To_extensions_ReplicaSetStatus, + Convert_extensions_ReplicaSetStatus_To_v1beta1_ReplicaSetStatus, + Convert_v1beta1_ReplicationControllerDummy_To_extensions_ReplicationControllerDummy, + Convert_extensions_ReplicationControllerDummy_To_v1beta1_ReplicationControllerDummy, + Convert_v1beta1_RollbackConfig_To_extensions_RollbackConfig, + Convert_extensions_RollbackConfig_To_v1beta1_RollbackConfig, + Convert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment, + Convert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment, + Convert_v1beta1_RunAsUserStrategyOptions_To_extensions_RunAsUserStrategyOptions, + Convert_extensions_RunAsUserStrategyOptions_To_v1beta1_RunAsUserStrategyOptions, + Convert_v1beta1_SELinuxStrategyOptions_To_extensions_SELinuxStrategyOptions, + Convert_extensions_SELinuxStrategyOptions_To_v1beta1_SELinuxStrategyOptions, + Convert_v1beta1_Scale_To_extensions_Scale, + Convert_extensions_Scale_To_v1beta1_Scale, + Convert_v1beta1_ScaleSpec_To_extensions_ScaleSpec, + Convert_extensions_ScaleSpec_To_v1beta1_ScaleSpec, + Convert_v1beta1_ScaleStatus_To_extensions_ScaleStatus, + Convert_extensions_ScaleStatus_To_v1beta1_ScaleStatus, + Convert_v1beta1_SupplementalGroupsStrategyOptions_To_extensions_SupplementalGroupsStrategyOptions, + Convert_extensions_SupplementalGroupsStrategyOptions_To_v1beta1_SupplementalGroupsStrategyOptions, + Convert_v1beta1_ThirdPartyResource_To_extensions_ThirdPartyResource, + Convert_extensions_ThirdPartyResource_To_v1beta1_ThirdPartyResource, + Convert_v1beta1_ThirdPartyResourceData_To_extensions_ThirdPartyResourceData, + Convert_extensions_ThirdPartyResourceData_To_v1beta1_ThirdPartyResourceData, + Convert_v1beta1_ThirdPartyResourceDataList_To_extensions_ThirdPartyResourceDataList, + Convert_extensions_ThirdPartyResourceDataList_To_v1beta1_ThirdPartyResourceDataList, + Convert_v1beta1_ThirdPartyResourceList_To_extensions_ThirdPartyResourceList, + Convert_extensions_ThirdPartyResourceList_To_v1beta1_ThirdPartyResourceList, + ); err != nil { + // if one of the conversion functions is malformed, detect it immediately. 
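+		// A minimal usage sketch (not part of the generated file), assuming an
+		// internal extensions.DaemonSet value ds is in scope: once these pairs
+		// are registered, api.Scheme.Convert can dispatch to them directly
+		// instead of taking the slower reflection-based fallback, e.g.
+		//
+		//	versioned := &DaemonSet{}
+		//	if err := api.Scheme.Convert(&ds, versioned); err != nil {
+		//		// handle the conversion error
+		//	}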
+ panic(err) } - out.VolumeID = in.VolumeID - out.FSType = in.FSType - out.Partition = int32(in.Partition) - out.ReadOnly = in.ReadOnly - return nil -} - -func Convert_api_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource(in *api.AWSElasticBlockStoreVolumeSource, out *v1.AWSElasticBlockStoreVolumeSource, s conversion.Scope) error { - return autoConvert_api_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource(in, out, s) } -func autoConvert_api_AzureFileVolumeSource_To_v1_AzureFileVolumeSource(in *api.AzureFileVolumeSource, out *v1.AzureFileVolumeSource, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.AzureFileVolumeSource))(in) - } - out.SecretName = in.SecretName - out.ShareName = in.ShareName - out.ReadOnly = in.ReadOnly +func autoConvert_v1beta1_APIVersion_To_extensions_APIVersion(in *APIVersion, out *extensions.APIVersion, s conversion.Scope) error { + out.Name = in.Name return nil } -func Convert_api_AzureFileVolumeSource_To_v1_AzureFileVolumeSource(in *api.AzureFileVolumeSource, out *v1.AzureFileVolumeSource, s conversion.Scope) error { - return autoConvert_api_AzureFileVolumeSource_To_v1_AzureFileVolumeSource(in, out, s) +func Convert_v1beta1_APIVersion_To_extensions_APIVersion(in *APIVersion, out *extensions.APIVersion, s conversion.Scope) error { + return autoConvert_v1beta1_APIVersion_To_extensions_APIVersion(in, out, s) } -func autoConvert_api_Capabilities_To_v1_Capabilities(in *api.Capabilities, out *v1.Capabilities, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.Capabilities))(in) - } - if in.Add != nil { - out.Add = make([]v1.Capability, len(in.Add)) - for i := range in.Add { - out.Add[i] = v1.Capability(in.Add[i]) - } - } else { - out.Add = nil - } - if in.Drop != nil { - out.Drop = make([]v1.Capability, len(in.Drop)) - for i := range in.Drop { - out.Drop[i] = v1.Capability(in.Drop[i]) - } - } else { - out.Drop = nil - } +func autoConvert_extensions_APIVersion_To_v1beta1_APIVersion(in *extensions.APIVersion, out *APIVersion, s conversion.Scope) error { + out.Name = in.Name return nil } -func Convert_api_Capabilities_To_v1_Capabilities(in *api.Capabilities, out *v1.Capabilities, s conversion.Scope) error { - return autoConvert_api_Capabilities_To_v1_Capabilities(in, out, s) +func Convert_extensions_APIVersion_To_v1beta1_APIVersion(in *extensions.APIVersion, out *APIVersion, s conversion.Scope) error { + return autoConvert_extensions_APIVersion_To_v1beta1_APIVersion(in, out, s) } -func autoConvert_api_CephFSVolumeSource_To_v1_CephFSVolumeSource(in *api.CephFSVolumeSource, out *v1.CephFSVolumeSource, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.CephFSVolumeSource))(in) - } - if in.Monitors != nil { - out.Monitors = make([]string, len(in.Monitors)) - for i := range in.Monitors { - out.Monitors[i] = in.Monitors[i] - } - } else { - out.Monitors = nil - } - out.Path = in.Path - out.User = in.User - out.SecretFile = in.SecretFile - // unable to generate simple pointer conversion for api.LocalObjectReference -> v1.LocalObjectReference - if in.SecretRef != nil { - out.SecretRef = new(v1.LocalObjectReference) - if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(in.SecretRef, out.SecretRef, s); err != nil { - return err - } - } else { - out.SecretRef = nil +func 
autoConvert_v1beta1_CustomMetricCurrentStatus_To_extensions_CustomMetricCurrentStatus(in *CustomMetricCurrentStatus, out *extensions.CustomMetricCurrentStatus, s conversion.Scope) error { + out.Name = in.Name + if err := api.Convert_resource_Quantity_To_resource_Quantity(&in.CurrentValue, &out.CurrentValue, s); err != nil { + return err } - out.ReadOnly = in.ReadOnly return nil } -func Convert_api_CephFSVolumeSource_To_v1_CephFSVolumeSource(in *api.CephFSVolumeSource, out *v1.CephFSVolumeSource, s conversion.Scope) error { - return autoConvert_api_CephFSVolumeSource_To_v1_CephFSVolumeSource(in, out, s) +func Convert_v1beta1_CustomMetricCurrentStatus_To_extensions_CustomMetricCurrentStatus(in *CustomMetricCurrentStatus, out *extensions.CustomMetricCurrentStatus, s conversion.Scope) error { + return autoConvert_v1beta1_CustomMetricCurrentStatus_To_extensions_CustomMetricCurrentStatus(in, out, s) } -func autoConvert_api_CinderVolumeSource_To_v1_CinderVolumeSource(in *api.CinderVolumeSource, out *v1.CinderVolumeSource, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.CinderVolumeSource))(in) +func autoConvert_extensions_CustomMetricCurrentStatus_To_v1beta1_CustomMetricCurrentStatus(in *extensions.CustomMetricCurrentStatus, out *CustomMetricCurrentStatus, s conversion.Scope) error { + out.Name = in.Name + if err := api.Convert_resource_Quantity_To_resource_Quantity(&in.CurrentValue, &out.CurrentValue, s); err != nil { + return err } - out.VolumeID = in.VolumeID - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly return nil } -func Convert_api_CinderVolumeSource_To_v1_CinderVolumeSource(in *api.CinderVolumeSource, out *v1.CinderVolumeSource, s conversion.Scope) error { - return autoConvert_api_CinderVolumeSource_To_v1_CinderVolumeSource(in, out, s) +func Convert_extensions_CustomMetricCurrentStatus_To_v1beta1_CustomMetricCurrentStatus(in *extensions.CustomMetricCurrentStatus, out *CustomMetricCurrentStatus, s conversion.Scope) error { + return autoConvert_extensions_CustomMetricCurrentStatus_To_v1beta1_CustomMetricCurrentStatus(in, out, s) } -func autoConvert_api_ConfigMapKeySelector_To_v1_ConfigMapKeySelector(in *api.ConfigMapKeySelector, out *v1.ConfigMapKeySelector, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.ConfigMapKeySelector))(in) - } - if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { - return err +func autoConvert_v1beta1_CustomMetricCurrentStatusList_To_extensions_CustomMetricCurrentStatusList(in *CustomMetricCurrentStatusList, out *extensions.CustomMetricCurrentStatusList, s conversion.Scope) error { + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]extensions.CustomMetricCurrentStatus, len(*in)) + for i := range *in { + if err := Convert_v1beta1_CustomMetricCurrentStatus_To_extensions_CustomMetricCurrentStatus(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil } - out.Key = in.Key return nil } -func Convert_api_ConfigMapKeySelector_To_v1_ConfigMapKeySelector(in *api.ConfigMapKeySelector, out *v1.ConfigMapKeySelector, s conversion.Scope) error { - return autoConvert_api_ConfigMapKeySelector_To_v1_ConfigMapKeySelector(in, out, s) +func Convert_v1beta1_CustomMetricCurrentStatusList_To_extensions_CustomMetricCurrentStatusList(in *CustomMetricCurrentStatusList, out 
*extensions.CustomMetricCurrentStatusList, s conversion.Scope) error { + return autoConvert_v1beta1_CustomMetricCurrentStatusList_To_extensions_CustomMetricCurrentStatusList(in, out, s) } -func autoConvert_api_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource(in *api.ConfigMapVolumeSource, out *v1.ConfigMapVolumeSource, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.ConfigMapVolumeSource))(in) - } - if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { - return err - } +func autoConvert_extensions_CustomMetricCurrentStatusList_To_v1beta1_CustomMetricCurrentStatusList(in *extensions.CustomMetricCurrentStatusList, out *CustomMetricCurrentStatusList, s conversion.Scope) error { if in.Items != nil { - out.Items = make([]v1.KeyToPath, len(in.Items)) - for i := range in.Items { - if err := Convert_api_KeyToPath_To_v1_KeyToPath(&in.Items[i], &out.Items[i], s); err != nil { + in, out := &in.Items, &out.Items + *out = make([]CustomMetricCurrentStatus, len(*in)) + for i := range *in { + if err := Convert_extensions_CustomMetricCurrentStatus_To_v1beta1_CustomMetricCurrentStatus(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -166,153 +244,59 @@ func autoConvert_api_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource(in *api.C return nil } -func Convert_api_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource(in *api.ConfigMapVolumeSource, out *v1.ConfigMapVolumeSource, s conversion.Scope) error { - return autoConvert_api_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource(in, out, s) +func Convert_extensions_CustomMetricCurrentStatusList_To_v1beta1_CustomMetricCurrentStatusList(in *extensions.CustomMetricCurrentStatusList, out *CustomMetricCurrentStatusList, s conversion.Scope) error { + return autoConvert_extensions_CustomMetricCurrentStatusList_To_v1beta1_CustomMetricCurrentStatusList(in, out, s) } -func autoConvert_api_Container_To_v1_Container(in *api.Container, out *v1.Container, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.Container))(in) - } +func autoConvert_v1beta1_CustomMetricTarget_To_extensions_CustomMetricTarget(in *CustomMetricTarget, out *extensions.CustomMetricTarget, s conversion.Scope) error { out.Name = in.Name - out.Image = in.Image - if in.Command != nil { - out.Command = make([]string, len(in.Command)) - for i := range in.Command { - out.Command[i] = in.Command[i] - } - } else { - out.Command = nil - } - if in.Args != nil { - out.Args = make([]string, len(in.Args)) - for i := range in.Args { - out.Args[i] = in.Args[i] - } - } else { - out.Args = nil - } - out.WorkingDir = in.WorkingDir - if in.Ports != nil { - out.Ports = make([]v1.ContainerPort, len(in.Ports)) - for i := range in.Ports { - if err := Convert_api_ContainerPort_To_v1_ContainerPort(&in.Ports[i], &out.Ports[i], s); err != nil { - return err - } - } - } else { - out.Ports = nil - } - if in.Env != nil { - out.Env = make([]v1.EnvVar, len(in.Env)) - for i := range in.Env { - if err := Convert_api_EnvVar_To_v1_EnvVar(&in.Env[i], &out.Env[i], s); err != nil { - return err - } - } - } else { - out.Env = nil - } - if err := Convert_api_ResourceRequirements_To_v1_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil { + if err := api.Convert_resource_Quantity_To_resource_Quantity(&in.TargetValue, &out.TargetValue, s); err != nil { return err } - if 
in.VolumeMounts != nil { - out.VolumeMounts = make([]v1.VolumeMount, len(in.VolumeMounts)) - for i := range in.VolumeMounts { - if err := Convert_api_VolumeMount_To_v1_VolumeMount(&in.VolumeMounts[i], &out.VolumeMounts[i], s); err != nil { - return err - } - } - } else { - out.VolumeMounts = nil - } - // unable to generate simple pointer conversion for api.Probe -> v1.Probe - if in.LivenessProbe != nil { - out.LivenessProbe = new(v1.Probe) - if err := Convert_api_Probe_To_v1_Probe(in.LivenessProbe, out.LivenessProbe, s); err != nil { - return err - } - } else { - out.LivenessProbe = nil - } - // unable to generate simple pointer conversion for api.Probe -> v1.Probe - if in.ReadinessProbe != nil { - out.ReadinessProbe = new(v1.Probe) - if err := Convert_api_Probe_To_v1_Probe(in.ReadinessProbe, out.ReadinessProbe, s); err != nil { - return err - } - } else { - out.ReadinessProbe = nil - } - // unable to generate simple pointer conversion for api.Lifecycle -> v1.Lifecycle - if in.Lifecycle != nil { - out.Lifecycle = new(v1.Lifecycle) - if err := Convert_api_Lifecycle_To_v1_Lifecycle(in.Lifecycle, out.Lifecycle, s); err != nil { - return err - } - } else { - out.Lifecycle = nil - } - out.TerminationMessagePath = in.TerminationMessagePath - out.ImagePullPolicy = v1.PullPolicy(in.ImagePullPolicy) - // unable to generate simple pointer conversion for api.SecurityContext -> v1.SecurityContext - if in.SecurityContext != nil { - out.SecurityContext = new(v1.SecurityContext) - if err := Convert_api_SecurityContext_To_v1_SecurityContext(in.SecurityContext, out.SecurityContext, s); err != nil { - return err - } - } else { - out.SecurityContext = nil - } - out.Stdin = in.Stdin - out.StdinOnce = in.StdinOnce - out.TTY = in.TTY return nil } -func Convert_api_Container_To_v1_Container(in *api.Container, out *v1.Container, s conversion.Scope) error { - return autoConvert_api_Container_To_v1_Container(in, out, s) +func Convert_v1beta1_CustomMetricTarget_To_extensions_CustomMetricTarget(in *CustomMetricTarget, out *extensions.CustomMetricTarget, s conversion.Scope) error { + return autoConvert_v1beta1_CustomMetricTarget_To_extensions_CustomMetricTarget(in, out, s) } -func autoConvert_api_ContainerPort_To_v1_ContainerPort(in *api.ContainerPort, out *v1.ContainerPort, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.ContainerPort))(in) - } +func autoConvert_extensions_CustomMetricTarget_To_v1beta1_CustomMetricTarget(in *extensions.CustomMetricTarget, out *CustomMetricTarget, s conversion.Scope) error { out.Name = in.Name - out.HostPort = int32(in.HostPort) - out.ContainerPort = int32(in.ContainerPort) - out.Protocol = v1.Protocol(in.Protocol) - out.HostIP = in.HostIP + if err := api.Convert_resource_Quantity_To_resource_Quantity(&in.TargetValue, &out.TargetValue, s); err != nil { + return err + } return nil } -func Convert_api_ContainerPort_To_v1_ContainerPort(in *api.ContainerPort, out *v1.ContainerPort, s conversion.Scope) error { - return autoConvert_api_ContainerPort_To_v1_ContainerPort(in, out, s) +func Convert_extensions_CustomMetricTarget_To_v1beta1_CustomMetricTarget(in *extensions.CustomMetricTarget, out *CustomMetricTarget, s conversion.Scope) error { + return autoConvert_extensions_CustomMetricTarget_To_v1beta1_CustomMetricTarget(in, out, s) } -func autoConvert_api_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile(in *api.DownwardAPIVolumeFile, out *v1.DownwardAPIVolumeFile, s conversion.Scope) error { - if defaulting, 
found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.DownwardAPIVolumeFile))(in) - } - out.Path = in.Path - if err := Convert_api_ObjectFieldSelector_To_v1_ObjectFieldSelector(&in.FieldRef, &out.FieldRef, s); err != nil { - return err +func autoConvert_v1beta1_CustomMetricTargetList_To_extensions_CustomMetricTargetList(in *CustomMetricTargetList, out *extensions.CustomMetricTargetList, s conversion.Scope) error { + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]extensions.CustomMetricTarget, len(*in)) + for i := range *in { + if err := Convert_v1beta1_CustomMetricTarget_To_extensions_CustomMetricTarget(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil } return nil } -func Convert_api_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile(in *api.DownwardAPIVolumeFile, out *v1.DownwardAPIVolumeFile, s conversion.Scope) error { - return autoConvert_api_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile(in, out, s) +func Convert_v1beta1_CustomMetricTargetList_To_extensions_CustomMetricTargetList(in *CustomMetricTargetList, out *extensions.CustomMetricTargetList, s conversion.Scope) error { + return autoConvert_v1beta1_CustomMetricTargetList_To_extensions_CustomMetricTargetList(in, out, s) } -func autoConvert_api_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource(in *api.DownwardAPIVolumeSource, out *v1.DownwardAPIVolumeSource, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.DownwardAPIVolumeSource))(in) - } +func autoConvert_extensions_CustomMetricTargetList_To_v1beta1_CustomMetricTargetList(in *extensions.CustomMetricTargetList, out *CustomMetricTargetList, s conversion.Scope) error { if in.Items != nil { - out.Items = make([]v1.DownwardAPIVolumeFile, len(in.Items)) - for i := range in.Items { - if err := Convert_api_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile(&in.Items[i], &out.Items[i], s); err != nil { + in, out := &in.Items, &out.Items + *out = make([]CustomMetricTarget, len(*in)) + for i := range *in { + if err := Convert_extensions_CustomMetricTarget_To_v1beta1_CustomMetricTarget(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -322,3351 +306,351 @@ func autoConvert_api_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource(in *a return nil } -func Convert_api_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource(in *api.DownwardAPIVolumeSource, out *v1.DownwardAPIVolumeSource, s conversion.Scope) error { - return autoConvert_api_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource(in, out, s) +func Convert_extensions_CustomMetricTargetList_To_v1beta1_CustomMetricTargetList(in *extensions.CustomMetricTargetList, out *CustomMetricTargetList, s conversion.Scope) error { + return autoConvert_extensions_CustomMetricTargetList_To_v1beta1_CustomMetricTargetList(in, out, s) } -func autoConvert_api_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource(in *api.EmptyDirVolumeSource, out *v1.EmptyDirVolumeSource, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.EmptyDirVolumeSource))(in) +func autoConvert_v1beta1_DaemonSet_To_extensions_DaemonSet(in *DaemonSet, out *extensions.DaemonSet, s conversion.Scope) error { + SetDefaults_DaemonSet(in) + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + // TODO: Inefficient conversion - can we improve it? 
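+	// The generic s.Convert call below walks ObjectMeta by reflection because
+	// no generated ObjectMeta conversion is registered from this package. A
+	// hypothetical direct call, if such a generated helper were available,
+	// would look like:
+	//
+	//	if err := v1.Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil {
+	//		return err
+	//	}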
+ if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { + return err + } + if err := Convert_v1beta1_DaemonSetSpec_To_extensions_DaemonSetSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1beta1_DaemonSetStatus_To_extensions_DaemonSetStatus(&in.Status, &out.Status, s); err != nil { + return err } - out.Medium = v1.StorageMedium(in.Medium) return nil } -func Convert_api_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource(in *api.EmptyDirVolumeSource, out *v1.EmptyDirVolumeSource, s conversion.Scope) error { - return autoConvert_api_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource(in, out, s) +func Convert_v1beta1_DaemonSet_To_extensions_DaemonSet(in *DaemonSet, out *extensions.DaemonSet, s conversion.Scope) error { + return autoConvert_v1beta1_DaemonSet_To_extensions_DaemonSet(in, out, s) } -func autoConvert_api_EnvVar_To_v1_EnvVar(in *api.EnvVar, out *v1.EnvVar, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.EnvVar))(in) +func autoConvert_extensions_DaemonSet_To_v1beta1_DaemonSet(in *extensions.DaemonSet, out *DaemonSet, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err } - out.Name = in.Name - out.Value = in.Value - // unable to generate simple pointer conversion for api.EnvVarSource -> v1.EnvVarSource - if in.ValueFrom != nil { - out.ValueFrom = new(v1.EnvVarSource) - if err := Convert_api_EnvVarSource_To_v1_EnvVarSource(in.ValueFrom, out.ValueFrom, s); err != nil { - return err - } - } else { - out.ValueFrom = nil + // TODO: Inefficient conversion - can we improve it? + if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { + return err + } + if err := Convert_extensions_DaemonSetSpec_To_v1beta1_DaemonSetSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_extensions_DaemonSetStatus_To_v1beta1_DaemonSetStatus(&in.Status, &out.Status, s); err != nil { + return err } return nil } -func Convert_api_EnvVar_To_v1_EnvVar(in *api.EnvVar, out *v1.EnvVar, s conversion.Scope) error { - return autoConvert_api_EnvVar_To_v1_EnvVar(in, out, s) +func Convert_extensions_DaemonSet_To_v1beta1_DaemonSet(in *extensions.DaemonSet, out *DaemonSet, s conversion.Scope) error { + return autoConvert_extensions_DaemonSet_To_v1beta1_DaemonSet(in, out, s) } -func autoConvert_api_EnvVarSource_To_v1_EnvVarSource(in *api.EnvVarSource, out *v1.EnvVarSource, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.EnvVarSource))(in) - } - // unable to generate simple pointer conversion for api.ObjectFieldSelector -> v1.ObjectFieldSelector - if in.FieldRef != nil { - out.FieldRef = new(v1.ObjectFieldSelector) - if err := Convert_api_ObjectFieldSelector_To_v1_ObjectFieldSelector(in.FieldRef, out.FieldRef, s); err != nil { - return err - } - } else { - out.FieldRef = nil +func autoConvert_v1beta1_DaemonSetList_To_extensions_DaemonSetList(in *DaemonSetList, out *extensions.DaemonSetList, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err } - // unable to generate simple pointer conversion for api.ConfigMapKeySelector -> v1.ConfigMapKeySelector - if in.ConfigMapKeyRef != nil { - out.ConfigMapKeyRef = new(v1.ConfigMapKeySelector) - if err := 
Convert_api_ConfigMapKeySelector_To_v1_ConfigMapKeySelector(in.ConfigMapKeyRef, out.ConfigMapKeyRef, s); err != nil { - return err - } - } else { - out.ConfigMapKeyRef = nil + if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { + return err } - // unable to generate simple pointer conversion for api.SecretKeySelector -> v1.SecretKeySelector - if in.SecretKeyRef != nil { - out.SecretKeyRef = new(v1.SecretKeySelector) - if err := Convert_api_SecretKeySelector_To_v1_SecretKeySelector(in.SecretKeyRef, out.SecretKeyRef, s); err != nil { - return err + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]extensions.DaemonSet, len(*in)) + for i := range *in { + if err := Convert_v1beta1_DaemonSet_To_extensions_DaemonSet(&(*in)[i], &(*out)[i], s); err != nil { + return err + } } } else { - out.SecretKeyRef = nil + out.Items = nil } return nil } -func Convert_api_EnvVarSource_To_v1_EnvVarSource(in *api.EnvVarSource, out *v1.EnvVarSource, s conversion.Scope) error { - return autoConvert_api_EnvVarSource_To_v1_EnvVarSource(in, out, s) +func Convert_v1beta1_DaemonSetList_To_extensions_DaemonSetList(in *DaemonSetList, out *extensions.DaemonSetList, s conversion.Scope) error { + return autoConvert_v1beta1_DaemonSetList_To_extensions_DaemonSetList(in, out, s) } -func autoConvert_api_ExecAction_To_v1_ExecAction(in *api.ExecAction, out *v1.ExecAction, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.ExecAction))(in) +func autoConvert_extensions_DaemonSetList_To_v1beta1_DaemonSetList(in *extensions.DaemonSetList, out *DaemonSetList, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { + return err } - if in.Command != nil { - out.Command = make([]string, len(in.Command)) - for i := range in.Command { - out.Command[i] = in.Command[i] + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DaemonSet, len(*in)) + for i := range *in { + if err := Convert_extensions_DaemonSet_To_v1beta1_DaemonSet(&(*in)[i], &(*out)[i], s); err != nil { + return err + } } } else { - out.Command = nil + out.Items = nil } return nil } -func Convert_api_ExecAction_To_v1_ExecAction(in *api.ExecAction, out *v1.ExecAction, s conversion.Scope) error { - return autoConvert_api_ExecAction_To_v1_ExecAction(in, out, s) +func Convert_extensions_DaemonSetList_To_v1beta1_DaemonSetList(in *extensions.DaemonSetList, out *DaemonSetList, s conversion.Scope) error { + return autoConvert_extensions_DaemonSetList_To_v1beta1_DaemonSetList(in, out, s) } -func autoConvert_api_FCVolumeSource_To_v1_FCVolumeSource(in *api.FCVolumeSource, out *v1.FCVolumeSource, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.FCVolumeSource))(in) - } - if in.TargetWWNs != nil { - out.TargetWWNs = make([]string, len(in.TargetWWNs)) - for i := range in.TargetWWNs { - out.TargetWWNs[i] = in.TargetWWNs[i] +func autoConvert_v1beta1_DaemonSetSpec_To_extensions_DaemonSetSpec(in *DaemonSetSpec, out *extensions.DaemonSetSpec, s conversion.Scope) error { + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(unversioned.LabelSelector) + if err := 
Convert_v1beta1_LabelSelector_To_unversioned_LabelSelector(*in, *out, s); err != nil { + return err } } else { - out.TargetWWNs = nil + out.Selector = nil } - if in.Lun != nil { - out.Lun = new(int32) - *out.Lun = int32(*in.Lun) - } else { - out.Lun = nil + if err := v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err } - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly return nil } -func Convert_api_FCVolumeSource_To_v1_FCVolumeSource(in *api.FCVolumeSource, out *v1.FCVolumeSource, s conversion.Scope) error { - return autoConvert_api_FCVolumeSource_To_v1_FCVolumeSource(in, out, s) +func Convert_v1beta1_DaemonSetSpec_To_extensions_DaemonSetSpec(in *DaemonSetSpec, out *extensions.DaemonSetSpec, s conversion.Scope) error { + return autoConvert_v1beta1_DaemonSetSpec_To_extensions_DaemonSetSpec(in, out, s) } -func autoConvert_api_FlexVolumeSource_To_v1_FlexVolumeSource(in *api.FlexVolumeSource, out *v1.FlexVolumeSource, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.FlexVolumeSource))(in) - } - out.Driver = in.Driver - out.FSType = in.FSType - // unable to generate simple pointer conversion for api.LocalObjectReference -> v1.LocalObjectReference - if in.SecretRef != nil { - out.SecretRef = new(v1.LocalObjectReference) - if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(in.SecretRef, out.SecretRef, s); err != nil { +func autoConvert_extensions_DaemonSetSpec_To_v1beta1_DaemonSetSpec(in *extensions.DaemonSetSpec, out *DaemonSetSpec, s conversion.Scope) error { + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(LabelSelector) + if err := Convert_unversioned_LabelSelector_To_v1beta1_LabelSelector(*in, *out, s); err != nil { return err } } else { - out.SecretRef = nil + out.Selector = nil } - out.ReadOnly = in.ReadOnly - if in.Options != nil { - out.Options = make(map[string]string) - for key, val := range in.Options { - out.Options[key] = val - } - } else { - out.Options = nil + if err := v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err } return nil } -func Convert_api_FlexVolumeSource_To_v1_FlexVolumeSource(in *api.FlexVolumeSource, out *v1.FlexVolumeSource, s conversion.Scope) error { - return autoConvert_api_FlexVolumeSource_To_v1_FlexVolumeSource(in, out, s) +func Convert_extensions_DaemonSetSpec_To_v1beta1_DaemonSetSpec(in *extensions.DaemonSetSpec, out *DaemonSetSpec, s conversion.Scope) error { + return autoConvert_extensions_DaemonSetSpec_To_v1beta1_DaemonSetSpec(in, out, s) } -func autoConvert_api_FlockerVolumeSource_To_v1_FlockerVolumeSource(in *api.FlockerVolumeSource, out *v1.FlockerVolumeSource, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.FlockerVolumeSource))(in) - } - out.DatasetName = in.DatasetName +func autoConvert_v1beta1_DaemonSetStatus_To_extensions_DaemonSetStatus(in *DaemonSetStatus, out *extensions.DaemonSetStatus, s conversion.Scope) error { + out.CurrentNumberScheduled = in.CurrentNumberScheduled + out.NumberMisscheduled = in.NumberMisscheduled + out.DesiredNumberScheduled = in.DesiredNumberScheduled return nil } -func Convert_api_FlockerVolumeSource_To_v1_FlockerVolumeSource(in *api.FlockerVolumeSource, out *v1.FlockerVolumeSource, s conversion.Scope) error { - return autoConvert_api_FlockerVolumeSource_To_v1_FlockerVolumeSource(in, 
out, s) +func Convert_v1beta1_DaemonSetStatus_To_extensions_DaemonSetStatus(in *DaemonSetStatus, out *extensions.DaemonSetStatus, s conversion.Scope) error { + return autoConvert_v1beta1_DaemonSetStatus_To_extensions_DaemonSetStatus(in, out, s) } -func autoConvert_api_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource(in *api.GCEPersistentDiskVolumeSource, out *v1.GCEPersistentDiskVolumeSource, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.GCEPersistentDiskVolumeSource))(in) - } - out.PDName = in.PDName - out.FSType = in.FSType - out.Partition = int32(in.Partition) - out.ReadOnly = in.ReadOnly +func autoConvert_extensions_DaemonSetStatus_To_v1beta1_DaemonSetStatus(in *extensions.DaemonSetStatus, out *DaemonSetStatus, s conversion.Scope) error { + out.CurrentNumberScheduled = in.CurrentNumberScheduled + out.NumberMisscheduled = in.NumberMisscheduled + out.DesiredNumberScheduled = in.DesiredNumberScheduled return nil } -func Convert_api_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource(in *api.GCEPersistentDiskVolumeSource, out *v1.GCEPersistentDiskVolumeSource, s conversion.Scope) error { - return autoConvert_api_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource(in, out, s) +func Convert_extensions_DaemonSetStatus_To_v1beta1_DaemonSetStatus(in *extensions.DaemonSetStatus, out *DaemonSetStatus, s conversion.Scope) error { + return autoConvert_extensions_DaemonSetStatus_To_v1beta1_DaemonSetStatus(in, out, s) } -func autoConvert_api_GitRepoVolumeSource_To_v1_GitRepoVolumeSource(in *api.GitRepoVolumeSource, out *v1.GitRepoVolumeSource, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.GitRepoVolumeSource))(in) +func autoConvert_v1beta1_Deployment_To_extensions_Deployment(in *Deployment, out *extensions.Deployment, s conversion.Scope) error { + SetDefaults_Deployment(in) + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + // TODO: Inefficient conversion - can we improve it? 
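+	// Note that defaulting now happens through an explicit call
+	// (SetDefaults_Deployment above, SetDefaults_DaemonSet earlier) instead of
+	// the old runtime lookup via s.DefaultingInterface(reflect.TypeOf(*in)),
+	// which is why the "reflect" import could be dropped from this file.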
+ if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { + return err + } + if err := Convert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1beta1_DeploymentStatus_To_extensions_DeploymentStatus(&in.Status, &out.Status, s); err != nil { + return err } - out.Repository = in.Repository - out.Revision = in.Revision - out.Directory = in.Directory return nil } -func Convert_api_GitRepoVolumeSource_To_v1_GitRepoVolumeSource(in *api.GitRepoVolumeSource, out *v1.GitRepoVolumeSource, s conversion.Scope) error { - return autoConvert_api_GitRepoVolumeSource_To_v1_GitRepoVolumeSource(in, out, s) +func Convert_v1beta1_Deployment_To_extensions_Deployment(in *Deployment, out *extensions.Deployment, s conversion.Scope) error { + return autoConvert_v1beta1_Deployment_To_extensions_Deployment(in, out, s) } -func autoConvert_api_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource(in *api.GlusterfsVolumeSource, out *v1.GlusterfsVolumeSource, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.GlusterfsVolumeSource))(in) +func autoConvert_extensions_Deployment_To_v1beta1_Deployment(in *extensions.Deployment, out *Deployment, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + // TODO: Inefficient conversion - can we improve it? + if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { + return err + } + if err := Convert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_extensions_DeploymentStatus_To_v1beta1_DeploymentStatus(&in.Status, &out.Status, s); err != nil { + return err } - out.EndpointsName = in.EndpointsName - out.Path = in.Path - out.ReadOnly = in.ReadOnly return nil } -func Convert_api_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource(in *api.GlusterfsVolumeSource, out *v1.GlusterfsVolumeSource, s conversion.Scope) error { - return autoConvert_api_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource(in, out, s) +func Convert_extensions_Deployment_To_v1beta1_Deployment(in *extensions.Deployment, out *Deployment, s conversion.Scope) error { + return autoConvert_extensions_Deployment_To_v1beta1_Deployment(in, out, s) } -func autoConvert_api_HTTPGetAction_To_v1_HTTPGetAction(in *api.HTTPGetAction, out *v1.HTTPGetAction, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.HTTPGetAction))(in) +func autoConvert_v1beta1_DeploymentList_To_extensions_DeploymentList(in *DeploymentList, out *extensions.DeploymentList, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err } - out.Path = in.Path - if err := api.Convert_intstr_IntOrString_To_intstr_IntOrString(&in.Port, &out.Port, s); err != nil { + if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { return err } - out.Host = in.Host - out.Scheme = v1.URIScheme(in.Scheme) - if in.HTTPHeaders != nil { - out.HTTPHeaders = make([]v1.HTTPHeader, len(in.HTTPHeaders)) - for i := range in.HTTPHeaders { - if err := Convert_api_HTTPHeader_To_v1_HTTPHeader(&in.HTTPHeaders[i], &out.HTTPHeaders[i], s); err != nil { + if in.Items != nil { + in, out := &in.Items, &out.Items + 
*out = make([]extensions.Deployment, len(*in)) + for i := range *in { + if err := Convert_v1beta1_Deployment_To_extensions_Deployment(&(*in)[i], &(*out)[i], s); err != nil { return err } } } else { - out.HTTPHeaders = nil - } - return nil -} - -func Convert_api_HTTPGetAction_To_v1_HTTPGetAction(in *api.HTTPGetAction, out *v1.HTTPGetAction, s conversion.Scope) error { - return autoConvert_api_HTTPGetAction_To_v1_HTTPGetAction(in, out, s) -} - -func autoConvert_api_HTTPHeader_To_v1_HTTPHeader(in *api.HTTPHeader, out *v1.HTTPHeader, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.HTTPHeader))(in) + out.Items = nil } - out.Name = in.Name - out.Value = in.Value return nil } -func Convert_api_HTTPHeader_To_v1_HTTPHeader(in *api.HTTPHeader, out *v1.HTTPHeader, s conversion.Scope) error { - return autoConvert_api_HTTPHeader_To_v1_HTTPHeader(in, out, s) +func Convert_v1beta1_DeploymentList_To_extensions_DeploymentList(in *DeploymentList, out *extensions.DeploymentList, s conversion.Scope) error { + return autoConvert_v1beta1_DeploymentList_To_extensions_DeploymentList(in, out, s) } -func autoConvert_api_Handler_To_v1_Handler(in *api.Handler, out *v1.Handler, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.Handler))(in) - } - // unable to generate simple pointer conversion for api.ExecAction -> v1.ExecAction - if in.Exec != nil { - out.Exec = new(v1.ExecAction) - if err := Convert_api_ExecAction_To_v1_ExecAction(in.Exec, out.Exec, s); err != nil { - return err - } - } else { - out.Exec = nil +func autoConvert_extensions_DeploymentList_To_v1beta1_DeploymentList(in *extensions.DeploymentList, out *DeploymentList, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err } - // unable to generate simple pointer conversion for api.HTTPGetAction -> v1.HTTPGetAction - if in.HTTPGet != nil { - out.HTTPGet = new(v1.HTTPGetAction) - if err := Convert_api_HTTPGetAction_To_v1_HTTPGetAction(in.HTTPGet, out.HTTPGet, s); err != nil { - return err - } - } else { - out.HTTPGet = nil + if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { + return err } - // unable to generate simple pointer conversion for api.TCPSocketAction -> v1.TCPSocketAction - if in.TCPSocket != nil { - out.TCPSocket = new(v1.TCPSocketAction) - if err := Convert_api_TCPSocketAction_To_v1_TCPSocketAction(in.TCPSocket, out.TCPSocket, s); err != nil { - return err + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Deployment, len(*in)) + for i := range *in { + if err := Convert_extensions_Deployment_To_v1beta1_Deployment(&(*in)[i], &(*out)[i], s); err != nil { + return err + } } } else { - out.TCPSocket = nil + out.Items = nil } return nil } -func Convert_api_Handler_To_v1_Handler(in *api.Handler, out *v1.Handler, s conversion.Scope) error { - return autoConvert_api_Handler_To_v1_Handler(in, out, s) +func Convert_extensions_DeploymentList_To_v1beta1_DeploymentList(in *extensions.DeploymentList, out *DeploymentList, s conversion.Scope) error { + return autoConvert_extensions_DeploymentList_To_v1beta1_DeploymentList(in, out, s) } -func autoConvert_api_HostPathVolumeSource_To_v1_HostPathVolumeSource(in *api.HostPathVolumeSource, out *v1.HostPathVolumeSource, s conversion.Scope) error { - if defaulting, found := 
s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.HostPathVolumeSource))(in) - } - out.Path = in.Path - return nil -} - -func Convert_api_HostPathVolumeSource_To_v1_HostPathVolumeSource(in *api.HostPathVolumeSource, out *v1.HostPathVolumeSource, s conversion.Scope) error { - return autoConvert_api_HostPathVolumeSource_To_v1_HostPathVolumeSource(in, out, s) -} - -func autoConvert_api_ISCSIVolumeSource_To_v1_ISCSIVolumeSource(in *api.ISCSIVolumeSource, out *v1.ISCSIVolumeSource, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.ISCSIVolumeSource))(in) - } - out.TargetPortal = in.TargetPortal - out.IQN = in.IQN - out.Lun = int32(in.Lun) - out.ISCSIInterface = in.ISCSIInterface - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly - return nil -} - -func Convert_api_ISCSIVolumeSource_To_v1_ISCSIVolumeSource(in *api.ISCSIVolumeSource, out *v1.ISCSIVolumeSource, s conversion.Scope) error { - return autoConvert_api_ISCSIVolumeSource_To_v1_ISCSIVolumeSource(in, out, s) -} - -func autoConvert_api_KeyToPath_To_v1_KeyToPath(in *api.KeyToPath, out *v1.KeyToPath, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.KeyToPath))(in) - } - out.Key = in.Key - out.Path = in.Path - return nil -} - -func Convert_api_KeyToPath_To_v1_KeyToPath(in *api.KeyToPath, out *v1.KeyToPath, s conversion.Scope) error { - return autoConvert_api_KeyToPath_To_v1_KeyToPath(in, out, s) -} - -func autoConvert_api_Lifecycle_To_v1_Lifecycle(in *api.Lifecycle, out *v1.Lifecycle, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.Lifecycle))(in) - } - // unable to generate simple pointer conversion for api.Handler -> v1.Handler - if in.PostStart != nil { - out.PostStart = new(v1.Handler) - if err := Convert_api_Handler_To_v1_Handler(in.PostStart, out.PostStart, s); err != nil { - return err - } - } else { - out.PostStart = nil - } - // unable to generate simple pointer conversion for api.Handler -> v1.Handler - if in.PreStop != nil { - out.PreStop = new(v1.Handler) - if err := Convert_api_Handler_To_v1_Handler(in.PreStop, out.PreStop, s); err != nil { - return err - } - } else { - out.PreStop = nil - } - return nil -} - -func Convert_api_Lifecycle_To_v1_Lifecycle(in *api.Lifecycle, out *v1.Lifecycle, s conversion.Scope) error { - return autoConvert_api_Lifecycle_To_v1_Lifecycle(in, out, s) -} - -func autoConvert_api_ListOptions_To_v1beta1_ListOptions(in *api.ListOptions, out *ListOptions, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.ListOptions))(in) - } - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_labels_Selector_To_string(&in.LabelSelector, &out.LabelSelector, s); err != nil { - return err - } - if err := api.Convert_fields_Selector_To_string(&in.FieldSelector, &out.FieldSelector, s); err != nil { - return err - } - out.Watch = in.Watch - out.ResourceVersion = in.ResourceVersion - if in.TimeoutSeconds != nil { - out.TimeoutSeconds = new(int64) - *out.TimeoutSeconds = *in.TimeoutSeconds - } else { - out.TimeoutSeconds = nil - } - return nil -} - -func Convert_api_ListOptions_To_v1beta1_ListOptions(in *api.ListOptions, out *ListOptions, s conversion.Scope) error { - return 
autoConvert_api_ListOptions_To_v1beta1_ListOptions(in, out, s) -} - -func autoConvert_api_LoadBalancerIngress_To_v1_LoadBalancerIngress(in *api.LoadBalancerIngress, out *v1.LoadBalancerIngress, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.LoadBalancerIngress))(in) - } - out.IP = in.IP - out.Hostname = in.Hostname - return nil -} - -func Convert_api_LoadBalancerIngress_To_v1_LoadBalancerIngress(in *api.LoadBalancerIngress, out *v1.LoadBalancerIngress, s conversion.Scope) error { - return autoConvert_api_LoadBalancerIngress_To_v1_LoadBalancerIngress(in, out, s) -} - -func autoConvert_api_LoadBalancerStatus_To_v1_LoadBalancerStatus(in *api.LoadBalancerStatus, out *v1.LoadBalancerStatus, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.LoadBalancerStatus))(in) - } - if in.Ingress != nil { - out.Ingress = make([]v1.LoadBalancerIngress, len(in.Ingress)) - for i := range in.Ingress { - if err := Convert_api_LoadBalancerIngress_To_v1_LoadBalancerIngress(&in.Ingress[i], &out.Ingress[i], s); err != nil { - return err - } - } - } else { - out.Ingress = nil - } - return nil -} - -func Convert_api_LoadBalancerStatus_To_v1_LoadBalancerStatus(in *api.LoadBalancerStatus, out *v1.LoadBalancerStatus, s conversion.Scope) error { - return autoConvert_api_LoadBalancerStatus_To_v1_LoadBalancerStatus(in, out, s) -} - -func autoConvert_api_LocalObjectReference_To_v1_LocalObjectReference(in *api.LocalObjectReference, out *v1.LocalObjectReference, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.LocalObjectReference))(in) - } - out.Name = in.Name - return nil -} - -func Convert_api_LocalObjectReference_To_v1_LocalObjectReference(in *api.LocalObjectReference, out *v1.LocalObjectReference, s conversion.Scope) error { - return autoConvert_api_LocalObjectReference_To_v1_LocalObjectReference(in, out, s) -} - -func autoConvert_api_NFSVolumeSource_To_v1_NFSVolumeSource(in *api.NFSVolumeSource, out *v1.NFSVolumeSource, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.NFSVolumeSource))(in) - } - out.Server = in.Server - out.Path = in.Path - out.ReadOnly = in.ReadOnly - return nil -} - -func Convert_api_NFSVolumeSource_To_v1_NFSVolumeSource(in *api.NFSVolumeSource, out *v1.NFSVolumeSource, s conversion.Scope) error { - return autoConvert_api_NFSVolumeSource_To_v1_NFSVolumeSource(in, out, s) -} - -func autoConvert_api_ObjectFieldSelector_To_v1_ObjectFieldSelector(in *api.ObjectFieldSelector, out *v1.ObjectFieldSelector, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.ObjectFieldSelector))(in) - } - out.APIVersion = in.APIVersion - out.FieldPath = in.FieldPath - return nil -} - -func Convert_api_ObjectFieldSelector_To_v1_ObjectFieldSelector(in *api.ObjectFieldSelector, out *v1.ObjectFieldSelector, s conversion.Scope) error { - return autoConvert_api_ObjectFieldSelector_To_v1_ObjectFieldSelector(in, out, s) -} - -func autoConvert_api_ObjectMeta_To_v1_ObjectMeta(in *api.ObjectMeta, out *v1.ObjectMeta, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.ObjectMeta))(in) - } - out.Name = in.Name - out.GenerateName = in.GenerateName - out.Namespace = 
in.Namespace - out.SelfLink = in.SelfLink - out.UID = in.UID - out.ResourceVersion = in.ResourceVersion - out.Generation = in.Generation - if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.CreationTimestamp, &out.CreationTimestamp, s); err != nil { - return err - } - // unable to generate simple pointer conversion for unversioned.Time -> unversioned.Time - if in.DeletionTimestamp != nil { - out.DeletionTimestamp = new(unversioned.Time) - if err := api.Convert_unversioned_Time_To_unversioned_Time(in.DeletionTimestamp, out.DeletionTimestamp, s); err != nil { - return err - } - } else { - out.DeletionTimestamp = nil - } - if in.DeletionGracePeriodSeconds != nil { - out.DeletionGracePeriodSeconds = new(int64) - *out.DeletionGracePeriodSeconds = *in.DeletionGracePeriodSeconds - } else { - out.DeletionGracePeriodSeconds = nil - } - if in.Labels != nil { - out.Labels = make(map[string]string) - for key, val := range in.Labels { - out.Labels[key] = val - } - } else { - out.Labels = nil - } - if in.Annotations != nil { - out.Annotations = make(map[string]string) - for key, val := range in.Annotations { - out.Annotations[key] = val - } - } else { - out.Annotations = nil - } - return nil -} - -func Convert_api_ObjectMeta_To_v1_ObjectMeta(in *api.ObjectMeta, out *v1.ObjectMeta, s conversion.Scope) error { - return autoConvert_api_ObjectMeta_To_v1_ObjectMeta(in, out, s) -} - -func autoConvert_api_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource(in *api.PersistentVolumeClaimVolumeSource, out *v1.PersistentVolumeClaimVolumeSource, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.PersistentVolumeClaimVolumeSource))(in) - } - out.ClaimName = in.ClaimName - out.ReadOnly = in.ReadOnly - return nil -} - -func Convert_api_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource(in *api.PersistentVolumeClaimVolumeSource, out *v1.PersistentVolumeClaimVolumeSource, s conversion.Scope) error { - return autoConvert_api_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource(in, out, s) -} - -func autoConvert_api_PodSpec_To_v1_PodSpec(in *api.PodSpec, out *v1.PodSpec, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.PodSpec))(in) - } - if in.Volumes != nil { - out.Volumes = make([]v1.Volume, len(in.Volumes)) - for i := range in.Volumes { - if err := Convert_api_Volume_To_v1_Volume(&in.Volumes[i], &out.Volumes[i], s); err != nil { - return err - } - } - } else { - out.Volumes = nil - } - if in.Containers != nil { - out.Containers = make([]v1.Container, len(in.Containers)) - for i := range in.Containers { - if err := Convert_api_Container_To_v1_Container(&in.Containers[i], &out.Containers[i], s); err != nil { - return err - } - } - } else { - out.Containers = nil - } - out.RestartPolicy = v1.RestartPolicy(in.RestartPolicy) - if in.TerminationGracePeriodSeconds != nil { - out.TerminationGracePeriodSeconds = new(int64) - *out.TerminationGracePeriodSeconds = *in.TerminationGracePeriodSeconds - } else { - out.TerminationGracePeriodSeconds = nil - } - if in.ActiveDeadlineSeconds != nil { - out.ActiveDeadlineSeconds = new(int64) - *out.ActiveDeadlineSeconds = *in.ActiveDeadlineSeconds - } else { - out.ActiveDeadlineSeconds = nil - } - out.DNSPolicy = v1.DNSPolicy(in.DNSPolicy) - if in.NodeSelector != nil { - out.NodeSelector = make(map[string]string) - for key, val := range in.NodeSelector { - 
out.NodeSelector[key] = val
-		}
-	} else {
-		out.NodeSelector = nil
-	}
-	out.ServiceAccountName = in.ServiceAccountName
-	out.NodeName = in.NodeName
-	// unable to generate simple pointer conversion for api.PodSecurityContext -> v1.PodSecurityContext
-	if in.SecurityContext != nil {
-		if err := s.Convert(&in.SecurityContext, &out.SecurityContext, 0); err != nil {
-			return err
-		}
-	} else {
-		out.SecurityContext = nil
-	}
-	if in.ImagePullSecrets != nil {
-		out.ImagePullSecrets = make([]v1.LocalObjectReference, len(in.ImagePullSecrets))
-		for i := range in.ImagePullSecrets {
-			if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(&in.ImagePullSecrets[i], &out.ImagePullSecrets[i], s); err != nil {
-				return err
-			}
-		}
-	} else {
-		out.ImagePullSecrets = nil
-	}
-	return nil
-}
-
-func autoConvert_api_PodTemplateSpec_To_v1_PodTemplateSpec(in *api.PodTemplateSpec, out *v1.PodTemplateSpec, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*api.PodTemplateSpec))(in)
-	}
-	if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil {
-		return err
-	}
-	if err := Convert_api_PodSpec_To_v1_PodSpec(&in.Spec, &out.Spec, s); err != nil {
-		return err
-	}
-	return nil
-}
-
-func Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(in *api.PodTemplateSpec, out *v1.PodTemplateSpec, s conversion.Scope) error {
-	return autoConvert_api_PodTemplateSpec_To_v1_PodTemplateSpec(in, out, s)
-}
-
-func autoConvert_api_Probe_To_v1_Probe(in *api.Probe, out *v1.Probe, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*api.Probe))(in)
-	}
-	if err := Convert_api_Handler_To_v1_Handler(&in.Handler, &out.Handler, s); err != nil {
-		return err
-	}
-	out.InitialDelaySeconds = int32(in.InitialDelaySeconds)
-	out.TimeoutSeconds = int32(in.TimeoutSeconds)
-	out.PeriodSeconds = int32(in.PeriodSeconds)
-	out.SuccessThreshold = int32(in.SuccessThreshold)
-	out.FailureThreshold = int32(in.FailureThreshold)
-	return nil
-}
-
-func Convert_api_Probe_To_v1_Probe(in *api.Probe, out *v1.Probe, s conversion.Scope) error {
-	return autoConvert_api_Probe_To_v1_Probe(in, out, s)
-}
-
-func autoConvert_api_RBDVolumeSource_To_v1_RBDVolumeSource(in *api.RBDVolumeSource, out *v1.RBDVolumeSource, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*api.RBDVolumeSource))(in)
-	}
-	if in.CephMonitors != nil {
-		out.CephMonitors = make([]string, len(in.CephMonitors))
-		for i := range in.CephMonitors {
-			out.CephMonitors[i] = in.CephMonitors[i]
-		}
-	} else {
-		out.CephMonitors = nil
-	}
-	out.RBDImage = in.RBDImage
-	out.FSType = in.FSType
-	out.RBDPool = in.RBDPool
-	out.RadosUser = in.RadosUser
-	out.Keyring = in.Keyring
-	// unable to generate simple pointer conversion for api.LocalObjectReference -> v1.LocalObjectReference
-	if in.SecretRef != nil {
-		out.SecretRef = new(v1.LocalObjectReference)
-		if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(in.SecretRef, out.SecretRef, s); err != nil {
-			return err
-		}
-	} else {
-		out.SecretRef = nil
-	}
-	out.ReadOnly = in.ReadOnly
-	return nil
-}
-
-func Convert_api_RBDVolumeSource_To_v1_RBDVolumeSource(in *api.RBDVolumeSource, out *v1.RBDVolumeSource, s conversion.Scope) error {
-	return autoConvert_api_RBDVolumeSource_To_v1_RBDVolumeSource(in, out, s)
-}
-
-func autoConvert_api_ResourceRequirements_To_v1_ResourceRequirements(in *api.ResourceRequirements, out *v1.ResourceRequirements, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*api.ResourceRequirements))(in)
-	}
-	if in.Limits != nil {
-		out.Limits = make(v1.ResourceList)
-		for key, val := range in.Limits {
-			newVal := resource.Quantity{}
-			if err := api.Convert_resource_Quantity_To_resource_Quantity(&val, &newVal, s); err != nil {
-				return err
-			}
-			out.Limits[v1.ResourceName(key)] = newVal
-		}
-	} else {
-		out.Limits = nil
-	}
-	if in.Requests != nil {
-		out.Requests = make(v1.ResourceList)
-		for key, val := range in.Requests {
-			newVal := resource.Quantity{}
-			if err := api.Convert_resource_Quantity_To_resource_Quantity(&val, &newVal, s); err != nil {
-				return err
-			}
-			out.Requests[v1.ResourceName(key)] = newVal
-		}
-	} else {
-		out.Requests = nil
-	}
-	return nil
-}
-
-func Convert_api_ResourceRequirements_To_v1_ResourceRequirements(in *api.ResourceRequirements, out *v1.ResourceRequirements, s conversion.Scope) error {
-	return autoConvert_api_ResourceRequirements_To_v1_ResourceRequirements(in, out, s)
-}
-
-func autoConvert_api_SELinuxOptions_To_v1_SELinuxOptions(in *api.SELinuxOptions, out *v1.SELinuxOptions, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*api.SELinuxOptions))(in)
-	}
-	out.User = in.User
-	out.Role = in.Role
-	out.Type = in.Type
-	out.Level = in.Level
-	return nil
-}
-
-func Convert_api_SELinuxOptions_To_v1_SELinuxOptions(in *api.SELinuxOptions, out *v1.SELinuxOptions, s conversion.Scope) error {
-	return autoConvert_api_SELinuxOptions_To_v1_SELinuxOptions(in, out, s)
-}
-
-func autoConvert_api_SecretKeySelector_To_v1_SecretKeySelector(in *api.SecretKeySelector, out *v1.SecretKeySelector, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*api.SecretKeySelector))(in)
-	}
-	if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil {
-		return err
-	}
-	out.Key = in.Key
-	return nil
-}
-
-func Convert_api_SecretKeySelector_To_v1_SecretKeySelector(in *api.SecretKeySelector, out *v1.SecretKeySelector, s conversion.Scope) error {
-	return autoConvert_api_SecretKeySelector_To_v1_SecretKeySelector(in, out, s)
-}
-
-func autoConvert_api_SecretVolumeSource_To_v1_SecretVolumeSource(in *api.SecretVolumeSource, out *v1.SecretVolumeSource, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*api.SecretVolumeSource))(in)
-	}
-	out.SecretName = in.SecretName
-	return nil
-}
-
-func Convert_api_SecretVolumeSource_To_v1_SecretVolumeSource(in *api.SecretVolumeSource, out *v1.SecretVolumeSource, s conversion.Scope) error {
-	return autoConvert_api_SecretVolumeSource_To_v1_SecretVolumeSource(in, out, s)
-}
-
-func autoConvert_api_SecurityContext_To_v1_SecurityContext(in *api.SecurityContext, out *v1.SecurityContext, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*api.SecurityContext))(in)
-	}
-	// unable to generate simple pointer conversion for api.Capabilities -> v1.Capabilities
-	if in.Capabilities != nil {
-		out.Capabilities = new(v1.Capabilities)
-		if err := Convert_api_Capabilities_To_v1_Capabilities(in.Capabilities, out.Capabilities, s); err != nil {
-			return err
-		}
-	} else {
-		out.Capabilities = nil
-	}
-	if in.Privileged != nil {
-		out.Privileged = new(bool)
-		*out.Privileged = *in.Privileged
-	} else {
-		out.Privileged = nil
-	}
-	// unable to generate simple pointer conversion for api.SELinuxOptions -> v1.SELinuxOptions
-	if in.SELinuxOptions != nil {
-		out.SELinuxOptions = new(v1.SELinuxOptions)
-		if err := Convert_api_SELinuxOptions_To_v1_SELinuxOptions(in.SELinuxOptions, out.SELinuxOptions, s); err != nil {
-			return err
-		}
-	} else {
-		out.SELinuxOptions = nil
-	}
-	if in.RunAsUser != nil {
-		out.RunAsUser = new(int64)
-		*out.RunAsUser = *in.RunAsUser
-	} else {
-		out.RunAsUser = nil
-	}
-	if in.RunAsNonRoot != nil {
-		out.RunAsNonRoot = new(bool)
-		*out.RunAsNonRoot = *in.RunAsNonRoot
-	} else {
-		out.RunAsNonRoot = nil
-	}
-	if in.ReadOnlyRootFilesystem != nil {
-		out.ReadOnlyRootFilesystem = new(bool)
-		*out.ReadOnlyRootFilesystem = *in.ReadOnlyRootFilesystem
-	} else {
-		out.ReadOnlyRootFilesystem = nil
-	}
-	return nil
-}
-
-func Convert_api_SecurityContext_To_v1_SecurityContext(in *api.SecurityContext, out *v1.SecurityContext, s conversion.Scope) error {
-	return autoConvert_api_SecurityContext_To_v1_SecurityContext(in, out, s)
-}
-
-func autoConvert_api_TCPSocketAction_To_v1_TCPSocketAction(in *api.TCPSocketAction, out *v1.TCPSocketAction, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*api.TCPSocketAction))(in)
-	}
-	if err := api.Convert_intstr_IntOrString_To_intstr_IntOrString(&in.Port, &out.Port, s); err != nil {
-		return err
-	}
-	return nil
-}
-
-func Convert_api_TCPSocketAction_To_v1_TCPSocketAction(in *api.TCPSocketAction, out *v1.TCPSocketAction, s conversion.Scope) error {
-	return autoConvert_api_TCPSocketAction_To_v1_TCPSocketAction(in, out, s)
-}
-
-func autoConvert_api_Volume_To_v1_Volume(in *api.Volume, out *v1.Volume, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*api.Volume))(in)
-	}
-	out.Name = in.Name
-	if err := Convert_api_VolumeSource_To_v1_VolumeSource(&in.VolumeSource, &out.VolumeSource, s); err != nil {
-		return err
-	}
-	return nil
-}
-
-func Convert_api_Volume_To_v1_Volume(in *api.Volume, out *v1.Volume, s conversion.Scope) error {
-	return autoConvert_api_Volume_To_v1_Volume(in, out, s)
-}
-
-func autoConvert_api_VolumeMount_To_v1_VolumeMount(in *api.VolumeMount, out *v1.VolumeMount, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*api.VolumeMount))(in)
+func autoConvert_v1beta1_DeploymentRollback_To_extensions_DeploymentRollback(in *DeploymentRollback, out *extensions.DeploymentRollback, s conversion.Scope) error {
+	if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+		return err
 	}
 	out.Name = in.Name
-	out.ReadOnly = in.ReadOnly
-	out.MountPath = in.MountPath
-	return nil
-}
-
-func Convert_api_VolumeMount_To_v1_VolumeMount(in *api.VolumeMount, out *v1.VolumeMount, s conversion.Scope) error {
-	return autoConvert_api_VolumeMount_To_v1_VolumeMount(in, out, s)
-}
-
-func autoConvert_api_VolumeSource_To_v1_VolumeSource(in *api.VolumeSource, out *v1.VolumeSource, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*api.VolumeSource))(in)
-	}
-	// unable to generate simple pointer conversion for api.HostPathVolumeSource -> v1.HostPathVolumeSource
-	if in.HostPath != nil {
-		out.HostPath = new(v1.HostPathVolumeSource)
-		if err := Convert_api_HostPathVolumeSource_To_v1_HostPathVolumeSource(in.HostPath, out.HostPath, s); err != nil {
-			return err
-		}
-	} else {
-		out.HostPath = nil
-	}
-	// unable to generate simple pointer conversion for api.EmptyDirVolumeSource -> v1.EmptyDirVolumeSource
-	if in.EmptyDir != nil {
-		out.EmptyDir = new(v1.EmptyDirVolumeSource)
-		if err := Convert_api_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource(in.EmptyDir, out.EmptyDir, s); err != nil {
-			return err
-		}
-	} else {
-		out.EmptyDir = nil
-	}
-	// unable to generate simple pointer conversion for api.GCEPersistentDiskVolumeSource -> v1.GCEPersistentDiskVolumeSource
-	if in.GCEPersistentDisk != nil {
-		out.GCEPersistentDisk = new(v1.GCEPersistentDiskVolumeSource)
-		if err := Convert_api_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource(in.GCEPersistentDisk, out.GCEPersistentDisk, s); err != nil {
-			return err
-		}
-	} else {
-		out.GCEPersistentDisk = nil
-	}
-	// unable to generate simple pointer conversion for api.AWSElasticBlockStoreVolumeSource -> v1.AWSElasticBlockStoreVolumeSource
-	if in.AWSElasticBlockStore != nil {
-		out.AWSElasticBlockStore = new(v1.AWSElasticBlockStoreVolumeSource)
-		if err := Convert_api_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource(in.AWSElasticBlockStore, out.AWSElasticBlockStore, s); err != nil {
-			return err
-		}
-	} else {
-		out.AWSElasticBlockStore = nil
-	}
-	// unable to generate simple pointer conversion for api.GitRepoVolumeSource -> v1.GitRepoVolumeSource
-	if in.GitRepo != nil {
-		out.GitRepo = new(v1.GitRepoVolumeSource)
-		if err := Convert_api_GitRepoVolumeSource_To_v1_GitRepoVolumeSource(in.GitRepo, out.GitRepo, s); err != nil {
-			return err
-		}
-	} else {
-		out.GitRepo = nil
-	}
-	// unable to generate simple pointer conversion for api.SecretVolumeSource -> v1.SecretVolumeSource
-	if in.Secret != nil {
-		out.Secret = new(v1.SecretVolumeSource)
-		if err := Convert_api_SecretVolumeSource_To_v1_SecretVolumeSource(in.Secret, out.Secret, s); err != nil {
-			return err
-		}
-	} else {
-		out.Secret = nil
-	}
-	// unable to generate simple pointer conversion for api.NFSVolumeSource -> v1.NFSVolumeSource
-	if in.NFS != nil {
-		out.NFS = new(v1.NFSVolumeSource)
-		if err := Convert_api_NFSVolumeSource_To_v1_NFSVolumeSource(in.NFS, out.NFS, s); err != nil {
-			return err
-		}
-	} else {
-		out.NFS = nil
-	}
-	// unable to generate simple pointer conversion for api.ISCSIVolumeSource -> v1.ISCSIVolumeSource
-	if in.ISCSI != nil {
-		out.ISCSI = new(v1.ISCSIVolumeSource)
-		if err := Convert_api_ISCSIVolumeSource_To_v1_ISCSIVolumeSource(in.ISCSI, out.ISCSI, s); err != nil {
-			return err
-		}
-	} else {
-		out.ISCSI = nil
-	}
-	// unable to generate simple pointer conversion for api.GlusterfsVolumeSource -> v1.GlusterfsVolumeSource
-	if in.Glusterfs != nil {
-		out.Glusterfs = new(v1.GlusterfsVolumeSource)
-		if err := Convert_api_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource(in.Glusterfs, out.Glusterfs, s); err != nil {
-			return err
-		}
-	} else {
-		out.Glusterfs = nil
-	}
-	// unable to generate simple pointer conversion for api.PersistentVolumeClaimVolumeSource -> v1.PersistentVolumeClaimVolumeSource
-	if in.PersistentVolumeClaim != nil {
-		out.PersistentVolumeClaim = new(v1.PersistentVolumeClaimVolumeSource)
-		if err := Convert_api_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource(in.PersistentVolumeClaim, out.PersistentVolumeClaim, s); err != nil {
-			return err
-		}
-	} else {
-		out.PersistentVolumeClaim = nil
-	}
-	// unable to generate simple pointer conversion for api.RBDVolumeSource -> v1.RBDVolumeSource
-	if in.RBD != nil {
-		out.RBD = new(v1.RBDVolumeSource)
-		if err := Convert_api_RBDVolumeSource_To_v1_RBDVolumeSource(in.RBD, out.RBD, s); err != nil {
-			return err
-		}
-	} else {
-		out.RBD = nil
-	}
-	// unable to generate simple pointer conversion for api.FlexVolumeSource -> v1.FlexVolumeSource
-	if in.FlexVolume != nil {
-		out.FlexVolume = new(v1.FlexVolumeSource)
-		if err := Convert_api_FlexVolumeSource_To_v1_FlexVolumeSource(in.FlexVolume, out.FlexVolume, s); err != nil {
-			return err
-		}
-	} else {
-		out.FlexVolume = nil
-	}
-	// unable to generate simple pointer conversion for api.CinderVolumeSource -> v1.CinderVolumeSource
-	if in.Cinder != nil {
-		out.Cinder = new(v1.CinderVolumeSource)
-		if err := Convert_api_CinderVolumeSource_To_v1_CinderVolumeSource(in.Cinder, out.Cinder, s); err != nil {
-			return err
-		}
-	} else {
-		out.Cinder = nil
-	}
-	// unable to generate simple pointer conversion for api.CephFSVolumeSource -> v1.CephFSVolumeSource
-	if in.CephFS != nil {
-		out.CephFS = new(v1.CephFSVolumeSource)
-		if err := Convert_api_CephFSVolumeSource_To_v1_CephFSVolumeSource(in.CephFS, out.CephFS, s); err != nil {
-			return err
-		}
-	} else {
-		out.CephFS = nil
-	}
-	// unable to generate simple pointer conversion for api.FlockerVolumeSource -> v1.FlockerVolumeSource
-	if in.Flocker != nil {
-		out.Flocker = new(v1.FlockerVolumeSource)
-		if err := Convert_api_FlockerVolumeSource_To_v1_FlockerVolumeSource(in.Flocker, out.Flocker, s); err != nil {
-			return err
-		}
-	} else {
-		out.Flocker = nil
-	}
-	// unable to generate simple pointer conversion for api.DownwardAPIVolumeSource -> v1.DownwardAPIVolumeSource
-	if in.DownwardAPI != nil {
-		out.DownwardAPI = new(v1.DownwardAPIVolumeSource)
-		if err := Convert_api_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource(in.DownwardAPI, out.DownwardAPI, s); err != nil {
-			return err
-		}
-	} else {
-		out.DownwardAPI = nil
-	}
-	// unable to generate simple pointer conversion for api.FCVolumeSource -> v1.FCVolumeSource
-	if in.FC != nil {
-		out.FC = new(v1.FCVolumeSource)
-		if err := Convert_api_FCVolumeSource_To_v1_FCVolumeSource(in.FC, out.FC, s); err != nil {
-			return err
-		}
-	} else {
-		out.FC = nil
-	}
-	// unable to generate simple pointer conversion for api.AzureFileVolumeSource -> v1.AzureFileVolumeSource
-	if in.AzureFile != nil {
-		out.AzureFile = new(v1.AzureFileVolumeSource)
-		if err := Convert_api_AzureFileVolumeSource_To_v1_AzureFileVolumeSource(in.AzureFile, out.AzureFile, s); err != nil {
-			return err
-		}
-	} else {
-		out.AzureFile = nil
-	}
-	// unable to generate simple pointer conversion for api.ConfigMapVolumeSource -> v1.ConfigMapVolumeSource
-	if in.ConfigMap != nil {
-		out.ConfigMap = new(v1.ConfigMapVolumeSource)
-		if err := Convert_api_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource(in.ConfigMap, out.ConfigMap, s); err != nil {
-			return err
-		}
-	} else {
-		out.ConfigMap = nil
-	}
-	return nil
-}
-
-func Convert_api_VolumeSource_To_v1_VolumeSource(in *api.VolumeSource, out *v1.VolumeSource, s conversion.Scope) error {
-	return autoConvert_api_VolumeSource_To_v1_VolumeSource(in, out, s)
-}
-
-func autoConvert_unversioned_LabelSelector_To_v1beta1_LabelSelector(in *unversioned.LabelSelector, out *LabelSelector, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*unversioned.LabelSelector))(in)
-	}
-	if in.MatchLabels != nil {
-		out.MatchLabels = make(map[string]string)
-		for key, val := range in.MatchLabels {
-			out.MatchLabels[key] = val
-		}
-	} else {
-		out.MatchLabels = nil
-	}
-	if in.MatchExpressions != nil {
-		out.MatchExpressions = make([]LabelSelectorRequirement, len(in.MatchExpressions))
-		for i := range in.MatchExpressions {
-			if err := Convert_unversioned_LabelSelectorRequirement_To_v1beta1_LabelSelectorRequirement(&in.MatchExpressions[i], &out.MatchExpressions[i], s); err != nil {
-				return err
-			}
-		}
-	} else {
-		out.MatchExpressions = nil
-	}
-	return nil
-}
-
-func Convert_unversioned_LabelSelector_To_v1beta1_LabelSelector(in *unversioned.LabelSelector, out *LabelSelector, s conversion.Scope) error {
-	return autoConvert_unversioned_LabelSelector_To_v1beta1_LabelSelector(in, out, s)
-}
-
-func autoConvert_unversioned_LabelSelectorRequirement_To_v1beta1_LabelSelectorRequirement(in *unversioned.LabelSelectorRequirement, out *LabelSelectorRequirement, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*unversioned.LabelSelectorRequirement))(in)
-	}
-	out.Key = in.Key
-	out.Operator = LabelSelectorOperator(in.Operator)
-	if in.Values != nil {
-		out.Values = make([]string, len(in.Values))
-		for i := range in.Values {
-			out.Values[i] = in.Values[i]
-		}
-	} else {
-		out.Values = nil
-	}
-	return nil
-}
-
-func Convert_unversioned_LabelSelectorRequirement_To_v1beta1_LabelSelectorRequirement(in *unversioned.LabelSelectorRequirement, out *LabelSelectorRequirement, s conversion.Scope) error {
-	return autoConvert_unversioned_LabelSelectorRequirement_To_v1beta1_LabelSelectorRequirement(in, out, s)
-}
-
-func autoConvert_v1_AWSElasticBlockStoreVolumeSource_To_api_AWSElasticBlockStoreVolumeSource(in *v1.AWSElasticBlockStoreVolumeSource, out *api.AWSElasticBlockStoreVolumeSource, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.AWSElasticBlockStoreVolumeSource))(in)
-	}
-	out.VolumeID = in.VolumeID
-	out.FSType = in.FSType
-	out.Partition = int(in.Partition)
-	out.ReadOnly = in.ReadOnly
-	return nil
-}
-
-func Convert_v1_AWSElasticBlockStoreVolumeSource_To_api_AWSElasticBlockStoreVolumeSource(in *v1.AWSElasticBlockStoreVolumeSource, out *api.AWSElasticBlockStoreVolumeSource, s conversion.Scope) error {
-	return autoConvert_v1_AWSElasticBlockStoreVolumeSource_To_api_AWSElasticBlockStoreVolumeSource(in, out, s)
-}
-
-func autoConvert_v1_AzureFileVolumeSource_To_api_AzureFileVolumeSource(in *v1.AzureFileVolumeSource, out *api.AzureFileVolumeSource, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.AzureFileVolumeSource))(in)
-	}
-	out.SecretName = in.SecretName
-	out.ShareName = in.ShareName
-	out.ReadOnly = in.ReadOnly
-	return nil
-}
-
-func Convert_v1_AzureFileVolumeSource_To_api_AzureFileVolumeSource(in *v1.AzureFileVolumeSource, out *api.AzureFileVolumeSource, s conversion.Scope) error {
-	return autoConvert_v1_AzureFileVolumeSource_To_api_AzureFileVolumeSource(in, out, s)
-}
-
-func autoConvert_v1_Capabilities_To_api_Capabilities(in *v1.Capabilities, out *api.Capabilities, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.Capabilities))(in)
-	}
-	if in.Add != nil {
-		out.Add = make([]api.Capability, len(in.Add))
-		for i := range in.Add {
-			out.Add[i] = api.Capability(in.Add[i])
-		}
-	} else {
-		out.Add = nil
-	}
-	if in.Drop != nil {
-		out.Drop = make([]api.Capability, len(in.Drop))
-		for i := range in.Drop {
-			out.Drop[i] = api.Capability(in.Drop[i])
-		}
-	} else {
-		out.Drop = nil
-	}
-	return nil
-}
-
-func Convert_v1_Capabilities_To_api_Capabilities(in *v1.Capabilities, out *api.Capabilities, s conversion.Scope) error {
-	return autoConvert_v1_Capabilities_To_api_Capabilities(in, out, s)
-}
-
-func autoConvert_v1_CephFSVolumeSource_To_api_CephFSVolumeSource(in *v1.CephFSVolumeSource, out *api.CephFSVolumeSource, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.CephFSVolumeSource))(in)
-	}
-	if in.Monitors != nil {
-		out.Monitors = make([]string, len(in.Monitors))
-		for i := range in.Monitors {
-			out.Monitors[i] = in.Monitors[i]
-		}
-	} else {
-		out.Monitors = nil
-	}
-	out.Path = in.Path
-	out.User = in.User
-	out.SecretFile = in.SecretFile
-	// unable to generate simple pointer conversion for v1.LocalObjectReference -> api.LocalObjectReference
-	if in.SecretRef != nil {
-		out.SecretRef = new(api.LocalObjectReference)
-		if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(in.SecretRef, out.SecretRef, s); err != nil {
-			return err
-		}
-	} else {
-		out.SecretRef = nil
-	}
-	out.ReadOnly = in.ReadOnly
-	return nil
-}
-
-func Convert_v1_CephFSVolumeSource_To_api_CephFSVolumeSource(in *v1.CephFSVolumeSource, out *api.CephFSVolumeSource, s conversion.Scope) error {
-	return autoConvert_v1_CephFSVolumeSource_To_api_CephFSVolumeSource(in, out, s)
-}
-
-func autoConvert_v1_CinderVolumeSource_To_api_CinderVolumeSource(in *v1.CinderVolumeSource, out *api.CinderVolumeSource, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.CinderVolumeSource))(in)
-	}
-	out.VolumeID = in.VolumeID
-	out.FSType = in.FSType
-	out.ReadOnly = in.ReadOnly
-	return nil
-}
-
-func Convert_v1_CinderVolumeSource_To_api_CinderVolumeSource(in *v1.CinderVolumeSource, out *api.CinderVolumeSource, s conversion.Scope) error {
-	return autoConvert_v1_CinderVolumeSource_To_api_CinderVolumeSource(in, out, s)
-}
-
-func autoConvert_v1_ConfigMapKeySelector_To_api_ConfigMapKeySelector(in *v1.ConfigMapKeySelector, out *api.ConfigMapKeySelector, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.ConfigMapKeySelector))(in)
-	}
-	if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil {
-		return err
-	}
-	out.Key = in.Key
-	return nil
-}
-
-func Convert_v1_ConfigMapKeySelector_To_api_ConfigMapKeySelector(in *v1.ConfigMapKeySelector, out *api.ConfigMapKeySelector, s conversion.Scope) error {
-	return autoConvert_v1_ConfigMapKeySelector_To_api_ConfigMapKeySelector(in, out, s)
-}
-
-func autoConvert_v1_ConfigMapVolumeSource_To_api_ConfigMapVolumeSource(in *v1.ConfigMapVolumeSource, out *api.ConfigMapVolumeSource, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.ConfigMapVolumeSource))(in)
-	}
-	if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil {
-		return err
-	}
-	if in.Items != nil {
-		out.Items = make([]api.KeyToPath, len(in.Items))
-		for i := range in.Items {
-			if err := Convert_v1_KeyToPath_To_api_KeyToPath(&in.Items[i], &out.Items[i], s); err != nil {
-				return err
-			}
-		}
-	} else {
-		out.Items = nil
-	}
-	return nil
-}
-
-func Convert_v1_ConfigMapVolumeSource_To_api_ConfigMapVolumeSource(in *v1.ConfigMapVolumeSource, out *api.ConfigMapVolumeSource, s conversion.Scope) error {
-	return autoConvert_v1_ConfigMapVolumeSource_To_api_ConfigMapVolumeSource(in, out, s)
-}
-
-func autoConvert_v1_Container_To_api_Container(in *v1.Container, out *api.Container, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.Container))(in)
-	}
-	out.Name = in.Name
-	out.Image = in.Image
-	if in.Command != nil {
-		out.Command = make([]string, len(in.Command))
-		for i := range in.Command {
-			out.Command[i] = in.Command[i]
-		}
-	} else {
-		out.Command = nil
-	}
-	if in.Args != nil {
-		out.Args = make([]string, len(in.Args))
-		for i := range in.Args {
-			out.Args[i] = in.Args[i]
-		}
-	} else {
-		out.Args = nil
-	}
-	out.WorkingDir = in.WorkingDir
-	if in.Ports != nil {
-		out.Ports = make([]api.ContainerPort, len(in.Ports))
-		for i := range in.Ports {
-			if err := Convert_v1_ContainerPort_To_api_ContainerPort(&in.Ports[i], &out.Ports[i], s); err != nil {
-				return err
-			}
-		}
-	} else {
-		out.Ports = nil
-	}
-	if in.Env != nil {
-		out.Env = make([]api.EnvVar, len(in.Env))
-		for i := range in.Env {
-			if err := Convert_v1_EnvVar_To_api_EnvVar(&in.Env[i], &out.Env[i], s); err != nil {
-				return err
-			}
-		}
-	} else {
-		out.Env = nil
-	}
-	if err := Convert_v1_ResourceRequirements_To_api_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil {
-		return err
-	}
-	if in.VolumeMounts != nil {
-		out.VolumeMounts = make([]api.VolumeMount, len(in.VolumeMounts))
-		for i := range in.VolumeMounts {
-			if err := Convert_v1_VolumeMount_To_api_VolumeMount(&in.VolumeMounts[i], &out.VolumeMounts[i], s); err != nil {
-				return err
-			}
-		}
-	} else {
-		out.VolumeMounts = nil
-	}
-	// unable to generate simple pointer conversion for v1.Probe -> api.Probe
-	if in.LivenessProbe != nil {
-		out.LivenessProbe = new(api.Probe)
-		if err := Convert_v1_Probe_To_api_Probe(in.LivenessProbe, out.LivenessProbe, s); err != nil {
-			return err
-		}
-	} else {
-		out.LivenessProbe = nil
-	}
-	// unable to generate simple pointer conversion for v1.Probe -> api.Probe
-	if in.ReadinessProbe != nil {
-		out.ReadinessProbe = new(api.Probe)
-		if err := Convert_v1_Probe_To_api_Probe(in.ReadinessProbe, out.ReadinessProbe, s); err != nil {
-			return err
-		}
-	} else {
-		out.ReadinessProbe = nil
-	}
-	// unable to generate simple pointer conversion for v1.Lifecycle -> api.Lifecycle
-	if in.Lifecycle != nil {
-		out.Lifecycle = new(api.Lifecycle)
-		if err := Convert_v1_Lifecycle_To_api_Lifecycle(in.Lifecycle, out.Lifecycle, s); err != nil {
-			return err
-		}
-	} else {
-		out.Lifecycle = nil
-	}
-	out.TerminationMessagePath = in.TerminationMessagePath
-	out.ImagePullPolicy = api.PullPolicy(in.ImagePullPolicy)
-	// unable to generate simple pointer conversion for v1.SecurityContext -> api.SecurityContext
-	if in.SecurityContext != nil {
-		out.SecurityContext = new(api.SecurityContext)
-		if err := Convert_v1_SecurityContext_To_api_SecurityContext(in.SecurityContext, out.SecurityContext, s); err != nil {
-			return err
-		}
-	} else {
-		out.SecurityContext = nil
-	}
-	out.Stdin = in.Stdin
-	out.StdinOnce = in.StdinOnce
-	out.TTY = in.TTY
-	return nil
-}
-
-func Convert_v1_Container_To_api_Container(in *v1.Container, out *api.Container, s conversion.Scope) error {
-	return autoConvert_v1_Container_To_api_Container(in, out, s)
-}
-
-func autoConvert_v1_ContainerPort_To_api_ContainerPort(in *v1.ContainerPort, out *api.ContainerPort, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.ContainerPort))(in)
-	}
-	out.Name = in.Name
-	out.HostPort = int(in.HostPort)
-	out.ContainerPort = int(in.ContainerPort)
-	out.Protocol = api.Protocol(in.Protocol)
-	out.HostIP = in.HostIP
-	return nil
-}
-
-func Convert_v1_ContainerPort_To_api_ContainerPort(in *v1.ContainerPort, out *api.ContainerPort, s conversion.Scope) error {
-	return autoConvert_v1_ContainerPort_To_api_ContainerPort(in, out, s)
-}
-
-func autoConvert_v1_DownwardAPIVolumeFile_To_api_DownwardAPIVolumeFile(in *v1.DownwardAPIVolumeFile, out *api.DownwardAPIVolumeFile, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.DownwardAPIVolumeFile))(in)
-	}
-	out.Path = in.Path
-	if err := Convert_v1_ObjectFieldSelector_To_api_ObjectFieldSelector(&in.FieldRef, &out.FieldRef, s); err != nil {
-		return err
-	}
-	return nil
-}
-
-func Convert_v1_DownwardAPIVolumeFile_To_api_DownwardAPIVolumeFile(in *v1.DownwardAPIVolumeFile, out *api.DownwardAPIVolumeFile, s conversion.Scope) error {
-	return autoConvert_v1_DownwardAPIVolumeFile_To_api_DownwardAPIVolumeFile(in, out, s)
-}
-
-func autoConvert_v1_DownwardAPIVolumeSource_To_api_DownwardAPIVolumeSource(in *v1.DownwardAPIVolumeSource, out *api.DownwardAPIVolumeSource, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.DownwardAPIVolumeSource))(in)
-	}
-	if in.Items != nil {
-		out.Items = make([]api.DownwardAPIVolumeFile, len(in.Items))
-		for i := range in.Items {
-			if err := Convert_v1_DownwardAPIVolumeFile_To_api_DownwardAPIVolumeFile(&in.Items[i], &out.Items[i], s); err != nil {
-				return err
-			}
-		}
-	} else {
-		out.Items = nil
-	}
-	return nil
-}
-
-func Convert_v1_DownwardAPIVolumeSource_To_api_DownwardAPIVolumeSource(in *v1.DownwardAPIVolumeSource, out *api.DownwardAPIVolumeSource, s conversion.Scope) error {
-	return autoConvert_v1_DownwardAPIVolumeSource_To_api_DownwardAPIVolumeSource(in, out, s)
-}
-
-func autoConvert_v1_EmptyDirVolumeSource_To_api_EmptyDirVolumeSource(in *v1.EmptyDirVolumeSource, out *api.EmptyDirVolumeSource, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.EmptyDirVolumeSource))(in)
-	}
-	out.Medium = api.StorageMedium(in.Medium)
-	return nil
-}
-
-func Convert_v1_EmptyDirVolumeSource_To_api_EmptyDirVolumeSource(in *v1.EmptyDirVolumeSource, out *api.EmptyDirVolumeSource, s conversion.Scope) error {
-	return autoConvert_v1_EmptyDirVolumeSource_To_api_EmptyDirVolumeSource(in, out, s)
-}
-
-func autoConvert_v1_EnvVar_To_api_EnvVar(in *v1.EnvVar, out *api.EnvVar, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.EnvVar))(in)
-	}
-	out.Name = in.Name
-	out.Value = in.Value
-	// unable to generate simple pointer conversion for v1.EnvVarSource -> api.EnvVarSource
-	if in.ValueFrom != nil {
-		out.ValueFrom = new(api.EnvVarSource)
-		if err := Convert_v1_EnvVarSource_To_api_EnvVarSource(in.ValueFrom, out.ValueFrom, s); err != nil {
-			return err
-		}
-	} else {
-		out.ValueFrom = nil
-	}
-	return nil
-}
-
-func Convert_v1_EnvVar_To_api_EnvVar(in *v1.EnvVar, out *api.EnvVar, s conversion.Scope) error {
-	return autoConvert_v1_EnvVar_To_api_EnvVar(in, out, s)
-}
-
-func autoConvert_v1_EnvVarSource_To_api_EnvVarSource(in *v1.EnvVarSource, out *api.EnvVarSource, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.EnvVarSource))(in)
-	}
-	// unable to generate simple pointer conversion for v1.ObjectFieldSelector -> api.ObjectFieldSelector
-	if in.FieldRef != nil {
-		out.FieldRef = new(api.ObjectFieldSelector)
-		if err := Convert_v1_ObjectFieldSelector_To_api_ObjectFieldSelector(in.FieldRef, out.FieldRef, s); err != nil {
-			return err
-		}
-	} else {
-		out.FieldRef = nil
-	}
-	// unable to generate simple pointer conversion for v1.ConfigMapKeySelector -> api.ConfigMapKeySelector
-	if in.ConfigMapKeyRef != nil {
-		out.ConfigMapKeyRef = new(api.ConfigMapKeySelector)
-		if err := Convert_v1_ConfigMapKeySelector_To_api_ConfigMapKeySelector(in.ConfigMapKeyRef, out.ConfigMapKeyRef, s); err != nil {
-			return err
-		}
-	} else {
-		out.ConfigMapKeyRef = nil
-	}
-	// unable to generate simple pointer conversion for v1.SecretKeySelector -> api.SecretKeySelector
-	if in.SecretKeyRef != nil {
-		out.SecretKeyRef = new(api.SecretKeySelector)
-		if err := Convert_v1_SecretKeySelector_To_api_SecretKeySelector(in.SecretKeyRef, out.SecretKeyRef, s); err != nil {
-			return err
-		}
-	} else {
-		out.SecretKeyRef = nil
-	}
-	return nil
-}
-
-func Convert_v1_EnvVarSource_To_api_EnvVarSource(in *v1.EnvVarSource, out *api.EnvVarSource, s conversion.Scope) error {
-	return autoConvert_v1_EnvVarSource_To_api_EnvVarSource(in, out, s)
-}
-
-func autoConvert_v1_ExecAction_To_api_ExecAction(in *v1.ExecAction, out *api.ExecAction, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.ExecAction))(in)
-	}
-	if in.Command != nil {
-		out.Command = make([]string, len(in.Command))
-		for i := range in.Command {
-			out.Command[i] = in.Command[i]
-		}
-	} else {
-		out.Command = nil
-	}
-	return nil
-}
-
-func Convert_v1_ExecAction_To_api_ExecAction(in *v1.ExecAction, out *api.ExecAction, s conversion.Scope) error {
-	return autoConvert_v1_ExecAction_To_api_ExecAction(in, out, s)
-}
-
-func autoConvert_v1_FCVolumeSource_To_api_FCVolumeSource(in *v1.FCVolumeSource, out *api.FCVolumeSource, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.FCVolumeSource))(in)
-	}
-	if in.TargetWWNs != nil {
-		out.TargetWWNs = make([]string, len(in.TargetWWNs))
-		for i := range in.TargetWWNs {
-			out.TargetWWNs[i] = in.TargetWWNs[i]
-		}
-	} else {
-		out.TargetWWNs = nil
-	}
-	if in.Lun != nil {
-		out.Lun = new(int)
-		*out.Lun = int(*in.Lun)
-	} else {
-		out.Lun = nil
-	}
-	out.FSType = in.FSType
-	out.ReadOnly = in.ReadOnly
-	return nil
-}
-
-func Convert_v1_FCVolumeSource_To_api_FCVolumeSource(in *v1.FCVolumeSource, out *api.FCVolumeSource, s conversion.Scope) error {
-	return autoConvert_v1_FCVolumeSource_To_api_FCVolumeSource(in, out, s)
-}
-
-func autoConvert_v1_FlexVolumeSource_To_api_FlexVolumeSource(in *v1.FlexVolumeSource, out *api.FlexVolumeSource, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.FlexVolumeSource))(in)
-	}
-	out.Driver = in.Driver
-	out.FSType = in.FSType
-	// unable to generate simple pointer conversion for v1.LocalObjectReference -> api.LocalObjectReference
-	if in.SecretRef != nil {
-		out.SecretRef = new(api.LocalObjectReference)
-		if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(in.SecretRef, out.SecretRef, s); err != nil {
-			return err
-		}
-	} else {
-		out.SecretRef = nil
-	}
-	out.ReadOnly = in.ReadOnly
-	if in.Options != nil {
-		out.Options = make(map[string]string)
-		for key, val := range in.Options {
-			out.Options[key] = val
-		}
-	} else {
-		out.Options = nil
-	}
-	return nil
-}
-
-func Convert_v1_FlexVolumeSource_To_api_FlexVolumeSource(in *v1.FlexVolumeSource, out *api.FlexVolumeSource, s conversion.Scope) error {
-	return autoConvert_v1_FlexVolumeSource_To_api_FlexVolumeSource(in, out, s)
-}
-
-func autoConvert_v1_FlockerVolumeSource_To_api_FlockerVolumeSource(in *v1.FlockerVolumeSource, out *api.FlockerVolumeSource, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.FlockerVolumeSource))(in)
-	}
-	out.DatasetName = in.DatasetName
-	return nil
-}
-
-func Convert_v1_FlockerVolumeSource_To_api_FlockerVolumeSource(in *v1.FlockerVolumeSource, out *api.FlockerVolumeSource, s conversion.Scope) error {
-	return autoConvert_v1_FlockerVolumeSource_To_api_FlockerVolumeSource(in, out, s)
-}
-
-func autoConvert_v1_GCEPersistentDiskVolumeSource_To_api_GCEPersistentDiskVolumeSource(in *v1.GCEPersistentDiskVolumeSource, out *api.GCEPersistentDiskVolumeSource, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.GCEPersistentDiskVolumeSource))(in)
-	}
-	out.PDName = in.PDName
-	out.FSType = in.FSType
-	out.Partition = int(in.Partition)
-	out.ReadOnly = in.ReadOnly
-	return nil
-}
-
-func Convert_v1_GCEPersistentDiskVolumeSource_To_api_GCEPersistentDiskVolumeSource(in *v1.GCEPersistentDiskVolumeSource, out *api.GCEPersistentDiskVolumeSource, s conversion.Scope) error {
-	return autoConvert_v1_GCEPersistentDiskVolumeSource_To_api_GCEPersistentDiskVolumeSource(in, out, s)
-}
-
-func autoConvert_v1_GitRepoVolumeSource_To_api_GitRepoVolumeSource(in *v1.GitRepoVolumeSource, out *api.GitRepoVolumeSource, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.GitRepoVolumeSource))(in)
-	}
-	out.Repository = in.Repository
-	out.Revision = in.Revision
-	out.Directory = in.Directory
-	return nil
-}
-
-func Convert_v1_GitRepoVolumeSource_To_api_GitRepoVolumeSource(in *v1.GitRepoVolumeSource, out *api.GitRepoVolumeSource, s conversion.Scope) error {
-	return autoConvert_v1_GitRepoVolumeSource_To_api_GitRepoVolumeSource(in, out, s)
-}
-
-func autoConvert_v1_GlusterfsVolumeSource_To_api_GlusterfsVolumeSource(in *v1.GlusterfsVolumeSource, out *api.GlusterfsVolumeSource, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.GlusterfsVolumeSource))(in)
-	}
-	out.EndpointsName = in.EndpointsName
-	out.Path = in.Path
-	out.ReadOnly = in.ReadOnly
-	return nil
-}
-
-func Convert_v1_GlusterfsVolumeSource_To_api_GlusterfsVolumeSource(in *v1.GlusterfsVolumeSource, out *api.GlusterfsVolumeSource, s conversion.Scope) error {
-	return autoConvert_v1_GlusterfsVolumeSource_To_api_GlusterfsVolumeSource(in, out, s)
-}
-
-func autoConvert_v1_HTTPGetAction_To_api_HTTPGetAction(in *v1.HTTPGetAction, out *api.HTTPGetAction, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.HTTPGetAction))(in)
-	}
-	out.Path = in.Path
-	if err := api.Convert_intstr_IntOrString_To_intstr_IntOrString(&in.Port, &out.Port, s); err != nil {
-		return err
-	}
-	out.Host = in.Host
-	out.Scheme = api.URIScheme(in.Scheme)
-	if in.HTTPHeaders != nil {
-		out.HTTPHeaders = make([]api.HTTPHeader, len(in.HTTPHeaders))
-		for i := range in.HTTPHeaders {
-			if err := Convert_v1_HTTPHeader_To_api_HTTPHeader(&in.HTTPHeaders[i], &out.HTTPHeaders[i], s); err != nil {
-				return err
-			}
-		}
-	} else {
-		out.HTTPHeaders = nil
-	}
-	return nil
-}
-
-func Convert_v1_HTTPGetAction_To_api_HTTPGetAction(in *v1.HTTPGetAction, out *api.HTTPGetAction, s conversion.Scope) error {
-	return autoConvert_v1_HTTPGetAction_To_api_HTTPGetAction(in, out, s)
-}
-
-func autoConvert_v1_HTTPHeader_To_api_HTTPHeader(in *v1.HTTPHeader, out *api.HTTPHeader, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.HTTPHeader))(in)
-	}
-	out.Name = in.Name
-	out.Value = in.Value
-	return nil
-}
-
-func Convert_v1_HTTPHeader_To_api_HTTPHeader(in *v1.HTTPHeader, out *api.HTTPHeader, s conversion.Scope) error {
-	return autoConvert_v1_HTTPHeader_To_api_HTTPHeader(in, out, s)
-}
-
-func autoConvert_v1_Handler_To_api_Handler(in *v1.Handler, out *api.Handler, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.Handler))(in)
-	}
-	// unable to generate simple pointer conversion for v1.ExecAction -> api.ExecAction
-	if in.Exec != nil {
-		out.Exec = new(api.ExecAction)
-		if err := Convert_v1_ExecAction_To_api_ExecAction(in.Exec, out.Exec, s); err != nil {
-			return err
-		}
-	} else {
-		out.Exec = nil
-	}
-	// unable to generate simple pointer conversion for v1.HTTPGetAction -> api.HTTPGetAction
-	if in.HTTPGet != nil {
-		out.HTTPGet = new(api.HTTPGetAction)
-		if err := Convert_v1_HTTPGetAction_To_api_HTTPGetAction(in.HTTPGet, out.HTTPGet, s); err != nil {
-			return err
-		}
-	} else {
-		out.HTTPGet = nil
-	}
-	// unable to generate simple pointer conversion for v1.TCPSocketAction -> api.TCPSocketAction
-	if in.TCPSocket != nil {
-		out.TCPSocket = new(api.TCPSocketAction)
-		if err := Convert_v1_TCPSocketAction_To_api_TCPSocketAction(in.TCPSocket, out.TCPSocket, s); err != nil {
-			return err
-		}
-	} else {
-		out.TCPSocket = nil
-	}
-	return nil
-}
-
-func Convert_v1_Handler_To_api_Handler(in *v1.Handler, out *api.Handler, s conversion.Scope) error {
-	return autoConvert_v1_Handler_To_api_Handler(in, out, s)
-}
-
-func autoConvert_v1_HostPathVolumeSource_To_api_HostPathVolumeSource(in *v1.HostPathVolumeSource, out *api.HostPathVolumeSource, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.HostPathVolumeSource))(in)
-	}
-	out.Path = in.Path
-	return nil
-}
-
-func Convert_v1_HostPathVolumeSource_To_api_HostPathVolumeSource(in *v1.HostPathVolumeSource, out *api.HostPathVolumeSource, s conversion.Scope) error {
-	return autoConvert_v1_HostPathVolumeSource_To_api_HostPathVolumeSource(in, out, s)
-}
-
-func autoConvert_v1_ISCSIVolumeSource_To_api_ISCSIVolumeSource(in *v1.ISCSIVolumeSource, out *api.ISCSIVolumeSource, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.ISCSIVolumeSource))(in)
-	}
-	out.TargetPortal = in.TargetPortal
-	out.IQN = in.IQN
-	out.Lun = int(in.Lun)
-	out.ISCSIInterface = in.ISCSIInterface
-	out.FSType = in.FSType
-	out.ReadOnly = in.ReadOnly
-	return nil
-}
-
-func Convert_v1_ISCSIVolumeSource_To_api_ISCSIVolumeSource(in *v1.ISCSIVolumeSource, out *api.ISCSIVolumeSource, s conversion.Scope) error {
-	return autoConvert_v1_ISCSIVolumeSource_To_api_ISCSIVolumeSource(in, out, s)
-}
-
-func autoConvert_v1_KeyToPath_To_api_KeyToPath(in *v1.KeyToPath, out *api.KeyToPath, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.KeyToPath))(in)
-	}
-	out.Key = in.Key
-	out.Path = in.Path
-	return nil
-}
-
-func Convert_v1_KeyToPath_To_api_KeyToPath(in *v1.KeyToPath, out *api.KeyToPath, s conversion.Scope) error {
-	return autoConvert_v1_KeyToPath_To_api_KeyToPath(in, out, s)
-}
-
-func autoConvert_v1_Lifecycle_To_api_Lifecycle(in *v1.Lifecycle, out *api.Lifecycle, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.Lifecycle))(in)
-	}
-	// unable to generate simple pointer conversion for v1.Handler -> api.Handler
-	if in.PostStart != nil {
-		out.PostStart = new(api.Handler)
-		if err := Convert_v1_Handler_To_api_Handler(in.PostStart, out.PostStart, s); err != nil {
-			return err
-		}
-	} else {
-		out.PostStart = nil
-	}
-	// unable to generate simple pointer conversion for v1.Handler -> api.Handler
-	if in.PreStop != nil {
-		out.PreStop = new(api.Handler)
-		if err := Convert_v1_Handler_To_api_Handler(in.PreStop, out.PreStop, s); err != nil {
-			return err
-		}
-	} else {
-		out.PreStop = nil
-	}
-	return nil
-}
-
-func Convert_v1_Lifecycle_To_api_Lifecycle(in *v1.Lifecycle, out *api.Lifecycle, s conversion.Scope) error {
-	return autoConvert_v1_Lifecycle_To_api_Lifecycle(in, out, s)
-}
-
-func autoConvert_v1_LoadBalancerIngress_To_api_LoadBalancerIngress(in *v1.LoadBalancerIngress, out *api.LoadBalancerIngress, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.LoadBalancerIngress))(in)
-	}
-	out.IP = in.IP
-	out.Hostname = in.Hostname
-	return nil
-}
-
-func Convert_v1_LoadBalancerIngress_To_api_LoadBalancerIngress(in *v1.LoadBalancerIngress, out *api.LoadBalancerIngress, s conversion.Scope) error {
-	return autoConvert_v1_LoadBalancerIngress_To_api_LoadBalancerIngress(in, out, s)
-}
-
-func autoConvert_v1_LoadBalancerStatus_To_api_LoadBalancerStatus(in *v1.LoadBalancerStatus, out *api.LoadBalancerStatus, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.LoadBalancerStatus))(in)
-	}
-	if in.Ingress != nil {
-		out.Ingress = make([]api.LoadBalancerIngress, len(in.Ingress))
-		for i := range in.Ingress {
-			if err := Convert_v1_LoadBalancerIngress_To_api_LoadBalancerIngress(&in.Ingress[i], &out.Ingress[i], s); err != nil {
-				return err
-			}
-		}
-	} else {
-		out.Ingress = nil
-	}
-	return nil
-}
-
-func Convert_v1_LoadBalancerStatus_To_api_LoadBalancerStatus(in *v1.LoadBalancerStatus, out *api.LoadBalancerStatus, s conversion.Scope) error {
-	return autoConvert_v1_LoadBalancerStatus_To_api_LoadBalancerStatus(in, out, s)
-}
-
-func autoConvert_v1_LocalObjectReference_To_api_LocalObjectReference(in *v1.LocalObjectReference, out *api.LocalObjectReference, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.LocalObjectReference))(in)
-	}
-	out.Name = in.Name
-	return nil
-}
-
-func Convert_v1_LocalObjectReference_To_api_LocalObjectReference(in *v1.LocalObjectReference, out *api.LocalObjectReference, s conversion.Scope) error {
-	return autoConvert_v1_LocalObjectReference_To_api_LocalObjectReference(in, out, s)
-}
-
-func autoConvert_v1_NFSVolumeSource_To_api_NFSVolumeSource(in *v1.NFSVolumeSource, out *api.NFSVolumeSource, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.NFSVolumeSource))(in)
-	}
-	out.Server = in.Server
-	out.Path = in.Path
-	out.ReadOnly = in.ReadOnly
-	return nil
-}
-
-func Convert_v1_NFSVolumeSource_To_api_NFSVolumeSource(in *v1.NFSVolumeSource, out *api.NFSVolumeSource, s conversion.Scope) error {
-	return autoConvert_v1_NFSVolumeSource_To_api_NFSVolumeSource(in, out, s)
-}
-
-func autoConvert_v1_ObjectFieldSelector_To_api_ObjectFieldSelector(in *v1.ObjectFieldSelector, out *api.ObjectFieldSelector, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.ObjectFieldSelector))(in)
-	}
-	out.APIVersion = in.APIVersion
-	out.FieldPath = in.FieldPath
-	return nil
-}
-
-func Convert_v1_ObjectFieldSelector_To_api_ObjectFieldSelector(in *v1.ObjectFieldSelector, out *api.ObjectFieldSelector, s conversion.Scope) error {
-	return autoConvert_v1_ObjectFieldSelector_To_api_ObjectFieldSelector(in, out, s)
-}
-
-func autoConvert_v1_ObjectMeta_To_api_ObjectMeta(in *v1.ObjectMeta, out *api.ObjectMeta, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.ObjectMeta))(in)
-	}
-	out.Name = in.Name
-	out.GenerateName = in.GenerateName
-	out.Namespace = in.Namespace
-	out.SelfLink = in.SelfLink
-	out.UID = in.UID
-	out.ResourceVersion = in.ResourceVersion
-	out.Generation = in.Generation
-	if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.CreationTimestamp, &out.CreationTimestamp, s); err != nil {
-		return err
-	}
-	// unable to generate simple pointer conversion for unversioned.Time -> unversioned.Time
-	if in.DeletionTimestamp != nil {
-		out.DeletionTimestamp = new(unversioned.Time)
-		if err := api.Convert_unversioned_Time_To_unversioned_Time(in.DeletionTimestamp, out.DeletionTimestamp, s); err != nil {
-			return err
-		}
-	} else {
-		out.DeletionTimestamp = nil
-	}
-	if in.DeletionGracePeriodSeconds != nil {
-		out.DeletionGracePeriodSeconds = new(int64)
-		*out.DeletionGracePeriodSeconds = *in.DeletionGracePeriodSeconds
-	} else {
-		out.DeletionGracePeriodSeconds = nil
-	}
-	if in.Labels != nil {
-		out.Labels = make(map[string]string)
-		for key, val := range in.Labels {
-			out.Labels[key] = val
-		}
-	} else {
-		out.Labels = nil
-	}
-	if in.Annotations != nil {
-		out.Annotations = make(map[string]string)
-		for key, val := range in.Annotations {
-			out.Annotations[key] = val
-		}
-	} else {
-		out.Annotations = nil
-	}
-	return nil
-}
-
-func Convert_v1_ObjectMeta_To_api_ObjectMeta(in *v1.ObjectMeta, out *api.ObjectMeta, s conversion.Scope) error {
-	return autoConvert_v1_ObjectMeta_To_api_ObjectMeta(in, out, s)
-}
-
-func autoConvert_v1_PersistentVolumeClaimVolumeSource_To_api_PersistentVolumeClaimVolumeSource(in *v1.PersistentVolumeClaimVolumeSource, out *api.PersistentVolumeClaimVolumeSource, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.PersistentVolumeClaimVolumeSource))(in)
-	}
-	out.ClaimName = in.ClaimName
-	out.ReadOnly = in.ReadOnly
-	return nil
-}
-
-func Convert_v1_PersistentVolumeClaimVolumeSource_To_api_PersistentVolumeClaimVolumeSource(in *v1.PersistentVolumeClaimVolumeSource, out *api.PersistentVolumeClaimVolumeSource, s conversion.Scope) error {
-	return autoConvert_v1_PersistentVolumeClaimVolumeSource_To_api_PersistentVolumeClaimVolumeSource(in, out, s)
-}
-
-func autoConvert_v1_PodSpec_To_api_PodSpec(in *v1.PodSpec, out *api.PodSpec, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.PodSpec))(in)
-	}
-	if in.Volumes != nil {
-		out.Volumes = make([]api.Volume, len(in.Volumes))
-		for i := range in.Volumes {
-			if err := Convert_v1_Volume_To_api_Volume(&in.Volumes[i], &out.Volumes[i], s); err != nil {
-				return err
-			}
-		}
-	} else {
-		out.Volumes = nil
-	}
-	if in.Containers != nil {
-		out.Containers = make([]api.Container, len(in.Containers))
-		for i := range in.Containers {
-			if err := Convert_v1_Container_To_api_Container(&in.Containers[i], &out.Containers[i], s); err != nil {
-				return err
-			}
-		}
-	} else {
-		out.Containers = nil
-	}
-	out.RestartPolicy = api.RestartPolicy(in.RestartPolicy)
-	if in.TerminationGracePeriodSeconds != nil {
-		out.TerminationGracePeriodSeconds = new(int64)
-		*out.TerminationGracePeriodSeconds = *in.TerminationGracePeriodSeconds
-	} else {
-		out.TerminationGracePeriodSeconds = nil
-	}
-	if in.ActiveDeadlineSeconds != nil {
-		out.ActiveDeadlineSeconds = new(int64)
-		*out.ActiveDeadlineSeconds = *in.ActiveDeadlineSeconds
-	} else {
-		out.ActiveDeadlineSeconds = nil
-	}
-	out.DNSPolicy = api.DNSPolicy(in.DNSPolicy)
-	if in.NodeSelector != nil {
-		out.NodeSelector = make(map[string]string)
-		for key, val := range in.NodeSelector {
-			out.NodeSelector[key] = val
-		}
-	} else {
-		out.NodeSelector = nil
-	}
-	out.ServiceAccountName = in.ServiceAccountName
-	// in.DeprecatedServiceAccount has no peer in out
-	out.NodeName = in.NodeName
-	// in.HostNetwork has no peer in out
-	// in.HostPID has no peer in out
-	// in.HostIPC has no peer in out
-	// unable to generate simple pointer conversion for v1.PodSecurityContext -> api.PodSecurityContext
-	if in.SecurityContext != nil {
-		if err := s.Convert(&in.SecurityContext, &out.SecurityContext, 0); err != nil {
-			return err
-		}
-	} else {
-		out.SecurityContext = nil
-	}
-	if in.ImagePullSecrets != nil {
-		out.ImagePullSecrets = make([]api.LocalObjectReference, len(in.ImagePullSecrets))
-		for i := range in.ImagePullSecrets {
-			if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&in.ImagePullSecrets[i], &out.ImagePullSecrets[i], s); err != nil {
-				return err
-			}
-		}
-	} else {
-		out.ImagePullSecrets = nil
-	}
-	return nil
-}
-
-func autoConvert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in *v1.PodTemplateSpec, out *api.PodTemplateSpec, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.PodTemplateSpec))(in)
-	}
-	if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil {
-		return err
-	}
-	if err := Convert_v1_PodSpec_To_api_PodSpec(&in.Spec, &out.Spec, s); err != nil {
-		return err
-	}
-	return nil
-}
-
-func Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in *v1.PodTemplateSpec, out *api.PodTemplateSpec, s conversion.Scope) error {
-	return autoConvert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in, out, s)
-}
-
-func autoConvert_v1_Probe_To_api_Probe(in *v1.Probe, out *api.Probe, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.Probe))(in)
-	}
-	if err := Convert_v1_Handler_To_api_Handler(&in.Handler, &out.Handler, s); err != nil {
-		return err
-	}
-	out.InitialDelaySeconds = int(in.InitialDelaySeconds)
-	out.TimeoutSeconds = int(in.TimeoutSeconds)
-	out.PeriodSeconds = int(in.PeriodSeconds)
-	out.SuccessThreshold = int(in.SuccessThreshold)
-	out.FailureThreshold = int(in.FailureThreshold)
-	return nil
-}
-
-func Convert_v1_Probe_To_api_Probe(in *v1.Probe, out *api.Probe, s conversion.Scope) error {
-	return autoConvert_v1_Probe_To_api_Probe(in, out, s)
-}
-
-func autoConvert_v1_RBDVolumeSource_To_api_RBDVolumeSource(in *v1.RBDVolumeSource, out *api.RBDVolumeSource, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.RBDVolumeSource))(in)
-	}
-	if in.CephMonitors != nil {
-		out.CephMonitors = make([]string, len(in.CephMonitors))
-		for i := range in.CephMonitors {
-			out.CephMonitors[i] = in.CephMonitors[i]
-		}
-	} else {
-		out.CephMonitors = nil
-	}
-	out.RBDImage = in.RBDImage
-	out.FSType = in.FSType
-	out.RBDPool = in.RBDPool
-	out.RadosUser = in.RadosUser
-	out.Keyring = in.Keyring
-	// unable to generate simple pointer conversion for v1.LocalObjectReference -> api.LocalObjectReference
-	if in.SecretRef != nil {
-		out.SecretRef = new(api.LocalObjectReference)
-		if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(in.SecretRef, out.SecretRef, s); err != nil {
-			return err
-		}
-	} else {
-		out.SecretRef = nil
-	}
-	out.ReadOnly = in.ReadOnly
-	return nil
-}
-
-func Convert_v1_RBDVolumeSource_To_api_RBDVolumeSource(in *v1.RBDVolumeSource, out *api.RBDVolumeSource, s conversion.Scope) error {
-	return autoConvert_v1_RBDVolumeSource_To_api_RBDVolumeSource(in, out, s)
-}
-
-func autoConvert_v1_ResourceRequirements_To_api_ResourceRequirements(in *v1.ResourceRequirements, out *api.ResourceRequirements, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.ResourceRequirements))(in)
-	}
-	if in.Limits != nil {
-		out.Limits = make(api.ResourceList)
-		for key, val := range in.Limits {
-			newVal := resource.Quantity{}
-			if err := api.Convert_resource_Quantity_To_resource_Quantity(&val, &newVal, s); err != nil {
-				return err
-			}
-			out.Limits[api.ResourceName(key)] = newVal
-		}
-	} else {
-		out.Limits = nil
-	}
-	if in.Requests != nil {
-		out.Requests = make(api.ResourceList)
-		for key, val := range in.Requests {
-			newVal := resource.Quantity{}
-			if err := api.Convert_resource_Quantity_To_resource_Quantity(&val, &newVal, s); err != nil {
-				return err
-			}
-			out.Requests[api.ResourceName(key)] = newVal
-		}
-	} else {
-		out.Requests = nil
-	}
-	return nil
-}
-
-func Convert_v1_ResourceRequirements_To_api_ResourceRequirements(in *v1.ResourceRequirements, out *api.ResourceRequirements, s conversion.Scope) error {
-	return autoConvert_v1_ResourceRequirements_To_api_ResourceRequirements(in, out, s)
-}
-
-func autoConvert_v1_SELinuxOptions_To_api_SELinuxOptions(in *v1.SELinuxOptions, out *api.SELinuxOptions, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.SELinuxOptions))(in)
-	}
-	out.User = in.User
-	out.Role = in.Role
-	out.Type = in.Type
-	out.Level = in.Level
-	return nil
-}
-
-func Convert_v1_SELinuxOptions_To_api_SELinuxOptions(in *v1.SELinuxOptions, out *api.SELinuxOptions, s conversion.Scope) error {
-	return autoConvert_v1_SELinuxOptions_To_api_SELinuxOptions(in, out, s)
-}
-
-func autoConvert_v1_SecretKeySelector_To_api_SecretKeySelector(in *v1.SecretKeySelector, out *api.SecretKeySelector, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.SecretKeySelector))(in)
-	}
-	if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil {
-		return err
-	}
-	out.Key = in.Key
-	return nil
-}
-
-func Convert_v1_SecretKeySelector_To_api_SecretKeySelector(in *v1.SecretKeySelector, out *api.SecretKeySelector, s conversion.Scope) error {
-	return autoConvert_v1_SecretKeySelector_To_api_SecretKeySelector(in, out, s)
-}
-
-func autoConvert_v1_SecretVolumeSource_To_api_SecretVolumeSource(in *v1.SecretVolumeSource, out *api.SecretVolumeSource, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.SecretVolumeSource))(in)
-	}
-	out.SecretName = in.SecretName
-	return nil
-}
-
-func Convert_v1_SecretVolumeSource_To_api_SecretVolumeSource(in *v1.SecretVolumeSource, out *api.SecretVolumeSource, s conversion.Scope) error {
-	return autoConvert_v1_SecretVolumeSource_To_api_SecretVolumeSource(in, out, s)
-}
-
-func autoConvert_v1_SecurityContext_To_api_SecurityContext(in *v1.SecurityContext, out *api.SecurityContext, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.SecurityContext))(in)
-	}
-	// unable to generate simple pointer conversion for v1.Capabilities -> api.Capabilities
-	if in.Capabilities != nil {
-		out.Capabilities = new(api.Capabilities)
-		if err := Convert_v1_Capabilities_To_api_Capabilities(in.Capabilities, out.Capabilities, s); err != nil {
-			return err
-		}
-	} else {
-		out.Capabilities = nil
-	}
-	if in.Privileged != nil {
-		out.Privileged = new(bool)
-		*out.Privileged = *in.Privileged
-	} else {
-		out.Privileged = nil
-	}
-	// unable to generate simple pointer conversion for v1.SELinuxOptions -> api.SELinuxOptions
-	if in.SELinuxOptions != nil {
-		out.SELinuxOptions = new(api.SELinuxOptions)
-		if err := Convert_v1_SELinuxOptions_To_api_SELinuxOptions(in.SELinuxOptions, out.SELinuxOptions, s); err != nil {
-			return err
-		}
-	} else {
-		out.SELinuxOptions = nil
-	}
-	if in.RunAsUser != nil {
-		out.RunAsUser = new(int64)
-		*out.RunAsUser = *in.RunAsUser
-	} else {
-		out.RunAsUser = nil
-	}
-	if in.RunAsNonRoot != nil {
-		out.RunAsNonRoot = new(bool)
-		*out.RunAsNonRoot = *in.RunAsNonRoot
-	} else {
-		out.RunAsNonRoot = nil
-	}
-	if in.ReadOnlyRootFilesystem != nil {
-		out.ReadOnlyRootFilesystem = new(bool)
-		*out.ReadOnlyRootFilesystem = *in.ReadOnlyRootFilesystem
-	} else {
-		out.ReadOnlyRootFilesystem = nil
-	}
-	return nil
-}
-
-func Convert_v1_SecurityContext_To_api_SecurityContext(in *v1.SecurityContext, out *api.SecurityContext, s conversion.Scope) error {
-	return autoConvert_v1_SecurityContext_To_api_SecurityContext(in, out, s)
-}
-
-func autoConvert_v1_TCPSocketAction_To_api_TCPSocketAction(in *v1.TCPSocketAction, out *api.TCPSocketAction, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.TCPSocketAction))(in)
-	}
-	if err := api.Convert_intstr_IntOrString_To_intstr_IntOrString(&in.Port, &out.Port, s); err != nil {
-		return err
-	}
-	return nil
-}
-
-func Convert_v1_TCPSocketAction_To_api_TCPSocketAction(in *v1.TCPSocketAction, out *api.TCPSocketAction, s conversion.Scope) error {
-	return autoConvert_v1_TCPSocketAction_To_api_TCPSocketAction(in, out, s)
-}
-
-func autoConvert_v1_Volume_To_api_Volume(in *v1.Volume, out *api.Volume, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.Volume))(in)
-	}
-	out.Name = in.Name
-	if err := Convert_v1_VolumeSource_To_api_VolumeSource(&in.VolumeSource, &out.VolumeSource, s); err != nil {
-		return err
-	}
-	return nil
-}
-
-func Convert_v1_Volume_To_api_Volume(in *v1.Volume, out *api.Volume, s conversion.Scope) error {
-	return autoConvert_v1_Volume_To_api_Volume(in, out, s)
-}
-
-func autoConvert_v1_VolumeMount_To_api_VolumeMount(in *v1.VolumeMount, out *api.VolumeMount, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.VolumeMount))(in)
-	}
-	out.Name = in.Name
-	out.ReadOnly = in.ReadOnly
-	out.MountPath = in.MountPath
-	return nil
-}
-
-func Convert_v1_VolumeMount_To_api_VolumeMount(in *v1.VolumeMount, out *api.VolumeMount, s conversion.Scope) error {
-	return autoConvert_v1_VolumeMount_To_api_VolumeMount(in, out, s)
-}
-
-func autoConvert_v1_VolumeSource_To_api_VolumeSource(in *v1.VolumeSource, out *api.VolumeSource, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*v1.VolumeSource))(in)
-	}
-	// unable to generate simple pointer conversion for v1.HostPathVolumeSource -> api.HostPathVolumeSource
-	if in.HostPath != nil {
-		out.HostPath = new(api.HostPathVolumeSource)
-		if err := Convert_v1_HostPathVolumeSource_To_api_HostPathVolumeSource(in.HostPath, out.HostPath, s); err != nil {
-			return err
-		}
-	} else {
-		out.HostPath = nil
-	}
-	// unable to generate simple pointer conversion for v1.EmptyDirVolumeSource -> api.EmptyDirVolumeSource
-	if in.EmptyDir != nil {
-		out.EmptyDir = new(api.EmptyDirVolumeSource)
-		if err := Convert_v1_EmptyDirVolumeSource_To_api_EmptyDirVolumeSource(in.EmptyDir, out.EmptyDir, s); err != nil {
-			return err
-		}
-	} else {
-		out.EmptyDir = nil
-	}
-	// unable to generate simple pointer conversion for v1.GCEPersistentDiskVolumeSource -> api.GCEPersistentDiskVolumeSource
-	if in.GCEPersistentDisk != nil {
-		out.GCEPersistentDisk = new(api.GCEPersistentDiskVolumeSource)
-		if err := Convert_v1_GCEPersistentDiskVolumeSource_To_api_GCEPersistentDiskVolumeSource(in.GCEPersistentDisk, out.GCEPersistentDisk, s); err != nil {
-			return err
-		}
-	} else {
-		out.GCEPersistentDisk = nil
-	}
-	// unable to generate simple pointer conversion for v1.AWSElasticBlockStoreVolumeSource -> api.AWSElasticBlockStoreVolumeSource
-	if in.AWSElasticBlockStore != nil {
-		out.AWSElasticBlockStore = new(api.AWSElasticBlockStoreVolumeSource)
-		if err := Convert_v1_AWSElasticBlockStoreVolumeSource_To_api_AWSElasticBlockStoreVolumeSource(in.AWSElasticBlockStore, out.AWSElasticBlockStore, s); err != nil {
-			return err
-		}
-	} else {
-		out.AWSElasticBlockStore = nil
-	}
-	// unable to generate simple pointer conversion for v1.GitRepoVolumeSource -> api.GitRepoVolumeSource
-	if in.GitRepo != nil {
-		out.GitRepo = new(api.GitRepoVolumeSource)
-		if err := Convert_v1_GitRepoVolumeSource_To_api_GitRepoVolumeSource(in.GitRepo, out.GitRepo, s); err != nil {
-			return err
-		}
-	} else {
-		out.GitRepo = nil
-	}
-	// unable to generate simple pointer conversion for v1.SecretVolumeSource -> api.SecretVolumeSource
-	if in.Secret != nil {
-		out.Secret = new(api.SecretVolumeSource)
-		if err := Convert_v1_SecretVolumeSource_To_api_SecretVolumeSource(in.Secret, out.Secret, s); err != nil {
-			return err
-		}
-	} else {
-		out.Secret = nil
-	}
-	// unable to generate simple pointer conversion for v1.NFSVolumeSource -> api.NFSVolumeSource
-	if in.NFS != nil {
-		out.NFS = new(api.NFSVolumeSource)
-		if err := Convert_v1_NFSVolumeSource_To_api_NFSVolumeSource(in.NFS, out.NFS, s); err != nil {
-			return err
-		}
-	} else {
-		out.NFS = nil
-	}
-	// unable to generate simple pointer conversion for v1.ISCSIVolumeSource -> api.ISCSIVolumeSource
-	if in.ISCSI != nil {
-		out.ISCSI = new(api.ISCSIVolumeSource)
-		if err := Convert_v1_ISCSIVolumeSource_To_api_ISCSIVolumeSource(in.ISCSI, out.ISCSI, s); err != nil {
-			return err
-		}
-	} else {
-		out.ISCSI = nil
-	}
-	// unable to generate simple pointer conversion for v1.GlusterfsVolumeSource -> api.GlusterfsVolumeSource
-	if in.Glusterfs != nil {
-		out.Glusterfs = new(api.GlusterfsVolumeSource)
-		if err := Convert_v1_GlusterfsVolumeSource_To_api_GlusterfsVolumeSource(in.Glusterfs, out.Glusterfs, s); err != nil {
-			return err
-		}
-	} else {
-		out.Glusterfs = nil
-	}
-	// unable to generate simple pointer conversion for v1.PersistentVolumeClaimVolumeSource -> api.PersistentVolumeClaimVolumeSource
-	if in.PersistentVolumeClaim != nil {
-		out.PersistentVolumeClaim = new(api.PersistentVolumeClaimVolumeSource)
-		if err := Convert_v1_PersistentVolumeClaimVolumeSource_To_api_PersistentVolumeClaimVolumeSource(in.PersistentVolumeClaim, out.PersistentVolumeClaim, s); err != nil {
-			return err
-		}
-	} else {
-		out.PersistentVolumeClaim = nil
-	}
-	// unable to generate simple pointer conversion for v1.RBDVolumeSource -> api.RBDVolumeSource
-	if in.RBD != nil {
-		out.RBD = new(api.RBDVolumeSource)
-		if err := Convert_v1_RBDVolumeSource_To_api_RBDVolumeSource(in.RBD, out.RBD, s); err != nil {
-			return err
-		}
-	} else {
-		out.RBD = nil
-	}
-	// unable to generate simple pointer conversion for v1.FlexVolumeSource -> api.FlexVolumeSource
-	if in.FlexVolume != nil {
-		out.FlexVolume = new(api.FlexVolumeSource)
-		if err := Convert_v1_FlexVolumeSource_To_api_FlexVolumeSource(in.FlexVolume, out.FlexVolume, s); err != nil {
-			return err
-		}
-	} else {
-		out.FlexVolume = nil
-	}
-	// unable to generate simple pointer conversion for v1.CinderVolumeSource -> api.CinderVolumeSource
-	if in.Cinder != nil {
-		out.Cinder = new(api.CinderVolumeSource)
-		if err := Convert_v1_CinderVolumeSource_To_api_CinderVolumeSource(in.Cinder, out.Cinder, s); err != nil {
-			return err
-		}
-	} else {
-		out.Cinder = nil
-	}
-	// unable to generate simple pointer conversion for v1.CephFSVolumeSource -> api.CephFSVolumeSource
-	if in.CephFS != nil {
-		out.CephFS = new(api.CephFSVolumeSource)
-		if err := Convert_v1_CephFSVolumeSource_To_api_CephFSVolumeSource(in.CephFS, out.CephFS, s); err != nil {
-			return err
-		}
-	} else {
-		out.CephFS = nil
-	}
-	// unable to generate simple pointer conversion for v1.FlockerVolumeSource -> api.FlockerVolumeSource
-	if in.Flocker != nil {
-		out.Flocker = new(api.FlockerVolumeSource)
-		if err := Convert_v1_FlockerVolumeSource_To_api_FlockerVolumeSource(in.Flocker, out.Flocker, s); err != nil {
-			return err
-		}
-	} else {
-		out.Flocker = nil
-	}
-	// unable to generate simple pointer conversion for v1.DownwardAPIVolumeSource -> api.DownwardAPIVolumeSource
-	if in.DownwardAPI != nil {
-		out.DownwardAPI = new(api.DownwardAPIVolumeSource)
-		if err := Convert_v1_DownwardAPIVolumeSource_To_api_DownwardAPIVolumeSource(in.DownwardAPI, out.DownwardAPI, s); err != nil {
-			return err
-		}
-	} else {
-		out.DownwardAPI = nil
-	}
-	// unable to generate simple pointer conversion for v1.FCVolumeSource -> api.FCVolumeSource
-	if in.FC != nil {
-		out.FC = new(api.FCVolumeSource)
-		if err := Convert_v1_FCVolumeSource_To_api_FCVolumeSource(in.FC, out.FC, s); err != nil {
-			return err
-		}
-	} else {
-		out.FC = nil
-	}
-	// unable to generate simple pointer conversion for v1.AzureFileVolumeSource -> api.AzureFileVolumeSource
-	if in.AzureFile != nil {
-		out.AzureFile = new(api.AzureFileVolumeSource)
-		if err := Convert_v1_AzureFileVolumeSource_To_api_AzureFileVolumeSource(in.AzureFile, out.AzureFile, s); err != nil {
-			return err
-		}
-	} else {
-		out.AzureFile = nil
-	}
-	// unable to generate simple pointer conversion for v1.ConfigMapVolumeSource -> api.ConfigMapVolumeSource
-	if in.ConfigMap != nil {
-		out.ConfigMap = new(api.ConfigMapVolumeSource)
-		if err := Convert_v1_ConfigMapVolumeSource_To_api_ConfigMapVolumeSource(in.ConfigMap, out.ConfigMap, s); err != nil {
-			return err
-		}
-	} else {
-		out.ConfigMap = nil
-	}
-	return nil
-}
-
-func Convert_v1_VolumeSource_To_api_VolumeSource(in *v1.VolumeSource, out *api.VolumeSource, s conversion.Scope) error {
-	return autoConvert_v1_VolumeSource_To_api_VolumeSource(in, out, s)
-}
-
-func autoConvert_extensions_APIVersion_To_v1beta1_APIVersion(in *extensions.APIVersion, out *APIVersion, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*extensions.APIVersion))(in)
-	}
-	out.Name = in.Name
-	out.APIGroup = in.APIGroup
-	return nil
-}
-
-func Convert_extensions_APIVersion_To_v1beta1_APIVersion(in *extensions.APIVersion, out *APIVersion, s conversion.Scope) error {
-	return autoConvert_extensions_APIVersion_To_v1beta1_APIVersion(in, out, s)
-}
-
-func autoConvert_extensions_CPUTargetUtilization_To_v1beta1_CPUTargetUtilization(in *extensions.CPUTargetUtilization, out *CPUTargetUtilization, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*extensions.CPUTargetUtilization))(in)
-	}
-	out.TargetPercentage = int32(in.TargetPercentage)
-	return nil
-}
-
-func Convert_extensions_CPUTargetUtilization_To_v1beta1_CPUTargetUtilization(in *extensions.CPUTargetUtilization, out *CPUTargetUtilization, s conversion.Scope) error {
-	return autoConvert_extensions_CPUTargetUtilization_To_v1beta1_CPUTargetUtilization(in, out, s)
-}
-
-func autoConvert_extensions_DaemonSet_To_v1beta1_DaemonSet(in *extensions.DaemonSet, out *DaemonSet, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*extensions.DaemonSet))(in)
-	}
-	if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
-		return err
-	}
-	if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil {
-		return err
-	}
-	if err := Convert_extensions_DaemonSetSpec_To_v1beta1_DaemonSetSpec(&in.Spec, &out.Spec, s); err != nil {
-		return err
-	}
-	if err := Convert_extensions_DaemonSetStatus_To_v1beta1_DaemonSetStatus(&in.Status, &out.Status, s); err != nil {
-		return err
-	}
-	return nil
-}
-
-func Convert_extensions_DaemonSet_To_v1beta1_DaemonSet(in *extensions.DaemonSet, out *DaemonSet, s conversion.Scope) error {
-	return autoConvert_extensions_DaemonSet_To_v1beta1_DaemonSet(in, out, s)
-}
-
-func autoConvert_extensions_DaemonSetList_To_v1beta1_DaemonSetList(in *extensions.DaemonSetList, out *DaemonSetList, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*extensions.DaemonSetList))(in)
-	}
-	if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
-		return err
-	}
-	if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
-		return err
-	}
-	if in.Items != nil {
-		out.Items = make([]DaemonSet, len(in.Items))
-		for i := range in.Items {
-			if err := Convert_extensions_DaemonSet_To_v1beta1_DaemonSet(&in.Items[i], &out.Items[i], s); err != nil {
-				return err
-			}
-		}
-	} else {
-		out.Items = nil
-	}
-	return nil
-}
-
-func Convert_extensions_DaemonSetList_To_v1beta1_DaemonSetList(in *extensions.DaemonSetList, out *DaemonSetList, s conversion.Scope) error {
-	return autoConvert_extensions_DaemonSetList_To_v1beta1_DaemonSetList(in, out, s)
-}
-
-func autoConvert_extensions_DaemonSetSpec_To_v1beta1_DaemonSetSpec(in *extensions.DaemonSetSpec, out *DaemonSetSpec, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*extensions.DaemonSetSpec))(in)
-	}
-	// unable to generate simple pointer conversion for unversioned.LabelSelector -> v1beta1.LabelSelector
-	if in.Selector != nil {
-		out.Selector = new(LabelSelector)
-		if err := Convert_unversioned_LabelSelector_To_v1beta1_LabelSelector(in.Selector, out.Selector, s); err != nil {
-			return err
-		}
-	} else {
-		out.Selector = nil
-	}
-	if err := Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil {
-		return err
-	}
-	return nil
-}
-
-func Convert_extensions_DaemonSetSpec_To_v1beta1_DaemonSetSpec(in *extensions.DaemonSetSpec, out *DaemonSetSpec, s conversion.Scope) error {
-	return autoConvert_extensions_DaemonSetSpec_To_v1beta1_DaemonSetSpec(in, out, s)
-}
-
-func autoConvert_extensions_DaemonSetStatus_To_v1beta1_DaemonSetStatus(in *extensions.DaemonSetStatus, out *DaemonSetStatus, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*extensions.DaemonSetStatus))(in)
-	}
-	out.CurrentNumberScheduled = int32(in.CurrentNumberScheduled)
-	out.NumberMisscheduled = int32(in.NumberMisscheduled)
-	out.DesiredNumberScheduled = int32(in.DesiredNumberScheduled)
-	return nil
-}
-
-func Convert_extensions_DaemonSetStatus_To_v1beta1_DaemonSetStatus(in *extensions.DaemonSetStatus, out *DaemonSetStatus, s conversion.Scope) error {
-	return autoConvert_extensions_DaemonSetStatus_To_v1beta1_DaemonSetStatus(in, out, s)
-}
-
-func autoConvert_extensions_Deployment_To_v1beta1_Deployment(in *extensions.Deployment, out *Deployment, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*extensions.Deployment))(in)
-	}
-	if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
-		return err
-	}
-	if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil {
-		return err
-	}
-	if err := Convert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil {
-		return err
-	}
-	if err :=
Convert_extensions_DeploymentStatus_To_v1beta1_DeploymentStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_extensions_Deployment_To_v1beta1_Deployment(in *extensions.Deployment, out *Deployment, s conversion.Scope) error { - return autoConvert_extensions_Deployment_To_v1beta1_Deployment(in, out, s) -} - -func autoConvert_extensions_DeploymentList_To_v1beta1_DeploymentList(in *extensions.DeploymentList, out *DeploymentList, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*extensions.DeploymentList))(in) - } - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - out.Items = make([]Deployment, len(in.Items)) - for i := range in.Items { - if err := Convert_extensions_Deployment_To_v1beta1_Deployment(&in.Items[i], &out.Items[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_extensions_DeploymentList_To_v1beta1_DeploymentList(in *extensions.DeploymentList, out *DeploymentList, s conversion.Scope) error { - return autoConvert_extensions_DeploymentList_To_v1beta1_DeploymentList(in, out, s) -} - -func autoConvert_extensions_DeploymentRollback_To_v1beta1_DeploymentRollback(in *extensions.DeploymentRollback, out *DeploymentRollback, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*extensions.DeploymentRollback))(in) - } - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - out.Name = in.Name - if in.UpdatedAnnotations != nil { - out.UpdatedAnnotations = make(map[string]string) - for key, val := range in.UpdatedAnnotations { - out.UpdatedAnnotations[key] = val - } - } else { - out.UpdatedAnnotations = nil - } - if err := Convert_extensions_RollbackConfig_To_v1beta1_RollbackConfig(&in.RollbackTo, &out.RollbackTo, s); err != nil { - return err - } - return nil -} - -func Convert_extensions_DeploymentRollback_To_v1beta1_DeploymentRollback(in *extensions.DeploymentRollback, out *DeploymentRollback, s conversion.Scope) error { - return autoConvert_extensions_DeploymentRollback_To_v1beta1_DeploymentRollback(in, out, s) -} - -func autoConvert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec(in *extensions.DeploymentSpec, out *DeploymentSpec, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*extensions.DeploymentSpec))(in) - } - if err := s.Convert(&in.Replicas, &out.Replicas, 0); err != nil { - return err - } - // unable to generate simple pointer conversion for unversioned.LabelSelector -> v1beta1.LabelSelector - if in.Selector != nil { - out.Selector = new(LabelSelector) - if err := Convert_unversioned_LabelSelector_To_v1beta1_LabelSelector(in.Selector, out.Selector, s); err != nil { - return err - } - } else { - out.Selector = nil - } - if err := Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { - return err - } - if err := Convert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { - return err - } - out.MinReadySeconds = int32(in.MinReadySeconds) - if 
in.RevisionHistoryLimit != nil { - out.RevisionHistoryLimit = new(int32) - *out.RevisionHistoryLimit = int32(*in.RevisionHistoryLimit) - } else { - out.RevisionHistoryLimit = nil - } - out.Paused = in.Paused - // unable to generate simple pointer conversion for extensions.RollbackConfig -> v1beta1.RollbackConfig - if in.RollbackTo != nil { - out.RollbackTo = new(RollbackConfig) - if err := Convert_extensions_RollbackConfig_To_v1beta1_RollbackConfig(in.RollbackTo, out.RollbackTo, s); err != nil { - return err - } - } else { - out.RollbackTo = nil - } - return nil -} - -func autoConvert_extensions_DeploymentStatus_To_v1beta1_DeploymentStatus(in *extensions.DeploymentStatus, out *DeploymentStatus, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*extensions.DeploymentStatus))(in) - } - out.ObservedGeneration = in.ObservedGeneration - out.Replicas = int32(in.Replicas) - out.UpdatedReplicas = int32(in.UpdatedReplicas) - out.AvailableReplicas = int32(in.AvailableReplicas) - out.UnavailableReplicas = int32(in.UnavailableReplicas) - return nil -} - -func Convert_extensions_DeploymentStatus_To_v1beta1_DeploymentStatus(in *extensions.DeploymentStatus, out *DeploymentStatus, s conversion.Scope) error { - return autoConvert_extensions_DeploymentStatus_To_v1beta1_DeploymentStatus(in, out, s) -} - -func autoConvert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy(in *extensions.DeploymentStrategy, out *DeploymentStrategy, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*extensions.DeploymentStrategy))(in) - } - out.Type = DeploymentStrategyType(in.Type) - // unable to generate simple pointer conversion for extensions.RollingUpdateDeployment -> v1beta1.RollingUpdateDeployment - if in.RollingUpdate != nil { - out.RollingUpdate = new(RollingUpdateDeployment) - if err := Convert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(in.RollingUpdate, out.RollingUpdate, s); err != nil { - return err - } - } else { - out.RollingUpdate = nil - } - return nil -} - -func autoConvert_extensions_HTTPIngressPath_To_v1beta1_HTTPIngressPath(in *extensions.HTTPIngressPath, out *HTTPIngressPath, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*extensions.HTTPIngressPath))(in) - } - out.Path = in.Path - if err := Convert_extensions_IngressBackend_To_v1beta1_IngressBackend(&in.Backend, &out.Backend, s); err != nil { - return err - } - return nil -} - -func Convert_extensions_HTTPIngressPath_To_v1beta1_HTTPIngressPath(in *extensions.HTTPIngressPath, out *HTTPIngressPath, s conversion.Scope) error { - return autoConvert_extensions_HTTPIngressPath_To_v1beta1_HTTPIngressPath(in, out, s) -} - -func autoConvert_extensions_HTTPIngressRuleValue_To_v1beta1_HTTPIngressRuleValue(in *extensions.HTTPIngressRuleValue, out *HTTPIngressRuleValue, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*extensions.HTTPIngressRuleValue))(in) - } - if in.Paths != nil { - out.Paths = make([]HTTPIngressPath, len(in.Paths)) - for i := range in.Paths { - if err := Convert_extensions_HTTPIngressPath_To_v1beta1_HTTPIngressPath(&in.Paths[i], &out.Paths[i], s); err != nil { - return err - } - } - } else { - out.Paths = nil - } - return nil -} - -func Convert_extensions_HTTPIngressRuleValue_To_v1beta1_HTTPIngressRuleValue(in 
*extensions.HTTPIngressRuleValue, out *HTTPIngressRuleValue, s conversion.Scope) error { - return autoConvert_extensions_HTTPIngressRuleValue_To_v1beta1_HTTPIngressRuleValue(in, out, s) -} - -func autoConvert_extensions_HorizontalPodAutoscaler_To_v1beta1_HorizontalPodAutoscaler(in *extensions.HorizontalPodAutoscaler, out *HorizontalPodAutoscaler, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*extensions.HorizontalPodAutoscaler))(in) - } - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } - if err := Convert_extensions_HorizontalPodAutoscalerSpec_To_v1beta1_HorizontalPodAutoscalerSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_extensions_HorizontalPodAutoscalerStatus_To_v1beta1_HorizontalPodAutoscalerStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_extensions_HorizontalPodAutoscaler_To_v1beta1_HorizontalPodAutoscaler(in *extensions.HorizontalPodAutoscaler, out *HorizontalPodAutoscaler, s conversion.Scope) error { - return autoConvert_extensions_HorizontalPodAutoscaler_To_v1beta1_HorizontalPodAutoscaler(in, out, s) -} - -func autoConvert_extensions_HorizontalPodAutoscalerList_To_v1beta1_HorizontalPodAutoscalerList(in *extensions.HorizontalPodAutoscalerList, out *HorizontalPodAutoscalerList, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*extensions.HorizontalPodAutoscalerList))(in) - } - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - out.Items = make([]HorizontalPodAutoscaler, len(in.Items)) - for i := range in.Items { - if err := Convert_extensions_HorizontalPodAutoscaler_To_v1beta1_HorizontalPodAutoscaler(&in.Items[i], &out.Items[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_extensions_HorizontalPodAutoscalerList_To_v1beta1_HorizontalPodAutoscalerList(in *extensions.HorizontalPodAutoscalerList, out *HorizontalPodAutoscalerList, s conversion.Scope) error { - return autoConvert_extensions_HorizontalPodAutoscalerList_To_v1beta1_HorizontalPodAutoscalerList(in, out, s) -} - -func autoConvert_extensions_HorizontalPodAutoscalerSpec_To_v1beta1_HorizontalPodAutoscalerSpec(in *extensions.HorizontalPodAutoscalerSpec, out *HorizontalPodAutoscalerSpec, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*extensions.HorizontalPodAutoscalerSpec))(in) - } - if err := Convert_extensions_SubresourceReference_To_v1beta1_SubresourceReference(&in.ScaleRef, &out.ScaleRef, s); err != nil { - return err - } - if in.MinReplicas != nil { - out.MinReplicas = new(int32) - *out.MinReplicas = int32(*in.MinReplicas) - } else { - out.MinReplicas = nil - } - out.MaxReplicas = int32(in.MaxReplicas) - // unable to generate simple pointer conversion for extensions.CPUTargetUtilization -> v1beta1.CPUTargetUtilization - if in.CPUUtilization != nil { - out.CPUUtilization = new(CPUTargetUtilization) - if err := 
Convert_extensions_CPUTargetUtilization_To_v1beta1_CPUTargetUtilization(in.CPUUtilization, out.CPUUtilization, s); err != nil { - return err - } - } else { - out.CPUUtilization = nil - } - return nil -} - -func Convert_extensions_HorizontalPodAutoscalerSpec_To_v1beta1_HorizontalPodAutoscalerSpec(in *extensions.HorizontalPodAutoscalerSpec, out *HorizontalPodAutoscalerSpec, s conversion.Scope) error { - return autoConvert_extensions_HorizontalPodAutoscalerSpec_To_v1beta1_HorizontalPodAutoscalerSpec(in, out, s) -} - -func autoConvert_extensions_HorizontalPodAutoscalerStatus_To_v1beta1_HorizontalPodAutoscalerStatus(in *extensions.HorizontalPodAutoscalerStatus, out *HorizontalPodAutoscalerStatus, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*extensions.HorizontalPodAutoscalerStatus))(in) - } - if in.ObservedGeneration != nil { - out.ObservedGeneration = new(int64) - *out.ObservedGeneration = *in.ObservedGeneration - } else { - out.ObservedGeneration = nil - } - // unable to generate simple pointer conversion for unversioned.Time -> unversioned.Time - if in.LastScaleTime != nil { - out.LastScaleTime = new(unversioned.Time) - if err := api.Convert_unversioned_Time_To_unversioned_Time(in.LastScaleTime, out.LastScaleTime, s); err != nil { - return err - } - } else { - out.LastScaleTime = nil - } - out.CurrentReplicas = int32(in.CurrentReplicas) - out.DesiredReplicas = int32(in.DesiredReplicas) - if in.CurrentCPUUtilizationPercentage != nil { - out.CurrentCPUUtilizationPercentage = new(int32) - *out.CurrentCPUUtilizationPercentage = int32(*in.CurrentCPUUtilizationPercentage) - } else { - out.CurrentCPUUtilizationPercentage = nil - } - return nil -} - -func Convert_extensions_HorizontalPodAutoscalerStatus_To_v1beta1_HorizontalPodAutoscalerStatus(in *extensions.HorizontalPodAutoscalerStatus, out *HorizontalPodAutoscalerStatus, s conversion.Scope) error { - return autoConvert_extensions_HorizontalPodAutoscalerStatus_To_v1beta1_HorizontalPodAutoscalerStatus(in, out, s) -} - -func autoConvert_extensions_HostPortRange_To_v1beta1_HostPortRange(in *extensions.HostPortRange, out *HostPortRange, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*extensions.HostPortRange))(in) - } - out.Min = int32(in.Min) - out.Max = int32(in.Max) - return nil -} - -func Convert_extensions_HostPortRange_To_v1beta1_HostPortRange(in *extensions.HostPortRange, out *HostPortRange, s conversion.Scope) error { - return autoConvert_extensions_HostPortRange_To_v1beta1_HostPortRange(in, out, s) -} - -func autoConvert_extensions_IDRange_To_v1beta1_IDRange(in *extensions.IDRange, out *IDRange, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*extensions.IDRange))(in) - } - out.Min = in.Min - out.Max = in.Max - return nil -} - -func Convert_extensions_IDRange_To_v1beta1_IDRange(in *extensions.IDRange, out *IDRange, s conversion.Scope) error { - return autoConvert_extensions_IDRange_To_v1beta1_IDRange(in, out, s) -} - -func autoConvert_extensions_Ingress_To_v1beta1_Ingress(in *extensions.Ingress, out *Ingress, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*extensions.Ingress))(in) - } - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := 
Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } - if err := Convert_extensions_IngressSpec_To_v1beta1_IngressSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_extensions_IngressStatus_To_v1beta1_IngressStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_extensions_Ingress_To_v1beta1_Ingress(in *extensions.Ingress, out *Ingress, s conversion.Scope) error { - return autoConvert_extensions_Ingress_To_v1beta1_Ingress(in, out, s) -} - -func autoConvert_extensions_IngressBackend_To_v1beta1_IngressBackend(in *extensions.IngressBackend, out *IngressBackend, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*extensions.IngressBackend))(in) - } - out.ServiceName = in.ServiceName - if err := api.Convert_intstr_IntOrString_To_intstr_IntOrString(&in.ServicePort, &out.ServicePort, s); err != nil { - return err - } - return nil -} - -func Convert_extensions_IngressBackend_To_v1beta1_IngressBackend(in *extensions.IngressBackend, out *IngressBackend, s conversion.Scope) error { - return autoConvert_extensions_IngressBackend_To_v1beta1_IngressBackend(in, out, s) -} - -func autoConvert_extensions_IngressList_To_v1beta1_IngressList(in *extensions.IngressList, out *IngressList, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*extensions.IngressList))(in) - } - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - out.Items = make([]Ingress, len(in.Items)) - for i := range in.Items { - if err := Convert_extensions_Ingress_To_v1beta1_Ingress(&in.Items[i], &out.Items[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_extensions_IngressList_To_v1beta1_IngressList(in *extensions.IngressList, out *IngressList, s conversion.Scope) error { - return autoConvert_extensions_IngressList_To_v1beta1_IngressList(in, out, s) -} - -func autoConvert_extensions_IngressRule_To_v1beta1_IngressRule(in *extensions.IngressRule, out *IngressRule, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*extensions.IngressRule))(in) - } - out.Host = in.Host - if err := Convert_extensions_IngressRuleValue_To_v1beta1_IngressRuleValue(&in.IngressRuleValue, &out.IngressRuleValue, s); err != nil { - return err - } - return nil -} - -func Convert_extensions_IngressRule_To_v1beta1_IngressRule(in *extensions.IngressRule, out *IngressRule, s conversion.Scope) error { - return autoConvert_extensions_IngressRule_To_v1beta1_IngressRule(in, out, s) -} - -func autoConvert_extensions_IngressRuleValue_To_v1beta1_IngressRuleValue(in *extensions.IngressRuleValue, out *IngressRuleValue, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*extensions.IngressRuleValue))(in) - } - // unable to generate simple pointer conversion for extensions.HTTPIngressRuleValue -> v1beta1.HTTPIngressRuleValue - if in.HTTP != nil { - out.HTTP = new(HTTPIngressRuleValue) - if err := Convert_extensions_HTTPIngressRuleValue_To_v1beta1_HTTPIngressRuleValue(in.HTTP, 
out.HTTP, s); err != nil { - return err - } - } else { - out.HTTP = nil - } - return nil -} - -func Convert_extensions_IngressRuleValue_To_v1beta1_IngressRuleValue(in *extensions.IngressRuleValue, out *IngressRuleValue, s conversion.Scope) error { - return autoConvert_extensions_IngressRuleValue_To_v1beta1_IngressRuleValue(in, out, s) -} - -func autoConvert_extensions_IngressSpec_To_v1beta1_IngressSpec(in *extensions.IngressSpec, out *IngressSpec, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*extensions.IngressSpec))(in) - } - // unable to generate simple pointer conversion for extensions.IngressBackend -> v1beta1.IngressBackend - if in.Backend != nil { - out.Backend = new(IngressBackend) - if err := Convert_extensions_IngressBackend_To_v1beta1_IngressBackend(in.Backend, out.Backend, s); err != nil { - return err - } - } else { - out.Backend = nil - } - if in.TLS != nil { - out.TLS = make([]IngressTLS, len(in.TLS)) - for i := range in.TLS { - if err := Convert_extensions_IngressTLS_To_v1beta1_IngressTLS(&in.TLS[i], &out.TLS[i], s); err != nil { - return err - } - } - } else { - out.TLS = nil - } - if in.Rules != nil { - out.Rules = make([]IngressRule, len(in.Rules)) - for i := range in.Rules { - if err := Convert_extensions_IngressRule_To_v1beta1_IngressRule(&in.Rules[i], &out.Rules[i], s); err != nil { - return err - } - } - } else { - out.Rules = nil - } - return nil -} - -func Convert_extensions_IngressSpec_To_v1beta1_IngressSpec(in *extensions.IngressSpec, out *IngressSpec, s conversion.Scope) error { - return autoConvert_extensions_IngressSpec_To_v1beta1_IngressSpec(in, out, s) -} - -func autoConvert_extensions_IngressStatus_To_v1beta1_IngressStatus(in *extensions.IngressStatus, out *IngressStatus, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*extensions.IngressStatus))(in) - } - if err := Convert_api_LoadBalancerStatus_To_v1_LoadBalancerStatus(&in.LoadBalancer, &out.LoadBalancer, s); err != nil { - return err - } - return nil -} - -func Convert_extensions_IngressStatus_To_v1beta1_IngressStatus(in *extensions.IngressStatus, out *IngressStatus, s conversion.Scope) error { - return autoConvert_extensions_IngressStatus_To_v1beta1_IngressStatus(in, out, s) -} - -func autoConvert_extensions_IngressTLS_To_v1beta1_IngressTLS(in *extensions.IngressTLS, out *IngressTLS, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*extensions.IngressTLS))(in) - } - if in.Hosts != nil { - out.Hosts = make([]string, len(in.Hosts)) - for i := range in.Hosts { - out.Hosts[i] = in.Hosts[i] - } - } else { - out.Hosts = nil - } - out.SecretName = in.SecretName - return nil -} - -func Convert_extensions_IngressTLS_To_v1beta1_IngressTLS(in *extensions.IngressTLS, out *IngressTLS, s conversion.Scope) error { - return autoConvert_extensions_IngressTLS_To_v1beta1_IngressTLS(in, out, s) -} - -func autoConvert_extensions_Job_To_v1beta1_Job(in *extensions.Job, out *Job, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*extensions.Job))(in) - } - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } - if err := 
Convert_extensions_JobSpec_To_v1beta1_JobSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_extensions_JobStatus_To_v1beta1_JobStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_extensions_Job_To_v1beta1_Job(in *extensions.Job, out *Job, s conversion.Scope) error { - return autoConvert_extensions_Job_To_v1beta1_Job(in, out, s) -} - -func autoConvert_extensions_JobCondition_To_v1beta1_JobCondition(in *extensions.JobCondition, out *JobCondition, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*extensions.JobCondition))(in) - } - out.Type = JobConditionType(in.Type) - out.Status = v1.ConditionStatus(in.Status) - if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastProbeTime, &out.LastProbeTime, s); err != nil { - return err - } - if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastTransitionTime, &out.LastTransitionTime, s); err != nil { - return err - } - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -func Convert_extensions_JobCondition_To_v1beta1_JobCondition(in *extensions.JobCondition, out *JobCondition, s conversion.Scope) error { - return autoConvert_extensions_JobCondition_To_v1beta1_JobCondition(in, out, s) -} - -func autoConvert_extensions_JobList_To_v1beta1_JobList(in *extensions.JobList, out *JobList, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*extensions.JobList))(in) - } - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - out.Items = make([]Job, len(in.Items)) - for i := range in.Items { - if err := Convert_extensions_Job_To_v1beta1_Job(&in.Items[i], &out.Items[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_extensions_JobList_To_v1beta1_JobList(in *extensions.JobList, out *JobList, s conversion.Scope) error { - return autoConvert_extensions_JobList_To_v1beta1_JobList(in, out, s) -} - -func autoConvert_extensions_JobSpec_To_v1beta1_JobSpec(in *extensions.JobSpec, out *JobSpec, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*extensions.JobSpec))(in) - } - if in.Parallelism != nil { - out.Parallelism = new(int32) - *out.Parallelism = int32(*in.Parallelism) - } else { - out.Parallelism = nil - } - if in.Completions != nil { - out.Completions = new(int32) - *out.Completions = int32(*in.Completions) - } else { - out.Completions = nil - } - if in.ActiveDeadlineSeconds != nil { - out.ActiveDeadlineSeconds = new(int64) - *out.ActiveDeadlineSeconds = *in.ActiveDeadlineSeconds - } else { - out.ActiveDeadlineSeconds = nil - } - // unable to generate simple pointer conversion for unversioned.LabelSelector -> v1beta1.LabelSelector - if in.Selector != nil { - out.Selector = new(LabelSelector) - if err := Convert_unversioned_LabelSelector_To_v1beta1_LabelSelector(in.Selector, out.Selector, s); err != nil { - return err - } - } else { - out.Selector = nil - } - // in.ManualSelector has no peer in out - if err := Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { - return err - } - return nil -} - -func 
autoConvert_extensions_JobStatus_To_v1beta1_JobStatus(in *extensions.JobStatus, out *JobStatus, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*extensions.JobStatus))(in) - } - if in.Conditions != nil { - out.Conditions = make([]JobCondition, len(in.Conditions)) - for i := range in.Conditions { - if err := Convert_extensions_JobCondition_To_v1beta1_JobCondition(&in.Conditions[i], &out.Conditions[i], s); err != nil { - return err - } - } - } else { - out.Conditions = nil - } - // unable to generate simple pointer conversion for unversioned.Time -> unversioned.Time - if in.StartTime != nil { - out.StartTime = new(unversioned.Time) - if err := api.Convert_unversioned_Time_To_unversioned_Time(in.StartTime, out.StartTime, s); err != nil { - return err - } - } else { - out.StartTime = nil - } - // unable to generate simple pointer conversion for unversioned.Time -> unversioned.Time - if in.CompletionTime != nil { - out.CompletionTime = new(unversioned.Time) - if err := api.Convert_unversioned_Time_To_unversioned_Time(in.CompletionTime, out.CompletionTime, s); err != nil { - return err - } - } else { - out.CompletionTime = nil - } - out.Active = int32(in.Active) - out.Succeeded = int32(in.Succeeded) - out.Failed = int32(in.Failed) - return nil -} - -func Convert_extensions_JobStatus_To_v1beta1_JobStatus(in *extensions.JobStatus, out *JobStatus, s conversion.Scope) error { - return autoConvert_extensions_JobStatus_To_v1beta1_JobStatus(in, out, s) -} - -func autoConvert_extensions_PodSecurityPolicy_To_v1beta1_PodSecurityPolicy(in *extensions.PodSecurityPolicy, out *PodSecurityPolicy, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*extensions.PodSecurityPolicy))(in) - } - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } - if err := Convert_extensions_PodSecurityPolicySpec_To_v1beta1_PodSecurityPolicySpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - return nil -} - -func Convert_extensions_PodSecurityPolicy_To_v1beta1_PodSecurityPolicy(in *extensions.PodSecurityPolicy, out *PodSecurityPolicy, s conversion.Scope) error { - return autoConvert_extensions_PodSecurityPolicy_To_v1beta1_PodSecurityPolicy(in, out, s) -} - -func autoConvert_extensions_PodSecurityPolicyList_To_v1beta1_PodSecurityPolicyList(in *extensions.PodSecurityPolicyList, out *PodSecurityPolicyList, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*extensions.PodSecurityPolicyList))(in) - } - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - out.Items = make([]PodSecurityPolicy, len(in.Items)) - for i := range in.Items { - if err := Convert_extensions_PodSecurityPolicy_To_v1beta1_PodSecurityPolicy(&in.Items[i], &out.Items[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_extensions_PodSecurityPolicyList_To_v1beta1_PodSecurityPolicyList(in *extensions.PodSecurityPolicyList, out *PodSecurityPolicyList, s 
conversion.Scope) error { - return autoConvert_extensions_PodSecurityPolicyList_To_v1beta1_PodSecurityPolicyList(in, out, s) -} - -func autoConvert_extensions_PodSecurityPolicySpec_To_v1beta1_PodSecurityPolicySpec(in *extensions.PodSecurityPolicySpec, out *PodSecurityPolicySpec, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*extensions.PodSecurityPolicySpec))(in) - } - out.Privileged = in.Privileged - if in.Capabilities != nil { - out.Capabilities = make([]v1.Capability, len(in.Capabilities)) - for i := range in.Capabilities { - out.Capabilities[i] = v1.Capability(in.Capabilities[i]) - } - } else { - out.Capabilities = nil - } - if in.Volumes != nil { - out.Volumes = make([]FSType, len(in.Volumes)) - for i := range in.Volumes { - out.Volumes[i] = FSType(in.Volumes[i]) - } - } else { - out.Volumes = nil - } - out.HostNetwork = in.HostNetwork - if in.HostPorts != nil { - out.HostPorts = make([]HostPortRange, len(in.HostPorts)) - for i := range in.HostPorts { - if err := Convert_extensions_HostPortRange_To_v1beta1_HostPortRange(&in.HostPorts[i], &out.HostPorts[i], s); err != nil { - return err - } - } - } else { - out.HostPorts = nil - } - out.HostPID = in.HostPID - out.HostIPC = in.HostIPC - if err := Convert_extensions_SELinuxStrategyOptions_To_v1beta1_SELinuxStrategyOptions(&in.SELinux, &out.SELinux, s); err != nil { - return err - } - if err := Convert_extensions_RunAsUserStrategyOptions_To_v1beta1_RunAsUserStrategyOptions(&in.RunAsUser, &out.RunAsUser, s); err != nil { - return err - } - return nil -} - -func Convert_extensions_PodSecurityPolicySpec_To_v1beta1_PodSecurityPolicySpec(in *extensions.PodSecurityPolicySpec, out *PodSecurityPolicySpec, s conversion.Scope) error { - return autoConvert_extensions_PodSecurityPolicySpec_To_v1beta1_PodSecurityPolicySpec(in, out, s) -} - -func autoConvert_extensions_ReplicaSet_To_v1beta1_ReplicaSet(in *extensions.ReplicaSet, out *ReplicaSet, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*extensions.ReplicaSet))(in) - } - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } - if err := Convert_extensions_ReplicaSetSpec_To_v1beta1_ReplicaSetSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_extensions_ReplicaSetStatus_To_v1beta1_ReplicaSetStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_extensions_ReplicaSet_To_v1beta1_ReplicaSet(in *extensions.ReplicaSet, out *ReplicaSet, s conversion.Scope) error { - return autoConvert_extensions_ReplicaSet_To_v1beta1_ReplicaSet(in, out, s) -} - -func autoConvert_extensions_ReplicaSetList_To_v1beta1_ReplicaSetList(in *extensions.ReplicaSetList, out *ReplicaSetList, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*extensions.ReplicaSetList))(in) - } - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { + out.UpdatedAnnotations = in.UpdatedAnnotations + if err := 
 		return err
 	}
-	if in.Items != nil {
-		out.Items = make([]ReplicaSet, len(in.Items))
-		for i := range in.Items {
-			if err := Convert_extensions_ReplicaSet_To_v1beta1_ReplicaSet(&in.Items[i], &out.Items[i], s); err != nil {
-				return err
-			}
-		}
-	} else {
-		out.Items = nil
-	}
 	return nil
 }
 
-func Convert_extensions_ReplicaSetList_To_v1beta1_ReplicaSetList(in *extensions.ReplicaSetList, out *ReplicaSetList, s conversion.Scope) error {
-	return autoConvert_extensions_ReplicaSetList_To_v1beta1_ReplicaSetList(in, out, s)
+func Convert_v1beta1_DeploymentRollback_To_extensions_DeploymentRollback(in *DeploymentRollback, out *extensions.DeploymentRollback, s conversion.Scope) error {
+	return autoConvert_v1beta1_DeploymentRollback_To_extensions_DeploymentRollback(in, out, s)
 }
 
-func autoConvert_extensions_ReplicaSetSpec_To_v1beta1_ReplicaSetSpec(in *extensions.ReplicaSetSpec, out *ReplicaSetSpec, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*extensions.ReplicaSetSpec))(in)
-	}
-	if err := s.Convert(&in.Replicas, &out.Replicas, 0); err != nil {
+func autoConvert_extensions_DeploymentRollback_To_v1beta1_DeploymentRollback(in *extensions.DeploymentRollback, out *DeploymentRollback, s conversion.Scope) error {
+	if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
 		return err
 	}
-	// unable to generate simple pointer conversion for unversioned.LabelSelector -> v1beta1.LabelSelector
-	if in.Selector != nil {
-		out.Selector = new(LabelSelector)
-		if err := Convert_unversioned_LabelSelector_To_v1beta1_LabelSelector(in.Selector, out.Selector, s); err != nil {
-			return err
-		}
-	} else {
-		out.Selector = nil
-	}
-	if err := Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil {
+	out.Name = in.Name
+	out.UpdatedAnnotations = in.UpdatedAnnotations
+	if err := Convert_extensions_RollbackConfig_To_v1beta1_RollbackConfig(&in.RollbackTo, &out.RollbackTo, s); err != nil {
 		return err
 	}
 	return nil
 }
 
-func autoConvert_extensions_ReplicaSetStatus_To_v1beta1_ReplicaSetStatus(in *extensions.ReplicaSetStatus, out *ReplicaSetStatus, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*extensions.ReplicaSetStatus))(in)
-	}
-	out.Replicas = int32(in.Replicas)
-	out.FullyLabeledReplicas = int32(in.FullyLabeledReplicas)
+func Convert_extensions_DeploymentRollback_To_v1beta1_DeploymentRollback(in *extensions.DeploymentRollback, out *DeploymentRollback, s conversion.Scope) error {
+	return autoConvert_extensions_DeploymentRollback_To_v1beta1_DeploymentRollback(in, out, s)
+}
+
+func autoConvert_v1beta1_DeploymentStatus_To_extensions_DeploymentStatus(in *DeploymentStatus, out *extensions.DeploymentStatus, s conversion.Scope) error {
 	out.ObservedGeneration = in.ObservedGeneration
+	out.Replicas = in.Replicas
+	out.UpdatedReplicas = in.UpdatedReplicas
+	out.AvailableReplicas = in.AvailableReplicas
+	out.UnavailableReplicas = in.UnavailableReplicas
 	return nil
 }
 
-func Convert_extensions_ReplicaSetStatus_To_v1beta1_ReplicaSetStatus(in *extensions.ReplicaSetStatus, out *ReplicaSetStatus, s conversion.Scope) error {
-	return autoConvert_extensions_ReplicaSetStatus_To_v1beta1_ReplicaSetStatus(in, out, s)
+func Convert_v1beta1_DeploymentStatus_To_extensions_DeploymentStatus(in *DeploymentStatus, out *extensions.DeploymentStatus, s conversion.Scope) error {
+	return autoConvert_v1beta1_DeploymentStatus_To_extensions_DeploymentStatus(in, out, s)
 }
 
-func autoConvert_extensions_ReplicationControllerDummy_To_v1beta1_ReplicationControllerDummy(in *extensions.ReplicationControllerDummy, out *ReplicationControllerDummy, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*extensions.ReplicationControllerDummy))(in)
-	}
-	if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
-		return err
-	}
+func autoConvert_extensions_DeploymentStatus_To_v1beta1_DeploymentStatus(in *extensions.DeploymentStatus, out *DeploymentStatus, s conversion.Scope) error {
+	out.ObservedGeneration = in.ObservedGeneration
+	out.Replicas = in.Replicas
+	out.UpdatedReplicas = in.UpdatedReplicas
+	out.AvailableReplicas = in.AvailableReplicas
+	out.UnavailableReplicas = in.UnavailableReplicas
 	return nil
 }
 
-func Convert_extensions_ReplicationControllerDummy_To_v1beta1_ReplicationControllerDummy(in *extensions.ReplicationControllerDummy, out *ReplicationControllerDummy, s conversion.Scope) error {
-	return autoConvert_extensions_ReplicationControllerDummy_To_v1beta1_ReplicationControllerDummy(in, out, s)
+func Convert_extensions_DeploymentStatus_To_v1beta1_DeploymentStatus(in *extensions.DeploymentStatus, out *DeploymentStatus, s conversion.Scope) error {
+	return autoConvert_extensions_DeploymentStatus_To_v1beta1_DeploymentStatus(in, out, s)
 }
 
-func autoConvert_extensions_RollbackConfig_To_v1beta1_RollbackConfig(in *extensions.RollbackConfig, out *RollbackConfig, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*extensions.RollbackConfig))(in)
+func autoConvert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy(in *DeploymentStrategy, out *extensions.DeploymentStrategy, s conversion.Scope) error {
+	out.Type = extensions.DeploymentStrategyType(in.Type)
+	if in.RollingUpdate != nil {
+		in, out := &in.RollingUpdate, &out.RollingUpdate
+		*out = new(extensions.RollingUpdateDeployment)
+		if err := Convert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(*in, *out, s); err != nil {
+			return err
+		}
+	} else {
+		out.RollingUpdate = nil
 	}
-	out.Revision = in.Revision
 	return nil
 }
 
-func Convert_extensions_RollbackConfig_To_v1beta1_RollbackConfig(in *extensions.RollbackConfig, out *RollbackConfig, s conversion.Scope) error {
-	return autoConvert_extensions_RollbackConfig_To_v1beta1_RollbackConfig(in, out, s)
-}
-
-func autoConvert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(in *extensions.RollingUpdateDeployment, out *RollingUpdateDeployment, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*extensions.RollingUpdateDeployment))(in)
-	}
-	if err := s.Convert(&in.MaxUnavailable, &out.MaxUnavailable, 0); err != nil {
-		return err
-	}
-	if err := s.Convert(&in.MaxSurge, &out.MaxSurge, 0); err != nil {
-		return err
+func autoConvert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy(in *extensions.DeploymentStrategy, out *DeploymentStrategy, s conversion.Scope) error {
+	out.Type = DeploymentStrategyType(in.Type)
+	if in.RollingUpdate != nil {
+		in, out := &in.RollingUpdate, &out.RollingUpdate
+		*out = new(RollingUpdateDeployment)
+		if err := Convert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(*in, *out, s); err != nil {
+			return err
+		}
+	} else {
+		out.RollingUpdate = nil
 	}
 	return nil
 }
 
-func autoConvert_extensions_RunAsUserStrategyOptions_To_v1beta1_RunAsUserStrategyOptions(in *extensions.RunAsUserStrategyOptions, out *RunAsUserStrategyOptions, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*extensions.RunAsUserStrategyOptions))(in)
-	}
-	out.Rule = RunAsUserStrategy(in.Rule)
+func autoConvert_v1beta1_FSGroupStrategyOptions_To_extensions_FSGroupStrategyOptions(in *FSGroupStrategyOptions, out *extensions.FSGroupStrategyOptions, s conversion.Scope) error {
+	out.Rule = extensions.FSGroupStrategyType(in.Rule)
 	if in.Ranges != nil {
-		out.Ranges = make([]IDRange, len(in.Ranges))
-		for i := range in.Ranges {
-			if err := Convert_extensions_IDRange_To_v1beta1_IDRange(&in.Ranges[i], &out.Ranges[i], s); err != nil {
+		in, out := &in.Ranges, &out.Ranges
+		*out = make([]extensions.IDRange, len(*in))
+		for i := range *in {
+			if err := Convert_v1beta1_IDRange_To_extensions_IDRange(&(*in)[i], &(*out)[i], s); err != nil {
 				return err
 			}
 		}
@@ -3676,142 +660,136 @@ func autoConvert_extensions_RunAsUserStrategyOptions_To_v1beta1_RunAsUserStrateg
 	return nil
 }
 
-func Convert_extensions_RunAsUserStrategyOptions_To_v1beta1_RunAsUserStrategyOptions(in *extensions.RunAsUserStrategyOptions, out *RunAsUserStrategyOptions, s conversion.Scope) error {
-	return autoConvert_extensions_RunAsUserStrategyOptions_To_v1beta1_RunAsUserStrategyOptions(in, out, s)
+func Convert_v1beta1_FSGroupStrategyOptions_To_extensions_FSGroupStrategyOptions(in *FSGroupStrategyOptions, out *extensions.FSGroupStrategyOptions, s conversion.Scope) error {
+	return autoConvert_v1beta1_FSGroupStrategyOptions_To_extensions_FSGroupStrategyOptions(in, out, s)
 }
 
-func autoConvert_extensions_SELinuxStrategyOptions_To_v1beta1_SELinuxStrategyOptions(in *extensions.SELinuxStrategyOptions, out *SELinuxStrategyOptions, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*extensions.SELinuxStrategyOptions))(in)
-	}
-	out.Rule = SELinuxStrategy(in.Rule)
-	// unable to generate simple pointer conversion for api.SELinuxOptions -> v1.SELinuxOptions
-	if in.SELinuxOptions != nil {
-		out.SELinuxOptions = new(v1.SELinuxOptions)
-		if err := Convert_api_SELinuxOptions_To_v1_SELinuxOptions(in.SELinuxOptions, out.SELinuxOptions, s); err != nil {
-			return err
+func autoConvert_extensions_FSGroupStrategyOptions_To_v1beta1_FSGroupStrategyOptions(in *extensions.FSGroupStrategyOptions, out *FSGroupStrategyOptions, s conversion.Scope) error {
+	out.Rule = FSGroupStrategyType(in.Rule)
+	if in.Ranges != nil {
+		in, out := &in.Ranges, &out.Ranges
+		*out = make([]IDRange, len(*in))
+		for i := range *in {
+			if err := Convert_extensions_IDRange_To_v1beta1_IDRange(&(*in)[i], &(*out)[i], s); err != nil {
+				return err
+			}
 		}
 	} else {
-		out.SELinuxOptions = nil
+		out.Ranges = nil
 	}
 	return nil
 }
 
-func Convert_extensions_SELinuxStrategyOptions_To_v1beta1_SELinuxStrategyOptions(in *extensions.SELinuxStrategyOptions, out *SELinuxStrategyOptions, s conversion.Scope) error {
-	return autoConvert_extensions_SELinuxStrategyOptions_To_v1beta1_SELinuxStrategyOptions(in, out, s)
+func Convert_extensions_FSGroupStrategyOptions_To_v1beta1_FSGroupStrategyOptions(in *extensions.FSGroupStrategyOptions, out *FSGroupStrategyOptions, s conversion.Scope) error {
+	return autoConvert_extensions_FSGroupStrategyOptions_To_v1beta1_FSGroupStrategyOptions(in, out, s)
 }
 
-func autoConvert_extensions_Scale_To_v1beta1_Scale(in *extensions.Scale, out *Scale, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*extensions.Scale))(in)
-	}
-	if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
-		return err
-	}
-	if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil {
-		return err
-	}
-	if err := Convert_extensions_ScaleSpec_To_v1beta1_ScaleSpec(&in.Spec, &out.Spec, s); err != nil {
-		return err
-	}
-	if err := Convert_extensions_ScaleStatus_To_v1beta1_ScaleStatus(&in.Status, &out.Status, s); err != nil {
+func autoConvert_v1beta1_HTTPIngressPath_To_extensions_HTTPIngressPath(in *HTTPIngressPath, out *extensions.HTTPIngressPath, s conversion.Scope) error {
+	out.Path = in.Path
+	if err := Convert_v1beta1_IngressBackend_To_extensions_IngressBackend(&in.Backend, &out.Backend, s); err != nil {
 		return err
 	}
 	return nil
 }
 
-func Convert_extensions_Scale_To_v1beta1_Scale(in *extensions.Scale, out *Scale, s conversion.Scope) error {
-	return autoConvert_extensions_Scale_To_v1beta1_Scale(in, out, s)
+func Convert_v1beta1_HTTPIngressPath_To_extensions_HTTPIngressPath(in *HTTPIngressPath, out *extensions.HTTPIngressPath, s conversion.Scope) error {
+	return autoConvert_v1beta1_HTTPIngressPath_To_extensions_HTTPIngressPath(in, out, s)
 }
 
-func autoConvert_extensions_ScaleSpec_To_v1beta1_ScaleSpec(in *extensions.ScaleSpec, out *ScaleSpec, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*extensions.ScaleSpec))(in)
+func autoConvert_extensions_HTTPIngressPath_To_v1beta1_HTTPIngressPath(in *extensions.HTTPIngressPath, out *HTTPIngressPath, s conversion.Scope) error {
+	out.Path = in.Path
+	if err := Convert_extensions_IngressBackend_To_v1beta1_IngressBackend(&in.Backend, &out.Backend, s); err != nil {
+		return err
 	}
-	out.Replicas = int32(in.Replicas)
 	return nil
 }
 
-func Convert_extensions_ScaleSpec_To_v1beta1_ScaleSpec(in *extensions.ScaleSpec, out *ScaleSpec, s conversion.Scope) error {
-	return autoConvert_extensions_ScaleSpec_To_v1beta1_ScaleSpec(in, out, s)
+func Convert_extensions_HTTPIngressPath_To_v1beta1_HTTPIngressPath(in *extensions.HTTPIngressPath, out *HTTPIngressPath, s conversion.Scope) error {
+	return autoConvert_extensions_HTTPIngressPath_To_v1beta1_HTTPIngressPath(in, out, s)
 }
 
-func autoConvert_extensions_ScaleStatus_To_v1beta1_ScaleStatus(in *extensions.ScaleStatus, out *ScaleStatus, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*extensions.ScaleStatus))(in)
+func autoConvert_v1beta1_HTTPIngressRuleValue_To_extensions_HTTPIngressRuleValue(in *HTTPIngressRuleValue, out *extensions.HTTPIngressRuleValue, s conversion.Scope) error {
+	if in.Paths != nil {
+		in, out := &in.Paths, &out.Paths
+		*out = make([]extensions.HTTPIngressPath, len(*in))
+		for i := range *in {
+			if err := Convert_v1beta1_HTTPIngressPath_To_extensions_HTTPIngressPath(&(*in)[i], &(*out)[i], s); err != nil {
+				return err
+			}
+		}
+	} else {
+		out.Paths = nil
 	}
-	out.Replicas = int32(in.Replicas)
-	// in.Selector has no peer in out
 	return nil
 }
 
-func autoConvert_extensions_SubresourceReference_To_v1beta1_SubresourceReference(in *extensions.SubresourceReference, out *SubresourceReference, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*extensions.SubresourceReference))(in)
+func Convert_v1beta1_HTTPIngressRuleValue_To_extensions_HTTPIngressRuleValue(in *HTTPIngressRuleValue, out *extensions.HTTPIngressRuleValue, s conversion.Scope) error {
+	return autoConvert_v1beta1_HTTPIngressRuleValue_To_extensions_HTTPIngressRuleValue(in, out, s)
+}
+
+func autoConvert_extensions_HTTPIngressRuleValue_To_v1beta1_HTTPIngressRuleValue(in *extensions.HTTPIngressRuleValue, out *HTTPIngressRuleValue, s conversion.Scope) error {
+	if in.Paths != nil {
+		in, out := &in.Paths, &out.Paths
+		*out = make([]HTTPIngressPath, len(*in))
+		for i := range *in {
+			if err := Convert_extensions_HTTPIngressPath_To_v1beta1_HTTPIngressPath(&(*in)[i], &(*out)[i], s); err != nil {
+				return err
+			}
+		}
+	} else {
+		out.Paths = nil
 	}
-	out.Kind = in.Kind
-	out.Name = in.Name
-	out.APIVersion = in.APIVersion
-	out.Subresource = in.Subresource
 	return nil
 }
 
-func Convert_extensions_SubresourceReference_To_v1beta1_SubresourceReference(in *extensions.SubresourceReference, out *SubresourceReference, s conversion.Scope) error {
-	return autoConvert_extensions_SubresourceReference_To_v1beta1_SubresourceReference(in, out, s)
+func Convert_extensions_HTTPIngressRuleValue_To_v1beta1_HTTPIngressRuleValue(in *extensions.HTTPIngressRuleValue, out *HTTPIngressRuleValue, s conversion.Scope) error {
+	return autoConvert_extensions_HTTPIngressRuleValue_To_v1beta1_HTTPIngressRuleValue(in, out, s)
 }
 
-func autoConvert_extensions_ThirdPartyResource_To_v1beta1_ThirdPartyResource(in *extensions.ThirdPartyResource, out *ThirdPartyResource, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*extensions.ThirdPartyResource))(in)
-	}
+func autoConvert_v1beta1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(in *HorizontalPodAutoscaler, out *autoscaling.HorizontalPodAutoscaler, s conversion.Scope) error {
+	SetDefaults_HorizontalPodAutoscaler(in)
 	if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
 		return err
 	}
-	if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil {
+	// TODO: Inefficient conversion - can we improve it?
+	if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil {
 		return err
 	}
-	out.Description = in.Description
-	if in.Versions != nil {
-		out.Versions = make([]APIVersion, len(in.Versions))
-		for i := range in.Versions {
-			if err := Convert_extensions_APIVersion_To_v1beta1_APIVersion(&in.Versions[i], &out.Versions[i], s); err != nil {
-				return err
-			}
-		}
-	} else {
-		out.Versions = nil
+	if err := Convert_v1beta1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec(&in.Spec, &out.Spec, s); err != nil {
+		return err
+	}
+	if err := Convert_v1beta1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(&in.Status, &out.Status, s); err != nil {
+		return err
 	}
 	return nil
 }
 
-func Convert_extensions_ThirdPartyResource_To_v1beta1_ThirdPartyResource(in *extensions.ThirdPartyResource, out *ThirdPartyResource, s conversion.Scope) error {
-	return autoConvert_extensions_ThirdPartyResource_To_v1beta1_ThirdPartyResource(in, out, s)
+func Convert_v1beta1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(in *HorizontalPodAutoscaler, out *autoscaling.HorizontalPodAutoscaler, s conversion.Scope) error {
+	return autoConvert_v1beta1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(in, out, s)
 }
 
-func autoConvert_extensions_ThirdPartyResourceData_To_v1beta1_ThirdPartyResourceData(in *extensions.ThirdPartyResourceData, out *ThirdPartyResourceData, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*extensions.ThirdPartyResourceData))(in)
-	}
+func autoConvert_autoscaling_HorizontalPodAutoscaler_To_v1beta1_HorizontalPodAutoscaler(in *autoscaling.HorizontalPodAutoscaler, out *HorizontalPodAutoscaler, s conversion.Scope) error {
 	if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
 		return err
 	}
-	if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil {
+	// TODO: Inefficient conversion - can we improve it?
+	if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil {
 		return err
 	}
-	if err := conversion.ByteSliceCopy(&in.Data, &out.Data, s); err != nil {
+	if err := Convert_autoscaling_HorizontalPodAutoscalerSpec_To_v1beta1_HorizontalPodAutoscalerSpec(&in.Spec, &out.Spec, s); err != nil {
+		return err
+	}
+	if err := Convert_autoscaling_HorizontalPodAutoscalerStatus_To_v1beta1_HorizontalPodAutoscalerStatus(&in.Status, &out.Status, s); err != nil {
 		return err
 	}
 	return nil
 }
 
-func Convert_extensions_ThirdPartyResourceData_To_v1beta1_ThirdPartyResourceData(in *extensions.ThirdPartyResourceData, out *ThirdPartyResourceData, s conversion.Scope) error {
-	return autoConvert_extensions_ThirdPartyResourceData_To_v1beta1_ThirdPartyResourceData(in, out, s)
+func Convert_autoscaling_HorizontalPodAutoscaler_To_v1beta1_HorizontalPodAutoscaler(in *autoscaling.HorizontalPodAutoscaler, out *HorizontalPodAutoscaler, s conversion.Scope) error {
+	return autoConvert_autoscaling_HorizontalPodAutoscaler_To_v1beta1_HorizontalPodAutoscaler(in, out, s)
 }
 
-func autoConvert_extensions_ThirdPartyResourceDataList_To_v1beta1_ThirdPartyResourceDataList(in *extensions.ThirdPartyResourceDataList, out *ThirdPartyResourceDataList, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*extensions.ThirdPartyResourceDataList))(in)
-	}
+func autoConvert_v1beta1_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList(in *HorizontalPodAutoscalerList, out *autoscaling.HorizontalPodAutoscalerList, s conversion.Scope) error {
 	if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
 		return err
 	}
@@ -3819,9 +797,10 @@ func autoConvert_extensions_ThirdPartyResourceDataList_To_v1beta1_ThirdPartyReso
 		return err
 	}
 	if in.Items != nil {
-		out.Items = make([]ThirdPartyResourceData, len(in.Items))
-		for i := range in.Items {
-			if err := Convert_extensions_ThirdPartyResourceData_To_v1beta1_ThirdPartyResourceData(&in.Items[i], &out.Items[i], s); err != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]autoscaling.HorizontalPodAutoscaler, len(*in))
+		for i := range *in {
+			if err := Convert_v1beta1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(&(*in)[i], &(*out)[i], s); err != nil {
 				return err
 			}
 		}
@@ -3831,14 +810,11 @@ func autoConvert_extensions_ThirdPartyResourceDataList_To_v1beta1_ThirdPartyReso
 	return nil
 }
 
-func Convert_extensions_ThirdPartyResourceDataList_To_v1beta1_ThirdPartyResourceDataList(in *extensions.ThirdPartyResourceDataList, out *ThirdPartyResourceDataList, s conversion.Scope) error {
-	return autoConvert_extensions_ThirdPartyResourceDataList_To_v1beta1_ThirdPartyResourceDataList(in, out, s)
+func Convert_v1beta1_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList(in *HorizontalPodAutoscalerList, out *autoscaling.HorizontalPodAutoscalerList, s conversion.Scope) error {
+	return autoConvert_v1beta1_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList(in, out, s)
 }
 
-func autoConvert_extensions_ThirdPartyResourceList_To_v1beta1_ThirdPartyResourceList(in *extensions.ThirdPartyResourceList, out *ThirdPartyResourceList, s conversion.Scope) error {
-	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
-		defaulting.(func(*extensions.ThirdPartyResourceList))(in)
-	}
+func autoConvert_autoscaling_HorizontalPodAutoscalerList_To_v1beta1_HorizontalPodAutoscalerList(in *autoscaling.HorizontalPodAutoscalerList,
out *HorizontalPodAutoscalerList, s conversion.Scope) error { if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } @@ -3846,9 +822,10 @@ func autoConvert_extensions_ThirdPartyResourceList_To_v1beta1_ThirdPartyResource return err } if in.Items != nil { - out.Items = make([]ThirdPartyResource, len(in.Items)) - for i := range in.Items { - if err := Convert_extensions_ThirdPartyResource_To_v1beta1_ThirdPartyResource(&in.Items[i], &out.Items[i], s); err != nil { + in, out := &in.Items, &out.Items + *out = make([]HorizontalPodAutoscaler, len(*in)) + for i := range *in { + if err := Convert_autoscaling_HorizontalPodAutoscaler_To_v1beta1_HorizontalPodAutoscaler(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -3858,149 +835,143 @@ func autoConvert_extensions_ThirdPartyResourceList_To_v1beta1_ThirdPartyResource return nil } -func Convert_extensions_ThirdPartyResourceList_To_v1beta1_ThirdPartyResourceList(in *extensions.ThirdPartyResourceList, out *ThirdPartyResourceList, s conversion.Scope) error { - return autoConvert_extensions_ThirdPartyResourceList_To_v1beta1_ThirdPartyResourceList(in, out, s) +func Convert_autoscaling_HorizontalPodAutoscalerList_To_v1beta1_HorizontalPodAutoscalerList(in *autoscaling.HorizontalPodAutoscalerList, out *HorizontalPodAutoscalerList, s conversion.Scope) error { + return autoConvert_autoscaling_HorizontalPodAutoscalerList_To_v1beta1_HorizontalPodAutoscalerList(in, out, s) } -func autoConvert_v1beta1_APIVersion_To_extensions_APIVersion(in *APIVersion, out *extensions.APIVersion, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*APIVersion))(in) - } - out.Name = in.Name - out.APIGroup = in.APIGroup +func autoConvert_v1beta1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(in *HorizontalPodAutoscalerStatus, out *autoscaling.HorizontalPodAutoscalerStatus, s conversion.Scope) error { + out.ObservedGeneration = in.ObservedGeneration + out.LastScaleTime = in.LastScaleTime + out.CurrentReplicas = in.CurrentReplicas + out.DesiredReplicas = in.DesiredReplicas + out.CurrentCPUUtilizationPercentage = in.CurrentCPUUtilizationPercentage return nil } -func Convert_v1beta1_APIVersion_To_extensions_APIVersion(in *APIVersion, out *extensions.APIVersion, s conversion.Scope) error { - return autoConvert_v1beta1_APIVersion_To_extensions_APIVersion(in, out, s) +func Convert_v1beta1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(in *HorizontalPodAutoscalerStatus, out *autoscaling.HorizontalPodAutoscalerStatus, s conversion.Scope) error { + return autoConvert_v1beta1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(in, out, s) } -func autoConvert_v1beta1_CPUTargetUtilization_To_extensions_CPUTargetUtilization(in *CPUTargetUtilization, out *extensions.CPUTargetUtilization, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*CPUTargetUtilization))(in) - } - out.TargetPercentage = int(in.TargetPercentage) +func autoConvert_autoscaling_HorizontalPodAutoscalerStatus_To_v1beta1_HorizontalPodAutoscalerStatus(in *autoscaling.HorizontalPodAutoscalerStatus, out *HorizontalPodAutoscalerStatus, s conversion.Scope) error { + out.ObservedGeneration = in.ObservedGeneration + out.LastScaleTime = in.LastScaleTime + out.CurrentReplicas = in.CurrentReplicas + out.DesiredReplicas = in.DesiredReplicas + 
out.CurrentCPUUtilizationPercentage = in.CurrentCPUUtilizationPercentage return nil } -func Convert_v1beta1_CPUTargetUtilization_To_extensions_CPUTargetUtilization(in *CPUTargetUtilization, out *extensions.CPUTargetUtilization, s conversion.Scope) error { - return autoConvert_v1beta1_CPUTargetUtilization_To_extensions_CPUTargetUtilization(in, out, s) +func Convert_autoscaling_HorizontalPodAutoscalerStatus_To_v1beta1_HorizontalPodAutoscalerStatus(in *autoscaling.HorizontalPodAutoscalerStatus, out *HorizontalPodAutoscalerStatus, s conversion.Scope) error { + return autoConvert_autoscaling_HorizontalPodAutoscalerStatus_To_v1beta1_HorizontalPodAutoscalerStatus(in, out, s) } -func autoConvert_v1beta1_DaemonSet_To_extensions_DaemonSet(in *DaemonSet, out *extensions.DaemonSet, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*DaemonSet))(in) - } +func autoConvert_v1beta1_HostPortRange_To_extensions_HostPortRange(in *HostPortRange, out *extensions.HostPortRange, s conversion.Scope) error { + out.Min = int(in.Min) + out.Max = int(in.Max) + return nil +} + +func Convert_v1beta1_HostPortRange_To_extensions_HostPortRange(in *HostPortRange, out *extensions.HostPortRange, s conversion.Scope) error { + return autoConvert_v1beta1_HostPortRange_To_extensions_HostPortRange(in, out, s) +} + +func autoConvert_extensions_HostPortRange_To_v1beta1_HostPortRange(in *extensions.HostPortRange, out *HostPortRange, s conversion.Scope) error { + out.Min = int32(in.Min) + out.Max = int32(in.Max) + return nil +} + +func Convert_extensions_HostPortRange_To_v1beta1_HostPortRange(in *extensions.HostPortRange, out *HostPortRange, s conversion.Scope) error { + return autoConvert_extensions_HostPortRange_To_v1beta1_HostPortRange(in, out, s) +} + +func autoConvert_v1beta1_IDRange_To_extensions_IDRange(in *IDRange, out *extensions.IDRange, s conversion.Scope) error { + out.Min = in.Min + out.Max = in.Max + return nil +} + +func Convert_v1beta1_IDRange_To_extensions_IDRange(in *IDRange, out *extensions.IDRange, s conversion.Scope) error { + return autoConvert_v1beta1_IDRange_To_extensions_IDRange(in, out, s) +} + +func autoConvert_extensions_IDRange_To_v1beta1_IDRange(in *extensions.IDRange, out *IDRange, s conversion.Scope) error { + out.Min = in.Min + out.Max = in.Max + return nil +} + +func Convert_extensions_IDRange_To_v1beta1_IDRange(in *extensions.IDRange, out *IDRange, s conversion.Scope) error { + return autoConvert_extensions_IDRange_To_v1beta1_IDRange(in, out, s) +} + +func autoConvert_v1beta1_Ingress_To_extensions_Ingress(in *Ingress, out *extensions.Ingress, s conversion.Scope) error { if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } - if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { + // TODO: Inefficient conversion - can we improve it? 
+ if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { return err } - if err := Convert_v1beta1_DaemonSetSpec_To_extensions_DaemonSetSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_v1beta1_IngressSpec_To_extensions_IngressSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_v1beta1_DaemonSetStatus_To_extensions_DaemonSetStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_v1beta1_IngressStatus_To_extensions_IngressStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -func Convert_v1beta1_DaemonSet_To_extensions_DaemonSet(in *DaemonSet, out *extensions.DaemonSet, s conversion.Scope) error { - return autoConvert_v1beta1_DaemonSet_To_extensions_DaemonSet(in, out, s) +func Convert_v1beta1_Ingress_To_extensions_Ingress(in *Ingress, out *extensions.Ingress, s conversion.Scope) error { + return autoConvert_v1beta1_Ingress_To_extensions_Ingress(in, out, s) } -func autoConvert_v1beta1_DaemonSetList_To_extensions_DaemonSetList(in *DaemonSetList, out *extensions.DaemonSetList, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*DaemonSetList))(in) - } +func autoConvert_extensions_Ingress_To_v1beta1_Ingress(in *extensions.Ingress, out *Ingress, s conversion.Scope) error { if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { + // TODO: Inefficient conversion - can we improve it? + if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { return err } - if in.Items != nil { - out.Items = make([]extensions.DaemonSet, len(in.Items)) - for i := range in.Items { - if err := Convert_v1beta1_DaemonSet_To_extensions_DaemonSet(&in.Items[i], &out.Items[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_v1beta1_DaemonSetList_To_extensions_DaemonSetList(in *DaemonSetList, out *extensions.DaemonSetList, s conversion.Scope) error { - return autoConvert_v1beta1_DaemonSetList_To_extensions_DaemonSetList(in, out, s) -} - -func autoConvert_v1beta1_DaemonSetSpec_To_extensions_DaemonSetSpec(in *DaemonSetSpec, out *extensions.DaemonSetSpec, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*DaemonSetSpec))(in) - } - // unable to generate simple pointer conversion for v1beta1.LabelSelector -> unversioned.LabelSelector - if in.Selector != nil { - out.Selector = new(unversioned.LabelSelector) - if err := Convert_v1beta1_LabelSelector_To_unversioned_LabelSelector(in.Selector, out.Selector, s); err != nil { - return err - } - } else { - out.Selector = nil + if err := Convert_extensions_IngressSpec_To_v1beta1_IngressSpec(&in.Spec, &out.Spec, s); err != nil { + return err } - if err := Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + if err := Convert_extensions_IngressStatus_To_v1beta1_IngressStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -func Convert_v1beta1_DaemonSetSpec_To_extensions_DaemonSetSpec(in *DaemonSetSpec, out *extensions.DaemonSetSpec, s conversion.Scope) error { - return autoConvert_v1beta1_DaemonSetSpec_To_extensions_DaemonSetSpec(in, out, s) +func Convert_extensions_Ingress_To_v1beta1_Ingress(in *extensions.Ingress, out *Ingress, s 
conversion.Scope) error { + return autoConvert_extensions_Ingress_To_v1beta1_Ingress(in, out, s) } -func autoConvert_v1beta1_DaemonSetStatus_To_extensions_DaemonSetStatus(in *DaemonSetStatus, out *extensions.DaemonSetStatus, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*DaemonSetStatus))(in) +func autoConvert_v1beta1_IngressBackend_To_extensions_IngressBackend(in *IngressBackend, out *extensions.IngressBackend, s conversion.Scope) error { + out.ServiceName = in.ServiceName + if err := api.Convert_intstr_IntOrString_To_intstr_IntOrString(&in.ServicePort, &out.ServicePort, s); err != nil { + return err } - out.CurrentNumberScheduled = int(in.CurrentNumberScheduled) - out.NumberMisscheduled = int(in.NumberMisscheduled) - out.DesiredNumberScheduled = int(in.DesiredNumberScheduled) return nil } -func Convert_v1beta1_DaemonSetStatus_To_extensions_DaemonSetStatus(in *DaemonSetStatus, out *extensions.DaemonSetStatus, s conversion.Scope) error { - return autoConvert_v1beta1_DaemonSetStatus_To_extensions_DaemonSetStatus(in, out, s) +func Convert_v1beta1_IngressBackend_To_extensions_IngressBackend(in *IngressBackend, out *extensions.IngressBackend, s conversion.Scope) error { + return autoConvert_v1beta1_IngressBackend_To_extensions_IngressBackend(in, out, s) } -func autoConvert_v1beta1_Deployment_To_extensions_Deployment(in *Deployment, out *extensions.Deployment, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*Deployment))(in) - } - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } - if err := Convert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1beta1_DeploymentStatus_To_extensions_DeploymentStatus(&in.Status, &out.Status, s); err != nil { +func autoConvert_extensions_IngressBackend_To_v1beta1_IngressBackend(in *extensions.IngressBackend, out *IngressBackend, s conversion.Scope) error { + out.ServiceName = in.ServiceName + if err := api.Convert_intstr_IntOrString_To_intstr_IntOrString(&in.ServicePort, &out.ServicePort, s); err != nil { return err } return nil } -func Convert_v1beta1_Deployment_To_extensions_Deployment(in *Deployment, out *extensions.Deployment, s conversion.Scope) error { - return autoConvert_v1beta1_Deployment_To_extensions_Deployment(in, out, s) +func Convert_extensions_IngressBackend_To_v1beta1_IngressBackend(in *extensions.IngressBackend, out *IngressBackend, s conversion.Scope) error { + return autoConvert_extensions_IngressBackend_To_v1beta1_IngressBackend(in, out, s) } -func autoConvert_v1beta1_DeploymentList_To_extensions_DeploymentList(in *DeploymentList, out *extensions.DeploymentList, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*DeploymentList))(in) - } +func autoConvert_v1beta1_IngressList_To_extensions_IngressList(in *IngressList, out *extensions.IngressList, s conversion.Scope) error { if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } @@ -4008,9 +979,10 @@ func autoConvert_v1beta1_DeploymentList_To_extensions_DeploymentList(in *Deploym return err } if in.Items != nil { - out.Items 
= make([]extensions.Deployment, len(in.Items)) - for i := range in.Items { - if err := Convert_v1beta1_Deployment_To_extensions_Deployment(&in.Items[i], &out.Items[i], s); err != nil { + in, out := &in.Items, &out.Items + *out = make([]extensions.Ingress, len(*in)) + for i := range *in { + if err := Convert_v1beta1_Ingress_To_extensions_Ingress(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -4020,310 +992,295 @@ func autoConvert_v1beta1_DeploymentList_To_extensions_DeploymentList(in *Deploym return nil } -func Convert_v1beta1_DeploymentList_To_extensions_DeploymentList(in *DeploymentList, out *extensions.DeploymentList, s conversion.Scope) error { - return autoConvert_v1beta1_DeploymentList_To_extensions_DeploymentList(in, out, s) +func Convert_v1beta1_IngressList_To_extensions_IngressList(in *IngressList, out *extensions.IngressList, s conversion.Scope) error { + return autoConvert_v1beta1_IngressList_To_extensions_IngressList(in, out, s) } -func autoConvert_v1beta1_DeploymentRollback_To_extensions_DeploymentRollback(in *DeploymentRollback, out *extensions.DeploymentRollback, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*DeploymentRollback))(in) - } +func autoConvert_extensions_IngressList_To_v1beta1_IngressList(in *extensions.IngressList, out *IngressList, s conversion.Scope) error { if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } - out.Name = in.Name - if in.UpdatedAnnotations != nil { - out.UpdatedAnnotations = make(map[string]string) - for key, val := range in.UpdatedAnnotations { - out.UpdatedAnnotations[key] = val + if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { + return err + } + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Ingress, len(*in)) + for i := range *in { + if err := Convert_extensions_Ingress_To_v1beta1_Ingress(&(*in)[i], &(*out)[i], s); err != nil { + return err + } } } else { - out.UpdatedAnnotations = nil - } - if err := Convert_v1beta1_RollbackConfig_To_extensions_RollbackConfig(&in.RollbackTo, &out.RollbackTo, s); err != nil { - return err + out.Items = nil } return nil } -func Convert_v1beta1_DeploymentRollback_To_extensions_DeploymentRollback(in *DeploymentRollback, out *extensions.DeploymentRollback, s conversion.Scope) error { - return autoConvert_v1beta1_DeploymentRollback_To_extensions_DeploymentRollback(in, out, s) +func Convert_extensions_IngressList_To_v1beta1_IngressList(in *extensions.IngressList, out *IngressList, s conversion.Scope) error { + return autoConvert_extensions_IngressList_To_v1beta1_IngressList(in, out, s) } -func autoConvert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec(in *DeploymentSpec, out *extensions.DeploymentSpec, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*DeploymentSpec))(in) - } - // in.Replicas has no peer in out - // unable to generate simple pointer conversion for v1beta1.LabelSelector -> unversioned.LabelSelector - if in.Selector != nil { - out.Selector = new(unversioned.LabelSelector) - if err := Convert_v1beta1_LabelSelector_To_unversioned_LabelSelector(in.Selector, out.Selector, s); err != nil { - return err - } - } else { - out.Selector = nil - } - if err := Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { - return err - } - if err 
:= s.Convert(&in.Strategy, &out.Strategy, 0); err != nil { +func autoConvert_v1beta1_IngressRule_To_extensions_IngressRule(in *IngressRule, out *extensions.IngressRule, s conversion.Scope) error { + out.Host = in.Host + if err := Convert_v1beta1_IngressRuleValue_To_extensions_IngressRuleValue(&in.IngressRuleValue, &out.IngressRuleValue, s); err != nil { return err } - out.MinReadySeconds = int(in.MinReadySeconds) - if in.RevisionHistoryLimit != nil { - out.RevisionHistoryLimit = new(int) - *out.RevisionHistoryLimit = int(*in.RevisionHistoryLimit) - } else { - out.RevisionHistoryLimit = nil - } - out.Paused = in.Paused - // unable to generate simple pointer conversion for v1beta1.RollbackConfig -> extensions.RollbackConfig - if in.RollbackTo != nil { - out.RollbackTo = new(extensions.RollbackConfig) - if err := Convert_v1beta1_RollbackConfig_To_extensions_RollbackConfig(in.RollbackTo, out.RollbackTo, s); err != nil { - return err - } - } else { - out.RollbackTo = nil - } - return nil -} - -func autoConvert_v1beta1_DeploymentStatus_To_extensions_DeploymentStatus(in *DeploymentStatus, out *extensions.DeploymentStatus, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*DeploymentStatus))(in) - } - out.ObservedGeneration = in.ObservedGeneration - out.Replicas = int(in.Replicas) - out.UpdatedReplicas = int(in.UpdatedReplicas) - out.AvailableReplicas = int(in.AvailableReplicas) - out.UnavailableReplicas = int(in.UnavailableReplicas) return nil } -func Convert_v1beta1_DeploymentStatus_To_extensions_DeploymentStatus(in *DeploymentStatus, out *extensions.DeploymentStatus, s conversion.Scope) error { - return autoConvert_v1beta1_DeploymentStatus_To_extensions_DeploymentStatus(in, out, s) +func Convert_v1beta1_IngressRule_To_extensions_IngressRule(in *IngressRule, out *extensions.IngressRule, s conversion.Scope) error { + return autoConvert_v1beta1_IngressRule_To_extensions_IngressRule(in, out, s) } -func autoConvert_v1beta1_HTTPIngressPath_To_extensions_HTTPIngressPath(in *HTTPIngressPath, out *extensions.HTTPIngressPath, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*HTTPIngressPath))(in) - } - out.Path = in.Path - if err := Convert_v1beta1_IngressBackend_To_extensions_IngressBackend(&in.Backend, &out.Backend, s); err != nil { +func autoConvert_extensions_IngressRule_To_v1beta1_IngressRule(in *extensions.IngressRule, out *IngressRule, s conversion.Scope) error { + out.Host = in.Host + if err := Convert_extensions_IngressRuleValue_To_v1beta1_IngressRuleValue(&in.IngressRuleValue, &out.IngressRuleValue, s); err != nil { return err } return nil } -func Convert_v1beta1_HTTPIngressPath_To_extensions_HTTPIngressPath(in *HTTPIngressPath, out *extensions.HTTPIngressPath, s conversion.Scope) error { - return autoConvert_v1beta1_HTTPIngressPath_To_extensions_HTTPIngressPath(in, out, s) +func Convert_extensions_IngressRule_To_v1beta1_IngressRule(in *extensions.IngressRule, out *IngressRule, s conversion.Scope) error { + return autoConvert_extensions_IngressRule_To_v1beta1_IngressRule(in, out, s) } -func autoConvert_v1beta1_HTTPIngressRuleValue_To_extensions_HTTPIngressRuleValue(in *HTTPIngressRuleValue, out *extensions.HTTPIngressRuleValue, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*HTTPIngressRuleValue))(in) - } - if in.Paths != nil { - out.Paths = 
make([]extensions.HTTPIngressPath, len(in.Paths)) - for i := range in.Paths { - if err := Convert_v1beta1_HTTPIngressPath_To_extensions_HTTPIngressPath(&in.Paths[i], &out.Paths[i], s); err != nil { - return err - } +func autoConvert_v1beta1_IngressRuleValue_To_extensions_IngressRuleValue(in *IngressRuleValue, out *extensions.IngressRuleValue, s conversion.Scope) error { + if in.HTTP != nil { + in, out := &in.HTTP, &out.HTTP + *out = new(extensions.HTTPIngressRuleValue) + if err := Convert_v1beta1_HTTPIngressRuleValue_To_extensions_HTTPIngressRuleValue(*in, *out, s); err != nil { + return err } } else { - out.Paths = nil + out.HTTP = nil } return nil } -func Convert_v1beta1_HTTPIngressRuleValue_To_extensions_HTTPIngressRuleValue(in *HTTPIngressRuleValue, out *extensions.HTTPIngressRuleValue, s conversion.Scope) error { - return autoConvert_v1beta1_HTTPIngressRuleValue_To_extensions_HTTPIngressRuleValue(in, out, s) +func Convert_v1beta1_IngressRuleValue_To_extensions_IngressRuleValue(in *IngressRuleValue, out *extensions.IngressRuleValue, s conversion.Scope) error { + return autoConvert_v1beta1_IngressRuleValue_To_extensions_IngressRuleValue(in, out, s) } -func autoConvert_v1beta1_HorizontalPodAutoscaler_To_extensions_HorizontalPodAutoscaler(in *HorizontalPodAutoscaler, out *extensions.HorizontalPodAutoscaler, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*HorizontalPodAutoscaler))(in) - } - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } - if err := Convert_v1beta1_HorizontalPodAutoscalerSpec_To_extensions_HorizontalPodAutoscalerSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1beta1_HorizontalPodAutoscalerStatus_To_extensions_HorizontalPodAutoscalerStatus(&in.Status, &out.Status, s); err != nil { - return err +func autoConvert_extensions_IngressRuleValue_To_v1beta1_IngressRuleValue(in *extensions.IngressRuleValue, out *IngressRuleValue, s conversion.Scope) error { + if in.HTTP != nil { + in, out := &in.HTTP, &out.HTTP + *out = new(HTTPIngressRuleValue) + if err := Convert_extensions_HTTPIngressRuleValue_To_v1beta1_HTTPIngressRuleValue(*in, *out, s); err != nil { + return err + } + } else { + out.HTTP = nil } return nil } -func Convert_v1beta1_HorizontalPodAutoscaler_To_extensions_HorizontalPodAutoscaler(in *HorizontalPodAutoscaler, out *extensions.HorizontalPodAutoscaler, s conversion.Scope) error { - return autoConvert_v1beta1_HorizontalPodAutoscaler_To_extensions_HorizontalPodAutoscaler(in, out, s) +func Convert_extensions_IngressRuleValue_To_v1beta1_IngressRuleValue(in *extensions.IngressRuleValue, out *IngressRuleValue, s conversion.Scope) error { + return autoConvert_extensions_IngressRuleValue_To_v1beta1_IngressRuleValue(in, out, s) } -func autoConvert_v1beta1_HorizontalPodAutoscalerList_To_extensions_HorizontalPodAutoscalerList(in *HorizontalPodAutoscalerList, out *extensions.HorizontalPodAutoscalerList, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*HorizontalPodAutoscalerList))(in) - } - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err +func autoConvert_v1beta1_IngressSpec_To_extensions_IngressSpec(in *IngressSpec, out 
*extensions.IngressSpec, s conversion.Scope) error { + if in.Backend != nil { + in, out := &in.Backend, &out.Backend + *out = new(extensions.IngressBackend) + if err := Convert_v1beta1_IngressBackend_To_extensions_IngressBackend(*in, *out, s); err != nil { + return err + } + } else { + out.Backend = nil } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = make([]extensions.IngressTLS, len(*in)) + for i := range *in { + if err := Convert_v1beta1_IngressTLS_To_extensions_IngressTLS(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.TLS = nil } - if in.Items != nil { - out.Items = make([]extensions.HorizontalPodAutoscaler, len(in.Items)) - for i := range in.Items { - if err := Convert_v1beta1_HorizontalPodAutoscaler_To_extensions_HorizontalPodAutoscaler(&in.Items[i], &out.Items[i], s); err != nil { + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]extensions.IngressRule, len(*in)) + for i := range *in { + if err := Convert_v1beta1_IngressRule_To_extensions_IngressRule(&(*in)[i], &(*out)[i], s); err != nil { return err } } } else { - out.Items = nil + out.Rules = nil } return nil } -func Convert_v1beta1_HorizontalPodAutoscalerList_To_extensions_HorizontalPodAutoscalerList(in *HorizontalPodAutoscalerList, out *extensions.HorizontalPodAutoscalerList, s conversion.Scope) error { - return autoConvert_v1beta1_HorizontalPodAutoscalerList_To_extensions_HorizontalPodAutoscalerList(in, out, s) +func Convert_v1beta1_IngressSpec_To_extensions_IngressSpec(in *IngressSpec, out *extensions.IngressSpec, s conversion.Scope) error { + return autoConvert_v1beta1_IngressSpec_To_extensions_IngressSpec(in, out, s) } -func autoConvert_v1beta1_HorizontalPodAutoscalerSpec_To_extensions_HorizontalPodAutoscalerSpec(in *HorizontalPodAutoscalerSpec, out *extensions.HorizontalPodAutoscalerSpec, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*HorizontalPodAutoscalerSpec))(in) - } - if err := Convert_v1beta1_SubresourceReference_To_extensions_SubresourceReference(&in.ScaleRef, &out.ScaleRef, s); err != nil { - return err +func autoConvert_extensions_IngressSpec_To_v1beta1_IngressSpec(in *extensions.IngressSpec, out *IngressSpec, s conversion.Scope) error { + if in.Backend != nil { + in, out := &in.Backend, &out.Backend + *out = new(IngressBackend) + if err := Convert_extensions_IngressBackend_To_v1beta1_IngressBackend(*in, *out, s); err != nil { + return err + } + } else { + out.Backend = nil } - if in.MinReplicas != nil { - out.MinReplicas = new(int) - *out.MinReplicas = int(*in.MinReplicas) + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = make([]IngressTLS, len(*in)) + for i := range *in { + if err := Convert_extensions_IngressTLS_To_v1beta1_IngressTLS(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } } else { - out.MinReplicas = nil + out.TLS = nil } - out.MaxReplicas = int(in.MaxReplicas) - // unable to generate simple pointer conversion for v1beta1.CPUTargetUtilization -> extensions.CPUTargetUtilization - if in.CPUUtilization != nil { - out.CPUUtilization = new(extensions.CPUTargetUtilization) - if err := Convert_v1beta1_CPUTargetUtilization_To_extensions_CPUTargetUtilization(in.CPUUtilization, out.CPUUtilization, s); err != nil { - return err + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]IngressRule, 
len(*in)) + for i := range *in { + if err := Convert_extensions_IngressRule_To_v1beta1_IngressRule(&(*in)[i], &(*out)[i], s); err != nil { + return err + } } } else { - out.CPUUtilization = nil + out.Rules = nil } return nil } -func Convert_v1beta1_HorizontalPodAutoscalerSpec_To_extensions_HorizontalPodAutoscalerSpec(in *HorizontalPodAutoscalerSpec, out *extensions.HorizontalPodAutoscalerSpec, s conversion.Scope) error { - return autoConvert_v1beta1_HorizontalPodAutoscalerSpec_To_extensions_HorizontalPodAutoscalerSpec(in, out, s) +func Convert_extensions_IngressSpec_To_v1beta1_IngressSpec(in *extensions.IngressSpec, out *IngressSpec, s conversion.Scope) error { + return autoConvert_extensions_IngressSpec_To_v1beta1_IngressSpec(in, out, s) } -func autoConvert_v1beta1_HorizontalPodAutoscalerStatus_To_extensions_HorizontalPodAutoscalerStatus(in *HorizontalPodAutoscalerStatus, out *extensions.HorizontalPodAutoscalerStatus, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*HorizontalPodAutoscalerStatus))(in) - } - if in.ObservedGeneration != nil { - out.ObservedGeneration = new(int64) - *out.ObservedGeneration = *in.ObservedGeneration - } else { - out.ObservedGeneration = nil - } - // unable to generate simple pointer conversion for unversioned.Time -> unversioned.Time - if in.LastScaleTime != nil { - out.LastScaleTime = new(unversioned.Time) - if err := api.Convert_unversioned_Time_To_unversioned_Time(in.LastScaleTime, out.LastScaleTime, s); err != nil { - return err - } - } else { - out.LastScaleTime = nil - } - out.CurrentReplicas = int(in.CurrentReplicas) - out.DesiredReplicas = int(in.DesiredReplicas) - if in.CurrentCPUUtilizationPercentage != nil { - out.CurrentCPUUtilizationPercentage = new(int) - *out.CurrentCPUUtilizationPercentage = int(*in.CurrentCPUUtilizationPercentage) - } else { - out.CurrentCPUUtilizationPercentage = nil +func autoConvert_v1beta1_IngressStatus_To_extensions_IngressStatus(in *IngressStatus, out *extensions.IngressStatus, s conversion.Scope) error { + // TODO: Inefficient conversion - can we improve it? + if err := s.Convert(&in.LoadBalancer, &out.LoadBalancer, 0); err != nil { + return err } return nil } -func Convert_v1beta1_HorizontalPodAutoscalerStatus_To_extensions_HorizontalPodAutoscalerStatus(in *HorizontalPodAutoscalerStatus, out *extensions.HorizontalPodAutoscalerStatus, s conversion.Scope) error { - return autoConvert_v1beta1_HorizontalPodAutoscalerStatus_To_extensions_HorizontalPodAutoscalerStatus(in, out, s) +func Convert_v1beta1_IngressStatus_To_extensions_IngressStatus(in *IngressStatus, out *extensions.IngressStatus, s conversion.Scope) error { + return autoConvert_v1beta1_IngressStatus_To_extensions_IngressStatus(in, out, s) } -func autoConvert_v1beta1_HostPortRange_To_extensions_HostPortRange(in *HostPortRange, out *extensions.HostPortRange, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*HostPortRange))(in) +func autoConvert_extensions_IngressStatus_To_v1beta1_IngressStatus(in *extensions.IngressStatus, out *IngressStatus, s conversion.Scope) error { + // TODO: Inefficient conversion - can we improve it? 
+ if err := s.Convert(&in.LoadBalancer, &out.LoadBalancer, 0); err != nil { + return err } - out.Min = int(in.Min) - out.Max = int(in.Max) return nil } -func Convert_v1beta1_HostPortRange_To_extensions_HostPortRange(in *HostPortRange, out *extensions.HostPortRange, s conversion.Scope) error { - return autoConvert_v1beta1_HostPortRange_To_extensions_HostPortRange(in, out, s) +func Convert_extensions_IngressStatus_To_v1beta1_IngressStatus(in *extensions.IngressStatus, out *IngressStatus, s conversion.Scope) error { + return autoConvert_extensions_IngressStatus_To_v1beta1_IngressStatus(in, out, s) } -func autoConvert_v1beta1_IDRange_To_extensions_IDRange(in *IDRange, out *extensions.IDRange, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*IDRange))(in) - } - out.Min = in.Min - out.Max = in.Max +func autoConvert_v1beta1_IngressTLS_To_extensions_IngressTLS(in *IngressTLS, out *extensions.IngressTLS, s conversion.Scope) error { + out.Hosts = in.Hosts + out.SecretName = in.SecretName return nil } -func Convert_v1beta1_IDRange_To_extensions_IDRange(in *IDRange, out *extensions.IDRange, s conversion.Scope) error { - return autoConvert_v1beta1_IDRange_To_extensions_IDRange(in, out, s) +func Convert_v1beta1_IngressTLS_To_extensions_IngressTLS(in *IngressTLS, out *extensions.IngressTLS, s conversion.Scope) error { + return autoConvert_v1beta1_IngressTLS_To_extensions_IngressTLS(in, out, s) } -func autoConvert_v1beta1_Ingress_To_extensions_Ingress(in *Ingress, out *extensions.Ingress, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*Ingress))(in) +func autoConvert_extensions_IngressTLS_To_v1beta1_IngressTLS(in *extensions.IngressTLS, out *IngressTLS, s conversion.Scope) error { + out.Hosts = in.Hosts + out.SecretName = in.SecretName + return nil +} + +func Convert_extensions_IngressTLS_To_v1beta1_IngressTLS(in *extensions.IngressTLS, out *IngressTLS, s conversion.Scope) error { + return autoConvert_extensions_IngressTLS_To_v1beta1_IngressTLS(in, out, s) +} + +func autoConvert_v1beta1_Job_To_batch_Job(in *Job, out *batch.Job, s conversion.Scope) error { + SetDefaults_Job(in) + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + // TODO: Inefficient conversion - can we improve it? + if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { + return err + } + if err := Convert_v1beta1_JobSpec_To_batch_JobSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1beta1_JobStatus_To_batch_JobStatus(&in.Status, &out.Status, s); err != nil { + return err } + return nil +} + +func Convert_v1beta1_Job_To_batch_Job(in *Job, out *batch.Job, s conversion.Scope) error { + return autoConvert_v1beta1_Job_To_batch_Job(in, out, s) +} + +func autoConvert_batch_Job_To_v1beta1_Job(in *batch.Job, out *Job, s conversion.Scope) error { if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } - if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { + // TODO: Inefficient conversion - can we improve it? 
+ if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { return err } - if err := Convert_v1beta1_IngressSpec_To_extensions_IngressSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_batch_JobSpec_To_v1beta1_JobSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_v1beta1_IngressStatus_To_extensions_IngressStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_batch_JobStatus_To_v1beta1_JobStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -func Convert_v1beta1_Ingress_To_extensions_Ingress(in *Ingress, out *extensions.Ingress, s conversion.Scope) error { - return autoConvert_v1beta1_Ingress_To_extensions_Ingress(in, out, s) +func Convert_batch_Job_To_v1beta1_Job(in *batch.Job, out *Job, s conversion.Scope) error { + return autoConvert_batch_Job_To_v1beta1_Job(in, out, s) } -func autoConvert_v1beta1_IngressBackend_To_extensions_IngressBackend(in *IngressBackend, out *extensions.IngressBackend, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*IngressBackend))(in) +func autoConvert_v1beta1_JobCondition_To_batch_JobCondition(in *JobCondition, out *batch.JobCondition, s conversion.Scope) error { + out.Type = batch.JobConditionType(in.Type) + out.Status = api.ConditionStatus(in.Status) + if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastProbeTime, &out.LastProbeTime, s); err != nil { + return err } - out.ServiceName = in.ServiceName - if err := api.Convert_intstr_IntOrString_To_intstr_IntOrString(&in.ServicePort, &out.ServicePort, s); err != nil { + if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastTransitionTime, &out.LastTransitionTime, s); err != nil { return err } + out.Reason = in.Reason + out.Message = in.Message return nil } -func Convert_v1beta1_IngressBackend_To_extensions_IngressBackend(in *IngressBackend, out *extensions.IngressBackend, s conversion.Scope) error { - return autoConvert_v1beta1_IngressBackend_To_extensions_IngressBackend(in, out, s) +func Convert_v1beta1_JobCondition_To_batch_JobCondition(in *JobCondition, out *batch.JobCondition, s conversion.Scope) error { + return autoConvert_v1beta1_JobCondition_To_batch_JobCondition(in, out, s) } -func autoConvert_v1beta1_IngressList_To_extensions_IngressList(in *IngressList, out *extensions.IngressList, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*IngressList))(in) +func autoConvert_batch_JobCondition_To_v1beta1_JobCondition(in *batch.JobCondition, out *JobCondition, s conversion.Scope) error { + out.Type = JobConditionType(in.Type) + out.Status = v1.ConditionStatus(in.Status) + if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastProbeTime, &out.LastProbeTime, s); err != nil { + return err + } + if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastTransitionTime, &out.LastTransitionTime, s); err != nil { + return err } + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +func Convert_batch_JobCondition_To_v1beta1_JobCondition(in *batch.JobCondition, out *JobCondition, s conversion.Scope) error { + return autoConvert_batch_JobCondition_To_v1beta1_JobCondition(in, out, s) +} + +func autoConvert_v1beta1_JobList_To_batch_JobList(in *JobList, out *batch.JobList, s conversion.Scope) error { if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } @@ 
-4331,9 +1288,10 @@ func autoConvert_v1beta1_IngressList_To_extensions_IngressList(in *IngressList, return err } if in.Items != nil { - out.Items = make([]extensions.Ingress, len(in.Items)) - for i := range in.Items { - if err := Convert_v1beta1_Ingress_To_extensions_Ingress(&in.Items[i], &out.Items[i], s); err != nil { + in, out := &in.Items, &out.Items + *out = make([]batch.Job, len(*in)) + for i := range *in { + if err := Convert_v1beta1_Job_To_batch_Job(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -4343,167 +1301,243 @@ func autoConvert_v1beta1_IngressList_To_extensions_IngressList(in *IngressList, return nil } -func Convert_v1beta1_IngressList_To_extensions_IngressList(in *IngressList, out *extensions.IngressList, s conversion.Scope) error { - return autoConvert_v1beta1_IngressList_To_extensions_IngressList(in, out, s) +func Convert_v1beta1_JobList_To_batch_JobList(in *JobList, out *batch.JobList, s conversion.Scope) error { + return autoConvert_v1beta1_JobList_To_batch_JobList(in, out, s) } -func autoConvert_v1beta1_IngressRule_To_extensions_IngressRule(in *IngressRule, out *extensions.IngressRule, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*IngressRule))(in) +func autoConvert_batch_JobList_To_v1beta1_JobList(in *batch.JobList, out *JobList, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err } - out.Host = in.Host - if err := Convert_v1beta1_IngressRuleValue_To_extensions_IngressRuleValue(&in.IngressRuleValue, &out.IngressRuleValue, s); err != nil { + if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { return err } + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Job, len(*in)) + for i := range *in { + if err := Convert_batch_Job_To_v1beta1_Job(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } return nil } -func Convert_v1beta1_IngressRule_To_extensions_IngressRule(in *IngressRule, out *extensions.IngressRule, s conversion.Scope) error { - return autoConvert_v1beta1_IngressRule_To_extensions_IngressRule(in, out, s) +func Convert_batch_JobList_To_v1beta1_JobList(in *batch.JobList, out *JobList, s conversion.Scope) error { + return autoConvert_batch_JobList_To_v1beta1_JobList(in, out, s) } -func autoConvert_v1beta1_IngressRuleValue_To_extensions_IngressRuleValue(in *IngressRuleValue, out *extensions.IngressRuleValue, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*IngressRuleValue))(in) - } - // unable to generate simple pointer conversion for v1beta1.HTTPIngressRuleValue -> extensions.HTTPIngressRuleValue - if in.HTTP != nil { - out.HTTP = new(extensions.HTTPIngressRuleValue) - if err := Convert_v1beta1_HTTPIngressRuleValue_To_extensions_HTTPIngressRuleValue(in.HTTP, out.HTTP, s); err != nil { - return err +func autoConvert_v1beta1_JobStatus_To_batch_JobStatus(in *JobStatus, out *batch.JobStatus, s conversion.Scope) error { + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]batch.JobCondition, len(*in)) + for i := range *in { + if err := Convert_v1beta1_JobCondition_To_batch_JobCondition(&(*in)[i], &(*out)[i], s); err != nil { + return err + } } } else { - out.HTTP = nil + out.Conditions = nil } + out.StartTime = in.StartTime + out.CompletionTime 
= in.CompletionTime + out.Active = in.Active + out.Succeeded = in.Succeeded + out.Failed = in.Failed return nil } -func Convert_v1beta1_IngressRuleValue_To_extensions_IngressRuleValue(in *IngressRuleValue, out *extensions.IngressRuleValue, s conversion.Scope) error { - return autoConvert_v1beta1_IngressRuleValue_To_extensions_IngressRuleValue(in, out, s) +func Convert_v1beta1_JobStatus_To_batch_JobStatus(in *JobStatus, out *batch.JobStatus, s conversion.Scope) error { + return autoConvert_v1beta1_JobStatus_To_batch_JobStatus(in, out, s) } -func autoConvert_v1beta1_IngressSpec_To_extensions_IngressSpec(in *IngressSpec, out *extensions.IngressSpec, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*IngressSpec))(in) - } - // unable to generate simple pointer conversion for v1beta1.IngressBackend -> extensions.IngressBackend - if in.Backend != nil { - out.Backend = new(extensions.IngressBackend) - if err := Convert_v1beta1_IngressBackend_To_extensions_IngressBackend(in.Backend, out.Backend, s); err != nil { - return err +func autoConvert_batch_JobStatus_To_v1beta1_JobStatus(in *batch.JobStatus, out *JobStatus, s conversion.Scope) error { + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]JobCondition, len(*in)) + for i := range *in { + if err := Convert_batch_JobCondition_To_v1beta1_JobCondition(&(*in)[i], &(*out)[i], s); err != nil { + return err + } } } else { - out.Backend = nil + out.Conditions = nil } - if in.TLS != nil { - out.TLS = make([]extensions.IngressTLS, len(in.TLS)) - for i := range in.TLS { - if err := Convert_v1beta1_IngressTLS_To_extensions_IngressTLS(&in.TLS[i], &out.TLS[i], s); err != nil { + out.StartTime = in.StartTime + out.CompletionTime = in.CompletionTime + out.Active = in.Active + out.Succeeded = in.Succeeded + out.Failed = in.Failed + return nil +} + +func Convert_batch_JobStatus_To_v1beta1_JobStatus(in *batch.JobStatus, out *JobStatus, s conversion.Scope) error { + return autoConvert_batch_JobStatus_To_v1beta1_JobStatus(in, out, s) +} + +func autoConvert_v1beta1_LabelSelector_To_unversioned_LabelSelector(in *LabelSelector, out *unversioned.LabelSelector, s conversion.Scope) error { + out.MatchLabels = in.MatchLabels + if in.MatchExpressions != nil { + in, out := &in.MatchExpressions, &out.MatchExpressions + *out = make([]unversioned.LabelSelectorRequirement, len(*in)) + for i := range *in { + if err := Convert_v1beta1_LabelSelectorRequirement_To_unversioned_LabelSelectorRequirement(&(*in)[i], &(*out)[i], s); err != nil { return err } } } else { - out.TLS = nil + out.MatchExpressions = nil } - if in.Rules != nil { - out.Rules = make([]extensions.IngressRule, len(in.Rules)) - for i := range in.Rules { - if err := Convert_v1beta1_IngressRule_To_extensions_IngressRule(&in.Rules[i], &out.Rules[i], s); err != nil { + return nil +} + +func Convert_v1beta1_LabelSelector_To_unversioned_LabelSelector(in *LabelSelector, out *unversioned.LabelSelector, s conversion.Scope) error { + return autoConvert_v1beta1_LabelSelector_To_unversioned_LabelSelector(in, out, s) +} + +func autoConvert_unversioned_LabelSelector_To_v1beta1_LabelSelector(in *unversioned.LabelSelector, out *LabelSelector, s conversion.Scope) error { + out.MatchLabels = in.MatchLabels + if in.MatchExpressions != nil { + in, out := &in.MatchExpressions, &out.MatchExpressions + *out = make([]LabelSelectorRequirement, len(*in)) + for i := range *in { + if err := 
Convert_unversioned_LabelSelectorRequirement_To_v1beta1_LabelSelectorRequirement(&(*in)[i], &(*out)[i], s); err != nil { return err } } } else { - out.Rules = nil + out.MatchExpressions = nil } return nil } -func Convert_v1beta1_IngressSpec_To_extensions_IngressSpec(in *IngressSpec, out *extensions.IngressSpec, s conversion.Scope) error { - return autoConvert_v1beta1_IngressSpec_To_extensions_IngressSpec(in, out, s) +func Convert_unversioned_LabelSelector_To_v1beta1_LabelSelector(in *unversioned.LabelSelector, out *LabelSelector, s conversion.Scope) error { + return autoConvert_unversioned_LabelSelector_To_v1beta1_LabelSelector(in, out, s) } -func autoConvert_v1beta1_IngressStatus_To_extensions_IngressStatus(in *IngressStatus, out *extensions.IngressStatus, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*IngressStatus))(in) - } - if err := Convert_v1_LoadBalancerStatus_To_api_LoadBalancerStatus(&in.LoadBalancer, &out.LoadBalancer, s); err != nil { - return err - } +func autoConvert_v1beta1_LabelSelectorRequirement_To_unversioned_LabelSelectorRequirement(in *LabelSelectorRequirement, out *unversioned.LabelSelectorRequirement, s conversion.Scope) error { + out.Key = in.Key + out.Operator = unversioned.LabelSelectorOperator(in.Operator) + out.Values = in.Values return nil } -func Convert_v1beta1_IngressStatus_To_extensions_IngressStatus(in *IngressStatus, out *extensions.IngressStatus, s conversion.Scope) error { - return autoConvert_v1beta1_IngressStatus_To_extensions_IngressStatus(in, out, s) +func Convert_v1beta1_LabelSelectorRequirement_To_unversioned_LabelSelectorRequirement(in *LabelSelectorRequirement, out *unversioned.LabelSelectorRequirement, s conversion.Scope) error { + return autoConvert_v1beta1_LabelSelectorRequirement_To_unversioned_LabelSelectorRequirement(in, out, s) } -func autoConvert_v1beta1_IngressTLS_To_extensions_IngressTLS(in *IngressTLS, out *extensions.IngressTLS, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*IngressTLS))(in) - } - if in.Hosts != nil { - out.Hosts = make([]string, len(in.Hosts)) - for i := range in.Hosts { - out.Hosts[i] = in.Hosts[i] - } - } else { - out.Hosts = nil - } - out.SecretName = in.SecretName +func autoConvert_unversioned_LabelSelectorRequirement_To_v1beta1_LabelSelectorRequirement(in *unversioned.LabelSelectorRequirement, out *LabelSelectorRequirement, s conversion.Scope) error { + out.Key = in.Key + out.Operator = LabelSelectorOperator(in.Operator) + out.Values = in.Values return nil } -func Convert_v1beta1_IngressTLS_To_extensions_IngressTLS(in *IngressTLS, out *extensions.IngressTLS, s conversion.Scope) error { - return autoConvert_v1beta1_IngressTLS_To_extensions_IngressTLS(in, out, s) +func Convert_unversioned_LabelSelectorRequirement_To_v1beta1_LabelSelectorRequirement(in *unversioned.LabelSelectorRequirement, out *LabelSelectorRequirement, s conversion.Scope) error { + return autoConvert_unversioned_LabelSelectorRequirement_To_v1beta1_LabelSelectorRequirement(in, out, s) } -func autoConvert_v1beta1_Job_To_extensions_Job(in *Job, out *extensions.Job, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*Job))(in) - } +func autoConvert_v1beta1_NetworkPolicy_To_extensions_NetworkPolicy(in *NetworkPolicy, out *extensions.NetworkPolicy, s conversion.Scope) error { + SetDefaults_NetworkPolicy(in) if err := 
api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } - if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { + // TODO: Inefficient conversion - can we improve it? + if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { return err } - if err := Convert_v1beta1_JobSpec_To_extensions_JobSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1beta1_JobStatus_To_extensions_JobStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_v1beta1_NetworkPolicySpec_To_extensions_NetworkPolicySpec(&in.Spec, &out.Spec, s); err != nil { return err } return nil } -func Convert_v1beta1_Job_To_extensions_Job(in *Job, out *extensions.Job, s conversion.Scope) error { - return autoConvert_v1beta1_Job_To_extensions_Job(in, out, s) +func Convert_v1beta1_NetworkPolicy_To_extensions_NetworkPolicy(in *NetworkPolicy, out *extensions.NetworkPolicy, s conversion.Scope) error { + return autoConvert_v1beta1_NetworkPolicy_To_extensions_NetworkPolicy(in, out, s) } -func autoConvert_v1beta1_JobCondition_To_extensions_JobCondition(in *JobCondition, out *extensions.JobCondition, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*JobCondition))(in) +func autoConvert_extensions_NetworkPolicy_To_v1beta1_NetworkPolicy(in *extensions.NetworkPolicy, out *NetworkPolicy, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err } - out.Type = extensions.JobConditionType(in.Type) - out.Status = api.ConditionStatus(in.Status) - if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastProbeTime, &out.LastProbeTime, s); err != nil { + // TODO: Inefficient conversion - can we improve it? 
+ if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { return err } - if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastTransitionTime, &out.LastTransitionTime, s); err != nil { + if err := Convert_extensions_NetworkPolicySpec_To_v1beta1_NetworkPolicySpec(&in.Spec, &out.Spec, s); err != nil { return err } - out.Reason = in.Reason - out.Message = in.Message return nil } -func Convert_v1beta1_JobCondition_To_extensions_JobCondition(in *JobCondition, out *extensions.JobCondition, s conversion.Scope) error { - return autoConvert_v1beta1_JobCondition_To_extensions_JobCondition(in, out, s) +func Convert_extensions_NetworkPolicy_To_v1beta1_NetworkPolicy(in *extensions.NetworkPolicy, out *NetworkPolicy, s conversion.Scope) error { + return autoConvert_extensions_NetworkPolicy_To_v1beta1_NetworkPolicy(in, out, s) +} + +func autoConvert_v1beta1_NetworkPolicyIngressRule_To_extensions_NetworkPolicyIngressRule(in *NetworkPolicyIngressRule, out *extensions.NetworkPolicyIngressRule, s conversion.Scope) error { + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]extensions.NetworkPolicyPort, len(*in)) + for i := range *in { + if err := Convert_v1beta1_NetworkPolicyPort_To_extensions_NetworkPolicyPort(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Ports = nil + } + if in.From != nil { + in, out := &in.From, &out.From + *out = make([]extensions.NetworkPolicyPeer, len(*in)) + for i := range *in { + if err := Convert_v1beta1_NetworkPolicyPeer_To_extensions_NetworkPolicyPeer(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.From = nil + } + return nil +} + +func Convert_v1beta1_NetworkPolicyIngressRule_To_extensions_NetworkPolicyIngressRule(in *NetworkPolicyIngressRule, out *extensions.NetworkPolicyIngressRule, s conversion.Scope) error { + return autoConvert_v1beta1_NetworkPolicyIngressRule_To_extensions_NetworkPolicyIngressRule(in, out, s) } -func autoConvert_v1beta1_JobList_To_extensions_JobList(in *JobList, out *extensions.JobList, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*JobList))(in) +func autoConvert_extensions_NetworkPolicyIngressRule_To_v1beta1_NetworkPolicyIngressRule(in *extensions.NetworkPolicyIngressRule, out *NetworkPolicyIngressRule, s conversion.Scope) error { + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]NetworkPolicyPort, len(*in)) + for i := range *in { + if err := Convert_extensions_NetworkPolicyPort_To_v1beta1_NetworkPolicyPort(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Ports = nil + } + if in.From != nil { + in, out := &in.From, &out.From + *out = make([]NetworkPolicyPeer, len(*in)) + for i := range *in { + if err := Convert_extensions_NetworkPolicyPeer_To_v1beta1_NetworkPolicyPeer(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.From = nil } + return nil +} + +func Convert_extensions_NetworkPolicyIngressRule_To_v1beta1_NetworkPolicyIngressRule(in *extensions.NetworkPolicyIngressRule, out *NetworkPolicyIngressRule, s conversion.Scope) error { + return autoConvert_extensions_NetworkPolicyIngressRule_To_v1beta1_NetworkPolicyIngressRule(in, out, s) +} + +func autoConvert_v1beta1_NetworkPolicyList_To_extensions_NetworkPolicyList(in *NetworkPolicyList, out *extensions.NetworkPolicyList, s conversion.Scope) error { if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, 
&out.TypeMeta, s); err != nil { return err } @@ -4511,9 +1545,10 @@ func autoConvert_v1beta1_JobList_To_extensions_JobList(in *JobList, out *extensi return err } if in.Items != nil { - out.Items = make([]extensions.Job, len(in.Items)) - for i := range in.Items { - if err := Convert_v1beta1_Job_To_extensions_Job(&in.Items[i], &out.Items[i], s); err != nil { + in, out := &in.Items, &out.Items + *out = make([]extensions.NetworkPolicy, len(*in)) + for i := range *in { + if err := Convert_v1beta1_NetworkPolicy_To_extensions_NetworkPolicy(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -4523,192 +1558,225 @@ func autoConvert_v1beta1_JobList_To_extensions_JobList(in *JobList, out *extensi return nil } -func Convert_v1beta1_JobList_To_extensions_JobList(in *JobList, out *extensions.JobList, s conversion.Scope) error { - return autoConvert_v1beta1_JobList_To_extensions_JobList(in, out, s) +func Convert_v1beta1_NetworkPolicyList_To_extensions_NetworkPolicyList(in *NetworkPolicyList, out *extensions.NetworkPolicyList, s conversion.Scope) error { + return autoConvert_v1beta1_NetworkPolicyList_To_extensions_NetworkPolicyList(in, out, s) } -func autoConvert_v1beta1_JobSpec_To_extensions_JobSpec(in *JobSpec, out *extensions.JobSpec, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*JobSpec))(in) +func autoConvert_extensions_NetworkPolicyList_To_v1beta1_NetworkPolicyList(in *extensions.NetworkPolicyList, out *NetworkPolicyList, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err } - if in.Parallelism != nil { - out.Parallelism = new(int) - *out.Parallelism = int(*in.Parallelism) - } else { - out.Parallelism = nil + if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { + return err } - if in.Completions != nil { - out.Completions = new(int) - *out.Completions = int(*in.Completions) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NetworkPolicy, len(*in)) + for i := range *in { + if err := Convert_extensions_NetworkPolicy_To_v1beta1_NetworkPolicy(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } } else { - out.Completions = nil + out.Items = nil } - if in.ActiveDeadlineSeconds != nil { - out.ActiveDeadlineSeconds = new(int64) - *out.ActiveDeadlineSeconds = *in.ActiveDeadlineSeconds + return nil +} + +func Convert_extensions_NetworkPolicyList_To_v1beta1_NetworkPolicyList(in *extensions.NetworkPolicyList, out *NetworkPolicyList, s conversion.Scope) error { + return autoConvert_extensions_NetworkPolicyList_To_v1beta1_NetworkPolicyList(in, out, s) +} + +func autoConvert_v1beta1_NetworkPolicyPeer_To_extensions_NetworkPolicyPeer(in *NetworkPolicyPeer, out *extensions.NetworkPolicyPeer, s conversion.Scope) error { + if in.PodSelector != nil { + in, out := &in.PodSelector, &out.PodSelector + *out = new(unversioned.LabelSelector) + if err := Convert_v1beta1_LabelSelector_To_unversioned_LabelSelector(*in, *out, s); err != nil { + return err + } } else { - out.ActiveDeadlineSeconds = nil + out.PodSelector = nil } - // unable to generate simple pointer conversion for v1beta1.LabelSelector -> unversioned.LabelSelector - if in.Selector != nil { - out.Selector = new(unversioned.LabelSelector) - if err := Convert_v1beta1_LabelSelector_To_unversioned_LabelSelector(in.Selector, out.Selector, s); err != nil { + if in.NamespaceSelector != nil { + 
in, out := &in.NamespaceSelector, &out.NamespaceSelector + *out = new(unversioned.LabelSelector) + if err := Convert_v1beta1_LabelSelector_To_unversioned_LabelSelector(*in, *out, s); err != nil { return err } } else { - out.Selector = nil - } - // in.AutoSelector has no peer in out - if err := Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { - return err + out.NamespaceSelector = nil } return nil } -func autoConvert_v1beta1_JobStatus_To_extensions_JobStatus(in *JobStatus, out *extensions.JobStatus, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*JobStatus))(in) - } - if in.Conditions != nil { - out.Conditions = make([]extensions.JobCondition, len(in.Conditions)) - for i := range in.Conditions { - if err := Convert_v1beta1_JobCondition_To_extensions_JobCondition(&in.Conditions[i], &out.Conditions[i], s); err != nil { - return err - } - } - } else { - out.Conditions = nil - } - // unable to generate simple pointer conversion for unversioned.Time -> unversioned.Time - if in.StartTime != nil { - out.StartTime = new(unversioned.Time) - if err := api.Convert_unversioned_Time_To_unversioned_Time(in.StartTime, out.StartTime, s); err != nil { +func Convert_v1beta1_NetworkPolicyPeer_To_extensions_NetworkPolicyPeer(in *NetworkPolicyPeer, out *extensions.NetworkPolicyPeer, s conversion.Scope) error { + return autoConvert_v1beta1_NetworkPolicyPeer_To_extensions_NetworkPolicyPeer(in, out, s) +} + +func autoConvert_extensions_NetworkPolicyPeer_To_v1beta1_NetworkPolicyPeer(in *extensions.NetworkPolicyPeer, out *NetworkPolicyPeer, s conversion.Scope) error { + if in.PodSelector != nil { + in, out := &in.PodSelector, &out.PodSelector + *out = new(LabelSelector) + if err := Convert_unversioned_LabelSelector_To_v1beta1_LabelSelector(*in, *out, s); err != nil { return err } } else { - out.StartTime = nil + out.PodSelector = nil } - // unable to generate simple pointer conversion for unversioned.Time -> unversioned.Time - if in.CompletionTime != nil { - out.CompletionTime = new(unversioned.Time) - if err := api.Convert_unversioned_Time_To_unversioned_Time(in.CompletionTime, out.CompletionTime, s); err != nil { + if in.NamespaceSelector != nil { + in, out := &in.NamespaceSelector, &out.NamespaceSelector + *out = new(LabelSelector) + if err := Convert_unversioned_LabelSelector_To_v1beta1_LabelSelector(*in, *out, s); err != nil { return err } } else { - out.CompletionTime = nil + out.NamespaceSelector = nil } - out.Active = int(in.Active) - out.Succeeded = int(in.Succeeded) - out.Failed = int(in.Failed) return nil } -func Convert_v1beta1_JobStatus_To_extensions_JobStatus(in *JobStatus, out *extensions.JobStatus, s conversion.Scope) error { - return autoConvert_v1beta1_JobStatus_To_extensions_JobStatus(in, out, s) +func Convert_extensions_NetworkPolicyPeer_To_v1beta1_NetworkPolicyPeer(in *extensions.NetworkPolicyPeer, out *NetworkPolicyPeer, s conversion.Scope) error { + return autoConvert_extensions_NetworkPolicyPeer_To_v1beta1_NetworkPolicyPeer(in, out, s) } -func autoConvert_v1beta1_LabelSelector_To_unversioned_LabelSelector(in *LabelSelector, out *unversioned.LabelSelector, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*LabelSelector))(in) +func autoConvert_v1beta1_NetworkPolicyPort_To_extensions_NetworkPolicyPort(in *NetworkPolicyPort, out *extensions.NetworkPolicyPort, s conversion.Scope) error { + if 
in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(api.Protocol) + **out = api.Protocol(**in) + } else { + out.Protocol = nil } - if in.MatchLabels != nil { - out.MatchLabels = make(map[string]string) - for key, val := range in.MatchLabels { - out.MatchLabels[key] = val - } + out.Port = in.Port + return nil +} + +func Convert_v1beta1_NetworkPolicyPort_To_extensions_NetworkPolicyPort(in *NetworkPolicyPort, out *extensions.NetworkPolicyPort, s conversion.Scope) error { + return autoConvert_v1beta1_NetworkPolicyPort_To_extensions_NetworkPolicyPort(in, out, s) +} + +func autoConvert_extensions_NetworkPolicyPort_To_v1beta1_NetworkPolicyPort(in *extensions.NetworkPolicyPort, out *NetworkPolicyPort, s conversion.Scope) error { + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(v1.Protocol) + **out = v1.Protocol(**in) } else { - out.MatchLabels = nil + out.Protocol = nil } - if in.MatchExpressions != nil { - out.MatchExpressions = make([]unversioned.LabelSelectorRequirement, len(in.MatchExpressions)) - for i := range in.MatchExpressions { - if err := Convert_v1beta1_LabelSelectorRequirement_To_unversioned_LabelSelectorRequirement(&in.MatchExpressions[i], &out.MatchExpressions[i], s); err != nil { + out.Port = in.Port + return nil +} + +func Convert_extensions_NetworkPolicyPort_To_v1beta1_NetworkPolicyPort(in *extensions.NetworkPolicyPort, out *NetworkPolicyPort, s conversion.Scope) error { + return autoConvert_extensions_NetworkPolicyPort_To_v1beta1_NetworkPolicyPort(in, out, s) +} + +func autoConvert_v1beta1_NetworkPolicySpec_To_extensions_NetworkPolicySpec(in *NetworkPolicySpec, out *extensions.NetworkPolicySpec, s conversion.Scope) error { + if err := Convert_v1beta1_LabelSelector_To_unversioned_LabelSelector(&in.PodSelector, &out.PodSelector, s); err != nil { + return err + } + if in.Ingress != nil { + in, out := &in.Ingress, &out.Ingress + *out = make([]extensions.NetworkPolicyIngressRule, len(*in)) + for i := range *in { + if err := Convert_v1beta1_NetworkPolicyIngressRule_To_extensions_NetworkPolicyIngressRule(&(*in)[i], &(*out)[i], s); err != nil { return err } } } else { - out.MatchExpressions = nil + out.Ingress = nil } return nil } -func Convert_v1beta1_LabelSelector_To_unversioned_LabelSelector(in *LabelSelector, out *unversioned.LabelSelector, s conversion.Scope) error { - return autoConvert_v1beta1_LabelSelector_To_unversioned_LabelSelector(in, out, s) +func Convert_v1beta1_NetworkPolicySpec_To_extensions_NetworkPolicySpec(in *NetworkPolicySpec, out *extensions.NetworkPolicySpec, s conversion.Scope) error { + return autoConvert_v1beta1_NetworkPolicySpec_To_extensions_NetworkPolicySpec(in, out, s) } -func autoConvert_v1beta1_LabelSelectorRequirement_To_unversioned_LabelSelectorRequirement(in *LabelSelectorRequirement, out *unversioned.LabelSelectorRequirement, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*LabelSelectorRequirement))(in) +func autoConvert_extensions_NetworkPolicySpec_To_v1beta1_NetworkPolicySpec(in *extensions.NetworkPolicySpec, out *NetworkPolicySpec, s conversion.Scope) error { + if err := Convert_unversioned_LabelSelector_To_v1beta1_LabelSelector(&in.PodSelector, &out.PodSelector, s); err != nil { + return err } - out.Key = in.Key - out.Operator = unversioned.LabelSelectorOperator(in.Operator) - if in.Values != nil { - out.Values = make([]string, len(in.Values)) - for i := range in.Values { - out.Values[i] = in.Values[i] + if in.Ingress 
!= nil { + in, out := &in.Ingress, &out.Ingress + *out = make([]NetworkPolicyIngressRule, len(*in)) + for i := range *in { + if err := Convert_extensions_NetworkPolicyIngressRule_To_v1beta1_NetworkPolicyIngressRule(&(*in)[i], &(*out)[i], s); err != nil { + return err + } } } else { - out.Values = nil + out.Ingress = nil } return nil } -func Convert_v1beta1_LabelSelectorRequirement_To_unversioned_LabelSelectorRequirement(in *LabelSelectorRequirement, out *unversioned.LabelSelectorRequirement, s conversion.Scope) error { - return autoConvert_v1beta1_LabelSelectorRequirement_To_unversioned_LabelSelectorRequirement(in, out, s) +func Convert_extensions_NetworkPolicySpec_To_v1beta1_NetworkPolicySpec(in *extensions.NetworkPolicySpec, out *NetworkPolicySpec, s conversion.Scope) error { + return autoConvert_extensions_NetworkPolicySpec_To_v1beta1_NetworkPolicySpec(in, out, s) } -func autoConvert_v1beta1_ListOptions_To_api_ListOptions(in *ListOptions, out *api.ListOptions, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*ListOptions))(in) - } +func autoConvert_v1beta1_PodSecurityPolicy_To_extensions_PodSecurityPolicy(in *PodSecurityPolicy, out *extensions.PodSecurityPolicy, s conversion.Scope) error { if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } - if err := api.Convert_string_To_labels_Selector(&in.LabelSelector, &out.LabelSelector, s); err != nil { + // TODO: Inefficient conversion - can we improve it? + if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { return err } - if err := api.Convert_string_To_fields_Selector(&in.FieldSelector, &out.FieldSelector, s); err != nil { + if err := Convert_v1beta1_PodSecurityPolicySpec_To_extensions_PodSecurityPolicySpec(&in.Spec, &out.Spec, s); err != nil { return err } - out.Watch = in.Watch - out.ResourceVersion = in.ResourceVersion - if in.TimeoutSeconds != nil { - out.TimeoutSeconds = new(int64) - *out.TimeoutSeconds = *in.TimeoutSeconds - } else { - out.TimeoutSeconds = nil - } return nil } -func Convert_v1beta1_ListOptions_To_api_ListOptions(in *ListOptions, out *api.ListOptions, s conversion.Scope) error { - return autoConvert_v1beta1_ListOptions_To_api_ListOptions(in, out, s) +func Convert_v1beta1_PodSecurityPolicy_To_extensions_PodSecurityPolicy(in *PodSecurityPolicy, out *extensions.PodSecurityPolicy, s conversion.Scope) error { + return autoConvert_v1beta1_PodSecurityPolicy_To_extensions_PodSecurityPolicy(in, out, s) } -func autoConvert_v1beta1_PodSecurityPolicy_To_extensions_PodSecurityPolicy(in *PodSecurityPolicy, out *extensions.PodSecurityPolicy, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*PodSecurityPolicy))(in) - } +func autoConvert_extensions_PodSecurityPolicy_To_v1beta1_PodSecurityPolicy(in *extensions.PodSecurityPolicy, out *PodSecurityPolicy, s conversion.Scope) error { if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } - if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { + // TODO: Inefficient conversion - can we improve it? 
+ if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { return err } - if err := Convert_v1beta1_PodSecurityPolicySpec_To_extensions_PodSecurityPolicySpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_extensions_PodSecurityPolicySpec_To_v1beta1_PodSecurityPolicySpec(&in.Spec, &out.Spec, s); err != nil { return err } return nil } -func Convert_v1beta1_PodSecurityPolicy_To_extensions_PodSecurityPolicy(in *PodSecurityPolicy, out *extensions.PodSecurityPolicy, s conversion.Scope) error { - return autoConvert_v1beta1_PodSecurityPolicy_To_extensions_PodSecurityPolicy(in, out, s) +func Convert_extensions_PodSecurityPolicy_To_v1beta1_PodSecurityPolicy(in *extensions.PodSecurityPolicy, out *PodSecurityPolicy, s conversion.Scope) error { + return autoConvert_extensions_PodSecurityPolicy_To_v1beta1_PodSecurityPolicy(in, out, s) } func autoConvert_v1beta1_PodSecurityPolicyList_To_extensions_PodSecurityPolicyList(in *PodSecurityPolicyList, out *extensions.PodSecurityPolicyList, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*PodSecurityPolicyList))(in) + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { + return err + } + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]extensions.PodSecurityPolicy, len(*in)) + for i := range *in { + if err := Convert_v1beta1_PodSecurityPolicy_To_extensions_PodSecurityPolicy(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil } + return nil +} + +func Convert_v1beta1_PodSecurityPolicyList_To_extensions_PodSecurityPolicyList(in *PodSecurityPolicyList, out *extensions.PodSecurityPolicyList, s conversion.Scope) error { + return autoConvert_v1beta1_PodSecurityPolicyList_To_extensions_PodSecurityPolicyList(in, out, s) +} + +func autoConvert_extensions_PodSecurityPolicyList_To_v1beta1_PodSecurityPolicyList(in *extensions.PodSecurityPolicyList, out *PodSecurityPolicyList, s conversion.Scope) error { if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } @@ -4716,9 +1784,10 @@ func autoConvert_v1beta1_PodSecurityPolicyList_To_extensions_PodSecurityPolicyLi return err } if in.Items != nil { - out.Items = make([]extensions.PodSecurityPolicy, len(in.Items)) - for i := range in.Items { - if err := Convert_v1beta1_PodSecurityPolicy_To_extensions_PodSecurityPolicy(&in.Items[i], &out.Items[i], s); err != nil { + in, out := &in.Items, &out.Items + *out = make([]PodSecurityPolicy, len(*in)) + for i := range *in { + if err := Convert_extensions_PodSecurityPolicy_To_v1beta1_PodSecurityPolicy(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -4728,36 +1797,126 @@ func autoConvert_v1beta1_PodSecurityPolicyList_To_extensions_PodSecurityPolicyLi return nil } -func Convert_v1beta1_PodSecurityPolicyList_To_extensions_PodSecurityPolicyList(in *PodSecurityPolicyList, out *extensions.PodSecurityPolicyList, s conversion.Scope) error { - return autoConvert_v1beta1_PodSecurityPolicyList_To_extensions_PodSecurityPolicyList(in, out, s) +func Convert_extensions_PodSecurityPolicyList_To_v1beta1_PodSecurityPolicyList(in *extensions.PodSecurityPolicyList, out *PodSecurityPolicyList, s conversion.Scope) error { + return 
autoConvert_extensions_PodSecurityPolicyList_To_v1beta1_PodSecurityPolicyList(in, out, s) +} + +func autoConvert_v1beta1_PodSecurityPolicySpec_To_extensions_PodSecurityPolicySpec(in *PodSecurityPolicySpec, out *extensions.PodSecurityPolicySpec, s conversion.Scope) error { + out.Privileged = in.Privileged + if in.DefaultAddCapabilities != nil { + in, out := &in.DefaultAddCapabilities, &out.DefaultAddCapabilities + *out = make([]api.Capability, len(*in)) + for i := range *in { + (*out)[i] = api.Capability((*in)[i]) + } + } else { + out.DefaultAddCapabilities = nil + } + if in.RequiredDropCapabilities != nil { + in, out := &in.RequiredDropCapabilities, &out.RequiredDropCapabilities + *out = make([]api.Capability, len(*in)) + for i := range *in { + (*out)[i] = api.Capability((*in)[i]) + } + } else { + out.RequiredDropCapabilities = nil + } + if in.AllowedCapabilities != nil { + in, out := &in.AllowedCapabilities, &out.AllowedCapabilities + *out = make([]api.Capability, len(*in)) + for i := range *in { + (*out)[i] = api.Capability((*in)[i]) + } + } else { + out.AllowedCapabilities = nil + } + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]extensions.FSType, len(*in)) + for i := range *in { + (*out)[i] = extensions.FSType((*in)[i]) + } + } else { + out.Volumes = nil + } + out.HostNetwork = in.HostNetwork + if in.HostPorts != nil { + in, out := &in.HostPorts, &out.HostPorts + *out = make([]extensions.HostPortRange, len(*in)) + for i := range *in { + if err := Convert_v1beta1_HostPortRange_To_extensions_HostPortRange(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.HostPorts = nil + } + out.HostPID = in.HostPID + out.HostIPC = in.HostIPC + if err := Convert_v1beta1_SELinuxStrategyOptions_To_extensions_SELinuxStrategyOptions(&in.SELinux, &out.SELinux, s); err != nil { + return err + } + if err := Convert_v1beta1_RunAsUserStrategyOptions_To_extensions_RunAsUserStrategyOptions(&in.RunAsUser, &out.RunAsUser, s); err != nil { + return err + } + if err := Convert_v1beta1_SupplementalGroupsStrategyOptions_To_extensions_SupplementalGroupsStrategyOptions(&in.SupplementalGroups, &out.SupplementalGroups, s); err != nil { + return err + } + if err := Convert_v1beta1_FSGroupStrategyOptions_To_extensions_FSGroupStrategyOptions(&in.FSGroup, &out.FSGroup, s); err != nil { + return err + } + out.ReadOnlyRootFilesystem = in.ReadOnlyRootFilesystem + return nil +} + +func Convert_v1beta1_PodSecurityPolicySpec_To_extensions_PodSecurityPolicySpec(in *PodSecurityPolicySpec, out *extensions.PodSecurityPolicySpec, s conversion.Scope) error { + return autoConvert_v1beta1_PodSecurityPolicySpec_To_extensions_PodSecurityPolicySpec(in, out, s) } -func autoConvert_v1beta1_PodSecurityPolicySpec_To_extensions_PodSecurityPolicySpec(in *PodSecurityPolicySpec, out *extensions.PodSecurityPolicySpec, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*PodSecurityPolicySpec))(in) - } +func autoConvert_extensions_PodSecurityPolicySpec_To_v1beta1_PodSecurityPolicySpec(in *extensions.PodSecurityPolicySpec, out *PodSecurityPolicySpec, s conversion.Scope) error { out.Privileged = in.Privileged - if in.Capabilities != nil { - out.Capabilities = make([]api.Capability, len(in.Capabilities)) - for i := range in.Capabilities { - out.Capabilities[i] = api.Capability(in.Capabilities[i]) + if in.DefaultAddCapabilities != nil { + in, out := &in.DefaultAddCapabilities, &out.DefaultAddCapabilities + *out = 
make([]v1.Capability, len(*in)) + for i := range *in { + (*out)[i] = v1.Capability((*in)[i]) + } + } else { + out.DefaultAddCapabilities = nil + } + if in.RequiredDropCapabilities != nil { + in, out := &in.RequiredDropCapabilities, &out.RequiredDropCapabilities + *out = make([]v1.Capability, len(*in)) + for i := range *in { + (*out)[i] = v1.Capability((*in)[i]) + } + } else { + out.RequiredDropCapabilities = nil + } + if in.AllowedCapabilities != nil { + in, out := &in.AllowedCapabilities, &out.AllowedCapabilities + *out = make([]v1.Capability, len(*in)) + for i := range *in { + (*out)[i] = v1.Capability((*in)[i]) } } else { - out.Capabilities = nil + out.AllowedCapabilities = nil } if in.Volumes != nil { - out.Volumes = make([]extensions.FSType, len(in.Volumes)) - for i := range in.Volumes { - out.Volumes[i] = extensions.FSType(in.Volumes[i]) + in, out := &in.Volumes, &out.Volumes + *out = make([]FSType, len(*in)) + for i := range *in { + (*out)[i] = FSType((*in)[i]) } } else { out.Volumes = nil } out.HostNetwork = in.HostNetwork if in.HostPorts != nil { - out.HostPorts = make([]extensions.HostPortRange, len(in.HostPorts)) - for i := range in.HostPorts { - if err := Convert_v1beta1_HostPortRange_To_extensions_HostPortRange(&in.HostPorts[i], &out.HostPorts[i], s); err != nil { + in, out := &in.HostPorts, &out.HostPorts + *out = make([]HostPortRange, len(*in)) + for i := range *in { + if err := Convert_extensions_HostPortRange_To_v1beta1_HostPortRange(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -4766,27 +1925,33 @@ func autoConvert_v1beta1_PodSecurityPolicySpec_To_extensions_PodSecurityPolicySp } out.HostPID = in.HostPID out.HostIPC = in.HostIPC - if err := Convert_v1beta1_SELinuxStrategyOptions_To_extensions_SELinuxStrategyOptions(&in.SELinux, &out.SELinux, s); err != nil { + if err := Convert_extensions_SELinuxStrategyOptions_To_v1beta1_SELinuxStrategyOptions(&in.SELinux, &out.SELinux, s); err != nil { return err } - if err := Convert_v1beta1_RunAsUserStrategyOptions_To_extensions_RunAsUserStrategyOptions(&in.RunAsUser, &out.RunAsUser, s); err != nil { + if err := Convert_extensions_RunAsUserStrategyOptions_To_v1beta1_RunAsUserStrategyOptions(&in.RunAsUser, &out.RunAsUser, s); err != nil { + return err + } + if err := Convert_extensions_SupplementalGroupsStrategyOptions_To_v1beta1_SupplementalGroupsStrategyOptions(&in.SupplementalGroups, &out.SupplementalGroups, s); err != nil { + return err + } + if err := Convert_extensions_FSGroupStrategyOptions_To_v1beta1_FSGroupStrategyOptions(&in.FSGroup, &out.FSGroup, s); err != nil { return err } + out.ReadOnlyRootFilesystem = in.ReadOnlyRootFilesystem return nil } -func Convert_v1beta1_PodSecurityPolicySpec_To_extensions_PodSecurityPolicySpec(in *PodSecurityPolicySpec, out *extensions.PodSecurityPolicySpec, s conversion.Scope) error { - return autoConvert_v1beta1_PodSecurityPolicySpec_To_extensions_PodSecurityPolicySpec(in, out, s) +func Convert_extensions_PodSecurityPolicySpec_To_v1beta1_PodSecurityPolicySpec(in *extensions.PodSecurityPolicySpec, out *PodSecurityPolicySpec, s conversion.Scope) error { + return autoConvert_extensions_PodSecurityPolicySpec_To_v1beta1_PodSecurityPolicySpec(in, out, s) } func autoConvert_v1beta1_ReplicaSet_To_extensions_ReplicaSet(in *ReplicaSet, out *extensions.ReplicaSet, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*ReplicaSet))(in) - } + SetDefaults_ReplicaSet(in) if err := 
api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } - if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { + // TODO: Inefficient conversion - can we improve it? + if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { return err } if err := Convert_v1beta1_ReplicaSetSpec_To_extensions_ReplicaSetSpec(&in.Spec, &out.Spec, s); err != nil { @@ -4802,10 +1967,28 @@ func Convert_v1beta1_ReplicaSet_To_extensions_ReplicaSet(in *ReplicaSet, out *ex return autoConvert_v1beta1_ReplicaSet_To_extensions_ReplicaSet(in, out, s) } -func autoConvert_v1beta1_ReplicaSetList_To_extensions_ReplicaSetList(in *ReplicaSetList, out *extensions.ReplicaSetList, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*ReplicaSetList))(in) +func autoConvert_extensions_ReplicaSet_To_v1beta1_ReplicaSet(in *extensions.ReplicaSet, out *ReplicaSet, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + // TODO: Inefficient conversion - can we improve it? + if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { + return err + } + if err := Convert_extensions_ReplicaSetSpec_To_v1beta1_ReplicaSetSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_extensions_ReplicaSetStatus_To_v1beta1_ReplicaSetStatus(&in.Status, &out.Status, s); err != nil { + return err } + return nil +} + +func Convert_extensions_ReplicaSet_To_v1beta1_ReplicaSet(in *extensions.ReplicaSet, out *ReplicaSet, s conversion.Scope) error { + return autoConvert_extensions_ReplicaSet_To_v1beta1_ReplicaSet(in, out, s) +} + +func autoConvert_v1beta1_ReplicaSetList_To_extensions_ReplicaSetList(in *ReplicaSetList, out *extensions.ReplicaSetList, s conversion.Scope) error { if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } @@ -4813,9 +1996,10 @@ func autoConvert_v1beta1_ReplicaSetList_To_extensions_ReplicaSetList(in *Replica return err } if in.Items != nil { - out.Items = make([]extensions.ReplicaSet, len(in.Items)) - for i := range in.Items { - if err := Convert_v1beta1_ReplicaSet_To_extensions_ReplicaSet(&in.Items[i], &out.Items[i], s); err != nil { + in, out := &in.Items, &out.Items + *out = make([]extensions.ReplicaSet, len(*in)) + for i := range *in { + if err := Convert_v1beta1_ReplicaSet_To_extensions_ReplicaSet(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -4829,32 +2013,34 @@ func Convert_v1beta1_ReplicaSetList_To_extensions_ReplicaSetList(in *ReplicaSetL return autoConvert_v1beta1_ReplicaSetList_To_extensions_ReplicaSetList(in, out, s) } -func autoConvert_v1beta1_ReplicaSetSpec_To_extensions_ReplicaSetSpec(in *ReplicaSetSpec, out *extensions.ReplicaSetSpec, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*ReplicaSetSpec))(in) +func autoConvert_extensions_ReplicaSetList_To_v1beta1_ReplicaSetList(in *extensions.ReplicaSetList, out *ReplicaSetList, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err } - // in.Replicas has no peer in out - // unable to generate simple pointer conversion for v1beta1.LabelSelector -> unversioned.LabelSelector - if in.Selector != 
nil { - out.Selector = new(unversioned.LabelSelector) - if err := Convert_v1beta1_LabelSelector_To_unversioned_LabelSelector(in.Selector, out.Selector, s); err != nil { - return err + if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { + return err + } + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ReplicaSet, len(*in)) + for i := range *in { + if err := Convert_extensions_ReplicaSet_To_v1beta1_ReplicaSet(&(*in)[i], &(*out)[i], s); err != nil { + return err + } } } else { - out.Selector = nil - } - if err := Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { - return err + out.Items = nil } return nil } +func Convert_extensions_ReplicaSetList_To_v1beta1_ReplicaSetList(in *extensions.ReplicaSetList, out *ReplicaSetList, s conversion.Scope) error { + return autoConvert_extensions_ReplicaSetList_To_v1beta1_ReplicaSetList(in, out, s) +} + func autoConvert_v1beta1_ReplicaSetStatus_To_extensions_ReplicaSetStatus(in *ReplicaSetStatus, out *extensions.ReplicaSetStatus, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*ReplicaSetStatus))(in) - } - out.Replicas = int(in.Replicas) - out.FullyLabeledReplicas = int(in.FullyLabeledReplicas) + out.Replicas = in.Replicas + out.FullyLabeledReplicas = in.FullyLabeledReplicas out.ObservedGeneration = in.ObservedGeneration return nil } @@ -4863,10 +2049,18 @@ func Convert_v1beta1_ReplicaSetStatus_To_extensions_ReplicaSetStatus(in *Replica return autoConvert_v1beta1_ReplicaSetStatus_To_extensions_ReplicaSetStatus(in, out, s) } +func autoConvert_extensions_ReplicaSetStatus_To_v1beta1_ReplicaSetStatus(in *extensions.ReplicaSetStatus, out *ReplicaSetStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + out.FullyLabeledReplicas = in.FullyLabeledReplicas + out.ObservedGeneration = in.ObservedGeneration + return nil +} + +func Convert_extensions_ReplicaSetStatus_To_v1beta1_ReplicaSetStatus(in *extensions.ReplicaSetStatus, out *ReplicaSetStatus, s conversion.Scope) error { + return autoConvert_extensions_ReplicaSetStatus_To_v1beta1_ReplicaSetStatus(in, out, s) +} + func autoConvert_v1beta1_ReplicationControllerDummy_To_extensions_ReplicationControllerDummy(in *ReplicationControllerDummy, out *extensions.ReplicationControllerDummy, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*ReplicationControllerDummy))(in) - } if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } @@ -4877,10 +2071,18 @@ func Convert_v1beta1_ReplicationControllerDummy_To_extensions_ReplicationControl return autoConvert_v1beta1_ReplicationControllerDummy_To_extensions_ReplicationControllerDummy(in, out, s) } -func autoConvert_v1beta1_RollbackConfig_To_extensions_RollbackConfig(in *RollbackConfig, out *extensions.RollbackConfig, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*RollbackConfig))(in) +func autoConvert_extensions_ReplicationControllerDummy_To_v1beta1_ReplicationControllerDummy(in *extensions.ReplicationControllerDummy, out *ReplicationControllerDummy, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err } + return nil +} + +func 
Convert_extensions_ReplicationControllerDummy_To_v1beta1_ReplicationControllerDummy(in *extensions.ReplicationControllerDummy, out *ReplicationControllerDummy, s conversion.Scope) error { + return autoConvert_extensions_ReplicationControllerDummy_To_v1beta1_ReplicationControllerDummy(in, out, s) +} + +func autoConvert_v1beta1_RollbackConfig_To_extensions_RollbackConfig(in *RollbackConfig, out *extensions.RollbackConfig, s conversion.Scope) error { out.Revision = in.Revision return nil } @@ -4889,24 +2091,22 @@ func Convert_v1beta1_RollbackConfig_To_extensions_RollbackConfig(in *RollbackCon return autoConvert_v1beta1_RollbackConfig_To_extensions_RollbackConfig(in, out, s) } -func autoConvert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(in *RollingUpdateDeployment, out *extensions.RollingUpdateDeployment, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*RollingUpdateDeployment))(in) - } - // in.MaxUnavailable has no peer in out - // in.MaxSurge has no peer in out +func autoConvert_extensions_RollbackConfig_To_v1beta1_RollbackConfig(in *extensions.RollbackConfig, out *RollbackConfig, s conversion.Scope) error { + out.Revision = in.Revision return nil } +func Convert_extensions_RollbackConfig_To_v1beta1_RollbackConfig(in *extensions.RollbackConfig, out *RollbackConfig, s conversion.Scope) error { + return autoConvert_extensions_RollbackConfig_To_v1beta1_RollbackConfig(in, out, s) +} + func autoConvert_v1beta1_RunAsUserStrategyOptions_To_extensions_RunAsUserStrategyOptions(in *RunAsUserStrategyOptions, out *extensions.RunAsUserStrategyOptions, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*RunAsUserStrategyOptions))(in) - } out.Rule = extensions.RunAsUserStrategy(in.Rule) if in.Ranges != nil { - out.Ranges = make([]extensions.IDRange, len(in.Ranges)) - for i := range in.Ranges { - if err := Convert_v1beta1_IDRange_To_extensions_IDRange(&in.Ranges[i], &out.Ranges[i], s); err != nil { + in, out := &in.Ranges, &out.Ranges + *out = make([]extensions.IDRange, len(*in)) + for i := range *in { + if err := Convert_v1beta1_IDRange_To_extensions_IDRange(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -4920,15 +2120,33 @@ func Convert_v1beta1_RunAsUserStrategyOptions_To_extensions_RunAsUserStrategyOpt return autoConvert_v1beta1_RunAsUserStrategyOptions_To_extensions_RunAsUserStrategyOptions(in, out, s) } -func autoConvert_v1beta1_SELinuxStrategyOptions_To_extensions_SELinuxStrategyOptions(in *SELinuxStrategyOptions, out *extensions.SELinuxStrategyOptions, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*SELinuxStrategyOptions))(in) +func autoConvert_extensions_RunAsUserStrategyOptions_To_v1beta1_RunAsUserStrategyOptions(in *extensions.RunAsUserStrategyOptions, out *RunAsUserStrategyOptions, s conversion.Scope) error { + out.Rule = RunAsUserStrategy(in.Rule) + if in.Ranges != nil { + in, out := &in.Ranges, &out.Ranges + *out = make([]IDRange, len(*in)) + for i := range *in { + if err := Convert_extensions_IDRange_To_v1beta1_IDRange(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Ranges = nil } + return nil +} + +func Convert_extensions_RunAsUserStrategyOptions_To_v1beta1_RunAsUserStrategyOptions(in *extensions.RunAsUserStrategyOptions, out *RunAsUserStrategyOptions, s conversion.Scope) error { + return 
autoConvert_extensions_RunAsUserStrategyOptions_To_v1beta1_RunAsUserStrategyOptions(in, out, s) +} + +func autoConvert_v1beta1_SELinuxStrategyOptions_To_extensions_SELinuxStrategyOptions(in *SELinuxStrategyOptions, out *extensions.SELinuxStrategyOptions, s conversion.Scope) error { out.Rule = extensions.SELinuxStrategy(in.Rule) - // unable to generate simple pointer conversion for v1.SELinuxOptions -> api.SELinuxOptions if in.SELinuxOptions != nil { - out.SELinuxOptions = new(api.SELinuxOptions) - if err := Convert_v1_SELinuxOptions_To_api_SELinuxOptions(in.SELinuxOptions, out.SELinuxOptions, s); err != nil { + in, out := &in.SELinuxOptions, &out.SELinuxOptions + *out = new(api.SELinuxOptions) + // TODO: Inefficient conversion - can we improve it? + if err := s.Convert(*in, *out, 0); err != nil { return err } } else { @@ -4941,14 +2159,31 @@ func Convert_v1beta1_SELinuxStrategyOptions_To_extensions_SELinuxStrategyOptions return autoConvert_v1beta1_SELinuxStrategyOptions_To_extensions_SELinuxStrategyOptions(in, out, s) } -func autoConvert_v1beta1_Scale_To_extensions_Scale(in *Scale, out *extensions.Scale, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*Scale))(in) +func autoConvert_extensions_SELinuxStrategyOptions_To_v1beta1_SELinuxStrategyOptions(in *extensions.SELinuxStrategyOptions, out *SELinuxStrategyOptions, s conversion.Scope) error { + out.Rule = SELinuxStrategy(in.Rule) + if in.SELinuxOptions != nil { + in, out := &in.SELinuxOptions, &out.SELinuxOptions + *out = new(v1.SELinuxOptions) + // TODO: Inefficient conversion - can we improve it? + if err := s.Convert(*in, *out, 0); err != nil { + return err + } + } else { + out.SELinuxOptions = nil } + return nil +} + +func Convert_extensions_SELinuxStrategyOptions_To_v1beta1_SELinuxStrategyOptions(in *extensions.SELinuxStrategyOptions, out *SELinuxStrategyOptions, s conversion.Scope) error { + return autoConvert_extensions_SELinuxStrategyOptions_To_v1beta1_SELinuxStrategyOptions(in, out, s) +} + +func autoConvert_v1beta1_Scale_To_extensions_Scale(in *Scale, out *extensions.Scale, s conversion.Scope) error { if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } - if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { + // TODO: Inefficient conversion - can we improve it? + if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { return err } if err := Convert_v1beta1_ScaleSpec_To_extensions_ScaleSpec(&in.Spec, &out.Spec, s); err != nil { @@ -4964,11 +2199,29 @@ func Convert_v1beta1_Scale_To_extensions_Scale(in *Scale, out *extensions.Scale, return autoConvert_v1beta1_Scale_To_extensions_Scale(in, out, s) } -func autoConvert_v1beta1_ScaleSpec_To_extensions_ScaleSpec(in *ScaleSpec, out *extensions.ScaleSpec, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*ScaleSpec))(in) +func autoConvert_extensions_Scale_To_v1beta1_Scale(in *extensions.Scale, out *Scale, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + // TODO: Inefficient conversion - can we improve it? 
+ if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { + return err + } + if err := Convert_extensions_ScaleSpec_To_v1beta1_ScaleSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_extensions_ScaleStatus_To_v1beta1_ScaleStatus(&in.Status, &out.Status, s); err != nil { + return err } - out.Replicas = int(in.Replicas) + return nil +} + +func Convert_extensions_Scale_To_v1beta1_Scale(in *extensions.Scale, out *Scale, s conversion.Scope) error { + return autoConvert_extensions_Scale_To_v1beta1_Scale(in, out, s) +} + +func autoConvert_v1beta1_ScaleSpec_To_extensions_ScaleSpec(in *ScaleSpec, out *extensions.ScaleSpec, s conversion.Scope) error { + out.Replicas = in.Replicas return nil } @@ -4976,46 +2229,69 @@ func Convert_v1beta1_ScaleSpec_To_extensions_ScaleSpec(in *ScaleSpec, out *exten return autoConvert_v1beta1_ScaleSpec_To_extensions_ScaleSpec(in, out, s) } -func autoConvert_v1beta1_ScaleStatus_To_extensions_ScaleStatus(in *ScaleStatus, out *extensions.ScaleStatus, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*ScaleStatus))(in) +func autoConvert_extensions_ScaleSpec_To_v1beta1_ScaleSpec(in *extensions.ScaleSpec, out *ScaleSpec, s conversion.Scope) error { + out.Replicas = in.Replicas + return nil +} + +func Convert_extensions_ScaleSpec_To_v1beta1_ScaleSpec(in *extensions.ScaleSpec, out *ScaleSpec, s conversion.Scope) error { + return autoConvert_extensions_ScaleSpec_To_v1beta1_ScaleSpec(in, out, s) +} + +func autoConvert_v1beta1_SupplementalGroupsStrategyOptions_To_extensions_SupplementalGroupsStrategyOptions(in *SupplementalGroupsStrategyOptions, out *extensions.SupplementalGroupsStrategyOptions, s conversion.Scope) error { + out.Rule = extensions.SupplementalGroupsStrategyType(in.Rule) + if in.Ranges != nil { + in, out := &in.Ranges, &out.Ranges + *out = make([]extensions.IDRange, len(*in)) + for i := range *in { + if err := Convert_v1beta1_IDRange_To_extensions_IDRange(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Ranges = nil } - out.Replicas = int(in.Replicas) - // in.Selector has no peer in out - // in.TargetSelector has no peer in out return nil } -func autoConvert_v1beta1_SubresourceReference_To_extensions_SubresourceReference(in *SubresourceReference, out *extensions.SubresourceReference, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*SubresourceReference))(in) +func Convert_v1beta1_SupplementalGroupsStrategyOptions_To_extensions_SupplementalGroupsStrategyOptions(in *SupplementalGroupsStrategyOptions, out *extensions.SupplementalGroupsStrategyOptions, s conversion.Scope) error { + return autoConvert_v1beta1_SupplementalGroupsStrategyOptions_To_extensions_SupplementalGroupsStrategyOptions(in, out, s) +} + +func autoConvert_extensions_SupplementalGroupsStrategyOptions_To_v1beta1_SupplementalGroupsStrategyOptions(in *extensions.SupplementalGroupsStrategyOptions, out *SupplementalGroupsStrategyOptions, s conversion.Scope) error { + out.Rule = SupplementalGroupsStrategyType(in.Rule) + if in.Ranges != nil { + in, out := &in.Ranges, &out.Ranges + *out = make([]IDRange, len(*in)) + for i := range *in { + if err := Convert_extensions_IDRange_To_v1beta1_IDRange(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Ranges = nil } - out.Kind = in.Kind - out.Name = in.Name - out.APIVersion = in.APIVersion - out.Subresource = 
in.Subresource return nil } -func Convert_v1beta1_SubresourceReference_To_extensions_SubresourceReference(in *SubresourceReference, out *extensions.SubresourceReference, s conversion.Scope) error { - return autoConvert_v1beta1_SubresourceReference_To_extensions_SubresourceReference(in, out, s) +func Convert_extensions_SupplementalGroupsStrategyOptions_To_v1beta1_SupplementalGroupsStrategyOptions(in *extensions.SupplementalGroupsStrategyOptions, out *SupplementalGroupsStrategyOptions, s conversion.Scope) error { + return autoConvert_extensions_SupplementalGroupsStrategyOptions_To_v1beta1_SupplementalGroupsStrategyOptions(in, out, s) } func autoConvert_v1beta1_ThirdPartyResource_To_extensions_ThirdPartyResource(in *ThirdPartyResource, out *extensions.ThirdPartyResource, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*ThirdPartyResource))(in) - } if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } - if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { + // TODO: Inefficient conversion - can we improve it? + if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { return err } out.Description = in.Description if in.Versions != nil { - out.Versions = make([]extensions.APIVersion, len(in.Versions)) - for i := range in.Versions { - if err := Convert_v1beta1_APIVersion_To_extensions_APIVersion(&in.Versions[i], &out.Versions[i], s); err != nil { + in, out := &in.Versions, &out.Versions + *out = make([]extensions.APIVersion, len(*in)) + for i := range *in { + if err := Convert_v1beta1_APIVersion_To_extensions_APIVersion(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -5029,17 +2305,42 @@ func Convert_v1beta1_ThirdPartyResource_To_extensions_ThirdPartyResource(in *Thi return autoConvert_v1beta1_ThirdPartyResource_To_extensions_ThirdPartyResource(in, out, s) } -func autoConvert_v1beta1_ThirdPartyResourceData_To_extensions_ThirdPartyResourceData(in *ThirdPartyResourceData, out *extensions.ThirdPartyResourceData, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*ThirdPartyResourceData))(in) +func autoConvert_extensions_ThirdPartyResource_To_v1beta1_ThirdPartyResource(in *extensions.ThirdPartyResource, out *ThirdPartyResource, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + // TODO: Inefficient conversion - can we improve it? 
+ if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { + return err + } + out.Description = in.Description + if in.Versions != nil { + in, out := &in.Versions, &out.Versions + *out = make([]APIVersion, len(*in)) + for i := range *in { + if err := Convert_extensions_APIVersion_To_v1beta1_APIVersion(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Versions = nil } + return nil +} + +func Convert_extensions_ThirdPartyResource_To_v1beta1_ThirdPartyResource(in *extensions.ThirdPartyResource, out *ThirdPartyResource, s conversion.Scope) error { + return autoConvert_extensions_ThirdPartyResource_To_v1beta1_ThirdPartyResource(in, out, s) +} + +func autoConvert_v1beta1_ThirdPartyResourceData_To_extensions_ThirdPartyResourceData(in *ThirdPartyResourceData, out *extensions.ThirdPartyResourceData, s conversion.Scope) error { if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } - if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { + // TODO: Inefficient conversion - can we improve it? + if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { return err } - if err := conversion.ByteSliceCopy(&in.Data, &out.Data, s); err != nil { + if err := conversion.Convert_Slice_byte_To_Slice_byte(&in.Data, &out.Data, s); err != nil { return err } return nil @@ -5049,10 +2350,25 @@ func Convert_v1beta1_ThirdPartyResourceData_To_extensions_ThirdPartyResourceData return autoConvert_v1beta1_ThirdPartyResourceData_To_extensions_ThirdPartyResourceData(in, out, s) } -func autoConvert_v1beta1_ThirdPartyResourceDataList_To_extensions_ThirdPartyResourceDataList(in *ThirdPartyResourceDataList, out *extensions.ThirdPartyResourceDataList, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*ThirdPartyResourceDataList))(in) +func autoConvert_extensions_ThirdPartyResourceData_To_v1beta1_ThirdPartyResourceData(in *extensions.ThirdPartyResourceData, out *ThirdPartyResourceData, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + // TODO: Inefficient conversion - can we improve it? 
+ if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { + return err } + if err := conversion.Convert_Slice_byte_To_Slice_byte(&in.Data, &out.Data, s); err != nil { + return err + } + return nil +} + +func Convert_extensions_ThirdPartyResourceData_To_v1beta1_ThirdPartyResourceData(in *extensions.ThirdPartyResourceData, out *ThirdPartyResourceData, s conversion.Scope) error { + return autoConvert_extensions_ThirdPartyResourceData_To_v1beta1_ThirdPartyResourceData(in, out, s) +} + +func autoConvert_v1beta1_ThirdPartyResourceDataList_To_extensions_ThirdPartyResourceDataList(in *ThirdPartyResourceDataList, out *extensions.ThirdPartyResourceDataList, s conversion.Scope) error { if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } @@ -5060,9 +2376,10 @@ func autoConvert_v1beta1_ThirdPartyResourceDataList_To_extensions_ThirdPartyReso return err } if in.Items != nil { - out.Items = make([]extensions.ThirdPartyResourceData, len(in.Items)) - for i := range in.Items { - if err := Convert_v1beta1_ThirdPartyResourceData_To_extensions_ThirdPartyResourceData(&in.Items[i], &out.Items[i], s); err != nil { + in, out := &in.Items, &out.Items + *out = make([]extensions.ThirdPartyResourceData, len(*in)) + for i := range *in { + if err := Convert_v1beta1_ThirdPartyResourceData_To_extensions_ThirdPartyResourceData(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -5076,10 +2393,32 @@ func Convert_v1beta1_ThirdPartyResourceDataList_To_extensions_ThirdPartyResource return autoConvert_v1beta1_ThirdPartyResourceDataList_To_extensions_ThirdPartyResourceDataList(in, out, s) } -func autoConvert_v1beta1_ThirdPartyResourceList_To_extensions_ThirdPartyResourceList(in *ThirdPartyResourceList, out *extensions.ThirdPartyResourceList, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*ThirdPartyResourceList))(in) +func autoConvert_extensions_ThirdPartyResourceDataList_To_v1beta1_ThirdPartyResourceDataList(in *extensions.ThirdPartyResourceDataList, out *ThirdPartyResourceDataList, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { + return err + } + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ThirdPartyResourceData, len(*in)) + for i := range *in { + if err := Convert_extensions_ThirdPartyResourceData_To_v1beta1_ThirdPartyResourceData(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil } + return nil +} + +func Convert_extensions_ThirdPartyResourceDataList_To_v1beta1_ThirdPartyResourceDataList(in *extensions.ThirdPartyResourceDataList, out *ThirdPartyResourceDataList, s conversion.Scope) error { + return autoConvert_extensions_ThirdPartyResourceDataList_To_v1beta1_ThirdPartyResourceDataList(in, out, s) +} + +func autoConvert_v1beta1_ThirdPartyResourceList_To_extensions_ThirdPartyResourceList(in *ThirdPartyResourceList, out *extensions.ThirdPartyResourceList, s conversion.Scope) error { if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } @@ -5087,9 +2426,10 @@ func autoConvert_v1beta1_ThirdPartyResourceList_To_extensions_ThirdPartyResource return err } if in.Items != nil { - out.Items = 
make([]extensions.ThirdPartyResource, len(in.Items)) - for i := range in.Items { - if err := Convert_v1beta1_ThirdPartyResource_To_extensions_ThirdPartyResource(&in.Items[i], &out.Items[i], s); err != nil { + in, out := &in.Items, &out.Items + *out = make([]extensions.ThirdPartyResource, len(*in)) + for i := range *in { + if err := Convert_v1beta1_ThirdPartyResource_To_extensions_ThirdPartyResource(&(*in)[i], &(*out)[i], s); err != nil { return err } } @@ -5103,218 +2443,27 @@ func Convert_v1beta1_ThirdPartyResourceList_To_extensions_ThirdPartyResourceList return autoConvert_v1beta1_ThirdPartyResourceList_To_extensions_ThirdPartyResourceList(in, out, s) } -func init() { - err := api.Scheme.AddGeneratedConversionFuncs( - autoConvert_api_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource, - autoConvert_api_AzureFileVolumeSource_To_v1_AzureFileVolumeSource, - autoConvert_api_Capabilities_To_v1_Capabilities, - autoConvert_api_CephFSVolumeSource_To_v1_CephFSVolumeSource, - autoConvert_api_CinderVolumeSource_To_v1_CinderVolumeSource, - autoConvert_api_ConfigMapKeySelector_To_v1_ConfigMapKeySelector, - autoConvert_api_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource, - autoConvert_api_ContainerPort_To_v1_ContainerPort, - autoConvert_api_Container_To_v1_Container, - autoConvert_api_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile, - autoConvert_api_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource, - autoConvert_api_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource, - autoConvert_api_EnvVarSource_To_v1_EnvVarSource, - autoConvert_api_EnvVar_To_v1_EnvVar, - autoConvert_api_ExecAction_To_v1_ExecAction, - autoConvert_api_FCVolumeSource_To_v1_FCVolumeSource, - autoConvert_api_FlexVolumeSource_To_v1_FlexVolumeSource, - autoConvert_api_FlockerVolumeSource_To_v1_FlockerVolumeSource, - autoConvert_api_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource, - autoConvert_api_GitRepoVolumeSource_To_v1_GitRepoVolumeSource, - autoConvert_api_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource, - autoConvert_api_HTTPGetAction_To_v1_HTTPGetAction, - autoConvert_api_HTTPHeader_To_v1_HTTPHeader, - autoConvert_api_Handler_To_v1_Handler, - autoConvert_api_HostPathVolumeSource_To_v1_HostPathVolumeSource, - autoConvert_api_ISCSIVolumeSource_To_v1_ISCSIVolumeSource, - autoConvert_api_KeyToPath_To_v1_KeyToPath, - autoConvert_api_Lifecycle_To_v1_Lifecycle, - autoConvert_api_ListOptions_To_v1beta1_ListOptions, - autoConvert_api_LoadBalancerIngress_To_v1_LoadBalancerIngress, - autoConvert_api_LoadBalancerStatus_To_v1_LoadBalancerStatus, - autoConvert_api_LocalObjectReference_To_v1_LocalObjectReference, - autoConvert_api_NFSVolumeSource_To_v1_NFSVolumeSource, - autoConvert_api_ObjectFieldSelector_To_v1_ObjectFieldSelector, - autoConvert_api_ObjectMeta_To_v1_ObjectMeta, - autoConvert_api_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource, - autoConvert_api_PodSpec_To_v1_PodSpec, - autoConvert_api_PodTemplateSpec_To_v1_PodTemplateSpec, - autoConvert_api_Probe_To_v1_Probe, - autoConvert_api_RBDVolumeSource_To_v1_RBDVolumeSource, - autoConvert_api_ResourceRequirements_To_v1_ResourceRequirements, - autoConvert_api_SELinuxOptions_To_v1_SELinuxOptions, - autoConvert_api_SecretKeySelector_To_v1_SecretKeySelector, - autoConvert_api_SecretVolumeSource_To_v1_SecretVolumeSource, - autoConvert_api_SecurityContext_To_v1_SecurityContext, - autoConvert_api_TCPSocketAction_To_v1_TCPSocketAction, - autoConvert_api_VolumeMount_To_v1_VolumeMount, - 
autoConvert_api_VolumeSource_To_v1_VolumeSource, - autoConvert_api_Volume_To_v1_Volume, - autoConvert_extensions_APIVersion_To_v1beta1_APIVersion, - autoConvert_extensions_CPUTargetUtilization_To_v1beta1_CPUTargetUtilization, - autoConvert_extensions_DaemonSetList_To_v1beta1_DaemonSetList, - autoConvert_extensions_DaemonSetSpec_To_v1beta1_DaemonSetSpec, - autoConvert_extensions_DaemonSetStatus_To_v1beta1_DaemonSetStatus, - autoConvert_extensions_DaemonSet_To_v1beta1_DaemonSet, - autoConvert_extensions_DeploymentList_To_v1beta1_DeploymentList, - autoConvert_extensions_DeploymentRollback_To_v1beta1_DeploymentRollback, - autoConvert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec, - autoConvert_extensions_DeploymentStatus_To_v1beta1_DeploymentStatus, - autoConvert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy, - autoConvert_extensions_Deployment_To_v1beta1_Deployment, - autoConvert_extensions_HTTPIngressPath_To_v1beta1_HTTPIngressPath, - autoConvert_extensions_HTTPIngressRuleValue_To_v1beta1_HTTPIngressRuleValue, - autoConvert_extensions_HorizontalPodAutoscalerList_To_v1beta1_HorizontalPodAutoscalerList, - autoConvert_extensions_HorizontalPodAutoscalerSpec_To_v1beta1_HorizontalPodAutoscalerSpec, - autoConvert_extensions_HorizontalPodAutoscalerStatus_To_v1beta1_HorizontalPodAutoscalerStatus, - autoConvert_extensions_HorizontalPodAutoscaler_To_v1beta1_HorizontalPodAutoscaler, - autoConvert_extensions_HostPortRange_To_v1beta1_HostPortRange, - autoConvert_extensions_IDRange_To_v1beta1_IDRange, - autoConvert_extensions_IngressBackend_To_v1beta1_IngressBackend, - autoConvert_extensions_IngressList_To_v1beta1_IngressList, - autoConvert_extensions_IngressRuleValue_To_v1beta1_IngressRuleValue, - autoConvert_extensions_IngressRule_To_v1beta1_IngressRule, - autoConvert_extensions_IngressSpec_To_v1beta1_IngressSpec, - autoConvert_extensions_IngressStatus_To_v1beta1_IngressStatus, - autoConvert_extensions_IngressTLS_To_v1beta1_IngressTLS, - autoConvert_extensions_Ingress_To_v1beta1_Ingress, - autoConvert_extensions_JobCondition_To_v1beta1_JobCondition, - autoConvert_extensions_JobList_To_v1beta1_JobList, - autoConvert_extensions_JobSpec_To_v1beta1_JobSpec, - autoConvert_extensions_JobStatus_To_v1beta1_JobStatus, - autoConvert_extensions_Job_To_v1beta1_Job, - autoConvert_extensions_PodSecurityPolicyList_To_v1beta1_PodSecurityPolicyList, - autoConvert_extensions_PodSecurityPolicySpec_To_v1beta1_PodSecurityPolicySpec, - autoConvert_extensions_PodSecurityPolicy_To_v1beta1_PodSecurityPolicy, - autoConvert_extensions_ReplicaSetList_To_v1beta1_ReplicaSetList, - autoConvert_extensions_ReplicaSetSpec_To_v1beta1_ReplicaSetSpec, - autoConvert_extensions_ReplicaSetStatus_To_v1beta1_ReplicaSetStatus, - autoConvert_extensions_ReplicaSet_To_v1beta1_ReplicaSet, - autoConvert_extensions_ReplicationControllerDummy_To_v1beta1_ReplicationControllerDummy, - autoConvert_extensions_RollbackConfig_To_v1beta1_RollbackConfig, - autoConvert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment, - autoConvert_extensions_RunAsUserStrategyOptions_To_v1beta1_RunAsUserStrategyOptions, - autoConvert_extensions_SELinuxStrategyOptions_To_v1beta1_SELinuxStrategyOptions, - autoConvert_extensions_ScaleSpec_To_v1beta1_ScaleSpec, - autoConvert_extensions_ScaleStatus_To_v1beta1_ScaleStatus, - autoConvert_extensions_Scale_To_v1beta1_Scale, - autoConvert_extensions_SubresourceReference_To_v1beta1_SubresourceReference, - autoConvert_extensions_ThirdPartyResourceDataList_To_v1beta1_ThirdPartyResourceDataList, - 
autoConvert_extensions_ThirdPartyResourceData_To_v1beta1_ThirdPartyResourceData, - autoConvert_extensions_ThirdPartyResourceList_To_v1beta1_ThirdPartyResourceList, - autoConvert_extensions_ThirdPartyResource_To_v1beta1_ThirdPartyResource, - autoConvert_unversioned_LabelSelectorRequirement_To_v1beta1_LabelSelectorRequirement, - autoConvert_unversioned_LabelSelector_To_v1beta1_LabelSelector, - autoConvert_v1_AWSElasticBlockStoreVolumeSource_To_api_AWSElasticBlockStoreVolumeSource, - autoConvert_v1_AzureFileVolumeSource_To_api_AzureFileVolumeSource, - autoConvert_v1_Capabilities_To_api_Capabilities, - autoConvert_v1_CephFSVolumeSource_To_api_CephFSVolumeSource, - autoConvert_v1_CinderVolumeSource_To_api_CinderVolumeSource, - autoConvert_v1_ConfigMapKeySelector_To_api_ConfigMapKeySelector, - autoConvert_v1_ConfigMapVolumeSource_To_api_ConfigMapVolumeSource, - autoConvert_v1_ContainerPort_To_api_ContainerPort, - autoConvert_v1_Container_To_api_Container, - autoConvert_v1_DownwardAPIVolumeFile_To_api_DownwardAPIVolumeFile, - autoConvert_v1_DownwardAPIVolumeSource_To_api_DownwardAPIVolumeSource, - autoConvert_v1_EmptyDirVolumeSource_To_api_EmptyDirVolumeSource, - autoConvert_v1_EnvVarSource_To_api_EnvVarSource, - autoConvert_v1_EnvVar_To_api_EnvVar, - autoConvert_v1_ExecAction_To_api_ExecAction, - autoConvert_v1_FCVolumeSource_To_api_FCVolumeSource, - autoConvert_v1_FlexVolumeSource_To_api_FlexVolumeSource, - autoConvert_v1_FlockerVolumeSource_To_api_FlockerVolumeSource, - autoConvert_v1_GCEPersistentDiskVolumeSource_To_api_GCEPersistentDiskVolumeSource, - autoConvert_v1_GitRepoVolumeSource_To_api_GitRepoVolumeSource, - autoConvert_v1_GlusterfsVolumeSource_To_api_GlusterfsVolumeSource, - autoConvert_v1_HTTPGetAction_To_api_HTTPGetAction, - autoConvert_v1_HTTPHeader_To_api_HTTPHeader, - autoConvert_v1_Handler_To_api_Handler, - autoConvert_v1_HostPathVolumeSource_To_api_HostPathVolumeSource, - autoConvert_v1_ISCSIVolumeSource_To_api_ISCSIVolumeSource, - autoConvert_v1_KeyToPath_To_api_KeyToPath, - autoConvert_v1_Lifecycle_To_api_Lifecycle, - autoConvert_v1_LoadBalancerIngress_To_api_LoadBalancerIngress, - autoConvert_v1_LoadBalancerStatus_To_api_LoadBalancerStatus, - autoConvert_v1_LocalObjectReference_To_api_LocalObjectReference, - autoConvert_v1_NFSVolumeSource_To_api_NFSVolumeSource, - autoConvert_v1_ObjectFieldSelector_To_api_ObjectFieldSelector, - autoConvert_v1_ObjectMeta_To_api_ObjectMeta, - autoConvert_v1_PersistentVolumeClaimVolumeSource_To_api_PersistentVolumeClaimVolumeSource, - autoConvert_v1_PodSpec_To_api_PodSpec, - autoConvert_v1_PodTemplateSpec_To_api_PodTemplateSpec, - autoConvert_v1_Probe_To_api_Probe, - autoConvert_v1_RBDVolumeSource_To_api_RBDVolumeSource, - autoConvert_v1_ResourceRequirements_To_api_ResourceRequirements, - autoConvert_v1_SELinuxOptions_To_api_SELinuxOptions, - autoConvert_v1_SecretKeySelector_To_api_SecretKeySelector, - autoConvert_v1_SecretVolumeSource_To_api_SecretVolumeSource, - autoConvert_v1_SecurityContext_To_api_SecurityContext, - autoConvert_v1_TCPSocketAction_To_api_TCPSocketAction, - autoConvert_v1_VolumeMount_To_api_VolumeMount, - autoConvert_v1_VolumeSource_To_api_VolumeSource, - autoConvert_v1_Volume_To_api_Volume, - autoConvert_v1beta1_APIVersion_To_extensions_APIVersion, - autoConvert_v1beta1_CPUTargetUtilization_To_extensions_CPUTargetUtilization, - autoConvert_v1beta1_DaemonSetList_To_extensions_DaemonSetList, - autoConvert_v1beta1_DaemonSetSpec_To_extensions_DaemonSetSpec, - autoConvert_v1beta1_DaemonSetStatus_To_extensions_DaemonSetStatus, - 
autoConvert_v1beta1_DaemonSet_To_extensions_DaemonSet, - autoConvert_v1beta1_DeploymentList_To_extensions_DeploymentList, - autoConvert_v1beta1_DeploymentRollback_To_extensions_DeploymentRollback, - autoConvert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec, - autoConvert_v1beta1_DeploymentStatus_To_extensions_DeploymentStatus, - autoConvert_v1beta1_Deployment_To_extensions_Deployment, - autoConvert_v1beta1_HTTPIngressPath_To_extensions_HTTPIngressPath, - autoConvert_v1beta1_HTTPIngressRuleValue_To_extensions_HTTPIngressRuleValue, - autoConvert_v1beta1_HorizontalPodAutoscalerList_To_extensions_HorizontalPodAutoscalerList, - autoConvert_v1beta1_HorizontalPodAutoscalerSpec_To_extensions_HorizontalPodAutoscalerSpec, - autoConvert_v1beta1_HorizontalPodAutoscalerStatus_To_extensions_HorizontalPodAutoscalerStatus, - autoConvert_v1beta1_HorizontalPodAutoscaler_To_extensions_HorizontalPodAutoscaler, - autoConvert_v1beta1_HostPortRange_To_extensions_HostPortRange, - autoConvert_v1beta1_IDRange_To_extensions_IDRange, - autoConvert_v1beta1_IngressBackend_To_extensions_IngressBackend, - autoConvert_v1beta1_IngressList_To_extensions_IngressList, - autoConvert_v1beta1_IngressRuleValue_To_extensions_IngressRuleValue, - autoConvert_v1beta1_IngressRule_To_extensions_IngressRule, - autoConvert_v1beta1_IngressSpec_To_extensions_IngressSpec, - autoConvert_v1beta1_IngressStatus_To_extensions_IngressStatus, - autoConvert_v1beta1_IngressTLS_To_extensions_IngressTLS, - autoConvert_v1beta1_Ingress_To_extensions_Ingress, - autoConvert_v1beta1_JobCondition_To_extensions_JobCondition, - autoConvert_v1beta1_JobList_To_extensions_JobList, - autoConvert_v1beta1_JobSpec_To_extensions_JobSpec, - autoConvert_v1beta1_JobStatus_To_extensions_JobStatus, - autoConvert_v1beta1_Job_To_extensions_Job, - autoConvert_v1beta1_LabelSelectorRequirement_To_unversioned_LabelSelectorRequirement, - autoConvert_v1beta1_LabelSelector_To_unversioned_LabelSelector, - autoConvert_v1beta1_ListOptions_To_api_ListOptions, - autoConvert_v1beta1_PodSecurityPolicyList_To_extensions_PodSecurityPolicyList, - autoConvert_v1beta1_PodSecurityPolicySpec_To_extensions_PodSecurityPolicySpec, - autoConvert_v1beta1_PodSecurityPolicy_To_extensions_PodSecurityPolicy, - autoConvert_v1beta1_ReplicaSetList_To_extensions_ReplicaSetList, - autoConvert_v1beta1_ReplicaSetSpec_To_extensions_ReplicaSetSpec, - autoConvert_v1beta1_ReplicaSetStatus_To_extensions_ReplicaSetStatus, - autoConvert_v1beta1_ReplicaSet_To_extensions_ReplicaSet, - autoConvert_v1beta1_ReplicationControllerDummy_To_extensions_ReplicationControllerDummy, - autoConvert_v1beta1_RollbackConfig_To_extensions_RollbackConfig, - autoConvert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment, - autoConvert_v1beta1_RunAsUserStrategyOptions_To_extensions_RunAsUserStrategyOptions, - autoConvert_v1beta1_SELinuxStrategyOptions_To_extensions_SELinuxStrategyOptions, - autoConvert_v1beta1_ScaleSpec_To_extensions_ScaleSpec, - autoConvert_v1beta1_ScaleStatus_To_extensions_ScaleStatus, - autoConvert_v1beta1_Scale_To_extensions_Scale, - autoConvert_v1beta1_SubresourceReference_To_extensions_SubresourceReference, - autoConvert_v1beta1_ThirdPartyResourceDataList_To_extensions_ThirdPartyResourceDataList, - autoConvert_v1beta1_ThirdPartyResourceData_To_extensions_ThirdPartyResourceData, - autoConvert_v1beta1_ThirdPartyResourceList_To_extensions_ThirdPartyResourceList, - autoConvert_v1beta1_ThirdPartyResource_To_extensions_ThirdPartyResource, - ) - if err != nil { - // If one of the conversion 
functions is malformed, detect it immediately. - panic(err) +func autoConvert_extensions_ThirdPartyResourceList_To_v1beta1_ThirdPartyResourceList(in *extensions.ThirdPartyResourceList, out *ThirdPartyResourceList, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { + return err + } + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ThirdPartyResource, len(*in)) + for i := range *in { + if err := Convert_extensions_ThirdPartyResource_To_v1beta1_ThirdPartyResource(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil } + return nil +} + +func Convert_extensions_ThirdPartyResourceList_To_v1beta1_ThirdPartyResourceList(in *extensions.ThirdPartyResourceList, out *ThirdPartyResourceList, s conversion.Scope) error { + return autoConvert_extensions_ThirdPartyResourceList_To_v1beta1_ThirdPartyResourceList(in, out, s) } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/conversion_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/conversion_test.go new file mode 100644 index 000000000000..759e38ca1099 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/conversion_test.go @@ -0,0 +1,83 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1_test + +import ( + "reflect" + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/batch" + versioned "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" +) + +// TestJobSpecConversion tests that ManualSelector and AutoSelector +// are handled correctly. +func TestJobSpecConversion(t *testing.T) { + pTrue := new(bool) + *pTrue = true + pFalse := new(bool) + *pFalse = false + + // False or nil convert to true. + // True converts to nil. + tests := []struct { + in *bool + expectOut *bool + }{ + { + in: nil, + expectOut: pTrue, + }, + { + in: pFalse, + expectOut: pTrue, + }, + { + in: pTrue, + expectOut: nil, + }, + } + + // Test internal -> v1beta1. + for _, test := range tests { + i := &batch.JobSpec{ + ManualSelector: test.in, + } + v := versioned.JobSpec{} + if err := api.Scheme.Convert(i, &v); err != nil { + t.Fatalf("unexpected error: %v", err) + } + if !reflect.DeepEqual(test.expectOut, v.AutoSelector) { + t.Fatalf("want v1beta1.AutoSelector %v, got %v", test.expectOut, v.AutoSelector) + } + } + + // Test v1beta1 -> internal. 
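+	// Editorial note: the same test table is reused for the reverse
+	// direction. That works because the mapping above is its own inverse
+	// on the reachable values: nil <-> true, and false normalizes to true
+	// in either direction, so expectOut is compared against ManualSelector
+	// below exactly as it was compared against AutoSelector above.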
+ for _, test := range tests { + i := &versioned.JobSpec{ + AutoSelector: test.in, + } + e := batch.JobSpec{} + if err := api.Scheme.Convert(i, &e); err != nil { + t.Fatalf("unexpected error: %v", err) + } + if !reflect.DeepEqual(test.expectOut, e.ManualSelector) { + t.Fatalf("want extensions.ManualSelector %v, got %v", test.expectOut, e.ManualSelector) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/deep_copy_generated.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/deep_copy_generated.go index 845e4dd7f5e3..dd33d0d08f51 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/deep_copy_generated.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/deep_copy_generated.go @@ -1,5 +1,7 @@ +// +build !ignore_autogenerated + /* -Copyright 2015 The Kubernetes Authors All rights reserved. +Copyright 2016 The Kubernetes Authors All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,1049 +16,180 @@ See the License for the specific language governing permissions and limitations under the License. */ -// DO NOT EDIT. THIS FILE IS AUTO-GENERATED BY $KUBEROOT/hack/update-generated-deep-copies.sh. +// This file was autogenerated by deepcopy-gen. Do not edit it manually! package v1beta1 import ( - time "time" - api "k8s.io/kubernetes/pkg/api" resource "k8s.io/kubernetes/pkg/api/resource" unversioned "k8s.io/kubernetes/pkg/api/unversioned" v1 "k8s.io/kubernetes/pkg/api/v1" conversion "k8s.io/kubernetes/pkg/conversion" intstr "k8s.io/kubernetes/pkg/util/intstr" - inf "speter.net/go/exp/math/dec/inf" ) -func deepCopy_resource_Quantity(in resource.Quantity, out *resource.Quantity, c *conversion.Cloner) error { - if in.Amount != nil { - if newVal, err := c.DeepCopy(in.Amount); err != nil { - return err - } else { - out.Amount = newVal.(*inf.Dec) - } - } else { - out.Amount = nil - } - out.Format = in.Format - return nil -} - -func deepCopy_unversioned_ListMeta(in unversioned.ListMeta, out *unversioned.ListMeta, c *conversion.Cloner) error { - out.SelfLink = in.SelfLink - out.ResourceVersion = in.ResourceVersion - return nil -} - -func deepCopy_unversioned_Time(in unversioned.Time, out *unversioned.Time, c *conversion.Cloner) error { - if newVal, err := c.DeepCopy(in.Time); err != nil { - return err - } else { - out.Time = newVal.(time.Time) - } - return nil -} - -func deepCopy_unversioned_TypeMeta(in unversioned.TypeMeta, out *unversioned.TypeMeta, c *conversion.Cloner) error { - out.Kind = in.Kind - out.APIVersion = in.APIVersion - return nil -} - -func deepCopy_v1_AWSElasticBlockStoreVolumeSource(in v1.AWSElasticBlockStoreVolumeSource, out *v1.AWSElasticBlockStoreVolumeSource, c *conversion.Cloner) error { - out.VolumeID = in.VolumeID - out.FSType = in.FSType - out.Partition = in.Partition - out.ReadOnly = in.ReadOnly - return nil -} - -func deepCopy_v1_AzureFileVolumeSource(in v1.AzureFileVolumeSource, out *v1.AzureFileVolumeSource, c *conversion.Cloner) error { - out.SecretName = in.SecretName - out.ShareName = in.ShareName - out.ReadOnly = in.ReadOnly - return nil -} - -func deepCopy_v1_Capabilities(in v1.Capabilities, out *v1.Capabilities, c *conversion.Cloner) error { - if in.Add != nil { - out.Add = make([]v1.Capability, len(in.Add)) - for i := range in.Add { - out.Add[i] = in.Add[i] - } - } else { - out.Add = nil - } - if in.Drop != nil { - out.Drop = make([]v1.Capability, 
len(in.Drop)) - for i := range in.Drop { - out.Drop[i] = in.Drop[i] - } - } else { - out.Drop = nil - } - return nil -} - -func deepCopy_v1_CephFSVolumeSource(in v1.CephFSVolumeSource, out *v1.CephFSVolumeSource, c *conversion.Cloner) error { - if in.Monitors != nil { - out.Monitors = make([]string, len(in.Monitors)) - for i := range in.Monitors { - out.Monitors[i] = in.Monitors[i] - } - } else { - out.Monitors = nil - } - out.Path = in.Path - out.User = in.User - out.SecretFile = in.SecretFile - if in.SecretRef != nil { - out.SecretRef = new(v1.LocalObjectReference) - if err := deepCopy_v1_LocalObjectReference(*in.SecretRef, out.SecretRef, c); err != nil { - return err - } - } else { - out.SecretRef = nil - } - out.ReadOnly = in.ReadOnly - return nil -} - -func deepCopy_v1_CinderVolumeSource(in v1.CinderVolumeSource, out *v1.CinderVolumeSource, c *conversion.Cloner) error { - out.VolumeID = in.VolumeID - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly - return nil -} - -func deepCopy_v1_ConfigMapKeySelector(in v1.ConfigMapKeySelector, out *v1.ConfigMapKeySelector, c *conversion.Cloner) error { - if err := deepCopy_v1_LocalObjectReference(in.LocalObjectReference, &out.LocalObjectReference, c); err != nil { - return err - } - out.Key = in.Key - return nil -} - -func deepCopy_v1_ConfigMapVolumeSource(in v1.ConfigMapVolumeSource, out *v1.ConfigMapVolumeSource, c *conversion.Cloner) error { - if err := deepCopy_v1_LocalObjectReference(in.LocalObjectReference, &out.LocalObjectReference, c); err != nil { - return err - } - if in.Items != nil { - out.Items = make([]v1.KeyToPath, len(in.Items)) - for i := range in.Items { - if err := deepCopy_v1_KeyToPath(in.Items[i], &out.Items[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func deepCopy_v1_Container(in v1.Container, out *v1.Container, c *conversion.Cloner) error { - out.Name = in.Name - out.Image = in.Image - if in.Command != nil { - out.Command = make([]string, len(in.Command)) - for i := range in.Command { - out.Command[i] = in.Command[i] - } - } else { - out.Command = nil - } - if in.Args != nil { - out.Args = make([]string, len(in.Args)) - for i := range in.Args { - out.Args[i] = in.Args[i] - } - } else { - out.Args = nil - } - out.WorkingDir = in.WorkingDir - if in.Ports != nil { - out.Ports = make([]v1.ContainerPort, len(in.Ports)) - for i := range in.Ports { - if err := deepCopy_v1_ContainerPort(in.Ports[i], &out.Ports[i], c); err != nil { - return err - } - } - } else { - out.Ports = nil - } - if in.Env != nil { - out.Env = make([]v1.EnvVar, len(in.Env)) - for i := range in.Env { - if err := deepCopy_v1_EnvVar(in.Env[i], &out.Env[i], c); err != nil { - return err - } - } - } else { - out.Env = nil - } - if err := deepCopy_v1_ResourceRequirements(in.Resources, &out.Resources, c); err != nil { - return err - } - if in.VolumeMounts != nil { - out.VolumeMounts = make([]v1.VolumeMount, len(in.VolumeMounts)) - for i := range in.VolumeMounts { - if err := deepCopy_v1_VolumeMount(in.VolumeMounts[i], &out.VolumeMounts[i], c); err != nil { - return err - } - } - } else { - out.VolumeMounts = nil - } - if in.LivenessProbe != nil { - out.LivenessProbe = new(v1.Probe) - if err := deepCopy_v1_Probe(*in.LivenessProbe, out.LivenessProbe, c); err != nil { - return err - } - } else { - out.LivenessProbe = nil - } - if in.ReadinessProbe != nil { - out.ReadinessProbe = new(v1.Probe) - if err := deepCopy_v1_Probe(*in.ReadinessProbe, out.ReadinessProbe, c); err != nil { - return err - } - } else { - 
out.ReadinessProbe = nil - } - if in.Lifecycle != nil { - out.Lifecycle = new(v1.Lifecycle) - if err := deepCopy_v1_Lifecycle(*in.Lifecycle, out.Lifecycle, c); err != nil { - return err - } - } else { - out.Lifecycle = nil - } - out.TerminationMessagePath = in.TerminationMessagePath - out.ImagePullPolicy = in.ImagePullPolicy - if in.SecurityContext != nil { - out.SecurityContext = new(v1.SecurityContext) - if err := deepCopy_v1_SecurityContext(*in.SecurityContext, out.SecurityContext, c); err != nil { - return err - } - } else { - out.SecurityContext = nil - } - out.Stdin = in.Stdin - out.StdinOnce = in.StdinOnce - out.TTY = in.TTY - return nil -} - -func deepCopy_v1_ContainerPort(in v1.ContainerPort, out *v1.ContainerPort, c *conversion.Cloner) error { - out.Name = in.Name - out.HostPort = in.HostPort - out.ContainerPort = in.ContainerPort - out.Protocol = in.Protocol - out.HostIP = in.HostIP - return nil -} - -func deepCopy_v1_DownwardAPIVolumeFile(in v1.DownwardAPIVolumeFile, out *v1.DownwardAPIVolumeFile, c *conversion.Cloner) error { - out.Path = in.Path - if err := deepCopy_v1_ObjectFieldSelector(in.FieldRef, &out.FieldRef, c); err != nil { - return err - } - return nil -} - -func deepCopy_v1_DownwardAPIVolumeSource(in v1.DownwardAPIVolumeSource, out *v1.DownwardAPIVolumeSource, c *conversion.Cloner) error { - if in.Items != nil { - out.Items = make([]v1.DownwardAPIVolumeFile, len(in.Items)) - for i := range in.Items { - if err := deepCopy_v1_DownwardAPIVolumeFile(in.Items[i], &out.Items[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func deepCopy_v1_EmptyDirVolumeSource(in v1.EmptyDirVolumeSource, out *v1.EmptyDirVolumeSource, c *conversion.Cloner) error { - out.Medium = in.Medium - return nil -} - -func deepCopy_v1_EnvVar(in v1.EnvVar, out *v1.EnvVar, c *conversion.Cloner) error { - out.Name = in.Name - out.Value = in.Value - if in.ValueFrom != nil { - out.ValueFrom = new(v1.EnvVarSource) - if err := deepCopy_v1_EnvVarSource(*in.ValueFrom, out.ValueFrom, c); err != nil { - return err - } - } else { - out.ValueFrom = nil - } - return nil -} - -func deepCopy_v1_EnvVarSource(in v1.EnvVarSource, out *v1.EnvVarSource, c *conversion.Cloner) error { - if in.FieldRef != nil { - out.FieldRef = new(v1.ObjectFieldSelector) - if err := deepCopy_v1_ObjectFieldSelector(*in.FieldRef, out.FieldRef, c); err != nil { - return err - } - } else { - out.FieldRef = nil - } - if in.ConfigMapKeyRef != nil { - out.ConfigMapKeyRef = new(v1.ConfigMapKeySelector) - if err := deepCopy_v1_ConfigMapKeySelector(*in.ConfigMapKeyRef, out.ConfigMapKeyRef, c); err != nil { - return err - } - } else { - out.ConfigMapKeyRef = nil - } - if in.SecretKeyRef != nil { - out.SecretKeyRef = new(v1.SecretKeySelector) - if err := deepCopy_v1_SecretKeySelector(*in.SecretKeyRef, out.SecretKeyRef, c); err != nil { - return err - } - } else { - out.SecretKeyRef = nil - } - return nil -} - -func deepCopy_v1_ExecAction(in v1.ExecAction, out *v1.ExecAction, c *conversion.Cloner) error { - if in.Command != nil { - out.Command = make([]string, len(in.Command)) - for i := range in.Command { - out.Command[i] = in.Command[i] - } - } else { - out.Command = nil - } - return nil -} - -func deepCopy_v1_FCVolumeSource(in v1.FCVolumeSource, out *v1.FCVolumeSource, c *conversion.Cloner) error { - if in.TargetWWNs != nil { - out.TargetWWNs = make([]string, len(in.TargetWWNs)) - for i := range in.TargetWWNs { - out.TargetWWNs[i] = in.TargetWWNs[i] - } - } else { - out.TargetWWNs = nil - } - if in.Lun 
!= nil { - out.Lun = new(int32) - *out.Lun = *in.Lun - } else { - out.Lun = nil - } - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly - return nil -} - -func deepCopy_v1_FlexVolumeSource(in v1.FlexVolumeSource, out *v1.FlexVolumeSource, c *conversion.Cloner) error { - out.Driver = in.Driver - out.FSType = in.FSType - if in.SecretRef != nil { - out.SecretRef = new(v1.LocalObjectReference) - if err := deepCopy_v1_LocalObjectReference(*in.SecretRef, out.SecretRef, c); err != nil { - return err - } - } else { - out.SecretRef = nil - } - out.ReadOnly = in.ReadOnly - if in.Options != nil { - out.Options = make(map[string]string) - for key, val := range in.Options { - out.Options[key] = val - } - } else { - out.Options = nil - } - return nil -} - -func deepCopy_v1_FlockerVolumeSource(in v1.FlockerVolumeSource, out *v1.FlockerVolumeSource, c *conversion.Cloner) error { - out.DatasetName = in.DatasetName - return nil -} - -func deepCopy_v1_GCEPersistentDiskVolumeSource(in v1.GCEPersistentDiskVolumeSource, out *v1.GCEPersistentDiskVolumeSource, c *conversion.Cloner) error { - out.PDName = in.PDName - out.FSType = in.FSType - out.Partition = in.Partition - out.ReadOnly = in.ReadOnly - return nil -} - -func deepCopy_v1_GitRepoVolumeSource(in v1.GitRepoVolumeSource, out *v1.GitRepoVolumeSource, c *conversion.Cloner) error { - out.Repository = in.Repository - out.Revision = in.Revision - out.Directory = in.Directory - return nil -} - -func deepCopy_v1_GlusterfsVolumeSource(in v1.GlusterfsVolumeSource, out *v1.GlusterfsVolumeSource, c *conversion.Cloner) error { - out.EndpointsName = in.EndpointsName - out.Path = in.Path - out.ReadOnly = in.ReadOnly - return nil -} - -func deepCopy_v1_HTTPGetAction(in v1.HTTPGetAction, out *v1.HTTPGetAction, c *conversion.Cloner) error { - out.Path = in.Path - if err := deepCopy_intstr_IntOrString(in.Port, &out.Port, c); err != nil { - return err - } - out.Host = in.Host - out.Scheme = in.Scheme - if in.HTTPHeaders != nil { - out.HTTPHeaders = make([]v1.HTTPHeader, len(in.HTTPHeaders)) - for i := range in.HTTPHeaders { - if err := deepCopy_v1_HTTPHeader(in.HTTPHeaders[i], &out.HTTPHeaders[i], c); err != nil { - return err - } - } - } else { - out.HTTPHeaders = nil - } - return nil -} - -func deepCopy_v1_HTTPHeader(in v1.HTTPHeader, out *v1.HTTPHeader, c *conversion.Cloner) error { - out.Name = in.Name - out.Value = in.Value - return nil -} - -func deepCopy_v1_Handler(in v1.Handler, out *v1.Handler, c *conversion.Cloner) error { - if in.Exec != nil { - out.Exec = new(v1.ExecAction) - if err := deepCopy_v1_ExecAction(*in.Exec, out.Exec, c); err != nil { - return err - } - } else { - out.Exec = nil - } - if in.HTTPGet != nil { - out.HTTPGet = new(v1.HTTPGetAction) - if err := deepCopy_v1_HTTPGetAction(*in.HTTPGet, out.HTTPGet, c); err != nil { - return err - } - } else { - out.HTTPGet = nil - } - if in.TCPSocket != nil { - out.TCPSocket = new(v1.TCPSocketAction) - if err := deepCopy_v1_TCPSocketAction(*in.TCPSocket, out.TCPSocket, c); err != nil { - return err - } - } else { - out.TCPSocket = nil - } - return nil -} - -func deepCopy_v1_HostPathVolumeSource(in v1.HostPathVolumeSource, out *v1.HostPathVolumeSource, c *conversion.Cloner) error { - out.Path = in.Path - return nil -} - -func deepCopy_v1_ISCSIVolumeSource(in v1.ISCSIVolumeSource, out *v1.ISCSIVolumeSource, c *conversion.Cloner) error { - out.TargetPortal = in.TargetPortal - out.IQN = in.IQN - out.Lun = in.Lun - out.ISCSIInterface = in.ISCSIInterface - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly - 
return nil -} - -func deepCopy_v1_KeyToPath(in v1.KeyToPath, out *v1.KeyToPath, c *conversion.Cloner) error { - out.Key = in.Key - out.Path = in.Path - return nil -} - -func deepCopy_v1_Lifecycle(in v1.Lifecycle, out *v1.Lifecycle, c *conversion.Cloner) error { - if in.PostStart != nil { - out.PostStart = new(v1.Handler) - if err := deepCopy_v1_Handler(*in.PostStart, out.PostStart, c); err != nil { - return err - } - } else { - out.PostStart = nil - } - if in.PreStop != nil { - out.PreStop = new(v1.Handler) - if err := deepCopy_v1_Handler(*in.PreStop, out.PreStop, c); err != nil { - return err - } - } else { - out.PreStop = nil - } - return nil -} - -func deepCopy_v1_LoadBalancerIngress(in v1.LoadBalancerIngress, out *v1.LoadBalancerIngress, c *conversion.Cloner) error { - out.IP = in.IP - out.Hostname = in.Hostname - return nil -} - -func deepCopy_v1_LoadBalancerStatus(in v1.LoadBalancerStatus, out *v1.LoadBalancerStatus, c *conversion.Cloner) error { - if in.Ingress != nil { - out.Ingress = make([]v1.LoadBalancerIngress, len(in.Ingress)) - for i := range in.Ingress { - if err := deepCopy_v1_LoadBalancerIngress(in.Ingress[i], &out.Ingress[i], c); err != nil { - return err - } - } - } else { - out.Ingress = nil - } - return nil -} - -func deepCopy_v1_LocalObjectReference(in v1.LocalObjectReference, out *v1.LocalObjectReference, c *conversion.Cloner) error { - out.Name = in.Name - return nil -} - -func deepCopy_v1_NFSVolumeSource(in v1.NFSVolumeSource, out *v1.NFSVolumeSource, c *conversion.Cloner) error { - out.Server = in.Server - out.Path = in.Path - out.ReadOnly = in.ReadOnly - return nil -} - -func deepCopy_v1_ObjectFieldSelector(in v1.ObjectFieldSelector, out *v1.ObjectFieldSelector, c *conversion.Cloner) error { - out.APIVersion = in.APIVersion - out.FieldPath = in.FieldPath - return nil -} - -func deepCopy_v1_ObjectMeta(in v1.ObjectMeta, out *v1.ObjectMeta, c *conversion.Cloner) error { - out.Name = in.Name - out.GenerateName = in.GenerateName - out.Namespace = in.Namespace - out.SelfLink = in.SelfLink - out.UID = in.UID - out.ResourceVersion = in.ResourceVersion - out.Generation = in.Generation - if err := deepCopy_unversioned_Time(in.CreationTimestamp, &out.CreationTimestamp, c); err != nil { - return err - } - if in.DeletionTimestamp != nil { - out.DeletionTimestamp = new(unversioned.Time) - if err := deepCopy_unversioned_Time(*in.DeletionTimestamp, out.DeletionTimestamp, c); err != nil { - return err - } - } else { - out.DeletionTimestamp = nil - } - if in.DeletionGracePeriodSeconds != nil { - out.DeletionGracePeriodSeconds = new(int64) - *out.DeletionGracePeriodSeconds = *in.DeletionGracePeriodSeconds - } else { - out.DeletionGracePeriodSeconds = nil - } - if in.Labels != nil { - out.Labels = make(map[string]string) - for key, val := range in.Labels { - out.Labels[key] = val - } - } else { - out.Labels = nil - } - if in.Annotations != nil { - out.Annotations = make(map[string]string) - for key, val := range in.Annotations { - out.Annotations[key] = val - } - } else { - out.Annotations = nil - } - return nil -} - -func deepCopy_v1_PersistentVolumeClaimVolumeSource(in v1.PersistentVolumeClaimVolumeSource, out *v1.PersistentVolumeClaimVolumeSource, c *conversion.Cloner) error { - out.ClaimName = in.ClaimName - out.ReadOnly = in.ReadOnly - return nil -} - -func deepCopy_v1_PodSecurityContext(in v1.PodSecurityContext, out *v1.PodSecurityContext, c *conversion.Cloner) error { - if in.SELinuxOptions != nil { - out.SELinuxOptions = new(v1.SELinuxOptions) - if err := 
deepCopy_v1_SELinuxOptions(*in.SELinuxOptions, out.SELinuxOptions, c); err != nil { - return err - } - } else { - out.SELinuxOptions = nil - } - if in.RunAsUser != nil { - out.RunAsUser = new(int64) - *out.RunAsUser = *in.RunAsUser - } else { - out.RunAsUser = nil - } - if in.RunAsNonRoot != nil { - out.RunAsNonRoot = new(bool) - *out.RunAsNonRoot = *in.RunAsNonRoot - } else { - out.RunAsNonRoot = nil - } - if in.SupplementalGroups != nil { - out.SupplementalGroups = make([]int64, len(in.SupplementalGroups)) - for i := range in.SupplementalGroups { - out.SupplementalGroups[i] = in.SupplementalGroups[i] - } - } else { - out.SupplementalGroups = nil - } - if in.FSGroup != nil { - out.FSGroup = new(int64) - *out.FSGroup = *in.FSGroup - } else { - out.FSGroup = nil - } - return nil -} - -func deepCopy_v1_PodSpec(in v1.PodSpec, out *v1.PodSpec, c *conversion.Cloner) error { - if in.Volumes != nil { - out.Volumes = make([]v1.Volume, len(in.Volumes)) - for i := range in.Volumes { - if err := deepCopy_v1_Volume(in.Volumes[i], &out.Volumes[i], c); err != nil { - return err - } - } - } else { - out.Volumes = nil - } - if in.Containers != nil { - out.Containers = make([]v1.Container, len(in.Containers)) - for i := range in.Containers { - if err := deepCopy_v1_Container(in.Containers[i], &out.Containers[i], c); err != nil { - return err - } - } - } else { - out.Containers = nil - } - out.RestartPolicy = in.RestartPolicy - if in.TerminationGracePeriodSeconds != nil { - out.TerminationGracePeriodSeconds = new(int64) - *out.TerminationGracePeriodSeconds = *in.TerminationGracePeriodSeconds - } else { - out.TerminationGracePeriodSeconds = nil - } - if in.ActiveDeadlineSeconds != nil { - out.ActiveDeadlineSeconds = new(int64) - *out.ActiveDeadlineSeconds = *in.ActiveDeadlineSeconds - } else { - out.ActiveDeadlineSeconds = nil - } - out.DNSPolicy = in.DNSPolicy - if in.NodeSelector != nil { - out.NodeSelector = make(map[string]string) - for key, val := range in.NodeSelector { - out.NodeSelector[key] = val - } - } else { - out.NodeSelector = nil - } - out.ServiceAccountName = in.ServiceAccountName - out.DeprecatedServiceAccount = in.DeprecatedServiceAccount - out.NodeName = in.NodeName - out.HostNetwork = in.HostNetwork - out.HostPID = in.HostPID - out.HostIPC = in.HostIPC - if in.SecurityContext != nil { - out.SecurityContext = new(v1.PodSecurityContext) - if err := deepCopy_v1_PodSecurityContext(*in.SecurityContext, out.SecurityContext, c); err != nil { - return err - } - } else { - out.SecurityContext = nil - } - if in.ImagePullSecrets != nil { - out.ImagePullSecrets = make([]v1.LocalObjectReference, len(in.ImagePullSecrets)) - for i := range in.ImagePullSecrets { - if err := deepCopy_v1_LocalObjectReference(in.ImagePullSecrets[i], &out.ImagePullSecrets[i], c); err != nil { - return err - } - } - } else { - out.ImagePullSecrets = nil - } - return nil -} - -func deepCopy_v1_PodTemplateSpec(in v1.PodTemplateSpec, out *v1.PodTemplateSpec, c *conversion.Cloner) error { - if err := deepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if err := deepCopy_v1_PodSpec(in.Spec, &out.Spec, c); err != nil { - return err - } - return nil -} - -func deepCopy_v1_Probe(in v1.Probe, out *v1.Probe, c *conversion.Cloner) error { - if err := deepCopy_v1_Handler(in.Handler, &out.Handler, c); err != nil { - return err - } - out.InitialDelaySeconds = in.InitialDelaySeconds - out.TimeoutSeconds = in.TimeoutSeconds - out.PeriodSeconds = in.PeriodSeconds - out.SuccessThreshold = 
in.SuccessThreshold - out.FailureThreshold = in.FailureThreshold - return nil -} - -func deepCopy_v1_RBDVolumeSource(in v1.RBDVolumeSource, out *v1.RBDVolumeSource, c *conversion.Cloner) error { - if in.CephMonitors != nil { - out.CephMonitors = make([]string, len(in.CephMonitors)) - for i := range in.CephMonitors { - out.CephMonitors[i] = in.CephMonitors[i] - } - } else { - out.CephMonitors = nil - } - out.RBDImage = in.RBDImage - out.FSType = in.FSType - out.RBDPool = in.RBDPool - out.RadosUser = in.RadosUser - out.Keyring = in.Keyring - if in.SecretRef != nil { - out.SecretRef = new(v1.LocalObjectReference) - if err := deepCopy_v1_LocalObjectReference(*in.SecretRef, out.SecretRef, c); err != nil { - return err - } - } else { - out.SecretRef = nil +func init() { + if err := api.Scheme.AddGeneratedDeepCopyFuncs( + DeepCopy_v1beta1_APIVersion, + DeepCopy_v1beta1_CPUTargetUtilization, + DeepCopy_v1beta1_CustomMetricCurrentStatus, + DeepCopy_v1beta1_CustomMetricCurrentStatusList, + DeepCopy_v1beta1_CustomMetricTarget, + DeepCopy_v1beta1_CustomMetricTargetList, + DeepCopy_v1beta1_DaemonSet, + DeepCopy_v1beta1_DaemonSetList, + DeepCopy_v1beta1_DaemonSetSpec, + DeepCopy_v1beta1_DaemonSetStatus, + DeepCopy_v1beta1_Deployment, + DeepCopy_v1beta1_DeploymentList, + DeepCopy_v1beta1_DeploymentRollback, + DeepCopy_v1beta1_DeploymentSpec, + DeepCopy_v1beta1_DeploymentStatus, + DeepCopy_v1beta1_DeploymentStrategy, + DeepCopy_v1beta1_ExportOptions, + DeepCopy_v1beta1_FSGroupStrategyOptions, + DeepCopy_v1beta1_HTTPIngressPath, + DeepCopy_v1beta1_HTTPIngressRuleValue, + DeepCopy_v1beta1_HorizontalPodAutoscaler, + DeepCopy_v1beta1_HorizontalPodAutoscalerList, + DeepCopy_v1beta1_HorizontalPodAutoscalerSpec, + DeepCopy_v1beta1_HorizontalPodAutoscalerStatus, + DeepCopy_v1beta1_HostPortRange, + DeepCopy_v1beta1_IDRange, + DeepCopy_v1beta1_Ingress, + DeepCopy_v1beta1_IngressBackend, + DeepCopy_v1beta1_IngressList, + DeepCopy_v1beta1_IngressRule, + DeepCopy_v1beta1_IngressRuleValue, + DeepCopy_v1beta1_IngressSpec, + DeepCopy_v1beta1_IngressStatus, + DeepCopy_v1beta1_IngressTLS, + DeepCopy_v1beta1_Job, + DeepCopy_v1beta1_JobCondition, + DeepCopy_v1beta1_JobList, + DeepCopy_v1beta1_JobSpec, + DeepCopy_v1beta1_JobStatus, + DeepCopy_v1beta1_LabelSelector, + DeepCopy_v1beta1_LabelSelectorRequirement, + DeepCopy_v1beta1_ListOptions, + DeepCopy_v1beta1_NetworkPolicy, + DeepCopy_v1beta1_NetworkPolicyIngressRule, + DeepCopy_v1beta1_NetworkPolicyList, + DeepCopy_v1beta1_NetworkPolicyPeer, + DeepCopy_v1beta1_NetworkPolicyPort, + DeepCopy_v1beta1_NetworkPolicySpec, + DeepCopy_v1beta1_PodSecurityPolicy, + DeepCopy_v1beta1_PodSecurityPolicyList, + DeepCopy_v1beta1_PodSecurityPolicySpec, + DeepCopy_v1beta1_ReplicaSet, + DeepCopy_v1beta1_ReplicaSetList, + DeepCopy_v1beta1_ReplicaSetSpec, + DeepCopy_v1beta1_ReplicaSetStatus, + DeepCopy_v1beta1_ReplicationControllerDummy, + DeepCopy_v1beta1_RollbackConfig, + DeepCopy_v1beta1_RollingUpdateDeployment, + DeepCopy_v1beta1_RunAsUserStrategyOptions, + DeepCopy_v1beta1_SELinuxStrategyOptions, + DeepCopy_v1beta1_Scale, + DeepCopy_v1beta1_ScaleSpec, + DeepCopy_v1beta1_ScaleStatus, + DeepCopy_v1beta1_SubresourceReference, + DeepCopy_v1beta1_SupplementalGroupsStrategyOptions, + DeepCopy_v1beta1_ThirdPartyResource, + DeepCopy_v1beta1_ThirdPartyResourceData, + DeepCopy_v1beta1_ThirdPartyResourceDataList, + DeepCopy_v1beta1_ThirdPartyResourceList, + ); err != nil { + // if one of the deep copy functions is malformed, detect it immediately. 
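+		// Editorial note: a sketch of what "malformed" means here, based on
+		// the comment above and the functions registered in this file:
+		// AddGeneratedDeepCopyFuncs checks each registered function against
+		// the generated shape
+		//   func(in T, out *T, c *conversion.Cloner) error
+		// and returns an error on any signature drift, so a bad generator
+		// output panics at package init rather than failing silently at
+		// copy time.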
+ panic(err) } - out.ReadOnly = in.ReadOnly - return nil } -func deepCopy_v1_ResourceRequirements(in v1.ResourceRequirements, out *v1.ResourceRequirements, c *conversion.Cloner) error { - if in.Limits != nil { - out.Limits = make(v1.ResourceList) - for key, val := range in.Limits { - newVal := new(resource.Quantity) - if err := deepCopy_resource_Quantity(val, newVal, c); err != nil { - return err - } - out.Limits[key] = *newVal - } - } else { - out.Limits = nil - } - if in.Requests != nil { - out.Requests = make(v1.ResourceList) - for key, val := range in.Requests { - newVal := new(resource.Quantity) - if err := deepCopy_resource_Quantity(val, newVal, c); err != nil { - return err - } - out.Requests[key] = *newVal - } - } else { - out.Requests = nil - } +func DeepCopy_v1beta1_APIVersion(in APIVersion, out *APIVersion, c *conversion.Cloner) error { + out.Name = in.Name return nil } -func deepCopy_v1_SELinuxOptions(in v1.SELinuxOptions, out *v1.SELinuxOptions, c *conversion.Cloner) error { - out.User = in.User - out.Role = in.Role - out.Type = in.Type - out.Level = in.Level +func DeepCopy_v1beta1_CPUTargetUtilization(in CPUTargetUtilization, out *CPUTargetUtilization, c *conversion.Cloner) error { + out.TargetPercentage = in.TargetPercentage return nil } -func deepCopy_v1_SecretKeySelector(in v1.SecretKeySelector, out *v1.SecretKeySelector, c *conversion.Cloner) error { - if err := deepCopy_v1_LocalObjectReference(in.LocalObjectReference, &out.LocalObjectReference, c); err != nil { +func DeepCopy_v1beta1_CustomMetricCurrentStatus(in CustomMetricCurrentStatus, out *CustomMetricCurrentStatus, c *conversion.Cloner) error { + out.Name = in.Name + if err := resource.DeepCopy_resource_Quantity(in.CurrentValue, &out.CurrentValue, c); err != nil { return err } - out.Key = in.Key return nil } -func deepCopy_v1_SecretVolumeSource(in v1.SecretVolumeSource, out *v1.SecretVolumeSource, c *conversion.Cloner) error { - out.SecretName = in.SecretName - return nil -} - -func deepCopy_v1_SecurityContext(in v1.SecurityContext, out *v1.SecurityContext, c *conversion.Cloner) error { - if in.Capabilities != nil { - out.Capabilities = new(v1.Capabilities) - if err := deepCopy_v1_Capabilities(*in.Capabilities, out.Capabilities, c); err != nil { - return err - } - } else { - out.Capabilities = nil - } - if in.Privileged != nil { - out.Privileged = new(bool) - *out.Privileged = *in.Privileged - } else { - out.Privileged = nil - } - if in.SELinuxOptions != nil { - out.SELinuxOptions = new(v1.SELinuxOptions) - if err := deepCopy_v1_SELinuxOptions(*in.SELinuxOptions, out.SELinuxOptions, c); err != nil { - return err +func DeepCopy_v1beta1_CustomMetricCurrentStatusList(in CustomMetricCurrentStatusList, out *CustomMetricCurrentStatusList, c *conversion.Cloner) error { + if in.Items != nil { + in, out := in.Items, &out.Items + *out = make([]CustomMetricCurrentStatus, len(in)) + for i := range in { + if err := DeepCopy_v1beta1_CustomMetricCurrentStatus(in[i], &(*out)[i], c); err != nil { + return err + } } } else { - out.SELinuxOptions = nil - } - if in.RunAsUser != nil { - out.RunAsUser = new(int64) - *out.RunAsUser = *in.RunAsUser - } else { - out.RunAsUser = nil - } - if in.RunAsNonRoot != nil { - out.RunAsNonRoot = new(bool) - *out.RunAsNonRoot = *in.RunAsNonRoot - } else { - out.RunAsNonRoot = nil - } - if in.ReadOnlyRootFilesystem != nil { - out.ReadOnlyRootFilesystem = new(bool) - *out.ReadOnlyRootFilesystem = *in.ReadOnlyRootFilesystem - } else { - out.ReadOnlyRootFilesystem = nil - } - return nil -} - -func 
deepCopy_v1_TCPSocketAction(in v1.TCPSocketAction, out *v1.TCPSocketAction, c *conversion.Cloner) error { - if err := deepCopy_intstr_IntOrString(in.Port, &out.Port, c); err != nil { - return err + out.Items = nil } return nil } -func deepCopy_v1_Volume(in v1.Volume, out *v1.Volume, c *conversion.Cloner) error { +func DeepCopy_v1beta1_CustomMetricTarget(in CustomMetricTarget, out *CustomMetricTarget, c *conversion.Cloner) error { out.Name = in.Name - if err := deepCopy_v1_VolumeSource(in.VolumeSource, &out.VolumeSource, c); err != nil { + if err := resource.DeepCopy_resource_Quantity(in.TargetValue, &out.TargetValue, c); err != nil { return err } return nil } -func deepCopy_v1_VolumeMount(in v1.VolumeMount, out *v1.VolumeMount, c *conversion.Cloner) error { - out.Name = in.Name - out.ReadOnly = in.ReadOnly - out.MountPath = in.MountPath - return nil -} - -func deepCopy_v1_VolumeSource(in v1.VolumeSource, out *v1.VolumeSource, c *conversion.Cloner) error { - if in.HostPath != nil { - out.HostPath = new(v1.HostPathVolumeSource) - if err := deepCopy_v1_HostPathVolumeSource(*in.HostPath, out.HostPath, c); err != nil { - return err - } - } else { - out.HostPath = nil - } - if in.EmptyDir != nil { - out.EmptyDir = new(v1.EmptyDirVolumeSource) - if err := deepCopy_v1_EmptyDirVolumeSource(*in.EmptyDir, out.EmptyDir, c); err != nil { - return err - } - } else { - out.EmptyDir = nil - } - if in.GCEPersistentDisk != nil { - out.GCEPersistentDisk = new(v1.GCEPersistentDiskVolumeSource) - if err := deepCopy_v1_GCEPersistentDiskVolumeSource(*in.GCEPersistentDisk, out.GCEPersistentDisk, c); err != nil { - return err - } - } else { - out.GCEPersistentDisk = nil - } - if in.AWSElasticBlockStore != nil { - out.AWSElasticBlockStore = new(v1.AWSElasticBlockStoreVolumeSource) - if err := deepCopy_v1_AWSElasticBlockStoreVolumeSource(*in.AWSElasticBlockStore, out.AWSElasticBlockStore, c); err != nil { - return err - } - } else { - out.AWSElasticBlockStore = nil - } - if in.GitRepo != nil { - out.GitRepo = new(v1.GitRepoVolumeSource) - if err := deepCopy_v1_GitRepoVolumeSource(*in.GitRepo, out.GitRepo, c); err != nil { - return err - } - } else { - out.GitRepo = nil - } - if in.Secret != nil { - out.Secret = new(v1.SecretVolumeSource) - if err := deepCopy_v1_SecretVolumeSource(*in.Secret, out.Secret, c); err != nil { - return err - } - } else { - out.Secret = nil - } - if in.NFS != nil { - out.NFS = new(v1.NFSVolumeSource) - if err := deepCopy_v1_NFSVolumeSource(*in.NFS, out.NFS, c); err != nil { - return err - } - } else { - out.NFS = nil - } - if in.ISCSI != nil { - out.ISCSI = new(v1.ISCSIVolumeSource) - if err := deepCopy_v1_ISCSIVolumeSource(*in.ISCSI, out.ISCSI, c); err != nil { - return err - } - } else { - out.ISCSI = nil - } - if in.Glusterfs != nil { - out.Glusterfs = new(v1.GlusterfsVolumeSource) - if err := deepCopy_v1_GlusterfsVolumeSource(*in.Glusterfs, out.Glusterfs, c); err != nil { - return err - } - } else { - out.Glusterfs = nil - } - if in.PersistentVolumeClaim != nil { - out.PersistentVolumeClaim = new(v1.PersistentVolumeClaimVolumeSource) - if err := deepCopy_v1_PersistentVolumeClaimVolumeSource(*in.PersistentVolumeClaim, out.PersistentVolumeClaim, c); err != nil { - return err - } - } else { - out.PersistentVolumeClaim = nil - } - if in.RBD != nil { - out.RBD = new(v1.RBDVolumeSource) - if err := deepCopy_v1_RBDVolumeSource(*in.RBD, out.RBD, c); err != nil { - return err - } - } else { - out.RBD = nil - } - if in.FlexVolume != nil { - out.FlexVolume = new(v1.FlexVolumeSource) - if err := 
deepCopy_v1_FlexVolumeSource(*in.FlexVolume, out.FlexVolume, c); err != nil { - return err - } - } else { - out.FlexVolume = nil - } - if in.Cinder != nil { - out.Cinder = new(v1.CinderVolumeSource) - if err := deepCopy_v1_CinderVolumeSource(*in.Cinder, out.Cinder, c); err != nil { - return err - } - } else { - out.Cinder = nil - } - if in.CephFS != nil { - out.CephFS = new(v1.CephFSVolumeSource) - if err := deepCopy_v1_CephFSVolumeSource(*in.CephFS, out.CephFS, c); err != nil { - return err - } - } else { - out.CephFS = nil - } - if in.Flocker != nil { - out.Flocker = new(v1.FlockerVolumeSource) - if err := deepCopy_v1_FlockerVolumeSource(*in.Flocker, out.Flocker, c); err != nil { - return err - } - } else { - out.Flocker = nil - } - if in.DownwardAPI != nil { - out.DownwardAPI = new(v1.DownwardAPIVolumeSource) - if err := deepCopy_v1_DownwardAPIVolumeSource(*in.DownwardAPI, out.DownwardAPI, c); err != nil { - return err - } - } else { - out.DownwardAPI = nil - } - if in.FC != nil { - out.FC = new(v1.FCVolumeSource) - if err := deepCopy_v1_FCVolumeSource(*in.FC, out.FC, c); err != nil { - return err - } - } else { - out.FC = nil - } - if in.AzureFile != nil { - out.AzureFile = new(v1.AzureFileVolumeSource) - if err := deepCopy_v1_AzureFileVolumeSource(*in.AzureFile, out.AzureFile, c); err != nil { - return err - } - } else { - out.AzureFile = nil - } - if in.ConfigMap != nil { - out.ConfigMap = new(v1.ConfigMapVolumeSource) - if err := deepCopy_v1_ConfigMapVolumeSource(*in.ConfigMap, out.ConfigMap, c); err != nil { - return err +func DeepCopy_v1beta1_CustomMetricTargetList(in CustomMetricTargetList, out *CustomMetricTargetList, c *conversion.Cloner) error { + if in.Items != nil { + in, out := in.Items, &out.Items + *out = make([]CustomMetricTarget, len(in)) + for i := range in { + if err := DeepCopy_v1beta1_CustomMetricTarget(in[i], &(*out)[i], c); err != nil { + return err + } } } else { - out.ConfigMap = nil + out.Items = nil } return nil } -func deepCopy_v1beta1_APIVersion(in APIVersion, out *APIVersion, c *conversion.Cloner) error { - out.Name = in.Name - out.APIGroup = in.APIGroup - return nil -} - -func deepCopy_v1beta1_CPUTargetUtilization(in CPUTargetUtilization, out *CPUTargetUtilization, c *conversion.Cloner) error { - out.TargetPercentage = in.TargetPercentage - return nil -} - -func deepCopy_v1beta1_DaemonSet(in DaemonSet, out *DaemonSet, c *conversion.Cloner) error { - if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { +func DeepCopy_v1beta1_DaemonSet(in DaemonSet, out *DaemonSet, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } - if err := deepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + if err := v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { return err } - if err := deepCopy_v1beta1_DaemonSetSpec(in.Spec, &out.Spec, c); err != nil { + if err := DeepCopy_v1beta1_DaemonSetSpec(in.Spec, &out.Spec, c); err != nil { return err } - if err := deepCopy_v1beta1_DaemonSetStatus(in.Status, &out.Status, c); err != nil { + if err := DeepCopy_v1beta1_DaemonSetStatus(in.Status, &out.Status, c); err != nil { return err } return nil } -func deepCopy_v1beta1_DaemonSetList(in DaemonSetList, out *DaemonSetList, c *conversion.Cloner) error { - if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { +func DeepCopy_v1beta1_DaemonSetList(in DaemonSetList, out *DaemonSetList, c 
*conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } - if err := deepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { return err } if in.Items != nil { - out.Items = make([]DaemonSet, len(in.Items)) - for i := range in.Items { - if err := deepCopy_v1beta1_DaemonSet(in.Items[i], &out.Items[i], c); err != nil { + in, out := in.Items, &out.Items + *out = make([]DaemonSet, len(in)) + for i := range in { + if err := DeepCopy_v1beta1_DaemonSet(in[i], &(*out)[i], c); err != nil { return err } } @@ -1066,55 +199,57 @@ func deepCopy_v1beta1_DaemonSetList(in DaemonSetList, out *DaemonSetList, c *con return nil } -func deepCopy_v1beta1_DaemonSetSpec(in DaemonSetSpec, out *DaemonSetSpec, c *conversion.Cloner) error { +func DeepCopy_v1beta1_DaemonSetSpec(in DaemonSetSpec, out *DaemonSetSpec, c *conversion.Cloner) error { if in.Selector != nil { - out.Selector = new(LabelSelector) - if err := deepCopy_v1beta1_LabelSelector(*in.Selector, out.Selector, c); err != nil { + in, out := in.Selector, &out.Selector + *out = new(LabelSelector) + if err := DeepCopy_v1beta1_LabelSelector(*in, *out, c); err != nil { return err } } else { out.Selector = nil } - if err := deepCopy_v1_PodTemplateSpec(in.Template, &out.Template, c); err != nil { + if err := v1.DeepCopy_v1_PodTemplateSpec(in.Template, &out.Template, c); err != nil { return err } return nil } -func deepCopy_v1beta1_DaemonSetStatus(in DaemonSetStatus, out *DaemonSetStatus, c *conversion.Cloner) error { +func DeepCopy_v1beta1_DaemonSetStatus(in DaemonSetStatus, out *DaemonSetStatus, c *conversion.Cloner) error { out.CurrentNumberScheduled = in.CurrentNumberScheduled out.NumberMisscheduled = in.NumberMisscheduled out.DesiredNumberScheduled = in.DesiredNumberScheduled return nil } -func deepCopy_v1beta1_Deployment(in Deployment, out *Deployment, c *conversion.Cloner) error { - if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { +func DeepCopy_v1beta1_Deployment(in Deployment, out *Deployment, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } - if err := deepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + if err := v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { return err } - if err := deepCopy_v1beta1_DeploymentSpec(in.Spec, &out.Spec, c); err != nil { + if err := DeepCopy_v1beta1_DeploymentSpec(in.Spec, &out.Spec, c); err != nil { return err } - if err := deepCopy_v1beta1_DeploymentStatus(in.Status, &out.Status, c); err != nil { + if err := DeepCopy_v1beta1_DeploymentStatus(in.Status, &out.Status, c); err != nil { return err } return nil } -func deepCopy_v1beta1_DeploymentList(in DeploymentList, out *DeploymentList, c *conversion.Cloner) error { - if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { +func DeepCopy_v1beta1_DeploymentList(in DeploymentList, out *DeploymentList, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } - if err := deepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { return err } if in.Items != nil { - out.Items = 
make([]Deployment, len(in.Items)) - for i := range in.Items { - if err := deepCopy_v1beta1_Deployment(in.Items[i], &out.Items[i], c); err != nil { + in, out := in.Items, &out.Items + *out = make([]Deployment, len(in)) + for i := range in { + if err := DeepCopy_v1beta1_Deployment(in[i], &(*out)[i], c); err != nil { return err } } @@ -1124,57 +259,62 @@ func deepCopy_v1beta1_DeploymentList(in DeploymentList, out *DeploymentList, c * return nil } -func deepCopy_v1beta1_DeploymentRollback(in DeploymentRollback, out *DeploymentRollback, c *conversion.Cloner) error { - if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { +func DeepCopy_v1beta1_DeploymentRollback(in DeploymentRollback, out *DeploymentRollback, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } out.Name = in.Name if in.UpdatedAnnotations != nil { - out.UpdatedAnnotations = make(map[string]string) - for key, val := range in.UpdatedAnnotations { - out.UpdatedAnnotations[key] = val + in, out := in.UpdatedAnnotations, &out.UpdatedAnnotations + *out = make(map[string]string) + for key, val := range in { + (*out)[key] = val } } else { out.UpdatedAnnotations = nil } - if err := deepCopy_v1beta1_RollbackConfig(in.RollbackTo, &out.RollbackTo, c); err != nil { + if err := DeepCopy_v1beta1_RollbackConfig(in.RollbackTo, &out.RollbackTo, c); err != nil { return err } return nil } -func deepCopy_v1beta1_DeploymentSpec(in DeploymentSpec, out *DeploymentSpec, c *conversion.Cloner) error { +func DeepCopy_v1beta1_DeploymentSpec(in DeploymentSpec, out *DeploymentSpec, c *conversion.Cloner) error { if in.Replicas != nil { - out.Replicas = new(int32) - *out.Replicas = *in.Replicas + in, out := in.Replicas, &out.Replicas + *out = new(int32) + **out = *in } else { out.Replicas = nil } if in.Selector != nil { - out.Selector = new(LabelSelector) - if err := deepCopy_v1beta1_LabelSelector(*in.Selector, out.Selector, c); err != nil { + in, out := in.Selector, &out.Selector + *out = new(LabelSelector) + if err := DeepCopy_v1beta1_LabelSelector(*in, *out, c); err != nil { return err } } else { out.Selector = nil } - if err := deepCopy_v1_PodTemplateSpec(in.Template, &out.Template, c); err != nil { + if err := v1.DeepCopy_v1_PodTemplateSpec(in.Template, &out.Template, c); err != nil { return err } - if err := deepCopy_v1beta1_DeploymentStrategy(in.Strategy, &out.Strategy, c); err != nil { + if err := DeepCopy_v1beta1_DeploymentStrategy(in.Strategy, &out.Strategy, c); err != nil { return err } out.MinReadySeconds = in.MinReadySeconds if in.RevisionHistoryLimit != nil { - out.RevisionHistoryLimit = new(int32) - *out.RevisionHistoryLimit = *in.RevisionHistoryLimit + in, out := in.RevisionHistoryLimit, &out.RevisionHistoryLimit + *out = new(int32) + **out = *in } else { out.RevisionHistoryLimit = nil } out.Paused = in.Paused if in.RollbackTo != nil { - out.RollbackTo = new(RollbackConfig) - if err := deepCopy_v1beta1_RollbackConfig(*in.RollbackTo, out.RollbackTo, c); err != nil { + in, out := in.RollbackTo, &out.RollbackTo + *out = new(RollbackConfig) + if err := DeepCopy_v1beta1_RollbackConfig(*in, *out, c); err != nil { return err } } else { @@ -1183,7 +323,7 @@ func deepCopy_v1beta1_DeploymentSpec(in DeploymentSpec, out *DeploymentSpec, c * return nil } -func deepCopy_v1beta1_DeploymentStatus(in DeploymentStatus, out *DeploymentStatus, c *conversion.Cloner) error { +func DeepCopy_v1beta1_DeploymentStatus(in DeploymentStatus, out 
*DeploymentStatus, c *conversion.Cloner) error { out.ObservedGeneration = in.ObservedGeneration out.Replicas = in.Replicas out.UpdatedReplicas = in.UpdatedReplicas @@ -1192,11 +332,12 @@ func deepCopy_v1beta1_DeploymentStatus(in DeploymentStatus, out *DeploymentStatu return nil } -func deepCopy_v1beta1_DeploymentStrategy(in DeploymentStrategy, out *DeploymentStrategy, c *conversion.Cloner) error { +func DeepCopy_v1beta1_DeploymentStrategy(in DeploymentStrategy, out *DeploymentStrategy, c *conversion.Cloner) error { out.Type = in.Type if in.RollingUpdate != nil { - out.RollingUpdate = new(RollingUpdateDeployment) - if err := deepCopy_v1beta1_RollingUpdateDeployment(*in.RollingUpdate, out.RollingUpdate, c); err != nil { + in, out := in.RollingUpdate, &out.RollingUpdate + *out = new(RollingUpdateDeployment) + if err := DeepCopy_v1beta1_RollingUpdateDeployment(*in, *out, c); err != nil { return err } } else { @@ -1205,19 +346,45 @@ func deepCopy_v1beta1_DeploymentStrategy(in DeploymentStrategy, out *DeploymentS return nil } -func deepCopy_v1beta1_HTTPIngressPath(in HTTPIngressPath, out *HTTPIngressPath, c *conversion.Cloner) error { +func DeepCopy_v1beta1_ExportOptions(in ExportOptions, out *ExportOptions, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + out.Export = in.Export + out.Exact = in.Exact + return nil +} + +func DeepCopy_v1beta1_FSGroupStrategyOptions(in FSGroupStrategyOptions, out *FSGroupStrategyOptions, c *conversion.Cloner) error { + out.Rule = in.Rule + if in.Ranges != nil { + in, out := in.Ranges, &out.Ranges + *out = make([]IDRange, len(in)) + for i := range in { + if err := DeepCopy_v1beta1_IDRange(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Ranges = nil + } + return nil +} + +func DeepCopy_v1beta1_HTTPIngressPath(in HTTPIngressPath, out *HTTPIngressPath, c *conversion.Cloner) error { out.Path = in.Path - if err := deepCopy_v1beta1_IngressBackend(in.Backend, &out.Backend, c); err != nil { + if err := DeepCopy_v1beta1_IngressBackend(in.Backend, &out.Backend, c); err != nil { return err } return nil } -func deepCopy_v1beta1_HTTPIngressRuleValue(in HTTPIngressRuleValue, out *HTTPIngressRuleValue, c *conversion.Cloner) error { +func DeepCopy_v1beta1_HTTPIngressRuleValue(in HTTPIngressRuleValue, out *HTTPIngressRuleValue, c *conversion.Cloner) error { if in.Paths != nil { - out.Paths = make([]HTTPIngressPath, len(in.Paths)) - for i := range in.Paths { - if err := deepCopy_v1beta1_HTTPIngressPath(in.Paths[i], &out.Paths[i], c); err != nil { + in, out := in.Paths, &out.Paths + *out = make([]HTTPIngressPath, len(in)) + for i := range in { + if err := DeepCopy_v1beta1_HTTPIngressPath(in[i], &(*out)[i], c); err != nil { return err } } @@ -1227,33 +394,34 @@ func deepCopy_v1beta1_HTTPIngressRuleValue(in HTTPIngressRuleValue, out *HTTPIng return nil } -func deepCopy_v1beta1_HorizontalPodAutoscaler(in HorizontalPodAutoscaler, out *HorizontalPodAutoscaler, c *conversion.Cloner) error { - if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { +func DeepCopy_v1beta1_HorizontalPodAutoscaler(in HorizontalPodAutoscaler, out *HorizontalPodAutoscaler, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } - if err := deepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + if err := v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, 
&out.ObjectMeta, c); err != nil { return err } - if err := deepCopy_v1beta1_HorizontalPodAutoscalerSpec(in.Spec, &out.Spec, c); err != nil { + if err := DeepCopy_v1beta1_HorizontalPodAutoscalerSpec(in.Spec, &out.Spec, c); err != nil { return err } - if err := deepCopy_v1beta1_HorizontalPodAutoscalerStatus(in.Status, &out.Status, c); err != nil { + if err := DeepCopy_v1beta1_HorizontalPodAutoscalerStatus(in.Status, &out.Status, c); err != nil { return err } return nil } -func deepCopy_v1beta1_HorizontalPodAutoscalerList(in HorizontalPodAutoscalerList, out *HorizontalPodAutoscalerList, c *conversion.Cloner) error { - if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { +func DeepCopy_v1beta1_HorizontalPodAutoscalerList(in HorizontalPodAutoscalerList, out *HorizontalPodAutoscalerList, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } - if err := deepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { return err } if in.Items != nil { - out.Items = make([]HorizontalPodAutoscaler, len(in.Items)) - for i := range in.Items { - if err := deepCopy_v1beta1_HorizontalPodAutoscaler(in.Items[i], &out.Items[i], c); err != nil { + in, out := in.Items, &out.Items + *out = make([]HorizontalPodAutoscaler, len(in)) + for i := range in { + if err := DeepCopy_v1beta1_HorizontalPodAutoscaler(in[i], &(*out)[i], c); err != nil { return err } } @@ -1263,20 +431,22 @@ func deepCopy_v1beta1_HorizontalPodAutoscalerList(in HorizontalPodAutoscalerList return nil } -func deepCopy_v1beta1_HorizontalPodAutoscalerSpec(in HorizontalPodAutoscalerSpec, out *HorizontalPodAutoscalerSpec, c *conversion.Cloner) error { - if err := deepCopy_v1beta1_SubresourceReference(in.ScaleRef, &out.ScaleRef, c); err != nil { +func DeepCopy_v1beta1_HorizontalPodAutoscalerSpec(in HorizontalPodAutoscalerSpec, out *HorizontalPodAutoscalerSpec, c *conversion.Cloner) error { + if err := DeepCopy_v1beta1_SubresourceReference(in.ScaleRef, &out.ScaleRef, c); err != nil { return err } if in.MinReplicas != nil { - out.MinReplicas = new(int32) - *out.MinReplicas = *in.MinReplicas + in, out := in.MinReplicas, &out.MinReplicas + *out = new(int32) + **out = *in } else { out.MinReplicas = nil } out.MaxReplicas = in.MaxReplicas if in.CPUUtilization != nil { - out.CPUUtilization = new(CPUTargetUtilization) - if err := deepCopy_v1beta1_CPUTargetUtilization(*in.CPUUtilization, out.CPUUtilization, c); err != nil { + in, out := in.CPUUtilization, &out.CPUUtilization + *out = new(CPUTargetUtilization) + if err := DeepCopy_v1beta1_CPUTargetUtilization(*in, *out, c); err != nil { return err } } else { @@ -1285,16 +455,18 @@ func deepCopy_v1beta1_HorizontalPodAutoscalerSpec(in HorizontalPodAutoscalerSpec return nil } -func deepCopy_v1beta1_HorizontalPodAutoscalerStatus(in HorizontalPodAutoscalerStatus, out *HorizontalPodAutoscalerStatus, c *conversion.Cloner) error { +func DeepCopy_v1beta1_HorizontalPodAutoscalerStatus(in HorizontalPodAutoscalerStatus, out *HorizontalPodAutoscalerStatus, c *conversion.Cloner) error { if in.ObservedGeneration != nil { - out.ObservedGeneration = new(int64) - *out.ObservedGeneration = *in.ObservedGeneration + in, out := in.ObservedGeneration, &out.ObservedGeneration + *out = new(int64) + **out = *in } else { out.ObservedGeneration = nil } if in.LastScaleTime != nil { - out.LastScaleTime = 
new(unversioned.Time) - if err := deepCopy_unversioned_Time(*in.LastScaleTime, out.LastScaleTime, c); err != nil { + in, out := in.LastScaleTime, &out.LastScaleTime + *out = new(unversioned.Time) + if err := unversioned.DeepCopy_unversioned_Time(*in, *out, c); err != nil { return err } } else { @@ -1303,61 +475,63 @@ func deepCopy_v1beta1_HorizontalPodAutoscalerStatus(in HorizontalPodAutoscalerSt out.CurrentReplicas = in.CurrentReplicas out.DesiredReplicas = in.DesiredReplicas if in.CurrentCPUUtilizationPercentage != nil { - out.CurrentCPUUtilizationPercentage = new(int32) - *out.CurrentCPUUtilizationPercentage = *in.CurrentCPUUtilizationPercentage + in, out := in.CurrentCPUUtilizationPercentage, &out.CurrentCPUUtilizationPercentage + *out = new(int32) + **out = *in } else { out.CurrentCPUUtilizationPercentage = nil } return nil } -func deepCopy_v1beta1_HostPortRange(in HostPortRange, out *HostPortRange, c *conversion.Cloner) error { +func DeepCopy_v1beta1_HostPortRange(in HostPortRange, out *HostPortRange, c *conversion.Cloner) error { out.Min = in.Min out.Max = in.Max return nil } -func deepCopy_v1beta1_IDRange(in IDRange, out *IDRange, c *conversion.Cloner) error { +func DeepCopy_v1beta1_IDRange(in IDRange, out *IDRange, c *conversion.Cloner) error { out.Min = in.Min out.Max = in.Max return nil } -func deepCopy_v1beta1_Ingress(in Ingress, out *Ingress, c *conversion.Cloner) error { - if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { +func DeepCopy_v1beta1_Ingress(in Ingress, out *Ingress, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } - if err := deepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + if err := v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { return err } - if err := deepCopy_v1beta1_IngressSpec(in.Spec, &out.Spec, c); err != nil { + if err := DeepCopy_v1beta1_IngressSpec(in.Spec, &out.Spec, c); err != nil { return err } - if err := deepCopy_v1beta1_IngressStatus(in.Status, &out.Status, c); err != nil { + if err := DeepCopy_v1beta1_IngressStatus(in.Status, &out.Status, c); err != nil { return err } return nil } -func deepCopy_v1beta1_IngressBackend(in IngressBackend, out *IngressBackend, c *conversion.Cloner) error { +func DeepCopy_v1beta1_IngressBackend(in IngressBackend, out *IngressBackend, c *conversion.Cloner) error { out.ServiceName = in.ServiceName - if err := deepCopy_intstr_IntOrString(in.ServicePort, &out.ServicePort, c); err != nil { + if err := intstr.DeepCopy_intstr_IntOrString(in.ServicePort, &out.ServicePort, c); err != nil { return err } return nil } -func deepCopy_v1beta1_IngressList(in IngressList, out *IngressList, c *conversion.Cloner) error { - if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { +func DeepCopy_v1beta1_IngressList(in IngressList, out *IngressList, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } - if err := deepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { return err } if in.Items != nil { - out.Items = make([]Ingress, len(in.Items)) - for i := range in.Items { - if err := deepCopy_v1beta1_Ingress(in.Items[i], &out.Items[i], c); err != nil { + in, out := in.Items, &out.Items + *out = make([]Ingress, len(in)) + for i 
:= range in { + if err := DeepCopy_v1beta1_Ingress(in[i], &(*out)[i], c); err != nil { return err } } @@ -1367,18 +541,19 @@ func deepCopy_v1beta1_IngressList(in IngressList, out *IngressList, c *conversio return nil } -func deepCopy_v1beta1_IngressRule(in IngressRule, out *IngressRule, c *conversion.Cloner) error { +func DeepCopy_v1beta1_IngressRule(in IngressRule, out *IngressRule, c *conversion.Cloner) error { out.Host = in.Host - if err := deepCopy_v1beta1_IngressRuleValue(in.IngressRuleValue, &out.IngressRuleValue, c); err != nil { + if err := DeepCopy_v1beta1_IngressRuleValue(in.IngressRuleValue, &out.IngressRuleValue, c); err != nil { return err } return nil } -func deepCopy_v1beta1_IngressRuleValue(in IngressRuleValue, out *IngressRuleValue, c *conversion.Cloner) error { +func DeepCopy_v1beta1_IngressRuleValue(in IngressRuleValue, out *IngressRuleValue, c *conversion.Cloner) error { if in.HTTP != nil { - out.HTTP = new(HTTPIngressRuleValue) - if err := deepCopy_v1beta1_HTTPIngressRuleValue(*in.HTTP, out.HTTP, c); err != nil { + in, out := in.HTTP, &out.HTTP + *out = new(HTTPIngressRuleValue) + if err := DeepCopy_v1beta1_HTTPIngressRuleValue(*in, *out, c); err != nil { return err } } else { @@ -1387,19 +562,21 @@ func deepCopy_v1beta1_IngressRuleValue(in IngressRuleValue, out *IngressRuleValu return nil } -func deepCopy_v1beta1_IngressSpec(in IngressSpec, out *IngressSpec, c *conversion.Cloner) error { +func DeepCopy_v1beta1_IngressSpec(in IngressSpec, out *IngressSpec, c *conversion.Cloner) error { if in.Backend != nil { - out.Backend = new(IngressBackend) - if err := deepCopy_v1beta1_IngressBackend(*in.Backend, out.Backend, c); err != nil { + in, out := in.Backend, &out.Backend + *out = new(IngressBackend) + if err := DeepCopy_v1beta1_IngressBackend(*in, *out, c); err != nil { return err } } else { out.Backend = nil } if in.TLS != nil { - out.TLS = make([]IngressTLS, len(in.TLS)) - for i := range in.TLS { - if err := deepCopy_v1beta1_IngressTLS(in.TLS[i], &out.TLS[i], c); err != nil { + in, out := in.TLS, &out.TLS + *out = make([]IngressTLS, len(in)) + for i := range in { + if err := DeepCopy_v1beta1_IngressTLS(in[i], &(*out)[i], c); err != nil { return err } } @@ -1407,9 +584,10 @@ func deepCopy_v1beta1_IngressSpec(in IngressSpec, out *IngressSpec, c *conversio out.TLS = nil } if in.Rules != nil { - out.Rules = make([]IngressRule, len(in.Rules)) - for i := range in.Rules { - if err := deepCopy_v1beta1_IngressRule(in.Rules[i], &out.Rules[i], c); err != nil { + in, out := in.Rules, &out.Rules + *out = make([]IngressRule, len(in)) + for i := range in { + if err := DeepCopy_v1beta1_IngressRule(in[i], &(*out)[i], c); err != nil { return err } } @@ -1419,19 +597,18 @@ func deepCopy_v1beta1_IngressSpec(in IngressSpec, out *IngressSpec, c *conversio return nil } -func deepCopy_v1beta1_IngressStatus(in IngressStatus, out *IngressStatus, c *conversion.Cloner) error { - if err := deepCopy_v1_LoadBalancerStatus(in.LoadBalancer, &out.LoadBalancer, c); err != nil { +func DeepCopy_v1beta1_IngressStatus(in IngressStatus, out *IngressStatus, c *conversion.Cloner) error { + if err := v1.DeepCopy_v1_LoadBalancerStatus(in.LoadBalancer, &out.LoadBalancer, c); err != nil { return err } return nil } -func deepCopy_v1beta1_IngressTLS(in IngressTLS, out *IngressTLS, c *conversion.Cloner) error { +func DeepCopy_v1beta1_IngressTLS(in IngressTLS, out *IngressTLS, c *conversion.Cloner) error { if in.Hosts != nil { - out.Hosts = make([]string, len(in.Hosts)) - for i := range in.Hosts { - out.Hosts[i] = 
in.Hosts[i] - } + in, out := in.Hosts, &out.Hosts + *out = make([]string, len(in)) + copy(*out, in) } else { out.Hosts = nil } @@ -1439,29 +616,29 @@ func deepCopy_v1beta1_IngressTLS(in IngressTLS, out *IngressTLS, c *conversion.C return nil } -func deepCopy_v1beta1_Job(in Job, out *Job, c *conversion.Cloner) error { - if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { +func DeepCopy_v1beta1_Job(in Job, out *Job, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } - if err := deepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + if err := v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { return err } - if err := deepCopy_v1beta1_JobSpec(in.Spec, &out.Spec, c); err != nil { + if err := DeepCopy_v1beta1_JobSpec(in.Spec, &out.Spec, c); err != nil { return err } - if err := deepCopy_v1beta1_JobStatus(in.Status, &out.Status, c); err != nil { + if err := DeepCopy_v1beta1_JobStatus(in.Status, &out.Status, c); err != nil { return err } return nil } -func deepCopy_v1beta1_JobCondition(in JobCondition, out *JobCondition, c *conversion.Cloner) error { +func DeepCopy_v1beta1_JobCondition(in JobCondition, out *JobCondition, c *conversion.Cloner) error { out.Type = in.Type out.Status = in.Status - if err := deepCopy_unversioned_Time(in.LastProbeTime, &out.LastProbeTime, c); err != nil { + if err := unversioned.DeepCopy_unversioned_Time(in.LastProbeTime, &out.LastProbeTime, c); err != nil { return err } - if err := deepCopy_unversioned_Time(in.LastTransitionTime, &out.LastTransitionTime, c); err != nil { + if err := unversioned.DeepCopy_unversioned_Time(in.LastTransitionTime, &out.LastTransitionTime, c); err != nil { return err } out.Reason = in.Reason @@ -1469,17 +646,18 @@ func deepCopy_v1beta1_JobCondition(in JobCondition, out *JobCondition, c *conver return nil } -func deepCopy_v1beta1_JobList(in JobList, out *JobList, c *conversion.Cloner) error { - if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { +func DeepCopy_v1beta1_JobList(in JobList, out *JobList, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } - if err := deepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { return err } if in.Items != nil { - out.Items = make([]Job, len(in.Items)) - for i := range in.Items { - if err := deepCopy_v1beta1_Job(in.Items[i], &out.Items[i], c); err != nil { + in, out := in.Items, &out.Items + *out = make([]Job, len(in)) + for i := range in { + if err := DeepCopy_v1beta1_Job(in[i], &(*out)[i], c); err != nil { return err } } @@ -1489,50 +667,56 @@ func deepCopy_v1beta1_JobList(in JobList, out *JobList, c *conversion.Cloner) er return nil } -func deepCopy_v1beta1_JobSpec(in JobSpec, out *JobSpec, c *conversion.Cloner) error { +func DeepCopy_v1beta1_JobSpec(in JobSpec, out *JobSpec, c *conversion.Cloner) error { if in.Parallelism != nil { - out.Parallelism = new(int32) - *out.Parallelism = *in.Parallelism + in, out := in.Parallelism, &out.Parallelism + *out = new(int32) + **out = *in } else { out.Parallelism = nil } if in.Completions != nil { - out.Completions = new(int32) - *out.Completions = *in.Completions + in, out := in.Completions, &out.Completions + *out = new(int32) + **out = *in } else { 
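Two slice shapes appear in these hunks: slices of plain values such as `Hosts` are now copied with a single `copy(*out, in)`, while slices whose elements carry pointers keep the per-element `DeepCopy_*` loop. A stand-alone sketch (stand-in types, not the vendored ones) of why `copy` alone is not enough in the second case:

package main

import "fmt"

// Rule stands in for an element type that carries a pointer.
type Rule struct{ Backend *string }

func main() {
    // Elements with no pointers ([]string, []byte): make + copy is
    // already a deep copy, which is what the new code emits for
    // Hosts, Values and Data.
    hosts := []string{"a.example.com"}
    hostsCopy := make([]string, len(hosts))
    copy(hostsCopy, hosts)
    hosts[0] = "b.example.com"
    fmt.Println(hostsCopy[0]) // a.example.com

    // Pointer-bearing elements: copy duplicates the pointers, not what
    // they point at, so the generator keeps the per-element loop.
    name := "svc"
    rules := []Rule{{Backend: &name}}

    shallow := make([]Rule, len(rules))
    copy(shallow, rules)

    deep := make([]Rule, len(rules))
    for i := range rules {
        v := *rules[i].Backend
        deep[i] = Rule{Backend: &v} // per-element deep copy
    }

    *rules[0].Backend = "other"
    fmt.Println(*shallow[0].Backend) // other (still aliased to rules)
    fmt.Println(*deep[0].Backend)    // svc   (independent copy)
}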
out.Completions = nil } if in.ActiveDeadlineSeconds != nil { - out.ActiveDeadlineSeconds = new(int64) - *out.ActiveDeadlineSeconds = *in.ActiveDeadlineSeconds + in, out := in.ActiveDeadlineSeconds, &out.ActiveDeadlineSeconds + *out = new(int64) + **out = *in } else { out.ActiveDeadlineSeconds = nil } if in.Selector != nil { - out.Selector = new(LabelSelector) - if err := deepCopy_v1beta1_LabelSelector(*in.Selector, out.Selector, c); err != nil { + in, out := in.Selector, &out.Selector + *out = new(LabelSelector) + if err := DeepCopy_v1beta1_LabelSelector(*in, *out, c); err != nil { return err } } else { out.Selector = nil } if in.AutoSelector != nil { - out.AutoSelector = new(bool) - *out.AutoSelector = *in.AutoSelector + in, out := in.AutoSelector, &out.AutoSelector + *out = new(bool) + **out = *in } else { out.AutoSelector = nil } - if err := deepCopy_v1_PodTemplateSpec(in.Template, &out.Template, c); err != nil { + if err := v1.DeepCopy_v1_PodTemplateSpec(in.Template, &out.Template, c); err != nil { return err } return nil } -func deepCopy_v1beta1_JobStatus(in JobStatus, out *JobStatus, c *conversion.Cloner) error { +func DeepCopy_v1beta1_JobStatus(in JobStatus, out *JobStatus, c *conversion.Cloner) error { if in.Conditions != nil { - out.Conditions = make([]JobCondition, len(in.Conditions)) - for i := range in.Conditions { - if err := deepCopy_v1beta1_JobCondition(in.Conditions[i], &out.Conditions[i], c); err != nil { + in, out := in.Conditions, &out.Conditions + *out = make([]JobCondition, len(in)) + for i := range in { + if err := DeepCopy_v1beta1_JobCondition(in[i], &(*out)[i], c); err != nil { return err } } @@ -1540,16 +724,18 @@ func deepCopy_v1beta1_JobStatus(in JobStatus, out *JobStatus, c *conversion.Clon out.Conditions = nil } if in.StartTime != nil { - out.StartTime = new(unversioned.Time) - if err := deepCopy_unversioned_Time(*in.StartTime, out.StartTime, c); err != nil { + in, out := in.StartTime, &out.StartTime + *out = new(unversioned.Time) + if err := unversioned.DeepCopy_unversioned_Time(*in, *out, c); err != nil { return err } } else { out.StartTime = nil } if in.CompletionTime != nil { - out.CompletionTime = new(unversioned.Time) - if err := deepCopy_unversioned_Time(*in.CompletionTime, out.CompletionTime, c); err != nil { + in, out := in.CompletionTime, &out.CompletionTime + *out = new(unversioned.Time) + if err := unversioned.DeepCopy_unversioned_Time(*in, *out, c); err != nil { return err } } else { @@ -1561,19 +747,21 @@ func deepCopy_v1beta1_JobStatus(in JobStatus, out *JobStatus, c *conversion.Clon return nil } -func deepCopy_v1beta1_LabelSelector(in LabelSelector, out *LabelSelector, c *conversion.Cloner) error { +func DeepCopy_v1beta1_LabelSelector(in LabelSelector, out *LabelSelector, c *conversion.Cloner) error { if in.MatchLabels != nil { - out.MatchLabels = make(map[string]string) - for key, val := range in.MatchLabels { - out.MatchLabels[key] = val + in, out := in.MatchLabels, &out.MatchLabels + *out = make(map[string]string) + for key, val := range in { + (*out)[key] = val } } else { out.MatchLabels = nil } if in.MatchExpressions != nil { - out.MatchExpressions = make([]LabelSelectorRequirement, len(in.MatchExpressions)) - for i := range in.MatchExpressions { - if err := deepCopy_v1beta1_LabelSelectorRequirement(in.MatchExpressions[i], &out.MatchExpressions[i], c); err != nil { + in, out := in.MatchExpressions, &out.MatchExpressions + *out = make([]LabelSelectorRequirement, len(in)) + for i := range in { + if err := 
DeepCopy_v1beta1_LabelSelectorRequirement(in[i], &(*out)[i], c); err != nil { return err } } @@ -1583,22 +771,21 @@ func deepCopy_v1beta1_LabelSelector(in LabelSelector, out *LabelSelector, c *con return nil } -func deepCopy_v1beta1_LabelSelectorRequirement(in LabelSelectorRequirement, out *LabelSelectorRequirement, c *conversion.Cloner) error { +func DeepCopy_v1beta1_LabelSelectorRequirement(in LabelSelectorRequirement, out *LabelSelectorRequirement, c *conversion.Cloner) error { out.Key = in.Key out.Operator = in.Operator if in.Values != nil { - out.Values = make([]string, len(in.Values)) - for i := range in.Values { - out.Values[i] = in.Values[i] - } + in, out := in.Values, &out.Values + *out = make([]string, len(in)) + copy(*out, in) } else { out.Values = nil } return nil } -func deepCopy_v1beta1_ListOptions(in ListOptions, out *ListOptions, c *conversion.Cloner) error { - if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { +func DeepCopy_v1beta1_ListOptions(in ListOptions, out *ListOptions, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } out.LabelSelector = in.LabelSelector @@ -1606,38 +793,164 @@ func deepCopy_v1beta1_ListOptions(in ListOptions, out *ListOptions, c *conversio out.Watch = in.Watch out.ResourceVersion = in.ResourceVersion if in.TimeoutSeconds != nil { - out.TimeoutSeconds = new(int64) - *out.TimeoutSeconds = *in.TimeoutSeconds + in, out := in.TimeoutSeconds, &out.TimeoutSeconds + *out = new(int64) + **out = *in } else { out.TimeoutSeconds = nil } return nil } -func deepCopy_v1beta1_PodSecurityPolicy(in PodSecurityPolicy, out *PodSecurityPolicy, c *conversion.Cloner) error { - if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { +func DeepCopy_v1beta1_NetworkPolicy(in NetworkPolicy, out *NetworkPolicy, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + return err + } + if err := DeepCopy_v1beta1_NetworkPolicySpec(in.Spec, &out.Spec, c); err != nil { + return err + } + return nil +} + +func DeepCopy_v1beta1_NetworkPolicyIngressRule(in NetworkPolicyIngressRule, out *NetworkPolicyIngressRule, c *conversion.Cloner) error { + if in.Ports != nil { + in, out := in.Ports, &out.Ports + *out = make([]NetworkPolicyPort, len(in)) + for i := range in { + if err := DeepCopy_v1beta1_NetworkPolicyPort(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Ports = nil + } + if in.From != nil { + in, out := in.From, &out.From + *out = make([]NetworkPolicyPeer, len(in)) + for i := range in { + if err := DeepCopy_v1beta1_NetworkPolicyPeer(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.From = nil + } + return nil +} + +func DeepCopy_v1beta1_NetworkPolicyList(in NetworkPolicyList, out *NetworkPolicyList, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + return err + } + if in.Items != nil { + in, out := in.Items, &out.Items + *out = make([]NetworkPolicy, len(in)) + for i := range in { + if err := DeepCopy_v1beta1_NetworkPolicy(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Items = nil + } + 
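`DeepCopy_v1beta1_LabelSelector` above rebuilds `MatchLabels` with `make` plus a key-by-key loop because Go maps are reference types: assigning the field directly would leave original and copy sharing one underlying table. A tiny illustration with hypothetical label sets:

package main

import "fmt"

func main() {
    src := map[string]string{"app": "web"}

    aliased := src // plain assignment: both names share one map
    copied := make(map[string]string, len(src))
    for k, v := range src {
        copied[k] = v
    }

    src["app"] = "db"
    fmt.Println(aliased["app"]) // db  (mutated through the alias)
    fmt.Println(copied["app"])  // web (unaffected)
}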
return nil +} + +func DeepCopy_v1beta1_NetworkPolicyPeer(in NetworkPolicyPeer, out *NetworkPolicyPeer, c *conversion.Cloner) error { + if in.PodSelector != nil { + in, out := in.PodSelector, &out.PodSelector + *out = new(LabelSelector) + if err := DeepCopy_v1beta1_LabelSelector(*in, *out, c); err != nil { + return err + } + } else { + out.PodSelector = nil + } + if in.NamespaceSelector != nil { + in, out := in.NamespaceSelector, &out.NamespaceSelector + *out = new(LabelSelector) + if err := DeepCopy_v1beta1_LabelSelector(*in, *out, c); err != nil { + return err + } + } else { + out.NamespaceSelector = nil + } + return nil +} + +func DeepCopy_v1beta1_NetworkPolicyPort(in NetworkPolicyPort, out *NetworkPolicyPort, c *conversion.Cloner) error { + if in.Protocol != nil { + in, out := in.Protocol, &out.Protocol + *out = new(v1.Protocol) + if newVal, err := c.DeepCopy(*in); err != nil { + return err + } else { + **out = newVal.(v1.Protocol) + } + } else { + out.Protocol = nil + } + if in.Port != nil { + in, out := in.Port, &out.Port + *out = new(intstr.IntOrString) + if err := intstr.DeepCopy_intstr_IntOrString(*in, *out, c); err != nil { + return err + } + } else { + out.Port = nil + } + return nil +} + +func DeepCopy_v1beta1_NetworkPolicySpec(in NetworkPolicySpec, out *NetworkPolicySpec, c *conversion.Cloner) error { + if err := DeepCopy_v1beta1_LabelSelector(in.PodSelector, &out.PodSelector, c); err != nil { + return err + } + if in.Ingress != nil { + in, out := in.Ingress, &out.Ingress + *out = make([]NetworkPolicyIngressRule, len(in)) + for i := range in { + if err := DeepCopy_v1beta1_NetworkPolicyIngressRule(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Ingress = nil + } + return nil +} + +func DeepCopy_v1beta1_PodSecurityPolicy(in PodSecurityPolicy, out *PodSecurityPolicy, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } - if err := deepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + if err := v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { return err } - if err := deepCopy_v1beta1_PodSecurityPolicySpec(in.Spec, &out.Spec, c); err != nil { + if err := DeepCopy_v1beta1_PodSecurityPolicySpec(in.Spec, &out.Spec, c); err != nil { return err } return nil } -func deepCopy_v1beta1_PodSecurityPolicyList(in PodSecurityPolicyList, out *PodSecurityPolicyList, c *conversion.Cloner) error { - if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { +func DeepCopy_v1beta1_PodSecurityPolicyList(in PodSecurityPolicyList, out *PodSecurityPolicyList, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } - if err := deepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { return err } if in.Items != nil { - out.Items = make([]PodSecurityPolicy, len(in.Items)) - for i := range in.Items { - if err := deepCopy_v1beta1_PodSecurityPolicy(in.Items[i], &out.Items[i], c); err != nil { + in, out := in.Items, &out.Items + *out = make([]PodSecurityPolicy, len(in)) + for i := range in { + if err := DeepCopy_v1beta1_PodSecurityPolicy(in[i], &(*out)[i], c); err != nil { return err } } @@ -1647,29 +960,50 @@ func deepCopy_v1beta1_PodSecurityPolicyList(in PodSecurityPolicyList, out *PodSe return nil } -func 
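`DeepCopy_v1beta1_NetworkPolicyPort` above treats `Protocol` specially: the type lives in the `v1` package and has no generated helper in this file, so the code falls back to the cloner's catch-all `c.DeepCopy`, which returns an `interface{}` that must be asserted back to the concrete type. A toy sketch of that shape; `deepCopyFn` is invented for illustration and is not the real `conversion.Cloner` API:

package main

import "fmt"

// Protocol stands in for v1.Protocol (a defined string type).
type Protocol string

// deepCopyFn mimics the catch-all entry point: interface{} in,
// interface{} out, error for unsupported types.
type deepCopyFn func(in interface{}) (interface{}, error)

func main() {
    // For a simple defined type, returning the value is already a copy.
    clone := deepCopyFn(func(in interface{}) (interface{}, error) { return in, nil })

    p := Protocol("TCP")
    out := new(Protocol)
    // The generated shape: clone through interface{}, then assert back.
    if newVal, err := clone(p); err != nil {
        panic(err)
    } else {
        *out = newVal.(Protocol)
    }
    fmt.Println(*out) // TCP
}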
deepCopy_v1beta1_PodSecurityPolicySpec(in PodSecurityPolicySpec, out *PodSecurityPolicySpec, c *conversion.Cloner) error { +func DeepCopy_v1beta1_PodSecurityPolicySpec(in PodSecurityPolicySpec, out *PodSecurityPolicySpec, c *conversion.Cloner) error { out.Privileged = in.Privileged - if in.Capabilities != nil { - out.Capabilities = make([]v1.Capability, len(in.Capabilities)) - for i := range in.Capabilities { - out.Capabilities[i] = in.Capabilities[i] + if in.DefaultAddCapabilities != nil { + in, out := in.DefaultAddCapabilities, &out.DefaultAddCapabilities + *out = make([]v1.Capability, len(in)) + for i := range in { + (*out)[i] = in[i] } } else { - out.Capabilities = nil + out.DefaultAddCapabilities = nil + } + if in.RequiredDropCapabilities != nil { + in, out := in.RequiredDropCapabilities, &out.RequiredDropCapabilities + *out = make([]v1.Capability, len(in)) + for i := range in { + (*out)[i] = in[i] + } + } else { + out.RequiredDropCapabilities = nil + } + if in.AllowedCapabilities != nil { + in, out := in.AllowedCapabilities, &out.AllowedCapabilities + *out = make([]v1.Capability, len(in)) + for i := range in { + (*out)[i] = in[i] + } + } else { + out.AllowedCapabilities = nil } if in.Volumes != nil { - out.Volumes = make([]FSType, len(in.Volumes)) - for i := range in.Volumes { - out.Volumes[i] = in.Volumes[i] + in, out := in.Volumes, &out.Volumes + *out = make([]FSType, len(in)) + for i := range in { + (*out)[i] = in[i] } } else { out.Volumes = nil } out.HostNetwork = in.HostNetwork if in.HostPorts != nil { - out.HostPorts = make([]HostPortRange, len(in.HostPorts)) - for i := range in.HostPorts { - if err := deepCopy_v1beta1_HostPortRange(in.HostPorts[i], &out.HostPorts[i], c); err != nil { + in, out := in.HostPorts, &out.HostPorts + *out = make([]HostPortRange, len(in)) + for i := range in { + if err := DeepCopy_v1beta1_HostPortRange(in[i], &(*out)[i], c); err != nil { return err } } @@ -1678,42 +1012,50 @@ func deepCopy_v1beta1_PodSecurityPolicySpec(in PodSecurityPolicySpec, out *PodSe } out.HostPID = in.HostPID out.HostIPC = in.HostIPC - if err := deepCopy_v1beta1_SELinuxStrategyOptions(in.SELinux, &out.SELinux, c); err != nil { + if err := DeepCopy_v1beta1_SELinuxStrategyOptions(in.SELinux, &out.SELinux, c); err != nil { + return err + } + if err := DeepCopy_v1beta1_RunAsUserStrategyOptions(in.RunAsUser, &out.RunAsUser, c); err != nil { return err } - if err := deepCopy_v1beta1_RunAsUserStrategyOptions(in.RunAsUser, &out.RunAsUser, c); err != nil { + if err := DeepCopy_v1beta1_SupplementalGroupsStrategyOptions(in.SupplementalGroups, &out.SupplementalGroups, c); err != nil { return err } + if err := DeepCopy_v1beta1_FSGroupStrategyOptions(in.FSGroup, &out.FSGroup, c); err != nil { + return err + } + out.ReadOnlyRootFilesystem = in.ReadOnlyRootFilesystem return nil } -func deepCopy_v1beta1_ReplicaSet(in ReplicaSet, out *ReplicaSet, c *conversion.Cloner) error { - if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { +func DeepCopy_v1beta1_ReplicaSet(in ReplicaSet, out *ReplicaSet, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } - if err := deepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + if err := v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { return err } - if err := deepCopy_v1beta1_ReplicaSetSpec(in.Spec, &out.Spec, c); err != nil { + if err := DeepCopy_v1beta1_ReplicaSetSpec(in.Spec, &out.Spec, c); err 
!= nil { return err } - if err := deepCopy_v1beta1_ReplicaSetStatus(in.Status, &out.Status, c); err != nil { + if err := DeepCopy_v1beta1_ReplicaSetStatus(in.Status, &out.Status, c); err != nil { return err } return nil } -func deepCopy_v1beta1_ReplicaSetList(in ReplicaSetList, out *ReplicaSetList, c *conversion.Cloner) error { - if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { +func DeepCopy_v1beta1_ReplicaSetList(in ReplicaSetList, out *ReplicaSetList, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } - if err := deepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { return err } if in.Items != nil { - out.Items = make([]ReplicaSet, len(in.Items)) - for i := range in.Items { - if err := deepCopy_v1beta1_ReplicaSet(in.Items[i], &out.Items[i], c); err != nil { + in, out := in.Items, &out.Items + *out = make([]ReplicaSet, len(in)) + for i := range in { + if err := DeepCopy_v1beta1_ReplicaSet(in[i], &(*out)[i], c); err != nil { return err } } @@ -1723,58 +1065,62 @@ func deepCopy_v1beta1_ReplicaSetList(in ReplicaSetList, out *ReplicaSetList, c * return nil } -func deepCopy_v1beta1_ReplicaSetSpec(in ReplicaSetSpec, out *ReplicaSetSpec, c *conversion.Cloner) error { +func DeepCopy_v1beta1_ReplicaSetSpec(in ReplicaSetSpec, out *ReplicaSetSpec, c *conversion.Cloner) error { if in.Replicas != nil { - out.Replicas = new(int32) - *out.Replicas = *in.Replicas + in, out := in.Replicas, &out.Replicas + *out = new(int32) + **out = *in } else { out.Replicas = nil } if in.Selector != nil { - out.Selector = new(LabelSelector) - if err := deepCopy_v1beta1_LabelSelector(*in.Selector, out.Selector, c); err != nil { + in, out := in.Selector, &out.Selector + *out = new(LabelSelector) + if err := DeepCopy_v1beta1_LabelSelector(*in, *out, c); err != nil { return err } } else { out.Selector = nil } - if err := deepCopy_v1_PodTemplateSpec(in.Template, &out.Template, c); err != nil { + if err := v1.DeepCopy_v1_PodTemplateSpec(in.Template, &out.Template, c); err != nil { return err } return nil } -func deepCopy_v1beta1_ReplicaSetStatus(in ReplicaSetStatus, out *ReplicaSetStatus, c *conversion.Cloner) error { +func DeepCopy_v1beta1_ReplicaSetStatus(in ReplicaSetStatus, out *ReplicaSetStatus, c *conversion.Cloner) error { out.Replicas = in.Replicas out.FullyLabeledReplicas = in.FullyLabeledReplicas out.ObservedGeneration = in.ObservedGeneration return nil } -func deepCopy_v1beta1_ReplicationControllerDummy(in ReplicationControllerDummy, out *ReplicationControllerDummy, c *conversion.Cloner) error { - if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { +func DeepCopy_v1beta1_ReplicationControllerDummy(in ReplicationControllerDummy, out *ReplicationControllerDummy, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } return nil } -func deepCopy_v1beta1_RollbackConfig(in RollbackConfig, out *RollbackConfig, c *conversion.Cloner) error { +func DeepCopy_v1beta1_RollbackConfig(in RollbackConfig, out *RollbackConfig, c *conversion.Cloner) error { out.Revision = in.Revision return nil } -func deepCopy_v1beta1_RollingUpdateDeployment(in RollingUpdateDeployment, out *RollingUpdateDeployment, c *conversion.Cloner) error { +func 
DeepCopy_v1beta1_RollingUpdateDeployment(in RollingUpdateDeployment, out *RollingUpdateDeployment, c *conversion.Cloner) error { if in.MaxUnavailable != nil { - out.MaxUnavailable = new(intstr.IntOrString) - if err := deepCopy_intstr_IntOrString(*in.MaxUnavailable, out.MaxUnavailable, c); err != nil { + in, out := in.MaxUnavailable, &out.MaxUnavailable + *out = new(intstr.IntOrString) + if err := intstr.DeepCopy_intstr_IntOrString(*in, *out, c); err != nil { return err } } else { out.MaxUnavailable = nil } if in.MaxSurge != nil { - out.MaxSurge = new(intstr.IntOrString) - if err := deepCopy_intstr_IntOrString(*in.MaxSurge, out.MaxSurge, c); err != nil { + in, out := in.MaxSurge, &out.MaxSurge + *out = new(intstr.IntOrString) + if err := intstr.DeepCopy_intstr_IntOrString(*in, *out, c); err != nil { return err } } else { @@ -1783,12 +1129,13 @@ func deepCopy_v1beta1_RollingUpdateDeployment(in RollingUpdateDeployment, out *R return nil } -func deepCopy_v1beta1_RunAsUserStrategyOptions(in RunAsUserStrategyOptions, out *RunAsUserStrategyOptions, c *conversion.Cloner) error { +func DeepCopy_v1beta1_RunAsUserStrategyOptions(in RunAsUserStrategyOptions, out *RunAsUserStrategyOptions, c *conversion.Cloner) error { out.Rule = in.Rule if in.Ranges != nil { - out.Ranges = make([]IDRange, len(in.Ranges)) - for i := range in.Ranges { - if err := deepCopy_v1beta1_IDRange(in.Ranges[i], &out.Ranges[i], c); err != nil { + in, out := in.Ranges, &out.Ranges + *out = make([]IDRange, len(in)) + for i := range in { + if err := DeepCopy_v1beta1_IDRange(in[i], &(*out)[i], c); err != nil { return err } } @@ -1798,11 +1145,12 @@ func deepCopy_v1beta1_RunAsUserStrategyOptions(in RunAsUserStrategyOptions, out return nil } -func deepCopy_v1beta1_SELinuxStrategyOptions(in SELinuxStrategyOptions, out *SELinuxStrategyOptions, c *conversion.Cloner) error { +func DeepCopy_v1beta1_SELinuxStrategyOptions(in SELinuxStrategyOptions, out *SELinuxStrategyOptions, c *conversion.Cloner) error { out.Rule = in.Rule if in.SELinuxOptions != nil { - out.SELinuxOptions = new(v1.SELinuxOptions) - if err := deepCopy_v1_SELinuxOptions(*in.SELinuxOptions, out.SELinuxOptions, c); err != nil { + in, out := in.SELinuxOptions, &out.SELinuxOptions + *out = new(v1.SELinuxOptions) + if err := v1.DeepCopy_v1_SELinuxOptions(*in, *out, c); err != nil { return err } } else { @@ -1811,33 +1159,34 @@ func deepCopy_v1beta1_SELinuxStrategyOptions(in SELinuxStrategyOptions, out *SEL return nil } -func deepCopy_v1beta1_Scale(in Scale, out *Scale, c *conversion.Cloner) error { - if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { +func DeepCopy_v1beta1_Scale(in Scale, out *Scale, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } - if err := deepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + if err := v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { return err } - if err := deepCopy_v1beta1_ScaleSpec(in.Spec, &out.Spec, c); err != nil { + if err := DeepCopy_v1beta1_ScaleSpec(in.Spec, &out.Spec, c); err != nil { return err } - if err := deepCopy_v1beta1_ScaleStatus(in.Status, &out.Status, c); err != nil { + if err := DeepCopy_v1beta1_ScaleStatus(in.Status, &out.Status, c); err != nil { return err } return nil } -func deepCopy_v1beta1_ScaleSpec(in ScaleSpec, out *ScaleSpec, c *conversion.Cloner) error { +func DeepCopy_v1beta1_ScaleSpec(in ScaleSpec, out *ScaleSpec, c *conversion.Cloner) 
error { out.Replicas = in.Replicas return nil } -func deepCopy_v1beta1_ScaleStatus(in ScaleStatus, out *ScaleStatus, c *conversion.Cloner) error { +func DeepCopy_v1beta1_ScaleStatus(in ScaleStatus, out *ScaleStatus, c *conversion.Cloner) error { out.Replicas = in.Replicas if in.Selector != nil { - out.Selector = make(map[string]string) - for key, val := range in.Selector { - out.Selector[key] = val + in, out := in.Selector, &out.Selector + *out = make(map[string]string) + for key, val := range in { + (*out)[key] = val } } else { out.Selector = nil @@ -1846,7 +1195,7 @@ func deepCopy_v1beta1_ScaleStatus(in ScaleStatus, out *ScaleStatus, c *conversio return nil } -func deepCopy_v1beta1_SubresourceReference(in SubresourceReference, out *SubresourceReference, c *conversion.Cloner) error { +func DeepCopy_v1beta1_SubresourceReference(in SubresourceReference, out *SubresourceReference, c *conversion.Cloner) error { out.Kind = in.Kind out.Name = in.Name out.APIVersion = in.APIVersion @@ -1854,18 +1203,35 @@ func deepCopy_v1beta1_SubresourceReference(in SubresourceReference, out *Subreso return nil } -func deepCopy_v1beta1_ThirdPartyResource(in ThirdPartyResource, out *ThirdPartyResource, c *conversion.Cloner) error { - if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { +func DeepCopy_v1beta1_SupplementalGroupsStrategyOptions(in SupplementalGroupsStrategyOptions, out *SupplementalGroupsStrategyOptions, c *conversion.Cloner) error { + out.Rule = in.Rule + if in.Ranges != nil { + in, out := in.Ranges, &out.Ranges + *out = make([]IDRange, len(in)) + for i := range in { + if err := DeepCopy_v1beta1_IDRange(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Ranges = nil + } + return nil +} + +func DeepCopy_v1beta1_ThirdPartyResource(in ThirdPartyResource, out *ThirdPartyResource, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } - if err := deepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + if err := v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { return err } out.Description = in.Description if in.Versions != nil { - out.Versions = make([]APIVersion, len(in.Versions)) - for i := range in.Versions { - if err := deepCopy_v1beta1_APIVersion(in.Versions[i], &out.Versions[i], c); err != nil { + in, out := in.Versions, &out.Versions + *out = make([]APIVersion, len(in)) + for i := range in { + if err := DeepCopy_v1beta1_APIVersion(in[i], &(*out)[i], c); err != nil { return err } } @@ -1875,35 +1241,35 @@ func deepCopy_v1beta1_ThirdPartyResource(in ThirdPartyResource, out *ThirdPartyR return nil } -func deepCopy_v1beta1_ThirdPartyResourceData(in ThirdPartyResourceData, out *ThirdPartyResourceData, c *conversion.Cloner) error { - if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { +func DeepCopy_v1beta1_ThirdPartyResourceData(in ThirdPartyResourceData, out *ThirdPartyResourceData, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } - if err := deepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + if err := v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { return err } if in.Data != nil { - out.Data = make([]uint8, len(in.Data)) - for i := range in.Data { - out.Data[i] = in.Data[i] - } + in, out := in.Data, &out.Data + *out = make([]byte, len(in)) + 
copy(*out, in) } else { out.Data = nil } return nil } -func deepCopy_v1beta1_ThirdPartyResourceDataList(in ThirdPartyResourceDataList, out *ThirdPartyResourceDataList, c *conversion.Cloner) error { - if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { +func DeepCopy_v1beta1_ThirdPartyResourceDataList(in ThirdPartyResourceDataList, out *ThirdPartyResourceDataList, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } - if err := deepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { return err } if in.Items != nil { - out.Items = make([]ThirdPartyResourceData, len(in.Items)) - for i := range in.Items { - if err := deepCopy_v1beta1_ThirdPartyResourceData(in.Items[i], &out.Items[i], c); err != nil { + in, out := in.Items, &out.Items + *out = make([]ThirdPartyResourceData, len(in)) + for i := range in { + if err := DeepCopy_v1beta1_ThirdPartyResourceData(in[i], &(*out)[i], c); err != nil { return err } } @@ -1913,17 +1279,18 @@ func deepCopy_v1beta1_ThirdPartyResourceDataList(in ThirdPartyResourceDataList, return nil } -func deepCopy_v1beta1_ThirdPartyResourceList(in ThirdPartyResourceList, out *ThirdPartyResourceList, c *conversion.Cloner) error { - if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { +func DeepCopy_v1beta1_ThirdPartyResourceList(in ThirdPartyResourceList, out *ThirdPartyResourceList, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } - if err := deepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { return err } if in.Items != nil { - out.Items = make([]ThirdPartyResource, len(in.Items)) - for i := range in.Items { - if err := deepCopy_v1beta1_ThirdPartyResource(in.Items[i], &out.Items[i], c); err != nil { + in, out := in.Items, &out.Items + *out = make([]ThirdPartyResource, len(in)) + for i := range in { + if err := DeepCopy_v1beta1_ThirdPartyResource(in[i], &(*out)[i], c); err != nil { return err } } @@ -1932,129 +1299,3 @@ func deepCopy_v1beta1_ThirdPartyResourceList(in ThirdPartyResourceList, out *Thi } return nil } - -func deepCopy_intstr_IntOrString(in intstr.IntOrString, out *intstr.IntOrString, c *conversion.Cloner) error { - out.Type = in.Type - out.IntVal = in.IntVal - out.StrVal = in.StrVal - return nil -} - -func init() { - err := api.Scheme.AddGeneratedDeepCopyFuncs( - deepCopy_resource_Quantity, - deepCopy_unversioned_ListMeta, - deepCopy_unversioned_Time, - deepCopy_unversioned_TypeMeta, - deepCopy_v1_AWSElasticBlockStoreVolumeSource, - deepCopy_v1_AzureFileVolumeSource, - deepCopy_v1_Capabilities, - deepCopy_v1_CephFSVolumeSource, - deepCopy_v1_CinderVolumeSource, - deepCopy_v1_ConfigMapKeySelector, - deepCopy_v1_ConfigMapVolumeSource, - deepCopy_v1_Container, - deepCopy_v1_ContainerPort, - deepCopy_v1_DownwardAPIVolumeFile, - deepCopy_v1_DownwardAPIVolumeSource, - deepCopy_v1_EmptyDirVolumeSource, - deepCopy_v1_EnvVar, - deepCopy_v1_EnvVarSource, - deepCopy_v1_ExecAction, - deepCopy_v1_FCVolumeSource, - deepCopy_v1_FlexVolumeSource, - deepCopy_v1_FlockerVolumeSource, - deepCopy_v1_GCEPersistentDiskVolumeSource, - deepCopy_v1_GitRepoVolumeSource, - deepCopy_v1_GlusterfsVolumeSource, - 
deepCopy_v1_HTTPGetAction, - deepCopy_v1_HTTPHeader, - deepCopy_v1_Handler, - deepCopy_v1_HostPathVolumeSource, - deepCopy_v1_ISCSIVolumeSource, - deepCopy_v1_KeyToPath, - deepCopy_v1_Lifecycle, - deepCopy_v1_LoadBalancerIngress, - deepCopy_v1_LoadBalancerStatus, - deepCopy_v1_LocalObjectReference, - deepCopy_v1_NFSVolumeSource, - deepCopy_v1_ObjectFieldSelector, - deepCopy_v1_ObjectMeta, - deepCopy_v1_PersistentVolumeClaimVolumeSource, - deepCopy_v1_PodSecurityContext, - deepCopy_v1_PodSpec, - deepCopy_v1_PodTemplateSpec, - deepCopy_v1_Probe, - deepCopy_v1_RBDVolumeSource, - deepCopy_v1_ResourceRequirements, - deepCopy_v1_SELinuxOptions, - deepCopy_v1_SecretKeySelector, - deepCopy_v1_SecretVolumeSource, - deepCopy_v1_SecurityContext, - deepCopy_v1_TCPSocketAction, - deepCopy_v1_Volume, - deepCopy_v1_VolumeMount, - deepCopy_v1_VolumeSource, - deepCopy_v1beta1_APIVersion, - deepCopy_v1beta1_CPUTargetUtilization, - deepCopy_v1beta1_DaemonSet, - deepCopy_v1beta1_DaemonSetList, - deepCopy_v1beta1_DaemonSetSpec, - deepCopy_v1beta1_DaemonSetStatus, - deepCopy_v1beta1_Deployment, - deepCopy_v1beta1_DeploymentList, - deepCopy_v1beta1_DeploymentRollback, - deepCopy_v1beta1_DeploymentSpec, - deepCopy_v1beta1_DeploymentStatus, - deepCopy_v1beta1_DeploymentStrategy, - deepCopy_v1beta1_HTTPIngressPath, - deepCopy_v1beta1_HTTPIngressRuleValue, - deepCopy_v1beta1_HorizontalPodAutoscaler, - deepCopy_v1beta1_HorizontalPodAutoscalerList, - deepCopy_v1beta1_HorizontalPodAutoscalerSpec, - deepCopy_v1beta1_HorizontalPodAutoscalerStatus, - deepCopy_v1beta1_HostPortRange, - deepCopy_v1beta1_IDRange, - deepCopy_v1beta1_Ingress, - deepCopy_v1beta1_IngressBackend, - deepCopy_v1beta1_IngressList, - deepCopy_v1beta1_IngressRule, - deepCopy_v1beta1_IngressRuleValue, - deepCopy_v1beta1_IngressSpec, - deepCopy_v1beta1_IngressStatus, - deepCopy_v1beta1_IngressTLS, - deepCopy_v1beta1_Job, - deepCopy_v1beta1_JobCondition, - deepCopy_v1beta1_JobList, - deepCopy_v1beta1_JobSpec, - deepCopy_v1beta1_JobStatus, - deepCopy_v1beta1_LabelSelector, - deepCopy_v1beta1_LabelSelectorRequirement, - deepCopy_v1beta1_ListOptions, - deepCopy_v1beta1_PodSecurityPolicy, - deepCopy_v1beta1_PodSecurityPolicyList, - deepCopy_v1beta1_PodSecurityPolicySpec, - deepCopy_v1beta1_ReplicaSet, - deepCopy_v1beta1_ReplicaSetList, - deepCopy_v1beta1_ReplicaSetSpec, - deepCopy_v1beta1_ReplicaSetStatus, - deepCopy_v1beta1_ReplicationControllerDummy, - deepCopy_v1beta1_RollbackConfig, - deepCopy_v1beta1_RollingUpdateDeployment, - deepCopy_v1beta1_RunAsUserStrategyOptions, - deepCopy_v1beta1_SELinuxStrategyOptions, - deepCopy_v1beta1_Scale, - deepCopy_v1beta1_ScaleSpec, - deepCopy_v1beta1_ScaleStatus, - deepCopy_v1beta1_SubresourceReference, - deepCopy_v1beta1_ThirdPartyResource, - deepCopy_v1beta1_ThirdPartyResourceData, - deepCopy_v1beta1_ThirdPartyResourceDataList, - deepCopy_v1beta1_ThirdPartyResourceList, - deepCopy_intstr_IntOrString, - ) - if err != nil { - // if one of the deep copy functions is malformed, detect it immediately. - panic(err) - } -} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/defaults.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/defaults.go index 46d4c3785b02..71e55a467a96 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/defaults.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/defaults.go @@ -17,131 +17,151 @@ limitations under the License. 
package v1beta1 import ( + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/util/intstr" ) func addDefaultingFuncs(scheme *runtime.Scheme) { scheme.AddDefaultingFuncs( - func(obj *APIVersion) { - if len(obj.APIGroup) == 0 { - obj.APIGroup = GroupName - } - }, - func(obj *DaemonSet) { - labels := obj.Spec.Template.Labels - - // TODO: support templates defined elsewhere when we support them in the API - if labels != nil { - if obj.Spec.Selector == nil { - obj.Spec.Selector = &LabelSelector{ - MatchLabels: labels, - } - } - if len(obj.Labels) == 0 { - obj.Labels = labels - } - } - }, - func(obj *Deployment) { - // Default labels and selector to labels from pod template spec. - labels := obj.Spec.Template.Labels - - if labels != nil { - if obj.Spec.Selector == nil { - obj.Spec.Selector = &LabelSelector{MatchLabels: labels} - } - if len(obj.Labels) == 0 { - obj.Labels = labels - } - } - // Set DeploymentSpec.Replicas to 1 if it is not set. - if obj.Spec.Replicas == nil { - obj.Spec.Replicas = new(int32) - *obj.Spec.Replicas = 1 - } - strategy := &obj.Spec.Strategy - // Set default DeploymentStrategyType as RollingUpdate. - if strategy.Type == "" { - strategy.Type = RollingUpdateDeploymentStrategyType - } - if strategy.Type == RollingUpdateDeploymentStrategyType { - if strategy.RollingUpdate == nil { - rollingUpdate := RollingUpdateDeployment{} - strategy.RollingUpdate = &rollingUpdate - } - if strategy.RollingUpdate.MaxUnavailable == nil { - // Set default MaxUnavailable as 1 by default. - maxUnavailable := intstr.FromInt(1) - strategy.RollingUpdate.MaxUnavailable = &maxUnavailable - } - if strategy.RollingUpdate.MaxSurge == nil { - // Set default MaxSurge as 1 by default. - maxSurge := intstr.FromInt(1) - strategy.RollingUpdate.MaxSurge = &maxSurge - } - } - }, - func(obj *Job) { - labels := obj.Spec.Template.Labels - // TODO: support templates defined elsewhere when we support them in the API - if labels != nil { - // if an autoselector is requested, we'll build the selector later with controller-uid and job-name - autoSelector := bool(obj.Spec.AutoSelector != nil && *obj.Spec.AutoSelector) - - // otherwise, we are using a manual selector - manualSelector := !autoSelector - - // and default behavior for an unspecified manual selector is to use the pod template labels - if manualSelector && obj.Spec.Selector == nil { - obj.Spec.Selector = &LabelSelector{ - MatchLabels: labels, - } - } - if len(obj.Labels) == 0 { - obj.Labels = labels - } - } - // For a non-parallel job, you can leave both `.spec.completions` and - // `.spec.parallelism` unset. When both are unset, both are defaulted to 1. 
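Both the deleted closure above and the new `SetDefaults_Deployment` below seed `MaxUnavailable` and `MaxSurge` with `intstr.FromInt(1)`; the `IntOrString` union exists because these fields may also carry a percentage. A short usage sketch, assuming the vendored `intstr` import path shown above:

package main

import (
    "fmt"

    // Path as vendored in this tree; adjust if importing from elsewhere.
    "k8s.io/kubernetes/pkg/util/intstr"
)

func main() {
    // A rolling update bound can be an absolute count or a percentage;
    // the defaulting in this hunk picks the absolute value 1.
    abs := intstr.FromInt(1)
    pct := intstr.FromString("25%")
    fmt.Println(abs.String()) // "1"
    fmt.Println(pct.String()) // "25%"
}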
- if obj.Spec.Completions == nil && obj.Spec.Parallelism == nil { - obj.Spec.Completions = new(int32) - *obj.Spec.Completions = 1 - obj.Spec.Parallelism = new(int32) - *obj.Spec.Parallelism = 1 - } - if obj.Spec.Parallelism == nil { - obj.Spec.Parallelism = new(int32) - *obj.Spec.Parallelism = 1 - } - }, - func(obj *HorizontalPodAutoscaler) { - if obj.Spec.MinReplicas == nil { - minReplicas := int32(1) - obj.Spec.MinReplicas = &minReplicas + SetDefaults_DaemonSet, + SetDefaults_Deployment, + SetDefaults_Job, + SetDefaults_HorizontalPodAutoscaler, + SetDefaults_ReplicaSet, + SetDefaults_NetworkPolicy, + ) +} + +func SetDefaults_DaemonSet(obj *DaemonSet) { + labels := obj.Spec.Template.Labels + + // TODO: support templates defined elsewhere when we support them in the API + if labels != nil { + if obj.Spec.Selector == nil { + obj.Spec.Selector = &LabelSelector{ + MatchLabels: labels, } - if obj.Spec.CPUUtilization == nil { - obj.Spec.CPUUtilization = &CPUTargetUtilization{TargetPercentage: 80} + } + if len(obj.Labels) == 0 { + obj.Labels = labels + } + } +} + +func SetDefaults_Deployment(obj *Deployment) { + // Default labels and selector to labels from pod template spec. + labels := obj.Spec.Template.Labels + + if labels != nil { + if obj.Spec.Selector == nil { + obj.Spec.Selector = &LabelSelector{MatchLabels: labels} + } + if len(obj.Labels) == 0 { + obj.Labels = labels + } + } + // Set DeploymentSpec.Replicas to 1 if it is not set. + if obj.Spec.Replicas == nil { + obj.Spec.Replicas = new(int32) + *obj.Spec.Replicas = 1 + } + strategy := &obj.Spec.Strategy + // Set default DeploymentStrategyType as RollingUpdate. + if strategy.Type == "" { + strategy.Type = RollingUpdateDeploymentStrategyType + } + if strategy.Type == RollingUpdateDeploymentStrategyType { + if strategy.RollingUpdate == nil { + rollingUpdate := RollingUpdateDeployment{} + strategy.RollingUpdate = &rollingUpdate + } + if strategy.RollingUpdate.MaxUnavailable == nil { + // Set default MaxUnavailable as 1 by default. + maxUnavailable := intstr.FromInt(1) + strategy.RollingUpdate.MaxUnavailable = &maxUnavailable + } + if strategy.RollingUpdate.MaxSurge == nil { + // Set default MaxSurge as 1 by default. + maxSurge := intstr.FromInt(1) + strategy.RollingUpdate.MaxSurge = &maxSurge + } + } +} + +func SetDefaults_Job(obj *Job) { + labels := obj.Spec.Template.Labels + // TODO: support templates defined elsewhere when we support them in the API + if labels != nil { + // if an autoselector is requested, we'll build the selector later with controller-uid and job-name + autoSelector := bool(obj.Spec.AutoSelector != nil && *obj.Spec.AutoSelector) + + // otherwise, we are using a manual selector + manualSelector := !autoSelector + + // and default behavior for an unspecified manual selector is to use the pod template labels + if manualSelector && obj.Spec.Selector == nil { + obj.Spec.Selector = &LabelSelector{ + MatchLabels: labels, } - }, - func(obj *ReplicaSet) { - labels := obj.Spec.Template.Labels - - // TODO: support templates defined elsewhere when we support them in the API - if labels != nil { - if obj.Spec.Selector == nil { - obj.Spec.Selector = &LabelSelector{ - MatchLabels: labels, - } - } - if len(obj.Labels) == 0 { - obj.Labels = labels - } + } + if len(obj.Labels) == 0 { + obj.Labels = labels + } + } + // For a non-parallel job, you can leave both `.spec.completions` and + // `.spec.parallelism` unset. When both are unset, both are defaulted to 1. 
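A practical consequence of lifting the anonymous closures into the exported `SetDefaults_*` functions registered with `scheme.AddDefaultingFuncs` above is that each defaulter can now be exercised directly, with no `runtime.Scheme` in sight. A usage sketch, assuming the vendored package path from this diff is importable as-is:

package main

import (
    "fmt"

    "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
)

func main() {
    d := &v1beta1.Deployment{}
    // Apply only this group's defaulting; scheme-driven decoding would
    // additionally run the v1 pod-template defaulters.
    v1beta1.SetDefaults_Deployment(d)

    fmt.Println(*d.Spec.Replicas)                                // 1
    fmt.Println(d.Spec.Strategy.Type)                            // RollingUpdate
    fmt.Println(d.Spec.Strategy.RollingUpdate.MaxSurge.String()) // 1
}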
+ if obj.Spec.Completions == nil && obj.Spec.Parallelism == nil { + obj.Spec.Completions = new(int32) + *obj.Spec.Completions = 1 + obj.Spec.Parallelism = new(int32) + *obj.Spec.Parallelism = 1 + } + if obj.Spec.Parallelism == nil { + obj.Spec.Parallelism = new(int32) + *obj.Spec.Parallelism = 1 + } +} + +func SetDefaults_HorizontalPodAutoscaler(obj *HorizontalPodAutoscaler) { + if obj.Spec.MinReplicas == nil { + minReplicas := int32(1) + obj.Spec.MinReplicas = &minReplicas + } + if obj.Spec.CPUUtilization == nil { + obj.Spec.CPUUtilization = &CPUTargetUtilization{TargetPercentage: 80} + } +} + +func SetDefaults_ReplicaSet(obj *ReplicaSet) { + labels := obj.Spec.Template.Labels + + // TODO: support templates defined elsewhere when we support them in the API + if labels != nil { + if obj.Spec.Selector == nil { + obj.Spec.Selector = &LabelSelector{ + MatchLabels: labels, } - if obj.Spec.Replicas == nil { - obj.Spec.Replicas = new(int32) - *obj.Spec.Replicas = 1 + } + if len(obj.Labels) == 0 { + obj.Labels = labels + } + } + if obj.Spec.Replicas == nil { + obj.Spec.Replicas = new(int32) + *obj.Spec.Replicas = 1 + } +} + +func SetDefaults_NetworkPolicy(obj *NetworkPolicy) { + // Default any undefined Protocol fields to TCP. + for _, i := range obj.Spec.Ingress { + // TODO: Update Ports to be a pointer to slice as soon as auto-generation supports it. + for _, p := range i.Ports { + if p.Protocol == nil { + proto := v1.ProtocolTCP + p.Protocol = &proto } - }, - ) + } + } } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/defaults_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/defaults_test.go new file mode 100644 index 000000000000..f02b2c0d4466 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/defaults_test.go @@ -0,0 +1,731 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1_test + +import ( + "reflect" + "testing" + + "k8s.io/kubernetes/pkg/api" + _ "k8s.io/kubernetes/pkg/api/install" + "k8s.io/kubernetes/pkg/api/resource" + "k8s.io/kubernetes/pkg/api/v1" + _ "k8s.io/kubernetes/pkg/apis/extensions/install" + . 
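One bug worth flagging in `SetDefaults_NetworkPolicy` above: `for _, p := range i.Ports` ranges by value, so `p` is a copy of each `NetworkPolicyPort` and the TCP default is written to that copy, never to the object being defaulted. Indexing into the slice (for example `for j := range i.Ports { p := &i.Ports[j]; ... }`) fixes it, since the slice header copied into `i` still shares its backing array with the original. A self-contained demonstration, with `Port` as an illustrative stand-in type:

package main

import "fmt"

// Port is an illustrative stand-in for NetworkPolicyPort.
type Port struct{ Protocol *string }

func main() {
    tcp := "TCP"
    ports := []Port{{}}

    // Ranging by value: p is a copy, so the default is dropped.
    for _, p := range ports {
        if p.Protocol == nil {
            p.Protocol = &tcp
        }
    }
    fmt.Println(ports[0].Protocol) // <nil>

    // Indexing into the slice: the write lands in the element itself.
    for j := range ports {
        if p := &ports[j]; p.Protocol == nil {
            p.Protocol = &tcp
        }
    }
    fmt.Println(*ports[0].Protocol) // TCP
}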
"k8s.io/kubernetes/pkg/apis/extensions/v1beta1" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/util/intstr" +) + +func TestSetDefaultDaemonSet(t *testing.T) { + defaultLabels := map[string]string{"foo": "bar"} + period := int64(v1.DefaultTerminationGracePeriodSeconds) + defaultTemplate := v1.PodTemplateSpec{ + Spec: v1.PodSpec{ + DNSPolicy: v1.DNSClusterFirst, + RestartPolicy: v1.RestartPolicyAlways, + SecurityContext: &v1.PodSecurityContext{}, + TerminationGracePeriodSeconds: &period, + }, + ObjectMeta: v1.ObjectMeta{ + Labels: defaultLabels, + }, + } + templateNoLabel := v1.PodTemplateSpec{ + Spec: v1.PodSpec{ + DNSPolicy: v1.DNSClusterFirst, + RestartPolicy: v1.RestartPolicyAlways, + SecurityContext: &v1.PodSecurityContext{}, + TerminationGracePeriodSeconds: &period, + }, + } + tests := []struct { + original *DaemonSet + expected *DaemonSet + }{ + { // Labels change/defaulting test. + original: &DaemonSet{ + Spec: DaemonSetSpec{ + Template: defaultTemplate, + }, + }, + expected: &DaemonSet{ + ObjectMeta: v1.ObjectMeta{ + Labels: defaultLabels, + }, + Spec: DaemonSetSpec{ + Selector: &LabelSelector{ + MatchLabels: defaultLabels, + }, + Template: defaultTemplate, + }, + }, + }, + { // Labels change/defaulting test. + original: &DaemonSet{ + ObjectMeta: v1.ObjectMeta{ + Labels: map[string]string{ + "bar": "foo", + }, + }, + Spec: DaemonSetSpec{ + Template: defaultTemplate, + }, + }, + expected: &DaemonSet{ + ObjectMeta: v1.ObjectMeta{ + Labels: map[string]string{ + "bar": "foo", + }, + }, + Spec: DaemonSetSpec{ + Selector: &LabelSelector{ + MatchLabels: defaultLabels, + }, + Template: defaultTemplate, + }, + }, + }, + { // Update strategy. + original: &DaemonSet{}, + expected: &DaemonSet{ + Spec: DaemonSetSpec{ + Template: templateNoLabel, + }, + }, + }, + { // Update strategy. + original: &DaemonSet{ + Spec: DaemonSetSpec{}, + }, + expected: &DaemonSet{ + Spec: DaemonSetSpec{ + Template: templateNoLabel, + }, + }, + }, + { // Custom unique label key. 
+ original: &DaemonSet{ + Spec: DaemonSetSpec{}, + }, + expected: &DaemonSet{ + Spec: DaemonSetSpec{ + Template: templateNoLabel, + }, + }, + }, + } + + for i, test := range tests { + original := test.original + expected := test.expected + obj2 := roundTrip(t, runtime.Object(original)) + got, ok := obj2.(*DaemonSet) + if !ok { + t.Errorf("(%d) unexpected object: %v", i, got) + t.FailNow() + } + if !reflect.DeepEqual(got.Spec, expected.Spec) { + t.Errorf("(%d) got different than expected\ngot:\n\t%+v\nexpected:\n\t%+v", i, got.Spec, expected.Spec) + } + } +} + +func TestSetDefaultDeployment(t *testing.T) { + defaultIntOrString := intstr.FromInt(1) + differentIntOrString := intstr.FromInt(5) + period := int64(v1.DefaultTerminationGracePeriodSeconds) + defaultTemplate := v1.PodTemplateSpec{ + Spec: v1.PodSpec{ + DNSPolicy: v1.DNSClusterFirst, + RestartPolicy: v1.RestartPolicyAlways, + SecurityContext: &v1.PodSecurityContext{}, + TerminationGracePeriodSeconds: &period, + }, + } + tests := []struct { + original *Deployment + expected *Deployment + }{ + { + original: &Deployment{}, + expected: &Deployment{ + Spec: DeploymentSpec{ + Replicas: newInt32(1), + Strategy: DeploymentStrategy{ + Type: RollingUpdateDeploymentStrategyType, + RollingUpdate: &RollingUpdateDeployment{ + MaxSurge: &defaultIntOrString, + MaxUnavailable: &defaultIntOrString, + }, + }, + Template: defaultTemplate, + }, + }, + }, + { + original: &Deployment{ + Spec: DeploymentSpec{ + Replicas: newInt32(5), + Strategy: DeploymentStrategy{ + RollingUpdate: &RollingUpdateDeployment{ + MaxSurge: &differentIntOrString, + }, + }, + }, + }, + expected: &Deployment{ + Spec: DeploymentSpec{ + Replicas: newInt32(5), + Strategy: DeploymentStrategy{ + Type: RollingUpdateDeploymentStrategyType, + RollingUpdate: &RollingUpdateDeployment{ + MaxSurge: &differentIntOrString, + MaxUnavailable: &defaultIntOrString, + }, + }, + Template: defaultTemplate, + }, + }, + }, + { + original: &Deployment{ + Spec: DeploymentSpec{ + Replicas: newInt32(5), + Strategy: DeploymentStrategy{ + Type: RecreateDeploymentStrategyType, + }, + }, + }, + expected: &Deployment{ + Spec: DeploymentSpec{ + Replicas: newInt32(5), + Strategy: DeploymentStrategy{ + Type: RecreateDeploymentStrategyType, + }, + Template: defaultTemplate, + }, + }, + }, + { + original: &Deployment{ + Spec: DeploymentSpec{ + Replicas: newInt32(5), + Strategy: DeploymentStrategy{ + Type: RecreateDeploymentStrategyType, + }, + }, + }, + expected: &Deployment{ + Spec: DeploymentSpec{ + Replicas: newInt32(5), + Strategy: DeploymentStrategy{ + Type: RecreateDeploymentStrategyType, + }, + Template: defaultTemplate, + }, + }, + }, + } + + for _, test := range tests { + original := test.original + expected := test.expected + obj2 := roundTrip(t, runtime.Object(original)) + got, ok := obj2.(*Deployment) + if !ok { + t.Errorf("unexpected object: %v", got) + t.FailNow() + } + if !reflect.DeepEqual(got.Spec, expected.Spec) { + t.Errorf("got different than expected:\n\t%+v\ngot:\n\t%+v", got.Spec, expected.Spec) + } + } +} + +func TestSetDefaultJobParallelismAndCompletions(t *testing.T) { + tests := []struct { + original *Job + expected *Job + }{ + // both unspecified -> sets both to 1 + { + original: &Job{ + Spec: JobSpec{}, + }, + expected: &Job{ + Spec: JobSpec{ + Completions: newInt32(1), + Parallelism: newInt32(1), + }, + }, + }, + // WQ: Parallelism explicitly 0 and completions unset -> no change + { + original: &Job{ + Spec: JobSpec{ + Parallelism: newInt32(0), + }, + }, + expected: &Job{ + Spec: 
JobSpec{ + Parallelism: newInt32(0), + }, + }, + }, + // WQ: Parallelism explicitly 2 and completions unset -> no change + { + original: &Job{ + Spec: JobSpec{ + Parallelism: newInt32(2), + }, + }, + expected: &Job{ + Spec: JobSpec{ + Parallelism: newInt32(2), + }, + }, + }, + // Completions explicitly 2 and parallelism unset -> parallelism is defaulted + { + original: &Job{ + Spec: JobSpec{ + Completions: newInt32(2), + }, + }, + expected: &Job{ + Spec: JobSpec{ + Completions: newInt32(2), + Parallelism: newInt32(1), + }, + }, + }, + // Both set -> no change + { + original: &Job{ + Spec: JobSpec{ + Completions: newInt32(10), + Parallelism: newInt32(11), + }, + }, + expected: &Job{ + Spec: JobSpec{ + Completions: newInt32(10), + Parallelism: newInt32(11), + }, + }, + }, + // Both set, flipped -> no change + { + original: &Job{ + Spec: JobSpec{ + Completions: newInt32(11), + Parallelism: newInt32(10), + }, + }, + expected: &Job{ + Spec: JobSpec{ + Completions: newInt32(11), + Parallelism: newInt32(10), + }, + }, + }, + } + + for _, tc := range tests { + original := tc.original + expected := tc.expected + obj2 := roundTrip(t, runtime.Object(original)) + got, ok := obj2.(*Job) + if !ok { + t.Errorf("unexpected object: %v", got) + t.FailNow() + } + if (got.Spec.Completions == nil) != (expected.Spec.Completions == nil) { + t.Errorf("got different *completions than expected: %v %v", got.Spec.Completions, expected.Spec.Completions) + } + if got.Spec.Completions != nil && expected.Spec.Completions != nil { + if *got.Spec.Completions != *expected.Spec.Completions { + t.Errorf("got different completions than expected: %d %d", *got.Spec.Completions, *expected.Spec.Completions) + } + } + if (got.Spec.Parallelism == nil) != (expected.Spec.Parallelism == nil) { + t.Errorf("got different *Parallelism than expected: %v %v", got.Spec.Parallelism, expected.Spec.Parallelism) + } + if got.Spec.Parallelism != nil && expected.Spec.Parallelism != nil { + if *got.Spec.Parallelism != *expected.Spec.Parallelism { + t.Errorf("got different parallelism than expected: %d %d", *got.Spec.Parallelism, *expected.Spec.Parallelism) + } + } + } +} + +func TestSetDefaultJobSelector(t *testing.T) { + tests := []struct { + original *Job + expectedSelector *LabelSelector + }{ + // selector set explicitly, nil autoSelector + { + original: &Job{ + Spec: JobSpec{ + Selector: &LabelSelector{ + MatchLabels: map[string]string{"job": "selector"}, + }, + }, + }, + expectedSelector: &LabelSelector{ + MatchLabels: map[string]string{"job": "selector"}, + }, + }, + // selector set explicitly, autoSelector=true + { + original: &Job{ + Spec: JobSpec{ + Selector: &LabelSelector{ + MatchLabels: map[string]string{"job": "selector"}, + }, + AutoSelector: newBool(true), + }, + }, + expectedSelector: &LabelSelector{ + MatchLabels: map[string]string{"job": "selector"}, + }, + }, + // selector set explicitly, autoSelector=false + { + original: &Job{ + Spec: JobSpec{ + Selector: &LabelSelector{ + MatchLabels: map[string]string{"job": "selector"}, + }, + AutoSelector: newBool(false), + }, + }, + expectedSelector: &LabelSelector{ + MatchLabels: map[string]string{"job": "selector"}, + }, + }, + // selector from template labels + { + original: &Job{ + Spec: JobSpec{ + Template: v1.PodTemplateSpec{ + ObjectMeta: v1.ObjectMeta{ + Labels: map[string]string{"job": "selector"}, + }, + }, + }, + }, + expectedSelector: &LabelSelector{ + MatchLabels: map[string]string{"job": "selector"}, + }, + }, + // selector from template labels, autoSelector=false + { + 
original: &Job{ + Spec: JobSpec{ + Template: v1.PodTemplateSpec{ + ObjectMeta: v1.ObjectMeta{ + Labels: map[string]string{"job": "selector"}, + }, + }, + AutoSelector: newBool(false), + }, + }, + expectedSelector: &LabelSelector{ + MatchLabels: map[string]string{"job": "selector"}, + }, + }, + // selector not copied from template labels, autoSelector=true + { + original: &Job{ + Spec: JobSpec{ + Template: v1.PodTemplateSpec{ + ObjectMeta: v1.ObjectMeta{ + Labels: map[string]string{"job": "selector"}, + }, + }, + AutoSelector: newBool(true), + }, + }, + expectedSelector: nil, + }, + } + + for i, testcase := range tests { + obj2 := roundTrip(t, runtime.Object(testcase.original)) + got, ok := obj2.(*Job) + if !ok { + t.Errorf("%d: unexpected object: %v", i, got) + t.FailNow() + } + if !reflect.DeepEqual(got.Spec.Selector, testcase.expectedSelector) { + t.Errorf("%d: got different selectors %#v %#v", i, got.Spec.Selector, testcase.expectedSelector) + } + } +} + +func TestSetDefaultReplicaSet(t *testing.T) { + tests := []struct { + rs *ReplicaSet + expectLabels bool + expectSelector bool + }{ + { + rs: &ReplicaSet{ + Spec: ReplicaSetSpec{ + Template: v1.PodTemplateSpec{ + ObjectMeta: v1.ObjectMeta{ + Labels: map[string]string{ + "foo": "bar", + }, + }, + }, + }, + }, + expectLabels: true, + expectSelector: true, + }, + { + rs: &ReplicaSet{ + ObjectMeta: v1.ObjectMeta{ + Labels: map[string]string{ + "bar": "foo", + }, + }, + Spec: ReplicaSetSpec{ + Template: v1.PodTemplateSpec{ + ObjectMeta: v1.ObjectMeta{ + Labels: map[string]string{ + "foo": "bar", + }, + }, + }, + }, + }, + expectLabels: false, + expectSelector: true, + }, + { + rs: &ReplicaSet{ + ObjectMeta: v1.ObjectMeta{ + Labels: map[string]string{ + "bar": "foo", + }, + }, + Spec: ReplicaSetSpec{ + Selector: &LabelSelector{ + MatchLabels: map[string]string{ + "some": "other", + }, + }, + Template: v1.PodTemplateSpec{ + ObjectMeta: v1.ObjectMeta{ + Labels: map[string]string{ + "foo": "bar", + }, + }, + }, + }, + }, + expectLabels: false, + expectSelector: false, + }, + { + rs: &ReplicaSet{ + Spec: ReplicaSetSpec{ + Selector: &LabelSelector{ + MatchLabels: map[string]string{ + "some": "other", + }, + }, + Template: v1.PodTemplateSpec{ + ObjectMeta: v1.ObjectMeta{ + Labels: map[string]string{ + "foo": "bar", + }, + }, + }, + }, + }, + expectLabels: true, + expectSelector: false, + }, + } + + for _, test := range tests { + rs := test.rs + obj2 := roundTrip(t, runtime.Object(rs)) + rs2, ok := obj2.(*ReplicaSet) + if !ok { + t.Errorf("unexpected object: %v", rs2) + t.FailNow() + } + if test.expectSelector != reflect.DeepEqual(rs2.Spec.Selector.MatchLabels, rs2.Spec.Template.Labels) { + if test.expectSelector { + t.Errorf("expected: %v, got: %v", rs2.Spec.Template.Labels, rs2.Spec.Selector) + } else { + t.Errorf("unexpected equality: %v", rs.Spec.Selector) + } + } + if test.expectLabels != reflect.DeepEqual(rs2.Labels, rs2.Spec.Template.Labels) { + if test.expectLabels { + t.Errorf("expected: %v, got: %v", rs2.Spec.Template.Labels, rs2.Labels) + } else { + t.Errorf("unexpected equality: %v", rs.Labels) + } + } + } +} + +func TestSetDefaultReplicaSetReplicas(t *testing.T) { + tests := []struct { + rs ReplicaSet + expectReplicas int32 + }{ + { + rs: ReplicaSet{ + Spec: ReplicaSetSpec{ + Template: v1.PodTemplateSpec{ + ObjectMeta: v1.ObjectMeta{ + Labels: map[string]string{ + "foo": "bar", + }, + }, + }, + }, + }, + expectReplicas: 1, + }, + { + rs: ReplicaSet{ + Spec: ReplicaSetSpec{ + Replicas: newInt32(0), + Template: v1.PodTemplateSpec{ + 
ObjectMeta: v1.ObjectMeta{ + Labels: map[string]string{ + "foo": "bar", + }, + }, + }, + }, + }, + expectReplicas: 0, + }, + { + rs: ReplicaSet{ + Spec: ReplicaSetSpec{ + Replicas: newInt32(3), + Template: v1.PodTemplateSpec{ + ObjectMeta: v1.ObjectMeta{ + Labels: map[string]string{ + "foo": "bar", + }, + }, + }, + }, + }, + expectReplicas: 3, + }, + } + + for _, test := range tests { + rs := &test.rs + obj2 := roundTrip(t, runtime.Object(rs)) + rs2, ok := obj2.(*ReplicaSet) + if !ok { + t.Errorf("unexpected object: %v", rs2) + t.FailNow() + } + if rs2.Spec.Replicas == nil { + t.Errorf("unexpected nil Replicas") + } else if test.expectReplicas != *rs2.Spec.Replicas { + t.Errorf("expected: %d replicas, got: %d", test.expectReplicas, *rs2.Spec.Replicas) + } + } +} + +func TestDefaultRequestIsNotSetForReplicaSet(t *testing.T) { + s := v1.PodSpec{} + s.Containers = []v1.Container{ + { + Resources: v1.ResourceRequirements{ + Limits: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("100m"), + }, + }, + }, + } + rs := &ReplicaSet{ + Spec: ReplicaSetSpec{ + Replicas: newInt32(3), + Template: v1.PodTemplateSpec{ + ObjectMeta: v1.ObjectMeta{ + Labels: map[string]string{ + "foo": "bar", + }, + }, + Spec: s, + }, + }, + } + output := roundTrip(t, runtime.Object(rs)) + rs2 := output.(*ReplicaSet) + defaultRequest := rs2.Spec.Template.Spec.Containers[0].Resources.Requests + requestValue := defaultRequest[v1.ResourceCPU] + if requestValue.String() != "0" { + t.Errorf("Expected 0 request value, got: %s", requestValue.String()) + } +} + +// roundTrip encodes obj with the legacy codec for SchemeGroupVersion, decodes +// it back with the universal decoder (the step that runs the registered +// defaulting functions), and converts the result into a fresh object of obj's +// original type so the tests above can compare the defaulted fields. +func roundTrip(t *testing.T, obj runtime.Object) runtime.Object { + data, err := runtime.Encode(api.Codecs.LegacyCodec(SchemeGroupVersion), obj) + if err != nil { + t.Errorf("%v\n %#v", err, obj) + return nil + } + obj2, err := runtime.Decode(api.Codecs.UniversalDecoder(), data) + if err != nil { + t.Errorf("%v\nData: %s\nSource: %#v", err, string(data), obj) + return nil + } + obj3 := reflect.New(reflect.TypeOf(obj).Elem()).Interface().(runtime.Object) + err = api.Scheme.Convert(obj2, obj3) + if err != nil { + t.Errorf("%v\nSource: %#v", err, obj2) + return nil + } + return obj3 +} + +func newInt32(val int32) *int32 { + p := new(int32) + *p = val + return p +} + +func newString(val string) *string { + p := new(string) + *p = val + return p +} + +func newBool(val bool) *bool { + b := new(bool) + *b = val + return b +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/doc.go new file mode 100644 index 000000000000..cfdb87c53d84 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +// +genconversion=true +package v1beta1 diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/generated.pb.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/generated.pb.go new file mode 100644 index 000000000000..3120ce17ff31 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/generated.pb.go @@ -0,0 +1,13005 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/pkg/apis/extensions/v1beta1/generated.proto +// DO NOT EDIT! + +/* + Package v1beta1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/pkg/apis/extensions/v1beta1/generated.proto + + It has these top-level messages: + APIVersion + CPUTargetUtilization + CustomMetricCurrentStatus + CustomMetricCurrentStatusList + CustomMetricTarget + CustomMetricTargetList + DaemonSet + DaemonSetList + DaemonSetSpec + DaemonSetStatus + Deployment + DeploymentList + DeploymentRollback + DeploymentSpec + DeploymentStatus + DeploymentStrategy + ExportOptions + FSGroupStrategyOptions + HTTPIngressPath + HTTPIngressRuleValue + HorizontalPodAutoscaler + HorizontalPodAutoscalerList + HorizontalPodAutoscalerSpec + HorizontalPodAutoscalerStatus + HostPortRange + IDRange + Ingress + IngressBackend + IngressList + IngressRule + IngressRuleValue + IngressSpec + IngressStatus + IngressTLS + Job + JobCondition + JobList + JobSpec + JobStatus + LabelSelector + LabelSelectorRequirement + ListOptions + NetworkPolicy + NetworkPolicyIngressRule + NetworkPolicyList + NetworkPolicyPeer + NetworkPolicyPort + NetworkPolicySpec + PodSecurityPolicy + PodSecurityPolicyList + PodSecurityPolicySpec + ReplicaSet + ReplicaSetList + ReplicaSetSpec + ReplicaSetStatus + ReplicationControllerDummy + RollbackConfig + RollingUpdateDeployment + RunAsUserStrategyOptions + SELinuxStrategyOptions + Scale + ScaleSpec + ScaleStatus + SubresourceReference + SupplementalGroupsStrategyOptions + ThirdPartyResource + ThirdPartyResourceData + ThirdPartyResourceDataList + ThirdPartyResourceList +*/ +package v1beta1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import k8s_io_kubernetes_pkg_api_unversioned "k8s.io/kubernetes/pkg/api/unversioned" +import k8s_io_kubernetes_pkg_api_v1 "k8s.io/kubernetes/pkg/api/v1" + +import k8s_io_kubernetes_pkg_util_intstr "k8s.io/kubernetes/pkg/util/intstr" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +func (m *APIVersion) Reset() { *m = APIVersion{} } +func (m *APIVersion) String() string { return proto.CompactTextString(m) } +func (*APIVersion) ProtoMessage() {} + +func (m *CPUTargetUtilization) Reset() { *m = CPUTargetUtilization{} } +func (m *CPUTargetUtilization) String() string { return proto.CompactTextString(m) } +func (*CPUTargetUtilization) ProtoMessage() {} + +func (m *CustomMetricCurrentStatus) Reset() { *m = CustomMetricCurrentStatus{} } +func (m *CustomMetricCurrentStatus) String() string { return proto.CompactTextString(m) } +func (*CustomMetricCurrentStatus) ProtoMessage() {} + +func (m *CustomMetricCurrentStatusList) Reset() { *m = CustomMetricCurrentStatusList{} } +func (m *CustomMetricCurrentStatusList) String() string { return proto.CompactTextString(m) } +func (*CustomMetricCurrentStatusList) ProtoMessage() {} + +func (m *CustomMetricTarget) Reset() { *m = CustomMetricTarget{} } +func (m *CustomMetricTarget) String() string { return proto.CompactTextString(m) } +func (*CustomMetricTarget) ProtoMessage() {} + +func (m *CustomMetricTargetList) Reset() { *m = CustomMetricTargetList{} } +func (m *CustomMetricTargetList) String() string { return proto.CompactTextString(m) } +func (*CustomMetricTargetList) ProtoMessage() {} + +func (m *DaemonSet) Reset() { *m = DaemonSet{} } +func (m *DaemonSet) String() string { return proto.CompactTextString(m) } +func (*DaemonSet) ProtoMessage() {} + +func (m *DaemonSetList) Reset() { *m = DaemonSetList{} } +func (m *DaemonSetList) String() string { return proto.CompactTextString(m) } +func (*DaemonSetList) ProtoMessage() {} + +func (m *DaemonSetSpec) Reset() { *m = DaemonSetSpec{} } +func (m *DaemonSetSpec) String() string { return proto.CompactTextString(m) } +func (*DaemonSetSpec) ProtoMessage() {} + +func (m *DaemonSetStatus) Reset() { *m = DaemonSetStatus{} } +func (m *DaemonSetStatus) String() string { return proto.CompactTextString(m) } +func (*DaemonSetStatus) ProtoMessage() {} + +func (m *Deployment) Reset() { *m = Deployment{} } +func (m *Deployment) String() string { return proto.CompactTextString(m) } +func (*Deployment) ProtoMessage() {} + +func (m *DeploymentList) Reset() { *m = DeploymentList{} } +func (m *DeploymentList) String() string { return proto.CompactTextString(m) } +func (*DeploymentList) ProtoMessage() {} + +func (m *DeploymentRollback) Reset() { *m = DeploymentRollback{} } +func (m *DeploymentRollback) String() string { return proto.CompactTextString(m) } +func (*DeploymentRollback) ProtoMessage() {} + +func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} } +func (m *DeploymentSpec) String() string { return proto.CompactTextString(m) } +func (*DeploymentSpec) ProtoMessage() {} + +func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } +func (m *DeploymentStatus) String() string { return proto.CompactTextString(m) } +func (*DeploymentStatus) ProtoMessage() {} + +func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } +func (m *DeploymentStrategy) String() string { return proto.CompactTextString(m) } +func (*DeploymentStrategy) ProtoMessage() {} + +func (m *ExportOptions) Reset() { *m = ExportOptions{} } +func (m *ExportOptions) String() string { return proto.CompactTextString(m) } +func (*ExportOptions) ProtoMessage() {} + +func (m *FSGroupStrategyOptions) Reset() { *m = FSGroupStrategyOptions{} } +func (m *FSGroupStrategyOptions) String() string { return proto.CompactTextString(m) } +func (*FSGroupStrategyOptions) 
ProtoMessage() {} + +func (m *HTTPIngressPath) Reset() { *m = HTTPIngressPath{} } +func (m *HTTPIngressPath) String() string { return proto.CompactTextString(m) } +func (*HTTPIngressPath) ProtoMessage() {} + +func (m *HTTPIngressRuleValue) Reset() { *m = HTTPIngressRuleValue{} } +func (m *HTTPIngressRuleValue) String() string { return proto.CompactTextString(m) } +func (*HTTPIngressRuleValue) ProtoMessage() {} + +func (m *HorizontalPodAutoscaler) Reset() { *m = HorizontalPodAutoscaler{} } +func (m *HorizontalPodAutoscaler) String() string { return proto.CompactTextString(m) } +func (*HorizontalPodAutoscaler) ProtoMessage() {} + +func (m *HorizontalPodAutoscalerList) Reset() { *m = HorizontalPodAutoscalerList{} } +func (m *HorizontalPodAutoscalerList) String() string { return proto.CompactTextString(m) } +func (*HorizontalPodAutoscalerList) ProtoMessage() {} + +func (m *HorizontalPodAutoscalerSpec) Reset() { *m = HorizontalPodAutoscalerSpec{} } +func (m *HorizontalPodAutoscalerSpec) String() string { return proto.CompactTextString(m) } +func (*HorizontalPodAutoscalerSpec) ProtoMessage() {} + +func (m *HorizontalPodAutoscalerStatus) Reset() { *m = HorizontalPodAutoscalerStatus{} } +func (m *HorizontalPodAutoscalerStatus) String() string { return proto.CompactTextString(m) } +func (*HorizontalPodAutoscalerStatus) ProtoMessage() {} + +func (m *HostPortRange) Reset() { *m = HostPortRange{} } +func (m *HostPortRange) String() string { return proto.CompactTextString(m) } +func (*HostPortRange) ProtoMessage() {} + +func (m *IDRange) Reset() { *m = IDRange{} } +func (m *IDRange) String() string { return proto.CompactTextString(m) } +func (*IDRange) ProtoMessage() {} + +func (m *Ingress) Reset() { *m = Ingress{} } +func (m *Ingress) String() string { return proto.CompactTextString(m) } +func (*Ingress) ProtoMessage() {} + +func (m *IngressBackend) Reset() { *m = IngressBackend{} } +func (m *IngressBackend) String() string { return proto.CompactTextString(m) } +func (*IngressBackend) ProtoMessage() {} + +func (m *IngressList) Reset() { *m = IngressList{} } +func (m *IngressList) String() string { return proto.CompactTextString(m) } +func (*IngressList) ProtoMessage() {} + +func (m *IngressRule) Reset() { *m = IngressRule{} } +func (m *IngressRule) String() string { return proto.CompactTextString(m) } +func (*IngressRule) ProtoMessage() {} + +func (m *IngressRuleValue) Reset() { *m = IngressRuleValue{} } +func (m *IngressRuleValue) String() string { return proto.CompactTextString(m) } +func (*IngressRuleValue) ProtoMessage() {} + +func (m *IngressSpec) Reset() { *m = IngressSpec{} } +func (m *IngressSpec) String() string { return proto.CompactTextString(m) } +func (*IngressSpec) ProtoMessage() {} + +func (m *IngressStatus) Reset() { *m = IngressStatus{} } +func (m *IngressStatus) String() string { return proto.CompactTextString(m) } +func (*IngressStatus) ProtoMessage() {} + +func (m *IngressTLS) Reset() { *m = IngressTLS{} } +func (m *IngressTLS) String() string { return proto.CompactTextString(m) } +func (*IngressTLS) ProtoMessage() {} + +func (m *Job) Reset() { *m = Job{} } +func (m *Job) String() string { return proto.CompactTextString(m) } +func (*Job) ProtoMessage() {} + +func (m *JobCondition) Reset() { *m = JobCondition{} } +func (m *JobCondition) String() string { return proto.CompactTextString(m) } +func (*JobCondition) ProtoMessage() {} + +func (m *JobList) Reset() { *m = JobList{} } +func (m *JobList) String() string { return proto.CompactTextString(m) } +func (*JobList) ProtoMessage() {} 
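+ +// NOTE: the example below is an illustrative sketch added for documentation; it +// is not part of the protoc-gen-gogo output, and exampleMarshalRoundTrip is a +// hypothetical name. It shows how the boilerplate above fits together: the +// Reset/String/ProtoMessage methods make each type satisfy proto.Message, and +// proto.Marshal then short-circuits to the hand-written Marshal/MarshalTo fast +// paths defined later in this file. On the wire each tag byte is +// fieldNumber<<3|wireType (0xa is field 1/length-delimited, 0x8 is field +// 1/varint), and encodeVarintGenerated writes base-128 varints with the high +// bit as a continuation flag, e.g. the value 300 encodes as 0xac 0x02. +func exampleMarshalRoundTrip() error { + in := &APIVersion{Name: "v1beta1"} // APIVersion struct is declared in this package's types + data, err := proto.Marshal(in) // dispatches to the generated Marshal fast path + if err != nil { + return err + } + out := &APIVersion{} + if err := proto.Unmarshal(data, out); err != nil { + return err + } + if out.Name != in.Name { + return fmt.Errorf("round trip mismatch: %q != %q", out.Name, in.Name) + } + return nil +}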
+ +func (m *JobSpec) Reset() { *m = JobSpec{} } +func (m *JobSpec) String() string { return proto.CompactTextString(m) } +func (*JobSpec) ProtoMessage() {} + +func (m *JobStatus) Reset() { *m = JobStatus{} } +func (m *JobStatus) String() string { return proto.CompactTextString(m) } +func (*JobStatus) ProtoMessage() {} + +func (m *LabelSelector) Reset() { *m = LabelSelector{} } +func (m *LabelSelector) String() string { return proto.CompactTextString(m) } +func (*LabelSelector) ProtoMessage() {} + +func (m *LabelSelectorRequirement) Reset() { *m = LabelSelectorRequirement{} } +func (m *LabelSelectorRequirement) String() string { return proto.CompactTextString(m) } +func (*LabelSelectorRequirement) ProtoMessage() {} + +func (m *ListOptions) Reset() { *m = ListOptions{} } +func (m *ListOptions) String() string { return proto.CompactTextString(m) } +func (*ListOptions) ProtoMessage() {} + +func (m *NetworkPolicy) Reset() { *m = NetworkPolicy{} } +func (m *NetworkPolicy) String() string { return proto.CompactTextString(m) } +func (*NetworkPolicy) ProtoMessage() {} + +func (m *NetworkPolicyIngressRule) Reset() { *m = NetworkPolicyIngressRule{} } +func (m *NetworkPolicyIngressRule) String() string { return proto.CompactTextString(m) } +func (*NetworkPolicyIngressRule) ProtoMessage() {} + +func (m *NetworkPolicyList) Reset() { *m = NetworkPolicyList{} } +func (m *NetworkPolicyList) String() string { return proto.CompactTextString(m) } +func (*NetworkPolicyList) ProtoMessage() {} + +func (m *NetworkPolicyPeer) Reset() { *m = NetworkPolicyPeer{} } +func (m *NetworkPolicyPeer) String() string { return proto.CompactTextString(m) } +func (*NetworkPolicyPeer) ProtoMessage() {} + +func (m *NetworkPolicyPort) Reset() { *m = NetworkPolicyPort{} } +func (m *NetworkPolicyPort) String() string { return proto.CompactTextString(m) } +func (*NetworkPolicyPort) ProtoMessage() {} + +func (m *NetworkPolicySpec) Reset() { *m = NetworkPolicySpec{} } +func (m *NetworkPolicySpec) String() string { return proto.CompactTextString(m) } +func (*NetworkPolicySpec) ProtoMessage() {} + +func (m *PodSecurityPolicy) Reset() { *m = PodSecurityPolicy{} } +func (m *PodSecurityPolicy) String() string { return proto.CompactTextString(m) } +func (*PodSecurityPolicy) ProtoMessage() {} + +func (m *PodSecurityPolicyList) Reset() { *m = PodSecurityPolicyList{} } +func (m *PodSecurityPolicyList) String() string { return proto.CompactTextString(m) } +func (*PodSecurityPolicyList) ProtoMessage() {} + +func (m *PodSecurityPolicySpec) Reset() { *m = PodSecurityPolicySpec{} } +func (m *PodSecurityPolicySpec) String() string { return proto.CompactTextString(m) } +func (*PodSecurityPolicySpec) ProtoMessage() {} + +func (m *ReplicaSet) Reset() { *m = ReplicaSet{} } +func (m *ReplicaSet) String() string { return proto.CompactTextString(m) } +func (*ReplicaSet) ProtoMessage() {} + +func (m *ReplicaSetList) Reset() { *m = ReplicaSetList{} } +func (m *ReplicaSetList) String() string { return proto.CompactTextString(m) } +func (*ReplicaSetList) ProtoMessage() {} + +func (m *ReplicaSetSpec) Reset() { *m = ReplicaSetSpec{} } +func (m *ReplicaSetSpec) String() string { return proto.CompactTextString(m) } +func (*ReplicaSetSpec) ProtoMessage() {} + +func (m *ReplicaSetStatus) Reset() { *m = ReplicaSetStatus{} } +func (m *ReplicaSetStatus) String() string { return proto.CompactTextString(m) } +func (*ReplicaSetStatus) ProtoMessage() {} + +func (m *ReplicationControllerDummy) Reset() { *m = ReplicationControllerDummy{} } +func (m 
*ReplicationControllerDummy) String() string { return proto.CompactTextString(m) } +func (*ReplicationControllerDummy) ProtoMessage() {} + +func (m *RollbackConfig) Reset() { *m = RollbackConfig{} } +func (m *RollbackConfig) String() string { return proto.CompactTextString(m) } +func (*RollbackConfig) ProtoMessage() {} + +func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} } +func (m *RollingUpdateDeployment) String() string { return proto.CompactTextString(m) } +func (*RollingUpdateDeployment) ProtoMessage() {} + +func (m *RunAsUserStrategyOptions) Reset() { *m = RunAsUserStrategyOptions{} } +func (m *RunAsUserStrategyOptions) String() string { return proto.CompactTextString(m) } +func (*RunAsUserStrategyOptions) ProtoMessage() {} + +func (m *SELinuxStrategyOptions) Reset() { *m = SELinuxStrategyOptions{} } +func (m *SELinuxStrategyOptions) String() string { return proto.CompactTextString(m) } +func (*SELinuxStrategyOptions) ProtoMessage() {} + +func (m *Scale) Reset() { *m = Scale{} } +func (m *Scale) String() string { return proto.CompactTextString(m) } +func (*Scale) ProtoMessage() {} + +func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } +func (m *ScaleSpec) String() string { return proto.CompactTextString(m) } +func (*ScaleSpec) ProtoMessage() {} + +func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } +func (m *ScaleStatus) String() string { return proto.CompactTextString(m) } +func (*ScaleStatus) ProtoMessage() {} + +func (m *SubresourceReference) Reset() { *m = SubresourceReference{} } +func (m *SubresourceReference) String() string { return proto.CompactTextString(m) } +func (*SubresourceReference) ProtoMessage() {} + +func (m *SupplementalGroupsStrategyOptions) Reset() { *m = SupplementalGroupsStrategyOptions{} } +func (m *SupplementalGroupsStrategyOptions) String() string { return proto.CompactTextString(m) } +func (*SupplementalGroupsStrategyOptions) ProtoMessage() {} + +func (m *ThirdPartyResource) Reset() { *m = ThirdPartyResource{} } +func (m *ThirdPartyResource) String() string { return proto.CompactTextString(m) } +func (*ThirdPartyResource) ProtoMessage() {} + +func (m *ThirdPartyResourceData) Reset() { *m = ThirdPartyResourceData{} } +func (m *ThirdPartyResourceData) String() string { return proto.CompactTextString(m) } +func (*ThirdPartyResourceData) ProtoMessage() {} + +func (m *ThirdPartyResourceDataList) Reset() { *m = ThirdPartyResourceDataList{} } +func (m *ThirdPartyResourceDataList) String() string { return proto.CompactTextString(m) } +func (*ThirdPartyResourceDataList) ProtoMessage() {} + +func (m *ThirdPartyResourceList) Reset() { *m = ThirdPartyResourceList{} } +func (m *ThirdPartyResourceList) String() string { return proto.CompactTextString(m) } +func (*ThirdPartyResourceList) ProtoMessage() {} + +func init() { + proto.RegisterType((*APIVersion)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.APIVersion") + proto.RegisterType((*CPUTargetUtilization)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.CPUTargetUtilization") + proto.RegisterType((*CustomMetricCurrentStatus)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.CustomMetricCurrentStatus") + proto.RegisterType((*CustomMetricCurrentStatusList)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.CustomMetricCurrentStatusList") + proto.RegisterType((*CustomMetricTarget)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.CustomMetricTarget") + proto.RegisterType((*CustomMetricTargetList)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.CustomMetricTargetList") + 
proto.RegisterType((*DaemonSet)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.DaemonSet") + proto.RegisterType((*DaemonSetList)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.DaemonSetList") + proto.RegisterType((*DaemonSetSpec)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.DaemonSetSpec") + proto.RegisterType((*DaemonSetStatus)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.DaemonSetStatus") + proto.RegisterType((*Deployment)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.Deployment") + proto.RegisterType((*DeploymentList)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.DeploymentList") + proto.RegisterType((*DeploymentRollback)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.DeploymentRollback") + proto.RegisterType((*DeploymentSpec)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.DeploymentSpec") + proto.RegisterType((*DeploymentStatus)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.DeploymentStatus") + proto.RegisterType((*DeploymentStrategy)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.DeploymentStrategy") + proto.RegisterType((*ExportOptions)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.ExportOptions") + proto.RegisterType((*FSGroupStrategyOptions)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.FSGroupStrategyOptions") + proto.RegisterType((*HTTPIngressPath)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.HTTPIngressPath") + proto.RegisterType((*HTTPIngressRuleValue)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.HTTPIngressRuleValue") + proto.RegisterType((*HorizontalPodAutoscaler)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.HorizontalPodAutoscaler") + proto.RegisterType((*HorizontalPodAutoscalerList)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.HorizontalPodAutoscalerList") + proto.RegisterType((*HorizontalPodAutoscalerSpec)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.HorizontalPodAutoscalerSpec") + proto.RegisterType((*HorizontalPodAutoscalerStatus)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.HorizontalPodAutoscalerStatus") + proto.RegisterType((*HostPortRange)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.HostPortRange") + proto.RegisterType((*IDRange)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.IDRange") + proto.RegisterType((*Ingress)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.Ingress") + proto.RegisterType((*IngressBackend)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.IngressBackend") + proto.RegisterType((*IngressList)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.IngressList") + proto.RegisterType((*IngressRule)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.IngressRule") + proto.RegisterType((*IngressRuleValue)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.IngressRuleValue") + proto.RegisterType((*IngressSpec)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.IngressSpec") + proto.RegisterType((*IngressStatus)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.IngressStatus") + proto.RegisterType((*IngressTLS)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.IngressTLS") + proto.RegisterType((*Job)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.Job") + proto.RegisterType((*JobCondition)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.JobCondition") + proto.RegisterType((*JobList)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.JobList") + proto.RegisterType((*JobSpec)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.JobSpec") + proto.RegisterType((*JobStatus)(nil), 
"k8s.io.kubernetes.pkg.apis.extensions.v1beta1.JobStatus") + proto.RegisterType((*LabelSelector)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.LabelSelector") + proto.RegisterType((*LabelSelectorRequirement)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.LabelSelectorRequirement") + proto.RegisterType((*ListOptions)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.ListOptions") + proto.RegisterType((*NetworkPolicy)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.NetworkPolicy") + proto.RegisterType((*NetworkPolicyIngressRule)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.NetworkPolicyIngressRule") + proto.RegisterType((*NetworkPolicyList)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.NetworkPolicyList") + proto.RegisterType((*NetworkPolicyPeer)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.NetworkPolicyPeer") + proto.RegisterType((*NetworkPolicyPort)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.NetworkPolicyPort") + proto.RegisterType((*NetworkPolicySpec)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.NetworkPolicySpec") + proto.RegisterType((*PodSecurityPolicy)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.PodSecurityPolicy") + proto.RegisterType((*PodSecurityPolicyList)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.PodSecurityPolicyList") + proto.RegisterType((*PodSecurityPolicySpec)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.PodSecurityPolicySpec") + proto.RegisterType((*ReplicaSet)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.ReplicaSet") + proto.RegisterType((*ReplicaSetList)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.ReplicaSetList") + proto.RegisterType((*ReplicaSetSpec)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.ReplicaSetSpec") + proto.RegisterType((*ReplicaSetStatus)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.ReplicaSetStatus") + proto.RegisterType((*ReplicationControllerDummy)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.ReplicationControllerDummy") + proto.RegisterType((*RollbackConfig)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.RollbackConfig") + proto.RegisterType((*RollingUpdateDeployment)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.RollingUpdateDeployment") + proto.RegisterType((*RunAsUserStrategyOptions)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.RunAsUserStrategyOptions") + proto.RegisterType((*SELinuxStrategyOptions)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.SELinuxStrategyOptions") + proto.RegisterType((*Scale)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.Scale") + proto.RegisterType((*ScaleSpec)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.ScaleSpec") + proto.RegisterType((*ScaleStatus)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.ScaleStatus") + proto.RegisterType((*SubresourceReference)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.SubresourceReference") + proto.RegisterType((*SupplementalGroupsStrategyOptions)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.SupplementalGroupsStrategyOptions") + proto.RegisterType((*ThirdPartyResource)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.ThirdPartyResource") + proto.RegisterType((*ThirdPartyResourceData)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.ThirdPartyResourceData") + proto.RegisterType((*ThirdPartyResourceDataList)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.ThirdPartyResourceDataList") + proto.RegisterType((*ThirdPartyResourceList)(nil), 
"k8s.io.kubernetes.pkg.apis.extensions.v1beta1.ThirdPartyResourceList") +} +func (m *APIVersion) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *APIVersion) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + return i, nil +} + +func (m *CPUTargetUtilization) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *CPUTargetUtilization) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(m.TargetPercentage)) + return i, nil +} + +func (m *CustomMetricCurrentStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *CustomMetricCurrentStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.CurrentValue.Size())) + n1, err := m.CurrentValue.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n1 + return i, nil +} + +func (m *CustomMetricCurrentStatusList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *CustomMetricCurrentStatusList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *CustomMetricTarget) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *CustomMetricTarget) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.TargetValue.Size())) + n2, err := m.TargetValue.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n2 + return i, nil +} + +func (m *CustomMetricTargetList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *CustomMetricTargetList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *DaemonSet) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + 
return nil, err + } + return data[:n], nil +} + +func (m *DaemonSet) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n3, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n3 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n4, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n4 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n5, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n5 + return i, nil +} + +func (m *DaemonSetList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *DaemonSetList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n6, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n6 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *DaemonSetSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *DaemonSetSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Selector != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.Selector.Size())) + n7, err := m.Selector.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n7 + } + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Template.Size())) + n8, err := m.Template.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n8 + return i, nil +} + +func (m *DaemonSetStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *DaemonSetStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(m.CurrentNumberScheduled)) + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(m.NumberMisscheduled)) + data[i] = 0x18 + i++ + i = encodeVarintGenerated(data, i, uint64(m.DesiredNumberScheduled)) + return i, nil +} + +func (m *Deployment) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Deployment) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n9, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n9 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n10, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n10 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n11, err := m.Status.MarshalTo(data[i:]) + if 
err != nil { + return 0, err + } + i += n11 + return i, nil +} + +func (m *DeploymentList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *DeploymentList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n12, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n12 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *DeploymentRollback) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *DeploymentRollback) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + if len(m.UpdatedAnnotations) > 0 { + for k := range m.UpdatedAnnotations { + data[i] = 0x12 + i++ + v := m.UpdatedAnnotations[k] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(v))) + i += copy(data[i:], v) + } + } + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.RollbackTo.Size())) + n13, err := m.RollbackTo.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n13 + return i, nil +} + +func (m *DeploymentSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *DeploymentSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Replicas != nil { + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.Replicas)) + } + if m.Selector != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Selector.Size())) + n14, err := m.Selector.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n14 + } + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Template.Size())) + n15, err := m.Template.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n15 + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Strategy.Size())) + n16, err := m.Strategy.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n16 + data[i] = 0x28 + i++ + i = encodeVarintGenerated(data, i, uint64(m.MinReadySeconds)) + if m.RevisionHistoryLimit != nil { + data[i] = 0x30 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.RevisionHistoryLimit)) + } + data[i] = 0x38 + i++ + if m.Paused { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + if m.RollbackTo != nil { + data[i] = 0x42 + i++ + i = encodeVarintGenerated(data, i, uint64(m.RollbackTo.Size())) + n17, err := m.RollbackTo.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n17 + } + return i, nil +} + +func (m *DeploymentStatus) Marshal() (data []byte, err error) { + size := 
m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *DeploymentStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObservedGeneration)) + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Replicas)) + data[i] = 0x18 + i++ + i = encodeVarintGenerated(data, i, uint64(m.UpdatedReplicas)) + data[i] = 0x20 + i++ + i = encodeVarintGenerated(data, i, uint64(m.AvailableReplicas)) + data[i] = 0x28 + i++ + i = encodeVarintGenerated(data, i, uint64(m.UnavailableReplicas)) + return i, nil +} + +func (m *DeploymentStrategy) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *DeploymentStrategy) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Type))) + i += copy(data[i:], m.Type) + if m.RollingUpdate != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.RollingUpdate.Size())) + n18, err := m.RollingUpdate.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n18 + } + return i, nil +} + +func (m *ExportOptions) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ExportOptions) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + if m.Export { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x10 + i++ + if m.Exact { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + return i, nil +} + +func (m *FSGroupStrategyOptions) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *FSGroupStrategyOptions) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Rule))) + i += copy(data[i:], m.Rule) + if len(m.Ranges) > 0 { + for _, msg := range m.Ranges { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *HTTPIngressPath) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *HTTPIngressPath) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Path))) + i += copy(data[i:], m.Path) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Backend.Size())) + n19, err := m.Backend.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n19 + return i, nil +} + +func (m *HTTPIngressRuleValue) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *HTTPIngressRuleValue) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Paths) > 0 { + for _, msg := 
range m.Paths { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *HorizontalPodAutoscaler) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *HorizontalPodAutoscaler) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n20, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n20 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n21, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n21 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n22, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n22 + return i, nil +} + +func (m *HorizontalPodAutoscalerList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *HorizontalPodAutoscalerList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n23, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n23 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *HorizontalPodAutoscalerSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *HorizontalPodAutoscalerSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ScaleRef.Size())) + n24, err := m.ScaleRef.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n24 + if m.MinReplicas != nil { + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.MinReplicas)) + } + data[i] = 0x18 + i++ + i = encodeVarintGenerated(data, i, uint64(m.MaxReplicas)) + if m.CPUUtilization != nil { + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(m.CPUUtilization.Size())) + n25, err := m.CPUUtilization.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n25 + } + return i, nil +} + +func (m *HorizontalPodAutoscalerStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *HorizontalPodAutoscalerStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ObservedGeneration != nil { + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.ObservedGeneration)) + } + if m.LastScaleTime != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.LastScaleTime.Size())) + n26, err := m.LastScaleTime.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n26 + } + data[i] = 0x18 + i++ + i = 
encodeVarintGenerated(data, i, uint64(m.CurrentReplicas)) + data[i] = 0x20 + i++ + i = encodeVarintGenerated(data, i, uint64(m.DesiredReplicas)) + if m.CurrentCPUUtilizationPercentage != nil { + data[i] = 0x28 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.CurrentCPUUtilizationPercentage)) + } + return i, nil +} + +func (m *HostPortRange) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *HostPortRange) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Min)) + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Max)) + return i, nil +} + +func (m *IDRange) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *IDRange) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Min)) + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Max)) + return i, nil +} + +func (m *Ingress) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Ingress) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n27, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n27 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n28, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n28 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n29, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n29 + return i, nil +} + +func (m *IngressBackend) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *IngressBackend) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ServiceName))) + i += copy(data[i:], m.ServiceName) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.ServicePort.Size())) + n30, err := m.ServicePort.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n30 + return i, nil +} + +func (m *IngressList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *IngressList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n31, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n31 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *IngressRule) Marshal() (data []byte, err 
error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *IngressRule) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Host))) + i += copy(data[i:], m.Host) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.IngressRuleValue.Size())) + n32, err := m.IngressRuleValue.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n32 + return i, nil +} + +func (m *IngressRuleValue) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *IngressRuleValue) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.HTTP != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.HTTP.Size())) + n33, err := m.HTTP.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n33 + } + return i, nil +} + +func (m *IngressSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *IngressSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Backend != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.Backend.Size())) + n34, err := m.Backend.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n34 + } + if len(m.TLS) > 0 { + for _, msg := range m.TLS { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Rules) > 0 { + for _, msg := range m.Rules { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *IngressStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *IngressStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.LoadBalancer.Size())) + n35, err := m.LoadBalancer.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n35 + return i, nil +} + +func (m *IngressTLS) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *IngressTLS) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Hosts) > 0 { + for _, s := range m.Hosts { + data[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.SecretName))) + i += copy(data[i:], m.SecretName) + return i, nil +} + +func (m *Job) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Job) MarshalTo(data []byte) (int, error) { + var i 
int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n36, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n36 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n37, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n37 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n38, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n38 + return i, nil +} + +func (m *JobCondition) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *JobCondition) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Type))) + i += copy(data[i:], m.Type) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Status))) + i += copy(data[i:], m.Status) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.LastProbeTime.Size())) + n39, err := m.LastProbeTime.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n39 + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(m.LastTransitionTime.Size())) + n40, err := m.LastTransitionTime.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n40 + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) + i += copy(data[i:], m.Reason) + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Message))) + i += copy(data[i:], m.Message) + return i, nil +} + +func (m *JobList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *JobList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n41, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n41 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *JobSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *JobSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Parallelism != nil { + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.Parallelism)) + } + if m.Completions != nil { + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.Completions)) + } + if m.ActiveDeadlineSeconds != nil { + data[i] = 0x18 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.ActiveDeadlineSeconds)) + } + if m.Selector != nil { + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Selector.Size())) + n42, err := m.Selector.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n42 + } + if m.AutoSelector != nil { + data[i] = 0x28 + i++ + if *m.AutoSelector { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + } + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, 
uint64(m.Template.Size())) + n43, err := m.Template.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n43 + return i, nil +} + +func (m *JobStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *JobStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.StartTime != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.StartTime.Size())) + n44, err := m.StartTime.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n44 + } + if m.CompletionTime != nil { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.CompletionTime.Size())) + n45, err := m.CompletionTime.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n45 + } + data[i] = 0x20 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Active)) + data[i] = 0x28 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Succeeded)) + data[i] = 0x30 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Failed)) + return i, nil +} + +func (m *LabelSelector) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *LabelSelector) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.MatchLabels) > 0 { + for k := range m.MatchLabels { + data[i] = 0xa + i++ + v := m.MatchLabels[k] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(v))) + i += copy(data[i:], v) + } + } + if len(m.MatchExpressions) > 0 { + for _, msg := range m.MatchExpressions { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *LabelSelectorRequirement) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *LabelSelectorRequirement) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Key))) + i += copy(data[i:], m.Key) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Operator))) + i += copy(data[i:], m.Operator) + if len(m.Values) > 0 { + for _, s := range m.Values { + data[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + return i, nil +} + +func (m *ListOptions) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ListOptions) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + 
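// Editorial annotation, not generator output: 0xa below is the protobuf key byte + // (1<<3)|2, i.e. field 1 with wire type 2 (length-delimited). Every field in these + // generated marshalers follows the same pattern: one key byte, then a varint length + // where the wire type requires it, then the payload bytes. +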
data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.LabelSelector))) + i += copy(data[i:], m.LabelSelector) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.FieldSelector))) + i += copy(data[i:], m.FieldSelector) + data[i] = 0x18 + i++ + if m.Watch { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ResourceVersion))) + i += copy(data[i:], m.ResourceVersion) + if m.TimeoutSeconds != nil { + data[i] = 0x28 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.TimeoutSeconds)) + } + return i, nil +} + +func (m *NetworkPolicy) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *NetworkPolicy) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n46, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n46 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n47, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n47 + return i, nil +} + +func (m *NetworkPolicyIngressRule) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *NetworkPolicyIngressRule) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Ports) > 0 { + for _, msg := range m.Ports { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.From) > 0 { + for _, msg := range m.From { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *NetworkPolicyList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *NetworkPolicyList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n48, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n48 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *NetworkPolicyPeer) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *NetworkPolicyPeer) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.PodSelector != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.PodSelector.Size())) + n49, err := m.PodSelector.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n49 + } + if m.NamespaceSelector != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.NamespaceSelector.Size())) + n50, err := 
m.NamespaceSelector.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n50 + } + return i, nil +} + +func (m *NetworkPolicyPort) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *NetworkPolicyPort) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Protocol != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(*m.Protocol))) + i += copy(data[i:], *m.Protocol) + } + if m.Port != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Port.Size())) + n51, err := m.Port.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n51 + } + return i, nil +} + +func (m *NetworkPolicySpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *NetworkPolicySpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.PodSelector.Size())) + n52, err := m.PodSelector.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n52 + if len(m.Ingress) > 0 { + for _, msg := range m.Ingress { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *PodSecurityPolicy) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodSecurityPolicy) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n53, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n53 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n54, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n54 + return i, nil +} + +func (m *PodSecurityPolicyList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodSecurityPolicyList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n55, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n55 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *PodSecurityPolicySpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodSecurityPolicySpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + if m.Privileged { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + if len(m.DefaultAddCapabilities) > 0 { + for _, s := range m.DefaultAddCapabilities { + data[i] = 0x12 + i++ + l = len(s) + for l 
>= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + if len(m.RequiredDropCapabilities) > 0 { + for _, s := range m.RequiredDropCapabilities { + data[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + if len(m.AllowedCapabilities) > 0 { + for _, s := range m.AllowedCapabilities { + data[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + if len(m.Volumes) > 0 { + for _, s := range m.Volumes { + data[i] = 0x2a + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + data[i] = 0x30 + i++ + if m.HostNetwork { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + if len(m.HostPorts) > 0 { + for _, msg := range m.HostPorts { + data[i] = 0x3a + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + data[i] = 0x40 + i++ + if m.HostPID { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x48 + i++ + if m.HostIPC { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x52 + i++ + i = encodeVarintGenerated(data, i, uint64(m.SELinux.Size())) + n56, err := m.SELinux.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n56 + data[i] = 0x5a + i++ + i = encodeVarintGenerated(data, i, uint64(m.RunAsUser.Size())) + n57, err := m.RunAsUser.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n57 + data[i] = 0x62 + i++ + i = encodeVarintGenerated(data, i, uint64(m.SupplementalGroups.Size())) + n58, err := m.SupplementalGroups.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n58 + data[i] = 0x6a + i++ + i = encodeVarintGenerated(data, i, uint64(m.FSGroup.Size())) + n59, err := m.FSGroup.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n59 + data[i] = 0x70 + i++ + if m.ReadOnlyRootFilesystem { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + return i, nil +} + +func (m *ReplicaSet) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ReplicaSet) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n60, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n60 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n61, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n61 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n62, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n62 + return i, nil +} + +func (m *ReplicaSetList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ReplicaSetList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n63, err := 
m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n63 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ReplicaSetSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ReplicaSetSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Replicas != nil { + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.Replicas)) + } + if m.Selector != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Selector.Size())) + n64, err := m.Selector.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n64 + } + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Template.Size())) + n65, err := m.Template.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n65 + return i, nil +} + +func (m *ReplicaSetStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ReplicaSetStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Replicas)) + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(m.FullyLabeledReplicas)) + data[i] = 0x18 + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObservedGeneration)) + return i, nil +} + +func (m *ReplicationControllerDummy) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ReplicationControllerDummy) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *RollbackConfig) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *RollbackConfig) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Revision)) + return i, nil +} + +func (m *RollingUpdateDeployment) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *RollingUpdateDeployment) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.MaxUnavailable != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.MaxUnavailable.Size())) + n66, err := m.MaxUnavailable.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n66 + } + if m.MaxSurge != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.MaxSurge.Size())) + n67, err := m.MaxSurge.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n67 + } + return i, nil +} + +func (m *RunAsUserStrategyOptions) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m 
*RunAsUserStrategyOptions) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Rule))) + i += copy(data[i:], m.Rule) + if len(m.Ranges) > 0 { + for _, msg := range m.Ranges { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *SELinuxStrategyOptions) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *SELinuxStrategyOptions) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Rule))) + i += copy(data[i:], m.Rule) + if m.SELinuxOptions != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.SELinuxOptions.Size())) + n68, err := m.SELinuxOptions.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n68 + } + return i, nil +} + +func (m *Scale) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Scale) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n69, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n69 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n70, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n70 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n71, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n71 + return i, nil +} + +func (m *ScaleSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ScaleSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Replicas)) + return i, nil +} + +func (m *ScaleStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ScaleStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Replicas)) + if len(m.Selector) > 0 { + for k := range m.Selector { + data[i] = 0x12 + i++ + v := m.Selector[k] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(v))) + i += copy(data[i:], v) + } + } + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.TargetSelector))) + i += copy(data[i:], m.TargetSelector) + return i, nil +} + +func (m *SubresourceReference) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if 
err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *SubresourceReference) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Kind))) + i += copy(data[i:], m.Kind) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.APIVersion))) + i += copy(data[i:], m.APIVersion) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Subresource))) + i += copy(data[i:], m.Subresource) + return i, nil +} + +func (m *SupplementalGroupsStrategyOptions) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *SupplementalGroupsStrategyOptions) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Rule))) + i += copy(data[i:], m.Rule) + if len(m.Ranges) > 0 { + for _, msg := range m.Ranges { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ThirdPartyResource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ThirdPartyResource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n72, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n72 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Description))) + i += copy(data[i:], m.Description) + if len(m.Versions) > 0 { + for _, msg := range m.Versions { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ThirdPartyResourceData) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ThirdPartyResourceData) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n73, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n73 + if m.Data != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Data))) + i += copy(data[i:], m.Data) + } + return i, nil +} + +func (m *ThirdPartyResourceDataList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ThirdPartyResourceDataList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n74, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n74 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, 
i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ThirdPartyResourceList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ThirdPartyResourceList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n75, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n75 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func encodeFixed64Generated(data []byte, offset int, v uint64) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + data[offset+4] = uint8(v >> 32) + data[offset+5] = uint8(v >> 40) + data[offset+6] = uint8(v >> 48) + data[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(data []byte, offset int, v uint32) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(data []byte, offset int, v uint64) int { + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return offset + 1 +} +func (m *APIVersion) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *CPUTargetUtilization) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.TargetPercentage)) + return n +} + +func (m *CustomMetricCurrentStatus) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.CurrentValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *CustomMetricCurrentStatusList) Size() (n int) { + var l int + _ = l + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *CustomMetricTarget) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.TargetValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *CustomMetricTargetList) Size() (n int) { + var l int + _ = l + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *DaemonSet) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DaemonSetList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *DaemonSetSpec) Size() (n int) { + var l int + _ = l + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DaemonSetStatus) Size() (n 
int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.CurrentNumberScheduled)) + n += 1 + sovGenerated(uint64(m.NumberMisscheduled)) + n += 1 + sovGenerated(uint64(m.DesiredNumberScheduled)) + return n +} + +func (m *Deployment) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DeploymentList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *DeploymentRollback) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.UpdatedAnnotations) > 0 { + for k, v := range m.UpdatedAnnotations { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + l = m.RollbackTo.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DeploymentSpec) Size() (n int) { + var l int + _ = l + if m.Replicas != nil { + n += 1 + sovGenerated(uint64(*m.Replicas)) + } + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Strategy.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.MinReadySeconds)) + if m.RevisionHistoryLimit != nil { + n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit)) + } + n += 2 + if m.RollbackTo != nil { + l = m.RollbackTo.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *DeploymentStatus) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + n += 1 + sovGenerated(uint64(m.Replicas)) + n += 1 + sovGenerated(uint64(m.UpdatedReplicas)) + n += 1 + sovGenerated(uint64(m.AvailableReplicas)) + n += 1 + sovGenerated(uint64(m.UnavailableReplicas)) + return n +} + +func (m *DeploymentStrategy) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.RollingUpdate != nil { + l = m.RollingUpdate.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ExportOptions) Size() (n int) { + var l int + _ = l + n += 2 + n += 2 + return n +} + +func (m *FSGroupStrategyOptions) Size() (n int) { + var l int + _ = l + l = len(m.Rule) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Ranges) > 0 { + for _, e := range m.Ranges { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *HTTPIngressPath) Size() (n int) { + var l int + _ = l + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Backend.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *HTTPIngressRuleValue) Size() (n int) { + var l int + _ = l + if len(m.Paths) > 0 { + for _, e := range m.Paths { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *HorizontalPodAutoscaler) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *HorizontalPodAutoscalerList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l 
+ sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *HorizontalPodAutoscalerSpec) Size() (n int) { + var l int + _ = l + l = m.ScaleRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.MinReplicas != nil { + n += 1 + sovGenerated(uint64(*m.MinReplicas)) + } + n += 1 + sovGenerated(uint64(m.MaxReplicas)) + if m.CPUUtilization != nil { + l = m.CPUUtilization.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *HorizontalPodAutoscalerStatus) Size() (n int) { + var l int + _ = l + if m.ObservedGeneration != nil { + n += 1 + sovGenerated(uint64(*m.ObservedGeneration)) + } + if m.LastScaleTime != nil { + l = m.LastScaleTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 1 + sovGenerated(uint64(m.CurrentReplicas)) + n += 1 + sovGenerated(uint64(m.DesiredReplicas)) + if m.CurrentCPUUtilizationPercentage != nil { + n += 1 + sovGenerated(uint64(*m.CurrentCPUUtilizationPercentage)) + } + return n +} + +func (m *HostPortRange) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Min)) + n += 1 + sovGenerated(uint64(m.Max)) + return n +} + +func (m *IDRange) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Min)) + n += 1 + sovGenerated(uint64(m.Max)) + return n +} + +func (m *Ingress) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *IngressBackend) Size() (n int) { + var l int + _ = l + l = len(m.ServiceName) + n += 1 + l + sovGenerated(uint64(l)) + l = m.ServicePort.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *IngressList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *IngressRule) Size() (n int) { + var l int + _ = l + l = len(m.Host) + n += 1 + l + sovGenerated(uint64(l)) + l = m.IngressRuleValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *IngressRuleValue) Size() (n int) { + var l int + _ = l + if m.HTTP != nil { + l = m.HTTP.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *IngressSpec) Size() (n int) { + var l int + _ = l + if m.Backend != nil { + l = m.Backend.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.TLS) > 0 { + for _, e := range m.TLS { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *IngressStatus) Size() (n int) { + var l int + _ = l + l = m.LoadBalancer.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *IngressTLS) Size() (n int) { + var l int + _ = l + if len(m.Hosts) > 0 { + for _, s := range m.Hosts { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.SecretName) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Job) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *JobCondition) Size() (n int) { + var l int + _ = 
l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastProbeTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *JobList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *JobSpec) Size() (n int) { + var l int + _ = l + if m.Parallelism != nil { + n += 1 + sovGenerated(uint64(*m.Parallelism)) + } + if m.Completions != nil { + n += 1 + sovGenerated(uint64(*m.Completions)) + } + if m.ActiveDeadlineSeconds != nil { + n += 1 + sovGenerated(uint64(*m.ActiveDeadlineSeconds)) + } + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.AutoSelector != nil { + n += 2 + } + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *JobStatus) Size() (n int) { + var l int + _ = l + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.StartTime != nil { + l = m.StartTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.CompletionTime != nil { + l = m.CompletionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 1 + sovGenerated(uint64(m.Active)) + n += 1 + sovGenerated(uint64(m.Succeeded)) + n += 1 + sovGenerated(uint64(m.Failed)) + return n +} + +func (m *LabelSelector) Size() (n int) { + var l int + _ = l + if len(m.MatchLabels) > 0 { + for k, v := range m.MatchLabels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.MatchExpressions) > 0 { + for _, e := range m.MatchExpressions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *LabelSelectorRequirement) Size() (n int) { + var l int + _ = l + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Operator) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Values) > 0 { + for _, s := range m.Values { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ListOptions) Size() (n int) { + var l int + _ = l + l = len(m.LabelSelector) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FieldSelector) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + l = len(m.ResourceVersion) + n += 1 + l + sovGenerated(uint64(l)) + if m.TimeoutSeconds != nil { + n += 1 + sovGenerated(uint64(*m.TimeoutSeconds)) + } + return n +} + +func (m *NetworkPolicy) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *NetworkPolicyIngressRule) Size() (n int) { + var l int + _ = l + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.From) > 0 { + for _, e := range m.From { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *NetworkPolicyList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := 
range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *NetworkPolicyPeer) Size() (n int) { + var l int + _ = l + if m.PodSelector != nil { + l = m.PodSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NamespaceSelector != nil { + l = m.NamespaceSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *NetworkPolicyPort) Size() (n int) { + var l int + _ = l + if m.Protocol != nil { + l = len(*m.Protocol) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Port != nil { + l = m.Port.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *NetworkPolicySpec) Size() (n int) { + var l int + _ = l + l = m.PodSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Ingress) > 0 { + for _, e := range m.Ingress { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodSecurityPolicy) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodSecurityPolicyList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodSecurityPolicySpec) Size() (n int) { + var l int + _ = l + n += 2 + if len(m.DefaultAddCapabilities) > 0 { + for _, s := range m.DefaultAddCapabilities { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.RequiredDropCapabilities) > 0 { + for _, s := range m.RequiredDropCapabilities { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.AllowedCapabilities) > 0 { + for _, s := range m.AllowedCapabilities { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Volumes) > 0 { + for _, s := range m.Volumes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 2 + if len(m.HostPorts) > 0 { + for _, e := range m.HostPorts { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 2 + n += 2 + l = m.SELinux.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.RunAsUser.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.SupplementalGroups.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.FSGroup.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n +} + +func (m *ReplicaSet) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ReplicaSetList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ReplicaSetSpec) Size() (n int) { + var l int + _ = l + if m.Replicas != nil { + n += 1 + sovGenerated(uint64(*m.Replicas)) + } + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ReplicaSetStatus) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Replicas)) + n += 1 + sovGenerated(uint64(m.FullyLabeledReplicas)) + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + return n +} + +func (m *ReplicationControllerDummy) 
Size() (n int) { + var l int + _ = l + return n +} + +func (m *RollbackConfig) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Revision)) + return n +} + +func (m *RollingUpdateDeployment) Size() (n int) { + var l int + _ = l + if m.MaxUnavailable != nil { + l = m.MaxUnavailable.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.MaxSurge != nil { + l = m.MaxSurge.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *RunAsUserStrategyOptions) Size() (n int) { + var l int + _ = l + l = len(m.Rule) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Ranges) > 0 { + for _, e := range m.Ranges { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *SELinuxStrategyOptions) Size() (n int) { + var l int + _ = l + l = len(m.Rule) + n += 1 + l + sovGenerated(uint64(l)) + if m.SELinuxOptions != nil { + l = m.SELinuxOptions.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *Scale) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ScaleSpec) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Replicas)) + return n +} + +func (m *ScaleStatus) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Replicas)) + if len(m.Selector) > 0 { + for k, v := range m.Selector { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + l = len(m.TargetSelector) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SubresourceReference) Size() (n int) { + var l int + _ = l + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.APIVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Subresource) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SupplementalGroupsStrategyOptions) Size() (n int) { + var l int + _ = l + l = len(m.Rule) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Ranges) > 0 { + for _, e := range m.Ranges { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ThirdPartyResource) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Description) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Versions) > 0 { + for _, e := range m.Versions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ThirdPartyResourceData) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Data != nil { + l = len(m.Data) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ThirdPartyResourceDataList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ThirdPartyResourceList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x 
== 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *APIVersion) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: APIVersion: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: APIVersion: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CPUTargetUtilization) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CPUTargetUtilization: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CPUTargetUtilization: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetPercentage", wireType) + } + m.TargetPercentage = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.TargetPercentage |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CustomMetricCurrentStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= 
(uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomMetricCurrentStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomMetricCurrentStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.CurrentValue.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CustomMetricCurrentStatusList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomMetricCurrentStatusList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomMetricCurrentStatusList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, CustomMetricCurrentStatus{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated 
+ } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CustomMetricTarget) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomMetricTarget: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomMetricTarget: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.TargetValue.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CustomMetricTargetList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomMetricTargetList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomMetricTargetList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, CustomMetricTarget{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DaemonSet) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DaemonSet: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DaemonSet: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DaemonSetList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + 
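// Editorial annotation, not generator output: each iteration of the loop below + // decodes one varint key, splits it into fieldNum (wire >> 3) and wireType + // (wire & 0x7), and switches on fieldNum; unrecognized fields are skipped via + // skipGenerated so unknown data is tolerated. +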
for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DaemonSetList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DaemonSetList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, DaemonSet{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DaemonSetSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DaemonSetSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DaemonSetSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &LabelSelector{} + } + if err := m.Selector.Unmarshal(data[iNdEx:postIndex]); err != nil { + 
return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Template.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DaemonSetStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DaemonSetStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DaemonSetStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentNumberScheduled", wireType) + } + m.CurrentNumberScheduled = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.CurrentNumberScheduled |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NumberMisscheduled", wireType) + } + m.NumberMisscheduled = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.NumberMisscheduled |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DesiredNumberScheduled", wireType) + } + m.DesiredNumberScheduled = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.DesiredNumberScheduled |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Deployment) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Deployment: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Deployment: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] 
+ iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Deployment{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentRollback) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentRollback: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentRollback: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedAnnotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(data[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + if m.UpdatedAnnotations == nil { + m.UpdatedAnnotations = make(map[string]string) + } + m.UpdatedAnnotations[mapkey] = mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RollbackTo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.RollbackTo.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + 
break + } + } + m.Replicas = &v + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &LabelSelector{} + } + if err := m.Selector.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Template.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Strategy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Strategy.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) + } + m.MinReadySeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.MinReadySeconds |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RevisionHistoryLimit", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.RevisionHistoryLimit = &v + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Paused", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Paused = bool(v != 0) + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RollbackTo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 
0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RollbackTo == nil { + m.RollbackTo = &RollbackConfig{} + } + if err := m.RollbackTo.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.ObservedGeneration |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Replicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedReplicas", wireType) + } + m.UpdatedReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.UpdatedReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AvailableReplicas", wireType) + } + m.AvailableReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.AvailableReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UnavailableReplicas", wireType) + } + m.UnavailableReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.UnavailableReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return 
io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentStrategy) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentStrategy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = DeploymentStrategyType(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RollingUpdate", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RollingUpdate == nil { + m.RollingUpdate = &RollingUpdateDeployment{} + } + if err := m.RollingUpdate.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExportOptions) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExportOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExportOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Export", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + 
m.Export = bool(v != 0) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Exact", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Exact = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FSGroupStrategyOptions) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FSGroupStrategyOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FSGroupStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rule = FSGroupStrategyType(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ranges = append(m.Ranges, IDRange{}) + if err := m.Ranges[len(m.Ranges)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HTTPIngressPath) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType 
== 4 { + return fmt.Errorf("proto: HTTPIngressPath: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HTTPIngressPath: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Backend", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Backend.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HTTPIngressRuleValue) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HTTPIngressRuleValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HTTPIngressRuleValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Paths = append(m.Paths, HTTPIngressPath{}) + if err := m.Paths[len(m.Paths)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HorizontalPodAutoscaler) 
Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HorizontalPodAutoscaler: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HorizontalPodAutoscaler: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HorizontalPodAutoscalerList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HorizontalPodAutoscalerList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HorizontalPodAutoscalerList: illegal tag %d (wire type %d)", 
fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, HorizontalPodAutoscaler{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HorizontalPodAutoscalerSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HorizontalPodAutoscalerSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HorizontalPodAutoscalerSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ScaleRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ScaleRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinReplicas", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.MinReplicas = &v + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxReplicas", wireType) + } + m.MaxReplicas = 0 
+ for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.MaxReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CPUUtilization", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CPUUtilization == nil { + m.CPUUtilization = &CPUTargetUtilization{} + } + if err := m.CPUUtilization.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HorizontalPodAutoscalerStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HorizontalPodAutoscalerStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HorizontalPodAutoscalerStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ObservedGeneration = &v + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastScaleTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LastScaleTime == nil { + m.LastScaleTime = &k8s_io_kubernetes_pkg_api_unversioned.Time{} + } + if err := m.LastScaleTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentReplicas", wireType) + } + m.CurrentReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.CurrentReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + 
break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DesiredReplicas", wireType) + } + m.DesiredReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.DesiredReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentCPUUtilizationPercentage", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.CurrentCPUUtilizationPercentage = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HostPortRange) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HostPortRange: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HostPortRange: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Min", wireType) + } + m.Min = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Min |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType) + } + m.Max = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Max |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IDRange) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IDRange: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IDRange: illegal tag %d (wire type %d)", fieldNum, 
wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Min", wireType) + } + m.Min = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Min |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType) + } + m.Max = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Max |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Ingress) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Ingress: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Ingress: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := 
m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressBackend) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressBackend: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressBackend: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServicePort", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ServicePort.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + 
if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Ingress{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressRule) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Host = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IngressRuleValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.IngressRuleValue.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return 
ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressRuleValue) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressRuleValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressRuleValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HTTP", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.HTTP == nil { + m.HTTP = &HTTPIngressRuleValue{} + } + if err := m.HTTP.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Backend", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Backend == nil { + m.Backend = &IngressBackend{} + } + if err := m.Backend.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TLS", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= 
(int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TLS = append(m.TLS, IngressTLS{}) + if err := m.TLS[len(m.TLS)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rules = append(m.Rules, IngressRule{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancer", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LoadBalancer.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressTLS) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressTLS: wiretype end group for non-group") + } + if fieldNum <= 0 
{ + return fmt.Errorf("proto: IngressTLS: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hosts", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hosts = append(m.Hosts, string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SecretName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Job) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Job: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Job: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := 
m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JobCondition) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JobCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JobCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = JobConditionType(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_kubernetes_pkg_api_v1.ConditionStatus(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastProbeTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if 
err := m.LastProbeTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JobList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JobList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JobList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := 
m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Job{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JobSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JobSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JobSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Parallelism", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Parallelism = &v + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Completions", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Completions = &v + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ActiveDeadlineSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ActiveDeadlineSeconds = &v + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &LabelSelector{} + } + if err := 
m.Selector.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AutoSelector", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.AutoSelector = &b + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Template.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JobStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JobStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JobStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, JobCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StartTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.StartTime == nil { + m.StartTime = &k8s_io_kubernetes_pkg_api_unversioned.Time{} + } + if err := m.StartTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: 
+ if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CompletionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CompletionTime == nil { + m.CompletionTime = &k8s_io_kubernetes_pkg_api_unversioned.Time{} + } + if err := m.CompletionTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Active", wireType) + } + m.Active = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Active |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Succeeded", wireType) + } + m.Succeeded = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Succeeded |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Failed", wireType) + } + m.Failed = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Failed |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LabelSelector) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LabelSelector: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LabelSelector: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchLabels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } 
+ b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(data[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + if m.MatchLabels == nil { + m.MatchLabels = make(map[string]string) + } + m.MatchLabels[mapkey] = mapvalue + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchExpressions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MatchExpressions = append(m.MatchExpressions, LabelSelectorRequirement{}) + if err := m.MatchExpressions[len(m.MatchExpressions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LabelSelectorRequirement) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LabelSelectorRequirement: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LabelSelectorRequirement: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + 
return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Operator", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Operator = LabelSelectorOperator(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Values = append(m.Values, string(data[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListOptions) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LabelSelector", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LabelSelector = string(data[iNdEx:postIndex]) + iNdEx = postIndex + 
case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FieldSelector", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FieldSelector = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Watch", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Watch = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceVersion = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.TimeoutSeconds = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkPolicy) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkPolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkPolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + 
msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkPolicyIngressRule) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkPolicyIngressRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkPolicyIngressRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ports = append(m.Ports, NetworkPolicyPort{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.From = append(m.From, NetworkPolicyPeer{}) + if err := m.From[len(m.From)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkPolicyList) Unmarshal(data 
[]byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkPolicyList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkPolicyList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, NetworkPolicy{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkPolicyPeer) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkPolicyPeer: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkPolicyPeer: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PodSelector == nil { + m.PodSelector = 
&LabelSelector{} + } + if err := m.PodSelector.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NamespaceSelector == nil { + m.NamespaceSelector = &LabelSelector{} + } + if err := m.NamespaceSelector.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkPolicyPort) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkPolicyPort: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkPolicyPort: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := k8s_io_kubernetes_pkg_api_v1.Protocol(data[iNdEx:postIndex]) + m.Protocol = &s + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Port == nil { + m.Port = &k8s_io_kubernetes_pkg_util_intstr.IntOrString{} + } + if err := m.Port.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + 
} + return nil +} +func (m *NetworkPolicySpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkPolicySpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkPolicySpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.PodSelector.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ingress", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ingress = append(m.Ingress, NetworkPolicyIngressRule{}) + if err := m.Ingress[len(m.Ingress)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodSecurityPolicy) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodSecurityPolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodSecurityPolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + 
return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodSecurityPolicyList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodSecurityPolicyList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodSecurityPolicyList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, PodSecurityPolicy{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodSecurityPolicySpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var 
wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodSecurityPolicySpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodSecurityPolicySpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Privileged", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Privileged = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DefaultAddCapabilities", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DefaultAddCapabilities = append(m.DefaultAddCapabilities, k8s_io_kubernetes_pkg_api_v1.Capability(data[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequiredDropCapabilities", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RequiredDropCapabilities = append(m.RequiredDropCapabilities, k8s_io_kubernetes_pkg_api_v1.Capability(data[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowedCapabilities", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AllowedCapabilities = append(m.AllowedCapabilities, k8s_io_kubernetes_pkg_api_v1.Capability(data[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Volumes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + 
intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Volumes = append(m.Volumes, FSType(data[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HostNetwork", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.HostNetwork = bool(v != 0) + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HostPorts", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.HostPorts = append(m.HostPorts, HostPortRange{}) + if err := m.HostPorts[len(m.HostPorts)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HostPID", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.HostPID = bool(v != 0) + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HostIPC", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.HostIPC = bool(v != 0) + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SELinux", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.SELinux.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RunAsUser", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.RunAsUser.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SupplementalGroups", wireType) + } + var msglen int + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.SupplementalGroups.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSGroup", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.FSGroup.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 14: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnlyRootFilesystem", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnlyRootFilesystem = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicaSet) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicaSet: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicaSet: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + 
return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicaSetList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicaSetList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicaSetList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ReplicaSet{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicaSetSpec) Unmarshal(data []byte) error { + l := len(data) + 
iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicaSetSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicaSetSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Replicas = &v + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &LabelSelector{} + } + if err := m.Selector.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Template.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicaSetStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicaSetStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicaSetStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + 
if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Replicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FullyLabeledReplicas", wireType) + } + m.FullyLabeledReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.FullyLabeledReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.ObservedGeneration |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicationControllerDummy) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicationControllerDummy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicationControllerDummy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RollbackConfig) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RollbackConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RollbackConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) + } + m.Revision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Revision |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + 
return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RollingUpdateDeployment) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RollingUpdateDeployment: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RollingUpdateDeployment: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MaxUnavailable == nil { + m.MaxUnavailable = &k8s_io_kubernetes_pkg_util_intstr.IntOrString{} + } + if err := m.MaxUnavailable.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxSurge", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MaxSurge == nil { + m.MaxSurge = &k8s_io_kubernetes_pkg_util_intstr.IntOrString{} + } + if err := m.MaxSurge.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RunAsUserStrategyOptions) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RunAsUserStrategyOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RunAsUserStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + } + var stringLen uint64 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rule = RunAsUserStrategy(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ranges = append(m.Ranges, IDRange{}) + if err := m.Ranges[len(m.Ranges)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SELinuxStrategyOptions) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SELinuxStrategyOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SELinuxStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rule = SELinuxStrategy(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SELinuxOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SELinuxOptions == nil { + m.SELinuxOptions = &k8s_io_kubernetes_pkg_api_v1.SELinuxOptions{} + } + if err := m.SELinuxOptions.Unmarshal(data[iNdEx:postIndex]); err != nil { + return 
err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Scale) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Scale: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Scale: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ScaleSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b 
< 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ScaleSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ScaleSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Replicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ScaleStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ScaleStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ScaleStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Replicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(data[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + if m.Selector == nil { + m.Selector = make(map[string]string) + } + m.Selector[mapkey] = mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetSelector", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TargetSelector = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SubresourceReference) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SubresourceReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SubresourceReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ 
+ stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIVersion = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subresource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subresource = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SupplementalGroupsStrategyOptions) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SupplementalGroupsStrategyOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SupplementalGroupsStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rule = SupplementalGroupsStrategyType(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } 
+ if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ranges = append(m.Ranges, IDRange{}) + if err := m.Ranges[len(m.Ranges)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ThirdPartyResource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ThirdPartyResource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ThirdPartyResource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Description = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Versions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Versions = append(m.Versions, APIVersion{}) + if err := m.Versions[len(m.Versions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if 
skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ThirdPartyResourceData) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ThirdPartyResourceData: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ThirdPartyResourceData: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], data[iNdEx:postIndex]...) 
+ if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ThirdPartyResourceDataList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ThirdPartyResourceDataList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ThirdPartyResourceDataList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ThirdPartyResourceData{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ThirdPartyResourceList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ThirdPartyResourceList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ThirdPartyResourceList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ThirdPartyResource{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(data []byte) (n int, err error) { + l := len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if data[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(data[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) diff --git 
a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/generated.proto b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/generated.proto new file mode 100644 index 000000000000..aa408bb09652 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/generated.proto @@ -0,0 +1,1010 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.kubernetes.pkg.apis.extensions.v1beta1; + +import "k8s.io/kubernetes/pkg/api/resource/generated.proto"; +import "k8s.io/kubernetes/pkg/api/unversioned/generated.proto"; +import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; +import "k8s.io/kubernetes/pkg/runtime/generated.proto"; +import "k8s.io/kubernetes/pkg/util/intstr/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1beta1"; + +// An APIVersion represents a single concrete version of an object model. +message APIVersion { + // Name of this version (e.g. 'v1'). + optional string name = 1; +} + +message CPUTargetUtilization { + // fraction of the requested CPU that should be utilized/used, + // e.g. 70 means that 70% of the requested CPU should be in use. + optional int32 targetPercentage = 1; +} + +message CustomMetricCurrentStatus { + // Custom Metric name. + optional string name = 1; + + // Custom Metric value (average). + optional k8s.io.kubernetes.pkg.api.resource.Quantity value = 2; +} + +message CustomMetricCurrentStatusList { + repeated CustomMetricCurrentStatus items = 1; +} + +// Alpha-level support for Custom Metrics in HPA (as annotations). +message CustomMetricTarget { + // Custom Metric name. + optional string name = 1; + + // Custom Metric value (average). + optional k8s.io.kubernetes.pkg.api.resource.Quantity value = 2; +} + +message CustomMetricTargetList { + repeated CustomMetricTarget items = 1; +} + +// DaemonSet represents the configuration of a daemon set. +message DaemonSet { + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; + + // Spec defines the desired behavior of this daemon set. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + optional DaemonSetSpec spec = 2; + + // Status is the current status of this daemon set. This data may be + // out of date by some window of time. + // Populated by the system. + // Read-only. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + optional DaemonSetStatus status = 3; +} + +// DaemonSetList is a collection of daemon sets. +message DaemonSetList { + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; + + // Items is a list of daemon sets. 
+ repeated DaemonSet items = 2; +} + +// DaemonSetSpec is the specification of a daemon set. +message DaemonSetSpec { + // Selector is a label query over pods that are managed by the daemon set. + // Must match in order to be controlled. + // If empty, defaulted to labels on Pod template. + // More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors + optional LabelSelector selector = 1; + + // Template is the object that describes the pod that will be created. + // The DaemonSet will create exactly one copy of this pod on every node + // that matches the template's node selector (or on every node if no node + // selector is specified). + // More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#pod-template + optional k8s.io.kubernetes.pkg.api.v1.PodTemplateSpec template = 2; +} + +// DaemonSetStatus represents the current status of a daemon set. +message DaemonSetStatus { + // CurrentNumberScheduled is the number of nodes that are running at least 1 + // daemon pod and are supposed to run the daemon pod. + // More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md + optional int32 currentNumberScheduled = 1; + + // NumberMisscheduled is the number of nodes that are running the daemon pod, but are + // not supposed to run the daemon pod. + // More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md + optional int32 numberMisscheduled = 2; + + // DesiredNumberScheduled is the total number of nodes that should be running the daemon + // pod (including nodes correctly running the daemon pod). + // More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md + optional int32 desiredNumberScheduled = 3; +} + +// Deployment enables declarative updates for Pods and ReplicaSets. +message Deployment { + // Standard object metadata. + optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; + + // Specification of the desired behavior of the Deployment. + optional DeploymentSpec spec = 2; + + // Most recently observed status of the Deployment. + optional DeploymentStatus status = 3; +} + +// DeploymentList is a list of Deployments. +message DeploymentList { + // Standard list metadata. + optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; + + // Items is the list of Deployments. + repeated Deployment items = 2; +} + +// DeploymentRollback stores the information required to rollback a deployment. +message DeploymentRollback { + // Required: This must match the Name of a deployment. + optional string name = 1; + + // The annotations to be updated to a deployment + map<string, string> updatedAnnotations = 2; + + // The config of this deployment rollback. + optional RollbackConfig rollbackTo = 3; +} + +// DeploymentSpec is the specification of the desired behavior of the Deployment. +message DeploymentSpec { + // Number of desired pods. This is a pointer to distinguish between explicit + // zero and not specified. Defaults to 1. + optional int32 replicas = 1; + + // Label selector for pods. Existing ReplicaSets whose pods are + // selected by this will be the ones affected by this deployment. + optional LabelSelector selector = 2; + + // Template describes the pods that will be created. + optional k8s.io.kubernetes.pkg.api.v1.PodTemplateSpec template = 3; + + // The deployment strategy to use to replace existing pods with new ones. + optional DeploymentStrategy strategy = 4; + + // Minimum number of seconds for which a newly created pod should be ready + // without any of its container crashing, for it to be considered available.
+  // Defaults to 0 (pod will be considered available as soon as it is ready)
+  optional int32 minReadySeconds = 5;
+
+  // The number of old ReplicaSets to retain to allow rollback.
+  // This is a pointer to distinguish between explicit zero and not specified.
+  optional int32 revisionHistoryLimit = 6;
+
+  // Indicates that the deployment is paused and will not be processed by the
+  // deployment controller.
+  optional bool paused = 7;
+
+  // The config this deployment is rolling back to. Will be cleared after rollback is done.
+  optional RollbackConfig rollbackTo = 8;
+}
+
+// DeploymentStatus is the most recently observed status of the Deployment.
+message DeploymentStatus {
+  // The generation observed by the deployment controller.
+  optional int64 observedGeneration = 1;
+
+  // Total number of non-terminated pods targeted by this deployment (their labels match the selector).
+  optional int32 replicas = 2;
+
+  // Total number of non-terminated pods targeted by this deployment that have the desired template spec.
+  optional int32 updatedReplicas = 3;
+
+  // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.
+  optional int32 availableReplicas = 4;
+
+  // Total number of unavailable pods targeted by this deployment.
+  optional int32 unavailableReplicas = 5;
+}
+
+// DeploymentStrategy describes how to replace existing pods with new ones.
+message DeploymentStrategy {
+  // Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate.
+  optional string type = 1;
+
+  // Rolling update config params. Present only if DeploymentStrategyType =
+  // RollingUpdate.
+  // ---
+  // TODO: Update this to follow our convention for oneOf, whatever we decide it
+  // to be.
+  optional RollingUpdateDeployment rollingUpdate = 2;
+}
+
+// ExportOptions is the query options to the standard REST get call.
+message ExportOptions {
+  // Should this value be exported. Export strips fields that a user cannot specify.
+  optional bool export = 1;
+
+  // Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
+  optional bool exact = 2;
+}
+
+// FSGroupStrategyOptions defines the strategy type and options used to create the strategy.
+message FSGroupStrategyOptions {
+  // Rule is the strategy that will dictate what FSGroup is used in the SecurityContext.
+  optional string rule = 1;
+
+  // Ranges are the allowed ranges of fs groups. If you would like to force a single
+  // fs group then supply a single range with the same start and end.
+  repeated IDRange ranges = 2;
+}
+
+// HTTPIngressPath associates a path regex with a backend. Incoming urls matching
+// the path are forwarded to the backend.
+message HTTPIngressPath {
+  // Path is an extended POSIX regex as defined by IEEE Std 1003.1
+  // (i.e. this follows the egrep/unix syntax, not the perl syntax)
+  // matched against the path of an incoming request. Currently it can
+  // contain characters disallowed from the conventional "path"
+  // part of a URL as defined by RFC 3986. Paths must begin with
+  // a '/'. If unspecified, the path defaults to a catch-all sending
+  // traffic to the backend.
+  optional string path = 1;
+
+  // Backend defines the referenced service endpoint to which the traffic
+  // will be forwarded to.
+  optional IngressBackend backend = 2;
+}
+
+// HTTPIngressRuleValue is a list of http selectors pointing to backends.
+// In the example: http://<host>/<path>?<searchpart> -> backend
+// where parts of the url correspond to RFC 3986, this resource will be used
+// to match against everything after the last '/' and before the first '?'
+// or '#'.
+message HTTPIngressRuleValue {
+  // A collection of paths that map requests to backends.
+  repeated HTTPIngressPath paths = 1;
+}
+
+// configuration of a horizontal pod autoscaler.
+message HorizontalPodAutoscaler {
+  // Standard object metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+  optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1;
+
+  // behaviour of autoscaler. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status.
+  optional HorizontalPodAutoscalerSpec spec = 2;
+
+  // current information about the autoscaler.
+  optional HorizontalPodAutoscalerStatus status = 3;
+}
+
+// list of horizontal pod autoscaler objects.
+message HorizontalPodAutoscalerList {
+  // Standard list metadata.
+  optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1;
+
+  // list of horizontal pod autoscaler objects.
+  repeated HorizontalPodAutoscaler items = 2;
+}
+
+// specification of a horizontal pod autoscaler.
+message HorizontalPodAutoscalerSpec {
+  // reference to Scale subresource; horizontal pod autoscaler will learn the current resource consumption from its status,
+  // and will set the desired number of pods by modifying its spec.
+  optional SubresourceReference scaleRef = 1;
+
+  // lower limit for the number of pods that can be set by the autoscaler, default 1.
+  optional int32 minReplicas = 2;
+
+  // upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas.
+  optional int32 maxReplicas = 3;
+
+  // target average CPU utilization (represented as a percentage of requested CPU) over all the pods;
+  // if not specified it defaults to the target CPU utilization at 80% of the requested resources.
+  optional CPUTargetUtilization cpuUtilization = 4;
+}
+
+// current status of a horizontal pod autoscaler
+message HorizontalPodAutoscalerStatus {
+  // most recent generation observed by this autoscaler.
+  optional int64 observedGeneration = 1;
+
+  // last time the HorizontalPodAutoscaler scaled the number of pods;
+  // used by the autoscaler to control how often the number of pods is changed.
+  optional k8s.io.kubernetes.pkg.api.unversioned.Time lastScaleTime = 2;
+
+  // current number of replicas of pods managed by this autoscaler.
+  optional int32 currentReplicas = 3;
+
+  // desired number of replicas of pods managed by this autoscaler.
+  optional int32 desiredReplicas = 4;
+
+  // current average CPU utilization over all pods, represented as a percentage of requested CPU,
+  // e.g. 70 means that an average pod is now using 70% of its requested CPU.
+  optional int32 currentCPUUtilizationPercentage = 5;
+}
+
+// HostPortRange defines a range of host ports that will be enabled by a policy
+// for pods to use. It requires both the start and end to be defined.
+message HostPortRange {
+  // min is the start of the range, inclusive.
+  optional int32 min = 1;
+
+  // max is the end of the range, inclusive.
+  optional int32 max = 2;
+}
+
+// IDRange provides a min/max of an allowed range of IDs.
+message IDRange {
+  // Min is the start of the range, inclusive.
+  optional int64 min = 1;
+
+  // Max is the end of the range, inclusive.
+  optional int64 max = 2;
+}
+
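The path-matching contract of HTTPIngressPath above is easiest to see in code. The following is a minimal Go sketch, not Kubernetes source: the types are hypothetical stand-ins for the generated messages, and regexp.CompilePOSIX supplies the extended-POSIX (egrep-style) matching the comment calls for.

package main

import (
	"fmt"
	"regexp"
)

// Hypothetical mirror of HTTPIngressPath: a POSIX path regex plus a backend name.
type httpIngressPath struct {
	path    string // extended POSIX regex; empty means catch-all
	backend string
}

// matchBackend returns the backend of the first entry whose path regex
// matches the request path, per the HTTPIngressPath description above.
func matchBackend(paths []httpIngressPath, reqPath string) (string, bool) {
	for _, p := range paths {
		if p.path == "" {
			return p.backend, true // unspecified path: catch-all
		}
		re, err := regexp.CompilePOSIX(p.path)
		if err != nil {
			continue // skip invalid expressions in this sketch
		}
		if re.MatchString(reqPath) {
			return p.backend, true
		}
	}
	return "", false
}

func main() {
	paths := []httpIngressPath{
		{path: "^/api(/.*)?$", backend: "api-svc:8080"},
		{path: "", backend: "default-svc:80"},
	}
	fmt.Println(matchBackend(paths, "/api/v1/pods")) // api-svc:8080 true
	fmt.Println(matchBackend(paths, "/static/x.js")) // default-svc:80 true
}
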
+// Ingress is a collection of rules that allow inbound connections to reach the
+// endpoints defined by a backend. An Ingress can be configured to give services
+// externally-reachable URLs, load balance traffic, terminate SSL, offer name
+// based virtual hosting, etc.
+message Ingress {
+  // Standard object's metadata.
+  // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+  optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1;
+
+  // Spec is the desired state of the Ingress.
+  // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+  optional IngressSpec spec = 2;
+
+  // Status is the current state of the Ingress.
+  // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+  optional IngressStatus status = 3;
+}
+
+// IngressBackend describes all endpoints for a given service and port.
+message IngressBackend {
+  // Specifies the name of the referenced service.
+  optional string serviceName = 1;
+
+  // Specifies the port of the referenced service.
+  optional k8s.io.kubernetes.pkg.util.intstr.IntOrString servicePort = 2;
+}
+
+// IngressList is a collection of Ingress.
+message IngressList {
+  // Standard object's metadata.
+  // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+  optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1;
+
+  // Items is the list of Ingress.
+  repeated Ingress items = 2;
+}
+
+// IngressRule represents the rules mapping the paths under a specified host to
+// the related backend services. Incoming requests are first evaluated for a host
+// match, then routed to the backend associated with the matching IngressRuleValue.
+message IngressRule {
+  // Host is the fully qualified domain name of a network host, as defined
+  // by RFC 3986. Note the following deviations from the "host" part of the
+  // URI as defined in the RFC:
+  // 1. IPs are not allowed. Currently an IngressRuleValue can only apply to the
+  //    IP in the Spec of the parent Ingress.
+  // 2. The `:` delimiter is not respected because ports are not allowed.
+  //    Currently the port of an Ingress is implicitly :80 for http and
+  //    :443 for https.
+  // Both these may change in the future.
+  // Incoming requests are matched against the host before the IngressRuleValue.
+  // If the host is unspecified, the Ingress routes all traffic based on the
+  // specified IngressRuleValue.
+  optional string host = 1;
+
+  // IngressRuleValue represents a rule to route requests for this IngressRule.
+  // If unspecified, the rule defaults to an HTTP catch-all. Whether that sends
+  // just traffic matching the host to the default backend or all traffic to the
+  // default backend, is left to the controller fulfilling the Ingress. HTTP is
+  // currently the only supported IngressRuleValue.
+  optional IngressRuleValue ingressRuleValue = 2;
+}
+
+// IngressRuleValue represents a rule to apply against incoming requests. If the
+// rule is satisfied, the request is routed to the specified backend. Currently
+// mixing different types of rules in a single Ingress is disallowed, so exactly
+// one of the following must be set.
+message IngressRuleValue {
+  optional HTTPIngressRuleValue http = 1;
+}
+
+// IngressSpec describes the Ingress the user wishes to exist.
+message IngressSpec {
+  // A default backend capable of servicing requests that don't match any
+  // rule. At least one of 'backend' or 'rules' must be specified. This field
+  // is optional to allow the loadbalancer controller or defaulting logic to
+  // specify a global default.
+  optional IngressBackend backend = 1;
+
+  // TLS configuration. Currently the Ingress only supports a single TLS
+  // port, 443. If multiple members of this list specify different hosts, they
+  // will be multiplexed on the same port according to the hostname specified
+  // through the SNI TLS extension, if the ingress controller fulfilling the
+  // ingress supports SNI.
+  repeated IngressTLS tls = 2;
+
+  // A list of host rules used to configure the Ingress. If unspecified, or
+  // no rule matches, all traffic is sent to the default backend.
+  repeated IngressRule rules = 3;
+}
+
+// IngressStatus describes the current state of the Ingress.
+message IngressStatus {
+  // LoadBalancer contains the current status of the load-balancer.
+  optional k8s.io.kubernetes.pkg.api.v1.LoadBalancerStatus loadBalancer = 1;
+}
+
+// IngressTLS describes the transport layer security associated with an Ingress.
+message IngressTLS {
+  // Hosts are a list of hosts included in the TLS certificate. The values in
+  // this list must match the name/s used in the tlsSecret. Defaults to the
+  // wildcard host setting for the loadbalancer controller fulfilling this
+  // Ingress, if left unspecified.
+  repeated string hosts = 1;
+
+  // SecretName is the name of the secret used to terminate SSL traffic on 443.
+  // Field is left optional to allow SSL routing based on SNI hostname alone.
+  // If the SNI host in a listener conflicts with the "Host" header field used
+  // by an IngressRule, the SNI host is used for termination and the value of the
+  // Host header is used for routing.
+  optional string secretName = 2;
+}
+
+// Job represents the configuration of a single job.
+message Job {
+  // Standard object's metadata.
+  // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+  optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1;
+
+  // Spec is a structure defining the expected behavior of a job.
+  // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+  optional JobSpec spec = 2;
+
+  // Status is a structure describing the current status of a job.
+  // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+  optional JobStatus status = 3;
+}
+
+// JobCondition describes the current state of a job.
+message JobCondition {
+  // Type of job condition, Complete or Failed.
+  optional string type = 1;
+
+  // Status of the condition, one of True, False, Unknown.
+  optional string status = 2;
+
+  // Last time the condition was checked.
+  optional k8s.io.kubernetes.pkg.api.unversioned.Time lastProbeTime = 3;
+
+  // Last time the condition transitioned from one status to another.
+  optional k8s.io.kubernetes.pkg.api.unversioned.Time lastTransitionTime = 4;
+
+  // (brief) reason for the condition's last transition.
+  optional string reason = 5;
+
+  // Human readable message indicating details about the last transition.
+  optional string message = 6;
+}
+
+// JobList is a collection of jobs.
+message JobList {
+  // Standard list metadata.
+  // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+  optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1;
+
+  // Items is the list of Jobs.
+  repeated Job items = 2;
+}
+
+// JobSpec describes how the job execution will look.
+message JobSpec {
+  // Parallelism specifies the maximum desired number of pods the job should
+  // run at any given time.
+  // The actual number of pods running in steady state will
+  // be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism),
+  // i.e. when the work left to do is less than max parallelism.
+  // More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md
+  optional int32 parallelism = 1;
+
+  // Completions specifies the desired number of successfully finished pods the
+  // job should be run with. Setting to nil means that the success of any
+  // pod signals the success of all pods, and allows parallelism to have any positive
+  // value. Setting to 1 means that parallelism is limited to 1 and the success of that
+  // pod signals the success of the job.
+  // More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md
+  optional int32 completions = 2;
+
+  // Optional duration in seconds relative to the startTime that the job may be active
+  // before the system tries to terminate it; the value must be a positive integer.
+  optional int64 activeDeadlineSeconds = 3;
+
+  // Selector is a label query over pods that should match the pod count.
+  // Normally, the system sets this field for you.
+  // More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors
+  optional LabelSelector selector = 4;
+
+  // AutoSelector controls generation of pod labels and pod selectors.
+  // It was not present in the original extensions/v1beta1 Job definition, but exists
+  // to allow conversion from batch/v1 Jobs, where it corresponds to ManualSelector
+  // but with the opposite meaning.
+  // More info: http://releases.k8s.io/HEAD/docs/design/selector-generation.md
+  optional bool autoSelector = 5;
+
+  // Template is the object that describes the pod that will be created when
+  // executing a job.
+  // More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md
+  optional k8s.io.kubernetes.pkg.api.v1.PodTemplateSpec template = 6;
+}
+
+// JobStatus represents the current state of a Job.
+message JobStatus {
+  // Conditions represent the latest available observations of an object's current state.
+  // More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md
+  repeated JobCondition conditions = 1;
+
+  // StartTime represents the time when the job was acknowledged by the Job Manager.
+  // It is not guaranteed to be set in happens-before order across separate operations.
+  // It is represented in RFC3339 form and is in UTC.
+  optional k8s.io.kubernetes.pkg.api.unversioned.Time startTime = 2;
+
+  // CompletionTime represents the time when the job was completed. It is not guaranteed to
+  // be set in happens-before order across separate operations.
+  // It is represented in RFC3339 form and is in UTC.
+  optional k8s.io.kubernetes.pkg.api.unversioned.Time completionTime = 3;
+
+  // Active is the number of actively running pods.
+  optional int32 active = 4;
+
+  // Succeeded is the number of pods which reached Phase Succeeded.
+  optional int32 succeeded = 5;
+
+  // Failed is the number of pods which reached Phase Failed.
+  optional int32 failed = 6;
+}
+
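The parallelism comment above encodes a small piece of arithmetic: in steady state a Job runs min(parallelism, completions - succeeded) pods. A self-contained Go sketch of that rule (an illustrative helper, not Kubernetes source):

package main

import "fmt"

// steadyStatePods returns the pod count implied by the JobSpec comments above:
// when the remaining work (completions - succeeded) drops below parallelism,
// fewer pods than parallelism run.
func steadyStatePods(parallelism, completions, succeeded int32) int32 {
	remaining := completions - succeeded
	if remaining < parallelism {
		return remaining
	}
	return parallelism
}

func main() {
	fmt.Println(steadyStatePods(5, 10, 0)) // 5: plenty of work left
	fmt.Println(steadyStatePods(5, 10, 8)) // 2: only two completions remain
}
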
+// A label selector is a label query over a set of resources. matchLabels and
+// matchExpressions are ANDed. An empty label selector matches all objects. A null
+// label selector matches no objects.
+message LabelSelector {
+  // matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+  // map is equivalent to an element of matchExpressions, whose key field is "key", the
+  // operator is "In", and the values array contains only "value". The requirements are ANDed.
+  map<string, string> matchLabels = 1;
+
+  // matchExpressions is a list of label selector requirements. The requirements are ANDed.
+  repeated LabelSelectorRequirement matchExpressions = 2;
+}
+
+// A label selector requirement is a selector that contains values, a key, and an operator that
+// relates the key and values.
+message LabelSelectorRequirement {
+  // key is the label key that the selector applies to.
+  optional string key = 1;
+
+  // operator represents a key's relationship to a set of values.
+  // Valid operators are In, NotIn, Exists and DoesNotExist.
+  optional string operator = 2;
+
+  // values is an array of string values. If the operator is In or NotIn,
+  // the values array must be non-empty. If the operator is Exists or DoesNotExist,
+  // the values array must be empty. This array is replaced during a strategic
+  // merge patch.
+  repeated string values = 3;
+}
+
+// ListOptions is the query options to a standard REST list call.
+message ListOptions {
+  // A selector to restrict the list of returned objects by their labels.
+  // Defaults to everything.
+  optional string labelSelector = 1;
+
+  // A selector to restrict the list of returned objects by their fields.
+  // Defaults to everything.
+  optional string fieldSelector = 2;
+
+  // Watch for changes to the described resources and return them as a stream of
+  // add, update, and remove notifications. Specify resourceVersion.
+  optional bool watch = 3;
+
+  // When specified with a watch call, shows changes that occur after that particular version of a resource.
+  // Defaults to changes from the beginning of history.
+  optional string resourceVersion = 4;
+
+  // Timeout for the list/watch call.
+  optional int64 timeoutSeconds = 5;
+}
+
+message NetworkPolicy {
+  // Standard object's metadata.
+  // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+  optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1;
+
+  // Specification of the desired behavior for this NetworkPolicy.
+  optional NetworkPolicySpec spec = 2;
+}
+
+// This NetworkPolicyIngressRule matches traffic if and only if the traffic matches both ports AND from.
+message NetworkPolicyIngressRule {
+  // List of ports which should be made accessible on the pods selected for this rule.
+  // Each item in this list is combined using a logical OR.
+  // If this field is not provided, this rule matches all ports (traffic not restricted by port).
+  // If this field is empty, this rule matches no ports (no traffic matches).
+  // If this field is present and contains at least one item, then this rule allows traffic
+  // only if the traffic matches at least one port in the list.
+  // TODO: Update this to be a pointer to slice as soon as auto-generation supports it.
+  repeated NetworkPolicyPort ports = 1;
+
+  // List of sources which should be able to access the pods selected for this rule.
+  // Items in this list are combined using a logical OR operation.
+  // If this field is not provided, this rule matches all sources (traffic not restricted by source).
+  // If this field is empty, this rule matches no sources (no traffic matches).
+  // If this field is present and contains at least one item, this rule allows traffic only if the
+  // traffic matches at least one item in the from list.
+  // TODO: Update this to be a pointer to slice as soon as auto-generation supports it.
+  repeated NetworkPolicyPeer from = 2;
+}
+
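The nil-versus-empty list semantics spelled out for NetworkPolicyIngressRule above are subtle: an absent list restricts nothing, an empty list matches nothing, items within a list are OR'd, and the two lists are AND'd. A simplified Go sketch (hypothetical stand-in types, not Kubernetes source):

package main

import "fmt"

// port is a simplified stand-in for NetworkPolicyPort.
type port struct {
	protocol string
	number   int
}

// portListMatches: nil means unrestricted, empty means nothing matches,
// otherwise OR across items.
func portListMatches(ports []port, p port) bool {
	if ports == nil {
		return true
	}
	for _, v := range ports {
		if v == p {
			return true
		}
	}
	return false
}

// fromListMatches applies the same semantics to a simplified source label.
func fromListMatches(from []string, src string) bool {
	if from == nil {
		return true
	}
	for _, v := range from {
		if v == src {
			return true
		}
	}
	return false
}

// ruleMatches admits traffic only when both lists match (ports AND from).
func ruleMatches(ports []port, from []string, p port, src string) bool {
	return portListMatches(ports, p) && fromListMatches(from, src)
}

func main() {
	web := port{"TCP", 80}
	fmt.Println(ruleMatches([]port{web}, []string{"frontend"}, web, "frontend")) // true
	fmt.Println(ruleMatches([]port{}, []string{"frontend"}, web, "frontend"))    // false: empty ports list
	fmt.Println(ruleMatches(nil, []string{"frontend"}, web, "frontend"))         // true: ports unrestricted
}
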
+// NetworkPolicyList is a list of NetworkPolicy objects.
+message NetworkPolicyList {
+  // Standard list metadata.
+  // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+  optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1;
+
+  // Items is a list of schema objects.
+  repeated NetworkPolicy items = 2;
+}
+
+message NetworkPolicyPeer {
+  // This is a label selector which selects Pods in this namespace.
+  // This field follows standard label selector semantics.
+  // If not provided, this selector selects no pods.
+  // If present but empty, this selector selects all pods in this namespace.
+  optional LabelSelector podSelector = 1;
+
+  // Selects Namespaces using cluster-scoped labels. This
+  // matches all pods in all namespaces selected by this label selector.
+  // This field follows standard label selector semantics.
+  // If omitted, this selector selects no namespaces.
+  // If present but empty, this selector selects all namespaces.
+  optional LabelSelector namespaceSelector = 2;
+}
+
+message NetworkPolicyPort {
+  // Optional. The protocol (TCP or UDP) which traffic must match.
+  // If not specified, this field defaults to TCP.
+  optional string protocol = 1;
+
+  // If specified, the port on the given protocol. This can
+  // either be a numerical or named port on a pod. If this field is not provided,
+  // this matches all port names and numbers.
+  // If present, only traffic on the specified protocol AND port
+  // will be matched.
+  optional k8s.io.kubernetes.pkg.util.intstr.IntOrString port = 2;
+}
+
+message NetworkPolicySpec {
+  // Selects the pods to which this NetworkPolicy object applies. The array of ingress rules
+  // is applied to any pods selected by this field. Multiple network policies can select the
+  // same set of pods. In this case, the ingress rules for each are combined additively.
+  // This field is NOT optional and follows standard label selector semantics.
+  // An empty podSelector matches all pods in this namespace.
+  optional LabelSelector podSelector = 1;
+
+  // List of ingress rules to be applied to the selected pods.
+  // Traffic is allowed to a pod if namespace.networkPolicy.ingress.isolation is undefined and cluster policy allows it,
+  // OR if the traffic source is the pod's local node,
+  // OR if the traffic matches at least one ingress rule across all of the NetworkPolicy
+  // objects whose podSelector matches the pod.
+  // If this field is empty then this NetworkPolicy does not affect ingress isolation.
+  // If this field is present and contains at least one rule, this policy allows any traffic
+  // which matches at least one of the ingress rules in this list.
+  repeated NetworkPolicyIngressRule ingress = 2;
+}
+
+// PodSecurityPolicy governs the ability to make requests that affect the SecurityContext
+// that will be applied to a pod and container.
+message PodSecurityPolicy {
+  // Standard object's metadata.
+  // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+  optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1;
+
+  // spec defines the policy enforced.
+  optional PodSecurityPolicySpec spec = 2;
+}
+
+// PodSecurityPolicyList is a list of PodSecurityPolicy objects.
+message PodSecurityPolicyList {
+  // Standard list metadata.
+  // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+  optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1;
+
+  // Items is a list of schema objects.
+  repeated PodSecurityPolicy items = 2;
+}
+
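Many of the messages above and below hang off LabelSelector, so a sketch of its evaluation semantics, as stated in its comments (matchLabels and matchExpressions are ANDed; operators In, NotIn, Exists, DoesNotExist), may help. The types here are hypothetical mirrors, not the Kubernetes implementation:

package main

import "fmt"

// requirement mirrors LabelSelectorRequirement above.
type requirement struct {
	key      string
	operator string // "In", "NotIn", "Exists", "DoesNotExist"
	values   []string
}

// labelSelector mirrors LabelSelector above.
type labelSelector struct {
	matchLabels      map[string]string
	matchExpressions []requirement
}

// matches ANDs matchLabels and matchExpressions, per the proto comments.
func (s labelSelector) matches(labels map[string]string) bool {
	for k, want := range s.matchLabels {
		got, ok := labels[k]
		if !ok || got != want {
			return false
		}
	}
	for _, r := range s.matchExpressions {
		v, ok := labels[r.key]
		switch r.operator {
		case "In":
			if !ok || !contains(r.values, v) {
				return false
			}
		case "NotIn":
			if ok && contains(r.values, v) {
				return false
			}
		case "Exists":
			if !ok {
				return false
			}
		case "DoesNotExist":
			if ok {
				return false
			}
		}
	}
	return true
}

func contains(list []string, s string) bool {
	for _, v := range list {
		if v == s {
			return true
		}
	}
	return false
}

func main() {
	sel := labelSelector{
		matchLabels:      map[string]string{"app": "web"},
		matchExpressions: []requirement{{key: "tier", operator: "In", values: []string{"frontend"}}},
	}
	fmt.Println(sel.matches(map[string]string{"app": "web", "tier": "frontend"})) // true
	fmt.Println(sel.matches(map[string]string{"app": "web"}))                     // false: "tier" requirement fails
}
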
+// PodSecurityPolicySpec defines the policy enforced.
+message PodSecurityPolicySpec {
+  // privileged determines if a pod can request to be run as privileged.
+  optional bool privileged = 1;
+
+  // DefaultAddCapabilities is the default set of capabilities that will be added to the container
+  // unless the pod spec specifically drops the capability. You may not list a capability in both
+  // DefaultAddCapabilities and RequiredDropCapabilities.
+  repeated string defaultAddCapabilities = 2;
+
+  // RequiredDropCapabilities are the capabilities that will be dropped from the container. These
+  // are required to be dropped and cannot be added.
+  repeated string requiredDropCapabilities = 3;
+
+  // AllowedCapabilities is a list of capabilities that can be requested to add to the container.
+  // Capabilities in this field may be added at the pod author's discretion.
+  // You must not list a capability in both AllowedCapabilities and RequiredDropCapabilities.
+  repeated string allowedCapabilities = 4;
+
+  // volumes is a white list of allowed volume plugins. Empty indicates that all plugins
+  // may be used.
+  repeated string volumes = 5;
+
+  // hostNetwork determines if the policy allows the use of HostNetwork in the pod spec.
+  optional bool hostNetwork = 6;
+
+  // hostPorts determines which host port ranges are allowed to be exposed.
+  repeated HostPortRange hostPorts = 7;
+
+  // hostPID determines if the policy allows the use of HostPID in the pod spec.
+  optional bool hostPID = 8;
+
+  // hostIPC determines if the policy allows the use of HostIPC in the pod spec.
+  optional bool hostIPC = 9;
+
+  // seLinux is the strategy that will dictate the allowable labels that may be set.
+  optional SELinuxStrategyOptions seLinux = 10;
+
+  // runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set.
+  optional RunAsUserStrategyOptions runAsUser = 11;
+
+  // SupplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext.
+  optional SupplementalGroupsStrategyOptions supplementalGroups = 12;
+
+  // FSGroup is the strategy that will dictate what fs group is used by the SecurityContext.
+  optional FSGroupStrategyOptions fsGroup = 13;
+
+  // ReadOnlyRootFilesystem, when set to true, will force containers to run with a read-only root file
+  // system. If the container specifically requests to run with a non-read-only root file system
+  // the PSP should deny the pod.
+  // If set to false, the container may run with a read-only root file system if it wishes but it
+  // will not be forced to.
+  optional bool readOnlyRootFilesystem = 14;
+}
+
+// ReplicaSet represents the configuration of a ReplicaSet.
+message ReplicaSet {
+  // If the Labels of a ReplicaSet are empty, they are defaulted to
+  // be the same as the Pod(s) that the ReplicaSet manages.
+  // Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+  optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1;
+
+  // Spec defines the specification of the desired behavior of the ReplicaSet.
+  // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+  optional ReplicaSetSpec spec = 2;
+
+  // Status is the most recently observed status of the ReplicaSet.
+  // This data may be out of date by some window of time.
+  // Populated by the system.
+  // Read-only.
+  // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+  optional ReplicaSetStatus status = 3;
+}
+
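The three capability lists of PodSecurityPolicySpec above interact, and one plausible reconciliation for a container's requested capability additions can be sketched directly from the stated rules: defaults are added unless dropped, required drops always win, and any requested addition must appear in the allowed list. This is an assumption-laden Go illustration, not the real admission logic:

package main

import "fmt"

// effectiveCaps is a hypothetical helper combining the PSP capability lists
// for a container that requests `adds`, under the rules quoted above.
func effectiveCaps(defaultAdd, requiredDrop, allowed, adds []string) ([]string, error) {
	drop := toSet(requiredDrop)
	allow := toSet(allowed)
	result := map[string]bool{}
	for _, c := range defaultAdd {
		if !drop[c] {
			result[c] = true // defaults apply unless required to be dropped
		}
	}
	for _, c := range adds {
		if drop[c] {
			return nil, fmt.Errorf("capability %s must be dropped", c)
		}
		if !allow[c] {
			return nil, fmt.Errorf("capability %s is not allowed", c)
		}
		result[c] = true
	}
	out := []string{}
	for c := range result {
		out = append(out, c)
	}
	return out, nil
}

func toSet(list []string) map[string]bool {
	s := map[string]bool{}
	for _, v := range list {
		s[v] = true
	}
	return s
}

func main() {
	caps, err := effectiveCaps(
		[]string{"CHOWN"},            // defaultAddCapabilities
		[]string{"NET_RAW"},          // requiredDropCapabilities
		[]string{"NET_BIND_SERVICE"}, // allowedCapabilities
		[]string{"NET_BIND_SERVICE"}, // container-requested adds
	)
	fmt.Println(caps, err) // [CHOWN NET_BIND_SERVICE] <nil> (map order varies)
}
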
+// ReplicaSetList is a collection of ReplicaSets.
+message ReplicaSetList {
+  // Standard list metadata.
+  // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
+  optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1;
+
+  // List of ReplicaSets.
+  // More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md
+  repeated ReplicaSet items = 2;
+}
+
+// ReplicaSetSpec is the specification of a ReplicaSet.
+message ReplicaSetSpec {
+  // Replicas is the number of desired replicas.
+  // This is a pointer to distinguish between explicit zero and unspecified.
+  // Defaults to 1.
+  // More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#what-is-a-replication-controller
+  optional int32 replicas = 1;
+
+  // Selector is a label query over pods that should match the replica count.
+  // If the selector is empty, it is defaulted to the labels present on the pod template.
+  // Label keys and values that must match in order to be controlled by this replica set.
+  // More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors
+  optional LabelSelector selector = 2;
+
+  // Template is the object that describes the pod that will be created if
+  // insufficient replicas are detected.
+  // More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#pod-template
+  optional k8s.io.kubernetes.pkg.api.v1.PodTemplateSpec template = 3;
+}
+
+// ReplicaSetStatus represents the current status of a ReplicaSet.
+message ReplicaSetStatus {
+  // Replicas is the most recently observed number of replicas.
+  // More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#what-is-a-replication-controller
+  optional int32 replicas = 1;
+
+  // The number of pods that have labels matching the labels of the pod template of the ReplicaSet.
+  optional int32 fullyLabeledReplicas = 2;
+
+  // ObservedGeneration reflects the generation of the most recently observed ReplicaSet.
+  optional int64 observedGeneration = 3;
+}
+
+// Dummy definition
+message ReplicationControllerDummy {
+}
+
+message RollbackConfig {
+  // The revision to roll back to. If set to 0, roll back to the last revision.
+  optional int64 revision = 1;
+}
+
+// Spec to control the desired behavior of rolling update.
+message RollingUpdateDeployment {
+  // The maximum number of pods that can be unavailable during the update.
+  // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
+  // Absolute number is calculated from percentage by rounding up.
+  // This cannot be 0 if MaxSurge is 0.
+  // By default, a fixed value of 1 is used.
+  // Example: when this is set to 30%, the old RC can be scaled down to 70% of desired pods
+  // immediately when the rolling update starts. Once new pods are ready, the old RC
+  // can be scaled down further, followed by scaling up the new RC, ensuring
+  // that the total number of pods available at all times during the update is at
+  // least 70% of desired pods.
+  optional k8s.io.kubernetes.pkg.util.intstr.IntOrString maxUnavailable = 1;
+
+  // The maximum number of pods that can be scheduled above the desired number of
+  // pods.
+  // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
+  // This cannot be 0 if MaxUnavailable is 0.
+  // Absolute number is calculated from percentage by rounding up.
+  // By default, a value of 1 is used.
+  // Example: when this is set to 30%, the new RC can be scaled up immediately when
+  // the rolling update starts, such that the total number of old and new pods does not exceed
+  // 130% of desired pods. Once old pods have been killed,
+  // the new RC can be scaled up further, ensuring that the total number of pods running
+  // at any time during the update is at most 130% of desired pods.
+  optional k8s.io.kubernetes.pkg.util.intstr.IntOrString maxSurge = 2;
+}
+
+// RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy.
+message RunAsUserStrategyOptions {
+  // Rule is the strategy that will dictate the allowable RunAsUser values that may be set.
+  optional string rule = 1;
+
+  // Ranges are the allowed ranges of uids that may be used.
+  repeated IDRange ranges = 2;
+}
+
+// SELinuxStrategyOptions defines the strategy type and any options used to create the strategy.
+message SELinuxStrategyOptions {
+  // type is the strategy that will dictate the allowable labels that may be set.
+  optional string rule = 1;
+
+  // seLinuxOptions required to run as; required for MustRunAs
+  // More info: http://releases.k8s.io/HEAD/docs/design/security_context.md#security-context
+  optional k8s.io.kubernetes.pkg.api.v1.SELinuxOptions seLinuxOptions = 2;
+}
+
+// represents a scaling request for a resource.
+message Scale {
+  // Standard object metadata; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata.
+  optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1;
+
+  // defines the behavior of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status.
+  optional ScaleSpec spec = 2;
+
+  // current status of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. Read-only.
+  optional ScaleStatus status = 3;
+}
+
+// describes the attributes of a scale subresource
+message ScaleSpec {
+  // desired number of instances for the scaled object.
+  optional int32 replicas = 1;
+}
+
+// represents the current status of a scale subresource.
+message ScaleStatus {
+  // actual number of observed instances of the scaled object.
+  optional int32 replicas = 1;
+
+  // label query over pods that should match the replicas count. More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors
+  map<string, string> selector = 2;
+
+  // label selector for pods that should match the replicas count. This is a serialized
+  // version of both map-based and more expressive set-based selectors. This is done to
+  // avoid introspection in the clients. The string will be in the same format as the
+  // query-param syntax. If the target type only supports map-based selectors, both this
+  // field and the map-based selector field are populated.
+  // More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors
+  optional string targetSelector = 3;
+}
+
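The percentage arithmetic described for maxUnavailable and maxSurge above (an absolute number is derived from a percentage of desired pods, rounded up) can be made concrete with a small Go sketch; `resolve` is a hypothetical helper, not the intstr package API:

package main

import (
	"fmt"
	"math"
	"strconv"
	"strings"
)

// resolve turns an IntOrString-style value ("5" or "30%") into an absolute
// pod count against the desired replica count, rounding percentages up as
// the comments above specify.
func resolve(value string, desired int) (int, error) {
	if strings.HasSuffix(value, "%") {
		pct, err := strconv.Atoi(strings.TrimSuffix(value, "%"))
		if err != nil {
			return 0, err
		}
		return int(math.Ceil(float64(desired) * float64(pct) / 100.0)), nil
	}
	return strconv.Atoi(value)
}

func main() {
	n, _ := resolve("30%", 10)
	fmt.Println(n) // 3: old RC may drop to 7, or new RC may surge to 13
	n, _ = resolve("25%", 10)
	fmt.Println(n) // 3: 2.5 rounded up
	n, _ = resolve("5", 10)
	fmt.Println(n) // 5: absolute value used as-is
}
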
+// SubresourceReference contains enough information to let you inspect or modify the referred subresource.
+message SubresourceReference {
+  // Kind of the referent; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
+  optional string kind = 1;
+
+  // Name of the referent; More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#names
+  optional string name = 2;
+
+  // API version of the referent
+  optional string apiVersion = 3;
+
+  // Subresource name of the referent
+  optional string subresource = 4;
+}
+
+// SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy.
+message SupplementalGroupsStrategyOptions {
+  // Rule is the strategy that will dictate what supplemental groups are used in the SecurityContext.
+  optional string rule = 1;
+
+  // Ranges are the allowed ranges of supplemental groups. If you would like to force a single
+  // supplemental group then supply a single range with the same start and end.
+  repeated IDRange ranges = 2;
+}
+
+// A ThirdPartyResource is a generic representation of a resource; it is used by add-ons and plugins to add new resource
+// types to the API. It consists of one or more Versions of the API.
+message ThirdPartyResource {
+  // Standard object metadata
+  optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1;
+
+  // Description is the description of this object.
+  optional string description = 2;
+
+  // Versions are the versions for this third party object
+  repeated APIVersion versions = 3;
+}
+
+// An internal object, used for versioned storage in etcd. Not exposed to the end user.
+message ThirdPartyResourceData {
+  // Standard object metadata.
+  optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1;
+
+  // Data is the raw JSON data for this object.
+  optional bytes data = 2;
+}
+
+// ThirdPartyResourceDataList is a list of ThirdPartyResourceData.
+message ThirdPartyResourceDataList {
+  // Standard list metadata.
+  // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+  optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1;
+
+  // Items is the list of ThirdPartyResourceData.
+  repeated ThirdPartyResourceData items = 2;
+}
+
+// ThirdPartyResourceList is a list of ThirdPartyResources.
+message ThirdPartyResourceList {
+  // Standard list metadata.
+  optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1;
+
+  // Items is the list of ThirdPartyResources.
+  repeated ThirdPartyResource items = 2;
+}
+
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/register.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/register.go
index 026a9f6810cc..041e2cbc16b8 100644
--- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/register.go
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/register.go
@@ -20,6 +20,7 @@ import (
 	"k8s.io/kubernetes/pkg/api/unversioned"
 	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/runtime"
+	versionedwatch "k8s.io/kubernetes/pkg/watch/versioned"
 )
 
 // GroupName is the group name used in this package
@@ -60,7 +61,11 @@ func addKnownTypes(scheme *runtime.Scheme) {
 		&ReplicaSetList{},
 		&PodSecurityPolicy{},
 		&PodSecurityPolicyList{},
+		&NetworkPolicy{},
+		&NetworkPolicyList{},
 	)
+	// Add the watch version that applies
+	versionedwatch.AddToGroupVersion(scheme, SchemeGroupVersion)
 }
 
 func (obj *Deployment) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
@@ -85,3 +90,5 @@ func (obj *ReplicaSet) GetObjectKind() unversioned.ObjectKind {
 func (obj *ReplicaSetList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
 func (obj *PodSecurityPolicy) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
 func (obj *PodSecurityPolicyList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
+func (obj *NetworkPolicy) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
+func (obj *NetworkPolicyList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/types.generated.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/types.generated.go
index ffdeebcf59d7..cbe82eff3160 100644
--- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/types.generated.go
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/types.generated.go
@@ -1,5 +1,5 @@
 /*
-Copyright 2015 The Kubernetes Authors All rights reserved.
+Copyright 2016 The Kubernetes Authors All rights reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
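For orientation, the register.go hunk above follows the usual scheme pattern: addKnownTypes maps each kind in the group/version to its Go type, and versionedwatch.AddToGroupVersion additionally registers the versioned watch Event for that group/version. A toy, self-contained re-creation of such a registry (hypothetical names, not the k8s.io/kubernetes API) shows why this registration is needed before decoding:

package main

import (
	"fmt"
	"reflect"
)

// groupVersion identifies an API group and version, as SchemeGroupVersion does above.
type groupVersion struct{ group, version string }

// scheme maps a kind name within a group/version to a Go type so a decoder
// can instantiate the right struct, loosely mimicking addKnownTypes above.
type scheme struct {
	types map[groupVersion]map[string]reflect.Type
}

func newScheme() *scheme {
	return &scheme{types: map[groupVersion]map[string]reflect.Type{}}
}

func (s *scheme) addKnownTypes(gv groupVersion, objs ...interface{}) {
	if s.types[gv] == nil {
		s.types[gv] = map[string]reflect.Type{}
	}
	for _, obj := range objs {
		t := reflect.TypeOf(obj).Elem()
		s.types[gv][t.Name()] = t // kind name taken from the struct name
	}
}

// new instantiates a fresh object for the requested kind, as a decoder would.
func (s *scheme) new(gv groupVersion, kind string) (interface{}, bool) {
	t, ok := s.types[gv][kind]
	if !ok {
		return nil, false
	}
	return reflect.New(t).Interface(), true
}

type NetworkPolicy struct{ Name string }
type NetworkPolicyList struct{ Items []NetworkPolicy }

func main() {
	gv := groupVersion{"extensions", "v1beta1"}
	s := newScheme()
	s.addKnownTypes(gv, &NetworkPolicy{}, &NetworkPolicyList{})
	obj, ok := s.new(gv, "NetworkPolicy")
	fmt.Printf("%T %v\n", obj, ok) // *main.NetworkPolicy true
}
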
@@ -29,10 +29,9 @@ import ( pkg1_unversioned "k8s.io/kubernetes/pkg/api/unversioned" pkg2_v1 "k8s.io/kubernetes/pkg/api/v1" pkg3_types "k8s.io/kubernetes/pkg/types" - pkg6_intstr "k8s.io/kubernetes/pkg/util/intstr" + pkg5_intstr "k8s.io/kubernetes/pkg/util/intstr" "reflect" "runtime" - pkg5_inf "speter.net/go/exp/math/dec/inf" time "time" ) @@ -70,10 +69,9 @@ func init() { var v1 pkg1_unversioned.TypeMeta var v2 pkg2_v1.ObjectMeta var v3 pkg3_types.UID - var v4 pkg6_intstr.IntOrString - var v5 pkg5_inf.Dec - var v6 time.Time - _, _, _, _, _, _, _ = v0, v1, v2, v3, v4, v5, v6 + var v4 pkg5_intstr.IntOrString + var v5 time.Time + _, _, _, _, _, _ = v0, v1, v2, v3, v4, v5 } } @@ -4587,14 +4585,13 @@ func (x *APIVersion) CodecEncodeSelf(e *codec1978.Encoder) { } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool + var yyq2 [1]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false yyq2[0] = x.Name != "" - yyq2[1] = x.APIGroup != "" var yynn2 int if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) + r.EncodeArrayStart(1) } else { yynn2 = 0 for _, b := range yyq2 { @@ -4630,31 +4627,6 @@ func (x *APIVersion) CodecEncodeSelf(e *codec1978.Encoder) { } } } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIGroup)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiGroup")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIGroup)) - } - } - } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { @@ -4722,12 +4694,6 @@ func (x *APIVersion) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } else { x.Name = string(r.DecodeString()) } - case "apiGroup": - if r.TryDecodeAsNil() { - x.APIGroup = "" - } else { - x.APIGroup = string(r.DecodeString()) - } default: z.DecStructFieldNotFound(-1, yys3) } // end switch yys3 @@ -4739,16 +4705,16 @@ func (x *APIVersion) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l + var yyj5 int + var yyb5 bool + var yyhl5 bool = l >= 0 + yyj5++ + if yyhl5 { + yyb5 = yyj5 > l } else { - yyb6 = r.CheckBreak() + yyb5 = r.CheckBreak() } - if yyb6 { + if yyb5 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4758,34 +4724,18 @@ func (x *APIVersion) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } else { x.Name = string(r.DecodeString()) } - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIGroup = "" - } else { - x.APIGroup = string(r.DecodeString()) - } for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l + yyj5++ + if yyhl5 { + yyb5 = yyj5 > l } else { - yyb6 = r.CheckBreak() + yyb5 = r.CheckBreak() } - if yyb6 { + if yyb5 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") + z.DecStructFieldNotFound(yyj5-1, "") } 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -6971,7 +6921,7 @@ func (x *RollingUpdateDeployment) codecDecodeSelfFromMap(l int, d *codec1978.Dec } } else { if x.MaxUnavailable == nil { - x.MaxUnavailable = new(pkg6_intstr.IntOrString) + x.MaxUnavailable = new(pkg5_intstr.IntOrString) } yym5 := z.DecBinary() _ = yym5 @@ -6990,7 +6940,7 @@ func (x *RollingUpdateDeployment) codecDecodeSelfFromMap(l int, d *codec1978.Dec } } else { if x.MaxSurge == nil { - x.MaxSurge = new(pkg6_intstr.IntOrString) + x.MaxSurge = new(pkg5_intstr.IntOrString) } yym7 := z.DecBinary() _ = yym7 @@ -7033,7 +6983,7 @@ func (x *RollingUpdateDeployment) codecDecodeSelfFromArray(l int, d *codec1978.D } } else { if x.MaxUnavailable == nil { - x.MaxUnavailable = new(pkg6_intstr.IntOrString) + x.MaxUnavailable = new(pkg5_intstr.IntOrString) } yym10 := z.DecBinary() _ = yym10 @@ -7062,7 +7012,7 @@ func (x *RollingUpdateDeployment) codecDecodeSelfFromArray(l int, d *codec1978.D } } else { if x.MaxSurge == nil { - x.MaxSurge = new(pkg6_intstr.IntOrString) + x.MaxSurge = new(pkg5_intstr.IntOrString) } yym12 := z.DecBinary() _ = yym12 @@ -13842,7 +13792,7 @@ func (x *IngressBackend) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } case "servicePort": if r.TryDecodeAsNil() { - x.ServicePort = pkg6_intstr.IntOrString{} + x.ServicePort = pkg5_intstr.IntOrString{} } else { yyv5 := &x.ServicePort yym6 := z.DecBinary() @@ -13897,7 +13847,7 @@ func (x *IngressBackend) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ServicePort = pkg6_intstr.IntOrString{} + x.ServicePort = pkg5_intstr.IntOrString{} } else { yyv9 := &x.ServicePort yym10 := z.DecBinary() @@ -16813,23 +16763,24 @@ func (x *PodSecurityPolicySpec) CodecEncodeSelf(e *codec1978.Encoder) { } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [9]bool + var yyq2 [14]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false yyq2[0] = x.Privileged != false - yyq2[1] = len(x.Capabilities) != 0 - yyq2[2] = len(x.Volumes) != 0 - yyq2[3] = x.HostNetwork != false - yyq2[4] = len(x.HostPorts) != 0 - yyq2[5] = x.HostPID != false - yyq2[6] = x.HostIPC != false - yyq2[7] = true - yyq2[8] = true + yyq2[1] = len(x.DefaultAddCapabilities) != 0 + yyq2[2] = len(x.RequiredDropCapabilities) != 0 + yyq2[3] = len(x.AllowedCapabilities) != 0 + yyq2[4] = len(x.Volumes) != 0 + yyq2[5] = x.HostNetwork != false + yyq2[6] = len(x.HostPorts) != 0 + yyq2[7] = x.HostPID != false + yyq2[8] = x.HostIPC != false + yyq2[13] = x.ReadOnlyRootFilesystem != false var yynn2 int if yyr2 || yy2arr2 { - r.EncodeArrayStart(9) + r.EncodeArrayStart(14) } else { - yynn2 = 0 + yynn2 = 4 for _, b := range yyq2 { if b { yynn2++ @@ -16866,14 +16817,14 @@ func (x *PodSecurityPolicySpec) CodecEncodeSelf(e *codec1978.Encoder) { if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if yyq2[1] { - if x.Capabilities == nil { + if x.DefaultAddCapabilities == nil { r.EncodeNil() } else { yym7 := z.EncBinary() _ = yym7 if false { } else { - h.encSlicev1_Capability(([]pkg2_v1.Capability)(x.Capabilities), e) + h.encSlicev1_Capability(([]pkg2_v1.Capability)(x.DefaultAddCapabilities), e) } } } else { @@ -16882,16 +16833,16 @@ func (x *PodSecurityPolicySpec) CodecEncodeSelf(e *codec1978.Encoder) { } else { if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("capabilities")) + 
r.EncodeString(codecSelferC_UTF81234, string("defaultAddCapabilities")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Capabilities == nil { + if x.DefaultAddCapabilities == nil { r.EncodeNil() } else { yym8 := z.EncBinary() _ = yym8 if false { } else { - h.encSlicev1_Capability(([]pkg2_v1.Capability)(x.Capabilities), e) + h.encSlicev1_Capability(([]pkg2_v1.Capability)(x.DefaultAddCapabilities), e) } } } @@ -16899,14 +16850,14 @@ func (x *PodSecurityPolicySpec) CodecEncodeSelf(e *codec1978.Encoder) { if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if yyq2[2] { - if x.Volumes == nil { + if x.RequiredDropCapabilities == nil { r.EncodeNil() } else { yym10 := z.EncBinary() _ = yym10 if false { } else { - h.encSliceFSType(([]FSType)(x.Volumes), e) + h.encSlicev1_Capability(([]pkg2_v1.Capability)(x.RequiredDropCapabilities), e) } } } else { @@ -16915,16 +16866,16 @@ func (x *PodSecurityPolicySpec) CodecEncodeSelf(e *codec1978.Encoder) { } else { if yyq2[2] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("volumes")) + r.EncodeString(codecSelferC_UTF81234, string("requiredDropCapabilities")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Volumes == nil { + if x.RequiredDropCapabilities == nil { r.EncodeNil() } else { yym11 := z.EncBinary() _ = yym11 if false { } else { - h.encSliceFSType(([]FSType)(x.Volumes), e) + h.encSlicev1_Capability(([]pkg2_v1.Capability)(x.RequiredDropCapabilities), e) } } } @@ -16932,39 +16883,47 @@ func (x *PodSecurityPolicySpec) CodecEncodeSelf(e *codec1978.Encoder) { if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if yyq2[3] { - yym13 := z.EncBinary() - _ = yym13 - if false { + if x.AllowedCapabilities == nil { + r.EncodeNil() } else { - r.EncodeBool(bool(x.HostNetwork)) + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + h.encSlicev1_Capability(([]pkg2_v1.Capability)(x.AllowedCapabilities), e) + } } } else { - r.EncodeBool(false) + r.EncodeNil() } } else { if yyq2[3] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostNetwork")) + r.EncodeString(codecSelferC_UTF81234, string("allowedCapabilities")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { + if x.AllowedCapabilities == nil { + r.EncodeNil() } else { - r.EncodeBool(bool(x.HostNetwork)) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + h.encSlicev1_Capability(([]pkg2_v1.Capability)(x.AllowedCapabilities), e) + } } } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if yyq2[4] { - if x.HostPorts == nil { + if x.Volumes == nil { r.EncodeNil() } else { yym16 := z.EncBinary() _ = yym16 if false { } else { - h.encSliceHostPortRange(([]HostPortRange)(x.HostPorts), e) + h.encSliceFSType(([]FSType)(x.Volumes), e) } } } else { @@ -16973,16 +16932,16 @@ func (x *PodSecurityPolicySpec) CodecEncodeSelf(e *codec1978.Encoder) { } else { if yyq2[4] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostPorts")) + r.EncodeString(codecSelferC_UTF81234, string("volumes")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.HostPorts == nil { + if x.Volumes == nil { r.EncodeNil() } else { yym17 := z.EncBinary() _ = yym17 if false { } else { - h.encSliceHostPortRange(([]HostPortRange)(x.HostPorts), e) + 
h.encSliceFSType(([]FSType)(x.Volumes), e) } } } @@ -16994,7 +16953,7 @@ func (x *PodSecurityPolicySpec) CodecEncodeSelf(e *codec1978.Encoder) { _ = yym19 if false { } else { - r.EncodeBool(bool(x.HostPID)) + r.EncodeBool(bool(x.HostNetwork)) } } else { r.EncodeBool(false) @@ -17002,73 +16961,166 @@ func (x *PodSecurityPolicySpec) CodecEncodeSelf(e *codec1978.Encoder) { } else { if yyq2[5] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostPID")) + r.EncodeString(codecSelferC_UTF81234, string("hostNetwork")) z.EncSendContainerState(codecSelfer_containerMapValue1234) yym20 := z.EncBinary() _ = yym20 if false { } else { - r.EncodeBool(bool(x.HostPID)) + r.EncodeBool(bool(x.HostNetwork)) } } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if yyq2[6] { - yym22 := z.EncBinary() - _ = yym22 - if false { + if x.HostPorts == nil { + r.EncodeNil() } else { - r.EncodeBool(bool(x.HostIPC)) + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + h.encSliceHostPortRange(([]HostPortRange)(x.HostPorts), e) + } } } else { - r.EncodeBool(false) + r.EncodeNil() } } else { if yyq2[6] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostIPC")) + r.EncodeString(codecSelferC_UTF81234, string("hostPorts")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym23 := z.EncBinary() - _ = yym23 - if false { + if x.HostPorts == nil { + r.EncodeNil() } else { - r.EncodeBool(bool(x.HostIPC)) + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + h.encSliceHostPortRange(([]HostPortRange)(x.HostPorts), e) + } } } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if yyq2[7] { - yy25 := &x.SELinux - yy25.CodecEncodeSelf(e) + yym25 := z.EncBinary() + _ = yym25 + if false { + } else { + r.EncodeBool(bool(x.HostPID)) + } } else { - r.EncodeNil() + r.EncodeBool(false) } } else { if yyq2[7] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("seLinux")) + r.EncodeString(codecSelferC_UTF81234, string("hostPID")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy27 := &x.SELinux - yy27.CodecEncodeSelf(e) + yym26 := z.EncBinary() + _ = yym26 + if false { + } else { + r.EncodeBool(bool(x.HostPID)) + } } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if yyq2[8] { - yy30 := &x.RunAsUser - yy30.CodecEncodeSelf(e) + yym28 := z.EncBinary() + _ = yym28 + if false { + } else { + r.EncodeBool(bool(x.HostIPC)) + } } else { - r.EncodeNil() + r.EncodeBool(false) } } else { if yyq2[8] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("runAsUser")) + r.EncodeString(codecSelferC_UTF81234, string("hostIPC")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym29 := z.EncBinary() + _ = yym29 + if false { + } else { + r.EncodeBool(bool(x.HostIPC)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy31 := &x.SELinux + yy31.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("seLinux")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy33 := &x.SELinux + yy33.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy36 := &x.RunAsUser + yy36.CodecEncodeSelf(e) + } else { + 
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("runAsUser")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy38 := &x.RunAsUser + yy38.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy41 := &x.SupplementalGroups + yy41.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("supplementalGroups")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy43 := &x.SupplementalGroups + yy43.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy46 := &x.FSGroup + yy46.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("fsGroup")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy48 := &x.FSGroup + yy48.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[13] { + yym51 := z.EncBinary() + _ = yym51 + if false { + } else { + r.EncodeBool(bool(x.ReadOnlyRootFilesystem)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[13] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("readOnlyRootFilesystem")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy32 := &x.RunAsUser - yy32.CodecEncodeSelf(e) + yym52 := z.EncBinary() + _ = yym52 + if false { + } else { + r.EncodeBool(bool(x.ReadOnlyRootFilesystem)) + } } } if yyr2 || yy2arr2 { @@ -17138,11 +17190,11 @@ func (x *PodSecurityPolicySpec) codecDecodeSelfFromMap(l int, d *codec1978.Decod } else { x.Privileged = bool(r.DecodeBool()) } - case "capabilities": + case "defaultAddCapabilities": if r.TryDecodeAsNil() { - x.Capabilities = nil + x.DefaultAddCapabilities = nil } else { - yyv5 := &x.Capabilities + yyv5 := &x.DefaultAddCapabilities yym6 := z.DecBinary() _ = yym6 if false { @@ -17150,16 +17202,40 @@ func (x *PodSecurityPolicySpec) codecDecodeSelfFromMap(l int, d *codec1978.Decod h.decSlicev1_Capability((*[]pkg2_v1.Capability)(yyv5), d) } } - case "volumes": + case "requiredDropCapabilities": if r.TryDecodeAsNil() { - x.Volumes = nil + x.RequiredDropCapabilities = nil } else { - yyv7 := &x.Volumes + yyv7 := &x.RequiredDropCapabilities yym8 := z.DecBinary() _ = yym8 if false { } else { - h.decSliceFSType((*[]FSType)(yyv7), d) + h.decSlicev1_Capability((*[]pkg2_v1.Capability)(yyv7), d) + } + } + case "allowedCapabilities": + if r.TryDecodeAsNil() { + x.AllowedCapabilities = nil + } else { + yyv9 := &x.AllowedCapabilities + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + h.decSlicev1_Capability((*[]pkg2_v1.Capability)(yyv9), d) + } + } + case "volumes": + if r.TryDecodeAsNil() { + x.Volumes = nil + } else { + yyv11 := &x.Volumes + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + h.decSliceFSType((*[]FSType)(yyv11), d) } } case "hostNetwork": @@ -17172,12 +17248,12 @@ func (x *PodSecurityPolicySpec) codecDecodeSelfFromMap(l int, d *codec1978.Decod if r.TryDecodeAsNil() { x.HostPorts = nil } else { - yyv10 := &x.HostPorts - yym11 := z.DecBinary() - _ = yym11 + yyv14 := &x.HostPorts + yym15 := z.DecBinary() + _ = yym15 if false { } else { - h.decSliceHostPortRange((*[]HostPortRange)(yyv10), d) + h.decSliceHostPortRange((*[]HostPortRange)(yyv14), d) } } case "hostPID": @@ -17196,15 +17272,35 @@ func (x 
*PodSecurityPolicySpec) codecDecodeSelfFromMap(l int, d *codec1978.Decod if r.TryDecodeAsNil() { x.SELinux = SELinuxStrategyOptions{} } else { - yyv14 := &x.SELinux - yyv14.CodecDecodeSelf(d) + yyv18 := &x.SELinux + yyv18.CodecDecodeSelf(d) } case "runAsUser": if r.TryDecodeAsNil() { x.RunAsUser = RunAsUserStrategyOptions{} } else { - yyv15 := &x.RunAsUser - yyv15.CodecDecodeSelf(d) + yyv19 := &x.RunAsUser + yyv19.CodecDecodeSelf(d) + } + case "supplementalGroups": + if r.TryDecodeAsNil() { + x.SupplementalGroups = SupplementalGroupsStrategyOptions{} + } else { + yyv20 := &x.SupplementalGroups + yyv20.CodecDecodeSelf(d) + } + case "fsGroup": + if r.TryDecodeAsNil() { + x.FSGroup = FSGroupStrategyOptions{} + } else { + yyv21 := &x.FSGroup + yyv21.CodecDecodeSelf(d) + } + case "readOnlyRootFilesystem": + if r.TryDecodeAsNil() { + x.ReadOnlyRootFilesystem = false + } else { + x.ReadOnlyRootFilesystem = bool(r.DecodeBool()) } default: z.DecStructFieldNotFound(-1, yys3) @@ -17217,16 +17313,16 @@ func (x *PodSecurityPolicySpec) codecDecodeSelfFromArray(l int, d *codec1978.Dec var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj16 int - var yyb16 bool - var yyhl16 bool = l >= 0 - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l + var yyj23 int + var yyb23 bool + var yyhl23 bool = l >= 0 + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l } else { - yyb16 = r.CheckBreak() + yyb23 = r.CheckBreak() } - if yyb16 { + if yyb23 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -17236,144 +17332,188 @@ func (x *PodSecurityPolicySpec) codecDecodeSelfFromArray(l int, d *codec1978.Dec } else { x.Privileged = bool(r.DecodeBool()) } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l } else { - yyb16 = r.CheckBreak() + yyb23 = r.CheckBreak() } - if yyb16 { + if yyb23 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Capabilities = nil + x.DefaultAddCapabilities = nil } else { - yyv18 := &x.Capabilities - yym19 := z.DecBinary() - _ = yym19 + yyv25 := &x.DefaultAddCapabilities + yym26 := z.DecBinary() + _ = yym26 if false { } else { - h.decSlicev1_Capability((*[]pkg2_v1.Capability)(yyv18), d) + h.decSlicev1_Capability((*[]pkg2_v1.Capability)(yyv25), d) } } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l } else { - yyb16 = r.CheckBreak() + yyb23 = r.CheckBreak() } - if yyb16 { + if yyb23 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Volumes = nil + x.RequiredDropCapabilities = nil } else { - yyv20 := &x.Volumes - yym21 := z.DecBinary() - _ = yym21 + yyv27 := &x.RequiredDropCapabilities + yym28 := z.DecBinary() + _ = yym28 if false { } else { - h.decSliceFSType((*[]FSType)(yyv20), d) + h.decSlicev1_Capability((*[]pkg2_v1.Capability)(yyv27), d) } } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l } else { - yyb16 = r.CheckBreak() + yyb23 = r.CheckBreak() } - if yyb16 { + if yyb23 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.HostNetwork = false + x.AllowedCapabilities = nil } else { - x.HostNetwork = bool(r.DecodeBool()) + yyv29 := &x.AllowedCapabilities + yym30 := z.DecBinary() + _ = yym30 + if false { + } else { + 
h.decSlicev1_Capability((*[]pkg2_v1.Capability)(yyv29), d) + } } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l } else { - yyb16 = r.CheckBreak() + yyb23 = r.CheckBreak() } - if yyb16 { + if yyb23 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.HostPorts = nil + x.Volumes = nil } else { - yyv23 := &x.HostPorts - yym24 := z.DecBinary() - _ = yym24 + yyv31 := &x.Volumes + yym32 := z.DecBinary() + _ = yym32 if false { } else { - h.decSliceHostPortRange((*[]HostPortRange)(yyv23), d) + h.decSliceFSType((*[]FSType)(yyv31), d) } } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l } else { - yyb16 = r.CheckBreak() + yyb23 = r.CheckBreak() } - if yyb16 { + if yyb23 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.HostPID = false + x.HostNetwork = false } else { - x.HostPID = bool(r.DecodeBool()) + x.HostNetwork = bool(r.DecodeBool()) } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l } else { - yyb16 = r.CheckBreak() + yyb23 = r.CheckBreak() } - if yyb16 { + if yyb23 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.HostIPC = false + x.HostPorts = nil } else { - x.HostIPC = bool(r.DecodeBool()) + yyv34 := &x.HostPorts + yym35 := z.DecBinary() + _ = yym35 + if false { + } else { + h.decSliceHostPortRange((*[]HostPortRange)(yyv34), d) + } } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l } else { - yyb16 = r.CheckBreak() + yyb23 = r.CheckBreak() } - if yyb16 { + if yyb23 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.SELinux = SELinuxStrategyOptions{} + x.HostPID = false + } else { + x.HostPID = bool(r.DecodeBool()) + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.HostIPC = false + } else { + x.HostIPC = bool(r.DecodeBool()) + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.SELinux = SELinuxStrategyOptions{} } else { - yyv27 := &x.SELinux - yyv27.CodecDecodeSelf(d) + yyv38 := &x.SELinux + yyv38.CodecDecodeSelf(d) } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l } else { - yyb16 = r.CheckBreak() + yyb23 = r.CheckBreak() } - if yyb16 { + if yyb23 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -17381,21 +17521,71 @@ func (x *PodSecurityPolicySpec) codecDecodeSelfFromArray(l int, d *codec1978.Dec if r.TryDecodeAsNil() { x.RunAsUser = RunAsUserStrategyOptions{} } else { - yyv28 := &x.RunAsUser - yyv28.CodecDecodeSelf(d) + yyv39 := &x.RunAsUser + yyv39.CodecDecodeSelf(d) + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) 
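// Nothing behavioral hides in the yyj16 -> yyj23 / yyv18 -> yyv25 renames
// running through this hunk: codecgen numbers its temporaries sequentially
// per function, so inserting the three capability slices ahead of the
// existing fields shifts every later counter when the file is regenerated.
// What does matter is the positional layout this array-form (StructToArray)
// decoder now expects:
//
//	0 privileged
//	1 defaultAddCapabilities
//	2 requiredDropCapabilities
//	3 allowedCapabilities
//	4 volumes
//	5 hostNetwork
//	6 hostPorts
//	7 hostPID
//	8 hostIPC
//	9 seLinux
//	10 runAsUser
//	11 supplementalGroups
//	12 fsGroup
//	13 readOnlyRootFilesystem
//
// Index 13 matches the yyq2[13] guard in the encoder above.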
+ return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.SupplementalGroups = SupplementalGroupsStrategyOptions{} + } else { + yyv40 := &x.SupplementalGroups + yyv40.CodecDecodeSelf(d) + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.FSGroup = FSGroupStrategyOptions{} + } else { + yyv41 := &x.FSGroup + yyv41.CodecDecodeSelf(d) + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ReadOnlyRootFilesystem = false + } else { + x.ReadOnlyRootFilesystem = bool(r.DecodeBool()) } for { - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l } else { - yyb16 = r.CheckBreak() + yyb23 = r.CheckBreak() } - if yyb16 { + if yyb23 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj16-1, "") + z.DecStructFieldNotFound(yyj23-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -18312,7 +18502,7 @@ func (x *RunAsUserStrategy) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *PodSecurityPolicyList) CodecEncodeSelf(e *codec1978.Encoder) { +func (x *FSGroupStrategyOptions) CodecEncodeSelf(e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -18326,17 +18516,16 @@ func (x *PodSecurityPolicyList) CodecEncodeSelf(e *codec1978.Encoder) { } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool + var yyq2 [2]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false - yyq2[0] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" + yyq2[0] = x.Rule != "" + yyq2[1] = len(x.Ranges) != 0 var yynn2 int if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) + r.EncodeArrayStart(2) } else { - yynn2 = 1 + yynn2 = 0 for _, b := range yyq2 { if b { yynn2++ @@ -18348,106 +18537,48 @@ func (x *PodSecurityPolicyList) CodecEncodeSelf(e *codec1978.Encoder) { if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) if yyq2[0] { - yy4 := &x.ListMeta - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else { - z.EncFallback(yy4) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ListMeta - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else { - z.EncFallback(yy6) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSlicePodSecurityPolicy(([]PodSecurityPolicy)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - 
h.encSlicePodSecurityPolicy(([]PodSecurityPolicy)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } + x.Rule.CodecEncodeSelf(e) } else { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2[2] { + if yyq2[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) + r.EncodeString(codecSelferC_UTF81234, string("rule")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } + x.Rule.CodecEncodeSelf(e) } } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { + if yyq2[1] { + if x.Ranges == nil { + r.EncodeNil() } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + h.encSliceIDRange(([]IDRange)(x.Ranges), e) + } } } else { - r.EncodeString(codecSelferC_UTF81234, "") + r.EncodeNil() } } else { - if yyq2[3] { + if yyq2[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + r.EncodeString(codecSelferC_UTF81234, string("ranges")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { + if x.Ranges == nil { + r.EncodeNil() } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + h.encSliceIDRange(([]IDRange)(x.Ranges), e) + } } } } @@ -18460,7 +18591,7 @@ func (x *PodSecurityPolicyList) CodecEncodeSelf(e *codec1978.Encoder) { } } -func (x *PodSecurityPolicyList) CodecDecodeSelf(d *codec1978.Decoder) { +func (x *FSGroupStrategyOptions) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -18490,7 +18621,7 @@ func (x *PodSecurityPolicyList) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x *PodSecurityPolicyList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { +func (x *FSGroupStrategyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -18512,43 +18643,24 @@ func (x *PodSecurityPolicyList) codecDecodeSelfFromMap(l int, d *codec1978.Decod yys3 := string(yys3Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) switch yys3 { - case "metadata": + case "rule": if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} + x.Rule = "" } else { - yyv4 := &x.ListMeta - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else { - z.DecFallback(yyv4, false) - } + x.Rule = FSGroupStrategyType(r.DecodeString()) } - case "items": + case "ranges": if r.TryDecodeAsNil() { - x.Items = nil + x.Ranges = nil } else { - yyv6 := &x.Items - yym7 := z.DecBinary() - _ = yym7 + yyv5 := &x.Ranges + yym6 := z.DecBinary() + _ = yym6 if false { } else { - h.decSlicePodSecurityPolicy((*[]PodSecurityPolicy)(yyv6), d) + h.decSliceIDRange((*[]IDRange)(yyv5), d) } } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - 
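// The hunk above is not a semantic rewrite of PodSecurityPolicyList:
// codecgen simply emits the new FSGroupStrategyOptions codecs where the
// list codecs used to sit, and the list reappears verbatim further down.
// The shape implied by the "rule"/"ranges" cases here (a sketch, not the
// canonical definition):
//
//	type FSGroupStrategyOptions struct {
//	    Rule   FSGroupStrategyType `json:"rule,omitempty"`   // yyq2[0] = x.Rule != ""
//	    Ranges []IDRange           `json:"ranges,omitempty"` // yyq2[1] = len(x.Ranges) != 0
//	}
//
// SupplementalGroupsStrategyOptions below is structurally identical,
// differing only in its rule type.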
x.APIVersion = string(r.DecodeString()) - } default: z.DecStructFieldNotFound(-1, yys3) } // end switch yys3 @@ -18556,107 +18668,2795 @@ func (x *PodSecurityPolicyList) codecDecodeSelfFromMap(l int, d *codec1978.Decod z.DecSendContainerState(codecSelfer_containerMapEnd1234) } -func (x *PodSecurityPolicyList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { +func (x *FSGroupStrategyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb10 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb10 { + if yyb7 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} + x.Rule = "" } else { - yyv11 := &x.ListMeta - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else { - z.DecFallback(yyv11, false) - } + x.Rule = FSGroupStrategyType(r.DecodeString()) } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb10 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb10 { + if yyb7 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Items = nil + x.Ranges = nil } else { - yyv13 := &x.Items - yym14 := z.DecBinary() - _ = yym14 + yyv9 := &x.Ranges + yym10 := z.DecBinary() + _ = yym10 if false { } else { - h.decSlicePodSecurityPolicy((*[]PodSecurityPolicy)(yyv13), d) + h.decSliceIDRange((*[]IDRange)(yyv9), d) } } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { + for { + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj7-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x FSGroupStrategyType) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *FSGroupStrategyType) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *SupplementalGroupsStrategyOptions) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Rule != "" + yyq2[1] = len(x.Ranges) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + 
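// This presence pass is how codecgen emulates `json:",omitempty"` for
// encoders that must know a map's length up front: yyq2 records which
// fields will be written, the loop counts them into yynn2, and only then
// can EncodeMapStart be called. A minimal hand-written equivalent for a
// two-field struct like the one here:
//
//	present := [2]bool{x.Rule != "", len(x.Ranges) != 0}
//	n := 0
//	for _, p := range present {
//	    if p {
//	        n++
//	    }
//	}
//	r.EncodeMapStart(n) // afterwards, emit only the fields marked present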
r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + x.Rule.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("rule")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Rule.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Ranges == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + h.encSliceIDRange(([]IDRange)(x.Ranges), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("ranges")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Ranges == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + h.encSliceIDRange(([]IDRange)(x.Ranges), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *SupplementalGroupsStrategyOptions) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *SupplementalGroupsStrategyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "rule": + if r.TryDecodeAsNil() { + x.Rule = "" + } else { + x.Rule = SupplementalGroupsStrategyType(r.DecodeString()) + } + case "ranges": + if r.TryDecodeAsNil() { + x.Ranges = nil + } else { + yyv5 := &x.Ranges + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + h.decSliceIDRange((*[]IDRange)(yyv5), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *SupplementalGroupsStrategyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.Kind = "" + x.Rule = "" } else { - x.Kind = string(r.DecodeString()) + x.Rule = SupplementalGroupsStrategyType(r.DecodeString()) } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb10 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb10 { + if yyb7 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - x.APIVersion = "" + x.Ranges = nil } else { - x.APIVersion = string(r.DecodeString()) + yyv9 := &x.Ranges + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + h.decSliceIDRange((*[]IDRange)(yyv9), d) + } } for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb10 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb10 { + if yyb7 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") + z.DecStructFieldNotFound(yyj7-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x SupplementalGroupsStrategyType) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *SupplementalGroupsStrategyType) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *PodSecurityPolicyList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = true + yyq2[2] = x.Kind != "" + yyq2[3] = x.APIVersion != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yy4 := &x.ListMeta + yym5 := z.EncBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.EncExt(yy4) { + } else { + z.EncFallback(yy4) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.ListMeta + yym7 := z.EncBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.EncExt(yy6) { + } else { + z.EncFallback(yy6) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym9 := z.EncBinary() + _ = yym9 + if false { + } else { + h.encSlicePodSecurityPolicy(([]PodSecurityPolicy)(x.Items), e) + } + } + } else { + 
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + h.encSlicePodSecurityPolicy(([]PodSecurityPolicy)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym12 := z.EncBinary() + _ = yym12 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodSecurityPolicyList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodSecurityPolicyList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_unversioned.ListMeta{} + } else { + yyv4 := &x.ListMeta + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(yyv4) { + } else { + z.DecFallback(yyv4, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv6 := &x.Items + yym7 := z.DecBinary() + _ = yym7 
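// The recurring `yymN := z.DecBinary(); _ = yymN; if false { } else ...`
// shape throughout this file is a codecgen idiom, not accidental dead code:
// the empty `if false` arm gives the generator a uniform slot to chain
// optional `else if` branches (codec extensions, JSON marshalers) ahead of
// the generic fallback, and the blank assignment keeps yymN from tripping
// the unused-variable check when no branch ends up needing it.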
+ if false { + } else { + h.decSlicePodSecurityPolicy((*[]PodSecurityPolicy)(yyv6), d) + } + } + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodSecurityPolicyList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_unversioned.ListMeta{} + } else { + yyv11 := &x.ListMeta + yym12 := z.DecBinary() + _ = yym12 + if false { + } else if z.HasExtensions() && z.DecExt(yyv11) { + } else { + z.DecFallback(yyv11, false) + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv13 := &x.Items + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + h.decSlicePodSecurityPolicy((*[]PodSecurityPolicy)(yyv13), d) + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *NetworkPolicy) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = true + yyq2[1] = true + yyq2[2] = x.Kind != "" + yyq2[3] = x.APIVersion != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yy4 := &x.ObjectMeta + yy4.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + 
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.ObjectMeta + yy6.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yy9 := &x.Spec + yy9.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy11 := &x.Spec + yy11.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym18 := z.EncBinary() + _ = yym18 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *NetworkPolicy) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *NetworkPolicy) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv4 := 
&x.ObjectMeta + yyv4.CodecDecodeSelf(d) + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = NetworkPolicySpec{} + } else { + yyv5 := &x.Spec + yyv5.CodecDecodeSelf(d) + } + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *NetworkPolicy) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv9 := &x.ObjectMeta + yyv9.CodecDecodeSelf(d) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = NetworkPolicySpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *NetworkPolicySpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = len(x.Ingress) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy4 := &x.PodSelector + yy4.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("podSelector")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.PodSelector + yy6.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if 
yyq2[1] { + if x.Ingress == nil { + r.EncodeNil() + } else { + yym9 := z.EncBinary() + _ = yym9 + if false { + } else { + h.encSliceNetworkPolicyIngressRule(([]NetworkPolicyIngressRule)(x.Ingress), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("ingress")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Ingress == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + h.encSliceNetworkPolicyIngressRule(([]NetworkPolicyIngressRule)(x.Ingress), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *NetworkPolicySpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *NetworkPolicySpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "podSelector": + if r.TryDecodeAsNil() { + x.PodSelector = LabelSelector{} + } else { + yyv4 := &x.PodSelector + yyv4.CodecDecodeSelf(d) + } + case "ingress": + if r.TryDecodeAsNil() { + x.Ingress = nil + } else { + yyv5 := &x.Ingress + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + h.decSliceNetworkPolicyIngressRule((*[]NetworkPolicyIngressRule)(yyv5), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *NetworkPolicySpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.PodSelector = LabelSelector{} + } else { + yyv8 := &x.PodSelector + yyv8.CodecDecodeSelf(d) + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) 
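// NetworkPolicySpec, as implied by the codecs in this hunk (a sketch):
// podSelector is always emitted (note yynn2 starts at 1 in map mode),
// while ingress is gated on len(x.Ingress) != 0.
//
//	type NetworkPolicySpec struct {
//	    PodSelector LabelSelector              `json:"podSelector"`
//	    Ingress     []NetworkPolicyIngressRule `json:"ingress,omitempty"`
//	}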
+ if r.TryDecodeAsNil() { + x.Ingress = nil + } else { + yyv9 := &x.Ingress + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + h.decSliceNetworkPolicyIngressRule((*[]NetworkPolicyIngressRule)(yyv9), d) + } + } + for { + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj7-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *NetworkPolicyIngressRule) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.Ports) != 0 + yyq2[1] = len(x.From) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Ports == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSliceNetworkPolicyPort(([]NetworkPolicyPort)(x.Ports), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("ports")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Ports == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSliceNetworkPolicyPort(([]NetworkPolicyPort)(x.Ports), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.From == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + h.encSliceNetworkPolicyPeer(([]NetworkPolicyPeer)(x.From), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("from")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.From == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + h.encSliceNetworkPolicyPeer(([]NetworkPolicyPeer)(x.From), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *NetworkPolicyIngressRule) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + 
} + } +} + +func (x *NetworkPolicyIngressRule) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "ports": + if r.TryDecodeAsNil() { + x.Ports = nil + } else { + yyv4 := &x.Ports + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSliceNetworkPolicyPort((*[]NetworkPolicyPort)(yyv4), d) + } + } + case "from": + if r.TryDecodeAsNil() { + x.From = nil + } else { + yyv6 := &x.From + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + h.decSliceNetworkPolicyPeer((*[]NetworkPolicyPeer)(yyv6), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *NetworkPolicyIngressRule) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Ports = nil + } else { + yyv9 := &x.Ports + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + h.decSliceNetworkPolicyPort((*[]NetworkPolicyPort)(yyv9), d) + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.From = nil + } else { + yyv11 := &x.From + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + h.decSliceNetworkPolicyPeer((*[]NetworkPolicyPeer)(yyv11), d) + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *NetworkPolicyPort) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Protocol != nil + yyq2[1] = x.Port != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Protocol == nil { + r.EncodeNil() + } else { + yy4 := *x.Protocol + yysf5 := &yy4 + yysf5.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] 
{ + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("protocol")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Protocol == nil { + r.EncodeNil() + } else { + yy6 := *x.Protocol + yysf7 := &yy6 + yysf7.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Port == nil { + r.EncodeNil() + } else { + yym9 := z.EncBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.EncExt(x.Port) { + } else if !yym9 && z.IsJSONHandle() { + z.EncJSONMarshal(x.Port) + } else { + z.EncFallback(x.Port) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("port")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Port == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.EncExt(x.Port) { + } else if !yym10 && z.IsJSONHandle() { + z.EncJSONMarshal(x.Port) + } else { + z.EncFallback(x.Port) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *NetworkPolicyPort) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *NetworkPolicyPort) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "protocol": + if r.TryDecodeAsNil() { + if x.Protocol != nil { + x.Protocol = nil + } + } else { + if x.Protocol == nil { + x.Protocol = new(pkg2_v1.Protocol) + } + x.Protocol.CodecDecodeSelf(d) + } + case "port": + if r.TryDecodeAsNil() { + if x.Port != nil { + x.Port = nil + } + } else { + if x.Port == nil { + x.Port = new(pkg5_intstr.IntOrString) + } + yym6 := z.DecBinary() + _ = yym6 + if false { + } else if z.HasExtensions() && z.DecExt(x.Port) { + } else if !yym6 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.Port) + } else { + z.DecFallback(x.Port, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *NetworkPolicyPort) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h 
codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Protocol != nil { + x.Protocol = nil + } + } else { + if x.Protocol == nil { + x.Protocol = new(pkg2_v1.Protocol) + } + x.Protocol.CodecDecodeSelf(d) + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Port != nil { + x.Port = nil + } + } else { + if x.Port == nil { + x.Port = new(pkg5_intstr.IntOrString) + } + yym10 := z.DecBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.DecExt(x.Port) { + } else if !yym10 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.Port) + } else { + z.DecFallback(x.Port, false) + } + } + for { + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj7-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *NetworkPolicyPeer) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.PodSelector != nil + yyq2[1] = x.NamespaceSelector != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.PodSelector == nil { + r.EncodeNil() + } else { + x.PodSelector.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("podSelector")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.PodSelector == nil { + r.EncodeNil() + } else { + x.PodSelector.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.NamespaceSelector == nil { + r.EncodeNil() + } else { + x.NamespaceSelector.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("namespaceSelector")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.NamespaceSelector == nil { + r.EncodeNil() + } else { + x.NamespaceSelector.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *NetworkPolicyPeer) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r 
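// NetworkPolicyPort and NetworkPolicyPeer both use pointer fields so that
// "unset" stays distinguishable from a zero value; every field is optional.
// Shapes implied by these codecs (a sketch):
//
//	type NetworkPolicyPort struct {
//	    Protocol *pkg2_v1.Protocol        `json:"protocol,omitempty"`
//	    Port     *pkg5_intstr.IntOrString `json:"port,omitempty"`
//	}
//
//	type NetworkPolicyPeer struct {
//	    PodSelector       *LabelSelector `json:"podSelector,omitempty"`
//	    NamespaceSelector *LabelSelector `json:"namespaceSelector,omitempty"`
//	}
//
// Note the Port handling above: on a JSON handle the generated code defers
// to IntOrString's own marshaller (EncJSONMarshal/DecJSONUnmarshal), so the
// wire value may legitimately be either a number or a string.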
+ yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *NetworkPolicyPeer) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "podSelector": + if r.TryDecodeAsNil() { + if x.PodSelector != nil { + x.PodSelector = nil + } + } else { + if x.PodSelector == nil { + x.PodSelector = new(LabelSelector) + } + x.PodSelector.CodecDecodeSelf(d) + } + case "namespaceSelector": + if r.TryDecodeAsNil() { + if x.NamespaceSelector != nil { + x.NamespaceSelector = nil + } + } else { + if x.NamespaceSelector == nil { + x.NamespaceSelector = new(LabelSelector) + } + x.NamespaceSelector.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *NetworkPolicyPeer) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.PodSelector != nil { + x.PodSelector = nil + } + } else { + if x.PodSelector == nil { + x.PodSelector = new(LabelSelector) + } + x.PodSelector.CodecDecodeSelf(d) + } + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.NamespaceSelector != nil { + x.NamespaceSelector = nil + } + } else { + if x.NamespaceSelector == nil { + x.NamespaceSelector = new(LabelSelector) + } + x.NamespaceSelector.CodecDecodeSelf(d) + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *NetworkPolicyList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { 
+ yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = true + yyq2[2] = x.Kind != "" + yyq2[3] = x.APIVersion != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yy4 := &x.ListMeta + yym5 := z.EncBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.EncExt(yy4) { + } else { + z.EncFallback(yy4) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.ListMeta + yym7 := z.EncBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.EncExt(yy6) { + } else { + z.EncFallback(yy6) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym9 := z.EncBinary() + _ = yym9 + if false { + } else { + h.encSliceNetworkPolicy(([]NetworkPolicy)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + h.encSliceNetworkPolicy(([]NetworkPolicy)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym12 := z.EncBinary() + _ = yym12 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *NetworkPolicyList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + 
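// Every CodecDecodeSelf in this file funnels through the container dispatch
// visible here: once the extension hook declines, the decoder checks the
// container type and hands off to the map- or array-form helper, emitting
// the matching container-end state immediately when the length is zero.
// Schematically:
//
//	switch r.ContainerType() {
//	case codecSelferValueTypeMap1234:
//	    if l := r.ReadMapStart(); l == 0 {
//	        z.DecSendContainerState(codecSelfer_containerMapEnd1234)
//	    } else {
//	        x.codecDecodeSelfFromMap(l, d)
//	    }
//	case codecSelferValueTypeArray1234:
//	    if l := r.ReadArrayStart(); l == 0 {
//	        z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
//	    } else {
//	        x.codecDecodeSelfFromArray(l, d)
//	    }
//	default:
//	    panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
//	}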
x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *NetworkPolicyList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_unversioned.ListMeta{} + } else { + yyv4 := &x.ListMeta + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(yyv4) { + } else { + z.DecFallback(yyv4, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv6 := &x.Items + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + h.decSliceNetworkPolicy((*[]NetworkPolicy)(yyv6), d) + } + } + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *NetworkPolicyList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_unversioned.ListMeta{} + } else { + yyv11 := &x.ListMeta + yym12 := z.DecBinary() + _ = yym12 + if false { + } else if z.HasExtensions() && z.DecExt(yyv11) { + } else { + z.DecFallback(yyv11, false) + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv13 := &x.Items + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + h.decSliceNetworkPolicy((*[]NetworkPolicy)(yyv13), d) + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else 
{ + x.APIVersion = string(r.DecodeString()) + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) encSliceCustomMetricTarget(v []CustomMetricTarget, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceCustomMetricTarget(v *[]CustomMetricTarget, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []CustomMetricTarget{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 72) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]CustomMetricTarget, yyrl1) + } + } else { + yyv1 = make([]CustomMetricTarget, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = CustomMetricTarget{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, CustomMetricTarget{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = CustomMetricTarget{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, CustomMetricTarget{}) // var yyz1 CustomMetricTarget + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = CustomMetricTarget{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []CustomMetricTarget{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceCustomMetricCurrentStatus(v []CustomMetricCurrentStatus, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceCustomMetricCurrentStatus(v *[]CustomMetricCurrentStatus, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = 
[]CustomMetricCurrentStatus{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 72) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]CustomMetricCurrentStatus, yyrl1) + } + } else { + yyv1 = make([]CustomMetricCurrentStatus, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = CustomMetricCurrentStatus{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, CustomMetricCurrentStatus{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = CustomMetricCurrentStatus{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, CustomMetricCurrentStatus{}) // var yyz1 CustomMetricCurrentStatus + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = CustomMetricCurrentStatus{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []CustomMetricCurrentStatus{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceHorizontalPodAutoscaler(v []HorizontalPodAutoscaler, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceHorizontalPodAutoscaler(v *[]HorizontalPodAutoscaler, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []HorizontalPodAutoscaler{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 360) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]HorizontalPodAutoscaler, yyrl1) + } + } else { + yyv1 = make([]HorizontalPodAutoscaler, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = HorizontalPodAutoscaler{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, HorizontalPodAutoscaler{}) + 
yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = HorizontalPodAutoscaler{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, HorizontalPodAutoscaler{}) // var yyz1 HorizontalPodAutoscaler + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = HorizontalPodAutoscaler{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []HorizontalPodAutoscaler{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceAPIVersion(v []APIVersion, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceAPIVersion(v *[]APIVersion, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []APIVersion{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]APIVersion, yyrl1) + } + } else { + yyv1 = make([]APIVersion, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = APIVersion{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, APIVersion{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = APIVersion{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, APIVersion{}) // var yyz1 APIVersion + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = APIVersion{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []APIVersion{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) encSliceCustomMetricTarget(v []CustomMetricTarget, e *codec1978.Encoder) { +func (x codecSelfer1234) encSliceThirdPartyResource(v []ThirdPartyResource, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -18669,7 +21469,7 @@ func (x codecSelfer1234) encSliceCustomMetricTarget(v 
[]CustomMetricTarget, e *c z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) decSliceCustomMetricTarget(v *[]CustomMetricTarget, d *codec1978.Decoder) { +func (x codecSelfer1234) decSliceThirdPartyResource(v *[]ThirdPartyResource, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -18680,7 +21480,7 @@ func (x codecSelfer1234) decSliceCustomMetricTarget(v *[]CustomMetricTarget, d * _ = yyc1 if yyl1 == 0 { if yyv1 == nil { - yyv1 = []CustomMetricTarget{} + yyv1 = []ThirdPartyResource{} yyc1 = true } else if len(yyv1) != 0 { yyv1 = yyv1[:0] @@ -18695,15 +21495,15 @@ func (x codecSelfer1234) decSliceCustomMetricTarget(v *[]CustomMetricTarget, d * yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 40) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 280) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] } else { - yyv1 = make([]CustomMetricTarget, yyrl1) + yyv1 = make([]ThirdPartyResource, yyrl1) } } else { - yyv1 = make([]CustomMetricTarget, yyrl1) + yyv1 = make([]ThirdPartyResource, yyrl1) } yyc1 = true yyrr1 = len(yyv1) @@ -18718,7 +21518,7 @@ func (x codecSelfer1234) decSliceCustomMetricTarget(v *[]CustomMetricTarget, d * for ; yyj1 < yyrr1; yyj1++ { yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = CustomMetricTarget{} + yyv1[yyj1] = ThirdPartyResource{} } else { yyv2 := &yyv1[yyj1] yyv2.CodecDecodeSelf(d) @@ -18727,10 +21527,10 @@ func (x codecSelfer1234) decSliceCustomMetricTarget(v *[]CustomMetricTarget, d * } if yyrt1 { for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, CustomMetricTarget{}) + yyv1 = append(yyv1, ThirdPartyResource{}) yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = CustomMetricTarget{} + yyv1[yyj1] = ThirdPartyResource{} } else { yyv3 := &yyv1[yyj1] yyv3.CodecDecodeSelf(d) @@ -18744,13 +21544,13 @@ func (x codecSelfer1234) decSliceCustomMetricTarget(v *[]CustomMetricTarget, d * for ; !r.CheckBreak(); yyj1++ { if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, CustomMetricTarget{}) // var yyz1 CustomMetricTarget + yyv1 = append(yyv1, ThirdPartyResource{}) // var yyz1 ThirdPartyResource yyc1 = true } yyh1.ElemContainerState(yyj1) if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv1[yyj1] = CustomMetricTarget{} + yyv1[yyj1] = ThirdPartyResource{} } else { yyv4 := &yyv1[yyj1] yyv4.CodecDecodeSelf(d) @@ -18765,7 +21565,7 @@ func (x codecSelfer1234) decSliceCustomMetricTarget(v *[]CustomMetricTarget, d * yyv1 = yyv1[:yyj1] yyc1 = true } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []CustomMetricTarget{} + yyv1 = []ThirdPartyResource{} yyc1 = true } } @@ -18775,7 +21575,7 @@ func (x codecSelfer1234) decSliceCustomMetricTarget(v *[]CustomMetricTarget, d * } } -func (x codecSelfer1234) encSliceCustomMetricCurrentStatus(v []CustomMetricCurrentStatus, e *codec1978.Encoder) { +func (x codecSelfer1234) encSliceDeployment(v []Deployment, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -18788,7 +21588,7 @@ func (x codecSelfer1234) encSliceCustomMetricCurrentStatus(v []CustomMetricCurre z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) decSliceCustomMetricCurrentStatus(v *[]CustomMetricCurrentStatus, d *codec1978.Decoder) { +func (x codecSelfer1234) decSliceDeployment(v *[]Deployment, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ 
-18799,7 +21599,7 @@ func (x codecSelfer1234) decSliceCustomMetricCurrentStatus(v *[]CustomMetricCurr _ = yyc1 if yyl1 == 0 { if yyv1 == nil { - yyv1 = []CustomMetricCurrentStatus{} + yyv1 = []Deployment{} yyc1 = true } else if len(yyv1) != 0 { yyv1 = yyv1[:0] @@ -18814,15 +21614,15 @@ func (x codecSelfer1234) decSliceCustomMetricCurrentStatus(v *[]CustomMetricCurr yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 40) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 792) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] } else { - yyv1 = make([]CustomMetricCurrentStatus, yyrl1) + yyv1 = make([]Deployment, yyrl1) } } else { - yyv1 = make([]CustomMetricCurrentStatus, yyrl1) + yyv1 = make([]Deployment, yyrl1) } yyc1 = true yyrr1 = len(yyv1) @@ -18837,7 +21637,7 @@ func (x codecSelfer1234) decSliceCustomMetricCurrentStatus(v *[]CustomMetricCurr for ; yyj1 < yyrr1; yyj1++ { yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = CustomMetricCurrentStatus{} + yyv1[yyj1] = Deployment{} } else { yyv2 := &yyv1[yyj1] yyv2.CodecDecodeSelf(d) @@ -18846,10 +21646,10 @@ func (x codecSelfer1234) decSliceCustomMetricCurrentStatus(v *[]CustomMetricCurr } if yyrt1 { for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, CustomMetricCurrentStatus{}) + yyv1 = append(yyv1, Deployment{}) yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = CustomMetricCurrentStatus{} + yyv1[yyj1] = Deployment{} } else { yyv3 := &yyv1[yyj1] yyv3.CodecDecodeSelf(d) @@ -18863,13 +21663,13 @@ func (x codecSelfer1234) decSliceCustomMetricCurrentStatus(v *[]CustomMetricCurr for ; !r.CheckBreak(); yyj1++ { if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, CustomMetricCurrentStatus{}) // var yyz1 CustomMetricCurrentStatus + yyv1 = append(yyv1, Deployment{}) // var yyz1 Deployment yyc1 = true } yyh1.ElemContainerState(yyj1) if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv1[yyj1] = CustomMetricCurrentStatus{} + yyv1[yyj1] = Deployment{} } else { yyv4 := &yyv1[yyj1] yyv4.CodecDecodeSelf(d) @@ -18884,7 +21684,7 @@ func (x codecSelfer1234) decSliceCustomMetricCurrentStatus(v *[]CustomMetricCurr yyv1 = yyv1[:yyj1] yyc1 = true } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []CustomMetricCurrentStatus{} + yyv1 = []Deployment{} yyc1 = true } } @@ -18894,7 +21694,7 @@ func (x codecSelfer1234) decSliceCustomMetricCurrentStatus(v *[]CustomMetricCurr } } -func (x codecSelfer1234) encSliceHorizontalPodAutoscaler(v []HorizontalPodAutoscaler, e *codec1978.Encoder) { +func (x codecSelfer1234) encSliceDaemonSet(v []DaemonSet, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -18907,7 +21707,7 @@ func (x codecSelfer1234) encSliceHorizontalPodAutoscaler(v []HorizontalPodAutosc z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) decSliceHorizontalPodAutoscaler(v *[]HorizontalPodAutoscaler, d *codec1978.Decoder) { +func (x codecSelfer1234) decSliceDaemonSet(v *[]DaemonSet, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -18918,7 +21718,7 @@ func (x codecSelfer1234) decSliceHorizontalPodAutoscaler(v *[]HorizontalPodAutos _ = yyc1 if yyl1 == 0 { if yyv1 == nil { - yyv1 = []HorizontalPodAutoscaler{} + yyv1 = []DaemonSet{} yyc1 = true } else if len(yyv1) != 0 { yyv1 = yyv1[:0] @@ -18933,15 +21733,15 @@ func (x codecSelfer1234) decSliceHorizontalPodAutoscaler(v *[]HorizontalPodAutos yyrg1 := len(yyv1) > 0 yyv21 := yyv1 
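+	// DecInferLen bounds the initial allocation: given the wire length, the
+	// handle's MaxInitLen budget, and the per-element size in bytes, it returns
+	// a safe capacity plus a truncated flag; when truncated, the remaining
+	// elements are decoded through the append loop below instead of being
+	// pre-allocated.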
- yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 312) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 720) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] } else { - yyv1 = make([]HorizontalPodAutoscaler, yyrl1) + yyv1 = make([]DaemonSet, yyrl1) } } else { - yyv1 = make([]HorizontalPodAutoscaler, yyrl1) + yyv1 = make([]DaemonSet, yyrl1) } yyc1 = true yyrr1 = len(yyv1) @@ -18956,7 +21756,7 @@ func (x codecSelfer1234) decSliceHorizontalPodAutoscaler(v *[]HorizontalPodAutos for ; yyj1 < yyrr1; yyj1++ { yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = HorizontalPodAutoscaler{} + yyv1[yyj1] = DaemonSet{} } else { yyv2 := &yyv1[yyj1] yyv2.CodecDecodeSelf(d) @@ -18965,10 +21765,10 @@ func (x codecSelfer1234) decSliceHorizontalPodAutoscaler(v *[]HorizontalPodAutos } if yyrt1 { for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, HorizontalPodAutoscaler{}) + yyv1 = append(yyv1, DaemonSet{}) yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = HorizontalPodAutoscaler{} + yyv1[yyj1] = DaemonSet{} } else { yyv3 := &yyv1[yyj1] yyv3.CodecDecodeSelf(d) @@ -18982,13 +21782,13 @@ func (x codecSelfer1234) decSliceHorizontalPodAutoscaler(v *[]HorizontalPodAutos for ; !r.CheckBreak(); yyj1++ { if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, HorizontalPodAutoscaler{}) // var yyz1 HorizontalPodAutoscaler + yyv1 = append(yyv1, DaemonSet{}) // var yyz1 DaemonSet yyc1 = true } yyh1.ElemContainerState(yyj1) if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv1[yyj1] = HorizontalPodAutoscaler{} + yyv1[yyj1] = DaemonSet{} } else { yyv4 := &yyv1[yyj1] yyv4.CodecDecodeSelf(d) @@ -19003,7 +21803,7 @@ func (x codecSelfer1234) decSliceHorizontalPodAutoscaler(v *[]HorizontalPodAutos yyv1 = yyv1[:yyj1] yyc1 = true } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []HorizontalPodAutoscaler{} + yyv1 = []DaemonSet{} yyc1 = true } } @@ -19013,7 +21813,7 @@ func (x codecSelfer1234) decSliceHorizontalPodAutoscaler(v *[]HorizontalPodAutos } } -func (x codecSelfer1234) encSliceAPIVersion(v []APIVersion, e *codec1978.Encoder) { +func (x codecSelfer1234) encSliceThirdPartyResourceData(v []ThirdPartyResourceData, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -19026,7 +21826,7 @@ func (x codecSelfer1234) encSliceAPIVersion(v []APIVersion, e *codec1978.Encoder z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) decSliceAPIVersion(v *[]APIVersion, d *codec1978.Decoder) { +func (x codecSelfer1234) decSliceThirdPartyResourceData(v *[]ThirdPartyResourceData, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -19037,7 +21837,7 @@ func (x codecSelfer1234) decSliceAPIVersion(v *[]APIVersion, d *codec1978.Decode _ = yyc1 if yyl1 == 0 { if yyv1 == nil { - yyv1 = []APIVersion{} + yyv1 = []ThirdPartyResourceData{} yyc1 = true } else if len(yyv1) != 0 { yyv1 = yyv1[:0] @@ -19052,15 +21852,15 @@ func (x codecSelfer1234) decSliceAPIVersion(v *[]APIVersion, d *codec1978.Decode yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 32) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 264) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] } else { - yyv1 = make([]APIVersion, yyrl1) + yyv1 = make([]ThirdPartyResourceData, yyrl1) } } else { - yyv1 = make([]APIVersion, yyrl1) + yyv1 = make([]ThirdPartyResourceData, yyrl1) } yyc1 = true yyrr1 = len(yyv1) @@ -19075,7 
+21875,7 @@ func (x codecSelfer1234) decSliceAPIVersion(v *[]APIVersion, d *codec1978.Decode for ; yyj1 < yyrr1; yyj1++ { yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = APIVersion{} + yyv1[yyj1] = ThirdPartyResourceData{} } else { yyv2 := &yyv1[yyj1] yyv2.CodecDecodeSelf(d) @@ -19084,10 +21884,10 @@ func (x codecSelfer1234) decSliceAPIVersion(v *[]APIVersion, d *codec1978.Decode } if yyrt1 { for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, APIVersion{}) + yyv1 = append(yyv1, ThirdPartyResourceData{}) yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = APIVersion{} + yyv1[yyj1] = ThirdPartyResourceData{} } else { yyv3 := &yyv1[yyj1] yyv3.CodecDecodeSelf(d) @@ -19101,13 +21901,13 @@ func (x codecSelfer1234) decSliceAPIVersion(v *[]APIVersion, d *codec1978.Decode for ; !r.CheckBreak(); yyj1++ { if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, APIVersion{}) // var yyz1 APIVersion + yyv1 = append(yyv1, ThirdPartyResourceData{}) // var yyz1 ThirdPartyResourceData yyc1 = true } yyh1.ElemContainerState(yyj1) if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv1[yyj1] = APIVersion{} + yyv1[yyj1] = ThirdPartyResourceData{} } else { yyv4 := &yyv1[yyj1] yyv4.CodecDecodeSelf(d) @@ -19122,7 +21922,7 @@ func (x codecSelfer1234) decSliceAPIVersion(v *[]APIVersion, d *codec1978.Decode yyv1 = yyv1[:yyj1] yyc1 = true } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []APIVersion{} + yyv1 = []ThirdPartyResourceData{} yyc1 = true } } @@ -19132,7 +21932,7 @@ func (x codecSelfer1234) decSliceAPIVersion(v *[]APIVersion, d *codec1978.Decode } } -func (x codecSelfer1234) encSliceThirdPartyResource(v []ThirdPartyResource, e *codec1978.Encoder) { +func (x codecSelfer1234) encSliceJob(v []Job, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -19145,7 +21945,7 @@ func (x codecSelfer1234) encSliceThirdPartyResource(v []ThirdPartyResource, e *c z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) decSliceThirdPartyResource(v *[]ThirdPartyResource, d *codec1978.Decoder) { +func (x codecSelfer1234) decSliceJob(v *[]Job, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -19156,7 +21956,7 @@ func (x codecSelfer1234) decSliceThirdPartyResource(v *[]ThirdPartyResource, d * _ = yyc1 if yyl1 == 0 { if yyv1 == nil { - yyv1 = []ThirdPartyResource{} + yyv1 = []Job{} yyc1 = true } else if len(yyv1) != 0 { yyv1 = yyv1[:0] @@ -19171,15 +21971,15 @@ func (x codecSelfer1234) decSliceThirdPartyResource(v *[]ThirdPartyResource, d * yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 232) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 792) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] } else { - yyv1 = make([]ThirdPartyResource, yyrl1) + yyv1 = make([]Job, yyrl1) } } else { - yyv1 = make([]ThirdPartyResource, yyrl1) + yyv1 = make([]Job, yyrl1) } yyc1 = true yyrr1 = len(yyv1) @@ -19194,7 +21994,7 @@ func (x codecSelfer1234) decSliceThirdPartyResource(v *[]ThirdPartyResource, d * for ; yyj1 < yyrr1; yyj1++ { yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = ThirdPartyResource{} + yyv1[yyj1] = Job{} } else { yyv2 := &yyv1[yyj1] yyv2.CodecDecodeSelf(d) @@ -19203,10 +22003,10 @@ func (x codecSelfer1234) decSliceThirdPartyResource(v *[]ThirdPartyResource, d * } if yyrt1 { for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, ThirdPartyResource{}) + yyv1 = append(yyv1, Job{}) 
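+	// Overflow path: the stream holds more elements than were pre-allocated,
+	// so each extra element is appended as a zero value and decoded in place.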
yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = ThirdPartyResource{} + yyv1[yyj1] = Job{} } else { yyv3 := &yyv1[yyj1] yyv3.CodecDecodeSelf(d) @@ -19220,13 +22020,13 @@ func (x codecSelfer1234) decSliceThirdPartyResource(v *[]ThirdPartyResource, d * for ; !r.CheckBreak(); yyj1++ { if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, ThirdPartyResource{}) // var yyz1 ThirdPartyResource + yyv1 = append(yyv1, Job{}) // var yyz1 Job yyc1 = true } yyh1.ElemContainerState(yyj1) if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv1[yyj1] = ThirdPartyResource{} + yyv1[yyj1] = Job{} } else { yyv4 := &yyv1[yyj1] yyv4.CodecDecodeSelf(d) @@ -19241,7 +22041,7 @@ func (x codecSelfer1234) decSliceThirdPartyResource(v *[]ThirdPartyResource, d * yyv1 = yyv1[:yyj1] yyc1 = true } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []ThirdPartyResource{} + yyv1 = []Job{} yyc1 = true } } @@ -19251,7 +22051,7 @@ func (x codecSelfer1234) decSliceThirdPartyResource(v *[]ThirdPartyResource, d * } } -func (x codecSelfer1234) encSliceDeployment(v []Deployment, e *codec1978.Encoder) { +func (x codecSelfer1234) encSliceJobCondition(v []JobCondition, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -19264,7 +22064,7 @@ func (x codecSelfer1234) encSliceDeployment(v []Deployment, e *codec1978.Encoder z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) decSliceDeployment(v *[]Deployment, d *codec1978.Decoder) { +func (x codecSelfer1234) decSliceJobCondition(v *[]JobCondition, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -19275,7 +22075,7 @@ func (x codecSelfer1234) decSliceDeployment(v *[]Deployment, d *codec1978.Decode _ = yyc1 if yyl1 == 0 { if yyv1 == nil { - yyv1 = []Deployment{} + yyv1 = []JobCondition{} yyc1 = true } else if len(yyv1) != 0 { yyv1 = yyv1[:0] @@ -19290,15 +22090,15 @@ func (x codecSelfer1234) decSliceDeployment(v *[]Deployment, d *codec1978.Decode yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 640) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 112) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] } else { - yyv1 = make([]Deployment, yyrl1) + yyv1 = make([]JobCondition, yyrl1) } } else { - yyv1 = make([]Deployment, yyrl1) + yyv1 = make([]JobCondition, yyrl1) } yyc1 = true yyrr1 = len(yyv1) @@ -19313,7 +22113,7 @@ func (x codecSelfer1234) decSliceDeployment(v *[]Deployment, d *codec1978.Decode for ; yyj1 < yyrr1; yyj1++ { yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = Deployment{} + yyv1[yyj1] = JobCondition{} } else { yyv2 := &yyv1[yyj1] yyv2.CodecDecodeSelf(d) @@ -19322,10 +22122,10 @@ func (x codecSelfer1234) decSliceDeployment(v *[]Deployment, d *codec1978.Decode } if yyrt1 { for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, Deployment{}) + yyv1 = append(yyv1, JobCondition{}) yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = Deployment{} + yyv1[yyj1] = JobCondition{} } else { yyv3 := &yyv1[yyj1] yyv3.CodecDecodeSelf(d) @@ -19339,13 +22139,13 @@ func (x codecSelfer1234) decSliceDeployment(v *[]Deployment, d *codec1978.Decode for ; !r.CheckBreak(); yyj1++ { if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, Deployment{}) // var yyz1 Deployment + yyv1 = append(yyv1, JobCondition{}) // var yyz1 JobCondition yyc1 = true } yyh1.ElemContainerState(yyj1) if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv1[yyj1] = Deployment{} + 
yyv1[yyj1] = JobCondition{} } else { yyv4 := &yyv1[yyj1] yyv4.CodecDecodeSelf(d) @@ -19360,7 +22160,7 @@ func (x codecSelfer1234) decSliceDeployment(v *[]Deployment, d *codec1978.Decode yyv1 = yyv1[:yyj1] yyc1 = true } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []Deployment{} + yyv1 = []JobCondition{} yyc1 = true } } @@ -19370,7 +22170,7 @@ func (x codecSelfer1234) decSliceDeployment(v *[]Deployment, d *codec1978.Decode } } -func (x codecSelfer1234) encSliceDaemonSet(v []DaemonSet, e *codec1978.Encoder) { +func (x codecSelfer1234) encSliceIngress(v []Ingress, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -19383,7 +22183,7 @@ func (x codecSelfer1234) encSliceDaemonSet(v []DaemonSet, e *codec1978.Encoder) z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) decSliceDaemonSet(v *[]DaemonSet, d *codec1978.Decoder) { +func (x codecSelfer1234) decSliceIngress(v *[]Ingress, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -19394,7 +22194,7 @@ func (x codecSelfer1234) decSliceDaemonSet(v *[]DaemonSet, d *codec1978.Decoder) _ = yyc1 if yyl1 == 0 { if yyv1 == nil { - yyv1 = []DaemonSet{} + yyv1 = []Ingress{} yyc1 = true } else if len(yyv1) != 0 { yyv1 = yyv1[:0] @@ -19409,15 +22209,15 @@ func (x codecSelfer1234) decSliceDaemonSet(v *[]DaemonSet, d *codec1978.Decoder) yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 568) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 320) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] } else { - yyv1 = make([]DaemonSet, yyrl1) + yyv1 = make([]Ingress, yyrl1) } } else { - yyv1 = make([]DaemonSet, yyrl1) + yyv1 = make([]Ingress, yyrl1) } yyc1 = true yyrr1 = len(yyv1) @@ -19432,7 +22232,7 @@ func (x codecSelfer1234) decSliceDaemonSet(v *[]DaemonSet, d *codec1978.Decoder) for ; yyj1 < yyrr1; yyj1++ { yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = DaemonSet{} + yyv1[yyj1] = Ingress{} } else { yyv2 := &yyv1[yyj1] yyv2.CodecDecodeSelf(d) @@ -19441,10 +22241,10 @@ func (x codecSelfer1234) decSliceDaemonSet(v *[]DaemonSet, d *codec1978.Decoder) } if yyrt1 { for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, DaemonSet{}) + yyv1 = append(yyv1, Ingress{}) yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = DaemonSet{} + yyv1[yyj1] = Ingress{} } else { yyv3 := &yyv1[yyj1] yyv3.CodecDecodeSelf(d) @@ -19458,13 +22258,13 @@ func (x codecSelfer1234) decSliceDaemonSet(v *[]DaemonSet, d *codec1978.Decoder) for ; !r.CheckBreak(); yyj1++ { if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, DaemonSet{}) // var yyz1 DaemonSet + yyv1 = append(yyv1, Ingress{}) // var yyz1 Ingress yyc1 = true } yyh1.ElemContainerState(yyj1) if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv1[yyj1] = DaemonSet{} + yyv1[yyj1] = Ingress{} } else { yyv4 := &yyv1[yyj1] yyv4.CodecDecodeSelf(d) @@ -19479,7 +22279,7 @@ func (x codecSelfer1234) decSliceDaemonSet(v *[]DaemonSet, d *codec1978.Decoder) yyv1 = yyv1[:yyj1] yyc1 = true } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []DaemonSet{} + yyv1 = []Ingress{} yyc1 = true } } @@ -19489,7 +22289,7 @@ func (x codecSelfer1234) decSliceDaemonSet(v *[]DaemonSet, d *codec1978.Decoder) } } -func (x codecSelfer1234) encSliceThirdPartyResourceData(v []ThirdPartyResourceData, e *codec1978.Encoder) { +func (x codecSelfer1234) encSliceIngressTLS(v []IngressTLS, e *codec1978.Encoder) { var h codecSelfer1234 z, r := 
codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -19502,7 +22302,7 @@ func (x codecSelfer1234) encSliceThirdPartyResourceData(v []ThirdPartyResourceDa z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) decSliceThirdPartyResourceData(v *[]ThirdPartyResourceData, d *codec1978.Decoder) { +func (x codecSelfer1234) decSliceIngressTLS(v *[]IngressTLS, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -19513,7 +22313,7 @@ func (x codecSelfer1234) decSliceThirdPartyResourceData(v *[]ThirdPartyResourceD _ = yyc1 if yyl1 == 0 { if yyv1 == nil { - yyv1 = []ThirdPartyResourceData{} + yyv1 = []IngressTLS{} yyc1 = true } else if len(yyv1) != 0 { yyv1 = yyv1[:0] @@ -19528,15 +22328,15 @@ func (x codecSelfer1234) decSliceThirdPartyResourceData(v *[]ThirdPartyResourceD yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 216) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 40) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] } else { - yyv1 = make([]ThirdPartyResourceData, yyrl1) + yyv1 = make([]IngressTLS, yyrl1) } } else { - yyv1 = make([]ThirdPartyResourceData, yyrl1) + yyv1 = make([]IngressTLS, yyrl1) } yyc1 = true yyrr1 = len(yyv1) @@ -19551,7 +22351,7 @@ func (x codecSelfer1234) decSliceThirdPartyResourceData(v *[]ThirdPartyResourceD for ; yyj1 < yyrr1; yyj1++ { yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = ThirdPartyResourceData{} + yyv1[yyj1] = IngressTLS{} } else { yyv2 := &yyv1[yyj1] yyv2.CodecDecodeSelf(d) @@ -19560,10 +22360,10 @@ func (x codecSelfer1234) decSliceThirdPartyResourceData(v *[]ThirdPartyResourceD } if yyrt1 { for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, ThirdPartyResourceData{}) + yyv1 = append(yyv1, IngressTLS{}) yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = ThirdPartyResourceData{} + yyv1[yyj1] = IngressTLS{} } else { yyv3 := &yyv1[yyj1] yyv3.CodecDecodeSelf(d) @@ -19577,13 +22377,13 @@ func (x codecSelfer1234) decSliceThirdPartyResourceData(v *[]ThirdPartyResourceD for ; !r.CheckBreak(); yyj1++ { if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, ThirdPartyResourceData{}) // var yyz1 ThirdPartyResourceData + yyv1 = append(yyv1, IngressTLS{}) // var yyz1 IngressTLS yyc1 = true } yyh1.ElemContainerState(yyj1) if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv1[yyj1] = ThirdPartyResourceData{} + yyv1[yyj1] = IngressTLS{} } else { yyv4 := &yyv1[yyj1] yyv4.CodecDecodeSelf(d) @@ -19598,7 +22398,7 @@ func (x codecSelfer1234) decSliceThirdPartyResourceData(v *[]ThirdPartyResourceD yyv1 = yyv1[:yyj1] yyc1 = true } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []ThirdPartyResourceData{} + yyv1 = []IngressTLS{} yyc1 = true } } @@ -19608,7 +22408,7 @@ func (x codecSelfer1234) decSliceThirdPartyResourceData(v *[]ThirdPartyResourceD } } -func (x codecSelfer1234) encSliceJob(v []Job, e *codec1978.Encoder) { +func (x codecSelfer1234) encSliceIngressRule(v []IngressRule, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -19621,7 +22421,7 @@ func (x codecSelfer1234) encSliceJob(v []Job, e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) decSliceJob(v *[]Job, d *codec1978.Decoder) { +func (x codecSelfer1234) decSliceIngressRule(v *[]IngressRule, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -19632,7 +22432,7 @@ 
func (x codecSelfer1234) decSliceJob(v *[]Job, d *codec1978.Decoder) { _ = yyc1 if yyl1 == 0 { if yyv1 == nil { - yyv1 = []Job{} + yyv1 = []IngressRule{} yyc1 = true } else if len(yyv1) != 0 { yyv1 = yyv1[:0] @@ -19647,15 +22447,15 @@ func (x codecSelfer1234) decSliceJob(v *[]Job, d *codec1978.Decoder) { yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 640) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 24) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] } else { - yyv1 = make([]Job, yyrl1) + yyv1 = make([]IngressRule, yyrl1) } } else { - yyv1 = make([]Job, yyrl1) + yyv1 = make([]IngressRule, yyrl1) } yyc1 = true yyrr1 = len(yyv1) @@ -19670,7 +22470,7 @@ func (x codecSelfer1234) decSliceJob(v *[]Job, d *codec1978.Decoder) { for ; yyj1 < yyrr1; yyj1++ { yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = Job{} + yyv1[yyj1] = IngressRule{} } else { yyv2 := &yyv1[yyj1] yyv2.CodecDecodeSelf(d) @@ -19679,10 +22479,10 @@ func (x codecSelfer1234) decSliceJob(v *[]Job, d *codec1978.Decoder) { } if yyrt1 { for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, Job{}) + yyv1 = append(yyv1, IngressRule{}) yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = Job{} + yyv1[yyj1] = IngressRule{} } else { yyv3 := &yyv1[yyj1] yyv3.CodecDecodeSelf(d) @@ -19696,13 +22496,13 @@ func (x codecSelfer1234) decSliceJob(v *[]Job, d *codec1978.Decoder) { for ; !r.CheckBreak(); yyj1++ { if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, Job{}) // var yyz1 Job + yyv1 = append(yyv1, IngressRule{}) // var yyz1 IngressRule yyc1 = true } yyh1.ElemContainerState(yyj1) if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv1[yyj1] = Job{} + yyv1[yyj1] = IngressRule{} } else { yyv4 := &yyv1[yyj1] yyv4.CodecDecodeSelf(d) @@ -19717,7 +22517,7 @@ func (x codecSelfer1234) decSliceJob(v *[]Job, d *codec1978.Decoder) { yyv1 = yyv1[:yyj1] yyc1 = true } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []Job{} + yyv1 = []IngressRule{} yyc1 = true } } @@ -19727,7 +22527,7 @@ func (x codecSelfer1234) decSliceJob(v *[]Job, d *codec1978.Decoder) { } } -func (x codecSelfer1234) encSliceJobCondition(v []JobCondition, e *codec1978.Encoder) { +func (x codecSelfer1234) encSliceHTTPIngressPath(v []HTTPIngressPath, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -19740,7 +22540,7 @@ func (x codecSelfer1234) encSliceJobCondition(v []JobCondition, e *codec1978.Enc z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) decSliceJobCondition(v *[]JobCondition, d *codec1978.Decoder) { +func (x codecSelfer1234) decSliceHTTPIngressPath(v *[]HTTPIngressPath, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -19751,7 +22551,7 @@ func (x codecSelfer1234) decSliceJobCondition(v *[]JobCondition, d *codec1978.De _ = yyc1 if yyl1 == 0 { if yyv1 == nil { - yyv1 = []JobCondition{} + yyv1 = []HTTPIngressPath{} yyc1 = true } else if len(yyv1) != 0 { yyv1 = yyv1[:0] @@ -19766,15 +22566,15 @@ func (x codecSelfer1234) decSliceJobCondition(v *[]JobCondition, d *codec1978.De yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 112) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 64) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] } else { - yyv1 = make([]JobCondition, yyrl1) + yyv1 = make([]HTTPIngressPath, yyrl1) } } else { - yyv1 = make([]JobCondition, 
yyrl1) + yyv1 = make([]HTTPIngressPath, yyrl1) } yyc1 = true yyrr1 = len(yyv1) @@ -19789,7 +22589,7 @@ func (x codecSelfer1234) decSliceJobCondition(v *[]JobCondition, d *codec1978.De for ; yyj1 < yyrr1; yyj1++ { yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = JobCondition{} + yyv1[yyj1] = HTTPIngressPath{} } else { yyv2 := &yyv1[yyj1] yyv2.CodecDecodeSelf(d) @@ -19798,10 +22598,10 @@ func (x codecSelfer1234) decSliceJobCondition(v *[]JobCondition, d *codec1978.De } if yyrt1 { for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, JobCondition{}) + yyv1 = append(yyv1, HTTPIngressPath{}) yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = JobCondition{} + yyv1[yyj1] = HTTPIngressPath{} } else { yyv3 := &yyv1[yyj1] yyv3.CodecDecodeSelf(d) @@ -19815,13 +22615,13 @@ func (x codecSelfer1234) decSliceJobCondition(v *[]JobCondition, d *codec1978.De for ; !r.CheckBreak(); yyj1++ { if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, JobCondition{}) // var yyz1 JobCondition + yyv1 = append(yyv1, HTTPIngressPath{}) // var yyz1 HTTPIngressPath yyc1 = true } yyh1.ElemContainerState(yyj1) if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv1[yyj1] = JobCondition{} + yyv1[yyj1] = HTTPIngressPath{} } else { yyv4 := &yyv1[yyj1] yyv4.CodecDecodeSelf(d) @@ -19836,7 +22636,7 @@ func (x codecSelfer1234) decSliceJobCondition(v *[]JobCondition, d *codec1978.De yyv1 = yyv1[:yyj1] yyc1 = true } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []JobCondition{} + yyv1 = []HTTPIngressPath{} yyc1 = true } } @@ -19846,7 +22646,7 @@ func (x codecSelfer1234) decSliceJobCondition(v *[]JobCondition, d *codec1978.De } } -func (x codecSelfer1234) encSliceIngress(v []Ingress, e *codec1978.Encoder) { +func (x codecSelfer1234) encSliceLabelSelectorRequirement(v []LabelSelectorRequirement, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -19859,7 +22659,7 @@ func (x codecSelfer1234) encSliceIngress(v []Ingress, e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) decSliceIngress(v *[]Ingress, d *codec1978.Decoder) { +func (x codecSelfer1234) decSliceLabelSelectorRequirement(v *[]LabelSelectorRequirement, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -19870,7 +22670,7 @@ func (x codecSelfer1234) decSliceIngress(v *[]Ingress, d *codec1978.Decoder) { _ = yyc1 if yyl1 == 0 { if yyv1 == nil { - yyv1 = []Ingress{} + yyv1 = []LabelSelectorRequirement{} yyc1 = true } else if len(yyv1) != 0 { yyv1 = yyv1[:0] @@ -19885,15 +22685,15 @@ func (x codecSelfer1234) decSliceIngress(v *[]Ingress, d *codec1978.Decoder) { yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 272) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 56) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] } else { - yyv1 = make([]Ingress, yyrl1) + yyv1 = make([]LabelSelectorRequirement, yyrl1) } } else { - yyv1 = make([]Ingress, yyrl1) + yyv1 = make([]LabelSelectorRequirement, yyrl1) } yyc1 = true yyrr1 = len(yyv1) @@ -19908,7 +22708,7 @@ func (x codecSelfer1234) decSliceIngress(v *[]Ingress, d *codec1978.Decoder) { for ; yyj1 < yyrr1; yyj1++ { yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = Ingress{} + yyv1[yyj1] = LabelSelectorRequirement{} } else { yyv2 := &yyv1[yyj1] yyv2.CodecDecodeSelf(d) @@ -19917,10 +22717,10 @@ func (x codecSelfer1234) decSliceIngress(v *[]Ingress, d 
*codec1978.Decoder) { } if yyrt1 { for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, Ingress{}) + yyv1 = append(yyv1, LabelSelectorRequirement{}) yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = Ingress{} + yyv1[yyj1] = LabelSelectorRequirement{} } else { yyv3 := &yyv1[yyj1] yyv3.CodecDecodeSelf(d) @@ -19934,13 +22734,13 @@ func (x codecSelfer1234) decSliceIngress(v *[]Ingress, d *codec1978.Decoder) { for ; !r.CheckBreak(); yyj1++ { if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, Ingress{}) // var yyz1 Ingress + yyv1 = append(yyv1, LabelSelectorRequirement{}) // var yyz1 LabelSelectorRequirement yyc1 = true } yyh1.ElemContainerState(yyj1) if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv1[yyj1] = Ingress{} + yyv1[yyj1] = LabelSelectorRequirement{} } else { yyv4 := &yyv1[yyj1] yyv4.CodecDecodeSelf(d) @@ -19955,7 +22755,7 @@ func (x codecSelfer1234) decSliceIngress(v *[]Ingress, d *codec1978.Decoder) { yyv1 = yyv1[:yyj1] yyc1 = true } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []Ingress{} + yyv1 = []LabelSelectorRequirement{} yyc1 = true } } @@ -19965,7 +22765,7 @@ func (x codecSelfer1234) decSliceIngress(v *[]Ingress, d *codec1978.Decoder) { } } -func (x codecSelfer1234) encSliceIngressTLS(v []IngressTLS, e *codec1978.Encoder) { +func (x codecSelfer1234) encSliceReplicaSet(v []ReplicaSet, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -19978,7 +22778,7 @@ func (x codecSelfer1234) encSliceIngressTLS(v []IngressTLS, e *codec1978.Encoder z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) decSliceIngressTLS(v *[]IngressTLS, d *codec1978.Decoder) { +func (x codecSelfer1234) decSliceReplicaSet(v *[]ReplicaSet, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -19989,7 +22789,7 @@ func (x codecSelfer1234) decSliceIngressTLS(v *[]IngressTLS, d *codec1978.Decode _ = yyc1 if yyl1 == 0 { if yyv1 == nil { - yyv1 = []IngressTLS{} + yyv1 = []ReplicaSet{} yyc1 = true } else if len(yyv1) != 0 { yyv1 = yyv1[:0] @@ -20004,15 +22804,15 @@ func (x codecSelfer1234) decSliceIngressTLS(v *[]IngressTLS, d *codec1978.Decode yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 40) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 728) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] } else { - yyv1 = make([]IngressTLS, yyrl1) + yyv1 = make([]ReplicaSet, yyrl1) } } else { - yyv1 = make([]IngressTLS, yyrl1) + yyv1 = make([]ReplicaSet, yyrl1) } yyc1 = true yyrr1 = len(yyv1) @@ -20027,7 +22827,7 @@ func (x codecSelfer1234) decSliceIngressTLS(v *[]IngressTLS, d *codec1978.Decode for ; yyj1 < yyrr1; yyj1++ { yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = IngressTLS{} + yyv1[yyj1] = ReplicaSet{} } else { yyv2 := &yyv1[yyj1] yyv2.CodecDecodeSelf(d) @@ -20036,10 +22836,10 @@ func (x codecSelfer1234) decSliceIngressTLS(v *[]IngressTLS, d *codec1978.Decode } if yyrt1 { for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, IngressTLS{}) + yyv1 = append(yyv1, ReplicaSet{}) yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = IngressTLS{} + yyv1[yyj1] = ReplicaSet{} } else { yyv3 := &yyv1[yyj1] yyv3.CodecDecodeSelf(d) @@ -20053,13 +22853,13 @@ func (x codecSelfer1234) decSliceIngressTLS(v *[]IngressTLS, d *codec1978.Decode for ; !r.CheckBreak(); yyj1++ { if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, IngressTLS{}) // var yyz1 IngressTLS + yyv1 = append(yyv1, 
ReplicaSet{}) // var yyz1 ReplicaSet yyc1 = true } yyh1.ElemContainerState(yyj1) if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv1[yyj1] = IngressTLS{} + yyv1[yyj1] = ReplicaSet{} } else { yyv4 := &yyv1[yyj1] yyv4.CodecDecodeSelf(d) @@ -20074,7 +22874,7 @@ func (x codecSelfer1234) decSliceIngressTLS(v *[]IngressTLS, d *codec1978.Decode yyv1 = yyv1[:yyj1] yyc1 = true } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []IngressTLS{} + yyv1 = []ReplicaSet{} yyc1 = true } } @@ -20084,20 +22884,20 @@ func (x codecSelfer1234) decSliceIngressTLS(v *[]IngressTLS, d *codec1978.Decode } } -func (x codecSelfer1234) encSliceIngressRule(v []IngressRule, e *codec1978.Encoder) { +func (x codecSelfer1234) encSlicev1_Capability(v []pkg2_v1.Capability, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) + yysf2 := &yyv1 + yysf2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) decSliceIngressRule(v *[]IngressRule, d *codec1978.Decoder) { +func (x codecSelfer1234) decSlicev1_Capability(v *[]pkg2_v1.Capability, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -20108,7 +22908,7 @@ func (x codecSelfer1234) decSliceIngressRule(v *[]IngressRule, d *codec1978.Deco _ = yyc1 if yyl1 == 0 { if yyv1 == nil { - yyv1 = []IngressRule{} + yyv1 = []pkg2_v1.Capability{} yyc1 = true } else if len(yyv1) != 0 { yyv1 = yyv1[:0] @@ -20121,23 +22921,18 @@ func (x codecSelfer1234) decSliceIngressRule(v *[]IngressRule, d *codec1978.Deco yyrr1 = yyl1 // len(yyv1) if yyl1 > cap(yyv1) { - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 24) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] } else { - yyv1 = make([]IngressRule, yyrl1) + yyv1 = make([]pkg2_v1.Capability, yyrl1) } } else { - yyv1 = make([]IngressRule, yyrl1) + yyv1 = make([]pkg2_v1.Capability, yyrl1) } yyc1 = true yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } } else if yyl1 != len(yyv1) { yyv1 = yyv1[:yyl1] yyc1 = true @@ -20146,22 +22941,20 @@ func (x codecSelfer1234) decSliceIngressRule(v *[]IngressRule, d *codec1978.Deco for ; yyj1 < yyrr1; yyj1++ { yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = IngressRule{} + yyv1[yyj1] = "" } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) + yyv1[yyj1] = pkg2_v1.Capability(r.DecodeString()) } } if yyrt1 { for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, IngressRule{}) + yyv1 = append(yyv1, "") yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = IngressRule{} + yyv1[yyj1] = "" } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) + yyv1[yyj1] = pkg2_v1.Capability(r.DecodeString()) } } @@ -20172,16 +22965,15 @@ func (x codecSelfer1234) decSliceIngressRule(v *[]IngressRule, d *codec1978.Deco for ; !r.CheckBreak(); yyj1++ { if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, IngressRule{}) // var yyz1 IngressRule + yyv1 = append(yyv1, "") // var yyz1 pkg2_v1.Capability yyc1 = true } yyh1.ElemContainerState(yyj1) if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv1[yyj1] = IngressRule{} + yyv1[yyj1] = "" } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) + yyv1[yyj1] = pkg2_v1.Capability(r.DecodeString()) } } else { @@ -20193,7 +22985,7 @@ 
func (x codecSelfer1234) decSliceIngressRule(v *[]IngressRule, d *codec1978.Deco yyv1 = yyv1[:yyj1] yyc1 = true } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []IngressRule{} + yyv1 = []pkg2_v1.Capability{} yyc1 = true } } @@ -20203,20 +22995,19 @@ func (x codecSelfer1234) decSliceIngressRule(v *[]IngressRule, d *codec1978.Deco } } -func (x codecSelfer1234) encSliceHTTPIngressPath(v []HTTPIngressPath, e *codec1978.Encoder) { +func (x codecSelfer1234) encSliceFSType(v []FSType, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) + yyv1.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) decSliceHTTPIngressPath(v *[]HTTPIngressPath, d *codec1978.Decoder) { +func (x codecSelfer1234) decSliceFSType(v *[]FSType, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -20227,7 +23018,7 @@ func (x codecSelfer1234) decSliceHTTPIngressPath(v *[]HTTPIngressPath, d *codec1 _ = yyc1 if yyl1 == 0 { if yyv1 == nil { - yyv1 = []HTTPIngressPath{} + yyv1 = []FSType{} yyc1 = true } else if len(yyv1) != 0 { yyv1 = yyv1[:0] @@ -20240,23 +23031,18 @@ func (x codecSelfer1234) decSliceHTTPIngressPath(v *[]HTTPIngressPath, d *codec1 yyrr1 = yyl1 // len(yyv1) if yyl1 > cap(yyv1) { - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 64) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] } else { - yyv1 = make([]HTTPIngressPath, yyrl1) + yyv1 = make([]FSType, yyrl1) } } else { - yyv1 = make([]HTTPIngressPath, yyrl1) + yyv1 = make([]FSType, yyrl1) } yyc1 = true yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } } else if yyl1 != len(yyv1) { yyv1 = yyv1[:yyl1] yyc1 = true @@ -20265,22 +23051,20 @@ func (x codecSelfer1234) decSliceHTTPIngressPath(v *[]HTTPIngressPath, d *codec1 for ; yyj1 < yyrr1; yyj1++ { yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = HTTPIngressPath{} + yyv1[yyj1] = "" } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) + yyv1[yyj1] = FSType(r.DecodeString()) } } if yyrt1 { for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, HTTPIngressPath{}) + yyv1 = append(yyv1, "") yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = HTTPIngressPath{} + yyv1[yyj1] = "" } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) + yyv1[yyj1] = FSType(r.DecodeString()) } } @@ -20291,16 +23075,15 @@ func (x codecSelfer1234) decSliceHTTPIngressPath(v *[]HTTPIngressPath, d *codec1 for ; !r.CheckBreak(); yyj1++ { if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, HTTPIngressPath{}) // var yyz1 HTTPIngressPath + yyv1 = append(yyv1, "") // var yyz1 FSType yyc1 = true } yyh1.ElemContainerState(yyj1) if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv1[yyj1] = HTTPIngressPath{} + yyv1[yyj1] = "" } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) + yyv1[yyj1] = FSType(r.DecodeString()) } } else { @@ -20312,7 +23095,7 @@ func (x codecSelfer1234) decSliceHTTPIngressPath(v *[]HTTPIngressPath, d *codec1 yyv1 = yyv1[:yyj1] yyc1 = true } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []HTTPIngressPath{} + yyv1 = []FSType{} yyc1 = true } } @@ -20322,7 +23105,7 @@ func (x codecSelfer1234) decSliceHTTPIngressPath(v *[]HTTPIngressPath, d *codec1 } } -func (x 
codecSelfer1234) encSliceLabelSelectorRequirement(v []LabelSelectorRequirement, e *codec1978.Encoder) { +func (x codecSelfer1234) encSliceHostPortRange(v []HostPortRange, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -20335,7 +23118,7 @@ func (x codecSelfer1234) encSliceLabelSelectorRequirement(v []LabelSelectorRequi z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) decSliceLabelSelectorRequirement(v *[]LabelSelectorRequirement, d *codec1978.Decoder) { +func (x codecSelfer1234) decSliceHostPortRange(v *[]HostPortRange, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -20346,7 +23129,7 @@ func (x codecSelfer1234) decSliceLabelSelectorRequirement(v *[]LabelSelectorRequ _ = yyc1 if yyl1 == 0 { if yyv1 == nil { - yyv1 = []LabelSelectorRequirement{} + yyv1 = []HostPortRange{} yyc1 = true } else if len(yyv1) != 0 { yyv1 = yyv1[:0] @@ -20361,15 +23144,15 @@ func (x codecSelfer1234) decSliceLabelSelectorRequirement(v *[]LabelSelectorRequ yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 56) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 8) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] } else { - yyv1 = make([]LabelSelectorRequirement, yyrl1) + yyv1 = make([]HostPortRange, yyrl1) } } else { - yyv1 = make([]LabelSelectorRequirement, yyrl1) + yyv1 = make([]HostPortRange, yyrl1) } yyc1 = true yyrr1 = len(yyv1) @@ -20384,7 +23167,7 @@ func (x codecSelfer1234) decSliceLabelSelectorRequirement(v *[]LabelSelectorRequ for ; yyj1 < yyrr1; yyj1++ { yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = LabelSelectorRequirement{} + yyv1[yyj1] = HostPortRange{} } else { yyv2 := &yyv1[yyj1] yyv2.CodecDecodeSelf(d) @@ -20393,10 +23176,10 @@ func (x codecSelfer1234) decSliceLabelSelectorRequirement(v *[]LabelSelectorRequ } if yyrt1 { for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, LabelSelectorRequirement{}) + yyv1 = append(yyv1, HostPortRange{}) yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = LabelSelectorRequirement{} + yyv1[yyj1] = HostPortRange{} } else { yyv3 := &yyv1[yyj1] yyv3.CodecDecodeSelf(d) @@ -20410,13 +23193,13 @@ func (x codecSelfer1234) decSliceLabelSelectorRequirement(v *[]LabelSelectorRequ for ; !r.CheckBreak(); yyj1++ { if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, LabelSelectorRequirement{}) // var yyz1 LabelSelectorRequirement + yyv1 = append(yyv1, HostPortRange{}) // var yyz1 HostPortRange yyc1 = true } yyh1.ElemContainerState(yyj1) if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv1[yyj1] = LabelSelectorRequirement{} + yyv1[yyj1] = HostPortRange{} } else { yyv4 := &yyv1[yyj1] yyv4.CodecDecodeSelf(d) @@ -20431,7 +23214,7 @@ func (x codecSelfer1234) decSliceLabelSelectorRequirement(v *[]LabelSelectorRequ yyv1 = yyv1[:yyj1] yyc1 = true } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []LabelSelectorRequirement{} + yyv1 = []HostPortRange{} yyc1 = true } } @@ -20441,7 +23224,7 @@ func (x codecSelfer1234) decSliceLabelSelectorRequirement(v *[]LabelSelectorRequ } } -func (x codecSelfer1234) encSliceReplicaSet(v []ReplicaSet, e *codec1978.Encoder) { +func (x codecSelfer1234) encSliceIDRange(v []IDRange, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -20454,7 +23237,7 @@ func (x codecSelfer1234) encSliceReplicaSet(v []ReplicaSet, e *codec1978.Encoder 
z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) decSliceReplicaSet(v *[]ReplicaSet, d *codec1978.Decoder) { +func (x codecSelfer1234) decSliceIDRange(v *[]IDRange, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -20465,7 +23248,7 @@ func (x codecSelfer1234) decSliceReplicaSet(v *[]ReplicaSet, d *codec1978.Decode _ = yyc1 if yyl1 == 0 { if yyv1 == nil { - yyv1 = []ReplicaSet{} + yyv1 = []IDRange{} yyc1 = true } else if len(yyv1) != 0 { yyv1 = yyv1[:0] @@ -20480,15 +23263,15 @@ func (x codecSelfer1234) decSliceReplicaSet(v *[]ReplicaSet, d *codec1978.Decode yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 576) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] } else { - yyv1 = make([]ReplicaSet, yyrl1) + yyv1 = make([]IDRange, yyrl1) } } else { - yyv1 = make([]ReplicaSet, yyrl1) + yyv1 = make([]IDRange, yyrl1) } yyc1 = true yyrr1 = len(yyv1) @@ -20503,7 +23286,7 @@ func (x codecSelfer1234) decSliceReplicaSet(v *[]ReplicaSet, d *codec1978.Decode for ; yyj1 < yyrr1; yyj1++ { yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = ReplicaSet{} + yyv1[yyj1] = IDRange{} } else { yyv2 := &yyv1[yyj1] yyv2.CodecDecodeSelf(d) @@ -20512,10 +23295,10 @@ func (x codecSelfer1234) decSliceReplicaSet(v *[]ReplicaSet, d *codec1978.Decode } if yyrt1 { for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, ReplicaSet{}) + yyv1 = append(yyv1, IDRange{}) yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = ReplicaSet{} + yyv1[yyj1] = IDRange{} } else { yyv3 := &yyv1[yyj1] yyv3.CodecDecodeSelf(d) @@ -20529,13 +23312,13 @@ func (x codecSelfer1234) decSliceReplicaSet(v *[]ReplicaSet, d *codec1978.Decode for ; !r.CheckBreak(); yyj1++ { if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, ReplicaSet{}) // var yyz1 ReplicaSet + yyv1 = append(yyv1, IDRange{}) // var yyz1 IDRange yyc1 = true } yyh1.ElemContainerState(yyj1) if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv1[yyj1] = ReplicaSet{} + yyv1[yyj1] = IDRange{} } else { yyv4 := &yyv1[yyj1] yyv4.CodecDecodeSelf(d) @@ -20550,7 +23333,7 @@ func (x codecSelfer1234) decSliceReplicaSet(v *[]ReplicaSet, d *codec1978.Decode yyv1 = yyv1[:yyj1] yyc1 = true } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []ReplicaSet{} + yyv1 = []IDRange{} yyc1 = true } } @@ -20560,20 +23343,20 @@ func (x codecSelfer1234) decSliceReplicaSet(v *[]ReplicaSet, d *codec1978.Decode } } -func (x codecSelfer1234) encSlicev1_Capability(v []pkg2_v1.Capability, e *codec1978.Encoder) { +func (x codecSelfer1234) encSlicePodSecurityPolicy(v []PodSecurityPolicy, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yysf2 := &yyv1 - yysf2.CodecEncodeSelf(e) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) decSlicev1_Capability(v *[]pkg2_v1.Capability, d *codec1978.Decoder) { +func (x codecSelfer1234) decSlicePodSecurityPolicy(v *[]PodSecurityPolicy, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -20584,7 +23367,7 @@ func (x codecSelfer1234) decSlicev1_Capability(v *[]pkg2_v1.Capability, d *codec _ = yyc1 if yyl1 == 0 { if yyv1 == nil { - yyv1 = []pkg2_v1.Capability{} + 
yyv1 = []PodSecurityPolicy{} yyc1 = true } else if len(yyv1) != 0 { yyv1 = yyv1[:0] @@ -20597,18 +23380,23 @@ func (x codecSelfer1234) decSlicev1_Capability(v *[]pkg2_v1.Capability, d *codec yyrr1 = yyl1 // len(yyv1) if yyl1 > cap(yyv1) { - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 536) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] } else { - yyv1 = make([]pkg2_v1.Capability, yyrl1) + yyv1 = make([]PodSecurityPolicy, yyrl1) } } else { - yyv1 = make([]pkg2_v1.Capability, yyrl1) + yyv1 = make([]PodSecurityPolicy, yyrl1) } yyc1 = true yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } } else if yyl1 != len(yyv1) { yyv1 = yyv1[:yyl1] yyc1 = true @@ -20617,20 +23405,22 @@ func (x codecSelfer1234) decSlicev1_Capability(v *[]pkg2_v1.Capability, d *codec for ; yyj1 < yyrr1; yyj1++ { yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = "" + yyv1[yyj1] = PodSecurityPolicy{} } else { - yyv1[yyj1] = pkg2_v1.Capability(r.DecodeString()) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } if yyrt1 { for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, "") + yyv1 = append(yyv1, PodSecurityPolicy{}) yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = "" + yyv1[yyj1] = PodSecurityPolicy{} } else { - yyv1[yyj1] = pkg2_v1.Capability(r.DecodeString()) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } @@ -20641,15 +23431,16 @@ func (x codecSelfer1234) decSlicev1_Capability(v *[]pkg2_v1.Capability, d *codec for ; !r.CheckBreak(); yyj1++ { if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, "") // var yyz1 pkg2_v1.Capability + yyv1 = append(yyv1, PodSecurityPolicy{}) // var yyz1 PodSecurityPolicy yyc1 = true } yyh1.ElemContainerState(yyj1) if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv1[yyj1] = "" + yyv1[yyj1] = PodSecurityPolicy{} } else { - yyv1[yyj1] = pkg2_v1.Capability(r.DecodeString()) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -20661,7 +23452,7 @@ func (x codecSelfer1234) decSlicev1_Capability(v *[]pkg2_v1.Capability, d *codec yyv1 = yyv1[:yyj1] yyc1 = true } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []pkg2_v1.Capability{} + yyv1 = []PodSecurityPolicy{} yyc1 = true } } @@ -20671,19 +23462,20 @@ func (x codecSelfer1234) decSlicev1_Capability(v *[]pkg2_v1.Capability, d *codec } } -func (x codecSelfer1234) encSliceFSType(v []FSType, e *codec1978.Encoder) { +func (x codecSelfer1234) encSliceNetworkPolicyIngressRule(v []NetworkPolicyIngressRule, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) for _, yyv1 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yyv1.CodecEncodeSelf(e) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) decSliceFSType(v *[]FSType, d *codec1978.Decoder) { +func (x codecSelfer1234) decSliceNetworkPolicyIngressRule(v *[]NetworkPolicyIngressRule, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -20694,7 +23486,7 @@ func (x codecSelfer1234) decSliceFSType(v *[]FSType, d *codec1978.Decoder) { _ = yyc1 if yyl1 == 0 { if yyv1 == nil { - yyv1 = []FSType{} + yyv1 = []NetworkPolicyIngressRule{} yyc1 = true } else if len(yyv1) != 0 { yyv1 = yyv1[:0] @@ -20707,18 +23499,23 @@ func (x codecSelfer1234) decSliceFSType(v *[]FSType, d *codec1978.Decoder) { yyrr1 = yyl1 // 
len(yyv1) if yyl1 > cap(yyv1) { - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 48) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] } else { - yyv1 = make([]FSType, yyrl1) + yyv1 = make([]NetworkPolicyIngressRule, yyrl1) } } else { - yyv1 = make([]FSType, yyrl1) + yyv1 = make([]NetworkPolicyIngressRule, yyrl1) } yyc1 = true yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } } else if yyl1 != len(yyv1) { yyv1 = yyv1[:yyl1] yyc1 = true @@ -20727,20 +23524,22 @@ func (x codecSelfer1234) decSliceFSType(v *[]FSType, d *codec1978.Decoder) { for ; yyj1 < yyrr1; yyj1++ { yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = "" + yyv1[yyj1] = NetworkPolicyIngressRule{} } else { - yyv1[yyj1] = FSType(r.DecodeString()) + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) } } if yyrt1 { for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, "") + yyv1 = append(yyv1, NetworkPolicyIngressRule{}) yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = "" + yyv1[yyj1] = NetworkPolicyIngressRule{} } else { - yyv1[yyj1] = FSType(r.DecodeString()) + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) } } @@ -20751,15 +23550,16 @@ func (x codecSelfer1234) decSliceFSType(v *[]FSType, d *codec1978.Decoder) { for ; !r.CheckBreak(); yyj1++ { if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, "") // var yyz1 FSType + yyv1 = append(yyv1, NetworkPolicyIngressRule{}) // var yyz1 NetworkPolicyIngressRule yyc1 = true } yyh1.ElemContainerState(yyj1) if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv1[yyj1] = "" + yyv1[yyj1] = NetworkPolicyIngressRule{} } else { - yyv1[yyj1] = FSType(r.DecodeString()) + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) } } else { @@ -20771,7 +23571,7 @@ func (x codecSelfer1234) decSliceFSType(v *[]FSType, d *codec1978.Decoder) { yyv1 = yyv1[:yyj1] yyc1 = true } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []FSType{} + yyv1 = []NetworkPolicyIngressRule{} yyc1 = true } } @@ -20781,7 +23581,7 @@ func (x codecSelfer1234) decSliceFSType(v *[]FSType, d *codec1978.Decoder) { } } -func (x codecSelfer1234) encSliceHostPortRange(v []HostPortRange, e *codec1978.Encoder) { +func (x codecSelfer1234) encSliceNetworkPolicyPort(v []NetworkPolicyPort, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -20794,7 +23594,7 @@ func (x codecSelfer1234) encSliceHostPortRange(v []HostPortRange, e *codec1978.E z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) decSliceHostPortRange(v *[]HostPortRange, d *codec1978.Decoder) { +func (x codecSelfer1234) decSliceNetworkPolicyPort(v *[]NetworkPolicyPort, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -20805,7 +23605,7 @@ func (x codecSelfer1234) decSliceHostPortRange(v *[]HostPortRange, d *codec1978. _ = yyc1 if yyl1 == 0 { if yyv1 == nil { - yyv1 = []HostPortRange{} + yyv1 = []NetworkPolicyPort{} yyc1 = true } else if len(yyv1) != 0 { yyv1 = yyv1[:0] @@ -20820,15 +23620,15 @@ func (x codecSelfer1234) decSliceHostPortRange(v *[]HostPortRange, d *codec1978. 
yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 8) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] } else { - yyv1 = make([]HostPortRange, yyrl1) + yyv1 = make([]NetworkPolicyPort, yyrl1) } } else { - yyv1 = make([]HostPortRange, yyrl1) + yyv1 = make([]NetworkPolicyPort, yyrl1) } yyc1 = true yyrr1 = len(yyv1) @@ -20843,7 +23643,7 @@ func (x codecSelfer1234) decSliceHostPortRange(v *[]HostPortRange, d *codec1978. for ; yyj1 < yyrr1; yyj1++ { yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = HostPortRange{} + yyv1[yyj1] = NetworkPolicyPort{} } else { yyv2 := &yyv1[yyj1] yyv2.CodecDecodeSelf(d) @@ -20852,10 +23652,10 @@ func (x codecSelfer1234) decSliceHostPortRange(v *[]HostPortRange, d *codec1978. } if yyrt1 { for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, HostPortRange{}) + yyv1 = append(yyv1, NetworkPolicyPort{}) yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = HostPortRange{} + yyv1[yyj1] = NetworkPolicyPort{} } else { yyv3 := &yyv1[yyj1] yyv3.CodecDecodeSelf(d) @@ -20869,13 +23669,13 @@ func (x codecSelfer1234) decSliceHostPortRange(v *[]HostPortRange, d *codec1978. for ; !r.CheckBreak(); yyj1++ { if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, HostPortRange{}) // var yyz1 HostPortRange + yyv1 = append(yyv1, NetworkPolicyPort{}) // var yyz1 NetworkPolicyPort yyc1 = true } yyh1.ElemContainerState(yyj1) if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv1[yyj1] = HostPortRange{} + yyv1[yyj1] = NetworkPolicyPort{} } else { yyv4 := &yyv1[yyj1] yyv4.CodecDecodeSelf(d) @@ -20890,7 +23690,7 @@ func (x codecSelfer1234) decSliceHostPortRange(v *[]HostPortRange, d *codec1978. yyv1 = yyv1[:yyj1] yyc1 = true } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []HostPortRange{} + yyv1 = []NetworkPolicyPort{} yyc1 = true } } @@ -20900,7 +23700,7 @@ func (x codecSelfer1234) decSliceHostPortRange(v *[]HostPortRange, d *codec1978. 
} } -func (x codecSelfer1234) encSliceIDRange(v []IDRange, e *codec1978.Encoder) { +func (x codecSelfer1234) encSliceNetworkPolicyPeer(v []NetworkPolicyPeer, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -20913,7 +23713,7 @@ func (x codecSelfer1234) encSliceIDRange(v []IDRange, e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) decSliceIDRange(v *[]IDRange, d *codec1978.Decoder) { +func (x codecSelfer1234) decSliceNetworkPolicyPeer(v *[]NetworkPolicyPeer, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -20924,7 +23724,7 @@ func (x codecSelfer1234) decSliceIDRange(v *[]IDRange, d *codec1978.Decoder) { _ = yyc1 if yyl1 == 0 { if yyv1 == nil { - yyv1 = []IDRange{} + yyv1 = []NetworkPolicyPeer{} yyc1 = true } else if len(yyv1) != 0 { yyv1 = yyv1[:0] @@ -20944,10 +23744,10 @@ func (x codecSelfer1234) decSliceIDRange(v *[]IDRange, d *codec1978.Decoder) { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] } else { - yyv1 = make([]IDRange, yyrl1) + yyv1 = make([]NetworkPolicyPeer, yyrl1) } } else { - yyv1 = make([]IDRange, yyrl1) + yyv1 = make([]NetworkPolicyPeer, yyrl1) } yyc1 = true yyrr1 = len(yyv1) @@ -20962,7 +23762,7 @@ func (x codecSelfer1234) decSliceIDRange(v *[]IDRange, d *codec1978.Decoder) { for ; yyj1 < yyrr1; yyj1++ { yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = IDRange{} + yyv1[yyj1] = NetworkPolicyPeer{} } else { yyv2 := &yyv1[yyj1] yyv2.CodecDecodeSelf(d) @@ -20971,10 +23771,10 @@ func (x codecSelfer1234) decSliceIDRange(v *[]IDRange, d *codec1978.Decoder) { } if yyrt1 { for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, IDRange{}) + yyv1 = append(yyv1, NetworkPolicyPeer{}) yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = IDRange{} + yyv1[yyj1] = NetworkPolicyPeer{} } else { yyv3 := &yyv1[yyj1] yyv3.CodecDecodeSelf(d) @@ -20988,13 +23788,13 @@ func (x codecSelfer1234) decSliceIDRange(v *[]IDRange, d *codec1978.Decoder) { for ; !r.CheckBreak(); yyj1++ { if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, IDRange{}) // var yyz1 IDRange + yyv1 = append(yyv1, NetworkPolicyPeer{}) // var yyz1 NetworkPolicyPeer yyc1 = true } yyh1.ElemContainerState(yyj1) if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv1[yyj1] = IDRange{} + yyv1[yyj1] = NetworkPolicyPeer{} } else { yyv4 := &yyv1[yyj1] yyv4.CodecDecodeSelf(d) @@ -21009,7 +23809,7 @@ func (x codecSelfer1234) decSliceIDRange(v *[]IDRange, d *codec1978.Decoder) { yyv1 = yyv1[:yyj1] yyc1 = true } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []IDRange{} + yyv1 = []NetworkPolicyPeer{} yyc1 = true } } @@ -21019,7 +23819,7 @@ func (x codecSelfer1234) decSliceIDRange(v *[]IDRange, d *codec1978.Decoder) { } } -func (x codecSelfer1234) encSlicePodSecurityPolicy(v []PodSecurityPolicy, e *codec1978.Encoder) { +func (x codecSelfer1234) encSliceNetworkPolicy(v []NetworkPolicy, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r @@ -21032,7 +23832,7 @@ func (x codecSelfer1234) encSlicePodSecurityPolicy(v []PodSecurityPolicy, e *cod z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) decSlicePodSecurityPolicy(v *[]PodSecurityPolicy, d *codec1978.Decoder) { +func (x codecSelfer1234) decSliceNetworkPolicy(v *[]NetworkPolicy, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -21043,7 +23843,7 @@ func (x 
codecSelfer1234) decSlicePodSecurityPolicy(v *[]PodSecurityPolicy, d *co _ = yyc1 if yyl1 == 0 { if yyv1 == nil { - yyv1 = []PodSecurityPolicy{} + yyv1 = []NetworkPolicy{} yyc1 = true } else if len(yyv1) != 0 { yyv1 = yyv1[:0] @@ -21058,15 +23858,15 @@ func (x codecSelfer1234) decSlicePodSecurityPolicy(v *[]PodSecurityPolicy, d *co yyrg1 := len(yyv1) > 0 yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 352) + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 296) if yyrt1 { if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] } else { - yyv1 = make([]PodSecurityPolicy, yyrl1) + yyv1 = make([]NetworkPolicy, yyrl1) } } else { - yyv1 = make([]PodSecurityPolicy, yyrl1) + yyv1 = make([]NetworkPolicy, yyrl1) } yyc1 = true yyrr1 = len(yyv1) @@ -21081,7 +23881,7 @@ func (x codecSelfer1234) decSlicePodSecurityPolicy(v *[]PodSecurityPolicy, d *co for ; yyj1 < yyrr1; yyj1++ { yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = PodSecurityPolicy{} + yyv1[yyj1] = NetworkPolicy{} } else { yyv2 := &yyv1[yyj1] yyv2.CodecDecodeSelf(d) @@ -21090,10 +23890,10 @@ func (x codecSelfer1234) decSlicePodSecurityPolicy(v *[]PodSecurityPolicy, d *co } if yyrt1 { for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, PodSecurityPolicy{}) + yyv1 = append(yyv1, NetworkPolicy{}) yyh1.ElemContainerState(yyj1) if r.TryDecodeAsNil() { - yyv1[yyj1] = PodSecurityPolicy{} + yyv1[yyj1] = NetworkPolicy{} } else { yyv3 := &yyv1[yyj1] yyv3.CodecDecodeSelf(d) @@ -21107,13 +23907,13 @@ func (x codecSelfer1234) decSlicePodSecurityPolicy(v *[]PodSecurityPolicy, d *co for ; !r.CheckBreak(); yyj1++ { if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, PodSecurityPolicy{}) // var yyz1 PodSecurityPolicy + yyv1 = append(yyv1, NetworkPolicy{}) // var yyz1 NetworkPolicy yyc1 = true } yyh1.ElemContainerState(yyj1) if yyj1 < len(yyv1) { if r.TryDecodeAsNil() { - yyv1[yyj1] = PodSecurityPolicy{} + yyv1[yyj1] = NetworkPolicy{} } else { yyv4 := &yyv1[yyj1] yyv4.CodecDecodeSelf(d) @@ -21128,7 +23928,7 @@ func (x codecSelfer1234) decSlicePodSecurityPolicy(v *[]PodSecurityPolicy, d *co yyv1 = yyv1[:yyj1] yyc1 = true } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []PodSecurityPolicy{} + yyv1 = []NetworkPolicy{} yyc1 = true } } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/types.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/types.go index 1a67438b81f0..de374615f5f7 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/types.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/types.go @@ -26,24 +26,24 @@ import ( // describes the attributes of a scale subresource type ScaleSpec struct { // desired number of instances for the scaled object. - Replicas int32 `json:"replicas,omitempty"` + Replicas int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` } // represents the current status of a scale subresource. type ScaleStatus struct { // actual number of observed instances of the scaled object. - Replicas int32 `json:"replicas"` + Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"` - // label query over pods that should match the replicas count. More info: http://releases.k8s.io/release-1.2/docs/user-guide/labels.md#label-selectors - Selector map[string]string `json:"selector,omitempty"` + // label query over pods that should match the replicas count. 
More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors + Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"` // label selector for pods that should match the replicas count. This is a serialized // version of both map-based and more expressive set-based selectors. This is done to // avoid introspection in the clients. The string will be in the same format as the // query-param syntax. If the target type only supports map-based selectors, both this // field and map-based selector field are populated. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/labels.md#label-selectors - TargetSelector string `json:"targetSelector,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors + TargetSelector string `json:"targetSelector,omitempty" protobuf:"bytes,3,opt,name=targetSelector"` } // +genclient=true,noMethods=true @@ -51,14 +51,14 @@ type ScaleStatus struct { // represents a scaling request for a resource. type Scale struct { unversioned.TypeMeta `json:",inline"` - // Standard object metadata; More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata. - v1.ObjectMeta `json:"metadata,omitempty"` + // Standard object metadata; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata. + v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // defines the behavior of the scale. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#spec-and-status. - Spec ScaleSpec `json:"spec,omitempty"` + // defines the behavior of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. + Spec ScaleSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - // current status of the scale. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#spec-and-status. Read-only. - Status ScaleStatus `json:"status,omitempty"` + // current status of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. Read-only. + Status ScaleStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } // Dummy definition @@ -68,77 +68,77 @@ type ReplicationControllerDummy struct { // SubresourceReference contains enough information to let you inspect or modify the referred subresource.
type SubresourceReference struct { - // Kind of the referent; More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#types-kinds" - Kind string `json:"kind,omitempty"` - // Name of the referent; More info: http://releases.k8s.io/release-1.2/docs/user-guide/identifiers.md#names - Name string `json:"name,omitempty"` + // Kind of the referent; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + Kind string `json:"kind,omitempty" protobuf:"bytes,1,opt,name=kind"` + // Name of the referent; More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#names + Name string `json:"name,omitempty" protobuf:"bytes,2,opt,name=name"` // API version of the referent - APIVersion string `json:"apiVersion,omitempty"` + APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,3,opt,name=apiVersion"` // Subresource name of the referent - Subresource string `json:"subresource,omitempty"` + Subresource string `json:"subresource,omitempty" protobuf:"bytes,4,opt,name=subresource"` } type CPUTargetUtilization struct { // fraction of the requested CPU that should be utilized/used, // e.g. 70 means that 70% of the requested CPU should be in use. - TargetPercentage int32 `json:"targetPercentage"` + TargetPercentage int32 `json:"targetPercentage" protobuf:"varint,1,opt,name=targetPercentage"` } // Alpha-level support for Custom Metrics in HPA (as annotations). type CustomMetricTarget struct { // Custom Metric name. - Name string `json:"name"` + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` // Custom Metric value (average). - TargetValue resource.Quantity `json:"value"` + TargetValue resource.Quantity `json:"value" protobuf:"bytes,2,opt,name=value"` } type CustomMetricTargetList struct { - Items []CustomMetricTarget `json:"items"` + Items []CustomMetricTarget `json:"items" protobuf:"bytes,1,rep,name=items"` } type CustomMetricCurrentStatus struct { // Custom Metric name. - Name string `json:"name"` + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` // Custom Metric value (average). - CurrentValue resource.Quantity `json:"value"` + CurrentValue resource.Quantity `json:"value" protobuf:"bytes,2,opt,name=value"` } type CustomMetricCurrentStatusList struct { - Items []CustomMetricCurrentStatus `json:"items"` + Items []CustomMetricCurrentStatus `json:"items" protobuf:"bytes,1,rep,name=items"` } // specification of a horizontal pod autoscaler. type HorizontalPodAutoscalerSpec struct { // reference to Scale subresource; horizontal pod autoscaler will learn the current resource consumption from its status, // and will set the desired number of pods by modifying its spec. - ScaleRef SubresourceReference `json:"scaleRef"` + ScaleRef SubresourceReference `json:"scaleRef" protobuf:"bytes,1,opt,name=scaleRef"` // lower limit for the number of pods that can be set by the autoscaler, default 1. - MinReplicas *int32 `json:"minReplicas,omitempty"` + MinReplicas *int32 `json:"minReplicas,omitempty" protobuf:"varint,2,opt,name=minReplicas"` // upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas. - MaxReplicas int32 `json:"maxReplicas"` + MaxReplicas int32 `json:"maxReplicas" protobuf:"varint,3,opt,name=maxReplicas"` // target average CPU utilization (represented as a percentage of requested CPU) over all the pods; // if not specified it defaults to the target CPU utilization at 80% of the requested resources. 
- CPUUtilization *CPUTargetUtilization `json:"cpuUtilization,omitempty"` + CPUUtilization *CPUTargetUtilization `json:"cpuUtilization,omitempty" protobuf:"bytes,4,opt,name=cpuUtilization"` } // current status of a horizontal pod autoscaler type HorizontalPodAutoscalerStatus struct { // most recent generation observed by this autoscaler. - ObservedGeneration *int64 `json:"observedGeneration,omitempty"` + ObservedGeneration *int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` // last time the HorizontalPodAutoscaler scaled the number of pods; // used by the autoscaler to control how often the number of pods is changed. - LastScaleTime *unversioned.Time `json:"lastScaleTime,omitempty"` + LastScaleTime *unversioned.Time `json:"lastScaleTime,omitempty" protobuf:"bytes,2,opt,name=lastScaleTime"` // current number of replicas of pods managed by this autoscaler. - CurrentReplicas int32 `json:"currentReplicas"` + CurrentReplicas int32 `json:"currentReplicas" protobuf:"varint,3,opt,name=currentReplicas"` // desired number of replicas of pods managed by this autoscaler. - DesiredReplicas int32 `json:"desiredReplicas"` + DesiredReplicas int32 `json:"desiredReplicas" protobuf:"varint,4,opt,name=desiredReplicas"` // current average CPU utilization over all pods, represented as a percentage of requested CPU, // e.g. 70 means that an average pod is using now 70% of its requested CPU. - CurrentCPUUtilizationPercentage *int32 `json:"currentCPUUtilizationPercentage,omitempty"` + CurrentCPUUtilizationPercentage *int32 `json:"currentCPUUtilizationPercentage,omitempty" protobuf:"varint,5,opt,name=currentCPUUtilizationPercentage"` } // +genclient=true @@ -146,27 +146,27 @@ type HorizontalPodAutoscalerStatus struct { // configuration of a horizontal pod autoscaler. type HorizontalPodAutoscaler struct { unversioned.TypeMeta `json:",inline"` - // Standard object metadata. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata - v1.ObjectMeta `json:"metadata,omitempty"` + // Standard object metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // behaviour of autoscaler. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#spec-and-status. - Spec HorizontalPodAutoscalerSpec `json:"spec,omitempty"` + // behaviour of autoscaler. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. + Spec HorizontalPodAutoscalerSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // current information about the autoscaler. - Status HorizontalPodAutoscalerStatus `json:"status,omitempty"` + Status HorizontalPodAutoscalerStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } // list of horizontal pod autoscaler objects. type HorizontalPodAutoscalerList struct { unversioned.TypeMeta `json:",inline"` // Standard list metadata. - unversioned.ListMeta `json:"metadata,omitempty"` + unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // list of horizontal pod autoscaler objects. - Items []HorizontalPodAutoscaler `json:"items"` + Items []HorizontalPodAutoscaler `json:"items" protobuf:"bytes,2,rep,name=items"` } -// +genclient=true +// +genclient=true,nonNamespaced=true // A ThirdPartyResource is a generic representation of a resource, it is used by add-ons and plugins to add new resource // types to the API. 
It consists of one or more Versions of the api. @@ -174,13 +174,13 @@ type ThirdPartyResource struct { unversioned.TypeMeta `json:",inline"` // Standard object metadata - v1.ObjectMeta `json:"metadata,omitempty"` + v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Description is the description of this object. - Description string `json:"description,omitempty"` + Description string `json:"description,omitempty" protobuf:"bytes,2,opt,name=description"` // Versions are versions for this third party object - Versions []APIVersion `json:"versions,omitempty"` + Versions []APIVersion `json:"versions,omitempty" protobuf:"bytes,3,rep,name=versions"` } // ThirdPartyResourceList is a list of ThirdPartyResources. @@ -188,29 +188,26 @@ type ThirdPartyResourceList struct { unversioned.TypeMeta `json:",inline"` // Standard list metadata. - unversioned.ListMeta `json:"metadata,omitempty"` + unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Items is the list of ThirdPartyResources. - Items []ThirdPartyResource `json:"items"` + Items []ThirdPartyResource `json:"items" protobuf:"bytes,2,rep,name=items"` } // An APIVersion represents a single concrete version of an object model. type APIVersion struct { // Name of this version (e.g. 'v1'). - Name string `json:"name,omitempty"` - - // The API group to add this object into, default 'experimental'. - APIGroup string `json:"apiGroup,omitempty"` + Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` } // An internal object, used for versioned storage in etcd. Not exposed to the end user. type ThirdPartyResourceData struct { unversioned.TypeMeta `json:",inline"` // Standard object metadata. - v1.ObjectMeta `json:"metadata,omitempty"` + v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Data is the raw JSON data for this data. - Data []byte `json:"data,omitempty"` + Data []byte `json:"data,omitempty" protobuf:"bytes,2,opt,name=data"` } // +genclient=true @@ -219,61 +216,61 @@ type ThirdPartyResourceData struct { type Deployment struct { unversioned.TypeMeta `json:",inline"` // Standard object metadata. - v1.ObjectMeta `json:"metadata,omitempty"` + v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Specification of the desired behavior of the Deployment. - Spec DeploymentSpec `json:"spec,omitempty"` + Spec DeploymentSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // Most recently observed status of the Deployment. - Status DeploymentStatus `json:"status,omitempty"` + Status DeploymentStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } // DeploymentSpec is the specification of the desired behavior of the Deployment. type DeploymentSpec struct { // Number of desired pods. This is a pointer to distinguish between explicit // zero and not specified. Defaults to 1. - Replicas *int32 `json:"replicas,omitempty"` + Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` // Label selector for pods. Existing ReplicaSets whose pods are // selected by this will be the ones affected by this deployment. - Selector *LabelSelector `json:"selector,omitempty"` + Selector *LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"` // Template describes the pods that will be created. 
- Template v1.PodTemplateSpec `json:"template"` + Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,3,opt,name=template"` // The deployment strategy to use to replace existing pods with new ones. - Strategy DeploymentStrategy `json:"strategy,omitempty"` + Strategy DeploymentStrategy `json:"strategy,omitempty" protobuf:"bytes,4,opt,name=strategy"` // Minimum number of seconds for which a newly created pod should be ready // without any of its container crashing, for it to be considered available. // Defaults to 0 (pod will be considered available as soon as it is ready) - MinReadySeconds int32 `json:"minReadySeconds,omitempty"` + MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,5,opt,name=minReadySeconds"` // The number of old ReplicaSets to retain to allow rollback. // This is a pointer to distinguish between explicit zero and not specified. - RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty"` + RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,6,opt,name=revisionHistoryLimit"` // Indicates that the deployment is paused and will not be processed by the // deployment controller. - Paused bool `json:"paused,omitempty"` + Paused bool `json:"paused,omitempty" protobuf:"varint,7,opt,name=paused"` // The config this deployment is rolling back to. Will be cleared after rollback is done. - RollbackTo *RollbackConfig `json:"rollbackTo,omitempty"` + RollbackTo *RollbackConfig `json:"rollbackTo,omitempty" protobuf:"bytes,8,opt,name=rollbackTo"` } // DeploymentRollback stores the information required to rollback a deployment. type DeploymentRollback struct { unversioned.TypeMeta `json:",inline"` // Required: This must match the Name of a deployment. - Name string `json:"name"` + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` // The annotations to be updated to a deployment - UpdatedAnnotations map[string]string `json:"updatedAnnotations,omitempty"` + UpdatedAnnotations map[string]string `json:"updatedAnnotations,omitempty" protobuf:"bytes,2,rep,name=updatedAnnotations"` // The config of this deployment rollback. - RollbackTo RollbackConfig `json:"rollbackTo"` + RollbackTo RollbackConfig `json:"rollbackTo" protobuf:"bytes,3,opt,name=rollbackTo"` } type RollbackConfig struct { // The revision to rollback to. If set to 0, rollback to the last revision. - Revision int64 `json:"revision,omitempty"` + Revision int64 `json:"revision,omitempty" protobuf:"varint,1,opt,name=revision"` } const ( @@ -286,14 +283,14 @@ const ( // DeploymentStrategy describes how to replace existing pods with new ones. type DeploymentStrategy struct { // Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate. - Type DeploymentStrategyType `json:"type,omitempty"` + Type DeploymentStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=DeploymentStrategyType"` // Rolling update config params. Present only if DeploymentStrategyType = // RollingUpdate. //--- // TODO: Update this to follow our convention for oneOf, whatever we decide it // to be.
- RollingUpdate *RollingUpdateDeployment `json:"rollingUpdate,omitempty"` + RollingUpdate *RollingUpdateDeployment `json:"rollingUpdate,omitempty" protobuf:"bytes,2,opt,name=rollingUpdate"` } type DeploymentStrategyType string @@ -318,7 +315,7 @@ type RollingUpdateDeployment struct { // can be scaled down further, followed by scaling up the new RC, ensuring // that the total number of pods available at all times during the update is at // least 70% of desired pods. - MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"` + MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,1,opt,name=maxUnavailable"` // The maximum number of pods that can be scheduled above the desired number of // pods. @@ -331,35 +328,35 @@ type RollingUpdateDeployment struct { // 130% of desired pods. Once old pods have been killed, // new RC can be scaled up further, ensuring that total number of pods running // at any time during the update is at most 130% of desired pods. - MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty"` + MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty" protobuf:"bytes,2,opt,name=maxSurge"` } // DeploymentStatus is the most recently observed status of the Deployment. type DeploymentStatus struct { // The generation observed by the deployment controller. - ObservedGeneration int64 `json:"observedGeneration,omitempty"` + ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` // Total number of non-terminated pods targeted by this deployment (their labels match the selector). - Replicas int32 `json:"replicas,omitempty"` + Replicas int32 `json:"replicas,omitempty" protobuf:"varint,2,opt,name=replicas"` // Total number of non-terminated pods targeted by this deployment that have the desired template spec. - UpdatedReplicas int32 `json:"updatedReplicas,omitempty"` + UpdatedReplicas int32 `json:"updatedReplicas,omitempty" protobuf:"varint,3,opt,name=updatedReplicas"` // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment. - AvailableReplicas int32 `json:"availableReplicas,omitempty"` + AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,4,opt,name=availableReplicas"` // Total number of unavailable pods targeted by this deployment. - UnavailableReplicas int32 `json:"unavailableReplicas,omitempty"` + UnavailableReplicas int32 `json:"unavailableReplicas,omitempty" protobuf:"varint,5,opt,name=unavailableReplicas"` } // DeploymentList is a list of Deployments. type DeploymentList struct { unversioned.TypeMeta `json:",inline"` // Standard list metadata. - unversioned.ListMeta `json:"metadata,omitempty"` + unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Items is the list of Deployments. - Items []Deployment `json:"items"` + Items []Deployment `json:"items" protobuf:"bytes,2,rep,name=items"` } // TODO(madhusudancs): Uncomment while implementing DaemonSet updates. @@ -413,15 +410,15 @@ type DaemonSetSpec struct { // Selector is a label query over pods that are managed by the daemon set. // Must match in order to be controlled. // If empty, defaulted to labels on Pod template.
- // More info: http://releases.k8s.io/release-1.2/docs/user-guide/labels.md#label-selectors - Selector *LabelSelector `json:"selector,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors + Selector *LabelSelector `json:"selector,omitempty" protobuf:"bytes,1,opt,name=selector"` // Template is the object that describes the pod that will be created. // The DaemonSet will create exactly one copy of this pod on every node // that matches the template's node selector (or on every node if no node // selector is specified). - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/replication-controller.md#pod-template - Template v1.PodTemplateSpec `json:"template"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#pod-template + Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,2,opt,name=template"` // TODO(madhusudancs): Uncomment while implementing DaemonSet updates. /* Commenting out for v1.2. We are planning to bring these fields back with a more robust DaemonSet update implementation in v1.3, hence not deleting but just commenting these fields out. @@ -450,18 +447,18 @@ const ( type DaemonSetStatus struct { // CurrentNumberScheduled is the number of nodes that are running at least 1 // daemon pod and are supposed to run the daemon pod. - // More info: http://releases.k8s.io/release-1.2/docs/admin/daemons.md - CurrentNumberScheduled int32 `json:"currentNumberScheduled"` + // More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md + CurrentNumberScheduled int32 `json:"currentNumberScheduled" protobuf:"varint,1,opt,name=currentNumberScheduled"` // NumberMisscheduled is the number of nodes that are running the daemon pod, but are // not supposed to run the daemon pod. - // More info: http://releases.k8s.io/release-1.2/docs/admin/daemons.md - NumberMisscheduled int32 `json:"numberMisscheduled"` + // More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md + NumberMisscheduled int32 `json:"numberMisscheduled" protobuf:"varint,2,opt,name=numberMisscheduled"` // DesiredNumberScheduled is the total number of nodes that should be running the daemon // pod (including nodes correctly running the daemon pod). - // More info: http://releases.k8s.io/release-1.2/docs/admin/daemons.md - DesiredNumberScheduled int32 `json:"desiredNumberScheduled"` + // More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md + DesiredNumberScheduled int32 `json:"desiredNumberScheduled" protobuf:"varint,3,opt,name=desiredNumberScheduled"` } // +genclient=true @@ -470,41 +467,41 @@ type DaemonSetStatus struct { type DaemonSet struct { unversioned.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata - v1.ObjectMeta `json:"metadata,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec defines the desired behavior of this daemon set. - // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#spec-and-status - Spec DaemonSetSpec `json:"spec,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + Spec DaemonSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // Status is the current status of this daemon set. This data may be // out of date by some window of time. // Populated by the system. // Read-only. 
- // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#spec-and-status - Status DaemonSetStatus `json:"status,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + Status DaemonSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } // DaemonSetList is a collection of daemon sets. type DaemonSetList struct { unversioned.TypeMeta `json:",inline"` // Standard list metadata. - // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata - unversioned.ListMeta `json:"metadata,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Items is a list of daemon sets. - Items []DaemonSet `json:"items"` + Items []DaemonSet `json:"items" protobuf:"bytes,2,rep,name=items"` } // ThirdPartyResourceDataList is a list of ThirdPartyResourceData. type ThirdPartyResourceDataList struct { unversioned.TypeMeta `json:",inline"` // Standard list metadata - // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata - unversioned.ListMeta `json:"metadata,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Items is the list of ThirdPartyResourceData. - Items []ThirdPartyResourceData `json:"items"` + Items []ThirdPartyResourceData `json:"items" protobuf:"bytes,2,rep,name=items"` } // +genclient=true @@ -513,27 +510,27 @@ type ThirdPartyResourceDataList struct { type Job struct { unversioned.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata - v1.ObjectMeta `json:"metadata,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec is a structure defining the expected behavior of a job. - // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#spec-and-status - Spec JobSpec `json:"spec,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + Spec JobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // Status is a structure describing current status of a job. - // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#spec-and-status - Status JobStatus `json:"status,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + Status JobStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } // JobList is a collection of jobs. type JobList struct { unversioned.TypeMeta `json:",inline"` // Standard list metadata - // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata - unversioned.ListMeta `json:"metadata,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Items is the list of Job. - Items []Job `json:"items"` + Items []Job `json:"items" protobuf:"bytes,2,rep,name=items"` } // JobSpec describes what the job execution will look like. @@ -543,64 +540,64 @@ type JobSpec struct { // run at any given time.
The actual number of pods running in steady state will // be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), // i.e. when the work left to do is less than max parallelism. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/jobs.md - Parallelism *int32 `json:"parallelism,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md + Parallelism *int32 `json:"parallelism,omitempty" protobuf:"varint,1,opt,name=parallelism"` // Completions specifies the desired number of successfully finished pods the // job should be run with. Setting to nil means that the success of any // pod signals the success of all pods, and allows parallelism to have any positive // value. Setting to 1 means that parallelism is limited to 1 and the success of that // pod signals the success of the job. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/jobs.md - Completions *int32 `json:"completions,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md + Completions *int32 `json:"completions,omitempty" protobuf:"varint,2,opt,name=completions"` // Optional duration in seconds relative to the startTime that the job may be active // before the system tries to terminate it; value must be positive integer - ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty"` + ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty" protobuf:"varint,3,opt,name=activeDeadlineSeconds"` // Selector is a label query over pods that should match the pod count. // Normally, the system sets this field for you. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/labels.md#label-selectors - Selector *LabelSelector `json:"selector,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors + Selector *LabelSelector `json:"selector,omitempty" protobuf:"bytes,4,opt,name=selector"` // AutoSelector controls generation of pod labels and pod selectors. // It was not present in the original extensions/v1beta1 Job definition, but exists // to allow conversion from batch/v1 Jobs, where it corresponds to, but has the opposite // meaning as, ManualSelector. - // More info: http://releases.k8s.io/release-1.2/docs/design/selector-generation.md - AutoSelector *bool `json:"autoSelector,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/design/selector-generation.md + AutoSelector *bool `json:"autoSelector,omitempty" protobuf:"varint,5,opt,name=autoSelector"` // Template is the object that describes the pod that will be created when // executing a job. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/jobs.md - Template v1.PodTemplateSpec `json:"template"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md + Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,6,opt,name=template"` } // JobStatus represents the current state of a Job. type JobStatus struct { // Conditions represent the latest available observations of an object's current state. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/jobs.md - Conditions []JobCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md + Conditions []JobCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` // StartTime represents time when the job was acknowledged by the Job Manager. 
// It is not guaranteed to be set in happens-before order across separate operations. // It is represented in RFC3339 form and is in UTC. - StartTime *unversioned.Time `json:"startTime,omitempty"` + StartTime *unversioned.Time `json:"startTime,omitempty" protobuf:"bytes,2,opt,name=startTime"` // CompletionTime represents time when the job was completed. It is not guaranteed to // be set in happens-before order across separate operations. // It is represented in RFC3339 form and is in UTC. - CompletionTime *unversioned.Time `json:"completionTime,omitempty"` + CompletionTime *unversioned.Time `json:"completionTime,omitempty" protobuf:"bytes,3,opt,name=completionTime"` // Active is the number of actively running pods. - Active int32 `json:"active,omitempty"` + Active int32 `json:"active,omitempty" protobuf:"varint,4,opt,name=active"` // Succeeded is the number of pods which reached Phase Succeeded. - Succeeded int32 `json:"succeeded,omitempty"` + Succeeded int32 `json:"succeeded,omitempty" protobuf:"varint,5,opt,name=succeeded"` // Failed is the number of pods which reached Phase Failed. - Failed int32 `json:"failed,omitempty"` + Failed int32 `json:"failed,omitempty" protobuf:"varint,6,opt,name=failed"` } type JobConditionType string @@ -616,17 +613,17 @@ const ( // JobCondition describes current state of a job. type JobCondition struct { // Type of job condition, Complete or Failed. - Type JobConditionType `json:"type"` + Type JobConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=JobConditionType"` // Status of the condition, one of True, False, Unknown. - Status v1.ConditionStatus `json:"status"` + Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/kubernetes/pkg/api/v1.ConditionStatus"` // Last time the condition was checked. - LastProbeTime unversioned.Time `json:"lastProbeTime,omitempty"` + LastProbeTime unversioned.Time `json:"lastProbeTime,omitempty" protobuf:"bytes,3,opt,name=lastProbeTime"` // Last time the condition transitioned from one status to another. - LastTransitionTime unversioned.Time `json:"lastTransitionTime,omitempty"` + LastTransitionTime unversioned.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"` // (brief) reason for the condition's last transition. - Reason string `json:"reason,omitempty"` + Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"` // Human readable message indicating details about last transition. - Message string `json:"message,omitempty"` + Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"` } // +genclient=true @@ -638,27 +635,27 @@ type JobCondition struct { type Ingress struct { unversioned.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata - v1.ObjectMeta `json:"metadata,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec is the desired state of the Ingress. - // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#spec-and-status - Spec IngressSpec `json:"spec,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + Spec IngressSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // Status is the current state of the Ingress.
- // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#spec-and-status - Status IngressStatus `json:"status,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + Status IngressStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } // IngressList is a collection of Ingress. type IngressList struct { unversioned.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata - unversioned.ListMeta `json:"metadata,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Items is the list of Ingress. - Items []Ingress `json:"items"` + Items []Ingress `json:"items" protobuf:"bytes,2,rep,name=items"` } // IngressSpec describes the Ingress the user wishes to exist. @@ -667,17 +664,18 @@ type IngressSpec struct { // rule. At least one of 'backend' or 'rules' must be specified. This field // is optional to allow the loadbalancer controller or defaulting logic to // specify a global default. - Backend *IngressBackend `json:"backend,omitempty"` + Backend *IngressBackend `json:"backend,omitempty" protobuf:"bytes,1,opt,name=backend"` // TLS configuration. Currently the Ingress only supports a single TLS - // port, 443, and assumes TLS termination. If multiple members of this - // list specify different hosts, they will be multiplexed on the same - // port according to the hostname specified through the SNI TLS extension. - TLS []IngressTLS `json:"tls,omitempty"` + // port, 443. If multiple members of this list specify different hosts, they + // will be multiplexed on the same port according to the hostname specified + // through the SNI TLS extension, if the ingress controller fulfilling the + // ingress supports SNI. + TLS []IngressTLS `json:"tls,omitempty" protobuf:"bytes,2,rep,name=tls"` // A list of host rules used to configure the Ingress. If unspecified, or // no rule matches, all traffic is sent to the default backend. - Rules []IngressRule `json:"rules,omitempty"` + Rules []IngressRule `json:"rules,omitempty" protobuf:"bytes,3,rep,name=rules"` // TODO: Add the ability to specify load-balancer IP through claims } @@ -687,20 +685,20 @@ type IngressTLS struct { // this list must match the name/s used in the tlsSecret. Defaults to the // wildcard host setting for the loadbalancer controller fulfilling this // Ingress, if left unspecified. - Hosts []string `json:"hosts,omitempty"` + Hosts []string `json:"hosts,omitempty" protobuf:"bytes,1,rep,name=hosts"` // SecretName is the name of the secret used to terminate SSL traffic on 443. // Field is left optional to allow SSL routing based on SNI hostname alone. // If the SNI host in a listener conflicts with the "Host" header field used // by an IngressRule, the SNI host is used for termination and value of the // Host header is used for routing. - SecretName string `json:"secretName,omitempty"` + SecretName string `json:"secretName,omitempty" protobuf:"bytes,2,opt,name=secretName"` // TODO: Consider specifying different modes of termination, protocols etc. } // IngressStatus describe the current state of the Ingress. type IngressStatus struct { // LoadBalancer contains the current status of the load-balancer. 
- LoadBalancer v1.LoadBalancerStatus `json:"loadBalancer,omitempty"` + LoadBalancer v1.LoadBalancerStatus `json:"loadBalancer,omitempty" protobuf:"bytes,1,opt,name=loadBalancer"` } // IngressRule represents the rules mapping the paths under a specified host to @@ -719,13 +717,13 @@ type IngressRule struct { // Incoming requests are matched against the host before the IngressRuleValue. // If the host is unspecified, the Ingress routes all traffic based on the // specified IngressRuleValue. - Host string `json:"host,omitempty"` + Host string `json:"host,omitempty" protobuf:"bytes,1,opt,name=host"` // IngressRuleValue represents a rule to route requests for this IngressRule. // If unspecified, the rule defaults to a http catch-all. Whether that sends // just traffic matching the host to the default backend or all traffic to the // default backend, is left to the controller fulfilling the Ingress. Http is // currently the only supported IngressRuleValue. - IngressRuleValue `json:",inline,omitempty"` + IngressRuleValue `json:",inline,omitempty" protobuf:"bytes,2,opt,name=ingressRuleValue"` } // IngressRuleValue represents a rule to apply against incoming requests. If the @@ -739,7 +737,7 @@ type IngressRuleValue struct { // 2. Consider adding fields for ingress-type specific global options // usable by a loadbalancer, like http keep-alive. - HTTP *HTTPIngressRuleValue `json:"http,omitempty"` + HTTP *HTTPIngressRuleValue `json:"http,omitempty" protobuf:"bytes,1,opt,name=http"` } // HTTPIngressRuleValue is a list of http selectors pointing to backends. @@ -749,7 +747,7 @@ type IngressRuleValue struct { // or '#'. type HTTPIngressRuleValue struct { // A collection of paths that map requests to backends. - Paths []HTTPIngressPath `json:"paths"` + Paths []HTTPIngressPath `json:"paths" protobuf:"bytes,1,rep,name=paths"` // TODO: Consider adding fields for ingress-type specific global // options usable by a loadbalancer, like http keep-alive. } @@ -764,29 +762,29 @@ type HTTPIngressPath struct { // part of a URL as defined by RFC 3986. Paths must begin with // a '/'. If unspecified, the path defaults to a catch all sending // traffic to the backend. - Path string `json:"path,omitempty"` + Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"` // Backend defines the referenced service endpoint to which the traffic // will be forwarded to. - Backend IngressBackend `json:"backend"` + Backend IngressBackend `json:"backend" protobuf:"bytes,2,opt,name=backend"` } // IngressBackend describes all endpoints for a given service and port. type IngressBackend struct { // Specifies the name of the referenced service. - ServiceName string `json:"serviceName"` + ServiceName string `json:"serviceName" protobuf:"bytes,1,opt,name=serviceName"` // Specifies the port of the referenced service. - ServicePort intstr.IntOrString `json:"servicePort"` + ServicePort intstr.IntOrString `json:"servicePort" protobuf:"bytes,2,opt,name=servicePort"` } // ExportOptions is the query options to the standard REST get call. type ExportOptions struct { unversioned.TypeMeta `json:",inline"` // Should this value be exported. Export strips fields that a user can not specify. - Export bool `json:"export"` + Export bool `json:"export" protobuf:"varint,1,opt,name=export"` // Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace' - Exact bool `json:"exact"` + Exact bool `json:"exact" protobuf:"varint,2,opt,name=exact"` } // ListOptions is the query options to a standard REST list call. 
@@ -795,18 +793,18 @@ type ListOptions struct { // A selector to restrict the list of returned objects by their labels. // Defaults to everything. - LabelSelector string `json:"labelSelector,omitempty"` + LabelSelector string `json:"labelSelector,omitempty" protobuf:"bytes,1,opt,name=labelSelector"` // A selector to restrict the list of returned objects by their fields. // Defaults to everything. - FieldSelector string `json:"fieldSelector,omitempty"` + FieldSelector string `json:"fieldSelector,omitempty" protobuf:"bytes,2,opt,name=fieldSelector"` // Watch for changes to the described resources and return them as a stream of // add, update, and remove notifications. Specify resourceVersion. - Watch bool `json:"watch,omitempty"` + Watch bool `json:"watch,omitempty" protobuf:"varint,3,opt,name=watch"` // When specified with a watch call, shows changes that occur after that particular version of a resource. // Defaults to changes from the beginning of history. - ResourceVersion string `json:"resourceVersion,omitempty"` + ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,4,opt,name=resourceVersion"` // Timeout for the list/watch call. - TimeoutSeconds *int64 `json:"timeoutSeconds,omitempty"` + TimeoutSeconds *int64 `json:"timeoutSeconds,omitempty" protobuf:"varint,5,opt,name=timeoutSeconds"` } // A label selector is a label query over a set of resources. The result of matchLabels and @@ -816,24 +814,24 @@ type LabelSelector struct { // matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels // map is equivalent to an element of matchExpressions, whose key field is "key", the // operator is "In", and the values array contains only "value". The requirements are ANDed. - MatchLabels map[string]string `json:"matchLabels,omitempty"` + MatchLabels map[string]string `json:"matchLabels,omitempty" protobuf:"bytes,1,rep,name=matchLabels"` // matchExpressions is a list of label selector requirements. The requirements are ANDed. - MatchExpressions []LabelSelectorRequirement `json:"matchExpressions,omitempty"` + MatchExpressions []LabelSelectorRequirement `json:"matchExpressions,omitempty" protobuf:"bytes,2,rep,name=matchExpressions"` } // A label selector requirement is a selector that contains values, a key, and an operator that // relates the key and values. type LabelSelectorRequirement struct { // key is the label key that the selector applies to. - Key string `json:"key" patchStrategy:"merge" patchMergeKey:"key"` + Key string `json:"key" patchStrategy:"merge" patchMergeKey:"key" protobuf:"bytes,1,opt,name=key"` // operator represents a key's relationship to a set of values. // Valid operators are In, NotIn, Exists and DoesNotExist. - Operator LabelSelectorOperator `json:"operator"` + Operator LabelSelectorOperator `json:"operator" protobuf:"bytes,2,opt,name=operator,casttype=LabelSelectorOperator"` // values is an array of string values. If the operator is In or NotIn, // the values array must be non-empty. If the operator is Exists or DoesNotExist, // the values array must be empty. This array is replaced during a strategic // merge patch. - Values []string `json:"values,omitempty"` + Values []string `json:"values,omitempty" protobuf:"bytes,3,rep,name=values"` } // A label selector operator is the set of operators that can be used in a selector requirement. @@ -854,31 +852,31 @@ type ReplicaSet struct { // If the Labels of a ReplicaSet are empty, they are defaulted to // be the same as the Pod(s) that the ReplicaSet manages.
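// (Hypothetical illustration: a ReplicaSet created with no labels whose pod template carries app=web is itself defaulted to app=web.)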
- // Standard object's metadata. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata - v1.ObjectMeta `json:"metadata,omitempty"` + // Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec defines the specification of the desired behavior of the ReplicaSet. - // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#spec-and-status - Spec ReplicaSetSpec `json:"spec,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + Spec ReplicaSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // Status is the most recently observed status of the ReplicaSet. // This data may be out of date by some window of time. // Populated by the system. // Read-only. - // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#spec-and-status - Status ReplicaSetStatus `json:"status,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + Status ReplicaSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } // ReplicaSetList is a collection of ReplicaSets. type ReplicaSetList struct { unversioned.TypeMeta `json:",inline"` // Standard list metadata. - // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#types-kinds - unversioned.ListMeta `json:"metadata,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of ReplicaSets. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/replication-controller.md - Items []ReplicaSet `json:"items"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md + Items []ReplicaSet `json:"items" protobuf:"bytes,2,rep,name=items"` } // ReplicaSetSpec is the specification of a ReplicaSet. @@ -886,73 +884,97 @@ type ReplicaSetSpec struct { // Replicas is the number of desired replicas. // This is a pointer to distinguish between explicit zero and unspecified. // Defaults to 1. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/replication-controller.md#what-is-a-replication-controller - Replicas *int32 `json:"replicas,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#what-is-a-replication-controller + Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` // Selector is a label query over pods that should match the replica count. // If the selector is empty, it is defaulted to the labels present on the pod template. // Label keys and values that must match in order to be controlled by this replica set. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/labels.md#label-selectors - Selector *LabelSelector `json:"selector,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors + Selector *LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"` // Template is the object that describes the pod that will be created if // insufficient replicas are detected. 
- // More info: http://releases.k8s.io/release-1.2/docs/user-guide/replication-controller.md#pod-template - Template v1.PodTemplateSpec `json:"template,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#pod-template + Template v1.PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,3,opt,name=template"` } // ReplicaSetStatus represents the current status of a ReplicaSet. type ReplicaSetStatus struct { // Replicas is the most recently observed number of replicas. - // More info: http://releases.k8s.io/release-1.2/docs/user-guide/replication-controller.md#what-is-a-replication-controller - Replicas int32 `json:"replicas"` + // More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#what-is-a-replication-controller + Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"` // The number of pods that have labels matching the labels of the pod template of the replicaset. - FullyLabeledReplicas int32 `json:"fullyLabeledReplicas,omitempty"` + FullyLabeledReplicas int32 `json:"fullyLabeledReplicas,omitempty" protobuf:"varint,2,opt,name=fullyLabeledReplicas"` // ObservedGeneration reflects the generation of the most recently observed ReplicaSet. - ObservedGeneration int64 `json:"observedGeneration,omitempty"` + ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,3,opt,name=observedGeneration"` } +// +genclient=true,nonNamespaced=true + // Pod Security Policy governs the ability to make requests that affect the Security Context // that will be applied to a pod and container. type PodSecurityPolicy struct { unversioned.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata - v1.ObjectMeta `json:"metadata,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // spec defines the policy enforced. - Spec PodSecurityPolicySpec `json:"spec,omitempty"` + Spec PodSecurityPolicySpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` } // Pod Security Policy Spec defines the policy enforced. type PodSecurityPolicySpec struct { // privileged determines if a pod can request to be run as privileged. - Privileged bool `json:"privileged,omitempty"` - // capabilities is a list of capabilities that can be added. - Capabilities []v1.Capability `json:"capabilities,omitempty"` + Privileged bool `json:"privileged,omitempty" protobuf:"varint,1,opt,name=privileged"` + // DefaultAddCapabilities is the default set of capabilities that will be added to the container + // unless the pod spec specifically drops the capability. You may not list a capability in both + // DefaultAddCapabilities and RequiredDropCapabilities. + DefaultAddCapabilities []v1.Capability `json:"defaultAddCapabilities,omitempty" protobuf:"bytes,2,rep,name=defaultAddCapabilities,casttype=k8s.io/kubernetes/pkg/api/v1.Capability"` + // RequiredDropCapabilities are the capabilities that will be dropped from the container. These + // are required to be dropped and cannot be added. + RequiredDropCapabilities []v1.Capability `json:"requiredDropCapabilities,omitempty" protobuf:"bytes,3,rep,name=requiredDropCapabilities,casttype=k8s.io/kubernetes/pkg/api/v1.Capability"` + // AllowedCapabilities is a list of capabilities that can be requested to add to the container.
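+ // (For instance, a policy might allow NET_BIND_SERVICE here; illustrative example, not part of the API contract.)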
+ // Capabilities in this field may be added at the pod author's discretion. + // You must not list a capability in both AllowedCapabilities and RequiredDropCapabilities. + AllowedCapabilities []v1.Capability `json:"allowedCapabilities,omitempty" protobuf:"bytes,4,rep,name=allowedCapabilities,casttype=k8s.io/kubernetes/pkg/api/v1.Capability"` // volumes is a white list of allowed volume plugins. Empty indicates that all plugins // may be used. - Volumes []FSType `json:"volumes,omitempty"` + Volumes []FSType `json:"volumes,omitempty" protobuf:"bytes,5,rep,name=volumes,casttype=FSType"` // hostNetwork determines if the policy allows the use of HostNetwork in the pod spec. - HostNetwork bool `json:"hostNetwork,omitempty"` + HostNetwork bool `json:"hostNetwork,omitempty" protobuf:"varint,6,opt,name=hostNetwork"` // hostPorts determines which host port ranges are allowed to be exposed. - HostPorts []HostPortRange `json:"hostPorts,omitempty"` + HostPorts []HostPortRange `json:"hostPorts,omitempty" protobuf:"bytes,7,rep,name=hostPorts"` // hostPID determines if the policy allows the use of HostPID in the pod spec. - HostPID bool `json:"hostPID,omitempty"` + HostPID bool `json:"hostPID,omitempty" protobuf:"varint,8,opt,name=hostPID"` // hostIPC determines if the policy allows the use of HostIPC in the pod spec. - HostIPC bool `json:"hostIPC,omitempty"` + HostIPC bool `json:"hostIPC,omitempty" protobuf:"varint,9,opt,name=hostIPC"` // seLinux is the strategy that will dictate the allowable labels that may be set. - SELinux SELinuxStrategyOptions `json:"seLinux,omitempty"` + SELinux SELinuxStrategyOptions `json:"seLinux" protobuf:"bytes,10,opt,name=seLinux"` // runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set. - RunAsUser RunAsUserStrategyOptions `json:"runAsUser,omitempty"` + RunAsUser RunAsUserStrategyOptions `json:"runAsUser" protobuf:"bytes,11,opt,name=runAsUser"` + // SupplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. + SupplementalGroups SupplementalGroupsStrategyOptions `json:"supplementalGroups" protobuf:"bytes,12,opt,name=supplementalGroups"` + // FSGroup is the strategy that will dictate what fs group is used by the SecurityContext. + FSGroup FSGroupStrategyOptions `json:"fsGroup" protobuf:"bytes,13,opt,name=fsGroup"` + // ReadOnlyRootFilesystem when set to true will force containers to run with a read only root file + // system. If the container specifically requests to run with a non-read only root file system + // the PSP should deny the pod. + // If set to false the container may run with a read only root file system if it wishes but it + // will not be forced to. + ReadOnlyRootFilesystem bool `json:"readOnlyRootFilesystem,omitempty" protobuf:"varint,14,opt,name=readOnlyRootFilesystem"` } // FS Type gives strong typing to different file systems that are used by volumes. type FSType string var ( + AzureFile FSType = "azureFile" + Flocker FSType = "flocker" + FlexVolume FSType = "flexVolume" HostPath FSType = "hostPath" EmptyDir FSType = "emptyDir" GCEPersistentDisk FSType = "gcePersistentDisk" @@ -968,24 +990,26 @@ var ( CephFS FSType = "cephFS" DownwardAPI FSType = "downwardAPI" FC FSType = "fc" + ConfigMap FSType = "configMap" + All FSType = "*" ) // Host Port Range defines a range of host ports that will be enabled by a policy // for pods to use. It requires both the start and end to be defined. type HostPortRange struct { // min is the start of the range, inclusive. 
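+ // For example (illustrative values), a policy that permits only host port 8080 sets both min and max to 8080.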
- Min int32 `json:"min"` + Min int32 `json:"min" protobuf:"varint,1,opt,name=min"` // max is the end of the range, inclusive. - Max int32 `json:"max"` + Max int32 `json:"max" protobuf:"varint,2,opt,name=max"` } // SELinux Strategy Options defines the strategy type and any options used to create the strategy. type SELinuxStrategyOptions struct { // type is the strategy that will dictate the allowable labels that may be set. - Rule SELinuxStrategy `json:"rule"` + Rule SELinuxStrategy `json:"rule" protobuf:"bytes,1,opt,name=rule,casttype=SELinuxStrategy"` // seLinuxOptions required to run as; required for MustRunAs - // More info: http://releases.k8s.io/release-1.2/docs/design/security_context.md#security-context - SELinuxOptions *v1.SELinuxOptions `json:"seLinuxOptions,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/design/security_context.md#security-context + SELinuxOptions *v1.SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,2,opt,name=seLinuxOptions"` } // SELinuxStrategy denotes strategy types for generating SELinux options for a @@ -1002,17 +1026,17 @@ const ( // Run As User Strategy Options defines the strategy type and any options used to create the strategy. type RunAsUserStrategyOptions struct { // Rule is the strategy that will dictate the allowable RunAsUser values that may be set. - Rule RunAsUserStrategy `json:"rule"` + Rule RunAsUserStrategy `json:"rule" protobuf:"bytes,1,opt,name=rule,casttype=RunAsUserStrategy"` // Ranges are the allowed ranges of uids that may be used. - Ranges []IDRange `json:"ranges,omitempty"` + Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` } // ID Range provides a min/max of an allowed range of IDs. type IDRange struct { // Min is the start of the range, inclusive. - Min int64 `json:"min"` + Min int64 `json:"min" protobuf:"varint,1,opt,name=min"` // Max is the end of the range, inclusive. - Max int64 `json:"max"` + Max int64 `json:"max" protobuf:"varint,2,opt,name=max"` } // RunAsUserStrategy denotes strategy types for generating RunAsUser values for a @@ -1028,13 +1052,144 @@ const ( RunAsUserStrategyRunAsAny RunAsUserStrategy = "RunAsAny" ) +// FSGroupStrategyOptions defines the strategy type and options used to create the strategy. +type FSGroupStrategyOptions struct { + // Rule is the strategy that will dictate what FSGroup is used in the SecurityContext. + Rule FSGroupStrategyType `json:"rule,omitempty" protobuf:"bytes,1,opt,name=rule,casttype=FSGroupStrategyType"` + // Ranges are the allowed ranges of fs groups. If you would like to force a single + // fs group then supply a single range with the same start and end. + Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` +} + +// FSGroupStrategyType denotes strategy types for generating FSGroup values for a +// SecurityContext +type FSGroupStrategyType string + +const ( + // container must have FSGroup of X applied. + FSGroupStrategyMustRunAs FSGroupStrategyType = "MustRunAs" + // container may make requests for any FSGroup labels. + FSGroupStrategyRunAsAny FSGroupStrategyType = "RunAsAny" +) + +// SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy. +type SupplementalGroupsStrategyOptions struct { + // Rule is the strategy that will dictate what supplemental groups are used in the SecurityContext.
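+ // Valid values are the SupplementalGroupsStrategyType constants defined below ("MustRunAs" and "RunAsAny").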
+ Rule SupplementalGroupsStrategyType `json:"rule,omitempty" protobuf:"bytes,1,opt,name=rule,casttype=SupplementalGroupsStrategyType"` + // Ranges are the allowed ranges of supplemental groups. If you would like to force a single + // supplemental group then supply a single range with the same start and end. + Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` +} + +// SupplementalGroupsStrategyType denotes strategy types for determining valid supplemental +// groups for a SecurityContext. +type SupplementalGroupsStrategyType string + +const ( + // container must run as a particular gid. + SupplementalGroupsStrategyMustRunAs SupplementalGroupsStrategyType = "MustRunAs" + // container may make requests for any gid. + SupplementalGroupsStrategyRunAsAny SupplementalGroupsStrategyType = "RunAsAny" +) + // Pod Security Policy List is a list of PodSecurityPolicy objects. type PodSecurityPolicyList struct { unversioned.TypeMeta `json:",inline"` // Standard list metadata. - // More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata - unversioned.ListMeta `json:"metadata,omitempty"` + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of schema objects. + Items []PodSecurityPolicy `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +type NetworkPolicy struct { + unversioned.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Specification of the desired behavior for this NetworkPolicy. + Spec NetworkPolicySpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +type NetworkPolicySpec struct { + // Selects the pods to which this NetworkPolicy object applies. The array of ingress rules + // is applied to any pods selected by this field. Multiple network policies can select the + // same set of pods. In this case, the ingress rules for each are combined additively. + // This field is NOT optional and follows standard label selector semantics. + // An empty podSelector matches all pods in this namespace. + PodSelector LabelSelector `json:"podSelector" protobuf:"bytes,1,opt,name=podSelector"` + + // List of ingress rules to be applied to the selected pods. + // Traffic is allowed to a pod if namespace.networkPolicy.ingress.isolation is undefined and cluster policy allows it, + // OR if the traffic source is the pod's local node, + // OR if the traffic matches at least one ingress rule across all of the NetworkPolicy + // objects whose podSelector matches the pod. + // If this field is empty then this NetworkPolicy does not affect ingress isolation. + // If this field is present and contains at least one rule, this policy allows any traffic + // which matches at least one of the ingress rules in this list. + Ingress []NetworkPolicyIngressRule `json:"ingress,omitempty" protobuf:"bytes,2,rep,name=ingress"` +} + +// This NetworkPolicyIngressRule matches traffic if and only if the traffic matches both ports AND from. +type NetworkPolicyIngressRule struct { + // List of ports which should be made accessible on the pods selected for this rule. + // Each item in this list is combined using a logical OR. + // If this field is not provided, this rule matches all ports (traffic not restricted by port). 
+ // If this field is empty, this rule matches no ports (no traffic matches). + // If this field is present and contains at least one item, then this rule allows traffic + // only if the traffic matches at least one port in the list. + // TODO: Update this to be a pointer to slice as soon as auto-generation supports it. + Ports []NetworkPolicyPort `json:"ports,omitempty" protobuf:"bytes,1,rep,name=ports"` + + // List of sources which should be able to access the pods selected for this rule. + // Items in this list are combined using a logical OR operation. + // If this field is not provided, this rule matches all sources (traffic not restricted by source). + // If this field is empty, this rule matches no sources (no traffic matches). + // If this field is present and contains at least one item, this rule allows traffic only if the + // traffic matches at least one item in the from list. + // TODO: Update this to be a pointer to slice as soon as auto-generation supports it. + From []NetworkPolicyPeer `json:"from,omitempty" protobuf:"bytes,2,rep,name=from"` +} + +type NetworkPolicyPort struct { + // Optional. The protocol (TCP or UDP) which traffic must match. + // If not specified, this field defaults to TCP. + Protocol *v1.Protocol `json:"protocol,omitempty" protobuf:"bytes,1,opt,name=protocol,casttype=k8s.io/kubernetes/pkg/api/v1.Protocol"` + + // If specified, the port on the given protocol. This can + // either be a numerical or named port on a pod. If this field is not provided, + // this matches all port names and numbers. + // If present, only traffic on the specified protocol AND port + // will be matched. + Port *intstr.IntOrString `json:"port,omitempty" protobuf:"bytes,2,opt,name=port"` +} + +type NetworkPolicyPeer struct { + // Exactly one of the following must be specified. + + // This is a label selector which selects Pods in this namespace. + // This field follows standard label selector semantics. + // If not provided, this selector selects no pods. + // If present but empty, this selector selects all pods in this namespace. + PodSelector *LabelSelector `json:"podSelector,omitempty" protobuf:"bytes,1,opt,name=podSelector"` + + // Selects Namespaces using cluster-scoped labels. This + // matches all pods in all namespaces selected by this label selector. + // This field follows standard label selector semantics. + // If omitted, this selector selects no namespaces. + // If present but empty, this selector selects all namespaces. + NamespaceSelector *LabelSelector `json:"namespaceSelector,omitempty" protobuf:"bytes,2,opt,name=namespaceSelector"` +} + +// Network Policy List is a list of NetworkPolicy objects. +type NetworkPolicyList struct { + unversioned.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Items is a list of schema objects.
- Items []PodSecurityPolicy `json:"items"` + Items []NetworkPolicy `json:"items" protobuf:"bytes,2,rep,name=items"` } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/types_swagger_doc_generated.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/types_swagger_doc_generated.go index 44e446215ad1..c8892f08a4c1 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/types_swagger_doc_generated.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/types_swagger_doc_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2015 The Kubernetes Authors All rights reserved. +Copyright 2016 The Kubernetes Authors All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -28,9 +28,8 @@ package v1beta1 // AUTO-GENERATED FUNCTIONS START HERE var map_APIVersion = map[string]string{ - "": "An APIVersion represents a single concrete version of an object model.", - "name": "Name of this version (e.g. 'v1').", - "apiGroup": "The API group to add this object into, default 'experimental'.", + "": "An APIVersion represents a single concrete version of an object model.", + "name": "Name of this version (e.g. 'v1').", } func (APIVersion) SwaggerDoc() map[string]string { @@ -66,9 +65,9 @@ func (CustomMetricTarget) SwaggerDoc() map[string]string { var map_DaemonSet = map[string]string{ "": "DaemonSet represents the configuration of a daemon set.", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata", - "spec": "Spec defines the desired behavior of this daemon set. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#spec-and-status", - "status": "Status is the current status of this daemon set. This data may be out of date by some window of time. Populated by the system. Read-only. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "spec": "Spec defines the desired behavior of this daemon set. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", + "status": "Status is the current status of this daemon set. This data may be out of date by some window of time. Populated by the system. Read-only. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", } func (DaemonSet) SwaggerDoc() map[string]string { @@ -77,7 +76,7 @@ func (DaemonSet) SwaggerDoc() map[string]string { var map_DaemonSetList = map[string]string{ "": "DaemonSetList is a collection of daemon sets.", - "metadata": "Standard list metadata. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", "items": "Items is a list of daemon sets.", } @@ -87,8 +86,8 @@ func (DaemonSetList) SwaggerDoc() map[string]string { var map_DaemonSetSpec = map[string]string{ "": "DaemonSetSpec is the specification of a daemon set.", - "selector": "Selector is a label query over pods that are managed by the daemon set. Must match in order to be controlled. If empty, defaulted to labels on Pod template. 
More info: http://releases.k8s.io/release-1.2/docs/user-guide/labels.md#label-selectors", - "template": "Template is the object that describes the pod that will be created. The DaemonSet will create exactly one copy of this pod on every node that matches the template's node selector (or on every node if no node selector is specified). More info: http://releases.k8s.io/release-1.2/docs/user-guide/replication-controller.md#pod-template", + "selector": "Selector is a label query over pods that are managed by the daemon set. Must match in order to be controlled. If empty, defaulted to labels on Pod template. More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors", + "template": "Template is the object that describes the pod that will be created. The DaemonSet will create exactly one copy of this pod on every node that matches the template's node selector (or on every node if no node selector is specified). More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#pod-template", } func (DaemonSetSpec) SwaggerDoc() map[string]string { @@ -97,9 +96,9 @@ func (DaemonSetSpec) SwaggerDoc() map[string]string { var map_DaemonSetStatus = map[string]string{ "": "DaemonSetStatus represents the current status of a daemon set.", - "currentNumberScheduled": "CurrentNumberScheduled is the number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod. More info: http://releases.k8s.io/release-1.2/docs/admin/daemons.md", - "numberMisscheduled": "NumberMisscheduled is the number of nodes that are running the daemon pod, but are not supposed to run the daemon pod. More info: http://releases.k8s.io/release-1.2/docs/admin/daemons.md", - "desiredNumberScheduled": "DesiredNumberScheduled is the total number of nodes that should be running the daemon pod (including nodes correctly running the daemon pod). More info: http://releases.k8s.io/release-1.2/docs/admin/daemons.md", + "currentNumberScheduled": "CurrentNumberScheduled is the number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod. More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md", + "numberMisscheduled": "NumberMisscheduled is the number of nodes that are running the daemon pod, but are not supposed to run the daemon pod. More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md", + "desiredNumberScheduled": "DesiredNumberScheduled is the total number of nodes that should be running the daemon pod (including nodes correctly running the daemon pod). More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md", } func (DaemonSetStatus) SwaggerDoc() map[string]string { @@ -187,6 +186,16 @@ func (ExportOptions) SwaggerDoc() map[string]string { return map_ExportOptions } +var map_FSGroupStrategyOptions = map[string]string{ + "": "FSGroupStrategyOptions defines the strategy type and options used to create the strategy.", + "rule": "Rule is the strategy that will dictate what FSGroup is used in the SecurityContext.", + "ranges": "Ranges are the allowed ranges of fs groups. If you would like to force a single fs group then supply a single range with the same start and end.", +} + +func (FSGroupStrategyOptions) SwaggerDoc() map[string]string { + return map_FSGroupStrategyOptions +} + var map_HTTPIngressPath = map[string]string{ "": "HTTPIngressPath associates a path regex with a backend. 
Incoming urls matching the path are forwarded to the backend.", "path": "Path is an extended POSIX regex as defined by IEEE Std 1003.1, (i.e. this follows the egrep/unix syntax, not the perl syntax) matched against the path of an incoming request. Currently it can contain characters disallowed from the conventional \"path\" part of a URL as defined by RFC 3986. Paths must begin with a '/'. If unspecified, the path defaults to a catch all sending traffic to the backend.", @@ -208,8 +217,8 @@ func (HTTPIngressRuleValue) SwaggerDoc() map[string]string { var map_HorizontalPodAutoscaler = map[string]string{ "": "configuration of a horizontal pod autoscaler.", - "metadata": "Standard object metadata. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata", - "spec": "behaviour of autoscaler. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#spec-and-status.", + "metadata": "Standard object metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "spec": "behaviour of autoscaler. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status.", "status": "current information about the autoscaler.", } @@ -274,9 +283,9 @@ func (IDRange) SwaggerDoc() map[string]string { var map_Ingress = map[string]string{ "": "Ingress is a collection of rules that allow inbound connections to reach the endpoints defined by a backend. An Ingress can be configured to give services externally-reachable urls, load balance traffic, terminate SSL, offer name based virtual hosting etc.", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata", - "spec": "Spec is the desired state of the Ingress. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#spec-and-status", - "status": "Status is the current state of the Ingress. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "spec": "Spec is the desired state of the Ingress. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", + "status": "Status is the current state of the Ingress. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", } func (Ingress) SwaggerDoc() map[string]string { @@ -295,7 +304,7 @@ func (IngressBackend) SwaggerDoc() map[string]string { var map_IngressList = map[string]string{ "": "IngressList is a collection of Ingress.", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", "items": "Items is the list of Ingress.", } @@ -323,7 +332,7 @@ func (IngressRuleValue) SwaggerDoc() map[string]string { var map_IngressSpec = map[string]string{ "": "IngressSpec describes the Ingress the user wishes to exist.", "backend": "A default backend capable of servicing requests that don't match any rule. At least one of 'backend' or 'rules' must be specified. This field is optional to allow the loadbalancer controller or defaulting logic to specify a global default.", - "tls": "TLS configuration. Currently the Ingress only supports a single TLS port, 443, and assumes TLS termination.
If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension.", + "tls": "TLS configuration. Currently the Ingress only supports a single TLS port, 443. If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI.", "rules": "A list of host rules used to configure the Ingress. If unspecified, or no rule matches, all traffic is sent to the default backend.", } @@ -352,9 +361,9 @@ func (IngressTLS) SwaggerDoc() map[string]string { var map_Job = map[string]string{ "": "Job represents the configuration of a single job.", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata", - "spec": "Spec is a structure defining the expected behavior of a job. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#spec-and-status", - "status": "Status is a structure describing current status of a job. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "spec": "Spec is a structure defining the expected behavior of a job. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", + "status": "Status is a structure describing current status of a job. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", } func (Job) SwaggerDoc() map[string]string { @@ -377,7 +386,7 @@ func (JobCondition) SwaggerDoc() map[string]string { var map_JobList = map[string]string{ "": "JobList is a collection of jobs.", - "metadata": "Standard list metadata More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", "items": "Items is the list of Job.", } @@ -387,12 +396,12 @@ func (JobList) SwaggerDoc() map[string]string { var map_JobSpec = map[string]string{ "": "JobSpec describes how the job execution will look like.", - "parallelism": "Parallelism specifies the maximum desired number of pods the job should run at any given time. The actual number of pods running in steady state will be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), i.e. when the work left to do is less than max parallelism. More info: http://releases.k8s.io/release-1.2/docs/user-guide/jobs.md", - "completions": "Completions specifies the desired number of successfully finished pods the job should be run with. Setting to nil means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: http://releases.k8s.io/release-1.2/docs/user-guide/jobs.md", + "parallelism": "Parallelism specifies the maximum desired number of pods the job should run at any given time. The actual number of pods running in steady state will be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), i.e. when the work left to do is less than max parallelism. 
More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md", + "completions": "Completions specifies the desired number of successfully finished pods the job should be run with. Setting to nil means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md", "activeDeadlineSeconds": "Optional duration in seconds relative to the startTime that the job may be active before the system tries to terminate it; value must be positive integer", - "selector": "Selector is a label query over pods that should match the pod count. Normally, the system sets this field for you. More info: http://releases.k8s.io/release-1.2/docs/user-guide/labels.md#label-selectors", - "autoSelector": "AutoSelector controls generation of pod labels and pod selectors. It was not present in the original extensions/v1beta1 Job definition, but exists to allow conversion from batch/v1 Jobs, where it corresponds to, but has the opposite meaning as, ManualSelector. More info: http://releases.k8s.io/release-1.2/docs/design/selector-generation.md", - "template": "Template is the object that describes the pod that will be created when executing a job. More info: http://releases.k8s.io/release-1.2/docs/user-guide/jobs.md", + "selector": "Selector is a label query over pods that should match the pod count. Normally, the system sets this field for you. More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors", + "autoSelector": "AutoSelector controls generation of pod labels and pod selectors. It was not present in the original extensions/v1beta1 Job definition, but exists to allow conversion from batch/v1 Jobs, where it corresponds to, but has the opposite meaning as, ManualSelector. More info: http://releases.k8s.io/HEAD/docs/design/selector-generation.md", + "template": "Template is the object that describes the pod that will be created when executing a job. More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md", } func (JobSpec) SwaggerDoc() map[string]string { @@ -401,7 +410,7 @@ func (JobSpec) SwaggerDoc() map[string]string { var map_JobStatus = map[string]string{ "": "JobStatus represents the current state of a Job.", - "conditions": "Conditions represent the latest available observations of an object's current state. More info: http://releases.k8s.io/release-1.2/docs/user-guide/jobs.md", + "conditions": "Conditions represent the latest available observations of an object's current state. More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md", "startTime": "StartTime represents time when the job was acknowledged by the Job Manager. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC.", "completionTime": "CompletionTime represents time when the job was completed. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC.", "active": "Active is the number of actively running pods.", @@ -447,9 +456,65 @@ func (ListOptions) SwaggerDoc() map[string]string { return map_ListOptions } +var map_NetworkPolicy = map[string]string{ + "metadata": "Standard object's metadata. 
More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "spec": "Specification of the desired behavior for this NetworkPolicy.", +} + +func (NetworkPolicy) SwaggerDoc() map[string]string { + return map_NetworkPolicy +} + +var map_NetworkPolicyIngressRule = map[string]string{ + "": "This NetworkPolicyIngressRule matches traffic if and only if the traffic matches both ports AND from.", + "ports": "List of ports which should be made accessible on the pods selected for this rule. Each item in this list is combined using a logical OR. If this field is not provided, this rule matches all ports (traffic not restricted by port). If this field is empty, this rule matches no ports (no traffic matches). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list.", + "from": "List of sources which should be able to access the pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is not provided, this rule matches all sources (traffic not restricted by source). If this field is empty, this rule matches no sources (no traffic matches). If this field is present and contains at least one item, this rule allows traffic only if the traffic matches at least one item in the from list.", +} + +func (NetworkPolicyIngressRule) SwaggerDoc() map[string]string { + return map_NetworkPolicyIngressRule +} + +var map_NetworkPolicyList = map[string]string{ + "": "Network Policy List is a list of NetworkPolicy objects.", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "items": "Items is a list of schema objects.", +} + +func (NetworkPolicyList) SwaggerDoc() map[string]string { + return map_NetworkPolicyList +} + +var map_NetworkPolicyPeer = map[string]string{ + "podSelector": "This is a label selector which selects Pods in this namespace. This field follows standard label selector semantics. If not provided, this selector selects no pods. If present but empty, this selector selects all pods in this namespace.", + "namespaceSelector": "Selects Namespaces using cluster-scoped labels. This matches all pods in all namespaces selected by this label selector. This field follows standard label selector semantics. If omitted, this selector selects no namespaces. If present but empty, this selector selects all namespaces.", +} + +func (NetworkPolicyPeer) SwaggerDoc() map[string]string { + return map_NetworkPolicyPeer +} + +var map_NetworkPolicyPort = map[string]string{ + "protocol": "Optional. The protocol (TCP or UDP) which traffic must match. If not specified, this field defaults to TCP.", + "port": "If specified, the port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers. If present, only traffic on the specified protocol AND port will be matched.", +} + +func (NetworkPolicyPort) SwaggerDoc() map[string]string { + return map_NetworkPolicyPort +} + +var map_NetworkPolicySpec = map[string]string{ + "podSelector": "Selects the pods to which this NetworkPolicy object applies. The array of ingress rules is applied to any pods selected by this field. Multiple network policies can select the same set of pods. In this case, the ingress rules for each are combined additively. This field is NOT optional and follows standard label selector semantics.
An empty podSelector matches all pods in this namespace.", + "ingress": "List of ingress rules to be applied to the selected pods. Traffic is allowed to a pod if namespace.networkPolicy.ingress.isolation is undefined and cluster policy allows it, OR if the traffic source is the pod's local node, OR if the traffic matches at least one ingress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy does not affect ingress isolation. If this field is present and contains at least one rule, this policy allows any traffic which matches at least one of the ingress rules in this list.", +} + +func (NetworkPolicySpec) SwaggerDoc() map[string]string { + return map_NetworkPolicySpec +} + var map_PodSecurityPolicy = map[string]string{ "": "Pod Security Policy governs the ability to make requests that affect the Security Context that will be applied to a pod and container.", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", "spec": "spec defines the policy enforced.", } @@ -459,7 +524,7 @@ func (PodSecurityPolicy) SwaggerDoc() map[string]string { var map_PodSecurityPolicyList = map[string]string{ "": "Pod Security Policy List is a list of PodSecurityPolicy objects.", - "metadata": "Standard list metadata. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", "items": "Items is a list of schema objects.", } @@ -468,16 +533,21 @@ func (PodSecurityPolicyList) SwaggerDoc() map[string]string { } var map_PodSecurityPolicySpec = map[string]string{ - "": "Pod Security Policy Spec defines the policy enforced.", - "privileged": "privileged determines if a pod can request to be run as privileged.", - "capabilities": "capabilities is a list of capabilities that can be added.", - "volumes": "volumes is a white list of allowed volume plugins. Empty indicates that all plugins may be used.", - "hostNetwork": "hostNetwork determines if the policy allows the use of HostNetwork in the pod spec.", - "hostPorts": "hostPorts determines which host port ranges are allowed to be exposed.", - "hostPID": "hostPID determines if the policy allows the use of HostPID in the pod spec.", - "hostIPC": "hostIPC determines if the policy allows the use of HostIPC in the pod spec.", - "seLinux": "seLinux is the strategy that will dictate the allowable labels that may be set.", - "runAsUser": "runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set.", + "": "Pod Security Policy Spec defines the policy enforced.", + "privileged": "privileged determines if a pod can request to be run as privileged.", + "defaultAddCapabilities": "DefaultAddCapabilities is the default set of capabilities that will be added to the container unless the pod spec specifically drops the capability. You may not list a capability in both DefaultAddCapabilities and RequiredDropCapabilities.", + "requiredDropCapabilities": "RequiredDropCapabilities are the capabilities that will be dropped from the container. These are required to be dropped and cannot be added.", + "allowedCapabilities": "AllowedCapabilities is a list of capabilities that can be requested to add to the container.
Capabilities in this field may be added at the pod author's discretion. You must not list a capability in both AllowedCapabilities and RequiredDropCapabilities.", + "volumes": "volumes is a white list of allowed volume plugins. Empty indicates that all plugins may be used.", + "hostNetwork": "hostNetwork determines if the policy allows the use of HostNetwork in the pod spec.", + "hostPorts": "hostPorts determines which host port ranges are allowed to be exposed.", + "hostPID": "hostPID determines if the policy allows the use of HostPID in the pod spec.", + "hostIPC": "hostIPC determines if the policy allows the use of HostIPC in the pod spec.", + "seLinux": "seLinux is the strategy that will dictate the allowable labels that may be set.", + "runAsUser": "runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set.", + "supplementalGroups": "SupplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext.", + "fsGroup": "FSGroup is the strategy that will dictate what fs group is used by the SecurityContext.", + "readOnlyRootFilesystem": "ReadOnlyRootFilesystem when set to true will force containers to run with a read only root file system. If the container specifically requests to run with a non-read only root file system the PSP should deny the pod. If set to false the container may run with a read only root file system if it wishes but it will not be forced to.", } func (PodSecurityPolicySpec) SwaggerDoc() map[string]string { @@ -486,9 +556,9 @@ func (PodSecurityPolicySpec) SwaggerDoc() map[string]string { var map_ReplicaSet = map[string]string{ "": "ReplicaSet represents the configuration of a ReplicaSet.", - "metadata": "If the Labels of a ReplicaSet are empty, they are defaulted to be the same as the Pod(s) that the ReplicaSet manages. Standard object's metadata. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata", - "spec": "Spec defines the specification of the desired behavior of the ReplicaSet. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#spec-and-status", - "status": "Status is the most recently observed status of the ReplicaSet. This data may be out of date by some window of time. Populated by the system. Read-only. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#spec-and-status", + "metadata": "If the Labels of a ReplicaSet are empty, they are defaulted to be the same as the Pod(s) that the ReplicaSet manages. Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "spec": "Spec defines the specification of the desired behavior of the ReplicaSet. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", + "status": "Status is the most recently observed status of the ReplicaSet. This data may be out of date by some window of time. Populated by the system. Read-only. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", } func (ReplicaSet) SwaggerDoc() map[string]string { @@ -497,8 +567,8 @@ func (ReplicaSet) SwaggerDoc() map[string]string { var map_ReplicaSetList = map[string]string{ "": "ReplicaSetList is a collection of ReplicaSets.", - "metadata": "Standard list metadata. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#types-kinds", - "items": "List of ReplicaSets. 
More info: http://releases.k8s.io/release-1.2/docs/user-guide/replication-controller.md", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", + "items": "List of ReplicaSets. More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md", } func (ReplicaSetList) SwaggerDoc() map[string]string { @@ -507,9 +577,9 @@ func (ReplicaSetList) SwaggerDoc() map[string]string { var map_ReplicaSetSpec = map[string]string{ "": "ReplicaSetSpec is the specification of a ReplicaSet.", - "replicas": "Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: http://releases.k8s.io/release-1.2/docs/user-guide/replication-controller.md#what-is-a-replication-controller", - "selector": "Selector is a label query over pods that should match the replica count. If the selector is empty, it is defaulted to the labels present on the pod template. Label keys and values that must match in order to be controlled by this replica set. More info: http://releases.k8s.io/release-1.2/docs/user-guide/labels.md#label-selectors", - "template": "Template is the object that describes the pod that will be created if insufficient replicas are detected. More info: http://releases.k8s.io/release-1.2/docs/user-guide/replication-controller.md#pod-template", + "replicas": "Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#what-is-a-replication-controller", + "selector": "Selector is a label query over pods that should match the replica count. If the selector is empty, it is defaulted to the labels present on the pod template. Label keys and values that must match in order to be controlled by this replica set. More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors", + "template": "Template is the object that describes the pod that will be created if insufficient replicas are detected. More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#pod-template", } func (ReplicaSetSpec) SwaggerDoc() map[string]string { @@ -518,7 +588,7 @@ func (ReplicaSetSpec) SwaggerDoc() map[string]string { var map_ReplicaSetStatus = map[string]string{ "": "ReplicaSetStatus represents the current status of a ReplicaSet.", - "replicas": "Replicas is the most recently oberved number of replicas. More info: http://releases.k8s.io/release-1.2/docs/user-guide/replication-controller.md#what-is-a-replication-controller", + "replicas": "Replicas is the most recently oberved number of replicas. 
More info: http://releases.k8s.io/release-1.2/docs/user-guide/replication-controller.md#what-is-a-replication-controller", + "replicas": "Replicas is the most recently observed number of replicas. More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#what-is-a-replication-controller", "fullyLabeledReplicas": "The number of pods that have labels matching the labels of the pod template of the replicaset.", "observedGeneration": "ObservedGeneration reflects the generation of the most recently observed ReplicaSet.", } @@ -566,7 +636,7 @@ func (RunAsUserStrategyOptions) SwaggerDoc() map[string]string { var map_SELinuxStrategyOptions = map[string]string{ "": "SELinux Strategy Options defines the strategy type and any options used to create the strategy.", "rule": "type is the strategy that will dictate the allowable labels that may be set.", - "seLinuxOptions": "seLinuxOptions required to run as; required for MustRunAs More info: http://releases.k8s.io/release-1.2/docs/design/security_context.md#security-context", + "seLinuxOptions": "seLinuxOptions required to run as; required for MustRunAs More info: http://releases.k8s.io/HEAD/docs/design/security_context.md#security-context", } func (SELinuxStrategyOptions) SwaggerDoc() map[string]string { @@ -575,9 +645,9 @@ func (SELinuxStrategyOptions) SwaggerDoc() map[string]string { var map_Scale = map[string]string{ "": "represents a scaling request for a resource.", - "metadata": "Standard object metadata; More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata.", - "spec": "defines the behavior of the scale. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#spec-and-status.", - "status": "current status of the scale. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#spec-and-status. Read-only.", + "metadata": "Standard object metadata; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata.", + "spec": "defines the behavior of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status.", + "status": "current status of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. Read-only.", } func (Scale) SwaggerDoc() map[string]string { @@ -596,8 +666,8 @@ func (ScaleSpec) SwaggerDoc() map[string]string { var map_ScaleStatus = map[string]string{ "": "represents the current status of a scale subresource.", "replicas": "actual number of observed instances of the scaled object.", - "selector": "label query over pods that should match the replicas count. More info: http://releases.k8s.io/release-1.2/docs/user-guide/labels.md#label-selectors", - "targetSelector": "label selector for pods that should match the replicas count. This is a serializated version of both map-based and more expressive set-based selectors. This is done to avoid introspection in the clients. The string will be in the same format as the query-param syntax. If the target type only supports map-based selectors, both this field and map-based selector field are populated. More info: http://releases.k8s.io/release-1.2/docs/user-guide/labels.md#label-selectors", + "selector": "label query over pods that should match the replicas count. More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors", + "targetSelector": "label selector for pods that should match the replicas count. This is a serialized version of both map-based and more expressive set-based selectors. This is done to avoid introspection in the clients. The string will be in the same format as the query-param syntax.
If the target type only supports map-based selectors, both this field and map-based selector field are populated. More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors", } func (ScaleStatus) SwaggerDoc() map[string]string { @@ -606,8 +676,8 @@ func (ScaleStatus) SwaggerDoc() map[string]string { var map_SubresourceReference = map[string]string{ "": "SubresourceReference contains enough information to let you inspect or modify the referred subresource.", - "kind": "Kind of the referent; More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#types-kinds\"", - "name": "Name of the referent; More info: http://releases.k8s.io/release-1.2/docs/user-guide/identifiers.md#names", + "kind": "Kind of the referent; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", + "name": "Name of the referent; More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#names", "apiVersion": "API version of the referent", "subresource": "Subresource name of the referent", } @@ -616,6 +686,16 @@ func (SubresourceReference) SwaggerDoc() map[string]string { return map_SubresourceReference } +var map_SupplementalGroupsStrategyOptions = map[string]string{ + "": "SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy.", + "rule": "Rule is the strategy that will dictate what supplemental groups is used in the SecurityContext.", + "ranges": "Ranges are the allowed ranges of supplemental groups. If you would like to force a single supplemental group then supply a single range with the same start and end.", +} + +func (SupplementalGroupsStrategyOptions) SwaggerDoc() map[string]string { + return map_SupplementalGroupsStrategyOptions +} + var map_ThirdPartyResource = map[string]string{ "": "A ThirdPartyResource is a generic representation of a resource, it is used by add-ons and plugins to add new resource types to the API. It consists of one or more Versions of the api.", "metadata": "Standard object metadata", @@ -639,7 +719,7 @@ func (ThirdPartyResourceData) SwaggerDoc() map[string]string { var map_ThirdPartyResourceDataList = map[string]string{ "": "ThirdPartyResrouceDataList is a list of ThirdPartyResourceData.", - "metadata": "Standard list metadata More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", "items": "Items is the list of ThirdpartyResourceData.", } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/extensions/validation/validation.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/extensions/validation/validation.go index b87bff219f23..1d251e897f32 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/extensions/validation/validation.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/extensions/validation/validation.go @@ -17,8 +17,9 @@ limitations under the License. 
package validation import ( - "encoding/json" + "fmt" "net" + "reflect" "regexp" "strconv" "strings" @@ -28,130 +29,51 @@ import ( unversionedvalidation "k8s.io/kubernetes/pkg/api/unversioned/validation" apivalidation "k8s.io/kubernetes/pkg/api/validation" "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/controller/podautoscaler" "k8s.io/kubernetes/pkg/labels" + psputil "k8s.io/kubernetes/pkg/security/podsecuritypolicy/util" "k8s.io/kubernetes/pkg/util/intstr" "k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/util/validation" "k8s.io/kubernetes/pkg/util/validation/field" ) -// ValidateHorizontalPodAutoscaler can be used to check whether the given autoscaler name is valid. -// Prefix indicates this name will be used as part of generation, in which case trailing dashes are allowed. -func ValidateHorizontalPodAutoscalerName(name string, prefix bool) (bool, string) { - // TODO: finally move it to pkg/api/validation and use nameIsDNSSubdomain function - return apivalidation.ValidateReplicationControllerName(name, prefix) -} - -func validateHorizontalPodAutoscalerSpec(autoscaler extensions.HorizontalPodAutoscalerSpec, fldPath *field.Path) field.ErrorList { +func ValidateThirdPartyResourceUpdate(update, old *extensions.ThirdPartyResource) field.ErrorList { allErrs := field.ErrorList{} - if autoscaler.MinReplicas != nil && *autoscaler.MinReplicas < 1 { - allErrs = append(allErrs, field.Invalid(fldPath.Child("minReplicas"), *autoscaler.MinReplicas, "must be greater than 0")) - } - if autoscaler.MaxReplicas < 1 { - allErrs = append(allErrs, field.Invalid(fldPath.Child("maxReplicas"), autoscaler.MaxReplicas, "must be greater than 0")) - } - if autoscaler.MinReplicas != nil && autoscaler.MaxReplicas < *autoscaler.MinReplicas { - allErrs = append(allErrs, field.Invalid(fldPath.Child("maxReplicas"), autoscaler.MaxReplicas, "must be greater than or equal to `minReplicas`")) - } - if autoscaler.CPUUtilization != nil && autoscaler.CPUUtilization.TargetPercentage < 1 { - allErrs = append(allErrs, field.Invalid(fldPath.Child("cpuUtilization", "targetPercentage"), autoscaler.CPUUtilization.TargetPercentage, "must be greater than 0")) - } - if refErrs := ValidateSubresourceReference(autoscaler.ScaleRef, fldPath.Child("scaleRef")); len(refErrs) > 0 { - allErrs = append(allErrs, refErrs...) - } else if autoscaler.ScaleRef.Subresource != "scale" { - allErrs = append(allErrs, field.NotSupported(fldPath.Child("scaleRef", "subresource"), autoscaler.ScaleRef.Subresource, []string{"scale"})) - } + allErrs = append(allErrs, apivalidation.ValidateObjectMetaUpdate(&update.ObjectMeta, &old.ObjectMeta, field.NewPath("metadata"))...) + allErrs = append(allErrs, ValidateThirdPartyResource(update)...) 
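// Illustrative sketch, not part of the upstream sources: update validators
// in this file follow the two-step shape shown in the function above --
// validate the metadata transition, then re-validate the whole updated
// object. A hypothetical validator for a made-up Widget type (Widget and
// ValidateWidget are placeholders, not real Kubernetes APIs) would be:
//
//	func ValidateWidgetUpdate(update, old *Widget) field.ErrorList {
//		allErrs := field.ErrorList{}
//		allErrs = append(allErrs, apivalidation.ValidateObjectMetaUpdate(&update.ObjectMeta, &old.ObjectMeta, field.NewPath("metadata"))...)
//		allErrs = append(allErrs, ValidateWidget(update)...)
//		return allErrs
//	}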
return allErrs } -func ValidateSubresourceReference(ref extensions.SubresourceReference, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if len(ref.Kind) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("kind"), "")) - } else if ok, msg := apivalidation.IsValidPathSegmentName(ref.Kind); !ok { - allErrs = append(allErrs, field.Invalid(fldPath.Child("kind"), ref.Kind, msg)) - } - - if len(ref.Name) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("name"), "")) - } else if ok, msg := apivalidation.IsValidPathSegmentName(ref.Name); !ok { - allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), ref.Name, msg)) - } - - if len(ref.Subresource) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("subresource"), "")) - } else if ok, msg := apivalidation.IsValidPathSegmentName(ref.Subresource); !ok { - allErrs = append(allErrs, field.Invalid(fldPath.Child("subresource"), ref.Subresource, msg)) +func ValidateThirdPartyResourceName(name string, prefix bool) []string { + // Make sure it's a valid DNS subdomain + if msgs := apivalidation.NameIsDNSSubdomain(name, prefix); len(msgs) != 0 { + return msgs } - return allErrs -} -func validateHorizontalPodAutoscalerAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if annotationValue, found := annotations[podautoscaler.HpaCustomMetricsTargetAnnotationName]; found { - // Try to parse the annotation - var targetList extensions.CustomMetricTargetList - if err := json.Unmarshal([]byte(annotationValue), &targetList); err != nil { - allErrs = append(allErrs, field.Invalid(fldPath.Child("annotations"), annotations, "failed to parse custom metrics target annotation")) - } else { - if len(targetList.Items) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("annotations", "items"), "custom metrics target must not be empty")) - } - for _, target := range targetList.Items { - if target.Name == "" { - allErrs = append(allErrs, field.Required(fldPath.Child("annotations", "items", "name"), "missing custom metric target name")) - } - if target.TargetValue.MilliValue() <= 0 { - allErrs = append(allErrs, field.Invalid(fldPath.Child("annotations", "items", "value"), target.TargetValue, "custom metric target value must be greater than 0")) - } - } + // Make sure it's at least three segments (kind + two-segment group name) + if !prefix { + parts := strings.Split(name, ".") + if len(parts) < 3 { + return []string{"must be at least three segments long: <kind>.<domain>.<tld>"} } } - return allErrs -} - -func ValidateHorizontalPodAutoscaler(autoscaler *extensions.HorizontalPodAutoscaler) field.ErrorList { - allErrs := apivalidation.ValidateObjectMeta(&autoscaler.ObjectMeta, true, ValidateHorizontalPodAutoscalerName, field.NewPath("metadata")) - allErrs = append(allErrs, validateHorizontalPodAutoscalerSpec(autoscaler.Spec, field.NewPath("spec"))...) - allErrs = append(allErrs, validateHorizontalPodAutoscalerAnnotations(autoscaler.Annotations, field.NewPath("metadata"))...) - return allErrs -} - -func ValidateHorizontalPodAutoscalerUpdate(newAutoscaler, oldAutoscaler *extensions.HorizontalPodAutoscaler) field.ErrorList { - allErrs := apivalidation.ValidateObjectMetaUpdate(&newAutoscaler.ObjectMeta, &oldAutoscaler.ObjectMeta, field.NewPath("metadata")) - allErrs = append(allErrs, validateHorizontalPodAutoscalerSpec(newAutoscaler.Spec, field.NewPath("spec"))...) 
- return allErrs -} - -func ValidateHorizontalPodAutoscalerStatusUpdate(newAutoscaler, oldAutoscaler *extensions.HorizontalPodAutoscaler) field.ErrorList { - allErrs := apivalidation.ValidateObjectMetaUpdate(&newAutoscaler.ObjectMeta, &oldAutoscaler.ObjectMeta, field.NewPath("metadata")) - status := newAutoscaler.Status - allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.CurrentReplicas), field.NewPath("status", "currentReplicas"))...) - allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.DesiredReplicas), field.NewPath("status", "desiredReplicasa"))...) - return allErrs -} -func ValidateThirdPartyResourceUpdate(update, old *extensions.ThirdPartyResource) field.ErrorList { - allErrs := field.ErrorList{} - allErrs = append(allErrs, apivalidation.ValidateObjectMetaUpdate(&update.ObjectMeta, &old.ObjectMeta, field.NewPath("metadata"))...) - allErrs = append(allErrs, ValidateThirdPartyResource(update)...) - return allErrs -} - -func ValidateThirdPartyResourceName(name string, prefix bool) (bool, string) { - return apivalidation.NameIsDNSSubdomain(name, prefix) + return nil } func ValidateThirdPartyResource(obj *extensions.ThirdPartyResource) field.ErrorList { allErrs := field.ErrorList{} - allErrs = append(allErrs, apivalidation.ValidateObjectMeta(&obj.ObjectMeta, true, ValidateThirdPartyResourceName, field.NewPath("metadata"))...) + allErrs = append(allErrs, apivalidation.ValidateObjectMeta(&obj.ObjectMeta, false, ValidateThirdPartyResourceName, field.NewPath("metadata"))...) versions := sets.String{} for ix := range obj.Versions { version := &obj.Versions[ix] if len(version.Name) == 0 { allErrs = append(allErrs, field.Invalid(field.NewPath("versions").Index(ix).Child("name"), version, "must not be empty")) + } else { + for _, msg := range validation.IsDNS1123Label(version.Name) { + allErrs = append(allErrs, field.Invalid(field.NewPath("versions").Index(ix).Child("name"), version, msg)) + } } if versions.Has(version.Name) { allErrs = append(allErrs, field.Duplicate(field.NewPath("versions").Index(ix).Child("name"), version)) @@ -201,6 +123,9 @@ func ValidateDaemonSetSpec(spec *extensions.DaemonSetSpec, fldPath *field.Path) if err == nil && !selector.Matches(labels.Set(spec.Template.Labels)) { allErrs = append(allErrs, field.Invalid(fldPath.Child("template", "metadata", "labels"), spec.Template.Labels, "`selector` does not match template `labels`")) } + if spec.Selector != nil && len(spec.Selector.MatchLabels)+len(spec.Selector.MatchExpressions) == 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("selector"), spec.Selector, "empty selector is not valid for daemonset.")) + } allErrs = append(allErrs, apivalidation.ValidatePodTemplateSpec(&spec.Template, fldPath.Child("template"))...) // Daemons typically run on more than one node, so mark Read-Write persistent disks as invalid. @@ -215,14 +140,10 @@ func ValidateDaemonSetSpec(spec *extensions.DaemonSetSpec, fldPath *field.Path) // ValidateDaemonSetName can be used to check whether the given daemon set name is valid. // Prefix indicates this name will be used as part of generation, in which case // trailing dashes are allowed. -func ValidateDaemonSetName(name string, prefix bool) (bool, string) { - return apivalidation.NameIsDNSSubdomain(name, prefix) -} +var ValidateDaemonSetName = apivalidation.NameIsDNSSubdomain // Validates that the given name can be used as a deployment name. 
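// Illustrative sketch, not part of the upstream sources: the deleted
// wrappers here are replaced by package-level function values such as
// `var ValidateDaemonSetName = apivalidation.NameIsDNSSubdomain` above.
// The alias compiles only because the target already has the signature
// callers expect, func(name string, prefix bool) []string, and it is
// invoked exactly like the old wrapper:
//
//	msgs := ValidateDaemonSetName("my-daemon-set", false)
//	if len(msgs) > 0 {
//		// each message is one human-readable reason the name is invalid
//	}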
-func ValidateDeploymentName(name string, prefix bool) (bool, string) { - return apivalidation.NameIsDNSSubdomain(name, prefix) -} +var ValidateDeploymentName = apivalidation.NameIsDNSSubdomain func ValidatePositiveIntOrPercent(intOrPercent intstr.IntOrString, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} @@ -368,145 +289,11 @@ func ValidateDeploymentRollback(obj *extensions.DeploymentRollback) field.ErrorL } func ValidateThirdPartyResourceDataUpdate(update, old *extensions.ThirdPartyResourceData) field.ErrorList { - return ValidateThirdPartyResourceData(update) + return apivalidation.ValidateObjectMetaUpdate(&update.ObjectMeta, &old.ObjectMeta, field.NewPath("metadata")) } func ValidateThirdPartyResourceData(obj *extensions.ThirdPartyResourceData) field.ErrorList { - allErrs := field.ErrorList{} - if len(obj.Name) == 0 { - allErrs = append(allErrs, field.Required(field.NewPath("name"), "")) - } - return allErrs -} - -// TODO: generalize for other controller objects that will follow the same pattern, such as ReplicaSet and DaemonSet, and -// move to new location. Replace extensions.Job with an interface. -// -// ValidateGeneratedSelector validates that the generated selector on a controller object match the controller object -// metadata, and the labels on the pod template are as generated. -func ValidateGeneratedSelector(obj *extensions.Job) field.ErrorList { - allErrs := field.ErrorList{} - if obj.Spec.ManualSelector != nil && *obj.Spec.ManualSelector { - return allErrs - } - - if obj.Spec.Selector == nil { - return allErrs // This case should already have been checked in caller. No need for more errors. - } - - // If somehow uid was unset then we would get "controller-uid=" as the selector - // which is bad. - if obj.ObjectMeta.UID == "" { - allErrs = append(allErrs, field.Required(field.NewPath("metadata").Child("uid"), "")) - } - - // If somehow uid was unset then we would get "controller-uid=" as the selector - // which is bad. - if obj.ObjectMeta.UID == "" { - allErrs = append(allErrs, field.Required(field.NewPath("metadata").Child("uid"), "")) - } - - // If selector generation was requested, then expected labels must be - // present on pod template, and much match job's uid and name. The - // generated (not-manual) selectors/labels ensure no overlap with other - // controllers. The manual mode allows orphaning, adoption, - // backward-compatibility, and experimentation with new - // labeling/selection schemes. Automatic selector generation should - // have placed certain labels on the pod, but this could have failed if - // the user added coflicting labels. Validate that the expected - // generated ones are there. - - allErrs = append(allErrs, apivalidation.ValidateHasLabel(obj.Spec.Template.ObjectMeta, field.NewPath("spec").Child("template").Child("metadata"), "controller-uid", string(obj.UID))...) - allErrs = append(allErrs, apivalidation.ValidateHasLabel(obj.Spec.Template.ObjectMeta, field.NewPath("spec").Child("template").Child("metadata"), "job-name", string(obj.Name))...) - expectedLabels := make(map[string]string) - expectedLabels["controller-uid"] = string(obj.UID) - expectedLabels["job-name"] = string(obj.Name) - // Whether manually or automatically generated, the selector of the job must match the pods it will produce. 
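// Illustrative sketch, not part of the upstream sources: the comment above
// describes the selector/label consistency check used throughout this file
// (jobs, daemon sets, replica sets). Reduced to a standalone helper, using
// the unversioned and labels packages already imported here:
//
//	func selectorMatchesTemplate(sel *unversioned.LabelSelector, tplLabels map[string]string) (bool, error) {
//		selector, err := unversioned.LabelSelectorAsSelector(sel)
//		if err != nil {
//			return false, err // malformed selector
//		}
//		return selector.Matches(labels.Set(tplLabels)), nil
//	}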
- if selector, err := unversioned.LabelSelectorAsSelector(obj.Spec.Selector); err == nil { - if !selector.Matches(labels.Set(expectedLabels)) { - allErrs = append(allErrs, field.Invalid(field.NewPath("spec").Child("selector"), obj.Spec.Selector, "`selector` not auto-generated")) - } - } - - return allErrs -} - -func ValidateJob(job *extensions.Job) field.ErrorList { - // Jobs and rcs have the same name validation - allErrs := apivalidation.ValidateObjectMeta(&job.ObjectMeta, true, apivalidation.ValidateReplicationControllerName, field.NewPath("metadata")) - allErrs = append(allErrs, ValidateGeneratedSelector(job)...) - allErrs = append(allErrs, ValidateJobSpec(&job.Spec, field.NewPath("spec"))...) - return allErrs -} - -func ValidateJobSpec(spec *extensions.JobSpec, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - if spec.Parallelism != nil { - allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(*spec.Parallelism), fldPath.Child("parallelism"))...) - } - if spec.Completions != nil { - allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(*spec.Completions), fldPath.Child("completions"))...) - } - if spec.ActiveDeadlineSeconds != nil { - allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(*spec.ActiveDeadlineSeconds), fldPath.Child("activeDeadlineSeconds"))...) - } - if spec.Selector == nil { - allErrs = append(allErrs, field.Required(fldPath.Child("selector"), "")) - } else { - allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(spec.Selector, fldPath.Child("selector"))...) - } - - // Whether manually or automatically generated, the selector of the job must match the pods it will produce. - if selector, err := unversioned.LabelSelectorAsSelector(spec.Selector); err == nil { - labels := labels.Set(spec.Template.Labels) - if !selector.Matches(labels) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("template", "metadata", "labels"), spec.Template.Labels, "`selector` does not match template `labels`")) - } - } - - allErrs = append(allErrs, apivalidation.ValidatePodTemplateSpec(&spec.Template, fldPath.Child("template"))...) - if spec.Template.Spec.RestartPolicy != api.RestartPolicyOnFailure && - spec.Template.Spec.RestartPolicy != api.RestartPolicyNever { - allErrs = append(allErrs, field.NotSupported(fldPath.Child("template", "spec", "restartPolicy"), - spec.Template.Spec.RestartPolicy, []string{string(api.RestartPolicyOnFailure), string(api.RestartPolicyNever)})) - } - return allErrs -} - -func ValidateJobStatus(status *extensions.JobStatus, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.Active), fldPath.Child("active"))...) - allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.Succeeded), fldPath.Child("succeeded"))...) - allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.Failed), fldPath.Child("failed"))...) - return allErrs -} - -func ValidateJobUpdate(job, oldJob *extensions.Job) field.ErrorList { - allErrs := apivalidation.ValidateObjectMetaUpdate(&oldJob.ObjectMeta, &job.ObjectMeta, field.NewPath("metadata")) - allErrs = append(allErrs, ValidateJobSpecUpdate(job.Spec, oldJob.Spec, field.NewPath("spec"))...) 
- return allErrs -} - -func ValidateJobUpdateStatus(job, oldJob *extensions.Job) field.ErrorList { - allErrs := apivalidation.ValidateObjectMetaUpdate(&oldJob.ObjectMeta, &job.ObjectMeta, field.NewPath("metadata")) - allErrs = append(allErrs, ValidateJobStatusUpdate(job.Status, oldJob.Status)...) - return allErrs -} - -func ValidateJobSpecUpdate(spec, oldSpec extensions.JobSpec, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - allErrs = append(allErrs, ValidateJobSpec(&spec, fldPath)...) - allErrs = append(allErrs, apivalidation.ValidateImmutableField(spec.Completions, oldSpec.Completions, fldPath.Child("completions"))...) - allErrs = append(allErrs, apivalidation.ValidateImmutableField(spec.Selector, oldSpec.Selector, fldPath.Child("selector"))...) - allErrs = append(allErrs, apivalidation.ValidateImmutableField(spec.Template, oldSpec.Template, fldPath.Child("template"))...) - return allErrs -} - -func ValidateJobStatusUpdate(status, oldStatus extensions.JobStatus) field.ErrorList { - allErrs := field.ErrorList{} - allErrs = append(allErrs, ValidateJobStatus(&status, field.NewPath("status"))...) - return allErrs + return apivalidation.ValidateObjectMeta(&obj.ObjectMeta, true, apivalidation.NameIsDNSLabel, field.NewPath("metadata")) } // ValidateIngress tests if required fields in the Ingress are set. @@ -517,19 +304,10 @@ func ValidateIngress(ingress *extensions.Ingress) field.ErrorList { } // ValidateIngressName validates that the given name can be used as an Ingress name. -func ValidateIngressName(name string, prefix bool) (bool, string) { - return apivalidation.NameIsDNSSubdomain(name, prefix) -} +var ValidateIngressName = apivalidation.NameIsDNSSubdomain func validateIngressTLS(spec *extensions.IngressSpec, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} - // Currently the Ingress only supports HTTP(S), so a secretName is required. - // This will not be the case if we support SSL routing at L4 via SNI. - for i, t := range spec.TLS { - if t.SecretName == "" { - allErrs = append(allErrs, field.Required(fldPath.Index(i).Child("secretName"), spec.TLS[i].SecretName)) - } - } // TODO: Perform a more thorough validation of spec.TLS.Hosts that takes // the wildcard spec from RFC 6125 into account. return allErrs @@ -576,8 +354,8 @@ func validateIngressRules(IngressRules []extensions.IngressRule, fldPath *field. if len(ih.Host) > 0 { // TODO: Ports and ips are allowed in the host part of a url // according to RFC 3986, consider allowing them. - if valid, errMsg := apivalidation.NameIsDNSSubdomain(ih.Host, false); !valid { - allErrs = append(allErrs, field.Invalid(fldPath.Index(i).Child("host"), ih.Host, errMsg)) + for _, msg := range validation.IsDNS1123Subdomain(ih.Host) { + allErrs = append(allErrs, field.Invalid(fldPath.Index(i).Child("host"), ih.Host, msg)) } if isIP := (net.ParseIP(ih.Host) != nil); isIP { allErrs = append(allErrs, field.Invalid(fldPath.Index(i).Child("host"), ih.Host, "must be a DNS name, not an IP address")) @@ -632,12 +410,14 @@ func validateIngressBackend(backend *extensions.IngressBackend, fldPath *field.P // All backends must reference a single local service by name, and a single service port by name or number. 
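// Illustrative sketch, not part of the upstream sources: a recurring change
// in this diff is the move from validators returning (bool, string) to
// validators returning a []string of failure messages. Call sites now range
// over the slice and record one field.Invalid per message; an empty slice
// means the value is valid, so no ok-flag handling is needed:
//
//	for _, msg := range validation.IsDNS1123Subdomain(host) {
//		allErrs = append(allErrs, field.Invalid(fldPath.Child("host"), host, msg))
//	}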
if len(backend.ServiceName) == 0 { return append(allErrs, field.Required(fldPath.Child("serviceName"), "")) - } else if ok, errMsg := apivalidation.ValidateServiceName(backend.ServiceName, false); !ok { - allErrs = append(allErrs, field.Invalid(fldPath.Child("serviceName"), backend.ServiceName, errMsg)) + } else { + for _, msg := range apivalidation.ValidateServiceName(backend.ServiceName, false) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("serviceName"), backend.ServiceName, msg)) + } } if backend.ServicePort.Type == intstr.String { - if !validation.IsDNS1123Label(backend.ServicePort.StrVal) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("servicePort"), backend.ServicePort.StrVal, apivalidation.DNS1123LabelErrorMsg)) + for _, msg := range validation.IsDNS1123Label(backend.ServicePort.StrVal) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("servicePort"), backend.ServicePort.StrVal, msg)) } if !validation.IsValidPortName(backend.ServicePort.StrVal) { allErrs = append(allErrs, field.Invalid(fldPath.Child("servicePort"), backend.ServicePort.StrVal, apivalidation.PortNameErrorMsg)) @@ -663,9 +443,7 @@ func ValidateScale(scale *extensions.Scale) field.ErrorList { // name is valid. // Prefix indicates this name will be used as part of generation, in which case // trailing dashes are allowed. -func ValidateReplicaSetName(name string, prefix bool) (bool, string) { - return apivalidation.NameIsDNSSubdomain(name, prefix) -} +var ValidateReplicaSetName = apivalidation.NameIsDNSSubdomain // ValidateReplicaSet tests if required fields in the ReplicaSet are set. func ValidateReplicaSet(rs *extensions.ReplicaSet) field.ErrorList { @@ -717,7 +495,7 @@ func ValidateReplicaSetSpec(spec *extensions.ReplicaSetSpec, fldPath *field.Path } // Validates the given template and ensures that it is in accordance with the desired selector and replicas. -func ValidatePodTemplateSpecForReplicaSet(template *api.PodTemplateSpec, selector labels.Selector, replicas int, fldPath *field.Path) field.ErrorList { +func ValidatePodTemplateSpecForReplicaSet(template *api.PodTemplateSpec, selector labels.Selector, replicas int32, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if template == nil { allErrs = append(allErrs, field.Required(fldPath, "")) @@ -745,9 +523,7 @@ func ValidatePodTemplateSpecForReplicaSet(template *api.PodTemplateSpec, selecto // pod security policy name is valid. // Prefix indicates this name will be used as part of generation, in which case // trailing dashes are allowed. -func ValidatePodSecurityPolicyName(name string, prefix bool) (bool, string) { - return apivalidation.NameIsDNSSubdomain(name, prefix) -} +var ValidatePodSecurityPolicyName = apivalidation.NameIsDNSSubdomain func ValidatePodSecurityPolicy(psp *extensions.PodSecurityPolicy) field.ErrorList { allErrs := field.ErrorList{} @@ -761,7 +537,11 @@ func ValidatePodSecurityPolicySpec(spec *extensions.PodSecurityPolicySpec, fldPa allErrs = append(allErrs, validatePSPRunAsUser(fldPath.Child("runAsUser"), &spec.RunAsUser)...) allErrs = append(allErrs, validatePSPSELinux(fldPath.Child("seLinux"), &spec.SELinux)...) + allErrs = append(allErrs, validatePSPSupplementalGroup(fldPath.Child("supplementalGroups"), &spec.SupplementalGroups)...) + allErrs = append(allErrs, validatePSPFSGroup(fldPath.Child("fsGroup"), &spec.FSGroup)...) allErrs = append(allErrs, validatePodSecurityPolicyVolumes(fldPath, spec.Volumes)...) 
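// Illustrative sketch, not part of the upstream sources: the fsGroup and
// supplementalGroups strategies appended above are validated by helpers
// (defined below) that share one skeleton -- check the strategy rule
// against an allow-list, then validate every ID range. Here opts stands in
// for either options struct, and validateIDRanges is the helper defined
// later in this file:
//
//	supportedRules := sets.NewString("RunAsAny", "MustRunAs")
//	if !supportedRules.Has(string(opts.Rule)) {
//		allErrs = append(allErrs, field.NotSupported(fldPath.Child("rule"), opts.Rule, supportedRules.List()))
//	}
//	for idx, rng := range opts.Ranges {
//		allErrs = append(allErrs, validateIDRanges(fldPath.Child("ranges").Index(idx), rng)...)
//	}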
+ allErrs = append(allErrs, validatePSPCapsAgainstDrops(spec.RequiredDropCapabilities, spec.DefaultAddCapabilities, field.NewPath("defaultAddCapabilities"))...) + allErrs = append(allErrs, validatePSPCapsAgainstDrops(spec.RequiredDropCapabilities, spec.AllowedCapabilities, field.NewPath("allowedCapabilities"))...) return allErrs } @@ -800,24 +580,48 @@ func validatePSPRunAsUser(fldPath *field.Path, runAsUser *extensions.RunAsUserSt return allErrs } +// validatePSPFSGroup validates the FSGroupStrategyOptions fields of the PodSecurityPolicy. +func validatePSPFSGroup(fldPath *field.Path, groupOptions *extensions.FSGroupStrategyOptions) field.ErrorList { + allErrs := field.ErrorList{} + + supportedRules := sets.NewString( + string(extensions.FSGroupStrategyMustRunAs), + string(extensions.FSGroupStrategyRunAsAny), + ) + if !supportedRules.Has(string(groupOptions.Rule)) { + allErrs = append(allErrs, field.NotSupported(fldPath.Child("rule"), groupOptions.Rule, supportedRules.List())) + } + + for idx, rng := range groupOptions.Ranges { + allErrs = append(allErrs, validateIDRanges(fldPath.Child("ranges").Index(idx), rng)...) + } + return allErrs +} + +// validatePSPSupplementalGroup validates the SupplementalGroupsStrategyOptions fields of the PodSecurityPolicy. +func validatePSPSupplementalGroup(fldPath *field.Path, groupOptions *extensions.SupplementalGroupsStrategyOptions) field.ErrorList { + allErrs := field.ErrorList{} + + supportedRules := sets.NewString( + string(extensions.SupplementalGroupsStrategyRunAsAny), + string(extensions.SupplementalGroupsStrategyMustRunAs), + ) + if !supportedRules.Has(string(groupOptions.Rule)) { + allErrs = append(allErrs, field.NotSupported(fldPath.Child("rule"), groupOptions.Rule, supportedRules.List())) + } + + for idx, rng := range groupOptions.Ranges { + allErrs = append(allErrs, validateIDRanges(fldPath.Child("ranges").Index(idx), rng)...) + } + return allErrs +} + // validatePodSecurityPolicyVolumes validates the volume fields of PodSecurityPolicy. func validatePodSecurityPolicyVolumes(fldPath *field.Path, volumes []extensions.FSType) field.ErrorList { allErrs := field.ErrorList{} - allowed := sets.NewString(string(extensions.HostPath), - string(extensions.EmptyDir), - string(extensions.GCEPersistentDisk), - string(extensions.AWSElasticBlockStore), - string(extensions.GitRepo), - string(extensions.Secret), - string(extensions.NFS), - string(extensions.ISCSI), - string(extensions.Glusterfs), - string(extensions.PersistentVolumeClaim), - string(extensions.RBD), - string(extensions.Cinder), - string(extensions.CephFS), - string(extensions.DownwardAPI), - string(extensions.FC)) + allowed := psputil.GetAllFSTypesAsSet() + // add in the * value since that is a pseudo type that is not included by default + allowed.Insert(string(extensions.All)) for _, v := range volumes { if !allowed.Has(string(v)) { allErrs = append(allErrs, field.NotSupported(fldPath.Child("volumes"), v, allowed.List())) @@ -846,6 +650,31 @@ func validateIDRanges(fldPath *field.Path, rng extensions.IDRange) field.ErrorLi return allErrs } +// validatePSPCapsAgainstDrops ensures an allowed cap is not listed in the required drops. 
+func validatePSPCapsAgainstDrops(requiredDrops []api.Capability, capsToCheck []api.Capability, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if requiredDrops == nil { + return allErrs + } + for _, cap := range capsToCheck { + if hasCap(cap, requiredDrops) { + allErrs = append(allErrs, field.Invalid(fldPath, cap, + fmt.Sprintf("capability is listed in %s and requiredDropCapabilities", fldPath.String()))) + } + } + return allErrs +} + +// hasCap checks for needle in haystack. +func hasCap(needle api.Capability, haystack []api.Capability) bool { + for _, c := range haystack { + if needle == c { + return true + } + } + return false +} + // ValidatePodSecurityPolicyUpdate validates a PSP for updates. func ValidatePodSecurityPolicyUpdate(old *extensions.PodSecurityPolicy, new *extensions.PodSecurityPolicy) field.ErrorList { allErrs := field.ErrorList{} @@ -853,3 +682,58 @@ func ValidatePodSecurityPolicyUpdate(old *extensions.PodSecurityPolicy, new *ext allErrs = append(allErrs, ValidatePodSecurityPolicySpec(&new.Spec, field.NewPath("spec"))...) return allErrs } + +// ValidateNetworkPolicyName can be used to check whether the given networkpolicy +// name is valid. +func ValidateNetworkPolicyName(name string, prefix bool) []string { + return apivalidation.NameIsDNSSubdomain(name, prefix) +} + +// ValidateNetworkPolicySpec tests if required fields in the networkpolicy spec are set. +func ValidateNetworkPolicySpec(spec *extensions.NetworkPolicySpec, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(&spec.PodSelector, fldPath.Child("podSelector"))...) + + // Validate ingress rules. + for _, i := range spec.Ingress { + // TODO: Update From to be a pointer to slice as soon as auto-generation supports it. + for _, f := range i.From { + numFroms := 0 + allErrs := field.ErrorList{} + if f.PodSelector != nil { + numFroms++ + allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(f.PodSelector, fldPath.Child("podSelector"))...) + } + if f.NamespaceSelector != nil { + if numFroms > 0 { + allErrs = append(allErrs, field.Forbidden(fldPath, "may not specify more than 1 from type")) + } else { + numFroms++ + allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(f.NamespaceSelector, fldPath.Child("namespaces"))...) + } + } + + if numFroms == 0 { + // At least one of PodSelector and NamespaceSelector must be defined. + allErrs = append(allErrs, field.Required(fldPath, "must specify a from type")) + } + } + } + return allErrs +} + +// ValidateNetworkPolicy validates a networkpolicy. +func ValidateNetworkPolicy(np *extensions.NetworkPolicy) field.ErrorList { + allErrs := apivalidation.ValidateObjectMeta(&np.ObjectMeta, true, ValidateNetworkPolicyName, field.NewPath("metadata")) + allErrs = append(allErrs, ValidateNetworkPolicySpec(&np.Spec, field.NewPath("spec"))...) + return allErrs +} + +// ValidateNetworkPolicyUpdate tests if an update to a NetworkPolicy is valid. 
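// Illustrative sketch, not part of the upstream sources: the function below
// makes NetworkPolicy effectively immutable by rejecting any update where
// the new object differs from the old one under reflect.DeepEqual. The same
// guard can be narrowed to a single field when only part of an object must
// stay fixed, for example:
//
//	if !reflect.DeepEqual(update.Spec, old.Spec) {
//		allErrs = append(allErrs, field.Forbidden(field.NewPath("spec"), "field is immutable"))
//	}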
+func ValidateNetworkPolicyUpdate(np, oldNP *extensions.NetworkPolicy) field.ErrorList { + allErrs := field.ErrorList{} + if !reflect.DeepEqual(np, oldNP) { + allErrs = append(allErrs, field.Forbidden(field.NewPath("spec"), "updates to networkpolicy spec are forbidden.")) + } + return allErrs +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/extensions/validation/validation_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/extensions/validation/validation_test.go new file mode 100644 index 000000000000..c019f3ed1ac1 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/extensions/validation/validation_test.go @@ -0,0 +1,1681 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package validation + +import ( + "fmt" + "strings" + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/apis/extensions" + psputil "k8s.io/kubernetes/pkg/security/podsecuritypolicy/util" + "k8s.io/kubernetes/pkg/util/intstr" + "k8s.io/kubernetes/pkg/util/validation/field" +) + +func TestValidateDaemonSetStatusUpdate(t *testing.T) { + type dsUpdateTest struct { + old extensions.DaemonSet + update extensions.DaemonSet + } + + successCases := []dsUpdateTest{ + { + old: extensions.DaemonSet{ + ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + Status: extensions.DaemonSetStatus{ + CurrentNumberScheduled: 1, + NumberMisscheduled: 2, + DesiredNumberScheduled: 3, + }, + }, + update: extensions.DaemonSet{ + ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + Status: extensions.DaemonSetStatus{ + CurrentNumberScheduled: 1, + NumberMisscheduled: 1, + DesiredNumberScheduled: 3, + }, + }, + }, + } + + for _, successCase := range successCases { + successCase.old.ObjectMeta.ResourceVersion = "1" + successCase.update.ObjectMeta.ResourceVersion = "1" + if errs := ValidateDaemonSetStatusUpdate(&successCase.update, &successCase.old); len(errs) != 0 { + t.Errorf("expected success: %v", errs) + } + } + errorCases := map[string]dsUpdateTest{ + "negative values": { + old: extensions.DaemonSet{ + ObjectMeta: api.ObjectMeta{ + Name: "abc", + Namespace: api.NamespaceDefault, + ResourceVersion: "10", + }, + Status: extensions.DaemonSetStatus{ + CurrentNumberScheduled: 1, + NumberMisscheduled: 2, + DesiredNumberScheduled: 3, + }, + }, + update: extensions.DaemonSet{ + ObjectMeta: api.ObjectMeta{ + Name: "abc", + Namespace: api.NamespaceDefault, + ResourceVersion: "10", + }, + Status: extensions.DaemonSetStatus{ + CurrentNumberScheduled: -1, + NumberMisscheduled: -1, + DesiredNumberScheduled: -3, + }, + }, + }, + } + + for testName, errorCase := range errorCases { + if errs := ValidateDaemonSetStatusUpdate(&errorCase.update, &errorCase.old); len(errs) == 0 { + t.Errorf("expected failure: %s", testName) + } + } +} + +func TestValidateDaemonSetUpdate(t *testing.T) { + validSelector := map[string]string{"a": "b"} + validSelector2 := map[string]string{"c": "d"} 
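// Illustrative sketch, not part of the upstream sources: the update tests in
// this file share a table-driven shape -- build (old, update) pairs, set
// matching ResourceVersions on the success cases, and expect errors only
// from the error cases. Stripped to its skeleton:
//
//	for _, tc := range successCases {
//		tc.old.ObjectMeta.ResourceVersion = "1"
//		tc.update.ObjectMeta.ResourceVersion = "1"
//		if errs := ValidateDaemonSetUpdate(&tc.update, &tc.old); len(errs) != 0 {
//			t.Errorf("expected success: %v", errs)
//		}
//	}
//	for name, tc := range errorCases {
//		if errs := ValidateDaemonSetUpdate(&tc.update, &tc.old); len(errs) == 0 {
//			t.Errorf("expected failure: %s", name)
//		}
//	}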
+ invalidSelector := map[string]string{"NoUppercaseOrSpecialCharsLike=Equals": "b"} + + validPodSpecAbc := api.PodSpec{ + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + Containers: []api.Container{{Name: "abc", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + } + validPodSpecDef := api.PodSpec{ + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + Containers: []api.Container{{Name: "def", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + } + validPodSpecNodeSelector := api.PodSpec{ + NodeSelector: validSelector, + NodeName: "xyz", + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + Containers: []api.Container{{Name: "abc", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + } + validPodSpecVolume := api.PodSpec{ + Volumes: []api.Volume{{Name: "gcepd", VolumeSource: api.VolumeSource{GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{PDName: "my-PD", FSType: "ext4", Partition: 1, ReadOnly: false}}}}, + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + Containers: []api.Container{{Name: "abc", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + } + + validPodTemplateAbc := api.PodTemplate{ + Template: api.PodTemplateSpec{ + ObjectMeta: api.ObjectMeta{ + Labels: validSelector, + }, + Spec: validPodSpecAbc, + }, + } + validPodTemplateNodeSelector := api.PodTemplate{ + Template: api.PodTemplateSpec{ + ObjectMeta: api.ObjectMeta{ + Labels: validSelector, + }, + Spec: validPodSpecNodeSelector, + }, + } + validPodTemplateAbc2 := api.PodTemplate{ + Template: api.PodTemplateSpec{ + ObjectMeta: api.ObjectMeta{ + Labels: validSelector2, + }, + Spec: validPodSpecAbc, + }, + } + validPodTemplateDef := api.PodTemplate{ + Template: api.PodTemplateSpec{ + ObjectMeta: api.ObjectMeta{ + Labels: validSelector2, + }, + Spec: validPodSpecDef, + }, + } + invalidPodTemplate := api.PodTemplate{ + Template: api.PodTemplateSpec{ + Spec: api.PodSpec{ + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + }, + ObjectMeta: api.ObjectMeta{ + Labels: invalidSelector, + }, + }, + } + readWriteVolumePodTemplate := api.PodTemplate{ + Template: api.PodTemplateSpec{ + ObjectMeta: api.ObjectMeta{ + Labels: validSelector, + }, + Spec: validPodSpecVolume, + }, + } + + type dsUpdateTest struct { + old extensions.DaemonSet + update extensions.DaemonSet + } + successCases := []dsUpdateTest{ + { + old: extensions.DaemonSet{ + ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + Spec: extensions.DaemonSetSpec{ + Selector: &unversioned.LabelSelector{MatchLabels: validSelector}, + Template: validPodTemplateAbc.Template, + }, + }, + update: extensions.DaemonSet{ + ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + Spec: extensions.DaemonSetSpec{ + Selector: &unversioned.LabelSelector{MatchLabels: validSelector}, + Template: validPodTemplateAbc.Template, + }, + }, + }, + { + old: extensions.DaemonSet{ + ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + Spec: extensions.DaemonSetSpec{ + Selector: &unversioned.LabelSelector{MatchLabels: validSelector}, + Template: validPodTemplateAbc.Template, + }, + }, + update: extensions.DaemonSet{ + ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + Spec: extensions.DaemonSetSpec{ + Selector: &unversioned.LabelSelector{MatchLabels: validSelector2}, + Template: validPodTemplateAbc2.Template, + }, + }, + }, + { + old: extensions.DaemonSet{ + ObjectMeta: 
api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + Spec: extensions.DaemonSetSpec{ + Selector: &unversioned.LabelSelector{MatchLabels: validSelector}, + Template: validPodTemplateAbc.Template, + }, + }, + update: extensions.DaemonSet{ + ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + Spec: extensions.DaemonSetSpec{ + Selector: &unversioned.LabelSelector{MatchLabels: validSelector}, + Template: validPodTemplateNodeSelector.Template, + }, + }, + }, + } + for _, successCase := range successCases { + successCase.old.ObjectMeta.ResourceVersion = "1" + successCase.update.ObjectMeta.ResourceVersion = "1" + if errs := ValidateDaemonSetUpdate(&successCase.update, &successCase.old); len(errs) != 0 { + t.Errorf("expected success: %v", errs) + } + } + errorCases := map[string]dsUpdateTest{ + "change daemon name": { + old: extensions.DaemonSet{ + ObjectMeta: api.ObjectMeta{Name: "", Namespace: api.NamespaceDefault}, + Spec: extensions.DaemonSetSpec{ + Selector: &unversioned.LabelSelector{MatchLabels: validSelector}, + Template: validPodTemplateAbc.Template, + }, + }, + update: extensions.DaemonSet{ + ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + Spec: extensions.DaemonSetSpec{ + Selector: &unversioned.LabelSelector{MatchLabels: validSelector}, + Template: validPodTemplateAbc.Template, + }, + }, + }, + "invalid selector": { + old: extensions.DaemonSet{ + ObjectMeta: api.ObjectMeta{Name: "", Namespace: api.NamespaceDefault}, + Spec: extensions.DaemonSetSpec{ + Selector: &unversioned.LabelSelector{MatchLabels: validSelector}, + Template: validPodTemplateAbc.Template, + }, + }, + update: extensions.DaemonSet{ + ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + Spec: extensions.DaemonSetSpec{ + Selector: &unversioned.LabelSelector{MatchLabels: invalidSelector}, + Template: validPodTemplateAbc.Template, + }, + }, + }, + "invalid pod": { + old: extensions.DaemonSet{ + ObjectMeta: api.ObjectMeta{Name: "", Namespace: api.NamespaceDefault}, + Spec: extensions.DaemonSetSpec{ + Selector: &unversioned.LabelSelector{MatchLabels: validSelector}, + Template: validPodTemplateAbc.Template, + }, + }, + update: extensions.DaemonSet{ + ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + Spec: extensions.DaemonSetSpec{ + Selector: &unversioned.LabelSelector{MatchLabels: validSelector}, + Template: invalidPodTemplate.Template, + }, + }, + }, + "change container image": { + old: extensions.DaemonSet{ + ObjectMeta: api.ObjectMeta{Name: "", Namespace: api.NamespaceDefault}, + Spec: extensions.DaemonSetSpec{ + Selector: &unversioned.LabelSelector{MatchLabels: validSelector}, + Template: validPodTemplateAbc.Template, + }, + }, + update: extensions.DaemonSet{ + ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + Spec: extensions.DaemonSetSpec{ + Selector: &unversioned.LabelSelector{MatchLabels: validSelector}, + Template: validPodTemplateDef.Template, + }, + }, + }, + "read-write volume": { + old: extensions.DaemonSet{ + ObjectMeta: api.ObjectMeta{Name: "", Namespace: api.NamespaceDefault}, + Spec: extensions.DaemonSetSpec{ + Selector: &unversioned.LabelSelector{MatchLabels: validSelector}, + Template: validPodTemplateAbc.Template, + }, + }, + update: extensions.DaemonSet{ + ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + Spec: extensions.DaemonSetSpec{ + Selector: &unversioned.LabelSelector{MatchLabels: validSelector}, + Template: 
readWriteVolumePodTemplate.Template, + }, + }, + }, + "invalid update strategy": { + old: extensions.DaemonSet{ + ObjectMeta: api.ObjectMeta{Name: "", Namespace: api.NamespaceDefault}, + Spec: extensions.DaemonSetSpec{ + Selector: &unversioned.LabelSelector{MatchLabels: validSelector}, + Template: validPodTemplateAbc.Template, + }, + }, + update: extensions.DaemonSet{ + ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + Spec: extensions.DaemonSetSpec{ + Selector: &unversioned.LabelSelector{MatchLabels: invalidSelector}, + Template: validPodTemplateAbc.Template, + }, + }, + }, + } + for testName, errorCase := range errorCases { + if errs := ValidateDaemonSetUpdate(&errorCase.update, &errorCase.old); len(errs) == 0 { + t.Errorf("expected failure: %s", testName) + } + } +} + +func TestValidateDaemonSet(t *testing.T) { + validSelector := map[string]string{"a": "b"} + validPodTemplate := api.PodTemplate{ + Template: api.PodTemplateSpec{ + ObjectMeta: api.ObjectMeta{ + Labels: validSelector, + }, + Spec: api.PodSpec{ + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + Containers: []api.Container{{Name: "abc", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + }, + }, + } + invalidSelector := map[string]string{"NoUppercaseOrSpecialCharsLike=Equals": "b"} + invalidPodTemplate := api.PodTemplate{ + Template: api.PodTemplateSpec{ + Spec: api.PodSpec{ + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + }, + ObjectMeta: api.ObjectMeta{ + Labels: invalidSelector, + }, + }, + } + successCases := []extensions.DaemonSet{ + { + ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + Spec: extensions.DaemonSetSpec{ + Selector: &unversioned.LabelSelector{MatchLabels: validSelector}, + Template: validPodTemplate.Template, + }, + }, + { + ObjectMeta: api.ObjectMeta{Name: "abc-123", Namespace: api.NamespaceDefault}, + Spec: extensions.DaemonSetSpec{ + Selector: &unversioned.LabelSelector{MatchLabels: validSelector}, + Template: validPodTemplate.Template, + }, + }, + } + for _, successCase := range successCases { + if errs := ValidateDaemonSet(&successCase); len(errs) != 0 { + t.Errorf("expected success: %v", errs) + } + } + + errorCases := map[string]extensions.DaemonSet{ + "zero-length ID": { + ObjectMeta: api.ObjectMeta{Name: "", Namespace: api.NamespaceDefault}, + Spec: extensions.DaemonSetSpec{ + Selector: &unversioned.LabelSelector{MatchLabels: validSelector}, + Template: validPodTemplate.Template, + }, + }, + "missing-namespace": { + ObjectMeta: api.ObjectMeta{Name: "abc-123"}, + Spec: extensions.DaemonSetSpec{ + Selector: &unversioned.LabelSelector{MatchLabels: validSelector}, + Template: validPodTemplate.Template, + }, + }, + "nil selector": { + ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + Spec: extensions.DaemonSetSpec{ + Template: validPodTemplate.Template, + }, + }, + "empty selector": { + ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + Spec: extensions.DaemonSetSpec{ + Selector: &unversioned.LabelSelector{}, + Template: validPodTemplate.Template, + }, + }, + "selector_doesnt_match": { + ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + Spec: extensions.DaemonSetSpec{ + Selector: &unversioned.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}, + Template: validPodTemplate.Template, + }, + }, + "invalid template": { + ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + Spec: 
extensions.DaemonSetSpec{ + Selector: &unversioned.LabelSelector{MatchLabels: validSelector}, + }, + }, + "invalid_label": { + ObjectMeta: api.ObjectMeta{ + Name: "abc-123", + Namespace: api.NamespaceDefault, + Labels: map[string]string{ + "NoUppercaseOrSpecialCharsLike=Equals": "bar", + }, + }, + Spec: extensions.DaemonSetSpec{ + Selector: &unversioned.LabelSelector{MatchLabels: validSelector}, + Template: validPodTemplate.Template, + }, + }, + "invalid_label 2": { + ObjectMeta: api.ObjectMeta{ + Name: "abc-123", + Namespace: api.NamespaceDefault, + Labels: map[string]string{ + "NoUppercaseOrSpecialCharsLike=Equals": "bar", + }, + }, + Spec: extensions.DaemonSetSpec{ + Template: invalidPodTemplate.Template, + }, + }, + "invalid_annotation": { + ObjectMeta: api.ObjectMeta{ + Name: "abc-123", + Namespace: api.NamespaceDefault, + Annotations: map[string]string{ + "NoUppercaseOrSpecialCharsLike=Equals": "bar", + }, + }, + Spec: extensions.DaemonSetSpec{ + Selector: &unversioned.LabelSelector{MatchLabels: validSelector}, + Template: validPodTemplate.Template, + }, + }, + "invalid restart policy 1": { + ObjectMeta: api.ObjectMeta{ + Name: "abc-123", + Namespace: api.NamespaceDefault, + }, + Spec: extensions.DaemonSetSpec{ + Selector: &unversioned.LabelSelector{MatchLabels: validSelector}, + Template: api.PodTemplateSpec{ + Spec: api.PodSpec{ + RestartPolicy: api.RestartPolicyOnFailure, + DNSPolicy: api.DNSClusterFirst, + Containers: []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + }, + ObjectMeta: api.ObjectMeta{ + Labels: validSelector, + }, + }, + }, + }, + "invalid restart policy 2": { + ObjectMeta: api.ObjectMeta{ + Name: "abc-123", + Namespace: api.NamespaceDefault, + }, + Spec: extensions.DaemonSetSpec{ + Selector: &unversioned.LabelSelector{MatchLabels: validSelector}, + Template: api.PodTemplateSpec{ + Spec: api.PodSpec{ + RestartPolicy: api.RestartPolicyNever, + DNSPolicy: api.DNSClusterFirst, + Containers: []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + }, + ObjectMeta: api.ObjectMeta{ + Labels: validSelector, + }, + }, + }, + }, + } + for k, v := range errorCases { + errs := ValidateDaemonSet(&v) + if len(errs) == 0 { + t.Errorf("expected failure for %s", k) + } + for i := range errs { + field := errs[i].Field + if !strings.HasPrefix(field, "spec.template.") && + !strings.HasPrefix(field, "spec.updateStrategy") && + field != "metadata.name" && + field != "metadata.namespace" && + field != "spec.selector" && + field != "spec.template" && + field != "GCEPersistentDisk.ReadOnly" && + field != "spec.template.labels" && + field != "metadata.annotations" && + field != "metadata.labels" { + t.Errorf("%s: missing prefix for: %v", k, errs[i]) + } + } + } +} + +func validDeployment() *extensions.Deployment { + return &extensions.Deployment{ + ObjectMeta: api.ObjectMeta{ + Name: "abc", + Namespace: api.NamespaceDefault, + }, + Spec: extensions.DeploymentSpec{ + Selector: &unversioned.LabelSelector{ + MatchLabels: map[string]string{ + "name": "abc", + }, + }, + Template: api.PodTemplateSpec{ + ObjectMeta: api.ObjectMeta{ + Name: "abc", + Namespace: api.NamespaceDefault, + Labels: map[string]string{ + "name": "abc", + }, + }, + Spec: api.PodSpec{ + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSDefault, + Containers: []api.Container{ + { + Name: "nginx", + Image: "image", + ImagePullPolicy: api.PullNever, + }, + }, + }, + }, + RollbackTo: &extensions.RollbackConfig{ + Revision: 1, + }, + }, + } +} + +func 
TestValidateDeployment(t *testing.T) { + successCases := []*extensions.Deployment{ + validDeployment(), + } + for _, successCase := range successCases { + if errs := ValidateDeployment(successCase); len(errs) != 0 { + t.Errorf("expected success: %v", errs) + } + } + + errorCases := map[string]*extensions.Deployment{} + errorCases["metadata.name: Required value"] = &extensions.Deployment{ + ObjectMeta: api.ObjectMeta{ + Namespace: api.NamespaceDefault, + }, + } + // selector should match the labels in pod template. + invalidSelectorDeployment := validDeployment() + invalidSelectorDeployment.Spec.Selector = &unversioned.LabelSelector{ + MatchLabels: map[string]string{ + "name": "def", + }, + } + errorCases["`selector` does not match template `labels`"] = invalidSelectorDeployment + + // RestartPolicy should be always. + invalidRestartPolicyDeployment := validDeployment() + invalidRestartPolicyDeployment.Spec.Template.Spec.RestartPolicy = api.RestartPolicyNever + errorCases["Unsupported value: \"Never\""] = invalidRestartPolicyDeployment + + // rollingUpdate should be nil for recreate. + invalidRecreateDeployment := validDeployment() + invalidRecreateDeployment.Spec.Strategy = extensions.DeploymentStrategy{ + Type: extensions.RecreateDeploymentStrategyType, + RollingUpdate: &extensions.RollingUpdateDeployment{}, + } + errorCases["may not be specified when strategy `type` is 'Recreate'"] = invalidRecreateDeployment + + // MaxSurge should be in the form of 20%. + invalidMaxSurgeDeployment := validDeployment() + invalidMaxSurgeDeployment.Spec.Strategy = extensions.DeploymentStrategy{ + Type: extensions.RollingUpdateDeploymentStrategyType, + RollingUpdate: &extensions.RollingUpdateDeployment{ + MaxSurge: intstr.FromString("20Percent"), + }, + } + errorCases["must be an integer or percentage"] = invalidMaxSurgeDeployment + + // MaxSurge and MaxUnavailable cannot both be zero. + invalidRollingUpdateDeployment := validDeployment() + invalidRollingUpdateDeployment.Spec.Strategy = extensions.DeploymentStrategy{ + Type: extensions.RollingUpdateDeploymentStrategyType, + RollingUpdate: &extensions.RollingUpdateDeployment{ + MaxSurge: intstr.FromString("0%"), + MaxUnavailable: intstr.FromInt(0), + }, + } + errorCases["may not be 0 when `maxSurge` is 0"] = invalidRollingUpdateDeployment + + // MaxUnavailable should not be more than 100%. 
+ invalidMaxUnavailableDeployment := validDeployment() + invalidMaxUnavailableDeployment.Spec.Strategy = extensions.DeploymentStrategy{ + Type: extensions.RollingUpdateDeploymentStrategyType, + RollingUpdate: &extensions.RollingUpdateDeployment{ + MaxUnavailable: intstr.FromString("110%"), + }, + } + errorCases["must not be greater than 100%"] = invalidMaxUnavailableDeployment + + // Rollback.Revision must be non-negative + invalidRollbackRevisionDeployment := validDeployment() + invalidRollbackRevisionDeployment.Spec.RollbackTo.Revision = -3 + errorCases["must be greater than or equal to 0"] = invalidRollbackRevisionDeployment + + for k, v := range errorCases { + errs := ValidateDeployment(v) + if len(errs) == 0 { + t.Errorf("[%s] expected failure", k) + } else if !strings.Contains(errs[0].Error(), k) { + t.Errorf("unexpected error: %q, expected: %q", errs[0].Error(), k) + } + } +} + +func validDeploymentRollback() *extensions.DeploymentRollback { + return &extensions.DeploymentRollback{ + Name: "abc", + UpdatedAnnotations: map[string]string{ + "created-by": "abc", + }, + RollbackTo: extensions.RollbackConfig{ + Revision: 1, + }, + } +} + +func TestValidateDeploymentRollback(t *testing.T) { + noAnnotation := validDeploymentRollback() + noAnnotation.UpdatedAnnotations = nil + successCases := []*extensions.DeploymentRollback{ + validDeploymentRollback(), + noAnnotation, + } + for _, successCase := range successCases { + if errs := ValidateDeploymentRollback(successCase); len(errs) != 0 { + t.Errorf("expected success: %v", errs) + } + } + + errorCases := map[string]*extensions.DeploymentRollback{} + invalidNoName := validDeploymentRollback() + invalidNoName.Name = "" + errorCases["name: Required value"] = invalidNoName + + for k, v := range errorCases { + errs := ValidateDeploymentRollback(v) + if len(errs) == 0 { + t.Errorf("[%s] expected failure", k) + } else if !strings.Contains(errs[0].Error(), k) { + t.Errorf("unexpected error: %q, expected: %q", errs[0].Error(), k) + } + } +} + +type ingressRules map[string]string + +func TestValidateIngress(t *testing.T) { + defaultBackend := extensions.IngressBackend{ + ServiceName: "default-backend", + ServicePort: intstr.FromInt(80), + } + + newValid := func() extensions.Ingress { + return extensions.Ingress{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Namespace: api.NamespaceDefault, + }, + Spec: extensions.IngressSpec{ + Backend: &extensions.IngressBackend{ + ServiceName: "default-backend", + ServicePort: intstr.FromInt(80), + }, + Rules: []extensions.IngressRule{ + { + Host: "foo.bar.com", + IngressRuleValue: extensions.IngressRuleValue{ + HTTP: &extensions.HTTPIngressRuleValue{ + Paths: []extensions.HTTPIngressPath{ + { + Path: "/foo", + Backend: defaultBackend, + }, + }, + }, + }, + }, + }, + }, + Status: extensions.IngressStatus{ + LoadBalancer: api.LoadBalancerStatus{ + Ingress: []api.LoadBalancerIngress{ + {IP: "127.0.0.1"}, + }, + }, + }, + } + } + servicelessBackend := newValid() + servicelessBackend.Spec.Backend.ServiceName = "" + invalidNameBackend := newValid() + invalidNameBackend.Spec.Backend.ServiceName = "defaultBackend" + noPortBackend := newValid() + noPortBackend.Spec.Backend = &extensions.IngressBackend{ServiceName: defaultBackend.ServiceName} + noForwardSlashPath := newValid() + noForwardSlashPath.Spec.Rules[0].IngressRuleValue.HTTP.Paths = []extensions.HTTPIngressPath{ + { + Path: "invalid", + Backend: defaultBackend, + }, + } + noPaths := newValid() + noPaths.Spec.Rules[0].IngressRuleValue.HTTP.Paths = 
[]extensions.HTTPIngressPath{} + badHost := newValid() + badHost.Spec.Rules[0].Host = "foobar:80" + badRegexPath := newValid() + badPathExpr := "/invalid[" + badRegexPath.Spec.Rules[0].IngressRuleValue.HTTP.Paths = []extensions.HTTPIngressPath{ + { + Path: badPathExpr, + Backend: defaultBackend, + }, + } + badPathErr := fmt.Sprintf("spec.rules[0].http.paths[0].path: Invalid value: '%v'", badPathExpr) + hostIP := "127.0.0.1" + badHostIP := newValid() + badHostIP.Spec.Rules[0].Host = hostIP + badHostIPErr := fmt.Sprintf("spec.rules[0].host: Invalid value: '%v'", hostIP) + + errorCases := map[string]extensions.Ingress{ + "spec.backend.serviceName: Required value": servicelessBackend, + "spec.backend.serviceName: Invalid value": invalidNameBackend, + "spec.backend.servicePort: Invalid value": noPortBackend, + "spec.rules[0].host: Invalid value": badHost, + "spec.rules[0].http.paths: Required value": noPaths, + "spec.rules[0].http.paths[0].path: Invalid value": noForwardSlashPath, + } + errorCases[badPathErr] = badRegexPath + errorCases[badHostIPErr] = badHostIP + + for k, v := range errorCases { + errs := ValidateIngress(&v) + if len(errs) == 0 { + t.Errorf("expected failure for %q", k) + } else { + s := strings.Split(k, ":") + err := errs[0] + if err.Field != s[0] || !strings.Contains(err.Error(), s[1]) { + t.Errorf("unexpected error: %q, expected: %q", err, k) + } + } + } +} + +func TestValidateIngressStatusUpdate(t *testing.T) { + defaultBackend := extensions.IngressBackend{ + ServiceName: "default-backend", + ServicePort: intstr.FromInt(80), + } + + newValid := func() extensions.Ingress { + return extensions.Ingress{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Namespace: api.NamespaceDefault, + ResourceVersion: "9", + }, + Spec: extensions.IngressSpec{ + Backend: &extensions.IngressBackend{ + ServiceName: "default-backend", + ServicePort: intstr.FromInt(80), + }, + Rules: []extensions.IngressRule{ + { + Host: "foo.bar.com", + IngressRuleValue: extensions.IngressRuleValue{ + HTTP: &extensions.HTTPIngressRuleValue{ + Paths: []extensions.HTTPIngressPath{ + { + Path: "/foo", + Backend: defaultBackend, + }, + }, + }, + }, + }, + }, + }, + Status: extensions.IngressStatus{ + LoadBalancer: api.LoadBalancerStatus{ + Ingress: []api.LoadBalancerIngress{ + {IP: "127.0.0.1", Hostname: "foo.bar.com"}, + }, + }, + }, + } + } + oldValue := newValid() + newValue := newValid() + newValue.Status = extensions.IngressStatus{ + LoadBalancer: api.LoadBalancerStatus{ + Ingress: []api.LoadBalancerIngress{ + {IP: "127.0.0.2", Hostname: "foo.com"}, + }, + }, + } + invalidIP := newValid() + invalidIP.Status = extensions.IngressStatus{ + LoadBalancer: api.LoadBalancerStatus{ + Ingress: []api.LoadBalancerIngress{ + {IP: "abcd", Hostname: "foo.com"}, + }, + }, + } + invalidHostname := newValid() + invalidHostname.Status = extensions.IngressStatus{ + LoadBalancer: api.LoadBalancerStatus{ + Ingress: []api.LoadBalancerIngress{ + {IP: "127.0.0.1", Hostname: "127.0.0.1"}, + }, + }, + } + + errs := ValidateIngressStatusUpdate(&newValue, &oldValue) + if len(errs) != 0 { + t.Errorf("Unexpected error %v", errs) + } + + errorCases := map[string]extensions.Ingress{ + "status.loadBalancer.ingress[0].ip: Invalid value": invalidIP, + "status.loadBalancer.ingress[0].hostname: Invalid value": invalidHostname, + } + for k, v := range errorCases { + errs := ValidateIngressStatusUpdate(&v, &oldValue) + if len(errs) == 0 { + t.Errorf("expected failure for %s", k) + } else { + s := strings.Split(k, ":") + err := errs[0] + if err.Field 
!= s[0] || !strings.Contains(err.Error(), s[1]) { + t.Errorf("unexpected error: %q, expected: %q", err, k) + } + } + } +} + +func TestValidateScale(t *testing.T) { + successCases := []extensions.Scale{ + { + ObjectMeta: api.ObjectMeta{ + Name: "frontend", + Namespace: api.NamespaceDefault, + }, + Spec: extensions.ScaleSpec{ + Replicas: 1, + }, + }, + { + ObjectMeta: api.ObjectMeta{ + Name: "frontend", + Namespace: api.NamespaceDefault, + }, + Spec: extensions.ScaleSpec{ + Replicas: 10, + }, + }, + { + ObjectMeta: api.ObjectMeta{ + Name: "frontend", + Namespace: api.NamespaceDefault, + }, + Spec: extensions.ScaleSpec{ + Replicas: 0, + }, + }, + } + + for _, successCase := range successCases { + if errs := ValidateScale(&successCase); len(errs) != 0 { + t.Errorf("expected success: %v", errs) + } + } + + errorCases := []struct { + scale extensions.Scale + msg string + }{ + { + scale: extensions.Scale{ + ObjectMeta: api.ObjectMeta{ + Name: "frontend", + Namespace: api.NamespaceDefault, + }, + Spec: extensions.ScaleSpec{ + Replicas: -1, + }, + }, + msg: "must be greater than or equal to 0", + }, + } + + for _, c := range errorCases { + if errs := ValidateScale(&c.scale); len(errs) == 0 { + t.Errorf("expected failure for %s", c.msg) + } else if !strings.Contains(errs[0].Error(), c.msg) { + t.Errorf("unexpected error: %v, expected: %s", errs[0], c.msg) + } + } +} + +func TestValidateReplicaSetStatusUpdate(t *testing.T) { + validLabels := map[string]string{"a": "b"} + validPodTemplate := api.PodTemplate{ + Template: api.PodTemplateSpec{ + ObjectMeta: api.ObjectMeta{ + Labels: validLabels, + }, + Spec: api.PodSpec{ + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + Containers: []api.Container{{Name: "abc", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + }, + }, + } + type rcUpdateTest struct { + old extensions.ReplicaSet + update extensions.ReplicaSet + } + successCases := []rcUpdateTest{ + { + old: extensions.ReplicaSet{ + ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + Spec: extensions.ReplicaSetSpec{ + Selector: &unversioned.LabelSelector{MatchLabels: validLabels}, + Template: validPodTemplate.Template, + }, + Status: extensions.ReplicaSetStatus{ + Replicas: 2, + }, + }, + update: extensions.ReplicaSet{ + ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + Spec: extensions.ReplicaSetSpec{ + Replicas: 3, + Selector: &unversioned.LabelSelector{MatchLabels: validLabels}, + Template: validPodTemplate.Template, + }, + Status: extensions.ReplicaSetStatus{ + Replicas: 4, + }, + }, + }, + } + for _, successCase := range successCases { + successCase.old.ObjectMeta.ResourceVersion = "1" + successCase.update.ObjectMeta.ResourceVersion = "1" + if errs := ValidateReplicaSetStatusUpdate(&successCase.update, &successCase.old); len(errs) != 0 { + t.Errorf("expected success: %v", errs) + } + } + errorCases := map[string]rcUpdateTest{ + "negative replicas": { + old: extensions.ReplicaSet{ + ObjectMeta: api.ObjectMeta{Name: "", Namespace: api.NamespaceDefault}, + Spec: extensions.ReplicaSetSpec{ + Selector: &unversioned.LabelSelector{MatchLabels: validLabels}, + Template: validPodTemplate.Template, + }, + Status: extensions.ReplicaSetStatus{ + Replicas: 3, + }, + }, + update: extensions.ReplicaSet{ + ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + Spec: extensions.ReplicaSetSpec{ + Replicas: 2, + Selector: &unversioned.LabelSelector{MatchLabels: validLabels}, + Template: validPodTemplate.Template, + }, + 
Status: extensions.ReplicaSetStatus{ + Replicas: -3, + }, + }, + }, + } + for testName, errorCase := range errorCases { + if errs := ValidateReplicaSetStatusUpdate(&errorCase.update, &errorCase.old); len(errs) == 0 { + t.Errorf("expected failure: %s", testName) + } + } + +} + +func TestValidateReplicaSetUpdate(t *testing.T) { + validLabels := map[string]string{"a": "b"} + validPodTemplate := api.PodTemplate{ + Template: api.PodTemplateSpec{ + ObjectMeta: api.ObjectMeta{ + Labels: validLabels, + }, + Spec: api.PodSpec{ + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + Containers: []api.Container{{Name: "abc", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + }, + }, + } + readWriteVolumePodTemplate := api.PodTemplate{ + Template: api.PodTemplateSpec{ + ObjectMeta: api.ObjectMeta{ + Labels: validLabels, + }, + Spec: api.PodSpec{ + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + Containers: []api.Container{{Name: "abc", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + Volumes: []api.Volume{{Name: "gcepd", VolumeSource: api.VolumeSource{GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{PDName: "my-PD", FSType: "ext4", Partition: 1, ReadOnly: false}}}}, + }, + }, + } + invalidLabels := map[string]string{"NoUppercaseOrSpecialCharsLike=Equals": "b"} + invalidPodTemplate := api.PodTemplate{ + Template: api.PodTemplateSpec{ + Spec: api.PodSpec{ + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + }, + ObjectMeta: api.ObjectMeta{ + Labels: invalidLabels, + }, + }, + } + type rcUpdateTest struct { + old extensions.ReplicaSet + update extensions.ReplicaSet + } + successCases := []rcUpdateTest{ + { + old: extensions.ReplicaSet{ + ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + Spec: extensions.ReplicaSetSpec{ + Selector: &unversioned.LabelSelector{MatchLabels: validLabels}, + Template: validPodTemplate.Template, + }, + }, + update: extensions.ReplicaSet{ + ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + Spec: extensions.ReplicaSetSpec{ + Replicas: 3, + Selector: &unversioned.LabelSelector{MatchLabels: validLabels}, + Template: validPodTemplate.Template, + }, + }, + }, + { + old: extensions.ReplicaSet{ + ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + Spec: extensions.ReplicaSetSpec{ + Selector: &unversioned.LabelSelector{MatchLabels: validLabels}, + Template: validPodTemplate.Template, + }, + }, + update: extensions.ReplicaSet{ + ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + Spec: extensions.ReplicaSetSpec{ + Replicas: 1, + Selector: &unversioned.LabelSelector{MatchLabels: validLabels}, + Template: readWriteVolumePodTemplate.Template, + }, + }, + }, + } + for _, successCase := range successCases { + successCase.old.ObjectMeta.ResourceVersion = "1" + successCase.update.ObjectMeta.ResourceVersion = "1" + if errs := ValidateReplicaSetUpdate(&successCase.update, &successCase.old); len(errs) != 0 { + t.Errorf("expected success: %v", errs) + } + } + errorCases := map[string]rcUpdateTest{ + "more than one read/write": { + old: extensions.ReplicaSet{ + ObjectMeta: api.ObjectMeta{Name: "", Namespace: api.NamespaceDefault}, + Spec: extensions.ReplicaSetSpec{ + Selector: &unversioned.LabelSelector{MatchLabels: validLabels}, + Template: validPodTemplate.Template, + }, + }, + update: extensions.ReplicaSet{ + ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + Spec: 
extensions.ReplicaSetSpec{ + Replicas: 2, + Selector: &unversioned.LabelSelector{MatchLabels: validLabels}, + Template: readWriteVolumePodTemplate.Template, + }, + }, + }, + "invalid selector": { + old: extensions.ReplicaSet{ + ObjectMeta: api.ObjectMeta{Name: "", Namespace: api.NamespaceDefault}, + Spec: extensions.ReplicaSetSpec{ + Selector: &unversioned.LabelSelector{MatchLabels: validLabels}, + Template: validPodTemplate.Template, + }, + }, + update: extensions.ReplicaSet{ + ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + Spec: extensions.ReplicaSetSpec{ + Replicas: 2, + Selector: &unversioned.LabelSelector{MatchLabels: invalidLabels}, + Template: validPodTemplate.Template, + }, + }, + }, + "invalid pod": { + old: extensions.ReplicaSet{ + ObjectMeta: api.ObjectMeta{Name: "", Namespace: api.NamespaceDefault}, + Spec: extensions.ReplicaSetSpec{ + Selector: &unversioned.LabelSelector{MatchLabels: validLabels}, + Template: validPodTemplate.Template, + }, + }, + update: extensions.ReplicaSet{ + ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + Spec: extensions.ReplicaSetSpec{ + Replicas: 2, + Selector: &unversioned.LabelSelector{MatchLabels: validLabels}, + Template: invalidPodTemplate.Template, + }, + }, + }, + "negative replicas": { + old: extensions.ReplicaSet{ + ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + Spec: extensions.ReplicaSetSpec{ + Selector: &unversioned.LabelSelector{MatchLabels: validLabels}, + Template: validPodTemplate.Template, + }, + }, + update: extensions.ReplicaSet{ + ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + Spec: extensions.ReplicaSetSpec{ + Replicas: -1, + Selector: &unversioned.LabelSelector{MatchLabels: validLabels}, + Template: validPodTemplate.Template, + }, + }, + }, + } + for testName, errorCase := range errorCases { + if errs := ValidateReplicaSetUpdate(&errorCase.update, &errorCase.old); len(errs) == 0 { + t.Errorf("expected failure: %s", testName) + } + } +} + +func TestValidateReplicaSet(t *testing.T) { + validLabels := map[string]string{"a": "b"} + validPodTemplate := api.PodTemplate{ + Template: api.PodTemplateSpec{ + ObjectMeta: api.ObjectMeta{ + Labels: validLabels, + }, + Spec: api.PodSpec{ + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + Containers: []api.Container{{Name: "abc", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + }, + }, + } + readWriteVolumePodTemplate := api.PodTemplate{ + Template: api.PodTemplateSpec{ + ObjectMeta: api.ObjectMeta{ + Labels: validLabels, + }, + Spec: api.PodSpec{ + Volumes: []api.Volume{{Name: "gcepd", VolumeSource: api.VolumeSource{GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{PDName: "my-PD", FSType: "ext4", Partition: 1, ReadOnly: false}}}}, + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + Containers: []api.Container{{Name: "abc", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + }, + }, + } + invalidLabels := map[string]string{"NoUppercaseOrSpecialCharsLike=Equals": "b"} + invalidPodTemplate := api.PodTemplate{ + Template: api.PodTemplateSpec{ + Spec: api.PodSpec{ + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + }, + ObjectMeta: api.ObjectMeta{ + Labels: invalidLabels, + }, + }, + } + successCases := []extensions.ReplicaSet{ + { + ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + Spec: extensions.ReplicaSetSpec{ + Selector: &unversioned.LabelSelector{MatchLabels: 
validLabels}, + Template: validPodTemplate.Template, + }, + }, + { + ObjectMeta: api.ObjectMeta{Name: "abc-123", Namespace: api.NamespaceDefault}, + Spec: extensions.ReplicaSetSpec{ + Selector: &unversioned.LabelSelector{MatchLabels: validLabels}, + Template: validPodTemplate.Template, + }, + }, + { + ObjectMeta: api.ObjectMeta{Name: "abc-123", Namespace: api.NamespaceDefault}, + Spec: extensions.ReplicaSetSpec{ + Replicas: 1, + Selector: &unversioned.LabelSelector{MatchLabels: validLabels}, + Template: readWriteVolumePodTemplate.Template, + }, + }, + } + for _, successCase := range successCases { + if errs := ValidateReplicaSet(&successCase); len(errs) != 0 { + t.Errorf("expected success: %v", errs) + } + } + + errorCases := map[string]extensions.ReplicaSet{ + "zero-length ID": { + ObjectMeta: api.ObjectMeta{Name: "", Namespace: api.NamespaceDefault}, + Spec: extensions.ReplicaSetSpec{ + Selector: &unversioned.LabelSelector{MatchLabels: validLabels}, + Template: validPodTemplate.Template, + }, + }, + "missing-namespace": { + ObjectMeta: api.ObjectMeta{Name: "abc-123"}, + Spec: extensions.ReplicaSetSpec{ + Selector: &unversioned.LabelSelector{MatchLabels: validLabels}, + Template: validPodTemplate.Template, + }, + }, + "empty selector": { + ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + Spec: extensions.ReplicaSetSpec{ + Template: validPodTemplate.Template, + }, + }, + "selector_doesnt_match": { + ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + Spec: extensions.ReplicaSetSpec{ + Selector: &unversioned.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}, + Template: validPodTemplate.Template, + }, + }, + "invalid manifest": { + ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + Spec: extensions.ReplicaSetSpec{ + Selector: &unversioned.LabelSelector{MatchLabels: validLabels}, + }, + }, + "read-write persistent disk with > 1 pod": { + ObjectMeta: api.ObjectMeta{Name: "abc"}, + Spec: extensions.ReplicaSetSpec{ + Replicas: 2, + Selector: &unversioned.LabelSelector{MatchLabels: validLabels}, + Template: readWriteVolumePodTemplate.Template, + }, + }, + "negative_replicas": { + ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, + Spec: extensions.ReplicaSetSpec{ + Replicas: -1, + Selector: &unversioned.LabelSelector{MatchLabels: validLabels}, + }, + }, + "invalid_label": { + ObjectMeta: api.ObjectMeta{ + Name: "abc-123", + Namespace: api.NamespaceDefault, + Labels: map[string]string{ + "NoUppercaseOrSpecialCharsLike=Equals": "bar", + }, + }, + Spec: extensions.ReplicaSetSpec{ + Selector: &unversioned.LabelSelector{MatchLabels: validLabels}, + Template: validPodTemplate.Template, + }, + }, + "invalid_label 2": { + ObjectMeta: api.ObjectMeta{ + Name: "abc-123", + Namespace: api.NamespaceDefault, + Labels: map[string]string{ + "NoUppercaseOrSpecialCharsLike=Equals": "bar", + }, + }, + Spec: extensions.ReplicaSetSpec{ + Template: invalidPodTemplate.Template, + }, + }, + "invalid_annotation": { + ObjectMeta: api.ObjectMeta{ + Name: "abc-123", + Namespace: api.NamespaceDefault, + Annotations: map[string]string{ + "NoUppercaseOrSpecialCharsLike=Equals": "bar", + }, + }, + Spec: extensions.ReplicaSetSpec{ + Selector: &unversioned.LabelSelector{MatchLabels: validLabels}, + Template: validPodTemplate.Template, + }, + }, + "invalid restart policy 1": { + ObjectMeta: api.ObjectMeta{ + Name: "abc-123", + Namespace: api.NamespaceDefault, + }, + Spec: extensions.ReplicaSetSpec{ + Selector: 
&unversioned.LabelSelector{MatchLabels: validLabels}, + Template: api.PodTemplateSpec{ + Spec: api.PodSpec{ + RestartPolicy: api.RestartPolicyOnFailure, + DNSPolicy: api.DNSClusterFirst, + Containers: []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + }, + ObjectMeta: api.ObjectMeta{ + Labels: validLabels, + }, + }, + }, + }, + "invalid restart policy 2": { + ObjectMeta: api.ObjectMeta{ + Name: "abc-123", + Namespace: api.NamespaceDefault, + }, + Spec: extensions.ReplicaSetSpec{ + Selector: &unversioned.LabelSelector{MatchLabels: validLabels}, + Template: api.PodTemplateSpec{ + Spec: api.PodSpec{ + RestartPolicy: api.RestartPolicyNever, + DNSPolicy: api.DNSClusterFirst, + Containers: []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + }, + ObjectMeta: api.ObjectMeta{ + Labels: validLabels, + }, + }, + }, + }, + } + for k, v := range errorCases { + errs := ValidateReplicaSet(&v) + if len(errs) == 0 { + t.Errorf("expected failure for %s", k) + } + for i := range errs { + field := errs[i].Field + if !strings.HasPrefix(field, "spec.template.") && + field != "metadata.name" && + field != "metadata.namespace" && + field != "spec.selector" && + field != "spec.template" && + field != "GCEPersistentDisk.ReadOnly" && + field != "spec.replicas" && + field != "spec.template.labels" && + field != "metadata.annotations" && + field != "metadata.labels" && + field != "status.replicas" { + t.Errorf("%s: missing prefix for: %v", k, errs[i]) + } + } + } +} + +func TestValidatePodSecurityPolicy(t *testing.T) { + validPSP := func() *extensions.PodSecurityPolicy { + return &extensions.PodSecurityPolicy{ + ObjectMeta: api.ObjectMeta{Name: "foo"}, + Spec: extensions.PodSecurityPolicySpec{ + SELinux: extensions.SELinuxStrategyOptions{ + Rule: extensions.SELinuxStrategyRunAsAny, + }, + RunAsUser: extensions.RunAsUserStrategyOptions{ + Rule: extensions.RunAsUserStrategyRunAsAny, + }, + FSGroup: extensions.FSGroupStrategyOptions{ + Rule: extensions.FSGroupStrategyRunAsAny, + }, + SupplementalGroups: extensions.SupplementalGroupsStrategyOptions{ + Rule: extensions.SupplementalGroupsStrategyRunAsAny, + }, + }, + } + } + + noUserOptions := validPSP() + noUserOptions.Spec.RunAsUser.Rule = "" + + noSELinuxOptions := validPSP() + noSELinuxOptions.Spec.SELinux.Rule = "" + + invalidUserStratType := validPSP() + invalidUserStratType.Spec.RunAsUser.Rule = "invalid" + + invalidSELinuxStratType := validPSP() + invalidSELinuxStratType.Spec.SELinux.Rule = "invalid" + + invalidUIDPSP := validPSP() + invalidUIDPSP.Spec.RunAsUser.Rule = extensions.RunAsUserStrategyMustRunAs + invalidUIDPSP.Spec.RunAsUser.Ranges = []extensions.IDRange{ + {Min: -1, Max: 1}, + } + + missingObjectMetaName := validPSP() + missingObjectMetaName.ObjectMeta.Name = "" + + noFSGroupOptions := validPSP() + noFSGroupOptions.Spec.FSGroup.Rule = "" + + invalidFSGroupStratType := validPSP() + invalidFSGroupStratType.Spec.FSGroup.Rule = "invalid" + + noSupplementalGroupsOptions := validPSP() + noSupplementalGroupsOptions.Spec.SupplementalGroups.Rule = "" + + invalidSupGroupStratType := validPSP() + invalidSupGroupStratType.Spec.SupplementalGroups.Rule = "invalid" + + invalidRangeMinGreaterThanMax := validPSP() + invalidRangeMinGreaterThanMax.Spec.FSGroup.Ranges = []extensions.IDRange{ + {Min: 2, Max: 1}, + } + + invalidRangeNegativeMin := validPSP() + invalidRangeNegativeMin.Spec.FSGroup.Ranges = []extensions.IDRange{ + {Min: -1, Max: 10}, + } + + invalidRangeNegativeMax := validPSP() + 
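// The IDRange cases being set up here exercise three rules. A minimal
// standalone sketch of those rules; the type shape and wording are assumed
// only from the expected error details registered below ("min cannot be
// negative", "min cannot be greater than max", "max cannot be negative"):
//
//	type idRange struct{ Min, Max int64 }
//
//	func checkIDRange(r idRange) []string {
//		var errs []string
//		if r.Min < 0 {
//			errs = append(errs, "min cannot be negative")
//		}
//		if r.Max < 0 {
//			errs = append(errs, "max cannot be negative")
//		}
//		if r.Min > r.Max {
//			errs = append(errs, "min cannot be greater than max")
//		}
//		return errs
//	}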
invalidRangeNegativeMax.Spec.FSGroup.Ranges = []extensions.IDRange{ + {Min: 1, Max: -10}, + } + + requiredCapAddAndDrop := validPSP() + requiredCapAddAndDrop.Spec.DefaultAddCapabilities = []api.Capability{"foo"} + requiredCapAddAndDrop.Spec.RequiredDropCapabilities = []api.Capability{"foo"} + + allowedCapListedInRequiredDrop := validPSP() + allowedCapListedInRequiredDrop.Spec.RequiredDropCapabilities = []api.Capability{"foo"} + allowedCapListedInRequiredDrop.Spec.AllowedCapabilities = []api.Capability{"foo"} + + errorCases := map[string]struct { + psp *extensions.PodSecurityPolicy + errorType field.ErrorType + errorDetail string + }{ + "no user options": { + psp: noUserOptions, + errorType: field.ErrorTypeNotSupported, + errorDetail: "supported values: MustRunAs, MustRunAsNonRoot, RunAsAny", + }, + "no selinux options": { + psp: noSELinuxOptions, + errorType: field.ErrorTypeNotSupported, + errorDetail: "supported values: MustRunAs, RunAsAny", + }, + "no fsgroup options": { + psp: noFSGroupOptions, + errorType: field.ErrorTypeNotSupported, + errorDetail: "supported values: MustRunAs, RunAsAny", + }, + "no sup group options": { + psp: noSupplementalGroupsOptions, + errorType: field.ErrorTypeNotSupported, + errorDetail: "supported values: MustRunAs, RunAsAny", + }, + "invalid user strategy type": { + psp: invalidUserStratType, + errorType: field.ErrorTypeNotSupported, + errorDetail: "supported values: MustRunAs, MustRunAsNonRoot, RunAsAny", + }, + "invalid selinux strategy type": { + psp: invalidSELinuxStratType, + errorType: field.ErrorTypeNotSupported, + errorDetail: "supported values: MustRunAs, RunAsAny", + }, + "invalid sup group strategy type": { + psp: invalidSupGroupStratType, + errorType: field.ErrorTypeNotSupported, + errorDetail: "supported values: MustRunAs, RunAsAny", + }, + "invalid fs group strategy type": { + psp: invalidFSGroupStratType, + errorType: field.ErrorTypeNotSupported, + errorDetail: "supported values: MustRunAs, RunAsAny", + }, + "invalid uid": { + psp: invalidUIDPSP, + errorType: field.ErrorTypeInvalid, + errorDetail: "min cannot be negative", + }, + "missing object meta name": { + psp: missingObjectMetaName, + errorType: field.ErrorTypeRequired, + errorDetail: "name or generateName is required", + }, + "invalid range min greater than max": { + psp: invalidRangeMinGreaterThanMax, + errorType: field.ErrorTypeInvalid, + errorDetail: "min cannot be greater than max", + }, + "invalid range negative min": { + psp: invalidRangeNegativeMin, + errorType: field.ErrorTypeInvalid, + errorDetail: "min cannot be negative", + }, + "invalid range negative max": { + psp: invalidRangeNegativeMax, + errorType: field.ErrorTypeInvalid, + errorDetail: "max cannot be negative", + }, + "invalid required caps": { + psp: requiredCapAddAndDrop, + errorType: field.ErrorTypeInvalid, + errorDetail: "capability is listed in defaultAddCapabilities and requiredDropCapabilities", + }, + "allowed cap listed in required drops": { + psp: allowedCapListedInRequiredDrop, + errorType: field.ErrorTypeInvalid, + errorDetail: "capability is listed in allowedCapabilities and requiredDropCapabilities", + }, + } + + for k, v := range errorCases { + errs := ValidatePodSecurityPolicy(v.psp) + if len(errs) == 0 { + t.Errorf("%s expected errors but got none", k) + continue + } + if errs[0].Type != v.errorType { + t.Errorf("%s received an unexpected error type. Expected: %v got: %v", k, v.errorType, errs[0].Type) + } + if errs[0].Detail != v.errorDetail { + t.Errorf("%s received an unexpected error detail. 
Expected %v got: %v", k, v.errorDetail, errs[0].Detail) + } + } + + mustRunAs := validPSP() + mustRunAs.Spec.FSGroup.Rule = extensions.FSGroupStrategyMustRunAs + mustRunAs.Spec.SupplementalGroups.Rule = extensions.SupplementalGroupsStrategyMustRunAs + mustRunAs.Spec.RunAsUser.Rule = extensions.RunAsUserStrategyMustRunAs + mustRunAs.Spec.RunAsUser.Ranges = []extensions.IDRange{ + {Min: 1, Max: 1}, + } + mustRunAs.Spec.SELinux.Rule = extensions.SELinuxStrategyMustRunAs + + runAsNonRoot := validPSP() + runAsNonRoot.Spec.RunAsUser.Rule = extensions.RunAsUserStrategyMustRunAsNonRoot + + caseInsensitiveAddDrop := validPSP() + caseInsensitiveAddDrop.Spec.DefaultAddCapabilities = []api.Capability{"foo"} + caseInsensitiveAddDrop.Spec.RequiredDropCapabilities = []api.Capability{"FOO"} + + caseInsensitiveAllowedDrop := validPSP() + caseInsensitiveAllowedDrop.Spec.RequiredDropCapabilities = []api.Capability{"FOO"} + caseInsensitiveAllowedDrop.Spec.AllowedCapabilities = []api.Capability{"foo"} + + successCases := map[string]struct { + psp *extensions.PodSecurityPolicy + }{ + "must run as": { + psp: mustRunAs, + }, + "run as any": { + psp: validPSP(), + }, + "run as non-root (user only)": { + psp: runAsNonRoot, + }, + "comparison for add -> drop is case sensitive": { + psp: caseInsensitiveAddDrop, + }, + "comparison for allowed -> drop is case sensitive": { + psp: caseInsensitiveAllowedDrop, + }, + } + + for k, v := range successCases { + if errs := ValidatePodSecurityPolicy(v.psp); len(errs) != 0 { + t.Errorf("Expected success for %s, got %v", k, errs) + } + } +} + +func TestValidatePSPVolumes(t *testing.T) { + validPSP := func() *extensions.PodSecurityPolicy { + return &extensions.PodSecurityPolicy{ + ObjectMeta: api.ObjectMeta{Name: "foo"}, + Spec: extensions.PodSecurityPolicySpec{ + SELinux: extensions.SELinuxStrategyOptions{ + Rule: extensions.SELinuxStrategyRunAsAny, + }, + RunAsUser: extensions.RunAsUserStrategyOptions{ + Rule: extensions.RunAsUserStrategyRunAsAny, + }, + FSGroup: extensions.FSGroupStrategyOptions{ + Rule: extensions.FSGroupStrategyRunAsAny, + }, + SupplementalGroups: extensions.SupplementalGroupsStrategyOptions{ + Rule: extensions.SupplementalGroupsStrategyRunAsAny, + }, + }, + } + } + + volumes := psputil.GetAllFSTypesAsSet() + // add in the * value since that is a pseudo type that is not included by default + volumes.Insert(string(extensions.All)) + + for _, strVolume := range volumes.List() { + psp := validPSP() + psp.Spec.Volumes = []extensions.FSType{extensions.FSType(strVolume)} + errs := ValidatePodSecurityPolicy(psp) + if len(errs) != 0 { + t.Errorf("%s validation expected no errors but received %v", strVolume, errs) + } + } +} + +func newBool(val bool) *bool { + p := new(bool) + *p = val + return p +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/metrics/deep_copy_generated.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/metrics/deep_copy_generated.go index e7562de5221e..d467e6f59033 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/metrics/deep_copy_generated.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/metrics/deep_copy_generated.go @@ -1,5 +1,7 @@ +// +build !ignore_autogenerated + /* -Copyright 2015 The Kubernetes Authors All rights reserved. +Copyright 2016 The Kubernetes Authors All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
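Every validation test above follows the same table-driven shape: build one valid object, derive invalid mutants from it, key each mutant by the error substring it must produce, then assert that validation fails and that the first error contains the key. A self-contained sketch of the pattern, with a placeholder type and validator rather than the APIs from this diff:

package main

import (
	"fmt"
	"strings"
)

type widget struct{ Name string }

// validate returns error strings; an empty slice means the object is valid.
func validate(w widget) []string {
	var errs []string
	if w.Name == "" {
		errs = append(errs, "name: Required value")
	}
	return errs
}

func main() {
	errorCases := map[string]widget{
		"name: Required value": {Name: ""},
	}
	for expected, w := range errorCases {
		errs := validate(w)
		if len(errs) == 0 {
			fmt.Printf("[%s] expected failure\n", expected)
		} else if !strings.Contains(errs[0], expected) {
			fmt.Printf("unexpected error: %q, expected: %q\n", errs[0], expected)
		}
	}
}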
@@ -14,16 +16,36 @@ See the License for the specific language governing permissions and limitations under the License. */ -// DO NOT EDIT. THIS FILE IS AUTO-GENERATED BY $KUBEROOT/hack/update-generated-deep-copies.sh. +// This file was autogenerated by deepcopy-gen. Do not edit it manually! package metrics -import api "k8s.io/kubernetes/pkg/api" +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + conversion "k8s.io/kubernetes/pkg/conversion" +) func init() { - err := api.Scheme.AddGeneratedDeepCopyFuncs() - if err != nil { + if err := api.Scheme.AddGeneratedDeepCopyFuncs( + DeepCopy_metrics_RawNode, + DeepCopy_metrics_RawPod, + ); err != nil { // if one of the deep copy functions is malformed, detect it immediately. panic(err) } } + +func DeepCopy_metrics_RawNode(in RawNode, out *RawNode, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + return nil +} + +func DeepCopy_metrics_RawPod(in RawPod, out *RawPod, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + return nil +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/metrics/types.generated.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/metrics/types.generated.go index ecf3e8b36f28..17eedfd97455 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/metrics/types.generated.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/metrics/types.generated.go @@ -1,5 +1,5 @@ /* -Copyright 2015 The Kubernetes Authors All rights reserved. +Copyright 2016 The Kubernetes Authors All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/metrics/v1alpha1/conversion_generated.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/metrics/v1alpha1/conversion_generated.go index 928d2ca5740d..387e752a6969 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/metrics/v1alpha1/conversion_generated.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/metrics/v1alpha1/conversion_generated.go @@ -1,5 +1,7 @@ +// +build !ignore_autogenerated + /* -Copyright 2015 The Kubernetes Authors All rights reserved. +Copyright 2016 The Kubernetes Authors All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,64 +16,51 @@ See the License for the specific language governing permissions and limitations under the License. */ -// DO NOT EDIT. THIS FILE IS AUTO-GENERATED BY $KUBEROOT/hack/update-generated-conversions.sh +// This file was autogenerated by conversion-gen. Do not edit it manually! 
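The generated deep-copy and conversion files in this diff share one registration idiom: each generated function handles exactly one type, field by field, and an init() hands the full set to the scheme so that a malformed function panics at process start instead of at first use. A self-contained sketch of that idiom; the registry and types below are stand-ins, not the Kubernetes API:

package main

import "fmt"

type typeMeta struct{ Kind, APIVersion string }
type rawNode struct{ typeMeta }

// deepCopyTypeMeta copies every field explicitly, as deepcopy-gen output does.
func deepCopyTypeMeta(in typeMeta, out *typeMeta) error {
	out.Kind = in.Kind
	out.APIVersion = in.APIVersion
	return nil
}

func deepCopyRawNode(in rawNode, out *rawNode) error {
	return deepCopyTypeMeta(in.typeMeta, &out.typeMeta)
}

// registerDeepCopies rejects nil entries up front, mirroring the panic(err)
// in the generated init() so breakage surfaces at startup.
func registerDeepCopies(fns ...func(rawNode, *rawNode) error) {
	for _, fn := range fns {
		if fn == nil {
			panic(fmt.Errorf("malformed deep copy function"))
		}
	}
}

func main() {
	registerDeepCopies(deepCopyRawNode)
	var out rawNode
	if err := deepCopyRawNode(rawNode{typeMeta{Kind: "RawNode"}}, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.Kind)
}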
package v1alpha1 import ( - reflect "reflect" - api "k8s.io/kubernetes/pkg/api" metrics "k8s.io/kubernetes/pkg/apis/metrics" conversion "k8s.io/kubernetes/pkg/conversion" ) -func autoConvert_metrics_RawNode_To_v1alpha1_RawNode(in *metrics.RawNode, out *RawNode, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*metrics.RawNode))(in) - } - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err +func init() { + if err := api.Scheme.AddGeneratedConversionFuncs( + Convert_v1alpha1_RawNode_To_metrics_RawNode, + Convert_metrics_RawNode_To_v1alpha1_RawNode, + Convert_v1alpha1_RawPod_To_metrics_RawPod, + Convert_metrics_RawPod_To_v1alpha1_RawPod, + ); err != nil { + // if one of the conversion functions is malformed, detect it immediately. + panic(err) } - return nil } -func Convert_metrics_RawNode_To_v1alpha1_RawNode(in *metrics.RawNode, out *RawNode, s conversion.Scope) error { - return autoConvert_metrics_RawNode_To_v1alpha1_RawNode(in, out, s) -} - -func autoConvert_metrics_RawPod_To_v1alpha1_RawPod(in *metrics.RawPod, out *RawPod, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*metrics.RawPod))(in) - } +func autoConvert_v1alpha1_RawNode_To_metrics_RawNode(in *RawNode, out *metrics.RawNode, s conversion.Scope) error { if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } return nil } -func Convert_metrics_RawPod_To_v1alpha1_RawPod(in *metrics.RawPod, out *RawPod, s conversion.Scope) error { - return autoConvert_metrics_RawPod_To_v1alpha1_RawPod(in, out, s) +func Convert_v1alpha1_RawNode_To_metrics_RawNode(in *RawNode, out *metrics.RawNode, s conversion.Scope) error { + return autoConvert_v1alpha1_RawNode_To_metrics_RawNode(in, out, s) } -func autoConvert_v1alpha1_RawNode_To_metrics_RawNode(in *RawNode, out *metrics.RawNode, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*RawNode))(in) - } +func autoConvert_metrics_RawNode_To_v1alpha1_RawNode(in *metrics.RawNode, out *RawNode, s conversion.Scope) error { if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } return nil } -func Convert_v1alpha1_RawNode_To_metrics_RawNode(in *RawNode, out *metrics.RawNode, s conversion.Scope) error { - return autoConvert_v1alpha1_RawNode_To_metrics_RawNode(in, out, s) +func Convert_metrics_RawNode_To_v1alpha1_RawNode(in *metrics.RawNode, out *RawNode, s conversion.Scope) error { + return autoConvert_metrics_RawNode_To_v1alpha1_RawNode(in, out, s) } func autoConvert_v1alpha1_RawPod_To_metrics_RawPod(in *RawPod, out *metrics.RawPod, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*RawPod))(in) - } if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err } @@ -82,15 +71,13 @@ func Convert_v1alpha1_RawPod_To_metrics_RawPod(in *RawPod, out *metrics.RawPod, return autoConvert_v1alpha1_RawPod_To_metrics_RawPod(in, out, s) } -func init() { - err := api.Scheme.AddGeneratedConversionFuncs( - autoConvert_metrics_RawNode_To_v1alpha1_RawNode, - autoConvert_metrics_RawPod_To_v1alpha1_RawPod, - autoConvert_v1alpha1_RawNode_To_metrics_RawNode, - 
autoConvert_v1alpha1_RawPod_To_metrics_RawPod, - ) - if err != nil { - // If one of the conversion functions is malformed, detect it immediately. - panic(err) +func autoConvert_metrics_RawPod_To_v1alpha1_RawPod(in *metrics.RawPod, out *RawPod, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err } + return nil +} + +func Convert_metrics_RawPod_To_v1alpha1_RawPod(in *metrics.RawPod, out *RawPod, s conversion.Scope) error { + return autoConvert_metrics_RawPod_To_v1alpha1_RawPod(in, out, s) } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/metrics/v1alpha1/deep_copy_generated.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/metrics/v1alpha1/deep_copy_generated.go index d235d1a480bc..c5de917442eb 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/metrics/v1alpha1/deep_copy_generated.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/metrics/v1alpha1/deep_copy_generated.go @@ -1,5 +1,7 @@ +// +build !ignore_autogenerated + /* -Copyright 2015 The Kubernetes Authors All rights reserved. +Copyright 2016 The Kubernetes Authors All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +16,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// DO NOT EDIT. THIS FILE IS AUTO-GENERATED BY $KUBEROOT/hack/update-generated-deep-copies.sh. +// This file was autogenerated by deepcopy-gen. Do not edit it manually! package v1alpha1 @@ -24,34 +26,26 @@ import ( conversion "k8s.io/kubernetes/pkg/conversion" ) -func deepCopy_unversioned_TypeMeta(in unversioned.TypeMeta, out *unversioned.TypeMeta, c *conversion.Cloner) error { - out.Kind = in.Kind - out.APIVersion = in.APIVersion - return nil +func init() { + if err := api.Scheme.AddGeneratedDeepCopyFuncs( + DeepCopy_v1alpha1_RawNode, + DeepCopy_v1alpha1_RawPod, + ); err != nil { + // if one of the deep copy functions is malformed, detect it immediately. + panic(err) + } } -func deepCopy_v1alpha1_RawNode(in RawNode, out *RawNode, c *conversion.Cloner) error { - if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { +func DeepCopy_v1alpha1_RawNode(in RawNode, out *RawNode, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } return nil } -func deepCopy_v1alpha1_RawPod(in RawPod, out *RawPod, c *conversion.Cloner) error { - if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { +func DeepCopy_v1alpha1_RawPod(in RawPod, out *RawPod, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } return nil } - -func init() { - err := api.Scheme.AddGeneratedDeepCopyFuncs( - deepCopy_unversioned_TypeMeta, - deepCopy_v1alpha1_RawNode, - deepCopy_v1alpha1_RawPod, - ) - if err != nil { - // if one of the deep copy functions is malformed, detect it immediately. 
- panic(err) - } -} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/metrics/v1alpha1/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/metrics/v1alpha1/doc.go new file mode 100644 index 000000000000..65a03a2093dc --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/metrics/v1alpha1/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +genconversion=true +package v1alpha1 diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/metrics/v1alpha1/register.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/metrics/v1alpha1/register.go index c943d54687d0..4af5dbfeaeab 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/metrics/v1alpha1/register.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/metrics/v1alpha1/register.go @@ -20,6 +20,7 @@ import ( "k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/runtime" + versionedwatch "k8s.io/kubernetes/pkg/watch/versioned" ) // GroupName is the group name use in this package @@ -40,6 +41,8 @@ func addKnownTypes(scheme *runtime.Scheme) { &RawPod{}, &v1.DeleteOptions{}, ) + // Add the watch version that applies + versionedwatch.AddToGroupVersion(scheme, SchemeGroupVersion) } func (obj *RawNode) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/metrics/v1alpha1/types.generated.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/metrics/v1alpha1/types.generated.go index 2e6d2f3c187e..e4f3f9db46af 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/metrics/v1alpha1/types.generated.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/metrics/v1alpha1/types.generated.go @@ -1,5 +1,5 @@ /* -Copyright 2015 The Kubernetes Authors All rights reserved. +Copyright 2016 The Kubernetes Authors All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/policy/deep_copy_generated.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/policy/deep_copy_generated.go new file mode 100644 index 000000000000..390e4b4a7b48 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/policy/deep_copy_generated.go @@ -0,0 +1,101 @@ +// +build !ignore_autogenerated + +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package policy + +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + conversion "k8s.io/kubernetes/pkg/conversion" + intstr "k8s.io/kubernetes/pkg/util/intstr" +) + +func init() { + if err := api.Scheme.AddGeneratedDeepCopyFuncs( + DeepCopy_policy_PodDisruptionBudget, + DeepCopy_policy_PodDisruptionBudgetList, + DeepCopy_policy_PodDisruptionBudgetSpec, + DeepCopy_policy_PodDisruptionBudgetStatus, + ); err != nil { + // if one of the deep copy functions is malformed, detect it immediately. + panic(err) + } +} + +func DeepCopy_policy_PodDisruptionBudget(in PodDisruptionBudget, out *PodDisruptionBudget, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + return err + } + if err := DeepCopy_policy_PodDisruptionBudgetSpec(in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_policy_PodDisruptionBudgetStatus(in.Status, &out.Status, c); err != nil { + return err + } + return nil +} + +func DeepCopy_policy_PodDisruptionBudgetList(in PodDisruptionBudgetList, out *PodDisruptionBudgetList, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + return err + } + if in.Items != nil { + in, out := in.Items, &out.Items + *out = make([]PodDisruptionBudget, len(in)) + for i := range in { + if err := DeepCopy_policy_PodDisruptionBudget(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func DeepCopy_policy_PodDisruptionBudgetSpec(in PodDisruptionBudgetSpec, out *PodDisruptionBudgetSpec, c *conversion.Cloner) error { + if err := intstr.DeepCopy_intstr_IntOrString(in.MinAvailable, &out.MinAvailable, c); err != nil { + return err + } + if in.Selector != nil { + in, out := in.Selector, &out.Selector + *out = new(unversioned.LabelSelector) + if err := unversioned.DeepCopy_unversioned_LabelSelector(*in, *out, c); err != nil { + return err + } + } else { + out.Selector = nil + } + return nil +} + +func DeepCopy_policy_PodDisruptionBudgetStatus(in PodDisruptionBudgetStatus, out *PodDisruptionBudgetStatus, c *conversion.Cloner) error { + out.PodDisruptionAllowed = in.PodDisruptionAllowed + out.CurrentHealthy = in.CurrentHealthy + out.DesiredHealthy = in.DesiredHealthy + out.ExpectedPods = in.ExpectedPods + return nil +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/policy/install/install.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/policy/install/install.go new file mode 100644 index 000000000000..7882a0c53108 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/policy/install/install.go @@ -0,0 +1,129 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package install installs the experimental API group, making it available as +// an option to all of the API encoding/decoding machinery. +package install + +import ( + "fmt" + + "github.com/golang/glog" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/meta" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/apimachinery" + "k8s.io/kubernetes/pkg/apimachinery/registered" + "k8s.io/kubernetes/pkg/apis/policy" + "k8s.io/kubernetes/pkg/apis/policy/v1alpha1" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/util/sets" +) + +const importPrefix = "k8s.io/kubernetes/pkg/apis/policy" + +var accessor = meta.NewAccessor() + +// availableVersions lists all known external versions for this group from most preferred to least preferred +var availableVersions = []unversioned.GroupVersion{v1alpha1.SchemeGroupVersion} + +func init() { + registered.RegisterVersions(availableVersions) + externalVersions := []unversioned.GroupVersion{} + for _, v := range availableVersions { + if registered.IsAllowedVersion(v) { + externalVersions = append(externalVersions, v) + } + } + if len(externalVersions) == 0 { + glog.V(4).Infof("No version is registered for group %v", policy.GroupName) + return + } + + if err := registered.EnableVersions(externalVersions...); err != nil { + glog.V(4).Infof("%v", err) + return + } + if err := enableVersions(externalVersions); err != nil { + glog.V(4).Infof("%v", err) + return + } +} + +// TODO: enableVersions should be centralized rather than spread in each API +// group. +// We can combine registered.RegisterVersions, registered.EnableVersions and +// registered.RegisterGroup once we have moved enableVersions there. +func enableVersions(externalVersions []unversioned.GroupVersion) error { + addVersionsToScheme(externalVersions...) + preferredExternalVersion := externalVersions[0] + + groupMeta := apimachinery.GroupMeta{ + GroupVersion: preferredExternalVersion, + GroupVersions: externalVersions, + RESTMapper: newRESTMapper(externalVersions), + SelfLinker: runtime.SelfLinker(accessor), + InterfacesFor: interfacesFor, + } + + if err := registered.RegisterGroup(groupMeta); err != nil { + return err + } + api.RegisterRESTMapper(groupMeta.RESTMapper) + return nil +} + +func newRESTMapper(externalVersions []unversioned.GroupVersion) meta.RESTMapper { + // the list of kinds that are scoped at the root of the api hierarchy + // if a kind is not enumerated here, it is assumed to have a namespace scope + rootScoped := sets.NewString() + + ignoredKinds := sets.NewString() + + return api.NewDefaultRESTMapper(externalVersions, interfacesFor, importPrefix, ignoredKinds, rootScoped) +} + +// interfacesFor returns the default Codec and ResourceVersioner for a given version +// string, or an error if the version is not known. 
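// In sketch form this is a plain lookup: known GroupVersions map to their
// interface bundle, and a miss produces an error naming the versions that
// would have been accepted. The actual function below uses a switch; the
// map form here is only an illustrative equivalent of the same shape:
//
//	known := map[unversioned.GroupVersion]*meta.VersionInterfaces{
//		v1alpha1.SchemeGroupVersion: {ObjectConvertor: api.Scheme, MetadataAccessor: accessor},
//	}
//	if vi, ok := known[version]; ok {
//		return vi, nil
//	}
//	g, _ := registered.Group(policy.GroupName)
//	return nil, fmt.Errorf("unsupported storage version: %s (valid: %v)", version, g.GroupVersions)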
+func interfacesFor(version unversioned.GroupVersion) (*meta.VersionInterfaces, error) { + switch version { + case v1alpha1.SchemeGroupVersion: + return &meta.VersionInterfaces{ + ObjectConvertor: api.Scheme, + MetadataAccessor: accessor, + }, nil + default: + g, _ := registered.Group(policy.GroupName) + return nil, fmt.Errorf("unsupported storage version: %s (valid: %v)", version, g.GroupVersions) + } +} + +func addVersionsToScheme(externalVersions ...unversioned.GroupVersion) { + // add the internal version to Scheme + policy.AddToScheme(api.Scheme) + // add the enabled external versions to Scheme + for _, v := range externalVersions { + if !registered.IsEnabledVersion(v) { + glog.Errorf("Version %s is not enabled, so it will not be added to the Scheme.", v) + continue + } + switch v { + case v1alpha1.SchemeGroupVersion: + v1alpha1.AddToScheme(api.Scheme) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/policy/register.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/policy/register.go new file mode 100644 index 000000000000..7aa010a0f397 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/policy/register.go @@ -0,0 +1,55 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package policy + +import ( + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/runtime" +) + +// GroupName is the group name use in this package +const GroupName = "policy" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} + +// Kind takes an unqualified kind and returns back a Group qualified GroupKind +func Kind(kind string) unversioned.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns back a Group qualified GroupResource +func Resource(resource string) unversioned.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +func AddToScheme(scheme *runtime.Scheme) { + // Add the API to Scheme. + addKnownTypes(scheme) +} + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) { + // TODO this gets cleaned up when the types are fixed + scheme.AddKnownTypes(SchemeGroupVersion, + &PodDisruptionBudget{}, + &PodDisruptionBudgetList{}, + ) +} + +func (obj *PodDisruptionBudget) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } +func (obj *PodDisruptionBudgetList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/policy/types.generated.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/policy/types.generated.go new file mode 100644 index 000000000000..08be370f1584 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/policy/types.generated.go @@ -0,0 +1,1440 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// ************************************************************ +// DO NOT EDIT. +// THIS FILE IS AUTO-GENERATED BY codecgen. +// ************************************************************ + +package policy + +import ( + "errors" + "fmt" + codec1978 "github.com/ugorji/go/codec" + pkg3_api "k8s.io/kubernetes/pkg/api" + pkg2_unversioned "k8s.io/kubernetes/pkg/api/unversioned" + pkg4_types "k8s.io/kubernetes/pkg/types" + pkg1_intstr "k8s.io/kubernetes/pkg/util/intstr" + "reflect" + "runtime" + time "time" +) + +const ( + // ----- content types ---- + codecSelferC_UTF81234 = 1 + codecSelferC_RAW1234 = 0 + // ----- value types used ---- + codecSelferValueTypeArray1234 = 10 + codecSelferValueTypeMap1234 = 9 + // ----- containerStateValues ---- + codecSelfer_containerMapKey1234 = 2 + codecSelfer_containerMapValue1234 = 3 + codecSelfer_containerMapEnd1234 = 4 + codecSelfer_containerArrayElem1234 = 6 + codecSelfer_containerArrayEnd1234 = 7 +) + +var ( + codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) + codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) +) + +type codecSelfer1234 struct{} + +func init() { + if codec1978.GenVersion != 5 { + _, file, _, _ := runtime.Caller(0) + err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. 
Re-generate file: %v", + 5, codec1978.GenVersion, file) + panic(err) + } + if false { // reference the types, but skip this branch at build/run time + var v0 pkg3_api.ObjectMeta + var v1 pkg2_unversioned.LabelSelector + var v2 pkg4_types.UID + var v3 pkg1_intstr.IntOrString + var v4 time.Time + _, _, _, _, _ = v0, v1, v2, v3, v4 + } +} + +func (x *PodDisruptionBudgetSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = true + yyq2[1] = x.Selector != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yy4 := &x.MinAvailable + yym5 := z.EncBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.EncExt(yy4) { + } else if !yym5 && z.IsJSONHandle() { + z.EncJSONMarshal(yy4) + } else { + z.EncFallback(yy4) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("minAvailable")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.MinAvailable + yym7 := z.EncBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.EncExt(yy6) { + } else if !yym7 && z.IsJSONHandle() { + z.EncJSONMarshal(yy6) + } else { + z.EncFallback(yy6) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Selector == nil { + r.EncodeNil() + } else { + yym9 := z.EncBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.EncExt(x.Selector) { + } else { + z.EncFallback(x.Selector) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("selector")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Selector == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.EncExt(x.Selector) { + } else { + z.EncFallback(x.Selector) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodDisruptionBudgetSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x 
*PodDisruptionBudgetSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "minAvailable": + if r.TryDecodeAsNil() { + x.MinAvailable = pkg1_intstr.IntOrString{} + } else { + yyv4 := &x.MinAvailable + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(yyv4) { + } else if !yym5 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv4) + } else { + z.DecFallback(yyv4, false) + } + } + case "selector": + if r.TryDecodeAsNil() { + if x.Selector != nil { + x.Selector = nil + } + } else { + if x.Selector == nil { + x.Selector = new(pkg2_unversioned.LabelSelector) + } + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(x.Selector) { + } else { + z.DecFallback(x.Selector, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodDisruptionBudgetSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.MinAvailable = pkg1_intstr.IntOrString{} + } else { + yyv9 := &x.MinAvailable + yym10 := z.DecBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.DecExt(yyv9) { + } else if !yym10 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv9) + } else { + z.DecFallback(yyv9, false) + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Selector != nil { + x.Selector = nil + } + } else { + if x.Selector == nil { + x.Selector = new(pkg2_unversioned.LabelSelector) + } + yym12 := z.DecBinary() + _ = yym12 + if false { + } else if z.HasExtensions() && z.DecExt(x.Selector) { + } else { + z.DecFallback(x.Selector, false) + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PodDisruptionBudgetStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || 
yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 4 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeBool(bool(x.PodDisruptionAllowed)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("disruptionAllowed")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeBool(bool(x.PodDisruptionAllowed)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeInt(int64(x.CurrentHealthy)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("currentHealthy")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeInt(int64(x.CurrentHealthy)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(x.DesiredHealthy)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("desiredHealthy")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeInt(int64(x.DesiredHealthy)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeInt(int64(x.ExpectedPods)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("expectedPods")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeInt(int64(x.ExpectedPods)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodDisruptionBudgetStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodDisruptionBudgetStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + 
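// The generated decoders all follow one loop shape: read a map key, decode
// the matching field (TryDecodeAsNil resets it to the zero value), and send
// unknown keys to DecStructFieldNotFound. A hand-written equivalent of that
// shape using encoding/json tokens in place of the ugorji codec handle (an
// illustration of the pattern only, not this package's API):
//
//	dec := json.NewDecoder(strings.NewReader(`{"expectedPods":3,"extra":1}`))
//	dec.Token() // consume '{'
//	var status struct{ ExpectedPods int32 }
//	for dec.More() {
//		key, _ := dec.Token()
//		switch key {
//		case "expectedPods":
//			dec.Decode(&status.ExpectedPods)
//		default:
//			var skip json.RawMessage // unknown field: decode and discard
//			dec.Decode(&skip)
//		}
//	}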
yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "disruptionAllowed": + if r.TryDecodeAsNil() { + x.PodDisruptionAllowed = false + } else { + x.PodDisruptionAllowed = bool(r.DecodeBool()) + } + case "currentHealthy": + if r.TryDecodeAsNil() { + x.CurrentHealthy = 0 + } else { + x.CurrentHealthy = int32(r.DecodeInt(32)) + } + case "desiredHealthy": + if r.TryDecodeAsNil() { + x.DesiredHealthy = 0 + } else { + x.DesiredHealthy = int32(r.DecodeInt(32)) + } + case "expectedPods": + if r.TryDecodeAsNil() { + x.ExpectedPods = 0 + } else { + x.ExpectedPods = int32(r.DecodeInt(32)) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodDisruptionBudgetStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.PodDisruptionAllowed = false + } else { + x.PodDisruptionAllowed = bool(r.DecodeBool()) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.CurrentHealthy = 0 + } else { + x.CurrentHealthy = int32(r.DecodeInt(32)) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.DesiredHealthy = 0 + } else { + x.DesiredHealthy = int32(r.DecodeInt(32)) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ExpectedPods = 0 + } else { + x.ExpectedPods = int32(r.DecodeInt(32)) + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PodDisruptionBudget) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = true + yyq2[1] = true + yyq2[2] = true + yyq2[3] = x.Kind != "" + yyq2[4] = x.APIVersion != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] 
{ + yy4 := &x.ObjectMeta + yy4.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.ObjectMeta + yy6.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yy9 := &x.Spec + yy9.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy11 := &x.Spec + yy11.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy14 := &x.Status + yy14.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy16 := &x.Status + yy16.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodDisruptionBudget) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodDisruptionBudget) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, 
_ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg3_api.ObjectMeta{} + } else { + yyv4 := &x.ObjectMeta + yyv4.CodecDecodeSelf(d) + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = PodDisruptionBudgetSpec{} + } else { + yyv5 := &x.Spec + yyv5.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = PodDisruptionBudgetStatus{} + } else { + yyv6 := &x.Status + yyv6.CodecDecodeSelf(d) + } + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodDisruptionBudget) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj9 int + var yyb9 bool + var yyhl9 bool = l >= 0 + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg3_api.ObjectMeta{} + } else { + yyv10 := &x.ObjectMeta + yyv10.CodecDecodeSelf(d) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = PodDisruptionBudgetSpec{} + } else { + yyv11 := &x.Spec + yyv11.CodecDecodeSelf(d) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = PodDisruptionBudgetStatus{} + } else { + yyv12 := &x.Status + yyv12.CodecDecodeSelf(d) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + for { + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj9-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PodDisruptionBudgetList) CodecEncodeSelf(e 
*codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = true + yyq2[2] = x.Kind != "" + yyq2[3] = x.APIVersion != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yy4 := &x.ListMeta + yym5 := z.EncBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.EncExt(yy4) { + } else { + z.EncFallback(yy4) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.ListMeta + yym7 := z.EncBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.EncExt(yy6) { + } else { + z.EncFallback(yy6) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym9 := z.EncBinary() + _ = yym9 + if false { + } else { + h.encSlicePodDisruptionBudget(([]PodDisruptionBudget)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + h.encSlicePodDisruptionBudget(([]PodDisruptionBudget)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym12 := z.EncBinary() + _ = yym12 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodDisruptionBudgetList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := 
z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodDisruptionBudgetList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_unversioned.ListMeta{} + } else { + yyv4 := &x.ListMeta + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(yyv4) { + } else { + z.DecFallback(yyv4, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv6 := &x.Items + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + h.decSlicePodDisruptionBudget((*[]PodDisruptionBudget)(yyv6), d) + } + } + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodDisruptionBudgetList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_unversioned.ListMeta{} + } else { + yyv11 := &x.ListMeta + yym12 := z.DecBinary() + _ = yym12 + if false { + } else if z.HasExtensions() && z.DecExt(yyv11) { + } else { + z.DecFallback(yyv11, false) + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv13 := &x.Items + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + h.decSlicePodDisruptionBudget((*[]PodDisruptionBudget)(yyv13), d) + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + 
x.Kind = string(r.DecodeString()) + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) encSlicePodDisruptionBudget(v []PodDisruptionBudget, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSlicePodDisruptionBudget(v *[]PodDisruptionBudget, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []PodDisruptionBudget{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 296) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]PodDisruptionBudget, yyrl1) + } + } else { + yyv1 = make([]PodDisruptionBudget, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = PodDisruptionBudget{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, PodDisruptionBudget{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = PodDisruptionBudget{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, PodDisruptionBudget{}) // var yyz1 PodDisruptionBudget + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = PodDisruptionBudget{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []PodDisruptionBudget{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/policy/types.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/policy/types.go new file mode 100644 index 000000000000..2ecf41bcffd3 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/policy/types.go @@ -0,0 +1,70 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package policy + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/util/intstr" +) + +// PodDisruptionBudgetSpec is a description of a PodDisruptionBudget. +type PodDisruptionBudgetSpec struct { + // The minimum number of pods that must be available simultaneously. This + // can be either an integer or a string specifying a percentage, e.g. "28%". + MinAvailable intstr.IntOrString `json:"minAvailable,omitempty"` + + // Label query over pods whose evictions are managed by the disruption + // budget. + Selector *unversioned.LabelSelector `json:"selector,omitempty"` +} + +// PodDisruptionBudgetStatus represents information about the status of a +// PodDisruptionBudget. Status may trail the actual state of a system. +type PodDisruptionBudgetStatus struct { + // Whether or not a disruption is currently allowed. + PodDisruptionAllowed bool `json:"disruptionAllowed"` + + // current number of healthy pods + CurrentHealthy int32 `json:"currentHealthy"` + + // minimum desired number of healthy pods + DesiredHealthy int32 `json:"desiredHealthy"` + + // total number of pods counted by this disruption budget + ExpectedPods int32 `json:"expectedPods"` +} + +// +genclient=true,noMethods=true + +// PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods +type PodDisruptionBudget struct { + unversioned.TypeMeta `json:",inline"` + api.ObjectMeta `json:"metadata,omitempty"` + + // Specification of the desired behavior of the PodDisruptionBudget. + Spec PodDisruptionBudgetSpec `json:"spec,omitempty"` + // Most recently observed status of the PodDisruptionBudget. + Status PodDisruptionBudgetStatus `json:"status,omitempty"` +} + +// PodDisruptionBudgetList is a collection of PodDisruptionBudgets. +type PodDisruptionBudgetList struct { + unversioned.TypeMeta `json:",inline"` + unversioned.ListMeta `json:"metadata,omitempty"` + Items []PodDisruptionBudget `json:"items"` +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/conversion_generated.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/conversion_generated.go new file mode 100644 index 000000000000..23aaa9a3787d --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/conversion_generated.go @@ -0,0 +1,183 @@ +// +build !ignore_autogenerated + +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// This file was autogenerated by conversion-gen. Do not edit it manually! + +package v1alpha1 + +import ( + api "k8s.io/kubernetes/pkg/api" + policy "k8s.io/kubernetes/pkg/apis/policy" + conversion "k8s.io/kubernetes/pkg/conversion" +) + +func init() { + if err := api.Scheme.AddGeneratedConversionFuncs( + Convert_v1alpha1_PodDisruptionBudget_To_policy_PodDisruptionBudget, + Convert_policy_PodDisruptionBudget_To_v1alpha1_PodDisruptionBudget, + Convert_v1alpha1_PodDisruptionBudgetList_To_policy_PodDisruptionBudgetList, + Convert_policy_PodDisruptionBudgetList_To_v1alpha1_PodDisruptionBudgetList, + Convert_v1alpha1_PodDisruptionBudgetSpec_To_policy_PodDisruptionBudgetSpec, + Convert_policy_PodDisruptionBudgetSpec_To_v1alpha1_PodDisruptionBudgetSpec, + Convert_v1alpha1_PodDisruptionBudgetStatus_To_policy_PodDisruptionBudgetStatus, + Convert_policy_PodDisruptionBudgetStatus_To_v1alpha1_PodDisruptionBudgetStatus, + ); err != nil { + // if one of the conversion functions is malformed, detect it immediately. + panic(err) + } +} + +func autoConvert_v1alpha1_PodDisruptionBudget_To_policy_PodDisruptionBudget(in *PodDisruptionBudget, out *policy.PodDisruptionBudget, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + // TODO: Inefficient conversion - can we improve it? + if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { + return err + } + if err := Convert_v1alpha1_PodDisruptionBudgetSpec_To_policy_PodDisruptionBudgetSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1alpha1_PodDisruptionBudgetStatus_To_policy_PodDisruptionBudgetStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v1alpha1_PodDisruptionBudget_To_policy_PodDisruptionBudget(in *PodDisruptionBudget, out *policy.PodDisruptionBudget, s conversion.Scope) error { + return autoConvert_v1alpha1_PodDisruptionBudget_To_policy_PodDisruptionBudget(in, out, s) +} + +func autoConvert_policy_PodDisruptionBudget_To_v1alpha1_PodDisruptionBudget(in *policy.PodDisruptionBudget, out *PodDisruptionBudget, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + // TODO: Inefficient conversion - can we improve it? 
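	// Editorial aside (not part of the vendored file): both autoConvert
	// functions carry this TODO because the internal policy package embeds
	// api.ObjectMeta while this v1alpha1 package embeds v1.ObjectMeta (see
	// the v1.DeepCopy_v1_ObjectMeta call in the deep-copy file below), so
	// conversion-gen has no direct generated helper for the metadata field
	// and falls back to the generic, reflection-based s.Convert call that
	// follows — the slower path the TODO refers to.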
+ if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { + return err + } + if err := Convert_policy_PodDisruptionBudgetSpec_To_v1alpha1_PodDisruptionBudgetSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_policy_PodDisruptionBudgetStatus_To_v1alpha1_PodDisruptionBudgetStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_policy_PodDisruptionBudget_To_v1alpha1_PodDisruptionBudget(in *policy.PodDisruptionBudget, out *PodDisruptionBudget, s conversion.Scope) error { + return autoConvert_policy_PodDisruptionBudget_To_v1alpha1_PodDisruptionBudget(in, out, s) +} + +func autoConvert_v1alpha1_PodDisruptionBudgetList_To_policy_PodDisruptionBudgetList(in *PodDisruptionBudgetList, out *policy.PodDisruptionBudgetList, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { + return err + } + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]policy.PodDisruptionBudget, len(*in)) + for i := range *in { + if err := Convert_v1alpha1_PodDisruptionBudget_To_policy_PodDisruptionBudget(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_v1alpha1_PodDisruptionBudgetList_To_policy_PodDisruptionBudgetList(in *PodDisruptionBudgetList, out *policy.PodDisruptionBudgetList, s conversion.Scope) error { + return autoConvert_v1alpha1_PodDisruptionBudgetList_To_policy_PodDisruptionBudgetList(in, out, s) +} + +func autoConvert_policy_PodDisruptionBudgetList_To_v1alpha1_PodDisruptionBudgetList(in *policy.PodDisruptionBudgetList, out *PodDisruptionBudgetList, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { + return err + } + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PodDisruptionBudget, len(*in)) + for i := range *in { + if err := Convert_policy_PodDisruptionBudget_To_v1alpha1_PodDisruptionBudget(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_policy_PodDisruptionBudgetList_To_v1alpha1_PodDisruptionBudgetList(in *policy.PodDisruptionBudgetList, out *PodDisruptionBudgetList, s conversion.Scope) error { + return autoConvert_policy_PodDisruptionBudgetList_To_v1alpha1_PodDisruptionBudgetList(in, out, s) +} + +func autoConvert_v1alpha1_PodDisruptionBudgetSpec_To_policy_PodDisruptionBudgetSpec(in *PodDisruptionBudgetSpec, out *policy.PodDisruptionBudgetSpec, s conversion.Scope) error { + if err := api.Convert_intstr_IntOrString_To_intstr_IntOrString(&in.MinAvailable, &out.MinAvailable, s); err != nil { + return err + } + out.Selector = in.Selector + return nil +} + +func Convert_v1alpha1_PodDisruptionBudgetSpec_To_policy_PodDisruptionBudgetSpec(in *PodDisruptionBudgetSpec, out *policy.PodDisruptionBudgetSpec, s conversion.Scope) error { + return autoConvert_v1alpha1_PodDisruptionBudgetSpec_To_policy_PodDisruptionBudgetSpec(in, out, s) +} + +func autoConvert_policy_PodDisruptionBudgetSpec_To_v1alpha1_PodDisruptionBudgetSpec(in *policy.PodDisruptionBudgetSpec, out *PodDisruptionBudgetSpec, s 
conversion.Scope) error { + if err := api.Convert_intstr_IntOrString_To_intstr_IntOrString(&in.MinAvailable, &out.MinAvailable, s); err != nil { + return err + } + out.Selector = in.Selector + return nil +} + +func Convert_policy_PodDisruptionBudgetSpec_To_v1alpha1_PodDisruptionBudgetSpec(in *policy.PodDisruptionBudgetSpec, out *PodDisruptionBudgetSpec, s conversion.Scope) error { + return autoConvert_policy_PodDisruptionBudgetSpec_To_v1alpha1_PodDisruptionBudgetSpec(in, out, s) +} + +func autoConvert_v1alpha1_PodDisruptionBudgetStatus_To_policy_PodDisruptionBudgetStatus(in *PodDisruptionBudgetStatus, out *policy.PodDisruptionBudgetStatus, s conversion.Scope) error { + out.PodDisruptionAllowed = in.PodDisruptionAllowed + out.CurrentHealthy = in.CurrentHealthy + out.DesiredHealthy = in.DesiredHealthy + out.ExpectedPods = in.ExpectedPods + return nil +} + +func Convert_v1alpha1_PodDisruptionBudgetStatus_To_policy_PodDisruptionBudgetStatus(in *PodDisruptionBudgetStatus, out *policy.PodDisruptionBudgetStatus, s conversion.Scope) error { + return autoConvert_v1alpha1_PodDisruptionBudgetStatus_To_policy_PodDisruptionBudgetStatus(in, out, s) +} + +func autoConvert_policy_PodDisruptionBudgetStatus_To_v1alpha1_PodDisruptionBudgetStatus(in *policy.PodDisruptionBudgetStatus, out *PodDisruptionBudgetStatus, s conversion.Scope) error { + out.PodDisruptionAllowed = in.PodDisruptionAllowed + out.CurrentHealthy = in.CurrentHealthy + out.DesiredHealthy = in.DesiredHealthy + out.ExpectedPods = in.ExpectedPods + return nil +} + +func Convert_policy_PodDisruptionBudgetStatus_To_v1alpha1_PodDisruptionBudgetStatus(in *policy.PodDisruptionBudgetStatus, out *PodDisruptionBudgetStatus, s conversion.Scope) error { + return autoConvert_policy_PodDisruptionBudgetStatus_To_v1alpha1_PodDisruptionBudgetStatus(in, out, s) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/deep_copy_generated.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/deep_copy_generated.go new file mode 100644 index 000000000000..74680aff846a --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/deep_copy_generated.go @@ -0,0 +1,102 @@ +// +build !ignore_autogenerated + +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package v1alpha1 + +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + v1 "k8s.io/kubernetes/pkg/api/v1" + conversion "k8s.io/kubernetes/pkg/conversion" + intstr "k8s.io/kubernetes/pkg/util/intstr" +) + +func init() { + if err := api.Scheme.AddGeneratedDeepCopyFuncs( + DeepCopy_v1alpha1_PodDisruptionBudget, + DeepCopy_v1alpha1_PodDisruptionBudgetList, + DeepCopy_v1alpha1_PodDisruptionBudgetSpec, + DeepCopy_v1alpha1_PodDisruptionBudgetStatus, + ); err != nil { + // if one of the deep copy functions is malformed, detect it immediately. 
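	// Editorial aside (not part of the vendored file): per the generated
	// comment above, AddGeneratedDeepCopyFuncs rejects functions whose
	// signatures it cannot handle, so a malformed generated func surfaces
	// as this panic on first import of the package rather than as a silent
	// no-op copy at runtime.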
+ panic(err) + } +} + +func DeepCopy_v1alpha1_PodDisruptionBudget(in PodDisruptionBudget, out *PodDisruptionBudget, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + return err + } + if err := DeepCopy_v1alpha1_PodDisruptionBudgetSpec(in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_v1alpha1_PodDisruptionBudgetStatus(in.Status, &out.Status, c); err != nil { + return err + } + return nil +} + +func DeepCopy_v1alpha1_PodDisruptionBudgetList(in PodDisruptionBudgetList, out *PodDisruptionBudgetList, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + return err + } + if in.Items != nil { + in, out := in.Items, &out.Items + *out = make([]PodDisruptionBudget, len(in)) + for i := range in { + if err := DeepCopy_v1alpha1_PodDisruptionBudget(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func DeepCopy_v1alpha1_PodDisruptionBudgetSpec(in PodDisruptionBudgetSpec, out *PodDisruptionBudgetSpec, c *conversion.Cloner) error { + if err := intstr.DeepCopy_intstr_IntOrString(in.MinAvailable, &out.MinAvailable, c); err != nil { + return err + } + if in.Selector != nil { + in, out := in.Selector, &out.Selector + *out = new(unversioned.LabelSelector) + if err := unversioned.DeepCopy_unversioned_LabelSelector(*in, *out, c); err != nil { + return err + } + } else { + out.Selector = nil + } + return nil +} + +func DeepCopy_v1alpha1_PodDisruptionBudgetStatus(in PodDisruptionBudgetStatus, out *PodDisruptionBudgetStatus, c *conversion.Cloner) error { + out.PodDisruptionAllowed = in.PodDisruptionAllowed + out.CurrentHealthy = in.CurrentHealthy + out.DesiredHealthy = in.DesiredHealthy + out.ExpectedPods = in.ExpectedPods + return nil +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/doc.go new file mode 100644 index 000000000000..5cb716c29952 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package policy is for any kind of policy object. Suitable examples, even if +// they aren't all here, are PodDisruptionBudget, PodSecurityPolicy, +// NetworkPolicy, etc. 
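Editorial aside: PodDisruptionBudget is the only type this vendored snapshot defines in the policy group, so a minimal client-side construction sketch may help readers of this diff. It is a hedged illustration, not code from the change itself: it assumes the Godeps import paths shown in this diff and the `intstr.FromString` helper, and the object name and labels are invented for the example.

```go
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/unversioned"
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/apis/policy/v1alpha1"
	"k8s.io/kubernetes/pkg/util/intstr"
)

func main() {
	// MinAvailable may be an absolute count or a percentage string, per the
	// field's doc comment in types.go; "28%" mirrors the example given there.
	pdb := v1alpha1.PodDisruptionBudget{
		TypeMeta:   unversioned.TypeMeta{Kind: "PodDisruptionBudget", APIVersion: "policy/v1alpha1"},
		ObjectMeta: v1.ObjectMeta{Name: "example-pdb", Namespace: "default"},
		Spec: v1alpha1.PodDisruptionBudgetSpec{
			MinAvailable: intstr.FromString("28%"),
			// Selector picks the pods whose evictions this budget governs.
			Selector: &unversioned.LabelSelector{
				MatchLabels: map[string]string{"app": "example"},
			},
		},
	}
	fmt.Printf("%s keeps %s of app=example pods available\n",
		pdb.Name, pdb.Spec.MinAvailable.String())
}
```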
+// +genconversion=true +package v1alpha1 diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/generated.pb.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/generated.pb.go new file mode 100644 index 000000000000..867a6b0a6dda --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/generated.pb.go @@ -0,0 +1,903 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/pkg/apis/policy/v1alpha1/generated.proto +// DO NOT EDIT! + +/* + Package v1alpha1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/pkg/apis/policy/v1alpha1/generated.proto + + It has these top-level messages: + PodDisruptionBudget + PodDisruptionBudgetList + PodDisruptionBudgetSpec + PodDisruptionBudgetStatus +*/ +package v1alpha1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import k8s_io_kubernetes_pkg_api_unversioned "k8s.io/kubernetes/pkg/api/unversioned" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +func (m *PodDisruptionBudget) Reset() { *m = PodDisruptionBudget{} } +func (m *PodDisruptionBudget) String() string { return proto.CompactTextString(m) } +func (*PodDisruptionBudget) ProtoMessage() {} + +func (m *PodDisruptionBudgetList) Reset() { *m = PodDisruptionBudgetList{} } +func (m *PodDisruptionBudgetList) String() string { return proto.CompactTextString(m) } +func (*PodDisruptionBudgetList) ProtoMessage() {} + +func (m *PodDisruptionBudgetSpec) Reset() { *m = PodDisruptionBudgetSpec{} } +func (m *PodDisruptionBudgetSpec) String() string { return proto.CompactTextString(m) } +func (*PodDisruptionBudgetSpec) ProtoMessage() {} + +func (m *PodDisruptionBudgetStatus) Reset() { *m = PodDisruptionBudgetStatus{} } +func (m *PodDisruptionBudgetStatus) String() string { return proto.CompactTextString(m) } +func (*PodDisruptionBudgetStatus) ProtoMessage() {} + +func init() { + proto.RegisterType((*PodDisruptionBudget)(nil), "k8s.io.kubernetes.pkg.apis.policy.v1alpha1.PodDisruptionBudget") + proto.RegisterType((*PodDisruptionBudgetList)(nil), "k8s.io.kubernetes.pkg.apis.policy.v1alpha1.PodDisruptionBudgetList") + proto.RegisterType((*PodDisruptionBudgetSpec)(nil), "k8s.io.kubernetes.pkg.apis.policy.v1alpha1.PodDisruptionBudgetSpec") + proto.RegisterType((*PodDisruptionBudgetStatus)(nil), "k8s.io.kubernetes.pkg.apis.policy.v1alpha1.PodDisruptionBudgetStatus") +} +func (m *PodDisruptionBudget) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodDisruptionBudget) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, 
uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n1 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n2, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n2 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n3, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n3 + return i, nil +} + +func (m *PodDisruptionBudgetList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodDisruptionBudgetList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n4, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n4 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *PodDisruptionBudgetSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodDisruptionBudgetSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.MinAvailable.Size())) + n5, err := m.MinAvailable.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n5 + if m.Selector != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Selector.Size())) + n6, err := m.Selector.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n6 + } + return i, nil +} + +func (m *PodDisruptionBudgetStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodDisruptionBudgetStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + if m.PodDisruptionAllowed { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(m.CurrentHealthy)) + data[i] = 0x18 + i++ + i = encodeVarintGenerated(data, i, uint64(m.DesiredHealthy)) + data[i] = 0x20 + i++ + i = encodeVarintGenerated(data, i, uint64(m.ExpectedPods)) + return i, nil +} + +func encodeFixed64Generated(data []byte, offset int, v uint64) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + data[offset+4] = uint8(v >> 32) + data[offset+5] = uint8(v >> 40) + data[offset+6] = uint8(v >> 48) + data[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(data []byte, offset int, v uint32) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(data []byte, offset int, v uint64) int { + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return offset + 1 +} +func (m *PodDisruptionBudget) Size() (n int) { + var l int + _ = l + l = 
m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodDisruptionBudgetList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodDisruptionBudgetSpec) Size() (n int) { + var l int + _ = l + l = m.MinAvailable.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *PodDisruptionBudgetStatus) Size() (n int) { + var l int + _ = l + n += 2 + n += 1 + sovGenerated(uint64(m.CurrentHealthy)) + n += 1 + sovGenerated(uint64(m.DesiredHealthy)) + n += 1 + sovGenerated(uint64(m.ExpectedPods)) + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *PodDisruptionBudget) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodDisruptionBudget: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodDisruptionBudget: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + 
postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodDisruptionBudgetList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodDisruptionBudgetList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodDisruptionBudgetList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, PodDisruptionBudget{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodDisruptionBudgetSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodDisruptionBudgetSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodDisruptionBudgetSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + 
switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MinAvailable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.MinAvailable.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &k8s_io_kubernetes_pkg_api_unversioned.LabelSelector{} + } + if err := m.Selector.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodDisruptionBudgetStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodDisruptionBudgetStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodDisruptionBudgetStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PodDisruptionAllowed", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.PodDisruptionAllowed = bool(v != 0) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentHealthy", wireType) + } + m.CurrentHealthy = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.CurrentHealthy |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DesiredHealthy", wireType) + } + m.DesiredHealthy = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + 
m.DesiredHealthy |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExpectedPods", wireType) + } + m.ExpectedPods = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.ExpectedPods |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(data []byte) (n int, err error) { + l := len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if data[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(data[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/generated.proto b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/generated.proto new file mode 100644 index 000000000000..866d0ae57882 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/generated.proto @@ -0,0 +1,77 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.kubernetes.pkg.apis.policy.v1alpha1; + +import "k8s.io/kubernetes/pkg/api/resource/generated.proto"; +import "k8s.io/kubernetes/pkg/api/unversioned/generated.proto"; +import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; +import "k8s.io/kubernetes/pkg/runtime/generated.proto"; +import "k8s.io/kubernetes/pkg/util/intstr/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1alpha1"; + +// PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods +message PodDisruptionBudget { + optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; + + // Specification of the desired behavior of the PodDisruptionBudget. + optional PodDisruptionBudgetSpec spec = 2; + + // Most recently observed status of the PodDisruptionBudget. + optional PodDisruptionBudgetStatus status = 3; +} + +// PodDisruptionBudgetList is a collection of PodDisruptionBudgets. +message PodDisruptionBudgetList { + optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; + + repeated PodDisruptionBudget items = 2; +} + +// PodDisruptionBudgetSpec is a description of a PodDisruptionBudget. +message PodDisruptionBudgetSpec { + // The minimum number of pods that must be available simultaneously. This + // can be either an integer or a string specifying a percentage, e.g. "28%". + optional k8s.io.kubernetes.pkg.util.intstr.IntOrString minAvailable = 1; + + // Label query over pods whose evictions are managed by the disruption + // budget. + optional k8s.io.kubernetes.pkg.api.unversioned.LabelSelector selector = 2; +} + +// PodDisruptionBudgetStatus represents information about the status of a +// PodDisruptionBudget. Status may trail the actual state of a system. +message PodDisruptionBudgetStatus { + // Whether or not a disruption is currently allowed. + optional bool disruptionAllowed = 1; + + // current number of healthy pods + optional int32 currentHealthy = 2; + + // minimum desired number of healthy pods + optional int32 desiredHealthy = 3; + + // total number of pods counted by this disruption budget + optional int32 expectedPods = 4; +} + diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/register.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/register.go new file mode 100644 index 000000000000..ac41af6dbf8f --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/register.go @@ -0,0 +1,50 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
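Each field in the schema above is serialized behind a varint key of (field_number << 3) | wire_type, which is exactly what the Unmarshal code earlier in this diff switches on: optional int32 currentHealthy = 2, for instance, arrives as key byte 0x10 with wire type 0. A small sketch of hand-encoding a PodDisruptionBudgetStatus under this schema (standalone, assuming nothing beyond the field numbers declared here):

package main

import "fmt"

// appendVarint is a minimal base-128 varint encoder, the inverse of the
// decode loops in the generated Unmarshal code.
func appendVarint(b []byte, v uint64) []byte {
	for v >= 0x80 {
		b = append(b, byte(v)|0x80)
		v >>= 7
	}
	return append(b, byte(v))
}

func main() {
	// Encode disruptionAllowed=true (field 1) and currentHealthy=300 (field 2).
	var msg []byte
	msg = appendVarint(msg, 1<<3|0) // key: field 1, wire type 0 (varint)
	msg = appendVarint(msg, 1)      // bool true
	msg = appendVarint(msg, 2<<3|0) // key: field 2, wire type 0
	msg = appendVarint(msg, 300)    // int32 value
	fmt.Printf("% x\n", msg) // 08 01 10 ac 02
}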
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/runtime" + versionedwatch "k8s.io/kubernetes/pkg/watch/versioned" +) + +// GroupName is the group name used in this package +const GroupName = "policy" + +// SchemeGroupVersion is the group version used to register these objects +var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: "v1alpha1"} + +func AddToScheme(scheme *runtime.Scheme) { + addKnownTypes(scheme) + /* + addDefaultingFuncs(scheme) + addConversionFuncs(scheme) + */ +} + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) { + scheme.AddKnownTypes(SchemeGroupVersion, + &PodDisruptionBudget{}, + &PodDisruptionBudgetList{}, + ) + // Add the watch version that applies + versionedwatch.AddToGroupVersion(scheme, SchemeGroupVersion) +} + +func (obj *PodDisruptionBudget) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } +func (obj *PodDisruptionBudgetList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/types.generated.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/types.generated.go new file mode 100644 index 000000000000..7ed4308bcc64 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/types.generated.go @@ -0,0 +1,1440 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// ************************************************************ +// DO NOT EDIT. +// THIS FILE IS AUTO-GENERATED BY codecgen. +// ************************************************************ + +package v1alpha1 + +import ( + "errors" + "fmt" + codec1978 "github.com/ugorji/go/codec" + pkg2_unversioned "k8s.io/kubernetes/pkg/api/unversioned" + pkg3_v1 "k8s.io/kubernetes/pkg/api/v1" + pkg4_types "k8s.io/kubernetes/pkg/types" + pkg1_intstr "k8s.io/kubernetes/pkg/util/intstr" + "reflect" + "runtime" + time "time" +) + +const ( + // ----- content types ---- + codecSelferC_UTF81234 = 1 + codecSelferC_RAW1234 = 0 + // ----- value types used ---- + codecSelferValueTypeArray1234 = 10 + codecSelferValueTypeMap1234 = 9 + // ----- containerStateValues ---- + codecSelfer_containerMapKey1234 = 2 + codecSelfer_containerMapValue1234 = 3 + codecSelfer_containerMapEnd1234 = 4 + codecSelfer_containerArrayElem1234 = 6 + codecSelfer_containerArrayEnd1234 = 7 +) + +var ( + codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) + codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) +) + +type codecSelfer1234 struct{} + +func init() { + if codec1978.GenVersion != 5 { + _, file, _, _ := runtime.Caller(0) + err := fmt.Errorf("codecgen version mismatch: current: %v, need %v.
Re-generate file: %v", + 5, codec1978.GenVersion, file) + panic(err) + } + if false { // reference the types, but skip this branch at build/run time + var v0 pkg2_unversioned.LabelSelector + var v1 pkg3_v1.ObjectMeta + var v2 pkg4_types.UID + var v3 pkg1_intstr.IntOrString + var v4 time.Time + _, _, _, _, _ = v0, v1, v2, v3, v4 + } +} + +func (x *PodDisruptionBudgetSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = true + yyq2[1] = x.Selector != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yy4 := &x.MinAvailable + yym5 := z.EncBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.EncExt(yy4) { + } else if !yym5 && z.IsJSONHandle() { + z.EncJSONMarshal(yy4) + } else { + z.EncFallback(yy4) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("minAvailable")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.MinAvailable + yym7 := z.EncBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.EncExt(yy6) { + } else if !yym7 && z.IsJSONHandle() { + z.EncJSONMarshal(yy6) + } else { + z.EncFallback(yy6) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Selector == nil { + r.EncodeNil() + } else { + yym9 := z.EncBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.EncExt(x.Selector) { + } else { + z.EncFallback(x.Selector) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("selector")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Selector == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.EncExt(x.Selector) { + } else { + z.EncFallback(x.Selector) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodDisruptionBudgetSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x 
*PodDisruptionBudgetSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "minAvailable": + if r.TryDecodeAsNil() { + x.MinAvailable = pkg1_intstr.IntOrString{} + } else { + yyv4 := &x.MinAvailable + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(yyv4) { + } else if !yym5 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv4) + } else { + z.DecFallback(yyv4, false) + } + } + case "selector": + if r.TryDecodeAsNil() { + if x.Selector != nil { + x.Selector = nil + } + } else { + if x.Selector == nil { + x.Selector = new(pkg2_unversioned.LabelSelector) + } + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(x.Selector) { + } else { + z.DecFallback(x.Selector, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodDisruptionBudgetSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.MinAvailable = pkg1_intstr.IntOrString{} + } else { + yyv9 := &x.MinAvailable + yym10 := z.DecBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.DecExt(yyv9) { + } else if !yym10 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv9) + } else { + z.DecFallback(yyv9, false) + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Selector != nil { + x.Selector = nil + } + } else { + if x.Selector == nil { + x.Selector = new(pkg2_unversioned.LabelSelector) + } + yym12 := z.DecBinary() + _ = yym12 + if false { + } else if z.HasExtensions() && z.DecExt(x.Selector) { + } else { + z.DecFallback(x.Selector, false) + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PodDisruptionBudgetStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || 
yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 4 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeBool(bool(x.PodDisruptionAllowed)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("disruptionAllowed")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeBool(bool(x.PodDisruptionAllowed)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeInt(int64(x.CurrentHealthy)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("currentHealthy")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeInt(int64(x.CurrentHealthy)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(x.DesiredHealthy)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("desiredHealthy")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeInt(int64(x.DesiredHealthy)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeInt(int64(x.ExpectedPods)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("expectedPods")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeInt(int64(x.ExpectedPods)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodDisruptionBudgetStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodDisruptionBudgetStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + 
yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "disruptionAllowed": + if r.TryDecodeAsNil() { + x.PodDisruptionAllowed = false + } else { + x.PodDisruptionAllowed = bool(r.DecodeBool()) + } + case "currentHealthy": + if r.TryDecodeAsNil() { + x.CurrentHealthy = 0 + } else { + x.CurrentHealthy = int32(r.DecodeInt(32)) + } + case "desiredHealthy": + if r.TryDecodeAsNil() { + x.DesiredHealthy = 0 + } else { + x.DesiredHealthy = int32(r.DecodeInt(32)) + } + case "expectedPods": + if r.TryDecodeAsNil() { + x.ExpectedPods = 0 + } else { + x.ExpectedPods = int32(r.DecodeInt(32)) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodDisruptionBudgetStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.PodDisruptionAllowed = false + } else { + x.PodDisruptionAllowed = bool(r.DecodeBool()) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.CurrentHealthy = 0 + } else { + x.CurrentHealthy = int32(r.DecodeInt(32)) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.DesiredHealthy = 0 + } else { + x.DesiredHealthy = int32(r.DecodeInt(32)) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ExpectedPods = 0 + } else { + x.ExpectedPods = int32(r.DecodeInt(32)) + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PodDisruptionBudget) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = true + yyq2[1] = true + yyq2[2] = true + yyq2[3] = x.Kind != "" + yyq2[4] = x.APIVersion != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] 
{ + yy4 := &x.ObjectMeta + yy4.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.ObjectMeta + yy6.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yy9 := &x.Spec + yy9.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy11 := &x.Spec + yy11.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy14 := &x.Status + yy14.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy16 := &x.Status + yy16.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodDisruptionBudget) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodDisruptionBudget) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, 
_ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg3_v1.ObjectMeta{} + } else { + yyv4 := &x.ObjectMeta + yyv4.CodecDecodeSelf(d) + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = PodDisruptionBudgetSpec{} + } else { + yyv5 := &x.Spec + yyv5.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = PodDisruptionBudgetStatus{} + } else { + yyv6 := &x.Status + yyv6.CodecDecodeSelf(d) + } + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodDisruptionBudget) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj9 int + var yyb9 bool + var yyhl9 bool = l >= 0 + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg3_v1.ObjectMeta{} + } else { + yyv10 := &x.ObjectMeta + yyv10.CodecDecodeSelf(d) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = PodDisruptionBudgetSpec{} + } else { + yyv11 := &x.Spec + yyv11.CodecDecodeSelf(d) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = PodDisruptionBudgetStatus{} + } else { + yyv12 := &x.Status + yyv12.CodecDecodeSelf(d) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + for { + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj9-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PodDisruptionBudgetList) CodecEncodeSelf(e 
*codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = true + yyq2[2] = x.Kind != "" + yyq2[3] = x.APIVersion != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yy4 := &x.ListMeta + yym5 := z.EncBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.EncExt(yy4) { + } else { + z.EncFallback(yy4) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.ListMeta + yym7 := z.EncBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.EncExt(yy6) { + } else { + z.EncFallback(yy6) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym9 := z.EncBinary() + _ = yym9 + if false { + } else { + h.encSlicePodDisruptionBudget(([]PodDisruptionBudget)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + h.encSlicePodDisruptionBudget(([]PodDisruptionBudget)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym12 := z.EncBinary() + _ = yym12 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodDisruptionBudgetList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := 
z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodDisruptionBudgetList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_unversioned.ListMeta{} + } else { + yyv4 := &x.ListMeta + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(yyv4) { + } else { + z.DecFallback(yyv4, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv6 := &x.Items + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + h.decSlicePodDisruptionBudget((*[]PodDisruptionBudget)(yyv6), d) + } + } + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodDisruptionBudgetList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_unversioned.ListMeta{} + } else { + yyv11 := &x.ListMeta + yym12 := z.DecBinary() + _ = yym12 + if false { + } else if z.HasExtensions() && z.DecExt(yyv11) { + } else { + z.DecFallback(yyv11, false) + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv13 := &x.Items + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + h.decSlicePodDisruptionBudget((*[]PodDisruptionBudget)(yyv13), d) + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + 
x.Kind = string(r.DecodeString()) + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) encSlicePodDisruptionBudget(v []PodDisruptionBudget, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSlicePodDisruptionBudget(v *[]PodDisruptionBudget, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []PodDisruptionBudget{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 296) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]PodDisruptionBudget, yyrl1) + } + } else { + yyv1 = make([]PodDisruptionBudget, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = PodDisruptionBudget{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, PodDisruptionBudget{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = PodDisruptionBudget{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, PodDisruptionBudget{}) // var yyz1 PodDisruptionBudget + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = PodDisruptionBudget{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []PodDisruptionBudget{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/types.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/types.go new file mode 100644 index 000000000000..1f3265ae272e --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/types.go @@ -0,0 +1,71 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. 
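The codecgen output above exists so that the ugorji codec runtime can dispatch to the hand-specialized CodecEncodeSelf/CodecDecodeSelf methods instead of falling back to reflection. A hedged round-trip sketch, assuming the vendored import paths from this diff and the codec API as of GenVersion 5:

package main

import (
	"fmt"

	codec "github.com/ugorji/go/codec"
	policyv1alpha1 "k8s.io/kubernetes/pkg/apis/policy/v1alpha1"
	"k8s.io/kubernetes/pkg/util/intstr"
)

func main() {
	in := policyv1alpha1.PodDisruptionBudget{
		Spec: policyv1alpha1.PodDisruptionBudgetSpec{MinAvailable: intstr.FromInt(2)},
	}

	// Encoder/Decoder notice the generated Selfer methods automatically.
	var buf []byte
	h := &codec.JsonHandle{}
	if err := codec.NewEncoderBytes(&buf, h).Encode(&in); err != nil {
		panic(err)
	}

	var out policyv1alpha1.PodDisruptionBudget
	if err := codec.NewDecoderBytes(buf, h).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println(string(buf), out.Spec.MinAvailable.IntValue())
}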
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "k8s.io/kubernetes/pkg/api/unversioned" + + "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/util/intstr" +) + +// PodDisruptionBudgetSpec is a description of a PodDisruptionBudget. +type PodDisruptionBudgetSpec struct { + // The minimum number of pods that must be available simultaneously. This + // can be either an integer or a string specifying a percentage, e.g. "28%". + MinAvailable intstr.IntOrString `json:"minAvailable,omitempty" protobuf:"bytes,1,opt,name=minAvailable"` + + // Label query over pods whose evictions are managed by the disruption + // budget. + Selector *unversioned.LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"` +} + +// PodDisruptionBudgetStatus represents information about the status of a +// PodDisruptionBudget. Status may trail the actual state of a system. +type PodDisruptionBudgetStatus struct { + // Whether or not a disruption is currently allowed. + PodDisruptionAllowed bool `json:"disruptionAllowed" protobuf:"varint,1,opt,name=disruptionAllowed"` + + // current number of healthy pods + CurrentHealthy int32 `json:"currentHealthy" protobuf:"varint,2,opt,name=currentHealthy"` + + // minimum desired number of healthy pods + DesiredHealthy int32 `json:"desiredHealthy" protobuf:"varint,3,opt,name=desiredHealthy"` + + // total number of pods counted by this disruption budget + ExpectedPods int32 `json:"expectedPods" protobuf:"varint,4,opt,name=expectedPods"` +} + +// +genclient=true,noMethods=true + +// PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods +type PodDisruptionBudget struct { + unversioned.TypeMeta `json:",inline"` + v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Specification of the desired behavior of the PodDisruptionBudget. + Spec PodDisruptionBudgetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + // Most recently observed status of the PodDisruptionBudget. + Status PodDisruptionBudgetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// PodDisruptionBudgetList is a collection of PodDisruptionBudgets. +type PodDisruptionBudgetList struct { + unversioned.TypeMeta `json:",inline"` + unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + Items []PodDisruptionBudget `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/types_swagger_doc_generated.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/types_swagger_doc_generated.go new file mode 100644 index 000000000000..8ca1782f4f98 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/types_swagger_doc_generated.go @@ -0,0 +1,70 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. 
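types.go is the hand-written source the generated files derive from: the json tags fix the field names used by the codec and swagger files, and the protobuf tags fix the field numbers echoed in generated.proto and the Unmarshal code. A short construction and JSON marshaling sketch, assuming the vendored paths:

package main

import (
	"encoding/json"
	"fmt"

	"k8s.io/kubernetes/pkg/api/unversioned"
	policyv1alpha1 "k8s.io/kubernetes/pkg/apis/policy/v1alpha1"
	"k8s.io/kubernetes/pkg/util/intstr"
)

func main() {
	spec := policyv1alpha1.PodDisruptionBudgetSpec{
		// IntOrString accepts an absolute count (intstr.FromInt) or a percentage.
		MinAvailable: intstr.FromString("80%"),
		Selector: &unversioned.LabelSelector{
			MatchLabels: map[string]string{"app": "frontend"},
		},
	}
	b, err := json.Marshal(spec)
	if err != nil {
		panic(err)
	}
	// Field names come from the json tags above:
	// {"minAvailable":"80%","selector":{"matchLabels":{"app":"frontend"}}}
	fmt.Println(string(b))
}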
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_PodDisruptionBudget = map[string]string{ + "": "PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods", + "spec": "Specification of the desired behavior of the PodDisruptionBudget.", + "status": "Most recently observed status of the PodDisruptionBudget.", +} + +func (PodDisruptionBudget) SwaggerDoc() map[string]string { + return map_PodDisruptionBudget +} + +var map_PodDisruptionBudgetList = map[string]string{ + "": "PodDisruptionBudgetList is a collection of PodDisruptionBudgets.", +} + +func (PodDisruptionBudgetList) SwaggerDoc() map[string]string { + return map_PodDisruptionBudgetList +} + +var map_PodDisruptionBudgetSpec = map[string]string{ + "": "PodDisruptionBudgetSpec is a description of a PodDisruptionBudget.", + "minAvailable": "The minimum number of pods that must be available simultaneously. This can be either an integer or a string specifying a percentage, e.g. \"28%\".", + "selector": "Label query over pods whose evictions are managed by the disruption budget.", +} + +func (PodDisruptionBudgetSpec) SwaggerDoc() map[string]string { + return map_PodDisruptionBudgetSpec +} + +var map_PodDisruptionBudgetStatus = map[string]string{ + "": "PodDisruptionBudgetStatus represents information about the status of a PodDisruptionBudget. Status may trail the actual state of a system.", + "disruptionAllowed": "Whether or not a disruption is currently allowed.", + "currentHealthy": "current number of healthy pods", + "desiredHealthy": "minimum desired number of healthy pods", + "expectedPods": "total number of pods counted by this disruption budget", +} + +func (PodDisruptionBudgetStatus) SwaggerDoc() map[string]string { + return map_PodDisruptionBudgetStatus +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/policy/validation/validation.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/policy/validation/validation.go new file mode 100644 index 000000000000..6d40450ba880 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/policy/validation/validation.go @@ -0,0 +1,55 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
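The swagger maps above follow the go-restful convention they reference: each type exposes a SwaggerDoc() method keyed by JSON field name, with the empty key carrying the type-level description. A sketch of how a documentation consumer would read it:

package main

import (
	"fmt"

	policyv1alpha1 "k8s.io/kubernetes/pkg/apis/policy/v1alpha1"
)

func main() {
	docs := policyv1alpha1.PodDisruptionBudgetSpec{}.SwaggerDoc()
	fmt.Println(docs[""])             // type-level description
	fmt.Println(docs["minAvailable"]) // per-field description
}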
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package validation + +import ( + "reflect" + + unversionedvalidation "k8s.io/kubernetes/pkg/api/unversioned/validation" + extensionsvalidation "k8s.io/kubernetes/pkg/apis/extensions/validation" + "k8s.io/kubernetes/pkg/apis/policy" + "k8s.io/kubernetes/pkg/util/validation/field" +) + +func ValidatePodDisruptionBudget(pdb *policy.PodDisruptionBudget) field.ErrorList { + allErrs := ValidatePodDisruptionBudgetSpec(pdb.Spec, field.NewPath("spec")) + return allErrs +} + +func ValidatePodDisruptionBudgetUpdate(pdb, oldPdb *policy.PodDisruptionBudget) field.ErrorList { + allErrs := field.ErrorList{} + + restoreGeneration := pdb.Generation + pdb.Generation = oldPdb.Generation + + if !reflect.DeepEqual(pdb, oldPdb) { + allErrs = append(allErrs, field.Forbidden(field.NewPath("spec"), "updates to poddisruptionbudget spec are forbidden.")) + } + + pdb.Generation = restoreGeneration + return allErrs +} + +func ValidatePodDisruptionBudgetSpec(spec policy.PodDisruptionBudgetSpec, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + allErrs = append(allErrs, extensionsvalidation.ValidatePositiveIntOrPercent(spec.MinAvailable, fldPath.Child("minAvailable"))...) + allErrs = append(allErrs, extensionsvalidation.IsNotMoreThan100Percent(spec.MinAvailable, fldPath.Child("minAvailable"))...) + allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(spec.Selector, fldPath.Child("selector"))...) + + return allErrs +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/policy/validation/validation_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/policy/validation/validation_test.go new file mode 100644 index 000000000000..9b07371564aa --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/policy/validation/validation_test.go @@ -0,0 +1,62 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
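ValidatePodDisruptionBudgetUpdate above makes the object immutable apart from metadata.generation: it copies the old Generation onto the new object, deep-compares, and then restores the saved value, so a Generation bump alone never registers as a forbidden change. The same compare-all-but-one-field pattern in isolation (specChanged is a hypothetical helper name, using the policy types from this diff):

package validation

import (
	"reflect"

	"k8s.io/kubernetes/pkg/apis/policy"
)

// specChanged sketches the pattern used by ValidatePodDisruptionBudgetUpdate:
// align the one field allowed to differ, deep-compare, then restore it.
func specChanged(newPdb, oldPdb *policy.PodDisruptionBudget) bool {
	saved := newPdb.Generation
	newPdb.Generation = oldPdb.Generation
	changed := !reflect.DeepEqual(newPdb, oldPdb)
	newPdb.Generation = saved
	return changed
}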
+*/ + +package validation + +import ( + "testing" + + "k8s.io/kubernetes/pkg/apis/policy" + "k8s.io/kubernetes/pkg/util/intstr" + "k8s.io/kubernetes/pkg/util/validation/field" +) + +func TestValidatePodDisruptionBudgetSpec(t *testing.T) { + successCases := []intstr.IntOrString{ + intstr.FromString("0%"), + intstr.FromString("1%"), + intstr.FromString("100%"), + intstr.FromInt(0), + intstr.FromInt(1), + intstr.FromInt(100), + } + for _, c := range successCases { + spec := policy.PodDisruptionBudgetSpec{ + MinAvailable: c, + } + errs := ValidatePodDisruptionBudgetSpec(spec, field.NewPath("foo")) + if len(errs) != 0 { + t.Errorf("unexpected failure %v for %v", errs, spec) + } + } + + failureCases := []intstr.IntOrString{ + intstr.FromString("1.1%"), + intstr.FromString("nope"), + intstr.FromString("-1%"), + intstr.FromString("101%"), + intstr.FromInt(-1), + } + for _, c := range failureCases { + spec := policy.PodDisruptionBudgetSpec{ + MinAvailable: c, + } + errs := ValidatePodDisruptionBudgetSpec(spec, field.NewPath("foo")) + if len(errs) == 0 { + t.Errorf("unexpected success for %v", spec) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/rbac/deep_copy_generated.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/rbac/deep_copy_generated.go new file mode 100644 index 000000000000..5e9339a989bc --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/rbac/deep_copy_generated.go @@ -0,0 +1,274 @@ +// +build !ignore_autogenerated + +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package rbac + +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + conversion "k8s.io/kubernetes/pkg/conversion" + runtime "k8s.io/kubernetes/pkg/runtime" +) + +func init() { + if err := api.Scheme.AddGeneratedDeepCopyFuncs( + DeepCopy_rbac_ClusterRole, + DeepCopy_rbac_ClusterRoleBinding, + DeepCopy_rbac_ClusterRoleBindingList, + DeepCopy_rbac_ClusterRoleList, + DeepCopy_rbac_PolicyRule, + DeepCopy_rbac_Role, + DeepCopy_rbac_RoleBinding, + DeepCopy_rbac_RoleBindingList, + DeepCopy_rbac_RoleList, + DeepCopy_rbac_Subject, + ); err != nil { + // if one of the deep copy functions is malformed, detect it immediately. 
+ panic(err) + } +} + +func DeepCopy_rbac_ClusterRole(in ClusterRole, out *ClusterRole, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + return err + } + if in.Rules != nil { + in, out := in.Rules, &out.Rules + *out = make([]PolicyRule, len(in)) + for i := range in { + if err := DeepCopy_rbac_PolicyRule(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Rules = nil + } + return nil +} + +func DeepCopy_rbac_ClusterRoleBinding(in ClusterRoleBinding, out *ClusterRoleBinding, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + return err + } + if in.Subjects != nil { + in, out := in.Subjects, &out.Subjects + *out = make([]Subject, len(in)) + for i := range in { + if err := DeepCopy_rbac_Subject(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Subjects = nil + } + if err := api.DeepCopy_api_ObjectReference(in.RoleRef, &out.RoleRef, c); err != nil { + return err + } + return nil +} + +func DeepCopy_rbac_ClusterRoleBindingList(in ClusterRoleBindingList, out *ClusterRoleBindingList, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + return err + } + if in.Items != nil { + in, out := in.Items, &out.Items + *out = make([]ClusterRoleBinding, len(in)) + for i := range in { + if err := DeepCopy_rbac_ClusterRoleBinding(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func DeepCopy_rbac_ClusterRoleList(in ClusterRoleList, out *ClusterRoleList, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + return err + } + if in.Items != nil { + in, out := in.Items, &out.Items + *out = make([]ClusterRole, len(in)) + for i := range in { + if err := DeepCopy_rbac_ClusterRole(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func DeepCopy_rbac_PolicyRule(in PolicyRule, out *PolicyRule, c *conversion.Cloner) error { + if in.Verbs != nil { + in, out := in.Verbs, &out.Verbs + *out = make([]string, len(in)) + copy(*out, in) + } else { + out.Verbs = nil + } + if in.AttributeRestrictions == nil { + out.AttributeRestrictions = nil + } else if newVal, err := c.DeepCopy(in.AttributeRestrictions); err != nil { + return err + } else { + out.AttributeRestrictions = newVal.(runtime.Object) + } + if in.APIGroups != nil { + in, out := in.APIGroups, &out.APIGroups + *out = make([]string, len(in)) + copy(*out, in) + } else { + out.APIGroups = nil + } + if in.Resources != nil { + in, out := in.Resources, &out.Resources + *out = make([]string, len(in)) + copy(*out, in) + } else { + out.Resources = nil + } + if in.ResourceNames != nil { + in, out := in.ResourceNames, &out.ResourceNames + *out = make([]string, len(in)) + copy(*out, in) + } else { + out.ResourceNames = nil + } + if in.NonResourceURLs != nil { + in, out := 
in.NonResourceURLs, &out.NonResourceURLs + *out = make([]string, len(in)) + copy(*out, in) + } else { + out.NonResourceURLs = nil + } + return nil +} + +func DeepCopy_rbac_Role(in Role, out *Role, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + return err + } + if in.Rules != nil { + in, out := in.Rules, &out.Rules + *out = make([]PolicyRule, len(in)) + for i := range in { + if err := DeepCopy_rbac_PolicyRule(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Rules = nil + } + return nil +} + +func DeepCopy_rbac_RoleBinding(in RoleBinding, out *RoleBinding, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + return err + } + if in.Subjects != nil { + in, out := in.Subjects, &out.Subjects + *out = make([]Subject, len(in)) + for i := range in { + if err := DeepCopy_rbac_Subject(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Subjects = nil + } + if err := api.DeepCopy_api_ObjectReference(in.RoleRef, &out.RoleRef, c); err != nil { + return err + } + return nil +} + +func DeepCopy_rbac_RoleBindingList(in RoleBindingList, out *RoleBindingList, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + return err + } + if in.Items != nil { + in, out := in.Items, &out.Items + *out = make([]RoleBinding, len(in)) + for i := range in { + if err := DeepCopy_rbac_RoleBinding(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func DeepCopy_rbac_RoleList(in RoleList, out *RoleList, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + return err + } + if in.Items != nil { + in, out := in.Items, &out.Items + *out = make([]Role, len(in)) + for i := range in { + if err := DeepCopy_rbac_Role(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func DeepCopy_rbac_Subject(in Subject, out *Subject, c *conversion.Cloner) error { + out.Kind = in.Kind + out.APIVersion = in.APIVersion + out.Name = in.Name + out.Namespace = in.Namespace + return nil +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/rbac/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/rbac/doc.go new file mode 100644 index 000000000000..15f91da2c380 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/rbac/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
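Every generated deep-copy function above uses the same shadowing idiom for reference fields: rebind in and out to the field being copied, allocate fresh storage, then copy element by element so no backing arrays are shared with the source. The idiom in isolation, on hypothetical stand-in types:

package main

import "fmt"

type Rule struct{ Verbs []string }
type Role struct{ Rules []Rule }

// deepCopyRole mirrors the shape of DeepCopy_rbac_Role: a nil check, a
// field-scoped shadow of in/out, a fresh slice, and a per-element copy.
func deepCopyRole(in Role, out *Role) {
	if in.Rules != nil {
		in, out := in.Rules, &out.Rules
		*out = make([]Rule, len(in))
		for i := range in {
			(*out)[i].Verbs = append([]string(nil), in[i].Verbs...)
		}
	} else {
		out.Rules = nil
	}
}

func main() {
	src := Role{Rules: []Rule{{Verbs: []string{"get", "list"}}}}
	var dst Role
	deepCopyRole(src, &dst)
	dst.Rules[0].Verbs[0] = "watch"
	fmt.Println(src.Rules[0].Verbs[0]) // still "get": no storage is shared
}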
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +groupName=rbac.authorization.k8s.io
+package rbac
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/rbac/install/install.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/rbac/install/install.go
new file mode 100644
index 000000000000..8cac247f4cad
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/rbac/install/install.go
@@ -0,0 +1,130 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package install installs the rbac API group, making it available as
+// an option to all of the API encoding/decoding machinery.
+package install
+
+import (
+	"fmt"
+
+	"github.com/golang/glog"
+
+	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/api/meta"
+	"k8s.io/kubernetes/pkg/api/unversioned"
+	"k8s.io/kubernetes/pkg/apimachinery"
+	"k8s.io/kubernetes/pkg/apimachinery/registered"
+	"k8s.io/kubernetes/pkg/apis/rbac"
+	"k8s.io/kubernetes/pkg/apis/rbac/v1alpha1"
+	"k8s.io/kubernetes/pkg/runtime"
+	"k8s.io/kubernetes/pkg/util/sets"
+)
+
+const importPrefix = "k8s.io/kubernetes/pkg/apis/rbac"
+
+var accessor = meta.NewAccessor()
+
+// availableVersions lists all known external versions for this group from most preferred to least preferred
+var availableVersions = []unversioned.GroupVersion{v1alpha1.SchemeGroupVersion}
+
+func init() {
+	registered.RegisterVersions(availableVersions)
+	externalVersions := []unversioned.GroupVersion{}
+	for _, v := range availableVersions {
+		if registered.IsAllowedVersion(v) {
+			externalVersions = append(externalVersions, v)
+		}
+	}
+	if len(externalVersions) == 0 {
+		glog.V(4).Infof("No version is registered for group %v", rbac.GroupName)
+		return
+	}
+
+	if err := registered.EnableVersions(externalVersions...); err != nil {
+		glog.V(4).Infof("%v", err)
+		return
+	}
+	if err := enableVersions(externalVersions); err != nil {
+		glog.V(4).Infof("%v", err)
+		return
+	}
+}
+
+// TODO: enableVersions should be centralized rather than spread in each API
+// group.
+// We can combine registered.RegisterVersions, registered.EnableVersions and
+// registered.RegisterGroup once we have moved enableVersions there.
+func enableVersions(externalVersions []unversioned.GroupVersion) error {
+	addVersionsToScheme(externalVersions...)
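+	// A usage sketch (an assumption about typical callers, who rely on the
+	// usual Kubernetes side-effect import pattern rather than calling this
+	// package directly):
+	//
+	//	import _ "k8s.io/kubernetes/pkg/apis/rbac/install"
+	//
+	// That import runs init() above, which lands here: at this point the
+	// scheme knows the internal rbac types plus the enabled external
+	// version v1alpha1, and the remainder of this function registers the
+	// group's RESTMapper and self-linker.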
+ preferredExternalVersion := externalVersions[0] + + groupMeta := apimachinery.GroupMeta{ + GroupVersion: preferredExternalVersion, + GroupVersions: externalVersions, + RESTMapper: newRESTMapper(externalVersions), + SelfLinker: runtime.SelfLinker(accessor), + InterfacesFor: interfacesFor, + } + + if err := registered.RegisterGroup(groupMeta); err != nil { + return err + } + api.RegisterRESTMapper(groupMeta.RESTMapper) + return nil +} + +func newRESTMapper(externalVersions []unversioned.GroupVersion) meta.RESTMapper { + rootScoped := sets.NewString( + "ClusterRole", + "ClusterRoleBinding", + ) + + ignoredKinds := sets.NewString() + + return api.NewDefaultRESTMapper(externalVersions, interfacesFor, importPrefix, ignoredKinds, rootScoped) +} + +// interfacesFor returns the default Codec and ResourceVersioner for a given version +// string, or an error if the version is not known. +func interfacesFor(version unversioned.GroupVersion) (*meta.VersionInterfaces, error) { + switch version { + case v1alpha1.SchemeGroupVersion: + return &meta.VersionInterfaces{ + ObjectConvertor: api.Scheme, + MetadataAccessor: accessor, + }, nil + default: + g, _ := registered.Group(rbac.GroupName) + return nil, fmt.Errorf("unsupported storage version: %s (valid: %v)", version, g.GroupVersions) + } +} + +func addVersionsToScheme(externalVersions ...unversioned.GroupVersion) { + // add the internal version to Scheme + rbac.AddToScheme(api.Scheme) + // add the enabled external versions to Scheme + for _, v := range externalVersions { + if !registered.IsEnabledVersion(v) { + glog.Errorf("Version %s is not enabled, so it will not be added to the Scheme.", v) + continue + } + switch v { + case v1alpha1.SchemeGroupVersion: + v1alpha1.AddToScheme(api.Scheme) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/rbac/install/install_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/rbac/install/install_test.go new file mode 100644 index 000000000000..4f80a090dcf9 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/rbac/install/install_test.go @@ -0,0 +1,114 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package install + +import ( + "encoding/json" + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/apimachinery/registered" + "k8s.io/kubernetes/pkg/apis/rbac" + "k8s.io/kubernetes/pkg/apis/rbac/v1alpha1" + "k8s.io/kubernetes/pkg/runtime" +) + +func TestResourceVersioner(t *testing.T) { + roleBinding := rbac.RoleBinding{ObjectMeta: api.ObjectMeta{ResourceVersion: "10"}} + version, err := accessor.ResourceVersion(&roleBinding) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if version != "10" { + t.Errorf("unexpected version %v", version) + } + + roleBindingList := rbac.RoleBindingList{ListMeta: unversioned.ListMeta{ResourceVersion: "10"}} + version, err = accessor.ResourceVersion(&roleBindingList) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if version != "10" { + t.Errorf("unexpected version %v", version) + } +} + +func TestCodec(t *testing.T) { + roleBinding := rbac.RoleBinding{} + // We do want to use package registered rather than testapi here, because we + // want to test if the package install and package registered work as expected. + data, err := runtime.Encode(api.Codecs.LegacyCodec(registered.GroupOrDie(rbac.GroupName).GroupVersion), &roleBinding) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + other := rbac.RoleBinding{} + if err := json.Unmarshal(data, &other); err != nil { + t.Fatalf("unexpected error: %v", err) + } + if other.APIVersion != registered.GroupOrDie(rbac.GroupName).GroupVersion.String() || other.Kind != "RoleBinding" { + t.Errorf("unexpected unmarshalled object %#v", other) + } +} + +func TestInterfacesFor(t *testing.T) { + if _, err := registered.GroupOrDie(rbac.GroupName).InterfacesFor(rbac.SchemeGroupVersion); err == nil { + t.Fatalf("unexpected non-error: %v", err) + } + for i, version := range registered.GroupOrDie(rbac.GroupName).GroupVersions { + if vi, err := registered.GroupOrDie(rbac.GroupName).InterfacesFor(version); err != nil || vi == nil { + t.Fatalf("%d: unexpected result: %v", i, err) + } + } +} + +func TestRESTMapper(t *testing.T) { + gv := v1alpha1.SchemeGroupVersion + roleBindingGVK := gv.WithKind("RoleBinding") + + if gvk, err := registered.GroupOrDie(rbac.GroupName).RESTMapper.KindFor(gv.WithResource("rolebindings")); err != nil || gvk != roleBindingGVK { + t.Errorf("unexpected version mapping: %v %v", gvk, err) + } + + for _, version := range registered.GroupOrDie(rbac.GroupName).GroupVersions { + mapping, err := registered.GroupOrDie(rbac.GroupName).RESTMapper.RESTMapping(roleBindingGVK.GroupKind(), version.Version) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + + if mapping.Resource != "rolebindings" { + t.Errorf("incorrect resource name: %#v", mapping) + } + if mapping.GroupVersionKind.GroupVersion() != version { + t.Errorf("incorrect groupVersion: %v", mapping) + } + + interfaces, _ := registered.GroupOrDie(rbac.GroupName).InterfacesFor(version) + if mapping.ObjectConvertor != interfaces.ObjectConvertor { + t.Errorf("unexpected: %#v, expected: %#v", mapping, interfaces) + } + + roleBinding := &rbac.RoleBinding{ObjectMeta: api.ObjectMeta{Name: "foo"}} + name, err := mapping.MetadataAccessor.Name(roleBinding) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + if name != "foo" { + t.Errorf("unable to retrieve object meta with: %v", mapping.MetadataAccessor) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/rbac/register.go 
b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/rbac/register.go new file mode 100644 index 000000000000..3ecc4905a723 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/rbac/register.go @@ -0,0 +1,69 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rbac + +import ( + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/watch/versioned" +) + +const GroupName = "rbac.authorization.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} + +// Kind takes an unqualified kind and returns back a Group qualified GroupKind +func Kind(kind string) unversioned.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns back a Group qualified GroupResource +func Resource(resource string) unversioned.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +func AddToScheme(scheme *runtime.Scheme) { + // Add the API to Scheme. + addKnownTypes(scheme) +} + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) { + scheme.AddKnownTypes(SchemeGroupVersion, + &Role{}, + &RoleBinding{}, + &RoleBindingList{}, + &RoleList{}, + + &ClusterRole{}, + &ClusterRoleBinding{}, + &ClusterRoleBindingList{}, + &ClusterRoleList{}, + ) + versioned.AddToGroupVersion(scheme, SchemeGroupVersion) +} + +func (obj *ClusterRole) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } +func (obj *ClusterRoleBinding) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } +func (obj *ClusterRoleBindingList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } +func (obj *ClusterRoleList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } + +func (obj *Role) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } +func (obj *RoleBinding) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } +func (obj *RoleBindingList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } +func (obj *RoleList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/rbac/types.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/rbac/types.go new file mode 100644 index 000000000000..a35eb7db9572 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/rbac/types.go @@ -0,0 +1,176 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rbac
+
+import (
+	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/api/unversioned"
+	"k8s.io/kubernetes/pkg/runtime"
+)
+
+// Authorization is calculated against
+// 1. evaluation of ClusterRoleBindings - short circuit on match
+// 2. evaluation of RoleBindings in the namespace requested - short circuit on match
+// 3. deny by default
+
+const (
+	APIGroupAll    = "*"
+	ResourceAll    = "*"
+	VerbAll        = "*"
+	NonResourceAll = "*"
+
+	GroupKind          = "Group"
+	ServiceAccountKind = "ServiceAccount"
+	UserKind           = "User"
+
+	UserAll = "*"
+)
+
+// PolicyRule holds information that describes a policy rule, but does not contain information
+// about who the rule applies to or which namespace the rule applies to.
+type PolicyRule struct {
+	// Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds.
+	Verbs []string
+	// AttributeRestrictions will vary depending on what the Authorizer/AuthorizationAttributeBuilder pair supports.
+	// If the Authorizer does not recognize how to handle the AttributeRestrictions, the Authorizer should report an error.
+	AttributeRestrictions runtime.Object
+	// APIGroups is the name of the APIGroup that contains the resources.
+	// If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed.
+	APIGroups []string
+	// Resources is a list of resources this rule applies to. ResourceAll represents all resources.
+	Resources []string
+	// ResourceNames is an optional whitelist of names that the rule applies to. An empty set means that everything is allowed.
+	ResourceNames []string
+	// NonResourceURLs is a set of partial URLs that a user should have access to. *s are allowed, but only as the full, final step in the path.
+	// If an action is not a resource API request, then the URL is split on '/' and is checked against the NonResourceURLs to look for a match.
+	// Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding.
+	NonResourceURLs []string
+}
+
+// Subject contains a reference to the object or user identities a role binding applies to. This can either hold a direct API object reference,
+// or a value for non-objects such as user and group names.
+type Subject struct {
+	// Kind of object being referenced. Values defined by this API group are "User", "Group", and "ServiceAccount".
+	// If the Authorizer does not recognize the kind value, the Authorizer should report an error.
+	Kind string
+	// APIVersion holds the API group and version of the referenced object. For non-object references such as "Group" and "User" this is
+	// expected to be the API version of this API group. For example "rbac/v1alpha1".
+	APIVersion string
+	// Name of the object being referenced.
+	Name string
+	// Namespace of the referenced object. If the object kind is non-namespaced, such as "User" or "Group", and this value is not empty,
+	// the Authorizer should report an error.
+	Namespace string
+}
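+
+// An illustrative sketch (assumed example values, not taken from this file):
+// a rule granting read access to pods, plus a service-account subject that a
+// RoleBinding below might bind it to, could be built as:
+//
+//	rule := PolicyRule{
+//		Verbs:     []string{"get", "list", "watch"},
+//		APIGroups: []string{APIGroupAll},
+//		Resources: []string{"pods"},
+//	}
+//	subject := Subject{Kind: ServiceAccountKind, Name: "builder", Namespace: "ci"}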
+
+// +genclient=true
+
+// Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding.
+type Role struct {
+	unversioned.TypeMeta
+	// Standard object's metadata.
+	api.ObjectMeta
+
+	// Rules holds all the PolicyRules for this Role
+	Rules []PolicyRule
+}
+
+// +genclient=true
+
+// RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace.
+// It adds who information via Subjects and namespace information via the namespace in which it exists. RoleBindings in a given
+// namespace only have effect in that namespace.
+type RoleBinding struct {
+	unversioned.TypeMeta
+	api.ObjectMeta
+
+	// Subjects holds references to the objects the role applies to.
+	Subjects []Subject
+
+	// RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace.
+	// If the RoleRef cannot be resolved, the Authorizer must return an error.
+	RoleRef api.ObjectReference
+}
+
+// RoleBindingList is a collection of RoleBindings
+type RoleBindingList struct {
+	unversioned.TypeMeta
+	// Standard object's metadata.
+	unversioned.ListMeta
+
+	// Items is a list of roleBindings
+	Items []RoleBinding
+}
+
+// RoleList is a collection of Roles
+type RoleList struct {
+	unversioned.TypeMeta
+	// Standard object's metadata.
+	unversioned.ListMeta
+
+	// Items is a list of roles
+	Items []Role
+}
+
+// +genclient=true,nonNamespaced=true
+
+// ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.
+type ClusterRole struct {
+	unversioned.TypeMeta
+	// Standard object's metadata.
+	api.ObjectMeta
+
+	// Rules holds all the PolicyRules for this ClusterRole
+	Rules []PolicyRule
+}
+
+// +genclient=true,nonNamespaced=true
+
+// ClusterRoleBinding references a ClusterRole, but does not contain it. It can reference a ClusterRole in the global namespace,
+// and adds who information via Subjects.
+type ClusterRoleBinding struct {
+	unversioned.TypeMeta
+	// Standard object's metadata.
+	api.ObjectMeta
+
+	// Subjects holds references to the objects the role applies to.
+	Subjects []Subject
+
+	// RoleRef can only reference a ClusterRole in the global namespace.
+	// If the RoleRef cannot be resolved, the Authorizer must return an error.
+	RoleRef api.ObjectReference
+}
+
+// ClusterRoleBindingList is a collection of ClusterRoleBindings
+type ClusterRoleBindingList struct {
+	unversioned.TypeMeta
+	// Standard object's metadata.
+	unversioned.ListMeta
+
+	// Items is a list of ClusterRoleBindings
+	Items []ClusterRoleBinding
+}
+
+// ClusterRoleList is a collection of ClusterRoles
+type ClusterRoleList struct {
+	unversioned.TypeMeta
+	// Standard object's metadata.
+	unversioned.ListMeta
+
+	// Items is a list of ClusterRoles
+	Items []ClusterRole
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/conversion_generated.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/conversion_generated.go
new file mode 100644
index 000000000000..f176aa090663
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/conversion_generated.go
@@ -0,0 +1,536 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by conversion-gen. Do not edit it manually! + +package v1alpha1 + +import ( + api "k8s.io/kubernetes/pkg/api" + rbac "k8s.io/kubernetes/pkg/apis/rbac" + conversion "k8s.io/kubernetes/pkg/conversion" + runtime "k8s.io/kubernetes/pkg/runtime" +) + +func init() { + if err := api.Scheme.AddGeneratedConversionFuncs( + Convert_v1alpha1_ClusterRole_To_rbac_ClusterRole, + Convert_rbac_ClusterRole_To_v1alpha1_ClusterRole, + Convert_v1alpha1_ClusterRoleBinding_To_rbac_ClusterRoleBinding, + Convert_rbac_ClusterRoleBinding_To_v1alpha1_ClusterRoleBinding, + Convert_v1alpha1_ClusterRoleBindingList_To_rbac_ClusterRoleBindingList, + Convert_rbac_ClusterRoleBindingList_To_v1alpha1_ClusterRoleBindingList, + Convert_v1alpha1_ClusterRoleList_To_rbac_ClusterRoleList, + Convert_rbac_ClusterRoleList_To_v1alpha1_ClusterRoleList, + Convert_v1alpha1_PolicyRule_To_rbac_PolicyRule, + Convert_rbac_PolicyRule_To_v1alpha1_PolicyRule, + Convert_v1alpha1_Role_To_rbac_Role, + Convert_rbac_Role_To_v1alpha1_Role, + Convert_v1alpha1_RoleBinding_To_rbac_RoleBinding, + Convert_rbac_RoleBinding_To_v1alpha1_RoleBinding, + Convert_v1alpha1_RoleBindingList_To_rbac_RoleBindingList, + Convert_rbac_RoleBindingList_To_v1alpha1_RoleBindingList, + Convert_v1alpha1_RoleList_To_rbac_RoleList, + Convert_rbac_RoleList_To_v1alpha1_RoleList, + Convert_v1alpha1_Subject_To_rbac_Subject, + Convert_rbac_Subject_To_v1alpha1_Subject, + ); err != nil { + // if one of the conversion functions is malformed, detect it immediately. + panic(err) + } +} + +func autoConvert_v1alpha1_ClusterRole_To_rbac_ClusterRole(in *ClusterRole, out *rbac.ClusterRole, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + // TODO: Inefficient conversion - can we improve it? + if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { + return err + } + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]rbac.PolicyRule, len(*in)) + for i := range *in { + if err := Convert_v1alpha1_PolicyRule_To_rbac_PolicyRule(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Rules = nil + } + return nil +} + +func Convert_v1alpha1_ClusterRole_To_rbac_ClusterRole(in *ClusterRole, out *rbac.ClusterRole, s conversion.Scope) error { + return autoConvert_v1alpha1_ClusterRole_To_rbac_ClusterRole(in, out, s) +} + +func autoConvert_rbac_ClusterRole_To_v1alpha1_ClusterRole(in *rbac.ClusterRole, out *ClusterRole, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + // TODO: Inefficient conversion - can we improve it? 
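+	// (The generic s.Convert call below goes through the scheme's
+	// reflection-based converter, since conversion-gen emitted no direct
+	// typed function for ObjectMeta; that indirection is the inefficiency
+	// the TODO above refers to.)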
+ if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { + return err + } + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]PolicyRule, len(*in)) + for i := range *in { + if err := Convert_rbac_PolicyRule_To_v1alpha1_PolicyRule(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Rules = nil + } + return nil +} + +func Convert_rbac_ClusterRole_To_v1alpha1_ClusterRole(in *rbac.ClusterRole, out *ClusterRole, s conversion.Scope) error { + return autoConvert_rbac_ClusterRole_To_v1alpha1_ClusterRole(in, out, s) +} + +func autoConvert_v1alpha1_ClusterRoleBinding_To_rbac_ClusterRoleBinding(in *ClusterRoleBinding, out *rbac.ClusterRoleBinding, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + // TODO: Inefficient conversion - can we improve it? + if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { + return err + } + if in.Subjects != nil { + in, out := &in.Subjects, &out.Subjects + *out = make([]rbac.Subject, len(*in)) + for i := range *in { + if err := Convert_v1alpha1_Subject_To_rbac_Subject(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Subjects = nil + } + // TODO: Inefficient conversion - can we improve it? + if err := s.Convert(&in.RoleRef, &out.RoleRef, 0); err != nil { + return err + } + return nil +} + +func Convert_v1alpha1_ClusterRoleBinding_To_rbac_ClusterRoleBinding(in *ClusterRoleBinding, out *rbac.ClusterRoleBinding, s conversion.Scope) error { + return autoConvert_v1alpha1_ClusterRoleBinding_To_rbac_ClusterRoleBinding(in, out, s) +} + +func autoConvert_rbac_ClusterRoleBinding_To_v1alpha1_ClusterRoleBinding(in *rbac.ClusterRoleBinding, out *ClusterRoleBinding, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + // TODO: Inefficient conversion - can we improve it? + if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { + return err + } + if in.Subjects != nil { + in, out := &in.Subjects, &out.Subjects + *out = make([]Subject, len(*in)) + for i := range *in { + if err := Convert_rbac_Subject_To_v1alpha1_Subject(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Subjects = nil + } + // TODO: Inefficient conversion - can we improve it? 
+ if err := s.Convert(&in.RoleRef, &out.RoleRef, 0); err != nil { + return err + } + return nil +} + +func Convert_rbac_ClusterRoleBinding_To_v1alpha1_ClusterRoleBinding(in *rbac.ClusterRoleBinding, out *ClusterRoleBinding, s conversion.Scope) error { + return autoConvert_rbac_ClusterRoleBinding_To_v1alpha1_ClusterRoleBinding(in, out, s) +} + +func autoConvert_v1alpha1_ClusterRoleBindingList_To_rbac_ClusterRoleBindingList(in *ClusterRoleBindingList, out *rbac.ClusterRoleBindingList, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { + return err + } + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]rbac.ClusterRoleBinding, len(*in)) + for i := range *in { + if err := Convert_v1alpha1_ClusterRoleBinding_To_rbac_ClusterRoleBinding(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_v1alpha1_ClusterRoleBindingList_To_rbac_ClusterRoleBindingList(in *ClusterRoleBindingList, out *rbac.ClusterRoleBindingList, s conversion.Scope) error { + return autoConvert_v1alpha1_ClusterRoleBindingList_To_rbac_ClusterRoleBindingList(in, out, s) +} + +func autoConvert_rbac_ClusterRoleBindingList_To_v1alpha1_ClusterRoleBindingList(in *rbac.ClusterRoleBindingList, out *ClusterRoleBindingList, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { + return err + } + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterRoleBinding, len(*in)) + for i := range *in { + if err := Convert_rbac_ClusterRoleBinding_To_v1alpha1_ClusterRoleBinding(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_rbac_ClusterRoleBindingList_To_v1alpha1_ClusterRoleBindingList(in *rbac.ClusterRoleBindingList, out *ClusterRoleBindingList, s conversion.Scope) error { + return autoConvert_rbac_ClusterRoleBindingList_To_v1alpha1_ClusterRoleBindingList(in, out, s) +} + +func autoConvert_v1alpha1_ClusterRoleList_To_rbac_ClusterRoleList(in *ClusterRoleList, out *rbac.ClusterRoleList, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { + return err + } + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]rbac.ClusterRole, len(*in)) + for i := range *in { + if err := Convert_v1alpha1_ClusterRole_To_rbac_ClusterRole(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_v1alpha1_ClusterRoleList_To_rbac_ClusterRoleList(in *ClusterRoleList, out *rbac.ClusterRoleList, s conversion.Scope) error { + return autoConvert_v1alpha1_ClusterRoleList_To_rbac_ClusterRoleList(in, out, s) +} + +func autoConvert_rbac_ClusterRoleList_To_v1alpha1_ClusterRoleList(in *rbac.ClusterRoleList, out *ClusterRoleList, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, 
&out.TypeMeta, s); err != nil { + return err + } + if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { + return err + } + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterRole, len(*in)) + for i := range *in { + if err := Convert_rbac_ClusterRole_To_v1alpha1_ClusterRole(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_rbac_ClusterRoleList_To_v1alpha1_ClusterRoleList(in *rbac.ClusterRoleList, out *ClusterRoleList, s conversion.Scope) error { + return autoConvert_rbac_ClusterRoleList_To_v1alpha1_ClusterRoleList(in, out, s) +} + +func autoConvert_v1alpha1_PolicyRule_To_rbac_PolicyRule(in *PolicyRule, out *rbac.PolicyRule, s conversion.Scope) error { + out.Verbs = in.Verbs + if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&in.AttributeRestrictions, &out.AttributeRestrictions, s); err != nil { + return err + } + out.APIGroups = in.APIGroups + out.Resources = in.Resources + out.ResourceNames = in.ResourceNames + out.NonResourceURLs = in.NonResourceURLs + return nil +} + +func Convert_v1alpha1_PolicyRule_To_rbac_PolicyRule(in *PolicyRule, out *rbac.PolicyRule, s conversion.Scope) error { + return autoConvert_v1alpha1_PolicyRule_To_rbac_PolicyRule(in, out, s) +} + +func autoConvert_rbac_PolicyRule_To_v1alpha1_PolicyRule(in *rbac.PolicyRule, out *PolicyRule, s conversion.Scope) error { + out.Verbs = in.Verbs + if err := runtime.Convert_runtime_Object_To_runtime_RawExtension(&in.AttributeRestrictions, &out.AttributeRestrictions, s); err != nil { + return err + } + out.APIGroups = in.APIGroups + out.Resources = in.Resources + out.ResourceNames = in.ResourceNames + out.NonResourceURLs = in.NonResourceURLs + return nil +} + +func Convert_rbac_PolicyRule_To_v1alpha1_PolicyRule(in *rbac.PolicyRule, out *PolicyRule, s conversion.Scope) error { + return autoConvert_rbac_PolicyRule_To_v1alpha1_PolicyRule(in, out, s) +} + +func autoConvert_v1alpha1_Role_To_rbac_Role(in *Role, out *rbac.Role, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + // TODO: Inefficient conversion - can we improve it? + if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { + return err + } + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]rbac.PolicyRule, len(*in)) + for i := range *in { + if err := Convert_v1alpha1_PolicyRule_To_rbac_PolicyRule(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Rules = nil + } + return nil +} + +func Convert_v1alpha1_Role_To_rbac_Role(in *Role, out *rbac.Role, s conversion.Scope) error { + return autoConvert_v1alpha1_Role_To_rbac_Role(in, out, s) +} + +func autoConvert_rbac_Role_To_v1alpha1_Role(in *rbac.Role, out *Role, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + // TODO: Inefficient conversion - can we improve it? 
+ if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { + return err + } + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]PolicyRule, len(*in)) + for i := range *in { + if err := Convert_rbac_PolicyRule_To_v1alpha1_PolicyRule(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Rules = nil + } + return nil +} + +func Convert_rbac_Role_To_v1alpha1_Role(in *rbac.Role, out *Role, s conversion.Scope) error { + return autoConvert_rbac_Role_To_v1alpha1_Role(in, out, s) +} + +func autoConvert_v1alpha1_RoleBinding_To_rbac_RoleBinding(in *RoleBinding, out *rbac.RoleBinding, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + // TODO: Inefficient conversion - can we improve it? + if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { + return err + } + if in.Subjects != nil { + in, out := &in.Subjects, &out.Subjects + *out = make([]rbac.Subject, len(*in)) + for i := range *in { + if err := Convert_v1alpha1_Subject_To_rbac_Subject(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Subjects = nil + } + // TODO: Inefficient conversion - can we improve it? + if err := s.Convert(&in.RoleRef, &out.RoleRef, 0); err != nil { + return err + } + return nil +} + +func Convert_v1alpha1_RoleBinding_To_rbac_RoleBinding(in *RoleBinding, out *rbac.RoleBinding, s conversion.Scope) error { + return autoConvert_v1alpha1_RoleBinding_To_rbac_RoleBinding(in, out, s) +} + +func autoConvert_rbac_RoleBinding_To_v1alpha1_RoleBinding(in *rbac.RoleBinding, out *RoleBinding, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + // TODO: Inefficient conversion - can we improve it? + if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { + return err + } + if in.Subjects != nil { + in, out := &in.Subjects, &out.Subjects + *out = make([]Subject, len(*in)) + for i := range *in { + if err := Convert_rbac_Subject_To_v1alpha1_Subject(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Subjects = nil + } + // TODO: Inefficient conversion - can we improve it? 
+ if err := s.Convert(&in.RoleRef, &out.RoleRef, 0); err != nil { + return err + } + return nil +} + +func Convert_rbac_RoleBinding_To_v1alpha1_RoleBinding(in *rbac.RoleBinding, out *RoleBinding, s conversion.Scope) error { + return autoConvert_rbac_RoleBinding_To_v1alpha1_RoleBinding(in, out, s) +} + +func autoConvert_v1alpha1_RoleBindingList_To_rbac_RoleBindingList(in *RoleBindingList, out *rbac.RoleBindingList, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { + return err + } + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]rbac.RoleBinding, len(*in)) + for i := range *in { + if err := Convert_v1alpha1_RoleBinding_To_rbac_RoleBinding(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_v1alpha1_RoleBindingList_To_rbac_RoleBindingList(in *RoleBindingList, out *rbac.RoleBindingList, s conversion.Scope) error { + return autoConvert_v1alpha1_RoleBindingList_To_rbac_RoleBindingList(in, out, s) +} + +func autoConvert_rbac_RoleBindingList_To_v1alpha1_RoleBindingList(in *rbac.RoleBindingList, out *RoleBindingList, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { + return err + } + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]RoleBinding, len(*in)) + for i := range *in { + if err := Convert_rbac_RoleBinding_To_v1alpha1_RoleBinding(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_rbac_RoleBindingList_To_v1alpha1_RoleBindingList(in *rbac.RoleBindingList, out *RoleBindingList, s conversion.Scope) error { + return autoConvert_rbac_RoleBindingList_To_v1alpha1_RoleBindingList(in, out, s) +} + +func autoConvert_v1alpha1_RoleList_To_rbac_RoleList(in *RoleList, out *rbac.RoleList, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { + return err + } + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]rbac.Role, len(*in)) + for i := range *in { + if err := Convert_v1alpha1_Role_To_rbac_Role(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_v1alpha1_RoleList_To_rbac_RoleList(in *RoleList, out *rbac.RoleList, s conversion.Scope) error { + return autoConvert_v1alpha1_RoleList_To_rbac_RoleList(in, out, s) +} + +func autoConvert_rbac_RoleList_To_v1alpha1_RoleList(in *rbac.RoleList, out *RoleList, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { + return err + } + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Role, len(*in)) + for i := range *in { + if err := Convert_rbac_Role_To_v1alpha1_Role(&(*in)[i], 
&(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_rbac_RoleList_To_v1alpha1_RoleList(in *rbac.RoleList, out *RoleList, s conversion.Scope) error { + return autoConvert_rbac_RoleList_To_v1alpha1_RoleList(in, out, s) +} + +func autoConvert_v1alpha1_Subject_To_rbac_Subject(in *Subject, out *rbac.Subject, s conversion.Scope) error { + out.Kind = in.Kind + out.APIVersion = in.APIVersion + out.Name = in.Name + out.Namespace = in.Namespace + return nil +} + +func Convert_v1alpha1_Subject_To_rbac_Subject(in *Subject, out *rbac.Subject, s conversion.Scope) error { + return autoConvert_v1alpha1_Subject_To_rbac_Subject(in, out, s) +} + +func autoConvert_rbac_Subject_To_v1alpha1_Subject(in *rbac.Subject, out *Subject, s conversion.Scope) error { + out.Kind = in.Kind + out.APIVersion = in.APIVersion + out.Name = in.Name + out.Namespace = in.Namespace + return nil +} + +func Convert_rbac_Subject_To_v1alpha1_Subject(in *rbac.Subject, out *Subject, s conversion.Scope) error { + return autoConvert_rbac_Subject_To_v1alpha1_Subject(in, out, s) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/deep_copy_generated.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/deep_copy_generated.go new file mode 100644 index 000000000000..f898a434baf5 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/deep_copy_generated.go @@ -0,0 +1,271 @@ +// +build !ignore_autogenerated + +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package v1alpha1 + +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + v1 "k8s.io/kubernetes/pkg/api/v1" + conversion "k8s.io/kubernetes/pkg/conversion" + runtime "k8s.io/kubernetes/pkg/runtime" +) + +func init() { + if err := api.Scheme.AddGeneratedDeepCopyFuncs( + DeepCopy_v1alpha1_ClusterRole, + DeepCopy_v1alpha1_ClusterRoleBinding, + DeepCopy_v1alpha1_ClusterRoleBindingList, + DeepCopy_v1alpha1_ClusterRoleList, + DeepCopy_v1alpha1_PolicyRule, + DeepCopy_v1alpha1_Role, + DeepCopy_v1alpha1_RoleBinding, + DeepCopy_v1alpha1_RoleBindingList, + DeepCopy_v1alpha1_RoleList, + DeepCopy_v1alpha1_Subject, + ); err != nil { + // if one of the deep copy functions is malformed, detect it immediately. 
+ panic(err) + } +} + +func DeepCopy_v1alpha1_ClusterRole(in ClusterRole, out *ClusterRole, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + return err + } + if in.Rules != nil { + in, out := in.Rules, &out.Rules + *out = make([]PolicyRule, len(in)) + for i := range in { + if err := DeepCopy_v1alpha1_PolicyRule(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Rules = nil + } + return nil +} + +func DeepCopy_v1alpha1_ClusterRoleBinding(in ClusterRoleBinding, out *ClusterRoleBinding, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + return err + } + if in.Subjects != nil { + in, out := in.Subjects, &out.Subjects + *out = make([]Subject, len(in)) + for i := range in { + if err := DeepCopy_v1alpha1_Subject(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Subjects = nil + } + if err := v1.DeepCopy_v1_ObjectReference(in.RoleRef, &out.RoleRef, c); err != nil { + return err + } + return nil +} + +func DeepCopy_v1alpha1_ClusterRoleBindingList(in ClusterRoleBindingList, out *ClusterRoleBindingList, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + return err + } + if in.Items != nil { + in, out := in.Items, &out.Items + *out = make([]ClusterRoleBinding, len(in)) + for i := range in { + if err := DeepCopy_v1alpha1_ClusterRoleBinding(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func DeepCopy_v1alpha1_ClusterRoleList(in ClusterRoleList, out *ClusterRoleList, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + return err + } + if in.Items != nil { + in, out := in.Items, &out.Items + *out = make([]ClusterRole, len(in)) + for i := range in { + if err := DeepCopy_v1alpha1_ClusterRole(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func DeepCopy_v1alpha1_PolicyRule(in PolicyRule, out *PolicyRule, c *conversion.Cloner) error { + if in.Verbs != nil { + in, out := in.Verbs, &out.Verbs + *out = make([]string, len(in)) + copy(*out, in) + } else { + out.Verbs = nil + } + if err := runtime.DeepCopy_runtime_RawExtension(in.AttributeRestrictions, &out.AttributeRestrictions, c); err != nil { + return err + } + if in.APIGroups != nil { + in, out := in.APIGroups, &out.APIGroups + *out = make([]string, len(in)) + copy(*out, in) + } else { + out.APIGroups = nil + } + if in.Resources != nil { + in, out := in.Resources, &out.Resources + *out = make([]string, len(in)) + copy(*out, in) + } else { + out.Resources = nil + } + if in.ResourceNames != nil { + in, out := in.ResourceNames, &out.ResourceNames + *out = make([]string, len(in)) + copy(*out, in) + } else { + out.ResourceNames = nil + } + if in.NonResourceURLs != nil { + in, out := in.NonResourceURLs, &out.NonResourceURLs + *out = make([]string, 
len(in)) + copy(*out, in) + } else { + out.NonResourceURLs = nil + } + return nil +} + +func DeepCopy_v1alpha1_Role(in Role, out *Role, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + return err + } + if in.Rules != nil { + in, out := in.Rules, &out.Rules + *out = make([]PolicyRule, len(in)) + for i := range in { + if err := DeepCopy_v1alpha1_PolicyRule(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Rules = nil + } + return nil +} + +func DeepCopy_v1alpha1_RoleBinding(in RoleBinding, out *RoleBinding, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + return err + } + if in.Subjects != nil { + in, out := in.Subjects, &out.Subjects + *out = make([]Subject, len(in)) + for i := range in { + if err := DeepCopy_v1alpha1_Subject(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Subjects = nil + } + if err := v1.DeepCopy_v1_ObjectReference(in.RoleRef, &out.RoleRef, c); err != nil { + return err + } + return nil +} + +func DeepCopy_v1alpha1_RoleBindingList(in RoleBindingList, out *RoleBindingList, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + return err + } + if in.Items != nil { + in, out := in.Items, &out.Items + *out = make([]RoleBinding, len(in)) + for i := range in { + if err := DeepCopy_v1alpha1_RoleBinding(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func DeepCopy_v1alpha1_RoleList(in RoleList, out *RoleList, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + return err + } + if in.Items != nil { + in, out := in.Items, &out.Items + *out = make([]Role, len(in)) + for i := range in { + if err := DeepCopy_v1alpha1_Role(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func DeepCopy_v1alpha1_Subject(in Subject, out *Subject, c *conversion.Cloner) error { + out.Kind = in.Kind + out.APIVersion = in.APIVersion + out.Name = in.Name + out.Namespace = in.Namespace + return nil +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/doc.go new file mode 100644 index 000000000000..6873ebb101ff --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +groupName=rbac.authorization.k8s.io +// +genconversion=true +package v1alpha1 diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/generated.pb.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/generated.pb.go new file mode 100644 index 000000000000..54b03ed15d1d --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/generated.pb.go @@ -0,0 +1,2209 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/generated.proto +// DO NOT EDIT! + +/* + Package v1alpha1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/generated.proto + + It has these top-level messages: + ClusterRole + ClusterRoleBinding + ClusterRoleBindingList + ClusterRoleList + PolicyRule + Role + RoleBinding + RoleBindingList + RoleList + Subject +*/ +package v1alpha1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. 
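+
+// The hand-rolled MarshalTo methods below emit the protobuf wire format
+// directly: each field begins with a tag byte, (fieldNumber << 3) | wireType,
+// so 0xa is field 1 with wire type 2 (length-delimited), 0x12 is field 2, and
+// so on, followed by a varint-encoded length and the payload bytes. A sketch
+// of the per-field pattern, where l and payload are illustrative names and
+// encodeVarintGenerated is defined later in this file:
+//
+//	data[i] = 0xa                                 // tag: (1 << 3) | 2
+//	i++
+//	i = encodeVarintGenerated(data, i, uint64(l)) // payload length
+//	i += copy(data[i:], payload)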
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +func (m *ClusterRole) Reset() { *m = ClusterRole{} } +func (m *ClusterRole) String() string { return proto.CompactTextString(m) } +func (*ClusterRole) ProtoMessage() {} + +func (m *ClusterRoleBinding) Reset() { *m = ClusterRoleBinding{} } +func (m *ClusterRoleBinding) String() string { return proto.CompactTextString(m) } +func (*ClusterRoleBinding) ProtoMessage() {} + +func (m *ClusterRoleBindingList) Reset() { *m = ClusterRoleBindingList{} } +func (m *ClusterRoleBindingList) String() string { return proto.CompactTextString(m) } +func (*ClusterRoleBindingList) ProtoMessage() {} + +func (m *ClusterRoleList) Reset() { *m = ClusterRoleList{} } +func (m *ClusterRoleList) String() string { return proto.CompactTextString(m) } +func (*ClusterRoleList) ProtoMessage() {} + +func (m *PolicyRule) Reset() { *m = PolicyRule{} } +func (m *PolicyRule) String() string { return proto.CompactTextString(m) } +func (*PolicyRule) ProtoMessage() {} + +func (m *Role) Reset() { *m = Role{} } +func (m *Role) String() string { return proto.CompactTextString(m) } +func (*Role) ProtoMessage() {} + +func (m *RoleBinding) Reset() { *m = RoleBinding{} } +func (m *RoleBinding) String() string { return proto.CompactTextString(m) } +func (*RoleBinding) ProtoMessage() {} + +func (m *RoleBindingList) Reset() { *m = RoleBindingList{} } +func (m *RoleBindingList) String() string { return proto.CompactTextString(m) } +func (*RoleBindingList) ProtoMessage() {} + +func (m *RoleList) Reset() { *m = RoleList{} } +func (m *RoleList) String() string { return proto.CompactTextString(m) } +func (*RoleList) ProtoMessage() {} + +func (m *Subject) Reset() { *m = Subject{} } +func (m *Subject) String() string { return proto.CompactTextString(m) } +func (*Subject) ProtoMessage() {} + +func init() { + proto.RegisterType((*ClusterRole)(nil), "k8s.io.kubernetes.pkg.apis.rbac.v1alpha1.ClusterRole") + proto.RegisterType((*ClusterRoleBinding)(nil), "k8s.io.kubernetes.pkg.apis.rbac.v1alpha1.ClusterRoleBinding") + proto.RegisterType((*ClusterRoleBindingList)(nil), "k8s.io.kubernetes.pkg.apis.rbac.v1alpha1.ClusterRoleBindingList") + proto.RegisterType((*ClusterRoleList)(nil), "k8s.io.kubernetes.pkg.apis.rbac.v1alpha1.ClusterRoleList") + proto.RegisterType((*PolicyRule)(nil), "k8s.io.kubernetes.pkg.apis.rbac.v1alpha1.PolicyRule") + proto.RegisterType((*Role)(nil), "k8s.io.kubernetes.pkg.apis.rbac.v1alpha1.Role") + proto.RegisterType((*RoleBinding)(nil), "k8s.io.kubernetes.pkg.apis.rbac.v1alpha1.RoleBinding") + proto.RegisterType((*RoleBindingList)(nil), "k8s.io.kubernetes.pkg.apis.rbac.v1alpha1.RoleBindingList") + proto.RegisterType((*RoleList)(nil), "k8s.io.kubernetes.pkg.apis.rbac.v1alpha1.RoleList") + proto.RegisterType((*Subject)(nil), "k8s.io.kubernetes.pkg.apis.rbac.v1alpha1.Subject") +} +func (m *ClusterRole) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ClusterRole) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n1 + if len(m.Rules) > 0 { + for _, msg := range m.Rules { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n 
+ } + } + return i, nil +} + +func (m *ClusterRoleBinding) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ClusterRoleBinding) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n2, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n2 + if len(m.Subjects) > 0 { + for _, msg := range m.Subjects { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.RoleRef.Size())) + n3, err := m.RoleRef.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n3 + return i, nil +} + +func (m *ClusterRoleBindingList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ClusterRoleBindingList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n4, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n4 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ClusterRoleList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ClusterRoleList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n5, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n5 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *PolicyRule) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PolicyRule) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Verbs) > 0 { + for _, s := range m.Verbs { + data[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.AttributeRestrictions.Size())) + n6, err := m.AttributeRestrictions.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n6 + if len(m.APIGroups) > 0 { + for _, s := range m.APIGroups { + data[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + if len(m.Resources) > 0 { + for _, s := range m.Resources { + data[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + 
data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + if len(m.ResourceNames) > 0 { + for _, s := range m.ResourceNames { + data[i] = 0x2a + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + if len(m.NonResourceURLs) > 0 { + for _, s := range m.NonResourceURLs { + data[i] = 0x32 + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + return i, nil +} + +func (m *Role) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Role) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n7, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n7 + if len(m.Rules) > 0 { + for _, msg := range m.Rules { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *RoleBinding) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *RoleBinding) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n8, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n8 + if len(m.Subjects) > 0 { + for _, msg := range m.Subjects { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.RoleRef.Size())) + n9, err := m.RoleRef.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n9 + return i, nil +} + +func (m *RoleBindingList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *RoleBindingList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n10, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n10 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *RoleList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *RoleList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n11, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n11 + if len(m.Items) > 0 { + for _, msg := range m.Items { + 
data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *Subject) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Subject) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Kind))) + i += copy(data[i:], m.Kind) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.APIVersion))) + i += copy(data[i:], m.APIVersion) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Namespace))) + i += copy(data[i:], m.Namespace) + return i, nil +} + +func encodeFixed64Generated(data []byte, offset int, v uint64) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + data[offset+4] = uint8(v >> 32) + data[offset+5] = uint8(v >> 40) + data[offset+6] = uint8(v >> 48) + data[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(data []byte, offset int, v uint32) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(data []byte, offset int, v uint64) int { + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return offset + 1 +} +func (m *ClusterRole) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ClusterRoleBinding) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Subjects) > 0 { + for _, e := range m.Subjects { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.RoleRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ClusterRoleBindingList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ClusterRoleList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PolicyRule) Size() (n int) { + var l int + _ = l + if len(m.Verbs) > 0 { + for _, s := range m.Verbs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.AttributeRestrictions.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.APIGroups) > 0 { + for _, s := range m.APIGroups { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Resources) > 0 { + for _, s := range m.Resources { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.ResourceNames) > 0 { + for _, s := range m.ResourceNames { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.NonResourceURLs) > 0 { + for _, s := range 
m.NonResourceURLs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *Role) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *RoleBinding) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Subjects) > 0 { + for _, e := range m.Subjects { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.RoleRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *RoleBindingList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *RoleList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *Subject) Size() (n int) { + var l int + _ = l + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.APIVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *ClusterRole) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterRole: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterRole: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rules = 
append(m.Rules, PolicyRule{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterRoleBinding) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterRoleBinding: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterRoleBinding: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subjects", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subjects = append(m.Subjects, Subject{}) + if err := m.Subjects[len(m.Subjects)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RoleRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.RoleRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterRoleBindingList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex 
:= iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterRoleBindingList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterRoleBindingList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ClusterRoleBinding{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterRoleList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterRoleList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterRoleList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if 
wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ClusterRole{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PolicyRule) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PolicyRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PolicyRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Verbs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Verbs = append(m.Verbs, string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AttributeRestrictions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.AttributeRestrictions.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIGroups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIGroups = 
append(m.APIGroups, string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceNames", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceNames = append(m.ResourceNames, string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NonResourceURLs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NonResourceURLs = append(m.NonResourceURLs, string(data[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Role) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Role: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Role: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + 
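Each Unmarshal method above opens every field the same way: read one varint, split it into the field number (upper bits) and wire type (low three bits), then switch on the field number, sending anything unrecognized to skipGenerated so unknown fields are tolerated. A minimal standalone sketch of that dispatch step; readUvarint is a hypothetical helper written for this sketch, not part of the generated file:

package main

import "fmt"

// readUvarint reads one varint the same way the generated Unmarshal loops do.
func readUvarint(data []byte) (v uint64, n int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, fmt.Errorf("varint overflows uint64")
		}
		if n >= len(data) {
			return 0, 0, fmt.Errorf("unexpected EOF")
		}
		b := data[n]
		n++
		v |= uint64(b&0x7f) << shift
		if b < 0x80 {
			return v, n, nil
		}
	}
}

func main() {
	// 0x2a is the tag byte emitted for PolicyRule.ResourceNames above.
	wire, n, err := readUvarint([]byte{0x2a})
	if err != nil {
		panic(err)
	}
	// field 5, wire type 2 (length-delimited), consumed 1 byte
	fmt.Println(wire>>3, wire&0x7, n) // 5 2 1
}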
return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rules = append(m.Rules, PolicyRule{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RoleBinding) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RoleBinding: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RoleBinding: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subjects", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subjects = append(m.Subjects, Subject{}) + if err := m.Subjects[len(m.Subjects)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RoleRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.RoleRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RoleBindingList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RoleBindingList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RoleBindingList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, RoleBinding{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RoleList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RoleList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RoleList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 
2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Role{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Subject) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Subject: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Subject: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIVersion = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 
2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(data []byte) (n int, err error) { + l := len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if data[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(data[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) diff --git 
a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/generated.proto b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/generated.proto new file mode 100644 index 000000000000..71a2f612f140 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/generated.proto @@ -0,0 +1,160 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.kubernetes.pkg.apis.rbac.v1alpha1; + +import "k8s.io/kubernetes/pkg/api/resource/generated.proto"; + +import "k8s.io/kubernetes/pkg/api/unversioned/generated.proto"; + +import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; + +import "k8s.io/kubernetes/pkg/runtime/generated.proto"; + +import "k8s.io/kubernetes/pkg/util/intstr/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1alpha1"; + +// ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. +message ClusterRole { + // Standard object's metadata. + optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; + + // Rules holds all the PolicyRules for this ClusterRole + repeated PolicyRule rules = 2; +} + +// ClusterRoleBinding references a ClusterRole, but does not contain it. It can reference a ClusterRole in the global namespace, +// and adds who information via Subject. +message ClusterRoleBinding { + // Standard object's metadata. + optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; + + // Subjects holds references to the objects the role applies to. + repeated Subject subject = 2; + + // RoleRef can only reference a ClusterRole in the global namespace. + // If the RoleRef cannot be resolved, the Authorizer must return an error. + optional k8s.io.kubernetes.pkg.api.v1.ObjectReference roleRef = 3; +} + +// ClusterRoleBindingList is a collection of ClusterRoleBindings +message ClusterRoleBindingList { + // Standard object's metadata. + optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; + + // Items is a list of ClusterRoleBindings + repeated ClusterRoleBinding items = 2; +} + +// ClusterRoleList is a collection of ClusterRoles +message ClusterRoleList { + // Standard object's metadata. + optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; + + // Items is a list of ClusterRoles + repeated ClusterRole items = 2; +} + +// PolicyRule holds information that describes a policy rule, but does not contain information +// about who the rule applies to or which namespace the rule applies to. +message PolicyRule { + // Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds. + repeated string verbs = 1; + + // AttributeRestrictions will vary depending on what the Authorizer/AuthorizationAttributeBuilder pair supports.
+ // If the Authorizer does not recognize how to handle the AttributeRestrictions, the Authorizer should report an error. + optional k8s.io.kubernetes.pkg.runtime.RawExtension attributeRestrictions = 2; + + // APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of + // the enumerated resources in any API group will be allowed. + repeated string apiGroups = 3; + + // Resources is a list of resources this rule applies to. ResourceAll represents all resources. + repeated string resources = 4; + + // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. + repeated string resourceNames = 5; + + // NonResourceURLsSlice is a set of partial URLs that a user should have access to. *s are allowed, but only as the full, final step in the path. + // This name is intentionally different from the internal type so that the DefaultConvert works nicely and because the ordering may be different. + // Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. + repeated string nonResourceURLs = 6; +} + +// Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding. +message Role { + // Standard object's metadata. + optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; + + // Rules holds all the PolicyRules for this Role + repeated PolicyRule rules = 2; +} + +// RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. +// It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given +// namespace only have effect in that namespace. +message RoleBinding { + // Standard object's metadata. + optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; + + // Subjects holds references to the objects the role applies to. + repeated Subject subject = 2; + + // RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. + // If the RoleRef cannot be resolved, the Authorizer must return an error. + optional k8s.io.kubernetes.pkg.api.v1.ObjectReference roleRef = 3; +} + +// RoleBindingList is a collection of RoleBindings +message RoleBindingList { + // Standard object's metadata. + optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; + + // Items is a list of RoleBindings + repeated RoleBinding items = 2; +} + +// RoleList is a collection of Roles +message RoleList { + // Standard object's metadata. + optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; + + // Items is a list of Roles + repeated Role items = 2; +} + +// Subject contains a reference to the object or user identities a role binding applies to. This can either hold a direct API object reference, +// or a value for non-objects such as user and group names. +message Subject { + // Kind of object being referenced. Values defined by this API group are "User", "Group", and "ServiceAccount". + // If the Authorizer does not recognize the kind value, the Authorizer should report an error. + optional string kind = 1; + + // APIVersion holds the API group and version of the referenced object. For non-object references such as "Group" and "User", this is + // expected to be the API version of this API group. For example "rbac/v1alpha1".
+ optional string apiVersion = 2; + + // Name of the object being referenced. + optional string name = 3; + + // Namespace of the referenced object. If the object kind is non-namespaced, such as "User" or "Group", and this value is not empty, + // the Authorizer should report an error. + optional string namespace = 4; +} + diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/register.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/register.go new file mode 100644 index 000000000000..41cbe420e242 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/register.go @@ -0,0 +1,57 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/apis/rbac" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/watch/versioned" +) + +// SchemeGroupVersion is the group version used to register these objects +var SchemeGroupVersion = unversioned.GroupVersion{Group: rbac.GroupName, Version: "v1alpha1"} + +func AddToScheme(scheme *runtime.Scheme) { + addKnownTypes(scheme) +} + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) { + scheme.AddKnownTypes(SchemeGroupVersion, + &Role{}, + &RoleBinding{}, + &RoleBindingList{}, + &RoleList{}, + + &ClusterRole{}, + &ClusterRoleBinding{}, + &ClusterRoleBindingList{}, + &ClusterRoleList{}, + ) + versioned.AddToGroupVersion(scheme, SchemeGroupVersion) +} + +func (obj *ClusterRole) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } +func (obj *ClusterRoleBinding) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } +func (obj *ClusterRoleBindingList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } +func (obj *ClusterRoleList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } + +func (obj *Role) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } +func (obj *RoleBinding) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } +func (obj *RoleBindingList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } +func (obj *RoleList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/types.generated.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/types.generated.go new file mode 100644 index 000000000000..f2e010ad1bca --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/types.generated.go @@ -0,0 +1,4320 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// ************************************************************ +// DO NOT EDIT. +// THIS FILE IS AUTO-GENERATED BY codecgen. +// ************************************************************ + +package v1alpha1 + +import ( + "errors" + "fmt" + codec1978 "github.com/ugorji/go/codec" + pkg2_unversioned "k8s.io/kubernetes/pkg/api/unversioned" + pkg3_v1 "k8s.io/kubernetes/pkg/api/v1" + pkg1_runtime "k8s.io/kubernetes/pkg/runtime" + pkg4_types "k8s.io/kubernetes/pkg/types" + "reflect" + "runtime" + time "time" +) + +const ( + // ----- content types ---- + codecSelferC_UTF81234 = 1 + codecSelferC_RAW1234 = 0 + // ----- value types used ---- + codecSelferValueTypeArray1234 = 10 + codecSelferValueTypeMap1234 = 9 + // ----- containerStateValues ---- + codecSelfer_containerMapKey1234 = 2 + codecSelfer_containerMapValue1234 = 3 + codecSelfer_containerMapEnd1234 = 4 + codecSelfer_containerArrayElem1234 = 6 + codecSelfer_containerArrayEnd1234 = 7 +) + +var ( + codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) + codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) +) + +type codecSelfer1234 struct{} + +func init() { + if codec1978.GenVersion != 5 { + _, file, _, _ := runtime.Caller(0) + err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v", + 5, codec1978.GenVersion, file) + panic(err) + } + if false { // reference the types, but skip this branch at build/run time + var v0 pkg2_unversioned.TypeMeta + var v1 pkg3_v1.ObjectMeta + var v2 pkg1_runtime.RawExtension + var v3 pkg4_types.UID + var v4 time.Time + _, _, _, _, _ = v0, v1, v2, v3, v4 + } +} + +func (x *PolicyRule) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [6]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = true + yyq2[4] = len(x.ResourceNames) != 0 + yyq2[5] = len(x.NonResourceURLs) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(6) + } else { + yynn2 = 3 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Verbs == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + z.F.EncSliceStringV(x.Verbs, false, e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("verbs")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Verbs == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + z.F.EncSliceStringV(x.Verbs, false, e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yy7 := &x.AttributeRestrictions + yym8 := z.EncBinary() 
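The yysep2/yy2arr2/yyq2 bookkeeping above exists because ugorji's codecgen supports two struct layouts: a keyed map (the default) and a positional array when the handle's StructToArray option is set, with yyq2 tracking which optional fields are non-empty so they can be omitted from maps. A minimal sketch of that split through the public ugorji API; the struct, its field tags, and the use of a JSON handle are illustrative for this sketch, not the configuration Kubernetes uses:

package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

type subject struct {
	Kind string `codec:"kind"`
	Name string `codec:"name"`
}

func main() {
	s := subject{Kind: "User", Name: "jane"}

	// Default layout: struct encodes as a keyed map.
	var asMap []byte
	if err := codec.NewEncoderBytes(&asMap, &codec.JsonHandle{}).Encode(s); err != nil {
		panic(err)
	}

	// StructToArray layout: fields encode positionally, the yy2arr2 branch above.
	arrayHandle := &codec.JsonHandle{}
	arrayHandle.StructToArray = true
	var asArray []byte
	if err := codec.NewEncoderBytes(&asArray, arrayHandle).Encode(s); err != nil {
		panic(err)
	}

	fmt.Println(string(asMap))   // e.g. {"kind":"User","name":"jane"}
	fmt.Println(string(asArray)) // e.g. ["User","jane"]
}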
+ _ = yym8 + if false { + } else if z.HasExtensions() && z.EncExt(yy7) { + } else if !yym8 && z.IsJSONHandle() { + z.EncJSONMarshal(yy7) + } else { + z.EncFallback(yy7) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("attributeRestrictions")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy9 := &x.AttributeRestrictions + yym10 := z.EncBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.EncExt(yy9) { + } else if !yym10 && z.IsJSONHandle() { + z.EncJSONMarshal(yy9) + } else { + z.EncFallback(yy9) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.APIGroups == nil { + r.EncodeNil() + } else { + yym12 := z.EncBinary() + _ = yym12 + if false { + } else { + z.F.EncSliceStringV(x.APIGroups, false, e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiGroups")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.APIGroups == nil { + r.EncodeNil() + } else { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + z.F.EncSliceStringV(x.APIGroups, false, e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Resources == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + z.F.EncSliceStringV(x.Resources, false, e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("resources")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Resources == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + z.F.EncSliceStringV(x.Resources, false, e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.ResourceNames == nil { + r.EncodeNil() + } else { + yym18 := z.EncBinary() + _ = yym18 + if false { + } else { + z.F.EncSliceStringV(x.ResourceNames, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("resourceNames")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ResourceNames == nil { + r.EncodeNil() + } else { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + z.F.EncSliceStringV(x.ResourceNames, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + if x.NonResourceURLs == nil { + r.EncodeNil() + } else { + yym21 := z.EncBinary() + _ = yym21 + if false { + } else { + z.F.EncSliceStringV(x.NonResourceURLs, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("nonResourceURLs")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.NonResourceURLs == nil { + r.EncodeNil() + } else { + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + z.F.EncSliceStringV(x.NonResourceURLs, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PolicyRule) CodecDecodeSelf(d *codec1978.Decoder) { + var h 
codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PolicyRule) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "verbs": + if r.TryDecodeAsNil() { + x.Verbs = nil + } else { + yyv4 := &x.Verbs + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + z.F.DecSliceStringX(yyv4, false, d) + } + } + case "attributeRestrictions": + if r.TryDecodeAsNil() { + x.AttributeRestrictions = pkg1_runtime.RawExtension{} + } else { + yyv6 := &x.AttributeRestrictions + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(yyv6) { + } else if !yym7 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv6) + } else { + z.DecFallback(yyv6, false) + } + } + case "apiGroups": + if r.TryDecodeAsNil() { + x.APIGroups = nil + } else { + yyv8 := &x.APIGroups + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + z.F.DecSliceStringX(yyv8, false, d) + } + } + case "resources": + if r.TryDecodeAsNil() { + x.Resources = nil + } else { + yyv10 := &x.Resources + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + z.F.DecSliceStringX(yyv10, false, d) + } + } + case "resourceNames": + if r.TryDecodeAsNil() { + x.ResourceNames = nil + } else { + yyv12 := &x.ResourceNames + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + z.F.DecSliceStringX(yyv12, false, d) + } + } + case "nonResourceURLs": + if r.TryDecodeAsNil() { + x.NonResourceURLs = nil + } else { + yyv14 := &x.NonResourceURLs + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + z.F.DecSliceStringX(yyv14, false, d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PolicyRule) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj16 int + var yyb16 bool + var yyhl16 bool = l >= 0 + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Verbs = nil + } else { + yyv17 := &x.Verbs + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + z.F.DecSliceStringX(yyv17, false, d) + } + } + yyj16++ + if yyhl16 { + yyb16 = 
yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.AttributeRestrictions = pkg1_runtime.RawExtension{} + } else { + yyv19 := &x.AttributeRestrictions + yym20 := z.DecBinary() + _ = yym20 + if false { + } else if z.HasExtensions() && z.DecExt(yyv19) { + } else if !yym20 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv19) + } else { + z.DecFallback(yyv19, false) + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIGroups = nil + } else { + yyv21 := &x.APIGroups + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + z.F.DecSliceStringX(yyv21, false, d) + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Resources = nil + } else { + yyv23 := &x.Resources + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + z.F.DecSliceStringX(yyv23, false, d) + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ResourceNames = nil + } else { + yyv25 := &x.ResourceNames + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + z.F.DecSliceStringX(yyv25, false, d) + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.NonResourceURLs = nil + } else { + yyv27 := &x.NonResourceURLs + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + z.F.DecSliceStringX(yyv27, false, d) + } + } + for { + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj16-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *Subject) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[3] = x.Namespace != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 3 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + 
z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Namespace)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("namespace")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Namespace)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Subject) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Subject) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + case "apiVersion": + if r.TryDecodeAsNil() { + 
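// An explicit nil in the stream resets the field to its zero value. +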
x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + x.Name = string(r.DecodeString()) + } + case "namespace": + if r.TryDecodeAsNil() { + x.Namespace = "" + } else { + x.Namespace = string(r.DecodeString()) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Subject) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + x.Name = string(r.DecodeString()) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Namespace = "" + } else { + x.Namespace = string(r.DecodeString()) + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *Role) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = true + yyq2[2] = x.Kind != "" + yyq2[3] = x.APIVersion != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yy4 := &x.ObjectMeta + yy4.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.ObjectMeta + yy6.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Rules == nil { + r.EncodeNil() + } else { + yym9 := z.EncBinary() + _ = yym9 
+ if false { + } else { + h.encSlicePolicyRule(([]PolicyRule)(x.Rules), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("rules")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Rules == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + h.encSlicePolicyRule(([]PolicyRule)(x.Rules), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym12 := z.EncBinary() + _ = yym12 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Role) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Role) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg3_v1.ObjectMeta{} + } else { + yyv4 := &x.ObjectMeta + yyv4.CodecDecodeSelf(d) + } + case "rules": + if r.TryDecodeAsNil() { + x.Rules = nil + } else { + yyv5 := &x.Rules + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + h.decSlicePolicyRule((*[]PolicyRule)(yyv5), 
d) + } + } + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Role) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj9 int + var yyb9 bool + var yyhl9 bool = l >= 0 + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg3_v1.ObjectMeta{} + } else { + yyv10 := &x.ObjectMeta + yyv10.CodecDecodeSelf(d) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Rules = nil + } else { + yyv11 := &x.Rules + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + h.decSlicePolicyRule((*[]PolicyRule)(yyv11), d) + } + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + for { + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj9-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *RoleBinding) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = true + yyq2[3] = x.Kind != "" + yyq2[4] = x.APIVersion != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yy4 := &x.ObjectMeta + yy4.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.ObjectMeta + yy6.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if 
x.Subjects == nil { + r.EncodeNil() + } else { + yym9 := z.EncBinary() + _ = yym9 + if false { + } else { + h.encSliceSubject(([]Subject)(x.Subjects), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("subject")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Subjects == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + h.encSliceSubject(([]Subject)(x.Subjects), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy12 := &x.RoleRef + yy12.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("roleRef")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy14 := &x.RoleRef + yy14.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym18 := z.EncBinary() + _ = yym18 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym21 := z.EncBinary() + _ = yym21 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *RoleBinding) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *RoleBinding) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = 
r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg3_v1.ObjectMeta{} + } else { + yyv4 := &x.ObjectMeta + yyv4.CodecDecodeSelf(d) + } + case "subject": + if r.TryDecodeAsNil() { + x.Subjects = nil + } else { + yyv5 := &x.Subjects + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + h.decSliceSubject((*[]Subject)(yyv5), d) + } + } + case "roleRef": + if r.TryDecodeAsNil() { + x.RoleRef = pkg3_v1.ObjectReference{} + } else { + yyv7 := &x.RoleRef + yyv7.CodecDecodeSelf(d) + } + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *RoleBinding) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg3_v1.ObjectMeta{} + } else { + yyv11 := &x.ObjectMeta + yyv11.CodecDecodeSelf(d) + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Subjects = nil + } else { + yyv12 := &x.Subjects + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + h.decSliceSubject((*[]Subject)(yyv12), d) + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.RoleRef = pkg3_v1.ObjectReference{} + } else { + yyv14 := &x.RoleRef + yyv14.CodecDecodeSelf(d) + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *RoleBindingList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := 
z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = true + yyq2[2] = x.Kind != "" + yyq2[3] = x.APIVersion != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yy4 := &x.ListMeta + yym5 := z.EncBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.EncExt(yy4) { + } else { + z.EncFallback(yy4) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.ListMeta + yym7 := z.EncBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.EncExt(yy6) { + } else { + z.EncFallback(yy6) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym9 := z.EncBinary() + _ = yym9 + if false { + } else { + h.encSliceRoleBinding(([]RoleBinding)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + h.encSliceRoleBinding(([]RoleBinding)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym12 := z.EncBinary() + _ = yym12 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *RoleBindingList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 
0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *RoleBindingList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_unversioned.ListMeta{} + } else { + yyv4 := &x.ListMeta + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(yyv4) { + } else { + z.DecFallback(yyv4, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv6 := &x.Items + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + h.decSliceRoleBinding((*[]RoleBinding)(yyv6), d) + } + } + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *RoleBindingList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_unversioned.ListMeta{} + } else { + yyv11 := &x.ListMeta + yym12 := z.DecBinary() + _ = yym12 + if false { + } else if z.HasExtensions() && z.DecExt(yyv11) { + } else { + z.DecFallback(yyv11, false) + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv13 := &x.Items + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + h.decSliceRoleBinding((*[]RoleBinding)(yyv13), d) + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + 
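// Fourth fixed slot of the array form: apiVersion. +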
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *RoleList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = true + yyq2[2] = x.Kind != "" + yyq2[3] = x.APIVersion != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yy4 := &x.ListMeta + yym5 := z.EncBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.EncExt(yy4) { + } else { + z.EncFallback(yy4) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.ListMeta + yym7 := z.EncBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.EncExt(yy6) { + } else { + z.EncFallback(yy6) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym9 := z.EncBinary() + _ = yym9 + if false { + } else { + h.encSliceRole(([]Role)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + h.encSliceRole(([]Role)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym12 := z.EncBinary() + _ = yym12 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym16 := z.EncBinary() + _ = yym16 + if false { + } 
else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *RoleList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *RoleList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_unversioned.ListMeta{} + } else { + yyv4 := &x.ListMeta + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(yyv4) { + } else { + z.DecFallback(yyv4, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv6 := &x.Items + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + h.decSliceRole((*[]Role)(yyv6), d) + } + } + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *RoleList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_unversioned.ListMeta{} + } else { + yyv11 := &x.ListMeta + yym12 := z.DecBinary() + _ = yym12 + if false { + } else if z.HasExtensions() && z.DecExt(yyv11) { + } else { + z.DecFallback(yyv11, false) + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv13 := &x.Items + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + 
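// Decode the items slice in place with the generated helper. +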
h.decSliceRole((*[]Role)(yyv13), d) + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ClusterRole) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = true + yyq2[2] = x.Kind != "" + yyq2[3] = x.APIVersion != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yy4 := &x.ObjectMeta + yy4.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.ObjectMeta + yy6.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Rules == nil { + r.EncodeNil() + } else { + yym9 := z.EncBinary() + _ = yym9 + if false { + } else { + h.encSlicePolicyRule(([]PolicyRule)(x.Rules), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("rules")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Rules == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + h.encSlicePolicyRule(([]PolicyRule)(x.Rules), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym12 := z.EncBinary() + _ = yym12 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + 
r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ClusterRole) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ClusterRole) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg3_v1.ObjectMeta{} + } else { + yyv4 := &x.ObjectMeta + yyv4.CodecDecodeSelf(d) + } + case "rules": + if r.TryDecodeAsNil() { + x.Rules = nil + } else { + yyv5 := &x.Rules + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + h.decSlicePolicyRule((*[]PolicyRule)(yyv5), d) + } + } + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ClusterRole) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj9 int + var yyb9 bool + var yyhl9 bool = l >= 0 + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg3_v1.ObjectMeta{} + } else { + yyv10 := &x.ObjectMeta + yyv10.CodecDecodeSelf(d) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if 
r.TryDecodeAsNil() { + x.Rules = nil + } else { + yyv11 := &x.Rules + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + h.decSlicePolicyRule((*[]PolicyRule)(yyv11), d) + } + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + for { + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj9-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ClusterRoleBinding) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = true + yyq2[3] = x.Kind != "" + yyq2[4] = x.APIVersion != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yy4 := &x.ObjectMeta + yy4.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.ObjectMeta + yy6.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Subjects == nil { + r.EncodeNil() + } else { + yym9 := z.EncBinary() + _ = yym9 + if false { + } else { + h.encSliceSubject(([]Subject)(x.Subjects), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("subject")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Subjects == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + h.encSliceSubject(([]Subject)(x.Subjects), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy12 := &x.RoleRef + yy12.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("roleRef")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy14 := &x.RoleRef + yy14.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if 
yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym18 := z.EncBinary() + _ = yym18 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym21 := z.EncBinary() + _ = yym21 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ClusterRoleBinding) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ClusterRoleBinding) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg3_v1.ObjectMeta{} + } else { + yyv4 := &x.ObjectMeta + yyv4.CodecDecodeSelf(d) + } + case "subject": + if r.TryDecodeAsNil() { + x.Subjects = nil + } else { + yyv5 := &x.Subjects + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + h.decSliceSubject((*[]Subject)(yyv5), d) + } + } + case "roleRef": + if r.TryDecodeAsNil() { + x.RoleRef = pkg3_v1.ObjectReference{} + } else { + yyv7 := &x.RoleRef + yyv7.CodecDecodeSelf(d) + } + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ClusterRoleBinding) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r 
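+ // yyj10 counts consumed elements; yyb10 turns true at end of input, judged by the declared length when known (yyhl10) or by a stream break marker otherwise.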
+ var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg3_v1.ObjectMeta{} + } else { + yyv11 := &x.ObjectMeta + yyv11.CodecDecodeSelf(d) + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Subjects = nil + } else { + yyv12 := &x.Subjects + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + h.decSliceSubject((*[]Subject)(yyv12), d) + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.RoleRef = pkg3_v1.ObjectReference{} + } else { + yyv14 := &x.RoleRef + yyv14.CodecDecodeSelf(d) + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ClusterRoleBindingList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = true + yyq2[2] = x.Kind != "" + yyq2[3] = x.APIVersion != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yy4 := &x.ListMeta + yym5 := z.EncBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.EncExt(yy4) { + } else { + z.EncFallback(yy4) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.ListMeta + yym7 := z.EncBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.EncExt(yy6) { + } else { + z.EncFallback(yy6) + } + } + } + if yyr2 || yy2arr2 { + 
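// Array form: Items always occupies the next fixed slot, nil or not. +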
z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym9 := z.EncBinary() + _ = yym9 + if false { + } else { + h.encSliceClusterRoleBinding(([]ClusterRoleBinding)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + h.encSliceClusterRoleBinding(([]ClusterRoleBinding)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym12 := z.EncBinary() + _ = yym12 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ClusterRoleBindingList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ClusterRoleBindingList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_unversioned.ListMeta{} + } else { + yyv4 := &x.ListMeta + yym5 := 
z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(yyv4) { + } else { + z.DecFallback(yyv4, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv6 := &x.Items + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + h.decSliceClusterRoleBinding((*[]ClusterRoleBinding)(yyv6), d) + } + } + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ClusterRoleBindingList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_unversioned.ListMeta{} + } else { + yyv11 := &x.ListMeta + yym12 := z.DecBinary() + _ = yym12 + if false { + } else if z.HasExtensions() && z.DecExt(yyv11) { + } else { + z.DecFallback(yyv11, false) + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv13 := &x.Items + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + h.decSliceClusterRoleBinding((*[]ClusterRoleBinding)(yyv13), d) + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ClusterRoleList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = true + yyq2[2] = x.Kind != "" + yyq2[3] = x.APIVersion != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 
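+ // Map form: yynn2 was seeded with the always-present field count, then grew by one per optional field flagged in yyq2.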
+ } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yy4 := &x.ListMeta + yym5 := z.EncBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.EncExt(yy4) { + } else { + z.EncFallback(yy4) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.ListMeta + yym7 := z.EncBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.EncExt(yy6) { + } else { + z.EncFallback(yy6) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym9 := z.EncBinary() + _ = yym9 + if false { + } else { + h.encSliceClusterRole(([]ClusterRole)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + h.encSliceClusterRole(([]ClusterRole)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym12 := z.EncBinary() + _ = yym12 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ClusterRoleList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ClusterRoleList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h 
codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_unversioned.ListMeta{} + } else { + yyv4 := &x.ListMeta + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(yyv4) { + } else { + z.DecFallback(yyv4, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv6 := &x.Items + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + h.decSliceClusterRole((*[]ClusterRole)(yyv6), d) + } + } + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ClusterRoleList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_unversioned.ListMeta{} + } else { + yyv11 := &x.ListMeta + yym12 := z.DecBinary() + _ = yym12 + if false { + } else if z.HasExtensions() && z.DecExt(yyv11) { + } else { + z.DecFallback(yyv11, false) + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv13 := &x.Items + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + h.decSliceClusterRole((*[]ClusterRole)(yyv13), d) + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) encSlicePolicyRule(v []PolicyRule, e *codec1978.Encoder) { + var h 
codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSlicePolicyRule(v *[]PolicyRule, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []PolicyRule{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 160) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]PolicyRule, yyrl1) + } + } else { + yyv1 = make([]PolicyRule, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = PolicyRule{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, PolicyRule{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = PolicyRule{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, PolicyRule{}) // var yyz1 PolicyRule + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = PolicyRule{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []PolicyRule{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceSubject(v []Subject, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceSubject(v *[]Subject, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []Subject{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 64) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]Subject, yyrl1) + } + } else { + yyv1 = make([]Subject, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = 
yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Subject{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, Subject{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Subject{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, Subject{}) // var yyz1 Subject + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = Subject{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []Subject{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceRoleBinding(v []RoleBinding, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceRoleBinding(v *[]RoleBinding, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []RoleBinding{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 376) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]RoleBinding, yyrl1) + } + } else { + yyv1 = make([]RoleBinding, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = RoleBinding{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, RoleBinding{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = RoleBinding{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, RoleBinding{}) // var yyz1 RoleBinding + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = RoleBinding{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []RoleBinding{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceRole(v []Role, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + 
r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceRole(v *[]Role, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []Role{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 264) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]Role, yyrl1) + } + } else { + yyv1 = make([]Role, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Role{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, Role{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Role{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, Role{}) // var yyz1 Role + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = Role{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []Role{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceClusterRoleBinding(v []ClusterRoleBinding, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceClusterRoleBinding(v *[]ClusterRoleBinding, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []ClusterRoleBinding{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 376) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]ClusterRoleBinding, yyrl1) + } + } else { + yyv1 = make([]ClusterRoleBinding, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + 
yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ClusterRoleBinding{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, ClusterRoleBinding{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ClusterRoleBinding{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, ClusterRoleBinding{}) // var yyz1 ClusterRoleBinding + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = ClusterRoleBinding{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []ClusterRoleBinding{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceClusterRole(v []ClusterRole, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceClusterRole(v *[]ClusterRole, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []ClusterRole{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 264) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]ClusterRole, yyrl1) + } + } else { + yyv1 = make([]ClusterRole, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ClusterRole{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, ClusterRole{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ClusterRole{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, ClusterRole{}) // var yyz1 ClusterRole + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = ClusterRole{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []ClusterRole{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/types.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/types.go new file mode 100644 
index 000000000000..0863dfbf588e --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/types.go @@ -0,0 +1,164 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/runtime" +) + +// Authorization is calculated against +// 1. evaluation of ClusterRoleBindings - short circuit on match +// 2. evaluation of RoleBindings in the namespace requested - short circuit on match +// 3. deny by default + +// PolicyRule holds information that describes a policy rule, but does not contain information +// about who the rule applies to or which namespace the rule applies to. +type PolicyRule struct { + // Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all verbs. + Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"` + // AttributeRestrictions will vary depending on what the Authorizer/AuthorizationAttributeBuilder pair supports. + // If the Authorizer does not recognize how to handle the AttributeRestrictions, the Authorizer should report an error. + AttributeRestrictions runtime.RawExtension `json:"attributeRestrictions,omitempty" protobuf:"bytes,2,opt,name=attributeRestrictions"` + // APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of + // the enumerated resources in any API group will be allowed. + APIGroups []string `json:"apiGroups" protobuf:"bytes,3,rep,name=apiGroups"` + // Resources is a list of resources this rule applies to. ResourceAll represents all resources. + Resources []string `json:"resources" protobuf:"bytes,4,rep,name=resources"` + // ResourceNames is an optional whitelist of names that the rule applies to. An empty set means that everything is allowed. + ResourceNames []string `json:"resourceNames,omitempty" protobuf:"bytes,5,rep,name=resourceNames"` + // NonResourceURLsSlice is a set of partial URLs that a user should have access to. *s are allowed, but only as the full, final step in the path. + // This name is intentionally different from the internal type so that the DefaultConvert works nicely and because the ordering may be different. + // Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. + NonResourceURLs []string `json:"nonResourceURLs,omitempty" protobuf:"bytes,6,rep,name=nonResourceURLs"` +} + +// Subject contains a reference to the object or user identities a role binding applies to. This can either hold a direct API object reference, +// or a value for non-objects such as user and group names. +type Subject struct { + // Kind of object being referenced. Values defined by this API group are "User", "Group", and "ServiceAccount".
+ // If the Authorizer does not recognize the kind value, the Authorizer should report an error. + Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` + // APIVersion holds the API group and version of the referenced object. For non-object references such as "Group" and "User" this is + // expected to be the API version of this API group. For example "rbac/v1alpha1". + APIVersion string `json:"apiVersion" protobuf:"bytes,2,opt,name=apiVersion"` + // Name of the object being referenced. + Name string `json:"name" protobuf:"bytes,3,opt,name=name"` + // Namespace of the referenced object. If the object kind is non-namespaced, such as "User" or "Group", and this value is not empty + // the Authorizer should report an error. + Namespace string `json:"namespace,omitempty" protobuf:"bytes,4,opt,name=namespace"` +} + +// +genclient=true + +// Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding. +type Role struct { + unversioned.TypeMeta `json:",inline"` + // Standard object's metadata. + v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Rules holds all the PolicyRules for this Role + Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` +} + +// +genclient=true + +// RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. +// It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given +// namespace only have effect in that namespace. +type RoleBinding struct { + unversioned.TypeMeta `json:",inline"` + // Standard object's metadata. + v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Subjects holds references to the objects the role applies to. + Subjects []Subject `json:"subject" protobuf:"bytes,2,rep,name=subjects"` + + // RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. + // If the RoleRef cannot be resolved, the Authorizer must return an error. + RoleRef v1.ObjectReference `json:"roleRef" protobuf:"bytes,3,opt,name=roleRef"` +} + +// RoleBindingList is a collection of RoleBindings +type RoleBindingList struct { + unversioned.TypeMeta `json:",inline"` + // Standard object's metadata. + unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of RoleBindings + Items []RoleBinding `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// RoleList is a collection of Roles +type RoleList struct { + unversioned.TypeMeta `json:",inline"` + // Standard object's metadata. + unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of Roles + Items []Role `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient=true,nonNamespaced=true + +// ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. +type ClusterRole struct { + unversioned.TypeMeta `json:",inline"` + // Standard object's metadata. + v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Rules holds all the PolicyRules for this ClusterRole + Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` +} + +// +genclient=true,nonNamespaced=true + +// ClusterRoleBinding references a ClusterRole, but does not contain it.
It can reference a ClusterRole in the global namespace, +// and adds who information via Subject. +type ClusterRoleBinding struct { + unversioned.TypeMeta `json:",inline"` + // Standard object's metadata. + v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Subjects holds references to the objects the role applies to. + Subjects []Subject `json:"subject" protobuf:"bytes,2,rep,name=subjects"` + + // RoleRef can only reference a ClusterRole in the global namespace. + // If the RoleRef cannot be resolved, the Authorizer must return an error. + RoleRef v1.ObjectReference `json:"roleRef" protobuf:"bytes,3,opt,name=roleRef"` +} + +// ClusterRoleBindingList is a collection of ClusterRoleBindings +type ClusterRoleBindingList struct { + unversioned.TypeMeta `json:",inline"` + // Standard object's metadata. + unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of ClusterRoleBindings + Items []ClusterRoleBinding `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// ClusterRoleList is a collection of ClusterRoles +type ClusterRoleList struct { + unversioned.TypeMeta `json:",inline"` + // Standard object's metadata. + unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of ClusterRoles + Items []ClusterRole `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/types_swagger_doc_generated.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/types_swagger_doc_generated.go new file mode 100644 index 000000000000..c9d723469df8 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/types_swagger_doc_generated.go @@ -0,0 +1,138 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple lines or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_ClusterRole = map[string]string{ + "": "ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.", + "metadata": "Standard object's metadata.", + "rules": "Rules holds all the PolicyRules for this ClusterRole", +} + +func (ClusterRole) SwaggerDoc() map[string]string { + return map_ClusterRole +} + +var map_ClusterRoleBinding = map[string]string{ + "": "ClusterRoleBinding references a ClusterRole, but does not contain it. 
It can reference a ClusterRole in the global namespace, and adds who information via Subject.", + "metadata": "Standard object's metadata.", + "subject": "Subjects holds references to the objects the role applies to.", + "roleRef": "RoleRef can only reference a ClusterRole in the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error.", +} + +func (ClusterRoleBinding) SwaggerDoc() map[string]string { + return map_ClusterRoleBinding +} + +var map_ClusterRoleBindingList = map[string]string{ + "": "ClusterRoleBindingList is a collection of ClusterRoleBindings", + "metadata": "Standard object's metadata.", + "items": "Items is a list of ClusterRoleBindings", +} + +func (ClusterRoleBindingList) SwaggerDoc() map[string]string { + return map_ClusterRoleBindingList +} + +var map_ClusterRoleList = map[string]string{ + "": "ClusterRoleList is a collection of ClusterRoles", + "metadata": "Standard object's metadata.", + "items": "Items is a list of ClusterRoles", +} + +func (ClusterRoleList) SwaggerDoc() map[string]string { + return map_ClusterRoleList +} + +var map_PolicyRule = map[string]string{ + "": "PolicyRule holds information that describes a policy rule, but does not contain information about who the rule applies to or which namespace the rule applies to.", + "verbs": "Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all verbs.", + "attributeRestrictions": "AttributeRestrictions will vary depending on what the Authorizer/AuthorizationAttributeBuilder pair supports. If the Authorizer does not recognize how to handle the AttributeRestrictions, the Authorizer should report an error.", + "apiGroups": "APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed.", + "resources": "Resources is a list of resources this rule applies to. ResourceAll represents all resources.", + "resourceNames": "ResourceNames is an optional whitelist of names that the rule applies to. An empty set means that everything is allowed.", + "nonResourceURLs": "NonResourceURLsSlice is a set of partial URLs that a user should have access to. *s are allowed, but only as the full, final step in the path. This name is intentionally different from the internal type so that the DefaultConvert works nicely and because the ordering may be different. Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding.", +} + +func (PolicyRule) SwaggerDoc() map[string]string { + return map_PolicyRule +} + +var map_Role = map[string]string{ + "": "Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding.", + "metadata": "Standard object's metadata.", + "rules": "Rules holds all the PolicyRules for this Role", +} + +func (Role) SwaggerDoc() map[string]string { + return map_Role +} + +var map_RoleBinding = map[string]string{ + "": "RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. It adds who information via Subjects and namespace information by which namespace it exists in. 
RoleBindings in a given namespace only have effect in that namespace.", + "metadata": "Standard object's metadata.", + "subject": "Subjects holds references to the objects the role applies to.", + "roleRef": "RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error.", +} + +func (RoleBinding) SwaggerDoc() map[string]string { + return map_RoleBinding +} + +var map_RoleBindingList = map[string]string{ + "": "RoleBindingList is a collection of RoleBindings", + "metadata": "Standard object's metadata.", + "items": "Items is a list of RoleBindings", +} + +func (RoleBindingList) SwaggerDoc() map[string]string { + return map_RoleBindingList +} + +var map_RoleList = map[string]string{ + "": "RoleList is a collection of Roles", + "metadata": "Standard object's metadata.", + "items": "Items is a list of Roles", +} + +func (RoleList) SwaggerDoc() map[string]string { + return map_RoleList +} + +var map_Subject = map[string]string{ + "": "Subject contains a reference to the object or user identities a role binding applies to. This can either hold a direct API object reference, or a value for non-objects such as user and group names.", + "kind": "Kind of object being referenced. Values defined by this API group are \"User\", \"Group\", and \"ServiceAccount\". If the Authorizer does not recognize the kind value, the Authorizer should report an error.", + "apiVersion": "APIVersion holds the API group and version of the referenced object. For non-object references such as \"Group\" and \"User\" this is expected to be the API version of this API group. For example \"rbac/v1alpha1\".", + "name": "Name of the object being referenced.", + "namespace": "Namespace of the referenced object. If the object kind is non-namespaced, such as \"User\" or \"Group\", and this value is not empty the Authorizer should report an error.", +} + +func (Subject) SwaggerDoc() map[string]string { + return map_Subject +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/rbac/validation/cast.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/rbac/validation/cast.go new file mode 100644 index 000000000000..1f5e83fdcbd6 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/rbac/validation/cast.go @@ -0,0 +1,107 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package validation + +import "k8s.io/kubernetes/pkg/apis/rbac" + +// Casting utilities to and from "Cluster" level equivalents. 
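+// +// As a hedged illustration (validateClusterRole below is hypothetical, not part +// of this package): these casts let logic written once against the "Cluster" +// shapes be reused for the namespaced kinds. A caller holding some +// validateClusterRole func(*rbac.ClusterRole) error could check a namespaced +// Role with: +// +// if err := validateClusterRole(toClusterRole(role)); err != nil { +// return err // the Role's ObjectMeta and Rules carry over unchanged +// }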
+ +func toClusterRole(in *rbac.Role) *rbac.ClusterRole { + if in == nil { + return nil + } + + ret := &rbac.ClusterRole{} + ret.ObjectMeta = in.ObjectMeta + ret.Rules = in.Rules + + return ret +} + +func toClusterRoleList(in *rbac.RoleList) *rbac.ClusterRoleList { + ret := &rbac.ClusterRoleList{} + for _, curr := range in.Items { + ret.Items = append(ret.Items, *toClusterRole(&curr)) + } + + return ret +} + +func toClusterRoleBinding(in *rbac.RoleBinding) *rbac.ClusterRoleBinding { + if in == nil { + return nil + } + + ret := &rbac.ClusterRoleBinding{} + ret.ObjectMeta = in.ObjectMeta + ret.Subjects = in.Subjects + ret.RoleRef = in.RoleRef + + return ret +} + +func toClusterRoleBindingList(in *rbac.RoleBindingList) *rbac.ClusterRoleBindingList { + ret := &rbac.ClusterRoleBindingList{} + for _, curr := range in.Items { + ret.Items = append(ret.Items, *toClusterRoleBinding(&curr)) + } + + return ret +} + +func toRole(in *rbac.ClusterRole) *rbac.Role { + if in == nil { + return nil + } + + ret := &rbac.Role{} + ret.ObjectMeta = in.ObjectMeta + ret.Rules = in.Rules + + return ret +} + +func toRoleList(in *rbac.ClusterRoleList) *rbac.RoleList { + ret := &rbac.RoleList{} + for _, curr := range in.Items { + ret.Items = append(ret.Items, *toRole(&curr)) + } + + return ret +} + +func toRoleBinding(in *rbac.ClusterRoleBinding) *rbac.RoleBinding { + if in == nil { + return nil + } + + ret := &rbac.RoleBinding{} + ret.ObjectMeta = in.ObjectMeta + ret.Subjects = in.Subjects + ret.RoleRef = in.RoleRef + + return ret +} + +func toRoleBindingList(in *rbac.ClusterRoleBindingList) *rbac.RoleBindingList { + ret := &rbac.RoleBindingList{} + for _, curr := range in.Items { + ret.Items = append(ret.Items, *toRoleBinding(&curr)) + } + + return ret +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/rbac/validation/policy_comparator.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/rbac/validation/policy_comparator.go new file mode 100644 index 000000000000..1e5e74faa175 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/rbac/validation/policy_comparator.go @@ -0,0 +1,120 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package validation + +import "k8s.io/kubernetes/pkg/apis/rbac" + +// Covers determines whether or not the ownerRules cover the servantRules in terms of allowed actions. +// It returns whether the ownerRules cover the servantRules, along with a list of any rules that the ownerRules do not cover. +func Covers(ownerRules, servantRules []rbac.PolicyRule) (bool, []rbac.PolicyRule) { + // 1. Break every servantRule into individual rule tuples: group, verb, resource, resourceName + // 2. Compare the mini-rules against each owner rule. Because the breakdown is down to the most atomic level, we're guaranteed that each mini-servant rule will be either fully covered or not covered by a single owner rule + // 3. Any leftover mini-rules mean that we are not covered, and we have a nice list of them. 
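+ // For example (illustrative only): a servant rule + // {APIGroups: ["v1"], Verbs: ["get", "list"], Resources: ["pods", "secrets"]} + // breaks down into four atomic tuples, + // (v1, pods, get), (v1, pods, list), (v1, secrets, get), (v1, secrets, list), + // and each tuple must be covered on its own by some single owner rule.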
+ // TODO: it might be nice to collapse the list down into something more human readable + + subrules := []rbac.PolicyRule{} + for _, servantRule := range servantRules { + subrules = append(subrules, breakdownRule(servantRule)...) + } + + uncoveredRules := []rbac.PolicyRule{} + for _, subrule := range subrules { + covered := false + for _, ownerRule := range ownerRules { + if ruleCovers(ownerRule, subrule) { + covered = true + break + } + } + + if !covered { + uncoveredRules = append(uncoveredRules, subrule) + } + } + + return (len(uncoveredRules) == 0), uncoveredRules +} + +// breakdownRule takes a rule and builds an equivalent list of rules that each have at most one verb, one +// resource, and one resource name +func breakdownRule(rule rbac.PolicyRule) []rbac.PolicyRule { + subrules := []rbac.PolicyRule{} + for _, group := range rule.APIGroups { + for _, resource := range rule.Resources { + for _, verb := range rule.Verbs { + if len(rule.ResourceNames) > 0 { + for _, resourceName := range rule.ResourceNames { + subrules = append(subrules, rbac.PolicyRule{APIGroups: []string{group}, Resources: []string{resource}, Verbs: []string{verb}, ResourceNames: []string{resourceName}}) + } + + } else { + subrules = append(subrules, rbac.PolicyRule{APIGroups: []string{group}, Resources: []string{resource}, Verbs: []string{verb}}) + } + + } + } + } + + // Non-resource URLs are unique because they don't combine with other policy rule fields. + for _, nonResourceURL := range rule.NonResourceURLs { + subrules = append(subrules, rbac.PolicyRule{NonResourceURLs: []string{nonResourceURL}}) + } + + return subrules +} + +func has(set []string, ele string) bool { + for _, s := range set { + if s == ele { + return true + } + } + return false +} + +func hasAll(set, contains []string) bool { + owning := make(map[string]struct{}, len(set)) + for _, ele := range set { + owning[ele] = struct{}{} + } + for _, ele := range contains { + if _, ok := owning[ele]; !ok { + return false + } + } + return true +} + +// ruleCovers determines whether the ownerRule (which may have multiple verbs, resources, and resourceNames) covers +// the subrule (which contains at most one verb, resource, and resourceName) +func ruleCovers(ownerRule, subRule rbac.PolicyRule) bool { + + verbMatches := has(ownerRule.Verbs, rbac.VerbAll) || hasAll(ownerRule.Verbs, subRule.Verbs) + groupMatches := has(ownerRule.APIGroups, rbac.APIGroupAll) || hasAll(ownerRule.APIGroups, subRule.APIGroups) + resourceMatches := has(ownerRule.Resources, rbac.ResourceAll) || hasAll(ownerRule.Resources, subRule.Resources) + nonResourceURLMatches := has(ownerRule.NonResourceURLs, rbac.NonResourceAll) || hasAll(ownerRule.NonResourceURLs, subRule.NonResourceURLs) + + resourceNameMatches := false + + if len(subRule.ResourceNames) == 0 { + resourceNameMatches = (len(ownerRule.ResourceNames) == 0) + } else { + resourceNameMatches = (len(ownerRule.ResourceNames) == 0) || hasAll(ownerRule.ResourceNames, subRule.ResourceNames) + } + + return verbMatches && groupMatches && resourceMatches && resourceNameMatches && nonResourceURLMatches +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/rbac/validation/policy_comparator_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/rbac/validation/policy_comparator_test.go new file mode 100644 index 000000000000..da2f910a4e64 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/rbac/validation/policy_comparator_test.go @@ -0,0 +1,371 @@ +/* +Copyright 2016 The Kubernetes Authors All rights
reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package validation + +import ( + "reflect" + "testing" + + "k8s.io/kubernetes/pkg/apis/rbac" +) + +type escalationTest struct { + ownerRules []rbac.PolicyRule + servantRules []rbac.PolicyRule + + expectedCovered bool + expectedUncoveredRules []rbac.PolicyRule +} + +func TestCoversExactMatch(t *testing.T) { + escalationTest{ + ownerRules: []rbac.PolicyRule{ + {APIGroups: []string{"v1"}, Verbs: []string{"get"}, Resources: []string{"builds"}}, + }, + servantRules: []rbac.PolicyRule{ + {APIGroups: []string{"v1"}, Verbs: []string{"get"}, Resources: []string{"builds"}}, + }, + + expectedCovered: true, + expectedUncoveredRules: []rbac.PolicyRule{}, + }.test(t) +} + +func TestCoversMultipleRulesCoveringSingleRule(t *testing.T) { + escalationTest{ + ownerRules: []rbac.PolicyRule{ + {APIGroups: []string{"v1"}, Verbs: []string{"delete"}, Resources: []string{"deployments"}}, + {APIGroups: []string{"v1"}, Verbs: []string{"delete"}, Resources: []string{"builds"}}, + {APIGroups: []string{"v1"}, Verbs: []string{"update"}, Resources: []string{"builds", "deployments"}}, + }, + servantRules: []rbac.PolicyRule{ + {APIGroups: []string{"v1"}, Verbs: []string{"delete", "update"}, Resources: []string{"builds", "deployments"}}, + }, + + expectedCovered: true, + expectedUncoveredRules: []rbac.PolicyRule{}, + }.test(t) + +} + +func TestCoversMultipleAPIGroupsCoveringSingleRule(t *testing.T) { + escalationTest{ + ownerRules: []rbac.PolicyRule{ + {APIGroups: []string{"group1"}, Verbs: []string{"delete"}, Resources: []string{"deployments"}}, + {APIGroups: []string{"group1"}, Verbs: []string{"delete"}, Resources: []string{"builds"}}, + {APIGroups: []string{"group1"}, Verbs: []string{"update"}, Resources: []string{"builds", "deployments"}}, + {APIGroups: []string{"group2"}, Verbs: []string{"delete"}, Resources: []string{"deployments"}}, + {APIGroups: []string{"group2"}, Verbs: []string{"delete"}, Resources: []string{"builds"}}, + {APIGroups: []string{"group2"}, Verbs: []string{"update"}, Resources: []string{"builds", "deployments"}}, + }, + servantRules: []rbac.PolicyRule{ + {APIGroups: []string{"group1", "group2"}, Verbs: []string{"delete", "update"}, Resources: []string{"builds", "deployments"}}, + }, + + expectedCovered: true, + expectedUncoveredRules: []rbac.PolicyRule{}, + }.test(t) + +} + +func TestCoversSingleAPIGroupsCoveringMultiple(t *testing.T) { + escalationTest{ + ownerRules: []rbac.PolicyRule{ + {APIGroups: []string{"group1", "group2"}, Verbs: []string{"delete", "update"}, Resources: []string{"builds", "deployments"}}, + }, + servantRules: []rbac.PolicyRule{ + {APIGroups: []string{"group1"}, Verbs: []string{"delete"}, Resources: []string{"deployments"}}, + {APIGroups: []string{"group1"}, Verbs: []string{"delete"}, Resources: []string{"builds"}}, + {APIGroups: []string{"group1"}, Verbs: []string{"update"}, Resources: []string{"builds", "deployments"}}, + {APIGroups: []string{"group2"}, Verbs: []string{"delete"}, Resources: []string{"deployments"}}, + {APIGroups: 
[]string{"group2"}, Verbs: []string{"delete"}, Resources: []string{"builds"}}, + {APIGroups: []string{"group2"}, Verbs: []string{"update"}, Resources: []string{"builds", "deployments"}}, + }, + + expectedCovered: true, + expectedUncoveredRules: []rbac.PolicyRule{}, + }.test(t) + +} + +func TestCoversMultipleRulesMissingSingleVerbResourceCombination(t *testing.T) { + escalationTest{ + ownerRules: []rbac.PolicyRule{ + {APIGroups: []string{"v1"}, Verbs: []string{"delete", "update"}, Resources: []string{"builds", "deployments"}}, + {APIGroups: []string{"v1"}, Verbs: []string{"delete"}, Resources: []string{"pods"}}, + }, + servantRules: []rbac.PolicyRule{ + {APIGroups: []string{"v1"}, Verbs: []string{"delete", "update"}, Resources: []string{"builds", "deployments", "pods"}}, + }, + + expectedCovered: false, + expectedUncoveredRules: []rbac.PolicyRule{ + {APIGroups: []string{"v1"}, Verbs: []string{"update"}, Resources: []string{"pods"}}, + }, + }.test(t) +} + +func TestCoversAPIGroupStarCoveringMultiple(t *testing.T) { + escalationTest{ + ownerRules: []rbac.PolicyRule{ + {APIGroups: []string{"*"}, Verbs: []string{"get"}, Resources: []string{"roles"}}, + }, + servantRules: []rbac.PolicyRule{ + {APIGroups: []string{"group1", "group2"}, Verbs: []string{"get"}, Resources: []string{"roles"}}, + }, + + expectedCovered: true, + expectedUncoveredRules: []rbac.PolicyRule{}, + }.test(t) +} + +func TestCoversEnumerationNotCoveringAPIGroupStar(t *testing.T) { + escalationTest{ + ownerRules: []rbac.PolicyRule{ + {APIGroups: []string{"dummy-group"}, Verbs: []string{"get"}, Resources: []string{"roles"}}, + }, + servantRules: []rbac.PolicyRule{ + {APIGroups: []string{"*"}, Verbs: []string{"get"}, Resources: []string{"roles"}}, + }, + + expectedCovered: false, + expectedUncoveredRules: []rbac.PolicyRule{ + {APIGroups: []string{"*"}, Verbs: []string{"get"}, Resources: []string{"roles"}}, + }, + }.test(t) +} + +func TestCoversAPIGroupStarCoveringStar(t *testing.T) { + escalationTest{ + ownerRules: []rbac.PolicyRule{ + {APIGroups: []string{"*"}, Verbs: []string{"get"}, Resources: []string{"roles"}}, + }, + servantRules: []rbac.PolicyRule{ + {APIGroups: []string{"*"}, Verbs: []string{"get"}, Resources: []string{"roles"}}, + }, + + expectedCovered: true, + expectedUncoveredRules: []rbac.PolicyRule{}, + }.test(t) +} + +func TestCoversVerbStarCoveringMultiple(t *testing.T) { + escalationTest{ + ownerRules: []rbac.PolicyRule{ + {APIGroups: []string{"v1"}, Verbs: []string{"*"}, Resources: []string{"roles"}}, + }, + servantRules: []rbac.PolicyRule{ + {APIGroups: []string{"v1"}, Verbs: []string{"watch", "list"}, Resources: []string{"roles"}}, + }, + + expectedCovered: true, + expectedUncoveredRules: []rbac.PolicyRule{}, + }.test(t) +} + +func TestCoversEnumerationNotCoveringVerbStar(t *testing.T) { + escalationTest{ + ownerRules: []rbac.PolicyRule{ + {APIGroups: []string{"v1"}, Verbs: []string{"get", "list", "watch", "create", "update", "delete", "exec"}, Resources: []string{"roles"}}, + }, + servantRules: []rbac.PolicyRule{ + {APIGroups: []string{"v1"}, Verbs: []string{"*"}, Resources: []string{"roles"}}, + }, + + expectedCovered: false, + expectedUncoveredRules: []rbac.PolicyRule{ + {APIGroups: []string{"v1"}, Verbs: []string{"*"}, Resources: []string{"roles"}}, + }, + }.test(t) +} + +func TestCoversVerbStarCoveringStar(t *testing.T) { + escalationTest{ + ownerRules: []rbac.PolicyRule{ + {APIGroups: []string{"v1"}, Verbs: []string{"*"}, Resources: []string{"roles"}}, + }, + servantRules: []rbac.PolicyRule{ + {APIGroups: 
[]string{"v1"}, Verbs: []string{"*"}, Resources: []string{"roles"}}, + }, + + expectedCovered: true, + expectedUncoveredRules: []rbac.PolicyRule{}, + }.test(t) +} + +func TestCoversResourceStarCoveringMultiple(t *testing.T) { + escalationTest{ + ownerRules: []rbac.PolicyRule{ + {APIGroups: []string{"v1"}, Verbs: []string{"get"}, Resources: []string{"*"}}, + }, + servantRules: []rbac.PolicyRule{ + {APIGroups: []string{"v1"}, Verbs: []string{"get"}, Resources: []string{"resourcegroup:deployments"}}, + }, + + expectedCovered: true, + expectedUncoveredRules: []rbac.PolicyRule{}, + }.test(t) +} + +func TestCoversEnumerationNotCoveringResourceStar(t *testing.T) { + escalationTest{ + ownerRules: []rbac.PolicyRule{ + {APIGroups: []string{"v1"}, Verbs: []string{"get"}, Resources: []string{"roles", "resourcegroup:deployments"}}, + }, + servantRules: []rbac.PolicyRule{ + {APIGroups: []string{"v1"}, Verbs: []string{"get"}, Resources: []string{"*"}}, + }, + + expectedCovered: false, + expectedUncoveredRules: []rbac.PolicyRule{ + {APIGroups: []string{"v1"}, Verbs: []string{"get"}, Resources: []string{"*"}}, + }, + }.test(t) +} + +func TestCoversResourceStarCoveringStar(t *testing.T) { + escalationTest{ + ownerRules: []rbac.PolicyRule{ + {APIGroups: []string{"v1"}, Verbs: []string{"get"}, Resources: []string{"*"}}, + }, + servantRules: []rbac.PolicyRule{ + {APIGroups: []string{"v1"}, Verbs: []string{"get"}, Resources: []string{"*"}}, + }, + + expectedCovered: true, + expectedUncoveredRules: []rbac.PolicyRule{}, + }.test(t) +} + +func TestCoversResourceNameEmptyCoveringMultiple(t *testing.T) { + escalationTest{ + ownerRules: []rbac.PolicyRule{ + {APIGroups: []string{"v1"}, Verbs: []string{"get"}, Resources: []string{"pods"}, ResourceNames: []string{}}, + }, + servantRules: []rbac.PolicyRule{ + {APIGroups: []string{"v1"}, Verbs: []string{"get"}, Resources: []string{"pods"}, ResourceNames: []string{"foo", "bar"}}, + }, + + expectedCovered: true, + expectedUncoveredRules: []rbac.PolicyRule{}, + }.test(t) +} + +func TestCoversEnumerationNotCoveringResourceNameEmpty(t *testing.T) { + escalationTest{ + ownerRules: []rbac.PolicyRule{ + {APIGroups: []string{"v1"}, Verbs: []string{"get"}, Resources: []string{"pods"}, ResourceNames: []string{"foo", "bar"}}, + }, + servantRules: []rbac.PolicyRule{ + {APIGroups: []string{"v1"}, Verbs: []string{"get"}, Resources: []string{"pods"}, ResourceNames: []string{}}, + }, + + expectedCovered: false, + expectedUncoveredRules: []rbac.PolicyRule{ + {APIGroups: []string{"v1"}, Verbs: []string{"get"}, Resources: []string{"pods"}}, + }, + }.test(t) +} + +func TestCoversNonResourceURLs(t *testing.T) { + escalationTest{ + ownerRules: []rbac.PolicyRule{ + {NonResourceURLs: []string{"/apis"}}, + }, + servantRules: []rbac.PolicyRule{ + {NonResourceURLs: []string{"/apis"}}, + }, + + expectedCovered: true, + expectedUncoveredRules: []rbac.PolicyRule{}, + }.test(t) +} + +func TestCoversNonResourceURLsStar(t *testing.T) { + escalationTest{ + ownerRules: []rbac.PolicyRule{ + {NonResourceURLs: []string{"*"}}, + }, + servantRules: []rbac.PolicyRule{ + {NonResourceURLs: []string{"/apis", "/apis/v1", "/"}}, + }, + + expectedCovered: true, + expectedUncoveredRules: []rbac.PolicyRule{}, + }.test(t) +} + +func TestCoversNonResourceURLsWithOtherFields(t *testing.T) { + escalationTest{ + ownerRules: []rbac.PolicyRule{ + {APIGroups: []string{"v1"}, Verbs: []string{"get"}, Resources: []string{"builds"}, NonResourceURLs: []string{"/apis"}}, + }, + servantRules: []rbac.PolicyRule{ + {APIGroups: 
[]string{"v1"}, Verbs: []string{"get"}, Resources: []string{"builds"}, NonResourceURLs: []string{"/apis"}}, + }, + + expectedCovered: true, + expectedUncoveredRules: []rbac.PolicyRule{}, + }.test(t) +} + +func TestCoversNonResourceURLsWithOtherFieldsFailure(t *testing.T) { + escalationTest{ + ownerRules: []rbac.PolicyRule{ + {APIGroups: []string{"v1"}, Verbs: []string{"get"}, Resources: []string{"builds"}}, + }, + servantRules: []rbac.PolicyRule{ + {APIGroups: []string{"v1"}, Verbs: []string{"get"}, Resources: []string{"builds"}, NonResourceURLs: []string{"/apis"}}, + }, + + expectedCovered: false, + expectedUncoveredRules: []rbac.PolicyRule{{NonResourceURLs: []string{"/apis"}}}, + }.test(t) +} + +func (test escalationTest) test(t *testing.T) { + actualCovered, actualUncoveredRules := Covers(test.ownerRules, test.servantRules) + + if actualCovered != test.expectedCovered { + t.Errorf("expected %v, but got %v", test.expectedCovered, actualCovered) + } + + if !rulesMatch(test.expectedUncoveredRules, actualUncoveredRules) { + t.Errorf("expected %v, but got %v", test.expectedUncoveredRules, actualUncoveredRules) + } +} + +func rulesMatch(expectedRules, actualRules []rbac.PolicyRule) bool { + if len(expectedRules) != len(actualRules) { + return false + } + + for _, expectedRule := range expectedRules { + found := false + for _, actualRule := range actualRules { + if reflect.DeepEqual(expectedRule, actualRule) { + found = true + } + } + + if !found { + return false + } + } + + return true +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/rbac/validation/rulevalidation.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/rbac/validation/rulevalidation.go new file mode 100644 index 000000000000..04b15622dc4b --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/rbac/validation/rulevalidation.go @@ -0,0 +1,208 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package validation + +import ( + "fmt" + + "github.com/golang/glog" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/errors" + "k8s.io/kubernetes/pkg/apis/rbac" + "k8s.io/kubernetes/pkg/auth/user" + utilerrors "k8s.io/kubernetes/pkg/util/errors" +) + +type AuthorizationRuleResolver interface { + // GetRoleReferenceRules attempts to resolve the role reference of a RoleBinding or ClusterRoleBinding. The passed namespace should be the namepsace + // of the role binding, the empty string if a cluster role binding. + GetRoleReferenceRules(ctx api.Context, roleRef api.ObjectReference, namespace string) ([]rbac.PolicyRule, error) + + // GetEffectivePolicyRules returns the list of rules that apply to a given user in a given namespace and error. If an error is returned, the slice of + // PolicyRules may not be complete, but it contains all retrievable rules. This is done because policy rules are purely additive and policy determinations + // can be made on the basis of those rules that are found. 
+ GetEffectivePolicyRules(ctx api.Context) ([]rbac.PolicyRule, error) +} + +// ConfirmNoEscalation determines if the roles for a given user in a given namespace encompass the provided role. +func ConfirmNoEscalation(ctx api.Context, ruleResolver AuthorizationRuleResolver, rules []rbac.PolicyRule) error { + ruleResolutionErrors := []error{} + + ownerLocalRules, err := ruleResolver.GetEffectivePolicyRules(ctx) + if err != nil { + // As per the AuthorizationRuleResolver contract, this may return a non-fatal error with an incomplete list of policies. Log the error and continue. + user, _ := api.UserFrom(ctx) + glog.V(1).Infof("non-fatal error getting local rules for %v: %v", user, err) + ruleResolutionErrors = append(ruleResolutionErrors, err) + } + + masterContext := api.WithNamespace(ctx, "") + ownerGlobalRules, err := ruleResolver.GetEffectivePolicyRules(masterContext) + if err != nil { + // Same case as above. Log the error, don't fail. + user, _ := api.UserFrom(ctx) + glog.V(1).Infof("non-fatal error getting global rules for %v: %v", user, err) + ruleResolutionErrors = append(ruleResolutionErrors, err) + } + + ownerRules := make([]rbac.PolicyRule, 0, len(ownerGlobalRules)+len(ownerLocalRules)) + ownerRules = append(ownerRules, ownerLocalRules...) + ownerRules = append(ownerRules, ownerGlobalRules...) + + ownerRightsCover, missingRights := Covers(ownerRules, rules) + if !ownerRightsCover { + user, _ := api.UserFrom(ctx) + return errors.NewUnauthorized(fmt.Sprintf("attempt to grant extra privileges: %v user=%v ownerrules=%v ruleResolutionErrors=%v", missingRights, user, ownerRules, ruleResolutionErrors)) + } + return nil +} + +type DefaultRuleResolver struct { + roleGetter RoleGetter + roleBindingLister RoleBindingLister + clusterRoleGetter ClusterRoleGetter + clusterRoleBindingLister ClusterRoleBindingLister +} + +func NewDefaultRuleResolver(roleGetter RoleGetter, roleBindingLister RoleBindingLister, clusterRoleGetter ClusterRoleGetter, clusterRoleBindingLister ClusterRoleBindingLister) *DefaultRuleResolver { + return &DefaultRuleResolver{roleGetter, roleBindingLister, clusterRoleGetter, clusterRoleBindingLister} +} + +type RoleGetter interface { + GetRole(ctx api.Context, id string) (*rbac.Role, error) +} + +type RoleBindingLister interface { + ListRoleBindings(ctx api.Context, options *api.ListOptions) (*rbac.RoleBindingList, error) +} + +type ClusterRoleGetter interface { + GetClusterRole(ctx api.Context, id string) (*rbac.ClusterRole, error) +} + +type ClusterRoleBindingLister interface { + ListClusterRoleBindings(ctx api.Context, options *api.ListOptions) (*rbac.ClusterRoleBindingList, error) +} + +// GetRoleReferenceRules attempts to resolve the rules of the Role or ClusterRole referenced by a RoleBinding or ClusterRoleBinding. +func (r *DefaultRuleResolver) GetRoleReferenceRules(ctx api.Context, roleRef api.ObjectReference, bindingNamespace string) ([]rbac.PolicyRule, error) { + switch roleRef.Kind { + case "Role": + // Roles can only be referenced by RoleBindings within the same namespace.
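// Editor's note: a minimal sketch, not part of the patch, showing how a
// hypothetical registry/storage layer might use ConfirmNoEscalation above
// before persisting a Role. The function name createRole is an assumption for
// illustration; the sketch relies only on identifiers defined in this file and
// the surrounding package's imports.
func createRole(ctx api.Context, resolver AuthorizationRuleResolver, role *rbac.Role) error {
	// Refuse the write unless the requesting user already holds every
	// permission the new role would grant.
	if err := ConfirmNoEscalation(ctx, resolver, role.Rules); err != nil {
		return err // an errors.NewUnauthorized describing the missing rights
	}
	// ...persist the role here...
	return nil
}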
+ if len(bindingNamespace) == 0 { + return nil, fmt.Errorf("cluster role binding references role %q in namespace %q", roleRef.Name, roleRef.Namespace) + } + if bindingNamespace != roleRef.Namespace { + return nil, fmt.Errorf("role binding in namespace %q references role %q in namespace %q", bindingNamespace, roleRef.Name, roleRef.Namespace) + } + + role, err := r.roleGetter.GetRole(api.WithNamespace(ctx, roleRef.Namespace), roleRef.Name) + if err != nil { + return nil, err + } + return role.Rules, nil + case "ClusterRole": + clusterRole, err := r.clusterRoleGetter.GetClusterRole(api.WithNamespace(ctx, ""), roleRef.Name) + if err != nil { + return nil, err + } + return clusterRole.Rules, nil + default: + return nil, fmt.Errorf("unsupported role reference kind: %q", roleRef.Kind) + } +} + +func (r *DefaultRuleResolver) GetEffectivePolicyRules(ctx api.Context) ([]rbac.PolicyRule, error) { + policyRules := []rbac.PolicyRule{} + errorlist := []error{} + + if namespace := api.NamespaceValue(ctx); len(namespace) == 0 { + clusterRoleBindings, err := r.clusterRoleBindingLister.ListClusterRoleBindings(ctx, &api.ListOptions{}) + if err != nil { + return nil, err + } + + for _, clusterRoleBinding := range clusterRoleBindings.Items { + if ok, err := appliesTo(ctx, clusterRoleBinding.Subjects); err != nil { + // Record the error and skip this binding rather than granting its rules. + errorlist = append(errorlist, err) + continue + } else if !ok { + continue + } + rules, err := r.GetRoleReferenceRules(ctx, clusterRoleBinding.RoleRef, namespace) + if err != nil { + errorlist = append(errorlist, err) + continue + } + policyRules = append(policyRules, rules...) + } + } else { + roleBindings, err := r.roleBindingLister.ListRoleBindings(ctx, &api.ListOptions{}) + if err != nil { + return nil, err + } + + for _, roleBinding := range roleBindings.Items { + if ok, err := appliesTo(ctx, roleBinding.Subjects); err != nil { + // Record the error and skip this binding rather than granting its rules. + errorlist = append(errorlist, err) + continue + } else if !ok { + continue + } + rules, err := r.GetRoleReferenceRules(ctx, roleBinding.RoleRef, namespace) + if err != nil { + errorlist = append(errorlist, err) + continue + } + policyRules = append(policyRules, rules...) + } + } + + if len(errorlist) != 0 { + return policyRules, utilerrors.NewAggregate(errorlist) + } + return policyRules, nil +} + +func appliesTo(ctx api.Context, subjects []rbac.Subject) (bool, error) { + user, ok := api.UserFrom(ctx) + if !ok { + return false, fmt.Errorf("no user data associated with context") + } + for _, subject := range subjects { + if ok, err := appliesToUser(user, subject); err != nil || ok { + return ok, err + } + } + return false, nil +} + +func appliesToUser(user user.Info, subject rbac.Subject) (bool, error) { + switch subject.Kind { + case rbac.UserKind: + return subject.Name == rbac.UserAll || user.GetName() == subject.Name, nil + case rbac.GroupKind: + return has(user.GetGroups(), subject.Name), nil + case rbac.ServiceAccountKind: + if subject.Namespace == "" { + return false, fmt.Errorf("subject of kind service account without specified namespace") + } + // TODO(ericchiang): Is there a better way of matching a service account name?
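// Editor's note: a minimal sketch, not part of the patch. The namespace on the
// context decides which bindings GetEffectivePolicyRules consults, so a caller
// wanting a user's full rule set queries twice and merges, exactly as
// ConfirmNoEscalation does above. The function and variable names are
// illustrative assumptions.
func effectiveRulesEverywhere(ctx api.Context, resolver *DefaultRuleResolver) []rbac.PolicyRule {
	// Per the interface contract, rules are purely additive and a non-nil
	// error may still come with a usable partial result, so errors are
	// tolerated here.
	namespaced, _ := resolver.GetEffectivePolicyRules(ctx)                    // RoleBindings in ctx's namespace
	global, _ := resolver.GetEffectivePolicyRules(api.WithNamespace(ctx, "")) // ClusterRoleBindings
	return append(namespaced, global...)
}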
+ return "system:serviceaccount:"+subject.Name+":"+subject.Namespace == user.GetName(), nil + default: + return false, fmt.Errorf("unknown subject kind: %s", subject.Kind) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/rbac/validation/rulevalidation_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/rbac/validation/rulevalidation_test.go new file mode 100644 index 000000000000..ef2b22540525 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/rbac/validation/rulevalidation_test.go @@ -0,0 +1,335 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package validation + +import ( + "errors" + "hash/fnv" + "io" + "reflect" + "sort" + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/rbac" + "k8s.io/kubernetes/pkg/auth/user" + "k8s.io/kubernetes/pkg/util/diff" +) + +func newMockRuleResolver(r *staticRoles) AuthorizationRuleResolver { + return NewDefaultRuleResolver(r, r, r, r) +} + +type staticRoles struct { + roles []rbac.Role + roleBindings []rbac.RoleBinding + clusterRoles []rbac.ClusterRole + clusterRoleBindings []rbac.ClusterRoleBinding +} + +func (r *staticRoles) GetRole(ctx api.Context, id string) (*rbac.Role, error) { + namespace, ok := api.NamespaceFrom(ctx) + if !ok || namespace == "" { + return nil, errors.New("must provide namespace when getting role") + } + for _, role := range r.roles { + if role.Namespace == namespace && role.Name == id { + return &role, nil + } + } + return nil, errors.New("role not found") +} + +func (r *staticRoles) GetClusterRole(ctx api.Context, id string) (*rbac.ClusterRole, error) { + namespace, ok := api.NamespaceFrom(ctx) + if ok && namespace != "" { + return nil, errors.New("cannot provide namespace when getting cluster role") + } + for _, clusterRole := range r.clusterRoles { + if clusterRole.Namespace == namespace && clusterRole.Name == id { + return &clusterRole, nil + } + } + return nil, errors.New("role not found") +} + +func (r *staticRoles) ListRoleBindings(ctx api.Context, options *api.ListOptions) (*rbac.RoleBindingList, error) { + namespace, ok := api.NamespaceFrom(ctx) + if !ok || namespace == "" { + return nil, errors.New("must provide namespace when listing role bindings") + } + + roleBindingList := new(rbac.RoleBindingList) + for _, roleBinding := range r.roleBindings { + if roleBinding.Namespace != namespace { + continue + } + // TODO(ericchiang): need to implement label selectors? 
+ roleBindingList.Items = append(roleBindingList.Items, roleBinding) + } + return roleBindingList, nil +} + +func (r *staticRoles) ListClusterRoleBindings(ctx api.Context, options *api.ListOptions) (*rbac.ClusterRoleBindingList, error) { + namespace, ok := api.NamespaceFrom(ctx) + if ok && namespace != "" { + return nil, errors.New("cannot list cluster role bindings from within a namespace") + } + clusterRoleBindings := new(rbac.ClusterRoleBindingList) + clusterRoleBindings.Items = make([]rbac.ClusterRoleBinding, len(r.clusterRoleBindings)) + copy(clusterRoleBindings.Items, r.clusterRoleBindings) + return clusterRoleBindings, nil +} + +// hashOf computes a hash of a policy rule so tests can sort rules in a deterministic order +func hashOf(p rbac.PolicyRule) string { + hash := fnv.New32() + writeStrings := func(slis ...[]string) { + for _, sli := range slis { + for _, s := range sli { + io.WriteString(hash, s) + } + } + } + writeStrings(p.Verbs, p.APIGroups, p.Resources, p.ResourceNames, p.NonResourceURLs) + return string(hash.Sum(nil)) +} + +// byHash sorts a set of policy rules by a hash of their fields +type byHash []rbac.PolicyRule + +func (b byHash) Len() int { return len(b) } +func (b byHash) Less(i, j int) bool { return hashOf(b[i]) < hashOf(b[j]) } +func (b byHash) Swap(i, j int) { b[i], b[j] = b[j], b[i] } + +func TestDefaultRuleResolver(t *testing.T) { + ruleReadPods := rbac.PolicyRule{ + Verbs: []string{"GET", "WATCH"}, + APIGroups: []string{"v1"}, + Resources: []string{"pods"}, + } + ruleReadServices := rbac.PolicyRule{ + Verbs: []string{"GET", "WATCH"}, + APIGroups: []string{"v1"}, + Resources: []string{"services"}, + } + ruleWriteNodes := rbac.PolicyRule{ + Verbs: []string{"PUT", "CREATE", "UPDATE"}, + APIGroups: []string{"v1"}, + Resources: []string{"nodes"}, + } + ruleAdmin := rbac.PolicyRule{ + Verbs: []string{"*"}, + APIGroups: []string{"*"}, + Resources: []string{"*"}, + } + + staticRoles1 := staticRoles{ + roles: []rbac.Role{ + { + ObjectMeta: api.ObjectMeta{Namespace: "namespace1", Name: "readthings"}, + Rules: []rbac.PolicyRule{ruleReadPods, ruleReadServices}, + }, + }, + clusterRoles: []rbac.ClusterRole{ + { + ObjectMeta: api.ObjectMeta{Name: "cluster-admin"}, + Rules: []rbac.PolicyRule{ruleAdmin}, + }, + { + ObjectMeta: api.ObjectMeta{Name: "write-nodes"}, + Rules: []rbac.PolicyRule{ruleWriteNodes}, + }, + }, + roleBindings: []rbac.RoleBinding{ + { + ObjectMeta: api.ObjectMeta{Namespace: "namespace1"}, + Subjects: []rbac.Subject{ + {Kind: rbac.UserKind, Name: "foobar"}, + {Kind: rbac.GroupKind, Name: "group1"}, + }, + RoleRef: api.ObjectReference{Kind: "Role", Namespace: "namespace1", Name: "readthings"}, + }, + }, + clusterRoleBindings: []rbac.ClusterRoleBinding{ + { + Subjects: []rbac.Subject{ + {Kind: rbac.UserKind, Name: "admin"}, + {Kind: rbac.GroupKind, Name: "admin"}, + }, + RoleRef: api.ObjectReference{Kind: "ClusterRole", Name: "cluster-admin"}, + }, + }, + } + + tests := []struct { + staticRoles + + // For a given context, what are the rules that apply? + ctx api.Context + effectiveRules []rbac.PolicyRule + }{ + { + staticRoles: staticRoles1, + ctx: api.WithNamespace( + api.WithUser(api.NewContext(), &user.DefaultInfo{Name: "foobar"}), "namespace1", + ), + effectiveRules: []rbac.PolicyRule{ruleReadPods, ruleReadServices}, + }, + { + staticRoles: staticRoles1, + ctx: api.WithNamespace( + // Same as above but different namespace. Should return no rules.
+ api.WithUser(api.NewContext(), &user.DefaultInfo{Name: "foobar"}), "namespace2", + ), + effectiveRules: []rbac.PolicyRule{}, + }, + { + staticRoles: staticRoles1, + // GetEffectivePolicyRules only returns the policies for the namespace, not the master namespace. + ctx: api.WithNamespace( + api.WithUser(api.NewContext(), &user.DefaultInfo{ + Name: "foobar", Groups: []string{"admin"}, + }), "namespace1", + ), + effectiveRules: []rbac.PolicyRule{ruleReadPods, ruleReadServices}, + }, + { + staticRoles: staticRoles1, + // Same as above but without a namespace. Only cluster rules should apply. + ctx: api.WithUser(api.NewContext(), &user.DefaultInfo{ + Name: "foobar", Groups: []string{"admin"}, + }), + effectiveRules: []rbac.PolicyRule{ruleAdmin}, + }, + { + staticRoles: staticRoles1, + ctx: api.WithUser(api.NewContext(), &user.DefaultInfo{}), + effectiveRules: []rbac.PolicyRule{}, + }, + } + + for i, tc := range tests { + ruleResolver := newMockRuleResolver(&tc.staticRoles) + rules, err := ruleResolver.GetEffectivePolicyRules(tc.ctx) + if err != nil { + t.Errorf("case %d: GetEffectivePolicyRules(context)=%v", i, err) + continue + } + + // Sort for deep equals + sort.Sort(byHash(rules)) + sort.Sort(byHash(tc.effectiveRules)) + + if !reflect.DeepEqual(rules, tc.effectiveRules) { + ruleDiff := diff.ObjectDiff(rules, tc.effectiveRules) + t.Errorf("case %d: %s", i, ruleDiff) + } + } +} + +func TestAppliesTo(t *testing.T) { + tests := []struct { + subjects []rbac.Subject + ctx api.Context + appliesTo bool + testCase string + }{ + { + subjects: []rbac.Subject{ + {Kind: rbac.UserKind, Name: "foobar"}, + }, + ctx: api.WithUser(api.NewContext(), &user.DefaultInfo{Name: "foobar"}), + appliesTo: true, + testCase: "single subject that matches username", + }, + { + subjects: []rbac.Subject{ + {Kind: rbac.UserKind, Name: "barfoo"}, + {Kind: rbac.UserKind, Name: "foobar"}, + }, + ctx: api.WithUser(api.NewContext(), &user.DefaultInfo{Name: "foobar"}), + appliesTo: true, + testCase: "multiple subjects, one that matches username", + }, + { + subjects: []rbac.Subject{ + {Kind: rbac.UserKind, Name: "barfoo"}, + {Kind: rbac.UserKind, Name: "foobar"}, + }, + ctx: api.WithUser(api.NewContext(), &user.DefaultInfo{Name: "zimzam"}), + appliesTo: false, + testCase: "multiple subjects, none that match username", + }, + { + subjects: []rbac.Subject{ + {Kind: rbac.UserKind, Name: "barfoo"}, + {Kind: rbac.GroupKind, Name: "foobar"}, + }, + ctx: api.WithUser(api.NewContext(), &user.DefaultInfo{Name: "zimzam", Groups: []string{"foobar"}}), + appliesTo: true, + testCase: "multiple subjects, one that matches group", + }, + { + subjects: []rbac.Subject{ + {Kind: rbac.UserKind, Name: "barfoo"}, + {Kind: rbac.GroupKind, Name: "foobar"}, + }, + ctx: api.WithNamespace( + api.WithUser(api.NewContext(), &user.DefaultInfo{Name: "zimzam", Groups: []string{"foobar"}}), + "namespace1", + ), + appliesTo: true, + testCase: "multiple subjects, one that matches group, should ignore namespace", + }, + { + subjects: []rbac.Subject{ + {Kind: rbac.UserKind, Name: "barfoo"}, + {Kind: rbac.GroupKind, Name: "foobar"}, + {Kind: rbac.ServiceAccountKind, Name: "default", Namespace: "kube-system"}, + }, + ctx: api.WithNamespace( + api.WithUser(api.NewContext(), &user.DefaultInfo{Name: "system:serviceaccount:kube-system:default"}), + "default", + ), + appliesTo: true, + testCase: "multiple subjects with a service account that matches", + }, + { + subjects: []rbac.Subject{ + {Kind: rbac.UserKind, Name: "*"}, + }, + ctx: api.WithNamespace( +
api.WithUser(api.NewContext(), &user.DefaultInfo{Name: "foobar"}), + "default", + ), + appliesTo: true, + testCase: "wildcard user subject matches any user", + }, + } + + for _, tc := range tests { + got, err := appliesTo(tc.ctx, tc.subjects) + if err != nil { + t.Errorf("case %q %v", tc.testCase, err) + continue + } + if got != tc.appliesTo { + t.Errorf("case %q want appliesTo=%t, got appliesTo=%t", tc.testCase, tc.appliesTo, got) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/rbac/validation/validation.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/rbac/validation/validation.go new file mode 100644 index 000000000000..4a384e65ee4a --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/rbac/validation/validation.go @@ -0,0 +1,151 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package validation + +import ( + "k8s.io/kubernetes/pkg/api/validation" + "k8s.io/kubernetes/pkg/apis/rbac" + "k8s.io/kubernetes/pkg/util/validation/field" +) + +// Minimal validation of names for roles and bindings. Identical to the validation for OpenShift. See: +// * https://github.com/kubernetes/kubernetes/blob/60db50/pkg/api/validation/name.go +// * https://github.com/openshift/origin/blob/388478/pkg/api/helpers.go +func minimalNameRequirements(name string, prefix bool) []string { + return validation.IsValidPathSegmentName(name) +} + +func ValidateRole(policy *rbac.Role) field.ErrorList { + return validateRole(policy, true) +} + +func ValidateRoleUpdate(policy *rbac.Role, oldRole *rbac.Role) field.ErrorList { + return validateRoleUpdate(policy, oldRole, true) +} + +func ValidateClusterRole(policy *rbac.ClusterRole) field.ErrorList { + return validateRole(toRole(policy), false) +} + +func ValidateClusterRoleUpdate(policy *rbac.ClusterRole, oldRole *rbac.ClusterRole) field.ErrorList { + return validateRoleUpdate(toRole(policy), toRole(oldRole), false) +} + +func validateRole(role *rbac.Role, isNamespaced bool) field.ErrorList { + return validation.ValidateObjectMeta(&role.ObjectMeta, isNamespaced, minimalNameRequirements, field.NewPath("metadata")) +} + +func validateRoleUpdate(role *rbac.Role, oldRole *rbac.Role, isNamespaced bool) field.ErrorList { + allErrs := validateRole(role, isNamespaced) + allErrs = append(allErrs, validation.ValidateObjectMetaUpdate(&role.ObjectMeta, &oldRole.ObjectMeta, field.NewPath("metadata"))...)
+ + return allErrs +} + +func ValidateRoleBinding(policy *rbac.RoleBinding) field.ErrorList { + return validateRoleBinding(policy, true) +} + +func ValidateRoleBindingUpdate(policy *rbac.RoleBinding, oldRoleBinding *rbac.RoleBinding) field.ErrorList { + return validateRoleBindingUpdate(policy, oldRoleBinding, true) +} + +func ValidateClusterRoleBinding(policy *rbac.ClusterRoleBinding) field.ErrorList { + return validateRoleBinding(toRoleBinding(policy), false) +} + +func ValidateClusterRoleBindingUpdate(policy *rbac.ClusterRoleBinding, oldRoleBinding *rbac.ClusterRoleBinding) field.ErrorList { + return validateRoleBindingUpdate(toRoleBinding(policy), toRoleBinding(oldRoleBinding), false) +} + +func validateRoleBinding(roleBinding *rbac.RoleBinding, isNamespaced bool) field.ErrorList { + allErrs := field.ErrorList{} + allErrs = append(allErrs, validation.ValidateObjectMeta(&roleBinding.ObjectMeta, isNamespaced, minimalNameRequirements, field.NewPath("metadata"))...) + + // roleRef namespace is empty when referring to global policy. + if len(roleBinding.RoleRef.Namespace) > 0 { + for _, msg := range validation.ValidateNamespaceName(roleBinding.RoleRef.Namespace, false) { + allErrs = append(allErrs, field.Invalid(field.NewPath("roleRef", "namespace"), roleBinding.RoleRef.Namespace, msg)) + } + } + + if len(roleBinding.RoleRef.Name) == 0 { + allErrs = append(allErrs, field.Required(field.NewPath("roleRef", "name"), "")) + } else { + for _, msg := range minimalNameRequirements(roleBinding.RoleRef.Name, false) { + allErrs = append(allErrs, field.Invalid(field.NewPath("roleRef", "name"), roleBinding.RoleRef.Name, msg)) + } + } + + subjectsPath := field.NewPath("subjects") + for i, subject := range roleBinding.Subjects { + allErrs = append(allErrs, validateRoleBindingSubject(subject, isNamespaced, subjectsPath.Index(i))...) + } + + return allErrs +} + +func validateRoleBindingSubject(subject rbac.Subject, isNamespaced bool, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if len(subject.Name) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("name"), "")) + } + if len(subject.APIVersion) != 0 { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("apiVersion"), subject.APIVersion)) + } + + switch subject.Kind { + case rbac.ServiceAccountKind: + if len(subject.Name) > 0 { + for _, msg := range validation.ValidateServiceAccountName(subject.Name, false) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), subject.Name, msg)) + } + } + if !isNamespaced && len(subject.Namespace) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("namespace"), "")) + } + + case rbac.UserKind: + // TODO(ericchiang): What other restrictions on user name are there? + if len(subject.Name) == 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), subject.Name, "user name cannot be empty")) + } + + case rbac.GroupKind: + // TODO(ericchiang): What other restrictions on group name are there? 
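// Editor's note: a minimal sketch, not part of the patch, of what a caller
// sees from the subject validation above. The helper name exampleValidate is
// an assumption; it uses the exported ValidateRoleBinding from this file and
// assumes the pkg/api and fmt imports used by the accompanying test file.
func exampleValidate() {
	rb := &rbac.RoleBinding{
		ObjectMeta: api.ObjectMeta{Namespace: "ns1", Name: "rb"},
		RoleRef:    api.ObjectReference{Namespace: "ns1", Name: "role"},
		Subjects:   []rbac.Subject{{Kind: rbac.UserKind}}, // Name intentionally empty
	}
	for _, e := range ValidateRoleBinding(rb) {
		fmt.Printf("%s: %s\n", e.Field, e.Type) // e.g. "subjects[0].name: Required value"
	}
}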
+ if len(subject.Name) == 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), subject.Name, "group name cannot be empty")) + } + + default: + allErrs = append(allErrs, field.NotSupported(fldPath.Child("kind"), subject.Kind, []string{rbac.ServiceAccountKind, rbac.UserKind, rbac.GroupKind})) + } + + return allErrs +} + +func validateRoleBindingUpdate(roleBinding *rbac.RoleBinding, oldRoleBinding *rbac.RoleBinding, isNamespaced bool) field.ErrorList { + allErrs := validateRoleBinding(roleBinding, isNamespaced) + allErrs = append(allErrs, validation.ValidateObjectMetaUpdate(&roleBinding.ObjectMeta, &oldRoleBinding.ObjectMeta, field.NewPath("metadata"))...) + + if oldRoleBinding.RoleRef != roleBinding.RoleRef { + allErrs = append(allErrs, field.Invalid(field.NewPath("roleRef"), roleBinding.RoleRef, "cannot change roleRef")) + } + + return allErrs +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/rbac/validation/validation_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/rbac/validation/validation_test.go new file mode 100644 index 000000000000..c513729a7b5c --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/apis/rbac/validation/validation_test.go @@ -0,0 +1,230 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package validation + +import ( + "testing" + + api "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/rbac" + "k8s.io/kubernetes/pkg/util/validation/field" +) + +func TestValidateRoleBinding(t *testing.T) { + errs := validateRoleBinding( + &rbac.RoleBinding{ + ObjectMeta: api.ObjectMeta{Namespace: api.NamespaceDefault, Name: "master"}, + RoleRef: api.ObjectReference{Namespace: "master", Name: "valid"}, + Subjects: []rbac.Subject{ + {Name: "validsaname", Kind: rbac.ServiceAccountKind}, + {Name: "valid@username", Kind: rbac.UserKind}, + {Name: "valid@groupname", Kind: rbac.GroupKind}, + }, + }, + true, + ) + if len(errs) != 0 { + t.Errorf("expected success: %v", errs) + } + + errorCases := map[string]struct { + A rbac.RoleBinding + T field.ErrorType + F string + }{ + "zero-length namespace": { + A: rbac.RoleBinding{ + ObjectMeta: api.ObjectMeta{Name: "default"}, + RoleRef: api.ObjectReference{Namespace: "master", Name: "valid"}, + }, + T: field.ErrorTypeRequired, + F: "metadata.namespace", + }, + "zero-length name": { + A: rbac.RoleBinding{ + ObjectMeta: api.ObjectMeta{Namespace: api.NamespaceDefault}, + RoleRef: api.ObjectReference{Namespace: "master", Name: "valid"}, + }, + T: field.ErrorTypeRequired, + F: "metadata.name", + }, + "invalid ref": { + A: rbac.RoleBinding{ + ObjectMeta: api.ObjectMeta{Namespace: api.NamespaceDefault, Name: "name"}, + RoleRef: api.ObjectReference{Namespace: "-192083", Name: "valid"}, + }, + T: field.ErrorTypeInvalid, + F: "roleRef.namespace", + }, + "bad role": { + A: rbac.RoleBinding{ + ObjectMeta: api.ObjectMeta{Namespace: api.NamespaceDefault, Name: "default"}, + RoleRef: api.ObjectReference{Namespace: "default"}, + }, + T: field.ErrorTypeRequired, + F: "roleRef.name", + }, + "bad subject kind": { + A: rbac.RoleBinding{ + ObjectMeta: api.ObjectMeta{Namespace: api.NamespaceDefault, Name: "master"}, + RoleRef: api.ObjectReference{Namespace: "master", Name: "valid"}, + Subjects: []rbac.Subject{{Name: "subject"}}, + }, + T: field.ErrorTypeNotSupported, + F: "subjects[0].kind", + }, + "bad subject name": { + A: rbac.RoleBinding{ + ObjectMeta: api.ObjectMeta{Namespace: api.NamespaceDefault, Name: "master"}, + RoleRef: api.ObjectReference{Namespace: "master", Name: "valid"}, + Subjects: []rbac.Subject{{Name: "subject:bad", Kind: rbac.ServiceAccountKind}}, + }, + T: field.ErrorTypeInvalid, + F: "subjects[0].name", + }, + "forbidden fields": { + A: rbac.RoleBinding{ + ObjectMeta: api.ObjectMeta{Namespace: api.NamespaceDefault, Name: "master"}, + RoleRef: api.ObjectReference{Namespace: "master", Name: "valid"}, + Subjects: []rbac.Subject{{Name: "subject", Kind: rbac.ServiceAccountKind, APIVersion: "foo"}}, + }, + T: field.ErrorTypeForbidden, + F: "subjects[0].apiVersion", + }, + "missing subject name": { + A: rbac.RoleBinding{ + ObjectMeta: api.ObjectMeta{Namespace: api.NamespaceDefault, Name: "master"}, + RoleRef: api.ObjectReference{Namespace: "master", Name: "valid"}, + Subjects: []rbac.Subject{{Kind: rbac.ServiceAccountKind}}, + }, + T: field.ErrorTypeRequired, + F: "subjects[0].name", + }, + } + for k, v := range errorCases { + errs := validateRoleBinding(&v.A, true) + if len(errs) == 0 { + t.Errorf("expected failure %s for %v", k, v.A) + continue + } + for i := range errs { + if errs[i].Type != v.T { + t.Errorf("%s: expected errors to have type %s: %v", k, v.T, errs[i]) + } + if errs[i].Field != v.F { + t.Errorf("%s: expected errors to have field %s: %v", k, v.F, errs[i]) + } + } + } +} + +func TestValidateRoleBindingUpdate(t *testing.T) { + old := 
&rbac.RoleBinding{ + ObjectMeta: api.ObjectMeta{Namespace: api.NamespaceDefault, Name: "master", ResourceVersion: "1"}, + RoleRef: api.ObjectReference{Namespace: "master", Name: "valid"}, + } + + errs := validateRoleBindingUpdate( + &rbac.RoleBinding{ + ObjectMeta: api.ObjectMeta{Namespace: api.NamespaceDefault, Name: "master", ResourceVersion: "1"}, + RoleRef: api.ObjectReference{Namespace: "master", Name: "valid"}, + }, + old, + true, + ) + if len(errs) != 0 { + t.Errorf("expected success: %v", errs) + } + + errorCases := map[string]struct { + A rbac.RoleBinding + T field.ErrorType + F string + }{ + "changedRef": { + A: rbac.RoleBinding{ + ObjectMeta: api.ObjectMeta{Namespace: api.NamespaceDefault, Name: "master", ResourceVersion: "1"}, + RoleRef: api.ObjectReference{Namespace: "master", Name: "changed"}, + }, + T: field.ErrorTypeInvalid, + F: "roleRef", + }, + } + for k, v := range errorCases { + errs := validateRoleBindingUpdate(&v.A, old, true) + if len(errs) == 0 { + t.Errorf("expected failure %s for %v", k, v.A) + continue + } + for i := range errs { + if errs[i].Type != v.T { + t.Errorf("%s: expected errors to have type %s: %v", k, v.T, errs[i]) + } + if errs[i].Field != v.F { + t.Errorf("%s: expected errors to have field %s: %v", k, v.F, errs[i]) + } + } + } +} + +func TestValidateRole(t *testing.T) { + errs := validateRole( + &rbac.Role{ + ObjectMeta: api.ObjectMeta{Namespace: api.NamespaceDefault, Name: "master"}, + }, + true, + ) + if len(errs) != 0 { + t.Errorf("expected success: %v", errs) + } + + errorCases := map[string]struct { + A rbac.Role + T field.ErrorType + F string + }{ + "zero-length namespace": { + A: rbac.Role{ + ObjectMeta: api.ObjectMeta{Name: "default"}, + }, + T: field.ErrorTypeRequired, + F: "metadata.namespace", + }, + "zero-length name": { + A: rbac.Role{ + ObjectMeta: api.ObjectMeta{Namespace: api.NamespaceDefault}, + }, + T: field.ErrorTypeRequired, + F: "metadata.name", + }, + } + for k, v := range errorCases { + errs := validateRole(&v.A, true) + if len(errs) == 0 { + t.Errorf("expected failure %s for %v", k, v.A) + continue + } + for i := range errs { + if errs[i].Type != v.T { + t.Errorf("%s: expected errors to have type %s: %v", k, v.T, errs[i]) + } + if errs[i].Field != v.F { + t.Errorf("%s: expected errors to have field %s: %v", k, v.F, errs[i]) + } + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/auth/OWNERS b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/auth/OWNERS new file mode 100644 index 000000000000..766c481bd7e6 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/auth/OWNERS @@ -0,0 +1,3 @@ +assignees: + - erictune + - liggitt diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/auth/authenticator/bearertoken/bearertoken.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/auth/authenticator/bearertoken/bearertoken.go new file mode 100644 index 000000000000..eff338bfba83 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/auth/authenticator/bearertoken/bearertoken.go @@ -0,0 +1,47 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package bearertoken + +import ( + "net/http" + "strings" + + "k8s.io/kubernetes/pkg/auth/authenticator" + "k8s.io/kubernetes/pkg/auth/user" +) + +type Authenticator struct { + auth authenticator.Token +} + +func New(auth authenticator.Token) *Authenticator { + return &Authenticator{auth} +} + +func (a *Authenticator) AuthenticateRequest(req *http.Request) (user.Info, bool, error) { + auth := strings.TrimSpace(req.Header.Get("Authorization")) + if auth == "" { + return nil, false, nil + } + parts := strings.Split(auth, " ") + if len(parts) < 2 || strings.ToLower(parts[0]) != "bearer" { + return nil, false, nil + } + + token := parts[1] + return a.auth.AuthenticateToken(token) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/auth/authenticator/bearertoken/bearertoken_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/auth/authenticator/bearertoken/bearertoken_test.go new file mode 100644 index 000000000000..6b9f6766215c --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/auth/authenticator/bearertoken/bearertoken_test.go @@ -0,0 +1,86 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package bearertoken + +import ( + "errors" + "net/http" + "testing" + + "k8s.io/kubernetes/pkg/auth/authenticator" + "k8s.io/kubernetes/pkg/auth/user" +) + +func TestAuthenticateRequest(t *testing.T) { + auth := New(authenticator.TokenFunc(func(token string) (user.Info, bool, error) { + if token != "token" { + t.Errorf("unexpected token: %s", token) + } + return &user.DefaultInfo{Name: "user"}, true, nil + })) + user, ok, err := auth.AuthenticateRequest(&http.Request{ + Header: http.Header{"Authorization": []string{"Bearer token"}}, + }) + if !ok || user == nil || err != nil { + t.Errorf("expected valid user") + } +} + +func TestAuthenticateRequestTokenInvalid(t *testing.T) { + auth := New(authenticator.TokenFunc(func(token string) (user.Info, bool, error) { + return nil, false, nil + })) + user, ok, err := auth.AuthenticateRequest(&http.Request{ + Header: http.Header{"Authorization": []string{"Bearer token"}}, + }) + if ok || user != nil || err != nil { + t.Errorf("expected not authenticated user") + } +} + +func TestAuthenticateRequestTokenError(t *testing.T) { + auth := New(authenticator.TokenFunc(func(token string) (user.Info, bool, error) { + return nil, false, errors.New("error") + })) + user, ok, err := auth.AuthenticateRequest(&http.Request{ + Header: http.Header{"Authorization": []string{"Bearer token"}}, + }) + if ok || user != nil || err == nil { + t.Errorf("expected error") + } +} + +func TestAuthenticateRequestBadValue(t *testing.T) { + testCases := []struct { + Req *http.Request + }{ + {Req: &http.Request{}}, + {Req: &http.Request{Header: http.Header{"Authorization": []string{"Bearer"}}}}, + {Req: &http.Request{Header: http.Header{"Authorization": []string{"bear token"}}}}, + {Req: &http.Request{Header: http.Header{"Authorization": []string{"Bearer: token"}}}}, + } + for i, testCase := range testCases { + auth := New(authenticator.TokenFunc(func(token string) (user.Info, bool, error) { + t.Errorf("authentication should not have been called") + return nil, false, nil + })) + user, ok, err := auth.AuthenticateRequest(testCase.Req) + if ok || user != nil || err != nil { + t.Errorf("%d: expected not authenticated (no token)", i) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/auth/authenticator/interfaces.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/auth/authenticator/interfaces.go new file mode 100644 index 000000000000..2da820cc0894 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/auth/authenticator/interfaces.go @@ -0,0 +1,68 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package authenticator + +import ( + "net/http" + + "k8s.io/kubernetes/pkg/auth/user" +) + +// Token checks a string value against a backing authentication store and returns +// information about the current user and true if successful, false if not successful, +// or an error if the token could not be checked. 
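// Editor's note: a minimal standalone sketch, not part of the patch, wiring
// the TokenFunc adapter defined below into the bearer-token request
// authenticator from this patch. The static secret and user name are
// illustrative assumptions only.
package main

import (
	"fmt"
	"net/http"

	"k8s.io/kubernetes/pkg/auth/authenticator"
	"k8s.io/kubernetes/pkg/auth/authenticator/bearertoken"
	"k8s.io/kubernetes/pkg/auth/user"
)

func main() {
	auth := bearertoken.New(authenticator.TokenFunc(func(token string) (user.Info, bool, error) {
		if token == "s3cret" {
			return &user.DefaultInfo{Name: "test-user"}, true, nil
		}
		return nil, false, nil // unknown token: unauthenticated, but not an error
	}))

	req, _ := http.NewRequest("GET", "https://example.invalid/", nil)
	req.Header.Set("Authorization", "Bearer s3cret")
	if u, ok, _ := auth.AuthenticateRequest(req); ok {
		fmt.Println("authenticated as", u.GetName()) // authenticated as test-user
	}
}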
+type Token interface { + AuthenticateToken(token string) (user.Info, bool, error) +} + +// Request attempts to extract authentication information from a request and returns +// information about the current user and true if successful, false if not successful, +// or an error if the request could not be checked. +type Request interface { + AuthenticateRequest(req *http.Request) (user.Info, bool, error) +} + +// Password checks a username and password against a backing authentication store and +// returns information about the user and true if successful, false if not successful, +// or an error if the username and password could not be checked +type Password interface { + AuthenticatePassword(user, password string) (user.Info, bool, error) +} + +// TokenFunc is a function that implements the Token interface. +type TokenFunc func(token string) (user.Info, bool, error) + +// AuthenticateToken implements authenticator.Token. +func (f TokenFunc) AuthenticateToken(token string) (user.Info, bool, error) { + return f(token) +} + +// RequestFunc is a function that implements the Request interface. +type RequestFunc func(req *http.Request) (user.Info, bool, error) + +// AuthenticateRequest implements authenticator.Request. +func (f RequestFunc) AuthenticateRequest(req *http.Request) (user.Info, bool, error) { + return f(req) +} + +// PasswordFunc is a function that implements the Password interface. +type PasswordFunc func(user, password string) (user.Info, bool, error) + +// AuthenticatePassword implements authenticator.Password. +func (f PasswordFunc) AuthenticatePassword(user, password string) (user.Info, bool, error) { + return f(user, password) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/auth/authorizer/abac/abac.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/auth/authorizer/abac/abac.go new file mode 100644 index 000000000000..c3bfedcc4d8e --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/auth/authorizer/abac/abac.go @@ -0,0 +1,228 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package abac + +// Policy authorizes Kubernetes API actions using an Attribute-based access +// control scheme. + +import ( + "bufio" + "errors" + "fmt" + "os" + "strings" + + "github.com/golang/glog" + + api "k8s.io/kubernetes/pkg/apis/abac" + _ "k8s.io/kubernetes/pkg/apis/abac/latest" + "k8s.io/kubernetes/pkg/apis/abac/v0" + "k8s.io/kubernetes/pkg/auth/authorizer" + "k8s.io/kubernetes/pkg/runtime" +) + +type policyLoadError struct { + path string + line int + data []byte + err error +} + +func (p policyLoadError) Error() string { + if p.line >= 0 { + return fmt.Sprintf("error reading policy file %s, line %d: %s: %v", p.path, p.line, string(p.data), p.err) + } + return fmt.Sprintf("error reading policy file %s: %v", p.path, p.err) +} + +type policyList []*api.Policy + +// TODO: Have policies be created via an API call and stored in REST storage. +func NewFromFile(path string) (policyList, error) { + // File format is one map per line. 
This allows easy concatenation of files, + // comments in files, and identification of errors by line number. + file, err := os.Open(path) + if err != nil { + return nil, err + } + defer file.Close() + + scanner := bufio.NewScanner(file) + pl := make(policyList, 0) + + decoder := api.Codecs.UniversalDecoder() + + i := 0 + unversionedLines := 0 + for scanner.Scan() { + i++ + p := &api.Policy{} + b := scanner.Bytes() + + // skip comment lines and blank lines + trimmed := strings.TrimSpace(string(b)) + if len(trimmed) == 0 || strings.HasPrefix(trimmed, "#") { + continue + } + + decodedObj, _, err := decoder.Decode(b, nil, nil) + if err != nil { + if !(runtime.IsMissingVersion(err) || runtime.IsMissingKind(err) || runtime.IsNotRegisteredError(err)) { + return nil, policyLoadError{path, i, b, err} + } + unversionedLines++ + // Migrate unversioned policy object + oldPolicy := &v0.Policy{} + if err := runtime.DecodeInto(decoder, b, oldPolicy); err != nil { + return nil, policyLoadError{path, i, b, err} + } + if err := api.Scheme.Convert(oldPolicy, p); err != nil { + return nil, policyLoadError{path, i, b, err} + } + pl = append(pl, p) + continue + } + + decodedPolicy, ok := decodedObj.(*api.Policy) + if !ok { + return nil, policyLoadError{path, i, b, fmt.Errorf("unrecognized object: %#v", decodedObj)} + } + pl = append(pl, decodedPolicy) + } + + if unversionedLines > 0 { + glog.Warningf(`Policy file %s contained unversioned rules. See docs/admin/authorization.md#abac-mode for ABAC file format details.`, path) + } + + if err := scanner.Err(); err != nil { + return nil, policyLoadError{path, -1, nil, err} + } + return pl, nil +} + +func matches(p api.Policy, a authorizer.Attributes) bool { + if subjectMatches(p, a) { + if verbMatches(p, a) { + // Resource and non-resource requests are mutually exclusive, at most one will match a policy + if resourceMatches(p, a) { + return true + } + if nonResourceMatches(p, a) { + return true + } + } + } + return false +} + +// subjectMatches returns true if specified user and group properties in the policy match the attributes +func subjectMatches(p api.Policy, a authorizer.Attributes) bool { + matched := false + + // If the policy specified a user, ensure it matches + if len(p.Spec.User) > 0 { + if p.Spec.User == "*" { + matched = true + } else { + matched = p.Spec.User == a.GetUserName() + if !matched { + return false + } + } + } + + // If the policy specified a group, ensure it matches + if len(p.Spec.Group) > 0 { + if p.Spec.Group == "*" { + matched = true + } else { + matched = false + for _, group := range a.GetGroups() { + if p.Spec.Group == group { + matched = true + } + } + if !matched { + return false + } + } + } + + return matched +} + +func verbMatches(p api.Policy, a authorizer.Attributes) bool { + // TODO: match on verb + + // All policies allow read-only requests + if a.IsReadOnly() { + return true + } + + // Allow if policy is not readonly + if !p.Spec.Readonly { + return true + } + + return false +} + +func nonResourceMatches(p api.Policy, a authorizer.Attributes) bool { + // A non-resource policy cannot match a resource request + if !a.IsResourceRequest() { + // Allow wildcard match + if p.Spec.NonResourcePath == "*" { + return true + } + // Allow exact match + if p.Spec.NonResourcePath == a.GetPath() { + return true + } + // Allow a trailing * subpath match + if strings.HasSuffix(p.Spec.NonResourcePath, "*") && strings.HasPrefix(a.GetPath(), strings.TrimRight(p.Spec.NonResourcePath, "*")) { + return true + } + } + return false +} + +func
resourceMatches(p api.Policy, a authorizer.Attributes) bool { + // A resource policy cannot match a non-resource request + if a.IsResourceRequest() { + if p.Spec.Namespace == "*" || p.Spec.Namespace == a.GetNamespace() { + if p.Spec.Resource == "*" || p.Spec.Resource == a.GetResource() { + if p.Spec.APIGroup == "*" || p.Spec.APIGroup == a.GetAPIGroup() { + return true + } + } + } + } + return false +} + +// Authorize implements authorizer.Authorizer +func (pl policyList) Authorize(a authorizer.Attributes) error { + for _, p := range pl { + if matches(*p, a) { + return nil + } + } + return errors.New("No policy matched.") + // TODO: Benchmark how much time policy matching takes with a medium size + // policy file, compared to other steps such as encoding/decoding. + // Then, add caching only if needed. +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/auth/authorizer/abac/abac_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/auth/authorizer/abac/abac_test.go new file mode 100644 index 000000000000..8b4e3b75ba2f --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/auth/authorizer/abac/abac_test.go @@ -0,0 +1,965 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package abac + +import ( + "io/ioutil" + "os" + "testing" + + api "k8s.io/kubernetes/pkg/apis/abac" + "k8s.io/kubernetes/pkg/apis/abac/v0" + "k8s.io/kubernetes/pkg/apis/abac/v1beta1" + "k8s.io/kubernetes/pkg/auth/authorizer" + "k8s.io/kubernetes/pkg/auth/user" + "k8s.io/kubernetes/pkg/runtime" +) + +func TestEmptyFile(t *testing.T) { + _, err := newWithContents(t, "") + if err != nil { + t.Errorf("unable to read policy file: %v", err) + } +} + +func TestOneLineFileNoNewLine(t *testing.T) { + _, err := newWithContents(t, `{"user":"scheduler", "readonly": true, "resource": "pods", "namespace":"ns1"}`) + if err != nil { + t.Errorf("unable to read policy file: %v", err) + } +} + +func TestTwoLineFile(t *testing.T) { + _, err := newWithContents(t, `{"user":"scheduler", "readonly": true, "resource": "pods"} +{"user":"scheduler", "readonly": true, "resource": "services"} +`) + if err != nil { + t.Errorf("unable to read policy file: %v", err) + } +} + +// Test the file that we will point users at as an example.
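// Editor's note: a minimal standalone sketch, not part of the patch, showing
// the kind of one-line v1beta1 policy file these tests construct and the
// authorization check it enables. The file contents and names are
// illustrative; error handling is elided for brevity.
package main

import (
	"fmt"
	"io/ioutil"
	"os"

	"k8s.io/kubernetes/pkg/auth/authorizer"
	"k8s.io/kubernetes/pkg/auth/authorizer/abac"
	"k8s.io/kubernetes/pkg/auth/user"
)

func main() {
	const policy = `{"apiVersion":"abac.authorization.kubernetes.io/v1beta1","kind":"Policy","spec":{"user":"alice","namespace":"dev","resource":"pods","readonly":true}}
`
	f, _ := ioutil.TempFile("", "abac")
	defer os.Remove(f.Name())
	ioutil.WriteFile(f.Name(), []byte(policy), 0600)

	a, _ := abac.NewFromFile(f.Name())
	err := a.Authorize(authorizer.AttributesRecord{
		User:            &user.DefaultInfo{Name: "alice"},
		Verb:            "get",
		Resource:        "pods",
		Namespace:       "dev",
		ResourceRequest: true,
	})
	fmt.Println(err == nil) // true: read-only access to pods in "dev" is allowed
}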
+func TestExampleFile(t *testing.T) { + _, err := NewFromFile("./example_policy_file.jsonl") + if err != nil { + t.Errorf("unable to read policy file: %v", err) + } +} + +func TestAuthorizeV0(t *testing.T) { + a, err := newWithContents(t, `{ "readonly": true, "resource": "events" } +{"user":"scheduler", "readonly": true, "resource": "pods" } +{"user":"scheduler", "resource": "bindings" } +{"user":"kubelet", "readonly": true, "resource": "bindings" } +{"user":"kubelet", "resource": "events" } +{"user":"alice", "namespace": "projectCaribou"} +{"user":"bob", "readonly": true, "namespace": "projectCaribou"} +`) + if err != nil { + t.Fatalf("unable to read policy file: %v", err) + } + + uScheduler := user.DefaultInfo{Name: "scheduler", UID: "uid1"} + uAlice := user.DefaultInfo{Name: "alice", UID: "uid3"} + uChuck := user.DefaultInfo{Name: "chuck", UID: "uid5"} + + testCases := []struct { + User user.DefaultInfo + Verb string + Resource string + NS string + APIGroup string + Path string + ExpectAllow bool + }{ + // Scheduler can read pods + {User: uScheduler, Verb: "list", Resource: "pods", NS: "ns1", ExpectAllow: true}, + {User: uScheduler, Verb: "list", Resource: "pods", NS: "", ExpectAllow: true}, + // Scheduler cannot write pods + {User: uScheduler, Verb: "create", Resource: "pods", NS: "ns1", ExpectAllow: false}, + {User: uScheduler, Verb: "create", Resource: "pods", NS: "", ExpectAllow: false}, + // Scheduler can write bindings + {User: uScheduler, Verb: "get", Resource: "bindings", NS: "ns1", ExpectAllow: true}, + {User: uScheduler, Verb: "get", Resource: "bindings", NS: "", ExpectAllow: true}, + + // Alice can read and write anything in the right namespace. + {User: uAlice, Verb: "get", Resource: "pods", NS: "projectCaribou", ExpectAllow: true}, + {User: uAlice, Verb: "get", Resource: "widgets", NS: "projectCaribou", ExpectAllow: true}, + {User: uAlice, Verb: "get", Resource: "", NS: "projectCaribou", ExpectAllow: true}, + {User: uAlice, Verb: "update", Resource: "pods", NS: "projectCaribou", ExpectAllow: true}, + {User: uAlice, Verb: "update", Resource: "widgets", NS: "projectCaribou", ExpectAllow: true}, + {User: uAlice, Verb: "update", Resource: "", NS: "projectCaribou", ExpectAllow: true}, + {User: uAlice, Verb: "update", Resource: "foo", NS: "projectCaribou", APIGroup: "bar", ExpectAllow: true}, + // .. but not the wrong namespace. + {User: uAlice, Verb: "get", Resource: "pods", NS: "ns1", ExpectAllow: false}, + {User: uAlice, Verb: "get", Resource: "widgets", NS: "ns1", ExpectAllow: false}, + {User: uAlice, Verb: "get", Resource: "", NS: "ns1", ExpectAllow: false}, + + // Chuck can read events, since anyone can. + {User: uChuck, Verb: "get", Resource: "events", NS: "ns1", ExpectAllow: true}, + {User: uChuck, Verb: "get", Resource: "events", NS: "", ExpectAllow: true}, + // Chuck can't do other things. 
+ {User: uChuck, Verb: "update", Resource: "events", NS: "ns1", ExpectAllow: false}, + {User: uChuck, Verb: "get", Resource: "pods", NS: "ns1", ExpectAllow: false}, + {User: uChuck, Verb: "get", Resource: "floop", NS: "ns1", ExpectAllow: false}, + // Chunk can't access things with no kind or namespace + {User: uChuck, Verb: "get", Path: "/", Resource: "", NS: "", ExpectAllow: false}, + } + for i, tc := range testCases { + attr := authorizer.AttributesRecord{ + User: &tc.User, + Verb: tc.Verb, + Resource: tc.Resource, + Namespace: tc.NS, + APIGroup: tc.APIGroup, + Path: tc.Path, + + ResourceRequest: len(tc.NS) > 0 || len(tc.Resource) > 0, + } + err := a.Authorize(attr) + actualAllow := bool(err == nil) + if tc.ExpectAllow != actualAllow { + t.Logf("tc: %v -> attr %v", tc, attr) + t.Errorf("%d: Expected allowed=%v but actually allowed=%v\n\t%v", + i, tc.ExpectAllow, actualAllow, tc) + } + } +} + +func TestAuthorizeV1beta1(t *testing.T) { + a, err := newWithContents(t, + ` + # Comment line, after a blank line + {"apiVersion":"abac.authorization.kubernetes.io/v1beta1","kind":"Policy","spec":{"user":"*", "readonly": true, "nonResourcePath": "/api"}} + {"apiVersion":"abac.authorization.kubernetes.io/v1beta1","kind":"Policy","spec":{"user":"*", "nonResourcePath": "/custom"}} + {"apiVersion":"abac.authorization.kubernetes.io/v1beta1","kind":"Policy","spec":{"user":"*", "nonResourcePath": "/root/*"}} + {"apiVersion":"abac.authorization.kubernetes.io/v1beta1","kind":"Policy","spec":{"user":"noresource", "nonResourcePath": "*"}} + {"apiVersion":"abac.authorization.kubernetes.io/v1beta1","kind":"Policy","spec":{"user":"*", "readonly": true, "resource": "events", "namespace": "*"}} + {"apiVersion":"abac.authorization.kubernetes.io/v1beta1","kind":"Policy","spec":{"user":"scheduler", "readonly": true, "resource": "pods", "namespace": "*"}} + {"apiVersion":"abac.authorization.kubernetes.io/v1beta1","kind":"Policy","spec":{"user":"scheduler", "resource": "bindings", "namespace": "*"}} + {"apiVersion":"abac.authorization.kubernetes.io/v1beta1","kind":"Policy","spec":{"user":"kubelet", "readonly": true, "resource": "bindings", "namespace": "*"}} + {"apiVersion":"abac.authorization.kubernetes.io/v1beta1","kind":"Policy","spec":{"user":"kubelet", "resource": "events", "namespace": "*"}} + {"apiVersion":"abac.authorization.kubernetes.io/v1beta1","kind":"Policy","spec":{"user":"alice", "resource": "*", "namespace": "projectCaribou"}} + {"apiVersion":"abac.authorization.kubernetes.io/v1beta1","kind":"Policy","spec":{"user":"bob", "readonly": true, "resource": "*", "namespace": "projectCaribou"}} + {"apiVersion":"abac.authorization.kubernetes.io/v1beta1","kind":"Policy","spec":{"user":"debbie", "resource": "pods", "namespace": "projectCaribou"}} + {"apiVersion":"abac.authorization.kubernetes.io/v1beta1","kind":"Policy","spec":{"user":"apigroupuser", "resource": "*", "namespace": "projectAnyGroup", "apiGroup": "*"}} + {"apiVersion":"abac.authorization.kubernetes.io/v1beta1","kind":"Policy","spec":{"user":"apigroupuser", "resource": "*", "namespace": "projectEmptyGroup", "apiGroup": "" }} + {"apiVersion":"abac.authorization.kubernetes.io/v1beta1","kind":"Policy","spec":{"user":"apigroupuser", "resource": "*", "namespace": "projectXGroup", "apiGroup": "x"}}`) + + if err != nil { + t.Fatalf("unable to read policy file: %v", err) + } + + uScheduler := user.DefaultInfo{Name: "scheduler", UID: "uid1"} + uAlice := user.DefaultInfo{Name: "alice", UID: "uid3"} + uChuck := user.DefaultInfo{Name: "chuck", UID: "uid5"} + 
uDebbie := user.DefaultInfo{Name: "debbie", UID: "uid6"} + uNoResource := user.DefaultInfo{Name: "noresource", UID: "uid7"} + uAPIGroup := user.DefaultInfo{Name: "apigroupuser", UID: "uid8"} + + testCases := []struct { + User user.DefaultInfo + Verb string + Resource string + APIGroup string + NS string + Path string + ExpectAllow bool + }{ + // Scheduler can read pods + {User: uScheduler, Verb: "list", Resource: "pods", NS: "ns1", ExpectAllow: true}, + {User: uScheduler, Verb: "list", Resource: "pods", NS: "", ExpectAllow: true}, + // Scheduler cannot write pods + {User: uScheduler, Verb: "create", Resource: "pods", NS: "ns1", ExpectAllow: false}, + {User: uScheduler, Verb: "create", Resource: "pods", NS: "", ExpectAllow: false}, + // Scheduler can write bindings + {User: uScheduler, Verb: "get", Resource: "bindings", NS: "ns1", ExpectAllow: true}, + {User: uScheduler, Verb: "get", Resource: "bindings", NS: "", ExpectAllow: true}, + + // Alice can read and write anything in the right namespace. + {User: uAlice, Verb: "get", Resource: "pods", NS: "projectCaribou", ExpectAllow: true}, + {User: uAlice, Verb: "get", Resource: "widgets", NS: "projectCaribou", ExpectAllow: true}, + {User: uAlice, Verb: "get", Resource: "", NS: "projectCaribou", ExpectAllow: true}, + {User: uAlice, Verb: "update", Resource: "pods", NS: "projectCaribou", ExpectAllow: true}, + {User: uAlice, Verb: "update", Resource: "widgets", NS: "projectCaribou", ExpectAllow: true}, + {User: uAlice, Verb: "update", Resource: "", NS: "projectCaribou", ExpectAllow: true}, + // .. but not the wrong namespace. + {User: uAlice, Verb: "get", Resource: "pods", NS: "ns1", ExpectAllow: false}, + {User: uAlice, Verb: "get", Resource: "widgets", NS: "ns1", ExpectAllow: false}, + {User: uAlice, Verb: "get", Resource: "", NS: "ns1", ExpectAllow: false}, + + // Debbie can write to pods in the right namespace + {User: uDebbie, Verb: "update", Resource: "pods", NS: "projectCaribou", ExpectAllow: true}, + + // Chuck can read events, since anyone can. + {User: uChuck, Verb: "get", Resource: "events", NS: "ns1", ExpectAllow: true}, + {User: uChuck, Verb: "get", Resource: "events", NS: "", ExpectAllow: true}, + // Chuck can't do other things. 
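// Editor's note: a minimal standalone sketch, not part of the patch, of the
// trailing-* subpath rule the nonResourcePath cases below exercise: "/root/*"
// matches "/root/" and anything deeper, but not "/root" itself, because the
// wildcard is trimmed to the prefix "/root/". The helper mirrors the logic of
// nonResourceMatches in abac.go.
package main

import (
	"fmt"
	"strings"
)

func pathMatches(policyPath, requestPath string) bool {
	if policyPath == "*" || policyPath == requestPath {
		return true
	}
	return strings.HasSuffix(policyPath, "*") &&
		strings.HasPrefix(requestPath, strings.TrimRight(policyPath, "*"))
}

func main() {
	fmt.Println(pathMatches("/root/*", "/root"))            // false
	fmt.Println(pathMatches("/root/*", "/root/"))           // true
	fmt.Println(pathMatches("/root/*", "/root/test/1/2/3")) // true
}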
+ {User: uChuck, Verb: "update", Resource: "events", NS: "ns1", ExpectAllow: false}, + {User: uChuck, Verb: "get", Resource: "pods", NS: "ns1", ExpectAllow: false}, + {User: uChuck, Verb: "get", Resource: "floop", NS: "ns1", ExpectAllow: false}, + // Chuck can't access things with no resource or namespace + {User: uChuck, Verb: "get", Path: "/", Resource: "", NS: "", ExpectAllow: false}, + // but can access /api + {User: uChuck, Verb: "get", Path: "/api", Resource: "", NS: "", ExpectAllow: true}, + // though he cannot write to it + {User: uChuck, Verb: "create", Path: "/api", Resource: "", NS: "", ExpectAllow: false}, + // while he can write to /custom + {User: uChuck, Verb: "update", Path: "/custom", Resource: "", NS: "", ExpectAllow: true}, + // he cannot get "/root" + {User: uChuck, Verb: "get", Path: "/root", Resource: "", NS: "", ExpectAllow: false}, + // but can get any subpath + {User: uChuck, Verb: "get", Path: "/root/", Resource: "", NS: "", ExpectAllow: true}, + {User: uChuck, Verb: "get", Path: "/root/test/1/2/3", Resource: "", NS: "", ExpectAllow: true}, + + // the user "noresource" can get any non-resource request + {User: uNoResource, Verb: "get", Path: "", Resource: "", NS: "", ExpectAllow: true}, + {User: uNoResource, Verb: "get", Path: "/", Resource: "", NS: "", ExpectAllow: true}, + {User: uNoResource, Verb: "get", Path: "/foo/bar/baz", Resource: "", NS: "", ExpectAllow: true}, + // but cannot get any request where IsResourceRequest() == true + {User: uNoResource, Verb: "get", Path: "/", Resource: "", NS: "bar", ExpectAllow: false}, + {User: uNoResource, Verb: "get", Path: "/foo/bar/baz", Resource: "foo", NS: "bar", ExpectAllow: false}, + + // Test APIGroup matching + {User: uAPIGroup, Verb: "get", APIGroup: "x", Resource: "foo", NS: "projectAnyGroup", ExpectAllow: true}, + {User: uAPIGroup, Verb: "get", APIGroup: "x", Resource: "foo", NS: "projectEmptyGroup", ExpectAllow: false}, + {User: uAPIGroup, Verb: "get", APIGroup: "x", Resource: "foo", NS: "projectXGroup", ExpectAllow: true}, + } + for i, tc := range testCases { + attr := authorizer.AttributesRecord{ + User: &tc.User, + Verb: tc.Verb, + Resource: tc.Resource, + APIGroup: tc.APIGroup, + Namespace: tc.NS, + ResourceRequest: len(tc.NS) > 0 || len(tc.Resource) > 0, + Path: tc.Path, + } + // t.Logf("tc %2v: %v -> attr %v", i, tc, attr) + err := a.Authorize(attr) + actualAllow := bool(err == nil) + if tc.ExpectAllow != actualAllow { + t.Errorf("%d: Expected allowed=%v but actually allowed=%v, for case %+v & %+v", + i, tc.ExpectAllow, actualAllow, tc, attr) + } + } +} + +func TestSubjectMatches(t *testing.T) { + testCases := map[string]struct { + User user.DefaultInfo + Policy runtime.Object + ExpectMatch bool + }{ + "v0 empty policy matches unauthed user": { + User: user.DefaultInfo{}, + Policy: &v0.Policy{ + User: "", + Group: "", + }, + ExpectMatch: true, + }, + "v0 empty policy matches authed user": { + User: user.DefaultInfo{Name: "Foo"}, + Policy: &v0.Policy{ + User: "", + Group: "", + }, + ExpectMatch: true, + }, + "v0 empty policy matches authed user with groups": { + User: user.DefaultInfo{Name: "Foo", Groups: []string{"a", "b"}}, + Policy: &v0.Policy{ + User: "", + Group: "", + }, + ExpectMatch: true, + }, + + "v0 user policy does not match unauthed user": { + User: user.DefaultInfo{}, + Policy: &v0.Policy{ + User: "Foo", + Group: "", + }, + ExpectMatch: false, + }, + "v0 user policy does not match different user": { + User: user.DefaultInfo{Name: "Bar"}, + Policy: &v0.Policy{ + User: "Foo", + Group: "", + }, 
+ ExpectMatch: false, + }, + "v0 user policy is case-sensitive": { + User: user.DefaultInfo{Name: "foo"}, + Policy: &v0.Policy{ + User: "Foo", + Group: "", + }, + ExpectMatch: false, + }, + "v0 user policy does not match substring": { + User: user.DefaultInfo{Name: "FooBar"}, + Policy: &v0.Policy{ + User: "Foo", + Group: "", + }, + ExpectMatch: false, + }, + "v0 user policy matches username": { + User: user.DefaultInfo{Name: "Foo"}, + Policy: &v0.Policy{ + User: "Foo", + Group: "", + }, + ExpectMatch: true, + }, + + "v0 group policy does not match unauthed user": { + User: user.DefaultInfo{}, + Policy: &v0.Policy{ + User: "", + Group: "Foo", + }, + ExpectMatch: false, + }, + "v0 group policy does not match user in different group": { + User: user.DefaultInfo{Name: "FooBar", Groups: []string{"B"}}, + Policy: &v0.Policy{ + User: "", + Group: "A", + }, + ExpectMatch: false, + }, + "v0 group policy is case-sensitive": { + User: user.DefaultInfo{Name: "Foo", Groups: []string{"A", "B", "C"}}, + Policy: &v0.Policy{ + User: "", + Group: "b", + }, + ExpectMatch: false, + }, + "v0 group policy does not match substring": { + User: user.DefaultInfo{Name: "Foo", Groups: []string{"A", "BBB", "C"}}, + Policy: &v0.Policy{ + User: "", + Group: "B", + }, + ExpectMatch: false, + }, + "v0 group policy matches user in group": { + User: user.DefaultInfo{Name: "Foo", Groups: []string{"A", "B", "C"}}, + Policy: &v0.Policy{ + User: "", + Group: "B", + }, + ExpectMatch: true, + }, + + "v0 user and group policy requires user match": { + User: user.DefaultInfo{Name: "Bar", Groups: []string{"A", "B", "C"}}, + Policy: &v0.Policy{ + User: "Foo", + Group: "B", + }, + ExpectMatch: false, + }, + "v0 user and group policy requires group match": { + User: user.DefaultInfo{Name: "Foo", Groups: []string{"A", "B", "C"}}, + Policy: &v0.Policy{ + User: "Foo", + Group: "D", + }, + ExpectMatch: false, + }, + "v0 user and group policy matches": { + User: user.DefaultInfo{Name: "Foo", Groups: []string{"A", "B", "C"}}, + Policy: &v0.Policy{ + User: "Foo", + Group: "B", + }, + ExpectMatch: true, + }, + + "v1 empty policy does not match unauthed user": { + User: user.DefaultInfo{}, + Policy: &v1beta1.Policy{ + Spec: v1beta1.PolicySpec{ + User: "", + Group: "", + }, + }, + ExpectMatch: false, + }, + "v1 empty policy does not match authed user": { + User: user.DefaultInfo{Name: "Foo"}, + Policy: &v1beta1.Policy{ + Spec: v1beta1.PolicySpec{ + User: "", + Group: "", + }, + }, + ExpectMatch: false, + }, + "v1 empty policy does not match authed user with groups": { + User: user.DefaultInfo{Name: "Foo", Groups: []string{"a", "b"}}, + Policy: &v1beta1.Policy{ + Spec: v1beta1.PolicySpec{ + User: "", + Group: "", + }, + }, + ExpectMatch: false, + }, + + "v1 user policy does not match unauthed user": { + User: user.DefaultInfo{}, + Policy: &v1beta1.Policy{ + Spec: v1beta1.PolicySpec{ + User: "Foo", + Group: "", + }, + }, + ExpectMatch: false, + }, + "v1 user policy does not match different user": { + User: user.DefaultInfo{Name: "Bar"}, + Policy: &v1beta1.Policy{ + Spec: v1beta1.PolicySpec{ + User: "Foo", + Group: "", + }, + }, + ExpectMatch: false, + }, + "v1 user policy is case-sensitive": { + User: user.DefaultInfo{Name: "foo"}, + Policy: &v1beta1.Policy{ + Spec: v1beta1.PolicySpec{ + User: "Foo", + Group: "", + }, + }, + ExpectMatch: false, + }, + "v1 user policy does not match substring": { + User: user.DefaultInfo{Name: "FooBar"}, + Policy: &v1beta1.Policy{ + Spec: v1beta1.PolicySpec{ + User: "Foo", + Group: "", + }, + }, + ExpectMatch: 
false, + }, + "v1 user policy matches username": { + User: user.DefaultInfo{Name: "Foo"}, + Policy: &v1beta1.Policy{ + Spec: v1beta1.PolicySpec{ + User: "Foo", + Group: "", + }, + }, + ExpectMatch: true, + }, + + "v1 group policy does not match unauthed user": { + User: user.DefaultInfo{}, + Policy: &v1beta1.Policy{ + Spec: v1beta1.PolicySpec{ + User: "", + Group: "Foo", + }, + }, + ExpectMatch: false, + }, + "v1 group policy does not match user in different group": { + User: user.DefaultInfo{Name: "FooBar", Groups: []string{"B"}}, + Policy: &v1beta1.Policy{ + Spec: v1beta1.PolicySpec{ + User: "", + Group: "A", + }, + }, + ExpectMatch: false, + }, + "v1 group policy is case-sensitive": { + User: user.DefaultInfo{Name: "Foo", Groups: []string{"A", "B", "C"}}, + Policy: &v1beta1.Policy{ + Spec: v1beta1.PolicySpec{ + User: "", + Group: "b", + }, + }, + ExpectMatch: false, + }, + "v1 group policy does not match substring": { + User: user.DefaultInfo{Name: "Foo", Groups: []string{"A", "BBB", "C"}}, + Policy: &v1beta1.Policy{ + Spec: v1beta1.PolicySpec{ + User: "", + Group: "B", + }, + }, + ExpectMatch: false, + }, + "v1 group policy matches user in group": { + User: user.DefaultInfo{Name: "Foo", Groups: []string{"A", "B", "C"}}, + Policy: &v1beta1.Policy{ + Spec: v1beta1.PolicySpec{ + User: "", + Group: "B", + }, + }, + ExpectMatch: true, + }, + + "v1 user and group policy requires user match": { + User: user.DefaultInfo{Name: "Bar", Groups: []string{"A", "B", "C"}}, + Policy: &v1beta1.Policy{ + Spec: v1beta1.PolicySpec{ + User: "Foo", + Group: "B", + }, + }, + ExpectMatch: false, + }, + "v1 user and group policy requires group match": { + User: user.DefaultInfo{Name: "Foo", Groups: []string{"A", "B", "C"}}, + Policy: &v1beta1.Policy{ + Spec: v1beta1.PolicySpec{ + User: "Foo", + Group: "D", + }, + }, + ExpectMatch: false, + }, + "v1 user and group policy matches": { + User: user.DefaultInfo{Name: "Foo", Groups: []string{"A", "B", "C"}}, + Policy: &v1beta1.Policy{ + Spec: v1beta1.PolicySpec{ + User: "Foo", + Group: "B", + }, + }, + ExpectMatch: true, + }, + } + + for k, tc := range testCases { + policy := &api.Policy{} + if err := api.Scheme.Convert(tc.Policy, policy); err != nil { + t.Errorf("%s: error converting: %v", k, err) + continue + } + attr := authorizer.AttributesRecord{ + User: &tc.User, + } + actualMatch := subjectMatches(*policy, attr) + if tc.ExpectMatch != actualMatch { + t.Errorf("%v: Expected actorMatches=%v but actually got=%v", + k, tc.ExpectMatch, actualMatch) + } + } +} + +func newWithContents(t *testing.T, contents string) (authorizer.Authorizer, error) { + f, err := ioutil.TempFile("", "abac_test") + if err != nil { + t.Fatalf("unexpected error creating policyfile: %v", err) + } + f.Close() + defer os.Remove(f.Name()) + + if err := ioutil.WriteFile(f.Name(), []byte(contents), 0700); err != nil { + t.Fatalf("unexpected error writing policyfile: %v", err) + } + + pl, err := NewFromFile(f.Name()) + return pl, err +} + +func TestPolicy(t *testing.T) { + tests := []struct { + policy runtime.Object + attr authorizer.Attributes + matches bool + name string + }{ + // v0 + { + policy: &v0.Policy{}, + attr: authorizer.AttributesRecord{}, + matches: true, + name: "v0 null", + }, + + // v0 mismatches + { + policy: &v0.Policy{ + Readonly: true, + }, + attr: authorizer.AttributesRecord{}, + matches: false, + name: "v0 read-only mismatch", + }, + { + policy: &v0.Policy{ + User: "foo", + }, + attr: authorizer.AttributesRecord{ + User: &user.DefaultInfo{ + Name: "bar", + }, + }, + matches: 
false, + name: "v0 user name mis-match", + }, + { + policy: &v0.Policy{ + Resource: "foo", + }, + attr: authorizer.AttributesRecord{ + Resource: "bar", + ResourceRequest: true, + }, + matches: false, + name: "v0 resource mis-match", + }, + { + policy: &v0.Policy{ + User: "foo", + Resource: "foo", + Namespace: "foo", + }, + attr: authorizer.AttributesRecord{ + User: &user.DefaultInfo{ + Name: "foo", + }, + Resource: "foo", + Namespace: "foo", + ResourceRequest: true, + }, + matches: true, + name: "v0 namespace mis-match", + }, + + // v0 matches + { + policy: &v0.Policy{}, + attr: authorizer.AttributesRecord{ResourceRequest: true}, + matches: true, + name: "v0 null resource", + }, + { + policy: &v0.Policy{ + Readonly: true, + }, + attr: authorizer.AttributesRecord{ + Verb: "get", + }, + matches: true, + name: "v0 read-only match", + }, + { + policy: &v0.Policy{ + User: "foo", + }, + attr: authorizer.AttributesRecord{ + User: &user.DefaultInfo{ + Name: "foo", + }, + }, + matches: true, + name: "v0 user name match", + }, + { + policy: &v0.Policy{ + Resource: "foo", + }, + attr: authorizer.AttributesRecord{ + Resource: "foo", + ResourceRequest: true, + }, + matches: true, + name: "v0 resource match", + }, + + // v1 mismatches + { + policy: &v1beta1.Policy{}, + attr: authorizer.AttributesRecord{ + ResourceRequest: true, + }, + matches: false, + name: "v1 null", + }, + { + policy: &v1beta1.Policy{ + Spec: v1beta1.PolicySpec{ + User: "foo", + }, + }, + attr: authorizer.AttributesRecord{ + User: &user.DefaultInfo{ + Name: "bar", + }, + ResourceRequest: true, + }, + matches: false, + name: "v1 user name mis-match", + }, + { + policy: &v1beta1.Policy{ + Spec: v1beta1.PolicySpec{ + User: "*", + Readonly: true, + }, + }, + attr: authorizer.AttributesRecord{ + ResourceRequest: true, + }, + matches: false, + name: "v1 read-only mismatch", + }, + { + policy: &v1beta1.Policy{ + Spec: v1beta1.PolicySpec{ + User: "*", + Resource: "foo", + }, + }, + attr: authorizer.AttributesRecord{ + Resource: "bar", + ResourceRequest: true, + }, + matches: false, + name: "v1 resource mis-match", + }, + { + policy: &v1beta1.Policy{ + Spec: v1beta1.PolicySpec{ + User: "foo", + Namespace: "barr", + Resource: "baz", + }, + }, + attr: authorizer.AttributesRecord{ + User: &user.DefaultInfo{ + Name: "foo", + }, + Namespace: "bar", + Resource: "baz", + ResourceRequest: true, + }, + matches: false, + name: "v1 namespace mis-match", + }, + { + policy: &v1beta1.Policy{ + Spec: v1beta1.PolicySpec{ + User: "*", + NonResourcePath: "/api", + }, + }, + attr: authorizer.AttributesRecord{ + Path: "/api2", + ResourceRequest: false, + }, + matches: false, + name: "v1 non-resource mis-match", + }, + { + policy: &v1beta1.Policy{ + Spec: v1beta1.PolicySpec{ + User: "*", + NonResourcePath: "/api/*", + }, + }, + attr: authorizer.AttributesRecord{ + Path: "/api2/foo", + ResourceRequest: false, + }, + matches: false, + name: "v1 non-resource wildcard subpath mis-match", + }, + + // v1 matches + { + policy: &v1beta1.Policy{ + Spec: v1beta1.PolicySpec{ + User: "foo", + }, + }, + attr: authorizer.AttributesRecord{ + User: &user.DefaultInfo{ + Name: "foo", + }, + ResourceRequest: true, + }, + matches: true, + name: "v1 user match", + }, + { + policy: &v1beta1.Policy{ + Spec: v1beta1.PolicySpec{ + User: "*", + }, + }, + attr: authorizer.AttributesRecord{ + ResourceRequest: true, + }, + matches: true, + name: "v1 user wildcard match", + }, + { + policy: &v1beta1.Policy{ + Spec: v1beta1.PolicySpec{ + Group: "bar", + }, + }, + attr: 
authorizer.AttributesRecord{ + User: &user.DefaultInfo{ + Name: "foo", + Groups: []string{"bar"}, + }, + ResourceRequest: true, + }, + matches: true, + name: "v1 group match", + }, + { + policy: &v1beta1.Policy{ + Spec: v1beta1.PolicySpec{ + Group: "*", + }, + }, + attr: authorizer.AttributesRecord{ + User: &user.DefaultInfo{ + Name: "foo", + Groups: []string{"bar"}, + }, + ResourceRequest: true, + }, + matches: true, + name: "v1 group wildcard match", + }, + { + policy: &v1beta1.Policy{ + Spec: v1beta1.PolicySpec{ + User: "*", + Readonly: true, + }, + }, + attr: authorizer.AttributesRecord{ + Verb: "get", + ResourceRequest: true, + }, + matches: true, + name: "v1 read-only match", + }, + { + policy: &v1beta1.Policy{ + Spec: v1beta1.PolicySpec{ + User: "*", + Resource: "foo", + }, + }, + attr: authorizer.AttributesRecord{ + Resource: "foo", + ResourceRequest: true, + }, + matches: true, + name: "v1 resource match", + }, + { + policy: &v1beta1.Policy{ + Spec: v1beta1.PolicySpec{ + User: "foo", + Namespace: "bar", + Resource: "baz", + }, + }, + attr: authorizer.AttributesRecord{ + User: &user.DefaultInfo{ + Name: "foo", + }, + Namespace: "bar", + Resource: "baz", + ResourceRequest: true, + }, + matches: true, + name: "v1 namespace match", + }, + { + policy: &v1beta1.Policy{ + Spec: v1beta1.PolicySpec{ + User: "*", + NonResourcePath: "/api", + }, + }, + attr: authorizer.AttributesRecord{ + Path: "/api", + ResourceRequest: false, + }, + matches: true, + name: "v1 non-resource match", + }, + { + policy: &v1beta1.Policy{ + Spec: v1beta1.PolicySpec{ + User: "*", + NonResourcePath: "*", + }, + }, + attr: authorizer.AttributesRecord{ + Path: "/api", + ResourceRequest: false, + }, + matches: true, + name: "v1 non-resource wildcard match", + }, + { + policy: &v1beta1.Policy{ + Spec: v1beta1.PolicySpec{ + User: "*", + NonResourcePath: "/api/*", + }, + }, + attr: authorizer.AttributesRecord{ + Path: "/api/foo", + ResourceRequest: false, + }, + matches: true, + name: "v1 non-resource wildcard subpath match", + }, + } + for _, test := range tests { + policy := &api.Policy{} + if err := api.Scheme.Convert(test.policy, policy); err != nil { + t.Errorf("%s: error converting: %v", test.name, err) + continue + } + matches := matches(*policy, test.attr) + if test.matches != matches { + t.Errorf("%s: expected: %t, saw: %t", test.name, test.matches, matches) + continue + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/auth/authorizer/abac/example_policy_file.jsonl b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/auth/authorizer/abac/example_policy_file.jsonl new file mode 100644 index 000000000000..755145a1d7d3 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/auth/authorizer/abac/example_policy_file.jsonl @@ -0,0 +1,10 @@ +{"apiVersion": "abac.authorization.kubernetes.io/v1beta1", "kind": "Policy", "spec": {"user":"*", "nonResourcePath": "*", "readonly": true}} +{"apiVersion": "abac.authorization.kubernetes.io/v1beta1", "kind": "Policy", "spec": {"user":"admin", "namespace": "*", "resource": "*", "apiGroup": "*" }} +{"apiVersion": "abac.authorization.kubernetes.io/v1beta1", "kind": "Policy", "spec": {"user":"scheduler", "namespace": "*", "resource": "pods", "readonly": true }} +{"apiVersion": "abac.authorization.kubernetes.io/v1beta1", "kind": "Policy", "spec": {"user":"scheduler", "namespace": "*", "resource": "bindings" }} +{"apiVersion": "abac.authorization.kubernetes.io/v1beta1", "kind": "Policy", "spec": {"user":"kubelet", "namespace": "*", "resource": "pods", "readonly": true 
}} +{"apiVersion": "abac.authorization.kubernetes.io/v1beta1", "kind": "Policy", "spec": {"user":"kubelet", "namespace": "*", "resource": "services", "readonly": true }} +{"apiVersion": "abac.authorization.kubernetes.io/v1beta1", "kind": "Policy", "spec": {"user":"kubelet", "namespace": "*", "resource": "endpoints", "readonly": true }} +{"apiVersion": "abac.authorization.kubernetes.io/v1beta1", "kind": "Policy", "spec": {"user":"kubelet", "namespace": "*", "resource": "events" }} +{"apiVersion": "abac.authorization.kubernetes.io/v1beta1", "kind": "Policy", "spec": {"user":"alice", "namespace": "projectCaribou", "resource": "*", "apiGroup": "*" }} +{"apiVersion": "abac.authorization.kubernetes.io/v1beta1", "kind": "Policy", "spec": {"user":"bob", "namespace": "projectCaribou", "resource": "*", "apiGroup": "*", "readonly": true }} \ No newline at end of file diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/auth/authorizer/interfaces.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/auth/authorizer/interfaces.go new file mode 100644 index 000000000000..d4f02efbd4ad --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/auth/authorizer/interfaces.go @@ -0,0 +1,150 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package authorizer + +import ( + "net/http" + + "k8s.io/kubernetes/pkg/auth/user" +) + +// Attributes is an interface used by an Authorizer to get information about a request +// that is used to make an authorization decision. +type Attributes interface { + // The user string which the request was authenticated as, or empty if + // no authentication occurred and the request was allowed to proceed. + GetUserName() string + + // The list of group names the authenticated user is a member of. Can be + // empty if the authenticated user is not in any groups, or if no + // authentication occurred. + GetGroups() []string + + // GetVerb returns the kube verb associated with API requests (this includes get, list, watch, create, update, patch, delete, and proxy), + // or the lowercased HTTP verb associated with non-API requests (this includes get, put, post, patch, and delete) + GetVerb() string + + // When IsReadOnly() == true, the request has no side effects, other than + // caching, logging, and other incidentals. + IsReadOnly() bool + + // The namespace of the object, if a request is for a REST object. + GetNamespace() string + + // The kind of object, if a request is for a REST object. + GetResource() string + + // GetSubresource returns the subresource being requested, if present + GetSubresource() string + + // GetName returns the name of the object as parsed off the request. This will not be present for all request types, but + // will be present for: get, update, delete + GetName() string + + // The group of the resource, if a request is for a REST object. + GetAPIGroup() string + + // GetAPIVersion returns the version of the group requested, if a request is for a REST object. 
+ GetAPIVersion() string + + // IsResourceRequest returns true for requests to API resources, like /api/v1/nodes, + // and false for non-resource endpoints like /api, /healthz, and /swaggerapi + IsResourceRequest() bool + + // GetPath returns the path of the request + GetPath() string +} + +// Authorizer makes an authorization decision based on information gained by making +// zero or more calls to methods of the Attributes interface. It returns nil when an action is +// authorized, otherwise it returns an error. +type Authorizer interface { + Authorize(a Attributes) (err error) +} + +type AuthorizerFunc func(a Attributes) error + +func (f AuthorizerFunc) Authorize(a Attributes) error { + return f(a) +} + +// RequestAttributesGetter provides a function that extracts Attributes from an http.Request +type RequestAttributesGetter interface { + GetRequestAttributes(user.Info, *http.Request) Attributes +} + +// AttributesRecord implements Attributes interface. +type AttributesRecord struct { + User user.Info + Verb string + Namespace string + APIGroup string + APIVersion string + Resource string + Subresource string + Name string + ResourceRequest bool + Path string +} + +func (a AttributesRecord) GetUserName() string { + return a.User.GetName() +} + +func (a AttributesRecord) GetGroups() []string { + return a.User.GetGroups() +} + +func (a AttributesRecord) GetVerb() string { + return a.Verb +} + +func (a AttributesRecord) IsReadOnly() bool { + return a.Verb == "get" || a.Verb == "list" || a.Verb == "watch" +} + +func (a AttributesRecord) GetNamespace() string { + return a.Namespace +} + +func (a AttributesRecord) GetResource() string { + return a.Resource +} + +func (a AttributesRecord) GetSubresource() string { + return a.Subresource +} + +func (a AttributesRecord) GetName() string { + return a.Name +} + +func (a AttributesRecord) GetAPIGroup() string { + return a.APIGroup +} + +func (a AttributesRecord) GetAPIVersion() string { + return a.APIVersion +} + +func (a AttributesRecord) IsResourceRequest() bool { + return a.ResourceRequest +} + +func (a AttributesRecord) GetPath() string { + return a.Path +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/auth/authorizer/union/union.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/auth/authorizer/union/union.go new file mode 100644 index 000000000000..255ad0823ac7 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/auth/authorizer/union/union.go @@ -0,0 +1,45 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package union + +import ( + "k8s.io/kubernetes/pkg/auth/authorizer" + utilerrors "k8s.io/kubernetes/pkg/util/errors" +) + +// unionAuthzHandler authorizes requests against a chain of authorizer.Authorizer objects +type unionAuthzHandler []authorizer.Authorizer + +// New returns an authorizer that authorizes against a chain of authorizer.Authorizer objects +func New(authorizationHandlers ...authorizer.Authorizer) authorizer.Authorizer { + return unionAuthzHandler(authorizationHandlers) +} + +// Authorize tries each authorizer in the chain in order; it returns nil as soon as one authorizer allows the request, and otherwise returns the aggregated errors +func (authzHandler unionAuthzHandler) Authorize(a authorizer.Attributes) error { + var errlist []error + for _, currAuthzHandler := range authzHandler { + err := currAuthzHandler.Authorize(a) + if err != nil { + errlist = append(errlist, err) + continue + } + return nil + } + + return utilerrors.NewAggregate(errlist) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/auth/authorizer/union/union_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/auth/authorizer/union/union_test.go new file mode 100644 index 000000000000..1a01676af6cd --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/auth/authorizer/union/union_test.go @@ -0,0 +1,73 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
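For orientation, a minimal sketch of how the union authorizer composes; it is not part of the vendored files, and the denyAll/allowAll handlers are illustrative stand-ins:

package main

import (
	"errors"
	"fmt"

	"k8s.io/kubernetes/pkg/auth/authorizer"
	"k8s.io/kubernetes/pkg/auth/authorizer/union"
)

func main() {
	denyAll := authorizer.AuthorizerFunc(func(a authorizer.Attributes) error {
		return errors.New("denied by policy A")
	})
	allowAll := authorizer.AuthorizerFunc(func(a authorizer.Attributes) error {
		return nil
	})

	// denyAll is consulted first; its error is dropped once allowAll succeeds.
	chain := union.New(denyAll, allowAll)
	fmt.Println(chain.Authorize(authorizer.AttributesRecord{})) // <nil>

	// With only failing authorizers, the aggregated error is returned.
	fmt.Println(union.New(denyAll).Authorize(authorizer.AttributesRecord{})) // denied by policy A
}

The first nil result short-circuits the chain, so ordering cheap authorizers ahead of expensive ones is a reasonable pattern.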
+*/ + +package union + +import ( + "errors" + "testing" + + "k8s.io/kubernetes/pkg/auth/authorizer" +) + +type mockAuthzHandler struct { + isAuthorized bool + err error +} + +func (mock *mockAuthzHandler) Authorize(a authorizer.Attributes) error { + if mock.err != nil { + return mock.err + } + if !mock.isAuthorized { + return errors.New("Request unauthorized") + } else { + return nil + } +} + +func TestAuthorizationSecondPasses(t *testing.T) { + handler1 := &mockAuthzHandler{isAuthorized: false} + handler2 := &mockAuthzHandler{isAuthorized: true} + authzHandler := New(handler1, handler2) + + err := authzHandler.Authorize(nil) + if err != nil { + t.Errorf("Unexpected error: %v", err) + } +} + +func TestAuthorizationFirstPasses(t *testing.T) { + handler1 := &mockAuthzHandler{isAuthorized: true} + handler2 := &mockAuthzHandler{isAuthorized: false} + authzHandler := New(handler1, handler2) + + err := authzHandler.Authorize(nil) + if err != nil { + t.Errorf("Unexpected error: %v", err) + } +} + +func TestAuthorizationNonePasses(t *testing.T) { + handler1 := &mockAuthzHandler{isAuthorized: false} + handler2 := &mockAuthzHandler{isAuthorized: false} + authzHandler := New(handler1, handler2) + + err := authzHandler.Authorize(nil) + if err == nil { + t.Errorf("Expected error: %v", err) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/auth/handlers/handlers.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/auth/handlers/handlers.go new file mode 100644 index 000000000000..d005752f16a7 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/auth/handlers/handlers.go @@ -0,0 +1,109 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package handlers + +import ( + "net/http" + "strings" + + "github.com/golang/glog" + "github.com/prometheus/client_golang/prometheus" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/auth/authenticator" +) + +var ( + authenticatedUserCounter = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "authenticated_user_requests", + Help: "Counter of authenticated requests broken out by username.", + }, + []string{"username"}, + ) +) + +func init() { + prometheus.MustRegister(authenticatedUserCounter) +} + +// NewRequestAuthenticator creates an http handler that tries to authenticate the given request as a user, and then +// stores any such user found onto the provided context for the request. If authentication fails or returns an error +// the failed handler is used. On success, handler is invoked to serve the request. 
+func NewRequestAuthenticator(mapper api.RequestContextMapper, auth authenticator.Request, failed http.Handler, handler http.Handler) (http.Handler, error) { + return api.NewRequestContextFilter( + mapper, + http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + user, ok, err := auth.AuthenticateRequest(req) + if err != nil || !ok { + if err != nil { + glog.Errorf("Unable to authenticate the request due to an error: %v", err) + } + failed.ServeHTTP(w, req) + return + } + + if ctx, ok := mapper.Get(req); ok { + mapper.Update(req, api.WithUser(ctx, user)) + } + + authenticatedUserCounter.WithLabelValues(compressUsername(user.GetName())).Inc() + + handler.ServeHTTP(w, req) + }), + ) +} + +func Unauthorized(supportsBasicAuth bool) http.HandlerFunc { + if supportsBasicAuth { + return unauthorizedBasicAuth + } + return unauthorized +} + +// unauthorizedBasicAuth serves an unauthorized message to clients. +func unauthorizedBasicAuth(w http.ResponseWriter, req *http.Request) { + w.Header().Set("WWW-Authenticate", `Basic realm="kubernetes-master"`) + http.Error(w, "Unauthorized", http.StatusUnauthorized) +} + +// unauthorized serves an unauthorized message to clients. +func unauthorized(w http.ResponseWriter, req *http.Request) { + http.Error(w, "Unauthorized", http.StatusUnauthorized) +} + +// compressUsername maps all possible usernames onto a small set of categories +// of usernames. This is done both to limit the cardinality of the +// authenticated_user_requests metric, and to avoid pushing actual usernames into the +// metric. +func compressUsername(username string) string { + switch { + // Known internal identities. + case username == "admin" || + username == "client" || + username == "kube_proxy" || + username == "kubelet" || + username == "system:serviceaccount:kube-system:default": + return username + // Probably an email address. + case strings.Contains(username, "@"): + return "email_id" + // Anything else (custom service accounts, custom external identities, etc.) + default: + return "other" + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/auth/handlers/handlers_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/auth/handlers/handlers_test.go new file mode 100644 index 000000000000..6ad0e67b595b --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/auth/handlers/handlers_test.go @@ -0,0 +1,120 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
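A rough sketch of how the pieces of handlers.go wire together; it is not part of the vendored files, and the header-based authenticator, header name, and address are illustrative assumptions:

package main

import (
	"net/http"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/auth/authenticator"
	"k8s.io/kubernetes/pkg/auth/handlers"
	"k8s.io/kubernetes/pkg/auth/user"
)

func main() {
	mapper := api.NewRequestContextMapper()

	// Hypothetical authenticator that trusts an X-Demo-User header.
	headerAuth := authenticator.RequestFunc(func(req *http.Request) (user.Info, bool, error) {
		name := req.Header.Get("X-Demo-User")
		if name == "" {
			return nil, false, nil // not authenticated
		}
		return &user.DefaultInfo{Name: name}, true, nil
	})

	// The protected handler reads the user back out of the request context.
	protected := http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		ctx, _ := mapper.Get(req)
		if u, ok := api.UserFrom(ctx); ok {
			w.Write([]byte("hello, " + u.GetName() + "\n"))
		}
	})

	// Unauthenticated requests fall through to the 401 handler.
	authFilter, err := handlers.NewRequestAuthenticator(mapper, headerAuth, handlers.Unauthorized(false), protected)
	if err != nil {
		panic(err)
	}
	if err := http.ListenAndServe(":8080", authFilter); err != nil {
		panic(err)
	}
}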
+*/ + +package handlers + +import ( + "errors" + "net/http" + "net/http/httptest" + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/auth/authenticator" + "k8s.io/kubernetes/pkg/auth/user" +) + +func TestAuthenticateRequest(t *testing.T) { + success := make(chan struct{}) + contextMapper := api.NewRequestContextMapper() + auth, err := NewRequestAuthenticator( + contextMapper, + authenticator.RequestFunc(func(req *http.Request) (user.Info, bool, error) { + return &user.DefaultInfo{Name: "user"}, true, nil + }), + http.HandlerFunc(func(_ http.ResponseWriter, _ *http.Request) { + t.Errorf("unexpected call to failed") + }), + http.HandlerFunc(func(_ http.ResponseWriter, req *http.Request) { + ctx, ok := contextMapper.Get(req) + if ctx == nil || !ok { + t.Errorf("no context stored on contextMapper: %#v", contextMapper) + } + user, ok := api.UserFrom(ctx) + if user == nil || !ok { + t.Errorf("no user stored in context: %#v", ctx) + } + close(success) + }), + ) + + auth.ServeHTTP(httptest.NewRecorder(), &http.Request{}) + + <-success + empty, err := api.IsEmpty(contextMapper) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if !empty { + t.Fatalf("contextMapper should have no stored requests: %v", contextMapper) + } +} + +func TestAuthenticateRequestFailed(t *testing.T) { + failed := make(chan struct{}) + contextMapper := api.NewRequestContextMapper() + auth, err := NewRequestAuthenticator( + contextMapper, + authenticator.RequestFunc(func(req *http.Request) (user.Info, bool, error) { + return nil, false, nil + }), + http.HandlerFunc(func(_ http.ResponseWriter, _ *http.Request) { + close(failed) + }), + http.HandlerFunc(func(_ http.ResponseWriter, req *http.Request) { + t.Errorf("unexpected call to handler") + }), + ) + + auth.ServeHTTP(httptest.NewRecorder(), &http.Request{}) + + <-failed + empty, err := api.IsEmpty(contextMapper) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if !empty { + t.Fatalf("contextMapper should have no stored requests: %v", contextMapper) + } +} + +func TestAuthenticateRequestError(t *testing.T) { + failed := make(chan struct{}) + contextMapper := api.NewRequestContextMapper() + auth, err := NewRequestAuthenticator( + contextMapper, + authenticator.RequestFunc(func(req *http.Request) (user.Info, bool, error) { + return nil, false, errors.New("failure") + }), + http.HandlerFunc(func(_ http.ResponseWriter, _ *http.Request) { + close(failed) + }), + http.HandlerFunc(func(_ http.ResponseWriter, req *http.Request) { + t.Errorf("unexpected call to handler") + }), + ) + + auth.ServeHTTP(httptest.NewRecorder(), &http.Request{}) + + <-failed + empty, err := api.IsEmpty(contextMapper) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if !empty { + t.Fatalf("contextMapper should have no stored requests: %v", contextMapper) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/auth/user/user.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/auth/user/user.go index ad7bccef9ff9..c4a4c00d5bff 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/auth/user/user.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/auth/user/user.go @@ -27,6 +27,16 @@ type Info interface { GetUID() string // GetGroups returns the names of the groups the user is a member of GetGroups() []string + + // GetExtra can contain any additional information that the authenticator + // thought was interesting. One example would be scopes on a token. 
+ // Keys in this map should be namespaced to the authenticator or + // authenticator/authorizer pair making use of them. + // For instance: "example.org/foo" instead of "foo" + // This is a map[string][]string because it needs to be serializable into + // a SubjectAccessReviewSpec.authorization.k8s.io for proper authorization + // delegation flows + GetExtra() map[string][]string } // DefaultInfo provides a simple user information exchange object @@ -35,6 +45,7 @@ type DefaultInfo struct { Name string UID string Groups []string + Extra map[string][]string } func (i *DefaultInfo) GetName() string { @@ -48,3 +59,7 @@ func (i *DefaultInfo) GetUID() string { func (i *DefaultInfo) GetGroups() []string { return i.Groups } + +func (i *DefaultInfo) GetExtra() map[string][]string { + return i.Extra +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/OWNERS b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/OWNERS new file mode 100644 index 000000000000..e3fcb227ac12 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/OWNERS @@ -0,0 +1,6 @@ +assignees: + - caesarxuchao + - deads2k + - krousey + - lavalamp + - smarterclayton diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/cache/delta_fifo.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/cache/delta_fifo.go index e7cc1aad19e6..3603c5f00283 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/cache/delta_fifo.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/cache/delta_fifo.go @@ -82,9 +82,9 @@ func NewDeltaFIFO(keyFunc KeyFunc, compressor DeltaCompressor, knownObjects KeyL // different versions of the same object. // // A note on the KeyLister used by the DeltaFIFO: It's main purpose is -// to list keys that are "known", for the puspose of figuring out which +// to list keys that are "known", for the purpose of figuring out which // items have been deleted when Replace() or Delete() are called. The deleted -// objet will be included in the DeleteFinalStateUnknown markers. These objects +// object will be included in the DeleteFinalStateUnknown markers. These objects // could be stale. // // You may provide a function to compress deltas (e.g., represent a @@ -306,6 +306,10 @@ func (f *DeltaFIFO) queueActionLocked(actionType DeltaType, obj interface{}) err func (f *DeltaFIFO) List() []interface{} { f.lock.RLock() defer f.lock.RUnlock() + return f.listLocked() +} + +func (f *DeltaFIFO) listLocked() []interface{} { list := make([]interface{}, 0, len(f.items)) for _, item := range f.items { // Copy item's slice so operations on this slice (delta @@ -452,6 +456,27 @@ func (f *DeltaFIFO) Replace(list []interface{}, resourceVersion string) error { return nil } +// Resync will send a sync event for each item +func (f *DeltaFIFO) Resync() error { + f.lock.Lock() + defer f.lock.Unlock() + for _, k := range f.knownObjects.ListKeys() { + obj, exists, err := f.knownObjects.GetByKey(k) + if err != nil { + glog.Errorf("Unexpected error %v during lookup of key %v, unable to queue object for sync", err, k) + continue + } else if !exists { + glog.Infof("Key %v does not exist in known objects store, unable to queue object for sync", k) + continue + } + + if err := f.queueActionLocked(Sync, obj); err != nil { + return fmt.Errorf("couldn't queue object: %v", err) + } + } + return nil +} + // A KeyListerGetter is anything that knows how to list its keys and look up by key.
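To make the Resync added to DeltaFIFO above concrete, here is a minimal sketch, not part of the vendored files; the keyOf helper and the "pod-a"/"pod-b" items are illustrative. Every key reported by the knownObjects store is re-queued as a Sync delta and later surfaces through Pop:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/client/cache"
)

// keyOf treats each stored item as its own key; purely illustrative.
func keyOf(obj interface{}) (string, error) { return obj.(string), nil }

func main() {
	// A plain Store plays the role of the DeltaFIFO's knownObjects lister.
	known := cache.NewStore(keyOf)
	known.Add("pod-a")
	known.Add("pod-b")

	f := cache.NewDeltaFIFO(keyOf, nil, known)

	// Resync enqueues a Sync delta for every known key.
	if err := f.Resync(); err != nil {
		panic(err)
	}

	for i := 0; i < 2; i++ {
		deltas := f.Pop().(cache.Deltas)
		d := deltas.Newest()
		fmt.Println(d.Type, d.Object) // "Sync pod-a" and "Sync pod-b" (order not guaranteed)
	}
}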
type KeyListerGetter interface { KeyLister diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/cache/delta_fifo_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/cache/delta_fifo_test.go new file mode 100644 index 000000000000..8efd982b5a4a --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/cache/delta_fifo_test.go @@ -0,0 +1,385 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "reflect" + "testing" + "time" +) + +// helper function to reduce stuttering +func testPop(f *DeltaFIFO) testFifoObject { + return f.Pop().(Deltas).Newest().Object.(testFifoObject) +} + +// keyLookupFunc adapts a raw function to be a KeyLookup. +type keyLookupFunc func() []string + +// ListKeys just calls kl. +func (kl keyLookupFunc) ListKeys() []string { + return kl() +} + +// GetByKey returns the key if it exists in the list returned by kl. +func (kl keyLookupFunc) GetByKey(key string) (interface{}, bool, error) { + for _, v := range kl() { + if v == key { + return key, true, nil + } + } + return nil, false, nil +} + +func TestDeltaFIFO_basic(t *testing.T) { + f := NewDeltaFIFO(testFifoObjectKeyFunc, nil, nil) + const amount = 500 + go func() { + for i := 0; i < amount; i++ { + f.Add(mkFifoObj(string([]rune{'a', rune(i)}), i+1)) + } + }() + go func() { + for u := uint64(0); u < amount; u++ { + f.Add(mkFifoObj(string([]rune{'b', rune(u)}), u+1)) + } + }() + + lastInt := int(0) + lastUint := uint64(0) + for i := 0; i < amount*2; i++ { + switch obj := testPop(f).val.(type) { + case int: + if obj <= lastInt { + t.Errorf("got %v (int) out of order, last was %v", obj, lastInt) + } + lastInt = obj + case uint64: + if obj <= lastUint { + t.Errorf("got %v (uint) out of order, last was %v", obj, lastUint) + } else { + lastUint = obj + } + default: + t.Fatalf("unexpected type %#v", obj) + } + } +} + +func TestDeltaFIFO_compressorWorks(t *testing.T) { + oldestTypes := []DeltaType{} + f := NewDeltaFIFO( + testFifoObjectKeyFunc, + // This function just keeps the most recent delta + // and puts deleted ones in the list. 
+ DeltaCompressorFunc(func(d Deltas) Deltas { + if n := len(d); n > 1 { + oldestTypes = append(oldestTypes, d[0].Type) + d = d[1:] + } + return d + }), + nil, + ) + f.Add(mkFifoObj("foo", 10)) + f.Update(mkFifoObj("foo", 12)) + f.Replace([]interface{}{mkFifoObj("foo", 20)}, "0") + f.Delete(mkFifoObj("foo", 22)) + f.Add(mkFifoObj("foo", 25)) // flush the last one out + expect := []DeltaType{Added, Updated, Sync, Deleted} + if e, a := expect, oldestTypes; !reflect.DeepEqual(e, a) { + t.Errorf("Expected %#v, got %#v", e, a) + } + if e, a := (Deltas{{Added, mkFifoObj("foo", 25)}}), f.Pop().(Deltas); !reflect.DeepEqual(e, a) { + t.Fatalf("Expected %#v, got %#v", e, a) + } + +} + +func TestDeltaFIFO_addUpdate(t *testing.T) { + f := NewDeltaFIFO(testFifoObjectKeyFunc, nil, nil) + f.Add(mkFifoObj("foo", 10)) + f.Update(mkFifoObj("foo", 12)) + f.Delete(mkFifoObj("foo", 15)) + + if e, a := []interface{}{mkFifoObj("foo", 15)}, f.List(); !reflect.DeepEqual(e, a) { + t.Errorf("Expected %+v, got %+v", e, a) + } + if e, a := []string{"foo"}, f.ListKeys(); !reflect.DeepEqual(e, a) { + t.Errorf("Expected %+v, got %+v", e, a) + } + + got := make(chan testFifoObject, 2) + go func() { + for { + obj := f.Pop().(Deltas).Newest().Object.(testFifoObject) + t.Logf("got a thing %#v", obj) + t.Logf("D len: %v", len(f.queue)) + got <- obj + } + }() + + first := <-got + if e, a := 15, first.val; e != a { + t.Errorf("Didn't get updated value (%v), got %v", e, a) + } + select { + case unexpected := <-got: + t.Errorf("Got second value %v", unexpected.val) + case <-time.After(50 * time.Millisecond): + } + _, exists, _ := f.Get(mkFifoObj("foo", "")) + if exists { + t.Errorf("item did not get removed") + } +} + +func TestDeltaFIFO_enqueueingNoLister(t *testing.T) { + f := NewDeltaFIFO(testFifoObjectKeyFunc, nil, nil) + f.Add(mkFifoObj("foo", 10)) + f.Update(mkFifoObj("bar", 15)) + f.Add(mkFifoObj("qux", 17)) + f.Delete(mkFifoObj("qux", 18)) + + // This delete does not enqueue anything because baz doesn't exist. + f.Delete(mkFifoObj("baz", 20)) + + expectList := []int{10, 15, 18} + for _, expect := range expectList { + if e, a := expect, testPop(f).val; e != a { + t.Errorf("Didn't get updated value (%v), got %v", e, a) + } + } + if e, a := 0, len(f.items); e != a { + t.Errorf("queue unexpectedly not empty: %v != %v\n%#v", e, a, f.items) + } +} + +func TestDeltaFIFO_enqueueingWithLister(t *testing.T) { + f := NewDeltaFIFO( + testFifoObjectKeyFunc, + nil, + keyLookupFunc(func() []string { + return []string{"foo", "bar", "baz"} + }), + ) + f.Add(mkFifoObj("foo", 10)) + f.Update(mkFifoObj("bar", 15)) + + // This delete does enqueue the deletion, because "baz" is in the key lister. 
+ f.Delete(mkFifoObj("baz", 20)) + + expectList := []int{10, 15, 20} + for _, expect := range expectList { + if e, a := expect, testPop(f).val; e != a { + t.Errorf("Didn't get updated value (%v), got %v", e, a) + } + } + if e, a := 0, len(f.items); e != a { + t.Errorf("queue unexpectedly not empty: %v != %v", e, a) + } +} + +func TestDeltaFIFO_addReplace(t *testing.T) { + f := NewDeltaFIFO(testFifoObjectKeyFunc, nil, nil) + f.Add(mkFifoObj("foo", 10)) + f.Replace([]interface{}{mkFifoObj("foo", 15)}, "0") + got := make(chan testFifoObject, 2) + go func() { + for { + got <- testPop(f) + } + }() + + first := <-got + if e, a := 15, first.val; e != a { + t.Errorf("Didn't get updated value (%v), got %v", e, a) + } + select { + case unexpected := <-got: + t.Errorf("Got second value %v", unexpected.val) + case <-time.After(50 * time.Millisecond): + } + _, exists, _ := f.Get(mkFifoObj("foo", "")) + if exists { + t.Errorf("item did not get removed") + } +} + +func TestDeltaFIFO_ReplaceMakesDeletions(t *testing.T) { + f := NewDeltaFIFO( + testFifoObjectKeyFunc, + nil, + keyLookupFunc(func() []string { + return []string{"foo", "bar", "baz"} + }), + ) + f.Delete(mkFifoObj("baz", 10)) + f.Replace([]interface{}{mkFifoObj("foo", 5)}, "0") + + expectedList := []Deltas{ + {{Deleted, mkFifoObj("baz", 10)}}, + {{Sync, mkFifoObj("foo", 5)}}, + // Since "bar" didn't have a delete event and wasn't in the Replace list + // it should get a tombstone key with the right Obj. + {{Deleted, DeletedFinalStateUnknown{Key: "bar", Obj: "bar"}}}, + } + + for _, expected := range expectedList { + cur := f.Pop().(Deltas) + if e, a := expected, cur; !reflect.DeepEqual(e, a) { + t.Errorf("Expected %#v, got %#v", e, a) + } + } +} + +func TestDeltaFIFO_detectLineJumpers(t *testing.T) { + f := NewDeltaFIFO(testFifoObjectKeyFunc, nil, nil) + + f.Add(mkFifoObj("foo", 10)) + f.Add(mkFifoObj("bar", 1)) + f.Add(mkFifoObj("foo", 11)) + f.Add(mkFifoObj("foo", 13)) + f.Add(mkFifoObj("zab", 30)) + + if e, a := 13, testPop(f).val; a != e { + t.Fatalf("expected %d, got %d", e, a) + } + + f.Add(mkFifoObj("foo", 14)) // ensure foo doesn't jump back in line + + if e, a := 1, testPop(f).val; a != e { + t.Fatalf("expected %d, got %d", e, a) + } + + if e, a := 30, testPop(f).val; a != e { + t.Fatalf("expected %d, got %d", e, a) + } + + if e, a := 14, testPop(f).val; a != e { + t.Fatalf("expected %d, got %d", e, a) + } +} + +func TestDeltaFIFO_addIfNotPresent(t *testing.T) { + f := NewDeltaFIFO(testFifoObjectKeyFunc, nil, nil) + + f.Add(mkFifoObj("b", 3)) + b3 := f.Pop() + f.Add(mkFifoObj("c", 4)) + c4 := f.Pop() + if e, a := 0, len(f.items); e != a { + t.Fatalf("Expected %v, got %v items in queue", e, a) + } + + f.Add(mkFifoObj("a", 1)) + f.Add(mkFifoObj("b", 2)) + f.AddIfNotPresent(b3) + f.AddIfNotPresent(c4) + + if e, a := 3, len(f.items); a != e { + t.Fatalf("expected queue length %d, got %d", e, a) + } + + expectedValues := []int{1, 2, 4} + for _, expected := range expectedValues { + if actual := testPop(f).val; actual != expected { + t.Fatalf("expected value %d, got %d", expected, actual) + } + } +} + +func TestDeltaFIFO_KeyOf(t *testing.T) { + f := DeltaFIFO{keyFunc: testFifoObjectKeyFunc} + + table := []struct { + obj interface{} + key string + }{ + {obj: testFifoObject{name: "A"}, key: "A"}, + {obj: DeletedFinalStateUnknown{Key: "B", Obj: nil}, key: "B"}, + {obj: Deltas{{Object: testFifoObject{name: "C"}}}, key: "C"}, + {obj: Deltas{{Object: DeletedFinalStateUnknown{Key: "D", Obj: nil}}}, key: "D"}, + } + + for _, item := range table { + 
got, err := f.KeyOf(item.obj) + if err != nil { + t.Errorf("Unexpected error for %q: %v", item.obj, err) + continue + } + if e, a := item.key, got; e != a { + t.Errorf("Expected %v, got %v", e, a) + } + } +} + +func TestDeltaFIFO_HasSynced(t *testing.T) { + tests := []struct { + actions []func(f *DeltaFIFO) + expectedSynced bool + }{ + { + actions: []func(f *DeltaFIFO){}, + expectedSynced: false, + }, + { + actions: []func(f *DeltaFIFO){ + func(f *DeltaFIFO) { f.Add(mkFifoObj("a", 1)) }, + }, + expectedSynced: true, + }, + { + actions: []func(f *DeltaFIFO){ + func(f *DeltaFIFO) { f.Replace([]interface{}{}, "0") }, + }, + expectedSynced: true, + }, + { + actions: []func(f *DeltaFIFO){ + func(f *DeltaFIFO) { f.Replace([]interface{}{mkFifoObj("a", 1), mkFifoObj("b", 2)}, "0") }, + }, + expectedSynced: false, + }, + { + actions: []func(f *DeltaFIFO){ + func(f *DeltaFIFO) { f.Replace([]interface{}{mkFifoObj("a", 1), mkFifoObj("b", 2)}, "0") }, + func(f *DeltaFIFO) { f.Pop() }, + }, + expectedSynced: false, + }, + { + actions: []func(f *DeltaFIFO){ + func(f *DeltaFIFO) { f.Replace([]interface{}{mkFifoObj("a", 1), mkFifoObj("b", 2)}, "0") }, + func(f *DeltaFIFO) { f.Pop() }, + func(f *DeltaFIFO) { f.Pop() }, + }, + expectedSynced: true, + }, + } + + for i, test := range tests { + f := NewDeltaFIFO(testFifoObjectKeyFunc, nil, nil) + + for _, action := range test.actions { + action(f) + } + if e, a := test.expectedSynced, f.HasSynced(); a != e { + t.Errorf("test case %v failed, expected: %v , got %v", i, e, a) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/cache/expiration_cache.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/cache/expiration_cache.go index 964deda07950..ad8684e8c377 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/cache/expiration_cache.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/cache/expiration_cache.go @@ -146,6 +146,7 @@ func (c *ExpirationCache) ListKeys() []string { func (c *ExpirationCache) Add(obj interface{}) error { c.expirationLock.Lock() defer c.expirationLock.Unlock() + key, err := c.keyFunc(obj) if err != nil { return KeyError{obj, err} @@ -191,6 +192,11 @@ func (c *ExpirationCache) Replace(list []interface{}, resourceVersion string) er return nil } +// Resync will touch all objects to put them into the processing queue +func (c *ExpirationCache) Resync() error { + return c.cacheStorage.Resync() +} + // NewTTLStore creates and returns a ExpirationCache with a TTLPolicy func NewTTLStore(keyFunc KeyFunc, ttl time.Duration) Store { return &ExpirationCache{ diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/cache/expiration_cache_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/cache/expiration_cache_test.go new file mode 100644 index 000000000000..04a05786f89c --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/cache/expiration_cache_test.go @@ -0,0 +1,189 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
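Before the tests, a minimal sketch of the TTL store behavior that expiration_cache.go provides; it is not part of the vendored files, and the nameKey helper and "lease-1" value are illustrative. Entries are treated as absent once they outlive the configured ttl:

package main

import (
	"fmt"
	"time"

	"k8s.io/kubernetes/pkg/client/cache"
)

// nameKey uses the stored string itself as its key; purely illustrative.
func nameKey(obj interface{}) (string, error) { return obj.(string), nil }

func main() {
	store := cache.NewTTLStore(nameKey, 50*time.Millisecond)
	if err := store.Add("lease-1"); err != nil {
		panic(err)
	}

	if _, exists, _ := store.GetByKey("lease-1"); exists {
		fmt.Println("fresh entry is visible")
	}

	time.Sleep(100 * time.Millisecond)

	// Once strictly older than the ttl, the entry reads as absent.
	if _, exists, _ := store.GetByKey("lease-1"); !exists {
		fmt.Println("expired entry is gone")
	}
}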
+*/ + +package cache + +import ( + "reflect" + "testing" + "time" + + "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/sets" + "k8s.io/kubernetes/pkg/util/wait" +) + +func TestTTLExpirationBasic(t *testing.T) { + testObj := testStoreObject{id: "foo", val: "bar"} + deleteChan := make(chan string, 1) + ttlStore := NewFakeExpirationStore( + testStoreKeyFunc, deleteChan, + &FakeExpirationPolicy{ + NeverExpire: sets.NewString(), + RetrieveKeyFunc: func(obj interface{}) (string, error) { + return obj.(*timestampedEntry).obj.(testStoreObject).id, nil + }, + }, + util.RealClock{}, + ) + err := ttlStore.Add(testObj) + if err != nil { + t.Errorf("Unable to add obj %#v", testObj) + } + item, exists, err := ttlStore.Get(testObj) + if err != nil { + t.Errorf("Failed to get from store, %v", err) + } + if exists || item != nil { + t.Errorf("Got unexpected item %#v", item) + } + key, _ := testStoreKeyFunc(testObj) + select { + case delKey := <-deleteChan: + if delKey != key { + t.Errorf("Unexpected delete for key %s", key) + } + case <-time.After(wait.ForeverTestTimeout): + t.Errorf("Unexpected timeout waiting on delete") + } + close(deleteChan) +} + +func TestReAddExpiredItem(t *testing.T) { + deleteChan := make(chan string, 1) + exp := &FakeExpirationPolicy{ + NeverExpire: sets.NewString(), + RetrieveKeyFunc: func(obj interface{}) (string, error) { + return obj.(*timestampedEntry).obj.(testStoreObject).id, nil + }, + } + ttlStore := NewFakeExpirationStore( + testStoreKeyFunc, deleteChan, exp, util.RealClock{}) + testKey := "foo" + testObj := testStoreObject{id: testKey, val: "bar"} + err := ttlStore.Add(testObj) + if err != nil { + t.Errorf("Unable to add obj %#v", testObj) + } + + // This get will expire the item. + item, exists, err := ttlStore.Get(testObj) + if err != nil { + t.Errorf("Failed to get from store, %v", err) + } + if exists || item != nil { + t.Errorf("Got unexpected item %#v", item) + } + + key, _ := testStoreKeyFunc(testObj) + differentValue := "different_bar" + err = ttlStore.Add( + testStoreObject{id: testKey, val: differentValue}) + if err != nil { + t.Errorf("Failed to add second value") + } + + select { + case delKey := <-deleteChan: + if delKey != key { + t.Errorf("Unexpected delete for key %s", key) + } + case <-time.After(wait.ForeverTestTimeout): + t.Errorf("Unexpected timeout waiting on delete") + } + exp.NeverExpire = sets.NewString(testKey) + item, exists, err = ttlStore.GetByKey(testKey) + if err != nil { + t.Errorf("Failed to get from store, %v", err) + } + if !exists || item == nil || item.(testStoreObject).val != differentValue { + t.Errorf("Got unexpected item %#v", item) + } + close(deleteChan) +} + +func TestTTLList(t *testing.T) { + testObjs := []testStoreObject{ + {id: "foo", val: "bar"}, + {id: "foo1", val: "bar1"}, + {id: "foo2", val: "bar2"}, + } + expireKeys := sets.NewString(testObjs[0].id, testObjs[2].id) + deleteChan := make(chan string, len(testObjs)) + defer close(deleteChan) + + ttlStore := NewFakeExpirationStore( + testStoreKeyFunc, deleteChan, + &FakeExpirationPolicy{ + NeverExpire: sets.NewString(testObjs[1].id), + RetrieveKeyFunc: func(obj interface{}) (string, error) { + return obj.(*timestampedEntry).obj.(testStoreObject).id, nil + }, + }, + util.RealClock{}, + ) + for _, obj := range testObjs { + err := ttlStore.Add(obj) + if err != nil { + t.Errorf("Unable to add obj %#v", obj) + } + } + listObjs := ttlStore.List() + if len(listObjs) != 1 || !reflect.DeepEqual(listObjs[0], testObjs[1]) { + t.Errorf("List returned unexpected results %#v", 
listObjs) + } + + // Make sure all our deletes come through at an acceptable rate (1/100ms) + for expireKeys.Len() != 0 { + select { + case delKey := <-deleteChan: + if !expireKeys.Has(delKey) { + t.Errorf("Unexpected delete for key %s", delKey) + } + expireKeys.Delete(delKey) + case <-time.After(wait.ForeverTestTimeout): + t.Errorf("Unexpected timeout waiting on delete") + return + } + } +} + +func TestTTLPolicy(t *testing.T) { + fakeTime := time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC) + ttl := 30 * time.Second + exactlyOnTTL := fakeTime.Add(-ttl) + expiredTime := fakeTime.Add(-(ttl + 1)) + + policy := TTLPolicy{ttl, util.NewFakeClock(fakeTime)} + fakeTimestampedEntry := &timestampedEntry{obj: struct{}{}, timestamp: exactlyOnTTL} + if policy.IsExpired(fakeTimestampedEntry) { + t.Errorf("TTL cache should not expire entries exactly on ttl") + } + fakeTimestampedEntry.timestamp = fakeTime + if policy.IsExpired(fakeTimestampedEntry) { + t.Errorf("TTL Cache should not expire entries before ttl") + } + fakeTimestampedEntry.timestamp = expiredTime + if !policy.IsExpired(fakeTimestampedEntry) { + t.Errorf("TTL Cache should expire entries older than ttl") + } + for _, ttl = range []time.Duration{0, -1} { + policy.Ttl = ttl + if policy.IsExpired(fakeTimestampedEntry) { + t.Errorf("TTL policy should only expire entries when initialized with a ttl > 0") + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/cache/fake_custom_store.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/cache/fake_custom_store.go new file mode 100644 index 000000000000..ccd69ef7bfdd --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/cache/fake_custom_store.go @@ -0,0 +1,102 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package cache + +// FakeCustomStore lets you define custom functions for store operations +type FakeCustomStore struct { + AddFunc func(obj interface{}) error + UpdateFunc func(obj interface{}) error + DeleteFunc func(obj interface{}) error + ListFunc func() []interface{} + ListKeysFunc func() []string + GetFunc func(obj interface{}) (item interface{}, exists bool, err error) + GetByKeyFunc func(key string) (item interface{}, exists bool, err error) + ReplaceFunc func(list []interface{}, resourceVersion string) error + ResyncFunc func() error +} + +// Add calls the custom Add function if defined +func (f *FakeCustomStore) Add(obj interface{}) error { + if f.AddFunc != nil { + return f.AddFunc(obj) + } + return nil +} + +// Update calls the custom Update function if defined +func (f *FakeCustomStore) Update(obj interface{}) error { + if f.UpdateFunc != nil { + return f.UpdateFunc(obj) + } + return nil +} + +// Delete calls the custom Delete function if defined +func (f *FakeCustomStore) Delete(obj interface{}) error { + if f.DeleteFunc != nil { + return f.DeleteFunc(obj) + } + return nil +} + +// List calls the custom List function if defined +func (f *FakeCustomStore) List() []interface{} { + if f.ListFunc != nil { + return f.ListFunc() + } + return nil +} + +// ListKeys calls the custom ListKeys function if defined +func (f *FakeCustomStore) ListKeys() []string { + if f.ListKeysFunc != nil { + return f.ListKeysFunc() + } + return nil +} + +// Get calls the custom Get function if defined +func (f *FakeCustomStore) Get(obj interface{}) (item interface{}, exists bool, err error) { + if f.GetFunc != nil { + return f.GetFunc(obj) + } + return nil, false, nil +} + +// GetByKey calls the custom GetByKey function if defined +func (f *FakeCustomStore) GetByKey(key string) (item interface{}, exists bool, err error) { + if f.GetByKeyFunc != nil { + return f.GetByKeyFunc(key) + } + return nil, false, nil +} + +// Replace calls the custom Replace function if defined +func (f *FakeCustomStore) Replace(list []interface{}, resourceVersion string) error { + if f.ReplaceFunc != nil { + return f.ReplaceFunc(list, resourceVersion) + } + return nil +} + +// Resync calls the custom Resync function if defined +func (f *FakeCustomStore) Resync() error { + if f.ResyncFunc != nil { + return f.ResyncFunc() + } + return nil +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/cache/fifo.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/cache/fifo.go index d4076a326d81..f98bea6f4453 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/cache/fifo.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/cache/fifo.go @@ -18,6 +18,8 @@ package cache import ( "sync" + + "k8s.io/kubernetes/pkg/util/sets" ) // Queue is exactly like a Store, but has a Pop() method too. @@ -241,6 +243,26 @@ func (f *FIFO) Replace(list []interface{}, resourceVersion string) error { return nil } +// Resync will touch all objects to put them into the processing queue +func (f *FIFO) Resync() error { + f.lock.Lock() + defer f.lock.Unlock() + + inQueue := sets.NewString() + for _, id := range f.queue { + inQueue.Insert(id) + } + for id := range f.items { + if !inQueue.Has(id) { + f.queue = append(f.queue, id) + } + } + if len(f.queue) > 0 { + f.cond.Broadcast() + } + return nil +} + // NewFIFO returns a Store which can be used to queue up items to // process.
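As a quick sketch of how FakeCustomStore is meant to be used in tests (not part of the vendored files; the stubbed function and values are hypothetical), only the operations a test cares about need overriding, and everything else falls back to a no-op:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/client/cache"
)

func main() {
	store := &cache.FakeCustomStore{
		// Only GetByKey is stubbed; Add, Update, Delete, etc. remain no-ops.
		GetByKeyFunc: func(key string) (interface{}, bool, error) {
			if key == "known" {
				return "value-for-known", true, nil
			}
			return nil, false, nil
		},
	}

	item, exists, _ := store.GetByKey("known")
	fmt.Println(item, exists) // value-for-known true

	_, exists, _ = store.GetByKey("missing")
	fmt.Println(exists) // false

	fmt.Println(store.Add("anything")) // <nil>: un-stubbed operations succeed silently
}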
func NewFIFO(keyFunc KeyFunc) *FIFO { diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/cache/fifo_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/cache/fifo_test.go new file mode 100644 index 000000000000..974fa6d3b738 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/cache/fifo_test.go @@ -0,0 +1,235 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "reflect" + "testing" + "time" +) + +func testFifoObjectKeyFunc(obj interface{}) (string, error) { + return obj.(testFifoObject).name, nil +} + +type testFifoObject struct { + name string + val interface{} +} + +func mkFifoObj(name string, val interface{}) testFifoObject { + return testFifoObject{name: name, val: val} +} + +func TestFIFO_basic(t *testing.T) { + f := NewFIFO(testFifoObjectKeyFunc) + const amount = 500 + go func() { + for i := 0; i < amount; i++ { + f.Add(mkFifoObj(string([]rune{'a', rune(i)}), i+1)) + } + }() + go func() { + for u := uint64(0); u < amount; u++ { + f.Add(mkFifoObj(string([]rune{'b', rune(u)}), u+1)) + } + }() + + lastInt := int(0) + lastUint := uint64(0) + for i := 0; i < amount*2; i++ { + switch obj := f.Pop().(testFifoObject).val.(type) { + case int: + if obj <= lastInt { + t.Errorf("got %v (int) out of order, last was %v", obj, lastInt) + } + lastInt = obj + case uint64: + if obj <= lastUint { + t.Errorf("got %v (uint) out of order, last was %v", obj, lastUint) + } else { + lastUint = obj + } + default: + t.Fatalf("unexpected type %#v", obj) + } + } +} + +func TestFIFO_addUpdate(t *testing.T) { + f := NewFIFO(testFifoObjectKeyFunc) + f.Add(mkFifoObj("foo", 10)) + f.Update(mkFifoObj("foo", 15)) + + if e, a := []interface{}{mkFifoObj("foo", 15)}, f.List(); !reflect.DeepEqual(e, a) { + t.Errorf("Expected %+v, got %+v", e, a) + } + if e, a := []string{"foo"}, f.ListKeys(); !reflect.DeepEqual(e, a) { + t.Errorf("Expected %+v, got %+v", e, a) + } + + got := make(chan testFifoObject, 2) + go func() { + for { + got <- f.Pop().(testFifoObject) + } + }() + + first := <-got + if e, a := 15, first.val; e != a { + t.Errorf("Didn't get updated value (%v), got %v", e, a) + } + select { + case unexpected := <-got: + t.Errorf("Got second value %v", unexpected.val) + case <-time.After(50 * time.Millisecond): + } + _, exists, _ := f.Get(mkFifoObj("foo", "")) + if exists { + t.Errorf("item did not get removed") + } +} + +func TestFIFO_addReplace(t *testing.T) { + f := NewFIFO(testFifoObjectKeyFunc) + f.Add(mkFifoObj("foo", 10)) + f.Replace([]interface{}{mkFifoObj("foo", 15)}, "15") + got := make(chan testFifoObject, 2) + go func() { + for { + got <- f.Pop().(testFifoObject) + } + }() + + first := <-got + if e, a := 15, first.val; e != a { + t.Errorf("Didn't get updated value (%v), got %v", e, a) + } + select { + case unexpected := <-got: + t.Errorf("Got second value %v", unexpected.val) + case <-time.After(50 * time.Millisecond): + } + _, exists, _ := f.Get(mkFifoObj("foo", "")) + if exists { + 
t.Errorf("item did not get removed") + } +} + +func TestFIFO_detectLineJumpers(t *testing.T) { + f := NewFIFO(testFifoObjectKeyFunc) + + f.Add(mkFifoObj("foo", 10)) + f.Add(mkFifoObj("bar", 1)) + f.Add(mkFifoObj("foo", 11)) + f.Add(mkFifoObj("foo", 13)) + f.Add(mkFifoObj("zab", 30)) + + if e, a := 13, f.Pop().(testFifoObject).val; a != e { + t.Fatalf("expected %d, got %d", e, a) + } + + f.Add(mkFifoObj("foo", 14)) // ensure foo doesn't jump back in line + + if e, a := 1, f.Pop().(testFifoObject).val; a != e { + t.Fatalf("expected %d, got %d", e, a) + } + + if e, a := 30, f.Pop().(testFifoObject).val; a != e { + t.Fatalf("expected %d, got %d", e, a) + } + + if e, a := 14, f.Pop().(testFifoObject).val; a != e { + t.Fatalf("expected %d, got %d", e, a) + } +} + +func TestFIFO_addIfNotPresent(t *testing.T) { + f := NewFIFO(testFifoObjectKeyFunc) + + f.Add(mkFifoObj("a", 1)) + f.Add(mkFifoObj("b", 2)) + f.AddIfNotPresent(mkFifoObj("b", 3)) + f.AddIfNotPresent(mkFifoObj("c", 4)) + + if e, a := 3, len(f.items); a != e { + t.Fatalf("expected queue length %d, got %d", e, a) + } + + expectedValues := []int{1, 2, 4} + for _, expected := range expectedValues { + if actual := f.Pop().(testFifoObject).val; actual != expected { + t.Fatalf("expected value %d, got %d", expected, actual) + } + } +} + +func TestFIFO_HasSynced(t *testing.T) { + tests := []struct { + actions []func(f *FIFO) + expectedSynced bool + }{ + { + actions: []func(f *FIFO){}, + expectedSynced: false, + }, + { + actions: []func(f *FIFO){ + func(f *FIFO) { f.Add(mkFifoObj("a", 1)) }, + }, + expectedSynced: true, + }, + { + actions: []func(f *FIFO){ + func(f *FIFO) { f.Replace([]interface{}{}, "0") }, + }, + expectedSynced: true, + }, + { + actions: []func(f *FIFO){ + func(f *FIFO) { f.Replace([]interface{}{mkFifoObj("a", 1), mkFifoObj("b", 2)}, "0") }, + }, + expectedSynced: false, + }, + { + actions: []func(f *FIFO){ + func(f *FIFO) { f.Replace([]interface{}{mkFifoObj("a", 1), mkFifoObj("b", 2)}, "0") }, + func(f *FIFO) { f.Pop() }, + }, + expectedSynced: false, + }, + { + actions: []func(f *FIFO){ + func(f *FIFO) { f.Replace([]interface{}{mkFifoObj("a", 1), mkFifoObj("b", 2)}, "0") }, + func(f *FIFO) { f.Pop() }, + func(f *FIFO) { f.Pop() }, + }, + expectedSynced: true, + }, + } + + for i, test := range tests { + f := NewFIFO(testFifoObjectKeyFunc) + + for _, action := range test.actions { + action(f) + } + if e, a := test.expectedSynced, f.HasSynced(); a != e { + t.Errorf("test case %v failed, expected: %v , got %v", i, e, a) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/cache/index.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/cache/index.go index a0c0c288c79c..572f2c06b69e 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/cache/index.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/cache/index.go @@ -32,6 +32,12 @@ type Indexer interface { ListIndexFuncValues(indexName string) []string // ByIndex lists object that match on the named indexing function with the exact key ByIndex(indexName, indexKey string) ([]interface{}, error) + // GetIndexer return the indexers + GetIndexers() Indexers + + // AddIndexers adds more indexers to this store. If you call this after you already have data + // in the store, the results are undefined. + AddIndexers(newIndexers Indexers) error } // IndexFunc knows how to provide an indexed value for an object. 
@@ -53,6 +59,10 @@ func IndexFuncToKeyFuncAdapter(indexFunc IndexFunc) KeyFunc { } } +const ( + NamespaceIndex string = "namespace" +) + // MetaNamespaceIndexFunc is a default index function that indexes based on an object's namespace func MetaNamespaceIndexFunc(obj interface{}) ([]string, error) { meta, err := meta.Accessor(obj) diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/cache/index_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/cache/index_test.go new file mode 100644 index 000000000000..4b0d5ff4f9e6 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/cache/index_test.go @@ -0,0 +1,135 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "strings" + "testing" + + "k8s.io/kubernetes/pkg/api" +) + +func testIndexFunc(obj interface{}) ([]string, error) { + pod := obj.(*api.Pod) + return []string{pod.Labels["foo"]}, nil +} + +func TestGetIndexFuncValues(t *testing.T) { + index := NewIndexer(MetaNamespaceKeyFunc, Indexers{"testmodes": testIndexFunc}) + + pod1 := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "one", Labels: map[string]string{"foo": "bar"}}} + pod2 := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "two", Labels: map[string]string{"foo": "bar"}}} + pod3 := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "tre", Labels: map[string]string{"foo": "biz"}}} + + index.Add(pod1) + index.Add(pod2) + index.Add(pod3) + + keys := index.ListIndexFuncValues("testmodes") + if len(keys) != 2 { + t.Errorf("Expected 2 keys but got %v", len(keys)) + } + + for _, key := range keys { + if key != "bar" && key != "biz" { + t.Errorf("Expected only 'bar' or 'biz' but got %s", key) + } + } +} + +func testUsersIndexFunc(obj interface{}) ([]string, error) { + pod := obj.(*api.Pod) + usersString := pod.Annotations["users"] + + return strings.Split(usersString, ","), nil +} + +func TestMultiIndexKeys(t *testing.T) { + index := NewIndexer(MetaNamespaceKeyFunc, Indexers{"byUser": testUsersIndexFunc}) + + pod1 := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "one", Annotations: map[string]string{"users": "ernie,bert"}}} + pod2 := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "two", Annotations: map[string]string{"users": "bert,oscar"}}} + pod3 := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "tre", Annotations: map[string]string{"users": "ernie,elmo"}}} + + index.Add(pod1) + index.Add(pod2) + index.Add(pod3) + + erniePods, err := index.ByIndex("byUser", "ernie") + if err != nil { + t.Errorf("unexpected error: %v", err) + } + if len(erniePods) != 2 { + t.Errorf("Expected 2 pods but got %v", len(erniePods)) + } + + bertPods, err := index.ByIndex("byUser", "bert") + if err != nil { + t.Errorf("unexpected error: %v", err) + } + if len(bertPods) != 2 { + t.Errorf("Expected 2 pods but got %v", len(bertPods)) + } + + oscarPods, err := index.ByIndex("byUser", "oscar") + if err != nil { + t.Errorf("unexpected error: %v", err) + } + if len(oscarPods) != 1 { + t.Errorf("Expected 1 pod but got %v", len(oscarPods)) + } + +
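+ // Index (unlike ByIndex) is queried with an example object rather than a + // key: pod1 indexes to both "ernie" and "bert", so Index returns the union + // of the two match sets, which is all three pods. +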
ernieAndBertKeys, err := index.Index("byUser", pod1) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + if len(ernieAndBertKeys) != 3 { + t.Errorf("Expected 3 pods but got %v", len(ernieAndBertKeys)) + } + + index.Delete(pod3) + erniePods, err = index.ByIndex("byUser", "ernie") + if err != nil { + t.Errorf("unexpected error: %v", err) + } + if len(erniePods) != 1 { + t.Errorf("Expected 1 pod but got %v", len(erniePods)) + } + elmoPods, err := index.ByIndex("byUser", "elmo") + if err != nil { + t.Errorf("unexpected error: %v", err) + } + if len(elmoPods) != 0 { + t.Errorf("Expected 0 pods but got %v", len(elmoPods)) + } + + obj, err := api.Scheme.DeepCopy(pod2) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + copyOfPod2 := obj.(*api.Pod) + copyOfPod2.Annotations["users"] = "oscar" + index.Update(copyOfPod2) + bertPods, err = index.ByIndex("byUser", "bert") + if err != nil { + t.Errorf("unexpected error: %v", err) + } + if len(bertPods) != 1 { + t.Errorf("Expected 1 pod but got %v", len(bertPods)) + } + +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/cache/listers.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/cache/listers.go index 3963a6941afe..1ba0be3a6be0 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/cache/listers.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/cache/listers.go @@ -22,6 +22,8 @@ import ( "github.com/golang/glog" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/apis/apps" + "k8s.io/kubernetes/pkg/apis/batch" "k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/labels" ) @@ -39,7 +41,7 @@ import ( // l := StoreToPodLister{s} // l.List() type StoreToPodLister struct { - Store + Indexer } // Please note that selector is filtering among the pods that have gotten into @@ -52,7 +54,7 @@ func (s *StoreToPodLister) List(selector labels.Selector) (pods []*api.Pod, err // s.Pods(api.NamespaceAll).List(selector), however then we'd have to // remake the list.Items as a []*api.Pod. So leave this separate for // now. - for _, m := range s.Store.List() { + for _, m := range s.Indexer.List() { pod := m.(*api.Pod) if selector.Matches(labels.Set(pod.Labels)) { pods = append(pods, pod) @@ -63,33 +65,55 @@ func (s *StoreToPodLister) List(selector labels.Selector) (pods []*api.Pod, err // Pods is taking baby steps to be more like the api in pkg/client func (s *StoreToPodLister) Pods(namespace string) storePodsNamespacer { - return storePodsNamespacer{s.Store, namespace} + return storePodsNamespacer{s.Indexer, namespace} } type storePodsNamespacer struct { - store Store + indexer Indexer namespace string } // Please note that selector is filtering among the pods that have gotten into // the store; there may have been some filtering that already happened before // that.
-func (s storePodsNamespacer) List(selector labels.Selector) (pods api.PodList, err error) { - list := api.PodList{} - for _, m := range s.store.List() { - pod := m.(*api.Pod) - if s.namespace == api.NamespaceAll || s.namespace == pod.Namespace { +func (s storePodsNamespacer) List(selector labels.Selector) (api.PodList, error) { + pods := api.PodList{} + + if s.namespace == api.NamespaceAll { + for _, m := range s.indexer.List() { + pod := m.(*api.Pod) if selector.Matches(labels.Set(pod.Labels)) { - list.Items = append(list.Items, *pod) + pods.Items = append(pods.Items, *pod) + } + } + return pods, nil + } + + key := &api.Pod{ObjectMeta: api.ObjectMeta{Namespace: s.namespace}} + items, err := s.indexer.Index(NamespaceIndex, key) + if err != nil { + // Ignore error; do slow search without index. + glog.Warningf("cannot retrieve list of objects using index: %v", err) + for _, m := range s.indexer.List() { + pod := m.(*api.Pod) + if s.namespace == pod.Namespace && selector.Matches(labels.Set(pod.Labels)) { + pods.Items = append(pods.Items, *pod) } } + return pods, nil + } + for _, m := range items { + pod := m.(*api.Pod) + if selector.Matches(labels.Set(pod.Labels)) { + pods.Items = append(pods.Items, *pod) + } } - return list, nil + return pods, nil } // Exists returns true if a pod matching the namespace/name of the given pod exists in the store. func (s *StoreToPodLister) Exists(pod *api.Pod) (bool, error) { - _, exists, err := s.Store.Get(pod) + _, exists, err := s.Indexer.Get(pod) if err != nil { return false, err } @@ -141,12 +165,12 @@ func (s storeToNodeConditionLister) List() (nodes api.NodeList, err error) { // StoreToReplicationControllerLister gives a store List and Exists methods. The store must contain only ReplicationControllers. type StoreToReplicationControllerLister struct { - Store + Indexer } // Exists checks if the given rc exists in the store. func (s *StoreToReplicationControllerLister) Exists(controller *api.ReplicationController) (bool, error) { - _, exists, err := s.Store.Get(controller) + _, exists, err := s.Indexer.Get(controller) if err != nil { return false, err } @@ -156,31 +180,54 @@ func (s *StoreToReplicationControllerLister) Exists(controller *api.ReplicationC // StoreToReplicationControllerLister lists all controllers in the store.
// TODO: converge on the interface in pkg/client func (s *StoreToReplicationControllerLister) List() (controllers []api.ReplicationController, err error) { - for _, c := range s.Store.List() { + for _, c := range s.Indexer.List() { controllers = append(controllers, *(c.(*api.ReplicationController))) } return controllers, nil } func (s *StoreToReplicationControllerLister) ReplicationControllers(namespace string) storeReplicationControllersNamespacer { - return storeReplicationControllersNamespacer{s.Store, namespace} + return storeReplicationControllersNamespacer{s.Indexer, namespace} } type storeReplicationControllersNamespacer struct { - store Store + indexer Indexer namespace string } -func (s storeReplicationControllersNamespacer) List(selector labels.Selector) (controllers []api.ReplicationController, err error) { - for _, c := range s.store.List() { - rc := *(c.(*api.ReplicationController)) - if s.namespace == api.NamespaceAll || s.namespace == rc.Namespace { +func (s storeReplicationControllersNamespacer) List(selector labels.Selector) ([]api.ReplicationController, error) { + controllers := []api.ReplicationController{} + + if s.namespace == api.NamespaceAll { + for _, m := range s.indexer.List() { + rc := *(m.(*api.ReplicationController)) if selector.Matches(labels.Set(rc.Labels)) { controllers = append(controllers, rc) } } + return controllers, nil } - return + + key := &api.ReplicationController{ObjectMeta: api.ObjectMeta{Namespace: s.namespace}} + items, err := s.indexer.Index(NamespaceIndex, key) + if err != nil { + // Ignore error; do slow search without index. + glog.Warningf("cannot retrieve list of objects using index: %v", err) + for _, m := range s.indexer.List() { + rc := *(m.(*api.ReplicationController)) + if s.namespace == rc.Namespace && selector.Matches(labels.Set(rc.Labels)) { + controllers = append(controllers, rc) + } + } + return controllers, nil + } + for _, m := range items { + rc := *(m.(*api.ReplicationController)) + if selector.Matches(labels.Set(rc.Labels)) { + controllers = append(controllers, rc) + } + } + return controllers, nil } // GetPodControllers returns a list of replication controllers managing a pod. Returns an error only if no matching controllers are found. @@ -193,11 +240,14 @@ func (s *StoreToReplicationControllerLister) GetPodControllers(pod *api.Pod) (co return } - for _, m := range s.Store.List() { + key := &api.ReplicationController{ObjectMeta: api.ObjectMeta{Namespace: pod.Namespace}} + items, err := s.Indexer.Index(NamespaceIndex, key) + if err != nil { + return + } + + for _, m := range items { rc = *m.(*api.ReplicationController) - if rc.Namespace != pod.Namespace { - continue - } labelSet := labels.Set(rc.Spec.Selector) selector = labels.Set(rc.Spec.Selector).AsSelector() @@ -474,7 +524,7 @@ type StoreToJobLister struct { } // Exists checks if the given job exists in the store. -func (s *StoreToJobLister) Exists(job *extensions.Job) (bool, error) { +func (s *StoreToJobLister) Exists(job *batch.Job) (bool, error) { _, exists, err := s.Store.Get(job) if err != nil { return false, err } @@ -483,17 +533,17 @@ func (s *StoreToJobLister) Exists(job *extensions.Job) (bool, error) { } // StoreToJobLister lists all jobs in the store.
-func (s *StoreToJobLister) List() (jobs extensions.JobList, err error) { +func (s *StoreToJobLister) List() (jobs batch.JobList, err error) { for _, c := range s.Store.List() { - jobs.Items = append(jobs.Items, *(c.(*extensions.Job))) + jobs.Items = append(jobs.Items, *(c.(*batch.Job))) } return jobs, nil } // GetPodJobs returns a list of jobs managing a pod. Returns an error only if no matching jobs are found. -func (s *StoreToJobLister) GetPodJobs(pod *api.Pod) (jobs []extensions.Job, err error) { +func (s *StoreToJobLister) GetPodJobs(pod *api.Pod) (jobs []batch.Job, err error) { var selector labels.Selector - var job extensions.Job + var job batch.Job if len(pod.Labels) == 0 { err = fmt.Errorf("no jobs found for pod %v because it has no labels", pod.Name) @@ -501,7 +551,7 @@ func (s *StoreToJobLister) GetPodJobs(pod *api.Pod) (jobs []extensions.Job, err } for _, m := range s.Store.List() { - job = *m.(*extensions.Job) + job = *m.(*batch.Job) if job.Namespace != pod.Namespace { continue } @@ -556,3 +606,67 @@ func (s *StoreToPVCFetcher) GetPersistentVolumeClaimInfo(namespace string, id st return o.(*api.PersistentVolumeClaim), nil } + +// StoreToPetSetLister gives a store List and Exists methods. The store must contain only PetSets. +type StoreToPetSetLister struct { + Store +} + +// Exists checks if the given PetSet exists in the store. +func (s *StoreToPetSetLister) Exists(ps *apps.PetSet) (bool, error) { + _, exists, err := s.Store.Get(ps) + if err != nil { + return false, err + } + return exists, nil +} + +// List lists all PetSets in the store. +func (s *StoreToPetSetLister) List() (psList []apps.PetSet, err error) { + for _, ps := range s.Store.List() { + psList = append(psList, *(ps.(*apps.PetSet))) + } + return psList, nil +} + +type storePetSetsNamespacer struct { + store Store + namespace string +} + +func (s *StoreToPetSetLister) PetSets(namespace string) storePetSetsNamespacer { + return storePetSetsNamespacer{s.Store, namespace} +} + +// GetPodPetSets returns a list of PetSets managing a pod. Returns an error only if no matching PetSets are found. +func (s *StoreToPetSetLister) GetPodPetSets(pod *api.Pod) (psList []apps.PetSet, err error) { + var selector labels.Selector + var ps apps.PetSet + + if len(pod.Labels) == 0 { + err = fmt.Errorf("no PetSets found for pod %v because it has no labels", pod.Name) + return + } + + for _, m := range s.Store.List() { + ps = *m.(*apps.PetSet) + if ps.Namespace != pod.Namespace { + continue + } + selector, err = unversioned.LabelSelectorAsSelector(ps.Spec.Selector) + if err != nil { + err = fmt.Errorf("invalid selector: %v", err) + return + } + + // If a PetSet with a nil or empty selector creeps in, it should match nothing, not everything. + if selector.Empty() || !selector.Matches(labels.Set(pod.Labels)) { + continue + } + psList = append(psList, ps) + } + if len(psList) == 0 { + err = fmt.Errorf("could not find PetSet for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels) + } + return +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/cache/listers_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/cache/listers_test.go new file mode 100644 index 000000000000..30a60d5d4c3b --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/cache/listers_test.go @@ -0,0 +1,790 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/apis/batch" + "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/pkg/labels" + "k8s.io/kubernetes/pkg/util/sets" +) + +func TestStoreToNodeLister(t *testing.T) { + store := NewStore(MetaNamespaceKeyFunc) + ids := sets.NewString("foo", "bar", "baz") + for id := range ids { + store.Add(&api.Node{ObjectMeta: api.ObjectMeta{Name: id}}) + } + sml := StoreToNodeLister{store} + + gotNodes, err := sml.List() + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + got := make([]string, len(gotNodes.Items)) + for ix := range gotNodes.Items { + got[ix] = gotNodes.Items[ix].Name + } + if !ids.HasAll(got...) || len(got) != len(ids) { + t.Errorf("Expected %v, got %v", ids, got) + } +} + +func TestStoreToNodeConditionLister(t *testing.T) { + store := NewStore(MetaNamespaceKeyFunc) + nodes := []*api.Node{ + { + ObjectMeta: api.ObjectMeta{Name: "foo"}, + Status: api.NodeStatus{ + Conditions: []api.NodeCondition{ + { + Type: api.NodeReady, + Status: api.ConditionTrue, + }, + { + Type: api.NodeOutOfDisk, + Status: api.ConditionFalse, + }, + }, + }, + }, + { + ObjectMeta: api.ObjectMeta{Name: "bar"}, + Status: api.NodeStatus{ + Conditions: []api.NodeCondition{ + { + Type: api.NodeOutOfDisk, + Status: api.ConditionTrue, + }, + }, + }, + }, + { + ObjectMeta: api.ObjectMeta{Name: "baz"}, + Status: api.NodeStatus{ + Conditions: []api.NodeCondition{ + { + Type: api.NodeReady, + Status: api.ConditionFalse, + }, + { + Type: api.NodeOutOfDisk, + Status: api.ConditionUnknown, + }, + }, + }, + }, + } + for _, n := range nodes { + store.Add(n) + } + + predicate := func(node api.Node) bool { + for _, cond := range node.Status.Conditions { + if cond.Type == api.NodeOutOfDisk && cond.Status == api.ConditionTrue { + return false + } + } + return true + } + + snl := StoreToNodeLister{store} + sncl := snl.NodeCondition(predicate) + + want := sets.NewString("foo", "baz") + gotNodes, err := sncl.List() + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + got := make([]string, len(gotNodes.Items)) + for ix := range gotNodes.Items { + got[ix] = gotNodes.Items[ix].Name + } + if !want.HasAll(got...) 
|| len(got) != len(want) { + t.Errorf("Expected %v, got %v", want, got) + } +} + +func TestStoreToReplicationControllerLister(t *testing.T) { + testCases := []struct { + description string + inRCs []*api.ReplicationController + list func(StoreToReplicationControllerLister) ([]api.ReplicationController, error) + outRCNames sets.String + expectErr bool + onlyIfIndexedByNamespace bool + }{ + { + description: "Verify we can search all namespaces", + inRCs: []*api.ReplicationController{ + { + ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: "bar"}, + }, + { + ObjectMeta: api.ObjectMeta{Name: "hmm", Namespace: "hmm"}, + }, + }, + list: func(lister StoreToReplicationControllerLister) ([]api.ReplicationController, error) { + return lister.ReplicationControllers(api.NamespaceAll).List(labels.Set{}.AsSelector()) + }, + outRCNames: sets.NewString("hmm", "foo"), + }, + { + description: "Verify we can search a specific namespace", + inRCs: []*api.ReplicationController{ + { + ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: "bar"}, + }, + { + ObjectMeta: api.ObjectMeta{Name: "hmm", Namespace: "hmm"}, + }, + }, + list: func(lister StoreToReplicationControllerLister) ([]api.ReplicationController, error) { + return lister.ReplicationControllers("hmm").List(labels.Set{}.AsSelector()) + }, + outRCNames: sets.NewString("hmm"), + }, + { + description: "Basic listing with all labels and no selectors", + inRCs: []*api.ReplicationController{ + {ObjectMeta: api.ObjectMeta{Name: "basic"}}, + }, + list: func(lister StoreToReplicationControllerLister) ([]api.ReplicationController, error) { + return lister.List() + }, + outRCNames: sets.NewString("basic"), + }, + { + description: "No pod labels", + inRCs: []*api.ReplicationController{ + { + ObjectMeta: api.ObjectMeta{Name: "basic", Namespace: "ns"}, + Spec: api.ReplicationControllerSpec{ + Selector: map[string]string{"foo": "baz"}, + }, + }, + }, + list: func(lister StoreToReplicationControllerLister) ([]api.ReplicationController, error) { + pod := &api.Pod{ + ObjectMeta: api.ObjectMeta{Name: "pod1", Namespace: "ns"}, + } + return lister.GetPodControllers(pod) + }, + outRCNames: sets.NewString(), + expectErr: true, + }, + { + description: "No RC selectors", + inRCs: []*api.ReplicationController{ + { + ObjectMeta: api.ObjectMeta{Name: "basic", Namespace: "ns"}, + }, + }, + list: func(lister StoreToReplicationControllerLister) ([]api.ReplicationController, error) { + pod := &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: "pod1", + Namespace: "ns", + Labels: map[string]string{"foo": "bar"}, + }, + } + return lister.GetPodControllers(pod) + }, + outRCNames: sets.NewString(), + expectErr: true, + }, + { + description: "Matching labels to selectors and namespace", + inRCs: []*api.ReplicationController{ + { + ObjectMeta: api.ObjectMeta{Name: "foo"}, + Spec: api.ReplicationControllerSpec{ + Selector: map[string]string{"foo": "bar"}, + }, + }, + { + ObjectMeta: api.ObjectMeta{Name: "bar", Namespace: "ns"}, + Spec: api.ReplicationControllerSpec{ + Selector: map[string]string{"foo": "bar"}, + }, + }, + }, + list: func(lister StoreToReplicationControllerLister) ([]api.ReplicationController, error) { + pod := &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: "pod1", + Labels: map[string]string{"foo": "bar"}, + Namespace: "ns", + }, + } + return lister.GetPodControllers(pod) + }, + outRCNames: sets.NewString("bar"), + onlyIfIndexedByNamespace: true, + }, + } + for _, c := range testCases { + for _, withIndex := range []bool{true, false} { + if c.onlyIfIndexedByNamespace && 
!withIndex { + continue + } + var store Indexer + if withIndex { + store = NewIndexer(MetaNamespaceKeyFunc, Indexers{NamespaceIndex: MetaNamespaceIndexFunc}) + } else { + store = NewIndexer(MetaNamespaceKeyFunc, Indexers{}) + } + + for _, r := range c.inRCs { + store.Add(r) + } + + gotControllers, err := c.list(StoreToReplicationControllerLister{store}) + if err != nil && c.expectErr { + continue + } else if c.expectErr { + t.Errorf("(%q, withIndex=%v) Expected error, got none", c.description, withIndex) + continue + } else if err != nil { + t.Errorf("(%q, withIndex=%v) Unexpected error %#v", c.description, withIndex, err) + continue + } + gotNames := make([]string, len(gotControllers)) + for ix := range gotControllers { + gotNames[ix] = gotControllers[ix].Name + } + if !c.outRCNames.HasAll(gotNames...) || len(gotNames) != len(c.outRCNames) { + t.Errorf("(%q, withIndex=%v) Unexpected got controllers %+v expected %+v", c.description, withIndex, gotNames, c.outRCNames) + } + } + } +} + +func TestStoreToReplicaSetLister(t *testing.T) { + store := NewStore(MetaNamespaceKeyFunc) + lister := StoreToReplicaSetLister{store} + testCases := []struct { + inRSs []*extensions.ReplicaSet + list func() ([]extensions.ReplicaSet, error) + outRSNames sets.String + expectErr bool + }{ + // Basic listing with all labels and no selectors + { + inRSs: []*extensions.ReplicaSet{ + {ObjectMeta: api.ObjectMeta{Name: "basic"}}, + }, + list: func() ([]extensions.ReplicaSet, error) { + return lister.List() + }, + outRSNames: sets.NewString("basic"), + }, + // No pod labels + { + inRSs: []*extensions.ReplicaSet{ + { + ObjectMeta: api.ObjectMeta{Name: "basic", Namespace: "ns"}, + Spec: extensions.ReplicaSetSpec{ + Selector: &unversioned.LabelSelector{MatchLabels: map[string]string{"foo": "baz"}}, + }, + }, + }, + list: func() ([]extensions.ReplicaSet, error) { + pod := &api.Pod{ + ObjectMeta: api.ObjectMeta{Name: "pod1", Namespace: "ns"}, + } + return lister.GetPodReplicaSets(pod) + }, + outRSNames: sets.NewString(), + expectErr: true, + }, + // No ReplicaSet selectors + { + inRSs: []*extensions.ReplicaSet{ + { + ObjectMeta: api.ObjectMeta{Name: "basic", Namespace: "ns"}, + }, + }, + list: func() ([]extensions.ReplicaSet, error) { + pod := &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: "pod1", + Namespace: "ns", + Labels: map[string]string{"foo": "bar"}, + }, + } + return lister.GetPodReplicaSets(pod) + }, + outRSNames: sets.NewString(), + expectErr: true, + }, + // Matching labels to selectors and namespace + { + inRSs: []*extensions.ReplicaSet{ + { + ObjectMeta: api.ObjectMeta{Name: "foo"}, + Spec: extensions.ReplicaSetSpec{ + Selector: &unversioned.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}, + }, + }, + { + ObjectMeta: api.ObjectMeta{Name: "bar", Namespace: "ns"}, + Spec: extensions.ReplicaSetSpec{ + Selector: &unversioned.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}, + }, + }, + }, + list: func() ([]extensions.ReplicaSet, error) { + pod := &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: "pod1", + Labels: map[string]string{"foo": "bar"}, + Namespace: "ns", + }, + } + return lister.GetPodReplicaSets(pod) + }, + outRSNames: sets.NewString("bar"), + }, + } + for _, c := range testCases { + for _, r := range c.inRSs { + store.Add(r) + } + + gotRSs, err := c.list() + if err != nil && c.expectErr { + continue + } else if c.expectErr { + t.Error("Expected error, got none") + continue + } else if err != nil { + t.Errorf("Unexpected error %#v", err) + continue + } + gotNames := make([]string, 
len(gotRSs)) + for ix := range gotRSs { + gotNames[ix] = gotRSs[ix].Name + } + if !c.outRSNames.HasAll(gotNames...) || len(gotNames) != len(c.outRSNames) { + t.Errorf("Unexpected got ReplicaSets %+v expected %+v", gotNames, c.outRSNames) + } + } +} + +func TestStoreToDaemonSetLister(t *testing.T) { + store := NewStore(MetaNamespaceKeyFunc) + lister := StoreToDaemonSetLister{store} + testCases := []struct { + inDSs []*extensions.DaemonSet + list func() ([]extensions.DaemonSet, error) + outDaemonSetNames sets.String + expectErr bool + }{ + // Basic listing + { + inDSs: []*extensions.DaemonSet{ + {ObjectMeta: api.ObjectMeta{Name: "basic"}}, + }, + list: func() ([]extensions.DaemonSet, error) { + list, err := lister.List() + return list.Items, err + }, + outDaemonSetNames: sets.NewString("basic"), + }, + // Listing multiple daemon sets + { + inDSs: []*extensions.DaemonSet{ + {ObjectMeta: api.ObjectMeta{Name: "basic"}}, + {ObjectMeta: api.ObjectMeta{Name: "complex"}}, + {ObjectMeta: api.ObjectMeta{Name: "complex2"}}, + }, + list: func() ([]extensions.DaemonSet, error) { + list, err := lister.List() + return list.Items, err + }, + outDaemonSetNames: sets.NewString("basic", "complex", "complex2"), + }, + // No pod labels + { + inDSs: []*extensions.DaemonSet{ + { + ObjectMeta: api.ObjectMeta{Name: "basic", Namespace: "ns"}, + Spec: extensions.DaemonSetSpec{ + Selector: &unversioned.LabelSelector{MatchLabels: map[string]string{"foo": "baz"}}, + }, + }, + }, + list: func() ([]extensions.DaemonSet, error) { + pod := &api.Pod{ + ObjectMeta: api.ObjectMeta{Name: "pod1", Namespace: "ns"}, + } + return lister.GetPodDaemonSets(pod) + }, + outDaemonSetNames: sets.NewString(), + expectErr: true, + }, + // No DS selectors + { + inDSs: []*extensions.DaemonSet{ + { + ObjectMeta: api.ObjectMeta{Name: "basic", Namespace: "ns"}, + }, + }, + list: func() ([]extensions.DaemonSet, error) { + pod := &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: "pod1", + Namespace: "ns", + Labels: map[string]string{"foo": "bar"}, + }, + } + return lister.GetPodDaemonSets(pod) + }, + outDaemonSetNames: sets.NewString(), + expectErr: true, + }, + // Matching labels to selectors and namespace + { + inDSs: []*extensions.DaemonSet{ + { + ObjectMeta: api.ObjectMeta{Name: "foo"}, + Spec: extensions.DaemonSetSpec{ + Selector: &unversioned.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}, + }, + }, + { + ObjectMeta: api.ObjectMeta{Name: "bar", Namespace: "ns"}, + Spec: extensions.DaemonSetSpec{ + Selector: &unversioned.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}, + }, + }, + }, + list: func() ([]extensions.DaemonSet, error) { + pod := &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: "pod1", + Labels: map[string]string{"foo": "bar"}, + Namespace: "ns", + }, + } + return lister.GetPodDaemonSets(pod) + }, + outDaemonSetNames: sets.NewString("bar"), + }, + } + for _, c := range testCases { + for _, r := range c.inDSs { + store.Add(r) + } + + daemonSets, err := c.list() + if err != nil && c.expectErr { + continue + } else if c.expectErr { + t.Error("Expected error, got none") + continue + } else if err != nil { + t.Errorf("Unexpected error %#v", err) + continue + } + daemonSetNames := make([]string, len(daemonSets)) + for ix := range daemonSets { + daemonSetNames[ix] = daemonSets[ix].Name + } + if !c.outDaemonSetNames.HasAll(daemonSetNames...) 
|| len(daemonSetNames) != len(c.outDaemonSetNames) { + t.Errorf("Unexpected got controllers %+v expected %+v", daemonSetNames, c.outDaemonSetNames) + } + } +} + +func TestStoreToJobLister(t *testing.T) { + store := NewStore(MetaNamespaceKeyFunc) + lister := StoreToJobLister{store} + testCases := []struct { + inJobs []*batch.Job + list func() ([]batch.Job, error) + outJobNames sets.String + expectErr bool + msg string + }{ + // Basic listing + { + inJobs: []*batch.Job{ + {ObjectMeta: api.ObjectMeta{Name: "basic"}}, + }, + list: func() ([]batch.Job, error) { + list, err := lister.List() + return list.Items, err + }, + outJobNames: sets.NewString("basic"), + msg: "basic listing failed", + }, + // Listing multiple jobs + { + inJobs: []*batch.Job{ + {ObjectMeta: api.ObjectMeta{Name: "basic"}}, + {ObjectMeta: api.ObjectMeta{Name: "complex"}}, + {ObjectMeta: api.ObjectMeta{Name: "complex2"}}, + }, + list: func() ([]batch.Job, error) { + list, err := lister.List() + return list.Items, err + }, + outJobNames: sets.NewString("basic", "complex", "complex2"), + msg: "listing multiple jobs failed", + }, + // No pod labels + { + inJobs: []*batch.Job{ + { + ObjectMeta: api.ObjectMeta{Name: "basic", Namespace: "ns"}, + Spec: batch.JobSpec{ + Selector: &unversioned.LabelSelector{ + MatchLabels: map[string]string{"foo": "baz"}, + }, + }, + }, + }, + list: func() ([]batch.Job, error) { + pod := &api.Pod{ + ObjectMeta: api.ObjectMeta{Name: "pod", Namespace: "ns"}, + } + return lister.GetPodJobs(pod) + }, + outJobNames: sets.NewString(), + expectErr: true, + msg: "listing jobs failed when pod has no labels: expected error, got none", + }, + // No Job selectors + { + inJobs: []*batch.Job{ + { + ObjectMeta: api.ObjectMeta{Name: "basic", Namespace: "ns"}, + }, + }, + list: func() ([]batch.Job, error) { + pod := &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: "pod", + Namespace: "ns", + Labels: map[string]string{"foo": "bar"}, + }, + } + return lister.GetPodJobs(pod) + }, + outJobNames: sets.NewString(), + expectErr: true, + msg: "listing jobs failed when job has no selector: expected error, got none", + }, + // Matching labels to selectors and namespace + { + inJobs: []*batch.Job{ + { + ObjectMeta: api.ObjectMeta{Name: "foo"}, + Spec: batch.JobSpec{ + Selector: &unversioned.LabelSelector{ + MatchLabels: map[string]string{"foo": "bar"}, + }, + }, + }, + { + ObjectMeta: api.ObjectMeta{Name: "bar", Namespace: "ns"}, + Spec: batch.JobSpec{ + Selector: &unversioned.LabelSelector{ + MatchLabels: map[string]string{"foo": "bar"}, + }, + }, + }, + }, + list: func() ([]batch.Job, error) { + pod := &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: "pod", + Labels: map[string]string{"foo": "bar"}, + Namespace: "ns", + }, + } + return lister.GetPodJobs(pod) + }, + outJobNames: sets.NewString("bar"), + msg: "listing jobs with namespace and selector failed", + }, + // Matching labels to selectors and namespace, error case + { + inJobs: []*batch.Job{ + { + ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: "foo"}, + Spec: batch.JobSpec{ + Selector: &unversioned.LabelSelector{ + MatchLabels: map[string]string{"foo": "bar"}, + }, + }, + }, + { + ObjectMeta: api.ObjectMeta{Name: "bar", Namespace: "bar"}, + Spec: batch.JobSpec{ + Selector: &unversioned.LabelSelector{ + MatchLabels: map[string]string{"foo": "bar"}, + }, + }, + }, + }, + list: func() ([]batch.Job, error) { + pod := &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: "pod", + Labels: map[string]string{"foo": "bar"}, + Namespace: "baz", + }, + } + return 
lister.GetPodJobs(pod) + }, + expectErr: true, + msg: "listing jobs with namespace and selector failed: expected error, got none", + }, + } + for _, c := range testCases { + for _, r := range c.inJobs { + store.Add(r) + } + + Jobs, err := c.list() + if err != nil && c.expectErr { + continue + } else if c.expectErr { + t.Errorf("%v", c.msg) + continue + } else if err != nil { + t.Errorf("Unexpected error %#v", err) + continue + } + JobNames := make([]string, len(Jobs)) + for ix := range Jobs { + JobNames[ix] = Jobs[ix].Name + } + if !c.outJobNames.HasAll(JobNames...) || len(JobNames) != len(c.outJobNames) { + t.Errorf("%v: expected %v, got %v", c.msg, c.outJobNames, JobNames) + } + } +} + +func TestStoreToPodLister(t *testing.T) { + // We test with and without a namespace index, because StoreToPodLister has + // special logic to work on namespaces even when no namespace index is + // present. + stores := []Indexer{ + NewIndexer(MetaNamespaceKeyFunc, Indexers{NamespaceIndex: MetaNamespaceIndexFunc}), + NewIndexer(MetaNamespaceKeyFunc, Indexers{}), + } + for _, store := range stores { + ids := []string{"foo", "bar", "baz"} + for _, id := range ids { + store.Add(&api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: id, + Labels: map[string]string{"name": id}, + }, + }) + } + store.Add(&api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: "quux", + Namespace: api.NamespaceDefault, + Labels: map[string]string{"name": "quux"}, + }, + }) + spl := StoreToPodLister{store} + + // Verify that we can always look up by Namespace. + defaultPods, err := spl.Pods(api.NamespaceDefault).List(labels.Set{}.AsSelector()) + if err != nil { + t.Errorf("Unexpected error: %v", err) + } else if e, a := 1, len(defaultPods.Items); e != a { + t.Errorf("Expected %v, got %v", e, a) + } else if e, a := "quux", defaultPods.Items[0].Name; e != a { + t.Errorf("Expected %v, got %v", e, a) + } + + for _, id := range ids { + got, err := spl.List(labels.Set{"name": id}.AsSelector()) + if err != nil { + t.Errorf("Unexpected error: %v", err) + continue + } + if e, a := 1, len(got); e != a { + t.Errorf("Expected %v, got %v", e, a) + continue + } + if e, a := id, got[0].Name; e != a { + t.Errorf("Expected %v, got %v", e, a) + continue + } + + exists, err := spl.Exists(&api.Pod{ObjectMeta: api.ObjectMeta{Name: id}}) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + if !exists { + t.Errorf("exists returned false for %v", id) + } + } + + exists, err := spl.Exists(&api.Pod{ObjectMeta: api.ObjectMeta{Name: "qux"}}) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + if exists { + t.Error("Unexpected pod exists") + } + } +} + +func TestStoreToServiceLister(t *testing.T) { + store := NewStore(MetaNamespaceKeyFunc) + store.Add(&api.Service{ + ObjectMeta: api.ObjectMeta{Name: "foo"}, + Spec: api.ServiceSpec{ + Selector: map[string]string{}, + }, + }) + store.Add(&api.Service{ObjectMeta: api.ObjectMeta{Name: "bar"}}) + ssl := StoreToServiceLister{store} + + pod := &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: "foopod", + Labels: map[string]string{"role": "foo"}, + }, + } + + services, err := ssl.GetPodServices(pod) + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + + if len(services) != 1 { + t.Fatalf("Expected 1 service, got %v", len(services)) + } + if e, a := "foo", services[0].Name; e != a { + t.Errorf("Expected service %q, got %q", e, a) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/cache/listwatch_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/cache/listwatch_test.go
new file mode 100644 index 000000000000..ddd58cf747c1 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/cache/listwatch_test.go @@ -0,0 +1,172 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "net/http/httptest" + "net/url" + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/client/restclient" + client "k8s.io/kubernetes/pkg/client/unversioned" + "k8s.io/kubernetes/pkg/fields" + utiltesting "k8s.io/kubernetes/pkg/util/testing" +) + +func parseSelectorOrDie(s string) fields.Selector { + selector, err := fields.ParseSelector(s) + if err != nil { + panic(err) + } + return selector +} + +// buildQueryValues is a convenience function that returns a copy of the given query values +func buildQueryValues(query url.Values) url.Values { + v := url.Values{} + if query != nil { + for key, values := range query { + for _, value := range values { + v.Add(key, value) + } + } + } + return v +} + +func buildLocation(resourcePath string, query url.Values) string { + return resourcePath + "?" + query.Encode() +} + +func TestListWatchesCanList(t *testing.T) { + fieldSelectorQueryParamName := unversioned.FieldSelectorQueryParam(testapi.Default.GroupVersion().String()) + table := []struct { + location string + resource string + namespace string + fieldSelector fields.Selector + }{ + // Node + { + location: testapi.Default.ResourcePath("nodes", api.NamespaceAll, ""), + resource: "nodes", + namespace: api.NamespaceAll, + fieldSelector: parseSelectorOrDie(""), + }, + // pod with "assigned" field selector. + { + location: buildLocation( + testapi.Default.ResourcePath("pods", api.NamespaceAll, ""), + buildQueryValues(url.Values{fieldSelectorQueryParamName: []string{"spec.host="}})), + resource: "pods", + namespace: api.NamespaceAll, + fieldSelector: fields.Set{"spec.host": ""}.AsSelector(), + }, + // pod in namespace "foo" + { + location: buildLocation( + testapi.Default.ResourcePath("pods", "foo", ""), + buildQueryValues(url.Values{fieldSelectorQueryParamName: []string{"spec.host="}})), + resource: "pods", + namespace: "foo", + fieldSelector: fields.Set{"spec.host": ""}.AsSelector(), + }, + } + for _, item := range table { + handler := utiltesting.FakeHandler{ + StatusCode: 500, + ResponseBody: "", + T: t, + } + server := httptest.NewServer(&handler) + defer server.Close() + client := client.NewOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}) + lw := NewListWatchFromClient(client, item.resource, item.namespace, item.fieldSelector) + // This test merely tests that the correct request is made.
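+ // The FakeHandler is wired to return 500 with an empty body, so the List + // call below fails; ValidateRequest only checks that the expected URL and + // method were used.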
+ lw.List(api.ListOptions{}) + handler.ValidateRequest(t, item.location, "GET", nil) + } +} + +func TestListWatchesCanWatch(t *testing.T) { + fieldSelectorQueryParamName := unversioned.FieldSelectorQueryParam(testapi.Default.GroupVersion().String()) + table := []struct { + rv string + location string + resource string + namespace string + fieldSelector fields.Selector + }{ + // Node + { + location: buildLocation( + testapi.Default.ResourcePathWithPrefix("watch", "nodes", api.NamespaceAll, ""), + buildQueryValues(url.Values{})), + rv: "", + resource: "nodes", + namespace: api.NamespaceAll, + fieldSelector: parseSelectorOrDie(""), + }, + { + location: buildLocation( + testapi.Default.ResourcePathWithPrefix("watch", "nodes", api.NamespaceAll, ""), + buildQueryValues(url.Values{"resourceVersion": []string{"42"}})), + rv: "42", + resource: "nodes", + namespace: api.NamespaceAll, + fieldSelector: parseSelectorOrDie(""), + }, + // pod with "assigned" field selector. + { + location: buildLocation( + testapi.Default.ResourcePathWithPrefix("watch", "pods", api.NamespaceAll, ""), + buildQueryValues(url.Values{fieldSelectorQueryParamName: []string{"spec.host="}, "resourceVersion": []string{"0"}})), + rv: "0", + resource: "pods", + namespace: api.NamespaceAll, + fieldSelector: fields.Set{"spec.host": ""}.AsSelector(), + }, + // pod with namespace foo and assigned field selector + { + location: buildLocation( + testapi.Default.ResourcePathWithPrefix("watch", "pods", "foo", ""), + buildQueryValues(url.Values{fieldSelectorQueryParamName: []string{"spec.host="}, "resourceVersion": []string{"0"}})), + rv: "0", + resource: "pods", + namespace: "foo", + fieldSelector: fields.Set{"spec.host": ""}.AsSelector(), + }, + } + + for _, item := range table { + handler := utiltesting.FakeHandler{ + StatusCode: 500, + ResponseBody: "", + T: t, + } + server := httptest.NewServer(&handler) + defer server.Close() + client := client.NewOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}) + lw := NewListWatchFromClient(client, item.resource, item.namespace, item.fieldSelector) + // This test merely tests that the correct request is made. + lw.Watch(api.ListOptions{ResourceVersion: item.rv}) + handler.ValidateRequest(t, item.location, "GET", nil) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/cache/reflector.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/cache/reflector.go index 31b076b43e7f..3a5025a28a7e 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/cache/reflector.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/cache/reflector.go @@ -24,7 +24,10 @@ import ( "net" "net/url" "reflect" + "regexp" goruntime "runtime" + "runtime/debug" + "strconv" "strings" "sync" "syscall" @@ -124,46 +127,95 @@ func NewNamedReflector(name string, lw ListerWatcher, expectedType interface{}, // internalPackages are packages that ignored when creating a default reflector name. 
These packages are in the common // call chains to NewReflector, so they'd be low entropy names for reflectors -var internalPackages = []string{"kubernetes/pkg/client/cache/", "kubernetes/pkg/controller/framework/"} +var internalPackages = []string{"kubernetes/pkg/client/cache/", "kubernetes/pkg/controller/framework/", "/runtime/asm_"} // getDefaultReflectorName walks back through the call stack until we find a caller from outside of the ignoredPackages // it returns back a shortpath/filename:line to aid in identification of this reflector when it starts logging func getDefaultReflectorName(ignoredPackages ...string) string { name := "????" -outer: - for i := 1; i < 10; i++ { + const maxStack = 10 + for i := 1; i < maxStack; i++ { _, file, line, ok := goruntime.Caller(i) if !ok { - break - } - for _, ignoredPackage := range ignoredPackages { - if strings.Contains(file, ignoredPackage) { - continue outer + file, line, ok = extractStackCreator() + if !ok { + break } - + i += maxStack } - - pkgLocation := strings.LastIndex(file, "/pkg/") - if pkgLocation >= 0 { - file = file[pkgLocation+1:] + if hasPackage(file, ignoredPackages) { + continue } + + file = trimPackagePrefix(file) name = fmt.Sprintf("%s:%d", file, line) break } - return name } +// hasPackage returns true if the file is in one of the ignored packages. +func hasPackage(file string, ignoredPackages []string) bool { + for _, ignoredPackage := range ignoredPackages { + if strings.Contains(file, ignoredPackage) { + return true + } + } + return false +} + +// trimPackagePrefix reduces duplicate values off the front of a package name. +func trimPackagePrefix(file string) string { + if l := strings.LastIndex(file, "k8s.io/kubernetes/pkg/"); l >= 0 { + return file[l+len("k8s.io/kubernetes/"):] + } + if l := strings.LastIndex(file, "/src/"); l >= 0 { + return file[l+5:] + } + if l := strings.LastIndex(file, "/pkg/"); l >= 0 { + return file[l+1:] + } + return file +} + +var stackCreator = regexp.MustCompile(`(?m)^created by (.*)\n\s+(.*):(\d+) \+0x[[:xdigit:]]+$`) + +// extractStackCreator retrieves the goroutine file and line that launched this stack. Returns false +// if the creator cannot be located. +// TODO: Go does not expose this via runtime https://github.com/golang/go/issues/11440 +func extractStackCreator() (string, int, bool) { + stack := debug.Stack() + matches := stackCreator.FindStringSubmatch(string(stack)) + if matches == nil || len(matches) != 4 { + return "", 0, false + } + line, err := strconv.Atoi(matches[3]) + if err != nil { + return "", 0, false + } + return matches[2], line, true +} + // Run starts a watch and handles watch events. Will restart the watch if it is closed. // Run starts a goroutine and returns immediately. func (r *Reflector) Run() { - go wait.Until(func() { r.ListAndWatch(wait.NeverStop) }, r.period, wait.NeverStop) + glog.V(3).Infof("Starting reflector %v (%s) from %s", r.expectedType, r.resyncPeriod, r.name) + go wait.Until(func() { + if err := r.ListAndWatch(wait.NeverStop); err != nil { + utilruntime.HandleError(err) + } + }, r.period, wait.NeverStop) } // RunUntil starts a watch and handles watch events. Will restart the watch if it is closed. // RunUntil starts a goroutine and returns immediately. It will exit when stopCh is closed.
func (r *Reflector) RunUntil(stopCh <-chan struct{}) { - go wait.Until(func() { r.ListAndWatch(stopCh) }, r.period, stopCh) + glog.V(3).Infof("Starting reflector %v (%s) from %s", r.expectedType, r.resyncPeriod, r.name) + go wait.Until(func() { + if err := r.ListAndWatch(stopCh); err != nil { + utilruntime.HandleError(err) + } + }, r.period, stopCh) } var ( @@ -194,39 +246,11 @@ func (r *Reflector) resyncChan() (<-chan time.Time, func() bool) { return t.C, t.Stop } -// We want to avoid situations when periodic resyncing is breaking the TCP -connection. -// If response`s body is not read to completion before calling body.Close(), -// that TCP connection will not be reused in the future - see #15664 issue -// for more details. -// Thus, we set timeout for watch requests to be smaller than the remaining -// time until next periodic resync and force resyncing ourself to avoid -// breaking TCP connection. -// -// TODO: This should be parametrizable based on server load. -func (r *Reflector) timeoutForWatch() *int64 { - randTimeout := time.Duration(float64(minWatchTimeout) * (rand.Float64() + 1.0)) - timeout := r.nextResync.Sub(r.now()) - timeoutThreshold - if timeout < 0 || randTimeout < timeout { - timeout = randTimeout - } - timeoutSeconds := int64(timeout.Seconds()) - return &timeoutSeconds -} - -// Returns true if we are close enough to next planned periodic resync -// and we can force resyncing ourself now. -func (r *Reflector) canForceResyncNow() bool { - if r.nextResync.IsZero() { - return false - } - return r.now().Add(forceResyncThreshold).After(r.nextResync) -} - // ListAndWatch first lists all items and get the resource version at the moment of call, // and then use the resource version to watch. // It returns error if ListAndWatch didn't even try to initialize watch. func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { + glog.V(3).Infof("Listing and watching %v from %s", r.expectedType, r.name) var resourceVersion string resyncCh, cleanup := r.resyncChan() defer cleanup() @@ -239,11 +263,11 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { if err != nil { return fmt.Errorf("%s: Failed to list %v: %v", r.name, r.expectedType, err) } - metaInterface, err := meta.Accessor(list) + listMetaInterface, err := meta.ListAccessor(list) if err != nil { - return fmt.Errorf("%s: Unable to understand list result %#v", r.name, list) + return fmt.Errorf("%s: Unable to understand list result %#v: %v", r.name, list, err) } - resourceVersion = metaInterface.GetResourceVersion() + resourceVersion = listMetaInterface.GetResourceVersion() items, err := meta.ExtractList(list) if err != nil { return fmt.Errorf("%s: Unable to understand list result %#v (%v)", r.name, list, err) @@ -253,13 +277,33 @@ } r.setLastSyncResourceVersion(resourceVersion) + resyncerrc := make(chan error, 1) + go func() { + for { + select { + case <-resyncCh: + case <-stopCh: + return + } + glog.V(4).Infof("%s: next resync planned for %#v, forcing now", r.name, r.nextResync) + if err := r.store.Resync(); err != nil { + resyncerrc <- err + return + } + cleanup() + resyncCh, cleanup = r.resyncChan() + } + }() + for { - options := api.ListOptions{ + timeoutSeconds := int64(minWatchTimeout.Seconds() * (rand.Float64() + 1.0)) + options = api.ListOptions{ ResourceVersion: resourceVersion, - // We want to avoid situations when resyncing is breaking the TCP connection - // - see comment for 'timeoutForWatch()' for more details.
- TimeoutSeconds: r.timeoutForWatch(), + // We want to avoid situations of hanging watchers. Stop any watchers that do not + // receive any events within the timeout window. + TimeoutSeconds: &timeoutSeconds, + } + w, err := r.listerWatcher.Watch(options) if err != nil { switch err { @@ -284,16 +328,13 @@ } return nil } - if err := r.watchHandler(w, &resourceVersion, resyncCh, stopCh); err != nil { - if err != errorResyncRequested && err != errorStopRequested { + + if err := r.watchHandler(w, &resourceVersion, resyncerrc, stopCh); err != nil { + if err != errorStopRequested { glog.Warningf("%s: watch of %v ended with: %v", r.name, r.expectedType, err) } return nil } - if r.canForceResyncNow() { - glog.V(4).Infof("%s: next resync planned for %#v, forcing now", r.name, r.nextResync) - return nil - } } } @@ -307,7 +348,7 @@ } // watchHandler watches w and keeps *resourceVersion up to date. -func (r *Reflector) watchHandler(w watch.Interface, resourceVersion *string, resyncCh <-chan time.Time, stopCh <-chan struct{}) error { +func (r *Reflector) watchHandler(w watch.Interface, resourceVersion *string, errc chan error, stopCh <-chan struct{}) error { start := time.Now() eventCount := 0 @@ -320,8 +361,8 @@ loop: select { case <-stopCh: return errorStopRequested - case <-resyncCh: - return errorResyncRequested + case err := <-errc: + return err case event, ok := <-w.ResultChan(): if !ok { break loop diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/cache/reflector_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/cache/reflector_test.go new file mode 100644 index 000000000000..d28bdb7b247d --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/cache/reflector_test.go @@ -0,0 +1,389 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package cache + +import ( + "errors" + "fmt" + "math/rand" + "strconv" + "testing" + "time" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/util/wait" + "k8s.io/kubernetes/pkg/watch" +) + +var nevererrc chan error + +type testLW struct { + ListFunc func() (runtime.Object, error) + WatchFunc func(options api.ListOptions) (watch.Interface, error) +} + +func (t *testLW) List(options api.ListOptions) (runtime.Object, error) { + return t.ListFunc() +} +func (t *testLW) Watch(options api.ListOptions) (watch.Interface, error) { + return t.WatchFunc(options) +} + +func TestCloseWatchChannelOnError(t *testing.T) { + r := NewReflector(&testLW{}, &api.Pod{}, NewStore(MetaNamespaceKeyFunc), 0) + pod := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "bar"}} + fw := watch.NewFake() + r.listerWatcher = &testLW{ + WatchFunc: func(options api.ListOptions) (watch.Interface, error) { + return fw, nil + }, + ListFunc: func() (runtime.Object, error) { + return &api.PodList{ListMeta: unversioned.ListMeta{ResourceVersion: "1"}}, nil + }, + } + go r.ListAndWatch(wait.NeverStop) + fw.Error(pod) + select { + case _, ok := <-fw.ResultChan(): + if ok { + t.Errorf("Watch channel left open after cancellation") + } + case <-time.After(wait.ForeverTestTimeout): + t.Errorf("the cancellation is at least %s late", wait.ForeverTestTimeout.String()) + break + } +} + +func TestRunUntil(t *testing.T) { + stopCh := make(chan struct{}) + store := NewStore(MetaNamespaceKeyFunc) + r := NewReflector(&testLW{}, &api.Pod{}, store, 0) + fw := watch.NewFake() + r.listerWatcher = &testLW{ + WatchFunc: func(options api.ListOptions) (watch.Interface, error) { + return fw, nil + }, + ListFunc: func() (runtime.Object, error) { + return &api.PodList{ListMeta: unversioned.ListMeta{ResourceVersion: "1"}}, nil + }, + } + r.RunUntil(stopCh) + // Synchronously add a dummy pod into the watch channel so we + // know the RunUntil go routine is in the watch handler. + fw.Add(&api.Pod{ObjectMeta: api.ObjectMeta{Name: "bar"}}) + close(stopCh) + select { + case _, ok := <-fw.ResultChan(): + if ok { + t.Errorf("Watch channel left open after stopping the watch") + } + case <-time.After(wait.ForeverTestTimeout): + t.Errorf("the cancellation is at least %s late", wait.ForeverTestTimeout.String()) + break + } +} + +func TestReflectorResyncChan(t *testing.T) { + s := NewStore(MetaNamespaceKeyFunc) + g := NewReflector(&testLW{}, &api.Pod{}, s, time.Millisecond) + a, _ := g.resyncChan() + b := time.After(wait.ForeverTestTimeout) + select { + case <-a: + t.Logf("got timeout as expected") + case <-b: + t.Errorf("resyncChan() is at least 99 milliseconds late??") + } +} + +func BenchmarkReflectorResyncChanMany(b *testing.B) { + s := NewStore(MetaNamespaceKeyFunc) + g := NewReflector(&testLW{}, &api.Pod{}, s, 25*time.Millisecond) + // The improvement to this (calling the timer's Stop() method) makes + // this benchmark about 40% faster. 
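+	// (A hedged design note: time.After keeps its underlying runtime timer live until it fires, so a resync channel abandoned without Stop() would pin one timer per call; resyncChan returning t.Stop lets callers such as this loop release the timer immediately.)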
+ for i := 0; i < b.N; i++ { + g.resyncPeriod = time.Duration(rand.Float64() * float64(time.Millisecond) * 25) + _, stop := g.resyncChan() + stop() + } +} + +func TestReflectorWatchHandlerError(t *testing.T) { + s := NewStore(MetaNamespaceKeyFunc) + g := NewReflector(&testLW{}, &api.Pod{}, s, 0) + fw := watch.NewFake() + go func() { + fw.Stop() + }() + var resumeRV string + err := g.watchHandler(fw, &resumeRV, nevererrc, wait.NeverStop) + if err == nil { + t.Errorf("unexpected non-error") + } +} + +func TestReflectorWatchHandler(t *testing.T) { + s := NewStore(MetaNamespaceKeyFunc) + g := NewReflector(&testLW{}, &api.Pod{}, s, 0) + fw := watch.NewFake() + s.Add(&api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}}) + s.Add(&api.Pod{ObjectMeta: api.ObjectMeta{Name: "bar"}}) + go func() { + fw.Add(&api.Service{ObjectMeta: api.ObjectMeta{Name: "rejected"}}) + fw.Delete(&api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}}) + fw.Modify(&api.Pod{ObjectMeta: api.ObjectMeta{Name: "bar", ResourceVersion: "55"}}) + fw.Add(&api.Pod{ObjectMeta: api.ObjectMeta{Name: "baz", ResourceVersion: "32"}}) + fw.Stop() + }() + var resumeRV string + err := g.watchHandler(fw, &resumeRV, nevererrc, wait.NeverStop) + if err != nil { + t.Errorf("unexpected error %v", err) + } + + mkPod := func(id string, rv string) *api.Pod { + return &api.Pod{ObjectMeta: api.ObjectMeta{Name: id, ResourceVersion: rv}} + } + + table := []struct { + Pod *api.Pod + exists bool + }{ + {mkPod("foo", ""), false}, + {mkPod("rejected", ""), false}, + {mkPod("bar", "55"), true}, + {mkPod("baz", "32"), true}, + } + for _, item := range table { + obj, exists, _ := s.Get(item.Pod) + if e, a := item.exists, exists; e != a { + t.Errorf("%v: expected %v, got %v", item.Pod, e, a) + } + if !exists { + continue + } + if e, a := item.Pod.ResourceVersion, obj.(*api.Pod).ResourceVersion; e != a { + t.Errorf("%v: expected %v, got %v", item.Pod, e, a) + } + } + + // RV should send the last version we see. + if e, a := "32", resumeRV; e != a { + t.Errorf("expected %v, got %v", e, a) + } + + // last sync resource version should be the last version synced with store + if e, a := "32", g.LastSyncResourceVersion(); e != a { + t.Errorf("expected %v, got %v", e, a) + } +} + +func TestReflectorStopWatch(t *testing.T) { + s := NewStore(MetaNamespaceKeyFunc) + g := NewReflector(&testLW{}, &api.Pod{}, s, 0) + fw := watch.NewFake() + var resumeRV string + stopWatch := make(chan struct{}, 1) + stopWatch <- struct{}{} + err := g.watchHandler(fw, &resumeRV, nevererrc, stopWatch) + if err != errorStopRequested { + t.Errorf("expected stop error, got %q", err) + } +} + +func TestReflectorListAndWatch(t *testing.T) { + createdFakes := make(chan *watch.FakeWatcher) + + // The ListFunc says that it's at revision 1. Therefore, we expect our WatchFunc + // to get called at the beginning of the watch with 1, and again with 3 when we + // inject an error. + expectedRVs := []string{"1", "3"} + lw := &testLW{ + WatchFunc: func(options api.ListOptions) (watch.Interface, error) { + rv := options.ResourceVersion + fw := watch.NewFake() + if e, a := expectedRVs[0], rv; e != a { + t.Errorf("Expected rv %v, but got %v", e, a) + } + expectedRVs = expectedRVs[1:] + // channel is not buffered because the for loop below needs to block. But + // we don't want to block here, so report the new fake via a go routine. 
+ go func() { createdFakes <- fw }() + return fw, nil + }, + ListFunc: func() (runtime.Object, error) { + return &api.PodList{ListMeta: unversioned.ListMeta{ResourceVersion: "1"}}, nil + }, + } + s := NewFIFO(MetaNamespaceKeyFunc) + r := NewReflector(lw, &api.Pod{}, s, 0) + go r.ListAndWatch(wait.NeverStop) + + ids := []string{"foo", "bar", "baz", "qux", "zoo"} + var fw *watch.FakeWatcher + for i, id := range ids { + if fw == nil { + fw = <-createdFakes + } + sendingRV := strconv.FormatUint(uint64(i+2), 10) + fw.Add(&api.Pod{ObjectMeta: api.ObjectMeta{Name: id, ResourceVersion: sendingRV}}) + if sendingRV == "3" { + // Inject a failure. + fw.Stop() + fw = nil + } + } + + // Verify we received the right ids with the right resource versions. + for i, id := range ids { + pod := s.Pop().(*api.Pod) + if e, a := id, pod.Name; e != a { + t.Errorf("%v: Expected %v, got %v", i, e, a) + } + if e, a := strconv.FormatUint(uint64(i+2), 10), pod.ResourceVersion; e != a { + t.Errorf("%v: Expected %v, got %v", i, e, a) + } + } + + if len(expectedRVs) != 0 { + t.Error("called watchStarter an unexpected number of times") + } +} + +func TestReflectorListAndWatchWithErrors(t *testing.T) { + mkPod := func(id string, rv string) *api.Pod { + return &api.Pod{ObjectMeta: api.ObjectMeta{Name: id, ResourceVersion: rv}} + } + mkList := func(rv string, pods ...*api.Pod) *api.PodList { + list := &api.PodList{ListMeta: unversioned.ListMeta{ResourceVersion: rv}} + for _, pod := range pods { + list.Items = append(list.Items, *pod) + } + return list + } + table := []struct { + list *api.PodList + listErr error + events []watch.Event + watchErr error + }{ + { + list: mkList("1"), + events: []watch.Event{ + {watch.Added, mkPod("foo", "2")}, + {watch.Added, mkPod("bar", "3")}, + }, + }, { + list: mkList("3", mkPod("foo", "2"), mkPod("bar", "3")), + events: []watch.Event{ + {watch.Deleted, mkPod("foo", "4")}, + {watch.Added, mkPod("qux", "5")}, + }, + }, { + listErr: fmt.Errorf("a list error"), + }, { + list: mkList("5", mkPod("bar", "3"), mkPod("qux", "5")), + watchErr: fmt.Errorf("a watch error"), + }, { + list: mkList("5", mkPod("bar", "3"), mkPod("qux", "5")), + events: []watch.Event{ + {watch.Added, mkPod("baz", "6")}, + }, + }, { + list: mkList("6", mkPod("bar", "3"), mkPod("qux", "5"), mkPod("baz", "6")), + }, + } + + s := NewFIFO(MetaNamespaceKeyFunc) + for line, item := range table { + if item.list != nil { + // Test that the list is what currently exists in the store. 
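+			// (The check below builds a name->resourceVersion map from the store's current contents and compares it against item.list in both directions: every listed pod must be present with a matching version, and no extra entries may remain.)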
+ current := s.List() + checkMap := map[string]string{} + for _, item := range current { + pod := item.(*api.Pod) + checkMap[pod.Name] = pod.ResourceVersion + } + for _, pod := range item.list.Items { + if e, a := pod.ResourceVersion, checkMap[pod.Name]; e != a { + t.Errorf("%v: expected %v, got %v for pod %v", line, e, a, pod.Name) + } + } + if e, a := len(item.list.Items), len(checkMap); e != a { + t.Errorf("%v: expected %v, got %v", line, e, a) + } + } + watchRet, watchErr := item.events, item.watchErr + lw := &testLW{ + WatchFunc: func(options api.ListOptions) (watch.Interface, error) { + if watchErr != nil { + return nil, watchErr + } + watchErr = fmt.Errorf("second watch") + fw := watch.NewFake() + go func() { + for _, e := range watchRet { + fw.Action(e.Type, e.Object) + } + fw.Stop() + }() + return fw, nil + }, + ListFunc: func() (runtime.Object, error) { + return item.list, item.listErr + }, + } + r := NewReflector(lw, &api.Pod{}, s, 0) + r.ListAndWatch(wait.NeverStop) + } +} + +func TestReflectorResync(t *testing.T) { + iteration := 0 + stopCh := make(chan struct{}) + rerr := errors.New("expected resync reached") + s := &FakeCustomStore{ + ResyncFunc: func() error { + iteration++ + if iteration == 2 { + return rerr + } + return nil + }, + } + + lw := &testLW{ + WatchFunc: func(options api.ListOptions) (watch.Interface, error) { + fw := watch.NewFake() + return fw, nil + }, + ListFunc: func() (runtime.Object, error) { + return &api.PodList{ListMeta: unversioned.ListMeta{ResourceVersion: "0"}}, nil + }, + } + resyncPeriod := 1 * time.Millisecond + r := NewReflector(lw, &api.Pod{}, s, resyncPeriod) + if err := r.ListAndWatch(stopCh); err != nil { + // the error from Resync is not propagated up to here, so expect nil. + t.Errorf("unexpected error %v", err) + } + if iteration != 2 { + t.Errorf("exactly 2 iterations were expected, got: %v", iteration) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/cache/store.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/cache/store.go index 17a360f8e424..71115f2ce5dd 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/cache/store.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/cache/store.go @@ -44,6 +44,7 @@ type Store interface { // given list. Store takes ownership of the list, you should not reference // it after calling this function. Replace([]interface{}, string) error + Resync() error } // KeyFunc knows how to make a key from an object. Implementations should be deterministic. @@ -160,6 +161,11 @@ func (c *cache) ListKeys() []string { return c.cacheStorage.ListKeys() } +// GetIndexers returns the indexers of the cache +func (c *cache) GetIndexers() Indexers { + return c.cacheStorage.GetIndexers() +} + // Index returns a list of items that match on the index function // Index is thread-safe so long as you treat all items as immutable func (c *cache) Index(indexName string, obj interface{}) ([]interface{}, error) { @@ -175,6 +181,10 @@ func (c *cache) ByIndex(indexName, indexKey string) ([]interface{}, error) { return c.cacheStorage.ByIndex(indexName, indexKey) } +func (c *cache) AddIndexers(newIndexers Indexers) error { + return c.cacheStorage.AddIndexers(newIndexers) +} + // Get returns the requested item, or sets exists=false. // Get is completely threadsafe as long as you treat all items as immutable.
func (c *cache) Get(obj interface{}) (item interface{}, exists bool, err error) { @@ -208,6 +218,11 @@ func (c *cache) Replace(list []interface{}, resourceVersion string) error { return nil } +// Resync touches all items in the store to force processing +func (c *cache) Resync() error { + return c.cacheStorage.Resync() +} + // NewStore returns a Store implemented simply with a map and a lock. func NewStore(keyFunc KeyFunc) Store { return &cache{ diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/cache/store_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/cache/store_test.go new file mode 100644 index 000000000000..07275f493de4 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/cache/store_test.go @@ -0,0 +1,156 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "testing" + + "k8s.io/kubernetes/pkg/util/sets" +) + +// Test public interface +func doTestStore(t *testing.T, store Store) { + mkObj := func(id string, val string) testStoreObject { + return testStoreObject{id: id, val: val} + } + + store.Add(mkObj("foo", "bar")) + if item, ok, _ := store.Get(mkObj("foo", "")); !ok { + t.Errorf("didn't find inserted item") + } else { + if e, a := "bar", item.(testStoreObject).val; e != a { + t.Errorf("expected %v, got %v", e, a) + } + } + store.Update(mkObj("foo", "baz")) + if item, ok, _ := store.Get(mkObj("foo", "")); !ok { + t.Errorf("didn't find inserted item") + } else { + if e, a := "baz", item.(testStoreObject).val; e != a { + t.Errorf("expected %v, got %v", e, a) + } + } + store.Delete(mkObj("foo", "")) + if _, ok, _ := store.Get(mkObj("foo", "")); ok { + t.Errorf("found deleted item??") + } + + // Test List. + store.Add(mkObj("a", "b")) + store.Add(mkObj("c", "d")) + store.Add(mkObj("e", "e")) + { + found := sets.String{} + for _, item := range store.List() { + found.Insert(item.(testStoreObject).val) + } + if !found.HasAll("b", "d", "e") { + t.Errorf("missing items, found: %v", found) + } + if len(found) != 3 { + t.Errorf("extra items") + } + } + + // Test Replace. 
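+	// (Replace must atomically swap the full contents: the "a", "c", and "e" entries added above should be gone afterwards, leaving only "foo" and "bar".)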
+ store.Replace([]interface{}{ + mkObj("foo", "foo"), + mkObj("bar", "bar"), + }, "0") + + { + found := sets.String{} + for _, item := range store.List() { + found.Insert(item.(testStoreObject).val) + } + if !found.HasAll("foo", "bar") { + t.Errorf("missing items") + } + if len(found) != 2 { + t.Errorf("extra items") + } + } +} + +// Test public interface +func doTestIndex(t *testing.T, indexer Indexer) { + mkObj := func(id string, val string) testStoreObject { + return testStoreObject{id: id, val: val} + } + + // Test Index + expected := map[string]sets.String{} + expected["b"] = sets.NewString("a", "c") + expected["f"] = sets.NewString("e") + expected["h"] = sets.NewString("g") + indexer.Add(mkObj("a", "b")) + indexer.Add(mkObj("c", "b")) + indexer.Add(mkObj("e", "f")) + indexer.Add(mkObj("g", "h")) + { + for k, v := range expected { + found := sets.String{} + indexResults, err := indexer.Index("by_val", mkObj("", k)) + if err != nil { + t.Errorf("Unexpected error %v", err) + } + for _, item := range indexResults { + found.Insert(item.(testStoreObject).id) + } + items := v.List() + if !found.HasAll(items...) { + t.Errorf("missing items, index %s, expected %v but found %v", k, items, found.List()) + } + } + } +} + +func testStoreKeyFunc(obj interface{}) (string, error) { + return obj.(testStoreObject).id, nil +} + +func testStoreIndexFunc(obj interface{}) ([]string, error) { + return []string{obj.(testStoreObject).val}, nil +} + +func testStoreIndexers() Indexers { + indexers := Indexers{} + indexers["by_val"] = testStoreIndexFunc + return indexers +} + +type testStoreObject struct { + id string + val string +} + +func TestCache(t *testing.T) { + doTestStore(t, NewStore(testStoreKeyFunc)) +} + +func TestFIFOCache(t *testing.T) { + doTestStore(t, NewFIFO(testStoreKeyFunc)) +} + +func TestUndeltaStore(t *testing.T) { + nop := func([]interface{}) {} + doTestStore(t, NewUndeltaStore(nop, testStoreKeyFunc)) +} + +func TestIndex(t *testing.T) { + doTestIndex(t, NewIndexer(testStoreKeyFunc, testStoreIndexers())) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/cache/thread_safe_store.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/cache/thread_safe_store.go index ae4b802c8159..11077e25b2ee 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/cache/thread_safe_store.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/cache/thread_safe_store.go @@ -45,6 +45,12 @@ type ThreadSafeStore interface { Index(indexName string, obj interface{}) ([]interface{}, error) ListIndexFuncValues(name string) []string ByIndex(indexName, indexKey string) ([]interface{}, error) + GetIndexers() Indexers + + // AddIndexers adds more indexers to this store. If you call this after you already have data + // in the store, the results are undefined. 
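+	// (The threadSafeMap implementation below enforces this by refusing to add indexers once items exist, and by rejecting indexer names that conflict with existing ones.)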
+ AddIndexers(newIndexers Indexers) error + Resync() error } // threadSafeMap implements ThreadSafeStore @@ -179,6 +185,9 @@ func (c *threadSafeMap) ByIndex(indexName, indexKey string) ([]interface{}, erro } func (c *threadSafeMap) ListIndexFuncValues(indexName string) []string { + c.lock.RLock() + defer c.lock.RUnlock() + index := c.indices[indexName] names := make([]string, 0, len(index)) for key := range index { @@ -187,6 +196,31 @@ func (c *threadSafeMap) ListIndexFuncValues(indexName string) []string { return names } +func (c *threadSafeMap) GetIndexers() Indexers { + return c.indexers +} + +func (c *threadSafeMap) AddIndexers(newIndexers Indexers) error { + c.lock.Lock() + defer c.lock.Unlock() + + if len(c.items) > 0 { + return fmt.Errorf("cannot add indexers to running index") + } + + oldKeys := sets.StringKeySet(c.indexers) + newKeys := sets.StringKeySet(newIndexers) + + if oldKeys.HasAny(newKeys.List()...) { + return fmt.Errorf("indexer conflict: %v", oldKeys.Intersection(newKeys)) + } + + for k, v := range newIndexers { + c.indexers[k] = v + } + return nil +} + // updateIndices modifies the objects location in the managed indexes, if this is an update, you must provide an oldObj // updateIndices must be called from a function that already has a lock on the cache func (c *threadSafeMap) updateIndices(oldObj interface{}, newObj interface{}, key string) error { @@ -239,6 +273,11 @@ func (c *threadSafeMap) deleteFromIndices(obj interface{}, key string) error { return nil } +func (c *threadSafeMap) Resync() error { + // Nothing to do + return nil +} + func NewThreadSafeStore(indexers Indexers, indices Indices) ThreadSafeStore { return &threadSafeMap{ items: map[string]interface{}{}, diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/cache/undelta_store_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/cache/undelta_store_test.go new file mode 100644 index 000000000000..c14b7a800873 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/cache/undelta_store_test.go @@ -0,0 +1,131 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "reflect" + "testing" +) + +// store_test.go checks that UndeltaStore conforms to the Store interface +// behavior. This test just tests that it calls the push func in addition. 
+ +type testUndeltaObject struct { + name string + val interface{} +} + +func testUndeltaKeyFunc(obj interface{}) (string, error) { + return obj.(testUndeltaObject).name, nil +} + +/* +var ( + o1 interface{} = t{1} + o2 interface{} = t{2} + l1 []interface{} = []interface{}{t{1}} +) +*/ + +func TestUpdateCallsPush(t *testing.T) { + mkObj := func(name string, val interface{}) testUndeltaObject { + return testUndeltaObject{name: name, val: val} + } + + var got []interface{} + var callcount int = 0 + push := func(m []interface{}) { + callcount++ + got = m + } + + u := NewUndeltaStore(push, testUndeltaKeyFunc) + + u.Add(mkObj("a", 2)) + u.Update(mkObj("a", 1)) + if callcount != 2 { + t.Errorf("Expected 2 calls, got %d", callcount) + } + + l := []interface{}{mkObj("a", 1)} + if !reflect.DeepEqual(l, got) { + t.Errorf("Expected %#v, Got %#v", l, got) + } +} + +func TestDeleteCallsPush(t *testing.T) { + mkObj := func(name string, val interface{}) testUndeltaObject { + return testUndeltaObject{name: name, val: val} + } + + var got []interface{} + var callcount int = 0 + push := func(m []interface{}) { + callcount++ + got = m + } + + u := NewUndeltaStore(push, testUndeltaKeyFunc) + + u.Add(mkObj("a", 2)) + u.Delete(mkObj("a", "")) + if callcount != 2 { + t.Errorf("Expected 2 calls, got %d", callcount) + } + expected := []interface{}{} + if !reflect.DeepEqual(expected, got) { + t.Errorf("Expected %#v, Got %#v", expected, got) + } +} + +func TestReadsDoNotCallPush(t *testing.T) { + push := func(m []interface{}) { + t.Errorf("Unexpected call to push!") + } + + u := NewUndeltaStore(push, testUndeltaKeyFunc) + + // These should not call push. + _ = u.List() + _, _, _ = u.Get(testUndeltaObject{"a", ""}) +} + +func TestReplaceCallsPush(t *testing.T) { + mkObj := func(name string, val interface{}) testUndeltaObject { + return testUndeltaObject{name: name, val: val} + } + + var got []interface{} + var callcount int = 0 + push := func(m []interface{}) { + callcount++ + got = m + } + + u := NewUndeltaStore(push, testUndeltaKeyFunc) + + m := []interface{}{mkObj("a", 1)} + + u.Replace(m, "0") + if callcount != 1 { + t.Errorf("Expected 1 calls, got %d", callcount) + } + expected := []interface{}{mkObj("a", 1)} + if !reflect.DeepEqual(expected, got) { + t.Errorf("Expected %#v, Got %#v", expected, got) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/chaosclient/chaosclient.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/chaosclient/chaosclient.go new file mode 100644 index 000000000000..a0ed4b4c157a --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/chaosclient/chaosclient.go @@ -0,0 +1,156 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package chaosclient makes it easy to simulate network latency, misbehaving +// servers, and random errors from servers. It is intended to stress test components +// under failure conditions and expose weaknesses in the error handling logic +// of the codebase. 
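+// +// A minimal usage sketch (illustrative only; the probability and URL are made up, and every identifier referenced here is declared in this file): +// +//	seed := NewSeed(1) +//	client := http.Client{ +//		Transport: NewChaosRoundTripper( +//			http.DefaultTransport, LogChaos, +//			seed.P(0.01, ErrSimulatedConnectionResetByPeer), +//		), +//	} +//	resp, err := client.Get("http://example.com/")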
+package chaosclient + +import ( + "errors" + "fmt" + "log" + "math/rand" + "net/http" + "reflect" + "runtime" + + "k8s.io/kubernetes/pkg/util/net" +) + +// chaosrt provides the ability to perform simulations of HTTP client failures +// under the Golang http.Transport interface. +type chaosrt struct { + rt http.RoundTripper + notify ChaosNotifier + c []Chaos +} + +// Chaos intercepts requests to a remote HTTP endpoint and can inject arbitrary +// failures. +type Chaos interface { + // Intercept should return true if the normal flow should be skipped, and the + // return response and error used instead. Modifications to the request will + // be ignored, but may be used to make decisions about types of failures. + Intercept(req *http.Request) (bool, *http.Response, error) +} + +// ChaosNotifier notifies another component that the ChaosRoundTripper has simulated +// a failure. +type ChaosNotifier interface { + // OnChaos is invoked when a chaotic outcome was triggered. c is the + // source of Chaos and req was the outgoing request. + OnChaos(req *http.Request, c Chaos) +} + +// ChaosFunc takes an http.Request and decides whether to alter the response. It +// returns true if it wishes to mutate the response, with an http.Response or +// error. +type ChaosFunc func(req *http.Request) (bool, *http.Response, error) + +func (fn ChaosFunc) Intercept(req *http.Request) (bool, *http.Response, error) { + return fn(req) +} +func (fn ChaosFunc) String() string { + return runtime.FuncForPC(reflect.ValueOf(fn).Pointer()).Name() +} + +// NewChaosRoundTripper creates an http.RoundTripper that will intercept requests +// based on the provided Chaos functions. The notifier is invoked when a Chaos +// Intercept is fired. +func NewChaosRoundTripper(rt http.RoundTripper, notify ChaosNotifier, c ...Chaos) http.RoundTripper { + return &chaosrt{rt, notify, c} +} + +// RoundTrip gives each ChaosFunc an opportunity to intercept the request. The first +// interceptor wins. +func (rt *chaosrt) RoundTrip(req *http.Request) (*http.Response, error) { + for _, c := range rt.c { + if intercept, resp, err := c.Intercept(req); intercept { + rt.notify.OnChaos(req, c) + return resp, err + } + } + return rt.rt.RoundTrip(req) } + +var _ = net.RoundTripperWrapper(&chaosrt{}) + +func (rt *chaosrt) WrappedRoundTripper() http.RoundTripper { + return rt.rt +} + +// Seed represents a consistent stream of chaos. +type Seed struct { + *rand.Rand +} + +// NewSeed creates an object that assists in generating random chaotic events +// based on a deterministic seed. +func NewSeed(seed int64) Seed { + return Seed{rand.New(rand.NewSource(seed))} +} + +type pIntercept struct { + Chaos + s Seed + p float64 +} + +// P returns a Chaos that fires with a probability of p (p between 0.0 +// and 1.0 with 0.0 meaning never and 1.0 meaning always). +func (s Seed) P(p float64, c Chaos) Chaos { + return pIntercept{c, s, p} +} + +// Intercept intercepts requests with the provided probability p. +func (c pIntercept) Intercept(req *http.Request) (bool, *http.Response, error) { + if c.s.Float64() < c.p { + return c.Chaos.Intercept(req) + } + return false, nil, nil +} + +func (c pIntercept) String() string { + return fmt.Sprintf("P{%f %s}", c.p, c.Chaos) +} + +// ErrSimulatedConnectionResetByPeer emulates the golang net error when a connection +// is reset by a peer. +// TODO: make this more accurate +// TODO: add other error types +// TODO: add a helper for returning multiple errors randomly.
+var ErrSimulatedConnectionResetByPeer = Error{errors.New("connection reset by peer")} + +// Error returns the nested error when Intercept() is invoked. +type Error struct { + error +} + +// Intercept returns the nested error +func (e Error) Intercept(_ *http.Request) (bool, *http.Response, error) { + return true, nil, e.error +} + +// LogChaos is the default ChaosNotifier and writes a message to the Golang log. +var LogChaos = ChaosNotifier(logChaos{}) + +type logChaos struct{} + +func (logChaos) OnChaos(req *http.Request, c Chaos) { + log.Printf("Triggered chaotic behavior for %s %s: %v", req.Method, req.URL.String(), c) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/chaosclient/chaosclient_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/chaosclient/chaosclient_test.go new file mode 100644 index 000000000000..0c76736cfcd2 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/chaosclient/chaosclient_test.go @@ -0,0 +1,82 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package chaosclient + +import ( + "net/http" + "net/http/httptest" + "net/url" + "testing" +) + +type TestLogChaos struct { + *testing.T +} + +func (t TestLogChaos) OnChaos(req *http.Request, c Chaos) { + t.Logf("CHAOS: chaotic behavior for %s %s: %v", req.Method, req.URL.String(), c) +} + +func unwrapURLError(err error) error { + if urlErr, ok := err.(*url.Error); ok && urlErr != nil { + return urlErr.Err + } + return err +} + +func TestChaos(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.WriteHeader(http.StatusOK) + })) + defer server.Close() + client := http.Client{ + Transport: NewChaosRoundTripper(http.DefaultTransport, TestLogChaos{t}, ErrSimulatedConnectionResetByPeer), + } + resp, err := client.Get(server.URL) + if unwrapURLError(err) != ErrSimulatedConnectionResetByPeer.error { + t.Fatalf("expected reset by peer: %v", err) + } + if resp != nil { + t.Fatalf("expected no response object: %#v", resp) + } +} + +func TestPartialChaos(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.WriteHeader(http.StatusOK) + })) + defer server.Close() + seed := NewSeed(1) + client := http.Client{ + Transport: NewChaosRoundTripper( + http.DefaultTransport, TestLogChaos{t}, + seed.P(0.5, ErrSimulatedConnectionResetByPeer), + ), + } + success, fail := 0, 0 + for { + _, err := client.Get(server.URL) + if err != nil { + fail++ + } else { + success++ + } + if success > 1 && fail > 1 { + break + } + } } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/clientset.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/clientset.go index 599a0c40b296..9876441b73ca 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/clientset.go +++
b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/clientset.go @@ -18,16 +18,23 @@ package internalclientset import ( "github.com/golang/glog" + unversionedautoscaling "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/unversioned" + unversionedbatch "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/unversioned" + unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned" + unversionedextensions "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned" + unversionedrbac "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned" restclient "k8s.io/kubernetes/pkg/client/restclient" discovery "k8s.io/kubernetes/pkg/client/typed/discovery" - unversionedcore "k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned" - unversionedextensions "k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned" + "k8s.io/kubernetes/pkg/util/flowcontrol" ) type Interface interface { Discovery() discovery.DiscoveryInterface Core() unversionedcore.CoreInterface Extensions() unversionedextensions.ExtensionsInterface + Autoscaling() unversionedautoscaling.AutoscalingInterface + Batch() unversionedbatch.BatchInterface + Rbac() unversionedrbac.RbacInterface } // Clientset contains the clients for groups. Each group has exactly one @@ -36,18 +43,51 @@ type Clientset struct { *discovery.DiscoveryClient *unversionedcore.CoreClient *unversionedextensions.ExtensionsClient + *unversionedautoscaling.AutoscalingClient + *unversionedbatch.BatchClient + *unversionedrbac.RbacClient } // Core retrieves the CoreClient func (c *Clientset) Core() unversionedcore.CoreInterface { + if c == nil { + return nil + } return c.CoreClient } // Extensions retrieves the ExtensionsClient func (c *Clientset) Extensions() unversionedextensions.ExtensionsInterface { + if c == nil { + return nil + } return c.ExtensionsClient } +// Autoscaling retrieves the AutoscalingClient +func (c *Clientset) Autoscaling() unversionedautoscaling.AutoscalingInterface { + if c == nil { + return nil + } + return c.AutoscalingClient +} + +// Batch retrieves the BatchClient +func (c *Clientset) Batch() unversionedbatch.BatchInterface { + if c == nil { + return nil + } + return c.BatchClient +} + +// Rbac retrieves the RbacClient +func (c *Clientset) Rbac() unversionedrbac.RbacInterface { + if c == nil { + return nil + } + return c.RbacClient +} + // Discovery retrieves the DiscoveryClient func (c *Clientset) Discovery() discovery.DiscoveryInterface { return c.DiscoveryClient @@ -55,18 +95,34 @@ func (c *Clientset) Discovery() discovery.DiscoveryInterface { // NewForConfig creates a new Clientset for the given config. 
func NewForConfig(c *restclient.Config) (*Clientset, error) { + configShallowCopy := *c + if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) + } var clientset Clientset var err error - clientset.CoreClient, err = unversionedcore.NewForConfig(c) + clientset.CoreClient, err = unversionedcore.NewForConfig(&configShallowCopy) + if err != nil { + return &clientset, err + } + clientset.ExtensionsClient, err = unversionedextensions.NewForConfig(&configShallowCopy) + if err != nil { + return &clientset, err + } + clientset.AutoscalingClient, err = unversionedautoscaling.NewForConfig(&configShallowCopy) + if err != nil { + return &clientset, err + } + clientset.BatchClient, err = unversionedbatch.NewForConfig(&configShallowCopy) if err != nil { return &clientset, err } - clientset.ExtensionsClient, err = unversionedextensions.NewForConfig(c) + clientset.RbacClient, err = unversionedrbac.NewForConfig(&configShallowCopy) if err != nil { return &clientset, err } - clientset.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(c) + clientset.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) if err != nil { glog.Errorf("failed to create the DiscoveryClient: %v", err) } @@ -79,6 +135,9 @@ func NewForConfigOrDie(c *restclient.Config) *Clientset { var clientset Clientset clientset.CoreClient = unversionedcore.NewForConfigOrDie(c) clientset.ExtensionsClient = unversionedextensions.NewForConfigOrDie(c) + clientset.AutoscalingClient = unversionedautoscaling.NewForConfigOrDie(c) + clientset.BatchClient = unversionedbatch.NewForConfigOrDie(c) + clientset.RbacClient = unversionedrbac.NewForConfigOrDie(c) clientset.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) return &clientset @@ -89,6 +148,9 @@ func New(c *restclient.RESTClient) *Clientset { var clientset Clientset clientset.CoreClient = unversionedcore.New(c) clientset.ExtensionsClient = unversionedextensions.New(c) + clientset.AutoscalingClient = unversionedautoscaling.New(c) + clientset.BatchClient = unversionedbatch.New(c) + clientset.RbacClient = unversionedrbac.New(c) clientset.DiscoveryClient = discovery.NewDiscoveryClient(c) return &clientset diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/doc.go new file mode 100644 index 000000000000..3934caa42ca9 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with the default arguments. + +// This package has the automatically generated clientset. 
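+// +// A minimal construction sketch (illustrative; the host value is made up, while NewForConfig, Core, and restclient.Config are defined in this repository): +// +//	cs, err := NewForConfig(&restclient.Config{Host: "https://localhost:6443"}) +//	if err != nil { +//		// handle the error +//	} +//	core := cs.Core()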
+package internalclientset diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake/clientset_generated.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake/clientset_generated.go index d2861b7d3c04..699b2f4e153f 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake/clientset_generated.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake/clientset_generated.go @@ -18,13 +18,21 @@ package fake import ( "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apimachinery/registered" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" + unversionedautoscaling "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/unversioned" + fakeunversionedautoscaling "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/unversioned/fake" + unversionedbatch "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/unversioned" + fakeunversionedbatch "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/unversioned/fake" + unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned" + fakeunversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/fake" + unversionedextensions "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned" + fakeunversionedextensions "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/fake" + unversionedrbac "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned" + fakeunversionedrbac "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/fake" "k8s.io/kubernetes/pkg/client/testing/core" "k8s.io/kubernetes/pkg/client/typed/discovery" - unversionedcore "k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned" - fakeunversionedcore "k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/fake" - unversionedextensions "k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned" - fakeunversionedextensions "k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned/fake" + fakediscovery "k8s.io/kubernetes/pkg/client/typed/discovery/fake" "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/watch" ) @@ -39,7 +47,7 @@ func NewSimpleClientset(objects ...runtime.Object) *Clientset { } fakePtr := core.Fake{} - fakePtr.AddReactor("*", "*", core.ObjectReaction(o, api.RESTMapper)) + fakePtr.AddReactor("*", "*", core.ObjectReaction(o, registered.RESTMapper())) fakePtr.AddWatchReactor("*", core.DefaultWatchReactor(watch.NewFake(), nil)) @@ -54,17 +62,32 @@ type Clientset struct { } func (c *Clientset) Discovery() discovery.DiscoveryInterface { - return &FakeDiscovery{&c.Fake} + return &fakediscovery.FakeDiscovery{Fake: &c.Fake} } var _ clientset.Interface = &Clientset{} // Core retrieves the CoreClient func (c *Clientset) Core() unversionedcore.CoreInterface { - return &fakeunversionedcore.FakeCore{&c.Fake} + return &fakeunversionedcore.FakeCore{Fake: &c.Fake} } // Extensions retrieves the ExtensionsClient func (c *Clientset) Extensions() unversionedextensions.ExtensionsInterface { - return &fakeunversionedextensions.FakeExtensions{&c.Fake} + return 
&fakeunversionedextensions.FakeExtensions{Fake: &c.Fake} +} + +// Autoscaling retrieves the AutoscalingClient +func (c *Clientset) Autoscaling() unversionedautoscaling.AutoscalingInterface { + return &fakeunversionedautoscaling.FakeAutoscaling{Fake: &c.Fake} +} + +// Batch retrieves the BatchClient +func (c *Clientset) Batch() unversionedbatch.BatchInterface { + return &fakeunversionedbatch.FakeBatch{Fake: &c.Fake} +} + +// Rbac retrieves the RbacClient +func (c *Clientset) Rbac() unversionedrbac.RbacInterface { + return &fakeunversionedrbac.FakeRbac{Fake: &c.Fake} } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake/doc.go new file mode 100644 index 000000000000..559cf8914e1e --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with the default arguments. + +// This package has the automatically generated fake clientset. +package fake diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/import_known_versions.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/import_known_versions.go new file mode 100644 index 000000000000..8631b8b28788 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/import_known_versions.go @@ -0,0 +1,40 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internalclientset + +// These imports are the API groups the client will support. 
+import ( + "fmt" + + _ "k8s.io/kubernetes/pkg/api/install" + "k8s.io/kubernetes/pkg/apimachinery/registered" + _ "k8s.io/kubernetes/pkg/apis/apps/install" + _ "k8s.io/kubernetes/pkg/apis/authorization/install" + _ "k8s.io/kubernetes/pkg/apis/autoscaling/install" + _ "k8s.io/kubernetes/pkg/apis/batch/install" + _ "k8s.io/kubernetes/pkg/apis/componentconfig/install" + _ "k8s.io/kubernetes/pkg/apis/extensions/install" + _ "k8s.io/kubernetes/pkg/apis/metrics/install" + _ "k8s.io/kubernetes/pkg/apis/policy/install" + _ "k8s.io/kubernetes/pkg/apis/rbac/install" +) + +func init() { + if missingVersions := registered.ValidateEnvRequestedVersions(); len(missingVersions) != 0 { + panic(fmt.Sprintf("KUBE_API_VERSIONS contains versions that are not installed: %q.", missingVersions)) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/unversioned/autoscaling_client.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/unversioned/autoscaling_client.go new file mode 100644 index 000000000000..752b5d554e25 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/unversioned/autoscaling_client.go @@ -0,0 +1,101 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + api "k8s.io/kubernetes/pkg/api" + registered "k8s.io/kubernetes/pkg/apimachinery/registered" + restclient "k8s.io/kubernetes/pkg/client/restclient" +) + +type AutoscalingInterface interface { + GetRESTClient() *restclient.RESTClient + HorizontalPodAutoscalersGetter +} + +// AutoscalingClient is used to interact with features provided by the Autoscaling group. +type AutoscalingClient struct { + *restclient.RESTClient +} + +func (c *AutoscalingClient) HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerInterface { + return newHorizontalPodAutoscalers(c, namespace) +} + +// NewForConfig creates a new AutoscalingClient for the given config. +func NewForConfig(c *restclient.Config) (*AutoscalingClient, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := restclient.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &AutoscalingClient{client}, nil +} + +// NewForConfigOrDie creates a new AutoscalingClient for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *restclient.Config) *AutoscalingClient { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new AutoscalingClient for the given RESTClient. 
+func New(c *restclient.RESTClient) *AutoscalingClient { + return &AutoscalingClient{c} +} + +func setConfigDefaults(config *restclient.Config) error { + // if autoscaling group is not registered, return an error + g, err := registered.Group("autoscaling") + if err != nil { + return err + } + config.APIPath = "/apis" + if config.UserAgent == "" { + config.UserAgent = restclient.DefaultKubernetesUserAgent() + } + // TODO: Unconditionally set the config.Version, until we fix the config. + //if config.Version == "" { + copyGroupVersion := g.GroupVersion + config.GroupVersion = &copyGroupVersion + //} + + config.NegotiatedSerializer = api.Codecs + + if config.QPS == 0 { + config.QPS = 5 + } + if config.Burst == 0 { + config.Burst = 10 + } + return nil +} + +// GetRESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *AutoscalingClient) GetRESTClient() *restclient.RESTClient { + if c == nil { + return nil + } + return c.RESTClient +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/unversioned/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/unversioned/doc.go new file mode 100644 index 000000000000..47517b6422dd --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/unversioned/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with the default arguments. + +// This package has the automatically generated typed clients. +package unversioned diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/fake/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/unversioned/fake/doc.go similarity index 90% rename from Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/fake/doc.go rename to Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/unversioned/fake/doc.go index dd6d4da715a9..eb358c26c80f 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/fake/doc.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/unversioned/fake/doc.go @@ -14,5 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// This package is generated by client-gen with the default arguments. + // Package fake has the automatically generated clients.
package fake diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/unversioned/fake/fake_autoscaling_client.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/unversioned/fake/fake_autoscaling_client.go new file mode 100644 index 000000000000..d25beeebbaed --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/unversioned/fake/fake_autoscaling_client.go @@ -0,0 +1,37 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + unversioned "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/unversioned" + restclient "k8s.io/kubernetes/pkg/client/restclient" + core "k8s.io/kubernetes/pkg/client/testing/core" +) + +type FakeAutoscaling struct { + *core.Fake +} + +func (c *FakeAutoscaling) HorizontalPodAutoscalers(namespace string) unversioned.HorizontalPodAutoscalerInterface { + return &FakeHorizontalPodAutoscalers{c, namespace} +} + +// GetRESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeAutoscaling) GetRESTClient() *restclient.RESTClient { + return nil +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/unversioned/fake/fake_horizontalpodautoscaler.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/unversioned/fake/fake_horizontalpodautoscaler.go new file mode 100644 index 000000000000..fb889a446a06 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/unversioned/fake/fake_horizontalpodautoscaler.go @@ -0,0 +1,116 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package fake + +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling" + core "k8s.io/kubernetes/pkg/client/testing/core" + labels "k8s.io/kubernetes/pkg/labels" + watch "k8s.io/kubernetes/pkg/watch" +) + +// FakeHorizontalPodAutoscalers implements HorizontalPodAutoscalerInterface +type FakeHorizontalPodAutoscalers struct { + Fake *FakeAutoscaling + ns string +} + +var horizontalpodautoscalersResource = unversioned.GroupVersionResource{Group: "autoscaling", Version: "", Resource: "horizontalpodautoscalers"} + +func (c *FakeHorizontalPodAutoscalers) Create(horizontalPodAutoscaler *autoscaling.HorizontalPodAutoscaler) (result *autoscaling.HorizontalPodAutoscaler, err error) { + obj, err := c.Fake. + Invokes(core.NewCreateAction(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler), &autoscaling.HorizontalPodAutoscaler{}) + + if obj == nil { + return nil, err + } + return obj.(*autoscaling.HorizontalPodAutoscaler), err +} + +func (c *FakeHorizontalPodAutoscalers) Update(horizontalPodAutoscaler *autoscaling.HorizontalPodAutoscaler) (result *autoscaling.HorizontalPodAutoscaler, err error) { + obj, err := c.Fake. + Invokes(core.NewUpdateAction(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler), &autoscaling.HorizontalPodAutoscaler{}) + + if obj == nil { + return nil, err + } + return obj.(*autoscaling.HorizontalPodAutoscaler), err +} + +func (c *FakeHorizontalPodAutoscalers) UpdateStatus(horizontalPodAutoscaler *autoscaling.HorizontalPodAutoscaler) (*autoscaling.HorizontalPodAutoscaler, error) { + obj, err := c.Fake. + Invokes(core.NewUpdateSubresourceAction(horizontalpodautoscalersResource, "status", c.ns, horizontalPodAutoscaler), &autoscaling.HorizontalPodAutoscaler{}) + + if obj == nil { + return nil, err + } + return obj.(*autoscaling.HorizontalPodAutoscaler), err +} + +func (c *FakeHorizontalPodAutoscalers) Delete(name string, options *api.DeleteOptions) error { + _, err := c.Fake. + Invokes(core.NewDeleteAction(horizontalpodautoscalersResource, c.ns, name), &autoscaling.HorizontalPodAutoscaler{}) + + return err +} + +func (c *FakeHorizontalPodAutoscalers) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + action := core.NewDeleteCollectionAction(horizontalpodautoscalersResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &autoscaling.HorizontalPodAutoscalerList{}) + return err +} + +func (c *FakeHorizontalPodAutoscalers) Get(name string) (result *autoscaling.HorizontalPodAutoscaler, err error) { + obj, err := c.Fake. + Invokes(core.NewGetAction(horizontalpodautoscalersResource, c.ns, name), &autoscaling.HorizontalPodAutoscaler{}) + + if obj == nil { + return nil, err + } + return obj.(*autoscaling.HorizontalPodAutoscaler), err +} + +func (c *FakeHorizontalPodAutoscalers) List(opts api.ListOptions) (result *autoscaling.HorizontalPodAutoscalerList, err error) { + obj, err := c.Fake. 
+ Invokes(core.NewListAction(horizontalpodautoscalersResource, c.ns, opts), &autoscaling.HorizontalPodAutoscalerList{}) + + if obj == nil { + return nil, err + } + + label := opts.LabelSelector + if label == nil { + label = labels.Everything() + } + list := &autoscaling.HorizontalPodAutoscalerList{} + for _, item := range obj.(*autoscaling.HorizontalPodAutoscalerList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. +func (c *FakeHorizontalPodAutoscalers) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(core.NewWatchAction(horizontalpodautoscalersResource, c.ns, opts)) + +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/unversioned/generated_expansion.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/unversioned/generated_expansion.go new file mode 100644 index 000000000000..39324902aaaf --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/unversioned/generated_expansion.go @@ -0,0 +1,19 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +type HorizontalPodAutoscalerExpansion interface{} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/unversioned/horizontalpodautoscaler.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/unversioned/horizontalpodautoscaler.go new file mode 100644 index 000000000000..ae185ad7fb9b --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/unversioned/horizontalpodautoscaler.go @@ -0,0 +1,150 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + api "k8s.io/kubernetes/pkg/api" + autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling" + watch "k8s.io/kubernetes/pkg/watch" +) + +// HorizontalPodAutoscalersGetter has a method to return a HorizontalPodAutoscalerInterface. +// A group's client should implement this interface. 
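+// +// An illustrative sketch (the clientset variable, namespace, and name are assumed): +// +//	hpas := clientset.Autoscaling().HorizontalPodAutoscalers("default") +//	hpa, err := hpas.Get("my-hpa")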
+type HorizontalPodAutoscalersGetter interface { + HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerInterface +} + +// HorizontalPodAutoscalerInterface has methods to work with HorizontalPodAutoscaler resources. +type HorizontalPodAutoscalerInterface interface { + Create(*autoscaling.HorizontalPodAutoscaler) (*autoscaling.HorizontalPodAutoscaler, error) + Update(*autoscaling.HorizontalPodAutoscaler) (*autoscaling.HorizontalPodAutoscaler, error) + UpdateStatus(*autoscaling.HorizontalPodAutoscaler) (*autoscaling.HorizontalPodAutoscaler, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*autoscaling.HorizontalPodAutoscaler, error) + List(opts api.ListOptions) (*autoscaling.HorizontalPodAutoscalerList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + HorizontalPodAutoscalerExpansion +} + +// horizontalPodAutoscalers implements HorizontalPodAutoscalerInterface +type horizontalPodAutoscalers struct { + client *AutoscalingClient + ns string +} + +// newHorizontalPodAutoscalers returns a HorizontalPodAutoscalers +func newHorizontalPodAutoscalers(c *AutoscalingClient, namespace string) *horizontalPodAutoscalers { + return &horizontalPodAutoscalers{ + client: c, + ns: namespace, + } +} + +// Create takes the representation of a horizontalPodAutoscaler and creates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. +func (c *horizontalPodAutoscalers) Create(horizontalPodAutoscaler *autoscaling.HorizontalPodAutoscaler) (result *autoscaling.HorizontalPodAutoscaler, err error) { + result = &autoscaling.HorizontalPodAutoscaler{} + err = c.client.Post(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + Body(horizontalPodAutoscaler). + Do(). + Into(result) + return +} + +// Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. +func (c *horizontalPodAutoscalers) Update(horizontalPodAutoscaler *autoscaling.HorizontalPodAutoscaler) (result *autoscaling.HorizontalPodAutoscaler, err error) { + result = &autoscaling.HorizontalPodAutoscaler{} + err = c.client.Put(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + Name(horizontalPodAutoscaler.Name). + Body(horizontalPodAutoscaler). + Do(). + Into(result) + return +} + +func (c *horizontalPodAutoscalers) UpdateStatus(horizontalPodAutoscaler *autoscaling.HorizontalPodAutoscaler) (result *autoscaling.HorizontalPodAutoscaler, err error) { + result = &autoscaling.HorizontalPodAutoscaler{} + err = c.client.Put(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + Name(horizontalPodAutoscaler.Name). + SubResource("status"). + Body(horizontalPodAutoscaler). + Do(). + Into(result) + return +} + +// Delete takes name of the horizontalPodAutoscaler and deletes it. Returns an error if one occurs. +func (c *horizontalPodAutoscalers) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *horizontalPodAutoscalers) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). 
+ VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any. +func (c *horizontalPodAutoscalers) Get(name string) (result *autoscaling.HorizontalPodAutoscaler, err error) { + result = &autoscaling.HorizontalPodAutoscaler{} + err = c.client.Get(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors. +func (c *horizontalPodAutoscalers) List(opts api.ListOptions) (result *autoscaling.HorizontalPodAutoscalerList, err error) { + result = &autoscaling.HorizontalPodAutoscalerList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. +func (c *horizontalPodAutoscalers) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/unversioned/batch_client.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/unversioned/batch_client.go new file mode 100644 index 000000000000..83d9d749c458 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/unversioned/batch_client.go @@ -0,0 +1,106 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + api "k8s.io/kubernetes/pkg/api" + registered "k8s.io/kubernetes/pkg/apimachinery/registered" + restclient "k8s.io/kubernetes/pkg/client/restclient" +) + +type BatchInterface interface { + GetRESTClient() *restclient.RESTClient + JobsGetter + ScheduledJobsGetter +} + +// BatchClient is used to interact with features provided by the Batch group. +type BatchClient struct { + *restclient.RESTClient +} + +func (c *BatchClient) Jobs(namespace string) JobInterface { + return newJobs(c, namespace) +} + +func (c *BatchClient) ScheduledJobs(namespace string) ScheduledJobInterface { + return newScheduledJobs(c, namespace) +} + +// NewForConfig creates a new BatchClient for the given config. 
+func NewForConfig(c *restclient.Config) (*BatchClient, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := restclient.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &BatchClient{client}, nil +} + +// NewForConfigOrDie creates a new BatchClient for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *restclient.Config) *BatchClient { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new BatchClient for the given RESTClient. +func New(c *restclient.RESTClient) *BatchClient { + return &BatchClient{c} +} + +func setConfigDefaults(config *restclient.Config) error { + // if batch group is not registered, return an error + g, err := registered.Group("batch") + if err != nil { + return err + } + config.APIPath = "/apis" + if config.UserAgent == "" { + config.UserAgent = restclient.DefaultKubernetesUserAgent() + } + // TODO: Unconditionally set the config.Version, until we fix the config. + //if config.Version == "" { + copyGroupVersion := g.GroupVersion + config.GroupVersion = &copyGroupVersion + //} + + config.NegotiatedSerializer = api.Codecs + + if config.QPS == 0 { + config.QPS = 5 + } + if config.Burst == 0 { + config.Burst = 10 + } + return nil +} + +// GetRESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *BatchClient) GetRESTClient() *restclient.RESTClient { + if c == nil { + return nil + } + return c.RESTClient +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/unversioned/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/unversioned/doc.go new file mode 100644 index 000000000000..47517b6422dd --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/unversioned/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with the default arguments. + +// This package has the automatically generated typed clients.
+package unversioned diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned/fake/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/unversioned/fake/doc.go similarity index 90% rename from Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned/fake/doc.go rename to Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/unversioned/fake/doc.go index dd6d4da715a9..eb358c26c80f 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned/fake/doc.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/unversioned/fake/doc.go @@ -14,5 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// This package is generated by client-gen with the default arguments. + // Package fake has the automatically generated clients. package fake diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/unversioned/fake/fake_batch_client.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/unversioned/fake/fake_batch_client.go new file mode 100644 index 000000000000..bf2a41a76a22 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/unversioned/fake/fake_batch_client.go @@ -0,0 +1,41 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + unversioned "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/unversioned" + restclient "k8s.io/kubernetes/pkg/client/restclient" + core "k8s.io/kubernetes/pkg/client/testing/core" +) + +type FakeBatch struct { + *core.Fake +} + +func (c *FakeBatch) Jobs(namespace string) unversioned.JobInterface { + return &FakeJobs{c, namespace} +} + +func (c *FakeBatch) ScheduledJobs(namespace string) unversioned.ScheduledJobInterface { + return &FakeScheduledJobs{c, namespace} +} + +// GetRESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeBatch) GetRESTClient() *restclient.RESTClient { + return nil +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/unversioned/fake/fake_job.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/unversioned/fake/fake_job.go new file mode 100644 index 000000000000..701c102b4119 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/unversioned/fake/fake_job.go @@ -0,0 +1,116 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + batch "k8s.io/kubernetes/pkg/apis/batch" + core "k8s.io/kubernetes/pkg/client/testing/core" + labels "k8s.io/kubernetes/pkg/labels" + watch "k8s.io/kubernetes/pkg/watch" +) + +// FakeJobs implements JobInterface +type FakeJobs struct { + Fake *FakeBatch + ns string +} + +var jobsResource = unversioned.GroupVersionResource{Group: "batch", Version: "", Resource: "jobs"} + +func (c *FakeJobs) Create(job *batch.Job) (result *batch.Job, err error) { + obj, err := c.Fake. + Invokes(core.NewCreateAction(jobsResource, c.ns, job), &batch.Job{}) + + if obj == nil { + return nil, err + } + return obj.(*batch.Job), err +} + +func (c *FakeJobs) Update(job *batch.Job) (result *batch.Job, err error) { + obj, err := c.Fake. + Invokes(core.NewUpdateAction(jobsResource, c.ns, job), &batch.Job{}) + + if obj == nil { + return nil, err + } + return obj.(*batch.Job), err +} + +func (c *FakeJobs) UpdateStatus(job *batch.Job) (*batch.Job, error) { + obj, err := c.Fake. + Invokes(core.NewUpdateSubresourceAction(jobsResource, "status", c.ns, job), &batch.Job{}) + + if obj == nil { + return nil, err + } + return obj.(*batch.Job), err +} + +func (c *FakeJobs) Delete(name string, options *api.DeleteOptions) error { + _, err := c.Fake. + Invokes(core.NewDeleteAction(jobsResource, c.ns, name), &batch.Job{}) + + return err +} + +func (c *FakeJobs) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + action := core.NewDeleteCollectionAction(jobsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &batch.JobList{}) + return err +} + +func (c *FakeJobs) Get(name string) (result *batch.Job, err error) { + obj, err := c.Fake. + Invokes(core.NewGetAction(jobsResource, c.ns, name), &batch.Job{}) + + if obj == nil { + return nil, err + } + return obj.(*batch.Job), err +} + +func (c *FakeJobs) List(opts api.ListOptions) (result *batch.JobList, err error) { + obj, err := c.Fake. + Invokes(core.NewListAction(jobsResource, c.ns, opts), &batch.JobList{}) + + if obj == nil { + return nil, err + } + + label := opts.LabelSelector + if label == nil { + label = labels.Everything() + } + list := &batch.JobList{} + for _, item := range obj.(*batch.JobList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested jobs. +func (c *FakeJobs) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.Fake. 
+ InvokesWatch(core.NewWatchAction(jobsResource, c.ns, opts)) + +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/unversioned/fake/fake_scheduledjob.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/unversioned/fake/fake_scheduledjob.go new file mode 100644 index 000000000000..db09aa9bc964 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/unversioned/fake/fake_scheduledjob.go @@ -0,0 +1,116 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + batch "k8s.io/kubernetes/pkg/apis/batch" + core "k8s.io/kubernetes/pkg/client/testing/core" + labels "k8s.io/kubernetes/pkg/labels" + watch "k8s.io/kubernetes/pkg/watch" +) + +// FakeScheduledJobs implements ScheduledJobInterface +type FakeScheduledJobs struct { + Fake *FakeBatch + ns string +} + +var scheduledjobsResource = unversioned.GroupVersionResource{Group: "batch", Version: "", Resource: "scheduledjobs"} + +func (c *FakeScheduledJobs) Create(scheduledJob *batch.ScheduledJob) (result *batch.ScheduledJob, err error) { + obj, err := c.Fake. + Invokes(core.NewCreateAction(scheduledjobsResource, c.ns, scheduledJob), &batch.ScheduledJob{}) + + if obj == nil { + return nil, err + } + return obj.(*batch.ScheduledJob), err +} + +func (c *FakeScheduledJobs) Update(scheduledJob *batch.ScheduledJob) (result *batch.ScheduledJob, err error) { + obj, err := c.Fake. + Invokes(core.NewUpdateAction(scheduledjobsResource, c.ns, scheduledJob), &batch.ScheduledJob{}) + + if obj == nil { + return nil, err + } + return obj.(*batch.ScheduledJob), err +} + +func (c *FakeScheduledJobs) UpdateStatus(scheduledJob *batch.ScheduledJob) (*batch.ScheduledJob, error) { + obj, err := c.Fake. + Invokes(core.NewUpdateSubresourceAction(scheduledjobsResource, "status", c.ns, scheduledJob), &batch.ScheduledJob{}) + + if obj == nil { + return nil, err + } + return obj.(*batch.ScheduledJob), err +} + +func (c *FakeScheduledJobs) Delete(name string, options *api.DeleteOptions) error { + _, err := c.Fake. + Invokes(core.NewDeleteAction(scheduledjobsResource, c.ns, name), &batch.ScheduledJob{}) + + return err +} + +func (c *FakeScheduledJobs) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + action := core.NewDeleteCollectionAction(scheduledjobsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &batch.ScheduledJobList{}) + return err +} + +func (c *FakeScheduledJobs) Get(name string) (result *batch.ScheduledJob, err error) { + obj, err := c.Fake. 
+ Invokes(core.NewGetAction(scheduledjobsResource, c.ns, name), &batch.ScheduledJob{}) + + if obj == nil { + return nil, err + } + return obj.(*batch.ScheduledJob), err +} + +func (c *FakeScheduledJobs) List(opts api.ListOptions) (result *batch.ScheduledJobList, err error) { + obj, err := c.Fake. + Invokes(core.NewListAction(scheduledjobsResource, c.ns, opts), &batch.ScheduledJobList{}) + + if obj == nil { + return nil, err + } + + label := opts.LabelSelector + if label == nil { + label = labels.Everything() + } + list := &batch.ScheduledJobList{} + for _, item := range obj.(*batch.ScheduledJobList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested scheduledJobs. +func (c *FakeScheduledJobs) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(core.NewWatchAction(scheduledjobsResource, c.ns, opts)) + +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/unversioned/generated_expansion.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/unversioned/generated_expansion.go new file mode 100644 index 000000000000..f876ef63fc2a --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/unversioned/generated_expansion.go @@ -0,0 +1,21 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +type JobExpansion interface{} + +type ScheduledJobExpansion interface{} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/unversioned/job.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/unversioned/job.go new file mode 100644 index 000000000000..680c50654c69 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/unversioned/job.go @@ -0,0 +1,150 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + api "k8s.io/kubernetes/pkg/api" + batch "k8s.io/kubernetes/pkg/apis/batch" + watch "k8s.io/kubernetes/pkg/watch" +) + +// JobsGetter has a method to return a JobInterface. +// A group's client should implement this interface. 
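The JobsGetter / JobInterface pair defined next is the surface that application code consumes. A hedged usage sketch, assuming a reachable apiserver; the cfg host value and the listJobs helper are illustrative and not part of this patch:

package main

import (
	"fmt"

	api "k8s.io/kubernetes/pkg/api"
	batchclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/unversioned"
	restclient "k8s.io/kubernetes/pkg/client/restclient"
)

func listJobs() error {
	cfg := &restclient.Config{Host: "http://127.0.0.1:8080"} // assumed apiserver endpoint
	c, err := batchclient.NewForConfig(cfg)                  // constructor from batch_client.go above
	if err != nil {
		return err
	}
	// Jobs(namespace) returns a namespaced JobInterface; List issues the GET.
	jobs, err := c.Jobs("default").List(api.ListOptions{})
	if err != nil {
		return err
	}
	fmt.Printf("found %d jobs\n", len(jobs.Items))
	return nil
}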
+type JobsGetter interface { + Jobs(namespace string) JobInterface +} + +// JobInterface has methods to work with Job resources. +type JobInterface interface { + Create(*batch.Job) (*batch.Job, error) + Update(*batch.Job) (*batch.Job, error) + UpdateStatus(*batch.Job) (*batch.Job, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*batch.Job, error) + List(opts api.ListOptions) (*batch.JobList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + JobExpansion +} + +// jobs implements JobInterface +type jobs struct { + client *BatchClient + ns string +} + +// newJobs returns a Jobs +func newJobs(c *BatchClient, namespace string) *jobs { + return &jobs{ + client: c, + ns: namespace, + } +} + +// Create takes the representation of a job and creates it. Returns the server's representation of the job, and an error, if there is any. +func (c *jobs) Create(job *batch.Job) (result *batch.Job, err error) { + result = &batch.Job{} + err = c.client.Post(). + Namespace(c.ns). + Resource("jobs"). + Body(job). + Do(). + Into(result) + return +} + +// Update takes the representation of a job and updates it. Returns the server's representation of the job, and an error, if there is any. +func (c *jobs) Update(job *batch.Job) (result *batch.Job, err error) { + result = &batch.Job{} + err = c.client.Put(). + Namespace(c.ns). + Resource("jobs"). + Name(job.Name). + Body(job). + Do(). + Into(result) + return +} + +func (c *jobs) UpdateStatus(job *batch.Job) (result *batch.Job, err error) { + result = &batch.Job{} + err = c.client.Put(). + Namespace(c.ns). + Resource("jobs"). + Name(job.Name). + SubResource("status"). + Body(job). + Do(). + Into(result) + return +} + +// Delete takes name of the job and deletes it. Returns an error if one occurs. +func (c *jobs) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("jobs"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *jobs) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("jobs"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the job, and returns the corresponding job object, and an error if there is any. +func (c *jobs) Get(name string) (result *batch.Job, err error) { + result = &batch.Job{} + err = c.client.Get(). + Namespace(c.ns). + Resource("jobs"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Jobs that match those selectors. +func (c *jobs) List(opts api.ListOptions) (result *batch.JobList, err error) { + result = &batch.JobList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("jobs"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested jobs. +func (c *jobs) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("jobs"). + VersionedParams(&opts, api.ParameterCodec). 
+ Watch() +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/unversioned/scheduledjob.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/unversioned/scheduledjob.go new file mode 100644 index 000000000000..2675d11c48f2 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/unversioned/scheduledjob.go @@ -0,0 +1,150 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + api "k8s.io/kubernetes/pkg/api" + batch "k8s.io/kubernetes/pkg/apis/batch" + watch "k8s.io/kubernetes/pkg/watch" +) + +// ScheduledJobsGetter has a method to return a ScheduledJobInterface. +// A group's client should implement this interface. +type ScheduledJobsGetter interface { + ScheduledJobs(namespace string) ScheduledJobInterface +} + +// ScheduledJobInterface has methods to work with ScheduledJob resources. +type ScheduledJobInterface interface { + Create(*batch.ScheduledJob) (*batch.ScheduledJob, error) + Update(*batch.ScheduledJob) (*batch.ScheduledJob, error) + UpdateStatus(*batch.ScheduledJob) (*batch.ScheduledJob, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*batch.ScheduledJob, error) + List(opts api.ListOptions) (*batch.ScheduledJobList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + ScheduledJobExpansion +} + +// scheduledJobs implements ScheduledJobInterface +type scheduledJobs struct { + client *BatchClient + ns string +} + +// newScheduledJobs returns a ScheduledJobs +func newScheduledJobs(c *BatchClient, namespace string) *scheduledJobs { + return &scheduledJobs{ + client: c, + ns: namespace, + } +} + +// Create takes the representation of a scheduledJob and creates it. Returns the server's representation of the scheduledJob, and an error, if there is any. +func (c *scheduledJobs) Create(scheduledJob *batch.ScheduledJob) (result *batch.ScheduledJob, err error) { + result = &batch.ScheduledJob{} + err = c.client.Post(). + Namespace(c.ns). + Resource("scheduledjobs"). + Body(scheduledJob). + Do(). + Into(result) + return +} + +// Update takes the representation of a scheduledJob and updates it. Returns the server's representation of the scheduledJob, and an error, if there is any. +func (c *scheduledJobs) Update(scheduledJob *batch.ScheduledJob) (result *batch.ScheduledJob, err error) { + result = &batch.ScheduledJob{} + err = c.client.Put(). + Namespace(c.ns). + Resource("scheduledjobs"). + Name(scheduledJob.Name). + Body(scheduledJob). + Do(). + Into(result) + return +} + +func (c *scheduledJobs) UpdateStatus(scheduledJob *batch.ScheduledJob) (result *batch.ScheduledJob, err error) { + result = &batch.ScheduledJob{} + err = c.client.Put(). + Namespace(c.ns). + Resource("scheduledjobs"). + Name(scheduledJob.Name). 
+ SubResource("status"). + Body(scheduledJob). + Do(). + Into(result) + return +} + +// Delete takes name of the scheduledJob and deletes it. Returns an error if one occurs. +func (c *scheduledJobs) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("scheduledjobs"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *scheduledJobs) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("scheduledjobs"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the scheduledJob, and returns the corresponding scheduledJob object, and an error if there is any. +func (c *scheduledJobs) Get(name string) (result *batch.ScheduledJob, err error) { + result = &batch.ScheduledJob{} + err = c.client.Get(). + Namespace(c.ns). + Resource("scheduledjobs"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ScheduledJobs that match those selectors. +func (c *scheduledJobs) List(opts api.ListOptions) (result *batch.ScheduledJobList, err error) { + result = &batch.ScheduledJobList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("scheduledjobs"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested scheduledJobs. +func (c *scheduledJobs) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("scheduledjobs"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/componentstatus.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/componentstatus.go similarity index 100% rename from Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/componentstatus.go rename to Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/componentstatus.go diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/configmap.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/configmap.go similarity index 100% rename from Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/configmap.go rename to Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/configmap.go diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/core_client.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/core_client.go new file mode 100644 index 000000000000..41aee4cf6543 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/core_client.go @@ -0,0 +1,176 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + api "k8s.io/kubernetes/pkg/api" + registered "k8s.io/kubernetes/pkg/apimachinery/registered" + restclient "k8s.io/kubernetes/pkg/client/restclient" +) + +type CoreInterface interface { + GetRESTClient() *restclient.RESTClient + ComponentStatusesGetter + ConfigMapsGetter + EndpointsGetter + EventsGetter + LimitRangesGetter + NamespacesGetter + NodesGetter + PersistentVolumesGetter + PersistentVolumeClaimsGetter + PodsGetter + PodTemplatesGetter + ReplicationControllersGetter + ResourceQuotasGetter + SecretsGetter + ServicesGetter + ServiceAccountsGetter +} + +// CoreClient is used to interact with features provided by the Core group. +type CoreClient struct { + *restclient.RESTClient +} + +func (c *CoreClient) ComponentStatuses() ComponentStatusInterface { + return newComponentStatuses(c) +} + +func (c *CoreClient) ConfigMaps(namespace string) ConfigMapInterface { + return newConfigMaps(c, namespace) +} + +func (c *CoreClient) Endpoints(namespace string) EndpointsInterface { + return newEndpoints(c, namespace) +} + +func (c *CoreClient) Events(namespace string) EventInterface { + return newEvents(c, namespace) +} + +func (c *CoreClient) LimitRanges(namespace string) LimitRangeInterface { + return newLimitRanges(c, namespace) +} + +func (c *CoreClient) Namespaces() NamespaceInterface { + return newNamespaces(c) +} + +func (c *CoreClient) Nodes() NodeInterface { + return newNodes(c) +} + +func (c *CoreClient) PersistentVolumes() PersistentVolumeInterface { + return newPersistentVolumes(c) +} + +func (c *CoreClient) PersistentVolumeClaims(namespace string) PersistentVolumeClaimInterface { + return newPersistentVolumeClaims(c, namespace) +} + +func (c *CoreClient) Pods(namespace string) PodInterface { + return newPods(c, namespace) +} + +func (c *CoreClient) PodTemplates(namespace string) PodTemplateInterface { + return newPodTemplates(c, namespace) +} + +func (c *CoreClient) ReplicationControllers(namespace string) ReplicationControllerInterface { + return newReplicationControllers(c, namespace) +} + +func (c *CoreClient) ResourceQuotas(namespace string) ResourceQuotaInterface { + return newResourceQuotas(c, namespace) +} + +func (c *CoreClient) Secrets(namespace string) SecretInterface { + return newSecrets(c, namespace) +} + +func (c *CoreClient) Services(namespace string) ServiceInterface { + return newServices(c, namespace) +} + +func (c *CoreClient) ServiceAccounts(namespace string) ServiceAccountInterface { + return newServiceAccounts(c, namespace) +} + +// NewForConfig creates a new CoreClient for the given config. +func NewForConfig(c *restclient.Config) (*CoreClient, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := restclient.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &CoreClient{client}, nil +} + +// NewForConfigOrDie creates a new CoreClient for the given config and +// panics if there is an error in the config. 
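CoreInterface above is the abstraction worth depending on in application code, since both this CoreClient and the FakeCore added later in this patch satisfy it. A sketch of that injection style, assuming the Pod client's List mirrors the other typed clients shown here; countPods is an illustrative helper, not part of the patch:

package main

import (
	api "k8s.io/kubernetes/pkg/api"
	coreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
)

// countPods accepts the interface, so the real client can be passed in
// production and the fake client in tests without changing this code.
func countPods(c coreclient.CoreInterface, ns string) (int, error) {
	pods, err := c.Pods(ns).List(api.ListOptions{})
	if err != nil {
		return 0, err
	}
	return len(pods.Items), nil
}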
+func NewForConfigOrDie(c *restclient.Config) *CoreClient { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new CoreClient for the given RESTClient. +func New(c *restclient.RESTClient) *CoreClient { + return &CoreClient{c} +} + +func setConfigDefaults(config *restclient.Config) error { + // if core group is not registered, return an error + g, err := registered.Group("") + if err != nil { + return err + } + config.APIPath = "/api" + if config.UserAgent == "" { + config.UserAgent = restclient.DefaultKubernetesUserAgent() + } + // TODO: Unconditionally set the config.Version, until we fix the config. + //if config.Version == "" { + copyGroupVersion := g.GroupVersion + config.GroupVersion = &copyGroupVersion + //} + + config.NegotiatedSerializer = api.Codecs + + if config.QPS == 0 { + config.QPS = 5 + } + if config.Burst == 0 { + config.Burst = 10 + } + return nil +} + +// GetRESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *CoreClient) GetRESTClient() *restclient.RESTClient { + if c == nil { + return nil + } + return c.RESTClient +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/doc.go new file mode 100644 index 000000000000..47517b6422dd --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with the default arguments. + +// This package has the automatically generated typed clients.
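Note that setConfigDefaults above only fills in what the caller left unset: the API path, the group's GroupVersion, the negotiated serializer, and the rate limits (QPS 5, Burst 10 when zero). A minimal construction sketch; the host value is an assumption:

package main

import (
	coreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
	restclient "k8s.io/kubernetes/pkg/client/restclient"
)

func newCoreClient() (*coreclient.CoreClient, error) {
	// NewForConfig copies the config before defaulting it (config := *c),
	// so the caller's restclient.Config is left untouched.
	cfg := &restclient.Config{Host: "https://kube-apiserver:6443"} // assumed endpoint
	return coreclient.NewForConfig(cfg)
}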
+package unversioned diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/endpoints.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/endpoints.go similarity index 100% rename from Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/endpoints.go rename to Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/endpoints.go diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/event.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/event.go similarity index 100% rename from Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/event.go rename to Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/event.go diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/event_expansion.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/event_expansion.go similarity index 100% rename from Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/event_expansion.go rename to Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/event_expansion.go diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/fake/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/fake/doc.go new file mode 100644 index 000000000000..eb358c26c80f --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with the default arguments. + +// Package fake has the automatically generated clients. 
+package fake diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/fake/fake_componentstatus.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/fake/fake_componentstatus.go similarity index 75% rename from Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/fake/fake_componentstatus.go rename to Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/fake/fake_componentstatus.go index 478dd9dbf372..84dca9984eaf 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/fake/fake_componentstatus.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/fake/fake_componentstatus.go @@ -18,6 +18,7 @@ package fake import ( api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" core "k8s.io/kubernetes/pkg/client/testing/core" labels "k8s.io/kubernetes/pkg/labels" watch "k8s.io/kubernetes/pkg/watch" @@ -28,9 +29,11 @@ type FakeComponentStatuses struct { Fake *FakeCore } +var componentstatusesResource = unversioned.GroupVersionResource{Group: "", Version: "", Resource: "componentstatuses"} + func (c *FakeComponentStatuses) Create(componentStatus *api.ComponentStatus) (result *api.ComponentStatus, err error) { obj, err := c.Fake. - Invokes(core.NewRootCreateAction("componentstatuses", componentStatus), &api.ComponentStatus{}) + Invokes(core.NewRootCreateAction(componentstatusesResource, componentStatus), &api.ComponentStatus{}) if obj == nil { return nil, err } @@ -39,7 +42,7 @@ func (c *FakeComponentStatuses) Create(componentStatus *api.ComponentStatus) (re func (c *FakeComponentStatuses) Update(componentStatus *api.ComponentStatus) (result *api.ComponentStatus, err error) { obj, err := c.Fake. - Invokes(core.NewRootUpdateAction("componentstatuses", componentStatus), &api.ComponentStatus{}) + Invokes(core.NewRootUpdateAction(componentstatusesResource, componentStatus), &api.ComponentStatus{}) if obj == nil { return nil, err } @@ -48,12 +51,12 @@ func (c *FakeComponentStatuses) Update(componentStatus *api.ComponentStatus) (re func (c *FakeComponentStatuses) Delete(name string, options *api.DeleteOptions) error { _, err := c.Fake. - Invokes(core.NewRootDeleteAction("componentstatuses", name), &api.ComponentStatus{}) + Invokes(core.NewRootDeleteAction(componentstatusesResource, name), &api.ComponentStatus{}) return err } func (c *FakeComponentStatuses) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - action := core.NewRootDeleteCollectionAction("componentstatuses", listOptions) + action := core.NewRootDeleteCollectionAction(componentstatusesResource, listOptions) _, err := c.Fake.Invokes(action, &api.ComponentStatusList{}) return err @@ -61,7 +64,7 @@ func (c *FakeComponentStatuses) DeleteCollection(options *api.DeleteOptions, lis func (c *FakeComponentStatuses) Get(name string) (result *api.ComponentStatus, err error) { obj, err := c.Fake. 
- Invokes(core.NewRootGetAction("componentstatuses", name), &api.ComponentStatus{}) + Invokes(core.NewRootGetAction(componentstatusesResource, name), &api.ComponentStatus{}) if obj == nil { return nil, err } @@ -70,7 +73,7 @@ func (c *FakeComponentStatuses) Get(name string) (result *api.ComponentStatus, e func (c *FakeComponentStatuses) List(opts api.ListOptions) (result *api.ComponentStatusList, err error) { obj, err := c.Fake. - Invokes(core.NewRootListAction("componentstatuses", opts), &api.ComponentStatusList{}) + Invokes(core.NewRootListAction(componentstatusesResource, opts), &api.ComponentStatusList{}) if obj == nil { return nil, err } @@ -91,5 +94,5 @@ func (c *FakeComponentStatuses) List(opts api.ListOptions) (result *api.Componen // Watch returns a watch.Interface that watches the requested componentStatuses. func (c *FakeComponentStatuses) Watch(opts api.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(core.NewRootWatchAction("componentstatuses", opts)) + InvokesWatch(core.NewRootWatchAction(componentstatusesResource, opts)) } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/fake/fake_configmap.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/fake/fake_configmap.go similarity index 76% rename from Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/fake/fake_configmap.go rename to Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/fake/fake_configmap.go index 34fa0b229bb8..8908c26a36cd 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/fake/fake_configmap.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/fake/fake_configmap.go @@ -18,6 +18,7 @@ package fake import ( api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" core "k8s.io/kubernetes/pkg/client/testing/core" labels "k8s.io/kubernetes/pkg/labels" watch "k8s.io/kubernetes/pkg/watch" @@ -29,9 +30,11 @@ type FakeConfigMaps struct { ns string } +var configmapsResource = unversioned.GroupVersionResource{Group: "", Version: "", Resource: "configmaps"} + func (c *FakeConfigMaps) Create(configMap *api.ConfigMap) (result *api.ConfigMap, err error) { obj, err := c.Fake. - Invokes(core.NewCreateAction("configmaps", c.ns, configMap), &api.ConfigMap{}) + Invokes(core.NewCreateAction(configmapsResource, c.ns, configMap), &api.ConfigMap{}) if obj == nil { return nil, err @@ -41,7 +44,7 @@ func (c *FakeConfigMaps) Create(configMap *api.ConfigMap) (result *api.ConfigMap func (c *FakeConfigMaps) Update(configMap *api.ConfigMap) (result *api.ConfigMap, err error) { obj, err := c.Fake. - Invokes(core.NewUpdateAction("configmaps", c.ns, configMap), &api.ConfigMap{}) + Invokes(core.NewUpdateAction(configmapsResource, c.ns, configMap), &api.ConfigMap{}) if obj == nil { return nil, err @@ -51,13 +54,13 @@ func (c *FakeConfigMaps) Update(configMap *api.ConfigMap) (result *api.ConfigMap func (c *FakeConfigMaps) Delete(name string, options *api.DeleteOptions) error { _, err := c.Fake. 
- Invokes(core.NewDeleteAction("configmaps", c.ns, name), &api.ConfigMap{}) + Invokes(core.NewDeleteAction(configmapsResource, c.ns, name), &api.ConfigMap{}) return err } func (c *FakeConfigMaps) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - action := core.NewDeleteCollectionAction("configmaps", c.ns, listOptions) + action := core.NewDeleteCollectionAction(configmapsResource, c.ns, listOptions) _, err := c.Fake.Invokes(action, &api.ConfigMapList{}) return err @@ -65,7 +68,7 @@ func (c *FakeConfigMaps) DeleteCollection(options *api.DeleteOptions, listOption func (c *FakeConfigMaps) Get(name string) (result *api.ConfigMap, err error) { obj, err := c.Fake. - Invokes(core.NewGetAction("configmaps", c.ns, name), &api.ConfigMap{}) + Invokes(core.NewGetAction(configmapsResource, c.ns, name), &api.ConfigMap{}) if obj == nil { return nil, err @@ -75,7 +78,7 @@ func (c *FakeConfigMaps) Get(name string) (result *api.ConfigMap, err error) { func (c *FakeConfigMaps) List(opts api.ListOptions) (result *api.ConfigMapList, err error) { obj, err := c.Fake. - Invokes(core.NewListAction("configmaps", c.ns, opts), &api.ConfigMapList{}) + Invokes(core.NewListAction(configmapsResource, c.ns, opts), &api.ConfigMapList{}) if obj == nil { return nil, err @@ -97,6 +100,6 @@ func (c *FakeConfigMaps) List(opts api.ListOptions) (result *api.ConfigMapList, // Watch returns a watch.Interface that watches the requested configMaps. func (c *FakeConfigMaps) Watch(opts api.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(core.NewWatchAction("configmaps", c.ns, opts)) + InvokesWatch(core.NewWatchAction(configmapsResource, c.ns, opts)) } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/fake/fake_core_client.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/fake/fake_core_client.go similarity index 88% rename from Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/fake/fake_core_client.go rename to Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/fake/fake_core_client.go index afc6cf6f9026..533735e7c1ba 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/fake/fake_core_client.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/fake/fake_core_client.go @@ -17,8 +17,9 @@ limitations under the License. package fake import ( + unversioned "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned" + restclient "k8s.io/kubernetes/pkg/client/restclient" core "k8s.io/kubernetes/pkg/client/testing/core" - unversioned "k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned" ) type FakeCore struct { @@ -88,3 +89,9 @@ func (c *FakeCore) Services(namespace string) unversioned.ServiceInterface { func (c *FakeCore) ServiceAccounts(namespace string) unversioned.ServiceAccountInterface { return &FakeServiceAccounts{c, namespace} } + +// GetRESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
+func (c *FakeCore) GetRESTClient() *restclient.RESTClient { + return nil +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/fake/fake_endpoints.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/fake/fake_endpoints.go similarity index 76% rename from Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/fake/fake_endpoints.go rename to Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/fake/fake_endpoints.go index cc25b6e06cb7..b888d89c0bd8 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/fake/fake_endpoints.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/fake/fake_endpoints.go @@ -18,6 +18,7 @@ package fake import ( api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" core "k8s.io/kubernetes/pkg/client/testing/core" labels "k8s.io/kubernetes/pkg/labels" watch "k8s.io/kubernetes/pkg/watch" @@ -29,9 +30,11 @@ type FakeEndpoints struct { ns string } +var endpointsResource = unversioned.GroupVersionResource{Group: "", Version: "", Resource: "endpoints"} + func (c *FakeEndpoints) Create(endpoints *api.Endpoints) (result *api.Endpoints, err error) { obj, err := c.Fake. - Invokes(core.NewCreateAction("endpoints", c.ns, endpoints), &api.Endpoints{}) + Invokes(core.NewCreateAction(endpointsResource, c.ns, endpoints), &api.Endpoints{}) if obj == nil { return nil, err @@ -41,7 +44,7 @@ func (c *FakeEndpoints) Create(endpoints *api.Endpoints) (result *api.Endpoints, func (c *FakeEndpoints) Update(endpoints *api.Endpoints) (result *api.Endpoints, err error) { obj, err := c.Fake. - Invokes(core.NewUpdateAction("endpoints", c.ns, endpoints), &api.Endpoints{}) + Invokes(core.NewUpdateAction(endpointsResource, c.ns, endpoints), &api.Endpoints{}) if obj == nil { return nil, err @@ -51,13 +54,13 @@ func (c *FakeEndpoints) Update(endpoints *api.Endpoints) (result *api.Endpoints, func (c *FakeEndpoints) Delete(name string, options *api.DeleteOptions) error { _, err := c.Fake. - Invokes(core.NewDeleteAction("endpoints", c.ns, name), &api.Endpoints{}) + Invokes(core.NewDeleteAction(endpointsResource, c.ns, name), &api.Endpoints{}) return err } func (c *FakeEndpoints) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - action := core.NewDeleteCollectionAction("endpoints", c.ns, listOptions) + action := core.NewDeleteCollectionAction(endpointsResource, c.ns, listOptions) _, err := c.Fake.Invokes(action, &api.EndpointsList{}) return err @@ -65,7 +68,7 @@ func (c *FakeEndpoints) DeleteCollection(options *api.DeleteOptions, listOptions func (c *FakeEndpoints) Get(name string) (result *api.Endpoints, err error) { obj, err := c.Fake. - Invokes(core.NewGetAction("endpoints", c.ns, name), &api.Endpoints{}) + Invokes(core.NewGetAction(endpointsResource, c.ns, name), &api.Endpoints{}) if obj == nil { return nil, err @@ -75,7 +78,7 @@ func (c *FakeEndpoints) Get(name string) (result *api.Endpoints, err error) { func (c *FakeEndpoints) List(opts api.ListOptions) (result *api.EndpointsList, err error) { obj, err := c.Fake. 
- Invokes(core.NewListAction("endpoints", c.ns, opts), &api.EndpointsList{}) + Invokes(core.NewListAction(endpointsResource, c.ns, opts), &api.EndpointsList{}) if obj == nil { return nil, err @@ -97,6 +100,6 @@ func (c *FakeEndpoints) List(opts api.ListOptions) (result *api.EndpointsList, e // Watch returns a watch.Interface that watches the requested endpoints. func (c *FakeEndpoints) Watch(opts api.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(core.NewWatchAction("endpoints", c.ns, opts)) + InvokesWatch(core.NewWatchAction(endpointsResource, c.ns, opts)) } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/fake/fake_event.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/fake/fake_event.go similarity index 77% rename from Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/fake/fake_event.go rename to Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/fake/fake_event.go index a9f88153bc75..d86b05a7bd71 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/fake/fake_event.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/fake/fake_event.go @@ -18,6 +18,7 @@ package fake import ( api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" core "k8s.io/kubernetes/pkg/client/testing/core" labels "k8s.io/kubernetes/pkg/labels" watch "k8s.io/kubernetes/pkg/watch" @@ -29,9 +30,11 @@ type FakeEvents struct { ns string } +var eventsResource = unversioned.GroupVersionResource{Group: "", Version: "", Resource: "events"} + func (c *FakeEvents) Create(event *api.Event) (result *api.Event, err error) { obj, err := c.Fake. - Invokes(core.NewCreateAction("events", c.ns, event), &api.Event{}) + Invokes(core.NewCreateAction(eventsResource, c.ns, event), &api.Event{}) if obj == nil { return nil, err @@ -41,7 +44,7 @@ func (c *FakeEvents) Create(event *api.Event) (result *api.Event, err error) { func (c *FakeEvents) Update(event *api.Event) (result *api.Event, err error) { obj, err := c.Fake. - Invokes(core.NewUpdateAction("events", c.ns, event), &api.Event{}) + Invokes(core.NewUpdateAction(eventsResource, c.ns, event), &api.Event{}) if obj == nil { return nil, err @@ -51,13 +54,13 @@ func (c *FakeEvents) Update(event *api.Event) (result *api.Event, err error) { func (c *FakeEvents) Delete(name string, options *api.DeleteOptions) error { _, err := c.Fake. - Invokes(core.NewDeleteAction("events", c.ns, name), &api.Event{}) + Invokes(core.NewDeleteAction(eventsResource, c.ns, name), &api.Event{}) return err } func (c *FakeEvents) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - action := core.NewDeleteCollectionAction("events", c.ns, listOptions) + action := core.NewDeleteCollectionAction(eventsResource, c.ns, listOptions) _, err := c.Fake.Invokes(action, &api.EventList{}) return err @@ -65,7 +68,7 @@ func (c *FakeEvents) DeleteCollection(options *api.DeleteOptions, listOptions ap func (c *FakeEvents) Get(name string) (result *api.Event, err error) { obj, err := c.Fake. 
- Invokes(core.NewGetAction("events", c.ns, name), &api.Event{}) + Invokes(core.NewGetAction(eventsResource, c.ns, name), &api.Event{}) if obj == nil { return nil, err @@ -75,7 +78,7 @@ func (c *FakeEvents) Get(name string) (result *api.Event, err error) { func (c *FakeEvents) List(opts api.ListOptions) (result *api.EventList, err error) { obj, err := c.Fake. - Invokes(core.NewListAction("events", c.ns, opts), &api.EventList{}) + Invokes(core.NewListAction(eventsResource, c.ns, opts), &api.EventList{}) if obj == nil { return nil, err @@ -97,6 +100,6 @@ func (c *FakeEvents) List(opts api.ListOptions) (result *api.EventList, err erro // Watch returns a watch.Interface that watches the requested events. func (c *FakeEvents) Watch(opts api.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(core.NewWatchAction("events", c.ns, opts)) + InvokesWatch(core.NewWatchAction(eventsResource, c.ns, opts)) } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/fake/fake_event_expansion.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/fake/fake_event_expansion.go similarity index 80% rename from Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/fake/fake_event_expansion.go rename to Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/fake/fake_event_expansion.go index bc514d505481..b76be860d69d 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/fake/fake_event_expansion.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/fake/fake_event_expansion.go @@ -24,9 +24,9 @@ import ( ) func (c *FakeEvents) CreateWithEventNamespace(event *api.Event) (*api.Event, error) { - action := core.NewRootCreateAction("events", event) + action := core.NewRootCreateAction(eventsResource, event) if c.ns != "" { - action = core.NewCreateAction("events", c.ns, event) + action = core.NewCreateAction(eventsResource, c.ns, event) } obj, err := c.Fake.Invokes(action, event) if obj == nil { @@ -38,9 +38,9 @@ func (c *FakeEvents) CreateWithEventNamespace(event *api.Event) (*api.Event, err // Update replaces an existing event. Returns the copy of the event the server returns, or an error. func (c *FakeEvents) UpdateWithEventNamespace(event *api.Event) (*api.Event, error) { - action := core.NewRootUpdateAction("events", event) + action := core.NewRootUpdateAction(eventsResource, event) if c.ns != "" { - action = core.NewUpdateAction("events", c.ns, event) + action = core.NewUpdateAction(eventsResource, c.ns, event) } obj, err := c.Fake.Invokes(action, event) if obj == nil { @@ -52,9 +52,9 @@ func (c *FakeEvents) UpdateWithEventNamespace(event *api.Event) (*api.Event, err // Patch patches an existing event. Returns the copy of the event the server returns, or an error. func (c *FakeEvents) Patch(event *api.Event, data []byte) (*api.Event, error) { - action := core.NewRootPatchAction("events", event) + action := core.NewRootPatchAction(eventsResource, event) if c.ns != "" { - action = core.NewPatchAction("events", c.ns, event) + action = core.NewPatchAction(eventsResource, c.ns, event) } obj, err := c.Fake.Invokes(action, event) if obj == nil { @@ -66,9 +66,9 @@ func (c *FakeEvents) Patch(event *api.Event, data []byte) (*api.Event, error) { // Search returns a list of events matching the specified object. 
 func (c *FakeEvents) Search(objOrRef runtime.Object) (*api.EventList, error) {
-	action := core.NewRootListAction("events", api.ListOptions{})
+	action := core.NewRootListAction(eventsResource, api.ListOptions{})
 	if c.ns != "" {
-		action = core.NewListAction("events", c.ns, api.ListOptions{})
+		action = core.NewListAction(eventsResource, c.ns, api.ListOptions{})
 	}
 	obj, err := c.Fake.Invokes(action, &api.EventList{})
 	if obj == nil {
@@ -81,7 +81,7 @@ func (c *FakeEvents) Search(objOrRef runtime.Object) (*api.EventList, error) {
 
 func (c *FakeEvents) GetFieldSelector(involvedObjectName, involvedObjectNamespace, involvedObjectKind, involvedObjectUID *string) fields.Selector {
 	action := core.GenericActionImpl{}
 	action.Verb = "get-field-selector"
-	action.Resource = "events"
+	action.Resource = eventsResource
 	c.Fake.Invokes(action, nil)
 	return fields.Everything()
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/fake/fake_limitrange.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/fake/fake_limitrange.go
similarity index 76%
rename from Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/fake/fake_limitrange.go
rename to Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/fake/fake_limitrange.go
index cab44ce4ee36..b46f5eba4b8a 100644
--- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/fake/fake_limitrange.go
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/fake/fake_limitrange.go
@@ -18,6 +18,7 @@ package fake
 
 import (
 	api "k8s.io/kubernetes/pkg/api"
+	unversioned "k8s.io/kubernetes/pkg/api/unversioned"
 	core "k8s.io/kubernetes/pkg/client/testing/core"
 	labels "k8s.io/kubernetes/pkg/labels"
 	watch "k8s.io/kubernetes/pkg/watch"
@@ -29,9 +30,11 @@ type FakeLimitRanges struct {
 	ns string
 }
 
+var limitrangesResource = unversioned.GroupVersionResource{Group: "", Version: "", Resource: "limitranges"}
+
 func (c *FakeLimitRanges) Create(limitRange *api.LimitRange) (result *api.LimitRange, err error) {
 	obj, err := c.Fake.
-		Invokes(core.NewCreateAction("limitranges", c.ns, limitRange), &api.LimitRange{})
+		Invokes(core.NewCreateAction(limitrangesResource, c.ns, limitRange), &api.LimitRange{})
 
 	if obj == nil {
 		return nil, err
@@ -41,7 +44,7 @@ func (c *FakeLimitRanges) Create(limitRange *api.LimitRange) (result *api.LimitR
 
 func (c *FakeLimitRanges) Update(limitRange *api.LimitRange) (result *api.LimitRange, err error) {
 	obj, err := c.Fake.
-		Invokes(core.NewUpdateAction("limitranges", c.ns, limitRange), &api.LimitRange{})
+		Invokes(core.NewUpdateAction(limitrangesResource, c.ns, limitRange), &api.LimitRange{})
 
 	if obj == nil {
 		return nil, err
@@ -51,13 +54,13 @@ func (c *FakeLimitRanges) Update(limitRange *api.LimitRange) (result *api.LimitR
 
 func (c *FakeLimitRanges) Delete(name string, options *api.DeleteOptions) error {
 	_, err := c.Fake.
- Invokes(core.NewDeleteAction("limitranges", c.ns, name), &api.LimitRange{}) + Invokes(core.NewDeleteAction(limitrangesResource, c.ns, name), &api.LimitRange{}) return err } func (c *FakeLimitRanges) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - action := core.NewDeleteCollectionAction("limitranges", c.ns, listOptions) + action := core.NewDeleteCollectionAction(limitrangesResource, c.ns, listOptions) _, err := c.Fake.Invokes(action, &api.LimitRangeList{}) return err @@ -65,7 +68,7 @@ func (c *FakeLimitRanges) DeleteCollection(options *api.DeleteOptions, listOptio func (c *FakeLimitRanges) Get(name string) (result *api.LimitRange, err error) { obj, err := c.Fake. - Invokes(core.NewGetAction("limitranges", c.ns, name), &api.LimitRange{}) + Invokes(core.NewGetAction(limitrangesResource, c.ns, name), &api.LimitRange{}) if obj == nil { return nil, err @@ -75,7 +78,7 @@ func (c *FakeLimitRanges) Get(name string) (result *api.LimitRange, err error) { func (c *FakeLimitRanges) List(opts api.ListOptions) (result *api.LimitRangeList, err error) { obj, err := c.Fake. - Invokes(core.NewListAction("limitranges", c.ns, opts), &api.LimitRangeList{}) + Invokes(core.NewListAction(limitrangesResource, c.ns, opts), &api.LimitRangeList{}) if obj == nil { return nil, err @@ -97,6 +100,6 @@ func (c *FakeLimitRanges) List(opts api.ListOptions) (result *api.LimitRangeList // Watch returns a watch.Interface that watches the requested limitRanges. func (c *FakeLimitRanges) Watch(opts api.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(core.NewWatchAction("limitranges", c.ns, opts)) + InvokesWatch(core.NewWatchAction(limitrangesResource, c.ns, opts)) } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/fake/fake_namespace.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/fake/fake_namespace.go similarity index 75% rename from Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/fake/fake_namespace.go rename to Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/fake/fake_namespace.go index 78933814f7e0..233dd8ff9a1d 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/fake/fake_namespace.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/fake/fake_namespace.go @@ -18,6 +18,7 @@ package fake import ( api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" core "k8s.io/kubernetes/pkg/client/testing/core" labels "k8s.io/kubernetes/pkg/labels" watch "k8s.io/kubernetes/pkg/watch" @@ -28,9 +29,11 @@ type FakeNamespaces struct { Fake *FakeCore } +var namespacesResource = unversioned.GroupVersionResource{Group: "", Version: "", Resource: "namespaces"} + func (c *FakeNamespaces) Create(namespace *api.Namespace) (result *api.Namespace, err error) { obj, err := c.Fake. - Invokes(core.NewRootCreateAction("namespaces", namespace), &api.Namespace{}) + Invokes(core.NewRootCreateAction(namespacesResource, namespace), &api.Namespace{}) if obj == nil { return nil, err } @@ -39,7 +42,7 @@ func (c *FakeNamespaces) Create(namespace *api.Namespace) (result *api.Namespace func (c *FakeNamespaces) Update(namespace *api.Namespace) (result *api.Namespace, err error) { obj, err := c.Fake. 
- Invokes(core.NewRootUpdateAction("namespaces", namespace), &api.Namespace{}) + Invokes(core.NewRootUpdateAction(namespacesResource, namespace), &api.Namespace{}) if obj == nil { return nil, err } @@ -48,7 +51,7 @@ func (c *FakeNamespaces) Update(namespace *api.Namespace) (result *api.Namespace func (c *FakeNamespaces) UpdateStatus(namespace *api.Namespace) (*api.Namespace, error) { obj, err := c.Fake. - Invokes(core.NewRootUpdateSubresourceAction("namespaces", "status", namespace), &api.Namespace{}) + Invokes(core.NewRootUpdateSubresourceAction(namespacesResource, "status", namespace), &api.Namespace{}) if obj == nil { return nil, err } @@ -57,12 +60,12 @@ func (c *FakeNamespaces) UpdateStatus(namespace *api.Namespace) (*api.Namespace, func (c *FakeNamespaces) Delete(name string, options *api.DeleteOptions) error { _, err := c.Fake. - Invokes(core.NewRootDeleteAction("namespaces", name), &api.Namespace{}) + Invokes(core.NewRootDeleteAction(namespacesResource, name), &api.Namespace{}) return err } func (c *FakeNamespaces) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - action := core.NewRootDeleteCollectionAction("namespaces", listOptions) + action := core.NewRootDeleteCollectionAction(namespacesResource, listOptions) _, err := c.Fake.Invokes(action, &api.NamespaceList{}) return err @@ -70,7 +73,7 @@ func (c *FakeNamespaces) DeleteCollection(options *api.DeleteOptions, listOption func (c *FakeNamespaces) Get(name string) (result *api.Namespace, err error) { obj, err := c.Fake. - Invokes(core.NewRootGetAction("namespaces", name), &api.Namespace{}) + Invokes(core.NewRootGetAction(namespacesResource, name), &api.Namespace{}) if obj == nil { return nil, err } @@ -79,7 +82,7 @@ func (c *FakeNamespaces) Get(name string) (result *api.Namespace, err error) { func (c *FakeNamespaces) List(opts api.ListOptions) (result *api.NamespaceList, err error) { obj, err := c.Fake. - Invokes(core.NewRootListAction("namespaces", opts), &api.NamespaceList{}) + Invokes(core.NewRootListAction(namespacesResource, opts), &api.NamespaceList{}) if obj == nil { return nil, err } @@ -100,5 +103,5 @@ func (c *FakeNamespaces) List(opts api.ListOptions) (result *api.NamespaceList, // Watch returns a watch.Interface that watches the requested namespaces. func (c *FakeNamespaces) Watch(opts api.ListOptions) (watch.Interface, error) { return c.Fake. 
- InvokesWatch(core.NewRootWatchAction("namespaces", opts)) + InvokesWatch(core.NewRootWatchAction(namespacesResource, opts)) } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/fake/fake_namespace_expansion.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/fake/fake_namespace_expansion.go similarity index 96% rename from Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/fake/fake_namespace_expansion.go rename to Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/fake/fake_namespace_expansion.go index 8bb49ff2b08b..04b3acc14d1e 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/fake/fake_namespace_expansion.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/fake/fake_namespace_expansion.go @@ -24,7 +24,7 @@ import ( func (c *FakeNamespaces) Finalize(namespace *api.Namespace) (*api.Namespace, error) { action := core.CreateActionImpl{} action.Verb = "create" - action.Resource = "namespaces" + action.Resource = namespacesResource action.Subresource = "finalize" action.Object = namespace diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/fake/fake_node.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/fake/fake_node.go similarity index 76% rename from Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/fake/fake_node.go rename to Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/fake/fake_node.go index 8761c8772ccc..7370e5a2a4b9 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/fake/fake_node.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/fake/fake_node.go @@ -18,6 +18,7 @@ package fake import ( api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" core "k8s.io/kubernetes/pkg/client/testing/core" labels "k8s.io/kubernetes/pkg/labels" watch "k8s.io/kubernetes/pkg/watch" @@ -28,9 +29,11 @@ type FakeNodes struct { Fake *FakeCore } +var nodesResource = unversioned.GroupVersionResource{Group: "", Version: "", Resource: "nodes"} + func (c *FakeNodes) Create(node *api.Node) (result *api.Node, err error) { obj, err := c.Fake. - Invokes(core.NewRootCreateAction("nodes", node), &api.Node{}) + Invokes(core.NewRootCreateAction(nodesResource, node), &api.Node{}) if obj == nil { return nil, err } @@ -39,7 +42,7 @@ func (c *FakeNodes) Create(node *api.Node) (result *api.Node, err error) { func (c *FakeNodes) Update(node *api.Node) (result *api.Node, err error) { obj, err := c.Fake. - Invokes(core.NewRootUpdateAction("nodes", node), &api.Node{}) + Invokes(core.NewRootUpdateAction(nodesResource, node), &api.Node{}) if obj == nil { return nil, err } @@ -48,7 +51,7 @@ func (c *FakeNodes) Update(node *api.Node) (result *api.Node, err error) { func (c *FakeNodes) UpdateStatus(node *api.Node) (*api.Node, error) { obj, err := c.Fake. 
- Invokes(core.NewRootUpdateSubresourceAction("nodes", "status", node), &api.Node{}) + Invokes(core.NewRootUpdateSubresourceAction(nodesResource, "status", node), &api.Node{}) if obj == nil { return nil, err } @@ -57,12 +60,12 @@ func (c *FakeNodes) UpdateStatus(node *api.Node) (*api.Node, error) { func (c *FakeNodes) Delete(name string, options *api.DeleteOptions) error { _, err := c.Fake. - Invokes(core.NewRootDeleteAction("nodes", name), &api.Node{}) + Invokes(core.NewRootDeleteAction(nodesResource, name), &api.Node{}) return err } func (c *FakeNodes) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - action := core.NewRootDeleteCollectionAction("nodes", listOptions) + action := core.NewRootDeleteCollectionAction(nodesResource, listOptions) _, err := c.Fake.Invokes(action, &api.NodeList{}) return err @@ -70,7 +73,7 @@ func (c *FakeNodes) DeleteCollection(options *api.DeleteOptions, listOptions api func (c *FakeNodes) Get(name string) (result *api.Node, err error) { obj, err := c.Fake. - Invokes(core.NewRootGetAction("nodes", name), &api.Node{}) + Invokes(core.NewRootGetAction(nodesResource, name), &api.Node{}) if obj == nil { return nil, err } @@ -79,7 +82,7 @@ func (c *FakeNodes) Get(name string) (result *api.Node, err error) { func (c *FakeNodes) List(opts api.ListOptions) (result *api.NodeList, err error) { obj, err := c.Fake. - Invokes(core.NewRootListAction("nodes", opts), &api.NodeList{}) + Invokes(core.NewRootListAction(nodesResource, opts), &api.NodeList{}) if obj == nil { return nil, err } @@ -100,5 +103,5 @@ func (c *FakeNodes) List(opts api.ListOptions) (result *api.NodeList, err error) // Watch returns a watch.Interface that watches the requested nodes. func (c *FakeNodes) Watch(opts api.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(core.NewRootWatchAction("nodes", opts)) + InvokesWatch(core.NewRootWatchAction(nodesResource, opts)) } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/fake/fake_persistentvolume.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/fake/fake_persistentvolume.go similarity index 74% rename from Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/fake/fake_persistentvolume.go rename to Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/fake/fake_persistentvolume.go index d3d8c79f52d5..fd0ae18ae6d9 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/fake/fake_persistentvolume.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/fake/fake_persistentvolume.go @@ -18,6 +18,7 @@ package fake import ( api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" core "k8s.io/kubernetes/pkg/client/testing/core" labels "k8s.io/kubernetes/pkg/labels" watch "k8s.io/kubernetes/pkg/watch" @@ -28,9 +29,11 @@ type FakePersistentVolumes struct { Fake *FakeCore } +var persistentvolumesResource = unversioned.GroupVersionResource{Group: "", Version: "", Resource: "persistentvolumes"} + func (c *FakePersistentVolumes) Create(persistentVolume *api.PersistentVolume) (result *api.PersistentVolume, err error) { obj, err := c.Fake. 
- Invokes(core.NewRootCreateAction("persistentvolumes", persistentVolume), &api.PersistentVolume{}) + Invokes(core.NewRootCreateAction(persistentvolumesResource, persistentVolume), &api.PersistentVolume{}) if obj == nil { return nil, err } @@ -39,7 +42,7 @@ func (c *FakePersistentVolumes) Create(persistentVolume *api.PersistentVolume) ( func (c *FakePersistentVolumes) Update(persistentVolume *api.PersistentVolume) (result *api.PersistentVolume, err error) { obj, err := c.Fake. - Invokes(core.NewRootUpdateAction("persistentvolumes", persistentVolume), &api.PersistentVolume{}) + Invokes(core.NewRootUpdateAction(persistentvolumesResource, persistentVolume), &api.PersistentVolume{}) if obj == nil { return nil, err } @@ -48,7 +51,7 @@ func (c *FakePersistentVolumes) Update(persistentVolume *api.PersistentVolume) ( func (c *FakePersistentVolumes) UpdateStatus(persistentVolume *api.PersistentVolume) (*api.PersistentVolume, error) { obj, err := c.Fake. - Invokes(core.NewRootUpdateSubresourceAction("persistentvolumes", "status", persistentVolume), &api.PersistentVolume{}) + Invokes(core.NewRootUpdateSubresourceAction(persistentvolumesResource, "status", persistentVolume), &api.PersistentVolume{}) if obj == nil { return nil, err } @@ -57,12 +60,12 @@ func (c *FakePersistentVolumes) UpdateStatus(persistentVolume *api.PersistentVol func (c *FakePersistentVolumes) Delete(name string, options *api.DeleteOptions) error { _, err := c.Fake. - Invokes(core.NewRootDeleteAction("persistentvolumes", name), &api.PersistentVolume{}) + Invokes(core.NewRootDeleteAction(persistentvolumesResource, name), &api.PersistentVolume{}) return err } func (c *FakePersistentVolumes) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - action := core.NewRootDeleteCollectionAction("persistentvolumes", listOptions) + action := core.NewRootDeleteCollectionAction(persistentvolumesResource, listOptions) _, err := c.Fake.Invokes(action, &api.PersistentVolumeList{}) return err @@ -70,7 +73,7 @@ func (c *FakePersistentVolumes) DeleteCollection(options *api.DeleteOptions, lis func (c *FakePersistentVolumes) Get(name string) (result *api.PersistentVolume, err error) { obj, err := c.Fake. - Invokes(core.NewRootGetAction("persistentvolumes", name), &api.PersistentVolume{}) + Invokes(core.NewRootGetAction(persistentvolumesResource, name), &api.PersistentVolume{}) if obj == nil { return nil, err } @@ -79,7 +82,7 @@ func (c *FakePersistentVolumes) Get(name string) (result *api.PersistentVolume, func (c *FakePersistentVolumes) List(opts api.ListOptions) (result *api.PersistentVolumeList, err error) { obj, err := c.Fake. - Invokes(core.NewRootListAction("persistentvolumes", opts), &api.PersistentVolumeList{}) + Invokes(core.NewRootListAction(persistentvolumesResource, opts), &api.PersistentVolumeList{}) if obj == nil { return nil, err } @@ -100,5 +103,5 @@ func (c *FakePersistentVolumes) List(opts api.ListOptions) (result *api.Persiste // Watch returns a watch.Interface that watches the requested persistentVolumes. func (c *FakePersistentVolumes) Watch(opts api.ListOptions) (watch.Interface, error) { return c.Fake. 
- InvokesWatch(core.NewRootWatchAction("persistentvolumes", opts)) + InvokesWatch(core.NewRootWatchAction(persistentvolumesResource, opts)) } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/fake/fake_persistentvolumeclaim.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/fake/fake_persistentvolumeclaim.go similarity index 73% rename from Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/fake/fake_persistentvolumeclaim.go rename to Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/fake/fake_persistentvolumeclaim.go index ba674f269af4..bd10e834d3a2 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/fake/fake_persistentvolumeclaim.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/fake/fake_persistentvolumeclaim.go @@ -18,6 +18,7 @@ package fake import ( api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" core "k8s.io/kubernetes/pkg/client/testing/core" labels "k8s.io/kubernetes/pkg/labels" watch "k8s.io/kubernetes/pkg/watch" @@ -29,9 +30,11 @@ type FakePersistentVolumeClaims struct { ns string } +var persistentvolumeclaimsResource = unversioned.GroupVersionResource{Group: "", Version: "", Resource: "persistentvolumeclaims"} + func (c *FakePersistentVolumeClaims) Create(persistentVolumeClaim *api.PersistentVolumeClaim) (result *api.PersistentVolumeClaim, err error) { obj, err := c.Fake. - Invokes(core.NewCreateAction("persistentvolumeclaims", c.ns, persistentVolumeClaim), &api.PersistentVolumeClaim{}) + Invokes(core.NewCreateAction(persistentvolumeclaimsResource, c.ns, persistentVolumeClaim), &api.PersistentVolumeClaim{}) if obj == nil { return nil, err @@ -41,7 +44,7 @@ func (c *FakePersistentVolumeClaims) Create(persistentVolumeClaim *api.Persisten func (c *FakePersistentVolumeClaims) Update(persistentVolumeClaim *api.PersistentVolumeClaim) (result *api.PersistentVolumeClaim, err error) { obj, err := c.Fake. - Invokes(core.NewUpdateAction("persistentvolumeclaims", c.ns, persistentVolumeClaim), &api.PersistentVolumeClaim{}) + Invokes(core.NewUpdateAction(persistentvolumeclaimsResource, c.ns, persistentVolumeClaim), &api.PersistentVolumeClaim{}) if obj == nil { return nil, err @@ -51,7 +54,7 @@ func (c *FakePersistentVolumeClaims) Update(persistentVolumeClaim *api.Persisten func (c *FakePersistentVolumeClaims) UpdateStatus(persistentVolumeClaim *api.PersistentVolumeClaim) (*api.PersistentVolumeClaim, error) { obj, err := c.Fake. - Invokes(core.NewUpdateSubresourceAction("persistentvolumeclaims", "status", c.ns, persistentVolumeClaim), &api.PersistentVolumeClaim{}) + Invokes(core.NewUpdateSubresourceAction(persistentvolumeclaimsResource, "status", c.ns, persistentVolumeClaim), &api.PersistentVolumeClaim{}) if obj == nil { return nil, err @@ -61,13 +64,13 @@ func (c *FakePersistentVolumeClaims) UpdateStatus(persistentVolumeClaim *api.Per func (c *FakePersistentVolumeClaims) Delete(name string, options *api.DeleteOptions) error { _, err := c.Fake. 
- Invokes(core.NewDeleteAction("persistentvolumeclaims", c.ns, name), &api.PersistentVolumeClaim{}) + Invokes(core.NewDeleteAction(persistentvolumeclaimsResource, c.ns, name), &api.PersistentVolumeClaim{}) return err } func (c *FakePersistentVolumeClaims) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - action := core.NewDeleteCollectionAction("persistentvolumeclaims", c.ns, listOptions) + action := core.NewDeleteCollectionAction(persistentvolumeclaimsResource, c.ns, listOptions) _, err := c.Fake.Invokes(action, &api.PersistentVolumeClaimList{}) return err @@ -75,7 +78,7 @@ func (c *FakePersistentVolumeClaims) DeleteCollection(options *api.DeleteOptions func (c *FakePersistentVolumeClaims) Get(name string) (result *api.PersistentVolumeClaim, err error) { obj, err := c.Fake. - Invokes(core.NewGetAction("persistentvolumeclaims", c.ns, name), &api.PersistentVolumeClaim{}) + Invokes(core.NewGetAction(persistentvolumeclaimsResource, c.ns, name), &api.PersistentVolumeClaim{}) if obj == nil { return nil, err @@ -85,7 +88,7 @@ func (c *FakePersistentVolumeClaims) Get(name string) (result *api.PersistentVol func (c *FakePersistentVolumeClaims) List(opts api.ListOptions) (result *api.PersistentVolumeClaimList, err error) { obj, err := c.Fake. - Invokes(core.NewListAction("persistentvolumeclaims", c.ns, opts), &api.PersistentVolumeClaimList{}) + Invokes(core.NewListAction(persistentvolumeclaimsResource, c.ns, opts), &api.PersistentVolumeClaimList{}) if obj == nil { return nil, err @@ -107,6 +110,6 @@ func (c *FakePersistentVolumeClaims) List(opts api.ListOptions) (result *api.Per // Watch returns a watch.Interface that watches the requested persistentVolumeClaims. func (c *FakePersistentVolumeClaims) Watch(opts api.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(core.NewWatchAction("persistentvolumeclaims", c.ns, opts)) + InvokesWatch(core.NewWatchAction(persistentvolumeclaimsResource, c.ns, opts)) } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/fake/fake_pod.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/fake/fake_pod.go similarity index 76% rename from Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/fake/fake_pod.go rename to Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/fake/fake_pod.go index 6488c021d0a3..98c0b8e3f796 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/fake/fake_pod.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/fake/fake_pod.go @@ -18,6 +18,7 @@ package fake import ( api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" core "k8s.io/kubernetes/pkg/client/testing/core" labels "k8s.io/kubernetes/pkg/labels" watch "k8s.io/kubernetes/pkg/watch" @@ -29,9 +30,11 @@ type FakePods struct { ns string } +var podsResource = unversioned.GroupVersionResource{Group: "", Version: "", Resource: "pods"} + func (c *FakePods) Create(pod *api.Pod) (result *api.Pod, err error) { obj, err := c.Fake. 
- Invokes(core.NewCreateAction("pods", c.ns, pod), &api.Pod{}) + Invokes(core.NewCreateAction(podsResource, c.ns, pod), &api.Pod{}) if obj == nil { return nil, err @@ -41,7 +44,7 @@ func (c *FakePods) Create(pod *api.Pod) (result *api.Pod, err error) { func (c *FakePods) Update(pod *api.Pod) (result *api.Pod, err error) { obj, err := c.Fake. - Invokes(core.NewUpdateAction("pods", c.ns, pod), &api.Pod{}) + Invokes(core.NewUpdateAction(podsResource, c.ns, pod), &api.Pod{}) if obj == nil { return nil, err @@ -51,7 +54,7 @@ func (c *FakePods) Update(pod *api.Pod) (result *api.Pod, err error) { func (c *FakePods) UpdateStatus(pod *api.Pod) (*api.Pod, error) { obj, err := c.Fake. - Invokes(core.NewUpdateSubresourceAction("pods", "status", c.ns, pod), &api.Pod{}) + Invokes(core.NewUpdateSubresourceAction(podsResource, "status", c.ns, pod), &api.Pod{}) if obj == nil { return nil, err @@ -61,13 +64,13 @@ func (c *FakePods) UpdateStatus(pod *api.Pod) (*api.Pod, error) { func (c *FakePods) Delete(name string, options *api.DeleteOptions) error { _, err := c.Fake. - Invokes(core.NewDeleteAction("pods", c.ns, name), &api.Pod{}) + Invokes(core.NewDeleteAction(podsResource, c.ns, name), &api.Pod{}) return err } func (c *FakePods) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - action := core.NewDeleteCollectionAction("pods", c.ns, listOptions) + action := core.NewDeleteCollectionAction(podsResource, c.ns, listOptions) _, err := c.Fake.Invokes(action, &api.PodList{}) return err @@ -75,7 +78,7 @@ func (c *FakePods) DeleteCollection(options *api.DeleteOptions, listOptions api. func (c *FakePods) Get(name string) (result *api.Pod, err error) { obj, err := c.Fake. - Invokes(core.NewGetAction("pods", c.ns, name), &api.Pod{}) + Invokes(core.NewGetAction(podsResource, c.ns, name), &api.Pod{}) if obj == nil { return nil, err @@ -85,7 +88,7 @@ func (c *FakePods) Get(name string) (result *api.Pod, err error) { func (c *FakePods) List(opts api.ListOptions) (result *api.PodList, err error) { obj, err := c.Fake. - Invokes(core.NewListAction("pods", c.ns, opts), &api.PodList{}) + Invokes(core.NewListAction(podsResource, c.ns, opts), &api.PodList{}) if obj == nil { return nil, err @@ -107,6 +110,6 @@ func (c *FakePods) List(opts api.ListOptions) (result *api.PodList, err error) { // Watch returns a watch.Interface that watches the requested pods. func (c *FakePods) Watch(opts api.ListOptions) (watch.Interface, error) { return c.Fake. 
- InvokesWatch(core.NewWatchAction("pods", c.ns, opts)) + InvokesWatch(core.NewWatchAction(podsResource, c.ns, opts)) } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/fake/fake_pod_expansion.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/fake/fake_pod_expansion.go similarity index 95% rename from Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/fake/fake_pod_expansion.go rename to Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/fake/fake_pod_expansion.go index 53fe932212ab..32fd74c027ea 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/fake/fake_pod_expansion.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/fake/fake_pod_expansion.go @@ -25,7 +25,7 @@ import ( func (c *FakePods) Bind(binding *api.Binding) error { action := core.CreateActionImpl{} action.Verb = "create" - action.Resource = "pods" + action.Resource = podsResource action.Subresource = "bindings" action.Object = binding @@ -37,7 +37,7 @@ func (c *FakePods) GetLogs(name string, opts *api.PodLogOptions) *restclient.Req action := core.GenericActionImpl{} action.Verb = "get" action.Namespace = c.ns - action.Resource = "pod" + action.Resource = podsResource action.Subresource = "logs" action.Value = opts diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/fake/fake_podtemplate.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/fake/fake_podtemplate.go similarity index 75% rename from Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/fake/fake_podtemplate.go rename to Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/fake/fake_podtemplate.go index b900a113c60e..c08d06bea0e7 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/fake/fake_podtemplate.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/fake/fake_podtemplate.go @@ -18,6 +18,7 @@ package fake import ( api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" core "k8s.io/kubernetes/pkg/client/testing/core" labels "k8s.io/kubernetes/pkg/labels" watch "k8s.io/kubernetes/pkg/watch" @@ -29,9 +30,11 @@ type FakePodTemplates struct { ns string } +var podtemplatesResource = unversioned.GroupVersionResource{Group: "", Version: "", Resource: "podtemplates"} + func (c *FakePodTemplates) Create(podTemplate *api.PodTemplate) (result *api.PodTemplate, err error) { obj, err := c.Fake. - Invokes(core.NewCreateAction("podtemplates", c.ns, podTemplate), &api.PodTemplate{}) + Invokes(core.NewCreateAction(podtemplatesResource, c.ns, podTemplate), &api.PodTemplate{}) if obj == nil { return nil, err @@ -41,7 +44,7 @@ func (c *FakePodTemplates) Create(podTemplate *api.PodTemplate) (result *api.Pod func (c *FakePodTemplates) Update(podTemplate *api.PodTemplate) (result *api.PodTemplate, err error) { obj, err := c.Fake. 
- Invokes(core.NewUpdateAction("podtemplates", c.ns, podTemplate), &api.PodTemplate{}) + Invokes(core.NewUpdateAction(podtemplatesResource, c.ns, podTemplate), &api.PodTemplate{}) if obj == nil { return nil, err @@ -51,13 +54,13 @@ func (c *FakePodTemplates) Update(podTemplate *api.PodTemplate) (result *api.Pod func (c *FakePodTemplates) Delete(name string, options *api.DeleteOptions) error { _, err := c.Fake. - Invokes(core.NewDeleteAction("podtemplates", c.ns, name), &api.PodTemplate{}) + Invokes(core.NewDeleteAction(podtemplatesResource, c.ns, name), &api.PodTemplate{}) return err } func (c *FakePodTemplates) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - action := core.NewDeleteCollectionAction("podtemplates", c.ns, listOptions) + action := core.NewDeleteCollectionAction(podtemplatesResource, c.ns, listOptions) _, err := c.Fake.Invokes(action, &api.PodTemplateList{}) return err @@ -65,7 +68,7 @@ func (c *FakePodTemplates) DeleteCollection(options *api.DeleteOptions, listOpti func (c *FakePodTemplates) Get(name string) (result *api.PodTemplate, err error) { obj, err := c.Fake. - Invokes(core.NewGetAction("podtemplates", c.ns, name), &api.PodTemplate{}) + Invokes(core.NewGetAction(podtemplatesResource, c.ns, name), &api.PodTemplate{}) if obj == nil { return nil, err @@ -75,7 +78,7 @@ func (c *FakePodTemplates) Get(name string) (result *api.PodTemplate, err error) func (c *FakePodTemplates) List(opts api.ListOptions) (result *api.PodTemplateList, err error) { obj, err := c.Fake. - Invokes(core.NewListAction("podtemplates", c.ns, opts), &api.PodTemplateList{}) + Invokes(core.NewListAction(podtemplatesResource, c.ns, opts), &api.PodTemplateList{}) if obj == nil { return nil, err @@ -97,6 +100,6 @@ func (c *FakePodTemplates) List(opts api.ListOptions) (result *api.PodTemplateLi // Watch returns a watch.Interface that watches the requested podTemplates. func (c *FakePodTemplates) Watch(opts api.ListOptions) (watch.Interface, error) { return c.Fake. 
- InvokesWatch(core.NewWatchAction("podtemplates", c.ns, opts)) + InvokesWatch(core.NewWatchAction(podtemplatesResource, c.ns, opts)) } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/fake/fake_replicationcontroller.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/fake/fake_replicationcontroller.go similarity index 73% rename from Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/fake/fake_replicationcontroller.go rename to Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/fake/fake_replicationcontroller.go index 205f094565aa..0a7faddbc440 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/fake/fake_replicationcontroller.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/fake/fake_replicationcontroller.go @@ -18,6 +18,7 @@ package fake import ( api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" core "k8s.io/kubernetes/pkg/client/testing/core" labels "k8s.io/kubernetes/pkg/labels" watch "k8s.io/kubernetes/pkg/watch" @@ -29,9 +30,11 @@ type FakeReplicationControllers struct { ns string } +var replicationcontrollersResource = unversioned.GroupVersionResource{Group: "", Version: "", Resource: "replicationcontrollers"} + func (c *FakeReplicationControllers) Create(replicationController *api.ReplicationController) (result *api.ReplicationController, err error) { obj, err := c.Fake. - Invokes(core.NewCreateAction("replicationcontrollers", c.ns, replicationController), &api.ReplicationController{}) + Invokes(core.NewCreateAction(replicationcontrollersResource, c.ns, replicationController), &api.ReplicationController{}) if obj == nil { return nil, err @@ -41,7 +44,7 @@ func (c *FakeReplicationControllers) Create(replicationController *api.Replicati func (c *FakeReplicationControllers) Update(replicationController *api.ReplicationController) (result *api.ReplicationController, err error) { obj, err := c.Fake. - Invokes(core.NewUpdateAction("replicationcontrollers", c.ns, replicationController), &api.ReplicationController{}) + Invokes(core.NewUpdateAction(replicationcontrollersResource, c.ns, replicationController), &api.ReplicationController{}) if obj == nil { return nil, err @@ -51,7 +54,7 @@ func (c *FakeReplicationControllers) Update(replicationController *api.Replicati func (c *FakeReplicationControllers) UpdateStatus(replicationController *api.ReplicationController) (*api.ReplicationController, error) { obj, err := c.Fake. - Invokes(core.NewUpdateSubresourceAction("replicationcontrollers", "status", c.ns, replicationController), &api.ReplicationController{}) + Invokes(core.NewUpdateSubresourceAction(replicationcontrollersResource, "status", c.ns, replicationController), &api.ReplicationController{}) if obj == nil { return nil, err @@ -61,13 +64,13 @@ func (c *FakeReplicationControllers) UpdateStatus(replicationController *api.Rep func (c *FakeReplicationControllers) Delete(name string, options *api.DeleteOptions) error { _, err := c.Fake. 
- Invokes(core.NewDeleteAction("replicationcontrollers", c.ns, name), &api.ReplicationController{}) + Invokes(core.NewDeleteAction(replicationcontrollersResource, c.ns, name), &api.ReplicationController{}) return err } func (c *FakeReplicationControllers) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - action := core.NewDeleteCollectionAction("replicationcontrollers", c.ns, listOptions) + action := core.NewDeleteCollectionAction(replicationcontrollersResource, c.ns, listOptions) _, err := c.Fake.Invokes(action, &api.ReplicationControllerList{}) return err @@ -75,7 +78,7 @@ func (c *FakeReplicationControllers) DeleteCollection(options *api.DeleteOptions func (c *FakeReplicationControllers) Get(name string) (result *api.ReplicationController, err error) { obj, err := c.Fake. - Invokes(core.NewGetAction("replicationcontrollers", c.ns, name), &api.ReplicationController{}) + Invokes(core.NewGetAction(replicationcontrollersResource, c.ns, name), &api.ReplicationController{}) if obj == nil { return nil, err @@ -85,7 +88,7 @@ func (c *FakeReplicationControllers) Get(name string) (result *api.ReplicationCo func (c *FakeReplicationControllers) List(opts api.ListOptions) (result *api.ReplicationControllerList, err error) { obj, err := c.Fake. - Invokes(core.NewListAction("replicationcontrollers", c.ns, opts), &api.ReplicationControllerList{}) + Invokes(core.NewListAction(replicationcontrollersResource, c.ns, opts), &api.ReplicationControllerList{}) if obj == nil { return nil, err @@ -107,6 +110,6 @@ func (c *FakeReplicationControllers) List(opts api.ListOptions) (result *api.Rep // Watch returns a watch.Interface that watches the requested replicationControllers. func (c *FakeReplicationControllers) Watch(opts api.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(core.NewWatchAction("replicationcontrollers", c.ns, opts)) + InvokesWatch(core.NewWatchAction(replicationcontrollersResource, c.ns, opts)) } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/fake/fake_resourcequota.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/fake/fake_resourcequota.go similarity index 74% rename from Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/fake/fake_resourcequota.go rename to Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/fake/fake_resourcequota.go index 056e61ed55e3..d91ee2685bb5 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/fake/fake_resourcequota.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/fake/fake_resourcequota.go @@ -18,6 +18,7 @@ package fake import ( api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" core "k8s.io/kubernetes/pkg/client/testing/core" labels "k8s.io/kubernetes/pkg/labels" watch "k8s.io/kubernetes/pkg/watch" @@ -29,9 +30,11 @@ type FakeResourceQuotas struct { ns string } +var resourcequotasResource = unversioned.GroupVersionResource{Group: "", Version: "", Resource: "resourcequotas"} + func (c *FakeResourceQuotas) Create(resourceQuota *api.ResourceQuota) (result *api.ResourceQuota, err error) { obj, err := c.Fake. 
- Invokes(core.NewCreateAction("resourcequotas", c.ns, resourceQuota), &api.ResourceQuota{}) + Invokes(core.NewCreateAction(resourcequotasResource, c.ns, resourceQuota), &api.ResourceQuota{}) if obj == nil { return nil, err @@ -41,7 +44,7 @@ func (c *FakeResourceQuotas) Create(resourceQuota *api.ResourceQuota) (result *a func (c *FakeResourceQuotas) Update(resourceQuota *api.ResourceQuota) (result *api.ResourceQuota, err error) { obj, err := c.Fake. - Invokes(core.NewUpdateAction("resourcequotas", c.ns, resourceQuota), &api.ResourceQuota{}) + Invokes(core.NewUpdateAction(resourcequotasResource, c.ns, resourceQuota), &api.ResourceQuota{}) if obj == nil { return nil, err @@ -51,7 +54,7 @@ func (c *FakeResourceQuotas) Update(resourceQuota *api.ResourceQuota) (result *a func (c *FakeResourceQuotas) UpdateStatus(resourceQuota *api.ResourceQuota) (*api.ResourceQuota, error) { obj, err := c.Fake. - Invokes(core.NewUpdateSubresourceAction("resourcequotas", "status", c.ns, resourceQuota), &api.ResourceQuota{}) + Invokes(core.NewUpdateSubresourceAction(resourcequotasResource, "status", c.ns, resourceQuota), &api.ResourceQuota{}) if obj == nil { return nil, err @@ -61,13 +64,13 @@ func (c *FakeResourceQuotas) UpdateStatus(resourceQuota *api.ResourceQuota) (*ap func (c *FakeResourceQuotas) Delete(name string, options *api.DeleteOptions) error { _, err := c.Fake. - Invokes(core.NewDeleteAction("resourcequotas", c.ns, name), &api.ResourceQuota{}) + Invokes(core.NewDeleteAction(resourcequotasResource, c.ns, name), &api.ResourceQuota{}) return err } func (c *FakeResourceQuotas) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - action := core.NewDeleteCollectionAction("resourcequotas", c.ns, listOptions) + action := core.NewDeleteCollectionAction(resourcequotasResource, c.ns, listOptions) _, err := c.Fake.Invokes(action, &api.ResourceQuotaList{}) return err @@ -75,7 +78,7 @@ func (c *FakeResourceQuotas) DeleteCollection(options *api.DeleteOptions, listOp func (c *FakeResourceQuotas) Get(name string) (result *api.ResourceQuota, err error) { obj, err := c.Fake. - Invokes(core.NewGetAction("resourcequotas", c.ns, name), &api.ResourceQuota{}) + Invokes(core.NewGetAction(resourcequotasResource, c.ns, name), &api.ResourceQuota{}) if obj == nil { return nil, err @@ -85,7 +88,7 @@ func (c *FakeResourceQuotas) Get(name string) (result *api.ResourceQuota, err er func (c *FakeResourceQuotas) List(opts api.ListOptions) (result *api.ResourceQuotaList, err error) { obj, err := c.Fake. - Invokes(core.NewListAction("resourcequotas", c.ns, opts), &api.ResourceQuotaList{}) + Invokes(core.NewListAction(resourcequotasResource, c.ns, opts), &api.ResourceQuotaList{}) if obj == nil { return nil, err @@ -107,6 +110,6 @@ func (c *FakeResourceQuotas) List(opts api.ListOptions) (result *api.ResourceQuo // Watch returns a watch.Interface that watches the requested resourceQuotas. func (c *FakeResourceQuotas) Watch(opts api.ListOptions) (watch.Interface, error) { return c.Fake. 
- InvokesWatch(core.NewWatchAction("resourcequotas", c.ns, opts)) + InvokesWatch(core.NewWatchAction(resourcequotasResource, c.ns, opts)) } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/fake/fake_secret.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/fake/fake_secret.go similarity index 76% rename from Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/fake/fake_secret.go rename to Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/fake/fake_secret.go index 2f09be6e5e8b..0caaa47cf71c 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/fake/fake_secret.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/fake/fake_secret.go @@ -18,6 +18,7 @@ package fake import ( api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" core "k8s.io/kubernetes/pkg/client/testing/core" labels "k8s.io/kubernetes/pkg/labels" watch "k8s.io/kubernetes/pkg/watch" @@ -29,9 +30,11 @@ type FakeSecrets struct { ns string } +var secretsResource = unversioned.GroupVersionResource{Group: "", Version: "", Resource: "secrets"} + func (c *FakeSecrets) Create(secret *api.Secret) (result *api.Secret, err error) { obj, err := c.Fake. - Invokes(core.NewCreateAction("secrets", c.ns, secret), &api.Secret{}) + Invokes(core.NewCreateAction(secretsResource, c.ns, secret), &api.Secret{}) if obj == nil { return nil, err @@ -41,7 +44,7 @@ func (c *FakeSecrets) Create(secret *api.Secret) (result *api.Secret, err error) func (c *FakeSecrets) Update(secret *api.Secret) (result *api.Secret, err error) { obj, err := c.Fake. - Invokes(core.NewUpdateAction("secrets", c.ns, secret), &api.Secret{}) + Invokes(core.NewUpdateAction(secretsResource, c.ns, secret), &api.Secret{}) if obj == nil { return nil, err @@ -51,13 +54,13 @@ func (c *FakeSecrets) Update(secret *api.Secret) (result *api.Secret, err error) func (c *FakeSecrets) Delete(name string, options *api.DeleteOptions) error { _, err := c.Fake. - Invokes(core.NewDeleteAction("secrets", c.ns, name), &api.Secret{}) + Invokes(core.NewDeleteAction(secretsResource, c.ns, name), &api.Secret{}) return err } func (c *FakeSecrets) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - action := core.NewDeleteCollectionAction("secrets", c.ns, listOptions) + action := core.NewDeleteCollectionAction(secretsResource, c.ns, listOptions) _, err := c.Fake.Invokes(action, &api.SecretList{}) return err @@ -65,7 +68,7 @@ func (c *FakeSecrets) DeleteCollection(options *api.DeleteOptions, listOptions a func (c *FakeSecrets) Get(name string) (result *api.Secret, err error) { obj, err := c.Fake. - Invokes(core.NewGetAction("secrets", c.ns, name), &api.Secret{}) + Invokes(core.NewGetAction(secretsResource, c.ns, name), &api.Secret{}) if obj == nil { return nil, err @@ -75,7 +78,7 @@ func (c *FakeSecrets) Get(name string) (result *api.Secret, err error) { func (c *FakeSecrets) List(opts api.ListOptions) (result *api.SecretList, err error) { obj, err := c.Fake. 
- Invokes(core.NewListAction("secrets", c.ns, opts), &api.SecretList{}) + Invokes(core.NewListAction(secretsResource, c.ns, opts), &api.SecretList{}) if obj == nil { return nil, err @@ -97,6 +100,6 @@ func (c *FakeSecrets) List(opts api.ListOptions) (result *api.SecretList, err er // Watch returns a watch.Interface that watches the requested secrets. func (c *FakeSecrets) Watch(opts api.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(core.NewWatchAction("secrets", c.ns, opts)) + InvokesWatch(core.NewWatchAction(secretsResource, c.ns, opts)) } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/fake/fake_service.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/fake/fake_service.go new file mode 100644 index 000000000000..62eae6481e54 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/fake/fake_service.go @@ -0,0 +1,115 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + core "k8s.io/kubernetes/pkg/client/testing/core" + labels "k8s.io/kubernetes/pkg/labels" + watch "k8s.io/kubernetes/pkg/watch" +) + +// FakeServices implements ServiceInterface +type FakeServices struct { + Fake *FakeCore + ns string +} + +var servicesResource = unversioned.GroupVersionResource{Group: "", Version: "", Resource: "services"} + +func (c *FakeServices) Create(service *api.Service) (result *api.Service, err error) { + obj, err := c.Fake. + Invokes(core.NewCreateAction(servicesResource, c.ns, service), &api.Service{}) + + if obj == nil { + return nil, err + } + return obj.(*api.Service), err +} + +func (c *FakeServices) Update(service *api.Service) (result *api.Service, err error) { + obj, err := c.Fake. + Invokes(core.NewUpdateAction(servicesResource, c.ns, service), &api.Service{}) + + if obj == nil { + return nil, err + } + return obj.(*api.Service), err +} + +func (c *FakeServices) UpdateStatus(service *api.Service) (*api.Service, error) { + obj, err := c.Fake. + Invokes(core.NewUpdateSubresourceAction(servicesResource, "status", c.ns, service), &api.Service{}) + + if obj == nil { + return nil, err + } + return obj.(*api.Service), err +} + +func (c *FakeServices) Delete(name string, options *api.DeleteOptions) error { + _, err := c.Fake. + Invokes(core.NewDeleteAction(servicesResource, c.ns, name), &api.Service{}) + + return err +} + +func (c *FakeServices) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + action := core.NewDeleteCollectionAction(servicesResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &api.ServiceList{}) + return err +} + +func (c *FakeServices) Get(name string) (result *api.Service, err error) { + obj, err := c.Fake. 
+		Invokes(core.NewGetAction(servicesResource, c.ns, name), &api.Service{})
+
+	if obj == nil {
+		return nil, err
+	}
+	return obj.(*api.Service), err
+}
+
+func (c *FakeServices) List(opts api.ListOptions) (result *api.ServiceList, err error) {
+	obj, err := c.Fake.
+		Invokes(core.NewListAction(servicesResource, c.ns, opts), &api.ServiceList{})
+
+	if obj == nil {
+		return nil, err
+	}
+
+	label := opts.LabelSelector
+	if label == nil {
+		label = labels.Everything()
+	}
+	list := &api.ServiceList{}
+	for _, item := range obj.(*api.ServiceList).Items {
+		if label.Matches(labels.Set(item.Labels)) {
+			list.Items = append(list.Items, item)
+		}
+	}
+	return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested services.
+func (c *FakeServices) Watch(opts api.ListOptions) (watch.Interface, error) {
+	return c.Fake.
+		InvokesWatch(core.NewWatchAction(servicesResource, c.ns, opts))
+
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/fake/fake_service_expansion.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/fake/fake_service_expansion.go
similarity index 88%
rename from Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/fake/fake_service_expansion.go
rename to Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/fake/fake_service_expansion.go
index 18f1b7803db1..3494b873762e 100644
--- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/fake/fake_service_expansion.go
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/fake/fake_service_expansion.go
@@ -22,5 +22,5 @@ import (
 )
 
 func (c *FakeServices) ProxyGet(scheme, name, port, path string, params map[string]string) restclient.ResponseWrapper {
-	return c.Fake.InvokesProxy(core.NewProxyGetAction("services", c.ns, scheme, name, port, path, params))
+	return c.Fake.InvokesProxy(core.NewProxyGetAction(servicesResource, c.ns, scheme, name, port, path, params))
 }
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/fake/fake_serviceaccount.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/fake/fake_serviceaccount.go
similarity index 75%
rename from Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/fake/fake_serviceaccount.go
rename to Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/fake/fake_serviceaccount.go
index 61d7a04f5ebe..fbce964a1246 100644
--- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/fake/fake_serviceaccount.go
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/fake/fake_serviceaccount.go
@@ -18,6 +18,7 @@ package fake
 
 import (
 	api "k8s.io/kubernetes/pkg/api"
+	unversioned "k8s.io/kubernetes/pkg/api/unversioned"
 	core "k8s.io/kubernetes/pkg/client/testing/core"
 	labels "k8s.io/kubernetes/pkg/labels"
 	watch "k8s.io/kubernetes/pkg/watch"
@@ -29,9 +30,11 @@ type FakeServiceAccounts struct {
 	ns string
 }
 
+var serviceaccountsResource = unversioned.GroupVersionResource{Group: "", Version: "", Resource: "serviceaccounts"}
+
 func (c *FakeServiceAccounts) Create(serviceAccount *api.ServiceAccount) (result *api.ServiceAccount, err error) {
 	obj, err := c.Fake.
-		Invokes(core.NewCreateAction("serviceaccounts", c.ns, serviceAccount), &api.ServiceAccount{})
+		Invokes(core.NewCreateAction(serviceaccountsResource, c.ns, serviceAccount), &api.ServiceAccount{})
 
 	if obj == nil {
 		return nil, err
@@ -41,7 +44,7 @@ func (c *FakeServiceAccounts) Create(serviceAccount *api.ServiceAccount) (result
 
 func (c *FakeServiceAccounts) Update(serviceAccount *api.ServiceAccount) (result *api.ServiceAccount, err error) {
 	obj, err := c.Fake.
-		Invokes(core.NewUpdateAction("serviceaccounts", c.ns, serviceAccount), &api.ServiceAccount{})
+		Invokes(core.NewUpdateAction(serviceaccountsResource, c.ns, serviceAccount), &api.ServiceAccount{})
 
 	if obj == nil {
 		return nil, err
@@ -51,13 +54,13 @@ func (c *FakeServiceAccounts) Update(serviceAccount *api.ServiceAccount) (result
 
 func (c *FakeServiceAccounts) Delete(name string, options *api.DeleteOptions) error {
 	_, err := c.Fake.
-		Invokes(core.NewDeleteAction("serviceaccounts", c.ns, name), &api.ServiceAccount{})
+		Invokes(core.NewDeleteAction(serviceaccountsResource, c.ns, name), &api.ServiceAccount{})
 
 	return err
 }
 
 func (c *FakeServiceAccounts) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error {
-	action := core.NewDeleteCollectionAction("serviceaccounts", c.ns, listOptions)
+	action := core.NewDeleteCollectionAction(serviceaccountsResource, c.ns, listOptions)
 
 	_, err := c.Fake.Invokes(action, &api.ServiceAccountList{})
 	return err
@@ -65,7 +68,7 @@ func (c *FakeServiceAccounts) DeleteCollection(options *api.DeleteOptions, listO
 
 func (c *FakeServiceAccounts) Get(name string) (result *api.ServiceAccount, err error) {
 	obj, err := c.Fake.
-		Invokes(core.NewGetAction("serviceaccounts", c.ns, name), &api.ServiceAccount{})
+		Invokes(core.NewGetAction(serviceaccountsResource, c.ns, name), &api.ServiceAccount{})
 
 	if obj == nil {
 		return nil, err
@@ -75,7 +78,7 @@ func (c *FakeServiceAccounts) Get(name string) (result *api.ServiceAccount, err
 
 func (c *FakeServiceAccounts) List(opts api.ListOptions) (result *api.ServiceAccountList, err error) {
 	obj, err := c.Fake.
-		Invokes(core.NewListAction("serviceaccounts", c.ns, opts), &api.ServiceAccountList{})
+		Invokes(core.NewListAction(serviceaccountsResource, c.ns, opts), &api.ServiceAccountList{})
 
 	if obj == nil {
 		return nil, err
@@ -97,6 +100,6 @@ func (c *FakeServiceAccounts) List(opts api.ListOptions) (result *api.ServiceAcc
 // Watch returns a watch.Interface that watches the requested serviceAccounts.
 func (c *FakeServiceAccounts) Watch(opts api.ListOptions) (watch.Interface, error) {
 	return c.Fake.
- InvokesWatch(core.NewWatchAction("serviceaccounts", c.ns, opts)) + InvokesWatch(core.NewWatchAction(serviceaccountsResource, c.ns, opts)) } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/generated_expansion.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/generated_expansion.go similarity index 100% rename from Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/generated_expansion.go rename to Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/generated_expansion.go diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/limitrange.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/limitrange.go similarity index 100% rename from Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/limitrange.go rename to Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/limitrange.go diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/namespace.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/namespace.go similarity index 100% rename from Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/namespace.go rename to Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/namespace.go diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/namespace_expansion.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/namespace_expansion.go similarity index 100% rename from Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/namespace_expansion.go rename to Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/namespace_expansion.go diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/node.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/node.go similarity index 100% rename from Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/node.go rename to Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/node.go diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/persistentvolume.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/persistentvolume.go similarity index 100% rename from Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/persistentvolume.go rename to Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/persistentvolume.go diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/persistentvolumeclaim.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/persistentvolumeclaim.go similarity index 100% rename from 
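The common thread in these fake clients is the switch from bare resource-name strings to typed unversioned.GroupVersionResource values, so recorded actions carry the full group/version/resource triple. A minimal sketch of how a test could rely on that; it assumes the Fake.AddReactor and Action.GetResource APIs from the vendored k8s.io/kubernetes/pkg/client/testing/core, and every other name is illustrative:

    package fake_test

    import (
        api "k8s.io/kubernetes/pkg/api"
        core "k8s.io/kubernetes/pkg/client/testing/core"
        runtime "k8s.io/kubernetes/pkg/runtime"
    )

    // registerSketchReactor answers "get serviceaccounts" calls and inspects the
    // typed resource carried by the action (a GroupVersionResource after this change).
    func registerSketchReactor(fake *core.Fake) {
        fake.AddReactor("get", "serviceaccounts", func(action core.Action) (bool, runtime.Object, error) {
            gvr := action.GetResource() // typed value, not a bare string
            if gvr.Group == "" && gvr.Resource == "serviceaccounts" {
                return true, &api.ServiceAccount{}, nil
            }
            return false, nil, nil
        })
    }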
Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/persistentvolumeclaim.go rename to Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/persistentvolumeclaim.go diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/pod.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/pod.go similarity index 100% rename from Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/pod.go rename to Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/pod.go diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/pod_expansion.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/pod_expansion.go similarity index 100% rename from Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/pod_expansion.go rename to Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/pod_expansion.go diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/podtemplate.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/podtemplate.go similarity index 100% rename from Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/podtemplate.go rename to Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/podtemplate.go diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/replicationcontroller.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/replicationcontroller.go similarity index 100% rename from Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/replicationcontroller.go rename to Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/replicationcontroller.go diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/resourcequota.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/resourcequota.go similarity index 100% rename from Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/resourcequota.go rename to Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/resourcequota.go diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/secret.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/secret.go similarity index 100% rename from Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/secret.go rename to Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/secret.go diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/service.go 
b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/service.go new file mode 100644 index 000000000000..006f601c271f --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/service.go @@ -0,0 +1,149 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + api "k8s.io/kubernetes/pkg/api" + watch "k8s.io/kubernetes/pkg/watch" +) + +// ServicesGetter has a method to return a ServiceInterface. +// A group's client should implement this interface. +type ServicesGetter interface { + Services(namespace string) ServiceInterface +} + +// ServiceInterface has methods to work with Service resources. +type ServiceInterface interface { + Create(*api.Service) (*api.Service, error) + Update(*api.Service) (*api.Service, error) + UpdateStatus(*api.Service) (*api.Service, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*api.Service, error) + List(opts api.ListOptions) (*api.ServiceList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + ServiceExpansion +} + +// services implements ServiceInterface +type services struct { + client *CoreClient + ns string +} + +// newServices returns a Services +func newServices(c *CoreClient, namespace string) *services { + return &services{ + client: c, + ns: namespace, + } +} + +// Create takes the representation of a service and creates it. Returns the server's representation of the service, and an error, if there is any. +func (c *services) Create(service *api.Service) (result *api.Service, err error) { + result = &api.Service{} + err = c.client.Post(). + Namespace(c.ns). + Resource("services"). + Body(service). + Do(). + Into(result) + return +} + +// Update takes the representation of a service and updates it. Returns the server's representation of the service, and an error, if there is any. +func (c *services) Update(service *api.Service) (result *api.Service, err error) { + result = &api.Service{} + err = c.client.Put(). + Namespace(c.ns). + Resource("services"). + Name(service.Name). + Body(service). + Do(). + Into(result) + return +} + +func (c *services) UpdateStatus(service *api.Service) (result *api.Service, err error) { + result = &api.Service{} + err = c.client.Put(). + Namespace(c.ns). + Resource("services"). + Name(service.Name). + SubResource("status"). + Body(service). + Do(). + Into(result) + return +} + +// Delete takes name of the service and deletes it. Returns an error if one occurs. +func (c *services) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("services"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. 
+func (c *services) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("services"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the service, and returns the corresponding service object, and an error if there is any. +func (c *services) Get(name string) (result *api.Service, err error) { + result = &api.Service{} + err = c.client.Get(). + Namespace(c.ns). + Resource("services"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Services that match those selectors. +func (c *services) List(opts api.ListOptions) (result *api.ServiceList, err error) { + result = &api.ServiceList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("services"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested services. +func (c *services) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("services"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/service_expansion.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/service_expansion.go similarity index 100% rename from Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/service_expansion.go rename to Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/service_expansion.go diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/serviceaccount.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/serviceaccount.go similarity index 100% rename from Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/serviceaccount.go rename to Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/serviceaccount.go diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned/daemonset.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/daemonset.go similarity index 100% rename from Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned/daemonset.go rename to Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/daemonset.go diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned/deployment.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/deployment.go similarity index 100% rename from Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned/deployment.go rename to Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/deployment.go diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned/deployment_expansion.go 
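The new service.go follows the generated client's fluent request-builder pattern: each chained call contributes a URL segment or query parameter. Roughly (illustrative; exact paths depend on the configured APIPath and group version):

    Create -> POST {host}/api/{version}/namespaces/{ns}/services
    Get    -> GET  {host}/api/{version}/namespaces/{ns}/services/{name}
    Watch  -> GET  {host}/api/{version}/watch/namespaces/{ns}/services   (via Prefix("watch"))

A short usage sketch, assuming client is any value satisfying the ServicesGetter interface defined above:

    svc, err := client.Services("default").Create(&api.Service{
        ObjectMeta: api.ObjectMeta{Name: "example"}, // illustrative object
    })
    if err != nil {
        // handle the create failure
    }
    _ = svc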
b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/deployment_expansion.go similarity index 100% rename from Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned/deployment_expansion.go rename to Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/deployment_expansion.go diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/doc.go new file mode 100644 index 000000000000..47517b6422dd --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with the default arguments. + +// This package has the automatically generated typed clients. +package unversioned diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/extensions_client.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/extensions_client.go new file mode 100644 index 000000000000..9b9f4749a64d --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/extensions_client.go @@ -0,0 +1,131 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + api "k8s.io/kubernetes/pkg/api" + registered "k8s.io/kubernetes/pkg/apimachinery/registered" + restclient "k8s.io/kubernetes/pkg/client/restclient" +) + +type ExtensionsInterface interface { + GetRESTClient() *restclient.RESTClient + DaemonSetsGetter + DeploymentsGetter + IngressesGetter + PodSecurityPoliciesGetter + ReplicaSetsGetter + ScalesGetter + ThirdPartyResourcesGetter +} + +// ExtensionsClient is used to interact with features provided by the Extensions group. 
+type ExtensionsClient struct { + *restclient.RESTClient +} + +func (c *ExtensionsClient) DaemonSets(namespace string) DaemonSetInterface { + return newDaemonSets(c, namespace) +} + +func (c *ExtensionsClient) Deployments(namespace string) DeploymentInterface { + return newDeployments(c, namespace) +} + +func (c *ExtensionsClient) Ingresses(namespace string) IngressInterface { + return newIngresses(c, namespace) +} + +func (c *ExtensionsClient) PodSecurityPolicies() PodSecurityPolicyInterface { + return newPodSecurityPolicies(c) +} + +func (c *ExtensionsClient) ReplicaSets(namespace string) ReplicaSetInterface { + return newReplicaSets(c, namespace) +} + +func (c *ExtensionsClient) Scales(namespace string) ScaleInterface { + return newScales(c, namespace) +} + +func (c *ExtensionsClient) ThirdPartyResources() ThirdPartyResourceInterface { + return newThirdPartyResources(c) +} + +// NewForConfig creates a new ExtensionsClient for the given config. +func NewForConfig(c *restclient.Config) (*ExtensionsClient, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := restclient.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &ExtensionsClient{client}, nil +} + +// NewForConfigOrDie creates a new ExtensionsClient for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *restclient.Config) *ExtensionsClient { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new ExtensionsClient for the given RESTClient. +func New(c *restclient.RESTClient) *ExtensionsClient { + return &ExtensionsClient{c} +} + +func setConfigDefaults(config *restclient.Config) error { + // if extensions group is not registered, return an error + g, err := registered.Group("extensions") + if err != nil { + return err + } + config.APIPath = "/apis" + if config.UserAgent == "" { + config.UserAgent = restclient.DefaultKubernetesUserAgent() + } + // TODO: Unconditionally set the config.Version, until we fix the config. + //if config.Version == "" { + copyGroupVersion := g.GroupVersion + config.GroupVersion = &copyGroupVersion + //} + + config.NegotiatedSerializer = api.Codecs + + if config.QPS == 0 { + config.QPS = 5 + } + if config.Burst == 0 { + config.Burst = 10 + } + return nil +} + +// GetRESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *ExtensionsClient) GetRESTClient() *restclient.RESTClient { + if c == nil { + return nil + } + return c.RESTClient +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/fake/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/fake/doc.go new file mode 100644 index 000000000000..eb358c26c80f --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License.
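A short usage sketch for the constructors above; the host is a placeholder and error handling is reduced to the essentials:

    cfg := &restclient.Config{Host: "https://127.0.0.1:6443"} // placeholder address
    extensionsClient, err := NewForConfig(cfg)                // applies setConfigDefaults
    if err != nil {
        // fails if the "extensions" group is not registered
        panic(err)
    }
    daemonSets, err := extensionsClient.DaemonSets("kube-system").List(api.ListOptions{})
    if err != nil {
        panic(err)
    }
    _ = daemonSets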
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with the default arguments. + +// Package fake has the automatically generated clients. +package fake diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned/fake/fake_daemonset.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/fake/fake_daemonset.go similarity index 75% rename from Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned/fake/fake_daemonset.go rename to Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/fake/fake_daemonset.go index 7de9f927f773..d53fa797aa6d 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned/fake/fake_daemonset.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/fake/fake_daemonset.go @@ -18,6 +18,7 @@ package fake import ( api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" extensions "k8s.io/kubernetes/pkg/apis/extensions" core "k8s.io/kubernetes/pkg/client/testing/core" labels "k8s.io/kubernetes/pkg/labels" @@ -30,9 +31,11 @@ type FakeDaemonSets struct { ns string } +var daemonsetsResource = unversioned.GroupVersionResource{Group: "extensions", Version: "", Resource: "daemonsets"} + func (c *FakeDaemonSets) Create(daemonSet *extensions.DaemonSet) (result *extensions.DaemonSet, err error) { obj, err := c.Fake. - Invokes(core.NewCreateAction("daemonsets", c.ns, daemonSet), &extensions.DaemonSet{}) + Invokes(core.NewCreateAction(daemonsetsResource, c.ns, daemonSet), &extensions.DaemonSet{}) if obj == nil { return nil, err @@ -42,7 +45,7 @@ func (c *FakeDaemonSets) Create(daemonSet *extensions.DaemonSet) (result *extens func (c *FakeDaemonSets) Update(daemonSet *extensions.DaemonSet) (result *extensions.DaemonSet, err error) { obj, err := c.Fake. - Invokes(core.NewUpdateAction("daemonsets", c.ns, daemonSet), &extensions.DaemonSet{}) + Invokes(core.NewUpdateAction(daemonsetsResource, c.ns, daemonSet), &extensions.DaemonSet{}) if obj == nil { return nil, err @@ -52,7 +55,7 @@ func (c *FakeDaemonSets) Update(daemonSet *extensions.DaemonSet) (result *extens func (c *FakeDaemonSets) UpdateStatus(daemonSet *extensions.DaemonSet) (*extensions.DaemonSet, error) { obj, err := c.Fake. - Invokes(core.NewUpdateSubresourceAction("daemonsets", "status", c.ns, daemonSet), &extensions.DaemonSet{}) + Invokes(core.NewUpdateSubresourceAction(daemonsetsResource, "status", c.ns, daemonSet), &extensions.DaemonSet{}) if obj == nil { return nil, err @@ -62,13 +65,13 @@ func (c *FakeDaemonSets) UpdateStatus(daemonSet *extensions.DaemonSet) (*extensi func (c *FakeDaemonSets) Delete(name string, options *api.DeleteOptions) error { _, err := c.Fake. 
- Invokes(core.NewDeleteAction("daemonsets", c.ns, name), &extensions.DaemonSet{}) + Invokes(core.NewDeleteAction(daemonsetsResource, c.ns, name), &extensions.DaemonSet{}) return err } func (c *FakeDaemonSets) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - action := core.NewDeleteCollectionAction("daemonsets", c.ns, listOptions) + action := core.NewDeleteCollectionAction(daemonsetsResource, c.ns, listOptions) _, err := c.Fake.Invokes(action, &extensions.DaemonSetList{}) return err @@ -76,7 +79,7 @@ func (c *FakeDaemonSets) DeleteCollection(options *api.DeleteOptions, listOption func (c *FakeDaemonSets) Get(name string) (result *extensions.DaemonSet, err error) { obj, err := c.Fake. - Invokes(core.NewGetAction("daemonsets", c.ns, name), &extensions.DaemonSet{}) + Invokes(core.NewGetAction(daemonsetsResource, c.ns, name), &extensions.DaemonSet{}) if obj == nil { return nil, err @@ -86,7 +89,7 @@ func (c *FakeDaemonSets) Get(name string) (result *extensions.DaemonSet, err err func (c *FakeDaemonSets) List(opts api.ListOptions) (result *extensions.DaemonSetList, err error) { obj, err := c.Fake. - Invokes(core.NewListAction("daemonsets", c.ns, opts), &extensions.DaemonSetList{}) + Invokes(core.NewListAction(daemonsetsResource, c.ns, opts), &extensions.DaemonSetList{}) if obj == nil { return nil, err @@ -108,6 +111,6 @@ func (c *FakeDaemonSets) List(opts api.ListOptions) (result *extensions.DaemonSe // Watch returns a watch.Interface that watches the requested daemonSets. func (c *FakeDaemonSets) Watch(opts api.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(core.NewWatchAction("daemonsets", c.ns, opts)) + InvokesWatch(core.NewWatchAction(daemonsetsResource, c.ns, opts)) } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned/fake/fake_deployment.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/fake/fake_deployment.go similarity index 75% rename from Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned/fake/fake_deployment.go rename to Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/fake/fake_deployment.go index 748968a9dbf5..c70a3a81d5f9 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned/fake/fake_deployment.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/fake/fake_deployment.go @@ -18,6 +18,7 @@ package fake import ( api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" extensions "k8s.io/kubernetes/pkg/apis/extensions" core "k8s.io/kubernetes/pkg/client/testing/core" labels "k8s.io/kubernetes/pkg/labels" @@ -30,9 +31,11 @@ type FakeDeployments struct { ns string } +var deploymentsResource = unversioned.GroupVersionResource{Group: "extensions", Version: "", Resource: "deployments"} + func (c *FakeDeployments) Create(deployment *extensions.Deployment) (result *extensions.Deployment, err error) { obj, err := c.Fake. 
- Invokes(core.NewCreateAction("deployments", c.ns, deployment), &extensions.Deployment{}) + Invokes(core.NewCreateAction(deploymentsResource, c.ns, deployment), &extensions.Deployment{}) if obj == nil { return nil, err @@ -42,7 +45,7 @@ func (c *FakeDeployments) Create(deployment *extensions.Deployment) (result *ext func (c *FakeDeployments) Update(deployment *extensions.Deployment) (result *extensions.Deployment, err error) { obj, err := c.Fake. - Invokes(core.NewUpdateAction("deployments", c.ns, deployment), &extensions.Deployment{}) + Invokes(core.NewUpdateAction(deploymentsResource, c.ns, deployment), &extensions.Deployment{}) if obj == nil { return nil, err @@ -52,7 +55,7 @@ func (c *FakeDeployments) Update(deployment *extensions.Deployment) (result *ext func (c *FakeDeployments) UpdateStatus(deployment *extensions.Deployment) (*extensions.Deployment, error) { obj, err := c.Fake. - Invokes(core.NewUpdateSubresourceAction("deployments", "status", c.ns, deployment), &extensions.Deployment{}) + Invokes(core.NewUpdateSubresourceAction(deploymentsResource, "status", c.ns, deployment), &extensions.Deployment{}) if obj == nil { return nil, err @@ -62,13 +65,13 @@ func (c *FakeDeployments) UpdateStatus(deployment *extensions.Deployment) (*exte func (c *FakeDeployments) Delete(name string, options *api.DeleteOptions) error { _, err := c.Fake. - Invokes(core.NewDeleteAction("deployments", c.ns, name), &extensions.Deployment{}) + Invokes(core.NewDeleteAction(deploymentsResource, c.ns, name), &extensions.Deployment{}) return err } func (c *FakeDeployments) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - action := core.NewDeleteCollectionAction("deployments", c.ns, listOptions) + action := core.NewDeleteCollectionAction(deploymentsResource, c.ns, listOptions) _, err := c.Fake.Invokes(action, &extensions.DeploymentList{}) return err @@ -76,7 +79,7 @@ func (c *FakeDeployments) DeleteCollection(options *api.DeleteOptions, listOptio func (c *FakeDeployments) Get(name string) (result *extensions.Deployment, err error) { obj, err := c.Fake. - Invokes(core.NewGetAction("deployments", c.ns, name), &extensions.Deployment{}) + Invokes(core.NewGetAction(deploymentsResource, c.ns, name), &extensions.Deployment{}) if obj == nil { return nil, err @@ -86,7 +89,7 @@ func (c *FakeDeployments) Get(name string) (result *extensions.Deployment, err e func (c *FakeDeployments) List(opts api.ListOptions) (result *extensions.DeploymentList, err error) { obj, err := c.Fake. - Invokes(core.NewListAction("deployments", c.ns, opts), &extensions.DeploymentList{}) + Invokes(core.NewListAction(deploymentsResource, c.ns, opts), &extensions.DeploymentList{}) if obj == nil { return nil, err @@ -108,6 +111,6 @@ func (c *FakeDeployments) List(opts api.ListOptions) (result *extensions.Deploym // Watch returns a watch.Interface that watches the requested deployments. func (c *FakeDeployments) Watch(opts api.ListOptions) (watch.Interface, error) { return c.Fake. 
- InvokesWatch(core.NewWatchAction("deployments", c.ns, opts)) + InvokesWatch(core.NewWatchAction(deploymentsResource, c.ns, opts)) } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned/fake/fake_deployment_expansion.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/fake/fake_deployment_expansion.go similarity index 96% rename from Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned/fake/fake_deployment_expansion.go rename to Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/fake/fake_deployment_expansion.go index 3edc64c010bc..3a7c06e50546 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned/fake/fake_deployment_expansion.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/fake/fake_deployment_expansion.go @@ -24,7 +24,7 @@ import ( func (c *FakeDeployments) Rollback(deploymentRollback *extensions.DeploymentRollback) error { action := core.CreateActionImpl{} action.Verb = "create" - action.Resource = "deployments" + action.Resource = deploymentsResource action.Subresource = "rollback" action.Object = deploymentRollback diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned/fake/fake_extensions_client.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/fake/fake_extensions_client.go similarity index 68% rename from Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned/fake/fake_extensions_client.go rename to Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/fake/fake_extensions_client.go index 2f7fb0423854..7c2fa08518a2 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned/fake/fake_extensions_client.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/fake/fake_extensions_client.go @@ -17,8 +17,9 @@ limitations under the License. 
package fake import ( + unversioned "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned" + restclient "k8s.io/kubernetes/pkg/client/restclient" core "k8s.io/kubernetes/pkg/client/testing/core" - unversioned "k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned" ) type FakeExtensions struct { @@ -33,16 +34,12 @@ func (c *FakeExtensions) Deployments(namespace string) unversioned.DeploymentInt return &FakeDeployments{c, namespace} } -func (c *FakeExtensions) HorizontalPodAutoscalers(namespace string) unversioned.HorizontalPodAutoscalerInterface { - return &FakeHorizontalPodAutoscalers{c, namespace} -} - func (c *FakeExtensions) Ingresses(namespace string) unversioned.IngressInterface { return &FakeIngresses{c, namespace} } -func (c *FakeExtensions) Jobs(namespace string) unversioned.JobInterface { - return &FakeJobs{c, namespace} +func (c *FakeExtensions) PodSecurityPolicies() unversioned.PodSecurityPolicyInterface { + return &FakePodSecurityPolicies{c} } func (c *FakeExtensions) ReplicaSets(namespace string) unversioned.ReplicaSetInterface { @@ -53,6 +50,12 @@ func (c *FakeExtensions) Scales(namespace string) unversioned.ScaleInterface { return &FakeScales{c, namespace} } -func (c *FakeExtensions) ThirdPartyResources(namespace string) unversioned.ThirdPartyResourceInterface { - return &FakeThirdPartyResources{c, namespace} +func (c *FakeExtensions) ThirdPartyResources() unversioned.ThirdPartyResourceInterface { + return &FakeThirdPartyResources{c} +} + +// GetRESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeExtensions) GetRESTClient() *restclient.RESTClient { + return nil } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned/fake/fake_ingress.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/fake/fake_ingress.go similarity index 76% rename from Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned/fake/fake_ingress.go rename to Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/fake/fake_ingress.go index a331644e47c0..68578ce120bf 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned/fake/fake_ingress.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/fake/fake_ingress.go @@ -18,6 +18,7 @@ package fake import ( api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" extensions "k8s.io/kubernetes/pkg/apis/extensions" core "k8s.io/kubernetes/pkg/client/testing/core" labels "k8s.io/kubernetes/pkg/labels" @@ -30,9 +31,11 @@ type FakeIngresses struct { ns string } +var ingressesResource = unversioned.GroupVersionResource{Group: "extensions", Version: "", Resource: "ingresses"} + func (c *FakeIngresses) Create(ingress *extensions.Ingress) (result *extensions.Ingress, err error) { obj, err := c.Fake. - Invokes(core.NewCreateAction("ingresses", c.ns, ingress), &extensions.Ingress{}) + Invokes(core.NewCreateAction(ingressesResource, c.ns, ingress), &extensions.Ingress{}) if obj == nil { return nil, err @@ -42,7 +45,7 @@ func (c *FakeIngresses) Create(ingress *extensions.Ingress) (result *extensions. 
func (c *FakeIngresses) Update(ingress *extensions.Ingress) (result *extensions.Ingress, err error) { obj, err := c.Fake. - Invokes(core.NewUpdateAction("ingresses", c.ns, ingress), &extensions.Ingress{}) + Invokes(core.NewUpdateAction(ingressesResource, c.ns, ingress), &extensions.Ingress{}) if obj == nil { return nil, err @@ -52,7 +55,7 @@ func (c *FakeIngresses) Update(ingress *extensions.Ingress) (result *extensions. func (c *FakeIngresses) UpdateStatus(ingress *extensions.Ingress) (*extensions.Ingress, error) { obj, err := c.Fake. - Invokes(core.NewUpdateSubresourceAction("ingresses", "status", c.ns, ingress), &extensions.Ingress{}) + Invokes(core.NewUpdateSubresourceAction(ingressesResource, "status", c.ns, ingress), &extensions.Ingress{}) if obj == nil { return nil, err @@ -62,13 +65,13 @@ func (c *FakeIngresses) UpdateStatus(ingress *extensions.Ingress) (*extensions.I func (c *FakeIngresses) Delete(name string, options *api.DeleteOptions) error { _, err := c.Fake. - Invokes(core.NewDeleteAction("ingresses", c.ns, name), &extensions.Ingress{}) + Invokes(core.NewDeleteAction(ingressesResource, c.ns, name), &extensions.Ingress{}) return err } func (c *FakeIngresses) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - action := core.NewDeleteCollectionAction("ingresses", c.ns, listOptions) + action := core.NewDeleteCollectionAction(ingressesResource, c.ns, listOptions) _, err := c.Fake.Invokes(action, &extensions.IngressList{}) return err @@ -76,7 +79,7 @@ func (c *FakeIngresses) DeleteCollection(options *api.DeleteOptions, listOptions func (c *FakeIngresses) Get(name string) (result *extensions.Ingress, err error) { obj, err := c.Fake. - Invokes(core.NewGetAction("ingresses", c.ns, name), &extensions.Ingress{}) + Invokes(core.NewGetAction(ingressesResource, c.ns, name), &extensions.Ingress{}) if obj == nil { return nil, err @@ -86,7 +89,7 @@ func (c *FakeIngresses) Get(name string) (result *extensions.Ingress, err error) func (c *FakeIngresses) List(opts api.ListOptions) (result *extensions.IngressList, err error) { obj, err := c.Fake. - Invokes(core.NewListAction("ingresses", c.ns, opts), &extensions.IngressList{}) + Invokes(core.NewListAction(ingressesResource, c.ns, opts), &extensions.IngressList{}) if obj == nil { return nil, err @@ -108,6 +111,6 @@ func (c *FakeIngresses) List(opts api.ListOptions) (result *extensions.IngressLi // Watch returns a watch.Interface that watches the requested ingresses. func (c *FakeIngresses) Watch(opts api.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(core.NewWatchAction("ingresses", c.ns, opts)) + InvokesWatch(core.NewWatchAction(ingressesResource, c.ns, opts)) } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/fake/fake_podsecuritypolicy.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/fake/fake_podsecuritypolicy.go new file mode 100644 index 000000000000..52b6f3acd16a --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/fake/fake_podsecuritypolicy.go @@ -0,0 +1,99 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + extensions "k8s.io/kubernetes/pkg/apis/extensions" + core "k8s.io/kubernetes/pkg/client/testing/core" + labels "k8s.io/kubernetes/pkg/labels" + watch "k8s.io/kubernetes/pkg/watch" +) + +// FakePodSecurityPolicies implements PodSecurityPolicyInterface +type FakePodSecurityPolicies struct { + Fake *FakeExtensions +} + +var podsecuritypoliciesResource = unversioned.GroupVersionResource{Group: "extensions", Version: "", Resource: "podsecuritypolicies"} + +func (c *FakePodSecurityPolicies) Create(podSecurityPolicy *extensions.PodSecurityPolicy) (result *extensions.PodSecurityPolicy, err error) { + obj, err := c.Fake. + Invokes(core.NewRootCreateAction(podsecuritypoliciesResource, podSecurityPolicy), &extensions.PodSecurityPolicy{}) + if obj == nil { + return nil, err + } + return obj.(*extensions.PodSecurityPolicy), err +} + +func (c *FakePodSecurityPolicies) Update(podSecurityPolicy *extensions.PodSecurityPolicy) (result *extensions.PodSecurityPolicy, err error) { + obj, err := c.Fake. + Invokes(core.NewRootUpdateAction(podsecuritypoliciesResource, podSecurityPolicy), &extensions.PodSecurityPolicy{}) + if obj == nil { + return nil, err + } + return obj.(*extensions.PodSecurityPolicy), err +} + +func (c *FakePodSecurityPolicies) Delete(name string, options *api.DeleteOptions) error { + _, err := c.Fake. + Invokes(core.NewRootDeleteAction(podsecuritypoliciesResource, name), &extensions.PodSecurityPolicy{}) + return err +} + +func (c *FakePodSecurityPolicies) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + action := core.NewRootDeleteCollectionAction(podsecuritypoliciesResource, listOptions) + + _, err := c.Fake.Invokes(action, &extensions.PodSecurityPolicyList{}) + return err +} + +func (c *FakePodSecurityPolicies) Get(name string) (result *extensions.PodSecurityPolicy, err error) { + obj, err := c.Fake. + Invokes(core.NewRootGetAction(podsecuritypoliciesResource, name), &extensions.PodSecurityPolicy{}) + if obj == nil { + return nil, err + } + return obj.(*extensions.PodSecurityPolicy), err +} + +func (c *FakePodSecurityPolicies) List(opts api.ListOptions) (result *extensions.PodSecurityPolicyList, err error) { + obj, err := c.Fake. + Invokes(core.NewRootListAction(podsecuritypoliciesResource, opts), &extensions.PodSecurityPolicyList{}) + if obj == nil { + return nil, err + } + + label := opts.LabelSelector + if label == nil { + label = labels.Everything() + } + list := &extensions.PodSecurityPolicyList{} + for _, item := range obj.(*extensions.PodSecurityPolicyList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested podSecurityPolicies. +func (c *FakePodSecurityPolicies) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.Fake. 
+ InvokesWatch(core.NewRootWatchAction(podsecuritypoliciesResource, opts)) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned/fake/fake_replicaset.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/fake/fake_replicaset.go similarity index 75% rename from Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned/fake/fake_replicaset.go rename to Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/fake/fake_replicaset.go index d861326b7c9d..9d7241ca49a4 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned/fake/fake_replicaset.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/fake/fake_replicaset.go @@ -18,6 +18,7 @@ package fake import ( api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" extensions "k8s.io/kubernetes/pkg/apis/extensions" core "k8s.io/kubernetes/pkg/client/testing/core" labels "k8s.io/kubernetes/pkg/labels" @@ -30,9 +31,11 @@ type FakeReplicaSets struct { ns string } +var replicasetsResource = unversioned.GroupVersionResource{Group: "extensions", Version: "", Resource: "replicasets"} + func (c *FakeReplicaSets) Create(replicaSet *extensions.ReplicaSet) (result *extensions.ReplicaSet, err error) { obj, err := c.Fake. - Invokes(core.NewCreateAction("replicasets", c.ns, replicaSet), &extensions.ReplicaSet{}) + Invokes(core.NewCreateAction(replicasetsResource, c.ns, replicaSet), &extensions.ReplicaSet{}) if obj == nil { return nil, err @@ -42,7 +45,7 @@ func (c *FakeReplicaSets) Create(replicaSet *extensions.ReplicaSet) (result *ext func (c *FakeReplicaSets) Update(replicaSet *extensions.ReplicaSet) (result *extensions.ReplicaSet, err error) { obj, err := c.Fake. - Invokes(core.NewUpdateAction("replicasets", c.ns, replicaSet), &extensions.ReplicaSet{}) + Invokes(core.NewUpdateAction(replicasetsResource, c.ns, replicaSet), &extensions.ReplicaSet{}) if obj == nil { return nil, err @@ -52,7 +55,7 @@ func (c *FakeReplicaSets) Update(replicaSet *extensions.ReplicaSet) (result *ext func (c *FakeReplicaSets) UpdateStatus(replicaSet *extensions.ReplicaSet) (*extensions.ReplicaSet, error) { obj, err := c.Fake. - Invokes(core.NewUpdateSubresourceAction("replicasets", "status", c.ns, replicaSet), &extensions.ReplicaSet{}) + Invokes(core.NewUpdateSubresourceAction(replicasetsResource, "status", c.ns, replicaSet), &extensions.ReplicaSet{}) if obj == nil { return nil, err @@ -62,13 +65,13 @@ func (c *FakeReplicaSets) UpdateStatus(replicaSet *extensions.ReplicaSet) (*exte func (c *FakeReplicaSets) Delete(name string, options *api.DeleteOptions) error { _, err := c.Fake. 
- Invokes(core.NewDeleteAction("replicasets", c.ns, name), &extensions.ReplicaSet{}) + Invokes(core.NewDeleteAction(replicasetsResource, c.ns, name), &extensions.ReplicaSet{}) return err } func (c *FakeReplicaSets) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - action := core.NewDeleteCollectionAction("replicasets", c.ns, listOptions) + action := core.NewDeleteCollectionAction(replicasetsResource, c.ns, listOptions) _, err := c.Fake.Invokes(action, &extensions.ReplicaSetList{}) return err @@ -76,7 +79,7 @@ func (c *FakeReplicaSets) DeleteCollection(options *api.DeleteOptions, listOptio func (c *FakeReplicaSets) Get(name string) (result *extensions.ReplicaSet, err error) { obj, err := c.Fake. - Invokes(core.NewGetAction("replicasets", c.ns, name), &extensions.ReplicaSet{}) + Invokes(core.NewGetAction(replicasetsResource, c.ns, name), &extensions.ReplicaSet{}) if obj == nil { return nil, err @@ -86,7 +89,7 @@ func (c *FakeReplicaSets) Get(name string) (result *extensions.ReplicaSet, err e func (c *FakeReplicaSets) List(opts api.ListOptions) (result *extensions.ReplicaSetList, err error) { obj, err := c.Fake. - Invokes(core.NewListAction("replicasets", c.ns, opts), &extensions.ReplicaSetList{}) + Invokes(core.NewListAction(replicasetsResource, c.ns, opts), &extensions.ReplicaSetList{}) if obj == nil { return nil, err @@ -108,6 +111,6 @@ func (c *FakeReplicaSets) List(opts api.ListOptions) (result *extensions.Replica // Watch returns a watch.Interface that watches the requested replicaSets. func (c *FakeReplicaSets) Watch(opts api.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(core.NewWatchAction("replicasets", c.ns, opts)) + InvokesWatch(core.NewWatchAction(replicasetsResource, c.ns, opts)) } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned/fake/fake_scale.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/fake/fake_scale.go similarity index 100% rename from Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned/fake/fake_scale.go rename to Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/fake/fake_scale.go diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned/fake/fake_scale_expansion.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/fake/fake_scale_expansion.go similarity index 88% rename from Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned/fake/fake_scale_expansion.go rename to Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/fake/fake_scale_expansion.go index 8c52e409b454..949836afe546 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned/fake/fake_scale_expansion.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/fake/fake_scale_expansion.go @@ -17,6 +17,7 @@ limitations under the License. 
package fake import ( + "k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/client/testing/core" ) @@ -25,7 +26,7 @@ func (c *FakeScales) Get(kind string, name string) (result *extensions.Scale, er action := core.GetActionImpl{} action.Verb = "get" action.Namespace = c.ns - action.Resource = kind + action.Resource = unversioned.GroupVersionResource{Resource: kind} action.Subresource = "scale" action.Name = name obj, err := c.Fake.Invokes(action, &extensions.Scale{}) @@ -37,7 +38,7 @@ func (c *FakeScales) Update(kind string, scale *extensions.Scale) (result *exten action := core.UpdateActionImpl{} action.Verb = "update" action.Namespace = c.ns - action.Resource = kind + action.Resource = unversioned.GroupVersionResource{Resource: kind} action.Subresource = "scale" action.Object = scale obj, err := c.Fake.Invokes(action, scale) diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned/fake/fake_thirdpartyresource.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/fake/fake_thirdpartyresource.go similarity index 74% rename from Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned/fake/fake_thirdpartyresource.go rename to Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/fake/fake_thirdpartyresource.go index 9a005d570890..37cbca979ca1 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned/fake/fake_thirdpartyresource.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/fake/fake_thirdpartyresource.go @@ -18,6 +18,7 @@ package fake import ( api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" extensions "k8s.io/kubernetes/pkg/apis/extensions" core "k8s.io/kubernetes/pkg/client/testing/core" labels "k8s.io/kubernetes/pkg/labels" @@ -27,13 +28,13 @@ import ( // FakeThirdPartyResources implements ThirdPartyResourceInterface type FakeThirdPartyResources struct { Fake *FakeExtensions - ns string } +var thirdpartyresourcesResource = unversioned.GroupVersionResource{Group: "extensions", Version: "", Resource: "thirdpartyresources"} + func (c *FakeThirdPartyResources) Create(thirdPartyResource *extensions.ThirdPartyResource) (result *extensions.ThirdPartyResource, err error) { obj, err := c.Fake. - Invokes(core.NewCreateAction("thirdpartyresources", c.ns, thirdPartyResource), &extensions.ThirdPartyResource{}) - + Invokes(core.NewRootCreateAction(thirdpartyresourcesResource, thirdPartyResource), &extensions.ThirdPartyResource{}) if obj == nil { return nil, err } @@ -42,8 +43,7 @@ func (c *FakeThirdPartyResources) Create(thirdPartyResource *extensions.ThirdPar func (c *FakeThirdPartyResources) Update(thirdPartyResource *extensions.ThirdPartyResource) (result *extensions.ThirdPartyResource, err error) { obj, err := c.Fake. 
- Invokes(core.NewUpdateAction("thirdpartyresources", c.ns, thirdPartyResource), &extensions.ThirdPartyResource{}) - + Invokes(core.NewRootUpdateAction(thirdpartyresourcesResource, thirdPartyResource), &extensions.ThirdPartyResource{}) if obj == nil { return nil, err } @@ -52,13 +52,12 @@ func (c *FakeThirdPartyResources) Update(thirdPartyResource *extensions.ThirdPar func (c *FakeThirdPartyResources) Delete(name string, options *api.DeleteOptions) error { _, err := c.Fake. - Invokes(core.NewDeleteAction("thirdpartyresources", c.ns, name), &extensions.ThirdPartyResource{}) - + Invokes(core.NewRootDeleteAction(thirdpartyresourcesResource, name), &extensions.ThirdPartyResource{}) return err } func (c *FakeThirdPartyResources) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - action := core.NewDeleteCollectionAction("thirdpartyresources", c.ns, listOptions) + action := core.NewRootDeleteCollectionAction(thirdpartyresourcesResource, listOptions) _, err := c.Fake.Invokes(action, &extensions.ThirdPartyResourceList{}) return err @@ -66,8 +65,7 @@ func (c *FakeThirdPartyResources) DeleteCollection(options *api.DeleteOptions, l func (c *FakeThirdPartyResources) Get(name string) (result *extensions.ThirdPartyResource, err error) { obj, err := c.Fake. - Invokes(core.NewGetAction("thirdpartyresources", c.ns, name), &extensions.ThirdPartyResource{}) - + Invokes(core.NewRootGetAction(thirdpartyresourcesResource, name), &extensions.ThirdPartyResource{}) if obj == nil { return nil, err } @@ -76,8 +74,7 @@ func (c *FakeThirdPartyResources) Get(name string) (result *extensions.ThirdPart func (c *FakeThirdPartyResources) List(opts api.ListOptions) (result *extensions.ThirdPartyResourceList, err error) { obj, err := c.Fake. - Invokes(core.NewListAction("thirdpartyresources", c.ns, opts), &extensions.ThirdPartyResourceList{}) - + Invokes(core.NewRootListAction(thirdpartyresourcesResource, opts), &extensions.ThirdPartyResourceList{}) if obj == nil { return nil, err } @@ -98,6 +95,5 @@ func (c *FakeThirdPartyResources) List(opts api.ListOptions) (result *extensions // Watch returns a watch.Interface that watches the requested thirdPartyResources. func (c *FakeThirdPartyResources) Watch(opts api.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(core.NewWatchAction("thirdpartyresources", c.ns, opts)) - + InvokesWatch(core.NewRootWatchAction(thirdpartyresourcesResource, opts)) } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/generated_expansion.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/generated_expansion.go new file mode 100644 index 000000000000..7a1999454e6e --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/generated_expansion.go @@ -0,0 +1,31 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
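FakeThirdPartyResources above, like FakePodSecurityPolicies earlier, moves from the namespaced action constructors to their Root* variants because the resource is cluster-scoped: the ns field is dropped and no namespace argument is passed. Side by side, using only constructors that appear in this diff:

    // namespaced resource: the action records a namespace
    core.NewGetAction(daemonsetsResource, c.ns, name)

    // cluster-scoped resource: no namespace anywhere
    core.NewRootGetAction(thirdpartyresourcesResource, name)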
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +type DaemonSetExpansion interface{} + +type HorizontalPodAutoscalerExpansion interface{} + +type IngressExpansion interface{} + +type JobExpansion interface{} + +type PodSecurityPolicyExpansion interface{} + +type ThirdPartyResourceExpansion interface{} + +type ReplicaSetExpansion interface{} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned/ingress.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/ingress.go similarity index 100% rename from Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned/ingress.go rename to Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/ingress.go diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned/job.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/job.go similarity index 79% rename from Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned/job.go rename to Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/job.go index 04d0d1282355..4ae3f6cac557 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned/job.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/job.go @@ -18,7 +18,7 @@ package unversioned import ( api "k8s.io/kubernetes/pkg/api" - extensions "k8s.io/kubernetes/pkg/apis/extensions" + batch "k8s.io/kubernetes/pkg/apis/batch" watch "k8s.io/kubernetes/pkg/watch" ) @@ -30,13 +30,13 @@ type JobsGetter interface { // JobInterface has methods to work with Job resources. type JobInterface interface { - Create(*extensions.Job) (*extensions.Job, error) - Update(*extensions.Job) (*extensions.Job, error) - UpdateStatus(*extensions.Job) (*extensions.Job, error) + Create(*batch.Job) (*batch.Job, error) + Update(*batch.Job) (*batch.Job, error) + UpdateStatus(*batch.Job) (*batch.Job, error) Delete(name string, options *api.DeleteOptions) error DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error - Get(name string) (*extensions.Job, error) - List(opts api.ListOptions) (*extensions.JobList, error) + Get(name string) (*batch.Job, error) + List(opts api.ListOptions) (*batch.JobList, error) Watch(opts api.ListOptions) (watch.Interface, error) JobExpansion } @@ -56,8 +56,8 @@ func newJobs(c *ExtensionsClient, namespace string) *jobs { } // Create takes the representation of a job and creates it. Returns the server's representation of the job, and an error, if there is any. -func (c *jobs) Create(job *extensions.Job) (result *extensions.Job, err error) { - result = &extensions.Job{} +func (c *jobs) Create(job *batch.Job) (result *batch.Job, err error) { + result = &batch.Job{} err = c.client.Post(). Namespace(c.ns). Resource("jobs"). @@ -68,8 +68,8 @@ func (c *jobs) Create(job *extensions.Job) (result *extensions.Job, err error) { } // Update takes the representation of a job and updates it. Returns the server's representation of the job, and an error, if there is any. 
-func (c *jobs) Update(job *extensions.Job) (result *extensions.Job, err error) { - result = &extensions.Job{} +func (c *jobs) Update(job *batch.Job) (result *batch.Job, err error) { + result = &batch.Job{} err = c.client.Put(). Namespace(c.ns). Resource("jobs"). @@ -80,8 +80,8 @@ func (c *jobs) Update(job *extensions.Job) (result *extensions.Job, err error) { return } -func (c *jobs) UpdateStatus(job *extensions.Job) (result *extensions.Job, err error) { - result = &extensions.Job{} +func (c *jobs) UpdateStatus(job *batch.Job) (result *batch.Job, err error) { + result = &batch.Job{} err = c.client.Put(). Namespace(c.ns). Resource("jobs"). @@ -116,8 +116,8 @@ func (c *jobs) DeleteCollection(options *api.DeleteOptions, listOptions api.List } // Get takes name of the job, and returns the corresponding job object, and an error if there is any. -func (c *jobs) Get(name string) (result *extensions.Job, err error) { - result = &extensions.Job{} +func (c *jobs) Get(name string) (result *batch.Job, err error) { + result = &batch.Job{} err = c.client.Get(). Namespace(c.ns). Resource("jobs"). @@ -128,8 +128,8 @@ func (c *jobs) Get(name string) (result *extensions.Job, err error) { } // List takes label and field selectors, and returns the list of Jobs that match those selectors. -func (c *jobs) List(opts api.ListOptions) (result *extensions.JobList, err error) { - result = &extensions.JobList{} +func (c *jobs) List(opts api.ListOptions) (result *batch.JobList, err error) { + result = &batch.JobList{} err = c.client.Get(). Namespace(c.ns). Resource("jobs"). diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/podsecuritypolicy.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/podsecuritypolicy.go new file mode 100644 index 000000000000..06a7908f4675 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/podsecuritypolicy.go @@ -0,0 +1,127 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + api "k8s.io/kubernetes/pkg/api" + extensions "k8s.io/kubernetes/pkg/apis/extensions" + watch "k8s.io/kubernetes/pkg/watch" +) + +// PodSecurityPoliciesGetter has a method to return a PodSecurityPolicyInterface. +// A group's client should implement this interface. +type PodSecurityPoliciesGetter interface { + PodSecurityPolicies() PodSecurityPolicyInterface +} + +// PodSecurityPolicyInterface has methods to work with PodSecurityPolicy resources. 
+type PodSecurityPolicyInterface interface { + Create(*extensions.PodSecurityPolicy) (*extensions.PodSecurityPolicy, error) + Update(*extensions.PodSecurityPolicy) (*extensions.PodSecurityPolicy, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*extensions.PodSecurityPolicy, error) + List(opts api.ListOptions) (*extensions.PodSecurityPolicyList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + PodSecurityPolicyExpansion +} + +// podSecurityPolicies implements PodSecurityPolicyInterface +type podSecurityPolicies struct { + client *ExtensionsClient +} + +// newPodSecurityPolicies returns a PodSecurityPolicies +func newPodSecurityPolicies(c *ExtensionsClient) *podSecurityPolicies { + return &podSecurityPolicies{ + client: c, + } +} + +// Create takes the representation of a podSecurityPolicy and creates it. Returns the server's representation of the podSecurityPolicy, and an error, if there is any. +func (c *podSecurityPolicies) Create(podSecurityPolicy *extensions.PodSecurityPolicy) (result *extensions.PodSecurityPolicy, err error) { + result = &extensions.PodSecurityPolicy{} + err = c.client.Post(). + Resource("podsecuritypolicies"). + Body(podSecurityPolicy). + Do(). + Into(result) + return +} + +// Update takes the representation of a podSecurityPolicy and updates it. Returns the server's representation of the podSecurityPolicy, and an error, if there is any. +func (c *podSecurityPolicies) Update(podSecurityPolicy *extensions.PodSecurityPolicy) (result *extensions.PodSecurityPolicy, err error) { + result = &extensions.PodSecurityPolicy{} + err = c.client.Put(). + Resource("podsecuritypolicies"). + Name(podSecurityPolicy.Name). + Body(podSecurityPolicy). + Do(). + Into(result) + return +} + +// Delete takes name of the podSecurityPolicy and deletes it. Returns an error if one occurs. +func (c *podSecurityPolicies) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Resource("podsecuritypolicies"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *podSecurityPolicies) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Resource("podsecuritypolicies"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the podSecurityPolicy, and returns the corresponding podSecurityPolicy object, and an error if there is any. +func (c *podSecurityPolicies) Get(name string) (result *extensions.PodSecurityPolicy, err error) { + result = &extensions.PodSecurityPolicy{} + err = c.client.Get(). + Resource("podsecuritypolicies"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of PodSecurityPolicies that match those selectors. +func (c *podSecurityPolicies) List(opts api.ListOptions) (result *extensions.PodSecurityPolicyList, err error) { + result = &extensions.PodSecurityPolicyList{} + err = c.client.Get(). + Resource("podsecuritypolicies"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested podSecurityPolicies. +func (c *podSecurityPolicies) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Resource("podsecuritypolicies"). 
+ VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned/replicaset.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/replicaset.go similarity index 100% rename from Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned/replicaset.go rename to Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/replicaset.go diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned/scale.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/scale.go similarity index 100% rename from Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned/scale.go rename to Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/scale.go diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned/scale_expansion.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/scale_expansion.go similarity index 100% rename from Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned/scale_expansion.go rename to Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/scale_expansion.go diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned/thirdpartyresource.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/thirdpartyresource.go similarity index 93% rename from Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned/thirdpartyresource.go rename to Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/thirdpartyresource.go index 0f1026fab524..a64ffb62c426 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned/thirdpartyresource.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/thirdpartyresource.go @@ -25,7 +25,7 @@ import ( // ThirdPartyResourcesGetter has a method to return a ThirdPartyResourceInterface. // A group's client should implement this interface. type ThirdPartyResourcesGetter interface { - ThirdPartyResources(namespace string) ThirdPartyResourceInterface + ThirdPartyResources() ThirdPartyResourceInterface } // ThirdPartyResourceInterface has methods to work with ThirdPartyResource resources. 
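The hunk above changes the getter from ThirdPartyResources(namespace string) to ThirdPartyResources(), and the hunks that follow strip the ns field and the Namespace(c.ns) request segment from every method: ThirdPartyResources become cluster-scoped rather than namespaced. A minimal sketch of how a caller changes, assuming the typed ExtensionsInterface exposed by this package (the function, variable, and resource names here are hypothetical, not part of the diff):

    // Sketch only, assuming an ExtensionsInterface from the generated
    // internalclientset extensions package; getTPR and the resource name
    // are hypothetical.
    func getTPR(c unversioned.ExtensionsInterface) (*extensions.ThirdPartyResource, error) {
    	// Before this change the getter took a namespace:
    	//   c.ThirdPartyResources("default").Get("cron-tab.stable.example.com")
    	// After it, the getter takes no arguments:
    	return c.ThirdPartyResources().Get("cron-tab.stable.example.com")
    }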
@@ -43,14 +43,12 @@ type ThirdPartyResourceInterface interface { // thirdPartyResources implements ThirdPartyResourceInterface type thirdPartyResources struct { client *ExtensionsClient - ns string } // newThirdPartyResources returns a ThirdPartyResources -func newThirdPartyResources(c *ExtensionsClient, namespace string) *thirdPartyResources { +func newThirdPartyResources(c *ExtensionsClient) *thirdPartyResources { return &thirdPartyResources{ client: c, - ns: namespace, } } @@ -58,7 +56,6 @@ func newThirdPartyResources(c *ExtensionsClient, namespace string) *thirdPartyRe func (c *thirdPartyResources) Create(thirdPartyResource *extensions.ThirdPartyResource) (result *extensions.ThirdPartyResource, err error) { result = &extensions.ThirdPartyResource{} err = c.client.Post(). - Namespace(c.ns). Resource("thirdpartyresources"). Body(thirdPartyResource). Do(). @@ -70,7 +67,6 @@ func (c *thirdPartyResources) Create(thirdPartyResource *extensions.ThirdPartyRe func (c *thirdPartyResources) Update(thirdPartyResource *extensions.ThirdPartyResource) (result *extensions.ThirdPartyResource, err error) { result = &extensions.ThirdPartyResource{} err = c.client.Put(). - Namespace(c.ns). Resource("thirdpartyresources"). Name(thirdPartyResource.Name). Body(thirdPartyResource). @@ -82,7 +78,6 @@ func (c *thirdPartyResources) Update(thirdPartyResource *extensions.ThirdPartyRe // Delete takes name of the thirdPartyResource and deletes it. Returns an error if one occurs. func (c *thirdPartyResources) Delete(name string, options *api.DeleteOptions) error { return c.client.Delete(). - Namespace(c.ns). Resource("thirdpartyresources"). Name(name). Body(options). @@ -93,7 +88,6 @@ func (c *thirdPartyResources) Delete(name string, options *api.DeleteOptions) er // DeleteCollection deletes a collection of objects. func (c *thirdPartyResources) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { return c.client.Delete(). - Namespace(c.ns). Resource("thirdpartyresources"). VersionedParams(&listOptions, api.ParameterCodec). Body(options). @@ -105,7 +99,6 @@ func (c *thirdPartyResources) DeleteCollection(options *api.DeleteOptions, listO func (c *thirdPartyResources) Get(name string) (result *extensions.ThirdPartyResource, err error) { result = &extensions.ThirdPartyResource{} err = c.client.Get(). - Namespace(c.ns). Resource("thirdpartyresources"). Name(name). Do(). @@ -117,7 +110,6 @@ func (c *thirdPartyResources) Get(name string) (result *extensions.ThirdPartyRes func (c *thirdPartyResources) List(opts api.ListOptions) (result *extensions.ThirdPartyResourceList, err error) { result = &extensions.ThirdPartyResourceList{} err = c.client.Get(). - Namespace(c.ns). Resource("thirdpartyresources"). VersionedParams(&opts, api.ParameterCodec). Do(). @@ -129,7 +121,6 @@ func (c *thirdPartyResources) List(opts api.ListOptions) (result *extensions.Thi func (c *thirdPartyResources) Watch(opts api.ListOptions) (watch.Interface, error) { return c.client.Get(). Prefix("watch"). - Namespace(c.ns). Resource("thirdpartyresources"). VersionedParams(&opts, api.ParameterCodec). 
Watch() diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/clusterrole.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/clusterrole.go new file mode 100644 index 000000000000..5d0b3912a53f --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/clusterrole.go @@ -0,0 +1,127 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + api "k8s.io/kubernetes/pkg/api" + rbac "k8s.io/kubernetes/pkg/apis/rbac" + watch "k8s.io/kubernetes/pkg/watch" +) + +// ClusterRolesGetter has a method to return a ClusterRoleInterface. +// A group's client should implement this interface. +type ClusterRolesGetter interface { + ClusterRoles() ClusterRoleInterface +} + +// ClusterRoleInterface has methods to work with ClusterRole resources. +type ClusterRoleInterface interface { + Create(*rbac.ClusterRole) (*rbac.ClusterRole, error) + Update(*rbac.ClusterRole) (*rbac.ClusterRole, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*rbac.ClusterRole, error) + List(opts api.ListOptions) (*rbac.ClusterRoleList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + ClusterRoleExpansion +} + +// clusterRoles implements ClusterRoleInterface +type clusterRoles struct { + client *RbacClient +} + +// newClusterRoles returns a ClusterRoles +func newClusterRoles(c *RbacClient) *clusterRoles { + return &clusterRoles{ + client: c, + } +} + +// Create takes the representation of a clusterRole and creates it. Returns the server's representation of the clusterRole, and an error, if there is any. +func (c *clusterRoles) Create(clusterRole *rbac.ClusterRole) (result *rbac.ClusterRole, err error) { + result = &rbac.ClusterRole{} + err = c.client.Post(). + Resource("clusterroles"). + Body(clusterRole). + Do(). + Into(result) + return +} + +// Update takes the representation of a clusterRole and updates it. Returns the server's representation of the clusterRole, and an error, if there is any. +func (c *clusterRoles) Update(clusterRole *rbac.ClusterRole) (result *rbac.ClusterRole, err error) { + result = &rbac.ClusterRole{} + err = c.client.Put(). + Resource("clusterroles"). + Name(clusterRole.Name). + Body(clusterRole). + Do(). + Into(result) + return +} + +// Delete takes name of the clusterRole and deletes it. Returns an error if one occurs. +func (c *clusterRoles) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Resource("clusterroles"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *clusterRoles) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). 
+ Resource("clusterroles"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the clusterRole, and returns the corresponding clusterRole object, and an error if there is any. +func (c *clusterRoles) Get(name string) (result *rbac.ClusterRole, err error) { + result = &rbac.ClusterRole{} + err = c.client.Get(). + Resource("clusterroles"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ClusterRoles that match those selectors. +func (c *clusterRoles) List(opts api.ListOptions) (result *rbac.ClusterRoleList, err error) { + result = &rbac.ClusterRoleList{} + err = c.client.Get(). + Resource("clusterroles"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested clusterRoles. +func (c *clusterRoles) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Resource("clusterroles"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/clusterrolebinding.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/clusterrolebinding.go new file mode 100644 index 000000000000..f2102592afcd --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/clusterrolebinding.go @@ -0,0 +1,127 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + api "k8s.io/kubernetes/pkg/api" + rbac "k8s.io/kubernetes/pkg/apis/rbac" + watch "k8s.io/kubernetes/pkg/watch" +) + +// ClusterRoleBindingsGetter has a method to return a ClusterRoleBindingInterface. +// A group's client should implement this interface. +type ClusterRoleBindingsGetter interface { + ClusterRoleBindings() ClusterRoleBindingInterface +} + +// ClusterRoleBindingInterface has methods to work with ClusterRoleBinding resources. 
+type ClusterRoleBindingInterface interface { + Create(*rbac.ClusterRoleBinding) (*rbac.ClusterRoleBinding, error) + Update(*rbac.ClusterRoleBinding) (*rbac.ClusterRoleBinding, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*rbac.ClusterRoleBinding, error) + List(opts api.ListOptions) (*rbac.ClusterRoleBindingList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + ClusterRoleBindingExpansion +} + +// clusterRoleBindings implements ClusterRoleBindingInterface +type clusterRoleBindings struct { + client *RbacClient +} + +// newClusterRoleBindings returns a ClusterRoleBindings +func newClusterRoleBindings(c *RbacClient) *clusterRoleBindings { + return &clusterRoleBindings{ + client: c, + } +} + +// Create takes the representation of a clusterRoleBinding and creates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. +func (c *clusterRoleBindings) Create(clusterRoleBinding *rbac.ClusterRoleBinding) (result *rbac.ClusterRoleBinding, err error) { + result = &rbac.ClusterRoleBinding{} + err = c.client.Post(). + Resource("clusterrolebindings"). + Body(clusterRoleBinding). + Do(). + Into(result) + return +} + +// Update takes the representation of a clusterRoleBinding and updates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. +func (c *clusterRoleBindings) Update(clusterRoleBinding *rbac.ClusterRoleBinding) (result *rbac.ClusterRoleBinding, err error) { + result = &rbac.ClusterRoleBinding{} + err = c.client.Put(). + Resource("clusterrolebindings"). + Name(clusterRoleBinding.Name). + Body(clusterRoleBinding). + Do(). + Into(result) + return +} + +// Delete takes name of the clusterRoleBinding and deletes it. Returns an error if one occurs. +func (c *clusterRoleBindings) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Resource("clusterrolebindings"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *clusterRoleBindings) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Resource("clusterrolebindings"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the clusterRoleBinding, and returns the corresponding clusterRoleBinding object, and an error if there is any. +func (c *clusterRoleBindings) Get(name string) (result *rbac.ClusterRoleBinding, err error) { + result = &rbac.ClusterRoleBinding{} + err = c.client.Get(). + Resource("clusterrolebindings"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors. +func (c *clusterRoleBindings) List(opts api.ListOptions) (result *rbac.ClusterRoleBindingList, err error) { + result = &rbac.ClusterRoleBindingList{} + err = c.client.Get(). + Resource("clusterrolebindings"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested clusterRoleBindings. +func (c *clusterRoleBindings) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Resource("clusterrolebindings"). + VersionedParams(&opts, api.ParameterCodec). 
+ Watch() +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/doc.go new file mode 100644 index 000000000000..47517b6422dd --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with the default arguments. + +// This package has the automatically generated typed clients. +package unversioned diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/fake/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/fake/doc.go new file mode 100644 index 000000000000..eb358c26c80f --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with the default arguments. + +// Package fake has the automatically generated clients. +package fake diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/fake/fake_clusterrole.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/fake/fake_clusterrole.go new file mode 100644 index 000000000000..c86ec5f705cc --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/fake/fake_clusterrole.go @@ -0,0 +1,99 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + rbac "k8s.io/kubernetes/pkg/apis/rbac" + core "k8s.io/kubernetes/pkg/client/testing/core" + labels "k8s.io/kubernetes/pkg/labels" + watch "k8s.io/kubernetes/pkg/watch" +) + +// FakeClusterRoles implements ClusterRoleInterface +type FakeClusterRoles struct { + Fake *FakeRbac +} + +var clusterrolesResource = unversioned.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "", Resource: "clusterroles"} + +func (c *FakeClusterRoles) Create(clusterRole *rbac.ClusterRole) (result *rbac.ClusterRole, err error) { + obj, err := c.Fake. + Invokes(core.NewRootCreateAction(clusterrolesResource, clusterRole), &rbac.ClusterRole{}) + if obj == nil { + return nil, err + } + return obj.(*rbac.ClusterRole), err +} + +func (c *FakeClusterRoles) Update(clusterRole *rbac.ClusterRole) (result *rbac.ClusterRole, err error) { + obj, err := c.Fake. + Invokes(core.NewRootUpdateAction(clusterrolesResource, clusterRole), &rbac.ClusterRole{}) + if obj == nil { + return nil, err + } + return obj.(*rbac.ClusterRole), err +} + +func (c *FakeClusterRoles) Delete(name string, options *api.DeleteOptions) error { + _, err := c.Fake. + Invokes(core.NewRootDeleteAction(clusterrolesResource, name), &rbac.ClusterRole{}) + return err +} + +func (c *FakeClusterRoles) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + action := core.NewRootDeleteCollectionAction(clusterrolesResource, listOptions) + + _, err := c.Fake.Invokes(action, &rbac.ClusterRoleList{}) + return err +} + +func (c *FakeClusterRoles) Get(name string) (result *rbac.ClusterRole, err error) { + obj, err := c.Fake. + Invokes(core.NewRootGetAction(clusterrolesResource, name), &rbac.ClusterRole{}) + if obj == nil { + return nil, err + } + return obj.(*rbac.ClusterRole), err +} + +func (c *FakeClusterRoles) List(opts api.ListOptions) (result *rbac.ClusterRoleList, err error) { + obj, err := c.Fake. + Invokes(core.NewRootListAction(clusterrolesResource, opts), &rbac.ClusterRoleList{}) + if obj == nil { + return nil, err + } + + label := opts.LabelSelector + if label == nil { + label = labels.Everything() + } + list := &rbac.ClusterRoleList{} + for _, item := range obj.(*rbac.ClusterRoleList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested clusterRoles. +func (c *FakeClusterRoles) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(core.NewRootWatchAction(clusterrolesResource, opts)) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/fake/fake_clusterrolebinding.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/fake/fake_clusterrolebinding.go new file mode 100644 index 000000000000..bf16661b1721 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/fake/fake_clusterrolebinding.go @@ -0,0 +1,99 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + rbac "k8s.io/kubernetes/pkg/apis/rbac" + core "k8s.io/kubernetes/pkg/client/testing/core" + labels "k8s.io/kubernetes/pkg/labels" + watch "k8s.io/kubernetes/pkg/watch" +) + +// FakeClusterRoleBindings implements ClusterRoleBindingInterface +type FakeClusterRoleBindings struct { + Fake *FakeRbac +} + +var clusterrolebindingsResource = unversioned.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "", Resource: "clusterrolebindings"} + +func (c *FakeClusterRoleBindings) Create(clusterRoleBinding *rbac.ClusterRoleBinding) (result *rbac.ClusterRoleBinding, err error) { + obj, err := c.Fake. + Invokes(core.NewRootCreateAction(clusterrolebindingsResource, clusterRoleBinding), &rbac.ClusterRoleBinding{}) + if obj == nil { + return nil, err + } + return obj.(*rbac.ClusterRoleBinding), err +} + +func (c *FakeClusterRoleBindings) Update(clusterRoleBinding *rbac.ClusterRoleBinding) (result *rbac.ClusterRoleBinding, err error) { + obj, err := c.Fake. + Invokes(core.NewRootUpdateAction(clusterrolebindingsResource, clusterRoleBinding), &rbac.ClusterRoleBinding{}) + if obj == nil { + return nil, err + } + return obj.(*rbac.ClusterRoleBinding), err +} + +func (c *FakeClusterRoleBindings) Delete(name string, options *api.DeleteOptions) error { + _, err := c.Fake. + Invokes(core.NewRootDeleteAction(clusterrolebindingsResource, name), &rbac.ClusterRoleBinding{}) + return err +} + +func (c *FakeClusterRoleBindings) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + action := core.NewRootDeleteCollectionAction(clusterrolebindingsResource, listOptions) + + _, err := c.Fake.Invokes(action, &rbac.ClusterRoleBindingList{}) + return err +} + +func (c *FakeClusterRoleBindings) Get(name string) (result *rbac.ClusterRoleBinding, err error) { + obj, err := c.Fake. + Invokes(core.NewRootGetAction(clusterrolebindingsResource, name), &rbac.ClusterRoleBinding{}) + if obj == nil { + return nil, err + } + return obj.(*rbac.ClusterRoleBinding), err +} + +func (c *FakeClusterRoleBindings) List(opts api.ListOptions) (result *rbac.ClusterRoleBindingList, err error) { + obj, err := c.Fake. + Invokes(core.NewRootListAction(clusterrolebindingsResource, opts), &rbac.ClusterRoleBindingList{}) + if obj == nil { + return nil, err + } + + label := opts.LabelSelector + if label == nil { + label = labels.Everything() + } + list := &rbac.ClusterRoleBindingList{} + for _, item := range obj.(*rbac.ClusterRoleBindingList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested clusterRoleBindings. +func (c *FakeClusterRoleBindings) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.Fake. 
+ InvokesWatch(core.NewRootWatchAction(clusterrolebindingsResource, opts)) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/fake/fake_rbac_client.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/fake/fake_rbac_client.go new file mode 100644 index 000000000000..5c4951fe1829 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/fake/fake_rbac_client.go @@ -0,0 +1,49 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + unversioned "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned" + restclient "k8s.io/kubernetes/pkg/client/restclient" + core "k8s.io/kubernetes/pkg/client/testing/core" +) + +type FakeRbac struct { + *core.Fake +} + +func (c *FakeRbac) ClusterRoles() unversioned.ClusterRoleInterface { + return &FakeClusterRoles{c} +} + +func (c *FakeRbac) ClusterRoleBindings() unversioned.ClusterRoleBindingInterface { + return &FakeClusterRoleBindings{c} +} + +func (c *FakeRbac) Roles(namespace string) unversioned.RoleInterface { + return &FakeRoles{c, namespace} +} + +func (c *FakeRbac) RoleBindings(namespace string) unversioned.RoleBindingInterface { + return &FakeRoleBindings{c, namespace} +} + +// GetRESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeRbac) GetRESTClient() *restclient.RESTClient { + return nil +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/fake/fake_role.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/fake/fake_role.go new file mode 100644 index 000000000000..eacccc39ac1f --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/fake/fake_role.go @@ -0,0 +1,106 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package fake + +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + rbac "k8s.io/kubernetes/pkg/apis/rbac" + core "k8s.io/kubernetes/pkg/client/testing/core" + labels "k8s.io/kubernetes/pkg/labels" + watch "k8s.io/kubernetes/pkg/watch" +) + +// FakeRoles implements RoleInterface +type FakeRoles struct { + Fake *FakeRbac + ns string +} + +var rolesResource = unversioned.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "", Resource: "roles"} + +func (c *FakeRoles) Create(role *rbac.Role) (result *rbac.Role, err error) { + obj, err := c.Fake. + Invokes(core.NewCreateAction(rolesResource, c.ns, role), &rbac.Role{}) + + if obj == nil { + return nil, err + } + return obj.(*rbac.Role), err +} + +func (c *FakeRoles) Update(role *rbac.Role) (result *rbac.Role, err error) { + obj, err := c.Fake. + Invokes(core.NewUpdateAction(rolesResource, c.ns, role), &rbac.Role{}) + + if obj == nil { + return nil, err + } + return obj.(*rbac.Role), err +} + +func (c *FakeRoles) Delete(name string, options *api.DeleteOptions) error { + _, err := c.Fake. + Invokes(core.NewDeleteAction(rolesResource, c.ns, name), &rbac.Role{}) + + return err +} + +func (c *FakeRoles) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + action := core.NewDeleteCollectionAction(rolesResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &rbac.RoleList{}) + return err +} + +func (c *FakeRoles) Get(name string) (result *rbac.Role, err error) { + obj, err := c.Fake. + Invokes(core.NewGetAction(rolesResource, c.ns, name), &rbac.Role{}) + + if obj == nil { + return nil, err + } + return obj.(*rbac.Role), err +} + +func (c *FakeRoles) List(opts api.ListOptions) (result *rbac.RoleList, err error) { + obj, err := c.Fake. + Invokes(core.NewListAction(rolesResource, c.ns, opts), &rbac.RoleList{}) + + if obj == nil { + return nil, err + } + + label := opts.LabelSelector + if label == nil { + label = labels.Everything() + } + list := &rbac.RoleList{} + for _, item := range obj.(*rbac.RoleList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested roles. +func (c *FakeRoles) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(core.NewWatchAction(rolesResource, c.ns, opts)) + +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/fake/fake_rolebinding.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/fake/fake_rolebinding.go new file mode 100644 index 000000000000..ff7576617511 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/fake/fake_rolebinding.go @@ -0,0 +1,106 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + rbac "k8s.io/kubernetes/pkg/apis/rbac" + core "k8s.io/kubernetes/pkg/client/testing/core" + labels "k8s.io/kubernetes/pkg/labels" + watch "k8s.io/kubernetes/pkg/watch" +) + +// FakeRoleBindings implements RoleBindingInterface +type FakeRoleBindings struct { + Fake *FakeRbac + ns string +} + +var rolebindingsResource = unversioned.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "", Resource: "rolebindings"} + +func (c *FakeRoleBindings) Create(roleBinding *rbac.RoleBinding) (result *rbac.RoleBinding, err error) { + obj, err := c.Fake. + Invokes(core.NewCreateAction(rolebindingsResource, c.ns, roleBinding), &rbac.RoleBinding{}) + + if obj == nil { + return nil, err + } + return obj.(*rbac.RoleBinding), err +} + +func (c *FakeRoleBindings) Update(roleBinding *rbac.RoleBinding) (result *rbac.RoleBinding, err error) { + obj, err := c.Fake. + Invokes(core.NewUpdateAction(rolebindingsResource, c.ns, roleBinding), &rbac.RoleBinding{}) + + if obj == nil { + return nil, err + } + return obj.(*rbac.RoleBinding), err +} + +func (c *FakeRoleBindings) Delete(name string, options *api.DeleteOptions) error { + _, err := c.Fake. + Invokes(core.NewDeleteAction(rolebindingsResource, c.ns, name), &rbac.RoleBinding{}) + + return err +} + +func (c *FakeRoleBindings) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + action := core.NewDeleteCollectionAction(rolebindingsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &rbac.RoleBindingList{}) + return err +} + +func (c *FakeRoleBindings) Get(name string) (result *rbac.RoleBinding, err error) { + obj, err := c.Fake. + Invokes(core.NewGetAction(rolebindingsResource, c.ns, name), &rbac.RoleBinding{}) + + if obj == nil { + return nil, err + } + return obj.(*rbac.RoleBinding), err +} + +func (c *FakeRoleBindings) List(opts api.ListOptions) (result *rbac.RoleBindingList, err error) { + obj, err := c.Fake. + Invokes(core.NewListAction(rolebindingsResource, c.ns, opts), &rbac.RoleBindingList{}) + + if obj == nil { + return nil, err + } + + label := opts.LabelSelector + if label == nil { + label = labels.Everything() + } + list := &rbac.RoleBindingList{} + for _, item := range obj.(*rbac.RoleBindingList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested roleBindings. +func (c *FakeRoleBindings) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(core.NewWatchAction(rolebindingsResource, c.ns, opts)) + +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/generated_expansion.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/generated_expansion.go new file mode 100644 index 000000000000..a3b9c689d359 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/generated_expansion.go @@ -0,0 +1,25 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +type ClusterRoleExpansion interface{} + +type ClusterRoleBindingExpansion interface{} + +type RoleExpansion interface{} + +type RoleBindingExpansion interface{} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/rbac_client.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/rbac_client.go new file mode 100644 index 000000000000..4d67337cde2c --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/rbac_client.go @@ -0,0 +1,116 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + api "k8s.io/kubernetes/pkg/api" + registered "k8s.io/kubernetes/pkg/apimachinery/registered" + restclient "k8s.io/kubernetes/pkg/client/restclient" +) + +type RbacInterface interface { + GetRESTClient() *restclient.RESTClient + ClusterRolesGetter + ClusterRoleBindingsGetter + RolesGetter + RoleBindingsGetter +} + +// RbacClient is used to interact with features provided by the Rbac group. +type RbacClient struct { + *restclient.RESTClient +} + +func (c *RbacClient) ClusterRoles() ClusterRoleInterface { + return newClusterRoles(c) +} + +func (c *RbacClient) ClusterRoleBindings() ClusterRoleBindingInterface { + return newClusterRoleBindings(c) +} + +func (c *RbacClient) Roles(namespace string) RoleInterface { + return newRoles(c, namespace) +} + +func (c *RbacClient) RoleBindings(namespace string) RoleBindingInterface { + return newRoleBindings(c, namespace) +} + +// NewForConfig creates a new RbacClient for the given config. +func NewForConfig(c *restclient.Config) (*RbacClient, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := restclient.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &RbacClient{client}, nil +} + +// NewForConfigOrDie creates a new RbacClient for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *restclient.Config) *RbacClient { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new RbacClient for the given RESTClient. 
+func New(c *restclient.RESTClient) *RbacClient { + return &RbacClient{c} +} + +func setConfigDefaults(config *restclient.Config) error { + // if rbac group is not registered, return an error + g, err := registered.Group("rbac.authorization.k8s.io") + if err != nil { + return err + } + config.APIPath = "/apis" + if config.UserAgent == "" { + config.UserAgent = restclient.DefaultKubernetesUserAgent() + } + // TODO: Unconditionally set the config.Version, until we fix the config. + //if config.Version == "" { + copyGroupVersion := g.GroupVersion + config.GroupVersion = ©GroupVersion + //} + + config.NegotiatedSerializer = api.Codecs + + if config.QPS == 0 { + config.QPS = 5 + } + if config.Burst == 0 { + config.Burst = 10 + } + return nil +} + +// GetRESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *RbacClient) GetRESTClient() *restclient.RESTClient { + if c == nil { + return nil + } + return c.RESTClient +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/role.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/role.go new file mode 100644 index 000000000000..68e7ebe93b8c --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/role.go @@ -0,0 +1,136 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + api "k8s.io/kubernetes/pkg/api" + rbac "k8s.io/kubernetes/pkg/apis/rbac" + watch "k8s.io/kubernetes/pkg/watch" +) + +// RolesGetter has a method to return a RoleInterface. +// A group's client should implement this interface. +type RolesGetter interface { + Roles(namespace string) RoleInterface +} + +// RoleInterface has methods to work with Role resources. +type RoleInterface interface { + Create(*rbac.Role) (*rbac.Role, error) + Update(*rbac.Role) (*rbac.Role, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*rbac.Role, error) + List(opts api.ListOptions) (*rbac.RoleList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + RoleExpansion +} + +// roles implements RoleInterface +type roles struct { + client *RbacClient + ns string +} + +// newRoles returns a Roles +func newRoles(c *RbacClient, namespace string) *roles { + return &roles{ + client: c, + ns: namespace, + } +} + +// Create takes the representation of a role and creates it. Returns the server's representation of the role, and an error, if there is any. +func (c *roles) Create(role *rbac.Role) (result *rbac.Role, err error) { + result = &rbac.Role{} + err = c.client.Post(). + Namespace(c.ns). + Resource("roles"). + Body(role). + Do(). + Into(result) + return +} + +// Update takes the representation of a role and updates it. 
Returns the server's representation of the role, and an error, if there is any. +func (c *roles) Update(role *rbac.Role) (result *rbac.Role, err error) { + result = &rbac.Role{} + err = c.client.Put(). + Namespace(c.ns). + Resource("roles"). + Name(role.Name). + Body(role). + Do(). + Into(result) + return +} + +// Delete takes name of the role and deletes it. Returns an error if one occurs. +func (c *roles) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("roles"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *roles) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("roles"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the role, and returns the corresponding role object, and an error if there is any. +func (c *roles) Get(name string) (result *rbac.Role, err error) { + result = &rbac.Role{} + err = c.client.Get(). + Namespace(c.ns). + Resource("roles"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Roles that match those selectors. +func (c *roles) List(opts api.ListOptions) (result *rbac.RoleList, err error) { + result = &rbac.RoleList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("roles"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested roles. +func (c *roles) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("roles"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/rolebinding.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/rolebinding.go new file mode 100644 index 000000000000..c73318c9794c --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/rolebinding.go @@ -0,0 +1,136 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + api "k8s.io/kubernetes/pkg/api" + rbac "k8s.io/kubernetes/pkg/apis/rbac" + watch "k8s.io/kubernetes/pkg/watch" +) + +// RoleBindingsGetter has a method to return a RoleBindingInterface. +// A group's client should implement this interface. +type RoleBindingsGetter interface { + RoleBindings(namespace string) RoleBindingInterface +} + +// RoleBindingInterface has methods to work with RoleBinding resources. 
+type RoleBindingInterface interface { + Create(*rbac.RoleBinding) (*rbac.RoleBinding, error) + Update(*rbac.RoleBinding) (*rbac.RoleBinding, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*rbac.RoleBinding, error) + List(opts api.ListOptions) (*rbac.RoleBindingList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + RoleBindingExpansion +} + +// roleBindings implements RoleBindingInterface +type roleBindings struct { + client *RbacClient + ns string +} + +// newRoleBindings returns a RoleBindings +func newRoleBindings(c *RbacClient, namespace string) *roleBindings { + return &roleBindings{ + client: c, + ns: namespace, + } +} + +// Create takes the representation of a roleBinding and creates it. Returns the server's representation of the roleBinding, and an error, if there is any. +func (c *roleBindings) Create(roleBinding *rbac.RoleBinding) (result *rbac.RoleBinding, err error) { + result = &rbac.RoleBinding{} + err = c.client.Post(). + Namespace(c.ns). + Resource("rolebindings"). + Body(roleBinding). + Do(). + Into(result) + return +} + +// Update takes the representation of a roleBinding and updates it. Returns the server's representation of the roleBinding, and an error, if there is any. +func (c *roleBindings) Update(roleBinding *rbac.RoleBinding) (result *rbac.RoleBinding, err error) { + result = &rbac.RoleBinding{} + err = c.client.Put(). + Namespace(c.ns). + Resource("rolebindings"). + Name(roleBinding.Name). + Body(roleBinding). + Do(). + Into(result) + return +} + +// Delete takes name of the roleBinding and deletes it. Returns an error if one occurs. +func (c *roleBindings) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("rolebindings"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *roleBindings) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("rolebindings"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the roleBinding, and returns the corresponding roleBinding object, and an error if there is any. +func (c *roleBindings) Get(name string) (result *rbac.RoleBinding, err error) { + result = &rbac.RoleBinding{} + err = c.client.Get(). + Namespace(c.ns). + Resource("rolebindings"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of RoleBindings that match those selectors. +func (c *roleBindings) List(opts api.ListOptions) (result *rbac.RoleBindingList, err error) { + result = &rbac.RoleBindingList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("rolebindings"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested roleBindings. +func (c *roleBindings) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("rolebindings"). + VersionedParams(&opts, api.ParameterCodec). 
+ Watch() +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/clientset.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/clientset.go new file mode 100644 index 000000000000..2fbae3028901 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/clientset.go @@ -0,0 +1,95 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package release_1_2 + +import ( + "github.com/golang/glog" + v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1" + v1beta1extensions "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/extensions/v1beta1" + restclient "k8s.io/kubernetes/pkg/client/restclient" + discovery "k8s.io/kubernetes/pkg/client/typed/discovery" +) + +type Interface interface { + Discovery() discovery.DiscoveryInterface + Core() v1core.CoreInterface + Extensions() v1beta1extensions.ExtensionsInterface +} + +// Clientset contains the clients for groups. Each group has exactly one +// version included in a Clientset. +type Clientset struct { + *discovery.DiscoveryClient + *v1core.CoreClient + *v1beta1extensions.ExtensionsClient +} + +// Core retrieves the CoreClient +func (c *Clientset) Core() v1core.CoreInterface { + return c.CoreClient +} + +// Extensions retrieves the ExtensionsClient +func (c *Clientset) Extensions() v1beta1extensions.ExtensionsInterface { + return c.ExtensionsClient +} + +// Discovery retrieves the DiscoveryClient +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + return c.DiscoveryClient +} + +// NewForConfig creates a new Clientset for the given config. +func NewForConfig(c *restclient.Config) (*Clientset, error) { + var clientset Clientset + var err error + clientset.CoreClient, err = v1core.NewForConfig(c) + if err != nil { + return &clientset, err + } + clientset.ExtensionsClient, err = v1beta1extensions.NewForConfig(c) + if err != nil { + return &clientset, err + } + + clientset.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(c) + if err != nil { + glog.Errorf("failed to create the DiscoveryClient: %v", err) + } + return &clientset, err +} + +// NewForConfigOrDie creates a new Clientset for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *restclient.Config) *Clientset { + var clientset Clientset + clientset.CoreClient = v1core.NewForConfigOrDie(c) + clientset.ExtensionsClient = v1beta1extensions.NewForConfigOrDie(c) + + clientset.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) + return &clientset +} + +// New creates a new Clientset for the given RESTClient. 
+func New(c *restclient.RESTClient) *Clientset { + var clientset Clientset + clientset.CoreClient = v1core.New(c) + clientset.ExtensionsClient = v1beta1extensions.New(c) + + clientset.DiscoveryClient = discovery.NewDiscoveryClient(c) + return &clientset +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/doc.go new file mode 100644 index 000000000000..01f164f17b91 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with arguments: --clientset-name=release_1_2 --input=[api/v1,extensions/v1beta1] + +// This package has the automatically generated clientset. +package release_1_2 diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/fake/clientset_generated.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/fake/clientset_generated.go new file mode 100644 index 000000000000..ac11770fc01d --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/fake/clientset_generated.go @@ -0,0 +1,72 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package fake
+
+import (
+	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/apimachinery/registered"
+	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2"
+	v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1"
+	fakev1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/fake"
+	v1beta1extensions "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/extensions/v1beta1"
+	fakev1beta1extensions "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/extensions/v1beta1/fake"
+	"k8s.io/kubernetes/pkg/client/testing/core"
+	"k8s.io/kubernetes/pkg/client/typed/discovery"
+	fakediscovery "k8s.io/kubernetes/pkg/client/typed/discovery/fake"
+	"k8s.io/kubernetes/pkg/runtime"
+	"k8s.io/kubernetes/pkg/watch"
+)
+
+// NewSimpleClientset returns a clientset that will respond with the provided objects
+func NewSimpleClientset(objects ...runtime.Object) *Clientset {
+	o := core.NewObjects(api.Scheme, api.Codecs.UniversalDecoder())
+	for _, obj := range objects {
+		if err := o.Add(obj); err != nil {
+			panic(err)
+		}
+	}
+
+	fakePtr := core.Fake{}
+	fakePtr.AddReactor("*", "*", core.ObjectReaction(o, registered.RESTMapper()))
+
+	fakePtr.AddWatchReactor("*", core.DefaultWatchReactor(watch.NewFake(), nil))
+
+	return &Clientset{fakePtr}
+}
+
+// Clientset implements clientset.Interface. Meant to be embedded into a
+// struct to get a default implementation. This makes faking out just the
+// methods you want to test easier.
+type Clientset struct {
+	core.Fake
+}
+
+func (c *Clientset) Discovery() discovery.DiscoveryInterface {
+	return &fakediscovery.FakeDiscovery{Fake: &c.Fake}
+}
+
+var _ clientset.Interface = &Clientset{}
+
+// Core retrieves the CoreClient
+func (c *Clientset) Core() v1core.CoreInterface {
+	return &fakev1core.FakeCore{Fake: &c.Fake}
+}
+
+// Extensions retrieves the ExtensionsClient
+func (c *Clientset) Extensions() v1beta1extensions.ExtensionsInterface {
+	return &fakev1beta1extensions.FakeExtensions{Fake: &c.Fake}
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/fake/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/fake/doc.go
new file mode 100644
index 000000000000..d2e2dcd1d79b
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/fake/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This package is generated by client-gen with arguments: --clientset-name=release_1_2 --input=[api/v1,extensions/v1beta1]
+
+// This package has the automatically generated fake clientset.
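+//
+// A minimal usage sketch (the pod name, namespace, test variable, and imports
+// are illustrative, not part of the generated code): seed the fake with the
+// objects the code under test should observe, then exercise the same
+// clientset.Interface that production code consumes.
+//
+//	c := fake.NewSimpleClientset(&v1.Pod{ObjectMeta: v1.ObjectMeta{Name: "web", Namespace: "default"}})
+//	pod, err := c.Core().Pods("default").Get("web")
+//	if err != nil || pod.Name != "web" {
+//		t.Fatalf("unexpected result: %v, %v", pod, err)
+//	}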
+package fake diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/import_known_versions.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/import_known_versions.go new file mode 100644 index 000000000000..e69af18cd2d1 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/import_known_versions.go @@ -0,0 +1,37 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package release_1_2 + +// These imports are the API groups the client will support. +import ( + "fmt" + + _ "k8s.io/kubernetes/pkg/api/install" + "k8s.io/kubernetes/pkg/apimachinery/registered" + _ "k8s.io/kubernetes/pkg/apis/authorization/install" + _ "k8s.io/kubernetes/pkg/apis/autoscaling/install" + _ "k8s.io/kubernetes/pkg/apis/batch/install" + _ "k8s.io/kubernetes/pkg/apis/componentconfig/install" + _ "k8s.io/kubernetes/pkg/apis/extensions/install" + _ "k8s.io/kubernetes/pkg/apis/metrics/install" +) + +func init() { + if missingVersions := registered.ValidateEnvRequestedVersions(); len(missingVersions) != 0 { + panic(fmt.Sprintf("KUBE_API_VERSIONS contains versions that are not installed: %q.", missingVersions)) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/componentstatus.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/componentstatus.go new file mode 100644 index 000000000000..23363f530d4a --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/componentstatus.go @@ -0,0 +1,127 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + api "k8s.io/kubernetes/pkg/api" + v1 "k8s.io/kubernetes/pkg/api/v1" + watch "k8s.io/kubernetes/pkg/watch" +) + +// ComponentStatusesGetter has a method to return a ComponentStatusInterface. +// A group's client should implement this interface. +type ComponentStatusesGetter interface { + ComponentStatuses() ComponentStatusInterface +} + +// ComponentStatusInterface has methods to work with ComponentStatus resources. 
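+// ComponentStatus is cluster-scoped, so the implementation below carries no
+// namespace field and builds request paths without a Namespace() segment,
+// unlike the namespaced resources elsewhere in this package.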
+type ComponentStatusInterface interface { + Create(*v1.ComponentStatus) (*v1.ComponentStatus, error) + Update(*v1.ComponentStatus) (*v1.ComponentStatus, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*v1.ComponentStatus, error) + List(opts api.ListOptions) (*v1.ComponentStatusList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + ComponentStatusExpansion +} + +// componentStatuses implements ComponentStatusInterface +type componentStatuses struct { + client *CoreClient +} + +// newComponentStatuses returns a ComponentStatuses +func newComponentStatuses(c *CoreClient) *componentStatuses { + return &componentStatuses{ + client: c, + } +} + +// Create takes the representation of a componentStatus and creates it. Returns the server's representation of the componentStatus, and an error, if there is any. +func (c *componentStatuses) Create(componentStatus *v1.ComponentStatus) (result *v1.ComponentStatus, err error) { + result = &v1.ComponentStatus{} + err = c.client.Post(). + Resource("componentstatuses"). + Body(componentStatus). + Do(). + Into(result) + return +} + +// Update takes the representation of a componentStatus and updates it. Returns the server's representation of the componentStatus, and an error, if there is any. +func (c *componentStatuses) Update(componentStatus *v1.ComponentStatus) (result *v1.ComponentStatus, err error) { + result = &v1.ComponentStatus{} + err = c.client.Put(). + Resource("componentstatuses"). + Name(componentStatus.Name). + Body(componentStatus). + Do(). + Into(result) + return +} + +// Delete takes name of the componentStatus and deletes it. Returns an error if one occurs. +func (c *componentStatuses) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Resource("componentstatuses"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *componentStatuses) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Resource("componentstatuses"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the componentStatus, and returns the corresponding componentStatus object, and an error if there is any. +func (c *componentStatuses) Get(name string) (result *v1.ComponentStatus, err error) { + result = &v1.ComponentStatus{} + err = c.client.Get(). + Resource("componentstatuses"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ComponentStatuses that match those selectors. +func (c *componentStatuses) List(opts api.ListOptions) (result *v1.ComponentStatusList, err error) { + result = &v1.ComponentStatusList{} + err = c.client.Get(). + Resource("componentstatuses"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested componentStatuses. +func (c *componentStatuses) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Resource("componentstatuses"). + VersionedParams(&opts, api.ParameterCodec). 
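+		// As with the other Watch methods in this package, the ListOptions are
+		// serialized into query parameters (labelSelector, fieldSelector,
+		// resourceVersion) by api.ParameterCodec before the stream is opened.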
+ Watch() +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/configmap.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/configmap.go new file mode 100644 index 000000000000..4fbb31328a42 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/configmap.go @@ -0,0 +1,136 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + api "k8s.io/kubernetes/pkg/api" + v1 "k8s.io/kubernetes/pkg/api/v1" + watch "k8s.io/kubernetes/pkg/watch" +) + +// ConfigMapsGetter has a method to return a ConfigMapInterface. +// A group's client should implement this interface. +type ConfigMapsGetter interface { + ConfigMaps(namespace string) ConfigMapInterface +} + +// ConfigMapInterface has methods to work with ConfigMap resources. +type ConfigMapInterface interface { + Create(*v1.ConfigMap) (*v1.ConfigMap, error) + Update(*v1.ConfigMap) (*v1.ConfigMap, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*v1.ConfigMap, error) + List(opts api.ListOptions) (*v1.ConfigMapList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + ConfigMapExpansion +} + +// configMaps implements ConfigMapInterface +type configMaps struct { + client *CoreClient + ns string +} + +// newConfigMaps returns a ConfigMaps +func newConfigMaps(c *CoreClient, namespace string) *configMaps { + return &configMaps{ + client: c, + ns: namespace, + } +} + +// Create takes the representation of a configMap and creates it. Returns the server's representation of the configMap, and an error, if there is any. +func (c *configMaps) Create(configMap *v1.ConfigMap) (result *v1.ConfigMap, err error) { + result = &v1.ConfigMap{} + err = c.client.Post(). + Namespace(c.ns). + Resource("configmaps"). + Body(configMap). + Do(). + Into(result) + return +} + +// Update takes the representation of a configMap and updates it. Returns the server's representation of the configMap, and an error, if there is any. +func (c *configMaps) Update(configMap *v1.ConfigMap) (result *v1.ConfigMap, err error) { + result = &v1.ConfigMap{} + err = c.client.Put(). + Namespace(c.ns). + Resource("configmaps"). + Name(configMap.Name). + Body(configMap). + Do(). + Into(result) + return +} + +// Delete takes name of the configMap and deletes it. Returns an error if one occurs. +func (c *configMaps) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("configmaps"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *configMaps) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("configmaps"). 
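+		// DeleteCollection issues one DELETE against the whole configmaps
+		// collection: listOptions (encoded below) selects which objects are
+		// deleted, while options travels as the DeleteOptions request body.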
+ VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the configMap, and returns the corresponding configMap object, and an error if there is any. +func (c *configMaps) Get(name string) (result *v1.ConfigMap, err error) { + result = &v1.ConfigMap{} + err = c.client.Get(). + Namespace(c.ns). + Resource("configmaps"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ConfigMaps that match those selectors. +func (c *configMaps) List(opts api.ListOptions) (result *v1.ConfigMapList, err error) { + result = &v1.ConfigMapList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("configmaps"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested configMaps. +func (c *configMaps) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("configmaps"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/core_client.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/core_client.go similarity index 94% rename from Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/core_client.go rename to Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/core_client.go index dc3561c0b498..6c2874e386d0 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned/core_client.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/core_client.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package unversioned +package v1 import ( api "k8s.io/kubernetes/pkg/api" @@ -31,7 +31,6 @@ type CoreInterface interface { NamespacesGetter NodesGetter PersistentVolumesGetter - PersistentVolumeClaimsGetter PodsGetter PodTemplatesGetter ReplicationControllersGetter @@ -78,10 +77,6 @@ func (c *CoreClient) PersistentVolumes() PersistentVolumeInterface { return newPersistentVolumes(c) } -func (c *CoreClient) PersistentVolumeClaims(namespace string) PersistentVolumeClaimInterface { - return newPersistentVolumeClaims(c, namespace) -} - func (c *CoreClient) Pods(namespace string) PodInterface { return newPods(c, namespace) } @@ -154,7 +149,8 @@ func setConfigDefaults(config *restclient.Config) error { config.GroupVersion = ©GroupVersion //} - config.Codec = api.Codecs.LegacyCodec(*config.GroupVersion) + config.NegotiatedSerializer = api.Codecs + if config.QPS == 0 { config.QPS = 5 } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/doc.go new file mode 100644 index 000000000000..30d096852550 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with arguments: --clientset-name=release_1_2 --input=[api/v1,extensions/v1beta1] + +// This package has the automatically generated typed clients. +package v1 diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/endpoints.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/endpoints.go new file mode 100644 index 000000000000..409b044c72d6 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/endpoints.go @@ -0,0 +1,136 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + api "k8s.io/kubernetes/pkg/api" + v1 "k8s.io/kubernetes/pkg/api/v1" + watch "k8s.io/kubernetes/pkg/watch" +) + +// EndpointsGetter has a method to return a EndpointsInterface. +// A group's client should implement this interface. +type EndpointsGetter interface { + Endpoints(namespace string) EndpointsInterface +} + +// EndpointsInterface has methods to work with Endpoints resources. +type EndpointsInterface interface { + Create(*v1.Endpoints) (*v1.Endpoints, error) + Update(*v1.Endpoints) (*v1.Endpoints, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*v1.Endpoints, error) + List(opts api.ListOptions) (*v1.EndpointsList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + EndpointsExpansion +} + +// endpoints implements EndpointsInterface +type endpoints struct { + client *CoreClient + ns string +} + +// newEndpoints returns a Endpoints +func newEndpoints(c *CoreClient, namespace string) *endpoints { + return &endpoints{ + client: c, + ns: namespace, + } +} + +// Create takes the representation of a endpoints and creates it. Returns the server's representation of the endpoints, and an error, if there is any. +func (c *endpoints) Create(endpoints *v1.Endpoints) (result *v1.Endpoints, err error) { + result = &v1.Endpoints{} + err = c.client.Post(). + Namespace(c.ns). + Resource("endpoints"). + Body(endpoints). + Do(). + Into(result) + return +} + +// Update takes the representation of a endpoints and updates it. Returns the server's representation of the endpoints, and an error, if there is any. +func (c *endpoints) Update(endpoints *v1.Endpoints) (result *v1.Endpoints, err error) { + result = &v1.Endpoints{} + err = c.client.Put(). + Namespace(c.ns). + Resource("endpoints"). + Name(endpoints.Name). + Body(endpoints). 
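+		// As a PUT, this replaces the stored object wholesale; if
+		// endpoints.ResourceVersion is set, the server rejects the write on
+		// conflict, giving optimistic concurrency.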
+ Do(). + Into(result) + return +} + +// Delete takes name of the endpoints and deletes it. Returns an error if one occurs. +func (c *endpoints) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("endpoints"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *endpoints) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("endpoints"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the endpoints, and returns the corresponding endpoints object, and an error if there is any. +func (c *endpoints) Get(name string) (result *v1.Endpoints, err error) { + result = &v1.Endpoints{} + err = c.client.Get(). + Namespace(c.ns). + Resource("endpoints"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Endpoints that match those selectors. +func (c *endpoints) List(opts api.ListOptions) (result *v1.EndpointsList, err error) { + result = &v1.EndpointsList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("endpoints"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested endpoints. +func (c *endpoints) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("endpoints"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/event.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/event.go new file mode 100644 index 000000000000..92266c98b619 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/event.go @@ -0,0 +1,136 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + api "k8s.io/kubernetes/pkg/api" + v1 "k8s.io/kubernetes/pkg/api/v1" + watch "k8s.io/kubernetes/pkg/watch" +) + +// EventsGetter has a method to return a EventInterface. +// A group's client should implement this interface. +type EventsGetter interface { + Events(namespace string) EventInterface +} + +// EventInterface has methods to work with Event resources. 
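+// In addition to the generated methods below, the concrete type also
+// satisfies EventExpansion (see event_expansion.go later in this diff), which
+// adds CreateWithEventNamespace, UpdateWithEventNamespace, Patch, Search, and
+// GetFieldSelector.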
+type EventInterface interface { + Create(*v1.Event) (*v1.Event, error) + Update(*v1.Event) (*v1.Event, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*v1.Event, error) + List(opts api.ListOptions) (*v1.EventList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + EventExpansion +} + +// events implements EventInterface +type events struct { + client *CoreClient + ns string +} + +// newEvents returns a Events +func newEvents(c *CoreClient, namespace string) *events { + return &events{ + client: c, + ns: namespace, + } +} + +// Create takes the representation of a event and creates it. Returns the server's representation of the event, and an error, if there is any. +func (c *events) Create(event *v1.Event) (result *v1.Event, err error) { + result = &v1.Event{} + err = c.client.Post(). + Namespace(c.ns). + Resource("events"). + Body(event). + Do(). + Into(result) + return +} + +// Update takes the representation of a event and updates it. Returns the server's representation of the event, and an error, if there is any. +func (c *events) Update(event *v1.Event) (result *v1.Event, err error) { + result = &v1.Event{} + err = c.client.Put(). + Namespace(c.ns). + Resource("events"). + Name(event.Name). + Body(event). + Do(). + Into(result) + return +} + +// Delete takes name of the event and deletes it. Returns an error if one occurs. +func (c *events) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("events"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *events) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("events"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the event, and returns the corresponding event object, and an error if there is any. +func (c *events) Get(name string) (result *v1.Event, err error) { + result = &v1.Event{} + err = c.client.Get(). + Namespace(c.ns). + Resource("events"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Events that match those selectors. +func (c *events) List(opts api.ListOptions) (result *v1.EventList, err error) { + result = &v1.EventList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("events"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested events. +func (c *events) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("events"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/event_expansion.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/event_expansion.go new file mode 100644 index 000000000000..971c850c7a37 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/event_expansion.go @@ -0,0 +1,158 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "fmt" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/fields" + "k8s.io/kubernetes/pkg/runtime" +) + +// The EventExpansion interface allows manually adding extra methods to the EventInterface. +type EventExpansion interface { + // CreateWithEventNamespace is the same as a Create, except that it sends the request to the event.Namespace. + CreateWithEventNamespace(event *v1.Event) (*v1.Event, error) + // UpdateWithEventNamespace is the same as a Update, except that it sends the request to the event.Namespace. + UpdateWithEventNamespace(event *v1.Event) (*v1.Event, error) + Patch(event *v1.Event, data []byte) (*v1.Event, error) + // Search finds events about the specified object + Search(objOrRef runtime.Object) (*v1.EventList, error) + // Returns the appropriate field selector based on the API version being used to communicate with the server. + // The returned field selector can be used with List and Watch to filter desired events. + GetFieldSelector(involvedObjectName, involvedObjectNamespace, involvedObjectKind, involvedObjectUID *string) fields.Selector +} + +// CreateWithEventNamespace makes a new event. Returns the copy of the event the server returns, +// or an error. The namespace to create the event within is deduced from the +// event; it must either match this event client's namespace, or this event +// client must have been created with the "" namespace. +func (e *events) CreateWithEventNamespace(event *v1.Event) (*v1.Event, error) { + if e.ns != "" && event.Namespace != e.ns { + return nil, fmt.Errorf("can't create an event with namespace '%v' in namespace '%v'", event.Namespace, e.ns) + } + result := &v1.Event{} + err := e.client.Post(). + NamespaceIfScoped(event.Namespace, len(event.Namespace) > 0). + Resource("events"). + Body(event). + Do(). + Into(result) + return result, err +} + +// UpdateWithEventNamespace modifies an existing event. It returns the copy of the event that the server returns, +// or an error. The namespace and key to update the event within is deduced from the event. The +// namespace must either match this event client's namespace, or this event client must have been +// created with the "" namespace. Update also requires the ResourceVersion to be set in the event +// object. +func (e *events) UpdateWithEventNamespace(event *v1.Event) (*v1.Event, error) { + result := &v1.Event{} + err := e.client.Put(). + NamespaceIfScoped(event.Namespace, len(event.Namespace) > 0). + Resource("events"). + Name(event.Name). + Body(event). + Do(). + Into(result) + return result, err +} + +// Patch modifies an existing event. It returns the copy of the event that the server returns, or an +// error. The namespace and name of the target event is deduced from the incompleteEvent. The +// namespace must either match this event client's namespace, or this event client must have been +// created with the "" namespace. 
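+// A minimal caller-side sketch (the client variable, event name, namespace,
+// and patch payload are illustrative only):
+//
+//	patched, err := client.Core().Events("default").Patch(
+//		&v1.Event{ObjectMeta: v1.ObjectMeta{Name: "web.14d5c4e9", Namespace: "default"}},
+//		[]byte(`{"message":"Pulled image"}`))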
+func (e *events) Patch(incompleteEvent *v1.Event, data []byte) (*v1.Event, error) { + result := &v1.Event{} + err := e.client.Patch(api.StrategicMergePatchType). + NamespaceIfScoped(incompleteEvent.Namespace, len(incompleteEvent.Namespace) > 0). + Resource("events"). + Name(incompleteEvent.Name). + Body(data). + Do(). + Into(result) + return result, err +} + +// Search finds events about the specified object. The namespace of the +// object must match this event's client namespace unless the event client +// was made with the "" namespace. +func (e *events) Search(objOrRef runtime.Object) (*v1.EventList, error) { + ref, err := api.GetReference(objOrRef) + if err != nil { + return nil, err + } + if e.ns != "" && ref.Namespace != e.ns { + return nil, fmt.Errorf("won't be able to find any events of namespace '%v' in namespace '%v'", ref.Namespace, e.ns) + } + stringRefKind := string(ref.Kind) + var refKind *string + if stringRefKind != "" { + refKind = &stringRefKind + } + stringRefUID := string(ref.UID) + var refUID *string + if stringRefUID != "" { + refUID = &stringRefUID + } + fieldSelector := e.GetFieldSelector(&ref.Name, &ref.Namespace, refKind, refUID) + return e.List(api.ListOptions{FieldSelector: fieldSelector}) +} + +// Returns the appropriate field selector based on the API version being used to communicate with the server. +// The returned field selector can be used with List and Watch to filter desired events. +func (e *events) GetFieldSelector(involvedObjectName, involvedObjectNamespace, involvedObjectKind, involvedObjectUID *string) fields.Selector { + apiVersion := e.client.APIVersion().String() + field := fields.Set{} + if involvedObjectName != nil { + field[GetInvolvedObjectNameFieldLabel(apiVersion)] = *involvedObjectName + } + if involvedObjectNamespace != nil { + field["involvedObject.namespace"] = *involvedObjectNamespace + } + if involvedObjectKind != nil { + field["involvedObject.kind"] = *involvedObjectKind + } + if involvedObjectUID != nil { + field["involvedObject.uid"] = *involvedObjectUID + } + return field.AsSelector() +} + +// Returns the appropriate field label to use for name of the involved object as per the given API version. +func GetInvolvedObjectNameFieldLabel(version string) string { + return "involvedObject.name" +} + +// TODO: This is a temporary arrangement and will be removed once all clients are moved to use the clientset. +type EventSinkImpl struct { + Interface EventInterface +} + +func (e *EventSinkImpl) Create(event *v1.Event) (*v1.Event, error) { + return e.Interface.CreateWithEventNamespace(event) +} + +func (e *EventSinkImpl) Update(event *v1.Event) (*v1.Event, error) { + return e.Interface.UpdateWithEventNamespace(event) +} + +func (e *EventSinkImpl) Patch(event *v1.Event, data []byte) (*v1.Event, error) { + return e.Interface.Patch(event, data) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/fake/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/fake/doc.go new file mode 100644 index 000000000000..bafa0bfe44c5 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with arguments: --clientset-name=release_1_2 --input=[api/v1,extensions/v1beta1] + +// Package fake has the automatically generated clients. +package fake diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/fake/fake_componentstatus.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/fake/fake_componentstatus.go new file mode 100644 index 000000000000..6a86ac56902e --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/fake/fake_componentstatus.go @@ -0,0 +1,99 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + v1 "k8s.io/kubernetes/pkg/api/v1" + core "k8s.io/kubernetes/pkg/client/testing/core" + labels "k8s.io/kubernetes/pkg/labels" + watch "k8s.io/kubernetes/pkg/watch" +) + +// FakeComponentStatuses implements ComponentStatusInterface +type FakeComponentStatuses struct { + Fake *FakeCore +} + +var componentstatusesResource = unversioned.GroupVersionResource{Group: "", Version: "v1", Resource: "componentstatuses"} + +func (c *FakeComponentStatuses) Create(componentStatus *v1.ComponentStatus) (result *v1.ComponentStatus, err error) { + obj, err := c.Fake. + Invokes(core.NewRootCreateAction(componentstatusesResource, componentStatus), &v1.ComponentStatus{}) + if obj == nil { + return nil, err + } + return obj.(*v1.ComponentStatus), err +} + +func (c *FakeComponentStatuses) Update(componentStatus *v1.ComponentStatus) (result *v1.ComponentStatus, err error) { + obj, err := c.Fake. + Invokes(core.NewRootUpdateAction(componentstatusesResource, componentStatus), &v1.ComponentStatus{}) + if obj == nil { + return nil, err + } + return obj.(*v1.ComponentStatus), err +} + +func (c *FakeComponentStatuses) Delete(name string, options *api.DeleteOptions) error { + _, err := c.Fake. + Invokes(core.NewRootDeleteAction(componentstatusesResource, name), &v1.ComponentStatus{}) + return err +} + +func (c *FakeComponentStatuses) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + action := core.NewRootDeleteCollectionAction(componentstatusesResource, listOptions) + + _, err := c.Fake.Invokes(action, &v1.ComponentStatusList{}) + return err +} + +func (c *FakeComponentStatuses) Get(name string) (result *v1.ComponentStatus, err error) { + obj, err := c.Fake. 
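+	// Invokes records the action for later assertions and dispatches it to
+	// the reactor chain registered on the Fake (the ObjectReaction tracker
+	// when built via NewSimpleClientset), returning whatever object the first
+	// matching reactor produced.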
+ Invokes(core.NewRootGetAction(componentstatusesResource, name), &v1.ComponentStatus{}) + if obj == nil { + return nil, err + } + return obj.(*v1.ComponentStatus), err +} + +func (c *FakeComponentStatuses) List(opts api.ListOptions) (result *v1.ComponentStatusList, err error) { + obj, err := c.Fake. + Invokes(core.NewRootListAction(componentstatusesResource, opts), &v1.ComponentStatusList{}) + if obj == nil { + return nil, err + } + + label := opts.LabelSelector + if label == nil { + label = labels.Everything() + } + list := &v1.ComponentStatusList{} + for _, item := range obj.(*v1.ComponentStatusList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested componentStatuses. +func (c *FakeComponentStatuses) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(core.NewRootWatchAction(componentstatusesResource, opts)) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/fake/fake_configmap.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/fake/fake_configmap.go new file mode 100644 index 000000000000..81dcc633add5 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/fake/fake_configmap.go @@ -0,0 +1,106 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + v1 "k8s.io/kubernetes/pkg/api/v1" + core "k8s.io/kubernetes/pkg/client/testing/core" + labels "k8s.io/kubernetes/pkg/labels" + watch "k8s.io/kubernetes/pkg/watch" +) + +// FakeConfigMaps implements ConfigMapInterface +type FakeConfigMaps struct { + Fake *FakeCore + ns string +} + +var configmapsResource = unversioned.GroupVersionResource{Group: "", Version: "v1", Resource: "configmaps"} + +func (c *FakeConfigMaps) Create(configMap *v1.ConfigMap) (result *v1.ConfigMap, err error) { + obj, err := c.Fake. + Invokes(core.NewCreateAction(configmapsResource, c.ns, configMap), &v1.ConfigMap{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.ConfigMap), err +} + +func (c *FakeConfigMaps) Update(configMap *v1.ConfigMap) (result *v1.ConfigMap, err error) { + obj, err := c.Fake. + Invokes(core.NewUpdateAction(configmapsResource, c.ns, configMap), &v1.ConfigMap{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.ConfigMap), err +} + +func (c *FakeConfigMaps) Delete(name string, options *api.DeleteOptions) error { + _, err := c.Fake. 
+ Invokes(core.NewDeleteAction(configmapsResource, c.ns, name), &v1.ConfigMap{}) + + return err +} + +func (c *FakeConfigMaps) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + action := core.NewDeleteCollectionAction(configmapsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1.ConfigMapList{}) + return err +} + +func (c *FakeConfigMaps) Get(name string) (result *v1.ConfigMap, err error) { + obj, err := c.Fake. + Invokes(core.NewGetAction(configmapsResource, c.ns, name), &v1.ConfigMap{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.ConfigMap), err +} + +func (c *FakeConfigMaps) List(opts api.ListOptions) (result *v1.ConfigMapList, err error) { + obj, err := c.Fake. + Invokes(core.NewListAction(configmapsResource, c.ns, opts), &v1.ConfigMapList{}) + + if obj == nil { + return nil, err + } + + label := opts.LabelSelector + if label == nil { + label = labels.Everything() + } + list := &v1.ConfigMapList{} + for _, item := range obj.(*v1.ConfigMapList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested configMaps. +func (c *FakeConfigMaps) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(core.NewWatchAction(configmapsResource, c.ns, opts)) + +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/fake/fake_core_client.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/fake/fake_core_client.go new file mode 100644 index 000000000000..2f2c2345487b --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/fake/fake_core_client.go @@ -0,0 +1,86 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package fake + +import ( + v1 "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1" + core "k8s.io/kubernetes/pkg/client/testing/core" +) + +type FakeCore struct { + *core.Fake +} + +func (c *FakeCore) ComponentStatuses() v1.ComponentStatusInterface { + return &FakeComponentStatuses{c} +} + +func (c *FakeCore) ConfigMaps(namespace string) v1.ConfigMapInterface { + return &FakeConfigMaps{c, namespace} +} + +func (c *FakeCore) Endpoints(namespace string) v1.EndpointsInterface { + return &FakeEndpoints{c, namespace} +} + +func (c *FakeCore) Events(namespace string) v1.EventInterface { + return &FakeEvents{c, namespace} +} + +func (c *FakeCore) LimitRanges(namespace string) v1.LimitRangeInterface { + return &FakeLimitRanges{c, namespace} +} + +func (c *FakeCore) Namespaces() v1.NamespaceInterface { + return &FakeNamespaces{c} +} + +func (c *FakeCore) Nodes() v1.NodeInterface { + return &FakeNodes{c} +} + +func (c *FakeCore) PersistentVolumes() v1.PersistentVolumeInterface { + return &FakePersistentVolumes{c} +} + +func (c *FakeCore) Pods(namespace string) v1.PodInterface { + return &FakePods{c, namespace} +} + +func (c *FakeCore) PodTemplates(namespace string) v1.PodTemplateInterface { + return &FakePodTemplates{c, namespace} +} + +func (c *FakeCore) ReplicationControllers(namespace string) v1.ReplicationControllerInterface { + return &FakeReplicationControllers{c, namespace} +} + +func (c *FakeCore) ResourceQuotas(namespace string) v1.ResourceQuotaInterface { + return &FakeResourceQuotas{c, namespace} +} + +func (c *FakeCore) Secrets(namespace string) v1.SecretInterface { + return &FakeSecrets{c, namespace} +} + +func (c *FakeCore) Services(namespace string) v1.ServiceInterface { + return &FakeServices{c, namespace} +} + +func (c *FakeCore) ServiceAccounts(namespace string) v1.ServiceAccountInterface { + return &FakeServiceAccounts{c, namespace} +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/fake/fake_endpoints.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/fake/fake_endpoints.go new file mode 100644 index 000000000000..f5c570ffd21c --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/fake/fake_endpoints.go @@ -0,0 +1,106 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package fake + +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + v1 "k8s.io/kubernetes/pkg/api/v1" + core "k8s.io/kubernetes/pkg/client/testing/core" + labels "k8s.io/kubernetes/pkg/labels" + watch "k8s.io/kubernetes/pkg/watch" +) + +// FakeEndpoints implements EndpointsInterface +type FakeEndpoints struct { + Fake *FakeCore + ns string +} + +var endpointsResource = unversioned.GroupVersionResource{Group: "", Version: "v1", Resource: "endpoints"} + +func (c *FakeEndpoints) Create(endpoints *v1.Endpoints) (result *v1.Endpoints, err error) { + obj, err := c.Fake. + Invokes(core.NewCreateAction(endpointsResource, c.ns, endpoints), &v1.Endpoints{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Endpoints), err +} + +func (c *FakeEndpoints) Update(endpoints *v1.Endpoints) (result *v1.Endpoints, err error) { + obj, err := c.Fake. + Invokes(core.NewUpdateAction(endpointsResource, c.ns, endpoints), &v1.Endpoints{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Endpoints), err +} + +func (c *FakeEndpoints) Delete(name string, options *api.DeleteOptions) error { + _, err := c.Fake. + Invokes(core.NewDeleteAction(endpointsResource, c.ns, name), &v1.Endpoints{}) + + return err +} + +func (c *FakeEndpoints) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + action := core.NewDeleteCollectionAction(endpointsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1.EndpointsList{}) + return err +} + +func (c *FakeEndpoints) Get(name string) (result *v1.Endpoints, err error) { + obj, err := c.Fake. + Invokes(core.NewGetAction(endpointsResource, c.ns, name), &v1.Endpoints{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Endpoints), err +} + +func (c *FakeEndpoints) List(opts api.ListOptions) (result *v1.EndpointsList, err error) { + obj, err := c.Fake. + Invokes(core.NewListAction(endpointsResource, c.ns, opts), &v1.EndpointsList{}) + + if obj == nil { + return nil, err + } + + label := opts.LabelSelector + if label == nil { + label = labels.Everything() + } + list := &v1.EndpointsList{} + for _, item := range obj.(*v1.EndpointsList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested endpoints. +func (c *FakeEndpoints) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(core.NewWatchAction(endpointsResource, c.ns, opts)) + +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/fake/fake_event.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/fake/fake_event.go new file mode 100644 index 000000000000..5dd7e08b8d26 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/fake/fake_event.go @@ -0,0 +1,106 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + v1 "k8s.io/kubernetes/pkg/api/v1" + core "k8s.io/kubernetes/pkg/client/testing/core" + labels "k8s.io/kubernetes/pkg/labels" + watch "k8s.io/kubernetes/pkg/watch" +) + +// FakeEvents implements EventInterface +type FakeEvents struct { + Fake *FakeCore + ns string +} + +var eventsResource = unversioned.GroupVersionResource{Group: "", Version: "v1", Resource: "events"} + +func (c *FakeEvents) Create(event *v1.Event) (result *v1.Event, err error) { + obj, err := c.Fake. + Invokes(core.NewCreateAction(eventsResource, c.ns, event), &v1.Event{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Event), err +} + +func (c *FakeEvents) Update(event *v1.Event) (result *v1.Event, err error) { + obj, err := c.Fake. + Invokes(core.NewUpdateAction(eventsResource, c.ns, event), &v1.Event{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Event), err +} + +func (c *FakeEvents) Delete(name string, options *api.DeleteOptions) error { + _, err := c.Fake. + Invokes(core.NewDeleteAction(eventsResource, c.ns, name), &v1.Event{}) + + return err +} + +func (c *FakeEvents) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + action := core.NewDeleteCollectionAction(eventsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1.EventList{}) + return err +} + +func (c *FakeEvents) Get(name string) (result *v1.Event, err error) { + obj, err := c.Fake. + Invokes(core.NewGetAction(eventsResource, c.ns, name), &v1.Event{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Event), err +} + +func (c *FakeEvents) List(opts api.ListOptions) (result *v1.EventList, err error) { + obj, err := c.Fake. + Invokes(core.NewListAction(eventsResource, c.ns, opts), &v1.EventList{}) + + if obj == nil { + return nil, err + } + + label := opts.LabelSelector + if label == nil { + label = labels.Everything() + } + list := &v1.EventList{} + for _, item := range obj.(*v1.EventList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested events. +func (c *FakeEvents) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(core.NewWatchAction(eventsResource, c.ns, opts)) + +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/fake/fake_event_expansion.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/fake/fake_event_expansion.go new file mode 100644 index 000000000000..173032b60cb7 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/fake/fake_event_expansion.go @@ -0,0 +1,89 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/client/testing/core" + "k8s.io/kubernetes/pkg/fields" + "k8s.io/kubernetes/pkg/runtime" +) + +func (c *FakeEvents) CreateWithEventNamespace(event *v1.Event) (*v1.Event, error) { + action := core.NewRootCreateAction(eventsResource, event) + if c.ns != "" { + action = core.NewCreateAction(eventsResource, c.ns, event) + } + obj, err := c.Fake.Invokes(action, event) + if obj == nil { + return nil, err + } + + return obj.(*v1.Event), err +} + +// Update replaces an existing event. Returns the copy of the event the server returns, or an error. +func (c *FakeEvents) UpdateWithEventNamespace(event *v1.Event) (*v1.Event, error) { + action := core.NewRootUpdateAction(eventsResource, event) + if c.ns != "" { + action = core.NewUpdateAction(eventsResource, c.ns, event) + } + obj, err := c.Fake.Invokes(action, event) + if obj == nil { + return nil, err + } + + return obj.(*v1.Event), err +} + +// Patch patches an existing event. Returns the copy of the event the server returns, or an error. +func (c *FakeEvents) Patch(event *v1.Event, data []byte) (*v1.Event, error) { + action := core.NewRootPatchAction(eventsResource, event) + if c.ns != "" { + action = core.NewPatchAction(eventsResource, c.ns, event) + } + obj, err := c.Fake.Invokes(action, event) + if obj == nil { + return nil, err + } + + return obj.(*v1.Event), err +} + +// Search returns a list of events matching the specified object. +func (c *FakeEvents) Search(objOrRef runtime.Object) (*v1.EventList, error) { + action := core.NewRootListAction(eventsResource, api.ListOptions{}) + if c.ns != "" { + action = core.NewListAction(eventsResource, c.ns, api.ListOptions{}) + } + obj, err := c.Fake.Invokes(action, &v1.EventList{}) + if obj == nil { + return nil, err + } + + return obj.(*v1.EventList), err +} + +func (c *FakeEvents) GetFieldSelector(involvedObjectName, involvedObjectNamespace, involvedObjectKind, involvedObjectUID *string) fields.Selector { + action := core.GenericActionImpl{} + action.Verb = "get-field-selector" + action.Resource = eventsResource + + c.Fake.Invokes(action, nil) + return fields.Everything() +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/fake/fake_limitrange.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/fake/fake_limitrange.go new file mode 100644 index 000000000000..f5755a87b668 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/fake/fake_limitrange.go @@ -0,0 +1,106 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package fake + +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + v1 "k8s.io/kubernetes/pkg/api/v1" + core "k8s.io/kubernetes/pkg/client/testing/core" + labels "k8s.io/kubernetes/pkg/labels" + watch "k8s.io/kubernetes/pkg/watch" +) + +// FakeLimitRanges implements LimitRangeInterface +type FakeLimitRanges struct { + Fake *FakeCore + ns string +} + +var limitrangesResource = unversioned.GroupVersionResource{Group: "", Version: "v1", Resource: "limitranges"} + +func (c *FakeLimitRanges) Create(limitRange *v1.LimitRange) (result *v1.LimitRange, err error) { + obj, err := c.Fake. + Invokes(core.NewCreateAction(limitrangesResource, c.ns, limitRange), &v1.LimitRange{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.LimitRange), err +} + +func (c *FakeLimitRanges) Update(limitRange *v1.LimitRange) (result *v1.LimitRange, err error) { + obj, err := c.Fake. + Invokes(core.NewUpdateAction(limitrangesResource, c.ns, limitRange), &v1.LimitRange{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.LimitRange), err +} + +func (c *FakeLimitRanges) Delete(name string, options *api.DeleteOptions) error { + _, err := c.Fake. + Invokes(core.NewDeleteAction(limitrangesResource, c.ns, name), &v1.LimitRange{}) + + return err +} + +func (c *FakeLimitRanges) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + action := core.NewDeleteCollectionAction(limitrangesResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1.LimitRangeList{}) + return err +} + +func (c *FakeLimitRanges) Get(name string) (result *v1.LimitRange, err error) { + obj, err := c.Fake. + Invokes(core.NewGetAction(limitrangesResource, c.ns, name), &v1.LimitRange{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.LimitRange), err +} + +func (c *FakeLimitRanges) List(opts api.ListOptions) (result *v1.LimitRangeList, err error) { + obj, err := c.Fake. + Invokes(core.NewListAction(limitrangesResource, c.ns, opts), &v1.LimitRangeList{}) + + if obj == nil { + return nil, err + } + + label := opts.LabelSelector + if label == nil { + label = labels.Everything() + } + list := &v1.LimitRangeList{} + for _, item := range obj.(*v1.LimitRangeList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested limitRanges. +func (c *FakeLimitRanges) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(core.NewWatchAction(limitrangesResource, c.ns, opts)) + +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/fake/fake_namespace.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/fake/fake_namespace.go new file mode 100644 index 000000000000..b81ca5c52ec9 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/fake/fake_namespace.go @@ -0,0 +1,108 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + v1 "k8s.io/kubernetes/pkg/api/v1" + core "k8s.io/kubernetes/pkg/client/testing/core" + labels "k8s.io/kubernetes/pkg/labels" + watch "k8s.io/kubernetes/pkg/watch" +) + +// FakeNamespaces implements NamespaceInterface +type FakeNamespaces struct { + Fake *FakeCore +} + +var namespacesResource = unversioned.GroupVersionResource{Group: "", Version: "v1", Resource: "namespaces"} + +func (c *FakeNamespaces) Create(namespace *v1.Namespace) (result *v1.Namespace, err error) { + obj, err := c.Fake. + Invokes(core.NewRootCreateAction(namespacesResource, namespace), &v1.Namespace{}) + if obj == nil { + return nil, err + } + return obj.(*v1.Namespace), err +} + +func (c *FakeNamespaces) Update(namespace *v1.Namespace) (result *v1.Namespace, err error) { + obj, err := c.Fake. + Invokes(core.NewRootUpdateAction(namespacesResource, namespace), &v1.Namespace{}) + if obj == nil { + return nil, err + } + return obj.(*v1.Namespace), err +} + +func (c *FakeNamespaces) UpdateStatus(namespace *v1.Namespace) (*v1.Namespace, error) { + obj, err := c.Fake. + Invokes(core.NewRootUpdateSubresourceAction(namespacesResource, "status", namespace), &v1.Namespace{}) + if obj == nil { + return nil, err + } + return obj.(*v1.Namespace), err +} + +func (c *FakeNamespaces) Delete(name string, options *api.DeleteOptions) error { + _, err := c.Fake. + Invokes(core.NewRootDeleteAction(namespacesResource, name), &v1.Namespace{}) + return err +} + +func (c *FakeNamespaces) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + action := core.NewRootDeleteCollectionAction(namespacesResource, listOptions) + + _, err := c.Fake.Invokes(action, &v1.NamespaceList{}) + return err +} + +func (c *FakeNamespaces) Get(name string) (result *v1.Namespace, err error) { + obj, err := c.Fake. + Invokes(core.NewRootGetAction(namespacesResource, name), &v1.Namespace{}) + if obj == nil { + return nil, err + } + return obj.(*v1.Namespace), err +} + +func (c *FakeNamespaces) List(opts api.ListOptions) (result *v1.NamespaceList, err error) { + obj, err := c.Fake. + Invokes(core.NewRootListAction(namespacesResource, opts), &v1.NamespaceList{}) + if obj == nil { + return nil, err + } + + label := opts.LabelSelector + if label == nil { + label = labels.Everything() + } + list := &v1.NamespaceList{} + for _, item := range obj.(*v1.NamespaceList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested namespaces. +func (c *FakeNamespaces) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.Fake. 
+ InvokesWatch(core.NewRootWatchAction(namespacesResource, opts)) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/fake/fake_namespace_expansion.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/fake/fake_namespace_expansion.go new file mode 100644 index 000000000000..a4416ffcf473 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/fake/fake_namespace_expansion.go @@ -0,0 +1,37 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/client/testing/core" +) + +func (c *FakeNamespaces) Finalize(namespace *v1.Namespace) (*v1.Namespace, error) { + action := core.CreateActionImpl{} + action.Verb = "create" + action.Resource = namespacesResource + action.Subresource = "finalize" + action.Object = namespace + + obj, err := c.Fake.Invokes(action, namespace) + if obj == nil { + return nil, err + } + + return obj.(*v1.Namespace), err +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/fake/fake_node.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/fake/fake_node.go new file mode 100644 index 000000000000..320f80364fb4 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/fake/fake_node.go @@ -0,0 +1,108 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + v1 "k8s.io/kubernetes/pkg/api/v1" + core "k8s.io/kubernetes/pkg/client/testing/core" + labels "k8s.io/kubernetes/pkg/labels" + watch "k8s.io/kubernetes/pkg/watch" +) + +// FakeNodes implements NodeInterface +type FakeNodes struct { + Fake *FakeCore +} + +var nodesResource = unversioned.GroupVersionResource{Group: "", Version: "v1", Resource: "nodes"} + +func (c *FakeNodes) Create(node *v1.Node) (result *v1.Node, err error) { + obj, err := c.Fake. + Invokes(core.NewRootCreateAction(nodesResource, node), &v1.Node{}) + if obj == nil { + return nil, err + } + return obj.(*v1.Node), err +} + +func (c *FakeNodes) Update(node *v1.Node) (result *v1.Node, err error) { + obj, err := c.Fake. 
+ Invokes(core.NewRootUpdateAction(nodesResource, node), &v1.Node{}) + if obj == nil { + return nil, err + } + return obj.(*v1.Node), err +} + +func (c *FakeNodes) UpdateStatus(node *v1.Node) (*v1.Node, error) { + obj, err := c.Fake. + Invokes(core.NewRootUpdateSubresourceAction(nodesResource, "status", node), &v1.Node{}) + if obj == nil { + return nil, err + } + return obj.(*v1.Node), err +} + +func (c *FakeNodes) Delete(name string, options *api.DeleteOptions) error { + _, err := c.Fake. + Invokes(core.NewRootDeleteAction(nodesResource, name), &v1.Node{}) + return err +} + +func (c *FakeNodes) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + action := core.NewRootDeleteCollectionAction(nodesResource, listOptions) + + _, err := c.Fake.Invokes(action, &v1.NodeList{}) + return err +} + +func (c *FakeNodes) Get(name string) (result *v1.Node, err error) { + obj, err := c.Fake. + Invokes(core.NewRootGetAction(nodesResource, name), &v1.Node{}) + if obj == nil { + return nil, err + } + return obj.(*v1.Node), err +} + +func (c *FakeNodes) List(opts api.ListOptions) (result *v1.NodeList, err error) { + obj, err := c.Fake. + Invokes(core.NewRootListAction(nodesResource, opts), &v1.NodeList{}) + if obj == nil { + return nil, err + } + + label := opts.LabelSelector + if label == nil { + label = labels.Everything() + } + list := &v1.NodeList{} + for _, item := range obj.(*v1.NodeList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested nodes. +func (c *FakeNodes) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(core.NewRootWatchAction(nodesResource, opts)) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/fake/fake_persistentvolume.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/fake/fake_persistentvolume.go new file mode 100644 index 000000000000..0aa61b830d8e --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/fake/fake_persistentvolume.go @@ -0,0 +1,108 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + v1 "k8s.io/kubernetes/pkg/api/v1" + core "k8s.io/kubernetes/pkg/client/testing/core" + labels "k8s.io/kubernetes/pkg/labels" + watch "k8s.io/kubernetes/pkg/watch" +) + +// FakePersistentVolumes implements PersistentVolumeInterface +type FakePersistentVolumes struct { + Fake *FakeCore +} + +var persistentvolumesResource = unversioned.GroupVersionResource{Group: "", Version: "v1", Resource: "persistentvolumes"} + +func (c *FakePersistentVolumes) Create(persistentVolume *v1.PersistentVolume) (result *v1.PersistentVolume, err error) { + obj, err := c.Fake. 
+ Invokes(core.NewRootCreateAction(persistentvolumesResource, persistentVolume), &v1.PersistentVolume{}) + if obj == nil { + return nil, err + } + return obj.(*v1.PersistentVolume), err +} + +func (c *FakePersistentVolumes) Update(persistentVolume *v1.PersistentVolume) (result *v1.PersistentVolume, err error) { + obj, err := c.Fake. + Invokes(core.NewRootUpdateAction(persistentvolumesResource, persistentVolume), &v1.PersistentVolume{}) + if obj == nil { + return nil, err + } + return obj.(*v1.PersistentVolume), err +} + +func (c *FakePersistentVolumes) UpdateStatus(persistentVolume *v1.PersistentVolume) (*v1.PersistentVolume, error) { + obj, err := c.Fake. + Invokes(core.NewRootUpdateSubresourceAction(persistentvolumesResource, "status", persistentVolume), &v1.PersistentVolume{}) + if obj == nil { + return nil, err + } + return obj.(*v1.PersistentVolume), err +} + +func (c *FakePersistentVolumes) Delete(name string, options *api.DeleteOptions) error { + _, err := c.Fake. + Invokes(core.NewRootDeleteAction(persistentvolumesResource, name), &v1.PersistentVolume{}) + return err +} + +func (c *FakePersistentVolumes) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + action := core.NewRootDeleteCollectionAction(persistentvolumesResource, listOptions) + + _, err := c.Fake.Invokes(action, &v1.PersistentVolumeList{}) + return err +} + +func (c *FakePersistentVolumes) Get(name string) (result *v1.PersistentVolume, err error) { + obj, err := c.Fake. + Invokes(core.NewRootGetAction(persistentvolumesResource, name), &v1.PersistentVolume{}) + if obj == nil { + return nil, err + } + return obj.(*v1.PersistentVolume), err +} + +func (c *FakePersistentVolumes) List(opts api.ListOptions) (result *v1.PersistentVolumeList, err error) { + obj, err := c.Fake. + Invokes(core.NewRootListAction(persistentvolumesResource, opts), &v1.PersistentVolumeList{}) + if obj == nil { + return nil, err + } + + label := opts.LabelSelector + if label == nil { + label = labels.Everything() + } + list := &v1.PersistentVolumeList{} + for _, item := range obj.(*v1.PersistentVolumeList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested persistentVolumes. +func (c *FakePersistentVolumes) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(core.NewRootWatchAction(persistentvolumesResource, opts)) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/fake/fake_pod.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/fake/fake_pod.go new file mode 100644 index 000000000000..0273bb9b0722 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/fake/fake_pod.go @@ -0,0 +1,116 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package fake + +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + v1 "k8s.io/kubernetes/pkg/api/v1" + core "k8s.io/kubernetes/pkg/client/testing/core" + labels "k8s.io/kubernetes/pkg/labels" + watch "k8s.io/kubernetes/pkg/watch" +) + +// FakePods implements PodInterface +type FakePods struct { + Fake *FakeCore + ns string +} + +var podsResource = unversioned.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"} + +func (c *FakePods) Create(pod *v1.Pod) (result *v1.Pod, err error) { + obj, err := c.Fake. + Invokes(core.NewCreateAction(podsResource, c.ns, pod), &v1.Pod{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Pod), err +} + +func (c *FakePods) Update(pod *v1.Pod) (result *v1.Pod, err error) { + obj, err := c.Fake. + Invokes(core.NewUpdateAction(podsResource, c.ns, pod), &v1.Pod{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Pod), err +} + +func (c *FakePods) UpdateStatus(pod *v1.Pod) (*v1.Pod, error) { + obj, err := c.Fake. + Invokes(core.NewUpdateSubresourceAction(podsResource, "status", c.ns, pod), &v1.Pod{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Pod), err +} + +func (c *FakePods) Delete(name string, options *api.DeleteOptions) error { + _, err := c.Fake. + Invokes(core.NewDeleteAction(podsResource, c.ns, name), &v1.Pod{}) + + return err +} + +func (c *FakePods) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + action := core.NewDeleteCollectionAction(podsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1.PodList{}) + return err +} + +func (c *FakePods) Get(name string) (result *v1.Pod, err error) { + obj, err := c.Fake. + Invokes(core.NewGetAction(podsResource, c.ns, name), &v1.Pod{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Pod), err +} + +func (c *FakePods) List(opts api.ListOptions) (result *v1.PodList, err error) { + obj, err := c.Fake. + Invokes(core.NewListAction(podsResource, c.ns, opts), &v1.PodList{}) + + if obj == nil { + return nil, err + } + + label := opts.LabelSelector + if label == nil { + label = labels.Everything() + } + list := &v1.PodList{} + for _, item := range obj.(*v1.PodList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested pods. +func (c *FakePods) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(core.NewWatchAction(podsResource, c.ns, opts)) + +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/fake/fake_pod_expansion.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/fake/fake_pod_expansion.go new file mode 100644 index 000000000000..7e478dd5ec72 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/fake/fake_pod_expansion.go @@ -0,0 +1,46 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/client/restclient" + "k8s.io/kubernetes/pkg/client/testing/core" +) + +func (c *FakePods) Bind(binding *v1.Binding) error { + action := core.CreateActionImpl{} + action.Verb = "create" + action.Resource = podsResource + action.Subresource = "bindings" + action.Object = binding + + _, err := c.Fake.Invokes(action, binding) + return err +} + +func (c *FakePods) GetLogs(name string, opts *v1.PodLogOptions) *restclient.Request { + action := core.GenericActionImpl{} + action.Verb = "get" + action.Namespace = c.ns + action.Resource = podsResource + action.Subresource = "logs" + action.Value = opts + + _, _ = c.Fake.Invokes(action, &v1.Pod{}) + return &restclient.Request{} +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/fake/fake_podtemplate.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/fake/fake_podtemplate.go new file mode 100644 index 000000000000..89302ae8c43a --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/fake/fake_podtemplate.go @@ -0,0 +1,106 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + v1 "k8s.io/kubernetes/pkg/api/v1" + core "k8s.io/kubernetes/pkg/client/testing/core" + labels "k8s.io/kubernetes/pkg/labels" + watch "k8s.io/kubernetes/pkg/watch" +) + +// FakePodTemplates implements PodTemplateInterface +type FakePodTemplates struct { + Fake *FakeCore + ns string +} + +var podtemplatesResource = unversioned.GroupVersionResource{Group: "", Version: "v1", Resource: "podtemplates"} + +func (c *FakePodTemplates) Create(podTemplate *v1.PodTemplate) (result *v1.PodTemplate, err error) { + obj, err := c.Fake. + Invokes(core.NewCreateAction(podtemplatesResource, c.ns, podTemplate), &v1.PodTemplate{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.PodTemplate), err +} + +func (c *FakePodTemplates) Update(podTemplate *v1.PodTemplate) (result *v1.PodTemplate, err error) { + obj, err := c.Fake. + Invokes(core.NewUpdateAction(podtemplatesResource, c.ns, podTemplate), &v1.PodTemplate{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.PodTemplate), err +} + +func (c *FakePodTemplates) Delete(name string, options *api.DeleteOptions) error { + _, err := c.Fake. 
+ Invokes(core.NewDeleteAction(podtemplatesResource, c.ns, name), &v1.PodTemplate{}) + + return err +} + +func (c *FakePodTemplates) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + action := core.NewDeleteCollectionAction(podtemplatesResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1.PodTemplateList{}) + return err +} + +func (c *FakePodTemplates) Get(name string) (result *v1.PodTemplate, err error) { + obj, err := c.Fake. + Invokes(core.NewGetAction(podtemplatesResource, c.ns, name), &v1.PodTemplate{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.PodTemplate), err +} + +func (c *FakePodTemplates) List(opts api.ListOptions) (result *v1.PodTemplateList, err error) { + obj, err := c.Fake. + Invokes(core.NewListAction(podtemplatesResource, c.ns, opts), &v1.PodTemplateList{}) + + if obj == nil { + return nil, err + } + + label := opts.LabelSelector + if label == nil { + label = labels.Everything() + } + list := &v1.PodTemplateList{} + for _, item := range obj.(*v1.PodTemplateList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested podTemplates. +func (c *FakePodTemplates) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(core.NewWatchAction(podtemplatesResource, c.ns, opts)) + +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/fake/fake_replicationcontroller.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/fake/fake_replicationcontroller.go new file mode 100644 index 000000000000..3599a46e2220 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/fake/fake_replicationcontroller.go @@ -0,0 +1,116 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + v1 "k8s.io/kubernetes/pkg/api/v1" + core "k8s.io/kubernetes/pkg/client/testing/core" + labels "k8s.io/kubernetes/pkg/labels" + watch "k8s.io/kubernetes/pkg/watch" +) + +// FakeReplicationControllers implements ReplicationControllerInterface +type FakeReplicationControllers struct { + Fake *FakeCore + ns string +} + +var replicationcontrollersResource = unversioned.GroupVersionResource{Group: "", Version: "v1", Resource: "replicationcontrollers"} + +func (c *FakeReplicationControllers) Create(replicationController *v1.ReplicationController) (result *v1.ReplicationController, err error) { + obj, err := c.Fake. 
+ Invokes(core.NewCreateAction(replicationcontrollersResource, c.ns, replicationController), &v1.ReplicationController{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.ReplicationController), err +} + +func (c *FakeReplicationControllers) Update(replicationController *v1.ReplicationController) (result *v1.ReplicationController, err error) { + obj, err := c.Fake. + Invokes(core.NewUpdateAction(replicationcontrollersResource, c.ns, replicationController), &v1.ReplicationController{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.ReplicationController), err +} + +func (c *FakeReplicationControllers) UpdateStatus(replicationController *v1.ReplicationController) (*v1.ReplicationController, error) { + obj, err := c.Fake. + Invokes(core.NewUpdateSubresourceAction(replicationcontrollersResource, "status", c.ns, replicationController), &v1.ReplicationController{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.ReplicationController), err +} + +func (c *FakeReplicationControllers) Delete(name string, options *api.DeleteOptions) error { + _, err := c.Fake. + Invokes(core.NewDeleteAction(replicationcontrollersResource, c.ns, name), &v1.ReplicationController{}) + + return err +} + +func (c *FakeReplicationControllers) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + action := core.NewDeleteCollectionAction(replicationcontrollersResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1.ReplicationControllerList{}) + return err +} + +func (c *FakeReplicationControllers) Get(name string) (result *v1.ReplicationController, err error) { + obj, err := c.Fake. + Invokes(core.NewGetAction(replicationcontrollersResource, c.ns, name), &v1.ReplicationController{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.ReplicationController), err +} + +func (c *FakeReplicationControllers) List(opts api.ListOptions) (result *v1.ReplicationControllerList, err error) { + obj, err := c.Fake. + Invokes(core.NewListAction(replicationcontrollersResource, c.ns, opts), &v1.ReplicationControllerList{}) + + if obj == nil { + return nil, err + } + + label := opts.LabelSelector + if label == nil { + label = labels.Everything() + } + list := &v1.ReplicationControllerList{} + for _, item := range obj.(*v1.ReplicationControllerList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested replicationControllers. +func (c *FakeReplicationControllers) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(core.NewWatchAction(replicationcontrollersResource, c.ns, opts)) + +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/fake/fake_resourcequota.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/fake/fake_resourcequota.go new file mode 100644 index 000000000000..2def4eec54d1 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/fake/fake_resourcequota.go @@ -0,0 +1,116 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + v1 "k8s.io/kubernetes/pkg/api/v1" + core "k8s.io/kubernetes/pkg/client/testing/core" + labels "k8s.io/kubernetes/pkg/labels" + watch "k8s.io/kubernetes/pkg/watch" +) + +// FakeResourceQuotas implements ResourceQuotaInterface +type FakeResourceQuotas struct { + Fake *FakeCore + ns string +} + +var resourcequotasResource = unversioned.GroupVersionResource{Group: "", Version: "v1", Resource: "resourcequotas"} + +func (c *FakeResourceQuotas) Create(resourceQuota *v1.ResourceQuota) (result *v1.ResourceQuota, err error) { + obj, err := c.Fake. + Invokes(core.NewCreateAction(resourcequotasResource, c.ns, resourceQuota), &v1.ResourceQuota{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.ResourceQuota), err +} + +func (c *FakeResourceQuotas) Update(resourceQuota *v1.ResourceQuota) (result *v1.ResourceQuota, err error) { + obj, err := c.Fake. + Invokes(core.NewUpdateAction(resourcequotasResource, c.ns, resourceQuota), &v1.ResourceQuota{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.ResourceQuota), err +} + +func (c *FakeResourceQuotas) UpdateStatus(resourceQuota *v1.ResourceQuota) (*v1.ResourceQuota, error) { + obj, err := c.Fake. + Invokes(core.NewUpdateSubresourceAction(resourcequotasResource, "status", c.ns, resourceQuota), &v1.ResourceQuota{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.ResourceQuota), err +} + +func (c *FakeResourceQuotas) Delete(name string, options *api.DeleteOptions) error { + _, err := c.Fake. + Invokes(core.NewDeleteAction(resourcequotasResource, c.ns, name), &v1.ResourceQuota{}) + + return err +} + +func (c *FakeResourceQuotas) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + action := core.NewDeleteCollectionAction(resourcequotasResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1.ResourceQuotaList{}) + return err +} + +func (c *FakeResourceQuotas) Get(name string) (result *v1.ResourceQuota, err error) { + obj, err := c.Fake. + Invokes(core.NewGetAction(resourcequotasResource, c.ns, name), &v1.ResourceQuota{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.ResourceQuota), err +} + +func (c *FakeResourceQuotas) List(opts api.ListOptions) (result *v1.ResourceQuotaList, err error) { + obj, err := c.Fake. + Invokes(core.NewListAction(resourcequotasResource, c.ns, opts), &v1.ResourceQuotaList{}) + + if obj == nil { + return nil, err + } + + label := opts.LabelSelector + if label == nil { + label = labels.Everything() + } + list := &v1.ResourceQuotaList{} + for _, item := range obj.(*v1.ResourceQuotaList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested resourceQuotas. +func (c *FakeResourceQuotas) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.Fake. 
+ InvokesWatch(core.NewWatchAction(resourcequotasResource, c.ns, opts)) + +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/fake/fake_secret.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/fake/fake_secret.go new file mode 100644 index 000000000000..921da249aabb --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/fake/fake_secret.go @@ -0,0 +1,106 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + v1 "k8s.io/kubernetes/pkg/api/v1" + core "k8s.io/kubernetes/pkg/client/testing/core" + labels "k8s.io/kubernetes/pkg/labels" + watch "k8s.io/kubernetes/pkg/watch" +) + +// FakeSecrets implements SecretInterface +type FakeSecrets struct { + Fake *FakeCore + ns string +} + +var secretsResource = unversioned.GroupVersionResource{Group: "", Version: "v1", Resource: "secrets"} + +func (c *FakeSecrets) Create(secret *v1.Secret) (result *v1.Secret, err error) { + obj, err := c.Fake. + Invokes(core.NewCreateAction(secretsResource, c.ns, secret), &v1.Secret{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Secret), err +} + +func (c *FakeSecrets) Update(secret *v1.Secret) (result *v1.Secret, err error) { + obj, err := c.Fake. + Invokes(core.NewUpdateAction(secretsResource, c.ns, secret), &v1.Secret{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Secret), err +} + +func (c *FakeSecrets) Delete(name string, options *api.DeleteOptions) error { + _, err := c.Fake. + Invokes(core.NewDeleteAction(secretsResource, c.ns, name), &v1.Secret{}) + + return err +} + +func (c *FakeSecrets) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + action := core.NewDeleteCollectionAction(secretsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1.SecretList{}) + return err +} + +func (c *FakeSecrets) Get(name string) (result *v1.Secret, err error) { + obj, err := c.Fake. + Invokes(core.NewGetAction(secretsResource, c.ns, name), &v1.Secret{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Secret), err +} + +func (c *FakeSecrets) List(opts api.ListOptions) (result *v1.SecretList, err error) { + obj, err := c.Fake. + Invokes(core.NewListAction(secretsResource, c.ns, opts), &v1.SecretList{}) + + if obj == nil { + return nil, err + } + + label := opts.LabelSelector + if label == nil { + label = labels.Everything() + } + list := &v1.SecretList{} + for _, item := range obj.(*v1.SecretList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested secrets. +func (c *FakeSecrets) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.Fake. 
+ InvokesWatch(core.NewWatchAction(secretsResource, c.ns, opts)) + +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/fake/fake_service.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/fake/fake_service.go new file mode 100644 index 000000000000..3355aa94e1f8 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/fake/fake_service.go @@ -0,0 +1,116 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + v1 "k8s.io/kubernetes/pkg/api/v1" + core "k8s.io/kubernetes/pkg/client/testing/core" + labels "k8s.io/kubernetes/pkg/labels" + watch "k8s.io/kubernetes/pkg/watch" +) + +// FakeServices implements ServiceInterface +type FakeServices struct { + Fake *FakeCore + ns string +} + +var servicesResource = unversioned.GroupVersionResource{Group: "", Version: "v1", Resource: "services"} + +func (c *FakeServices) Create(service *v1.Service) (result *v1.Service, err error) { + obj, err := c.Fake. + Invokes(core.NewCreateAction(servicesResource, c.ns, service), &v1.Service{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Service), err +} + +func (c *FakeServices) Update(service *v1.Service) (result *v1.Service, err error) { + obj, err := c.Fake. + Invokes(core.NewUpdateAction(servicesResource, c.ns, service), &v1.Service{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Service), err +} + +func (c *FakeServices) UpdateStatus(service *v1.Service) (*v1.Service, error) { + obj, err := c.Fake. + Invokes(core.NewUpdateSubresourceAction(servicesResource, "status", c.ns, service), &v1.Service{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Service), err +} + +func (c *FakeServices) Delete(name string, options *api.DeleteOptions) error { + _, err := c.Fake. + Invokes(core.NewDeleteAction(servicesResource, c.ns, name), &v1.Service{}) + + return err +} + +func (c *FakeServices) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + action := core.NewDeleteCollectionAction(servicesResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1.ServiceList{}) + return err +} + +func (c *FakeServices) Get(name string) (result *v1.Service, err error) { + obj, err := c.Fake. + Invokes(core.NewGetAction(servicesResource, c.ns, name), &v1.Service{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Service), err +} + +func (c *FakeServices) List(opts api.ListOptions) (result *v1.ServiceList, err error) { + obj, err := c.Fake. 
+ Invokes(core.NewListAction(servicesResource, c.ns, opts), &v1.ServiceList{}) + + if obj == nil { + return nil, err + } + + label := opts.LabelSelector + if label == nil { + label = labels.Everything() + } + list := &v1.ServiceList{} + for _, item := range obj.(*v1.ServiceList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested services. +func (c *FakeServices) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(core.NewWatchAction(servicesResource, c.ns, opts)) + +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/fake/fake_service_expansion.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/fake/fake_service_expansion.go new file mode 100644 index 000000000000..3494b873762e --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/fake/fake_service_expansion.go @@ -0,0 +1,26 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + "k8s.io/kubernetes/pkg/client/restclient" + "k8s.io/kubernetes/pkg/client/testing/core" +) + +func (c *FakeServices) ProxyGet(scheme, name, port, path string, params map[string]string) restclient.ResponseWrapper { + return c.Fake.InvokesProxy(core.NewProxyGetAction(servicesResource, c.ns, scheme, name, port, path, params)) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/fake/fake_serviceaccount.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/fake/fake_serviceaccount.go new file mode 100644 index 000000000000..fa10a5353c6d --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/fake/fake_serviceaccount.go @@ -0,0 +1,106 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package fake + +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + v1 "k8s.io/kubernetes/pkg/api/v1" + core "k8s.io/kubernetes/pkg/client/testing/core" + labels "k8s.io/kubernetes/pkg/labels" + watch "k8s.io/kubernetes/pkg/watch" +) + +// FakeServiceAccounts implements ServiceAccountInterface +type FakeServiceAccounts struct { + Fake *FakeCore + ns string +} + +var serviceaccountsResource = unversioned.GroupVersionResource{Group: "", Version: "v1", Resource: "serviceaccounts"} + +func (c *FakeServiceAccounts) Create(serviceAccount *v1.ServiceAccount) (result *v1.ServiceAccount, err error) { + obj, err := c.Fake. + Invokes(core.NewCreateAction(serviceaccountsResource, c.ns, serviceAccount), &v1.ServiceAccount{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.ServiceAccount), err +} + +func (c *FakeServiceAccounts) Update(serviceAccount *v1.ServiceAccount) (result *v1.ServiceAccount, err error) { + obj, err := c.Fake. + Invokes(core.NewUpdateAction(serviceaccountsResource, c.ns, serviceAccount), &v1.ServiceAccount{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.ServiceAccount), err +} + +func (c *FakeServiceAccounts) Delete(name string, options *api.DeleteOptions) error { + _, err := c.Fake. + Invokes(core.NewDeleteAction(serviceaccountsResource, c.ns, name), &v1.ServiceAccount{}) + + return err +} + +func (c *FakeServiceAccounts) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + action := core.NewDeleteCollectionAction(serviceaccountsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1.ServiceAccountList{}) + return err +} + +func (c *FakeServiceAccounts) Get(name string) (result *v1.ServiceAccount, err error) { + obj, err := c.Fake. + Invokes(core.NewGetAction(serviceaccountsResource, c.ns, name), &v1.ServiceAccount{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.ServiceAccount), err +} + +func (c *FakeServiceAccounts) List(opts api.ListOptions) (result *v1.ServiceAccountList, err error) { + obj, err := c.Fake. + Invokes(core.NewListAction(serviceaccountsResource, c.ns, opts), &v1.ServiceAccountList{}) + + if obj == nil { + return nil, err + } + + label := opts.LabelSelector + if label == nil { + label = labels.Everything() + } + list := &v1.ServiceAccountList{} + for _, item := range obj.(*v1.ServiceAccountList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested serviceAccounts. +func (c *FakeServiceAccounts) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(core.NewWatchAction(serviceaccountsResource, c.ns, opts)) + +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/generated_expansion.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/generated_expansion.go new file mode 100644 index 000000000000..9974ef5c6a54 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/generated_expansion.go @@ -0,0 +1,41 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +type ComponentStatusExpansion interface{} + +type EndpointsExpansion interface{} + +type LimitRangeExpansion interface{} + +type NodeExpansion interface{} + +type PersistentVolumeExpansion interface{} + +type PersistentVolumeClaimExpansion interface{} + +type PodTemplateExpansion interface{} + +type ReplicationControllerExpansion interface{} + +type ResourceQuotaExpansion interface{} + +type SecretExpansion interface{} + +type ServiceAccountExpansion interface{} + +type ConfigMapExpansion interface{} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/limitrange.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/limitrange.go new file mode 100644 index 000000000000..a44c61fa2280 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/limitrange.go @@ -0,0 +1,136 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + api "k8s.io/kubernetes/pkg/api" + v1 "k8s.io/kubernetes/pkg/api/v1" + watch "k8s.io/kubernetes/pkg/watch" +) + +// LimitRangesGetter has a method to return a LimitRangeInterface. +// A group's client should implement this interface. +type LimitRangesGetter interface { + LimitRanges(namespace string) LimitRangeInterface +} + +// LimitRangeInterface has methods to work with LimitRange resources. +type LimitRangeInterface interface { + Create(*v1.LimitRange) (*v1.LimitRange, error) + Update(*v1.LimitRange) (*v1.LimitRange, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*v1.LimitRange, error) + List(opts api.ListOptions) (*v1.LimitRangeList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + LimitRangeExpansion +} + +// limitRanges implements LimitRangeInterface +type limitRanges struct { + client *CoreClient + ns string +} + +// newLimitRanges returns a LimitRanges +func newLimitRanges(c *CoreClient, namespace string) *limitRanges { + return &limitRanges{ + client: c, + ns: namespace, + } +} + +// Create takes the representation of a limitRange and creates it. Returns the server's representation of the limitRange, and an error, if there is any. +func (c *limitRanges) Create(limitRange *v1.LimitRange) (result *v1.LimitRange, err error) { + result = &v1.LimitRange{} + err = c.client.Post(). + Namespace(c.ns). + Resource("limitranges"). + Body(limitRange). + Do(). 
+ Into(result) + return +} + +// Update takes the representation of a limitRange and updates it. Returns the server's representation of the limitRange, and an error, if there is any. +func (c *limitRanges) Update(limitRange *v1.LimitRange) (result *v1.LimitRange, err error) { + result = &v1.LimitRange{} + err = c.client.Put(). + Namespace(c.ns). + Resource("limitranges"). + Name(limitRange.Name). + Body(limitRange). + Do(). + Into(result) + return +} + +// Delete takes name of the limitRange and deletes it. Returns an error if one occurs. +func (c *limitRanges) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("limitranges"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *limitRanges) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("limitranges"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the limitRange, and returns the corresponding limitRange object, and an error if there is any. +func (c *limitRanges) Get(name string) (result *v1.LimitRange, err error) { + result = &v1.LimitRange{} + err = c.client.Get(). + Namespace(c.ns). + Resource("limitranges"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of LimitRanges that match those selectors. +func (c *limitRanges) List(opts api.ListOptions) (result *v1.LimitRangeList, err error) { + result = &v1.LimitRangeList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("limitranges"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested limitRanges. +func (c *limitRanges) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("limitranges"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/namespace.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/namespace.go new file mode 100644 index 000000000000..3d2cff144648 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/namespace.go @@ -0,0 +1,140 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + api "k8s.io/kubernetes/pkg/api" + v1 "k8s.io/kubernetes/pkg/api/v1" + watch "k8s.io/kubernetes/pkg/watch" +) + +// NamespacesGetter has a method to return a NamespaceInterface. +// A group's client should implement this interface. +type NamespacesGetter interface { + Namespaces() NamespaceInterface +} + +// NamespaceInterface has methods to work with Namespace resources. 
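+// +// A minimal usage sketch (editor's illustration, not part of the generated source; the clientset variable and its Core() accessor are assumptions based on the surrounding release_1_2 clientset): +// +//   ns, err := clientset.Core().Namespaces().Get("default") +//   if err != nil { +//       // handle the lookup failure +//   } +//   fmt.Printf("namespace %s is %s\n", ns.Name, ns.Status.Phase) +//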
+type NamespaceInterface interface { + Create(*v1.Namespace) (*v1.Namespace, error) + Update(*v1.Namespace) (*v1.Namespace, error) + UpdateStatus(*v1.Namespace) (*v1.Namespace, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*v1.Namespace, error) + List(opts api.ListOptions) (*v1.NamespaceList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + NamespaceExpansion +} + +// namespaces implements NamespaceInterface +type namespaces struct { + client *CoreClient +} + +// newNamespaces returns a Namespaces +func newNamespaces(c *CoreClient) *namespaces { + return &namespaces{ + client: c, + } +} + +// Create takes the representation of a namespace and creates it. Returns the server's representation of the namespace, and an error, if there is any. +func (c *namespaces) Create(namespace *v1.Namespace) (result *v1.Namespace, err error) { + result = &v1.Namespace{} + err = c.client.Post(). + Resource("namespaces"). + Body(namespace). + Do(). + Into(result) + return +} + +// Update takes the representation of a namespace and updates it. Returns the server's representation of the namespace, and an error, if there is any. +func (c *namespaces) Update(namespace *v1.Namespace) (result *v1.Namespace, err error) { + result = &v1.Namespace{} + err = c.client.Put(). + Resource("namespaces"). + Name(namespace.Name). + Body(namespace). + Do(). + Into(result) + return +} + +func (c *namespaces) UpdateStatus(namespace *v1.Namespace) (result *v1.Namespace, err error) { + result = &v1.Namespace{} + err = c.client.Put(). + Resource("namespaces"). + Name(namespace.Name). + SubResource("status"). + Body(namespace). + Do(). + Into(result) + return +} + +// Delete takes name of the namespace and deletes it. Returns an error if one occurs. +func (c *namespaces) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Resource("namespaces"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *namespaces) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Resource("namespaces"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the namespace, and returns the corresponding namespace object, and an error if there is any. +func (c *namespaces) Get(name string) (result *v1.Namespace, err error) { + result = &v1.Namespace{} + err = c.client.Get(). + Resource("namespaces"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Namespaces that match those selectors. +func (c *namespaces) List(opts api.ListOptions) (result *v1.NamespaceList, err error) { + result = &v1.NamespaceList{} + err = c.client.Get(). + Resource("namespaces"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested namespaces. +func (c *namespaces) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Resource("namespaces"). + VersionedParams(&opts, api.ParameterCodec). 
+ Watch() +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/namespace_expansion.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/namespace_expansion.go new file mode 100644 index 000000000000..7b5cf683d0ea --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/namespace_expansion.go @@ -0,0 +1,31 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import "k8s.io/kubernetes/pkg/api/v1" + +// The NamespaceExpansion interface allows manually adding extra methods to the NamespaceInterface. +type NamespaceExpansion interface { + Finalize(item *v1.Namespace) (*v1.Namespace, error) +} + +// Finalize takes the representation of a namespace to update. Returns the server's representation of the namespace, and an error, if it occurs. +func (c *namespaces) Finalize(namespace *v1.Namespace) (result *v1.Namespace, err error) { + result = &v1.Namespace{} + err = c.client.Put().Resource("namespaces").Name(namespace.Name).SubResource("finalize").Body(namespace).Do().Into(result) + return +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/node.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/node.go new file mode 100644 index 000000000000..464eb8d6d693 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/node.go @@ -0,0 +1,140 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + api "k8s.io/kubernetes/pkg/api" + v1 "k8s.io/kubernetes/pkg/api/v1" + watch "k8s.io/kubernetes/pkg/watch" +) + +// NodesGetter has a method to return a NodeInterface. +// A group's client should implement this interface. +type NodesGetter interface { + Nodes() NodeInterface +} + +// NodeInterface has methods to work with Node resources. 
+type NodeInterface interface { + Create(*v1.Node) (*v1.Node, error) + Update(*v1.Node) (*v1.Node, error) + UpdateStatus(*v1.Node) (*v1.Node, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*v1.Node, error) + List(opts api.ListOptions) (*v1.NodeList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + NodeExpansion +} + +// nodes implements NodeInterface +type nodes struct { + client *CoreClient +} + +// newNodes returns a Nodes +func newNodes(c *CoreClient) *nodes { + return &nodes{ + client: c, + } +} + +// Create takes the representation of a node and creates it. Returns the server's representation of the node, and an error, if there is any. +func (c *nodes) Create(node *v1.Node) (result *v1.Node, err error) { + result = &v1.Node{} + err = c.client.Post(). + Resource("nodes"). + Body(node). + Do(). + Into(result) + return +} + +// Update takes the representation of a node and updates it. Returns the server's representation of the node, and an error, if there is any. +func (c *nodes) Update(node *v1.Node) (result *v1.Node, err error) { + result = &v1.Node{} + err = c.client.Put(). + Resource("nodes"). + Name(node.Name). + Body(node). + Do(). + Into(result) + return +} + +func (c *nodes) UpdateStatus(node *v1.Node) (result *v1.Node, err error) { + result = &v1.Node{} + err = c.client.Put(). + Resource("nodes"). + Name(node.Name). + SubResource("status"). + Body(node). + Do(). + Into(result) + return +} + +// Delete takes name of the node and deletes it. Returns an error if one occurs. +func (c *nodes) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Resource("nodes"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *nodes) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Resource("nodes"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the node, and returns the corresponding node object, and an error if there is any. +func (c *nodes) Get(name string) (result *v1.Node, err error) { + result = &v1.Node{} + err = c.client.Get(). + Resource("nodes"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Nodes that match those selectors. +func (c *nodes) List(opts api.ListOptions) (result *v1.NodeList, err error) { + result = &v1.NodeList{} + err = c.client.Get(). + Resource("nodes"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested nodes. +func (c *nodes) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Resource("nodes"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/persistentvolume.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/persistentvolume.go new file mode 100644 index 000000000000..85ddf060e270 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/persistentvolume.go @@ -0,0 +1,140 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + api "k8s.io/kubernetes/pkg/api" + v1 "k8s.io/kubernetes/pkg/api/v1" + watch "k8s.io/kubernetes/pkg/watch" +) + +// PersistentVolumesGetter has a method to return a PersistentVolumeInterface. +// A group's client should implement this interface. +type PersistentVolumesGetter interface { + PersistentVolumes() PersistentVolumeInterface +} + +// PersistentVolumeInterface has methods to work with PersistentVolume resources. +type PersistentVolumeInterface interface { + Create(*v1.PersistentVolume) (*v1.PersistentVolume, error) + Update(*v1.PersistentVolume) (*v1.PersistentVolume, error) + UpdateStatus(*v1.PersistentVolume) (*v1.PersistentVolume, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*v1.PersistentVolume, error) + List(opts api.ListOptions) (*v1.PersistentVolumeList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + PersistentVolumeExpansion +} + +// persistentVolumes implements PersistentVolumeInterface +type persistentVolumes struct { + client *CoreClient +} + +// newPersistentVolumes returns a PersistentVolumes +func newPersistentVolumes(c *CoreClient) *persistentVolumes { + return &persistentVolumes{ + client: c, + } +} + +// Create takes the representation of a persistentVolume and creates it. Returns the server's representation of the persistentVolume, and an error, if there is any. +func (c *persistentVolumes) Create(persistentVolume *v1.PersistentVolume) (result *v1.PersistentVolume, err error) { + result = &v1.PersistentVolume{} + err = c.client.Post(). + Resource("persistentvolumes"). + Body(persistentVolume). + Do(). + Into(result) + return +} + +// Update takes the representation of a persistentVolume and updates it. Returns the server's representation of the persistentVolume, and an error, if there is any. +func (c *persistentVolumes) Update(persistentVolume *v1.PersistentVolume) (result *v1.PersistentVolume, err error) { + result = &v1.PersistentVolume{} + err = c.client.Put(). + Resource("persistentvolumes"). + Name(persistentVolume.Name). + Body(persistentVolume). + Do(). + Into(result) + return +} + +func (c *persistentVolumes) UpdateStatus(persistentVolume *v1.PersistentVolume) (result *v1.PersistentVolume, err error) { + result = &v1.PersistentVolume{} + err = c.client.Put(). + Resource("persistentvolumes"). + Name(persistentVolume.Name). + SubResource("status"). + Body(persistentVolume). + Do(). + Into(result) + return +} + +// Delete takes name of the persistentVolume and deletes it. Returns an error if one occurs. +func (c *persistentVolumes) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Resource("persistentvolumes"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. 
+func (c *persistentVolumes) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Resource("persistentvolumes"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the persistentVolume, and returns the corresponding persistentVolume object, and an error if there is any. +func (c *persistentVolumes) Get(name string) (result *v1.PersistentVolume, err error) { + result = &v1.PersistentVolume{} + err = c.client.Get(). + Resource("persistentvolumes"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of PersistentVolumes that match those selectors. +func (c *persistentVolumes) List(opts api.ListOptions) (result *v1.PersistentVolumeList, err error) { + result = &v1.PersistentVolumeList{} + err = c.client.Get(). + Resource("persistentvolumes"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested persistentVolumes. +func (c *persistentVolumes) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Resource("persistentvolumes"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/pod.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/pod.go new file mode 100644 index 000000000000..d2ed5faaa845 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/pod.go @@ -0,0 +1,150 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + api "k8s.io/kubernetes/pkg/api" + v1 "k8s.io/kubernetes/pkg/api/v1" + watch "k8s.io/kubernetes/pkg/watch" +) + +// PodsGetter has a method to return a PodInterface. +// A group's client should implement this interface. +type PodsGetter interface { + Pods(namespace string) PodInterface +} + +// PodInterface has methods to work with Pod resources. +type PodInterface interface { + Create(*v1.Pod) (*v1.Pod, error) + Update(*v1.Pod) (*v1.Pod, error) + UpdateStatus(*v1.Pod) (*v1.Pod, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*v1.Pod, error) + List(opts api.ListOptions) (*v1.PodList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + PodExpansion +} + +// pods implements PodInterface +type pods struct { + client *CoreClient + ns string +} + +// newPods returns a Pods +func newPods(c *CoreClient, namespace string) *pods { + return &pods{ + client: c, + ns: namespace, + } +} + +// Create takes the representation of a pod and creates it. Returns the server's representation of the pod, and an error, if there is any. 
+func (c *pods) Create(pod *v1.Pod) (result *v1.Pod, err error) { + result = &v1.Pod{} + err = c.client.Post(). + Namespace(c.ns). + Resource("pods"). + Body(pod). + Do(). + Into(result) + return +} + +// Update takes the representation of a pod and updates it. Returns the server's representation of the pod, and an error, if there is any. +func (c *pods) Update(pod *v1.Pod) (result *v1.Pod, err error) { + result = &v1.Pod{} + err = c.client.Put(). + Namespace(c.ns). + Resource("pods"). + Name(pod.Name). + Body(pod). + Do(). + Into(result) + return +} + +func (c *pods) UpdateStatus(pod *v1.Pod) (result *v1.Pod, err error) { + result = &v1.Pod{} + err = c.client.Put(). + Namespace(c.ns). + Resource("pods"). + Name(pod.Name). + SubResource("status"). + Body(pod). + Do(). + Into(result) + return +} + +// Delete takes name of the pod and deletes it. Returns an error if one occurs. +func (c *pods) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("pods"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *pods) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("pods"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the pod, and returns the corresponding pod object, and an error if there is any. +func (c *pods) Get(name string) (result *v1.Pod, err error) { + result = &v1.Pod{} + err = c.client.Get(). + Namespace(c.ns). + Resource("pods"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Pods that match those selectors. +func (c *pods) List(opts api.ListOptions) (result *v1.PodList, err error) { + result = &v1.PodList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("pods"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested pods. +func (c *pods) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("pods"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/pod_expansion.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/pod_expansion.go new file mode 100644 index 000000000000..f061b5d9234a --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/pod_expansion.go @@ -0,0 +1,39 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/client/restclient" +) + +// The PodExpansion interface allows manually adding extra methods to the PodInterface. +type PodExpansion interface { + Bind(binding *v1.Binding) error + GetLogs(name string, opts *v1.PodLogOptions) *restclient.Request +} + +// Bind applies the provided binding to the named pod in the current namespace (binding.Namespace is ignored). +func (c *pods) Bind(binding *v1.Binding) error { + return c.client.Post().Namespace(c.ns).Resource("pods").Name(binding.Name).SubResource("binding").Body(binding).Do().Error() +} + +// GetLogs constructs a request for getting the logs for a pod +func (c *pods) GetLogs(name string, opts *v1.PodLogOptions) *restclient.Request { + return c.client.Get().Namespace(c.ns).Name(name).Resource("pods").SubResource("log").VersionedParams(opts, api.ParameterCodec) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/podtemplate.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/podtemplate.go new file mode 100644 index 000000000000..1b95106d1752 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/podtemplate.go @@ -0,0 +1,136 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + api "k8s.io/kubernetes/pkg/api" + v1 "k8s.io/kubernetes/pkg/api/v1" + watch "k8s.io/kubernetes/pkg/watch" +) + +// PodTemplatesGetter has a method to return a PodTemplateInterface. +// A group's client should implement this interface. +type PodTemplatesGetter interface { + PodTemplates(namespace string) PodTemplateInterface +} + +// PodTemplateInterface has methods to work with PodTemplate resources. +type PodTemplateInterface interface { + Create(*v1.PodTemplate) (*v1.PodTemplate, error) + Update(*v1.PodTemplate) (*v1.PodTemplate, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*v1.PodTemplate, error) + List(opts api.ListOptions) (*v1.PodTemplateList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + PodTemplateExpansion +} + +// podTemplates implements PodTemplateInterface +type podTemplates struct { + client *CoreClient + ns string +} + +// newPodTemplates returns a PodTemplates +func newPodTemplates(c *CoreClient, namespace string) *podTemplates { + return &podTemplates{ + client: c, + ns: namespace, + } +} + +// Create takes the representation of a podTemplate and creates it. Returns the server's representation of the podTemplate, and an error, if there is any. +func (c *podTemplates) Create(podTemplate *v1.PodTemplate) (result *v1.PodTemplate, err error) { + result = &v1.PodTemplate{} + err = c.client.Post(). + Namespace(c.ns). + Resource("podtemplates"). + Body(podTemplate). + Do().
+ Into(result) + return +} + +// Update takes the representation of a podTemplate and updates it. Returns the server's representation of the podTemplate, and an error, if there is any. +func (c *podTemplates) Update(podTemplate *v1.PodTemplate) (result *v1.PodTemplate, err error) { + result = &v1.PodTemplate{} + err = c.client.Put(). + Namespace(c.ns). + Resource("podtemplates"). + Name(podTemplate.Name). + Body(podTemplate). + Do(). + Into(result) + return +} + +// Delete takes name of the podTemplate and deletes it. Returns an error if one occurs. +func (c *podTemplates) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("podtemplates"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *podTemplates) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("podtemplates"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the podTemplate, and returns the corresponding podTemplate object, and an error if there is any. +func (c *podTemplates) Get(name string) (result *v1.PodTemplate, err error) { + result = &v1.PodTemplate{} + err = c.client.Get(). + Namespace(c.ns). + Resource("podtemplates"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of PodTemplates that match those selectors. +func (c *podTemplates) List(opts api.ListOptions) (result *v1.PodTemplateList, err error) { + result = &v1.PodTemplateList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("podtemplates"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested podTemplates. +func (c *podTemplates) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("podtemplates"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/replicationcontroller.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/replicationcontroller.go new file mode 100644 index 000000000000..20bcc90c3723 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/replicationcontroller.go @@ -0,0 +1,150 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + api "k8s.io/kubernetes/pkg/api" + v1 "k8s.io/kubernetes/pkg/api/v1" + watch "k8s.io/kubernetes/pkg/watch" +) + +// ReplicationControllersGetter has a method to return a ReplicationControllerInterface. +// A group's client should implement this interface. 
+type ReplicationControllersGetter interface { + ReplicationControllers(namespace string) ReplicationControllerInterface +} + +// ReplicationControllerInterface has methods to work with ReplicationController resources. +type ReplicationControllerInterface interface { + Create(*v1.ReplicationController) (*v1.ReplicationController, error) + Update(*v1.ReplicationController) (*v1.ReplicationController, error) + UpdateStatus(*v1.ReplicationController) (*v1.ReplicationController, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*v1.ReplicationController, error) + List(opts api.ListOptions) (*v1.ReplicationControllerList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + ReplicationControllerExpansion +} + +// replicationControllers implements ReplicationControllerInterface +type replicationControllers struct { + client *CoreClient + ns string +} + +// newReplicationControllers returns a ReplicationControllers +func newReplicationControllers(c *CoreClient, namespace string) *replicationControllers { + return &replicationControllers{ + client: c, + ns: namespace, + } +} + +// Create takes the representation of a replicationController and creates it. Returns the server's representation of the replicationController, and an error, if there is any. +func (c *replicationControllers) Create(replicationController *v1.ReplicationController) (result *v1.ReplicationController, err error) { + result = &v1.ReplicationController{} + err = c.client.Post(). + Namespace(c.ns). + Resource("replicationcontrollers"). + Body(replicationController). + Do(). + Into(result) + return +} + +// Update takes the representation of a replicationController and updates it. Returns the server's representation of the replicationController, and an error, if there is any. +func (c *replicationControllers) Update(replicationController *v1.ReplicationController) (result *v1.ReplicationController, err error) { + result = &v1.ReplicationController{} + err = c.client.Put(). + Namespace(c.ns). + Resource("replicationcontrollers"). + Name(replicationController.Name). + Body(replicationController). + Do(). + Into(result) + return +} + +func (c *replicationControllers) UpdateStatus(replicationController *v1.ReplicationController) (result *v1.ReplicationController, err error) { + result = &v1.ReplicationController{} + err = c.client.Put(). + Namespace(c.ns). + Resource("replicationcontrollers"). + Name(replicationController.Name). + SubResource("status"). + Body(replicationController). + Do(). + Into(result) + return +} + +// Delete takes name of the replicationController and deletes it. Returns an error if one occurs. +func (c *replicationControllers) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("replicationcontrollers"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *replicationControllers) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("replicationcontrollers"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the replicationController, and returns the corresponding replicationController object, and an error if there is any. 
+func (c *replicationControllers) Get(name string) (result *v1.ReplicationController, err error) { + result = &v1.ReplicationController{} + err = c.client.Get(). + Namespace(c.ns). + Resource("replicationcontrollers"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ReplicationControllers that match those selectors. +func (c *replicationControllers) List(opts api.ListOptions) (result *v1.ReplicationControllerList, err error) { + result = &v1.ReplicationControllerList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("replicationcontrollers"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested replicationControllers. +func (c *replicationControllers) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("replicationcontrollers"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/resourcequota.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/resourcequota.go new file mode 100644 index 000000000000..466e963d6c9b --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/resourcequota.go @@ -0,0 +1,150 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + api "k8s.io/kubernetes/pkg/api" + v1 "k8s.io/kubernetes/pkg/api/v1" + watch "k8s.io/kubernetes/pkg/watch" +) + +// ResourceQuotasGetter has a method to return a ResourceQuotaInterface. +// A group's client should implement this interface. +type ResourceQuotasGetter interface { + ResourceQuotas(namespace string) ResourceQuotaInterface +} + +// ResourceQuotaInterface has methods to work with ResourceQuota resources. +type ResourceQuotaInterface interface { + Create(*v1.ResourceQuota) (*v1.ResourceQuota, error) + Update(*v1.ResourceQuota) (*v1.ResourceQuota, error) + UpdateStatus(*v1.ResourceQuota) (*v1.ResourceQuota, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*v1.ResourceQuota, error) + List(opts api.ListOptions) (*v1.ResourceQuotaList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + ResourceQuotaExpansion +} + +// resourceQuotas implements ResourceQuotaInterface +type resourceQuotas struct { + client *CoreClient + ns string +} + +// newResourceQuotas returns a ResourceQuotas +func newResourceQuotas(c *CoreClient, namespace string) *resourceQuotas { + return &resourceQuotas{ + client: c, + ns: namespace, + } +} + +// Create takes the representation of a resourceQuota and creates it. Returns the server's representation of the resourceQuota, and an error, if there is any. 
+func (c *resourceQuotas) Create(resourceQuota *v1.ResourceQuota) (result *v1.ResourceQuota, err error) { + result = &v1.ResourceQuota{} + err = c.client.Post(). + Namespace(c.ns). + Resource("resourcequotas"). + Body(resourceQuota). + Do(). + Into(result) + return +} + +// Update takes the representation of a resourceQuota and updates it. Returns the server's representation of the resourceQuota, and an error, if there is any. +func (c *resourceQuotas) Update(resourceQuota *v1.ResourceQuota) (result *v1.ResourceQuota, err error) { + result = &v1.ResourceQuota{} + err = c.client.Put(). + Namespace(c.ns). + Resource("resourcequotas"). + Name(resourceQuota.Name). + Body(resourceQuota). + Do(). + Into(result) + return +} + +func (c *resourceQuotas) UpdateStatus(resourceQuota *v1.ResourceQuota) (result *v1.ResourceQuota, err error) { + result = &v1.ResourceQuota{} + err = c.client.Put(). + Namespace(c.ns). + Resource("resourcequotas"). + Name(resourceQuota.Name). + SubResource("status"). + Body(resourceQuota). + Do(). + Into(result) + return +} + +// Delete takes name of the resourceQuota and deletes it. Returns an error if one occurs. +func (c *resourceQuotas) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("resourcequotas"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *resourceQuotas) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("resourcequotas"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the resourceQuota, and returns the corresponding resourceQuota object, and an error if there is any. +func (c *resourceQuotas) Get(name string) (result *v1.ResourceQuota, err error) { + result = &v1.ResourceQuota{} + err = c.client.Get(). + Namespace(c.ns). + Resource("resourcequotas"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ResourceQuotas that match those selectors. +func (c *resourceQuotas) List(opts api.ListOptions) (result *v1.ResourceQuotaList, err error) { + result = &v1.ResourceQuotaList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("resourcequotas"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested resourceQuotas. +func (c *resourceQuotas) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("resourcequotas"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/secret.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/secret.go new file mode 100644 index 000000000000..a95aa84f440d --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/secret.go @@ -0,0 +1,136 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + api "k8s.io/kubernetes/pkg/api" + v1 "k8s.io/kubernetes/pkg/api/v1" + watch "k8s.io/kubernetes/pkg/watch" +) + +// SecretsGetter has a method to return a SecretInterface. +// A group's client should implement this interface. +type SecretsGetter interface { + Secrets(namespace string) SecretInterface +} + +// SecretInterface has methods to work with Secret resources. +type SecretInterface interface { + Create(*v1.Secret) (*v1.Secret, error) + Update(*v1.Secret) (*v1.Secret, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*v1.Secret, error) + List(opts api.ListOptions) (*v1.SecretList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + SecretExpansion +} + +// secrets implements SecretInterface +type secrets struct { + client *CoreClient + ns string +} + +// newSecrets returns a Secrets +func newSecrets(c *CoreClient, namespace string) *secrets { + return &secrets{ + client: c, + ns: namespace, + } +} + +// Create takes the representation of a secret and creates it. Returns the server's representation of the secret, and an error, if there is any. +func (c *secrets) Create(secret *v1.Secret) (result *v1.Secret, err error) { + result = &v1.Secret{} + err = c.client.Post(). + Namespace(c.ns). + Resource("secrets"). + Body(secret). + Do(). + Into(result) + return +} + +// Update takes the representation of a secret and updates it. Returns the server's representation of the secret, and an error, if there is any. +func (c *secrets) Update(secret *v1.Secret) (result *v1.Secret, err error) { + result = &v1.Secret{} + err = c.client.Put(). + Namespace(c.ns). + Resource("secrets"). + Name(secret.Name). + Body(secret). + Do(). + Into(result) + return +} + +// Delete takes name of the secret and deletes it. Returns an error if one occurs. +func (c *secrets) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("secrets"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *secrets) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("secrets"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the secret, and returns the corresponding secret object, and an error if there is any. +func (c *secrets) Get(name string) (result *v1.Secret, err error) { + result = &v1.Secret{} + err = c.client.Get(). + Namespace(c.ns). + Resource("secrets"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Secrets that match those selectors. +func (c *secrets) List(opts api.ListOptions) (result *v1.SecretList, err error) { + result = &v1.SecretList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("secrets"). + VersionedParams(&opts, api.ParameterCodec). + Do(). 
+ Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested secrets. +func (c *secrets) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("secrets"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/service.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/service.go new file mode 100644 index 000000000000..cd62b5d94f20 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/service.go @@ -0,0 +1,150 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + api "k8s.io/kubernetes/pkg/api" + v1 "k8s.io/kubernetes/pkg/api/v1" + watch "k8s.io/kubernetes/pkg/watch" +) + +// ServicesGetter has a method to return a ServiceInterface. +// A group's client should implement this interface. +type ServicesGetter interface { + Services(namespace string) ServiceInterface +} + +// ServiceInterface has methods to work with Service resources. +type ServiceInterface interface { + Create(*v1.Service) (*v1.Service, error) + Update(*v1.Service) (*v1.Service, error) + UpdateStatus(*v1.Service) (*v1.Service, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*v1.Service, error) + List(opts api.ListOptions) (*v1.ServiceList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + ServiceExpansion +} + +// services implements ServiceInterface +type services struct { + client *CoreClient + ns string +} + +// newServices returns a Services +func newServices(c *CoreClient, namespace string) *services { + return &services{ + client: c, + ns: namespace, + } +} + +// Create takes the representation of a service and creates it. Returns the server's representation of the service, and an error, if there is any. +func (c *services) Create(service *v1.Service) (result *v1.Service, err error) { + result = &v1.Service{} + err = c.client.Post(). + Namespace(c.ns). + Resource("services"). + Body(service). + Do(). + Into(result) + return +} + +// Update takes the representation of a service and updates it. Returns the server's representation of the service, and an error, if there is any. +func (c *services) Update(service *v1.Service) (result *v1.Service, err error) { + result = &v1.Service{} + err = c.client.Put(). + Namespace(c.ns). + Resource("services"). + Name(service.Name). + Body(service). + Do(). + Into(result) + return +} + +func (c *services) UpdateStatus(service *v1.Service) (result *v1.Service, err error) { + result = &v1.Service{} + err = c.client.Put(). + Namespace(c.ns). + Resource("services"). + Name(service.Name). + SubResource("status"). + Body(service). + Do(). 
+ Into(result) + return +} + +// Delete takes name of the service and deletes it. Returns an error if one occurs. +func (c *services) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("services"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *services) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("services"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the service, and returns the corresponding service object, and an error if there is any. +func (c *services) Get(name string) (result *v1.Service, err error) { + result = &v1.Service{} + err = c.client.Get(). + Namespace(c.ns). + Resource("services"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Services that match those selectors. +func (c *services) List(opts api.ListOptions) (result *v1.ServiceList, err error) { + result = &v1.ServiceList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("services"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested services. +func (c *services) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("services"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/service_expansion.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/service_expansion.go new file mode 100644 index 000000000000..b4300483b846 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/service_expansion.go @@ -0,0 +1,41 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/kubernetes/pkg/client/restclient" + "k8s.io/kubernetes/pkg/util/net" +) + +// The ServiceExpansion interface allows manually adding extra methods to the ServiceInterface. +type ServiceExpansion interface { + ProxyGet(scheme, name, port, path string, params map[string]string) restclient.ResponseWrapper +} + +// ProxyGet returns a response of the service by calling it through the proxy. +func (c *services) ProxyGet(scheme, name, port, path string, params map[string]string) restclient.ResponseWrapper { + request := c.client.Get(). + Prefix("proxy"). + Namespace(c.ns). + Resource("services"). + Name(net.JoinSchemeNamePort(scheme, name, port)). 
+ Suffix(path) + for k, v := range params { + request = request.Param(k, v) + } + return request +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/serviceaccount.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/serviceaccount.go new file mode 100644 index 000000000000..eb0b258fa919 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1/serviceaccount.go @@ -0,0 +1,136 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + api "k8s.io/kubernetes/pkg/api" + v1 "k8s.io/kubernetes/pkg/api/v1" + watch "k8s.io/kubernetes/pkg/watch" +) + +// ServiceAccountsGetter has a method to return a ServiceAccountInterface. +// A group's client should implement this interface. +type ServiceAccountsGetter interface { + ServiceAccounts(namespace string) ServiceAccountInterface +} + +// ServiceAccountInterface has methods to work with ServiceAccount resources. +type ServiceAccountInterface interface { + Create(*v1.ServiceAccount) (*v1.ServiceAccount, error) + Update(*v1.ServiceAccount) (*v1.ServiceAccount, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*v1.ServiceAccount, error) + List(opts api.ListOptions) (*v1.ServiceAccountList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + ServiceAccountExpansion +} + +// serviceAccounts implements ServiceAccountInterface +type serviceAccounts struct { + client *CoreClient + ns string +} + +// newServiceAccounts returns a ServiceAccounts +func newServiceAccounts(c *CoreClient, namespace string) *serviceAccounts { + return &serviceAccounts{ + client: c, + ns: namespace, + } +} + +// Create takes the representation of a serviceAccount and creates it. Returns the server's representation of the serviceAccount, and an error, if there is any. +func (c *serviceAccounts) Create(serviceAccount *v1.ServiceAccount) (result *v1.ServiceAccount, err error) { + result = &v1.ServiceAccount{} + err = c.client.Post(). + Namespace(c.ns). + Resource("serviceaccounts"). + Body(serviceAccount). + Do(). + Into(result) + return +} + +// Update takes the representation of a serviceAccount and updates it. Returns the server's representation of the serviceAccount, and an error, if there is any. +func (c *serviceAccounts) Update(serviceAccount *v1.ServiceAccount) (result *v1.ServiceAccount, err error) { + result = &v1.ServiceAccount{} + err = c.client.Put(). + Namespace(c.ns). + Resource("serviceaccounts"). + Name(serviceAccount.Name). + Body(serviceAccount). + Do(). + Into(result) + return +} + +// Delete takes name of the serviceAccount and deletes it. Returns an error if one occurs. +func (c *serviceAccounts) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). 
+ Resource("serviceaccounts"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *serviceAccounts) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("serviceaccounts"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the serviceAccount, and returns the corresponding serviceAccount object, and an error if there is any. +func (c *serviceAccounts) Get(name string) (result *v1.ServiceAccount, err error) { + result = &v1.ServiceAccount{} + err = c.client.Get(). + Namespace(c.ns). + Resource("serviceaccounts"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ServiceAccounts that match those selectors. +func (c *serviceAccounts) List(opts api.ListOptions) (result *v1.ServiceAccountList, err error) { + result = &v1.ServiceAccountList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("serviceaccounts"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested serviceAccounts. +func (c *serviceAccounts) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("serviceaccounts"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/extensions/v1beta1/daemonset.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/extensions/v1beta1/daemonset.go new file mode 100644 index 000000000000..ecbece591bd5 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/extensions/v1beta1/daemonset.go @@ -0,0 +1,150 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + api "k8s.io/kubernetes/pkg/api" + v1beta1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" + watch "k8s.io/kubernetes/pkg/watch" +) + +// DaemonSetsGetter has a method to return a DaemonSetInterface. +// A group's client should implement this interface. +type DaemonSetsGetter interface { + DaemonSets(namespace string) DaemonSetInterface +} + +// DaemonSetInterface has methods to work with DaemonSet resources. 
+type DaemonSetInterface interface { + Create(*v1beta1.DaemonSet) (*v1beta1.DaemonSet, error) + Update(*v1beta1.DaemonSet) (*v1beta1.DaemonSet, error) + UpdateStatus(*v1beta1.DaemonSet) (*v1beta1.DaemonSet, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*v1beta1.DaemonSet, error) + List(opts api.ListOptions) (*v1beta1.DaemonSetList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + DaemonSetExpansion +} + +// daemonSets implements DaemonSetInterface +type daemonSets struct { + client *ExtensionsClient + ns string +} + +// newDaemonSets returns a DaemonSets +func newDaemonSets(c *ExtensionsClient, namespace string) *daemonSets { + return &daemonSets{ + client: c, + ns: namespace, + } +} + +// Create takes the representation of a daemonSet and creates it. Returns the server's representation of the daemonSet, and an error, if there is any. +func (c *daemonSets) Create(daemonSet *v1beta1.DaemonSet) (result *v1beta1.DaemonSet, err error) { + result = &v1beta1.DaemonSet{} + err = c.client.Post(). + Namespace(c.ns). + Resource("daemonsets"). + Body(daemonSet). + Do(). + Into(result) + return +} + +// Update takes the representation of a daemonSet and updates it. Returns the server's representation of the daemonSet, and an error, if there is any. +func (c *daemonSets) Update(daemonSet *v1beta1.DaemonSet) (result *v1beta1.DaemonSet, err error) { + result = &v1beta1.DaemonSet{} + err = c.client.Put(). + Namespace(c.ns). + Resource("daemonsets"). + Name(daemonSet.Name). + Body(daemonSet). + Do(). + Into(result) + return +} + +func (c *daemonSets) UpdateStatus(daemonSet *v1beta1.DaemonSet) (result *v1beta1.DaemonSet, err error) { + result = &v1beta1.DaemonSet{} + err = c.client.Put(). + Namespace(c.ns). + Resource("daemonsets"). + Name(daemonSet.Name). + SubResource("status"). + Body(daemonSet). + Do(). + Into(result) + return +} + +// Delete takes name of the daemonSet and deletes it. Returns an error if one occurs. +func (c *daemonSets) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("daemonsets"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *daemonSets) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("daemonsets"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the daemonSet, and returns the corresponding daemonSet object, and an error if there is any. +func (c *daemonSets) Get(name string) (result *v1beta1.DaemonSet, err error) { + result = &v1beta1.DaemonSet{} + err = c.client.Get(). + Namespace(c.ns). + Resource("daemonsets"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of DaemonSets that match those selectors. +func (c *daemonSets) List(opts api.ListOptions) (result *v1beta1.DaemonSetList, err error) { + result = &v1beta1.DaemonSetList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("daemonsets"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested daemonSets. +func (c *daemonSets) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Namespace(c.ns). 
+ Resource("daemonsets"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/extensions/v1beta1/deployment.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/extensions/v1beta1/deployment.go new file mode 100644 index 000000000000..7cc3ff9d3f42 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/extensions/v1beta1/deployment.go @@ -0,0 +1,150 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + api "k8s.io/kubernetes/pkg/api" + v1beta1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" + watch "k8s.io/kubernetes/pkg/watch" +) + +// DeploymentsGetter has a method to return a DeploymentInterface. +// A group's client should implement this interface. +type DeploymentsGetter interface { + Deployments(namespace string) DeploymentInterface +} + +// DeploymentInterface has methods to work with Deployment resources. +type DeploymentInterface interface { + Create(*v1beta1.Deployment) (*v1beta1.Deployment, error) + Update(*v1beta1.Deployment) (*v1beta1.Deployment, error) + UpdateStatus(*v1beta1.Deployment) (*v1beta1.Deployment, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*v1beta1.Deployment, error) + List(opts api.ListOptions) (*v1beta1.DeploymentList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + DeploymentExpansion +} + +// deployments implements DeploymentInterface +type deployments struct { + client *ExtensionsClient + ns string +} + +// newDeployments returns a Deployments +func newDeployments(c *ExtensionsClient, namespace string) *deployments { + return &deployments{ + client: c, + ns: namespace, + } +} + +// Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any. +func (c *deployments) Create(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) { + result = &v1beta1.Deployment{} + err = c.client.Post(). + Namespace(c.ns). + Resource("deployments"). + Body(deployment). + Do(). + Into(result) + return +} + +// Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any. +func (c *deployments) Update(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) { + result = &v1beta1.Deployment{} + err = c.client.Put(). + Namespace(c.ns). + Resource("deployments"). + Name(deployment.Name). + Body(deployment). + Do(). + Into(result) + return +} + +func (c *deployments) UpdateStatus(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) { + result = &v1beta1.Deployment{} + err = c.client.Put(). + Namespace(c.ns). + Resource("deployments"). 
+ Name(deployment.Name). + SubResource("status"). + Body(deployment). + Do(). + Into(result) + return +} + +// Delete takes name of the deployment and deletes it. Returns an error if one occurs. +func (c *deployments) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("deployments"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *deployments) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("deployments"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any. +func (c *deployments) Get(name string) (result *v1beta1.Deployment, err error) { + result = &v1beta1.Deployment{} + err = c.client.Get(). + Namespace(c.ns). + Resource("deployments"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Deployments that match those selectors. +func (c *deployments) List(opts api.ListOptions) (result *v1beta1.DeploymentList, err error) { + result = &v1beta1.DeploymentList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("deployments"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested deployments. +func (c *deployments) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("deployments"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/extensions/v1beta1/deployment_expansion.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/extensions/v1beta1/deployment_expansion.go new file mode 100644 index 000000000000..0c3ff63678a4 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/extensions/v1beta1/deployment_expansion.go @@ -0,0 +1,29 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" + +// The DeploymentExpansion interface allows manually adding extra methods to the DeploymentInterface. +type DeploymentExpansion interface { + Rollback(*v1beta1.DeploymentRollback) error +} + +// Rollback applies the provided DeploymentRollback to the named deployment in the current namespace.
+func (c *deployments) Rollback(deploymentRollback *v1beta1.DeploymentRollback) error { + return c.client.Post().Namespace(c.ns).Resource("deployments").Name(deploymentRollback.Name).SubResource("rollback").Body(deploymentRollback).Do().Error() +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/extensions/v1beta1/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/extensions/v1beta1/doc.go new file mode 100644 index 000000000000..ffd3806e8ac5 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/extensions/v1beta1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with arguments: --clientset-name=release_1_2 --input=[api/v1,extensions/v1beta1] + +// This package has the automatically generated typed clients. +package v1beta1 diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned/extensions_client.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/extensions/v1beta1/extensions_client.go similarity index 93% rename from Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned/extensions_client.go rename to Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/extensions/v1beta1/extensions_client.go index 220dd69b5cd6..a1f6a982b8c4 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned/extensions_client.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/extensions/v1beta1/extensions_client.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package unversioned +package v1beta1 import ( api "k8s.io/kubernetes/pkg/api" @@ -66,8 +66,8 @@ func (c *ExtensionsClient) Scales(namespace string) ScaleInterface { return newScales(c, namespace) } -func (c *ExtensionsClient) ThirdPartyResources(namespace string) ThirdPartyResourceInterface { - return newThirdPartyResources(c, namespace) +func (c *ExtensionsClient) ThirdPartyResources() ThirdPartyResourceInterface { + return newThirdPartyResources(c) } // NewForConfig creates a new ExtensionsClient for the given config. 
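The Rollback expansion above simply POSTs a DeploymentRollback to the deployment's rollback subresource through the same request builder the generated methods use. A minimal usage sketch, assuming a reachable API server on localhost and a deployment named "frontend" (both hypothetical), not an example from this diff:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
	extensionsclient "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/extensions/v1beta1"
	"k8s.io/kubernetes/pkg/client/restclient"
)

func main() {
	// NewForConfig (defined in extensions_client.go above) applies the group
	// defaults from setConfigDefaults before building the REST client.
	cfg := &restclient.Config{Host: "http://localhost:8080"} // hypothetical API server
	c, err := extensionsclient.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// Roll the (hypothetical) "frontend" deployment back to revision 2; this
	// POSTs to .../namespaces/default/deployments/frontend/rollback.
	rollback := &v1beta1.DeploymentRollback{
		Name:       "frontend",
		RollbackTo: v1beta1.RollbackConfig{Revision: 2},
	}
	if err := c.Deployments("default").Rollback(rollback); err != nil {
		fmt.Println("rollback failed:", err)
	}
}

Note that Rollback takes no namespace argument; like every method on the deployments struct, it is scoped to the namespace captured when Deployments(namespace) was called.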
@@ -114,7 +114,8 @@ func setConfigDefaults(config *restclient.Config) error { config.GroupVersion = &copyGroupVersion //} - config.Codec = api.Codecs.LegacyCodec(*config.GroupVersion) + config.NegotiatedSerializer = api.Codecs + if config.QPS == 0 { config.QPS = 5 } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/extensions/v1beta1/fake/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/extensions/v1beta1/fake/doc.go new file mode 100644 index 000000000000..bafa0bfe44c5 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/extensions/v1beta1/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with arguments: --clientset-name=release_1_2 --input=[api/v1,extensions/v1beta1] + +// Package fake has the automatically generated clients. +package fake diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/extensions/v1beta1/fake/fake_daemonset.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/extensions/v1beta1/fake/fake_daemonset.go new file mode 100644 index 000000000000..26187506e505 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/extensions/v1beta1/fake/fake_daemonset.go @@ -0,0 +1,116 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + v1beta1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" + core "k8s.io/kubernetes/pkg/client/testing/core" + labels "k8s.io/kubernetes/pkg/labels" + watch "k8s.io/kubernetes/pkg/watch" +) + +// FakeDaemonSets implements DaemonSetInterface +type FakeDaemonSets struct { + Fake *FakeExtensions + ns string +} + +var daemonsetsResource = unversioned.GroupVersionResource{Group: "extensions", Version: "v1beta1", Resource: "daemonsets"} + +func (c *FakeDaemonSets) Create(daemonSet *v1beta1.DaemonSet) (result *v1beta1.DaemonSet, err error) { + obj, err := c.Fake.
+ Invokes(core.NewCreateAction(daemonsetsResource, c.ns, daemonSet), &v1beta1.DaemonSet{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.DaemonSet), err +} + +func (c *FakeDaemonSets) Update(daemonSet *v1beta1.DaemonSet) (result *v1beta1.DaemonSet, err error) { + obj, err := c.Fake. + Invokes(core.NewUpdateAction(daemonsetsResource, c.ns, daemonSet), &v1beta1.DaemonSet{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.DaemonSet), err +} + +func (c *FakeDaemonSets) UpdateStatus(daemonSet *v1beta1.DaemonSet) (*v1beta1.DaemonSet, error) { + obj, err := c.Fake. + Invokes(core.NewUpdateSubresourceAction(daemonsetsResource, "status", c.ns, daemonSet), &v1beta1.DaemonSet{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.DaemonSet), err +} + +func (c *FakeDaemonSets) Delete(name string, options *api.DeleteOptions) error { + _, err := c.Fake. + Invokes(core.NewDeleteAction(daemonsetsResource, c.ns, name), &v1beta1.DaemonSet{}) + + return err +} + +func (c *FakeDaemonSets) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + action := core.NewDeleteCollectionAction(daemonsetsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1beta1.DaemonSetList{}) + return err +} + +func (c *FakeDaemonSets) Get(name string) (result *v1beta1.DaemonSet, err error) { + obj, err := c.Fake. + Invokes(core.NewGetAction(daemonsetsResource, c.ns, name), &v1beta1.DaemonSet{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.DaemonSet), err +} + +func (c *FakeDaemonSets) List(opts api.ListOptions) (result *v1beta1.DaemonSetList, err error) { + obj, err := c.Fake. + Invokes(core.NewListAction(daemonsetsResource, c.ns, opts), &v1beta1.DaemonSetList{}) + + if obj == nil { + return nil, err + } + + label := opts.LabelSelector + if label == nil { + label = labels.Everything() + } + list := &v1beta1.DaemonSetList{} + for _, item := range obj.(*v1beta1.DaemonSetList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested daemonSets. +func (c *FakeDaemonSets) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(core.NewWatchAction(daemonsetsResource, c.ns, opts)) + +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/extensions/v1beta1/fake/fake_deployment.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/extensions/v1beta1/fake/fake_deployment.go new file mode 100644 index 000000000000..dc6e55db6922 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/extensions/v1beta1/fake/fake_deployment.go @@ -0,0 +1,116 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package fake + +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + v1beta1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" + core "k8s.io/kubernetes/pkg/client/testing/core" + labels "k8s.io/kubernetes/pkg/labels" + watch "k8s.io/kubernetes/pkg/watch" +) + +// FakeDeployments implements DeploymentInterface +type FakeDeployments struct { + Fake *FakeExtensions + ns string +} + +var deploymentsResource = unversioned.GroupVersionResource{Group: "extensions", Version: "v1beta1", Resource: "deployments"} + +func (c *FakeDeployments) Create(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) { + obj, err := c.Fake. + Invokes(core.NewCreateAction(deploymentsResource, c.ns, deployment), &v1beta1.Deployment{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Deployment), err +} + +func (c *FakeDeployments) Update(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) { + obj, err := c.Fake. + Invokes(core.NewUpdateAction(deploymentsResource, c.ns, deployment), &v1beta1.Deployment{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Deployment), err +} + +func (c *FakeDeployments) UpdateStatus(deployment *v1beta1.Deployment) (*v1beta1.Deployment, error) { + obj, err := c.Fake. + Invokes(core.NewUpdateSubresourceAction(deploymentsResource, "status", c.ns, deployment), &v1beta1.Deployment{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Deployment), err +} + +func (c *FakeDeployments) Delete(name string, options *api.DeleteOptions) error { + _, err := c.Fake. + Invokes(core.NewDeleteAction(deploymentsResource, c.ns, name), &v1beta1.Deployment{}) + + return err +} + +func (c *FakeDeployments) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + action := core.NewDeleteCollectionAction(deploymentsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1beta1.DeploymentList{}) + return err +} + +func (c *FakeDeployments) Get(name string) (result *v1beta1.Deployment, err error) { + obj, err := c.Fake. + Invokes(core.NewGetAction(deploymentsResource, c.ns, name), &v1beta1.Deployment{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Deployment), err +} + +func (c *FakeDeployments) List(opts api.ListOptions) (result *v1beta1.DeploymentList, err error) { + obj, err := c.Fake. + Invokes(core.NewListAction(deploymentsResource, c.ns, opts), &v1beta1.DeploymentList{}) + + if obj == nil { + return nil, err + } + + label := opts.LabelSelector + if label == nil { + label = labels.Everything() + } + list := &v1beta1.DeploymentList{} + for _, item := range obj.(*v1beta1.DeploymentList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested deployments. +func (c *FakeDeployments) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.Fake. 
+ InvokesWatch(core.NewWatchAction(deploymentsResource, c.ns, opts)) + +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/extensions/v1beta1/fake/fake_deployment_expansion.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/extensions/v1beta1/fake/fake_deployment_expansion.go new file mode 100644 index 000000000000..f154693645de --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/extensions/v1beta1/fake/fake_deployment_expansion.go @@ -0,0 +1,33 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" + "k8s.io/kubernetes/pkg/client/testing/core" +) + +func (c *FakeDeployments) Rollback(deploymentRollback *v1beta1.DeploymentRollback) error { + action := core.CreateActionImpl{} + action.Verb = "create" + action.Resource = deploymentsResource + action.Subresource = "rollback" + action.Object = deploymentRollback + + _, err := c.Fake.Invokes(action, deploymentRollback) + return err +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/extensions/v1beta1/fake/fake_extensions_client.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/extensions/v1beta1/fake/fake_extensions_client.go new file mode 100644 index 000000000000..702778093294 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/extensions/v1beta1/fake/fake_extensions_client.go @@ -0,0 +1,58 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package fake + +import ( + v1beta1 "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/extensions/v1beta1" + core "k8s.io/kubernetes/pkg/client/testing/core" +) + +type FakeExtensions struct { + *core.Fake +} + +func (c *FakeExtensions) DaemonSets(namespace string) v1beta1.DaemonSetInterface { + return &FakeDaemonSets{c, namespace} +} + +func (c *FakeExtensions) Deployments(namespace string) v1beta1.DeploymentInterface { + return &FakeDeployments{c, namespace} +} + +func (c *FakeExtensions) HorizontalPodAutoscalers(namespace string) v1beta1.HorizontalPodAutoscalerInterface { + return &FakeHorizontalPodAutoscalers{c, namespace} +} + +func (c *FakeExtensions) Ingresses(namespace string) v1beta1.IngressInterface { + return &FakeIngresses{c, namespace} +} + +func (c *FakeExtensions) Jobs(namespace string) v1beta1.JobInterface { + return &FakeJobs{c, namespace} +} + +func (c *FakeExtensions) ReplicaSets(namespace string) v1beta1.ReplicaSetInterface { + return &FakeReplicaSets{c, namespace} +} + +func (c *FakeExtensions) Scales(namespace string) v1beta1.ScaleInterface { + return &FakeScales{c, namespace} +} + +func (c *FakeExtensions) ThirdPartyResources() v1beta1.ThirdPartyResourceInterface { + return &FakeThirdPartyResources{c} +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/extensions/v1beta1/fake/fake_horizontalpodautoscaler.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/extensions/v1beta1/fake/fake_horizontalpodautoscaler.go new file mode 100644 index 000000000000..8517996527bc --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/extensions/v1beta1/fake/fake_horizontalpodautoscaler.go @@ -0,0 +1,116 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + v1beta1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" + core "k8s.io/kubernetes/pkg/client/testing/core" + labels "k8s.io/kubernetes/pkg/labels" + watch "k8s.io/kubernetes/pkg/watch" +) + +// FakeHorizontalPodAutoscalers implements HorizontalPodAutoscalerInterface +type FakeHorizontalPodAutoscalers struct { + Fake *FakeExtensions + ns string +} + +var horizontalpodautoscalersResource = unversioned.GroupVersionResource{Group: "extensions", Version: "v1beta1", Resource: "horizontalpodautoscalers"} + +func (c *FakeHorizontalPodAutoscalers) Create(horizontalPodAutoscaler *v1beta1.HorizontalPodAutoscaler) (result *v1beta1.HorizontalPodAutoscaler, err error) { + obj, err := c.Fake. 
+ Invokes(core.NewCreateAction(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler), &v1beta1.HorizontalPodAutoscaler{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.HorizontalPodAutoscaler), err +} + +func (c *FakeHorizontalPodAutoscalers) Update(horizontalPodAutoscaler *v1beta1.HorizontalPodAutoscaler) (result *v1beta1.HorizontalPodAutoscaler, err error) { + obj, err := c.Fake. + Invokes(core.NewUpdateAction(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler), &v1beta1.HorizontalPodAutoscaler{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.HorizontalPodAutoscaler), err +} + +func (c *FakeHorizontalPodAutoscalers) UpdateStatus(horizontalPodAutoscaler *v1beta1.HorizontalPodAutoscaler) (*v1beta1.HorizontalPodAutoscaler, error) { + obj, err := c.Fake. + Invokes(core.NewUpdateSubresourceAction(horizontalpodautoscalersResource, "status", c.ns, horizontalPodAutoscaler), &v1beta1.HorizontalPodAutoscaler{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.HorizontalPodAutoscaler), err +} + +func (c *FakeHorizontalPodAutoscalers) Delete(name string, options *api.DeleteOptions) error { + _, err := c.Fake. + Invokes(core.NewDeleteAction(horizontalpodautoscalersResource, c.ns, name), &v1beta1.HorizontalPodAutoscaler{}) + + return err +} + +func (c *FakeHorizontalPodAutoscalers) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + action := core.NewDeleteCollectionAction(horizontalpodautoscalersResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1beta1.HorizontalPodAutoscalerList{}) + return err +} + +func (c *FakeHorizontalPodAutoscalers) Get(name string) (result *v1beta1.HorizontalPodAutoscaler, err error) { + obj, err := c.Fake. + Invokes(core.NewGetAction(horizontalpodautoscalersResource, c.ns, name), &v1beta1.HorizontalPodAutoscaler{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.HorizontalPodAutoscaler), err +} + +func (c *FakeHorizontalPodAutoscalers) List(opts api.ListOptions) (result *v1beta1.HorizontalPodAutoscalerList, err error) { + obj, err := c.Fake. + Invokes(core.NewListAction(horizontalpodautoscalersResource, c.ns, opts), &v1beta1.HorizontalPodAutoscalerList{}) + + if obj == nil { + return nil, err + } + + label := opts.LabelSelector + if label == nil { + label = labels.Everything() + } + list := &v1beta1.HorizontalPodAutoscalerList{} + for _, item := range obj.(*v1beta1.HorizontalPodAutoscalerList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. +func (c *FakeHorizontalPodAutoscalers) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(core.NewWatchAction(horizontalpodautoscalersResource, c.ns, opts)) + +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/extensions/v1beta1/fake/fake_ingress.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/extensions/v1beta1/fake/fake_ingress.go new file mode 100644 index 000000000000..e1c46d4272d6 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/extensions/v1beta1/fake/fake_ingress.go @@ -0,0 +1,116 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + v1beta1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" + core "k8s.io/kubernetes/pkg/client/testing/core" + labels "k8s.io/kubernetes/pkg/labels" + watch "k8s.io/kubernetes/pkg/watch" +) + +// FakeIngresses implements IngressInterface +type FakeIngresses struct { + Fake *FakeExtensions + ns string +} + +var ingressesResource = unversioned.GroupVersionResource{Group: "extensions", Version: "v1beta1", Resource: "ingresses"} + +func (c *FakeIngresses) Create(ingress *v1beta1.Ingress) (result *v1beta1.Ingress, err error) { + obj, err := c.Fake. + Invokes(core.NewCreateAction(ingressesResource, c.ns, ingress), &v1beta1.Ingress{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Ingress), err +} + +func (c *FakeIngresses) Update(ingress *v1beta1.Ingress) (result *v1beta1.Ingress, err error) { + obj, err := c.Fake. + Invokes(core.NewUpdateAction(ingressesResource, c.ns, ingress), &v1beta1.Ingress{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Ingress), err +} + +func (c *FakeIngresses) UpdateStatus(ingress *v1beta1.Ingress) (*v1beta1.Ingress, error) { + obj, err := c.Fake. + Invokes(core.NewUpdateSubresourceAction(ingressesResource, "status", c.ns, ingress), &v1beta1.Ingress{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Ingress), err +} + +func (c *FakeIngresses) Delete(name string, options *api.DeleteOptions) error { + _, err := c.Fake. + Invokes(core.NewDeleteAction(ingressesResource, c.ns, name), &v1beta1.Ingress{}) + + return err +} + +func (c *FakeIngresses) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + action := core.NewDeleteCollectionAction(ingressesResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1beta1.IngressList{}) + return err +} + +func (c *FakeIngresses) Get(name string) (result *v1beta1.Ingress, err error) { + obj, err := c.Fake. + Invokes(core.NewGetAction(ingressesResource, c.ns, name), &v1beta1.Ingress{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Ingress), err +} + +func (c *FakeIngresses) List(opts api.ListOptions) (result *v1beta1.IngressList, err error) { + obj, err := c.Fake. + Invokes(core.NewListAction(ingressesResource, c.ns, opts), &v1beta1.IngressList{}) + + if obj == nil { + return nil, err + } + + label := opts.LabelSelector + if label == nil { + label = labels.Everything() + } + list := &v1beta1.IngressList{} + for _, item := range obj.(*v1beta1.IngressList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested ingresses. +func (c *FakeIngresses) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.Fake. 
+ InvokesWatch(core.NewWatchAction(ingressesResource, c.ns, opts)) + +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/extensions/v1beta1/fake/fake_job.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/extensions/v1beta1/fake/fake_job.go new file mode 100644 index 000000000000..e7819d36aa3f --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/extensions/v1beta1/fake/fake_job.go @@ -0,0 +1,116 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + v1beta1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" + core "k8s.io/kubernetes/pkg/client/testing/core" + labels "k8s.io/kubernetes/pkg/labels" + watch "k8s.io/kubernetes/pkg/watch" +) + +// FakeJobs implements JobInterface +type FakeJobs struct { + Fake *FakeExtensions + ns string +} + +var jobsResource = unversioned.GroupVersionResource{Group: "extensions", Version: "v1beta1", Resource: "jobs"} + +func (c *FakeJobs) Create(job *v1beta1.Job) (result *v1beta1.Job, err error) { + obj, err := c.Fake. + Invokes(core.NewCreateAction(jobsResource, c.ns, job), &v1beta1.Job{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Job), err +} + +func (c *FakeJobs) Update(job *v1beta1.Job) (result *v1beta1.Job, err error) { + obj, err := c.Fake. + Invokes(core.NewUpdateAction(jobsResource, c.ns, job), &v1beta1.Job{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Job), err +} + +func (c *FakeJobs) UpdateStatus(job *v1beta1.Job) (*v1beta1.Job, error) { + obj, err := c.Fake. + Invokes(core.NewUpdateSubresourceAction(jobsResource, "status", c.ns, job), &v1beta1.Job{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Job), err +} + +func (c *FakeJobs) Delete(name string, options *api.DeleteOptions) error { + _, err := c.Fake. + Invokes(core.NewDeleteAction(jobsResource, c.ns, name), &v1beta1.Job{}) + + return err +} + +func (c *FakeJobs) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + action := core.NewDeleteCollectionAction(jobsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1beta1.JobList{}) + return err +} + +func (c *FakeJobs) Get(name string) (result *v1beta1.Job, err error) { + obj, err := c.Fake. + Invokes(core.NewGetAction(jobsResource, c.ns, name), &v1beta1.Job{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Job), err +} + +func (c *FakeJobs) List(opts api.ListOptions) (result *v1beta1.JobList, err error) { + obj, err := c.Fake. 
+ Invokes(core.NewListAction(jobsResource, c.ns, opts), &v1beta1.JobList{}) + + if obj == nil { + return nil, err + } + + label := opts.LabelSelector + if label == nil { + label = labels.Everything() + } + list := &v1beta1.JobList{} + for _, item := range obj.(*v1beta1.JobList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested jobs. +func (c *FakeJobs) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(core.NewWatchAction(jobsResource, c.ns, opts)) + +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/extensions/v1beta1/fake/fake_replicaset.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/extensions/v1beta1/fake/fake_replicaset.go new file mode 100644 index 000000000000..85aa5b87fa56 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/extensions/v1beta1/fake/fake_replicaset.go @@ -0,0 +1,116 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + v1beta1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" + core "k8s.io/kubernetes/pkg/client/testing/core" + labels "k8s.io/kubernetes/pkg/labels" + watch "k8s.io/kubernetes/pkg/watch" +) + +// FakeReplicaSets implements ReplicaSetInterface +type FakeReplicaSets struct { + Fake *FakeExtensions + ns string +} + +var replicasetsResource = unversioned.GroupVersionResource{Group: "extensions", Version: "v1beta1", Resource: "replicasets"} + +func (c *FakeReplicaSets) Create(replicaSet *v1beta1.ReplicaSet) (result *v1beta1.ReplicaSet, err error) { + obj, err := c.Fake. + Invokes(core.NewCreateAction(replicasetsResource, c.ns, replicaSet), &v1beta1.ReplicaSet{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.ReplicaSet), err +} + +func (c *FakeReplicaSets) Update(replicaSet *v1beta1.ReplicaSet) (result *v1beta1.ReplicaSet, err error) { + obj, err := c.Fake. + Invokes(core.NewUpdateAction(replicasetsResource, c.ns, replicaSet), &v1beta1.ReplicaSet{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.ReplicaSet), err +} + +func (c *FakeReplicaSets) UpdateStatus(replicaSet *v1beta1.ReplicaSet) (*v1beta1.ReplicaSet, error) { + obj, err := c.Fake. + Invokes(core.NewUpdateSubresourceAction(replicasetsResource, "status", c.ns, replicaSet), &v1beta1.ReplicaSet{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.ReplicaSet), err +} + +func (c *FakeReplicaSets) Delete(name string, options *api.DeleteOptions) error { + _, err := c.Fake. 
+ Invokes(core.NewDeleteAction(replicasetsResource, c.ns, name), &v1beta1.ReplicaSet{}) + + return err +} + +func (c *FakeReplicaSets) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + action := core.NewDeleteCollectionAction(replicasetsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1beta1.ReplicaSetList{}) + return err +} + +func (c *FakeReplicaSets) Get(name string) (result *v1beta1.ReplicaSet, err error) { + obj, err := c.Fake. + Invokes(core.NewGetAction(replicasetsResource, c.ns, name), &v1beta1.ReplicaSet{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.ReplicaSet), err +} + +func (c *FakeReplicaSets) List(opts api.ListOptions) (result *v1beta1.ReplicaSetList, err error) { + obj, err := c.Fake. + Invokes(core.NewListAction(replicasetsResource, c.ns, opts), &v1beta1.ReplicaSetList{}) + + if obj == nil { + return nil, err + } + + label := opts.LabelSelector + if label == nil { + label = labels.Everything() + } + list := &v1beta1.ReplicaSetList{} + for _, item := range obj.(*v1beta1.ReplicaSetList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested replicaSets. +func (c *FakeReplicaSets) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(core.NewWatchAction(replicasetsResource, c.ns, opts)) + +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/extensions/v1beta1/fake/fake_scale.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/extensions/v1beta1/fake/fake_scale.go new file mode 100644 index 000000000000..d2cfc5f7b789 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/extensions/v1beta1/fake/fake_scale.go @@ -0,0 +1,23 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +// FakeScales implements ScaleInterface +type FakeScales struct { + Fake *FakeExtensions + ns string +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/extensions/v1beta1/fake/fake_scale_expansion.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/extensions/v1beta1/fake/fake_scale_expansion.go new file mode 100644 index 000000000000..c76f35e7cf5e --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/extensions/v1beta1/fake/fake_scale_expansion.go @@ -0,0 +1,47 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" + "k8s.io/kubernetes/pkg/client/testing/core" +) + +func (c *FakeScales) Get(kind string, name string) (result *v1beta1.Scale, err error) { + action := core.GetActionImpl{} + action.Verb = "get" + action.Namespace = c.ns + action.Resource = unversioned.GroupVersionResource{Resource: kind} + action.Subresource = "scale" + action.Name = name + obj, err := c.Fake.Invokes(action, &v1beta1.Scale{}) + result = obj.(*v1beta1.Scale) + return +} + +func (c *FakeScales) Update(kind string, scale *v1beta1.Scale) (result *v1beta1.Scale, err error) { + action := core.UpdateActionImpl{} + action.Verb = "update" + action.Namespace = c.ns + action.Resource = unversioned.GroupVersionResource{Resource: kind} + action.Subresource = "scale" + action.Object = scale + obj, err := c.Fake.Invokes(action, scale) + result = obj.(*v1beta1.Scale) + return +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/extensions/v1beta1/fake/fake_thirdpartyresource.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/extensions/v1beta1/fake/fake_thirdpartyresource.go similarity index 74% rename from Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/extensions/v1beta1/fake/fake_thirdpartyresource.go rename to Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/extensions/v1beta1/fake/fake_thirdpartyresource.go index 364d1efb9643..49d8a30b5f9a 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/extensions/v1beta1/fake/fake_thirdpartyresource.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/extensions/v1beta1/fake/fake_thirdpartyresource.go @@ -18,6 +18,7 @@ package fake import ( api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" v1beta1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" core "k8s.io/kubernetes/pkg/client/testing/core" labels "k8s.io/kubernetes/pkg/labels" @@ -27,12 +28,13 @@ import ( // FakeThirdPartyResources implements ThirdPartyResourceInterface type FakeThirdPartyResources struct { Fake *FakeExtensions - ns string } +var thirdpartyresourcesResource = unversioned.GroupVersionResource{Group: "extensions", Version: "v1beta1", Resource: "thirdpartyresources"} + func (c *FakeThirdPartyResources) Create(thirdPartyResource *v1beta1.ThirdPartyResource) (result *v1beta1.ThirdPartyResource, err error) { obj, err := c.Fake. - Invokes(core.NewCreateAction("thirdpartyresources", c.ns, thirdPartyResource), &v1beta1.ThirdPartyResource{}) + Invokes(core.NewRootCreateAction(thirdpartyresourcesResource, thirdPartyResource), &v1beta1.ThirdPartyResource{}) if obj == nil { return nil, err @@ -42,7 +44,7 @@ func (c *FakeThirdPartyResources) Create(thirdPartyResource *v1beta1.ThirdPartyR func (c *FakeThirdPartyResources) Update(thirdPartyResource *v1beta1.ThirdPartyResource) (result *v1beta1.ThirdPartyResource, err error) { obj, err := c.Fake. 
- Invokes(core.NewUpdateAction("thirdpartyresources", c.ns, thirdPartyResource), &v1beta1.ThirdPartyResource{}) + Invokes(core.NewRootUpdateAction(thirdpartyresourcesResource, thirdPartyResource), &v1beta1.ThirdPartyResource{}) if obj == nil { return nil, err @@ -52,13 +54,13 @@ func (c *FakeThirdPartyResources) Update(thirdPartyResource *v1beta1.ThirdPartyR func (c *FakeThirdPartyResources) Delete(name string, options *api.DeleteOptions) error { _, err := c.Fake. - Invokes(core.NewDeleteAction("thirdpartyresources", c.ns, name), &v1beta1.ThirdPartyResource{}) + Invokes(core.NewRootDeleteAction(thirdpartyresourcesResource, name), &v1beta1.ThirdPartyResource{}) return err } func (c *FakeThirdPartyResources) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - action := core.NewDeleteCollectionAction("thirdpartyresources", c.ns, listOptions) + action := core.NewRootDeleteCollectionAction(thirdpartyresourcesResource, listOptions) _, err := c.Fake.Invokes(action, &v1beta1.ThirdPartyResourceList{}) return err @@ -66,7 +68,7 @@ func (c *FakeThirdPartyResources) DeleteCollection(options *api.DeleteOptions, l func (c *FakeThirdPartyResources) Get(name string) (result *v1beta1.ThirdPartyResource, err error) { obj, err := c.Fake. - Invokes(core.NewGetAction("thirdpartyresources", c.ns, name), &v1beta1.ThirdPartyResource{}) + Invokes(core.NewRootGetAction(thirdpartyresourcesResource, name), &v1beta1.ThirdPartyResource{}) if obj == nil { return nil, err @@ -76,7 +78,7 @@ func (c *FakeThirdPartyResources) Get(name string) (result *v1beta1.ThirdPartyRe func (c *FakeThirdPartyResources) List(opts api.ListOptions) (result *v1beta1.ThirdPartyResourceList, err error) { obj, err := c.Fake. - Invokes(core.NewListAction("thirdpartyresources", c.ns, opts), &v1beta1.ThirdPartyResourceList{}) + Invokes(core.NewRootListAction(thirdpartyresourcesResource, opts), &v1beta1.ThirdPartyResourceList{}) if obj == nil { return nil, err @@ -98,6 +100,6 @@ func (c *FakeThirdPartyResources) List(opts api.ListOptions) (result *v1beta1.Th // Watch returns a watch.Interface that watches the requested thirdPartyResources. func (c *FakeThirdPartyResources) Watch(opts api.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(core.NewWatchAction("thirdpartyresources", c.ns, opts)) + InvokesWatch(core.NewRootWatchAction(thirdpartyresourcesResource, opts)) } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned/generated_expansion.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/extensions/v1beta1/generated_expansion.go similarity index 97% rename from Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned/generated_expansion.go rename to Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/extensions/v1beta1/generated_expansion.go index 0690e0c8c339..97c6a1c0667e 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned/generated_expansion.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/extensions/v1beta1/generated_expansion.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package unversioned +package v1beta1 type DaemonSetExpansion interface{} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned/horizontalpodautoscaler.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/extensions/v1beta1/horizontalpodautoscaler.go similarity index 79% rename from Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned/horizontalpodautoscaler.go rename to Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/extensions/v1beta1/horizontalpodautoscaler.go index 2cffcee46565..93b486b894c3 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned/horizontalpodautoscaler.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/extensions/v1beta1/horizontalpodautoscaler.go @@ -14,11 +14,11 @@ See the License for the specific language governing permissions and limitations under the License. */ -package unversioned +package v1beta1 import ( api "k8s.io/kubernetes/pkg/api" - extensions "k8s.io/kubernetes/pkg/apis/extensions" + v1beta1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" watch "k8s.io/kubernetes/pkg/watch" ) @@ -30,13 +30,13 @@ type HorizontalPodAutoscalersGetter interface { // HorizontalPodAutoscalerInterface has methods to work with HorizontalPodAutoscaler resources. type HorizontalPodAutoscalerInterface interface { - Create(*extensions.HorizontalPodAutoscaler) (*extensions.HorizontalPodAutoscaler, error) - Update(*extensions.HorizontalPodAutoscaler) (*extensions.HorizontalPodAutoscaler, error) - UpdateStatus(*extensions.HorizontalPodAutoscaler) (*extensions.HorizontalPodAutoscaler, error) + Create(*v1beta1.HorizontalPodAutoscaler) (*v1beta1.HorizontalPodAutoscaler, error) + Update(*v1beta1.HorizontalPodAutoscaler) (*v1beta1.HorizontalPodAutoscaler, error) + UpdateStatus(*v1beta1.HorizontalPodAutoscaler) (*v1beta1.HorizontalPodAutoscaler, error) Delete(name string, options *api.DeleteOptions) error DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error - Get(name string) (*extensions.HorizontalPodAutoscaler, error) - List(opts api.ListOptions) (*extensions.HorizontalPodAutoscalerList, error) + Get(name string) (*v1beta1.HorizontalPodAutoscaler, error) + List(opts api.ListOptions) (*v1beta1.HorizontalPodAutoscalerList, error) Watch(opts api.ListOptions) (watch.Interface, error) HorizontalPodAutoscalerExpansion } @@ -56,8 +56,8 @@ func newHorizontalPodAutoscalers(c *ExtensionsClient, namespace string) *horizon } // Create takes the representation of a horizontalPodAutoscaler and creates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. -func (c *horizontalPodAutoscalers) Create(horizontalPodAutoscaler *extensions.HorizontalPodAutoscaler) (result *extensions.HorizontalPodAutoscaler, err error) { - result = &extensions.HorizontalPodAutoscaler{} +func (c *horizontalPodAutoscalers) Create(horizontalPodAutoscaler *v1beta1.HorizontalPodAutoscaler) (result *v1beta1.HorizontalPodAutoscaler, err error) { + result = &v1beta1.HorizontalPodAutoscaler{} err = c.client.Post(). Namespace(c.ns). Resource("horizontalpodautoscalers"). @@ -68,8 +68,8 @@ func (c *horizontalPodAutoscalers) Create(horizontalPodAutoscaler *extensions.Ho } // Update takes the representation of a horizontalPodAutoscaler and updates it. 
Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. -func (c *horizontalPodAutoscalers) Update(horizontalPodAutoscaler *extensions.HorizontalPodAutoscaler) (result *extensions.HorizontalPodAutoscaler, err error) { - result = &extensions.HorizontalPodAutoscaler{} +func (c *horizontalPodAutoscalers) Update(horizontalPodAutoscaler *v1beta1.HorizontalPodAutoscaler) (result *v1beta1.HorizontalPodAutoscaler, err error) { + result = &v1beta1.HorizontalPodAutoscaler{} err = c.client.Put(). Namespace(c.ns). Resource("horizontalpodautoscalers"). @@ -80,8 +80,8 @@ func (c *horizontalPodAutoscalers) Update(horizontalPodAutoscaler *extensions.Ho return } -func (c *horizontalPodAutoscalers) UpdateStatus(horizontalPodAutoscaler *extensions.HorizontalPodAutoscaler) (result *extensions.HorizontalPodAutoscaler, err error) { - result = &extensions.HorizontalPodAutoscaler{} +func (c *horizontalPodAutoscalers) UpdateStatus(horizontalPodAutoscaler *v1beta1.HorizontalPodAutoscaler) (result *v1beta1.HorizontalPodAutoscaler, err error) { + result = &v1beta1.HorizontalPodAutoscaler{} err = c.client.Put(). Namespace(c.ns). Resource("horizontalpodautoscalers"). @@ -116,8 +116,8 @@ func (c *horizontalPodAutoscalers) DeleteCollection(options *api.DeleteOptions, } // Get takes name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any. -func (c *horizontalPodAutoscalers) Get(name string) (result *extensions.HorizontalPodAutoscaler, err error) { - result = &extensions.HorizontalPodAutoscaler{} +func (c *horizontalPodAutoscalers) Get(name string) (result *v1beta1.HorizontalPodAutoscaler, err error) { + result = &v1beta1.HorizontalPodAutoscaler{} err = c.client.Get(). Namespace(c.ns). Resource("horizontalpodautoscalers"). @@ -128,8 +128,8 @@ func (c *horizontalPodAutoscalers) Get(name string) (result *extensions.Horizont } // List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors. -func (c *horizontalPodAutoscalers) List(opts api.ListOptions) (result *extensions.HorizontalPodAutoscalerList, err error) { - result = &extensions.HorizontalPodAutoscalerList{} +func (c *horizontalPodAutoscalers) List(opts api.ListOptions) (result *v1beta1.HorizontalPodAutoscalerList, err error) { + result = &v1beta1.HorizontalPodAutoscalerList{} err = c.client.Get(). Namespace(c.ns). Resource("horizontalpodautoscalers"). diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/extensions/v1beta1/ingress.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/extensions/v1beta1/ingress.go new file mode 100644 index 000000000000..96b4d04396a4 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/extensions/v1beta1/ingress.go @@ -0,0 +1,150 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + api "k8s.io/kubernetes/pkg/api" + v1beta1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" + watch "k8s.io/kubernetes/pkg/watch" +) + +// IngressesGetter has a method to return an IngressInterface. +// A group's client should implement this interface. +type IngressesGetter interface { + Ingresses(namespace string) IngressInterface +} + +// IngressInterface has methods to work with Ingress resources. +type IngressInterface interface { + Create(*v1beta1.Ingress) (*v1beta1.Ingress, error) + Update(*v1beta1.Ingress) (*v1beta1.Ingress, error) + UpdateStatus(*v1beta1.Ingress) (*v1beta1.Ingress, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*v1beta1.Ingress, error) + List(opts api.ListOptions) (*v1beta1.IngressList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + IngressExpansion +} + +// ingresses implements IngressInterface +type ingresses struct { + client *ExtensionsClient + ns string +} + +// newIngresses returns an Ingresses +func newIngresses(c *ExtensionsClient, namespace string) *ingresses { + return &ingresses{ + client: c, + ns: namespace, + } +} + +// Create takes the representation of an ingress and creates it. Returns the server's representation of the ingress, and an error, if there is any. +func (c *ingresses) Create(ingress *v1beta1.Ingress) (result *v1beta1.Ingress, err error) { + result = &v1beta1.Ingress{} + err = c.client.Post(). + Namespace(c.ns). + Resource("ingresses"). + Body(ingress). + Do(). + Into(result) + return +} + +// Update takes the representation of an ingress and updates it. Returns the server's representation of the ingress, and an error, if there is any. +func (c *ingresses) Update(ingress *v1beta1.Ingress) (result *v1beta1.Ingress, err error) { + result = &v1beta1.Ingress{} + err = c.client.Put(). + Namespace(c.ns). + Resource("ingresses"). + Name(ingress.Name). + Body(ingress). + Do(). + Into(result) + return +} + +func (c *ingresses) UpdateStatus(ingress *v1beta1.Ingress) (result *v1beta1.Ingress, err error) { + result = &v1beta1.Ingress{} + err = c.client.Put(). + Namespace(c.ns). + Resource("ingresses"). + Name(ingress.Name). + SubResource("status"). + Body(ingress). + Do(). + Into(result) + return +} + +// Delete takes name of the ingress and deletes it. Returns an error if one occurs. +func (c *ingresses) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("ingresses"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *ingresses) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("ingresses"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the ingress, and returns the corresponding ingress object, and an error if there is any. +func (c *ingresses) Get(name string) (result *v1beta1.Ingress, err error) { + result = &v1beta1.Ingress{} + err = c.client.Get(). + Namespace(c.ns). + Resource("ingresses"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Ingresses that match those selectors. +func (c *ingresses) List(opts api.ListOptions) (result *v1beta1.IngressList, err error) { + result = &v1beta1.IngressList{} + err = c.client.Get().
+ Namespace(c.ns). + Resource("ingresses"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested ingresses. +func (c *ingresses) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("ingresses"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/extensions/v1beta1/job.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/extensions/v1beta1/job.go new file mode 100644 index 000000000000..c518c5abda69 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/extensions/v1beta1/job.go @@ -0,0 +1,150 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + api "k8s.io/kubernetes/pkg/api" + v1beta1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" + watch "k8s.io/kubernetes/pkg/watch" +) + +// JobsGetter has a method to return a JobInterface. +// A group's client should implement this interface. +type JobsGetter interface { + Jobs(namespace string) JobInterface +} + +// JobInterface has methods to work with Job resources. +type JobInterface interface { + Create(*v1beta1.Job) (*v1beta1.Job, error) + Update(*v1beta1.Job) (*v1beta1.Job, error) + UpdateStatus(*v1beta1.Job) (*v1beta1.Job, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*v1beta1.Job, error) + List(opts api.ListOptions) (*v1beta1.JobList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + JobExpansion +} + +// jobs implements JobInterface +type jobs struct { + client *ExtensionsClient + ns string +} + +// newJobs returns a Jobs +func newJobs(c *ExtensionsClient, namespace string) *jobs { + return &jobs{ + client: c, + ns: namespace, + } +} + +// Create takes the representation of a job and creates it. Returns the server's representation of the job, and an error, if there is any. +func (c *jobs) Create(job *v1beta1.Job) (result *v1beta1.Job, err error) { + result = &v1beta1.Job{} + err = c.client.Post(). + Namespace(c.ns). + Resource("jobs"). + Body(job). + Do(). + Into(result) + return +} + +// Update takes the representation of a job and updates it. Returns the server's representation of the job, and an error, if there is any. +func (c *jobs) Update(job *v1beta1.Job) (result *v1beta1.Job, err error) { + result = &v1beta1.Job{} + err = c.client.Put(). + Namespace(c.ns). + Resource("jobs"). + Name(job.Name). + Body(job). + Do(). + Into(result) + return +} + +func (c *jobs) UpdateStatus(job *v1beta1.Job) (result *v1beta1.Job, err error) { + result = &v1beta1.Job{} + err = c.client.Put(). + Namespace(c.ns). + Resource("jobs"). + Name(job.Name). + SubResource("status"). 
+ Body(job). + Do(). + Into(result) + return +} + +// Delete takes name of the job and deletes it. Returns an error if one occurs. +func (c *jobs) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("jobs"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *jobs) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("jobs"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the job, and returns the corresponding job object, and an error if there is any. +func (c *jobs) Get(name string) (result *v1beta1.Job, err error) { + result = &v1beta1.Job{} + err = c.client.Get(). + Namespace(c.ns). + Resource("jobs"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Jobs that match those selectors. +func (c *jobs) List(opts api.ListOptions) (result *v1beta1.JobList, err error) { + result = &v1beta1.JobList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("jobs"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested jobs. +func (c *jobs) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("jobs"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/extensions/v1beta1/replicaset.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/extensions/v1beta1/replicaset.go new file mode 100644 index 000000000000..1822f052c9b8 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/extensions/v1beta1/replicaset.go @@ -0,0 +1,150 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + api "k8s.io/kubernetes/pkg/api" + v1beta1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" + watch "k8s.io/kubernetes/pkg/watch" +) + +// ReplicaSetsGetter has a method to return a ReplicaSetInterface. +// A group's client should implement this interface. +type ReplicaSetsGetter interface { + ReplicaSets(namespace string) ReplicaSetInterface +} + +// ReplicaSetInterface has methods to work with ReplicaSet resources. 
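The Ingress, Job, and ReplicaSet clients in this diff all follow the same generated shape: a namespaced getter on the group client hands back a typed interface whose methods map one-to-one onto REST verbs under /apis/extensions/v1beta1. A minimal consumption sketch follows; the Host value is a placeholder, and NewForConfig on this release_1_2 extensions client is assumed from the constructor pattern the Autoscaling and Batch clients further down make explicit.

package main

import (
	"fmt"

	api "k8s.io/kubernetes/pkg/api"
	extensionsv1beta1 "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/extensions/v1beta1"
	restclient "k8s.io/kubernetes/pkg/client/restclient"
)

func main() {
	// Placeholder address; in practice the config usually comes from a
	// kubeconfig file or in-cluster settings.
	config := &restclient.Config{Host: "http://127.0.0.1:8080"}

	// Assumed constructor, per the group-client pattern in this diff.
	client, err := extensionsv1beta1.NewForConfig(config)
	if err != nil {
		panic(err)
	}

	// IngressesGetter scopes the typed client to one namespace.
	ingresses := client.Ingresses("default")

	// List issues GET .../namespaces/default/ingresses.
	list, err := ingresses.List(api.ListOptions{})
	if err != nil {
		panic(err)
	}
	for i := range list.Items {
		fmt.Println(list.Items[i].Name)
	}
}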
+type ReplicaSetInterface interface { + Create(*v1beta1.ReplicaSet) (*v1beta1.ReplicaSet, error) + Update(*v1beta1.ReplicaSet) (*v1beta1.ReplicaSet, error) + UpdateStatus(*v1beta1.ReplicaSet) (*v1beta1.ReplicaSet, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*v1beta1.ReplicaSet, error) + List(opts api.ListOptions) (*v1beta1.ReplicaSetList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + ReplicaSetExpansion +} + +// replicaSets implements ReplicaSetInterface +type replicaSets struct { + client *ExtensionsClient + ns string +} + +// newReplicaSets returns a ReplicaSets +func newReplicaSets(c *ExtensionsClient, namespace string) *replicaSets { + return &replicaSets{ + client: c, + ns: namespace, + } +} + +// Create takes the representation of a replicaSet and creates it. Returns the server's representation of the replicaSet, and an error, if there is any. +func (c *replicaSets) Create(replicaSet *v1beta1.ReplicaSet) (result *v1beta1.ReplicaSet, err error) { + result = &v1beta1.ReplicaSet{} + err = c.client.Post(). + Namespace(c.ns). + Resource("replicasets"). + Body(replicaSet). + Do(). + Into(result) + return +} + +// Update takes the representation of a replicaSet and updates it. Returns the server's representation of the replicaSet, and an error, if there is any. +func (c *replicaSets) Update(replicaSet *v1beta1.ReplicaSet) (result *v1beta1.ReplicaSet, err error) { + result = &v1beta1.ReplicaSet{} + err = c.client.Put(). + Namespace(c.ns). + Resource("replicasets"). + Name(replicaSet.Name). + Body(replicaSet). + Do(). + Into(result) + return +} + +func (c *replicaSets) UpdateStatus(replicaSet *v1beta1.ReplicaSet) (result *v1beta1.ReplicaSet, err error) { + result = &v1beta1.ReplicaSet{} + err = c.client.Put(). + Namespace(c.ns). + Resource("replicasets"). + Name(replicaSet.Name). + SubResource("status"). + Body(replicaSet). + Do(). + Into(result) + return +} + +// Delete takes name of the replicaSet and deletes it. Returns an error if one occurs. +func (c *replicaSets) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("replicasets"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *replicaSets) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("replicasets"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the replicaSet, and returns the corresponding replicaSet object, and an error if there is any. +func (c *replicaSets) Get(name string) (result *v1beta1.ReplicaSet, err error) { + result = &v1beta1.ReplicaSet{} + err = c.client.Get(). + Namespace(c.ns). + Resource("replicasets"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ReplicaSets that match those selectors. +func (c *replicaSets) List(opts api.ListOptions) (result *v1beta1.ReplicaSetList, err error) { + result = &v1beta1.ReplicaSetList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("replicasets"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested replicaSets. 
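DeleteCollection above is the one verb that takes both option structs: DeleteOptions governs how each object is removed, while ListOptions, sent through VersionedParams, selects which objects are in scope. A sketch that deletes every ReplicaSet matching a label; the app=web label is a placeholder, and labels.SelectorFromSet comes from the pkg/labels package already imported elsewhere in this diff.

package example

import (
	api "k8s.io/kubernetes/pkg/api"
	extensionsv1beta1 "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/extensions/v1beta1"
	"k8s.io/kubernetes/pkg/labels"
)

// deleteByLabel removes all ReplicaSets labeled app=web in the namespace.
// Any ReplicaSetsGetter implementation (real or fake) can be passed in.
func deleteByLabel(rs extensionsv1beta1.ReplicaSetsGetter, namespace string) error {
	opts := api.ListOptions{
		// SelectorFromSet turns a plain map into a labels.Selector.
		LabelSelector: labels.SelectorFromSet(labels.Set{"app": "web"}),
	}
	return rs.ReplicaSets(namespace).DeleteCollection(&api.DeleteOptions{}, opts)
}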
+func (c *replicaSets) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("replicasets"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/extensions/v1beta1/scale.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/extensions/v1beta1/scale.go new file mode 100644 index 000000000000..231fe5ccf706 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/extensions/v1beta1/scale.go @@ -0,0 +1,42 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +// ScalesGetter has a method to return a ScaleInterface. +// A group's client should implement this interface. +type ScalesGetter interface { + Scales(namespace string) ScaleInterface +} + +// ScaleInterface has methods to work with Scale resources. +type ScaleInterface interface { + ScaleExpansion +} + +// scales implements ScaleInterface +type scales struct { + client *ExtensionsClient + ns string +} + +// newScales returns a Scales +func newScales(c *ExtensionsClient, namespace string) *scales { + return &scales{ + client: c, + ns: namespace, + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/extensions/v1beta1/scale_expansion.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/extensions/v1beta1/scale_expansion.go new file mode 100644 index 000000000000..488863d9f89c --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/extensions/v1beta1/scale_expansion.go @@ -0,0 +1,65 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "k8s.io/kubernetes/pkg/api/meta" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" +) + +// The ScaleExpansion interface allows manually adding extra methods to the ScaleInterface. +type ScaleExpansion interface { + Get(kind string, name string) (*v1beta1.Scale, error) + Update(kind string, scale *v1beta1.Scale) (*v1beta1.Scale, error) +} + +// Get takes the reference to scale subresource and returns the subresource or error, if one occurs. 
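Scale itself carries no standard verbs, so everything useful lives in the two ScaleExpansion methods above, implemented just below; both identify their target by kind and name and then hit its scale subresource, mapping the kind to a resource via KindToResource as the TODOs note. A resizing sketch, assuming the usual Spec.Replicas field on v1beta1.Scale; the kind and names are placeholders.

package example

import (
	extensionsv1beta1 "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/extensions/v1beta1"
)

// resize sets the desired replica count on a ReplicaSet through its
// scale subresource rather than by updating the ReplicaSet itself.
func resize(scales extensionsv1beta1.ScalesGetter, namespace, name string, replicas int32) error {
	scale, err := scales.Scales(namespace).Get("ReplicaSet", name)
	if err != nil {
		return err
	}
	scale.Spec.Replicas = replicas
	_, err = scales.Scales(namespace).Update("ReplicaSet", scale)
	return err
}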
+func (c *scales) Get(kind string, name string) (result *v1beta1.Scale, err error) { + result = &v1beta1.Scale{} + + // TODO this method needs to take a proper unambiguous kind + fullyQualifiedKind := unversioned.GroupVersionKind{Kind: kind} + resource, _ := meta.KindToResource(fullyQualifiedKind) + + err = c.client.Get(). + Namespace(c.ns). + Resource(resource.Resource). + Name(name). + SubResource("scale"). + Do(). + Into(result) + return +} + +func (c *scales) Update(kind string, scale *v1beta1.Scale) (result *v1beta1.Scale, err error) { + result = &v1beta1.Scale{} + + // TODO this method needs to take a proper unambiguous kind + fullyQualifiedKind := unversioned.GroupVersionKind{Kind: kind} + resource, _ := meta.KindToResource(fullyQualifiedKind) + + err = c.client.Put(). + Namespace(scale.Namespace). + Resource(resource.Resource). + Name(scale.Name). + SubResource("scale"). + Body(scale). + Do(). + Into(result) + return +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/extensions/v1beta1/thirdpartyresource.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/extensions/v1beta1/thirdpartyresource.go similarity index 93% rename from Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/extensions/v1beta1/thirdpartyresource.go rename to Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/extensions/v1beta1/thirdpartyresource.go index cfd128dc3c73..81d73d32e235 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/extensions/v1beta1/thirdpartyresource.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/extensions/v1beta1/thirdpartyresource.go @@ -25,7 +25,7 @@ import ( // ThirdPartyResourcesGetter has a method to return a ThirdPartyResourceInterface. // A group's client should implement this interface. type ThirdPartyResourcesGetter interface { - ThirdPartyResources(namespace string) ThirdPartyResourceInterface + ThirdPartyResources() ThirdPartyResourceInterface } // ThirdPartyResourceInterface has methods to work with ThirdPartyResource resources. @@ -43,14 +43,12 @@ type ThirdPartyResourceInterface interface { // thirdPartyResources implements ThirdPartyResourceInterface type thirdPartyResources struct { client *ExtensionsClient - ns string } // newThirdPartyResources returns a ThirdPartyResources -func newThirdPartyResources(c *ExtensionsClient, namespace string) *thirdPartyResources { +func newThirdPartyResources(c *ExtensionsClient) *thirdPartyResources { return &thirdPartyResources{ client: c, - ns: namespace, } } @@ -58,7 +56,6 @@ func newThirdPartyResources(c *ExtensionsClient, namespace string) *thirdPartyRe func (c *thirdPartyResources) Create(thirdPartyResource *v1beta1.ThirdPartyResource) (result *v1beta1.ThirdPartyResource, err error) { result = &v1beta1.ThirdPartyResource{} err = c.client.Post(). - Namespace(c.ns). Resource("thirdpartyresources"). Body(thirdPartyResource). Do(). @@ -70,7 +67,6 @@ func (c *thirdPartyResources) Create(thirdPartyResource *v1beta1.ThirdPartyResou func (c *thirdPartyResources) Update(thirdPartyResource *v1beta1.ThirdPartyResource) (result *v1beta1.ThirdPartyResource, err error) { result = &v1beta1.ThirdPartyResource{} err = c.client.Put(). - Namespace(c.ns). Resource("thirdpartyresources"). Name(thirdPartyResource.Name). Body(thirdPartyResource). 
@@ -82,7 +78,6 @@ func (c *thirdPartyResources) Update(thirdPartyResource *v1beta1.ThirdPartyResou // Delete takes name of the thirdPartyResource and deletes it. Returns an error if one occurs. func (c *thirdPartyResources) Delete(name string, options *api.DeleteOptions) error { return c.client.Delete(). - Namespace(c.ns). Resource("thirdpartyresources"). Name(name). Body(options). @@ -93,7 +88,6 @@ func (c *thirdPartyResources) Delete(name string, options *api.DeleteOptions) er // DeleteCollection deletes a collection of objects. func (c *thirdPartyResources) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { return c.client.Delete(). - Namespace(c.ns). Resource("thirdpartyresources"). VersionedParams(&listOptions, api.ParameterCodec). Body(options). @@ -105,7 +99,6 @@ func (c *thirdPartyResources) DeleteCollection(options *api.DeleteOptions, listO func (c *thirdPartyResources) Get(name string) (result *v1beta1.ThirdPartyResource, err error) { result = &v1beta1.ThirdPartyResource{} err = c.client.Get(). - Namespace(c.ns). Resource("thirdpartyresources"). Name(name). Do(). @@ -117,7 +110,6 @@ func (c *thirdPartyResources) Get(name string) (result *v1beta1.ThirdPartyResour func (c *thirdPartyResources) List(opts api.ListOptions) (result *v1beta1.ThirdPartyResourceList, err error) { result = &v1beta1.ThirdPartyResourceList{} err = c.client.Get(). - Namespace(c.ns). Resource("thirdpartyresources"). VersionedParams(&opts, api.ParameterCodec). Do(). @@ -129,7 +121,6 @@ func (c *thirdPartyResources) List(opts api.ListOptions) (result *v1beta1.ThirdP func (c *thirdPartyResources) Watch(opts api.ListOptions) (watch.Interface, error) { return c.client.Get(). Prefix("watch"). - Namespace(c.ns). Resource("thirdpartyresources"). VersionedParams(&opts, api.ParameterCodec). Watch() diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/clientset.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/clientset.go new file mode 100644 index 000000000000..29093e061d73 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/clientset.go @@ -0,0 +1,140 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package release_1_3 + +import ( + "github.com/golang/glog" + v1autoscaling "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/autoscaling/v1" + v1batch "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/batch/v1" + v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1" + v1beta1extensions "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1" + restclient "k8s.io/kubernetes/pkg/client/restclient" + discovery "k8s.io/kubernetes/pkg/client/typed/discovery" + "k8s.io/kubernetes/pkg/util/flowcontrol" +) + +type Interface interface { + Discovery() discovery.DiscoveryInterface + Core() v1core.CoreInterface + Extensions() v1beta1extensions.ExtensionsInterface + Autoscaling() v1autoscaling.AutoscalingInterface + Batch() v1batch.BatchInterface +} + +// Clientset contains the clients for groups. Each group has exactly one +// version included in a Clientset. +type Clientset struct { + *discovery.DiscoveryClient + *v1core.CoreClient + *v1beta1extensions.ExtensionsClient + *v1autoscaling.AutoscalingClient + *v1batch.BatchClient +} + +// Core retrieves the CoreClient +func (c *Clientset) Core() v1core.CoreInterface { + if c == nil { + return nil + } + return c.CoreClient +} + +// Extensions retrieves the ExtensionsClient +func (c *Clientset) Extensions() v1beta1extensions.ExtensionsInterface { + if c == nil { + return nil + } + return c.ExtensionsClient +} + +// Autoscaling retrieves the AutoscalingClient +func (c *Clientset) Autoscaling() v1autoscaling.AutoscalingInterface { + if c == nil { + return nil + } + return c.AutoscalingClient +} + +// Batch retrieves the BatchClient +func (c *Clientset) Batch() v1batch.BatchInterface { + if c == nil { + return nil + } + return c.BatchClient +} + +// Discovery retrieves the DiscoveryClient +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + return c.DiscoveryClient +} + +// NewForConfig creates a new Clientset for the given config. +func NewForConfig(c *restclient.Config) (*Clientset, error) { + configShallowCopy := *c + if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) + } + var clientset Clientset + var err error + clientset.CoreClient, err = v1core.NewForConfig(&configShallowCopy) + if err != nil { + return &clientset, err + } + clientset.ExtensionsClient, err = v1beta1extensions.NewForConfig(&configShallowCopy) + if err != nil { + return &clientset, err + } + clientset.AutoscalingClient, err = v1autoscaling.NewForConfig(&configShallowCopy) + if err != nil { + return &clientset, err + } + clientset.BatchClient, err = v1batch.NewForConfig(&configShallowCopy) + if err != nil { + return &clientset, err + } + + clientset.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) + if err != nil { + glog.Errorf("failed to create the DiscoveryClient: %v", err) + } + return &clientset, err +} + +// NewForConfigOrDie creates a new Clientset for the given config and +// panics if there is an error in the config. 
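NewForConfig above shallow-copies the config before handing it to each group constructor, so the per-group defaulting done in setConfigDefaults never mutates the caller's struct, and a single token-bucket rate limiter is shared across all groups whenever QPS is set. A construction sketch; the Host value is a placeholder.

package main

import (
	"fmt"

	api "k8s.io/kubernetes/pkg/api"
	release_1_3 "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3"
	restclient "k8s.io/kubernetes/pkg/client/restclient"
)

func main() {
	config := &restclient.Config{
		Host:  "http://127.0.0.1:8080", // placeholder API server address
		QPS:   20,                      // non-zero QPS activates the shared rate limiter
		Burst: 40,
	}
	cs, err := release_1_3.NewForConfig(config)
	if err != nil {
		panic(err)
	}

	// One clientset fans out to every group: Core(), Extensions(),
	// Autoscaling(), Batch(), plus Discovery().
	jobs, err := cs.Batch().Jobs("default").List(api.ListOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("found %d jobs\n", len(jobs.Items))
}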
+func NewForConfigOrDie(c *restclient.Config) *Clientset { + var clientset Clientset + clientset.CoreClient = v1core.NewForConfigOrDie(c) + clientset.ExtensionsClient = v1beta1extensions.NewForConfigOrDie(c) + clientset.AutoscalingClient = v1autoscaling.NewForConfigOrDie(c) + clientset.BatchClient = v1batch.NewForConfigOrDie(c) + + clientset.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) + return &clientset +} + +// New creates a new Clientset for the given RESTClient. +func New(c *restclient.RESTClient) *Clientset { + var clientset Clientset + clientset.CoreClient = v1core.New(c) + clientset.ExtensionsClient = v1beta1extensions.New(c) + clientset.AutoscalingClient = v1autoscaling.New(c) + clientset.BatchClient = v1batch.New(c) + + clientset.DiscoveryClient = discovery.NewDiscoveryClient(c) + return &clientset +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/doc.go new file mode 100644 index 000000000000..721f4723451d --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with arguments: --clientset-name=release_1_3 --input=[api/v1,extensions/v1beta1,autoscaling/v1,batch/v1] + +// This package has the automatically generated clientset. +package release_1_3 diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/fake/clientset_generated.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/fake/clientset_generated.go new file mode 100644 index 000000000000..4c8913116f50 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/fake/clientset_generated.go @@ -0,0 +1,86 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package fake + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apimachinery/registered" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3" + v1autoscaling "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/autoscaling/v1" + fakev1autoscaling "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/autoscaling/v1/fake" + v1batch "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/batch/v1" + fakev1batch "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/batch/v1/fake" + v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1" + fakev1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/fake" + v1beta1extensions "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1" + fakev1beta1extensions "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1/fake" + "k8s.io/kubernetes/pkg/client/testing/core" + "k8s.io/kubernetes/pkg/client/typed/discovery" + fakediscovery "k8s.io/kubernetes/pkg/client/typed/discovery/fake" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/watch" +) + +// NewSimpleClientset returns a clientset that will respond with the provided objects +func NewSimpleClientset(objects ...runtime.Object) *Clientset { + o := core.NewObjects(api.Scheme, api.Codecs.UniversalDecoder()) + for _, obj := range objects { + if err := o.Add(obj); err != nil { + panic(err) + } + } + + fakePtr := core.Fake{} + fakePtr.AddReactor("*", "*", core.ObjectReaction(o, registered.RESTMapper())) + + fakePtr.AddWatchReactor("*", core.DefaultWatchReactor(watch.NewFake(), nil)) + + return &Clientset{fakePtr} +} + +// Clientset implements clientset.Interface. Meant to be embedded into a +// struct to get a default implementation. This makes faking out just the method +// you want to test easier. +type Clientset struct { + core.Fake +} + +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + return &fakediscovery.FakeDiscovery{Fake: &c.Fake} +} + +var _ clientset.Interface = &Clientset{} + +// Core retrieves the CoreClient +func (c *Clientset) Core() v1core.CoreInterface { + return &fakev1core.FakeCore{Fake: &c.Fake} +} + +// Extensions retrieves the ExtensionsClient +func (c *Clientset) Extensions() v1beta1extensions.ExtensionsInterface { + return &fakev1beta1extensions.FakeExtensions{Fake: &c.Fake} +} + +// Autoscaling retrieves the AutoscalingClient +func (c *Clientset) Autoscaling() v1autoscaling.AutoscalingInterface { + return &fakev1autoscaling.FakeAutoscaling{Fake: &c.Fake} +} + +// Batch retrieves the BatchClient +func (c *Clientset) Batch() v1batch.BatchInterface { + return &fakev1batch.FakeBatch{Fake: &c.Fake} +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/fake/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/fake/doc.go new file mode 100644 index 000000000000..4866da9984d3 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with arguments: --clientset-name=release_1_3 --input=[api/v1,extensions/v1beta1,autoscaling/v1,batch/v1] + +// This package has the automatically generated fake clientset. +package fake diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/import_known_versions.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/import_known_versions.go new file mode 100644 index 000000000000..2694d639318e --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/import_known_versions.go @@ -0,0 +1,37 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package release_1_3 + +// These imports are the API groups the client will support. +import ( + "fmt" + + _ "k8s.io/kubernetes/pkg/api/install" + "k8s.io/kubernetes/pkg/apimachinery/registered" + _ "k8s.io/kubernetes/pkg/apis/authorization/install" + _ "k8s.io/kubernetes/pkg/apis/autoscaling/install" + _ "k8s.io/kubernetes/pkg/apis/batch/install" + _ "k8s.io/kubernetes/pkg/apis/componentconfig/install" + _ "k8s.io/kubernetes/pkg/apis/extensions/install" + _ "k8s.io/kubernetes/pkg/apis/metrics/install" +) + +func init() { + if missingVersions := registered.ValidateEnvRequestedVersions(); len(missingVersions) != 0 { + panic(fmt.Sprintf("KUBE_API_VERSIONS contains versions that are not installed: %q.", missingVersions)) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/autoscaling/v1/autoscaling_client.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/autoscaling/v1/autoscaling_client.go new file mode 100644 index 000000000000..330ce852bc41 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/autoscaling/v1/autoscaling_client.go @@ -0,0 +1,101 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + api "k8s.io/kubernetes/pkg/api" + registered "k8s.io/kubernetes/pkg/apimachinery/registered" + restclient "k8s.io/kubernetes/pkg/client/restclient" +) + +type AutoscalingInterface interface { + GetRESTClient() *restclient.RESTClient + HorizontalPodAutoscalersGetter +} + +// AutoscalingClient is used to interact with features provided by the Autoscaling group. +type AutoscalingClient struct { + *restclient.RESTClient +} + +func (c *AutoscalingClient) HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerInterface { + return newHorizontalPodAutoscalers(c, namespace) +} + +// NewForConfig creates a new AutoscalingClient for the given config. +func NewForConfig(c *restclient.Config) (*AutoscalingClient, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := restclient.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &AutoscalingClient{client}, nil +} + +// NewForConfigOrDie creates a new AutoscalingClient for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *restclient.Config) *AutoscalingClient { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new AutoscalingClient for the given RESTClient. +func New(c *restclient.RESTClient) *AutoscalingClient { + return &AutoscalingClient{c} +} + +func setConfigDefaults(config *restclient.Config) error { + // if autoscaling group is not registered, return an error + g, err := registered.Group("autoscaling") + if err != nil { + return err + } + config.APIPath = "/apis" + if config.UserAgent == "" { + config.UserAgent = restclient.DefaultKubernetesUserAgent() + } + // TODO: Unconditionally set the config.Version, until we fix the config. + //if config.Version == "" { + copyGroupVersion := g.GroupVersion + config.GroupVersion = ©GroupVersion + //} + + config.NegotiatedSerializer = api.Codecs + + if config.QPS == 0 { + config.QPS = 5 + } + if config.Burst == 0 { + config.Burst = 10 + } + return nil +} + +// GetRESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *AutoscalingClient) GetRESTClient() *restclient.RESTClient { + if c == nil { + return nil + } + return c.RESTClient +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/autoscaling/v1/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/autoscaling/v1/doc.go new file mode 100644 index 000000000000..2dbfc490135c --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/autoscaling/v1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// This package is generated by client-gen with arguments: --clientset-name=release_1_3 --input=[api/v1,extensions/v1beta1,autoscaling/v1,batch/v1] + +// This package has the automatically generated typed clients. +package v1 diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/autoscaling/v1/fake/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/autoscaling/v1/fake/doc.go new file mode 100644 index 000000000000..924812c706ec --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/autoscaling/v1/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with arguments: --clientset-name=release_1_3 --input=[api/v1,extensions/v1beta1,autoscaling/v1,batch/v1] + +// Package fake has the automatically generated clients. +package fake diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/autoscaling/v1/fake/fake_autoscaling_client.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/autoscaling/v1/fake/fake_autoscaling_client.go new file mode 100644 index 000000000000..70c665c9347c --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/autoscaling/v1/fake/fake_autoscaling_client.go @@ -0,0 +1,37 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + v1 "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/autoscaling/v1" + restclient "k8s.io/kubernetes/pkg/client/restclient" + core "k8s.io/kubernetes/pkg/client/testing/core" +) + +type FakeAutoscaling struct { + *core.Fake +} + +func (c *FakeAutoscaling) HorizontalPodAutoscalers(namespace string) v1.HorizontalPodAutoscalerInterface { + return &FakeHorizontalPodAutoscalers{c, namespace} +} + +// GetRESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
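The fake packages above let the same call sites run without a server: NewSimpleClientset seeds an in-memory object tracker and routes every typed call through core.Fake reactors, which is also why the fake group clients return nil from GetRESTClient, as the method just below does. A test-style sketch; the pkg/api/v1 import path for the versioned ObjectMeta is an assumption based on this tree's layout, and the object names are placeholders.

package main

import (
	"fmt"

	api "k8s.io/kubernetes/pkg/api"
	apiv1 "k8s.io/kubernetes/pkg/api/v1" // assumed home of the versioned ObjectMeta
	batchv1 "k8s.io/kubernetes/pkg/apis/batch/v1"
	"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/fake"
)

func main() {
	seed := &batchv1.Job{ObjectMeta: apiv1.ObjectMeta{Name: "demo", Namespace: "default"}}

	// No API server involved: the reactors serve the seeded objects.
	cs := fake.NewSimpleClientset(seed)

	jobs, err := cs.Batch().Jobs("default").List(api.ListOptions{})
	if err != nil {
		panic(err)
	}
	if len(jobs.Items) > 0 {
		fmt.Println(jobs.Items[0].Name)
	}
}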
+func (c *FakeAutoscaling) GetRESTClient() *restclient.RESTClient { + return nil +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned/fake/fake_horizontalpodautoscaler.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/autoscaling/v1/fake/fake_horizontalpodautoscaler.go similarity index 53% rename from Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned/fake/fake_horizontalpodautoscaler.go rename to Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/autoscaling/v1/fake/fake_horizontalpodautoscaler.go index 71b5cf322de3..497b0f575e82 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned/fake/fake_horizontalpodautoscaler.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/autoscaling/v1/fake/fake_horizontalpodautoscaler.go @@ -18,7 +18,8 @@ package fake import ( api "k8s.io/kubernetes/pkg/api" - extensions "k8s.io/kubernetes/pkg/apis/extensions" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + v1 "k8s.io/kubernetes/pkg/apis/autoscaling/v1" core "k8s.io/kubernetes/pkg/client/testing/core" labels "k8s.io/kubernetes/pkg/labels" watch "k8s.io/kubernetes/pkg/watch" @@ -26,67 +27,69 @@ import ( // FakeHorizontalPodAutoscalers implements HorizontalPodAutoscalerInterface type FakeHorizontalPodAutoscalers struct { - Fake *FakeExtensions + Fake *FakeAutoscaling ns string } -func (c *FakeHorizontalPodAutoscalers) Create(horizontalPodAutoscaler *extensions.HorizontalPodAutoscaler) (result *extensions.HorizontalPodAutoscaler, err error) { +var horizontalpodautoscalersResource = unversioned.GroupVersionResource{Group: "autoscaling", Version: "v1", Resource: "horizontalpodautoscalers"} + +func (c *FakeHorizontalPodAutoscalers) Create(horizontalPodAutoscaler *v1.HorizontalPodAutoscaler) (result *v1.HorizontalPodAutoscaler, err error) { obj, err := c.Fake. - Invokes(core.NewCreateAction("horizontalpodautoscalers", c.ns, horizontalPodAutoscaler), &extensions.HorizontalPodAutoscaler{}) + Invokes(core.NewCreateAction(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler), &v1.HorizontalPodAutoscaler{}) if obj == nil { return nil, err } - return obj.(*extensions.HorizontalPodAutoscaler), err + return obj.(*v1.HorizontalPodAutoscaler), err } -func (c *FakeHorizontalPodAutoscalers) Update(horizontalPodAutoscaler *extensions.HorizontalPodAutoscaler) (result *extensions.HorizontalPodAutoscaler, err error) { +func (c *FakeHorizontalPodAutoscalers) Update(horizontalPodAutoscaler *v1.HorizontalPodAutoscaler) (result *v1.HorizontalPodAutoscaler, err error) { obj, err := c.Fake. - Invokes(core.NewUpdateAction("horizontalpodautoscalers", c.ns, horizontalPodAutoscaler), &extensions.HorizontalPodAutoscaler{}) + Invokes(core.NewUpdateAction(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler), &v1.HorizontalPodAutoscaler{}) if obj == nil { return nil, err } - return obj.(*extensions.HorizontalPodAutoscaler), err + return obj.(*v1.HorizontalPodAutoscaler), err } -func (c *FakeHorizontalPodAutoscalers) UpdateStatus(horizontalPodAutoscaler *extensions.HorizontalPodAutoscaler) (*extensions.HorizontalPodAutoscaler, error) { +func (c *FakeHorizontalPodAutoscalers) UpdateStatus(horizontalPodAutoscaler *v1.HorizontalPodAutoscaler) (*v1.HorizontalPodAutoscaler, error) { obj, err := c.Fake. 
- Invokes(core.NewUpdateSubresourceAction("horizontalpodautoscalers", "status", c.ns, horizontalPodAutoscaler), &extensions.HorizontalPodAutoscaler{}) + Invokes(core.NewUpdateSubresourceAction(horizontalpodautoscalersResource, "status", c.ns, horizontalPodAutoscaler), &v1.HorizontalPodAutoscaler{}) if obj == nil { return nil, err } - return obj.(*extensions.HorizontalPodAutoscaler), err + return obj.(*v1.HorizontalPodAutoscaler), err } func (c *FakeHorizontalPodAutoscalers) Delete(name string, options *api.DeleteOptions) error { _, err := c.Fake. - Invokes(core.NewDeleteAction("horizontalpodautoscalers", c.ns, name), &extensions.HorizontalPodAutoscaler{}) + Invokes(core.NewDeleteAction(horizontalpodautoscalersResource, c.ns, name), &v1.HorizontalPodAutoscaler{}) return err } func (c *FakeHorizontalPodAutoscalers) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - action := core.NewDeleteCollectionAction("horizontalpodautoscalers", c.ns, listOptions) + action := core.NewDeleteCollectionAction(horizontalpodautoscalersResource, c.ns, listOptions) - _, err := c.Fake.Invokes(action, &extensions.HorizontalPodAutoscalerList{}) + _, err := c.Fake.Invokes(action, &v1.HorizontalPodAutoscalerList{}) return err } -func (c *FakeHorizontalPodAutoscalers) Get(name string) (result *extensions.HorizontalPodAutoscaler, err error) { +func (c *FakeHorizontalPodAutoscalers) Get(name string) (result *v1.HorizontalPodAutoscaler, err error) { obj, err := c.Fake. - Invokes(core.NewGetAction("horizontalpodautoscalers", c.ns, name), &extensions.HorizontalPodAutoscaler{}) + Invokes(core.NewGetAction(horizontalpodautoscalersResource, c.ns, name), &v1.HorizontalPodAutoscaler{}) if obj == nil { return nil, err } - return obj.(*extensions.HorizontalPodAutoscaler), err + return obj.(*v1.HorizontalPodAutoscaler), err } -func (c *FakeHorizontalPodAutoscalers) List(opts api.ListOptions) (result *extensions.HorizontalPodAutoscalerList, err error) { +func (c *FakeHorizontalPodAutoscalers) List(opts api.ListOptions) (result *v1.HorizontalPodAutoscalerList, err error) { obj, err := c.Fake. - Invokes(core.NewListAction("horizontalpodautoscalers", c.ns, opts), &extensions.HorizontalPodAutoscalerList{}) + Invokes(core.NewListAction(horizontalpodautoscalersResource, c.ns, opts), &v1.HorizontalPodAutoscalerList{}) if obj == nil { return nil, err @@ -96,8 +99,8 @@ func (c *FakeHorizontalPodAutoscalers) List(opts api.ListOptions) (result *exten if label == nil { label = labels.Everything() } - list := &extensions.HorizontalPodAutoscalerList{} - for _, item := range obj.(*extensions.HorizontalPodAutoscalerList).Items { + list := &v1.HorizontalPodAutoscalerList{} + for _, item := range obj.(*v1.HorizontalPodAutoscalerList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } @@ -108,6 +111,6 @@ func (c *FakeHorizontalPodAutoscalers) List(opts api.ListOptions) (result *exten // Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. func (c *FakeHorizontalPodAutoscalers) Watch(opts api.ListOptions) (watch.Interface, error) { return c.Fake. 
- InvokesWatch(core.NewWatchAction("horizontalpodautoscalers", c.ns, opts)) + InvokesWatch(core.NewWatchAction(horizontalpodautoscalersResource, c.ns, opts)) } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/autoscaling/v1/generated_expansion.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/autoscaling/v1/generated_expansion.go new file mode 100644 index 000000000000..444cc29aeec4 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/autoscaling/v1/generated_expansion.go @@ -0,0 +1,19 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +type HorizontalPodAutoscalerExpansion interface{} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/autoscaling/v1/horizontalpodautoscaler.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/autoscaling/v1/horizontalpodautoscaler.go new file mode 100644 index 000000000000..3b9f61e0a4d2 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/autoscaling/v1/horizontalpodautoscaler.go @@ -0,0 +1,150 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + api "k8s.io/kubernetes/pkg/api" + v1 "k8s.io/kubernetes/pkg/apis/autoscaling/v1" + watch "k8s.io/kubernetes/pkg/watch" +) + +// HorizontalPodAutoscalersGetter has a method to return a HorizontalPodAutoscalerInterface. +// A group's client should implement this interface. +type HorizontalPodAutoscalersGetter interface { + HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerInterface +} + +// HorizontalPodAutoscalerInterface has methods to work with HorizontalPodAutoscaler resources. 
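Watch, declared just below, is served under the /watch prefix and returns a stream rather than a snapshot: the returned watch.Interface delivers events until Stop is called, via its standard ResultChan and Stop methods. A consumption sketch, with client construction as in the earlier examples.

package example

import (
	"fmt"

	api "k8s.io/kubernetes/pkg/api"
	autoscalingv1 "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/autoscaling/v1"
)

// observe prints autoscaler events as they arrive; it blocks until the
// watch is closed by the server or by Stop.
func observe(c autoscalingv1.HorizontalPodAutoscalersGetter, namespace string) error {
	w, err := c.HorizontalPodAutoscalers(namespace).Watch(api.ListOptions{})
	if err != nil {
		return err
	}
	defer w.Stop()
	for event := range w.ResultChan() {
		// event.Type is ADDED, MODIFIED, DELETED, or ERROR.
		fmt.Printf("%s: %v\n", event.Type, event.Object)
	}
	return nil
}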
+type HorizontalPodAutoscalerInterface interface { + Create(*v1.HorizontalPodAutoscaler) (*v1.HorizontalPodAutoscaler, error) + Update(*v1.HorizontalPodAutoscaler) (*v1.HorizontalPodAutoscaler, error) + UpdateStatus(*v1.HorizontalPodAutoscaler) (*v1.HorizontalPodAutoscaler, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*v1.HorizontalPodAutoscaler, error) + List(opts api.ListOptions) (*v1.HorizontalPodAutoscalerList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + HorizontalPodAutoscalerExpansion +} + +// horizontalPodAutoscalers implements HorizontalPodAutoscalerInterface +type horizontalPodAutoscalers struct { + client *AutoscalingClient + ns string +} + +// newHorizontalPodAutoscalers returns a HorizontalPodAutoscalers +func newHorizontalPodAutoscalers(c *AutoscalingClient, namespace string) *horizontalPodAutoscalers { + return &horizontalPodAutoscalers{ + client: c, + ns: namespace, + } +} + +// Create takes the representation of a horizontalPodAutoscaler and creates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. +func (c *horizontalPodAutoscalers) Create(horizontalPodAutoscaler *v1.HorizontalPodAutoscaler) (result *v1.HorizontalPodAutoscaler, err error) { + result = &v1.HorizontalPodAutoscaler{} + err = c.client.Post(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + Body(horizontalPodAutoscaler). + Do(). + Into(result) + return +} + +// Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. +func (c *horizontalPodAutoscalers) Update(horizontalPodAutoscaler *v1.HorizontalPodAutoscaler) (result *v1.HorizontalPodAutoscaler, err error) { + result = &v1.HorizontalPodAutoscaler{} + err = c.client.Put(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + Name(horizontalPodAutoscaler.Name). + Body(horizontalPodAutoscaler). + Do(). + Into(result) + return +} + +func (c *horizontalPodAutoscalers) UpdateStatus(horizontalPodAutoscaler *v1.HorizontalPodAutoscaler) (result *v1.HorizontalPodAutoscaler, err error) { + result = &v1.HorizontalPodAutoscaler{} + err = c.client.Put(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + Name(horizontalPodAutoscaler.Name). + SubResource("status"). + Body(horizontalPodAutoscaler). + Do(). + Into(result) + return +} + +// Delete takes name of the horizontalPodAutoscaler and deletes it. Returns an error if one occurs. +func (c *horizontalPodAutoscalers) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *horizontalPodAutoscalers) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any. +func (c *horizontalPodAutoscalers) Get(name string) (result *v1.HorizontalPodAutoscaler, err error) { + result = &v1.HorizontalPodAutoscaler{} + err = c.client.Get(). + Namespace(c.ns). 
+ Resource("horizontalpodautoscalers"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors. +func (c *horizontalPodAutoscalers) List(opts api.ListOptions) (result *v1.HorizontalPodAutoscalerList, err error) { + result = &v1.HorizontalPodAutoscalerList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. +func (c *horizontalPodAutoscalers) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/batch/v1/batch_client.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/batch/v1/batch_client.go new file mode 100644 index 000000000000..0e566ed3aa3a --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/batch/v1/batch_client.go @@ -0,0 +1,101 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + api "k8s.io/kubernetes/pkg/api" + registered "k8s.io/kubernetes/pkg/apimachinery/registered" + restclient "k8s.io/kubernetes/pkg/client/restclient" +) + +type BatchInterface interface { + GetRESTClient() *restclient.RESTClient + JobsGetter +} + +// BatchClient is used to interact with features provided by the Batch group. +type BatchClient struct { + *restclient.RESTClient +} + +func (c *BatchClient) Jobs(namespace string) JobInterface { + return newJobs(c, namespace) +} + +// NewForConfig creates a new BatchClient for the given config. +func NewForConfig(c *restclient.Config) (*BatchClient, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := restclient.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &BatchClient{client}, nil +} + +// NewForConfigOrDie creates a new BatchClient for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *restclient.Config) *BatchClient { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new BatchClient for the given RESTClient. 
+func New(c *restclient.RESTClient) *BatchClient { + return &BatchClient{c} +} + +func setConfigDefaults(config *restclient.Config) error { + // if batch group is not registered, return an error + g, err := registered.Group("batch") + if err != nil { + return err + } + config.APIPath = "/apis" + if config.UserAgent == "" { + config.UserAgent = restclient.DefaultKubernetesUserAgent() + } + // TODO: Unconditionally set the config.Version, until we fix the config. + //if config.Version == "" { + copyGroupVersion := g.GroupVersion + config.GroupVersion = ©GroupVersion + //} + + config.NegotiatedSerializer = api.Codecs + + if config.QPS == 0 { + config.QPS = 5 + } + if config.Burst == 0 { + config.Burst = 10 + } + return nil +} + +// GetRESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *BatchClient) GetRESTClient() *restclient.RESTClient { + if c == nil { + return nil + } + return c.RESTClient +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/batch/v1/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/batch/v1/doc.go new file mode 100644 index 000000000000..2dbfc490135c --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/batch/v1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with arguments: --clientset-name=release_1_3 --input=[api/v1,extensions/v1beta1,autoscaling/v1,batch/v1] + +// This package has the automatically generated typed clients. +package v1 diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/batch/v1/fake/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/batch/v1/fake/doc.go new file mode 100644 index 000000000000..924812c706ec --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/batch/v1/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with arguments: --clientset-name=release_1_3 --input=[api/v1,extensions/v1beta1,autoscaling/v1,batch/v1] + +// Package fake has the automatically generated clients. 
+package fake diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/batch/v1/fake/fake_batch_client.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/batch/v1/fake/fake_batch_client.go new file mode 100644 index 000000000000..546256aa20b2 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/batch/v1/fake/fake_batch_client.go @@ -0,0 +1,37 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + v1 "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/batch/v1" + restclient "k8s.io/kubernetes/pkg/client/restclient" + core "k8s.io/kubernetes/pkg/client/testing/core" +) + +type FakeBatch struct { + *core.Fake +} + +func (c *FakeBatch) Jobs(namespace string) v1.JobInterface { + return &FakeJobs{c, namespace} +} + +// GetRESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeBatch) GetRESTClient() *restclient.RESTClient { + return nil +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned/fake/fake_job.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/batch/v1/fake/fake_job.go similarity index 56% rename from Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned/fake/fake_job.go rename to Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/batch/v1/fake/fake_job.go index c1875c006bed..163745bfa7e7 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned/fake/fake_job.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/batch/v1/fake/fake_job.go @@ -18,7 +18,8 @@ package fake import ( api "k8s.io/kubernetes/pkg/api" - extensions "k8s.io/kubernetes/pkg/apis/extensions" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + v1 "k8s.io/kubernetes/pkg/apis/batch/v1" core "k8s.io/kubernetes/pkg/client/testing/core" labels "k8s.io/kubernetes/pkg/labels" watch "k8s.io/kubernetes/pkg/watch" @@ -26,67 +27,69 @@ import ( // FakeJobs implements JobInterface type FakeJobs struct { - Fake *FakeExtensions + Fake *FakeBatch ns string } -func (c *FakeJobs) Create(job *extensions.Job) (result *extensions.Job, err error) { +var jobsResource = unversioned.GroupVersionResource{Group: "batch", Version: "v1", Resource: "jobs"} + +func (c *FakeJobs) Create(job *v1.Job) (result *v1.Job, err error) { obj, err := c.Fake. 
- Invokes(core.NewCreateAction("jobs", c.ns, job), &extensions.Job{}) + Invokes(core.NewCreateAction(jobsResource, c.ns, job), &v1.Job{}) if obj == nil { return nil, err } - return obj.(*extensions.Job), err + return obj.(*v1.Job), err } -func (c *FakeJobs) Update(job *extensions.Job) (result *extensions.Job, err error) { +func (c *FakeJobs) Update(job *v1.Job) (result *v1.Job, err error) { obj, err := c.Fake. - Invokes(core.NewUpdateAction("jobs", c.ns, job), &extensions.Job{}) + Invokes(core.NewUpdateAction(jobsResource, c.ns, job), &v1.Job{}) if obj == nil { return nil, err } - return obj.(*extensions.Job), err + return obj.(*v1.Job), err } -func (c *FakeJobs) UpdateStatus(job *extensions.Job) (*extensions.Job, error) { +func (c *FakeJobs) UpdateStatus(job *v1.Job) (*v1.Job, error) { obj, err := c.Fake. - Invokes(core.NewUpdateSubresourceAction("jobs", "status", c.ns, job), &extensions.Job{}) + Invokes(core.NewUpdateSubresourceAction(jobsResource, "status", c.ns, job), &v1.Job{}) if obj == nil { return nil, err } - return obj.(*extensions.Job), err + return obj.(*v1.Job), err } func (c *FakeJobs) Delete(name string, options *api.DeleteOptions) error { _, err := c.Fake. - Invokes(core.NewDeleteAction("jobs", c.ns, name), &extensions.Job{}) + Invokes(core.NewDeleteAction(jobsResource, c.ns, name), &v1.Job{}) return err } func (c *FakeJobs) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - action := core.NewDeleteCollectionAction("jobs", c.ns, listOptions) + action := core.NewDeleteCollectionAction(jobsResource, c.ns, listOptions) - _, err := c.Fake.Invokes(action, &extensions.JobList{}) + _, err := c.Fake.Invokes(action, &v1.JobList{}) return err } -func (c *FakeJobs) Get(name string) (result *extensions.Job, err error) { +func (c *FakeJobs) Get(name string) (result *v1.Job, err error) { obj, err := c.Fake. - Invokes(core.NewGetAction("jobs", c.ns, name), &extensions.Job{}) + Invokes(core.NewGetAction(jobsResource, c.ns, name), &v1.Job{}) if obj == nil { return nil, err } - return obj.(*extensions.Job), err + return obj.(*v1.Job), err } -func (c *FakeJobs) List(opts api.ListOptions) (result *extensions.JobList, err error) { +func (c *FakeJobs) List(opts api.ListOptions) (result *v1.JobList, err error) { obj, err := c.Fake. - Invokes(core.NewListAction("jobs", c.ns, opts), &extensions.JobList{}) + Invokes(core.NewListAction(jobsResource, c.ns, opts), &v1.JobList{}) if obj == nil { return nil, err @@ -96,8 +99,8 @@ func (c *FakeJobs) List(opts api.ListOptions) (result *extensions.JobList, err e if label == nil { label = labels.Everything() } - list := &extensions.JobList{} - for _, item := range obj.(*extensions.JobList).Items { + list := &v1.JobList{} + for _, item := range obj.(*v1.JobList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } @@ -108,6 +111,6 @@ func (c *FakeJobs) List(opts api.ListOptions) (result *extensions.JobList, err e // Watch returns a watch.Interface that watches the requested jobs. func (c *FakeJobs) Watch(opts api.ListOptions) (watch.Interface, error) { return c.Fake. 
- InvokesWatch(core.NewWatchAction("jobs", c.ns, opts)) + InvokesWatch(core.NewWatchAction(jobsResource, c.ns, opts)) } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/batch/v1/generated_expansion.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/batch/v1/generated_expansion.go new file mode 100644 index 000000000000..40daeb1c7b8e --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/batch/v1/generated_expansion.go @@ -0,0 +1,19 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +type JobExpansion interface{} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/batch/v1/job.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/batch/v1/job.go new file mode 100644 index 000000000000..f369642836d9 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/batch/v1/job.go @@ -0,0 +1,150 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + api "k8s.io/kubernetes/pkg/api" + v1 "k8s.io/kubernetes/pkg/apis/batch/v1" + watch "k8s.io/kubernetes/pkg/watch" +) + +// JobsGetter has a method to return a JobInterface. +// A group's client should implement this interface. +type JobsGetter interface { + Jobs(namespace string) JobInterface +} + +// JobInterface has methods to work with Job resources. +type JobInterface interface { + Create(*v1.Job) (*v1.Job, error) + Update(*v1.Job) (*v1.Job, error) + UpdateStatus(*v1.Job) (*v1.Job, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*v1.Job, error) + List(opts api.ListOptions) (*v1.JobList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + JobExpansion +} + +// jobs implements JobInterface +type jobs struct { + client *BatchClient + ns string +} + +// newJobs returns a Jobs +func newJobs(c *BatchClient, namespace string) *jobs { + return &jobs{ + client: c, + ns: namespace, + } +} + +// Create takes the representation of a job and creates it. Returns the server's representation of the job, and an error, if there is any. 
+func (c *jobs) Create(job *v1.Job) (result *v1.Job, err error) { + result = &v1.Job{} + err = c.client.Post(). + Namespace(c.ns). + Resource("jobs"). + Body(job). + Do(). + Into(result) + return +} + +// Update takes the representation of a job and updates it. Returns the server's representation of the job, and an error, if there is any. +func (c *jobs) Update(job *v1.Job) (result *v1.Job, err error) { + result = &v1.Job{} + err = c.client.Put(). + Namespace(c.ns). + Resource("jobs"). + Name(job.Name). + Body(job). + Do(). + Into(result) + return +} + +func (c *jobs) UpdateStatus(job *v1.Job) (result *v1.Job, err error) { + result = &v1.Job{} + err = c.client.Put(). + Namespace(c.ns). + Resource("jobs"). + Name(job.Name). + SubResource("status"). + Body(job). + Do(). + Into(result) + return +} + +// Delete takes name of the job and deletes it. Returns an error if one occurs. +func (c *jobs) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("jobs"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *jobs) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("jobs"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the job, and returns the corresponding job object, and an error if there is any. +func (c *jobs) Get(name string) (result *v1.Job, err error) { + result = &v1.Job{} + err = c.client.Get(). + Namespace(c.ns). + Resource("jobs"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Jobs that match those selectors. +func (c *jobs) List(opts api.ListOptions) (result *v1.JobList, err error) { + result = &v1.JobList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("jobs"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested jobs. +func (c *jobs) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("jobs"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/componentstatus.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/componentstatus.go new file mode 100644 index 000000000000..23363f530d4a --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/componentstatus.go @@ -0,0 +1,127 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + api "k8s.io/kubernetes/pkg/api" + v1 "k8s.io/kubernetes/pkg/api/v1" + watch "k8s.io/kubernetes/pkg/watch" +) + +// ComponentStatusesGetter has a method to return a ComponentStatusInterface. +// A group's client should implement this interface. +type ComponentStatusesGetter interface { + ComponentStatuses() ComponentStatusInterface +} + +// ComponentStatusInterface has methods to work with ComponentStatus resources. +type ComponentStatusInterface interface { + Create(*v1.ComponentStatus) (*v1.ComponentStatus, error) + Update(*v1.ComponentStatus) (*v1.ComponentStatus, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*v1.ComponentStatus, error) + List(opts api.ListOptions) (*v1.ComponentStatusList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + ComponentStatusExpansion +} + +// componentStatuses implements ComponentStatusInterface +type componentStatuses struct { + client *CoreClient +} + +// newComponentStatuses returns a ComponentStatuses +func newComponentStatuses(c *CoreClient) *componentStatuses { + return &componentStatuses{ + client: c, + } +} + +// Create takes the representation of a componentStatus and creates it. Returns the server's representation of the componentStatus, and an error, if there is any. +func (c *componentStatuses) Create(componentStatus *v1.ComponentStatus) (result *v1.ComponentStatus, err error) { + result = &v1.ComponentStatus{} + err = c.client.Post(). + Resource("componentstatuses"). + Body(componentStatus). + Do(). + Into(result) + return +} + +// Update takes the representation of a componentStatus and updates it. Returns the server's representation of the componentStatus, and an error, if there is any. +func (c *componentStatuses) Update(componentStatus *v1.ComponentStatus) (result *v1.ComponentStatus, err error) { + result = &v1.ComponentStatus{} + err = c.client.Put(). + Resource("componentstatuses"). + Name(componentStatus.Name). + Body(componentStatus). + Do(). + Into(result) + return +} + +// Delete takes name of the componentStatus and deletes it. Returns an error if one occurs. +func (c *componentStatuses) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Resource("componentstatuses"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *componentStatuses) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Resource("componentstatuses"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the componentStatus, and returns the corresponding componentStatus object, and an error if there is any. +func (c *componentStatuses) Get(name string) (result *v1.ComponentStatus, err error) { + result = &v1.ComponentStatus{} + err = c.client.Get(). + Resource("componentstatuses"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ComponentStatuses that match those selectors. +func (c *componentStatuses) List(opts api.ListOptions) (result *v1.ComponentStatusList, err error) { + result = &v1.ComponentStatusList{} + err = c.client.Get(). + Resource("componentstatuses"). + VersionedParams(&opts, api.ParameterCodec). + Do(). 
+ Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested componentStatuses. +func (c *componentStatuses) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Resource("componentstatuses"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/configmap.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/configmap.go new file mode 100644 index 000000000000..4fbb31328a42 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/configmap.go @@ -0,0 +1,136 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + api "k8s.io/kubernetes/pkg/api" + v1 "k8s.io/kubernetes/pkg/api/v1" + watch "k8s.io/kubernetes/pkg/watch" +) + +// ConfigMapsGetter has a method to return a ConfigMapInterface. +// A group's client should implement this interface. +type ConfigMapsGetter interface { + ConfigMaps(namespace string) ConfigMapInterface +} + +// ConfigMapInterface has methods to work with ConfigMap resources. +type ConfigMapInterface interface { + Create(*v1.ConfigMap) (*v1.ConfigMap, error) + Update(*v1.ConfigMap) (*v1.ConfigMap, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*v1.ConfigMap, error) + List(opts api.ListOptions) (*v1.ConfigMapList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + ConfigMapExpansion +} + +// configMaps implements ConfigMapInterface +type configMaps struct { + client *CoreClient + ns string +} + +// newConfigMaps returns a ConfigMaps +func newConfigMaps(c *CoreClient, namespace string) *configMaps { + return &configMaps{ + client: c, + ns: namespace, + } +} + +// Create takes the representation of a configMap and creates it. Returns the server's representation of the configMap, and an error, if there is any. +func (c *configMaps) Create(configMap *v1.ConfigMap) (result *v1.ConfigMap, err error) { + result = &v1.ConfigMap{} + err = c.client.Post(). + Namespace(c.ns). + Resource("configmaps"). + Body(configMap). + Do(). + Into(result) + return +} + +// Update takes the representation of a configMap and updates it. Returns the server's representation of the configMap, and an error, if there is any. +func (c *configMaps) Update(configMap *v1.ConfigMap) (result *v1.ConfigMap, err error) { + result = &v1.ConfigMap{} + err = c.client.Put(). + Namespace(c.ns). + Resource("configmaps"). + Name(configMap.Name). + Body(configMap). + Do(). + Into(result) + return +} + +// Delete takes name of the configMap and deletes it. Returns an error if one occurs. +func (c *configMaps) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). 
+ Resource("configmaps"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *configMaps) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("configmaps"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the configMap, and returns the corresponding configMap object, and an error if there is any. +func (c *configMaps) Get(name string) (result *v1.ConfigMap, err error) { + result = &v1.ConfigMap{} + err = c.client.Get(). + Namespace(c.ns). + Resource("configmaps"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ConfigMaps that match those selectors. +func (c *configMaps) List(opts api.ListOptions) (result *v1.ConfigMapList, err error) { + result = &v1.ConfigMapList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("configmaps"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested configMaps. +func (c *configMaps) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("configmaps"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/core_client.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/core_client.go new file mode 100644 index 000000000000..c670a0f2a1e1 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/core_client.go @@ -0,0 +1,171 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + api "k8s.io/kubernetes/pkg/api" + registered "k8s.io/kubernetes/pkg/apimachinery/registered" + restclient "k8s.io/kubernetes/pkg/client/restclient" +) + +type CoreInterface interface { + GetRESTClient() *restclient.RESTClient + ComponentStatusesGetter + ConfigMapsGetter + EndpointsGetter + EventsGetter + LimitRangesGetter + NamespacesGetter + NodesGetter + PersistentVolumesGetter + PodsGetter + PodTemplatesGetter + ReplicationControllersGetter + ResourceQuotasGetter + SecretsGetter + ServicesGetter + ServiceAccountsGetter +} + +// CoreClient is used to interact with features provided by the Core group. 
+type CoreClient struct {
+	*restclient.RESTClient
+}
+
+func (c *CoreClient) ComponentStatuses() ComponentStatusInterface {
+	return newComponentStatuses(c)
+}
+
+func (c *CoreClient) ConfigMaps(namespace string) ConfigMapInterface {
+	return newConfigMaps(c, namespace)
+}
+
+func (c *CoreClient) Endpoints(namespace string) EndpointsInterface {
+	return newEndpoints(c, namespace)
+}
+
+func (c *CoreClient) Events(namespace string) EventInterface {
+	return newEvents(c, namespace)
+}
+
+func (c *CoreClient) LimitRanges(namespace string) LimitRangeInterface {
+	return newLimitRanges(c, namespace)
+}
+
+func (c *CoreClient) Namespaces() NamespaceInterface {
+	return newNamespaces(c)
+}
+
+func (c *CoreClient) Nodes() NodeInterface {
+	return newNodes(c)
+}
+
+func (c *CoreClient) PersistentVolumes() PersistentVolumeInterface {
+	return newPersistentVolumes(c)
+}
+
+func (c *CoreClient) Pods(namespace string) PodInterface {
+	return newPods(c, namespace)
+}
+
+func (c *CoreClient) PodTemplates(namespace string) PodTemplateInterface {
+	return newPodTemplates(c, namespace)
+}
+
+func (c *CoreClient) ReplicationControllers(namespace string) ReplicationControllerInterface {
+	return newReplicationControllers(c, namespace)
+}
+
+func (c *CoreClient) ResourceQuotas(namespace string) ResourceQuotaInterface {
+	return newResourceQuotas(c, namespace)
+}
+
+func (c *CoreClient) Secrets(namespace string) SecretInterface {
+	return newSecrets(c, namespace)
+}
+
+func (c *CoreClient) Services(namespace string) ServiceInterface {
+	return newServices(c, namespace)
+}
+
+func (c *CoreClient) ServiceAccounts(namespace string) ServiceAccountInterface {
+	return newServiceAccounts(c, namespace)
+}
+
+// NewForConfig creates a new CoreClient for the given config.
+func NewForConfig(c *restclient.Config) (*CoreClient, error) {
+	config := *c
+	if err := setConfigDefaults(&config); err != nil {
+		return nil, err
+	}
+	client, err := restclient.RESTClientFor(&config)
+	if err != nil {
+		return nil, err
+	}
+	return &CoreClient{client}, nil
+}
+
+// NewForConfigOrDie creates a new CoreClient for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *restclient.Config) *CoreClient {
+	client, err := NewForConfig(c)
+	if err != nil {
+		panic(err)
+	}
+	return client
+}
+
+// New creates a new CoreClient for the given RESTClient.
+func New(c *restclient.RESTClient) *CoreClient {
+	return &CoreClient{c}
+}
+
+func setConfigDefaults(config *restclient.Config) error {
+	// if core group is not registered, return an error
+	g, err := registered.Group("")
+	if err != nil {
+		return err
+	}
+	config.APIPath = "/api"
+	if config.UserAgent == "" {
+		config.UserAgent = restclient.DefaultKubernetesUserAgent()
+	}
+	// TODO: Unconditionally set the config.Version, until we fix the config.
+	//if config.Version == "" {
+	copyGroupVersion := g.GroupVersion
+	config.GroupVersion = &copyGroupVersion
+	//}
+
+	config.NegotiatedSerializer = api.Codecs
+
+	if config.QPS == 0 {
+		config.QPS = 5
+	}
+	if config.Burst == 0 {
+		config.Burst = 10
+	}
+	return nil
+}
+
+// GetRESTClient returns a RESTClient that is used to communicate
+// with API server by this client implementation.
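To make the construction path above concrete, here is a minimal sketch of building the core client from a bare restclient.Config: NewForConfig copies the config, lets setConfigDefaults fill in APIPath "/api", the registered group version, the default user agent, and QPS 5 / Burst 10 when unset, then builds the underlying RESTClient. The host address and ConfigMap name below are illustrative.

package example

import (
	v1 "k8s.io/kubernetes/pkg/api/v1"
	corev1 "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1"
	"k8s.io/kubernetes/pkg/client/restclient"
)

func fetchConfigMap() (*v1.ConfigMap, error) {
	// Only Host is set; the remaining fields are defaulted inside NewForConfig.
	cfg := &restclient.Config{Host: "https://127.0.0.1:6443"} // illustrative address
	client, err := corev1.NewForConfig(cfg)
	if err != nil {
		return nil, err
	}
	// Issues GET /api/v1/namespaces/default/configmaps/my-config.
	return client.ConfigMaps("default").Get("my-config") // hypothetical name
}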
+func (c *CoreClient) GetRESTClient() *restclient.RESTClient { + if c == nil { + return nil + } + return c.RESTClient +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/doc.go new file mode 100644 index 000000000000..2dbfc490135c --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with arguments: --clientset-name=release_1_3 --input=[api/v1,extensions/v1beta1,autoscaling/v1,batch/v1] + +// This package has the automatically generated typed clients. +package v1 diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/endpoints.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/endpoints.go new file mode 100644 index 000000000000..409b044c72d6 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/endpoints.go @@ -0,0 +1,136 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + api "k8s.io/kubernetes/pkg/api" + v1 "k8s.io/kubernetes/pkg/api/v1" + watch "k8s.io/kubernetes/pkg/watch" +) + +// EndpointsGetter has a method to return a EndpointsInterface. +// A group's client should implement this interface. +type EndpointsGetter interface { + Endpoints(namespace string) EndpointsInterface +} + +// EndpointsInterface has methods to work with Endpoints resources. 
+type EndpointsInterface interface { + Create(*v1.Endpoints) (*v1.Endpoints, error) + Update(*v1.Endpoints) (*v1.Endpoints, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*v1.Endpoints, error) + List(opts api.ListOptions) (*v1.EndpointsList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + EndpointsExpansion +} + +// endpoints implements EndpointsInterface +type endpoints struct { + client *CoreClient + ns string +} + +// newEndpoints returns a Endpoints +func newEndpoints(c *CoreClient, namespace string) *endpoints { + return &endpoints{ + client: c, + ns: namespace, + } +} + +// Create takes the representation of a endpoints and creates it. Returns the server's representation of the endpoints, and an error, if there is any. +func (c *endpoints) Create(endpoints *v1.Endpoints) (result *v1.Endpoints, err error) { + result = &v1.Endpoints{} + err = c.client.Post(). + Namespace(c.ns). + Resource("endpoints"). + Body(endpoints). + Do(). + Into(result) + return +} + +// Update takes the representation of a endpoints and updates it. Returns the server's representation of the endpoints, and an error, if there is any. +func (c *endpoints) Update(endpoints *v1.Endpoints) (result *v1.Endpoints, err error) { + result = &v1.Endpoints{} + err = c.client.Put(). + Namespace(c.ns). + Resource("endpoints"). + Name(endpoints.Name). + Body(endpoints). + Do(). + Into(result) + return +} + +// Delete takes name of the endpoints and deletes it. Returns an error if one occurs. +func (c *endpoints) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("endpoints"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *endpoints) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("endpoints"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the endpoints, and returns the corresponding endpoints object, and an error if there is any. +func (c *endpoints) Get(name string) (result *v1.Endpoints, err error) { + result = &v1.Endpoints{} + err = c.client.Get(). + Namespace(c.ns). + Resource("endpoints"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Endpoints that match those selectors. +func (c *endpoints) List(opts api.ListOptions) (result *v1.EndpointsList, err error) { + result = &v1.EndpointsList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("endpoints"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested endpoints. +func (c *endpoints) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("endpoints"). + VersionedParams(&opts, api.ParameterCodec). 
+ Watch() +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/event.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/event.go new file mode 100644 index 000000000000..92266c98b619 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/event.go @@ -0,0 +1,136 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + api "k8s.io/kubernetes/pkg/api" + v1 "k8s.io/kubernetes/pkg/api/v1" + watch "k8s.io/kubernetes/pkg/watch" +) + +// EventsGetter has a method to return a EventInterface. +// A group's client should implement this interface. +type EventsGetter interface { + Events(namespace string) EventInterface +} + +// EventInterface has methods to work with Event resources. +type EventInterface interface { + Create(*v1.Event) (*v1.Event, error) + Update(*v1.Event) (*v1.Event, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*v1.Event, error) + List(opts api.ListOptions) (*v1.EventList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + EventExpansion +} + +// events implements EventInterface +type events struct { + client *CoreClient + ns string +} + +// newEvents returns a Events +func newEvents(c *CoreClient, namespace string) *events { + return &events{ + client: c, + ns: namespace, + } +} + +// Create takes the representation of a event and creates it. Returns the server's representation of the event, and an error, if there is any. +func (c *events) Create(event *v1.Event) (result *v1.Event, err error) { + result = &v1.Event{} + err = c.client.Post(). + Namespace(c.ns). + Resource("events"). + Body(event). + Do(). + Into(result) + return +} + +// Update takes the representation of a event and updates it. Returns the server's representation of the event, and an error, if there is any. +func (c *events) Update(event *v1.Event) (result *v1.Event, err error) { + result = &v1.Event{} + err = c.client.Put(). + Namespace(c.ns). + Resource("events"). + Name(event.Name). + Body(event). + Do(). + Into(result) + return +} + +// Delete takes name of the event and deletes it. Returns an error if one occurs. +func (c *events) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("events"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *events) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("events"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the event, and returns the corresponding event object, and an error if there is any. 
+func (c *events) Get(name string) (result *v1.Event, err error) { + result = &v1.Event{} + err = c.client.Get(). + Namespace(c.ns). + Resource("events"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Events that match those selectors. +func (c *events) List(opts api.ListOptions) (result *v1.EventList, err error) { + result = &v1.EventList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("events"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested events. +func (c *events) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("events"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/event_expansion.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/event_expansion.go new file mode 100644 index 000000000000..971c850c7a37 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/event_expansion.go @@ -0,0 +1,158 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "fmt" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/fields" + "k8s.io/kubernetes/pkg/runtime" +) + +// The EventExpansion interface allows manually adding extra methods to the EventInterface. +type EventExpansion interface { + // CreateWithEventNamespace is the same as a Create, except that it sends the request to the event.Namespace. + CreateWithEventNamespace(event *v1.Event) (*v1.Event, error) + // UpdateWithEventNamespace is the same as a Update, except that it sends the request to the event.Namespace. + UpdateWithEventNamespace(event *v1.Event) (*v1.Event, error) + Patch(event *v1.Event, data []byte) (*v1.Event, error) + // Search finds events about the specified object + Search(objOrRef runtime.Object) (*v1.EventList, error) + // Returns the appropriate field selector based on the API version being used to communicate with the server. + // The returned field selector can be used with List and Watch to filter desired events. + GetFieldSelector(involvedObjectName, involvedObjectNamespace, involvedObjectKind, involvedObjectUID *string) fields.Selector +} + +// CreateWithEventNamespace makes a new event. Returns the copy of the event the server returns, +// or an error. The namespace to create the event within is deduced from the +// event; it must either match this event client's namespace, or this event +// client must have been created with the "" namespace. 
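A short sketch of the namespace rule described above: an event client created with the empty namespace may submit events into whatever namespace the event itself carries. The CoreInterface value and the event fields are assumptions for illustration.

package example

import (
	v1 "k8s.io/kubernetes/pkg/api/v1"
	corev1 "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1"
)

func recordEvent(c corev1.CoreInterface) error {
	evt := &v1.Event{}
	evt.Name = "example-event"    // hypothetical
	evt.Namespace = "kube-system" // the request is routed to this namespace
	// Events("") yields a client scoped to no namespace, so the
	// event.Namespace check inside CreateWithEventNamespace passes.
	_, err := c.Events("").CreateWithEventNamespace(evt)
	return err
}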
+func (e *events) CreateWithEventNamespace(event *v1.Event) (*v1.Event, error) { + if e.ns != "" && event.Namespace != e.ns { + return nil, fmt.Errorf("can't create an event with namespace '%v' in namespace '%v'", event.Namespace, e.ns) + } + result := &v1.Event{} + err := e.client.Post(). + NamespaceIfScoped(event.Namespace, len(event.Namespace) > 0). + Resource("events"). + Body(event). + Do(). + Into(result) + return result, err +} + +// UpdateWithEventNamespace modifies an existing event. It returns the copy of the event that the server returns, +// or an error. The namespace and key to update the event within is deduced from the event. The +// namespace must either match this event client's namespace, or this event client must have been +// created with the "" namespace. Update also requires the ResourceVersion to be set in the event +// object. +func (e *events) UpdateWithEventNamespace(event *v1.Event) (*v1.Event, error) { + result := &v1.Event{} + err := e.client.Put(). + NamespaceIfScoped(event.Namespace, len(event.Namespace) > 0). + Resource("events"). + Name(event.Name). + Body(event). + Do(). + Into(result) + return result, err +} + +// Patch modifies an existing event. It returns the copy of the event that the server returns, or an +// error. The namespace and name of the target event is deduced from the incompleteEvent. The +// namespace must either match this event client's namespace, or this event client must have been +// created with the "" namespace. +func (e *events) Patch(incompleteEvent *v1.Event, data []byte) (*v1.Event, error) { + result := &v1.Event{} + err := e.client.Patch(api.StrategicMergePatchType). + NamespaceIfScoped(incompleteEvent.Namespace, len(incompleteEvent.Namespace) > 0). + Resource("events"). + Name(incompleteEvent.Name). + Body(data). + Do(). + Into(result) + return result, err +} + +// Search finds events about the specified object. The namespace of the +// object must match this event's client namespace unless the event client +// was made with the "" namespace. +func (e *events) Search(objOrRef runtime.Object) (*v1.EventList, error) { + ref, err := api.GetReference(objOrRef) + if err != nil { + return nil, err + } + if e.ns != "" && ref.Namespace != e.ns { + return nil, fmt.Errorf("won't be able to find any events of namespace '%v' in namespace '%v'", ref.Namespace, e.ns) + } + stringRefKind := string(ref.Kind) + var refKind *string + if stringRefKind != "" { + refKind = &stringRefKind + } + stringRefUID := string(ref.UID) + var refUID *string + if stringRefUID != "" { + refUID = &stringRefUID + } + fieldSelector := e.GetFieldSelector(&ref.Name, &ref.Namespace, refKind, refUID) + return e.List(api.ListOptions{FieldSelector: fieldSelector}) +} + +// Returns the appropriate field selector based on the API version being used to communicate with the server. +// The returned field selector can be used with List and Watch to filter desired events. 
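As a sketch of how the returned selector is meant to be combined with List, per the comment above: find events whose involvedObject matches a given pod. The client value and the pod name are illustrative; kind and uid are left nil so they are not constrained.

package example

import (
	api "k8s.io/kubernetes/pkg/api"
	v1 "k8s.io/kubernetes/pkg/api/v1"
	corev1 "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1"
)

func eventsForPod(c corev1.CoreInterface) (*v1.EventList, error) {
	name, ns := "my-pod", "default" // hypothetical pod
	ev := c.Events(ns)
	// Builds a selector on involvedObject.name and involvedObject.namespace,
	// mirroring what Search does internally.
	sel := ev.GetFieldSelector(&name, &ns, nil, nil)
	return ev.List(api.ListOptions{FieldSelector: sel})
}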
+func (e *events) GetFieldSelector(involvedObjectName, involvedObjectNamespace, involvedObjectKind, involvedObjectUID *string) fields.Selector { + apiVersion := e.client.APIVersion().String() + field := fields.Set{} + if involvedObjectName != nil { + field[GetInvolvedObjectNameFieldLabel(apiVersion)] = *involvedObjectName + } + if involvedObjectNamespace != nil { + field["involvedObject.namespace"] = *involvedObjectNamespace + } + if involvedObjectKind != nil { + field["involvedObject.kind"] = *involvedObjectKind + } + if involvedObjectUID != nil { + field["involvedObject.uid"] = *involvedObjectUID + } + return field.AsSelector() +} + +// Returns the appropriate field label to use for name of the involved object as per the given API version. +func GetInvolvedObjectNameFieldLabel(version string) string { + return "involvedObject.name" +} + +// TODO: This is a temporary arrangement and will be removed once all clients are moved to use the clientset. +type EventSinkImpl struct { + Interface EventInterface +} + +func (e *EventSinkImpl) Create(event *v1.Event) (*v1.Event, error) { + return e.Interface.CreateWithEventNamespace(event) +} + +func (e *EventSinkImpl) Update(event *v1.Event) (*v1.Event, error) { + return e.Interface.UpdateWithEventNamespace(event) +} + +func (e *EventSinkImpl) Patch(event *v1.Event, data []byte) (*v1.Event, error) { + return e.Interface.Patch(event, data) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/fake/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/fake/doc.go new file mode 100644 index 000000000000..924812c706ec --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with arguments: --clientset-name=release_1_3 --input=[api/v1,extensions/v1beta1,autoscaling/v1,batch/v1] + +// Package fake has the automatically generated clients. +package fake diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/fake/fake_componentstatus.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/fake/fake_componentstatus.go new file mode 100644 index 000000000000..6a86ac56902e --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/fake/fake_componentstatus.go @@ -0,0 +1,99 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + v1 "k8s.io/kubernetes/pkg/api/v1" + core "k8s.io/kubernetes/pkg/client/testing/core" + labels "k8s.io/kubernetes/pkg/labels" + watch "k8s.io/kubernetes/pkg/watch" +) + +// FakeComponentStatuses implements ComponentStatusInterface +type FakeComponentStatuses struct { + Fake *FakeCore +} + +var componentstatusesResource = unversioned.GroupVersionResource{Group: "", Version: "v1", Resource: "componentstatuses"} + +func (c *FakeComponentStatuses) Create(componentStatus *v1.ComponentStatus) (result *v1.ComponentStatus, err error) { + obj, err := c.Fake. + Invokes(core.NewRootCreateAction(componentstatusesResource, componentStatus), &v1.ComponentStatus{}) + if obj == nil { + return nil, err + } + return obj.(*v1.ComponentStatus), err +} + +func (c *FakeComponentStatuses) Update(componentStatus *v1.ComponentStatus) (result *v1.ComponentStatus, err error) { + obj, err := c.Fake. + Invokes(core.NewRootUpdateAction(componentstatusesResource, componentStatus), &v1.ComponentStatus{}) + if obj == nil { + return nil, err + } + return obj.(*v1.ComponentStatus), err +} + +func (c *FakeComponentStatuses) Delete(name string, options *api.DeleteOptions) error { + _, err := c.Fake. + Invokes(core.NewRootDeleteAction(componentstatusesResource, name), &v1.ComponentStatus{}) + return err +} + +func (c *FakeComponentStatuses) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + action := core.NewRootDeleteCollectionAction(componentstatusesResource, listOptions) + + _, err := c.Fake.Invokes(action, &v1.ComponentStatusList{}) + return err +} + +func (c *FakeComponentStatuses) Get(name string) (result *v1.ComponentStatus, err error) { + obj, err := c.Fake. + Invokes(core.NewRootGetAction(componentstatusesResource, name), &v1.ComponentStatus{}) + if obj == nil { + return nil, err + } + return obj.(*v1.ComponentStatus), err +} + +func (c *FakeComponentStatuses) List(opts api.ListOptions) (result *v1.ComponentStatusList, err error) { + obj, err := c.Fake. + Invokes(core.NewRootListAction(componentstatusesResource, opts), &v1.ComponentStatusList{}) + if obj == nil { + return nil, err + } + + label := opts.LabelSelector + if label == nil { + label = labels.Everything() + } + list := &v1.ComponentStatusList{} + for _, item := range obj.(*v1.ComponentStatusList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested componentStatuses. +func (c *FakeComponentStatuses) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.Fake. 
+ InvokesWatch(core.NewRootWatchAction(componentstatusesResource, opts)) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/fake/fake_configmap.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/fake/fake_configmap.go new file mode 100644 index 000000000000..81dcc633add5 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/fake/fake_configmap.go @@ -0,0 +1,106 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + v1 "k8s.io/kubernetes/pkg/api/v1" + core "k8s.io/kubernetes/pkg/client/testing/core" + labels "k8s.io/kubernetes/pkg/labels" + watch "k8s.io/kubernetes/pkg/watch" +) + +// FakeConfigMaps implements ConfigMapInterface +type FakeConfigMaps struct { + Fake *FakeCore + ns string +} + +var configmapsResource = unversioned.GroupVersionResource{Group: "", Version: "v1", Resource: "configmaps"} + +func (c *FakeConfigMaps) Create(configMap *v1.ConfigMap) (result *v1.ConfigMap, err error) { + obj, err := c.Fake. + Invokes(core.NewCreateAction(configmapsResource, c.ns, configMap), &v1.ConfigMap{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.ConfigMap), err +} + +func (c *FakeConfigMaps) Update(configMap *v1.ConfigMap) (result *v1.ConfigMap, err error) { + obj, err := c.Fake. + Invokes(core.NewUpdateAction(configmapsResource, c.ns, configMap), &v1.ConfigMap{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.ConfigMap), err +} + +func (c *FakeConfigMaps) Delete(name string, options *api.DeleteOptions) error { + _, err := c.Fake. + Invokes(core.NewDeleteAction(configmapsResource, c.ns, name), &v1.ConfigMap{}) + + return err +} + +func (c *FakeConfigMaps) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + action := core.NewDeleteCollectionAction(configmapsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1.ConfigMapList{}) + return err +} + +func (c *FakeConfigMaps) Get(name string) (result *v1.ConfigMap, err error) { + obj, err := c.Fake. + Invokes(core.NewGetAction(configmapsResource, c.ns, name), &v1.ConfigMap{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.ConfigMap), err +} + +func (c *FakeConfigMaps) List(opts api.ListOptions) (result *v1.ConfigMapList, err error) { + obj, err := c.Fake. + Invokes(core.NewListAction(configmapsResource, c.ns, opts), &v1.ConfigMapList{}) + + if obj == nil { + return nil, err + } + + label := opts.LabelSelector + if label == nil { + label = labels.Everything() + } + list := &v1.ConfigMapList{} + for _, item := range obj.(*v1.ConfigMapList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested configMaps. 
+func (c *FakeConfigMaps) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(core.NewWatchAction(configmapsResource, c.ns, opts)) + +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/fake/fake_core_client.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/fake/fake_core_client.go new file mode 100644 index 000000000000..6ffe30d03a72 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/fake/fake_core_client.go @@ -0,0 +1,93 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + v1 "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1" + restclient "k8s.io/kubernetes/pkg/client/restclient" + core "k8s.io/kubernetes/pkg/client/testing/core" +) + +type FakeCore struct { + *core.Fake +} + +func (c *FakeCore) ComponentStatuses() v1.ComponentStatusInterface { + return &FakeComponentStatuses{c} +} + +func (c *FakeCore) ConfigMaps(namespace string) v1.ConfigMapInterface { + return &FakeConfigMaps{c, namespace} +} + +func (c *FakeCore) Endpoints(namespace string) v1.EndpointsInterface { + return &FakeEndpoints{c, namespace} +} + +func (c *FakeCore) Events(namespace string) v1.EventInterface { + return &FakeEvents{c, namespace} +} + +func (c *FakeCore) LimitRanges(namespace string) v1.LimitRangeInterface { + return &FakeLimitRanges{c, namespace} +} + +func (c *FakeCore) Namespaces() v1.NamespaceInterface { + return &FakeNamespaces{c} +} + +func (c *FakeCore) Nodes() v1.NodeInterface { + return &FakeNodes{c} +} + +func (c *FakeCore) PersistentVolumes() v1.PersistentVolumeInterface { + return &FakePersistentVolumes{c} +} + +func (c *FakeCore) Pods(namespace string) v1.PodInterface { + return &FakePods{c, namespace} +} + +func (c *FakeCore) PodTemplates(namespace string) v1.PodTemplateInterface { + return &FakePodTemplates{c, namespace} +} + +func (c *FakeCore) ReplicationControllers(namespace string) v1.ReplicationControllerInterface { + return &FakeReplicationControllers{c, namespace} +} + +func (c *FakeCore) ResourceQuotas(namespace string) v1.ResourceQuotaInterface { + return &FakeResourceQuotas{c, namespace} +} + +func (c *FakeCore) Secrets(namespace string) v1.SecretInterface { + return &FakeSecrets{c, namespace} +} + +func (c *FakeCore) Services(namespace string) v1.ServiceInterface { + return &FakeServices{c, namespace} +} + +func (c *FakeCore) ServiceAccounts(namespace string) v1.ServiceAccountInterface { + return &FakeServiceAccounts{c, namespace} +} + +// GetRESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
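To illustrate how these fakes are intended to be driven in tests: every call funnels through Invokes on the embedded core.Fake, which records the action before consulting any registered reactors. This is a sketch only; the zero-value core.Fake and its Actions() accessor are assumptions about the testing/core package (only Invokes appears in this diff).

package fake_test

import (
	"testing"

	v1 "k8s.io/kubernetes/pkg/api/v1"
	corefake "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/fake"
	core "k8s.io/kubernetes/pkg/client/testing/core"
)

func TestFakeConfigMapCreateIsRecorded(t *testing.T) {
	f := &core.Fake{}
	client := &corefake.FakeCore{Fake: f}

	cm := &v1.ConfigMap{}
	cm.Name = "my-config" // hypothetical
	client.ConfigMaps("default").Create(cm)

	// With no reactors registered Create returns a nil object, but the
	// action is still recorded, which is what the assertion relies on.
	if got := len(f.Actions()); got != 1 {
		t.Fatalf("expected 1 recorded action, got %d", got)
	}
}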
+func (c *FakeCore) GetRESTClient() *restclient.RESTClient { + return nil +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/fake/fake_endpoints.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/fake/fake_endpoints.go new file mode 100644 index 000000000000..f5c570ffd21c --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/fake/fake_endpoints.go @@ -0,0 +1,106 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + v1 "k8s.io/kubernetes/pkg/api/v1" + core "k8s.io/kubernetes/pkg/client/testing/core" + labels "k8s.io/kubernetes/pkg/labels" + watch "k8s.io/kubernetes/pkg/watch" +) + +// FakeEndpoints implements EndpointsInterface +type FakeEndpoints struct { + Fake *FakeCore + ns string +} + +var endpointsResource = unversioned.GroupVersionResource{Group: "", Version: "v1", Resource: "endpoints"} + +func (c *FakeEndpoints) Create(endpoints *v1.Endpoints) (result *v1.Endpoints, err error) { + obj, err := c.Fake. + Invokes(core.NewCreateAction(endpointsResource, c.ns, endpoints), &v1.Endpoints{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Endpoints), err +} + +func (c *FakeEndpoints) Update(endpoints *v1.Endpoints) (result *v1.Endpoints, err error) { + obj, err := c.Fake. + Invokes(core.NewUpdateAction(endpointsResource, c.ns, endpoints), &v1.Endpoints{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Endpoints), err +} + +func (c *FakeEndpoints) Delete(name string, options *api.DeleteOptions) error { + _, err := c.Fake. + Invokes(core.NewDeleteAction(endpointsResource, c.ns, name), &v1.Endpoints{}) + + return err +} + +func (c *FakeEndpoints) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + action := core.NewDeleteCollectionAction(endpointsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1.EndpointsList{}) + return err +} + +func (c *FakeEndpoints) Get(name string) (result *v1.Endpoints, err error) { + obj, err := c.Fake. + Invokes(core.NewGetAction(endpointsResource, c.ns, name), &v1.Endpoints{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Endpoints), err +} + +func (c *FakeEndpoints) List(opts api.ListOptions) (result *v1.EndpointsList, err error) { + obj, err := c.Fake. + Invokes(core.NewListAction(endpointsResource, c.ns, opts), &v1.EndpointsList{}) + + if obj == nil { + return nil, err + } + + label := opts.LabelSelector + if label == nil { + label = labels.Everything() + } + list := &v1.EndpointsList{} + for _, item := range obj.(*v1.EndpointsList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested endpoints. 
+func (c *FakeEndpoints) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(core.NewWatchAction(endpointsResource, c.ns, opts)) + +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/fake/fake_event.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/fake/fake_event.go new file mode 100644 index 000000000000..5dd7e08b8d26 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/fake/fake_event.go @@ -0,0 +1,106 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + v1 "k8s.io/kubernetes/pkg/api/v1" + core "k8s.io/kubernetes/pkg/client/testing/core" + labels "k8s.io/kubernetes/pkg/labels" + watch "k8s.io/kubernetes/pkg/watch" +) + +// FakeEvents implements EventInterface +type FakeEvents struct { + Fake *FakeCore + ns string +} + +var eventsResource = unversioned.GroupVersionResource{Group: "", Version: "v1", Resource: "events"} + +func (c *FakeEvents) Create(event *v1.Event) (result *v1.Event, err error) { + obj, err := c.Fake. + Invokes(core.NewCreateAction(eventsResource, c.ns, event), &v1.Event{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Event), err +} + +func (c *FakeEvents) Update(event *v1.Event) (result *v1.Event, err error) { + obj, err := c.Fake. + Invokes(core.NewUpdateAction(eventsResource, c.ns, event), &v1.Event{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Event), err +} + +func (c *FakeEvents) Delete(name string, options *api.DeleteOptions) error { + _, err := c.Fake. + Invokes(core.NewDeleteAction(eventsResource, c.ns, name), &v1.Event{}) + + return err +} + +func (c *FakeEvents) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + action := core.NewDeleteCollectionAction(eventsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1.EventList{}) + return err +} + +func (c *FakeEvents) Get(name string) (result *v1.Event, err error) { + obj, err := c.Fake. + Invokes(core.NewGetAction(eventsResource, c.ns, name), &v1.Event{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Event), err +} + +func (c *FakeEvents) List(opts api.ListOptions) (result *v1.EventList, err error) { + obj, err := c.Fake. + Invokes(core.NewListAction(eventsResource, c.ns, opts), &v1.EventList{}) + + if obj == nil { + return nil, err + } + + label := opts.LabelSelector + if label == nil { + label = labels.Everything() + } + list := &v1.EventList{} + for _, item := range obj.(*v1.EventList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested events. 
+func (c *FakeEvents) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(core.NewWatchAction(eventsResource, c.ns, opts)) + +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/fake/fake_event_expansion.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/fake/fake_event_expansion.go new file mode 100644 index 000000000000..173032b60cb7 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/fake/fake_event_expansion.go @@ -0,0 +1,89 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/client/testing/core" + "k8s.io/kubernetes/pkg/fields" + "k8s.io/kubernetes/pkg/runtime" +) + +func (c *FakeEvents) CreateWithEventNamespace(event *v1.Event) (*v1.Event, error) { + action := core.NewRootCreateAction(eventsResource, event) + if c.ns != "" { + action = core.NewCreateAction(eventsResource, c.ns, event) + } + obj, err := c.Fake.Invokes(action, event) + if obj == nil { + return nil, err + } + + return obj.(*v1.Event), err +} + +// Update replaces an existing event. Returns the copy of the event the server returns, or an error. +func (c *FakeEvents) UpdateWithEventNamespace(event *v1.Event) (*v1.Event, error) { + action := core.NewRootUpdateAction(eventsResource, event) + if c.ns != "" { + action = core.NewUpdateAction(eventsResource, c.ns, event) + } + obj, err := c.Fake.Invokes(action, event) + if obj == nil { + return nil, err + } + + return obj.(*v1.Event), err +} + +// Patch patches an existing event. Returns the copy of the event the server returns, or an error. +func (c *FakeEvents) Patch(event *v1.Event, data []byte) (*v1.Event, error) { + action := core.NewRootPatchAction(eventsResource, event) + if c.ns != "" { + action = core.NewPatchAction(eventsResource, c.ns, event) + } + obj, err := c.Fake.Invokes(action, event) + if obj == nil { + return nil, err + } + + return obj.(*v1.Event), err +} + +// Search returns a list of events matching the specified object. 
+func (c *FakeEvents) Search(objOrRef runtime.Object) (*v1.EventList, error) { + action := core.NewRootListAction(eventsResource, api.ListOptions{}) + if c.ns != "" { + action = core.NewListAction(eventsResource, c.ns, api.ListOptions{}) + } + obj, err := c.Fake.Invokes(action, &v1.EventList{}) + if obj == nil { + return nil, err + } + + return obj.(*v1.EventList), err +} + +func (c *FakeEvents) GetFieldSelector(involvedObjectName, involvedObjectNamespace, involvedObjectKind, involvedObjectUID *string) fields.Selector { + action := core.GenericActionImpl{} + action.Verb = "get-field-selector" + action.Resource = eventsResource + + c.Fake.Invokes(action, nil) + return fields.Everything() +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/fake/fake_limitrange.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/fake/fake_limitrange.go new file mode 100644 index 000000000000..f5755a87b668 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/fake/fake_limitrange.go @@ -0,0 +1,106 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + v1 "k8s.io/kubernetes/pkg/api/v1" + core "k8s.io/kubernetes/pkg/client/testing/core" + labels "k8s.io/kubernetes/pkg/labels" + watch "k8s.io/kubernetes/pkg/watch" +) + +// FakeLimitRanges implements LimitRangeInterface +type FakeLimitRanges struct { + Fake *FakeCore + ns string +} + +var limitrangesResource = unversioned.GroupVersionResource{Group: "", Version: "v1", Resource: "limitranges"} + +func (c *FakeLimitRanges) Create(limitRange *v1.LimitRange) (result *v1.LimitRange, err error) { + obj, err := c.Fake. + Invokes(core.NewCreateAction(limitrangesResource, c.ns, limitRange), &v1.LimitRange{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.LimitRange), err +} + +func (c *FakeLimitRanges) Update(limitRange *v1.LimitRange) (result *v1.LimitRange, err error) { + obj, err := c.Fake. + Invokes(core.NewUpdateAction(limitrangesResource, c.ns, limitRange), &v1.LimitRange{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.LimitRange), err +} + +func (c *FakeLimitRanges) Delete(name string, options *api.DeleteOptions) error { + _, err := c.Fake. + Invokes(core.NewDeleteAction(limitrangesResource, c.ns, name), &v1.LimitRange{}) + + return err +} + +func (c *FakeLimitRanges) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + action := core.NewDeleteCollectionAction(limitrangesResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1.LimitRangeList{}) + return err +} + +func (c *FakeLimitRanges) Get(name string) (result *v1.LimitRange, err error) { + obj, err := c.Fake. 
+ Invokes(core.NewGetAction(limitrangesResource, c.ns, name), &v1.LimitRange{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.LimitRange), err +} + +func (c *FakeLimitRanges) List(opts api.ListOptions) (result *v1.LimitRangeList, err error) { + obj, err := c.Fake. + Invokes(core.NewListAction(limitrangesResource, c.ns, opts), &v1.LimitRangeList{}) + + if obj == nil { + return nil, err + } + + label := opts.LabelSelector + if label == nil { + label = labels.Everything() + } + list := &v1.LimitRangeList{} + for _, item := range obj.(*v1.LimitRangeList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested limitRanges. +func (c *FakeLimitRanges) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(core.NewWatchAction(limitrangesResource, c.ns, opts)) + +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/fake/fake_namespace.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/fake/fake_namespace.go new file mode 100644 index 000000000000..b81ca5c52ec9 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/fake/fake_namespace.go @@ -0,0 +1,108 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + v1 "k8s.io/kubernetes/pkg/api/v1" + core "k8s.io/kubernetes/pkg/client/testing/core" + labels "k8s.io/kubernetes/pkg/labels" + watch "k8s.io/kubernetes/pkg/watch" +) + +// FakeNamespaces implements NamespaceInterface +type FakeNamespaces struct { + Fake *FakeCore +} + +var namespacesResource = unversioned.GroupVersionResource{Group: "", Version: "v1", Resource: "namespaces"} + +func (c *FakeNamespaces) Create(namespace *v1.Namespace) (result *v1.Namespace, err error) { + obj, err := c.Fake. + Invokes(core.NewRootCreateAction(namespacesResource, namespace), &v1.Namespace{}) + if obj == nil { + return nil, err + } + return obj.(*v1.Namespace), err +} + +func (c *FakeNamespaces) Update(namespace *v1.Namespace) (result *v1.Namespace, err error) { + obj, err := c.Fake. + Invokes(core.NewRootUpdateAction(namespacesResource, namespace), &v1.Namespace{}) + if obj == nil { + return nil, err + } + return obj.(*v1.Namespace), err +} + +func (c *FakeNamespaces) UpdateStatus(namespace *v1.Namespace) (*v1.Namespace, error) { + obj, err := c.Fake. + Invokes(core.NewRootUpdateSubresourceAction(namespacesResource, "status", namespace), &v1.Namespace{}) + if obj == nil { + return nil, err + } + return obj.(*v1.Namespace), err +} + +func (c *FakeNamespaces) Delete(name string, options *api.DeleteOptions) error { + _, err := c.Fake. 
+ Invokes(core.NewRootDeleteAction(namespacesResource, name), &v1.Namespace{}) + return err +} + +func (c *FakeNamespaces) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + action := core.NewRootDeleteCollectionAction(namespacesResource, listOptions) + + _, err := c.Fake.Invokes(action, &v1.NamespaceList{}) + return err +} + +func (c *FakeNamespaces) Get(name string) (result *v1.Namespace, err error) { + obj, err := c.Fake. + Invokes(core.NewRootGetAction(namespacesResource, name), &v1.Namespace{}) + if obj == nil { + return nil, err + } + return obj.(*v1.Namespace), err +} + +func (c *FakeNamespaces) List(opts api.ListOptions) (result *v1.NamespaceList, err error) { + obj, err := c.Fake. + Invokes(core.NewRootListAction(namespacesResource, opts), &v1.NamespaceList{}) + if obj == nil { + return nil, err + } + + label := opts.LabelSelector + if label == nil { + label = labels.Everything() + } + list := &v1.NamespaceList{} + for _, item := range obj.(*v1.NamespaceList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested namespaces. +func (c *FakeNamespaces) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(core.NewRootWatchAction(namespacesResource, opts)) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/fake/fake_namespace_expansion.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/fake/fake_namespace_expansion.go new file mode 100644 index 000000000000..a4416ffcf473 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/fake/fake_namespace_expansion.go @@ -0,0 +1,37 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/client/testing/core" +) + +func (c *FakeNamespaces) Finalize(namespace *v1.Namespace) (*v1.Namespace, error) { + action := core.CreateActionImpl{} + action.Verb = "create" + action.Resource = namespacesResource + action.Subresource = "finalize" + action.Object = namespace + + obj, err := c.Fake.Invokes(action, namespace) + if obj == nil { + return nil, err + } + + return obj.(*v1.Namespace), err +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/fake/fake_node.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/fake/fake_node.go new file mode 100644 index 000000000000..320f80364fb4 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/fake/fake_node.go @@ -0,0 +1,108 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + v1 "k8s.io/kubernetes/pkg/api/v1" + core "k8s.io/kubernetes/pkg/client/testing/core" + labels "k8s.io/kubernetes/pkg/labels" + watch "k8s.io/kubernetes/pkg/watch" +) + +// FakeNodes implements NodeInterface +type FakeNodes struct { + Fake *FakeCore +} + +var nodesResource = unversioned.GroupVersionResource{Group: "", Version: "v1", Resource: "nodes"} + +func (c *FakeNodes) Create(node *v1.Node) (result *v1.Node, err error) { + obj, err := c.Fake. + Invokes(core.NewRootCreateAction(nodesResource, node), &v1.Node{}) + if obj == nil { + return nil, err + } + return obj.(*v1.Node), err +} + +func (c *FakeNodes) Update(node *v1.Node) (result *v1.Node, err error) { + obj, err := c.Fake. + Invokes(core.NewRootUpdateAction(nodesResource, node), &v1.Node{}) + if obj == nil { + return nil, err + } + return obj.(*v1.Node), err +} + +func (c *FakeNodes) UpdateStatus(node *v1.Node) (*v1.Node, error) { + obj, err := c.Fake. + Invokes(core.NewRootUpdateSubresourceAction(nodesResource, "status", node), &v1.Node{}) + if obj == nil { + return nil, err + } + return obj.(*v1.Node), err +} + +func (c *FakeNodes) Delete(name string, options *api.DeleteOptions) error { + _, err := c.Fake. + Invokes(core.NewRootDeleteAction(nodesResource, name), &v1.Node{}) + return err +} + +func (c *FakeNodes) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + action := core.NewRootDeleteCollectionAction(nodesResource, listOptions) + + _, err := c.Fake.Invokes(action, &v1.NodeList{}) + return err +} + +func (c *FakeNodes) Get(name string) (result *v1.Node, err error) { + obj, err := c.Fake. + Invokes(core.NewRootGetAction(nodesResource, name), &v1.Node{}) + if obj == nil { + return nil, err + } + return obj.(*v1.Node), err +} + +func (c *FakeNodes) List(opts api.ListOptions) (result *v1.NodeList, err error) { + obj, err := c.Fake. + Invokes(core.NewRootListAction(nodesResource, opts), &v1.NodeList{}) + if obj == nil { + return nil, err + } + + label := opts.LabelSelector + if label == nil { + label = labels.Everything() + } + list := &v1.NodeList{} + for _, item := range obj.(*v1.NodeList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested nodes. +func (c *FakeNodes) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.Fake. 
+ InvokesWatch(core.NewRootWatchAction(nodesResource, opts)) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/fake/fake_persistentvolume.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/fake/fake_persistentvolume.go new file mode 100644 index 000000000000..0aa61b830d8e --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/fake/fake_persistentvolume.go @@ -0,0 +1,108 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + v1 "k8s.io/kubernetes/pkg/api/v1" + core "k8s.io/kubernetes/pkg/client/testing/core" + labels "k8s.io/kubernetes/pkg/labels" + watch "k8s.io/kubernetes/pkg/watch" +) + +// FakePersistentVolumes implements PersistentVolumeInterface +type FakePersistentVolumes struct { + Fake *FakeCore +} + +var persistentvolumesResource = unversioned.GroupVersionResource{Group: "", Version: "v1", Resource: "persistentvolumes"} + +func (c *FakePersistentVolumes) Create(persistentVolume *v1.PersistentVolume) (result *v1.PersistentVolume, err error) { + obj, err := c.Fake. + Invokes(core.NewRootCreateAction(persistentvolumesResource, persistentVolume), &v1.PersistentVolume{}) + if obj == nil { + return nil, err + } + return obj.(*v1.PersistentVolume), err +} + +func (c *FakePersistentVolumes) Update(persistentVolume *v1.PersistentVolume) (result *v1.PersistentVolume, err error) { + obj, err := c.Fake. + Invokes(core.NewRootUpdateAction(persistentvolumesResource, persistentVolume), &v1.PersistentVolume{}) + if obj == nil { + return nil, err + } + return obj.(*v1.PersistentVolume), err +} + +func (c *FakePersistentVolumes) UpdateStatus(persistentVolume *v1.PersistentVolume) (*v1.PersistentVolume, error) { + obj, err := c.Fake. + Invokes(core.NewRootUpdateSubresourceAction(persistentvolumesResource, "status", persistentVolume), &v1.PersistentVolume{}) + if obj == nil { + return nil, err + } + return obj.(*v1.PersistentVolume), err +} + +func (c *FakePersistentVolumes) Delete(name string, options *api.DeleteOptions) error { + _, err := c.Fake. + Invokes(core.NewRootDeleteAction(persistentvolumesResource, name), &v1.PersistentVolume{}) + return err +} + +func (c *FakePersistentVolumes) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + action := core.NewRootDeleteCollectionAction(persistentvolumesResource, listOptions) + + _, err := c.Fake.Invokes(action, &v1.PersistentVolumeList{}) + return err +} + +func (c *FakePersistentVolumes) Get(name string) (result *v1.PersistentVolume, err error) { + obj, err := c.Fake. 
+ Invokes(core.NewRootGetAction(persistentvolumesResource, name), &v1.PersistentVolume{}) + if obj == nil { + return nil, err + } + return obj.(*v1.PersistentVolume), err +} + +func (c *FakePersistentVolumes) List(opts api.ListOptions) (result *v1.PersistentVolumeList, err error) { + obj, err := c.Fake. + Invokes(core.NewRootListAction(persistentvolumesResource, opts), &v1.PersistentVolumeList{}) + if obj == nil { + return nil, err + } + + label := opts.LabelSelector + if label == nil { + label = labels.Everything() + } + list := &v1.PersistentVolumeList{} + for _, item := range obj.(*v1.PersistentVolumeList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested persistentVolumes. +func (c *FakePersistentVolumes) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(core.NewRootWatchAction(persistentvolumesResource, opts)) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/fake/fake_pod.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/fake/fake_pod.go new file mode 100644 index 000000000000..0273bb9b0722 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/fake/fake_pod.go @@ -0,0 +1,116 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + v1 "k8s.io/kubernetes/pkg/api/v1" + core "k8s.io/kubernetes/pkg/client/testing/core" + labels "k8s.io/kubernetes/pkg/labels" + watch "k8s.io/kubernetes/pkg/watch" +) + +// FakePods implements PodInterface +type FakePods struct { + Fake *FakeCore + ns string +} + +var podsResource = unversioned.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"} + +func (c *FakePods) Create(pod *v1.Pod) (result *v1.Pod, err error) { + obj, err := c.Fake. + Invokes(core.NewCreateAction(podsResource, c.ns, pod), &v1.Pod{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Pod), err +} + +func (c *FakePods) Update(pod *v1.Pod) (result *v1.Pod, err error) { + obj, err := c.Fake. + Invokes(core.NewUpdateAction(podsResource, c.ns, pod), &v1.Pod{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Pod), err +} + +func (c *FakePods) UpdateStatus(pod *v1.Pod) (*v1.Pod, error) { + obj, err := c.Fake. + Invokes(core.NewUpdateSubresourceAction(podsResource, "status", c.ns, pod), &v1.Pod{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Pod), err +} + +func (c *FakePods) Delete(name string, options *api.DeleteOptions) error { + _, err := c.Fake. 
+ Invokes(core.NewDeleteAction(podsResource, c.ns, name), &v1.Pod{}) + + return err +} + +func (c *FakePods) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + action := core.NewDeleteCollectionAction(podsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1.PodList{}) + return err +} + +func (c *FakePods) Get(name string) (result *v1.Pod, err error) { + obj, err := c.Fake. + Invokes(core.NewGetAction(podsResource, c.ns, name), &v1.Pod{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Pod), err +} + +func (c *FakePods) List(opts api.ListOptions) (result *v1.PodList, err error) { + obj, err := c.Fake. + Invokes(core.NewListAction(podsResource, c.ns, opts), &v1.PodList{}) + + if obj == nil { + return nil, err + } + + label := opts.LabelSelector + if label == nil { + label = labels.Everything() + } + list := &v1.PodList{} + for _, item := range obj.(*v1.PodList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested pods. +func (c *FakePods) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(core.NewWatchAction(podsResource, c.ns, opts)) + +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/fake/fake_pod_expansion.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/fake/fake_pod_expansion.go new file mode 100644 index 000000000000..7e478dd5ec72 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/fake/fake_pod_expansion.go @@ -0,0 +1,46 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package fake + +import ( + "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/client/restclient" + "k8s.io/kubernetes/pkg/client/testing/core" +) + +func (c *FakePods) Bind(binding *v1.Binding) error { + action := core.CreateActionImpl{} + action.Verb = "create" + action.Resource = podsResource + action.Subresource = "bindings" + action.Object = binding + + _, err := c.Fake.Invokes(action, binding) + return err +} + +func (c *FakePods) GetLogs(name string, opts *v1.PodLogOptions) *restclient.Request { + action := core.GenericActionImpl{} + action.Verb = "get" + action.Namespace = c.ns + action.Resource = podsResource + action.Subresource = "logs" + action.Value = opts + + _, _ = c.Fake.Invokes(action, &v1.Pod{}) + return &restclient.Request{} +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/fake/fake_podtemplate.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/fake/fake_podtemplate.go new file mode 100644 index 000000000000..89302ae8c43a --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/fake/fake_podtemplate.go @@ -0,0 +1,106 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + v1 "k8s.io/kubernetes/pkg/api/v1" + core "k8s.io/kubernetes/pkg/client/testing/core" + labels "k8s.io/kubernetes/pkg/labels" + watch "k8s.io/kubernetes/pkg/watch" +) + +// FakePodTemplates implements PodTemplateInterface +type FakePodTemplates struct { + Fake *FakeCore + ns string +} + +var podtemplatesResource = unversioned.GroupVersionResource{Group: "", Version: "v1", Resource: "podtemplates"} + +func (c *FakePodTemplates) Create(podTemplate *v1.PodTemplate) (result *v1.PodTemplate, err error) { + obj, err := c.Fake. + Invokes(core.NewCreateAction(podtemplatesResource, c.ns, podTemplate), &v1.PodTemplate{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.PodTemplate), err +} + +func (c *FakePodTemplates) Update(podTemplate *v1.PodTemplate) (result *v1.PodTemplate, err error) { + obj, err := c.Fake. + Invokes(core.NewUpdateAction(podtemplatesResource, c.ns, podTemplate), &v1.PodTemplate{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.PodTemplate), err +} + +func (c *FakePodTemplates) Delete(name string, options *api.DeleteOptions) error { + _, err := c.Fake. + Invokes(core.NewDeleteAction(podtemplatesResource, c.ns, name), &v1.PodTemplate{}) + + return err +} + +func (c *FakePodTemplates) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + action := core.NewDeleteCollectionAction(podtemplatesResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1.PodTemplateList{}) + return err +} + +func (c *FakePodTemplates) Get(name string) (result *v1.PodTemplate, err error) { + obj, err := c.Fake. 
+ Invokes(core.NewGetAction(podtemplatesResource, c.ns, name), &v1.PodTemplate{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.PodTemplate), err +} + +func (c *FakePodTemplates) List(opts api.ListOptions) (result *v1.PodTemplateList, err error) { + obj, err := c.Fake. + Invokes(core.NewListAction(podtemplatesResource, c.ns, opts), &v1.PodTemplateList{}) + + if obj == nil { + return nil, err + } + + label := opts.LabelSelector + if label == nil { + label = labels.Everything() + } + list := &v1.PodTemplateList{} + for _, item := range obj.(*v1.PodTemplateList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested podTemplates. +func (c *FakePodTemplates) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(core.NewWatchAction(podtemplatesResource, c.ns, opts)) + +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/fake/fake_replicationcontroller.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/fake/fake_replicationcontroller.go new file mode 100644 index 000000000000..3599a46e2220 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/fake/fake_replicationcontroller.go @@ -0,0 +1,116 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + v1 "k8s.io/kubernetes/pkg/api/v1" + core "k8s.io/kubernetes/pkg/client/testing/core" + labels "k8s.io/kubernetes/pkg/labels" + watch "k8s.io/kubernetes/pkg/watch" +) + +// FakeReplicationControllers implements ReplicationControllerInterface +type FakeReplicationControllers struct { + Fake *FakeCore + ns string +} + +var replicationcontrollersResource = unversioned.GroupVersionResource{Group: "", Version: "v1", Resource: "replicationcontrollers"} + +func (c *FakeReplicationControllers) Create(replicationController *v1.ReplicationController) (result *v1.ReplicationController, err error) { + obj, err := c.Fake. + Invokes(core.NewCreateAction(replicationcontrollersResource, c.ns, replicationController), &v1.ReplicationController{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.ReplicationController), err +} + +func (c *FakeReplicationControllers) Update(replicationController *v1.ReplicationController) (result *v1.ReplicationController, err error) { + obj, err := c.Fake. + Invokes(core.NewUpdateAction(replicationcontrollersResource, c.ns, replicationController), &v1.ReplicationController{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.ReplicationController), err +} + +func (c *FakeReplicationControllers) UpdateStatus(replicationController *v1.ReplicationController) (*v1.ReplicationController, error) { + obj, err := c.Fake. 
+ Invokes(core.NewUpdateSubresourceAction(replicationcontrollersResource, "status", c.ns, replicationController), &v1.ReplicationController{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.ReplicationController), err +} + +func (c *FakeReplicationControllers) Delete(name string, options *api.DeleteOptions) error { + _, err := c.Fake. + Invokes(core.NewDeleteAction(replicationcontrollersResource, c.ns, name), &v1.ReplicationController{}) + + return err +} + +func (c *FakeReplicationControllers) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + action := core.NewDeleteCollectionAction(replicationcontrollersResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1.ReplicationControllerList{}) + return err +} + +func (c *FakeReplicationControllers) Get(name string) (result *v1.ReplicationController, err error) { + obj, err := c.Fake. + Invokes(core.NewGetAction(replicationcontrollersResource, c.ns, name), &v1.ReplicationController{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.ReplicationController), err +} + +func (c *FakeReplicationControllers) List(opts api.ListOptions) (result *v1.ReplicationControllerList, err error) { + obj, err := c.Fake. + Invokes(core.NewListAction(replicationcontrollersResource, c.ns, opts), &v1.ReplicationControllerList{}) + + if obj == nil { + return nil, err + } + + label := opts.LabelSelector + if label == nil { + label = labels.Everything() + } + list := &v1.ReplicationControllerList{} + for _, item := range obj.(*v1.ReplicationControllerList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested replicationControllers. +func (c *FakeReplicationControllers) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(core.NewWatchAction(replicationcontrollersResource, c.ns, opts)) + +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/fake/fake_resourcequota.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/fake/fake_resourcequota.go new file mode 100644 index 000000000000..2def4eec54d1 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/fake/fake_resourcequota.go @@ -0,0 +1,116 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package fake + +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + v1 "k8s.io/kubernetes/pkg/api/v1" + core "k8s.io/kubernetes/pkg/client/testing/core" + labels "k8s.io/kubernetes/pkg/labels" + watch "k8s.io/kubernetes/pkg/watch" +) + +// FakeResourceQuotas implements ResourceQuotaInterface +type FakeResourceQuotas struct { + Fake *FakeCore + ns string +} + +var resourcequotasResource = unversioned.GroupVersionResource{Group: "", Version: "v1", Resource: "resourcequotas"} + +func (c *FakeResourceQuotas) Create(resourceQuota *v1.ResourceQuota) (result *v1.ResourceQuota, err error) { + obj, err := c.Fake. + Invokes(core.NewCreateAction(resourcequotasResource, c.ns, resourceQuota), &v1.ResourceQuota{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.ResourceQuota), err +} + +func (c *FakeResourceQuotas) Update(resourceQuota *v1.ResourceQuota) (result *v1.ResourceQuota, err error) { + obj, err := c.Fake. + Invokes(core.NewUpdateAction(resourcequotasResource, c.ns, resourceQuota), &v1.ResourceQuota{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.ResourceQuota), err +} + +func (c *FakeResourceQuotas) UpdateStatus(resourceQuota *v1.ResourceQuota) (*v1.ResourceQuota, error) { + obj, err := c.Fake. + Invokes(core.NewUpdateSubresourceAction(resourcequotasResource, "status", c.ns, resourceQuota), &v1.ResourceQuota{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.ResourceQuota), err +} + +func (c *FakeResourceQuotas) Delete(name string, options *api.DeleteOptions) error { + _, err := c.Fake. + Invokes(core.NewDeleteAction(resourcequotasResource, c.ns, name), &v1.ResourceQuota{}) + + return err +} + +func (c *FakeResourceQuotas) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + action := core.NewDeleteCollectionAction(resourcequotasResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1.ResourceQuotaList{}) + return err +} + +func (c *FakeResourceQuotas) Get(name string) (result *v1.ResourceQuota, err error) { + obj, err := c.Fake. + Invokes(core.NewGetAction(resourcequotasResource, c.ns, name), &v1.ResourceQuota{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.ResourceQuota), err +} + +func (c *FakeResourceQuotas) List(opts api.ListOptions) (result *v1.ResourceQuotaList, err error) { + obj, err := c.Fake. + Invokes(core.NewListAction(resourcequotasResource, c.ns, opts), &v1.ResourceQuotaList{}) + + if obj == nil { + return nil, err + } + + label := opts.LabelSelector + if label == nil { + label = labels.Everything() + } + list := &v1.ResourceQuotaList{} + for _, item := range obj.(*v1.ResourceQuotaList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested resourceQuotas. +func (c *FakeResourceQuotas) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.Fake. 
+ InvokesWatch(core.NewWatchAction(resourcequotasResource, c.ns, opts)) + +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/fake/fake_secret.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/fake/fake_secret.go new file mode 100644 index 000000000000..921da249aabb --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/fake/fake_secret.go @@ -0,0 +1,106 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + v1 "k8s.io/kubernetes/pkg/api/v1" + core "k8s.io/kubernetes/pkg/client/testing/core" + labels "k8s.io/kubernetes/pkg/labels" + watch "k8s.io/kubernetes/pkg/watch" +) + +// FakeSecrets implements SecretInterface +type FakeSecrets struct { + Fake *FakeCore + ns string +} + +var secretsResource = unversioned.GroupVersionResource{Group: "", Version: "v1", Resource: "secrets"} + +func (c *FakeSecrets) Create(secret *v1.Secret) (result *v1.Secret, err error) { + obj, err := c.Fake. + Invokes(core.NewCreateAction(secretsResource, c.ns, secret), &v1.Secret{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Secret), err +} + +func (c *FakeSecrets) Update(secret *v1.Secret) (result *v1.Secret, err error) { + obj, err := c.Fake. + Invokes(core.NewUpdateAction(secretsResource, c.ns, secret), &v1.Secret{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Secret), err +} + +func (c *FakeSecrets) Delete(name string, options *api.DeleteOptions) error { + _, err := c.Fake. + Invokes(core.NewDeleteAction(secretsResource, c.ns, name), &v1.Secret{}) + + return err +} + +func (c *FakeSecrets) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + action := core.NewDeleteCollectionAction(secretsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1.SecretList{}) + return err +} + +func (c *FakeSecrets) Get(name string) (result *v1.Secret, err error) { + obj, err := c.Fake. + Invokes(core.NewGetAction(secretsResource, c.ns, name), &v1.Secret{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Secret), err +} + +func (c *FakeSecrets) List(opts api.ListOptions) (result *v1.SecretList, err error) { + obj, err := c.Fake. + Invokes(core.NewListAction(secretsResource, c.ns, opts), &v1.SecretList{}) + + if obj == nil { + return nil, err + } + + label := opts.LabelSelector + if label == nil { + label = labels.Everything() + } + list := &v1.SecretList{} + for _, item := range obj.(*v1.SecretList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested secrets. +func (c *FakeSecrets) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.Fake. 
+ InvokesWatch(core.NewWatchAction(secretsResource, c.ns, opts)) + +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/fake/fake_service.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/fake/fake_service.go new file mode 100644 index 000000000000..3355aa94e1f8 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/fake/fake_service.go @@ -0,0 +1,116 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + v1 "k8s.io/kubernetes/pkg/api/v1" + core "k8s.io/kubernetes/pkg/client/testing/core" + labels "k8s.io/kubernetes/pkg/labels" + watch "k8s.io/kubernetes/pkg/watch" +) + +// FakeServices implements ServiceInterface +type FakeServices struct { + Fake *FakeCore + ns string +} + +var servicesResource = unversioned.GroupVersionResource{Group: "", Version: "v1", Resource: "services"} + +func (c *FakeServices) Create(service *v1.Service) (result *v1.Service, err error) { + obj, err := c.Fake. + Invokes(core.NewCreateAction(servicesResource, c.ns, service), &v1.Service{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Service), err +} + +func (c *FakeServices) Update(service *v1.Service) (result *v1.Service, err error) { + obj, err := c.Fake. + Invokes(core.NewUpdateAction(servicesResource, c.ns, service), &v1.Service{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Service), err +} + +func (c *FakeServices) UpdateStatus(service *v1.Service) (*v1.Service, error) { + obj, err := c.Fake. + Invokes(core.NewUpdateSubresourceAction(servicesResource, "status", c.ns, service), &v1.Service{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Service), err +} + +func (c *FakeServices) Delete(name string, options *api.DeleteOptions) error { + _, err := c.Fake. + Invokes(core.NewDeleteAction(servicesResource, c.ns, name), &v1.Service{}) + + return err +} + +func (c *FakeServices) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + action := core.NewDeleteCollectionAction(servicesResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1.ServiceList{}) + return err +} + +func (c *FakeServices) Get(name string) (result *v1.Service, err error) { + obj, err := c.Fake. + Invokes(core.NewGetAction(servicesResource, c.ns, name), &v1.Service{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Service), err +} + +func (c *FakeServices) List(opts api.ListOptions) (result *v1.ServiceList, err error) { + obj, err := c.Fake. 
+ Invokes(core.NewListAction(servicesResource, c.ns, opts), &v1.ServiceList{}) + + if obj == nil { + return nil, err + } + + label := opts.LabelSelector + if label == nil { + label = labels.Everything() + } + list := &v1.ServiceList{} + for _, item := range obj.(*v1.ServiceList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested services. +func (c *FakeServices) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(core.NewWatchAction(servicesResource, c.ns, opts)) + +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/fake/fake_service_expansion.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/fake/fake_service_expansion.go new file mode 100644 index 000000000000..3494b873762e --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/fake/fake_service_expansion.go @@ -0,0 +1,26 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + "k8s.io/kubernetes/pkg/client/restclient" + "k8s.io/kubernetes/pkg/client/testing/core" +) + +func (c *FakeServices) ProxyGet(scheme, name, port, path string, params map[string]string) restclient.ResponseWrapper { + return c.Fake.InvokesProxy(core.NewProxyGetAction(servicesResource, c.ns, scheme, name, port, path, params)) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/fake/fake_serviceaccount.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/fake/fake_serviceaccount.go new file mode 100644 index 000000000000..fa10a5353c6d --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/fake/fake_serviceaccount.go @@ -0,0 +1,106 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package fake + +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + v1 "k8s.io/kubernetes/pkg/api/v1" + core "k8s.io/kubernetes/pkg/client/testing/core" + labels "k8s.io/kubernetes/pkg/labels" + watch "k8s.io/kubernetes/pkg/watch" +) + +// FakeServiceAccounts implements ServiceAccountInterface +type FakeServiceAccounts struct { + Fake *FakeCore + ns string +} + +var serviceaccountsResource = unversioned.GroupVersionResource{Group: "", Version: "v1", Resource: "serviceaccounts"} + +func (c *FakeServiceAccounts) Create(serviceAccount *v1.ServiceAccount) (result *v1.ServiceAccount, err error) { + obj, err := c.Fake. + Invokes(core.NewCreateAction(serviceaccountsResource, c.ns, serviceAccount), &v1.ServiceAccount{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.ServiceAccount), err +} + +func (c *FakeServiceAccounts) Update(serviceAccount *v1.ServiceAccount) (result *v1.ServiceAccount, err error) { + obj, err := c.Fake. + Invokes(core.NewUpdateAction(serviceaccountsResource, c.ns, serviceAccount), &v1.ServiceAccount{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.ServiceAccount), err +} + +func (c *FakeServiceAccounts) Delete(name string, options *api.DeleteOptions) error { + _, err := c.Fake. + Invokes(core.NewDeleteAction(serviceaccountsResource, c.ns, name), &v1.ServiceAccount{}) + + return err +} + +func (c *FakeServiceAccounts) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + action := core.NewDeleteCollectionAction(serviceaccountsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1.ServiceAccountList{}) + return err +} + +func (c *FakeServiceAccounts) Get(name string) (result *v1.ServiceAccount, err error) { + obj, err := c.Fake. + Invokes(core.NewGetAction(serviceaccountsResource, c.ns, name), &v1.ServiceAccount{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.ServiceAccount), err +} + +func (c *FakeServiceAccounts) List(opts api.ListOptions) (result *v1.ServiceAccountList, err error) { + obj, err := c.Fake. + Invokes(core.NewListAction(serviceaccountsResource, c.ns, opts), &v1.ServiceAccountList{}) + + if obj == nil { + return nil, err + } + + label := opts.LabelSelector + if label == nil { + label = labels.Everything() + } + list := &v1.ServiceAccountList{} + for _, item := range obj.(*v1.ServiceAccountList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested serviceAccounts. +func (c *FakeServiceAccounts) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(core.NewWatchAction(serviceaccountsResource, c.ns, opts)) + +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/generated_expansion.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/generated_expansion.go new file mode 100644 index 000000000000..9974ef5c6a54 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/generated_expansion.go @@ -0,0 +1,41 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +type ComponentStatusExpansion interface{} + +type EndpointsExpansion interface{} + +type LimitRangeExpansion interface{} + +type NodeExpansion interface{} + +type PersistentVolumeExpansion interface{} + +type PersistentVolumeClaimExpansion interface{} + +type PodTemplateExpansion interface{} + +type ReplicationControllerExpansion interface{} + +type ResourceQuotaExpansion interface{} + +type SecretExpansion interface{} + +type ServiceAccountExpansion interface{} + +type ConfigMapExpansion interface{} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/limitrange.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/limitrange.go new file mode 100644 index 000000000000..a44c61fa2280 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/limitrange.go @@ -0,0 +1,136 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + api "k8s.io/kubernetes/pkg/api" + v1 "k8s.io/kubernetes/pkg/api/v1" + watch "k8s.io/kubernetes/pkg/watch" +) + +// LimitRangesGetter has a method to return a LimitRangeInterface. +// A group's client should implement this interface. +type LimitRangesGetter interface { + LimitRanges(namespace string) LimitRangeInterface +} + +// LimitRangeInterface has methods to work with LimitRange resources. +type LimitRangeInterface interface { + Create(*v1.LimitRange) (*v1.LimitRange, error) + Update(*v1.LimitRange) (*v1.LimitRange, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*v1.LimitRange, error) + List(opts api.ListOptions) (*v1.LimitRangeList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + LimitRangeExpansion +} + +// limitRanges implements LimitRangeInterface +type limitRanges struct { + client *CoreClient + ns string +} + +// newLimitRanges returns a LimitRanges +func newLimitRanges(c *CoreClient, namespace string) *limitRanges { + return &limitRanges{ + client: c, + ns: namespace, + } +} + +// Create takes the representation of a limitRange and creates it. Returns the server's representation of the limitRange, and an error, if there is any. +func (c *limitRanges) Create(limitRange *v1.LimitRange) (result *v1.LimitRange, err error) { + result = &v1.LimitRange{} + err = c.client.Post(). + Namespace(c.ns). + Resource("limitranges"). + Body(limitRange). + Do(). 
+ Into(result) + return +} + +// Update takes the representation of a limitRange and updates it. Returns the server's representation of the limitRange, and an error, if there is any. +func (c *limitRanges) Update(limitRange *v1.LimitRange) (result *v1.LimitRange, err error) { + result = &v1.LimitRange{} + err = c.client.Put(). + Namespace(c.ns). + Resource("limitranges"). + Name(limitRange.Name). + Body(limitRange). + Do(). + Into(result) + return +} + +// Delete takes name of the limitRange and deletes it. Returns an error if one occurs. +func (c *limitRanges) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("limitranges"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *limitRanges) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("limitranges"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the limitRange, and returns the corresponding limitRange object, and an error if there is any. +func (c *limitRanges) Get(name string) (result *v1.LimitRange, err error) { + result = &v1.LimitRange{} + err = c.client.Get(). + Namespace(c.ns). + Resource("limitranges"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of LimitRanges that match those selectors. +func (c *limitRanges) List(opts api.ListOptions) (result *v1.LimitRangeList, err error) { + result = &v1.LimitRangeList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("limitranges"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested limitRanges. +func (c *limitRanges) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("limitranges"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/namespace.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/namespace.go new file mode 100644 index 000000000000..3d2cff144648 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/namespace.go @@ -0,0 +1,140 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + api "k8s.io/kubernetes/pkg/api" + v1 "k8s.io/kubernetes/pkg/api/v1" + watch "k8s.io/kubernetes/pkg/watch" +) + +// NamespacesGetter has a method to return a NamespaceInterface. +// A group's client should implement this interface. +type NamespacesGetter interface { + Namespaces() NamespaceInterface +} + +// NamespaceInterface has methods to work with Namespace resources. 
+type NamespaceInterface interface { + Create(*v1.Namespace) (*v1.Namespace, error) + Update(*v1.Namespace) (*v1.Namespace, error) + UpdateStatus(*v1.Namespace) (*v1.Namespace, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*v1.Namespace, error) + List(opts api.ListOptions) (*v1.NamespaceList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + NamespaceExpansion +} + +// namespaces implements NamespaceInterface +type namespaces struct { + client *CoreClient +} + +// newNamespaces returns a Namespaces +func newNamespaces(c *CoreClient) *namespaces { + return &namespaces{ + client: c, + } +} + +// Create takes the representation of a namespace and creates it. Returns the server's representation of the namespace, and an error, if there is any. +func (c *namespaces) Create(namespace *v1.Namespace) (result *v1.Namespace, err error) { + result = &v1.Namespace{} + err = c.client.Post(). + Resource("namespaces"). + Body(namespace). + Do(). + Into(result) + return +} + +// Update takes the representation of a namespace and updates it. Returns the server's representation of the namespace, and an error, if there is any. +func (c *namespaces) Update(namespace *v1.Namespace) (result *v1.Namespace, err error) { + result = &v1.Namespace{} + err = c.client.Put(). + Resource("namespaces"). + Name(namespace.Name). + Body(namespace). + Do(). + Into(result) + return +} + +func (c *namespaces) UpdateStatus(namespace *v1.Namespace) (result *v1.Namespace, err error) { + result = &v1.Namespace{} + err = c.client.Put(). + Resource("namespaces"). + Name(namespace.Name). + SubResource("status"). + Body(namespace). + Do(). + Into(result) + return +} + +// Delete takes name of the namespace and deletes it. Returns an error if one occurs. +func (c *namespaces) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Resource("namespaces"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *namespaces) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Resource("namespaces"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the namespace, and returns the corresponding namespace object, and an error if there is any. +func (c *namespaces) Get(name string) (result *v1.Namespace, err error) { + result = &v1.Namespace{} + err = c.client.Get(). + Resource("namespaces"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Namespaces that match those selectors. +func (c *namespaces) List(opts api.ListOptions) (result *v1.NamespaceList, err error) { + result = &v1.NamespaceList{} + err = c.client.Get(). + Resource("namespaces"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested namespaces. +func (c *namespaces) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Resource("namespaces"). + VersionedParams(&opts, api.ParameterCodec). 
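+ // Editorial annotation: VersionedParams encodes the ListOptions
+ // (label/field selectors, resourceVersion) into URL query parameters,
+ // and Watch() below opens the long-lived streaming connection.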
+ Watch() +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/namespace_expansion.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/namespace_expansion.go new file mode 100644 index 000000000000..7b5cf683d0ea --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/namespace_expansion.go @@ -0,0 +1,31 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import "k8s.io/kubernetes/pkg/api/v1" + +// The NamespaceExpansion interface allows manually adding extra methods to the NamespaceInterface. +type NamespaceExpansion interface { + Finalize(item *v1.Namespace) (*v1.Namespace, error) +} + +// Finalize takes the representation of a namespace to update. Returns the server's representation of the namespace, and an error, if it occurs. +func (c *namespaces) Finalize(namespace *v1.Namespace) (result *v1.Namespace, err error) { + result = &v1.Namespace{} + err = c.client.Put().Resource("namespaces").Name(namespace.Name).SubResource("finalize").Body(namespace).Do().Into(result) + return +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/node.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/node.go new file mode 100644 index 000000000000..464eb8d6d693 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/node.go @@ -0,0 +1,140 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + api "k8s.io/kubernetes/pkg/api" + v1 "k8s.io/kubernetes/pkg/api/v1" + watch "k8s.io/kubernetes/pkg/watch" +) + +// NodesGetter has a method to return a NodeInterface. +// A group's client should implement this interface. +type NodesGetter interface { + Nodes() NodeInterface +} + +// NodeInterface has methods to work with Node resources. 
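+//
+// Illustrative usage only (editorial sketch, not generated code;
+// assumes a configured release_1_3 Clientset named cs):
+//
+//	nodes, err := cs.Core().Nodes().List(api.ListOptions{})
+//	if err != nil {
+//		return err
+//	}
+//	for _, n := range nodes.Items {
+//		// inspect n.Name, n.Status.Conditions, ...
+//	}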
+type NodeInterface interface { + Create(*v1.Node) (*v1.Node, error) + Update(*v1.Node) (*v1.Node, error) + UpdateStatus(*v1.Node) (*v1.Node, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*v1.Node, error) + List(opts api.ListOptions) (*v1.NodeList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + NodeExpansion +} + +// nodes implements NodeInterface +type nodes struct { + client *CoreClient +} + +// newNodes returns a Nodes +func newNodes(c *CoreClient) *nodes { + return &nodes{ + client: c, + } +} + +// Create takes the representation of a node and creates it. Returns the server's representation of the node, and an error, if there is any. +func (c *nodes) Create(node *v1.Node) (result *v1.Node, err error) { + result = &v1.Node{} + err = c.client.Post(). + Resource("nodes"). + Body(node). + Do(). + Into(result) + return +} + +// Update takes the representation of a node and updates it. Returns the server's representation of the node, and an error, if there is any. +func (c *nodes) Update(node *v1.Node) (result *v1.Node, err error) { + result = &v1.Node{} + err = c.client.Put(). + Resource("nodes"). + Name(node.Name). + Body(node). + Do(). + Into(result) + return +} + +func (c *nodes) UpdateStatus(node *v1.Node) (result *v1.Node, err error) { + result = &v1.Node{} + err = c.client.Put(). + Resource("nodes"). + Name(node.Name). + SubResource("status"). + Body(node). + Do(). + Into(result) + return +} + +// Delete takes name of the node and deletes it. Returns an error if one occurs. +func (c *nodes) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Resource("nodes"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *nodes) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Resource("nodes"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the node, and returns the corresponding node object, and an error if there is any. +func (c *nodes) Get(name string) (result *v1.Node, err error) { + result = &v1.Node{} + err = c.client.Get(). + Resource("nodes"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Nodes that match those selectors. +func (c *nodes) List(opts api.ListOptions) (result *v1.NodeList, err error) { + result = &v1.NodeList{} + err = c.client.Get(). + Resource("nodes"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested nodes. +func (c *nodes) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Resource("nodes"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/persistentvolume.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/persistentvolume.go new file mode 100644 index 000000000000..85ddf060e270 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/persistentvolume.go @@ -0,0 +1,140 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + api "k8s.io/kubernetes/pkg/api" + v1 "k8s.io/kubernetes/pkg/api/v1" + watch "k8s.io/kubernetes/pkg/watch" +) + +// PersistentVolumesGetter has a method to return a PersistentVolumeInterface. +// A group's client should implement this interface. +type PersistentVolumesGetter interface { + PersistentVolumes() PersistentVolumeInterface +} + +// PersistentVolumeInterface has methods to work with PersistentVolume resources. +type PersistentVolumeInterface interface { + Create(*v1.PersistentVolume) (*v1.PersistentVolume, error) + Update(*v1.PersistentVolume) (*v1.PersistentVolume, error) + UpdateStatus(*v1.PersistentVolume) (*v1.PersistentVolume, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*v1.PersistentVolume, error) + List(opts api.ListOptions) (*v1.PersistentVolumeList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + PersistentVolumeExpansion +} + +// persistentVolumes implements PersistentVolumeInterface +type persistentVolumes struct { + client *CoreClient +} + +// newPersistentVolumes returns a PersistentVolumes +func newPersistentVolumes(c *CoreClient) *persistentVolumes { + return &persistentVolumes{ + client: c, + } +} + +// Create takes the representation of a persistentVolume and creates it. Returns the server's representation of the persistentVolume, and an error, if there is any. +func (c *persistentVolumes) Create(persistentVolume *v1.PersistentVolume) (result *v1.PersistentVolume, err error) { + result = &v1.PersistentVolume{} + err = c.client.Post(). + Resource("persistentvolumes"). + Body(persistentVolume). + Do(). + Into(result) + return +} + +// Update takes the representation of a persistentVolume and updates it. Returns the server's representation of the persistentVolume, and an error, if there is any. +func (c *persistentVolumes) Update(persistentVolume *v1.PersistentVolume) (result *v1.PersistentVolume, err error) { + result = &v1.PersistentVolume{} + err = c.client.Put(). + Resource("persistentvolumes"). + Name(persistentVolume.Name). + Body(persistentVolume). + Do(). + Into(result) + return +} + +func (c *persistentVolumes) UpdateStatus(persistentVolume *v1.PersistentVolume) (result *v1.PersistentVolume, err error) { + result = &v1.PersistentVolume{} + err = c.client.Put(). + Resource("persistentvolumes"). + Name(persistentVolume.Name). + SubResource("status"). + Body(persistentVolume). + Do(). + Into(result) + return +} + +// Delete takes name of the persistentVolume and deletes it. Returns an error if one occurs. +func (c *persistentVolumes) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Resource("persistentvolumes"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. 
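+//
+// Illustrative usage only (editorial sketch, not generated code;
+// assumes cs is a configured release_1_3 Clientset and labels is
+// k8s.io/kubernetes/pkg/labels):
+//
+//	sel := labels.SelectorFromSet(labels.Set{"reclaim": "scratch"})
+//	err := cs.Core().PersistentVolumes().DeleteCollection(
+//		nil, api.ListOptions{LabelSelector: sel})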
+func (c *persistentVolumes) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Resource("persistentvolumes"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the persistentVolume, and returns the corresponding persistentVolume object, and an error if there is any. +func (c *persistentVolumes) Get(name string) (result *v1.PersistentVolume, err error) { + result = &v1.PersistentVolume{} + err = c.client.Get(). + Resource("persistentvolumes"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of PersistentVolumes that match those selectors. +func (c *persistentVolumes) List(opts api.ListOptions) (result *v1.PersistentVolumeList, err error) { + result = &v1.PersistentVolumeList{} + err = c.client.Get(). + Resource("persistentvolumes"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested persistentVolumes. +func (c *persistentVolumes) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Resource("persistentvolumes"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/pod.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/pod.go new file mode 100644 index 000000000000..d2ed5faaa845 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/pod.go @@ -0,0 +1,150 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + api "k8s.io/kubernetes/pkg/api" + v1 "k8s.io/kubernetes/pkg/api/v1" + watch "k8s.io/kubernetes/pkg/watch" +) + +// PodsGetter has a method to return a PodInterface. +// A group's client should implement this interface. +type PodsGetter interface { + Pods(namespace string) PodInterface +} + +// PodInterface has methods to work with Pod resources. +type PodInterface interface { + Create(*v1.Pod) (*v1.Pod, error) + Update(*v1.Pod) (*v1.Pod, error) + UpdateStatus(*v1.Pod) (*v1.Pod, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*v1.Pod, error) + List(opts api.ListOptions) (*v1.PodList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + PodExpansion +} + +// pods implements PodInterface +type pods struct { + client *CoreClient + ns string +} + +// newPods returns a Pods +func newPods(c *CoreClient, namespace string) *pods { + return &pods{ + client: c, + ns: namespace, + } +} + +// Create takes the representation of a pod and creates it. Returns the server's representation of the pod, and an error, if there is any. 
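+//
+// Illustrative usage only (editorial sketch, not generated code;
+// assumes a configured release_1_3 Clientset named cs):
+//
+//	pod := &v1.Pod{}
+//	pod.Name = "example"
+//	pod.Spec.Containers = []v1.Container{{Name: "app", Image: "nginx:1.9"}}
+//	created, err := cs.Core().Pods("default").Create(pod)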
+func (c *pods) Create(pod *v1.Pod) (result *v1.Pod, err error) { + result = &v1.Pod{} + err = c.client.Post(). + Namespace(c.ns). + Resource("pods"). + Body(pod). + Do(). + Into(result) + return +} + +// Update takes the representation of a pod and updates it. Returns the server's representation of the pod, and an error, if there is any. +func (c *pods) Update(pod *v1.Pod) (result *v1.Pod, err error) { + result = &v1.Pod{} + err = c.client.Put(). + Namespace(c.ns). + Resource("pods"). + Name(pod.Name). + Body(pod). + Do(). + Into(result) + return +} + +func (c *pods) UpdateStatus(pod *v1.Pod) (result *v1.Pod, err error) { + result = &v1.Pod{} + err = c.client.Put(). + Namespace(c.ns). + Resource("pods"). + Name(pod.Name). + SubResource("status"). + Body(pod). + Do(). + Into(result) + return +} + +// Delete takes name of the pod and deletes it. Returns an error if one occurs. +func (c *pods) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("pods"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *pods) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("pods"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the pod, and returns the corresponding pod object, and an error if there is any. +func (c *pods) Get(name string) (result *v1.Pod, err error) { + result = &v1.Pod{} + err = c.client.Get(). + Namespace(c.ns). + Resource("pods"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Pods that match those selectors. +func (c *pods) List(opts api.ListOptions) (result *v1.PodList, err error) { + result = &v1.PodList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("pods"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested pods. +func (c *pods) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("pods"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/pod_expansion.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/pod_expansion.go new file mode 100644 index 000000000000..f061b5d9234a --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/pod_expansion.go @@ -0,0 +1,39 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package v1
+
+import (
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/api/v1"
+ "k8s.io/kubernetes/pkg/client/restclient"
+)
+
+// The PodExpansion interface allows manually adding extra methods to the PodInterface.
+type PodExpansion interface {
+ Bind(binding *v1.Binding) error
+ GetLogs(name string, opts *v1.PodLogOptions) *restclient.Request
+}
+
+// Bind applies the provided binding to the named pod in the current namespace (binding.Namespace is ignored).
+func (c *pods) Bind(binding *v1.Binding) error {
+ return c.client.Post().Namespace(c.ns).Resource("pods").Name(binding.Name).SubResource("binding").Body(binding).Do().Error()
+}
+
+// GetLogs constructs a request for getting the logs for a pod
+func (c *pods) GetLogs(name string, opts *v1.PodLogOptions) *restclient.Request {
+ return c.client.Get().Namespace(c.ns).Name(name).Resource("pods").SubResource("log").VersionedParams(opts, api.ParameterCodec)
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/podtemplate.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/podtemplate.go
new file mode 100644
index 000000000000..1b95106d1752
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/podtemplate.go
@@ -0,0 +1,136 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ api "k8s.io/kubernetes/pkg/api"
+ v1 "k8s.io/kubernetes/pkg/api/v1"
+ watch "k8s.io/kubernetes/pkg/watch"
+)
+
+// PodTemplatesGetter has a method to return a PodTemplateInterface.
+// A group's client should implement this interface.
+type PodTemplatesGetter interface {
+ PodTemplates(namespace string) PodTemplateInterface
+}
+
+// PodTemplateInterface has methods to work with PodTemplate resources.
+type PodTemplateInterface interface {
+ Create(*v1.PodTemplate) (*v1.PodTemplate, error)
+ Update(*v1.PodTemplate) (*v1.PodTemplate, error)
+ Delete(name string, options *api.DeleteOptions) error
+ DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error
+ Get(name string) (*v1.PodTemplate, error)
+ List(opts api.ListOptions) (*v1.PodTemplateList, error)
+ Watch(opts api.ListOptions) (watch.Interface, error)
+ PodTemplateExpansion
+}
+
+// podTemplates implements PodTemplateInterface
+type podTemplates struct {
+ client *CoreClient
+ ns string
+}
+
+// newPodTemplates returns a PodTemplates
+func newPodTemplates(c *CoreClient, namespace string) *podTemplates {
+ return &podTemplates{
+ client: c,
+ ns: namespace,
+ }
+}
+
+// Create takes the representation of a podTemplate and creates it. Returns the server's representation of the podTemplate, and an error, if there is any.
+func (c *podTemplates) Create(podTemplate *v1.PodTemplate) (result *v1.PodTemplate, err error) {
+ result = &v1.PodTemplate{}
+ err = c.client.Post().
+ Namespace(c.ns).
+ Resource("podtemplates").
+ Body(podTemplate).
+ Do().
+ Into(result) + return +} + +// Update takes the representation of a podTemplate and updates it. Returns the server's representation of the podTemplate, and an error, if there is any. +func (c *podTemplates) Update(podTemplate *v1.PodTemplate) (result *v1.PodTemplate, err error) { + result = &v1.PodTemplate{} + err = c.client.Put(). + Namespace(c.ns). + Resource("podtemplates"). + Name(podTemplate.Name). + Body(podTemplate). + Do(). + Into(result) + return +} + +// Delete takes name of the podTemplate and deletes it. Returns an error if one occurs. +func (c *podTemplates) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("podtemplates"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *podTemplates) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("podtemplates"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the podTemplate, and returns the corresponding podTemplate object, and an error if there is any. +func (c *podTemplates) Get(name string) (result *v1.PodTemplate, err error) { + result = &v1.PodTemplate{} + err = c.client.Get(). + Namespace(c.ns). + Resource("podtemplates"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of PodTemplates that match those selectors. +func (c *podTemplates) List(opts api.ListOptions) (result *v1.PodTemplateList, err error) { + result = &v1.PodTemplateList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("podtemplates"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested podTemplates. +func (c *podTemplates) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("podtemplates"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/replicationcontroller.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/replicationcontroller.go new file mode 100644 index 000000000000..20bcc90c3723 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/replicationcontroller.go @@ -0,0 +1,150 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + api "k8s.io/kubernetes/pkg/api" + v1 "k8s.io/kubernetes/pkg/api/v1" + watch "k8s.io/kubernetes/pkg/watch" +) + +// ReplicationControllersGetter has a method to return a ReplicationControllerInterface. +// A group's client should implement this interface. 
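+//
+// Editorial note: this Getter/Interface split is the pattern used for
+// every namespaced resource in this package; the group client
+// implements the getter, which scopes an interface to one namespace
+// (sketch; cs is a configured release_1_3 Clientset):
+//
+//	rcs := cs.Core().ReplicationControllers("default")
+//	list, err := rcs.List(api.ListOptions{})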
+type ReplicationControllersGetter interface { + ReplicationControllers(namespace string) ReplicationControllerInterface +} + +// ReplicationControllerInterface has methods to work with ReplicationController resources. +type ReplicationControllerInterface interface { + Create(*v1.ReplicationController) (*v1.ReplicationController, error) + Update(*v1.ReplicationController) (*v1.ReplicationController, error) + UpdateStatus(*v1.ReplicationController) (*v1.ReplicationController, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*v1.ReplicationController, error) + List(opts api.ListOptions) (*v1.ReplicationControllerList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + ReplicationControllerExpansion +} + +// replicationControllers implements ReplicationControllerInterface +type replicationControllers struct { + client *CoreClient + ns string +} + +// newReplicationControllers returns a ReplicationControllers +func newReplicationControllers(c *CoreClient, namespace string) *replicationControllers { + return &replicationControllers{ + client: c, + ns: namespace, + } +} + +// Create takes the representation of a replicationController and creates it. Returns the server's representation of the replicationController, and an error, if there is any. +func (c *replicationControllers) Create(replicationController *v1.ReplicationController) (result *v1.ReplicationController, err error) { + result = &v1.ReplicationController{} + err = c.client.Post(). + Namespace(c.ns). + Resource("replicationcontrollers"). + Body(replicationController). + Do(). + Into(result) + return +} + +// Update takes the representation of a replicationController and updates it. Returns the server's representation of the replicationController, and an error, if there is any. +func (c *replicationControllers) Update(replicationController *v1.ReplicationController) (result *v1.ReplicationController, err error) { + result = &v1.ReplicationController{} + err = c.client.Put(). + Namespace(c.ns). + Resource("replicationcontrollers"). + Name(replicationController.Name). + Body(replicationController). + Do(). + Into(result) + return +} + +func (c *replicationControllers) UpdateStatus(replicationController *v1.ReplicationController) (result *v1.ReplicationController, err error) { + result = &v1.ReplicationController{} + err = c.client.Put(). + Namespace(c.ns). + Resource("replicationcontrollers"). + Name(replicationController.Name). + SubResource("status"). + Body(replicationController). + Do(). + Into(result) + return +} + +// Delete takes name of the replicationController and deletes it. Returns an error if one occurs. +func (c *replicationControllers) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("replicationcontrollers"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *replicationControllers) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("replicationcontrollers"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the replicationController, and returns the corresponding replicationController object, and an error if there is any. 
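+//
+// A typical read-modify-write flow against this interface (editorial
+// sketch, not generated code; rcs as in the note above):
+//
+//	rc, err := rcs.Get("frontend")
+//	if err == nil {
+//		replicas := int32(3)
+//		rc.Spec.Replicas = &replicas
+//		rc, err = rcs.Update(rc)
+//	}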
+func (c *replicationControllers) Get(name string) (result *v1.ReplicationController, err error) { + result = &v1.ReplicationController{} + err = c.client.Get(). + Namespace(c.ns). + Resource("replicationcontrollers"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ReplicationControllers that match those selectors. +func (c *replicationControllers) List(opts api.ListOptions) (result *v1.ReplicationControllerList, err error) { + result = &v1.ReplicationControllerList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("replicationcontrollers"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested replicationControllers. +func (c *replicationControllers) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("replicationcontrollers"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/resourcequota.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/resourcequota.go new file mode 100644 index 000000000000..466e963d6c9b --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/resourcequota.go @@ -0,0 +1,150 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + api "k8s.io/kubernetes/pkg/api" + v1 "k8s.io/kubernetes/pkg/api/v1" + watch "k8s.io/kubernetes/pkg/watch" +) + +// ResourceQuotasGetter has a method to return a ResourceQuotaInterface. +// A group's client should implement this interface. +type ResourceQuotasGetter interface { + ResourceQuotas(namespace string) ResourceQuotaInterface +} + +// ResourceQuotaInterface has methods to work with ResourceQuota resources. +type ResourceQuotaInterface interface { + Create(*v1.ResourceQuota) (*v1.ResourceQuota, error) + Update(*v1.ResourceQuota) (*v1.ResourceQuota, error) + UpdateStatus(*v1.ResourceQuota) (*v1.ResourceQuota, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*v1.ResourceQuota, error) + List(opts api.ListOptions) (*v1.ResourceQuotaList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + ResourceQuotaExpansion +} + +// resourceQuotas implements ResourceQuotaInterface +type resourceQuotas struct { + client *CoreClient + ns string +} + +// newResourceQuotas returns a ResourceQuotas +func newResourceQuotas(c *CoreClient, namespace string) *resourceQuotas { + return &resourceQuotas{ + client: c, + ns: namespace, + } +} + +// Create takes the representation of a resourceQuota and creates it. Returns the server's representation of the resourceQuota, and an error, if there is any. 
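+//
+// Illustrative usage only (editorial sketch, not generated code; a real
+// quota spec would carry a v1.ResourceList of hard limits, omitted here
+// for brevity):
+//
+//	q := &v1.ResourceQuota{}
+//	q.Name = "compute-quota"
+//	q, err := cs.Core().ResourceQuotas("team-a").Create(q)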
+func (c *resourceQuotas) Create(resourceQuota *v1.ResourceQuota) (result *v1.ResourceQuota, err error) { + result = &v1.ResourceQuota{} + err = c.client.Post(). + Namespace(c.ns). + Resource("resourcequotas"). + Body(resourceQuota). + Do(). + Into(result) + return +} + +// Update takes the representation of a resourceQuota and updates it. Returns the server's representation of the resourceQuota, and an error, if there is any. +func (c *resourceQuotas) Update(resourceQuota *v1.ResourceQuota) (result *v1.ResourceQuota, err error) { + result = &v1.ResourceQuota{} + err = c.client.Put(). + Namespace(c.ns). + Resource("resourcequotas"). + Name(resourceQuota.Name). + Body(resourceQuota). + Do(). + Into(result) + return +} + +func (c *resourceQuotas) UpdateStatus(resourceQuota *v1.ResourceQuota) (result *v1.ResourceQuota, err error) { + result = &v1.ResourceQuota{} + err = c.client.Put(). + Namespace(c.ns). + Resource("resourcequotas"). + Name(resourceQuota.Name). + SubResource("status"). + Body(resourceQuota). + Do(). + Into(result) + return +} + +// Delete takes name of the resourceQuota and deletes it. Returns an error if one occurs. +func (c *resourceQuotas) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("resourcequotas"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *resourceQuotas) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("resourcequotas"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the resourceQuota, and returns the corresponding resourceQuota object, and an error if there is any. +func (c *resourceQuotas) Get(name string) (result *v1.ResourceQuota, err error) { + result = &v1.ResourceQuota{} + err = c.client.Get(). + Namespace(c.ns). + Resource("resourcequotas"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ResourceQuotas that match those selectors. +func (c *resourceQuotas) List(opts api.ListOptions) (result *v1.ResourceQuotaList, err error) { + result = &v1.ResourceQuotaList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("resourcequotas"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested resourceQuotas. +func (c *resourceQuotas) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("resourcequotas"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/secret.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/secret.go new file mode 100644 index 000000000000..a95aa84f440d --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/secret.go @@ -0,0 +1,136 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + api "k8s.io/kubernetes/pkg/api" + v1 "k8s.io/kubernetes/pkg/api/v1" + watch "k8s.io/kubernetes/pkg/watch" +) + +// SecretsGetter has a method to return a SecretInterface. +// A group's client should implement this interface. +type SecretsGetter interface { + Secrets(namespace string) SecretInterface +} + +// SecretInterface has methods to work with Secret resources. +type SecretInterface interface { + Create(*v1.Secret) (*v1.Secret, error) + Update(*v1.Secret) (*v1.Secret, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*v1.Secret, error) + List(opts api.ListOptions) (*v1.SecretList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + SecretExpansion +} + +// secrets implements SecretInterface +type secrets struct { + client *CoreClient + ns string +} + +// newSecrets returns a Secrets +func newSecrets(c *CoreClient, namespace string) *secrets { + return &secrets{ + client: c, + ns: namespace, + } +} + +// Create takes the representation of a secret and creates it. Returns the server's representation of the secret, and an error, if there is any. +func (c *secrets) Create(secret *v1.Secret) (result *v1.Secret, err error) { + result = &v1.Secret{} + err = c.client.Post(). + Namespace(c.ns). + Resource("secrets"). + Body(secret). + Do(). + Into(result) + return +} + +// Update takes the representation of a secret and updates it. Returns the server's representation of the secret, and an error, if there is any. +func (c *secrets) Update(secret *v1.Secret) (result *v1.Secret, err error) { + result = &v1.Secret{} + err = c.client.Put(). + Namespace(c.ns). + Resource("secrets"). + Name(secret.Name). + Body(secret). + Do(). + Into(result) + return +} + +// Delete takes name of the secret and deletes it. Returns an error if one occurs. +func (c *secrets) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("secrets"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *secrets) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("secrets"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the secret, and returns the corresponding secret object, and an error if there is any. +func (c *secrets) Get(name string) (result *v1.Secret, err error) { + result = &v1.Secret{} + err = c.client.Get(). + Namespace(c.ns). + Resource("secrets"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Secrets that match those selectors. +func (c *secrets) List(opts api.ListOptions) (result *v1.SecretList, err error) { + result = &v1.SecretList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("secrets"). + VersionedParams(&opts, api.ParameterCodec). + Do(). 
+ Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested secrets. +func (c *secrets) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("secrets"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/service.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/service.go new file mode 100644 index 000000000000..cd62b5d94f20 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/service.go @@ -0,0 +1,150 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + api "k8s.io/kubernetes/pkg/api" + v1 "k8s.io/kubernetes/pkg/api/v1" + watch "k8s.io/kubernetes/pkg/watch" +) + +// ServicesGetter has a method to return a ServiceInterface. +// A group's client should implement this interface. +type ServicesGetter interface { + Services(namespace string) ServiceInterface +} + +// ServiceInterface has methods to work with Service resources. +type ServiceInterface interface { + Create(*v1.Service) (*v1.Service, error) + Update(*v1.Service) (*v1.Service, error) + UpdateStatus(*v1.Service) (*v1.Service, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*v1.Service, error) + List(opts api.ListOptions) (*v1.ServiceList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + ServiceExpansion +} + +// services implements ServiceInterface +type services struct { + client *CoreClient + ns string +} + +// newServices returns a Services +func newServices(c *CoreClient, namespace string) *services { + return &services{ + client: c, + ns: namespace, + } +} + +// Create takes the representation of a service and creates it. Returns the server's representation of the service, and an error, if there is any. +func (c *services) Create(service *v1.Service) (result *v1.Service, err error) { + result = &v1.Service{} + err = c.client.Post(). + Namespace(c.ns). + Resource("services"). + Body(service). + Do(). + Into(result) + return +} + +// Update takes the representation of a service and updates it. Returns the server's representation of the service, and an error, if there is any. +func (c *services) Update(service *v1.Service) (result *v1.Service, err error) { + result = &v1.Service{} + err = c.client.Put(). + Namespace(c.ns). + Resource("services"). + Name(service.Name). + Body(service). + Do(). + Into(result) + return +} + +func (c *services) UpdateStatus(service *v1.Service) (result *v1.Service, err error) { + result = &v1.Service{} + err = c.client.Put(). + Namespace(c.ns). + Resource("services"). + Name(service.Name). + SubResource("status"). + Body(service). + Do(). 
+ Into(result) + return +} + +// Delete takes name of the service and deletes it. Returns an error if one occurs. +func (c *services) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("services"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *services) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("services"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the service, and returns the corresponding service object, and an error if there is any. +func (c *services) Get(name string) (result *v1.Service, err error) { + result = &v1.Service{} + err = c.client.Get(). + Namespace(c.ns). + Resource("services"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Services that match those selectors. +func (c *services) List(opts api.ListOptions) (result *v1.ServiceList, err error) { + result = &v1.ServiceList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("services"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested services. +func (c *services) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("services"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/service_expansion.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/service_expansion.go new file mode 100644 index 000000000000..b4300483b846 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/service_expansion.go @@ -0,0 +1,41 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/kubernetes/pkg/client/restclient" + "k8s.io/kubernetes/pkg/util/net" +) + +// The ServiceExpansion interface allows manually adding extra methods to the ServiceInterface. +type ServiceExpansion interface { + ProxyGet(scheme, name, port, path string, params map[string]string) restclient.ResponseWrapper +} + +// ProxyGet returns a response of the service by calling it through the proxy. +func (c *services) ProxyGet(scheme, name, port, path string, params map[string]string) restclient.ResponseWrapper { + request := c.client.Get(). + Prefix("proxy"). + Namespace(c.ns). + Resource("services"). + Name(net.JoinSchemeNamePort(scheme, name, port)). 
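+ // Editorial annotation: JoinSchemeNamePort renders the
+ // "<scheme>:<name>:<port>" segment the apiserver's proxy path expects
+ // (scheme and port may be empty); Suffix(path) below appends the
+ // request path inside the service.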
+ Suffix(path) + for k, v := range params { + request = request.Param(k, v) + } + return request +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/serviceaccount.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/serviceaccount.go new file mode 100644 index 000000000000..eb0b258fa919 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1/serviceaccount.go @@ -0,0 +1,136 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + api "k8s.io/kubernetes/pkg/api" + v1 "k8s.io/kubernetes/pkg/api/v1" + watch "k8s.io/kubernetes/pkg/watch" +) + +// ServiceAccountsGetter has a method to return a ServiceAccountInterface. +// A group's client should implement this interface. +type ServiceAccountsGetter interface { + ServiceAccounts(namespace string) ServiceAccountInterface +} + +// ServiceAccountInterface has methods to work with ServiceAccount resources. +type ServiceAccountInterface interface { + Create(*v1.ServiceAccount) (*v1.ServiceAccount, error) + Update(*v1.ServiceAccount) (*v1.ServiceAccount, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*v1.ServiceAccount, error) + List(opts api.ListOptions) (*v1.ServiceAccountList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + ServiceAccountExpansion +} + +// serviceAccounts implements ServiceAccountInterface +type serviceAccounts struct { + client *CoreClient + ns string +} + +// newServiceAccounts returns a ServiceAccounts +func newServiceAccounts(c *CoreClient, namespace string) *serviceAccounts { + return &serviceAccounts{ + client: c, + ns: namespace, + } +} + +// Create takes the representation of a serviceAccount and creates it. Returns the server's representation of the serviceAccount, and an error, if there is any. +func (c *serviceAccounts) Create(serviceAccount *v1.ServiceAccount) (result *v1.ServiceAccount, err error) { + result = &v1.ServiceAccount{} + err = c.client.Post(). + Namespace(c.ns). + Resource("serviceaccounts"). + Body(serviceAccount). + Do(). + Into(result) + return +} + +// Update takes the representation of a serviceAccount and updates it. Returns the server's representation of the serviceAccount, and an error, if there is any. +func (c *serviceAccounts) Update(serviceAccount *v1.ServiceAccount) (result *v1.ServiceAccount, err error) { + result = &v1.ServiceAccount{} + err = c.client.Put(). + Namespace(c.ns). + Resource("serviceaccounts"). + Name(serviceAccount.Name). + Body(serviceAccount). + Do(). + Into(result) + return +} + +// Delete takes name of the serviceAccount and deletes it. Returns an error if one occurs. +func (c *serviceAccounts) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). 
+ Resource("serviceaccounts"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *serviceAccounts) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("serviceaccounts"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the serviceAccount, and returns the corresponding serviceAccount object, and an error if there is any. +func (c *serviceAccounts) Get(name string) (result *v1.ServiceAccount, err error) { + result = &v1.ServiceAccount{} + err = c.client.Get(). + Namespace(c.ns). + Resource("serviceaccounts"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ServiceAccounts that match those selectors. +func (c *serviceAccounts) List(opts api.ListOptions) (result *v1.ServiceAccountList, err error) { + result = &v1.ServiceAccountList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("serviceaccounts"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested serviceAccounts. +func (c *serviceAccounts) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("serviceaccounts"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1/daemonset.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1/daemonset.go new file mode 100644 index 000000000000..ecbece591bd5 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1/daemonset.go @@ -0,0 +1,150 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + api "k8s.io/kubernetes/pkg/api" + v1beta1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" + watch "k8s.io/kubernetes/pkg/watch" +) + +// DaemonSetsGetter has a method to return a DaemonSetInterface. +// A group's client should implement this interface. +type DaemonSetsGetter interface { + DaemonSets(namespace string) DaemonSetInterface +} + +// DaemonSetInterface has methods to work with DaemonSet resources. 
+type DaemonSetInterface interface { + Create(*v1beta1.DaemonSet) (*v1beta1.DaemonSet, error) + Update(*v1beta1.DaemonSet) (*v1beta1.DaemonSet, error) + UpdateStatus(*v1beta1.DaemonSet) (*v1beta1.DaemonSet, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*v1beta1.DaemonSet, error) + List(opts api.ListOptions) (*v1beta1.DaemonSetList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + DaemonSetExpansion +} + +// daemonSets implements DaemonSetInterface +type daemonSets struct { + client *ExtensionsClient + ns string +} + +// newDaemonSets returns a DaemonSets +func newDaemonSets(c *ExtensionsClient, namespace string) *daemonSets { + return &daemonSets{ + client: c, + ns: namespace, + } +} + +// Create takes the representation of a daemonSet and creates it. Returns the server's representation of the daemonSet, and an error, if there is any. +func (c *daemonSets) Create(daemonSet *v1beta1.DaemonSet) (result *v1beta1.DaemonSet, err error) { + result = &v1beta1.DaemonSet{} + err = c.client.Post(). + Namespace(c.ns). + Resource("daemonsets"). + Body(daemonSet). + Do(). + Into(result) + return +} + +// Update takes the representation of a daemonSet and updates it. Returns the server's representation of the daemonSet, and an error, if there is any. +func (c *daemonSets) Update(daemonSet *v1beta1.DaemonSet) (result *v1beta1.DaemonSet, err error) { + result = &v1beta1.DaemonSet{} + err = c.client.Put(). + Namespace(c.ns). + Resource("daemonsets"). + Name(daemonSet.Name). + Body(daemonSet). + Do(). + Into(result) + return +} + +func (c *daemonSets) UpdateStatus(daemonSet *v1beta1.DaemonSet) (result *v1beta1.DaemonSet, err error) { + result = &v1beta1.DaemonSet{} + err = c.client.Put(). + Namespace(c.ns). + Resource("daemonsets"). + Name(daemonSet.Name). + SubResource("status"). + Body(daemonSet). + Do(). + Into(result) + return +} + +// Delete takes name of the daemonSet and deletes it. Returns an error if one occurs. +func (c *daemonSets) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("daemonsets"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *daemonSets) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("daemonsets"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the daemonSet, and returns the corresponding daemonSet object, and an error if there is any. +func (c *daemonSets) Get(name string) (result *v1beta1.DaemonSet, err error) { + result = &v1beta1.DaemonSet{} + err = c.client.Get(). + Namespace(c.ns). + Resource("daemonsets"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of DaemonSets that match those selectors. +func (c *daemonSets) List(opts api.ListOptions) (result *v1beta1.DaemonSetList, err error) { + result = &v1beta1.DaemonSetList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("daemonsets"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested daemonSets. +func (c *daemonSets) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Namespace(c.ns). 
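+ // Editorial annotation: Prefix("watch") routes this GET to the
+ // group's watch endpoint; the selectors in opts are attached below by
+ // VersionedParams.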
+ Resource("daemonsets"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1/deployment.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1/deployment.go new file mode 100644 index 000000000000..7cc3ff9d3f42 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1/deployment.go @@ -0,0 +1,150 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + api "k8s.io/kubernetes/pkg/api" + v1beta1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" + watch "k8s.io/kubernetes/pkg/watch" +) + +// DeploymentsGetter has a method to return a DeploymentInterface. +// A group's client should implement this interface. +type DeploymentsGetter interface { + Deployments(namespace string) DeploymentInterface +} + +// DeploymentInterface has methods to work with Deployment resources. +type DeploymentInterface interface { + Create(*v1beta1.Deployment) (*v1beta1.Deployment, error) + Update(*v1beta1.Deployment) (*v1beta1.Deployment, error) + UpdateStatus(*v1beta1.Deployment) (*v1beta1.Deployment, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*v1beta1.Deployment, error) + List(opts api.ListOptions) (*v1beta1.DeploymentList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + DeploymentExpansion +} + +// deployments implements DeploymentInterface +type deployments struct { + client *ExtensionsClient + ns string +} + +// newDeployments returns a Deployments +func newDeployments(c *ExtensionsClient, namespace string) *deployments { + return &deployments{ + client: c, + ns: namespace, + } +} + +// Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any. +func (c *deployments) Create(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) { + result = &v1beta1.Deployment{} + err = c.client.Post(). + Namespace(c.ns). + Resource("deployments"). + Body(deployment). + Do(). + Into(result) + return +} + +// Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any. +func (c *deployments) Update(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) { + result = &v1beta1.Deployment{} + err = c.client.Put(). + Namespace(c.ns). + Resource("deployments"). + Name(deployment.Name). + Body(deployment). + Do(). + Into(result) + return +} + +func (c *deployments) UpdateStatus(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) { + result = &v1beta1.Deployment{} + err = c.client.Put(). + Namespace(c.ns). + Resource("deployments"). 
+ Name(deployment.Name). + SubResource("status"). + Body(deployment). + Do(). + Into(result) + return +} + +// Delete takes name of the deployment and deletes it. Returns an error if one occurs. +func (c *deployments) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("deployments"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *deployments) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("deployments"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any. +func (c *deployments) Get(name string) (result *v1beta1.Deployment, err error) { + result = &v1beta1.Deployment{} + err = c.client.Get(). + Namespace(c.ns). + Resource("deployments"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Deployments that match those selectors. +func (c *deployments) List(opts api.ListOptions) (result *v1beta1.DeploymentList, err error) { + result = &v1beta1.DeploymentList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("deployments"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested deployments. +func (c *deployments) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("deployments"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1/deployment_expansion.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1/deployment_expansion.go new file mode 100644 index 000000000000..0c3ff63678a4 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1/deployment_expansion.go @@ -0,0 +1,29 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" + +// The DeploymentExpansion interface allows manually adding extra methods to the DeploymentInterface. +type DeploymentExpansion interface { + Rollback(*v1beta1.DeploymentRollback) error +} + +// Rollback applies the provided DeploymentRollback to the named deployment in the current namespace.
+func (c *deployments) Rollback(deploymentRollback *v1beta1.DeploymentRollback) error { + return c.client.Post().Namespace(c.ns).Resource("deployments").Name(deploymentRollback.Name).SubResource("rollback").Body(deploymentRollback).Do().Error() +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1/doc.go new file mode 100644 index 000000000000..22d20e331d8c --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with arguments: --clientset-name=release_1_3 --input=[api/v1,extensions/v1beta1,autoscaling/v1,batch/v1] + +// This package has the automatically generated typed clients. +package v1beta1 diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1/extensions_client.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1/extensions_client.go new file mode 100644 index 000000000000..6b6d91140230 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1/extensions_client.go @@ -0,0 +1,141 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + api "k8s.io/kubernetes/pkg/api" + registered "k8s.io/kubernetes/pkg/apimachinery/registered" + restclient "k8s.io/kubernetes/pkg/client/restclient" +) + +type ExtensionsInterface interface { + GetRESTClient() *restclient.RESTClient + DaemonSetsGetter + DeploymentsGetter + HorizontalPodAutoscalersGetter + IngressesGetter + JobsGetter + PodSecurityPoliciesGetter + ReplicaSetsGetter + ScalesGetter + ThirdPartyResourcesGetter +} + +// ExtensionsClient is used to interact with features provided by the Extensions group. 
+type ExtensionsClient struct { + *restclient.RESTClient +} + +func (c *ExtensionsClient) DaemonSets(namespace string) DaemonSetInterface { + return newDaemonSets(c, namespace) +} + +func (c *ExtensionsClient) Deployments(namespace string) DeploymentInterface { + return newDeployments(c, namespace) +} + +func (c *ExtensionsClient) HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerInterface { + return newHorizontalPodAutoscalers(c, namespace) +} + +func (c *ExtensionsClient) Ingresses(namespace string) IngressInterface { + return newIngresses(c, namespace) +} + +func (c *ExtensionsClient) Jobs(namespace string) JobInterface { + return newJobs(c, namespace) +} + +func (c *ExtensionsClient) PodSecurityPolicies() PodSecurityPolicyInterface { + return newPodSecurityPolicies(c) +} + +func (c *ExtensionsClient) ReplicaSets(namespace string) ReplicaSetInterface { + return newReplicaSets(c, namespace) +} + +func (c *ExtensionsClient) Scales(namespace string) ScaleInterface { + return newScales(c, namespace) +} + +func (c *ExtensionsClient) ThirdPartyResources() ThirdPartyResourceInterface { + return newThirdPartyResources(c) +} + +// NewForConfig creates a new ExtensionsClient for the given config. +func NewForConfig(c *restclient.Config) (*ExtensionsClient, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := restclient.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &ExtensionsClient{client}, nil +} + +// NewForConfigOrDie creates a new ExtensionsClient for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *restclient.Config) *ExtensionsClient { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new ExtensionsClient for the given RESTClient. +func New(c *restclient.RESTClient) *ExtensionsClient { + return &ExtensionsClient{c} +} + +func setConfigDefaults(config *restclient.Config) error { + // if extensions group is not registered, return an error + g, err := registered.Group("extensions") + if err != nil { + return err + } + config.APIPath = "/apis" + if config.UserAgent == "" { + config.UserAgent = restclient.DefaultKubernetesUserAgent() + } + // TODO: Unconditionally set the config.Version, until we fix the config. + //if config.Version == "" { + copyGroupVersion := g.GroupVersion + config.GroupVersion = &copyGroupVersion + //} + + config.NegotiatedSerializer = api.Codecs + + if config.QPS == 0 { + config.QPS = 5 + } + if config.Burst == 0 { + config.Burst = 10 + } + return nil +} + +// GetRESTClient returns a RESTClient that is used to communicate +// with the API server by this client implementation. +func (c *ExtensionsClient) GetRESTClient() *restclient.RESTClient { + if c == nil { + return nil + } + return c.RESTClient +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1/fake/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1/fake/doc.go new file mode 100644 index 000000000000..924812c706ec --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with arguments: --clientset-name=release_1_3 --input=[api/v1,extensions/v1beta1,autoscaling/v1,batch/v1] + +// Package fake has the automatically generated clients. +package fake diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1/fake/fake_daemonset.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1/fake/fake_daemonset.go new file mode 100644 index 000000000000..26187506e505 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1/fake/fake_daemonset.go @@ -0,0 +1,116 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + v1beta1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" + core "k8s.io/kubernetes/pkg/client/testing/core" + labels "k8s.io/kubernetes/pkg/labels" + watch "k8s.io/kubernetes/pkg/watch" +) + +// FakeDaemonSets implements DaemonSetInterface +type FakeDaemonSets struct { + Fake *FakeExtensions + ns string +} + +var daemonsetsResource = unversioned.GroupVersionResource{Group: "extensions", Version: "v1beta1", Resource: "daemonsets"} + +func (c *FakeDaemonSets) Create(daemonSet *v1beta1.DaemonSet) (result *v1beta1.DaemonSet, err error) { + obj, err := c.Fake. + Invokes(core.NewCreateAction(daemonsetsResource, c.ns, daemonSet), &v1beta1.DaemonSet{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.DaemonSet), err +} + +func (c *FakeDaemonSets) Update(daemonSet *v1beta1.DaemonSet) (result *v1beta1.DaemonSet, err error) { + obj, err := c.Fake. + Invokes(core.NewUpdateAction(daemonsetsResource, c.ns, daemonSet), &v1beta1.DaemonSet{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.DaemonSet), err +} + +func (c *FakeDaemonSets) UpdateStatus(daemonSet *v1beta1.DaemonSet) (*v1beta1.DaemonSet, error) { + obj, err := c.Fake. + Invokes(core.NewUpdateSubresourceAction(daemonsetsResource, "status", c.ns, daemonSet), &v1beta1.DaemonSet{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.DaemonSet), err +} + +func (c *FakeDaemonSets) Delete(name string, options *api.DeleteOptions) error { + _, err := c.Fake. 
+ Invokes(core.NewDeleteAction(daemonsetsResource, c.ns, name), &v1beta1.DaemonSet{}) + + return err +} + +func (c *FakeDaemonSets) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + action := core.NewDeleteCollectionAction(daemonsetsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1beta1.DaemonSetList{}) + return err +} + +func (c *FakeDaemonSets) Get(name string) (result *v1beta1.DaemonSet, err error) { + obj, err := c.Fake. + Invokes(core.NewGetAction(daemonsetsResource, c.ns, name), &v1beta1.DaemonSet{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.DaemonSet), err +} + +func (c *FakeDaemonSets) List(opts api.ListOptions) (result *v1beta1.DaemonSetList, err error) { + obj, err := c.Fake. + Invokes(core.NewListAction(daemonsetsResource, c.ns, opts), &v1beta1.DaemonSetList{}) + + if obj == nil { + return nil, err + } + + label := opts.LabelSelector + if label == nil { + label = labels.Everything() + } + list := &v1beta1.DaemonSetList{} + for _, item := range obj.(*v1beta1.DaemonSetList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested daemonSets. +func (c *FakeDaemonSets) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(core.NewWatchAction(daemonsetsResource, c.ns, opts)) + +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1/fake/fake_deployment.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1/fake/fake_deployment.go new file mode 100644 index 000000000000..dc6e55db6922 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1/fake/fake_deployment.go @@ -0,0 +1,116 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + v1beta1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" + core "k8s.io/kubernetes/pkg/client/testing/core" + labels "k8s.io/kubernetes/pkg/labels" + watch "k8s.io/kubernetes/pkg/watch" +) + +// FakeDeployments implements DeploymentInterface +type FakeDeployments struct { + Fake *FakeExtensions + ns string +} + +var deploymentsResource = unversioned.GroupVersionResource{Group: "extensions", Version: "v1beta1", Resource: "deployments"} + +func (c *FakeDeployments) Create(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) { + obj, err := c.Fake. + Invokes(core.NewCreateAction(deploymentsResource, c.ns, deployment), &v1beta1.Deployment{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Deployment), err +} + +func (c *FakeDeployments) Update(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) { + obj, err := c.Fake. 
+ Invokes(core.NewUpdateAction(deploymentsResource, c.ns, deployment), &v1beta1.Deployment{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Deployment), err +} + +func (c *FakeDeployments) UpdateStatus(deployment *v1beta1.Deployment) (*v1beta1.Deployment, error) { + obj, err := c.Fake. + Invokes(core.NewUpdateSubresourceAction(deploymentsResource, "status", c.ns, deployment), &v1beta1.Deployment{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Deployment), err +} + +func (c *FakeDeployments) Delete(name string, options *api.DeleteOptions) error { + _, err := c.Fake. + Invokes(core.NewDeleteAction(deploymentsResource, c.ns, name), &v1beta1.Deployment{}) + + return err +} + +func (c *FakeDeployments) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + action := core.NewDeleteCollectionAction(deploymentsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1beta1.DeploymentList{}) + return err +} + +func (c *FakeDeployments) Get(name string) (result *v1beta1.Deployment, err error) { + obj, err := c.Fake. + Invokes(core.NewGetAction(deploymentsResource, c.ns, name), &v1beta1.Deployment{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Deployment), err +} + +func (c *FakeDeployments) List(opts api.ListOptions) (result *v1beta1.DeploymentList, err error) { + obj, err := c.Fake. + Invokes(core.NewListAction(deploymentsResource, c.ns, opts), &v1beta1.DeploymentList{}) + + if obj == nil { + return nil, err + } + + label := opts.LabelSelector + if label == nil { + label = labels.Everything() + } + list := &v1beta1.DeploymentList{} + for _, item := range obj.(*v1beta1.DeploymentList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested deployments. +func (c *FakeDeployments) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(core.NewWatchAction(deploymentsResource, c.ns, opts)) + +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1/fake/fake_deployment_expansion.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1/fake/fake_deployment_expansion.go new file mode 100644 index 000000000000..f154693645de --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1/fake/fake_deployment_expansion.go @@ -0,0 +1,33 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package fake + +import ( + "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" + "k8s.io/kubernetes/pkg/client/testing/core" +) + +func (c *FakeDeployments) Rollback(deploymentRollback *v1beta1.DeploymentRollback) error { + action := core.CreateActionImpl{} + action.Verb = "create" + action.Resource = deploymentsResource + action.Subresource = "rollback" + action.Object = deploymentRollback + + _, err := c.Fake.Invokes(action, deploymentRollback) + return err +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1/fake/fake_extensions_client.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1/fake/fake_extensions_client.go new file mode 100644 index 000000000000..ac0f33da68aa --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1/fake/fake_extensions_client.go @@ -0,0 +1,69 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + v1beta1 "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1" + restclient "k8s.io/kubernetes/pkg/client/restclient" + core "k8s.io/kubernetes/pkg/client/testing/core" +) + +type FakeExtensions struct { + *core.Fake +} + +func (c *FakeExtensions) DaemonSets(namespace string) v1beta1.DaemonSetInterface { + return &FakeDaemonSets{c, namespace} +} + +func (c *FakeExtensions) Deployments(namespace string) v1beta1.DeploymentInterface { + return &FakeDeployments{c, namespace} +} + +func (c *FakeExtensions) HorizontalPodAutoscalers(namespace string) v1beta1.HorizontalPodAutoscalerInterface { + return &FakeHorizontalPodAutoscalers{c, namespace} +} + +func (c *FakeExtensions) Ingresses(namespace string) v1beta1.IngressInterface { + return &FakeIngresses{c, namespace} +} + +func (c *FakeExtensions) Jobs(namespace string) v1beta1.JobInterface { + return &FakeJobs{c, namespace} +} + +func (c *FakeExtensions) PodSecurityPolicies() v1beta1.PodSecurityPolicyInterface { + return &FakePodSecurityPolicies{c} +} + +func (c *FakeExtensions) ReplicaSets(namespace string) v1beta1.ReplicaSetInterface { + return &FakeReplicaSets{c, namespace} +} + +func (c *FakeExtensions) Scales(namespace string) v1beta1.ScaleInterface { + return &FakeScales{c, namespace} +} + +func (c *FakeExtensions) ThirdPartyResources() v1beta1.ThirdPartyResourceInterface { + return &FakeThirdPartyResources{c} +} + +// GetRESTClient returns a RESTClient that is used to communicate +// with the API server by this client implementation.
+func (c *FakeExtensions) GetRESTClient() *restclient.RESTClient { + return nil +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1/fake/fake_horizontalpodautoscaler.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1/fake/fake_horizontalpodautoscaler.go new file mode 100644 index 000000000000..8517996527bc --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1/fake/fake_horizontalpodautoscaler.go @@ -0,0 +1,116 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + v1beta1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" + core "k8s.io/kubernetes/pkg/client/testing/core" + labels "k8s.io/kubernetes/pkg/labels" + watch "k8s.io/kubernetes/pkg/watch" +) + +// FakeHorizontalPodAutoscalers implements HorizontalPodAutoscalerInterface +type FakeHorizontalPodAutoscalers struct { + Fake *FakeExtensions + ns string +} + +var horizontalpodautoscalersResource = unversioned.GroupVersionResource{Group: "extensions", Version: "v1beta1", Resource: "horizontalpodautoscalers"} + +func (c *FakeHorizontalPodAutoscalers) Create(horizontalPodAutoscaler *v1beta1.HorizontalPodAutoscaler) (result *v1beta1.HorizontalPodAutoscaler, err error) { + obj, err := c.Fake. + Invokes(core.NewCreateAction(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler), &v1beta1.HorizontalPodAutoscaler{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.HorizontalPodAutoscaler), err +} + +func (c *FakeHorizontalPodAutoscalers) Update(horizontalPodAutoscaler *v1beta1.HorizontalPodAutoscaler) (result *v1beta1.HorizontalPodAutoscaler, err error) { + obj, err := c.Fake. + Invokes(core.NewUpdateAction(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler), &v1beta1.HorizontalPodAutoscaler{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.HorizontalPodAutoscaler), err +} + +func (c *FakeHorizontalPodAutoscalers) UpdateStatus(horizontalPodAutoscaler *v1beta1.HorizontalPodAutoscaler) (*v1beta1.HorizontalPodAutoscaler, error) { + obj, err := c.Fake. + Invokes(core.NewUpdateSubresourceAction(horizontalpodautoscalersResource, "status", c.ns, horizontalPodAutoscaler), &v1beta1.HorizontalPodAutoscaler{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.HorizontalPodAutoscaler), err +} + +func (c *FakeHorizontalPodAutoscalers) Delete(name string, options *api.DeleteOptions) error { + _, err := c.Fake. 
+ Invokes(core.NewDeleteAction(horizontalpodautoscalersResource, c.ns, name), &v1beta1.HorizontalPodAutoscaler{}) + + return err +} + +func (c *FakeHorizontalPodAutoscalers) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + action := core.NewDeleteCollectionAction(horizontalpodautoscalersResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1beta1.HorizontalPodAutoscalerList{}) + return err +} + +func (c *FakeHorizontalPodAutoscalers) Get(name string) (result *v1beta1.HorizontalPodAutoscaler, err error) { + obj, err := c.Fake. + Invokes(core.NewGetAction(horizontalpodautoscalersResource, c.ns, name), &v1beta1.HorizontalPodAutoscaler{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.HorizontalPodAutoscaler), err +} + +func (c *FakeHorizontalPodAutoscalers) List(opts api.ListOptions) (result *v1beta1.HorizontalPodAutoscalerList, err error) { + obj, err := c.Fake. + Invokes(core.NewListAction(horizontalpodautoscalersResource, c.ns, opts), &v1beta1.HorizontalPodAutoscalerList{}) + + if obj == nil { + return nil, err + } + + label := opts.LabelSelector + if label == nil { + label = labels.Everything() + } + list := &v1beta1.HorizontalPodAutoscalerList{} + for _, item := range obj.(*v1beta1.HorizontalPodAutoscalerList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. +func (c *FakeHorizontalPodAutoscalers) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(core.NewWatchAction(horizontalpodautoscalersResource, c.ns, opts)) + +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1/fake/fake_ingress.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1/fake/fake_ingress.go new file mode 100644 index 000000000000..e1c46d4272d6 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1/fake/fake_ingress.go @@ -0,0 +1,116 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + v1beta1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" + core "k8s.io/kubernetes/pkg/client/testing/core" + labels "k8s.io/kubernetes/pkg/labels" + watch "k8s.io/kubernetes/pkg/watch" +) + +// FakeIngresses implements IngressInterface +type FakeIngresses struct { + Fake *FakeExtensions + ns string +} + +var ingressesResource = unversioned.GroupVersionResource{Group: "extensions", Version: "v1beta1", Resource: "ingresses"} + +func (c *FakeIngresses) Create(ingress *v1beta1.Ingress) (result *v1beta1.Ingress, err error) { + obj, err := c.Fake. 
+ Invokes(core.NewCreateAction(ingressesResource, c.ns, ingress), &v1beta1.Ingress{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Ingress), err +} + +func (c *FakeIngresses) Update(ingress *v1beta1.Ingress) (result *v1beta1.Ingress, err error) { + obj, err := c.Fake. + Invokes(core.NewUpdateAction(ingressesResource, c.ns, ingress), &v1beta1.Ingress{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Ingress), err +} + +func (c *FakeIngresses) UpdateStatus(ingress *v1beta1.Ingress) (*v1beta1.Ingress, error) { + obj, err := c.Fake. + Invokes(core.NewUpdateSubresourceAction(ingressesResource, "status", c.ns, ingress), &v1beta1.Ingress{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Ingress), err +} + +func (c *FakeIngresses) Delete(name string, options *api.DeleteOptions) error { + _, err := c.Fake. + Invokes(core.NewDeleteAction(ingressesResource, c.ns, name), &v1beta1.Ingress{}) + + return err +} + +func (c *FakeIngresses) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + action := core.NewDeleteCollectionAction(ingressesResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1beta1.IngressList{}) + return err +} + +func (c *FakeIngresses) Get(name string) (result *v1beta1.Ingress, err error) { + obj, err := c.Fake. + Invokes(core.NewGetAction(ingressesResource, c.ns, name), &v1beta1.Ingress{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Ingress), err +} + +func (c *FakeIngresses) List(opts api.ListOptions) (result *v1beta1.IngressList, err error) { + obj, err := c.Fake. + Invokes(core.NewListAction(ingressesResource, c.ns, opts), &v1beta1.IngressList{}) + + if obj == nil { + return nil, err + } + + label := opts.LabelSelector + if label == nil { + label = labels.Everything() + } + list := &v1beta1.IngressList{} + for _, item := range obj.(*v1beta1.IngressList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested ingresses. +func (c *FakeIngresses) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(core.NewWatchAction(ingressesResource, c.ns, opts)) + +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1/fake/fake_job.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1/fake/fake_job.go new file mode 100644 index 000000000000..e7819d36aa3f --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1/fake/fake_job.go @@ -0,0 +1,116 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package fake + +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + v1beta1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" + core "k8s.io/kubernetes/pkg/client/testing/core" + labels "k8s.io/kubernetes/pkg/labels" + watch "k8s.io/kubernetes/pkg/watch" +) + +// FakeJobs implements JobInterface +type FakeJobs struct { + Fake *FakeExtensions + ns string +} + +var jobsResource = unversioned.GroupVersionResource{Group: "extensions", Version: "v1beta1", Resource: "jobs"} + +func (c *FakeJobs) Create(job *v1beta1.Job) (result *v1beta1.Job, err error) { + obj, err := c.Fake. + Invokes(core.NewCreateAction(jobsResource, c.ns, job), &v1beta1.Job{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Job), err +} + +func (c *FakeJobs) Update(job *v1beta1.Job) (result *v1beta1.Job, err error) { + obj, err := c.Fake. + Invokes(core.NewUpdateAction(jobsResource, c.ns, job), &v1beta1.Job{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Job), err +} + +func (c *FakeJobs) UpdateStatus(job *v1beta1.Job) (*v1beta1.Job, error) { + obj, err := c.Fake. + Invokes(core.NewUpdateSubresourceAction(jobsResource, "status", c.ns, job), &v1beta1.Job{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Job), err +} + +func (c *FakeJobs) Delete(name string, options *api.DeleteOptions) error { + _, err := c.Fake. + Invokes(core.NewDeleteAction(jobsResource, c.ns, name), &v1beta1.Job{}) + + return err +} + +func (c *FakeJobs) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + action := core.NewDeleteCollectionAction(jobsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1beta1.JobList{}) + return err +} + +func (c *FakeJobs) Get(name string) (result *v1beta1.Job, err error) { + obj, err := c.Fake. + Invokes(core.NewGetAction(jobsResource, c.ns, name), &v1beta1.Job{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Job), err +} + +func (c *FakeJobs) List(opts api.ListOptions) (result *v1beta1.JobList, err error) { + obj, err := c.Fake. + Invokes(core.NewListAction(jobsResource, c.ns, opts), &v1beta1.JobList{}) + + if obj == nil { + return nil, err + } + + label := opts.LabelSelector + if label == nil { + label = labels.Everything() + } + list := &v1beta1.JobList{} + for _, item := range obj.(*v1beta1.JobList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested jobs. +func (c *FakeJobs) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(core.NewWatchAction(jobsResource, c.ns, opts)) + +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1/fake/fake_podsecuritypolicy.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1/fake/fake_podsecuritypolicy.go new file mode 100644 index 000000000000..c40b04a11835 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1/fake/fake_podsecuritypolicy.go @@ -0,0 +1,99 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + v1beta1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" + core "k8s.io/kubernetes/pkg/client/testing/core" + labels "k8s.io/kubernetes/pkg/labels" + watch "k8s.io/kubernetes/pkg/watch" +) + +// FakePodSecurityPolicies implements PodSecurityPolicyInterface +type FakePodSecurityPolicies struct { + Fake *FakeExtensions +} + +var podsecuritypoliciesResource = unversioned.GroupVersionResource{Group: "extensions", Version: "v1beta1", Resource: "podsecuritypolicies"} + +func (c *FakePodSecurityPolicies) Create(podSecurityPolicy *v1beta1.PodSecurityPolicy) (result *v1beta1.PodSecurityPolicy, err error) { + obj, err := c.Fake. + Invokes(core.NewRootCreateAction(podsecuritypoliciesResource, podSecurityPolicy), &v1beta1.PodSecurityPolicy{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.PodSecurityPolicy), err +} + +func (c *FakePodSecurityPolicies) Update(podSecurityPolicy *v1beta1.PodSecurityPolicy) (result *v1beta1.PodSecurityPolicy, err error) { + obj, err := c.Fake. + Invokes(core.NewRootUpdateAction(podsecuritypoliciesResource, podSecurityPolicy), &v1beta1.PodSecurityPolicy{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.PodSecurityPolicy), err +} + +func (c *FakePodSecurityPolicies) Delete(name string, options *api.DeleteOptions) error { + _, err := c.Fake. + Invokes(core.NewRootDeleteAction(podsecuritypoliciesResource, name), &v1beta1.PodSecurityPolicy{}) + return err +} + +func (c *FakePodSecurityPolicies) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + action := core.NewRootDeleteCollectionAction(podsecuritypoliciesResource, listOptions) + + _, err := c.Fake.Invokes(action, &v1beta1.PodSecurityPolicyList{}) + return err +} + +func (c *FakePodSecurityPolicies) Get(name string) (result *v1beta1.PodSecurityPolicy, err error) { + obj, err := c.Fake. + Invokes(core.NewRootGetAction(podsecuritypoliciesResource, name), &v1beta1.PodSecurityPolicy{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.PodSecurityPolicy), err +} + +func (c *FakePodSecurityPolicies) List(opts api.ListOptions) (result *v1beta1.PodSecurityPolicyList, err error) { + obj, err := c.Fake. + Invokes(core.NewRootListAction(podsecuritypoliciesResource, opts), &v1beta1.PodSecurityPolicyList{}) + if obj == nil { + return nil, err + } + + label := opts.LabelSelector + if label == nil { + label = labels.Everything() + } + list := &v1beta1.PodSecurityPolicyList{} + for _, item := range obj.(*v1beta1.PodSecurityPolicyList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested podSecurityPolicies. +func (c *FakePodSecurityPolicies) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.Fake. 
+ InvokesWatch(core.NewRootWatchAction(podsecuritypoliciesResource, opts)) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1/fake/fake_replicaset.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1/fake/fake_replicaset.go new file mode 100644 index 000000000000..85aa5b87fa56 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1/fake/fake_replicaset.go @@ -0,0 +1,116 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + v1beta1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" + core "k8s.io/kubernetes/pkg/client/testing/core" + labels "k8s.io/kubernetes/pkg/labels" + watch "k8s.io/kubernetes/pkg/watch" +) + +// FakeReplicaSets implements ReplicaSetInterface +type FakeReplicaSets struct { + Fake *FakeExtensions + ns string +} + +var replicasetsResource = unversioned.GroupVersionResource{Group: "extensions", Version: "v1beta1", Resource: "replicasets"} + +func (c *FakeReplicaSets) Create(replicaSet *v1beta1.ReplicaSet) (result *v1beta1.ReplicaSet, err error) { + obj, err := c.Fake. + Invokes(core.NewCreateAction(replicasetsResource, c.ns, replicaSet), &v1beta1.ReplicaSet{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.ReplicaSet), err +} + +func (c *FakeReplicaSets) Update(replicaSet *v1beta1.ReplicaSet) (result *v1beta1.ReplicaSet, err error) { + obj, err := c.Fake. + Invokes(core.NewUpdateAction(replicasetsResource, c.ns, replicaSet), &v1beta1.ReplicaSet{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.ReplicaSet), err +} + +func (c *FakeReplicaSets) UpdateStatus(replicaSet *v1beta1.ReplicaSet) (*v1beta1.ReplicaSet, error) { + obj, err := c.Fake. + Invokes(core.NewUpdateSubresourceAction(replicasetsResource, "status", c.ns, replicaSet), &v1beta1.ReplicaSet{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.ReplicaSet), err +} + +func (c *FakeReplicaSets) Delete(name string, options *api.DeleteOptions) error { + _, err := c.Fake. + Invokes(core.NewDeleteAction(replicasetsResource, c.ns, name), &v1beta1.ReplicaSet{}) + + return err +} + +func (c *FakeReplicaSets) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + action := core.NewDeleteCollectionAction(replicasetsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1beta1.ReplicaSetList{}) + return err +} + +func (c *FakeReplicaSets) Get(name string) (result *v1beta1.ReplicaSet, err error) { + obj, err := c.Fake. 
+ Invokes(core.NewGetAction(replicasetsResource, c.ns, name), &v1beta1.ReplicaSet{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.ReplicaSet), err +} + +func (c *FakeReplicaSets) List(opts api.ListOptions) (result *v1beta1.ReplicaSetList, err error) { + obj, err := c.Fake. + Invokes(core.NewListAction(replicasetsResource, c.ns, opts), &v1beta1.ReplicaSetList{}) + + if obj == nil { + return nil, err + } + + label := opts.LabelSelector + if label == nil { + label = labels.Everything() + } + list := &v1beta1.ReplicaSetList{} + for _, item := range obj.(*v1beta1.ReplicaSetList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested replicaSets. +func (c *FakeReplicaSets) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(core.NewWatchAction(replicasetsResource, c.ns, opts)) + +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1/fake/fake_scale.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1/fake/fake_scale.go new file mode 100644 index 000000000000..d2cfc5f7b789 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1/fake/fake_scale.go @@ -0,0 +1,23 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +// FakeScales implements ScaleInterface +type FakeScales struct { + Fake *FakeExtensions + ns string +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1/fake/fake_scale_expansion.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1/fake/fake_scale_expansion.go new file mode 100644 index 000000000000..c76f35e7cf5e --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1/fake/fake_scale_expansion.go @@ -0,0 +1,47 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package fake + +import ( + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" + "k8s.io/kubernetes/pkg/client/testing/core" +) + +func (c *FakeScales) Get(kind string, name string) (result *v1beta1.Scale, err error) { + action := core.GetActionImpl{} + action.Verb = "get" + action.Namespace = c.ns + action.Resource = unversioned.GroupVersionResource{Resource: kind} + action.Subresource = "scale" + action.Name = name + obj, err := c.Fake.Invokes(action, &v1beta1.Scale{}) + // guard against a nil object before the type assertion, as the other fakes do + if obj == nil { + return nil, err + } + result = obj.(*v1beta1.Scale) + return +} + +func (c *FakeScales) Update(kind string, scale *v1beta1.Scale) (result *v1beta1.Scale, err error) { + action := core.UpdateActionImpl{} + action.Verb = "update" + action.Namespace = c.ns + action.Resource = unversioned.GroupVersionResource{Resource: kind} + action.Subresource = "scale" + action.Object = scale + obj, err := c.Fake.Invokes(action, scale) + // guard against a nil object before the type assertion, as the other fakes do + if obj == nil { + return nil, err + } + result = obj.(*v1beta1.Scale) + return +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1/fake/fake_thirdpartyresource.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1/fake/fake_thirdpartyresource.go new file mode 100644 index 000000000000..4cfe0ec50ca6 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1/fake/fake_thirdpartyresource.go @@ -0,0 +1,99 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + v1beta1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" + core "k8s.io/kubernetes/pkg/client/testing/core" + labels "k8s.io/kubernetes/pkg/labels" + watch "k8s.io/kubernetes/pkg/watch" +) + +// FakeThirdPartyResources implements ThirdPartyResourceInterface +type FakeThirdPartyResources struct { + Fake *FakeExtensions +} + +var thirdpartyresourcesResource = unversioned.GroupVersionResource{Group: "extensions", Version: "v1beta1", Resource: "thirdpartyresources"} + +func (c *FakeThirdPartyResources) Create(thirdPartyResource *v1beta1.ThirdPartyResource) (result *v1beta1.ThirdPartyResource, err error) { + obj, err := c.Fake. + Invokes(core.NewRootCreateAction(thirdpartyresourcesResource, thirdPartyResource), &v1beta1.ThirdPartyResource{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.ThirdPartyResource), err +} + +func (c *FakeThirdPartyResources) Update(thirdPartyResource *v1beta1.ThirdPartyResource) (result *v1beta1.ThirdPartyResource, err error) { + obj, err := c.Fake. + Invokes(core.NewRootUpdateAction(thirdpartyresourcesResource, thirdPartyResource), &v1beta1.ThirdPartyResource{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.ThirdPartyResource), err +} + +func (c *FakeThirdPartyResources) Delete(name string, options *api.DeleteOptions) error { + _, err := c.Fake.
+ Invokes(core.NewRootDeleteAction(thirdpartyresourcesResource, name), &v1beta1.ThirdPartyResource{}) + return err +} + +func (c *FakeThirdPartyResources) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + action := core.NewRootDeleteCollectionAction(thirdpartyresourcesResource, listOptions) + + _, err := c.Fake.Invokes(action, &v1beta1.ThirdPartyResourceList{}) + return err +} + +func (c *FakeThirdPartyResources) Get(name string) (result *v1beta1.ThirdPartyResource, err error) { + obj, err := c.Fake. + Invokes(core.NewRootGetAction(thirdpartyresourcesResource, name), &v1beta1.ThirdPartyResource{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.ThirdPartyResource), err +} + +func (c *FakeThirdPartyResources) List(opts api.ListOptions) (result *v1beta1.ThirdPartyResourceList, err error) { + obj, err := c.Fake. + Invokes(core.NewRootListAction(thirdpartyresourcesResource, opts), &v1beta1.ThirdPartyResourceList{}) + if obj == nil { + return nil, err + } + + label := opts.LabelSelector + if label == nil { + label = labels.Everything() + } + list := &v1beta1.ThirdPartyResourceList{} + for _, item := range obj.(*v1beta1.ThirdPartyResourceList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested thirdPartyResources. +func (c *FakeThirdPartyResources) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(core.NewRootWatchAction(thirdpartyresourcesResource, opts)) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1/generated_expansion.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1/generated_expansion.go new file mode 100644 index 000000000000..7477a5711336 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1/generated_expansion.go @@ -0,0 +1,31 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +type DaemonSetExpansion interface{} + +type HorizontalPodAutoscalerExpansion interface{} + +type IngressExpansion interface{} + +type JobExpansion interface{} + +type ThirdPartyResourceExpansion interface{} + +type ReplicaSetExpansion interface{} + +type PodSecurityPolicyExpansion interface{} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1/horizontalpodautoscaler.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1/horizontalpodautoscaler.go new file mode 100644 index 000000000000..93b486b894c3 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1/horizontalpodautoscaler.go @@ -0,0 +1,150 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + api "k8s.io/kubernetes/pkg/api" + v1beta1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" + watch "k8s.io/kubernetes/pkg/watch" +) + +// HorizontalPodAutoscalersGetter has a method to return a HorizontalPodAutoscalerInterface. +// A group's client should implement this interface. +type HorizontalPodAutoscalersGetter interface { + HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerInterface +} + +// HorizontalPodAutoscalerInterface has methods to work with HorizontalPodAutoscaler resources. +type HorizontalPodAutoscalerInterface interface { + Create(*v1beta1.HorizontalPodAutoscaler) (*v1beta1.HorizontalPodAutoscaler, error) + Update(*v1beta1.HorizontalPodAutoscaler) (*v1beta1.HorizontalPodAutoscaler, error) + UpdateStatus(*v1beta1.HorizontalPodAutoscaler) (*v1beta1.HorizontalPodAutoscaler, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*v1beta1.HorizontalPodAutoscaler, error) + List(opts api.ListOptions) (*v1beta1.HorizontalPodAutoscalerList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + HorizontalPodAutoscalerExpansion +} + +// horizontalPodAutoscalers implements HorizontalPodAutoscalerInterface +type horizontalPodAutoscalers struct { + client *ExtensionsClient + ns string +} + +// newHorizontalPodAutoscalers returns a HorizontalPodAutoscalers +func newHorizontalPodAutoscalers(c *ExtensionsClient, namespace string) *horizontalPodAutoscalers { + return &horizontalPodAutoscalers{ + client: c, + ns: namespace, + } +} + +// Create takes the representation of a horizontalPodAutoscaler and creates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. +func (c *horizontalPodAutoscalers) Create(horizontalPodAutoscaler *v1beta1.HorizontalPodAutoscaler) (result *v1beta1.HorizontalPodAutoscaler, err error) { + result = &v1beta1.HorizontalPodAutoscaler{} + err = c.client.Post(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). 
+ Body(horizontalPodAutoscaler). + Do(). + Into(result) + return +} + +// Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. +func (c *horizontalPodAutoscalers) Update(horizontalPodAutoscaler *v1beta1.HorizontalPodAutoscaler) (result *v1beta1.HorizontalPodAutoscaler, err error) { + result = &v1beta1.HorizontalPodAutoscaler{} + err = c.client.Put(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + Name(horizontalPodAutoscaler.Name). + Body(horizontalPodAutoscaler). + Do(). + Into(result) + return +} + +func (c *horizontalPodAutoscalers) UpdateStatus(horizontalPodAutoscaler *v1beta1.HorizontalPodAutoscaler) (result *v1beta1.HorizontalPodAutoscaler, err error) { + result = &v1beta1.HorizontalPodAutoscaler{} + err = c.client.Put(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + Name(horizontalPodAutoscaler.Name). + SubResource("status"). + Body(horizontalPodAutoscaler). + Do(). + Into(result) + return +} + +// Delete takes name of the horizontalPodAutoscaler and deletes it. Returns an error if one occurs. +func (c *horizontalPodAutoscalers) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *horizontalPodAutoscalers) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any. +func (c *horizontalPodAutoscalers) Get(name string) (result *v1beta1.HorizontalPodAutoscaler, err error) { + result = &v1beta1.HorizontalPodAutoscaler{} + err = c.client.Get(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors. +func (c *horizontalPodAutoscalers) List(opts api.ListOptions) (result *v1beta1.HorizontalPodAutoscalerList, err error) { + result = &v1beta1.HorizontalPodAutoscalerList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. +func (c *horizontalPodAutoscalers) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1/ingress.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1/ingress.go new file mode 100644 index 000000000000..96b4d04396a4 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1/ingress.go @@ -0,0 +1,150 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. 
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+	api "k8s.io/kubernetes/pkg/api"
+	v1beta1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
+	watch "k8s.io/kubernetes/pkg/watch"
+)
+
+// IngressesGetter has a method to return an IngressInterface.
+// A group's client should implement this interface.
+type IngressesGetter interface {
+	Ingresses(namespace string) IngressInterface
+}
+
+// IngressInterface has methods to work with Ingress resources.
+type IngressInterface interface {
+	Create(*v1beta1.Ingress) (*v1beta1.Ingress, error)
+	Update(*v1beta1.Ingress) (*v1beta1.Ingress, error)
+	UpdateStatus(*v1beta1.Ingress) (*v1beta1.Ingress, error)
+	Delete(name string, options *api.DeleteOptions) error
+	DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error
+	Get(name string) (*v1beta1.Ingress, error)
+	List(opts api.ListOptions) (*v1beta1.IngressList, error)
+	Watch(opts api.ListOptions) (watch.Interface, error)
+	IngressExpansion
+}
+
+// ingresses implements IngressInterface
+type ingresses struct {
+	client *ExtensionsClient
+	ns     string
+}
+
+// newIngresses returns an Ingresses
+func newIngresses(c *ExtensionsClient, namespace string) *ingresses {
+	return &ingresses{
+		client: c,
+		ns:     namespace,
+	}
+}
+
+// Create takes the representation of an ingress and creates it. Returns the server's representation of the ingress, and an error, if there is any.
+func (c *ingresses) Create(ingress *v1beta1.Ingress) (result *v1beta1.Ingress, err error) {
+	result = &v1beta1.Ingress{}
+	err = c.client.Post().
+		Namespace(c.ns).
+		Resource("ingresses").
+		Body(ingress).
+		Do().
+		Into(result)
+	return
+}
+
+// Update takes the representation of an ingress and updates it. Returns the server's representation of the ingress, and an error, if there is any.
+func (c *ingresses) Update(ingress *v1beta1.Ingress) (result *v1beta1.Ingress, err error) {
+	result = &v1beta1.Ingress{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("ingresses").
+		Name(ingress.Name).
+		Body(ingress).
+		Do().
+		Into(result)
+	return
+}
+
+func (c *ingresses) UpdateStatus(ingress *v1beta1.Ingress) (result *v1beta1.Ingress, err error) {
+	result = &v1beta1.Ingress{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("ingresses").
+		Name(ingress.Name).
+		SubResource("status").
+		Body(ingress).
+		Do().
+		Into(result)
+	return
+}
+
+// Delete takes name of the ingress and deletes it. Returns an error if one occurs.
+func (c *ingresses) Delete(name string, options *api.DeleteOptions) error {
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("ingresses").
+		Name(name).
+		Body(options).
+		Do().
+		Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *ingresses) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error {
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("ingresses").
+		VersionedParams(&listOptions, api.ParameterCodec).
+		Body(options).
+		Do().
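+		// Error() returns only the request's error: a collection delete has no
+		// result object for the caller to decode.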
+ Error() +} + +// Get takes name of the ingress, and returns the corresponding ingress object, and an error if there is any. +func (c *ingresses) Get(name string) (result *v1beta1.Ingress, err error) { + result = &v1beta1.Ingress{} + err = c.client.Get(). + Namespace(c.ns). + Resource("ingresses"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Ingresses that match those selectors. +func (c *ingresses) List(opts api.ListOptions) (result *v1beta1.IngressList, err error) { + result = &v1beta1.IngressList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("ingresses"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested ingresses. +func (c *ingresses) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("ingresses"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1/job.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1/job.go new file mode 100644 index 000000000000..c518c5abda69 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1/job.go @@ -0,0 +1,150 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + api "k8s.io/kubernetes/pkg/api" + v1beta1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" + watch "k8s.io/kubernetes/pkg/watch" +) + +// JobsGetter has a method to return a JobInterface. +// A group's client should implement this interface. +type JobsGetter interface { + Jobs(namespace string) JobInterface +} + +// JobInterface has methods to work with Job resources. +type JobInterface interface { + Create(*v1beta1.Job) (*v1beta1.Job, error) + Update(*v1beta1.Job) (*v1beta1.Job, error) + UpdateStatus(*v1beta1.Job) (*v1beta1.Job, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*v1beta1.Job, error) + List(opts api.ListOptions) (*v1beta1.JobList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + JobExpansion +} + +// jobs implements JobInterface +type jobs struct { + client *ExtensionsClient + ns string +} + +// newJobs returns a Jobs +func newJobs(c *ExtensionsClient, namespace string) *jobs { + return &jobs{ + client: c, + ns: namespace, + } +} + +// Create takes the representation of a job and creates it. Returns the server's representation of the job, and an error, if there is any. +func (c *jobs) Create(job *v1beta1.Job) (result *v1beta1.Job, err error) { + result = &v1beta1.Job{} + err = c.client.Post(). + Namespace(c.ns). + Resource("jobs"). + Body(job). + Do(). 
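+		// Into decodes the response body into result, so a successful create
+		// yields the job as the server stored it, with defaults applied.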
+ Into(result) + return +} + +// Update takes the representation of a job and updates it. Returns the server's representation of the job, and an error, if there is any. +func (c *jobs) Update(job *v1beta1.Job) (result *v1beta1.Job, err error) { + result = &v1beta1.Job{} + err = c.client.Put(). + Namespace(c.ns). + Resource("jobs"). + Name(job.Name). + Body(job). + Do(). + Into(result) + return +} + +func (c *jobs) UpdateStatus(job *v1beta1.Job) (result *v1beta1.Job, err error) { + result = &v1beta1.Job{} + err = c.client.Put(). + Namespace(c.ns). + Resource("jobs"). + Name(job.Name). + SubResource("status"). + Body(job). + Do(). + Into(result) + return +} + +// Delete takes name of the job and deletes it. Returns an error if one occurs. +func (c *jobs) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("jobs"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *jobs) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("jobs"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the job, and returns the corresponding job object, and an error if there is any. +func (c *jobs) Get(name string) (result *v1beta1.Job, err error) { + result = &v1beta1.Job{} + err = c.client.Get(). + Namespace(c.ns). + Resource("jobs"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Jobs that match those selectors. +func (c *jobs) List(opts api.ListOptions) (result *v1beta1.JobList, err error) { + result = &v1beta1.JobList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("jobs"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested jobs. +func (c *jobs) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("jobs"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1/podsecuritypolicy.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1/podsecuritypolicy.go new file mode 100644 index 000000000000..2f5dadabce99 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1/podsecuritypolicy.go @@ -0,0 +1,127 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + api "k8s.io/kubernetes/pkg/api" + v1beta1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" + watch "k8s.io/kubernetes/pkg/watch" +) + +// PodSecurityPoliciesGetter has a method to return a PodSecurityPolicyInterface. 
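+// PodSecurityPolicy is cluster-scoped, so unlike the namespaced getters in
+// this package the method takes no namespace argument.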
+// A group's client should implement this interface. +type PodSecurityPoliciesGetter interface { + PodSecurityPolicies() PodSecurityPolicyInterface +} + +// PodSecurityPolicyInterface has methods to work with PodSecurityPolicy resources. +type PodSecurityPolicyInterface interface { + Create(*v1beta1.PodSecurityPolicy) (*v1beta1.PodSecurityPolicy, error) + Update(*v1beta1.PodSecurityPolicy) (*v1beta1.PodSecurityPolicy, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*v1beta1.PodSecurityPolicy, error) + List(opts api.ListOptions) (*v1beta1.PodSecurityPolicyList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + PodSecurityPolicyExpansion +} + +// podSecurityPolicies implements PodSecurityPolicyInterface +type podSecurityPolicies struct { + client *ExtensionsClient +} + +// newPodSecurityPolicies returns a PodSecurityPolicies +func newPodSecurityPolicies(c *ExtensionsClient) *podSecurityPolicies { + return &podSecurityPolicies{ + client: c, + } +} + +// Create takes the representation of a podSecurityPolicy and creates it. Returns the server's representation of the podSecurityPolicy, and an error, if there is any. +func (c *podSecurityPolicies) Create(podSecurityPolicy *v1beta1.PodSecurityPolicy) (result *v1beta1.PodSecurityPolicy, err error) { + result = &v1beta1.PodSecurityPolicy{} + err = c.client.Post(). + Resource("podsecuritypolicies"). + Body(podSecurityPolicy). + Do(). + Into(result) + return +} + +// Update takes the representation of a podSecurityPolicy and updates it. Returns the server's representation of the podSecurityPolicy, and an error, if there is any. +func (c *podSecurityPolicies) Update(podSecurityPolicy *v1beta1.PodSecurityPolicy) (result *v1beta1.PodSecurityPolicy, err error) { + result = &v1beta1.PodSecurityPolicy{} + err = c.client.Put(). + Resource("podsecuritypolicies"). + Name(podSecurityPolicy.Name). + Body(podSecurityPolicy). + Do(). + Into(result) + return +} + +// Delete takes name of the podSecurityPolicy and deletes it. Returns an error if one occurs. +func (c *podSecurityPolicies) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Resource("podsecuritypolicies"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *podSecurityPolicies) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Resource("podsecuritypolicies"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the podSecurityPolicy, and returns the corresponding podSecurityPolicy object, and an error if there is any. +func (c *podSecurityPolicies) Get(name string) (result *v1beta1.PodSecurityPolicy, err error) { + result = &v1beta1.PodSecurityPolicy{} + err = c.client.Get(). + Resource("podsecuritypolicies"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of PodSecurityPolicies that match those selectors. +func (c *podSecurityPolicies) List(opts api.ListOptions) (result *v1beta1.PodSecurityPolicyList, err error) { + result = &v1beta1.PodSecurityPolicyList{} + err = c.client.Get(). + Resource("podsecuritypolicies"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested podSecurityPolicies. 
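+// Events are delivered on the returned interface's ResultChan until Stop is called.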
+func (c *podSecurityPolicies) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Resource("podsecuritypolicies"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1/replicaset.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1/replicaset.go new file mode 100644 index 000000000000..1822f052c9b8 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1/replicaset.go @@ -0,0 +1,150 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + api "k8s.io/kubernetes/pkg/api" + v1beta1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" + watch "k8s.io/kubernetes/pkg/watch" +) + +// ReplicaSetsGetter has a method to return a ReplicaSetInterface. +// A group's client should implement this interface. +type ReplicaSetsGetter interface { + ReplicaSets(namespace string) ReplicaSetInterface +} + +// ReplicaSetInterface has methods to work with ReplicaSet resources. +type ReplicaSetInterface interface { + Create(*v1beta1.ReplicaSet) (*v1beta1.ReplicaSet, error) + Update(*v1beta1.ReplicaSet) (*v1beta1.ReplicaSet, error) + UpdateStatus(*v1beta1.ReplicaSet) (*v1beta1.ReplicaSet, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*v1beta1.ReplicaSet, error) + List(opts api.ListOptions) (*v1beta1.ReplicaSetList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + ReplicaSetExpansion +} + +// replicaSets implements ReplicaSetInterface +type replicaSets struct { + client *ExtensionsClient + ns string +} + +// newReplicaSets returns a ReplicaSets +func newReplicaSets(c *ExtensionsClient, namespace string) *replicaSets { + return &replicaSets{ + client: c, + ns: namespace, + } +} + +// Create takes the representation of a replicaSet and creates it. Returns the server's representation of the replicaSet, and an error, if there is any. +func (c *replicaSets) Create(replicaSet *v1beta1.ReplicaSet) (result *v1beta1.ReplicaSet, err error) { + result = &v1beta1.ReplicaSet{} + err = c.client.Post(). + Namespace(c.ns). + Resource("replicasets"). + Body(replicaSet). + Do(). + Into(result) + return +} + +// Update takes the representation of a replicaSet and updates it. Returns the server's representation of the replicaSet, and an error, if there is any. +func (c *replicaSets) Update(replicaSet *v1beta1.ReplicaSet) (result *v1beta1.ReplicaSet, err error) { + result = &v1beta1.ReplicaSet{} + err = c.client.Put(). + Namespace(c.ns). + Resource("replicasets"). + Name(replicaSet.Name). + Body(replicaSet). + Do(). 
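+		// The PUT response carries the stored object, so decoding it into result
+		// exposes any fields the server rewrote (for example resourceVersion).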
+ Into(result) + return +} + +func (c *replicaSets) UpdateStatus(replicaSet *v1beta1.ReplicaSet) (result *v1beta1.ReplicaSet, err error) { + result = &v1beta1.ReplicaSet{} + err = c.client.Put(). + Namespace(c.ns). + Resource("replicasets"). + Name(replicaSet.Name). + SubResource("status"). + Body(replicaSet). + Do(). + Into(result) + return +} + +// Delete takes name of the replicaSet and deletes it. Returns an error if one occurs. +func (c *replicaSets) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("replicasets"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *replicaSets) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("replicasets"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the replicaSet, and returns the corresponding replicaSet object, and an error if there is any. +func (c *replicaSets) Get(name string) (result *v1beta1.ReplicaSet, err error) { + result = &v1beta1.ReplicaSet{} + err = c.client.Get(). + Namespace(c.ns). + Resource("replicasets"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ReplicaSets that match those selectors. +func (c *replicaSets) List(opts api.ListOptions) (result *v1beta1.ReplicaSetList, err error) { + result = &v1beta1.ReplicaSetList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("replicasets"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested replicaSets. +func (c *replicaSets) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("replicasets"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1/scale.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1/scale.go new file mode 100644 index 000000000000..231fe5ccf706 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1/scale.go @@ -0,0 +1,42 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +// ScalesGetter has a method to return a ScaleInterface. +// A group's client should implement this interface. +type ScalesGetter interface { + Scales(namespace string) ScaleInterface +} + +// ScaleInterface has methods to work with Scale resources. 
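+// It currently has no generated verbs of its own; everything it offers is
+// declared in ScaleExpansion (see scale_expansion.go), because Scale exists
+// only as a subresource of other objects.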
+type ScaleInterface interface { + ScaleExpansion +} + +// scales implements ScaleInterface +type scales struct { + client *ExtensionsClient + ns string +} + +// newScales returns a Scales +func newScales(c *ExtensionsClient, namespace string) *scales { + return &scales{ + client: c, + ns: namespace, + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1/scale_expansion.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1/scale_expansion.go new file mode 100644 index 000000000000..488863d9f89c --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1/scale_expansion.go @@ -0,0 +1,65 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "k8s.io/kubernetes/pkg/api/meta" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" +) + +// The ScaleExpansion interface allows manually adding extra methods to the ScaleInterface. +type ScaleExpansion interface { + Get(kind string, name string) (*v1beta1.Scale, error) + Update(kind string, scale *v1beta1.Scale) (*v1beta1.Scale, error) +} + +// Get takes the reference to scale subresource and returns the subresource or error, if one occurs. +func (c *scales) Get(kind string, name string) (result *v1beta1.Scale, err error) { + result = &v1beta1.Scale{} + + // TODO this method needs to take a proper unambiguous kind + fullyQualifiedKind := unversioned.GroupVersionKind{Kind: kind} + resource, _ := meta.KindToResource(fullyQualifiedKind) + + err = c.client.Get(). + Namespace(c.ns). + Resource(resource.Resource). + Name(name). + SubResource("scale"). + Do(). + Into(result) + return +} + +func (c *scales) Update(kind string, scale *v1beta1.Scale) (result *v1beta1.Scale, err error) { + result = &v1beta1.Scale{} + + // TODO this method needs to take a proper unambiguous kind + fullyQualifiedKind := unversioned.GroupVersionKind{Kind: kind} + resource, _ := meta.KindToResource(fullyQualifiedKind) + + err = c.client.Put(). + Namespace(scale.Namespace). + Resource(resource.Resource). + Name(scale.Name). + SubResource("scale"). + Body(scale). + Do(). + Into(result) + return +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1/thirdpartyresource.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1/thirdpartyresource.go new file mode 100644 index 000000000000..81d73d32e235 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1/thirdpartyresource.go @@ -0,0 +1,127 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + api "k8s.io/kubernetes/pkg/api" + v1beta1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" + watch "k8s.io/kubernetes/pkg/watch" +) + +// ThirdPartyResourcesGetter has a method to return a ThirdPartyResourceInterface. +// A group's client should implement this interface. +type ThirdPartyResourcesGetter interface { + ThirdPartyResources() ThirdPartyResourceInterface +} + +// ThirdPartyResourceInterface has methods to work with ThirdPartyResource resources. +type ThirdPartyResourceInterface interface { + Create(*v1beta1.ThirdPartyResource) (*v1beta1.ThirdPartyResource, error) + Update(*v1beta1.ThirdPartyResource) (*v1beta1.ThirdPartyResource, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*v1beta1.ThirdPartyResource, error) + List(opts api.ListOptions) (*v1beta1.ThirdPartyResourceList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + ThirdPartyResourceExpansion +} + +// thirdPartyResources implements ThirdPartyResourceInterface +type thirdPartyResources struct { + client *ExtensionsClient +} + +// newThirdPartyResources returns a ThirdPartyResources +func newThirdPartyResources(c *ExtensionsClient) *thirdPartyResources { + return &thirdPartyResources{ + client: c, + } +} + +// Create takes the representation of a thirdPartyResource and creates it. Returns the server's representation of the thirdPartyResource, and an error, if there is any. +func (c *thirdPartyResources) Create(thirdPartyResource *v1beta1.ThirdPartyResource) (result *v1beta1.ThirdPartyResource, err error) { + result = &v1beta1.ThirdPartyResource{} + err = c.client.Post(). + Resource("thirdpartyresources"). + Body(thirdPartyResource). + Do(). + Into(result) + return +} + +// Update takes the representation of a thirdPartyResource and updates it. Returns the server's representation of the thirdPartyResource, and an error, if there is any. +func (c *thirdPartyResources) Update(thirdPartyResource *v1beta1.ThirdPartyResource) (result *v1beta1.ThirdPartyResource, err error) { + result = &v1beta1.ThirdPartyResource{} + err = c.client.Put(). + Resource("thirdpartyresources"). + Name(thirdPartyResource.Name). + Body(thirdPartyResource). + Do(). + Into(result) + return +} + +// Delete takes name of the thirdPartyResource and deletes it. Returns an error if one occurs. +func (c *thirdPartyResources) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Resource("thirdpartyresources"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *thirdPartyResources) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Resource("thirdpartyresources"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the thirdPartyResource, and returns the corresponding thirdPartyResource object, and an error if there is any. 
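+// The error, when non-nil, is the API error from the request, for example a
+// NotFound status when no ThirdPartyResource of that name exists.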
+func (c *thirdPartyResources) Get(name string) (result *v1beta1.ThirdPartyResource, err error) { + result = &v1beta1.ThirdPartyResource{} + err = c.client.Get(). + Resource("thirdpartyresources"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ThirdPartyResources that match those selectors. +func (c *thirdPartyResources) List(opts api.ListOptions) (result *v1beta1.ThirdPartyResourceList, err error) { + result = &v1beta1.ThirdPartyResourceList{} + err = c.client.Get(). + Resource("thirdpartyresources"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested thirdPartyResources. +func (c *thirdPartyResources) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Resource("thirdpartyresources"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/leaderelection/OWNERS b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/leaderelection/OWNERS new file mode 100644 index 000000000000..ac1004ec6c1a --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/leaderelection/OWNERS @@ -0,0 +1,2 @@ +assignees: + - mikedanese diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/leaderelection/leaderelection.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/leaderelection/leaderelection.go new file mode 100644 index 000000000000..fd8d09c9f42e --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/leaderelection/leaderelection.go @@ -0,0 +1,363 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package leaderelection implements leader election of a set of endpoints. +// It uses an annotation in the endpoints object to store the record of the +// election state. +// +// This implementation does not guarantee that only one client is acting as a +// leader (a.k.a. fencing). A client observes timestamps captured locally to +// infer the state of the leader election. Thus the implementation is tolerant +// to arbitrary clock skew, but is not tolerant to arbitrary clock skew rate. +// +// However the level of tolerance to skew rate can be configured by setting +// RenewDeadline and LeaseDuration appropriately. The tolerance expressed as a +// maximum tolerated ratio of time passed on the fastest node to time passed on +// the slowest node can be approximately achieved with a configuration that sets +// the same ratio of LeaseDuration to RenewDeadline. For example if a user wanted +// to tolerate some nodes progressing forward in time twice as fast as other nodes, +// the user could set LeaseDuration to 60 seconds and RenewDeadline to 30 seconds. +// +// While not required, some method of clock synchronization between nodes in the +// cluster is highly recommended. 
It's important to keep in mind when configuring
+// this client that the tolerance to skew rate varies inversely to master
+// availability.
+//
+// Larger clusters often have a more lenient SLA for API latency. This should be
+// taken into account when configuring the client. The rate of leader transitions
+// should be monitored and RetryPeriod and LeaseDuration should be increased
+// until the rate is stable and acceptably low. It's important to keep in mind
+// when configuring this client that the tolerance to API latency varies inversely
+// to master availability.
+//
+// DISCLAIMER: this is an alpha API. This library will likely change significantly
+// or even be removed entirely in subsequent releases. Depend on this API at
+// your own risk.
+package leaderelection
+
+import (
+	"encoding/json"
+	"fmt"
+	"reflect"
+	"time"
+
+	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/api/errors"
+	"k8s.io/kubernetes/pkg/api/unversioned"
+	"k8s.io/kubernetes/pkg/apis/componentconfig"
+	"k8s.io/kubernetes/pkg/client/record"
+	client "k8s.io/kubernetes/pkg/client/unversioned"
+	"k8s.io/kubernetes/pkg/util/runtime"
+	"k8s.io/kubernetes/pkg/util/wait"
+
+	"github.com/golang/glog"
+	"github.com/spf13/pflag"
+)
+
+const (
+	JitterFactor = 1.2
+
+	LeaderElectionRecordAnnotationKey = "control-plane.alpha.kubernetes.io/leader"
+
+	DefaultLeaseDuration = 15 * time.Second
+	DefaultRenewDeadline = 10 * time.Second
+	DefaultRetryPeriod   = 2 * time.Second
+)
+
+// NewLeaderElector creates a LeaderElector from a LeaderElectionConfig
+func NewLeaderElector(lec LeaderElectionConfig) (*LeaderElector, error) {
+	if lec.LeaseDuration <= lec.RenewDeadline {
+		return nil, fmt.Errorf("leaseDuration must be greater than renewDeadline")
+	}
+	if lec.RenewDeadline <= time.Duration(JitterFactor*float64(lec.RetryPeriod)) {
+		return nil, fmt.Errorf("renewDeadline must be greater than retryPeriod*JitterFactor")
+	}
+	if lec.Client == nil {
+		return nil, fmt.Errorf("Client must not be nil.")
+	}
+	if lec.EventRecorder == nil {
+		return nil, fmt.Errorf("EventRecorder must not be nil.")
+	}
+	return &LeaderElector{
+		config: lec,
+	}, nil
+}
+
+type LeaderElectionConfig struct {
+	// EndpointsMeta should contain a Name and a Namespace of an
+	// Endpoints object that the LeaderElector will attempt to lead.
+	EndpointsMeta api.ObjectMeta
+	// Identity is a unique identifier of the leader elector.
+	Identity string
+
+	Client        client.Interface
+	EventRecorder record.EventRecorder
+
+	// LeaseDuration is the duration that non-leader candidates will
+	// wait to force acquire leadership. This is measured against time of
+	// last observed ack.
+	LeaseDuration time.Duration
+	// RenewDeadline is the duration that the acting master will retry
+	// refreshing leadership before giving up.
+	RenewDeadline time.Duration
+	// RetryPeriod is the duration the LeaderElector clients should wait
+	// between tries of actions.
+	RetryPeriod time.Duration
+
+	// Callbacks are callbacks that are triggered during certain lifecycle
+	// events of the LeaderElector
+	Callbacks LeaderCallbacks
+}
+
+// LeaderCallbacks are callbacks that are triggered during certain
+// lifecycle events of the LeaderElector. These are invoked asynchronously.
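+//
+// A minimal sketch of wiring the callbacks (the handler bodies below are
+// illustrative, not part of this package):
+//
+//	callbacks := LeaderCallbacks{
+//		OnStartedLeading: func(stop <-chan struct{}) {
+//			// do leader-only work here until stop is closed
+//			<-stop
+//		},
+//		OnStoppedLeading: func() {
+//			glog.Info("no longer the leader")
+//		},
+//		OnNewLeader: func(identity string) {
+//			glog.Infof("observed leader transition to %v", identity)
+//		},
+//	}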
+//
+// possible future callbacks:
+// * OnChallenge()
+type LeaderCallbacks struct {
+	// OnStartedLeading is called when a LeaderElector client starts leading
+	OnStartedLeading func(stop <-chan struct{})
+	// OnStoppedLeading is called when a LeaderElector client stops leading
+	OnStoppedLeading func()
+	// OnNewLeader is called when the client observes a leader that is
+	// not the previously observed leader. This includes the first observed
+	// leader when the client starts.
+	OnNewLeader func(identity string)
+}
+
+// LeaderElector is a leader election client.
+//
+// possible future methods:
+// * (le *LeaderElector) IsLeader()
+// * (le *LeaderElector) GetLeader()
+type LeaderElector struct {
+	config LeaderElectionConfig
+	// internal bookkeeping
+	observedRecord LeaderElectionRecord
+	observedTime   time.Time
+	// used to implement OnNewLeader(), may lag slightly from the
+	// value observedRecord.HolderIdentity if the transition has
+	// not yet been reported.
+	reportedLeader string
+}
+
+// LeaderElectionRecord is the record that is stored in the leader election annotation.
+// This information should be used for observational purposes only and could be replaced
+// with a random string (e.g. UUID) with only slight modification of this code.
+// TODO(mikedanese): this should potentially be versioned
+type LeaderElectionRecord struct {
+	HolderIdentity       string           `json:"holderIdentity"`
+	LeaseDurationSeconds int              `json:"leaseDurationSeconds"`
+	AcquireTime          unversioned.Time `json:"acquireTime"`
+	RenewTime            unversioned.Time `json:"renewTime"`
+	LeaderTransitions    int              `json:"leaderTransitions"`
+}
+
+// Run starts the leader election loop
+func (le *LeaderElector) Run() {
+	defer func() {
+		runtime.HandleCrash()
+		le.config.Callbacks.OnStoppedLeading()
+	}()
+	le.acquire()
+	stop := make(chan struct{})
+	go le.config.Callbacks.OnStartedLeading(stop)
+	le.renew()
+	close(stop)
+}
+
+// RunOrDie starts a client with the provided config or panics if the config
+// fails to validate.
+func RunOrDie(lec LeaderElectionConfig) {
+	le, err := NewLeaderElector(lec)
+	if err != nil {
+		panic(err)
+	}
+	le.Run()
+}
+
+// GetLeader returns the identity of the last observed leader or returns the empty string if
+// no leader has yet been observed.
+func (le *LeaderElector) GetLeader() string {
+	return le.observedRecord.HolderIdentity
+}
+
+// IsLeader returns true if the last observed leader was this client else returns false.
+func (le *LeaderElector) IsLeader() bool {
+	return le.observedRecord.HolderIdentity == le.config.Identity
+}
+
+// acquire loops calling tryAcquireOrRenew and returns immediately when tryAcquireOrRenew succeeds.
+func (le *LeaderElector) acquire() {
+	stop := make(chan struct{})
+	wait.Until(func() {
+		succeeded := le.tryAcquireOrRenew()
+		le.maybeReportTransition()
+		if !succeeded {
+			glog.V(4).Infof("failed to renew lease %v/%v", le.config.EndpointsMeta.Namespace, le.config.EndpointsMeta.Name)
+			time.Sleep(wait.Jitter(le.config.RetryPeriod, JitterFactor))
+			return
+		}
+		le.config.EventRecorder.Eventf(&api.Endpoints{ObjectMeta: le.config.EndpointsMeta}, api.EventTypeNormal, "%v became leader", le.config.Identity)
+		glog.Infof("successfully acquired lease %v/%v", le.config.EndpointsMeta.Namespace, le.config.EndpointsMeta.Name)
+		close(stop)
+	}, 0, stop)
+}
+
+// renew loops calling tryAcquireOrRenew and returns immediately when tryAcquireOrRenew fails.
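+// "Fails" here means RenewDeadline passes without a successful renewal; each
+// attempt below is retried every RetryPeriod via wait.Poll.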
+func (le *LeaderElector) renew() {
+	stop := make(chan struct{})
+	wait.Until(func() {
+		err := wait.Poll(le.config.RetryPeriod, le.config.RenewDeadline, func() (bool, error) {
+			return le.tryAcquireOrRenew(), nil
+		})
+		le.maybeReportTransition()
+		if err == nil {
+			glog.V(4).Infof("successfully renewed lease %v/%v", le.config.EndpointsMeta.Namespace, le.config.EndpointsMeta.Name)
+			return
+		}
+		le.config.EventRecorder.Eventf(&api.Endpoints{ObjectMeta: le.config.EndpointsMeta}, api.EventTypeNormal, "%v stopped leading", le.config.Identity)
+		glog.Infof("failed to renew lease %v/%v", le.config.EndpointsMeta.Namespace, le.config.EndpointsMeta.Name)
+		close(stop)
+	}, 0, stop)
+}
+
+// tryAcquireOrRenew tries to acquire a leader lease if it is not already acquired,
+// else it tries to renew the lease if it has already been acquired. Returns true
+// on success else returns false.
+func (le *LeaderElector) tryAcquireOrRenew() bool {
+	now := unversioned.Now()
+	leaderElectionRecord := LeaderElectionRecord{
+		HolderIdentity:       le.config.Identity,
+		LeaseDurationSeconds: int(le.config.LeaseDuration / time.Second),
+		RenewTime:            now,
+		AcquireTime:          now,
+	}
+
+	e, err := le.config.Client.Endpoints(le.config.EndpointsMeta.Namespace).Get(le.config.EndpointsMeta.Name)
+	if err != nil {
+		if !errors.IsNotFound(err) {
+			return false
+		}
+
+		leaderElectionRecordBytes, err := json.Marshal(leaderElectionRecord)
+		if err != nil {
+			return false
+		}
+		_, err = le.config.Client.Endpoints(le.config.EndpointsMeta.Namespace).Create(&api.Endpoints{
+			ObjectMeta: api.ObjectMeta{
+				Name:      le.config.EndpointsMeta.Name,
+				Namespace: le.config.EndpointsMeta.Namespace,
+				Annotations: map[string]string{
+					LeaderElectionRecordAnnotationKey: string(leaderElectionRecordBytes),
+				},
+			},
+		})
+		if err != nil {
+			glog.Errorf("error initially creating endpoints: %v", err)
+			return false
+		}
+		le.observedRecord = leaderElectionRecord
+		le.observedTime = time.Now()
+		return true
+	}
+
+	if e.Annotations == nil {
+		e.Annotations = make(map[string]string)
+	}
+
+	var oldLeaderElectionRecord LeaderElectionRecord
+
+	if oldLeaderElectionRecordBytes, found := e.Annotations[LeaderElectionRecordAnnotationKey]; found {
+		if err := json.Unmarshal([]byte(oldLeaderElectionRecordBytes), &oldLeaderElectionRecord); err != nil {
+			glog.Errorf("error unmarshaling leader election record: %v", err)
+			return false
+		}
+		if !reflect.DeepEqual(le.observedRecord, oldLeaderElectionRecord) {
+			le.observedRecord = oldLeaderElectionRecord
+			le.observedTime = time.Now()
+		}
+		if le.observedTime.Add(le.config.LeaseDuration).After(now.Time) &&
+			oldLeaderElectionRecord.HolderIdentity != le.config.Identity {
+			glog.Infof("lock is held by %v and has not yet expired", oldLeaderElectionRecord.HolderIdentity)
+			return false
+		}
+	}
+
+	// We're going to try to update. The leaderElectionRecord is set to its default
+	// here. Let's correct it before updating.
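+	// Renewing our own lease keeps the original AcquireTime; taking the lease
+	// over from a different holder bumps LeaderTransitions by one.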
+ if oldLeaderElectionRecord.HolderIdentity == le.config.Identity { + leaderElectionRecord.AcquireTime = oldLeaderElectionRecord.AcquireTime + } else { + leaderElectionRecord.LeaderTransitions = oldLeaderElectionRecord.LeaderTransitions + 1 + } + + leaderElectionRecordBytes, err := json.Marshal(leaderElectionRecord) + if err != nil { + glog.Errorf("err marshaling leader election record: %v", err) + return false + } + e.Annotations[LeaderElectionRecordAnnotationKey] = string(leaderElectionRecordBytes) + + _, err = le.config.Client.Endpoints(le.config.EndpointsMeta.Namespace).Update(e) + if err != nil { + glog.Errorf("err: %v", err) + return false + } + le.observedRecord = leaderElectionRecord + le.observedTime = time.Now() + return true +} + +func (l *LeaderElector) maybeReportTransition() { + if l.observedRecord.HolderIdentity == l.reportedLeader { + return + } + l.reportedLeader = l.observedRecord.HolderIdentity + if l.config.Callbacks.OnNewLeader != nil { + go l.config.Callbacks.OnNewLeader(l.reportedLeader) + } +} + +func DefaultLeaderElectionConfiguration() componentconfig.LeaderElectionConfiguration { + return componentconfig.LeaderElectionConfiguration{ + LeaderElect: false, + LeaseDuration: unversioned.Duration{Duration: DefaultLeaseDuration}, + RenewDeadline: unversioned.Duration{Duration: DefaultRenewDeadline}, + RetryPeriod: unversioned.Duration{Duration: DefaultRetryPeriod}, + } +} + +// BindFlags binds the common LeaderElectionCLIConfig flags to a flagset +func BindFlags(l *componentconfig.LeaderElectionConfiguration, fs *pflag.FlagSet) { + fs.BoolVar(&l.LeaderElect, "leader-elect", l.LeaderElect, ""+ + "Start a leader election client and gain leadership before "+ + "executing the main loop. Enable this when running replicated "+ + "components for high availability.") + fs.DurationVar(&l.LeaseDuration.Duration, "leader-elect-lease-duration", l.LeaseDuration.Duration, ""+ + "The duration that non-leader candidates will wait after observing a leadership "+ + "renewal until attempting to acquire leadership of a led but unrenewed leader "+ + "slot. This is effectively the maximum duration that a leader can be stopped "+ + "before it is replaced by another candidate. This is only applicable if leader "+ + "election is enabled.") + fs.DurationVar(&l.RenewDeadline.Duration, "leader-elect-renew-deadline", l.RenewDeadline.Duration, ""+ + "The interval between attempts by the acting master to renew a leadership slot "+ + "before it stops leading. This must be less than or equal to the lease duration. "+ + "This is only applicable if leader election is enabled.") + fs.DurationVar(&l.RetryPeriod.Duration, "leader-elect-retry-period", l.RetryPeriod.Duration, ""+ + "The duration the clients should wait between attempting acquisition and renewal "+ + "of a leadership. This is only applicable if leader election is enabled.") +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/leaderelection/leaderelection_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/leaderelection/leaderelection_test.go new file mode 100644 index 000000000000..cd880f73e1a5 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/leaderelection/leaderelection_test.go @@ -0,0 +1,258 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package leaderelection implements leader election of a set of endpoints. +// It uses an annotation in the endpoints object to store the record of the +// election state. + +package leaderelection + +import ( + "fmt" + "sync" + "testing" + "time" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/errors" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/client/record" + "k8s.io/kubernetes/pkg/client/unversioned/testclient" + "k8s.io/kubernetes/pkg/runtime" +) + +func TestTryAcquireOrRenew(t *testing.T) { + future := time.Now().Add(1000 * time.Hour) + past := time.Now().Add(-1000 * time.Hour) + + tests := []struct { + observedRecord LeaderElectionRecord + observedTime time.Time + reactors []struct { + verb string + reaction testclient.ReactionFunc + } + + expectSuccess bool + transitionLeader bool + outHolder string + }{ + // acquire from no endpoints + { + reactors: []struct { + verb string + reaction testclient.ReactionFunc + }{ + { + verb: "get", + reaction: func(action testclient.Action) (handled bool, ret runtime.Object, err error) { + return true, nil, errors.NewNotFound(api.Resource(action.(testclient.GetAction).GetResource()), action.(testclient.GetAction).GetName()) + }, + }, + { + verb: "create", + reaction: func(action testclient.Action) (handled bool, ret runtime.Object, err error) { + return true, action.(testclient.CreateAction).GetObject().(*api.Endpoints), nil + }, + }, + }, + expectSuccess: true, + outHolder: "baz", + }, + // acquire from unled endpoints + { + reactors: []struct { + verb string + reaction testclient.ReactionFunc + }{ + { + verb: "get", + reaction: func(action testclient.Action) (handled bool, ret runtime.Object, err error) { + return true, &api.Endpoints{ + ObjectMeta: api.ObjectMeta{ + Namespace: action.GetNamespace(), + Name: action.(testclient.GetAction).GetName(), + }, + }, nil + }, + }, + { + verb: "update", + reaction: func(action testclient.Action) (handled bool, ret runtime.Object, err error) { + return true, action.(testclient.CreateAction).GetObject().(*api.Endpoints), nil + }, + }, + }, + + expectSuccess: true, + transitionLeader: true, + outHolder: "baz", + }, + // acquire from led, unacked endpoints + { + reactors: []struct { + verb string + reaction testclient.ReactionFunc + }{ + { + verb: "get", + reaction: func(action testclient.Action) (handled bool, ret runtime.Object, err error) { + return true, &api.Endpoints{ + ObjectMeta: api.ObjectMeta{ + Namespace: action.GetNamespace(), + Name: action.(testclient.GetAction).GetName(), + Annotations: map[string]string{ + LeaderElectionRecordAnnotationKey: `{"holderIdentity":"bing"}`, + }, + }, + }, nil + }, + }, + { + verb: "update", + reaction: func(action testclient.Action) (handled bool, ret runtime.Object, err error) { + return true, action.(testclient.CreateAction).GetObject().(*api.Endpoints), nil + }, + }, + }, + observedRecord: LeaderElectionRecord{HolderIdentity: "bing"}, + observedTime: past, + + expectSuccess: true, + transitionLeader: true, + outHolder: "baz", + }, + // don't acquire from led, acked endpoints + { + reactors: []struct { + verb string + reaction 
testclient.ReactionFunc
+			}{
+				{
+					verb: "get",
+					reaction: func(action testclient.Action) (handled bool, ret runtime.Object, err error) {
+						return true, &api.Endpoints{
+							ObjectMeta: api.ObjectMeta{
+								Namespace: action.GetNamespace(),
+								Name:      action.(testclient.GetAction).GetName(),
+								Annotations: map[string]string{
+									LeaderElectionRecordAnnotationKey: `{"holderIdentity":"bing"}`,
+								},
+							},
+						}, nil
+					},
+				},
+			},
+			observedTime: future,
+
+			expectSuccess: false,
+			outHolder:     "bing",
+		},
+		// renew already acquired endpoints
+		{
+			reactors: []struct {
+				verb     string
+				reaction testclient.ReactionFunc
+			}{
+				{
+					verb: "get",
+					reaction: func(action testclient.Action) (handled bool, ret runtime.Object, err error) {
+						return true, &api.Endpoints{
+							ObjectMeta: api.ObjectMeta{
+								Namespace: action.GetNamespace(),
+								Name:      action.(testclient.GetAction).GetName(),
+								Annotations: map[string]string{
+									LeaderElectionRecordAnnotationKey: `{"holderIdentity":"baz"}`,
+								},
+							},
+						}, nil
+					},
+				},
+				{
+					verb: "update",
+					reaction: func(action testclient.Action) (handled bool, ret runtime.Object, err error) {
+						return true, action.(testclient.CreateAction).GetObject().(*api.Endpoints), nil
+					},
+				},
+			},
+			observedTime:   future,
+			observedRecord: LeaderElectionRecord{HolderIdentity: "baz"},
+
+			expectSuccess: true,
+			outHolder:     "baz",
+		},
+	}
+
+	for i, test := range tests {
+		// OnNewLeader is called async so we have to wait for it.
+		var wg sync.WaitGroup
+		wg.Add(1)
+		var reportedLeader string
+
+		lec := LeaderElectionConfig{
+			EndpointsMeta: api.ObjectMeta{Namespace: "foo", Name: "bar"},
+			Identity:      "baz",
+			EventRecorder: &record.FakeRecorder{},
+			LeaseDuration: 10 * time.Second,
+			Callbacks: LeaderCallbacks{
+				OnNewLeader: func(l string) {
+					defer wg.Done()
+					reportedLeader = l
+				},
+			},
+		}
+		c := &testclient.Fake{}
+		for _, reactor := range test.reactors {
+			c.AddReactor(reactor.verb, "endpoints", reactor.reaction)
+		}
+		c.AddReactor("*", "*", func(action testclient.Action) (bool, runtime.Object, error) {
+			t.Errorf("[%v] unreachable action. testclient called too many times: %+v", i, action)
+			return true, nil, fmt.Errorf("unreachable action")
+		})
+
+		le := &LeaderElector{
+			config:         lec,
+			observedRecord: test.observedRecord,
+			observedTime:   test.observedTime,
+		}
+		le.config.Client = c
+
+		if test.expectSuccess != le.tryAcquireOrRenew() {
+			t.Errorf("[%v]unexpected result of tryAcquireOrRenew: [succeeded=%v]", i, !test.expectSuccess)
+		}
+
+		le.observedRecord.AcquireTime = unversioned.Time{}
+		le.observedRecord.RenewTime = unversioned.Time{}
+		if le.observedRecord.HolderIdentity != test.outHolder {
+			t.Errorf("[%v]expected holder:\n\t%+v\ngot:\n\t%+v", i, test.outHolder, le.observedRecord.HolderIdentity)
+		}
+		if len(test.reactors) != len(c.Actions()) {
+			t.Errorf("[%v]wrong number of api interactions", i)
+		}
+		if test.transitionLeader && le.observedRecord.LeaderTransitions != 1 {
+			t.Errorf("[%v]leader should have transitioned but did not", i)
+		}
+		if !test.transitionLeader && le.observedRecord.LeaderTransitions != 0 {
+			t.Errorf("[%v]leader should not have transitioned but did", i)
+		}
+
+		le.maybeReportTransition()
+		wg.Wait()
+		if reportedLeader != test.outHolder {
+			t.Errorf("[%v]reported leader was not the new leader.
expected %q, got %q", i, test.outHolder, reportedLeader) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/record/event.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/record/event.go index a4786824755a..47cbe3eca69f 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/record/event.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/record/event.go @@ -31,11 +31,12 @@ import ( "k8s.io/kubernetes/pkg/watch" "github.com/golang/glog" + "net/http" ) const maxTriesPerEvent = 12 -var sleepDuration = 10 * time.Second +var defaultSleepDuration = 10 * time.Second const maxQueuedEvents = 1000 @@ -93,11 +94,16 @@ type EventBroadcaster interface { // Creates a new event broadcaster. func NewBroadcaster() EventBroadcaster { - return &eventBroadcasterImpl{watch.NewBroadcaster(maxQueuedEvents, watch.DropIfChannelFull)} + return &eventBroadcasterImpl{watch.NewBroadcaster(maxQueuedEvents, watch.DropIfChannelFull), defaultSleepDuration} +} + +func NewBroadcasterForTests(sleepDuration time.Duration) EventBroadcaster { + return &eventBroadcasterImpl{watch.NewBroadcaster(maxQueuedEvents, watch.DropIfChannelFull), sleepDuration} } type eventBroadcasterImpl struct { *watch.Broadcaster + sleepDuration time.Duration } // StartRecordingToSink starts sending events received from the specified eventBroadcaster to the given sink. @@ -110,11 +116,11 @@ func (eventBroadcaster *eventBroadcasterImpl) StartRecordingToSink(sink EventSin eventCorrelator := NewEventCorrelator(util.RealClock{}) return eventBroadcaster.StartEventWatcher( func(event *api.Event) { - recordToSink(sink, event, eventCorrelator, randGen) + recordToSink(sink, event, eventCorrelator, randGen, eventBroadcaster.sleepDuration) }) } -func recordToSink(sink EventSink, event *api.Event, eventCorrelator *EventCorrelator, randGen *rand.Rand) { +func recordToSink(sink EventSink, event *api.Event, eventCorrelator *EventCorrelator, randGen *rand.Rand, sleepDuration time.Duration) { // Make a copy before modification, because there could be multiple listeners. // Events are safe to copy like this. eventCopy := *event @@ -148,12 +154,11 @@ func recordToSink(sink EventSink, event *api.Event, eventCorrelator *EventCorrel func isKeyNotFoundError(err error) bool { statusErr, _ := err.(*errors.StatusError) - // At the moment the server is returning 500 instead of a more specific - // error. When changing this remember that it should be backward compatible - // with old api servers that may be still returning 500. - if statusErr != nil && statusErr.Status().Code == 500 { + + if statusErr != nil && statusErr.Status().Code == http.StatusNotFound { return true } + return false } @@ -289,7 +294,7 @@ func (recorder *recorderImpl) PastEventf(object runtime.Object, timestamp unvers } func (recorder *recorderImpl) makeEvent(ref *api.ObjectReference, eventtype, reason, message string) *api.Event { - t := unversioned.Time{recorder.clock.Now()} + t := unversioned.Time{Time: recorder.clock.Now()} namespace := ref.Namespace if namespace == "" { namespace = api.NamespaceDefault diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/record/event_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/record/event_test.go new file mode 100644 index 000000000000..ba7005abf774 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/record/event_test.go @@ -0,0 +1,926 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package record + +import ( + "encoding/json" + "fmt" + "math/rand" + "strconv" + "testing" + "time" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/errors" + _ "k8s.io/kubernetes/pkg/api/install" // To register api.Pod used in tests below + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/client/restclient" + k8sruntime "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/strategicpatch" + "net/http" +) + +type testEventSink struct { + OnCreate func(e *api.Event) (*api.Event, error) + OnUpdate func(e *api.Event) (*api.Event, error) + OnPatch func(e *api.Event, p []byte) (*api.Event, error) +} + +// CreateEvent records the event for testing. +func (t *testEventSink) Create(e *api.Event) (*api.Event, error) { + if t.OnCreate != nil { + return t.OnCreate(e) + } + return e, nil +} + +// UpdateEvent records the event for testing. +func (t *testEventSink) Update(e *api.Event) (*api.Event, error) { + if t.OnUpdate != nil { + return t.OnUpdate(e) + } + return e, nil +} + +// PatchEvent records the event for testing. +func (t *testEventSink) Patch(e *api.Event, p []byte) (*api.Event, error) { + if t.OnPatch != nil { + return t.OnPatch(e, p) + } + return e, nil +} + +type OnCreateFunc func(*api.Event) (*api.Event, error) + +func OnCreateFactory(testCache map[string]*api.Event, createEvent chan<- *api.Event) OnCreateFunc { + return func(event *api.Event) (*api.Event, error) { + testCache[getEventKey(event)] = event + createEvent <- event + return event, nil + } +} + +type OnPatchFunc func(*api.Event, []byte) (*api.Event, error) + +func OnPatchFactory(testCache map[string]*api.Event, patchEvent chan<- *api.Event) OnPatchFunc { + return func(event *api.Event, patch []byte) (*api.Event, error) { + cachedEvent, found := testCache[getEventKey(event)] + if !found { + return nil, fmt.Errorf("unexpected error: couldn't find Event in testCache.") + } + originalData, err := json.Marshal(cachedEvent) + if err != nil { + return nil, fmt.Errorf("unexpected error: %v", err) + } + patched, err := strategicpatch.StrategicMergePatch(originalData, patch, event) + if err != nil { + return nil, fmt.Errorf("unexpected error: %v", err) + } + patchedObj := &api.Event{} + err = json.Unmarshal(patched, patchedObj) + if err != nil { + return nil, fmt.Errorf("unexpected error: %v", err) + } + patchEvent <- patchedObj + return patchedObj, nil + } +} + +func TestEventf(t *testing.T) { + testPod := &api.Pod{ + ObjectMeta: api.ObjectMeta{ + SelfLink: "/api/version/pods/foo", + Name: "foo", + Namespace: "baz", + UID: "bar", + }, + } + testPod2 := &api.Pod{ + ObjectMeta: api.ObjectMeta{ + SelfLink: "/api/version/pods/foo", + Name: "foo", + Namespace: "baz", + UID: "differentUid", + }, + } + testRef, err := api.GetPartialReference(testPod, "spec.containers[2]") + testRef2, err := api.GetPartialReference(testPod2, "spec.containers[3]") + if err != nil { + t.Fatal(err) + } + table := []struct { + obj k8sruntime.Object + eventtype string 
+ reason string + messageFmt string + elements []interface{} + expect *api.Event + expectLog string + expectUpdate bool + }{ + { + obj: testRef, + eventtype: api.EventTypeNormal, + reason: "Started", + messageFmt: "some verbose message: %v", + elements: []interface{}{1}, + expect: &api.Event{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Namespace: "baz", + }, + InvolvedObject: api.ObjectReference{ + Kind: "Pod", + Name: "foo", + Namespace: "baz", + UID: "bar", + APIVersion: "version", + FieldPath: "spec.containers[2]", + }, + Reason: "Started", + Message: "some verbose message: 1", + Source: api.EventSource{Component: "eventTest"}, + Count: 1, + Type: api.EventTypeNormal, + }, + expectLog: `Event(api.ObjectReference{Kind:"Pod", Namespace:"baz", Name:"foo", UID:"bar", APIVersion:"version", ResourceVersion:"", FieldPath:"spec.containers[2]"}): type: 'Normal' reason: 'Started' some verbose message: 1`, + expectUpdate: false, + }, + { + obj: testPod, + eventtype: api.EventTypeNormal, + reason: "Killed", + messageFmt: "some other verbose message: %v", + elements: []interface{}{1}, + expect: &api.Event{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Namespace: "baz", + }, + InvolvedObject: api.ObjectReference{ + Kind: "Pod", + Name: "foo", + Namespace: "baz", + UID: "bar", + APIVersion: "version", + }, + Reason: "Killed", + Message: "some other verbose message: 1", + Source: api.EventSource{Component: "eventTest"}, + Count: 1, + Type: api.EventTypeNormal, + }, + expectLog: `Event(api.ObjectReference{Kind:"Pod", Namespace:"baz", Name:"foo", UID:"bar", APIVersion:"version", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'Killed' some other verbose message: 1`, + expectUpdate: false, + }, + { + obj: testRef, + eventtype: api.EventTypeNormal, + reason: "Started", + messageFmt: "some verbose message: %v", + elements: []interface{}{1}, + expect: &api.Event{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Namespace: "baz", + }, + InvolvedObject: api.ObjectReference{ + Kind: "Pod", + Name: "foo", + Namespace: "baz", + UID: "bar", + APIVersion: "version", + FieldPath: "spec.containers[2]", + }, + Reason: "Started", + Message: "some verbose message: 1", + Source: api.EventSource{Component: "eventTest"}, + Count: 2, + Type: api.EventTypeNormal, + }, + expectLog: `Event(api.ObjectReference{Kind:"Pod", Namespace:"baz", Name:"foo", UID:"bar", APIVersion:"version", ResourceVersion:"", FieldPath:"spec.containers[2]"}): type: 'Normal' reason: 'Started' some verbose message: 1`, + expectUpdate: true, + }, + { + obj: testRef2, + eventtype: api.EventTypeNormal, + reason: "Started", + messageFmt: "some verbose message: %v", + elements: []interface{}{1}, + expect: &api.Event{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Namespace: "baz", + }, + InvolvedObject: api.ObjectReference{ + Kind: "Pod", + Name: "foo", + Namespace: "baz", + UID: "differentUid", + APIVersion: "version", + FieldPath: "spec.containers[3]", + }, + Reason: "Started", + Message: "some verbose message: 1", + Source: api.EventSource{Component: "eventTest"}, + Count: 1, + Type: api.EventTypeNormal, + }, + expectLog: `Event(api.ObjectReference{Kind:"Pod", Namespace:"baz", Name:"foo", UID:"differentUid", APIVersion:"version", ResourceVersion:"", FieldPath:"spec.containers[3]"}): type: 'Normal' reason: 'Started' some verbose message: 1`, + expectUpdate: false, + }, + { + obj: testRef, + eventtype: api.EventTypeNormal, + reason: "Started", + messageFmt: "some verbose message: %v", + elements: []interface{}{1}, + expect: &api.Event{ + 
ObjectMeta: api.ObjectMeta{ + Name: "foo", + Namespace: "baz", + }, + InvolvedObject: api.ObjectReference{ + Kind: "Pod", + Name: "foo", + Namespace: "baz", + UID: "bar", + APIVersion: "version", + FieldPath: "spec.containers[2]", + }, + Reason: "Started", + Message: "some verbose message: 1", + Source: api.EventSource{Component: "eventTest"}, + Count: 3, + Type: api.EventTypeNormal, + }, + expectLog: `Event(api.ObjectReference{Kind:"Pod", Namespace:"baz", Name:"foo", UID:"bar", APIVersion:"version", ResourceVersion:"", FieldPath:"spec.containers[2]"}): type: 'Normal' reason: 'Started' some verbose message: 1`, + expectUpdate: true, + }, + { + obj: testRef2, + eventtype: api.EventTypeNormal, + reason: "Stopped", + messageFmt: "some verbose message: %v", + elements: []interface{}{1}, + expect: &api.Event{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Namespace: "baz", + }, + InvolvedObject: api.ObjectReference{ + Kind: "Pod", + Name: "foo", + Namespace: "baz", + UID: "differentUid", + APIVersion: "version", + FieldPath: "spec.containers[3]", + }, + Reason: "Stopped", + Message: "some verbose message: 1", + Source: api.EventSource{Component: "eventTest"}, + Count: 1, + Type: api.EventTypeNormal, + }, + expectLog: `Event(api.ObjectReference{Kind:"Pod", Namespace:"baz", Name:"foo", UID:"differentUid", APIVersion:"version", ResourceVersion:"", FieldPath:"spec.containers[3]"}): type: 'Normal' reason: 'Stopped' some verbose message: 1`, + expectUpdate: false, + }, + { + obj: testRef2, + eventtype: api.EventTypeNormal, + reason: "Stopped", + messageFmt: "some verbose message: %v", + elements: []interface{}{1}, + expect: &api.Event{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Namespace: "baz", + }, + InvolvedObject: api.ObjectReference{ + Kind: "Pod", + Name: "foo", + Namespace: "baz", + UID: "differentUid", + APIVersion: "version", + FieldPath: "spec.containers[3]", + }, + Reason: "Stopped", + Message: "some verbose message: 1", + Source: api.EventSource{Component: "eventTest"}, + Count: 2, + Type: api.EventTypeNormal, + }, + expectLog: `Event(api.ObjectReference{Kind:"Pod", Namespace:"baz", Name:"foo", UID:"differentUid", APIVersion:"version", ResourceVersion:"", FieldPath:"spec.containers[3]"}): type: 'Normal' reason: 'Stopped' some verbose message: 1`, + expectUpdate: true, + }, + } + + testCache := map[string]*api.Event{} + logCalled := make(chan struct{}) + createEvent := make(chan *api.Event) + updateEvent := make(chan *api.Event) + patchEvent := make(chan *api.Event) + testEvents := testEventSink{ + OnCreate: OnCreateFactory(testCache, createEvent), + OnUpdate: func(event *api.Event) (*api.Event, error) { + updateEvent <- event + return event, nil + }, + OnPatch: OnPatchFactory(testCache, patchEvent), + } + eventBroadcaster := NewBroadcasterForTests(0) + sinkWatcher := eventBroadcaster.StartRecordingToSink(&testEvents) + + clock := util.NewFakeClock(time.Now()) + recorder := recorderWithFakeClock(api.EventSource{Component: "eventTest"}, eventBroadcaster, clock) + for index, item := range table { + clock.Step(1 * time.Second) + // TODO: uncomment this after we upgrade to Go 1.6.1. + // testing.(*common).log() is racing with testing.(*T).report() in Go 1.6. + // See #23533 for more details. 
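(Aside, not part of the vendored patch: in the table above, expectUpdate marks the entries whose event the correlator should already hold in its cache, keyed by source, involved object, type, reason, and message. On a cache hit the correlator increments Count and the sink receives a strategic-merge patch; on a miss it receives a create. That is why entry three, a repeat of entry one, expects Count: 2 and a patch rather than a second create.)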
+ // logWatcher1 := eventBroadcaster.StartLogging(t.Logf) // Prove that it is useful + logWatcher2 := eventBroadcaster.StartLogging(func(formatter string, args ...interface{}) { + if e, a := item.expectLog, fmt.Sprintf(formatter, args...); e != a { + t.Errorf("Expected '%v', got '%v'", e, a) + } + logCalled <- struct{}{} + }) + recorder.Eventf(item.obj, item.eventtype, item.reason, item.messageFmt, item.elements...) + + <-logCalled + + // validate event + if item.expectUpdate { + actualEvent := <-patchEvent + validateEvent(string(index), actualEvent, item.expect, t) + } else { + actualEvent := <-createEvent + validateEvent(string(index), actualEvent, item.expect, t) + } + // TODO: uncomment this after we upgrade to Go 1.6.1. + // logWatcher1.Stop() + logWatcher2.Stop() + } + sinkWatcher.Stop() +} + +func recorderWithFakeClock(eventSource api.EventSource, eventBroadcaster EventBroadcaster, clock util.Clock) EventRecorder { + return &recorderImpl{eventSource, eventBroadcaster.(*eventBroadcasterImpl).Broadcaster, clock} +} + +func TestWriteEventError(t *testing.T) { + type entry struct { + timesToSendError int + attemptsWanted int + err error + } + table := map[string]*entry{ + "giveUp1": { + timesToSendError: 1000, + attemptsWanted: 1, + err: &restclient.RequestConstructionError{}, + }, + "giveUp2": { + timesToSendError: 1000, + attemptsWanted: 1, + err: &errors.StatusError{}, + }, + "retry1": { + timesToSendError: 1000, + attemptsWanted: 12, + err: &errors.UnexpectedObjectError{}, + }, + "retry2": { + timesToSendError: 1000, + attemptsWanted: 12, + err: fmt.Errorf("A weird error"), + }, + "succeedEventually": { + timesToSendError: 2, + attemptsWanted: 2, + err: fmt.Errorf("A weird error"), + }, + } + + eventCorrelator := NewEventCorrelator(util.RealClock{}) + randGen := rand.New(rand.NewSource(time.Now().UnixNano())) + + for caseName, ent := range table { + attempts := 0 + sink := &testEventSink{ + OnCreate: func(event *api.Event) (*api.Event, error) { + attempts++ + if attempts < ent.timesToSendError { + return nil, ent.err + } + return event, nil + }, + } + ev := &api.Event{} + recordToSink(sink, ev, eventCorrelator, randGen, 0) + if attempts != ent.attemptsWanted { + t.Errorf("case %v: wanted %d, got %d attempts", caseName, ent.attemptsWanted, attempts) + } + } +} + +func TestUpdateExpiredEvent(t *testing.T) { + eventCorrelator := NewEventCorrelator(util.RealClock{}) + randGen := rand.New(rand.NewSource(time.Now().UnixNano())) + + var createdEvent *api.Event + + sink := &testEventSink{ + OnPatch: func(*api.Event, []byte) (*api.Event, error) { + return nil, &errors.StatusError{ + ErrStatus: unversioned.Status{ + Code: http.StatusNotFound, + Reason: unversioned.StatusReasonNotFound, + }} + }, + OnCreate: func(event *api.Event) (*api.Event, error) { + createdEvent = event + return event, nil + }, + } + + ev := &api.Event{} + ev.ResourceVersion = "updated-resource-version" + ev.Count = 2 + recordToSink(sink, ev, eventCorrelator, randGen, 0) + + if createdEvent == nil { + t.Error("Event did not get created after patch failed") + return + } + + if createdEvent.ResourceVersion != "" { + t.Errorf("Event did not have its resource version cleared, was %s", createdEvent.ResourceVersion) + } +} + +func TestLotsOfEvents(t *testing.T) { + recorderCalled := make(chan struct{}) + loggerCalled := make(chan struct{}) + + // Fail each event a few times to ensure there's some load on the tested code. 
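(Aside, not part of the vendored patch: TestWriteEventError above passes 0 as the final argument to recordToSink; that is the sleepDuration now threaded through from the broadcaster, which is what NewBroadcasterForTests(0) exists for. A minimal sketch of the retry shape under that assumption, with illustrative names and without the correlator lookup and random first-retry jitter of the real function:)

package main

import (
	"fmt"
	"time"
)

// trySend retries send up to maxTries times, sleeping between failed
// attempts. With sleep == 0 a test exercises every retry without waiting;
// production callers get the broadcaster's default of 10 seconds.
func trySend(send func() error, maxTries int, sleep time.Duration) bool {
	for i := 0; i < maxTries; i++ {
		if send() == nil {
			return true
		}
		time.Sleep(sleep)
	}
	return false
}

func main() {
	ok := trySend(func() error { return fmt.Errorf("sink unavailable") }, 12, 0)
	fmt.Println("delivered:", ok) // false, after 12 fast attempts
}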
+ var counts [1000]int + testEvents := testEventSink{ + OnCreate: func(event *api.Event) (*api.Event, error) { + num, err := strconv.Atoi(event.Message) + if err != nil { + t.Error(err) + return event, nil + } + counts[num]++ + if counts[num] < 5 { + return nil, fmt.Errorf("fake error") + } + recorderCalled <- struct{}{} + return event, nil + }, + } + + eventBroadcaster := NewBroadcasterForTests(0) + sinkWatcher := eventBroadcaster.StartRecordingToSink(&testEvents) + logWatcher := eventBroadcaster.StartLogging(func(formatter string, args ...interface{}) { + loggerCalled <- struct{}{} + }) + recorder := eventBroadcaster.NewRecorder(api.EventSource{Component: "eventTest"}) + ref := &api.ObjectReference{ + Kind: "Pod", + Name: "foo", + Namespace: "baz", + UID: "bar", + APIVersion: "version", + } + for i := 0; i < maxQueuedEvents; i++ { + // we need to vary the reason to prevent aggregation + go recorder.Eventf(ref, api.EventTypeNormal, "Reason-"+string(i), strconv.Itoa(i)) + } + // Make sure no events were dropped by either of the listeners. + for i := 0; i < maxQueuedEvents; i++ { + <-recorderCalled + <-loggerCalled + } + // Make sure that every event was attempted 5 times + for i := 0; i < maxQueuedEvents; i++ { + if counts[i] < 5 { + t.Errorf("Only attempted to record event '%d' %d times.", i, counts[i]) + } + } + sinkWatcher.Stop() + logWatcher.Stop() +} + +func TestEventfNoNamespace(t *testing.T) { + testPod := &api.Pod{ + ObjectMeta: api.ObjectMeta{ + SelfLink: "/api/version/pods/foo", + Name: "foo", + UID: "bar", + }, + } + testRef, err := api.GetPartialReference(testPod, "spec.containers[2]") + if err != nil { + t.Fatal(err) + } + table := []struct { + obj k8sruntime.Object + eventtype string + reason string + messageFmt string + elements []interface{} + expect *api.Event + expectLog string + expectUpdate bool + }{ + { + obj: testRef, + eventtype: api.EventTypeNormal, + reason: "Started", + messageFmt: "some verbose message: %v", + elements: []interface{}{1}, + expect: &api.Event{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Namespace: "default", + }, + InvolvedObject: api.ObjectReference{ + Kind: "Pod", + Name: "foo", + Namespace: "", + UID: "bar", + APIVersion: "version", + FieldPath: "spec.containers[2]", + }, + Reason: "Started", + Message: "some verbose message: 1", + Source: api.EventSource{Component: "eventTest"}, + Count: 1, + Type: api.EventTypeNormal, + }, + expectLog: `Event(api.ObjectReference{Kind:"Pod", Namespace:"", Name:"foo", UID:"bar", APIVersion:"version", ResourceVersion:"", FieldPath:"spec.containers[2]"}): type: 'Normal' reason: 'Started' some verbose message: 1`, + expectUpdate: false, + }, + } + + testCache := map[string]*api.Event{} + logCalled := make(chan struct{}) + createEvent := make(chan *api.Event) + updateEvent := make(chan *api.Event) + patchEvent := make(chan *api.Event) + testEvents := testEventSink{ + OnCreate: OnCreateFactory(testCache, createEvent), + OnUpdate: func(event *api.Event) (*api.Event, error) { + updateEvent <- event + return event, nil + }, + OnPatch: OnPatchFactory(testCache, patchEvent), + } + eventBroadcaster := NewBroadcasterForTests(0) + sinkWatcher := eventBroadcaster.StartRecordingToSink(&testEvents) + + clock := util.NewFakeClock(time.Now()) + recorder := recorderWithFakeClock(api.EventSource{Component: "eventTest"}, eventBroadcaster, clock) + + for index, item := range table { + clock.Step(1 * time.Second) + // TODO: uncomment this after we upgrade to Go 1.6.1. 
+ // testing.(*common).log() is racing with testing.(*T).report() in Go 1.6. + // See #23533 for more details. + // logWatcher1 := eventBroadcaster.StartLogging(t.Logf) // Prove that it is useful + logWatcher2 := eventBroadcaster.StartLogging(func(formatter string, args ...interface{}) { + if e, a := item.expectLog, fmt.Sprintf(formatter, args...); e != a { + t.Errorf("Expected '%v', got '%v'", e, a) + } + logCalled <- struct{}{} + }) + recorder.Eventf(item.obj, item.eventtype, item.reason, item.messageFmt, item.elements...) + + <-logCalled + + // validate event + if item.expectUpdate { + actualEvent := <-patchEvent + validateEvent(string(index), actualEvent, item.expect, t) + } else { + actualEvent := <-createEvent + validateEvent(string(index), actualEvent, item.expect, t) + } + + // TODO: uncomment this after we upgrade to Go 1.6.1. + // logWatcher1.Stop() + logWatcher2.Stop() + } + sinkWatcher.Stop() +} + +func TestMultiSinkCache(t *testing.T) { + testPod := &api.Pod{ + ObjectMeta: api.ObjectMeta{ + SelfLink: "/api/version/pods/foo", + Name: "foo", + Namespace: "baz", + UID: "bar", + }, + } + testPod2 := &api.Pod{ + ObjectMeta: api.ObjectMeta{ + SelfLink: "/api/version/pods/foo", + Name: "foo", + Namespace: "baz", + UID: "differentUid", + }, + } + testRef, err := api.GetPartialReference(testPod, "spec.containers[2]") + testRef2, err := api.GetPartialReference(testPod2, "spec.containers[3]") + if err != nil { + t.Fatal(err) + } + table := []struct { + obj k8sruntime.Object + eventtype string + reason string + messageFmt string + elements []interface{} + expect *api.Event + expectLog string + expectUpdate bool + }{ + { + obj: testRef, + eventtype: api.EventTypeNormal, + reason: "Started", + messageFmt: "some verbose message: %v", + elements: []interface{}{1}, + expect: &api.Event{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Namespace: "baz", + }, + InvolvedObject: api.ObjectReference{ + Kind: "Pod", + Name: "foo", + Namespace: "baz", + UID: "bar", + APIVersion: "version", + FieldPath: "spec.containers[2]", + }, + Reason: "Started", + Message: "some verbose message: 1", + Source: api.EventSource{Component: "eventTest"}, + Count: 1, + Type: api.EventTypeNormal, + }, + expectLog: `Event(api.ObjectReference{Kind:"Pod", Namespace:"baz", Name:"foo", UID:"bar", APIVersion:"version", ResourceVersion:"", FieldPath:"spec.containers[2]"}): type: 'Normal' reason: 'Started' some verbose message: 1`, + expectUpdate: false, + }, + { + obj: testPod, + eventtype: api.EventTypeNormal, + reason: "Killed", + messageFmt: "some other verbose message: %v", + elements: []interface{}{1}, + expect: &api.Event{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Namespace: "baz", + }, + InvolvedObject: api.ObjectReference{ + Kind: "Pod", + Name: "foo", + Namespace: "baz", + UID: "bar", + APIVersion: "version", + }, + Reason: "Killed", + Message: "some other verbose message: 1", + Source: api.EventSource{Component: "eventTest"}, + Count: 1, + Type: api.EventTypeNormal, + }, + expectLog: `Event(api.ObjectReference{Kind:"Pod", Namespace:"baz", Name:"foo", UID:"bar", APIVersion:"version", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'Killed' some other verbose message: 1`, + expectUpdate: false, + }, + { + obj: testRef, + eventtype: api.EventTypeNormal, + reason: "Started", + messageFmt: "some verbose message: %v", + elements: []interface{}{1}, + expect: &api.Event{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Namespace: "baz", + }, + InvolvedObject: api.ObjectReference{ + Kind: "Pod", + Name: "foo", + 
Namespace: "baz", + UID: "bar", + APIVersion: "version", + FieldPath: "spec.containers[2]", + }, + Reason: "Started", + Message: "some verbose message: 1", + Source: api.EventSource{Component: "eventTest"}, + Count: 2, + Type: api.EventTypeNormal, + }, + expectLog: `Event(api.ObjectReference{Kind:"Pod", Namespace:"baz", Name:"foo", UID:"bar", APIVersion:"version", ResourceVersion:"", FieldPath:"spec.containers[2]"}): type: 'Normal' reason: 'Started' some verbose message: 1`, + expectUpdate: true, + }, + { + obj: testRef2, + eventtype: api.EventTypeNormal, + reason: "Started", + messageFmt: "some verbose message: %v", + elements: []interface{}{1}, + expect: &api.Event{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Namespace: "baz", + }, + InvolvedObject: api.ObjectReference{ + Kind: "Pod", + Name: "foo", + Namespace: "baz", + UID: "differentUid", + APIVersion: "version", + FieldPath: "spec.containers[3]", + }, + Reason: "Started", + Message: "some verbose message: 1", + Source: api.EventSource{Component: "eventTest"}, + Count: 1, + Type: api.EventTypeNormal, + }, + expectLog: `Event(api.ObjectReference{Kind:"Pod", Namespace:"baz", Name:"foo", UID:"differentUid", APIVersion:"version", ResourceVersion:"", FieldPath:"spec.containers[3]"}): type: 'Normal' reason: 'Started' some verbose message: 1`, + expectUpdate: false, + }, + { + obj: testRef, + eventtype: api.EventTypeNormal, + reason: "Started", + messageFmt: "some verbose message: %v", + elements: []interface{}{1}, + expect: &api.Event{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Namespace: "baz", + }, + InvolvedObject: api.ObjectReference{ + Kind: "Pod", + Name: "foo", + Namespace: "baz", + UID: "bar", + APIVersion: "version", + FieldPath: "spec.containers[2]", + }, + Reason: "Started", + Message: "some verbose message: 1", + Source: api.EventSource{Component: "eventTest"}, + Count: 3, + Type: api.EventTypeNormal, + }, + expectLog: `Event(api.ObjectReference{Kind:"Pod", Namespace:"baz", Name:"foo", UID:"bar", APIVersion:"version", ResourceVersion:"", FieldPath:"spec.containers[2]"}): type: 'Normal' reason: 'Started' some verbose message: 1`, + expectUpdate: true, + }, + { + obj: testRef2, + eventtype: api.EventTypeNormal, + reason: "Stopped", + messageFmt: "some verbose message: %v", + elements: []interface{}{1}, + expect: &api.Event{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Namespace: "baz", + }, + InvolvedObject: api.ObjectReference{ + Kind: "Pod", + Name: "foo", + Namespace: "baz", + UID: "differentUid", + APIVersion: "version", + FieldPath: "spec.containers[3]", + }, + Reason: "Stopped", + Message: "some verbose message: 1", + Source: api.EventSource{Component: "eventTest"}, + Count: 1, + Type: api.EventTypeNormal, + }, + expectLog: `Event(api.ObjectReference{Kind:"Pod", Namespace:"baz", Name:"foo", UID:"differentUid", APIVersion:"version", ResourceVersion:"", FieldPath:"spec.containers[3]"}): type: 'Normal' reason: 'Stopped' some verbose message: 1`, + expectUpdate: false, + }, + { + obj: testRef2, + eventtype: api.EventTypeNormal, + reason: "Stopped", + messageFmt: "some verbose message: %v", + elements: []interface{}{1}, + expect: &api.Event{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Namespace: "baz", + }, + InvolvedObject: api.ObjectReference{ + Kind: "Pod", + Name: "foo", + Namespace: "baz", + UID: "differentUid", + APIVersion: "version", + FieldPath: "spec.containers[3]", + }, + Reason: "Stopped", + Message: "some verbose message: 1", + Source: api.EventSource{Component: "eventTest"}, + Count: 2, + Type: 
api.EventTypeNormal, + }, + expectLog: `Event(api.ObjectReference{Kind:"Pod", Namespace:"baz", Name:"foo", UID:"differentUid", APIVersion:"version", ResourceVersion:"", FieldPath:"spec.containers[3]"}): type: 'Normal' reason: 'Stopped' some verbose message: 1`, + expectUpdate: true, + }, + } + + testCache := map[string]*api.Event{} + createEvent := make(chan *api.Event) + updateEvent := make(chan *api.Event) + patchEvent := make(chan *api.Event) + testEvents := testEventSink{ + OnCreate: OnCreateFactory(testCache, createEvent), + OnUpdate: func(event *api.Event) (*api.Event, error) { + updateEvent <- event + return event, nil + }, + OnPatch: OnPatchFactory(testCache, patchEvent), + } + + testCache2 := map[string]*api.Event{} + createEvent2 := make(chan *api.Event) + updateEvent2 := make(chan *api.Event) + patchEvent2 := make(chan *api.Event) + testEvents2 := testEventSink{ + OnCreate: OnCreateFactory(testCache2, createEvent2), + OnUpdate: func(event *api.Event) (*api.Event, error) { + updateEvent2 <- event + return event, nil + }, + OnPatch: OnPatchFactory(testCache2, patchEvent2), + } + + eventBroadcaster := NewBroadcasterForTests(0) + clock := util.NewFakeClock(time.Now()) + recorder := recorderWithFakeClock(api.EventSource{Component: "eventTest"}, eventBroadcaster, clock) + + sinkWatcher := eventBroadcaster.StartRecordingToSink(&testEvents) + for index, item := range table { + clock.Step(1 * time.Second) + recorder.Eventf(item.obj, item.eventtype, item.reason, item.messageFmt, item.elements...) + + // validate event + if item.expectUpdate { + actualEvent := <-patchEvent + validateEvent(string(index), actualEvent, item.expect, t) + } else { + actualEvent := <-createEvent + validateEvent(string(index), actualEvent, item.expect, t) + } + } + + // Another StartRecordingToSink call should start to record events with new clean cache. + sinkWatcher2 := eventBroadcaster.StartRecordingToSink(&testEvents2) + for index, item := range table { + clock.Step(1 * time.Second) + recorder.Eventf(item.obj, item.eventtype, item.reason, item.messageFmt, item.elements...) 
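(Aside, not part of the vendored patch: each StartRecordingToSink call constructs its own EventCorrelator, so testEvents2 starts with an empty dedup cache. The Count values expected in this second pass come from the repeats within the pass itself, which is what the "new clean cache" comment above is asserting.)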
+ + // validate event + if item.expectUpdate { + actualEvent := <-patchEvent2 + validateEvent(string(index), actualEvent, item.expect, t) + } else { + actualEvent := <-createEvent2 + validateEvent(string(index), actualEvent, item.expect, t) + } + } + + sinkWatcher.Stop() + sinkWatcher2.Stop() +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/record/events_cache.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/record/events_cache.go index 5d93ba6a68a5..fa76db795813 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/record/events_cache.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/record/events_cache.go @@ -236,7 +236,7 @@ func (e *eventLogger) eventObserve(newEvent *api.Event) (*api.Event, []byte, err event.Name = lastObservation.name event.ResourceVersion = lastObservation.resourceVersion event.FirstTimestamp = lastObservation.firstTimestamp - event.Count = lastObservation.count + 1 + event.Count = int32(lastObservation.count) + 1 eventCopy2 := *event eventCopy2.Count = 0 @@ -251,7 +251,7 @@ func (e *eventLogger) eventObserve(newEvent *api.Event) (*api.Event, []byte, err e.cache.Add( key, eventLog{ - count: event.Count, + count: int(event.Count), firstTimestamp: event.FirstTimestamp, name: event.Name, resourceVersion: event.ResourceVersion, @@ -269,7 +269,7 @@ func (e *eventLogger) updateState(event *api.Event) { e.cache.Add( key, eventLog{ - count: event.Count, + count: int(event.Count), firstTimestamp: event.FirstTimestamp, name: event.Name, resourceVersion: event.ResourceVersion, diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/record/events_cache_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/record/events_cache_test.go new file mode 100644 index 000000000000..166550783fba --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/record/events_cache_test.go @@ -0,0 +1,254 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package record + +import ( + "reflect" + "strings" + "testing" + "time" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/diff" +) + +func makeObjectReference(kind, name, namespace string) api.ObjectReference { + return api.ObjectReference{ + Kind: kind, + Name: name, + Namespace: namespace, + UID: "C934D34AFB20242", + APIVersion: "version", + } +} + +func makeEvent(reason, message string, involvedObject api.ObjectReference) api.Event { + eventTime := unversioned.Now() + event := api.Event{ + Reason: reason, + Message: message, + InvolvedObject: involvedObject, + Source: api.EventSource{ + Component: "kubelet", + Host: "kublet.node1", + }, + Count: 1, + FirstTimestamp: eventTime, + LastTimestamp: eventTime, + Type: api.EventTypeNormal, + } + return event +} + +func makeEvents(num int, template api.Event) []api.Event { + events := []api.Event{} + for i := 0; i < num; i++ { + events = append(events, template) + } + return events +} + +func makeUniqueEvents(num int) []api.Event { + events := []api.Event{} + kind := "Pod" + for i := 0; i < num; i++ { + reason := strings.Join([]string{"reason", string(i)}, "-") + message := strings.Join([]string{"message", string(i)}, "-") + name := strings.Join([]string{"pod", string(i)}, "-") + namespace := strings.Join([]string{"ns", string(i)}, "-") + involvedObject := makeObjectReference(kind, name, namespace) + events = append(events, makeEvent(reason, message, involvedObject)) + } + return events +} + +func makeSimilarEvents(num int, template api.Event, messagePrefix string) []api.Event { + events := makeEvents(num, template) + for i := range events { + events[i].Message = strings.Join([]string{messagePrefix, string(i), events[i].Message}, "-") + } + return events +} + +func setCount(event api.Event, count int) api.Event { + event.Count = int32(count) + return event +} + +func validateEvent(messagePrefix string, actualEvent *api.Event, expectedEvent *api.Event, t *testing.T) (*api.Event, error) { + recvEvent := *actualEvent + expectCompression := expectedEvent.Count > 1 + t.Logf("%v - expectedEvent.Count is %d\n", messagePrefix, expectedEvent.Count) + // Just check that the timestamp was set. + if recvEvent.FirstTimestamp.IsZero() || recvEvent.LastTimestamp.IsZero() { + t.Errorf("%v - timestamp wasn't set: %#v", messagePrefix, recvEvent) + } + actualFirstTimestamp := recvEvent.FirstTimestamp + actualLastTimestamp := recvEvent.LastTimestamp + if actualFirstTimestamp.Equal(actualLastTimestamp) { + if expectCompression { + t.Errorf("%v - FirstTimestamp (%q) and LastTimestamp (%q) must be different to indicate event compression happened, but were the same. Actual Event: %#v", messagePrefix, actualFirstTimestamp, actualLastTimestamp, recvEvent) + } + } else { + if expectedEvent.Count == 1 { + t.Errorf("%v - FirstTimestamp (%q) and LastTimestamp (%q) must be equal to indicate only one occurrence of the event, but were different. Actual Event: %#v", messagePrefix, actualFirstTimestamp, actualLastTimestamp, recvEvent) + } + } + // Temp clear time stamps for comparison because actual values don't matter for comparison + recvEvent.FirstTimestamp = expectedEvent.FirstTimestamp + recvEvent.LastTimestamp = expectedEvent.LastTimestamp + // Check that name has the right prefix. 
+ if n, en := recvEvent.Name, expectedEvent.Name; !strings.HasPrefix(n, en) { + t.Errorf("%v - Name '%v' does not contain prefix '%v'", messagePrefix, n, en) + } + recvEvent.Name = expectedEvent.Name + if e, a := expectedEvent, &recvEvent; !reflect.DeepEqual(e, a) { + t.Errorf("%v - diff: %s", messagePrefix, diff.ObjectGoPrintDiff(e, a)) + } + recvEvent.FirstTimestamp = actualFirstTimestamp + recvEvent.LastTimestamp = actualLastTimestamp + return actualEvent, nil +} + +// TestDefaultEventFilterFunc ensures that no events are filtered +func TestDefaultEventFilterFunc(t *testing.T) { + event := makeEvent("end-of-world", "it was fun", makeObjectReference("Pod", "pod1", "other")) + if DefaultEventFilterFunc(&event) { + t.Fatalf("DefaultEventFilterFunc should always return false") + } +} + +// TestEventAggregatorByReasonFunc ensures that two events are aggregated if they vary only by event.message +func TestEventAggregatorByReasonFunc(t *testing.T) { + event1 := makeEvent("end-of-world", "it was fun", makeObjectReference("Pod", "pod1", "other")) + event2 := makeEvent("end-of-world", "it was awful", makeObjectReference("Pod", "pod1", "other")) + event3 := makeEvent("nevermind", "it was a bug", makeObjectReference("Pod", "pod1", "other")) + + aggKey1, localKey1 := EventAggregatorByReasonFunc(&event1) + aggKey2, localKey2 := EventAggregatorByReasonFunc(&event2) + aggKey3, _ := EventAggregatorByReasonFunc(&event3) + + if aggKey1 != aggKey2 { + t.Errorf("Expected %v to equal %v", aggKey1, aggKey2) + } + if localKey1 == localKey2 { + t.Errorf("Expected %v to not equal %v", localKey1, localKey2) + } + if aggKey1 == aggKey3 { + t.Errorf("Expected %v to not equal %v", aggKey1, aggKey3) + } +} + +// TestEventAggregatorByReasonMessageFunc validates the proper output for an aggregate message +func TestEventAggregatorByReasonMessageFunc(t *testing.T) { + expected := "(events with common reason combined)" + event1 := makeEvent("end-of-world", "it was fun", makeObjectReference("Pod", "pod1", "other")) + if actual := EventAggregatorByReasonMessageFunc(&event1); expected != actual { + t.Errorf("Expected %v got %v", expected, actual) + } +} + +// TestEventCorrelator validates proper counting, aggregation of events +func TestEventCorrelator(t *testing.T) { + firstEvent := makeEvent("first", "i am first", makeObjectReference("Pod", "my-pod", "my-ns")) + duplicateEvent := makeEvent("duplicate", "me again", makeObjectReference("Pod", "my-pod", "my-ns")) + uniqueEvent := makeEvent("unique", "snowflake", makeObjectReference("Pod", "my-pod", "my-ns")) + similarEvent := makeEvent("similar", "similar message", makeObjectReference("Pod", "my-pod", "my-ns")) + aggregateEvent := makeEvent(similarEvent.Reason, EventAggregatorByReasonMessageFunc(&similarEvent), similarEvent.InvolvedObject) + scenario := map[string]struct { + previousEvents []api.Event + newEvent api.Event + expectedEvent api.Event + intervalSeconds int + }{ + "create-a-single-event": { + previousEvents: []api.Event{}, + newEvent: firstEvent, + expectedEvent: setCount(firstEvent, 1), + intervalSeconds: 5, + }, + "the-same-event-should-just-count": { + previousEvents: makeEvents(1, duplicateEvent), + newEvent: duplicateEvent, + expectedEvent: setCount(duplicateEvent, 2), + intervalSeconds: 5, + }, + "the-same-event-should-just-count-even-if-more-than-aggregate": { + previousEvents: makeEvents(defaultAggregateMaxEvents, duplicateEvent), + newEvent: duplicateEvent, + expectedEvent: setCount(duplicateEvent, defaultAggregateMaxEvents+1), + intervalSeconds: 5, + }, + 
"create-many-unique-events": { + previousEvents: makeUniqueEvents(30), + newEvent: uniqueEvent, + expectedEvent: setCount(uniqueEvent, 1), + intervalSeconds: 5, + }, + "similar-events-should-aggregate-event": { + previousEvents: makeSimilarEvents(defaultAggregateMaxEvents-1, similarEvent, similarEvent.Message), + newEvent: similarEvent, + expectedEvent: setCount(aggregateEvent, 1), + intervalSeconds: 5, + }, + "similar-events-many-times-should-count-the-aggregate": { + previousEvents: makeSimilarEvents(defaultAggregateMaxEvents, similarEvent, similarEvent.Message), + newEvent: similarEvent, + expectedEvent: setCount(aggregateEvent, 2), + intervalSeconds: 5, + }, + "similar-events-whose-interval-is-greater-than-aggregate-interval-do-not-aggregate": { + previousEvents: makeSimilarEvents(defaultAggregateMaxEvents-1, similarEvent, similarEvent.Message), + newEvent: similarEvent, + expectedEvent: setCount(similarEvent, 1), + intervalSeconds: defaultAggregateIntervalInSeconds, + }, + } + + for testScenario, testInput := range scenario { + eventInterval := time.Duration(testInput.intervalSeconds) * time.Second + clock := util.IntervalClock{Time: time.Now(), Duration: eventInterval} + correlator := NewEventCorrelator(&clock) + for i := range testInput.previousEvents { + event := testInput.previousEvents[i] + now := unversioned.NewTime(clock.Now()) + event.FirstTimestamp = now + event.LastTimestamp = now + result, err := correlator.EventCorrelate(&event) + if err != nil { + t.Errorf("scenario %v: unexpected error playing back prevEvents %v", testScenario, err) + } + correlator.UpdateState(result.Event) + } + + // update the input to current clock value + now := unversioned.NewTime(clock.Now()) + testInput.newEvent.FirstTimestamp = now + testInput.newEvent.LastTimestamp = now + result, err := correlator.EventCorrelate(&testInput.newEvent) + if err != nil { + t.Errorf("scenario %v: unexpected error correlating input event %v", testScenario, err) + } + + _, err = validateEvent(testScenario, result.Event, &testInput.expectedEvent, t) + if err != nil { + t.Errorf("scenario %v: unexpected error validating result %v", testScenario, err) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/record/fake.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/record/fake.go index 7afe1bab2055..35204ef2df6d 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/record/fake.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/record/fake.go @@ -23,18 +23,32 @@ import ( "k8s.io/kubernetes/pkg/runtime" ) -// FakeRecorder is used as a fake during tests. +// FakeRecorder is used as a fake during tests. It is thread safe. It is usable +// when created manually and not by NewFakeRecorder, however all events may be +// thrown away in this case. type FakeRecorder struct { - Events []string + Events chan string } func (f *FakeRecorder) Event(object runtime.Object, eventtype, reason, message string) { - f.Events = append(f.Events, fmt.Sprintf("%s %s %s", eventtype, reason, message)) + if f.Events != nil { + f.Events <- fmt.Sprintf("%s %s %s", eventtype, reason, message) + } } func (f *FakeRecorder) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) { - f.Events = append(f.Events, fmt.Sprintf(eventtype+" "+reason+" "+messageFmt, args...)) + if f.Events != nil { + f.Events <- fmt.Sprintf(eventtype+" "+reason+" "+messageFmt, args...) 
+ } } func (f *FakeRecorder) PastEventf(object runtime.Object, timestamp unversioned.Time, eventtype, reason, messageFmt string, args ...interface{}) { } + +// NewFakeRecorder creates a new fake event recorder with an event channel +// buffered to the given size. +func NewFakeRecorder(bufferSize int) *FakeRecorder { + return &FakeRecorder{ + Events: make(chan string, bufferSize), + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/restclient/client.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/restclient/client.go index 2e378a9225b0..bf813d052a0c 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/restclient/client.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/restclient/client.go @@ -17,6 +17,7 @@ limitations under the License. package restclient import ( + "fmt" "net/http" "net/url" "os" @@ -26,7 +27,8 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/util/flowcontrol" ) const ( @@ -52,17 +54,28 @@ type RESTClient struct { // contentConfig is the information used to communicate with the server. contentConfig ContentConfig + // serializers contains all serializers for the underlying content type. + serializers Serializers + // TODO extract this into a wrapper interface via the RESTClient interface in kubectl. - Throttle util.RateLimiter + Throttle flowcontrol.RateLimiter // Set specific behavior of the client. If not set http.DefaultClient will be used. Client *http.Client } +type Serializers struct { + Encoder runtime.Encoder + Decoder runtime.Decoder + StreamingSerializer runtime.Serializer + Framer runtime.Framer + RenegotiatedDecoder func(contentType string, params map[string]string) (runtime.Decoder, error) +} + // NewRESTClient creates a new RESTClient. This client performs generic REST functions // such as Get, Put, Post, and Delete on specified paths. Codec controls encoding and // decoding of responses from the server. 
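(Aside, not part of the vendored patch: the channel-based FakeRecorder above changes how tests consume events. A manually constructed &record.FakeRecorder{} now silently drops events, since its Events channel is nil, while NewFakeRecorder yields a buffered channel to read from. A small usage sketch, with an illustrative buffer size:)

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/client/record"
)

func main() {
	// Buffered so Event does not block when nobody is receiving yet.
	rec := record.NewFakeRecorder(10)
	rec.Event(nil, "Normal", "Started", "container started")
	// Events arrive as "<type> <reason> <message>" strings.
	fmt.Println(<-rec.Events) // Normal Started container started
}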
-func NewRESTClient(baseURL *url.URL, versionedAPIPath string, config ContentConfig, maxQPS float32, maxBurst int, client *http.Client) *RESTClient { +func NewRESTClient(baseURL *url.URL, versionedAPIPath string, config ContentConfig, maxQPS float32, maxBurst int, rateLimiter flowcontrol.RateLimiter, client *http.Client) (*RESTClient, error) { base := *baseURL if !strings.HasSuffix(base.Path, "/") { base.Path += "/" @@ -76,18 +89,33 @@ func NewRESTClient(baseURL *url.URL, versionedAPIPath string, config ContentConf if len(config.ContentType) == 0 { config.ContentType = "application/json" } + serializers, err := createSerializers(config) + if err != nil { + return nil, err + } - var throttle util.RateLimiter - if maxQPS > 0 { - throttle = util.NewTokenBucketRateLimiter(maxQPS, maxBurst) + var throttle flowcontrol.RateLimiter + if maxQPS > 0 && rateLimiter == nil { + throttle = flowcontrol.NewTokenBucketRateLimiter(maxQPS, maxBurst) + } else if rateLimiter != nil { + throttle = rateLimiter } return &RESTClient{ + base: &base, + versionedAPIPath: versionedAPIPath, + contentConfig: config, + serializers: *serializers, + Throttle: throttle, + Client: client, + }, nil +} + +// GetRateLimiter returns the rate limiter for a given client, or nil if it's called on a nil client +func (c *RESTClient) GetRateLimiter() flowcontrol.RateLimiter { + if c == nil { + return nil } + return c.Throttle } // readExpBackoffConfig handles the internal logic of determining what the @@ -103,15 +131,47 @@ func readExpBackoffConfig() BackoffManager { return &NoBackoff{} } return &URLBackoff{ - Backoff: util.NewBackOff( + Backoff: flowcontrol.NewBackOff( time.Duration(backoffBaseInt)*time.Second, time.Duration(backoffDurationInt)*time.Second)} } +// createSerializers creates all necessary serializers for the given contentType. +func createSerializers(config ContentConfig) (*Serializers, error) { + negotiated := config.NegotiatedSerializer + contentType := config.ContentType + info, ok := negotiated.SerializerForMediaType(contentType, nil) + if !ok { + return nil, fmt.Errorf("serializer for %s not registered", contentType) + } + streamInfo, ok := negotiated.StreamingSerializerForMediaType(contentType, nil) + if !ok { + return nil, fmt.Errorf("streaming serializer for %s not registered", contentType) + } + internalGV := unversioned.GroupVersion{ + Group: config.GroupVersion.Group, + Version: runtime.APIVersionInternal, + } + return &Serializers{ + Encoder: negotiated.EncoderForVersion(info.Serializer, *config.GroupVersion), + Decoder: negotiated.DecoderToVersion(info.Serializer, internalGV), + StreamingSerializer: streamInfo.Serializer, + Framer: streamInfo.Framer, + RenegotiatedDecoder: func(contentType string, params map[string]string) (runtime.Decoder, error) { + renegotiated, ok := negotiated.SerializerForMediaType(contentType, params) + if !ok { + return nil, fmt.Errorf("serializer for %s not registered", contentType) + } + return negotiated.DecoderToVersion(renegotiated.Serializer, internalGV), nil + }, + }, nil +} + // Verb begins a request with a verb (GET, POST, PUT, DELETE). // // Example usage of RESTClient's request building interface: -// c := NewRESTClient(url, codec) +// c, err := NewRESTClient(...) +// if err != nil { ... } // resp, err := c.Verb("GET"). // Path("pods"). // SelectorParam("labels", "area=staging"). 
@@ -124,9 +184,9 @@ func (c *RESTClient) Verb(verb string) *Request { backoff := readExpBackoffConfig() if c.Client == nil { - return NewRequest(nil, verb, c.base, c.versionedAPIPath, c.contentConfig, backoff, c.Throttle) + return NewRequest(nil, verb, c.base, c.versionedAPIPath, c.contentConfig, c.serializers, backoff, c.Throttle) } - return NewRequest(c.Client, verb, c.base, c.versionedAPIPath, c.contentConfig, backoff, c.Throttle) + return NewRequest(c.Client, verb, c.base, c.versionedAPIPath, c.contentConfig, c.serializers, backoff, c.Throttle) } // Post begins a POST request. Short for c.Verb("POST"). @@ -158,3 +218,7 @@ func (c *RESTClient) Delete() *Request { func (c *RESTClient) APIVersion() unversioned.GroupVersion { return *c.contentConfig.GroupVersion } + +func (c *RESTClient) Codec() runtime.Codec { + return c.contentConfig.Codec +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/restclient/client_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/restclient/client_test.go new file mode 100644 index 000000000000..e1cc1f9fae8d --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/restclient/client_test.go @@ -0,0 +1,193 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package restclient + +import ( + "net/http" + "net/http/httptest" + "net/url" + "os" + "reflect" + "testing" + "time" + + "k8s.io/kubernetes/pkg/api/errors" + "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/util/diff" + utiltesting "k8s.io/kubernetes/pkg/util/testing" +) + +func TestDoRequestSuccess(t *testing.T) { + status := &unversioned.Status{Status: unversioned.StatusSuccess} + expectedBody, _ := runtime.Encode(testapi.Default.Codec(), status) + fakeHandler := utiltesting.FakeHandler{ + StatusCode: 200, + ResponseBody: string(expectedBody), + T: t, + } + testServer := httptest.NewServer(&fakeHandler) + defer testServer.Close() + c, err := RESTClientFor(&Config{ + Host: testServer.URL, + ContentConfig: ContentConfig{ + GroupVersion: testapi.Default.GroupVersion(), + NegotiatedSerializer: testapi.Default.NegotiatedSerializer(), + }, + Username: "user", + Password: "pass", + }) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + body, err := c.Get().Prefix("test").Do().Raw() + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if fakeHandler.RequestReceived.Header["Authorization"] == nil { + t.Errorf("Request is missing authorization header: %#v", fakeHandler.RequestReceived) + } + statusOut, err := runtime.Decode(testapi.Default.Codec(), body) + if err != nil { + t.Errorf("Unexpected error %#v", err) + } + if !reflect.DeepEqual(status, statusOut) { + t.Errorf("Unexpected mis-match. Expected %#v. 
Saw %#v", status, statusOut) + } + fakeHandler.ValidateRequest(t, "/"+testapi.Default.GroupVersion().String()+"/test", "GET", nil) +} + +func TestDoRequestFailed(t *testing.T) { + status := &unversioned.Status{ + Code: http.StatusNotFound, + Status: unversioned.StatusFailure, + Reason: unversioned.StatusReasonNotFound, + Message: " \"\" not found", + Details: &unversioned.StatusDetails{}, + } + expectedBody, _ := runtime.Encode(testapi.Default.Codec(), status) + fakeHandler := utiltesting.FakeHandler{ + StatusCode: 404, + ResponseBody: string(expectedBody), + T: t, + } + testServer := httptest.NewServer(&fakeHandler) + defer testServer.Close() + c, err := RESTClientFor(&Config{ + Host: testServer.URL, + ContentConfig: ContentConfig{ + GroupVersion: testapi.Default.GroupVersion(), + NegotiatedSerializer: testapi.Default.NegotiatedSerializer(), + }, + }) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + body, err := c.Get().Do().Raw() + if err == nil || body != nil { + t.Errorf("unexpected non-error: %#v", body) + } + ss, ok := err.(errors.APIStatus) + if !ok { + t.Errorf("unexpected error type %v", err) + } + actual := ss.Status() + expected := *status + // The decoder will apply the default Version and Kind to the Status. + expected.APIVersion = "v1" + expected.Kind = "Status" + if !reflect.DeepEqual(&expected, &actual) { + t.Errorf("Unexpected mis-match: %s", diff.ObjectDiff(status, &actual)) + } +} + +func TestDoRequestCreated(t *testing.T) { + status := &unversioned.Status{Status: unversioned.StatusSuccess} + expectedBody, _ := runtime.Encode(testapi.Default.Codec(), status) + fakeHandler := utiltesting.FakeHandler{ + StatusCode: 201, + ResponseBody: string(expectedBody), + T: t, + } + testServer := httptest.NewServer(&fakeHandler) + defer testServer.Close() + c, err := RESTClientFor(&Config{ + Host: testServer.URL, + ContentConfig: ContentConfig{ + GroupVersion: testapi.Default.GroupVersion(), + NegotiatedSerializer: testapi.Default.NegotiatedSerializer(), + }, + Username: "user", + Password: "pass", + }) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + created := false + body, err := c.Get().Prefix("test").Do().WasCreated(&created).Raw() + if err != nil { + t.Errorf("Unexpected error %#v", err) + } + if !created { + t.Errorf("Expected object to be created") + } + statusOut, err := runtime.Decode(testapi.Default.Codec(), body) + if err != nil { + t.Errorf("Unexpected error %#v", err) + } + if !reflect.DeepEqual(status, statusOut) { + t.Errorf("Unexpected mis-match. Expected %#v. Saw %#v", status, statusOut) + } + fakeHandler.ValidateRequest(t, "/"+testapi.Default.GroupVersion().String()+"/test", "GET", nil) +} + +func TestCreateBackoffManager(t *testing.T) { + + theUrl, _ := url.Parse("http://localhost") + + // 1 second base backoff + duration of 2 seconds -> exponential backoff for requests. + os.Setenv(envBackoffBase, "1") + os.Setenv(envBackoffDuration, "2") + backoff := readExpBackoffConfig() + backoff.UpdateBackoff(theUrl, nil, 500) + backoff.UpdateBackoff(theUrl, nil, 500) + if backoff.CalculateBackoff(theUrl)/time.Second != 2 { + t.Errorf("Backoff env not working.") + } + + // 0 duration -> no backoff. + os.Setenv(envBackoffBase, "1") + os.Setenv(envBackoffDuration, "0") + backoff.UpdateBackoff(theUrl, nil, 500) + backoff.UpdateBackoff(theUrl, nil, 500) + backoff = readExpBackoffConfig() + if backoff.CalculateBackoff(theUrl)/time.Second != 0 { + t.Errorf("Zero backoff duration, but backoff still occuring.") + } + + // No env -> No backoff. 
+ os.Setenv(envBackoffBase, "") + os.Setenv(envBackoffDuration, "") + backoff = readExpBackoffConfig() + backoff.UpdateBackoff(theUrl, nil, 500) + backoff.UpdateBackoff(theUrl, nil, 500) + if backoff.CalculateBackoff(theUrl)/time.Second != 0 { + t.Errorf("Backoff should have been 0.") + } + +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/restclient/config.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/restclient/config.go index b084a8eb2b87..0741e3c2d8f4 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/restclient/config.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/restclient/config.go @@ -30,8 +30,10 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/unversioned" + clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api" "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/crypto" + "k8s.io/kubernetes/pkg/util/flowcontrol" "k8s.io/kubernetes/pkg/version" ) @@ -62,6 +64,15 @@ type Config struct { // TODO: demonstrate an OAuth2 compatible client. BearerToken string + // Impersonate is the username that this RESTClient will impersonate + Impersonate string + + // Server requires plugin-specified authentication. + AuthProvider *clientcmdapi.AuthProviderConfig + + // Callback to persist config for AuthProvider. + AuthConfigPersister AuthProviderConfigPersister + // TLSClientConfig contains settings to enable transport layer security TLSClientConfig @@ -87,6 +98,9 @@ type Config struct { // Maximum burst for throttle Burst int + + // Rate limiter for limiting connections to the master from this client. If present overwrites QPS/Burst + RateLimiter flowcontrol.RateLimiter } // TLSClientConfig contains settings to enable transport layer security @@ -119,9 +133,17 @@ type ContentConfig struct { // a RESTClient directly. When initializing a Client, will be set with the default // code version. GroupVersion *unversioned.GroupVersion + // NegotiatedSerializer is used for obtaining encoders and decoders for multiple + // supported media types. + NegotiatedSerializer runtime.NegotiatedSerializer + // Codec specifies the encoding and decoding behavior for runtime.Objects passed // to a RESTClient or Client. Required when initializing a RESTClient, optional // when initializing a Client. + // + // DEPRECATED: Please use NegotiatedSerializer instead. + // Codec is currently used only in some tests and will be removed soon. + // All production setups should use NegotiatedSerializer. 
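(Aside, not part of the vendored patch: the new Config fields compose as follows. NegotiatedSerializer replaces Codec as the required serialization knob, and a non-nil RateLimiter takes precedence over QPS/Burst inside NewRESTClient. A construction sketch; the host, QPS, and burst values are placeholders, and testapi is used here the same way the new tests use it:)

package main

import (
	"k8s.io/kubernetes/pkg/api/testapi"
	"k8s.io/kubernetes/pkg/client/restclient"
	"k8s.io/kubernetes/pkg/util/flowcontrol"
)

func main() {
	cfg := &restclient.Config{
		Host: "https://127.0.0.1:6443", // placeholder address
		ContentConfig: restclient.ContentConfig{
			GroupVersion:         testapi.Default.GroupVersion(),
			NegotiatedSerializer: testapi.Default.NegotiatedSerializer(),
		},
		// When non-nil, this limiter wins over any QPS/Burst settings.
		RateLimiter: flowcontrol.NewTokenBucketRateLimiter(5.0, 10),
	}
	if _, err := restclient.RESTClientFor(cfg); err != nil {
		panic(err) // illustrative error handling
	}
}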
Codec runtime.Codec } @@ -133,8 +155,8 @@ func RESTClientFor(config *Config) (*RESTClient, error) { if config.GroupVersion == nil { return nil, fmt.Errorf("GroupVersion is required when initializing a RESTClient") } - if config.Codec == nil { - return nil, fmt.Errorf("Codec is required when initializing a RESTClient") + if config.NegotiatedSerializer == nil { + return nil, fmt.Errorf("NegotiatedSerializer is required when initializing a RESTClient") } baseURL, versionedAPIPath, err := defaultServerUrlFor(config) @@ -152,16 +174,14 @@ func RESTClientFor(config *Config) (*RESTClient, error) { httpClient = &http.Client{Transport: transport} } - client := NewRESTClient(baseURL, versionedAPIPath, config.ContentConfig, config.QPS, config.Burst, httpClient) - - return client, nil + return NewRESTClient(baseURL, versionedAPIPath, config.ContentConfig, config.QPS, config.Burst, config.RateLimiter, httpClient) } // UnversionedRESTClientFor is the same as RESTClientFor, except that it allows // the config.Version to be empty. func UnversionedRESTClientFor(config *Config) (*RESTClient, error) { - if config.Codec == nil { - return nil, fmt.Errorf("Codec is required when initializing a RESTClient") + if config.NegotiatedSerializer == nil { + return nil, fmt.Errorf("NegotiatedSerializer is required when initializing a RESTClient") } baseURL, versionedAPIPath, err := defaultServerUrlFor(config) @@ -185,8 +205,7 @@ func UnversionedRESTClientFor(config *Config) (*RESTClient, error) { versionConfig.GroupVersion = &v } - client := NewRESTClient(baseURL, versionedAPIPath, versionConfig, config.QPS, config.Burst, httpClient) - return client, nil + return NewRESTClient(baseURL, versionedAPIPath, versionConfig, config.QPS, config.Burst, config.RateLimiter, httpClient) } // SetKubernetesDefaults sets default values on the provided client config for accessing the @@ -235,7 +254,7 @@ func InClusterConfig() (*Config, error) { } tlsClientConfig := TLSClientConfig{} rootCAFile := "/var/run/secrets/kubernetes.io/serviceaccount/" + api.ServiceAccountRootCAKey - if _, err := util.CertPoolFromFile(rootCAFile); err != nil { + if _, err := crypto.CertPoolFromFile(rootCAFile); err != nil { glog.Errorf("Expected to load root CA config from %s, but got err: %v", rootCAFile, err) } else { tlsClientConfig.CAFile = rootCAFile diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/restclient/config_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/restclient/config_test.go new file mode 100644 index 000000000000..a9c71e19d814 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/restclient/config_test.go @@ -0,0 +1,99 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package restclient + +import ( + "strings" + "testing" + + "k8s.io/kubernetes/pkg/api/testapi" +) + +func TestIsConfigTransportTLS(t *testing.T) { + testCases := []struct { + Config *Config + TransportTLS bool + }{ + { + Config: &Config{}, + TransportTLS: false, + }, + { + Config: &Config{ + Host: "https://localhost", + }, + TransportTLS: true, + }, + { + Config: &Config{ + Host: "localhost", + TLSClientConfig: TLSClientConfig{ + CertFile: "foo", + }, + }, + TransportTLS: true, + }, + { + Config: &Config{ + Host: "///:://localhost", + TLSClientConfig: TLSClientConfig{ + CertFile: "foo", + }, + }, + TransportTLS: false, + }, + { + Config: &Config{ + Host: "1.2.3.4:567", + Insecure: true, + }, + TransportTLS: true, + }, + } + for _, testCase := range testCases { + if err := SetKubernetesDefaults(testCase.Config); err != nil { + t.Errorf("setting defaults failed for %#v: %v", testCase.Config, err) + continue + } + useTLS := IsConfigTransportTLS(*testCase.Config) + if testCase.TransportTLS != useTLS { + t.Errorf("expected %v for %#v", testCase.TransportTLS, testCase.Config) + } + } +} + +func TestSetKubernetesDefaultsUserAgent(t *testing.T) { + config := &Config{} + if err := SetKubernetesDefaults(config); err != nil { + t.Errorf("unexpected error: %v", err) + } + if !strings.Contains(config.UserAgent, "kubernetes/") { + t.Errorf("no user agent set: %#v", config) + } +} + +func TestRESTClientRequires(t *testing.T) { + if _, err := RESTClientFor(&Config{Host: "127.0.0.1", ContentConfig: ContentConfig{NegotiatedSerializer: testapi.Default.NegotiatedSerializer()}}); err == nil { + t.Errorf("unexpected non-error") + } + if _, err := RESTClientFor(&Config{Host: "127.0.0.1", ContentConfig: ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}); err == nil { + t.Errorf("unexpected non-error") + } + if _, err := RESTClientFor(&Config{Host: "127.0.0.1", ContentConfig: ContentConfig{GroupVersion: testapi.Default.GroupVersion(), NegotiatedSerializer: testapi.Default.NegotiatedSerializer()}}); err != nil { + t.Errorf("unexpected error: %v", err) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/restclient/plugin.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/restclient/plugin.go new file mode 100644 index 000000000000..4752e375bc71 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/restclient/plugin.go @@ -0,0 +1,73 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package restclient + +import ( + "fmt" + "net/http" + "sync" + + "github.com/golang/glog" + + clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api" +) + +type AuthProvider interface { + // WrapTransport allows the plugin to create a modified RoundTripper that + // attaches authorization headers (or other info) to requests. + WrapTransport(http.RoundTripper) http.RoundTripper + // Login allows the plugin to initialize its configuration. It must not + // require direct user interaction. 
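+	//
+	// A minimal AuthProvider (mirroring pluginA in plugin_test.go in this
+	// change) wraps the transport and treats Login as a no-op:
+	//
+	//	type pluginA struct{}
+	//
+	//	func (*pluginA) WrapTransport(rt http.RoundTripper) http.RoundTripper {
+	//		return &wrapTransportA{rt}
+	//	}
+	//
+	//	func (*pluginA) Login() error { return nil }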
+ Login() error +} + +// Factory generates an AuthProvider plugin. +// clusterAddress is the address of the current cluster. +// config is the initial configuration for this plugin. +// persister allows the plugin to save updated configuration. +type Factory func(clusterAddress string, config map[string]string, persister AuthProviderConfigPersister) (AuthProvider, error) + +// AuthProviderConfigPersister allows a plugin to persist configuration info +// for just itself. +type AuthProviderConfigPersister interface { + Persist(map[string]string) error +} + +// All registered auth provider plugins. +var pluginsLock sync.Mutex +var plugins = make(map[string]Factory) + +func RegisterAuthProviderPlugin(name string, plugin Factory) error { + pluginsLock.Lock() + defer pluginsLock.Unlock() + if _, found := plugins[name]; found { + return fmt.Errorf("Auth Provider Plugin %q was registered twice", name) + } + glog.V(4).Infof("Registered Auth Provider Plugin %q", name) + plugins[name] = plugin + return nil +} + +func GetAuthProvider(clusterAddress string, apc *clientcmdapi.AuthProviderConfig, persister AuthProviderConfigPersister) (AuthProvider, error) { + pluginsLock.Lock() + defer pluginsLock.Unlock() + p, ok := plugins[apc.Name] + if !ok { + return nil, fmt.Errorf("No Auth Provider found for name %q", apc.Name) + } + return p(clusterAddress, apc.Config, persister) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/restclient/plugin_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/restclient/plugin_test.go new file mode 100644 index 000000000000..3419ecb8acb0 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/restclient/plugin_test.go @@ -0,0 +1,311 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package restclient + +import ( + "fmt" + "net/http" + "reflect" + "strconv" + "testing" + + clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api" +) + +func TestAuthPluginWrapTransport(t *testing.T) { + if err := RegisterAuthProviderPlugin("pluginA", pluginAProvider); err != nil { + t.Errorf("Unexpected error: failed to register pluginA: %v", err) + } + if err := RegisterAuthProviderPlugin("pluginB", pluginBProvider); err != nil { + t.Errorf("Unexpected error: failed to register pluginB: %v", err) + } + if err := RegisterAuthProviderPlugin("pluginFail", pluginFailProvider); err != nil { + t.Errorf("Unexpected error: failed to register pluginFail: %v", err) + } + testCases := []struct { + useWrapTransport bool + plugin string + expectErr bool + expectPluginA bool + expectPluginB bool + }{ + {false, "", false, false, false}, + {false, "pluginA", false, true, false}, + {false, "pluginB", false, false, true}, + {false, "pluginFail", true, false, false}, + {false, "pluginUnknown", true, false, false}, + } + for i, tc := range testCases { + c := Config{} + if tc.useWrapTransport { + // Specify an existing WrapTransport in the config to make sure that + // plugins play nicely. 
+ c.WrapTransport = func(rt http.RoundTripper) http.RoundTripper { + return &wrapTransport{rt} + } + } + if len(tc.plugin) != 0 { + c.AuthProvider = &clientcmdapi.AuthProviderConfig{Name: tc.plugin} + } + tConfig, err := c.transportConfig() + if err != nil { + // Unknown/bad plugins are expected to fail here. + if !tc.expectErr { + t.Errorf("%d. Did not expect errors loading Auth Plugin: %q. Got: %v", i, tc.plugin, err) + } + continue + } + var fullyWrappedTransport http.RoundTripper + fullyWrappedTransport = &emptyTransport{} + if tConfig.WrapTransport != nil { + fullyWrappedTransport = tConfig.WrapTransport(&emptyTransport{}) + } + res, err := fullyWrappedTransport.RoundTrip(&http.Request{}) + if err != nil { + t.Errorf("%d. Unexpected error in RoundTrip: %v", i, err) + continue + } + hasWrapTransport := res.Header.Get("wrapTransport") == "Y" + hasPluginA := res.Header.Get("pluginA") == "Y" + hasPluginB := res.Header.Get("pluginB") == "Y" + if hasWrapTransport != tc.useWrapTransport { + t.Errorf("%d. Expected Existing config.WrapTransport: %t; Got: %t", i, tc.useWrapTransport, hasWrapTransport) + } + if hasPluginA != tc.expectPluginA { + t.Errorf("%d. Expected Plugin A: %t; Got: %t", i, tc.expectPluginA, hasPluginA) + } + if hasPluginB != tc.expectPluginB { + t.Errorf("%d. Expected Plugin B: %t; Got: %t", i, tc.expectPluginB, hasPluginB) + } + } +} + +func TestAuthPluginPersist(t *testing.T) { + // register pluginA by a different name so we don't collide across tests. + if err := RegisterAuthProviderPlugin("pluginA2", pluginAProvider); err != nil { + t.Errorf("Unexpected error: failed to register pluginA: %v", err) + } + if err := RegisterAuthProviderPlugin("pluginPersist", pluginPersistProvider); err != nil { + t.Errorf("Unexpected error: failed to register pluginPersist: %v", err) + } + fooBarConfig := map[string]string{"foo": "bar"} + testCases := []struct { + plugin string + startingConfig map[string]string + expectedConfigAfterLogin map[string]string + expectedConfigAfterRoundTrip map[string]string + }{ + // non-persisting plugins should work fine without modifying config. + {"pluginA2", map[string]string{}, map[string]string{}, map[string]string{}}, + {"pluginA2", fooBarConfig, fooBarConfig, fooBarConfig}, + // plugins that persist config should be able to persist when they want. + { + "pluginPersist", + map[string]string{}, + map[string]string{ + "login": "Y", + }, + map[string]string{ + "login": "Y", + "roundTrips": "1", + }, + }, + { + "pluginPersist", + map[string]string{ + "login": "Y", + "roundTrips": "123", + }, + map[string]string{ + "login": "Y", + "roundTrips": "123", + }, + map[string]string{ + "login": "Y", + "roundTrips": "124", + }, + }, + } + for i, tc := range testCases { + cfg := &clientcmdapi.AuthProviderConfig{ + Name: tc.plugin, + Config: tc.startingConfig, + } + persister := &inMemoryPersister{make(map[string]string)} + persister.Persist(tc.startingConfig) + plugin, err := GetAuthProvider("127.0.0.1", cfg, persister) + if err != nil { + t.Errorf("%d. Unexpected error: failed to get plugin %q: %v", i, tc.plugin, err) + } + if err := plugin.Login(); err != nil { + t.Errorf("%d. Unexpected error calling Login() w/ plugin %q: %v", i, tc.plugin, err) + } + // Make sure the plugin persisted what we expect after Login(). + if !reflect.DeepEqual(persister.savedConfig, tc.expectedConfigAfterLogin) { + t.Errorf("%d. 
Unexpected persisted config after calling %s.Login(): \nGot:\n%v\nExpected:\n%v", + i, tc.plugin, persister.savedConfig, tc.expectedConfigAfterLogin) + } + if _, err := plugin.WrapTransport(&emptyTransport{}).RoundTrip(&http.Request{}); err != nil { + t.Errorf("%d. Unexpected error round-tripping w/ plugin %q: %v", i, tc.plugin, err) + } + // Make sure the plugin persisted what we expect after RoundTrip(). + if !reflect.DeepEqual(persister.savedConfig, tc.expectedConfigAfterRoundTrip) { + t.Errorf("%d. Unexpected persisted config after calling %s.WrapTransport.RoundTrip(): \nGot:\n%v\nExpected:\n%v", + i, tc.plugin, persister.savedConfig, tc.expectedConfigAfterRoundTrip) + } + } + +} + +// emptyTransport provides an empty http.Response with an initialized header +// to allow wrapping RoundTrippers to set header values. +type emptyTransport struct{} + +func (*emptyTransport) RoundTrip(req *http.Request) (*http.Response, error) { + res := &http.Response{ + Header: make(map[string][]string), + } + return res, nil +} + +// wrapTransport sets "wrapTransport" = "Y" on the response. +type wrapTransport struct { + rt http.RoundTripper +} + +func (w *wrapTransport) RoundTrip(req *http.Request) (*http.Response, error) { + res, err := w.rt.RoundTrip(req) + if err != nil { + return nil, err + } + res.Header.Add("wrapTransport", "Y") + return res, nil +} + +// wrapTransportA sets "pluginA" = "Y" on the response. +type wrapTransportA struct { + rt http.RoundTripper +} + +func (w *wrapTransportA) RoundTrip(req *http.Request) (*http.Response, error) { + res, err := w.rt.RoundTrip(req) + if err != nil { + return nil, err + } + res.Header.Add("pluginA", "Y") + return res, nil +} + +type pluginA struct{} + +func (*pluginA) WrapTransport(rt http.RoundTripper) http.RoundTripper { + return &wrapTransportA{rt} +} + +func (*pluginA) Login() error { return nil } + +func pluginAProvider(string, map[string]string, AuthProviderConfigPersister) (AuthProvider, error) { + return &pluginA{}, nil +} + +// wrapTransportB sets "pluginB" = "Y" on the response. +type wrapTransportB struct { + rt http.RoundTripper +} + +func (w *wrapTransportB) RoundTrip(req *http.Request) (*http.Response, error) { + res, err := w.rt.RoundTrip(req) + if err != nil { + return nil, err + } + res.Header.Add("pluginB", "Y") + return res, nil +} + +type pluginB struct{} + +func (*pluginB) WrapTransport(rt http.RoundTripper) http.RoundTripper { + return &wrapTransportB{rt} +} + +func (*pluginB) Login() error { return nil } + +func pluginBProvider(string, map[string]string, AuthProviderConfigPersister) (AuthProvider, error) { + return &pluginB{}, nil +} + +// pluginFailProvider simulates a registered AuthPlugin that fails to load. +func pluginFailProvider(string, map[string]string, AuthProviderConfigPersister) (AuthProvider, error) { + return nil, fmt.Errorf("Failed to load AuthProvider") +} + +type inMemoryPersister struct { + savedConfig map[string]string +} + +func (i *inMemoryPersister) Persist(config map[string]string) error { + i.savedConfig = make(map[string]string) + for k, v := range config { + i.savedConfig[k] = v + } + return nil +} + +// wrapTransportPersist increments the "roundTrips" entry from the config when +// roundTrip is called.
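+//
+// For example, a config persisted as {"roundTrips": "123"} is persisted as
+// {"roundTrips": "124"} after one RoundTrip, matching the expectations in
+// TestAuthPluginPersist above.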
+type wrapTransportPersist struct { + rt http.RoundTripper + config map[string]string + persister AuthProviderConfigPersister +} + +func (w *wrapTransportPersist) RoundTrip(req *http.Request) (*http.Response, error) { + roundTrips := 0 + if rtVal, ok := w.config["roundTrips"]; ok { + var err error + roundTrips, err = strconv.Atoi(rtVal) + if err != nil { + return nil, err + } + } + roundTrips++ + w.config["roundTrips"] = fmt.Sprintf("%d", roundTrips) + if err := w.persister.Persist(w.config); err != nil { + return nil, err + } + return w.rt.RoundTrip(req) +} + +type pluginPersist struct { + config map[string]string + persister AuthProviderConfigPersister +} + +func (p *pluginPersist) WrapTransport(rt http.RoundTripper) http.RoundTripper { + return &wrapTransportPersist{rt, p.config, p.persister} +} + +// Login sets the config entry "login" to "Y". +func (p *pluginPersist) Login() error { + p.config["login"] = "Y" + p.persister.Persist(p.config) + return nil +} + +func pluginPersistProvider(_ string, config map[string]string, persister AuthProviderConfigPersister) (AuthProvider, error) { + return &pluginPersist{config, persister}, nil +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/restclient/request.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/restclient/request.go index 0c9a10b7de09..9fd3f0ddb091 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/restclient/request.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/restclient/request.go @@ -39,11 +39,12 @@ import ( "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/runtime/serializer/streaming" + "k8s.io/kubernetes/pkg/util/flowcontrol" "k8s.io/kubernetes/pkg/util/net" "k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/watch" - watchjson "k8s.io/kubernetes/pkg/watch/json" + "k8s.io/kubernetes/pkg/watch/versioned" ) var ( @@ -90,8 +91,9 @@ type Request struct { client HTTPClient verb string - baseURL *url.URL - content ContentConfig + baseURL *url.URL + content ContentConfig + serializers Serializers // generic components accessible via method setters pathPrefix string @@ -117,11 +119,11 @@ type Request struct { resp *http.Response backoffMgr BackoffManager - throttle util.RateLimiter + throttle flowcontrol.RateLimiter } // NewRequest creates a new request helper object for accessing runtime.Objects on a server. 
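+// A minimal call with the new signature (taken from TestNewRequestSetsAccept
+// in request_test.go in this change; a nil backoff falls back to NoBackoff and
+// a nil throttle disables rate limiting, as the code below shows) looks like:
+//
+//	r := NewRequest(nil, "get", &url.URL{Path: "/path/"}, "",
+//		ContentConfig{ContentType: "application/other"}, Serializers{}, nil, nil)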
-func NewRequest(client HTTPClient, verb string, baseURL *url.URL, versionedAPIPath string, content ContentConfig, backoff BackoffManager, throttle util.RateLimiter) *Request { +func NewRequest(client HTTPClient, verb string, baseURL *url.URL, versionedAPIPath string, content ContentConfig, serializers Serializers, backoff BackoffManager, throttle flowcontrol.RateLimiter) *Request { if backoff == nil { glog.V(2).Infof("Not implementing request backoff strategy.") backoff = &NoBackoff{} @@ -132,13 +134,14 @@ func NewRequest(client HTTPClient, verb string, baseURL *url.URL, versionedAPIPa pathPrefix = path.Join(pathPrefix, baseURL.Path) } r := &Request{ - client: client, - verb: verb, - baseURL: baseURL, - pathPrefix: path.Join(pathPrefix, versionedAPIPath), - content: content, - backoffMgr: backoff, - throttle: throttle, + client: client, + verb: verb, + baseURL: baseURL, + pathPrefix: path.Join(pathPrefix, versionedAPIPath), + content: content, + serializers: serializers, + backoffMgr: backoff, + throttle: throttle, } if len(content.ContentType) > 0 { r.SetHeader("Accept", content.ContentType+", */*") @@ -176,8 +179,8 @@ func (r *Request) Resource(resource string) *Request { r.err = fmt.Errorf("resource already set to %q, cannot change to %q", r.resource, resource) return r } - if ok, msg := validation.IsValidPathSegmentName(resource); !ok { - r.err = fmt.Errorf("invalid resource %q: %s", resource, msg) + if msgs := validation.IsValidPathSegmentName(resource); len(msgs) != 0 { + r.err = fmt.Errorf("invalid resource %q: %v", resource, msgs) return r } r.resource = resource @@ -196,8 +199,8 @@ func (r *Request) SubResource(subresources ...string) *Request { return r } for _, s := range subresources { - if ok, msg := validation.IsValidPathSegmentName(s); !ok { - r.err = fmt.Errorf("invalid subresource %q: %s", s, msg) + if msgs := validation.IsValidPathSegmentName(s); len(msgs) != 0 { + r.err = fmt.Errorf("invalid subresource %q: %v", s, msgs) return r } } @@ -218,8 +221,8 @@ func (r *Request) Name(resourceName string) *Request { r.err = fmt.Errorf("resource name already set to %q, cannot change to %q", r.resourceName, resourceName) return r } - if ok, msg := validation.IsValidPathSegmentName(resourceName); !ok { - r.err = fmt.Errorf("invalid resource name %q: %s", resourceName, msg) + if msgs := validation.IsValidPathSegmentName(resourceName); len(msgs) != 0 { + r.err = fmt.Errorf("invalid resource name %q: %v", resourceName, msgs) return r } r.resourceName = resourceName @@ -235,8 +238,8 @@ func (r *Request) Namespace(namespace string) *Request { r.err = fmt.Errorf("namespace already set to %q, cannot change to %q", r.namespace, namespace) return r } - if ok, msg := validation.IsValidPathSegmentName(namespace); !ok { - r.err = fmt.Errorf("invalid namespace %q: %s", namespace, msg) + if msgs := validation.IsValidPathSegmentName(namespace); len(msgs) != 0 { + r.err = fmt.Errorf("invalid namespace %q: %v", namespace, msgs) return r } r.namespaceSet = true @@ -536,10 +539,10 @@ func (r *Request) Body(obj interface{}) *Request { return r } glog.V(8).Infof("Request Body: %s", string(data)) - r.body = bytes.NewBuffer(data) + r.body = bytes.NewReader(data) case []byte: glog.V(8).Infof("Request Body: %s", string(t)) - r.body = bytes.NewBuffer(t) + r.body = bytes.NewReader(t) case io.Reader: r.body = t case runtime.Object: @@ -547,13 +550,13 @@ func (r *Request) Body(obj interface{}) *Request { if reflect.ValueOf(t).IsNil() { return r } - data, err := runtime.Encode(r.content.Codec, t) + data, err := 
runtime.Encode(r.serializers.Encoder, t) if err != nil { r.err = err return r } glog.V(8).Infof("Request Body: %s", string(data)) - r.body = bytes.NewBuffer(data) + r.body = bytes.NewReader(data) r.SetHeader("Content-Type", r.content.ContentType) default: r.err = fmt.Errorf("unknown type used for body: %+v", obj) @@ -624,7 +627,7 @@ func (r *Request) tryThrottle() { r.throttle.Accept() } if latency := time.Since(now); latency > longThrottleLatency { - glog.Warningf("Throttling request took %v, request: %s:%s", latency, r.verb, r.URL().String()) + glog.V(4).Infof("Throttling request took %v, request: %s:%s", latency, r.verb, r.URL().String()) } } @@ -636,11 +639,16 @@ func (r *Request) Watch() (watch.Interface, error) { if r.err != nil { return nil, r.err } + if r.serializers.Framer == nil { + return nil, fmt.Errorf("watching resources is not possible with this client (content-type: %s)", r.content.ContentType) + } + url := r.URL().String() req, err := http.NewRequest(r.verb, url, r.body) if err != nil { return nil, err } + req.Header = r.headers client := r.client if client == nil { client = http.DefaultClient @@ -670,7 +678,9 @@ func (r *Request) Watch() (watch.Interface, error) { } return nil, fmt.Errorf("for request '%+v', got status: %v", url, resp.StatusCode) } - return watch.NewStreamWatcher(watchjson.NewDecoder(resp.Body, r.content.Codec)), nil + framer := r.serializers.Framer.NewFrameReader(resp.Body) + decoder := streaming.NewDecoder(framer, r.serializers.StreamingSerializer) + return watch.NewStreamWatcher(versioned.NewDecoder(decoder, r.serializers.Decoder)), nil } // updateURLMetrics is a convenience function for pushing metrics. @@ -706,6 +716,7 @@ func (r *Request) Stream() (io.ReadCloser, error) { if err != nil { return nil, err } + req.Header = r.headers client := r.client if client == nil { client = http.DefaultClient @@ -738,7 +749,8 @@ func (r *Request) Stream() (io.ReadCloser, error) { return nil, fmt.Errorf("%v while accessing %v", resp.Status, url) } - if runtimeObject, err := runtime.Decode(r.content.Codec, bodyBytes); err == nil { + // TODO: Check ContentType. + if runtimeObject, err := runtime.Decode(r.serializers.Decoder, bodyBytes); err == nil { statusError := errors.FromObject(runtimeObject) if _, ok := statusError.(errors.APIStatus); ok { @@ -811,6 +823,15 @@ func (r *Request) request(fn func(*http.Request, *http.Response)) error { retries++ if seconds, wait := checkWait(resp); wait && retries < maxRetries { + if seeker, ok := r.body.(io.Seeker); ok && r.body != nil { + _, err := seeker.Seek(0, 0) + if err != nil { + glog.V(4).Infof("Could not retry request, can't Seek() back to beginning of body for %T", r.body) + fn(req, resp) + return true + } + } + glog.V(4).Infof("Got a Retry-After %s response for attempt %d to %v", seconds, retries, url) r.backoffMgr.Sleep(time.Duration(seconds) * time.Second) return false @@ -876,7 +897,7 @@ func (r *Request) transformResponse(resp *http.Response, req *http.Request) Resu // default groupVersion, otherwise a status response won't be correctly // decoded. 
status := &unversioned.Status{} - err := runtime.DecodeInto(r.content.Codec, body, status) + err := runtime.DecodeInto(r.serializers.Decoder, body, status) if err == nil && len(status.Status) > 0 { isStatusResponse = true } @@ -898,11 +919,30 @@ func (r *Request) transformResponse(resp *http.Response, req *http.Request) Resu return Result{err: errors.FromObject(status)} } + contentType := resp.Header.Get("Content-Type") + var decoder runtime.Decoder + if contentType == r.content.ContentType { + decoder = r.serializers.Decoder + } else { + mediaType, params, err := mime.ParseMediaType(contentType) + if err != nil { + return Result{err: errors.NewInternalError(err)} + } + decoder, err = r.serializers.RenegotiatedDecoder(mediaType, params) + if err != nil { + return Result{ + body: body, + contentType: contentType, + statusCode: resp.StatusCode, + } + } + } + return Result{ body: body, - contentType: resp.Header.Get("Content-Type"), + contentType: contentType, statusCode: resp.StatusCode, - decoder: r.content.Codec, + decoder: decoder, } } @@ -1008,6 +1048,9 @@ func (r Result) Get() (runtime.Object, error) { if r.err != nil { return nil, r.err } + if r.decoder == nil { + return nil, fmt.Errorf("serializer for %s doesn't exist", r.contentType) + } return runtime.Decode(r.decoder, r.body) } @@ -1023,6 +1066,9 @@ func (r Result) Into(obj runtime.Object) error { if r.err != nil { return r.err } + if r.decoder == nil { + return fmt.Errorf("serializer for %s doesn't exist", r.contentType) + } return runtime.DecodeInto(r.decoder, r.body, obj) } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/restclient/request_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/restclient/request_test.go new file mode 100644 index 000000000000..e770a1ca083d --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/restclient/request_test.go @@ -0,0 +1,1371 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package restclient + +import ( + "bytes" + "errors" + "io" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/url" + "os" + "reflect" + "strings" + "testing" + "time" + + "k8s.io/kubernetes/pkg/api" + apierrors "k8s.io/kubernetes/pkg/api/errors" + "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/labels" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/runtime/serializer/streaming" + "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/flowcontrol" + "k8s.io/kubernetes/pkg/util/httpstream" + "k8s.io/kubernetes/pkg/util/intstr" + utiltesting "k8s.io/kubernetes/pkg/util/testing" + "k8s.io/kubernetes/pkg/watch" + "k8s.io/kubernetes/pkg/watch/versioned" +) + +func TestNewRequestSetsAccept(t *testing.T) { + r := NewRequest(nil, "get", &url.URL{Path: "/path/"}, "", ContentConfig{}, Serializers{}, nil, nil) + if r.headers.Get("Accept") != "" { + t.Errorf("unexpected headers: %#v", r.headers) + } + r = NewRequest(nil, "get", &url.URL{Path: "/path/"}, "", ContentConfig{ContentType: "application/other"}, Serializers{}, nil, nil) + if r.headers.Get("Accept") != "application/other, */*" { + t.Errorf("unexpected headers: %#v", r.headers) + } +} + +type clientFunc func(req *http.Request) (*http.Response, error) + +func (f clientFunc) Do(req *http.Request) (*http.Response, error) { + return f(req) +} + +func TestRequestSetsHeaders(t *testing.T) { + server := clientFunc(func(req *http.Request) (*http.Response, error) { + if req.Header.Get("Accept") != "application/other, */*" { + t.Errorf("unexpected headers: %#v", req.Header) + } + return &http.Response{ + StatusCode: http.StatusForbidden, + Body: ioutil.NopCloser(bytes.NewReader([]byte{})), + }, nil + }) + config := defaultContentConfig() + config.ContentType = "application/other" + serializers := defaultSerializers() + r := NewRequest(server, "get", &url.URL{Path: "/path"}, "", config, serializers, nil, nil) + + // Check if all "issue" methods are setting headers. + _ = r.Do() + _, _ = r.Watch() + _, _ = r.Stream() +} + +func TestRequestWithErrorWontChange(t *testing.T) { + original := Request{ + err: errors.New("test"), + content: ContentConfig{GroupVersion: testapi.Default.GroupVersion()}, + } + r := original + changed := r.Param("foo", "bar"). + LabelsSelectorParam(labels.Set{"a": "b"}.AsSelector()). + UintParam("uint", 1). + AbsPath("/abs"). + Prefix("test"). + Suffix("testing"). + Namespace("new"). + Resource("foos"). + Name("bars"). + Body("foo"). 
+ Timeout(time.Millisecond) + if changed != &r { + t.Errorf("returned request should point to the same object") + } + if !reflect.DeepEqual(changed, &original) { + t.Errorf("expected %#v, got %#v", &original, changed) + } +} + +func TestRequestPreservesBaseTrailingSlash(t *testing.T) { + r := &Request{baseURL: &url.URL{}, pathPrefix: "/path/"} + if s := r.URL().String(); s != "/path/" { + t.Errorf("trailing slash should be preserved: %s", s) + } +} + +func TestRequestAbsPathPreservesTrailingSlash(t *testing.T) { + r := (&Request{baseURL: &url.URL{}}).AbsPath("/foo/") + if s := r.URL().String(); s != "/foo/" { + t.Errorf("trailing slash should be preserved: %s", s) + } + + r = (&Request{baseURL: &url.URL{}}).AbsPath("/foo/") + if s := r.URL().String(); s != "/foo/" { + t.Errorf("trailing slash should be preserved: %s", s) + } +} + +func TestRequestAbsPathJoins(t *testing.T) { + r := (&Request{baseURL: &url.URL{}}).AbsPath("foo/bar", "baz") + if s := r.URL().String(); s != "foo/bar/baz" { + t.Errorf("trailing slash should be preserved: %s", s) + } +} + +func TestRequestSetsNamespace(t *testing.T) { + r := (&Request{ + baseURL: &url.URL{ + Path: "/", + }, + }).Namespace("foo") + if r.namespace == "" { + t.Errorf("namespace should be set: %#v", r) + } + + if s := r.URL().String(); s != "namespaces/foo" { + t.Errorf("namespace should be in path: %s", s) + } +} + +func TestRequestOrdersNamespaceInPath(t *testing.T) { + r := (&Request{ + baseURL: &url.URL{}, + pathPrefix: "/test/", + }).Name("bar").Resource("baz").Namespace("foo") + if s := r.URL().String(); s != "/test/namespaces/foo/baz/bar" { + t.Errorf("namespace should be in order in path: %s", s) + } +} + +func TestRequestOrdersSubResource(t *testing.T) { + r := (&Request{ + baseURL: &url.URL{}, + pathPrefix: "/test/", + }).Name("bar").Resource("baz").Namespace("foo").Suffix("test").SubResource("a", "b") + if s := r.URL().String(); s != "/test/namespaces/foo/baz/bar/a/b/test" { + t.Errorf("namespace should be in order in path: %s", s) + } +} + +func TestRequestSetTwiceError(t *testing.T) { + if (&Request{}).Name("bar").Name("baz").err == nil { + t.Errorf("setting name twice should result in error") + } + if (&Request{}).Namespace("bar").Namespace("baz").err == nil { + t.Errorf("setting namespace twice should result in error") + } + if (&Request{}).Resource("bar").Resource("baz").err == nil { + t.Errorf("setting resource twice should result in error") + } + if (&Request{}).SubResource("bar").SubResource("baz").err == nil { + t.Errorf("setting subresource twice should result in error") + } +} + +func TestInvalidSegments(t *testing.T) { + invalidSegments := []string{".", "..", "test/segment", "test%2bsegment"} + setters := map[string]func(string, *Request){ + "namespace": func(s string, r *Request) { r.Namespace(s) }, + "resource": func(s string, r *Request) { r.Resource(s) }, + "name": func(s string, r *Request) { r.Name(s) }, + "subresource": func(s string, r *Request) { r.SubResource(s) }, + } + for _, invalidSegment := range invalidSegments { + for setterName, setter := range setters { + r := &Request{} + setter(invalidSegment, r) + if r.err == nil { + t.Errorf("%s: %s: expected error, got none", setterName, invalidSegment) + } + } + } +} + +func TestRequestParam(t *testing.T) { + r := (&Request{}).Param("foo", "a") + if !reflect.DeepEqual(r.params, url.Values{"foo": []string{"a"}}) { + t.Errorf("should have set a param: %#v", r) + } + + r.Param("bar", "1") + r.Param("bar", "2") + if !reflect.DeepEqual(r.params, url.Values{"foo": 
[]string{"a"}, "bar": []string{"1", "2"}}) { + t.Errorf("should have set a param: %#v", r) + } +} + +func TestRequestVersionedParams(t *testing.T) { + r := (&Request{content: ContentConfig{GroupVersion: &v1.SchemeGroupVersion}}).Param("foo", "a") + if !reflect.DeepEqual(r.params, url.Values{"foo": []string{"a"}}) { + t.Errorf("should have set a param: %#v", r) + } + r.VersionedParams(&api.PodLogOptions{Follow: true, Container: "bar"}, api.ParameterCodec) + + if !reflect.DeepEqual(r.params, url.Values{ + "foo": []string{"a"}, + "container": []string{"bar"}, + "follow": []string{"true"}, + }) { + t.Errorf("should have set a param: %#v", r) + } +} + +func TestRequestVersionedParamsFromListOptions(t *testing.T) { + r := &Request{content: ContentConfig{GroupVersion: &v1.SchemeGroupVersion}} + r.VersionedParams(&api.ListOptions{ResourceVersion: "1"}, api.ParameterCodec) + if !reflect.DeepEqual(r.params, url.Values{ + "resourceVersion": []string{"1"}, + }) { + t.Errorf("should have set a param: %#v", r) + } + + var timeout int64 = 10 + r.VersionedParams(&api.ListOptions{ResourceVersion: "2", TimeoutSeconds: &timeout}, api.ParameterCodec) + if !reflect.DeepEqual(r.params, url.Values{ + "resourceVersion": []string{"1", "2"}, + "timeoutSeconds": []string{"10"}, + }) { + t.Errorf("should have set a param: %#v", r) + } +} + +func TestRequestURI(t *testing.T) { + r := (&Request{}).Param("foo", "a") + r.Prefix("other") + r.RequestURI("/test?foo=b&a=b&c=1&c=2") + if r.pathPrefix != "/test" { + t.Errorf("path is wrong: %#v", r) + } + if !reflect.DeepEqual(r.params, url.Values{"a": []string{"b"}, "foo": []string{"b"}, "c": []string{"1", "2"}}) { + t.Errorf("should have set a param: %#v", r) + } +} + +type NotAnAPIObject struct{} + +func (obj NotAnAPIObject) GroupVersionKind() *unversioned.GroupVersionKind { return nil } +func (obj NotAnAPIObject) SetGroupVersionKind(gvk *unversioned.GroupVersionKind) {} + +func defaultContentConfig() ContentConfig { + return ContentConfig{ + GroupVersion: testapi.Default.GroupVersion(), + Codec: testapi.Default.Codec(), + NegotiatedSerializer: testapi.Default.NegotiatedSerializer(), + } +} + +func defaultSerializers() Serializers { + return Serializers{ + Encoder: testapi.Default.Codec(), + Decoder: testapi.Default.Codec(), + StreamingSerializer: testapi.Default.Codec(), + Framer: runtime.DefaultFramer, + } +} + +func TestRequestBody(t *testing.T) { + // test unknown type + r := (&Request{}).Body([]string{"test"}) + if r.err == nil || r.body != nil { + t.Errorf("should have set err and left body nil: %#v", r) + } + + // test error set when failing to read file + f, err := ioutil.TempFile("", "test") + if err != nil { + t.Fatalf("unable to create temp file") + } + defer f.Close() + os.Remove(f.Name()) + r = (&Request{}).Body(f.Name()) + if r.err == nil || r.body != nil { + t.Errorf("should have set err and left body nil: %#v", r) + } + + // test unencodable api object + r = (&Request{content: defaultContentConfig()}).Body(&NotAnAPIObject{}) + if r.err == nil || r.body != nil { + t.Errorf("should have set err and left body nil: %#v", r) + } +} + +func TestResultIntoWithErrReturnsErr(t *testing.T) { + res := Result{err: errors.New("test")} + if err := res.Into(&api.Pod{}); err != res.err { + t.Errorf("should have returned exact error from result") + } +} + +func TestURLTemplate(t *testing.T) { + uri, _ := url.Parse("http://localhost") + r := NewRequest(nil, "POST", uri, "", ContentConfig{GroupVersion: &unversioned.GroupVersion{Group: "test"}}, Serializers{}, nil, nil) + 
r.Prefix("pre1").Resource("r1").Namespace("ns").Name("nm").Param("p0", "v0") + full := r.URL() + if full.String() != "http://localhost/pre1/namespaces/ns/r1/nm?p0=v0" { + t.Errorf("unexpected initial URL: %s", full) + } + actual := r.finalURLTemplate() + expected := "http://localhost/pre1/namespaces/%7Bnamespace%7D/r1/%7Bname%7D?p0=%7Bvalue%7D" + if actual != expected { + t.Errorf("unexpected URL template: %s %s", actual, expected) + } + if r.URL().String() != full.String() { + t.Errorf("creating URL template changed request: %s -> %s", full.String(), r.URL().String()) + } +} + +func TestTransformResponse(t *testing.T) { + invalid := []byte("aaaaa") + uri, _ := url.Parse("http://localhost") + testCases := []struct { + Response *http.Response + Data []byte + Created bool + Error bool + ErrFn func(err error) bool + }{ + {Response: &http.Response{StatusCode: 200}, Data: []byte{}}, + {Response: &http.Response{StatusCode: 201}, Data: []byte{}, Created: true}, + {Response: &http.Response{StatusCode: 199}, Error: true}, + {Response: &http.Response{StatusCode: 500}, Error: true}, + {Response: &http.Response{StatusCode: 422}, Error: true}, + {Response: &http.Response{StatusCode: 409}, Error: true}, + {Response: &http.Response{StatusCode: 404}, Error: true}, + {Response: &http.Response{StatusCode: 401}, Error: true}, + { + Response: &http.Response{ + StatusCode: 401, + Header: http.Header{"Content-Type": []string{"application/json"}}, + Body: ioutil.NopCloser(bytes.NewReader(invalid)), + }, + Error: true, + ErrFn: func(err error) bool { + return err.Error() != "aaaaa" && apierrors.IsUnauthorized(err) + }, + }, + { + Response: &http.Response{ + StatusCode: 401, + Header: http.Header{"Content-Type": []string{"text/any"}}, + Body: ioutil.NopCloser(bytes.NewReader(invalid)), + }, + Error: true, + ErrFn: func(err error) bool { + return strings.Contains(err.Error(), "server has asked for the client to provide") && apierrors.IsUnauthorized(err) + }, + }, + {Response: &http.Response{StatusCode: 403}, Error: true}, + {Response: &http.Response{StatusCode: 200, Body: ioutil.NopCloser(bytes.NewReader(invalid))}, Data: invalid}, + {Response: &http.Response{StatusCode: 200, Body: ioutil.NopCloser(bytes.NewReader(invalid))}, Data: invalid}, + } + for i, test := range testCases { + r := NewRequest(nil, "", uri, "", defaultContentConfig(), defaultSerializers(), nil, nil) + if test.Response.Body == nil { + test.Response.Body = ioutil.NopCloser(bytes.NewReader([]byte{})) + } + result := r.transformResponse(test.Response, &http.Request{}) + response, created, err := result.body, result.statusCode == http.StatusCreated, result.err + hasErr := err != nil + if hasErr != test.Error { + t.Errorf("%d: unexpected error: %t %v", i, test.Error, err) + } else if hasErr && test.Response.StatusCode > 399 { + status, ok := err.(apierrors.APIStatus) + if !ok { + t.Errorf("%d: response should have been transformable into APIStatus: %v", i, err) + continue + } + if int(status.Status().Code) != test.Response.StatusCode { + t.Errorf("%d: status code did not match response: %#v", i, status.Status()) + } + } + if test.ErrFn != nil && !test.ErrFn(err) { + t.Errorf("%d: error function did not match: %v", i, err) + } + if !(test.Data == nil && response == nil) && !api.Semantic.DeepDerivative(test.Data, response) { + t.Errorf("%d: unexpected response: %#v %#v", i, test.Data, response) + } + if test.Created != created { + t.Errorf("%d: expected created %t, got %t", i, test.Created, created) + } + } +} + +func TestTransformUnstructuredError(t 
*testing.T) { + testCases := []struct { + Req *http.Request + Res *http.Response + + Resource string + Name string + + ErrFn func(error) bool + }{ + { + Resource: "foo", + Name: "bar", + Req: &http.Request{ + Method: "POST", + }, + Res: &http.Response{ + StatusCode: http.StatusConflict, + Body: ioutil.NopCloser(bytes.NewReader(nil)), + }, + ErrFn: apierrors.IsAlreadyExists, + }, + { + Resource: "foo", + Name: "bar", + Req: &http.Request{ + Method: "PUT", + }, + Res: &http.Response{ + StatusCode: http.StatusConflict, + Body: ioutil.NopCloser(bytes.NewReader(nil)), + }, + ErrFn: apierrors.IsConflict, + }, + { + Resource: "foo", + Name: "bar", + Req: &http.Request{}, + Res: &http.Response{ + StatusCode: http.StatusNotFound, + Body: ioutil.NopCloser(bytes.NewReader(nil)), + }, + ErrFn: apierrors.IsNotFound, + }, + { + Req: &http.Request{}, + Res: &http.Response{ + StatusCode: http.StatusBadRequest, + Body: ioutil.NopCloser(bytes.NewReader(nil)), + }, + ErrFn: apierrors.IsBadRequest, + }, + } + + for _, testCase := range testCases { + r := &Request{ + content: defaultContentConfig(), + serializers: defaultSerializers(), + resourceName: testCase.Name, + resource: testCase.Resource, + } + result := r.transformResponse(testCase.Res, testCase.Req) + err := result.err + if !testCase.ErrFn(err) { + t.Errorf("unexpected error: %v", err) + continue + } + if len(testCase.Name) != 0 && !strings.Contains(err.Error(), testCase.Name) { + t.Errorf("unexpected error string: %s", err) + } + if len(testCase.Resource) != 0 && !strings.Contains(err.Error(), testCase.Resource) { + t.Errorf("unexpected error string: %s", err) + } + } +} + +func TestRequestWatch(t *testing.T) { + testCases := []struct { + Request *Request + Err bool + ErrFn func(error) bool + Empty bool + }{ + { + Request: &Request{err: errors.New("bail")}, + Err: true, + }, + { + Request: &Request{baseURL: &url.URL{}, pathPrefix: "%"}, + Err: true, + }, + { + Request: &Request{ + client: clientFunc(func(req *http.Request) (*http.Response, error) { + return nil, errors.New("err") + }), + baseURL: &url.URL{}, + }, + Err: true, + }, + { + Request: &Request{ + content: defaultContentConfig(), + serializers: defaultSerializers(), + client: clientFunc(func(req *http.Request) (*http.Response, error) { + return &http.Response{ + StatusCode: http.StatusForbidden, + Body: ioutil.NopCloser(bytes.NewReader([]byte{})), + }, nil + }), + baseURL: &url.URL{}, + }, + Err: true, + ErrFn: func(err error) bool { + return apierrors.IsForbidden(err) + }, + }, + { + Request: &Request{ + content: defaultContentConfig(), + serializers: defaultSerializers(), + client: clientFunc(func(req *http.Request) (*http.Response, error) { + return &http.Response{ + StatusCode: http.StatusUnauthorized, + Body: ioutil.NopCloser(bytes.NewReader([]byte{})), + }, nil + }), + baseURL: &url.URL{}, + }, + Err: true, + ErrFn: func(err error) bool { + return apierrors.IsUnauthorized(err) + }, + }, + { + Request: &Request{ + content: defaultContentConfig(), + serializers: defaultSerializers(), + client: clientFunc(func(req *http.Request) (*http.Response, error) { + return &http.Response{ + StatusCode: http.StatusUnauthorized, + Body: ioutil.NopCloser(bytes.NewReader([]byte(runtime.EncodeOrDie(testapi.Default.Codec(), &unversioned.Status{ + Status: unversioned.StatusFailure, + Reason: unversioned.StatusReasonUnauthorized, + })))), + }, nil + }), + baseURL: &url.URL{}, + }, + Err: true, + ErrFn: func(err error) bool { + return apierrors.IsUnauthorized(err) + }, + }, + { + Request: &Request{ + 
serializers: defaultSerializers(), + client: clientFunc(func(req *http.Request) (*http.Response, error) { + return nil, io.EOF + }), + baseURL: &url.URL{}, + }, + Empty: true, + }, + { + Request: &Request{ + serializers: defaultSerializers(), + client: clientFunc(func(req *http.Request) (*http.Response, error) { + return nil, &url.Error{Err: io.EOF} + }), + baseURL: &url.URL{}, + }, + Empty: true, + }, + { + Request: &Request{ + serializers: defaultSerializers(), + client: clientFunc(func(req *http.Request) (*http.Response, error) { + return nil, errors.New("http: can't write HTTP request on broken connection") + }), + baseURL: &url.URL{}, + }, + Empty: true, + }, + { + Request: &Request{ + serializers: defaultSerializers(), + client: clientFunc(func(req *http.Request) (*http.Response, error) { + return nil, errors.New("foo: connection reset by peer") + }), + baseURL: &url.URL{}, + }, + Empty: true, + }, + } + for i, testCase := range testCases { + t.Logf("testcase %v", testCase.Request) + testCase.Request.backoffMgr = &NoBackoff{} + watch, err := testCase.Request.Watch() + hasErr := err != nil + if hasErr != testCase.Err { + t.Errorf("%d: expected %t, got %t: %v", i, testCase.Err, hasErr, err) + continue + } + if testCase.ErrFn != nil && !testCase.ErrFn(err) { + t.Errorf("%d: error not valid: %v", i, err) + } + if hasErr && watch != nil { + t.Errorf("%d: watch should be nil when error is returned", i) + continue + } + if testCase.Empty { + _, ok := <-watch.ResultChan() + if ok { + t.Errorf("%d: expected the watch to be empty: %#v", i, watch) + } + } + } +} + +func TestRequestStream(t *testing.T) { + testCases := []struct { + Request *Request + Err bool + }{ + { + Request: &Request{err: errors.New("bail")}, + Err: true, + }, + { + Request: &Request{baseURL: &url.URL{}, pathPrefix: "%"}, + Err: true, + }, + { + Request: &Request{ + client: clientFunc(func(req *http.Request) (*http.Response, error) { + return nil, errors.New("err") + }), + baseURL: &url.URL{}, + }, + Err: true, + }, + { + Request: &Request{ + client: clientFunc(func(req *http.Request) (*http.Response, error) { + return &http.Response{ + StatusCode: http.StatusUnauthorized, + Body: ioutil.NopCloser(bytes.NewReader([]byte(runtime.EncodeOrDie(testapi.Default.Codec(), &unversioned.Status{ + Status: unversioned.StatusFailure, + Reason: unversioned.StatusReasonUnauthorized, + })))), + }, nil + }), + content: defaultContentConfig(), + serializers: defaultSerializers(), + baseURL: &url.URL{}, + }, + Err: true, + }, + } + for i, testCase := range testCases { + testCase.Request.backoffMgr = &NoBackoff{} + body, err := testCase.Request.Stream() + hasErr := err != nil + if hasErr != testCase.Err { + t.Errorf("%d: expected %t, got %t: %v", i, testCase.Err, hasErr, err) + } + if hasErr && body != nil { + t.Errorf("%d: body should be nil when error is returned", i) + } + } +} + +type fakeUpgradeConnection struct{} + +func (c *fakeUpgradeConnection) CreateStream(headers http.Header) (httpstream.Stream, error) { + return nil, nil +} +func (c *fakeUpgradeConnection) Close() error { + return nil +} +func (c *fakeUpgradeConnection) CloseChan() <-chan bool { + return make(chan bool) +} +func (c *fakeUpgradeConnection) SetIdleTimeout(timeout time.Duration) { +} + +type fakeUpgradeRoundTripper struct { + req *http.Request + conn httpstream.Connection +} + +func (f *fakeUpgradeRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + f.req = req + b := []byte{} + body := ioutil.NopCloser(bytes.NewReader(b)) + resp := &http.Response{ + 
StatusCode: 101, + Body: body, + } + return resp, nil +} + +func (f *fakeUpgradeRoundTripper) NewConnection(resp *http.Response) (httpstream.Connection, error) { + return f.conn, nil +} + +func TestRequestDo(t *testing.T) { + testCases := []struct { + Request *Request + Err bool + }{ + { + Request: &Request{err: errors.New("bail")}, + Err: true, + }, + { + Request: &Request{baseURL: &url.URL{}, pathPrefix: "%"}, + Err: true, + }, + { + Request: &Request{ + client: clientFunc(func(req *http.Request) (*http.Response, error) { + return nil, errors.New("err") + }), + baseURL: &url.URL{}, + }, + Err: true, + }, + } + for i, testCase := range testCases { + testCase.Request.backoffMgr = &NoBackoff{} + body, err := testCase.Request.Do().Raw() + hasErr := err != nil + if hasErr != testCase.Err { + t.Errorf("%d: expected %t, got %t: %v", i, testCase.Err, hasErr, err) + } + if hasErr && body != nil { + t.Errorf("%d: body should be nil when error is returned", i) + } + } +} + +func TestDoRequestNewWay(t *testing.T) { + reqBody := "request body" + expectedObj := &api.Service{Spec: api.ServiceSpec{Ports: []api.ServicePort{{ + Protocol: "TCP", + Port: 12345, + TargetPort: intstr.FromInt(12345), + }}}} + expectedBody, _ := runtime.Encode(testapi.Default.Codec(), expectedObj) + fakeHandler := utiltesting.FakeHandler{ + StatusCode: 200, + ResponseBody: string(expectedBody), + T: t, + } + testServer := httptest.NewServer(&fakeHandler) + defer testServer.Close() + c := testRESTClient(t, testServer) + obj, err := c.Verb("POST"). + Prefix("foo", "bar"). + Suffix("baz"). + Timeout(time.Second). + Body([]byte(reqBody)). + Do().Get() + if err != nil { + t.Errorf("Unexpected error: %v %#v", err, err) + return + } + if obj == nil { + t.Error("nil obj") + } else if !api.Semantic.DeepDerivative(expectedObj, obj) { + t.Errorf("Expected: %#v, got %#v", expectedObj, obj) + } + requestURL := testapi.Default.ResourcePathWithPrefix("foo/bar", "", "", "baz") + requestURL += "?timeout=1s" + fakeHandler.ValidateRequest(t, requestURL, "POST", &reqBody) +} + +// This test assumes that the client implementation backs off exponentially, for an individual request. +func TestBackoffLifecycle(t *testing.T) { + count := 0 + testServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + count++ + t.Logf("Attempt %d", count) + if count == 5 || count == 9 { + w.WriteHeader(http.StatusOK) + return + } else { + w.WriteHeader(http.StatusGatewayTimeout) + return + } + })) + defer testServer.Close() + c := testRESTClient(t, testServer) + + // Test backoff recovery and increase. This correlates to the constants + // which are used in the server implementation returning StatusOK above. + seconds := []int{0, 1, 2, 4, 8, 0, 1, 2, 4, 0} + request := c.Verb("POST").Prefix("backofftest").Suffix("abc") + clock := util.FakeClock{} + request.backoffMgr = &URLBackoff{ + // Use a fake backoff here to avoid flakes and speed the test up. 
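+		// With a 1s base the fake backoff doubles per failed attempt
+		// (0,1,2,4,8s), and the StatusOK responses on attempts 5 and 9
+		// reset it, which is exactly the `seconds` table above.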
+ Backoff: flowcontrol.NewFakeBackOff( + time.Duration(1)*time.Second, + time.Duration(200)*time.Second, + &clock, + )} + + for _, sec := range seconds { + thisBackoff := request.backoffMgr.CalculateBackoff(request.URL()) + t.Logf("Current backoff %v", thisBackoff) + if thisBackoff != time.Duration(sec)*time.Second { + t.Errorf("Backoff is %v instead of %v", thisBackoff, sec) + } + now := clock.Now() + request.DoRaw() + elapsed := clock.Since(now) + if clock.Since(now) != thisBackoff { + t.Errorf("CalculatedBackoff not honored by clock: Expected time of %v, but got %v ", thisBackoff, elapsed) + } + } +} + +func TestCheckRetryClosesBody(t *testing.T) { + count := 0 + ch := make(chan struct{}) + testServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + count++ + t.Logf("attempt %d", count) + if count >= 5 { + w.WriteHeader(http.StatusOK) + close(ch) + return + } + w.Header().Set("Retry-After", "0") + w.WriteHeader(apierrors.StatusTooManyRequests) + })) + defer testServer.Close() + + c := testRESTClient(t, testServer) + _, err := c.Verb("POST"). + Prefix("foo", "bar"). + Suffix("baz"). + Timeout(time.Second). + Body([]byte(strings.Repeat("abcd", 1000))). + DoRaw() + if err != nil { + t.Fatalf("Unexpected error: %v %#v", err, err) + } + <-ch + if count != 5 { + t.Errorf("unexpected retries: %d", count) + } +} + +func TestCheckRetryHandles429And5xx(t *testing.T) { + count := 0 + ch := make(chan struct{}) + testServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + data, err := ioutil.ReadAll(req.Body) + if err != nil { + t.Fatalf("unable to read request body: %v", err) + } + if !bytes.Equal(data, []byte(strings.Repeat("abcd", 1000))) { + t.Fatalf("retry did not send a complete body: %s", data) + } + t.Logf("attempt %d", count) + if count >= 4 { + w.WriteHeader(http.StatusOK) + close(ch) + return + } + w.Header().Set("Retry-After", "0") + w.WriteHeader([]int{apierrors.StatusTooManyRequests, 500, 501, 504}[count]) + count++ + })) + defer testServer.Close() + + c := testRESTClient(t, testServer) + _, err := c.Verb("POST"). + Prefix("foo", "bar"). + Suffix("baz"). + Timeout(time.Second). + Body([]byte(strings.Repeat("abcd", 1000))). + DoRaw() + if err != nil { + t.Fatalf("Unexpected error: %v %#v", err, err) + } + <-ch + if count != 4 { + t.Errorf("unexpected retries: %d", count) + } +} + +func BenchmarkCheckRetryClosesBody(b *testing.B) { + count := 0 + testServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + count++ + if count%3 == 0 { + w.WriteHeader(http.StatusOK) + return + } + w.Header().Set("Retry-After", "0") + w.WriteHeader(apierrors.StatusTooManyRequests) + })) + defer testServer.Close() + + c := testRESTClient(b, testServer) + r := c.Verb("POST"). + Prefix("foo", "bar"). + Suffix("baz"). + Timeout(time.Second). 
+ Body([]byte(strings.Repeat("abcd", 1000))) + + for i := 0; i < b.N; i++ { + if _, err := r.DoRaw(); err != nil { + b.Fatalf("Unexpected error: %v %#v", err, err) + } + } +} + +func TestDoRequestNewWayReader(t *testing.T) { + reqObj := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}} + reqBodyExpected, _ := runtime.Encode(testapi.Default.Codec(), reqObj) + expectedObj := &api.Service{Spec: api.ServiceSpec{Ports: []api.ServicePort{{ + Protocol: "TCP", + Port: 12345, + TargetPort: intstr.FromInt(12345), + }}}} + expectedBody, _ := runtime.Encode(testapi.Default.Codec(), expectedObj) + fakeHandler := utiltesting.FakeHandler{ + StatusCode: 200, + ResponseBody: string(expectedBody), + T: t, + } + testServer := httptest.NewServer(&fakeHandler) + defer testServer.Close() + c := testRESTClient(t, testServer) + obj, err := c.Verb("POST"). + Resource("bar"). + Name("baz"). + Prefix("foo"). + LabelsSelectorParam(labels.Set{"name": "foo"}.AsSelector()). + Timeout(time.Second). + Body(bytes.NewBuffer(reqBodyExpected)). + Do().Get() + if err != nil { + t.Errorf("Unexpected error: %v %#v", err, err) + return + } + if obj == nil { + t.Error("nil obj") + } else if !api.Semantic.DeepDerivative(expectedObj, obj) { + t.Errorf("Expected: %#v, got %#v", expectedObj, obj) + } + tmpStr := string(reqBodyExpected) + requestURL := testapi.Default.ResourcePathWithPrefix("foo", "bar", "", "baz") + requestURL += "?" + unversioned.LabelSelectorQueryParam(testapi.Default.GroupVersion().String()) + "=name%3Dfoo&timeout=1s" + fakeHandler.ValidateRequest(t, requestURL, "POST", &tmpStr) +} + +func TestDoRequestNewWayObj(t *testing.T) { + reqObj := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}} + reqBodyExpected, _ := runtime.Encode(testapi.Default.Codec(), reqObj) + expectedObj := &api.Service{Spec: api.ServiceSpec{Ports: []api.ServicePort{{ + Protocol: "TCP", + Port: 12345, + TargetPort: intstr.FromInt(12345), + }}}} + expectedBody, _ := runtime.Encode(testapi.Default.Codec(), expectedObj) + fakeHandler := utiltesting.FakeHandler{ + StatusCode: 200, + ResponseBody: string(expectedBody), + T: t, + } + testServer := httptest.NewServer(&fakeHandler) + defer testServer.Close() + c := testRESTClient(t, testServer) + obj, err := c.Verb("POST"). + Suffix("baz"). + Name("bar"). + Resource("foo"). + LabelsSelectorParam(labels.Set{"name": "foo"}.AsSelector()). + Timeout(time.Second). + Body(reqObj). + Do().Get() + if err != nil { + t.Errorf("Unexpected error: %v %#v", err, err) + return + } + if obj == nil { + t.Error("nil obj") + } else if !api.Semantic.DeepDerivative(expectedObj, obj) { + t.Errorf("Expected: %#v, got %#v", expectedObj, obj) + } + tmpStr := string(reqBodyExpected) + requestURL := testapi.Default.ResourcePath("foo", "", "bar/baz") + requestURL += "?" 
+ unversioned.LabelSelectorQueryParam(testapi.Default.GroupVersion().String()) + "=name%3Dfoo&timeout=1s" + fakeHandler.ValidateRequest(t, requestURL, "POST", &tmpStr) +} + +func TestDoRequestNewWayFile(t *testing.T) { + reqObj := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}} + reqBodyExpected, err := runtime.Encode(testapi.Default.Codec(), reqObj) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + + file, err := ioutil.TempFile("", "foo") + if err != nil { + t.Errorf("unexpected error: %v", err) + } + defer file.Close() + + _, err = file.Write(reqBodyExpected) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + + expectedObj := &api.Service{Spec: api.ServiceSpec{Ports: []api.ServicePort{{ + Protocol: "TCP", + Port: 12345, + TargetPort: intstr.FromInt(12345), + }}}} + expectedBody, _ := runtime.Encode(testapi.Default.Codec(), expectedObj) + fakeHandler := utiltesting.FakeHandler{ + StatusCode: 200, + ResponseBody: string(expectedBody), + T: t, + } + testServer := httptest.NewServer(&fakeHandler) + defer testServer.Close() + c := testRESTClient(t, testServer) + wasCreated := true + obj, err := c.Verb("POST"). + Prefix("foo/bar", "baz"). + Timeout(time.Second). + Body(file.Name()). + Do().WasCreated(&wasCreated).Get() + if err != nil { + t.Errorf("Unexpected error: %v %#v", err, err) + return + } + if obj == nil { + t.Error("nil obj") + } else if !api.Semantic.DeepDerivative(expectedObj, obj) { + t.Errorf("Expected: %#v, got %#v", expectedObj, obj) + } + if wasCreated { + t.Errorf("expected object was not created") + } + tmpStr := string(reqBodyExpected) + requestURL := testapi.Default.ResourcePathWithPrefix("foo/bar/baz", "", "", "") + requestURL += "?timeout=1s" + fakeHandler.ValidateRequest(t, requestURL, "POST", &tmpStr) +} + +func TestWasCreated(t *testing.T) { + reqObj := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}} + reqBodyExpected, err := runtime.Encode(testapi.Default.Codec(), reqObj) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + + expectedObj := &api.Service{Spec: api.ServiceSpec{Ports: []api.ServicePort{{ + Protocol: "TCP", + Port: 12345, + TargetPort: intstr.FromInt(12345), + }}}} + expectedBody, _ := runtime.Encode(testapi.Default.Codec(), expectedObj) + fakeHandler := utiltesting.FakeHandler{ + StatusCode: 201, + ResponseBody: string(expectedBody), + T: t, + } + testServer := httptest.NewServer(&fakeHandler) + defer testServer.Close() + c := testRESTClient(t, testServer) + wasCreated := false + obj, err := c.Verb("PUT"). + Prefix("foo/bar", "baz"). + Timeout(time.Second). + Body(reqBodyExpected). 
+ Do().WasCreated(&wasCreated).Get() + if err != nil { + t.Errorf("Unexpected error: %v %#v", err, err) + return + } + if obj == nil { + t.Error("nil obj") + } else if !api.Semantic.DeepDerivative(expectedObj, obj) { + t.Errorf("Expected: %#v, got %#v", expectedObj, obj) + } + if !wasCreated { + t.Errorf("Expected object was created") + } + + tmpStr := string(reqBodyExpected) + requestURL := testapi.Default.ResourcePathWithPrefix("foo/bar/baz", "", "", "") + requestURL += "?timeout=1s" + fakeHandler.ValidateRequest(t, requestURL, "PUT", &tmpStr) +} + +func TestVerbs(t *testing.T) { + c := testRESTClient(t, nil) + if r := c.Post(); r.verb != "POST" { + t.Errorf("Post verb is wrong") + } + if r := c.Put(); r.verb != "PUT" { + t.Errorf("Put verb is wrong") + } + if r := c.Get(); r.verb != "GET" { + t.Errorf("Get verb is wrong") + } + if r := c.Delete(); r.verb != "DELETE" { + t.Errorf("Delete verb is wrong") + } +} + +func TestAbsPath(t *testing.T) { + for i, tc := range []struct { + configPrefix string + resourcePrefix string + absPath string + wantsAbsPath string + }{ + {"/", "", "", "/"}, + {"", "", "/", "/"}, + {"", "", "/api", "/api"}, + {"", "", "/api/", "/api/"}, + {"", "", "/apis", "/apis"}, + {"", "/foo", "/bar/foo", "/bar/foo"}, + {"", "/api/foo/123", "/bar/foo", "/bar/foo"}, + {"/p1", "", "", "/p1"}, + {"/p1", "", "/", "/p1/"}, + {"/p1", "", "/api", "/p1/api"}, + {"/p1", "", "/apis", "/p1/apis"}, + {"/p1", "/r1", "/apis", "/p1/apis"}, + {"/p1", "/api/r1", "/apis", "/p1/apis"}, + {"/p1/api/p2", "", "", "/p1/api/p2"}, + {"/p1/api/p2", "", "/", "/p1/api/p2/"}, + {"/p1/api/p2", "", "/api", "/p1/api/p2/api"}, + {"/p1/api/p2", "", "/api/", "/p1/api/p2/api/"}, + {"/p1/api/p2", "/r1", "/api/", "/p1/api/p2/api/"}, + {"/p1/api/p2", "/api/r1", "/api/", "/p1/api/p2/api/"}, + } { + u, _ := url.Parse("http://localhost:123" + tc.configPrefix) + r := NewRequest(nil, "POST", u, "", ContentConfig{GroupVersion: &unversioned.GroupVersion{Group: "test"}}, Serializers{}, nil, nil).Prefix(tc.resourcePrefix).AbsPath(tc.absPath) + if r.pathPrefix != tc.wantsAbsPath { + t.Errorf("test case %d failed, unexpected path: %q, expected %q", i, r.pathPrefix, tc.wantsAbsPath) + } + } +} + +func TestUintParam(t *testing.T) { + table := []struct { + name string + testVal uint64 + expectStr string + }{ + {"foo", 31415, "http://localhost?foo=31415"}, + {"bar", 42, "http://localhost?bar=42"}, + {"baz", 0, "http://localhost?baz=0"}, + } + + for _, item := range table { + u, _ := url.Parse("http://localhost") + r := NewRequest(nil, "GET", u, "", ContentConfig{GroupVersion: &unversioned.GroupVersion{Group: "test"}}, Serializers{}, nil, nil).AbsPath("").UintParam(item.name, item.testVal) + if e, a := item.expectStr, r.URL().String(); e != a { + t.Errorf("expected %v, got %v", e, a) + } + } +} + +func TestUnacceptableParamNames(t *testing.T) { + table := []struct { + name string + testVal string + expectSuccess bool + }{ + {"timeout", "42", false}, + } + + for _, item := range table { + c := testRESTClient(t, nil) + r := c.Get().setParam(item.name, item.testVal) + if e, a := item.expectSuccess, r.err == nil; e != a { + t.Errorf("expected %v, got %v (%v)", e, a, r.err) + } + } +} + +func TestBody(t *testing.T) { + const data = "test payload" + + obj := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}} + bodyExpected, _ := runtime.Encode(testapi.Default.Codec(), obj) + + f, err := ioutil.TempFile("", "test_body") + if err != nil { + t.Fatalf("TempFile error: %v", err) + } + if _, err := f.WriteString(data); err != nil { + 
t.Fatalf("TempFile.WriteString error: %v", err) + } + f.Close() + + var nilObject *api.DeleteOptions + typedObject := interface{}(nilObject) + c := testRESTClient(t, nil) + tests := []struct { + input interface{} + expected string + headers map[string]string + }{ + {[]byte(data), data, nil}, + {f.Name(), data, nil}, + {strings.NewReader(data), data, nil}, + {obj, string(bodyExpected), map[string]string{"Content-Type": "application/json"}}, + {typedObject, "", nil}, + } + for i, tt := range tests { + r := c.Post().Body(tt.input) + if r.err != nil { + t.Errorf("%d: r.Body(%#v) error: %v", i, tt, r.err) + continue + } + if tt.headers != nil { + for k, v := range tt.headers { + if r.headers.Get(k) != v { + t.Errorf("%d: r.headers[%q] = %q; want %q", i, k, v, v) + } + } + } + + if r.body == nil { + if len(tt.expected) != 0 { + t.Errorf("%d: r.body = %q; want %q", i, r.body, tt.expected) + } + continue + } + buf := make([]byte, len(tt.expected)) + if _, err := r.body.Read(buf); err != nil { + t.Errorf("%d: r.body.Read error: %v", i, err) + continue + } + body := string(buf) + if body != tt.expected { + t.Errorf("%d: r.body = %q; want %q", i, body, tt.expected) + } + } +} + +func TestWatch(t *testing.T) { + var table = []struct { + t watch.EventType + obj runtime.Object + }{ + {watch.Added, &api.Pod{ObjectMeta: api.ObjectMeta{Name: "first"}}}, + {watch.Modified, &api.Pod{ObjectMeta: api.ObjectMeta{Name: "second"}}}, + {watch.Deleted, &api.Pod{ObjectMeta: api.ObjectMeta{Name: "last"}}}, + } + + testServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + flusher, ok := w.(http.Flusher) + if !ok { + panic("need flusher!") + } + + w.Header().Set("Transfer-Encoding", "chunked") + w.WriteHeader(http.StatusOK) + flusher.Flush() + + encoder := versioned.NewEncoder(streaming.NewEncoder(w, testapi.Default.Codec()), testapi.Default.Codec()) + for _, item := range table { + if err := encoder.Encode(&watch.Event{Type: item.t, Object: item.obj}); err != nil { + panic(err) + } + flusher.Flush() + } + })) + defer testServer.Close() + + s := testRESTClient(t, testServer) + watching, err := s.Get().Prefix("path/to/watch/thing").Watch() + if err != nil { + t.Fatalf("Unexpected error") + } + + for _, item := range table { + got, ok := <-watching.ResultChan() + if !ok { + t.Fatalf("Unexpected early close") + } + if e, a := item.t, got.Type; e != a { + t.Errorf("Expected %v, got %v", e, a) + } + if e, a := item.obj, got.Object; !api.Semantic.DeepDerivative(e, a) { + t.Errorf("Expected %v, got %v", e, a) + } + } + + _, ok := <-watching.ResultChan() + if ok { + t.Fatal("Unexpected non-close") + } +} + +func TestStream(t *testing.T) { + expectedBody := "expected body" + + testServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + flusher, ok := w.(http.Flusher) + if !ok { + panic("need flusher!") + } + w.Header().Set("Transfer-Encoding", "chunked") + w.WriteHeader(http.StatusOK) + w.Write([]byte(expectedBody)) + flusher.Flush() + })) + defer testServer.Close() + + s := testRESTClient(t, testServer) + readCloser, err := s.Get().Prefix("path/to/stream/thing").Stream() + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + defer readCloser.Close() + buf := new(bytes.Buffer) + buf.ReadFrom(readCloser) + resultBody := buf.String() + + if expectedBody != resultBody { + t.Errorf("Expected %s, got %s", expectedBody, resultBody) + } +} + +func testRESTClient(t testing.TB, srv *httptest.Server) *RESTClient { + baseURL, _ := 
url.Parse("http://localhost") + if srv != nil { + var err error + baseURL, err = url.Parse(srv.URL) + if err != nil { + t.Fatalf("failed to parse test URL: %v", err) + } + } + versionedAPIPath := testapi.Default.ResourcePath("", "", "") + client, err := NewRESTClient(baseURL, versionedAPIPath, defaultContentConfig(), 0, 0, nil, nil) + if err != nil { + t.Fatalf("failed to create a client: %v", err) + } + return client +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/restclient/transport.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/restclient/transport.go index 7d4b497c3073..0bfa2ea2720a 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/restclient/transport.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/restclient/transport.go @@ -26,14 +26,22 @@ import ( // TLSConfigFor returns a tls.Config that will provide the transport level security defined // by the provided Config. Will return nil if no transport level security is requested. func TLSConfigFor(config *Config) (*tls.Config, error) { - return transport.TLSConfigFor(config.transportConfig()) + cfg, err := config.transportConfig() + if err != nil { + return nil, err + } + return transport.TLSConfigFor(cfg) } // TransportFor returns an http.RoundTripper that will provide the authentication // or transport level security defined by the provided Config. Will return the // default http.DefaultTransport if no special case behavior is needed. func TransportFor(config *Config) (http.RoundTripper, error) { - return transport.New(config.transportConfig()) + cfg, err := config.transportConfig() + if err != nil { + return nil, err + } + return transport.New(cfg) } // HTTPWrappersForConfig wraps a round tripper with any relevant layered behavior from the @@ -41,15 +49,34 @@ func TransportFor(config *Config) (http.RoundTripper, error) { // the underlying connection (like WebSocket or HTTP2 clients). Pure HTTP clients should use // the higher level TransportFor or RESTClientFor methods. func HTTPWrappersForConfig(config *Config, rt http.RoundTripper) (http.RoundTripper, error) { - return transport.HTTPWrappersForConfig(config.transportConfig(), rt) + cfg, err := config.transportConfig() + if err != nil { + return nil, err + } + return transport.HTTPWrappersForConfig(cfg, rt) } // transportConfig converts a client config to an appropriate transport config. 
-func (c *Config) transportConfig() *transport.Config { +func (c *Config) transportConfig() (*transport.Config, error) { + wt := c.WrapTransport + if c.AuthProvider != nil { + provider, err := GetAuthProvider(c.Host, c.AuthProvider, c.AuthConfigPersister) + if err != nil { + return nil, err + } + if wt != nil { + previousWT := wt + wt = func(rt http.RoundTripper) http.RoundTripper { + return provider.WrapTransport(previousWT(rt)) + } + } else { + wt = provider.WrapTransport + } + } return &transport.Config{ UserAgent: c.UserAgent, Transport: c.Transport, - WrapTransport: c.WrapTransport, + WrapTransport: wt, TLS: transport.TLSConfig{ CAFile: c.CAFile, CAData: c.CAData, @@ -62,5 +89,6 @@ func (c *Config) transportConfig() *transport.Config { Username: c.Username, Password: c.Password, BearerToken: c.BearerToken, - } + Impersonate: c.Impersonate, + }, nil } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/restclient/url_utils_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/restclient/url_utils_test.go new file mode 100644 index 000000000000..4bf8c5423d47 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/restclient/url_utils_test.go @@ -0,0 +1,61 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package restclient + +import ( + "path" + "testing" + + "k8s.io/kubernetes/pkg/api/testapi" +) + +func TestValidatesHostParameter(t *testing.T) { + testCases := []struct { + Host string + APIPath string + + URL string + Err bool + }{ + {"127.0.0.1", "", "http://127.0.0.1/" + testapi.Default.GroupVersion().Version, false}, + {"127.0.0.1:8080", "", "http://127.0.0.1:8080/" + testapi.Default.GroupVersion().Version, false}, + {"foo.bar.com", "", "http://foo.bar.com/" + testapi.Default.GroupVersion().Version, false}, + {"http://host/prefix", "", "http://host/prefix/" + testapi.Default.GroupVersion().Version, false}, + {"http://host", "", "http://host/" + testapi.Default.GroupVersion().Version, false}, + {"http://host", "/", "http://host/" + testapi.Default.GroupVersion().Version, false}, + {"http://host", "/other", "http://host/other/" + testapi.Default.GroupVersion().Version, false}, + {"host/server", "", "", true}, + } + for i, testCase := range testCases { + u, versionedAPIPath, err := DefaultServerURL(testCase.Host, testCase.APIPath, *testapi.Default.GroupVersion(), false) + switch { + case err == nil && testCase.Err: + t.Errorf("expected error but was nil") + continue + case err != nil && !testCase.Err: + t.Errorf("unexpected error %v", err) + continue + case err != nil: + continue + } + u.Path = path.Join(u.Path, versionedAPIPath) + if e, a := testCase.URL, u.String(); e != a { + t.Errorf("%d: expected host %s, got %s", i, e, a) + continue + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/restclient/urlbackoff.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/restclient/urlbackoff.go index 7baba5c1f7e5..df453e65fdd0 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/restclient/urlbackoff.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/restclient/urlbackoff.go @@ -21,7 +21,7 @@ import ( "time" "github.com/golang/glog" - "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/flowcontrol" "k8s.io/kubernetes/pkg/util/sets" ) @@ -42,7 +42,7 @@ type BackoffManager interface { // we need for URL specific exponential backoff. type URLBackoff struct { // Uses backoff as underlying implementation. - Backoff *util.Backoff + Backoff *flowcontrol.Backoff } // NoBackoff is a stub implementation, can be used for mocking or else as a default. @@ -63,7 +63,7 @@ func (n *NoBackoff) Sleep(d time.Duration) { // by tests which want to run 1000s of mock requests without slowing down. func (b *URLBackoff) Disable() { glog.V(4).Infof("Disabling backoff strategy") - b.Backoff = util.NewBackOff(0*time.Second, 0*time.Second) + b.Backoff = flowcontrol.NewBackOff(0*time.Second, 0*time.Second) } // baseUrlKey returns the key which urls will be mapped to. diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/restclient/urlbackoff_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/restclient/urlbackoff_test.go new file mode 100644 index 000000000000..5b370dbe53a0 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/restclient/urlbackoff_test.go @@ -0,0 +1,79 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package restclient + +import ( + "net/url" + "testing" + "time" + + "k8s.io/kubernetes/pkg/util/flowcontrol" +) + +func parse(raw string) *url.URL { + theUrl, _ := url.Parse(raw) + return theUrl +} + +func TestURLBackoffFunctionalityCollisions(t *testing.T) { + myBackoff := &URLBackoff{ + Backoff: flowcontrol.NewBackOff(1*time.Second, 60*time.Second), + } + + // Add some noise and make sure backoff for a clean URL is zero. + myBackoff.UpdateBackoff(parse("http://100.200.300.400:8080"), nil, 500) + + myBackoff.UpdateBackoff(parse("http://1.2.3.4:8080"), nil, 500) + + if myBackoff.CalculateBackoff(parse("http://1.2.3.4:100")) > 0 { + t.Errorf("URLs are colliding in the backoff map!") + } +} + +// TestURLBackoffFunctionality generally tests the URLBackoff wrapper. We avoid duplicating tests from backoff and request. +func TestURLBackoffFunctionality(t *testing.T) { + myBackoff := &URLBackoff{ + Backoff: flowcontrol.NewBackOff(1*time.Second, 60*time.Second), + } + + // Now test that backoff increases, then recovers. + // 200 and 300 should both result in clearing the backoff. + // all others like 429 should result in increased backoff. + seconds := []int{0, + 1, 2, 4, 8, 0, + 1, 2} + returnCodes := []int{ + 429, 500, 501, 502, 300, + 500, 501, 502, + } + + if len(seconds) != len(returnCodes) { + t.Fatalf("responseCode to backoff arrays should be the same length... sanity check failed.") + } + + for i, sec := range seconds { + backoffSec := myBackoff.CalculateBackoff(parse("http://1.2.3.4:100")) + if backoffSec < time.Duration(sec)*time.Second || backoffSec > time.Duration(sec+5)*time.Second { + t.Errorf("Backoff out of range %v: %v %v", i, sec, backoffSec) + } + myBackoff.UpdateBackoff(parse("http://1.2.3.4:100/responseCodeForFuncTest"), nil, returnCodes[i]) + } + + if myBackoff.CalculateBackoff(parse("http://1.2.3.4:100")) == 0 { + t.Errorf("The final return code %v should have resulted in a backoff ! 
", returnCodes[7]) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/testdata/myCA.cer b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/testdata/myCA.cer new file mode 100644 index 000000000000..11148cc6b64e --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/testdata/myCA.cer @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE----- +MIIDATCCAemgAwIBAgIJAJqYDB1GJyW2MA0GCSqGSIb3DQEBBQUAMBcxFTATBgNV +BAMMDCJrdWJlcm5ldGVzIjAeFw0xNDEyMTYwNjQ2MjVaFw0xNjEyMTUwNjQ2MjVa +MBcxFTATBgNVBAMMDCJrdWJlcm5ldGVzIjCCASIwDQYJKoZIhvcNAQEBBQADggEP +ADCCAQoCggEBAM4C3mfmc2oyg6VIfwpxVOqHrD8VnGu2gxx73vGlC3QEEaMmPb+m +QcqVNGsK4bEKFgaFR1Eo+clFhWCGCIqoSMMcdV2Blpm/8g7lvtmPsYJyGo/eNjKz +b4Vl7Uyvh2M6reI2N67aXGpdp4UEhpAHZu8N+tWt7yhP2mggv4vUiYAoSZ+8+xMM +9YwX9FR02ybJkDQWPL5hjDG1vPU3FiQTlxS4LstFY1IO6apQQOmY5Jb7YXK7qVhJ +M2i/FczFKnPdMjPSs+Do0hBYG8cYVpUFm1dW/ZG/qVlPn5Huod1Qv4kqnX2E+pka +B5dcpyFYPVfKGMW1pP30Nl+AGkae8y4f3u0CAwEAAaNQME4wHQYDVR0OBBYEFJFC +Tyb1cweoRBXrbfxc53PqC4yTMB8GA1UdIwQYMBaAFJFCTyb1cweoRBXrbfxc53Pq +C4yTMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBALs+LfOEteZVTISX +dFA8+/KuxtzV2O+Hozx317VtgeyMQXX7BnMI4kPDPPrVTAZqG4xjMeMexkotXLdK +EFGm8dvnbhlmhvB6PNBTwUf0mgyVE21ajKh9wdWgeBvG+IHnth4izSUEhBXEN+bY +JlKwlgvlTtck8aLhMo5tOwwmjlYEd1jB4dQZeQdkJs2H8LY4jwlrJE+FyLS8K2/6 +fRb9REE0dDVEzQJn8OCFbrC3+HdLO3dzUZSup7gvs5dVD6Jj/PtZGLn8E96ETtT0 +aOrQMABadPgZ3nzkW8luxEes9PgSOselTR3ACnho0fUCut+PTjjsRHxDV+qJPN3A +7vL/tDs= +-----END CERTIFICATE----- diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/testdata/myCA.key b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/testdata/myCA.key new file mode 100644 index 000000000000..b7ce3c788a8e --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/testdata/myCA.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAzgLeZ+ZzajKDpUh/CnFU6oesPxWca7aDHHve8aULdAQRoyY9 +v6ZBypU0awrhsQoWBoVHUSj5yUWFYIYIiqhIwxx1XYGWmb/yDuW+2Y+xgnIaj942 +MrNvhWXtTK+HYzqt4jY3rtpcal2nhQSGkAdm7w361a3vKE/aaCC/i9SJgChJn7z7 +Ewz1jBf0VHTbJsmQNBY8vmGMMbW89TcWJBOXFLguy0VjUg7pqlBA6Zjklvthcrup +WEkzaL8VzMUqc90yM9Kz4OjSEFgbxxhWlQWbV1b9kb+pWU+fke6h3VC/iSqdfYT6 +mRoHl1ynIVg9V8oYxbWk/fQ2X4AaRp7zLh/e7QIDAQABAoIBAQDM1Etf0OEGQO1l +g/xUXLSKb5USMCGTcydPRdY4Otp1YqpKpfYVPHADxXAV0f7ucNHPb+qlxnD87rOb +cgjCHGokHIKREwyzGAbLSyED3fwnb937F3yZ0pDaeKqFaazaO3iyByg8IP5r/2xV +NFe6krGElEjG9iZo1WSZzZ3FoO+JzDvIUOKtlymmfF4Gcl2fJwljPTfrQSIx/z/r +Ag0xy53fl87wiq4ZC46uk78m9lJQs3R8ojp/9kP7TNr1YDlAs9mpwWq/pirw3v6M +1l0AisGI9sbOP5s665yLvPbc9EUHaDlfRe1gt1cNo9QrgfgvkZHZ+/DZk+S0P6RP +lJDsRUddAoGBAO7sYK62ov5a9MU01XbRb3MHpinxLperEaRmjyoywfDaIOa6IYuc +gUbShtH/VjeOTy8lnktXy8WDpg5EyWIKjXwucpHE7FAd4o7BPFbCyvVCh5s+02M9 +NYAlvRCthkjw6vl99noFLL0BFd/wLjI4O0MpHNKTJgW26mAdtmxwZ8wrAoGBANy8 +SrabkqSJaXmT30ndDz72qLCT1+KsW+bjpHGlU4VNJchGmIb+l/lCPPBiM+YcQh24 +4YMwxmTVQf1FuYrAD67dSVQzS7xqENIMI0hmpErBT8Ka71kicZINwro4+8vgfzha +YD0ohj7fIp9rkXTp/Jr9K35vQ/rrubtXascJED1HAoGAKH7gFDzYe4wnGJXP6Iev +ACw3ubwrTYGtR9QqR9i6jnwqP3Ek5mjscHiWaVmB34C7Yx5ZKiQDYcLijmCSUY/A +U1/8A0EBXMLz94ZBF+OESvWvzlxjr9pcCxBab006CXrsGMWE1UGzR4W7k20+Jzzo +roV1YSuXsjhCmW/vz4ltzmkCgYAlW+j5RxNmrasgXJqqEbQG4BBk8mDTiIB1b4nh +gi3Ene4LG4etMWHfWgqeVMCb7aRzC1t/rL2nS0DD8Q0aIq+E1QcYLSZgWUNHia5f +DqA31sf9E+P2nhHCunl+sy5Kr1BY5VLshvNRqMpfWQFhXEjYooi9+W70BPmGb6Eu +1qXc+QKBgE3mcE666Ep4d6dibMoCyea+ir5zl9PllLTpRg2OpBHdm5tk5D7Tfxx8 +Uxs2FI6oZ3G4IaeTGp6blLgyTdMLpuZGM6HdJNI16bk//E6PQV3m6jJ6duhn9Ezr +7l7eI4Y2s7H+SJixNtxtDitFOnjD+KI502ypsLEvlsXZpeXvFtaw +-----END RSA PRIVATE KEY----- diff --git 
a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/testdata/mycertvalid.cer b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/testdata/mycertvalid.cer new file mode 100644 index 000000000000..8b8bf36bc997 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/testdata/mycertvalid.cer @@ -0,0 +1,17 @@ +-----BEGIN CERTIFICATE----- +MIICpzCCAY8CCQDWu9ClTyE4ADANBgkqhkiG9w0BAQUFADAXMRUwEwYDVQQDDAwi +a3ViZXJuZXRlcyIwHhcNMTQxMjE2MDY0NjI1WhcNMTUxMjE2MDY0NjI1WjAUMRIw +EAYDVQQDDAlsb2NhbGhvc3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB +AQCogtUXHT0lvympI8FUU+wxjueCDQmNPtVuaW0LQ0tH1oQwAB7NuFUgPBZsiN8o +tI3P6EeuBM5nJwy1cP3x630ac1CIqb6zgmRsle15BYRfyVlIXfLYjjcCcMgfRIa/ +FFKAnX46fzL9I3re7ZntTv4XBp6dYm2zEIPureqgpJ369ewBNQ9T5wI+jg+EVryO +dRFTaihW6Ukz82djEY9HqHHDg0YbiAa918ipPZ4YECDPH2fX1grVxO1AqveTkw2i +LI/I7aqy4yqZCB1ar1wnrVzqNR0LcOFupFHj5WberwCao1yDd4C/yEK5tre6sq4v +hwF2II8NFVY7GFQP/V/V5ET7AgMBAAEwDQYJKoZIhvcNAQEFBQADggEBAC891nLG +CiggNRJPOS5rKhUBQa3uCgmsCTuwSf/bSrBMzfTkK5fQsqWvMks+ILYv4q6yGWYj +eqCeNPetbRDTKAtfyI+J9rKGfmvP/cWMK1TVB7OFYGb31Ra6w05Cg9ngCPHvelBh +0t4flVjTBv5MaVYpHQlRB+cQre2prd7qkd3hVHrO3Wf1I3VtqYaXQxyleVHq5FBD +O2zFL2Y1zBb6SUmtK0C1CcUG5rUsasal3FvFkWqeqeN+EkP/7RvMDo4S5JOxbWQp +OoebfirEQcUhz1duIb5th6UKhsJminFozHo0hRwenvhL5Q5sDiXn+1pcolj1gBzm +Ivob4OleMUcIGTg= +-----END CERTIFICATE----- diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/testdata/mycertvalid.key b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/testdata/mycertvalid.key new file mode 100644 index 000000000000..2828dfee3bfc --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/testdata/mycertvalid.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpQIBAAKCAQEAqILVFx09Jb8pqSPBVFPsMY7ngg0JjT7VbmltC0NLR9aEMAAe +zbhVIDwWbIjfKLSNz+hHrgTOZycMtXD98et9GnNQiKm+s4JkbJXteQWEX8lZSF3y +2I43AnDIH0SGvxRSgJ1+On8y/SN63u2Z7U7+FwaenWJtsxCD7q3qoKSd+vXsATUP +U+cCPo4PhFa8jnURU2ooVulJM/NnYxGPR6hxw4NGG4gGvdfIqT2eGBAgzx9n19YK +1cTtQKr3k5MNoiyPyO2qsuMqmQgdWq9cJ61c6jUdC3DhbqRR4+Vm3q8AmqNcg3eA +v8hCuba3urKuL4cBdiCPDRVWOxhUD/1f1eRE+wIDAQABAoIBAQCGv4gSYakh5Ak2 +XYcdHbbDslhh4HcA4XvePKOb3AX4vgsaLx5ytrIrgqETzSdV73tvA3k+KE28ordA +58fJiduSKR//CG2cMeqIAiPRIJ5H0kR439dvX9mRNApzJmLxrRiEDGyB7nEhhxub +5DewUfhRBVQU2j6Kb+xwEdaK+tfxcyVCKnloAh2PwBoSXcpK41ii0fvDzPwEuTqc +LexUxEV2Z9ClxQ2sJ2MLE7x57TQK0Earrph/ew/MDSYfKnay1B5vcXPX8rAiQJdP +Rc0BgeXV+j5pH+s5zOFMJRXrvI/9m+trr8MCYDrKooyFkk2cmsrxz3HvmJ3+t52s +jSXd7RKBAoGBANH0eap41oDo4P9ZF/ngAu7l1Yu5Vk6vB7wGJhekavv6dl+lYpw1 +wUlKv32ZHmah8LvrRdyALHQRJ19V6NJiHlVwiJEEyXQWUsJTmvsvb7idEeU861iw +0bFelJlW7GLCIH/02enWKwMH6oR50Wa1xTbI3CtizbEoWCTnSK5iC1HbAoGBAM13 +kR8vNHhgWKv/AgIYKFrPJjMXmKBfv/jUyKUfcQi9kIZMdaYpN5yPKZIkBIFOVHbG +suH4/7cVA3ZCfQljY6PGLfZu7QPupvd5KrEbBuKGuIdxrUk6mmLjLEXhoYSAeaw/ +OsYKsGHdhWRstCB4R58jqpVcAr1pytxbx1oBxRNhAoGBAKv/pQBz1/5pSZHGsi6h +RqXhoYzCu6LgHuz4+JHbv01IRVtbyKoCG6NoWfGR0+bueaHpPyVB16kKOIAQiBh6 +CzGhbC+phUPV2dya01c96D+MZZGv03mn+VFeE0x/ek35jNhmhXLcYgYsoQIALfz/ +ol2cNUpRugKM85Df7Jn3diCLAoGAS8xNRDTU5Yedjq3/nqgs0vtSe0y8KIXKO1C8 +SHYl6/SKyZCRYmAYPPBvhJM2+kDcVgkNWuHR7EebRFhY6kq5KmTk9eGMHIRBIlCX +2EhBLPZIQudD5xzwcYSfA5SuUkRXHp0g4Ih281OWbyrO9J+KxIGS35DXDetmRA6z +p1e5zWECgYEAulYIXb4tV8zKxJ+5/lLzeOZxzrvLMWv5YLlygjt5HWtCLl9B02Q7 ++zGcMi9O5ASN1cuf5hiQNDvMOQnD5Pywe8/i8zP3QLVDcnlOY83n2Gl3Huh6w3O5 +l+hvRO3LAm0VZSFaJE8WBm45vm09vR0X+69pkcSl/cfyVHygMmhaZSs= +-----END RSA PRIVATE KEY----- diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/testdata/mycertvalid.req b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/testdata/mycertvalid.req new file mode 100644 index 
000000000000..efac2739e934 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/testdata/mycertvalid.req @@ -0,0 +1,15 @@ +-----BEGIN CERTIFICATE REQUEST----- +MIICWTCCAUECAQAwFDESMBAGA1UEAwwJbG9jYWxob3N0MIIBIjANBgkqhkiG9w0B +AQEFAAOCAQ8AMIIBCgKCAQEAqILVFx09Jb8pqSPBVFPsMY7ngg0JjT7VbmltC0NL +R9aEMAAezbhVIDwWbIjfKLSNz+hHrgTOZycMtXD98et9GnNQiKm+s4JkbJXteQWE +X8lZSF3y2I43AnDIH0SGvxRSgJ1+On8y/SN63u2Z7U7+FwaenWJtsxCD7q3qoKSd ++vXsATUPU+cCPo4PhFa8jnURU2ooVulJM/NnYxGPR6hxw4NGG4gGvdfIqT2eGBAg +zx9n19YK1cTtQKr3k5MNoiyPyO2qsuMqmQgdWq9cJ61c6jUdC3DhbqRR4+Vm3q8A +mqNcg3eAv8hCuba3urKuL4cBdiCPDRVWOxhUD/1f1eRE+wIDAQABoAAwDQYJKoZI +hvcNAQEFBQADggEBACPbB3L1oW5Ah61YiUiRIyT1i+T0aGZN30QmyTxGrahTqFFz +JFJE+PwNX4ET1K5j634ltnbn/9I03bLs8zXrzmdNDR7OXNdvoGVG8vyldxkqopeK +i7AwH4zKOoH7lFdcn8ISyTKFXERAOnQMbQvFP5ZW8h/nVljZ1NWh08HYE2uhiG6n +sudWIFnorun0tKWyqlnDiiGzoXJNp6X5QvluIP/a5ntSleNCWiJXKY6f0tx/rA+0 +syjk63lShz4eXN1aN5uL2z9borXkZKdFGKaLGqBIgMxM6gjJz3XTDoublTyOAO2n +T0f//nDSamgEQzCLDzPQr7v7diJ9gt9ueD/Q17U= +-----END CERTIFICATE REQUEST----- diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/testing/core/actions.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/testing/core/actions.go new file mode 100644 index 000000000000..24a707c8a9b7 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/testing/core/actions.go @@ -0,0 +1,456 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package core + +import ( + "strings" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/fields" + "k8s.io/kubernetes/pkg/labels" + "k8s.io/kubernetes/pkg/runtime" +) + +func NewRootGetAction(resource unversioned.GroupVersionResource, name string) GetActionImpl { + action := GetActionImpl{} + action.Verb = "get" + action.Resource = resource + action.Name = name + + return action +} + +func NewGetAction(resource unversioned.GroupVersionResource, namespace, name string) GetActionImpl { + action := GetActionImpl{} + action.Verb = "get" + action.Resource = resource + action.Namespace = namespace + action.Name = name + + return action +} + +func NewRootListAction(resource unversioned.GroupVersionResource, opts api.ListOptions) ListActionImpl { + action := ListActionImpl{} + action.Verb = "list" + action.Resource = resource + labelSelector := opts.LabelSelector + if labelSelector == nil { + labelSelector = labels.Everything() + } + fieldSelector := opts.FieldSelector + if fieldSelector == nil { + fieldSelector = fields.Everything() + } + action.ListRestrictions = ListRestrictions{labelSelector, fieldSelector} + + return action +} + +func NewListAction(resource unversioned.GroupVersionResource, namespace string, opts api.ListOptions) ListActionImpl { + action := ListActionImpl{} + action.Verb = "list" + action.Resource = resource + action.Namespace = namespace + labelSelector := opts.LabelSelector + if labelSelector == nil { + labelSelector = labels.Everything() + } + fieldSelector := opts.FieldSelector + if fieldSelector == nil { + fieldSelector = fields.Everything() + } + action.ListRestrictions = ListRestrictions{labelSelector, fieldSelector} + + return action +} + +func NewRootCreateAction(resource unversioned.GroupVersionResource, object runtime.Object) CreateActionImpl { + action := CreateActionImpl{} + action.Verb = "create" + action.Resource = resource + action.Object = object + + return action +} + +func NewCreateAction(resource unversioned.GroupVersionResource, namespace string, object runtime.Object) CreateActionImpl { + action := CreateActionImpl{} + action.Verb = "create" + action.Resource = resource + action.Namespace = namespace + action.Object = object + + return action +} + +func NewRootUpdateAction(resource unversioned.GroupVersionResource, object runtime.Object) UpdateActionImpl { + action := UpdateActionImpl{} + action.Verb = "update" + action.Resource = resource + action.Object = object + + return action +} + +func NewUpdateAction(resource unversioned.GroupVersionResource, namespace string, object runtime.Object) UpdateActionImpl { + action := UpdateActionImpl{} + action.Verb = "update" + action.Resource = resource + action.Namespace = namespace + action.Object = object + + return action +} + +func NewRootPatchAction(resource unversioned.GroupVersionResource, object runtime.Object) PatchActionImpl { + action := PatchActionImpl{} + action.Verb = "patch" + action.Resource = resource + action.Object = object + + return action +} + +func NewPatchAction(resource unversioned.GroupVersionResource, namespace string, object runtime.Object) PatchActionImpl { + action := PatchActionImpl{} + action.Verb = "patch" + action.Resource = resource + action.Namespace = namespace + action.Object = object + + return action +} + +func NewRootUpdateSubresourceAction(resource unversioned.GroupVersionResource, subresource string, object runtime.Object) UpdateActionImpl { + action := UpdateActionImpl{} + action.Verb = "update" + action.Resource = 
resource + action.Subresource = subresource + action.Object = object + + return action +} +func NewUpdateSubresourceAction(resource unversioned.GroupVersionResource, subresource string, namespace string, object runtime.Object) UpdateActionImpl { + action := UpdateActionImpl{} + action.Verb = "update" + action.Resource = resource + action.Subresource = subresource + action.Namespace = namespace + action.Object = object + + return action +} + +func NewRootDeleteAction(resource unversioned.GroupVersionResource, name string) DeleteActionImpl { + action := DeleteActionImpl{} + action.Verb = "delete" + action.Resource = resource + action.Name = name + + return action +} + +func NewDeleteAction(resource unversioned.GroupVersionResource, namespace, name string) DeleteActionImpl { + action := DeleteActionImpl{} + action.Verb = "delete" + action.Resource = resource + action.Namespace = namespace + action.Name = name + + return action +} + +func NewRootDeleteCollectionAction(resource unversioned.GroupVersionResource, opts api.ListOptions) DeleteCollectionActionImpl { + action := DeleteCollectionActionImpl{} + action.Verb = "delete-collection" + action.Resource = resource + labelSelector := opts.LabelSelector + if labelSelector == nil { + labelSelector = labels.Everything() + } + fieldSelector := opts.FieldSelector + if fieldSelector == nil { + fieldSelector = fields.Everything() + } + action.ListRestrictions = ListRestrictions{labelSelector, fieldSelector} + + return action +} + +func NewDeleteCollectionAction(resource unversioned.GroupVersionResource, namespace string, opts api.ListOptions) DeleteCollectionActionImpl { + action := DeleteCollectionActionImpl{} + action.Verb = "delete-collection" + action.Resource = resource + action.Namespace = namespace + labelSelector := opts.LabelSelector + if labelSelector == nil { + labelSelector = labels.Everything() + } + fieldSelector := opts.FieldSelector + if fieldSelector == nil { + fieldSelector = fields.Everything() + } + action.ListRestrictions = ListRestrictions{labelSelector, fieldSelector} + + return action +} + +func NewRootWatchAction(resource unversioned.GroupVersionResource, opts api.ListOptions) WatchActionImpl { + action := WatchActionImpl{} + action.Verb = "watch" + action.Resource = resource + labelSelector := opts.LabelSelector + if labelSelector == nil { + labelSelector = labels.Everything() + } + fieldSelector := opts.FieldSelector + if fieldSelector == nil { + fieldSelector = fields.Everything() + } + action.WatchRestrictions = WatchRestrictions{labelSelector, fieldSelector, opts.ResourceVersion} + + return action +} + +func NewWatchAction(resource unversioned.GroupVersionResource, namespace string, opts api.ListOptions) WatchActionImpl { + action := WatchActionImpl{} + action.Verb = "watch" + action.Resource = resource + action.Namespace = namespace + labelSelector := opts.LabelSelector + if labelSelector == nil { + labelSelector = labels.Everything() + } + fieldSelector := opts.FieldSelector + if fieldSelector == nil { + fieldSelector = fields.Everything() + } + action.WatchRestrictions = WatchRestrictions{labelSelector, fieldSelector, opts.ResourceVersion} + + return action +} + +func NewProxyGetAction(resource unversioned.GroupVersionResource, namespace, scheme, name, port, path string, params map[string]string) ProxyGetActionImpl { + action := ProxyGetActionImpl{} + action.Verb = "get" + action.Resource = resource + action.Namespace = namespace + action.Scheme = scheme + action.Name = name + action.Port = port + action.Path = path + 
action.Params = params + return action +} + +type ListRestrictions struct { + Labels labels.Selector + Fields fields.Selector +} +type WatchRestrictions struct { + Labels labels.Selector + Fields fields.Selector + ResourceVersion string +} + +type Action interface { + GetNamespace() string + GetVerb() string + GetResource() unversioned.GroupVersionResource + GetSubresource() string + Matches(verb, resource string) bool +} + +type GenericAction interface { + Action + GetValue() interface{} +} + +type GetAction interface { + Action + GetName() string +} + +type ListAction interface { + Action + GetListRestrictions() ListRestrictions +} + +type CreateAction interface { + Action + GetObject() runtime.Object +} + +type UpdateAction interface { + Action + GetObject() runtime.Object +} + +type DeleteAction interface { + Action + GetName() string +} + +type WatchAction interface { + Action + GetWatchRestrictions() WatchRestrictions +} + +type ProxyGetAction interface { + Action + GetScheme() string + GetName() string + GetPort() string + GetPath() string + GetParams() map[string]string +} + +type ActionImpl struct { + Namespace string + Verb string + Resource unversioned.GroupVersionResource + Subresource string +} + +func (a ActionImpl) GetNamespace() string { + return a.Namespace +} +func (a ActionImpl) GetVerb() string { + return a.Verb +} +func (a ActionImpl) GetResource() unversioned.GroupVersionResource { + return a.Resource +} +func (a ActionImpl) GetSubresource() string { + return a.Subresource +} +func (a ActionImpl) Matches(verb, resource string) bool { + return strings.ToLower(verb) == strings.ToLower(a.Verb) && + strings.ToLower(resource) == strings.ToLower(a.Resource.Resource) +} + +type GenericActionImpl struct { + ActionImpl + Value interface{} +} + +func (a GenericActionImpl) GetValue() interface{} { + return a.Value +} + +type GetActionImpl struct { + ActionImpl + Name string +} + +func (a GetActionImpl) GetName() string { + return a.Name +} + +type ListActionImpl struct { + ActionImpl + ListRestrictions ListRestrictions +} + +func (a ListActionImpl) GetListRestrictions() ListRestrictions { + return a.ListRestrictions +} + +type CreateActionImpl struct { + ActionImpl + Object runtime.Object +} + +func (a CreateActionImpl) GetObject() runtime.Object { + return a.Object +} + +type UpdateActionImpl struct { + ActionImpl + Object runtime.Object +} + +func (a UpdateActionImpl) GetObject() runtime.Object { + return a.Object +} + +type PatchActionImpl struct { + ActionImpl + Object runtime.Object +} + +func (a PatchActionImpl) GetObject() runtime.Object { + return a.Object +} + +type DeleteActionImpl struct { + ActionImpl + Name string +} + +func (a DeleteActionImpl) GetName() string { + return a.Name +} + +type DeleteCollectionActionImpl struct { + ActionImpl + ListRestrictions ListRestrictions +} + +func (a DeleteCollectionActionImpl) GetListRestrictions() ListRestrictions { + return a.ListRestrictions +} + +type WatchActionImpl struct { + ActionImpl + WatchRestrictions WatchRestrictions +} + +func (a WatchActionImpl) GetWatchRestrictions() WatchRestrictions { + return a.WatchRestrictions +} + +type ProxyGetActionImpl struct { + ActionImpl + Scheme string + Name string + Port string + Path string + Params map[string]string +} + +func (a ProxyGetActionImpl) GetScheme() string { + return a.Scheme +} + +func (a ProxyGetActionImpl) GetName() string { + return a.Name +} + +func (a ProxyGetActionImpl) GetPort() string { + return a.Port +} + +func (a ProxyGetActionImpl) GetPath() string { + 
	return a.Path
+}
+
+func (a ProxyGetActionImpl) GetParams() map[string]string {
+	return a.Params
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/testing/core/fake.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/testing/core/fake.go
new file mode 100644
index 000000000000..751780b1d6a8
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/testing/core/fake.go
@@ -0,0 +1,231 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package core
+
+import (
+	"fmt"
+	"sync"
+
+	"k8s.io/kubernetes/pkg/api/unversioned"
+	"k8s.io/kubernetes/pkg/client/restclient"
+	"k8s.io/kubernetes/pkg/runtime"
+	"k8s.io/kubernetes/pkg/version"
+	"k8s.io/kubernetes/pkg/watch"
+)
+
+// Fake implements client.Interface. Meant to be embedded into a struct to get a default
+// implementation. This makes faking out just the methods you want to test easier.
+type Fake struct {
+	sync.RWMutex
+	actions []Action // these may be castable to other types, but "Action" is the minimum
+
+	// ReactionChain is the list of reactors that will be attempted for every request, in the order they are tried
+	ReactionChain []Reactor
+	// WatchReactionChain is the list of watch reactors that will be attempted for every request, in the order they are tried
+	WatchReactionChain []WatchReactor
+	// ProxyReactionChain is the list of proxy reactors that will be attempted for every request, in the order they are tried
+	ProxyReactionChain []ProxyReactor
+
+	Resources map[string]*unversioned.APIResourceList
+}
+
+// Reactor is an interface to allow the composition of reaction functions.
+type Reactor interface {
+	// Handles indicates whether or not this Reactor deals with a given action
+	Handles(action Action) bool
+	// React handles the action and returns results. It may choose to delegate by indicating handled=false
+	React(action Action) (handled bool, ret runtime.Object, err error)
+}
+
+// WatchReactor is an interface to allow the composition of watch functions.
+type WatchReactor interface {
+	// Handles indicates whether or not this Reactor deals with a given action
+	Handles(action Action) bool
+	// React handles a watch action and returns results. It may choose to delegate by indicating handled=false
+	React(action Action) (handled bool, ret watch.Interface, err error)
+}
+
+// ProxyReactor is an interface to allow the composition of proxy get functions.
+type ProxyReactor interface {
+	// Handles indicates whether or not this Reactor deals with a given action
+	Handles(action Action) bool
+	// React handles a proxy action and returns results. It may choose to delegate by indicating handled=false
+	React(action Action) (handled bool, ret restclient.ResponseWrapper, err error)
+}
If "handled" is false, +// then the test client will continue ignore the results and continue to the next ReactionFunc +type ReactionFunc func(action Action) (handled bool, ret runtime.Object, err error) + +// WatchReactionFunc is a function that returns a watch interface. If "handled" is false, +// then the test client will continue ignore the results and continue to the next ReactionFunc +type WatchReactionFunc func(action Action) (handled bool, ret watch.Interface, err error) + +// ProxyReactionFunc is a function that returns a ResponseWrapper interface for a given Action. If "handled" is false, +// then the test client will continue ignore the results and continue to the next ProxyReactionFunc +type ProxyReactionFunc func(action Action) (handled bool, ret restclient.ResponseWrapper, err error) + +// AddReactor appends a reactor to the end of the chain +func (c *Fake) AddReactor(verb, resource string, reaction ReactionFunc) { + c.ReactionChain = append(c.ReactionChain, &SimpleReactor{verb, resource, reaction}) +} + +// PrependReactor adds a reactor to the beginning of the chain +func (c *Fake) PrependReactor(verb, resource string, reaction ReactionFunc) { + c.ReactionChain = append([]Reactor{&SimpleReactor{verb, resource, reaction}}, c.ReactionChain...) +} + +// AddWatchReactor appends a reactor to the end of the chain +func (c *Fake) AddWatchReactor(resource string, reaction WatchReactionFunc) { + c.WatchReactionChain = append(c.WatchReactionChain, &SimpleWatchReactor{resource, reaction}) +} + +// PrependWatchReactor adds a reactor to the beginning of the chain +func (c *Fake) PrependWatchReactor(resource string, reaction WatchReactionFunc) { + c.WatchReactionChain = append([]WatchReactor{&SimpleWatchReactor{resource, reaction}}, c.WatchReactionChain...) +} + +// AddProxyReactor appends a reactor to the end of the chain +func (c *Fake) AddProxyReactor(resource string, reaction ProxyReactionFunc) { + c.ProxyReactionChain = append(c.ProxyReactionChain, &SimpleProxyReactor{resource, reaction}) +} + +// PrependProxyReactor adds a reactor to the beginning of the chain +func (c *Fake) PrependProxyReactor(resource string, reaction ProxyReactionFunc) { + c.ProxyReactionChain = append([]ProxyReactor{&SimpleProxyReactor{resource, reaction}}, c.ProxyReactionChain...) +} + +// Invokes records the provided Action and then invokes the ReactFn (if provided). +// defaultReturnObj is expected to be of the same type a normal call would return. +func (c *Fake) Invokes(action Action, defaultReturnObj runtime.Object) (runtime.Object, error) { + c.Lock() + defer c.Unlock() + + c.actions = append(c.actions, action) + for _, reactor := range c.ReactionChain { + if !reactor.Handles(action) { + continue + } + + handled, ret, err := reactor.React(action) + if !handled { + continue + } + + return ret, err + } + + return defaultReturnObj, nil +} + +// InvokesWatch records the provided Action and then invokes the ReactFn (if provided). +func (c *Fake) InvokesWatch(action Action) (watch.Interface, error) { + c.Lock() + defer c.Unlock() + + c.actions = append(c.actions, action) + for _, reactor := range c.WatchReactionChain { + if !reactor.Handles(action) { + continue + } + + handled, ret, err := reactor.React(action) + if !handled { + continue + } + + return ret, err + } + + return nil, fmt.Errorf("unhandled watch: %#v", action) +} + +// InvokesProxy records the provided Action and then invokes the ReactFn (if provided). 
+func (c *Fake) InvokesProxy(action Action) restclient.ResponseWrapper {
+	c.Lock()
+	defer c.Unlock()
+
+	c.actions = append(c.actions, action)
+	for _, reactor := range c.ProxyReactionChain {
+		if !reactor.Handles(action) {
+			continue
+		}
+
+		handled, ret, err := reactor.React(action)
+		if !handled || err != nil {
+			continue
+		}
+
+		return ret
+	}
+
+	return nil
+}
+
+// ClearActions clears the history of actions called on the fake client.
+func (c *Fake) ClearActions() {
+	c.Lock()
+	defer c.Unlock()
+
+	c.actions = make([]Action, 0)
+}
+
+// Actions returns a chronologically ordered slice of fake actions called on the fake client.
+func (c *Fake) Actions() []Action {
+	c.RLock()
+	defer c.RUnlock()
+	fa := make([]Action, len(c.actions))
+	copy(fa, c.actions)
+	return fa
+}
+
+// TODO: this probably should be moved somewhere else.
+type FakeDiscovery struct {
+	*Fake
+}
+
+func (c *FakeDiscovery) ServerResourcesForGroupVersion(groupVersion string) (*unversioned.APIResourceList, error) {
+	action := ActionImpl{
+		Verb:     "get",
+		Resource: unversioned.GroupVersionResource{Resource: "resource"},
+	}
+	c.Invokes(action, nil)
+	return c.Resources[groupVersion], nil
+}
+
+func (c *FakeDiscovery) ServerResources() (map[string]*unversioned.APIResourceList, error) {
+	action := ActionImpl{
+		Verb:     "get",
+		Resource: unversioned.GroupVersionResource{Resource: "resource"},
+	}
+	c.Invokes(action, nil)
+	return c.Resources, nil
+}
+
+func (c *FakeDiscovery) ServerGroups() (*unversioned.APIGroupList, error) {
+	return nil, nil
+}
+
+func (c *FakeDiscovery) ServerVersion() (*version.Info, error) {
+	action := ActionImpl{}
+	action.Verb = "get"
+	action.Resource = unversioned.GroupVersionResource{Resource: "version"}
+
+	c.Invokes(action, nil)
+	versionInfo := version.Get()
+	return &versionInfo, nil
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/testing/core/fixture.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/testing/core/fixture.go
new file mode 100644
index 000000000000..bd4e560064e0
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/testing/core/fixture.go
@@ -0,0 +1,319 @@
+/*
+Copyright 2015 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package core
+
+import (
+	"fmt"
+	"io/ioutil"
+	"reflect"
+	"strings"
+
+	"k8s.io/kubernetes/pkg/api/errors"
+	"k8s.io/kubernetes/pkg/api/meta"
+	"k8s.io/kubernetes/pkg/api/unversioned"
+	"k8s.io/kubernetes/pkg/client/restclient"
+	"k8s.io/kubernetes/pkg/runtime"
+	"k8s.io/kubernetes/pkg/util/yaml"
+	"k8s.io/kubernetes/pkg/watch"
+)
+
+// ObjectRetriever abstracts the implementation for retrieving or setting generic
+// objects. It is intended to be used to fake calls to a server by returning
+// objects based on their kind and name.
+type ObjectRetriever interface {
+	// Kind should return a resource or a list of resources (depending on the provided kind and
+	// name). It should return an error if the caller should communicate an error to the server.
+	Kind(gvk unversioned.GroupVersionKind, name string) (runtime.Object, error)
+	// Add adds a runtime object for test purposes into this object.
+	Add(runtime.Object) error
+}
+
+// ObjectScheme abstracts the implementation of common operations on objects.
+type ObjectScheme interface {
+	runtime.ObjectCreater
+	runtime.ObjectCopier
+	runtime.ObjectTyper
+}
+
+// ObjectReaction returns a ReactionFunc that takes a generic action string of the form
+// <verb>-<resource> or <verb>-<subresource>-<resource> and attempts to return a runtime
+// Object or error that matches the requested action. For instance, list-replicationControllers
+// should attempt to return a list of replication controllers. This method delegates to the
+// ObjectRetriever interface to satisfy retrieval of lists or retrieval of single items.
+// TODO: add support for sub resources
+func ObjectReaction(o ObjectRetriever, mapper meta.RESTMapper) ReactionFunc {
+	return func(action Action) (bool, runtime.Object, error) {
+		resource := action.GetResource()
+		kind, err := mapper.KindFor(resource)
+		// This is a temporary fix. Because there is no internal resource, the
+		// caller has no way to express that it expects to get an internal kind
+		// back. A more proper fix would be to specify the Kind directly when
+		// building the action.
+		kind.Version = resource.Version
+		if err != nil {
+			return false, nil, fmt.Errorf("unrecognized action %s: %v", action.GetResource(), err)
+		}
+
+		// TODO: have mapper return a Kind for a subresource?
+		switch castAction := action.(type) {
+		case ListAction:
+			kind.Kind += "List"
+			resource, err := o.Kind(kind, "")
+			return true, resource, err
+
+		case GetAction:
+			resource, err := o.Kind(kind, castAction.GetName())
+			return true, resource, err
+
+		case DeleteAction:
+			resource, err := o.Kind(kind, castAction.GetName())
+			return true, resource, err
+
+		case CreateAction:
+			accessor, err := meta.Accessor(castAction.GetObject())
+			if err != nil {
+				return true, nil, err
+			}
+			resource, err := o.Kind(kind, accessor.GetName())
+			return true, resource, err
+
+		case UpdateAction:
+			accessor, err := meta.Accessor(castAction.GetObject())
+			if err != nil {
+				return true, nil, err
+			}
+			resource, err := o.Kind(kind, accessor.GetName())
+			return true, resource, err
+
+		default:
+			return false, nil, fmt.Errorf("no reaction implemented for %s", action)
+		}
+	}
+}
+
+// AddObjectsFromPath loads the JSON or YAML file containing Kubernetes API resources
+// and adds them to the provided ObjectRetriever.
+func AddObjectsFromPath(path string, o ObjectRetriever, decoder runtime.Decoder) error {
+	data, err := ioutil.ReadFile(path)
+	if err != nil {
+		return err
+	}
+	data, err = yaml.ToJSON(data)
+	if err != nil {
+		return err
+	}
+	obj, err := runtime.Decode(decoder, data)
+	if err != nil {
+		return err
+	}
+	if err := o.Add(obj); err != nil {
+		return err
+	}
+	return nil
+}
+
+type objects struct {
+	types   map[string][]runtime.Object
+	last    map[string]int
+	scheme  ObjectScheme
+	decoder runtime.Decoder
+}
+
+var _ ObjectRetriever = &objects{}
+
+// NewObjects implements the ObjectRetriever interface by introspecting the
+// objects provided to Add() and returning them when the Kind method is invoked.
+// If an api.List object is provided to Add(), each child item is added. If an
+// object is added that is itself a list (PodList, ServiceList) then that is added
+// to the "PodList" kind. If no PodList is added, the retriever will take any loaded
+// Pods and return them in a list. If an api.Status is added, and the Details.Kind field
If an api.Status is added, and the Details.Kind field +// is set, that status will be returned instead (as an error if Status != Success, or +// as a runtime.Object if Status == Success). If multiple PodLists are provided, they +// will be returned in order by the Kind call, and the last PodList will be reused for +// subsequent calls. +func NewObjects(scheme ObjectScheme, decoder runtime.Decoder) ObjectRetriever { + return objects{ + types: make(map[string][]runtime.Object), + last: make(map[string]int), + scheme: scheme, + decoder: decoder, + } +} + +func (o objects) Kind(kind unversioned.GroupVersionKind, name string) (runtime.Object, error) { + if len(kind.Version) == 0 { + kind.Version = runtime.APIVersionInternal + } + empty, err := o.scheme.New(kind) + nilValue := reflect.Zero(reflect.TypeOf(empty)).Interface().(runtime.Object) + + arr, ok := o.types[kind.Kind] + if !ok { + if strings.HasSuffix(kind.Kind, "List") { + itemKind := kind.Kind[:len(kind.Kind)-4] + arr, ok := o.types[itemKind] + if !ok { + return empty, nil + } + out, err := o.scheme.New(kind) + if err != nil { + return nilValue, err + } + if err := meta.SetList(out, arr); err != nil { + return nilValue, err + } + if out, err = o.scheme.Copy(out); err != nil { + return nilValue, err + } + return out, nil + } + return nilValue, errors.NewNotFound(unversioned.GroupResource{Group: kind.Group, Resource: kind.Kind}, name) + } + + index := o.last[kind.Kind] + if index >= len(arr) { + index = len(arr) - 1 + } + if index < 0 { + return nilValue, errors.NewNotFound(unversioned.GroupResource{Group: kind.Group, Resource: kind.Kind}, name) + } + out, err := o.scheme.Copy(arr[index]) + if err != nil { + return nilValue, err + } + o.last[kind.Kind] = index + 1 + + if status, ok := out.(*unversioned.Status); ok { + if status.Details != nil { + status.Details.Kind = kind.Kind + } + if status.Status != unversioned.StatusSuccess { + return nilValue, &errors.StatusError{ErrStatus: *status} + } + } + + return out, nil +} + +func (o objects) Add(obj runtime.Object) error { + gvks, _, err := o.scheme.ObjectKinds(obj) + if err != nil { + return err + } + kind := gvks[0].Kind + + switch { + case meta.IsListType(obj): + if kind != "List" { + o.types[kind] = append(o.types[kind], obj) + } + + list, err := meta.ExtractList(obj) + if err != nil { + return err + } + if errs := runtime.DecodeList(list, o.decoder); len(errs) > 0 { + return errs[0] + } + for _, obj := range list { + if err := o.Add(obj); err != nil { + return err + } + } + default: + if status, ok := obj.(*unversioned.Status); ok && status.Details != nil { + kind = status.Details.Kind + } + o.types[kind] = append(o.types[kind], obj) + } + + return nil +} + +func DefaultWatchReactor(watchInterface watch.Interface, err error) WatchReactionFunc { + return func(action Action) (bool, watch.Interface, error) { + return true, watchInterface, err + } +} + +// SimpleReactor is a Reactor. Each reaction function is attached to a given verb,resource tuple. "*" in either field matches everything for that value. +// For instance, *,pods matches all verbs on pods. 
This allows for easier composition of reaction functions.
+type SimpleReactor struct {
+	Verb     string
+	Resource string
+
+	Reaction ReactionFunc
+}
+
+func (r *SimpleReactor) Handles(action Action) bool {
+	verbCovers := r.Verb == "*" || r.Verb == action.GetVerb()
+	if !verbCovers {
+		return false
+	}
+	resourceCovers := r.Resource == "*" || r.Resource == action.GetResource().Resource
+	if !resourceCovers {
+		return false
+	}
+
+	return true
+}
+
+func (r *SimpleReactor) React(action Action) (bool, runtime.Object, error) {
+	return r.Reaction(action)
+}
+
+// SimpleWatchReactor is a WatchReactor. Each reaction function is attached to a given resource. "*" matches everything for that value.
+// For instance, "pods" matches watch requests on pods. This allows for easier composition of reaction functions.
+type SimpleWatchReactor struct {
+	Resource string
+
+	Reaction WatchReactionFunc
+}
+
+func (r *SimpleWatchReactor) Handles(action Action) bool {
+	resourceCovers := r.Resource == "*" || r.Resource == action.GetResource().Resource
+	if !resourceCovers {
+		return false
+	}
+
+	return true
+}
+
+func (r *SimpleWatchReactor) React(action Action) (bool, watch.Interface, error) {
+	return r.Reaction(action)
+}
+
+// SimpleProxyReactor is a ProxyReactor. Each reaction function is attached to a given resource. "*" matches everything for that value.
+// For instance, "pods" matches proxy requests on pods. This allows for easier composition of reaction functions.
+type SimpleProxyReactor struct {
+	Resource string
+
+	Reaction ProxyReactionFunc
+}
+
+func (r *SimpleProxyReactor) Handles(action Action) bool {
+	resourceCovers := r.Resource == "*" || r.Resource == action.GetResource().Resource
+	if !resourceCovers {
+		return false
+	}
+
+	return true
+}
+
+func (r *SimpleProxyReactor) React(action Action) (bool, restclient.ResponseWrapper, error) {
+	return r.Reaction(action)
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/transport/cache.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/transport/cache.go
index f1068930d8ad..90bd1190297e 100644
--- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/transport/cache.go
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/transport/cache.go
@@ -22,6 +22,8 @@ import (
 	"net/http"
 	"sync"
 	"time"
+
+	utilnet "k8s.io/kubernetes/pkg/util/net"
 )
 
 // TlsTransportCache caches TLS http.RoundTrippers different configurations. The
@@ -60,7 +62,7 @@ func (c *tlsTransportCache) get(config *Config) (http.RoundTripper, error) {
 	}
 
 	// Cache a single transport for these options
-	c.transports[key] = &http.Transport{
+	c.transports[key] = utilnet.SetTransportDefaults(&http.Transport{
 		Proxy:               http.ProxyFromEnvironment,
 		TLSHandshakeTimeout: 10 * time.Second,
 		TLSClientConfig:     tlsConfig,
@@ -68,7 +70,7 @@
 			Timeout:   30 * time.Second,
 			KeepAlive: 30 * time.Second,
 		}).Dial,
-	}
+	})
 	return c.transports[key], nil
 }
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/transport/cache_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/transport/cache_test.go
new file mode 100644
index 000000000000..8a602c147e7c
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/transport/cache_test.go
@@ -0,0 +1,114 @@
+/*
+Copyright 2015 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package transport + +import ( + "net/http" + "testing" +) + +func TestTLSConfigKey(t *testing.T) { + // Make sure config fields that don't affect the tls config don't affect the cache key + identicalConfigurations := map[string]*Config{ + "empty": {}, + "basic": {Username: "bob", Password: "password"}, + "bearer": {BearerToken: "token"}, + "user agent": {UserAgent: "useragent"}, + "transport": {Transport: http.DefaultTransport}, + "wrap transport": {WrapTransport: func(http.RoundTripper) http.RoundTripper { return nil }}, + } + for nameA, valueA := range identicalConfigurations { + for nameB, valueB := range identicalConfigurations { + keyA, err := tlsConfigKey(valueA) + if err != nil { + t.Errorf("Unexpected error for %q: %v", nameA, err) + continue + } + keyB, err := tlsConfigKey(valueB) + if err != nil { + t.Errorf("Unexpected error for %q: %v", nameB, err) + continue + } + if keyA != keyB { + t.Errorf("Expected identical cache keys for %q and %q, got:\n\t%s\n\t%s", nameA, nameB, keyA, keyB) + continue + } + } + } + + // Make sure config fields that affect the tls config affect the cache key + uniqueConfigurations := map[string]*Config{ + "no tls": {}, + "insecure": {TLS: TLSConfig{Insecure: true}}, + "cadata 1": {TLS: TLSConfig{CAData: []byte{1}}}, + "cadata 2": {TLS: TLSConfig{CAData: []byte{2}}}, + "cert 1, key 1": { + TLS: TLSConfig{ + CertData: []byte{1}, + KeyData: []byte{1}, + }, + }, + "cert 1, key 2": { + TLS: TLSConfig{ + CertData: []byte{1}, + KeyData: []byte{2}, + }, + }, + "cert 2, key 1": { + TLS: TLSConfig{ + CertData: []byte{2}, + KeyData: []byte{1}, + }, + }, + "cert 2, key 2": { + TLS: TLSConfig{ + CertData: []byte{2}, + KeyData: []byte{2}, + }, + }, + "cadata 1, cert 1, key 1": { + TLS: TLSConfig{ + CAData: []byte{1}, + CertData: []byte{1}, + KeyData: []byte{1}, + }, + }, + } + for nameA, valueA := range uniqueConfigurations { + for nameB, valueB := range uniqueConfigurations { + // Don't compare to ourselves + if nameA == nameB { + continue + } + + keyA, err := tlsConfigKey(valueA) + if err != nil { + t.Errorf("Unexpected error for %q: %v", nameA, err) + continue + } + keyB, err := tlsConfigKey(valueB) + if err != nil { + t.Errorf("Unexpected error for %q: %v", nameB, err) + continue + } + if keyA == keyB { + t.Errorf("Expected unique cache keys for %q and %q, got:\n\t%s\n\t%s", nameA, nameB, keyA, keyB) + continue + } + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/transport/config.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/transport/config.go index 801977f877af..63a63fbb4795 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/transport/config.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/transport/config.go @@ -34,6 +34,9 @@ type Config struct { // Bearer token for authentication BearerToken string + // Impersonate is the username that this Config will impersonate + Impersonate string + // Transport may be used for custom HTTP behavior. This attribute may // not be specified with the TLS client certificate options. Use // WrapTransport for most client level operations. 
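Aside: the Impersonate field added to transport.Config above is consumed by HTTPWrappersForConfig in the round_trippers.go hunk that follows, which layers a header-injecting wrapper over the transport. The sketch below is a minimal, self-contained illustration of that pattern, not the vendored implementation; the type name, user name, and echo server are hypothetical stand-ins.

    package main

    import (
    	"fmt"
    	"io/ioutil"
    	"net/http"
    	"net/http/httptest"
    )

    // headerInjectingRT mirrors the shape of the impersonatingRoundTripper added
    // below: copy the request, set Impersonate-User unless the caller already
    // did, and delegate to the wrapped RoundTripper.
    type headerInjectingRT struct {
    	user     string
    	delegate http.RoundTripper
    }

    func (rt *headerInjectingRT) RoundTrip(req *http.Request) (*http.Response, error) {
    	if req.Header.Get("Impersonate-User") != "" {
    		return rt.delegate.RoundTrip(req)
    	}
    	// Shallow-copy the request and deep-copy its headers; mutating the
    	// caller's request would violate the http.RoundTripper contract.
    	clone := new(http.Request)
    	*clone = *req
    	clone.Header = make(http.Header, len(req.Header))
    	for k, v := range req.Header {
    		clone.Header[k] = append([]string(nil), v...)
    	}
    	clone.Header.Set("Impersonate-User", rt.user)
    	return rt.delegate.RoundTrip(clone)
    }

    func main() {
    	// A throwaway server that echoes the impersonation header back.
    	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
    		fmt.Fprint(w, r.Header.Get("Impersonate-User"))
    	}))
    	defer srv.Close()

    	client := &http.Client{Transport: &headerInjectingRT{user: "jane", delegate: http.DefaultTransport}}
    	resp, err := client.Get(srv.URL)
    	if err != nil {
    		panic(err)
    	}
    	defer resp.Body.Close()
    	body, _ := ioutil.ReadAll(resp.Body)
    	fmt.Println(string(body)) // prints "jane" although the caller never set the header
    }

Wrapping the RoundTripper, rather than mutating a shared transport, is what lets HTTPWrappersForConfig compose impersonation with the user-agent and auth wrappers in a predictable order.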
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/transport/round_trippers.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/transport/round_trippers.go index 6136e080ead0..55284ebc65f7 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/transport/round_trippers.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/transport/round_trippers.go @@ -48,6 +48,9 @@ func HTTPWrappersForConfig(config *Config, rt http.RoundTripper) (http.RoundTrip if len(config.UserAgent) > 0 { rt = NewUserAgentRoundTripper(config.UserAgent, rt) } + if len(config.Impersonate) > 0 { + rt = NewImpersonatingRoundTripper(config.Impersonate, rt) + } return rt, nil } @@ -130,6 +133,35 @@ func (rt *basicAuthRoundTripper) CancelRequest(req *http.Request) { func (rt *basicAuthRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.rt } +type impersonatingRoundTripper struct { + impersonate string + delegate http.RoundTripper +} + +// NewImpersonatingRoundTripper will add an Impersonate-User header to a request unless one has already been set. +func NewImpersonatingRoundTripper(impersonate string, delegate http.RoundTripper) http.RoundTripper { + return &impersonatingRoundTripper{impersonate, delegate} +} + +func (rt *impersonatingRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + if len(req.Header.Get("Impersonate-User")) != 0 { + return rt.delegate.RoundTrip(req) + } + req = cloneRequest(req) + req.Header.Set("Impersonate-User", rt.impersonate) + return rt.delegate.RoundTrip(req) +} + +func (rt *impersonatingRoundTripper) CancelRequest(req *http.Request) { + if canceler, ok := rt.delegate.(requestCanceler); ok { + canceler.CancelRequest(req) + } else { + glog.Errorf("CancelRequest not implemented") + } +} + +func (rt *impersonatingRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.delegate } + type bearerAuthRoundTripper struct { bearer string rt http.RoundTripper diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/transport/round_trippers_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/transport/round_trippers_test.go new file mode 100644 index 000000000000..6e8e52f7d7b2 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/transport/round_trippers_test.go @@ -0,0 +1,101 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package transport + +import ( + "net/http" + "testing" +) + +type testRoundTripper struct { + Request *http.Request + Response *http.Response + Err error +} + +func (rt *testRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + rt.Request = req + return rt.Response, rt.Err +} + +func TestBearerAuthRoundTripper(t *testing.T) { + rt := &testRoundTripper{} + req := &http.Request{} + NewBearerAuthRoundTripper("test", rt).RoundTrip(req) + if rt.Request == nil { + t.Fatalf("unexpected nil request: %v", rt) + } + if rt.Request == req { + t.Fatalf("round tripper should have copied request object: %#v", rt.Request) + } + if rt.Request.Header.Get("Authorization") != "Bearer test" { + t.Errorf("unexpected authorization header: %#v", rt.Request) + } +} + +func TestBasicAuthRoundTripper(t *testing.T) { + for n, tc := range map[string]struct { + user string + pass string + }{ + "basic": {user: "user", pass: "pass"}, + "no pass": {user: "user"}, + } { + rt := &testRoundTripper{} + req := &http.Request{} + NewBasicAuthRoundTripper(tc.user, tc.pass, rt).RoundTrip(req) + if rt.Request == nil { + t.Fatalf("%s: unexpected nil request: %v", n, rt) + } + if rt.Request == req { + t.Fatalf("%s: round tripper should have copied request object: %#v", n, rt.Request) + } + if user, pass, found := rt.Request.BasicAuth(); !found || user != tc.user || pass != tc.pass { + t.Errorf("%s: unexpected authorization header: %#v", n, rt.Request) + } + } +} + +func TestUserAgentRoundTripper(t *testing.T) { + rt := &testRoundTripper{} + req := &http.Request{ + Header: make(http.Header), + } + req.Header.Set("User-Agent", "other") + NewUserAgentRoundTripper("test", rt).RoundTrip(req) + if rt.Request == nil { + t.Fatalf("unexpected nil request: %v", rt) + } + if rt.Request != req { + t.Fatalf("round tripper should not have copied request object: %#v", rt.Request) + } + if rt.Request.Header.Get("User-Agent") != "other" { + t.Errorf("unexpected user agent header: %#v", rt.Request) + } + + req = &http.Request{} + NewUserAgentRoundTripper("test", rt).RoundTrip(req) + if rt.Request == nil { + t.Fatalf("unexpected nil request: %v", rt) + } + if rt.Request == req { + t.Fatalf("round tripper should have copied request object: %#v", rt.Request) + } + if rt.Request.Header.Get("User-Agent") != "test" { + t.Errorf("unexpected user agent header: %#v", rt.Request) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/transport/transport.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/transport/transport.go index 94d9c0fbed08..6b41c52e5718 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/transport/transport.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/transport/transport.go @@ -63,8 +63,10 @@ func TLSConfigFor(c *Config) (*tls.Config, error) { } tlsConfig := &tls.Config{ - // Change default from SSLv3 to TLSv1.0 (because of POODLE vulnerability) - MinVersion: tls.VersionTLS10, + // Can't use SSLv3 because of POODLE and BEAST + // Can't use TLSv1.0 because of POODLE and BEAST using CBC cipher + // Can't use TLSv1.1 because of RC4 cipher usage + MinVersion: tls.VersionTLS12, InsecureSkipVerify: c.TLS.Insecure, } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/transport/transport_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/transport/transport_test.go new file mode 100644 index 000000000000..ca04172d2013 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/transport/transport_test.go @@ -0,0 +1,204 @@ +/* +Copyright 2015
The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package transport + +import ( + "net/http" + "testing" +) + +const ( + rootCACert = `-----BEGIN CERTIFICATE----- +MIIC4DCCAcqgAwIBAgIBATALBgkqhkiG9w0BAQswIzEhMB8GA1UEAwwYMTAuMTMu +MTI5LjEwNkAxNDIxMzU5MDU4MB4XDTE1MDExNTIxNTczN1oXDTE2MDExNTIxNTcz +OFowIzEhMB8GA1UEAwwYMTAuMTMuMTI5LjEwNkAxNDIxMzU5MDU4MIIBIjANBgkq +hkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAunDRXGwsiYWGFDlWH6kjGun+PshDGeZX +xtx9lUnL8pIRWH3wX6f13PO9sktaOWW0T0mlo6k2bMlSLlSZgG9H6og0W6gLS3vq +s4VavZ6DbXIwemZG2vbRwsvR+t4G6Nbwelm6F8RFnA1Fwt428pavmNQ/wgYzo+T1 +1eS+HiN4ACnSoDSx3QRWcgBkB1g6VReofVjx63i0J+w8Q/41L9GUuLqquFxu6ZnH +60vTB55lHgFiDLjA1FkEz2dGvGh/wtnFlRvjaPC54JH2K1mPYAUXTreoeJtLJKX0 +ycoiyB24+zGCniUmgIsmQWRPaOPircexCp1BOeze82BT1LCZNTVaxQIDAQABoyMw +ITAOBgNVHQ8BAf8EBAMCAKQwDwYDVR0TAQH/BAUwAwEB/zALBgkqhkiG9w0BAQsD +ggEBADMxsUuAFlsYDpF4fRCzXXwrhbtj4oQwcHpbu+rnOPHCZupiafzZpDu+rw4x +YGPnCb594bRTQn4pAu3Ac18NbLD5pV3uioAkv8oPkgr8aUhXqiv7KdDiaWm6sbAL +EHiXVBBAFvQws10HMqMoKtO8f1XDNAUkWduakR/U6yMgvOPwS7xl0eUTqyRB6zGb +K55q2dejiFWaFqB/y78txzvz6UlOZKE44g2JAVoJVM6kGaxh33q8/FmrL4kuN3ut +W+MmJCVDvd4eEqPwbp7146ZWTqpIJ8lvA6wuChtqV8lhAPka2hD/LMqY8iXNmfXD +uml0obOEy+ON91k+SWTJ3ggmF/U= +-----END CERTIFICATE-----` + + certData = `-----BEGIN CERTIFICATE----- +MIIC6jCCAdSgAwIBAgIBCzALBgkqhkiG9w0BAQswIzEhMB8GA1UEAwwYMTAuMTMu +MTI5LjEwNkAxNDIxMzU5MDU4MB4XDTE1MDExNTIyMDEzMVoXDTE2MDExNTIyMDEz +MlowGzEZMBcGA1UEAxMQb3BlbnNoaWZ0LWNsaWVudDCCASIwDQYJKoZIhvcNAQEB +BQADggEPADCCAQoCggEBAKtdhz0+uCLXw5cSYns9rU/XifFSpb/x24WDdrm72S/v +b9BPYsAStiP148buylr1SOuNi8sTAZmlVDDIpIVwMLff+o2rKYDicn9fjbrTxTOj +lI4pHJBH+JU3AJ0tbajupioh70jwFS0oYpwtneg2zcnE2Z4l6mhrj2okrc5Q1/X2 +I2HChtIU4JYTisObtin10QKJX01CLfYXJLa8upWzKZ4/GOcHG+eAV3jXWoXidtjb +1Usw70amoTZ6mIVCkiu1QwCoa8+ycojGfZhvqMsAp1536ZcCul+Na+AbCv4zKS7F +kQQaImVrXdUiFansIoofGlw/JNuoKK6ssVpS5Ic3pgcCAwEAAaM1MDMwDgYDVR0P +AQH/BAQDAgCgMBMGA1UdJQQMMAoGCCsGAQUFBwMCMAwGA1UdEwEB/wQCMAAwCwYJ +KoZIhvcNAQELA4IBAQCKLREH7bXtXtZ+8vI6cjD7W3QikiArGqbl36bAhhWsJLp/ +p/ndKz39iFNaiZ3GlwIURWOOKx3y3GA0x9m8FR+Llthf0EQ8sUjnwaknWs0Y6DQ3 +jjPFZOpV3KPCFrdMJ3++E3MgwFC/Ih/N2ebFX9EcV9Vcc6oVWMdwT0fsrhu683rq +6GSR/3iVX1G/pmOiuaR0fNUaCyCfYrnI4zHBDgSfnlm3vIvN2lrsR/DQBakNL8DJ +HBgKxMGeUPoneBv+c8DMXIL0EhaFXRlBv9QW45/GiAIOuyFJ0i6hCtGZpJjq4OpQ +BRjCI+izPzFTjsxD4aORE+WOkyWFCGPWKfNejfw0 +-----END CERTIFICATE-----` + + keyData = `-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAq12HPT64ItfDlxJiez2tT9eJ8VKlv/HbhYN2ubvZL+9v0E9i +wBK2I/Xjxu7KWvVI642LyxMBmaVUMMikhXAwt9/6jaspgOJyf1+NutPFM6OUjikc +kEf4lTcAnS1tqO6mKiHvSPAVLShinC2d6DbNycTZniXqaGuPaiStzlDX9fYjYcKG +0hTglhOKw5u2KfXRAolfTUIt9hcktry6lbMpnj8Y5wcb54BXeNdaheJ22NvVSzDv +RqahNnqYhUKSK7VDAKhrz7JyiMZ9mG+oywCnXnfplwK6X41r4BsK/jMpLsWRBBoi +ZWtd1SIVqewiih8aXD8k26gorqyxWlLkhzemBwIDAQABAoIBAD2XYRs3JrGHQUpU +FkdbVKZkvrSY0vAZOqBTLuH0zUv4UATb8487anGkWBjRDLQCgxH+jucPTrztekQK +aW94clo0S3aNtV4YhbSYIHWs1a0It0UdK6ID7CmdWkAj6s0T8W8lQT7C46mWYVLm +5mFnCTHi6aB42jZrqmEpC7sivWwuU0xqj3Ml8kkxQCGmyc9JjmCB4OrFFC8NNt6M 
+ObvQkUI6Z3nO4phTbpxkE1/9dT0MmPIF7GhHVzJMS+EyyRYUDllZ0wvVSOM3qZT0 +JMUaBerkNwm9foKJ1+dv2nMKZZbJajv7suUDCfU44mVeaEO+4kmTKSGCGjjTBGkr +7L1ySDECgYEA5ElIMhpdBzIivCuBIH8LlUeuzd93pqssO1G2Xg0jHtfM4tz7fyeI +cr90dc8gpli24dkSxzLeg3Tn3wIj/Bu64m2TpZPZEIlukYvgdgArmRIPQVxerYey +OkrfTNkxU1HXsYjLCdGcGXs5lmb+K/kuTcFxaMOs7jZi7La+jEONwf8CgYEAwCs/ +rUOOA0klDsWWisbivOiNPII79c9McZCNBqncCBfMUoiGe8uWDEO4TFHN60vFuVk9 +8PkwpCfvaBUX+ajvbafIfHxsnfk1M04WLGCeqQ/ym5Q4sQoQOcC1b1y9qc/xEWfg +nIUuia0ukYRpl7qQa3tNg+BNFyjypW8zukUAC/kCgYB1/Kojuxx5q5/oQVPrx73k +2bevD+B3c+DYh9MJqSCNwFtUpYIWpggPxoQan4LwdsmO0PKzocb/ilyNFj4i/vII +NToqSc/WjDFpaDIKyuu9oWfhECye45NqLWhb/6VOuu4QA/Nsj7luMhIBehnEAHW+ +GkzTKM8oD1PxpEG3nPKXYQKBgQC6AuMPRt3XBl1NkCrpSBy/uObFlFaP2Enpf39S +3OZ0Gv0XQrnSaL1kP8TMcz68rMrGX8DaWYsgytstR4W+jyy7WvZwsUu+GjTJ5aMG +77uEcEBpIi9CBzivfn7hPccE8ZgqPf+n4i6q66yxBJflW5xhvafJqDtW2LcPNbW/ +bvzdmQKBgExALRUXpq+5dbmkdXBHtvXdRDZ6rVmrnjy4nI5bPw+1GqQqk6uAR6B/ +F6NmLCQOO4PDG/cuatNHIr2FrwTmGdEL6ObLUGWn9Oer9gJhHVqqsY5I4sEPo4XX +stR0Yiw0buV6DL/moUO0HIM9Bjh96HJp+LxiIS6UCdIhMPp5HoQa +-----END RSA PRIVATE KEY-----` +) + +func TestNew(t *testing.T) { + testCases := map[string]struct { + Config *Config + Err bool + TLS bool + Default bool + }{ + "default transport": { + Default: true, + Config: &Config{}, + }, + + "ca transport": { + TLS: true, + Config: &Config{ + TLS: TLSConfig{ + CAData: []byte(rootCACert), + }, + }, + }, + "bad ca file transport": { + Err: true, + Config: &Config{ + TLS: TLSConfig{ + CAFile: "invalid file", + }, + }, + }, + "ca data overriding bad ca file transport": { + TLS: true, + Config: &Config{ + TLS: TLSConfig{ + CAData: []byte(rootCACert), + CAFile: "invalid file", + }, + }, + }, + + "cert transport": { + TLS: true, + Config: &Config{ + TLS: TLSConfig{ + CAData: []byte(rootCACert), + CertData: []byte(certData), + KeyData: []byte(keyData), + }, + }, + }, + "bad cert data transport": { + Err: true, + Config: &Config{ + TLS: TLSConfig{ + CAData: []byte(rootCACert), + CertData: []byte(certData), + KeyData: []byte("bad key data"), + }, + }, + }, + "bad file cert transport": { + Err: true, + Config: &Config{ + TLS: TLSConfig{ + CAData: []byte(rootCACert), + CertData: []byte(certData), + KeyFile: "invalid file", + }, + }, + }, + "key data overriding bad file cert transport": { + TLS: true, + Config: &Config{ + TLS: TLSConfig{ + CAData: []byte(rootCACert), + CertData: []byte(certData), + KeyData: []byte(keyData), + KeyFile: "invalid file", + }, + }, + }, + } + for k, testCase := range testCases { + transport, err := New(testCase.Config) + switch { + case testCase.Err && err == nil: + t.Errorf("%s: unexpected non-error", k) + continue + case !testCase.Err && err != nil: + t.Errorf("%s: unexpected error: %v", k, err) + continue + } + + switch { + case testCase.Default && transport != http.DefaultTransport: + t.Errorf("%s: expected the default transport, got %#v", k, transport) + continue + case !testCase.Default && transport == http.DefaultTransport: + t.Errorf("%s: expected non-default transport, got %#v", k, transport) + continue + } + + // We only know how to check TLSConfig on http.Transports + if transport, ok := transport.(*http.Transport); ok { + switch { + case testCase.TLS && transport.TLSClientConfig == nil: + t.Errorf("%s: expected TLSClientConfig, got %#v", k, transport) + continue + case !testCase.TLS && transport.TLSClientConfig != nil: + t.Errorf("%s: expected no TLSClientConfig, got %#v", k, transport) + continue + } + } + } +} diff --git 
a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/discovery/client_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/discovery/client_test.go new file mode 100644 index 000000000000..674b92f0f6ef --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/discovery/client_test.go @@ -0,0 +1,303 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package discovery + +import ( + "encoding/json" + "net/http" + "net/http/httptest" + "reflect" + "testing" + + "github.com/emicklei/go-restful/swagger" + + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/client/restclient" + "k8s.io/kubernetes/pkg/version" +) + +func TestGetServerVersion(t *testing.T) { + expect := version.Info{ + Major: "foo", + Minor: "bar", + GitCommit: "baz", + } + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + output, err := json.Marshal(expect) + if err != nil { + t.Errorf("unexpected encoding error: %v", err) + return + } + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + w.Write(output) + })) + defer server.Close() + client := NewDiscoveryClientForConfigOrDie(&restclient.Config{Host: server.URL}) + + got, err := client.ServerVersion() + if err != nil { + t.Fatalf("unexpected encoding error: %v", err) + } + if e, a := expect, *got; !reflect.DeepEqual(e, a) { + t.Errorf("expected %v, got %v", e, a) + } +} + +func TestGetServerGroupsWithV1Server(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + var obj interface{} + switch req.URL.Path { + case "/api": + obj = &unversioned.APIVersions{ + Versions: []string{ + "v1", + }, + } + default: + w.WriteHeader(http.StatusNotFound) + return + } + output, err := json.Marshal(obj) + if err != nil { + t.Fatalf("unexpected encoding error: %v", err) + return + } + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + w.Write(output) + })) + defer server.Close() + client := NewDiscoveryClientForConfigOrDie(&restclient.Config{Host: server.URL}) + // ServerGroups should not return an error even if server returns error at /api and /apis + apiGroupList, err := client.ServerGroups() + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + groupVersions := unversioned.ExtractGroupVersions(apiGroupList) + if !reflect.DeepEqual(groupVersions, []string{"v1"}) { + t.Errorf("expected: %q, got: %q", []string{"v1"}, groupVersions) + } +} + +func TestGetServerResourcesWithV1Server(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + var obj interface{} + switch req.URL.Path { + case "/api": + obj = &unversioned.APIVersions{ + Versions: []string{ + "v1", + }, + } + default: + w.WriteHeader(http.StatusNotFound) + return + } + output, err := json.Marshal(obj) + if err != nil { + t.Errorf("unexpected encoding error: %v", err) + 
return + } + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + w.Write(output) + })) + defer server.Close() + client := NewDiscoveryClientForConfigOrDie(&restclient.Config{Host: server.URL}) + // ServerResources should not return an error even if server returns error at /api/v1. + resourceMap, err := client.ServerResources() + if err != nil { + t.Errorf("unexpected error: %v", err) + } + if _, found := resourceMap["v1"]; !found { + t.Errorf("missing v1 in resource map") + } + +} + +func TestGetServerResources(t *testing.T) { + stable := unversioned.APIResourceList{ + GroupVersion: "v1", + APIResources: []unversioned.APIResource{ + {"pods", true, "Pod"}, + {"services", true, "Service"}, + {"namespaces", false, "Namespace"}, + }, + } + beta := unversioned.APIResourceList{ + GroupVersion: "extensions/v1", + APIResources: []unversioned.APIResource{ + {"deployments", true, "Deployment"}, + {"ingresses", true, "Ingress"}, + {"jobs", true, "Job"}, + }, + } + tests := []struct { + resourcesList *unversioned.APIResourceList + path string + request string + expectErr bool + }{ + { + resourcesList: &stable, + path: "/api/v1", + request: "v1", + expectErr: false, + }, + { + resourcesList: &beta, + path: "/apis/extensions/v1beta1", + request: "extensions/v1beta1", + expectErr: false, + }, + { + resourcesList: &stable, + path: "/api/v1", + request: "foobar", + expectErr: true, + }, + } + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + var list interface{} + switch req.URL.Path { + case "/api/v1": + list = &stable + case "/apis/extensions/v1beta1": + list = &beta + case "/api": + list = &unversioned.APIVersions{ + Versions: []string{ + "v1", + }, + } + case "/apis": + list = &unversioned.APIGroupList{ + Groups: []unversioned.APIGroup{ + { + Versions: []unversioned.GroupVersionForDiscovery{ + {GroupVersion: "extensions/v1beta1"}, + }, + }, + }, + } + default: + t.Logf("unexpected request: %s", req.URL.Path) + w.WriteHeader(http.StatusNotFound) + return + } + output, err := json.Marshal(list) + if err != nil { + t.Errorf("unexpected encoding error: %v", err) + return + } + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + w.Write(output) + })) + defer server.Close() + client := NewDiscoveryClientForConfigOrDie(&restclient.Config{Host: server.URL}) + for _, test := range tests { + got, err := client.ServerResourcesForGroupVersion(test.request) + if test.expectErr { + if err == nil { + t.Error("unexpected non-error") + } + continue + } + if err != nil { + t.Errorf("unexpected error: %v", err) + continue + } + if !reflect.DeepEqual(got, test.resourcesList) { + t.Errorf("expected:\n%v\ngot:\n%v\n", test.resourcesList, got) + } + } + + resourceMap, err := client.ServerResources() + if err != nil { + t.Errorf("unexpected error: %v", err) + } + for _, api := range []string{"v1", "extensions/v1beta1"} { + if _, found := resourceMap[api]; !found { + t.Errorf("missing expected api: %s", api) + } + } +} + +func swaggerSchemaFakeServer() (*httptest.Server, error) { + request := 1 + var sErr error + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + var resp interface{} + if request == 1 { + resp = unversioned.APIVersions{Versions: []string{"v1", "v2", "v3"}} + request++ + } else { + resp = swagger.ApiDeclaration{} + } + output, err := json.Marshal(resp) + if err != nil { + sErr = err + return + } + w.Header().Set("Content-Type", "application/json") + 
w.WriteHeader(http.StatusOK) + w.Write(output) + })) + return server, sErr +} + +func TestGetSwaggerSchema(t *testing.T) { + expect := swagger.ApiDeclaration{} + + server, err := swaggerSchemaFakeServer() + if err != nil { + t.Errorf("unexpected encoding error: %v", err) + } + defer server.Close() + + client := NewDiscoveryClientForConfigOrDie(&restclient.Config{Host: server.URL}) + got, err := client.SwaggerSchema(v1.SchemeGroupVersion) + if err != nil { + t.Fatalf("unexpected encoding error: %v", err) + } + if e, a := expect, *got; !reflect.DeepEqual(e, a) { + t.Errorf("expected %v, got %v", e, a) + } +} + +func TestGetSwaggerSchemaFail(t *testing.T) { + expErr := "API version: api.group/v4 is not supported by the server. Use one of: [v1 v2 v3]" + + server, err := swaggerSchemaFakeServer() + if err != nil { + t.Errorf("unexpected encoding error: %v", err) + } + defer server.Close() + + client := NewDiscoveryClientForConfigOrDie(&restclient.Config{Host: server.URL}) + got, err := client.SwaggerSchema(unversioned.GroupVersion{Group: "api.group", Version: "v4"}) + if got != nil { + t.Fatalf("unexpected response: %v", got) + } + if err.Error() != expErr { + t.Errorf("expected an error, got %v", err) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/discovery/discovery_client.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/discovery/discovery_client.go index fa4d94a348fa..283dd5a63e8b 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/discovery/discovery_client.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/discovery/discovery_client.go @@ -20,6 +20,7 @@ import ( "encoding/json" "fmt" "net/url" + "strings" "github.com/emicklei/go-restful/swagger" @@ -29,6 +30,8 @@ import ( "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/client/restclient" "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/runtime/serializer" + utilerrors "k8s.io/kubernetes/pkg/util/errors" "k8s.io/kubernetes/pkg/version" ) @@ -54,6 +57,12 @@ type ServerResourcesInterface interface { ServerResourcesForGroupVersion(groupVersion string) (*unversioned.APIResourceList, error) // ServerResources returns the supported resources for all groups and versions. ServerResources() (map[string]*unversioned.APIResourceList, error) + // ServerPreferredResources returns the supported resources with the version preferred by the + // server. + ServerPreferredResources() ([]unversioned.GroupVersionResource, error) + // ServerPreferredNamespacedResources returns the supported namespaced resources with the + // version preferred by the server. + ServerPreferredNamespacedResources() ([]unversioned.GroupVersionResource, error) } // ServerVersionInterface has a method for retrieving the server's version. @@ -124,7 +133,9 @@ func (d *DiscoveryClient) ServerGroups() (apiGroupList *unversioned.APIGroupList // ServerResourcesForGroupVersion returns the supported resources for a group and version. 
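The hunk that follows adds an empty-groupVersion guard to this method while keeping the existing split between the legacy core path (/api) and group paths (/apis). A minimal standalone restatement of that selection logic; the helper name discoveryPathFor is purely illustrative:

```go
package main

import "fmt"

// discoveryPathFor restates the path selection performed by
// ServerResourcesForGroupVersion: the legacy core group is served
// under /api, while named groups are served under /apis.
func discoveryPathFor(groupVersion string) (string, error) {
	if len(groupVersion) == 0 {
		return "", fmt.Errorf("groupVersion shouldn't be empty")
	}
	if groupVersion == "v1" {
		return "/api/" + groupVersion, nil
	}
	return "/apis/" + groupVersion, nil
}

func main() {
	for _, gv := range []string{"v1", "extensions/v1beta1"} {
		path, _ := discoveryPathFor(gv)
		fmt.Printf("%-20s -> %s\n", gv, path)
	}
	// v1                   -> /api/v1
	// extensions/v1beta1   -> /apis/extensions/v1beta1
}
```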
func (d *DiscoveryClient) ServerResourcesForGroupVersion(groupVersion string) (resources *unversioned.APIResourceList, err error) { url := url.URL{} - if groupVersion == "v1" { + if len(groupVersion) == 0 { + return nil, fmt.Errorf("groupVersion shouldn't be empty") + } else if groupVersion == "v1" { url.Path = "/api/" + groupVersion } else { url.Path = "/apis/" + groupVersion @@ -160,6 +171,50 @@ func (d *DiscoveryClient) ServerResources() (map[string]*unversioned.APIResource return result, nil } +// serverPreferredResources returns the supported resources with the version preferred by the +// server. If namespaced is true, only namespaced resources will be returned. +func (d *DiscoveryClient) serverPreferredResources(namespaced bool) ([]unversioned.GroupVersionResource, error) { + results := []unversioned.GroupVersionResource{} + serverGroupList, err := d.ServerGroups() + if err != nil { + return results, err + } + + allErrs := []error{} + for _, apiGroup := range serverGroupList.Groups { + preferredVersion := apiGroup.PreferredVersion + apiResourceList, err := d.ServerResourcesForGroupVersion(preferredVersion.GroupVersion) + if err != nil { + allErrs = append(allErrs, err) + continue + } + groupVersion := unversioned.GroupVersion{Group: apiGroup.Name, Version: preferredVersion.Version} + for _, apiResource := range apiResourceList.APIResources { + // ignore the root scoped resources if "namespaced" is true. + if namespaced && !apiResource.Namespaced { + continue + } + if strings.Contains(apiResource.Name, "/") { + continue + } + results = append(results, groupVersion.WithResource(apiResource.Name)) + } + } + return results, utilerrors.NewAggregate(allErrs) +} + +// ServerPreferredResources returns the supported resources with the version preferred by the +// server. +func (d *DiscoveryClient) ServerPreferredResources() ([]unversioned.GroupVersionResource, error) { + return d.serverPreferredResources(false) +} + +// ServerPreferredNamespacedResources returns the supported namespaced resources with the +// version preferred by the server. +func (d *DiscoveryClient) ServerPreferredNamespacedResources() ([]unversioned.GroupVersionResource, error) { + return d.serverPreferredResources(true) +} + // ServerVersion retrieves and parses the server's version (git version). 
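Because serverPreferredResources aggregates per-group discovery failures instead of aborting, callers can usually proceed with the partial result. A hedged usage sketch: the host is a placeholder, and NewDiscoveryClientForConfig is assumed to exist as the error-returning sibling of the NewDiscoveryClientForConfigOrDie constructor used in the tests above:

```go
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/client/restclient"
	"k8s.io/kubernetes/pkg/client/typed/discovery"
)

func main() {
	conf := &restclient.Config{Host: "https://localhost:8443"} // placeholder address
	dc, err := discovery.NewDiscoveryClientForConfig(conf)
	if err != nil {
		panic(err)
	}

	gvrs, err := dc.ServerPreferredNamespacedResources()
	if err != nil {
		// The aggregate error may accompany a usable partial result.
		fmt.Println("some groups failed discovery:", err)
	}
	for _, gvr := range gvrs {
		fmt.Printf("%s/%s: %s\n", gvr.Group, gvr.Version, gvr.Resource)
	}
}
```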
func (d *DiscoveryClient) ServerVersion() (*version.Info, error) { body, err := d.Get().AbsPath("/version").Do().Raw() @@ -211,7 +266,11 @@ func (d *DiscoveryClient) SwaggerSchema(version unversioned.GroupVersion) (*swag func setDiscoveryDefaults(config *restclient.Config) error { config.APIPath = "" config.GroupVersion = nil - config.Codec = runtime.NoopEncoder{api.Codecs.UniversalDecoder()} + codec := runtime.NoopEncoder{Decoder: api.Codecs.UniversalDecoder()} + config.NegotiatedSerializer = serializer.NegotiatedSerializerWrapper( + runtime.SerializerInfo{Serializer: codec}, + runtime.StreamSerializerInfo{}, + ) if len(config.UserAgent) == 0 { config.UserAgent = restclient.DefaultKubernetesUserAgent() } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake/discovery.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/discovery/fake/discovery.go similarity index 73% rename from Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake/discovery.go rename to Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/discovery/fake/discovery.go index 655fa41e05d2..1c230acf02aa 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake/discovery.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/discovery/fake/discovery.go @@ -14,8 +14,6 @@ See the License for the specific language governing permissions and limitations under the License. */ -// TODO: the fake discovery client should live in pkg/client/discovery/, rather -// than being copied in every fake clientset. package fake import ( @@ -33,7 +31,7 @@ type FakeDiscovery struct { func (c *FakeDiscovery) ServerResourcesForGroupVersion(groupVersion string) (*unversioned.APIResourceList, error) { action := core.ActionImpl{ Verb: "get", - Resource: "resource", + Resource: unversioned.GroupVersionResource{Resource: "resource"}, } c.Invokes(action, nil) return c.Resources[groupVersion], nil @@ -42,12 +40,20 @@ func (c *FakeDiscovery) ServerResourcesForGroupVersion(groupVersion string) (*un func (c *FakeDiscovery) ServerResources() (map[string]*unversioned.APIResourceList, error) { action := core.ActionImpl{ Verb: "get", - Resource: "resource", + Resource: unversioned.GroupVersionResource{Resource: "resource"}, } c.Invokes(action, nil) return c.Resources, nil } +func (c *FakeDiscovery) ServerPreferredResources() ([]unversioned.GroupVersionResource, error) { + return nil, nil +} + +func (c *FakeDiscovery) ServerPreferredNamespacedResources() ([]unversioned.GroupVersionResource, error) { + return nil, nil +} + func (c *FakeDiscovery) ServerGroups() (*unversioned.APIGroupList, error) { return nil, nil } @@ -55,7 +61,7 @@ func (c *FakeDiscovery) ServerGroups() (*unversioned.APIGroupList, error) { func (c *FakeDiscovery) ServerVersion() (*version.Info, error) { action := core.ActionImpl{} action.Verb = "get" - action.Resource = "version" + action.Resource = unversioned.GroupVersionResource{Resource: "version"} c.Invokes(action, nil) versionInfo := version.Get() @@ -66,9 +72,9 @@ func (c *FakeDiscovery) SwaggerSchema(version unversioned.GroupVersion) (*swagge action := core.ActionImpl{} action.Verb = "get" if version == v1.SchemeGroupVersion { - action.Resource = "/swaggerapi/api/" + version.Version + action.Resource = unversioned.GroupVersionResource{Resource: "/swaggerapi/api/" + version.Version} } else { - action.Resource = "/swaggerapi/apis/" + version.Group + "/" + version.Version + 
action.Resource = unversioned.GroupVersionResource{Resource: "/swaggerapi/apis/" + version.Group + "/" + version.Version} } c.Invokes(action, nil) diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/dynamic/client.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/dynamic/client.go new file mode 100644 index 000000000000..96700ac944ac --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/dynamic/client.go @@ -0,0 +1,234 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package dynamic provides a client interface to arbitrary Kubernetes +// APIs, exposing common high-level operations and common metadata. +package dynamic + +import ( + "encoding/json" + "errors" + "io" + "net/url" + "strings" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/client/restclient" + "k8s.io/kubernetes/pkg/conversion/queryparams" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/runtime/serializer" + "k8s.io/kubernetes/pkg/watch" +) + +// Client is a Kubernetes client that allows you to access and manipulate +// the metadata of a Kubernetes API group. +type Client struct { + cl *restclient.RESTClient +} + +// NewClient returns a new client based on the passed in config. The +// codec is ignored, as the dynamic client uses its own codec. +func NewClient(conf *restclient.Config) (*Client, error) { + // avoid changing the original config + confCopy := *conf + conf = &confCopy + + codec := dynamicCodec{} + + // TODO: it's questionable that this should be using anything other than unstructured schema and JSON + conf.ContentType = runtime.ContentTypeJSON + streamingInfo, _ := api.Codecs.StreamingSerializerForMediaType("application/json;stream=watch", nil) + conf.NegotiatedSerializer = serializer.NegotiatedSerializerWrapper(runtime.SerializerInfo{Serializer: codec}, streamingInfo) + + if conf.APIPath == "" { + conf.APIPath = "/api" + } + + if len(conf.UserAgent) == 0 { + conf.UserAgent = restclient.DefaultKubernetesUserAgent() + } + + if conf.QPS == 0.0 { + conf.QPS = 5.0 + } + if conf.Burst == 0 { + conf.Burst = 10 + } + + cl, err := restclient.RESTClientFor(conf) + if err != nil { + return nil, err + } + + return &Client{cl: cl}, nil +} + +// Resource returns an API interface to the specified resource for +// this client's group and version. If resource is not a namespaced +// resource, then namespace is ignored. +func (c *Client) Resource(resource *unversioned.APIResource, namespace string) *ResourceClient { + return &ResourceClient{ + cl: c.cl, + resource: resource, + ns: namespace, + } +} + +// ResourceClient is an API interface to a specific resource under a +// dynamic client. +type ResourceClient struct { + cl *restclient.RESTClient + resource *unversioned.APIResource + ns string +}
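Putting Client, Resource, and ResourceClient together, a minimal usage sketch: the host is a placeholder, and the hand-written APIResource stands in for metadata that would normally come from the discovery client:

```go
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/unversioned"
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/client/restclient"
	"k8s.io/kubernetes/pkg/client/typed/dynamic"
)

func main() {
	gv := &unversioned.GroupVersion{Group: "", Version: "v1"}
	conf := &restclient.Config{
		Host:          "https://localhost:8443", // placeholder address
		ContentConfig: restclient.ContentConfig{GroupVersion: gv},
	}

	cl, err := dynamic.NewClient(conf)
	if err != nil {
		panic(err)
	}

	// The APIResource would normally come from discovery.
	pods := &unversioned.APIResource{Name: "pods", Namespaced: true}
	list, err := cl.Resource(pods, "default").List(&v1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, item := range list.Items {
		fmt.Println(item.GetName())
	}
}
```

+ +// List returns a list of objects for this resource.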
+func (rc *ResourceClient) List(opts runtime.Object) (*runtime.UnstructuredList, error) { + result := new(runtime.UnstructuredList) + err := rc.cl.Get(). + NamespaceIfScoped(rc.ns, rc.resource.Namespaced). + Resource(rc.resource.Name). + VersionedParams(opts, parameterEncoder). + Do(). + Into(result) + return result, err +} + +// Get gets the resource with the specified name. +func (rc *ResourceClient) Get(name string) (*runtime.Unstructured, error) { + result := new(runtime.Unstructured) + err := rc.cl.Get(). + NamespaceIfScoped(rc.ns, rc.resource.Namespaced). + Resource(rc.resource.Name). + Name(name). + Do(). + Into(result) + return result, err +} + +// Delete deletes the resource with the specified name. +func (rc *ResourceClient) Delete(name string, opts *v1.DeleteOptions) error { + return rc.cl.Delete(). + NamespaceIfScoped(rc.ns, rc.resource.Namespaced). + Resource(rc.resource.Name). + Name(name). + Body(opts). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (rc *ResourceClient) DeleteCollection(deleteOptions *v1.DeleteOptions, listOptions runtime.Object) error { + return rc.cl.Delete(). + NamespaceIfScoped(rc.ns, rc.resource.Namespaced). + Resource(rc.resource.Name). + VersionedParams(listOptions, parameterEncoder). + Body(deleteOptions). + Do(). + Error() +} + +// Create creates the provided resource. +func (rc *ResourceClient) Create(obj *runtime.Unstructured) (*runtime.Unstructured, error) { + result := new(runtime.Unstructured) + err := rc.cl.Post(). + NamespaceIfScoped(rc.ns, rc.resource.Namespaced). + Resource(rc.resource.Name). + Body(obj). + Do(). + Into(result) + return result, err +} + +// Update updates the provided resource. +func (rc *ResourceClient) Update(obj *runtime.Unstructured) (*runtime.Unstructured, error) { + result := new(runtime.Unstructured) + if len(obj.GetName()) == 0 { + return result, errors.New("object missing name") + } + err := rc.cl.Put(). + NamespaceIfScoped(rc.ns, rc.resource.Namespaced). + Resource(rc.resource.Name). + Name(obj.GetName()). + Body(obj). + Do(). + Into(result) + return result, err +} + +// Watch returns a watch.Interface that watches the resource. +func (rc *ResourceClient) Watch(opts runtime.Object) (watch.Interface, error) { + return rc.cl.Get(). + Prefix("watch"). + NamespaceIfScoped(rc.ns, rc.resource.Namespaced). + Resource(rc.resource.Name). + VersionedParams(opts, parameterEncoder). + Watch() +} + +func (rc *ResourceClient) Patch(name string, pt api.PatchType, data []byte) (*runtime.Unstructured, error) { + result := new(runtime.Unstructured) + err := rc.cl.Patch(pt). + NamespaceIfScoped(rc.ns, rc.resource.Namespaced). + Resource(rc.resource.Name). + Name(name). + Body(data). + Do(). + Into(result) + return result, err +} + +// dynamicCodec is a codec that wraps the standard unstructured codec +// with special handling for Status objects. 
+type dynamicCodec struct{} + +func (dynamicCodec) Decode(data []byte, gvk *unversioned.GroupVersionKind, obj runtime.Object) (runtime.Object, *unversioned.GroupVersionKind, error) { + obj, gvk, err := runtime.UnstructuredJSONScheme.Decode(data, gvk, obj) + if err != nil { + return nil, nil, err + } + + if _, ok := obj.(*unversioned.Status); !ok && strings.ToLower(gvk.Kind) == "status" { + obj = &unversioned.Status{} + err := json.Unmarshal(data, obj) + if err != nil { + return nil, nil, err + } + } + + return obj, gvk, nil +} + +func (dynamicCodec) EncodeToStream(obj runtime.Object, w io.Writer, overrides ...unversioned.GroupVersion) error { + return runtime.UnstructuredJSONScheme.EncodeToStream(obj, w, overrides...) +} + +// parameterCodec is a codec that converts an API object to query +// parameters without trying to convert to the target version. +type parameterCodec struct{} + +func (parameterCodec) EncodeParameters(obj runtime.Object, to unversioned.GroupVersion) (url.Values, error) { + return queryparams.Convert(obj) +} + +func (parameterCodec) DecodeParameters(parameters url.Values, from unversioned.GroupVersion, into runtime.Object) error { + return errors.New("DecodeParameters not implemented on dynamic parameterCodec") +} + +var parameterEncoder runtime.ParameterCodec = parameterCodec{} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/dynamic/client_pool.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/dynamic/client_pool.go new file mode 100644 index 000000000000..f7c6505fd773 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/dynamic/client_pool.go @@ -0,0 +1,86 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dynamic + +import ( + "sync" + + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/client/restclient" +) + +// ClientPool manages a pool of dynamic clients. +type ClientPool interface { + // ClientForGroupVersion returns a client configured for the specified groupVersion. + ClientForGroupVersion(groupVersion unversioned.GroupVersion) (*Client, error) +} + +// APIPathResolverFunc knows how to convert a groupVersion to its API path. +type APIPathResolverFunc func(groupVersion unversioned.GroupVersion) string
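A hedged sketch of how this pool might be used: it assumes a placeholder host and relies on the per-group/version caching implemented in ClientForGroupVersion below:

```go
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/unversioned"
	"k8s.io/kubernetes/pkg/client/restclient"
	"k8s.io/kubernetes/pkg/client/typed/dynamic"
)

func main() {
	conf := &restclient.Config{Host: "https://localhost:8443"} // placeholder address
	pool := dynamic.NewClientPool(conf, dynamic.LegacyAPIPathResolverFunc)

	gv := unversioned.GroupVersion{Group: "extensions", Version: "v1beta1"}
	a, err := pool.ClientForGroupVersion(gv)
	if err != nil {
		panic(err)
	}
	b, err := pool.ClientForGroupVersion(gv)
	if err != nil {
		panic(err)
	}
	fmt.Println(a == b) // true: one cached client per group/version
}
```

+ +// LegacyAPIPathResolverFunc can resolve paths properly with the legacy API.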
+func LegacyAPIPathResolverFunc(groupVersion unversioned.GroupVersion) string { + if len(groupVersion.Group) == 0 { + return "/api" + } + return "/apis" +} + +// clientPoolImpl implements ClientPool +type clientPoolImpl struct { + lock sync.RWMutex + config *restclient.Config + clients map[unversioned.GroupVersion]*Client + apiPathResolverFunc APIPathResolverFunc +} + +// NewClientPool returns a ClientPool from the specified config +func NewClientPool(config *restclient.Config, apiPathResolverFunc APIPathResolverFunc) ClientPool { + confCopy := *config + return &clientPoolImpl{ + config: &confCopy, + clients: map[unversioned.GroupVersion]*Client{}, + apiPathResolverFunc: apiPathResolverFunc, + } +} + +// ClientForGroupVersion returns a client for the specified groupVersion, creating one if none exists +func (c *clientPoolImpl) ClientForGroupVersion(groupVersion unversioned.GroupVersion) (*Client, error) { + c.lock.Lock() + defer c.lock.Unlock() + + // do we have a client already configured? + if existingClient, found := c.clients[groupVersion]; found { + return existingClient, nil + } + + // avoid changing the original config + confCopy := *c.config + conf := &confCopy + + // we need to set the api path based on group version, if no group, default to legacy path + conf.APIPath = c.apiPathResolverFunc(groupVersion) + + // we need to make a client + conf.GroupVersion = &groupVersion + dynamicClient, err := NewClient(conf) + if err != nil { + return nil, err + } + c.clients[groupVersion] = dynamicClient + return dynamicClient, nil +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/dynamic/client_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/dynamic/client_test.go new file mode 100644 index 000000000000..2e456f55013b --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/dynamic/client_test.go @@ -0,0 +1,550 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package dynamic + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "reflect" + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/client/restclient" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/runtime/serializer/streaming" + "k8s.io/kubernetes/pkg/watch" + "k8s.io/kubernetes/pkg/watch/versioned" +) + +func getJSON(version, kind, name string) []byte { + return []byte(fmt.Sprintf(`{"apiVersion": %q, "kind": %q, "metadata": {"name": %q}}`, version, kind, name)) +} + +func getListJSON(version, kind string, items ...[]byte) []byte { + json := fmt.Sprintf(`{"apiVersion": %q, "kind": %q, "items": [%s]}`, + version, kind, bytes.Join(items, []byte(","))) + return []byte(json) +} + +func getObject(version, kind, name string) *runtime.Unstructured { + return &runtime.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": version, + "kind": kind, + "metadata": map[string]interface{}{ + "name": name, + }, + }, + } +} + +func getClientServer(gv *unversioned.GroupVersion, h func(http.ResponseWriter, *http.Request)) (*Client, *httptest.Server, error) { + srv := httptest.NewServer(http.HandlerFunc(h)) + cl, err := NewClient(&restclient.Config{ + Host: srv.URL, + ContentConfig: restclient.ContentConfig{GroupVersion: gv}, + }) + if err != nil { + srv.Close() + return nil, nil, err + } + return cl, srv, nil +} + +func TestList(t *testing.T) { + tcs := []struct { + name string + namespace string + path string + resp []byte + want *runtime.UnstructuredList + }{ + { + name: "normal_list", + path: "/api/gtest/vtest/rtest", + resp: getListJSON("vTest", "rTestList", + getJSON("vTest", "rTest", "item1"), + getJSON("vTest", "rTest", "item2")), + want: &runtime.UnstructuredList{ + Object: map[string]interface{}{ + "apiVersion": "vTest", + "kind": "rTestList", + }, + Items: []*runtime.Unstructured{ + getObject("vTest", "rTest", "item1"), + getObject("vTest", "rTest", "item2"), + }, + }, + }, + { + name: "namespaced_list", + namespace: "nstest", + path: "/api/gtest/vtest/namespaces/nstest/rtest", + resp: getListJSON("vTest", "rTestList", + getJSON("vTest", "rTest", "item1"), + getJSON("vTest", "rTest", "item2")), + want: &runtime.UnstructuredList{ + Object: map[string]interface{}{ + "apiVersion": "vTest", + "kind": "rTestList", + }, + Items: []*runtime.Unstructured{ + getObject("vTest", "rTest", "item1"), + getObject("vTest", "rTest", "item2"), + }, + }, + }, + } + for _, tc := range tcs { + gv := &unversioned.GroupVersion{Group: "gtest", Version: "vtest"} + resource := &unversioned.APIResource{Name: "rtest", Namespaced: len(tc.namespace) != 0} + cl, srv, err := getClientServer(gv, func(w http.ResponseWriter, r *http.Request) { + if r.Method != "GET" { + t.Errorf("List(%q) got HTTP method %s. wanted GET", tc.name, r.Method) + } + + if r.URL.Path != tc.path { + t.Errorf("List(%q) got path %s. 
wanted %s", tc.name, r.URL.Path, tc.path) + } + + w.Header().Set("Content-Type", runtime.ContentTypeJSON) + w.Write(tc.resp) + }) + if err != nil { + t.Errorf("unexpected error when creating client: %v", err) + continue + } + defer srv.Close() + + got, err := cl.Resource(resource, tc.namespace).List(&v1.ListOptions{}) + if err != nil { + t.Errorf("unexpected error when listing %q: %v", tc.name, err) + continue + } + + if !reflect.DeepEqual(got, tc.want) { + t.Errorf("List(%q) want: %v\ngot: %v", tc.name, tc.want, got) + } + } +} + +func TestGet(t *testing.T) { + tcs := []struct { + namespace string + name string + path string + resp []byte + want *runtime.Unstructured + }{ + { + name: "normal_get", + path: "/api/gtest/vtest/rtest/normal_get", + resp: getJSON("vTest", "rTest", "normal_get"), + want: getObject("vTest", "rTest", "normal_get"), + }, + { + namespace: "nstest", + name: "namespaced_get", + path: "/api/gtest/vtest/namespaces/nstest/rtest/namespaced_get", + resp: getJSON("vTest", "rTest", "namespaced_get"), + want: getObject("vTest", "rTest", "namespaced_get"), + }, + } + for _, tc := range tcs { + gv := &unversioned.GroupVersion{Group: "gtest", Version: "vtest"} + resource := &unversioned.APIResource{Name: "rtest", Namespaced: len(tc.namespace) != 0} + cl, srv, err := getClientServer(gv, func(w http.ResponseWriter, r *http.Request) { + if r.Method != "GET" { + t.Errorf("Get(%q) got HTTP method %s. wanted GET", tc.name, r.Method) + } + + if r.URL.Path != tc.path { + t.Errorf("Get(%q) got path %s. wanted %s", tc.name, r.URL.Path, tc.path) + } + + w.Header().Set("Content-Type", runtime.ContentTypeJSON) + w.Write(tc.resp) + }) + if err != nil { + t.Errorf("unexpected error when creating client: %v", err) + continue + } + defer srv.Close() + + got, err := cl.Resource(resource, tc.namespace).Get(tc.name) + if err != nil { + t.Errorf("unexpected error when getting %q: %v", tc.name, err) + continue + } + + if !reflect.DeepEqual(got, tc.want) { + t.Errorf("Get(%q) want: %v\ngot: %v", tc.name, tc.want, got) + } + } +} + +func TestDelete(t *testing.T) { + statusOK := &unversioned.Status{ + TypeMeta: unversioned.TypeMeta{Kind: "Status"}, + Status: unversioned.StatusSuccess, + } + tcs := []struct { + namespace string + name string + path string + }{ + { + name: "normal_delete", + path: "/api/gtest/vtest/rtest/normal_delete", + }, + { + namespace: "nstest", + name: "namespaced_delete", + path: "/api/gtest/vtest/namespaces/nstest/rtest/namespaced_delete", + }, + } + for _, tc := range tcs { + gv := &unversioned.GroupVersion{Group: "gtest", Version: "vtest"} + resource := &unversioned.APIResource{Name: "rtest", Namespaced: len(tc.namespace) != 0} + cl, srv, err := getClientServer(gv, func(w http.ResponseWriter, r *http.Request) { + if r.Method != "DELETE" { + t.Errorf("Delete(%q) got HTTP method %s. wanted DELETE", tc.name, r.Method) + } + + if r.URL.Path != tc.path { + t.Errorf("Delete(%q) got path %s. 
wanted %s", tc.name, r.URL.Path, tc.path) + } + + w.Header().Set("Content-Type", runtime.ContentTypeJSON) + runtime.UnstructuredJSONScheme.EncodeToStream(statusOK, w) + }) + if err != nil { + t.Errorf("unexpected error when creating client: %v", err) + continue + } + defer srv.Close() + + err = cl.Resource(resource, tc.namespace).Delete(tc.name, nil) + if err != nil { + t.Errorf("unexpected error when deleting %q: %v", tc.name, err) + continue + } + } +} + +func TestDeleteCollection(t *testing.T) { + statusOK := &unversioned.Status{ + TypeMeta: unversioned.TypeMeta{Kind: "Status"}, + Status: unversioned.StatusSuccess, + } + tcs := []struct { + namespace string + name string + path string + }{ + { + name: "normal_delete_collection", + path: "/api/gtest/vtest/rtest", + }, + { + namespace: "nstest", + name: "namespaced_delete_collection", + path: "/api/gtest/vtest/namespaces/nstest/rtest", + }, + } + for _, tc := range tcs { + gv := &unversioned.GroupVersion{Group: "gtest", Version: "vtest"} + resource := &unversioned.APIResource{Name: "rtest", Namespaced: len(tc.namespace) != 0} + cl, srv, err := getClientServer(gv, func(w http.ResponseWriter, r *http.Request) { + if r.Method != "DELETE" { + t.Errorf("DeleteCollection(%q) got HTTP method %s. wanted DELETE", tc.name, r.Method) + } + + if r.URL.Path != tc.path { + t.Errorf("DeleteCollection(%q) got path %s. wanted %s", tc.name, r.URL.Path, tc.path) + } + + w.Header().Set("Content-Type", runtime.ContentTypeJSON) + runtime.UnstructuredJSONScheme.EncodeToStream(statusOK, w) + }) + if err != nil { + t.Errorf("unexpected error when creating client: %v", err) + continue + } + defer srv.Close() + + err = cl.Resource(resource, tc.namespace).DeleteCollection(nil, &v1.ListOptions{}) + if err != nil { + t.Errorf("unexpected error when deleting collection %q: %v", tc.name, err) + continue + } + } +} + +func TestCreate(t *testing.T) { + tcs := []struct { + name string + namespace string + obj *runtime.Unstructured + path string + }{ + { + name: "normal_create", + path: "/api/gtest/vtest/rtest", + obj: getObject("vTest", "rTest", "normal_create"), + }, + { + name: "namespaced_create", + namespace: "nstest", + path: "/api/gtest/vtest/namespaces/nstest/rtest", + obj: getObject("vTest", "rTest", "namespaced_create"), + }, + } + for _, tc := range tcs { + gv := &unversioned.GroupVersion{Group: "gtest", Version: "vtest"} + resource := &unversioned.APIResource{Name: "rtest", Namespaced: len(tc.namespace) != 0} + cl, srv, err := getClientServer(gv, func(w http.ResponseWriter, r *http.Request) { + if r.Method != "POST" { + t.Errorf("Create(%q) got HTTP method %s. wanted POST", tc.name, r.Method) + } + + if r.URL.Path != tc.path { + t.Errorf("Create(%q) got path %s. 
wanted %s", tc.name, r.URL.Path, tc.path) + } + + w.Header().Set("Content-Type", runtime.ContentTypeJSON) + data, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Errorf("Create(%q) unexpected error reading body: %v", tc.name, err) + w.WriteHeader(http.StatusInternalServerError) + return + } + + w.Write(data) + }) + if err != nil { + t.Errorf("unexpected error when creating client: %v", err) + continue + } + defer srv.Close() + + got, err := cl.Resource(resource, tc.namespace).Create(tc.obj) + if err != nil { + t.Errorf("unexpected error when creating %q: %v", tc.name, err) + continue + } + + if !reflect.DeepEqual(got, tc.obj) { + t.Errorf("Create(%q) want: %v\ngot: %v", tc.name, tc.obj, got) + } + } +} + +func TestUpdate(t *testing.T) { + tcs := []struct { + name string + namespace string + obj *runtime.Unstructured + path string + }{ + { + name: "normal_update", + path: "/api/gtest/vtest/rtest/normal_update", + obj: getObject("vTest", "rTest", "normal_update"), + }, + { + name: "namespaced_update", + namespace: "nstest", + path: "/api/gtest/vtest/namespaces/nstest/rtest/namespaced_update", + obj: getObject("vTest", "rTest", "namespaced_update"), + }, + } + for _, tc := range tcs { + gv := &unversioned.GroupVersion{Group: "gtest", Version: "vtest"} + resource := &unversioned.APIResource{Name: "rtest", Namespaced: len(tc.namespace) != 0} + cl, srv, err := getClientServer(gv, func(w http.ResponseWriter, r *http.Request) { + if r.Method != "PUT" { + t.Errorf("Update(%q) got HTTP method %s. wanted PUT", tc.name, r.Method) + } + + if r.URL.Path != tc.path { + t.Errorf("Update(%q) got path %s. wanted %s", tc.name, r.URL.Path, tc.path) + } + + w.Header().Set("Content-Type", runtime.ContentTypeJSON) + data, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Errorf("Update(%q) unexpected error reading body: %v", tc.name, err) + w.WriteHeader(http.StatusInternalServerError) + return + } + + w.Write(data) + }) + if err != nil { + t.Errorf("unexpected error when creating client: %v", err) + continue + } + defer srv.Close() + + got, err := cl.Resource(resource, tc.namespace).Update(tc.obj) + if err != nil { + t.Errorf("unexpected error when updating %q: %v", tc.name, err) + continue + } + + if !reflect.DeepEqual(got, tc.obj) { + t.Errorf("Update(%q) want: %v\ngot: %v", tc.name, tc.obj, got) + } + } +} + +func TestWatch(t *testing.T) { + tcs := []struct { + name string + namespace string + events []watch.Event + path string + }{ + { + name: "normal_watch", + path: "/api/gtest/vtest/watch/rtest", + events: []watch.Event{ + {Type: watch.Added, Object: getObject("vTest", "rTest", "normal_watch")}, + {Type: watch.Modified, Object: getObject("vTest", "rTest", "normal_watch")}, + {Type: watch.Deleted, Object: getObject("vTest", "rTest", "normal_watch")}, + }, + }, + { + name: "namespaced_watch", + namespace: "nstest", + path: "/api/gtest/vtest/watch/namespaces/nstest/rtest", + events: []watch.Event{ + {Type: watch.Added, Object: getObject("vTest", "rTest", "namespaced_watch")}, + {Type: watch.Modified, Object: getObject("vTest", "rTest", "namespaced_watch")}, + {Type: watch.Deleted, Object: getObject("vTest", "rTest", "namespaced_watch")}, + }, + }, + } + for _, tc := range tcs { + gv := &unversioned.GroupVersion{Group: "gtest", Version: "vtest"} + resource := &unversioned.APIResource{Name: "rtest", Namespaced: len(tc.namespace) != 0} + cl, srv, err := getClientServer(gv, func(w http.ResponseWriter, r *http.Request) { + if r.Method != "GET" { + t.Errorf("Watch(%q) got HTTP method %s. 
wanted GET", tc.name, r.Method) + } + + if r.URL.Path != tc.path { + t.Errorf("Watch(%q) got path %s. wanted %s", tc.name, r.URL.Path, tc.path) + } + + enc := versioned.NewEncoder(streaming.NewEncoder(w, dynamicCodec{}), dynamicCodec{}) + for _, e := range tc.events { + enc.Encode(&e) + } + }) + if err != nil { + t.Errorf("unexpected error when creating client: %v", err) + continue + } + defer srv.Close() + + watcher, err := cl.Resource(resource, tc.namespace).Watch(&v1.ListOptions{}) + if err != nil { + t.Errorf("unexpected error when watching %q: %v", tc.name, err) + continue + } + + for _, want := range tc.events { + got := <-watcher.ResultChan() + if !reflect.DeepEqual(got, want) { + t.Errorf("Watch(%q) want: %v\ngot: %v", tc.name, want, got) + } + } + } +} + +func TestPatch(t *testing.T) { + tcs := []struct { + name string + namespace string + patch []byte + want *runtime.Unstructured + path string + }{ + { + name: "normal_patch", + path: "/api/gtest/vtest/rtest/normal_patch", + patch: getJSON("vTest", "rTest", "normal_patch"), + want: getObject("vTest", "rTest", "normal_patch"), + }, + { + name: "namespaced_patch", + namespace: "nstest", + path: "/api/gtest/vtest/namespaces/nstest/rtest/namespaced_patch", + patch: getJSON("vTest", "rTest", "namespaced_patch"), + want: getObject("vTest", "rTest", "namespaced_patch"), + }, + } + for _, tc := range tcs { + gv := &unversioned.GroupVersion{Group: "gtest", Version: "vtest"} + resource := &unversioned.APIResource{Name: "rtest", Namespaced: len(tc.namespace) != 0} + cl, srv, err := getClientServer(gv, func(w http.ResponseWriter, r *http.Request) { + if r.Method != "PATCH" { + t.Errorf("Patch(%q) got HTTP method %s. wanted PATCH", tc.name, r.Method) + } + + if r.URL.Path != tc.path { + t.Errorf("Patch(%q) got path %s. wanted %s", tc.name, r.URL.Path, tc.path) + } + + content := r.Header.Get("Content-Type") + if content != string(api.StrategicMergePatchType) { + t.Errorf("Patch(%q) got Content-Type %s. wanted %s", tc.name, content, api.StrategicMergePatchType) + } + + data, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Errorf("Patch(%q) unexpected error reading body: %v", tc.name, err) + w.WriteHeader(http.StatusInternalServerError) + return + } + + w.Write(data) + }) + if err != nil { + t.Errorf("unexpected error when creating client: %v", err) + continue + } + defer srv.Close() + + got, err := cl.Resource(resource, tc.namespace).Patch(tc.name, api.StrategicMergePatchType, tc.patch) + if err != nil { + t.Errorf("unexpected error when patching %q: %v", tc.name, err) + continue + } + + if !reflect.DeepEqual(got, tc.want) { + t.Errorf("Patch(%q) want: %v\ngot: %v", tc.name, tc.want, got) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/dynamic/dynamic_util.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/dynamic/dynamic_util.go new file mode 100644 index 000000000000..094f838115b1 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/dynamic/dynamic_util.go @@ -0,0 +1,94 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dynamic + +import ( + "fmt" + + "k8s.io/kubernetes/pkg/api/meta" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/runtime" +) + +// VersionInterfaces provides an object converter and metadata +// accessor appropriate for use with unstructured objects. +func VersionInterfaces(unversioned.GroupVersion) (*meta.VersionInterfaces, error) { + return &meta.VersionInterfaces{ + ObjectConvertor: &runtime.UnstructuredObjectConverter{}, + MetadataAccessor: meta.NewAccessor(), + }, nil +} + +// NewDiscoveryRESTMapper returns a RESTMapper based on discovery information. +func NewDiscoveryRESTMapper(resources []*unversioned.APIResourceList, versionFunc meta.VersionInterfacesFunc) (*meta.DefaultRESTMapper, error) { + rm := meta.NewDefaultRESTMapper(nil, versionFunc) + for _, resourceList := range resources { + gv, err := unversioned.ParseGroupVersion(resourceList.GroupVersion) + if err != nil { + return nil, err + } + + for _, resource := range resourceList.APIResources { + gvk := gv.WithKind(resource.Kind) + scope := meta.RESTScopeRoot + if resource.Namespaced { + scope = meta.RESTScopeNamespace + } + rm.Add(gvk, scope) + } + } + return rm, nil +} + +// ObjectTyper provides an ObjectTyper implementation for +// runtime.Unstructured objects based on discovery information. +type ObjectTyper struct { + registered map[unversioned.GroupVersionKind]bool +} + +// NewObjectTyper constructs an ObjectTyper from discovery information. +func NewObjectTyper(resources []*unversioned.APIResourceList) (runtime.ObjectTyper, error) { + ot := &ObjectTyper{registered: make(map[unversioned.GroupVersionKind]bool)} + for _, resourceList := range resources { + gv, err := unversioned.ParseGroupVersion(resourceList.GroupVersion) + if err != nil { + return nil, err + } + + for _, resource := range resourceList.APIResources { + ot.registered[gv.WithKind(resource.Kind)] = true + } + } + return ot, nil +} + +// ObjectKinds returns a slice of one element with the +// group,version,kind of the provided object, or an error if the +// object is not *runtime.Unstructured or has no group,version,kind +// information. +func (ot *ObjectTyper) ObjectKinds(obj runtime.Object) ([]unversioned.GroupVersionKind, bool, error) { + if _, ok := obj.(*runtime.Unstructured); !ok { + return nil, false, fmt.Errorf("type %T is invalid for dynamic object typer", obj) + } + return []unversioned.GroupVersionKind{obj.GetObjectKind().GroupVersionKind()}, false, nil +} + +// Recognizes returns true if the provided group,version,kind was in +// the discovery information. +func (ot *ObjectTyper) Recognizes(gvk unversioned.GroupVersionKind) bool { + return ot.registered[gvk] +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/dynamic/dynamic_util_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/dynamic/dynamic_util_test.go new file mode 100644 index 000000000000..c6c315a8a7e8 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/typed/dynamic/dynamic_util_test.go @@ -0,0 +1,78 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dynamic + +import ( + "testing" + + "k8s.io/kubernetes/pkg/api/unversioned" +) + +func TestDiscoveryRESTMapper(t *testing.T) { + resources := []*unversioned.APIResourceList{ + { + GroupVersion: "test/beta1", + APIResources: []unversioned.APIResource{ + { + Name: "test_kinds", + Namespaced: true, + Kind: "test_kind", + }, + }, + }, + } + + gvk := unversioned.GroupVersionKind{ + Group: "test", + Version: "beta1", + Kind: "test_kind", + } + + mapper, err := NewDiscoveryRESTMapper(resources, VersionInterfaces) + if err != nil { + t.Fatalf("unexpected error creating mapper: %s", err) + } + + for _, res := range []unversioned.GroupVersionResource{ + { + Group: "test", + Version: "beta1", + Resource: "test_kinds", + }, + { + Version: "beta1", + Resource: "test_kinds", + }, + { + Group: "test", + Resource: "test_kinds", + }, + { + Resource: "test_kinds", + }, + } { + got, err := mapper.KindFor(res) + if err != nil { + t.Errorf("KindFor(%#v) unexpected error: %s", res, err) + continue + } + + if got != gvk { + t.Errorf("KindFor(%#v) = %#v; want %#v", res, got, gvk) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/clientset_adaption.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/adapters/internalclientset/clientset_adaption.go similarity index 64% rename from Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/clientset_adaption.go rename to Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/adapters/internalclientset/clientset_adaption.go index 2e1214ac1c5d..680cadc922c3 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/clientset_adaption.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/adapters/internalclientset/clientset_adaption.go @@ -17,17 +17,19 @@ limitations under the License. package internalclientset import ( + "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" + unversionedbatch "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/unversioned" + unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned" + unversionedextensions "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned" "k8s.io/kubernetes/pkg/client/typed/discovery" - unversionedcore "k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned" - unversionedextensions "k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned" "k8s.io/kubernetes/pkg/client/unversioned" ) -// FromUnversionedClient adapts a pkg/client/unversioned#Client to a Clientset. +// FromUnversionedClient adapts a unversioned.Client to a internalclientset.Clientset. // This function is temporary. We will remove it when everyone has moved to using // Clientset. New code should NOT use this function. 
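+// A minimal usage sketch (hedged; the field names are the Clientset fields
+// assigned in the function body below). The adapter is nil-safe: every typed
+// client, including the newly wired batch client, is constructed even when
+// the input is nil:
+//
+//	clientset := FromUnversionedClient(nil)
+//	_ = clientset.BatchClient // non-nil; wraps a nil RESTClient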
-func FromUnversionedClient(c *unversioned.Client) *Clientset { - var clientset Clientset +func FromUnversionedClient(c *unversioned.Client) *internalclientset.Clientset { + var clientset internalclientset.Clientset if c != nil { clientset.CoreClient = unversionedcore.New(c.RESTClient) } else { @@ -38,7 +40,11 @@ func FromUnversionedClient(c *unversioned.Client) *Clientset { } else { clientset.ExtensionsClient = unversionedextensions.New(nil) } - + if c != nil && c.BatchClient != nil { + clientset.BatchClient = unversionedbatch.New(c.BatchClient.RESTClient) + } else { + clientset.BatchClient = unversionedbatch.New(nil) + } if c != nil && c.DiscoveryClient != nil { clientset.DiscoveryClient = discovery.NewDiscoveryClient(c.DiscoveryClient.RESTClient) } else { diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/adapters/release_1_2/clientset_adaption.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/adapters/release_1_2/clientset_adaption.go new file mode 100644 index 000000000000..9e33ccd1978d --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/adapters/release_1_2/clientset_adaption.go @@ -0,0 +1,50 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package release_1_2 + +import ( + "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2" + v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/core/v1" + v1beta1extensions "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2/typed/extensions/v1beta1" + "k8s.io/kubernetes/pkg/client/typed/discovery" + "k8s.io/kubernetes/pkg/client/unversioned" +) + +// FromUnversionedClient adapts a unversioned.Client to a release_1_2.Clientset. +// This function is temporary. We will remove it when everyone has moved to using +// Clientset. New code should NOT use this function. 
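+// Sketch of the wiring contract (illustrative; field names follow the
+// assignments below): each typed client is built from the matching RESTClient
+// when the source client carries one, and from nil otherwise, so callers
+// never receive a nil typed client:
+//
+//	clientset := FromUnversionedClient(c) // c may itself be nil
+//	_ = clientset.CoreClient              // always non-nil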
+func FromUnversionedClient(c *unversioned.Client) *release_1_2.Clientset { + var clientset release_1_2.Clientset + if c != nil { + clientset.CoreClient = v1core.New(c.RESTClient) + } else { + clientset.CoreClient = v1core.New(nil) + } + if c != nil && c.ExtensionsClient != nil { + clientset.ExtensionsClient = v1beta1extensions.New(c.ExtensionsClient.RESTClient) + } else { + clientset.ExtensionsClient = v1beta1extensions.New(nil) + } + + if c != nil && c.DiscoveryClient != nil { + clientset.DiscoveryClient = discovery.NewDiscoveryClient(c.DiscoveryClient.RESTClient) + } else { + clientset.DiscoveryClient = discovery.NewDiscoveryClient(nil) + } + + return &clientset +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/adapters/release_1_3/clientset_adaption.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/adapters/release_1_3/clientset_adaption.go new file mode 100644 index 000000000000..3f4b86fb4b81 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/adapters/release_1_3/clientset_adaption.go @@ -0,0 +1,50 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package release_1_3 + +import ( + "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3" + v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/core/v1" + v1beta1extensions "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3/typed/extensions/v1beta1" + "k8s.io/kubernetes/pkg/client/typed/discovery" + "k8s.io/kubernetes/pkg/client/unversioned" +) + +// FromUnversionedClient adapts a unversioned.Client to a release_1_3.Clientset. +// This function is temporary. We will remove it when everyone has moved to using +// Clientset. New code should NOT use this function. +func FromUnversionedClient(c *unversioned.Client) *release_1_3.Clientset { + var clientset release_1_3.Clientset + if c != nil { + clientset.CoreClient = v1core.New(c.RESTClient) + } else { + clientset.CoreClient = v1core.New(nil) + } + if c != nil && c.ExtensionsClient != nil { + clientset.ExtensionsClient = v1beta1extensions.New(c.ExtensionsClient.RESTClient) + } else { + clientset.ExtensionsClient = v1beta1extensions.New(nil) + } + + if c != nil && c.DiscoveryClient != nil { + clientset.DiscoveryClient = discovery.NewDiscoveryClient(c.DiscoveryClient.RESTClient) + } else { + clientset.DiscoveryClient = discovery.NewDiscoveryClient(nil) + } + + return &clientset +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/apps.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/apps.go new file mode 100644 index 000000000000..1905c29c2467 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/apps.go @@ -0,0 +1,83 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apimachinery/registered" + "k8s.io/kubernetes/pkg/apis/apps" + "k8s.io/kubernetes/pkg/client/restclient" +) + +type AppsInterface interface { + PetSetNamespacer +} + +// AppsClient is used to interact with Kubernetes apps features. +type AppsClient struct { + *restclient.RESTClient +} + +func (c *AppsClient) PetSets(namespace string) PetSetInterface { + return newPetSet(c, namespace) +} + +func NewApps(c *restclient.Config) (*AppsClient, error) { + config := *c + if err := setAppsDefaults(&config); err != nil { + return nil, err + } + client, err := restclient.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &AppsClient{client}, nil +} + +func NewAppsOrDie(c *restclient.Config) *AppsClient { + client, err := NewApps(c) + if err != nil { + panic(err) + } + return client +} + +func setAppsDefaults(config *restclient.Config) error { + g, err := registered.Group(apps.GroupName) + if err != nil { + return err + } + config.APIPath = defaultAPIPath + if config.UserAgent == "" { + config.UserAgent = restclient.DefaultKubernetesUserAgent() + } + // TODO: Unconditionally set the config.Version, until we fix the config. + //if config.Version == "" { + copyGroupVersion := g.GroupVersion + config.GroupVersion = &copyGroupVersion + //} + + config.Codec = api.Codecs.LegacyCodec(*config.GroupVersion) + config.NegotiatedSerializer = api.Codecs + if config.QPS == 0 { + config.QPS = 5 + } + if config.Burst == 0 { + config.Burst = 10 + } + return nil +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/auth/clientauth_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/auth/clientauth_test.go new file mode 100644 index 000000000000..a99c5d94a030 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/auth/clientauth_test.go @@ -0,0 +1,69 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package auth_test + +import ( + "io/ioutil" + "os" + "reflect" + "testing" + + clientauth "k8s.io/kubernetes/pkg/client/unversioned/auth" +) + +func TestLoadFromFile(t *testing.T) { + loadAuthInfoTests := []struct { + authData string + authInfo *clientauth.Info + expectErr bool + }{ + { + `{"user": "user", "password": "pass"}`, + &clientauth.Info{User: "user", Password: "pass"}, + false, + }, + { + "", nil, true, + }, + } + for _, loadAuthInfoTest := range loadAuthInfoTests { + tt := loadAuthInfoTest + aifile, err := ioutil.TempFile("", "testAuthInfo") + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + if tt.authData != "missing" { + defer os.Remove(aifile.Name()) + defer aifile.Close() + _, err = aifile.WriteString(tt.authData) + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + } else { + aifile.Close() + os.Remove(aifile.Name()) + } + authInfo, err := clientauth.LoadFromFile(aifile.Name()) + gotErr := err != nil + if gotErr != tt.expectErr { + t.Errorf("expected error: %v, got error: %v", tt.expectErr, gotErr) + } + if !reflect.DeepEqual(authInfo, tt.authInfo) { + t.Errorf("Expected %v, got %v", tt.authInfo, authInfo) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/autoscaling.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/autoscaling.go index c3ec19810301..9e543c9d3aeb 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/autoscaling.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/autoscaling.go @@ -33,7 +33,7 @@ type AutoscalingClient struct { } func (c *AutoscalingClient) HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerInterface { - return newHorizontalPodAutoscalersV1(c, namespace) + return newHorizontalPodAutoscalers(c, namespace) } func NewAutoscaling(c *restclient.Config) (*AutoscalingClient, error) { @@ -73,6 +73,7 @@ func setAutoscalingDefaults(config *restclient.Config) error { //} config.Codec = api.Codecs.LegacyCodec(*config.GroupVersion) + config.NegotiatedSerializer = api.Codecs if config.QPS == 0 { config.QPS = 5 } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/batch.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/batch.go index a432e4c789c7..40fc49dc121f 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/batch.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/batch.go @@ -18,13 +18,16 @@ package unversioned import ( "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/apimachinery/registered" "k8s.io/kubernetes/pkg/apis/batch" + "k8s.io/kubernetes/pkg/apis/batch/v2alpha1" "k8s.io/kubernetes/pkg/client/restclient" ) type BatchInterface interface { JobsNamespacer + ScheduledJobsNamespacer } // BatchClient is used to interact with Kubernetes batch features.
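+// A hedged sketch of selecting the batch API version explicitly; it mirrors
+// the ContentConfig.GroupVersion check that NewBatchOrDie performs below and
+// assumes an existing, otherwise-valid restclient.Config named config:
+//
+//	cfg := *config
+//	cfg.ContentConfig.GroupVersion = &v2alpha1.SchemeGroupVersion
+//	client := NewBatchOrDie(&cfg) // routes to NewBatchV2Alpha1
+//	_ = client.ScheduledJobs("default")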
@@ -36,9 +39,25 @@ func (c *BatchClient) Jobs(namespace string) JobInterface { return newJobsV1(c, namespace) } +func (c *BatchClient) ScheduledJobs(namespace string) ScheduledJobInterface { + return newScheduledJobs(c, namespace) +} + func NewBatch(c *restclient.Config) (*BatchClient, error) { config := *c - if err := setBatchDefaults(&config); err != nil { + if err := setBatchDefaults(&config, nil); err != nil { + return nil, err + } + client, err := restclient.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &BatchClient{client}, nil +} + +func NewBatchV2Alpha1(c *restclient.Config) (*BatchClient, error) { + config := *c + if err := setBatchDefaults(&config, &v2alpha1.SchemeGroupVersion); err != nil { return nil, err } client, err := restclient.RESTClientFor(&config) @@ -49,14 +68,22 @@ func NewBatch(c *restclient.Config) (*BatchClient, error) { } func NewBatchOrDie(c *restclient.Config) *BatchClient { - client, err := NewBatch(c) + var ( + client *BatchClient + err error + ) + if c.ContentConfig.GroupVersion != nil && *c.ContentConfig.GroupVersion == v2alpha1.SchemeGroupVersion { + client, err = NewBatchV2Alpha1(c) + } else { + client, err = NewBatch(c) + } if err != nil { panic(err) } return client } -func setBatchDefaults(config *restclient.Config) error { +func setBatchDefaults(config *restclient.Config, gv *unversioned.GroupVersion) error { // if batch group is not registered, return an error g, err := registered.Group(batch.GroupName) if err != nil { @@ -69,10 +96,14 @@ // TODO: Unconditionally set the config.Version, until we fix the config. //if config.Version == "" { copyGroupVersion := g.GroupVersion + if gv != nil { + copyGroupVersion = *gv + } config.GroupVersion = &copyGroupVersion //} config.Codec = api.Codecs.LegacyCodec(*config.GroupVersion) + config.NegotiatedSerializer = api.Codecs if config.QPS == 0 { config.QPS = 5 } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/client.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/client.go index b897bc230c7a..df68040042f4 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/client.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/client.go @@ -47,6 +47,7 @@ type Interface interface { Autoscaling() AutoscalingInterface Batch() BatchInterface Extensions() ExtensionsInterface + Rbac() RbacInterface Discovery() discovery.DiscoveryInterface } @@ -119,6 +120,9 @@ type Client struct { *AutoscalingClient *BatchClient *ExtensionsClient + *AppsClient + *PolicyClient + *RbacClient *discovery.DiscoveryClient } @@ -156,6 +160,14 @@ func (c *Client) Extensions() ExtensionsInterface { return c.ExtensionsClient } +func (c *Client) Apps() AppsInterface { + return c.AppsClient +} + +func (c *Client) Rbac() RbacInterface { + return c.RbacClient +} + func (c *Client) Discovery() discovery.DiscoveryInterface { return c.DiscoveryClient } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/helpers_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/helpers_test.go new file mode 100644 index 000000000000..6952524c6b15 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/helpers_test.go @@ -0,0 +1,301 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved.
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package api + +import ( + "fmt" + "io/ioutil" + "os" + "reflect" + "testing" + + "github.com/ghodss/yaml" +) + +func newMergedConfig(certFile, certContent, keyFile, keyContent, caFile, caContent string, t *testing.T) Config { + if err := ioutil.WriteFile(certFile, []byte(certContent), 0644); err != nil { + t.Errorf("unexpected error: %v", err) + } + if err := ioutil.WriteFile(keyFile, []byte(keyContent), 0600); err != nil { + t.Errorf("unexpected error: %v", err) + } + if err := ioutil.WriteFile(caFile, []byte(caContent), 0644); err != nil { + t.Errorf("unexpected error: %v", err) + } + + return Config{ + AuthInfos: map[string]*AuthInfo{ + "red-user": {Token: "red-token", ClientCertificateData: []byte(certContent), ClientKeyData: []byte(keyContent)}, + "blue-user": {Token: "blue-token", ClientCertificate: certFile, ClientKey: keyFile}}, + Clusters: map[string]*Cluster{ + "cow-cluster": {Server: "http://cow.org:8080", CertificateAuthorityData: []byte(caContent)}, + "chicken-cluster": {Server: "http://chicken.org:8080", CertificateAuthority: caFile}}, + Contexts: map[string]*Context{ + "federal-context": {AuthInfo: "red-user", Cluster: "cow-cluster"}, + "shaker-context": {AuthInfo: "blue-user", Cluster: "chicken-cluster"}}, + CurrentContext: "federal-context", + } +} + +func TestMinifySuccess(t *testing.T) { + certFile, _ := ioutil.TempFile("", "") + defer os.Remove(certFile.Name()) + keyFile, _ := ioutil.TempFile("", "") + defer os.Remove(keyFile.Name()) + caFile, _ := ioutil.TempFile("", "") + defer os.Remove(caFile.Name()) + + mutatingConfig := newMergedConfig(certFile.Name(), "cert", keyFile.Name(), "key", caFile.Name(), "ca", t) + + if err := MinifyConfig(&mutatingConfig); err != nil { + t.Errorf("unexpected error: %v", err) + } + + if len(mutatingConfig.Contexts) > 1 { + t.Errorf("unexpected contexts: %v", mutatingConfig.Contexts) + } + if _, exists := mutatingConfig.Contexts["federal-context"]; !exists { + t.Errorf("missing context") + } + + if len(mutatingConfig.Clusters) > 1 { + t.Errorf("unexpected clusters: %v", mutatingConfig.Clusters) + } + if _, exists := mutatingConfig.Clusters["cow-cluster"]; !exists { + t.Errorf("missing cluster") + } + + if len(mutatingConfig.AuthInfos) > 1 { + t.Errorf("unexpected users: %v", mutatingConfig.AuthInfos) + } + if _, exists := mutatingConfig.AuthInfos["red-user"]; !exists { + t.Errorf("missing user") + } +} + +func TestMinifyMissingContext(t *testing.T) { + certFile, _ := ioutil.TempFile("", "") + defer os.Remove(certFile.Name()) + keyFile, _ := ioutil.TempFile("", "") + defer os.Remove(keyFile.Name()) + caFile, _ := ioutil.TempFile("", "") + defer os.Remove(caFile.Name()) + + mutatingConfig := newMergedConfig(certFile.Name(), "cert", keyFile.Name(), "key", caFile.Name(), "ca", t) + mutatingConfig.CurrentContext = "missing" + + errMsg := "cannot locate context missing" + + if err := MinifyConfig(&mutatingConfig); err == nil || err.Error() != errMsg { + t.Errorf("expected %v, got %v", errMsg, err) + } +} + +func 
TestMinifyMissingCluster(t *testing.T) { + certFile, _ := ioutil.TempFile("", "") + defer os.Remove(certFile.Name()) + keyFile, _ := ioutil.TempFile("", "") + defer os.Remove(keyFile.Name()) + caFile, _ := ioutil.TempFile("", "") + defer os.Remove(caFile.Name()) + + mutatingConfig := newMergedConfig(certFile.Name(), "cert", keyFile.Name(), "key", caFile.Name(), "ca", t) + delete(mutatingConfig.Clusters, mutatingConfig.Contexts[mutatingConfig.CurrentContext].Cluster) + + errMsg := "cannot locate cluster cow-cluster" + + if err := MinifyConfig(&mutatingConfig); err == nil || err.Error() != errMsg { + t.Errorf("expected %v, got %v", errMsg, err) + } +} + +func TestMinifyMissingAuthInfo(t *testing.T) { + certFile, _ := ioutil.TempFile("", "") + defer os.Remove(certFile.Name()) + keyFile, _ := ioutil.TempFile("", "") + defer os.Remove(keyFile.Name()) + caFile, _ := ioutil.TempFile("", "") + defer os.Remove(caFile.Name()) + + mutatingConfig := newMergedConfig(certFile.Name(), "cert", keyFile.Name(), "key", caFile.Name(), "ca", t) + delete(mutatingConfig.AuthInfos, mutatingConfig.Contexts[mutatingConfig.CurrentContext].AuthInfo) + + errMsg := "cannot locate user red-user" + + if err := MinifyConfig(&mutatingConfig); err == nil || err.Error() != errMsg { + t.Errorf("expected %v, got %v", errMsg, err) + } +} + +func TestFlattenSuccess(t *testing.T) { + certFile, _ := ioutil.TempFile("", "") + defer os.Remove(certFile.Name()) + keyFile, _ := ioutil.TempFile("", "") + defer os.Remove(keyFile.Name()) + caFile, _ := ioutil.TempFile("", "") + defer os.Remove(caFile.Name()) + + certData := "cert" + keyData := "key" + caData := "ca" + + unchangingCluster := "cow-cluster" + unchangingAuthInfo := "red-user" + changingCluster := "chicken-cluster" + changingAuthInfo := "blue-user" + + startingConfig := newMergedConfig(certFile.Name(), certData, keyFile.Name(), keyData, caFile.Name(), caData, t) + mutatingConfig := startingConfig + + if err := FlattenConfig(&mutatingConfig); err != nil { + t.Errorf("unexpected error: %v", err) + } + + if len(mutatingConfig.Contexts) != 2 { + t.Errorf("unexpected contexts: %v", mutatingConfig.Contexts) + } + if !reflect.DeepEqual(startingConfig.Contexts, mutatingConfig.Contexts) { + t.Errorf("expected %v, got %v", startingConfig.Contexts, mutatingConfig.Contexts) + } + + if len(mutatingConfig.Clusters) != 2 { + t.Errorf("unexpected clusters: %v", mutatingConfig.Clusters) + } + if !reflect.DeepEqual(startingConfig.Clusters[unchangingCluster], mutatingConfig.Clusters[unchangingCluster]) { + t.Errorf("expected %v, got %v", startingConfig.Clusters[unchangingCluster], mutatingConfig.Clusters[unchangingCluster]) + } + if len(mutatingConfig.Clusters[changingCluster].CertificateAuthority) != 0 { + t.Errorf("unexpected caFile") + } + if string(mutatingConfig.Clusters[changingCluster].CertificateAuthorityData) != caData { + t.Errorf("expected %v, got %v", caData, string(mutatingConfig.Clusters[changingCluster].CertificateAuthorityData)) + } + + if len(mutatingConfig.AuthInfos) != 2 { + t.Errorf("unexpected users: %v", mutatingConfig.AuthInfos) + } + if !reflect.DeepEqual(startingConfig.AuthInfos[unchangingAuthInfo], mutatingConfig.AuthInfos[unchangingAuthInfo]) { + t.Errorf("expected %v, got %v", startingConfig.AuthInfos[unchangingAuthInfo], mutatingConfig.AuthInfos[unchangingAuthInfo]) + } + if len(mutatingConfig.AuthInfos[changingAuthInfo].ClientCertificate) != 0 { + t.Errorf("unexpected certFile") + } + if string(mutatingConfig.AuthInfos[changingAuthInfo].ClientCertificateData) != 
certData { + t.Errorf("expected %v, got %v", certData, string(mutatingConfig.AuthInfos[changingAuthInfo].ClientCertificateData)) + } + if len(mutatingConfig.AuthInfos[changingAuthInfo].ClientKey) != 0 { + t.Errorf("unexpected keyFile") + } + if string(mutatingConfig.AuthInfos[changingAuthInfo].ClientKeyData) != keyData { + t.Errorf("expected %v, got %v", keyData, string(mutatingConfig.AuthInfos[changingAuthInfo].ClientKeyData)) + } + +} + +func Example_minifyAndShorten() { + certFile, _ := ioutil.TempFile("", "") + defer os.Remove(certFile.Name()) + keyFile, _ := ioutil.TempFile("", "") + defer os.Remove(keyFile.Name()) + caFile, _ := ioutil.TempFile("", "") + defer os.Remove(caFile.Name()) + + certData := "cert" + keyData := "key" + caData := "ca" + + config := newMergedConfig(certFile.Name(), certData, keyFile.Name(), keyData, caFile.Name(), caData, nil) + + MinifyConfig(&config) + ShortenConfig(&config) + + output, _ := yaml.Marshal(config) + fmt.Printf("%s", string(output)) + // Output: + // clusters: + // cow-cluster: + // LocationOfOrigin: "" + // certificate-authority-data: REDACTED + // server: http://cow.org:8080 + // contexts: + // federal-context: + // LocationOfOrigin: "" + // cluster: cow-cluster + // user: red-user + // current-context: federal-context + // preferences: {} + // users: + // red-user: + // LocationOfOrigin: "" + // client-certificate-data: REDACTED + // client-key-data: REDACTED + // token: red-token +} + +func TestShortenSuccess(t *testing.T) { + certFile, _ := ioutil.TempFile("", "") + defer os.Remove(certFile.Name()) + keyFile, _ := ioutil.TempFile("", "") + defer os.Remove(keyFile.Name()) + caFile, _ := ioutil.TempFile("", "") + defer os.Remove(caFile.Name()) + + certData := "cert" + keyData := "key" + caData := "ca" + + unchangingCluster := "chicken-cluster" + unchangingAuthInfo := "blue-user" + changingCluster := "cow-cluster" + changingAuthInfo := "red-user" + + startingConfig := newMergedConfig(certFile.Name(), certData, keyFile.Name(), keyData, caFile.Name(), caData, t) + mutatingConfig := startingConfig + + ShortenConfig(&mutatingConfig) + + if len(mutatingConfig.Contexts) != 2 { + t.Errorf("unexpected contexts: %v", mutatingConfig.Contexts) + } + if !reflect.DeepEqual(startingConfig.Contexts, mutatingConfig.Contexts) { + t.Errorf("expected %v, got %v", startingConfig.Contexts, mutatingConfig.Contexts) + } + + redacted := string(redactedBytes) + if len(mutatingConfig.Clusters) != 2 { + t.Errorf("unexpected clusters: %v", mutatingConfig.Clusters) + } + if !reflect.DeepEqual(startingConfig.Clusters[unchangingCluster], mutatingConfig.Clusters[unchangingCluster]) { + t.Errorf("expected %v, got %v", startingConfig.Clusters[unchangingCluster], mutatingConfig.Clusters[unchangingCluster]) + } + if string(mutatingConfig.Clusters[changingCluster].CertificateAuthorityData) != redacted { + t.Errorf("expected %v, got %v", redacted, string(mutatingConfig.Clusters[changingCluster].CertificateAuthorityData)) + } + + if len(mutatingConfig.AuthInfos) != 2 { + t.Errorf("unexpected users: %v", mutatingConfig.AuthInfos) + } + if !reflect.DeepEqual(startingConfig.AuthInfos[unchangingAuthInfo], mutatingConfig.AuthInfos[unchangingAuthInfo]) { + t.Errorf("expected %v, got %v", startingConfig.AuthInfos[unchangingAuthInfo], mutatingConfig.AuthInfos[unchangingAuthInfo]) + } + if string(mutatingConfig.AuthInfos[changingAuthInfo].ClientCertificateData) != redacted { + t.Errorf("expected %v, got %v", redacted, string(mutatingConfig.AuthInfos[changingAuthInfo].ClientCertificateData)) 
+ } + if string(mutatingConfig.AuthInfos[changingAuthInfo].ClientKeyData) != redacted { + t.Errorf("expected %v, got %v", redacted, string(mutatingConfig.AuthInfos[changingAuthInfo].ClientKeyData)) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/latest/latest.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/latest/latest.go index 90d5c5380b1c..48cedb82ea50 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/latest/latest.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/latest/latest.go @@ -40,9 +40,15 @@ const OldestVersion = "v1" // with a set of versions to choose. var Versions = []string{"v1"} -var Codec = versioning.NewCodecForScheme( - api.Scheme, - json.NewYAMLSerializer(json.DefaultMetaFactory, api.Scheme, runtime.ObjectTyperToTyper(api.Scheme)), - []unversioned.GroupVersion{{Version: Version}}, - []unversioned.GroupVersion{{Version: runtime.APIVersionInternal}}, -) +var Codec runtime.Codec + +func init() { + yamlSerializer := json.NewYAMLSerializer(json.DefaultMetaFactory, api.Scheme, api.Scheme) + Codec = versioning.NewCodecForScheme( + api.Scheme, + yamlSerializer, + yamlSerializer, + []unversioned.GroupVersion{{Version: Version}}, + []unversioned.GroupVersion{{Version: runtime.APIVersionInternal}}, + ) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/register.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/register.go index e4e23998abb8..f26a6cd1b11b 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/register.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/register.go @@ -35,9 +35,9 @@ func init() { } func (obj *Config) GetObjectKind() unversioned.ObjectKind { return obj } -func (obj *Config) SetGroupVersionKind(gvk *unversioned.GroupVersionKind) { +func (obj *Config) SetGroupVersionKind(gvk unversioned.GroupVersionKind) { obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind() } -func (obj *Config) GroupVersionKind() *unversioned.GroupVersionKind { +func (obj *Config) GroupVersionKind() unversioned.GroupVersionKind { return unversioned.FromAPIVersionAndKind(obj.APIVersion, obj.Kind) } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/types.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/types.go index 7e2bfcfa8b8c..56b44e8f42ab 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/types.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/types.go @@ -32,7 +32,7 @@ type Config struct { // DEPRECATED: APIVersion is the preferred api version for communicating with the kubernetes cluster (v1, v2, etc). // Because a cluster can run multiple API groups and potentially multiple versions of each, it no longer makes sense to specify // a single value for the cluster version. - // This field isnt really needed anyway, so we are deprecating it without replacement. + // This field isn't really needed anyway, so we are deprecating it without replacement. // It will be ignored if it is present. 
APIVersion string `json:"apiVersion,omitempty"` // Preferences holds general information to be used for cli interactions @@ -88,10 +88,14 @@ type AuthInfo struct { ClientKeyData []byte `json:"client-key-data,omitempty"` // Token is the bearer token for authentication to the kubernetes cluster. Token string `json:"token,omitempty"` + // Impersonate is the username to act-as. + Impersonate string `json:"act-as,omitempty"` // Username is the username for basic authentication to the kubernetes cluster. Username string `json:"username,omitempty"` // Password is the password for basic authentication to the kubernetes cluster. Password string `json:"password,omitempty"` + // AuthProvider specifies a custom authentication plugin for the kubernetes cluster. + AuthProvider *AuthProviderConfig `json:"auth-provider,omitempty"` // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields Extensions map[string]runtime.Object `json:"extensions,omitempty"` } @@ -110,6 +114,12 @@ type Context struct { Extensions map[string]runtime.Object `json:"extensions,omitempty"` } + +// AuthProviderConfig holds the configuration for a specified auth provider. +type AuthProviderConfig struct { + Name string `json:"name"` + Config map[string]string `json:"config,omitempty"` +} + // NewConfig is a convenience function that returns a new Config object with non-nil maps func NewConfig() *Config { return &Config{ diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/types_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/types_test.go new file mode 100644 index 000000000000..6c79728f4c33 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/types_test.go @@ -0,0 +1,135 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package api + +import ( + "fmt" + + "github.com/ghodss/yaml" +) + +func Example_emptyConfig() { + defaultConfig := NewConfig() + + output, err := yaml.Marshal(defaultConfig) + if err != nil { + fmt.Printf("Unexpected error: %v", err) + } + + fmt.Printf("%v", string(output)) + // Output: + // clusters: {} + // contexts: {} + // current-context: "" + // preferences: {} + // users: {} +} + +func Example_ofOptionsConfig() { + defaultConfig := NewConfig() + defaultConfig.Preferences.Colors = true + defaultConfig.Clusters["alfa"] = &Cluster{ + Server: "https://alfa.org:8080", + InsecureSkipTLSVerify: true, + CertificateAuthority: "path/to/my/cert-ca-filename", + } + defaultConfig.Clusters["bravo"] = &Cluster{ + Server: "https://bravo.org:8080", + InsecureSkipTLSVerify: false, + } + defaultConfig.AuthInfos["white-mage-via-cert"] = &AuthInfo{ + ClientCertificate: "path/to/my/client-cert-filename", + ClientKey: "path/to/my/client-key-filename", + } + defaultConfig.AuthInfos["red-mage-via-token"] = &AuthInfo{ + Token: "my-secret-token", + } + defaultConfig.AuthInfos["black-mage-via-auth-provider"] = &AuthInfo{ + AuthProvider: &AuthProviderConfig{ + Name: "gcp", + Config: map[string]string{ + "foo": "bar", + "token": "s3cr3t-t0k3n", + }, + }, + } + defaultConfig.Contexts["bravo-as-black-mage"] = &Context{ + Cluster: "bravo", + AuthInfo: "black-mage-via-auth-provider", + Namespace: "yankee", + } + defaultConfig.Contexts["alfa-as-black-mage"] = &Context{ + Cluster: "alfa", + AuthInfo: "black-mage-via-auth-provider", + Namespace: "zulu", + } + defaultConfig.Contexts["alfa-as-white-mage"] = &Context{ + Cluster: "alfa", + AuthInfo: "white-mage-via-cert", + } + defaultConfig.CurrentContext = "alfa-as-white-mage" + + output, err := yaml.Marshal(defaultConfig) + if err != nil { + fmt.Printf("Unexpected error: %v", err) + } + + fmt.Printf("%v", string(output)) + // Output: + // clusters: + // alfa: + // LocationOfOrigin: "" + // certificate-authority: path/to/my/cert-ca-filename + // insecure-skip-tls-verify: true + // server: https://alfa.org:8080 + // bravo: + // LocationOfOrigin: "" + // server: https://bravo.org:8080 + // contexts: + // alfa-as-black-mage: + // LocationOfOrigin: "" + // cluster: alfa + // namespace: zulu + // user: black-mage-via-auth-provider + // alfa-as-white-mage: + // LocationOfOrigin: "" + // cluster: alfa + // user: white-mage-via-cert + // bravo-as-black-mage: + // LocationOfOrigin: "" + // cluster: bravo + // namespace: yankee + // user: black-mage-via-auth-provider + // current-context: alfa-as-white-mage + // preferences: + // colors: true + // users: + // black-mage-via-auth-provider: + // LocationOfOrigin: "" + // auth-provider: + // config: + // foo: bar + // token: s3cr3t-t0k3n + // name: gcp + // red-mage-via-token: + // LocationOfOrigin: "" + // token: my-secret-token + // white-mage-via-cert: + // LocationOfOrigin: "" + // client-certificate: path/to/my/client-cert-filename + // client-key: path/to/my/client-key-filename +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/v1/register.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/v1/register.go index edf9fe1a70a5..e5c9e88ef99a 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/v1/register.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/v1/register.go @@ -32,9 +32,9 @@ func init() { } func (obj *Config) GetObjectKind() unversioned.ObjectKind { return obj } -func 
(obj *Config) SetGroupVersionKind(gvk *unversioned.GroupVersionKind) { +func (obj *Config) SetGroupVersionKind(gvk unversioned.GroupVersionKind) { obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind() } -func (obj *Config) GroupVersionKind() *unversioned.GroupVersionKind { +func (obj *Config) GroupVersionKind() unversioned.GroupVersionKind { return unversioned.FromAPIVersionAndKind(obj.APIVersion, obj.Kind) } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/v1/types.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/v1/types.go index c9b4ab56ba40..46b5dbaa72d3 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/v1/types.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/v1/types.go @@ -31,7 +31,7 @@ type Config struct { // DEPRECATED: APIVersion is the preferred api version for communicating with the kubernetes cluster (v1, v2, etc). // Because a cluster can run multiple API groups and potentially multiple versions of each, it no longer makes sense to specify // a single value for the cluster version. - // This field isnt really needed anyway, so we are deprecating it without replacement. + // This field isn't really needed anyway, so we are deprecating it without replacement. // It will be ignored if it is present. APIVersion string `json:"apiVersion,omitempty"` // Preferences holds general information to be used for cli interactions @@ -82,10 +82,14 @@ type AuthInfo struct { ClientKeyData []byte `json:"client-key-data,omitempty"` // Token is the bearer token for authentication to the kubernetes cluster. Token string `json:"token,omitempty"` + // Impersonate is the username to impersonate. The name matches the flag. + Impersonate string `json:"as,omitempty"` // Username is the username for basic authentication to the kubernetes cluster. Username string `json:"username,omitempty"` // Password is the password for basic authentication to the kubernetes cluster. Password string `json:"password,omitempty"` + // AuthProvider specifies a custom authentication plugin for the kubernetes cluster. + AuthProvider *AuthProviderConfig `json:"auth-provider,omitempty"` // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields Extensions []NamedExtension `json:"extensions,omitempty"` } @@ -133,3 +137,9 @@ type NamedExtension struct { // Extension holds the extension information Extension runtime.RawExtension `json:"extension"` } + +// AuthProviderConfig holds the configuration for a specified auth provider.
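+// For orientation, an illustrative kubeconfig fragment (hedged; the key names
+// follow the json tags on AuthInfo above and on this struct below, and the
+// values are made up):
+//
+//	users:
+//	- name: example-user
+//	  user:
+//	    auth-provider:
+//	      name: gcp
+//	      config:
+//	        foo: bar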
+type AuthProviderConfig struct { + Name string `json:"name"` + Config map[string]string `json:"config"` +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/client_config.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/client_config.go index 9ff259edffeb..c83f315a3187 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/client_config.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/client_config.go @@ -41,7 +41,7 @@ var ( // EnvVarCluster allows overriding the DefaultCluster using an envvar for the server name EnvVarCluster = clientcmdapi.Cluster{Server: os.Getenv("KUBERNETES_MASTER")} - DefaultClientConfig = DirectClientConfig{*clientcmdapi.NewConfig(), "", &ConfigOverrides{}, nil} + DefaultClientConfig = DirectClientConfig{*clientcmdapi.NewConfig(), "", &ConfigOverrides{}, nil, NewDefaultClientConfigLoadingRules()} ) // ClientConfig is used to make it easy to get an api server client @@ -54,29 +54,34 @@ type ClientConfig interface { // result of all overrides and a boolean indicating if it was // overridden Namespace() (string, bool, error) + // ConfigAccess returns the rules for loading/persisting the config. + ConfigAccess() ConfigAccess } +type PersistAuthProviderConfigForUser func(user string) restclient.AuthProviderConfigPersister + // DirectClientConfig is a ClientConfig interface that is backed by a clientcmdapi.Config, options overrides, and an optional fallbackReader for auth information type DirectClientConfig struct { config clientcmdapi.Config contextName string overrides *ConfigOverrides fallbackReader io.Reader + configAccess ConfigAccess } // NewDefaultClientConfig creates a DirectClientConfig using the config.CurrentContext as the context name func NewDefaultClientConfig(config clientcmdapi.Config, overrides *ConfigOverrides) ClientConfig { - return &DirectClientConfig{config, config.CurrentContext, overrides, nil} + return &DirectClientConfig{config, config.CurrentContext, overrides, nil, NewDefaultClientConfigLoadingRules()} } // NewNonInteractiveClientConfig creates a DirectClientConfig using the passed context name and does not have a fallback reader for auth information -func NewNonInteractiveClientConfig(config clientcmdapi.Config, contextName string, overrides *ConfigOverrides) ClientConfig { - return &DirectClientConfig{config, contextName, overrides, nil} +func NewNonInteractiveClientConfig(config clientcmdapi.Config, contextName string, overrides *ConfigOverrides, configAccess ConfigAccess) ClientConfig { + return &DirectClientConfig{config, contextName, overrides, nil, configAccess} } // NewInteractiveClientConfig creates a DirectClientConfig using the passed context name and a reader in case auth information is not provided via files or flags -func NewInteractiveClientConfig(config clientcmdapi.Config, contextName string, overrides *ConfigOverrides, fallbackReader io.Reader) ClientConfig { - return &DirectClientConfig{config, contextName, overrides, fallbackReader} +func NewInteractiveClientConfig(config clientcmdapi.Config, contextName string, overrides *ConfigOverrides, fallbackReader io.Reader, configAccess ConfigAccess) ClientConfig { + return &DirectClientConfig{config, contextName, overrides, fallbackReader, configAccess} } func (config *DirectClientConfig) RawConfig() (clientcmdapi.Config, error) { @@ -99,6 +104,9 @@ func (config *DirectClientConfig) ClientConfig() (*restclient.Config, error) { u.Fragment = "" 
clientConfig.Host = u.String() } + if len(configAuthInfo.Impersonate) > 0 { + clientConfig.Impersonate = configAuthInfo.Impersonate + } // only try to read the auth information if we are secure if restclient.IsConfigTransportTLS(*clientConfig) { @@ -107,7 +115,11 @@ // mergo is a first write wins for map value and a last writing wins for interface values // NOTE: This behavior changed with https://github.com/imdario/mergo/commit/d304790b2ed594794496464fadd89d2bb266600a. // Our mergo.Merge version is older than this change. - userAuthPartialConfig, err := getUserIdentificationPartialConfig(configAuthInfo, config.fallbackReader) + var persister restclient.AuthProviderConfigPersister + if config.configAccess != nil { + persister = PersisterForUser(config.configAccess, config.getAuthInfoName()) + } + userAuthPartialConfig, err := getUserIdentificationPartialConfig(configAuthInfo, config.fallbackReader, persister) if err != nil { return nil, err } @@ -149,13 +161,16 @@ getServerIdentificationPartialConfig(configAuthInfo clientcmdapi.AuthInfo, // 2. configAuthInfo.auth-path (this file can contain information that conflicts with #1, and we want #1 to win the priority) // 3. if there is not enough information to identify the user, try to load the ~/.kubernetes_auth file // 4. if there is not enough information to identify the user, prompt if possible -func getUserIdentificationPartialConfig(configAuthInfo clientcmdapi.AuthInfo, fallbackReader io.Reader) (*restclient.Config, error) { +func getUserIdentificationPartialConfig(configAuthInfo clientcmdapi.AuthInfo, fallbackReader io.Reader, persistAuthConfig restclient.AuthProviderConfigPersister) (*restclient.Config, error) { mergedConfig := &restclient.Config{} // blindly overwrite existing values based on precedence if len(configAuthInfo.Token) > 0 { mergedConfig.BearerToken = configAuthInfo.Token } + if len(configAuthInfo.Impersonate) > 0 { + mergedConfig.Impersonate = configAuthInfo.Impersonate + } if len(configAuthInfo.ClientCertificate) > 0 || len(configAuthInfo.ClientCertificateData) > 0 { mergedConfig.CertFile = configAuthInfo.ClientCertificate mergedConfig.CertData = configAuthInfo.ClientCertificateData @@ -166,6 +181,10 @@ func getUserIdentificationPartialConfig(configAuthInfo clientcmdapi.AuthInfo, fa mergedConfig.Username = configAuthInfo.Username mergedConfig.Password = configAuthInfo.Password } + if configAuthInfo.AuthProvider != nil { + mergedConfig.AuthProvider = configAuthInfo.AuthProvider + mergedConfig.AuthConfigPersister = persistAuthConfig + } // if there still isn't enough information to authenticate the user, try prompting if !canIdentifyUser(*mergedConfig) && (fallbackReader != nil) { @@ -206,11 +225,11 @@ func makeServerIdentificationConfig(info clientauth.Info) restclient.Config { func canIdentifyUser(config restclient.Config) bool { return len(config.Username) > 0 || (len(config.CertFile) > 0 || len(config.CertData) > 0) || - len(config.BearerToken) > 0 - + len(config.BearerToken) > 0 || + config.AuthProvider != nil } -// Namespace implements KubeConfig +// Namespace implements ClientConfig func (config *DirectClientConfig) Namespace() (string, bool, error) { if err := config.ConfirmUsable(); err != nil { return "", false, err } @@ -229,6 +248,11 @@ func (config *DirectClientConfig) Namespace() (string, bool, error) { return configContext.Namespace, overridden, nil } + +// ConfigAccess implements ClientConfig +func (config *DirectClientConfig) 
ConfigAccess() ConfigAccess { + return config.configAccess +} + // ConfirmUsable looks at a particular context and determines if that particular part of the config is usable. There might still be errors in the config, // but no errors in the sections requested or referenced. It does not return early so that it can find as many errors as possible. func (config *DirectClientConfig) ConfirmUsable() error { @@ -305,6 +329,14 @@ func (config *DirectClientConfig) getCluster() clientcmdapi.Cluster { mergo.Merge(&mergedClusterInfo, configClusterInfo) } mergo.Merge(&mergedClusterInfo, config.overrides.ClusterInfo) + // An override of --insecure-skip-tls-verify=true and no accompanying CA/CA data should clear already-set CA/CA data + // otherwise, a kubeconfig containing a CA reference would return an error that "CA and insecure-skip-tls-verify couldn't both be set" + caLen := len(config.overrides.ClusterInfo.CertificateAuthority) + caDataLen := len(config.overrides.ClusterInfo.CertificateAuthorityData) + if config.overrides.ClusterInfo.InsecureSkipTLSVerify && caLen == 0 && caDataLen == 0 { + mergedClusterInfo.CertificateAuthority = "" + mergedClusterInfo.CertificateAuthorityData = nil + } return mergedClusterInfo } @@ -337,6 +369,10 @@ func (inClusterClientConfig) Namespace() (string, error) { return "default", nil } +func (inClusterClientConfig) ConfigAccess() ConfigAccess { + return NewDefaultClientConfigLoadingRules() +} + // Possible returns true if loading an inside-kubernetes-cluster is possible. func (inClusterClientConfig) Possible() bool { fi, err := os.Stat("/var/run/secrets/kubernetes.io/serviceaccount/token") @@ -359,8 +395,17 @@ func BuildConfigFromFlags(masterUrl, kubeconfigPath string) (*restclient.Config, } glog.Warning("error creating inClusterConfig, falling back to default config: ", err) } - return NewNonInteractiveDeferredLoadingClientConfig( &ClientConfigLoadingRules{ExplicitPath: kubeconfigPath}, &ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: masterUrl}}).ClientConfig() } + +// BuildConfigFromKubeconfigGetter is a helper function that builds configs from a master +// url and a kubeconfigGetter. +func BuildConfigFromKubeconfigGetter(masterUrl string, kubeconfigGetter KubeconfigGetter) (*restclient.Config, error) { + // TODO: We do not need a DeferredLoader here. Refactor code and see if we can use DirectClientConfig here. + cc := NewNonInteractiveDeferredLoadingClientConfig( + &ClientConfigGetter{kubeconfigGetter: kubeconfigGetter}, + &ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: masterUrl}}) + return cc.ClientConfig() +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/client_config_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/client_config_test.go new file mode 100644 index 000000000000..32c6a293bda9 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/client_config_test.go @@ -0,0 +1,290 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package clientcmd + +import ( + "reflect" + "testing" + + "github.com/imdario/mergo" + "k8s.io/kubernetes/pkg/client/restclient" + clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api" +) + +func TestOldMergoLib(t *testing.T) { + type T struct { + X string + } + dst := T{X: "one"} + src := T{X: "two"} + mergo.Merge(&dst, &src) + if dst.X != "two" { + // mergo.Merge changed in an incompatible way with + // + // https://github.com/imdario/mergo/commit/d304790b2ed594794496464fadd89d2bb266600a + // + // We have to stay with the old version which still does eager + // copying from src to dst in structs. + t.Errorf("mergo.Merge library found with incompatible, new behavior") + } +} + +func createValidTestConfig() *clientcmdapi.Config { + const ( + server = "https://anything.com:8080" + token = "the-token" + ) + + config := clientcmdapi.NewConfig() + config.Clusters["clean"] = &clientcmdapi.Cluster{ + Server: server, + } + config.AuthInfos["clean"] = &clientcmdapi.AuthInfo{ + Token: token, + } + config.Contexts["clean"] = &clientcmdapi.Context{ + Cluster: "clean", + AuthInfo: "clean", + } + config.CurrentContext = "clean" + + return config +} + +func createCAValidTestConfig() *clientcmdapi.Config { + + config := createValidTestConfig() + config.Clusters["clean"].CertificateAuthorityData = []byte{0, 0} + return config +} + +func TestInsecureOverridesCA(t *testing.T) { + config := createCAValidTestConfig() + clientBuilder := NewNonInteractiveClientConfig(*config, "clean", &ConfigOverrides{ + ClusterInfo: clientcmdapi.Cluster{ + InsecureSkipTLSVerify: true, + }, + }, nil) + + actualCfg, err := clientBuilder.ClientConfig() + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + + matchBoolArg(true, actualCfg.Insecure, t) + matchStringArg("", actualCfg.TLSClientConfig.CAFile, t) + matchByteArg(nil, actualCfg.TLSClientConfig.CAData, t) +} + +func TestMergeContext(t *testing.T) { + const namespace = "overriden-namespace" + + config := createValidTestConfig() + clientBuilder := NewNonInteractiveClientConfig(*config, "clean", &ConfigOverrides{}, nil) + + _, overridden, err := clientBuilder.Namespace() + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + + if overridden { + t.Error("Expected namespace to not be overridden") + } + + clientBuilder = NewNonInteractiveClientConfig(*config, "clean", &ConfigOverrides{ + Context: clientcmdapi.Context{ + Namespace: namespace, + }, + }, nil) + + actual, overridden, err := clientBuilder.Namespace() + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + + if !overridden { + t.Error("Expected namespace to be overridden") + } + + matchStringArg(namespace, actual, t) +} + +func TestCertificateData(t *testing.T) { + caData := []byte("ca-data") + certData := []byte("cert-data") + keyData := []byte("key-data") + + config := clientcmdapi.NewConfig() + config.Clusters["clean"] = &clientcmdapi.Cluster{ + Server: "https://localhost:8443", + CertificateAuthorityData: caData, + } + config.AuthInfos["clean"] = &clientcmdapi.AuthInfo{ + ClientCertificateData: certData, + ClientKeyData: keyData, + } + config.Contexts["clean"] = &clientcmdapi.Context{ + Cluster: "clean", + AuthInfo: "clean", + } + config.CurrentContext = "clean" + + clientBuilder := NewNonInteractiveClientConfig(*config, "clean", &ConfigOverrides{}, nil) + + clientConfig, err := clientBuilder.ClientConfig() + if err != nil { + t.Fatalf("Unexpected error: %v", err) + 
} + + // Make sure cert data gets into config (will override file paths) + matchByteArg(caData, clientConfig.TLSClientConfig.CAData, t) + matchByteArg(certData, clientConfig.TLSClientConfig.CertData, t) + matchByteArg(keyData, clientConfig.TLSClientConfig.KeyData, t) +} + +func TestBasicAuthData(t *testing.T) { + username := "myuser" + password := "mypass" + + config := clientcmdapi.NewConfig() + config.Clusters["clean"] = &clientcmdapi.Cluster{ + Server: "https://localhost:8443", + } + config.AuthInfos["clean"] = &clientcmdapi.AuthInfo{ + Username: username, + Password: password, + } + config.Contexts["clean"] = &clientcmdapi.Context{ + Cluster: "clean", + AuthInfo: "clean", + } + config.CurrentContext = "clean" + + clientBuilder := NewNonInteractiveClientConfig(*config, "clean", &ConfigOverrides{}, nil) + + clientConfig, err := clientBuilder.ClientConfig() + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + + // Make sure basic auth data gets into config + matchStringArg(username, clientConfig.Username, t) + matchStringArg(password, clientConfig.Password, t) +} + +func TestCreateClean(t *testing.T) { + config := createValidTestConfig() + clientBuilder := NewNonInteractiveClientConfig(*config, "clean", &ConfigOverrides{}, nil) + + clientConfig, err := clientBuilder.ClientConfig() + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + + matchStringArg(config.Clusters["clean"].Server, clientConfig.Host, t) + matchStringArg("", clientConfig.APIPath, t) + matchBoolArg(config.Clusters["clean"].InsecureSkipTLSVerify, clientConfig.Insecure, t) + matchStringArg(config.AuthInfos["clean"].Token, clientConfig.BearerToken, t) +} + +func TestCreateCleanWithPrefix(t *testing.T) { + tt := []struct { + server string + host string + }{ + {"https://anything.com:8080/foo/bar", "https://anything.com:8080/foo/bar"}, + {"http://anything.com:8080/foo/bar", "http://anything.com:8080/foo/bar"}, + {"http://anything.com:8080/foo/bar/", "http://anything.com:8080/foo/bar/"}, + {"http://anything.com:8080/", "http://anything.com:8080/"}, + {"http://anything.com:8080//", "http://anything.com:8080//"}, + {"anything.com:8080/foo/bar", "anything.com:8080/foo/bar"}, + {"anything.com:8080", "anything.com:8080"}, + {"anything.com", "anything.com"}, + {"anything", "anything"}, + } + + // WARNING: EnvVarCluster.Server is set during package loading time and can not be overridden by os.Setenv inside this test + EnvVarCluster.Server = "" + tt = append(tt, struct{ server, host string }{"", "http://localhost:8080"}) + + for _, tc := range tt { + config := createValidTestConfig() + + cleanConfig := config.Clusters["clean"] + cleanConfig.Server = tc.server + config.Clusters["clean"] = cleanConfig + + clientBuilder := NewNonInteractiveClientConfig(*config, "clean", &ConfigOverrides{}, nil) + + clientConfig, err := clientBuilder.ClientConfig() + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + + matchStringArg(tc.host, clientConfig.Host, t) + } +} + +func TestCreateCleanDefault(t *testing.T) { + config := createValidTestConfig() + clientBuilder := NewDefaultClientConfig(*config, &ConfigOverrides{}) + + clientConfig, err := clientBuilder.ClientConfig() + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + + matchStringArg(config.Clusters["clean"].Server, clientConfig.Host, t) + matchBoolArg(config.Clusters["clean"].InsecureSkipTLSVerify, clientConfig.Insecure, t) + matchStringArg(config.AuthInfos["clean"].Token, clientConfig.BearerToken, t) +} + +func TestCreateMissingContext(t *testing.T) { + 
config := createValidTestConfig() + clientBuilder := NewNonInteractiveClientConfig(*config, "not-present", &ConfigOverrides{}, nil) + + clientConfig, err := clientBuilder.ClientConfig() + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + + expectedConfig := &restclient.Config{Host: clientConfig.Host} + + if !reflect.DeepEqual(expectedConfig, clientConfig) { + t.Errorf("Expected %#v, got %#v", expectedConfig, clientConfig) + } +} + +func matchBoolArg(expected, got bool, t *testing.T) { + if expected != got { + t.Errorf("Expected %v, got %v", expected, got) + } +} + +func matchStringArg(expected, got string, t *testing.T) { + if expected != got { + t.Errorf("Expected %q, got %q", expected, got) + } +} + +func matchByteArg(expected, got []byte, t *testing.T) { + if !reflect.DeepEqual(expected, got) { + t.Errorf("Expected %v, got %v", expected, got) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/config.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/config.go new file mode 100644 index 000000000000..049fc39213c3 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/config.go @@ -0,0 +1,419 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package clientcmd + +import ( + "errors" + "os" + "path" + "path/filepath" + "reflect" + + "github.com/golang/glog" + + "k8s.io/kubernetes/pkg/client/restclient" + clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api" +) + +// ConfigAccess is used by subcommands and methods in this package to load and modify the appropriate config files +type ConfigAccess interface { + // GetLoadingPrecedence returns the slice of files that should be used for loading and inspecting the config + GetLoadingPrecedence() []string + // GetStartingConfig returns the config that subcommands should be operating against. It may or may not be merged depending on loading rules + GetStartingConfig() (*clientcmdapi.Config, error) + // GetDefaultFilename returns the name of the file you should write into (create if necessary), if you're trying to create a new stanza as opposed to updating an existing one. + GetDefaultFilename() string + // IsExplicitFile indicates whether or not this command is interested in exactly one file. This implementation only ever does that via a flag, but implementations that handle local, global, and flags may have more + IsExplicitFile() bool + // GetExplicitFile returns the particular file this command is operating against. 
This implementation only ever has one, but implementations that handle local, global, and flags may have more + GetExplicitFile() string +} + +type PathOptions struct { + // GlobalFile is the full path to the file to load as the global (final) option + GlobalFile string + // EnvVar is the env var name that points to the list of kubeconfig files to load + EnvVar string + // ExplicitFileFlag is the name of the flag to use for prompting for the kubeconfig file + ExplicitFileFlag string + + // GlobalFileSubpath is an optional value used for displaying help + GlobalFileSubpath string + + LoadingRules *ClientConfigLoadingRules +} + +func (o *PathOptions) GetEnvVarFiles() []string { + if len(o.EnvVar) == 0 { + return []string{} + } + + envVarValue := os.Getenv(o.EnvVar) + if len(envVarValue) == 0 { + return []string{} + } + + return filepath.SplitList(envVarValue) +} + +func (o *PathOptions) GetLoadingPrecedence() []string { + if envVarFiles := o.GetEnvVarFiles(); len(envVarFiles) > 0 { + return envVarFiles + } + + return []string{o.GlobalFile} +} + +func (o *PathOptions) GetStartingConfig() (*clientcmdapi.Config, error) { + // don't mutate the original + loadingRules := *o.LoadingRules + loadingRules.Precedence = o.GetLoadingPrecedence() + + clientConfig := NewNonInteractiveDeferredLoadingClientConfig(&loadingRules, &ConfigOverrides{}) + rawConfig, err := clientConfig.RawConfig() + if os.IsNotExist(err) { + return clientcmdapi.NewConfig(), nil + } + if err != nil { + return nil, err + } + + return &rawConfig, nil +} + +func (o *PathOptions) GetDefaultFilename() string { + if o.IsExplicitFile() { + return o.GetExplicitFile() + } + + if envVarFiles := o.GetEnvVarFiles(); len(envVarFiles) > 0 { + if len(envVarFiles) == 1 { + return envVarFiles[0] + } + + // if any of the envvar files already exists, return it + for _, envVarFile := range envVarFiles { + if _, err := os.Stat(envVarFile); err == nil { + return envVarFile + } + } + + // otherwise, return the last one in the list + return envVarFiles[len(envVarFiles)-1] + } + + return o.GlobalFile +} + +func (o *PathOptions) IsExplicitFile() bool { + return len(o.LoadingRules.ExplicitPath) > 0 +} + +func (o *PathOptions) GetExplicitFile() string { + return o.LoadingRules.ExplicitPath +} + +func NewDefaultPathOptions() *PathOptions { + ret := &PathOptions{ + GlobalFile: RecommendedHomeFile, + EnvVar: RecommendedConfigPathEnvVar, + ExplicitFileFlag: RecommendedConfigPathFlag, + + GlobalFileSubpath: path.Join(RecommendedHomeDir, RecommendedFileName), + + LoadingRules: NewDefaultClientConfigLoadingRules(), + } + ret.LoadingRules.DoNotResolvePaths = true + + return ret +} + +// ModifyConfig takes a Config object, iterates through Clusters, AuthInfos, and Contexts, uses the LocationOfOrigin if specified or +// uses the default destination file to write the results into. This results in multiple file reads, but it's very easy to follow. +// Preferences and CurrentContext should always be set in the default destination file. Since we can't distinguish between empty and missing values +// (no nil strings), we're forced to have separate handling for them. In the kubeconfig cases, newConfig should have at most one difference, +// which means this code will only write into a single file. If you want to relativizePaths, you must provide a fully qualified path in any +// modified element. 
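+// +// A minimal usage sketch (illustrative only; "other-context" is a hypothetical context name and error handling is reduced to early returns): +// +// pathOptions := NewDefaultPathOptions() +// config, err := pathOptions.GetStartingConfig() +// if err != nil { +// return err +// } +// config.CurrentContext = "other-context" +// if err := ModifyConfig(pathOptions, *config, false); err != nil { +// return err +// }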
+func ModifyConfig(configAccess ConfigAccess, newConfig clientcmdapi.Config, relativizePaths bool) error { + startingConfig, err := configAccess.GetStartingConfig() + if err != nil { + return err + } + + // We need to find all differences, locate their original files, read a partial config to modify only that stanza and write out the file. + // Special case the test for current context and preferences since those always write to the default file. + if reflect.DeepEqual(*startingConfig, newConfig) { + // nothing to do + return nil + } + + if startingConfig.CurrentContext != newConfig.CurrentContext { + if err := writeCurrentContext(configAccess, newConfig.CurrentContext); err != nil { + return err + } + } + + if !reflect.DeepEqual(startingConfig.Preferences, newConfig.Preferences) { + if err := writePreferences(configAccess, newConfig.Preferences); err != nil { + return err + } + } + + // Search every cluster, authInfo, and context. First from new to old for differences, then from old to new for deletions + for key, cluster := range newConfig.Clusters { + startingCluster, exists := startingConfig.Clusters[key] + if !reflect.DeepEqual(cluster, startingCluster) || !exists { + destinationFile := cluster.LocationOfOrigin + if len(destinationFile) == 0 { + destinationFile = configAccess.GetDefaultFilename() + } + + configToWrite := GetConfigFromFileOrDie(destinationFile) + t := *cluster + + configToWrite.Clusters[key] = &t + configToWrite.Clusters[key].LocationOfOrigin = destinationFile + if relativizePaths { + if err := RelativizeClusterLocalPaths(configToWrite.Clusters[key]); err != nil { + return err + } + } + + if err := WriteToFile(*configToWrite, destinationFile); err != nil { + return err + } + } + } + + for key, context := range newConfig.Contexts { + startingContext, exists := startingConfig.Contexts[key] + if !reflect.DeepEqual(context, startingContext) || !exists { + destinationFile := context.LocationOfOrigin + if len(destinationFile) == 0 { + destinationFile = configAccess.GetDefaultFilename() + } + + configToWrite := GetConfigFromFileOrDie(destinationFile) + configToWrite.Contexts[key] = context + + if err := WriteToFile(*configToWrite, destinationFile); err != nil { + return err + } + } + } + + for key, authInfo := range newConfig.AuthInfos { + startingAuthInfo, exists := startingConfig.AuthInfos[key] + if !reflect.DeepEqual(authInfo, startingAuthInfo) || !exists { + destinationFile := authInfo.LocationOfOrigin + if len(destinationFile) == 0 { + destinationFile = configAccess.GetDefaultFilename() + } + + configToWrite := GetConfigFromFileOrDie(destinationFile) + t := *authInfo + configToWrite.AuthInfos[key] = &t + configToWrite.AuthInfos[key].LocationOfOrigin = destinationFile + if relativizePaths { + if err := RelativizeAuthInfoLocalPaths(configToWrite.AuthInfos[key]); err != nil { + return err + } + } + + if err := WriteToFile(*configToWrite, destinationFile); err != nil { + return err + } + } + } + + for key, cluster := range startingConfig.Clusters { + if _, exists := newConfig.Clusters[key]; !exists { + destinationFile := cluster.LocationOfOrigin + if len(destinationFile) == 0 { + destinationFile = configAccess.GetDefaultFilename() + } + + configToWrite := GetConfigFromFileOrDie(destinationFile) + delete(configToWrite.Clusters, key) + + if err := WriteToFile(*configToWrite, destinationFile); err != nil { + return err + } + } + } + + for key, context := range startingConfig.Contexts { + if _, exists := newConfig.Contexts[key]; !exists { + destinationFile := 
context.LocationOfOrigin + if len(destinationFile) == 0 { + destinationFile = configAccess.GetDefaultFilename() + } + + configToWrite := GetConfigFromFileOrDie(destinationFile) + delete(configToWrite.Contexts, key) + + if err := WriteToFile(*configToWrite, destinationFile); err != nil { + return err + } + } + } + + for key, authInfo := range startingConfig.AuthInfos { + if _, exists := newConfig.AuthInfos[key]; !exists { + destinationFile := authInfo.LocationOfOrigin + if len(destinationFile) == 0 { + destinationFile = configAccess.GetDefaultFilename() + } + + configToWrite := GetConfigFromFileOrDie(destinationFile) + delete(configToWrite.AuthInfos, key) + + if err := WriteToFile(*configToWrite, destinationFile); err != nil { + return err + } + } + } + + return nil +} + +func PersisterForUser(configAccess ConfigAccess, user string) restclient.AuthProviderConfigPersister { + return &persister{configAccess, user} +} + +type persister struct { + configAccess ConfigAccess + user string +} + +func (p *persister) Persist(config map[string]string) error { + newConfig, err := p.configAccess.GetStartingConfig() + if err != nil { + return err + } + authInfo, ok := newConfig.AuthInfos[p.user] + if ok && authInfo.AuthProvider != nil { + authInfo.AuthProvider.Config = config + // propagate the write error instead of silently dropping it + return ModifyConfig(p.configAccess, *newConfig, false) + } + return nil +} + +// writeCurrentContext takes three possible paths. +// If newCurrentContext is the same as the startingConfig's current context, then we exit. +// If newCurrentContext has a value, then that value is written into the default destination file. +// If newCurrentContext is empty, then we find the config file that is setting the CurrentContext and clear the value from that file +func writeCurrentContext(configAccess ConfigAccess, newCurrentContext string) error { + if startingConfig, err := configAccess.GetStartingConfig(); err != nil { + return err + } else if startingConfig.CurrentContext == newCurrentContext { + return nil + } + + if configAccess.IsExplicitFile() { + file := configAccess.GetExplicitFile() + currConfig := GetConfigFromFileOrDie(file) + currConfig.CurrentContext = newCurrentContext + if err := WriteToFile(*currConfig, file); err != nil { + return err + } + + return nil + } + + if len(newCurrentContext) > 0 { + destinationFile := configAccess.GetDefaultFilename() + config := GetConfigFromFileOrDie(destinationFile) + config.CurrentContext = newCurrentContext + + if err := WriteToFile(*config, destinationFile); err != nil { + return err + } + + return nil + } + + // we're supposed to be clearing the current context. 
We need to find the first spot in the chain that is setting it and clear it + for _, file := range configAccess.GetLoadingPrecedence() { + if _, err := os.Stat(file); err == nil { + currConfig := GetConfigFromFileOrDie(file) + + if len(currConfig.CurrentContext) > 0 { + currConfig.CurrentContext = newCurrentContext + if err := WriteToFile(*currConfig, file); err != nil { + return err + } + + return nil + } + } + } + + return errors.New("no config found to write context") +} + +func writePreferences(configAccess ConfigAccess, newPrefs clientcmdapi.Preferences) error { + if startingConfig, err := configAccess.GetStartingConfig(); err != nil { + return err + } else if reflect.DeepEqual(startingConfig.Preferences, newPrefs) { + return nil + } + + if configAccess.IsExplicitFile() { + file := configAccess.GetExplicitFile() + currConfig := GetConfigFromFileOrDie(file) + currConfig.Preferences = newPrefs + if err := WriteToFile(*currConfig, file); err != nil { + return err + } + + return nil + } + + for _, file := range configAccess.GetLoadingPrecedence() { + currConfig := GetConfigFromFileOrDie(file) + + if !reflect.DeepEqual(currConfig.Preferences, newPrefs) { + currConfig.Preferences = newPrefs + if err := WriteToFile(*currConfig, file); err != nil { + return err + } + + return nil + } + } + + return errors.New("no config found to write preferences") +} + +// GetConfigFromFileOrDie tries to read a kubeconfig file and, if it can't, calls exit. The one exception: missing files result in empty configs, not an exit +func GetConfigFromFileOrDie(filename string) *clientcmdapi.Config { + config, err := LoadFromFile(filename) + if err != nil && !os.IsNotExist(err) { + glog.FatalDepth(1, err) + } + + if config == nil { + return clientcmdapi.NewConfig() + } + + return config +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/loader.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/loader.go index 065559a9c016..f0c9c547a1d1 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/loader.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/loader.go @@ -23,6 +23,7 @@ import ( "os" "path" "path/filepath" + goruntime "runtime" "strings" "github.com/golang/glog" @@ -33,6 +34,7 @@ import ( clientcmdlatest "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/latest" "k8s.io/kubernetes/pkg/runtime" utilerrors "k8s.io/kubernetes/pkg/util/errors" + "k8s.io/kubernetes/pkg/util/homedir" ) const ( @@ -43,9 +45,57 @@ const ( RecommendedSchemaName = "schema" ) -var OldRecommendedHomeFile = path.Join(os.Getenv("HOME"), "/.kube/.kubeconfig") -var RecommendedHomeFile = path.Join(os.Getenv("HOME"), RecommendedHomeDir, RecommendedFileName) -var RecommendedSchemaFile = path.Join(os.Getenv("HOME"), RecommendedHomeDir, RecommendedSchemaName) +var RecommendedHomeFile = path.Join(homedir.HomeDir(), RecommendedHomeDir, RecommendedFileName) +var RecommendedSchemaFile = path.Join(homedir.HomeDir(), RecommendedHomeDir, RecommendedSchemaName) + +// currentMigrationRules returns a map that holds the history of recommended home directories used in previous versions. +// Any future changes to RecommendedHomeFile and related are expected to add a migration rule here, in order to make +// sure existing config files are migrated to their new locations properly. 
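+// +// Illustrative only: if the recommended location changed again, a rule mapping the new path to the previous one would be appended, e.g. (hypothetical old path): +// +// migrationRules[RecommendedHomeFile] = path.Join(os.Getenv("HOME"), ".kube", "config.old")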
+func currentMigrationRules() map[string]string { + oldRecommendedHomeFile := path.Join(os.Getenv("HOME"), "/.kube/.kubeconfig") + oldRecommendedWindowsHomeFile := path.Join(os.Getenv("HOME"), RecommendedHomeDir, RecommendedFileName) + + migrationRules := map[string]string{} + migrationRules[RecommendedHomeFile] = oldRecommendedHomeFile + if goruntime.GOOS == "windows" { + migrationRules[RecommendedHomeFile] = oldRecommendedWindowsHomeFile + } + return migrationRules +} + +type ClientConfigLoader interface { + ConfigAccess + Load() (*clientcmdapi.Config, error) +} + +type KubeconfigGetter func() (*clientcmdapi.Config, error) + +type ClientConfigGetter struct { + kubeconfigGetter KubeconfigGetter +} + +// ClientConfigGetter implements the ClientConfigLoader interface. +var _ ClientConfigLoader = &ClientConfigGetter{} + +func (g *ClientConfigGetter) Load() (*clientcmdapi.Config, error) { + return g.kubeconfigGetter() +} + +func (g *ClientConfigGetter) GetLoadingPrecedence() []string { + return nil +} +func (g *ClientConfigGetter) GetStartingConfig() (*clientcmdapi.Config, error) { + return nil, nil +} +func (g *ClientConfigGetter) GetDefaultFilename() string { + return "" +} +func (g *ClientConfigGetter) IsExplicitFile() bool { + return false +} +func (g *ClientConfigGetter) GetExplicitFile() string { + return "" +} // ClientConfigLoadingRules is an ExplicitPath and string slice of specific locations that are used for merging together a Config // Callers can put the chain together however they want, but we'd recommend: @@ -64,11 +114,13 @@ type ClientConfigLoadingRules struct { DoNotResolvePaths bool } +// ClientConfigLoadingRules implements the ClientConfigLoader interface. +var _ ClientConfigLoader = &ClientConfigLoadingRules{} + // NewDefaultClientConfigLoadingRules returns a ClientConfigLoadingRules object with default fields filled in. You are not required to // use this constructor func NewDefaultClientConfigLoadingRules() *ClientConfigLoadingRules { chain := []string{} - migrationRules := map[string]string{} envVarFiles := os.Getenv(RecommendedConfigPathEnvVar) if len(envVarFiles) != 0 { @@ -76,13 +128,11 @@ func NewDefaultClientConfigLoadingRules() *ClientConfigLoadingRules { } else { chain = append(chain, RecommendedHomeFile) - migrationRules[RecommendedHomeFile] = OldRecommendedHomeFile - } return &ClientConfigLoadingRules{ Precedence: chain, - MigrationRules: migrationRules, + MigrationRules: currentMigrationRules(), } } @@ -216,6 +266,54 @@ func (rules *ClientConfigLoadingRules) Migrate() error { return nil } +// GetLoadingPrecedence implements ConfigAccess +func (rules *ClientConfigLoadingRules) GetLoadingPrecedence() []string { + return rules.Precedence +} + +// GetStartingConfig implements ConfigAccess +func (rules *ClientConfigLoadingRules) GetStartingConfig() (*clientcmdapi.Config, error) { + clientConfig := NewNonInteractiveDeferredLoadingClientConfig(rules, &ConfigOverrides{}) + rawConfig, err := clientConfig.RawConfig() + if os.IsNotExist(err) { + return clientcmdapi.NewConfig(), nil + } + if err != nil { + return nil, err + } + + return &rawConfig, nil +} + +// GetDefaultFilename implements ConfigAccess +func (rules *ClientConfigLoadingRules) GetDefaultFilename() string { + // Explicit file if we have one. + if rules.IsExplicitFile() { + return rules.GetExplicitFile() + } + // Otherwise, first existing file from precedence. 
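+ // For example (illustrative paths): with Precedence = ["/tmp/a", "/tmp/b"] and only "/tmp/b" existing on disk, "/tmp/b" is returned.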
+ for _, filename := range rules.GetLoadingPrecedence() { + if _, err := os.Stat(filename); err == nil { + return filename + } + } + // If none exists, use the first from precedence. + if len(rules.Precedence) > 0 { + return rules.Precedence[0] + } + return "" +} + +// IsExplicitFile implements ConfigAccess +func (rules *ClientConfigLoadingRules) IsExplicitFile() bool { + return len(rules.ExplicitPath) > 0 +} + +// GetExplicitFile implements ConfigAccess +func (rules *ClientConfigLoadingRules) GetExplicitFile() string { + return rules.ExplicitPath +} + // LoadFromFile takes a filename and deserializes the contents into Config object func LoadFromFile(filename string) (*clientcmdapi.Config, error) { kubeconfigBytes, err := ioutil.ReadFile(filename) diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/loader_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/loader_test.go new file mode 100644 index 000000000000..ad79c7b8183b --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/loader_test.go @@ -0,0 +1,562 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package clientcmd + +import ( + "fmt" + "io/ioutil" + "os" + "path" + "path/filepath" + "reflect" + "strings" + "testing" + + "github.com/ghodss/yaml" + + clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api" + clientcmdlatest "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/latest" + "k8s.io/kubernetes/pkg/runtime" +) + +var ( + testConfigAlfa = clientcmdapi.Config{ + AuthInfos: map[string]*clientcmdapi.AuthInfo{ + "red-user": {Token: "red-token"}}, + Clusters: map[string]*clientcmdapi.Cluster{ + "cow-cluster": {Server: "http://cow.org:8080"}}, + Contexts: map[string]*clientcmdapi.Context{ + "federal-context": {AuthInfo: "red-user", Cluster: "cow-cluster", Namespace: "hammer-ns"}}, + } + testConfigBravo = clientcmdapi.Config{ + AuthInfos: map[string]*clientcmdapi.AuthInfo{ + "black-user": {Token: "black-token"}}, + Clusters: map[string]*clientcmdapi.Cluster{ + "pig-cluster": {Server: "http://pig.org:8080"}}, + Contexts: map[string]*clientcmdapi.Context{ + "queen-anne-context": {AuthInfo: "black-user", Cluster: "pig-cluster", Namespace: "saw-ns"}}, + } + testConfigCharlie = clientcmdapi.Config{ + AuthInfos: map[string]*clientcmdapi.AuthInfo{ + "green-user": {Token: "green-token"}}, + Clusters: map[string]*clientcmdapi.Cluster{ + "horse-cluster": {Server: "http://horse.org:8080"}}, + Contexts: map[string]*clientcmdapi.Context{ + "shaker-context": {AuthInfo: "green-user", Cluster: "horse-cluster", Namespace: "chisel-ns"}}, + } + testConfigDelta = clientcmdapi.Config{ + AuthInfos: map[string]*clientcmdapi.AuthInfo{ + "blue-user": {Token: "blue-token"}}, + Clusters: map[string]*clientcmdapi.Cluster{ + "chicken-cluster": {Server: "http://chicken.org:8080"}}, + Contexts: map[string]*clientcmdapi.Context{ + "gothic-context": {AuthInfo: "blue-user", Cluster: 
"chicken-cluster", Namespace: "plane-ns"}}, + } + + testConfigConflictAlfa = clientcmdapi.Config{ + AuthInfos: map[string]*clientcmdapi.AuthInfo{ + "red-user": {Token: "a-different-red-token"}, + "yellow-user": {Token: "yellow-token"}}, + Clusters: map[string]*clientcmdapi.Cluster{ + "cow-cluster": {Server: "http://a-different-cow.org:8080", InsecureSkipTLSVerify: true}, + "donkey-cluster": {Server: "http://donkey.org:8080", InsecureSkipTLSVerify: true}}, + CurrentContext: "federal-context", + } +) + +func TestNonExistentCommandLineFile(t *testing.T) { + loadingRules := ClientConfigLoadingRules{ + ExplicitPath: "bogus_file", + } + + _, err := loadingRules.Load() + if err == nil { + t.Fatalf("Expected error for missing command-line file, got none") + } + if !strings.Contains(err.Error(), "bogus_file") { + t.Fatalf("Expected error about 'bogus_file', got %s", err.Error()) + } +} + +func TestToleratingMissingFiles(t *testing.T) { + loadingRules := ClientConfigLoadingRules{ + Precedence: []string{"bogus1", "bogus2", "bogus3"}, + } + + _, err := loadingRules.Load() + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } +} + +func TestErrorReadingFile(t *testing.T) { + commandLineFile, _ := ioutil.TempFile("", "") + defer os.Remove(commandLineFile.Name()) + + if err := ioutil.WriteFile(commandLineFile.Name(), []byte("bogus value"), 0644); err != nil { + t.Fatalf("Error creating tempfile: %v", err) + } + + loadingRules := ClientConfigLoadingRules{ + ExplicitPath: commandLineFile.Name(), + } + + _, err := loadingRules.Load() + if err == nil { + t.Fatalf("Expected error for unloadable file, got none") + } + if !strings.Contains(err.Error(), commandLineFile.Name()) { + t.Fatalf("Expected error about '%s', got %s", commandLineFile.Name(), err.Error()) + } +} + +func TestErrorReadingNonFile(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "") + if err != nil { + t.Fatalf("Couldn't create tmpdir") + } + defer os.Remove(tmpdir) + + loadingRules := ClientConfigLoadingRules{ + ExplicitPath: tmpdir, + } + + _, err = loadingRules.Load() + if err == nil { + t.Fatalf("Expected error for non-file, got none") + } + if !strings.Contains(err.Error(), tmpdir) { + t.Fatalf("Expected error about '%s', got %s", tmpdir, err.Error()) + } +} + +func TestConflictingCurrentContext(t *testing.T) { + commandLineFile, _ := ioutil.TempFile("", "") + defer os.Remove(commandLineFile.Name()) + envVarFile, _ := ioutil.TempFile("", "") + defer os.Remove(envVarFile.Name()) + + mockCommandLineConfig := clientcmdapi.Config{ + CurrentContext: "any-context-value", + } + mockEnvVarConfig := clientcmdapi.Config{ + CurrentContext: "a-different-context", + } + + WriteToFile(mockCommandLineConfig, commandLineFile.Name()) + WriteToFile(mockEnvVarConfig, envVarFile.Name()) + + loadingRules := ClientConfigLoadingRules{ + ExplicitPath: commandLineFile.Name(), + Precedence: []string{envVarFile.Name()}, + } + + mergedConfig, err := loadingRules.Load() + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + + if mergedConfig.CurrentContext != mockCommandLineConfig.CurrentContext { + t.Errorf("expected %v, got %v", mockCommandLineConfig.CurrentContext, mergedConfig.CurrentContext) + } +} + +func TestLoadingEmptyMaps(t *testing.T) { + configFile, _ := ioutil.TempFile("", "") + defer os.Remove(configFile.Name()) + + mockConfig := clientcmdapi.Config{ + CurrentContext: "any-context-value", + } + + WriteToFile(mockConfig, configFile.Name()) + + config, err := LoadFromFile(configFile.Name()) + if err != nil { + t.Errorf("Unexpected 
error: %v", err) + } + + if config.Clusters == nil { + t.Error("expected config.Clusters to be non-nil") + } + if config.AuthInfos == nil { + t.Error("expected config.AuthInfos to be non-nil") + } + if config.Contexts == nil { + t.Error("expected config.Contexts to be non-nil") + } +} + +func TestResolveRelativePaths(t *testing.T) { + pathResolutionConfig1 := clientcmdapi.Config{ + AuthInfos: map[string]*clientcmdapi.AuthInfo{ + "relative-user-1": {ClientCertificate: "relative/client/cert", ClientKey: "../relative/client/key"}, + "absolute-user-1": {ClientCertificate: "/absolute/client/cert", ClientKey: "/absolute/client/key"}, + }, + Clusters: map[string]*clientcmdapi.Cluster{ + "relative-server-1": {CertificateAuthority: "../relative/ca"}, + "absolute-server-1": {CertificateAuthority: "/absolute/ca"}, + }, + } + pathResolutionConfig2 := clientcmdapi.Config{ + AuthInfos: map[string]*clientcmdapi.AuthInfo{ + "relative-user-2": {ClientCertificate: "relative/client/cert2", ClientKey: "../relative/client/key2"}, + "absolute-user-2": {ClientCertificate: "/absolute/client/cert2", ClientKey: "/absolute/client/key2"}, + }, + Clusters: map[string]*clientcmdapi.Cluster{ + "relative-server-2": {CertificateAuthority: "../relative/ca2"}, + "absolute-server-2": {CertificateAuthority: "/absolute/ca2"}, + }, + } + + configDir1, _ := ioutil.TempDir("", "") + configFile1 := path.Join(configDir1, ".kubeconfig") + configDir1, _ = filepath.Abs(configDir1) + defer os.Remove(configFile1) + configDir2, _ := ioutil.TempDir("", "") + configDir2, _ = ioutil.TempDir(configDir2, "") + configFile2 := path.Join(configDir2, ".kubeconfig") + configDir2, _ = filepath.Abs(configDir2) + defer os.Remove(configFile2) + + WriteToFile(pathResolutionConfig1, configFile1) + WriteToFile(pathResolutionConfig2, configFile2) + + loadingRules := ClientConfigLoadingRules{ + Precedence: []string{configFile1, configFile2}, + } + + mergedConfig, err := loadingRules.Load() + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + + foundClusterCount := 0 + for key, cluster := range mergedConfig.Clusters { + if key == "relative-server-1" { + foundClusterCount++ + matchStringArg(path.Join(configDir1, pathResolutionConfig1.Clusters["relative-server-1"].CertificateAuthority), cluster.CertificateAuthority, t) + } + if key == "relative-server-2" { + foundClusterCount++ + matchStringArg(path.Join(configDir2, pathResolutionConfig2.Clusters["relative-server-2"].CertificateAuthority), cluster.CertificateAuthority, t) + } + if key == "absolute-server-1" { + foundClusterCount++ + matchStringArg(pathResolutionConfig1.Clusters["absolute-server-1"].CertificateAuthority, cluster.CertificateAuthority, t) + } + if key == "absolute-server-2" { + foundClusterCount++ + matchStringArg(pathResolutionConfig2.Clusters["absolute-server-2"].CertificateAuthority, cluster.CertificateAuthority, t) + } + } + if foundClusterCount != 4 { + t.Errorf("Expected 4 clusters, found %v: %v", foundClusterCount, mergedConfig.Clusters) + } + + foundAuthInfoCount := 0 + for key, authInfo := range mergedConfig.AuthInfos { + if key == "relative-user-1" { + foundAuthInfoCount++ + matchStringArg(path.Join(configDir1, pathResolutionConfig1.AuthInfos["relative-user-1"].ClientCertificate), authInfo.ClientCertificate, t) + matchStringArg(path.Join(configDir1, pathResolutionConfig1.AuthInfos["relative-user-1"].ClientKey), authInfo.ClientKey, t) + } + if key == "relative-user-2" { + foundAuthInfoCount++ + matchStringArg(path.Join(configDir2, 
pathResolutionConfig2.AuthInfos["relative-user-2"].ClientCertificate), authInfo.ClientCertificate, t) + matchStringArg(path.Join(configDir2, pathResolutionConfig2.AuthInfos["relative-user-2"].ClientKey), authInfo.ClientKey, t) + } + if key == "absolute-user-1" { + foundAuthInfoCount++ + matchStringArg(pathResolutionConfig1.AuthInfos["absolute-user-1"].ClientCertificate, authInfo.ClientCertificate, t) + matchStringArg(pathResolutionConfig1.AuthInfos["absolute-user-1"].ClientKey, authInfo.ClientKey, t) + } + if key == "absolute-user-2" { + foundAuthInfoCount++ + matchStringArg(pathResolutionConfig2.AuthInfos["absolute-user-2"].ClientCertificate, authInfo.ClientCertificate, t) + matchStringArg(pathResolutionConfig2.AuthInfos["absolute-user-2"].ClientKey, authInfo.ClientKey, t) + } + } + if foundAuthInfoCount != 4 { + t.Errorf("Expected 4 users, found %v: %v", foundAuthInfoCount, mergedConfig.AuthInfos) + } + +} + +func TestMigratingFile(t *testing.T) { + sourceFile, _ := ioutil.TempFile("", "") + defer os.Remove(sourceFile.Name()) + destinationFile, _ := ioutil.TempFile("", "") + // delete the file so that we'll write to it + os.Remove(destinationFile.Name()) + + WriteToFile(testConfigAlfa, sourceFile.Name()) + + loadingRules := ClientConfigLoadingRules{ + MigrationRules: map[string]string{destinationFile.Name(): sourceFile.Name()}, + } + + if _, err := loadingRules.Load(); err != nil { + t.Errorf("unexpected error %v", err) + } + + // the load should have recreated this file + defer os.Remove(destinationFile.Name()) + + sourceContent, err := ioutil.ReadFile(sourceFile.Name()) + if err != nil { + t.Errorf("unexpected error %v", err) + } + destinationContent, err := ioutil.ReadFile(destinationFile.Name()) + if err != nil { + t.Errorf("unexpected error %v", err) + } + + if !reflect.DeepEqual(sourceContent, destinationContent) { + t.Errorf("source and destination do not match") + } +} + +func TestMigratingFileLeaveExistingFileAlone(t *testing.T) { + sourceFile, _ := ioutil.TempFile("", "") + defer os.Remove(sourceFile.Name()) + destinationFile, _ := ioutil.TempFile("", "") + defer os.Remove(destinationFile.Name()) + + WriteToFile(testConfigAlfa, sourceFile.Name()) + + loadingRules := ClientConfigLoadingRules{ + MigrationRules: map[string]string{destinationFile.Name(): sourceFile.Name()}, + } + + if _, err := loadingRules.Load(); err != nil { + t.Errorf("unexpected error %v", err) + } + + destinationContent, err := ioutil.ReadFile(destinationFile.Name()) + if err != nil { + t.Errorf("unexpected error %v", err) + } + + if len(destinationContent) > 0 { + t.Errorf("destination should not have been touched") + } +} + +func TestMigratingFileSourceMissingSkip(t *testing.T) { + sourceFilename := "some-missing-file" + destinationFile, _ := ioutil.TempFile("", "") + // delete the file so that we'll write to it + os.Remove(destinationFile.Name()) + + loadingRules := ClientConfigLoadingRules{ + MigrationRules: map[string]string{destinationFile.Name(): sourceFilename}, + } + + if _, err := loadingRules.Load(); err != nil { + t.Errorf("unexpected error %v", err) + } + + if _, err := os.Stat(destinationFile.Name()); !os.IsNotExist(err) { + t.Errorf("destination should not exist") + } +} + +func Example_noMergingOnExplicitPaths() { + commandLineFile, _ := ioutil.TempFile("", "") + defer os.Remove(commandLineFile.Name()) + envVarFile, _ := ioutil.TempFile("", "") + defer os.Remove(envVarFile.Name()) + + WriteToFile(testConfigAlfa, commandLineFile.Name()) + WriteToFile(testConfigConflictAlfa, envVarFile.Name()) + 
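+ // The explicit path should short-circuit merging: only commandLineFile's contents are expected in the output below.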
+ loadingRules := ClientConfigLoadingRules{ + ExplicitPath: commandLineFile.Name(), + Precedence: []string{envVarFile.Name()}, + } + + mergedConfig, err := loadingRules.Load() + + json, err := runtime.Encode(clientcmdlatest.Codec, mergedConfig) + if err != nil { + fmt.Printf("Unexpected error: %v", err) + } + output, err := yaml.JSONToYAML(json) + if err != nil { + fmt.Printf("Unexpected error: %v", err) + } + + fmt.Printf("%v", string(output)) + // Output: + // apiVersion: v1 + // clusters: + // - cluster: + // server: http://cow.org:8080 + // name: cow-cluster + // contexts: + // - context: + // cluster: cow-cluster + // namespace: hammer-ns + // user: red-user + // name: federal-context + // current-context: "" + // kind: Config + // preferences: {} + // users: + // - name: red-user + // user: + // token: red-token +} + +func Example_mergingSomeWithConflict() { + commandLineFile, _ := ioutil.TempFile("", "") + defer os.Remove(commandLineFile.Name()) + envVarFile, _ := ioutil.TempFile("", "") + defer os.Remove(envVarFile.Name()) + + WriteToFile(testConfigAlfa, commandLineFile.Name()) + WriteToFile(testConfigConflictAlfa, envVarFile.Name()) + + loadingRules := ClientConfigLoadingRules{ + Precedence: []string{commandLineFile.Name(), envVarFile.Name()}, + } + + mergedConfig, err := loadingRules.Load() + + json, err := runtime.Encode(clientcmdlatest.Codec, mergedConfig) + if err != nil { + fmt.Printf("Unexpected error: %v", err) + } + output, err := yaml.JSONToYAML(json) + if err != nil { + fmt.Printf("Unexpected error: %v", err) + } + + fmt.Printf("%v", string(output)) + // Output: + // apiVersion: v1 + // clusters: + // - cluster: + // server: http://cow.org:8080 + // name: cow-cluster + // - cluster: + // insecure-skip-tls-verify: true + // server: http://donkey.org:8080 + // name: donkey-cluster + // contexts: + // - context: + // cluster: cow-cluster + // namespace: hammer-ns + // user: red-user + // name: federal-context + // current-context: federal-context + // kind: Config + // preferences: {} + // users: + // - name: red-user + // user: + // token: red-token + // - name: yellow-user + // user: + // token: yellow-token +} + +func Example_mergingEverythingNoConflicts() { + commandLineFile, _ := ioutil.TempFile("", "") + defer os.Remove(commandLineFile.Name()) + envVarFile, _ := ioutil.TempFile("", "") + defer os.Remove(envVarFile.Name()) + currentDirFile, _ := ioutil.TempFile("", "") + defer os.Remove(currentDirFile.Name()) + homeDirFile, _ := ioutil.TempFile("", "") + defer os.Remove(homeDirFile.Name()) + + WriteToFile(testConfigAlfa, commandLineFile.Name()) + WriteToFile(testConfigBravo, envVarFile.Name()) + WriteToFile(testConfigCharlie, currentDirFile.Name()) + WriteToFile(testConfigDelta, homeDirFile.Name()) + + loadingRules := ClientConfigLoadingRules{ + Precedence: []string{commandLineFile.Name(), envVarFile.Name(), currentDirFile.Name(), homeDirFile.Name()}, + } + + mergedConfig, err := loadingRules.Load() + + json, err := runtime.Encode(clientcmdlatest.Codec, mergedConfig) + if err != nil { + fmt.Printf("Unexpected error: %v", err) + } + output, err := yaml.JSONToYAML(json) + if err != nil { + fmt.Printf("Unexpected error: %v", err) + } + + fmt.Printf("%v", string(output)) + // Output: + // apiVersion: v1 + // clusters: + // - cluster: + // server: http://chicken.org:8080 + // name: chicken-cluster + // - cluster: + // server: http://cow.org:8080 + // name: cow-cluster + // - cluster: + // server: http://horse.org:8080 + // name: horse-cluster + // - cluster: + // server: 
http://pig.org:8080 + // name: pig-cluster + // contexts: + // - context: + // cluster: cow-cluster + // namespace: hammer-ns + // user: red-user + // name: federal-context + // - context: + // cluster: chicken-cluster + // namespace: plane-ns + // user: blue-user + // name: gothic-context + // - context: + // cluster: pig-cluster + // namespace: saw-ns + // user: black-user + // name: queen-anne-context + // - context: + // cluster: horse-cluster + // namespace: chisel-ns + // user: green-user + // name: shaker-context + // current-context: "" + // kind: Config + // preferences: {} + // users: + // - name: black-user + // user: + // token: black-token + // - name: blue-user + // user: + // token: blue-token + // - name: green-user + // user: + // token: green-token + // - name: red-user + // user: + // token: red-token +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/merged_client_builder.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/merged_client_builder.go index 321eae9e87c7..52c1493d056e 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/merged_client_builder.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/merged_client_builder.go @@ -27,13 +27,13 @@ import ( clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api" ) -// DeferredLoadingClientConfig is a ClientConfig interface that is backed by a set of loading rules +// DeferredLoadingClientConfig is a ClientConfig interface that is backed by a client config loader. // It is used in cases where the loading rules may change after you've instantiated them and you want to be sure that // the most recent rules are used. This is useful in cases where you bind flags to loading rule parameters before // the parse happens and you want your calling code to be ignorant of how the values are being mutated to avoid // passing extraneous information down a call stack type DeferredLoadingClientConfig struct { - loadingRules *ClientConfigLoadingRules + loader ClientConfigLoader overrides *ConfigOverrides fallbackReader io.Reader @@ -42,13 +42,13 @@ type DeferredLoadingClientConfig struct { } // NewNonInteractiveDeferredLoadingClientConfig creates a ConfigClientClientConfig using the passed context name -func NewNonInteractiveDeferredLoadingClientConfig(loadingRules *ClientConfigLoadingRules, overrides *ConfigOverrides) ClientConfig { - return &DeferredLoadingClientConfig{loadingRules: loadingRules, overrides: overrides} +func NewNonInteractiveDeferredLoadingClientConfig(loader ClientConfigLoader, overrides *ConfigOverrides) ClientConfig { + return &DeferredLoadingClientConfig{loader: loader, overrides: overrides} } // NewInteractiveDeferredLoadingClientConfig creates a ConfigClientClientConfig using the passed context name and the fallback auth reader -func NewInteractiveDeferredLoadingClientConfig(loadingRules *ClientConfigLoadingRules, overrides *ConfigOverrides, fallbackReader io.Reader) ClientConfig { - return &DeferredLoadingClientConfig{loadingRules: loadingRules, overrides: overrides, fallbackReader: fallbackReader} +func NewInteractiveDeferredLoadingClientConfig(loader ClientConfigLoader, overrides *ConfigOverrides, fallbackReader io.Reader) ClientConfig { + return &DeferredLoadingClientConfig{loader: loader, overrides: overrides, fallbackReader: fallbackReader} } func (config *DeferredLoadingClientConfig) createClientConfig() (ClientConfig, error) { @@ -57,16 +57,16 @@ func (config 
*DeferredLoadingClientConfig) createClientConfig() (ClientConfig, e defer config.loadingLock.Unlock() if config.clientConfig == nil { - mergedConfig, err := config.loadingRules.Load() + mergedConfig, err := config.loader.Load() if err != nil { return nil, err } var mergedClientConfig ClientConfig if config.fallbackReader != nil { - mergedClientConfig = NewInteractiveClientConfig(*mergedConfig, config.overrides.CurrentContext, config.overrides, config.fallbackReader) + mergedClientConfig = NewInteractiveClientConfig(*mergedConfig, config.overrides.CurrentContext, config.overrides, config.fallbackReader, config.loader) } else { - mergedClientConfig = NewNonInteractiveClientConfig(*mergedConfig, config.overrides.CurrentContext, config.overrides) + mergedClientConfig = NewNonInteractiveClientConfig(*mergedConfig, config.overrides.CurrentContext, config.overrides, config.loader) } config.clientConfig = mergedClientConfig @@ -91,6 +91,7 @@ func (config *DeferredLoadingClientConfig) ClientConfig() (*restclient.Config, e if err != nil { return nil, err } + mergedConfig, err := mergedClientConfig.ClientConfig() if err != nil { return nil, err @@ -102,7 +103,6 @@ func (config *DeferredLoadingClientConfig) ClientConfig() (*restclient.Config, e glog.V(2).Info("No kubeconfig could be created, falling back to service account.") return icc.ClientConfig() } - return mergedConfig, nil } @@ -115,3 +115,8 @@ func (config *DeferredLoadingClientConfig) Namespace() (string, bool, error) { return mergedKubeConfig.Namespace() } + +// ConfigAccess implements ClientConfig +func (config *DeferredLoadingClientConfig) ConfigAccess() ConfigAccess { + return config.loader +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/overrides.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/overrides.go index 9996d2f44aff..f6dda97f1cbd 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/overrides.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/overrides.go @@ -47,6 +47,7 @@ type AuthOverrideFlags struct { ClientCertificate FlagInfo ClientKey FlagInfo Token FlagInfo + Impersonate FlagInfo Username FlagInfo Password FlagInfo } @@ -115,6 +116,7 @@ const ( FlagCAFile = "certificate-authority" FlagEmbedCerts = "embed-certs" FlagBearerToken = "token" + FlagImpersonate = "as" FlagUsername = "username" FlagPassword = "password" ) @@ -125,6 +127,7 @@ func RecommendedAuthOverrideFlags(prefix string) AuthOverrideFlags { ClientCertificate: FlagInfo{prefix + FlagCertFile, "", "", "Path to a client certificate file for TLS."}, ClientKey: FlagInfo{prefix + FlagKeyFile, "", "", "Path to a client key file for TLS."}, Token: FlagInfo{prefix + FlagBearerToken, "", "", "Bearer token for authentication to the API server."}, + Impersonate: FlagInfo{prefix + FlagImpersonate, "", "", "Username to impersonate for the operation."}, Username: FlagInfo{prefix + FlagUsername, "", "", "Username for basic authentication to the API server."}, Password: FlagInfo{prefix + FlagPassword, "", "", "Password for basic authentication to the API server."}, } @@ -164,6 +167,7 @@ func BindAuthInfoFlags(authInfo *clientcmdapi.AuthInfo, flags *pflag.FlagSet, fl flagNames.ClientCertificate.BindStringFlag(flags, &authInfo.ClientCertificate) flagNames.ClientKey.BindStringFlag(flags, &authInfo.ClientKey) flagNames.Token.BindStringFlag(flags, &authInfo.Token) + flagNames.Impersonate.BindStringFlag(flags, &authInfo.Impersonate) 
flagNames.Username.BindStringFlag(flags, &authInfo.Username) flagNames.Password.BindStringFlag(flags, &authInfo.Password) } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/validation.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/validation.go index bd1bd735e09e..1690f515e936 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/validation.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/validation.go @@ -260,8 +260,10 @@ func validateContext(contextName string, context clientcmdapi.Context, config cl validationErrors = append(validationErrors, fmt.Errorf("cluster %q was not found for context %q", context.Cluster, contextName)) } - if (len(context.Namespace) != 0) && !validation.IsDNS952Label(context.Namespace) { - validationErrors = append(validationErrors, fmt.Errorf("namespace %q for context %q does not conform to the kubernetes DNS952 rules", context.Namespace, contextName)) + if len(context.Namespace) != 0 { + if len(validation.IsDNS1123Label(context.Namespace)) != 0 { + validationErrors = append(validationErrors, fmt.Errorf("namespace %q for context %q does not conform to the kubernetes DNS_LABEL rules", context.Namespace, contextName)) + } } return validationErrors diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/validation_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/validation_test.go new file mode 100644 index 000000000000..ca4843a8775a --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/validation_test.go @@ -0,0 +1,432 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package clientcmd + +import ( + "io/ioutil" + "os" + "strings" + "testing" + + clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api" + utilerrors "k8s.io/kubernetes/pkg/util/errors" +) + +func TestConfirmUsableBadInfoButOkConfig(t *testing.T) { + config := clientcmdapi.NewConfig() + config.Clusters["missing ca"] = &clientcmdapi.Cluster{ + Server: "anything", + CertificateAuthority: "missing", + } + config.AuthInfos["error"] = &clientcmdapi.AuthInfo{ + Username: "anything", + Token: "here", + } + config.Contexts["dirty"] = &clientcmdapi.Context{ + Cluster: "missing ca", + AuthInfo: "error", + } + config.Clusters["clean"] = &clientcmdapi.Cluster{ + Server: "anything", + } + config.AuthInfos["clean"] = &clientcmdapi.AuthInfo{ + Token: "here", + } + config.Contexts["clean"] = &clientcmdapi.Context{ + Cluster: "clean", + AuthInfo: "clean", + } + + badValidation := configValidationTest{ + config: config, + expectedErrorSubstring: []string{"unable to read certificate-authority"}, + } + okTest := configValidationTest{ + config: config, + } + + okTest.testConfirmUsable("clean", t) + badValidation.testConfig(t) +} +func TestConfirmUsableBadInfoConfig(t *testing.T) { + config := clientcmdapi.NewConfig() + config.Clusters["missing ca"] = &clientcmdapi.Cluster{ + Server: "anything", + CertificateAuthority: "missing", + } + config.AuthInfos["error"] = &clientcmdapi.AuthInfo{ + Username: "anything", + Token: "here", + } + config.Contexts["first"] = &clientcmdapi.Context{ + Cluster: "missing ca", + AuthInfo: "error", + } + test := configValidationTest{ + config: config, + expectedErrorSubstring: []string{"unable to read certificate-authority"}, + } + + test.testConfirmUsable("first", t) +} +func TestConfirmUsableEmptyConfig(t *testing.T) { + config := clientcmdapi.NewConfig() + test := configValidationTest{ + config: config, + expectedErrorSubstring: []string{"invalid configuration: no configuration has been provided"}, + } + + test.testConfirmUsable("", t) +} +func TestConfirmUsableMissingConfig(t *testing.T) { + config := clientcmdapi.NewConfig() + test := configValidationTest{ + config: config, + expectedErrorSubstring: []string{"invalid configuration: no configuration has been provided"}, + } + + test.testConfirmUsable("not-here", t) +} +func TestValidateEmptyConfig(t *testing.T) { + config := clientcmdapi.NewConfig() + test := configValidationTest{ + config: config, + expectedErrorSubstring: []string{"invalid configuration: no configuration has been provided"}, + } + + test.testConfig(t) +} +func TestValidateMissingCurrentContextConfig(t *testing.T) { + config := clientcmdapi.NewConfig() + config.CurrentContext = "anything" + test := configValidationTest{ + config: config, + expectedErrorSubstring: []string{"context was not found for specified "}, + } + + test.testConfig(t) +} +func TestIsContextNotFound(t *testing.T) { + config := clientcmdapi.NewConfig() + config.CurrentContext = "anything" + + err := Validate(*config) + if !IsContextNotFound(err) { + t.Errorf("Expected context not found, but got %v", err) + } + if !IsConfigurationInvalid(err) { + t.Errorf("Expected configuration invalid, but got %v", err) + } +} + +func TestIsEmptyConfig(t *testing.T) { + config := clientcmdapi.NewConfig() + + err := Validate(*config) + if !IsEmptyConfig(err) { + t.Errorf("Expected context not found, but got %v", err) + } + if !IsConfigurationInvalid(err) { + t.Errorf("Expected configuration invalid, but got %v", err) + } +} + +func TestIsConfigurationInvalid(t *testing.T) { + if 
newErrConfigurationInvalid([]error{}) != nil { + t.Errorf("unexpected error") + } + if newErrConfigurationInvalid([]error{ErrNoContext}) == ErrNoContext { + t.Errorf("unexpected error") + } + if newErrConfigurationInvalid([]error{ErrNoContext, ErrNoContext}) == nil { + t.Errorf("unexpected error") + } + if !IsConfigurationInvalid(newErrConfigurationInvalid([]error{ErrNoContext, ErrNoContext})) { + t.Errorf("unexpected error") + } +} + +func TestValidateMissingReferencesConfig(t *testing.T) { + config := clientcmdapi.NewConfig() + config.CurrentContext = "anything" + config.Contexts["anything"] = &clientcmdapi.Context{Cluster: "missing", AuthInfo: "missing"} + test := configValidationTest{ + config: config, + expectedErrorSubstring: []string{"user \"missing\" was not found for context \"anything\"", "cluster \"missing\" was not found for context \"anything\""}, + } + + test.testContext("anything", t) + test.testConfig(t) +} +func TestValidateEmptyContext(t *testing.T) { + config := clientcmdapi.NewConfig() + config.CurrentContext = "anything" + config.Contexts["anything"] = &clientcmdapi.Context{} + test := configValidationTest{ + config: config, + expectedErrorSubstring: []string{"user was not specified for context \"anything\"", "cluster was not specified for context \"anything\""}, + } + + test.testContext("anything", t) + test.testConfig(t) +} + +func TestValidateEmptyClusterInfo(t *testing.T) { + config := clientcmdapi.NewConfig() + config.Clusters["empty"] = &clientcmdapi.Cluster{} + test := configValidationTest{ + config: config, + expectedErrorSubstring: []string{"cluster has no server defined"}, + } + + test.testCluster("empty", t) + test.testConfig(t) +} +func TestValidateMissingCAFileClusterInfo(t *testing.T) { + config := clientcmdapi.NewConfig() + config.Clusters["missing ca"] = &clientcmdapi.Cluster{ + Server: "anything", + CertificateAuthority: "missing", + } + test := configValidationTest{ + config: config, + expectedErrorSubstring: []string{"unable to read certificate-authority"}, + } + + test.testCluster("missing ca", t) + test.testConfig(t) +} +func TestValidateCleanClusterInfo(t *testing.T) { + config := clientcmdapi.NewConfig() + config.Clusters["clean"] = &clientcmdapi.Cluster{ + Server: "anything", + } + test := configValidationTest{ + config: config, + } + + test.testCluster("clean", t) + test.testConfig(t) +} +func TestValidateCleanWithCAClusterInfo(t *testing.T) { + tempFile, _ := ioutil.TempFile("", "") + defer os.Remove(tempFile.Name()) + + config := clientcmdapi.NewConfig() + config.Clusters["clean"] = &clientcmdapi.Cluster{ + Server: "anything", + CertificateAuthority: tempFile.Name(), + } + test := configValidationTest{ + config: config, + } + + test.testCluster("clean", t) + test.testConfig(t) +} + +func TestValidateEmptyAuthInfo(t *testing.T) { + config := clientcmdapi.NewConfig() + config.AuthInfos["error"] = &clientcmdapi.AuthInfo{} + test := configValidationTest{ + config: config, + } + + test.testAuthInfo("error", t) + test.testConfig(t) +} +func TestValidateCertFilesNotFoundAuthInfo(t *testing.T) { + config := clientcmdapi.NewConfig() + config.AuthInfos["error"] = &clientcmdapi.AuthInfo{ + ClientCertificate: "missing", + ClientKey: "missing", + } + test := configValidationTest{ + config: config, + expectedErrorSubstring: []string{"unable to read client-cert", "unable to read client-key"}, + } + + test.testAuthInfo("error", t) + test.testConfig(t) +} +func TestValidateCertDataOverridesFiles(t *testing.T) { + tempFile, _ := ioutil.TempFile("", "") + defer 
os.Remove(tempFile.Name()) + + config := clientcmdapi.NewConfig() + config.AuthInfos["clean"] = &clientcmdapi.AuthInfo{ + ClientCertificate: tempFile.Name(), + ClientCertificateData: []byte("certdata"), + ClientKey: tempFile.Name(), + ClientKeyData: []byte("keydata"), + } + test := configValidationTest{ + config: config, + expectedErrorSubstring: []string{"client-cert-data and client-cert are both specified", "client-key-data and client-key are both specified"}, + } + + test.testAuthInfo("clean", t) + test.testConfig(t) +} +func TestValidateCleanCertFilesAuthInfo(t *testing.T) { + tempFile, _ := ioutil.TempFile("", "") + defer os.Remove(tempFile.Name()) + + config := clientcmdapi.NewConfig() + config.AuthInfos["clean"] = &clientcmdapi.AuthInfo{ + ClientCertificate: tempFile.Name(), + ClientKey: tempFile.Name(), + } + test := configValidationTest{ + config: config, + } + + test.testAuthInfo("clean", t) + test.testConfig(t) +} +func TestValidateCleanTokenAuthInfo(t *testing.T) { + config := clientcmdapi.NewConfig() + config.AuthInfos["clean"] = &clientcmdapi.AuthInfo{ + Token: "any-value", + } + test := configValidationTest{ + config: config, + } + + test.testAuthInfo("clean", t) + test.testConfig(t) +} + +func TestValidateMultipleMethodsAuthInfo(t *testing.T) { + config := clientcmdapi.NewConfig() + config.AuthInfos["error"] = &clientcmdapi.AuthInfo{ + Token: "token", + Username: "username", + } + test := configValidationTest{ + config: config, + expectedErrorSubstring: []string{"more than one authentication method", "token", "basicAuth"}, + } + + test.testAuthInfo("error", t) + test.testConfig(t) +} + +type configValidationTest struct { + config *clientcmdapi.Config + expectedErrorSubstring []string +} + +func (c configValidationTest) testContext(contextName string, t *testing.T) { + errs := validateContext(contextName, *c.config.Contexts[contextName], *c.config) + + if len(c.expectedErrorSubstring) != 0 { + if len(errs) == 0 { + t.Errorf("Expected error containing: %v", c.expectedErrorSubstring) + } + for _, curr := range c.expectedErrorSubstring { + if len(errs) != 0 && !strings.Contains(utilerrors.NewAggregate(errs).Error(), curr) { + t.Errorf("Expected error containing: %v, but got %v", c.expectedErrorSubstring, utilerrors.NewAggregate(errs)) + } + } + + } else { + if len(errs) != 0 { + t.Errorf("Unexpected error: %v", utilerrors.NewAggregate(errs)) + } + } +} +func (c configValidationTest) testConfirmUsable(contextName string, t *testing.T) { + err := ConfirmUsable(*c.config, contextName) + + if len(c.expectedErrorSubstring) != 0 { + if err == nil { + t.Errorf("Expected error containing: %v", c.expectedErrorSubstring) + } else { + for _, curr := range c.expectedErrorSubstring { + if err != nil && !strings.Contains(err.Error(), curr) { + t.Errorf("Expected error containing: %v, but got %v", c.expectedErrorSubstring, err) + } + } + } + } else { + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + } +} +func (c configValidationTest) testConfig(t *testing.T) { + err := Validate(*c.config) + + if len(c.expectedErrorSubstring) != 0 { + if err == nil { + t.Errorf("Expected error containing: %v", c.expectedErrorSubstring) + } else { + for _, curr := range c.expectedErrorSubstring { + if err != nil && !strings.Contains(err.Error(), curr) { + t.Errorf("Expected error containing: %v, but got %v", c.expectedErrorSubstring, err) + } + } + if !IsConfigurationInvalid(err) { + t.Errorf("all errors should be configuration invalid: %v", err) + } + } + } else { + if err != nil { + 
t.Errorf("Unexpected error: %v", err) + } + } +} +func (c configValidationTest) testCluster(clusterName string, t *testing.T) { + errs := validateClusterInfo(clusterName, *c.config.Clusters[clusterName]) + + if len(c.expectedErrorSubstring) != 0 { + if len(errs) == 0 { + t.Errorf("Expected error containing: %v", c.expectedErrorSubstring) + } + for _, curr := range c.expectedErrorSubstring { + if len(errs) != 0 && !strings.Contains(utilerrors.NewAggregate(errs).Error(), curr) { + t.Errorf("Expected error containing: %v, but got %v", c.expectedErrorSubstring, utilerrors.NewAggregate(errs)) + } + } + + } else { + if len(errs) != 0 { + t.Errorf("Unexpected error: %v", utilerrors.NewAggregate(errs)) + } + } +} + +func (c configValidationTest) testAuthInfo(authInfoName string, t *testing.T) { + errs := validateAuthInfo(authInfoName, *c.config.AuthInfos[authInfoName]) + + if len(c.expectedErrorSubstring) != 0 { + if len(errs) == 0 { + t.Errorf("Expected error containing: %v", c.expectedErrorSubstring) + } + for _, curr := range c.expectedErrorSubstring { + if len(errs) != 0 && !strings.Contains(utilerrors.NewAggregate(errs).Error(), curr) { + t.Errorf("Expected error containing: %v, but got %v", c.expectedErrorSubstring, utilerrors.NewAggregate(errs)) + } + } + + } else { + if len(errs) != 0 { + t.Errorf("Unexpected error: %v", utilerrors.NewAggregate(errs)) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/clusterrolebindings.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/clusterrolebindings.go new file mode 100644 index 000000000000..2a9d79846f35 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/clusterrolebindings.go @@ -0,0 +1,92 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/rbac" + "k8s.io/kubernetes/pkg/watch" +) + +// ClusterRoleBindings has methods to work with ClusterRoleBinding resources in a namespace +type ClusterRoleBindings interface { + ClusterRoleBindings() ClusterRoleBindingInterface +} + +// ClusterRoleBindingInterface has methods to work with ClusterRoleBinding resources. 
+type ClusterRoleBindingInterface interface { + List(opts api.ListOptions) (*rbac.ClusterRoleBindingList, error) + Get(name string) (*rbac.ClusterRoleBinding, error) + Delete(name string, options *api.DeleteOptions) error + Create(clusterRoleBinding *rbac.ClusterRoleBinding) (*rbac.ClusterRoleBinding, error) + Update(clusterRoleBinding *rbac.ClusterRoleBinding) (*rbac.ClusterRoleBinding, error) + Watch(opts api.ListOptions) (watch.Interface, error) +} + +// clusterRoleBindings implements the ClusterRoleBindingInterface +type clusterRoleBindings struct { + client *RbacClient +} + +// newClusterRoleBindings returns a clusterRoleBindings +func newClusterRoleBindings(c *RbacClient) *clusterRoleBindings { + return &clusterRoleBindings{ + client: c, + } +} + +// List takes label and field selectors, and returns the list of clusterRoleBindings that match those selectors. +func (c *clusterRoleBindings) List(opts api.ListOptions) (result *rbac.ClusterRoleBindingList, err error) { + result = &rbac.ClusterRoleBindingList{} + err = c.client.Get().Resource("clusterrolebindings").VersionedParams(&opts, api.ParameterCodec).Do().Into(result) + return +} + +// Get takes the name of the clusterRoleBinding, and returns the corresponding ClusterRoleBinding object, and an error if one occurs +func (c *clusterRoleBindings) Get(name string) (result *rbac.ClusterRoleBinding, err error) { + result = &rbac.ClusterRoleBinding{} + err = c.client.Get().Resource("clusterrolebindings").Name(name).Do().Into(result) + return +} + +// Delete takes the name of the clusterRoleBinding and deletes it. Returns an error if one occurs. +func (c *clusterRoleBindings) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete().Resource("clusterrolebindings").Name(name).Body(options).Do().Error() +} + +// Create takes the representation of a clusterRoleBinding and creates it. Returns the server's representation of the clusterRoleBinding, and an error, if one occurs. +func (c *clusterRoleBindings) Create(clusterRoleBinding *rbac.ClusterRoleBinding) (result *rbac.ClusterRoleBinding, err error) { + result = &rbac.ClusterRoleBinding{} + err = c.client.Post().Resource("clusterrolebindings").Body(clusterRoleBinding).Do().Into(result) + return +} + +// Update takes the representation of a clusterRoleBinding and updates it. Returns the server's representation of the clusterRoleBinding, and an error, if one occurs. +func (c *clusterRoleBindings) Update(clusterRoleBinding *rbac.ClusterRoleBinding) (result *rbac.ClusterRoleBinding, err error) { + result = &rbac.ClusterRoleBinding{} + err = c.client.Put().Resource("clusterrolebindings").Name(clusterRoleBinding.Name).Body(clusterRoleBinding).Do().Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested clusterRoleBindings. +func (c *clusterRoleBindings) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Resource("clusterrolebindings"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/clusterroles.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/clusterroles.go new file mode 100644 index 000000000000..0d2d375d619a --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/clusterroles.go @@ -0,0 +1,92 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved.
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/rbac" + "k8s.io/kubernetes/pkg/watch" +) + +// ClusterRoles has methods to work with ClusterRole resources, which are cluster-scoped +type ClusterRoles interface { + ClusterRoles() ClusterRoleInterface +} + +// ClusterRoleInterface has methods to work with ClusterRole resources. +type ClusterRoleInterface interface { + List(opts api.ListOptions) (*rbac.ClusterRoleList, error) + Get(name string) (*rbac.ClusterRole, error) + Delete(name string, options *api.DeleteOptions) error + Create(clusterRole *rbac.ClusterRole) (*rbac.ClusterRole, error) + Update(clusterRole *rbac.ClusterRole) (*rbac.ClusterRole, error) + Watch(opts api.ListOptions) (watch.Interface, error) +} + +// clusterRoles implements the ClusterRoleInterface +type clusterRoles struct { + client *RbacClient +} + +// newClusterRoles returns a clusterRoles +func newClusterRoles(c *RbacClient) *clusterRoles { + return &clusterRoles{ + client: c, + } +} + +// List takes label and field selectors, and returns the list of clusterRoles that match those selectors. +func (c *clusterRoles) List(opts api.ListOptions) (result *rbac.ClusterRoleList, err error) { + result = &rbac.ClusterRoleList{} + err = c.client.Get().Resource("clusterroles").VersionedParams(&opts, api.ParameterCodec).Do().Into(result) + return +} + +// Get takes the name of the clusterRole, and returns the corresponding ClusterRole object, and an error if one occurs +func (c *clusterRoles) Get(name string) (result *rbac.ClusterRole, err error) { + result = &rbac.ClusterRole{} + err = c.client.Get().Resource("clusterroles").Name(name).Do().Into(result) + return +} + +// Delete takes the name of the clusterRole and deletes it. Returns an error if one occurs. +func (c *clusterRoles) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete().Resource("clusterroles").Name(name).Body(options).Do().Error() +} + +// Create takes the representation of a clusterRole and creates it. Returns the server's representation of the clusterRole, and an error, if one occurs. +func (c *clusterRoles) Create(clusterRole *rbac.ClusterRole) (result *rbac.ClusterRole, err error) { + result = &rbac.ClusterRole{} + err = c.client.Post().Resource("clusterroles").Body(clusterRole).Do().Into(result) + return +} + +// Update takes the representation of a clusterRole and updates it. Returns the server's representation of the clusterRole, and an error, if one occurs. +func (c *clusterRoles) Update(clusterRole *rbac.ClusterRole) (result *rbac.ClusterRole, err error) { + result = &rbac.ClusterRole{} + err = c.client.Put().Resource("clusterroles").Name(clusterRole.Name).Body(clusterRole).Do().Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested clusterRoles. +func (c *clusterRoles) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Resource("clusterroles").
+ VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/conditions.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/conditions.go index 5087baa80676..a61674e2ba88 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/conditions.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/conditions.go @@ -17,12 +17,16 @@ limitations under the License. package unversioned import ( + "fmt" "time" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/errors" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/apis/batch" "k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/util/wait" + "k8s.io/kubernetes/pkg/watch" ) // DefaultRetry is the recommended retry for a conflict where multiple clients @@ -126,7 +130,7 @@ func ReplicaSetHasDesiredReplicas(c ExtensionsInterface, replicaSet *extensions. // JobHasDesiredParallelism returns a condition that will be true if the desired parallelism count // for a job equals the current active count or is less by an appropriate successful/unsuccessful count. -func JobHasDesiredParallelism(c ExtensionsInterface, job *extensions.Job) wait.ConditionFunc { +func JobHasDesiredParallelism(c BatchInterface, job *batch.Job) wait.ConditionFunc { return func() (bool, error) { job, err := c.Jobs(job.Namespace).Get(job.Name) @@ -168,3 +172,125 @@ func DeploymentHasDesiredReplicas(c ExtensionsInterface, deployment *extensions. deployment.Status.UpdatedReplicas == deployment.Spec.Replicas, nil } } + +// ErrPodCompleted is returned by PodRunning, PodRunningAndReady, or PodContainerRunning to indicate that +// the pod has already reached completed state. +var ErrPodCompleted = fmt.Errorf("pod ran to completion") + +// PodRunning returns true if the pod is running, false if the pod has not yet reached running state, +// returns ErrPodCompleted if the pod has run to completion, or an error in any other case. +func PodRunning(event watch.Event) (bool, error) { + switch event.Type { + case watch.Deleted: + return false, errors.NewNotFound(unversioned.GroupResource{Resource: "pods"}, "") + } + switch t := event.Object.(type) { + case *api.Pod: + switch t.Status.Phase { + case api.PodRunning: + return true, nil + case api.PodFailed, api.PodSucceeded: + return false, ErrPodCompleted + } + } + return false, nil +} + +// PodCompleted returns true if the pod has run to completion, false if it has not yet +// done so, or an error in any other case. +func PodCompleted(event watch.Event) (bool, error) { + switch event.Type { + case watch.Deleted: + return false, errors.NewNotFound(unversioned.GroupResource{Resource: "pods"}, "") + } + switch t := event.Object.(type) { + case *api.Pod: + switch t.Status.Phase { + case api.PodFailed, api.PodSucceeded: + return true, nil + } + } + return false, nil +} + +// PodRunningAndReady returns true if the pod is running and ready, false if the pod has not +// yet reached those states, returns ErrPodCompleted if the pod has run to completion, or +// an error in any other case.
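+//
+// These pod conditions all have the watch.ConditionFunc shape, so the natural
+// way to consume them is through a watch helper; a hypothetical sketch:
+//
+//	event, err := watch.Until(timeout, w, PodRunningAndReady)
+//
+// where w is a watch.Interface and timeout is a caller-chosen time.Duration.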
+func PodRunningAndReady(event watch.Event) (bool, error) { + switch event.Type { + case watch.Deleted: + return false, errors.NewNotFound(unversioned.GroupResource{Resource: "pods"}, "") + } + switch t := event.Object.(type) { + case *api.Pod: + switch t.Status.Phase { + case api.PodFailed, api.PodSucceeded: + return false, ErrPodCompleted + case api.PodRunning: + return api.IsPodReady(t), nil + } + } + return false, nil +} + +// PodNotPending returns true if the pod has left the pending state, false if it has not, +// or an error in any other case (such as if the pod was deleted). +func PodNotPending(event watch.Event) (bool, error) { + switch event.Type { + case watch.Deleted: + return false, errors.NewNotFound(unversioned.GroupResource{Resource: "pods"}, "") + } + switch t := event.Object.(type) { + case *api.Pod: + switch t.Status.Phase { + case api.PodPending: + return false, nil + default: + return true, nil + } + } + return false, nil +} + +// PodContainerRunning returns false until the named container has ContainerStatus running (at least once), +// and will return an error if the pod is deleted, runs to completion, or the pod's container status is not available. +func PodContainerRunning(containerName string) watch.ConditionFunc { + return func(event watch.Event) (bool, error) { + switch event.Type { + case watch.Deleted: + return false, errors.NewNotFound(unversioned.GroupResource{Resource: "pods"}, "") + } + switch t := event.Object.(type) { + case *api.Pod: + switch t.Status.Phase { + case api.PodRunning, api.PodPending: + case api.PodFailed, api.PodSucceeded: + return false, ErrPodCompleted + default: + return false, nil + } + for _, s := range t.Status.ContainerStatuses { + if s.Name != containerName { + continue + } + return s.State.Running != nil, nil + } + return false, nil + } + return false, nil + } +} + +// ServiceAccountHasSecrets returns true if the service account has at least one secret, +// false if it does not, or an error. +func ServiceAccountHasSecrets(event watch.Event) (bool, error) { + switch event.Type { + case watch.Deleted: + return false, errors.NewNotFound(unversioned.GroupResource{Resource: "serviceaccounts"}, "") + } + switch t := event.Object.(type) { + case *api.ServiceAccount: + return len(t.Secrets) > 0, nil + } + return false, nil +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/conditions_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/conditions_test.go new file mode 100644 index 000000000000..1042461c0956 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/conditions_test.go @@ -0,0 +1,71 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package unversioned + +import ( + "fmt" + "testing" + + "k8s.io/kubernetes/pkg/api/errors" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/util/wait" +) + +func TestRetryOnConflict(t *testing.T) { + opts := wait.Backoff{Factor: 1.0, Steps: 3} + conflictErr := errors.NewConflict(unversioned.GroupResource{Resource: "test"}, "other", nil) + + // always conflicts, so the retry budget is exhausted and the conflict error is returned + err := RetryOnConflict(opts, func() error { + return conflictErr + }) + if err != conflictErr { + t.Errorf("unexpected error: %v", err) + } + + // returns immediately + i := 0 + err = RetryOnConflict(opts, func() error { + i++ + return nil + }) + if err != nil || i != 1 { + t.Errorf("unexpected error: %v", err) + } + + // returns immediately on error + testErr := fmt.Errorf("some other error") + err = RetryOnConflict(opts, func() error { + return testErr + }) + if err != testErr { + t.Errorf("unexpected error: %v", err) + } + + // keeps retrying + i = 0 + err = RetryOnConflict(opts, func() error { + if i < 2 { + i++ + return errors.NewConflict(unversioned.GroupResource{Resource: "test"}, "other", nil) + } + return nil + }) + if err != nil || i != 2 { + t.Errorf("unexpected error: %v", err) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/containerinfo_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/containerinfo_test.go new file mode 100644 index 000000000000..797ad5139855 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/containerinfo_test.go @@ -0,0 +1,198 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + "encoding/json" + "net/http" + "net/http/httptest" + "net/url" + "path" + "reflect" + "strconv" + "strings" + "testing" + "time" + + cadvisorapi "github.com/google/cadvisor/info/v1" + cadvisorapitest "github.com/google/cadvisor/info/v1/test" +) + +func testHTTPContainerInfoGetter( + req *cadvisorapi.ContainerInfoRequest, + cinfo *cadvisorapi.ContainerInfo, + podID string, + containerID string, + status int, + t *testing.T, +) { + expectedPath := "/stats" + if len(podID) > 0 && len(containerID) > 0 { + expectedPath = path.Join(expectedPath, podID, containerID) + } + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if status != 0 { + w.WriteHeader(status) + } + if strings.TrimRight(r.URL.Path, "/") != strings.TrimRight(expectedPath, "/") { + t.Fatalf("Received request to an invalid path. Should be %v, got %v", + expectedPath, r.URL.Path) + } + + var receivedReq cadvisorapi.ContainerInfoRequest + err := json.NewDecoder(r.Body).Decode(&receivedReq) + if err != nil { + t.Fatal(err) + } + // Note: This will not make a deep copy of req. + // So changing req after Get*Info would be a race.
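+		// (expectedReq therefore aliases the caller's req; the handler only
+		// reads it before responding, which keeps the comparison race-free.)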
+ expectedReq := req + // Fill any empty fields with default value + if !expectedReq.Equals(receivedReq) { + t.Errorf("received wrong request") + } + err = json.NewEncoder(w).Encode(cinfo) + if err != nil { + t.Fatal(err) + } + })) + defer ts.Close() + hostURL, err := url.Parse(ts.URL) + if err != nil { + t.Fatal(err) + } + parts := strings.Split(hostURL.Host, ":") + + port, err := strconv.Atoi(parts[1]) + if err != nil { + t.Fatal(err) + } + + containerInfoGetter := &HTTPContainerInfoGetter{ + Client: http.DefaultClient, + Port: port, + } + + var receivedContainerInfo *cadvisorapi.ContainerInfo + if len(podID) > 0 && len(containerID) > 0 { + receivedContainerInfo, err = containerInfoGetter.GetContainerInfo(parts[0], podID, containerID, req) + } else { + receivedContainerInfo, err = containerInfoGetter.GetRootInfo(parts[0], req) + } + if status == 0 || status == http.StatusOK { + if err != nil { + t.Errorf("received unexpected error: %v", err) + } + + if !receivedContainerInfo.Eq(cinfo) { + t.Error("received unexpected container info") + } + } else { + if err == nil { + t.Error("did not receive expected error.") + } + } +} + +func TestHTTPContainerInfoGetterGetContainerInfoSuccessfully(t *testing.T) { + req := &cadvisorapi.ContainerInfoRequest{ + NumStats: 10, + } + cinfo := cadvisorapitest.GenerateRandomContainerInfo( + "dockerIDWhichWillNotBeChecked", // docker ID + 2, // Number of cores + req, + 1*time.Second, + ) + testHTTPContainerInfoGetter(req, cinfo, "somePodID", "containerNameInK8S", 0, t) +} + +func TestHTTPContainerInfoGetterGetRootInfoSuccessfully(t *testing.T) { + req := &cadvisorapi.ContainerInfoRequest{ + NumStats: 10, + } + cinfo := cadvisorapitest.GenerateRandomContainerInfo( + "dockerIDWhichWillNotBeChecked", // docker ID + 2, // Number of cores + req, + 1*time.Second, + ) + testHTTPContainerInfoGetter(req, cinfo, "", "", 0, t) +} + +func TestHTTPContainerInfoGetterGetContainerInfoWithError(t *testing.T) { + req := &cadvisorapi.ContainerInfoRequest{ + NumStats: 10, + } + cinfo := cadvisorapitest.GenerateRandomContainerInfo( + "dockerIDWhichWillNotBeChecked", // docker ID + 2, // Number of cores + req, + 1*time.Second, + ) + testHTTPContainerInfoGetter(req, cinfo, "somePodID", "containerNameInK8S", http.StatusNotFound, t) +} + +func TestHTTPContainerInfoGetterGetRootInfoWithError(t *testing.T) { + req := &cadvisorapi.ContainerInfoRequest{ + NumStats: 10, + } + cinfo := cadvisorapitest.GenerateRandomContainerInfo( + "dockerIDWhichWillNotBeChecked", // docker ID + 2, // Number of cores + req, + 1*time.Second, + ) + testHTTPContainerInfoGetter(req, cinfo, "", "", http.StatusNotFound, t) +} + +func TestHTTPGetMachineInfo(t *testing.T) { + mspec := &cadvisorapi.MachineInfo{ + NumCores: 4, + MemoryCapacity: 2048, + } + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + err := json.NewEncoder(w).Encode(mspec) + if err != nil { + t.Fatal(err) + } + })) + defer ts.Close() + hostURL, err := url.Parse(ts.URL) + if err != nil { + t.Fatal(err) + } + parts := strings.Split(hostURL.Host, ":") + + port, err := strconv.Atoi(parts[1]) + if err != nil { + t.Fatal(err) + } + + containerInfoGetter := &HTTPContainerInfoGetter{ + Client: http.DefaultClient, + Port: port, + } + + received, err := containerInfoGetter.GetMachineInfo(parts[0]) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(received, mspec) { + t.Errorf("received wrong machine spec") + } +} diff --git 
a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/daemon_sets_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/daemon_sets_test.go new file mode 100644 index 000000000000..f453a9138772 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/daemon_sets_test.go @@ -0,0 +1,198 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned_test + +import ( + "k8s.io/kubernetes/pkg/client/unversioned/testclient/simple" +) + +import ( + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/apis/extensions" +) + +func getDSResourceName() string { + return "daemonsets" +} + +func TestListDaemonSets(t *testing.T) { + ns := api.NamespaceAll + c := &simple.Client{ + Request: simple.Request{ + Method: "GET", + Path: testapi.Extensions.ResourcePath(getDSResourceName(), ns, ""), + }, + Response: simple.Response{StatusCode: 200, + Body: &extensions.DaemonSetList{ + Items: []extensions.DaemonSet{ + { + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Labels: map[string]string{ + "foo": "bar", + "name": "baz", + }, + }, + Spec: extensions.DaemonSetSpec{ + Template: api.PodTemplateSpec{}, + }, + }, + }, + }, + }, + } + receivedDSs, err := c.Setup(t).Extensions().DaemonSets(ns).List(api.ListOptions{}) + defer c.Close() + c.Validate(t, receivedDSs, err) + +} + +func TestGetDaemonSet(t *testing.T) { + ns := api.NamespaceDefault + c := &simple.Client{ + Request: simple.Request{Method: "GET", Path: testapi.Extensions.ResourcePath(getDSResourceName(), ns, "foo"), Query: simple.BuildQueryValues(nil)}, + Response: simple.Response{ + StatusCode: 200, + Body: &extensions.DaemonSet{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Labels: map[string]string{ + "foo": "bar", + "name": "baz", + }, + }, + Spec: extensions.DaemonSetSpec{ + Template: api.PodTemplateSpec{}, + }, + }, + }, + } + receivedDaemonSet, err := c.Setup(t).Extensions().DaemonSets(ns).Get("foo") + defer c.Close() + c.Validate(t, receivedDaemonSet, err) +} + +func TestGetDaemonSetWithNoName(t *testing.T) { + ns := api.NamespaceDefault + c := &simple.Client{Error: true} + receivedPod, err := c.Setup(t).Extensions().DaemonSets(ns).Get("") + defer c.Close() + if (err != nil) && (err.Error() != simple.NameRequiredError) { + t.Errorf("Expected error: %v, but got %v", simple.NameRequiredError, err) + } + + c.Validate(t, receivedPod, err) +} + +func TestUpdateDaemonSet(t *testing.T) { + ns := api.NamespaceDefault + requestDaemonSet := &extensions.DaemonSet{ + ObjectMeta: api.ObjectMeta{Name: "foo", ResourceVersion: "1"}, + } + c := &simple.Client{ + Request: simple.Request{Method: "PUT", Path: testapi.Extensions.ResourcePath(getDSResourceName(), ns, "foo"), Query: simple.BuildQueryValues(nil)}, + Response: simple.Response{ + StatusCode: 200, + Body: &extensions.DaemonSet{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Labels: map[string]string{ + "foo": "bar", + "name": "baz", + }, + }, + Spec: 
extensions.DaemonSetSpec{ + Template: api.PodTemplateSpec{}, + }, + }, + }, + } + receivedDaemonSet, err := c.Setup(t).Extensions().DaemonSets(ns).Update(requestDaemonSet) + defer c.Close() + c.Validate(t, receivedDaemonSet, err) +} + +func TestUpdateDaemonSetUpdateStatus(t *testing.T) { + ns := api.NamespaceDefault + requestDaemonSet := &extensions.DaemonSet{ + ObjectMeta: api.ObjectMeta{Name: "foo", ResourceVersion: "1"}, + } + c := &simple.Client{ + Request: simple.Request{Method: "PUT", Path: testapi.Extensions.ResourcePath(getDSResourceName(), ns, "foo") + "/status", Query: simple.BuildQueryValues(nil)}, + Response: simple.Response{ + StatusCode: 200, + Body: &extensions.DaemonSet{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Labels: map[string]string{ + "foo": "bar", + "name": "baz", + }, + }, + Spec: extensions.DaemonSetSpec{ + Template: api.PodTemplateSpec{}, + }, + Status: extensions.DaemonSetStatus{}, + }, + }, + } + receivedDaemonSet, err := c.Setup(t).Extensions().DaemonSets(ns).UpdateStatus(requestDaemonSet) + defer c.Close() + c.Validate(t, receivedDaemonSet, err) +} + +func TestDeleteDaemon(t *testing.T) { + ns := api.NamespaceDefault + c := &simple.Client{ + Request: simple.Request{Method: "DELETE", Path: testapi.Extensions.ResourcePath(getDSResourceName(), ns, "foo"), Query: simple.BuildQueryValues(nil)}, + Response: simple.Response{StatusCode: 200}, + } + err := c.Setup(t).Extensions().DaemonSets(ns).Delete("foo") + defer c.Close() + c.Validate(t, nil, err) +} + +func TestCreateDaemonSet(t *testing.T) { + ns := api.NamespaceDefault + requestDaemonSet := &extensions.DaemonSet{ + ObjectMeta: api.ObjectMeta{Name: "foo"}, + } + c := &simple.Client{ + Request: simple.Request{Method: "POST", Path: testapi.Extensions.ResourcePath(getDSResourceName(), ns, ""), Body: requestDaemonSet, Query: simple.BuildQueryValues(nil)}, + Response: simple.Response{ + StatusCode: 200, + Body: &extensions.DaemonSet{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Labels: map[string]string{ + "foo": "bar", + "name": "baz", + }, + }, + Spec: extensions.DaemonSetSpec{ + Template: api.PodTemplateSpec{}, + }, + }, + }, + } + receivedDaemonSet, err := c.Setup(t).Extensions().DaemonSets(ns).Create(requestDaemonSet) + defer c.Close() + c.Validate(t, receivedDaemonSet, err) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/deployment.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/deployment.go index 1706418b32e0..cafd4cfd1d39 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/deployment.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/deployment.go @@ -45,6 +45,9 @@ type deployments struct { ns string } +// Ensure statically that deployments implements DeploymentInterface. +var _ DeploymentInterface = &deployments{} + // newDeployments returns a Deployments func newDeployments(c *ExtensionsClient, namespace string) *deployments { return &deployments{ diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/deployment_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/deployment_test.go new file mode 100644 index 000000000000..c530411a7516 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/deployment_test.go @@ -0,0 +1,236 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned_test + +import ( + "net/http" + "net/url" + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/pkg/client/unversioned/testclient/simple" + "k8s.io/kubernetes/pkg/labels" +) + +func getDeploymentsResourceName() string { + return "deployments" +} + +func TestDeploymentCreate(t *testing.T) { + ns := api.NamespaceDefault + deployment := extensions.Deployment{ + ObjectMeta: api.ObjectMeta{ + Name: "abc", + Namespace: ns, + }, + } + c := &simple.Client{ + Request: simple.Request{ + Method: "POST", + Path: testapi.Extensions.ResourcePath(getDeploymentsResourceName(), ns, ""), + Query: simple.BuildQueryValues(nil), + Body: &deployment, + }, + Response: simple.Response{StatusCode: 200, Body: &deployment}, + } + + response, err := c.Setup(t).Deployments(ns).Create(&deployment) + defer c.Close() + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + c.Validate(t, response, err) +} + +func TestDeploymentGet(t *testing.T) { + ns := api.NamespaceDefault + deployment := &extensions.Deployment{ + ObjectMeta: api.ObjectMeta{ + Name: "abc", + Namespace: ns, + }, + } + c := &simple.Client{ + Request: simple.Request{ + Method: "GET", + Path: testapi.Extensions.ResourcePath(getDeploymentsResourceName(), ns, "abc"), + Query: simple.BuildQueryValues(nil), + Body: nil, + }, + Response: simple.Response{StatusCode: 200, Body: deployment}, + } + + response, err := c.Setup(t).Deployments(ns).Get("abc") + defer c.Close() + c.Validate(t, response, err) +} + +func TestDeploymentList(t *testing.T) { + ns := api.NamespaceDefault + deploymentList := &extensions.DeploymentList{ + Items: []extensions.Deployment{ + { + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Namespace: ns, + }, + }, + }, + } + c := &simple.Client{ + Request: simple.Request{ + Method: "GET", + Path: testapi.Extensions.ResourcePath(getDeploymentsResourceName(), ns, ""), + Query: simple.BuildQueryValues(nil), + Body: nil, + }, + Response: simple.Response{StatusCode: 200, Body: deploymentList}, + } + response, err := c.Setup(t).Deployments(ns).List(api.ListOptions{}) + defer c.Close() + c.Validate(t, response, err) +} + +func TestDeploymentUpdate(t *testing.T) { + ns := api.NamespaceDefault + deployment := &extensions.Deployment{ + ObjectMeta: api.ObjectMeta{ + Name: "abc", + Namespace: ns, + ResourceVersion: "1", + }, + } + c := &simple.Client{ + Request: simple.Request{ + Method: "PUT", + Path: testapi.Extensions.ResourcePath(getDeploymentsResourceName(), ns, "abc"), + Query: simple.BuildQueryValues(nil), + }, + Response: simple.Response{StatusCode: 200, Body: deployment}, + } + response, err := c.Setup(t).Deployments(ns).Update(deployment) + defer c.Close() + c.Validate(t, response, err) +} + +func TestDeploymentUpdateStatus(t *testing.T) { + ns := api.NamespaceDefault + deployment := &extensions.Deployment{ + ObjectMeta: api.ObjectMeta{ + Name: "abc", + Namespace: ns, + ResourceVersion: "1", + }, + } + c := &simple.Client{ + Request: simple.Request{ + Method: "PUT", + Path: 
testapi.Extensions.ResourcePath(getDeploymentsResourceName(), ns, "abc") + "/status", + Query: simple.BuildQueryValues(nil), + }, + Response: simple.Response{StatusCode: 200, Body: deployment}, + } + response, err := c.Setup(t).Deployments(ns).UpdateStatus(deployment) + defer c.Close() + c.Validate(t, response, err) +} + +func TestDeploymentDelete(t *testing.T) { + ns := api.NamespaceDefault + c := &simple.Client{ + Request: simple.Request{ + Method: "DELETE", + Path: testapi.Extensions.ResourcePath(getDeploymentsResourceName(), ns, "foo"), + Query: simple.BuildQueryValues(nil), + }, + Response: simple.Response{StatusCode: 200}, + } + err := c.Setup(t).Deployments(ns).Delete("foo", nil) + defer c.Close() + c.Validate(t, nil, err) +} + +func TestDeploymentWatch(t *testing.T) { + c := &simple.Client{ + Request: simple.Request{ + Method: "GET", + Path: testapi.Extensions.ResourcePathWithPrefix("watch", getDeploymentsResourceName(), "", ""), + Query: url.Values{"resourceVersion": []string{}}, + }, + Response: simple.Response{StatusCode: 200}, + } + _, err := c.Setup(t).Deployments(api.NamespaceAll).Watch(api.ListOptions{}) + defer c.Close() + c.Validate(t, nil, err) +} + +func TestListDeploymentsLabels(t *testing.T) { + ns := api.NamespaceDefault + labelSelectorQueryParamName := unversioned.LabelSelectorQueryParam(testapi.Extensions.GroupVersion().String()) + c := &simple.Client{ + Request: simple.Request{ + Method: "GET", + Path: testapi.Extensions.ResourcePath("deployments", ns, ""), + Query: simple.BuildQueryValues(url.Values{labelSelectorQueryParamName: []string{"foo=bar,name=baz"}})}, + Response: simple.Response{ + StatusCode: http.StatusOK, + Body: &extensions.DeploymentList{ + Items: []extensions.Deployment{ + { + ObjectMeta: api.ObjectMeta{ + Labels: map[string]string{ + "foo": "bar", + "name": "baz", + }, + }, + }, + }, + }, + }, + } + c.Setup(t) + defer c.Close() + c.QueryValidator[labelSelectorQueryParamName] = simple.ValidateLabels + selector := labels.Set{"foo": "bar", "name": "baz"}.AsSelector() + options := api.ListOptions{LabelSelector: selector} + receivedPodList, err := c.Deployments(ns).List(options) + c.Validate(t, receivedPodList, err) +} + +func TestDeploymentRollback(t *testing.T) { + ns := api.NamespaceDefault + deploymentRollback := &extensions.DeploymentRollback{ + Name: "abc", + UpdatedAnnotations: map[string]string{}, + RollbackTo: extensions.RollbackConfig{Revision: 1}, + } + c := &simple.Client{ + Request: simple.Request{ + Method: "POST", + Path: testapi.Extensions.ResourcePath(getDeploymentsResourceName(), ns, "abc") + "/rollback", + Query: simple.BuildQueryValues(nil), + Body: deploymentRollback, + }, + Response: simple.Response{StatusCode: http.StatusOK}, + } + err := c.Setup(t).Deployments(ns).Rollback(deploymentRollback) + defer c.Close() + c.ValidateCommon(t, err) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/doc.go index f9c7f16e712a..252d80975816 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/doc.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/doc.go @@ -24,8 +24,6 @@ Most consumers should use the Config object to create a Client: import ( client "k8s.io/kubernetes/pkg/client/unversioned" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/fields" - "k8s.io/kubernetes/pkg/labels" ) [...] 
@@ -39,7 +37,7 @@ Most consumers should use the Config object to create a Client: if err != nil { // handle error } - pods, err := client.Pods(api.NamespaceDefault).List(labels.Everything(), fields.Everything()) + pods, err := client.Pods(api.NamespaceDefault).List(api.ListOptions{}) if err != nil { // handle error } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/endpoints_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/endpoints_test.go new file mode 100644 index 000000000000..59bc869b87a6 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/endpoints_test.go @@ -0,0 +1,71 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned_test + +import ( + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/client/unversioned/testclient/simple" +) + +func TestListEndpoints(t *testing.T) { + ns := api.NamespaceDefault + c := &simple.Client{ + Request: simple.Request{Method: "GET", Path: testapi.Default.ResourcePath("endpoints", ns, ""), Query: simple.BuildQueryValues(nil)}, + Response: simple.Response{StatusCode: 200, + Body: &api.EndpointsList{ + Items: []api.Endpoints{ + { + ObjectMeta: api.ObjectMeta{Name: "endpoint-1"}, + Subsets: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{IP: "10.245.1.2"}, {IP: "10.245.1.3"}}, + Ports: []api.EndpointPort{{Port: 8080}}, + }}, + }, + }, + }, + }, + } + receivedEndpointsList, err := c.Setup(t).Endpoints(ns).List(api.ListOptions{}) + defer c.Close() + c.Validate(t, receivedEndpointsList, err) +} + +func TestGetEndpoints(t *testing.T) { + ns := api.NamespaceDefault + c := &simple.Client{ + Request: simple.Request{Method: "GET", Path: testapi.Default.ResourcePath("endpoints", ns, "endpoint-1"), Query: simple.BuildQueryValues(nil)}, + Response: simple.Response{StatusCode: 200, Body: &api.Endpoints{ObjectMeta: api.ObjectMeta{Name: "endpoint-1"}}}, + } + response, err := c.Setup(t).Endpoints(ns).Get("endpoint-1") + defer c.Close() + c.Validate(t, response, err) +} + +func TestGetEndpointWithNoName(t *testing.T) { + ns := api.NamespaceDefault + c := &simple.Client{Error: true} + receivedPod, err := c.Setup(t).Endpoints(ns).Get("") + defer c.Close() + if (err != nil) && (err.Error() != simple.NameRequiredError) { + t.Errorf("Expected error: %v, but got %v", simple.NameRequiredError, err) + } + + c.Validate(t, receivedPod, err) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/events_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/events_test.go new file mode 100644 index 000000000000..371c7544e885 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/events_test.go @@ -0,0 +1,205 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned_test + +import ( + . "k8s.io/kubernetes/pkg/client/unversioned" + "k8s.io/kubernetes/pkg/client/unversioned/testclient/simple" +) + +import ( + "net/url" + "reflect" + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/api/unversioned" +) + +func TestEventSearch(t *testing.T) { + c := &simple.Client{ + Request: simple.Request{ + Method: "GET", + Path: testapi.Default.ResourcePath("events", "baz", ""), + Query: url.Values{ + unversioned.FieldSelectorQueryParam(testapi.Default.GroupVersion().String()): []string{ + GetInvolvedObjectNameFieldLabel(testapi.Default.GroupVersion().String()) + "=foo,", + "involvedObject.namespace=baz,", + "involvedObject.kind=Pod", + }, + unversioned.LabelSelectorQueryParam(testapi.Default.GroupVersion().String()): []string{}, + }, + }, + Response: simple.Response{StatusCode: 200, Body: &api.EventList{}}, + } + eventList, err := c.Setup(t).Events("baz").Search( + &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Namespace: "baz", + SelfLink: testapi.Default.SelfLink("pods", ""), + }, + }, + ) + defer c.Close() + c.Validate(t, eventList, err) +} + +func TestEventCreate(t *testing.T) { + objReference := &api.ObjectReference{ + Kind: "foo", + Namespace: "nm", + Name: "objref1", + UID: "uid", + APIVersion: "apiv1", + ResourceVersion: "1", + } + timeStamp := unversioned.Now() + event := &api.Event{ + ObjectMeta: api.ObjectMeta{ + Namespace: api.NamespaceDefault, + }, + InvolvedObject: *objReference, + FirstTimestamp: timeStamp, + LastTimestamp: timeStamp, + Count: 1, + Type: api.EventTypeNormal, + } + c := &simple.Client{ + Request: simple.Request{ + Method: "POST", + Path: testapi.Default.ResourcePath("events", api.NamespaceDefault, ""), + Body: event, + }, + Response: simple.Response{StatusCode: 200, Body: event}, + } + + response, err := c.Setup(t).Events(api.NamespaceDefault).Create(event) + defer c.Close() + + if err != nil { + t.Fatalf("%v should be nil.", err) + } + + if e, a := *objReference, response.InvolvedObject; !reflect.DeepEqual(e, a) { + t.Errorf("%#v != %#v.", e, a) + } +} + +func TestEventGet(t *testing.T) { + objReference := &api.ObjectReference{ + Kind: "foo", + Namespace: "nm", + Name: "objref1", + UID: "uid", + APIVersion: "apiv1", + ResourceVersion: "1", + } + timeStamp := unversioned.Now() + event := &api.Event{ + ObjectMeta: api.ObjectMeta{ + Namespace: "other", + }, + InvolvedObject: *objReference, + FirstTimestamp: timeStamp, + LastTimestamp: timeStamp, + Count: 1, + Type: api.EventTypeNormal, + } + c := &simple.Client{ + Request: simple.Request{ + Method: "GET", + Path: testapi.Default.ResourcePath("events", "other", "1"), + Body: nil, + }, + Response: simple.Response{StatusCode: 200, Body: event}, + } + + response, err := c.Setup(t).Events("other").Get("1") + defer c.Close() + + if err != nil { + t.Fatalf("%v should be nil.", err) + } + + if e, r := event.InvolvedObject, response.InvolvedObject; !reflect.DeepEqual(e, r) { + t.Errorf("%#v != %#v.", e, r) + } +} + +func TestEventList(t *testing.T) { + ns := api.NamespaceDefault + objReference := 
&api.ObjectReference{ + Kind: "foo", + Namespace: ns, + Name: "objref1", + UID: "uid", + APIVersion: "apiv1", + ResourceVersion: "1", + } + timeStamp := unversioned.Now() + eventList := &api.EventList{ + Items: []api.Event{ + { + InvolvedObject: *objReference, + FirstTimestamp: timeStamp, + LastTimestamp: timeStamp, + Count: 1, + Type: api.EventTypeNormal, + }, + }, + } + c := &simple.Client{ + Request: simple.Request{ + Method: "GET", + Path: testapi.Default.ResourcePath("events", ns, ""), + Body: nil, + }, + Response: simple.Response{StatusCode: 200, Body: eventList}, + } + response, err := c.Setup(t).Events(ns).List(api.ListOptions{}) + defer c.Close() + + if err != nil { + t.Errorf("%#v should be nil.", err) + } + + if len(response.Items) != 1 { + t.Errorf("%#v response.Items should have len 1.", response.Items) + } + + responseEvent := response.Items[0] + if e, r := eventList.Items[0].InvolvedObject, + responseEvent.InvolvedObject; !reflect.DeepEqual(e, r) { + t.Errorf("%#v != %#v.", e, r) + } +} + +func TestEventDelete(t *testing.T) { + ns := api.NamespaceDefault + c := &simple.Client{ + Request: simple.Request{ + Method: "DELETE", + Path: testapi.Default.ResourcePath("events", ns, "foo"), + }, + Response: simple.Response{StatusCode: 200}, + } + err := c.Setup(t).Events(ns).Delete("foo") + defer c.Close() + c.Validate(t, nil, err) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/extensions.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/extensions.go index 5db86dbb8384..3c9114d9a88c 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/extensions.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/extensions.go @@ -28,12 +28,12 @@ import ( // Features of Extensions group are not supported and may be changed or removed in // incompatible ways at any time. 
type ExtensionsInterface interface { - HorizontalPodAutoscalersNamespacer ScaleNamespacer DaemonSetsNamespacer DeploymentsNamespacer JobsNamespacer IngressNamespacer + NetworkPolicyNamespacer ThirdPartyResourceNamespacer ReplicaSetsNamespacer PodSecurityPoliciesInterface @@ -50,10 +50,6 @@ func (c *ExtensionsClient) PodSecurityPolicies() PodSecurityPolicyInterface { return newPodSecurityPolicy(c) } -func (c *ExtensionsClient) HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerInterface { - return newHorizontalPodAutoscalers(c, namespace) -} - func (c *ExtensionsClient) Scales(namespace string) ScaleInterface { return newScales(c, namespace) } @@ -74,8 +70,12 @@ func (c *ExtensionsClient) Ingress(namespace string) IngressInterface { return newIngress(c, namespace) } -func (c *ExtensionsClient) ThirdPartyResources(namespace string) ThirdPartyResourceInterface { - return newThirdPartyResources(c, namespace) +func (c *ExtensionsClient) NetworkPolicies(namespace string) NetworkPolicyInterface { + return newNetworkPolicies(c, namespace) +} + +func (c *ExtensionsClient) ThirdPartyResources() ThirdPartyResourceInterface { + return newThirdPartyResources(c) } func (c *ExtensionsClient) ReplicaSets(namespace string) ReplicaSetInterface { @@ -127,6 +127,7 @@ func setExtensionsDefaults(config *restclient.Config) error { //} config.Codec = api.Codecs.LegacyCodec(*config.GroupVersion) + config.NegotiatedSerializer = api.Codecs if config.QPS == 0 { config.QPS = 5 } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/fake/fake.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/fake/fake.go index 09f1f027452b..7fd452d264d0 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/fake/fake.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/fake/fake.go @@ -70,7 +70,18 @@ func (c *RESTClient) Delete() *restclient.Request { } func (c *RESTClient) request(verb string) *restclient.Request { - return restclient.NewRequest(c, verb, &url.URL{Host: "localhost"}, "", restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion(), Codec: c.Codec}, nil, nil) + config := restclient.ContentConfig{ + ContentType: runtime.ContentTypeJSON, + GroupVersion: testapi.Default.GroupVersion(), + Codec: c.Codec, + } + serializers := restclient.Serializers{ + Encoder: c.Codec, + Decoder: c.Codec, + StreamingSerializer: c.Codec, + Framer: runtime.DefaultFramer, + } + return restclient.NewRequest(c, verb, &url.URL{Host: "localhost"}, "", config, serializers, nil, nil) } func (c *RESTClient) Do(req *http.Request) (*http.Response, error) { diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/flags_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/flags_test.go new file mode 100644 index 000000000000..ab0f94d0412e --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/flags_test.go @@ -0,0 +1,79 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + "testing" + "time" + + "k8s.io/kubernetes/pkg/util/sets" +) + +type fakeFlagSet struct { + t *testing.T + set sets.String +} + +func (f *fakeFlagSet) StringVar(p *string, name, value, usage string) { + if p == nil { + f.t.Errorf("unexpected nil pointer") + } + if usage == "" { + f.t.Errorf("unexpected empty usage") + } + f.set.Insert(name) +} + +func (f *fakeFlagSet) BoolVar(p *bool, name string, value bool, usage string) { + if p == nil { + f.t.Errorf("unexpected nil pointer") + } + if usage == "" { + f.t.Errorf("unexpected empty usage") + } + f.set.Insert(name) +} + +func (f *fakeFlagSet) UintVar(p *uint, name string, value uint, usage string) { + if p == nil { + f.t.Errorf("unexpected nil pointer") + } + if usage == "" { + f.t.Errorf("unexpected empty usage") + } + f.set.Insert(name) +} + +func (f *fakeFlagSet) DurationVar(p *time.Duration, name string, value time.Duration, usage string) { + if p == nil { + f.t.Errorf("unexpected nil pointer") + } + if usage == "" { + f.t.Errorf("unexpected empty usage") + } + f.set.Insert(name) +} + +func (f *fakeFlagSet) IntVar(p *int, name string, value int, usage string) { + if p == nil { + f.t.Errorf("unexpected nil pointer") + } + if usage == "" { + f.t.Errorf("unexpected empty usage") + } + f.set.Insert(name) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/helper.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/helper.go index 3ee6d6ec69b4..020bb01c26d8 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/helper.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/helper.go @@ -18,18 +18,22 @@ package unversioned import ( "fmt" - "reflect" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/apimachinery/registered" + "k8s.io/kubernetes/pkg/apis/apps" "k8s.io/kubernetes/pkg/apis/autoscaling" "k8s.io/kubernetes/pkg/apis/batch" "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/pkg/apis/policy" + "k8s.io/kubernetes/pkg/apis/rbac" "k8s.io/kubernetes/pkg/client/restclient" "k8s.io/kubernetes/pkg/client/typed/discovery" "k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/version" + // Import solely to initialize client auth plugins. 
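+	// (The blank-identifier import below is the standard side-effect-only
+	// import idiom: the package's init() functions register the auth
+	// provider plugins with the client.)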
+ _ "k8s.io/kubernetes/plugin/pkg/client/auth" ) const ( @@ -83,8 +87,34 @@ func New(c *restclient.Config) (*Client, error) { return nil, err } } + var policyClient *PolicyClient + if registered.IsRegistered(policy.GroupName) { + policyConfig := *c + policyClient, err = NewPolicy(&policyConfig) + if err != nil { + return nil, err + } + } - return &Client{RESTClient: client, AutoscalingClient: autoscalingClient, BatchClient: batchClient, ExtensionsClient: extensionsClient, DiscoveryClient: discoveryClient}, nil + var appsClient *AppsClient + if registered.IsRegistered(apps.GroupName) { + appsConfig := *c + appsClient, err = NewApps(&appsConfig) + if err != nil { + return nil, err + } + } + + var rbacClient *RbacClient + if registered.IsRegistered(rbac.GroupName) { + rbacConfig := *c + rbacClient, err = NewRbac(&rbacConfig) + if err != nil { + return nil, err + } + } + + return &Client{RESTClient: client, AutoscalingClient: autoscalingClient, BatchClient: batchClient, ExtensionsClient: extensionsClient, DiscoveryClient: discoveryClient, AppsClient: appsClient, PolicyClient: policyClient, RbacClient: rbacClient}, nil } // MatchesServerVersion queries the server to compares the build version @@ -98,13 +128,14 @@ func MatchesServerVersion(client *Client, c *restclient.Config) error { return err } } - clientVersion := version.Get() - serverVersion, err := client.Discovery().ServerVersion() + cVer := version.Get() + sVer, err := client.Discovery().ServerVersion() if err != nil { return fmt.Errorf("couldn't read version from server: %v\n", err) } - if s := *serverVersion; !reflect.DeepEqual(clientVersion, s) { - return fmt.Errorf("server version (%#v) differs from client version (%#v)!\n", s, clientVersion) + // GitVersion includes GitCommit and GitTreeState, but best to be safe? + if cVer.GitVersion != sVer.GitVersion || cVer.GitCommit != sVer.GitCommit || cVer.GitTreeState != cVer.GitTreeState { + return fmt.Errorf("server version (%#v) differs from client version (%#v)!\n", sVer, cVer) } return nil @@ -220,6 +251,9 @@ func SetKubernetesDefaults(config *restclient.Config) error { // TODO: Unconditionally set the config.Version, until we fix the config. copyGroupVersion := g.GroupVersion config.GroupVersion = ©GroupVersion + if config.NegotiatedSerializer == nil { + config.NegotiatedSerializer = api.Codecs + } if config.Codec == nil { config.Codec = api.Codecs.LegacyCodec(*config.GroupVersion) } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/helper_blackbox_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/helper_blackbox_test.go new file mode 100644 index 000000000000..ce517530c184 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/helper_blackbox_test.go @@ -0,0 +1,129 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package unversioned_test + +import ( + "bytes" + "encoding/json" + "errors" + "io" + "io/ioutil" + "net/http" + "strings" + "testing" + + "k8s.io/kubernetes/pkg/api/testapi" + uapi "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/client/restclient" + "k8s.io/kubernetes/pkg/client/unversioned" + "k8s.io/kubernetes/pkg/client/unversioned/fake" + "k8s.io/kubernetes/pkg/runtime" +) + +func objBody(object interface{}) io.ReadCloser { + output, err := json.MarshalIndent(object, "", "") + if err != nil { + panic(err) + } + return ioutil.NopCloser(bytes.NewReader([]byte(output))) +} + +func TestNegotiateVersion(t *testing.T) { + tests := []struct { + name string + version *uapi.GroupVersion + expectedVersion *uapi.GroupVersion + serverVersions []string + clientVersions []uapi.GroupVersion + config *restclient.Config + expectErr func(err error) bool + sendErr error + }{ + { + name: "server supports client default", + version: &uapi.GroupVersion{Version: "version1"}, + config: &restclient.Config{}, + serverVersions: []string{"version1", testapi.Default.GroupVersion().String()}, + clientVersions: []uapi.GroupVersion{{Version: "version1"}, *testapi.Default.GroupVersion()}, + expectedVersion: &uapi.GroupVersion{Version: "version1"}, + }, + { + name: "server falls back to client supported", + version: testapi.Default.GroupVersion(), + config: &restclient.Config{}, + serverVersions: []string{"version1"}, + clientVersions: []uapi.GroupVersion{{Version: "version1"}, *testapi.Default.GroupVersion()}, + expectedVersion: &uapi.GroupVersion{Version: "version1"}, + }, + { + name: "explicit version supported", + config: &restclient.Config{ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}, + serverVersions: []string{"/version1", testapi.Default.GroupVersion().String()}, + clientVersions: []uapi.GroupVersion{{Version: "version1"}, *testapi.Default.GroupVersion()}, + expectedVersion: testapi.Default.GroupVersion(), + }, + { + name: "explicit version not supported", + config: &restclient.Config{ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}, + serverVersions: []string{"version1"}, + clientVersions: []uapi.GroupVersion{{Version: "version1"}, *testapi.Default.GroupVersion()}, + expectErr: func(err error) bool { return strings.Contains(err.Error(), `server does not support API version "v1"`) }, + }, + { + name: "connection refused error", + config: &restclient.Config{ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}, + serverVersions: []string{"version1"}, + clientVersions: []uapi.GroupVersion{{Version: "version1"}, *testapi.Default.GroupVersion()}, + sendErr: errors.New("connection refused"), + expectErr: func(err error) bool { return strings.Contains(err.Error(), "connection refused") }, + }, + } + codec := testapi.Default.Codec() + + for _, test := range tests { + fakeClient := &fake.RESTClient{ + Codec: codec, + Resp: &http.Response{ + StatusCode: 200, + Body: objBody(&uapi.APIVersions{Versions: test.serverVersions}), + }, + Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { + if test.sendErr != nil { + return nil, test.sendErr + } + header := http.Header{} + header.Set("Content-Type", runtime.ContentTypeJSON) + return &http.Response{StatusCode: 200, Header: header, Body: objBody(&uapi.APIVersions{Versions: test.serverVersions})}, nil + }), + } + c := unversioned.NewOrDie(test.config) + c.DiscoveryClient.Client = fakeClient.Client + response, err := 
unversioned.NegotiateVersion(c, test.config, test.version, test.clientVersions) + if err == nil && test.expectErr != nil { + t.Errorf("expected error, got nil for [%s].", test.name) + } + if err != nil { + if test.expectErr == nil || !test.expectErr(err) { + t.Errorf("unexpected error for [%s]: %v.", test.name, err) + } + continue + } + if *response != *test.expectedVersion { + t.Errorf("%s: expected version %s, got %s.", test.name, test.expectedVersion, response) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/helper_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/helper_test.go new file mode 100644 index 000000000000..0e186e4c78ef --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/helper_test.go @@ -0,0 +1,179 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + "encoding/json" + "net/http" + "net/http/httptest" + "reflect" + "testing" + + "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/client/restclient" + "k8s.io/kubernetes/pkg/runtime" +) + +func TestSetKubernetesDefaults(t *testing.T) { + testCases := []struct { + Config restclient.Config + After restclient.Config + Err bool + }{ + { + restclient.Config{}, + restclient.Config{ + APIPath: "/api", + ContentConfig: restclient.ContentConfig{ + GroupVersion: testapi.Default.GroupVersion(), + Codec: testapi.Default.Codec(), + NegotiatedSerializer: testapi.Default.NegotiatedSerializer(), + }, + QPS: 5, + Burst: 10, + }, + false, + }, + // Add this test back when we fixed config and SetKubernetesDefaults + // { + // restclient.Config{ + // GroupVersion: &unversioned.GroupVersion{Group: "not.a.group", Version: "not_an_api"}, + // }, + // restclient.Config{}, + // true, + // }, + } + for _, testCase := range testCases { + val := &testCase.Config + err := SetKubernetesDefaults(val) + val.UserAgent = "" + switch { + case err == nil && testCase.Err: + t.Errorf("expected error but was nil") + continue + case err != nil && !testCase.Err: + t.Errorf("unexpected error %v", err) + continue + case err != nil: + continue + } + if !reflect.DeepEqual(*val, testCase.After) { + t.Errorf("unexpected result object: %#v", val) + } + } +} + +func TestHelperGetServerAPIVersions(t *testing.T) { + expect := []string{"v1", "v2", "v3"} + APIVersions := unversioned.APIVersions{Versions: expect} + expect = append(expect, "group1/v1", "group1/v2", "group2/v1", "group2/v2") + APIGroupList := unversioned.APIGroupList{ + Groups: []unversioned.APIGroup{ + { + Versions: []unversioned.GroupVersionForDiscovery{ + { + GroupVersion: "group1/v1", + }, + { + GroupVersion: "group1/v2", + }, + }, + }, + { + Versions: []unversioned.GroupVersionForDiscovery{ + { + GroupVersion: "group2/v1", + }, + { + GroupVersion: "group2/v2", + }, + }, + }, + }, + } + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + var 
output []byte + var err error + switch req.URL.Path { + case "/api": + output, err = json.Marshal(APIVersions) + + case "/apis": + output, err = json.Marshal(APIGroupList) + } + if err != nil { + t.Errorf("unexpected encoding error: %v", err) + return + } + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + w.Write(output) + })) + defer server.Close() + got, err := restclient.ServerAPIVersions(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &unversioned.GroupVersion{Group: "invalid version", Version: "one"}, NegotiatedSerializer: testapi.Default.NegotiatedSerializer()}}) + if err != nil { + t.Fatalf("unexpected encoding error: %v", err) + } + if e, a := expect, got; !reflect.DeepEqual(e, a) { + t.Errorf("expected %v, got %v", e, a) + } +} + +func TestSetsCodec(t *testing.T) { + testCases := map[string]struct { + Err bool + Prefix string + Codec runtime.Codec + }{ + testapi.Default.GroupVersion().Version: {false, "/api/" + testapi.Default.GroupVersion().Version, testapi.Default.Codec()}, + // Add this test back when we fixed config and SetKubernetesDefaults + // "invalidVersion": {true, "", nil}, + } + for version, expected := range testCases { + conf := &restclient.Config{ + Host: "127.0.0.1", + ContentConfig: restclient.ContentConfig{ + GroupVersion: &unversioned.GroupVersion{Version: version}, + }, + } + + var versionedPath string + err := SetKubernetesDefaults(conf) + if err == nil { + _, versionedPath, err = restclient.DefaultServerURL(conf.Host, conf.APIPath, *conf.GroupVersion, false) + } + + switch { + case err == nil && expected.Err: + t.Errorf("expected error but was nil") + continue + case err != nil && !expected.Err: + t.Errorf("unexpected error %v", err) + continue + case err != nil: + continue + } + if e, a := expected.Prefix, versionedPath; e != a { + t.Errorf("expected %#v, got %#v", e, a) + } + if e, a := expected.Codec, conf.Codec; !reflect.DeepEqual(e, a) { + t.Errorf("expected %#v, got %#v", e, a) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/horizontalpodautoscaler.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/horizontalpodautoscaler.go index a4efc232a41a..8cdba3a265cf 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/horizontalpodautoscaler.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/horizontalpodautoscaler.go @@ -18,7 +18,7 @@ package unversioned import ( "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/pkg/apis/autoscaling" "k8s.io/kubernetes/pkg/watch" ) @@ -29,23 +29,23 @@ type HorizontalPodAutoscalersNamespacer interface { // HorizontalPodAutoscalerInterface has methods to work with HorizontalPodAutoscaler resources. 
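+// A typical call chain through this interface looks like the following (a
+// sketch only, assuming a *Client built by New with the autoscaling group
+// registered; the names "default" and "web" are illustrative):
+//
+//	hpa, err := client.Autoscaling().HorizontalPodAutoscalers("default").Get("web")
+//	if err != nil {
+//		// handle the lookup failure
+//	}
+//	hpa.Spec.MaxReplicas = 10
+//	hpa, err = client.Autoscaling().HorizontalPodAutoscalers("default").Update(hpa)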
type HorizontalPodAutoscalerInterface interface { - List(opts api.ListOptions) (*extensions.HorizontalPodAutoscalerList, error) - Get(name string) (*extensions.HorizontalPodAutoscaler, error) + List(opts api.ListOptions) (*autoscaling.HorizontalPodAutoscalerList, error) + Get(name string) (*autoscaling.HorizontalPodAutoscaler, error) Delete(name string, options *api.DeleteOptions) error - Create(horizontalPodAutoscaler *extensions.HorizontalPodAutoscaler) (*extensions.HorizontalPodAutoscaler, error) - Update(horizontalPodAutoscaler *extensions.HorizontalPodAutoscaler) (*extensions.HorizontalPodAutoscaler, error) - UpdateStatus(horizontalPodAutoscaler *extensions.HorizontalPodAutoscaler) (*extensions.HorizontalPodAutoscaler, error) + Create(horizontalPodAutoscaler *autoscaling.HorizontalPodAutoscaler) (*autoscaling.HorizontalPodAutoscaler, error) + Update(horizontalPodAutoscaler *autoscaling.HorizontalPodAutoscaler) (*autoscaling.HorizontalPodAutoscaler, error) + UpdateStatus(horizontalPodAutoscaler *autoscaling.HorizontalPodAutoscaler) (*autoscaling.HorizontalPodAutoscaler, error) Watch(opts api.ListOptions) (watch.Interface, error) } -// horizontalPodAutoscalers implements HorizontalPodAutoscalersNamespacer interface +// horizontalPodAutoscalers implements HorizontalPodAutoscalersNamespacer interface using AutoscalingClient internally type horizontalPodAutoscalers struct { - client *ExtensionsClient + client *AutoscalingClient ns string } // newHorizontalPodAutoscalers returns a horizontalPodAutoscalers -func newHorizontalPodAutoscalers(c *ExtensionsClient, namespace string) *horizontalPodAutoscalers { +func newHorizontalPodAutoscalers(c *AutoscalingClient, namespace string) *horizontalPodAutoscalers { return &horizontalPodAutoscalers{ client: c, ns: namespace, @@ -53,15 +53,15 @@ func newHorizontalPodAutoscalers(c *ExtensionsClient, namespace string) *horizon } // List takes label and field selectors, and returns the list of horizontalPodAutoscalers that match those selectors. -func (c *horizontalPodAutoscalers) List(opts api.ListOptions) (result *extensions.HorizontalPodAutoscalerList, err error) { - result = &extensions.HorizontalPodAutoscalerList{} +func (c *horizontalPodAutoscalers) List(opts api.ListOptions) (result *autoscaling.HorizontalPodAutoscalerList, err error) { + result = &autoscaling.HorizontalPodAutoscalerList{} err = c.client.Get().Namespace(c.ns).Resource("horizontalPodAutoscalers").VersionedParams(&opts, api.ParameterCodec).Do().Into(result) return } // Get takes the name of the horizontalPodAutoscaler, and returns the corresponding HorizontalPodAutoscaler object, and an error if it occurs -func (c *horizontalPodAutoscalers) Get(name string) (result *extensions.HorizontalPodAutoscaler, err error) { - result = &extensions.HorizontalPodAutoscaler{} +func (c *horizontalPodAutoscalers) Get(name string) (result *autoscaling.HorizontalPodAutoscaler, err error) { + result = &autoscaling.HorizontalPodAutoscaler{} err = c.client.Get().Namespace(c.ns).Resource("horizontalPodAutoscalers").Name(name).Do().Into(result) return } @@ -72,22 +72,22 @@ func (c *horizontalPodAutoscalers) Delete(name string, options *api.DeleteOption } // Create takes the representation of a horizontalPodAutoscaler and creates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if it occurs. 
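+// Create POSTs to the collection under the namespace the client was
+// constructed with; the apiserver rejects objects whose metadata.namespace
+// disagrees with that path segment.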
-func (c *horizontalPodAutoscalers) Create(horizontalPodAutoscaler *extensions.HorizontalPodAutoscaler) (result *extensions.HorizontalPodAutoscaler, err error) { - result = &extensions.HorizontalPodAutoscaler{} +func (c *horizontalPodAutoscalers) Create(horizontalPodAutoscaler *autoscaling.HorizontalPodAutoscaler) (result *autoscaling.HorizontalPodAutoscaler, err error) { + result = &autoscaling.HorizontalPodAutoscaler{} err = c.client.Post().Namespace(c.ns).Resource("horizontalPodAutoscalers").Body(horizontalPodAutoscaler).Do().Into(result) return } // Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if it occurs. -func (c *horizontalPodAutoscalers) Update(horizontalPodAutoscaler *extensions.HorizontalPodAutoscaler) (result *extensions.HorizontalPodAutoscaler, err error) { - result = &extensions.HorizontalPodAutoscaler{} +func (c *horizontalPodAutoscalers) Update(horizontalPodAutoscaler *autoscaling.HorizontalPodAutoscaler) (result *autoscaling.HorizontalPodAutoscaler, err error) { + result = &autoscaling.HorizontalPodAutoscaler{} err = c.client.Put().Namespace(c.ns).Resource("horizontalPodAutoscalers").Name(horizontalPodAutoscaler.Name).Body(horizontalPodAutoscaler).Do().Into(result) return } // UpdateStatus takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if it occurs. -func (c *horizontalPodAutoscalers) UpdateStatus(horizontalPodAutoscaler *extensions.HorizontalPodAutoscaler) (result *extensions.HorizontalPodAutoscaler, err error) { - result = &extensions.HorizontalPodAutoscaler{} +func (c *horizontalPodAutoscalers) UpdateStatus(horizontalPodAutoscaler *autoscaling.HorizontalPodAutoscaler) (result *autoscaling.HorizontalPodAutoscaler, err error) { + result = &autoscaling.HorizontalPodAutoscaler{} err = c.client.Put().Namespace(c.ns).Resource("horizontalPodAutoscalers").Name(horizontalPodAutoscaler.Name).SubResource("status").Body(horizontalPodAutoscaler).Do().Into(result) return } @@ -101,68 +101,3 @@ func (c *horizontalPodAutoscalers) Watch(opts api.ListOptions) (watch.Interface, VersionedParams(&opts, api.ParameterCodec). Watch() } - -// horizontalPodAutoscalersV1 implements HorizontalPodAutoscalersNamespacer interface using AutoscalingClient internally -// TODO(piosz): get back to one client implementation once HPA will be graduated to GA completely -type horizontalPodAutoscalersV1 struct { - client *AutoscalingClient - ns string -} - -// newHorizontalPodAutoscalers returns a horizontalPodAutoscalers -func newHorizontalPodAutoscalersV1(c *AutoscalingClient, namespace string) *horizontalPodAutoscalersV1 { - return &horizontalPodAutoscalersV1{ - client: c, - ns: namespace, - } -} - -// List takes label and field selectors, and returns the list of horizontalPodAutoscalers that match those selectors. 
-func (c *horizontalPodAutoscalersV1) List(opts api.ListOptions) (result *extensions.HorizontalPodAutoscalerList, err error) { - result = &extensions.HorizontalPodAutoscalerList{} - err = c.client.Get().Namespace(c.ns).Resource("horizontalPodAutoscalers").VersionedParams(&opts, api.ParameterCodec).Do().Into(result) - return -} - -// Get takes the name of the horizontalPodAutoscaler, and returns the corresponding HorizontalPodAutoscaler object, and an error if it occurs -func (c *horizontalPodAutoscalersV1) Get(name string) (result *extensions.HorizontalPodAutoscaler, err error) { - result = &extensions.HorizontalPodAutoscaler{} - err = c.client.Get().Namespace(c.ns).Resource("horizontalPodAutoscalers").Name(name).Do().Into(result) - return -} - -// Delete takes the name of the horizontalPodAutoscaler and deletes it. Returns an error if one occurs. -func (c *horizontalPodAutoscalersV1) Delete(name string, options *api.DeleteOptions) error { - return c.client.Delete().Namespace(c.ns).Resource("horizontalPodAutoscalers").Name(name).Body(options).Do().Error() -} - -// Create takes the representation of a horizontalPodAutoscaler and creates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if it occurs. -func (c *horizontalPodAutoscalersV1) Create(horizontalPodAutoscaler *extensions.HorizontalPodAutoscaler) (result *extensions.HorizontalPodAutoscaler, err error) { - result = &extensions.HorizontalPodAutoscaler{} - err = c.client.Post().Namespace(c.ns).Resource("horizontalPodAutoscalers").Body(horizontalPodAutoscaler).Do().Into(result) - return -} - -// Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if it occurs. -func (c *horizontalPodAutoscalersV1) Update(horizontalPodAutoscaler *extensions.HorizontalPodAutoscaler) (result *extensions.HorizontalPodAutoscaler, err error) { - result = &extensions.HorizontalPodAutoscaler{} - err = c.client.Put().Namespace(c.ns).Resource("horizontalPodAutoscalers").Name(horizontalPodAutoscaler.Name).Body(horizontalPodAutoscaler).Do().Into(result) - return -} - -// UpdateStatus takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if it occurs. -func (c *horizontalPodAutoscalersV1) UpdateStatus(horizontalPodAutoscaler *extensions.HorizontalPodAutoscaler) (result *extensions.HorizontalPodAutoscaler, err error) { - result = &extensions.HorizontalPodAutoscaler{} - err = c.client.Put().Namespace(c.ns).Resource("horizontalPodAutoscalers").Name(horizontalPodAutoscaler.Name).SubResource("status").Body(horizontalPodAutoscaler).Do().Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. -func (c *horizontalPodAutoscalersV1) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). - Namespace(c.ns). - Resource("horizontalPodAutoscalers"). - VersionedParams(&opts, api.ParameterCodec). 
- Watch() -} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/horizontalpodautoscaler_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/horizontalpodautoscaler_test.go new file mode 100644 index 000000000000..b893a29ad8a7 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/horizontalpodautoscaler_test.go @@ -0,0 +1,173 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned_test + +import ( + "net/url" + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/apis/autoscaling" + "k8s.io/kubernetes/pkg/client/unversioned/testclient/simple" +) + +func getHorizontalPodAutoscalersResourceName() string { + return "horizontalpodautoscalers" +} + +func TestHorizontalPodAutoscalerCreate(t *testing.T) { + ns := api.NamespaceDefault + horizontalPodAutoscaler := autoscaling.HorizontalPodAutoscaler{ + ObjectMeta: api.ObjectMeta{ + Name: "abc", + Namespace: ns, + }, + } + c := &simple.Client{ + Request: simple.Request{ + Method: "POST", + Path: testapi.Autoscaling.ResourcePath(getHorizontalPodAutoscalersResourceName(), ns, ""), + Query: simple.BuildQueryValues(nil), + Body: &horizontalPodAutoscaler, + }, + Response: simple.Response{StatusCode: 200, Body: &horizontalPodAutoscaler}, + ResourceGroup: autoscaling.GroupName, + } + + response, err := c.Setup(t).Autoscaling().HorizontalPodAutoscalers(ns).Create(&horizontalPodAutoscaler) + defer c.Close() + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + c.Validate(t, response, err) +} + +func TestHorizontalPodAutoscalerGet(t *testing.T) { + ns := api.NamespaceDefault + horizontalPodAutoscaler := &autoscaling.HorizontalPodAutoscaler{ + ObjectMeta: api.ObjectMeta{ + Name: "abc", + Namespace: ns, + }, + } + c := &simple.Client{ + Request: simple.Request{ + Method: "GET", + Path: testapi.Autoscaling.ResourcePath(getHorizontalPodAutoscalersResourceName(), ns, "abc"), + Query: simple.BuildQueryValues(nil), + Body: nil, + }, + Response: simple.Response{StatusCode: 200, Body: horizontalPodAutoscaler}, + ResourceGroup: autoscaling.GroupName, + } + + response, err := c.Setup(t).Autoscaling().HorizontalPodAutoscalers(ns).Get("abc") + defer c.Close() + c.Validate(t, response, err) +} + +func TestHorizontalPodAutoscalerList(t *testing.T) { + ns := api.NamespaceDefault + horizontalPodAutoscalerList := &autoscaling.HorizontalPodAutoscalerList{ + Items: []autoscaling.HorizontalPodAutoscaler{ + { + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Namespace: ns, + }, + }, + }, + } + c := &simple.Client{ + Request: simple.Request{ + Method: "GET", + Path: testapi.Autoscaling.ResourcePath(getHorizontalPodAutoscalersResourceName(), ns, ""), + Query: simple.BuildQueryValues(nil), + Body: nil, + }, + Response: simple.Response{StatusCode: 200, Body: horizontalPodAutoscalerList}, + ResourceGroup: autoscaling.GroupName, + } + response, err :=
c.Setup(t).Autoscaling().HorizontalPodAutoscalers(ns).List(api.ListOptions{}) + defer c.Close() + c.Validate(t, response, err) +} + +func TestHorizontalPodAutoscalerUpdate(t *testing.T) { + ns := api.NamespaceDefault + horizontalPodAutoscaler := &autoscaling.HorizontalPodAutoscaler{ + ObjectMeta: api.ObjectMeta{ + Name: "abc", + Namespace: ns, + ResourceVersion: "1", + }, + } + c := &simple.Client{ + Request: simple.Request{Method: "PUT", Path: testapi.Autoscaling.ResourcePath(getHorizontalPodAutoscalersResourceName(), ns, "abc"), Query: simple.BuildQueryValues(nil)}, + Response: simple.Response{StatusCode: 200, Body: horizontalPodAutoscaler}, + ResourceGroup: autoscaling.GroupName, + } + response, err := c.Setup(t).Autoscaling().HorizontalPodAutoscalers(ns).Update(horizontalPodAutoscaler) + defer c.Close() + c.Validate(t, response, err) +} + +func TestHorizontalPodAutoscalerUpdateStatus(t *testing.T) { + ns := api.NamespaceDefault + horizontalPodAutoscaler := &autoscaling.HorizontalPodAutoscaler{ + ObjectMeta: api.ObjectMeta{ + Name: "abc", + Namespace: ns, + ResourceVersion: "1", + }, + } + c := &simple.Client{ + Request: simple.Request{Method: "PUT", Path: testapi.Autoscaling.ResourcePath(getHorizontalPodAutoscalersResourceName(), ns, "abc") + "/status", Query: simple.BuildQueryValues(nil)}, + Response: simple.Response{StatusCode: 200, Body: horizontalPodAutoscaler}, + ResourceGroup: autoscaling.GroupName, + } + response, err := c.Setup(t).Autoscaling().HorizontalPodAutoscalers(ns).UpdateStatus(horizontalPodAutoscaler) + defer c.Close() + c.Validate(t, response, err) +} + +func TestHorizontalPodAutoscalerDelete(t *testing.T) { + ns := api.NamespaceDefault + c := &simple.Client{ + Request: simple.Request{Method: "DELETE", Path: testapi.Autoscaling.ResourcePath(getHorizontalPodAutoscalersResourceName(), ns, "foo"), Query: simple.BuildQueryValues(nil)}, + Response: simple.Response{StatusCode: 200}, + ResourceGroup: autoscaling.GroupName, + } + err := c.Setup(t).Autoscaling().HorizontalPodAutoscalers(ns).Delete("foo", nil) + defer c.Close() + c.Validate(t, nil, err) +} + +func TestHorizontalPodAutoscalerWatch(t *testing.T) { + c := &simple.Client{ + Request: simple.Request{ + Method: "GET", + Path: testapi.Autoscaling.ResourcePathWithPrefix("watch", getHorizontalPodAutoscalersResourceName(), "", ""), + Query: url.Values{"resourceVersion": []string{}}}, + Response: simple.Response{StatusCode: 200}, + ResourceGroup: autoscaling.GroupName, + } + _, err := c.Setup(t).Autoscaling().HorizontalPodAutoscalers(api.NamespaceAll).Watch(api.ListOptions{}) + defer c.Close() + c.Validate(t, nil, err) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/import_known_versions.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/import_known_versions.go index c2d7fde2fb78..02387a5bb5e1 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/import_known_versions.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/import_known_versions.go @@ -22,12 +22,16 @@ import ( _ "k8s.io/kubernetes/pkg/api/install" "k8s.io/kubernetes/pkg/apimachinery/registered" + _ "k8s.io/kubernetes/pkg/apis/apps/install" + _ "k8s.io/kubernetes/pkg/apis/authentication.k8s.io/install" _ "k8s.io/kubernetes/pkg/apis/authorization/install" _ "k8s.io/kubernetes/pkg/apis/autoscaling/install" _ "k8s.io/kubernetes/pkg/apis/batch/install" _ "k8s.io/kubernetes/pkg/apis/componentconfig/install" _ "k8s.io/kubernetes/pkg/apis/extensions/install" _
"k8s.io/kubernetes/pkg/apis/metrics/install" + _ "k8s.io/kubernetes/pkg/apis/policy/install" + _ "k8s.io/kubernetes/pkg/apis/rbac/install" ) func init() { diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/ingress_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/ingress_test.go new file mode 100644 index 000000000000..dfec482eade8 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/ingress_test.go @@ -0,0 +1,236 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned_test + +import ( + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/pkg/client/unversioned/testclient/simple" +) + +func getIngressResourceName() string { + return "ingresses" +} + +func TestListIngress(t *testing.T) { + ns := api.NamespaceAll + c := &simple.Client{ + Request: simple.Request{ + Method: "GET", + Path: testapi.Extensions.ResourcePath(getIngressResourceName(), ns, ""), + }, + Response: simple.Response{StatusCode: 200, + Body: &extensions.IngressList{ + Items: []extensions.Ingress{ + { + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Labels: map[string]string{ + "foo": "bar", + "name": "baz", + }, + }, + Spec: extensions.IngressSpec{ + Rules: []extensions.IngressRule{}, + }, + }, + }, + }, + }, + } + receivedIngressList, err := c.Setup(t).Extensions().Ingress(ns).List(api.ListOptions{}) + defer c.Close() + c.Validate(t, receivedIngressList, err) +} + +func TestGetIngress(t *testing.T) { + ns := api.NamespaceDefault + c := &simple.Client{ + Request: simple.Request{ + Method: "GET", + Path: testapi.Extensions.ResourcePath(getIngressResourceName(), ns, "foo"), + Query: simple.BuildQueryValues(nil), + }, + Response: simple.Response{ + StatusCode: 200, + Body: &extensions.Ingress{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Labels: map[string]string{ + "foo": "bar", + "name": "baz", + }, + }, + Spec: extensions.IngressSpec{ + Rules: []extensions.IngressRule{}, + }, + }, + }, + } + receivedIngress, err := c.Setup(t).Extensions().Ingress(ns).Get("foo") + defer c.Close() + c.Validate(t, receivedIngress, err) +} + +func TestGetIngressWithNoName(t *testing.T) { + ns := api.NamespaceDefault + c := &simple.Client{Error: true} + receivedIngress, err := c.Setup(t).Extensions().Ingress(ns).Get("") + defer c.Close() + if (err != nil) && (err.Error() != simple.NameRequiredError) { + t.Errorf("Expected error: %v, but got %v", simple.NameRequiredError, err) + } + + c.Validate(t, receivedIngress, err) +} + +func TestUpdateIngress(t *testing.T) { + ns := api.NamespaceDefault + requestIngress := &extensions.Ingress{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Namespace: ns, + ResourceVersion: "1", + }, + } + c := &simple.Client{ + Request: simple.Request{ + Method: "PUT", + Path: testapi.Extensions.ResourcePath(getIngressResourceName(), ns, "foo"), + Query: simple.BuildQueryValues(nil), + }, + 
Response: simple.Response{ + StatusCode: 200, + Body: &extensions.Ingress{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Labels: map[string]string{ + "foo": "bar", + "name": "baz", + }, + }, + Spec: extensions.IngressSpec{ + Rules: []extensions.IngressRule{}, + }, + }, + }, + } + receivedIngress, err := c.Setup(t).Extensions().Ingress(ns).Update(requestIngress) + defer c.Close() + c.Validate(t, receivedIngress, err) +} + +func TestUpdateIngressStatus(t *testing.T) { + ns := api.NamespaceDefault + lbStatus := api.LoadBalancerStatus{ + Ingress: []api.LoadBalancerIngress{ + {IP: "127.0.0.1"}, + }, + } + requestIngress := &extensions.Ingress{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Namespace: ns, + ResourceVersion: "1", + }, + Status: extensions.IngressStatus{ + LoadBalancer: lbStatus, + }, + } + c := &simple.Client{ + Request: simple.Request{ + Method: "PUT", + Path: testapi.Extensions.ResourcePath(getIngressResourceName(), ns, "foo") + "/status", + Query: simple.BuildQueryValues(nil), + }, + Response: simple.Response{ + StatusCode: 200, + Body: &extensions.Ingress{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Labels: map[string]string{ + "foo": "bar", + "name": "baz", + }, + }, + Spec: extensions.IngressSpec{ + Rules: []extensions.IngressRule{}, + }, + Status: extensions.IngressStatus{ + LoadBalancer: lbStatus, + }, + }, + }, + } + receivedIngress, err := c.Setup(t).Extensions().Ingress(ns).UpdateStatus(requestIngress) + defer c.Close() + c.Validate(t, receivedIngress, err) +} + +func TestDeleteIngress(t *testing.T) { + ns := api.NamespaceDefault + c := &simple.Client{ + Request: simple.Request{ + Method: "DELETE", + Path: testapi.Extensions.ResourcePath(getIngressResourceName(), ns, "foo"), + Query: simple.BuildQueryValues(nil), + }, + Response: simple.Response{StatusCode: 200}, + } + err := c.Setup(t).Extensions().Ingress(ns).Delete("foo", nil) + defer c.Close() + c.Validate(t, nil, err) +} + +func TestCreateIngress(t *testing.T) { + ns := api.NamespaceDefault + requestIngress := &extensions.Ingress{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Namespace: ns, + }, + } + c := &simple.Client{ + Request: simple.Request{ + Method: "POST", + Path: testapi.Extensions.ResourcePath(getIngressResourceName(), ns, ""), + Body: requestIngress, + Query: simple.BuildQueryValues(nil), + }, + Response: simple.Response{ + StatusCode: 200, + Body: &extensions.Ingress{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Labels: map[string]string{ + "foo": "bar", + "name": "baz", + }, + }, + Spec: extensions.IngressSpec{ + Rules: []extensions.IngressRule{}, + }, + }, + }, + } + receivedIngress, err := c.Setup(t).Extensions().Ingress(ns).Create(requestIngress) + defer c.Close() + c.Validate(t, receivedIngress, err) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/jobs.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/jobs.go index f965a0874932..94b819079a92 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/jobs.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/jobs.go @@ -18,7 +18,7 @@ package unversioned import ( "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/pkg/apis/batch" "k8s.io/kubernetes/pkg/watch" ) @@ -29,13 +29,13 @@ type JobsNamespacer interface { // JobInterface exposes methods to work on Job resources. 
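+// While Job migrates from extensions/v1beta1 to batch/v1, the same interface
+// is implemented against both group clients, so either path works (a sketch
+// only, assuming a configured *Client; "default" and "pi" are illustrative):
+//
+//	j, err := client.Batch().Jobs("default").Get("pi")      // batch group
+//	j, err = client.Extensions().Jobs("default").Get("pi")  // extensions group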
type JobInterface interface { - List(opts api.ListOptions) (*extensions.JobList, error) - Get(name string) (*extensions.Job, error) - Create(job *extensions.Job) (*extensions.Job, error) - Update(job *extensions.Job) (*extensions.Job, error) + List(opts api.ListOptions) (*batch.JobList, error) + Get(name string) (*batch.Job, error) + Create(job *batch.Job) (*batch.Job, error) + Update(job *batch.Job) (*batch.Job, error) Delete(name string, options *api.DeleteOptions) error Watch(opts api.ListOptions) (watch.Interface, error) - UpdateStatus(job *extensions.Job) (*extensions.Job, error) + UpdateStatus(job *batch.Job) (*batch.Job, error) } // jobs implements JobsNamespacer interface @@ -53,29 +53,29 @@ func newJobs(c *ExtensionsClient, namespace string) *jobs { var _ JobInterface = &jobs{} // List returns a list of jobs that match the label and field selectors. -func (c *jobs) List(opts api.ListOptions) (result *extensions.JobList, err error) { - result = &extensions.JobList{} +func (c *jobs) List(opts api.ListOptions) (result *batch.JobList, err error) { + result = &batch.JobList{} err = c.r.Get().Namespace(c.ns).Resource("jobs").VersionedParams(&opts, api.ParameterCodec).Do().Into(result) return } // Get returns information about a particular job. -func (c *jobs) Get(name string) (result *extensions.Job, err error) { - result = &extensions.Job{} +func (c *jobs) Get(name string) (result *batch.Job, err error) { + result = &batch.Job{} err = c.r.Get().Namespace(c.ns).Resource("jobs").Name(name).Do().Into(result) return } // Create creates a new job. -func (c *jobs) Create(job *extensions.Job) (result *extensions.Job, err error) { - result = &extensions.Job{} +func (c *jobs) Create(job *batch.Job) (result *batch.Job, err error) { + result = &batch.Job{} err = c.r.Post().Namespace(c.ns).Resource("jobs").Body(job).Do().Into(result) return } // Update updates an existing job. -func (c *jobs) Update(job *extensions.Job) (result *extensions.Job, err error) { - result = &extensions.Job{} +func (c *jobs) Update(job *batch.Job) (result *batch.Job, err error) { + result = &batch.Job{} err = c.r.Put().Namespace(c.ns).Resource("jobs").Name(job.Name).Body(job).Do().Into(result) return } @@ -96,8 +96,8 @@ func (c *jobs) Watch(opts api.ListOptions) (watch.Interface, error) { } // UpdateStatus takes the name of the job and the new status. Returns the server's representation of the job, and an error, if it occurs. -func (c *jobs) UpdateStatus(job *extensions.Job) (result *extensions.Job, err error) { - result = &extensions.Job{} +func (c *jobs) UpdateStatus(job *batch.Job) (result *batch.Job, err error) { + result = &batch.Job{} err = c.r.Put().Namespace(c.ns).Resource("jobs").Name(job.Name).SubResource("status").Body(job).Do().Into(result) return } @@ -117,29 +117,29 @@ func newJobsV1(c *BatchClient, namespace string) *jobsV1 { var _ JobInterface = &jobsV1{} // List returns a list of jobs that match the label and field selectors. -func (c *jobsV1) List(opts api.ListOptions) (result *extensions.JobList, err error) { - result = &extensions.JobList{} +func (c *jobsV1) List(opts api.ListOptions) (result *batch.JobList, err error) { + result = &batch.JobList{} err = c.r.Get().Namespace(c.ns).Resource("jobs").VersionedParams(&opts, api.ParameterCodec).Do().Into(result) return } // Get returns information about a particular job. 
-func (c *jobsV1) Get(name string) (result *extensions.Job, err error) { - result = &extensions.Job{} +func (c *jobsV1) Get(name string) (result *batch.Job, err error) { + result = &batch.Job{} err = c.r.Get().Namespace(c.ns).Resource("jobs").Name(name).Do().Into(result) return } // Create creates a new job. -func (c *jobsV1) Create(job *extensions.Job) (result *extensions.Job, err error) { - result = &extensions.Job{} +func (c *jobsV1) Create(job *batch.Job) (result *batch.Job, err error) { + result = &batch.Job{} err = c.r.Post().Namespace(c.ns).Resource("jobs").Body(job).Do().Into(result) return } // Update updates an existing job. -func (c *jobsV1) Update(job *extensions.Job) (result *extensions.Job, err error) { - result = &extensions.Job{} +func (c *jobsV1) Update(job *batch.Job) (result *batch.Job, err error) { + result = &batch.Job{} err = c.r.Put().Namespace(c.ns).Resource("jobs").Name(job.Name).Body(job).Do().Into(result) return } @@ -160,8 +160,8 @@ func (c *jobsV1) Watch(opts api.ListOptions) (watch.Interface, error) { } // UpdateStatus takes the name of the job and the new status. Returns the server's representation of the job, and an error, if it occurs. -func (c *jobsV1) UpdateStatus(job *extensions.Job) (result *extensions.Job, err error) { - result = &extensions.Job{} +func (c *jobsV1) UpdateStatus(job *batch.Job) (result *batch.Job, err error) { + result = &batch.Job{} err = c.r.Put().Namespace(c.ns).Resource("jobs").Name(job.Name).SubResource("status").Body(job).Do().Into(result) return } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/jobs_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/jobs_test.go new file mode 100644 index 000000000000..e47d49d51e76 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/jobs_test.go @@ -0,0 +1,269 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package unversioned_test + +import ( + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/apis/batch" + "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/pkg/client/unversioned" + "k8s.io/kubernetes/pkg/client/unversioned/testclient/simple" +) + +func getJobsResourceName() string { + return "jobs" +} + +func getJobClient(t *testing.T, c *simple.Client, ns, resourceGroup string) unversioned.JobInterface { + switch resourceGroup { + case batch.GroupName: + return c.Setup(t).Batch().Jobs(ns) + case extensions.GroupName: + return c.Setup(t).Extensions().Jobs(ns) + default: + t.Fatalf("Unknown group %v", resourceGroup) + } + return nil +} + +func testListJob(t *testing.T, group testapi.TestGroup, resourceGroup string) { + ns := api.NamespaceAll + c := &simple.Client{ + Request: simple.Request{ + Method: "GET", + Path: group.ResourcePath(getJobsResourceName(), ns, ""), + }, + Response: simple.Response{StatusCode: 200, + Body: &batch.JobList{ + Items: []batch.Job{ + { + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Labels: map[string]string{ + "foo": "bar", + "name": "baz", + }, + }, + Spec: batch.JobSpec{ + Template: api.PodTemplateSpec{}, + }, + }, + }, + }, + }, + ResourceGroup: resourceGroup, + } + receivedJobList, err := getJobClient(t, c, ns, resourceGroup).List(api.ListOptions{}) + defer c.Close() + c.Validate(t, receivedJobList, err) +} + +func TestListJob(t *testing.T) { + testListJob(t, testapi.Extensions, extensions.GroupName) + testListJob(t, testapi.Batch, batch.GroupName) +} + +func testGetJob(t *testing.T, group testapi.TestGroup, resourceGroup string) { + ns := api.NamespaceDefault + c := &simple.Client{ + Request: simple.Request{ + Method: "GET", + Path: group.ResourcePath(getJobsResourceName(), ns, "foo"), + Query: simple.BuildQueryValues(nil), + }, + Response: simple.Response{ + StatusCode: 200, + Body: &batch.Job{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Labels: map[string]string{ + "foo": "bar", + "name": "baz", + }, + }, + Spec: batch.JobSpec{ + Template: api.PodTemplateSpec{}, + }, + }, + }, + ResourceGroup: resourceGroup, + } + receivedJob, err := getJobClient(t, c, ns, resourceGroup).Get("foo") + defer c.Close() + c.Validate(t, receivedJob, err) +} + +func TestGetJob(t *testing.T) { + testGetJob(t, testapi.Extensions, extensions.GroupName) + testGetJob(t, testapi.Batch, batch.GroupName) +} + +func testUpdateJob(t *testing.T, group testapi.TestGroup, resourceGroup string) { + ns := api.NamespaceDefault + requestJob := &batch.Job{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Namespace: ns, + ResourceVersion: "1", + }, + } + c := &simple.Client{ + Request: simple.Request{ + Method: "PUT", + Path: group.ResourcePath(getJobsResourceName(), ns, "foo"), + Query: simple.BuildQueryValues(nil), + }, + Response: simple.Response{ + StatusCode: 200, + Body: &batch.Job{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Labels: map[string]string{ + "foo": "bar", + "name": "baz", + }, + }, + Spec: batch.JobSpec{ + Template: api.PodTemplateSpec{}, + }, + }, + }, + ResourceGroup: resourceGroup, + } + receivedJob, err := getJobClient(t, c, ns, resourceGroup).Update(requestJob) + defer c.Close() + c.Validate(t, receivedJob, err) +} + +func TestUpdateJob(t *testing.T) { + testUpdateJob(t, testapi.Extensions, extensions.GroupName) + testUpdateJob(t, testapi.Batch, batch.GroupName) +} + +func testUpdateJobStatus(t *testing.T, group testapi.TestGroup, resourceGroup string) { + ns := api.NamespaceDefault + requestJob := 
&batch.Job{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Namespace: ns, + ResourceVersion: "1", + }, + } + c := &simple.Client{ + Request: simple.Request{ + Method: "PUT", + Path: group.ResourcePath(getJobsResourceName(), ns, "foo") + "/status", + Query: simple.BuildQueryValues(nil), + }, + Response: simple.Response{ + StatusCode: 200, + Body: &batch.Job{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Labels: map[string]string{ + "foo": "bar", + "name": "baz", + }, + }, + Spec: batch.JobSpec{ + Template: api.PodTemplateSpec{}, + }, + Status: batch.JobStatus{ + Active: 1, + }, + }, + }, + ResourceGroup: resourceGroup, + } + receivedJob, err := getJobClient(t, c, ns, resourceGroup).UpdateStatus(requestJob) + defer c.Close() + c.Validate(t, receivedJob, err) +} + +func TestUpdateJobStatus(t *testing.T) { + testUpdateJobStatus(t, testapi.Extensions, extensions.GroupName) + testUpdateJobStatus(t, testapi.Batch, batch.GroupName) +} + +func testDeleteJob(t *testing.T, group testapi.TestGroup, resourceGroup string) { + ns := api.NamespaceDefault + c := &simple.Client{ + Request: simple.Request{ + Method: "DELETE", + Path: group.ResourcePath(getJobsResourceName(), ns, "foo"), + Query: simple.BuildQueryValues(nil), + }, + Response: simple.Response{StatusCode: 200}, + ResourceGroup: resourceGroup, + } + err := getJobClient(t, c, ns, resourceGroup).Delete("foo", nil) + defer c.Close() + c.Validate(t, nil, err) +} + +func TestDeleteJob(t *testing.T) { + testDeleteJob(t, testapi.Extensions, extensions.GroupName) + testDeleteJob(t, testapi.Batch, batch.GroupName) +} + +func testCreateJob(t *testing.T, group testapi.TestGroup, resourceGroup string) { + ns := api.NamespaceDefault + requestJob := &batch.Job{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Namespace: ns, + }, + } + c := &simple.Client{ + Request: simple.Request{ + Method: "POST", + Path: group.ResourcePath(getJobsResourceName(), ns, ""), + Body: requestJob, + Query: simple.BuildQueryValues(nil), + }, + Response: simple.Response{ + StatusCode: 200, + Body: &batch.Job{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Labels: map[string]string{ + "foo": "bar", + "name": "baz", + }, + }, + Spec: batch.JobSpec{ + Template: api.PodTemplateSpec{}, + }, + }, + }, + ResourceGroup: resourceGroup, + } + receivedJob, err := getJobClient(t, c, ns, resourceGroup).Create(requestJob) + defer c.Close() + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + c.Validate(t, receivedJob, err) +} + +func TestCreateJob(t *testing.T) { + testCreateJob(t, testapi.Extensions, extensions.GroupName) + testCreateJob(t, testapi.Batch, batch.GroupName) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/limit_ranges_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/limit_ranges_test.go new file mode 100644 index 000000000000..445310291e25 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/limit_ranges_test.go @@ -0,0 +1,185 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned_test + +import ( + "net/url" + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/resource" + "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/client/unversioned/testclient/simple" +) + +func getLimitRangesResourceName() string { + return "limitranges" +} + +func TestLimitRangeCreate(t *testing.T) { + ns := api.NamespaceDefault + limitRange := &api.LimitRange{ + ObjectMeta: api.ObjectMeta{ + Name: "abc", + }, + Spec: api.LimitRangeSpec{ + Limits: []api.LimitRangeItem{ + { + Type: api.LimitTypePod, + Max: api.ResourceList{ + api.ResourceCPU: resource.MustParse("100"), + api.ResourceMemory: resource.MustParse("10000"), + }, + Min: api.ResourceList{ + api.ResourceCPU: resource.MustParse("0"), + api.ResourceMemory: resource.MustParse("100"), + }, + }, + }, + }, + } + c := &simple.Client{ + Request: simple.Request{ + Method: "POST", + Path: testapi.Default.ResourcePath(getLimitRangesResourceName(), ns, ""), + Query: simple.BuildQueryValues(nil), + Body: limitRange, + }, + Response: simple.Response{StatusCode: 200, Body: limitRange}, + } + + response, err := c.Setup(t).LimitRanges(ns).Create(limitRange) + defer c.Close() + c.Validate(t, response, err) +} + +func TestLimitRangeGet(t *testing.T) { + ns := api.NamespaceDefault + limitRange := &api.LimitRange{ + ObjectMeta: api.ObjectMeta{ + Name: "abc", + }, + Spec: api.LimitRangeSpec{ + Limits: []api.LimitRangeItem{ + { + Type: api.LimitTypePod, + Max: api.ResourceList{ + api.ResourceCPU: resource.MustParse("100"), + api.ResourceMemory: resource.MustParse("10000"), + }, + Min: api.ResourceList{ + api.ResourceCPU: resource.MustParse("0"), + api.ResourceMemory: resource.MustParse("100"), + }, + }, + }, + }, + } + c := &simple.Client{ + Request: simple.Request{ + Method: "GET", + Path: testapi.Default.ResourcePath(getLimitRangesResourceName(), ns, "abc"), + Query: simple.BuildQueryValues(nil), + Body: nil, + }, + Response: simple.Response{StatusCode: 200, Body: limitRange}, + } + + response, err := c.Setup(t).LimitRanges(ns).Get("abc") + defer c.Close() + c.Validate(t, response, err) +} + +func TestLimitRangeList(t *testing.T) { + ns := api.NamespaceDefault + + limitRangeList := &api.LimitRangeList{ + Items: []api.LimitRange{ + { + ObjectMeta: api.ObjectMeta{Name: "foo"}, + }, + }, + } + c := &simple.Client{ + Request: simple.Request{ + Method: "GET", + Path: testapi.Default.ResourcePath(getLimitRangesResourceName(), ns, ""), + Query: simple.BuildQueryValues(nil), + Body: nil, + }, + Response: simple.Response{StatusCode: 200, Body: limitRangeList}, + } + response, err := c.Setup(t).LimitRanges(ns).List(api.ListOptions{}) + defer c.Close() + c.Validate(t, response, err) +} + +func TestLimitRangeUpdate(t *testing.T) { + ns := api.NamespaceDefault + limitRange := &api.LimitRange{ + ObjectMeta: api.ObjectMeta{ + Name: "abc", + ResourceVersion: "1", + }, + Spec: api.LimitRangeSpec{ + Limits: []api.LimitRangeItem{ + { + Type: api.LimitTypePod, + Max: api.ResourceList{ + api.ResourceCPU: resource.MustParse("100"), + api.ResourceMemory: resource.MustParse("10000"), + }, + Min: api.ResourceList{ + api.ResourceCPU: resource.MustParse("0"), + api.ResourceMemory: resource.MustParse("100"), + }, + }, + }, + }, + } + c := &simple.Client{ + Request: simple.Request{Method: "PUT", Path: testapi.Default.ResourcePath(getLimitRangesResourceName(), ns, "abc"), Query: simple.BuildQueryValues(nil)}, + Response: 
simple.Response{StatusCode: 200, Body: limitRange}, + } + response, err := c.Setup(t).LimitRanges(ns).Update(limitRange) + defer c.Close() + c.Validate(t, response, err) +} + +func TestLimitRangeDelete(t *testing.T) { + ns := api.NamespaceDefault + c := &simple.Client{ + Request: simple.Request{Method: "DELETE", Path: testapi.Default.ResourcePath(getLimitRangesResourceName(), ns, "foo"), Query: simple.BuildQueryValues(nil)}, + Response: simple.Response{StatusCode: 200}, + } + err := c.Setup(t).LimitRanges(ns).Delete("foo") + defer c.Close() + c.Validate(t, nil, err) +} + +func TestLimitRangeWatch(t *testing.T) { + c := &simple.Client{ + Request: simple.Request{ + Method: "GET", + Path: testapi.Default.ResourcePathWithPrefix("watch", getLimitRangesResourceName(), "", ""), + Query: url.Values{"resourceVersion": []string{}}}, + Response: simple.Response{StatusCode: 200}, + } + _, err := c.Setup(t).LimitRanges(api.NamespaceAll).Watch(api.ListOptions{}) + defer c.Close() + c.Validate(t, nil, err) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/namespaces_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/namespaces_test.go new file mode 100644 index 000000000000..8e38c935b65a --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/namespaces_test.go @@ -0,0 +1,185 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package unversioned_test + +import ( + "net/url" + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/client/unversioned/testclient/simple" +) + +func TestNamespaceCreate(t *testing.T) { + // we create a namespace relative to another namespace + namespace := &api.Namespace{ + ObjectMeta: api.ObjectMeta{Name: "foo"}, + } + c := &simple.Client{ + Request: simple.Request{ + Method: "POST", + Path: testapi.Default.ResourcePath("namespaces", "", ""), + Body: namespace, + }, + Response: simple.Response{StatusCode: 200, Body: namespace}, + } + + // from the source ns, provision a new global namespace "foo" + response, err := c.Setup(t).Namespaces().Create(namespace) + defer c.Close() + + if err != nil { + t.Errorf("%#v should be nil.", err) + } + + if e, a := response.Name, namespace.Name; e != a { + t.Errorf("%#v != %#v.", e, a) + } +} + +func TestNamespaceGet(t *testing.T) { + namespace := &api.Namespace{ + ObjectMeta: api.ObjectMeta{Name: "foo"}, + } + c := &simple.Client{ + Request: simple.Request{ + Method: "GET", + Path: testapi.Default.ResourcePath("namespaces", "", "foo"), + Body: nil, + }, + Response: simple.Response{StatusCode: 200, Body: namespace}, + } + + response, err := c.Setup(t).Namespaces().Get("foo") + defer c.Close() + + if err != nil { + t.Errorf("%#v should be nil.", err) + } + + if e, r := response.Name, namespace.Name; e != r { + t.Errorf("%#v != %#v.", e, r) + } +} + +func TestNamespaceList(t *testing.T) { + namespaceList := &api.NamespaceList{ + Items: []api.Namespace{ + { + ObjectMeta: api.ObjectMeta{Name: "foo"}, + }, + }, + } + c := &simple.Client{ + Request: simple.Request{ + Method: "GET", + Path: testapi.Default.ResourcePath("namespaces", "", ""), + Body: nil, + }, + Response: simple.Response{StatusCode: 200, Body: namespaceList}, + } + response, err := c.Setup(t).Namespaces().List(api.ListOptions{}) + defer c.Close() + + if err != nil { + t.Errorf("%#v should be nil.", err) + } + + if len(response.Items) != 1 { + t.Errorf("%#v response.Items should have len 1.", response.Items) + } + + responseNamespace := response.Items[0] + if e, r := responseNamespace.Name, "foo"; e != r { + t.Errorf("%#v != %#v.", e, r) + } +} + +func TestNamespaceUpdate(t *testing.T) { + requestNamespace := &api.Namespace{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + ResourceVersion: "1", + Labels: map[string]string{ + "foo": "bar", + "name": "baz", + }, + }, + Spec: api.NamespaceSpec{ + Finalizers: []api.FinalizerName{api.FinalizerKubernetes}, + }, + } + c := &simple.Client{ + Request: simple.Request{ + Method: "PUT", + Path: testapi.Default.ResourcePath("namespaces", "", "foo")}, + Response: simple.Response{StatusCode: 200, Body: requestNamespace}, + } + receivedNamespace, err := c.Setup(t).Namespaces().Update(requestNamespace) + defer c.Close() + c.Validate(t, receivedNamespace, err) +} + +func TestNamespaceFinalize(t *testing.T) { + requestNamespace := &api.Namespace{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + ResourceVersion: "1", + Labels: map[string]string{ + "foo": "bar", + "name": "baz", + }, + }, + Spec: api.NamespaceSpec{ + Finalizers: []api.FinalizerName{api.FinalizerKubernetes}, + }, + } + c := &simple.Client{ + Request: simple.Request{ + Method: "PUT", + Path: testapi.Default.ResourcePath("namespaces", "", "foo") + "/finalize", + }, + Response: simple.Response{StatusCode: 200, Body: requestNamespace}, + } + receivedNamespace, err := c.Setup(t).Namespaces().Finalize(requestNamespace) + defer c.Close() + 
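+	// The PUT above targeted the namespace's /finalize subresource; Validate
+	// compares the recorded request and the decoded response against the
+	// expectations set in c.Request and c.Response.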
c.Validate(t, receivedNamespace, err) +} + +func TestNamespaceDelete(t *testing.T) { + c := &simple.Client{ + Request: simple.Request{Method: "DELETE", Path: testapi.Default.ResourcePath("namespaces", "", "foo")}, + Response: simple.Response{StatusCode: 200}, + } + err := c.Setup(t).Namespaces().Delete("foo") + defer c.Close() + c.Validate(t, nil, err) +} + +func TestNamespaceWatch(t *testing.T) { + c := &simple.Client{ + Request: simple.Request{ + Method: "GET", + Path: testapi.Default.ResourcePathWithPrefix("watch", "namespaces", "", ""), + Query: url.Values{"resourceVersion": []string{}}}, + Response: simple.Response{StatusCode: 200}, + } + _, err := c.Setup(t).Namespaces().Watch(api.ListOptions{}) + defer c.Close() + c.Validate(t, nil, err) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/network_policys.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/network_policys.go new file mode 100644 index 000000000000..0dc9d97be893 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/network_policys.go @@ -0,0 +1,92 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/pkg/watch" +) + +// NetworkPolicyNamespacer has methods to work with NetworkPolicy resources in a namespace +type NetworkPolicyNamespacer interface { + NetworkPolicies(namespace string) NetworkPolicyInterface +} + +// NetworkPolicyInterface exposes methods to work on NetworkPolicy resources. +type NetworkPolicyInterface interface { + List(opts api.ListOptions) (*extensions.NetworkPolicyList, error) + Get(name string) (*extensions.NetworkPolicy, error) + Create(networkPolicy *extensions.NetworkPolicy) (*extensions.NetworkPolicy, error) + Update(networkPolicy *extensions.NetworkPolicy) (*extensions.NetworkPolicy, error) + Delete(name string, options *api.DeleteOptions) error + Watch(opts api.ListOptions) (watch.Interface, error) +} + +// NetworkPolicies implements NetworkPolicyNamespacer interface +type NetworkPolicies struct { + r *ExtensionsClient + ns string +} + +// newNetworkPolicies returns a NetworkPolicies +func newNetworkPolicies(c *ExtensionsClient, namespace string) *NetworkPolicies { + return &NetworkPolicies{c, namespace} +} + +// List returns a list of networkPolicy that match the label and field selectors. +func (c *NetworkPolicies) List(opts api.ListOptions) (result *extensions.NetworkPolicyList, err error) { + result = &extensions.NetworkPolicyList{} + err = c.r.Get().Namespace(c.ns).Resource("networkpolicies").VersionedParams(&opts, api.ParameterCodec).Do().Into(result) + return +} + +// Get returns information about a particular networkPolicy. 
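+// For example (a sketch, assuming the extensions client wires in the
+// NetworkPolicyNamespacer defined above; the policy name is illustrative):
+//
+//	np, err := client.Extensions().NetworkPolicies("default").Get("allow-frontend")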
+func (c *NetworkPolicies) Get(name string) (result *extensions.NetworkPolicy, err error) { + result = &extensions.NetworkPolicy{} + err = c.r.Get().Namespace(c.ns).Resource("networkpolicies").Name(name).Do().Into(result) + return +} + +// Create creates a new networkPolicy. +func (c *NetworkPolicies) Create(networkPolicy *extensions.NetworkPolicy) (result *extensions.NetworkPolicy, err error) { + result = &extensions.NetworkPolicy{} + err = c.r.Post().Namespace(c.ns).Resource("networkpolicies").Body(networkPolicy).Do().Into(result) + return +} + +// Update updates an existing networkPolicy. +func (c *NetworkPolicies) Update(networkPolicy *extensions.NetworkPolicy) (result *extensions.NetworkPolicy, err error) { + result = &extensions.NetworkPolicy{} + err = c.r.Put().Namespace(c.ns).Resource("networkpolicies").Name(networkPolicy.Name).Body(networkPolicy).Do().Into(result) + return +} + +// Delete deletes a networkPolicy, returns error if one occurs. +func (c *NetworkPolicies) Delete(name string, options *api.DeleteOptions) (err error) { + return c.r.Delete().Namespace(c.ns).Resource("networkpolicies").Name(name).Body(options).Do().Error() +} + +// Watch returns a watch.Interface that watches the requested networkPolicy. +func (c *NetworkPolicies) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.r.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("networkpolicies"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/nodes_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/nodes_test.go new file mode 100644 index 000000000000..d20656d49e9a --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/nodes_test.go @@ -0,0 +1,173 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package unversioned_test + +import ( + "net/url" + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/resource" + "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/client/unversioned/testclient/simple" + "k8s.io/kubernetes/pkg/labels" +) + +func getNodesResourceName() string { + return "nodes" +} + +func TestListNodes(t *testing.T) { + c := &simple.Client{ + Request: simple.Request{ + Method: "GET", + Path: testapi.Default.ResourcePath(getNodesResourceName(), "", ""), + }, + Response: simple.Response{StatusCode: 200, Body: &api.NodeList{ListMeta: unversioned.ListMeta{ResourceVersion: "1"}}}, + } + response, err := c.Setup(t).Nodes().List(api.ListOptions{}) + defer c.Close() + c.Validate(t, response, err) +} + +func TestListNodesLabels(t *testing.T) { + labelSelectorQueryParamName := unversioned.LabelSelectorQueryParam(testapi.Default.GroupVersion().String()) + c := &simple.Client{ + Request: simple.Request{ + Method: "GET", + Path: testapi.Default.ResourcePath(getNodesResourceName(), "", ""), + Query: simple.BuildQueryValues(url.Values{labelSelectorQueryParamName: []string{"foo=bar,name=baz"}})}, + Response: simple.Response{ + StatusCode: 200, + Body: &api.NodeList{ + Items: []api.Node{ + { + ObjectMeta: api.ObjectMeta{ + Labels: map[string]string{ + "foo": "bar", + "name": "baz", + }, + }, + }, + }, + }, + }, + } + c.Setup(t) + defer c.Close() + c.QueryValidator[labelSelectorQueryParamName] = simple.ValidateLabels + selector := labels.Set{"foo": "bar", "name": "baz"}.AsSelector() + options := api.ListOptions{LabelSelector: selector} + receivedNodeList, err := c.Nodes().List(options) + c.Validate(t, receivedNodeList, err) +} + +func TestGetNode(t *testing.T) { + c := &simple.Client{ + Request: simple.Request{ + Method: "GET", + Path: testapi.Default.ResourcePath(getNodesResourceName(), "", "1"), + }, + Response: simple.Response{StatusCode: 200, Body: &api.Node{ObjectMeta: api.ObjectMeta{Name: "node-1"}}}, + } + response, err := c.Setup(t).Nodes().Get("1") + defer c.Close() + c.Validate(t, response, err) +} + +func TestGetNodeWithNoName(t *testing.T) { + c := &simple.Client{Error: true} + receivedNode, err := c.Setup(t).Nodes().Get("") + defer c.Close() + if (err != nil) && (err.Error() != simple.NameRequiredError) { + t.Errorf("Expected error: %v, but got %v", simple.NameRequiredError, err) + } + + c.Validate(t, receivedNode, err) +} + +func TestCreateNode(t *testing.T) { + requestNode := &api.Node{ + ObjectMeta: api.ObjectMeta{ + Name: "node-1", + }, + Status: api.NodeStatus{ + Capacity: api.ResourceList{ + api.ResourceCPU: resource.MustParse("1000m"), + api.ResourceMemory: resource.MustParse("1Mi"), + }, + }, + Spec: api.NodeSpec{ + Unschedulable: false, + }, + } + c := &simple.Client{ + Request: simple.Request{ + Method: "POST", + Path: testapi.Default.ResourcePath(getNodesResourceName(), "", ""), + Body: requestNode}, + Response: simple.Response{ + StatusCode: 200, + Body: requestNode, + }, + } + receivedNode, err := c.Setup(t).Nodes().Create(requestNode) + defer c.Close() + c.Validate(t, receivedNode, err) +} + +func TestDeleteNode(t *testing.T) { + c := &simple.Client{ + Request: simple.Request{ + Method: "DELETE", + Path: testapi.Default.ResourcePath(getNodesResourceName(), "", "foo"), + }, + Response: simple.Response{StatusCode: 200}, + } + err := c.Setup(t).Nodes().Delete("foo") + defer c.Close() + c.Validate(t, nil, err) +} + +func TestUpdateNode(t *testing.T) { + requestNode := &api.Node{ + 
ObjectMeta: api.ObjectMeta{ + Name: "foo", + ResourceVersion: "1", + }, + Status: api.NodeStatus{ + Capacity: api.ResourceList{ + api.ResourceCPU: resource.MustParse("1000m"), + api.ResourceMemory: resource.MustParse("1Mi"), + }, + }, + Spec: api.NodeSpec{ + Unschedulable: true, + }, + } + c := &simple.Client{ + Request: simple.Request{ + Method: "PUT", + Path: testapi.Default.ResourcePath(getNodesResourceName(), "", "foo"), + }, + Response: simple.Response{StatusCode: 200, Body: requestNode}, + } + response, err := c.Setup(t).Nodes().Update(requestNode) + defer c.Close() + c.Validate(t, response, err) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/persistentvolume_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/persistentvolume_test.go new file mode 100644 index 000000000000..03ebed7e7c8c --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/persistentvolume_test.go @@ -0,0 +1,191 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned_test + +import ( + "net/url" + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/resource" + "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/client/unversioned/testclient/simple" +) + +func getPersistentVolumesResoureName() string { + return "persistentvolumes" +} + +func TestPersistentVolumeCreate(t *testing.T) { + pv := &api.PersistentVolume{ + ObjectMeta: api.ObjectMeta{ + Name: "abc", + }, + Spec: api.PersistentVolumeSpec{ + Capacity: api.ResourceList{ + api.ResourceName(api.ResourceStorage): resource.MustParse("10G"), + }, + PersistentVolumeSource: api.PersistentVolumeSource{ + HostPath: &api.HostPathVolumeSource{Path: "/foo"}, + }, + }, + } + + c := &simple.Client{ + Request: simple.Request{ + Method: "POST", + Path: testapi.Default.ResourcePath(getPersistentVolumesResoureName(), "", ""), + Query: simple.BuildQueryValues(nil), + Body: pv, + }, + Response: simple.Response{StatusCode: 200, Body: pv}, + } + + response, err := c.Setup(t).PersistentVolumes().Create(pv) + defer c.Close() + c.Validate(t, response, err) +} + +func TestPersistentVolumeGet(t *testing.T) { + persistentVolume := &api.PersistentVolume{ + ObjectMeta: api.ObjectMeta{ + Name: "abc", + Namespace: "foo", + }, + Spec: api.PersistentVolumeSpec{ + Capacity: api.ResourceList{ + api.ResourceName(api.ResourceStorage): resource.MustParse("10G"), + }, + PersistentVolumeSource: api.PersistentVolumeSource{ + HostPath: &api.HostPathVolumeSource{Path: "/foo"}, + }, + }, + } + c := &simple.Client{ + Request: simple.Request{ + Method: "GET", + Path: testapi.Default.ResourcePath(getPersistentVolumesResoureName(), "", "abc"), + Query: simple.BuildQueryValues(nil), + Body: nil, + }, + Response: simple.Response{StatusCode: 200, Body: persistentVolume}, + } + + response, err := c.Setup(t).PersistentVolumes().Get("abc") + defer c.Close() + c.Validate(t, response, err) +} + +func TestPersistentVolumeList(t *testing.T) { + 
persistentVolumeList := &api.PersistentVolumeList{ + Items: []api.PersistentVolume{ + { + ObjectMeta: api.ObjectMeta{Name: "foo"}, + }, + }, + } + c := &simple.Client{ + Request: simple.Request{ + Method: "GET", + Path: testapi.Default.ResourcePath(getPersistentVolumesResoureName(), "", ""), + Query: simple.BuildQueryValues(nil), + Body: nil, + }, + Response: simple.Response{StatusCode: 200, Body: persistentVolumeList}, + } + response, err := c.Setup(t).PersistentVolumes().List(api.ListOptions{}) + defer c.Close() + c.Validate(t, response, err) +} + +func TestPersistentVolumeUpdate(t *testing.T) { + persistentVolume := &api.PersistentVolume{ + ObjectMeta: api.ObjectMeta{ + Name: "abc", + ResourceVersion: "1", + }, + Spec: api.PersistentVolumeSpec{ + Capacity: api.ResourceList{ + api.ResourceName(api.ResourceStorage): resource.MustParse("10G"), + }, + PersistentVolumeSource: api.PersistentVolumeSource{ + HostPath: &api.HostPathVolumeSource{Path: "/foo"}, + }, + }, + } + c := &simple.Client{ + Request: simple.Request{Method: "PUT", Path: testapi.Default.ResourcePath(getPersistentVolumesResoureName(), "", "abc"), Query: simple.BuildQueryValues(nil)}, + Response: simple.Response{StatusCode: 200, Body: persistentVolume}, + } + response, err := c.Setup(t).PersistentVolumes().Update(persistentVolume) + defer c.Close() + c.Validate(t, response, err) +} + +func TestPersistentVolumeStatusUpdate(t *testing.T) { + persistentVolume := &api.PersistentVolume{ + ObjectMeta: api.ObjectMeta{ + Name: "abc", + ResourceVersion: "1", + }, + Spec: api.PersistentVolumeSpec{ + Capacity: api.ResourceList{ + api.ResourceName(api.ResourceStorage): resource.MustParse("10G"), + }, + PersistentVolumeSource: api.PersistentVolumeSource{ + HostPath: &api.HostPathVolumeSource{Path: "/foo"}, + }, + }, + Status: api.PersistentVolumeStatus{ + Phase: api.VolumeBound, + Message: "foo", + }, + } + c := &simple.Client{ + Request: simple.Request{ + Method: "PUT", + Path: testapi.Default.ResourcePath(getPersistentVolumesResoureName(), "", "abc") + "/status", + Query: simple.BuildQueryValues(nil)}, + Response: simple.Response{StatusCode: 200, Body: persistentVolume}, + } + response, err := c.Setup(t).PersistentVolumes().UpdateStatus(persistentVolume) + defer c.Close() + c.Validate(t, response, err) +} + +func TestPersistentVolumeDelete(t *testing.T) { + c := &simple.Client{ + Request: simple.Request{Method: "DELETE", Path: testapi.Default.ResourcePath(getPersistentVolumesResoureName(), "", "foo"), Query: simple.BuildQueryValues(nil)}, + Response: simple.Response{StatusCode: 200}, + } + err := c.Setup(t).PersistentVolumes().Delete("foo") + defer c.Close() + c.Validate(t, nil, err) +} + +func TestPersistentVolumeWatch(t *testing.T) { + c := &simple.Client{ + Request: simple.Request{ + Method: "GET", + Path: testapi.Default.ResourcePathWithPrefix("watch", getPersistentVolumesResoureName(), "", ""), + Query: url.Values{"resourceVersion": []string{}}}, + Response: simple.Response{StatusCode: 200}, + } + _, err := c.Setup(t).PersistentVolumes().Watch(api.ListOptions{}) + defer c.Close() + c.Validate(t, nil, err) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/persistentvolumeclaim_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/persistentvolumeclaim_test.go new file mode 100644 index 000000000000..901f510dfacd --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/persistentvolumeclaim_test.go @@ -0,0 +1,208 @@ +/* +Copyright 2014 The Kubernetes Authors All 
rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned_test + +import ( + "net/url" + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/resource" + "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/client/unversioned/testclient/simple" +) + +func getPersistentVolumeClaimsResoureName() string { + return "persistentvolumeclaims" +} + +func TestPersistentVolumeClaimCreate(t *testing.T) { + ns := api.NamespaceDefault + pv := &api.PersistentVolumeClaim{ + ObjectMeta: api.ObjectMeta{ + Name: "abc", + }, + Spec: api.PersistentVolumeClaimSpec{ + AccessModes: []api.PersistentVolumeAccessMode{ + api.ReadWriteOnce, + api.ReadOnlyMany, + }, + Resources: api.ResourceRequirements{ + Requests: api.ResourceList{ + api.ResourceName(api.ResourceStorage): resource.MustParse("10G"), + }, + }, + }, + } + + c := &simple.Client{ + Request: simple.Request{ + Method: "POST", + Path: testapi.Default.ResourcePath(getPersistentVolumeClaimsResoureName(), ns, ""), + Query: simple.BuildQueryValues(nil), + Body: pv, + }, + Response: simple.Response{StatusCode: 200, Body: pv}, + } + + response, err := c.Setup(t).PersistentVolumeClaims(ns).Create(pv) + defer c.Close() + c.Validate(t, response, err) +} + +func TestPersistentVolumeClaimGet(t *testing.T) { + ns := api.NamespaceDefault + persistentVolumeClaim := &api.PersistentVolumeClaim{ + ObjectMeta: api.ObjectMeta{ + Name: "abc", + Namespace: "foo", + }, + Spec: api.PersistentVolumeClaimSpec{ + AccessModes: []api.PersistentVolumeAccessMode{ + api.ReadWriteOnce, + api.ReadOnlyMany, + }, + Resources: api.ResourceRequirements{ + Requests: api.ResourceList{ + api.ResourceName(api.ResourceStorage): resource.MustParse("10G"), + }, + }, + }, + } + c := &simple.Client{ + Request: simple.Request{ + Method: "GET", + Path: testapi.Default.ResourcePath(getPersistentVolumeClaimsResoureName(), ns, "abc"), + Query: simple.BuildQueryValues(nil), + Body: nil, + }, + Response: simple.Response{StatusCode: 200, Body: persistentVolumeClaim}, + } + + response, err := c.Setup(t).PersistentVolumeClaims(ns).Get("abc") + defer c.Close() + c.Validate(t, response, err) +} + +func TestPersistentVolumeClaimList(t *testing.T) { + ns := api.NamespaceDefault + persistentVolumeList := &api.PersistentVolumeClaimList{ + Items: []api.PersistentVolumeClaim{ + { + ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: "ns"}, + }, + }, + } + c := &simple.Client{ + Request: simple.Request{ + Method: "GET", + Path: testapi.Default.ResourcePath(getPersistentVolumeClaimsResoureName(), ns, ""), + Query: simple.BuildQueryValues(nil), + Body: nil, + }, + Response: simple.Response{StatusCode: 200, Body: persistentVolumeList}, + } + response, err := c.Setup(t).PersistentVolumeClaims(ns).List(api.ListOptions{}) + defer c.Close() + c.Validate(t, response, err) +} + +func TestPersistentVolumeClaimUpdate(t *testing.T) { + ns := api.NamespaceDefault + persistentVolumeClaim := &api.PersistentVolumeClaim{ + ObjectMeta: api.ObjectMeta{ + Name: "abc", + ResourceVersion: "1", + }, + Spec: 
api.PersistentVolumeClaimSpec{ + AccessModes: []api.PersistentVolumeAccessMode{ + api.ReadWriteOnce, + api.ReadOnlyMany, + }, + Resources: api.ResourceRequirements{ + Requests: api.ResourceList{ + api.ResourceName(api.ResourceStorage): resource.MustParse("10G"), + }, + }, + }, + } + c := &simple.Client{ + Request: simple.Request{Method: "PUT", Path: testapi.Default.ResourcePath(getPersistentVolumeClaimsResoureName(), ns, "abc"), Query: simple.BuildQueryValues(nil)}, + Response: simple.Response{StatusCode: 200, Body: persistentVolumeClaim}, + } + response, err := c.Setup(t).PersistentVolumeClaims(ns).Update(persistentVolumeClaim) + defer c.Close() + c.Validate(t, response, err) +} + +func TestPersistentVolumeClaimStatusUpdate(t *testing.T) { + ns := api.NamespaceDefault + persistentVolumeClaim := &api.PersistentVolumeClaim{ + ObjectMeta: api.ObjectMeta{ + Name: "abc", + ResourceVersion: "1", + }, + Spec: api.PersistentVolumeClaimSpec{ + AccessModes: []api.PersistentVolumeAccessMode{ + api.ReadWriteOnce, + api.ReadOnlyMany, + }, + Resources: api.ResourceRequirements{ + Requests: api.ResourceList{ + api.ResourceName(api.ResourceStorage): resource.MustParse("10G"), + }, + }, + }, + Status: api.PersistentVolumeClaimStatus{ + Phase: api.ClaimBound, + }, + } + c := &simple.Client{ + Request: simple.Request{ + Method: "PUT", + Path: testapi.Default.ResourcePath(getPersistentVolumeClaimsResoureName(), ns, "abc") + "/status", + Query: simple.BuildQueryValues(nil)}, + Response: simple.Response{StatusCode: 200, Body: persistentVolumeClaim}, + } + response, err := c.Setup(t).PersistentVolumeClaims(ns).UpdateStatus(persistentVolumeClaim) + defer c.Close() + c.Validate(t, response, err) +} + +func TestPersistentVolumeClaimDelete(t *testing.T) { + ns := api.NamespaceDefault + c := &simple.Client{ + Request: simple.Request{Method: "DELETE", Path: testapi.Default.ResourcePath(getPersistentVolumeClaimsResoureName(), ns, "foo"), Query: simple.BuildQueryValues(nil)}, + Response: simple.Response{StatusCode: 200}, + } + err := c.Setup(t).PersistentVolumeClaims(ns).Delete("foo") + defer c.Close() + c.Validate(t, nil, err) +} + +func TestPersistentVolumeClaimWatch(t *testing.T) { + c := &simple.Client{ + Request: simple.Request{ + Method: "GET", + Path: testapi.Default.ResourcePathWithPrefix("watch", getPersistentVolumeClaimsResoureName(), "", ""), + Query: url.Values{"resourceVersion": []string{}}}, + Response: simple.Response{StatusCode: 200}, + } + _, err := c.Setup(t).PersistentVolumeClaims(api.NamespaceAll).Watch(api.ListOptions{}) + defer c.Close() + c.Validate(t, nil, err) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/pet_sets.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/pet_sets.go new file mode 100644 index 000000000000..71b1ea021763 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/pet_sets.go @@ -0,0 +1,100 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
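All of the *_test.go files in this change share one fixture idiom from testclient/simple: declare the expected HTTP request and the canned response, make a single client call against the stub server, then Validate both sides. A condensed sketch of the pattern, with an illustrative test name and an empty list body:

package unversioned_test

import (
	"testing"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/testapi"
	"k8s.io/kubernetes/pkg/client/unversioned/testclient/simple"
)

// TestListPodsFixture condenses the idiom: the fixture asserts the client
// issued exactly this request and decoded exactly this response body.
func TestListPodsFixture(t *testing.T) {
	ns := api.NamespaceDefault
	c := &simple.Client{
		Request:  simple.Request{Method: "GET", Path: testapi.Default.ResourcePath("pods", ns, ""), Query: simple.BuildQueryValues(nil)},
		Response: simple.Response{StatusCode: 200, Body: &api.PodList{}},
	}
	podList, err := c.Setup(t).Pods(ns).List(api.ListOptions{})
	defer c.Close()
	c.Validate(t, podList, err)
}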
+*/ + +package unversioned + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/apps" + "k8s.io/kubernetes/pkg/watch" +) + +// PetSetNamespacer has methods to work with PetSet resources in a namespace +type PetSetNamespacer interface { + PetSets(namespace string) PetSetInterface +} + +// PetSetInterface exposes methods to work on PetSet resources. +type PetSetInterface interface { + List(opts api.ListOptions) (*apps.PetSetList, error) + Get(name string) (*apps.PetSet, error) + Create(petSet *apps.PetSet) (*apps.PetSet, error) + Update(petSet *apps.PetSet) (*apps.PetSet, error) + Delete(name string, options *api.DeleteOptions) error + Watch(opts api.ListOptions) (watch.Interface, error) + UpdateStatus(petSet *apps.PetSet) (*apps.PetSet, error) +} + +// petSet implements PetSetNamespacer interface +type petSet struct { + r *AppsClient + ns string +} + +// newPetSet returns a petSet +func newPetSet(c *AppsClient, namespace string) *petSet { + return &petSet{c, namespace} +} + +// List returns a list of petSet that match the label and field selectors. +func (c *petSet) List(opts api.ListOptions) (result *apps.PetSetList, err error) { + result = &apps.PetSetList{} + err = c.r.Get().Namespace(c.ns).Resource("petsets").VersionedParams(&opts, api.ParameterCodec).Do().Into(result) + return +} + +// Get returns information about a particular petSet. +func (c *petSet) Get(name string) (result *apps.PetSet, err error) { + result = &apps.PetSet{} + err = c.r.Get().Namespace(c.ns).Resource("petsets").Name(name).Do().Into(result) + return +} + +// Create creates a new petSet. +func (c *petSet) Create(petSet *apps.PetSet) (result *apps.PetSet, err error) { + result = &apps.PetSet{} + err = c.r.Post().Namespace(c.ns).Resource("petsets").Body(petSet).Do().Into(result) + return +} + +// Update updates an existing petSet. +func (c *petSet) Update(petSet *apps.PetSet) (result *apps.PetSet, err error) { + result = &apps.PetSet{} + err = c.r.Put().Namespace(c.ns).Resource("petsets").Name(petSet.Name).Body(petSet).Do().Into(result) + return +} + +// Delete deletes a petSet, returns error if one occurs. +func (c *petSet) Delete(name string, options *api.DeleteOptions) (err error) { + return c.r.Delete().Namespace(c.ns).Resource("petsets").Name(name).Body(options).Do().Error() +} + +// Watch returns a watch.Interface that watches the requested petSet. +func (c *petSet) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.r.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("petsets"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} + +// UpdateStatus takes the name of the petSet and the new status. Returns the server's representation of the petSet, and an error, if it occurs. +func (c *petSet) UpdateStatus(petSet *apps.PetSet) (result *apps.PetSet, err error) { + result = &apps.PetSet{} + err = c.r.Put().Namespace(c.ns).Resource("petsets").Name(petSet.Name).SubResource("status").Body(petSet).Do().Into(result) + return +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/pet_sets_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/pet_sets_test.go new file mode 100644 index 000000000000..879aa5ce7d9c --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/pet_sets_test.go @@ -0,0 +1,165 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
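The PetSet client above exposes the usual verbs plus UpdateStatus. A small sketch of consuming its Watch stream, assuming only the PetSetNamespacer interface defined above:

package example

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	client "k8s.io/kubernetes/pkg/client/unversioned"
)

// watchPetSets drains events until the stream closes or the caller stops it.
func watchPetSets(c client.PetSetNamespacer, ns string) error {
	w, err := c.PetSets(ns).Watch(api.ListOptions{})
	if err != nil {
		return err
	}
	defer w.Stop()
	for event := range w.ResultChan() {
		fmt.Printf("%s: %v\n", event.Type, event.Object)
	}
	return nil
}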
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned_test + +import ( + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/apis/apps" + "k8s.io/kubernetes/pkg/client/unversioned/testclient/simple" +) + +func getPetSetResourceName() string { + return "petsets" +} + +func TestListPetSets(t *testing.T) { + ns := api.NamespaceAll + c := &simple.Client{ + Request: simple.Request{ + Method: "GET", + Path: testapi.Apps.ResourcePath(getPetSetResourceName(), ns, ""), + }, + Response: simple.Response{StatusCode: 200, + Body: &apps.PetSetList{ + Items: []apps.PetSet{ + { + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Labels: map[string]string{ + "foo": "bar", + "name": "baz", + }, + }, + Spec: apps.PetSetSpec{ + Replicas: 2, + Template: api.PodTemplateSpec{}, + }, + }, + }, + }, + }, + } + receivedRSList, err := c.Setup(t).Apps().PetSets(ns).List(api.ListOptions{}) + c.Validate(t, receivedRSList, err) +} + +func TestGetPetSet(t *testing.T) { + ns := api.NamespaceDefault + c := &simple.Client{ + Request: simple.Request{Method: "GET", Path: testapi.Apps.ResourcePath(getPetSetResourceName(), ns, "foo"), Query: simple.BuildQueryValues(nil)}, + Response: simple.Response{ + StatusCode: 200, + Body: &apps.PetSet{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Labels: map[string]string{ + "foo": "bar", + "name": "baz", + }, + }, + Spec: apps.PetSetSpec{ + Replicas: 2, + Template: api.PodTemplateSpec{}, + }, + }, + }, + } + receivedRS, err := c.Setup(t).Apps().PetSets(ns).Get("foo") + c.Validate(t, receivedRS, err) +} + +func TestGetPetSetWithNoName(t *testing.T) { + ns := api.NamespaceDefault + c := &simple.Client{Error: true} + receivedPod, err := c.Setup(t).Apps().PetSets(ns).Get("") + if (err != nil) && (err.Error() != simple.NameRequiredError) { + t.Errorf("Expected error: %v, but got %v", simple.NameRequiredError, err) + } + + c.Validate(t, receivedPod, err) +} + +func TestUpdatePetSet(t *testing.T) { + ns := api.NamespaceDefault + requestRS := &apps.PetSet{ + ObjectMeta: api.ObjectMeta{Name: "foo", ResourceVersion: "1"}, + } + c := &simple.Client{ + Request: simple.Request{Method: "PUT", Path: testapi.Apps.ResourcePath(getPetSetResourceName(), ns, "foo"), Query: simple.BuildQueryValues(nil)}, + Response: simple.Response{ + StatusCode: 200, + Body: &apps.PetSet{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Labels: map[string]string{ + "foo": "bar", + "name": "baz", + }, + }, + Spec: apps.PetSetSpec{ + Replicas: 2, + Template: api.PodTemplateSpec{}, + }, + }, + }, + } + receivedRS, err := c.Setup(t).Apps().PetSets(ns).Update(requestRS) + c.Validate(t, receivedRS, err) +} + +func TestDeletePetSet(t *testing.T) { + ns := api.NamespaceDefault + c := &simple.Client{ + Request: simple.Request{Method: "DELETE", Path: testapi.Apps.ResourcePath(getPetSetResourceName(), ns, "foo"), Query: simple.BuildQueryValues(nil)}, + Response: simple.Response{StatusCode: 200}, + } + err := c.Setup(t).Apps().PetSets(ns).Delete("foo", nil) + c.Validate(t, nil, err) +} + +func TestCreatePetSet(t *testing.T) { + ns := api.NamespaceDefault + requestRS := &apps.PetSet{ + ObjectMeta: api.ObjectMeta{Name: 
"foo"}, + } + c := &simple.Client{ + Request: simple.Request{Method: "POST", Path: testapi.Apps.ResourcePath(getPetSetResourceName(), ns, ""), Body: requestRS, Query: simple.BuildQueryValues(nil)}, + Response: simple.Response{ + StatusCode: 200, + Body: &apps.PetSet{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Labels: map[string]string{ + "foo": "bar", + "name": "baz", + }, + }, + Spec: apps.PetSetSpec{ + Replicas: 2, + Template: api.PodTemplateSpec{}, + }, + }, + }, + } + receivedRS, err := c.Setup(t).Apps().PetSets(ns).Create(requestRS) + c.Validate(t, receivedRS, err) +} + +// TODO: Test Status actions. diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/pod_disruption_budgets.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/pod_disruption_budgets.go new file mode 100644 index 000000000000..14f373f37601 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/pod_disruption_budgets.go @@ -0,0 +1,100 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/policy" + "k8s.io/kubernetes/pkg/watch" +) + +// PodDisruptionBudgetNamespacer has methods to work with PodDisruptionBudget resources in a namespace +type PodDisruptionBudgetNamespacer interface { + PodDisruptionBudgets(namespace string) PodDisruptionBudgetInterface +} + +// PodDisruptionBudgetInterface exposes methods to work on PodDisruptionBudget resources. +type PodDisruptionBudgetInterface interface { + List(opts api.ListOptions) (*policy.PodDisruptionBudgetList, error) + Get(name string) (*policy.PodDisruptionBudget, error) + Create(podDisruptionBudget *policy.PodDisruptionBudget) (*policy.PodDisruptionBudget, error) + Update(podDisruptionBudget *policy.PodDisruptionBudget) (*policy.PodDisruptionBudget, error) + Delete(name string, options *api.DeleteOptions) error + Watch(opts api.ListOptions) (watch.Interface, error) + UpdateStatus(podDisruptionBudget *policy.PodDisruptionBudget) (*policy.PodDisruptionBudget, error) +} + +// podDisruptionBudget implements PodDisruptionBudgetNamespacer interface +type podDisruptionBudget struct { + r *PolicyClient + ns string +} + +// newPodDisruptionBudget returns a podDisruptionBudget +func newPodDisruptionBudget(c *PolicyClient, namespace string) *podDisruptionBudget { + return &podDisruptionBudget{c, namespace} +} + +// List returns a list of podDisruptionBudget that match the label and field selectors. +func (c *podDisruptionBudget) List(opts api.ListOptions) (result *policy.PodDisruptionBudgetList, err error) { + result = &policy.PodDisruptionBudgetList{} + err = c.r.Get().Namespace(c.ns).Resource("poddisruptionbudgets").VersionedParams(&opts, api.ParameterCodec).Do().Into(result) + return +} + +// Get returns information about a particular podDisruptionBudget. 
+func (c *podDisruptionBudget) Get(name string) (result *policy.PodDisruptionBudget, err error) { + result = &policy.PodDisruptionBudget{} + err = c.r.Get().Namespace(c.ns).Resource("poddisruptionbudgets").Name(name).Do().Into(result) + return +} + +// Create creates a new podDisruptionBudget. +func (c *podDisruptionBudget) Create(podDisruptionBudget *policy.PodDisruptionBudget) (result *policy.PodDisruptionBudget, err error) { + result = &policy.PodDisruptionBudget{} + err = c.r.Post().Namespace(c.ns).Resource("poddisruptionbudgets").Body(podDisruptionBudget).Do().Into(result) + return +} + +// Update updates an existing podDisruptionBudget. +func (c *podDisruptionBudget) Update(podDisruptionBudget *policy.PodDisruptionBudget) (result *policy.PodDisruptionBudget, err error) { + result = &policy.PodDisruptionBudget{} + err = c.r.Put().Namespace(c.ns).Resource("poddisruptionbudgets").Name(podDisruptionBudget.Name).Body(podDisruptionBudget).Do().Into(result) + return +} + +// Delete deletes a podDisruptionBudget, returns error if one occurs. +func (c *podDisruptionBudget) Delete(name string, options *api.DeleteOptions) (err error) { + return c.r.Delete().Namespace(c.ns).Resource("poddisruptionbudgets").Name(name).Body(options).Do().Error() +} + +// Watch returns a watch.Interface that watches the requested podDisruptionBudget. +func (c *podDisruptionBudget) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.r.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("poddisruptionbudgets"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} + +// UpdateStatus takes the name of the podDisruptionBudget and the new status. Returns the server's representation of the podDisruptionBudget, and an error, if it occurs. +func (c *podDisruptionBudget) UpdateStatus(podDisruptionBudget *policy.PodDisruptionBudget) (result *policy.PodDisruptionBudget, err error) { + result = &policy.PodDisruptionBudget{} + err = c.r.Put().Namespace(c.ns).Resource("poddisruptionbudgets").Name(podDisruptionBudget.Name).SubResource("status").Body(podDisruptionBudget).Do().Into(result) + return +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/pod_templates_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/pod_templates_test.go new file mode 100644 index 000000000000..c72f0a21c857 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/pod_templates_test.go @@ -0,0 +1,147 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
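The PodDisruptionBudget client supports the standard read-modify-write flow. A sketch under that assumption; the label key and value are illustrative:

package example

import client "k8s.io/kubernetes/pkg/client/unversioned"

// relabelPDB: Get carries the ResourceVersion, so the subsequent Update is
// an optimistic-concurrency write of the whole object.
func relabelPDB(c client.PodDisruptionBudgetNamespacer, ns, name string) error {
	pdb, err := c.PodDisruptionBudgets(ns).Get(name)
	if err != nil {
		return err
	}
	if pdb.ObjectMeta.Labels == nil {
		pdb.ObjectMeta.Labels = map[string]string{}
	}
	pdb.ObjectMeta.Labels["team"] = "platform" // illustrative label
	_, err = c.PodDisruptionBudgets(ns).Update(pdb)
	return err
}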
+*/ + +package unversioned_test + +import ( + "net/url" + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/client/unversioned/testclient/simple" +) + +func getPodTemplatesResoureName() string { + return "podtemplates" +} + +func TestPodTemplateCreate(t *testing.T) { + ns := api.NamespaceDefault + podTemplate := api.PodTemplate{ + ObjectMeta: api.ObjectMeta{ + Name: "abc", + Namespace: ns, + }, + Template: api.PodTemplateSpec{}, + } + c := &simple.Client{ + Request: simple.Request{ + Method: "POST", + Path: testapi.Default.ResourcePath(getPodTemplatesResoureName(), ns, ""), + Query: simple.BuildQueryValues(nil), + Body: &podTemplate, + }, + Response: simple.Response{StatusCode: 200, Body: &podTemplate}, + } + + response, err := c.Setup(t).PodTemplates(ns).Create(&podTemplate) + defer c.Close() + c.Validate(t, response, err) +} + +func TestPodTemplateGet(t *testing.T) { + ns := api.NamespaceDefault + podTemplate := &api.PodTemplate{ + ObjectMeta: api.ObjectMeta{ + Name: "abc", + Namespace: ns, + }, + Template: api.PodTemplateSpec{}, + } + c := &simple.Client{ + Request: simple.Request{ + Method: "GET", + Path: testapi.Default.ResourcePath(getPodTemplatesResoureName(), ns, "abc"), + Query: simple.BuildQueryValues(nil), + Body: nil, + }, + Response: simple.Response{StatusCode: 200, Body: podTemplate}, + } + + response, err := c.Setup(t).PodTemplates(ns).Get("abc") + defer c.Close() + c.Validate(t, response, err) +} + +func TestPodTemplateList(t *testing.T) { + ns := api.NamespaceDefault + podTemplateList := &api.PodTemplateList{ + Items: []api.PodTemplate{ + { + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Namespace: ns, + }, + }, + }, + } + c := &simple.Client{ + Request: simple.Request{ + Method: "GET", + Path: testapi.Default.ResourcePath(getPodTemplatesResoureName(), ns, ""), + Query: simple.BuildQueryValues(nil), + Body: nil, + }, + Response: simple.Response{StatusCode: 200, Body: podTemplateList}, + } + response, err := c.Setup(t).PodTemplates(ns).List(api.ListOptions{}) + defer c.Close() + c.Validate(t, response, err) +} + +func TestPodTemplateUpdate(t *testing.T) { + ns := api.NamespaceDefault + podTemplate := &api.PodTemplate{ + ObjectMeta: api.ObjectMeta{ + Name: "abc", + Namespace: ns, + ResourceVersion: "1", + }, + Template: api.PodTemplateSpec{}, + } + c := &simple.Client{ + Request: simple.Request{Method: "PUT", Path: testapi.Default.ResourcePath(getPodTemplatesResoureName(), ns, "abc"), Query: simple.BuildQueryValues(nil)}, + Response: simple.Response{StatusCode: 200, Body: podTemplate}, + } + response, err := c.Setup(t).PodTemplates(ns).Update(podTemplate) + defer c.Close() + c.Validate(t, response, err) +} + +func TestPodTemplateDelete(t *testing.T) { + ns := api.NamespaceDefault + c := &simple.Client{ + Request: simple.Request{Method: "DELETE", Path: testapi.Default.ResourcePath(getPodTemplatesResoureName(), ns, "foo"), Query: simple.BuildQueryValues(nil)}, + Response: simple.Response{StatusCode: 200}, + } + err := c.Setup(t).PodTemplates(ns).Delete("foo", nil) + defer c.Close() + c.Validate(t, nil, err) +} + +func TestPodTemplateWatch(t *testing.T) { + c := &simple.Client{ + Request: simple.Request{ + Method: "GET", + Path: testapi.Default.ResourcePathWithPrefix("watch", getPodTemplatesResoureName(), "", ""), + Query: url.Values{"resourceVersion": []string{}}}, + Response: simple.Response{StatusCode: 200}, + } + _, err := c.Setup(t).PodTemplates(api.NamespaceAll).Watch(api.ListOptions{}) + defer c.Close() + 
c.Validate(t, nil, err) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/pods_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/pods_test.go new file mode 100644 index 000000000000..42a806502a67 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/pods_test.go @@ -0,0 +1,226 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned_test + +import ( + "net/http" + "net/url" + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/client/unversioned/testclient/simple" + "k8s.io/kubernetes/pkg/labels" +) + +func TestListEmptyPods(t *testing.T) { + ns := api.NamespaceDefault + c := &simple.Client{ + Request: simple.Request{Method: "GET", Path: testapi.Default.ResourcePath("pods", ns, ""), Query: simple.BuildQueryValues(nil)}, + Response: simple.Response{StatusCode: http.StatusOK, Body: &api.PodList{}}, + } + podList, err := c.Setup(t).Pods(ns).List(api.ListOptions{}) + defer c.Close() + c.Validate(t, podList, err) +} + +func TestListPods(t *testing.T) { + ns := api.NamespaceDefault + c := &simple.Client{ + Request: simple.Request{Method: "GET", Path: testapi.Default.ResourcePath("pods", ns, ""), Query: simple.BuildQueryValues(nil)}, + Response: simple.Response{StatusCode: http.StatusOK, + Body: &api.PodList{ + Items: []api.Pod{ + { + Status: api.PodStatus{ + Phase: api.PodRunning, + }, + ObjectMeta: api.ObjectMeta{ + Labels: map[string]string{ + "foo": "bar", + "name": "baz", + }, + }, + }, + }, + }, + }, + } + receivedPodList, err := c.Setup(t).Pods(ns).List(api.ListOptions{}) + defer c.Close() + c.Validate(t, receivedPodList, err) +} + +func TestListPodsLabels(t *testing.T) { + ns := api.NamespaceDefault + labelSelectorQueryParamName := unversioned.LabelSelectorQueryParam(testapi.Default.GroupVersion().String()) + c := &simple.Client{ + Request: simple.Request{ + Method: "GET", + Path: testapi.Default.ResourcePath("pods", ns, ""), + Query: simple.BuildQueryValues(url.Values{labelSelectorQueryParamName: []string{"foo=bar,name=baz"}})}, + Response: simple.Response{ + StatusCode: http.StatusOK, + Body: &api.PodList{ + Items: []api.Pod{ + { + Status: api.PodStatus{ + Phase: api.PodRunning, + }, + ObjectMeta: api.ObjectMeta{ + Labels: map[string]string{ + "foo": "bar", + "name": "baz", + }, + }, + }, + }, + }, + }, + } + c.Setup(t) + defer c.Close() + c.QueryValidator[labelSelectorQueryParamName] = simple.ValidateLabels + selector := labels.Set{"foo": "bar", "name": "baz"}.AsSelector() + options := api.ListOptions{LabelSelector: selector} + receivedPodList, err := c.Pods(ns).List(options) + c.Validate(t, receivedPodList, err) +} + +func TestGetPod(t *testing.T) { + ns := api.NamespaceDefault + c := &simple.Client{ + Request: simple.Request{Method: "GET", Path: testapi.Default.ResourcePath("pods", ns, "foo"), Query: simple.BuildQueryValues(nil)}, + Response: 
simple.Response{ + StatusCode: http.StatusOK, + Body: &api.Pod{ + Status: api.PodStatus{ + Phase: api.PodRunning, + }, + ObjectMeta: api.ObjectMeta{ + Labels: map[string]string{ + "foo": "bar", + "name": "baz", + }, + }, + }, + }, + } + receivedPod, err := c.Setup(t).Pods(ns).Get("foo") + defer c.Close() + c.Validate(t, receivedPod, err) +} + +func TestGetPodWithNoName(t *testing.T) { + ns := api.NamespaceDefault + c := &simple.Client{Error: true} + receivedPod, err := c.Setup(t).Pods(ns).Get("") + defer c.Close() + if (err != nil) && (err.Error() != simple.NameRequiredError) { + t.Errorf("Expected error: %v, but got %v", simple.NameRequiredError, err) + } + + c.Validate(t, receivedPod, err) +} + +func TestDeletePod(t *testing.T) { + ns := api.NamespaceDefault + c := &simple.Client{ + Request: simple.Request{Method: "DELETE", Path: testapi.Default.ResourcePath("pods", ns, "foo"), Query: simple.BuildQueryValues(nil)}, + Response: simple.Response{StatusCode: http.StatusOK}, + } + err := c.Setup(t).Pods(ns).Delete("foo", nil) + defer c.Close() + c.Validate(t, nil, err) +} + +func TestCreatePod(t *testing.T) { + ns := api.NamespaceDefault + requestPod := &api.Pod{ + Status: api.PodStatus{ + Phase: api.PodRunning, + }, + ObjectMeta: api.ObjectMeta{ + Labels: map[string]string{ + "foo": "bar", + "name": "baz", + }, + }, + } + c := &simple.Client{ + Request: simple.Request{Method: "POST", Path: testapi.Default.ResourcePath("pods", ns, ""), Query: simple.BuildQueryValues(nil), Body: requestPod}, + Response: simple.Response{ + StatusCode: http.StatusOK, + Body: requestPod, + }, + } + receivedPod, err := c.Setup(t).Pods(ns).Create(requestPod) + defer c.Close() + c.Validate(t, receivedPod, err) +} + +func TestUpdatePod(t *testing.T) { + ns := api.NamespaceDefault + requestPod := &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + ResourceVersion: "1", + Labels: map[string]string{ + "foo": "bar", + "name": "baz", + }, + }, + Status: api.PodStatus{ + Phase: api.PodRunning, + }, + } + c := &simple.Client{ + Request: simple.Request{Method: "PUT", Path: testapi.Default.ResourcePath("pods", ns, "foo"), Query: simple.BuildQueryValues(nil)}, + Response: simple.Response{StatusCode: http.StatusOK, Body: requestPod}, + } + receivedPod, err := c.Setup(t).Pods(ns).Update(requestPod) + defer c.Close() + c.Validate(t, receivedPod, err) +} + +func TestPodGetLogs(t *testing.T) { + ns := api.NamespaceDefault + opts := &api.PodLogOptions{ + Follow: true, + Timestamps: true, + } + c := &simple.Client{ + Request: simple.Request{ + Method: "GET", + Path: testapi.Default.ResourcePath("pods", ns, "podName") + "/log", + Query: url.Values{ + "follow": []string{"true"}, + "timestamps": []string{"true"}, + }, + }, + Response: simple.Response{StatusCode: http.StatusOK}, + } + + body, err := c.Setup(t).Pods(ns).GetLogs("podName", opts).Stream() + defer c.Close() + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + defer body.Close() + c.ValidateCommon(t, err) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/podsecuritypolicy.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/podsecuritypolicy.go index 64a34b4b23be..356d913dbea7 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/podsecuritypolicy.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/podsecuritypolicy.go @@ -28,7 +28,7 @@ type PodSecurityPoliciesInterface interface { type PodSecurityPolicyInterface interface { Get(name string) (result 
*extensions.PodSecurityPolicy, err error) - Create(scc *extensions.PodSecurityPolicy) (*extensions.PodSecurityPolicy, error) + Create(psp *extensions.PodSecurityPolicy) (*extensions.PodSecurityPolicy, error) List(opts api.ListOptions) (*extensions.PodSecurityPolicyList, error) Delete(name string) error Update(*extensions.PodSecurityPolicy) (*extensions.PodSecurityPolicy, error) @@ -45,11 +45,11 @@ func newPodSecurityPolicy(c *ExtensionsClient) *podSecurityPolicy { return &podSecurityPolicy{c} } -func (s *podSecurityPolicy) Create(scc *extensions.PodSecurityPolicy) (*extensions.PodSecurityPolicy, error) { +func (s *podSecurityPolicy) Create(psp *extensions.PodSecurityPolicy) (*extensions.PodSecurityPolicy, error) { result := &extensions.PodSecurityPolicy{} err := s.client.Post(). Resource("podsecuritypolicies"). - Body(scc). + Body(psp). Do(). Into(result) diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/podsecuritypolicy_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/podsecuritypolicy_test.go new file mode 100644 index 000000000000..06fae477e3a0 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/podsecuritypolicy_test.go @@ -0,0 +1,137 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
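The rename from scc to psp above is cosmetic; the cluster-scoped client shape is unchanged. A sketch against the PodSecurityPolicyInterface as declared here, with an illustrative policy name:

package example

import (
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/apis/extensions"
	client "k8s.io/kubernetes/pkg/client/unversioned"
)

// createPSP: PodSecurityPolicy is cluster-scoped, so the interface takes no
// namespace argument.
func createPSP(c client.PodSecurityPolicyInterface) (*extensions.PodSecurityPolicy, error) {
	psp := &extensions.PodSecurityPolicy{
		ObjectMeta: api.ObjectMeta{Name: "restricted"},
	}
	return c.Create(psp)
}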
+*/ + +package unversioned_test + +import ( + "fmt" + "net/url" + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/pkg/client/unversioned/testclient/simple" +) + +func TestPodSecurityPolicyCreate(t *testing.T) { + ns := api.NamespaceNone + psp := &extensions.PodSecurityPolicy{ + ObjectMeta: api.ObjectMeta{ + Name: "abc", + }, + } + + c := &simple.Client{ + Request: simple.Request{ + Method: "POST", + Path: testapi.Extensions.ResourcePath(getPSPResourcename(), ns, ""), + Query: simple.BuildQueryValues(nil), + Body: psp, + }, + Response: simple.Response{StatusCode: 200, Body: psp}, + } + + response, err := c.Setup(t).PodSecurityPolicies().Create(psp) + c.Validate(t, response, err) +} + +func TestPodSecurityPolicyGet(t *testing.T) { + ns := api.NamespaceNone + psp := &extensions.PodSecurityPolicy{ + ObjectMeta: api.ObjectMeta{ + Name: "abc", + }, + } + c := &simple.Client{ + Request: simple.Request{ + Method: "GET", + Path: testapi.Extensions.ResourcePath(getPSPResourcename(), ns, "abc"), + Query: simple.BuildQueryValues(nil), + Body: nil, + }, + Response: simple.Response{StatusCode: 200, Body: psp}, + } + + response, err := c.Setup(t).PodSecurityPolicies().Get("abc") + c.Validate(t, response, err) +} + +func TestPodSecurityPolicyList(t *testing.T) { + ns := api.NamespaceNone + pspList := &extensions.PodSecurityPolicyList{ + Items: []extensions.PodSecurityPolicy{ + { + ObjectMeta: api.ObjectMeta{ + Name: "abc", + }, + }, + }, + } + c := &simple.Client{ + Request: simple.Request{ + Method: "GET", + Path: testapi.Extensions.ResourcePath(getPSPResourcename(), ns, ""), + Query: simple.BuildQueryValues(nil), + Body: nil, + }, + Response: simple.Response{StatusCode: 200, Body: pspList}, + } + response, err := c.Setup(t).PodSecurityPolicies().List(api.ListOptions{}) + c.Validate(t, response, err) +} + +func TestPodSecurityPolicyUpdate(t *testing.T) { + ns := api.NamespaceNone + psp := &extensions.PodSecurityPolicy{ + ObjectMeta: api.ObjectMeta{ + Name: "abc", + ResourceVersion: "1", + }, + } + c := &simple.Client{ + Request: simple.Request{Method: "PUT", Path: testapi.Extensions.ResourcePath(getPSPResourcename(), ns, "abc"), Query: simple.BuildQueryValues(nil)}, + Response: simple.Response{StatusCode: 200, Body: psp}, + } + response, err := c.Setup(t).PodSecurityPolicies().Update(psp) + c.Validate(t, response, err) +} + +func TestPodSecurityPolicyDelete(t *testing.T) { + ns := api.NamespaceNone + c := &simple.Client{ + Request: simple.Request{Method: "DELETE", Path: testapi.Extensions.ResourcePath(getPSPResourcename(), ns, "foo"), Query: simple.BuildQueryValues(nil)}, + Response: simple.Response{StatusCode: 200}, + } + err := c.Setup(t).PodSecurityPolicies().Delete("foo") + c.Validate(t, nil, err) +} + +func TestPodSecurityPolicyWatch(t *testing.T) { + c := &simple.Client{ + Request: simple.Request{ + Method: "GET", + Path: fmt.Sprintf("%s/watch/%s", testapi.Extensions.ResourcePath("", "", ""), getPSPResourcename()), + Query: url.Values{"resourceVersion": []string{}}}, + Response: simple.Response{StatusCode: 200}, + } + _, err := c.Setup(t).PodSecurityPolicies().Watch(api.ListOptions{}) + c.Validate(t, nil, err) +} + +func getPSPResourcename() string { + return "podsecuritypolicies" +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/policy.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/policy.go new file mode 100644 index 000000000000..8b06ce275ac4 --- 
/dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/policy.go @@ -0,0 +1,83 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apimachinery/registered" + "k8s.io/kubernetes/pkg/apis/policy" + "k8s.io/kubernetes/pkg/client/restclient" +) + +type PolicyInterface interface { + PodDisruptionBudgetNamespacer +} + +// PolicyClient is used to interact with Kubernetes batch features. +type PolicyClient struct { + *restclient.RESTClient +} + +func (c *PolicyClient) PodDisruptionBudgets(namespace string) PodDisruptionBudgetInterface { + return newPodDisruptionBudget(c, namespace) +} + +func NewPolicy(c *restclient.Config) (*PolicyClient, error) { + config := *c + if err := setPolicyDefaults(&config); err != nil { + return nil, err + } + client, err := restclient.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &PolicyClient{client}, nil +} + +func NewPolicyOrDie(c *restclient.Config) *PolicyClient { + client, err := NewPolicy(c) + if err != nil { + panic(err) + } + return client +} + +func setPolicyDefaults(config *restclient.Config) error { + g, err := registered.Group(policy.GroupName) + if err != nil { + return err + } + config.APIPath = defaultAPIPath + if config.UserAgent == "" { + config.UserAgent = restclient.DefaultKubernetesUserAgent() + } + // TODO: Unconditionally set the config.Version, until we fix the config. + //if config.Version == "" { + copyGroupVersion := g.GroupVersion + config.GroupVersion = ©GroupVersion + //} + + config.Codec = api.Codecs.LegacyCodec(*config.GroupVersion) + config.NegotiatedSerializer = api.Codecs + if config.QPS == 0 { + config.QPS = 5 + } + if config.Burst == 0 { + config.Burst = 10 + } + return nil +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/portforward/portforward.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/portforward/portforward.go index 78e6695e6f7a..a5ce32d34899 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/portforward/portforward.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/portforward/portforward.go @@ -27,7 +27,6 @@ import ( "strings" "sync" - "github.com/golang/glog" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/kubelet/server/portforward" "k8s.io/kubernetes/pkg/util/httpstream" @@ -46,6 +45,8 @@ type PortForwarder struct { Ready chan struct{} requestIDLock sync.Mutex requestID int + out io.Writer + errOut io.Writer } // ForwardedPort contains a Local:Remote port pairing. @@ -107,7 +108,7 @@ func parsePorts(ports []string) ([]ForwardedPort, error) { } // New creates a new PortForwarder. 
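For the PolicyClient defined just above in policy.go, construction goes through NewPolicy, which copies the config and applies the policy-group defaults (API path, group version, codec, QPS and burst) before building the REST client. A sketch with an illustrative host; the portforward.go change continues below:

package example

import (
	"k8s.io/kubernetes/pkg/client/restclient"
	client "k8s.io/kubernetes/pkg/client/unversioned"
)

// newPolicyClient builds a PolicyClient; NewPolicy returns an error if the
// policy API group is not registered.
func newPolicyClient() (*client.PolicyClient, error) {
	cfg := &restclient.Config{Host: "https://10.0.0.1:6443"}
	return client.NewPolicy(cfg)
}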
-func New(dialer httpstream.Dialer, ports []string, stopChan <-chan struct{}) (*PortForwarder, error) { +func New(dialer httpstream.Dialer, ports []string, stopChan <-chan struct{}, out, errOut io.Writer) (*PortForwarder, error) { if len(ports) == 0 { return nil, errors.New("You must specify at least 1 port") } @@ -120,6 +121,8 @@ func New(dialer httpstream.Dialer, ports []string, stopChan <-chan struct{}) (*P ports: parsedPorts, stopChan: stopChan, Ready: make(chan struct{}), + out: out, + errOut: errOut, }, nil } @@ -151,7 +154,9 @@ func (pf *PortForwarder) forward() error { case err == nil: listenSuccess = true default: - glog.Warningf("Unable to listen on port %d: %v", port.Local, err) + if pf.errOut != nil { + fmt.Fprintf(pf.errOut, "Unable to listen on port %d: %v\n", port.Local, err) + } } } @@ -210,7 +215,9 @@ func (pf *PortForwarder) getListener(protocol string, hostname string, port *For return nil, fmt.Errorf("Error parsing local port: %s from %s (%s)", err, listenerAddress, host) } port.Local = uint16(localPortUInt) - glog.Infof("Forwarding from %s:%d -> %d", hostname, localPortUInt, port.Remote) + if pf.out != nil { + fmt.Fprintf(pf.out, "Forwarding from %s:%d -> %d\n", hostname, localPortUInt, port.Remote) + } return listener, nil } @@ -244,7 +251,9 @@ func (pf *PortForwarder) nextRequestID() int { func (pf *PortForwarder) handleConnection(conn net.Conn, port ForwardedPort) { defer conn.Close() - glog.Infof("Handling connection for %d", port.Local) + if pf.out != nil { + fmt.Fprintf(pf.out, "Handling connection for %d\n", port.Local) + } requestID := pf.nextRequestID() diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/portforward/portforward_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/portforward/portforward_test.go new file mode 100644 index 000000000000..31689eed215b --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/portforward/portforward_test.go @@ -0,0 +1,394 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
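With the signature change above, callers of portforward.New now pass the writers that receive the progress messages previously sent to glog. A caller-side sketch modeled on the tests below; the port mapping is illustrative:

package example

import (
	"net/url"
	"os"

	"k8s.io/kubernetes/pkg/client/restclient"
	"k8s.io/kubernetes/pkg/client/unversioned/portforward"
	"k8s.io/kubernetes/pkg/client/unversioned/remotecommand"
)

// forwardOnce dials the pod's portforward URL and forwards local 8080 to
// remote 80, printing progress to stdout and listen errors to stderr.
func forwardOnce(podURL *url.URL, stopChan <-chan struct{}) error {
	exec, err := remotecommand.NewExecutor(&restclient.Config{}, "POST", podURL)
	if err != nil {
		return err
	}
	pf, err := portforward.New(exec, []string{"8080:80"}, stopChan, os.Stdout, os.Stderr)
	if err != nil {
		return err
	}
	return pf.ForwardPorts() // blocks until stopChan closes or an error occurs
}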
+*/ + +package portforward + +import ( + "bytes" + "fmt" + "io" + "net" + "net/http" + "net/http/httptest" + "net/url" + "os" + "reflect" + "strings" + "sync" + "testing" + "time" + + "k8s.io/kubernetes/pkg/client/restclient" + "k8s.io/kubernetes/pkg/client/unversioned/remotecommand" + kubeletserver "k8s.io/kubernetes/pkg/kubelet/server" + "k8s.io/kubernetes/pkg/types" + "k8s.io/kubernetes/pkg/util/httpstream" +) + +type fakeDialer struct { + dialed bool + conn httpstream.Connection + err error + negotiatedProtocol string +} + +func (d *fakeDialer) Dial(protocols ...string) (httpstream.Connection, string, error) { + d.dialed = true + return d.conn, d.negotiatedProtocol, d.err +} + +func TestParsePortsAndNew(t *testing.T) { + tests := []struct { + input []string + expected []ForwardedPort + expectParseError bool + expectNewError bool + }{ + {input: []string{}, expectNewError: true}, + {input: []string{"a"}, expectParseError: true, expectNewError: true}, + {input: []string{":a"}, expectParseError: true, expectNewError: true}, + {input: []string{"-1"}, expectParseError: true, expectNewError: true}, + {input: []string{"65536"}, expectParseError: true, expectNewError: true}, + {input: []string{"0"}, expectParseError: true, expectNewError: true}, + {input: []string{"0:0"}, expectParseError: true, expectNewError: true}, + {input: []string{"a:5000"}, expectParseError: true, expectNewError: true}, + {input: []string{"5000:a"}, expectParseError: true, expectNewError: true}, + { + input: []string{"5000", "5000:5000", "8888:5000", "5000:8888", ":5000", "0:5000"}, + expected: []ForwardedPort{ + {5000, 5000}, + {5000, 5000}, + {8888, 5000}, + {5000, 8888}, + {0, 5000}, + {0, 5000}, + }, + }, + } + + for i, test := range tests { + parsed, err := parsePorts(test.input) + haveError := err != nil + if e, a := test.expectParseError, haveError; e != a { + t.Fatalf("%d: parsePorts: error expected=%t, got %t: %s", i, e, a, err) + } + + dialer := &fakeDialer{} + expectedStopChan := make(chan struct{}) + pf, err := New(dialer, test.input, expectedStopChan, os.Stdout, os.Stderr) + haveError = err != nil + if e, a := test.expectNewError, haveError; e != a { + t.Fatalf("%d: New: error expected=%t, got %t: %s", i, e, a, err) + } + + if test.expectParseError || test.expectNewError { + continue + } + + for pi, expectedPort := range test.expected { + if e, a := expectedPort.Local, parsed[pi].Local; e != a { + t.Fatalf("%d: local expected: %d, got: %d", i, e, a) + } + if e, a := expectedPort.Remote, parsed[pi].Remote; e != a { + t.Fatalf("%d: remote expected: %d, got: %d", i, e, a) + } + } + + if dialer.dialed { + t.Fatalf("%d: expected not dialed", i) + } + if e, a := test.expected, pf.ports; !reflect.DeepEqual(e, a) { + t.Fatalf("%d: ports: expected %#v, got %#v", i, e, a) + } + if e, a := expectedStopChan, pf.stopChan; e != a { + t.Fatalf("%d: stopChan: expected %#v, got %#v", i, e, a) + } + if pf.Ready == nil { + t.Fatalf("%d: Ready should be non-nil", i) + } + } +} + +type GetListenerTestCase struct { + Hostname string + Protocol string + ShouldRaiseError bool + ExpectedListenerAddress string +} + +func TestGetListener(t *testing.T) { + var pf PortForwarder + testCases := []GetListenerTestCase{ + { + Hostname: "localhost", + Protocol: "tcp4", + ShouldRaiseError: false, + ExpectedListenerAddress: "127.0.0.1", + }, + { + Hostname: "127.0.0.1", + Protocol: "tcp4", + ShouldRaiseError: false, + ExpectedListenerAddress: "127.0.0.1", + }, + { + Hostname: "[::1]", + Protocol: "tcp6", + ShouldRaiseError: false, + 
ExpectedListenerAddress: "::1", + }, + { + Hostname: "[::1]", + Protocol: "tcp4", + ShouldRaiseError: true, + }, + { + Hostname: "127.0.0.1", + Protocol: "tcp6", + ShouldRaiseError: true, + }, + { + // IPv6 address must be put into brackets. This test reveals this. + Hostname: "::1", + Protocol: "tcp6", + ShouldRaiseError: true, + }, + } + + for i, testCase := range testCases { + expectedListenerPort := "12345" + listener, err := pf.getListener(testCase.Protocol, testCase.Hostname, &ForwardedPort{12345, 12345}) + if err != nil && strings.Contains(err.Error(), "cannot assign requested address") { + t.Logf("Can't test #%d: %v", i, err) + continue + } + errorRaised := err != nil + + if testCase.ShouldRaiseError != errorRaised { + t.Errorf("Test case #%d failed: Data %v an error has been raised(%t) where it should not (or reciprocally): %v", i, testCase, testCase.ShouldRaiseError, err) + continue + } + if errorRaised { + continue + } + + if listener == nil { + t.Errorf("Test case #%d did not raise an error but failed in initializing listener", i) + continue + } + + host, port, _ := net.SplitHostPort(listener.Addr().String()) + t.Logf("Asked a %s forward for: %s:%v, got listener %s:%s, expected: %s", testCase.Protocol, testCase.Hostname, 12345, host, port, expectedListenerPort) + if host != testCase.ExpectedListenerAddress { + t.Errorf("Test case #%d failed: Listener does not listen on exepected address: asked %v got %v", i, testCase.ExpectedListenerAddress, host) + } + if port != expectedListenerPort { + t.Errorf("Test case #%d failed: Listener does not listen on exepected port: asked %v got %v", i, expectedListenerPort, port) + + } + listener.Close() + + } +} + +// fakePortForwarder simulates port forwarding for testing. It implements +// kubeletserver.PortForwarder. +type fakePortForwarder struct { + lock sync.Mutex + // stores data expected from the stream per port + expected map[uint16]string + // stores data received from the stream per port + received map[uint16]string + // data to be sent to the stream per port + send map[uint16]string +} + +var _ kubeletserver.PortForwarder = &fakePortForwarder{} + +func (pf *fakePortForwarder) PortForward(name string, uid types.UID, port uint16, stream io.ReadWriteCloser) error { + defer stream.Close() + + // read from the client + received := make([]byte, len(pf.expected[port])) + n, err := stream.Read(received) + if err != nil { + return fmt.Errorf("error reading from client for port %d: %v", port, err) + } + if n != len(pf.expected[port]) { + return fmt.Errorf("unexpected length read from client for port %d: got %d, expected %d. data=%q", port, n, len(pf.expected[port]), string(received)) + } + + // store the received content + pf.lock.Lock() + pf.received[port] = string(received) + pf.lock.Unlock() + + // send the hardcoded data to the client + io.Copy(stream, strings.NewReader(pf.send[port])) + + return nil +} + +// fakePortForwardServer creates an HTTP server that can handle port forwarding +// requests. 
+func fakePortForwardServer(t *testing.T, testName string, serverSends, expectedFromClient map[uint16]string) http.HandlerFunc { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + pf := &fakePortForwarder{ + expected: expectedFromClient, + received: make(map[uint16]string), + send: serverSends, + } + kubeletserver.ServePortForward(w, req, pf, "pod", "uid", 0, 10*time.Second) + + for port, expected := range expectedFromClient { + actual, ok := pf.received[port] + if !ok { + t.Errorf("%s: server didn't receive any data for port %d", testName, port) + continue + } + + if expected != actual { + t.Errorf("%s: server expected to receive %q, got %q for port %d", testName, expected, actual, port) + } + } + + for port, actual := range pf.received { + if _, ok := expectedFromClient[port]; !ok { + t.Errorf("%s: server unexpectedly received %q for port %d", testName, actual, port) + } + } + }) +} + +func TestForwardPorts(t *testing.T) { + tests := map[string]struct { + ports []string + clientSends map[uint16]string + serverSends map[uint16]string + }{ + "forward 1 port with no data either direction": { + ports: []string{"5000"}, + }, + "forward 2 ports with bidirectional data": { + ports: []string{"5001", "6000"}, + clientSends: map[uint16]string{ + 5001: "abcd", + 6000: "ghij", + }, + serverSends: map[uint16]string{ + 5001: "1234", + 6000: "5678", + }, + }, + } + + for testName, test := range tests { + server := httptest.NewServer(fakePortForwardServer(t, testName, test.serverSends, test.clientSends)) + + url, _ := url.Parse(server.URL) + exec, err := remotecommand.NewExecutor(&restclient.Config{}, "POST", url) + if err != nil { + t.Fatal(err) + } + + stopChan := make(chan struct{}, 1) + + pf, err := New(exec, test.ports, stopChan, os.Stdout, os.Stderr) + if err != nil { + t.Fatalf("%s: unexpected error calling New: %v", testName, err) + } + + doneChan := make(chan error) + go func() { + doneChan <- pf.ForwardPorts() + }() + <-pf.Ready + + for port, data := range test.clientSends { + clientConn, err := net.Dial("tcp", fmt.Sprintf("localhost:%d", port)) + if err != nil { + t.Errorf("%s: error dialing %d: %s", testName, port, err) + server.Close() + continue + } + defer clientConn.Close() + + n, err := clientConn.Write([]byte(data)) + if err != nil && err != io.EOF { + t.Errorf("%s: error sending data '%s': %s", testName, data, err) + server.Close() + continue + } + if n == 0 { + t.Errorf("%s: unexpected write of 0 bytes", testName) + server.Close() + continue + } + b := make([]byte, 4) + n, err = clientConn.Read(b) + if err != nil && err != io.EOF { + t.Errorf("%s: error reading data: %s", testName, err) + server.Close() + continue + } + if !bytes.Equal([]byte(test.serverSends[port]), b) { + t.Errorf("%s: expected to read '%s', got '%s'", testName, test.serverSends[port], b) + server.Close() + continue + } + } + // tell pf.ForwardPorts to stop + close(stopChan) + + // wait for pf.ForwardPorts to actually return + err = <-doneChan + if err != nil { + t.Errorf("%s: unexpected error: %s", testName, err) + } + server.Close() + } +} + +func TestForwardPortsReturnsErrorWhenAllBindsFailed(t *testing.T) { + server := httptest.NewServer(fakePortForwardServer(t, "allBindsFailed", nil, nil)) + defer server.Close() + + url, _ := url.Parse(server.URL) + exec, err := remotecommand.NewExecutor(&restclient.Config{}, "POST", url) + if err != nil { + t.Fatal(err) + } + + stopChan1 := make(chan struct{}, 1) + defer close(stopChan1) + + pf1, err := New(exec, []string{"5555"}, stopChan1,
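+	// Illustrative note: this first forwarder takes localhost:5555; the second
+	// forwarder created below must then fail to bind, and its ForwardPorts call
+	// is expected to surface that bind failure as an error.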
os.Stdout, os.Stderr) + if err != nil { + t.Fatalf("error creating pf1: %v", err) + } + go pf1.ForwardPorts() + <-pf1.Ready + + stopChan2 := make(chan struct{}, 1) + pf2, err := New(exec, []string{"5555"}, stopChan2, os.Stdout, os.Stderr) + if err != nil { + t.Fatalf("error creating pf2: %v", err) + } + if err := pf2.ForwardPorts(); err == nil { + t.Fatal("expected non-nil error for pf2.ForwardPorts") + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/rbac.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/rbac.go new file mode 100644 index 000000000000..76ec392c3e47 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/rbac.go @@ -0,0 +1,103 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apimachinery/registered" + "k8s.io/kubernetes/pkg/apis/rbac" + "k8s.io/kubernetes/pkg/client/restclient" +) + +// Interface holds the methods for clients of Kubernetes to allow mock testing. +type RbacInterface interface { + RoleBindingsNamespacer + RolesNamespacer + ClusterRoleBindings + ClusterRoles +} + +type RbacClient struct { + *restclient.RESTClient +} + +func (c *RbacClient) RoleBindings(namespace string) RoleBindingInterface { + return newRoleBindings(c, namespace) +} + +func (c *RbacClient) Roles(namespace string) RoleInterface { + return newRoles(c, namespace) +} + +func (c *RbacClient) ClusterRoleBindings() ClusterRoleBindingInterface { + return newClusterRoleBindings(c) +} + +func (c *RbacClient) ClusterRoles() ClusterRoleInterface { + return newClusterRoles(c) +} + +// NewRbac creates a new RbacClient for the given config. +func NewRbac(c *restclient.Config) (*RbacClient, error) { + config := *c + if err := setRbacDefaults(&config); err != nil { + return nil, err + } + client, err := restclient.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &RbacClient{client}, nil +} + +// NewRbacOrDie creates a new RbacClient for the given config and +// panics if there is an error in the config. +func NewRbacOrDie(c *restclient.Config) *RbacClient { + client, err := NewRbac(c) + if err != nil { + panic(err) + } + return client +} + +func setRbacDefaults(config *restclient.Config) error { + // if rbac group is not registered, return an error + g, err := registered.Group(rbac.GroupName) + if err != nil { + return err + } + config.APIPath = defaultAPIPath + if config.UserAgent == "" { + config.UserAgent = restclient.DefaultKubernetesUserAgent() + } + + // TODO: Unconditionally set the config.Version, until we fix the config. 
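+	// The copy below (copyGroupVersion) is assumed intentional: taking the
+	// address of a local copy keeps config.GroupVersion from aliasing the
+	// GroupVersion struct held by the registered group.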
+ //if config.Version == "" { + copyGroupVersion := g.GroupVersion + config.GroupVersion = ©GroupVersion + //} + + config.Codec = api.Codecs.LegacyCodec(*config.GroupVersion) + config.NegotiatedSerializer = api.Codecs + if config.QPS == 0 { + config.QPS = 5 + } + if config.Burst == 0 { + config.Burst = 10 + } + return nil +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/remotecommand/remotecommand.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/remotecommand/remotecommand.go index d05e6ba5b3e1..7144f3093c11 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/remotecommand/remotecommand.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/remotecommand/remotecommand.go @@ -26,6 +26,7 @@ import ( "k8s.io/kubernetes/pkg/client/restclient" "k8s.io/kubernetes/pkg/client/transport" + "k8s.io/kubernetes/pkg/kubelet/server/remotecommand" "k8s.io/kubernetes/pkg/util/httpstream" "k8s.io/kubernetes/pkg/util/httpstream/spdy" ) @@ -36,7 +37,7 @@ type Executor interface { // non-nil stream to a remote system, and return an error if a problem occurs. If tty // is set, the stderr stream is not used (raw TTY manages stdout and stderr over the // stdout stream). - Stream(stdin io.Reader, stdout, stderr io.Writer, tty bool) error + Stream(supportedProtocols []string, stdin io.Reader, stdout, stderr io.Writer, tty bool) error } // StreamExecutor supports the ability to dial an httpstream connection and the ability to @@ -128,26 +129,13 @@ func (e *streamExecutor) Dial(protocols ...string) (httpstream.Connection, strin return conn, resp.Header.Get(httpstream.HeaderProtocolVersion), nil } -const ( - // The SPDY subprotocol "channel.k8s.io" is used for remote command - // attachment/execution. This represents the initial unversioned subprotocol, - // which has the known bugs http://issues.k8s.io/13394 and - // http://issues.k8s.io/13395. - StreamProtocolV1Name = "channel.k8s.io" - // The SPDY subprotocol "v2.channel.k8s.io" is used for remote command - // attachment/execution. It is the second version of the subprotocol and - // resolves the issues present in the first version. - StreamProtocolV2Name = "v2.channel.k8s.io" -) - type streamProtocolHandler interface { stream(httpstream.Connection) error } // Stream opens a protocol streamer to the server and streams until a client closes // the connection or the server disconnects. -func (e *streamExecutor) Stream(stdin io.Reader, stdout, stderr io.Writer, tty bool) error { - supportedProtocols := []string{StreamProtocolV2Name, StreamProtocolV1Name} +func (e *streamExecutor) Stream(supportedProtocols []string, stdin io.Reader, stdout, stderr io.Writer, tty bool) error { conn, protocol, err := e.Dial(supportedProtocols...) if err != nil { return err @@ -157,7 +145,7 @@ func (e *streamExecutor) Stream(stdin io.Reader, stdout, stderr io.Writer, tty b var streamer streamProtocolHandler switch protocol { - case StreamProtocolV2Name: + case remotecommand.StreamProtocolV2Name: streamer = &streamProtocolV2{ stdin: stdin, stdout: stdout, @@ -165,9 +153,9 @@ func (e *streamExecutor) Stream(stdin io.Reader, stdout, stderr io.Writer, tty b tty: tty, } case "": - glog.V(4).Infof("The server did not negotiate a streaming protocol version. Falling back to %s", StreamProtocolV1Name) + glog.V(4).Infof("The server did not negotiate a streaming protocol version. 
Falling back to %s", remotecommand.StreamProtocolV1Name) fallthrough - case StreamProtocolV1Name: + case remotecommand.StreamProtocolV1Name: streamer = &streamProtocolV1{ stdin: stdin, stdout: stdout, diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/remotecommand/remotecommand_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/remotecommand/remotecommand_test.go new file mode 100644 index 000000000000..f231a7f49265 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/remotecommand/remotecommand_test.go @@ -0,0 +1,357 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package remotecommand + +import ( + "bytes" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/url" + "strings" + "testing" + "time" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/client/restclient" + "k8s.io/kubernetes/pkg/kubelet/server/remotecommand" + "k8s.io/kubernetes/pkg/types" + "k8s.io/kubernetes/pkg/util/httpstream" +) + +type fakeExecutor struct { + t *testing.T + testName string + errorData string + stdoutData string + stderrData string + expectStdin bool + stdinReceived bytes.Buffer + tty bool + messageCount int + command []string + exec bool +} + +func (ex *fakeExecutor) ExecInContainer(name string, uid types.UID, container string, cmd []string, in io.Reader, out, err io.WriteCloser, tty bool) error { + return ex.run(name, uid, container, cmd, in, out, err, tty) +} + +func (ex *fakeExecutor) AttachContainer(name string, uid types.UID, container string, in io.Reader, out, err io.WriteCloser, tty bool) error { + return ex.run(name, uid, container, nil, in, out, err, tty) +} + +func (ex *fakeExecutor) run(name string, uid types.UID, container string, cmd []string, in io.Reader, out, err io.WriteCloser, tty bool) error { + ex.command = cmd + ex.tty = tty + + if e, a := "pod", name; e != a { + ex.t.Errorf("%s: pod: expected %q, got %q", ex.testName, e, a) + } + if e, a := "uid", uid; e != string(a) { + ex.t.Errorf("%s: uid: expected %q, got %q", ex.testName, e, a) + } + if ex.exec { + if e, a := "ls /", strings.Join(ex.command, " "); e != a { + ex.t.Errorf("%s: command: expected %q, got %q", ex.testName, e, a) + } + } else { + if len(ex.command) > 0 { + ex.t.Errorf("%s: command: expected nothing, got %v", ex.testName, ex.command) + } + } + + if len(ex.errorData) > 0 { + return errors.New(ex.errorData) + } + + if len(ex.stdoutData) > 0 { + for i := 0; i < ex.messageCount; i++ { + fmt.Fprint(out, ex.stdoutData) + } + } + + if len(ex.stderrData) > 0 { + for i := 0; i < ex.messageCount; i++ { + fmt.Fprint(err, ex.stderrData) + } + } + + if ex.expectStdin { + io.Copy(&ex.stdinReceived, in) + } + + return nil +} + +func fakeServer(t *testing.T, testName string, exec bool, stdinData, stdoutData, stderrData, errorData string, tty bool, messageCount int, serverProtocols []string) 
http.HandlerFunc { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + executor := &fakeExecutor{ + t: t, + testName: testName, + errorData: errorData, + stdoutData: stdoutData, + stderrData: stderrData, + expectStdin: len(stdinData) > 0, + tty: tty, + messageCount: messageCount, + exec: exec, + } + + if exec { + remotecommand.ServeExec(w, req, executor, "pod", "uid", "container", 0, 10*time.Second, serverProtocols) + } else { + remotecommand.ServeAttach(w, req, executor, "pod", "uid", "container", 0, 10*time.Second, serverProtocols) + } + + if e, a := strings.Repeat(stdinData, messageCount), executor.stdinReceived.String(); e != a { + t.Errorf("%s: stdin: expected %q, got %q", testName, e, a) + } + }) +} + +func TestStream(t *testing.T) { + testCases := []struct { + TestName string + Stdin string + Stdout string + Stderr string + Error string + Tty bool + MessageCount int + ClientProtocols []string + ServerProtocols []string + }{ + { + TestName: "error", + Error: "bail", + Stdout: "a", + ClientProtocols: []string{remotecommand.StreamProtocolV2Name}, + ServerProtocols: []string{remotecommand.StreamProtocolV2Name}, + }, + { + TestName: "in/out/err", + Stdin: "a", + Stdout: "b", + Stderr: "c", + MessageCount: 100, + ClientProtocols: []string{remotecommand.StreamProtocolV2Name}, + ServerProtocols: []string{remotecommand.StreamProtocolV2Name}, + }, + { + TestName: "in/out/tty", + Stdin: "a", + Stdout: "b", + Tty: true, + MessageCount: 100, + ClientProtocols: []string{remotecommand.StreamProtocolV2Name}, + ServerProtocols: []string{remotecommand.StreamProtocolV2Name}, + }, + { + // 1.0 kubectl, 1.0 kubelet + TestName: "unversioned client, unversioned server", + Stdout: "b", + Stderr: "c", + MessageCount: 1, + ClientProtocols: []string{}, + ServerProtocols: []string{}, + }, + { + // 1.0 kubectl, 1.1+ kubelet + TestName: "unversioned client, versioned server", + Stdout: "b", + Stderr: "c", + MessageCount: 1, + ClientProtocols: []string{}, + ServerProtocols: []string{remotecommand.StreamProtocolV2Name, remotecommand.StreamProtocolV1Name}, + }, + { + // 1.1+ kubectl, 1.0 kubelet + TestName: "versioned client, unversioned server", + Stdout: "b", + Stderr: "c", + MessageCount: 1, + ClientProtocols: []string{remotecommand.StreamProtocolV2Name, remotecommand.StreamProtocolV1Name}, + ServerProtocols: []string{}, + }, + } + + for _, testCase := range testCases { + for _, exec := range []bool{true, false} { + var name string + if exec { + name = testCase.TestName + " (exec)" + } else { + name = testCase.TestName + " (attach)" + } + var ( + streamIn io.Reader + streamOut, streamErr io.Writer + ) + localOut := &bytes.Buffer{} + localErr := &bytes.Buffer{} + + server := httptest.NewServer(fakeServer(t, name, exec, testCase.Stdin, testCase.Stdout, testCase.Stderr, testCase.Error, testCase.Tty, testCase.MessageCount, testCase.ServerProtocols)) + + url, _ := url.ParseRequestURI(server.URL) + config := restclient.ContentConfig{ + GroupVersion: &unversioned.GroupVersion{Group: "x"}, + NegotiatedSerializer: testapi.Default.NegotiatedSerializer(), + } + c, err := restclient.NewRESTClient(url, "", config, -1, -1, nil, nil) + if err != nil { + t.Fatalf("failed to create a client: %v", err) + } + req := c.Post().Resource("testing") + + if exec { + req.Param("command", "ls") + req.Param("command", "/") + } + + if len(testCase.Stdin) > 0 { + req.Param(api.ExecStdinParam, "1") + streamIn = strings.NewReader(strings.Repeat(testCase.Stdin, testCase.MessageCount)) + } + + if len(testCase.Stdout) > 0 
{ + req.Param(api.ExecStdoutParam, "1") + streamOut = localOut + } + + if testCase.Tty { + req.Param(api.ExecTTYParam, "1") + } else if len(testCase.Stderr) > 0 { + req.Param(api.ExecStderrParam, "1") + streamErr = localErr + } + + conf := &restclient.Config{ + Host: server.URL, + } + e, err := NewExecutor(conf, "POST", req.URL()) + if err != nil { + t.Errorf("%s: unexpected error: %v", name, err) + continue + } + err = e.Stream(testCase.ClientProtocols, streamIn, streamOut, streamErr, testCase.Tty) + hasErr := err != nil + + if len(testCase.Error) > 0 { + if !hasErr { + t.Errorf("%s: expected an error", name) + } else { + if e, a := testCase.Error, err.Error(); !strings.Contains(a, e) { + t.Errorf("%s: expected error stream read %q, got %q", name, e, a) + } + } + + server.Close() + continue + } + + if hasErr { + t.Errorf("%s: unexpected error: %v", name, err) + server.Close() + continue + } + + if len(testCase.Stdout) > 0 { + if e, a := strings.Repeat(testCase.Stdout, testCase.MessageCount), localOut; e != a.String() { + t.Errorf("%s: expected stdout data '%s', got '%s'", name, e, a) + } + } + + if testCase.Stderr != "" { + if e, a := strings.Repeat(testCase.Stderr, testCase.MessageCount), localErr; e != a.String() { + t.Errorf("%s: expected stderr data '%s', got '%s'", name, e, a) + } + } + + server.Close() + } + } +} + +type fakeUpgrader struct { + req *http.Request + resp *http.Response + conn httpstream.Connection + err, connErr error + checkResponse bool + + t *testing.T +} + +func (u *fakeUpgrader) RoundTrip(req *http.Request) (*http.Response, error) { + u.req = req + return u.resp, u.err +} + +func (u *fakeUpgrader) NewConnection(resp *http.Response) (httpstream.Connection, error) { + if u.checkResponse && u.resp != resp { + u.t.Errorf("response objects passed did not match: %#v", resp) + } + return u.conn, u.connErr +} + +type fakeConnection struct { + httpstream.Connection +} + +// Dial is the common functionality between any stream based upgrader, regardless of protocol. +// This method ensures that someone can use a generic stream executor without being dependent +// on the core Kube client config behavior. +func TestDial(t *testing.T) { + upgrader := &fakeUpgrader{ + t: t, + checkResponse: true, + conn: &fakeConnection{}, + resp: &http.Response{ + StatusCode: http.StatusSwitchingProtocols, + Body: ioutil.NopCloser(&bytes.Buffer{}), + }, + } + var called bool + testFn := func(rt http.RoundTripper) http.RoundTripper { + if rt != upgrader { + t.Fatalf("unexpected round tripper: %#v", rt) + } + called = true + return rt + } + exec, err := NewStreamExecutor(upgrader, testFn, "POST", &url.URL{Host: "something.com", Scheme: "https"}) + if err != nil { + t.Fatal(err) + } + conn, protocol, err := exec.Dial("protocol1") + if err != nil { + t.Fatal(err) + } + if conn != upgrader.conn { + t.Errorf("unexpected connection: %#v", conn) + } + if !called { + t.Errorf("wrapper not called") + } + _ = protocol +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/replica_sets_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/replica_sets_test.go new file mode 100644 index 000000000000..2a0e8142cfd3 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/replica_sets_test.go @@ -0,0 +1,193 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned_test + +import ( + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/pkg/client/unversioned/testclient/simple" +) + +func getReplicaSetResourceName() string { + return "replicasets" +} + +func TestListReplicaSets(t *testing.T) { + ns := api.NamespaceAll + c := &simple.Client{ + Request: simple.Request{ + Method: "GET", + Path: testapi.Extensions.ResourcePath(getReplicaSetResourceName(), ns, ""), + }, + Response: simple.Response{StatusCode: 200, + Body: &extensions.ReplicaSetList{ + Items: []extensions.ReplicaSet{ + { + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Labels: map[string]string{ + "foo": "bar", + "name": "baz", + }, + }, + Spec: extensions.ReplicaSetSpec{ + Replicas: 2, + Template: api.PodTemplateSpec{}, + }, + }, + }, + }, + }, + } + receivedRSList, err := c.Setup(t).Extensions().ReplicaSets(ns).List(api.ListOptions{}) + c.Validate(t, receivedRSList, err) +} + +func TestGetReplicaSet(t *testing.T) { + ns := api.NamespaceDefault + c := &simple.Client{ + Request: simple.Request{Method: "GET", Path: testapi.Extensions.ResourcePath(getReplicaSetResourceName(), ns, "foo"), Query: simple.BuildQueryValues(nil)}, + Response: simple.Response{ + StatusCode: 200, + Body: &extensions.ReplicaSet{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Labels: map[string]string{ + "foo": "bar", + "name": "baz", + }, + }, + Spec: extensions.ReplicaSetSpec{ + Replicas: 2, + Template: api.PodTemplateSpec{}, + }, + }, + }, + } + receivedRS, err := c.Setup(t).Extensions().ReplicaSets(ns).Get("foo") + c.Validate(t, receivedRS, err) +} + +func TestGetReplicaSetWithNoName(t *testing.T) { + ns := api.NamespaceDefault + c := &simple.Client{Error: true} + receivedPod, err := c.Setup(t).Extensions().ReplicaSets(ns).Get("") + if (err != nil) && (err.Error() != simple.NameRequiredError) { + t.Errorf("Expected error: %v, but got %v", simple.NameRequiredError, err) + } + + c.Validate(t, receivedPod, err) +} + +func TestUpdateReplicaSet(t *testing.T) { + ns := api.NamespaceDefault + requestRS := &extensions.ReplicaSet{ + ObjectMeta: api.ObjectMeta{Name: "foo", ResourceVersion: "1"}, + } + c := &simple.Client{ + Request: simple.Request{Method: "PUT", Path: testapi.Extensions.ResourcePath(getReplicaSetResourceName(), ns, "foo"), Query: simple.BuildQueryValues(nil)}, + Response: simple.Response{ + StatusCode: 200, + Body: &extensions.ReplicaSet{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Labels: map[string]string{ + "foo": "bar", + "name": "baz", + }, + }, + Spec: extensions.ReplicaSetSpec{ + Replicas: 2, + Template: api.PodTemplateSpec{}, + }, + }, + }, + } + receivedRS, err := c.Setup(t).Extensions().ReplicaSets(ns).Update(requestRS) + c.Validate(t, receivedRS, err) +} + +func TestUpdateStatusReplicaSet(t *testing.T) { + ns := api.NamespaceDefault + requestRS := &extensions.ReplicaSet{ + ObjectMeta: api.ObjectMeta{Name: "foo", ResourceVersion: "1"}, + } + c := &simple.Client{ + Request: simple.Request{Method: "PUT", Path: testapi.Extensions.ResourcePath(getReplicaSetResourceName(), ns, "foo") + 
"/status", Query: simple.BuildQueryValues(nil)}, + Response: simple.Response{ + StatusCode: 200, + Body: &extensions.ReplicaSet{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Labels: map[string]string{ + "foo": "bar", + "name": "baz", + }, + }, + Spec: extensions.ReplicaSetSpec{ + Replicas: 2, + Template: api.PodTemplateSpec{}, + }, + Status: extensions.ReplicaSetStatus{ + Replicas: 2, + }, + }, + }, + } + receivedRS, err := c.Setup(t).Extensions().ReplicaSets(ns).UpdateStatus(requestRS) + c.Validate(t, receivedRS, err) +} +func TestDeleteReplicaSet(t *testing.T) { + ns := api.NamespaceDefault + c := &simple.Client{ + Request: simple.Request{Method: "DELETE", Path: testapi.Extensions.ResourcePath(getReplicaSetResourceName(), ns, "foo"), Query: simple.BuildQueryValues(nil)}, + Response: simple.Response{StatusCode: 200}, + } + err := c.Setup(t).Extensions().ReplicaSets(ns).Delete("foo", nil) + c.Validate(t, nil, err) +} + +func TestCreateReplicaSet(t *testing.T) { + ns := api.NamespaceDefault + requestRS := &extensions.ReplicaSet{ + ObjectMeta: api.ObjectMeta{Name: "foo"}, + } + c := &simple.Client{ + Request: simple.Request{Method: "POST", Path: testapi.Extensions.ResourcePath(getReplicaSetResourceName(), ns, ""), Body: requestRS, Query: simple.BuildQueryValues(nil)}, + Response: simple.Response{ + StatusCode: 200, + Body: &extensions.ReplicaSet{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Labels: map[string]string{ + "foo": "bar", + "name": "baz", + }, + }, + Spec: extensions.ReplicaSetSpec{ + Replicas: 2, + Template: api.PodTemplateSpec{}, + }, + }, + }, + } + receivedRS, err := c.Setup(t).Extensions().ReplicaSets(ns).Create(requestRS) + c.Validate(t, receivedRS, err) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/replication_controllers_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/replication_controllers_test.go new file mode 100644 index 000000000000..de0458ce40b2 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/replication_controllers_test.go @@ -0,0 +1,200 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package unversioned_test + +import ( + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/client/unversioned/testclient/simple" +) + +func getRCResourceName() string { + return "replicationcontrollers" +} + +func TestListControllers(t *testing.T) { + ns := api.NamespaceAll + c := &simple.Client{ + Request: simple.Request{ + Method: "GET", + Path: testapi.Default.ResourcePath(getRCResourceName(), ns, ""), + }, + Response: simple.Response{StatusCode: 200, + Body: &api.ReplicationControllerList{ + Items: []api.ReplicationController{ + { + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Labels: map[string]string{ + "foo": "bar", + "name": "baz", + }, + }, + Spec: api.ReplicationControllerSpec{ + Replicas: 2, + Template: &api.PodTemplateSpec{}, + }, + }, + }, + }, + }, + } + receivedControllerList, err := c.Setup(t).ReplicationControllers(ns).List(api.ListOptions{}) + defer c.Close() + c.Validate(t, receivedControllerList, err) + +} + +func TestGetController(t *testing.T) { + ns := api.NamespaceDefault + c := &simple.Client{ + Request: simple.Request{Method: "GET", Path: testapi.Default.ResourcePath(getRCResourceName(), ns, "foo"), Query: simple.BuildQueryValues(nil)}, + Response: simple.Response{ + StatusCode: 200, + Body: &api.ReplicationController{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Labels: map[string]string{ + "foo": "bar", + "name": "baz", + }, + }, + Spec: api.ReplicationControllerSpec{ + Replicas: 2, + Template: &api.PodTemplateSpec{}, + }, + }, + }, + } + receivedController, err := c.Setup(t).ReplicationControllers(ns).Get("foo") + defer c.Close() + c.Validate(t, receivedController, err) +} + +func TestGetControllerWithNoName(t *testing.T) { + ns := api.NamespaceDefault + c := &simple.Client{Error: true} + receivedPod, err := c.Setup(t).ReplicationControllers(ns).Get("") + defer c.Close() + if (err != nil) && (err.Error() != simple.NameRequiredError) { + t.Errorf("Expected error: %v, but got %v", simple.NameRequiredError, err) + } + + c.Validate(t, receivedPod, err) +} + +func TestUpdateController(t *testing.T) { + ns := api.NamespaceDefault + requestController := &api.ReplicationController{ + ObjectMeta: api.ObjectMeta{Name: "foo", ResourceVersion: "1"}, + } + c := &simple.Client{ + Request: simple.Request{Method: "PUT", Path: testapi.Default.ResourcePath(getRCResourceName(), ns, "foo"), Query: simple.BuildQueryValues(nil)}, + Response: simple.Response{ + StatusCode: 200, + Body: &api.ReplicationController{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Labels: map[string]string{ + "foo": "bar", + "name": "baz", + }, + }, + Spec: api.ReplicationControllerSpec{ + Replicas: 2, + Template: &api.PodTemplateSpec{}, + }, + }, + }, + } + receivedController, err := c.Setup(t).ReplicationControllers(ns).Update(requestController) + defer c.Close() + c.Validate(t, receivedController, err) +} + +func TestUpdateStatusController(t *testing.T) { + ns := api.NamespaceDefault + requestController := &api.ReplicationController{ + ObjectMeta: api.ObjectMeta{Name: "foo", ResourceVersion: "1"}, + } + c := &simple.Client{ + Request: simple.Request{Method: "PUT", Path: testapi.Default.ResourcePath(getRCResourceName(), ns, "foo") + "/status", Query: simple.BuildQueryValues(nil)}, + Response: simple.Response{ + StatusCode: 200, + Body: &api.ReplicationController{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Labels: map[string]string{ + "foo": "bar", + "name": "baz", + }, + }, + Spec: api.ReplicationControllerSpec{ + Replicas: 2, + Template: 
&api.PodTemplateSpec{}, + }, + Status: api.ReplicationControllerStatus{ + Replicas: 2, + }, + }, + }, + } + receivedController, err := c.Setup(t).ReplicationControllers(ns).UpdateStatus(requestController) + defer c.Close() + c.Validate(t, receivedController, err) +} +func TestDeleteController(t *testing.T) { + ns := api.NamespaceDefault + c := &simple.Client{ + Request: simple.Request{Method: "DELETE", Path: testapi.Default.ResourcePath(getRCResourceName(), ns, "foo"), Query: simple.BuildQueryValues(nil)}, + Response: simple.Response{StatusCode: 200}, + } + err := c.Setup(t).ReplicationControllers(ns).Delete("foo") + defer c.Close() + c.Validate(t, nil, err) +} + +func TestCreateController(t *testing.T) { + ns := api.NamespaceDefault + requestController := &api.ReplicationController{ + ObjectMeta: api.ObjectMeta{Name: "foo"}, + } + c := &simple.Client{ + Request: simple.Request{Method: "POST", Path: testapi.Default.ResourcePath(getRCResourceName(), ns, ""), Body: requestController, Query: simple.BuildQueryValues(nil)}, + Response: simple.Response{ + StatusCode: 200, + Body: &api.ReplicationController{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Labels: map[string]string{ + "foo": "bar", + "name": "baz", + }, + }, + Spec: api.ReplicationControllerSpec{ + Replicas: 2, + Template: &api.PodTemplateSpec{}, + }, + }, + }, + } + receivedController, err := c.Setup(t).ReplicationControllers(ns).Create(requestController) + defer c.Close() + c.Validate(t, receivedController, err) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/resource_quotas_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/resource_quotas_test.go new file mode 100644 index 000000000000..73dba8dfbaf3 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/resource_quotas_test.go @@ -0,0 +1,204 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package unversioned_test + +import ( + "net/url" + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/resource" + "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/client/unversioned/testclient/simple" +) + +func getResourceQuotasResoureName() string { + return "resourcequotas" +} + +func TestResourceQuotaCreate(t *testing.T) { + ns := api.NamespaceDefault + resourceQuota := &api.ResourceQuota{ + ObjectMeta: api.ObjectMeta{ + Name: "abc", + Namespace: "foo", + }, + Spec: api.ResourceQuotaSpec{ + Hard: api.ResourceList{ + api.ResourceCPU: resource.MustParse("100"), + api.ResourceMemory: resource.MustParse("10000"), + api.ResourcePods: resource.MustParse("10"), + api.ResourceServices: resource.MustParse("10"), + api.ResourceReplicationControllers: resource.MustParse("10"), + api.ResourceQuotas: resource.MustParse("10"), + }, + }, + } + c := &simple.Client{ + Request: simple.Request{ + Method: "POST", + Path: testapi.Default.ResourcePath(getResourceQuotasResoureName(), ns, ""), + Query: simple.BuildQueryValues(nil), + Body: resourceQuota, + }, + Response: simple.Response{StatusCode: 200, Body: resourceQuota}, + } + + response, err := c.Setup(t).ResourceQuotas(ns).Create(resourceQuota) + defer c.Close() + c.Validate(t, response, err) +} + +func TestResourceQuotaGet(t *testing.T) { + ns := api.NamespaceDefault + resourceQuota := &api.ResourceQuota{ + ObjectMeta: api.ObjectMeta{ + Name: "abc", + Namespace: "foo", + }, + Spec: api.ResourceQuotaSpec{ + Hard: api.ResourceList{ + api.ResourceCPU: resource.MustParse("100"), + api.ResourceMemory: resource.MustParse("10000"), + api.ResourcePods: resource.MustParse("10"), + api.ResourceServices: resource.MustParse("10"), + api.ResourceReplicationControllers: resource.MustParse("10"), + api.ResourceQuotas: resource.MustParse("10"), + }, + }, + } + c := &simple.Client{ + Request: simple.Request{ + Method: "GET", + Path: testapi.Default.ResourcePath(getResourceQuotasResoureName(), ns, "abc"), + Query: simple.BuildQueryValues(nil), + Body: nil, + }, + Response: simple.Response{StatusCode: 200, Body: resourceQuota}, + } + + response, err := c.Setup(t).ResourceQuotas(ns).Get("abc") + defer c.Close() + c.Validate(t, response, err) +} + +func TestResourceQuotaList(t *testing.T) { + ns := api.NamespaceDefault + + resourceQuotaList := &api.ResourceQuotaList{ + Items: []api.ResourceQuota{ + { + ObjectMeta: api.ObjectMeta{Name: "foo"}, + }, + }, + } + c := &simple.Client{ + Request: simple.Request{ + Method: "GET", + Path: testapi.Default.ResourcePath(getResourceQuotasResoureName(), ns, ""), + Query: simple.BuildQueryValues(nil), + Body: nil, + }, + Response: simple.Response{StatusCode: 200, Body: resourceQuotaList}, + } + response, err := c.Setup(t).ResourceQuotas(ns).List(api.ListOptions{}) + defer c.Close() + c.Validate(t, response, err) +} + +func TestResourceQuotaUpdate(t *testing.T) { + ns := api.NamespaceDefault + resourceQuota := &api.ResourceQuota{ + ObjectMeta: api.ObjectMeta{ + Name: "abc", + Namespace: "foo", + ResourceVersion: "1", + }, + Spec: api.ResourceQuotaSpec{ + Hard: api.ResourceList{ + api.ResourceCPU: resource.MustParse("100"), + api.ResourceMemory: resource.MustParse("10000"), + api.ResourcePods: resource.MustParse("10"), + api.ResourceServices: resource.MustParse("10"), + api.ResourceReplicationControllers: resource.MustParse("10"), + api.ResourceQuotas: resource.MustParse("10"), + }, + }, + } + c := &simple.Client{ + Request: simple.Request{Method: "PUT", Path: 
testapi.Default.ResourcePath(getResourceQuotasResoureName(), ns, "abc"), Query: simple.BuildQueryValues(nil)}, + Response: simple.Response{StatusCode: 200, Body: resourceQuota}, + } + response, err := c.Setup(t).ResourceQuotas(ns).Update(resourceQuota) + defer c.Close() + c.Validate(t, response, err) +} + +func TestResourceQuotaStatusUpdate(t *testing.T) { + ns := api.NamespaceDefault + resourceQuota := &api.ResourceQuota{ + ObjectMeta: api.ObjectMeta{ + Name: "abc", + Namespace: "foo", + ResourceVersion: "1", + }, + Status: api.ResourceQuotaStatus{ + Hard: api.ResourceList{ + api.ResourceCPU: resource.MustParse("100"), + api.ResourceMemory: resource.MustParse("10000"), + api.ResourcePods: resource.MustParse("10"), + api.ResourceServices: resource.MustParse("10"), + api.ResourceReplicationControllers: resource.MustParse("10"), + api.ResourceQuotas: resource.MustParse("10"), + }, + }, + } + c := &simple.Client{ + Request: simple.Request{ + Method: "PUT", + Path: testapi.Default.ResourcePath(getResourceQuotasResoureName(), ns, "abc") + "/status", + Query: simple.BuildQueryValues(nil)}, + Response: simple.Response{StatusCode: 200, Body: resourceQuota}, + } + response, err := c.Setup(t).ResourceQuotas(ns).UpdateStatus(resourceQuota) + defer c.Close() + c.Validate(t, response, err) +} + +func TestResourceQuotaDelete(t *testing.T) { + ns := api.NamespaceDefault + c := &simple.Client{ + Request: simple.Request{Method: "DELETE", Path: testapi.Default.ResourcePath(getResourceQuotasResoureName(), ns, "foo"), Query: simple.BuildQueryValues(nil)}, + Response: simple.Response{StatusCode: 200}, + } + err := c.Setup(t).ResourceQuotas(ns).Delete("foo") + defer c.Close() + c.Validate(t, nil, err) +} + +func TestResourceQuotaWatch(t *testing.T) { + c := &simple.Client{ + Request: simple.Request{ + Method: "GET", + Path: testapi.Default.ResourcePathWithPrefix("watch", getResourceQuotasResoureName(), "", ""), + Query: url.Values{"resourceVersion": []string{}}}, + Response: simple.Response{StatusCode: 200}, + } + _, err := c.Setup(t).ResourceQuotas(api.NamespaceAll).Watch(api.ListOptions{}) + defer c.Close() + c.Validate(t, nil, err) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/rolebindings.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/rolebindings.go new file mode 100644 index 000000000000..a43815c55268 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/rolebindings.go @@ -0,0 +1,95 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/rbac" + "k8s.io/kubernetes/pkg/watch" +) + +// RoleBindingsNamespacer has methods to work with RoleBinding resources in a namespace +type RoleBindingsNamespacer interface { + RoleBindings(namespace string) RoleBindingInterface +} + +// RoleBindingInterface has methods to work with RoleBinding resources. 
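+//
+// A call-site sketch (illustrative only; assumes an *RbacClient named c and a
+// hypothetical subject to grant):
+//
+//	rb, err := c.RoleBindings("kube-system").Get("admin-binding")
+//	if err != nil {
+//		return err
+//	}
+//	rb.Subjects = append(rb.Subjects, newSubject) // newSubject is hypothetical
+//	rb, err = c.RoleBindings("kube-system").Update(rb)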
+type RoleBindingInterface interface { + List(opts api.ListOptions) (*rbac.RoleBindingList, error) + Get(name string) (*rbac.RoleBinding, error) + Delete(name string, options *api.DeleteOptions) error + Create(roleBinding *rbac.RoleBinding) (*rbac.RoleBinding, error) + Update(roleBinding *rbac.RoleBinding) (*rbac.RoleBinding, error) + Watch(opts api.ListOptions) (watch.Interface, error) +} + +// roleBindings implements RoleBindingsNamespacer interface +type roleBindings struct { + client *RbacClient + ns string +} + +// newRoleBindings returns a roleBindings +func newRoleBindings(c *RbacClient, namespace string) *roleBindings { + return &roleBindings{ + client: c, + ns: namespace, + } +} + +// List takes label and field selectors, and returns the list of roleBindings that match those selectors. +func (c *roleBindings) List(opts api.ListOptions) (result *rbac.RoleBindingList, err error) { + result = &rbac.RoleBindingList{} + err = c.client.Get().Namespace(c.ns).Resource("rolebindings").VersionedParams(&opts, api.ParameterCodec).Do().Into(result) + return +} + +// Get takes the name of the roleBinding, and returns the corresponding RoleBinding object, and an error if it occurs +func (c *roleBindings) Get(name string) (result *rbac.RoleBinding, err error) { + result = &rbac.RoleBinding{} + err = c.client.Get().Namespace(c.ns).Resource("rolebindings").Name(name).Do().Into(result) + return +} + +// Delete takes the name of the roleBinding and deletes it. Returns an error if one occurs. +func (c *roleBindings) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete().Namespace(c.ns).Resource("rolebindings").Name(name).Body(options).Do().Error() +} + +// Create takes the representation of a roleBinding and creates it. Returns the server's representation of the roleBinding, and an error, if it occurs. +func (c *roleBindings) Create(roleBinding *rbac.RoleBinding) (result *rbac.RoleBinding, err error) { + result = &rbac.RoleBinding{} + err = c.client.Post().Namespace(c.ns).Resource("rolebindings").Body(roleBinding).Do().Into(result) + return +} + +// Update takes the representation of a roleBinding and updates it. Returns the server's representation of the roleBinding, and an error, if it occurs. +func (c *roleBindings) Update(roleBinding *rbac.RoleBinding) (result *rbac.RoleBinding, err error) { + result = &rbac.RoleBinding{} + err = c.client.Put().Namespace(c.ns).Resource("rolebindings").Name(roleBinding.Name).Body(roleBinding).Do().Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested roleBindings. +func (c *roleBindings) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("rolebindings"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/roles.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/roles.go new file mode 100644 index 000000000000..29aee1baee76 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/roles.go @@ -0,0 +1,95 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/rbac" + "k8s.io/kubernetes/pkg/watch" +) + +// RolesNamespacer has methods to work with Role resources in a namespace +type RolesNamespacer interface { + Roles(namespace string) RoleInterface +} + +// RoleInterface has methods to work with Role resources. +type RoleInterface interface { + List(opts api.ListOptions) (*rbac.RoleList, error) + Get(name string) (*rbac.Role, error) + Delete(name string, options *api.DeleteOptions) error + Create(role *rbac.Role) (*rbac.Role, error) + Update(role *rbac.Role) (*rbac.Role, error) + Watch(opts api.ListOptions) (watch.Interface, error) +} + +// roles implements RolesNamespacer interface +type roles struct { + client *RbacClient + ns string +} + +// newRoles returns a roles +func newRoles(c *RbacClient, namespace string) *roles { + return &roles{ + client: c, + ns: namespace, + } +} + +// List takes label and field selectors, and returns the list of roles that match those selectors. +func (c *roles) List(opts api.ListOptions) (result *rbac.RoleList, err error) { + result = &rbac.RoleList{} + err = c.client.Get().Namespace(c.ns).Resource("roles").VersionedParams(&opts, api.ParameterCodec).Do().Into(result) + return +} + +// Get takes the name of the role, and returns the corresponding Role object, and an error if it occurs +func (c *roles) Get(name string) (result *rbac.Role, err error) { + result = &rbac.Role{} + err = c.client.Get().Namespace(c.ns).Resource("roles").Name(name).Do().Into(result) + return +} + +// Delete takes the name of the role and deletes it. Returns an error if one occurs. +func (c *roles) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete().Namespace(c.ns).Resource("roles").Name(name).Body(options).Do().Error() +} + +// Create takes the representation of a role and creates it. Returns the server's representation of the role, and an error, if it occurs. +func (c *roles) Create(role *rbac.Role) (result *rbac.Role, err error) { + result = &rbac.Role{} + err = c.client.Post().Namespace(c.ns).Resource("roles").Body(role).Do().Into(result) + return +} + +// Update takes the representation of a role and updates it. Returns the server's representation of the role, and an error, if it occurs. +func (c *roles) Update(role *rbac.Role) (result *rbac.Role, err error) { + result = &rbac.Role{} + err = c.client.Put().Namespace(c.ns).Resource("roles").Name(role.Name).Body(role).Do().Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested roles. +func (c *roles) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("roles"). + VersionedParams(&opts, api.ParameterCodec). 
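+		// Illustrative note: Prefix("watch") routes this request to the legacy
+		// watch endpoint, i.e. a path of the form .../watch/namespaces/<ns>/roles.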
+ Watch() +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/scheduledjobs.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/scheduledjobs.go new file mode 100644 index 000000000000..d2b83fce2009 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/scheduledjobs.go @@ -0,0 +1,103 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/batch" + "k8s.io/kubernetes/pkg/watch" +) + +// ScheduledJobsNamespacer has methods to work with ScheduledJob resources in a namespace +type ScheduledJobsNamespacer interface { + ScheduledJobs(namespace string) ScheduledJobInterface +} + +// ScheduledJobInterface exposes methods to work on ScheduledJob resources. +type ScheduledJobInterface interface { + List(opts api.ListOptions) (*batch.ScheduledJobList, error) + Get(name string) (*batch.ScheduledJob, error) + Create(scheduledJob *batch.ScheduledJob) (*batch.ScheduledJob, error) + Update(scheduledJob *batch.ScheduledJob) (*batch.ScheduledJob, error) + Delete(name string, options *api.DeleteOptions) error + Watch(opts api.ListOptions) (watch.Interface, error) + UpdateStatus(scheduledJob *batch.ScheduledJob) (*batch.ScheduledJob, error) +} + +// scheduledJobs implements ScheduledJobsNamespacer interface +type scheduledJobs struct { + r *BatchClient + ns string +} + +// newScheduledJobs returns a scheduledJobs +func newScheduledJobs(c *BatchClient, namespace string) *scheduledJobs { + return &scheduledJobs{c, namespace} +} + +// Ensure statically that scheduledJobs implements ScheduledJobInterface. +var _ ScheduledJobInterface = &scheduledJobs{} + +// List returns a list of scheduled jobs that match the label and field selectors. +func (c *scheduledJobs) List(opts api.ListOptions) (result *batch.ScheduledJobList, err error) { + result = &batch.ScheduledJobList{} + err = c.r.Get().Namespace(c.ns).Resource("scheduledJobs").VersionedParams(&opts, api.ParameterCodec).Do().Into(result) + return +} + +// Get returns information about a particular scheduled job. +func (c *scheduledJobs) Get(name string) (result *batch.ScheduledJob, err error) { + result = &batch.ScheduledJob{} + err = c.r.Get().Namespace(c.ns).Resource("scheduledJobs").Name(name).Do().Into(result) + return +} + +// Create creates a new scheduled job. +func (c *scheduledJobs) Create(job *batch.ScheduledJob) (result *batch.ScheduledJob, err error) { + result = &batch.ScheduledJob{} + err = c.r.Post().Namespace(c.ns).Resource("scheduledJobs").Body(job).Do().Into(result) + return +} + +// Update updates an existing scheduled job. +func (c *scheduledJobs) Update(job *batch.ScheduledJob) (result *batch.ScheduledJob, err error) { + result = &batch.ScheduledJob{} + err = c.r.Put().Namespace(c.ns).Resource("scheduledJobs").Name(job.Name).Body(job).Do().Into(result) + return +} + +// Delete deletes a scheduled job, returns error if one occurs. 
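+// For example (an illustrative sketch; assumes a *BatchClient named bc and a
+// job named "nightly-report"):
+//
+//	if err := newScheduledJobs(bc, api.NamespaceDefault).Delete("nightly-report", nil); err != nil {
+//		// handle the failed delete
+//	}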
+func (c *scheduledJobs) Delete(name string, options *api.DeleteOptions) (err error) { + return c.r.Delete().Namespace(c.ns).Resource("scheduledJobs").Name(name).Body(options).Do().Error() +} + +// Watch returns a watch.Interface that watches the requested scheduled jobs. +func (c *scheduledJobs) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.r.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("scheduledJobs"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} + +// UpdateStatus takes the name of the scheduled job and the new status. Returns the server's representation of the scheduled job, and an error, if it occurs. +func (c *scheduledJobs) UpdateStatus(job *batch.ScheduledJob) (result *batch.ScheduledJob, err error) { + result = &batch.ScheduledJob{} + err = c.r.Put().Namespace(c.ns).Resource("scheduledJobs").Name(job.Name).SubResource("status").Body(job).Do().Into(result) + return +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/services_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/services_test.go new file mode 100644 index 000000000000..fadfe2be4890 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/services_test.go @@ -0,0 +1,238 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package unversioned_test + +import ( + "net/url" + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/client/unversioned/testclient/simple" + "k8s.io/kubernetes/pkg/labels" +) + +func TestListServices(t *testing.T) { + ns := api.NamespaceDefault + c := &simple.Client{ + Request: simple.Request{ + Method: "GET", + Path: testapi.Default.ResourcePath("services", ns, ""), + Query: simple.BuildQueryValues(nil)}, + Response: simple.Response{StatusCode: 200, + Body: &api.ServiceList{ + Items: []api.Service{ + { + ObjectMeta: api.ObjectMeta{ + Name: "name", + Labels: map[string]string{ + "foo": "bar", + "name": "baz", + }, + }, + Spec: api.ServiceSpec{ + Selector: map[string]string{ + "one": "two", + }, + }, + }, + }, + }, + }, + } + receivedServiceList, err := c.Setup(t).Services(ns).List(api.ListOptions{}) + defer c.Close() + t.Logf("received services: %v %#v", err, receivedServiceList) + c.Validate(t, receivedServiceList, err) +} + +func TestListServicesLabels(t *testing.T) { + ns := api.NamespaceDefault + labelSelectorQueryParamName := unversioned.LabelSelectorQueryParam(testapi.Default.GroupVersion().String()) + c := &simple.Client{ + Request: simple.Request{ + Method: "GET", + Path: testapi.Default.ResourcePath("services", ns, ""), + Query: simple.BuildQueryValues(url.Values{labelSelectorQueryParamName: []string{"foo=bar,name=baz"}})}, + Response: simple.Response{StatusCode: 200, + Body: &api.ServiceList{ + Items: []api.Service{ + { + ObjectMeta: api.ObjectMeta{ + Name: "name", + Labels: map[string]string{ + "foo": "bar", + "name": "baz", + }, + }, + Spec: api.ServiceSpec{ + Selector: map[string]string{ + "one": "two", + }, + }, + }, + }, + }, + }, + } + c.Setup(t) + defer c.Close() + c.QueryValidator[labelSelectorQueryParamName] = simple.ValidateLabels + selector := labels.Set{"foo": "bar", "name": "baz"}.AsSelector() + options := api.ListOptions{LabelSelector: selector} + receivedServiceList, err := c.Services(ns).List(options) + c.Validate(t, receivedServiceList, err) +} + +func TestGetService(t *testing.T) { + ns := api.NamespaceDefault + c := &simple.Client{ + Request: simple.Request{ + Method: "GET", + Path: testapi.Default.ResourcePath("services", ns, "1"), + Query: simple.BuildQueryValues(nil)}, + Response: simple.Response{StatusCode: 200, Body: &api.Service{ObjectMeta: api.ObjectMeta{Name: "service-1"}}}, + } + response, err := c.Setup(t).Services(ns).Get("1") + defer c.Close() + c.Validate(t, response, err) +} + +func TestGetServiceWithNoName(t *testing.T) { + ns := api.NamespaceDefault + c := &simple.Client{Error: true} + receivedPod, err := c.Setup(t).Services(ns).Get("") + defer c.Close() + if (err != nil) && (err.Error() != simple.NameRequiredError) { + t.Errorf("Expected error: %v, but got %v", simple.NameRequiredError, err) + } + + c.Validate(t, receivedPod, err) +} + +func TestCreateService(t *testing.T) { + ns := api.NamespaceDefault + c := &simple.Client{ + Request: simple.Request{ + Method: "POST", + Path: testapi.Default.ResourcePath("services", ns, ""), + Body: &api.Service{ObjectMeta: api.ObjectMeta{Name: "service-1"}}, + Query: simple.BuildQueryValues(nil)}, + Response: simple.Response{StatusCode: 200, Body: &api.Service{ObjectMeta: api.ObjectMeta{Name: "service-1"}}}, + } + response, err := c.Setup(t).Services(ns).Create(&api.Service{ObjectMeta: api.ObjectMeta{Name: "service-1"}}) + defer c.Close() + c.Validate(t, response, err) +} + +func TestUpdateService(t 
*testing.T) { + ns := api.NamespaceDefault + svc := &api.Service{ObjectMeta: api.ObjectMeta{Name: "service-1", ResourceVersion: "1"}} + c := &simple.Client{ + Request: simple.Request{Method: "PUT", Path: testapi.Default.ResourcePath("services", ns, "service-1"), Body: svc, Query: simple.BuildQueryValues(nil)}, + Response: simple.Response{StatusCode: 200, Body: svc}, + } + response, err := c.Setup(t).Services(ns).Update(svc) + defer c.Close() + c.Validate(t, response, err) +} + +func TestDeleteService(t *testing.T) { + ns := api.NamespaceDefault + c := &simple.Client{ + Request: simple.Request{Method: "DELETE", Path: testapi.Default.ResourcePath("services", ns, "1"), Query: simple.BuildQueryValues(nil)}, + Response: simple.Response{StatusCode: 200}, + } + err := c.Setup(t).Services(ns).Delete("1") + defer c.Close() + c.Validate(t, nil, err) +} + +func TestUpdateServiceStatus(t *testing.T) { + ns := api.NamespaceDefault + lbStatus := api.LoadBalancerStatus{ + Ingress: []api.LoadBalancerIngress{ + {IP: "127.0.0.1"}, + }, + } + requestService := &api.Service{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Namespace: ns, + ResourceVersion: "1", + }, + Status: api.ServiceStatus{ + LoadBalancer: lbStatus, + }, + } + c := &simple.Client{ + Request: simple.Request{ + Method: "PUT", + Path: testapi.Default.ResourcePath("services", ns, "foo") + "/status", + Query: simple.BuildQueryValues(nil), + }, + Response: simple.Response{ + StatusCode: 200, + Body: &api.Service{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Labels: map[string]string{ + "foo": "bar", + "name": "baz", + }, + }, + Spec: api.ServiceSpec{}, + Status: api.ServiceStatus{ + LoadBalancer: lbStatus, + }, + }, + }, + } + receivedService, err := c.Setup(t).Services(ns).UpdateStatus(requestService) + defer c.Close() + c.Validate(t, receivedService, err) +} + +func TestServiceProxyGet(t *testing.T) { + body := "OK" + ns := api.NamespaceDefault + c := &simple.Client{ + Request: simple.Request{ + Method: "GET", + Path: testapi.Default.ResourcePath("services", ns, "service-1") + "/proxy/foo", + Query: simple.BuildQueryValues(url.Values{"param-name": []string{"param-value"}}), + }, + Response: simple.Response{StatusCode: 200, RawBody: &body}, + } + response, err := c.Setup(t).Services(ns).ProxyGet("", "service-1", "", "foo", map[string]string{"param-name": "param-value"}).DoRaw() + defer c.Close() + c.ValidateRaw(t, response, err) + + // With scheme and port specified + c = &simple.Client{ + Request: simple.Request{ + Method: "GET", + Path: testapi.Default.ResourcePath("services", ns, "https:service-1:my-port") + "/proxy/foo", + Query: simple.BuildQueryValues(url.Values{"param-name": []string{"param-value"}}), + }, + Response: simple.Response{StatusCode: 200, RawBody: &body}, + } + response, err = c.Setup(t).Services(ns).ProxyGet("https", "service-1", "my-port", "foo", map[string]string{"param-name": "param-value"}).DoRaw() + defer c.Close() + c.ValidateRaw(t, response, err) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/testclient/fake_clusterrolebindings.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/testclient/fake_clusterrolebindings.go new file mode 100644 index 000000000000..6da1da35536a --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/testclient/fake_clusterrolebindings.go @@ -0,0 +1,73 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testclient + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/rbac" + "k8s.io/kubernetes/pkg/watch" +) + +// FakeClusterRoleBindings implements ClusterRoleBindingInterface +type FakeClusterRoleBindings struct { + Fake *FakeRbac +} + +func (c *FakeClusterRoleBindings) Get(name string) (*rbac.ClusterRoleBinding, error) { + obj, err := c.Fake.Invokes(NewRootGetAction("clusterrolebindings", name), &rbac.ClusterRoleBinding{}) + if obj == nil { + return nil, err + } + + return obj.(*rbac.ClusterRoleBinding), err +} + +func (c *FakeClusterRoleBindings) List(opts api.ListOptions) (*rbac.ClusterRoleBindingList, error) { + obj, err := c.Fake.Invokes(NewRootListAction("clusterrolebindings", opts), &rbac.ClusterRoleBindingList{}) + if obj == nil { + return nil, err + } + + return obj.(*rbac.ClusterRoleBindingList), err +} + +func (c *FakeClusterRoleBindings) Create(csr *rbac.ClusterRoleBinding) (*rbac.ClusterRoleBinding, error) { + obj, err := c.Fake.Invokes(NewRootCreateAction("clusterrolebindings", csr), csr) + if obj == nil { + return nil, err + } + + return obj.(*rbac.ClusterRoleBinding), err +} + +func (c *FakeClusterRoleBindings) Update(csr *rbac.ClusterRoleBinding) (*rbac.ClusterRoleBinding, error) { + obj, err := c.Fake.Invokes(NewRootUpdateAction("clusterrolebindings", csr), csr) + if obj == nil { + return nil, err + } + + return obj.(*rbac.ClusterRoleBinding), err +} + +func (c *FakeClusterRoleBindings) Delete(name string, opts *api.DeleteOptions) error { + _, err := c.Fake.Invokes(NewRootDeleteAction("clusterrolebindings", name), &rbac.ClusterRoleBinding{}) + return err +} + +func (c *FakeClusterRoleBindings) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.Fake.InvokesWatch(NewRootWatchAction("clusterrolebindings", opts)) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/testclient/fake_clusterroles.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/testclient/fake_clusterroles.go new file mode 100644 index 000000000000..5bdfd3dc74f7 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/testclient/fake_clusterroles.go @@ -0,0 +1,73 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package testclient + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/rbac" + "k8s.io/kubernetes/pkg/watch" +) + +// FakeClusterRoles implements ClusterRoleInterface +type FakeClusterRoles struct { + Fake *FakeRbac +} + +func (c *FakeClusterRoles) Get(name string) (*rbac.ClusterRole, error) { + obj, err := c.Fake.Invokes(NewRootGetAction("clusterroles", name), &rbac.ClusterRole{}) + if obj == nil { + return nil, err + } + + return obj.(*rbac.ClusterRole), err +} + +func (c *FakeClusterRoles) List(opts api.ListOptions) (*rbac.ClusterRoleList, error) { + obj, err := c.Fake.Invokes(NewRootListAction("clusterroles", opts), &rbac.ClusterRoleList{}) + if obj == nil { + return nil, err + } + + return obj.(*rbac.ClusterRoleList), err +} + +func (c *FakeClusterRoles) Create(csr *rbac.ClusterRole) (*rbac.ClusterRole, error) { + obj, err := c.Fake.Invokes(NewRootCreateAction("clusterroles", csr), csr) + if obj == nil { + return nil, err + } + + return obj.(*rbac.ClusterRole), err +} + +func (c *FakeClusterRoles) Update(csr *rbac.ClusterRole) (*rbac.ClusterRole, error) { + obj, err := c.Fake.Invokes(NewRootUpdateAction("clusterroles", csr), csr) + if obj == nil { + return nil, err + } + + return obj.(*rbac.ClusterRole), err +} + +func (c *FakeClusterRoles) Delete(name string, opts *api.DeleteOptions) error { + _, err := c.Fake.Invokes(NewRootDeleteAction("clusterroles", name), &rbac.ClusterRole{}) + return err +} + +func (c *FakeClusterRoles) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.Fake.InvokesWatch(NewRootWatchAction("clusterroles", opts)) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/testclient/fake_deployments.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/testclient/fake_deployments.go index f53f2719899b..3e13fe7b34a3 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/testclient/fake_deployments.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/testclient/fake_deployments.go @@ -19,17 +19,21 @@ package testclient import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/extensions" + kclientlib "k8s.io/kubernetes/pkg/client/unversioned" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/watch" ) -// FakeDeployments implements DeploymentsInterface. Meant to be embedded into a struct to get a default +// FakeDeployments implements DeploymentInterface. Meant to be embedded into a struct to get a default // implementation. This makes faking out just the methods you want to test easier. type FakeDeployments struct { Fake *FakeExperimental Namespace string } +// Ensure statically that FakeDeployments implements DeploymentInterface. 
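+// The blank-identifier assignment below is a compile-time assertion: the build breaks if FakeDeployments ever stops satisfying DeploymentInterface.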
+var _ kclientlib.DeploymentInterface = &FakeDeployments{} + func (c *FakeDeployments) Get(name string) (*extensions.Deployment, error) { obj, err := c.Fake.Invokes(NewGetAction("deployments", c.Namespace, name), &extensions.Deployment{}) if obj == nil { diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/testclient/fake_horizontal_pod_autoscalers.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/testclient/fake_horizontal_pod_autoscalers.go index e50b326d9142..c299679f0b8b 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/testclient/fake_horizontal_pod_autoscalers.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/testclient/fake_horizontal_pod_autoscalers.go @@ -18,7 +18,7 @@ package testclient import ( "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/pkg/apis/autoscaling" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/watch" ) @@ -26,21 +26,21 @@ import ( // FakeHorizontalPodAutoscalers implements HorizontalPodAutoscalerInterface. Meant to be embedded into a struct to get a default // implementation. This makes faking out just the methods you want to test easier. type FakeHorizontalPodAutoscalers struct { - Fake *FakeExperimental + Fake *FakeAutoscaling Namespace string } -func (c *FakeHorizontalPodAutoscalers) Get(name string) (*extensions.HorizontalPodAutoscaler, error) { - obj, err := c.Fake.Invokes(NewGetAction("horizontalpodautoscalers", c.Namespace, name), &extensions.HorizontalPodAutoscaler{}) +func (c *FakeHorizontalPodAutoscalers) Get(name string) (*autoscaling.HorizontalPodAutoscaler, error) { + obj, err := c.Fake.Invokes(NewGetAction("horizontalpodautoscalers", c.Namespace, name), &autoscaling.HorizontalPodAutoscaler{}) if obj == nil { return nil, err } - return obj.(*extensions.HorizontalPodAutoscaler), err + return obj.(*autoscaling.HorizontalPodAutoscaler), err } -func (c *FakeHorizontalPodAutoscalers) List(opts api.ListOptions) (*extensions.HorizontalPodAutoscalerList, error) { - obj, err := c.Fake.Invokes(NewListAction("horizontalpodautoscalers", c.Namespace, opts), &extensions.HorizontalPodAutoscalerList{}) +func (c *FakeHorizontalPodAutoscalers) List(opts api.ListOptions) (*autoscaling.HorizontalPodAutoscalerList, error) { + obj, err := c.Fake.Invokes(NewListAction("horizontalpodautoscalers", c.Namespace, opts), &autoscaling.HorizontalPodAutoscalerList{}) if obj == nil { return nil, err } @@ -48,8 +48,8 @@ func (c *FakeHorizontalPodAutoscalers) List(opts api.ListOptions) (*extensions.H if label == nil { label = labels.Everything() } - list := &extensions.HorizontalPodAutoscalerList{} - for _, a := range obj.(*extensions.HorizontalPodAutoscalerList).Items { + list := &autoscaling.HorizontalPodAutoscalerList{} + for _, a := range obj.(*autoscaling.HorizontalPodAutoscalerList).Items { if label.Matches(labels.Set(a.Labels)) { list.Items = append(list.Items, a) } @@ -57,108 +57,37 @@ func (c *FakeHorizontalPodAutoscalers) List(opts api.ListOptions) (*extensions.H return list, err } -func (c *FakeHorizontalPodAutoscalers) Create(a *extensions.HorizontalPodAutoscaler) (*extensions.HorizontalPodAutoscaler, error) { +func (c *FakeHorizontalPodAutoscalers) Create(a *autoscaling.HorizontalPodAutoscaler) (*autoscaling.HorizontalPodAutoscaler, error) { obj, err := c.Fake.Invokes(NewCreateAction("horizontalpodautoscalers", c.Namespace, a), a) if obj == nil { return nil, err } - return obj.(*extensions.HorizontalPodAutoscaler), err + return 
obj.(*autoscaling.HorizontalPodAutoscaler), err } -func (c *FakeHorizontalPodAutoscalers) Update(a *extensions.HorizontalPodAutoscaler) (*extensions.HorizontalPodAutoscaler, error) { +func (c *FakeHorizontalPodAutoscalers) Update(a *autoscaling.HorizontalPodAutoscaler) (*autoscaling.HorizontalPodAutoscaler, error) { obj, err := c.Fake.Invokes(NewUpdateAction("horizontalpodautoscalers", c.Namespace, a), a) if obj == nil { return nil, err } - return obj.(*extensions.HorizontalPodAutoscaler), err + return obj.(*autoscaling.HorizontalPodAutoscaler), err } -func (c *FakeHorizontalPodAutoscalers) UpdateStatus(a *extensions.HorizontalPodAutoscaler) (*extensions.HorizontalPodAutoscaler, error) { - obj, err := c.Fake.Invokes(NewUpdateSubresourceAction("horizontalpodautoscalers", "status", c.Namespace, a), &extensions.HorizontalPodAutoscaler{}) +func (c *FakeHorizontalPodAutoscalers) UpdateStatus(a *autoscaling.HorizontalPodAutoscaler) (*autoscaling.HorizontalPodAutoscaler, error) { + obj, err := c.Fake.Invokes(NewUpdateSubresourceAction("horizontalpodautoscalers", "status", c.Namespace, a), &autoscaling.HorizontalPodAutoscaler{}) if obj == nil { return nil, err } - return obj.(*extensions.HorizontalPodAutoscaler), err + return obj.(*autoscaling.HorizontalPodAutoscaler), err } func (c *FakeHorizontalPodAutoscalers) Delete(name string, options *api.DeleteOptions) error { - _, err := c.Fake.Invokes(NewDeleteAction("horizontalpodautoscalers", c.Namespace, name), &extensions.HorizontalPodAutoscaler{}) + _, err := c.Fake.Invokes(NewDeleteAction("horizontalpodautoscalers", c.Namespace, name), &autoscaling.HorizontalPodAutoscaler{}) return err } func (c *FakeHorizontalPodAutoscalers) Watch(opts api.ListOptions) (watch.Interface, error) { return c.Fake.InvokesWatch(NewWatchAction("horizontalpodautoscalers", c.Namespace, opts)) } - -// FakeHorizontalPodAutoscalers implements HorizontalPodAutoscalerInterface. Meant to be embedded into a struct to get a default -// implementation. This makes faking out just the methods you want to test easier. 
-// This is a test implementation of HorizontalPodAutoscalersV1 -// TODO(piosz): get back to one client implementation once HPA will be graduated to GA completely -type FakeHorizontalPodAutoscalersV1 struct { - Fake *FakeAutoscaling - Namespace string -} - -func (c *FakeHorizontalPodAutoscalersV1) Get(name string) (*extensions.HorizontalPodAutoscaler, error) { - obj, err := c.Fake.Invokes(NewGetAction("horizontalpodautoscalers", c.Namespace, name), &extensions.HorizontalPodAutoscaler{}) - if obj == nil { - return nil, err - } - - return obj.(*extensions.HorizontalPodAutoscaler), err -} - -func (c *FakeHorizontalPodAutoscalersV1) List(opts api.ListOptions) (*extensions.HorizontalPodAutoscalerList, error) { - obj, err := c.Fake.Invokes(NewListAction("horizontalpodautoscalers", c.Namespace, opts), &extensions.HorizontalPodAutoscalerList{}) - if obj == nil { - return nil, err - } - label := opts.LabelSelector - if label == nil { - label = labels.Everything() - } - list := &extensions.HorizontalPodAutoscalerList{} - for _, a := range obj.(*extensions.HorizontalPodAutoscalerList).Items { - if label.Matches(labels.Set(a.Labels)) { - list.Items = append(list.Items, a) - } - } - return list, err -} - -func (c *FakeHorizontalPodAutoscalersV1) Create(a *extensions.HorizontalPodAutoscaler) (*extensions.HorizontalPodAutoscaler, error) { - obj, err := c.Fake.Invokes(NewCreateAction("horizontalpodautoscalers", c.Namespace, a), a) - if obj == nil { - return nil, err - } - - return obj.(*extensions.HorizontalPodAutoscaler), err -} - -func (c *FakeHorizontalPodAutoscalersV1) Update(a *extensions.HorizontalPodAutoscaler) (*extensions.HorizontalPodAutoscaler, error) { - obj, err := c.Fake.Invokes(NewUpdateAction("horizontalpodautoscalers", c.Namespace, a), a) - if obj == nil { - return nil, err - } - - return obj.(*extensions.HorizontalPodAutoscaler), err -} - -func (c *FakeHorizontalPodAutoscalersV1) UpdateStatus(a *extensions.HorizontalPodAutoscaler) (*extensions.HorizontalPodAutoscaler, error) { - obj, err := c.Fake.Invokes(NewUpdateSubresourceAction("horizontalpodautoscalers", "status", c.Namespace, a), &extensions.HorizontalPodAutoscaler{}) - if obj == nil { - return nil, err - } - return obj.(*extensions.HorizontalPodAutoscaler), err -} - -func (c *FakeHorizontalPodAutoscalersV1) Delete(name string, options *api.DeleteOptions) error { - _, err := c.Fake.Invokes(NewDeleteAction("horizontalpodautoscalers", c.Namespace, name), &extensions.HorizontalPodAutoscaler{}) - return err -} - -func (c *FakeHorizontalPodAutoscalersV1) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.Fake.InvokesWatch(NewWatchAction("horizontalpodautoscalers", c.Namespace, opts)) -} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/testclient/fake_jobs.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/testclient/fake_jobs.go index 71ac8dfd65a3..dedde9dc3d48 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/testclient/fake_jobs.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/testclient/fake_jobs.go @@ -18,7 +18,7 @@ package testclient import ( "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/pkg/apis/batch" "k8s.io/kubernetes/pkg/watch" ) @@ -29,44 +29,44 @@ type FakeJobs struct { Namespace string } -func (c *FakeJobs) Get(name string) (*extensions.Job, error) { - obj, err := c.Fake.Invokes(NewGetAction("jobs", c.Namespace, name), &extensions.Job{}) +func (c *FakeJobs) Get(name string) 
(*batch.Job, error) { + obj, err := c.Fake.Invokes(NewGetAction("jobs", c.Namespace, name), &batch.Job{}) if obj == nil { return nil, err } - return obj.(*extensions.Job), err + return obj.(*batch.Job), err } -func (c *FakeJobs) List(opts api.ListOptions) (*extensions.JobList, error) { - obj, err := c.Fake.Invokes(NewListAction("jobs", c.Namespace, opts), &extensions.JobList{}) +func (c *FakeJobs) List(opts api.ListOptions) (*batch.JobList, error) { + obj, err := c.Fake.Invokes(NewListAction("jobs", c.Namespace, opts), &batch.JobList{}) if obj == nil { return nil, err } - return obj.(*extensions.JobList), err + return obj.(*batch.JobList), err } -func (c *FakeJobs) Create(job *extensions.Job) (*extensions.Job, error) { +func (c *FakeJobs) Create(job *batch.Job) (*batch.Job, error) { obj, err := c.Fake.Invokes(NewCreateAction("jobs", c.Namespace, job), job) if obj == nil { return nil, err } - return obj.(*extensions.Job), err + return obj.(*batch.Job), err } -func (c *FakeJobs) Update(job *extensions.Job) (*extensions.Job, error) { +func (c *FakeJobs) Update(job *batch.Job) (*batch.Job, error) { obj, err := c.Fake.Invokes(NewUpdateAction("jobs", c.Namespace, job), job) if obj == nil { return nil, err } - return obj.(*extensions.Job), err + return obj.(*batch.Job), err } func (c *FakeJobs) Delete(name string, options *api.DeleteOptions) error { - _, err := c.Fake.Invokes(NewDeleteAction("jobs", c.Namespace, name), &extensions.Job{}) + _, err := c.Fake.Invokes(NewDeleteAction("jobs", c.Namespace, name), &batch.Job{}) return err } @@ -74,13 +74,13 @@ func (c *FakeJobs) Watch(opts api.ListOptions) (watch.Interface, error) { return c.Fake.InvokesWatch(NewWatchAction("jobs", c.Namespace, opts)) } -func (c *FakeJobs) UpdateStatus(job *extensions.Job) (result *extensions.Job, err error) { +func (c *FakeJobs) UpdateStatus(job *batch.Job) (result *batch.Job, err error) { obj, err := c.Fake.Invokes(NewUpdateSubresourceAction("jobs", "status", c.Namespace, job), job) if obj == nil { return nil, err } - return obj.(*extensions.Job), err + return obj.(*batch.Job), err } // FakeJobs implements JobInterface. 
Meant to be embedded into a struct to get a default @@ -92,44 +92,44 @@ type FakeJobsV1 struct { Namespace string } -func (c *FakeJobsV1) Get(name string) (*extensions.Job, error) { - obj, err := c.Fake.Invokes(NewGetAction("jobs", c.Namespace, name), &extensions.Job{}) +func (c *FakeJobsV1) Get(name string) (*batch.Job, error) { + obj, err := c.Fake.Invokes(NewGetAction("jobs", c.Namespace, name), &batch.Job{}) if obj == nil { return nil, err } - return obj.(*extensions.Job), err + return obj.(*batch.Job), err } -func (c *FakeJobsV1) List(opts api.ListOptions) (*extensions.JobList, error) { - obj, err := c.Fake.Invokes(NewListAction("jobs", c.Namespace, opts), &extensions.JobList{}) +func (c *FakeJobsV1) List(opts api.ListOptions) (*batch.JobList, error) { + obj, err := c.Fake.Invokes(NewListAction("jobs", c.Namespace, opts), &batch.JobList{}) if obj == nil { return nil, err } - return obj.(*extensions.JobList), err + return obj.(*batch.JobList), err } -func (c *FakeJobsV1) Create(job *extensions.Job) (*extensions.Job, error) { +func (c *FakeJobsV1) Create(job *batch.Job) (*batch.Job, error) { obj, err := c.Fake.Invokes(NewCreateAction("jobs", c.Namespace, job), job) if obj == nil { return nil, err } - return obj.(*extensions.Job), err + return obj.(*batch.Job), err } -func (c *FakeJobsV1) Update(job *extensions.Job) (*extensions.Job, error) { +func (c *FakeJobsV1) Update(job *batch.Job) (*batch.Job, error) { obj, err := c.Fake.Invokes(NewUpdateAction("jobs", c.Namespace, job), job) if obj == nil { return nil, err } - return obj.(*extensions.Job), err + return obj.(*batch.Job), err } func (c *FakeJobsV1) Delete(name string, options *api.DeleteOptions) error { - _, err := c.Fake.Invokes(NewDeleteAction("jobs", c.Namespace, name), &extensions.Job{}) + _, err := c.Fake.Invokes(NewDeleteAction("jobs", c.Namespace, name), &batch.Job{}) return err } @@ -137,11 +137,11 @@ func (c *FakeJobsV1) Watch(opts api.ListOptions) (watch.Interface, error) { return c.Fake.InvokesWatch(NewWatchAction("jobs", c.Namespace, opts)) } -func (c *FakeJobsV1) UpdateStatus(job *extensions.Job) (result *extensions.Job, err error) { +func (c *FakeJobsV1) UpdateStatus(job *batch.Job) (result *batch.Job, err error) { obj, err := c.Fake.Invokes(NewUpdateSubresourceAction("jobs", "status", c.Namespace, job), job) if obj == nil { return nil, err } - return obj.(*extensions.Job), err + return obj.(*batch.Job), err } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/testclient/fake_network_policies.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/testclient/fake_network_policies.go new file mode 100644 index 000000000000..abbe9c61ba8c --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/testclient/fake_network_policies.go @@ -0,0 +1,75 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package testclient + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/extensions" + kclientlib "k8s.io/kubernetes/pkg/client/unversioned" + "k8s.io/kubernetes/pkg/watch" +) + +// FakeNetworkPolicies implements NetworkPolicyInterface. Meant to be embedded into a struct to get a default +// implementation. This makes faking out just the methods you want to test easier. +type FakeNetworkPolicies struct { + Fake *FakeExperimental + Namespace string +} + +// Ensure statically that FakeNetworkPolicies implements NetworkPolicyInterface. +var _ kclientlib.NetworkPolicyInterface = &FakeNetworkPolicies{} + +func (c *FakeNetworkPolicies) Get(name string) (*extensions.NetworkPolicy, error) { + obj, err := c.Fake.Invokes(NewGetAction("networkpolicies", c.Namespace, name), &extensions.NetworkPolicy{}) + if obj == nil { + return nil, err + } + return obj.(*extensions.NetworkPolicy), err +} + +func (c *FakeNetworkPolicies) List(opts api.ListOptions) (*extensions.NetworkPolicyList, error) { + obj, err := c.Fake.Invokes(NewListAction("networkpolicies", c.Namespace, opts), &extensions.NetworkPolicyList{}) + if obj == nil { + return nil, err + } + return obj.(*extensions.NetworkPolicyList), err +} + +func (c *FakeNetworkPolicies) Create(np *extensions.NetworkPolicy) (*extensions.NetworkPolicy, error) { + obj, err := c.Fake.Invokes(NewCreateAction("networkpolicies", c.Namespace, np), &extensions.NetworkPolicy{}) + if obj == nil { + return nil, err + } + return obj.(*extensions.NetworkPolicy), err +} + +func (c *FakeNetworkPolicies) Update(np *extensions.NetworkPolicy) (*extensions.NetworkPolicy, error) { + obj, err := c.Fake.Invokes(NewUpdateAction("networkpolicies", c.Namespace, np), &extensions.NetworkPolicy{}) + if obj == nil { + return nil, err + } + return obj.(*extensions.NetworkPolicy), err +} + +func (c *FakeNetworkPolicies) Delete(name string, options *api.DeleteOptions) error { + _, err := c.Fake.Invokes(NewDeleteAction("networkpolicies", c.Namespace, name), &extensions.NetworkPolicy{}) + return err +} + +func (c *FakeNetworkPolicies) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.Fake.InvokesWatch(NewWatchAction("networkpolicies", c.Namespace, opts)) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/testclient/fake_podsecuritypolicy.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/testclient/fake_podsecuritypolicy.go index bb611d322c51..06bd10991fb3 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/testclient/fake_podsecuritypolicy.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/testclient/fake_podsecuritypolicy.go @@ -47,16 +47,16 @@ func (c *FakePodSecurityPolicy) Get(name string) (*extensions.PodSecurityPolicy, return obj.(*extensions.PodSecurityPolicy), err } -func (c *FakePodSecurityPolicy) Create(scc *extensions.PodSecurityPolicy) (*extensions.PodSecurityPolicy, error) { - obj, err := c.Fake.Invokes(NewCreateAction("podsecuritypolicies", c.Namespace, scc), &extensions.PodSecurityPolicy{}) +func (c *FakePodSecurityPolicy) Create(psp *extensions.PodSecurityPolicy) (*extensions.PodSecurityPolicy, error) { + obj, err := c.Fake.Invokes(NewCreateAction("podsecuritypolicies", c.Namespace, psp), &extensions.PodSecurityPolicy{}) if obj == nil { return nil, err } return obj.(*extensions.PodSecurityPolicy), err } -func (c *FakePodSecurityPolicy) Update(scc *extensions.PodSecurityPolicy) (*extensions.PodSecurityPolicy, error) { - obj, err :=
c.Fake.Invokes(NewUpdateAction("podsecuritypolicies", c.Namespace, scc), &extensions.PodSecurityPolicy{}) +func (c *FakePodSecurityPolicy) Update(psp *extensions.PodSecurityPolicy) (*extensions.PodSecurityPolicy, error) { + obj, err := c.Fake.Invokes(NewUpdateAction("podsecuritypolicies", c.Namespace, psp), &extensions.PodSecurityPolicy{}) if obj == nil { return nil, err } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/testclient/fake_rolebindings.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/testclient/fake_rolebindings.go new file mode 100644 index 000000000000..6502cbd5a92f --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/testclient/fake_rolebindings.go @@ -0,0 +1,74 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testclient + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/rbac" + "k8s.io/kubernetes/pkg/watch" +) + +// FakeRoleBindings implements RoleBindingInterface +type FakeRoleBindings struct { + Fake *FakeRbac + Namespace string +} + +func (c *FakeRoleBindings) Get(name string) (*rbac.RoleBinding, error) { + obj, err := c.Fake.Invokes(NewGetAction("rolebindings", c.Namespace, name), &rbac.RoleBinding{}) + if obj == nil { + return nil, err + } + + return obj.(*rbac.RoleBinding), err +} + +func (c *FakeRoleBindings) List(opts api.ListOptions) (*rbac.RoleBindingList, error) { + obj, err := c.Fake.Invokes(NewListAction("rolebindings", c.Namespace, opts), &rbac.RoleBindingList{}) + if obj == nil { + return nil, err + } + + return obj.(*rbac.RoleBindingList), err +} + +func (c *FakeRoleBindings) Create(csr *rbac.RoleBinding) (*rbac.RoleBinding, error) { + obj, err := c.Fake.Invokes(NewCreateAction("rolebindings", c.Namespace, csr), csr) + if obj == nil { + return nil, err + } + + return obj.(*rbac.RoleBinding), err +} + +func (c *FakeRoleBindings) Update(csr *rbac.RoleBinding) (*rbac.RoleBinding, error) { + obj, err := c.Fake.Invokes(NewUpdateAction("rolebindings", c.Namespace, csr), csr) + if obj == nil { + return nil, err + } + + return obj.(*rbac.RoleBinding), err +} + +func (c *FakeRoleBindings) Delete(name string, opts *api.DeleteOptions) error { + _, err := c.Fake.Invokes(NewDeleteAction("rolebindings", c.Namespace, name), &rbac.RoleBinding{}) + return err +} + +func (c *FakeRoleBindings) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.Fake.InvokesWatch(NewWatchAction("rolebindings", c.Namespace, opts)) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/testclient/fake_roles.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/testclient/fake_roles.go new file mode 100644 index 000000000000..53aeb1e27f5b --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/testclient/fake_roles.go @@ -0,0 +1,74 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testclient + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/rbac" + "k8s.io/kubernetes/pkg/watch" +) + +// FakeRoles implements RoleInterface +type FakeRoles struct { + Fake *FakeRbac + Namespace string +} + +func (c *FakeRoles) Get(name string) (*rbac.Role, error) { + obj, err := c.Fake.Invokes(NewGetAction("roles", c.Namespace, name), &rbac.Role{}) + if obj == nil { + return nil, err + } + + return obj.(*rbac.Role), err +} + +func (c *FakeRoles) List(opts api.ListOptions) (*rbac.RoleList, error) { + obj, err := c.Fake.Invokes(NewListAction("roles", c.Namespace, opts), &rbac.RoleList{}) + if obj == nil { + return nil, err + } + + return obj.(*rbac.RoleList), err +} + +func (c *FakeRoles) Create(csr *rbac.Role) (*rbac.Role, error) { + obj, err := c.Fake.Invokes(NewCreateAction("roles", c.Namespace, csr), csr) + if obj == nil { + return nil, err + } + + return obj.(*rbac.Role), err +} + +func (c *FakeRoles) Update(csr *rbac.Role) (*rbac.Role, error) { + obj, err := c.Fake.Invokes(NewUpdateAction("roles", c.Namespace, csr), csr) + if obj == nil { + return nil, err + } + + return obj.(*rbac.Role), err +} + +func (c *FakeRoles) Delete(name string, opts *api.DeleteOptions) error { + _, err := c.Fake.Invokes(NewDeleteAction("roles", c.Namespace, name), &rbac.Role{}) + return err +} + +func (c *FakeRoles) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.Fake.InvokesWatch(NewWatchAction("roles", c.Namespace, opts)) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/testclient/fake_scheduledjobs.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/testclient/fake_scheduledjobs.go new file mode 100644 index 000000000000..7036682bac30 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/testclient/fake_scheduledjobs.go @@ -0,0 +1,84 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testclient + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/batch" + "k8s.io/kubernetes/pkg/watch" +) + +// FakeScheduledJobs implements ScheduledJobInterface. Meant to be embedded into a struct to get a default +// implementation. This makes faking out just the methods you want to test easier. 
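+// Embed a *FakeScheduledJobs in your own struct and override only the methods under test; calls to the remaining methods are recorded as actions on the shared Fake.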
+type FakeScheduledJobs struct { + Fake *FakeBatch + Namespace string +} + +func (c *FakeScheduledJobs) Get(name string) (*batch.ScheduledJob, error) { + obj, err := c.Fake.Invokes(NewGetAction("scheduledjobs", c.Namespace, name), &batch.ScheduledJob{}) + if obj == nil { + return nil, err + } + + return obj.(*batch.ScheduledJob), err +} + +func (c *FakeScheduledJobs) List(opts api.ListOptions) (*batch.ScheduledJobList, error) { + obj, err := c.Fake.Invokes(NewListAction("scheduledjobs", c.Namespace, opts), &batch.ScheduledJobList{}) + if obj == nil { + return nil, err + } + + return obj.(*batch.ScheduledJobList), err +} + +func (c *FakeScheduledJobs) Create(scheduledJob *batch.ScheduledJob) (*batch.ScheduledJob, error) { + obj, err := c.Fake.Invokes(NewCreateAction("scheduledjobs", c.Namespace, scheduledJob), scheduledJob) + if obj == nil { + return nil, err + } + + return obj.(*batch.ScheduledJob), err +} + +func (c *FakeScheduledJobs) Update(scheduledJob *batch.ScheduledJob) (*batch.ScheduledJob, error) { + obj, err := c.Fake.Invokes(NewUpdateAction("scheduledjobs", c.Namespace, scheduledJob), scheduledJob) + if obj == nil { + return nil, err + } + + return obj.(*batch.ScheduledJob), err +} + +func (c *FakeScheduledJobs) Delete(name string, options *api.DeleteOptions) error { + _, err := c.Fake.Invokes(NewDeleteAction("scheduledjobs", c.Namespace, name), &batch.ScheduledJob{}) + return err +} + +func (c *FakeScheduledJobs) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.Fake.InvokesWatch(NewWatchAction("scheduledjobs", c.Namespace, opts)) +} + +func (c *FakeScheduledJobs) UpdateStatus(scheduledJob *batch.ScheduledJob) (result *batch.ScheduledJob, err error) { + obj, err := c.Fake.Invokes(NewUpdateSubresourceAction("scheduledjobs", "status", c.Namespace, scheduledJob), scheduledJob) + if obj == nil { + return nil, err + } + + return obj.(*batch.ScheduledJob), err +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/testclient/fake_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/testclient/fake_test.go new file mode 100644 index 000000000000..303d6d786b73 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/testclient/fake_test.go @@ -0,0 +1,39 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testclient + +import ( + "testing" + + client "k8s.io/kubernetes/pkg/client/unversioned" +) + +// This test file just ensures that Fake and structs it is embedded in +// implement Interface. 
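+// The blank-identifier conversions below compile only if the types satisfy client.Interface, so a regression is caught at build time rather than when the tests run.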
+ +func TestFakeImplementsInterface(t *testing.T) { + _ = client.Interface(&Fake{}) +} + +type MyFake struct { + *Fake +} + +func TestEmbeddedFakeImplementsInterface(t *testing.T) { + _ = client.Interface(MyFake{&Fake{}}) + _ = client.Interface(&MyFake{&Fake{}}) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/testclient/fake_thirdpartyresources.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/testclient/fake_thirdpartyresources.go index 8aa198d70e7a..cb4a15572ab8 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/testclient/fake_thirdpartyresources.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/testclient/fake_thirdpartyresources.go @@ -26,15 +26,14 @@ import ( // FakeThirdPartyResources implements ThirdPartyResourceInterface. Meant to be embedded into a struct to get a default // implementation. This makes faking out just the method you want to test easier. type FakeThirdPartyResources struct { - Fake *FakeExperimental - Namespace string + Fake *FakeExperimental } // Ensure statically that FakeThirdPartyResources implements ThirdPartyResourceInterface. var _ kclientlib.ThirdPartyResourceInterface = &FakeThirdPartyResources{} func (c *FakeThirdPartyResources) Get(name string) (*extensions.ThirdPartyResource, error) { - obj, err := c.Fake.Invokes(NewGetAction("thirdpartyresources", c.Namespace, name), &extensions.ThirdPartyResource{}) + obj, err := c.Fake.Invokes(NewGetAction("thirdpartyresources", "", name), &extensions.ThirdPartyResource{}) if obj == nil { return nil, err } @@ -42,7 +41,7 @@ func (c *FakeThirdPartyResources) Get(name string) (*extensions.ThirdPartyResour } func (c *FakeThirdPartyResources) List(opts api.ListOptions) (*extensions.ThirdPartyResourceList, error) { - obj, err := c.Fake.Invokes(NewListAction("thirdpartyresources", c.Namespace, opts), &extensions.ThirdPartyResourceList{}) + obj, err := c.Fake.Invokes(NewListAction("thirdpartyresources", "", opts), &extensions.ThirdPartyResourceList{}) if obj == nil { return nil, err } @@ -50,7 +49,7 @@ func (c *FakeThirdPartyResources) List(opts api.ListOptions) (*extensions.ThirdP } func (c *FakeThirdPartyResources) Create(daemon *extensions.ThirdPartyResource) (*extensions.ThirdPartyResource, error) { - obj, err := c.Fake.Invokes(NewCreateAction("thirdpartyresources", c.Namespace, daemon), &extensions.ThirdPartyResource{}) + obj, err := c.Fake.Invokes(NewCreateAction("thirdpartyresources", "", daemon), &extensions.ThirdPartyResource{}) if obj == nil { return nil, err } @@ -58,7 +57,7 @@ func (c *FakeThirdPartyResources) Create(daemon *extensions.ThirdPartyResource) } func (c *FakeThirdPartyResources) Update(daemon *extensions.ThirdPartyResource) (*extensions.ThirdPartyResource, error) { - obj, err := c.Fake.Invokes(NewUpdateAction("thirdpartyresources", c.Namespace, daemon), &extensions.ThirdPartyResource{}) + obj, err := c.Fake.Invokes(NewUpdateAction("thirdpartyresources", "", daemon), &extensions.ThirdPartyResource{}) if obj == nil { return nil, err } @@ -66,7 +65,7 @@ func (c *FakeThirdPartyResources) Update(daemon *extensions.ThirdPartyResource) } func (c *FakeThirdPartyResources) UpdateStatus(daemon *extensions.ThirdPartyResource) (*extensions.ThirdPartyResource, error) { - obj, err := c.Fake.Invokes(NewUpdateSubresourceAction("thirdpartyresources", "status", c.Namespace, daemon), &extensions.ThirdPartyResource{}) + obj, err := c.Fake.Invokes(NewUpdateSubresourceAction("thirdpartyresources", "status", "", daemon),
&extensions.ThirdPartyResource{}) if obj == nil { return nil, err } @@ -74,10 +73,10 @@ func (c *FakeThirdPartyResources) UpdateStatus(daemon *extensions.ThirdPartyReso } func (c *FakeThirdPartyResources) Delete(name string) error { - _, err := c.Fake.Invokes(NewDeleteAction("thirdpartyresources", c.Namespace, name), &extensions.ThirdPartyResource{}) + _, err := c.Fake.Invokes(NewDeleteAction("thirdpartyresources", "", name), &extensions.ThirdPartyResource{}) return err } func (c *FakeThirdPartyResources) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.Fake.InvokesWatch(NewWatchAction("thirdpartyresources", c.Namespace, opts)) + return c.Fake.InvokesWatch(NewWatchAction("thirdpartyresources", "", opts)) } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/testclient/fixture.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/testclient/fixture.go index 4956a1b90fb4..0fdbeac9f66c 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/testclient/fixture.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/testclient/fixture.go @@ -22,7 +22,6 @@ import ( "reflect" "strings" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/api/meta" "k8s.io/kubernetes/pkg/api/unversioned" @@ -80,26 +79,24 @@ func ObjectReaction(o ObjectRetriever, mapper meta.RESTMapper) ReactionFunc { return true, resource, err case CreateAction: - meta, err := api.ObjectMetaFor(castAction.GetObject()) + accessor, err := meta.Accessor(castAction.GetObject()) if err != nil { return true, nil, err } - resource, err := o.Kind(kind, meta.Name) + resource, err := o.Kind(kind, accessor.GetName()) return true, resource, err case UpdateAction: - meta, err := api.ObjectMetaFor(castAction.GetObject()) + accessor, err := meta.Accessor(castAction.GetObject()) if err != nil { return true, nil, err } - resource, err := o.Kind(kind, meta.Name) + resource, err := o.Kind(kind, accessor.GetName()) return true, resource, err default: return false, nil, fmt.Errorf("no reaction implemented for %s", action) } - - return true, nil, nil } } @@ -207,11 +204,11 @@ func (o objects) Kind(kind unversioned.GroupVersionKind, name string) (runtime.O } func (o objects) Add(obj runtime.Object) error { - gvk, err := o.scheme.ObjectKind(obj) + gvks, _, err := o.scheme.ObjectKinds(obj) if err != nil { return err } - kind := gvk.Kind + kind := gvks[0].Kind switch { case meta.IsListType(obj): diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/testclient/simple/simple_testclient.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/testclient/simple/simple_testclient.go index 8697da29ea1c..546fb7c47b70 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/testclient/simple/simple_testclient.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/testclient/simple/simple_testclient.go @@ -101,6 +101,10 @@ func (c *Client) Setup(t *testing.T) *Client { Host: c.server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Extensions.GroupVersion()}, }) + c.RbacClient = client.NewRbacOrDie(&restclient.Config{ + Host: c.server.URL, + ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Rbac.GroupVersion()}, + }) c.Clientset = clientset.NewForConfigOrDie(&restclient.Config{Host: c.server.URL}) } @@ -110,8 +114,7 @@ func (c *Client) Setup(t *testing.T) *Client { func (c *Client) Close() { if c.server != nil { - // TODO: Uncomment when fix 
#19254 - // c.server.Close() + c.server.Close() } } @@ -221,11 +224,11 @@ func validateFields(a, b string) bool { func (c *Client) body(t *testing.T, obj runtime.Object, raw *string) *string { if obj != nil { - fqKind, err := api.Scheme.ObjectKind(obj) + fqKinds, _, err := api.Scheme.ObjectKinds(obj) if err != nil { t.Errorf("unexpected encoding error: %v", err) } - groupName := fqKind.GroupVersion().Group + groupName := fqKinds[0].GroupVersion().Group if c.ResourceGroup != "" { groupName = c.ResourceGroup } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/testclient/testclient.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/testclient/testclient.go index cd82377b6189..96ff7d56be42 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/testclient/testclient.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/testclient/testclient.go @@ -301,6 +301,10 @@ func (c *Fake) ConfigMaps(namespace string) client.ConfigMapsInterface { return &FakeConfigMaps{Fake: c, Namespace: namespace} } +func (c *Fake) Rbac() client.RbacInterface { + return &FakeRbac{Fake: c} +} + // SwaggerSchema returns an empty swagger.ApiDeclaration for testing func (c *Fake) SwaggerSchema(version unversioned.GroupVersion) (*swagger.ApiDeclaration, error) { action := ActionImpl{} @@ -325,7 +329,7 @@ type FakeAutoscaling struct { } func (c *FakeAutoscaling) HorizontalPodAutoscalers(namespace string) client.HorizontalPodAutoscalerInterface { - return &FakeHorizontalPodAutoscalersV1{Fake: c, Namespace: namespace} + return &FakeHorizontalPodAutoscalers{Fake: c, Namespace: namespace} } // NewSimpleFakeBatch returns a client that will respond with the provided objects @@ -341,6 +345,10 @@ func (c *FakeBatch) Jobs(namespace string) client.JobInterface { return &FakeJobsV1{Fake: c, Namespace: namespace} } +func (c *FakeBatch) ScheduledJobs(namespace string) client.ScheduledJobInterface { + return &FakeScheduledJobs{Fake: c, Namespace: namespace} +} + // NewSimpleFakeExp returns a client that will respond with the provided objects func NewSimpleFakeExp(objects ...runtime.Object) *FakeExperimental { return &FakeExperimental{Fake: NewSimpleFake(objects...)} @@ -354,10 +362,6 @@ func (c *FakeExperimental) DaemonSets(namespace string) client.DaemonSetInterfac return &FakeDaemonSets{Fake: c, Namespace: namespace} } -func (c *FakeExperimental) HorizontalPodAutoscalers(namespace string) client.HorizontalPodAutoscalerInterface { - return &FakeHorizontalPodAutoscalers{Fake: c, Namespace: namespace} -} - func (c *FakeExperimental) Deployments(namespace string) client.DeploymentInterface { return &FakeDeployments{Fake: c, Namespace: namespace} } @@ -374,18 +378,54 @@ func (c *FakeExperimental) Ingress(namespace string) client.IngressInterface { return &FakeIngress{Fake: c, Namespace: namespace} } -func (c *FakeExperimental) ThirdPartyResources(namespace string) client.ThirdPartyResourceInterface { - return &FakeThirdPartyResources{Fake: c, Namespace: namespace} +func (c *FakeExperimental) ThirdPartyResources() client.ThirdPartyResourceInterface { + return &FakeThirdPartyResources{Fake: c} } func (c *FakeExperimental) ReplicaSets(namespace string) client.ReplicaSetInterface { return &FakeReplicaSets{Fake: c, Namespace: namespace} } +func (c *FakeExperimental) NetworkPolicies(namespace string) client.NetworkPolicyInterface { + return &FakeNetworkPolicies{Fake: c, Namespace: namespace} +} + +func NewSimpleFakeRbac(objects ...runtime.Object) *FakeRbac { + return 
&FakeRbac{Fake: NewSimpleFake(objects...)} +} + +type FakeRbac struct { + *Fake +} + +func (c *FakeRbac) Roles(namespace string) client.RoleInterface { + return &FakeRoles{Fake: c, Namespace: namespace} +} + +func (c *FakeRbac) RoleBindings(namespace string) client.RoleBindingInterface { + return &FakeRoleBindings{Fake: c, Namespace: namespace} +} + +func (c *FakeRbac) ClusterRoles() client.ClusterRoleInterface { + return &FakeClusterRoles{Fake: c} +} + +func (c *FakeRbac) ClusterRoleBindings() client.ClusterRoleBindingInterface { + return &FakeClusterRoleBindings{Fake: c} +} + type FakeDiscovery struct { *Fake } +func (c *FakeDiscovery) ServerPreferredResources() ([]unversioned.GroupVersionResource, error) { + return nil, nil +} + +func (c *FakeDiscovery) ServerPreferredNamespacedResources() ([]unversioned.GroupVersionResource, error) { + return nil, nil +} + func (c *FakeDiscovery) ServerResourcesForGroupVersion(groupVersion string) (*unversioned.APIResourceList, error) { action := ActionImpl{ Verb: "get", diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/testclient/testclient_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/testclient/testclient_test.go new file mode 100644 index 000000000000..4a799df71db2 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/testclient/testclient_test.go @@ -0,0 +1,75 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testclient + +import ( + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/errors" + "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/runtime" +) + +func TestNewClient(t *testing.T) { + o := NewObjects(api.Scheme, api.Codecs.UniversalDecoder()) + if err := AddObjectsFromPath("../../../../examples/guestbook/frontend-service.yaml", o, api.Codecs.UniversalDecoder()); err != nil { + t.Fatal(err) + } + client := &Fake{} + client.AddReactor("*", "*", ObjectReaction(o, testapi.Default.RESTMapper())) + list, err := client.Services("test").List(api.ListOptions{}) + if err != nil { + t.Fatal(err) + } + if len(list.Items) != 1 { + t.Fatalf("unexpected list %#v", list) + } + + // When list is invoked a second time, the same results are returned. 
+ list, err = client.Services("test").List(api.ListOptions{}) + if err != nil { + t.Fatal(err) + } + if len(list.Items) != 1 { + t.Fatalf("unexpected list %#v", list) + } + t.Logf("list: %#v", list) +} + +func TestErrors(t *testing.T) { + o := NewObjects(api.Scheme, api.Codecs.UniversalDecoder()) + o.Add(&api.List{ + Items: []runtime.Object{ + // This first call to List will return this error + &(errors.NewNotFound(api.Resource("ServiceList"), "").ErrStatus), + // The second call to List will return this error + &(errors.NewForbidden(api.Resource("ServiceList"), "", nil).ErrStatus), + }, + }) + client := &Fake{} + client.AddReactor("*", "*", ObjectReaction(o, testapi.Default.RESTMapper())) + _, err := client.Services("test").List(api.ListOptions{}) + if !errors.IsNotFound(err) { + t.Fatalf("unexpected error: %v", err) + } + t.Logf("error: %#v", err.(*errors.StatusError).Status()) + _, err = client.Services("test").List(api.ListOptions{}) + if !errors.IsForbidden(err) { + t.Fatalf("unexpected error: %v", err) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/thirdpartyresources.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/thirdpartyresources.go index 09bc4e4afc81..0908db06eb72 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/thirdpartyresources.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/thirdpartyresources.go @@ -24,7 +24,7 @@ import ( // ThirdPartyResourceNamespacer has methods to work with ThirdPartyResource resources in a namespace type ThirdPartyResourceNamespacer interface { - ThirdPartyResources(namespace string) ThirdPartyResourceInterface + ThirdPartyResources() ThirdPartyResourceInterface } type ThirdPartyResourceInterface interface { @@ -39,12 +39,11 @@ type ThirdPartyResourceInterface interface { // thirdPartyResources implements ThirdPartyResourceNamespacer interface type thirdPartyResources struct { - r *ExtensionsClient - ns string + r *ExtensionsClient } -func newThirdPartyResources(c *ExtensionsClient, namespace string) *thirdPartyResources { - return &thirdPartyResources{c, namespace} +func newThirdPartyResources(c *ExtensionsClient) *thirdPartyResources { + return &thirdPartyResources{c} } // Ensure statically that thirdPartyResources implements ThirdPartyResourceInterface. @@ -52,48 +51,47 @@ var _ ThirdPartyResourceInterface = &thirdPartyResources{} func (c *thirdPartyResources) List(opts api.ListOptions) (result *extensions.ThirdPartyResourceList, err error) { result = &extensions.ThirdPartyResourceList{} - err = c.r.Get().Namespace(c.ns).Resource("thirdpartyresources").VersionedParams(&opts, api.ParameterCodec).Do().Into(result) + err = c.r.Get().Resource("thirdpartyresources").VersionedParams(&opts, api.ParameterCodec).Do().Into(result) return } // Get returns information about a particular third party resource. func (c *thirdPartyResources) Get(name string) (result *extensions.ThirdPartyResource, err error) { result = &extensions.ThirdPartyResource{} - err = c.r.Get().Namespace(c.ns).Resource("thirdpartyresources").Name(name).Do().Into(result) + err = c.r.Get().Resource("thirdpartyresources").Name(name).Do().Into(result) return } // Create creates a new third party resource.
func (c *thirdPartyResources) Create(resource *extensions.ThirdPartyResource) (result *extensions.ThirdPartyResource, err error) { result = &extensions.ThirdPartyResource{} - err = c.r.Post().Namespace(c.ns).Resource("thirdpartyresources").Body(resource).Do().Into(result) + err = c.r.Post().Resource("thirdpartyresources").Body(resource).Do().Into(result) return } // Update updates an existing third party resource. func (c *thirdPartyResources) Update(resource *extensions.ThirdPartyResource) (result *extensions.ThirdPartyResource, err error) { result = &extensions.ThirdPartyResource{} - err = c.r.Put().Namespace(c.ns).Resource("thirdpartyresources").Name(resource.Name).Body(resource).Do().Into(result) + err = c.r.Put().Resource("thirdpartyresources").Name(resource.Name).Body(resource).Do().Into(result) return } // UpdateStatus updates an existing third party resource status func (c *thirdPartyResources) UpdateStatus(resource *extensions.ThirdPartyResource) (result *extensions.ThirdPartyResource, err error) { result = &extensions.ThirdPartyResource{} - err = c.r.Put().Namespace(c.ns).Resource("thirdpartyresources").Name(resource.Name).SubResource("status").Body(resource).Do().Into(result) + err = c.r.Put().Resource("thirdpartyresources").Name(resource.Name).SubResource("status").Body(resource).Do().Into(result) return } // Delete deletes an existing third party resource. func (c *thirdPartyResources) Delete(name string) error { - return c.r.Delete().Namespace(c.ns).Resource("thirdpartyresources").Name(name).Do().Error() + return c.r.Delete().Resource("thirdpartyresources").Name(name).Do().Error() } // Watch returns a watch.Interface that watches the requested third party resources. func (c *thirdPartyResources) Watch(opts api.ListOptions) (watch.Interface, error) { return c.r.Get(). Prefix("watch"). - Namespace(c.ns). Resource("thirdpartyresources"). VersionedParams(&opts, api.ParameterCodec). Watch() diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/thirdpartyresources_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/thirdpartyresources_test.go new file mode 100644 index 000000000000..266ff4ae8b7d --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/client/unversioned/thirdpartyresources_test.go @@ -0,0 +1,177 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package unversioned_test + +import ( + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/pkg/client/unversioned/testclient/simple" +) + +func getThirdPartyResourceName() string { + return "thirdpartyresources" +} + +func TestListThirdPartyResources(t *testing.T) { + c := &simple.Client{ + Request: simple.Request{ + Method: "GET", + Path: testapi.Extensions.ResourcePath(getThirdPartyResourceName(), "", ""), + }, + Response: simple.Response{StatusCode: 200, + Body: &extensions.ThirdPartyResourceList{ + Items: []extensions.ThirdPartyResource{ + { + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Labels: map[string]string{ + "foo": "bar", + "name": "baz", + }, + }, + Description: "test third party resource", + }, + }, + }, + }, + } + receivedThirdPartyResources, err := c.Setup(t).Extensions().ThirdPartyResources().List(api.ListOptions{}) + defer c.Close() + c.Validate(t, receivedThirdPartyResources, err) +} + +func TestGetThirdPartyResource(t *testing.T) { + c := &simple.Client{ + Request: simple.Request{Method: "GET", Path: testapi.Extensions.ResourcePath(getThirdPartyResourceName(), "", "foo"), Query: simple.BuildQueryValues(nil)}, + Response: simple.Response{ + StatusCode: 200, + Body: &extensions.ThirdPartyResource{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Labels: map[string]string{ + "foo": "bar", + "name": "baz", + }, + }, + Description: "test third party resource", + }, + }, + } + receivedThirdPartyResource, err := c.Setup(t).Extensions().ThirdPartyResources().Get("foo") + defer c.Close() + c.Validate(t, receivedThirdPartyResource, err) +} + +func TestGetThirdPartyResourceWithNoName(t *testing.T) { + c := &simple.Client{Error: true} + receivedThirdPartyResource, err := c.Setup(t).Extensions().ThirdPartyResources().Get("") + defer c.Close() + if (err != nil) && (err.Error() != simple.NameRequiredError) { + t.Errorf("Expected error: %v, but got %v", simple.NameRequiredError, err) + } + + c.Validate(t, receivedThirdPartyResource, err) +} + +func TestUpdateThirdPartyResource(t *testing.T) { + requestThirdPartyResource := &extensions.ThirdPartyResource{ + ObjectMeta: api.ObjectMeta{Name: "foo", ResourceVersion: "1"}, + } + c := &simple.Client{ + Request: simple.Request{Method: "PUT", Path: testapi.Extensions.ResourcePath(getThirdPartyResourceName(), "", "foo"), Query: simple.BuildQueryValues(nil)}, + Response: simple.Response{ + StatusCode: 200, + Body: &extensions.ThirdPartyResource{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Labels: map[string]string{ + "foo": "bar", + "name": "baz", + }, + }, + Description: "test third party resource", + }, + }, + } + receivedThirdPartyResource, err := c.Setup(t).Extensions().ThirdPartyResources().Update(requestThirdPartyResource) + defer c.Close() + c.Validate(t, receivedThirdPartyResource, err) +} + +func TestUpdateThirdPartyResourceUpdateStatus(t *testing.T) { + requestThirdPartyResource := &extensions.ThirdPartyResource{ + ObjectMeta: api.ObjectMeta{Name: "foo", ResourceVersion: "1"}, + } + c := &simple.Client{ + Request: simple.Request{Method: "PUT", Path: testapi.Extensions.ResourcePath(getThirdPartyResourceName(), "", "foo") + "/status", Query: simple.BuildQueryValues(nil)}, + Response: simple.Response{ + StatusCode: 200, + Body: &extensions.ThirdPartyResource{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Labels: map[string]string{ + "foo": "bar", + "name": "baz", + }, + }, + Description: "test third party resource", + }, + }, + } + receivedThirdPartyResource, err :=
c.Setup(t).Extensions().ThirdPartyResources().UpdateStatus(requestThirdPartyResource) + defer c.Close() + c.Validate(t, receivedThirdPartyResource, err) +} + +func TestDeleteThirdPartyResource(t *testing.T) { + c := &simple.Client{ + Request: simple.Request{Method: "DELETE", Path: testapi.Extensions.ResourcePath(getThirdPartyResourceName(), "", "foo"), Query: simple.BuildQueryValues(nil)}, + Response: simple.Response{StatusCode: 200}, + } + err := c.Setup(t).Extensions().ThirdPartyResources().Delete("foo") + defer c.Close() + c.Validate(t, nil, err) +} + +func TestCreateThirdPartyResource(t *testing.T) { + requestThirdPartyResource := &extensions.ThirdPartyResource{ + ObjectMeta: api.ObjectMeta{Name: "foo"}, + } + c := &simple.Client{ + Request: simple.Request{Method: "POST", Path: testapi.Extensions.ResourcePath(getThirdPartyResourceName(), "", ""), Body: requestThirdPartyResource, Query: simple.BuildQueryValues(nil)}, + Response: simple.Response{ + StatusCode: 200, + Body: &extensions.ThirdPartyResource{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Labels: map[string]string{ + "foo": "bar", + "name": "baz", + }, + }, + Description: "test third party resource", + }, + }, + } + receivedThirdPartyResource, err := c.Setup(t).Extensions().ThirdPartyResources().Create(requestThirdPartyResource) + defer c.Close() + c.Validate(t, receivedThirdPartyResource, err) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/OWNERS b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/OWNERS index 35859cd8c025..ac83016025ce 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/OWNERS +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/OWNERS @@ -1,5 +1,4 @@ assignees: - bprashanth - - davidopp - derekwaynecarr - mikedanese diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/controller_utils.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/controller_utils.go index fc377134d930..0748017346b7 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/controller_utils.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/controller_utils.go @@ -24,6 +24,7 @@ import ( "github.com/golang/glog" "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/meta" "k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/validation" "k8s.io/kubernetes/pkg/apis/extensions" @@ -398,7 +399,7 @@ func getPodsAnnotationSet(template *api.PodTemplateSpec, object runtime.Object) func getPodsPrefix(controllerName string) string { // use the dash (if the name isn't too long) to make the pod name a bit prettier prefix := fmt.Sprintf("%s-", controllerName) - if ok, _ := validation.ValidatePodName(prefix, true); !ok { + if len(validation.ValidatePodName(prefix, true)) != 0 { prefix = controllerName } return prefix @@ -412,17 +413,17 @@ func (r RealPodControl) CreatePodsOnNode(nodeName, namespace string, template *a return r.createPods(nodeName, namespace, template, object) } -func (r RealPodControl) createPods(nodeName, namespace string, template *api.PodTemplateSpec, object runtime.Object) error { +func GetPodFromTemplate(template *api.PodTemplateSpec, parentObject runtime.Object) (*api.Pod, error) { desiredLabels := getPodsLabelSet(template) - desiredAnnotations, err := getPodsAnnotationSet(template, object) + desiredAnnotations, err := getPodsAnnotationSet(template, parentObject) if err != nil { - return err + return nil, err } - meta, err := api.ObjectMetaFor(object) + accessor, err := meta.Accessor(parentObject) if err != nil { - 
return fmt.Errorf("object does not have ObjectMeta, %v", err) + return nil, fmt.Errorf("parentObject does not have ObjectMeta, %v", err) } - prefix := getPodsPrefix(meta.Name) + prefix := getPodsPrefix(accessor.GetName()) pod := &api.Pod{ ObjectMeta: api.ObjectMeta{ @@ -432,7 +433,15 @@ func (r RealPodControl) createPods(nodeName, namespace string, template *api.Pod }, } if err := api.Scheme.Convert(&template.Spec, &pod.Spec); err != nil { - return fmt.Errorf("unable to convert pod template: %v", err) + return nil, fmt.Errorf("unable to convert pod template: %v", err) + } + return pod, nil +} + +func (r RealPodControl) createPods(nodeName, namespace string, template *api.PodTemplateSpec, object runtime.Object) error { + pod, err := GetPodFromTemplate(template, object) + if err != nil { + return err } if len(nodeName) != 0 { pod.Spec.NodeName = nodeName @@ -444,14 +453,19 @@ func (r RealPodControl) createPods(nodeName, namespace string, template *api.Pod r.Recorder.Eventf(object, api.EventTypeWarning, "FailedCreate", "Error creating: %v", err) return fmt.Errorf("unable to create pods: %v", err) } else { - glog.V(4).Infof("Controller %v created pod %v", meta.Name, newPod.Name) + accessor, err := meta.Accessor(object) + if err != nil { + glog.Errorf("parentObject does not have ObjectMeta, %v", err) + return nil + } + glog.V(4).Infof("Controller %v created pod %v", accessor.GetName(), newPod.Name) r.Recorder.Eventf(object, api.EventTypeNormal, "SuccessfulCreate", "Created pod: %v", newPod.Name) } return nil } func (r RealPodControl) DeletePod(namespace string, podID string, object runtime.Object) error { - meta, err := api.ObjectMetaFor(object) + accessor, err := meta.Accessor(object) if err != nil { return fmt.Errorf("object does not have ObjectMeta, %v", err) } @@ -459,7 +473,7 @@ func (r RealPodControl) DeletePod(namespace string, podID string, object runtime r.Recorder.Eventf(object, api.EventTypeWarning, "FailedDelete", "Error deleting: %v", err) return fmt.Errorf("unable to delete pods: %v", err) } else { - glog.V(4).Infof("Controller %v deleted pod %v", meta.Name, podID) + glog.V(4).Infof("Controller %v deleted pod %v", accessor.GetName(), podID) r.Recorder.Eventf(object, api.EventTypeNormal, "SuccessfulDelete", "Deleted pod: %v", podID) } return nil @@ -575,7 +589,7 @@ func podReadyTime(pod *api.Pod) unversioned.Time { func maxContainerRestarts(pod *api.Pod) int { maxRestarts := 0 for _, c := range pod.Status.ContainerStatuses { - maxRestarts = integer.IntMax(maxRestarts, c.RestartCount) + maxRestarts = integer.IntMax(maxRestarts, int(c.RestartCount)) } return maxRestarts } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/controller_utils_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/controller_utils_test.go new file mode 100644 index 000000000000..423e42703ec7 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/controller_utils_test.go @@ -0,0 +1,372 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "fmt" + "math/rand" + "net/http/httptest" + "reflect" + "sort" + "sync" + "testing" + "time" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/client/cache" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" + "k8s.io/kubernetes/pkg/client/record" + "k8s.io/kubernetes/pkg/client/restclient" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/securitycontext" + "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/sets" + utiltesting "k8s.io/kubernetes/pkg/util/testing" +) + +// NewFakeControllerExpectationsLookup creates a fake store for PodExpectations. +func NewFakeControllerExpectationsLookup(ttl time.Duration) (*ControllerExpectations, *util.FakeClock) { + fakeTime := time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC) + fakeClock := util.NewFakeClock(fakeTime) + ttlPolicy := &cache.TTLPolicy{Ttl: ttl, Clock: fakeClock} + ttlStore := cache.NewFakeExpirationStore( + ExpKeyFunc, nil, ttlPolicy, fakeClock) + return &ControllerExpectations{ttlStore}, fakeClock +} + +func newReplicationController(replicas int) *api.ReplicationController { + rc := &api.ReplicationController{ + TypeMeta: unversioned.TypeMeta{APIVersion: testapi.Default.GroupVersion().String()}, + ObjectMeta: api.ObjectMeta{ + UID: util.NewUUID(), + Name: "foobar", + Namespace: api.NamespaceDefault, + ResourceVersion: "18", + }, + Spec: api.ReplicationControllerSpec{ + Replicas: int32(replicas), + Selector: map[string]string{"foo": "bar"}, + Template: &api.PodTemplateSpec{ + ObjectMeta: api.ObjectMeta{ + Labels: map[string]string{ + "name": "foo", + "type": "production", + }, + }, + Spec: api.PodSpec{ + Containers: []api.Container{ + { + Image: "foo/bar", + TerminationMessagePath: api.TerminationMessagePathDefault, + ImagePullPolicy: api.PullIfNotPresent, + SecurityContext: securitycontext.ValidSecurityContextWithContainerDefaults(), + }, + }, + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSDefault, + NodeSelector: map[string]string{ + "baz": "blah", + }, + }, + }, + }, + } + return rc +} + +// create count pods with the given phase for the given rc (same selectors and namespace), and add them to the store. +func newPodList(store cache.Store, count int, status api.PodPhase, rc *api.ReplicationController) *api.PodList { + pods := []api.Pod{} + for i := 0; i < count; i++ { + newPod := api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: fmt.Sprintf("pod%d", i), + Labels: rc.Spec.Selector, + Namespace: rc.Namespace, + }, + Status: api.PodStatus{Phase: status}, + } + if store != nil { + store.Add(&newPod) + } + pods = append(pods, newPod) + } + return &api.PodList{ + Items: pods, + } +} + +func TestControllerExpectations(t *testing.T) { + ttl := 30 * time.Second + e, fakeClock := NewFakeControllerExpectationsLookup(ttl) + // In practice we can't really have add and delete expectations since we only either create or + // delete replicas in one rc pass, and the rc goes to sleep soon after until the expectations are + // either fulfilled or timeout. 
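+	// Observing one more create and one more delete than expected below drives
+	// the counters negative, which still counts as satisfied expectations.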
+ adds, dels := 10, 30 + rc := newReplicationController(1) + + // RC fires off adds and deletes at apiserver, then sets expectations + rcKey, err := KeyFunc(rc) + if err != nil { + t.Errorf("Couldn't get key for object %+v: %v", rc, err) + } + e.SetExpectations(rcKey, adds, dels) + var wg sync.WaitGroup + for i := 0; i < adds+1; i++ { + wg.Add(1) + go func() { + // In prod this can happen either because of a failed create by the rc + // or after having observed a create via informer + e.CreationObserved(rcKey) + wg.Done() + }() + } + wg.Wait() + + // There are still delete expectations + if e.SatisfiedExpectations(rcKey) { + t.Errorf("Rc will sync before expectations are met") + } + for i := 0; i < dels+1; i++ { + wg.Add(1) + go func() { + e.DeletionObserved(rcKey) + wg.Done() + }() + } + wg.Wait() + + // Expectations have been surpassed + if podExp, exists, err := e.GetExpectations(rcKey); err == nil && exists { + add, del := podExp.GetExpectations() + if add != -1 || del != -1 { + t.Errorf("Unexpected pod expectations %#v", podExp) + } + } else { + t.Errorf("Could not get expectations for rc, exists %v and err %v", exists, err) + } + if !e.SatisfiedExpectations(rcKey) { + t.Errorf("Expectations are met but the rc will not sync") + } + + // Next round of rc sync, old expectations are cleared + e.SetExpectations(rcKey, 1, 2) + if podExp, exists, err := e.GetExpectations(rcKey); err == nil && exists { + add, del := podExp.GetExpectations() + if add != 1 || del != 2 { + t.Errorf("Unexpected pod expectations %#v", podExp) + } + } else { + t.Errorf("Could not get expectations for rc, exists %v and err %v", exists, err) + } + + // Expectations have expired because of ttl + fakeClock.Step(ttl + 1) + if !e.SatisfiedExpectations(rcKey) { + t.Errorf("Expectations should have expired but didn't") + } +} + +func TestUIDExpectations(t *testing.T) { + uidExp := NewUIDTrackingControllerExpectations(NewControllerExpectations()) + rcList := []*api.ReplicationController{ + newReplicationController(2), + newReplicationController(1), + newReplicationController(0), + newReplicationController(5), + } + rcToPods := map[string][]string{} + rcKeys := []string{} + for i := range rcList { + rc := rcList[i] + rcName := fmt.Sprintf("rc-%v", i) + rc.Name = rcName + rc.Spec.Selector[rcName] = rcName + podList := newPodList(nil, 5, api.PodRunning, rc) + rcKey, err := KeyFunc(rc) + if err != nil { + t.Fatalf("Couldn't get key for object %+v: %v", rc, err) + } + rcKeys = append(rcKeys, rcKey) + rcPodNames := []string{} + for i := range podList.Items { + p := &podList.Items[i] + p.Name = fmt.Sprintf("%v-%v", p.Name, rc.Name) + rcPodNames = append(rcPodNames, PodKey(p)) + } + rcToPods[rcKey] = rcPodNames + uidExp.ExpectDeletions(rcKey, rcPodNames) + } + for i := range rcKeys { + j := rand.Intn(i + 1) + rcKeys[i], rcKeys[j] = rcKeys[j], rcKeys[i] + } + for _, rcKey := range rcKeys { + if uidExp.SatisfiedExpectations(rcKey) { + t.Errorf("Controller %v satisfied expectations before deletion", rcKey) + } + for _, p := range rcToPods[rcKey] { + uidExp.DeletionObserved(rcKey, p) + } + if !uidExp.SatisfiedExpectations(rcKey) { + t.Errorf("Controller %v didn't satisfy expectations after deletion", rcKey) + } + uidExp.DeleteExpectations(rcKey) + if uidExp.GetUIDs(rcKey) != nil { + t.Errorf("Failed to delete uid expectations for %v", rcKey) + } + } +} + +func TestCreatePods(t *testing.T) { + ns := api.NamespaceDefault + body := runtime.EncodeOrDie(testapi.Default.Codec(), &api.Pod{ObjectMeta: api.ObjectMeta{Name: "empty_pod"}}) + 
fakeHandler := utiltesting.FakeHandler{ + StatusCode: 200, + ResponseBody: string(body), + } + testServer := httptest.NewServer(&fakeHandler) + defer testServer.Close() + clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}) + + podControl := RealPodControl{ + KubeClient: clientset, + Recorder: &record.FakeRecorder{}, + } + + controllerSpec := newReplicationController(1) + + // Make sure CreatePods sends a POST to the apiserver with a pod from the controller's pod template + podControl.CreatePods(ns, controllerSpec.Spec.Template, controllerSpec) + + expectedPod := api.Pod{ + ObjectMeta: api.ObjectMeta{ + Labels: controllerSpec.Spec.Template.Labels, + GenerateName: fmt.Sprintf("%s-", controllerSpec.Name), + }, + Spec: controllerSpec.Spec.Template.Spec, + } + fakeHandler.ValidateRequest(t, testapi.Default.ResourcePath("pods", api.NamespaceDefault, ""), "POST", nil) + actualPod, err := runtime.Decode(testapi.Default.Codec(), []byte(fakeHandler.RequestBody)) + if err != nil { + t.Errorf("Unexpected error: %#v", err) + } + if !api.Semantic.DeepDerivative(&expectedPod, actualPod) { + t.Logf("Body: %s", fakeHandler.RequestBody) + t.Errorf("Unexpected mismatch. Expected\n %#v,\n Got:\n %#v", &expectedPod, actualPod) + } +} + +func TestActivePodFiltering(t *testing.T) { + // This rc is not needed by the test, only the newPodList to give the pods labels/a namespace. + rc := newReplicationController(0) + podList := newPodList(nil, 5, api.PodRunning, rc) + podList.Items[0].Status.Phase = api.PodSucceeded + podList.Items[1].Status.Phase = api.PodFailed + expectedNames := sets.NewString() + for _, pod := range podList.Items[2:] { + expectedNames.Insert(pod.Name) + } + + got := FilterActivePods(podList.Items) + gotNames := sets.NewString() + for _, pod := range got { + gotNames.Insert(pod.Name) + } + if expectedNames.Difference(gotNames).Len() != 0 || gotNames.Difference(expectedNames).Len() != 0 { + t.Errorf("expected %v, got %v", expectedNames.List(), gotNames.List()) + } +} + +func TestSortingActivePods(t *testing.T) { + numPods := 9 + // This rc is not needed by the test, only the newPodList to give the pods labels/a namespace. + rc := newReplicationController(0) + podList := newPodList(nil, numPods, api.PodRunning, rc) + + pods := make([]*api.Pod, len(podList.Items)) + for i := range podList.Items { + pods[i] = &podList.Items[i] + } + // pods[0] is not scheduled yet. + pods[0].Spec.NodeName = "" + pods[0].Status.Phase = api.PodPending + // pods[1] is scheduled but pending. + pods[1].Spec.NodeName = "bar" + pods[1].Status.Phase = api.PodPending + // pods[2] is unknown. + pods[2].Spec.NodeName = "foo" + pods[2].Status.Phase = api.PodUnknown + // pods[3] is running but not ready. + pods[3].Spec.NodeName = "foo" + pods[3].Status.Phase = api.PodRunning + // pods[4] is running and ready but without LastTransitionTime. + now := unversioned.Now() + pods[4].Spec.NodeName = "foo" + pods[4].Status.Phase = api.PodRunning + pods[4].Status.Conditions = []api.PodCondition{{Type: api.PodReady, Status: api.ConditionTrue}} + pods[4].Status.ContainerStatuses = []api.ContainerStatus{{RestartCount: 3}, {RestartCount: 0}} + // pods[5] is running and ready and with LastTransitionTime.
+ pods[5].Spec.NodeName = "foo" + pods[5].Status.Phase = api.PodRunning + pods[5].Status.Conditions = []api.PodCondition{{Type: api.PodReady, Status: api.ConditionTrue, LastTransitionTime: now}} + pods[5].Status.ContainerStatuses = []api.ContainerStatus{{RestartCount: 3}, {RestartCount: 0}} + // pods[6] is running and ready for a longer time than pods[5]. + then := unversioned.Time{Time: now.AddDate(0, -1, 0)} + pods[6].Spec.NodeName = "foo" + pods[6].Status.Phase = api.PodRunning + pods[6].Status.Conditions = []api.PodCondition{{Type: api.PodReady, Status: api.ConditionTrue, LastTransitionTime: then}} + pods[6].Status.ContainerStatuses = []api.ContainerStatus{{RestartCount: 3}, {RestartCount: 0}} + // pods[7] has lower container restart count than pods[6]. + pods[7].Spec.NodeName = "foo" + pods[7].Status.Phase = api.PodRunning + pods[7].Status.Conditions = []api.PodCondition{{Type: api.PodReady, Status: api.ConditionTrue, LastTransitionTime: then}} + pods[7].Status.ContainerStatuses = []api.ContainerStatus{{RestartCount: 2}, {RestartCount: 1}} + pods[7].CreationTimestamp = now + // pods[8] is older than pods[7]. + pods[8].Spec.NodeName = "foo" + pods[8].Status.Phase = api.PodRunning + pods[8].Status.Conditions = []api.PodCondition{{Type: api.PodReady, Status: api.ConditionTrue, LastTransitionTime: then}} + pods[8].Status.ContainerStatuses = []api.ContainerStatus{{RestartCount: 2}, {RestartCount: 1}} + pods[8].CreationTimestamp = then + + getOrder := func(pods []*api.Pod) []string { + names := make([]string, len(pods)) + for i := range pods { + names[i] = pods[i].Name + } + return names + } + + expected := getOrder(pods) + + for i := 0; i < 20; i++ { + idx := rand.Perm(numPods) + randomizedPods := make([]*api.Pod, numPods) + for j := 0; j < numPods; j++ { + randomizedPods[j] = pods[idx[j]] + } + sort.Sort(ActivePods(randomizedPods)) + actual := getOrder(randomizedPods) + + if !reflect.DeepEqual(actual, expected) { + t.Errorf("expected %v, got %v", expected, actual) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/daemon/controller.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/daemon/controller.go index 96c10514881a..53eb03f73cc8 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/daemon/controller.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/daemon/controller.go @@ -30,13 +30,15 @@ import ( "k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/client/cache" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" + unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned" + unversionedextensions "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned" "k8s.io/kubernetes/pkg/client/record" - unversionedcore "k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned" - unversionedextensions "k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned" "k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller/framework" + "k8s.io/kubernetes/pkg/controller/framework/informers" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/util/metrics" utilruntime "k8s.io/kubernetes/pkg/util/runtime" "k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/util/validation/field" @@ -65,8 +67,16 @@ const ( // DaemonSetsController is responsible for synchronizing DaemonSet objects stored // in the system with actual running pods.
type DaemonSetsController struct { - kubeClient clientset.Interface - podControl controller.PodControlInterface + kubeClient clientset.Interface + eventRecorder record.EventRecorder + podControl controller.PodControlInterface + + // internalPodInformer is used to hold a personal informer. If we're using + // a normal shared informer, then the informer will be started for us. If + // we have a personal informer, we must start it ourselves. If you start + // the controller using NewDaemonSetsController(passing SharedInformer), this + // will be nil + internalPodInformer framework.SharedInformer // A dsc is temporarily suspended after creating/deleting this many replicas. // It resumes normal action after observing the watch events for them. @@ -85,7 +95,7 @@ type DaemonSetsController struct { // Watches changes to all daemon sets. dsController *framework.Controller // Watches changes to all pods - podController *framework.Controller + podController framework.ControllerInterface // Watches changes to all nodes. nodeController *framework.Controller // podStoreSynced returns true if the pod store has been synced at least once. @@ -98,14 +108,18 @@ type DaemonSetsController struct { queue *workqueue.Type } -func NewDaemonSetsController(kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, lookupCacheSize int) *DaemonSetsController { +func NewDaemonSetsController(podInformer framework.SharedIndexInformer, kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, lookupCacheSize int) *DaemonSetsController { eventBroadcaster := record.NewBroadcaster() eventBroadcaster.StartLogging(glog.Infof) // TODO: remove the wrapper when all clients have moved to use the clientset. - eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{kubeClient.Core().Events("")}) + eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: kubeClient.Core().Events("")}) + if kubeClient != nil && kubeClient.Core().GetRESTClient().GetRateLimiter() != nil { + metrics.RegisterMetricAndTrackRateLimiterUsage("daemon_controller", kubeClient.Core().GetRESTClient().GetRateLimiter()) + } dsc := &DaemonSetsController{ - kubeClient: kubeClient, + kubeClient: kubeClient, + eventRecorder: eventBroadcaster.NewRecorder(api.EventSource{Component: "daemonset-controller"}), podControl: controller.RealPodControl{ KubeClient: kubeClient, Recorder: eventBroadcaster.NewRecorder(api.EventSource{Component: "daemon-set"}), @@ -131,7 +145,7 @@ func NewDaemonSetsController(kubeClient clientset.Interface, resyncPeriod contro AddFunc: func(obj interface{}) { ds := obj.(*extensions.DaemonSet) glog.V(4).Infof("Adding daemon set %s", ds.Name) - dsc.enqueueDaemonSet(obj) + dsc.enqueueDaemonSet(ds) }, UpdateFunc: func(old, cur interface{}) { oldDS := old.(*extensions.DaemonSet) @@ -152,34 +166,27 @@ func NewDaemonSetsController(kubeClient clientset.Interface, resyncPeriod contro } glog.V(4).Infof("Updating daemon set %s", oldDS.Name) - dsc.enqueueDaemonSet(cur) + dsc.enqueueDaemonSet(curDS) }, DeleteFunc: func(obj interface{}) { ds := obj.(*extensions.DaemonSet) glog.V(4).Infof("Deleting daemon set %s", ds.Name) - dsc.enqueueDaemonSet(obj) + dsc.enqueueDaemonSet(ds) }, }, ) + // Watch for creation/deletion of pods. The reason we watch is that we don't want a daemon set to create/delete // more pods until all the effects (expectations) of a daemon set's create/delete have been observed.
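+	// The handlers below are registered on a shared pod informer, so controllers
+	// in the same process observe a single pod list/watch instead of each opening
+	// its own connection to the apiserver.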
- dsc.podStore.Store, dsc.podController = framework.NewInformer( - &cache.ListWatch{ - ListFunc: func(options api.ListOptions) (runtime.Object, error) { - return dsc.kubeClient.Core().Pods(api.NamespaceAll).List(options) - }, - WatchFunc: func(options api.ListOptions) (watch.Interface, error) { - return dsc.kubeClient.Core().Pods(api.NamespaceAll).Watch(options) - }, - }, - &api.Pod{}, - resyncPeriod(), - framework.ResourceEventHandlerFuncs{ - AddFunc: dsc.addPod, - UpdateFunc: dsc.updatePod, - DeleteFunc: dsc.deletePod, - }, - ) + podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{ + AddFunc: dsc.addPod, + UpdateFunc: dsc.updatePod, + DeleteFunc: dsc.deletePod, + }) + dsc.podStore.Indexer = podInformer.GetIndexer() + dsc.podController = podInformer.GetController() + dsc.podStoreSynced = podInformer.HasSynced + // Watch for new nodes or updates to nodes - daemon pods are launched on new nodes, and possibly when labels on nodes change, dsc.nodeStore.Store, dsc.nodeController = framework.NewInformer( &cache.ListWatch{ @@ -198,11 +205,18 @@ func NewDaemonSetsController(kubeClient clientset.Interface, resyncPeriod contro }, ) dsc.syncHandler = dsc.syncDaemonSet - dsc.podStoreSynced = dsc.podController.HasSynced dsc.lookupCache = controller.NewMatchingCache(lookupCacheSize) return dsc } +func NewDaemonSetsControllerFromClient(kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, lookupCacheSize int) *DaemonSetsController { + podInformer := informers.CreateSharedPodIndexInformer(kubeClient, resyncPeriod()) + dsc := NewDaemonSetsController(podInformer, kubeClient, resyncPeriod, lookupCacheSize) + dsc.internalPodInformer = podInformer + + return dsc +} + // Run begins watching and syncing daemon sets. func (dsc *DaemonSetsController) Run(workers int, stopCh <-chan struct{}) { defer utilruntime.HandleCrash() @@ -213,6 +227,11 @@ func (dsc *DaemonSetsController) Run(workers int, stopCh <-chan struct{}) { for i := 0; i < workers; i++ { go wait.Until(dsc.worker, time.Second, stopCh) } + + if dsc.internalPodInformer != nil { + go dsc.internalPodInformer.Run(stopCh) + } + <-stopCh glog.Infof("Shutting down Daemon Set Controller") dsc.queue.ShutDown() @@ -246,10 +265,10 @@ func (dsc *DaemonSetsController) enqueueAllDaemonSets() { } } -func (dsc *DaemonSetsController) enqueueDaemonSet(obj interface{}) { - key, err := controller.KeyFunc(obj) +func (dsc *DaemonSetsController) enqueueDaemonSet(ds *extensions.DaemonSet) { + key, err := controller.KeyFunc(ds) if err != nil { - glog.Errorf("Couldn't get key for object %+v: %v", obj, err) + glog.Errorf("Couldn't get key for object %+v: %v", ds, err) return } @@ -534,15 +553,15 @@ func (dsc *DaemonSetsController) manage(ds *extensions.DaemonSet) { } func storeDaemonSetStatus(dsClient unversionedextensions.DaemonSetInterface, ds *extensions.DaemonSet, desiredNumberScheduled, currentNumberScheduled, numberMisscheduled int) error { - if ds.Status.DesiredNumberScheduled == desiredNumberScheduled && ds.Status.CurrentNumberScheduled == currentNumberScheduled && ds.Status.NumberMisscheduled == numberMisscheduled { + if int(ds.Status.DesiredNumberScheduled) == desiredNumberScheduled && int(ds.Status.CurrentNumberScheduled) == currentNumberScheduled && int(ds.Status.NumberMisscheduled) == numberMisscheduled { return nil } var updateErr, getErr error for i := 0; i <= StatusUpdateRetries; i++ { - ds.Status.DesiredNumberScheduled = desiredNumberScheduled - ds.Status.CurrentNumberScheduled = currentNumberScheduled - 
ds.Status.NumberMisscheduled = numberMisscheduled + ds.Status.DesiredNumberScheduled = int32(desiredNumberScheduled) + ds.Status.CurrentNumberScheduled = int32(currentNumberScheduled) + ds.Status.NumberMisscheduled = int32(numberMisscheduled) _, updateErr = dsClient.UpdateStatus(ds) if updateErr == nil { @@ -623,6 +642,12 @@ func (dsc *DaemonSetsController) syncDaemonSet(key string) error { } ds := obj.(*extensions.DaemonSet) + everything := unversioned.LabelSelector{} + if reflect.DeepEqual(ds.Spec.Selector, &everything) { + dsc.eventRecorder.Eventf(ds, api.EventTypeWarning, "SelectingAll", "This daemon set is selecting all pods. A non-empty selector is required.") + return nil + } + // Don't process a daemon set until all its creations and deletions have been processed. // For example if daemon set foo asked for 3 new daemon pods in the previous call to manage, // then we do not want to call manage on foo until the daemon pods have been created. @@ -661,11 +686,14 @@ func (dsc *DaemonSetsController) nodeShouldRunDaemonPod(node *api.Node, ds *exte newPod.Spec.NodeName = node.Name pods := []*api.Pod{newPod} - for _, m := range dsc.podStore.Store.List() { + for _, m := range dsc.podStore.Indexer.List() { pod := m.(*api.Pod) if pod.Spec.NodeName != node.Name { continue } + if pod.Status.Phase == api.PodSucceeded || pod.Status.Phase == api.PodFailed { + continue + } // ignore pods that belong to the daemonset when taking into account whether // a daemonset should bind to a node. if pds := dsc.getPodDaemonSet(pod); pds != nil && ds.Name == pds.Name { @@ -673,13 +701,15 @@ func (dsc *DaemonSetsController) nodeShouldRunDaemonPod(node *api.Node, ds *exte } pods = append(pods, pod) } - _, notFittingCPU, notFittingMemory := predicates.CheckPodsExceedingFreeResources(pods, node.Status.Allocatable) - if len(notFittingCPU)+len(notFittingMemory) != 0 { + _, notFittingCPU, notFittingMemory, notFittingNvidiaGPU := predicates.CheckPodsExceedingFreeResources(pods, node.Status.Allocatable) + if len(notFittingCPU)+len(notFittingMemory)+len(notFittingNvidiaGPU) != 0 { + dsc.eventRecorder.Eventf(ds, api.EventTypeNormal, "FailedPlacement", "failed to place pod on %q: insufficient free resources", node.ObjectMeta.Name) return false } ports := sets.String{} for _, pod := range pods { if errs := validation.AccumulateUniqueHostPorts(pod.Spec.Containers, &ports, field.NewPath("spec", "containers")); len(errs) > 0 { + dsc.eventRecorder.Eventf(ds, api.EventTypeNormal, "FailedPlacement", "failed to place pod on %q: host port conflict", node.ObjectMeta.Name) return false } } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/daemon/controller_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/daemon/controller_test.go new file mode 100644 index 000000000000..c7efe750b733 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/daemon/controller_test.go @@ -0,0 +1,530 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package daemon + +import ( + "fmt" + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/resource" + "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/pkg/client/cache" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" + "k8s.io/kubernetes/pkg/client/restclient" + "k8s.io/kubernetes/pkg/controller" + "k8s.io/kubernetes/pkg/securitycontext" +) + +var ( + simpleDaemonSetLabel = map[string]string{"name": "simple-daemon", "type": "production"} + simpleDaemonSetLabel2 = map[string]string{"name": "simple-daemon", "type": "test"} + simpleNodeLabel = map[string]string{"color": "blue", "speed": "fast"} + simpleNodeLabel2 = map[string]string{"color": "red", "speed": "fast"} + alwaysReady = func() bool { return true } +) + +func getKey(ds *extensions.DaemonSet, t *testing.T) string { + if key, err := controller.KeyFunc(ds); err != nil { + t.Errorf("Unexpected error getting key for ds %v: %v", ds.Name, err) + return "" + } else { + return key + } +} + +func newDaemonSet(name string) *extensions.DaemonSet { + return &extensions.DaemonSet{ + TypeMeta: unversioned.TypeMeta{APIVersion: testapi.Extensions.GroupVersion().String()}, + ObjectMeta: api.ObjectMeta{ + Name: name, + Namespace: api.NamespaceDefault, + }, + Spec: extensions.DaemonSetSpec{ + Selector: &unversioned.LabelSelector{MatchLabels: simpleDaemonSetLabel}, + Template: api.PodTemplateSpec{ + ObjectMeta: api.ObjectMeta{ + Labels: simpleDaemonSetLabel, + }, + Spec: api.PodSpec{ + Containers: []api.Container{ + { + Image: "foo/bar", + TerminationMessagePath: api.TerminationMessagePathDefault, + ImagePullPolicy: api.PullIfNotPresent, + SecurityContext: securitycontext.ValidSecurityContextWithContainerDefaults(), + }, + }, + DNSPolicy: api.DNSDefault, + }, + }, + }, + } +} + +func newNode(name string, label map[string]string) *api.Node { + return &api.Node{ + TypeMeta: unversioned.TypeMeta{APIVersion: testapi.Default.GroupVersion().String()}, + ObjectMeta: api.ObjectMeta{ + Name: name, + Labels: label, + Namespace: api.NamespaceDefault, + }, + Status: api.NodeStatus{ + Conditions: []api.NodeCondition{ + {Type: api.NodeReady, Status: api.ConditionTrue}, + }, + }, + } +} + +func addNodes(nodeStore cache.Store, startIndex, numNodes int, label map[string]string) { + for i := startIndex; i < startIndex+numNodes; i++ { + nodeStore.Add(newNode(fmt.Sprintf("node-%d", i), label)) + } +} + +func newPod(podName string, nodeName string, label map[string]string) *api.Pod { + pod := &api.Pod{ + TypeMeta: unversioned.TypeMeta{APIVersion: testapi.Default.GroupVersion().String()}, + ObjectMeta: api.ObjectMeta{ + GenerateName: podName, + Labels: label, + Namespace: api.NamespaceDefault, + }, + Spec: api.PodSpec{ + NodeName: nodeName, + Containers: []api.Container{ + { + Image: "foo/bar", + TerminationMessagePath: api.TerminationMessagePathDefault, + ImagePullPolicy: api.PullIfNotPresent, + SecurityContext: securitycontext.ValidSecurityContextWithContainerDefaults(), + }, + }, + DNSPolicy: api.DNSDefault, + }, + } + api.GenerateName(api.SimpleNameGenerator, &pod.ObjectMeta) + return pod +} + +func addPods(podStore cache.Store, nodeName string, label map[string]string, number int) { + for i := 0; i < number; i++ { + podStore.Add(newPod(fmt.Sprintf("%s-", nodeName), nodeName, label)) + } +} + +func newTestController() (*DaemonSetsController, *controller.FakePodControl) { + clientset := 
clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}) + manager := NewDaemonSetsControllerFromClient(clientset, controller.NoResyncPeriodFunc, 0) + manager.podStoreSynced = alwaysReady + podControl := &controller.FakePodControl{} + manager.podControl = podControl + return manager, podControl +} + +func validateSyncDaemonSets(t *testing.T, fakePodControl *controller.FakePodControl, expectedCreates, expectedDeletes int) { + if len(fakePodControl.Templates) != expectedCreates { + t.Errorf("Unexpected number of creates. Expected %d, saw %d\n", expectedCreates, len(fakePodControl.Templates)) + } + if len(fakePodControl.DeletePodName) != expectedDeletes { + t.Errorf("Unexpected number of deletes. Expected %d, saw %d\n", expectedDeletes, len(fakePodControl.DeletePodName)) + } +} + +func syncAndValidateDaemonSets(t *testing.T, manager *DaemonSetsController, ds *extensions.DaemonSet, podControl *controller.FakePodControl, expectedCreates, expectedDeletes int) { + key, err := controller.KeyFunc(ds) + if err != nil { + t.Errorf("Could not get key for daemon.") + } + manager.syncHandler(key) + validateSyncDaemonSets(t, podControl, expectedCreates, expectedDeletes) +} + +// DaemonSets without node selectors should launch pods on every node. +func TestSimpleDaemonSetLaunchesPods(t *testing.T) { + manager, podControl := newTestController() + addNodes(manager.nodeStore.Store, 0, 5, nil) + ds := newDaemonSet("foo") + manager.dsStore.Add(ds) + syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 0) +} + +// DaemonSets should do nothing if there aren't any nodes +func TestNoNodesDoesNothing(t *testing.T) { + manager, podControl := newTestController() + ds := newDaemonSet("foo") + manager.dsStore.Add(ds) + syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0) +} + +// DaemonSets without node selectors should launch on a single node in a +// single node cluster. 
+func TestOneNodeDaemonLaunchesPod(t *testing.T) { + manager, podControl := newTestController() + manager.nodeStore.Add(newNode("only-node", nil)) + ds := newDaemonSet("foo") + manager.dsStore.Add(ds) + syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0) +} + +// DaemonSets should place onto NotReady nodes +func TestNotReadNodeDaemonDoesNotLaunchPod(t *testing.T) { + manager, podControl := newTestController() + node := newNode("not-ready", nil) + node.Status = api.NodeStatus{ + Conditions: []api.NodeCondition{ + {Type: api.NodeReady, Status: api.ConditionFalse}, + }, + } + manager.nodeStore.Add(node) + ds := newDaemonSet("foo") + manager.dsStore.Add(ds) + syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0) +} + +// DaemonSets should not place onto OutOfDisk nodes +func TestOutOfDiskNodeDaemonDoesNotLaunchPod(t *testing.T) { + manager, podControl := newTestController() + node := newNode("not-enough-disk", nil) + node.Status.Conditions = []api.NodeCondition{{Type: api.NodeOutOfDisk, Status: api.ConditionTrue}} + manager.nodeStore.Add(node) + ds := newDaemonSet("foo") + manager.dsStore.Add(ds) + syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0) +} + +func resourcePodSpec(nodeName, memory, cpu string) api.PodSpec { + return api.PodSpec{ + NodeName: nodeName, + Containers: []api.Container{{ + Resources: api.ResourceRequirements{ + Requests: allocatableResources(memory, cpu), + }, + }}, + } +} + +func allocatableResources(memory, cpu string) api.ResourceList { + return api.ResourceList{ + api.ResourceMemory: resource.MustParse(memory), + api.ResourceCPU: resource.MustParse(cpu), + } +} + +// DaemonSets should not place onto nodes with insufficient free resource +func TestInsufficentCapacityNodeDaemonDoesNotLaunchPod(t *testing.T) { + podSpec := resourcePodSpec("too-much-mem", "75M", "75m") + manager, podControl := newTestController() + node := newNode("too-much-mem", nil) + node.Status.Allocatable = allocatableResources("100M", "200m") + manager.nodeStore.Add(node) + manager.podStore.Add(&api.Pod{ + Spec: podSpec, + }) + ds := newDaemonSet("foo") + ds.Spec.Template.Spec = podSpec + manager.dsStore.Add(ds) + syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0) +} + +func TestSufficentCapacityWithTerminatedPodsDaemonLaunchesPod(t *testing.T) { + podSpec := resourcePodSpec("too-much-mem", "75M", "75m") + manager, podControl := newTestController() + node := newNode("too-much-mem", nil) + node.Status.Allocatable = allocatableResources("100M", "200m") + manager.nodeStore.Add(node) + manager.podStore.Add(&api.Pod{ + Spec: podSpec, + Status: api.PodStatus{Phase: api.PodSucceeded}, + }) + ds := newDaemonSet("foo") + ds.Spec.Template.Spec = podSpec + manager.dsStore.Add(ds) + syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0) +} + +// DaemonSets should place onto nodes with sufficient free resource +func TestSufficentCapacityNodeDaemonLaunchesPod(t *testing.T) { + podSpec := resourcePodSpec("not-too-much-mem", "75M", "75m") + manager, podControl := newTestController() + node := newNode("not-too-much-mem", nil) + node.Status.Allocatable = allocatableResources("200M", "200m") + manager.nodeStore.Add(node) + manager.podStore.Add(&api.Pod{ + Spec: podSpec, + }) + ds := newDaemonSet("foo") + ds.Spec.Template.Spec = podSpec + manager.dsStore.Add(ds) + syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0) +} + +// DaemonSets should not place onto nodes that would cause port conflicts +func TestPortConflictNodeDaemonDoesNotLaunchPod(t *testing.T) { + podSpec := 
api.PodSpec{ + NodeName: "port-conflict", + Containers: []api.Container{{ + Ports: []api.ContainerPort{{ + HostPort: 666, + }}, + }}, + } + manager, podControl := newTestController() + node := newNode("port-conflict", nil) + manager.nodeStore.Add(node) + manager.podStore.Add(&api.Pod{ + Spec: podSpec, + }) + + ds := newDaemonSet("foo") + ds.Spec.Template.Spec = podSpec + manager.dsStore.Add(ds) + syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0) +} + +// Test that if the node is already scheduled with a pod using a host port +// but belonging to the same daemonset, we don't delete that pod +// +// Issue: https://github.com/kubernetes/kubernetes/issues/22309 +func TestPortConflictWithSameDaemonPodDoesNotDeletePod(t *testing.T) { + podSpec := api.PodSpec{ + NodeName: "port-conflict", + Containers: []api.Container{{ + Ports: []api.ContainerPort{{ + HostPort: 666, + }}, + }}, + } + manager, podControl := newTestController() + node := newNode("port-conflict", nil) + manager.nodeStore.Add(node) + manager.podStore.Add(&api.Pod{ + ObjectMeta: api.ObjectMeta{ + Labels: simpleDaemonSetLabel, + Namespace: api.NamespaceDefault, + }, + Spec: podSpec, + }) + ds := newDaemonSet("foo") + ds.Spec.Template.Spec = podSpec + manager.dsStore.Add(ds) + syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0) +} + +// DaemonSets should place onto nodes that would not cause port conflicts +func TestNoPortConflictNodeDaemonLaunchesPod(t *testing.T) { + podSpec1 := api.PodSpec{ + NodeName: "no-port-conflict", + Containers: []api.Container{{ + Ports: []api.ContainerPort{{ + HostPort: 6661, + }}, + }}, + } + podSpec2 := api.PodSpec{ + NodeName: "no-port-conflict", + Containers: []api.Container{{ + Ports: []api.ContainerPort{{ + HostPort: 6662, + }}, + }}, + } + manager, podControl := newTestController() + node := newNode("no-port-conflict", nil) + manager.nodeStore.Add(node) + manager.podStore.Add(&api.Pod{ + Spec: podSpec1, + }) + ds := newDaemonSet("foo") + ds.Spec.Template.Spec = podSpec2 + manager.dsStore.Add(ds) + syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0) +} + +// DaemonSetController should not sync DaemonSets with empty pod selectors. +// +// issue https://github.com/kubernetes/kubernetes/pull/23223 +func TestPodIsNotDeletedByDaemonsetWithEmptyLabelSelector(t *testing.T) { + manager, podControl := newTestController() + manager.nodeStore.Store.Add(newNode("node1", nil)) + // Create pod not controlled by a daemonset. + manager.podStore.Add(&api.Pod{ + ObjectMeta: api.ObjectMeta{ + Labels: map[string]string{"bang": "boom"}, + Namespace: api.NamespaceDefault, + }, + Spec: api.PodSpec{ + NodeName: "node1", + }, + }) + + // Create a misconfigured DaemonSet. An empty pod selector is invalid but could happen + // if we upgrade and make a backwards incompatible change. + // + // The node selector matches no nodes which mimics the behavior of kubectl delete. + // + // The DaemonSet should not schedule pods and should not delete scheduled pods in + // this case even though its empty pod selector matches all pods. The DaemonSetController + // should detect this misconfiguration and choose not to sync the DaemonSet. We should + // not observe a deletion of the pod on node1.
+ ds := newDaemonSet("foo") + ls := unversioned.LabelSelector{} + ds.Spec.Selector = &ls + ds.Spec.Template.Spec.NodeSelector = map[string]string{"foo": "bar"} + manager.dsStore.Add(ds) + + syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0) +} + +// Controller should not create pods on nodes which have daemon pods, and should remove excess pods from nodes that have extra pods. +func TestDealsWithExistingPods(t *testing.T) { + manager, podControl := newTestController() + addNodes(manager.nodeStore.Store, 0, 5, nil) + addPods(manager.podStore.Indexer, "node-1", simpleDaemonSetLabel, 1) + addPods(manager.podStore.Indexer, "node-2", simpleDaemonSetLabel, 2) + addPods(manager.podStore.Indexer, "node-3", simpleDaemonSetLabel, 5) + addPods(manager.podStore.Indexer, "node-4", simpleDaemonSetLabel2, 2) + ds := newDaemonSet("foo") + manager.dsStore.Add(ds) + syncAndValidateDaemonSets(t, manager, ds, podControl, 2, 5) +} + +// Daemon with node selector should launch pods on nodes matching selector. +func TestSelectorDaemonLaunchesPods(t *testing.T) { + manager, podControl := newTestController() + addNodes(manager.nodeStore.Store, 0, 4, nil) + addNodes(manager.nodeStore.Store, 4, 3, simpleNodeLabel) + daemon := newDaemonSet("foo") + daemon.Spec.Template.Spec.NodeSelector = simpleNodeLabel + manager.dsStore.Add(daemon) + syncAndValidateDaemonSets(t, manager, daemon, podControl, 3, 0) +} + +// Daemon with node selector should delete pods from nodes that do not satisfy selector. +func TestSelectorDaemonDeletesUnselectedPods(t *testing.T) { + manager, podControl := newTestController() + addNodes(manager.nodeStore.Store, 0, 5, nil) + addNodes(manager.nodeStore.Store, 5, 5, simpleNodeLabel) + addPods(manager.podStore.Indexer, "node-0", simpleDaemonSetLabel2, 2) + addPods(manager.podStore.Indexer, "node-1", simpleDaemonSetLabel, 3) + addPods(manager.podStore.Indexer, "node-1", simpleDaemonSetLabel2, 1) + addPods(manager.podStore.Indexer, "node-4", simpleDaemonSetLabel, 1) + daemon := newDaemonSet("foo") + daemon.Spec.Template.Spec.NodeSelector = simpleNodeLabel + manager.dsStore.Add(daemon) + syncAndValidateDaemonSets(t, manager, daemon, podControl, 5, 4) +} + +// DaemonSet with node selector should launch pods on nodes matching selector, but also deal with existing pods on nodes. +func TestSelectorDaemonDealsWithExistingPods(t *testing.T) { + manager, podControl := newTestController() + addNodes(manager.nodeStore.Store, 0, 5, nil) + addNodes(manager.nodeStore.Store, 5, 5, simpleNodeLabel) + addPods(manager.podStore.Indexer, "node-0", simpleDaemonSetLabel, 1) + addPods(manager.podStore.Indexer, "node-1", simpleDaemonSetLabel, 3) + addPods(manager.podStore.Indexer, "node-1", simpleDaemonSetLabel2, 2) + addPods(manager.podStore.Indexer, "node-2", simpleDaemonSetLabel, 4) + addPods(manager.podStore.Indexer, "node-6", simpleDaemonSetLabel, 13) + addPods(manager.podStore.Indexer, "node-7", simpleDaemonSetLabel2, 4) + addPods(manager.podStore.Indexer, "node-9", simpleDaemonSetLabel, 1) + addPods(manager.podStore.Indexer, "node-9", simpleDaemonSetLabel2, 1) + ds := newDaemonSet("foo") + ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel + manager.dsStore.Add(ds) + syncAndValidateDaemonSets(t, manager, ds, podControl, 3, 20) +} + +// DaemonSet with node selector which does not match any node labels should not launch pods. 
+func TestBadSelectorDaemonDoesNothing(t *testing.T) { + manager, podControl := newTestController() + addNodes(manager.nodeStore.Store, 0, 4, nil) + addNodes(manager.nodeStore.Store, 4, 3, simpleNodeLabel) + ds := newDaemonSet("foo") + ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel2 + manager.dsStore.Add(ds) + syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0) +} + +// DaemonSet with node name should launch pod on node with corresponding name. +func TestNameDaemonSetLaunchesPods(t *testing.T) { + manager, podControl := newTestController() + addNodes(manager.nodeStore.Store, 0, 5, nil) + ds := newDaemonSet("foo") + ds.Spec.Template.Spec.NodeName = "node-0" + manager.dsStore.Add(ds) + syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0) +} + +// DaemonSet with node name that does not exist should not launch pods. +func TestBadNameDaemonSetDoesNothing(t *testing.T) { + manager, podControl := newTestController() + addNodes(manager.nodeStore.Store, 0, 5, nil) + ds := newDaemonSet("foo") + ds.Spec.Template.Spec.NodeName = "node-10" + manager.dsStore.Add(ds) + syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0) +} + +// DaemonSet with node selector, and node name, matching a node, should launch a pod on the node. +func TestNameAndSelectorDaemonSetLaunchesPods(t *testing.T) { + manager, podControl := newTestController() + addNodes(manager.nodeStore.Store, 0, 4, nil) + addNodes(manager.nodeStore.Store, 4, 3, simpleNodeLabel) + ds := newDaemonSet("foo") + ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel + ds.Spec.Template.Spec.NodeName = "node-6" + manager.dsStore.Add(ds) + syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0) +} + +// DaemonSet with node selector that matches some nodes, and node name that matches a different node, should do nothing. +func TestInconsistentNameSelectorDaemonSetDoesNothing(t *testing.T) { + manager, podControl := newTestController() + addNodes(manager.nodeStore.Store, 0, 4, nil) + addNodes(manager.nodeStore.Store, 4, 3, simpleNodeLabel) + ds := newDaemonSet("foo") + ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel + ds.Spec.Template.Spec.NodeName = "node-0" + manager.dsStore.Add(ds) + syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0) +} + +func TestDSManagerNotReady(t *testing.T) { + manager, podControl := newTestController() + manager.podStoreSynced = func() bool { return false } + addNodes(manager.nodeStore.Store, 0, 1, nil) + + // Simulates the ds reflector running before the pod reflector. We don't + // want to end up creating daemon pods in this case until the pod reflector + // has synced, so the ds manager should just requeue the ds. 
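+	// The first sync below should create nothing and leave the key queued; once
+	// podStoreSynced reports true, the second sync creates the daemon pod.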
+ ds := newDaemonSet("foo") + manager.dsStore.Add(ds) + + dsKey := getKey(ds, t) + syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0) + queueDS, _ := manager.queue.Get() + if queueDS != dsKey { + t.Fatalf("Expected to find key %v in queue, found %v", dsKey, queueDS) + } + + manager.podStoreSynced = alwaysReady + syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/deployment/deployment_controller.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/deployment/deployment_controller.go index 2c60e40e02ec..6895e7e265b6 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/deployment/deployment_controller.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/deployment/deployment_controller.go @@ -25,21 +25,22 @@ import ( "github.com/golang/glog" "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/annotations" "k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/client/cache" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" + unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned" "k8s.io/kubernetes/pkg/client/record" - unversionedcore "k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned" "k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller/framework" - "k8s.io/kubernetes/pkg/kubectl" "k8s.io/kubernetes/pkg/runtime" deploymentutil "k8s.io/kubernetes/pkg/util/deployment" utilerrors "k8s.io/kubernetes/pkg/util/errors" "k8s.io/kubernetes/pkg/util/integer" labelsutil "k8s.io/kubernetes/pkg/util/labels" + "k8s.io/kubernetes/pkg/util/metrics" podutil "k8s.io/kubernetes/pkg/util/pod" rsutil "k8s.io/kubernetes/pkg/util/replicaset" utilruntime "k8s.io/kubernetes/pkg/util/runtime" @@ -95,8 +96,11 @@ func NewDeploymentController(client clientset.Interface, resyncPeriod controller eventBroadcaster := record.NewBroadcaster() eventBroadcaster.StartLogging(glog.Infof) // TODO: remove the wrapper when all clients have moved to use the clientset.
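+	// Registering the client's rate limiter with the metrics package below
+	// exposes rate-limiter saturation under the controller's name.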
- eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{client.Core().Events("")}) + eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: client.Core().Events("")}) + if client != nil && client.Core().GetRESTClient().GetRateLimiter() != nil { + metrics.RegisterMetricAndTrackRateLimiterUsage("deployment_controller", client.Core().GetRESTClient().GetRateLimiter()) + } dc := &DeploymentController{ client: client, eventRecorder: eventBroadcaster.NewRecorder(api.EventSource{Component: "deployment-controller"}), @@ -140,7 +144,7 @@ func NewDeploymentController(client clientset.Interface, resyncPeriod controller }, ) - dc.podStore.Store, dc.podController = framework.NewInformer( + dc.podStore.Indexer, dc.podController = framework.NewIndexerInformer( &cache.ListWatch{ ListFunc: func(options api.ListOptions) (runtime.Object, error) { return dc.client.Core().Pods(api.NamespaceAll).List(options) @@ -156,6 +160,7 @@ func NewDeploymentController(client clientset.Interface, resyncPeriod controller UpdateFunc: dc.updatePod, DeleteFunc: dc.deletePod, }, + cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, ) dc.syncHandler = dc.syncDeployment @@ -426,6 +431,11 @@ func (dc *DeploymentController) syncDeployment(key string) error { } d := obj.(*extensions.Deployment) + everything := unversioned.LabelSelector{} + if reflect.DeepEqual(d.Spec.Selector, &everything) { + dc.eventRecorder.Eventf(d, api.EventTypeWarning, "SelectingAll", "This deployment is selecting all pods. A non-empty selector is required.") + return nil + } if d.Spec.Paused { // TODO: Implement scaling for paused deployments. @@ -898,6 +908,24 @@ func setNewReplicaSetAnnotations(deployment *extensions.Deployment, newRS *exten return annotationChanged } +// skipCopyAnnotation returns true if we should skip copying the annotation with the given annotation key +// TODO: How to decide which annotations should / should not be copied? +// See https://github.com/kubernetes/kubernetes/pull/20035#issuecomment-179558615 +func skipCopyAnnotation(key string) bool { + // Skip apply annotations and revision annotations. + return key == annotations.LastAppliedConfigAnnotation || key == deploymentutil.RevisionAnnotation +} + +func getSkippedAnnotations(annotations map[string]string) map[string]string { + skippedAnnotations := make(map[string]string) + for k, v := range annotations { + if skipCopyAnnotation(k) { + skippedAnnotations[k] = v + } + } + return skippedAnnotations +} + // copyDeploymentAnnotationsToReplicaSet copies deployment's annotations to replica set's annotations, // and returns true if replica set's annotation is changed. // Note that apply and revision annotations are not copied. @@ -907,13 +935,10 @@ func copyDeploymentAnnotationsToReplicaSet(deployment *extensions.Deployment, rs rs.Annotations = make(map[string]string) } for k, v := range deployment.Annotations { - // TODO: How to decide which annotations should / should not be copied? - // See https://github.com/kubernetes/kubernetes/pull/20035#issuecomment-179558615 - // Skip apply annotations and revision annotations. // newRS revision is updated automatically in getNewReplicaSet, and the deployment's revision number is then updated // by copying its newRS revision number. We should not copy deployment's revision to its newRS, since the update of // deployment revision number may fail (revision becomes stale) and the revision number in newRS is more reliable. 
- if k == kubectl.LastAppliedConfigAnnotation || k == deploymentutil.RevisionAnnotation || rs.Annotations[k] == v { + if skipCopyAnnotation(k) || rs.Annotations[k] == v { continue } rs.Annotations[k] = v @@ -922,6 +947,18 @@ func copyDeploymentAnnotationsToReplicaSet(deployment *extensions.Deployment, rs return rsAnnotationsChanged } +// setDeploymentAnnotationsTo sets deployment's annotations as given RS's annotations. +// This action should be done if and only if the deployment is rolling back to this rs. +// Note that apply and revision annotations are not changed. +func setDeploymentAnnotationsTo(deployment *extensions.Deployment, rollbackToRS *extensions.ReplicaSet) { + deployment.Annotations = getSkippedAnnotations(deployment.Annotations) + for k, v := range rollbackToRS.Annotations { + if !skipCopyAnnotation(k) { + deployment.Annotations[k] = v + } + } +} + func (dc *DeploymentController) updateDeploymentRevision(deployment *extensions.Deployment, revision string) error { if deployment.Annotations == nil { deployment.Annotations = make(map[string]string) @@ -1010,29 +1047,32 @@ func (dc *DeploymentController) reconcileOldReplicaSets(allRSs []*extensions.Rep // Clean up unhealthy replicas first, otherwise unhealthy replicas will block deployment // and cause timeout. See https://github.com/kubernetes/kubernetes/issues/16737 - cleanupCount, err := dc.cleanupUnhealthyReplicas(oldRSs, deployment, maxScaledDown) + oldRSs, cleanupCount, err := dc.cleanupUnhealthyReplicas(oldRSs, deployment, maxScaledDown) if err != nil { return false, nil } + glog.V(4).Infof("Cleaned up unhealthy replicas from old RSes by %d", cleanupCount) // Scale down old replica sets; we need to check maxUnavailable to ensure we can scale down + allRSs = append(oldRSs, newRS) scaledDownCount, err := dc.scaleDownOldReplicaSetsForRollingUpdate(allRSs, oldRSs, deployment) if err != nil { return false, nil } + glog.V(4).Infof("Scaled down old RSes by %d", scaledDownCount) totalScaledDown := cleanupCount + scaledDownCount return totalScaledDown > 0, nil } // cleanupUnhealthyReplicas will scale down old replica sets with unhealthy replicas, so that all unhealthy replicas will be deleted. -func (dc *DeploymentController) cleanupUnhealthyReplicas(oldRSs []*extensions.ReplicaSet, deployment *extensions.Deployment, maxCleanupCount int) (int, error) { +func (dc *DeploymentController) cleanupUnhealthyReplicas(oldRSs []*extensions.ReplicaSet, deployment *extensions.Deployment, maxCleanupCount int32) ([]*extensions.ReplicaSet, int32, error) { sort.Sort(controller.ReplicaSetsByCreationTimestamp(oldRSs)) // Safely scale down all old replica sets with unhealthy replicas. Replica set will sort the pods in the order // such that not-ready < ready, unscheduled < scheduled, and pending < running. This ensures that unhealthy replicas will // be deleted first and won't increase unavailability.
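+	// The scaled-down replica sets are returned to the caller, which rebuilds
+	// allRSs from the fresh objects before scaling down further (see
+	// reconcileOldReplicaSets above).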
- totalScaledDown := 0 - for _, targetRS := range oldRSs { + totalScaledDown := int32(0) + for i, targetRS := range oldRSs { if totalScaledDown >= maxCleanupCount { break } @@ -1042,27 +1082,31 @@ func (dc *DeploymentController) cleanupUnhealthyReplicas(oldRSs []*extensions.Re } readyPodCount, err := deploymentutil.GetAvailablePodsForReplicaSets(dc.client, []*extensions.ReplicaSet{targetRS}, 0) if err != nil { - return totalScaledDown, fmt.Errorf("could not find available pods: %v", err) + return nil, totalScaledDown, fmt.Errorf("could not find available pods: %v", err) } if targetRS.Spec.Replicas == readyPodCount { // no unhealthy replicas found, no scaling required. continue } - scaledDownCount := integer.IntMin(maxCleanupCount-totalScaledDown, targetRS.Spec.Replicas-readyPodCount) + scaledDownCount := int32(integer.IntMin(int(maxCleanupCount-totalScaledDown), int(targetRS.Spec.Replicas-readyPodCount))) newReplicasCount := targetRS.Spec.Replicas - scaledDownCount - _, _, err = dc.scaleReplicaSetAndRecordEvent(targetRS, newReplicasCount, deployment) + if newReplicasCount > targetRS.Spec.Replicas { + return nil, 0, fmt.Errorf("when cleaning up unhealthy replicas, got invalid request to scale down %s/%s %d -> %d", targetRS.Namespace, targetRS.Name, targetRS.Spec.Replicas, newReplicasCount) + } + _, updatedOldRS, err := dc.scaleReplicaSetAndRecordEvent(targetRS, newReplicasCount, deployment) if err != nil { - return totalScaledDown, err + return nil, totalScaledDown, err } totalScaledDown += scaledDownCount + oldRSs[i] = updatedOldRS } - return totalScaledDown, nil + return oldRSs, totalScaledDown, nil } // scaleDownOldReplicaSetsForRollingUpdate scales down old replica sets when deployment strategy is "RollingUpdate". // Need to check maxUnavailable to ensure availability -func (dc *DeploymentController) scaleDownOldReplicaSetsForRollingUpdate(allRSs []*extensions.ReplicaSet, oldRSs []*extensions.ReplicaSet, deployment *extensions.Deployment) (int, error) { +func (dc *DeploymentController) scaleDownOldReplicaSetsForRollingUpdate(allRSs []*extensions.ReplicaSet, oldRSs []*extensions.ReplicaSet, deployment *extensions.Deployment) (int32, error) { _, maxUnavailable, err := deploymentutil.ResolveFenceposts(&deployment.Spec.Strategy.RollingUpdate.MaxSurge, &deployment.Spec.Strategy.RollingUpdate.MaxUnavailable, deployment.Spec.Replicas) if err != nil { return 0, err @@ -1083,7 +1127,7 @@ func (dc *DeploymentController) scaleDownOldReplicaSetsForRollingUpdate(allRSs [ sort.Sort(controller.ReplicaSetsByCreationTimestamp(oldRSs)) - totalScaledDown := 0 + totalScaledDown := int32(0) totalScaleDownCount := readyPodCount - minAvailable for _, targetRS := range oldRSs { if totalScaledDown >= totalScaleDownCount { break } @@ -1095,8 +1139,11 @@ func (dc *DeploymentController) scaleDownOldReplicaSetsForRollingUpdate(allRSs [ continue } // Scale down.
- scaleDownCount := integer.IntMin(targetRS.Spec.Replicas, totalScaleDownCount-totalScaledDown) + scaleDownCount := int32(integer.IntMin(int(targetRS.Spec.Replicas), int(totalScaleDownCount-totalScaledDown))) newReplicasCount := targetRS.Spec.Replicas - scaleDownCount + if newReplicasCount > targetRS.Spec.Replicas { + return 0, fmt.Errorf("when scaling down old RS, got invalid request to scale down %s/%s %d -> %d", targetRS.Namespace, targetRS.Name, targetRS.Spec.Replicas, newReplicasCount) + } _, _, err = dc.scaleReplicaSetAndRecordEvent(targetRS, newReplicasCount, deployment) if err != nil { return totalScaledDown, err @@ -1134,7 +1181,7 @@ func (dc *DeploymentController) scaleUpNewReplicaSetForRecreate(newRS *extension } func (dc *DeploymentController) cleanupOldReplicaSets(oldRSs []*extensions.ReplicaSet, deployment *extensions.Deployment) error { - diff := len(oldRSs) - *deployment.Spec.RevisionHistoryLimit + diff := int32(len(oldRSs)) - *deployment.Spec.RevisionHistoryLimit if diff <= 0 { return nil } @@ -1143,7 +1190,7 @@ func (dc *DeploymentController) cleanupOldReplicaSets(oldRSs []*extensions.Repli var errList []error // TODO: This should be parallelized. - for i := 0; i < diff; i++ { + for i := int32(0); i < diff; i++ { rs := oldRSs[i] // Avoid deleting replica sets with non-zero replica counts if rs.Status.Replicas != 0 || rs.Spec.Replicas != 0 || rs.Generation > rs.Status.ObservedGeneration { @@ -1177,7 +1224,7 @@ func (dc *DeploymentController) updateDeploymentStatus(allRSs []*extensions.Repl return err } -func (dc *DeploymentController) calculateStatus(allRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet, deployment *extensions.Deployment) (totalActualReplicas, updatedReplicas, availableReplicas, unavailableReplicas int, err error) { +func (dc *DeploymentController) calculateStatus(allRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet, deployment *extensions.Deployment) (totalActualReplicas, updatedReplicas, availableReplicas, unavailableReplicas int32, err error) { totalActualReplicas = deploymentutil.GetActualReplicaCountForReplicaSets(allRSs) updatedReplicas = deploymentutil.GetActualReplicaCountForReplicaSets([]*extensions.ReplicaSet{newRS}) minReadySeconds := deployment.Spec.MinReadySeconds @@ -1191,7 +1238,7 @@ func (dc *DeploymentController) calculateStatus(allRSs []*extensions.ReplicaSet, return } -func (dc *DeploymentController) scaleReplicaSetAndRecordEvent(rs *extensions.ReplicaSet, newScale int, deployment *extensions.Deployment) (bool, *extensions.ReplicaSet, error) { +func (dc *DeploymentController) scaleReplicaSetAndRecordEvent(rs *extensions.ReplicaSet, newScale int32, deployment *extensions.Deployment) (bool, *extensions.ReplicaSet, error) { // No need to scale if rs.Spec.Replicas == newScale { return false, rs, nil @@ -1211,7 +1258,7 @@ func (dc *DeploymentController) scaleReplicaSetAndRecordEvent(rs *extensions.Rep return true, newRS, err } -func (dc *DeploymentController) scaleReplicaSet(rs *extensions.ReplicaSet, newScale int) (*extensions.ReplicaSet, error) { +func (dc *DeploymentController) scaleReplicaSet(rs *extensions.ReplicaSet, newScale int32) (*extensions.ReplicaSet, error) { // TODO: Using client for now, update to use store when it is ready. // NOTE: This mutates the ReplicaSet passed in. Not sure if that's a good idea.
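Editor's aside: for orientation, the scale-down budget that scaleDownOldReplicaSetsForRollingUpdate works against in the hunks above reduces to two subtractions; a worked sketch with made-up numbers (minAvailable is taken to be replicas minus the maxUnavailable already resolved by ResolveFenceposts):

package main

import "fmt"

func main() {
	// Assumed example values: a deployment of 10 replicas with
	// maxUnavailable=2 and 10 ready pods across all its replica sets.
	var replicas, maxUnavailable, readyPodCount int32 = 10, 2, 10

	minAvailable := replicas - maxUnavailable           // 8 pods must stay available
	totalScaleDownCount := readyPodCount - minAvailable // so at most 2 pods may be removed this round
	fmt.Println(minAvailable, totalScaleDownCount)      // 8 2
}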
rs.Spec.Replicas = newScale @@ -1227,6 +1274,18 @@ func (dc *DeploymentController) rollbackToTemplate(deployment *extensions.Deploy if !reflect.DeepEqual(deploymentutil.GetNewReplicaSetTemplate(deployment), rs.Spec.Template) { glog.Infof("Rolling back deployment %s to template spec %+v", deployment.Name, rs.Spec.Template.Spec) deploymentutil.SetFromReplicaSetTemplate(deployment, rs.Spec.Template) + // set RS (the old RS we're rolling back to) annotations back to the deployment; + // otherwise, the deployment's current annotations (which should be the same as the current new RS) will be copied to the RS after the rollback. + // + // For example, + // A Deployment has old RS1 with annotation {change-cause:create}, and new RS2 {change-cause:edit}. + // Note that both annotations are copied from Deployment, and the Deployment should be annotated {change-cause:edit} as well. + // Now, rollback Deployment to RS1, we should update Deployment's pod-template and also copy annotation from RS1. + // Deployment is now annotated {change-cause:create}, and we have new RS1 {change-cause:create}, old RS2 {change-cause:edit}. + // + // If we don't copy the annotations back from RS to deployment on rollback, the Deployment will stay as {change-cause:edit}, + // and new RS1 becomes {change-cause:edit} (copied from deployment after rollback), old RS2 {change-cause:edit}, which is not correct. + setDeploymentAnnotationsTo(deployment, rs) performedRollback = true } else { glog.V(4).Infof("Rolling back to a revision that contains the same template as current deployment %s, skipping rollback...", deployment.Name) diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/deployment/deployment_controller_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/deployment/deployment_controller_test.go new file mode 100644 index 000000000000..0dc92fb9344c --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/deployment/deployment_controller_test.go @@ -0,0 +1,820 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package deployment + +import ( + "fmt" + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/api/unversioned" + exp "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" + "k8s.io/kubernetes/pkg/client/record" + "k8s.io/kubernetes/pkg/client/testing/core" + "k8s.io/kubernetes/pkg/controller" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/intstr" +) + +func rs(name string, replicas int, selector map[string]string) *exp.ReplicaSet { + return &exp.ReplicaSet{ + ObjectMeta: api.ObjectMeta{ + Name: name, + }, + Spec: exp.ReplicaSetSpec{ + Replicas: int32(replicas), + Selector: &unversioned.LabelSelector{MatchLabels: selector}, + Template: api.PodTemplateSpec{}, + }, + } +} + +func newRSWithStatus(name string, specReplicas, statusReplicas int, selector map[string]string) *exp.ReplicaSet { + rs := rs(name, specReplicas, selector) + rs.Status = exp.ReplicaSetStatus{ + Replicas: int32(statusReplicas), + } + return rs +} + +func deployment(name string, replicas int, maxSurge, maxUnavailable intstr.IntOrString) exp.Deployment { + return exp.Deployment{ + ObjectMeta: api.ObjectMeta{ + Name: name, + }, + Spec: exp.DeploymentSpec{ + Replicas: int32(replicas), + Strategy: exp.DeploymentStrategy{ + Type: exp.RollingUpdateDeploymentStrategyType, + RollingUpdate: &exp.RollingUpdateDeployment{ + MaxSurge: maxSurge, + MaxUnavailable: maxUnavailable, + }, + }, + }, + } +} + +var alwaysReady = func() bool { return true } + +func newDeployment(replicas int, revisionHistoryLimit *int) *exp.Deployment { + var v *int32 + if revisionHistoryLimit != nil { + v = new(int32) + *v = int32(*revisionHistoryLimit) + } + d := exp.Deployment{ + TypeMeta: unversioned.TypeMeta{APIVersion: testapi.Default.GroupVersion().String()}, + ObjectMeta: api.ObjectMeta{ + UID: util.NewUUID(), + Name: "foobar", + Namespace: api.NamespaceDefault, + ResourceVersion: "18", + }, + Spec: exp.DeploymentSpec{ + Strategy: exp.DeploymentStrategy{ + Type: exp.RollingUpdateDeploymentStrategyType, + RollingUpdate: &exp.RollingUpdateDeployment{}, + }, + Replicas: int32(replicas), + Selector: &unversioned.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}, + Template: api.PodTemplateSpec{ + ObjectMeta: api.ObjectMeta{ + Labels: map[string]string{ + "name": "foo", + "type": "production", + }, + }, + Spec: api.PodSpec{ + Containers: []api.Container{ + { + Image: "foo/bar", + }, + }, + }, + }, + RevisionHistoryLimit: v, + }, + } + return &d +} + +func newReplicaSet(d *exp.Deployment, name string, replicas int) *exp.ReplicaSet { + return &exp.ReplicaSet{ + ObjectMeta: api.ObjectMeta{ + Name: name, + Namespace: api.NamespaceDefault, + }, + Spec: exp.ReplicaSetSpec{ + Replicas: int32(replicas), + Template: d.Spec.Template, + }, + } + +} + +func newListOptions() api.ListOptions { + return api.ListOptions{} +} + +func TestDeploymentController_reconcileNewReplicaSet(t *testing.T) { + tests := []struct { + deploymentReplicas int + maxSurge intstr.IntOrString + oldReplicas int + newReplicas int + scaleExpected bool + expectedNewReplicas int + }{ + { + // Should not scale up. 
+ deploymentReplicas: 10, + maxSurge: intstr.FromInt(0), + oldReplicas: 10, + newReplicas: 0, + scaleExpected: false, + }, + { + deploymentReplicas: 10, + maxSurge: intstr.FromInt(2), + oldReplicas: 10, + newReplicas: 0, + scaleExpected: true, + expectedNewReplicas: 2, + }, + { + deploymentReplicas: 10, + maxSurge: intstr.FromInt(2), + oldReplicas: 5, + newReplicas: 0, + scaleExpected: true, + expectedNewReplicas: 7, + }, + { + deploymentReplicas: 10, + maxSurge: intstr.FromInt(2), + oldReplicas: 10, + newReplicas: 2, + scaleExpected: false, + }, + { + // Should scale down. + deploymentReplicas: 10, + maxSurge: intstr.FromInt(2), + oldReplicas: 2, + newReplicas: 11, + scaleExpected: true, + expectedNewReplicas: 10, + }, + } + + for i, test := range tests { + t.Logf("executing scenario %d", i) + newRS := rs("foo-v2", test.newReplicas, nil) + oldRS := rs("foo-v2", test.oldReplicas, nil) + allRSs := []*exp.ReplicaSet{newRS, oldRS} + deployment := deployment("foo", test.deploymentReplicas, test.maxSurge, intstr.FromInt(0)) + fake := fake.Clientset{} + controller := &DeploymentController{ + client: &fake, + eventRecorder: &record.FakeRecorder{}, + } + scaled, err := controller.reconcileNewReplicaSet(allRSs, newRS, &deployment) + if err != nil { + t.Errorf("unexpected error: %v", err) + continue + } + if !test.scaleExpected { + if scaled || len(fake.Actions()) > 0 { + t.Errorf("unexpected scaling: %v", fake.Actions()) + } + continue + } + if test.scaleExpected && !scaled { + t.Errorf("expected scaling to occur") + continue + } + if len(fake.Actions()) != 1 { + t.Errorf("expected 1 action during scale, got: %v", fake.Actions()) + continue + } + updated := fake.Actions()[0].(core.UpdateAction).GetObject().(*exp.ReplicaSet) + if e, a := test.expectedNewReplicas, int(updated.Spec.Replicas); e != a { + t.Errorf("expected update to %d replicas, got %d", e, a) + } + } +} + +func TestDeploymentController_reconcileOldReplicaSets(t *testing.T) { + tests := []struct { + deploymentReplicas int + maxUnavailable intstr.IntOrString + oldReplicas int + newReplicas int + readyPodsFromOldRS int + readyPodsFromNewRS int + scaleExpected bool + expectedOldReplicas int + }{ + { + deploymentReplicas: 10, + maxUnavailable: intstr.FromInt(0), + oldReplicas: 10, + newReplicas: 0, + readyPodsFromOldRS: 10, + readyPodsFromNewRS: 0, + scaleExpected: true, + expectedOldReplicas: 9, + }, + { + deploymentReplicas: 10, + maxUnavailable: intstr.FromInt(2), + oldReplicas: 10, + newReplicas: 0, + readyPodsFromOldRS: 10, + readyPodsFromNewRS: 0, + scaleExpected: true, + expectedOldReplicas: 8, + }, + { // expect unhealthy replicas from old replica sets to be cleaned up + deploymentReplicas: 10, + maxUnavailable: intstr.FromInt(2), + oldReplicas: 10, + newReplicas: 0, + readyPodsFromOldRS: 8, + readyPodsFromNewRS: 0, + scaleExpected: true, + expectedOldReplicas: 8, + }, + { // expect 1 unhealthy replica from old replica sets to be cleaned up, and 1 ready pod to be scaled down + deploymentReplicas: 10, + maxUnavailable: intstr.FromInt(2), + oldReplicas: 10, + newReplicas: 0, + readyPodsFromOldRS: 9, + readyPodsFromNewRS: 0, + scaleExpected: true, + expectedOldReplicas: 8, + }, + { // the unavailable pods from the newRS would not make us scale down old RSs in a further step + deploymentReplicas: 10, + maxUnavailable: intstr.FromInt(2), + oldReplicas: 8, + newReplicas: 2, + readyPodsFromOldRS: 8, + readyPodsFromNewRS: 0, + scaleExpected: false, + }, + } + for i, test := range tests { + t.Logf("executing scenario %d", i) + + newSelector :=
map[string]string{"foo": "new"} + oldSelector := map[string]string{"foo": "old"} + newRS := rs("foo-new", test.newReplicas, newSelector) + oldRS := rs("foo-old", test.oldReplicas, oldSelector) + oldRSs := []*exp.ReplicaSet{oldRS} + allRSs := []*exp.ReplicaSet{oldRS, newRS} + + deployment := deployment("foo", test.deploymentReplicas, intstr.FromInt(0), test.maxUnavailable) + fakeClientset := fake.Clientset{} + fakeClientset.AddReactor("list", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) { + switch action.(type) { + case core.ListAction: + podList := &api.PodList{} + for podIndex := 0; podIndex < test.readyPodsFromOldRS; podIndex++ { + podList.Items = append(podList.Items, api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: fmt.Sprintf("%s-oldReadyPod-%d", oldRS.Name, podIndex), + Labels: oldSelector, + }, + Status: api.PodStatus{ + Conditions: []api.PodCondition{ + { + Type: api.PodReady, + Status: api.ConditionTrue, + }, + }, + }, + }) + } + for podIndex := 0; podIndex < test.oldReplicas-test.readyPodsFromOldRS; podIndex++ { + podList.Items = append(podList.Items, api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: fmt.Sprintf("%s-oldUnhealthyPod-%d", oldRS.Name, podIndex), + Labels: oldSelector, + }, + Status: api.PodStatus{ + Conditions: []api.PodCondition{ + { + Type: api.PodReady, + Status: api.ConditionFalse, + }, + }, + }, + }) + } + for podIndex := 0; podIndex < test.readyPodsFromNewRS; podIndex++ { + podList.Items = append(podList.Items, api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: fmt.Sprintf("%s-newReadyPod-%d", oldRS.Name, podIndex), + Labels: newSelector, + }, + Status: api.PodStatus{ + Conditions: []api.PodCondition{ + { + Type: api.PodReady, + Status: api.ConditionTrue, + }, + }, + }, + }) + } + for podIndex := 0; podIndex < test.oldReplicas-test.readyPodsFromOldRS; podIndex++ { + podList.Items = append(podList.Items, api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: fmt.Sprintf("%s-newUnhealthyPod-%d", oldRS.Name, podIndex), + Labels: newSelector, + }, + Status: api.PodStatus{ + Conditions: []api.PodCondition{ + { + Type: api.PodReady, + Status: api.ConditionFalse, + }, + }, + }, + }) + } + return true, podList, nil + } + return false, nil, nil + }) + controller := &DeploymentController{ + client: &fakeClientset, + eventRecorder: &record.FakeRecorder{}, + } + + scaled, err := controller.reconcileOldReplicaSets(allRSs, oldRSs, newRS, &deployment) + if err != nil { + t.Errorf("unexpected error: %v", err) + continue + } + if !test.scaleExpected && scaled { + t.Errorf("unexpected scaling: %v", fakeClientset.Actions()) + } + if test.scaleExpected && !scaled { + t.Errorf("expected scaling to occur") + continue + } + continue + } +} + +func TestDeploymentController_cleanupUnhealthyReplicas(t *testing.T) { + tests := []struct { + oldReplicas int + readyPods int + unHealthyPods int + maxCleanupCount int + cleanupCountExpected int + }{ + { + oldReplicas: 10, + readyPods: 8, + unHealthyPods: 2, + maxCleanupCount: 1, + cleanupCountExpected: 1, + }, + { + oldReplicas: 10, + readyPods: 8, + unHealthyPods: 2, + maxCleanupCount: 3, + cleanupCountExpected: 2, + }, + { + oldReplicas: 10, + readyPods: 8, + unHealthyPods: 2, + maxCleanupCount: 0, + cleanupCountExpected: 0, + }, + { + oldReplicas: 10, + readyPods: 10, + unHealthyPods: 0, + maxCleanupCount: 3, + cleanupCountExpected: 0, + }, + } + + for i, test := range tests { + t.Logf("executing scenario %d", i) + oldRS := rs("foo-v2", test.oldReplicas, nil) + oldRSs := []*exp.ReplicaSet{oldRS} + deployment := 
deployment("foo", 10, intstr.FromInt(2), intstr.FromInt(2)) + fakeClientset := fake.Clientset{} + fakeClientset.AddReactor("list", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) { + switch action.(type) { + case core.ListAction: + podList := &api.PodList{} + for podIndex := 0; podIndex < test.readyPods; podIndex++ { + podList.Items = append(podList.Items, api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: fmt.Sprintf("%s-readyPod-%d", oldRS.Name, podIndex), + }, + Status: api.PodStatus{ + Conditions: []api.PodCondition{ + { + Type: api.PodReady, + Status: api.ConditionTrue, + }, + }, + }, + }) + } + for podIndex := 0; podIndex < test.unHealthyPods; podIndex++ { + podList.Items = append(podList.Items, api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: fmt.Sprintf("%s-unHealthyPod-%d", oldRS.Name, podIndex), + }, + Status: api.PodStatus{ + Conditions: []api.PodCondition{ + { + Type: api.PodReady, + Status: api.ConditionFalse, + }, + }, + }, + }) + } + return true, podList, nil + } + return false, nil, nil + }) + + controller := &DeploymentController{ + client: &fakeClientset, + eventRecorder: &record.FakeRecorder{}, + } + _, cleanupCount, err := controller.cleanupUnhealthyReplicas(oldRSs, &deployment, int32(test.maxCleanupCount)) + if err != nil { + t.Errorf("unexpected error: %v", err) + continue + } + if int(cleanupCount) != test.cleanupCountExpected { + t.Errorf("expected %v unhealthy replicas been cleaned up, got %v", test.cleanupCountExpected, cleanupCount) + continue + } + } +} + +func TestDeploymentController_scaleDownOldReplicaSetsForRollingUpdate(t *testing.T) { + tests := []struct { + deploymentReplicas int + maxUnavailable intstr.IntOrString + readyPods int + oldReplicas int + scaleExpected bool + expectedOldReplicas int + }{ + { + deploymentReplicas: 10, + maxUnavailable: intstr.FromInt(0), + readyPods: 10, + oldReplicas: 10, + scaleExpected: true, + expectedOldReplicas: 9, + }, + { + deploymentReplicas: 10, + maxUnavailable: intstr.FromInt(2), + readyPods: 10, + oldReplicas: 10, + scaleExpected: true, + expectedOldReplicas: 8, + }, + { + deploymentReplicas: 10, + maxUnavailable: intstr.FromInt(2), + readyPods: 8, + oldReplicas: 10, + scaleExpected: false, + }, + { + deploymentReplicas: 10, + maxUnavailable: intstr.FromInt(2), + readyPods: 10, + oldReplicas: 0, + scaleExpected: false, + }, + { + deploymentReplicas: 10, + maxUnavailable: intstr.FromInt(2), + readyPods: 1, + oldReplicas: 10, + scaleExpected: false, + }, + } + + for i, test := range tests { + t.Logf("executing scenario %d", i) + oldRS := rs("foo-v2", test.oldReplicas, nil) + allRSs := []*exp.ReplicaSet{oldRS} + oldRSs := []*exp.ReplicaSet{oldRS} + deployment := deployment("foo", test.deploymentReplicas, intstr.FromInt(0), test.maxUnavailable) + fakeClientset := fake.Clientset{} + fakeClientset.AddReactor("list", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) { + switch action.(type) { + case core.ListAction: + podList := &api.PodList{} + for podIndex := 0; podIndex < test.readyPods; podIndex++ { + podList.Items = append(podList.Items, api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: fmt.Sprintf("%s-pod-%d", oldRS.Name, podIndex), + Labels: map[string]string{"foo": "bar"}, + }, + Status: api.PodStatus{ + Conditions: []api.PodCondition{ + { + Type: api.PodReady, + Status: api.ConditionTrue, + }, + }, + }, + }) + } + return true, podList, nil + } + return false, nil, nil + }) + controller := &DeploymentController{ + client: &fakeClientset, + eventRecorder: 
&record.FakeRecorder{}, + } + scaled, err := controller.scaleDownOldReplicaSetsForRollingUpdate(allRSs, oldRSs, &deployment) + if err != nil { + t.Errorf("unexpected error: %v", err) + continue + } + if !test.scaleExpected { + if scaled != 0 { + t.Errorf("unexpected scaling: %v", fakeClientset.Actions()) + } + continue + } + if test.scaleExpected && scaled == 0 { + t.Errorf("expected scaling to occur; actions: %v", fakeClientset.Actions()) + continue + } + // There are both list and update actions logged, so extract the update + // action for verification. + var updateAction core.UpdateAction + for _, action := range fakeClientset.Actions() { + switch a := action.(type) { + case core.UpdateAction: + if updateAction != nil { + t.Errorf("expected only 1 update action; had %v and found %v", updateAction, a) + } else { + updateAction = a + } + } + } + if updateAction == nil { + t.Errorf("expected an update action") + continue + } + updated := updateAction.GetObject().(*exp.ReplicaSet) + if e, a := test.expectedOldReplicas, int(updated.Spec.Replicas); e != a { + t.Errorf("expected update to %d replicas, got %d", e, a) + } + } +} + +func TestDeploymentController_cleanupOldReplicaSets(t *testing.T) { + selector := map[string]string{"foo": "bar"} + + tests := []struct { + oldRSs []*exp.ReplicaSet + revisionHistoryLimit int + expectedDeletions int + }{ + { + oldRSs: []*exp.ReplicaSet{ + newRSWithStatus("foo-1", 0, 0, selector), + newRSWithStatus("foo-2", 0, 0, selector), + newRSWithStatus("foo-3", 0, 0, selector), + }, + revisionHistoryLimit: 1, + expectedDeletions: 2, + }, + { + // Only delete the replica set with Spec.Replicas = Status.Replicas = 0. + oldRSs: []*exp.ReplicaSet{ + newRSWithStatus("foo-1", 0, 0, selector), + newRSWithStatus("foo-2", 0, 1, selector), + newRSWithStatus("foo-3", 1, 0, selector), + newRSWithStatus("foo-4", 1, 1, selector), + }, + revisionHistoryLimit: 0, + expectedDeletions: 1, + }, + + { + oldRSs: []*exp.ReplicaSet{ + newRSWithStatus("foo-1", 0, 0, selector), + newRSWithStatus("foo-2", 0, 0, selector), + }, + revisionHistoryLimit: 0, + expectedDeletions: 2, + }, + { + oldRSs: []*exp.ReplicaSet{ + newRSWithStatus("foo-1", 1, 1, selector), + newRSWithStatus("foo-2", 1, 1, selector), + }, + revisionHistoryLimit: 0, + expectedDeletions: 0, + }, + } + + for i, test := range tests { + fake := &fake.Clientset{} + controller := NewDeploymentController(fake, controller.NoResyncPeriodFunc) + + controller.eventRecorder = &record.FakeRecorder{} + controller.rsStoreSynced = alwaysReady + controller.podStoreSynced = alwaysReady + for _, rs := range test.oldRSs { + controller.rsStore.Add(rs) + } + + d := newDeployment(1, &tests[i].revisionHistoryLimit) + controller.cleanupOldReplicaSets(test.oldRSs, d) + + gotDeletions := 0 + for _, action := range fake.Actions() { + if "delete" == action.GetVerb() { + gotDeletions++ + } + } + if gotDeletions != test.expectedDeletions { + t.Errorf("expect %v old replica sets been deleted, but got %v", test.expectedDeletions, gotDeletions) + continue + } + } +} + +func getKey(d *exp.Deployment, t *testing.T) string { + if key, err := controller.KeyFunc(d); err != nil { + t.Errorf("Unexpected error getting key for deployment %v: %v", d.Name, err) + return "" + } else { + return key + } +} + +type fixture struct { + t *testing.T + + client *fake.Clientset + // Objects to put in the store. + dStore []*exp.Deployment + rsStore []*exp.ReplicaSet + podStore []*api.Pod + + // Actions expected to happen on the client. 
Objects from here are also + // preloaded into NewSimpleFake. + actions []core.Action + objects *api.List +} + +func (f *fixture) expectUpdateDeploymentAction(d *exp.Deployment) { + f.actions = append(f.actions, core.NewUpdateAction(unversioned.GroupVersionResource{Resource: "deployments"}, d.Namespace, d)) + f.objects.Items = append(f.objects.Items, d) +} + +func (f *fixture) expectCreateRSAction(rs *exp.ReplicaSet) { + f.actions = append(f.actions, core.NewCreateAction(unversioned.GroupVersionResource{Resource: "replicasets"}, rs.Namespace, rs)) + f.objects.Items = append(f.objects.Items, rs) +} + +func (f *fixture) expectUpdateRSAction(rs *exp.ReplicaSet) { + f.actions = append(f.actions, core.NewUpdateAction(unversioned.GroupVersionResource{Resource: "replicasets"}, rs.Namespace, rs)) + f.objects.Items = append(f.objects.Items, rs) +} + +func (f *fixture) expectListPodAction(namespace string, opt api.ListOptions) { + f.actions = append(f.actions, core.NewListAction(unversioned.GroupVersionResource{Resource: "pods"}, namespace, opt)) +} + +func newFixture(t *testing.T) *fixture { + f := &fixture{} + f.t = t + f.objects = &api.List{} + return f +} + +func (f *fixture) run(deploymentName string) { + f.client = fake.NewSimpleClientset(f.objects) + c := NewDeploymentController(f.client, controller.NoResyncPeriodFunc) + c.eventRecorder = &record.FakeRecorder{} + c.rsStoreSynced = alwaysReady + c.podStoreSynced = alwaysReady + for _, d := range f.dStore { + c.dStore.Store.Add(d) + } + for _, rs := range f.rsStore { + c.rsStore.Store.Add(rs) + } + for _, pod := range f.podStore { + c.podStore.Indexer.Add(pod) + } + + err := c.syncDeployment(deploymentName) + if err != nil { + f.t.Errorf("error syncing deployment: %v", err) + } + + actions := f.client.Actions() + for i, action := range actions { + if len(f.actions) < i+1 { + f.t.Errorf("%d unexpected actions: %+v", len(actions)-len(f.actions), actions[i:]) + break + } + + expectedAction := f.actions[i] + if !expectedAction.Matches(action.GetVerb(), action.GetResource().Resource) { + f.t.Errorf("Expected\n\t%#v\ngot\n\t%#v", expectedAction, action) + continue + } + } + + if len(f.actions) > len(actions) { + f.t.Errorf("%d additional expected actions:%+v", len(f.actions)-len(actions), f.actions[len(actions):]) + } +} + +func TestSyncDeploymentCreatesReplicaSet(t *testing.T) { + f := newFixture(t) + + d := newDeployment(1, nil) + f.dStore = append(f.dStore, d) + + // expect that one ReplicaSet with zero replicas is created + // and then updated to 1 replica + rs := newReplicaSet(d, "deploymentrs-4186632231", 0) + updatedRS := newReplicaSet(d, "deploymentrs-4186632231", 1) + opt := newListOptions() + + f.expectCreateRSAction(rs) + f.expectUpdateDeploymentAction(d) + f.expectUpdateRSAction(updatedRS) + f.expectListPodAction(rs.Namespace, opt) + f.expectUpdateDeploymentAction(d) + + f.run(getKey(d, t)) +} + +// issue: https://github.com/kubernetes/kubernetes/issues/23218 +func TestDeploymentController_dontSyncDeploymentsWithEmptyPodSelector(t *testing.T) { + fake := &fake.Clientset{} + controller := NewDeploymentController(fake, controller.NoResyncPeriodFunc) + + controller.eventRecorder = &record.FakeRecorder{} + controller.rsStoreSynced = alwaysReady + controller.podStoreSynced = alwaysReady + + d := newDeployment(1, nil) + empty := unversioned.LabelSelector{} + d.Spec.Selector = &empty + controller.dStore.Store.Add(d) + // We expect the deployment controller to not take action here since its configuration + // is invalid, even though no
replicasets exist that match its selector. + controller.syncDeployment(fmt.Sprintf("%s/%s", d.ObjectMeta.Namespace, d.ObjectMeta.Name)) + if len(fake.Actions()) == 0 { + return + } + for _, action := range fake.Actions() { + t.Logf("unexpected action: %#v", action) + } + t.Errorf("expected deployment controller to not take action") +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/endpoint/endpoints_controller.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/endpoint/endpoints_controller.go index 07491f6966ff..257ecb3df1d5 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/endpoint/endpoints_controller.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/endpoint/endpoints_controller.go @@ -20,6 +20,7 @@ package endpoint import ( "reflect" + "strconv" "time" "encoding/json" @@ -33,8 +34,10 @@ import ( clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller/framework" + "k8s.io/kubernetes/pkg/controller/framework/informers" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/util/metrics" utilruntime "k8s.io/kubernetes/pkg/util/runtime" "k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/util/wait" @@ -47,6 +50,18 @@ const ( // often. Higher numbers = lower CPU/network load; lower numbers = // shorter amount of time before a mistaken endpoint is corrected. FullServiceResyncPeriod = 30 * time.Second + + // We must avoid syncing services until the pod store has synced. If it hasn't synced, to + // avoid a hot loop, we'll wait this long between checks. + PodStoreSyncedPollPeriod = 100 * time.Millisecond + + // An annotation on the Service denoting if the endpoints controller should + // go ahead and create endpoints for unready pods. This annotation is + // currently only used by PetSets, where we need the pet to be DNS + // resolvable during initialization. In this situation we create a headless + // service just for the PetSet, and clients shouldn't be using this Service + // for anything so unready endpoints don't matter. + TolerateUnreadyEndpointsAnnotation = "service.alpha.kubernetes.io/tolerate-unready-endpoints" ) var ( @@ -54,7 +69,10 @@ ) // NewEndpointController returns a new *EndpointController. -func NewEndpointController(client *clientset.Clientset, resyncPeriod controller.ResyncPeriodFunc) *EndpointController { +func NewEndpointController(podInformer framework.SharedIndexInformer, client *clientset.Clientset) *EndpointController { + if client != nil && client.Core().GetRESTClient().GetRateLimiter() != nil { + metrics.RegisterMetricAndTrackRateLimiterUsage("endpoint_controller", client.Core().GetRESTClient().GetRateLimiter()) + } e := &EndpointController{ client: client, queue: workqueue.New(), @@ -81,23 +99,23 @@ func NewEndpointController(client *clientset.Clientset, resyncPeriod controller.
}, ) - e.podStore.Store, e.podController = framework.NewInformer( - &cache.ListWatch{ - ListFunc: func(options api.ListOptions) (runtime.Object, error) { - return e.client.Core().Pods(api.NamespaceAll).List(options) - }, - WatchFunc: func(options api.ListOptions) (watch.Interface, error) { - return e.client.Core().Pods(api.NamespaceAll).Watch(options) - }, - }, - &api.Pod{}, - resyncPeriod(), - framework.ResourceEventHandlerFuncs{ - AddFunc: e.addPod, - UpdateFunc: e.updatePod, - DeleteFunc: e.deletePod, - }, - ) + podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{ + AddFunc: e.addPod, + UpdateFunc: e.updatePod, + DeleteFunc: e.deletePod, + }) + e.podStore.Indexer = podInformer.GetIndexer() + e.podController = podInformer.GetController() + e.podStoreSynced = podInformer.HasSynced + + return e +} + +// NewEndpointControllerFromClient returns a new *EndpointController that runs its own informer. +func NewEndpointControllerFromClient(client *clientset.Clientset, resyncPeriod controller.ResyncPeriodFunc) *EndpointController { + podInformer := informers.CreateSharedPodIndexInformer(client, resyncPeriod()) + e := NewEndpointController(podInformer, client) + e.internalPodInformer = podInformer return e } @@ -109,6 +127,13 @@ type EndpointController struct { serviceStore cache.StoreToServiceLister podStore cache.StoreToPodLister + // internalPodInformer is used to hold a personal informer. If we're using + // a normal shared informer, then the informer will be started for us. If + // we have a personal informer, we must start it ourselves. If you start + // the controller using NewEndpointController(passing SharedInformer), this + // will be null + internalPodInformer framework.SharedIndexInformer + // Services that need to be updated. A channel is inappropriate here, // because it allows services with lots of pods to be serviced much // more often than services with few pods; it also would cause a @@ -119,7 +144,10 @@ type EndpointController struct { // Since we join two objects, we'll watch both of them with // controllers. serviceController *framework.Controller - podController *framework.Controller + podController framework.ControllerInterface + // podStoreSynced returns true if the pod store has been synced at least once. + // Added as a member to the struct to allow injection for testing. + podStoreSynced func() bool } // Runs e; will not return until stopCh is closed. workers determines how many @@ -136,6 +164,11 @@ func (e *EndpointController) Run(workers int, stopCh <-chan struct{}) { time.Sleep(5 * time.Minute) // give time for our cache to fill e.checkLeftoverEndpoints() }() + + if e.internalPodInformer != nil { + go e.internalPodInformer.Run(stopCh) + } + <-stopCh e.queue.ShutDown() } @@ -189,7 +222,7 @@ func (e *EndpointController) updatePod(old, cur interface{}) { oldPod := cur.(*api.Pod) // Only need to get the old services if the labels changed. 
if !reflect.DeepEqual(newPod.Labels, oldPod.Labels) || - !hostNameAndDomainAnnotationsAreEqual(newPod.Annotations, oldPod.Annotations) { + !hostNameAndDomainAreEqual(newPod, oldPod) { oldServices, err := e.getPodServiceMemberships(oldPod) if err != nil { glog.Errorf("Unable to get pod %v/%v's service memberships: %v", oldPod.Namespace, oldPod.Name, err) @@ -202,15 +235,29 @@ func (e *EndpointController) updatePod(old, cur interface{}) { } } -func hostNameAndDomainAnnotationsAreEqual(annotation1, annotation2 map[string]string) bool { - if annotation1 == nil { - annotation1 = map[string]string{} +func hostNameAndDomainAreEqual(pod1, pod2 *api.Pod) bool { + return getHostname(pod1) == getHostname(pod2) && + getSubdomain(pod1) == getSubdomain(pod2) +} + +func getHostname(pod *api.Pod) string { + if len(pod.Spec.Hostname) > 0 { + return pod.Spec.Hostname + } + if pod.Annotations != nil { + return pod.Annotations[utilpod.PodHostnameAnnotation] + } + return "" +} + +func getSubdomain(pod *api.Pod) string { + if len(pod.Spec.Subdomain) > 0 { + return pod.Spec.Subdomain } - if annotation2 == nil { - annotation2 = map[string]string{} + if pod.Annotations != nil { + return pod.Annotations[utilpod.PodSubdomainAnnotation] } - return annotation1[utilpod.PodHostnameAnnotation] == annotation2[utilpod.PodHostnameAnnotation] && - annotation1[utilpod.PodSubdomainAnnotation] == annotation2[utilpod.PodSubdomainAnnotation] + return "" } // When a pod is deleted, enqueue the services the pod used to be a member of. @@ -268,6 +315,15 @@ func (e *EndpointController) syncService(key string) { defer func() { glog.V(4).Infof("Finished syncing service %q endpoints. (%v)", key, time.Now().Sub(startTime)) }() + + if !e.podStoreSynced() { + // Sleep so we give the pod reflector goroutine a chance to run. + time.Sleep(PodStoreSyncedPollPeriod) + glog.Infof("Waiting for pods controller to sync, requeuing service %v", key) + e.queue.Add(key) + return + } + obj, exists, err := e.serviceStore.Store.GetByKey(key) if err != nil || !exists { // Delete the corresponding endpoint, as the service has been deleted.
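Editor's aside: the TolerateUnreadyEndpointsAnnotation gate added in the next hunk is just a strconv.ParseBool over a service annotation; a standalone sketch of the equivalent check (the helper name and values are illustrative):

package main

import (
	"fmt"
	"strconv"
)

// Mirrors the annotation handling added to syncService in the next hunk.
const tolerateUnreadyEndpointsAnnotation = "service.alpha.kubernetes.io/tolerate-unready-endpoints"

func tolerateUnready(annotations map[string]string) bool {
	v, ok := annotations[tolerateUnreadyEndpointsAnnotation]
	if !ok {
		return false
	}
	b, err := strconv.ParseBool(v)
	if err != nil {
		// The controller logs the parse error and keeps the default of false.
		return false
	}
	return b
}

func main() {
	fmt.Println(tolerateUnready(map[string]string{tolerateUnreadyEndpointsAnnotation: "true"})) // true
	fmt.Println(tolerateUnready(nil))                                                           // false
}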
@@ -309,6 +365,16 @@ func (e *EndpointController) syncService(key string) { subsets := []api.EndpointSubset{} podHostNames := map[string]endpoints.HostRecord{} + var tolerateUnreadyEndpoints bool + if v, ok := service.Annotations[TolerateUnreadyEndpointsAnnotation]; ok { + b, err := strconv.ParseBool(v) + if err == nil { + tolerateUnreadyEndpoints = b + } else { + glog.Errorf("Failed to parse annotation %v: %v", TolerateUnreadyEndpointsAnnotation, err) + } + } + for i := range pods.Items { pod := &pods.Items[i] @@ -331,17 +397,7 @@ func (e *EndpointController) syncService(key string) { continue } - hostname := pod.Annotations[utilpod.PodHostnameAnnotation] - if len(hostname) > 0 && - pod.Annotations[utilpod.PodSubdomainAnnotation] == service.Name && - service.Namespace == pod.Namespace { - hostRecord := endpoints.HostRecord{ - HostName: hostname, - } - podHostNames[string(pod.Status.PodIP)] = hostRecord - } - - epp := api.EndpointPort{Name: portName, Port: portNum, Protocol: portProto} + epp := api.EndpointPort{Name: portName, Port: int32(portNum), Protocol: portProto} epa := api.EndpointAddress{ IP: pod.Status.PodIP, TargetRef: &api.ObjectReference{ @@ -351,7 +407,20 @@ func (e *EndpointController) syncService(key string) { UID: pod.ObjectMeta.UID, ResourceVersion: pod.ObjectMeta.ResourceVersion, }} - if api.IsPodReady(pod) { + + hostname := getHostname(pod) + if len(hostname) > 0 && + getSubdomain(pod) == service.Name && + service.Namespace == pod.Namespace { + hostRecord := endpoints.HostRecord{ + HostName: hostname, + } + // TODO: stop populating podHostNames annotation in 1.4 + podHostNames[string(pod.Status.PodIP)] = hostRecord + epa.Hostname = hostname + } + + if tolerateUnreadyEndpoints || api.IsPodReady(pod) { subsets = append(subsets, api.EndpointSubset{ Addresses: []api.EndpointAddress{epa}, Ports: []api.EndpointPort{epp}, @@ -395,12 +464,10 @@ func (e *EndpointController) syncService(key string) { serializedPodHostNames = string(b) } - podHostNamesAreEqual := verifyPodHostNamesAreEqual(serializedPodHostNames, currentEndpoints.Annotations) - newAnnotations := make(map[string]string) newAnnotations[endpoints.PodHostnamesAnnotation] = serializedPodHostNames if reflect.DeepEqual(currentEndpoints.Subsets, subsets) && - reflect.DeepEqual(currentEndpoints.Labels, service.Labels) && podHostNamesAreEqual { + reflect.DeepEqual(currentEndpoints.Labels, service.Labels) { glog.V(5).Infof("endpoints are equal for %s/%s, skipping update", service.Namespace, service.Name) return } @@ -428,14 +495,6 @@ func (e *EndpointController) syncService(key string) { } } -func verifyPodHostNamesAreEqual(newPodHostNames string, oldAnnotations map[string]string) bool { - oldPodHostNames := "" - if oldAnnotations != nil { - oldPodHostNames = oldAnnotations[endpoints.PodHostnamesAnnotation] - } - return oldPodHostNames == newPodHostNames -} - // checkLeftoverEndpoints lists all currently existing endpoints and adds their // service to the queue. This will detect endpoints that exist with no // corresponding service; these endpoints need to be deleted. We only need to diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/endpoint/endpoints_controller_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/endpoint/endpoints_controller_test.go new file mode 100644 index 000000000000..e4097eb16a3d --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/endpoint/endpoints_controller_test.go @@ -0,0 +1,566 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package endpoint + +import ( + "fmt" + "net/http" + "net/http/httptest" + "testing" + + "k8s.io/kubernetes/pkg/api" + endptspkg "k8s.io/kubernetes/pkg/api/endpoints" + "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/api/unversioned" + _ "k8s.io/kubernetes/pkg/apimachinery/registered" + "k8s.io/kubernetes/pkg/client/cache" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" + "k8s.io/kubernetes/pkg/client/restclient" + "k8s.io/kubernetes/pkg/controller" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/util/intstr" + utiltesting "k8s.io/kubernetes/pkg/util/testing" +) + +var alwaysReady = func() bool { return true } + +func addPods(store cache.Store, namespace string, nPods int, nPorts int, nNotReady int) { + for i := 0; i < nPods+nNotReady; i++ { + p := &api.Pod{ + TypeMeta: unversioned.TypeMeta{APIVersion: testapi.Default.GroupVersion().String()}, + ObjectMeta: api.ObjectMeta{ + Namespace: namespace, + Name: fmt.Sprintf("pod%d", i), + Labels: map[string]string{"foo": "bar"}, + }, + Spec: api.PodSpec{ + Containers: []api.Container{{Ports: []api.ContainerPort{}}}, + }, + Status: api.PodStatus{ + PodIP: fmt.Sprintf("1.2.3.%d", 4+i), + Conditions: []api.PodCondition{ + { + Type: api.PodReady, + Status: api.ConditionTrue, + }, + }, + }, + } + if i >= nPods { + p.Status.Conditions[0].Status = api.ConditionFalse + } + for j := 0; j < nPorts; j++ { + p.Spec.Containers[0].Ports = append(p.Spec.Containers[0].Ports, + api.ContainerPort{Name: fmt.Sprintf("port%d", i), ContainerPort: int32(8080 + j)}) + } + store.Add(p) + } +} + +type serverResponse struct { + statusCode int + obj interface{} +} + +func makeTestServer(t *testing.T, namespace string, endpointsResponse serverResponse) (*httptest.Server, *utiltesting.FakeHandler) { + fakeEndpointsHandler := utiltesting.FakeHandler{ + StatusCode: endpointsResponse.statusCode, + ResponseBody: runtime.EncodeOrDie(testapi.Default.Codec(), endpointsResponse.obj.(runtime.Object)), + } + mux := http.NewServeMux() + mux.Handle(testapi.Default.ResourcePath("endpoints", namespace, ""), &fakeEndpointsHandler) + mux.Handle(testapi.Default.ResourcePath("endpoints/", namespace, ""), &fakeEndpointsHandler) + mux.HandleFunc("/", func(res http.ResponseWriter, req *http.Request) { + t.Errorf("unexpected request: %v", req.RequestURI) + res.WriteHeader(http.StatusNotFound) + }) + return httptest.NewServer(mux), &fakeEndpointsHandler +} + +func TestSyncEndpointsItemsPreserveNoSelector(t *testing.T) { + ns := api.NamespaceDefault + testServer, endpointsHandler := makeTestServer(t, ns, + serverResponse{http.StatusOK, &api.Endpoints{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Namespace: ns, + ResourceVersion: "1", + }, + Subsets: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{IP: "6.7.8.9"}}, + Ports: []api.EndpointPort{{Port: 1000}}, + }}, + }}) + defer testServer.Close() + client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: 
restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}) + endpoints := NewEndpointControllerFromClient(client, controller.NoResyncPeriodFunc) + endpoints.podStoreSynced = alwaysReady + endpoints.serviceStore.Store.Add(&api.Service{ + ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: ns}, + Spec: api.ServiceSpec{Ports: []api.ServicePort{{Port: 80}}}, + }) + endpoints.syncService(ns + "/foo") + endpointsHandler.ValidateRequestCount(t, 0) +} + +func TestCheckLeftoverEndpoints(t *testing.T) { + ns := api.NamespaceDefault + // Note that this requests *all* endpoints, therefore the NamespaceAll + // below. + testServer, _ := makeTestServer(t, api.NamespaceAll, + serverResponse{http.StatusOK, &api.EndpointsList{ + ListMeta: unversioned.ListMeta{ + ResourceVersion: "1", + }, + Items: []api.Endpoints{{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Namespace: ns, + ResourceVersion: "1", + }, + Subsets: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{IP: "6.7.8.9"}}, + Ports: []api.EndpointPort{{Port: 1000}}, + }}, + }}, + }}) + defer testServer.Close() + client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}) + endpoints := NewEndpointControllerFromClient(client, controller.NoResyncPeriodFunc) + endpoints.podStoreSynced = alwaysReady + endpoints.checkLeftoverEndpoints() + + if e, a := 1, endpoints.queue.Len(); e != a { + t.Fatalf("Expected %v, got %v", e, a) + } + got, _ := endpoints.queue.Get() + if e, a := ns+"/foo", got; e != a { + t.Errorf("Expected %v, got %v", e, a) + } +} + +func TestSyncEndpointsProtocolTCP(t *testing.T) { + ns := "other" + testServer, endpointsHandler := makeTestServer(t, ns, + serverResponse{http.StatusOK, &api.Endpoints{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Namespace: ns, + ResourceVersion: "1", + }, + Subsets: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{IP: "6.7.8.9"}}, + Ports: []api.EndpointPort{{Port: 1000, Protocol: "TCP"}}, + }}, + }}) + defer testServer.Close() + client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}) + endpoints := NewEndpointControllerFromClient(client, controller.NoResyncPeriodFunc) + endpoints.podStoreSynced = alwaysReady + + addPods(endpoints.podStore.Indexer, ns, 1, 1, 0) + endpoints.serviceStore.Store.Add(&api.Service{ + ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: ns}, + Spec: api.ServiceSpec{ + Selector: map[string]string{}, + Ports: []api.ServicePort{{Port: 80, TargetPort: intstr.FromInt(8080), Protocol: "TCP"}}, + }, + }) + endpoints.syncService(ns + "/foo") + endpointsHandler.ValidateRequestCount(t, 2) + data := runtime.EncodeOrDie(testapi.Default.Codec(), &api.Endpoints{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Namespace: ns, + ResourceVersion: "1", + }, + Subsets: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{IP: "1.2.3.4", TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod0", Namespace: ns}}}, + Ports: []api.EndpointPort{{Port: 8080, Protocol: "TCP"}}, + }}, + }) + endpointsHandler.ValidateRequest(t, testapi.Default.ResourcePath("endpoints", ns, "foo"), "PUT", &data) +} + +func TestSyncEndpointsProtocolUDP(t *testing.T) { + ns := "other" + testServer, endpointsHandler := makeTestServer(t, ns, + serverResponse{http.StatusOK, &api.Endpoints{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Namespace: ns, + ResourceVersion: "1", + }, + 
Subsets: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{IP: "6.7.8.9"}}, + Ports: []api.EndpointPort{{Port: 1000, Protocol: "UDP"}}, + }}, + }}) + defer testServer.Close() + client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}) + endpoints := NewEndpointControllerFromClient(client, controller.NoResyncPeriodFunc) + endpoints.podStoreSynced = alwaysReady + addPods(endpoints.podStore.Indexer, ns, 1, 1, 0) + endpoints.serviceStore.Store.Add(&api.Service{ + ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: ns}, + Spec: api.ServiceSpec{ + Selector: map[string]string{}, + Ports: []api.ServicePort{{Port: 80, TargetPort: intstr.FromInt(8080), Protocol: "UDP"}}, + }, + }) + endpoints.syncService(ns + "/foo") + endpointsHandler.ValidateRequestCount(t, 2) + data := runtime.EncodeOrDie(testapi.Default.Codec(), &api.Endpoints{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Namespace: ns, + ResourceVersion: "1", + }, + Subsets: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{IP: "1.2.3.4", TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod0", Namespace: ns}}}, + Ports: []api.EndpointPort{{Port: 8080, Protocol: "UDP"}}, + }}, + }) + endpointsHandler.ValidateRequest(t, testapi.Default.ResourcePath("endpoints", ns, "foo"), "PUT", &data) +} + +func TestSyncEndpointsItemsEmptySelectorSelectsAll(t *testing.T) { + ns := "other" + testServer, endpointsHandler := makeTestServer(t, ns, + serverResponse{http.StatusOK, &api.Endpoints{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Namespace: ns, + ResourceVersion: "1", + }, + Subsets: []api.EndpointSubset{}, + }}) + defer testServer.Close() + client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}) + endpoints := NewEndpointControllerFromClient(client, controller.NoResyncPeriodFunc) + endpoints.podStoreSynced = alwaysReady + addPods(endpoints.podStore.Indexer, ns, 1, 1, 0) + endpoints.serviceStore.Store.Add(&api.Service{ + ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: ns}, + Spec: api.ServiceSpec{ + Selector: map[string]string{}, + Ports: []api.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: intstr.FromInt(8080)}}, + }, + }) + endpoints.syncService(ns + "/foo") + data := runtime.EncodeOrDie(testapi.Default.Codec(), &api.Endpoints{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Namespace: ns, + ResourceVersion: "1", + }, + Subsets: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{IP: "1.2.3.4", TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod0", Namespace: ns}}}, + Ports: []api.EndpointPort{{Port: 8080, Protocol: "TCP"}}, + }}, + }) + endpointsHandler.ValidateRequest(t, testapi.Default.ResourcePath("endpoints", ns, "foo"), "PUT", &data) +} + +func TestSyncEndpointsItemsEmptySelectorSelectsAllNotReady(t *testing.T) { + ns := "other" + testServer, endpointsHandler := makeTestServer(t, ns, + serverResponse{http.StatusOK, &api.Endpoints{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Namespace: ns, + ResourceVersion: "1", + }, + Subsets: []api.EndpointSubset{}, + }}) + defer testServer.Close() + client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}) + endpoints := NewEndpointControllerFromClient(client, controller.NoResyncPeriodFunc) + endpoints.podStoreSynced = alwaysReady + 
addPods(endpoints.podStore.Indexer, ns, 0, 1, 1) + endpoints.serviceStore.Store.Add(&api.Service{ + ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: ns}, + Spec: api.ServiceSpec{ + Selector: map[string]string{}, + Ports: []api.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: intstr.FromInt(8080)}}, + }, + }) + endpoints.syncService(ns + "/foo") + data := runtime.EncodeOrDie(testapi.Default.Codec(), &api.Endpoints{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Namespace: ns, + ResourceVersion: "1", + }, + Subsets: []api.EndpointSubset{{ + NotReadyAddresses: []api.EndpointAddress{{IP: "1.2.3.4", TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod0", Namespace: ns}}}, + Ports: []api.EndpointPort{{Port: 8080, Protocol: "TCP"}}, + }}, + }) + endpointsHandler.ValidateRequest(t, testapi.Default.ResourcePath("endpoints", ns, "foo"), "PUT", &data) +} + +func TestSyncEndpointsItemsEmptySelectorSelectsAllMixed(t *testing.T) { + ns := "other" + testServer, endpointsHandler := makeTestServer(t, ns, + serverResponse{http.StatusOK, &api.Endpoints{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Namespace: ns, + ResourceVersion: "1", + }, + Subsets: []api.EndpointSubset{}, + }}) + defer testServer.Close() + client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}) + endpoints := NewEndpointControllerFromClient(client, controller.NoResyncPeriodFunc) + endpoints.podStoreSynced = alwaysReady + addPods(endpoints.podStore.Indexer, ns, 1, 1, 1) + endpoints.serviceStore.Store.Add(&api.Service{ + ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: ns}, + Spec: api.ServiceSpec{ + Selector: map[string]string{}, + Ports: []api.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: intstr.FromInt(8080)}}, + }, + }) + endpoints.syncService(ns + "/foo") + data := runtime.EncodeOrDie(testapi.Default.Codec(), &api.Endpoints{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Namespace: ns, + ResourceVersion: "1", + }, + Subsets: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{IP: "1.2.3.4", TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod0", Namespace: ns}}}, + NotReadyAddresses: []api.EndpointAddress{{IP: "1.2.3.5", TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod1", Namespace: ns}}}, + Ports: []api.EndpointPort{{Port: 8080, Protocol: "TCP"}}, + }}, + }) + endpointsHandler.ValidateRequest(t, testapi.Default.ResourcePath("endpoints", ns, "foo"), "PUT", &data) +} + +func TestSyncEndpointsItemsPreexisting(t *testing.T) { + ns := "bar" + testServer, endpointsHandler := makeTestServer(t, ns, + serverResponse{http.StatusOK, &api.Endpoints{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Namespace: ns, + ResourceVersion: "1", + }, + Subsets: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{IP: "6.7.8.9"}}, + Ports: []api.EndpointPort{{Port: 1000}}, + }}, + }}) + defer testServer.Close() + client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}) + endpoints := NewEndpointControllerFromClient(client, controller.NoResyncPeriodFunc) + endpoints.podStoreSynced = alwaysReady + addPods(endpoints.podStore.Indexer, ns, 1, 1, 0) + endpoints.serviceStore.Store.Add(&api.Service{ + ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: ns}, + Spec: api.ServiceSpec{ + Selector: map[string]string{"foo": "bar"}, + Ports: []api.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: 
intstr.FromInt(8080)}}, + }, + }) + endpoints.syncService(ns + "/foo") + data := runtime.EncodeOrDie(testapi.Default.Codec(), &api.Endpoints{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Namespace: ns, + ResourceVersion: "1", + }, + Subsets: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{IP: "1.2.3.4", TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod0", Namespace: ns}}}, + Ports: []api.EndpointPort{{Port: 8080, Protocol: "TCP"}}, + }}, + }) + endpointsHandler.ValidateRequest(t, testapi.Default.ResourcePath("endpoints", ns, "foo"), "PUT", &data) +} + +func TestSyncEndpointsItemsPreexistingIdentical(t *testing.T) { + ns := api.NamespaceDefault + testServer, endpointsHandler := makeTestServer(t, api.NamespaceDefault, + serverResponse{http.StatusOK, &api.Endpoints{ + ObjectMeta: api.ObjectMeta{ + ResourceVersion: "1", + Name: "foo", + Namespace: ns, + }, + Subsets: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{IP: "1.2.3.4", TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod0", Namespace: ns}}}, + Ports: []api.EndpointPort{{Port: 8080, Protocol: "TCP"}}, + }}, + }}) + defer testServer.Close() + client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}) + endpoints := NewEndpointControllerFromClient(client, controller.NoResyncPeriodFunc) + endpoints.podStoreSynced = alwaysReady + addPods(endpoints.podStore.Indexer, api.NamespaceDefault, 1, 1, 0) + endpoints.serviceStore.Store.Add(&api.Service{ + ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: api.NamespaceDefault}, + Spec: api.ServiceSpec{ + Selector: map[string]string{"foo": "bar"}, + Ports: []api.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: intstr.FromInt(8080)}}, + }, + }) + endpoints.syncService(ns + "/foo") + endpointsHandler.ValidateRequest(t, testapi.Default.ResourcePath("endpoints", api.NamespaceDefault, "foo"), "GET", nil) +} + +func TestSyncEndpointsItems(t *testing.T) { + ns := "other" + testServer, endpointsHandler := makeTestServer(t, ns, + serverResponse{http.StatusOK, &api.Endpoints{}}) + defer testServer.Close() + client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}) + endpoints := NewEndpointControllerFromClient(client, controller.NoResyncPeriodFunc) + endpoints.podStoreSynced = alwaysReady + addPods(endpoints.podStore.Indexer, ns, 3, 2, 0) + addPods(endpoints.podStore.Indexer, "blah", 5, 2, 0) // make sure these aren't found! 
+ endpoints.serviceStore.Store.Add(&api.Service{ + ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: ns}, + Spec: api.ServiceSpec{ + Selector: map[string]string{"foo": "bar"}, + Ports: []api.ServicePort{ + {Name: "port0", Port: 80, Protocol: "TCP", TargetPort: intstr.FromInt(8080)}, + {Name: "port1", Port: 88, Protocol: "TCP", TargetPort: intstr.FromInt(8088)}, + }, + }, + }) + endpoints.syncService("other/foo") + expectedSubsets := []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{ + {IP: "1.2.3.4", TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod0", Namespace: ns}}, + {IP: "1.2.3.5", TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod1", Namespace: ns}}, + {IP: "1.2.3.6", TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod2", Namespace: ns}}, + }, + Ports: []api.EndpointPort{ + {Name: "port0", Port: 8080, Protocol: "TCP"}, + {Name: "port1", Port: 8088, Protocol: "TCP"}, + }, + }} + data := runtime.EncodeOrDie(testapi.Default.Codec(), &api.Endpoints{ + ObjectMeta: api.ObjectMeta{ + ResourceVersion: "", + }, + Subsets: endptspkg.SortSubsets(expectedSubsets), + }) + // endpointsHandler should get 2 requests - one for "GET" and the next for "POST". + endpointsHandler.ValidateRequestCount(t, 2) + endpointsHandler.ValidateRequest(t, testapi.Default.ResourcePath("endpoints", ns, ""), "POST", &data) +} + +func TestSyncEndpointsItemsWithLabels(t *testing.T) { + ns := "other" + testServer, endpointsHandler := makeTestServer(t, ns, + serverResponse{http.StatusOK, &api.Endpoints{}}) + defer testServer.Close() + client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}) + endpoints := NewEndpointControllerFromClient(client, controller.NoResyncPeriodFunc) + endpoints.podStoreSynced = alwaysReady + addPods(endpoints.podStore.Indexer, ns, 3, 2, 0) + serviceLabels := map[string]string{"foo": "bar"} + endpoints.serviceStore.Store.Add(&api.Service{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Namespace: ns, + Labels: serviceLabels, + }, + Spec: api.ServiceSpec{ + Selector: map[string]string{"foo": "bar"}, + Ports: []api.ServicePort{ + {Name: "port0", Port: 80, Protocol: "TCP", TargetPort: intstr.FromInt(8080)}, + {Name: "port1", Port: 88, Protocol: "TCP", TargetPort: intstr.FromInt(8088)}, + }, + }, + }) + endpoints.syncService(ns + "/foo") + expectedSubsets := []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{ + {IP: "1.2.3.4", TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod0", Namespace: ns}}, + {IP: "1.2.3.5", TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod1", Namespace: ns}}, + {IP: "1.2.3.6", TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod2", Namespace: ns}}, + }, + Ports: []api.EndpointPort{ + {Name: "port0", Port: 8080, Protocol: "TCP"}, + {Name: "port1", Port: 8088, Protocol: "TCP"}, + }, + }} + data := runtime.EncodeOrDie(testapi.Default.Codec(), &api.Endpoints{ + ObjectMeta: api.ObjectMeta{ + ResourceVersion: "", + Labels: serviceLabels, + }, + Subsets: endptspkg.SortSubsets(expectedSubsets), + }) + // endpointsHandler should get 2 requests - one for "GET" and the next for "POST". 
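+	// syncService first GETs the current Endpoints object; the fake server
+	// answers with an empty object whose ResourceVersion is unset, so the
+	// controller treats the endpoints as not yet created and POSTs a fresh
+	// object (hence the empty ResourceVersion in the expected payload above).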
+ endpointsHandler.ValidateRequestCount(t, 2) + endpointsHandler.ValidateRequest(t, testapi.Default.ResourcePath("endpoints", ns, ""), "POST", &data) +} + +func TestSyncEndpointsItemsPreexistingLabelsChange(t *testing.T) { + ns := "bar" + testServer, endpointsHandler := makeTestServer(t, ns, + serverResponse{http.StatusOK, &api.Endpoints{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Namespace: ns, + ResourceVersion: "1", + Labels: map[string]string{ + "foo": "bar", + }, + }, + Subsets: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{IP: "6.7.8.9"}}, + Ports: []api.EndpointPort{{Port: 1000}}, + }}, + }}) + defer testServer.Close() + client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}) + endpoints := NewEndpointControllerFromClient(client, controller.NoResyncPeriodFunc) + endpoints.podStoreSynced = alwaysReady + addPods(endpoints.podStore.Indexer, ns, 1, 1, 0) + serviceLabels := map[string]string{"baz": "blah"} + endpoints.serviceStore.Store.Add(&api.Service{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Namespace: ns, + Labels: serviceLabels, + }, + Spec: api.ServiceSpec{ + Selector: map[string]string{"foo": "bar"}, + Ports: []api.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: intstr.FromInt(8080)}}, + }, + }) + endpoints.syncService(ns + "/foo") + data := runtime.EncodeOrDie(testapi.Default.Codec(), &api.Endpoints{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Namespace: ns, + ResourceVersion: "1", + Labels: serviceLabels, + }, + Subsets: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{IP: "1.2.3.4", TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod0", Namespace: ns}}}, + Ports: []api.EndpointPort{{Port: 8080, Protocol: "TCP"}}, + }}, + }) + endpointsHandler.ValidateRequest(t, testapi.Default.ResourcePath("endpoints", ns, "foo"), "PUT", &data) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/framework/controller.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/framework/controller.go index ed819525266d..cfffabe8cd41 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/framework/controller.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/framework/controller.go @@ -68,6 +68,12 @@ type Controller struct { reflectorMutex sync.RWMutex } +// TODO make the "Controller" private, and convert all references to use ControllerInterface instead +type ControllerInterface interface { + Run(stopCh <-chan struct{}) + HasSynced() bool +} + // New makes a new Controller from the given Config. func New(c *Config) *Controller { ctlr := &Controller{ diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/framework/controller_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/framework/controller_test.go new file mode 100644 index 000000000000..b17aba4ab679 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/framework/controller_test.go @@ -0,0 +1,414 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package framework_test + +import ( + "fmt" + "math/rand" + "sync" + "testing" + "time" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/client/cache" + "k8s.io/kubernetes/pkg/controller/framework" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/util/sets" + "k8s.io/kubernetes/pkg/util/wait" + "k8s.io/kubernetes/pkg/watch" + + "github.com/google/gofuzz" +) + +type testLW struct { + ListFunc func(options api.ListOptions) (runtime.Object, error) + WatchFunc func(options api.ListOptions) (watch.Interface, error) +} + +func (t *testLW) List(options api.ListOptions) (runtime.Object, error) { + return t.ListFunc(options) +} +func (t *testLW) Watch(options api.ListOptions) (watch.Interface, error) { + return t.WatchFunc(options) +} + +func Example() { + // source simulates an apiserver object endpoint. + source := framework.NewFakeControllerSource() + + // This will hold the downstream state, as we know it. + downstream := cache.NewStore(framework.DeletionHandlingMetaNamespaceKeyFunc) + + // This will hold incoming changes. Note how we pass downstream in as a + // KeyLister, that way resync operations will result in the correct set + // of update/delete deltas. + fifo := cache.NewDeltaFIFO(cache.MetaNamespaceKeyFunc, nil, downstream) + + // Let's do threadsafe output to get predictable test results. + deletionCounter := make(chan string, 1000) + + cfg := &framework.Config{ + Queue: fifo, + ListerWatcher: source, + ObjectType: &api.Pod{}, + FullResyncPeriod: time.Millisecond * 100, + RetryOnError: false, + + // Let's implement a simple controller that just deletes + // everything that comes in. + Process: func(obj interface{}) error { + // Obj is from the Pop method of the Queue we make above. + newest := obj.(cache.Deltas).Newest() + + if newest.Type != cache.Deleted { + // Update our downstream store. + err := downstream.Add(newest.Object) + if err != nil { + return err + } + + // Delete this object. + source.Delete(newest.Object.(runtime.Object)) + } else { + // Update our downstream store. + err := downstream.Delete(newest.Object) + if err != nil { + return err + } + + // fifo's KeyOf is easiest, because it handles + // DeletedFinalStateUnknown markers. + key, err := fifo.KeyOf(newest.Object) + if err != nil { + return err + } + + // Report this deletion. + deletionCounter <- key + } + return nil + }, + } + + // Create the controller and run it until we close stop. + stop := make(chan struct{}) + defer close(stop) + go framework.New(cfg).Run(stop) + + // Let's add a few objects to the source. + testIDs := []string{"a-hello", "b-controller", "c-framework"} + for _, name := range testIDs { + // Note that these pods are not valid-- the fake source doesn't + // call validation or anything. + source.Add(&api.Pod{ObjectMeta: api.ObjectMeta{Name: name}}) + } + + // Let's wait for the controller to process the things we just added. + outputSet := sets.String{} + for i := 0; i < len(testIDs); i++ { + outputSet.Insert(<-deletionCounter) + } + + for _, key := range outputSet.List() { + fmt.Println(key) + } + // Output: + // a-hello + // b-controller + // c-framework +} + +func ExampleInformer() { + // source simulates an apiserver object endpoint. + source := framework.NewFakeControllerSource() + + // Let's do threadsafe output to get predictable test results. 
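+	// Handler callbacks run on the informer's goroutine, so results are
+	// funneled through a buffered channel; the receives further down then
+	// double as synchronization points for the example's output.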
+ deletionCounter := make(chan string, 1000) + + // Make a controller that immediately deletes anything added to it, and + // logs anything deleted. + _, controller := framework.NewInformer( + source, + &api.Pod{}, + time.Millisecond*100, + framework.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { + source.Delete(obj.(runtime.Object)) + }, + DeleteFunc: func(obj interface{}) { + key, err := framework.DeletionHandlingMetaNamespaceKeyFunc(obj) + if err != nil { + key = "oops something went wrong with the key" + } + + // Report this deletion. + deletionCounter <- key + }, + }, + ) + + // Run the controller and run it until we close stop. + stop := make(chan struct{}) + defer close(stop) + go controller.Run(stop) + + // Let's add a few objects to the source. + testIDs := []string{"a-hello", "b-controller", "c-framework"} + for _, name := range testIDs { + // Note that these pods are not valid-- the fake source doesn't + // call validation or anything. + source.Add(&api.Pod{ObjectMeta: api.ObjectMeta{Name: name}}) + } + + // Let's wait for the controller to process the things we just added. + outputSet := sets.String{} + for i := 0; i < len(testIDs); i++ { + outputSet.Insert(<-deletionCounter) + } + + for _, key := range outputSet.List() { + fmt.Println(key) + } + // Output: + // a-hello + // b-controller + // c-framework +} + +func TestHammerController(t *testing.T) { + // This test executes a bunch of requests through the fake source and + // controller framework to make sure there's no locking/threading + // errors. If an error happens, it should hang forever or trigger the + // race detector. + + // source simulates an apiserver object endpoint. + source := framework.NewFakeControllerSource() + + // Let's do threadsafe output to get predictable test results. + outputSetLock := sync.Mutex{} + // map of key to operations done on the key + outputSet := map[string][]string{} + + recordFunc := func(eventType string, obj interface{}) { + key, err := framework.DeletionHandlingMetaNamespaceKeyFunc(obj) + if err != nil { + t.Errorf("something wrong with key: %v", err) + key = "oops something went wrong with the key" + } + + // Record some output when items are deleted. + outputSetLock.Lock() + defer outputSetLock.Unlock() + outputSet[key] = append(outputSet[key], eventType) + } + + // Make a controller which just logs all the changes it gets. + _, controller := framework.NewInformer( + source, + &api.Pod{}, + time.Millisecond*100, + framework.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { recordFunc("add", obj) }, + UpdateFunc: func(oldObj, newObj interface{}) { recordFunc("update", newObj) }, + DeleteFunc: func(obj interface{}) { recordFunc("delete", obj) }, + }, + ) + + if controller.HasSynced() { + t.Errorf("Expected HasSynced() to return false before we started the controller") + } + + // Run the controller and run it until we close stop. + stop := make(chan struct{}) + go controller.Run(stop) + + // Let's wait for the controller to do its initial sync + wait.Poll(100*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) { + return controller.HasSynced(), nil + }) + if !controller.HasSynced() { + t.Errorf("Expected HasSynced() to return true after the initial sync") + } + + wg := sync.WaitGroup{} + const threads = 3 + wg.Add(threads) + for i := 0; i < threads; i++ { + go func() { + defer wg.Done() + // Let's add a few objects to the source. 
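+			// Each goroutine keeps its own name set and its own rand/fuzz
+			// sources, so no coordination between writers is needed: roughly
+			// a third of the iterations create a fresh object, and the rest
+			// modify or delete a name this goroutine has already created.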
+ currentNames := sets.String{} + rs := rand.NewSource(rand.Int63()) + f := fuzz.New().NilChance(.5).NumElements(0, 2).RandSource(rs) + r := rand.New(rs) // Mustn't use r and f concurrently! + for i := 0; i < 100; i++ { + var name string + var isNew bool + if currentNames.Len() == 0 || r.Intn(3) == 1 { + f.Fuzz(&name) + isNew = true + } else { + l := currentNames.List() + name = l[r.Intn(len(l))] + } + + pod := &api.Pod{} + f.Fuzz(pod) + pod.ObjectMeta.Name = name + pod.ObjectMeta.Namespace = "default" + // Add, update, or delete randomly. + // Note that these pods are not valid-- the fake source doesn't + // call validation or perform any other checking. + if isNew { + currentNames.Insert(name) + source.Add(pod) + continue + } + switch r.Intn(2) { + case 0: + currentNames.Insert(name) + source.Modify(pod) + case 1: + currentNames.Delete(name) + source.Delete(pod) + } + } + }() + } + wg.Wait() + + // Let's wait for the controller to finish processing the things we just added. + // TODO: look in the queue to see how many items need to be processed. + time.Sleep(100 * time.Millisecond) + close(stop) + + outputSetLock.Lock() + t.Logf("got: %#v", outputSet) +} + +func TestUpdate(t *testing.T) { + // This test is going to exercise the various paths that result in a + // call to update. + + // source simulates an apiserver object endpoint. + source := framework.NewFakeControllerSource() + + const ( + FROM = "from" + TO = "to" + ) + + // These are the transitions we expect to see; because this is + // asynchronous, there are a lot of valid possibilities. + type pair struct{ from, to string } + allowedTransitions := map[pair]bool{ + pair{FROM, TO}: true, + + // Because a resync can happen when we've already observed one + // of the above but before the item is deleted. + pair{TO, TO}: true, + // Because a resync could happen before we observe an update. + pair{FROM, FROM}: true, + } + + pod := func(name, check string, final bool) *api.Pod { + p := &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: name, + Labels: map[string]string{"check": check}, + }, + } + if final { + p.Labels["final"] = "true" + } + return p + } + deletePod := func(p *api.Pod) bool { + return p.Labels["final"] == "true" + } + + tests := []func(string){ + func(name string) { + name = "a-" + name + source.Add(pod(name, FROM, false)) + source.Modify(pod(name, TO, true)) + }, + } + + const threads = 3 + + var testDoneWG sync.WaitGroup + testDoneWG.Add(threads * len(tests)) + + // Make a controller that deletes things once it observes an update. + // It calls Done() on the wait group on deletions so we can tell when + // everything we've added has been deleted. + watchCh := make(chan struct{}) + _, controller := framework.NewInformer( + &testLW{ + WatchFunc: func(options api.ListOptions) (watch.Interface, error) { + watch, err := source.Watch(options) + close(watchCh) + return watch, err + }, + ListFunc: func(options api.ListOptions) (runtime.Object, error) { + return source.List(options) + }, + }, + &api.Pod{}, + 0, + framework.ResourceEventHandlerFuncs{ + UpdateFunc: func(oldObj, newObj interface{}) { + o, n := oldObj.(*api.Pod), newObj.(*api.Pod) + from, to := o.Labels["check"], n.Labels["check"] + if !allowedTransitions[pair{from, to}] { + t.Errorf("observed transition %q -> %q for %v", from, to, n.Name) + } + if deletePod(n) { + source.Delete(n) + } + }, + DeleteFunc: func(obj interface{}) { + testDoneWG.Done() + }, + }, + ) + + // Run the controller and run it until we close stop. 
+ // Once Run() is called, calls to testDoneWG.Done() might start, so + // all testDoneWG.Add() calls must happen before this point + stop := make(chan struct{}) + go controller.Run(stop) + <-watchCh + + // run every test a few times, in parallel + var wg sync.WaitGroup + wg.Add(threads * len(tests)) + for i := 0; i < threads; i++ { + for j, f := range tests { + go func(name string, f func(string)) { + defer wg.Done() + f(name) + }(fmt.Sprintf("%v-%v", i, j), f) + } + } + wg.Wait() + + // Let's wait for the controller to process the things we just added. + testDoneWG.Wait() + close(stop) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/framework/fake_controller_source.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/framework/fake_controller_source.go index fa28171137c4..bebacb531a63 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/framework/fake_controller_source.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/framework/fake_controller_source.go @@ -89,8 +89,8 @@ func (f *FakeControllerSource) DeleteDropWatch(lastValue runtime.Object) { f.Change(watch.Event{Type: watch.Deleted, Object: lastValue}, 0) } -func (f *FakeControllerSource) key(meta *api.ObjectMeta) nnu { - return nnu{meta.Namespace, meta.Name, meta.UID} +func (f *FakeControllerSource) key(accessor meta.Object) nnu { + return nnu{accessor.GetNamespace(), accessor.GetName(), accessor.GetUID()} } // Change records the given event (setting the object's resource version) and @@ -99,15 +99,15 @@ func (f *FakeControllerSource) Change(e watch.Event, watchProbability float64) { f.lock.Lock() defer f.lock.Unlock() - objMeta, err := api.ObjectMetaFor(e.Object) + accessor, err := meta.Accessor(e.Object) if err != nil { panic(err) // this is test code only } resourceVersion := len(f.changes) + 1 - objMeta.ResourceVersion = strconv.Itoa(resourceVersion) + accessor.SetResourceVersion(strconv.Itoa(resourceVersion)) f.changes = append(f.changes, e) - key := f.key(objMeta) + key := f.key(accessor) switch e.Type { case watch.Added, watch.Modified: f.items[key] = e.Object diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/framework/fake_controller_source_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/framework/fake_controller_source_test.go new file mode 100644 index 000000000000..01269ce64354 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/framework/fake_controller_source_test.go @@ -0,0 +1,94 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package framework + +import ( + "sync" + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/watch" +) + +// ensure the watch delivers the requested and only the requested items. 
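+// The fake source assigns monotonically increasing integer resource versions
+// starting at "1", one per recorded change, and a Watch opened at version rv
+// replays every change made after rv; the expected version lists below follow
+// directly from the order of the Add/Modify calls in TestRCNumber.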
+func consume(t *testing.T, w watch.Interface, rvs []string, done *sync.WaitGroup) { + defer done.Done() + for _, rv := range rvs { + got, ok := <-w.ResultChan() + if !ok { + t.Errorf("%#v: unexpected channel close, wanted %v", rvs, rv) + return + } + gotRV := got.Object.(*api.Pod).ObjectMeta.ResourceVersion + if e, a := rv, gotRV; e != a { + t.Errorf("wanted %v, got %v", e, a) + } else { + t.Logf("Got %v as expected", gotRV) + } + } + // We should not get anything else. + got, open := <-w.ResultChan() + if open { + t.Errorf("%#v: unwanted object %#v", rvs, got) + } +} + +func TestRCNumber(t *testing.T) { + pod := func(name string) *api.Pod { + return &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: name, + }, + } + } + + wg := &sync.WaitGroup{} + wg.Add(3) + + source := NewFakeControllerSource() + source.Add(pod("foo")) + source.Modify(pod("foo")) + source.Modify(pod("foo")) + + w, err := source.Watch(api.ListOptions{ResourceVersion: "1"}) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + go consume(t, w, []string{"2", "3"}, wg) + + list, err := source.List(api.ListOptions{}) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + if e, a := "3", list.(*api.List).ResourceVersion; e != a { + t.Errorf("wanted %v, got %v", e, a) + } + + w2, err := source.Watch(api.ListOptions{ResourceVersion: "2"}) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + go consume(t, w2, []string{"3"}, wg) + + w3, err := source.Watch(api.ListOptions{ResourceVersion: "3"}) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + go consume(t, w3, []string{}, wg) + source.Shutdown() + wg.Wait() +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/framework/informers/factory.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/framework/informers/factory.go new file mode 100644 index 000000000000..6e432ef9cdd8 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/framework/informers/factory.go @@ -0,0 +1,120 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package informers + +import ( + "time" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/client/cache" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" + "k8s.io/kubernetes/pkg/controller/framework" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/watch" +) + +// CreateSharedPodInformer returns a SharedIndexInformer that lists and watches all pods +func CreateSharedPodInformer(client clientset.Interface, resyncPeriod time.Duration) framework.SharedIndexInformer { + sharedInformer := framework.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options api.ListOptions) (runtime.Object, error) { + return client.Core().Pods(api.NamespaceAll).List(options) + }, + WatchFunc: func(options api.ListOptions) (watch.Interface, error) { + return client.Core().Pods(api.NamespaceAll).Watch(options) + }, + }, + &api.Pod{}, + resyncPeriod, + cache.Indexers{}, + ) + + return sharedInformer +} + +// CreateSharedPodIndexInformer returns a SharedIndexInformer that lists and watches all pods +func CreateSharedPodIndexInformer(client clientset.Interface, resyncPeriod time.Duration) framework.SharedIndexInformer { + sharedIndexInformer := framework.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options api.ListOptions) (runtime.Object, error) { + return client.Core().Pods(api.NamespaceAll).List(options) + }, + WatchFunc: func(options api.ListOptions) (watch.Interface, error) { + return client.Core().Pods(api.NamespaceAll).Watch(options) + }, + }, + &api.Pod{}, + resyncPeriod, + cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, + ) + + return sharedIndexInformer +} + +// CreateSharedNodeIndexInformer returns a SharedIndexInformer that lists and watches all nodes +func CreateSharedNodeIndexInformer(client clientset.Interface, resyncPeriod time.Duration) framework.SharedIndexInformer { + sharedIndexInformer := framework.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options api.ListOptions) (runtime.Object, error) { + return client.Core().Nodes().List(options) + }, + WatchFunc: func(options api.ListOptions) (watch.Interface, error) { + return client.Core().Nodes().Watch(options) + }, + }, + &api.Node{}, + resyncPeriod, + cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) + + return sharedIndexInformer +} + +// CreateSharedPVCIndexInformer returns a SharedIndexInformer that lists and watches all PVCs +func CreateSharedPVCIndexInformer(client clientset.Interface, resyncPeriod time.Duration) framework.SharedIndexInformer { + sharedIndexInformer := framework.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options api.ListOptions) (runtime.Object, error) { + return client.Core().PersistentVolumeClaims(api.NamespaceAll).List(options) + }, + WatchFunc: func(options api.ListOptions) (watch.Interface, error) { + return client.Core().PersistentVolumeClaims(api.NamespaceAll).Watch(options) + }, + }, + &api.PersistentVolumeClaim{}, + resyncPeriod, + cache.Indexers{}) + + return sharedIndexInformer +} + +// CreateSharedPVIndexInformer returns a SharedIndexInformer that lists and watches all PVs +func CreateSharedPVIndexInformer(client clientset.Interface, resyncPeriod time.Duration) framework.SharedIndexInformer { + sharedIndexInformer := framework.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options api.ListOptions) (runtime.Object, error) { + return client.Core().PersistentVolumes().List(options) + }, + WatchFunc: func(options api.ListOptions) (watch.Interface, error) { 
+ return client.Core().PersistentVolumes().Watch(options) + }, + }, + &api.PersistentVolume{}, + resyncPeriod, + cache.Indexers{}) + + return sharedIndexInformer +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/framework/shared_informer.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/framework/shared_informer.go new file mode 100644 index 000000000000..ce9ddf2c7140 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/framework/shared_informer.go @@ -0,0 +1,333 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package framework + +import ( + "fmt" + "sync" + "time" + + "k8s.io/kubernetes/pkg/client/cache" + "k8s.io/kubernetes/pkg/runtime" + utilruntime "k8s.io/kubernetes/pkg/util/runtime" +) + +// if you use this, there is one behavior change compared to a standard Informer. +// When you receive a notification, the cache will be AT LEAST as fresh as the +// notification, but it MAY be more fresh. You should NOT depend on the contents +// of the cache exactly matching the notification you've received in handler +// functions. If there was a create, followed by a delete, the cache may NOT +// have your item. This has advantages over the broadcaster since it allows us +// to share a common cache across many controllers. Extending the broadcaster +// would have required us keep duplicate caches for each watch. +type SharedInformer interface { + // events to a single handler are delivered sequentially, but there is no coordination between different handlers + // You may NOT add a handler *after* the SharedInformer is running. That will result in an error being returned. + // TODO we should try to remove this restriction eventually. + AddEventHandler(handler ResourceEventHandler) error + GetStore() cache.Store + // GetController gives back a synthetic interface that "votes" to start the informer + GetController() ControllerInterface + Run(stopCh <-chan struct{}) + HasSynced() bool +} + +type SharedIndexInformer interface { + SharedInformer + // AddIndexers add indexers to the informer before it starts. + AddIndexers(indexers cache.Indexers) error + GetIndexer() cache.Indexer +} + +// NewSharedInformer creates a new instance for the listwatcher. +// TODO: create a cache/factory of these at a higher level for the list all, watch all of a given resource that can +// be shared amongst all consumers. +func NewSharedInformer(lw cache.ListerWatcher, objType runtime.Object, resyncPeriod time.Duration) SharedInformer { + return NewSharedIndexInformer(lw, objType, resyncPeriod, cache.Indexers{}) +} + +// NewSharedIndexInformer creates a new instance for the listwatcher. +// TODO: create a cache/factory of these at a higher level for the list all, watch all of a given resource that can +// be shared amongst all consumers. 
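+//
+// A rough usage sketch (the pod ListWatch `lw` and the stop channel are
+// assumed, not part of this package). Handlers must be registered before Run,
+// since AddEventHandler returns an error once the informer has started:
+//
+//	informer := framework.NewSharedIndexInformer(lw, &api.Pod{}, 30*time.Second, cache.Indexers{})
+//	if err := informer.AddEventHandler(framework.ResourceEventHandlerFuncs{
+//		AddFunc:    func(obj interface{}) { /* observe adds */ },
+//		DeleteFunc: func(obj interface{}) { /* observe deletes */ },
+//	}); err != nil {
+//		// the informer was already running
+//	}
+//	go informer.Run(stopCh)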
+func NewSharedIndexInformer(lw cache.ListerWatcher, objType runtime.Object, resyncPeriod time.Duration, indexers cache.Indexers) SharedIndexInformer {
+	sharedIndexInformer := &sharedIndexInformer{
+		processor:        &sharedProcessor{},
+		indexer:          cache.NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers),
+		listerWatcher:    lw,
+		objectType:       objType,
+		fullResyncPeriod: resyncPeriod,
+	}
+	return sharedIndexInformer
+}
+
+type sharedIndexInformer struct {
+	indexer    cache.Indexer
+	controller *Controller
+
+	processor *sharedProcessor
+
+	// This block is tracked to handle late initialization of the controller
+	listerWatcher    cache.ListerWatcher
+	objectType       runtime.Object
+	fullResyncPeriod time.Duration
+
+	started     bool
+	startedLock sync.Mutex
+}
+
+// dummyController hides the fact that a SharedInformer is different from a dedicated one
+// where a caller can `Run`. The run method is disconnected in this case, because higher
+// level logic will decide when to start the SharedInformer and related controller.
+// Because returning information back is always asynchronous, the legacy callers shouldn't
+// notice any change in behavior.
+type dummyController struct {
+	informer *sharedIndexInformer
+}
+
+func (v *dummyController) Run(stopCh <-chan struct{}) {
+}
+
+func (v *dummyController) HasSynced() bool {
+	return v.informer.HasSynced()
+}
+
+type updateNotification struct {
+	oldObj interface{}
+	newObj interface{}
+}
+
+type addNotification struct {
+	newObj interface{}
+}
+
+type deleteNotification struct {
+	oldObj interface{}
+}
+
+func (s *sharedIndexInformer) Run(stopCh <-chan struct{}) {
+	defer utilruntime.HandleCrash()
+
+	fifo := cache.NewDeltaFIFO(cache.MetaNamespaceKeyFunc, nil, s.indexer)
+
+	cfg := &Config{
+		Queue:            fifo,
+		ListerWatcher:    s.listerWatcher,
+		ObjectType:       s.objectType,
+		FullResyncPeriod: s.fullResyncPeriod,
+		RetryOnError:     false,
+
+		Process: s.HandleDeltas,
+	}
+
+	func() {
+		s.startedLock.Lock()
+		defer s.startedLock.Unlock()
+
+		s.controller = New(cfg)
+		s.started = true
+	}()
+
+	s.processor.run(stopCh)
+	s.controller.Run(stopCh)
+}
+
+func (s *sharedIndexInformer) isStarted() bool {
+	s.startedLock.Lock()
+	defer s.startedLock.Unlock()
+	return s.started
+}
+
+func (s *sharedIndexInformer) HasSynced() bool {
+	s.startedLock.Lock()
+	defer s.startedLock.Unlock()
+
+	if s.controller == nil {
+		return false
+	}
+	return s.controller.HasSynced()
+}
+
+func (s *sharedIndexInformer) GetStore() cache.Store {
+	return s.indexer
+}
+
+func (s *sharedIndexInformer) GetIndexer() cache.Indexer {
+	return s.indexer
+}
+
+func (s *sharedIndexInformer) AddIndexers(indexers cache.Indexers) error {
+	s.startedLock.Lock()
+	defer s.startedLock.Unlock()
+
+	if s.started {
+		return fmt.Errorf("informer has already started")
+	}
+
+	return s.indexer.AddIndexers(indexers)
+}
+
+func (s *sharedIndexInformer) GetController() ControllerInterface {
+	return &dummyController{informer: s}
+}
+
+func (s *sharedIndexInformer) AddEventHandler(handler ResourceEventHandler) error {
+	s.startedLock.Lock()
+	defer s.startedLock.Unlock()
+
+	if s.started {
+		return fmt.Errorf("informer has already started")
+	}
+
+	listener := newProcessListener(handler)
+	s.processor.listeners = append(s.processor.listeners, listener)
+	return nil
+}
+
+func (s *sharedIndexInformer) HandleDeltas(obj interface{}) error {
+	// from oldest to newest
+	for _, d := range obj.(cache.Deltas) {
+		switch d.Type {
+		case cache.Sync, cache.Added, cache.Updated:
+			if old, exists, err := s.indexer.Get(d.Object); err
== nil && exists { + if err := s.indexer.Update(d.Object); err != nil { + return err + } + s.processor.distribute(updateNotification{oldObj: old, newObj: d.Object}) + } else { + if err := s.indexer.Add(d.Object); err != nil { + return err + } + s.processor.distribute(addNotification{newObj: d.Object}) + } + case cache.Deleted: + if err := s.indexer.Delete(d.Object); err != nil { + return err + } + s.processor.distribute(deleteNotification{oldObj: d.Object}) + } + } + return nil +} + +type sharedProcessor struct { + listeners []*processorListener +} + +func (p *sharedProcessor) distribute(obj interface{}) { + for _, listener := range p.listeners { + listener.add(obj) + } +} + +func (p *sharedProcessor) run(stopCh <-chan struct{}) { + for _, listener := range p.listeners { + go listener.run(stopCh) + go listener.pop(stopCh) + } +} + +type processorListener struct { + // lock/cond protects access to 'pendingNotifications'. + lock sync.RWMutex + cond sync.Cond + + // pendingNotifications is an unbounded slice that holds all notifications not yet distributed + // there is one per listener, but a failing/stalled listener will have infinite pendingNotifications + // added until we OOM. + // TODO This is no worse that before, since reflectors were backed by unbounded DeltaFIFOs, but + // we should try to do something better + pendingNotifications []interface{} + + nextCh chan interface{} + + handler ResourceEventHandler +} + +func newProcessListener(handler ResourceEventHandler) *processorListener { + ret := &processorListener{ + pendingNotifications: []interface{}{}, + nextCh: make(chan interface{}), + handler: handler, + } + + ret.cond.L = &ret.lock + return ret +} + +func (p *processorListener) add(notification interface{}) { + p.lock.Lock() + defer p.lock.Unlock() + + p.pendingNotifications = append(p.pendingNotifications, notification) + p.cond.Broadcast() +} + +func (p *processorListener) pop(stopCh <-chan struct{}) { + defer utilruntime.HandleCrash() + + p.lock.Lock() + defer p.lock.Unlock() + for { + for len(p.pendingNotifications) == 0 { + // check if we're shutdown + select { + case <-stopCh: + return + default: + } + + p.cond.Wait() + } + notification := p.pendingNotifications[0] + p.pendingNotifications = p.pendingNotifications[1:] + + select { + case <-stopCh: + return + case p.nextCh <- notification: + } + } +} + +func (p *processorListener) run(stopCh <-chan struct{}) { + defer utilruntime.HandleCrash() + + for { + var next interface{} + select { + case <-stopCh: + func() { + p.lock.Lock() + defer p.lock.Unlock() + p.cond.Broadcast() + }() + return + case next = <-p.nextCh: + } + + switch notification := next.(type) { + case updateNotification: + p.handler.OnUpdate(notification.oldObj, notification.newObj) + case addNotification: + p.handler.OnAdd(notification.newObj) + case deleteNotification: + p.handler.OnDelete(notification.oldObj) + default: + utilruntime.HandleError(fmt.Errorf("unrecognized notification: %#v", next)) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/garbagecollector/garbagecollector.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/garbagecollector/garbagecollector.go new file mode 100644 index 000000000000..91d1b228a230 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/garbagecollector/garbagecollector.go @@ -0,0 +1,714 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package garbagecollector + +import ( + "fmt" + "sync" + "time" + + "github.com/golang/glog" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/errors" + "k8s.io/kubernetes/pkg/api/meta" + "k8s.io/kubernetes/pkg/api/meta/metatypes" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/apimachinery/registered" + "k8s.io/kubernetes/pkg/client/cache" + "k8s.io/kubernetes/pkg/client/typed/dynamic" + "k8s.io/kubernetes/pkg/controller/framework" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/types" + utilerrors "k8s.io/kubernetes/pkg/util/errors" + utilruntime "k8s.io/kubernetes/pkg/util/runtime" + "k8s.io/kubernetes/pkg/util/sets" + "k8s.io/kubernetes/pkg/util/wait" + "k8s.io/kubernetes/pkg/util/workqueue" + "k8s.io/kubernetes/pkg/watch" +) + +const ResourceResyncTime = 60 * time.Second + +type monitor struct { + store cache.Store + controller *framework.Controller +} + +type objectReference struct { + metatypes.OwnerReference + // This is needed by the dynamic client + Namespace string +} + +func (s objectReference) String() string { + return fmt.Sprintf("[%s/%s, namespace: %s, name: %s, uid: %s]", s.APIVersion, s.Kind, s.Namespace, s.Name, s.UID) +} + +// node does not require a lock to protect. The single-threaded +// Propagator.processEvent() is the sole writer of the nodes. The multi-threaded +// GarbageCollector.processItem() reads the nodes, but it only reads the fields +// that never get changed by Propagator.processEvent(). +type node struct { + identity objectReference + // dependents will be read by the orphan() routine, we need to protect it with a lock. + dependentsLock *sync.RWMutex + dependents map[*node]struct{} + // When processing an Update event, we need to compare the updated + // ownerReferences with the owners recorded in the graph. + owners []metatypes.OwnerReference +} + +func (ownerNode *node) addDependent(dependent *node) { + ownerNode.dependentsLock.Lock() + defer ownerNode.dependentsLock.Unlock() + ownerNode.dependents[dependent] = struct{}{} +} + +func (ownerNode *node) deleteDependent(dependent *node) { + ownerNode.dependentsLock.Lock() + defer ownerNode.dependentsLock.Unlock() + delete(ownerNode.dependents, dependent) +} + +type eventType int + +const ( + addEvent eventType = iota + updateEvent + deleteEvent +) + +type event struct { + eventType eventType + obj interface{} + // the update event comes with an old object, but it's not used by the garbage collector. 
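+	// (more precisely: shouldOrphanDependents does consult it, to detect the
+	// nil -> non-nil DeletionTimestamp transition; nothing else reads it.)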
+ oldObj interface{} +} + +type concurrentUIDToNode struct { + *sync.RWMutex + uidToNode map[types.UID]*node +} + +func (m *concurrentUIDToNode) Write(node *node) { + m.Lock() + defer m.Unlock() + m.uidToNode[node.identity.UID] = node +} + +func (m *concurrentUIDToNode) Read(uid types.UID) (*node, bool) { + m.RLock() + defer m.RUnlock() + n, ok := m.uidToNode[uid] + return n, ok +} + +func (m *concurrentUIDToNode) Delete(uid types.UID) { + m.Lock() + defer m.Unlock() + delete(m.uidToNode, uid) +} + +type Propagator struct { + eventQueue *workqueue.Type + // uidToNode doesn't require a lock to protect, because only the + // single-threaded Propagator.processEvent() reads/writes it. + uidToNode *concurrentUIDToNode + gc *GarbageCollector +} + +// addDependentToOwners adds n to owners' dependents list. If the owner does not +// exist in the p.uidToNode yet, a "virtual" node will be created to represent +// the owner. The "virtual" node will be enqueued to the dirtyQueue, so that +// processItem() will verify if the owner exists according to the API server. +func (p *Propagator) addDependentToOwners(n *node, owners []metatypes.OwnerReference) { + for _, owner := range owners { + ownerNode, ok := p.uidToNode.Read(owner.UID) + if !ok { + // Create a "virtual" node in the graph for the owner if it doesn't + // exist in the graph yet. Then enqueue the virtual node into the + // dirtyQueue. The garbage processor will enqueue a virtual delete + // event to delete it from the graph if API server confirms this + // owner doesn't exist. + ownerNode = &node{ + identity: objectReference{ + OwnerReference: owner, + Namespace: n.identity.Namespace, + }, + dependentsLock: &sync.RWMutex{}, + dependents: make(map[*node]struct{}), + } + p.uidToNode.Write(ownerNode) + p.gc.dirtyQueue.Add(ownerNode) + } + ownerNode.addDependent(n) + } +} + +// insertNode insert the node to p.uidToNode; then it finds all owners as listed +// in n.owners, and adds the node to their dependents list. +func (p *Propagator) insertNode(n *node) { + p.uidToNode.Write(n) + p.addDependentToOwners(n, n.owners) +} + +// removeDependentFromOwners remove n from owners' dependents list. +func (p *Propagator) removeDependentFromOwners(n *node, owners []metatypes.OwnerReference) { + for _, owner := range owners { + ownerNode, ok := p.uidToNode.Read(owner.UID) + if !ok { + continue + } + ownerNode.deleteDependent(n) + } +} + +// removeNode removes the node from p.uidToNode, then finds all +// owners as listed in n.owners, and removes n from their dependents list. +func (p *Propagator) removeNode(n *node) { + p.uidToNode.Delete(n.identity.UID) + p.removeDependentFromOwners(n, n.owners) +} + +// TODO: profile this function to see if a naive N^2 algorithm performs better +// when the number of references is small. 
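+//
+// The current implementation builds a UID -> reference map per slice and
+// diffs the key sets, which is O(len(old)+len(new)). For example, with old
+// owners {A, B} and new owners {B, C} it returns added={C}, removed={A}.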
+func referencesDiffs(old []metatypes.OwnerReference, new []metatypes.OwnerReference) (added []metatypes.OwnerReference, removed []metatypes.OwnerReference) {
+	oldUIDToRef := make(map[string]metatypes.OwnerReference)
+	for i := 0; i < len(old); i++ {
+		oldUIDToRef[string(old[i].UID)] = old[i]
+	}
+	oldUIDSet := sets.StringKeySet(oldUIDToRef)
+	newUIDToRef := make(map[string]metatypes.OwnerReference)
+	for i := 0; i < len(new); i++ {
+		newUIDToRef[string(new[i].UID)] = new[i]
+	}
+	newUIDSet := sets.StringKeySet(newUIDToRef)
+
+	addedUID := newUIDSet.Difference(oldUIDSet)
+	removedUID := oldUIDSet.Difference(newUIDSet)
+
+	for uid := range addedUID {
+		added = append(added, newUIDToRef[uid])
+	}
+	for uid := range removedUID {
+		removed = append(removed, oldUIDToRef[uid])
+	}
+	return added, removed
+}
+
+func shouldOrphanDependents(e event, accessor meta.Object) bool {
+	// The delta_fifo may combine the creation and update of the object into one
+	// event, so we need to check AddEvent as well.
+	if e.oldObj == nil {
+		if accessor.GetDeletionTimestamp() == nil {
+			return false
+		}
+	} else {
+		oldAccessor, err := meta.Accessor(e.oldObj)
+		if err != nil {
+			utilruntime.HandleError(fmt.Errorf("cannot access oldObj: %v", err))
+			return false
+		}
+		// ignore the event if it's not updating DeletionTimestamp from nil to non-nil.
+		if accessor.GetDeletionTimestamp() == nil || oldAccessor.GetDeletionTimestamp() != nil {
+			return false
+		}
+	}
+	finalizers := accessor.GetFinalizers()
+	for _, finalizer := range finalizers {
+		if finalizer == api.FinalizerOrphan {
+			return true
+		}
+	}
+	return false
+}
+
+// dependents are copies of pointers to the owner's dependents; they don't need to be locked.
+func (gc *GarbageCollector) orphanDependents(owner objectReference, dependents []*node) error {
+	var failedDependents []objectReference
+	var errorsSlice []error
+	for _, dependent := range dependents {
+		// the dependent.identity.UID is used as precondition
+		deleteOwnerRefPatch := fmt.Sprintf(`{"metadata":{"ownerReferences":[{"$patch":"delete","uid":"%s"}],"uid":"%s"}}`, owner.UID, dependent.identity.UID)
+		_, err := gc.patchObject(dependent.identity, []byte(deleteOwnerRefPatch))
+		// note that if the target ownerReference doesn't exist in the
+		// dependent, strategic merge patch will NOT return an error.
+		if err != nil && !errors.IsNotFound(err) {
+			failedDependents = append(failedDependents, dependent.identity)
+			errorsSlice = append(errorsSlice, fmt.Errorf("orphaning %s failed with %v", dependent.identity, err))
+		}
+	}
+	if len(failedDependents) != 0 {
+		return fmt.Errorf("failed to orphan dependents of owner %s, got errors: %s", owner, utilerrors.NewAggregate(errorsSlice).Error())
+	}
+	glog.V(6).Infof("successfully updated all dependents of owner %s", owner)
+	return nil
+}
+
+// TODO: use Patch once strategic merge supports deleting an entry from a
+// slice of a base type.
+func (gc *GarbageCollector) removeOrphanFinalizer(owner *node) error {
+	const retries = 5
+	for count := 0; count < retries; count++ {
+		ownerObject, err := gc.getObject(owner.identity)
+		if err != nil {
+			return fmt.Errorf("cannot finalize owner %s because the object could not be fetched; the garbage collector will retry later", owner.identity)
+		}
+		accessor, err := meta.Accessor(ownerObject)
+		if err != nil {
+			return fmt.Errorf("cannot access the owner object: %v; the garbage collector will retry later", err)
+		}
+		finalizers := accessor.GetFinalizers()
+		var newFinalizers []string
+		found := false
+		for _, f := range finalizers {
+			if f == api.FinalizerOrphan {
+				found = true
+			} else {
+				newFinalizers = append(newFinalizers, f)
+			}
+		}
+		if !found {
+			glog.V(6).Infof("the orphan finalizer is already removed from object %s", owner.identity)
+			return nil
+		}
+		// remove the orphan finalizer from the owner's finalizers list
+		ownerObject.SetFinalizers(newFinalizers)
+		_, err = gc.updateObject(owner.identity, ownerObject)
+		if err == nil {
+			return nil
+		}
+		if err != nil && !errors.IsConflict(err) {
+			return fmt.Errorf("cannot update the finalizers of owner %s, with error: %v, tried %d times", owner.identity, err, count+1)
+		}
+		// retry if it's a conflict
+		glog.V(6).Infof("got conflict updating the owner object %s, tried %d times", owner.identity, count+1)
+	}
+	return fmt.Errorf("retry limit (%d) reached; the garbage collector will retry later for owner %v", retries, owner.identity)
+}
+
+// orphanFinalizer dequeues a node from the orphanQueue, then finds its dependents
+// based on the graph maintained by the GC, then removes it from the
+// OwnerReferences of its dependents, and finally updates the owner to remove
+// the "Orphan" finalizer. The node is added back into the orphanQueue if any of
+// these steps fail.
+func (gc *GarbageCollector) orphanFinalizer() {
+	key, quit := gc.orphanQueue.Get()
+	if quit {
+		return
+	}
+	defer gc.orphanQueue.Done(key)
+	owner, ok := key.(*node)
+	if !ok {
+		utilruntime.HandleError(fmt.Errorf("expect *node, got %#v", key))
+		return
+	}
+	// we don't need to lock each element, because they never get updated
+	owner.dependentsLock.RLock()
+	dependents := make([]*node, 0, len(owner.dependents))
+	for dependent := range owner.dependents {
+		dependents = append(dependents, dependent)
+	}
+	owner.dependentsLock.RUnlock()
+
+	err := gc.orphanDependents(owner.identity, dependents)
+	if err != nil {
+		glog.V(6).Infof("orphanDependents for %s failed with %v", owner.identity, err)
+		gc.orphanQueue.Add(owner)
+		return
+	}
+	// update the owner, remove the orphan finalizer from its finalizers list
+	err = gc.removeOrphanFinalizer(owner)
+	if err != nil {
+		glog.V(6).Infof("removeOrphanFinalizer for %s failed with %v", owner.identity, err)
+		gc.orphanQueue.Add(owner)
+	}
+}
+
+// processEvent dequeues an event from the eventQueue, updates the graph, and
+// populates the dirtyQueue.
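+// Three cases are distinguished below: an add/update for an unknown UID
+// inserts a new node (wiring up virtual owners as needed); an add/update for
+// a known UID re-diffs its ownerReferences; and a delete removes the node and
+// marks every dependent dirty so the workers re-evaluate them.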
+func (p *Propagator) processEvent() {
+	key, quit := p.eventQueue.Get()
+	if quit {
+		return
+	}
+	defer p.eventQueue.Done(key)
+	event, ok := key.(event)
+	if !ok {
+		utilruntime.HandleError(fmt.Errorf("expect an event, got %v", key))
+		return
+	}
+	obj := event.obj
+	accessor, err := meta.Accessor(obj)
+	if err != nil {
+		utilruntime.HandleError(fmt.Errorf("cannot access obj: %v", err))
+		return
+	}
+	typeAccessor, err := meta.TypeAccessor(obj)
+	if err != nil {
+		utilruntime.HandleError(fmt.Errorf("cannot access obj: %v", err))
+		return
+	}
+	glog.V(6).Infof("Propagator processing object: %s/%s, namespace %s, name %s, event type %s", typeAccessor.GetAPIVersion(), typeAccessor.GetKind(), accessor.GetNamespace(), accessor.GetName(), event.eventType)
+	// Check if the node already exists
+	existingNode, found := p.uidToNode.Read(accessor.GetUID())
+	switch {
+	case (event.eventType == addEvent || event.eventType == updateEvent) && !found:
+		newNode := &node{
+			identity: objectReference{
+				OwnerReference: metatypes.OwnerReference{
+					APIVersion: typeAccessor.GetAPIVersion(),
+					Kind:       typeAccessor.GetKind(),
+					UID:        accessor.GetUID(),
+					Name:       accessor.GetName(),
+				},
+				Namespace: accessor.GetNamespace(),
+			},
+			dependentsLock: &sync.RWMutex{},
+			dependents:     make(map[*node]struct{}),
+			owners:         accessor.GetOwnerReferences(),
+		}
+		p.insertNode(newNode)
+		// the underlying delta_fifo may combine a creation and an update into one event
+		if shouldOrphanDependents(event, accessor) {
+			glog.V(6).Infof("add %s to the orphanQueue", newNode.identity)
+			p.gc.orphanQueue.Add(newNode)
+		}
+	case (event.eventType == addEvent || event.eventType == updateEvent) && found:
+		// caveat: if GC observes the creation of the dependents later than the
+		// deletion of the owner, then the orphaning finalizer won't be effective.
+		if shouldOrphanDependents(event, accessor) {
+			glog.V(6).Infof("add %s to the orphanQueue", existingNode.identity)
+			p.gc.orphanQueue.Add(existingNode)
+		}
+		// add/remove owner refs
+		added, removed := referencesDiffs(existingNode.owners, accessor.GetOwnerReferences())
+		if len(added) == 0 && len(removed) == 0 {
+			glog.V(6).Infof("The updateEvent %#v doesn't change node references, ignore", event)
+			return
+		}
+		// update the node itself
+		existingNode.owners = accessor.GetOwnerReferences()
+		// Add the node to its new owners' dependent lists.
+		p.addDependentToOwners(existingNode, added)
+		// remove the node from the dependent lists of nodes that are no longer
+		// in the node's owners list.
+		p.removeDependentFromOwners(existingNode, removed)
+	case event.eventType == deleteEvent:
+		if !found {
+			glog.V(6).Infof("%v doesn't exist in the graph, this shouldn't happen", accessor.GetUID())
+			return
+		}
+		p.removeNode(existingNode)
+		existingNode.dependentsLock.RLock()
+		defer existingNode.dependentsLock.RUnlock()
+		for dep := range existingNode.dependents {
+			p.gc.dirtyQueue.Add(dep)
+		}
+	}
+}
+
+// GarbageCollector is responsible for carrying out cascading deletion, and
+// removing ownerReferences from the dependents if the owner is deleted with
+// DeleteOptions.OrphanDependents=true.
+type GarbageCollector struct {
+	restMapper  meta.RESTMapper
+	clientPool  dynamic.ClientPool
+	dirtyQueue  *workqueue.Type
+	orphanQueue *workqueue.Type
+	monitors    []monitor
+	propagator  *Propagator
+}
+
+func monitorFor(p *Propagator, clientPool dynamic.ClientPool, resource unversioned.GroupVersionResource) (monitor, error) {
+	// TODO: consider keeping all the stores in one shared storage.
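+	// The GC has to observe every resource type, including ones this binary
+	// has no typed client for, so each monitor lists and watches through the
+	// dynamic client and handles objects as unstructured data.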
+ glog.V(6).Infof("create storage for resource %s", resource) + var monitor monitor + client, err := clientPool.ClientForGroupVersion(resource.GroupVersion()) + if err != nil { + return monitor, err + } + monitor.store, monitor.controller = framework.NewInformer( + // TODO: make special List and Watch function that removes fields other + // than ObjectMeta. + &cache.ListWatch{ + ListFunc: func(options api.ListOptions) (runtime.Object, error) { + // APIResource.Kind is not used by the dynamic client, so + // leave it empty. We want to list this resource in all + // namespaces if it's namespace scoped, so leave + // APIResource.Namespaced as false is all right. + apiResource := unversioned.APIResource{Name: resource.Resource} + return client.Resource(&apiResource, api.NamespaceAll).List(&options) + }, + WatchFunc: func(options api.ListOptions) (watch.Interface, error) { + // APIResource.Kind is not used by the dynamic client, so + // leave it empty. We want to list this resource in all + // namespaces if it's namespace scoped, so leave + // APIResource.Namespaced as false is all right. + apiResource := unversioned.APIResource{Name: resource.Resource} + return client.Resource(&apiResource, api.NamespaceAll).Watch(&options) + }, + }, + nil, + ResourceResyncTime, + framework.ResourceEventHandlerFuncs{ + // add the event to the propagator's eventQueue. + AddFunc: func(obj interface{}) { + event := event{ + eventType: addEvent, + obj: obj, + } + p.eventQueue.Add(event) + }, + UpdateFunc: func(oldObj, newObj interface{}) { + event := event{updateEvent, newObj, oldObj} + p.eventQueue.Add(event) + }, + DeleteFunc: func(obj interface{}) { + // delta fifo may wrap the object in a cache.DeletedFinalStateUnknown, unwrap it + if deletedFinalStateUnknown, ok := obj.(cache.DeletedFinalStateUnknown); ok { + obj = deletedFinalStateUnknown.Obj + } + event := event{ + eventType: deleteEvent, + obj: obj, + } + p.eventQueue.Add(event) + }, + }, + ) + return monitor, nil +} + +var ignoredResources = map[unversioned.GroupVersionResource]struct{}{ + unversioned.GroupVersionResource{Group: "extensions", Version: "v1beta1", Resource: "replicationcontrollers"}: {}, + unversioned.GroupVersionResource{Group: "", Version: "v1", Resource: "bindings"}: {}, + unversioned.GroupVersionResource{Group: "", Version: "v1", Resource: "componentstatuses"}: {}, + unversioned.GroupVersionResource{Group: "", Version: "v1", Resource: "events"}: {}, +} + +func NewGarbageCollector(clientPool dynamic.ClientPool, resources []unversioned.GroupVersionResource) (*GarbageCollector, error) { + gc := &GarbageCollector{ + clientPool: clientPool, + dirtyQueue: workqueue.New(), + orphanQueue: workqueue.New(), + // TODO: should use a dynamic RESTMapper built from the discovery results. 
+		restMapper: registered.RESTMapper(),
+	}
+	gc.propagator = &Propagator{
+		eventQueue: workqueue.New(),
+		uidToNode: &concurrentUIDToNode{
+			RWMutex:   &sync.RWMutex{},
+			uidToNode: make(map[types.UID]*node),
+		},
+		gc: gc,
+	}
+	for _, resource := range resources {
+		if _, ok := ignoredResources[resource]; ok {
+			glog.V(6).Infof("ignore resource %#v", resource)
+			continue
+		}
+		monitor, err := monitorFor(gc.propagator, gc.clientPool, resource)
+		if err != nil {
+			return nil, err
+		}
+		gc.monitors = append(gc.monitors, monitor)
+	}
+	return gc, nil
+}
+
+func (gc *GarbageCollector) worker() {
+	key, quit := gc.dirtyQueue.Get()
+	if quit {
+		return
+	}
+	defer gc.dirtyQueue.Done(key)
+	err := gc.processItem(key.(*node))
+	if err != nil {
+		utilruntime.HandleError(fmt.Errorf("Error syncing item %v: %v", key, err))
+	}
+}
+
+// apiResource consults the REST mapper to translate an (apiVersion, kind,
+// namespaced) tuple to a unversioned.APIResource struct.
+func (gc *GarbageCollector) apiResource(apiVersion, kind string, namespaced bool) (*unversioned.APIResource, error) {
+	fqKind := unversioned.FromAPIVersionAndKind(apiVersion, kind)
+	mapping, err := gc.restMapper.RESTMapping(fqKind.GroupKind(), apiVersion)
+	if err != nil {
+		return nil, fmt.Errorf("unable to get REST mapping for kind: %s, version: %s", kind, apiVersion)
+	}
+	glog.V(6).Infof("map kind %s, version %s to resource %s", kind, apiVersion, mapping.Resource)
+	resource := unversioned.APIResource{
+		Name:       mapping.Resource,
+		Namespaced: namespaced,
+		Kind:       kind,
+	}
+	return &resource, nil
+}
+
+func (gc *GarbageCollector) deleteObject(item objectReference) error {
+	fqKind := unversioned.FromAPIVersionAndKind(item.APIVersion, item.Kind)
+	client, err := gc.clientPool.ClientForGroupVersion(fqKind.GroupVersion())
+	if err != nil {
+		return err
+	}
+	resource, err := gc.apiResource(item.APIVersion, item.Kind, len(item.Namespace) != 0)
+	if err != nil {
+		return err
+	}
+	uid := item.UID
+	preconditions := v1.Preconditions{UID: &uid}
+	deleteOptions := v1.DeleteOptions{Preconditions: &preconditions}
+	return client.Resource(resource, item.Namespace).Delete(item.Name, &deleteOptions)
+}
+
+func (gc *GarbageCollector) getObject(item objectReference) (*runtime.Unstructured, error) {
+	fqKind := unversioned.FromAPIVersionAndKind(item.APIVersion, item.Kind)
+	client, err := gc.clientPool.ClientForGroupVersion(fqKind.GroupVersion())
+	if err != nil {
+		return nil, err
+	}
+	resource, err := gc.apiResource(item.APIVersion, item.Kind, len(item.Namespace) != 0)
+	if err != nil {
+		return nil, err
+	}
+	return client.Resource(resource, item.Namespace).Get(item.Name)
+}
+
+func (gc *GarbageCollector) updateObject(item objectReference, obj *runtime.Unstructured) (*runtime.Unstructured, error) {
+	fqKind := unversioned.FromAPIVersionAndKind(item.APIVersion, item.Kind)
+	client, err := gc.clientPool.ClientForGroupVersion(fqKind.GroupVersion())
+	if err != nil {
+		return nil, err
+	}
+	resource, err := gc.apiResource(item.APIVersion, item.Kind, len(item.Namespace) != 0)
+	if err != nil {
+		return nil, err
+	}
+	return client.Resource(resource, item.Namespace).Update(obj)
+}
+
+func (gc *GarbageCollector) patchObject(item objectReference, patch []byte) (*runtime.Unstructured, error) {
+	fqKind := unversioned.FromAPIVersionAndKind(item.APIVersion, item.Kind)
+	client, err := gc.clientPool.ClientForGroupVersion(fqKind.GroupVersion())
+	if err != nil {
+		return nil, err
+	}
+	resource, err := gc.apiResource(item.APIVersion, item.Kind, len(item.Namespace) != 0)
+	if err != nil {
+		return nil, err
+	}
+	return client.Resource(resource, item.Namespace).Patch(item.Name, api.StrategicMergePatchType, patch)
+}
+
+func
objectReferenceToUnstructured(ref objectReference) *runtime.Unstructured { + ret := &runtime.Unstructured{} + ret.SetKind(ref.Kind) + ret.SetAPIVersion(ref.APIVersion) + ret.SetUID(ref.UID) + ret.SetNamespace(ref.Namespace) + ret.SetName(ref.Name) + return ret +} + +func (gc *GarbageCollector) processItem(item *node) error { + // Get the latest item from the API server + latest, err := gc.getObject(item.identity) + if err != nil { + if errors.IsNotFound(err) { + // the Propagator can add a "virtual" node for an owner that doesn't + // exist yet, so we need to enqueue a virtual Delete event to remove + // the virtual node from Propagator.uidToNode. + glog.V(6).Infof("item %v not found, generating a virtual delete event", item.identity) + event := event{ + eventType: deleteEvent, + obj: objectReferenceToUnstructured(item.identity), + } + gc.propagator.eventQueue.Add(event) + return nil + } + return err + } + if latest.GetUID() != item.identity.UID { + glog.V(6).Infof("UID doesn't match, item %v no longer exists, ignoring it", item.identity) + return nil + } + ownerReferences := latest.GetOwnerReferences() + if len(ownerReferences) == 0 { + glog.V(6).Infof("object %s doesn't have an owner, continuing to the next item", item.identity) + return nil + } + // TODO: we need to remove dangling references if the object is not to be + // deleted. + for _, reference := range ownerReferences { + // TODO: we need to verify the reference resource is supported by the + // system. If it's not a valid resource, the garbage collector should i) + // ignore the reference when deciding if the object should be deleted, and + // ii) update the object to remove such references. This is to + // prevent objects having references to an old resource from being + // deleted during a cluster upgrade. + fqKind := unversioned.FromAPIVersionAndKind(reference.APIVersion, reference.Kind) + client, err := gc.clientPool.ClientForGroupVersion(fqKind.GroupVersion()) + if err != nil { + return err + } + resource, err := gc.apiResource(reference.APIVersion, reference.Kind, len(item.identity.Namespace) != 0) + if err != nil { + return err + } + owner, err := client.Resource(resource, item.identity.Namespace).Get(reference.Name) + if err == nil { + if owner.GetUID() != reference.UID { + glog.V(6).Infof("object %s's owner %s/%s, %s exists, but the UID doesn't match", item.identity.UID, reference.APIVersion, reference.Kind, reference.Name) + continue + } + glog.V(6).Infof("object %s has at least one existing owner, will not garbage collect it", item.identity.UID) + return nil + } else if errors.IsNotFound(err) { + glog.V(6).Infof("object %s's owner %s/%s, %s is not found", item.identity.UID, reference.APIVersion, reference.Kind, reference.Name) + } else { + return err + } + } + glog.V(2).Infof("none of object %s's owners exist any more, will garbage collect it", item.identity) + return gc.deleteObject(item.identity) +} + +func (gc *GarbageCollector) Run(workers int, stopCh <-chan struct{}) { + for _, monitor := range gc.monitors { + go monitor.controller.Run(stopCh) + } + + // worker + go wait.Until(gc.propagator.processEvent, 0, stopCh) + + for i := 0; i < workers; i++ { + go wait.Until(gc.worker, 0, stopCh) + go wait.Until(gc.orphanFinalizer, 0, stopCh) + } + <-stopCh + glog.Infof("Shutting down garbage collector") + gc.dirtyQueue.ShutDown() + gc.orphanQueue.ShutDown() + gc.propagator.eventQueue.ShutDown() +} + +// QueuesDrained returns true if the dirtyQueue, eventQueue, and orphanQueue are drained. It's +// useful for debugging. 
Note that it doesn't guarantee the workers are idle. +func (gc *GarbageCollector) QueuesDrained() bool { + return gc.dirtyQueue.Len() == 0 && gc.propagator.eventQueue.Len() == 0 && gc.orphanQueue.Len() == 0 +} + +// *FOR TEST USE ONLY* It's not safe to call this function when the GC is still +// busy. +// GraphHasUID returns true if the Propagator has any of the given UIDs stored in its +// uidToNode graph. It's useful for debugging. +func (gc *GarbageCollector) GraphHasUID(UIDs []types.UID) bool { + for _, u := range UIDs { + if _, ok := gc.propagator.uidToNode.Read(u); ok { + return true + } + } + return false +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/garbagecollector/garbagecollector_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/garbagecollector/garbagecollector_test.go new file mode 100644 index 000000000000..6c044a90c60e --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/garbagecollector/garbagecollector_test.go @@ -0,0 +1,319 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package garbagecollector + +import ( + "net/http" + "net/http/httptest" + "strings" + "sync" + "testing" + + _ "k8s.io/kubernetes/pkg/api/install" + + "github.com/stretchr/testify/assert" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/meta/metatypes" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/client/restclient" + "k8s.io/kubernetes/pkg/client/typed/dynamic" + "k8s.io/kubernetes/pkg/types" + "k8s.io/kubernetes/pkg/util/json" + "k8s.io/kubernetes/pkg/util/sets" + "k8s.io/kubernetes/pkg/util/workqueue" +) + +func TestNewGarbageCollector(t *testing.T) { + clientPool := dynamic.NewClientPool(&restclient.Config{}, dynamic.LegacyAPIPathResolverFunc) + podResource := []unversioned.GroupVersionResource{{Version: "v1", Resource: "pods"}} + gc, err := NewGarbageCollector(clientPool, podResource) + if err != nil { + t.Fatal(err) + } + assert.Equal(t, 1, len(gc.monitors)) +} + +// fakeAction records information about requests to aid in testing. +type fakeAction struct { + method string + path string +} + +// String returns method=path to aid in testing +func (f *fakeAction) String() string { + return strings.Join([]string{f.method, f.path}, "=") +} + +type FakeResponse struct { + statusCode int + content []byte +} + +// fakeActionHandler holds a list of fakeActions received +type fakeActionHandler struct { + // statusCode and content returned by this handler for different method + path. 
+ response map[string]FakeResponse + + lock sync.Mutex + actions []fakeAction +} + +// ServeHTTP logs the action that occurred and always returns the associated status code +func (f *fakeActionHandler) ServeHTTP(response http.ResponseWriter, request *http.Request) { + f.lock.Lock() + defer f.lock.Unlock() + + f.actions = append(f.actions, fakeAction{method: request.Method, path: request.URL.Path}) + fakeResponse, ok := f.response[request.Method+request.URL.Path] + if !ok { + fakeResponse.statusCode = 200 + fakeResponse.content = []byte("{\"kind\": \"List\"}") + } + response.WriteHeader(fakeResponse.statusCode) + response.Write(fakeResponse.content) +} + +// testServerAndClientConfig returns a server that listens and a config that can reference it +func testServerAndClientConfig(handler func(http.ResponseWriter, *http.Request)) (*httptest.Server, *restclient.Config) { + srv := httptest.NewServer(http.HandlerFunc(handler)) + config := &restclient.Config{ + Host: srv.URL, + } + return srv, config +} + +func newDanglingPod() *v1.Pod { + return &v1.Pod{ + TypeMeta: unversioned.TypeMeta{ + Kind: "Pod", + APIVersion: "v1", + }, + ObjectMeta: v1.ObjectMeta{ + Name: "ToBeDeletedPod", + Namespace: "ns1", + OwnerReferences: []v1.OwnerReference{ + { + Kind: "ReplicationController", + Name: "owner1", + UID: "123", + APIVersion: "v1", + }, + }, + }, + } +} + +// Test that the processItem function performs the expected actions. +func TestProcessItem(t *testing.T) { + pod := newDanglingPod() + podBytes, err := json.Marshal(pod) + if err != nil { + t.Fatal(err) + } + testHandler := &fakeActionHandler{ + response: map[string]FakeResponse{ + "GET" + "/api/v1/namespaces/ns1/replicationcontrollers/owner1": { + 404, + []byte{}, + }, + "GET" + "/api/v1/namespaces/ns1/pods/ToBeDeletedPod": { + 200, + podBytes, + }, + }, + } + podResource := []unversioned.GroupVersionResource{{Version: "v1", Resource: "pods"}} + srv, clientConfig := testServerAndClientConfig(testHandler.ServeHTTP) + defer srv.Close() + clientPool := dynamic.NewClientPool(clientConfig, dynamic.LegacyAPIPathResolverFunc) + gc, err := NewGarbageCollector(clientPool, podResource) + if err != nil { + t.Fatal(err) + } + item := &node{ + identity: objectReference{ + OwnerReference: metatypes.OwnerReference{ + Kind: pod.Kind, + APIVersion: pod.APIVersion, + Name: pod.Name, + UID: pod.UID, + }, + Namespace: pod.Namespace, + }, + // owners are intentionally left empty. The processItem routine should get the latest item from the server. + owners: nil, + } + err = gc.processItem(item) + if err != nil { + t.Errorf("Unexpected Error: %v", err) + } + expectedActionSet := sets.NewString() + expectedActionSet.Insert("GET=/api/v1/namespaces/ns1/replicationcontrollers/owner1") + expectedActionSet.Insert("DELETE=/api/v1/namespaces/ns1/pods/ToBeDeletedPod") + expectedActionSet.Insert("GET=/api/v1/namespaces/ns1/pods/ToBeDeletedPod") + + actualActionSet := sets.NewString() + for _, action := range testHandler.actions { + actualActionSet.Insert(action.String()) + } + if !expectedActionSet.Equal(actualActionSet) { + t.Errorf("expected actions:\n%v\n but got:\n%v\nDifference:\n%v", expectedActionSet, + actualActionSet, expectedActionSet.Difference(actualActionSet)) + } +} + +// verifyGraphInvariants verifies that all of a node's owners list the node as a +// dependent and vice versa. uidToNode has all the nodes in the graph. 
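Aside: the bidirectional bookkeeping that this helper checks can be illustrated with a minimal, runnable sketch. The types below are simplified stand-ins invented for illustration, not the controller's actual node/Propagator structs:

package main

import "fmt"

type uid string

// fakeNode is a simplified stand-in for the garbage collector's graph node.
type fakeNode struct {
	id         uid
	owners     []uid
	dependents map[*fakeNode]struct{}
}

// addEdge records n as a dependent of owner and owner as an owner of n,
// keeping both directions of the graph in sync; this is the invariant
// the verifyGraphInvariants helper asserts.
func addEdge(owner, n *fakeNode) {
	n.owners = append(n.owners, owner.id)
	owner.dependents[n] = struct{}{}
}

func main() {
	a := &fakeNode{id: "a", dependents: map[*fakeNode]struct{}{}}
	b := &fakeNode{id: "b", dependents: map[*fakeNode]struct{}{}}
	addEdge(a, b)
	_, forward := a.dependents[b] // a lists b as a dependent
	backward := false
	for _, o := range b.owners { // b lists a as an owner
		if o == a.id {
			backward = true
		}
	}
	fmt.Println(forward && backward) // prints true: the invariant holds
}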
+func verifyGraphInvariants(scenario string, uidToNode map[types.UID]*node, t *testing.T) { + for myUID, node := range uidToNode { + for dependentNode := range node.dependents { + found := false + for _, owner := range dependentNode.owners { + if owner.UID == myUID { + found = true + break + } + } + if !found { + t.Errorf("scenario: %s: node %s has node %s as a dependent, but it's not present in the latter node's owners list", scenario, node.identity, dependentNode.identity) + } + } + + for _, owner := range node.owners { + ownerNode, ok := uidToNode[owner.UID] + if !ok { + // It's possible that the owner node doesn't exist + continue + } + if _, ok := ownerNode.dependents[node]; !ok { + t.Errorf("node %s has node %s as an owner, but it's not present in the latter node's dependents list", node.identity, ownerNode.identity) + } + } + } +} + +func createEvent(eventType eventType, selfUID string, owners []string) event { + var ownerReferences []api.OwnerReference + for i := 0; i < len(owners); i++ { + ownerReferences = append(ownerReferences, api.OwnerReference{UID: types.UID(owners[i])}) + } + return event{ + eventType: eventType, + obj: &api.Pod{ + ObjectMeta: api.ObjectMeta{ + UID: types.UID(selfUID), + OwnerReferences: ownerReferences, + }, + }, + } +} + +func TestProcessEvent(t *testing.T) { + var testScenarios = []struct { + name string + // a series of events that will be supplied to the + // Propagator.eventQueue. + events []event + }{ + { + name: "test1", + events: []event{ + createEvent(addEvent, "1", []string{}), + createEvent(addEvent, "2", []string{"1"}), + createEvent(addEvent, "3", []string{"1", "2"}), + }, + }, + { + name: "test2", + events: []event{ + createEvent(addEvent, "1", []string{}), + createEvent(addEvent, "2", []string{"1"}), + createEvent(addEvent, "3", []string{"1", "2"}), + createEvent(addEvent, "4", []string{"2"}), + createEvent(deleteEvent, "2", []string{"doesn't matter"}), + }, + }, + { + name: "test3", + events: []event{ + createEvent(addEvent, "1", []string{}), + createEvent(addEvent, "2", []string{"1"}), + createEvent(addEvent, "3", []string{"1", "2"}), + createEvent(addEvent, "4", []string{"3"}), + createEvent(updateEvent, "2", []string{"4"}), + }, + }, + { + name: "reverse test2", + events: []event{ + createEvent(addEvent, "4", []string{"2"}), + createEvent(addEvent, "3", []string{"1", "2"}), + createEvent(addEvent, "2", []string{"1"}), + createEvent(addEvent, "1", []string{}), + createEvent(deleteEvent, "2", []string{"doesn't matter"}), + }, + }, + } + + for _, scenario := range testScenarios { + propagator := &Propagator{ + eventQueue: workqueue.New(), + uidToNode: &concurrentUIDToNode{ + RWMutex: &sync.RWMutex{}, + uidToNode: make(map[types.UID]*node), + }, + gc: &GarbageCollector{ + dirtyQueue: workqueue.New(), + }, + } + for i := 0; i < len(scenario.events); i++ { + propagator.eventQueue.Add(scenario.events[i]) + propagator.processEvent() + verifyGraphInvariants(scenario.name, propagator.uidToNode.uidToNode, t) + } + } +} + +// TestDependentsRace relies on golang's data race detector to check if there is +// a data race on the dependents field. 
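Aside: tests like this one only fail when the binary is built with the race detector (go test -race). A minimal, self-contained sketch of the pattern being exercised, two goroutines sharing a map that is only safe because of the RWMutex (a hypothetical example, not the controller's types):

package main

import "sync"

func main() {
	var mu sync.RWMutex
	dependents := map[string]struct{}{}
	var wg sync.WaitGroup
	wg.Add(2)
	// writer: mutates the shared map under the write lock
	go func() {
		defer wg.Done()
		for i := 0; i < 100; i++ {
			mu.Lock()
			dependents["d"] = struct{}{}
			delete(dependents, "d")
			mu.Unlock()
		}
	}()
	// reader: iterates the same map under the read lock
	go func() {
		defer wg.Done()
		for i := 0; i < 100; i++ {
			mu.RLock()
			for range dependents {
			}
			mu.RUnlock()
		}
	}()
	wg.Wait() // with the locks removed, running under -race reports the race
}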
+func TestDependentsRace(t *testing.T) { + clientPool := dynamic.NewClientPool(&restclient.Config{}, dynamic.LegacyAPIPathResolverFunc) + podResource := []unversioned.GroupVersionResource{{Version: "v1", Resource: "pods"}} + gc, err := NewGarbageCollector(clientPool, podResource) + if err != nil { + t.Fatal(err) + } + + const updates = 100 + owner := &node{dependentsLock: &sync.RWMutex{}, dependents: make(map[*node]struct{})} + ownerUID := types.UID("owner") + gc.propagator.uidToNode.Write(owner) + go func() { + for i := 0; i < updates; i++ { + dependent := &node{} + gc.propagator.addDependentToOwners(dependent, []metatypes.OwnerReference{{UID: ownerUID}}) + gc.propagator.removeDependentFromOwners(dependent, []metatypes.OwnerReference{{UID: ownerUID}}) + } + }() + go func() { + gc.orphanQueue.Add(owner) + for i := 0; i < updates; i++ { + gc.orphanFinalizer() + } + }() +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/gc/gc_controller.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/gc/gc_controller.go index bf09ae928b25..485c326c177f 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/gc/gc_controller.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/gc/gc_controller.go @@ -29,6 +29,7 @@ import ( "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/util/metrics" utilruntime "k8s.io/kubernetes/pkg/util/runtime" "k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/watch" @@ -49,6 +50,9 @@ type GCController struct { } func New(kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, threshold int) *GCController { + if kubeClient != nil && kubeClient.Core().GetRESTClient().GetRateLimiter() != nil { + metrics.RegisterMetricAndTrackRateLimiterUsage("gc_controller", kubeClient.Core().GetRESTClient().GetRateLimiter()) + } gcc := &GCController{ kubeClient: kubeClient, threshold: threshold, @@ -59,7 +63,7 @@ func New(kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFun terminatedSelector := fields.ParseSelectorOrDie("status.phase!=" + string(api.PodPending) + ",status.phase!=" + string(api.PodRunning) + ",status.phase!=" + string(api.PodUnknown)) - gcc.podStore.Store, gcc.podStoreSyncer = framework.NewInformer( + gcc.podStore.Indexer, gcc.podStoreSyncer = framework.NewIndexerInformer( &cache.ListWatch{ ListFunc: func(options api.ListOptions) (runtime.Object, error) { options.FieldSelector = terminatedSelector @@ -73,6 +77,10 @@ func New(kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFun &api.Pod{}, resyncPeriod(), framework.ResourceEventHandlerFuncs{}, + // We don't actually need to build an index for podStore here, but we build one for consistency. + // It will ensure that if people start making use of the podStore in more specific ways, + // they'll get the benefits they expect. It will also reserve the name for future refactorings. + cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, ) return gcc } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/gc/gc_controller_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/gc/gc_controller_test.go new file mode 100644 index 000000000000..e28c9cf03fad --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/gc/gc_controller_test.go @@ -0,0 +1,104 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package gc + +import ( + "sync" + "testing" + "time" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" + "k8s.io/kubernetes/pkg/controller" + "k8s.io/kubernetes/pkg/util/sets" +) + +func TestGC(t *testing.T) { + type nameToPhase struct { + name string + phase api.PodPhase + } + + testCases := []struct { + pods []nameToPhase + threshold int + deletedPodNames sets.String + }{ + { + pods: []nameToPhase{ + {name: "a", phase: api.PodFailed}, + {name: "b", phase: api.PodSucceeded}, + }, + threshold: 0, + deletedPodNames: sets.NewString("a", "b"), + }, + { + pods: []nameToPhase{ + {name: "a", phase: api.PodFailed}, + {name: "b", phase: api.PodSucceeded}, + }, + threshold: 1, + deletedPodNames: sets.NewString("a"), + }, + { + pods: []nameToPhase{ + {name: "a", phase: api.PodFailed}, + {name: "b", phase: api.PodSucceeded}, + }, + threshold: 5, + deletedPodNames: sets.NewString(), + }, + } + + for i, test := range testCases { + client := fake.NewSimpleClientset() + gcc := New(client, controller.NoResyncPeriodFunc, test.threshold) + deletedPodNames := make([]string, 0) + var lock sync.Mutex + gcc.deletePod = func(_, name string) error { + lock.Lock() + defer lock.Unlock() + deletedPodNames = append(deletedPodNames, name) + return nil + } + + creationTime := time.Unix(0, 0) + for _, pod := range test.pods { + creationTime = creationTime.Add(1 * time.Hour) + gcc.podStore.Indexer.Add(&api.Pod{ + ObjectMeta: api.ObjectMeta{Name: pod.name, CreationTimestamp: unversioned.Time{Time: creationTime}}, + Status: api.PodStatus{Phase: pod.phase}, + }) + } + + gcc.gc() + + pass := true + for _, pod := range deletedPodNames { + if !test.deletedPodNames.Has(pod) { + pass = false + } + } + if len(deletedPodNames) != len(test.deletedPodNames) { + pass = false + } + if !pass { + t.Errorf("[%v] expected and actual deleted pods did not match.\n\texpected: %v\n\tactual: %v", i, test.deletedPodNames, deletedPodNames) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/job/controller.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/job/controller.go index 729862aae737..964a4ec14d6e 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/job/controller.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/job/controller.go @@ -25,15 +25,17 @@ import ( "github.com/golang/glog" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/pkg/apis/batch" "k8s.io/kubernetes/pkg/client/cache" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" + unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned" "k8s.io/kubernetes/pkg/client/record" - unversionedcore "k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned" "k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller/framework" 
+ "k8s.io/kubernetes/pkg/controller/framework/informers" replicationcontroller "k8s.io/kubernetes/pkg/controller/replication" "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/util/metrics" utilruntime "k8s.io/kubernetes/pkg/util/runtime" "k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/workqueue" @@ -44,8 +46,15 @@ type JobController struct { kubeClient clientset.Interface podControl controller.PodControlInterface + // internalPodInformer is used to hold a personal informer. If we're using + // a normal shared informer, then the informer will be started for us. If + // we have a personal informer, we must start it ourselves. If you start + // the controller using NewJobController(passing SharedInformer), this + // will be null + internalPodInformer framework.SharedInformer + // To allow injection of updateJobStatus for testing. - updateHandler func(job *extensions.Job) error + updateHandler func(job *batch.Job) error syncHandler func(jobKey string) error // podStoreSynced returns true if the pod store has been synced at least once. // Added as a member to the struct to allow injection for testing. @@ -61,8 +70,6 @@ type JobController struct { // A store of pods, populated by the podController podStore cache.StoreToPodLister - // Watches changes to all pods - podController *framework.Controller // Jobs that need to be updated queue *workqueue.Type @@ -70,11 +77,15 @@ type JobController struct { recorder record.EventRecorder } -func NewJobController(kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc) *JobController { +func NewJobController(podInformer framework.SharedIndexInformer, kubeClient clientset.Interface) *JobController { eventBroadcaster := record.NewBroadcaster() eventBroadcaster.StartLogging(glog.Infof) // TODO: remove the wrapper when every clients have moved to use the clientset. - eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{kubeClient.Core().Events("")}) + eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: kubeClient.Core().Events("")}) + + if kubeClient != nil && kubeClient.Core().GetRESTClient().GetRateLimiter() != nil { + metrics.RegisterMetricAndTrackRateLimiterUsage("job_controller", kubeClient.Core().GetRESTClient().GetRateLimiter()) + } jm := &JobController{ kubeClient: kubeClient, @@ -90,19 +101,19 @@ func NewJobController(kubeClient clientset.Interface, resyncPeriod controller.Re jm.jobStore.Store, jm.jobController = framework.NewInformer( &cache.ListWatch{ ListFunc: func(options api.ListOptions) (runtime.Object, error) { - return jm.kubeClient.Extensions().Jobs(api.NamespaceAll).List(options) + return jm.kubeClient.Batch().Jobs(api.NamespaceAll).List(options) }, WatchFunc: func(options api.ListOptions) (watch.Interface, error) { - return jm.kubeClient.Extensions().Jobs(api.NamespaceAll).Watch(options) + return jm.kubeClient.Batch().Jobs(api.NamespaceAll).Watch(options) }, }, - &extensions.Job{}, + &batch.Job{}, // TODO: Can we have much longer period here? 
replicationcontroller.FullControllerResyncPeriod, framework.ResourceEventHandlerFuncs{ AddFunc: jm.enqueueController, UpdateFunc: func(old, cur interface{}) { - if job := cur.(*extensions.Job); !isJobFinished(job) { + if job := cur.(*batch.Job); !isJobFinished(job) { jm.enqueueController(job) } }, @@ -110,27 +121,24 @@ func NewJobController(kubeClient clientset.Interface, resyncPeriod controller.Re }, ) - jm.podStore.Store, jm.podController = framework.NewInformer( - &cache.ListWatch{ - ListFunc: func(options api.ListOptions) (runtime.Object, error) { - return jm.kubeClient.Core().Pods(api.NamespaceAll).List(options) - }, - WatchFunc: func(options api.ListOptions) (watch.Interface, error) { - return jm.kubeClient.Core().Pods(api.NamespaceAll).Watch(options) - }, - }, - &api.Pod{}, - resyncPeriod(), - framework.ResourceEventHandlerFuncs{ - AddFunc: jm.addPod, - UpdateFunc: jm.updatePod, - DeleteFunc: jm.deletePod, - }, - ) + podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{ + AddFunc: jm.addPod, + UpdateFunc: jm.updatePod, + DeleteFunc: jm.deletePod, + }) + jm.podStore.Indexer = podInformer.GetIndexer() + jm.podStoreSynced = podInformer.HasSynced jm.updateHandler = jm.updateJobStatus jm.syncHandler = jm.syncJob - jm.podStoreSynced = jm.podController.HasSynced + return jm +} + +func NewJobControllerFromClient(kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc) *JobController { + podInformer := informers.CreateSharedPodIndexInformer(kubeClient, resyncPeriod()) + jm := NewJobController(podInformer, kubeClient) + jm.internalPodInformer = podInformer + return jm } @@ -138,17 +146,21 @@ func NewJobController(kubeClient clientset.Interface, resyncPeriod controller.Re func (jm *JobController) Run(workers int, stopCh <-chan struct{}) { defer utilruntime.HandleCrash() go jm.jobController.Run(stopCh) for i := 0; i < workers; i++ { go wait.Until(jm.worker, time.Second, stopCh) } + + if jm.internalPodInformer != nil { + go jm.internalPodInformer.Run(stopCh) + } + <-stopCh glog.Infof("Shutting down Job Manager") jm.queue.ShutDown() } // getPodJob returns the job managing the given pod. -func (jm *JobController) getPodJob(pod *api.Pod) *extensions.Job { +func (jm *JobController) getPodJob(pod *api.Pod) *batch.Job { jobs, err := jm.jobStore.GetPodJobs(pod) if err != nil { glog.V(4).Infof("No jobs found for pod %v, job controller will avoid syncing", pod.Name) @@ -244,7 +256,7 @@ func (jm *JobController) deletePod(obj interface{}) { } } -// obj could be an *extensions.Job, or a DeletionFinalStateUnknown marker item. +// obj could be a *batch.Job, or a DeletionFinalStateUnknown marker item. func (jm *JobController) enqueueController(obj interface{}) { key, err := controller.KeyFunc(obj) if err != nil { @@ -307,7 +319,7 @@ func (jm *JobController) syncJob(key string) error { jm.queue.Add(key) return err } - job := *obj.(*extensions.Job) + job := *obj.(*batch.Job) // Check the expectations of the job before counting active pods, otherwise a new pod can sneak in // and update the expectations after we've retrieved active pods from the store. 
If a new pod enters @@ -327,7 +339,7 @@ func (jm *JobController) syncJob(key string) error { } activePods := controller.FilterActivePods(podList.Items) - active := len(activePods) + active := int32(len(activePods)) succeeded, failed := getStatus(podList.Items) conditions := len(job.Status.Conditions) if job.Status.StartTime == nil { @@ -346,9 +358,9 @@ func (jm *JobController) syncJob(key string) error { // some sort of solution to above problem. // kill remaining active pods wait := sync.WaitGroup{} - wait.Add(active) - for i := 0; i < active; i++ { - go func(ix int) { + wait.Add(int(active)) + for i := int32(0); i < active; i++ { + go func(ix int32) { defer wait.Done() if err := jm.podControl.DeletePod(job.Namespace, activePods[ix].Name, &job); err != nil { defer utilruntime.HandleError(err) @@ -359,7 +371,7 @@ func (jm *JobController) syncJob(key string) error { // update status values accordingly failed += active active = 0 - job.Status.Conditions = append(job.Status.Conditions, newCondition(extensions.JobFailed, "DeadlineExceeded", "Job was active longer than specified deadline")) + job.Status.Conditions = append(job.Status.Conditions, newCondition(batch.JobFailed, "DeadlineExceeded", "Job was active longer than specified deadline")) jm.recorder.Event(&job, api.EventTypeNormal, "DeadlineExceeded", "Job was active longer than specified deadline") } else { if jobNeedsSync { @@ -393,7 +405,7 @@ func (jm *JobController) syncJob(key string) error { } } if complete { - job.Status.Conditions = append(job.Status.Conditions, newCondition(extensions.JobComplete, "", "")) + job.Status.Conditions = append(job.Status.Conditions, newCondition(batch.JobComplete, "", "")) now := unversioned.Now() job.Status.CompletionTime = &now } @@ -414,7 +426,7 @@ func (jm *JobController) syncJob(key string) error { } // pastActiveDeadline checks if job has ActiveDeadlineSeconds field set and if it is exceeded. -func pastActiveDeadline(job *extensions.Job) bool { +func pastActiveDeadline(job *batch.Job) bool { if job.Spec.ActiveDeadlineSeconds == nil || job.Status.StartTime == nil { return false } @@ -425,8 +437,8 @@ func pastActiveDeadline(job *extensions.Job) bool { return duration >= allowedDuration } -func newCondition(conditionType extensions.JobConditionType, reason, message string) extensions.JobCondition { - return extensions.JobCondition{ +func newCondition(conditionType batch.JobConditionType, reason, message string) batch.JobCondition { + return batch.JobCondition{ Type: conditionType, Status: api.ConditionTrue, LastProbeTime: unversioned.Now(), @@ -437,17 +449,17 @@ func newCondition(conditionType extensions.JobConditionType, reason, message str } // getStatus returns the number of succeeded and failed pods running a job -func getStatus(pods []api.Pod) (succeeded, failed int) { - succeeded = filterPods(pods, api.PodSucceeded) - failed = filterPods(pods, api.PodFailed) +func getStatus(pods []api.Pod) (succeeded, failed int32) { + succeeded = int32(filterPods(pods, api.PodSucceeded)) + failed = int32(filterPods(pods, api.PodFailed)) return } // manageJob is the core method responsible for managing the number of running // pods according to what is specified in the job.Spec. 
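Aside: manageJob below fans pod creations and deletions out to goroutines and joins them with a sync.WaitGroup; with the counters switched to int32, the WaitGroup still takes plain ints, hence the conversions in the hunk. A hedged sketch of that fan-out/join shape (deleteN and del are illustrative names, not the controller's API):

package main

import (
	"fmt"
	"sync"
)

// deleteN runs del for indices [0, n) concurrently and waits for all of them,
// mirroring the shape manageJob uses for parallel pod deletes and creates.
func deleteN(n int32, del func(ix int32) error) {
	wg := sync.WaitGroup{}
	wg.Add(int(n)) // WaitGroup counters are ints, hence the int32 -> int conversion
	for i := int32(0); i < n; i++ {
		go func(ix int32) {
			defer wg.Done()
			if err := del(ix); err != nil {
				fmt.Println("delete failed:", err)
			}
		}(i)
	}
	wg.Wait()
}

func main() {
	deleteN(3, func(ix int32) error {
		fmt.Println("deleting pod", ix)
		return nil
	})
}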
-func (jm *JobController) manageJob(activePods []*api.Pod, succeeded int, job *extensions.Job) int { +func (jm *JobController) manageJob(activePods []*api.Pod, succeeded int32, job *batch.Job) int32 { var activeLock sync.Mutex - active := len(activePods) + active := int32(len(activePods)) parallelism := *job.Spec.Parallelism jobKey, err := controller.KeyFunc(job) if err != nil { @@ -457,7 +469,7 @@ func (jm *JobController) manageJob(activePods []*api.Pod, succeeded int, job *ex if active > parallelism { diff := active - parallelism - jm.expectations.ExpectDeletions(jobKey, diff) + jm.expectations.ExpectDeletions(jobKey, int(diff)) glog.V(4).Infof("Too many pods running job %q, need %d, deleting %d", jobKey, parallelism, diff) // Sort the pods in the order such that not-ready < ready, unscheduled // < scheduled, and pending < running. This ensures that we delete pods @@ -466,9 +478,9 @@ func (jm *JobController) manageJob(activePods []*api.Pod, succeeded int, job *ex active -= diff wait := sync.WaitGroup{} - wait.Add(diff) - for i := 0; i < diff; i++ { - go func(ix int) { + wait.Add(int(diff)) + for i := int32(0); i < diff; i++ { + go func(ix int32) { defer wait.Done() if err := jm.podControl.DeletePod(job.Namespace, activePods[ix].Name, job); err != nil { defer utilruntime.HandleError(err) @@ -483,7 +495,7 @@ func (jm *JobController) manageJob(activePods []*api.Pod, succeeded int, job *ex wait.Wait() } else if active < parallelism { - wantActive := 0 + wantActive := int32(0) if job.Spec.Completions == nil { // Job does not specify a number of completions. Therefore, number active // should be equal to parallelism, unless the job has seen at least @@ -506,13 +518,13 @@ func (jm *JobController) manageJob(activePods []*api.Pod, succeeded int, job *ex glog.Errorf("More active than wanted: job %q, want %d, have %d", jobKey, wantActive, active) diff = 0 } - jm.expectations.ExpectCreations(jobKey, diff) + jm.expectations.ExpectCreations(jobKey, int(diff)) glog.V(4).Infof("Too few pods running job %q, need %d, creating %d", jobKey, wantActive, diff) active += diff wait := sync.WaitGroup{} - wait.Add(diff) - for i := 0; i < diff; i++ { + wait.Add(int(diff)) + for i := int32(0); i < diff; i++ { go func() { defer wait.Done() if err := jm.podControl.CreatePods(job.Namespace, &job.Spec.Template, job); err != nil { @@ -531,8 +543,8 @@ func (jm *JobController) manageJob(activePods []*api.Pod, succeeded int, job *ex return active } -func (jm *JobController) updateJobStatus(job *extensions.Job) error { - _, err := jm.kubeClient.Extensions().Jobs(job.Namespace).UpdateStatus(job) +func (jm *JobController) updateJobStatus(job *batch.Job) error { + _, err := jm.kubeClient.Batch().Jobs(job.Namespace).UpdateStatus(job) return err } @@ -547,9 +559,9 @@ func filterPods(pods []api.Pod, phase api.PodPhase) int { return result } -func isJobFinished(j *extensions.Job) bool { +func isJobFinished(j *batch.Job) bool { for _, c := range j.Status.Conditions { - if (c.Type == extensions.JobComplete || c.Type == extensions.JobFailed) && c.Status == api.ConditionTrue { + if (c.Type == batch.JobComplete || c.Type == batch.JobFailed) && c.Status == api.ConditionTrue { return true } } @@ -557,7 +569,7 @@ func isJobFinished(j *extensions.Job) bool { } // byCreationTimestamp sorts a list by creation timestamp, using their names as a tie breaker. 
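Aside: byCreationTimestamp implements sort.Interface with the object name as a tie breaker. A self-contained sketch of the same Less logic on a simplified struct (the real slice holds batch.Job values):

package main

import (
	"fmt"
	"sort"
	"time"
)

type obj struct {
	name    string
	created time.Time
}

type byCreation []obj

func (o byCreation) Len() int      { return len(o) }
func (o byCreation) Swap(i, j int) { o[i], o[j] = o[j], o[i] }

// Less orders by creation time first and falls back to the name, so the
// ordering stays total even when two objects were created in the same instant.
func (o byCreation) Less(i, j int) bool {
	if o[i].created.Equal(o[j].created) {
		return o[i].name < o[j].name
	}
	return o[i].created.Before(o[j].created)
}

func main() {
	t0 := time.Unix(0, 0)
	items := []obj{{"b", t0}, {"a", t0}, {"c", t0.Add(-time.Hour)}}
	sort.Sort(byCreation(items))
	fmt.Println(items) // c sorts first (older), then a before b by name
}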
-type byCreationTimestamp []extensions.Job +type byCreationTimestamp []batch.Job func (o byCreationTimestamp) Len() int { return len(o) } func (o byCreationTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/job/controller_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/job/controller_test.go new file mode 100644 index 000000000000..95f0432b682f --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/job/controller_test.go @@ -0,0 +1,711 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package job + +import ( + "fmt" + "reflect" + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/apis/batch" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" + "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" + "k8s.io/kubernetes/pkg/client/restclient" + "k8s.io/kubernetes/pkg/client/testing/core" + "k8s.io/kubernetes/pkg/client/unversioned/testclient" + "k8s.io/kubernetes/pkg/controller" + "k8s.io/kubernetes/pkg/util/rand" + "k8s.io/kubernetes/pkg/watch" +) + +var alwaysReady = func() bool { return true } + +func newJob(parallelism, completions int32) *batch.Job { + j := &batch.Job{ + ObjectMeta: api.ObjectMeta{ + Name: "foobar", + Namespace: api.NamespaceDefault, + }, + Spec: batch.JobSpec{ + Selector: &unversioned.LabelSelector{ + MatchLabels: map[string]string{"foo": "bar"}, + }, + Template: api.PodTemplateSpec{ + ObjectMeta: api.ObjectMeta{ + Labels: map[string]string{ + "foo": "bar", + }, + }, + Spec: api.PodSpec{ + Containers: []api.Container{ + {Image: "foo/bar"}, + }, + }, + }, + }, + } + // Special case: -1 for either completions or parallelism means leave nil (negative is not allowed + // in practice by validation). 
+ if completions >= 0 { + j.Spec.Completions = &completions + } else { + j.Spec.Completions = nil + } + if parallelism >= 0 { + j.Spec.Parallelism = ¶llelism + } else { + j.Spec.Parallelism = nil + } + return j +} + +func getKey(job *batch.Job, t *testing.T) string { + if key, err := controller.KeyFunc(job); err != nil { + t.Errorf("Unexpected error getting key for job %v: %v", job.Name, err) + return "" + } else { + return key + } +} + +// create count pods with the given phase for the given job +func newPodList(count int32, status api.PodPhase, job *batch.Job) []api.Pod { + pods := []api.Pod{} + for i := int32(0); i < count; i++ { + newPod := api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: fmt.Sprintf("pod-%v", rand.String(10)), + Labels: job.Spec.Selector.MatchLabels, + Namespace: job.Namespace, + }, + Status: api.PodStatus{Phase: status}, + } + pods = append(pods, newPod) + } + return pods +} + +func TestControllerSyncJob(t *testing.T) { + testCases := map[string]struct { + // job setup + parallelism int32 + completions int32 + + // pod setup + podControllerError error + activePods int32 + succeededPods int32 + failedPods int32 + + // expectations + expectedCreations int32 + expectedDeletions int32 + expectedActive int32 + expectedSucceeded int32 + expectedFailed int32 + expectedComplete bool + }{ + "job start": { + 2, 5, + nil, 0, 0, 0, + 2, 0, 2, 0, 0, false, + }, + "WQ job start": { + 2, -1, + nil, 0, 0, 0, + 2, 0, 2, 0, 0, false, + }, + "correct # of pods": { + 2, 5, + nil, 2, 0, 0, + 0, 0, 2, 0, 0, false, + }, + "WQ job: correct # of pods": { + 2, -1, + nil, 2, 0, 0, + 0, 0, 2, 0, 0, false, + }, + "too few active pods": { + 2, 5, + nil, 1, 1, 0, + 1, 0, 2, 1, 0, false, + }, + "too few active pods with a dynamic job": { + 2, -1, + nil, 1, 0, 0, + 1, 0, 2, 0, 0, false, + }, + "too few active pods, with controller error": { + 2, 5, + fmt.Errorf("Fake error"), 1, 1, 0, + 0, 0, 1, 1, 0, false, + }, + "too many active pods": { + 2, 5, + nil, 3, 0, 0, + 0, 1, 2, 0, 0, false, + }, + "too many active pods, with controller error": { + 2, 5, + fmt.Errorf("Fake error"), 3, 0, 0, + 0, 0, 3, 0, 0, false, + }, + "failed pod": { + 2, 5, + nil, 1, 1, 1, + 1, 0, 2, 1, 1, false, + }, + "job finish": { + 2, 5, + nil, 0, 5, 0, + 0, 0, 0, 5, 0, true, + }, + "WQ job finishing": { + 2, -1, + nil, 1, 1, 0, + 0, 0, 1, 1, 0, false, + }, + "WQ job all finished": { + 2, -1, + nil, 0, 2, 0, + 0, 0, 0, 2, 0, true, + }, + "WQ job all finished despite one failure": { + 2, -1, + nil, 0, 1, 1, + 0, 0, 0, 1, 1, true, + }, + "more active pods than completions": { + 2, 5, + nil, 10, 0, 0, + 0, 8, 2, 0, 0, false, + }, + "status change": { + 2, 5, + nil, 2, 2, 0, + 0, 0, 2, 2, 0, false, + }, + } + + for name, tc := range testCases { + // job manager setup + clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}) + manager := NewJobControllerFromClient(clientset, controller.NoResyncPeriodFunc) + fakePodControl := controller.FakePodControl{Err: tc.podControllerError} + manager.podControl = &fakePodControl + manager.podStoreSynced = alwaysReady + var actual *batch.Job + manager.updateHandler = func(job *batch.Job) error { + actual = job + return nil + } + + // job & pods setup + job := newJob(tc.parallelism, tc.completions) + manager.jobStore.Store.Add(job) + for _, pod := range newPodList(tc.activePods, api.PodRunning, job) { + manager.podStore.Indexer.Add(&pod) + } + for _, pod := range newPodList(tc.succeededPods, 
api.PodSucceeded, job) { + manager.podStore.Indexer.Add(&pod) + } + for _, pod := range newPodList(tc.failedPods, api.PodFailed, job) { + manager.podStore.Indexer.Add(&pod) + } + + // run + err := manager.syncJob(getKey(job, t)) + if err != nil { + t.Errorf("%s: unexpected error when syncing jobs %v", name, err) + } + + // validate created/deleted pods + if int32(len(fakePodControl.Templates)) != tc.expectedCreations { + t.Errorf("%s: unexpected number of creates. Expected %d, saw %d\n", name, tc.expectedCreations, len(fakePodControl.Templates)) + } + if int32(len(fakePodControl.DeletePodName)) != tc.expectedDeletions { + t.Errorf("%s: unexpected number of deletes. Expected %d, saw %d\n", name, tc.expectedDeletions, len(fakePodControl.DeletePodName)) + } + // validate status + if actual.Status.Active != tc.expectedActive { + t.Errorf("%s: unexpected number of active pods. Expected %d, saw %d\n", name, tc.expectedActive, actual.Status.Active) + } + if actual.Status.Succeeded != tc.expectedSucceeded { + t.Errorf("%s: unexpected number of succeeded pods. Expected %d, saw %d\n", name, tc.expectedSucceeded, actual.Status.Succeeded) + } + if actual.Status.Failed != tc.expectedFailed { + t.Errorf("%s: unexpected number of failed pods. Expected %d, saw %d\n", name, tc.expectedFailed, actual.Status.Failed) + } + if actual.Status.StartTime == nil { + t.Errorf("%s: .status.startTime was not set", name) + } + // validate conditions + if tc.expectedComplete && !getCondition(actual, batch.JobComplete) { + t.Errorf("%s: expected completion condition. Got %#v", name, actual.Status.Conditions) + } + } +} + +func TestSyncJobPastDeadline(t *testing.T) { + testCases := map[string]struct { + // job setup + parallelism int32 + completions int32 + activeDeadlineSeconds int64 + startTime int64 + + // pod setup + activePods int32 + succeededPods int32 + failedPods int32 + + // expectations + expectedDeletions int32 + expectedActive int32 + expectedSucceeded int32 + expectedFailed int32 + }{ + "activeDeadlineSeconds less than single pod execution": { + 1, 1, 10, 15, + 1, 0, 0, + 1, 0, 0, 1, + }, + "activeDeadlineSeconds bigger than single pod execution": { + 1, 2, 10, 15, + 1, 1, 0, + 1, 0, 1, 1, + }, + "activeDeadlineSeconds times-out before any pod starts": { + 1, 1, 10, 10, + 0, 0, 0, + 0, 0, 0, 0, + }, + } + + for name, tc := range testCases { + // job manager setup + clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}) + manager := NewJobControllerFromClient(clientset, controller.NoResyncPeriodFunc) + fakePodControl := controller.FakePodControl{} + manager.podControl = &fakePodControl + manager.podStoreSynced = alwaysReady + var actual *batch.Job + manager.updateHandler = func(job *batch.Job) error { + actual = job + return nil + } + + // job & pods setup + job := newJob(tc.parallelism, tc.completions) + job.Spec.ActiveDeadlineSeconds = &tc.activeDeadlineSeconds + start := unversioned.Unix(unversioned.Now().Time.Unix()-tc.startTime, 0) + job.Status.StartTime = &start + manager.jobStore.Store.Add(job) + for _, pod := range newPodList(tc.activePods, api.PodRunning, job) { + manager.podStore.Indexer.Add(&pod) + } + for _, pod := range newPodList(tc.succeededPods, api.PodSucceeded, job) { + manager.podStore.Indexer.Add(&pod) + } + for _, pod := range newPodList(tc.failedPods, api.PodFailed, job) { + manager.podStore.Indexer.Add(&pod) + } + + // run + err := manager.syncJob(getKey(job, t)) + if err != nil { + 
t.Errorf("%s: unexpected error when syncing jobs %v", name, err) + } + + // validate created/deleted pods + if int32(len(fakePodControl.Templates)) != 0 { + t.Errorf("%s: unexpected number of creates. Expected 0, saw %d\n", name, len(fakePodControl.Templates)) + } + if int32(len(fakePodControl.DeletePodName)) != tc.expectedDeletions { + t.Errorf("%s: unexpected number of deletes. Expected %d, saw %d\n", name, tc.expectedDeletions, len(fakePodControl.DeletePodName)) + } + // validate status + if actual.Status.Active != tc.expectedActive { + t.Errorf("%s: unexpected number of active pods. Expected %d, saw %d\n", name, tc.expectedActive, actual.Status.Active) + } + if actual.Status.Succeeded != tc.expectedSucceeded { + t.Errorf("%s: unexpected number of succeeded pods. Expected %d, saw %d\n", name, tc.expectedSucceeded, actual.Status.Succeeded) + } + if actual.Status.Failed != tc.expectedFailed { + t.Errorf("%s: unexpected number of failed pods. Expected %d, saw %d\n", name, tc.expectedFailed, actual.Status.Failed) + } + if actual.Status.StartTime == nil { + t.Errorf("%s: .status.startTime was not set", name) + } + // validate conditions + if !getCondition(actual, batch.JobFailed) { + t.Errorf("%s: expected fail condition. Got %#v", name, actual.Status.Conditions) + } + } +} + +func getCondition(job *batch.Job, condition batch.JobConditionType) bool { + for _, v := range job.Status.Conditions { + if v.Type == condition && v.Status == api.ConditionTrue { + return true + } + } + return false +} + +func TestSyncPastDeadlineJobFinished(t *testing.T) { + clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}) + manager := NewJobControllerFromClient(clientset, controller.NoResyncPeriodFunc) + fakePodControl := controller.FakePodControl{} + manager.podControl = &fakePodControl + manager.podStoreSynced = alwaysReady + var actual *batch.Job + manager.updateHandler = func(job *batch.Job) error { + actual = job + return nil + } + + job := newJob(1, 1) + activeDeadlineSeconds := int64(10) + job.Spec.ActiveDeadlineSeconds = &activeDeadlineSeconds + start := unversioned.Unix(unversioned.Now().Time.Unix()-15, 0) + job.Status.StartTime = &start + job.Status.Conditions = append(job.Status.Conditions, newCondition(batch.JobFailed, "DeadlineExceeded", "Job was active longer than specified deadline")) + manager.jobStore.Store.Add(job) + err := manager.syncJob(getKey(job, t)) + if err != nil { + t.Errorf("Unexpected error when syncing jobs %v", err) + } + if len(fakePodControl.Templates) != 0 { + t.Errorf("Unexpected number of creates. Expected %d, saw %d\n", 0, len(fakePodControl.Templates)) + } + if len(fakePodControl.DeletePodName) != 0 { + t.Errorf("Unexpected number of deletes. 
Expected %d, saw %d\n", 0, len(fakePodControl.DeletePodName)) + } + if actual != nil { + t.Error("Unexpected job modification") + } +} + +func TestSyncJobComplete(t *testing.T) { + clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}) + manager := NewJobControllerFromClient(clientset, controller.NoResyncPeriodFunc) + fakePodControl := controller.FakePodControl{} + manager.podControl = &fakePodControl + manager.podStoreSynced = alwaysReady + + job := newJob(1, 1) + job.Status.Conditions = append(job.Status.Conditions, newCondition(batch.JobComplete, "", "")) + manager.jobStore.Store.Add(job) + err := manager.syncJob(getKey(job, t)) + if err != nil { + t.Fatalf("Unexpected error when syncing jobs %v", err) + } + uncastJob, _, err := manager.jobStore.Store.Get(job) + if err != nil { + t.Fatalf("Unexpected error when trying to get job from the store: %v", err) + } + actual := uncastJob.(*batch.Job) + // Verify that after syncing a complete job, the conditions are the same. + if got, expected := len(actual.Status.Conditions), 1; got != expected { + t.Fatalf("Unexpected job status conditions amount; expected %d, got %d", expected, got) + } +} + +func TestSyncJobDeleted(t *testing.T) { + clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}) + manager := NewJobControllerFromClient(clientset, controller.NoResyncPeriodFunc) + fakePodControl := controller.FakePodControl{} + manager.podControl = &fakePodControl + manager.podStoreSynced = alwaysReady + manager.updateHandler = func(job *batch.Job) error { return nil } + job := newJob(2, 2) + err := manager.syncJob(getKey(job, t)) + if err != nil { + t.Errorf("Unexpected error when syncing jobs %v", err) + } + if len(fakePodControl.Templates) != 0 { + t.Errorf("Unexpected number of creates. Expected %d, saw %d\n", 0, len(fakePodControl.Templates)) + } + if len(fakePodControl.DeletePodName) != 0 { + t.Errorf("Unexpected number of deletes. 
Expected %d, saw %d\n", 0, len(fakePodControl.DeletePodName)) + } +} + +func TestSyncJobUpdateRequeue(t *testing.T) { + clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}) + manager := NewJobControllerFromClient(clientset, controller.NoResyncPeriodFunc) + fakePodControl := controller.FakePodControl{} + manager.podControl = &fakePodControl + manager.podStoreSynced = alwaysReady + manager.updateHandler = func(job *batch.Job) error { return fmt.Errorf("Fake error") } + job := newJob(2, 2) + manager.jobStore.Store.Add(job) + err := manager.syncJob(getKey(job, t)) + if err != nil { + t.Errorf("Unexpected error when syncing jobs, got %v", err) + } + t.Log("Waiting for a job in the queue") + key, _ := manager.queue.Get() + expectedKey := getKey(job, t) + if key != expectedKey { + t.Errorf("Expected requeue of job with key %s got %s", expectedKey, key) + } +} + +func TestJobPodLookup(t *testing.T) { + clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}) + manager := NewJobControllerFromClient(clientset, controller.NoResyncPeriodFunc) + manager.podStoreSynced = alwaysReady + testCases := []struct { + job *batch.Job + pod *api.Pod + + expectedName string + }{ + // pods without labels don't match any job + { + job: &batch.Job{ + ObjectMeta: api.ObjectMeta{Name: "basic"}, + }, + pod: &api.Pod{ + ObjectMeta: api.ObjectMeta{Name: "foo1", Namespace: api.NamespaceAll}, + }, + expectedName: "", + }, + // matching labels, different namespace + { + job: &batch.Job{ + ObjectMeta: api.ObjectMeta{Name: "foo"}, + Spec: batch.JobSpec{ + Selector: &unversioned.LabelSelector{ + MatchLabels: map[string]string{"foo": "bar"}, + }, + }, + }, + pod: &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: "foo2", + Namespace: "ns", + Labels: map[string]string{"foo": "bar"}, + }, + }, + expectedName: "", + }, + // matching ns and labels returns the job + { + job: &batch.Job{ + ObjectMeta: api.ObjectMeta{Name: "bar", Namespace: "ns"}, + Spec: batch.JobSpec{ + Selector: &unversioned.LabelSelector{ + MatchExpressions: []unversioned.LabelSelectorRequirement{ + { + Key: "foo", + Operator: unversioned.LabelSelectorOpIn, + Values: []string{"bar"}, + }, + }, + }, + }, + }, + pod: &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: "foo3", + Namespace: "ns", + Labels: map[string]string{"foo": "bar"}, + }, + }, + expectedName: "bar", + }, + } + for _, tc := range testCases { + manager.jobStore.Add(tc.job) + if job := manager.getPodJob(tc.pod); job != nil { + if tc.expectedName != job.Name { + t.Errorf("Got job %+v expected %+v", job.Name, tc.expectedName) + } + } else if tc.expectedName != "" { + t.Errorf("Expected job %v for pod %v, found none", tc.expectedName, tc.pod.Name) + } + } +} + +type FakeJobExpectations struct { + *controller.ControllerExpectations + satisfied bool + expSatisfied func() +} + +func (fe FakeJobExpectations) SatisfiedExpectations(controllerKey string) bool { + fe.expSatisfied() + return fe.satisfied +} + +// TestSyncJobExpectations tests that a pod cannot sneak in between counting active pods +// and checking expectations. 
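Aside: the test below targets the controller's expectations mechanism: sync must consult expectations before counting active pods, otherwise a pod created in between produces a duplicate. A toy sketch of why the ordering matters (invented types, far simpler than the real ControllerExpectations):

package main

import "fmt"

// toyExpectations is a drastically simplified stand-in for the controller's
// ControllerExpectations bookkeeping.
type toyExpectations struct{ pendingCreates int }

func (e *toyExpectations) satisfied() bool { return e.pendingCreates == 0 }

// sync decides how many pods to create. Expectations are consulted before
// active pods are counted; a pod observed in between is therefore already
// reflected in the count and is never created twice.
func sync(e *toyExpectations, activePods, want int) int {
	if !e.satisfied() {
		return 0 // a previous create is still in flight; do nothing
	}
	if missing := want - activePods; missing > 0 {
		e.pendingCreates = missing
		return missing
	}
	return 0
}

func main() {
	e := &toyExpectations{}
	fmt.Println(sync(e, 1, 2)) // 1: one pod is missing, create it
	fmt.Println(sync(e, 1, 2)) // 0: the expectation is still unsatisfied
}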
+func TestSyncJobExpectations(t *testing.T) { + clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}) + manager := NewJobControllerFromClient(clientset, controller.NoResyncPeriodFunc) + fakePodControl := controller.FakePodControl{} + manager.podControl = &fakePodControl + manager.podStoreSynced = alwaysReady + manager.updateHandler = func(job *batch.Job) error { return nil } + + job := newJob(2, 2) + manager.jobStore.Store.Add(job) + pods := newPodList(2, api.PodPending, job) + manager.podStore.Indexer.Add(&pods[0]) + + manager.expectations = FakeJobExpectations{ + controller.NewControllerExpectations(), true, func() { + // If we check active pods before checking expectations, the job + // will create a new replica because it doesn't see this pod, but + // has fulfilled its expectations. + manager.podStore.Indexer.Add(&pods[1]) + }, + } + manager.syncJob(getKey(job, t)) + if len(fakePodControl.Templates) != 0 { + t.Errorf("Unexpected number of creates. Expected %d, saw %d\n", 0, len(fakePodControl.Templates)) + } + if len(fakePodControl.DeletePodName) != 0 { + t.Errorf("Unexpected number of deletes. Expected %d, saw %d\n", 0, len(fakePodControl.DeletePodName)) + } +} + +type FakeWatcher struct { + w *watch.FakeWatcher + *testclient.Fake +} + +func TestWatchJobs(t *testing.T) { + clientset := fake.NewSimpleClientset() + fakeWatch := watch.NewFake() + clientset.PrependWatchReactor("jobs", core.DefaultWatchReactor(fakeWatch, nil)) + manager := NewJobControllerFromClient(clientset, controller.NoResyncPeriodFunc) + manager.podStoreSynced = alwaysReady + + var testJob batch.Job + received := make(chan struct{}) + + // The update sent through the fakeWatcher should make its way into the workqueue, + // and eventually into the syncHandler. + manager.syncHandler = func(key string) error { + + obj, exists, err := manager.jobStore.Store.GetByKey(key) + if !exists || err != nil { + t.Errorf("Expected to find job under key %v", key) + } + job, ok := obj.(*batch.Job) + if !ok { + t.Fatalf("unexpected type: %v %#v", reflect.TypeOf(obj), obj) + } + if !api.Semantic.DeepDerivative(*job, testJob) { + t.Errorf("Expected %#v, but got %#v", testJob, *job) + } + close(received) + return nil + } + // Start only the job watcher and the workqueue, send a watch event, + // and make sure it hits the sync method. + stopCh := make(chan struct{}) + defer close(stopCh) + go manager.Run(1, stopCh) + + // We're sending a new job to see if it reaches the syncHandler. 
+ testJob.Name = "foo" + fakeWatch.Add(&testJob) + t.Log("Waiting for job to reach syncHandler") + <-received +} + +func TestIsJobFinished(t *testing.T) { + job := &batch.Job{ + Status: batch.JobStatus{ + Conditions: []batch.JobCondition{{ + Type: batch.JobComplete, + Status: api.ConditionTrue, + }}, + }, + } + + if !isJobFinished(job) { + t.Error("Job was expected to be finished") + } + + job.Status.Conditions[0].Status = api.ConditionFalse + if isJobFinished(job) { + t.Error("Job was not expected to be finished") + } + + job.Status.Conditions[0].Status = api.ConditionUnknown + if isJobFinished(job) { + t.Error("Job was not expected to be finished") + } +} + +func TestWatchPods(t *testing.T) { + testJob := newJob(2, 2) + clientset := fake.NewSimpleClientset(testJob) + fakeWatch := watch.NewFake() + clientset.PrependWatchReactor("pods", core.DefaultWatchReactor(fakeWatch, nil)) + manager := NewJobControllerFromClient(clientset, controller.NoResyncPeriodFunc) + manager.podStoreSynced = alwaysReady + + // Put one job into the store + manager.jobStore.Store.Add(testJob) + received := make(chan struct{}) + // The pod update sent through the fakeWatcher should figure out the managing job and + // send it into the syncHandler. + manager.syncHandler = func(key string) error { + obj, exists, err := manager.jobStore.Store.GetByKey(key) + if !exists || err != nil { + t.Errorf("Expected to find job under key %v", key) + close(received) + return nil + } + job, ok := obj.(*batch.Job) + if !ok { + t.Errorf("unexpected type: %v %#v", reflect.TypeOf(obj), obj) + close(received) + return nil + } + if !api.Semantic.DeepDerivative(job, testJob) { + t.Errorf("\nExpected %#v,\nbut got %#v", testJob, job) + close(received) + return nil + } + close(received) + return nil + } + // Start only the pod watcher and the workqueue, send a watch event, + // and make sure it hits the sync method for the right job. + stopCh := make(chan struct{}) + defer close(stopCh) + go manager.Run(1, stopCh) + + pods := newPodList(1, api.PodRunning, testJob) + testPod := pods[0] + testPod.Status.Phase = api.PodFailed + fakeWatch.Add(&testPod) + + t.Log("Waiting for pod to reach syncHandler") + <-received +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/lookup_cache.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/lookup_cache.go index 5d82908be01f..3c43e1e3008a 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/lookup_cache.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/lookup_cache.go @@ -72,8 +72,8 @@ func (c *MatchingCache) Add(labelObj objectWithMeta, selectorObj objectWithMeta) // we need to check in the external request to ensure the cache data is not dirty. 
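Aside: the hunk below switches GetMatchingObject from the write lock to the read lock, so concurrent readers no longer serialize against each other. A minimal sketch of the pattern with the standard library's sync.RWMutex:

package main

import (
	"fmt"
	"sync"
)

type lookupCache struct {
	mu sync.RWMutex
	m  map[string]string
}

// Get takes only the read lock, so any number of Gets can run concurrently.
func (c *lookupCache) Get(k string) (string, bool) {
	c.mu.RLock()
	defer c.mu.RUnlock()
	v, ok := c.m[k]
	return v, ok
}

// Add takes the write lock, excluding both readers and other writers.
func (c *lookupCache) Add(k, v string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.m[k] = v
}

func main() {
	c := &lookupCache{m: map[string]string{}}
	c.Add("pod-x", "rc-y")
	v, ok := c.Get("pod-x")
	fmt.Println(v, ok)
}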
func (c *MatchingCache) GetMatchingObject(labelObj objectWithMeta) (controller interface{}, exists bool) { key := keyFunc(labelObj) - c.mutex.Lock() - defer c.mutex.Unlock() + c.mutex.RLock() + defer c.mutex.RUnlock() return c.cache.Get(key) } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/namespace/namespace_controller.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/namespace/namespace_controller.go index a1c6d4aacac0..0583313ab860 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/namespace/namespace_controller.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/namespace/namespace_controller.go @@ -27,6 +27,7 @@ import ( "k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller/framework" "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/util/metrics" utilruntime "k8s.io/kubernetes/pkg/util/runtime" "k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/workqueue" @@ -46,7 +47,7 @@ type NamespaceController struct { // controller that observes the namespaces controller *framework.Controller // namespaces that have been queued up for processing by workers - queue *workqueue.Type + queue workqueue.RateLimitingInterface // list of preferred group versions and their corresponding resource set for namespace deletion groupVersionResources []unversioned.GroupVersionResource // opCache is a cache to remember if a particular operation is not supported, to aid the dynamic client. @@ -66,12 +67,16 @@ func NewNamespaceController( namespaceController := &NamespaceController{ kubeClient: kubeClient, clientPool: clientPool, - queue: workqueue.New(), + queue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()), groupVersionResources: groupVersionResources, opCache: operationNotSupportedCache{}, finalizerToken: finalizerToken, } + if kubeClient != nil && kubeClient.Core().GetRESTClient().GetRateLimiter() != nil { + metrics.RegisterMetricAndTrackRateLimiterUsage("namespace_controller", kubeClient.Core().GetRESTClient().GetRateLimiter()) + } + // configure the backing store/controller store, controller := framework.NewInformer( &cache.ListWatch{ @@ -117,29 +122,40 @@ func (nm *NamespaceController) enqueueNamespace(obj interface{}) { // The system ensures that no two workers can process // the same namespace at the same time. 
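Aside: the rewritten worker below follows the usual rate-limiting workqueue shape: Get an item, Forget it on success, AddRateLimited it on failure, and always Done it. A hedged sketch of that loop against the vendored workqueue package this hunk uses (process is a placeholder for syncNamespaceFromKey):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/util/workqueue"
)

// process stands in for the controller's real sync function.
func process(key string) error { fmt.Println("sync", key); return nil }

func worker(q workqueue.RateLimitingInterface) {
	for {
		key, quit := q.Get()
		if quit {
			return
		}
		if err := process(key.(string)); err != nil {
			q.AddRateLimited(key) // retry later with increasing backoff
		} else {
			q.Forget(key) // clear the item's failure history in the rate limiter
		}
		q.Done(key) // always mark the item as finished
	}
}

func main() {
	q := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())
	q.Add("ns1")
	q.ShutDown() // items already queued are still delivered before Get reports quit
	worker(q)
}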
func (nm *NamespaceController) worker() { + workFunc := func() bool { + key, quit := nm.queue.Get() + if quit { + return true + } + defer nm.queue.Done(key) + + err := nm.syncNamespaceFromKey(key.(string)) + if err == nil { + // no error, forget this entry and return + nm.queue.Forget(key) + return false + } + + if estimate, ok := err.(*contentRemainingError); ok { + t := estimate.Estimate/2 + 1 + glog.V(4).Infof("Content remaining in namespace %s, waiting %d seconds", key, t) + nm.queue.AddAfter(key, time.Duration(t)*time.Second) + + } else { + // rather than wait for a full resync, re-add the namespace to the queue to be processed + nm.queue.AddRateLimited(key) + utilruntime.HandleError(err) + } + return false + + } + for { - func() { - key, quit := nm.queue.Get() - if quit { - return - } - defer nm.queue.Done(key) - if err := nm.syncNamespaceFromKey(key.(string)); err != nil { - if estimate, ok := err.(*contentRemainingError); ok { - go func() { - defer utilruntime.HandleCrash() - t := estimate.Estimate/2 + 1 - glog.V(4).Infof("Content remaining in namespace %s, waiting %d seconds", key, t) - time.Sleep(time.Duration(t) * time.Second) - nm.queue.Add(key) - }() - } else { - // rather than wait for a full resync, re-add the namespace to the queue to be processed - nm.queue.Add(key) - utilruntime.HandleError(err) - } - } - }() + quit := workFunc() + + if quit { + return + } } } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/namespace/namespace_controller_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/namespace/namespace_controller_test.go new file mode 100644 index 000000000000..f03a59d71302 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/namespace/namespace_controller_test.go @@ -0,0 +1,284 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package namespace + +import ( + "fmt" + "net/http" + "net/http/httptest" + "path" + "strings" + "sync" + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/errors" + "k8s.io/kubernetes/pkg/api/unversioned" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" + "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" + "k8s.io/kubernetes/pkg/client/restclient" + "k8s.io/kubernetes/pkg/client/testing/core" + "k8s.io/kubernetes/pkg/client/typed/dynamic" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/util/sets" +) + +func TestFinalized(t *testing.T) { + testNamespace := &api.Namespace{ + Spec: api.NamespaceSpec{ + Finalizers: []api.FinalizerName{"a", "b"}, + }, + } + if finalized(testNamespace) { + t.Errorf("Unexpected result, namespace is not finalized") + } + testNamespace.Spec.Finalizers = []api.FinalizerName{} + if !finalized(testNamespace) { + t.Errorf("Expected object to be finalized") + } +} + +func TestFinalizeNamespaceFunc(t *testing.T) { + mockClient := &fake.Clientset{} + testNamespace := &api.Namespace{ + ObjectMeta: api.ObjectMeta{ + Name: "test", + ResourceVersion: "1", + }, + Spec: api.NamespaceSpec{ + Finalizers: []api.FinalizerName{"kubernetes", "other"}, + }, + } + finalizeNamespace(mockClient, testNamespace, api.FinalizerKubernetes) + actions := mockClient.Actions() + if len(actions) != 1 { + t.Errorf("Expected 1 mock client action, but got %v", len(actions)) + } + if !actions[0].Matches("create", "namespaces") || actions[0].GetSubresource() != "finalize" { + t.Errorf("Expected finalize-namespace action %v", actions[0]) + } + finalizers := actions[0].(core.CreateAction).GetObject().(*api.Namespace).Spec.Finalizers + if len(finalizers) != 1 { + t.Errorf("There should be a single finalizer remaining") + } + if "other" != string(finalizers[0]) { + t.Errorf("Unexpected finalizer value, %v", finalizers[0]) + } +} + +func testSyncNamespaceThatIsTerminating(t *testing.T, versions *unversioned.APIVersions) { + now := unversioned.Now() + namespaceName := "test" + testNamespacePendingFinalize := &api.Namespace{ + ObjectMeta: api.ObjectMeta{ + Name: namespaceName, + ResourceVersion: "1", + DeletionTimestamp: &now, + }, + Spec: api.NamespaceSpec{ + Finalizers: []api.FinalizerName{"kubernetes"}, + }, + Status: api.NamespaceStatus{ + Phase: api.NamespaceTerminating, + }, + } + testNamespaceFinalizeComplete := &api.Namespace{ + ObjectMeta: api.ObjectMeta{ + Name: namespaceName, + ResourceVersion: "1", + DeletionTimestamp: &now, + }, + Spec: api.NamespaceSpec{}, + Status: api.NamespaceStatus{ + Phase: api.NamespaceTerminating, + }, + } + + // when doing a delete all of content, we will do a GET of a collection, and DELETE of a collection by default + dynamicClientActionSet := sets.NewString() + groupVersionResources := testGroupVersionResources() + for _, groupVersionResource := range groupVersionResources { + urlPath := path.Join([]string{ + dynamic.LegacyAPIPathResolverFunc(groupVersionResource.GroupVersion()), + groupVersionResource.Group, + groupVersionResource.Version, + "namespaces", + namespaceName, + groupVersionResource.Resource, + }...) 
+ dynamicClientActionSet.Insert((&fakeAction{method: "GET", path: urlPath}).String()) + dynamicClientActionSet.Insert((&fakeAction{method: "DELETE", path: urlPath}).String()) + } + + scenarios := map[string]struct { + testNamespace *api.Namespace + kubeClientActionSet sets.String + dynamicClientActionSet sets.String + }{ + "pending-finalize": { + testNamespace: testNamespacePendingFinalize, + kubeClientActionSet: sets.NewString( + strings.Join([]string{"get", "namespaces", ""}, "-"), + strings.Join([]string{"list", "pods", ""}, "-"), + strings.Join([]string{"create", "namespaces", "finalize"}, "-"), + ), + dynamicClientActionSet: dynamicClientActionSet, + }, + "complete-finalize": { + testNamespace: testNamespaceFinalizeComplete, + kubeClientActionSet: sets.NewString( + strings.Join([]string{"get", "namespaces", ""}, "-"), + strings.Join([]string{"delete", "namespaces", ""}, "-"), + ), + dynamicClientActionSet: sets.NewString(), + }, + } + + for scenario, testInput := range scenarios { + testHandler := &fakeActionHandler{statusCode: 200} + srv, clientConfig := testServerAndClientConfig(testHandler.ServeHTTP) + defer srv.Close() + + mockClient := fake.NewSimpleClientset(testInput.testNamespace) + clientPool := dynamic.NewClientPool(clientConfig, dynamic.LegacyAPIPathResolverFunc) + + err := syncNamespace(mockClient, clientPool, operationNotSupportedCache{}, groupVersionResources, testInput.testNamespace, api.FinalizerKubernetes) + if err != nil { + t.Errorf("scenario %s - Unexpected error when syncing namespace %v", scenario, err) + } + + // validate traffic from kube client + actionSet := sets.NewString() + for _, action := range mockClient.Actions() { + actionSet.Insert(strings.Join([]string{action.GetVerb(), action.GetResource().Resource, action.GetSubresource()}, "-")) + } + if !actionSet.Equal(testInput.kubeClientActionSet) { + t.Errorf("scenario %s - mock client expected actions:\n%v\n but got:\n%v\nDifference:\n%v", scenario, + testInput.kubeClientActionSet, actionSet, testInput.kubeClientActionSet.Difference(actionSet)) + } + + // validate traffic from dynamic client + actionSet = sets.NewString() + for _, action := range testHandler.actions { + actionSet.Insert(action.String()) + } + if !actionSet.Equal(testInput.dynamicClientActionSet) { + t.Errorf("scenario %s - dynamic client expected actions:\n%v\n but got:\n%v\nDifference:\n%v", scenario, + testInput.dynamicClientActionSet, actionSet, testInput.dynamicClientActionSet.Difference(actionSet)) + } + } +} + +func TestRetryOnConflictError(t *testing.T) { + mockClient := &fake.Clientset{} + numTries := 0 + retryOnce := func(kubeClient clientset.Interface, namespace *api.Namespace) (*api.Namespace, error) { + numTries++ + if numTries <= 1 { + return namespace, errors.NewConflict(api.Resource("namespaces"), namespace.Name, fmt.Errorf("ERROR!")) + } + return namespace, nil + } + namespace := &api.Namespace{} + _, err := retryOnConflictError(mockClient, namespace, retryOnce) + if err != nil { + t.Errorf("Unexpected error %v", err) + } + if numTries != 2 { + t.Errorf("Expected %v, but got %v", 2, numTries) + } +} + +func TestSyncNamespaceThatIsTerminatingNonExperimental(t *testing.T) { + testSyncNamespaceThatIsTerminating(t, &unversioned.APIVersions{}) +} + +func TestSyncNamespaceThatIsTerminatingV1Beta1(t *testing.T) { + testSyncNamespaceThatIsTerminating(t, &unversioned.APIVersions{Versions: []string{"extensions/v1beta1"}}) +} + +func TestSyncNamespaceThatIsActive(t *testing.T) { + mockClient := &fake.Clientset{} + testNamespace :=
&api.Namespace{ + ObjectMeta: api.ObjectMeta{ + Name: "test", + ResourceVersion: "1", + }, + Spec: api.NamespaceSpec{ + Finalizers: []api.FinalizerName{"kubernetes"}, + }, + Status: api.NamespaceStatus{ + Phase: api.NamespaceActive, + }, + } + err := syncNamespace(mockClient, nil, operationNotSupportedCache{}, testGroupVersionResources(), testNamespace, api.FinalizerKubernetes) + if err != nil { + t.Errorf("Unexpected error when synching namespace %v", err) + } + if len(mockClient.Actions()) != 0 { + t.Errorf("Expected no action from controller, but got: %v", mockClient.Actions()) + } +} + +// testServerAndClientConfig returns a server that listens and a config that can reference it +func testServerAndClientConfig(handler func(http.ResponseWriter, *http.Request)) (*httptest.Server, *restclient.Config) { + srv := httptest.NewServer(http.HandlerFunc(handler)) + config := &restclient.Config{ + Host: srv.URL, + } + return srv, config +} + +// fakeAction records information about requests to aid in testing. +type fakeAction struct { + method string + path string +} + +// String returns method=path to aid in testing +func (f *fakeAction) String() string { + return strings.Join([]string{f.method, f.path}, "=") +} + +// fakeActionHandler holds a list of fakeActions received +type fakeActionHandler struct { + // statusCode returned by this handler + statusCode int + + lock sync.Mutex + actions []fakeAction +} + +// ServeHTTP logs the action that occurred and always returns the associated status code +func (f *fakeActionHandler) ServeHTTP(response http.ResponseWriter, request *http.Request) { + f.lock.Lock() + defer f.lock.Unlock() + + f.actions = append(f.actions, fakeAction{method: request.Method, path: request.URL.Path}) + response.Header().Set("Content-Type", runtime.ContentTypeJSON) + response.WriteHeader(f.statusCode) + response.Write([]byte("{\"kind\": \"List\"}")) +} + +// testGroupVersionResources returns a mocked up set of resources across different api groups for testing namespace controller. 
+func testGroupVersionResources() []unversioned.GroupVersionResource { + results := []unversioned.GroupVersionResource{} + results = append(results, unversioned.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"}) + results = append(results, unversioned.GroupVersionResource{Group: "", Version: "v1", Resource: "services"}) + results = append(results, unversioned.GroupVersionResource{Group: "extensions", Version: "v1beta1", Resource: "deployments"}) + return results +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/namespace/namespace_controller_utils.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/namespace/namespace_controller_utils.go index 1080f26758a5..9971e3ee5ef0 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/namespace/namespace_controller_utils.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/namespace/namespace_controller_utils.go @@ -18,7 +18,6 @@ package namespace import ( "fmt" - "strings" "time" "k8s.io/kubernetes/pkg/api" @@ -26,7 +25,6 @@ import ( "k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/v1" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" - "k8s.io/kubernetes/pkg/client/typed/discovery" "k8s.io/kubernetes/pkg/client/typed/dynamic" "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/util/sets" @@ -146,16 +144,16 @@ func deleteCollection( gvr unversioned.GroupVersionResource, namespace string, ) (bool, error) { - glog.V(4).Infof("namespace controller - deleteCollection - namespace: %s, gvr: %v", namespace, gvr) + glog.V(5).Infof("namespace controller - deleteCollection - namespace: %s, gvr: %v", namespace, gvr) key := operationKey{op: operationDeleteCollection, gvr: gvr} if !opCache.isSupported(key) { - glog.V(4).Infof("namespace controller - deleteCollection ignored since not supported - namespace: %s, gvr: %v", namespace, gvr) + glog.V(5).Infof("namespace controller - deleteCollection ignored since not supported - namespace: %s, gvr: %v", namespace, gvr) return false, nil } apiResource := unversioned.APIResource{Name: gvr.Resource, Namespaced: true} - err := dynamicClient.Resource(&apiResource, namespace).DeleteCollection(nil, v1.ListOptions{}) + err := dynamicClient.Resource(&apiResource, namespace).DeleteCollection(nil, &v1.ListOptions{}) if err == nil { return true, nil @@ -168,12 +166,12 @@ func deleteCollection( // when working with this resource type, we will get a literal not found error rather than expected method not supported // remember next time that this resource does not support delete collection... 
if errors.IsMethodNotSupported(err) || errors.IsNotFound(err) { - glog.V(4).Infof("namespace controller - deleteCollection not supported - namespace: %s, gvr: %v", namespace, gvr) + glog.V(5).Infof("namespace controller - deleteCollection not supported - namespace: %s, gvr: %v", namespace, gvr) opCache[key] = true return false, nil } - glog.V(4).Infof("namespace controller - deleteCollection unexpected error - namespace: %s, gvr: %v, error: %v", namespace, gvr, err) + glog.V(5).Infof("namespace controller - deleteCollection unexpected error - namespace: %s, gvr: %v, error: %v", namespace, gvr, err) return true, err } @@ -188,16 +186,16 @@ func listCollection( gvr unversioned.GroupVersionResource, namespace string, ) (*runtime.UnstructuredList, bool, error) { - glog.V(4).Infof("namespace controller - listCollection - namespace: %s, gvr: %v", namespace, gvr) + glog.V(5).Infof("namespace controller - listCollection - namespace: %s, gvr: %v", namespace, gvr) key := operationKey{op: operationList, gvr: gvr} if !opCache.isSupported(key) { - glog.V(4).Infof("namespace controller - listCollection ignored since not supported - namespace: %s, gvr: %v", namespace, gvr) + glog.V(5).Infof("namespace controller - listCollection ignored since not supported - namespace: %s, gvr: %v", namespace, gvr) return nil, false, nil } apiResource := unversioned.APIResource{Name: gvr.Resource, Namespaced: true} - unstructuredList, err := dynamicClient.Resource(&apiResource, namespace).List(v1.ListOptions{}) + unstructuredList, err := dynamicClient.Resource(&apiResource, namespace).List(&v1.ListOptions{}) if err == nil { return unstructuredList, true, nil } @@ -209,7 +207,7 @@ func listCollection( // when working with this resource type, we will get a literal not found error rather than expected method not supported // remember next time that this resource does not support delete collection... if errors.IsMethodNotSupported(err) || errors.IsNotFound(err) { - glog.V(4).Infof("namespace controller - listCollection not supported - namespace: %s, gvr: %v", namespace, gvr) + glog.V(5).Infof("namespace controller - listCollection not supported - namespace: %s, gvr: %v", namespace, gvr) opCache[key] = true return nil, false, nil } @@ -224,7 +222,7 @@ func deleteEachItem( gvr unversioned.GroupVersionResource, namespace string, ) error { - glog.V(4).Infof("namespace controller - deleteEachItem - namespace: %s, gvr: %v", namespace, gvr) + glog.V(5).Infof("namespace controller - deleteEachItem - namespace: %s, gvr: %v", namespace, gvr) unstructuredList, listSupported, err := listCollection(dynamicClient, opCache, gvr, namespace) if err != nil { @@ -235,7 +233,7 @@ func deleteEachItem( } apiResource := unversioned.APIResource{Name: gvr.Resource, Namespaced: true} for _, item := range unstructuredList.Items { - if err = dynamicClient.Resource(&apiResource, namespace).Delete(item.Name, nil); err != nil && !errors.IsNotFound(err) && !errors.IsMethodNotSupported(err) { + if err = dynamicClient.Resource(&apiResource, namespace).Delete(item.GetName(), nil); err != nil && !errors.IsNotFound(err) && !errors.IsMethodNotSupported(err) { return err } } @@ -243,7 +241,7 @@ func deleteEachItem( } // deleteAllContentForGroupVersionResource will use the dynamic client to delete each resource identified in gvr. -// It returns an estimate of the time remaining before the remaing resources are deleted. +// It returns an estimate of the time remaining before the remaining resources are deleted. 
// If estimate > 0, not all resources are guaranteed to be gone. func deleteAllContentForGroupVersionResource( kubeClient clientset.Interface, @@ -253,20 +251,20 @@ func deleteAllContentForGroupVersionResource( namespace string, namespaceDeletedAt unversioned.Time, ) (int64, error) { - glog.V(4).Infof("namespace controller - deleteAllContentForGroupVersionResource - namespace: %s, gvr: %v", namespace, gvr) + glog.V(5).Infof("namespace controller - deleteAllContentForGroupVersionResource - namespace: %s, gvr: %v", namespace, gvr) // estimate how long it will take for the resource to be deleted (needed for objects that support graceful delete) estimate, err := estimateGracefulTermination(kubeClient, gvr, namespace, namespaceDeletedAt) if err != nil { - glog.V(4).Infof("namespace controller - deleteAllContentForGroupVersionResource - unable to estimate - namespace: %s, gvr: %v, err: %v", namespace, gvr, err) + glog.V(5).Infof("namespace controller - deleteAllContentForGroupVersionResource - unable to estimate - namespace: %s, gvr: %v, err: %v", namespace, gvr, err) return estimate, err } - glog.V(4).Infof("namespace controller - deleteAllContentForGroupVersionResource - estimate - namespace: %s, gvr: %v, estimate: %v", namespace, gvr, estimate) + glog.V(5).Infof("namespace controller - deleteAllContentForGroupVersionResource - estimate - namespace: %s, gvr: %v, estimate: %v", namespace, gvr, estimate) // get a client for this group version... dynamicClient, err := clientPool.ClientForGroupVersion(gvr.GroupVersion()) if err != nil { - glog.V(4).Infof("namespace controller - deleteAllContentForGroupVersionResource - unable to get client - namespace: %s, gvr: %v, err: %v", namespace, gvr, err) + glog.V(5).Infof("namespace controller - deleteAllContentForGroupVersionResource - unable to get client - namespace: %s, gvr: %v, err: %v", namespace, gvr, err) return estimate, err } @@ -286,16 +284,16 @@ func deleteAllContentForGroupVersionResource( // verify there are no more remaining items // it is not an error condition for there to be remaining items if local estimate is non-zero - glog.V(4).Infof("namespace controller - deleteAllContentForGroupVersionResource - checking for no more items in namespace: %s, gvr: %v", namespace, gvr) + glog.V(5).Infof("namespace controller - deleteAllContentForGroupVersionResource - checking for no more items in namespace: %s, gvr: %v", namespace, gvr) unstructuredList, listSupported, err := listCollection(dynamicClient, opCache, gvr, namespace) if err != nil { - glog.V(4).Infof("namespace controller - deleteAllContentForGroupVersionResource - error verifying no items in namespace: %s, gvr: %v, err: %v", namespace, gvr, err) + glog.V(5).Infof("namespace controller - deleteAllContentForGroupVersionResource - error verifying no items in namespace: %s, gvr: %v, err: %v", namespace, gvr, err) return estimate, err } if !listSupported { return estimate, nil } - glog.V(4).Infof("namespace controller - deleteAllContentForGroupVersionResource - items remaining - namespace: %s, gvr: %v, items: %v", namespace, gvr, len(unstructuredList.Items)) + glog.V(5).Infof("namespace controller - deleteAllContentForGroupVersionResource - items remaining - namespace: %s, gvr: %v, items: %v", namespace, gvr, len(unstructuredList.Items)) if len(unstructuredList.Items) != 0 && estimate == int64(0) { return estimate, fmt.Errorf("unexpected items still remain in namespace: %s for gvr: %v", namespace, gvr) } @@ -303,7 +301,7 @@ func deleteAllContentForGroupVersionResource( } // deleteAllContent 
will use the dynamic client to delete each resource identified in groupVersionResources. -// It returns an estimate of the time remaining before the remaing resources are deleted. +// It returns an estimate of the time remaining before the remaining resources are deleted. // If estimate > 0, not all resources are guaranteed to be gone. func deleteAllContent( kubeClient clientset.Interface, @@ -353,7 +351,7 @@ func syncNamespace( return err } - glog.V(4).Infof("namespace controller - syncNamespace - namespace: %s, finalizerToken: %s", namespace.Name, finalizerToken) + glog.V(5).Infof("namespace controller - syncNamespace - namespace: %s, finalizerToken: %s", namespace.Name, finalizerToken) // ensure that the status is up to date on the namespace // if we get a not found error, we assume the namespace is truly gone @@ -409,7 +407,7 @@ func syncNamespace( // estimateGrracefulTermination will estimate the graceful termination required for the specific entity in the namespace func estimateGracefulTermination(kubeClient clientset.Interface, groupVersionResource unversioned.GroupVersionResource, ns string, namespaceDeletedAt unversioned.Time) (int64, error) { groupResource := groupVersionResource.GroupResource() - glog.V(4).Infof("namespace controller - estimateGracefulTermination - group %s, resource: %s", groupResource.Group, groupResource.Resource) + glog.V(5).Infof("namespace controller - estimateGracefulTermination - group %s, resource: %s", groupResource.Group, groupResource.Resource) estimate := int64(0) var err error switch groupResource { @@ -430,7 +428,7 @@ func estimateGracefulTermination(kubeClient clientset.Interface, groupVersionRes // estimateGracefulTerminationForPods determines the graceful termination period for pods in the namespace func estimateGracefulTerminationForPods(kubeClient clientset.Interface, ns string) (int64, error) { - glog.V(4).Infof("namespace controller - estimateGracefulTerminationForPods - namespace %s", ns) + glog.V(5).Infof("namespace controller - estimateGracefulTerminationForPods - namespace %s", ns) estimate := int64(0) items, err := kubeClient.Core().Pods(ns).List(api.ListOptions{}) if err != nil { @@ -451,30 +449,3 @@ func estimateGracefulTerminationForPods(kubeClient clientset.Interface, ns strin } return estimate, nil } - -// ServerPreferredNamespacedGroupVersionResources uses the specified client to discover the set of preferred groupVersionResources that are namespaced -func ServerPreferredNamespacedGroupVersionResources(discoveryClient discovery.DiscoveryInterface) ([]unversioned.GroupVersionResource, error) { - results := []unversioned.GroupVersionResource{} - serverGroupList, err := discoveryClient.ServerGroups() - if err != nil { - return results, err - } - for _, apiGroup := range serverGroupList.Groups { - preferredVersion := apiGroup.PreferredVersion - apiResourceList, err := discoveryClient.ServerResourcesForGroupVersion(preferredVersion.GroupVersion) - if err != nil { - return results, err - } - groupVersion := unversioned.GroupVersion{Group: apiGroup.Name, Version: preferredVersion.Version} - for _, apiResource := range apiResourceList.APIResources { - if !apiResource.Namespaced { - continue - } - if strings.Contains(apiResource.Name, "/") { - continue - } - results = append(results, groupVersion.WithResource(apiResource.Name)) - } - } - return results, nil -} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/node/cidr_allocator.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/node/cidr_allocator.go new file 
mode 100644 index 000000000000..4bb4a2503d97 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/node/cidr_allocator.go @@ -0,0 +1,157 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package node + +import ( + "encoding/binary" + "errors" + "fmt" + "math/big" + "net" + "sync" +) + +var errCIDRRangeNoCIDRsRemaining = errors.New("CIDR allocation failed; there are no remaining CIDRs left to allocate in the accepted range") + +// CIDRAllocator is an interface implemented by things that know how to allocate/occupy/recycle CIDR for nodes. +type CIDRAllocator interface { + AllocateNext() (*net.IPNet, error) + Occupy(*net.IPNet) error + Release(*net.IPNet) error +} + +type rangeAllocator struct { + clusterCIDR *net.IPNet + clusterIP net.IP + clusterMaskSize int + subNetMaskSize int + maxCIDRs int + used big.Int + lock sync.Mutex +} + +// NewCIDRRangeAllocator returns a CIDRAllocator to allocate CIDRs for nodes. +// Caller must ensure subNetMaskSize is not less than cluster CIDR mask size. +func NewCIDRRangeAllocator(clusterCIDR *net.IPNet, subNetMaskSize int) CIDRAllocator { + clusterMask := clusterCIDR.Mask + clusterMaskSize, _ := clusterMask.Size() + + ra := &rangeAllocator{ + clusterCIDR: clusterCIDR, + clusterIP: clusterCIDR.IP.To4(), + clusterMaskSize: clusterMaskSize, + subNetMaskSize: subNetMaskSize, + maxCIDRs: 1 << uint32(subNetMaskSize-clusterMaskSize), + } + return ra +} + +func (r *rangeAllocator) AllocateNext() (*net.IPNet, error) { + r.lock.Lock() + defer r.lock.Unlock() + + nextUnused := -1 + for i := 0; i < r.maxCIDRs; i++ { + if r.used.Bit(i) == 0 { + nextUnused = i + break + } + } + if nextUnused == -1 { + return nil, errCIDRRangeNoCIDRsRemaining + } + + r.used.SetBit(&r.used, nextUnused, 1) + + j := uint32(nextUnused) << uint32(32-r.subNetMaskSize) + ipInt := (binary.BigEndian.Uint32(r.clusterIP)) | j + ip := make([]byte, 4) + binary.BigEndian.PutUint32(ip, ipInt) + + return &net.IPNet{ + IP: ip, + Mask: net.CIDRMask(r.subNetMaskSize, 32), + }, nil +} + +func (r *rangeAllocator) Release(cidr *net.IPNet) error { + used, err := r.getIndexForCIDR(cidr) + if err != nil { + return err + } + + r.lock.Lock() + defer r.lock.Unlock() + r.used.SetBit(&r.used, used, 0) + + return nil +} + +func (r *rangeAllocator) MaxCIDRs() int { + return r.maxCIDRs +} + +func (r *rangeAllocator) Occupy(cidr *net.IPNet) (err error) { + begin, end := 0, r.maxCIDRs + cidrMask := cidr.Mask + maskSize, _ := cidrMask.Size() + + if !r.clusterCIDR.Contains(cidr.IP.Mask(r.clusterCIDR.Mask)) && !cidr.Contains(r.clusterCIDR.IP.Mask(cidr.Mask)) { + return fmt.Errorf("cidr %v is out of the range of cluster cidr %v", cidr, r.clusterCIDR) + } + + if r.clusterMaskSize < maskSize { + subNetMask := net.CIDRMask(r.subNetMaskSize, 32) + begin, err = r.getIndexForCIDR(&net.IPNet{ + IP: cidr.IP.To4().Mask(subNetMask), + Mask: subNetMask, + }) + if err != nil { + return err + } + + ip := make([]byte, 4) + ipInt := binary.BigEndian.Uint32(cidr.IP) |
(^binary.BigEndian.Uint32(cidr.Mask)) + binary.BigEndian.PutUint32(ip, ipInt) + end, err = r.getIndexForCIDR(&net.IPNet{ + IP: net.IP(ip).To4().Mask(subNetMask), + Mask: subNetMask, + }) + if err != nil { + return err + } + } + + r.lock.Lock() + defer r.lock.Unlock() + + for i := begin; i <= end; i++ { + r.used.SetBit(&r.used, i, 1) + } + + return nil +} + +func (r *rangeAllocator) getIndexForCIDR(cidr *net.IPNet) (int, error) { + cidrIndex := (binary.BigEndian.Uint32(r.clusterIP) ^ binary.BigEndian.Uint32(cidr.IP.To4())) >> uint32(32-r.subNetMaskSize) + + if cidrIndex >= uint32(r.maxCIDRs) { + return 0, fmt.Errorf("CIDR: %v is out of the range of CIDR allocator", cidr) + } + + return int(cidrIndex), nil +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/node/cidr_allocator_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/node/cidr_allocator_test.go new file mode 100644 index 000000000000..37cfdf67bab8 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/node/cidr_allocator_test.go @@ -0,0 +1,350 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package node + +import ( + "github.com/golang/glog" + "math/big" + "net" + "reflect" + "testing" +) + +func TestRangeAllocatorFullyAllocated(t *testing.T) { + _, clusterCIDR, _ := net.ParseCIDR("127.123.234.0/30") + a := NewCIDRRangeAllocator(clusterCIDR, 30) + p, err := a.AllocateNext() + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if p.String() != "127.123.234.0/30" { + t.Fatalf("unexpected allocated cidr: %s", p.String()) + } + + _, err = a.AllocateNext() + if err == nil { + t.Fatalf("expected error because of fully-allocated range") + } + + a.Release(p) + p, err = a.AllocateNext() + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if p.String() != "127.123.234.0/30" { + t.Fatalf("unexpected allocated cidr: %s", p.String()) + } + _, err = a.AllocateNext() + if err == nil { + t.Fatalf("expected error because of fully-allocated range") + } +} + +func TestRangeAllocator_RandomishAllocation(t *testing.T) { + _, clusterCIDR, _ := net.ParseCIDR("127.123.234.0/16") + a := NewCIDRRangeAllocator(clusterCIDR, 24) + + // allocate all the CIDRs + var err error + cidrs := make([]*net.IPNet, 256) + + for i := 0; i < 256; i++ { + cidrs[i], err = a.AllocateNext() + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + } + + _, err = a.AllocateNext() + if err == nil { + t.Fatalf("expected error because of fully-allocated range") + } + // release them all + for i := 0; i < 256; i++ { + a.Release(cidrs[i]) + } + + // allocate the CIDRs again + rcidrs := make([]*net.IPNet, 256) + for i := 0; i < 256; i++ { + rcidrs[i], err = a.AllocateNext() + if err != nil { + t.Fatalf("unexpected error: %d, %v", i, err) + } + } + _, err = a.AllocateNext() + if err == nil { + t.Fatalf("expected error because of fully-allocated range") + } + + if !reflect.DeepEqual(cidrs, rcidrs) { + t.Fatalf("expected re-allocated cidrs are the same 
collection") + } +} + +func TestRangeAllocator_AllocationOccupied(t *testing.T) { + _, clusterCIDR, _ := net.ParseCIDR("127.123.234.0/16") + a := NewCIDRRangeAllocator(clusterCIDR, 24) + + // allocate all the CIDRs + var err error + cidrs := make([]*net.IPNet, 256) + + for i := 0; i < 256; i++ { + cidrs[i], err = a.AllocateNext() + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + } + + _, err = a.AllocateNext() + if err == nil { + t.Fatalf("expected error because of fully-allocated range") + } + // release them all + for i := 0; i < 256; i++ { + a.Release(cidrs[i]) + } + // occupy the last 128 CIDRs + for i := 128; i < 256; i++ { + a.Occupy(cidrs[i]) + } + + // allocate the first 128 CIDRs again + rcidrs := make([]*net.IPNet, 128) + for i := 0; i < 128; i++ { + rcidrs[i], err = a.AllocateNext() + if err != nil { + t.Fatalf("unexpected error: %d, %v", i, err) + } + } + _, err = a.AllocateNext() + if err == nil { + t.Fatalf("expected error because of fully-allocated range") + } + + // check Occupy() work properly + for i := 128; i < 256; i++ { + rcidrs = append(rcidrs, cidrs[i]) + } + if !reflect.DeepEqual(cidrs, rcidrs) { + t.Fatalf("expected re-allocated cidrs are the same collection") + } +} + +func TestGetBitforCIDR(t *testing.T) { + cases := []struct { + clusterCIDRStr string + subNetMaskSize int + subNetCIDRStr string + expectedBit int + expectErr bool + }{ + { + clusterCIDRStr: "127.0.0.0/8", + subNetMaskSize: 16, + subNetCIDRStr: "127.0.0.0/16", + expectedBit: 0, + expectErr: false, + }, + { + clusterCIDRStr: "127.0.0.0/8", + subNetMaskSize: 16, + subNetCIDRStr: "127.123.0.0/16", + expectedBit: 123, + expectErr: false, + }, + { + clusterCIDRStr: "127.0.0.0/8", + subNetMaskSize: 16, + subNetCIDRStr: "127.168.0.0/16", + expectedBit: 168, + expectErr: false, + }, + { + clusterCIDRStr: "127.0.0.0/8", + subNetMaskSize: 16, + subNetCIDRStr: "127.224.0.0/16", + expectedBit: 224, + expectErr: false, + }, + { + clusterCIDRStr: "192.168.0.0/16", + subNetMaskSize: 24, + subNetCIDRStr: "192.168.12.0/24", + expectedBit: 12, + expectErr: false, + }, + { + clusterCIDRStr: "192.168.0.0/16", + subNetMaskSize: 24, + subNetCIDRStr: "192.168.151.0/24", + expectedBit: 151, + expectErr: false, + }, + { + clusterCIDRStr: "192.168.0.0/16", + subNetMaskSize: 24, + subNetCIDRStr: "127.168.224.0/24", + expectErr: true, + }, + } + + for _, tc := range cases { + _, clusterCIDR, err := net.ParseCIDR(tc.clusterCIDRStr) + clusterMask := clusterCIDR.Mask + clusterMaskSize, _ := clusterMask.Size() + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + ra := &rangeAllocator{ + clusterIP: clusterCIDR.IP.To4(), + clusterMaskSize: clusterMaskSize, + subNetMaskSize: tc.subNetMaskSize, + maxCIDRs: 1 << uint32(tc.subNetMaskSize-clusterMaskSize), + } + + _, subnetCIDR, err := net.ParseCIDR(tc.subNetCIDRStr) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + got, err := ra.getIndexForCIDR(subnetCIDR) + if err == nil && tc.expectErr { + glog.Errorf("expected error but got null") + continue + } + + if err != nil && !tc.expectErr { + glog.Errorf("unexpected error: %v", err) + continue + } + + if got != tc.expectedBit { + glog.Errorf("expected %v, but got %v", tc.expectedBit, got) + } + } +} + +func TestOccupy(t *testing.T) { + cases := []struct { + clusterCIDRStr string + subNetMaskSize int + subNetCIDRStr string + expectedUsedBegin int + expectedUsedEnd int + expectErr bool + }{ + { + clusterCIDRStr: "127.0.0.0/8", + subNetMaskSize: 16, + subNetCIDRStr: "127.0.0.0/8", + expectedUsedBegin: 
0, + expectedUsedEnd: 256, + expectErr: false, + }, + { + clusterCIDRStr: "127.0.0.0/8", + subNetMaskSize: 16, + subNetCIDRStr: "127.0.0.0/2", + expectedUsedBegin: 0, + expectedUsedEnd: 256, + expectErr: false, + }, + { + clusterCIDRStr: "127.0.0.0/8", + subNetMaskSize: 16, + subNetCIDRStr: "127.0.0.0/16", + expectedUsedBegin: 0, + expectedUsedEnd: 0, + expectErr: false, + }, + { + clusterCIDRStr: "127.0.0.0/8", + subNetMaskSize: 32, + subNetCIDRStr: "127.0.0.0/16", + expectedUsedBegin: 0, + expectedUsedEnd: 65535, + expectErr: false, + }, + { + clusterCIDRStr: "127.0.0.0/7", + subNetMaskSize: 16, + subNetCIDRStr: "127.0.0.0/15", + expectedUsedBegin: 256, + expectedUsedEnd: 257, + expectErr: false, + }, + { + clusterCIDRStr: "127.0.0.0/7", + subNetMaskSize: 15, + subNetCIDRStr: "127.0.0.0/15", + expectedUsedBegin: 128, + expectedUsedEnd: 128, + expectErr: false, + }, + { + clusterCIDRStr: "127.0.0.0/7", + subNetMaskSize: 18, + subNetCIDRStr: "127.0.0.0/15", + expectedUsedBegin: 1024, + expectedUsedEnd: 1031, + expectErr: false, + }, + } + + for _, tc := range cases { + _, clusterCIDR, err := net.ParseCIDR(tc.clusterCIDRStr) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + clusterMask := clusterCIDR.Mask + clusterMaskSize, _ := clusterMask.Size() + + ra := &rangeAllocator{ + clusterCIDR: clusterCIDR, + clusterIP: clusterCIDR.IP.To4(), + clusterMaskSize: clusterMaskSize, + subNetMaskSize: tc.subNetMaskSize, + maxCIDRs: 1 << uint32(tc.subNetMaskSize-clusterMaskSize), + } + + _, subnetCIDR, err := net.ParseCIDR(tc.subNetCIDRStr) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + err = ra.Occupy(subnetCIDR) + if err == nil && tc.expectErr { + t.Errorf("expected error but got none") + continue + } + if err != nil && !tc.expectErr { + t.Errorf("unexpected error: %v", err) + continue + } + + expectedUsed := big.Int{} + for i := tc.expectedUsedBegin; i <= tc.expectedUsedEnd; i++ { + expectedUsed.SetBit(&expectedUsed, i, 1) + } + if expectedUsed.Cmp(&ra.used) != 0 { + t.Errorf("error") + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/node/nodecontroller.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/node/nodecontroller.go index aa88a3459d9e..2d3998bdc316 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/node/nodecontroller.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/node/nodecontroller.go @@ -30,8 +30,8 @@ import ( "k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/client/cache" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" + unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned" "k8s.io/kubernetes/pkg/client/record" - unversionedcore "k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned" "k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller/framework" @@ -40,9 +40,11 @@ import ( "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/types" - "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/flowcontrol" + "k8s.io/kubernetes/pkg/util/metrics" utilruntime "k8s.io/kubernetes/pkg/util/runtime" "k8s.io/kubernetes/pkg/util/sets" + "k8s.io/kubernetes/pkg/util/system" "k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/version" "k8s.io/kubernetes/pkg/watch" @@ -55,6 +57,8 @@ var ( const ( // nodeStatusUpdateRetry controls the number of retries of writing NodeStatus update. 
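getIndexForCIDR above turns a subnet into a bitmap index by XORing the subnet address with the cluster base address and shifting off the host bits; the TestGetBitforCIDR cases (127.123.0.0/16 inside 127.0.0.0/8 with /16 subnets yields bit 123) fall straight out of that arithmetic. A standalone sketch of the computation, using only the standard library (subnetIndex is an illustrative name):

package main

import (
    "encoding/binary"
    "fmt"
    "net"
)

// subnetIndex mirrors the arithmetic of getIndexForCIDR above: the XOR
// isolates the bits in which the subnet differs from the cluster base,
// and the shift drops the host portion, leaving the subnet number.
func subnetIndex(clusterIP net.IP, cidr *net.IPNet, subNetMaskSize int) uint32 {
    base := binary.BigEndian.Uint32(clusterIP.To4())
    sub := binary.BigEndian.Uint32(cidr.IP.To4())
    return (base ^ sub) >> uint32(32-subNetMaskSize)
}

func main() {
    _, clusterCIDR, _ := net.ParseCIDR("127.0.0.0/8")
    _, subnet, _ := net.ParseCIDR("127.123.0.0/16")
    fmt.Println(subnetIndex(clusterCIDR.IP, subnet, 16)) // prints 123
}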
nodeStatusUpdateRetry = 5 + // podCIDRUpdateRetry controls the number of retries of writing Node.Spec.PodCIDR update. + podCIDRUpdateRetry = 5 // controls how often NodeController will try to evict Pods from non-responsive Nodes. nodeEvictionPeriod = 100 * time.Millisecond ) @@ -69,7 +73,8 @@ type NodeController struct { allocateNodeCIDRs bool cloud cloudprovider.Interface clusterCIDR *net.IPNet - deletingPodsRateLimiter util.RateLimiter + serviceCIDR *net.IPNet + deletingPodsRateLimiter flowcontrol.RateLimiter knownNodeSet sets.String kubeClient clientset.Interface // Method for easy mocking in unittest. @@ -119,9 +124,16 @@ type NodeController struct { // DaemonSet framework and store daemonSetController *framework.Controller daemonSetStore cache.StoreToDaemonSetLister + // allocate/recycle CIDRs for node if allocateNodeCIDRs == true + cidrAllocator CIDRAllocator forcefullyDeletePod func(*api.Pod) error nodeExistsInCloudProvider func(string) (bool, error) + + // If in network segmentation mode NodeController won't evict Pods from unhealthy Nodes. + // It is enabled when all Nodes observed by the NodeController are NotReady and disabled + // when NC sees any healthy Node. This is a temporary fix for v1.3. + networkSegmentationMode bool } // NewNodeController returns a new node controller to sync instances from cloudprovider. @@ -129,24 +141,37 @@ func NewNodeController( cloud cloudprovider.Interface, kubeClient clientset.Interface, podEvictionTimeout time.Duration, - deletionEvictionLimiter util.RateLimiter, - terminationEvictionLimiter util.RateLimiter, + deletionEvictionLimiter flowcontrol.RateLimiter, + terminationEvictionLimiter flowcontrol.RateLimiter, nodeMonitorGracePeriod time.Duration, nodeStartupGracePeriod time.Duration, nodeMonitorPeriod time.Duration, clusterCIDR *net.IPNet, + serviceCIDR *net.IPNet, + nodeCIDRMaskSize int, allocateNodeCIDRs bool) *NodeController { eventBroadcaster := record.NewBroadcaster() recorder := eventBroadcaster.NewRecorder(api.EventSource{Component: "controllermanager"}) eventBroadcaster.StartLogging(glog.Infof) if kubeClient != nil { - glog.Infof("Sending events to api server.") - eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{kubeClient.Core().Events("")}) + glog.V(0).Infof("Sending events to api server.") + eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: kubeClient.Core().Events("")}) } else { - glog.Infof("No api server defined - no events will be sent to API server.") + glog.V(0).Infof("No api server defined - no events will be sent to API server.") + } + + if kubeClient != nil && kubeClient.Core().GetRESTClient().GetRateLimiter() != nil { + metrics.RegisterMetricAndTrackRateLimiterUsage("node_controller", kubeClient.Core().GetRESTClient().GetRateLimiter()) } - if allocateNodeCIDRs && clusterCIDR == nil { - glog.Fatal("NodeController: Must specify clusterCIDR if allocateNodeCIDRs == true.") + + if allocateNodeCIDRs { + if clusterCIDR == nil { + glog.Fatal("NodeController: Must specify clusterCIDR if allocateNodeCIDRs == true.") + } + mask := clusterCIDR.Mask + if maskSize, _ := mask.Size(); maskSize > nodeCIDRMaskSize { + glog.Fatal("NodeController: Invalid clusterCIDR, mask size of clusterCIDR must be less than nodeCIDRMaskSize.") + } } evictorLock := sync.Mutex{} @@ -167,12 +192,13 @@ func NewNodeController( lookupIP: net.LookupIP, now: unversioned.Now, clusterCIDR: clusterCIDR, + serviceCIDR: serviceCIDR, allocateNodeCIDRs: allocateNodeCIDRs, forcefullyDeletePod: func(p *api.Pod) error { 
return forcefullyDeletePod(kubeClient, p) }, nodeExistsInCloudProvider: func(nodeName string) (bool, error) { return nodeExistsInCloudProvider(cloud, nodeName) }, } - nc.podStore.Store, nc.podController = framework.NewInformer( + nc.podStore.Indexer, nc.podController = framework.NewIndexerInformer( &cache.ListWatch{ ListFunc: func(options api.ListOptions) (runtime.Object, error) { return nc.kubeClient.Core().Pods(api.NamespaceAll).List(options) @@ -187,7 +213,20 @@ func NewNodeController( AddFunc: nc.maybeDeleteTerminatingPod, UpdateFunc: func(_, obj interface{}) { nc.maybeDeleteTerminatingPod(obj) }, }, + // We don't need to build a index for podStore here actually, but build one for consistency. + // It will ensure that if people start making use of the podStore in more specific ways, + // they'll get the benefits they expect. It will also reserve the name for future refactorings. + cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, ) + + nodeEventHandlerFuncs := framework.ResourceEventHandlerFuncs{} + if nc.allocateNodeCIDRs { + nodeEventHandlerFuncs = framework.ResourceEventHandlerFuncs{ + AddFunc: nc.allocateOrOccupyCIDR, + DeleteFunc: nc.recycleCIDR, + } + } + nc.nodeStore.Store, nc.nodeController = framework.NewInformer( &cache.ListWatch{ ListFunc: func(options api.ListOptions) (runtime.Object, error) { @@ -199,8 +238,9 @@ func NewNodeController( }, &api.Node{}, controller.NoResyncPeriodFunc(), - framework.ResourceEventHandlerFuncs{}, + nodeEventHandlerFuncs, ) + nc.daemonSetStore.Store, nc.daemonSetController = framework.NewInformer( &cache.ListWatch{ ListFunc: func(options api.ListOptions) (runtime.Object, error) { @@ -214,11 +254,24 @@ func NewNodeController( controller.NoResyncPeriodFunc(), framework.ResourceEventHandlerFuncs{}, ) + + if allocateNodeCIDRs { + nc.cidrAllocator = NewCIDRRangeAllocator(clusterCIDR, nodeCIDRMaskSize) + } + return nc } // Run starts an asynchronous loop that monitors the status of cluster nodes. func (nc *NodeController) Run(period time.Duration) { + if nc.allocateNodeCIDRs { + if nc.serviceCIDR != nil { + nc.filterOutServiceRange() + } else { + glog.Info("No Service CIDR provided. Skipping filtering out service addresses.") + } + } + go nc.nodeController.Run(wait.NeverStop) go nc.podController.Run(wait.NeverStop) go nc.daemonSetController.Run(wait.NeverStop) @@ -272,7 +325,7 @@ func (nc *NodeController) Run(period time.Duration) { } if completed { - glog.Infof("All pods terminated on %s", value.Value) + glog.V(2).Infof("All pods terminated on %s", value.Value) nc.recordNodeEvent(value.Value, api.EventTypeNormal, "TerminatedAllPods", fmt.Sprintf("Terminated all Pods on Node %s.", value.Value)) return true, 0 } @@ -289,31 +342,79 @@ func (nc *NodeController) Run(period time.Duration) { go wait.Until(nc.cleanupOrphanedPods, 30*time.Second, wait.NeverStop) } -// Generates num pod CIDRs that could be assigned to nodes. -func generateCIDRs(clusterCIDR *net.IPNet, num int) sets.String { - res := sets.NewString() - cidrIP := clusterCIDR.IP.To4() - for i := 0; i < num; i++ { - // TODO: Make the CIDRs configurable. 
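The generateCIDRs helper being deleted here regenerated every candidate /24 on each pass and diffed it against the node list; the rangeAllocator introduced in cidr_allocator.go instead keeps allocation state in a big.Int bitmap behind a mutex. A reduced sketch of that bookkeeping, allocating the lowest clear bit and releasing by clearing it (bitmapAllocator is an illustrative name, not the patch's type):

package main

import (
    "errors"
    "fmt"
    "math/big"
    "sync"
)

// bitmapAllocator is a pared-down version of rangeAllocator's bookkeeping:
// bit i set means subnet i is in use.
type bitmapAllocator struct {
    mu   sync.Mutex
    used big.Int
    max  int
}

var errFull = errors.New("no free subnets remaining")

// allocate returns the lowest unused index and marks it used.
func (a *bitmapAllocator) allocate() (int, error) {
    a.mu.Lock()
    defer a.mu.Unlock()
    for i := 0; i < a.max; i++ {
        if a.used.Bit(i) == 0 {
            a.used.SetBit(&a.used, i, 1)
            return i, nil
        }
    }
    return 0, errFull
}

// release clears an index so it can be handed out again.
func (a *bitmapAllocator) release(i int) {
    a.mu.Lock()
    defer a.mu.Unlock()
    a.used.SetBit(&a.used, i, 0)
}

func main() {
    a := &bitmapAllocator{max: 2}
    i, _ := a.allocate()
    j, _ := a.allocate()
    _, err := a.allocate()
    fmt.Println(i, j, err) // 0 1 no free subnets remaining
}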
- b1 := byte(i >> 8) - b2 := byte(i % 256) - res.Insert(fmt.Sprintf("%d.%d.%d.0/24", cidrIP[0], cidrIP[1]+b1, cidrIP[2]+b2)) - } - return res +func (nc *NodeController) filterOutServiceRange() { + if !nc.clusterCIDR.Contains(nc.serviceCIDR.IP.Mask(nc.clusterCIDR.Mask)) && !nc.serviceCIDR.Contains(nc.clusterCIDR.IP.Mask(nc.serviceCIDR.Mask)) { + return + } + + if err := nc.cidrAllocator.Occupy(nc.serviceCIDR); err != nil { + glog.Errorf("Error filtering out service cidr: %v", err) + } } -// getCondition returns a condition object for the specific condition -// type, nil if the condition is not set. -func (nc *NodeController) getCondition(status *api.NodeStatus, conditionType api.NodeConditionType) *api.NodeCondition { - if status == nil { - return nil +// allocateOrOccupyCIDR looks at each new observed node, assigns it a valid CIDR +// if it doesn't currently have one or mark the CIDR as used if the node already have one. +func (nc *NodeController) allocateOrOccupyCIDR(obj interface{}) { + node := obj.(*api.Node) + + if node.Spec.PodCIDR != "" { + _, podCIDR, err := net.ParseCIDR(node.Spec.PodCIDR) + if err != nil { + glog.Errorf("failed to parse node %s, CIDR %s", node.Name, node.Spec.PodCIDR) + return + } + if err := nc.cidrAllocator.Occupy(podCIDR); err != nil { + glog.Errorf("failed to mark cidr as occupied :%v", err) + return + } + return } - for i := range status.Conditions { - if status.Conditions[i].Type == conditionType { - return &status.Conditions[i] + + podCIDR, err := nc.cidrAllocator.AllocateNext() + if err != nil { + nc.recordNodeStatusChange(node, "CIDRNotAvailable") + return + } + + glog.V(4).Infof("Assigning node %s CIDR %s", node.Name, podCIDR) + for rep := 0; rep < podCIDRUpdateRetry; rep++ { + node.Spec.PodCIDR = podCIDR.String() + if _, err := nc.kubeClient.Core().Nodes().Update(node); err != nil { + glog.Errorf("Failed while updating Node.Spec.PodCIDR (%d retries left): %v", podCIDRUpdateRetry-rep-1, err) + } else { + break + } + node, err = nc.kubeClient.Core().Nodes().Get(node.Name) + if err != nil { + glog.Errorf("Failed while getting node %v to retry updating Node.Spec.PodCIDR: %v", node.Name, err) + break } } - return nil + if err != nil { + glog.Errorf("Update PodCIDR of node %v from NodeController exceeds retry count.", node.Name) + nc.recordNodeStatusChange(node, "CIDRAssignmentFailed") + glog.Errorf("CIDR assignment for node %v failed: %v", node.Name, err) + } +} + +// recycleCIDR recycles the CIDR of a removed node +func (nc *NodeController) recycleCIDR(obj interface{}) { + node := obj.(*api.Node) + + if node.Spec.PodCIDR == "" { + return + } + + _, podCIDR, err := net.ParseCIDR(node.Spec.PodCIDR) + if err != nil { + glog.Errorf("failed to parse node %s, CIDR %s", node.Name, node.Spec.PodCIDR) + return + } + + glog.V(4).Infof("recycle node %s CIDR %s", node.Name, podCIDR) + if err := nc.cidrAllocator.Release(podCIDR); err != nil { + glog.Errorf("failed to release cidr: %v", err) + } } var gracefulDeletionVersion = version.MustParse("v1.1.0") @@ -361,7 +462,7 @@ func (nc *NodeController) maybeDeleteTerminatingPod(obj interface{}) { node := nodeObj.(*api.Node) v, err := version.Parse(node.Status.NodeInfo.KubeletVersion) if err != nil { - glog.Infof("couldn't parse verions %q of minion: %v", node.Status.NodeInfo.KubeletVersion, err) + glog.V(0).Infof("couldn't parse verions %q of minion: %v", node.Status.NodeInfo.KubeletVersion, err) utilruntime.HandleError(nc.forcefullyDeletePod(pod)) return } @@ -397,7 +498,7 @@ func forcefullyDeletePod(c clientset.Interface, 
pod *api.Pod) error { var zero int64 err := c.Core().Pods(pod.Namespace).Delete(pod.Name, &api.DeleteOptions{GracePeriodSeconds: &zero}) if err == nil { - glog.Infof("forceful deletion of %s succeeded", pod.Name) + glog.V(4).Infof("forceful deletion of %s succeeded", pod.Name) } return err } @@ -434,18 +535,14 @@ func (nc *NodeController) monitorNodeStatus() error { } } - if nc.allocateNodeCIDRs { - // TODO (cjcullen): Use pkg/controller/framework to watch nodes and - // reduce lists/decouple this from monitoring status. - nc.reconcileNodeCIDRs(nodes) - } + seenReady := false for i := range nodes.Items { var gracePeriod time.Duration - var lastReadyCondition api.NodeCondition - var readyCondition *api.NodeCondition + var observedReadyCondition api.NodeCondition + var currentReadyCondition *api.NodeCondition node := &nodes.Items[i] for rep := 0; rep < nodeStatusUpdateRetry; rep++ { - gracePeriod, lastReadyCondition, readyCondition, err = nc.tryUpdateNodeStatus(node) + gracePeriod, observedReadyCondition, currentReadyCondition, err = nc.tryUpdateNodeStatus(node) if err == nil { break } @@ -464,28 +561,32 @@ func (nc *NodeController) monitorNodeStatus() error { decisionTimestamp := nc.now() - if readyCondition != nil { + if currentReadyCondition != nil { // Check eviction timeout against decisionTimestamp - if lastReadyCondition.Status == api.ConditionFalse && + if observedReadyCondition.Status == api.ConditionFalse && decisionTimestamp.After(nc.nodeStatusMap[node.Name].readyTransitionTimestamp.Add(nc.podEvictionTimeout)) { if nc.evictPods(node.Name) { - glog.Infof("Evicting pods on node %s: %v is later than %v + %v", node.Name, decisionTimestamp, nc.nodeStatusMap[node.Name].readyTransitionTimestamp, nc.podEvictionTimeout) + glog.V(4).Infof("Evicting pods on node %s: %v is later than %v + %v", node.Name, decisionTimestamp, nc.nodeStatusMap[node.Name].readyTransitionTimestamp, nc.podEvictionTimeout) } } - if lastReadyCondition.Status == api.ConditionUnknown && + if observedReadyCondition.Status == api.ConditionUnknown && decisionTimestamp.After(nc.nodeStatusMap[node.Name].probeTimestamp.Add(nc.podEvictionTimeout)) { if nc.evictPods(node.Name) { - glog.Infof("Evicting pods on node %s: %v is later than %v + %v", node.Name, decisionTimestamp, nc.nodeStatusMap[node.Name].readyTransitionTimestamp, nc.podEvictionTimeout-gracePeriod) + glog.V(4).Infof("Evicting pods on node %s: %v is later than %v + %v", node.Name, decisionTimestamp, nc.nodeStatusMap[node.Name].readyTransitionTimestamp, nc.podEvictionTimeout-gracePeriod) } } - if lastReadyCondition.Status == api.ConditionTrue { + if observedReadyCondition.Status == api.ConditionTrue { + // We do not treat a master node as a part of the cluster for network segmentation checking. + if !system.IsMasterNode(node) { + seenReady = true + } if nc.cancelPodEviction(node.Name) { - glog.Infof("Node %s is ready again, cancelled pod eviction", node.Name) + glog.V(2).Infof("Node %s is ready again, cancelled pod eviction", node.Name) } } // Report node event. 
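monitorNodeStatus above retries tryUpdateNodeStatus up to nodeStatusUpdateRetry times, and allocateOrOccupyCIDR applies the same bounded-retry shape to PodCIDR writes with podCIDRUpdateRetry. The shape reduces to a few lines; a sketch with an illustrative retry helper that is not part of the patch:

package main

import (
    "errors"
    "fmt"
)

// retry runs op up to attempts times, stopping at the first success; this
// is the bounded-retry shape used with nodeStatusUpdateRetry and
// podCIDRUpdateRetry above.
func retry(attempts int, op func() error) error {
    var err error
    for i := 0; i < attempts; i++ {
        if err = op(); err == nil {
            return nil
        }
    }
    return fmt.Errorf("gave up after %d attempts: %v", attempts, err)
}

func main() {
    calls := 0
    err := retry(5, func() error {
        calls++
        if calls < 3 {
            return errors.New("transient")
        }
        return nil
    })
    fmt.Println(calls, err) // 3 <nil>
}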
- if readyCondition.Status != api.ConditionTrue && lastReadyCondition.Status == api.ConditionTrue { + if currentReadyCondition.Status != api.ConditionTrue && observedReadyCondition.Status == api.ConditionTrue { nc.recordNodeStatusChange(node, "NodeNotReady") if err = nc.markAllPodsNotReady(node.Name); err != nil { utilruntime.HandleError(fmt.Errorf("Unable to mark all pods NotReady on node %v: %v", node.Name, err)) @@ -494,14 +595,14 @@ func (nc *NodeController) monitorNodeStatus() error { // Check with the cloud provider to see if the node still exists. If it // doesn't, delete the node immediately. - if readyCondition.Status != api.ConditionTrue && nc.cloud != nil { + if currentReadyCondition.Status != api.ConditionTrue && nc.cloud != nil { exists, err := nc.nodeExistsInCloudProvider(node.Name) if err != nil { glog.Errorf("Error determining if node %v exists in cloud: %v", node.Name, err) continue } if !exists { - glog.Infof("Deleting node (no longer present in cloud provider): %s", node.Name) + glog.V(2).Infof("Deleting node (no longer present in cloud provider): %s", node.Name) nc.recordNodeEvent(node.Name, api.EventTypeNormal, "DeletingNode", fmt.Sprintf("Deleting Node %v because it's not present according to cloud provider", node.Name)) go func(nodeName string) { defer utilruntime.HandleCrash() @@ -517,6 +618,20 @@ func (nc *NodeController) monitorNodeStatus() error { } } } + + // NC doesn't see any Ready Node. We assume that the network is segmented and Nodes cannot connect to the API server and + // update their statuses. NC enters network segmentation mode and cancels all evictions in progress. + if !seenReady { + nc.networkSegmentationMode = true + nc.stopAllPodEvictions() + glog.V(2).Info("NodeController is entering network segmentation mode.") + } else { + if nc.networkSegmentationMode { + nc.forceUpdateAllProbeTimes() + nc.networkSegmentationMode = false + glog.V(2).Info("NodeController exited network segmentation mode.") + } + } return nil } @@ -557,42 +672,6 @@ func (nc *NodeController) forcefullyDeleteNode(nodeName string) error { return nil } -// reconcileNodeCIDRs looks at each node and assigns it a valid CIDR -// if it doesn't currently have one. -func (nc *NodeController) reconcileNodeCIDRs(nodes *api.NodeList) { - glog.V(4).Infof("Reconciling cidrs for %d nodes", len(nodes.Items)) - // TODO(roberthbailey): This seems inefficient. Why re-calculate CIDRs - // on each sync period? - availableCIDRs := generateCIDRs(nc.clusterCIDR, len(nodes.Items)) - for _, node := range nodes.Items { - if node.Spec.PodCIDR != "" { - glog.V(4).Infof("CIDR %s is already being used by node %s", node.Spec.PodCIDR, node.Name) - availableCIDRs.Delete(node.Spec.PodCIDR) - } - } - for _, node := range nodes.Items { - if node.Spec.PodCIDR == "" { - // Re-GET node (because ours might be stale by now).
- n, err := nc.kubeClient.Core().Nodes().Get(node.Name) - if err != nil { - glog.Errorf("Failed to get node %q: %v", node.Name, err) - continue - } - podCIDR, found := availableCIDRs.PopAny() - if !found { - nc.recordNodeStatusChange(n, "CIDRNotAvailable") - continue - } - glog.V(1).Infof("Assigning node %s CIDR %s", n.Name, podCIDR) - n.Spec.PodCIDR = podCIDR - if _, err := nc.kubeClient.Core().Nodes().Update(n); err != nil { - nc.recordNodeStatusChange(&node, "CIDRAssignmentFailed") - } - } - - } -} - func (nc *NodeController) recordNodeEvent(nodeName, eventtype, reason, event string) { ref := &api.ObjectReference{ Kind: "Node", @@ -622,13 +701,13 @@ func (nc *NodeController) recordNodeStatusChange(node *api.Node, new_status stri func (nc *NodeController) tryUpdateNodeStatus(node *api.Node) (time.Duration, api.NodeCondition, *api.NodeCondition, error) { var err error var gracePeriod time.Duration - var lastReadyCondition api.NodeCondition - readyCondition := nc.getCondition(&node.Status, api.NodeReady) - if readyCondition == nil { + var observedReadyCondition api.NodeCondition + _, currentReadyCondition := api.GetNodeCondition(&node.Status, api.NodeReady) + if currentReadyCondition == nil { // If ready condition is nil, then kubelet (or nodecontroller) never posted node status. // A fake ready condition is created, where LastProbeTime and LastTransitionTime is set // to node.CreationTimestamp to avoid handle the corner case. - lastReadyCondition = api.NodeCondition{ + observedReadyCondition = api.NodeCondition{ Type: api.NodeReady, Status: api.ConditionUnknown, LastHeartbeatTime: node.CreationTimestamp, @@ -642,7 +721,7 @@ func (nc *NodeController) tryUpdateNodeStatus(node *api.Node) (time.Duration, ap } } else { // If ready condition is not nil, make a copy of it, since we may modify it in place later. - lastReadyCondition = *readyCondition + observedReadyCondition = *currentReadyCondition gracePeriod = nc.nodeMonitorGracePeriod } @@ -663,9 +742,9 @@ func (nc *NodeController) tryUpdateNodeStatus(node *api.Node) (time.Duration, ap // if that's the case, but it does not seem necessary. var savedCondition *api.NodeCondition if found { - savedCondition = nc.getCondition(&savedNodeStatus.status, api.NodeReady) + _, savedCondition = api.GetNodeCondition(&savedNodeStatus.status, api.NodeReady) } - observedCondition := nc.getCondition(&node.Status, api.NodeReady) + _, observedCondition := api.GetNodeCondition(&node.Status, api.NodeReady) if !found { glog.Warningf("Missing timestamp for Node %s. Assuming now as a timestamp.", node.Name) savedNodeStatus = nodeStatusData{ @@ -673,7 +752,6 @@ func (nc *NodeController) tryUpdateNodeStatus(node *api.Node) (time.Duration, ap probeTimestamp: nc.now(), readyTransitionTimestamp: nc.now(), } - nc.nodeStatusMap[node.Name] = savedNodeStatus } else if savedCondition == nil && observedCondition != nil { glog.V(1).Infof("Creating timestamp entry for newly observed Node %s", node.Name) savedNodeStatus = nodeStatusData{ @@ -681,7 +759,6 @@ func (nc *NodeController) tryUpdateNodeStatus(node *api.Node) (time.Duration, ap probeTimestamp: nc.now(), readyTransitionTimestamp: nc.now(), } - nc.nodeStatusMap[node.Name] = savedNodeStatus } else if savedCondition != nil && observedCondition == nil { glog.Errorf("ReadyCondition was removed from Status of Node %s", node.Name) // TODO: figure out what to do in this case. For now we do the same thing as above. 
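The patch replaces the controller's private getCondition with api.GetNodeCondition, whose extra index return lets callers mutate the matched condition in place through the slice. A minimal sketch of that lookup shape over plain stand-in types (condition here is not the real api.NodeCondition):

package main

import "fmt"

// condition is a stand-in for api.NodeCondition.
type condition struct {
    Type   string
    Status string
}

// getCondition returns the index and a pointer into the slice, so the
// caller can update the condition in place, mirroring api.GetNodeCondition.
func getCondition(conds []condition, condType string) (int, *condition) {
    for i := range conds {
        if conds[i].Type == condType {
            return i, &conds[i]
        }
    }
    return -1, nil
}

func main() {
    conds := []condition{{Type: "Ready", Status: "Unknown"}}
    if _, c := getCondition(conds, "Ready"); c != nil {
        c.Status = "True" // in-place update through the returned pointer
    }
    fmt.Println(conds) // [{Ready True}]
}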
@@ -690,7 +767,6 @@ func (nc *NodeController) tryUpdateNodeStatus(node *api.Node) (time.Duration, ap probeTimestamp: nc.now(), readyTransitionTimestamp: nc.now(), } - nc.nodeStatusMap[node.Name] = savedNodeStatus } else if savedCondition != nil && observedCondition != nil && savedCondition.LastHeartbeatTime != observedCondition.LastHeartbeatTime { var transitionTime unversioned.Time // If ReadyCondition changed since the last time we checked, we update the transition timestamp to "now", @@ -703,7 +779,7 @@ func (nc *NodeController) tryUpdateNodeStatus(node *api.Node) (time.Duration, ap transitionTime = savedNodeStatus.readyTransitionTimestamp } if glog.V(5) { - glog.Infof("Node %s ReadyCondition updated. Updating timestamp: %+v vs %+v.", node.Name, savedNodeStatus.status, node.Status) + glog.V(5).Infof("Node %s ReadyCondition updated. Updating timestamp: %+v vs %+v.", node.Name, savedNodeStatus.status, node.Status) } else { glog.V(3).Infof("Node %s ReadyCondition updated. Updating timestamp.", node.Name) } @@ -712,13 +788,13 @@ func (nc *NodeController) tryUpdateNodeStatus(node *api.Node) (time.Duration, ap probeTimestamp: nc.now(), readyTransitionTimestamp: transitionTime, } - nc.nodeStatusMap[node.Name] = savedNodeStatus } + nc.nodeStatusMap[node.Name] = savedNodeStatus if nc.now().After(savedNodeStatus.probeTimestamp.Add(gracePeriod)) { // NodeReady condition was last set longer ago than gracePeriod, so update it to Unknown // (regardless of its current value) in the master. - if readyCondition == nil { + if currentReadyCondition == nil { glog.V(2).Infof("node %v is never updated by kubelet", node.Name) node.Status.Conditions = append(node.Status.Conditions, api.NodeCondition{ Type: api.NodeReady, @@ -729,22 +805,22 @@ func (nc *NodeController) tryUpdateNodeStatus(node *api.Node) (time.Duration, ap LastTransitionTime: nc.now(), }) } else { - glog.V(2).Infof("node %v hasn't been updated for %+v. Last ready condition is: %+v", - node.Name, nc.now().Time.Sub(savedNodeStatus.probeTimestamp.Time), lastReadyCondition) - if lastReadyCondition.Status != api.ConditionUnknown { - readyCondition.Status = api.ConditionUnknown - readyCondition.Reason = "NodeStatusUnknown" - readyCondition.Message = fmt.Sprintf("Kubelet stopped posting node status.") + glog.V(4).Infof("node %v hasn't been updated for %+v. Last ready condition is: %+v", + node.Name, nc.now().Time.Sub(savedNodeStatus.probeTimestamp.Time), observedReadyCondition) + if observedReadyCondition.Status != api.ConditionUnknown { + currentReadyCondition.Status = api.ConditionUnknown + currentReadyCondition.Reason = "NodeStatusUnknown" + currentReadyCondition.Message = fmt.Sprintf("Kubelet stopped posting node status.") // LastProbeTime is the last time we heard from kubelet. - readyCondition.LastHeartbeatTime = lastReadyCondition.LastHeartbeatTime - readyCondition.LastTransitionTime = nc.now() + currentReadyCondition.LastHeartbeatTime = observedReadyCondition.LastHeartbeatTime + currentReadyCondition.LastTransitionTime = nc.now() } } // Like NodeReady condition, NodeOutOfDisk was last set longer ago than gracePeriod, so update // it to Unknown (regardless of its current value) in the master. // TODO(madhusudancs): Refactor this with readyCondition to remove duplicated code. 
-		oodCondition := nc.getCondition(&node.Status, api.NodeOutOfDisk)
+		_, oodCondition := api.GetNodeCondition(&node.Status, api.NodeOutOfDisk)
 		if oodCondition == nil {
 			glog.V(2).Infof("Out of disk condition of node %v is never updated by kubelet", node.Name)
 			node.Status.Conditions = append(node.Status.Conditions, api.NodeCondition{
@@ -756,7 +832,7 @@ func (nc *NodeController) tryUpdateNodeStatus(node *api.Node) (time.Duration, ap
 				LastTransitionTime: nc.now(),
 			})
 		} else {
-			glog.V(2).Infof("node %v hasn't been updated for %+v. Last out of disk condition is: %+v",
+			glog.V(4).Infof("node %v hasn't been updated for %+v. Last out of disk condition is: %+v",
 				node.Name, nc.now().Time.Sub(savedNodeStatus.probeTimestamp.Time), oodCondition)
 			if oodCondition.Status != api.ConditionUnknown {
 				oodCondition.Status = api.ConditionUnknown
@@ -766,27 +842,42 @@ func (nc *NodeController) tryUpdateNodeStatus(node *api.Node) (time.Duration, ap
 			}
 		}
 
-		if !api.Semantic.DeepEqual(nc.getCondition(&node.Status, api.NodeReady), &lastReadyCondition) {
+		_, currentCondition := api.GetNodeCondition(&node.Status, api.NodeReady)
+		if !api.Semantic.DeepEqual(currentCondition, &observedReadyCondition) {
 			if _, err = nc.kubeClient.Core().Nodes().UpdateStatus(node); err != nil {
 				glog.Errorf("Error updating node %s: %v", node.Name, err)
-				return gracePeriod, lastReadyCondition, readyCondition, err
+				return gracePeriod, observedReadyCondition, currentReadyCondition, err
 			} else {
 				nc.nodeStatusMap[node.Name] = nodeStatusData{
 					status:                   node.Status,
 					probeTimestamp:           nc.nodeStatusMap[node.Name].probeTimestamp,
 					readyTransitionTimestamp: nc.now(),
 				}
-				return gracePeriod, lastReadyCondition, readyCondition, nil
+				return gracePeriod, observedReadyCondition, currentReadyCondition, nil
 			}
 		}
 	}
 
-	return gracePeriod, lastReadyCondition, readyCondition, err
+	return gracePeriod, observedReadyCondition, currentReadyCondition, err
+}
+
+// forceUpdateAllProbeTimes bumps all observed timestamps in saved nodeStatuses to now. This resets
+// all eviction timers.
+func (nc *NodeController) forceUpdateAllProbeTimes() {
+	now := nc.now()
+	for k, v := range nc.nodeStatusMap {
+		v.probeTimestamp = now
+		v.readyTransitionTimestamp = now
+		nc.nodeStatusMap[k] = v
+	}
 }
 
 // evictPods queues an eviction for the provided node name, and returns false if the node is already
 // queued for eviction.
 func (nc *NodeController) evictPods(nodeName string) bool {
+	if nc.networkSegmentationMode {
+		return false
+	}
 	nc.evictorLock.Lock()
 	defer nc.evictorLock.Unlock()
 	return nc.podEvictor.Add(nodeName)
@@ -806,6 +897,15 @@ func (nc *NodeController) cancelPodEviction(nodeName string) bool {
 	return false
 }
 
+// stopAllPodEvictions removes any queued evictions for all Nodes.
+func (nc *NodeController) stopAllPodEvictions() {
+	nc.evictorLock.Lock()
+	defer nc.evictorLock.Unlock()
+	glog.V(3).Infof("Cancelling all pod evictions.")
+	nc.podEvictor.Clear()
+	nc.terminationEvictor.Clear()
+}
+
 // deletePods will delete all pods from master running on the given node, and return true
 // if any pods were deleted.
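 // A minimal usage sketch (the node name is illustrative; this mirrors how the
 // eviction loop in this package drives deletePods):
 //
 //	if remaining, err := nc.deletePods("node0"); err == nil && remaining {
 //		nc.terminationEvictor.Add("node0")
 //	}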
func (nc *NodeController) deletePods(nodeName string) (bool, error) {
@@ -869,7 +969,7 @@ func (nc *NodeController) markAllPodsNotReady(nodeName string) error {
 				glog.V(2).Infof("Updating ready status of pod %v to false", pod.Name)
 				pod, err := nc.kubeClient.Core().Pods(pod.Namespace).UpdateStatus(&pod)
 				if err != nil {
-					glog.Warningf("Failed to updated status for pod %q: %v", format.Pod(pod), err)
+					glog.Warningf("Failed to update status for pod %q: %v", format.Pod(pod), err)
 					errMsg = append(errMsg, fmt.Sprintf("%v", err))
 				}
 				break
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/node/nodecontroller_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/node/nodecontroller_test.go
new file mode 100644
index 000000000000..e386004c88e1
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/node/nodecontroller_test.go
@@ -0,0 +1,1431 @@
+/*
+Copyright 2014 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package node
+
+import (
+	"errors"
+	"sync"
+	"testing"
+	"time"
+
+	"k8s.io/kubernetes/pkg/api"
+	apierrors "k8s.io/kubernetes/pkg/api/errors"
+	"k8s.io/kubernetes/pkg/api/resource"
+	"k8s.io/kubernetes/pkg/api/unversioned"
+	"k8s.io/kubernetes/pkg/apis/extensions"
+	"k8s.io/kubernetes/pkg/client/cache"
+	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
+	unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
+	fakecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/fake"
+	"k8s.io/kubernetes/pkg/util/diff"
+	"k8s.io/kubernetes/pkg/util/flowcontrol"
+	"k8s.io/kubernetes/pkg/util/wait"
+	"k8s.io/kubernetes/pkg/watch"
+)
+
+const (
+	testNodeMonitorGracePeriod = 40 * time.Second
+	testNodeStartupGracePeriod = 60 * time.Second
+	testNodeMonitorPeriod      = 5 * time.Second
+)
+
+// FakeNodeHandler is a fake implementation of NodesInterface and NodeInterface. It
+// allows test cases to have fine-grained control over mock behaviors. We also need
+// PodsInterface and PodInterface to test list & delete pods, which is implemented in
+// the embedded client.Fake field.
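+// A minimal construction sketch (values are illustrative; newNode and
+// fake.NewSimpleClientset are the helpers used throughout this file):
+//
+//	fnh := &FakeNodeHandler{
+//		Existing:  []*api.Node{newNode("node0")},
+//		Clientset: fake.NewSimpleClientset(),
+//	}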
+type FakeNodeHandler struct {
+	*fake.Clientset
+
+	// Input: Hooks determine if request is valid or not
+	CreateHook func(*FakeNodeHandler, *api.Node) bool
+	Existing   []*api.Node
+
+	// Output
+	CreatedNodes        []*api.Node
+	DeletedNodes        []*api.Node
+	UpdatedNodes        []*api.Node
+	UpdatedNodeStatuses []*api.Node
+	RequestCount        int
+
+	// Synchronization
+	createLock     sync.Mutex
+	deleteWaitChan chan struct{}
+}
+
+type FakeLegacyHandler struct {
+	unversionedcore.CoreInterface
+	n *FakeNodeHandler
+}
+
+func (c *FakeNodeHandler) Core() unversionedcore.CoreInterface {
+	return &FakeLegacyHandler{c.Clientset.Core(), c}
+}
+
+func (m *FakeLegacyHandler) Nodes() unversionedcore.NodeInterface {
+	return m.n
+}
+
+func (m *FakeNodeHandler) Create(node *api.Node) (*api.Node, error) {
+	m.createLock.Lock()
+	defer func() {
+		m.RequestCount++
+		m.createLock.Unlock()
+	}()
+	for _, n := range m.Existing {
+		if n.Name == node.Name {
+			return nil, apierrors.NewAlreadyExists(api.Resource("nodes"), node.Name)
+		}
+	}
+	if m.CreateHook == nil || m.CreateHook(m, node) {
+		nodeCopy := *node
+		m.CreatedNodes = append(m.CreatedNodes, &nodeCopy)
+		return node, nil
+	} else {
+		return nil, errors.New("Create error.")
+	}
+}
+
+func (m *FakeNodeHandler) Get(name string) (*api.Node, error) {
+	return nil, nil
+}
+
+func (m *FakeNodeHandler) List(opts api.ListOptions) (*api.NodeList, error) {
+	defer func() { m.RequestCount++ }()
+	var nodes []*api.Node
+	for i := 0; i < len(m.UpdatedNodes); i++ {
+		if !contains(m.UpdatedNodes[i], m.DeletedNodes) {
+			nodes = append(nodes, m.UpdatedNodes[i])
+		}
+	}
+	for i := 0; i < len(m.Existing); i++ {
+		if !contains(m.Existing[i], m.DeletedNodes) && !contains(m.Existing[i], nodes) {
+			nodes = append(nodes, m.Existing[i])
+		}
+	}
+	for i := 0; i < len(m.CreatedNodes); i++ {
+		// Index CreatedNodes here, not Existing: the two slices can differ in length.
+		if !contains(m.CreatedNodes[i], m.DeletedNodes) && !contains(m.CreatedNodes[i], nodes) {
+			nodes = append(nodes, m.CreatedNodes[i])
+		}
+	}
+	nodeList := &api.NodeList{}
+	for _, node := range nodes {
+		nodeList.Items = append(nodeList.Items, *node)
+	}
+	return nodeList, nil
+}
+
+func (m *FakeNodeHandler) Delete(id string, opt *api.DeleteOptions) error {
+	defer func() {
+		if m.deleteWaitChan != nil {
+			m.deleteWaitChan <- struct{}{}
+		}
+	}()
+	m.DeletedNodes = append(m.DeletedNodes, newNode(id))
+	m.RequestCount++
+	return nil
+}
+
+func (m *FakeNodeHandler) DeleteCollection(opt *api.DeleteOptions, listOpts api.ListOptions) error {
+	return nil
+}
+
+func (m *FakeNodeHandler) Update(node *api.Node) (*api.Node, error) {
+	nodeCopy := *node
+	m.UpdatedNodes = append(m.UpdatedNodes, &nodeCopy)
+	m.RequestCount++
+	return node, nil
+}
+
+func (m *FakeNodeHandler) UpdateStatus(node *api.Node) (*api.Node, error) {
+	nodeCopy := *node
+	m.UpdatedNodeStatuses = append(m.UpdatedNodeStatuses, &nodeCopy)
+	m.RequestCount++
+	return node, nil
+}
+
+func (m *FakeNodeHandler) Watch(opts api.ListOptions) (watch.Interface, error) {
+	return nil, nil
+}
+
+func TestMonitorNodeStatusEvictPods(t *testing.T) {
+	fakeNow := unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC)
+	evictionTimeout := 10 * time.Minute
+
+	// Because of the logic that prevents NC from evicting anything when all Nodes are NotReady,
+	// we need a second healthy node in tests. Because of how the tests are written we need to update
+	// the status of this Node.
+	healthyNodeNewStatus := api.NodeStatus{
+		Conditions: []api.NodeCondition{
+			{
+				Type:   api.NodeReady,
+				Status: api.ConditionTrue,
+				// Node status has just been updated, and the node remains Ready.
+ LastHeartbeatTime: unversioned.Date(2015, 1, 1, 12, 9, 0, 0, time.UTC), + LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), + }, + }, + } + + table := []struct { + fakeNodeHandler *FakeNodeHandler + daemonSets []extensions.DaemonSet + timeToPass time.Duration + newNodeStatus api.NodeStatus + secondNodeNewStatus api.NodeStatus + expectedEvictPods bool + description string + }{ + // Node created recently, with no status (happens only at cluster startup). + { + fakeNodeHandler: &FakeNodeHandler{ + Existing: []*api.Node{ + { + ObjectMeta: api.ObjectMeta{ + Name: "node0", + CreationTimestamp: fakeNow, + }, + }, + { + ObjectMeta: api.ObjectMeta{ + Name: "node1", + CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), + }, + Status: api.NodeStatus{ + Conditions: []api.NodeCondition{ + { + Type: api.NodeReady, + Status: api.ConditionTrue, + LastHeartbeatTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), + LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), + }, + }, + }, + }, + }, + Clientset: fake.NewSimpleClientset(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}), + }, + daemonSets: nil, + timeToPass: 0, + newNodeStatus: api.NodeStatus{}, + secondNodeNewStatus: healthyNodeNewStatus, + expectedEvictPods: false, + description: "Node created recently, with no status.", + }, + // Node created long time ago, and kubelet posted NotReady for a short period of time. + { + fakeNodeHandler: &FakeNodeHandler{ + Existing: []*api.Node{ + { + ObjectMeta: api.ObjectMeta{ + Name: "node0", + CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), + }, + Status: api.NodeStatus{ + Conditions: []api.NodeCondition{ + { + Type: api.NodeReady, + Status: api.ConditionFalse, + LastHeartbeatTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), + LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), + }, + }, + }, + }, + { + ObjectMeta: api.ObjectMeta{ + Name: "node1", + CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), + }, + Status: api.NodeStatus{ + Conditions: []api.NodeCondition{ + { + Type: api.NodeReady, + Status: api.ConditionTrue, + LastHeartbeatTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), + LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), + }, + }, + }, + }, + }, + Clientset: fake.NewSimpleClientset(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}), + }, + daemonSets: nil, + timeToPass: evictionTimeout, + newNodeStatus: api.NodeStatus{ + Conditions: []api.NodeCondition{ + { + Type: api.NodeReady, + Status: api.ConditionFalse, + // Node status has just been updated, and is NotReady for 10min. + LastHeartbeatTime: unversioned.Date(2015, 1, 1, 12, 9, 0, 0, time.UTC), + LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), + }, + }, + }, + secondNodeNewStatus: healthyNodeNewStatus, + expectedEvictPods: false, + description: "Node created long time ago, and kubelet posted NotReady for a short period of time.", + }, + // Pod is ds-managed, and kubelet posted NotReady for a long period of time. 
+ { + fakeNodeHandler: &FakeNodeHandler{ + Existing: []*api.Node{ + { + ObjectMeta: api.ObjectMeta{ + Name: "node0", + CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), + }, + Status: api.NodeStatus{ + Conditions: []api.NodeCondition{ + { + Type: api.NodeReady, + Status: api.ConditionFalse, + LastHeartbeatTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), + LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), + }, + }, + }, + }, + { + ObjectMeta: api.ObjectMeta{ + Name: "node1", + CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), + }, + Status: api.NodeStatus{ + Conditions: []api.NodeCondition{ + { + Type: api.NodeReady, + Status: api.ConditionTrue, + LastHeartbeatTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), + LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), + }, + }, + }, + }, + }, + Clientset: fake.NewSimpleClientset( + &api.PodList{ + Items: []api.Pod{ + { + ObjectMeta: api.ObjectMeta{ + Name: "pod0", + Namespace: "default", + Labels: map[string]string{"daemon": "yes"}, + }, + Spec: api.PodSpec{ + NodeName: "node0", + }, + }, + }, + }, + ), + }, + daemonSets: []extensions.DaemonSet{ + { + ObjectMeta: api.ObjectMeta{ + Name: "ds0", + Namespace: "default", + }, + Spec: extensions.DaemonSetSpec{ + Selector: &unversioned.LabelSelector{ + MatchLabels: map[string]string{"daemon": "yes"}, + }, + }, + }, + }, + timeToPass: time.Hour, + newNodeStatus: api.NodeStatus{ + Conditions: []api.NodeCondition{ + { + Type: api.NodeReady, + Status: api.ConditionFalse, + // Node status has just been updated, and is NotReady for 1hr. + LastHeartbeatTime: unversioned.Date(2015, 1, 1, 12, 59, 0, 0, time.UTC), + LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), + }, + }, + }, + secondNodeNewStatus: healthyNodeNewStatus, + expectedEvictPods: false, + description: "Pod is ds-managed, and kubelet posted NotReady for a long period of time.", + }, + // Node created long time ago, and kubelet posted NotReady for a long period of time. + { + fakeNodeHandler: &FakeNodeHandler{ + Existing: []*api.Node{ + { + ObjectMeta: api.ObjectMeta{ + Name: "node0", + CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), + }, + Status: api.NodeStatus{ + Conditions: []api.NodeCondition{ + { + Type: api.NodeReady, + Status: api.ConditionFalse, + LastHeartbeatTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), + LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), + }, + }, + }, + }, + { + ObjectMeta: api.ObjectMeta{ + Name: "node1", + CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), + }, + Status: api.NodeStatus{ + Conditions: []api.NodeCondition{ + { + Type: api.NodeReady, + Status: api.ConditionTrue, + LastHeartbeatTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), + LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), + }, + }, + }, + }, + }, + Clientset: fake.NewSimpleClientset(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}), + }, + daemonSets: nil, + timeToPass: time.Hour, + newNodeStatus: api.NodeStatus{ + Conditions: []api.NodeCondition{ + { + Type: api.NodeReady, + Status: api.ConditionFalse, + // Node status has just been updated, and is NotReady for 1hr. 
+ LastHeartbeatTime: unversioned.Date(2015, 1, 1, 12, 59, 0, 0, time.UTC), + LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), + }, + }, + }, + secondNodeNewStatus: healthyNodeNewStatus, + expectedEvictPods: true, + description: "Node created long time ago, and kubelet posted NotReady for a long period of time.", + }, + // Node created long time ago, node controller posted Unknown for a short period of time. + { + fakeNodeHandler: &FakeNodeHandler{ + Existing: []*api.Node{ + { + ObjectMeta: api.ObjectMeta{ + Name: "node0", + CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), + }, + Status: api.NodeStatus{ + Conditions: []api.NodeCondition{ + { + Type: api.NodeReady, + Status: api.ConditionUnknown, + LastHeartbeatTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), + LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), + }, + }, + }, + }, + { + ObjectMeta: api.ObjectMeta{ + Name: "node1", + CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), + }, + Status: api.NodeStatus{ + Conditions: []api.NodeCondition{ + { + Type: api.NodeReady, + Status: api.ConditionTrue, + LastHeartbeatTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), + LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), + }, + }, + }, + }, + }, + Clientset: fake.NewSimpleClientset(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}), + }, + daemonSets: nil, + timeToPass: evictionTimeout - testNodeMonitorGracePeriod, + newNodeStatus: api.NodeStatus{ + Conditions: []api.NodeCondition{ + { + Type: api.NodeReady, + Status: api.ConditionUnknown, + // Node status was updated by nodecontroller 10min ago + LastHeartbeatTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), + LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), + }, + }, + }, + secondNodeNewStatus: healthyNodeNewStatus, + expectedEvictPods: false, + description: "Node created long time ago, node controller posted Unknown for a short period of time.", + }, + // Node created long time ago, node controller posted Unknown for a long period of time. 
+ { + fakeNodeHandler: &FakeNodeHandler{ + Existing: []*api.Node{ + { + ObjectMeta: api.ObjectMeta{ + Name: "node0", + CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), + }, + Status: api.NodeStatus{ + Conditions: []api.NodeCondition{ + { + Type: api.NodeReady, + Status: api.ConditionUnknown, + LastHeartbeatTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), + LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), + }, + }, + }, + }, + { + ObjectMeta: api.ObjectMeta{ + Name: "node1", + CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), + }, + Status: api.NodeStatus{ + Conditions: []api.NodeCondition{ + { + Type: api.NodeReady, + Status: api.ConditionTrue, + LastHeartbeatTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), + LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), + }, + }, + }, + }, + }, + Clientset: fake.NewSimpleClientset(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}), + }, + daemonSets: nil, + timeToPass: 60 * time.Minute, + newNodeStatus: api.NodeStatus{ + Conditions: []api.NodeCondition{ + { + Type: api.NodeReady, + Status: api.ConditionUnknown, + // Node status was updated by nodecontroller 1hr ago + LastHeartbeatTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), + LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), + }, + }, + }, + secondNodeNewStatus: healthyNodeNewStatus, + expectedEvictPods: true, + description: "Node created long time ago, node controller posted Unknown for a long period of time.", + }, + // NetworkSegmentation: Node created long time ago, node controller posted Unknown for a long period of time on both Nodes. + { + fakeNodeHandler: &FakeNodeHandler{ + Existing: []*api.Node{ + { + ObjectMeta: api.ObjectMeta{ + Name: "node0", + CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), + }, + Status: api.NodeStatus{ + Conditions: []api.NodeCondition{ + { + Type: api.NodeReady, + Status: api.ConditionUnknown, + LastHeartbeatTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), + LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), + }, + }, + }, + }, + { + ObjectMeta: api.ObjectMeta{ + Name: "node1", + CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), + }, + Status: api.NodeStatus{ + Conditions: []api.NodeCondition{ + { + Type: api.NodeReady, + Status: api.ConditionUnknown, + LastHeartbeatTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), + LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), + }, + }, + }, + }, + }, + Clientset: fake.NewSimpleClientset(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}), + }, + daemonSets: nil, + timeToPass: 60 * time.Minute, + newNodeStatus: api.NodeStatus{ + Conditions: []api.NodeCondition{ + { + Type: api.NodeReady, + Status: api.ConditionUnknown, + // Node status was updated by nodecontroller 1hr ago + LastHeartbeatTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), + LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), + }, + }, + }, + secondNodeNewStatus: api.NodeStatus{ + Conditions: []api.NodeCondition{ + { + Type: api.NodeReady, + Status: api.ConditionUnknown, + // Node status was updated by nodecontroller 1hr ago + LastHeartbeatTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), + LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), + }, + }, + }, + expectedEvictPods: false, + description: "Network Segmentation: Node created long time 
ago, node controller posted Unknown for a long period of time on both Nodes.",
+		},
+		// NetworkSegmentation: Node created long time ago, node controller posted Unknown for a long period
+		// of time on the first Node; eviction should stop even though the node-master Node is healthy.
+		{
+			fakeNodeHandler: &FakeNodeHandler{
+				Existing: []*api.Node{
+					{
+						ObjectMeta: api.ObjectMeta{
+							Name:              "node0",
+							CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
+						},
+						Status: api.NodeStatus{
+							Conditions: []api.NodeCondition{
+								{
+									Type:               api.NodeReady,
+									Status:             api.ConditionUnknown,
+									LastHeartbeatTime:  unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
+									LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
+								},
+							},
+						},
+					},
+					{
+						ObjectMeta: api.ObjectMeta{
+							Name:              "node-master",
+							CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
+						},
+						Status: api.NodeStatus{
+							Conditions: []api.NodeCondition{
+								{
+									Type:               api.NodeReady,
+									Status:             api.ConditionTrue,
+									LastHeartbeatTime:  unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
+									LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
+								},
+							},
+						},
+					},
+				},
+				Clientset: fake.NewSimpleClientset(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}),
+			},
+			daemonSets: nil,
+			timeToPass: 60 * time.Minute,
+			newNodeStatus: api.NodeStatus{
+				Conditions: []api.NodeCondition{
+					{
+						Type:   api.NodeReady,
+						Status: api.ConditionUnknown,
+						// Node status was updated by nodecontroller 1hr ago
+						LastHeartbeatTime:  unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
+						LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
+					},
+				},
+			},
+			secondNodeNewStatus: healthyNodeNewStatus,
+			expectedEvictPods:   false,
+			description:         "NetworkSegmentation: Node created long time ago, node controller posted Unknown for a long period of time on the first Node; eviction should stop even though the node-master Node is healthy",
+		},
+	}
+
+	for _, item := range table {
+		nodeController := NewNodeController(nil, item.fakeNodeHandler,
+			evictionTimeout, flowcontrol.NewFakeAlwaysRateLimiter(), flowcontrol.NewFakeAlwaysRateLimiter(), testNodeMonitorGracePeriod,
+			testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, nil, 0, false)
+		nodeController.now = func() unversioned.Time { return fakeNow }
+		for _, ds := range item.daemonSets {
+			nodeController.daemonSetStore.Add(&ds)
+		}
+		if err := nodeController.monitorNodeStatus(); err != nil {
+			t.Errorf("unexpected error: %v", err)
+		}
+		if item.timeToPass > 0 {
+			nodeController.now = func() unversioned.Time { return unversioned.Time{Time: fakeNow.Add(item.timeToPass)} }
+			item.fakeNodeHandler.Existing[0].Status = item.newNodeStatus
+			item.fakeNodeHandler.Existing[1].Status = item.secondNodeNewStatus
+		}
+		if err := nodeController.monitorNodeStatus(); err != nil {
+			t.Errorf("unexpected error: %v", err)
+		}
+
+		nodeController.podEvictor.Try(func(value TimedValue) (bool, time.Duration) {
+			remaining, _ := nodeController.deletePods(value.Value)
+			if remaining {
+				nodeController.terminationEvictor.Add(value.Value)
+			}
+			return true, 0
+		})
+		nodeController.podEvictor.Try(func(value TimedValue) (bool, time.Duration) {
+			nodeController.terminatePods(value.Value, value.AddedAt)
+			return true, 0
+		})
+		podEvicted := false
+		for _, action := range item.fakeNodeHandler.Actions() {
+			if action.GetVerb() == "delete" && action.GetResource().Resource == "pods" {
+				podEvicted = true
+			}
+		}
+
+		if item.expectedEvictPods != podEvicted {
+			t.Errorf("expected pod eviction: %+v, got
%+v for %+v", item.expectedEvictPods,
+				podEvicted, item.description)
+		}
+	}
+}
+
+// TestCloudProviderNoRateLimit tests that monitorNodeStatus() immediately deletes
+// pods and the node when the kubelet has not reported and the cloud provider says
+// the node is gone.
+func TestCloudProviderNoRateLimit(t *testing.T) {
+	fnh := &FakeNodeHandler{
+		Existing: []*api.Node{
+			{
+				ObjectMeta: api.ObjectMeta{
+					Name:              "node0",
+					CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
+				},
+				Status: api.NodeStatus{
+					Conditions: []api.NodeCondition{
+						{
+							Type:               api.NodeReady,
+							Status:             api.ConditionUnknown,
+							LastHeartbeatTime:  unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
+							LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
+						},
+					},
+				},
+			},
+		},
+		Clientset:      fake.NewSimpleClientset(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0"), *newPod("pod1", "node0")}}),
+		deleteWaitChan: make(chan struct{}),
+	}
+	nodeController := NewNodeController(nil, fnh, 10*time.Minute,
+		flowcontrol.NewFakeAlwaysRateLimiter(), flowcontrol.NewFakeAlwaysRateLimiter(),
+		testNodeMonitorGracePeriod, testNodeStartupGracePeriod,
+		testNodeMonitorPeriod, nil, nil, 0, false)
+	nodeController.cloud = &fakecloud.FakeCloud{}
+	nodeController.now = func() unversioned.Time { return unversioned.Date(2016, 1, 1, 12, 0, 0, 0, time.UTC) }
+	nodeController.nodeExistsInCloudProvider = func(nodeName string) (bool, error) {
+		return false, nil
+	}
+	// monitorNodeStatus should allow this node to be immediately deleted
+	if err := nodeController.monitorNodeStatus(); err != nil {
+		t.Errorf("unexpected error: %v", err)
+	}
+	select {
+	case <-fnh.deleteWaitChan:
+	case <-time.After(wait.ForeverTestTimeout):
+		t.Errorf("Timed out waiting %v for node to be deleted", wait.ForeverTestTimeout)
+	}
+	if len(fnh.DeletedNodes) != 1 || fnh.DeletedNodes[0].Name != "node0" {
+		t.Errorf("Node was not deleted")
+	}
+	if nodeOnQueue := nodeController.podEvictor.Remove("node0"); nodeOnQueue {
+		t.Errorf("Node was queued for eviction. Should have been immediately deleted.")
+	}
+}
+
+func TestMonitorNodeStatusUpdateStatus(t *testing.T) {
+	fakeNow := unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC)
+	table := []struct {
+		fakeNodeHandler      *FakeNodeHandler
+		timeToPass           time.Duration
+		newNodeStatus        api.NodeStatus
+		expectedEvictPods    bool
+		expectedRequestCount int
+		expectedNodes        []*api.Node
+	}{
+		// Node created long time ago, without status:
+		// Expect Unknown status posted from node controller.
+ { + fakeNodeHandler: &FakeNodeHandler{ + Existing: []*api.Node{ + { + ObjectMeta: api.ObjectMeta{ + Name: "node0", + CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), + }, + }, + }, + Clientset: fake.NewSimpleClientset(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}), + }, + expectedRequestCount: 2, // List+Update + expectedNodes: []*api.Node{ + { + ObjectMeta: api.ObjectMeta{ + Name: "node0", + CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), + }, + Status: api.NodeStatus{ + Conditions: []api.NodeCondition{ + { + Type: api.NodeReady, + Status: api.ConditionUnknown, + Reason: "NodeStatusNeverUpdated", + Message: "Kubelet never posted node status.", + LastHeartbeatTime: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), + LastTransitionTime: fakeNow, + }, + { + Type: api.NodeOutOfDisk, + Status: api.ConditionUnknown, + Reason: "NodeStatusNeverUpdated", + Message: "Kubelet never posted node status.", + LastHeartbeatTime: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), + LastTransitionTime: fakeNow, + }, + }, + }, + }, + }, + }, + // Node created recently, without status. + // Expect no action from node controller (within startup grace period). + { + fakeNodeHandler: &FakeNodeHandler{ + Existing: []*api.Node{ + { + ObjectMeta: api.ObjectMeta{ + Name: "node0", + CreationTimestamp: fakeNow, + }, + }, + }, + Clientset: fake.NewSimpleClientset(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}), + }, + expectedRequestCount: 1, // List + expectedNodes: nil, + }, + // Node created long time ago, with status updated by kubelet exceeds grace period. + // Expect Unknown status posted from node controller. + { + fakeNodeHandler: &FakeNodeHandler{ + Existing: []*api.Node{ + { + ObjectMeta: api.ObjectMeta{ + Name: "node0", + CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), + }, + Status: api.NodeStatus{ + Conditions: []api.NodeCondition{ + { + Type: api.NodeReady, + Status: api.ConditionTrue, + // Node status hasn't been updated for 1hr. + LastHeartbeatTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), + LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), + }, + { + Type: api.NodeOutOfDisk, + Status: api.ConditionFalse, + // Node status hasn't been updated for 1hr. + LastHeartbeatTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), + LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), + }, + }, + Capacity: api.ResourceList{ + api.ResourceName(api.ResourceCPU): resource.MustParse("10"), + api.ResourceName(api.ResourceMemory): resource.MustParse("10G"), + }, + }, + Spec: api.NodeSpec{ + ExternalID: "node0", + }, + }, + }, + Clientset: fake.NewSimpleClientset(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}), + }, + expectedRequestCount: 3, // (List+)List+Update + timeToPass: time.Hour, + newNodeStatus: api.NodeStatus{ + Conditions: []api.NodeCondition{ + { + Type: api.NodeReady, + Status: api.ConditionTrue, + // Node status hasn't been updated for 1hr. + LastHeartbeatTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), + LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), + }, + { + Type: api.NodeOutOfDisk, + Status: api.ConditionFalse, + // Node status hasn't been updated for 1hr. 
+ LastHeartbeatTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), + LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), + }, + }, + Capacity: api.ResourceList{ + api.ResourceName(api.ResourceCPU): resource.MustParse("10"), + api.ResourceName(api.ResourceMemory): resource.MustParse("10G"), + }, + }, + expectedNodes: []*api.Node{ + { + ObjectMeta: api.ObjectMeta{ + Name: "node0", + CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), + }, + Status: api.NodeStatus{ + Conditions: []api.NodeCondition{ + { + Type: api.NodeReady, + Status: api.ConditionUnknown, + Reason: "NodeStatusUnknown", + Message: "Kubelet stopped posting node status.", + LastHeartbeatTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), + LastTransitionTime: unversioned.Time{Time: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC).Add(time.Hour)}, + }, + { + Type: api.NodeOutOfDisk, + Status: api.ConditionUnknown, + Reason: "NodeStatusUnknown", + Message: "Kubelet stopped posting node status.", + LastHeartbeatTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), + LastTransitionTime: unversioned.Time{Time: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC).Add(time.Hour)}, + }, + }, + Capacity: api.ResourceList{ + api.ResourceName(api.ResourceCPU): resource.MustParse("10"), + api.ResourceName(api.ResourceMemory): resource.MustParse("10G"), + }, + }, + Spec: api.NodeSpec{ + ExternalID: "node0", + }, + }, + }, + }, + // Node created long time ago, with status updated recently. + // Expect no action from node controller (within monitor grace period). + { + fakeNodeHandler: &FakeNodeHandler{ + Existing: []*api.Node{ + { + ObjectMeta: api.ObjectMeta{ + Name: "node0", + CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), + }, + Status: api.NodeStatus{ + Conditions: []api.NodeCondition{ + { + Type: api.NodeReady, + Status: api.ConditionTrue, + // Node status has just been updated. 
+ LastHeartbeatTime: fakeNow, + LastTransitionTime: fakeNow, + }, + }, + Capacity: api.ResourceList{ + api.ResourceName(api.ResourceCPU): resource.MustParse("10"), + api.ResourceName(api.ResourceMemory): resource.MustParse("10G"), + }, + }, + Spec: api.NodeSpec{ + ExternalID: "node0", + }, + }, + }, + Clientset: fake.NewSimpleClientset(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}), + }, + expectedRequestCount: 1, // List + expectedNodes: nil, + }, + } + + for i, item := range table { + nodeController := NewNodeController(nil, item.fakeNodeHandler, 5*time.Minute, flowcontrol.NewFakeAlwaysRateLimiter(), + flowcontrol.NewFakeAlwaysRateLimiter(), testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, nil, 0, false) + nodeController.now = func() unversioned.Time { return fakeNow } + if err := nodeController.monitorNodeStatus(); err != nil { + t.Errorf("unexpected error: %v", err) + } + if item.timeToPass > 0 { + nodeController.now = func() unversioned.Time { return unversioned.Time{Time: fakeNow.Add(item.timeToPass)} } + item.fakeNodeHandler.Existing[0].Status = item.newNodeStatus + if err := nodeController.monitorNodeStatus(); err != nil { + t.Errorf("unexpected error: %v", err) + } + } + if item.expectedRequestCount != item.fakeNodeHandler.RequestCount { + t.Errorf("expected %v call, but got %v.", item.expectedRequestCount, item.fakeNodeHandler.RequestCount) + } + if len(item.fakeNodeHandler.UpdatedNodes) > 0 && !api.Semantic.DeepEqual(item.expectedNodes, item.fakeNodeHandler.UpdatedNodes) { + t.Errorf("Case[%d] unexpected nodes: %s", i, diff.ObjectDiff(item.expectedNodes[0], item.fakeNodeHandler.UpdatedNodes[0])) + } + if len(item.fakeNodeHandler.UpdatedNodeStatuses) > 0 && !api.Semantic.DeepEqual(item.expectedNodes, item.fakeNodeHandler.UpdatedNodeStatuses) { + t.Errorf("Case[%d] unexpected nodes: %s", i, diff.ObjectDiff(item.expectedNodes[0], item.fakeNodeHandler.UpdatedNodeStatuses[0])) + } + } +} + +func TestMonitorNodeStatusMarkPodsNotReady(t *testing.T) { + fakeNow := unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC) + table := []struct { + fakeNodeHandler *FakeNodeHandler + timeToPass time.Duration + newNodeStatus api.NodeStatus + expectedPodStatusUpdate bool + }{ + // Node created recently, without status. + // Expect no action from node controller (within startup grace period). + { + fakeNodeHandler: &FakeNodeHandler{ + Existing: []*api.Node{ + { + ObjectMeta: api.ObjectMeta{ + Name: "node0", + CreationTimestamp: fakeNow, + }, + }, + }, + Clientset: fake.NewSimpleClientset(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}), + }, + expectedPodStatusUpdate: false, + }, + // Node created long time ago, with status updated recently. + // Expect no action from node controller (within monitor grace period). + { + fakeNodeHandler: &FakeNodeHandler{ + Existing: []*api.Node{ + { + ObjectMeta: api.ObjectMeta{ + Name: "node0", + CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), + }, + Status: api.NodeStatus{ + Conditions: []api.NodeCondition{ + { + Type: api.NodeReady, + Status: api.ConditionTrue, + // Node status has just been updated. 
+ LastHeartbeatTime: fakeNow, + LastTransitionTime: fakeNow, + }, + }, + Capacity: api.ResourceList{ + api.ResourceName(api.ResourceCPU): resource.MustParse("10"), + api.ResourceName(api.ResourceMemory): resource.MustParse("10G"), + }, + }, + Spec: api.NodeSpec{ + ExternalID: "node0", + }, + }, + }, + Clientset: fake.NewSimpleClientset(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}), + }, + expectedPodStatusUpdate: false, + }, + // Node created long time ago, with status updated by kubelet exceeds grace period. + // Expect pods status updated and Unknown node status posted from node controller + { + fakeNodeHandler: &FakeNodeHandler{ + Existing: []*api.Node{ + { + ObjectMeta: api.ObjectMeta{ + Name: "node0", + CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), + }, + Status: api.NodeStatus{ + Conditions: []api.NodeCondition{ + { + Type: api.NodeReady, + Status: api.ConditionTrue, + // Node status hasn't been updated for 1hr. + LastHeartbeatTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), + LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), + }, + { + Type: api.NodeOutOfDisk, + Status: api.ConditionFalse, + // Node status hasn't been updated for 1hr. + LastHeartbeatTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), + LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), + }, + }, + Capacity: api.ResourceList{ + api.ResourceName(api.ResourceCPU): resource.MustParse("10"), + api.ResourceName(api.ResourceMemory): resource.MustParse("10G"), + }, + }, + Spec: api.NodeSpec{ + ExternalID: "node0", + }, + }, + }, + Clientset: fake.NewSimpleClientset(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}), + }, + timeToPass: 1 * time.Minute, + newNodeStatus: api.NodeStatus{ + Conditions: []api.NodeCondition{ + { + Type: api.NodeReady, + Status: api.ConditionTrue, + // Node status hasn't been updated for 1hr. + LastHeartbeatTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), + LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), + }, + { + Type: api.NodeOutOfDisk, + Status: api.ConditionFalse, + // Node status hasn't been updated for 1hr. 
+ LastHeartbeatTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), + LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC), + }, + }, + Capacity: api.ResourceList{ + api.ResourceName(api.ResourceCPU): resource.MustParse("10"), + api.ResourceName(api.ResourceMemory): resource.MustParse("10G"), + }, + }, + expectedPodStatusUpdate: true, + }, + } + + for i, item := range table { + nodeController := NewNodeController(nil, item.fakeNodeHandler, 5*time.Minute, flowcontrol.NewFakeAlwaysRateLimiter(), + flowcontrol.NewFakeAlwaysRateLimiter(), testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, nil, 0, false) + nodeController.now = func() unversioned.Time { return fakeNow } + if err := nodeController.monitorNodeStatus(); err != nil { + t.Errorf("Case[%d] unexpected error: %v", i, err) + } + if item.timeToPass > 0 { + nodeController.now = func() unversioned.Time { return unversioned.Time{Time: fakeNow.Add(item.timeToPass)} } + item.fakeNodeHandler.Existing[0].Status = item.newNodeStatus + if err := nodeController.monitorNodeStatus(); err != nil { + t.Errorf("Case[%d] unexpected error: %v", i, err) + } + } + + podStatusUpdated := false + for _, action := range item.fakeNodeHandler.Actions() { + if action.GetVerb() == "update" && action.GetResource().Resource == "pods" && action.GetSubresource() == "status" { + podStatusUpdated = true + } + } + if podStatusUpdated != item.expectedPodStatusUpdate { + t.Errorf("Case[%d] expect pod status updated to be %v, but got %v", i, item.expectedPodStatusUpdate, podStatusUpdated) + } + } +} + +func TestNodeDeletion(t *testing.T) { + fakeNow := unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC) + fakeNodeHandler := &FakeNodeHandler{ + Existing: []*api.Node{ + { + ObjectMeta: api.ObjectMeta{ + Name: "node0", + CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), + }, + Status: api.NodeStatus{ + Conditions: []api.NodeCondition{ + { + Type: api.NodeReady, + Status: api.ConditionTrue, + // Node status has just been updated. + LastHeartbeatTime: fakeNow, + LastTransitionTime: fakeNow, + }, + }, + Capacity: api.ResourceList{ + api.ResourceName(api.ResourceCPU): resource.MustParse("10"), + api.ResourceName(api.ResourceMemory): resource.MustParse("10G"), + }, + }, + Spec: api.NodeSpec{ + ExternalID: "node0", + }, + }, + { + ObjectMeta: api.ObjectMeta{ + Name: "node1", + CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), + }, + Status: api.NodeStatus{ + Conditions: []api.NodeCondition{ + { + Type: api.NodeReady, + Status: api.ConditionTrue, + // Node status has just been updated. 
+						LastHeartbeatTime:  fakeNow,
+						LastTransitionTime: fakeNow,
+					},
+				},
+				Capacity: api.ResourceList{
+					api.ResourceName(api.ResourceCPU):    resource.MustParse("10"),
+					api.ResourceName(api.ResourceMemory): resource.MustParse("10G"),
+				},
+			},
+			Spec: api.NodeSpec{
+				ExternalID: "node0",
+			},
+		},
+	},
+		Clientset: fake.NewSimpleClientset(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0"), *newPod("pod1", "node1")}}),
+	}
+
+	nodeController := NewNodeController(nil, fakeNodeHandler, 5*time.Minute, flowcontrol.NewFakeAlwaysRateLimiter(), flowcontrol.NewFakeAlwaysRateLimiter(),
+		testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, nil, 0, false)
+	nodeController.now = func() unversioned.Time { return fakeNow }
+	if err := nodeController.monitorNodeStatus(); err != nil {
+		t.Errorf("unexpected error: %v", err)
+	}
+	fakeNodeHandler.Delete("node1", nil)
+	if err := nodeController.monitorNodeStatus(); err != nil {
+		t.Errorf("unexpected error: %v", err)
+	}
+	nodeController.podEvictor.Try(func(value TimedValue) (bool, time.Duration) {
+		nodeController.deletePods(value.Value)
+		return true, 0
+	})
+	podEvicted := false
+	for _, action := range fakeNodeHandler.Actions() {
+		if action.GetVerb() == "delete" && action.GetResource().Resource == "pods" {
+			podEvicted = true
+		}
+	}
+	if !podEvicted {
+		t.Error("expected pods to be evicted from the deleted node")
+	}
+}
+
+func TestCheckPod(t *testing.T) {
+
+	tcs := []struct {
+		pod   api.Pod
+		prune bool
+	}{
+
+		{
+			pod: api.Pod{
+				ObjectMeta: api.ObjectMeta{DeletionTimestamp: nil},
+				Spec:       api.PodSpec{NodeName: "new"},
+			},
+			prune: false,
+		},
+		{
+			pod: api.Pod{
+				ObjectMeta: api.ObjectMeta{DeletionTimestamp: nil},
+				Spec:       api.PodSpec{NodeName: "old"},
+			},
+			prune: false,
+		},
+		{
+			pod: api.Pod{
+				ObjectMeta: api.ObjectMeta{DeletionTimestamp: nil},
+				Spec:       api.PodSpec{NodeName: ""},
+			},
+			prune: false,
+		},
+		{
+			pod: api.Pod{
+				ObjectMeta: api.ObjectMeta{DeletionTimestamp: nil},
+				Spec:       api.PodSpec{NodeName: "nonexistent"},
+			},
+			prune: false,
+		},
+		{
+			pod: api.Pod{
+				ObjectMeta: api.ObjectMeta{DeletionTimestamp: &unversioned.Time{}},
+				Spec:       api.PodSpec{NodeName: "new"},
+			},
+			prune: false,
+		},
+		{
+			pod: api.Pod{
+				ObjectMeta: api.ObjectMeta{DeletionTimestamp: &unversioned.Time{}},
+				Spec:       api.PodSpec{NodeName: "old"},
+			},
+			prune: true,
+		},
+		{
+			pod: api.Pod{
+				ObjectMeta: api.ObjectMeta{DeletionTimestamp: &unversioned.Time{}},
+				Spec:       api.PodSpec{NodeName: "older"},
+			},
+			prune: true,
+		},
+		{
+			pod: api.Pod{
+				ObjectMeta: api.ObjectMeta{DeletionTimestamp: &unversioned.Time{}},
+				Spec:       api.PodSpec{NodeName: "oldest"},
+			},
+			prune: true,
+		},
+		{
+			pod: api.Pod{
+				ObjectMeta: api.ObjectMeta{DeletionTimestamp: &unversioned.Time{}},
+				Spec:       api.PodSpec{NodeName: ""},
+			},
+			prune: true,
+		},
+		{
+			pod: api.Pod{
+				ObjectMeta: api.ObjectMeta{DeletionTimestamp: &unversioned.Time{}},
+				Spec:       api.PodSpec{NodeName: "nonexistent"},
+			},
+			prune: true,
+		},
+	}
+
+	nc := NewNodeController(nil, nil, 0, nil, nil, 0, 0, 0, nil, nil, 0, false)
+	nc.nodeStore.Store = cache.NewStore(cache.MetaNamespaceKeyFunc)
+	nc.nodeStore.Store.Add(&api.Node{
+		ObjectMeta: api.ObjectMeta{
+			Name: "new",
+		},
+		Status: api.NodeStatus{
+			NodeInfo: api.NodeSystemInfo{
+				KubeletVersion: "v1.1.0",
+			},
+		},
+	})
+	nc.nodeStore.Store.Add(&api.Node{
+		ObjectMeta: api.ObjectMeta{
+			Name: "old",
+		},
+		Status: api.NodeStatus{
+			NodeInfo: api.NodeSystemInfo{
+				KubeletVersion: "v1.0.0",
+			},
+		},
+	})
+	nc.nodeStore.Store.Add(&api.Node{
+
ObjectMeta: api.ObjectMeta{ + Name: "older", + }, + Status: api.NodeStatus{ + NodeInfo: api.NodeSystemInfo{ + KubeletVersion: "v0.21.4", + }, + }, + }) + nc.nodeStore.Store.Add(&api.Node{ + ObjectMeta: api.ObjectMeta{ + Name: "oldest", + }, + Status: api.NodeStatus{ + NodeInfo: api.NodeSystemInfo{ + KubeletVersion: "v0.19.3", + }, + }, + }) + + for i, tc := range tcs { + var deleteCalls int + nc.forcefullyDeletePod = func(_ *api.Pod) error { + deleteCalls++ + return nil + } + + nc.maybeDeleteTerminatingPod(&tc.pod) + + if tc.prune && deleteCalls != 1 { + t.Errorf("[%v] expected number of delete calls to be 1 but got %v", i, deleteCalls) + } + if !tc.prune && deleteCalls != 0 { + t.Errorf("[%v] expected number of delete calls to be 0 but got %v", i, deleteCalls) + } + } +} + +func TestCleanupOrphanedPods(t *testing.T) { + newPod := func(name, node string) api.Pod { + return api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: name, + }, + Spec: api.PodSpec{ + NodeName: node, + }, + } + } + pods := []api.Pod{ + newPod("a", "foo"), + newPod("b", "bar"), + newPod("c", "gone"), + } + nc := NewNodeController(nil, nil, 0, nil, nil, 0, 0, 0, nil, nil, 0, false) + + nc.nodeStore.Store.Add(newNode("foo")) + nc.nodeStore.Store.Add(newNode("bar")) + for _, pod := range pods { + p := pod + nc.podStore.Indexer.Add(&p) + } + + var deleteCalls int + var deletedPodName string + nc.forcefullyDeletePod = func(p *api.Pod) error { + deleteCalls++ + deletedPodName = p.ObjectMeta.Name + return nil + } + nc.cleanupOrphanedPods() + + if deleteCalls != 1 { + t.Fatalf("expected one delete, got: %v", deleteCalls) + } + if deletedPodName != "c" { + t.Fatalf("expected deleted pod name to be 'c', but got: %q", deletedPodName) + } +} + +func newNode(name string) *api.Node { + return &api.Node{ + ObjectMeta: api.ObjectMeta{Name: name}, + Spec: api.NodeSpec{ + ExternalID: name, + }, + Status: api.NodeStatus{ + Capacity: api.ResourceList{ + api.ResourceName(api.ResourceCPU): resource.MustParse("10"), + api.ResourceName(api.ResourceMemory): resource.MustParse("10G"), + }, + }, + } +} + +func newPod(name, host string) *api.Pod { + return &api.Pod{ObjectMeta: api.ObjectMeta{Name: name}, Spec: api.PodSpec{NodeName: host}, + Status: api.PodStatus{Conditions: []api.PodCondition{{Type: api.PodReady, Status: api.ConditionTrue}}}} +} + +func contains(node *api.Node, nodes []*api.Node) bool { + for i := 0; i < len(nodes); i++ { + if node.Name == nodes[i].Name { + return true + } + } + return false +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/node/rate_limited_queue.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/node/rate_limited_queue.go index e934d4b4ae0b..a2865418fb7f 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/node/rate_limited_queue.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/node/rate_limited_queue.go @@ -22,7 +22,7 @@ import ( "time" "github.com/golang/glog" - "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/flowcontrol" "k8s.io/kubernetes/pkg/util/sets" ) @@ -133,15 +133,27 @@ func (q *UniqueQueue) Head() (TimedValue, bool) { return *result, true } +// Clear removes all items from the queue and duplication preventing set. +func (q *UniqueQueue) Clear() { + q.lock.Lock() + defer q.lock.Unlock() + if q.queue.Len() > 0 { + q.queue = make(TimedQueue, 0) + } + if len(q.set) > 0 { + q.set = sets.NewString() + } +} + // RateLimitedTimedQueue is a unique item priority queue ordered by the expected next time // of execution. It is also rate limited. 
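 // A usage sketch (the token-bucket parameters are illustrative):
 //
 //	q := NewRateLimitedTimedQueue(flowcontrol.NewTokenBucketRateLimiter(0.1, 10))
 //	q.Add("node0")
 //	q.Try(func(v TimedValue) (bool, time.Duration) { return true, 0 })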
 type RateLimitedTimedQueue struct {
 	queue   UniqueQueue
-	limiter util.RateLimiter
+	limiter flowcontrol.RateLimiter
 }
 
 // Creates a new queue which will use the given RateLimiter to oversee execution.
-func NewRateLimitedTimedQueue(limiter util.RateLimiter) *RateLimitedTimedQueue {
+func NewRateLimitedTimedQueue(limiter flowcontrol.RateLimiter) *RateLimitedTimedQueue {
 	return &RateLimitedTimedQueue{
 		queue: UniqueQueue{
 			queue: TimedQueue{},
@@ -164,7 +176,7 @@ func (q *RateLimitedTimedQueue) Try(fn ActionFunc) {
 	for ok {
 		// rate limit the queue checking
 		if !q.limiter.TryAccept() {
-			glog.V(10).Info("Try rate limitted...")
+			glog.V(10).Info("Try rate limited...")
 			// Try again later
 			break
 		}
@@ -199,3 +211,8 @@ func (q *RateLimitedTimedQueue) Add(value string) bool {
 func (q *RateLimitedTimedQueue) Remove(value string) bool {
 	return q.queue.Remove(value)
 }
+
+// Clear removes all items from the queue.
+func (q *RateLimitedTimedQueue) Clear() {
+	q.queue.Clear()
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/node/rate_limited_queue_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/node/rate_limited_queue_test.go
new file mode 100644
index 000000000000..fded671abd06
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/node/rate_limited_queue_test.go
@@ -0,0 +1,283 @@
+/*
+Copyright 2015 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package node
+
+import (
+	"reflect"
+	"testing"
+	"time"
+
+	"k8s.io/kubernetes/pkg/util/flowcontrol"
+	"k8s.io/kubernetes/pkg/util/sets"
+)
+
+func CheckQueueEq(lhs []string, rhs TimedQueue) bool {
+	for i := 0; i < len(lhs); i++ {
+		if rhs[i].Value != lhs[i] {
+			return false
+		}
+	}
+	return true
+}
+
+func CheckSetEq(lhs, rhs sets.String) bool {
+	return lhs.HasAll(rhs.List()...) && rhs.HasAll(lhs.List()...)
+}
+
+func TestAddNode(t *testing.T) {
+	evictor := NewRateLimitedTimedQueue(flowcontrol.NewFakeAlwaysRateLimiter())
+	evictor.Add("first")
+	evictor.Add("second")
+	evictor.Add("third")
+
+	queuePattern := []string{"first", "second", "third"}
+	if len(evictor.queue.queue) != len(queuePattern) {
+		t.Fatalf("Queue %v should have length %d", evictor.queue.queue, len(queuePattern))
+	}
+	if !CheckQueueEq(queuePattern, evictor.queue.queue) {
+		t.Errorf("Invalid queue. Got %v, expected %v", evictor.queue.queue, queuePattern)
+	}
+
+	setPattern := sets.NewString("first", "second", "third")
+	if len(evictor.queue.set) != len(setPattern) {
+		t.Fatalf("Map %v should have length %d", evictor.queue.set, len(setPattern))
+	}
+	if !CheckSetEq(setPattern, evictor.queue.set) {
+		t.Errorf("Invalid map.
Got %v, expected %v", evictor.queue.set, setPattern) + } +} + +func TestDelNode(t *testing.T) { + defer func() { now = time.Now }() + var tick int64 + now = func() time.Time { + t := time.Unix(tick, 0) + tick++ + return t + } + evictor := NewRateLimitedTimedQueue(flowcontrol.NewFakeAlwaysRateLimiter()) + evictor.Add("first") + evictor.Add("second") + evictor.Add("third") + evictor.Remove("first") + + queuePattern := []string{"second", "third"} + if len(evictor.queue.queue) != len(queuePattern) { + t.Fatalf("Queue %v should have length %d", evictor.queue.queue, len(queuePattern)) + } + if !CheckQueueEq(queuePattern, evictor.queue.queue) { + t.Errorf("Invalid queue. Got %v, expected %v", evictor.queue.queue, queuePattern) + } + + setPattern := sets.NewString("second", "third") + if len(evictor.queue.set) != len(setPattern) { + t.Fatalf("Map %v should have length %d", evictor.queue.set, len(setPattern)) + } + if !CheckSetEq(setPattern, evictor.queue.set) { + t.Errorf("Invalid map. Got %v, expected %v", evictor.queue.set, setPattern) + } + + evictor = NewRateLimitedTimedQueue(flowcontrol.NewFakeAlwaysRateLimiter()) + evictor.Add("first") + evictor.Add("second") + evictor.Add("third") + evictor.Remove("second") + + queuePattern = []string{"first", "third"} + if len(evictor.queue.queue) != len(queuePattern) { + t.Fatalf("Queue %v should have length %d", evictor.queue.queue, len(queuePattern)) + } + if !CheckQueueEq(queuePattern, evictor.queue.queue) { + t.Errorf("Invalid queue. Got %v, expected %v", evictor.queue.queue, queuePattern) + } + + setPattern = sets.NewString("first", "third") + if len(evictor.queue.set) != len(setPattern) { + t.Fatalf("Map %v should have length %d", evictor.queue.set, len(setPattern)) + } + if !CheckSetEq(setPattern, evictor.queue.set) { + t.Errorf("Invalid map. Got %v, expected %v", evictor.queue.set, setPattern) + } + + evictor = NewRateLimitedTimedQueue(flowcontrol.NewFakeAlwaysRateLimiter()) + evictor.Add("first") + evictor.Add("second") + evictor.Add("third") + evictor.Remove("third") + + queuePattern = []string{"first", "second"} + if len(evictor.queue.queue) != len(queuePattern) { + t.Fatalf("Queue %v should have length %d", evictor.queue.queue, len(queuePattern)) + } + if !CheckQueueEq(queuePattern, evictor.queue.queue) { + t.Errorf("Invalid queue. Got %v, expected %v", evictor.queue.queue, queuePattern) + } + + setPattern = sets.NewString("first", "second") + if len(evictor.queue.set) != len(setPattern) { + t.Fatalf("Map %v should have length %d", evictor.queue.set, len(setPattern)) + } + if !CheckSetEq(setPattern, evictor.queue.set) { + t.Errorf("Invalid map. Got %v, expected %v", evictor.queue.set, setPattern) + } +} + +func TestTry(t *testing.T) { + evictor := NewRateLimitedTimedQueue(flowcontrol.NewFakeAlwaysRateLimiter()) + evictor.Add("first") + evictor.Add("second") + evictor.Add("third") + evictor.Remove("second") + + deletedMap := sets.NewString() + evictor.Try(func(value TimedValue) (bool, time.Duration) { + deletedMap.Insert(value.Value) + return true, 0 + }) + + setPattern := sets.NewString("first", "third") + if len(deletedMap) != len(setPattern) { + t.Fatalf("Map %v should have length %d", evictor.queue.set, len(setPattern)) + } + if !CheckSetEq(setPattern, deletedMap) { + t.Errorf("Invalid map. 
Got %v, expected %v", deletedMap, setPattern) + } +} + +func TestTryOrdering(t *testing.T) { + defer func() { now = time.Now }() + current := time.Unix(0, 0) + delay := 0 + // the current time is incremented by 1ms every time now is invoked + now = func() time.Time { + if delay > 0 { + delay-- + } else { + current = current.Add(time.Millisecond) + } + t.Logf("time %d", current.UnixNano()) + return current + } + evictor := NewRateLimitedTimedQueue(flowcontrol.NewFakeAlwaysRateLimiter()) + evictor.Add("first") + evictor.Add("second") + evictor.Add("third") + + order := []string{} + count := 0 + hasQueued := false + evictor.Try(func(value TimedValue) (bool, time.Duration) { + count++ + t.Logf("eviction %d", count) + if value.ProcessAt.IsZero() { + t.Fatalf("processAt should not be zero") + } + switch value.Value { + case "first": + if !value.AddedAt.Equal(time.Unix(0, time.Millisecond.Nanoseconds())) { + t.Fatalf("added time for %s is %d", value.Value, value.AddedAt) + } + + case "second": + if !value.AddedAt.Equal(time.Unix(0, 2*time.Millisecond.Nanoseconds())) { + t.Fatalf("added time for %s is %d", value.Value, value.AddedAt) + } + if hasQueued { + if !value.ProcessAt.Equal(time.Unix(0, 6*time.Millisecond.Nanoseconds())) { + t.Fatalf("process time for %s is %d", value.Value, value.ProcessAt) + } + break + } + hasQueued = true + delay = 1 + t.Logf("going to delay") + return false, 2 * time.Millisecond + + case "third": + if !value.AddedAt.Equal(time.Unix(0, 3*time.Millisecond.Nanoseconds())) { + t.Fatalf("added time for %s is %d", value.Value, value.AddedAt) + } + } + order = append(order, value.Value) + return true, 0 + }) + if !reflect.DeepEqual(order, []string{"first", "third"}) { + t.Fatalf("order was wrong: %v", order) + } + if count != 3 { + t.Fatalf("unexpected iterations: %d", count) + } +} + +func TestTryRemovingWhileTry(t *testing.T) { + evictor := NewRateLimitedTimedQueue(flowcontrol.NewFakeAlwaysRateLimiter()) + evictor.Add("first") + evictor.Add("second") + evictor.Add("third") + + processing := make(chan struct{}) + wait := make(chan struct{}) + order := []string{} + count := 0 + queued := false + + // while the Try function is processing "second", remove it from the queue + // we should not see "second" retried. 
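+ // The handshake: the Try callback closes "processing" when it first sees + // "second" and then blocks on "wait"; the goroutine below removes "second" + // and closes "wait", so the retry the callback requests targets an item + // that is no longer in the queue.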
+ go func() { + <-processing + evictor.Remove("second") + close(wait) + }() + + evictor.Try(func(value TimedValue) (bool, time.Duration) { + count++ + if value.AddedAt.IsZero() { + t.Fatalf("added should not be zero") + } + if value.ProcessAt.IsZero() { + t.Fatalf("next should not be zero") + } + if !queued && value.Value == "second" { + queued = true + close(processing) + <-wait + return false, time.Millisecond + } + order = append(order, value.Value) + return true, 0 + }) + + if !reflect.DeepEqual(order, []string{"first", "third"}) { + t.Fatalf("order was wrong: %v", order) + } + if count != 3 { + t.Fatalf("unexpected iterations: %d", count) + } +} + +func TestClear(t *testing.T) { + evictor := NewRateLimitedTimedQueue(flowcontrol.NewFakeAlwaysRateLimiter()) + evictor.Add("first") + evictor.Add("second") + evictor.Add("third") + + evictor.Clear() + + if len(evictor.queue.queue) != 0 { + t.Fatalf("Clear should remove all elements from the queue.") + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/persistentvolume/binder_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/persistentvolume/binder_test.go new file mode 100644 index 000000000000..b981c17d9199 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/persistentvolume/binder_test.go @@ -0,0 +1,426 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package persistentvolume + +import ( + "testing" + + "k8s.io/kubernetes/pkg/api" +) + +// Test single call to syncClaim and syncVolume methods. +// 1. Fill in the controller with initial data +// 2. Call the tested function (syncClaim/syncVolume) via +// controllerTest.testCall *once*. +// 3. Compare resulting volumes and claims with expected volumes and claims. +func TestSync(t *testing.T) { + tests := []controllerTest{ + // [Unit test set 1] User did not care which PV they get. + // Test the matching with no claim.Spec.VolumeName and with various + // volumes. + { + // syncClaim binds to a matching unbound volume. + "1-1 - successful bind", + newVolumeArray("volume1-1", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain), + newVolumeArray("volume1-1", "1Gi", "uid1-1", "claim1-1", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController), + newClaimArray("claim1-1", "uid1-1", "1Gi", "", api.ClaimPending), + newClaimArray("claim1-1", "uid1-1", "1Gi", "volume1-1", api.ClaimBound, annBoundByController, annBindCompleted), + noevents, noerrors, testSyncClaim, + }, + { + // syncClaim does not do anything when there is no matching volume. 
+ "1-2 - noop", + newVolumeArray("volume1-2", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain), + newVolumeArray("volume1-2", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain), + newClaimArray("claim1-2", "uid1-2", "10Gi", "", api.ClaimPending), + newClaimArray("claim1-2", "uid1-2", "10Gi", "", api.ClaimPending), + noevents, noerrors, testSyncClaim, + }, + { + // syncClaim resets claim.Status to Pending when there is no + // matching volume. + "1-3 - reset to Pending", + newVolumeArray("volume1-3", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain), + newVolumeArray("volume1-3", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain), + newClaimArray("claim1-3", "uid1-3", "10Gi", "", api.ClaimBound), + newClaimArray("claim1-3", "uid1-3", "10Gi", "", api.ClaimPending), + noevents, noerrors, testSyncClaim, + }, + { + // syncClaim binds claims to the smallest matching volume + "1-4 - smallest volume", + []*api.PersistentVolume{ + newVolume("volume1-4_1", "10Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain), + newVolume("volume1-4_2", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain), + }, + []*api.PersistentVolume{ + newVolume("volume1-4_1", "10Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain), + newVolume("volume1-4_2", "1Gi", "uid1-4", "claim1-4", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController), + }, + newClaimArray("claim1-4", "uid1-4", "1Gi", "", api.ClaimPending), + newClaimArray("claim1-4", "uid1-4", "1Gi", "volume1-4_2", api.ClaimBound, annBoundByController, annBindCompleted), + noevents, noerrors, testSyncClaim, + }, + { + // syncClaim binds a claim only to volume that points to it (by + // name), even though a smaller one is available. + "1-5 - prebound volume by name - success", + []*api.PersistentVolume{ + newVolume("volume1-5_1", "10Gi", "", "claim1-5", api.VolumePending, api.PersistentVolumeReclaimRetain), + newVolume("volume1-5_2", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain), + }, + []*api.PersistentVolume{ + newVolume("volume1-5_1", "10Gi", "uid1-5", "claim1-5", api.VolumeBound, api.PersistentVolumeReclaimRetain), + newVolume("volume1-5_2", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain), + }, + newClaimArray("claim1-5", "uid1-5", "1Gi", "", api.ClaimPending), + newClaimArray("claim1-5", "uid1-5", "1Gi", "volume1-5_1", api.ClaimBound, annBoundByController, annBindCompleted), + noevents, noerrors, testSyncClaim, + }, + { + // syncClaim binds a claim only to volume that points to it (by + // UID), even though a smaller one is available. 
+ "1-6 - prebound volume by UID - success", + []*api.PersistentVolume{ + newVolume("volume1-6_1", "10Gi", "uid1-6", "claim1-6", api.VolumePending, api.PersistentVolumeReclaimRetain), + newVolume("volume1-6_2", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain), + }, + []*api.PersistentVolume{ + newVolume("volume1-6_1", "10Gi", "uid1-6", "claim1-6", api.VolumeBound, api.PersistentVolumeReclaimRetain), + newVolume("volume1-6_2", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain), + }, + newClaimArray("claim1-6", "uid1-6", "1Gi", "", api.ClaimPending), + newClaimArray("claim1-6", "uid1-6", "1Gi", "volume1-6_1", api.ClaimBound, annBoundByController, annBindCompleted), + noevents, noerrors, testSyncClaim, + }, + { + // syncClaim does not bind claim to a volume prebound to a claim with + // same name and different UID + "1-7 - prebound volume to different claim", + newVolumeArray("volume1-7", "10Gi", "uid1-777", "claim1-7", api.VolumePending, api.PersistentVolumeReclaimRetain), + newVolumeArray("volume1-7", "10Gi", "uid1-777", "claim1-7", api.VolumePending, api.PersistentVolumeReclaimRetain), + newClaimArray("claim1-7", "uid1-7", "1Gi", "", api.ClaimPending), + newClaimArray("claim1-7", "uid1-7", "1Gi", "", api.ClaimPending), + noevents, noerrors, testSyncClaim, + }, + { + // syncClaim completes binding - simulates controller crash after + // PV.ClaimRef is saved + "1-8 - complete bind after crash - PV bound", + newVolumeArray("volume1-8", "1Gi", "uid1-8", "claim1-8", api.VolumePending, api.PersistentVolumeReclaimRetain, annBoundByController), + newVolumeArray("volume1-8", "1Gi", "uid1-8", "claim1-8", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController), + newClaimArray("claim1-8", "uid1-8", "1Gi", "", api.ClaimPending), + newClaimArray("claim1-8", "uid1-8", "1Gi", "volume1-8", api.ClaimBound, annBoundByController, annBindCompleted), + noevents, noerrors, testSyncClaim, + }, + { + // syncClaim completes binding - simulates controller crash after + // PV.Status is saved + "1-9 - complete bind after crash - PV status saved", + newVolumeArray("volume1-9", "1Gi", "uid1-9", "claim1-9", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController), + newVolumeArray("volume1-9", "1Gi", "uid1-9", "claim1-9", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController), + newClaimArray("claim1-9", "uid1-9", "1Gi", "", api.ClaimPending), + newClaimArray("claim1-9", "uid1-9", "1Gi", "volume1-9", api.ClaimBound, annBoundByController, annBindCompleted), + noevents, noerrors, testSyncClaim, + }, + { + // syncClaim completes binding - simulates controller crash after + // PVC.VolumeName is saved + "10 - complete bind after crash - PVC bound", + newVolumeArray("volume1-10", "1Gi", "uid1-10", "claim1-10", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController), + newVolumeArray("volume1-10", "1Gi", "uid1-10", "claim1-10", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController), + newClaimArray("claim1-10", "uid1-10", "1Gi", "volume1-10", api.ClaimPending, annBoundByController, annBindCompleted), + newClaimArray("claim1-10", "uid1-10", "1Gi", "volume1-10", api.ClaimBound, annBoundByController, annBindCompleted), + noevents, noerrors, testSyncClaim, + }, + // [Unit test set 2] User asked for a specific PV. + // Test the binding when pv.ClaimRef is already set by controller or + // by user. 
+ { + // syncClaim with claim pre-bound to a PV that does not exist + "2-1 - claim prebound to non-existing volume - noop", + novolumes, + novolumes, + newClaimArray("claim2-1", "uid2-1", "10Gi", "volume2-1", api.ClaimPending), + newClaimArray("claim2-1", "uid2-1", "10Gi", "volume2-1", api.ClaimPending), + noevents, noerrors, testSyncClaim, + }, + { + // syncClaim with claim pre-bound to a PV that does not exist. + // Check that the claim status is reset to Pending + "2-2 - claim prebound to non-existing volume - reset status", + novolumes, + novolumes, + newClaimArray("claim2-2", "uid2-2", "10Gi", "volume2-2", api.ClaimBound), + newClaimArray("claim2-2", "uid2-2", "10Gi", "volume2-2", api.ClaimPending), + noevents, noerrors, testSyncClaim, + }, + { + // syncClaim with claim pre-bound to a PV that exists and is + // unbound. Check it gets bound and no annBoundByController is set. + "2-3 - claim prebound to unbound volume", + newVolumeArray("volume2-3", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain), + newVolumeArray("volume2-3", "1Gi", "uid2-3", "claim2-3", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController), + newClaimArray("claim2-3", "uid2-3", "10Gi", "volume2-3", api.ClaimPending), + newClaimArray("claim2-3", "uid2-3", "10Gi", "volume2-3", api.ClaimBound, annBindCompleted), + noevents, noerrors, testSyncClaim, + }, + { + // claim with claim pre-bound to a PV that is pre-bound to the claim + // by name. Check it gets bound and no annBoundByController is set. + "2-4 - claim prebound to prebound volume by name", + newVolumeArray("volume2-4", "1Gi", "", "claim2-4", api.VolumePending, api.PersistentVolumeReclaimRetain), + newVolumeArray("volume2-4", "1Gi", "uid2-4", "claim2-4", api.VolumeBound, api.PersistentVolumeReclaimRetain), + newClaimArray("claim2-4", "uid2-4", "10Gi", "volume2-4", api.ClaimPending), + newClaimArray("claim2-4", "uid2-4", "10Gi", "volume2-4", api.ClaimBound, annBindCompleted), + noevents, noerrors, testSyncClaim, + }, + { + // syncClaim with claim pre-bound to a PV that is pre-bound to the + // claim by UID. Check it gets bound and no annBoundByController is + // set. + "2-5 - claim prebound to prebound volume by UID", + newVolumeArray("volume2-5", "1Gi", "uid2-5", "claim2-5", api.VolumePending, api.PersistentVolumeReclaimRetain), + newVolumeArray("volume2-5", "1Gi", "uid2-5", "claim2-5", api.VolumeBound, api.PersistentVolumeReclaimRetain), + newClaimArray("claim2-5", "uid2-5", "10Gi", "volume2-5", api.ClaimPending), + newClaimArray("claim2-5", "uid2-5", "10Gi", "volume2-5", api.ClaimBound, annBindCompleted), + noevents, noerrors, testSyncClaim, + }, + { + // syncClaim with claim pre-bound to a PV that is bound to different + // claim. Check it's reset to Pending. + "2-6 - claim prebound to already bound volume", + newVolumeArray("volume2-6", "1Gi", "uid2-6_1", "claim2-6_1", api.VolumeBound, api.PersistentVolumeReclaimRetain), + newVolumeArray("volume2-6", "1Gi", "uid2-6_1", "claim2-6_1", api.VolumeBound, api.PersistentVolumeReclaimRetain), + newClaimArray("claim2-6", "uid2-6", "10Gi", "volume2-6", api.ClaimBound), + newClaimArray("claim2-6", "uid2-6", "10Gi", "volume2-6", api.ClaimPending), + noevents, noerrors, testSyncClaim, + }, + { + // syncClaim with claim bound by controller to a PV that is bound to + // different claim. Check it throws an error. 
+ "2-7 - claim bound by controller to already bound volume", + newVolumeArray("volume2-7", "1Gi", "uid2-7_1", "claim2-7_1", api.VolumeBound, api.PersistentVolumeReclaimRetain), + newVolumeArray("volume2-7", "1Gi", "uid2-7_1", "claim2-7_1", api.VolumeBound, api.PersistentVolumeReclaimRetain), + newClaimArray("claim2-7", "uid2-7", "10Gi", "volume2-7", api.ClaimBound, annBoundByController), + newClaimArray("claim2-7", "uid2-7", "10Gi", "volume2-7", api.ClaimBound, annBoundByController), + noevents, noerrors, testSyncClaimError, + }, + // [Unit test set 3] Syncing bound claim + { + // syncClaim with claim bound and its claim.Spec.VolumeName is + // removed. Check it's marked as Lost. + "3-1 - bound claim with missing VolumeName", + novolumes, + novolumes, + newClaimArray("claim3-1", "uid3-1", "10Gi", "", api.ClaimBound, annBoundByController, annBindCompleted), + newClaimArray("claim3-1", "uid3-1", "10Gi", "", api.ClaimLost, annBoundByController, annBindCompleted), + []string{"Warning ClaimLost"}, noerrors, testSyncClaim, + }, + { + // syncClaim with claim bound to non-exising volume. Check it's + // marked as Lost. + "3-2 - bound claim with missing volume", + novolumes, + novolumes, + newClaimArray("claim3-2", "uid3-2", "10Gi", "volume3-2", api.ClaimBound, annBoundByController, annBindCompleted), + newClaimArray("claim3-2", "uid3-2", "10Gi", "volume3-2", api.ClaimLost, annBoundByController, annBindCompleted), + []string{"Warning ClaimLost"}, noerrors, testSyncClaim, + }, + { + // syncClaim with claim bound to unbound volume. Check it's bound. + // Also check that Pending phase is set to Bound + "3-3 - bound claim with unbound volume", + newVolumeArray("volume3-3", "10Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain), + newVolumeArray("volume3-3", "10Gi", "uid3-3", "claim3-3", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController), + newClaimArray("claim3-3", "uid3-3", "10Gi", "volume3-3", api.ClaimPending, annBoundByController, annBindCompleted), + newClaimArray("claim3-3", "uid3-3", "10Gi", "volume3-3", api.ClaimBound, annBoundByController, annBindCompleted), + noevents, noerrors, testSyncClaim, + }, + { + // syncClaim with claim bound to volume with missing (or different) + // volume.Spec.ClaimRef.UID. Check that the claim is marked as lost. + "3-4 - bound claim with prebound volume", + newVolumeArray("volume3-4", "10Gi", "claim3-4-x", "claim3-4", api.VolumePending, api.PersistentVolumeReclaimRetain), + newVolumeArray("volume3-4", "10Gi", "claim3-4-x", "claim3-4", api.VolumePending, api.PersistentVolumeReclaimRetain), + newClaimArray("claim3-4", "uid3-4", "10Gi", "volume3-4", api.ClaimPending, annBoundByController, annBindCompleted), + newClaimArray("claim3-4", "uid3-4", "10Gi", "volume3-4", api.ClaimLost, annBoundByController, annBindCompleted), + []string{"Warning ClaimMisbound"}, noerrors, testSyncClaim, + }, + { + // syncClaim with claim bound to bound volume. Check that the + // controller does not do anything. 
Also check that Pending phase is + // set to Bound + "3-5 - bound claim with bound volume", + newVolumeArray("volume3-5", "10Gi", "uid3-5", "claim3-5", api.VolumePending, api.PersistentVolumeReclaimRetain), + newVolumeArray("volume3-5", "10Gi", "uid3-5", "claim3-5", api.VolumeBound, api.PersistentVolumeReclaimRetain), + newClaimArray("claim3-5", "uid3-5", "10Gi", "volume3-5", api.ClaimPending, annBindCompleted), + newClaimArray("claim3-5", "uid3-5", "10Gi", "volume3-5", api.ClaimBound, annBindCompleted), + noevents, noerrors, testSyncClaim, + }, + { + // syncClaim with claim bound to a volume that is bound to different + // claim. Check that the claim is marked as lost. + // TODO: test that an event is emitted + "3-6 - bound claim with bound volume", + newVolumeArray("volume3-6", "10Gi", "uid3-6-x", "claim3-6-x", api.VolumePending, api.PersistentVolumeReclaimRetain), + newVolumeArray("volume3-6", "10Gi", "uid3-6-x", "claim3-6-x", api.VolumePending, api.PersistentVolumeReclaimRetain), + newClaimArray("claim3-6", "uid3-6", "10Gi", "volume3-6", api.ClaimPending, annBindCompleted), + newClaimArray("claim3-6", "uid3-6", "10Gi", "volume3-6", api.ClaimLost, annBindCompleted), + []string{"Warning ClaimMisbound"}, noerrors, testSyncClaim, + }, + // [Unit test set 4] All syncVolume tests. + { + // syncVolume with pending volume. Check it's marked as Available. + "4-1 - pending volume", + newVolumeArray("volume4-1", "10Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain), + newVolumeArray("volume4-1", "10Gi", "", "", api.VolumeAvailable, api.PersistentVolumeReclaimRetain), + noclaims, + noclaims, + noevents, noerrors, testSyncVolume, + }, + { + // syncVolume with prebound pending volume. Check it's marked as + // Available. + "4-2 - pending prebound volume", + newVolumeArray("volume4-2", "10Gi", "", "claim4-2", api.VolumePending, api.PersistentVolumeReclaimRetain), + newVolumeArray("volume4-2", "10Gi", "", "claim4-2", api.VolumeAvailable, api.PersistentVolumeReclaimRetain), + noclaims, + noclaims, + noevents, noerrors, testSyncVolume, + }, + { + // syncVolume with volume bound to missing claim. + // Check the volume gets Released + "4-3 - bound volume with missing claim", + newVolumeArray("volume4-3", "10Gi", "uid4-3", "claim4-3", api.VolumeBound, api.PersistentVolumeReclaimRetain), + newVolumeArray("volume4-3", "10Gi", "uid4-3", "claim4-3", api.VolumeReleased, api.PersistentVolumeReclaimRetain), + noclaims, + noclaims, + noevents, noerrors, testSyncVolume, + }, + { + // syncVolume with volume bound to claim with different UID. + // Check the volume gets Released. + "4-4 - volume bound to claim with different UID", + newVolumeArray("volume4-4", "10Gi", "uid4-4", "claim4-4", api.VolumeBound, api.PersistentVolumeReclaimRetain), + newVolumeArray("volume4-4", "10Gi", "uid4-4", "claim4-4", api.VolumeReleased, api.PersistentVolumeReclaimRetain), + newClaimArray("claim4-4", "uid4-4-x", "10Gi", "volume4-4", api.ClaimBound, annBindCompleted), + newClaimArray("claim4-4", "uid4-4-x", "10Gi", "volume4-4", api.ClaimBound, annBindCompleted), + noevents, noerrors, testSyncVolume, + }, + { + // syncVolume with volume bound by controller to unbound claim. + // Check syncVolume does not do anything. 
+ "4-5 - volume bound by controller to unbound claim", + newVolumeArray("volume4-5", "10Gi", "uid4-5", "claim4-5", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController), + newVolumeArray("volume4-5", "10Gi", "uid4-5", "claim4-5", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController), + newClaimArray("claim4-5", "uid4-5", "10Gi", "", api.ClaimPending), + newClaimArray("claim4-5", "uid4-5", "10Gi", "", api.ClaimPending), + noevents, noerrors, testSyncVolume, + }, + { + // syncVolume with volume bound by user to unbound claim. + // Check syncVolume does not do anything. + "4-5 - volume bound by user to bound claim", + newVolumeArray("volume4-5", "10Gi", "uid4-5", "claim4-5", api.VolumeBound, api.PersistentVolumeReclaimRetain), + newVolumeArray("volume4-5", "10Gi", "uid4-5", "claim4-5", api.VolumeBound, api.PersistentVolumeReclaimRetain), + newClaimArray("claim4-5", "uid4-5", "10Gi", "", api.ClaimPending), + newClaimArray("claim4-5", "uid4-5", "10Gi", "", api.ClaimPending), + noevents, noerrors, testSyncVolume, + }, + { + // syncVolume with volume bound to bound claim. + // Check that the volume is marked as Bound. + "4-6 - volume bound by to bound claim", + newVolumeArray("volume4-6", "10Gi", "uid4-6", "claim4-6", api.VolumeAvailable, api.PersistentVolumeReclaimRetain), + newVolumeArray("volume4-6", "10Gi", "uid4-6", "claim4-6", api.VolumeBound, api.PersistentVolumeReclaimRetain), + newClaimArray("claim4-6", "uid4-6", "10Gi", "volume4-6", api.ClaimBound), + newClaimArray("claim4-6", "uid4-6", "10Gi", "volume4-6", api.ClaimBound), + noevents, noerrors, testSyncVolume, + }, + { + // syncVolume with volume bound by controller to claim bound to + // another volume. Check that the volume is rolled back. + "4-7 - volume bound by controller to claim bound somewhere else", + newVolumeArray("volume4-7", "10Gi", "uid4-7", "claim4-7", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController), + newVolumeArray("volume4-7", "10Gi", "", "", api.VolumeAvailable, api.PersistentVolumeReclaimRetain), + newClaimArray("claim4-7", "uid4-7", "10Gi", "volume4-7-x", api.ClaimBound), + newClaimArray("claim4-7", "uid4-7", "10Gi", "volume4-7-x", api.ClaimBound), + noevents, noerrors, testSyncVolume, + }, + { + // syncVolume with volume bound by user to claim bound to + // another volume. Check that the volume is marked as Available + // and its UID is reset. + "4-8 - volume bound by user to claim bound somewhere else", + newVolumeArray("volume4-8", "10Gi", "uid4-8", "claim4-8", api.VolumeBound, api.PersistentVolumeReclaimRetain), + newVolumeArray("volume4-8", "10Gi", "", "claim4-8", api.VolumeAvailable, api.PersistentVolumeReclaimRetain), + newClaimArray("claim4-8", "uid4-8", "10Gi", "volume4-8-x", api.ClaimBound), + newClaimArray("claim4-8", "uid4-8", "10Gi", "volume4-8-x", api.ClaimBound), + noevents, noerrors, testSyncVolume, + }, + } + runSyncTests(t, tests) +} + +// Test multiple calls to syncClaim/syncVolume and periodic sync of all +// volume/claims. The test follows this pattern: +// 0. Load the controller with initial data. +// 1. Call controllerTest.testCall() once as in TestSync() +// 2. For all volumes/claims changed by previous syncVolume/syncClaim calls, +// call appropriate syncVolume/syncClaim (simulating "volume/claim changed" +// events). Go to 2. if these calls change anything. +// 3. When all changes are processed and no new changes were made, call +// syncVolume/syncClaim on all volumes/claims (simulating "periodic sync"). +// 4. 
If some changes were done by step 3., go to 2. (simulation of +// "volume/claim updated" events, eventually performing step 3. again) +// 5. When 3. does not make any changes, finish the tests and compare the final set +// of volumes/claims with expected claims/volumes and report differences. +// A limit on the number of calls is enforced to prevent endless loops. +func TestMultiSync(t *testing.T) { + tests := []controllerTest{ + // Test simple binding + { + // syncClaim binds to a matching unbound volume. + "10-1 - successful bind", + newVolumeArray("volume10-1", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain), + newVolumeArray("volume10-1", "1Gi", "uid10-1", "claim10-1", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController), + newClaimArray("claim10-1", "uid10-1", "1Gi", "", api.ClaimPending), + newClaimArray("claim10-1", "uid10-1", "1Gi", "volume10-1", api.ClaimBound, annBoundByController, annBindCompleted), + noevents, noerrors, testSyncClaim, + }, + { + // Two controllers bound two PVs to a single claim. Test that one of them + // wins and the second rolls back. + "10-2 - bind PV race", + []*api.PersistentVolume{ + newVolume("volume10-2-1", "1Gi", "uid10-2", "claim10-2", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController), + newVolume("volume10-2-2", "1Gi", "uid10-2", "claim10-2", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController), + }, + []*api.PersistentVolume{ + newVolume("volume10-2-1", "1Gi", "uid10-2", "claim10-2", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController), + newVolume("volume10-2-2", "1Gi", "", "", api.VolumeAvailable, api.PersistentVolumeReclaimRetain), + }, + newClaimArray("claim10-2", "uid10-2", "1Gi", "volume10-2-1", api.ClaimBound, annBoundByController, annBindCompleted), + newClaimArray("claim10-2", "uid10-2", "1Gi", "volume10-2-1", api.ClaimBound, annBoundByController, annBindCompleted), + noevents, noerrors, testSyncClaim, + }, + } + + runMultisyncTests(t, tests) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/persistentvolume/controller.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/persistentvolume/controller.go new file mode 100644 index 000000000000..f552f773ecd5 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/persistentvolume/controller.go @@ -0,0 +1,1227 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package persistentvolume + +import ( + "fmt" + "sync" + "time" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/client/cache" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" + "k8s.io/kubernetes/pkg/client/record" + "k8s.io/kubernetes/pkg/cloudprovider" + "k8s.io/kubernetes/pkg/controller/framework" + "k8s.io/kubernetes/pkg/conversion" + vol "k8s.io/kubernetes/pkg/volume" + + "github.com/golang/glog" +) + +// Design: +// +// The fundamental key to this design is the bi-directional "pointer" between +// PersistentVolumes (PVs) and PersistentVolumeClaims (PVCs), which is +// represented here as pvc.Spec.VolumeName and pv.Spec.ClaimRef. The bi-directionality +// is complicated to manage in a transactionless system, but without it we +// can't ensure sane behavior in the face of different forms of trouble. For +// example, a rogue HA controller instance could end up racing and making +// multiple bindings that are indistinguishable, resulting in potential data +// loss. +// +// This controller is designed to work in active-passive high availability mode. +// It *could* also work in active-active HA mode, as all the object transitions +// are designed to cope with it; however, performance would be lower, because +// the two active controllers would frequently step on each other's toes. +// +// This controller supports pre-bound (by the creator) objects in both +// directions: a PVC that wants a specific PV or a PV that is reserved for a +// specific PVC. +// +// The binding is a two-step process. PV.Spec.ClaimRef is modified first and +// PVC.Spec.VolumeName second. At any point in this transaction, the PV or PVC +// can be modified by the user or another controller, or deleted completely. Also, two +// (or more) controllers may try to bind different volumes to different claims +// at the same time. The controller must recover from any conflicts that may +// arise from these conditions. + +// annBindCompleted annotation applies to PVCs. It indicates that the lifecycle +// of the PVC has passed through the initial setup. This information changes how +// we interpret some observations of the state of the objects. The value of this +// annotation does not matter. +const annBindCompleted = "pv.kubernetes.io/bind-completed" + +// annBoundByController annotation applies to PVs and PVCs. It indicates that +// the binding (PV->PVC or PVC->PV) was installed by the controller. The +// absence of this annotation means the binding was done by the user (i.e. +// pre-bound). The value of this annotation does not matter. +const annBoundByController = "pv.kubernetes.io/bound-by-controller" + +// annClass annotation represents a new field which instructs dynamic +// provisioning to choose a particular storage class (aka profile). +// The value of this annotation should be empty. +const annClass = "volume.alpha.kubernetes.io/storage-class" + +// This annotation is added to a PV that has been dynamically provisioned by +// Kubernetes. Its value is the name of the volume plugin that created the volume. +// It serves both the user (to show where a PV comes from) and Kubernetes (to +// recognize dynamically provisioned PVs in its decisions). +const annDynamicallyProvisioned = "pv.kubernetes.io/provisioned-by" + +// Name of a tag attached to a real volume in the cloud (e.g. AWS EBS or GCE PD) +// with the namespace of the persistent volume claim used to create this volume. 
+const cloudVolumeCreatedForClaimNamespaceTag = "kubernetes.io/created-for/pvc/namespace" + +// Name of a tag attached to a real volume in the cloud (e.g. AWS EBS or GCE PD) +// with the name of the persistent volume claim used to create this volume. +const cloudVolumeCreatedForClaimNameTag = "kubernetes.io/created-for/pvc/name" + +// Name of a tag attached to a real volume in the cloud (e.g. AWS EBS or GCE PD) +// with the name of the corresponding Kubernetes persistent volume. +const cloudVolumeCreatedForVolumeNameTag = "kubernetes.io/created-for/pv/name" + +// Number of retries when we create a PV object for a provisioned volume. +const createProvisionedPVRetryCount = 5 + +// Interval between retries when we create a PV object for a provisioned volume. +const createProvisionedPVInterval = 10 * time.Second + +// PersistentVolumeController is a controller that synchronizes +// PersistentVolumeClaims and PersistentVolumes. It starts two +// framework.Controllers that watch PersistentVolume and PersistentVolumeClaim +// changes. +type PersistentVolumeController struct { + volumes persistentVolumeOrderedIndex + volumeController *framework.Controller + volumeControllerStopCh chan struct{} + claims cache.Store + claimController *framework.Controller + claimControllerStopCh chan struct{} + kubeClient clientset.Interface + eventRecorder record.EventRecorder + cloud cloudprovider.Interface + recyclePluginMgr vol.VolumePluginMgr + provisioner vol.ProvisionableVolumePlugin + clusterName string + + // PersistentVolumeController keeps track of long running operations and + // makes sure it won't start the same operation twice in parallel. + // Each operation is identified by a unique operationName. + // A simple keymutex.KeyMutex is not enough: we need to know which operations + // are in progress (so we don't schedule a new one), and keymutex.KeyMutex + // does not provide such functionality. + + // runningOperationsMapLock guards access to the runningOperations map + runningOperationsMapLock sync.Mutex + // runningOperations is a map of running operations. The value does not + // matter; the presence of a key is enough to consider an operation running. + runningOperations map[string]bool + + // For testing only: hook to call before an asynchronous operation starts. + // Not used when set to nil. + preOperationHook func(operationName string, operationArgument interface{}) + + createProvisionedPVRetryCount int + createProvisionedPVInterval time.Duration +} + +// syncClaim is the main controller method to decide what to do with a claim. +// It's invoked by appropriate framework.Controller callbacks when a claim is +// created, updated or periodically synced. We do not differentiate between +// these events. +// For easier readability, it was split into the syncUnboundClaim and syncBoundClaim +// methods. +func (ctrl *PersistentVolumeController) syncClaim(claim *api.PersistentVolumeClaim) error { + glog.V(4).Infof("synchronizing PersistentVolumeClaim[%s]: %s", claimToClaimKey(claim), getClaimStatusForLogging(claim)) + + if !hasAnnotation(claim.ObjectMeta, annBindCompleted) { + return ctrl.syncUnboundClaim(claim) + } else { + return ctrl.syncBoundClaim(claim) + } +} + +// syncUnboundClaim is the main controller method to decide what to do with an +// unbound claim. +func (ctrl *PersistentVolumeController) syncUnboundClaim(claim *api.PersistentVolumeClaim) error { + // This is a new PVC that has not completed binding + // OBSERVATION: pvc is "Pending" + if claim.Spec.VolumeName == "" { + // User did not care which PV they get. 
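+ // findBestMatchForClaim prefers a volume already pre-bound to this claim + // and otherwise returns the smallest available volume that fits (unit + // tests 1-4 through 1-6); a nil volume with a nil error means nothing + // matched and the claim stays Pending until a future sync.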
+ // [Unit test set 1] + volume, err := ctrl.volumes.findBestMatchForClaim(claim) + if err != nil { + glog.V(2).Infof("synchronizing unbound PersistentVolumeClaim[%s]: Error finding PV for claim: %v", claimToClaimKey(claim), err) + return fmt.Errorf("Error finding PV for claim %q: %v", claimToClaimKey(claim), err) + } + if volume == nil { + glog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: no volume found", claimToClaimKey(claim)) + // No PV could be found + // OBSERVATION: pvc is "Pending", will retry + if hasAnnotation(claim.ObjectMeta, annClass) { + if err = ctrl.provisionClaim(claim); err != nil { + return err + } + return nil + } + // Mark the claim as Pending and try to find a match in the next + // periodic syncClaim + if _, err = ctrl.updateClaimPhase(claim, api.ClaimPending); err != nil { + return err + } + return nil + } else /* pv != nil */ { + // Found a PV for this claim + // OBSERVATION: pvc is "Pending", pv is "Available" + glog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: volume %q found: %s", claimToClaimKey(claim), volume.Name, getVolumeStatusForLogging(volume)) + if err = ctrl.bind(volume, claim); err != nil { + // On any error saving the volume or the claim, subsequent + // syncClaim will finish the binding. + return err + } + // OBSERVATION: claim is "Bound", pv is "Bound" + return nil + } + } else /* pvc.Spec.VolumeName != nil */ { + // [Unit test set 2] + // User asked for a specific PV. + glog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: volume %q requested", claimToClaimKey(claim), claim.Spec.VolumeName) + obj, found, err := ctrl.volumes.store.GetByKey(claim.Spec.VolumeName) + if err != nil { + return err + } + if !found { + // User asked for a PV that does not exist. + // OBSERVATION: pvc is "Pending" + // Retry later. + glog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: volume %q requested and not found, will try again next time", claimToClaimKey(claim), claim.Spec.VolumeName) + if _, err = ctrl.updateClaimPhase(claim, api.ClaimPending); err != nil { + return err + } + return nil + } else { + volume, ok := obj.(*api.PersistentVolume) + if !ok { + return fmt.Errorf("Cannot convert object from volume cache to volume %q!?: %+v", claim.Spec.VolumeName, obj) + } + glog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: volume %q requested and found: %s", claimToClaimKey(claim), claim.Spec.VolumeName, getVolumeStatusForLogging(volume)) + if volume.Spec.ClaimRef == nil { + // User asked for a PV that is not claimed + // OBSERVATION: pvc is "Pending", pv is "Available" + glog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: volume is unbound, binding", claimToClaimKey(claim)) + if err = ctrl.bind(volume, claim); err != nil { + // On any error saving the volume or the claim, subsequent + // syncClaim will finish the binding. + return err + } + // OBSERVATION: pvc is "Bound", pv is "Bound" + return nil + } else if isVolumeBoundToClaim(volume, claim) { + // User asked for a PV that is claimed by this PVC + // OBSERVATION: pvc is "Pending", pv is "Bound" + glog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: volume already bound, finishing the binding", claimToClaimKey(claim)) + + // Finish the volume binding by adding claim UID. 
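+ // bind() is idempotent with respect to API writes: each of its steps saves + // the object only when something actually changed, so repeating it here to + // finish a half-completed binding is safe.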
+ if err = ctrl.bind(volume, claim); err != nil { + return err + } + // OBSERVATION: pvc is "Bound", pv is "Bound" + return nil + } else { + // User asked for a PV that is claimed by someone else + // OBSERVATION: pvc is "Pending", pv is "Bound" + if !hasAnnotation(claim.ObjectMeta, annBoundByController) { + glog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: volume already bound to different claim by user, will retry later", claimToClaimKey(claim)) + // User asked for a specific PV, retry later + if _, err = ctrl.updateClaimPhase(claim, api.ClaimPending); err != nil { + return err + } + return nil + } else { + // This should never happen because someone had to remove + // annBindCompleted annotation on the claim. + glog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: volume already bound to different claim %q by controller, THIS SHOULD NEVER HAPPEN", claimToClaimKey(claim), claimrefToClaimKey(volume.Spec.ClaimRef)) + return fmt.Errorf("Invalid binding of claim %q to volume %q: volume already claimed by %q", claimToClaimKey(claim), claim.Spec.VolumeName, claimrefToClaimKey(volume.Spec.ClaimRef)) + } + } + } + } +} + +// syncBoundClaim is the main controller method to decide what to do with a +// bound claim. +func (ctrl *PersistentVolumeController) syncBoundClaim(claim *api.PersistentVolumeClaim) error { + // hasAnnotation(pvc, annBindCompleted) + // This PVC has previously been bound + // OBSERVATION: pvc is not "Pending" + // [Unit test set 3] + if claim.Spec.VolumeName == "" { + // Claim was bound before but not any more. + if _, err := ctrl.updateClaimPhaseWithEvent(claim, api.ClaimLost, api.EventTypeWarning, "ClaimLost", "Bound claim has lost reference to PersistentVolume. Data on the volume is lost!"); err != nil { + return err + } + return nil + } + obj, found, err := ctrl.volumes.store.GetByKey(claim.Spec.VolumeName) + if err != nil { + return err + } + if !found { + // Claim is bound to a non-existing volume. + if _, err = ctrl.updateClaimPhaseWithEvent(claim, api.ClaimLost, api.EventTypeWarning, "ClaimLost", "Bound claim has lost its PersistentVolume. Data on the volume is lost!"); err != nil { + return err + } + return nil + } else { + volume, ok := obj.(*api.PersistentVolume) + if !ok { + return fmt.Errorf("Cannot convert object from volume cache to volume %q!?: %+v", claim.Spec.VolumeName, obj) + } + + glog.V(4).Infof("synchronizing bound PersistentVolumeClaim[%s]: volume %q found: %s", claimToClaimKey(claim), claim.Spec.VolumeName, getVolumeStatusForLogging(volume)) + if volume.Spec.ClaimRef == nil { + // Claim is bound but volume has come unbound. + // Or, a claim was bound and the controller has not received updated + // volume yet. We can't distinguish these cases. + // Bind the volume again and set all states to Bound. + glog.V(4).Infof("synchronizing bound PersistentVolumeClaim[%s]: volume is unbound, fixing", claimToClaimKey(claim)) + if err = ctrl.bind(volume, claim); err != nil { + // Objects not saved, next syncPV or syncClaim will try again + return err + } + return nil + } else if volume.Spec.ClaimRef.UID == claim.UID { + // All is well + // NOTE: syncPV can handle this so it can be left out. + // NOTE: bind() call here will do nothing in most cases as + // everything should be already set. 
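+ // In the common case this is a pure no-op; it only writes when a previous + // bind() was interrupted, e.g. a phase still left at Pending (compare unit + // tests 3-3 and 3-5).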
+ glog.V(4).Infof("synchronizing bound PersistentVolumeClaim[%s]: claim is already correctly bound", claimToClaimKey(claim)) + if err = ctrl.bind(volume, claim); err != nil { + // Objects not saved, next syncPV or syncClaim will try again + return err + } + return nil + } else { + // Claim is bound but volume has a different claimant. + // Set the claim phase to 'Lost', which is a terminal + // phase. + if _, err = ctrl.updateClaimPhaseWithEvent(claim, api.ClaimLost, api.EventTypeWarning, "ClaimMisbound", "Two claims are bound to the same volume, this one is bound incorrectly"); err != nil { + return err + } + return nil + } + } +} + +// syncVolume is the main controller method to decide what to do with a volume. +// It's invoked by appropriate framework.Controller callbacks when a volume is +// created, updated or periodically synced. We do not differentiate between +// these events. +func (ctrl *PersistentVolumeController) syncVolume(volume *api.PersistentVolume) error { + glog.V(4).Infof("synchronizing PersistentVolume[%s]: %s", volume.Name, getVolumeStatusForLogging(volume)) + + // [Unit test set 4] + if volume.Spec.ClaimRef == nil { + // Volume is unused + glog.V(4).Infof("synchronizing PersistentVolume[%s]: volume is unused", volume.Name) + if _, err := ctrl.updateVolumePhase(volume, api.VolumeAvailable); err != nil { + // Nothing was saved; we will fall back into the same + // condition in the next call to this method + return err + } + return nil + } else /* pv.Spec.ClaimRef != nil */ { + // Volume is bound to a claim. + if volume.Spec.ClaimRef.UID == "" { + // The PV is reserved for a PVC; that PVC has not yet been + // bound to this PV; the PVC sync will handle it. + glog.V(4).Infof("synchronizing PersistentVolume[%s]: volume is pre-bound to claim %s", volume.Name, claimrefToClaimKey(volume.Spec.ClaimRef)) + if _, err := ctrl.updateVolumePhase(volume, api.VolumeAvailable); err != nil { + // Nothing was saved; we will fall back into the same + // condition in the next call to this method + return err + } + return nil + } + glog.V(4).Infof("synchronizing PersistentVolume[%s]: volume is bound to claim %s", volume.Name, claimrefToClaimKey(volume.Spec.ClaimRef)) + // Get the PVC by _name_ + var claim *api.PersistentVolumeClaim + claimName := claimrefToClaimKey(volume.Spec.ClaimRef) + obj, found, err := ctrl.claims.GetByKey(claimName) + if err != nil { + return err + } + if !found { + glog.V(4).Infof("synchronizing PersistentVolume[%s]: claim %s not found", volume.Name, claimrefToClaimKey(volume.Spec.ClaimRef)) + // Fall through with claim = nil + } else { + var ok bool + claim, ok = obj.(*api.PersistentVolumeClaim) + if !ok { + return fmt.Errorf("Cannot convert object from volume cache to volume %q!?: %+v", claim.Spec.VolumeName, obj) + } + glog.V(4).Infof("synchronizing PersistentVolume[%s]: claim %s found: %s", volume.Name, claimrefToClaimKey(volume.Spec.ClaimRef), getClaimStatusForLogging(claim)) + } + if claim != nil && claim.UID != volume.Spec.ClaimRef.UID { + // The claim that the PV was pointing to was deleted, and another + // with the same name created. + glog.V(4).Infof("synchronizing PersistentVolume[%s]: claim %s has different UID, the old one must have been deleted", volume.Name, claimrefToClaimKey(volume.Spec.ClaimRef)) + // Treat the volume as bound to a missing claim. 
claim = nil + } + + if claim == nil { + // If we get into this block, the claim must have been deleted; + // NOTE: reclaimVolume may either release the PV back into the pool or + // recycle it or do nothing (retain) + + // Do not overwrite the previous Failed state - let the user see that + // something went wrong, while we still re-try to reclaim the + // volume. + if volume.Status.Phase != api.VolumeReleased && volume.Status.Phase != api.VolumeFailed { + // Also, log this only once: + glog.V(2).Infof("volume %q is released and reclaim policy %q will be executed", volume.Name, volume.Spec.PersistentVolumeReclaimPolicy) + if volume, err = ctrl.updateVolumePhase(volume, api.VolumeReleased); err != nil { + // Nothing was saved; we will fall back into the same condition + // in the next call to this method + return err + } + } + + if err = ctrl.reclaimVolume(volume); err != nil { + // Release failed, we will fall back into the same condition + // in the next call to this method + return err + } + return nil + } else if claim.Spec.VolumeName == "" { + if hasAnnotation(volume.ObjectMeta, annBoundByController) { + // The binding is not completed; let PVC sync handle it + glog.V(4).Infof("synchronizing PersistentVolume[%s]: volume not bound yet, waiting for syncClaim to fix it", volume.Name) + } else { + // Dangling PV; try to re-establish the link in the PVC sync + glog.V(4).Infof("synchronizing PersistentVolume[%s]: volume was bound and got unbound (by user?), waiting for syncClaim to fix it", volume.Name) + } + // In both cases, the volume is Bound and the claim is Pending. + // The next syncClaim will fix it. To speed it up, we enqueue the claim + // into the controller, which results in syncClaim being called + // shortly (and in the right goroutine). + // This speeds up binding of provisioned volumes - the provisioner saves + // only the new PV and expects that the next syncClaim will bind the + // claim to it. + clone, err := conversion.NewCloner().DeepCopy(claim) + if err != nil { + return fmt.Errorf("error cloning claim %q: %v", claimToClaimKey(claim), err) + } + glog.V(5).Infof("requeueing claim %q for faster syncClaim", claimToClaimKey(claim)) + err = ctrl.claimController.Requeue(clone) + if err != nil { + return fmt.Errorf("error enqueuing claim %q for faster sync: %v", claimToClaimKey(claim), err) + } + return nil + } else if claim.Spec.VolumeName == volume.Name { + // Volume is bound to a claim properly, update status if necessary + glog.V(4).Infof("synchronizing PersistentVolume[%s]: all is bound", volume.Name) + if _, err = ctrl.updateVolumePhase(volume, api.VolumeBound); err != nil { + // Nothing was saved; we will fall back into the same + // condition in the next call to this method + return err + } + return nil + } else { + // Volume is bound to a claim, but the claim is bound elsewhere + if hasAnnotation(volume.ObjectMeta, annDynamicallyProvisioned) && volume.Spec.PersistentVolumeReclaimPolicy == api.PersistentVolumeReclaimDelete { + // This volume was dynamically provisioned for this claim. The + // claim got bound elsewhere, and thus this volume is not + // needed. Delete it. + if err = ctrl.reclaimVolume(volume); err != nil { + // Deletion failed, we will fall back into the same condition + // in the next call to this method + return err + } + return nil + } else { + // Volume is bound to a claim, but the claim is bound elsewhere + // and it's not dynamically provisioned. 
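+ // Both branches below end in unbindVolume(); the annotation only decides + // how much is rolled back: a controller-made binding is removed entirely, + // while a user-made pre-binding merely has its ClaimRef.UID cleared.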
+ if hasAnnotation(volume.ObjectMeta, annBoundByController) { + // This is part of the normal operation of the controller; the + // controller tried to use this volume for a claim but the claim + // was fulfilled by another volume. We did this; fix it. + glog.V(4).Infof("synchronizing PersistentVolume[%s]: volume is bound by controller to a claim that is bound to another volume, unbinding", volume.Name) + if err = ctrl.unbindVolume(volume); err != nil { + return err + } + return nil + } else { + // The PV must have been created with this ptr; leave it alone. + glog.V(4).Infof("synchronizing PersistentVolume[%s]: volume is bound by user to a claim that is bound to another volume, waiting for the claim to get unbound", volume.Name) + // This just updates the volume phase and clears + // volume.Spec.ClaimRef.UID. It leaves the volume pre-bound + // to the claim. + if err = ctrl.unbindVolume(volume); err != nil { + return err + } + return nil + } + } + } + } +} + +// updateClaimPhase saves new claim phase to API server. +func (ctrl *PersistentVolumeController) updateClaimPhase(claim *api.PersistentVolumeClaim, phase api.PersistentVolumeClaimPhase) (*api.PersistentVolumeClaim, error) { + glog.V(4).Infof("updating PersistentVolumeClaim[%s]: set phase %s", claimToClaimKey(claim), phase) + if claim.Status.Phase == phase { + // Nothing to do. + glog.V(4).Infof("updating PersistentVolumeClaim[%s]: phase %s already set", claimToClaimKey(claim), phase) + return claim, nil + } + + clone, err := conversion.NewCloner().DeepCopy(claim) + if err != nil { + return nil, fmt.Errorf("Error cloning claim: %v", err) + } + claimClone, ok := clone.(*api.PersistentVolumeClaim) + if !ok { + return nil, fmt.Errorf("Unexpected claim cast error : %v", claimClone) + } + + claimClone.Status.Phase = phase + newClaim, err := ctrl.kubeClient.Core().PersistentVolumeClaims(claimClone.Namespace).UpdateStatus(claimClone) + if err != nil { + glog.V(4).Infof("updating PersistentVolumeClaim[%s]: set phase %s failed: %v", claimToClaimKey(claim), phase, err) + return newClaim, err + } + glog.V(2).Infof("claim %q entered phase %q", claimToClaimKey(claim), phase) + return newClaim, nil +} + +// updateClaimPhaseWithEvent saves new claim phase to API server and emits given +// event on the claim. It saves the phase and emits the event only when the +// phase has actually changed from the version saved in API server. +func (ctrl *PersistentVolumeController) updateClaimPhaseWithEvent(claim *api.PersistentVolumeClaim, phase api.PersistentVolumeClaimPhase, eventtype, reason, message string) (*api.PersistentVolumeClaim, error) { + glog.V(4).Infof("updating updateClaimPhaseWithEvent[%s]: set phase %s", claimToClaimKey(claim), phase) + if claim.Status.Phase == phase { + // Nothing to do. + glog.V(4).Infof("updating updateClaimPhaseWithEvent[%s]: phase %s already set", claimToClaimKey(claim), phase) + return claim, nil + } + + newClaim, err := ctrl.updateClaimPhase(claim, phase) + if err != nil { + return nil, err + } + + // Emit the event only when the status change happens, not everytime + // syncClaim is called. + glog.V(3).Infof("claim %q changed status to %q: %s", claimToClaimKey(claim), phase, message) + ctrl.eventRecorder.Event(newClaim, eventtype, reason, message) + + return newClaim, nil +} + +// updateVolumePhase saves new volume phase to API server. 
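+// It is a no-op when the phase is already set; on success it returns the +// updated object so callers can continue with the newest version instead of +// the stale input.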
+func (ctrl *PersistentVolumeController) updateVolumePhase(volume *api.PersistentVolume, phase api.PersistentVolumePhase) (*api.PersistentVolume, error) { + glog.V(4).Infof("updating PersistentVolume[%s]: set phase %s", volume.Name, phase) + if volume.Status.Phase == phase { + // Nothing to do. + glog.V(4).Infof("updating PersistentVolume[%s]: phase %s already set", volume.Name, phase) + return volume, nil + } + + clone, err := conversion.NewCloner().DeepCopy(volume) + if err != nil { + return nil, fmt.Errorf("Error cloning volume: %v", err) + } + volumeClone, ok := clone.(*api.PersistentVolume) + if !ok { + return nil, fmt.Errorf("Unexpected volume cast error : %v", volumeClone) + } + + volumeClone.Status.Phase = phase + newVol, err := ctrl.kubeClient.Core().PersistentVolumes().UpdateStatus(volumeClone) + if err != nil { + glog.V(4).Infof("updating PersistentVolume[%s]: set phase %s failed: %v", volume.Name, phase, err) + return newVol, err + } + glog.V(2).Infof("volume %q entered phase %q", volume.Name, phase) + return newVol, err +} + +// updateVolumePhaseWithEvent saves new volume phase to API server and emits +// given event on the volume. It saves the phase and emits the event only when +// the phase has actually changed from the version saved in API server. +func (ctrl *PersistentVolumeController) updateVolumePhaseWithEvent(volume *api.PersistentVolume, phase api.PersistentVolumePhase, eventtype, reason, message string) (*api.PersistentVolume, error) { + glog.V(4).Infof("updating updateVolumePhaseWithEvent[%s]: set phase %s", volume.Name, phase) + if volume.Status.Phase == phase { + // Nothing to do. + glog.V(4).Infof("updating updateVolumePhaseWithEvent[%s]: phase %s already set", volume.Name, phase) + return volume, nil + } + + newVol, err := ctrl.updateVolumePhase(volume, phase) + if err != nil { + return nil, err + } + + // Emit the event only when the status change happens, not every time + // syncClaim is called. + glog.V(3).Infof("volume %q changed status to %q: %s", volume.Name, phase, message) + ctrl.eventRecorder.Event(newVol, eventtype, reason, message) + + return newVol, nil +} + +// bindVolumeToClaim modifies the given volume to be bound to a claim and saves it to +// API server. The claim is not modified in this method! +func (ctrl *PersistentVolumeController) bindVolumeToClaim(volume *api.PersistentVolume, claim *api.PersistentVolumeClaim) (*api.PersistentVolume, error) { + glog.V(4).Infof("updating PersistentVolume[%s]: binding to %q", volume.Name, claimToClaimKey(claim)) + + dirty := false + + // Check if the volume was already bound (either by user or by controller) + shouldSetBoundByController := false + if !isVolumeBoundToClaim(volume, claim) { + shouldSetBoundByController = true + } + + // The volume from method args can be pointing to the watcher cache. We must not + // modify it; therefore, create a copy. 
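+ // The deep copy is not free, but mutating an object owned by the shared + // watch cache would corrupt the state seen by every other reader.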
clone, err := conversion.NewCloner().DeepCopy(volume) + if err != nil { + return nil, fmt.Errorf("Error cloning pv: %v", err) + } + volumeClone, ok := clone.(*api.PersistentVolume) + if !ok { + return nil, fmt.Errorf("Unexpected volume cast error : %v", volumeClone) + } + + // Bind the volume to the claim if it is not bound yet + if volume.Spec.ClaimRef == nil || + volume.Spec.ClaimRef.Name != claim.Name || + volume.Spec.ClaimRef.Namespace != claim.Namespace || + volume.Spec.ClaimRef.UID != claim.UID { + + claimRef, err := api.GetReference(claim) + if err != nil { + return nil, fmt.Errorf("Unexpected error getting claim reference: %v", err) + } + volumeClone.Spec.ClaimRef = claimRef + dirty = true + } + + // Set annBoundByController if it is not set yet + if shouldSetBoundByController && !hasAnnotation(volumeClone.ObjectMeta, annBoundByController) { + setAnnotation(&volumeClone.ObjectMeta, annBoundByController, "yes") + dirty = true + } + + // Save the volume only if something was changed + if dirty { + glog.V(2).Infof("claim %q bound to volume %q", claimToClaimKey(claim), volume.Name) + newVol, err := ctrl.kubeClient.Core().PersistentVolumes().Update(volumeClone) + if err != nil { + glog.V(4).Infof("updating PersistentVolume[%s]: binding to %q failed: %v", volume.Name, claimToClaimKey(claim), err) + return newVol, err + } + glog.V(4).Infof("updating PersistentVolume[%s]: bound to %q", newVol.Name, claimToClaimKey(claim)) + return newVol, nil + } + + glog.V(4).Infof("updating PersistentVolume[%s]: already bound to %q", volume.Name, claimToClaimKey(claim)) + return volume, nil +} + +// bindClaimToVolume modifies the given claim to be bound to a volume and saves it to +// API server. The volume is not modified in this method! +func (ctrl *PersistentVolumeController) bindClaimToVolume(claim *api.PersistentVolumeClaim, volume *api.PersistentVolume) (*api.PersistentVolumeClaim, error) { + glog.V(4).Infof("updating PersistentVolumeClaim[%s]: binding to %q", claimToClaimKey(claim), volume.Name) + + dirty := false + + // Check if the claim was already bound (either by controller or by user) + shouldSetBoundByController := false + if volume.Name != claim.Spec.VolumeName { + shouldSetBoundByController = true + } + + // The claim from method args can be pointing to the watcher cache. We must not + // modify it; therefore, create a copy. 
+ clone, err := conversion.NewCloner().DeepCopy(claim) + if err != nil { + return nil, fmt.Errorf("Error cloning claim: %v", err) + } + claimClone, ok := clone.(*api.PersistentVolumeClaim) + if !ok { + return nil, fmt.Errorf("Unexpected claim cast error : %v", claimClone) + } + + // Bind the claim to the volume if it is not bound yet + if claimClone.Spec.VolumeName != volume.Name { + claimClone.Spec.VolumeName = volume.Name + dirty = true + } + + // Set annBoundByController if it is not set yet + if shouldSetBoundByController && !hasAnnotation(claimClone.ObjectMeta, annBoundByController) { + setAnnotation(&claimClone.ObjectMeta, annBoundByController, "yes") + dirty = true + } + + // Set annBindCompleted if it is not set yet + if !hasAnnotation(claimClone.ObjectMeta, annBindCompleted) { + setAnnotation(&claimClone.ObjectMeta, annBindCompleted, "yes") + dirty = true + } + + if dirty { + glog.V(2).Infof("volume %q bound to claim %q", volume.Name, claimToClaimKey(claim)) + newClaim, err := ctrl.kubeClient.Core().PersistentVolumeClaims(claim.Namespace).Update(claimClone) + if err != nil { + glog.V(4).Infof("updating PersistentVolumeClaim[%s]: binding to %q failed: %v", claimToClaimKey(claim), volume.Name, err) + return newClaim, err + } + glog.V(4).Infof("updating PersistentVolumeClaim[%s]: bound to %q", claimToClaimKey(claim), volume.Name) + return newClaim, nil + } + + glog.V(4).Infof("updating PersistentVolumeClaim[%s]: already bound to %q", claimToClaimKey(claim), volume.Name) + return claim, nil +} + +// bind saves binding information both to the volume and the claim and marks +// both objects as Bound. Volume is saved first. +// It returns on first error, it's up to the caller to implement some retry +// mechanism. +func (ctrl *PersistentVolumeController) bind(volume *api.PersistentVolume, claim *api.PersistentVolumeClaim) error { + var err error + // use updateClaim/updatedVolume to keep the original claim/volume for + // logging in error cases. 
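+ // The save order is deliberate: PV.Spec.ClaimRef first, then PV status, + // then PVC.Spec.VolumeName, then PVC status. A crash between any two steps + // leaves a state that the next syncClaim recognizes and completes (unit + // tests 1-8 through 1-10 simulate exactly these crashes).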
+	var updatedClaim *api.PersistentVolumeClaim
+	var updatedVolume *api.PersistentVolume
+
+	glog.V(4).Infof("binding volume %q to claim %q", volume.Name, claimToClaimKey(claim))
+
+	if updatedVolume, err = ctrl.bindVolumeToClaim(volume, claim); err != nil {
+		glog.V(3).Infof("error binding volume %q to claim %q: failed saving the volume: %v", volume.Name, claimToClaimKey(claim), err)
+		return err
+	}
+	volume = updatedVolume
+
+	if updatedVolume, err = ctrl.updateVolumePhase(volume, api.VolumeBound); err != nil {
+		glog.V(3).Infof("error binding volume %q to claim %q: failed saving the volume status: %v", volume.Name, claimToClaimKey(claim), err)
+		return err
+	}
+	volume = updatedVolume
+
+	if updatedClaim, err = ctrl.bindClaimToVolume(claim, volume); err != nil {
+		glog.V(3).Infof("error binding volume %q to claim %q: failed saving the claim: %v", volume.Name, claimToClaimKey(claim), err)
+		return err
+	}
+	claim = updatedClaim
+
+	if updatedClaim, err = ctrl.updateClaimPhase(claim, api.ClaimBound); err != nil {
+		glog.V(3).Infof("error binding volume %q to claim %q: failed saving the claim status: %v", volume.Name, claimToClaimKey(claim), err)
+		return err
+	}
+	claim = updatedClaim
+
+	glog.V(4).Infof("volume %q bound to claim %q", volume.Name, claimToClaimKey(claim))
+	glog.V(4).Infof("volume %q status after binding: %s", volume.Name, getVolumeStatusForLogging(volume))
+	glog.V(4).Infof("claim %q status after binding: %s", claimToClaimKey(claim), getClaimStatusForLogging(claim))
+	return nil
+}
+
+// unbindVolume rolls back previous binding of the volume. This may be necessary
+// when two controllers bound two volumes to a single claim - when we detect
+// this, only one binding succeeds and the second one must be rolled back.
+// This method updates both Spec and Status.
+// It returns on the first error; it's up to the caller to implement some retry
+// mechanism.
+func (ctrl *PersistentVolumeController) unbindVolume(volume *api.PersistentVolume) error {
+	glog.V(4).Infof("updating PersistentVolume[%s]: rolling back binding from %q", volume.Name, claimrefToClaimKey(volume.Spec.ClaimRef))
+
+	// Save the PV only when any modification is necessary.
+	clone, err := conversion.NewCloner().DeepCopy(volume)
+	if err != nil {
+		return fmt.Errorf("Error cloning pv: %v", err)
+	}
+	volumeClone, ok := clone.(*api.PersistentVolume)
+	if !ok {
+		return fmt.Errorf("Unexpected volume cast error: %v", volumeClone)
+	}
+
+	if hasAnnotation(volume.ObjectMeta, annBoundByController) {
+		// The volume was bound by the controller.
+		volumeClone.Spec.ClaimRef = nil
+		delete(volumeClone.Annotations, annBoundByController)
+		if len(volumeClone.Annotations) == 0 {
+			// No annotations look better than an empty annotation map (and it's
+			// easier to test).
+			volumeClone.Annotations = nil
+		}
+	} else {
+		// The volume was pre-bound by user. Clear only the binding UID.
+		volumeClone.Spec.ClaimRef.UID = ""
+	}
+
+	newVol, err := ctrl.kubeClient.Core().PersistentVolumes().Update(volumeClone)
+	if err != nil {
+		glog.V(4).Infof("updating PersistentVolume[%s]: rollback failed: %v", volume.Name, err)
+		return err
+	}
+	glog.V(4).Infof("updating PersistentVolume[%s]: rolled back", newVol.Name)
+
+	// Update the status
+	_, err = ctrl.updateVolumePhase(newVol, api.VolumeAvailable)
+	return err
+}
+
+// reclaimVolume implements volume.Spec.PersistentVolumeReclaimPolicy and
+// starts the appropriate reclaim action.
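bind() and unbindVolume() above return on the first error and leave retries to the caller; in practice the controller relies on periodic sync to retry. A hedged sketch of what an explicit caller-side retry could look like; bindWithRetry, attempts, and the backoff value are illustrative, not controller configuration:

// bindWithRetry is a hypothetical wrapper; the real controller retries via
// periodic sync instead of looping like this.
func bindWithRetry(ctrl *PersistentVolumeController, volume *api.PersistentVolume, claim *api.PersistentVolumeClaim) error {
	const attempts = 3
	var err error
	for i := 0; i < attempts; i++ {
		if err = ctrl.bind(volume, claim); err == nil {
			return nil
		}
		time.Sleep(100 * time.Millisecond) // illustrative backoff
	}
	return err
}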
+func (ctrl *PersistentVolumeController) reclaimVolume(volume *api.PersistentVolume) error {
+	switch volume.Spec.PersistentVolumeReclaimPolicy {
+	case api.PersistentVolumeReclaimRetain:
+		glog.V(4).Infof("reclaimVolume[%s]: policy is Retain, nothing to do", volume.Name)
+
+	case api.PersistentVolumeReclaimRecycle:
+		glog.V(4).Infof("reclaimVolume[%s]: policy is Recycle", volume.Name)
+		opName := fmt.Sprintf("recycle-%s[%s]", volume.Name, string(volume.UID))
+		ctrl.scheduleOperation(opName, ctrl.recycleVolumeOperation, volume)
+
+	case api.PersistentVolumeReclaimDelete:
+		glog.V(4).Infof("reclaimVolume[%s]: policy is Delete", volume.Name)
+		opName := fmt.Sprintf("delete-%s[%s]", volume.Name, string(volume.UID))
+		ctrl.scheduleOperation(opName, ctrl.deleteVolumeOperation, volume)
+
+	default:
+		// Unknown PersistentVolumeReclaimPolicy
+		if _, err := ctrl.updateVolumePhaseWithEvent(volume, api.VolumeFailed, api.EventTypeWarning, "VolumeUnknownReclaimPolicy", "Volume has unrecognized PersistentVolumeReclaimPolicy"); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// recycleVolumeOperation recycles a volume. This method is running in a
+// standalone goroutine and already has all necessary locks.
+func (ctrl *PersistentVolumeController) recycleVolumeOperation(arg interface{}) {
+	volume, ok := arg.(*api.PersistentVolume)
+	if !ok {
+		glog.Errorf("Cannot convert recycleVolumeOperation argument to volume, got %+v", arg)
+		return
+	}
+	glog.V(4).Infof("recycleVolumeOperation [%s] started", volume.Name)
+
+	// This method may have been waiting for a volume lock for some time.
+	// Previous recycleVolumeOperation might just have saved an updated version,
+	// so read the current volume state now.
+	newVolume, err := ctrl.kubeClient.Core().PersistentVolumes().Get(volume.Name)
+	if err != nil {
+		glog.V(3).Infof("error reading persistent volume %q: %v", volume.Name, err)
+		return
+	}
+	needsReclaim, err := ctrl.isVolumeReleased(newVolume)
+	if err != nil {
+		glog.V(3).Infof("error reading claim for volume %q: %v", volume.Name, err)
+		return
+	}
+	if !needsReclaim {
+		glog.V(3).Infof("volume %q no longer needs recycling, skipping", volume.Name)
+		return
+	}
+
+	// Use the newest volume copy, this will save us from version conflicts on
+	// saving.
+	volume = newVolume
+
+	// Find a plugin.
+	spec := vol.NewSpecFromPersistentVolume(volume, false)
+	plugin, err := ctrl.recyclePluginMgr.FindRecyclablePluginBySpec(spec)
+	if err != nil {
+		// No recycler found. Emit an event and mark the volume Failed.
+		if _, err = ctrl.updateVolumePhaseWithEvent(volume, api.VolumeFailed, api.EventTypeWarning, "VolumeFailedRecycle", "No recycler plugin found for the volume!"); err != nil {
+			glog.V(4).Infof("recycleVolumeOperation [%s]: failed to mark volume as failed: %v", volume.Name, err)
+			// Save failed, retry on the next deletion attempt
+			return
+		}
+		// Despite the volume being Failed, the controller will retry recycling
+		// the volume in every syncVolume() call.
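recycleVolumeOperation above (and deleteVolumeOperation below) opens with the same guard: re-read the volume from the API server and re-check that it still needs reclaiming, because the claim may have been re-bound while the operation waited for its turn. The guard as a standalone sketch; stillNeedsReclaim is a hypothetical name:

// stillNeedsReclaim captures the re-read-then-recheck guard used by the
// recycle and delete operations.
func (ctrl *PersistentVolumeController) stillNeedsReclaim(name string) (bool, error) {
	newVolume, err := ctrl.kubeClient.Core().PersistentVolumes().Get(name)
	if err != nil {
		return false, err // e.g. the volume was deleted in the meantime
	}
	return ctrl.isVolumeReleased(newVolume)
}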
+		return
+	}
+
+	// Plugin found
+	recycler, err := plugin.NewRecycler(volume.Name, spec)
+	if err != nil {
+		// Cannot create recycler
+		strerr := fmt.Sprintf("Failed to create recycler: %v", err)
+		if _, err = ctrl.updateVolumePhaseWithEvent(volume, api.VolumeFailed, api.EventTypeWarning, "VolumeFailedRecycle", strerr); err != nil {
+			glog.V(4).Infof("recycleVolumeOperation [%s]: failed to mark volume as failed: %v", volume.Name, err)
+			// Save failed, retry on the next deletion attempt
+			return
+		}
+		// Despite the volume being Failed, the controller will retry recycling
+		// the volume in every syncVolume() call.
+		return
+	}
+
+	if err = recycler.Recycle(); err != nil {
+		// Recycler failed
+		strerr := fmt.Sprintf("Recycler failed: %s", err)
+		if _, err = ctrl.updateVolumePhaseWithEvent(volume, api.VolumeFailed, api.EventTypeWarning, "VolumeFailedRecycle", strerr); err != nil {
+			glog.V(4).Infof("recycleVolumeOperation [%s]: failed to mark volume as failed: %v", volume.Name, err)
+			// Save failed, retry on the next deletion attempt
+			return
+		}
+		// Despite the volume being Failed, the controller will retry recycling
+		// the volume in every syncVolume() call.
+		return
+	}
+
+	glog.V(2).Infof("volume %q recycled", volume.Name)
+	// Make the volume available again
+	if err = ctrl.unbindVolume(volume); err != nil {
+		// Oops, could not save the volume and therefore the controller will
+		// recycle the volume again on next update. We _could_ maintain a cache
+		// of "recently recycled volumes" and avoid unnecessary recycling, this
+		// is left out as future optimization.
+		glog.V(3).Infof("recycleVolumeOperation [%s]: failed to make recycled volume 'Available' (%v), we will recycle the volume again", volume.Name, err)
+		return
+	}
+	return
+}
+
+// deleteVolumeOperation deletes a volume. This method is running in a
+// standalone goroutine and already has all necessary locks.
+func (ctrl *PersistentVolumeController) deleteVolumeOperation(arg interface{}) {
+	volume, ok := arg.(*api.PersistentVolume)
+	if !ok {
+		glog.Errorf("Cannot convert deleteVolumeOperation argument to volume, got %+v", arg)
+		return
+	}
+	glog.V(4).Infof("deleteVolumeOperation [%s] started", volume.Name)
+
+	// This method may have been waiting for a volume lock for some time.
+	// Previous deleteVolumeOperation might just have saved an updated version,
+	// so read the current volume state now.
+	newVolume, err := ctrl.kubeClient.Core().PersistentVolumes().Get(volume.Name)
+	if err != nil {
+		glog.V(3).Infof("error reading persistent volume %q: %v", volume.Name, err)
+		return
+	}
+	needsReclaim, err := ctrl.isVolumeReleased(newVolume)
+	if err != nil {
+		glog.V(3).Infof("error reading claim for volume %q: %v", volume.Name, err)
+		return
+	}
+	if !needsReclaim {
+		glog.V(3).Infof("volume %q no longer needs deletion, skipping", volume.Name)
+		return
+	}
+
+	if err = ctrl.doDeleteVolume(volume); err != nil {
+		// Delete failed, update the volume and emit an event.
+		glog.V(3).Infof("deletion of volume %q failed: %v", volume.Name, err)
+		if _, err = ctrl.updateVolumePhaseWithEvent(volume, api.VolumeFailed, api.EventTypeWarning, "VolumeFailedDelete", err.Error()); err != nil {
+			glog.V(4).Infof("deleteVolumeOperation [%s]: failed to mark volume as failed: %v", volume.Name, err)
+			// Save failed, retry on the next deletion attempt
+			return
+		}
+		// Despite the volume being Failed, the controller will retry deleting
+		// the volume in every syncVolume() call.
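The recycler contract exercised above is small: plugin.NewRecycler(name, spec) yields an object whose Recycle() performs the scrub. A minimal fake satisfying just that call surface, e.g. for tests; the type is hypothetical and real volume plugins implement a larger interface:

// fakeRecycler exposes only the Recycle() call used above.
type fakeRecycler struct {
	err error // error to return; nil simulates a successful scrub
}

func (f *fakeRecycler) Recycle() error {
	return f.err
}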
+		return
+	}
+
+	glog.V(4).Infof("deleteVolumeOperation [%s]: success", volume.Name)
+	// Delete the volume
+	if err = ctrl.kubeClient.Core().PersistentVolumes().Delete(volume.Name, nil); err != nil {
+		// Oops, could not delete the volume and therefore the controller will
+		// try to delete the volume again on next update. We _could_ maintain a
+		// cache of "recently deleted volumes" and avoid unnecessary deletion,
+		// this is left out as future optimization.
+		glog.V(3).Infof("failed to delete volume %q from database: %v", volume.Name, err)
+		return
+	}
+	return
+}
+
+// isVolumeReleased returns true if the given volume is released and can be
+// recycled or deleted, based on its reclaim policy. I.e. the volume is bound
+// to a claim and the claim either does not exist or is bound to a different
+// volume.
+func (ctrl *PersistentVolumeController) isVolumeReleased(volume *api.PersistentVolume) (bool, error) {
+	// A volume needs reclaim if it has a ClaimRef and the corresponding claim
+	// does not exist.
+	if volume.Spec.ClaimRef == nil {
+		glog.V(4).Infof("isVolumeReleased[%s]: ClaimRef is nil", volume.Name)
+		return false, nil
+	}
+	if volume.Spec.ClaimRef.UID == "" {
+		// This is a volume bound by user and the controller has not finished
+		// binding to the real claim yet.
+		glog.V(4).Infof("isVolumeReleased[%s]: ClaimRef is not bound", volume.Name)
+		return false, nil
+	}
+
+	var claim *api.PersistentVolumeClaim
+	claimName := claimrefToClaimKey(volume.Spec.ClaimRef)
+	obj, found, err := ctrl.claims.GetByKey(claimName)
+	if err != nil {
+		return false, err
+	}
+	if !found {
+		// Fall through with claim = nil
+	} else {
+		var ok bool
+		claim, ok = obj.(*api.PersistentVolumeClaim)
+		if !ok {
+			return false, fmt.Errorf("Cannot convert object from claim cache to claim!?: %+v", obj)
+		}
+	}
+	if claim != nil && claim.UID == volume.Spec.ClaimRef.UID {
+		// the claim still exists and has the right UID
+		glog.V(4).Infof("isVolumeReleased[%s]: ClaimRef is still valid, volume is not released", volume.Name)
+		return false, nil
+	}
+
+	glog.V(2).Infof("isVolumeReleased[%s]: volume is released", volume.Name)
+	return true, nil
+}
+
+// doDeleteVolume finds the appropriate deleter plugin and deletes the given
+// volume (it will be re-used in future provisioner error cases).
+func (ctrl *PersistentVolumeController) doDeleteVolume(volume *api.PersistentVolume) error {
+	glog.V(4).Infof("doDeleteVolume [%s]", volume.Name)
+	// Find a plugin.
+	spec := vol.NewSpecFromPersistentVolume(volume, false)
+	plugin, err := ctrl.recyclePluginMgr.FindDeletablePluginBySpec(spec)
+	if err != nil {
+		// No deleter found. Emit an event and mark the volume Failed.
+		return fmt.Errorf("Error getting deleter volume plugin for volume %q: %v", volume.Name, err)
+	}
+
+	// Plugin found
+	deleter, err := plugin.NewDeleter(spec)
+	if err != nil {
+		// Cannot create deleter
+		return fmt.Errorf("Failed to create deleter for volume %q: %v", volume.Name, err)
+	}
+
+	if err = deleter.Delete(); err != nil {
+		// Deleter failed
+		return fmt.Errorf("Delete of volume %q failed: %v", volume.Name, err)
+	}
+
+	glog.V(2).Infof("volume %q deleted", volume.Name)
+	return nil
+}
+
+// provisionClaim starts a new asynchronous operation to provision a claim.
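isVolumeReleased above reduces to a short truth table, illustrated here with the newVolume test helper defined later in framework_test.go; the function and all values are made up for illustration:

// exampleIsVolumeReleased is hypothetical, summarizing the outcomes:
//   ClaimRef == nil                -> false (volume was never bound)
//   ClaimRef.UID == ""             -> false (user pre-bound, binding unfinished)
//   cached claim with matching UID -> false (still bound)
//   claim missing or UID differs   -> true  (released; reclaim per policy)
func exampleIsVolumeReleased(ctrl *PersistentVolumeController) {
	vol := newVolume("pv-x", "1Gi", "uid-x", "claim-x", api.VolumeBound, api.PersistentVolumeReclaimDelete)
	released, err := ctrl.isVolumeReleased(vol)
	_, _ = released, err // true only if "claim-x" is absent from ctrl.claims or carries a different UID
}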
+func (ctrl *PersistentVolumeController) provisionClaim(claim *api.PersistentVolumeClaim) error {
+	glog.V(4).Infof("provisionClaim[%s]: started", claimToClaimKey(claim))
+	opName := fmt.Sprintf("provision-%s[%s]", claimToClaimKey(claim), string(claim.UID))
+	ctrl.scheduleOperation(opName, ctrl.provisionClaimOperation, claim)
+	return nil
+}
+
+// provisionClaimOperation provisions a volume. This method is running in a
+// standalone goroutine and already has all necessary locks.
+func (ctrl *PersistentVolumeController) provisionClaimOperation(claimObj interface{}) {
+	claim, ok := claimObj.(*api.PersistentVolumeClaim)
+	if !ok {
+		glog.Errorf("Cannot convert provisionClaimOperation argument to claim, got %+v", claimObj)
+		return
+	}
+	glog.V(4).Infof("provisionClaimOperation [%s] started", claimToClaimKey(claim))
+
+	// A previous provisionClaimOperation may just have finished while we were
+	// waiting for the locks. Check that the PV (with a deterministic name)
+	// hasn't been provisioned yet.
+
+	pvName := ctrl.getProvisionedVolumeNameForClaim(claim)
+	volume, err := ctrl.kubeClient.Core().PersistentVolumes().Get(pvName)
+	if err == nil && volume != nil {
+		// Volume has been already provisioned, nothing to do.
+		glog.V(4).Infof("provisionClaimOperation [%s]: volume already exists, skipping", claimToClaimKey(claim))
+		return
+	}
+
+	// Prepare a claimRef to the claim early (to fail before a volume is
+	// provisioned)
+	claimRef, err := api.GetReference(claim)
+	if err != nil {
+		glog.V(3).Infof("unexpected error getting claim reference: %v", err)
+		return
+	}
+
+	// TODO: find provisionable plugin based on a class/profile
+	plugin := ctrl.provisioner
+	if plugin == nil {
+		// No provisioner found. Emit an event.
+		ctrl.eventRecorder.Event(claim, api.EventTypeWarning, "ProvisioningFailed", "No provisioner plugin found for the claim!")
+		glog.V(2).Infof("no provisioner plugin found for claim %s!", claimToClaimKey(claim))
+		// The controller will retry provisioning the volume in every
+		// syncClaim() call.
+		return
+	}
+
+	// Gather provisioning options
+	tags := make(map[string]string)
+	tags[cloudVolumeCreatedForClaimNamespaceTag] = claim.Namespace
+	tags[cloudVolumeCreatedForClaimNameTag] = claim.Name
+	tags[cloudVolumeCreatedForVolumeNameTag] = pvName
+
+	options := vol.VolumeOptions{
+		Capacity:                      claim.Spec.Resources.Requests[api.ResourceName(api.ResourceStorage)],
+		AccessModes:                   claim.Spec.AccessModes,
+		PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimDelete,
+		CloudTags:                     &tags,
+		ClusterName:                   ctrl.clusterName,
+		PVName:                        pvName,
+	}
+
+	// Provision the volume
+	provisioner, err := plugin.NewProvisioner(options)
+	if err != nil {
+		strerr := fmt.Sprintf("Failed to create provisioner: %v", err)
+		glog.V(2).Infof("failed to create provisioner for claim %q: %v", claimToClaimKey(claim), err)
+		ctrl.eventRecorder.Event(claim, api.EventTypeWarning, "ProvisioningFailed", strerr)
+		return
+	}
+
+	volume, err = provisioner.Provision()
+	if err != nil {
+		strerr := fmt.Sprintf("Failed to provision volume: %v", err)
+		glog.V(2).Infof("failed to provision volume for claim %q: %v", claimToClaimKey(claim), err)
+		ctrl.eventRecorder.Event(claim, api.EventTypeWarning, "ProvisioningFailed", strerr)
+		return
+	}
+
+	glog.V(3).Infof("volume %q for claim %q created", volume.Name, claimToClaimKey(claim))
+
+	// Create Kubernetes PV object for the volume.
+	volume.Name = pvName
+	// Bind it to the claim
+	volume.Spec.ClaimRef = claimRef
+	volume.Status.Phase = api.VolumeBound
+
+	// Add annBoundByController (used in deleting the volume)
+	setAnnotation(&volume.ObjectMeta, annBoundByController, "yes")
+	setAnnotation(&volume.ObjectMeta, annDynamicallyProvisioned, plugin.Name())
+
+	// Try to create the PV object several times
+	for i := 0; i < ctrl.createProvisionedPVRetryCount; i++ {
+		glog.V(4).Infof("provisionClaimOperation [%s]: trying to save volume %s", claimToClaimKey(claim), volume.Name)
+		if _, err = ctrl.kubeClient.Core().PersistentVolumes().Create(volume); err == nil {
+			// Save succeeded.
+			glog.V(3).Infof("volume %q for claim %q saved", volume.Name, claimToClaimKey(claim))
+			break
+		}
+		// Save failed, try again after a while.
+		glog.V(3).Infof("failed to save volume %q for claim %q: %v", volume.Name, claimToClaimKey(claim), err)
+		time.Sleep(ctrl.createProvisionedPVInterval)
+	}
+
+	if err != nil {
+		// Save failed. Now we have a storage asset outside of Kubernetes,
+		// but we don't have an appropriate PV object for it.
+		// Emit some event here and try to delete the storage asset several
+		// times.
+		strerr := fmt.Sprintf("Error creating provisioned PV object for claim %s: %v. Deleting the volume.", claimToClaimKey(claim), err)
+		glog.V(3).Info(strerr)
+		ctrl.eventRecorder.Event(claim, api.EventTypeWarning, "ProvisioningFailed", strerr)
+
+		for i := 0; i < ctrl.createProvisionedPVRetryCount; i++ {
+			if err = ctrl.doDeleteVolume(volume); err == nil {
+				// Delete succeeded
+				glog.V(4).Infof("provisionClaimOperation [%s]: cleaning volume %s succeeded", claimToClaimKey(claim), volume.Name)
+				break
+			}
+			// Delete failed, try again after a while.
+			glog.V(3).Infof("failed to delete volume %q, attempt %d: %v", volume.Name, i, err)
+			time.Sleep(ctrl.createProvisionedPVInterval)
+		}
+
+		if err != nil {
+			// Delete failed several times. There is an orphaned volume and
+			// there is nothing we can do about it.
+			strerr := fmt.Sprintf("Error cleaning provisioned volume for claim %s: %v. Please delete manually.", claimToClaimKey(claim), err)
+			glog.V(2).Info(strerr)
+			ctrl.eventRecorder.Event(claim, api.EventTypeWarning, "ProvisioningCleanupFailed", strerr)
+		}
+	} else {
+		glog.V(2).Infof("volume %q provisioned for claim %q", volume.Name, claimToClaimKey(claim))
+	}
+}
+
+// getProvisionedVolumeNameForClaim returns PV.Name for the provisioned volume.
+// The name must be unique.
+func (ctrl *PersistentVolumeController) getProvisionedVolumeNameForClaim(claim *api.PersistentVolumeClaim) string {
+	return "pvc-" + string(claim.UID)
+}
+
+// scheduleOperation starts the given asynchronous operation on the given
+// volume. It makes sure the operation is not already running.
+func (ctrl *PersistentVolumeController) scheduleOperation(operationName string, operation func(arg interface{}), arg interface{}) {
+	glog.V(4).Infof("scheduleOperation[%s]", operationName)
+
+	// Poke test code that an operation is just about to get started.
+	if ctrl.preOperationHook != nil {
+		ctrl.preOperationHook(operationName, arg)
+	}
+
+	isRunning := func() bool {
+		// In anonymous func() to get the locking right.
+ ctrl.runningOperationsMapLock.Lock() + defer ctrl.runningOperationsMapLock.Unlock() + + if ctrl.isOperationRunning(operationName) { + glog.V(4).Infof("operation %q is already running, skipping", operationName) + return true + } + ctrl.startRunningOperation(operationName) + return false + }() + + if isRunning { + return + } + + // Run the operation in separate goroutine + go func() { + glog.V(4).Infof("scheduleOperation[%s]: running the operation", operationName) + operation(arg) + + ctrl.runningOperationsMapLock.Lock() + defer ctrl.runningOperationsMapLock.Unlock() + ctrl.finishRunningOperation(operationName) + }() +} + +func (ctrl *PersistentVolumeController) isOperationRunning(operationName string) bool { + _, found := ctrl.runningOperations[operationName] + return found +} + +func (ctrl *PersistentVolumeController) finishRunningOperation(operationName string) { + delete(ctrl.runningOperations, operationName) +} + +func (ctrl *PersistentVolumeController) startRunningOperation(operationName string) { + ctrl.runningOperations[operationName] = true +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/persistentvolume/controller_base.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/persistentvolume/controller_base.go new file mode 100644 index 000000000000..55c7417f00c1 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/persistentvolume/controller_base.go @@ -0,0 +1,390 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package persistentvolume + +import ( + "fmt" + "time" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/errors" + "k8s.io/kubernetes/pkg/client/cache" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" + unversioned_core "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned" + "k8s.io/kubernetes/pkg/client/record" + "k8s.io/kubernetes/pkg/cloudprovider" + "k8s.io/kubernetes/pkg/controller/framework" + "k8s.io/kubernetes/pkg/runtime" + vol "k8s.io/kubernetes/pkg/volume" + "k8s.io/kubernetes/pkg/watch" + + "github.com/golang/glog" +) + +// This file contains the controller base functionality, i.e. framework to +// process PV/PVC added/updated/deleted events. 
The real binding, provisioning, +// recycling and deleting is done in controller.go + +// NewPersistentVolumeController creates a new PersistentVolumeController +func NewPersistentVolumeController( + kubeClient clientset.Interface, + syncPeriod time.Duration, + provisioner vol.ProvisionableVolumePlugin, + recyclers []vol.VolumePlugin, + cloud cloudprovider.Interface, + clusterName string, + volumeSource, claimSource cache.ListerWatcher, + eventRecorder record.EventRecorder, +) *PersistentVolumeController { + + if eventRecorder == nil { + broadcaster := record.NewBroadcaster() + broadcaster.StartRecordingToSink(&unversioned_core.EventSinkImpl{Interface: kubeClient.Core().Events("")}) + eventRecorder = broadcaster.NewRecorder(api.EventSource{Component: "persistentvolume-controller"}) + } + + controller := &PersistentVolumeController{ + kubeClient: kubeClient, + eventRecorder: eventRecorder, + runningOperations: make(map[string]bool), + cloud: cloud, + provisioner: provisioner, + clusterName: clusterName, + createProvisionedPVRetryCount: createProvisionedPVRetryCount, + createProvisionedPVInterval: createProvisionedPVInterval, + } + + controller.recyclePluginMgr.InitPlugins(recyclers, controller) + if controller.provisioner != nil { + if err := controller.provisioner.Init(controller); err != nil { + glog.Errorf("PersistentVolumeController: error initializing provisioner plugin: %v", err) + } + } + + if volumeSource == nil { + volumeSource = &cache.ListWatch{ + ListFunc: func(options api.ListOptions) (runtime.Object, error) { + return kubeClient.Core().PersistentVolumes().List(options) + }, + WatchFunc: func(options api.ListOptions) (watch.Interface, error) { + return kubeClient.Core().PersistentVolumes().Watch(options) + }, + } + } + + if claimSource == nil { + claimSource = &cache.ListWatch{ + ListFunc: func(options api.ListOptions) (runtime.Object, error) { + return kubeClient.Core().PersistentVolumeClaims(api.NamespaceAll).List(options) + }, + WatchFunc: func(options api.ListOptions) (watch.Interface, error) { + return kubeClient.Core().PersistentVolumeClaims(api.NamespaceAll).Watch(options) + }, + } + } + + controller.volumes.store, controller.volumeController = framework.NewIndexerInformer( + volumeSource, + &api.PersistentVolume{}, + syncPeriod, + framework.ResourceEventHandlerFuncs{ + AddFunc: controller.addVolume, + UpdateFunc: controller.updateVolume, + DeleteFunc: controller.deleteVolume, + }, + cache.Indexers{"accessmodes": accessModesIndexFunc}, + ) + controller.claims, controller.claimController = framework.NewInformer( + claimSource, + &api.PersistentVolumeClaim{}, + syncPeriod, + framework.ResourceEventHandlerFuncs{ + AddFunc: controller.addClaim, + UpdateFunc: controller.updateClaim, + DeleteFunc: controller.deleteClaim, + }, + ) + return controller +} + +// addVolume is callback from framework.Controller watching PersistentVolume +// events. +func (ctrl *PersistentVolumeController) addVolume(obj interface{}) { + if !ctrl.isFullySynced() { + return + } + + pv, ok := obj.(*api.PersistentVolume) + if !ok { + glog.Errorf("expected PersistentVolume but handler received %+v", obj) + return + } + if err := ctrl.syncVolume(pv); err != nil { + if errors.IsConflict(err) { + // Version conflict error happens quite often and the controller + // recovers from it easily. 
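For context, a hedged sketch of how a caller might wire up NewPersistentVolumeController above; the function name and all concrete values are illustrative, kubeClient construction is elided, and the nil sources fall back to the real API-server ListWatches shown in the constructor:

// startPVController is a hypothetical wiring example.
func startPVController(kubeClient clientset.Interface) *PersistentVolumeController {
	ctrl := NewPersistentVolumeController(
		kubeClient,     // clientset.Interface
		10*time.Minute, // syncPeriod (illustrative)
		nil,            // provisioner: no dynamic provisioning
		nil,            // recyclers: no recycler plugins
		nil,            // cloud provider
		"kubernetes",   // clusterName (illustrative)
		nil, nil,       // volumeSource, claimSource: nil => watch the API server
		nil,            // eventRecorder: nil => a default recorder is created
	)
	ctrl.Run() // spawns the informer goroutines; pair with ctrl.Stop() on shutdown
	return ctrl
}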
+ glog.V(3).Infof("PersistentVolumeController could not add volume %q: %+v", pv.Name, err) + } else { + glog.Errorf("PersistentVolumeController could not add volume %q: %+v", pv.Name, err) + } + } +} + +// updateVolume is callback from framework.Controller watching PersistentVolume +// events. +func (ctrl *PersistentVolumeController) updateVolume(oldObj, newObj interface{}) { + if !ctrl.isFullySynced() { + return + } + + newVolume, ok := newObj.(*api.PersistentVolume) + if !ok { + glog.Errorf("Expected PersistentVolume but handler received %+v", newObj) + return + } + if err := ctrl.syncVolume(newVolume); err != nil { + if errors.IsConflict(err) { + // Version conflict error happens quite often and the controller + // recovers from it easily. + glog.V(3).Infof("PersistentVolumeController could not update volume %q: %+v", newVolume.Name, err) + } else { + glog.Errorf("PersistentVolumeController could not update volume %q: %+v", newVolume.Name, err) + } + } +} + +// deleteVolume is callback from framework.Controller watching PersistentVolume +// events. +func (ctrl *PersistentVolumeController) deleteVolume(obj interface{}) { + if !ctrl.isFullySynced() { + return + } + + var volume *api.PersistentVolume + var ok bool + volume, ok = obj.(*api.PersistentVolume) + if !ok { + if unknown, ok := obj.(cache.DeletedFinalStateUnknown); ok && unknown.Obj != nil { + volume, ok = unknown.Obj.(*api.PersistentVolume) + if !ok { + glog.Errorf("Expected PersistentVolume but deleteVolume received %+v", unknown.Obj) + return + } + } else { + glog.Errorf("Expected PersistentVolume but deleteVolume received %+v", obj) + return + } + } + + if !ok || volume == nil || volume.Spec.ClaimRef == nil { + return + } + + if claimObj, exists, _ := ctrl.claims.GetByKey(claimrefToClaimKey(volume.Spec.ClaimRef)); exists { + if claim, ok := claimObj.(*api.PersistentVolumeClaim); ok && claim != nil { + // sync the claim when its volume is deleted. Explicitly syncing the + // claim here in response to volume deletion prevents the claim from + // waiting until the next sync period for its Lost status. + err := ctrl.syncClaim(claim) + if err != nil { + if errors.IsConflict(err) { + // Version conflict error happens quite often and the + // controller recovers from it easily. + glog.V(3).Infof("PersistentVolumeController could not update volume %q from deleteVolume handler: %+v", claimToClaimKey(claim), err) + } else { + glog.Errorf("PersistentVolumeController could not update volume %q from deleteVolume handler: %+v", claimToClaimKey(claim), err) + } + } + } else { + glog.Errorf("Cannot convert object from claim cache to claim %q!?: %+v", claimrefToClaimKey(volume.Spec.ClaimRef), claimObj) + } + } +} + +// addClaim is callback from framework.Controller watching PersistentVolumeClaim +// events. +func (ctrl *PersistentVolumeController) addClaim(obj interface{}) { + if !ctrl.isFullySynced() { + return + } + + claim, ok := obj.(*api.PersistentVolumeClaim) + if !ok { + glog.Errorf("Expected PersistentVolumeClaim but addClaim received %+v", obj) + return + } + if err := ctrl.syncClaim(claim); err != nil { + if errors.IsConflict(err) { + // Version conflict error happens quite often and the controller + // recovers from it easily. 
+ glog.V(3).Infof("PersistentVolumeController could not add claim %q: %+v", claimToClaimKey(claim), err) + } else { + glog.Errorf("PersistentVolumeController could not add claim %q: %+v", claimToClaimKey(claim), err) + } + } +} + +// updateClaim is callback from framework.Controller watching PersistentVolumeClaim +// events. +func (ctrl *PersistentVolumeController) updateClaim(oldObj, newObj interface{}) { + if !ctrl.isFullySynced() { + return + } + + newClaim, ok := newObj.(*api.PersistentVolumeClaim) + if !ok { + glog.Errorf("Expected PersistentVolumeClaim but updateClaim received %+v", newObj) + return + } + if err := ctrl.syncClaim(newClaim); err != nil { + if errors.IsConflict(err) { + // Version conflict error happens quite often and the controller + // recovers from it easily. + glog.V(3).Infof("PersistentVolumeController could not update claim %q: %+v", claimToClaimKey(newClaim), err) + } else { + glog.Errorf("PersistentVolumeController could not update claim %q: %+v", claimToClaimKey(newClaim), err) + } + } +} + +// deleteClaim is callback from framework.Controller watching PersistentVolumeClaim +// events. +func (ctrl *PersistentVolumeController) deleteClaim(obj interface{}) { + if !ctrl.isFullySynced() { + return + } + + var volume *api.PersistentVolume + var claim *api.PersistentVolumeClaim + var ok bool + + claim, ok = obj.(*api.PersistentVolumeClaim) + if !ok { + if unknown, ok := obj.(cache.DeletedFinalStateUnknown); ok && unknown.Obj != nil { + claim, ok = unknown.Obj.(*api.PersistentVolumeClaim) + if !ok { + glog.Errorf("Expected PersistentVolumeClaim but deleteClaim received %+v", unknown.Obj) + return + } + } else { + glog.Errorf("Expected PersistentVolumeClaim but deleteClaim received %+v", obj) + return + } + } + + if !ok || claim == nil { + return + } + + if pvObj, exists, _ := ctrl.volumes.store.GetByKey(claim.Spec.VolumeName); exists { + if volume, ok = pvObj.(*api.PersistentVolume); ok { + // sync the volume when its claim is deleted. Explicitly sync'ing the + // volume here in response to claim deletion prevents the volume from + // waiting until the next sync period for its Release. + if volume != nil { + err := ctrl.syncVolume(volume) + if err != nil { + if errors.IsConflict(err) { + // Version conflict error happens quite often and the + // controller recovers from it easily. + glog.V(3).Infof("PersistentVolumeController could not update volume %q from deleteClaim handler: %+v", volume.Name, err) + } else { + glog.Errorf("PersistentVolumeController could not update volume %q from deleteClaim handler: %+v", volume.Name, err) + } + } + } + } else { + glog.Errorf("Cannot convert object from volume cache to volume %q!?: %+v", claim.Spec.VolumeName, pvObj) + } + } +} + +// Run starts all of this controller's control loops +func (ctrl *PersistentVolumeController) Run() { + glog.V(4).Infof("starting PersistentVolumeController") + + if ctrl.volumeControllerStopCh == nil { + ctrl.volumeControllerStopCh = make(chan struct{}) + go ctrl.volumeController.Run(ctrl.volumeControllerStopCh) + } + + if ctrl.claimControllerStopCh == nil { + ctrl.claimControllerStopCh = make(chan struct{}) + go ctrl.claimController.Run(ctrl.claimControllerStopCh) + } +} + +// Stop gracefully shuts down this controller +func (ctrl *PersistentVolumeController) Stop() { + glog.V(4).Infof("stopping PersistentVolumeController") + close(ctrl.volumeControllerStopCh) + close(ctrl.claimControllerStopCh) +} + +// isFullySynced returns true, if both volume and claim caches are fully loaded +// after startup. 
+// We do not want to process events until both caches are fully loaded - e.g.,
+// we might recycle or delete PVs that don't have their corresponding claim in
+// the cache yet.
+func (ctrl *PersistentVolumeController) isFullySynced() bool {
+	return ctrl.volumeController.HasSynced() && ctrl.claimController.HasSynced()
+}
+
+// Stateless functions
+
+func hasAnnotation(obj api.ObjectMeta, ann string) bool {
+	_, found := obj.Annotations[ann]
+	return found
+}
+
+func setAnnotation(obj *api.ObjectMeta, ann string, value string) {
+	if obj.Annotations == nil {
+		obj.Annotations = make(map[string]string)
+	}
+	obj.Annotations[ann] = value
+}
+
+func getClaimStatusForLogging(claim *api.PersistentVolumeClaim) string {
+	bound := hasAnnotation(claim.ObjectMeta, annBindCompleted)
+	boundByController := hasAnnotation(claim.ObjectMeta, annBoundByController)
+
+	return fmt.Sprintf("phase: %s, bound to: %q, bindCompleted: %v, boundByController: %v", claim.Status.Phase, claim.Spec.VolumeName, bound, boundByController)
+}
+
+func getVolumeStatusForLogging(volume *api.PersistentVolume) string {
+	boundByController := hasAnnotation(volume.ObjectMeta, annBoundByController)
+	claimName := ""
+	if volume.Spec.ClaimRef != nil {
+		claimName = fmt.Sprintf("%s/%s (uid: %s)", volume.Spec.ClaimRef.Namespace, volume.Spec.ClaimRef.Name, volume.Spec.ClaimRef.UID)
+	}
+	return fmt.Sprintf("phase: %s, bound to: %q, boundByController: %v", volume.Status.Phase, claimName, boundByController)
+}
+
+// isVolumeBoundToClaim returns true if the given volume is pre-bound or bound
+// to the specific claim. Both claim.Name and claim.Namespace must match
+// volume.Spec.ClaimRef.
+// If claim.UID is present in volume.Spec.ClaimRef, it must be equal too.
+func isVolumeBoundToClaim(volume *api.PersistentVolume, claim *api.PersistentVolumeClaim) bool {
+	if volume.Spec.ClaimRef == nil {
+		return false
+	}
+	if claim.Name != volume.Spec.ClaimRef.Name || claim.Namespace != volume.Spec.ClaimRef.Namespace {
+		return false
+	}
+	if volume.Spec.ClaimRef.UID != "" && claim.UID != volume.Spec.ClaimRef.UID {
+		return false
+	}
+	return true
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/persistentvolume/controller_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/persistentvolume/controller_test.go
new file mode 100644
index 000000000000..5b65b1176f57
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/persistentvolume/controller_test.go
@@ -0,0 +1,166 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package persistentvolume
+
+import (
+	"testing"
+	"time"
+
+	"github.com/golang/glog"
+	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
+	"k8s.io/kubernetes/pkg/controller/framework"
+	"k8s.io/kubernetes/pkg/conversion"
+)
+
+// Test the real controller methods (add/update/delete claim/volume) with
+// a fake API server.
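isVolumeBoundToClaim above encodes the pre-binding rule: an empty ClaimRef.UID matches any claim with the right name and namespace, while a set UID must match exactly. A hypothetical illustration, with only the inspected fields filled in:

// exampleBoundCheck is illustrative only.
func exampleBoundCheck() {
	volume := &api.PersistentVolume{}
	volume.Spec.ClaimRef = &api.ObjectReference{Namespace: "default", Name: "myclaim"} // UID empty: pre-bound by user
	claim := &api.PersistentVolumeClaim{}
	claim.Namespace, claim.Name, claim.UID = "default", "myclaim", types.UID("uid-1")

	_ = isVolumeBoundToClaim(volume, claim) // true: empty ClaimRef.UID matches any claim UID
	volume.Spec.ClaimRef.UID = "uid-2"
	_ = isVolumeBoundToClaim(volume, claim) // false: UID is set and differs
}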
+// There is no controller API to 'initiate syncAll now', therefore these tests +// can't reliably simulate periodic sync of volumes/claims - it would be +// either very timing-sensitive or slow to wait for real periodic sync. +func TestControllerSync(t *testing.T) { + expectedChanges := []int{1, 4, 1, 1} + tests := []controllerTest{ + // [Unit test set 5] - controller tests. + // We test the controller as if + // it was connected to real API server, i.e. we call add/update/delete + // Claim/Volume methods. Also, all changes to volumes and claims are + // sent to add/update/delete Claim/Volume as real controller would do. + { + // addVolume gets a new volume. Check it's marked as Available and + // that it's not bound to any claim - we bind volumes on periodic + // syncClaim, not on addVolume. + "5-1 - addVolume", + novolumes, /* added in testCall below */ + newVolumeArray("volume5-1", "10Gi", "", "", api.VolumeAvailable, api.PersistentVolumeReclaimRetain), + newClaimArray("claim5-1", "uid5-1", "1Gi", "", api.ClaimPending), + newClaimArray("claim5-1", "uid5-1", "1Gi", "", api.ClaimPending), + noevents, noerrors, + // Custom test function that generates an add event + func(ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest) error { + volume := newVolume("volume5-1", "10Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain) + reactor.volumes[volume.Name] = volume + reactor.volumeSource.Add(volume) + return nil + }, + }, + { + // addClaim gets a new claim. Check it's bound to a volume. + "5-2 - complete bind", + newVolumeArray("volume5-2", "10Gi", "", "", api.VolumeAvailable, api.PersistentVolumeReclaimRetain), + newVolumeArray("volume5-2", "10Gi", "uid5-2", "claim5-2", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController), + noclaims, /* added in testAddClaim5_2 */ + newClaimArray("claim5-2", "uid5-2", "1Gi", "volume5-2", api.ClaimBound, annBoundByController, annBindCompleted), + noevents, noerrors, + // Custom test function that generates an add event + func(ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest) error { + claim := newClaim("claim5-2", "uid5-2", "1Gi", "", api.ClaimPending) + reactor.claims[claim.Name] = claim + reactor.claimSource.Add(claim) + return nil + }, + }, + { + // deleteClaim with a bound claim makes bound volume released. + "5-3 - delete claim", + newVolumeArray("volume5-3", "10Gi", "uid5-3", "claim5-3", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController), + newVolumeArray("volume5-3", "10Gi", "uid5-3", "claim5-3", api.VolumeReleased, api.PersistentVolumeReclaimRetain, annBoundByController), + newClaimArray("claim5-3", "uid5-3", "1Gi", "volume5-3", api.ClaimBound, annBoundByController, annBindCompleted), + noclaims, + noevents, noerrors, + // Custom test function that generates a delete event + func(ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest) error { + obj := ctrl.claims.List()[0] + claim := obj.(*api.PersistentVolumeClaim) + // Remove the claim from list of resulting claims. + delete(reactor.claims, claim.Name) + // Poke the controller with deletion event. Cloned claim is + // needed to prevent races (and we would get a clone from etcd + // too). + clone, _ := conversion.NewCloner().DeepCopy(claim) + claimClone := clone.(*api.PersistentVolumeClaim) + reactor.claimSource.Delete(claimClone) + return nil + }, + }, + { + // deleteVolume with a bound volume. Check the claim is Lost. 
+ "5-4 - delete volume", + newVolumeArray("volume5-4", "10Gi", "uid5-4", "claim5-4", api.VolumeBound, api.PersistentVolumeReclaimRetain), + novolumes, + newClaimArray("claim5-4", "uid5-4", "1Gi", "volume5-4", api.ClaimBound, annBoundByController, annBindCompleted), + newClaimArray("claim5-4", "uid5-4", "1Gi", "volume5-4", api.ClaimLost, annBoundByController, annBindCompleted), + []string{"Warning ClaimLost"}, noerrors, + // Custom test function that generates a delete event + func(ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest) error { + obj := ctrl.volumes.store.List()[0] + volume := obj.(*api.PersistentVolume) + // Remove the volume from list of resulting volumes. + delete(reactor.volumes, volume.Name) + // Poke the controller with deletion event. Cloned volume is + // needed to prevent races (and we would get a clone from etcd + // too). + clone, _ := conversion.NewCloner().DeepCopy(volume) + volumeClone := clone.(*api.PersistentVolume) + reactor.volumeSource.Delete(volumeClone) + return nil + }, + }, + } + + for ix, test := range tests { + glog.V(4).Infof("starting test %q", test.name) + + // Initialize the controller + client := &fake.Clientset{} + volumeSource := framework.NewFakeControllerSource() + claimSource := framework.NewFakeControllerSource() + ctrl := newTestController(client, volumeSource, claimSource) + reactor := newVolumeReactor(client, ctrl, volumeSource, claimSource, test.errors) + for _, claim := range test.initialClaims { + claimSource.Add(claim) + reactor.claims[claim.Name] = claim + } + for _, volume := range test.initialVolumes { + volumeSource.Add(volume) + reactor.volumes[volume.Name] = volume + } + + // Start the controller + defer ctrl.Stop() + go ctrl.Run() + + // Wait for the controller to pass initial sync. + for !ctrl.isFullySynced() { + time.Sleep(10 * time.Millisecond) + } + + count := reactor.getChangeCount() + + // Call the tested function + err := test.test(ctrl, reactor, test) + if err != nil { + t.Errorf("Test %q initial test call failed: %v", test.name, err) + } + + for reactor.getChangeCount() < count+expectedChanges[ix] { + reactor.waitTest() + } + + evaluateTestResults(ctrl, reactor, test, t) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/persistentvolume/delete_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/persistentvolume/delete_test.go new file mode 100644 index 000000000000..03ef84b94d4a --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/persistentvolume/delete_test.go @@ -0,0 +1,169 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package persistentvolume + +import ( + "errors" + "testing" + + "k8s.io/kubernetes/pkg/api" +) + +// Test single call to syncVolume, expecting recycling to happen. +// 1. Fill in the controller with initial data +// 2. Call the syncVolume *once*. +// 3. Compare resulting volumes with expected volumes. 
+func TestDeleteSync(t *testing.T) { + tests := []controllerTest{ + { + // delete volume bound by controller + "8-1 - successful delete", + newVolumeArray("volume8-1", "1Gi", "uid8-1", "claim8-1", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController), + novolumes, + noclaims, + noclaims, + noevents, noerrors, + // Inject deleter into the controller and call syncVolume. The + // deleter simulates one delete() call that succeeds. + wrapTestWithControllerConfig(operationDelete, []error{nil}, testSyncVolume), + }, + { + // delete volume bound by user + "8-2 - successful delete with prebound volume", + newVolumeArray("volume8-2", "1Gi", "uid8-2", "claim8-2", api.VolumeBound, api.PersistentVolumeReclaimDelete), + novolumes, + noclaims, + noclaims, + noevents, noerrors, + // Inject deleter into the controller and call syncVolume. The + // deleter simulates one delete() call that succeeds. + wrapTestWithControllerConfig(operationDelete, []error{nil}, testSyncVolume), + }, + { + // delete failure - plugin not found + "8-3 - plugin not found", + newVolumeArray("volume8-3", "1Gi", "uid8-3", "claim8-3", api.VolumeBound, api.PersistentVolumeReclaimDelete), + newVolumeArray("volume8-3", "1Gi", "uid8-3", "claim8-3", api.VolumeFailed, api.PersistentVolumeReclaimDelete), + noclaims, + noclaims, + []string{"Warning VolumeFailedDelete"}, noerrors, testSyncVolume, + }, + { + // delete failure - newDeleter returns error + "8-4 - newDeleter returns error", + newVolumeArray("volume8-4", "1Gi", "uid8-4", "claim8-4", api.VolumeBound, api.PersistentVolumeReclaimDelete), + newVolumeArray("volume8-4", "1Gi", "uid8-4", "claim8-4", api.VolumeFailed, api.PersistentVolumeReclaimDelete), + noclaims, + noclaims, + []string{"Warning VolumeFailedDelete"}, noerrors, + wrapTestWithControllerConfig(operationDelete, []error{}, testSyncVolume), + }, + { + // delete failure - delete() returns error + "8-5 - delete returns error", + newVolumeArray("volume8-5", "1Gi", "uid8-5", "claim8-5", api.VolumeBound, api.PersistentVolumeReclaimDelete), + newVolumeArray("volume8-5", "1Gi", "uid8-5", "claim8-5", api.VolumeFailed, api.PersistentVolumeReclaimDelete), + noclaims, + noclaims, + []string{"Warning VolumeFailedDelete"}, noerrors, + wrapTestWithControllerConfig(operationDelete, []error{errors.New("Mock delete error")}, testSyncVolume), + }, + { + // delete success(?) - volume is deleted before doDelete() starts + "8-6 - volume is deleted before deleting", + newVolumeArray("volume8-6", "1Gi", "uid8-6", "claim8-6", api.VolumeBound, api.PersistentVolumeReclaimDelete), + novolumes, + noclaims, + noclaims, + noevents, noerrors, + wrapTestWithInjectedOperation(wrapTestWithControllerConfig(operationDelete, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *volumeReactor) { + // Delete the volume before delete operation starts + reactor.lock.Lock() + delete(reactor.volumes, "volume8-6") + reactor.lock.Unlock() + }), + }, + { + // delete success(?) - volume is bound just at the time doDelete() + // starts. This simulates "volume no longer needs recycling, + // skipping". 
+ "8-7 - volume is bound before deleting", + newVolumeArray("volume8-7", "1Gi", "uid8-7", "claim8-7", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController), + newVolumeArray("volume8-7", "1Gi", "uid8-7", "claim8-7", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController), + noclaims, + newClaimArray("claim8-7", "uid8-7", "10Gi", "volume8-7", api.ClaimBound), + noevents, noerrors, + wrapTestWithInjectedOperation(wrapTestWithControllerConfig(operationDelete, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *volumeReactor) { + reactor.lock.Lock() + defer reactor.lock.Unlock() + // Bind the volume to ressurected claim (this should never + // happen) + claim := newClaim("claim8-7", "uid8-7", "10Gi", "volume8-7", api.ClaimBound) + reactor.claims[claim.Name] = claim + ctrl.claims.Add(claim) + volume := reactor.volumes["volume8-7"] + volume.Status.Phase = api.VolumeBound + }), + }, + { + // delete success - volume bound by user is deleted, while a new + // claim is created with another UID. + "8-9 - prebound volume is deleted while the claim exists", + newVolumeArray("volume8-9", "1Gi", "uid8-9", "claim8-9", api.VolumeBound, api.PersistentVolumeReclaimDelete), + novolumes, + newClaimArray("claim8-9", "uid8-9-x", "10Gi", "", api.ClaimPending), + newClaimArray("claim8-9", "uid8-9-x", "10Gi", "", api.ClaimPending), + noevents, noerrors, + // Inject deleter into the controller and call syncVolume. The + // deleter simulates one delete() call that succeeds. + wrapTestWithControllerConfig(operationDelete, []error{nil}, testSyncVolume), + }, + } + runSyncTests(t, tests) +} + +// Test multiple calls to syncClaim/syncVolume and periodic sync of all +// volume/claims. The test follows this pattern: +// 0. Load the controller with initial data. +// 1. Call controllerTest.testCall() once as in TestSync() +// 2. For all volumes/claims changed by previous syncVolume/syncClaim calls, +// call appropriate syncVolume/syncClaim (simulating "volume/claim changed" +// events). Go to 2. if these calls change anything. +// 3. When all changes are processed and no new changes were made, call +// syncVolume/syncClaim on all volumes/claims (simulating "periodic sync"). +// 4. If some changes were done by step 3., go to 2. (simulation of +// "volume/claim updated" events, eventually performing step 3. again) +// 5. When 3. does not do any changes, finish the tests and compare final set +// of volumes/claims with expected claims/volumes and report differences. +// Some limit of calls in enforced to prevent endless loops. +func TestDeleteMultiSync(t *testing.T) { + tests := []controllerTest{ + { + // delete failure - delete returns error. The controller should + // try again. 
+ "9-1 - delete returns error", + newVolumeArray("volume9-1", "1Gi", "uid9-1", "claim9-1", api.VolumeBound, api.PersistentVolumeReclaimDelete), + novolumes, + noclaims, + noclaims, + []string{"Warning VolumeFailedDelete"}, noerrors, + wrapTestWithControllerConfig(operationDelete, []error{errors.New("Mock delete error"), nil}, testSyncVolume), + }, + } + + runMultisyncTests(t, tests) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/persistentvolume/framework_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/persistentvolume/framework_test.go new file mode 100644 index 000000000000..6096799dc939 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/persistentvolume/framework_test.go @@ -0,0 +1,1005 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package persistentvolume + +import ( + "errors" + "fmt" + "reflect" + "strconv" + "strings" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/golang/glog" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/resource" + "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/client/cache" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" + "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" + "k8s.io/kubernetes/pkg/client/record" + "k8s.io/kubernetes/pkg/client/testing/core" + "k8s.io/kubernetes/pkg/controller/framework" + "k8s.io/kubernetes/pkg/conversion" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/types" + "k8s.io/kubernetes/pkg/util/diff" + vol "k8s.io/kubernetes/pkg/volume" +) + +// This is a unit test framework for persistent volume controller. +// It fills the controller with test claims/volumes and can simulate these +// scenarios: +// 1) Call syncClaim/syncVolume once. +// 2) Call syncClaim/syncVolume several times (both simulating "claim/volume +// modified" events and periodic sync), until the controller settles down and +// does not modify anything. +// 3) Simulate almost real API server/etcd and call add/update/delete +// volume/claim. +// In all these scenarios, when the test finishes, the framework can compare +// resulting claims/volumes with list of expected claims/volumes and report +// differences. + +// controllerTest contains a single controller test input. +// Each test has initial set of volumes and claims that are filled into the +// controller before the test starts. The test then contains a reference to +// function to call as the actual test. Available functions are: +// - testSyncClaim - calls syncClaim on the first claim in initialClaims. +// - testSyncClaimError - calls syncClaim on the first claim in initialClaims +// and expects an error to be returned. +// - testSyncVolume - calls syncVolume on the first volume in initialVolumes. +// - any custom function for specialized tests. +// The test then contains list of volumes/claims that are expected at the end +// of the test and list of generated events. 
+type controllerTest struct {
+	// Name of the test, for logging
+	name string
+	// Initial content of controller volume cache.
+	initialVolumes []*api.PersistentVolume
+	// Expected content of controller volume cache at the end of the test.
+	expectedVolumes []*api.PersistentVolume
+	// Initial content of controller claim cache.
+	initialClaims []*api.PersistentVolumeClaim
+	// Expected content of controller claim cache at the end of the test.
+	expectedClaims []*api.PersistentVolumeClaim
+	// Expected events - any event with prefix will pass, we don't check full
+	// event message.
+	expectedEvents []string
+	// Errors to produce on matching action
+	errors []reactorError
+	// Function to call as the test.
+	test testCall
+}
+
+type testCall func(ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest) error
+
+const testNamespace = "default"
+const mockPluginName = "MockVolumePlugin"
+
+var versionConflictError = errors.New("VersionError")
+var novolumes []*api.PersistentVolume
+var noclaims []*api.PersistentVolumeClaim
+var noevents = []string{}
+var noerrors = []reactorError{}
+
+// volumeReactor is a core.Reactor that simulates etcd and API server. It
+// stores:
+// - Latest versions of claims and volumes saved by the controller.
+// - Queue of all saves (to simulate "volume/claim updated" events). This queue
+//   contains all intermediate states of an object - e.g. a claim.VolumeName
+//   is updated first and claim.Phase second. This queue will then contain both
+//   updates as separate entries.
+// - Number of changes since the last call to volumeReactor.syncAll().
+// - Optionally, volume and claim event sources. When set, all changed
+//   volumes/claims are sent as Modify events to these sources. These sources
+//   can be linked back to the controller watcher as "volume/claim updated"
+//   events.
+// - Optionally, a list of errors that should be returned by the reactor,
+//   simulating etcd / API server failures. These errors are evaluated in order
+//   and every error is returned only once. I.e. when the reactor finds a
+//   matching reactorError, it returns the appropriate error and removes the
+//   reactorError from the list.
+type volumeReactor struct {
+	volumes              map[string]*api.PersistentVolume
+	claims               map[string]*api.PersistentVolumeClaim
+	changedObjects       []interface{}
+	changedSinceLastSync int
+	ctrl                 *PersistentVolumeController
+	volumeSource         *framework.FakeControllerSource
+	claimSource          *framework.FakeControllerSource
+	lock                 sync.Mutex
+	errors               []reactorError
+}
+
+// reactorError is an error that is returned by the test reactor (= simulated
+// etcd / API server) when a performed action matches a given verb ("get",
+// "update", "create", "delete" or "*") on a given resource
+// ("persistentvolumes", "persistentvolumeclaims" or "*").
+type reactorError struct {
+	verb     string
+	resource string
+	error    error
+}
+
+// React is a callback called by fake kubeClient from the controller.
+// In other words, every claim/volume change performed by the controller ends
+// here.
+// This callback checks versions of the updated objects and refuses those that
+// are too old (simulating real etcd).
+// All updated objects are stored locally to keep track of object versions and
+// to evaluate test results.
+// All updated objects are also inserted into the changedObjects queue and
+// optionally sent back to the controller via its watchers.
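Injected errors are consumed in order and each matches at most once, as described above. A hypothetical error list that makes the first update of a PersistentVolume fail with a version conflict while everything else succeeds:

// exampleErrors is illustrative; it would be passed through
// controllerTest.errors into newVolumeReactor.
var exampleErrors = []reactorError{
	{verb: "update", resource: "persistentvolumes", error: versionConflictError},
}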
+func (r *volumeReactor) React(action core.Action) (handled bool, ret runtime.Object, err error) {
+	r.lock.Lock()
+	defer r.lock.Unlock()
+
+	glog.V(4).Infof("reactor got operation %q on %q", action.GetVerb(), action.GetResource())
+
+	// Inject error when requested
+	err = r.injectReactError(action)
+	if err != nil {
+		return true, nil, err
+	}
+
+	// Test did not request to inject an error, continue simulating API server.
+	switch {
+	case action.Matches("create", "persistentvolumes"):
+		obj := action.(core.UpdateAction).GetObject()
+		volume := obj.(*api.PersistentVolume)
+
+		// check the volume does not exist
+		_, found := r.volumes[volume.Name]
+		if found {
+			return true, nil, fmt.Errorf("Cannot create volume %s: volume already exists", volume.Name)
+		}
+
+		// Store the updated object to appropriate places.
+		if r.volumeSource != nil {
+			r.volumeSource.Add(volume)
+		}
+		r.volumes[volume.Name] = volume
+		r.changedObjects = append(r.changedObjects, volume)
+		r.changedSinceLastSync++
+		glog.V(4).Infof("created volume %s", volume.Name)
+		return true, volume, nil
+
+	case action.Matches("update", "persistentvolumes"):
+		obj := action.(core.UpdateAction).GetObject()
+		volume := obj.(*api.PersistentVolume)
+
+		// Check and bump object version
+		storedVolume, found := r.volumes[volume.Name]
+		if found {
+			storedVer, _ := strconv.Atoi(storedVolume.ResourceVersion)
+			requestedVer, _ := strconv.Atoi(volume.ResourceVersion)
+			if storedVer != requestedVer {
+				return true, obj, versionConflictError
+			}
+			volume.ResourceVersion = strconv.Itoa(storedVer + 1)
+		} else {
+			return true, nil, fmt.Errorf("Cannot update volume %s: volume not found", volume.Name)
+		}
+
+		// Store the updated object to appropriate places.
+		if r.volumeSource != nil {
+			r.volumeSource.Modify(volume)
+		}
+		r.volumes[volume.Name] = volume
+		r.changedObjects = append(r.changedObjects, volume)
+		r.changedSinceLastSync++
+		glog.V(4).Infof("saved updated volume %s", volume.Name)
+		return true, volume, nil
+
+	case action.Matches("update", "persistentvolumeclaims"):
+		obj := action.(core.UpdateAction).GetObject()
+		claim := obj.(*api.PersistentVolumeClaim)
+
+		// Check and bump object version
+		storedClaim, found := r.claims[claim.Name]
+		if found {
+			storedVer, _ := strconv.Atoi(storedClaim.ResourceVersion)
+			requestedVer, _ := strconv.Atoi(claim.ResourceVersion)
+			if storedVer != requestedVer {
+				return true, obj, versionConflictError
+			}
+			claim.ResourceVersion = strconv.Itoa(storedVer + 1)
+		} else {
+			return true, nil, fmt.Errorf("Cannot update claim %s: claim not found", claim.Name)
+		}
+
+		// Store the updated object to appropriate places.
+        r.claims[claim.Name] = claim
+        if r.claimSource != nil {
+            r.claimSource.Modify(claim)
+        }
+        r.changedObjects = append(r.changedObjects, claim)
+        r.changedSinceLastSync++
+        glog.V(4).Infof("saved updated claim %s", claim.Name)
+        return true, claim, nil
+
+    case action.Matches("get", "persistentvolumes"):
+        name := action.(core.GetAction).GetName()
+        volume, found := r.volumes[name]
+        if found {
+            glog.V(4).Infof("GetVolume: found %s", volume.Name)
+            return true, volume, nil
+        } else {
+            glog.V(4).Infof("GetVolume: volume %s not found", name)
+            return true, nil, fmt.Errorf("Cannot find volume %s", name)
+        }
+
+    case action.Matches("delete", "persistentvolumes"):
+        name := action.(core.DeleteAction).GetName()
+        glog.V(4).Infof("deleted volume %s", name)
+        _, found := r.volumes[name]
+        if found {
+            delete(r.volumes, name)
+            return true, nil, nil
+        } else {
+            return true, nil, fmt.Errorf("Cannot delete volume %s: not found", name)
+        }
+
+    case action.Matches("delete", "persistentvolumeclaims"):
+        name := action.(core.DeleteAction).GetName()
+        glog.V(4).Infof("deleted claim %s", name)
+        _, found := r.claims[name]
+        if found {
+            delete(r.claims, name)
+            return true, nil, nil
+        } else {
+            return true, nil, fmt.Errorf("Cannot delete claim %s: not found", name)
+        }
+    }
+
+    return false, nil, nil
+}
+
+// injectReactError returns an error when the test requested the given action
+// to fail. nil is returned otherwise.
+func (r *volumeReactor) injectReactError(action core.Action) error {
+    if len(r.errors) == 0 {
+        // No more errors to inject, everything should succeed.
+        return nil
+    }
+
+    for i, expected := range r.errors {
+        glog.V(4).Infof("trying to match %q %q with %q %q", expected.verb, expected.resource, action.GetVerb(), action.GetResource())
+        if action.Matches(expected.verb, expected.resource) {
+            // That's the action we're waiting for, remove it from the list of
+            // injected errors
+            r.errors = append(r.errors[:i], r.errors[i+1:]...)
+            glog.V(4).Infof("reactor found matching error at index %d: %q %q, returning %v", i, expected.verb, expected.resource, expected.error)
+            return expected.error
+        }
+    }
+    return nil
+}
+
+// checkVolumes compares all expectedVolumes with the set of volumes at the end
+// of the test and reports differences.
+func (r *volumeReactor) checkVolumes(t *testing.T, expectedVolumes []*api.PersistentVolume) error {
+    r.lock.Lock()
+    defer r.lock.Unlock()
+
+    expectedMap := make(map[string]*api.PersistentVolume)
+    gotMap := make(map[string]*api.PersistentVolume)
+    // Clear any ResourceVersion from both sets
+    for _, v := range expectedVolumes {
+        v.ResourceVersion = ""
+        expectedMap[v.Name] = v
+    }
+    for _, v := range r.volumes {
+        // We must clone the volume because of golang race check - it was
+        // written by the controller without any locks on it.
+        clone, _ := conversion.NewCloner().DeepCopy(v)
+        v = clone.(*api.PersistentVolume)
+        v.ResourceVersion = ""
+        if v.Spec.ClaimRef != nil {
+            v.Spec.ClaimRef.ResourceVersion = ""
+        }
+        gotMap[v.Name] = v
+    }
+    if !reflect.DeepEqual(expectedMap, gotMap) {
+        // Print ugly but useful diff of expected and received objects for
+        // easier debugging.
+        return fmt.Errorf("Volume check failed [A-expected, B-got]: %s", diff.ObjectDiff(expectedMap, gotMap))
+    }
+    return nil
+}
+
+// checkClaims compares all expectedClaims with the set of claims at the end of
+// the test and reports differences.
+func (r *volumeReactor) checkClaims(t *testing.T, expectedClaims []*api.PersistentVolumeClaim) error {
+    r.lock.Lock()
+    defer r.lock.Unlock()
+
+    expectedMap := make(map[string]*api.PersistentVolumeClaim)
+    gotMap := make(map[string]*api.PersistentVolumeClaim)
+    for _, c := range expectedClaims {
+        c.ResourceVersion = ""
+        expectedMap[c.Name] = c
+    }
+    for _, c := range r.claims {
+        // We must clone the claim because of golang race check - it was
+        // written by the controller without any locks on it.
+        clone, _ := conversion.NewCloner().DeepCopy(c)
+        c = clone.(*api.PersistentVolumeClaim)
+        c.ResourceVersion = ""
+        gotMap[c.Name] = c
+    }
+    if !reflect.DeepEqual(expectedMap, gotMap) {
+        // Print ugly but useful diff of expected and received objects for
+        // easier debugging.
+        return fmt.Errorf("Claim check failed [A-expected, B-got]: %s", diff.ObjectDiff(expectedMap, gotMap))
+    }
+    return nil
+}
+
+// checkEvents compares all expectedEvents with events generated during the
+// test and reports differences.
+func checkEvents(t *testing.T, expectedEvents []string, ctrl *PersistentVolumeController) error {
+    var err error
+
+    // Read recorded events
+    fakeRecorder := ctrl.eventRecorder.(*record.FakeRecorder)
+    gotEvents := []string{}
+    finished := false
+    for !finished {
+        select {
+        case event, ok := <-fakeRecorder.Events:
+            if ok {
+                glog.V(5).Infof("event recorder got event %s", event)
+                gotEvents = append(gotEvents, event)
+            } else {
+                glog.V(5).Infof("event recorder finished")
+                finished = true
+            }
+        default:
+            glog.V(5).Infof("event recorder finished")
+            finished = true
+        }
+    }
+
+    // Evaluate the events
+    for i, expected := range expectedEvents {
+        if len(gotEvents) <= i {
+            t.Errorf("Event %q not emitted", expected)
+            err = fmt.Errorf("Events do not match")
+            continue
+        }
+        received := gotEvents[i]
+        if !strings.HasPrefix(received, expected) {
+            t.Errorf("Unexpected event received, expected %q, got %q", expected, received)
+            err = fmt.Errorf("Events do not match")
+        }
+    }
+    for i := len(expectedEvents); i < len(gotEvents); i++ {
+        t.Errorf("Unexpected event received: %q", gotEvents[i])
+        err = fmt.Errorf("Events do not match")
+    }
+    return err
+}
+
+// popChange returns one recorded updated object, either *api.PersistentVolume
+// or *api.PersistentVolumeClaim. Returns nil when there are no changes.
+func (r *volumeReactor) popChange() interface{} {
+    r.lock.Lock()
+    defer r.lock.Unlock()
+
+    if len(r.changedObjects) == 0 {
+        return nil
+    }
+
+    // For debugging purposes, print the queue
+    for _, obj := range r.changedObjects {
+        switch obj.(type) {
+        case *api.PersistentVolume:
+            vol, _ := obj.(*api.PersistentVolume)
+            glog.V(4).Infof("reactor queue: %s", vol.Name)
+        case *api.PersistentVolumeClaim:
+            claim, _ := obj.(*api.PersistentVolumeClaim)
+            glog.V(4).Infof("reactor queue: %s", claim.Name)
+        }
+    }
+
+    // Pop the first item from the queue and return it
+    obj := r.changedObjects[0]
+    r.changedObjects = r.changedObjects[1:]
+    return obj
+}
+
+// syncAll simulates the controller's periodic sync of volumes and claims. It
+// simply adds all these objects to the internal queue of updates. This method
+// should be used when the test manually calls syncClaim/syncVolume. Tests that
+// use the real controller loop (ctrl.Run()) get periodic sync automatically.
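+//
+// Illustrative usage in a hand-rolled sync loop (a sketch, not taken from any
+// test below):
+//
+//	reactor.syncAll()
+//	for obj := reactor.popChange(); obj != nil; obj = reactor.popChange() {
+//		// feed obj back into ctrl.syncClaim() / ctrl.syncVolume()
+//	}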
+func (r *volumeReactor) syncAll() {
+    r.lock.Lock()
+    defer r.lock.Unlock()
+
+    for _, c := range r.claims {
+        r.changedObjects = append(r.changedObjects, c)
+    }
+    for _, v := range r.volumes {
+        r.changedObjects = append(r.changedObjects, v)
+    }
+    r.changedSinceLastSync = 0
+}
+
+func (r *volumeReactor) getChangeCount() int {
+    r.lock.Lock()
+    defer r.lock.Unlock()
+    return r.changedSinceLastSync
+}
+
+func (r *volumeReactor) getOperationCount() int {
+    r.ctrl.runningOperationsMapLock.Lock()
+    defer r.ctrl.runningOperationsMapLock.Unlock()
+    return len(r.ctrl.runningOperations)
+}
+
+// waitTest waits until all tests, controllers and other goroutines do their
+// job and no new actions are registered for 10 milliseconds.
+func (r *volumeReactor) waitTest() {
+    // Check every 10ms if the controller does something and stop if it's
+    // idle.
+    oldChanges := -1
+    for {
+        time.Sleep(10 * time.Millisecond)
+        changes := r.getChangeCount()
+        if changes == oldChanges && r.getOperationCount() == 0 {
+            // No changes for the last 10ms -> the controller must be idle.
+            break
+        }
+        oldChanges = changes
+    }
+}
+
+func newVolumeReactor(client *fake.Clientset, ctrl *PersistentVolumeController, volumeSource, claimSource *framework.FakeControllerSource, errors []reactorError) *volumeReactor {
+    reactor := &volumeReactor{
+        volumes:      make(map[string]*api.PersistentVolume),
+        claims:       make(map[string]*api.PersistentVolumeClaim),
+        ctrl:         ctrl,
+        volumeSource: volumeSource,
+        claimSource:  claimSource,
+        errors:       errors,
+    }
+    client.AddReactor("*", "*", reactor.React)
+    return reactor
+}
+
+func newTestController(kubeClient clientset.Interface, volumeSource, claimSource cache.ListerWatcher) *PersistentVolumeController {
+    if volumeSource == nil {
+        volumeSource = framework.NewFakeControllerSource()
+    }
+    if claimSource == nil {
+        claimSource = framework.NewFakeControllerSource()
+    }
+    ctrl := NewPersistentVolumeController(
+        kubeClient,
+        5*time.Second,        // sync period
+        nil,                  // provisioner
+        []vol.VolumePlugin{}, // recyclers
+        nil,                  // cloud
+        "",
+        volumeSource,
+        claimSource,
+        record.NewFakeRecorder(1000), // event recorder
+    )
+
+    // Speed up the test
+    ctrl.createProvisionedPVInterval = 5 * time.Millisecond
+    return ctrl
+}
+
+func addRecyclePlugin(ctrl *PersistentVolumeController, expectedRecycleCalls []error) {
+    plugin := &mockVolumePlugin{
+        recycleCalls: expectedRecycleCalls,
+    }
+    ctrl.recyclePluginMgr.InitPlugins([]vol.VolumePlugin{plugin}, ctrl)
+}
+
+func addDeletePlugin(ctrl *PersistentVolumeController, expectedDeleteCalls []error) {
+    plugin := &mockVolumePlugin{
+        deleteCalls: expectedDeleteCalls,
+    }
+    ctrl.recyclePluginMgr.InitPlugins([]vol.VolumePlugin{plugin}, ctrl)
+}
+
+func addProvisionPlugin(ctrl *PersistentVolumeController, expectedProvisionCalls []error) {
+    plugin := &mockVolumePlugin{
+        provisionCalls: expectedProvisionCalls,
+    }
+    ctrl.provisioner = plugin
+}
+
+// newVolume returns a new volume with the given attributes
+func newVolume(name, capacity, boundToClaimUID, boundToClaimName string, phase api.PersistentVolumePhase, reclaimPolicy api.PersistentVolumeReclaimPolicy, annotations ...string) *api.PersistentVolume {
+    volume := api.PersistentVolume{
+        ObjectMeta: api.ObjectMeta{
+            Name:            name,
+            ResourceVersion: "1",
+        },
+        Spec: api.PersistentVolumeSpec{
+            Capacity: api.ResourceList{
+                api.ResourceName(api.ResourceStorage): resource.MustParse(capacity),
+            },
+            PersistentVolumeSource: api.PersistentVolumeSource{
+                GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{},
+            },
+            AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce, api.ReadOnlyMany},
+            PersistentVolumeReclaimPolicy: reclaimPolicy,
+        },
+        Status: api.PersistentVolumeStatus{
+            Phase: phase,
+        },
+    }
+
+    if boundToClaimName != "" {
+        volume.Spec.ClaimRef = &api.ObjectReference{
+            Kind:       "PersistentVolumeClaim",
+            APIVersion: "v1",
+            UID:        types.UID(boundToClaimUID),
+            Namespace:  testNamespace,
+            Name:       boundToClaimName,
+        }
+    }
+
+    if len(annotations) > 0 {
+        volume.Annotations = make(map[string]string)
+        for _, a := range annotations {
+            if a != annDynamicallyProvisioned {
+                volume.Annotations[a] = "yes"
+            } else {
+                volume.Annotations[a] = mockPluginName
+            }
+        }
+    }
+
+    return &volume
+}
+
+// newVolumeArray returns an array with a single volume that would be returned
+// by newVolume() with the same parameters.
+func newVolumeArray(name, capacity, boundToClaimUID, boundToClaimName string, phase api.PersistentVolumePhase, reclaimPolicy api.PersistentVolumeReclaimPolicy, annotations ...string) []*api.PersistentVolume {
+    return []*api.PersistentVolume{
+        newVolume(name, capacity, boundToClaimUID, boundToClaimName, phase, reclaimPolicy, annotations...),
+    }
+}
+
+// newClaim returns a new claim with the given attributes
+func newClaim(name, claimUID, capacity, boundToVolume string, phase api.PersistentVolumeClaimPhase, annotations ...string) *api.PersistentVolumeClaim {
+    claim := api.PersistentVolumeClaim{
+        ObjectMeta: api.ObjectMeta{
+            Name:            name,
+            Namespace:       testNamespace,
+            UID:             types.UID(claimUID),
+            ResourceVersion: "1",
+        },
+        Spec: api.PersistentVolumeClaimSpec{
+            AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce, api.ReadOnlyMany},
+            Resources: api.ResourceRequirements{
+                Requests: api.ResourceList{
+                    api.ResourceName(api.ResourceStorage): resource.MustParse(capacity),
+                },
+            },
+            VolumeName: boundToVolume,
+        },
+        Status: api.PersistentVolumeClaimStatus{
+            Phase: phase,
+        },
+    }
+    // Make sure api.GetReference(claim) works
+    claim.ObjectMeta.SelfLink = testapi.Default.SelfLink("pvc", name)
+
+    if len(annotations) > 0 {
+        claim.Annotations = make(map[string]string)
+        for _, a := range annotations {
+            claim.Annotations[a] = "yes"
+        }
+    }
+    return &claim
+}
+
+// newClaimArray returns an array with a single claim that would be returned
+// by newClaim() with the same parameters.
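+//
+// For example (an illustrative sketch, the names are made up for this
+// comment):
+//
+//	claims := newClaimArray("claim-1", "uid-1", "1Gi", "", api.ClaimPending)
+//
+// yields a single pending 1Gi claim that is bound to no volume.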
+func newClaimArray(name, claimUID, capacity, boundToVolume string, phase api.PersistentVolumeClaimPhase, annotations ...string) []*api.PersistentVolumeClaim {
+    return []*api.PersistentVolumeClaim{
+        newClaim(name, claimUID, capacity, boundToVolume, phase, annotations...),
+    }
+}
+
+func testSyncClaim(ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest) error {
+    return ctrl.syncClaim(test.initialClaims[0])
+}
+
+func testSyncClaimError(ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest) error {
+    err := ctrl.syncClaim(test.initialClaims[0])
+
+    if err != nil {
+        return nil
+    }
+    return fmt.Errorf("syncClaim succeeded when failure was expected")
+}
+
+func testSyncVolume(ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest) error {
+    return ctrl.syncVolume(test.initialVolumes[0])
+}
+
+type operationType string
+
+const operationDelete = "Delete"
+const operationRecycle = "Recycle"
+const operationProvision = "Provision"
+
+// wrapTestWithControllerConfig returns a testCall that:
+// - configures the controller with a recycler, deleter or provisioner that
+//   returns the provided errors when a volume is deleted, recycled or
+//   provisioned
+// - calls the given testCall
+func wrapTestWithControllerConfig(operation operationType, expectedOperationCalls []error, toWrap testCall) testCall {
+    expected := expectedOperationCalls
+
+    return func(ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest) error {
+        switch operation {
+        case operationDelete:
+            addDeletePlugin(ctrl, expected)
+        case operationRecycle:
+            addRecyclePlugin(ctrl, expected)
+        case operationProvision:
+            addProvisionPlugin(ctrl, expected)
+        }
+
+        return toWrap(ctrl, reactor, test)
+    }
+}
+
+// wrapTestWithInjectedOperation returns a testCall that:
+// - starts the controller and lets it run the original testCall until the
+//   scheduleOperation() call. It blocks the controller there and calls the
+//   injected function to simulate that something is happening while the
+//   controller waits for the operation lock. The controller is then resumed
+//   and we check how it behaves.
+func wrapTestWithInjectedOperation(toWrap testCall, injectBeforeOperation func(ctrl *PersistentVolumeController, reactor *volumeReactor)) testCall {
+    return func(ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest) error {
+        // Inject a hook before the async operation starts
+        ctrl.preOperationHook = func(operationName string, arg interface{}) {
+            // Inside the hook, run the function to inject
+            glog.V(4).Infof("reactor: scheduleOperation reached, injecting call")
+            injectBeforeOperation(ctrl, reactor)
+        }
+
+        // Run the tested function (typically syncClaim/syncVolume) in a
+        // separate goroutine.
+        var testError error
+        var testFinished int32
+
+        go func() {
+            testError = toWrap(ctrl, reactor, test)
+            // Let the "main" test function know that syncVolume has finished.
+            atomic.StoreInt32(&testFinished, 1)
+        }()
+
+        // Wait for the controller to finish the test function.
+        for atomic.LoadInt32(&testFinished) == 0 {
+            time.Sleep(time.Millisecond * 10)
+        }
+
+        return testError
+    }
+}
+
+func evaluateTestResults(ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest, t *testing.T) {
+    // Evaluate results
+    if err := reactor.checkClaims(t, test.expectedClaims); err != nil {
+        t.Errorf("Test %q: %v", test.name, err)
+    }
+    if err := reactor.checkVolumes(t, test.expectedVolumes); err != nil {
+        t.Errorf("Test %q: %v", test.name, err)
+    }
+
+    if err := checkEvents(t, test.expectedEvents, ctrl); err != nil {
+        t.Errorf("Test %q: %v", test.name, err)
+    }
+}
+
+// Test a single call to the syncClaim and syncVolume methods.
+// For all tests:
+// 1. Fill in the controller with initial data
+// 2. Call the tested function (syncClaim/syncVolume) via
+//    controllerTest.testCall *once*.
+// 3. Compare resulting volumes and claims with expected volumes and claims.
+func runSyncTests(t *testing.T, tests []controllerTest) {
+    for _, test := range tests {
+        glog.V(4).Infof("starting test %q", test.name)
+
+        // Initialize the controller
+        client := &fake.Clientset{}
+        ctrl := newTestController(client, nil, nil)
+        reactor := newVolumeReactor(client, ctrl, nil, nil, test.errors)
+        for _, claim := range test.initialClaims {
+            ctrl.claims.Add(claim)
+            reactor.claims[claim.Name] = claim
+        }
+        for _, volume := range test.initialVolumes {
+            ctrl.volumes.store.Add(volume)
+            reactor.volumes[volume.Name] = volume
+        }
+
+        // Run the tested function
+        err := test.test(ctrl, reactor, test)
+        if err != nil {
+            t.Errorf("Test %q failed: %v", test.name, err)
+        }
+
+        // Wait for all goroutines to finish
+        reactor.waitTest()
+
+        evaluateTestResults(ctrl, reactor, test, t)
+    }
+}
+
+// Test multiple calls to syncClaim/syncVolume and periodic sync of all
+// volumes/claims. For all tests, the test follows this pattern:
+// 0. Load the controller with initial data.
+// 1. Call controllerTest.testCall() once as in TestSync()
+// 2. For all volumes/claims changed by previous syncVolume/syncClaim calls,
+//    call the appropriate syncVolume/syncClaim (simulating "volume/claim
+//    changed" events). Go to 2. if these calls change anything.
+// 3. When all changes are processed and no new changes were made, call
+//    syncVolume/syncClaim on all volumes/claims (simulating "periodic sync").
+// 4. If some changes were done by step 3., go to 2. (simulation of
+//    "volume/claim updated" events, eventually performing step 3. again)
+// 5. When step 3. makes no changes, finish the test and compare the final set
+//    of volumes/claims with the expected claims/volumes and report
+//    differences.
+// A limit on the number of iterations is enforced to prevent endless loops.
+func runMultisyncTests(t *testing.T, tests []controllerTest) {
+    for _, test := range tests {
+        glog.V(4).Infof("starting multisync test %q", test.name)
+
+        // Initialize the controller
+        client := &fake.Clientset{}
+        ctrl := newTestController(client, nil, nil)
+        reactor := newVolumeReactor(client, ctrl, nil, nil, test.errors)
+        for _, claim := range test.initialClaims {
+            ctrl.claims.Add(claim)
+            reactor.claims[claim.Name] = claim
+        }
+        for _, volume := range test.initialVolumes {
+            ctrl.volumes.store.Add(volume)
+            reactor.volumes[volume.Name] = volume
+        }
+
+        // Run the tested function
+        err := test.test(ctrl, reactor, test)
+        if err != nil {
+            t.Errorf("Test %q failed: %v", test.name, err)
+        }
+
+        // Simulate any "changed" events and "periodic sync" until we reach a
+        // stable state.
+        firstSync := true
+        counter := 0
+    testLoop:
+        for {
+            counter++
+            glog.V(4).Infof("test %q: iteration %d", test.name, counter)
+
+            if counter > 100 {
+                t.Errorf("Test %q failed: too many iterations", test.name)
+                break
+            }
+
+            // Wait for all goroutines to finish
+            reactor.waitTest()
+
+            obj := reactor.popChange()
+            if obj == nil {
+                // Nothing was changed, should we exit?
+                if firstSync || reactor.changedSinceLastSync > 0 {
+                    // There were some changes after the last "periodic sync".
+                    // Simulate "periodic sync" of everything (until it produces
+                    // no changes).
+                    firstSync = false
+                    glog.V(4).Infof("test %q: simulating periodic sync of all claims and volumes", test.name)
+                    reactor.syncAll()
+                } else {
+                    // The last sync did not produce any updates, the test
+                    // reached a stable state -> finish.
+                    break
+                }
+            }
+
+            // There were some changes, process them
+            switch obj.(type) {
+            case *api.PersistentVolumeClaim:
+                claim := obj.(*api.PersistentVolumeClaim)
+                // Simulate "claim updated" event
+                ctrl.claims.Update(claim)
+                err = ctrl.syncClaim(claim)
+                if err != nil {
+                    if err == versionConflictError {
+                        // Ignore version errors
+                        glog.V(4).Infof("test intentionally ignores version error.")
+                    } else {
+                        t.Errorf("Error calling syncClaim: %v", err)
+                        // Finish the whole loop on the first error; a plain
+                        // "break" would only break out of the switch.
+                        break testLoop
+                    }
+                }
+                // Process generated changes
+                continue
+            case *api.PersistentVolume:
+                volume := obj.(*api.PersistentVolume)
+                // Simulate "volume updated" event
+                ctrl.volumes.store.Update(volume)
+                err = ctrl.syncVolume(volume)
+                if err != nil {
+                    if err == versionConflictError {
+                        // Ignore version errors
+                        glog.V(4).Infof("test intentionally ignores version error.")
+                    } else {
+                        t.Errorf("Error calling syncVolume: %v", err)
+                        // Finish the whole loop on the first error; a plain
+                        // "break" would only break out of the switch.
+                        break testLoop
+                    }
+                }
+                // Process generated changes
+                continue
+            }
+        }
+        evaluateTestResults(ctrl, reactor, test, t)
+        glog.V(4).Infof("test %q finished after %d iterations", test.name, counter)
+    }
+}
+
+// Dummy volume plugin for provisioning, deletion and recycling. It contains
+// lists of expected return values to simulate errors.
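+//
+// For example (an illustrative sketch): a deleter whose first call fails and
+// whose second call succeeds is configured as
+//
+//	addDeletePlugin(ctrl, []error{errors.New("mock delete error"), nil})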
+type mockVolumePlugin struct { + provisionCalls []error + provisionCallCounter int + deleteCalls []error + deleteCallCounter int + recycleCalls []error + recycleCallCounter int + provisionOptions vol.VolumeOptions +} + +var _ vol.VolumePlugin = &mockVolumePlugin{} +var _ vol.RecyclableVolumePlugin = &mockVolumePlugin{} +var _ vol.DeletableVolumePlugin = &mockVolumePlugin{} +var _ vol.ProvisionableVolumePlugin = &mockVolumePlugin{} + +func (plugin *mockVolumePlugin) Init(host vol.VolumeHost) error { + return nil +} + +func (plugin *mockVolumePlugin) Name() string { + return mockPluginName +} + +func (plugin *mockVolumePlugin) CanSupport(spec *vol.Spec) bool { + return true +} + +func (plugin *mockVolumePlugin) NewMounter(spec *vol.Spec, podRef *api.Pod, opts vol.VolumeOptions) (vol.Mounter, error) { + return nil, fmt.Errorf("Mounter is not supported by this plugin") +} + +func (plugin *mockVolumePlugin) NewUnmounter(name string, podUID types.UID) (vol.Unmounter, error) { + return nil, fmt.Errorf("Unmounter is not supported by this plugin") +} + +// Provisioner interfaces + +func (plugin *mockVolumePlugin) NewProvisioner(options vol.VolumeOptions) (vol.Provisioner, error) { + if len(plugin.provisionCalls) > 0 { + // mockVolumePlugin directly implements Provisioner interface + glog.V(4).Infof("mock plugin NewProvisioner called, returning mock provisioner") + plugin.provisionOptions = options + return plugin, nil + } else { + return nil, fmt.Errorf("Mock plugin error: no provisionCalls configured") + } +} + +func (plugin *mockVolumePlugin) Provision() (*api.PersistentVolume, error) { + if len(plugin.provisionCalls) <= plugin.provisionCallCounter { + return nil, fmt.Errorf("Mock plugin error: unexpected provisioner call %d", plugin.provisionCallCounter) + } + + var pv *api.PersistentVolume + err := plugin.provisionCalls[plugin.provisionCallCounter] + if err == nil { + // Create a fake PV with known GCE volume (to match expected volume) + pv = &api.PersistentVolume{ + ObjectMeta: api.ObjectMeta{ + Name: plugin.provisionOptions.PVName, + }, + Spec: api.PersistentVolumeSpec{ + Capacity: api.ResourceList{ + api.ResourceName(api.ResourceStorage): plugin.provisionOptions.Capacity, + }, + AccessModes: plugin.provisionOptions.AccessModes, + PersistentVolumeReclaimPolicy: plugin.provisionOptions.PersistentVolumeReclaimPolicy, + PersistentVolumeSource: api.PersistentVolumeSource{ + GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{}, + }, + }, + } + } + + plugin.provisionCallCounter++ + glog.V(4).Infof("mock plugin Provision call nr. %d, returning %v: %v", plugin.provisionCallCounter, pv, err) + return pv, err +} + +// Deleter interfaces + +func (plugin *mockVolumePlugin) NewDeleter(spec *vol.Spec) (vol.Deleter, error) { + if len(plugin.deleteCalls) > 0 { + // mockVolumePlugin directly implements Deleter interface + glog.V(4).Infof("mock plugin NewDeleter called, returning mock deleter") + return plugin, nil + } else { + return nil, fmt.Errorf("Mock plugin error: no deleteCalls configured") + } +} + +func (plugin *mockVolumePlugin) Delete() error { + if len(plugin.deleteCalls) <= plugin.deleteCallCounter { + return fmt.Errorf("Mock plugin error: unexpected deleter call %d", plugin.deleteCallCounter) + } + ret := plugin.deleteCalls[plugin.deleteCallCounter] + plugin.deleteCallCounter++ + glog.V(4).Infof("mock plugin Delete call nr. 
%d, returning %v", plugin.deleteCallCounter, ret) + return ret +} + +// Volume interfaces + +func (plugin *mockVolumePlugin) GetPath() string { + return "" +} + +func (plugin *mockVolumePlugin) GetMetrics() (*vol.Metrics, error) { + return nil, nil +} + +// Recycler interfaces + +func (plugin *mockVolumePlugin) NewRecycler(pvName string, spec *vol.Spec) (vol.Recycler, error) { + if len(plugin.recycleCalls) > 0 { + // mockVolumePlugin directly implements Recycler interface + glog.V(4).Infof("mock plugin NewRecycler called, returning mock recycler") + return plugin, nil + } else { + return nil, fmt.Errorf("Mock plugin error: no recycleCalls configured") + } +} + +func (plugin *mockVolumePlugin) Recycle() error { + if len(plugin.recycleCalls) <= plugin.recycleCallCounter { + return fmt.Errorf("Mock plugin error: unexpected recycle call %d", plugin.recycleCallCounter) + } + ret := plugin.recycleCalls[plugin.recycleCallCounter] + plugin.recycleCallCounter++ + glog.V(4).Infof("mock plugin Recycle call nr. %d, returning %v", plugin.recycleCallCounter, ret) + return ret +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/persistentvolume/index.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/persistentvolume/index.go new file mode 100644 index 000000000000..39c85d939ed1 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/persistentvolume/index.go @@ -0,0 +1,240 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package persistentvolume + +import ( + "fmt" + "sort" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/client/cache" +) + +// persistentVolumeOrderedIndex is a cache.Store that keeps persistent volumes indexed by AccessModes and ordered by storage capacity. +type persistentVolumeOrderedIndex struct { + store cache.Indexer +} + +// accessModesIndexFunc is an indexing function that returns a persistent volume's AccessModes as a string +func accessModesIndexFunc(obj interface{}) ([]string, error) { + if pv, ok := obj.(*api.PersistentVolume); ok { + modes := api.GetAccessModesAsString(pv.Spec.AccessModes) + return []string{modes}, nil + } + return []string{""}, fmt.Errorf("object is not a persistent volume: %v", obj) +} + +// listByAccessModes returns all volumes with the given set of AccessModeTypes. The list is unsorted! 
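+//
+// For example (an illustrative sketch): all volumes indexed under the
+// "RWO,ROX" key are returned by
+//
+//	volumes, err := pvIndex.listByAccessModes(
+//		[]api.PersistentVolumeAccessMode{api.ReadWriteOnce, api.ReadOnlyMany})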
+func (pvIndex *persistentVolumeOrderedIndex) listByAccessModes(modes []api.PersistentVolumeAccessMode) ([]*api.PersistentVolume, error) {
+    pv := &api.PersistentVolume{
+        Spec: api.PersistentVolumeSpec{
+            AccessModes: modes,
+        },
+    }
+
+    objs, err := pvIndex.store.Index("accessmodes", pv)
+    if err != nil {
+        return nil, err
+    }
+
+    volumes := make([]*api.PersistentVolume, len(objs))
+    for i, obj := range objs {
+        volumes[i] = obj.(*api.PersistentVolume)
+    }
+
+    return volumes, nil
+}
+
+// matchPredicate is a function that indicates that a persistent volume matches another
+type matchPredicate func(compareThis, toThis *api.PersistentVolume) bool
+
+// findByClaim returns the nearest PV from the ordered list, or nil if a match
+// is not found
+func (pvIndex *persistentVolumeOrderedIndex) findByClaim(claim *api.PersistentVolumeClaim, matchPredicate matchPredicate) (*api.PersistentVolume, error) {
+    // PVs are indexed by their access modes to allow easier searching. Each
+    // index is the string representation of a set of access modes. There is a
+    // finite number of possible sets and PVs will only be indexed in one of
+    // them (whichever index matches the PV's modes).
+    //
+    // A request for resources will always specify its desired access modes.
+    // Any matching PV must have at least that number of access modes, but it
+    // can have more. For example, a user asks for ReadWriteOnce but a GCEPD
+    // is available, which is ReadWriteOnce+ReadOnlyMany.
+    //
+    // Searches are performed against a set of access modes, so we can attempt
+    // not only the exact matching modes but also potential matches (the GCEPD
+    // example above).
+    allPossibleModes := pvIndex.allPossibleMatchingAccessModes(claim.Spec.AccessModes)
+
+    var smallestVolume *api.PersistentVolume
+    var smallestVolumeSize int64
+    requestedQty := claim.Spec.Resources.Requests[api.ResourceName(api.ResourceStorage)]
+    requestedSize := requestedQty.Value()
+
+    for _, modes := range allPossibleModes {
+        volumes, err := pvIndex.listByAccessModes(modes)
+        if err != nil {
+            return nil, err
+        }
+
+        // Go through all available volumes with two goals:
+        // - find a volume that is either pre-bound by the user or dynamically
+        //   provisioned for this claim. Because of this we need to loop
+        //   through all volumes.
+        // - find the smallest matching one if there is no volume pre-bound to
+        //   the claim.
+        for _, volume := range volumes {
+            if isVolumeBoundToClaim(volume, claim) {
+                // Exact match! No search required. This catches both volumes
+                // pre-bound by the user and volumes dynamically provisioned by
+                // the controller.
+                return volume, nil
+            }
+
+            if volume.Spec.ClaimRef != nil {
+                // This volume waits for an exact claim or is already bound.
+                continue
+            }
+
+            volumeQty := volume.Spec.Capacity[api.ResourceStorage]
+            volumeSize := volumeQty.Value()
+            if volumeSize >= requestedSize {
+                if smallestVolume == nil || smallestVolumeSize > volumeSize {
+                    smallestVolume = volume
+                    smallestVolumeSize = volumeSize
+                }
+            }
+        }
+
+        // We want to provision volumes if the annotation is set even if there
+        // is a matching PV. Therefore, do not look for an available PV and let
+        // a new volume be provisioned.
+        //
+        // When the provisioner creates a new PV for this claim, an exact match
+        // pre-bound to the claim will be found by the checks above during a
+        // subsequent claim sync.
+        if hasAnnotation(claim.ObjectMeta, annClass) {
+            return nil, nil
+        }
+
+        if smallestVolume != nil {
+            // Found a matching volume
+            return smallestVolume, nil
+        }
+    }
+    return nil, nil
+}
+
+// findBestMatchForClaim is a convenience method that finds a volume by the
+// claim's AccessModes and requested storage capacity
+func (pvIndex *persistentVolumeOrderedIndex) findBestMatchForClaim(claim *api.PersistentVolumeClaim) (*api.PersistentVolume, error) {
+    return pvIndex.findByClaim(claim, matchStorageCapacity)
+}
+
+// matchStorageCapacity is a matchPredicate used to sort and find volumes
+func matchStorageCapacity(pvA, pvB *api.PersistentVolume) bool {
+    aQty := pvA.Spec.Capacity[api.ResourceStorage]
+    bQty := pvB.Spec.Capacity[api.ResourceStorage]
+    aSize := aQty.Value()
+    bSize := bQty.Value()
+    return aSize <= bSize
+}
+
+// allPossibleMatchingAccessModes returns an array of AccessMode arrays that
+// can satisfy a user's requested modes.
+//
+// See the comments in findByClaim above regarding indexing.
+//
+// allPossibleMatchingAccessModes gets all stringified access modes from the
+// index and returns all those that contain at least all of the requested
+// modes.
+//
+// For example, assume the index contains 2 types of PVs where the stringified
+// access modes are:
+//
+//	"RWO,ROX"     -- some number of GCEPDs
+//	"RWO,ROX,RWX" -- some number of NFS volumes
+//
+// A request for RWO could be satisfied by both sets of indexed volumes, so
+// allPossibleMatchingAccessModes returns:
+//
+//	[][]api.PersistentVolumeAccessMode {
+//		[]api.PersistentVolumeAccessMode {
+//			api.ReadWriteOnce, api.ReadOnlyMany,
+//		},
+//		[]api.PersistentVolumeAccessMode {
+//			api.ReadWriteOnce, api.ReadOnlyMany, api.ReadWriteMany,
+//		},
+//	}
+//
+// A request for RWX can be satisfied by only one set of indexed volumes, so
+// the return is:
+//
+//	[][]api.PersistentVolumeAccessMode {
+//		[]api.PersistentVolumeAccessMode {
+//			api.ReadWriteOnce, api.ReadOnlyMany, api.ReadWriteMany,
+//		},
+//	}
+//
+// This func returns the matched sets ordered by ascending number of modes, so
+// the caller first tries the set that is closest to what was actually
+// requested.
+func (pvIndex *persistentVolumeOrderedIndex) allPossibleMatchingAccessModes(requestedModes []api.PersistentVolumeAccessMode) [][]api.PersistentVolumeAccessMode {
+    matchedModes := [][]api.PersistentVolumeAccessMode{}
+    keys := pvIndex.store.ListIndexFuncValues("accessmodes")
+    for _, key := range keys {
+        indexedModes := api.GetAccessModesFromString(key)
+        if containedInAll(indexedModes, requestedModes) {
+            matchedModes = append(matchedModes, indexedModes)
+        }
+    }
+
+    // Sort by the number of modes in each array, with the fewest number of
+    // modes coming first. This allows searching for volumes by the minimum
+    // number of modes required of the possible matches.
+ sort.Sort(byAccessModes{matchedModes}) + return matchedModes +} + +func contains(modes []api.PersistentVolumeAccessMode, mode api.PersistentVolumeAccessMode) bool { + for _, m := range modes { + if m == mode { + return true + } + } + return false +} + +func containedInAll(indexedModes []api.PersistentVolumeAccessMode, requestedModes []api.PersistentVolumeAccessMode) bool { + for _, mode := range requestedModes { + if !contains(indexedModes, mode) { + return false + } + } + return true +} + +// byAccessModes is used to order access modes by size, with the fewest modes first +type byAccessModes struct { + modes [][]api.PersistentVolumeAccessMode +} + +func (c byAccessModes) Less(i, j int) bool { + return len(c.modes[i]) < len(c.modes[j]) +} + +func (c byAccessModes) Swap(i, j int) { + c.modes[i], c.modes[j] = c.modes[j], c.modes[i] +} + +func (c byAccessModes) Len() int { + return len(c.modes) +} + +func claimToClaimKey(claim *api.PersistentVolumeClaim) string { + return fmt.Sprintf("%s/%s", claim.Namespace, claim.Name) +} + +func claimrefToClaimKey(claimref *api.ObjectReference) string { + return fmt.Sprintf("%s/%s", claimref.Namespace, claimref.Name) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/persistentvolume/index_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/persistentvolume/index_test.go new file mode 100644 index 000000000000..c80984649cd7 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/persistentvolume/index_test.go @@ -0,0 +1,574 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package persistentvolume + +import ( + "sort" + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/resource" + "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/client/cache" +) + +func newPersistentVolumeOrderedIndex() persistentVolumeOrderedIndex { + return persistentVolumeOrderedIndex{cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"accessmodes": accessModesIndexFunc})} +} + +func TestMatchVolume(t *testing.T) { + volList := newPersistentVolumeOrderedIndex() + for _, pv := range createTestVolumes() { + volList.store.Add(pv) + } + + scenarios := map[string]struct { + expectedMatch string + claim *api.PersistentVolumeClaim + }{ + "successful-match-gce-10": { + expectedMatch: "gce-pd-10", + claim: &api.PersistentVolumeClaim{ + ObjectMeta: api.ObjectMeta{ + Name: "claim01", + Namespace: "myns", + }, + Spec: api.PersistentVolumeClaimSpec{ + AccessModes: []api.PersistentVolumeAccessMode{api.ReadOnlyMany, api.ReadWriteOnce}, + Resources: api.ResourceRequirements{ + Requests: api.ResourceList{ + api.ResourceName(api.ResourceStorage): resource.MustParse("8G"), + }, + }, + }, + }, + }, + "successful-match-nfs-5": { + expectedMatch: "nfs-5", + claim: &api.PersistentVolumeClaim{ + ObjectMeta: api.ObjectMeta{ + Name: "claim01", + Namespace: "myns", + }, + Spec: api.PersistentVolumeClaimSpec{ + AccessModes: []api.PersistentVolumeAccessMode{api.ReadOnlyMany, api.ReadWriteOnce, api.ReadWriteMany}, + Resources: api.ResourceRequirements{ + Requests: api.ResourceList{ + api.ResourceName(api.ResourceStorage): resource.MustParse("5G"), + }, + }, + }, + }, + }, + "successful-skip-1g-bound-volume": { + expectedMatch: "gce-pd-5", + claim: &api.PersistentVolumeClaim{ + ObjectMeta: api.ObjectMeta{ + Name: "claim01", + Namespace: "myns", + }, + Spec: api.PersistentVolumeClaimSpec{ + AccessModes: []api.PersistentVolumeAccessMode{api.ReadOnlyMany, api.ReadWriteOnce}, + Resources: api.ResourceRequirements{ + Requests: api.ResourceList{ + api.ResourceName(api.ResourceStorage): resource.MustParse("1G"), + }, + }, + }, + }, + }, + "successful-no-match": { + expectedMatch: "", + claim: &api.PersistentVolumeClaim{ + ObjectMeta: api.ObjectMeta{ + Name: "claim01", + Namespace: "myns", + }, + Spec: api.PersistentVolumeClaimSpec{ + AccessModes: []api.PersistentVolumeAccessMode{api.ReadOnlyMany, api.ReadWriteOnce}, + Resources: api.ResourceRequirements{ + Requests: api.ResourceList{ + api.ResourceName(api.ResourceStorage): resource.MustParse("999G"), + }, + }, + }, + }, + }, + } + + for name, scenario := range scenarios { + volume, err := volList.findBestMatchForClaim(scenario.claim) + if err != nil { + t.Errorf("Unexpected error matching volume by claim: %v", err) + } + if len(scenario.expectedMatch) != 0 && volume == nil { + t.Errorf("Expected match but received nil volume for scenario: %s", name) + } + if len(scenario.expectedMatch) != 0 && volume != nil && string(volume.UID) != scenario.expectedMatch { + t.Errorf("Expected %s but got volume %s in scenario %s", scenario.expectedMatch, volume.UID, name) + } + if len(scenario.expectedMatch) == 0 && volume != nil { + t.Errorf("Unexpected match for scenario: %s", name) + } + } +} + +func TestMatchingWithBoundVolumes(t *testing.T) { + volumeIndex := newPersistentVolumeOrderedIndex() + // two similar volumes, one is bound + pv1 := &api.PersistentVolume{ + ObjectMeta: api.ObjectMeta{ + UID: "gce-pd-1", + Name: "gce001", + }, + Spec: api.PersistentVolumeSpec{ + Capacity: api.ResourceList{ + api.ResourceName(api.ResourceStorage): 
resource.MustParse("1G"), + }, + PersistentVolumeSource: api.PersistentVolumeSource{ + GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{}, + }, + AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce, api.ReadOnlyMany}, + // this one we're pretending is already bound + ClaimRef: &api.ObjectReference{UID: "abc123"}, + }, + } + + pv2 := &api.PersistentVolume{ + ObjectMeta: api.ObjectMeta{ + UID: "gce-pd-2", + Name: "gce002", + }, + Spec: api.PersistentVolumeSpec{ + Capacity: api.ResourceList{ + api.ResourceName(api.ResourceStorage): resource.MustParse("1G"), + }, + PersistentVolumeSource: api.PersistentVolumeSource{ + GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{}, + }, + AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce, api.ReadOnlyMany}, + }, + } + + volumeIndex.store.Add(pv1) + volumeIndex.store.Add(pv2) + + claim := &api.PersistentVolumeClaim{ + ObjectMeta: api.ObjectMeta{ + Name: "claim01", + Namespace: "myns", + }, + Spec: api.PersistentVolumeClaimSpec{ + AccessModes: []api.PersistentVolumeAccessMode{api.ReadOnlyMany, api.ReadWriteOnce}, + Resources: api.ResourceRequirements{ + Requests: api.ResourceList{ + api.ResourceName(api.ResourceStorage): resource.MustParse("1G"), + }, + }, + }, + } + + volume, err := volumeIndex.findBestMatchForClaim(claim) + if err != nil { + t.Fatalf("Unexpected error matching volume by claim: %v", err) + } + if volume == nil { + t.Fatalf("Unexpected nil volume. Expected %s", pv2.Name) + } + if pv2.Name != volume.Name { + t.Errorf("Expected %s but got volume %s instead", pv2.Name, volume.Name) + } +} + +func TestListByAccessModes(t *testing.T) { + volList := newPersistentVolumeOrderedIndex() + for _, pv := range createTestVolumes() { + volList.store.Add(pv) + } + + volumes, err := volList.listByAccessModes([]api.PersistentVolumeAccessMode{api.ReadWriteOnce, api.ReadOnlyMany}) + if err != nil { + t.Error("Unexpected error retrieving volumes by access modes:", err) + } + sort.Sort(byCapacity{volumes}) + + for i, expected := range []string{"gce-pd-1", "gce-pd-5", "gce-pd-10"} { + if string(volumes[i].UID) != expected { + t.Errorf("Incorrect ordering of persistent volumes. Expected %s but got %s", expected, volumes[i].UID) + } + } + + volumes, err = volList.listByAccessModes([]api.PersistentVolumeAccessMode{api.ReadWriteOnce, api.ReadOnlyMany, api.ReadWriteMany}) + if err != nil { + t.Error("Unexpected error retrieving volumes by access modes:", err) + } + sort.Sort(byCapacity{volumes}) + + for i, expected := range []string{"nfs-1", "nfs-5", "nfs-10"} { + if string(volumes[i].UID) != expected { + t.Errorf("Incorrect ordering of persistent volumes. 
Expected %s but got %s", expected, volumes[i].UID)
+        }
+    }
+}
+
+func TestAllPossibleAccessModes(t *testing.T) {
+    index := newPersistentVolumeOrderedIndex()
+    for _, pv := range createTestVolumes() {
+        index.store.Add(pv)
+    }
+
+    // the mock PVs created by createTestVolumes() contain 2 sets of access
+    // modes: RWO+ROX and RWO+ROX+RWX
+    possibleModes := index.allPossibleMatchingAccessModes([]api.PersistentVolumeAccessMode{api.ReadWriteOnce})
+    if len(possibleModes) != 2 {
+        t.Errorf("Expected 2 arrays of modes that match RWO, but got %v", len(possibleModes))
+    }
+    for _, m := range possibleModes {
+        if !contains(m, api.ReadWriteOnce) {
+            t.Errorf("AccessModes does not contain %s", api.ReadWriteOnce)
+        }
+    }
+
+    possibleModes = index.allPossibleMatchingAccessModes([]api.PersistentVolumeAccessMode{api.ReadWriteMany})
+    if len(possibleModes) != 1 {
+        t.Errorf("Expected 1 array of modes that match RWX, but got %v", len(possibleModes))
+    }
+    if !contains(possibleModes[0], api.ReadWriteMany) {
+        t.Errorf("AccessModes does not contain %s", api.ReadWriteMany)
+    }
+}
+
+func TestFindingVolumeWithDifferentAccessModes(t *testing.T) {
+    gce := &api.PersistentVolume{
+        ObjectMeta: api.ObjectMeta{UID: "001", Name: "gce"},
+        Spec: api.PersistentVolumeSpec{
+            Capacity:               api.ResourceList{api.ResourceName(api.ResourceStorage): resource.MustParse("10G")},
+            PersistentVolumeSource: api.PersistentVolumeSource{GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{}},
+            AccessModes: []api.PersistentVolumeAccessMode{
+                api.ReadWriteOnce,
+                api.ReadOnlyMany,
+            },
+        },
+    }
+
+    ebs := &api.PersistentVolume{
+        ObjectMeta: api.ObjectMeta{UID: "002", Name: "ebs"},
+        Spec: api.PersistentVolumeSpec{
+            Capacity:               api.ResourceList{api.ResourceName(api.ResourceStorage): resource.MustParse("10G")},
+            PersistentVolumeSource: api.PersistentVolumeSource{AWSElasticBlockStore: &api.AWSElasticBlockStoreVolumeSource{}},
+            AccessModes: []api.PersistentVolumeAccessMode{
+                api.ReadWriteOnce,
+            },
+        },
+    }
+
+    nfs := &api.PersistentVolume{
+        ObjectMeta: api.ObjectMeta{UID: "003", Name: "nfs"},
+        Spec: api.PersistentVolumeSpec{
+            Capacity:               api.ResourceList{api.ResourceName(api.ResourceStorage): resource.MustParse("10G")},
+            PersistentVolumeSource: api.PersistentVolumeSource{NFS: &api.NFSVolumeSource{}},
+            AccessModes: []api.PersistentVolumeAccessMode{
+                api.ReadWriteOnce,
+                api.ReadOnlyMany,
+                api.ReadWriteMany,
+            },
+        },
+    }
+
+    claim := &api.PersistentVolumeClaim{
+        ObjectMeta: api.ObjectMeta{
+            Name:      "claim01",
+            Namespace: "myns",
+        },
+        Spec: api.PersistentVolumeClaimSpec{
+            AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
+            Resources:   api.ResourceRequirements{Requests: api.ResourceList{api.ResourceName(api.ResourceStorage): resource.MustParse("1G")}},
+        },
+    }
+
+    index := newPersistentVolumeOrderedIndex()
+    index.store.Add(gce)
+    index.store.Add(ebs)
+    index.store.Add(nfs)
+
+    volume, _ := index.findBestMatchForClaim(claim)
+    if volume.Name != ebs.Name {
+        t.Errorf("Expected %s but got volume %s instead", ebs.Name, volume.Name)
+    }
+
+    claim.Spec.AccessModes = []api.PersistentVolumeAccessMode{api.ReadWriteOnce, api.ReadOnlyMany}
+    volume, _ = index.findBestMatchForClaim(claim)
+    if volume.Name != gce.Name {
+        t.Errorf("Expected %s but got volume %s instead", gce.Name, volume.Name)
+    }
+
+    // order of the requested modes should not matter
+    claim.Spec.AccessModes = []api.PersistentVolumeAccessMode{api.ReadWriteMany, api.ReadWriteOnce, api.ReadOnlyMany}
+    volume, _ = index.findBestMatchForClaim(claim)
+    if volume.Name != nfs.Name {
+
t.Errorf("Expected %s but got volume %s instead", nfs.Name, volume.Name) + } + + // fewer modes requested should still match + claim.Spec.AccessModes = []api.PersistentVolumeAccessMode{api.ReadWriteMany} + volume, _ = index.findBestMatchForClaim(claim) + if volume.Name != nfs.Name { + t.Errorf("Expected %s but got volume %s instead", nfs.Name, volume.Name) + } + + // pretend the exact match is bound. should get the next level up of modes. + ebs.Spec.ClaimRef = &api.ObjectReference{} + claim.Spec.AccessModes = []api.PersistentVolumeAccessMode{api.ReadWriteOnce} + volume, _ = index.findBestMatchForClaim(claim) + if volume.Name != gce.Name { + t.Errorf("Expected %s but got volume %s instead", gce.Name, volume.Name) + } + + // continue up the levels of modes. + gce.Spec.ClaimRef = &api.ObjectReference{} + claim.Spec.AccessModes = []api.PersistentVolumeAccessMode{api.ReadWriteOnce} + volume, _ = index.findBestMatchForClaim(claim) + if volume.Name != nfs.Name { + t.Errorf("Expected %s but got volume %s instead", nfs.Name, volume.Name) + } + + // partial mode request + gce.Spec.ClaimRef = nil + claim.Spec.AccessModes = []api.PersistentVolumeAccessMode{api.ReadOnlyMany} + volume, _ = index.findBestMatchForClaim(claim) + if volume.Name != gce.Name { + t.Errorf("Expected %s but got volume %s instead", gce.Name, volume.Name) + } +} + +func createTestVolumes() []*api.PersistentVolume { + // these volumes are deliberately out-of-order to test indexing and sorting + return []*api.PersistentVolume{ + { + ObjectMeta: api.ObjectMeta{ + UID: "gce-pd-10", + Name: "gce003", + }, + Spec: api.PersistentVolumeSpec{ + Capacity: api.ResourceList{ + api.ResourceName(api.ResourceStorage): resource.MustParse("10G"), + }, + PersistentVolumeSource: api.PersistentVolumeSource{ + GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{}, + }, + AccessModes: []api.PersistentVolumeAccessMode{ + api.ReadWriteOnce, + api.ReadOnlyMany, + }, + }, + }, + { + ObjectMeta: api.ObjectMeta{ + UID: "gce-pd-20", + Name: "gce004", + }, + Spec: api.PersistentVolumeSpec{ + Capacity: api.ResourceList{ + api.ResourceName(api.ResourceStorage): resource.MustParse("20G"), + }, + PersistentVolumeSource: api.PersistentVolumeSource{ + GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{}, + }, + AccessModes: []api.PersistentVolumeAccessMode{ + api.ReadWriteOnce, + api.ReadOnlyMany, + }, + // this one we're pretending is already bound + ClaimRef: &api.ObjectReference{UID: "def456"}, + }, + }, + { + ObjectMeta: api.ObjectMeta{ + UID: "nfs-5", + Name: "nfs002", + }, + Spec: api.PersistentVolumeSpec{ + Capacity: api.ResourceList{ + api.ResourceName(api.ResourceStorage): resource.MustParse("5G"), + }, + PersistentVolumeSource: api.PersistentVolumeSource{ + Glusterfs: &api.GlusterfsVolumeSource{}, + }, + AccessModes: []api.PersistentVolumeAccessMode{ + api.ReadWriteOnce, + api.ReadOnlyMany, + api.ReadWriteMany, + }, + }, + }, + { + ObjectMeta: api.ObjectMeta{ + UID: "gce-pd-1", + Name: "gce001", + }, + Spec: api.PersistentVolumeSpec{ + Capacity: api.ResourceList{ + api.ResourceName(api.ResourceStorage): resource.MustParse("1G"), + }, + PersistentVolumeSource: api.PersistentVolumeSource{ + GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{}, + }, + AccessModes: []api.PersistentVolumeAccessMode{ + api.ReadWriteOnce, + api.ReadOnlyMany, + }, + // this one we're pretending is already bound + ClaimRef: &api.ObjectReference{UID: "abc123"}, + }, + }, + { + ObjectMeta: api.ObjectMeta{ + UID: "nfs-10", + Name: "nfs003", + }, + Spec: 
api.PersistentVolumeSpec{ + Capacity: api.ResourceList{ + api.ResourceName(api.ResourceStorage): resource.MustParse("10G"), + }, + PersistentVolumeSource: api.PersistentVolumeSource{ + Glusterfs: &api.GlusterfsVolumeSource{}, + }, + AccessModes: []api.PersistentVolumeAccessMode{ + api.ReadWriteOnce, + api.ReadOnlyMany, + api.ReadWriteMany, + }, + }, + }, + { + ObjectMeta: api.ObjectMeta{ + UID: "gce-pd-5", + Name: "gce002", + }, + Spec: api.PersistentVolumeSpec{ + Capacity: api.ResourceList{ + api.ResourceName(api.ResourceStorage): resource.MustParse("5G"), + }, + PersistentVolumeSource: api.PersistentVolumeSource{ + GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{}, + }, + AccessModes: []api.PersistentVolumeAccessMode{ + api.ReadWriteOnce, + api.ReadOnlyMany, + }, + }, + }, + { + ObjectMeta: api.ObjectMeta{ + UID: "nfs-1", + Name: "nfs001", + }, + Spec: api.PersistentVolumeSpec{ + Capacity: api.ResourceList{ + api.ResourceName(api.ResourceStorage): resource.MustParse("1G"), + }, + PersistentVolumeSource: api.PersistentVolumeSource{ + Glusterfs: &api.GlusterfsVolumeSource{}, + }, + AccessModes: []api.PersistentVolumeAccessMode{ + api.ReadWriteOnce, + api.ReadOnlyMany, + api.ReadWriteMany, + }, + }, + }, + } +} + +func testVolume(name, size string) *api.PersistentVolume { + return &api.PersistentVolume{ + ObjectMeta: api.ObjectMeta{ + Name: name, + Annotations: map[string]string{}, + }, + Spec: api.PersistentVolumeSpec{ + Capacity: api.ResourceList{api.ResourceName(api.ResourceStorage): resource.MustParse(size)}, + PersistentVolumeSource: api.PersistentVolumeSource{HostPath: &api.HostPathVolumeSource{}}, + AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce}, + }, + } +} + +func TestFindingPreboundVolumes(t *testing.T) { + claim := &api.PersistentVolumeClaim{ + ObjectMeta: api.ObjectMeta{ + Name: "claim01", + Namespace: "myns", + SelfLink: testapi.Default.SelfLink("pvc", ""), + }, + Spec: api.PersistentVolumeClaimSpec{ + AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce}, + Resources: api.ResourceRequirements{Requests: api.ResourceList{api.ResourceName(api.ResourceStorage): resource.MustParse("1Gi")}}, + }, + } + claimRef, err := api.GetReference(claim) + if err != nil { + t.Errorf("error getting claimRef: %v", err) + } + + pv1 := testVolume("pv1", "1Gi") + pv5 := testVolume("pv5", "5Gi") + pv8 := testVolume("pv8", "8Gi") + + index := newPersistentVolumeOrderedIndex() + index.store.Add(pv1) + index.store.Add(pv5) + index.store.Add(pv8) + + // expected exact match on size + volume, _ := index.findBestMatchForClaim(claim) + if volume.Name != pv1.Name { + t.Errorf("Expected %s but got volume %s instead", pv1.Name, volume.Name) + } + + // pretend the exact match is pre-bound. should get the next size up. + pv1.Spec.ClaimRef = &api.ObjectReference{Name: "foo", Namespace: "bar"} + volume, _ = index.findBestMatchForClaim(claim) + if volume.Name != pv5.Name { + t.Errorf("Expected %s but got volume %s instead", pv5.Name, volume.Name) + } + + // pretend the exact match is available but the largest volume is pre-bound to the claim. 
+    pv1.Spec.ClaimRef = nil
+    pv8.Spec.ClaimRef = claimRef
+    volume, _ = index.findBestMatchForClaim(claim)
+    if volume.Name != pv8.Name {
+        t.Errorf("Expected %s but got volume %s instead", pv8.Name, volume.Name)
+    }
+}
+
+// byCapacity is used to order volumes by ascending storage size
+type byCapacity struct {
+    volumes []*api.PersistentVolume
+}
+
+func (c byCapacity) Less(i, j int) bool {
+    return matchStorageCapacity(c.volumes[i], c.volumes[j])
+}
+
+func (c byCapacity) Swap(i, j int) {
+    c.volumes[i], c.volumes[j] = c.volumes[j], c.volumes[i]
+}
+
+func (c byCapacity) Len() int {
+    return len(c.volumes)
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/persistentvolume/provision_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/persistentvolume/provision_test.go
new file mode 100644
index 000000000000..44dad1388314
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/persistentvolume/provision_test.go
@@ -0,0 +1,256 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package persistentvolume
+
+import (
+    "errors"
+    "testing"
+
+    "k8s.io/kubernetes/pkg/api"
+)
+
+// Test a single call to syncClaim, expecting provisioning to happen.
+// 1. Fill in the controller with initial data
+// 2. Call the syncClaim *once*.
+// 3. Compare resulting volumes and claims with the expected volumes and
+//    claims.
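+//
+// Note: each controllerTest below uses positional (unkeyed) struct fields, in
+// this order: name, initialVolumes, expectedVolumes, initialClaims,
+// expectedClaims, expectedEvents, errors, test.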
+func TestProvisionSync(t *testing.T) {
+    tests := []controllerTest{
+        {
+            // Provision a volume
+            "11-1 - successful provision",
+            novolumes,
+            newVolumeArray("pvc-uid11-1", "1Gi", "uid11-1", "claim11-1", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController, annDynamicallyProvisioned),
+            newClaimArray("claim11-1", "uid11-1", "1Gi", "", api.ClaimPending, annClass),
+            // Binding will be completed in the next syncClaim
+            newClaimArray("claim11-1", "uid11-1", "1Gi", "", api.ClaimPending, annClass),
+            noevents, noerrors, wrapTestWithControllerConfig(operationProvision, []error{nil}, testSyncClaim),
+        },
+        {
+            // Provision failure - plugin not found
+            "11-2 - plugin not found",
+            novolumes,
+            novolumes,
+            newClaimArray("claim11-2", "uid11-2", "1Gi", "", api.ClaimPending, annClass),
+            newClaimArray("claim11-2", "uid11-2", "1Gi", "", api.ClaimPending, annClass),
+            []string{"Warning ProvisioningFailed"}, noerrors,
+            testSyncClaim,
+        },
+        {
+            // Provision failure - newProvisioner returns error
+            "11-3 - newProvisioner failure",
+            novolumes,
+            novolumes,
+            newClaimArray("claim11-3", "uid11-3", "1Gi", "", api.ClaimPending, annClass),
+            newClaimArray("claim11-3", "uid11-3", "1Gi", "", api.ClaimPending, annClass),
+            []string{"Warning ProvisioningFailed"}, noerrors,
+            wrapTestWithControllerConfig(operationProvision, []error{}, testSyncClaim),
+        },
+        {
+            // Provision failure - Provision returns error
+            "11-4 - provision failure",
+            novolumes,
+            novolumes,
+            newClaimArray("claim11-4", "uid11-4", "1Gi", "", api.ClaimPending, annClass),
+            newClaimArray("claim11-4", "uid11-4", "1Gi", "", api.ClaimPending, annClass),
+            []string{"Warning ProvisioningFailed"}, noerrors,
+            wrapTestWithControllerConfig(operationProvision, []error{errors.New("Mock provisioner error")}, testSyncClaim),
+        },
+        {
+            // Provision success - there is already a volume available, still
+            // we provision a new one when requested.
+            "11-6 - provisioning when there is a volume available",
+            newVolumeArray("volume11-6", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain),
+            []*api.PersistentVolume{
+                newVolume("volume11-6", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain),
+                newVolume("pvc-uid11-6", "1Gi", "uid11-6", "claim11-6", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController, annDynamicallyProvisioned),
+            },
+            newClaimArray("claim11-6", "uid11-6", "1Gi", "", api.ClaimPending, annClass),
+            // Binding will be completed in the next syncClaim
+            newClaimArray("claim11-6", "uid11-6", "1Gi", "", api.ClaimPending, annClass),
+            noevents, noerrors,
+            // No provisioning plugin configured - makes the test fail when
+            // the controller erroneously tries to provision something
+            wrapTestWithControllerConfig(operationProvision, []error{nil}, testSyncClaim),
+        },
+        {
+            // Provision success? - claim is bound before the provisioner
+            // creates a volume.
+ "11-7 - claim is bound before provisioning", + novolumes, + newVolumeArray("pvc-uid11-7", "1Gi", "uid11-7", "claim11-7", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController, annDynamicallyProvisioned), + newClaimArray("claim11-7", "uid11-7", "1Gi", "", api.ClaimPending, annClass), + // The claim would be bound in next syncClaim + newClaimArray("claim11-7", "uid11-7", "1Gi", "", api.ClaimPending, annClass), + noevents, noerrors, + wrapTestWithInjectedOperation(wrapTestWithControllerConfig(operationProvision, []error{}, testSyncClaim), func(ctrl *PersistentVolumeController, reactor *volumeReactor) { + // Create a volume before provisionClaimOperation starts. + // This similates a parallel controller provisioning the volume. + reactor.lock.Lock() + volume := newVolume("pvc-uid11-7", "1Gi", "uid11-7", "claim11-7", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController, annDynamicallyProvisioned) + reactor.volumes[volume.Name] = volume + reactor.lock.Unlock() + }), + }, + { + // Provision success - cannot save provisioned PV once, + // second retry succeeds + "11-8 - cannot save provisioned volume", + novolumes, + newVolumeArray("pvc-uid11-8", "1Gi", "uid11-8", "claim11-8", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController, annDynamicallyProvisioned), + newClaimArray("claim11-8", "uid11-8", "1Gi", "", api.ClaimPending, annClass), + // Binding will be completed in the next syncClaim + newClaimArray("claim11-8", "uid11-8", "1Gi", "", api.ClaimPending, annClass), + noevents, + []reactorError{ + // Inject error to the first + // kubeclient.PersistentVolumes.Create() call. All other calls + // will succeed. + {"create", "persistentvolumes", errors.New("Mock creation error")}, + }, + wrapTestWithControllerConfig(operationProvision, []error{nil}, testSyncClaim), + }, + { + // Provision success? 
+			// volume is deleted and delete succeeds
+			"11-9 - cannot save provisioned volume, delete succeeds",
+			novolumes,
+			novolumes,
+			newClaimArray("claim11-9", "uid11-9", "1Gi", "", api.ClaimPending, annClass),
+			newClaimArray("claim11-9", "uid11-9", "1Gi", "", api.ClaimPending, annClass),
+			[]string{"Warning ProvisioningFailed"},
+			[]reactorError{
+				// Inject error to five kubeclient.PersistentVolumes.Create()
+				// calls
+				{"create", "persistentvolumes", errors.New("Mock creation error1")},
+				{"create", "persistentvolumes", errors.New("Mock creation error2")},
+				{"create", "persistentvolumes", errors.New("Mock creation error3")},
+				{"create", "persistentvolumes", errors.New("Mock creation error4")},
+				{"create", "persistentvolumes", errors.New("Mock creation error5")},
+			},
+			wrapTestWithControllerConfig(operationDelete, []error{nil},
+				wrapTestWithControllerConfig(operationProvision, []error{nil}, testSyncClaim)),
+		},
+		{
+			// Provision failure - cannot save provisioned PV five times,
+			// volume delete failed - no plugin found
+			"11-10 - cannot save provisioned volume, no delete plugin found",
+			novolumes,
+			novolumes,
+			newClaimArray("claim11-10", "uid11-10", "1Gi", "", api.ClaimPending, annClass),
+			newClaimArray("claim11-10", "uid11-10", "1Gi", "", api.ClaimPending, annClass),
+			[]string{"Warning ProvisioningFailed", "Warning ProvisioningCleanupFailed"},
+			[]reactorError{
+				// Inject error to five kubeclient.PersistentVolumes.Create()
+				// calls
+				{"create", "persistentvolumes", errors.New("Mock creation error1")},
+				{"create", "persistentvolumes", errors.New("Mock creation error2")},
+				{"create", "persistentvolumes", errors.New("Mock creation error3")},
+				{"create", "persistentvolumes", errors.New("Mock creation error4")},
+				{"create", "persistentvolumes", errors.New("Mock creation error5")},
+			},
+			// No deleteCalls are configured, which results in no deleter plugin available for the volume
+			wrapTestWithControllerConfig(operationProvision, []error{nil}, testSyncClaim),
+		},
+		{
+			// Provision failure - cannot save provisioned PV five times,
+			// volume delete failed - deleter returns error five times
+			"11-11 - cannot save provisioned volume, deleter fails",
+			novolumes,
+			novolumes,
+			newClaimArray("claim11-11", "uid11-11", "1Gi", "", api.ClaimPending, annClass),
+			newClaimArray("claim11-11", "uid11-11", "1Gi", "", api.ClaimPending, annClass),
+			[]string{"Warning ProvisioningFailed", "Warning ProvisioningCleanupFailed"},
+			[]reactorError{
+				// Inject error to five kubeclient.PersistentVolumes.Create()
+				// calls
+				{"create", "persistentvolumes", errors.New("Mock creation error1")},
+				{"create", "persistentvolumes", errors.New("Mock creation error2")},
+				{"create", "persistentvolumes", errors.New("Mock creation error3")},
+				{"create", "persistentvolumes", errors.New("Mock creation error4")},
+				{"create", "persistentvolumes", errors.New("Mock creation error5")},
+			},
+			wrapTestWithControllerConfig(
+				operationDelete, []error{
+					errors.New("Mock deletion error1"),
+					errors.New("Mock deletion error2"),
+					errors.New("Mock deletion error3"),
+					errors.New("Mock deletion error4"),
+					errors.New("Mock deletion error5"),
+				},
+				wrapTestWithControllerConfig(operationProvision, []error{nil}, testSyncClaim),
+			),
+		},
+		{
+			// Provision failure - cannot save provisioned PV five times,
+			// volume delete succeeds 2nd time
+			"11-12 - cannot save provisioned volume, delete succeeds 2nd time",
+			novolumes,
+			novolumes,
+			newClaimArray("claim11-12", "uid11-12", "1Gi", "", api.ClaimPending, annClass),
"uid11-12", "1Gi", "", api.ClaimPending, annClass), + newClaimArray("claim11-12", "uid11-12", "1Gi", "", api.ClaimPending, annClass), + []string{"Warning ProvisioningFailed"}, + []reactorError{ + // Inject error to five kubeclient.PersistentVolumes.Create() + // calls + {"create", "persistentvolumes", errors.New("Mock creation error1")}, + {"create", "persistentvolumes", errors.New("Mock creation error2")}, + {"create", "persistentvolumes", errors.New("Mock creation error3")}, + {"create", "persistentvolumes", errors.New("Mock creation error4")}, + {"create", "persistentvolumes", errors.New("Mock creation error5")}, + }, + wrapTestWithControllerConfig( + operationDelete, []error{ + errors.New("Mock deletion error1"), + nil, + }, + wrapTestWithControllerConfig(operationProvision, []error{nil}, testSyncClaim), + ), + }, + } + runSyncTests(t, tests) +} + +// Test multiple calls to syncClaim/syncVolume and periodic sync of all +// volume/claims. The test follows this pattern: +// 0. Load the controller with initial data. +// 1. Call controllerTest.testCall() once as in TestSync() +// 2. For all volumes/claims changed by previous syncVolume/syncClaim calls, +// call appropriate syncVolume/syncClaim (simulating "volume/claim changed" +// events). Go to 2. if these calls change anything. +// 3. When all changes are processed and no new changes were made, call +// syncVolume/syncClaim on all volumes/claims (simulating "periodic sync"). +// 4. If some changes were done by step 3., go to 2. (simulation of +// "volume/claim updated" events, eventually performing step 3. again) +// 5. When 3. does not do any changes, finish the tests and compare final set +// of volumes/claims with expected claims/volumes and report differences. +// Some limit of calls in enforced to prevent endless loops. +func TestProvisionMultiSync(t *testing.T) { + tests := []controllerTest{ + { + // Provision a volume with binding + "12-1 - successful provision", + novolumes, + newVolumeArray("pvc-uid12-1", "1Gi", "uid12-1", "claim12-1", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController, annDynamicallyProvisioned), + newClaimArray("claim12-1", "uid12-1", "1Gi", "", api.ClaimPending, annClass), + // Binding will be completed in the next syncClaim + newClaimArray("claim12-1", "uid12-1", "1Gi", "pvc-uid12-1", api.ClaimBound, annClass, annBoundByController, annBindCompleted), + noevents, noerrors, wrapTestWithControllerConfig(operationProvision, []error{nil}, testSyncClaim), + }, + } + + runMultisyncTests(t, tests) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/persistentvolume/recycle_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/persistentvolume/recycle_test.go new file mode 100644 index 000000000000..14203b04c414 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/persistentvolume/recycle_test.go @@ -0,0 +1,196 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package persistentvolume + +import ( + "errors" + "testing" + + "k8s.io/kubernetes/pkg/api" +) + +// Test single call to syncVolume, expecting recycling to happen. +// 1. Fill in the controller with initial data +// 2. Call the syncVolume *once*. +// 3. Compare resulting volumes with expected volumes. +func TestRecycleSync(t *testing.T) { + tests := []controllerTest{ + { + // recycle volume bound by controller + "6-1 - successful recycle", + newVolumeArray("volume6-1", "1Gi", "uid6-1", "claim6-1", api.VolumeBound, api.PersistentVolumeReclaimRecycle, annBoundByController), + newVolumeArray("volume6-1", "1Gi", "", "", api.VolumeAvailable, api.PersistentVolumeReclaimRecycle), + noclaims, + noclaims, + noevents, noerrors, + // Inject recycler into the controller and call syncVolume. The + // recycler simulates one recycle() call that succeeds. + wrapTestWithControllerConfig(operationRecycle, []error{nil}, testSyncVolume), + }, + { + // recycle volume bound by user + "6-2 - successful recycle with prebound volume", + newVolumeArray("volume6-2", "1Gi", "uid6-2", "claim6-2", api.VolumeBound, api.PersistentVolumeReclaimRecycle), + newVolumeArray("volume6-2", "1Gi", "", "claim6-2", api.VolumeAvailable, api.PersistentVolumeReclaimRecycle), + noclaims, + noclaims, + noevents, noerrors, + // Inject recycler into the controller and call syncVolume. The + // recycler simulates one recycle() call that succeeds. + wrapTestWithControllerConfig(operationRecycle, []error{nil}, testSyncVolume), + }, + { + // recycle failure - plugin not found + "6-3 - plugin not found", + newVolumeArray("volume6-3", "1Gi", "uid6-3", "claim6-3", api.VolumeBound, api.PersistentVolumeReclaimRecycle), + newVolumeArray("volume6-3", "1Gi", "uid6-3", "claim6-3", api.VolumeFailed, api.PersistentVolumeReclaimRecycle), + noclaims, + noclaims, + []string{"Warning VolumeFailedRecycle"}, noerrors, testSyncVolume, + }, + { + // recycle failure - newRecycler returns error + "6-4 - newRecycler returns error", + newVolumeArray("volume6-4", "1Gi", "uid6-4", "claim6-4", api.VolumeBound, api.PersistentVolumeReclaimRecycle), + newVolumeArray("volume6-4", "1Gi", "uid6-4", "claim6-4", api.VolumeFailed, api.PersistentVolumeReclaimRecycle), + noclaims, + noclaims, + []string{"Warning VolumeFailedRecycle"}, noerrors, + wrapTestWithControllerConfig(operationRecycle, []error{}, testSyncVolume), + }, + { + // recycle failure - recycle returns error + "6-5 - recycle returns error", + newVolumeArray("volume6-5", "1Gi", "uid6-5", "claim6-5", api.VolumeBound, api.PersistentVolumeReclaimRecycle), + newVolumeArray("volume6-5", "1Gi", "uid6-5", "claim6-5", api.VolumeFailed, api.PersistentVolumeReclaimRecycle), + noclaims, + noclaims, + []string{"Warning VolumeFailedRecycle"}, noerrors, + wrapTestWithControllerConfig(operationRecycle, []error{errors.New("Mock recycle error")}, testSyncVolume), + }, + { + // recycle success(?) - volume is deleted before doRecycle() starts + "6-6 - volume is deleted before recycling", + newVolumeArray("volume6-6", "1Gi", "uid6-6", "claim6-6", api.VolumeBound, api.PersistentVolumeReclaimRecycle), + novolumes, + noclaims, + noclaims, + noevents, noerrors, + wrapTestWithInjectedOperation(wrapTestWithControllerConfig(operationRecycle, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *volumeReactor) { + // Delete the volume before recycle operation starts + reactor.lock.Lock() + delete(reactor.volumes, "volume6-6") + reactor.lock.Unlock() + }), + }, + { + // recycle success(?) 
- volume is recycled by previous recycler just + // at the time new doRecycle() starts. This simulates "volume no + // longer needs recycling, skipping". + "6-7 - volume is deleted before recycling", + newVolumeArray("volume6-7", "1Gi", "uid6-7", "claim6-7", api.VolumeBound, api.PersistentVolumeReclaimRecycle, annBoundByController), + newVolumeArray("volume6-7", "1Gi", "", "", api.VolumeAvailable, api.PersistentVolumeReclaimRecycle), + noclaims, + noclaims, + noevents, noerrors, + wrapTestWithInjectedOperation(wrapTestWithControllerConfig(operationRecycle, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *volumeReactor) { + // Mark the volume as Available before the recycler starts + reactor.lock.Lock() + volume := reactor.volumes["volume6-7"] + volume.Spec.ClaimRef = nil + volume.Status.Phase = api.VolumeAvailable + volume.Annotations = nil + reactor.lock.Unlock() + }), + }, + { + // recycle success(?) - volume bound by user is recycled by previous + // recycler just at the time new doRecycle() starts. This simulates + // "volume no longer needs recycling, skipping" with volume bound by + // user. + "6-8 - prebound volume is deleted before recycling", + newVolumeArray("volume6-8", "1Gi", "uid6-8", "claim6-8", api.VolumeBound, api.PersistentVolumeReclaimRecycle), + newVolumeArray("volume6-8", "1Gi", "", "claim6-8", api.VolumeAvailable, api.PersistentVolumeReclaimRecycle), + noclaims, + noclaims, + noevents, noerrors, + wrapTestWithInjectedOperation(wrapTestWithControllerConfig(operationRecycle, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *volumeReactor) { + // Mark the volume as Available before the recycler starts + reactor.lock.Lock() + volume := reactor.volumes["volume6-8"] + volume.Spec.ClaimRef.UID = "" + volume.Status.Phase = api.VolumeAvailable + reactor.lock.Unlock() + }), + }, + { + // recycle success - volume bound by user is recycled, while a new + // claim is created with another UID. + "6-9 - prebound volume is recycled while the claim exists", + newVolumeArray("volume6-9", "1Gi", "uid6-9", "claim6-9", api.VolumeBound, api.PersistentVolumeReclaimRecycle), + newVolumeArray("volume6-9", "1Gi", "", "claim6-9", api.VolumeAvailable, api.PersistentVolumeReclaimRecycle), + newClaimArray("claim6-9", "uid6-9-x", "10Gi", "", api.ClaimPending), + newClaimArray("claim6-9", "uid6-9-x", "10Gi", "", api.ClaimPending), + noevents, noerrors, + // Inject recycler into the controller and call syncVolume. The + // recycler simulates one recycle() call that succeeds. + wrapTestWithControllerConfig(operationRecycle, []error{nil}, testSyncVolume), + }, + { + // volume has unknown reclaim policy - failure expected + "6-10 - unknown reclaim policy", + newVolumeArray("volume6-10", "1Gi", "uid6-10", "claim6-10", api.VolumeBound, "Unknown"), + newVolumeArray("volume6-10", "1Gi", "uid6-10", "claim6-10", api.VolumeFailed, "Unknown"), + noclaims, + noclaims, + []string{"Warning VolumeUnknownReclaimPolicy"}, noerrors, testSyncVolume, + }, + } + runSyncTests(t, tests) +} + +// Test multiple calls to syncClaim/syncVolume and periodic sync of all +// volume/claims. The test follows this pattern: +// 0. Load the controller with initial data. +// 1. Call controllerTest.testCall() once as in TestSync() +// 2. For all volumes/claims changed by previous syncVolume/syncClaim calls, +// call appropriate syncVolume/syncClaim (simulating "volume/claim changed" +// events). Go to 2. if these calls change anything. +// 3. 
When all changes are processed and no new changes were made, call
+//    syncVolume/syncClaim on all volumes/claims (simulating "periodic sync").
+// 4. If some changes were done by step 3., go to 2. (simulation of
+//    "volume/claim updated" events, eventually performing step 3. again)
+// 5. When 3. does not do any changes, finish the tests and compare final set
+//    of volumes/claims with expected claims/volumes and report differences.
+// Some limit of calls is enforced to prevent endless loops.
+func TestRecycleMultiSync(t *testing.T) {
+	tests := []controllerTest{
+		{
+			// recycle failure - recycle returns error. The controller should
+			// try again.
+			"7-1 - recycle returns error",
+			newVolumeArray("volume7-1", "1Gi", "uid7-1", "claim7-1", api.VolumeBound, api.PersistentVolumeReclaimRecycle),
+			newVolumeArray("volume7-1", "1Gi", "", "claim7-1", api.VolumeAvailable, api.PersistentVolumeReclaimRecycle),
+			noclaims,
+			noclaims,
+			[]string{"Warning VolumeFailedRecycle"}, noerrors,
+			wrapTestWithControllerConfig(operationRecycle, []error{errors.New("Mock recycle error"), nil}, testSyncVolume),
+		},
+	}
+
+	runMultisyncTests(t, tests)
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/persistentvolume/volume_host.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/persistentvolume/volume_host.go
new file mode 100644
index 000000000000..f38ad0da4ee6
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/persistentvolume/volume_host.go
@@ -0,0 +1,73 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package persistentvolume
+
+import (
+	"fmt"
+
+	"k8s.io/kubernetes/pkg/api"
+	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+	"k8s.io/kubernetes/pkg/cloudprovider"
+	"k8s.io/kubernetes/pkg/types"
+	"k8s.io/kubernetes/pkg/util/io"
+	"k8s.io/kubernetes/pkg/util/mount"
+	vol "k8s.io/kubernetes/pkg/volume"
+)
+
+// VolumeHost interface implementation for PersistentVolumeController.
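+//
+// The `var _ vol.VolumeHost = &PersistentVolumeController{}` line below is the
+// standard Go compile-time interface assertion: assigning the concrete type to
+// a blank interface variable fails the build if any VolumeHost method is
+// missing, at zero runtime cost. A minimal sketch of the idiom:
+//
+//	type Stringer interface{ String() string }
+//	type noop struct{}
+//	func (noop) String() string { return "" }
+//	var _ Stringer = noop{} // breaks the build if noop stops implementing Stringer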
+
+var _ vol.VolumeHost = &PersistentVolumeController{}
+
+func (ctrl *PersistentVolumeController) GetPluginDir(pluginName string) string {
+	return ""
+}
+
+func (ctrl *PersistentVolumeController) GetPodVolumeDir(podUID types.UID, pluginName string, volumeName string) string {
+	return ""
+}
+
+func (ctrl *PersistentVolumeController) GetPodPluginDir(podUID types.UID, pluginName string) string {
+	return ""
+}
+
+func (ctrl *PersistentVolumeController) GetKubeClient() clientset.Interface {
+	return ctrl.kubeClient
+}
+
+func (ctrl *PersistentVolumeController) NewWrapperMounter(volName string, spec vol.Spec, pod *api.Pod, opts vol.VolumeOptions) (vol.Mounter, error) {
+	return nil, fmt.Errorf("PersistentVolumeController.NewWrapperMounter is not implemented")
+}
+
+func (ctrl *PersistentVolumeController) NewWrapperUnmounter(volName string, spec vol.Spec, podUID types.UID) (vol.Unmounter, error) {
+	return nil, fmt.Errorf("PersistentVolumeController.NewWrapperUnmounter is not implemented")
+}
+
+func (ctrl *PersistentVolumeController) GetCloudProvider() cloudprovider.Interface {
+	return ctrl.cloud
+}
+
+func (ctrl *PersistentVolumeController) GetMounter() mount.Interface {
+	return nil
+}
+
+func (ctrl *PersistentVolumeController) GetWriter() io.Writer {
+	return nil
+}
+
+func (ctrl *PersistentVolumeController) GetHostName() string {
+	return ""
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/petset/fakes.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/petset/fakes.go
new file mode 100644
index 000000000000..6c1c8e713f4a
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/petset/fakes.go
@@ -0,0 +1,325 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package petset
+
+import (
+	"fmt"
+	"time"
+
+	inf "gopkg.in/inf.v0"
+
+	"k8s.io/kubernetes/pkg/api"
+	api_pod "k8s.io/kubernetes/pkg/api/pod"
+	"k8s.io/kubernetes/pkg/api/resource"
+	"k8s.io/kubernetes/pkg/api/unversioned"
+	"k8s.io/kubernetes/pkg/apis/apps"
+	"k8s.io/kubernetes/pkg/client/record"
+	"k8s.io/kubernetes/pkg/types"
+	"k8s.io/kubernetes/pkg/util/sets"
+)
+
+func dec(i int64, exponent int) *inf.Dec {
+	return inf.NewDec(i, inf.Scale(-exponent))
+}
+
+func newPVC(name string) api.PersistentVolumeClaim {
+	return api.PersistentVolumeClaim{
+		ObjectMeta: api.ObjectMeta{
+			Name: name,
+		},
+		Spec: api.PersistentVolumeClaimSpec{
+			Resources: api.ResourceRequirements{
+				Requests: api.ResourceList{
+					api.ResourceStorage: *resource.NewQuantity(1, resource.BinarySI),
+				},
+			},
+		},
+	}
+}
+
+func newPetSetWithVolumes(replicas int, name string, petMounts []api.VolumeMount, podMounts []api.VolumeMount) *apps.PetSet {
+	mounts := append(petMounts, podMounts...)
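+	// petMounts are backed by per-pet PersistentVolumeClaims (one claim per
+	// mount, built just below), while podMounts are backed by plain HostPath
+	// volumes; the single container mounts the union of both.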
+ claims := []api.PersistentVolumeClaim{} + for _, m := range petMounts { + claims = append(claims, newPVC(m.Name)) + } + + vols := []api.Volume{} + for _, m := range podMounts { + vols = append(vols, api.Volume{ + Name: m.Name, + VolumeSource: api.VolumeSource{ + HostPath: &api.HostPathVolumeSource{ + Path: fmt.Sprintf("/tmp/%v", m.Name), + }, + }, + }) + } + + return &apps.PetSet{ + TypeMeta: unversioned.TypeMeta{ + Kind: "PetSet", + APIVersion: "apps/v1beta1", + }, + ObjectMeta: api.ObjectMeta{ + Name: name, + Namespace: api.NamespaceDefault, + UID: types.UID("test"), + }, + Spec: apps.PetSetSpec{ + Selector: &unversioned.LabelSelector{ + MatchLabels: map[string]string{"foo": "bar"}, + }, + Replicas: replicas, + Template: api.PodTemplateSpec{ + Spec: api.PodSpec{ + Containers: []api.Container{ + { + Name: "nginx", + Image: "nginx", + VolumeMounts: mounts, + }, + }, + Volumes: vols, + }, + }, + VolumeClaimTemplates: claims, + ServiceName: "governingsvc", + }, + } +} + +func runningPod(ns, name string) *api.Pod { + p := &api.Pod{Status: api.PodStatus{Phase: api.PodRunning}} + p.Namespace = ns + p.Name = name + return p +} + +func newPodList(ps *apps.PetSet, num int) []*api.Pod { + // knownPods are pods in the system + knownPods := []*api.Pod{} + for i := 0; i < num; i++ { + k, _ := newPCB(fmt.Sprintf("%v", i), ps) + knownPods = append(knownPods, k.pod) + } + return knownPods +} + +func newPetSet(replicas int) *apps.PetSet { + petMounts := []api.VolumeMount{ + {Name: "datadir", MountPath: "/tmp/zookeeper"}, + } + podMounts := []api.VolumeMount{ + {Name: "home", MountPath: "/home"}, + } + return newPetSetWithVolumes(replicas, "foo", petMounts, podMounts) +} + +func checkPodForMount(pod *api.Pod, mountName string) error { + for _, c := range pod.Spec.Containers { + for _, v := range c.VolumeMounts { + if v.Name == mountName { + return nil + } + } + } + return fmt.Errorf("Found volume but no associated mount %v in pod %v", mountName, pod.Name) +} + +func newFakePetClient() *fakePetClient { + return &fakePetClient{ + pets: []*pcb{}, + claims: []api.PersistentVolumeClaim{}, + recorder: &record.FakeRecorder{}, + petHealthChecker: &defaultPetHealthChecker{}, + } +} + +type fakePetClient struct { + pets []*pcb + claims []api.PersistentVolumeClaim + petsCreated, petsDeleted int + claimsCreated, claimsDeleted int + recorder record.EventRecorder + petHealthChecker +} + +// Delete fakes pet client deletion. +func (f *fakePetClient) Delete(p *pcb) error { + pets := []*pcb{} + found := false + for i, pet := range f.pets { + if p.pod.Name == pet.pod.Name { + found = true + f.recorder.Eventf(pet.parent, api.EventTypeNormal, "SuccessfulDelete", "pet: %v", pet.pod.Name) + continue + } + pets = append(pets, f.pets[i]) + } + if !found { + // TODO: Return proper not found error + return fmt.Errorf("Delete failed: pet %v doesn't exist", p.pod.Name) + } + f.pets = pets + f.petsDeleted++ + return nil +} + +// Get fakes getting pets. +func (f *fakePetClient) Get(p *pcb) (*pcb, bool, error) { + for i, pet := range f.pets { + if p.pod.Name == pet.pod.Name { + return f.pets[i], true, nil + } + } + return nil, false, nil +} + +// Create fakes pet creation. 
+func (f *fakePetClient) Create(p *pcb) error {
+	for _, pet := range f.pets {
+		if p.pod.Name == pet.pod.Name {
+			return fmt.Errorf("Create failed: pet %v already exists", p.pod.Name)
+		}
+	}
+	f.recorder.Eventf(p.parent, api.EventTypeNormal, "SuccessfulCreate", "pet: %v", p.pod.Name)
+	f.pets = append(f.pets, p)
+	f.petsCreated++
+	return nil
+}
+
+// Update fakes pet updates.
+func (f *fakePetClient) Update(expected, wanted *pcb) error {
+	found := false
+	pets := []*pcb{}
+	for i, pet := range f.pets {
+		if wanted.pod.Name == pet.pod.Name {
+			f.pets[i].pod.Annotations[api_pod.PodHostnameAnnotation] = wanted.pod.Annotations[api_pod.PodHostnameAnnotation]
+			f.pets[i].pod.Annotations[api_pod.PodSubdomainAnnotation] = wanted.pod.Annotations[api_pod.PodSubdomainAnnotation]
+			f.pets[i].pod.Spec = wanted.pod.Spec
+			found = true
+		}
+		pets = append(pets, f.pets[i])
+	}
+	f.pets = pets
+	if !found {
+		return fmt.Errorf("Cannot update pet %v: not found", wanted.pod.Name)
+	}
+	// TODO: Delete pvcs/volumes that are in wanted but not in expected.
+	return nil
+}
+
+func (f *fakePetClient) getPodList() []*api.Pod {
+	p := []*api.Pod{}
+	for i, pet := range f.pets {
+		if pet.pod == nil {
+			continue
+		}
+		p = append(p, f.pets[i].pod)
+	}
+	return p
+}
+
+func (f *fakePetClient) deletePetAtIndex(index int) {
+	p := []*pcb{}
+	for i := range f.pets {
+		if i != index {
+			p = append(p, f.pets[i])
+		}
+	}
+	f.pets = p
+}
+
+func (f *fakePetClient) setHealthy(index int) error {
+	if len(f.pets) < index {
+		return fmt.Errorf("Index out of range, len %v index %v", len(f.pets), index)
+	}
+	f.pets[index].pod.Status.Phase = api.PodRunning
+	f.pets[index].pod.Annotations[PetSetInitAnnotation] = "true"
+	f.pets[index].pod.Status.Conditions = []api.PodCondition{
+		{Type: api.PodReady, Status: api.ConditionTrue},
+	}
+	return nil
+}
+
+// isHealthy is a convenience wrapper that delegates to the embedded
+// default health checker.
+func (f *fakePetClient) isHealthy(pod *api.Pod) bool {
+	return f.petHealthChecker.isHealthy(pod)
+}
+
+func (f *fakePetClient) setDeletionTimestamp(index int) error {
+	if len(f.pets) < index {
+		return fmt.Errorf("Index out of range, len %v index %v", len(f.pets), index)
+	}
+	f.pets[index].pod.DeletionTimestamp = &unversioned.Time{Time: time.Now()}
+	return nil
+}
+
+// SyncPVCs fakes pvc syncing.
+func (f *fakePetClient) SyncPVCs(pet *pcb) error {
+	v := pet.pvcs
+	updateClaims := map[string]api.PersistentVolumeClaim{}
+	for i, update := range v {
+		updateClaims[update.Name] = v[i]
+	}
+	claimList := []api.PersistentVolumeClaim{}
+	for i, existing := range f.claims {
+		if update, ok := updateClaims[existing.Name]; ok {
+			claimList = append(claimList, update)
+			delete(updateClaims, existing.Name)
+		} else {
+			claimList = append(claimList, f.claims[i])
+		}
+	}
+	for _, remaining := range updateClaims {
+		claimList = append(claimList, remaining)
+		f.claimsCreated++
+		f.recorder.Eventf(pet.parent, api.EventTypeNormal, "SuccessfulCreate", "pvc: %v", remaining.Name)
+	}
+	f.claims = claimList
+	return nil
+}
+
+// DeletePVCs fakes pvc deletion.
+func (f *fakePetClient) DeletePVCs(pet *pcb) error {
+	claimsToDelete := pet.pvcs
+	deleteClaimNames := sets.NewString()
+	for _, c := range claimsToDelete {
+		deleteClaimNames.Insert(c.Name)
+	}
+	pvcs := []api.PersistentVolumeClaim{}
+	for i, existing := range f.claims {
+		if deleteClaimNames.Has(existing.Name) {
+			deleteClaimNames.Delete(existing.Name)
+			f.claimsDeleted++
+			f.recorder.Eventf(pet.parent, api.EventTypeNormal, "SuccessfulDelete", "pvc: %v", existing.Name)
+			continue
+		}
+		pvcs = append(pvcs, f.claims[i])
+	}
+	f.claims = pvcs
+	if deleteClaimNames.Len() != 0 {
+		return fmt.Errorf("Claims %+v don't exist. Failed deletion.", deleteClaimNames)
+	}
+	return nil
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/petset/identity_mappers.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/petset/identity_mappers.go
new file mode 100644
index 000000000000..ae72ef2a8f9e
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/petset/identity_mappers.go
@@ -0,0 +1,247 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package petset
+
+import (
+	"crypto/md5"
+	"fmt"
+	"sort"
+	"strings"
+
+	"github.com/golang/glog"
+	"k8s.io/kubernetes/pkg/api"
+	podapi "k8s.io/kubernetes/pkg/api/pod"
+	"k8s.io/kubernetes/pkg/apis/apps"
+	"k8s.io/kubernetes/pkg/util/sets"
+)
+
+// identityMapper is an interface for assigning identities to a pet.
+// All existing identity mappers just append "-(index)" to the petset name to
+// generate a unique identity. This is used in claims/DNS/hostname/petname
+// etc. There's a more elegant way to achieve this mapping, but we're
+// taking the simplest route till we have data on whether users will need
+// more customization.
+// Note that running a single identity mapper is not guaranteed to give
+// your pet a unique identity. You must run them all. Order doesn't matter.
+type identityMapper interface {
+	// SetIdentity takes an id and assigns the given pet an identity based
+	// on the pet set spec. The id must be unique among members of the
+	// pet set.
+	SetIdentity(id string, pet *api.Pod)
+
+	// Identity returns the identity of the pet.
+	Identity(pod *api.Pod) string
+}
+
+func newIdentityMappers(ps *apps.PetSet) []identityMapper {
+	return []identityMapper{
+		&NameIdentityMapper{ps},
+		&NetworkIdentityMapper{ps},
+		&VolumeIdentityMapper{ps},
+	}
+}
+
+// NetworkIdentityMapper assigns network identity to pets.
+type NetworkIdentityMapper struct {
+	ps *apps.PetSet
+}
+
+// SetIdentity sets network identity on the pet.
+func (n *NetworkIdentityMapper) SetIdentity(id string, pet *api.Pod) {
+	pet.Annotations[podapi.PodHostnameAnnotation] = fmt.Sprintf("%v-%v", n.ps.Name, id)
+	pet.Annotations[podapi.PodSubdomainAnnotation] = n.ps.Spec.ServiceName
+	return
+}
+
+// Identity returns the network identity of the pet.
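+// For a set named "web" with service name "nginx" in namespace "default"
+// (illustrative values), pet 0's network identity is "web-0.nginx.default":
+// the hostname and subdomain annotations set above, joined with the
+// namespace by String below.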
+func (n *NetworkIdentityMapper) Identity(pet *api.Pod) string {
+	return n.String(pet)
+}
+
+// String is a string function for the network identity of the pet.
+func (n *NetworkIdentityMapper) String(pet *api.Pod) string {
+	hostname := pet.Annotations[podapi.PodHostnameAnnotation]
+	subdomain := pet.Annotations[podapi.PodSubdomainAnnotation]
+	return strings.Join([]string{hostname, subdomain, n.ps.Namespace}, ".")
+}
+
+// VolumeIdentityMapper assigns storage identity to pets.
+type VolumeIdentityMapper struct {
+	ps *apps.PetSet
+}
+
+// SetIdentity sets storage identity on the pet.
+func (v *VolumeIdentityMapper) SetIdentity(id string, pet *api.Pod) {
+	petVolumes := []api.Volume{}
+	petClaims := v.GetClaims(id)
+
+	// These volumes will all go down with the pod. If a name matches one of
+	// the claims in the pet set, it gets clobbered.
+	podVolumes := map[string]api.Volume{}
+	for _, podVol := range pet.Spec.Volumes {
+		podVolumes[podVol.Name] = podVol
+	}
+
+	// Insert claims for the idempotent petSet volumes
+	for name, claim := range petClaims {
+		// Volumes on a pet for which there are no associated claims on the
+		// petset are pod local, and die with the pod.
+		podVol, ok := podVolumes[name]
+		if ok {
+			// TODO: Validate and reject this.
+			glog.V(4).Infof("Overwriting existing volume source %v", podVol.Name)
+		}
+		newVol := api.Volume{
+			Name: name,
+			VolumeSource: api.VolumeSource{
+				PersistentVolumeClaim: &api.PersistentVolumeClaimVolumeSource{
+					ClaimName: claim.Name,
+					// TODO: Use source definition to set this value when we have one.
+					ReadOnly: false,
+				},
+			},
+		}
+		petVolumes = append(petVolumes, newVol)
+	}
+
+	// Transfer any ephemeral pod volumes
+	for name, vol := range podVolumes {
+		if _, ok := petClaims[name]; !ok {
+			petVolumes = append(petVolumes, vol)
+		}
+	}
+	pet.Spec.Volumes = petVolumes
+	return
+}
+
+// Identity returns the storage identity of the pet.
+func (v *VolumeIdentityMapper) Identity(pet *api.Pod) string {
+	// TODO: Make this a hash?
+	return v.String(pet)
+}
+
+// String is a string function for the storage identity of the pet.
+func (v *VolumeIdentityMapper) String(pet *api.Pod) string {
+	ids := []string{}
+	petVols := sets.NewString()
+	for _, petVol := range v.ps.Spec.VolumeClaimTemplates {
+		petVols.Insert(petVol.Name)
+	}
+	for _, podVol := range pet.Spec.Volumes {
+		// Volumes on a pet for which there are no associated claims on the
+		// petset are pod local, and die with the pod.
+		if !petVols.Has(podVol.Name) {
+			continue
+		}
+		if podVol.VolumeSource.PersistentVolumeClaim == nil {
+			// TODO: Is this a part of the identity?
+			ids = append(ids, fmt.Sprintf("%v:None", podVol.Name))
+			continue
+		}
+		ids = append(ids, fmt.Sprintf("%v:%v", podVol.Name, podVol.VolumeSource.PersistentVolumeClaim.ClaimName))
+	}
+	sort.Strings(ids)
+	return strings.Join(ids, "")
+}
+
+// GetClaims returns the volume claims associated with the given id.
+// The claims belong to the petset. The id should be unique within a petset.
+func (v *VolumeIdentityMapper) GetClaims(id string) map[string]api.PersistentVolumeClaim {
+	petClaims := map[string]api.PersistentVolumeClaim{}
+	for _, pvc := range v.ps.Spec.VolumeClaimTemplates {
+		claim := pvc
+		// TODO: Name length checking in validation.
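+		// Claim names compose as "<template>-<set>-<id>", e.g. the
+		// "datadir" template of a set named "web" yields "datadir-web-0"
+		// for pet 0 (illustrative names, matching the claim names asserted
+		// in identity_mappers_test.go).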
+		claim.Name = fmt.Sprintf("%v-%v-%v", claim.Name, v.ps.Name, id)
+		claim.Namespace = v.ps.Namespace
+		claim.Labels = v.ps.Spec.Selector.MatchLabels
+
+		// TODO: We're assuming that the claim template has a volume QoS key, eg:
+		// volume.alpha.kubernetes.io/storage-class: anything
+		petClaims[pvc.Name] = claim
+	}
+	return petClaims
+}
+
+// GetClaimsForPet returns the pvcs for the given pet.
+func (v *VolumeIdentityMapper) GetClaimsForPet(pet *api.Pod) []api.PersistentVolumeClaim {
+	// Strip out the "-(index)" from the pet name and use it to generate
+	// claim names.
+	id := strings.Split(pet.Name, "-")
+	petID := id[len(id)-1]
+	pvcs := []api.PersistentVolumeClaim{}
+	for _, pvc := range v.GetClaims(petID) {
+		pvcs = append(pvcs, pvc)
+	}
+	return pvcs
+}
+
+// NameIdentityMapper assigns names to pets.
+// It also puts the pet in the same namespace as the parent.
+type NameIdentityMapper struct {
+	ps *apps.PetSet
+}
+
+// SetIdentity sets the pet namespace and name.
+func (n *NameIdentityMapper) SetIdentity(id string, pet *api.Pod) {
+	pet.Name = fmt.Sprintf("%v-%v", n.ps.Name, id)
+	pet.Namespace = n.ps.Namespace
+	return
+}
+
+// Identity returns the name identity of the pet.
+func (n *NameIdentityMapper) Identity(pet *api.Pod) string {
+	return n.String(pet)
+}
+
+// String is a string function for the name identity of the pet.
+func (n *NameIdentityMapper) String(pet *api.Pod) string {
+	return fmt.Sprintf("%v/%v", pet.Namespace, pet.Name)
+}
+
+// identityHash computes a hash of the pet by running all the above identity
+// mappers.
+func identityHash(ps *apps.PetSet, pet *api.Pod) string {
+	id := ""
+	for _, idMapper := range newIdentityMappers(ps) {
+		id += idMapper.Identity(pet)
+	}
+	return fmt.Sprintf("%x", md5.Sum([]byte(id)))
+}
+
+// copyPetID gives the realPet the same identity as the expectedPet.
+// Note that this is *not* a literal copy, but a copy of the fields that
+// contribute to the pet's identity. The returned boolean 'needsUpdate' will
+// be false if the realPet already has the same identity as the expectedPet.
+func copyPetID(realPet, expectedPet *pcb) (pod api.Pod, needsUpdate bool, err error) {
+	if realPet.pod == nil || expectedPet.pod == nil {
+		return pod, false, fmt.Errorf("Need a valid to and from pet for copy")
+	}
+	if realPet.parent.UID != expectedPet.parent.UID {
+		return pod, false, fmt.Errorf("Cannot copy pets with different parents")
+	}
+	ps := realPet.parent
+	if identityHash(ps, realPet.pod) == identityHash(ps, expectedPet.pod) {
+		return *realPet.pod, false, nil
+	}
+	copyPod := *realPet.pod
+	// This is the easiest way to give an identity to a pod. It won't work
+	// when we stop using names for id.
+	for _, idMapper := range newIdentityMappers(ps) {
+		idMapper.SetIdentity(expectedPet.id, &copyPod)
+	}
+	return copyPod, true, nil
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/petset/identity_mappers_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/petset/identity_mappers_test.go
new file mode 100644
index 000000000000..f9a736fc30d7
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/petset/identity_mappers_test.go
@@ -0,0 +1,179 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package petset + +import ( + "fmt" + "reflect" + "strings" + + "k8s.io/kubernetes/pkg/api" + api_pod "k8s.io/kubernetes/pkg/api/pod" + "testing" +) + +func TestPetIDName(t *testing.T) { + replicas := 3 + ps := newPetSet(replicas) + for i := 0; i < replicas; i++ { + petName := fmt.Sprintf("%v-%d", ps.Name, i) + pcb, err := newPCB(fmt.Sprintf("%d", i), ps) + if err != nil { + t.Fatalf("Failed to generate pet %v", err) + } + pod := pcb.pod + if pod.Name != petName || pod.Namespace != ps.Namespace { + t.Errorf("Wrong name identity, expected %v", pcb.pod.Name) + } + } +} + +func TestPetIDDNS(t *testing.T) { + replicas := 3 + ps := newPetSet(replicas) + for i := 0; i < replicas; i++ { + petName := fmt.Sprintf("%v-%d", ps.Name, i) + petSubdomain := ps.Spec.ServiceName + pcb, err := newPCB(fmt.Sprintf("%d", i), ps) + pod := pcb.pod + if err != nil { + t.Fatalf("Failed to generate pet %v", err) + } + if hostname, ok := pod.Annotations[api_pod.PodHostnameAnnotation]; !ok || hostname != petName { + t.Errorf("Wrong hostname: %v", petName) + } + // TODO: Check this against the governing service. + if subdomain, ok := pod.Annotations[api_pod.PodSubdomainAnnotation]; !ok || subdomain != petSubdomain { + t.Errorf("Wrong subdomain: %v", petName) + } + } +} +func TestPetIDVolume(t *testing.T) { + replicas := 3 + ps := newPetSet(replicas) + for i := 0; i < replicas; i++ { + pcb, err := newPCB(fmt.Sprintf("%d", i), ps) + if err != nil { + t.Fatalf("Failed to generate pet %v", err) + } + pod := pcb.pod + petName := fmt.Sprintf("%v-%d", ps.Name, i) + claimName := fmt.Sprintf("datadir-%v", petName) + for _, v := range pod.Spec.Volumes { + switch v.Name { + case "datadir": + c := v.VolumeSource.PersistentVolumeClaim + if c == nil || c.ClaimName != claimName { + t.Fatalf("Unexpected claim %v", c) + } + if err := checkPodForMount(pod, "datadir"); err != nil { + t.Errorf("Expected pod mount: %v", err) + } + case "home": + h := v.VolumeSource.HostPath + if h == nil || h.Path != "/tmp/home" { + t.Errorf("Unexpected modification to hostpath, expected /tmp/home got %+v", h) + } + default: + t.Errorf("Unexpected volume %v", v.Name) + } + } + } + // TODO: Check volume mounts. +} + +func TestPetIDVolumeClaims(t *testing.T) { + replicas := 3 + ps := newPetSet(replicas) + for i := 0; i < replicas; i++ { + pcb, err := newPCB(fmt.Sprintf("%v", i), ps) + if err != nil { + t.Fatalf("Failed to generate pet %v", err) + } + pvcs := pcb.pvcs + petName := fmt.Sprintf("%v-%d", ps.Name, i) + claimName := fmt.Sprintf("datadir-%v", petName) + if len(pvcs) != 1 || pvcs[0].Name != claimName { + t.Errorf("Wrong pvc expected %v got %v", claimName, pvcs[0].Name) + } + } +} + +func TestPetIDCrossAssignment(t *testing.T) { + replicas := 3 + ps := newPetSet(replicas) + + nameMapper := &NameIdentityMapper{ps} + volumeMapper := &VolumeIdentityMapper{ps} + networkMapper := &NetworkIdentityMapper{ps} + + // Check that the name is consistent across identity. 
+	for i := 0; i < replicas; i++ {
+		pet, _ := newPCB(fmt.Sprintf("%v", i), ps)
+		p := pet.pod
+		name := strings.Split(nameMapper.Identity(p), "/")[1]
+		network := networkMapper.Identity(p)
+		volume := volumeMapper.Identity(p)
+
+		petVolume := strings.Split(volume, ":")[1]
+
+		if petVolume != fmt.Sprintf("datadir-%v", name) {
+			t.Errorf("Unexpected pet volume name %v, expected %v", petVolume, name)
+		}
+		if network != fmt.Sprintf("%v.%v.%v", name, ps.Spec.ServiceName, ps.Namespace) {
+			t.Errorf("Unexpected pet network ID %v, expected %v", network, name)
+		}
+		t.Logf("[%v] volume: %+v, network: %+v, name: %+v", i, volume, network, name)
+	}
+}
+
+func TestPetIDReset(t *testing.T) {
+	replicas := 2
+	ps := newPetSet(replicas)
+	firstPCB, err := newPCB("1", ps)
+	secondPCB, err := newPCB("2", ps)
+	if identityHash(ps, firstPCB.pod) == identityHash(ps, secondPCB.pod) {
+		t.Fatalf("Failed to generate unique identities:\n%+v\n%+v", firstPCB.pod.Spec, secondPCB.pod.Spec)
+	}
+	userAdded := api.Volume{
+		Name: "test",
+		VolumeSource: api.VolumeSource{
+			EmptyDir: &api.EmptyDirVolumeSource{Medium: api.StorageMediumMemory},
+		},
+	}
+	firstPCB.pod.Spec.Volumes = append(firstPCB.pod.Spec.Volumes, userAdded)
+	pod, needsUpdate, err := copyPetID(firstPCB, secondPCB)
+	if err != nil {
+		t.Errorf("%v", err)
+	}
+	if !needsUpdate {
+		t.Errorf("expected update since identity of %v was reset", secondPCB.pod.Name)
+	}
+	if identityHash(ps, &pod) != identityHash(ps, secondPCB.pod) {
+		t.Errorf("Failed to copy identity for pod %v -> %v", firstPCB.pod.Name, secondPCB.pod.Name)
+	}
+	foundVol := false
+	for _, v := range pod.Spec.Volumes {
+		if reflect.DeepEqual(v, userAdded) {
+			foundVol = true
+			break
+		}
+	}
+	if !foundVol {
+		t.Errorf("User added volume was corrupted by reset action.")
+	}
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/petset/iterator.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/petset/iterator.go
new file mode 100644
index 000000000000..81df6814ccd9
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/petset/iterator.go
@@ -0,0 +1,163 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package petset
+
+import (
+	"fmt"
+	"sort"
+
+	"github.com/golang/glog"
+	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/apis/apps"
+	"k8s.io/kubernetes/pkg/controller"
+)
+
+// newPCB generates a new PCB using the id string as a unique qualifier
+func newPCB(id string, ps *apps.PetSet) (*pcb, error) {
+	petPod, err := controller.GetPodFromTemplate(&ps.Spec.Template, ps)
+	if err != nil {
+		return nil, err
+	}
+	for _, im := range newIdentityMappers(ps) {
+		im.SetIdentity(id, petPod)
+	}
+	petPVCs := []api.PersistentVolumeClaim{}
+	vMapper := &VolumeIdentityMapper{ps}
+	for _, c := range vMapper.GetClaims(id) {
+		petPVCs = append(petPVCs, c)
+	}
+	// TODO: Replace id field with IdentityHash, since id is more than just an index.
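+	// At this point petPod has been stamped by every identity mapper
+	// (name/namespace, hostname/subdomain annotations, claim-backed volumes)
+	// and petPVCs holds one claim per VolumeClaimTemplate, so the returned
+	// pcb is a complete manifest for the pet with this id.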
+	return &pcb{pod: petPod, pvcs: petPVCs, id: id, parent: ps}, nil
+}
+
+// petQueue is a custom datastructure that resembles a queue of pets.
+type petQueue struct {
+	pets     []*pcb
+	idMapper identityMapper
+}
+
+// enqueue enqueues the given pet, evicting any pets with the same id
+func (pt *petQueue) enqueue(p *pcb) {
+	if p == nil {
+		pt.pets = append(pt.pets, nil)
+		return
+	}
+	// Pop an existing pet from the known list, append the new pet to the end.
+	petList := []*pcb{}
+	petID := pt.idMapper.Identity(p.pod)
+	for i := range pt.pets {
+		if petID != pt.idMapper.Identity(pt.pets[i].pod) {
+			petList = append(petList, pt.pets[i])
+		}
+	}
+	pt.pets = petList
+	p.event = syncPet
+	pt.pets = append(pt.pets, p)
+}
+
+// dequeue returns the last element of the queue
+func (pt *petQueue) dequeue() *pcb {
+	if pt.empty() {
+		glog.Warningf("Dequeue invoked on an empty queue")
+		return nil
+	}
+	l := len(pt.pets) - 1
+	pet := pt.pets[l]
+	pt.pets = pt.pets[:l]
+	return pet
+}
+
+// empty returns true if the pet queue is empty.
+func (pt *petQueue) empty() bool {
+	return len(pt.pets) == 0
+}
+
+// NewPetQueue returns a queue for tracking pets
+func NewPetQueue(ps *apps.PetSet, podList []*api.Pod) *petQueue {
+	pt := petQueue{pets: []*pcb{}, idMapper: &NameIdentityMapper{ps}}
+	// Seed the queue with existing pets. Assume all pets are scheduled for
+	// deletion, enqueuing a pet will "undelete" it. We always want to delete
+	// from the higher ids, so sort by creation timestamp.
+
+	sort.Sort(PodsByCreationTimestamp(podList))
+	vMapper := VolumeIdentityMapper{ps}
+	for i := range podList {
+		pod := podList[i]
+		pt.pets = append(pt.pets, &pcb{pod: pod, pvcs: vMapper.GetClaimsForPet(pod), parent: ps, event: deletePet, id: fmt.Sprintf("%v", i)})
+	}
+	return &pt
+}
+
+// petSetIterator implements a simple iterator over pets in the given petset.
type petSetIterator struct {
+	// ps is the petset for this iterator.
+	ps *apps.PetSet
+	// queue contains the elements to iterate over.
+	queue *petQueue
+	// errs is a list because we always want the iterator to drain.
+	errs []error
+	// petCount is the number of pets iterated over.
+	petCount int
+}
+
+// Next returns true for as long as there are elements in the underlying queue.
+func (pi *petSetIterator) Next() bool {
+	var pet *pcb
+	var err error
+	if pi.petCount < pi.ps.Spec.Replicas {
+		pet, err = newPCB(fmt.Sprintf("%d", pi.petCount), pi.ps)
+		if err != nil {
+			pi.errs = append(pi.errs, err)
+			// Don't stop iterating over the set on errors. Caller handles nil.
+			pet = nil
+		}
+		pi.queue.enqueue(pet)
+		pi.petCount++
+	}
+	// Keep the iterator running until we've deleted pets in the queue.
+	return !pi.queue.empty()
+}
+
+// Value dequeues an element from the queue.
+func (pi *petSetIterator) Value() *pcb {
+	return pi.queue.dequeue()
+}
+
+// NewPetSetIterator returns a new iterator. All pods in the given podList
+// are used to seed the queue of the iterator.
+func NewPetSetIterator(ps *apps.PetSet, podList []*api.Pod) *petSetIterator {
+	pi := &petSetIterator{
+		ps:       ps,
+		queue:    NewPetQueue(ps, podList),
+		errs:     []error{},
+		petCount: 0,
+	}
+	return pi
+}
+
+// PodsByCreationTimestamp sorts a list of Pods by creation timestamp, using their names as a tie breaker.
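+// It implements sort.Interface, so callers order pods oldest-first with e.g.
+//
+//	sort.Sort(PodsByCreationTimestamp(podList))
+//
+// as NewPetQueue does above; combined with the LIFO dequeue of petQueue this
+// makes scale-downs propose deleting the most recently created pets first.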
+type PodsByCreationTimestamp []*api.Pod + +func (o PodsByCreationTimestamp) Len() int { return len(o) } +func (o PodsByCreationTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] } + +func (o PodsByCreationTimestamp) Less(i, j int) bool { + if o[i].CreationTimestamp.Equal(o[j].CreationTimestamp) { + return o[i].Name < o[j].Name + } + return o[i].CreationTimestamp.Before(o[j].CreationTimestamp) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/petset/iterator_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/petset/iterator_test.go new file mode 100644 index 000000000000..ab07c4223b1c --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/petset/iterator_test.go @@ -0,0 +1,149 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package petset + +import ( + "fmt" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/util/sets" + "testing" +) + +func TestPetQueueCreates(t *testing.T) { + replicas := 3 + ps := newPetSet(replicas) + q := NewPetQueue(ps, []*api.Pod{}) + for i := 0; i < replicas; i++ { + pet, _ := newPCB(fmt.Sprintf("%v", i), ps) + q.enqueue(pet) + p := q.dequeue() + if p.event != syncPet { + t.Errorf("Failed to retrieve sync event from queue") + } + } + if q.dequeue() != nil { + t.Errorf("Expected no pets") + } +} + +func TestPetQueueScaleDown(t *testing.T) { + replicas := 1 + ps := newPetSet(replicas) + + // knownPods are the pods in the system + knownPods := newPodList(ps, 3) + + q := NewPetQueue(ps, knownPods) + + // The iterator will insert a single replica, the enqueue + // mimics that behavior. 
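+	// (enqueue evicts any queued pet with the same identity, so the
+	// pre-seeded deletePet entry for pet-0 is replaced by a syncPet one.)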
+ pet, _ := newPCB(fmt.Sprintf("%v", 0), ps) + q.enqueue(pet) + + deletes := sets.NewString(fmt.Sprintf("%v-1", ps.Name), fmt.Sprintf("%v-2", ps.Name)) + syncs := sets.NewString(fmt.Sprintf("%v-0", ps.Name)) + + // Confirm that 2 known pods are deleted + for i := 0; i < 3; i++ { + p := q.dequeue() + switch p.event { + case syncPet: + if !syncs.Has(p.pod.Name) { + t.Errorf("Unexpected sync %v expecting %+v", p.pod.Name, syncs) + } + case deletePet: + if !deletes.Has(p.pod.Name) { + t.Errorf("Unexpected deletes %v expecting %+v", p.pod.Name, deletes) + } + } + } + if q.dequeue() != nil { + t.Errorf("Expected no pets") + } +} + +func TestPetQueueScaleUp(t *testing.T) { + replicas := 5 + ps := newPetSet(replicas) + + // knownPods are pods in the system + knownPods := newPodList(ps, 2) + + q := NewPetQueue(ps, knownPods) + for i := 0; i < 5; i++ { + pet, _ := newPCB(fmt.Sprintf("%v", i), ps) + q.enqueue(pet) + } + for i := 4; i >= 0; i-- { + pet := q.dequeue() + expectedName := fmt.Sprintf("%v-%d", ps.Name, i) + if pet.event != syncPet || pet.pod.Name != expectedName { + t.Errorf("Unexpected pet %+v, expected %v", pet.pod.Name, expectedName) + } + } +} + +func TestPetSetIteratorRelist(t *testing.T) { + replicas := 5 + ps := newPetSet(replicas) + + // knownPods are pods in the system + knownPods := newPodList(ps, 5) + for i := range knownPods { + knownPods[i].Spec.NodeName = fmt.Sprintf("foo-node-%v", i) + knownPods[i].Status.Phase = api.PodRunning + } + pi := NewPetSetIterator(ps, knownPods) + + // A simple resync should not change identity of pods in the system + i := 0 + for pi.Next() { + p := pi.Value() + if identityHash(ps, p.pod) != identityHash(ps, knownPods[i]) { + t.Errorf("Got unexpected identity hash from iterator.") + } + if p.event != syncPet { + t.Errorf("Got unexpected sync event for %v: %v", p.pod.Name, p.event) + } + i++ + } + if i != 5 { + t.Errorf("Unexpected iterations %v, this probably means too many/few pets", i) + } + + // Scale to 0 should delete all pods in system + ps.Spec.Replicas = 0 + pi = NewPetSetIterator(ps, knownPods) + i = 0 + for pi.Next() { + p := pi.Value() + if p.event != deletePet { + t.Errorf("Got unexpected sync event for %v: %v", p.pod.Name, p.event) + } + i++ + } + if i != 5 { + t.Errorf("Unexpected iterations %v, this probably means too many/few pets", i) + } + + // Relist with 0 replicas should no-op + pi = NewPetSetIterator(ps, []*api.Pod{}) + if pi.Next() != false { + t.Errorf("Unexpected iteration without any replicas or pods in system") + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/petset/pet.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/petset/pet.go new file mode 100644 index 000000000000..aa3534c442bc --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/petset/pet.go @@ -0,0 +1,310 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package petset
+
+import (
+	"fmt"
+	"strconv"
+
+	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/api/errors"
+	"k8s.io/kubernetes/pkg/apis/apps"
+	"k8s.io/kubernetes/pkg/client/record"
+	client "k8s.io/kubernetes/pkg/client/unversioned"
+	"k8s.io/kubernetes/pkg/runtime"
+
+	"github.com/golang/glog"
+)
+
+// petLifeCycleEvent is used to communicate high level actions the controller
+// needs to take on a given pet. It's recorded in the pcb. The recognized values
+// are listed below.
+type petLifeCycleEvent string
+
+const (
+	syncPet   petLifeCycleEvent = "sync"
+	deletePet petLifeCycleEvent = "delete"
+	// updateRetries is the number of Get/Update cycles we perform when an
+	// update fails.
+	updateRetries = 3
+	// PetSetInitAnnotation is an annotation which when set, indicates that the
+	// pet has finished initializing itself.
+	// TODO: Replace this with init container status.
+	PetSetInitAnnotation = "pod.alpha.kubernetes.io/initialized"
+)
+
+// pcb is the control block used to transmit all updates about a single pet.
+// It serves as the manifest for a single pet. Users must populate the pod
+// and parent fields to pass it around safely.
+type pcb struct {
+	// pod is the desired pet pod.
+	pod *api.Pod
+	// pvcs is a list of desired persistent volume claims for the pet pod.
+	pvcs []api.PersistentVolumeClaim
+	// event is the lifecycle event associated with this update.
+	event petLifeCycleEvent
+	// id is the identity index of this pet.
+	id string
+	// parent is a pointer to the parent petset.
+	parent *apps.PetSet
+}
+
+// pvcClient is a client for managing persistent volume claims.
+type pvcClient interface {
+	// DeletePVCs deletes the pvcs in the given pcb.
+	DeletePVCs(*pcb) error
+	// SyncPVCs creates/updates pvcs in the given pcb.
+	SyncPVCs(*pcb) error
+}
+
+// petSyncer syncs a single pet.
+type petSyncer struct {
+	petClient
+
+	// blockingPet is an unhealthy pet, from this iteration or a previous one,
+	// that blocks further creates and deletes because it is not yet Running
+	// or is being deleted.
+	blockingPet *pcb
+}
+
+// Sync syncs the given pet.
+func (p *petSyncer) Sync(pet *pcb) error {
+	if pet == nil {
+		return nil
+	}
+	realPet, exists, err := p.Get(pet)
+	if err != nil {
+		return err
+	}
+	// There is no constraint except quota on the number of pvcs created.
+	// This is done per pet so we get a working cluster ASAP, even if user
+	// runs out of quota.
+	if err := p.SyncPVCs(pet); err != nil {
+		return err
+	}
+	if exists {
+		if !p.isHealthy(realPet.pod) {
+			glog.Infof("PetSet %v waiting on unhealthy pet %v", pet.parent.Name, realPet.pod.Name)
+		}
+		return p.Update(realPet, pet)
+	}
+	if p.blockingPet != nil {
+		glog.Infof("Create of %v in PetSet %v blocked by unhealthy pet %v", pet.pod.Name, pet.parent.Name, p.blockingPet.pod.Name)
+		return nil
+	}
+	// This is counted as a create, even if it fails. We can't skip indices
+	// because some pets might allocate a special role to earlier indices.
+	// The returned error will force a requeue.
+	// TODO: What's the desired behavior if pet-0 is deleted while pet-1 is
+	// not yet healthy? Currently pet-0 will wait until pet-1 is healthy;
+	// this feels safer, but might lead to deadlock.
+	p.blockingPet = pet
+	if err := p.Create(pet); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Delete deletes the given pet, if no other pet in the petset is blocking a
+// scale event.
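+// Only one blocking operation is tracked per sync pass: whichever pet first
+// becomes blockingPet (a pending create in Sync above, or the pet deleted
+// here) suppresses further creates and deletes until the next pass, so the
+// set scales by at most one pet at a time.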
+func (p *petSyncer) Delete(pet *pcb) error { + if pet == nil { + return nil + } + realPet, exists, err := p.Get(pet) + if err != nil { + return err + } + if !exists { + return nil + } + if p.blockingPet != nil { + glog.Infof("Delete of %v in PetSet %v blocked by unhealthy pet %v", realPet.pod.Name, pet.parent.Name, p.blockingPet.pod.Name) + return nil + } + // This is counted as a delete, even if it fails. + // The returned error will force a requeue. + p.blockingPet = realPet + if !p.isDying(realPet.pod) { + glog.Infof("PetSet %v deleting pet %v", pet.parent.Name, pet.pod.Name) + return p.petClient.Delete(pet) + } + glog.Infof("PetSet %v waiting on pet %v to die in %v", pet.parent.Name, realPet.pod.Name, realPet.pod.DeletionTimestamp) + return nil +} + +// petClient is a client for managing pets. +type petClient interface { + pvcClient + petHealthChecker + Delete(*pcb) error + Get(*pcb) (*pcb, bool, error) + Create(*pcb) error + Update(*pcb, *pcb) error +} + +// apiServerPetClient is a petset aware Kubernetes client. +type apiServerPetClient struct { + c *client.Client + recorder record.EventRecorder + petHealthChecker +} + +// Get gets the pet in the pcb from the apiserver. +func (p *apiServerPetClient) Get(pet *pcb) (*pcb, bool, error) { + found := true + ns := pet.parent.Namespace + pod, err := podClient(p.c, ns).Get(pet.pod.Name) + if errors.IsNotFound(err) { + found = false + err = nil + } + if err != nil || !found { + return nil, found, err + } + realPet := *pet + realPet.pod = pod + return &realPet, true, nil +} + +// Delete deletes the pet in the pcb from the apiserver. +func (p *apiServerPetClient) Delete(pet *pcb) error { + err := podClient(p.c, pet.parent.Namespace).Delete(pet.pod.Name, nil) + if errors.IsNotFound(err) { + err = nil + } + p.event(pet.parent, "Delete", fmt.Sprintf("pet: %v", pet.pod.Name), err) + return err +} + +// Create creates the pet in the pcb. +func (p *apiServerPetClient) Create(pet *pcb) error { + _, err := podClient(p.c, pet.parent.Namespace).Create(pet.pod) + p.event(pet.parent, "Create", fmt.Sprintf("pet: %v", pet.pod.Name), err) + return err +} + +// Update updates the pet in the 'pet' pcb to match the pet in the 'expectedPet' pcb. +func (p *apiServerPetClient) Update(pet *pcb, expectedPet *pcb) (updateErr error) { + var getErr error + pc := podClient(p.c, pet.parent.Namespace) + + pod, needsUpdate, err := copyPetID(pet, expectedPet) + if err != nil || !needsUpdate { + return err + } + glog.Infof("Resetting pet %v to match PetSet %v spec", pod.Name, pet.parent.Name) + for i, p := 0, &pod; ; i++ { + _, updateErr = pc.Update(p) + if updateErr == nil || i >= updateRetries { + return updateErr + } + if p, getErr = pc.Get(pod.Name); getErr != nil { + return getErr + } + } +} + +// DeletePVCs should delete PVCs, when implemented. +func (p *apiServerPetClient) DeletePVCs(pet *pcb) error { + // TODO: Implement this when we delete pvcs. + return nil +} + +func (p *apiServerPetClient) getPVC(pvcName, pvcNamespace string) (*api.PersistentVolumeClaim, bool, error) { + found := true + pvc, err := claimClient(p.c, pvcNamespace).Get(pvcName) + if errors.IsNotFound(err) { + found = false + } + if err != nil || !found { + return nil, found, err + } + return pvc, true, nil +} + +func (p *apiServerPetClient) createPVC(pvc *api.PersistentVolumeClaim) error { + _, err := claimClient(p.c, pvc.Namespace).Create(pvc) + return err +} + +// SyncPVCs syncs pvcs in the given pcb. +func (p *apiServerPetClient) SyncPVCs(pet *pcb) error { + errMsg := "" + // Create new claims. 
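+ // The loop below is create-if-missing: existing claims are left untouched, + // so repeated syncs are idempotent with respect to PVCs.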
+ for i, pvc := range pet.pvcs { + _, exists, err := p.getPVC(pvc.Name, pet.parent.Namespace) + if !exists { + if err := p.createPVC(&pet.pvcs[i]); err != nil { + errMsg += fmt.Sprintf("Failed to create %v: %v", pvc.Name, err) + } + p.event(pet.parent, "Create", fmt.Sprintf("pvc: %v", pvc.Name), err) + } else if err != nil { + errMsg += fmt.Sprintf("Error trying to get pvc %v, %v.", pvc.Name, err) + } + // TODO: Check resource requirements and access modes, update if necessary + } + if len(errMsg) != 0 { + return fmt.Errorf("%v", errMsg) + } + return nil +} + +// event formats an event for the given runtime object. +func (p *apiServerPetClient) event(obj runtime.Object, reason, msg string, err error) { + if err != nil { + p.recorder.Eventf(obj, api.EventTypeWarning, fmt.Sprintf("Failed%v", reason), fmt.Sprintf("%v, error: %v", msg, err)) + } else { + p.recorder.Eventf(obj, api.EventTypeNormal, fmt.Sprintf("Successful%v", reason), msg) + } +} + +// petHealthChecker is an interface to check pet health. It makes a boolean +// decision based on the given pod. +type petHealthChecker interface { + isHealthy(*api.Pod) bool + isDying(*api.Pod) bool +} + +// defaultPetHealthChecker does basic health checking. +// It doesn't update, probe or get the pod. +type defaultPetHealthChecker struct{} + +// isHealthy returns true if the pod is running and has the +// "pod.alpha.kubernetes.io/initialized" annotation set to "true". +func (d *defaultPetHealthChecker) isHealthy(pod *api.Pod) bool { + if pod == nil || pod.Status.Phase != api.PodRunning { + return false + } + initialized, ok := pod.Annotations[PetSetInitAnnotation] + if !ok { + glog.Infof("PetSet pod %v in %v, waiting on annotation %v", pod.Name, api.PodRunning, PetSetInitAnnotation) + return false + } + b, err := strconv.ParseBool(initialized) + if err != nil { + return false + } + return b && api.IsPodReady(pod) +} + +// isDying returns true if the pod has a non-nil deletion timestamp. The +// deletion timestamp is never cleared once set, so once this method returns +// true for a given pet, it will never return false. +func (d *defaultPetHealthChecker) isDying(pod *api.Pod) bool { + return pod != nil && pod.DeletionTimestamp != nil +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/petset/pet_set.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/petset/pet_set.go new file mode 100644 index 000000000000..a34eb6d5c2e6 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/petset/pet_set.go @@ -0,0 +1,356 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package petset + +import ( + "fmt" + "reflect" + "sort" + "time" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/apis/apps" + "k8s.io/kubernetes/pkg/client/cache" + "k8s.io/kubernetes/pkg/client/record" + client "k8s.io/kubernetes/pkg/client/unversioned" + "k8s.io/kubernetes/pkg/controller" + "k8s.io/kubernetes/pkg/controller/framework" + "k8s.io/kubernetes/pkg/runtime" + utilruntime "k8s.io/kubernetes/pkg/util/runtime" + "k8s.io/kubernetes/pkg/util/wait" + "k8s.io/kubernetes/pkg/util/workqueue" + "k8s.io/kubernetes/pkg/watch" + + "github.com/golang/glog" +) + +const ( + // Time to sleep before polling to see if the pod cache has synced. + PodStoreSyncedPollPeriod = 100 * time.Millisecond + // number of retries for a status update. + statusUpdateRetries = 2 + // period to relist petsets and verify pets + petSetResyncPeriod = 30 * time.Second +) + +// PetSetController controls petsets. +type PetSetController struct { + kubeClient *client.Client + + // newSyncer returns an interface capable of syncing a single pet. + // Abstracted out for testing. + newSyncer func(*pcb) *petSyncer + + // podStore is a cache of watched pods. + podStore cache.StoreToPodLister + + // podStoreSynced returns true if the pod store has synced at least once. + podStoreSynced func() bool + // Watches changes to all pods. + podController framework.ControllerInterface + + // A store of PetSets, populated by the psController. + psStore cache.StoreToPetSetLister + // Watches changes to all PetSets. + psController *framework.Controller + + // A store of the 1 unhealthy pet blocking progress for a given ps + blockingPetStore *unhealthyPetTracker + + // Controllers that need to be synced. + queue *workqueue.Type + + // syncHandler handles sync events for petsets. + // Abstracted as a func to allow injection for testing. + syncHandler func(psKey string) []error +} + +// NewPetSetController creates a new petset controller. 
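+// A minimal wiring sketch for a hypothetical caller (the pod informer and +// stop channel are assumed to be constructed elsewhere and are illustrative, +// not part of this package): +// +// psc := NewPetSetController(podInformer, kubeClient, resyncPeriod) +// go podInformer.Run(stopCh) +// psc.Run(1, stopCh) +//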
+func NewPetSetController(podInformer framework.SharedIndexInformer, kubeClient *client.Client, resyncPeriod time.Duration) *PetSetController { + eventBroadcaster := record.NewBroadcaster() + eventBroadcaster.StartLogging(glog.Infof) + eventBroadcaster.StartRecordingToSink(kubeClient.Events("")) + recorder := eventBroadcaster.NewRecorder(api.EventSource{Component: "petset"}) + pc := &apiServerPetClient{kubeClient, recorder, &defaultPetHealthChecker{}} + + psc := &PetSetController{ + kubeClient: kubeClient, + blockingPetStore: newUnHealthyPetTracker(pc), + newSyncer: func(blockingPet *pcb) *petSyncer { + return &petSyncer{pc, blockingPet} + }, + queue: workqueue.New(), + } + + podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{ + // lookup the petset and enqueue + AddFunc: psc.addPod, + // lookup current and old petset if labels changed + UpdateFunc: psc.updatePod, + // lookup petset accounting for deletion tombstones + DeleteFunc: psc.deletePod, + }) + psc.podStore.Indexer = podInformer.GetIndexer() + psc.podController = podInformer.GetController() + + psc.psStore.Store, psc.psController = framework.NewInformer( + &cache.ListWatch{ + ListFunc: func(options api.ListOptions) (runtime.Object, error) { + return psc.kubeClient.Apps().PetSets(api.NamespaceAll).List(options) + }, + WatchFunc: func(options api.ListOptions) (watch.Interface, error) { + return psc.kubeClient.Apps().PetSets(api.NamespaceAll).Watch(options) + }, + }, + &apps.PetSet{}, + petSetResyncPeriod, + framework.ResourceEventHandlerFuncs{ + AddFunc: psc.enqueuePetSet, + UpdateFunc: func(old, cur interface{}) { + oldPS := old.(*apps.PetSet) + curPS := cur.(*apps.PetSet) + if oldPS.Status.Replicas != curPS.Status.Replicas { + glog.V(4).Infof("Observed updated replica count for PetSet: %v, %d->%d", curPS.Name, oldPS.Status.Replicas, curPS.Status.Replicas) + } + psc.enqueuePetSet(cur) + }, + DeleteFunc: psc.enqueuePetSet, + }, + ) + // TODO: Watch volumes + psc.podStoreSynced = psc.podController.HasSynced + psc.syncHandler = psc.Sync + return psc +} + +// Run runs the petset controller. +func (psc *PetSetController) Run(workers int, stopCh <-chan struct{}) { + defer utilruntime.HandleCrash() + glog.Infof("Starting petset controller") + go psc.podController.Run(stopCh) + go psc.psController.Run(stopCh) + for i := 0; i < workers; i++ { + go wait.Until(psc.worker, time.Second, stopCh) + } + <-stopCh + glog.Infof("Shutting down petset controller") + psc.queue.ShutDown() +} + +// addPod adds the petset for the pod to the sync queue +func (psc *PetSetController) addPod(obj interface{}) { + pod := obj.(*api.Pod) + glog.V(4).Infof("Pod %s created, labels: %+v", pod.Name, pod.Labels) + ps := psc.getPetSetForPod(pod) + if ps == nil { + return + } + psc.enqueuePetSet(ps) +} + +// updatePod adds the petset for the current and old pods to the sync queue. +// If the labels of the pod didn't change, this method enqueues a single petset. +func (psc *PetSetController) updatePod(old, cur interface{}) { + if api.Semantic.DeepEqual(old, cur) { + return + } + curPod := cur.(*api.Pod) + oldPod := old.(*api.Pod) + ps := psc.getPetSetForPod(curPod) + if ps == nil { + return + } + psc.enqueuePetSet(ps) + if !reflect.DeepEqual(curPod.Labels, oldPod.Labels) { + if oldPS := psc.getPetSetForPod(oldPod); oldPS != nil { + psc.enqueuePetSet(oldPS) + } + } +} + +// deletePod enqueues the petset for the pod accounting for deletion tombstones. 
+func (psc *PetSetController) deletePod(obj interface{}) { + pod, ok := obj.(*api.Pod) + + // When a delete is dropped, the relist will notice a pod in the store not + // in the list, leading to the insertion of a tombstone object which contains + // the deleted key/value. Note that this value might be stale. If the pod + // changed labels the new PetSet will not be woken up till the periodic resync. + if !ok { + tombstone, ok := obj.(cache.DeletedFinalStateUnknown) + if !ok { + glog.Errorf("couldn't get object from tombstone %+v", obj) + return + } + pod, ok = tombstone.Obj.(*api.Pod) + if !ok { + glog.Errorf("tombstone contained object that is not a pod %+v", obj) + return + } + } + glog.V(4).Infof("Pod %s/%s deleted through %v.", pod.Namespace, pod.Name, utilruntime.GetCaller()) + if ps := psc.getPetSetForPod(pod); ps != nil { + psc.enqueuePetSet(ps) + } +} + +// getPodsForPetSet returns the pods that match the selector of the given petset. +func (psc *PetSetController) getPodsForPetSet(ps *apps.PetSet) ([]*api.Pod, error) { + // TODO: Do we want the petset to fight with RCs? check parent petset annotation, or name prefix? + sel, err := unversioned.LabelSelectorAsSelector(ps.Spec.Selector) + if err != nil { + return []*api.Pod{}, err + } + petList, err := psc.podStore.Pods(ps.Namespace).List(sel) + if err != nil { + return []*api.Pod{}, err + } + pods := []*api.Pod{} + for i := range petList.Items { + // Take the address of the slice element, not the loop variable, so each + // entry points at a distinct pod. + pods = append(pods, &petList.Items[i]) + } + return pods, nil +} + +// getPetSetForPod returns the pet set managing the given pod. +func (psc *PetSetController) getPetSetForPod(pod *api.Pod) *apps.PetSet { + ps, err := psc.psStore.GetPodPetSets(pod) + if err != nil { + glog.V(4).Infof("No PetSets found for pod %v, PetSet controller will avoid syncing", pod.Name) + return nil + } + // Resolve an overlapping petset tie by creation timestamp. + // Let's hope users don't create overlapping petsets. + if len(ps) > 1 { + glog.Errorf("user error! more than one PetSet is selecting pods with labels: %+v", pod.Labels) + sort.Sort(overlappingPetSets(ps)) + } + return &ps[0] +} + +// enqueuePetSet enqueues the given petset in the work queue. +func (psc *PetSetController) enqueuePetSet(obj interface{}) { + key, err := controller.KeyFunc(obj) + if err != nil { + glog.Errorf("Couldn't get key for object %+v: %v", obj, err) + return + } + psc.queue.Add(key) +} + +// worker runs a worker thread that just dequeues items, processes them, and marks them done. +// It enforces that the syncHandler is never invoked concurrently with the same key. +func (psc *PetSetController) worker() { + for { + key, quit := psc.queue.Get() + if quit { + // The queue was shut down; stop the worker instead of spinning. + return + } + func() { + defer psc.queue.Done(key) + if errs := psc.syncHandler(key.(string)); len(errs) != 0 { + glog.Errorf("Error syncing PetSet %v, requeuing: %v", key.(string), errs) + psc.queue.Add(key) + } + }() + } +} + +// Sync syncs the given petset. +func (psc *PetSetController) Sync(key string) []error { + startTime := time.Now() + defer func() { + glog.V(4).Infof("Finished syncing pet set %q (%v)", key, time.Now().Sub(startTime)) + }() + + if !psc.podStoreSynced() { + // Sleep so we give the pod reflector goroutine a chance to run.
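+ // The error returned just below requeues the key, so this sleep only + // rate-limits how quickly the same key is retried while the pod cache fills.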
+ time.Sleep(PodStoreSyncedPollPeriod) + return []error{fmt.Errorf("waiting for pods controller to sync")} + } + + obj, exists, err := psc.psStore.Store.GetByKey(key) + if !exists { + if err = psc.blockingPetStore.store.Delete(key); err != nil { + return []error{err} + } + glog.Infof("PetSet has been deleted %v", key) + return []error{} + } + if err != nil { + glog.Errorf("Unable to retrieve PetSet %v from store: %v", key, err) + return []error{err} + } + + ps := *obj.(*apps.PetSet) + petList, err := psc.getPodsForPetSet(&ps) + if err != nil { + return []error{err} + } + + numPets, errs := psc.syncPetSet(&ps, petList) + if err := updatePetCount(psc.kubeClient, ps, numPets); err != nil { + glog.Infof("Failed to update replica count for petset %v/%v; requeuing; error: %v", ps.Namespace, ps.Name, err) + errs = append(errs, err) + } + + return errs +} + +// syncPetSet syncs a tuple of (petset, pets). +func (psc *PetSetController) syncPetSet(ps *apps.PetSet, pets []*api.Pod) (int, []error) { + glog.Infof("Syncing PetSet %v/%v with %d pets", ps.Namespace, ps.Name, len(pets)) + + it := NewPetSetIterator(ps, pets) + blockingPet, err := psc.blockingPetStore.Get(ps, pets) + if err != nil { + return 0, []error{err} + } + if blockingPet != nil { + glog.Infof("PetSet %v blocked from scaling on pet %v", ps.Name, blockingPet.pod.Name) + } + petManager := psc.newSyncer(blockingPet) + numPets := 0 + + for it.Next() { + pet := it.Value() + if pet == nil { + continue + } + switch pet.event { + case syncPet: + err = petManager.Sync(pet) + if err == nil { + numPets++ + } + case deletePet: + err = petManager.Delete(pet) + } + if err != nil { + it.errs = append(it.errs, err) + } + } + + if err := psc.blockingPetStore.Add(petManager.blockingPet); err != nil { + it.errs = append(it.errs, err) + } + // TODO: GC pvcs. We can't delete them per pet because of grace period, and + // in fact we *don't want to* till petset is stable to guarantee that bugs + // in the controller don't corrupt user data. + return numPets, it.errs +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/petset/pet_set_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/petset/pet_set_test.go new file mode 100644 index 000000000000..8498fce87014 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/petset/pet_set_test.go @@ -0,0 +1,264 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package petset + +import ( + "fmt" + "math/rand" + "reflect" + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/apps" + "k8s.io/kubernetes/pkg/client/cache" + "k8s.io/kubernetes/pkg/controller" +) + +func newFakePetSetController() (*PetSetController, *fakePetClient) { + fpc := newFakePetClient() + return &PetSetController{ + kubeClient: nil, + blockingPetStore: newUnHealthyPetTracker(fpc), + podStoreSynced: func() bool { return true }, + psStore: cache.StoreToPetSetLister{Store: cache.NewStore(controller.KeyFunc)}, + podStore: cache.StoreToPodLister{Indexer: cache.NewIndexer(controller.KeyFunc, cache.Indexers{})}, + newSyncer: func(blockingPet *pcb) *petSyncer { + return &petSyncer{fpc, blockingPet} + }, + }, fpc +} + +func checkPets(ps *apps.PetSet, creates, deletes int, fc *fakePetClient, t *testing.T) { + if fc.petsCreated != creates || fc.petsDeleted != deletes { + t.Errorf("Found (creates: %d, deletes: %d), expected (creates: %d, deletes: %d)", fc.petsCreated, fc.petsDeleted, creates, deletes) + } + gotClaims := map[string]api.PersistentVolumeClaim{} + for _, pvc := range fc.claims { + gotClaims[pvc.Name] = pvc + } + for i := range fc.pets { + expectedPet, _ := newPCB(fmt.Sprintf("%v", i), ps) + if identityHash(ps, fc.pets[i].pod) != identityHash(ps, expectedPet.pod) { + t.Errorf("Unexpected pet at index %d", i) + } + for _, pvc := range expectedPet.pvcs { + gotPVC, ok := gotClaims[pvc.Name] + if !ok { + t.Errorf("PVC %v not created for pet %v", pvc.Name, expectedPet.pod.Name) + } + if !reflect.DeepEqual(gotPVC.Spec, pvc.Spec) { + t.Errorf("PVC %v differs from the created PVC", pvc.Name) + } + } + } +} + +func scalePetSet(t *testing.T, ps *apps.PetSet, psc *PetSetController, fc *fakePetClient, scale int) []error { + errs := []error{} + for i := 0; i < scale; i++ { + pl := fc.getPodList() + if len(pl) != i { + t.Errorf("Unexpected number of pets, expected %d found %d", i, len(pl)) + } + _, syncErrs := psc.syncPetSet(ps, pl) + errs = append(errs, syncErrs...) + fc.setHealthy(i) + checkPets(ps, i+1, 0, fc, t) + } + return errs +} + +func saturatePetSet(t *testing.T, ps *apps.PetSet, psc *PetSetController, fc *fakePetClient) { + errs := scalePetSet(t, ps, psc, fc, ps.Spec.Replicas) + if len(errs) != 0 { + t.Errorf("%v", errs) + } +} + +func TestPetSetControllerCreates(t *testing.T) { + psc, fc := newFakePetSetController() + replicas := 3 + ps := newPetSet(replicas) + + saturatePetSet(t, ps, psc, fc) + + podList := fc.getPodList() + // Deleted pet gets recreated + fc.pets = fc.pets[:replicas-1] + if _, errs := psc.syncPetSet(ps, podList); len(errs) != 0 { + t.Errorf("%v", errs) + } + checkPets(ps, replicas+1, 0, fc, t) +} + +func TestPetSetControllerDeletes(t *testing.T) { + psc, fc := newFakePetSetController() + replicas := 4 + ps := newPetSet(replicas) + + saturatePetSet(t, ps, psc, fc) + + // Drain + errs := []error{} + ps.Spec.Replicas = 0 + knownPods := fc.getPodList() + for i := replicas - 1; i >= 0; i-- { + if len(fc.pets) != i+1 { + t.Errorf("Unexpected number of pets, expected %d found %d", i+1, len(fc.pets)) + } + _, syncErrs := psc.syncPetSet(ps, knownPods) + errs = append(errs, syncErrs...)
+ } + if len(errs) != 0 { + t.Errorf("%v", errs) + } + checkPets(ps, replicas, replicas, fc, t) +} + +func TestPetSetControllerRespectsTermination(t *testing.T) { + psc, fc := newFakePetSetController() + replicas := 4 + ps := newPetSet(replicas) + + saturatePetSet(t, ps, psc, fc) + + fc.setDeletionTimestamp(replicas - 1) + ps.Spec.Replicas = 2 + _, errs := psc.syncPetSet(ps, fc.getPodList()) + if len(errs) != 0 { + t.Errorf("%v", errs) + } + // Finding a pod with the deletion timestamp will pause all deletions. + knownPods := fc.getPodList() + if len(knownPods) != 4 { + t.Errorf("Pods deleted prematurely before deletion timestamp expired, len %d", len(knownPods)) + } + fc.pets = fc.pets[:replicas-1] + _, errs = psc.syncPetSet(ps, fc.getPodList()) + if len(errs) != 0 { + t.Errorf("%v", errs) + } + checkPets(ps, replicas, 1, fc, t) +} + +func TestPetSetControllerRespectsOrder(t *testing.T) { + psc, fc := newFakePetSetController() + replicas := 4 + ps := newPetSet(replicas) + + saturatePetSet(t, ps, psc, fc) + + errs := []error{} + ps.Spec.Replicas = 0 + // Shuffle known list and check that pets are deleted in reverse + knownPods := fc.getPodList() + for i := range knownPods { + j := rand.Intn(i + 1) + knownPods[i], knownPods[j] = knownPods[j], knownPods[i] + } + + for i := 0; i < replicas; i++ { + if len(fc.pets) != replicas-i { + t.Errorf("Unexpected number of pets, expected %d found %d", replicas-i, len(fc.pets)) + } + _, syncErrs := psc.syncPetSet(ps, knownPods) + errs = append(errs, syncErrs...) + checkPets(ps, replicas, i+1, fc, t) + } + if len(errs) != 0 { + t.Errorf("%v", errs) + } +} + +func TestPetSetControllerBlocksScaling(t *testing.T) { + psc, fc := newFakePetSetController() + replicas := 5 + ps := newPetSet(replicas) + scalePetSet(t, ps, psc, fc, 3) + + // Create the 4th pet, then before flipping it to healthy, kill the first pet. + // There should only be 1 unhealthy pet at a time.
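+ // The sequence below exercises that: create pet-3, delete pet-0 before + // pet-3 turns healthy, and verify pet-0 is only recreated once pet-3 + // reports healthy.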
+ pl := fc.getPodList() + if _, errs := psc.syncPetSet(ps, pl); len(errs) != 0 { + t.Errorf("%v", errs) + } + + deletedPod := pl[0] + fc.deletePetAtIndex(0) + pl = fc.getPodList() + if _, errs := psc.syncPetSet(ps, pl); len(errs) != 0 { + t.Errorf("%v", errs) + } + newPodList := fc.getPodList() + for _, p := range newPodList { + if p.Name == deletedPod.Name { + t.Errorf("Deleted pod was created while existing pod was unhealthy") + } + } + + fc.setHealthy(len(newPodList) - 1) + if _, errs := psc.syncPetSet(ps, pl); len(errs) != 0 { + t.Errorf("%v", errs) + } + + found := false + for _, p := range fc.getPodList() { + if p.Name == deletedPod.Name { + found = true + } + } + if !found { + t.Errorf("Deleted pod was not created after existing pods became healthy") + } +} + +func TestPetSetBlockingPetIsCleared(t *testing.T) { + psc, fc := newFakePetSetController() + ps := newPetSet(3) + scalePetSet(t, ps, psc, fc, 1) + + if blocking, err := psc.blockingPetStore.Get(ps, fc.getPodList()); err != nil || blocking != nil { + t.Errorf("Unexpected blocking pet %v, err %v", blocking, err) + } + + // 1 not yet healthy pet + psc.syncPetSet(ps, fc.getPodList()) + + if blocking, err := psc.blockingPetStore.Get(ps, fc.getPodList()); err != nil || blocking == nil { + t.Errorf("Expected blocking pet %v, err %v", blocking, err) + } + + // Deleting the petset should clear the blocking pet + if err := psc.psStore.Store.Delete(ps); err != nil { + t.Fatalf("Unable to delete petset %v from the petset controller store.", ps.Name) + } + if errs := psc.Sync(fmt.Sprintf("%v/%v", ps.Namespace, ps.Name)); len(errs) != 0 { + t.Errorf("Error during sync of deleted petset %v", errs) + } + fc.pets = []*pcb{} + fc.petsCreated = 0 + if blocking, err := psc.blockingPetStore.Get(ps, fc.getPodList()); err != nil || blocking != nil { + t.Errorf("Unexpected blocking pet %v, err %v", blocking, err) + } + saturatePetSet(t, ps, psc, fc) + + // Make sure we don't leak the final blocking pet in the store + psc.syncPetSet(ps, fc.getPodList()) + if p, exists, err := psc.blockingPetStore.store.GetByKey(fmt.Sprintf("%v/%v", ps.Namespace, ps.Name)); err != nil || exists { + t.Errorf("Unexpected blocking pet, err %v: %+v", err, p) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/petset/pet_set_utils.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/petset/pet_set_utils.go new file mode 100644 index 000000000000..d6d373050dd6 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/petset/pet_set_utils.go @@ -0,0 +1,168 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package petset + +import ( + "fmt" + "sync" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/apps" + "k8s.io/kubernetes/pkg/client/cache" + client "k8s.io/kubernetes/pkg/client/unversioned" + "k8s.io/kubernetes/pkg/controller" + + "github.com/golang/glog" +) + +// overlappingPetSets sorts a list of PetSets by creation timestamp, using their names as a tie breaker.
+// Generally used to tie break between PetSets that have overlapping selectors. +type overlappingPetSets []apps.PetSet + +func (o overlappingPetSets) Len() int { return len(o) } +func (o overlappingPetSets) Swap(i, j int) { o[i], o[j] = o[j], o[i] } + +func (o overlappingPetSets) Less(i, j int) bool { + if o[i].CreationTimestamp.Equal(o[j].CreationTimestamp) { + return o[i].Name < o[j].Name + } + return o[i].CreationTimestamp.Before(o[j].CreationTimestamp) +} + +// updatePetCount attempts to update the Status.Replicas of the given PetSet, with a single GET/PUT retry. +func updatePetCount(kubeClient *client.Client, ps apps.PetSet, numPets int) (updateErr error) { + if ps.Status.Replicas == numPets || kubeClient == nil { + return nil + } + psClient := kubeClient.Apps().PetSets(ps.Namespace) + var getErr error + for i, ps := 0, &ps; ; i++ { + glog.V(4).Infof(fmt.Sprintf("Updating replica count for PetSet: %s/%s, ", ps.Namespace, ps.Name) + + fmt.Sprintf("replicas %d->%d (need %d), ", ps.Status.Replicas, numPets, ps.Spec.Replicas)) + + ps.Status = apps.PetSetStatus{Replicas: numPets} + _, updateErr = psClient.UpdateStatus(ps) + if updateErr == nil || i >= statusUpdateRetries { + return updateErr + } + if ps, getErr = psClient.Get(ps.Name); getErr != nil { + return getErr + } + } +} + +// claimClient returns the persistent volume claim client for the given kubeClient/ns. +func claimClient(kubeClient *client.Client, ns string) client.PersistentVolumeClaimInterface { + return kubeClient.PersistentVolumeClaims(ns) +} + +// podClient returns the pod client for the given kubeClient/ns. +func podClient(kubeClient *client.Client, ns string) client.PodInterface { + return kubeClient.Pods(ns) +} + +// unhealthyPetTracker tracks unhealthy pets for petsets. +type unhealthyPetTracker struct { + pc petClient + store cache.Store + storeLock sync.Mutex +} + +// Get returns a previously recorded blocking pet for the given petset. +func (u *unhealthyPetTracker) Get(ps *apps.PetSet, knownPets []*api.Pod) (*pcb, error) { + u.storeLock.Lock() + defer u.storeLock.Unlock() + + // We "Get" by key but "Add" by object because the store interface doesn't + // allow us to Get/Add a related obj (eg petset: blocking pet). + key, err := controller.KeyFunc(ps) + if err != nil { + return nil, err + } + obj, exists, err := u.store.GetByKey(key) + if err != nil { + return nil, err + } + + hc := defaultPetHealthChecker{} + // There's no unhealthy pet blocking a scale event, but this might be + // a controller manager restart. If it is, knownPets can be trusted. + if !exists { + for _, p := range knownPets { + if hc.isHealthy(p) && !hc.isDying(p) { + glog.V(4).Infof("Ignoring healthy pet %v for PetSet %v", p.Name, ps.Name) + continue + } + glog.Infof("No recorded blocking pet, but found unhealthy pet %v for PetSet %v", p.Name, ps.Name) + return &pcb{pod: p, parent: ps}, nil + } + return nil, nil + } + + // This is a pet that's blocking further creates/deletes of a petset. If it + // disappears, it's no longer blocking. If it exists, it continues to block + // till it turns healthy or disappears.
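+ // In other words, Add records a blocker after the controller attempts a + // create/delete, and this branch clears it once the recorded pet is + // healthy or gone.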
+ bp := obj.(*pcb) + blockingPet, exists, err := u.pc.Get(bp) + if err != nil { + return nil, err + } + if !exists { + glog.V(4).Infof("Clearing blocking pet %v for PetSet %v because it's been deleted", bp.pod.Name, ps.Name) + return nil, nil + } + blockingPetPod := blockingPet.pod + if hc.isHealthy(blockingPetPod) && !hc.isDying(blockingPetPod) { + glog.V(4).Infof("Clearing blocking pet %v for PetSet %v because it's healthy", bp.pod.Name, ps.Name) + u.store.Delete(blockingPet) + blockingPet = nil + } + return blockingPet, nil +} + +// Add records the given pet as a blocking pet. +func (u *unhealthyPetTracker) Add(blockingPet *pcb) error { + u.storeLock.Lock() + defer u.storeLock.Unlock() + + if blockingPet == nil { + return nil + } + glog.V(4).Infof("Adding blocking pet %v for PetSet %v", blockingPet.pod.Name, blockingPet.parent.Name) + return u.store.Add(blockingPet) +} + +// newUnHealthyPetTracker tracks unhealthy pets that block progress of petsets. +func newUnHealthyPetTracker(pc petClient) *unhealthyPetTracker { + return &unhealthyPetTracker{pc: pc, store: cache.NewStore(pcbKeyFunc)} +} + +// pcbKeyFunc computes the key for a given pcb. +// If it's given a key, it simply returns it. +func pcbKeyFunc(obj interface{}) (string, error) { + if key, ok := obj.(string); ok { + return key, nil + } + p, ok := obj.(*pcb) + if !ok { + return "", fmt.Errorf("not a valid pet control block %+v", p) + } + if p.parent == nil { + return "", fmt.Errorf("cannot compute pet control block key without parent pointer %+v", p) + } + return controller.KeyFunc(p.parent) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/podautoscaler/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/podautoscaler/doc.go new file mode 100644 index 000000000000..34ce53aec78b --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/podautoscaler/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package podautoscaler contains logic for autoscaling number of +// pods based on metrics observed. 
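+// +// As a hedged sketch, the core replica computation in horizontal.go is: +// +// usageRatio := float64(currentUtilization) / float64(targetUtilization) +// if math.Abs(1.0-usageRatio) > tolerance { +// desiredReplicas = int32(math.Ceil(usageRatio * float64(currentReplicas))) +// } +// +// For example, a 30% CPU target with 50% observed average utilization across +// 3 replicas yields ceil(50.0/30.0*3) = 5 replicas.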
+package podautoscaler diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/podautoscaler/horizontal.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/podautoscaler/horizontal.go index 3ba83dbdf902..2ca083ff20d1 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/podautoscaler/horizontal.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/podautoscaler/horizontal.go @@ -26,11 +26,13 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/apis/autoscaling" "k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/client/cache" + unversionedautoscaling "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/unversioned" + unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned" + unversionedextensions "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned" "k8s.io/kubernetes/pkg/client/record" - unversionedcore "k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned" - unversionedextensions "k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned" "k8s.io/kubernetes/pkg/controller/framework" "k8s.io/kubernetes/pkg/controller/podautoscaler/metrics" "k8s.io/kubernetes/pkg/runtime" @@ -51,7 +53,7 @@ const ( type HorizontalController struct { scaleNamespacer unversionedextensions.ScalesGetter - hpaNamespacer unversionedextensions.HorizontalPodAutoscalersGetter + hpaNamespacer unversionedautoscaling.HorizontalPodAutoscalersGetter metricsClient metrics.MetricsClient eventRecorder record.EventRecorder @@ -65,19 +67,8 @@ type HorizontalController struct { var downscaleForbiddenWindow = 5 * time.Minute var upscaleForbiddenWindow = 3 * time.Minute -func NewHorizontalController(evtNamespacer unversionedcore.EventsGetter, scaleNamespacer unversionedextensions.ScalesGetter, hpaNamespacer unversionedextensions.HorizontalPodAutoscalersGetter, metricsClient metrics.MetricsClient, resyncPeriod time.Duration) *HorizontalController { - broadcaster := record.NewBroadcaster() - broadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{evtNamespacer.Events("")}) - recorder := broadcaster.NewRecorder(api.EventSource{Component: "horizontal-pod-autoscaler"}) - - controller := &HorizontalController{ - metricsClient: metricsClient, - eventRecorder: recorder, - scaleNamespacer: scaleNamespacer, - hpaNamespacer: hpaNamespacer, - } - - controller.store, controller.controller = framework.NewInformer( +func newInformer(controller *HorizontalController, resyncPeriod time.Duration) (cache.Store, *framework.Controller) { + return framework.NewInformer( &cache.ListWatch{ ListFunc: func(options api.ListOptions) (runtime.Object, error) { return controller.hpaNamespacer.HorizontalPodAutoscalers(api.NamespaceAll).List(options) @@ -86,12 +77,12 @@ func NewHorizontalController(evtNamespacer unversionedcore.EventsGetter, scaleNa return controller.hpaNamespacer.HorizontalPodAutoscalers(api.NamespaceAll).Watch(options) }, }, - &extensions.HorizontalPodAutoscaler{}, + &autoscaling.HorizontalPodAutoscaler{}, resyncPeriod, framework.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { - hpa := obj.(*extensions.HorizontalPodAutoscaler) - hasCPUPolicy := hpa.Spec.CPUUtilization != nil + hpa := obj.(*autoscaling.HorizontalPodAutoscaler) + hasCPUPolicy := hpa.Spec.TargetCPUUtilizationPercentage != nil _, hasCustomMetricsPolicy := 
hpa.Annotations[HpaCustomMetricsTargetAnnotationName] if !hasCPUPolicy && !hasCustomMetricsPolicy { controller.eventRecorder.Event(hpa, api.EventTypeNormal, "DefaultPolicy", "No scaling policy specified - will use default one. See documentation for details") @@ -102,7 +93,7 @@ func NewHorizontalController(evtNamespacer unversionedcore.EventsGetter, scaleNa } }, UpdateFunc: func(old, cur interface{}) { - hpa := cur.(*extensions.HorizontalPodAutoscaler) + hpa := cur.(*autoscaling.HorizontalPodAutoscaler) err := controller.reconcileAutoscaler(hpa) if err != nil { glog.Warningf("Failed to reconcile %s: %v", hpa.Name, err) @@ -111,6 +102,22 @@ func NewHorizontalController(evtNamespacer unversionedcore.EventsGetter, scaleNa // We are not interested in deletions. }, ) +} + +func NewHorizontalController(evtNamespacer unversionedcore.EventsGetter, scaleNamespacer unversionedextensions.ScalesGetter, hpaNamespacer unversionedautoscaling.HorizontalPodAutoscalersGetter, metricsClient metrics.MetricsClient, resyncPeriod time.Duration) *HorizontalController { + broadcaster := record.NewBroadcaster() + broadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: evtNamespacer.Events("")}) + recorder := broadcaster.NewRecorder(api.EventSource{Component: "horizontal-pod-autoscaler"}) + + controller := &HorizontalController{ + metricsClient: metricsClient, + eventRecorder: recorder, + scaleNamespacer: scaleNamespacer, + hpaNamespacer: hpaNamespacer, + } + store, frameworkController := newInformer(controller, resyncPeriod) + controller.store = store + controller.controller = frameworkController return controller } @@ -123,10 +130,10 @@ func (a *HorizontalController) Run(stopCh <-chan struct{}) { glog.Infof("Shutting down HPA Controller") } -func (a *HorizontalController) computeReplicasForCPUUtilization(hpa *extensions.HorizontalPodAutoscaler, scale *extensions.Scale) (int, *int, time.Time, error) { - targetUtilization := defaultTargetCPUUtilizationPercentage - if hpa.Spec.CPUUtilization != nil { - targetUtilization = hpa.Spec.CPUUtilization.TargetPercentage +func (a *HorizontalController) computeReplicasForCPUUtilization(hpa *autoscaling.HorizontalPodAutoscaler, scale *extensions.Scale) (int32, *int32, time.Time, error) { + targetUtilization := int32(defaultTargetCPUUtilizationPercentage) + if hpa.Spec.TargetCPUUtilizationPercentage != nil { + targetUtilization = *hpa.Spec.TargetCPUUtilizationPercentage } currentReplicas := scale.Status.Replicas @@ -150,11 +157,13 @@ func (a *HorizontalController) computeReplicasForCPUUtilization(hpa *extensions. return 0, nil, time.Time{}, fmt.Errorf("failed to get CPU utilization: %v", err) } - usageRatio := float64(*currentUtilization) / float64(targetUtilization) + utilization := int32(*currentUtilization) + + usageRatio := float64(utilization) / float64(targetUtilization) if math.Abs(1.0-usageRatio) > tolerance { - return int(math.Ceil(usageRatio * float64(currentReplicas))), currentUtilization, timestamp, nil + return int32(math.Ceil(usageRatio * float64(currentReplicas))), &utilization, timestamp, nil } else { - return currentReplicas, currentUtilization, timestamp, nil + return currentReplicas, &utilization, timestamp, nil } } @@ -163,8 +172,8 @@ func (a *HorizontalController) computeReplicasForCPUUtilization(hpa *extensions. 
// Returns number of replicas, metric which required highest number of replicas, // status string (also json-serialized extensions.CustomMetricsCurrentStatusList), // last timestamp of the metrics involved in computations or error, if occurred. -func (a *HorizontalController) computeReplicasForCustomMetrics(hpa *extensions.HorizontalPodAutoscaler, scale *extensions.Scale, - cmAnnotation string) (replicas int, metric string, status string, timestamp time.Time, err error) { +func (a *HorizontalController) computeReplicasForCustomMetrics(hpa *autoscaling.HorizontalPodAutoscaler, scale *extensions.Scale, + cmAnnotation string) (replicas int32, metric string, status string, timestamp time.Time, err error) { currentReplicas := scale.Status.Replicas replicas = 0 @@ -211,9 +220,9 @@ func (a *HorizontalController) computeReplicasForCustomMetrics(hpa *extensions.H floatTarget := float64(customMetricTarget.TargetValue.MilliValue()) / 1000.0 usageRatio := *value / floatTarget - replicaCountProposal := 0 + replicaCountProposal := int32(0) if math.Abs(1.0-usageRatio) > tolerance { - replicaCountProposal = int(math.Ceil(usageRatio * float64(currentReplicas))) + replicaCountProposal = int32(math.Ceil(usageRatio * float64(currentReplicas))) } else { replicaCountProposal = currentReplicas } @@ -228,7 +237,7 @@ func (a *HorizontalController) computeReplicasForCustomMetrics(hpa *extensions.H } statusList.Items = append(statusList.Items, extensions.CustomMetricCurrentStatus{ Name: customMetricTarget.Name, - CurrentValue: *quantity, + CurrentValue: quantity, }) } byteStatusList, err := json.Marshal(statusList) @@ -239,26 +248,26 @@ func (a *HorizontalController) computeReplicasForCustomMetrics(hpa *extensions.H return replicas, metric, string(byteStatusList), timestamp, nil } -func (a *HorizontalController) reconcileAutoscaler(hpa *extensions.HorizontalPodAutoscaler) error { - reference := fmt.Sprintf("%s/%s/%s", hpa.Spec.ScaleRef.Kind, hpa.Namespace, hpa.Spec.ScaleRef.Name) +func (a *HorizontalController) reconcileAutoscaler(hpa *autoscaling.HorizontalPodAutoscaler) error { + reference := fmt.Sprintf("%s/%s/%s", hpa.Spec.ScaleTargetRef.Kind, hpa.Namespace, hpa.Spec.ScaleTargetRef.Name) - scale, err := a.scaleNamespacer.Scales(hpa.Namespace).Get(hpa.Spec.ScaleRef.Kind, hpa.Spec.ScaleRef.Name) + scale, err := a.scaleNamespacer.Scales(hpa.Namespace).Get(hpa.Spec.ScaleTargetRef.Kind, hpa.Spec.ScaleTargetRef.Name) if err != nil { a.eventRecorder.Event(hpa, api.EventTypeWarning, "FailedGetScale", err.Error()) return fmt.Errorf("failed to query scale subresource for %s: %v", reference, err) } currentReplicas := scale.Status.Replicas - cpuDesiredReplicas := 0 - var cpuCurrentUtilization *int = nil + cpuDesiredReplicas := int32(0) + var cpuCurrentUtilization *int32 = nil cpuTimestamp := time.Time{} - cmDesiredReplicas := 0 + cmDesiredReplicas := int32(0) cmMetric := "" cmStatus := "" cmTimestamp := time.Time{} - desiredReplicas := 0 + desiredReplicas := int32(0) rescaleReason := "" timestamp := time.Now() @@ -275,7 +284,7 @@ func (a *HorizontalController) reconcileAutoscaler(hpa *extensions.HorizontalPod // All basic scenarios covered, the state should be sane, lets use metrics. 
cmAnnotation, cmAnnotationFound := hpa.Annotations[HpaCustomMetricsTargetAnnotationName] - if hpa.Spec.CPUUtilization != nil || !cmAnnotationFound { + if hpa.Spec.TargetCPUUtilizationPercentage != nil || !cmAnnotationFound { cpuDesiredReplicas, cpuCurrentUtilization, cpuTimestamp, err = a.computeReplicasForCPUUtilization(hpa, scale) if err != nil { a.updateCurrentReplicasInStatus(hpa, currentReplicas) @@ -327,7 +336,7 @@ func (a *HorizontalController) reconcileAutoscaler(hpa *extensions.HorizontalPod rescale := shouldScale(hpa, currentReplicas, desiredReplicas, timestamp) if rescale { scale.Spec.Replicas = desiredReplicas - _, err = a.scaleNamespacer.Scales(hpa.Namespace).Update(hpa.Spec.ScaleRef.Kind, scale) + _, err = a.scaleNamespacer.Scales(hpa.Namespace).Update(hpa.Spec.ScaleTargetRef.Kind, scale) if err != nil { a.eventRecorder.Eventf(hpa, api.EventTypeWarning, "FailedRescale", "New size: %d; reason: %s; error: %v", desiredReplicas, rescaleReason, err.Error()) return fmt.Errorf("failed to rescale %s: %v", reference, err) @@ -342,7 +351,7 @@ func (a *HorizontalController) reconcileAutoscaler(hpa *extensions.HorizontalPod return a.updateStatus(hpa, currentReplicas, desiredReplicas, cpuCurrentUtilization, cmStatus, rescale) } -func shouldScale(hpa *extensions.HorizontalPodAutoscaler, currentReplicas, desiredReplicas int, timestamp time.Time) bool { +func shouldScale(hpa *autoscaling.HorizontalPodAutoscaler, currentReplicas, desiredReplicas int32, timestamp time.Time) bool { if desiredReplicas != currentReplicas { // Going down only if the usageRatio dropped significantly below the target // and there was no rescaling in the last downscaleForbiddenWindow. @@ -363,15 +372,15 @@ func shouldScale(hpa *extensions.HorizontalPodAutoscaler, currentReplicas, desir return false } -func (a *HorizontalController) updateCurrentReplicasInStatus(hpa *extensions.HorizontalPodAutoscaler, currentReplicas int) { +func (a *HorizontalController) updateCurrentReplicasInStatus(hpa *autoscaling.HorizontalPodAutoscaler, currentReplicas int32) { err := a.updateStatus(hpa, currentReplicas, hpa.Status.DesiredReplicas, hpa.Status.CurrentCPUUtilizationPercentage, hpa.Annotations[HpaCustomMetricsStatusAnnotationName], false) if err != nil { glog.Errorf("%v", err) } } -func (a *HorizontalController) updateStatus(hpa *extensions.HorizontalPodAutoscaler, currentReplicas, desiredReplicas int, cpuCurrentUtilization *int, cmStatus string, rescale bool) error { - hpa.Status = extensions.HorizontalPodAutoscalerStatus{ +func (a *HorizontalController) updateStatus(hpa *autoscaling.HorizontalPodAutoscaler, currentReplicas, desiredReplicas int32, cpuCurrentUtilization *int32, cmStatus string, rescale bool) error { + hpa.Status = autoscaling.HorizontalPodAutoscalerStatus{ CurrentReplicas: currentReplicas, DesiredReplicas: desiredReplicas, CurrentCPUUtilizationPercentage: cpuCurrentUtilization, diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/podautoscaler/horizontal_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/podautoscaler/horizontal_test.go new file mode 100644 index 000000000000..003a4a5d692d --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/podautoscaler/horizontal_test.go @@ -0,0 +1,873 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package podautoscaler + +import ( + "encoding/json" + "fmt" + "io" + "math" + "sync" + "testing" + "time" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/resource" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/api/v1" + _ "k8s.io/kubernetes/pkg/apimachinery/registered" + "k8s.io/kubernetes/pkg/apis/autoscaling" + "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" + unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned" + "k8s.io/kubernetes/pkg/client/record" + "k8s.io/kubernetes/pkg/client/restclient" + "k8s.io/kubernetes/pkg/client/testing/core" + "k8s.io/kubernetes/pkg/controller/podautoscaler/metrics" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/watch" + + heapster "k8s.io/heapster/metrics/api/v1/types" + metrics_api "k8s.io/heapster/metrics/apis/metrics/v1alpha1" + + "github.com/stretchr/testify/assert" +) + +func (w fakeResponseWrapper) DoRaw() ([]byte, error) { + return w.raw, nil +} + +func (w fakeResponseWrapper) Stream() (io.ReadCloser, error) { + return nil, nil +} + +func newFakeResponseWrapper(raw []byte) fakeResponseWrapper { + return fakeResponseWrapper{raw: raw} +} + +type fakeResponseWrapper struct { + raw []byte +} + +type fakeResource struct { + name string + apiVersion string + kind string +} + +type testCase struct { + sync.Mutex + minReplicas int32 + maxReplicas int32 + initialReplicas int32 + desiredReplicas int32 + + // CPU target utilization as a percentage of the requested resources. + CPUTarget int32 + CPUCurrent int32 + verifyCPUCurrent bool + reportedLevels []uint64 + reportedCPURequests []resource.Quantity + cmTarget *extensions.CustomMetricTargetList + scaleUpdated bool + statusUpdated bool + eventCreated bool + verifyEvents bool + useMetricsApi bool + // Channel with names of HPA objects which we have reconciled. + processed chan string + + // Target resource information. + resource *fakeResource +} + +// Needs to be called under a lock. +func (tc *testCase) computeCPUCurrent() { + if len(tc.reportedLevels) != len(tc.reportedCPURequests) || len(tc.reportedLevels) == 0 { + return + } + reported := 0 + for _, r := range tc.reportedLevels { + reported += int(r) + } + requested := 0 + for _, req := range tc.reportedCPURequests { + requested += int(req.MilliValue()) + } + tc.CPUCurrent = int32(100 * reported / requested) +} + +func (tc *testCase) prepareTestClient(t *testing.T) *fake.Clientset { + namespace := "test-namespace" + hpaName := "test-hpa" + podNamePrefix := "test-pod" + selector := &unversioned.LabelSelector{ + MatchLabels: map[string]string{"name": podNamePrefix}, + } + + tc.Lock() + + tc.scaleUpdated = false + tc.statusUpdated = false + tc.eventCreated = false + tc.processed = make(chan string, 100) + tc.computeCPUCurrent() + + // TODO(madhusudancs): HPA only supports resources in extensions/v1beta1 right now. Add + // tests for "v1" replicationcontrollers when HPA adds support for cross-group scale. 
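+ // The reactors registered below stub out the apiserver: each one intercepts + // a (verb, resource) pair and fabricates a response from the testCase + // fields, so the controller under test needs no real cluster.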
+ if tc.resource == nil { + tc.resource = &fakeResource{ + name: "test-rc", + apiVersion: "extensions/v1beta1", + kind: "replicationcontrollers", + } + } + tc.Unlock() + + fakeClient := &fake.Clientset{} + fakeClient.AddReactor("list", "horizontalpodautoscalers", func(action core.Action) (handled bool, ret runtime.Object, err error) { + tc.Lock() + defer tc.Unlock() + + obj := &autoscaling.HorizontalPodAutoscalerList{ + Items: []autoscaling.HorizontalPodAutoscaler{ + { + ObjectMeta: api.ObjectMeta{ + Name: hpaName, + Namespace: namespace, + SelfLink: "experimental/v1/namespaces/" + namespace + "/horizontalpodautoscalers/" + hpaName, + }, + Spec: autoscaling.HorizontalPodAutoscalerSpec{ + ScaleTargetRef: autoscaling.CrossVersionObjectReference{ + Kind: tc.resource.kind, + Name: tc.resource.name, + APIVersion: tc.resource.apiVersion, + }, + MinReplicas: &tc.minReplicas, + MaxReplicas: tc.maxReplicas, + }, + Status: autoscaling.HorizontalPodAutoscalerStatus{ + CurrentReplicas: tc.initialReplicas, + DesiredReplicas: tc.initialReplicas, + }, + }, + }, + } + + if tc.CPUTarget > 0.0 { + obj.Items[0].Spec.TargetCPUUtilizationPercentage = &tc.CPUTarget + } + if tc.cmTarget != nil { + b, err := json.Marshal(tc.cmTarget) + if err != nil { + t.Fatalf("Failed to marshal cm: %v", err) + } + obj.Items[0].Annotations = make(map[string]string) + obj.Items[0].Annotations[HpaCustomMetricsTargetAnnotationName] = string(b) + } + return true, obj, nil + }) + + fakeClient.AddReactor("get", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) { + tc.Lock() + defer tc.Unlock() + + obj := &extensions.Scale{ + ObjectMeta: api.ObjectMeta{ + Name: tc.resource.name, + Namespace: namespace, + }, + Spec: extensions.ScaleSpec{ + Replicas: tc.initialReplicas, + }, + Status: extensions.ScaleStatus{ + Replicas: tc.initialReplicas, + Selector: selector, + }, + } + return true, obj, nil + }) + + fakeClient.AddReactor("get", "deployments", func(action core.Action) (handled bool, ret runtime.Object, err error) { + tc.Lock() + defer tc.Unlock() + + obj := &extensions.Scale{ + ObjectMeta: api.ObjectMeta{ + Name: tc.resource.name, + Namespace: namespace, + }, + Spec: extensions.ScaleSpec{ + Replicas: tc.initialReplicas, + }, + Status: extensions.ScaleStatus{ + Replicas: tc.initialReplicas, + Selector: selector, + }, + } + return true, obj, nil + }) + + fakeClient.AddReactor("get", "replicasets", func(action core.Action) (handled bool, ret runtime.Object, err error) { + tc.Lock() + defer tc.Unlock() + + obj := &extensions.Scale{ + ObjectMeta: api.ObjectMeta{ + Name: tc.resource.name, + Namespace: namespace, + }, + Spec: extensions.ScaleSpec{ + Replicas: tc.initialReplicas, + }, + Status: extensions.ScaleStatus{ + Replicas: tc.initialReplicas, + Selector: selector, + }, + } + return true, obj, nil + }) + + fakeClient.AddReactor("list", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) { + tc.Lock() + defer tc.Unlock() + + obj := &api.PodList{} + for i := 0; i < len(tc.reportedCPURequests); i++ { + podName := fmt.Sprintf("%s-%d", podNamePrefix, i) + pod := api.Pod{ + Status: api.PodStatus{ + Phase: api.PodRunning, + }, + ObjectMeta: api.ObjectMeta{ + Name: podName, + Namespace: namespace, + Labels: map[string]string{ + "name": podNamePrefix, + }, + }, + Spec: api.PodSpec{ + Containers: []api.Container{ + { + Resources: api.ResourceRequirements{ + Requests: api.ResourceList{ + api.ResourceCPU: tc.reportedCPURequests[i], + }, + }, + }, + }, + }, + } + obj.Items = 
append(obj.Items, pod) + } + return true, obj, nil + }) + + fakeClient.AddProxyReactor("services", func(action core.Action) (handled bool, ret restclient.ResponseWrapper, err error) { + tc.Lock() + defer tc.Unlock() + + var heapsterRawMemResponse []byte + + if tc.useMetricsApi { + metrics := []*metrics_api.PodMetrics{} + for i, cpu := range tc.reportedLevels { + podMetric := &metrics_api.PodMetrics{ + ObjectMeta: v1.ObjectMeta{ + Name: fmt.Sprintf("%s-%d", podNamePrefix, i), + Namespace: namespace, + }, + Timestamp: unversioned.Time{Time: time.Now()}, + Containers: []metrics_api.ContainerMetrics{ + { + Name: "container", + Usage: v1.ResourceList{ + v1.ResourceCPU: *resource.NewMilliQuantity( + int64(cpu), + resource.DecimalSI), + v1.ResourceMemory: *resource.NewQuantity( + int64(1024*1024), + resource.BinarySI), + }, + }, + }, + } + metrics = append(metrics, podMetric) + } + heapsterRawMemResponse, _ = json.Marshal(&metrics) + } else { + timestamp := time.Now() + metrics := heapster.MetricResultList{} + for _, level := range tc.reportedLevels { + metric := heapster.MetricResult{ + Metrics: []heapster.MetricPoint{{timestamp, level, nil}}, + LatestTimestamp: timestamp, + } + metrics.Items = append(metrics.Items, metric) + } + heapsterRawMemResponse, _ = json.Marshal(&metrics) + } + + return true, newFakeResponseWrapper(heapsterRawMemResponse), nil + }) + + fakeClient.AddReactor("update", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) { + tc.Lock() + defer tc.Unlock() + + obj := action.(core.UpdateAction).GetObject().(*extensions.Scale) + replicas := action.(core.UpdateAction).GetObject().(*extensions.Scale).Spec.Replicas + assert.Equal(t, tc.desiredReplicas, replicas) + tc.scaleUpdated = true + return true, obj, nil + }) + + fakeClient.AddReactor("update", "deployments", func(action core.Action) (handled bool, ret runtime.Object, err error) { + tc.Lock() + defer tc.Unlock() + + obj := action.(core.UpdateAction).GetObject().(*extensions.Scale) + replicas := action.(core.UpdateAction).GetObject().(*extensions.Scale).Spec.Replicas + assert.Equal(t, tc.desiredReplicas, replicas) + tc.scaleUpdated = true + return true, obj, nil + }) + + fakeClient.AddReactor("update", "replicasets", func(action core.Action) (handled bool, ret runtime.Object, err error) { + tc.Lock() + defer tc.Unlock() + + obj := action.(core.UpdateAction).GetObject().(*extensions.Scale) + replicas := action.(core.UpdateAction).GetObject().(*extensions.Scale).Spec.Replicas + assert.Equal(t, tc.desiredReplicas, replicas) + tc.scaleUpdated = true + return true, obj, nil + }) + + fakeClient.AddReactor("update", "horizontalpodautoscalers", func(action core.Action) (handled bool, ret runtime.Object, err error) { + tc.Lock() + defer tc.Unlock() + + obj := action.(core.UpdateAction).GetObject().(*autoscaling.HorizontalPodAutoscaler) + assert.Equal(t, namespace, obj.Namespace) + assert.Equal(t, hpaName, obj.Name) + assert.Equal(t, tc.desiredReplicas, obj.Status.DesiredReplicas) + if tc.verifyCPUCurrent { + assert.NotNil(t, obj.Status.CurrentCPUUtilizationPercentage) + assert.Equal(t, tc.CPUCurrent, *obj.Status.CurrentCPUUtilizationPercentage) + } + tc.statusUpdated = true + // Every time we reconcile HPA object we are updating status. 
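+ // That makes this reactor a reliable completion signal: runTest blocks on + // tc.processed until a reconcile has actually happened.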
+ tc.processed <- obj.Name + return true, obj, nil + }) + + fakeClient.AddReactor("*", "events", func(action core.Action) (handled bool, ret runtime.Object, err error) { + tc.Lock() + defer tc.Unlock() + + obj := action.(core.CreateAction).GetObject().(*api.Event) + if tc.verifyEvents { + assert.Equal(t, "SuccessfulRescale", obj.Reason) + assert.Equal(t, fmt.Sprintf("New size: %d; reason: CPU utilization above target", tc.desiredReplicas), obj.Message) + } + tc.eventCreated = true + return true, obj, nil + }) + + fakeWatch := watch.NewFake() + fakeClient.AddWatchReactor("*", core.DefaultWatchReactor(fakeWatch, nil)) + + return fakeClient +} + +func (tc *testCase) verifyResults(t *testing.T) { + tc.Lock() + defer tc.Unlock() + + assert.Equal(t, tc.initialReplicas != tc.desiredReplicas, tc.scaleUpdated) + assert.True(t, tc.statusUpdated) + if tc.verifyEvents { + assert.Equal(t, tc.initialReplicas != tc.desiredReplicas, tc.eventCreated) + } +} + +func (tc *testCase) runTest(t *testing.T) { + testClient := tc.prepareTestClient(t) + metricsClient := metrics.NewHeapsterMetricsClient(testClient, metrics.DefaultHeapsterNamespace, metrics.DefaultHeapsterScheme, metrics.DefaultHeapsterService, metrics.DefaultHeapsterPort) + + broadcaster := record.NewBroadcasterForTests(0) + broadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: testClient.Core().Events("")}) + recorder := broadcaster.NewRecorder(api.EventSource{Component: "horizontal-pod-autoscaler"}) + + hpaController := &HorizontalController{ + metricsClient: metricsClient, + eventRecorder: recorder, + scaleNamespacer: testClient.Extensions(), + hpaNamespacer: testClient.Autoscaling(), + } + + store, frameworkController := newInformer(hpaController, time.Minute) + hpaController.store = store + hpaController.controller = frameworkController + + stop := make(chan struct{}) + defer close(stop) + go hpaController.Run(stop) + + tc.Lock() + if tc.verifyEvents { + tc.Unlock() + // We need to wait for events to be broadcasted (sleep for longer than record.sleepDuration). + time.Sleep(2 * time.Second) + } else { + tc.Unlock() + } + // Wait for HPA to be processed. 
+ <-tc.processed + tc.verifyResults(t) +} + +func TestDefaultScaleUpRC(t *testing.T) { + tc := testCase{ + minReplicas: 2, + maxReplicas: 6, + initialReplicas: 4, + desiredReplicas: 5, + verifyCPUCurrent: true, + reportedLevels: []uint64{900, 950, 950, 1000}, + reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")}, + useMetricsApi: true, + } + tc.runTest(t) +} + +func TestDefaultScaleUpDeployment(t *testing.T) { + tc := testCase{ + minReplicas: 2, + maxReplicas: 6, + initialReplicas: 4, + desiredReplicas: 5, + verifyCPUCurrent: true, + reportedLevels: []uint64{900, 950, 950, 1000}, + reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")}, + useMetricsApi: true, + resource: &fakeResource{ + name: "test-dep", + apiVersion: "extensions/v1beta1", + kind: "deployments", + }, + } + tc.runTest(t) +} + +func TestDefaultScaleUpReplicaSet(t *testing.T) { + tc := testCase{ + minReplicas: 2, + maxReplicas: 6, + initialReplicas: 4, + desiredReplicas: 5, + verifyCPUCurrent: true, + reportedLevels: []uint64{900, 950, 950, 1000}, + reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")}, + useMetricsApi: true, + resource: &fakeResource{ + name: "test-replicaset", + apiVersion: "extensions/v1beta1", + kind: "replicasets", + }, + } + tc.runTest(t) +} + +func TestScaleUp(t *testing.T) { + tc := testCase{ + minReplicas: 2, + maxReplicas: 6, + initialReplicas: 3, + desiredReplicas: 5, + CPUTarget: 30, + verifyCPUCurrent: true, + reportedLevels: []uint64{300, 500, 700}, + reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")}, + useMetricsApi: true, + } + tc.runTest(t) +} + +func TestScaleUpDeployment(t *testing.T) { + tc := testCase{ + minReplicas: 2, + maxReplicas: 6, + initialReplicas: 3, + desiredReplicas: 5, + CPUTarget: 30, + verifyCPUCurrent: true, + reportedLevels: []uint64{300, 500, 700}, + reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")}, + useMetricsApi: true, + resource: &fakeResource{ + name: "test-dep", + apiVersion: "extensions/v1beta1", + kind: "deployments", + }, + } + tc.runTest(t) +} + +func TestScaleUpReplicaSet(t *testing.T) { + tc := testCase{ + minReplicas: 2, + maxReplicas: 6, + initialReplicas: 3, + desiredReplicas: 5, + CPUTarget: 30, + verifyCPUCurrent: true, + reportedLevels: []uint64{300, 500, 700}, + reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")}, + useMetricsApi: true, + resource: &fakeResource{ + name: "test-replicaset", + apiVersion: "extensions/v1beta1", + kind: "replicasets", + }, + } + tc.runTest(t) +} + +func TestScaleUpCM(t *testing.T) { + tc := testCase{ + minReplicas: 2, + maxReplicas: 6, + initialReplicas: 3, + desiredReplicas: 4, + CPUTarget: 0, + cmTarget: &extensions.CustomMetricTargetList{ + Items: []extensions.CustomMetricTarget{{ + Name: "qps", + TargetValue: resource.MustParse("15.0"), + }}, + }, + reportedLevels: []uint64{20, 10, 30}, + reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")}, + } + tc.runTest(t) +} + +func TestDefaultScaleDown(t *testing.T) { + tc := testCase{ + minReplicas: 2, + maxReplicas: 
6, + initialReplicas: 5, + desiredReplicas: 4, + verifyCPUCurrent: true, + reportedLevels: []uint64{400, 500, 600, 700, 800}, + reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")}, + useMetricsApi: true, + } + tc.runTest(t) +} + +func TestScaleDown(t *testing.T) { + tc := testCase{ + minReplicas: 2, + maxReplicas: 6, + initialReplicas: 5, + desiredReplicas: 3, + CPUTarget: 50, + verifyCPUCurrent: true, + reportedLevels: []uint64{100, 300, 500, 250, 250}, + reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")}, + useMetricsApi: true, + } + tc.runTest(t) +} + +func TestScaleDownCM(t *testing.T) { + tc := testCase{ + minReplicas: 2, + maxReplicas: 6, + initialReplicas: 5, + desiredReplicas: 3, + CPUTarget: 0, + cmTarget: &extensions.CustomMetricTargetList{ + Items: []extensions.CustomMetricTarget{{ + Name: "qps", + TargetValue: resource.MustParse("20"), + }}}, + reportedLevels: []uint64{12, 12, 12, 12, 12}, + reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")}, + } + tc.runTest(t) +} + +func TestTolerance(t *testing.T) { + tc := testCase{ + minReplicas: 1, + maxReplicas: 5, + initialReplicas: 3, + desiredReplicas: 3, + CPUTarget: 100, + reportedLevels: []uint64{1010, 1030, 1020}, + reportedCPURequests: []resource.Quantity{resource.MustParse("0.9"), resource.MustParse("1.0"), resource.MustParse("1.1")}, + useMetricsApi: true, + } + tc.runTest(t) +} + +func TestToleranceCM(t *testing.T) { + tc := testCase{ + minReplicas: 1, + maxReplicas: 5, + initialReplicas: 3, + desiredReplicas: 3, + cmTarget: &extensions.CustomMetricTargetList{ + Items: []extensions.CustomMetricTarget{{ + Name: "qps", + TargetValue: resource.MustParse("20"), + }}}, + reportedLevels: []uint64{20, 21, 21}, + reportedCPURequests: []resource.Quantity{resource.MustParse("0.9"), resource.MustParse("1.0"), resource.MustParse("1.1")}, + } + tc.runTest(t) +} + +func TestMinReplicas(t *testing.T) { + tc := testCase{ + minReplicas: 2, + maxReplicas: 5, + initialReplicas: 3, + desiredReplicas: 2, + CPUTarget: 90, + reportedLevels: []uint64{10, 95, 10}, + reportedCPURequests: []resource.Quantity{resource.MustParse("0.9"), resource.MustParse("1.0"), resource.MustParse("1.1")}, + useMetricsApi: true, + } + tc.runTest(t) +} + +func TestZeroReplicas(t *testing.T) { + tc := testCase{ + minReplicas: 3, + maxReplicas: 5, + initialReplicas: 0, + desiredReplicas: 3, + CPUTarget: 90, + reportedLevels: []uint64{}, + reportedCPURequests: []resource.Quantity{}, + useMetricsApi: true, + } + tc.runTest(t) +} + +func TestTooFewReplicas(t *testing.T) { + tc := testCase{ + minReplicas: 3, + maxReplicas: 5, + initialReplicas: 2, + desiredReplicas: 3, + CPUTarget: 90, + reportedLevels: []uint64{}, + reportedCPURequests: []resource.Quantity{}, + useMetricsApi: true, + } + tc.runTest(t) +} + +func TestTooManyReplicas(t *testing.T) { + tc := testCase{ + minReplicas: 3, + maxReplicas: 5, + initialReplicas: 10, + desiredReplicas: 5, + CPUTarget: 90, + reportedLevels: []uint64{}, + reportedCPURequests: []resource.Quantity{}, + useMetricsApi: true, + } + tc.runTest(t) +} + +func TestMaxReplicas(t *testing.T) { + tc := testCase{ + minReplicas: 2, + maxReplicas: 5, + initialReplicas: 3, + 
desiredReplicas: 5, + CPUTarget: 90, + reportedLevels: []uint64{8000, 9500, 1000}, + reportedCPURequests: []resource.Quantity{resource.MustParse("0.9"), resource.MustParse("1.0"), resource.MustParse("1.1")}, + useMetricsApi: true, + } + tc.runTest(t) +} + +func TestSuperfluousMetrics(t *testing.T) { + tc := testCase{ + minReplicas: 2, + maxReplicas: 6, + initialReplicas: 4, + desiredReplicas: 4, + CPUTarget: 100, + reportedLevels: []uint64{4000, 9500, 3000, 7000, 3200, 2000}, + reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")}, + useMetricsApi: true, + } + tc.runTest(t) +} + +func TestMissingMetrics(t *testing.T) { + tc := testCase{ + minReplicas: 2, + maxReplicas: 6, + initialReplicas: 4, + desiredReplicas: 4, + CPUTarget: 100, + reportedLevels: []uint64{400, 95}, + reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")}, + useMetricsApi: true, + } + tc.runTest(t) +} + +func TestEmptyMetrics(t *testing.T) { + tc := testCase{ + minReplicas: 2, + maxReplicas: 6, + initialReplicas: 4, + desiredReplicas: 4, + CPUTarget: 100, + reportedLevels: []uint64{}, + reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")}, + useMetricsApi: true, + } + tc.runTest(t) +} + +func TestEmptyCPURequest(t *testing.T) { + tc := testCase{ + minReplicas: 1, + maxReplicas: 5, + initialReplicas: 1, + desiredReplicas: 1, + CPUTarget: 100, + reportedLevels: []uint64{200}, + useMetricsApi: true, + } + tc.runTest(t) +} + +func TestEventCreated(t *testing.T) { + tc := testCase{ + minReplicas: 1, + maxReplicas: 5, + initialReplicas: 1, + desiredReplicas: 2, + CPUTarget: 50, + reportedLevels: []uint64{200}, + reportedCPURequests: []resource.Quantity{resource.MustParse("0.2")}, + verifyEvents: true, + useMetricsApi: true, + } + tc.runTest(t) +} + +func TestEventNotCreated(t *testing.T) { + tc := testCase{ + minReplicas: 1, + maxReplicas: 5, + initialReplicas: 2, + desiredReplicas: 2, + CPUTarget: 50, + reportedLevels: []uint64{200, 200}, + reportedCPURequests: []resource.Quantity{resource.MustParse("0.4"), resource.MustParse("0.4")}, + verifyEvents: true, + useMetricsApi: true, + } + tc.runTest(t) +} + +// TestComputedToleranceAlgImplementation is a regression test which +// back-calculates a minimal percentage for downscaling based on a small percentage +// increase in pod utilization which is calibrated against the tolerance value. +func TestComputedToleranceAlgImplementation(t *testing.T) { + + startPods := int32(10) + // 150 mCPU per pod. + totalUsedCPUOfAllPods := uint64(startPods * 150) + // Each pod starts out asking for 2X what is really needed. + // This means we will have a 50% ratio of used/requested + totalRequestedCPUOfAllPods := int32(2 * totalUsedCPUOfAllPods) + requestedToUsed := float64(totalRequestedCPUOfAllPods / int32(totalUsedCPUOfAllPods)) + // Spread the amount we ask over 10 pods. We can add some jitter later in reportedLevels. + perPodRequested := totalRequestedCPUOfAllPods / startPods + + // Force a minimal scaling event by satisfying (tolerance < 1 - resourcesUsedRatio). + target := math.Abs(1/(requestedToUsed*(1-tolerance))) + .01 + finalCpuPercentTarget := int32(target * 100) + resourcesUsedRatio := float64(totalUsedCPUOfAllPods) / float64(float64(totalRequestedCPUOfAllPods)*target) + + // i.e. 
resourcesUsedRatio * startPods, rounded up, is the expected scaled-down replica count. + finalPods := int32(math.Ceil(resourcesUsedRatio * float64(startPods))) + + // To breach the tolerance, the usage ratio must differ from 1.0 by more than the tolerance value; the target above was chosen to guarantee that. + tc := testCase{ + minReplicas: 0, + maxReplicas: 1000, + initialReplicas: startPods, + desiredReplicas: finalPods, + CPUTarget: finalCpuPercentTarget, + reportedLevels: []uint64{ + totalUsedCPUOfAllPods / 10, + totalUsedCPUOfAllPods / 10, + totalUsedCPUOfAllPods / 10, + totalUsedCPUOfAllPods / 10, + totalUsedCPUOfAllPods / 10, + totalUsedCPUOfAllPods / 10, + totalUsedCPUOfAllPods / 10, + totalUsedCPUOfAllPods / 10, + totalUsedCPUOfAllPods / 10, + totalUsedCPUOfAllPods / 10, + }, + reportedCPURequests: []resource.Quantity{ + resource.MustParse(fmt.Sprint(perPodRequested+100) + "m"), + resource.MustParse(fmt.Sprint(perPodRequested-100) + "m"), + resource.MustParse(fmt.Sprint(perPodRequested+10) + "m"), + resource.MustParse(fmt.Sprint(perPodRequested-10) + "m"), + resource.MustParse(fmt.Sprint(perPodRequested+2) + "m"), + resource.MustParse(fmt.Sprint(perPodRequested-2) + "m"), + resource.MustParse(fmt.Sprint(perPodRequested+1) + "m"), + resource.MustParse(fmt.Sprint(perPodRequested-1) + "m"), + resource.MustParse(fmt.Sprint(perPodRequested) + "m"), + resource.MustParse(fmt.Sprint(perPodRequested) + "m"), + }, + useMetricsApi: true, + } + + tc.runTest(t) + + // Reuse the data structure above, now testing the opposite case ("unscaling"): + // with the target moved to within a very close margin of the tolerance, no scaling should happen. + target = math.Abs(1/(requestedToUsed*(1-tolerance))) + .004 + finalCpuPercentTarget = int32(target * 100) + tc.CPUTarget = finalCpuPercentTarget + tc.initialReplicas = startPods + tc.desiredReplicas = startPods + tc.runTest(t) +} + +// TODO: add more tests diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/podautoscaler/metrics/metrics_client.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/podautoscaler/metrics/metrics_client.go index 9e05767f3a3e..0e6f208ee625 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/podautoscaler/metrics/metrics_client.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/podautoscaler/metrics/metrics_client.go @@ -24,10 +24,12 @@ import ( "github.com/golang/glog" "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/labels" - heapster "k8s.io/heapster/api/v1/types" + heapster "k8s.io/heapster/metrics/api/v1/types" + metrics_api "k8s.io/heapster/metrics/apis/metrics/v1alpha1" ) const ( @@ -83,8 +85,6 @@ var averageFunction = func(metrics heapster.MetricResultList) (intAndFloat, int, return result, count, timestamp } -var heapsterCpuUsageMetricDefinition = metricDefinition{"cpu-usage", averageFunction} - func getHeapsterCustomMetricDefinition(metricName string) metricDefinition { return metricDefinition{"custom/" + metricName, averageFunction} } @@ -118,7 +118,7 @@ func (h *HeapsterMetricsClient) GetCpuConsumptionAndRequestInMillis(namespace st if err != nil { return 0, 0, time.Time{}, fmt.Errorf("failed to get pod list: %v", err) } - podNames := []string{} + podNames := map[string]struct{}{} requestSum := int64(0) missing := false for _, pod := range podList.Items { @@ -127,10 +127,9 @@ func (h *HeapsterMetricsClient) GetCpuConsumptionAndRequestInMillis(namespace st continue } - podNames = append(podNames, pod.Name) + podNames[pod.Name] = struct{}{} for _, container := range
pod.Spec.Containers { - containerRequest := container.Resources.Requests[api.ResourceCPU] - if containerRequest.Amount != nil { + if containerRequest, ok := container.Resources.Requests[api.ResourceCPU]; ok { requestSum += containerRequest.MilliValue() } else { missing = true @@ -146,11 +145,52 @@ func (h *HeapsterMetricsClient) GetCpuConsumptionAndRequestInMillis(namespace st glog.V(4).Infof("%s %s - sum of CPU requested: %d", namespace, selector, requestSum) requestAvg := requestSum / int64(len(podList.Items)) // Consumption is already averaged and in millis. - consumption, timestamp, err := h.getForPods(heapsterCpuUsageMetricDefinition, namespace, podNames) + consumption, timestamp, err := h.getCpuUtilizationForPods(namespace, selector, podNames) if err != nil { return 0, 0, time.Time{}, err } - return consumption.intValue, requestAvg, timestamp, nil + return consumption, requestAvg, timestamp, nil +} + +func (h *HeapsterMetricsClient) getCpuUtilizationForPods(namespace string, selector labels.Selector, podNames map[string]struct{}) (int64, time.Time, error) { + metricPath := fmt.Sprintf("/apis/metrics/v1alpha1/namespaces/%s/pods", namespace) + params := map[string]string{"labelSelector": selector.String()} + + resultRaw, err := h.client.Core().Services(h.heapsterNamespace). + ProxyGet(h.heapsterScheme, h.heapsterService, h.heapsterPort, metricPath, params). + DoRaw() + if err != nil { + return 0, time.Time{}, fmt.Errorf("failed to get pod metrics: %v", err) + } + + glog.V(4).Infof("Heapster metrics result: %s", string(resultRaw)) + + metrics := make([]metrics_api.PodMetrics, 0) + err = json.Unmarshal(resultRaw, &metrics) + if err != nil { + return 0, time.Time{}, fmt.Errorf("failed to unmarshal heapster response: %v", err) + } + + if len(metrics) != len(podNames) { + return 0, time.Time{}, fmt.Errorf("metrics obtained for %d/%d of pods", len(metrics), len(podNames)) + } + + sum := int64(0) + for _, m := range metrics { + if _, found := podNames[m.Name]; found { + for _, c := range m.Containers { + cpu, found := c.Usage[v1.ResourceCPU] + if !found { + return 0, time.Time{}, fmt.Errorf("no cpu for container %v in pod %v/%v", c.Name, namespace, m.Name) + } + sum += cpu.MilliValue() + } + } else { + return 0, time.Time{}, fmt.Errorf("unexpected metrics for pod %v/%v", namespace, m.Name) + } + } + + return sum / int64(len(metrics)), metrics[0].Timestamp.Time, nil } // GetCustomMetric returns the average value of the given custom metric from the @@ -175,14 +215,14 @@ func (h *HeapsterMetricsClient) GetCustomMetric(customMetricName string, namespa return nil, time.Time{}, fmt.Errorf("no running pods") } - value, timestamp, err := h.getForPods(metricSpec, namespace, podNames) + value, timestamp, err := h.getCustomMetricForPods(metricSpec, namespace, podNames) if err != nil { return nil, time.Time{}, err } return &value.floatValue, timestamp, nil } -func (h *HeapsterMetricsClient) getForPods(metricSpec metricDefinition, namespace string, podNames []string) (*intAndFloat, time.Time, error) { +func (h *HeapsterMetricsClient) getCustomMetricForPods(metricSpec metricDefinition, namespace string, podNames []string) (*intAndFloat, time.Time, error) { now := time.Now() diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/podautoscaler/metrics/metrics_client_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/podautoscaler/metrics/metrics_client_test.go new file mode 100644 index 000000000000..1467b2092385 --- /dev/null +++
b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/podautoscaler/metrics/metrics_client_test.go @@ -0,0 +1,474 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "encoding/json" + "fmt" + "io" + "testing" + "time" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/resource" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/api/v1" + _ "k8s.io/kubernetes/pkg/apimachinery/registered" + "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" + "k8s.io/kubernetes/pkg/client/restclient" + "k8s.io/kubernetes/pkg/client/testing/core" + "k8s.io/kubernetes/pkg/labels" + "k8s.io/kubernetes/pkg/runtime" + + heapster "k8s.io/heapster/metrics/api/v1/types" + metrics_api "k8s.io/heapster/metrics/apis/metrics/v1alpha1" + + "github.com/stretchr/testify/assert" +) + +var fixedTimestamp = time.Date(2015, time.November, 10, 12, 30, 0, 0, time.UTC) + +func (w fakeResponseWrapper) DoRaw() ([]byte, error) { + return w.raw, nil +} + +func (w fakeResponseWrapper) Stream() (io.ReadCloser, error) { + return nil, nil +} + +func newFakeResponseWrapper(raw []byte) fakeResponseWrapper { + return fakeResponseWrapper{raw: raw} +} + +type fakeResponseWrapper struct { + raw []byte +} + +// timestamp is used for establishing order on metricPoints +type metricPoint struct { + level uint64 + timestamp int +} + +type testCase struct { + replicas int + desiredValue float64 + desiredError error + targetResource string + targetTimestamp int + reportedMetricsPoints [][]metricPoint + reportedPodMetrics [][]int64 + namespace string + podListOverride *api.PodList + selector labels.Selector + useMetricsApi bool +} + +func (tc *testCase) prepareTestClient(t *testing.T) *fake.Clientset { + namespace := "test-namespace" + tc.namespace = namespace + podNamePrefix := "test-pod" + podLabels := map[string]string{"name": podNamePrefix} + tc.selector = labels.SelectorFromSet(podLabels) + + fakeClient := &fake.Clientset{} + + fakeClient.AddReactor("list", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) { + if tc.podListOverride != nil { + return true, tc.podListOverride, nil + } + obj := &api.PodList{} + for i := 0; i < tc.replicas; i++ { + podName := fmt.Sprintf("%s-%d", podNamePrefix, i) + pod := buildPod(namespace, podName, podLabels, api.PodRunning) + obj.Items = append(obj.Items, pod) + } + return true, obj, nil + }) + + if tc.useMetricsApi { + fakeClient.AddProxyReactor("services", func(action core.Action) (handled bool, ret restclient.ResponseWrapper, err error) { + metrics := []*metrics_api.PodMetrics{} + for i, containers := range tc.reportedPodMetrics { + metric := &metrics_api.PodMetrics{ + ObjectMeta: v1.ObjectMeta{ + Name: fmt.Sprintf("%s-%d", podNamePrefix, i), + Namespace: namespace, + }, + Timestamp: unversioned.Time{Time: fixedTimestamp.Add(time.Duration(tc.targetTimestamp) * time.Minute)}, + Containers: []metrics_api.ContainerMetrics{}, + } + for j, cpu := range 
containers { + cm := metrics_api.ContainerMetrics{ + Name: fmt.Sprintf("%s-%d-container-%d", podNamePrefix, i, j), + Usage: v1.ResourceList{ + v1.ResourceCPU: *resource.NewMilliQuantity( + cpu, + resource.DecimalSI), + v1.ResourceMemory: *resource.NewQuantity( + int64(1024*1024), + resource.BinarySI), + }, + } + metric.Containers = append(metric.Containers, cm) + } + metrics = append(metrics, metric) + } + heapsterRawMemResponse, _ := json.Marshal(&metrics) + return true, newFakeResponseWrapper(heapsterRawMemResponse), nil + }) + } else { + fakeClient.AddProxyReactor("services", func(action core.Action) (handled bool, ret restclient.ResponseWrapper, err error) { + metrics := heapster.MetricResultList{} + var latestTimestamp time.Time + for _, reportedMetricPoints := range tc.reportedMetricsPoints { + var heapsterMetricPoints []heapster.MetricPoint + for _, reportedMetricPoint := range reportedMetricPoints { + timestamp := fixedTimestamp.Add(time.Duration(reportedMetricPoint.timestamp) * time.Minute) + if latestTimestamp.Before(timestamp) { + latestTimestamp = timestamp + } + heapsterMetricPoint := heapster.MetricPoint{Timestamp: timestamp, Value: reportedMetricPoint.level, FloatValue: nil} + heapsterMetricPoints = append(heapsterMetricPoints, heapsterMetricPoint) + } + metric := heapster.MetricResult{ + Metrics: heapsterMetricPoints, + LatestTimestamp: latestTimestamp, + } + metrics.Items = append(metrics.Items, metric) + } + heapsterRawMemResponse, _ := json.Marshal(&metrics) + return true, newFakeResponseWrapper(heapsterRawMemResponse), nil + }) + } + + return fakeClient +} + +func buildPod(namespace, podName string, podLabels map[string]string, phase api.PodPhase) api.Pod { + return api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: podName, + Namespace: namespace, + Labels: podLabels, + }, + Spec: api.PodSpec{ + Containers: []api.Container{ + { + Resources: api.ResourceRequirements{ + Requests: api.ResourceList{ + api.ResourceCPU: resource.MustParse("10"), + }, + }, + }, + }, + }, + Status: api.PodStatus{ + Phase: phase, + }, + } +} + +func (tc *testCase) verifyResults(t *testing.T, val *float64, timestamp time.Time, err error) { + assert.Equal(t, tc.desiredError, err) + if tc.desiredError != nil { + return + } + assert.NotNil(t, val) + assert.True(t, tc.desiredValue-0.001 < *val) + assert.True(t, tc.desiredValue+0.001 > *val) + + targetTimestamp := fixedTimestamp.Add(time.Duration(tc.targetTimestamp) * time.Minute) + assert.True(t, targetTimestamp.Equal(timestamp)) +} + +func (tc *testCase) runTest(t *testing.T) { + testClient := tc.prepareTestClient(t) + metricsClient := NewHeapsterMetricsClient(testClient, DefaultHeapsterNamespace, DefaultHeapsterScheme, DefaultHeapsterService, DefaultHeapsterPort) + if tc.targetResource == "cpu-usage" { + val, _, timestamp, err := metricsClient.GetCpuConsumptionAndRequestInMillis(tc.namespace, tc.selector) + fval := float64(val) + tc.verifyResults(t, &fval, timestamp, err) + } else { + val, timestamp, err := metricsClient.GetCustomMetric(tc.targetResource, tc.namespace, tc.selector) + tc.verifyResults(t, val, timestamp, err) + } +} + +func TestCPU(t *testing.T) { + tc := testCase{ + replicas: 3, + desiredValue: 5000, + targetResource: "cpu-usage", + targetTimestamp: 1, + reportedPodMetrics: [][]int64{{5000}, {5000}, {5000}}, + useMetricsApi: true, + } + tc.runTest(t) +} + +func TestCPUPending(t *testing.T) { + tc := testCase{ + replicas: 4, + desiredValue: 5000, + targetResource: "cpu-usage", + targetTimestamp: 1, + reportedPodMetrics: 
[][]int64{{5000}, {5000}, {5000}}, + useMetricsApi: true, + podListOverride: &api.PodList{}, + } + + namespace := "test-namespace" + podNamePrefix := "test-pod" + podLabels := map[string]string{"name": podNamePrefix} + for i := 0; i < tc.replicas; i++ { + podName := fmt.Sprintf("%s-%d", podNamePrefix, i) + pod := buildPod(namespace, podName, podLabels, api.PodRunning) + tc.podListOverride.Items = append(tc.podListOverride.Items, pod) + } + tc.podListOverride.Items[3].Status.Phase = api.PodPending + + tc.runTest(t) +} + +func TestCPUAllPending(t *testing.T) { + tc := testCase{ + replicas: 4, + targetResource: "cpu-usage", + targetTimestamp: 1, + reportedPodMetrics: [][]int64{}, + useMetricsApi: true, + podListOverride: &api.PodList{}, + desiredError: fmt.Errorf("no running pods"), + } + + namespace := "test-namespace" + podNamePrefix := "test-pod" + podLabels := map[string]string{"name": podNamePrefix} + for i := 0; i < tc.replicas; i++ { + podName := fmt.Sprintf("%s-%d", podNamePrefix, i) + pod := buildPod(namespace, podName, podLabels, api.PodPending) + tc.podListOverride.Items = append(tc.podListOverride.Items, pod) + } + tc.runTest(t) +} + +func TestQPS(t *testing.T) { + tc := testCase{ + replicas: 3, + desiredValue: 13.33333, + targetResource: "qps", + targetTimestamp: 1, + reportedMetricsPoints: [][]metricPoint{{{10, 1}}, {{20, 1}}, {{10, 1}}}, + } + tc.runTest(t) +} + +func TestQPSPending(t *testing.T) { + tc := testCase{ + replicas: 4, + desiredValue: 13.33333, + targetResource: "qps", + targetTimestamp: 1, + reportedMetricsPoints: [][]metricPoint{{{10, 1}}, {{20, 1}}, {{10, 1}}}, + podListOverride: &api.PodList{}, + } + + namespace := "test-namespace" + podNamePrefix := "test-pod" + podLabels := map[string]string{"name": podNamePrefix} + for i := 0; i < tc.replicas; i++ { + podName := fmt.Sprintf("%s-%d", podNamePrefix, i) + pod := buildPod(namespace, podName, podLabels, api.PodRunning) + tc.podListOverride.Items = append(tc.podListOverride.Items, pod) + } + tc.podListOverride.Items[0].Status.Phase = api.PodPending + tc.runTest(t) +} + +func TestQPSAllPending(t *testing.T) { + tc := testCase{ + replicas: 4, + desiredError: fmt.Errorf("no running pods"), + targetResource: "qps", + targetTimestamp: 1, + reportedMetricsPoints: [][]metricPoint{}, + podListOverride: &api.PodList{}, + } + + namespace := "test-namespace" + podNamePrefix := "test-pod" + podLabels := map[string]string{"name": podNamePrefix} + for i := 0; i < tc.replicas; i++ { + podName := fmt.Sprintf("%s-%d", podNamePrefix, i) + pod := buildPod(namespace, podName, podLabels, api.PodPending) + tc.podListOverride.Items = append(tc.podListOverride.Items, pod) + } + tc.podListOverride.Items[0].Status.Phase = api.PodPending + tc.runTest(t) +} + +func TestCPUSumEqualZero(t *testing.T) { + tc := testCase{ + replicas: 3, + desiredValue: 0, + targetResource: "cpu-usage", + targetTimestamp: 0, + reportedPodMetrics: [][]int64{{0}, {0}, {0}}, + useMetricsApi: true, + } + tc.runTest(t) +} + +func TestQpsSumEqualZero(t *testing.T) { + tc := testCase{ + replicas: 3, + desiredValue: 0, + targetResource: "qps", + targetTimestamp: 0, + reportedMetricsPoints: [][]metricPoint{{{0, 0}}, {{0, 0}}, {{0, 0}}}, + } + tc.runTest(t) +} + +func TestCPUMoreMetrics(t *testing.T) { + tc := testCase{ + replicas: 5, + desiredValue: 5000, + targetResource: "cpu-usage", + targetTimestamp: 10, + reportedPodMetrics: [][]int64{{1000, 2000, 2000}, {5000}, {1000, 1000, 1000, 2000}, {4000, 1000}, {5000}}, + useMetricsApi: true, + } + tc.runTest(t) +} + +func 
TestCPUMissingMetrics(t *testing.T) { + tc := testCase{ + replicas: 3, + targetResource: "cpu-usage", + desiredError: fmt.Errorf("metrics obtained for 1/3 of pods"), + reportedPodMetrics: [][]int64{{4000}}, + useMetricsApi: true, + } + tc.runTest(t) +} + +func TestQpsMissingMetrics(t *testing.T) { + tc := testCase{ + replicas: 3, + targetResource: "qps", + desiredError: fmt.Errorf("metrics obtained for 1/3 of pods"), + reportedMetricsPoints: [][]metricPoint{{{4000, 4}}}, + } + tc.runTest(t) +} + +func TestCPUSuperfluousMetrics(t *testing.T) { + tc := testCase{ + replicas: 3, + targetResource: "cpu-usage", + desiredError: fmt.Errorf("metrics obtained for 6/3 of pods"), + reportedPodMetrics: [][]int64{{1000}, {2000}, {4000}, {4000}, {2000}, {4000}}, + useMetricsApi: true, + } + tc.runTest(t) +} + +func TestQpsSuperfluousMetrics(t *testing.T) { + tc := testCase{ + replicas: 3, + targetResource: "qps", + desiredError: fmt.Errorf("metrics obtained for 6/3 of pods"), + reportedMetricsPoints: [][]metricPoint{{{1000, 1}}, {{2000, 4}}, {{2000, 1}}, {{4000, 5}}, {{2000, 1}}, {{4000, 4}}}, + } + tc.runTest(t) +} + +func TestCPUEmptyMetrics(t *testing.T) { + tc := testCase{ + replicas: 3, + targetResource: "cpu-usage", + desiredError: fmt.Errorf("metrics obtained for 0/3 of pods"), + reportedMetricsPoints: [][]metricPoint{}, + reportedPodMetrics: [][]int64{}, + useMetricsApi: true, + } + tc.runTest(t) +} + +func TestCPUZeroReplicas(t *testing.T) { + tc := testCase{ + replicas: 0, + targetResource: "cpu-usage", + desiredError: fmt.Errorf("some pods do not have request for cpu"), + reportedPodMetrics: [][]int64{}, + useMetricsApi: true, + } + tc.runTest(t) +} + +func TestCPUEmptyMetricsForOnePod(t *testing.T) { + tc := testCase{ + replicas: 3, + targetResource: "cpu-usage", + desiredError: fmt.Errorf("metrics obtained for 2/3 of pods"), + reportedPodMetrics: [][]int64{{100}, {300, 400}}, + useMetricsApi: true, + } + tc.runTest(t) +} + +func TestAggregateSum(t *testing.T) { + //calculateSumFromTimeSample(metrics heapster.MetricResultList, duration time.Duration) (sum intAndFloat, count int, timestamp time.Time) { + now := time.Now() + result := heapster.MetricResultList{ + Items: []heapster.MetricResult{ + { + Metrics: []heapster.MetricPoint{ + {now, 50, nil}, + {now.Add(-15 * time.Second), 100, nil}, + {now.Add(-60 * time.Second), 100000, nil}}, + LatestTimestamp: now, + }, + }, + } + sum, cnt, _ := calculateSumFromTimeSample(result, time.Minute) + assert.Equal(t, int64(75), sum.intValue) + assert.InEpsilon(t, 75.0, sum.floatValue, 0.1) + assert.Equal(t, 1, cnt) +} + +func TestAggregateSumSingle(t *testing.T) { + now := time.Now() + result := heapster.MetricResultList{ + Items: []heapster.MetricResult{ + { + Metrics: []heapster.MetricPoint{ + {now, 50, nil}, + {now.Add(-65 * time.Second), 100000, nil}}, + LatestTimestamp: now, + }, + }, + } + sum, cnt, _ := calculateSumFromTimeSample(result, time.Minute) + assert.Equal(t, int64(50), sum.intValue) + assert.InEpsilon(t, 50.0, sum.floatValue, 0.1) + assert.Equal(t, 1, cnt) +} + +// TODO: add proper tests for request diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/replicaset/replica_set.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/replicaset/replica_set.go index 2012d7be7fec..e26ce810c8cc 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/replicaset/replica_set.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/replicaset/replica_set.go @@ -31,12 +31,13 @@ import ( 
"k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/client/cache" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" + unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned" "k8s.io/kubernetes/pkg/client/record" - unversionedcore "k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned" "k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller/framework" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/util/metrics" utilruntime "k8s.io/kubernetes/pkg/util/runtime" "k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/workqueue" @@ -98,7 +99,11 @@ type ReplicaSetController struct { func NewReplicaSetController(kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int, lookupCacheSize int) *ReplicaSetController { eventBroadcaster := record.NewBroadcaster() eventBroadcaster.StartLogging(glog.Infof) - eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{kubeClient.Core().Events("")}) + eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: kubeClient.Core().Events("")}) + + if kubeClient != nil && kubeClient.Core().GetRESTClient().GetRateLimiter() != nil { + metrics.RegisterMetricAndTrackRateLimiterUsage("replicaset_controller", kubeClient.Core().GetRESTClient().GetRateLimiter()) + } rsc := &ReplicaSetController{ kubeClient: kubeClient, @@ -168,7 +173,7 @@ func NewReplicaSetController(kubeClient clientset.Interface, resyncPeriod contro }, ) - rsc.podStore.Store, rsc.podController = framework.NewInformer( + rsc.podStore.Indexer, rsc.podController = framework.NewIndexerInformer( &cache.ListWatch{ ListFunc: func(options api.ListOptions) (runtime.Object, error) { return rsc.kubeClient.Core().Pods(api.NamespaceAll).List(options) @@ -187,6 +192,7 @@ func NewReplicaSetController(kubeClient clientset.Interface, resyncPeriod contro UpdateFunc: rsc.updatePod, DeleteFunc: rsc.deletePod, }, + cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, ) rsc.syncHandler = rsc.syncReplicaSet @@ -416,7 +422,7 @@ func (rsc *ReplicaSetController) worker() { // manageReplicas checks and updates replicas for the given ReplicaSet. func (rsc *ReplicaSetController) manageReplicas(filteredPods []*api.Pod, rs *extensions.ReplicaSet) { - diff := len(filteredPods) - rs.Spec.Replicas + diff := len(filteredPods) - int(rs.Spec.Replicas) rsKey, err := controller.KeyFunc(rs) if err != nil { glog.Errorf("Couldn't get key for ReplicaSet %#v: %v", rs, err) diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/replicaset/replica_set_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/replicaset/replica_set_test.go new file mode 100644 index 000000000000..d8324ebb0a87 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/replicaset/replica_set_test.go @@ -0,0 +1,1036 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// If you make changes to this file, you should also make the corresponding change in ReplicationController. + +package replicaset + +import ( + "fmt" + "math/rand" + "net/http/httptest" + "strings" + "testing" + "time" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/pkg/client/cache" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" + "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" + "k8s.io/kubernetes/pkg/client/restclient" + "k8s.io/kubernetes/pkg/client/testing/core" + "k8s.io/kubernetes/pkg/controller" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/securitycontext" + "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/sets" + utiltesting "k8s.io/kubernetes/pkg/util/testing" + "k8s.io/kubernetes/pkg/util/wait" + "k8s.io/kubernetes/pkg/watch" +) + +var alwaysReady = func() bool { return true } + +func getKey(rs *extensions.ReplicaSet, t *testing.T) string { + if key, err := controller.KeyFunc(rs); err != nil { + t.Errorf("Unexpected error getting key for ReplicaSet %v: %v", rs.Name, err) + return "" + } else { + return key + } +} + +func newReplicaSet(replicas int, selectorMap map[string]string) *extensions.ReplicaSet { + rs := &extensions.ReplicaSet{ + TypeMeta: unversioned.TypeMeta{APIVersion: testapi.Default.GroupVersion().String()}, + ObjectMeta: api.ObjectMeta{ + UID: util.NewUUID(), + Name: "foobar", + Namespace: api.NamespaceDefault, + ResourceVersion: "18", + }, + Spec: extensions.ReplicaSetSpec{ + Replicas: int32(replicas), + Selector: &unversioned.LabelSelector{MatchLabels: selectorMap}, + Template: api.PodTemplateSpec{ + ObjectMeta: api.ObjectMeta{ + Labels: map[string]string{ + "name": "foo", + "type": "production", + }, + }, + Spec: api.PodSpec{ + Containers: []api.Container{ + { + Image: "foo/bar", + TerminationMessagePath: api.TerminationMessagePathDefault, + ImagePullPolicy: api.PullIfNotPresent, + SecurityContext: securitycontext.ValidSecurityContextWithContainerDefaults(), + }, + }, + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSDefault, + NodeSelector: map[string]string{ + "baz": "blah", + }, + }, + }, + }, + } + return rs +} + +// create count pods with the given phase for the given ReplicaSet (same selectors and namespace), and add them to the store. +func newPodList(store cache.Store, count int, status api.PodPhase, labelMap map[string]string, rs *extensions.ReplicaSet, name string) *api.PodList { + pods := []api.Pod{} + for i := 0; i < count; i++ { + newPod := api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: fmt.Sprintf("%s%d", name, i), + Labels: labelMap, + Namespace: rs.Namespace, + }, + Status: api.PodStatus{Phase: status}, + } + if store != nil { + store.Add(&newPod) + } + pods = append(pods, newPod) + } + return &api.PodList{ + Items: pods, + } +} + +func validateSyncReplicaSet(t *testing.T, fakePodControl *controller.FakePodControl, expectedCreates, expectedDeletes int) { + if len(fakePodControl.Templates) != expectedCreates { + t.Errorf("Unexpected number of creates. Expected %d, saw %d\n", expectedCreates, len(fakePodControl.Templates)) + } + if len(fakePodControl.DeletePodName) != expectedDeletes { + t.Errorf("Unexpected number of deletes. 
Expected %d, saw %d\n", expectedDeletes, len(fakePodControl.DeletePodName)) + } +} + +func replicaSetResourceName() string { + return "replicasets" +} + +type serverResponse struct { + statusCode int + obj interface{} +} + +func TestSyncReplicaSetDoesNothing(t *testing.T) { + client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}) + fakePodControl := controller.FakePodControl{} + manager := NewReplicaSetController(client, controller.NoResyncPeriodFunc, BurstReplicas, 0) + manager.podStoreSynced = alwaysReady + + // 2 running pods, a controller with 2 replicas, sync is a no-op + labelMap := map[string]string{"foo": "bar"} + rsSpec := newReplicaSet(2, labelMap) + manager.rsStore.Store.Add(rsSpec) + newPodList(manager.podStore.Indexer, 2, api.PodRunning, labelMap, rsSpec, "pod") + + manager.podControl = &fakePodControl + manager.syncReplicaSet(getKey(rsSpec, t)) + validateSyncReplicaSet(t, &fakePodControl, 0, 0) +} + +func TestSyncReplicaSetDeletes(t *testing.T) { + client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}) + fakePodControl := controller.FakePodControl{} + manager := NewReplicaSetController(client, controller.NoResyncPeriodFunc, BurstReplicas, 0) + manager.podStoreSynced = alwaysReady + manager.podControl = &fakePodControl + + // 2 running pods and a controller with 1 replica, one pod delete expected + labelMap := map[string]string{"foo": "bar"} + rsSpec := newReplicaSet(1, labelMap) + manager.rsStore.Store.Add(rsSpec) + newPodList(manager.podStore.Indexer, 2, api.PodRunning, labelMap, rsSpec, "pod") + + manager.syncReplicaSet(getKey(rsSpec, t)) + validateSyncReplicaSet(t, &fakePodControl, 0, 1) +} + +func TestDeleteFinalStateUnknown(t *testing.T) { + client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}) + fakePodControl := controller.FakePodControl{} + manager := NewReplicaSetController(client, controller.NoResyncPeriodFunc, BurstReplicas, 0) + manager.podStoreSynced = alwaysReady + manager.podControl = &fakePodControl + + received := make(chan string) + manager.syncHandler = func(key string) error { + received <- key + return nil + } + + // The DeletedFinalStateUnknown object should cause the ReplicaSet manager to insert + // the controller matching the selectors of the deleted pod into the work queue. 
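+ // (cache.DeletedFinalStateUnknown is what the informer hands out when a delete was only + // discovered during a relist, after the watch lapsed; its Obj field carries the last + // known state of the pod.)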
+ labelMap := map[string]string{"foo": "bar"} + rsSpec := newReplicaSet(1, labelMap) + manager.rsStore.Store.Add(rsSpec) + pods := newPodList(nil, 1, api.PodRunning, labelMap, rsSpec, "pod") + manager.deletePod(cache.DeletedFinalStateUnknown{Key: "foo", Obj: &pods.Items[0]}) + + go manager.worker() + + expected := getKey(rsSpec, t) + select { + case key := <-received: + if key != expected { + t.Errorf("Unexpected sync all for ReplicaSet %v, expected %v", key, expected) + } + case <-time.After(wait.ForeverTestTimeout): + t.Errorf("Processing DeleteFinalStateUnknown took longer than expected") + } +} + +func TestSyncReplicaSetCreates(t *testing.T) { + client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}) + manager := NewReplicaSetController(client, controller.NoResyncPeriodFunc, BurstReplicas, 0) + manager.podStoreSynced = alwaysReady + + // A controller with 2 replicas and no pods in the store, 2 creates expected + labelMap := map[string]string{"foo": "bar"} + rs := newReplicaSet(2, labelMap) + manager.rsStore.Store.Add(rs) + + fakePodControl := controller.FakePodControl{} + manager.podControl = &fakePodControl + manager.syncReplicaSet(getKey(rs, t)) + validateSyncReplicaSet(t, &fakePodControl, 2, 0) +} + +func TestStatusUpdatesWithoutReplicasChange(t *testing.T) { + // Setup a fake server to listen for requests, and run the ReplicaSet controller in steady state + fakeHandler := utiltesting.FakeHandler{ + StatusCode: 200, + ResponseBody: "{}", + } + testServer := httptest.NewServer(&fakeHandler) + defer testServer.Close() + client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}) + manager := NewReplicaSetController(client, controller.NoResyncPeriodFunc, BurstReplicas, 0) + manager.podStoreSynced = alwaysReady + + // Steady state for the ReplicaSet, no Status.Replicas updates expected + activePods := 5 + labelMap := map[string]string{"foo": "bar"} + rs := newReplicaSet(activePods, labelMap) + manager.rsStore.Store.Add(rs) + rs.Status = extensions.ReplicaSetStatus{Replicas: int32(activePods)} + newPodList(manager.podStore.Indexer, activePods, api.PodRunning, labelMap, rs, "pod") + + fakePodControl := controller.FakePodControl{} + manager.podControl = &fakePodControl + manager.syncReplicaSet(getKey(rs, t)) + + validateSyncReplicaSet(t, &fakePodControl, 0, 0) + if fakeHandler.RequestReceived != nil { + t.Errorf("Unexpected update when pods and ReplicaSets are in a steady state") + } + + // This response body is just so we don't err out decoding the http response, all + // we care about is the request body sent below. 
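+ // (fakeHandler records the requests it serves; ValidateRequest below compares the recorded + // method, path, and body against the expected status PUT.)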
+ response := runtime.EncodeOrDie(testapi.Extensions.Codec(), &extensions.ReplicaSet{}) + fakeHandler.ResponseBody = response + + rs.Generation = rs.Generation + 1 + manager.syncReplicaSet(getKey(rs, t)) + + rs.Status.ObservedGeneration = rs.Generation + updatedRc := runtime.EncodeOrDie(testapi.Extensions.Codec(), rs) + fakeHandler.ValidateRequest(t, testapi.Extensions.ResourcePath(replicaSetResourceName(), rs.Namespace, rs.Name)+"/status", "PUT", &updatedRc) +} + +func TestControllerUpdateReplicas(t *testing.T) { + // This is a happy server just to record the PUT request we expect for status.Replicas + fakeHandler := utiltesting.FakeHandler{ + StatusCode: 200, + ResponseBody: "{}", + } + testServer := httptest.NewServer(&fakeHandler) + defer testServer.Close() + + client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}) + manager := NewReplicaSetController(client, controller.NoResyncPeriodFunc, BurstReplicas, 0) + manager.podStoreSynced = alwaysReady + + // Insufficient number of pods in the system, and Status.Replicas is wrong; + // Status.Replicas should update to match the number of pods in the system; 1 new pod should be created. + labelMap := map[string]string{"foo": "bar"} + extraLabelMap := map[string]string{"foo": "bar", "extraKey": "extraValue"} + rs := newReplicaSet(5, labelMap) + rs.Spec.Template.Labels = extraLabelMap + manager.rsStore.Store.Add(rs) + rs.Status = extensions.ReplicaSetStatus{Replicas: 2, FullyLabeledReplicas: 6, ObservedGeneration: 0} + rs.Generation = 1 + newPodList(manager.podStore.Indexer, 2, api.PodRunning, labelMap, rs, "pod") + newPodList(manager.podStore.Indexer, 2, api.PodRunning, extraLabelMap, rs, "podWithExtraLabel") + + // This response body is just so we don't err out decoding the http response + response := runtime.EncodeOrDie(testapi.Extensions.Codec(), &extensions.ReplicaSet{}) + fakeHandler.ResponseBody = response + + fakePodControl := controller.FakePodControl{} + manager.podControl = &fakePodControl + + manager.syncReplicaSet(getKey(rs, t)) + + // 1. Status.Replicas should go up from 2->4 even though we created 5-4=1 pod. + // 2. Status.FullyLabeledReplicas should equal the number of pods that + // have the extra labels, i.e., 2. + // 3. Every update to the status should include the Generation of the spec.
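+ // (The 4 matching pods are the 2 "pod" pods plus the 2 "podWithExtraLabel" pods; all four + // match the selector {"foo": "bar"}, but only the extra-labeled pair matches the template labels.)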
+ rs.Status = extensions.ReplicaSetStatus{Replicas: 4, FullyLabeledReplicas: 2, ObservedGeneration: 1} + + decRc := runtime.EncodeOrDie(testapi.Extensions.Codec(), rs) + fakeHandler.ValidateRequest(t, testapi.Extensions.ResourcePath(replicaSetResourceName(), rs.Namespace, rs.Name)+"/status", "PUT", &decRc) + validateSyncReplicaSet(t, &fakePodControl, 1, 0) +} + +func TestSyncReplicaSetDormancy(t *testing.T) { + // Set up a test server so we can lie about the current state of pods + fakeHandler := utiltesting.FakeHandler{ + StatusCode: 200, + ResponseBody: "{}", + } + testServer := httptest.NewServer(&fakeHandler) + defer testServer.Close() + client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}) + + fakePodControl := controller.FakePodControl{} + manager := NewReplicaSetController(client, controller.NoResyncPeriodFunc, BurstReplicas, 0) + manager.podStoreSynced = alwaysReady + manager.podControl = &fakePodControl + + labelMap := map[string]string{"foo": "bar"} + rsSpec := newReplicaSet(2, labelMap) + manager.rsStore.Store.Add(rsSpec) + newPodList(manager.podStore.Indexer, 1, api.PodRunning, labelMap, rsSpec, "pod") + + // Creates a replica and sets expectations + rsSpec.Status.Replicas = 1 + manager.syncReplicaSet(getKey(rsSpec, t)) + validateSyncReplicaSet(t, &fakePodControl, 1, 0) + + // Expectations prevent replica creation but not an update on status + rsSpec.Status.Replicas = 0 + fakePodControl.Clear() + manager.syncReplicaSet(getKey(rsSpec, t)) + validateSyncReplicaSet(t, &fakePodControl, 0, 0) + + // Get the key for the controller + rsKey, err := controller.KeyFunc(rsSpec) + if err != nil { + t.Errorf("Couldn't get key for object %+v: %v", rsSpec, err) + } + + // Lowering expectations should lead to a sync that creates a replica; however, the + // fakePodControl error will prevent this, leaving expectations at 0, 0 + manager.expectations.CreationObserved(rsKey) + rsSpec.Status.Replicas = 1 + fakePodControl.Clear() + fakePodControl.Err = fmt.Errorf("Fake Error") + + manager.syncReplicaSet(getKey(rsSpec, t)) + validateSyncReplicaSet(t, &fakePodControl, 0, 0) + + // This replica should not need a lowering of expectations, since the previous create failed + fakePodControl.Err = nil + manager.syncReplicaSet(getKey(rsSpec, t)) + validateSyncReplicaSet(t, &fakePodControl, 1, 0) + + // 1 PUT for the ReplicaSet status during the dormancy window. + // Note that the pod creates go through pod control so they're not recorded.
+ fakeHandler.ValidateRequestCount(t, 1) +} + +func TestPodControllerLookup(t *testing.T) { + manager := NewReplicaSetController(clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}), controller.NoResyncPeriodFunc, BurstReplicas, 0) + manager.podStoreSynced = alwaysReady + testCases := []struct { + inRSs []*extensions.ReplicaSet + pod *api.Pod + outRSName string + }{ + // pods without labels don't match any ReplicaSets + { + inRSs: []*extensions.ReplicaSet{ + {ObjectMeta: api.ObjectMeta{Name: "basic"}}}, + pod: &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo1", Namespace: api.NamespaceAll}}, + outRSName: "", + }, + // Matching labels, not namespace + { + inRSs: []*extensions.ReplicaSet{ + { + ObjectMeta: api.ObjectMeta{Name: "foo"}, + Spec: extensions.ReplicaSetSpec{ + Selector: &unversioned.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}, + }, + }, + }, + pod: &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: "foo2", Namespace: "ns", Labels: map[string]string{"foo": "bar"}}}, + outRSName: "", + }, + // Matching ns and labels returns the key to the ReplicaSet, not the ReplicaSet name + { + inRSs: []*extensions.ReplicaSet{ + { + ObjectMeta: api.ObjectMeta{Name: "bar", Namespace: "ns"}, + Spec: extensions.ReplicaSetSpec{ + Selector: &unversioned.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}, + }, + }, + }, + pod: &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: "foo3", Namespace: "ns", Labels: map[string]string{"foo": "bar"}}}, + outRSName: "bar", + }, + } + for _, c := range testCases { + for _, r := range c.inRSs { + manager.rsStore.Add(r) + } + if rs := manager.getPodReplicaSet(c.pod); rs != nil { + if c.outRSName != rs.Name { + t.Errorf("Got replica set %+v expected %+v", rs.Name, c.outRSName) + } + } else if c.outRSName != "" { + t.Errorf("Expected a replica set %v pod %v, found none", c.outRSName, c.pod.Name) + } + } +} + +type FakeWatcher struct { + w *watch.FakeWatcher + *fake.Clientset +} + +func TestWatchControllers(t *testing.T) { + fakeWatch := watch.NewFake() + client := &fake.Clientset{} + client.AddWatchReactor("*", core.DefaultWatchReactor(fakeWatch, nil)) + manager := NewReplicaSetController(client, controller.NoResyncPeriodFunc, BurstReplicas, 0) + manager.podStoreSynced = alwaysReady + + var testRSSpec extensions.ReplicaSet + received := make(chan string) + + // The update sent through the fakeWatcher should make its way into the workqueue, + // and eventually into the syncHandler. The handler validates the received controller + // and closes the received channel to indicate that the test can finish. + manager.syncHandler = func(key string) error { + + obj, exists, err := manager.rsStore.Store.GetByKey(key) + if !exists || err != nil { + t.Errorf("Expected to find replica set under key %v", key) + } + rsSpec := *obj.(*extensions.ReplicaSet) + if !api.Semantic.DeepDerivative(rsSpec, testRSSpec) { + t.Errorf("Expected %#v, but got %#v", testRSSpec, rsSpec) + } + close(received) + return nil + } + // Start only the ReplicaSet watcher and the workqueue, send a watch event, + // and make sure it hits the sync method. 
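+ // (watch.FakeWatcher lets the test inject events as if they came from the API server; + // the fakeWatch.Add call below delivers an ADDED event for testRSSpec.)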
+ stopCh := make(chan struct{}) + defer close(stopCh) + go manager.rsController.Run(stopCh) + go wait.Until(manager.worker, 10*time.Millisecond, stopCh) + + testRSSpec.Name = "foo" + fakeWatch.Add(&testRSSpec) + + select { + case <-received: + case <-time.After(wait.ForeverTestTimeout): + t.Errorf("Expected 1 call but got 0") + } +} + +func TestWatchPods(t *testing.T) { + fakeWatch := watch.NewFake() + client := &fake.Clientset{} + client.AddWatchReactor("*", core.DefaultWatchReactor(fakeWatch, nil)) + manager := NewReplicaSetController(client, controller.NoResyncPeriodFunc, BurstReplicas, 0) + manager.podStoreSynced = alwaysReady + + // Put one ReplicaSet and one pod into the controller's stores + labelMap := map[string]string{"foo": "bar"} + testRSSpec := newReplicaSet(1, labelMap) + manager.rsStore.Store.Add(testRSSpec) + received := make(chan string) + // The pod update sent through the fakeWatcher should figure out the managing ReplicaSet and + // send it into the syncHandler. + manager.syncHandler = func(key string) error { + + obj, exists, err := manager.rsStore.Store.GetByKey(key) + if !exists || err != nil { + t.Errorf("Expected to find replica set under key %v", key) + } + rsSpec := obj.(*extensions.ReplicaSet) + if !api.Semantic.DeepDerivative(rsSpec, testRSSpec) { + t.Errorf("\nExpected %#v,\nbut got %#v", testRSSpec, rsSpec) + } + close(received) + return nil + } + // Start only the pod watcher and the workqueue, send a watch event, + // and make sure it hits the sync method for the right ReplicaSet. + stopCh := make(chan struct{}) + defer close(stopCh) + go manager.podController.Run(stopCh) + go wait.Until(manager.worker, 10*time.Millisecond, stopCh) + + pods := newPodList(nil, 1, api.PodRunning, labelMap, testRSSpec, "pod") + testPod := pods.Items[0] + testPod.Status.Phase = api.PodFailed + fakeWatch.Add(&testPod) + + select { + case <-received: + case <-time.After(wait.ForeverTestTimeout): + t.Errorf("Expected 1 call but got 0") + } +} + +func TestUpdatePods(t *testing.T) { + manager := NewReplicaSetController(fake.NewSimpleClientset(), controller.NoResyncPeriodFunc, BurstReplicas, 0) + manager.podStoreSynced = alwaysReady + + received := make(chan string) + + manager.syncHandler = func(key string) error { + obj, exists, err := manager.rsStore.Store.GetByKey(key) + if !exists || err != nil { + t.Errorf("Expected to find replica set under key %v", key) + } + received <- obj.(*extensions.ReplicaSet).Name + return nil + } + + stopCh := make(chan struct{}) + defer close(stopCh) + go wait.Until(manager.worker, 10*time.Millisecond, stopCh) + + // Put 2 ReplicaSets and one pod into the controller's stores + labelMap1 := map[string]string{"foo": "bar"} + testRSSpec1 := newReplicaSet(1, labelMap1) + manager.rsStore.Store.Add(testRSSpec1) + testRSSpec2 := *testRSSpec1 + labelMap2 := map[string]string{"bar": "foo"} + testRSSpec2.Spec.Selector = &unversioned.LabelSelector{MatchLabels: labelMap2} + testRSSpec2.Name = "barfoo" + manager.rsStore.Store.Add(&testRSSpec2) + + // Put one pod in the podStore + pod1 := newPodList(manager.podStore.Indexer, 1, api.PodRunning, labelMap1, testRSSpec1, "pod").Items[0] + pod2 := pod1 + pod2.Labels = labelMap2 + + // Send an update of the same pod with modified labels, and confirm we get a sync request for + // both controllers + manager.updatePod(&pod1, &pod2) + + expected := sets.NewString(testRSSpec1.Name, testRSSpec2.Name) + for _, name := range expected.List() { + t.Logf("Expecting update for %+v", name) + select { + case got := <-received: + if 
!expected.Has(got) { + t.Errorf("Expected keys %#v got %v", expected, got) + } + case <-time.After(wait.ForeverTestTimeout): + t.Errorf("Expected update notifications for replica sets within 100ms each") + } + } +} + +func TestControllerUpdateRequeue(t *testing.T) { + // This server should force a requeue of the controller because it fails to update status.Replicas. + fakeHandler := utiltesting.FakeHandler{ + StatusCode: 500, + ResponseBody: "{}", + } + testServer := httptest.NewServer(&fakeHandler) + defer testServer.Close() + + client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}) + manager := NewReplicaSetController(client, controller.NoResyncPeriodFunc, BurstReplicas, 0) + manager.podStoreSynced = alwaysReady + + labelMap := map[string]string{"foo": "bar"} + rs := newReplicaSet(1, labelMap) + manager.rsStore.Store.Add(rs) + rs.Status = extensions.ReplicaSetStatus{Replicas: 2} + newPodList(manager.podStore.Indexer, 1, api.PodRunning, labelMap, rs, "pod") + + fakePodControl := controller.FakePodControl{} + manager.podControl = &fakePodControl + + manager.syncReplicaSet(getKey(rs, t)) + + ch := make(chan interface{}) + go func() { + item, _ := manager.queue.Get() + ch <- item + }() + select { + case key := <-ch: + expectedKey := getKey(rs, t) + if key != expectedKey { + t.Errorf("Expected requeue of replica set with key %s got %s", expectedKey, key) + } + case <-time.After(wait.ForeverTestTimeout): + manager.queue.ShutDown() + t.Errorf("Expected to find a ReplicaSet in the queue, found none.") + } + // 1 Update and 1 GET, both of which fail + fakeHandler.ValidateRequestCount(t, 2) +} + +func TestControllerUpdateStatusWithFailure(t *testing.T) { + rs := newReplicaSet(1, map[string]string{"foo": "bar"}) + fakeClient := &fake.Clientset{} + fakeClient.AddReactor("get", "replicasets", func(action core.Action) (bool, runtime.Object, error) { return true, rs, nil }) + fakeClient.AddReactor("*", "*", func(action core.Action) (bool, runtime.Object, error) { + return true, &extensions.ReplicaSet{}, fmt.Errorf("Fake error") + }) + fakeRSClient := fakeClient.Extensions().ReplicaSets("default") + numReplicas := 10 + updateReplicaCount(fakeRSClient, *rs, numReplicas, 0) + updates, gets := 0, 0 + for _, a := range fakeClient.Actions() { + if a.GetResource().Resource != "replicasets" { + t.Errorf("Unexpected action %+v", a) + continue + } + + switch action := a.(type) { + case core.GetAction: + gets++ + // Make sure the get is for the right ReplicaSet even though the update failed. + if action.GetName() != rs.Name { + t.Errorf("Expected get for ReplicaSet %v, got %+v instead", rs.Name, action.GetName()) + } + case core.UpdateAction: + updates++ + // Confirm that the update has the right status.Replicas even though the Get + // returned a ReplicaSet with replicas=1. + if c, ok := action.GetObject().(*extensions.ReplicaSet); !ok { + t.Errorf("Expected a ReplicaSet as the argument to update, got %T", c) + } else if int(c.Status.Replicas) != numReplicas { + t.Errorf("Expected update for ReplicaSet to contain replicas %v, got %v instead", + numReplicas, c.Status.Replicas) + } + default: + t.Errorf("Unexpected action %+v", a) + break + } + } + if gets != 1 || updates != 2 { + t.Errorf("Expected 1 get and 2 updates, got %d gets %d updates", gets, updates) + } +} + +// TODO: This test is too hairy for a unittest. It should be moved to an E2E suite. 
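+// doTestControllerBurstReplicas sizes the ReplicaSet up to numReplicas and back down to zero, +// checking at each sync that no more than burstReplicas creates/deletes are issued and that +// outstanding expectations keep the controller dormant until the watch events are observed.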
+func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int) { + client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}) + fakePodControl := controller.FakePodControl{} + manager := NewReplicaSetController(client, controller.NoResyncPeriodFunc, burstReplicas, 0) + manager.podStoreSynced = alwaysReady + manager.podControl = &fakePodControl + + labelMap := map[string]string{"foo": "bar"} + rsSpec := newReplicaSet(numReplicas, labelMap) + manager.rsStore.Store.Add(rsSpec) + + expectedPods := int32(0) + pods := newPodList(nil, numReplicas, api.PodPending, labelMap, rsSpec, "pod") + + rsKey, err := controller.KeyFunc(rsSpec) + if err != nil { + t.Errorf("Couldn't get key for object %+v: %v", rsSpec, err) + } + + // Size up the controller, then size it down, and confirm the expected create/delete pattern + for _, replicas := range []int32{int32(numReplicas), 0} { + + rsSpec.Spec.Replicas = replicas + manager.rsStore.Store.Add(rsSpec) + + for i := 0; i < numReplicas; i += burstReplicas { + manager.syncReplicaSet(getKey(rsSpec, t)) + + // The store accrues active pods. It's also used by the ReplicaSet to determine how many + // replicas to create. + activePods := int32(len(manager.podStore.Indexer.List())) + if replicas != 0 { + // This is the number of pods currently "in flight". They were created by the + // ReplicaSet controller above, which then puts the ReplicaSet to sleep until + // all of them have been observed. + expectedPods = replicas - activePods + if expectedPods > int32(burstReplicas) { + expectedPods = int32(burstReplicas) + } + // This validates the ReplicaSet manager sync actually created pods + validateSyncReplicaSet(t, &fakePodControl, int(expectedPods), 0) + + // This simulates the watch events for all but 1 of the expected pods. + // None of these should wake the controller because it has expectations==BurstReplicas. + for i := int32(0); i < expectedPods-1; i++ { + manager.podStore.Indexer.Add(&pods.Items[i]) + manager.addPod(&pods.Items[i]) + } + + podExp, exists, err := manager.expectations.GetExpectations(rsKey) + if !exists || err != nil { + t.Fatalf("Did not find expectations for ReplicaSet.") + } + if add, _ := podExp.GetExpectations(); add != 1 { + t.Fatalf("Expectations are wrong %v", podExp) + } + } else { + expectedPods = (replicas - activePods) * -1 + if expectedPods > int32(burstReplicas) { + expectedPods = int32(burstReplicas) + } + validateSyncReplicaSet(t, &fakePodControl, 0, int(expectedPods)) + + // To accurately simulate a watch we must delete the exact pods + // the rs is waiting for. + expectedDels := manager.expectations.GetUIDs(getKey(rsSpec, t)) + podsToDelete := []*api.Pod{} + for _, key := range expectedDels.List() { + nsName := strings.Split(key, "/") + podsToDelete = append(podsToDelete, &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: nsName[1], + Namespace: nsName[0], + Labels: rsSpec.Spec.Selector.MatchLabels, + }, + }) + } + // Don't delete all pods because we confirm that the last pod + // has exactly one expectation at the end, to verify that we + // don't double delete.
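+ // NOTE: ranging over podsToDelete[1:] yields indices 0..len-2, and the loop below indexes the original slice with them, so every pod except the last one is deleted.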
+ for i := range podsToDelete[1:] { + manager.podStore.Indexer.Delete(podsToDelete[i]) + manager.deletePod(podsToDelete[i]) + } + podExp, exists, err := manager.expectations.GetExpectations(rsKey) + if !exists || err != nil { + t.Fatalf("Did not find expectations for ReplicaSet.") + } + if _, del := podExp.GetExpectations(); del != 1 { + t.Fatalf("Expectations are wrong %v", podExp) + } + } + + // Check that the ReplicaSet didn't take any action for all the above pods + fakePodControl.Clear() + manager.syncReplicaSet(getKey(rsSpec, t)) + validateSyncReplicaSet(t, &fakePodControl, 0, 0) + + // Create/Delete the last pod + // The last add pod will decrease the expectation of the ReplicaSet to 0, + // which will cause it to create/delete the remaining replicas up to burstReplicas. + if replicas != 0 { + manager.podStore.Indexer.Add(&pods.Items[expectedPods-1]) + manager.addPod(&pods.Items[expectedPods-1]) + } else { + expectedDel := manager.expectations.GetUIDs(getKey(rsSpec, t)) + if expectedDel.Len() != 1 { + t.Fatalf("Waiting on unexpected number of deletes.") + } + nsName := strings.Split(expectedDel.List()[0], "/") + lastPod := &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: nsName[1], + Namespace: nsName[0], + Labels: rsSpec.Spec.Selector.MatchLabels, + }, + } + manager.podStore.Indexer.Delete(lastPod) + manager.deletePod(lastPod) + } + pods.Items = pods.Items[expectedPods:] + } + + // Confirm that we've created the right number of replicas + activePods := int32(len(manager.podStore.Indexer.List())) + if activePods != rsSpec.Spec.Replicas { + t.Fatalf("Unexpected number of active pods, expected %d, got %d", rsSpec.Spec.Replicas, activePods) + } + // Replenish the pod list, since we cut it down sizing up + pods = newPodList(nil, int(replicas), api.PodRunning, labelMap, rsSpec, "pod") + } +} + +func TestControllerBurstReplicas(t *testing.T) { + doTestControllerBurstReplicas(t, 5, 30) + doTestControllerBurstReplicas(t, 5, 12) + doTestControllerBurstReplicas(t, 3, 2) +} + +type FakeRSExpectations struct { + *controller.ControllerExpectations + satisfied bool + expSatisfied func() +} + +func (fe FakeRSExpectations) SatisfiedExpectations(controllerKey string) bool { + fe.expSatisfied() + return fe.satisfied +} + +// TestRSSyncExpectations tests that a pod cannot sneak in between counting active pods +// and checking expectations. +func TestRSSyncExpectations(t *testing.T) { + client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}) + fakePodControl := controller.FakePodControl{} + manager := NewReplicaSetController(client, controller.NoResyncPeriodFunc, 2, 0) + manager.podStoreSynced = alwaysReady + manager.podControl = &fakePodControl + + labelMap := map[string]string{"foo": "bar"} + rsSpec := newReplicaSet(2, labelMap) + manager.rsStore.Store.Add(rsSpec) + pods := newPodList(nil, 2, api.PodPending, labelMap, rsSpec, "pod") + manager.podStore.Indexer.Add(&pods.Items[0]) + postExpectationsPod := pods.Items[1] + + manager.expectations = controller.NewUIDTrackingControllerExpectations(FakeRSExpectations{ + controller.NewControllerExpectations(), true, func() { + // If we check active pods before checking expectations, the + // ReplicaSet will create a new replica because it doesn't see + // this pod, but has fulfilled its expectations.
+ manager.podStore.Indexer.Add(&postExpectationsPod) + }, + }) + manager.syncReplicaSet(getKey(rsSpec, t)) + validateSyncReplicaSet(t, &fakePodControl, 0, 0) +} + +func TestDeleteControllerAndExpectations(t *testing.T) { + client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}) + manager := NewReplicaSetController(client, controller.NoResyncPeriodFunc, 10, 0) + manager.podStoreSynced = alwaysReady + + rs := newReplicaSet(1, map[string]string{"foo": "bar"}) + manager.rsStore.Store.Add(rs) + + fakePodControl := controller.FakePodControl{} + manager.podControl = &fakePodControl + + // This should set expectations for the ReplicaSet + manager.syncReplicaSet(getKey(rs, t)) + validateSyncReplicaSet(t, &fakePodControl, 1, 0) + fakePodControl.Clear() + + // Get the ReplicaSet key + rsKey, err := controller.KeyFunc(rs) + if err != nil { + t.Errorf("Couldn't get key for object %+v: %v", rs, err) + } + + // This is to simulate a concurrent addPod that has a handle on the expectations + // as the controller deletes it. + podExp, exists, err := manager.expectations.GetExpectations(rsKey) + if !exists || err != nil { + t.Errorf("No expectations found for ReplicaSet") + } + manager.rsStore.Store.Delete(rs) + manager.syncReplicaSet(getKey(rs, t)) + + if _, exists, err = manager.expectations.GetExpectations(rsKey); exists { + t.Errorf("Found expectations, expected none since the ReplicaSet has been deleted.") + } + + // This should have no effect, since we've deleted the ReplicaSet. + podExp.Add(-1, 0) + manager.podStore.Indexer.Replace(make([]interface{}, 0), "0") + manager.syncReplicaSet(getKey(rs, t)) + validateSyncReplicaSet(t, &fakePodControl, 0, 0) +} + +func TestRSManagerNotReady(t *testing.T) { + client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}) + fakePodControl := controller.FakePodControl{} + manager := NewReplicaSetController(client, controller.NoResyncPeriodFunc, 2, 0) + manager.podControl = &fakePodControl + manager.podStoreSynced = func() bool { return false } + + // Simulates the ReplicaSet reflector running before the pod reflector. We don't + // want to end up creating replicas in this case until the pod reflector + // has synced, so the ReplicaSet controller should just requeue the ReplicaSet. + rsSpec := newReplicaSet(1, map[string]string{"foo": "bar"}) + manager.rsStore.Store.Add(rsSpec) + + rsKey := getKey(rsSpec, t) + manager.syncReplicaSet(rsKey) + validateSyncReplicaSet(t, &fakePodControl, 0, 0) + queueRS, _ := manager.queue.Get() + if queueRS != rsKey { + t.Fatalf("Expected to find key %v in queue, found %v", rsKey, queueRS) + } + + manager.podStoreSynced = alwaysReady + manager.syncReplicaSet(rsKey) + validateSyncReplicaSet(t, &fakePodControl, 1, 0) +} + +// shuffle returns a new shuffled list of ReplicaSets.
+func shuffle(controllers []*extensions.ReplicaSet) []*extensions.ReplicaSet { + numControllers := len(controllers) + randIndexes := rand.Perm(numControllers) + shuffled := make([]*extensions.ReplicaSet, numControllers) + for i := 0; i < numControllers; i++ { + shuffled[i] = controllers[randIndexes[i]] + } + return shuffled +} + +func TestOverlappingRSs(t *testing.T) { + client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}) + labelMap := map[string]string{"foo": "bar"} + + for i := 0; i < 5; i++ { + manager := NewReplicaSetController(client, controller.NoResyncPeriodFunc, 10, 0) + manager.podStoreSynced = alwaysReady + + // Create 10 ReplicaSets, shuffle them randomly, and insert them into the ReplicaSet controller's store + var controllers []*extensions.ReplicaSet + for j := 1; j <= 10; j++ { + rsSpec := newReplicaSet(1, labelMap) + rsSpec.CreationTimestamp = unversioned.Date(2014, time.December, j, 0, 0, 0, 0, time.Local) + rsSpec.Name = string(util.NewUUID()) + controllers = append(controllers, rsSpec) + } + shuffledControllers := shuffle(controllers) + for j := range shuffledControllers { + manager.rsStore.Store.Add(shuffledControllers[j]) + } + // Add a pod and make sure only the oldest ReplicaSet is synced + pods := newPodList(nil, 1, api.PodPending, labelMap, controllers[0], "pod") + rsKey := getKey(controllers[0], t) + + manager.addPod(&pods.Items[0]) + queueRS, _ := manager.queue.Get() + if queueRS != rsKey { + t.Fatalf("Expected to find key %v in queue, found %v", rsKey, queueRS) + } + } +} + +func TestDeletionTimestamp(t *testing.T) { + c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}) + labelMap := map[string]string{"foo": "bar"} + manager := NewReplicaSetController(c, controller.NoResyncPeriodFunc, 10, 0) + manager.podStoreSynced = alwaysReady + + rs := newReplicaSet(1, labelMap) + manager.rsStore.Store.Add(rs) + rsKey, err := controller.KeyFunc(rs) + if err != nil { + t.Errorf("Couldn't get key for object %+v: %v", rs, err) + } + pod := newPodList(nil, 1, api.PodPending, labelMap, rs, "pod").Items[0] + pod.DeletionTimestamp = &unversioned.Time{Time: time.Now()} + manager.expectations.ExpectDeletions(rsKey, []string{controller.PodKey(&pod)}) + + // A pod added with a deletion timestamp should decrement deletions, not creations. + manager.addPod(&pod) + + queueRS, _ := manager.queue.Get() + if queueRS != rsKey { + t.Fatalf("Expected to find key %v in queue, found %v", rsKey, queueRS) + } + manager.queue.Done(rsKey) + + podExp, exists, err := manager.expectations.GetExpectations(rsKey) + if !exists || err != nil || !podExp.Fulfilled() { + t.Fatalf("Wrong expectations %+v", podExp) + } + + // An update from no deletion timestamp to having one should be treated + // as a deletion.
+ oldPod := newPodList(nil, 1, api.PodPending, labelMap, rs, "pod").Items[0] + manager.expectations.ExpectDeletions(rsKey, []string{controller.PodKey(&pod)}) + manager.updatePod(&oldPod, &pod) + + queueRS, _ = manager.queue.Get() + if queueRS != rsKey { + t.Fatalf("Expected to find key %v in queue, found %v", rsKey, queueRS) + } + manager.queue.Done(rsKey) + + podExp, exists, err = manager.expectations.GetExpectations(rsKey) + if !exists || err != nil || !podExp.Fulfilled() { + t.Fatalf("Wrong expectations %+v", podExp) + } + + // An update to the pod (including an update to the deletion timestamp) + // should not be counted as a second delete. + secondPod := &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Namespace: pod.Namespace, + Name: "secondPod", + Labels: pod.Labels, + }, + } + manager.expectations.ExpectDeletions(rsKey, []string{controller.PodKey(secondPod)}) + oldPod.DeletionTimestamp = &unversioned.Time{Time: time.Now()} + manager.updatePod(&oldPod, &pod) + + podExp, exists, err = manager.expectations.GetExpectations(rsKey) + if !exists || err != nil || podExp.Fulfilled() { + t.Fatalf("Wrong expectations %+v", podExp) + } + + // A pod with a non-nil deletion timestamp should also be ignored by the + // delete handler, because it's already been counted in the update. + manager.deletePod(&pod) + podExp, exists, err = manager.expectations.GetExpectations(rsKey) + if !exists || err != nil || podExp.Fulfilled() { + t.Fatalf("Wrong expectations %+v", podExp) + } + + // Deleting the second pod should clear expectations. + manager.deletePod(secondPod) + + queueRS, _ = manager.queue.Get() + if queueRS != rsKey { + t.Fatalf("Expected to find key %v in queue, found %v", rsKey, queueRS) + } + manager.queue.Done(rsKey) + + podExp, exists, err = manager.expectations.GetExpectations(rsKey) + if !exists || err != nil || !podExp.Fulfilled() { + t.Fatalf("Wrong expectations %+v", podExp) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/replicaset/replica_set_utils.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/replicaset/replica_set_utils.go index 382f2aee44db..fd8bd70626e4 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/replicaset/replica_set_utils.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/replicaset/replica_set_utils.go @@ -31,8 +31,8 @@ func updateReplicaCount(rsClient client.ReplicaSetInterface, rs extensions.Repli // This is the steady state. It happens when the ReplicaSet doesn't have any expectations, since // we do a periodic relist every 30s. If the generations differ but the replicas are // the same, a caller might've resized to the same replica count.
- if rs.Status.Replicas == numReplicas && - rs.Status.FullyLabeledReplicas == numFullyLabeledReplicas && + if int(rs.Status.Replicas) == numReplicas && + int(rs.Status.FullyLabeledReplicas) == numFullyLabeledReplicas && rs.Generation == rs.Status.ObservedGeneration { return nil } @@ -49,7 +49,7 @@ func updateReplicaCount(rsClient client.ReplicaSetInterface, rs extensions.Repli fmt.Sprintf("fullyLabeledReplicas %d->%d, ", rs.Status.FullyLabeledReplicas, numFullyLabeledReplicas) + fmt.Sprintf("sequence No: %v->%v", rs.Status.ObservedGeneration, generation)) - rs.Status = extensions.ReplicaSetStatus{Replicas: numReplicas, FullyLabeledReplicas: numFullyLabeledReplicas, ObservedGeneration: generation} + rs.Status = extensions.ReplicaSetStatus{Replicas: int32(numReplicas), FullyLabeledReplicas: int32(numFullyLabeledReplicas), ObservedGeneration: generation} _, updateErr = rsClient.UpdateStatus(rs) if updateErr == nil || i >= statusUpdateRetries { return updateErr diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/replication/replication_controller.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/replication/replication_controller.go index 518b094bde23..6fda7ce89697 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/replication/replication_controller.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/replication/replication_controller.go @@ -28,12 +28,15 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/client/cache" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" + unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned" "k8s.io/kubernetes/pkg/client/record" - unversionedcore "k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned" "k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller/framework" + "k8s.io/kubernetes/pkg/controller/framework/informers" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/metrics" utilruntime "k8s.io/kubernetes/pkg/util/runtime" "k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/workqueue" @@ -44,7 +47,11 @@ const ( // We'll attempt to recompute the required replicas of all replication controllers // that have fulfilled their expectations at least this often. This recomputation // happens based on contents in local pod storage. - FullControllerResyncPeriod = 30 * time.Second + // Full Resync shouldn't be needed at all in a healthy system. This is a protection + // against disappearing objects and missed watch notifications, which we believe should + // not happen at all. + // TODO: We should get rid of it completely in the fullness of time. + FullControllerResyncPeriod = 10 * time.Minute // Realistic value of the burstReplica field for the replication manager based off // performance requirements for kubernetes 1.0. @@ -66,6 +73,13 @@ type ReplicationManager struct { kubeClient clientset.Interface podControl controller.PodControlInterface + // internalPodInformer is used to hold a personal informer. If we're using + // a normal shared informer, then the informer will be started for us. If + // we have a personal informer, we must start it ourselves. If you start + // the controller using NewReplicationManager (passing a SharedInformer), this + // will be nil + internalPodInformer framework.SharedIndexInformer + // An rc is temporarily suspended after creating/deleting this many replicas.
// It resumes normal action after observing the watch events for them. burstReplicas int @@ -82,7 +96,7 @@ type ReplicationManager struct { // A store of pods, populated by the podController podStore cache.StoreToPodLister // Watches changes to all pods - podController *framework.Controller + podController framework.ControllerInterface // podStoreSynced returns true if the pod store has been synced at least once. // Added as a member to the struct to allow injection for testing. podStoreSynced func() bool @@ -93,24 +107,34 @@ type ReplicationManager struct { queue *workqueue.Type } -// NewReplicationManager creates a new ReplicationManager. -func NewReplicationManager(kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int, lookupCacheSize int) *ReplicationManager { +// NewReplicationManager creates a replication manager +func NewReplicationManager(podInformer framework.SharedIndexInformer, kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int, lookupCacheSize int) *ReplicationManager { eventBroadcaster := record.NewBroadcaster() eventBroadcaster.StartLogging(glog.Infof) - eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{kubeClient.Core().Events("")}) + eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: kubeClient.Core().Events("")}) + return newReplicationManagerInternal( + eventBroadcaster.NewRecorder(api.EventSource{Component: "replication-controller"}), + podInformer, kubeClient, resyncPeriod, burstReplicas, lookupCacheSize) +} + +// newReplicationManagerInternal configures a replication manager with the specified event recorder +func newReplicationManagerInternal(eventRecorder record.EventRecorder, podInformer framework.SharedIndexInformer, kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int, lookupCacheSize int) *ReplicationManager { + if kubeClient != nil && kubeClient.Core().GetRESTClient().GetRateLimiter() != nil { + metrics.RegisterMetricAndTrackRateLimiterUsage("replication_controller", kubeClient.Core().GetRESTClient().GetRateLimiter()) + } rm := &ReplicationManager{ kubeClient: kubeClient, podControl: controller.RealPodControl{ KubeClient: kubeClient, - Recorder: eventBroadcaster.NewRecorder(api.EventSource{Component: "replication-controller"}), + Recorder: eventRecorder, }, burstReplicas: burstReplicas, expectations: controller.NewUIDTrackingControllerExpectations(controller.NewControllerExpectations()), queue: workqueue.New(), } - rm.rcStore.Store, rm.rcController = framework.NewInformer( + rm.rcStore.Indexer, rm.rcController = framework.NewIndexerInformer( &cache.ListWatch{ ListFunc: func(options api.ListOptions) (runtime.Object, error) { return rm.kubeClient.Core().ReplicationControllers(api.NamespaceAll).List(options) @@ -165,28 +189,19 @@ func NewReplicationManager(kubeClient clientset.Interface, resyncPeriod controll // way of achieving this is by performing a `stop` operation on the controller. 
DeleteFunc: rm.enqueueController, }, + cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, ) - rm.podStore.Store, rm.podController = framework.NewInformer( - &cache.ListWatch{ - ListFunc: func(options api.ListOptions) (runtime.Object, error) { - return rm.kubeClient.Core().Pods(api.NamespaceAll).List(options) - }, - WatchFunc: func(options api.ListOptions) (watch.Interface, error) { - return rm.kubeClient.Core().Pods(api.NamespaceAll).Watch(options) - }, - }, - &api.Pod{}, - resyncPeriod(), - framework.ResourceEventHandlerFuncs{ - AddFunc: rm.addPod, - // This invokes the rc for every pod change, eg: host assignment. Though this might seem like overkill - // the most frequent pod update is status, and the associated rc will only list from local storage, so - // it should be ok. - UpdateFunc: rm.updatePod, - DeleteFunc: rm.deletePod, - }, - ) + podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{ + AddFunc: rm.addPod, + // This invokes the rc for every pod change, eg: host assignment. Though this might seem like overkill + // the most frequent pod update is status, and the associated rc will only list from local storage, so + // it should be ok. + UpdateFunc: rm.updatePod, + DeleteFunc: rm.deletePod, + }) + rm.podStore.Indexer = podInformer.GetIndexer() + rm.podController = podInformer.GetController() rm.syncHandler = rm.syncReplicationController rm.podStoreSynced = rm.podController.HasSynced @@ -194,6 +209,23 @@ func NewReplicationManager(kubeClient clientset.Interface, resyncPeriod controll return rm } +// NewReplicationManagerFromClientForIntegration creates a new ReplicationManager that runs its own informer. It disables event recording for use in integration tests. +func NewReplicationManagerFromClientForIntegration(kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int, lookupCacheSize int) *ReplicationManager { + podInformer := informers.CreateSharedPodIndexInformer(kubeClient, resyncPeriod()) + rm := newReplicationManagerInternal(&record.FakeRecorder{}, podInformer, kubeClient, resyncPeriod, burstReplicas, lookupCacheSize) + rm.internalPodInformer = podInformer + return rm +} + +// NewReplicationManagerFromClient creates a new ReplicationManager that runs its own informer. +func NewReplicationManagerFromClient(kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int, lookupCacheSize int) *ReplicationManager { + podInformer := informers.CreateSharedPodIndexInformer(kubeClient, resyncPeriod()) + rm := NewReplicationManager(podInformer, kubeClient, resyncPeriod, burstReplicas, lookupCacheSize) + rm.internalPodInformer = podInformer + + return rm +} + // SetEventRecorder replaces the event recorder used by the replication manager // with the given recorder. Only used for testing. 
func (rm *ReplicationManager) SetEventRecorder(recorder record.EventRecorder) { @@ -211,6 +243,11 @@ func (rm *ReplicationManager) Run(workers int, stopCh <-chan struct{}) { for i := 0; i < workers; i++ { go wait.Until(rm.worker, time.Second, stopCh) } + + if rm.internalPodInformer != nil { + go rm.internalPodInformer.Run(stopCh) + } + <-stopCh glog.Infof("Shutting down RC Manager") rm.queue.ShutDown() @@ -259,7 +296,7 @@ func (rm *ReplicationManager) getPodController(pod *api.Pod) *api.ReplicationCon // isCacheValid check if the cache is valid func (rm *ReplicationManager) isCacheValid(pod *api.Pod, cachedRC *api.ReplicationController) bool { - _, exists, err := rm.rcStore.Get(cachedRC) + exists, err := rm.rcStore.Exists(cachedRC) // rc has been deleted or updated, cache is invalid if err != nil || !exists || !isControllerMatch(pod, cachedRC) { return false @@ -395,24 +432,29 @@ func (rm *ReplicationManager) enqueueController(obj interface{}) { // worker runs a worker thread that just dequeues items, processes them, and marks them done. // It enforces that the syncHandler is never invoked concurrently with the same key. func (rm *ReplicationManager) worker() { + workFunc := func() bool { + key, quit := rm.queue.Get() + if quit { + return true + } + defer rm.queue.Done(key) + err := rm.syncHandler(key.(string)) + if err != nil { + glog.Errorf("Error syncing replication controller: %v", err) + } + return false + } for { - func() { - key, quit := rm.queue.Get() - if quit { - return - } - defer rm.queue.Done(key) - err := rm.syncHandler(key.(string)) - if err != nil { - glog.Errorf("Error syncing replication controller: %v", err) - } - }() + if quit := workFunc(); quit { + glog.Infof("replication controller worker shutting down") + return + } } } // manageReplicas checks and updates replicas for the given replication controller. func (rm *ReplicationManager) manageReplicas(filteredPods []*api.Pod, rc *api.ReplicationController) { - diff := len(filteredPods) - rc.Spec.Replicas + diff := len(filteredPods) - int(rc.Spec.Replicas) rcKey, err := controller.KeyFunc(rc) if err != nil { glog.Errorf("Couldn't get key for replication controller %#v: %v", rc, err) @@ -439,6 +481,7 @@ func (rm *ReplicationManager) manageReplicas(filteredPods []*api.Pod, rc *api.Re // Decrement the expected number of creates because the informer won't observe this pod glog.V(2).Infof("Failed creation, decrementing expectations for controller %q/%q", rc.Namespace, rc.Name) rm.expectations.CreationObserved(rcKey) + rm.enqueueController(rc) utilruntime.HandleError(err) } }() @@ -478,8 +521,9 @@ func (rm *ReplicationManager) manageReplicas(filteredPods []*api.Pod, rc *api.Re if err := rm.podControl.DeletePod(rc.Namespace, filteredPods[ix].Name, rc); err != nil { // Decrement the expected number of deletes because the informer won't observe this deletion podKey := controller.PodKey(filteredPods[ix]) - glog.V(2).Infof("Failed to delete %v, decrementing expectations for controller %q/%q", podKey, rc.Namespace, rc.Name) + glog.V(2).Infof("Failed to delete %v due to %v, decrementing expectations for controller %q/%q", podKey, err, rc.Namespace, rc.Name) rm.expectations.DeletionObserved(rcKey, podKey) + rm.enqueueController(rc) utilruntime.HandleError(err) } }(i) @@ -492,6 +536,9 @@ func (rm *ReplicationManager) manageReplicas(filteredPods []*api.Pod, rc *api.Re // it did not expect to see any more of its pods created or deleted. This function is not meant to be invoked // concurrently with the same key. 
func (rm *ReplicationManager) syncReplicationController(key string) error { + trace := util.NewTrace("syncReplicationController: " + key) + defer trace.LogIfLong(250 * time.Millisecond) + startTime := time.Now() defer func() { glog.V(4).Infof("Finished syncing controller %q (%v)", key, time.Now().Sub(startTime)) @@ -505,7 +552,7 @@ func (rm *ReplicationManager) syncReplicationController(key string) error { return nil } - obj, exists, err := rm.rcStore.Store.GetByKey(key) + obj, exists, err := rm.rcStore.Indexer.GetByKey(key) if !exists { glog.Infof("Replication Controller has been deleted %v", key) rm.expectations.DeleteExpectations(key) @@ -526,19 +573,23 @@ func (rm *ReplicationManager) syncReplicationController(key string) error { glog.Errorf("Couldn't get key for replication controller %#v: %v", rc, err) return err } + trace.Step("ReplicationController restored") rcNeedsSync := rm.expectations.SatisfiedExpectations(rcKey) + trace.Step("Expectations restored") podList, err := rm.podStore.Pods(rc.Namespace).List(labels.Set(rc.Spec.Selector).AsSelector()) if err != nil { glog.Errorf("Error getting pods for rc %q: %v", key, err) rm.queue.Add(key) return err } + trace.Step("Pods listed") // TODO: Do this in a single pass, or use an index. filteredPods := controller.FilterActivePods(podList.Items) if rcNeedsSync { rm.manageReplicas(filteredPods, &rc) } + trace.Step("manageReplicas done") // Count the number of pods that have labels matching the labels of the pod // template of the replication controller, the matching pods may have more diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/replication/replication_controller_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/replication/replication_controller_test.go new file mode 100644 index 000000000000..376e5a4b9c93 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/replication/replication_controller_test.go @@ -0,0 +1,1102 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// If you make changes to this file, you should also make the corresponding change in ReplicaSet. 
+ +package replication + +import ( + "fmt" + "math/rand" + "net/http/httptest" + "strings" + "testing" + "time" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/client/cache" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" + "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" + "k8s.io/kubernetes/pkg/client/restclient" + "k8s.io/kubernetes/pkg/client/testing/core" + "k8s.io/kubernetes/pkg/controller" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/securitycontext" + "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/sets" + utiltesting "k8s.io/kubernetes/pkg/util/testing" + "k8s.io/kubernetes/pkg/util/wait" + "k8s.io/kubernetes/pkg/watch" +) + +var alwaysReady = func() bool { return true } + +func getKey(rc *api.ReplicationController, t *testing.T) string { + if key, err := controller.KeyFunc(rc); err != nil { + t.Errorf("Unexpected error getting key for rc %v: %v", rc.Name, err) + return "" + } else { + return key + } +} + +func newReplicationController(replicas int) *api.ReplicationController { + rc := &api.ReplicationController{ + TypeMeta: unversioned.TypeMeta{APIVersion: testapi.Default.GroupVersion().String()}, + ObjectMeta: api.ObjectMeta{ + UID: util.NewUUID(), + Name: "foobar", + Namespace: api.NamespaceDefault, + ResourceVersion: "18", + }, + Spec: api.ReplicationControllerSpec{ + Replicas: int32(replicas), + Selector: map[string]string{"foo": "bar"}, + Template: &api.PodTemplateSpec{ + ObjectMeta: api.ObjectMeta{ + Labels: map[string]string{ + "name": "foo", + "type": "production", + }, + }, + Spec: api.PodSpec{ + Containers: []api.Container{ + { + Image: "foo/bar", + TerminationMessagePath: api.TerminationMessagePathDefault, + ImagePullPolicy: api.PullIfNotPresent, + SecurityContext: securitycontext.ValidSecurityContextWithContainerDefaults(), + }, + }, + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSDefault, + NodeSelector: map[string]string{ + "baz": "blah", + }, + }, + }, + }, + } + return rc +} + +// create count pods with the given phase for the given rc (same selectors and namespace), and add them to the store. +func newPodList(store cache.Store, count int, status api.PodPhase, rc *api.ReplicationController, name string) *api.PodList { + pods := []api.Pod{} + for i := 0; i < count; i++ { + newPod := api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: fmt.Sprintf("%s%d", name, i), + Labels: rc.Spec.Selector, + Namespace: rc.Namespace, + }, + Status: api.PodStatus{Phase: status}, + } + if store != nil { + store.Add(&newPod) + } + pods = append(pods, newPod) + } + return &api.PodList{ + Items: pods, + } +} + +func validateSyncReplication(t *testing.T, fakePodControl *controller.FakePodControl, expectedCreates, expectedDeletes int) { + if len(fakePodControl.Templates) != expectedCreates { + t.Errorf("Unexpected number of creates. Expected %d, saw %d\n", expectedCreates, len(fakePodControl.Templates)) + } + if len(fakePodControl.DeletePodName) != expectedDeletes { + t.Errorf("Unexpected number of deletes. 
Expected %d, saw %d\n", expectedDeletes, len(fakePodControl.DeletePodName)) + } +} + +func replicationControllerResourceName() string { + return "replicationcontrollers" +} + +type serverResponse struct { + statusCode int + obj interface{} +} + +func TestSyncReplicationControllerDoesNothing(t *testing.T) { + c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}) + fakePodControl := controller.FakePodControl{} + manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0) + manager.podStoreSynced = alwaysReady + + // 2 running pods, a controller with 2 replicas, sync is a no-op + controllerSpec := newReplicationController(2) + manager.rcStore.Indexer.Add(controllerSpec) + newPodList(manager.podStore.Indexer, 2, api.PodRunning, controllerSpec, "pod") + + manager.podControl = &fakePodControl + manager.syncReplicationController(getKey(controllerSpec, t)) + validateSyncReplication(t, &fakePodControl, 0, 0) +} + +func TestSyncReplicationControllerDeletes(t *testing.T) { + c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}) + fakePodControl := controller.FakePodControl{} + manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0) + manager.podStoreSynced = alwaysReady + manager.podControl = &fakePodControl + + // 2 running pods and a controller with 1 replica, one pod delete expected + controllerSpec := newReplicationController(1) + manager.rcStore.Indexer.Add(controllerSpec) + newPodList(manager.podStore.Indexer, 2, api.PodRunning, controllerSpec, "pod") + + manager.syncReplicationController(getKey(controllerSpec, t)) + validateSyncReplication(t, &fakePodControl, 0, 1) +} + +func TestDeleteFinalStateUnknown(t *testing.T) { + c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}) + fakePodControl := controller.FakePodControl{} + manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0) + manager.podStoreSynced = alwaysReady + manager.podControl = &fakePodControl + + received := make(chan string) + manager.syncHandler = func(key string) error { + received <- key + return nil + } + + // The DeletedFinalStateUnknown object should cause the rc manager to insert + // the controller matching the selectors of the deleted pod into the work queue. 
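+ // A cache.DeletedFinalStateUnknown tombstone is what an informer delivers when a delete event was missed (e.g. across a relist); it wraps the object's key and its last known state.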
+ controllerSpec := newReplicationController(1) + manager.rcStore.Indexer.Add(controllerSpec) + pods := newPodList(nil, 1, api.PodRunning, controllerSpec, "pod") + manager.deletePod(cache.DeletedFinalStateUnknown{Key: "foo", Obj: &pods.Items[0]}) + + go manager.worker() + + expected := getKey(controllerSpec, t) + select { + case key := <-received: + if key != expected { + t.Errorf("Unexpected sync all for rc %v, expected %v", key, expected) + } + case <-time.After(wait.ForeverTestTimeout): + t.Errorf("Processing DeleteFinalStateUnknown took longer than expected") + } +} + +func TestSyncReplicationControllerCreates(t *testing.T) { + c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}) + manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0) + manager.podStoreSynced = alwaysReady + + // A controller with 2 replicas and no pods in the store, 2 creates expected + rc := newReplicationController(2) + manager.rcStore.Indexer.Add(rc) + + fakePodControl := controller.FakePodControl{} + manager.podControl = &fakePodControl + manager.syncReplicationController(getKey(rc, t)) + validateSyncReplication(t, &fakePodControl, 2, 0) +} + +func TestStatusUpdatesWithoutReplicasChange(t *testing.T) { + // Setup a fake server to listen for requests, and run the rc manager in steady state + fakeHandler := utiltesting.FakeHandler{ + StatusCode: 200, + ResponseBody: "", + } + testServer := httptest.NewServer(&fakeHandler) + defer testServer.Close() + c := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}) + manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0) + manager.podStoreSynced = alwaysReady + + // Steady state for the replication controller, no Status.Replicas updates expected + activePods := 5 + rc := newReplicationController(activePods) + manager.rcStore.Indexer.Add(rc) + rc.Status = api.ReplicationControllerStatus{Replicas: int32(activePods)} + newPodList(manager.podStore.Indexer, activePods, api.PodRunning, rc, "pod") + + fakePodControl := controller.FakePodControl{} + manager.podControl = &fakePodControl + manager.syncReplicationController(getKey(rc, t)) + + validateSyncReplication(t, &fakePodControl, 0, 0) + if fakeHandler.RequestReceived != nil { + t.Errorf("Unexpected update when pods and rcs are in a steady state") + } + + // This response body is just so we don't err out decoding the http response, all + // we care about is the request body sent below. 
+ response := runtime.EncodeOrDie(testapi.Default.Codec(), &api.ReplicationController{}) + fakeHandler.ResponseBody = response + + rc.Generation = rc.Generation + 1 + manager.syncReplicationController(getKey(rc, t)) + + rc.Status.ObservedGeneration = rc.Generation + updatedRc := runtime.EncodeOrDie(testapi.Default.Codec(), rc) + fakeHandler.ValidateRequest(t, testapi.Default.ResourcePath(replicationControllerResourceName(), rc.Namespace, rc.Name)+"/status", "PUT", &updatedRc) +} + +func TestControllerUpdateReplicas(t *testing.T) { + // This is a happy server just to record the PUT request we expect for status.Replicas + fakeHandler := utiltesting.FakeHandler{ + StatusCode: 200, + ResponseBody: "", + } + testServer := httptest.NewServer(&fakeHandler) + defer testServer.Close() + c := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}) + manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0) + manager.podStoreSynced = alwaysReady + + // Insufficient number of pods in the system, and Status.Replicas is wrong; + // Status.Replicas should update to match the number of pods in the system, and 1 new pod should be created. + rc := newReplicationController(5) + manager.rcStore.Indexer.Add(rc) + rc.Status = api.ReplicationControllerStatus{Replicas: 2, FullyLabeledReplicas: 6, ObservedGeneration: 0} + rc.Generation = 1 + newPodList(manager.podStore.Indexer, 2, api.PodRunning, rc, "pod") + rcCopy := *rc + extraLabelMap := map[string]string{"foo": "bar", "extraKey": "extraValue"} + rcCopy.Spec.Selector = extraLabelMap + newPodList(manager.podStore.Indexer, 2, api.PodRunning, &rcCopy, "podWithExtraLabel") + + // This response body is just so we don't err out decoding the http response + response := runtime.EncodeOrDie(testapi.Default.Codec(), &api.ReplicationController{}) + fakeHandler.ResponseBody = response + + fakePodControl := controller.FakePodControl{} + manager.podControl = &fakePodControl + + manager.syncReplicationController(getKey(rc, t)) + + // 1. Status.Replicas should go up from 2->4 even though we created 5-4=1 pod. + // 2. Status.FullyLabeledReplicas should equal the number of pods that + // have the extra labels, i.e., 2. + // 3. Every update to the status should include the Generation of the spec.
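+ // (ObservedGeneration records which generation of the spec the reported status reflects, which is how clients can tell whether a status is stale.)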
+ rc.Status = api.ReplicationControllerStatus{Replicas: 4, ObservedGeneration: 1} + + decRc := runtime.EncodeOrDie(testapi.Default.Codec(), rc) + fakeHandler.ValidateRequest(t, testapi.Default.ResourcePath(replicationControllerResourceName(), rc.Namespace, rc.Name)+"/status", "PUT", &decRc) + validateSyncReplication(t, &fakePodControl, 1, 0) +} + +func TestSyncReplicationControllerDormancy(t *testing.T) { + // Set up a test server so we can lie about the current state of pods + fakeHandler := utiltesting.FakeHandler{ + StatusCode: 200, + ResponseBody: "{}", + } + testServer := httptest.NewServer(&fakeHandler) + defer testServer.Close() + c := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}) + fakePodControl := controller.FakePodControl{} + manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0) + manager.podStoreSynced = alwaysReady + manager.podControl = &fakePodControl + + controllerSpec := newReplicationController(2) + manager.rcStore.Indexer.Add(controllerSpec) + newPodList(manager.podStore.Indexer, 1, api.PodRunning, controllerSpec, "pod") + + // Creates a replica and sets expectations + controllerSpec.Status.Replicas = 1 + manager.syncReplicationController(getKey(controllerSpec, t)) + validateSyncReplication(t, &fakePodControl, 1, 0) + + // Expectations prevent replica creation but not a status update + controllerSpec.Status.Replicas = 0 + fakePodControl.Clear() + manager.syncReplicationController(getKey(controllerSpec, t)) + validateSyncReplication(t, &fakePodControl, 0, 0) + + // Get the key for the controller + rcKey, err := controller.KeyFunc(controllerSpec) + if err != nil { + t.Errorf("Couldn't get key for object %+v: %v", controllerSpec, err) + } + + // Lowering expectations should lead to a sync that creates a replica, however the + // fakePodControl error will prevent this, leaving expectations at 0, 0 + manager.expectations.CreationObserved(rcKey) + controllerSpec.Status.Replicas = 1 + fakePodControl.Clear() + fakePodControl.Err = fmt.Errorf("Fake Error") + + manager.syncReplicationController(getKey(controllerSpec, t)) + validateSyncReplication(t, &fakePodControl, 0, 0) + + // This replica should not need a lowering of expectations, since the previous create failed + fakePodControl.Err = nil + manager.syncReplicationController(getKey(controllerSpec, t)) + validateSyncReplication(t, &fakePodControl, 1, 0) + + // 1 PUT for the rc status during the dormancy window. + // Note that the pod creates go through pod control so they're not recorded.
+ fakeHandler.ValidateRequestCount(t, 1) +} + +func TestPodControllerLookup(t *testing.T) { + manager := NewReplicationManagerFromClient(clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}), controller.NoResyncPeriodFunc, BurstReplicas, 0) + manager.podStoreSynced = alwaysReady + testCases := []struct { + inRCs []*api.ReplicationController + pod *api.Pod + outRCName string + }{ + // pods without labels don't match any rcs + { + inRCs: []*api.ReplicationController{ + {ObjectMeta: api.ObjectMeta{Name: "basic"}}}, + pod: &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo1", Namespace: api.NamespaceAll}}, + outRCName: "", + }, + // Matching labels, not namespace + { + inRCs: []*api.ReplicationController{ + { + ObjectMeta: api.ObjectMeta{Name: "foo"}, + Spec: api.ReplicationControllerSpec{ + Selector: map[string]string{"foo": "bar"}, + }, + }, + }, + pod: &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: "foo2", Namespace: "ns", Labels: map[string]string{"foo": "bar"}}}, + outRCName: "", + }, + // Matching ns and labels returns the key to the rc, not the rc name + { + inRCs: []*api.ReplicationController{ + { + ObjectMeta: api.ObjectMeta{Name: "bar", Namespace: "ns"}, + Spec: api.ReplicationControllerSpec{ + Selector: map[string]string{"foo": "bar"}, + }, + }, + }, + pod: &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: "foo3", Namespace: "ns", Labels: map[string]string{"foo": "bar"}}}, + outRCName: "bar", + }, + } + for _, c := range testCases { + for _, r := range c.inRCs { + manager.rcStore.Indexer.Add(r) + } + if rc := manager.getPodController(c.pod); rc != nil { + if c.outRCName != rc.Name { + t.Errorf("Got controller %+v expected %+v", rc.Name, c.outRCName) + } + } else if c.outRCName != "" { + t.Errorf("Expected a controller %v pod %v, found none", c.outRCName, c.pod.Name) + } + } +} + +func TestWatchControllers(t *testing.T) { + fakeWatch := watch.NewFake() + c := &fake.Clientset{} + c.AddWatchReactor("*", core.DefaultWatchReactor(fakeWatch, nil)) + manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0) + manager.podStoreSynced = alwaysReady + + var testControllerSpec api.ReplicationController + received := make(chan string) + + // The update sent through the fakeWatcher should make its way into the workqueue, + // and eventually into the syncHandler. The handler validates the received controller + // and closes the received channel to indicate that the test can finish. + manager.syncHandler = func(key string) error { + + obj, exists, err := manager.rcStore.Indexer.GetByKey(key) + if !exists || err != nil { + t.Errorf("Expected to find controller under key %v", key) + } + controllerSpec := *obj.(*api.ReplicationController) + if !api.Semantic.DeepDerivative(controllerSpec, testControllerSpec) { + t.Errorf("Expected %#v, but got %#v", testControllerSpec, controllerSpec) + } + close(received) + return nil + } + // Start only the rc watcher and the workqueue, send a watch event, + // and make sure it hits the sync method. 
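+ // (core.DefaultWatchReactor serves every watch opened by the fake clientset from fakeWatch, so the event added below is delivered straight to the rc informer.)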
+ stopCh := make(chan struct{}) + defer close(stopCh) + go manager.rcController.Run(stopCh) + go wait.Until(manager.worker, 10*time.Millisecond, stopCh) + + testControllerSpec.Name = "foo" + fakeWatch.Add(&testControllerSpec) + + select { + case <-received: + case <-time.After(wait.ForeverTestTimeout): + t.Errorf("Expected 1 call but got 0") + } +} + +func TestWatchPods(t *testing.T) { + fakeWatch := watch.NewFake() + c := &fake.Clientset{} + c.AddWatchReactor("*", core.DefaultWatchReactor(fakeWatch, nil)) + manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0) + manager.podStoreSynced = alwaysReady + + // Put one rc and one pod into the controller's stores + testControllerSpec := newReplicationController(1) + manager.rcStore.Indexer.Add(testControllerSpec) + received := make(chan string) + // The pod update sent through the fakeWatcher should figure out the managing rc and + // send it into the syncHandler. + manager.syncHandler = func(key string) error { + + obj, exists, err := manager.rcStore.Indexer.GetByKey(key) + if !exists || err != nil { + t.Errorf("Expected to find controller under key %v", key) + } + controllerSpec := obj.(*api.ReplicationController) + if !api.Semantic.DeepDerivative(controllerSpec, testControllerSpec) { + t.Errorf("\nExpected %#v,\nbut got %#v", testControllerSpec, controllerSpec) + } + close(received) + return nil + } + // Start only the pod watcher and the workqueue, send a watch event, + // and make sure it hits the sync method for the right rc. + stopCh := make(chan struct{}) + defer close(stopCh) + go manager.podController.Run(stopCh) + go manager.internalPodInformer.Run(stopCh) + go wait.Until(manager.worker, 10*time.Millisecond, stopCh) + + pods := newPodList(nil, 1, api.PodRunning, testControllerSpec, "pod") + testPod := pods.Items[0] + testPod.Status.Phase = api.PodFailed + fakeWatch.Add(&testPod) + + select { + case <-received: + case <-time.After(wait.ForeverTestTimeout): + t.Errorf("Expected 1 call but got 0") + } +} + +func TestUpdatePods(t *testing.T) { + manager := NewReplicationManagerFromClient(fake.NewSimpleClientset(), controller.NoResyncPeriodFunc, BurstReplicas, 0) + manager.podStoreSynced = alwaysReady + + received := make(chan string) + + manager.syncHandler = func(key string) error { + obj, exists, err := manager.rcStore.Indexer.GetByKey(key) + if !exists || err != nil { + t.Errorf("Expected to find controller under key %v", key) + } + received <- obj.(*api.ReplicationController).Name + return nil + } + + stopCh := make(chan struct{}) + defer close(stopCh) + go wait.Until(manager.worker, 10*time.Millisecond, stopCh) + + // Put 2 rcs and one pod into the controller's stores + testControllerSpec1 := newReplicationController(1) + manager.rcStore.Indexer.Add(testControllerSpec1) + testControllerSpec2 := *testControllerSpec1 + testControllerSpec2.Spec.Selector = map[string]string{"bar": "foo"} + testControllerSpec2.Name = "barfoo" + manager.rcStore.Indexer.Add(&testControllerSpec2) + + // Put one pod in the podStore + pod1 := newPodList(manager.podStore.Indexer, 1, api.PodRunning, testControllerSpec1, "pod").Items[0] + pod2 := pod1 + pod2.Labels = testControllerSpec2.Spec.Selector + + // Send an update of the same pod with modified labels, and confirm we get a sync request for + // both controllers + manager.updatePod(&pod1, &pod2) + + expected := sets.NewString(testControllerSpec1.Name, testControllerSpec2.Name) + for _, name := range expected.List() { + t.Logf("Expecting update for %+v", name) + 
select { + case got := <-received: + if !expected.Has(got) { + t.Errorf("Expected keys %#v got %v", expected, got) + } + case <-time.After(wait.ForeverTestTimeout): + t.Errorf("Expected update notifications for controllers within the test timeout") + } + } +} + +func TestControllerUpdateRequeue(t *testing.T) { + // This server should force a requeue of the controller because it fails to update status.Replicas. + fakeHandler := utiltesting.FakeHandler{ + StatusCode: 500, + ResponseBody: "", + } + testServer := httptest.NewServer(&fakeHandler) + defer testServer.Close() + + c := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}) + manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0) + manager.podStoreSynced = alwaysReady + + rc := newReplicationController(1) + manager.rcStore.Indexer.Add(rc) + rc.Status = api.ReplicationControllerStatus{Replicas: 2} + newPodList(manager.podStore.Indexer, 1, api.PodRunning, rc, "pod") + + fakePodControl := controller.FakePodControl{} + manager.podControl = &fakePodControl + + manager.syncReplicationController(getKey(rc, t)) + + ch := make(chan interface{}) + go func() { + item, _ := manager.queue.Get() + ch <- item + }() + select { + case key := <-ch: + expectedKey := getKey(rc, t) + if key != expectedKey { + t.Errorf("Expected requeue of controller with key %s got %s", expectedKey, key) + } + case <-time.After(wait.ForeverTestTimeout): + manager.queue.ShutDown() + t.Errorf("Expected to find an rc in the queue, found none.") + } + // 1 Update and 1 GET, both of which fail + fakeHandler.ValidateRequestCount(t, 2) +} + +func TestControllerUpdateStatusWithFailure(t *testing.T) { + rc := newReplicationController(1) + c := &fake.Clientset{} + c.AddReactor("get", "replicationcontrollers", func(action core.Action) (bool, runtime.Object, error) { + return true, rc, nil + }) + c.AddReactor("*", "*", func(action core.Action) (bool, runtime.Object, error) { + return true, &api.ReplicationController{}, fmt.Errorf("Fake error") + }) + fakeRCClient := c.Core().ReplicationControllers("default") + numReplicas := 10 + updateReplicaCount(fakeRCClient, *rc, numReplicas, 0) + updates, gets := 0, 0 + for _, a := range c.Actions() { + if a.GetResource().Resource != "replicationcontrollers" { + t.Errorf("Unexpected action %+v", a) + continue + } + + switch action := a.(type) { + case core.GetAction: + gets++ + // Make sure the get is for the right rc even though the update failed. + if action.GetName() != rc.Name { + t.Errorf("Expected get for rc %v, got %+v instead", rc.Name, action.GetName()) + } + case core.UpdateAction: + updates++ + // Confirm that the update has the right status.Replicas even though the Get + // returned an rc with replicas=1. + if c, ok := action.GetObject().(*api.ReplicationController); !ok { + t.Errorf("Expected an rc as the argument to update, got %T", c) + } else if c.Status.Replicas != int32(numReplicas) { + t.Errorf("Expected update for rc to contain replicas %v, got %v instead", + numReplicas, c.Status.Replicas) + } + default: + t.Errorf("Unexpected action %+v", a) + } + } + if gets != 1 || updates != 2 { + t.Errorf("Expected 1 get and 2 updates, got %d gets %d updates", gets, updates) + } +} + +// TODO: This test is too hairy for a unit test. It should be moved to an E2E suite.
+func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int) { + c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}) + fakePodControl := controller.FakePodControl{} + manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, burstReplicas, 0) + manager.podStoreSynced = alwaysReady + manager.podControl = &fakePodControl + + controllerSpec := newReplicationController(numReplicas) + manager.rcStore.Indexer.Add(controllerSpec) + + expectedPods := 0 + pods := newPodList(nil, numReplicas, api.PodPending, controllerSpec, "pod") + + rcKey, err := controller.KeyFunc(controllerSpec) + if err != nil { + t.Errorf("Couldn't get key for object %+v: %v", controllerSpec, err) + } + + // Size up the controller, then size it down, and confirm the expected create/delete pattern + for _, replicas := range []int{numReplicas, 0} { + + controllerSpec.Spec.Replicas = int32(replicas) + manager.rcStore.Indexer.Add(controllerSpec) + + for i := 0; i < numReplicas; i += burstReplicas { + manager.syncReplicationController(getKey(controllerSpec, t)) + + // The store accrues active pods. It's also used by the rc to determine how many + // replicas to create. + activePods := len(manager.podStore.Indexer.List()) + if replicas != 0 { + // This is the number of pods currently "in flight". They were created by the rc manager above, + // which then puts the rc to sleep till all of them have been observed. + expectedPods = replicas - activePods + if expectedPods > burstReplicas { + expectedPods = burstReplicas + } + // This validates the rc manager sync actually created pods + validateSyncReplication(t, &fakePodControl, expectedPods, 0) + + // This simulates the watch events for all but 1 of the expected pods. + // None of these should wake the controller because it has expectations==BurstReplicas. + for i := 0; i < expectedPods-1; i++ { + manager.podStore.Indexer.Add(&pods.Items[i]) + manager.addPod(&pods.Items[i]) + } + + podExp, exists, err := manager.expectations.GetExpectations(rcKey) + if !exists || err != nil { + t.Fatalf("Did not find expectations for rc.") + } + if add, _ := podExp.GetExpectations(); add != 1 { + t.Fatalf("Expectations are wrong %v", podExp) + } + } else { + expectedPods = (replicas - activePods) * -1 + if expectedPods > burstReplicas { + expectedPods = burstReplicas + } + validateSyncReplication(t, &fakePodControl, 0, expectedPods) + + // To accurately simulate a watch we must delete the exact pods + // the rc is waiting for. + expectedDels := manager.expectations.GetUIDs(getKey(controllerSpec, t)) + podsToDelete := []*api.Pod{} + for _, key := range expectedDels.List() { + nsName := strings.Split(key, "/") + podsToDelete = append(podsToDelete, &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: nsName[1], + Namespace: nsName[0], + Labels: controllerSpec.Spec.Selector, + }, + }) + } + // Don't delete all pods because we confirm that the last pod + // has exactly one expectation at the end, to verify that we + // don't double delete. 
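+ // NOTE: ranging over podsToDelete[1:] yields indices 0..len-2 into the original slice, so the loop below deletes every pod except the last one.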
+				for i := range podsToDelete[1:] {
+					manager.podStore.Indexer.Delete(podsToDelete[i])
+					manager.deletePod(podsToDelete[i])
+				}
+				podExp, exists, err := manager.expectations.GetExpectations(rcKey)
+				if !exists || err != nil {
+					t.Fatalf("Did not find expectations for rc.")
+				}
+				if _, del := podExp.GetExpectations(); del != 1 {
+					t.Fatalf("Expectations are wrong %v", podExp)
+				}
+			}
+
+			// Check that the rc didn't take any action for all the above pods
+			fakePodControl.Clear()
+			manager.syncReplicationController(getKey(controllerSpec, t))
+			validateSyncReplication(t, &fakePodControl, 0, 0)
+
+			// Create/Delete the last pod
+			// The last add pod will decrease the expectation of the rc to 0,
+			// which will cause it to create/delete the remaining replicas up to burstReplicas.
+			if replicas != 0 {
+				manager.podStore.Indexer.Add(&pods.Items[expectedPods-1])
+				manager.addPod(&pods.Items[expectedPods-1])
+			} else {
+				expectedDel := manager.expectations.GetUIDs(getKey(controllerSpec, t))
+				if expectedDel.Len() != 1 {
+					t.Fatalf("Waiting on unexpected number of deletes.")
+				}
+				nsName := strings.Split(expectedDel.List()[0], "/")
+				lastPod := &api.Pod{
+					ObjectMeta: api.ObjectMeta{
+						Name:      nsName[1],
+						Namespace: nsName[0],
+						Labels:    controllerSpec.Spec.Selector,
+					},
+				}
+				manager.podStore.Indexer.Delete(lastPod)
+				manager.deletePod(lastPod)
+			}
+			pods.Items = pods.Items[expectedPods:]
+		}
+
+		// Confirm that we've created the right number of replicas
+		activePods := int32(len(manager.podStore.Indexer.List()))
+		if activePods != controllerSpec.Spec.Replicas {
+			t.Fatalf("Unexpected number of active pods, expected %d, got %d", controllerSpec.Spec.Replicas, activePods)
+		}
+		// Replenish the pod list, since we cut it down sizing up
+		pods = newPodList(nil, replicas, api.PodRunning, controllerSpec, "pod")
+	}
+}
+
+func TestControllerBurstReplicas(t *testing.T) {
+	doTestControllerBurstReplicas(t, 5, 30)
+	doTestControllerBurstReplicas(t, 5, 12)
+	doTestControllerBurstReplicas(t, 3, 2)
+}
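The burst tests above lean entirely on the expectations mechanism: before creating or deleting pods, the rc manager records how many watch events it expects back, and it refuses to act on that rc again until the counts drain to zero. A rough standalone sketch of that bookkeeping (simplified; the real pkg/controller type also tracks UIDs for deletes and expires stale expectations):

package sketch

import "sync/atomic"

// expectations records how many pod creations (add) and deletions (del) the
// controller is still waiting to observe via watch events.
type expectations struct {
	add int64
	del int64
}

// Expect is called by the sync loop right before it issues creates/deletes.
func (e *expectations) Expect(add, del int64) {
	atomic.StoreInt64(&e.add, add)
	atomic.StoreInt64(&e.del, del)
}

// CreationObserved and DeletionObserved are called from the pod event handlers.
func (e *expectations) CreationObserved() { atomic.AddInt64(&e.add, -1) }
func (e *expectations) DeletionObserved() { atomic.AddInt64(&e.del, -1) }

// Fulfilled reports whether the controller may act on the rc again; until
// then the sync loop treats it as asleep.
func (e *expectations) Fulfilled() bool {
	return atomic.LoadInt64(&e.add) <= 0 && atomic.LoadInt64(&e.del) <= 0
}

This is why the test hand-delivers addPod/deletePod events for all but one pod, asserts the counter sits at exactly 1, and only then delivers the last event to wake the controller.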
+type FakeRCExpectations struct {
+	*controller.ControllerExpectations
+	satisfied    bool
+	expSatisfied func()
+}
+
+func (fe FakeRCExpectations) SatisfiedExpectations(controllerKey string) bool {
+	fe.expSatisfied()
+	return fe.satisfied
+}
+
+// TestRCSyncExpectations tests that a pod cannot sneak in between counting active pods
+// and checking expectations.
+func TestRCSyncExpectations(t *testing.T) {
+	c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
+	fakePodControl := controller.FakePodControl{}
+	manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, 2, 0)
+	manager.podStoreSynced = alwaysReady
+	manager.podControl = &fakePodControl
+
+	controllerSpec := newReplicationController(2)
+	manager.rcStore.Indexer.Add(controllerSpec)
+	pods := newPodList(nil, 2, api.PodPending, controllerSpec, "pod")
+	manager.podStore.Indexer.Add(&pods.Items[0])
+	postExpectationsPod := pods.Items[1]
+
+	manager.expectations = controller.NewUIDTrackingControllerExpectations(FakeRCExpectations{
+		controller.NewControllerExpectations(), true, func() {
+			// If we check active pods before checking expectations, the rc
+			// will create a new replica because it doesn't see this pod, but
+			// has fulfilled its expectations.
+			manager.podStore.Indexer.Add(&postExpectationsPod)
+		},
+	})
+	manager.syncReplicationController(getKey(controllerSpec, t))
+	validateSyncReplication(t, &fakePodControl, 0, 0)
+}
+
+func TestDeleteControllerAndExpectations(t *testing.T) {
+	c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
+	manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, 10, 0)
+	manager.podStoreSynced = alwaysReady
+
+	rc := newReplicationController(1)
+	manager.rcStore.Indexer.Add(rc)
+
+	fakePodControl := controller.FakePodControl{}
+	manager.podControl = &fakePodControl
+
+	// This should set expectations for the rc
+	manager.syncReplicationController(getKey(rc, t))
+	validateSyncReplication(t, &fakePodControl, 1, 0)
+	fakePodControl.Clear()
+
+	// Get the RC key
+	rcKey, err := controller.KeyFunc(rc)
+	if err != nil {
+		t.Errorf("Couldn't get key for object %+v: %v", rc, err)
+	}
+
+	// This is to simulate a concurrent addPod that has a handle on the expectations
+	// as the controller deletes it.
+	podExp, exists, err := manager.expectations.GetExpectations(rcKey)
+	if !exists || err != nil {
+		t.Errorf("No expectations found for rc")
+	}
+	manager.rcStore.Indexer.Delete(rc)
+	manager.syncReplicationController(getKey(rc, t))
+
+	if _, exists, err = manager.expectations.GetExpectations(rcKey); exists {
+		t.Errorf("Found expectations, expected none since the rc has been deleted.")
+	}
+
+	// This should have no effect, since we've deleted the rc.
+	podExp.Add(-1, 0)
+	manager.podStore.Indexer.Replace(make([]interface{}, 0), "0")
+	manager.syncReplicationController(getKey(rc, t))
+	validateSyncReplication(t, &fakePodControl, 0, 0)
+}
+
+func TestRCManagerNotReady(t *testing.T) {
+	c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
+	fakePodControl := controller.FakePodControl{}
+	manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, 2, 0)
+	manager.podControl = &fakePodControl
+	manager.podStoreSynced = func() bool { return false }
+
+	// Simulates the rc reflector running before the pod reflector. We don't
+	// want to end up creating replicas in this case until the pod reflector
+	// has synced, so the rc manager should just requeue the rc.
+	controllerSpec := newReplicationController(1)
+	manager.rcStore.Indexer.Add(controllerSpec)
+
+	rcKey := getKey(controllerSpec, t)
+	manager.syncReplicationController(rcKey)
+	validateSyncReplication(t, &fakePodControl, 0, 0)
+	queueRC, _ := manager.queue.Get()
+	if queueRC != rcKey {
+		t.Fatalf("Expected to find key %v in queue, found %v", rcKey, queueRC)
+	}
+
+	manager.podStoreSynced = alwaysReady
+	manager.syncReplicationController(rcKey)
+	validateSyncReplication(t, &fakePodControl, 1, 0)
+}
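TestRCSyncExpectations above is really about ordering inside the sync loop: expectations must be consulted before the pod store is read, so a pod that lands in the store between the two steps can only make the manager do less work, never create an extra replica. The ordering, isolated into a sketch with stand-in interfaces (not the manager's real signatures):

package sketch

// podLister and expectationsChecker are minimal stand-ins for the manager's
// pod store and expectations, just enough to show the ordering.
type podLister interface{ List() []string }

type expectationsChecker interface{ SatisfiedExpectations(key string) bool }

// sync consults expectations *before* snapshotting the pod store. A pod
// added concurrently can then only raise the observed count, so the worst
// case is skipping work, never creating a duplicate replica.
func sync(key string, exp expectationsChecker, store podLister, manage func(active int)) {
	needsSync := exp.SatisfiedExpectations(key)
	active := len(store.List())
	if needsSync {
		manage(active)
	}
}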
+// shuffle returns a new shuffled list of replication controllers.
+func shuffle(controllers []*api.ReplicationController) []*api.ReplicationController {
+	numControllers := len(controllers)
+	randIndexes := rand.Perm(numControllers)
+	shuffled := make([]*api.ReplicationController, numControllers)
+	for i := 0; i < numControllers; i++ {
+		shuffled[i] = controllers[randIndexes[i]]
+	}
+	return shuffled
+}
+
+func TestOverlappingRCs(t *testing.T) {
+	c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
+
+	for i := 0; i < 5; i++ {
+		manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, 10, 0)
+		manager.podStoreSynced = alwaysReady
+
+		// Create overlapping rcs with staggered creation timestamps, shuffle
+		// them randomly, and insert them into the rc manager's store.
+		var controllers []*api.ReplicationController
+		for j := 1; j < 10; j++ {
+			controllerSpec := newReplicationController(1)
+			controllerSpec.CreationTimestamp = unversioned.Date(2014, time.December, j, 0, 0, 0, 0, time.Local)
+			controllerSpec.Name = string(util.NewUUID())
+			controllers = append(controllers, controllerSpec)
+		}
+		shuffledControllers := shuffle(controllers)
+		for j := range shuffledControllers {
+			manager.rcStore.Indexer.Add(shuffledControllers[j])
+		}
+		// Add a pod and make sure only the oldest rc is synced
+		pods := newPodList(nil, 1, api.PodPending, controllers[0], "pod")
+		rcKey := getKey(controllers[0], t)
+
+		manager.addPod(&pods.Items[0])
+		queueRC, _ := manager.queue.Get()
+		if queueRC != rcKey {
+			t.Fatalf("Expected to find key %v in queue, found %v", rcKey, queueRC)
+		}
+	}
+}
+
+func TestDeletionTimestamp(t *testing.T) {
+	c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
+	manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, 10, 0)
+	manager.podStoreSynced = alwaysReady
+
+	controllerSpec := newReplicationController(1)
+	manager.rcStore.Indexer.Add(controllerSpec)
+	rcKey, err := controller.KeyFunc(controllerSpec)
+	if err != nil {
+		t.Errorf("Couldn't get key for object %+v: %v", controllerSpec, err)
+	}
+	pod := newPodList(nil, 1, api.PodPending, controllerSpec, "pod").Items[0]
+	pod.DeletionTimestamp = &unversioned.Time{Time: time.Now()}
+	manager.expectations.ExpectDeletions(rcKey, []string{controller.PodKey(&pod)})
+
+	// A pod added with a deletion timestamp should decrement deletions, not creations.
+	manager.addPod(&pod)
+
+	queueRC, _ := manager.queue.Get()
+	if queueRC != rcKey {
+		t.Fatalf("Expected to find key %v in queue, found %v", rcKey, queueRC)
+	}
+	manager.queue.Done(rcKey)
+
+	podExp, exists, err := manager.expectations.GetExpectations(rcKey)
+	if !exists || err != nil || !podExp.Fulfilled() {
+		t.Fatalf("Wrong expectations %+v", podExp)
+	}
+
+	// An update from no deletion timestamp to having one should be treated
+	// as a deletion.
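That rule, that only the transition from no DeletionTimestamp to having one counts as the delete, is what keeps the bookkeeping single-counted across update and delete events. A sketch of the handler logic with simplified types (illustrative, not the manager's actual methods; the real code also consults the tracked UID set in deletePod):

package sketch

import "time"

type pod struct {
	key               string
	deletionTimestamp *time.Time
}

type expectations interface {
	CreationObserved(rcKey string)
	DeletionObserved(rcKey, podKey string)
}

// addPod: a pod that is born already terminating counts against the delete
// expectations, never the create ones.
func addPod(exp expectations, rcKey string, p *pod) {
	if p.deletionTimestamp != nil {
		exp.DeletionObserved(rcKey, p.key)
		return
	}
	exp.CreationObserved(rcKey)
}

// updatePod: only the nil -> non-nil transition counts as a delete, so a
// later update (or the eventual delete event) cannot double-count it.
func updatePod(exp expectations, rcKey string, old, cur *pod) {
	if cur.deletionTimestamp != nil && old.deletionTimestamp == nil {
		exp.DeletionObserved(rcKey, cur.key)
	}
}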
+ oldPod := newPodList(nil, 1, api.PodPending, controllerSpec, "pod").Items[0] + manager.expectations.ExpectDeletions(rcKey, []string{controller.PodKey(&pod)}) + manager.updatePod(&oldPod, &pod) + + queueRC, _ = manager.queue.Get() + if queueRC != rcKey { + t.Fatalf("Expected to find key %v in queue, found %v", rcKey, queueRC) + } + manager.queue.Done(rcKey) + + podExp, exists, err = manager.expectations.GetExpectations(rcKey) + if !exists || err != nil || !podExp.Fulfilled() { + t.Fatalf("Wrong expectations %+v", podExp) + } + + // An update to the pod (including an update to the deletion timestamp) + // should not be counted as a second delete. + secondPod := &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Namespace: pod.Namespace, + Name: "secondPod", + Labels: pod.Labels, + }, + } + manager.expectations.ExpectDeletions(rcKey, []string{controller.PodKey(secondPod)}) + oldPod.DeletionTimestamp = &unversioned.Time{Time: time.Now()} + manager.updatePod(&oldPod, &pod) + + podExp, exists, err = manager.expectations.GetExpectations(rcKey) + if !exists || err != nil || podExp.Fulfilled() { + t.Fatalf("Wrong expectations %+v", podExp) + } + + // A pod with a non-nil deletion timestamp should also be ignored by the + // delete handler, because it's already been counted in the update. + manager.deletePod(&pod) + podExp, exists, err = manager.expectations.GetExpectations(rcKey) + if !exists || err != nil || podExp.Fulfilled() { + t.Fatalf("Wrong expectations %+v", podExp) + } + + // Deleting the second pod should clear expectations. + manager.deletePod(secondPod) + + queueRC, _ = manager.queue.Get() + if queueRC != rcKey { + t.Fatalf("Expected to find key %v in queue, found %v", rcKey, queueRC) + } + manager.queue.Done(rcKey) + + podExp, exists, err = manager.expectations.GetExpectations(rcKey) + if !exists || err != nil || !podExp.Fulfilled() { + t.Fatalf("Wrong expectations %+v", podExp) + } +} + +func BenchmarkGetPodControllerMultiNS(b *testing.B) { + client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}) + manager := NewReplicationManagerFromClient(client, controller.NoResyncPeriodFunc, BurstReplicas, 0) + + const nsNum = 1000 + + pods := []api.Pod{} + for i := 0; i < nsNum; i++ { + ns := fmt.Sprintf("ns-%d", i) + for j := 0; j < 10; j++ { + rcName := fmt.Sprintf("rc-%d", j) + for k := 0; k < 10; k++ { + podName := fmt.Sprintf("pod-%d-%d", j, k) + pods = append(pods, api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: podName, + Namespace: ns, + Labels: map[string]string{"rcName": rcName}, + }, + }) + } + } + } + + for i := 0; i < nsNum; i++ { + ns := fmt.Sprintf("ns-%d", i) + for j := 0; j < 10; j++ { + rcName := fmt.Sprintf("rc-%d", j) + manager.rcStore.Indexer.Add(&api.ReplicationController{ + ObjectMeta: api.ObjectMeta{Name: rcName, Namespace: ns}, + Spec: api.ReplicationControllerSpec{ + Selector: map[string]string{"rcName": rcName}, + }, + }) + } + } + + b.ResetTimer() + + for i := 0; i < b.N; i++ { + for _, pod := range pods { + manager.getPodController(&pod) + } + } +} + +func BenchmarkGetPodControllerSingleNS(b *testing.B) { + client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}) + manager := NewReplicationManagerFromClient(client, controller.NoResyncPeriodFunc, BurstReplicas, 0) + + const rcNum = 1000 + const replicaNum = 3 + + pods := []api.Pod{} + for i := 0; i < rcNum; i++ { + rcName := 
fmt.Sprintf("rc-%d", i) + for j := 0; j < replicaNum; j++ { + podName := fmt.Sprintf("pod-%d-%d", i, j) + pods = append(pods, api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: podName, + Namespace: "foo", + Labels: map[string]string{"rcName": rcName}, + }, + }) + } + } + + for i := 0; i < rcNum; i++ { + rcName := fmt.Sprintf("rc-%d", i) + manager.rcStore.Indexer.Add(&api.ReplicationController{ + ObjectMeta: api.ObjectMeta{Name: rcName, Namespace: "foo"}, + Spec: api.ReplicationControllerSpec{ + Selector: map[string]string{"rcName": rcName}, + }, + }) + } + b.ResetTimer() + + for i := 0; i < b.N; i++ { + for _, pod := range pods { + manager.getPodController(&pod) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/replication/replication_controller_utils.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/replication/replication_controller_utils.go index 7e3d402d2021..0383fa94649a 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/replication/replication_controller_utils.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/replication/replication_controller_utils.go @@ -23,7 +23,7 @@ import ( "github.com/golang/glog" "k8s.io/kubernetes/pkg/api" - unversionedcore "k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned" + unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned" ) // updateReplicaCount attempts to update the Status.Replicas of the given controller, with a single GET/PUT retry. @@ -31,8 +31,8 @@ func updateReplicaCount(rcClient unversionedcore.ReplicationControllerInterface, // This is the steady state. It happens when the rc doesn't have any expectations, since // we do a periodic relist every 30s. If the generations differ but the replicas are // the same, a caller might've resized to the same replica count. 
- if controller.Status.Replicas == numReplicas && - controller.Status.FullyLabeledReplicas == numFullyLabeledReplicas && + if int(controller.Status.Replicas) == numReplicas && + int(controller.Status.FullyLabeledReplicas) == numFullyLabeledReplicas && controller.Generation == controller.Status.ObservedGeneration { return nil } @@ -49,7 +49,7 @@ func updateReplicaCount(rcClient unversionedcore.ReplicationControllerInterface, fmt.Sprintf("fullyLabeledReplicas %d->%d, ", controller.Status.FullyLabeledReplicas, numFullyLabeledReplicas) + fmt.Sprintf("sequence No: %v->%v", controller.Status.ObservedGeneration, generation)) - rc.Status = api.ReplicationControllerStatus{Replicas: numReplicas, FullyLabeledReplicas: numFullyLabeledReplicas, ObservedGeneration: generation} + rc.Status = api.ReplicationControllerStatus{Replicas: int32(numReplicas), FullyLabeledReplicas: int32(numFullyLabeledReplicas), ObservedGeneration: generation} _, updateErr = rcClient.UpdateStatus(rc) if updateErr == nil || i >= statusUpdateRetries { return updateErr diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/resourcequota/replenishment_controller.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/resourcequota/replenishment_controller.go index f02b945a8f0e..a344bec8709c 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/resourcequota/replenishment_controller.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/resourcequota/replenishment_controller.go @@ -28,8 +28,10 @@ import ( clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller/framework" + "k8s.io/kubernetes/pkg/controller/framework/informers" "k8s.io/kubernetes/pkg/quota/evaluator/core" "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/util/metrics" utilruntime "k8s.io/kubernetes/pkg/util/runtime" "k8s.io/kubernetes/pkg/watch" ) @@ -86,43 +88,50 @@ func ObjectReplenishmentDeleteFunc(options *ReplenishmentControllerOptions) func // ReplenishmentControllerFactory knows how to build replenishment controllers type ReplenishmentControllerFactory interface { - // NewController returns a controller configured with the specified options - NewController(options *ReplenishmentControllerOptions) (*framework.Controller, error) + // NewController returns a controller configured with the specified options. + // This method is NOT thread-safe. 
+ NewController(options *ReplenishmentControllerOptions) (framework.ControllerInterface, error) } // replenishmentControllerFactory implements ReplenishmentControllerFactory type replenishmentControllerFactory struct { - kubeClient clientset.Interface + kubeClient clientset.Interface + podInformer framework.SharedInformer } // NewReplenishmentControllerFactory returns a factory that knows how to build controllers // to replenish resources when updated or deleted -func NewReplenishmentControllerFactory(kubeClient clientset.Interface) ReplenishmentControllerFactory { +func NewReplenishmentControllerFactory(podInformer framework.SharedInformer, kubeClient clientset.Interface) ReplenishmentControllerFactory { return &replenishmentControllerFactory{ - kubeClient: kubeClient, + kubeClient: kubeClient, + podInformer: podInformer, } } -func (r *replenishmentControllerFactory) NewController(options *ReplenishmentControllerOptions) (*framework.Controller, error) { - var result *framework.Controller +func NewReplenishmentControllerFactoryFromClient(kubeClient clientset.Interface) ReplenishmentControllerFactory { + return NewReplenishmentControllerFactory(nil, kubeClient) +} + +func (r *replenishmentControllerFactory) NewController(options *ReplenishmentControllerOptions) (framework.ControllerInterface, error) { + var result framework.ControllerInterface + if r.kubeClient != nil && r.kubeClient.Core().GetRESTClient().GetRateLimiter() != nil { + metrics.RegisterMetricAndTrackRateLimiterUsage("replenishment_controller", r.kubeClient.Core().GetRESTClient().GetRateLimiter()) + } + switch options.GroupKind { case api.Kind("Pod"): - _, result = framework.NewInformer( - &cache.ListWatch{ - ListFunc: func(options api.ListOptions) (runtime.Object, error) { - return r.kubeClient.Core().Pods(api.NamespaceAll).List(options) - }, - WatchFunc: func(options api.ListOptions) (watch.Interface, error) { - return r.kubeClient.Core().Pods(api.NamespaceAll).Watch(options) - }, - }, - &api.Pod{}, - options.ResyncPeriod(), - framework.ResourceEventHandlerFuncs{ + if r.podInformer != nil { + r.podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{ UpdateFunc: PodReplenishmentUpdateFunc(options), DeleteFunc: ObjectReplenishmentDeleteFunc(options), - }, - ) + }) + result = r.podInformer.GetController() + break + } + + r.podInformer = informers.CreateSharedPodInformer(r.kubeClient, options.ResyncPeriod()) + result = r.podInformer + case api.Kind("Service"): _, result = framework.NewInformer( &cache.ListWatch{ @@ -136,6 +145,7 @@ func (r *replenishmentControllerFactory) NewController(options *ReplenishmentCon &api.Service{}, options.ResyncPeriod(), framework.ResourceEventHandlerFuncs{ + UpdateFunc: ServiceReplenishmentUpdateFunc(options), DeleteFunc: ObjectReplenishmentDeleteFunc(options), }, ) @@ -208,3 +218,14 @@ func (r *replenishmentControllerFactory) NewController(options *ReplenishmentCon } return result, nil } + +// ServiceReplenishmentUpdateFunc will replenish if the old service was quota tracked but the new is not +func ServiceReplenishmentUpdateFunc(options *ReplenishmentControllerOptions) func(oldObj, newObj interface{}) { + return func(oldObj, newObj interface{}) { + oldService := oldObj.(*api.Service) + newService := newObj.(*api.Service) + if core.QuotaServiceType(oldService) || core.QuotaServiceType(newService) { + options.ReplenishmentFunc(options.GroupKind, newService.Namespace, newService) + } + } +} diff --git 
a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/resourcequota/replenishment_controller_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/resourcequota/replenishment_controller_test.go new file mode 100644 index 000000000000..b7bb6650225e --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/resourcequota/replenishment_controller_test.go @@ -0,0 +1,121 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resourcequota + +import ( + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/controller" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/util/intstr" +) + +// testReplenishment lets us test replenishment functions are invoked +type testReplenishment struct { + groupKind unversioned.GroupKind + namespace string +} + +// mock function that holds onto the last kind that was replenished +func (t *testReplenishment) Replenish(groupKind unversioned.GroupKind, namespace string, object runtime.Object) { + t.groupKind = groupKind + t.namespace = namespace +} + +func TestPodReplenishmentUpdateFunc(t *testing.T) { + mockReplenish := &testReplenishment{} + options := ReplenishmentControllerOptions{ + GroupKind: api.Kind("Pod"), + ReplenishmentFunc: mockReplenish.Replenish, + ResyncPeriod: controller.NoResyncPeriodFunc, + } + oldPod := &api.Pod{ + ObjectMeta: api.ObjectMeta{Namespace: "test", Name: "pod"}, + Status: api.PodStatus{Phase: api.PodRunning}, + } + newPod := &api.Pod{ + ObjectMeta: api.ObjectMeta{Namespace: "test", Name: "pod"}, + Status: api.PodStatus{Phase: api.PodFailed}, + } + updateFunc := PodReplenishmentUpdateFunc(&options) + updateFunc(oldPod, newPod) + if mockReplenish.groupKind != api.Kind("Pod") { + t.Errorf("Unexpected group kind %v", mockReplenish.groupKind) + } + if mockReplenish.namespace != oldPod.Namespace { + t.Errorf("Unexpected namespace %v", mockReplenish.namespace) + } +} + +func TestObjectReplenishmentDeleteFunc(t *testing.T) { + mockReplenish := &testReplenishment{} + options := ReplenishmentControllerOptions{ + GroupKind: api.Kind("Pod"), + ReplenishmentFunc: mockReplenish.Replenish, + ResyncPeriod: controller.NoResyncPeriodFunc, + } + oldPod := &api.Pod{ + ObjectMeta: api.ObjectMeta{Namespace: "test", Name: "pod"}, + Status: api.PodStatus{Phase: api.PodRunning}, + } + deleteFunc := ObjectReplenishmentDeleteFunc(&options) + deleteFunc(oldPod) + if mockReplenish.groupKind != api.Kind("Pod") { + t.Errorf("Unexpected group kind %v", mockReplenish.groupKind) + } + if mockReplenish.namespace != oldPod.Namespace { + t.Errorf("Unexpected namespace %v", mockReplenish.namespace) + } +} + +func TestServiceReplenishmentUpdateFunc(t *testing.T) { + mockReplenish := &testReplenishment{} + options := ReplenishmentControllerOptions{ + GroupKind: api.Kind("Service"), + ReplenishmentFunc: mockReplenish.Replenish, + ResyncPeriod: controller.NoResyncPeriodFunc, + } + oldService := &api.Service{ + ObjectMeta: 
api.ObjectMeta{Namespace: "test", Name: "mysvc"}, + Spec: api.ServiceSpec{ + Type: api.ServiceTypeNodePort, + Ports: []api.ServicePort{{ + Port: 80, + TargetPort: intstr.FromInt(80), + }}, + }, + } + newService := &api.Service{ + ObjectMeta: api.ObjectMeta{Namespace: "test", Name: "mysvc"}, + Spec: api.ServiceSpec{ + Type: api.ServiceTypeClusterIP, + Ports: []api.ServicePort{{ + Port: 80, + TargetPort: intstr.FromInt(80), + }}}, + } + updateFunc := ServiceReplenishmentUpdateFunc(&options) + updateFunc(oldService, newService) + if mockReplenish.groupKind != api.Kind("Service") { + t.Errorf("Unexpected group kind %v", mockReplenish.groupKind) + } + if mockReplenish.namespace != oldService.Namespace { + t.Errorf("Unexpected namespace %v", mockReplenish.namespace) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/resourcequota/resource_quota_controller.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/resourcequota/resource_quota_controller.go index 0a21ebf57f42..a64781c012e0 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/resourcequota/resource_quota_controller.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/resourcequota/resource_quota_controller.go @@ -29,6 +29,7 @@ import ( "k8s.io/kubernetes/pkg/controller/framework" "k8s.io/kubernetes/pkg/quota" "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/util/metrics" utilruntime "k8s.io/kubernetes/pkg/util/runtime" "k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/workqueue" @@ -61,7 +62,7 @@ type ResourceQuotaController struct { // Watches changes to all resource quota rqController *framework.Controller // ResourceQuota objects that need to be synchronized - queue *workqueue.Type + queue workqueue.RateLimitingInterface // To allow injection of syncUsage for testing. syncHandler func(key string) error // function that controls full recalculation of quota usage @@ -69,19 +70,21 @@ type ResourceQuotaController struct { // knows how to calculate usage registry quota.Registry // controllers monitoring to notify for replenishment - replenishmentControllers []*framework.Controller + replenishmentControllers []framework.ControllerInterface } func NewResourceQuotaController(options *ResourceQuotaControllerOptions) *ResourceQuotaController { // build the resource quota controller rq := &ResourceQuotaController{ kubeClient: options.KubeClient, - queue: workqueue.New(), + queue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()), resyncPeriod: options.ResyncPeriod, registry: options.Registry, - replenishmentControllers: []*framework.Controller{}, + replenishmentControllers: []framework.ControllerInterface{}, + } + if options.KubeClient != nil && options.KubeClient.Core().GetRESTClient().GetRateLimiter() != nil { + metrics.RegisterMetricAndTrackRateLimiterUsage("resource_quota_controller", options.KubeClient.Core().GetRESTClient().GetRateLimiter()) } - // set the synchronization handler rq.syncHandler = rq.syncResourceQuotaFromKey @@ -160,19 +163,26 @@ func (rq *ResourceQuotaController) enqueueResourceQuota(obj interface{}) { // worker runs a worker thread that just dequeues items, processes them, and marks them done. // It enforces that the syncHandler is never invoked concurrently with the same key. 
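The rewritten worker below packages each dequeue into a workFunc so the deferred Done always runs, and swaps a bare re-Add on failure for AddRateLimited with Forget on success. The same drain-loop pattern, written generically against the workqueue interface (the sync callback is a stand-in):

package sketch

import "k8s.io/kubernetes/pkg/util/workqueue"

// runWorker drains a rate-limited queue until it is shut down. Failed keys
// are requeued with backoff; successful ones have their failure history
// cleared so later retries start fast again.
func runWorker(queue workqueue.RateLimitingInterface, sync func(key string) error) {
	for {
		key, quit := queue.Get()
		if quit {
			return
		}
		func() {
			defer queue.Done(key)
			if err := sync(key.(string)); err != nil {
				queue.AddRateLimited(key) // retry with backoff
				return
			}
			queue.Forget(key) // success: reset the backoff counter
		}()
	}
}

Note that Forget only clears the per-item backoff state; the item itself leaves the queue once Done runs without a re-add.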
func (rq *ResourceQuotaController) worker() { + workFunc := func() bool { + key, quit := rq.queue.Get() + if quit { + return true + } + defer rq.queue.Done(key) + err := rq.syncHandler(key.(string)) + if err == nil { + rq.queue.Forget(key) + return false + } + utilruntime.HandleError(err) + rq.queue.AddRateLimited(key) + return false + } for { - func() { - key, quit := rq.queue.Get() - if quit { - return - } - defer rq.queue.Done(key) - err := rq.syncHandler(key.(string)) - if err != nil { - utilruntime.HandleError(err) - rq.queue.Add(key) - } - }() + if quit := workFunc(); quit { + glog.Infof("resource quota controller worker shutting down") + return + } } } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/resourcequota/resource_quota_controller_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/resourcequota/resource_quota_controller_test.go new file mode 100644 index 000000000000..71ef5df79b44 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/resourcequota/resource_quota_controller_test.go @@ -0,0 +1,302 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resourcequota + +import ( + "strings" + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/resource" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" + "k8s.io/kubernetes/pkg/client/testing/core" + "k8s.io/kubernetes/pkg/controller" + "k8s.io/kubernetes/pkg/quota/install" + "k8s.io/kubernetes/pkg/util/sets" +) + +func getResourceList(cpu, memory string) api.ResourceList { + res := api.ResourceList{} + if cpu != "" { + res[api.ResourceCPU] = resource.MustParse(cpu) + } + if memory != "" { + res[api.ResourceMemory] = resource.MustParse(memory) + } + return res +} + +func getResourceRequirements(requests, limits api.ResourceList) api.ResourceRequirements { + res := api.ResourceRequirements{} + res.Requests = requests + res.Limits = limits + return res +} + +func TestSyncResourceQuota(t *testing.T) { + podList := api.PodList{ + Items: []api.Pod{ + { + ObjectMeta: api.ObjectMeta{Name: "pod-running", Namespace: "testing"}, + Status: api.PodStatus{Phase: api.PodRunning}, + Spec: api.PodSpec{ + Volumes: []api.Volume{{Name: "vol"}}, + Containers: []api.Container{{Name: "ctr", Image: "image", Resources: getResourceRequirements(getResourceList("100m", "1Gi"), getResourceList("", ""))}}, + }, + }, + { + ObjectMeta: api.ObjectMeta{Name: "pod-running-2", Namespace: "testing"}, + Status: api.PodStatus{Phase: api.PodRunning}, + Spec: api.PodSpec{ + Volumes: []api.Volume{{Name: "vol"}}, + Containers: []api.Container{{Name: "ctr", Image: "image", Resources: getResourceRequirements(getResourceList("100m", "1Gi"), getResourceList("", ""))}}, + }, + }, + { + ObjectMeta: api.ObjectMeta{Name: "pod-failed", Namespace: "testing"}, + Status: api.PodStatus{Phase: api.PodFailed}, + Spec: api.PodSpec{ + Volumes: []api.Volume{{Name: "vol"}}, + Containers: 
[]api.Container{{Name: "ctr", Image: "image", Resources: getResourceRequirements(getResourceList("100m", "1Gi"), getResourceList("", ""))}}, + }, + }, + }, + } + resourceQuota := api.ResourceQuota{ + ObjectMeta: api.ObjectMeta{Name: "quota", Namespace: "testing"}, + Spec: api.ResourceQuotaSpec{ + Hard: api.ResourceList{ + api.ResourceCPU: resource.MustParse("3"), + api.ResourceMemory: resource.MustParse("100Gi"), + api.ResourcePods: resource.MustParse("5"), + }, + }, + } + expectedUsage := api.ResourceQuota{ + Status: api.ResourceQuotaStatus{ + Hard: api.ResourceList{ + api.ResourceCPU: resource.MustParse("3"), + api.ResourceMemory: resource.MustParse("100Gi"), + api.ResourcePods: resource.MustParse("5"), + }, + Used: api.ResourceList{ + api.ResourceCPU: resource.MustParse("200m"), + api.ResourceMemory: resource.MustParse("2Gi"), + api.ResourcePods: resource.MustParse("2"), + }, + }, + } + + kubeClient := fake.NewSimpleClientset(&podList, &resourceQuota) + resourceQuotaControllerOptions := &ResourceQuotaControllerOptions{ + KubeClient: kubeClient, + ResyncPeriod: controller.NoResyncPeriodFunc, + Registry: install.NewRegistry(kubeClient), + GroupKindsToReplenish: []unversioned.GroupKind{ + api.Kind("Pod"), + api.Kind("Service"), + api.Kind("ReplicationController"), + api.Kind("PersistentVolumeClaim"), + }, + ControllerFactory: NewReplenishmentControllerFactoryFromClient(kubeClient), + ReplenishmentResyncPeriod: controller.NoResyncPeriodFunc, + } + quotaController := NewResourceQuotaController(resourceQuotaControllerOptions) + err := quotaController.syncResourceQuota(resourceQuota) + if err != nil { + t.Fatalf("Unexpected error %v", err) + } + expectedActionSet := sets.NewString( + strings.Join([]string{"list", "replicationcontrollers", ""}, "-"), + strings.Join([]string{"list", "services", ""}, "-"), + strings.Join([]string{"list", "pods", ""}, "-"), + strings.Join([]string{"list", "resourcequotas", ""}, "-"), + strings.Join([]string{"list", "secrets", ""}, "-"), + strings.Join([]string{"list", "persistentvolumeclaims", ""}, "-"), + strings.Join([]string{"update", "resourcequotas", "status"}, "-"), + ) + actionSet := sets.NewString() + for _, action := range kubeClient.Actions() { + actionSet.Insert(strings.Join([]string{action.GetVerb(), action.GetResource().Resource, action.GetSubresource()}, "-")) + } + if !actionSet.HasAll(expectedActionSet.List()...) 
{ + t.Errorf("Expected actions:\n%v\n but got:\n%v\nDifference:\n%v", expectedActionSet, actionSet, expectedActionSet.Difference(actionSet)) + } + + lastActionIndex := len(kubeClient.Actions()) - 1 + usage := kubeClient.Actions()[lastActionIndex].(core.UpdateAction).GetObject().(*api.ResourceQuota) + + // ensure hard and used limits are what we expected + for k, v := range expectedUsage.Status.Hard { + actual := usage.Status.Hard[k] + actualValue := actual.String() + expectedValue := v.String() + if expectedValue != actualValue { + t.Errorf("Usage Hard: Key: %v, Expected: %v, Actual: %v", k, expectedValue, actualValue) + } + } + for k, v := range expectedUsage.Status.Used { + actual := usage.Status.Used[k] + actualValue := actual.String() + expectedValue := v.String() + if expectedValue != actualValue { + t.Errorf("Usage Used: Key: %v, Expected: %v, Actual: %v", k, expectedValue, actualValue) + } + } +} + +func TestSyncResourceQuotaSpecChange(t *testing.T) { + resourceQuota := api.ResourceQuota{ + Spec: api.ResourceQuotaSpec{ + Hard: api.ResourceList{ + api.ResourceCPU: resource.MustParse("4"), + }, + }, + Status: api.ResourceQuotaStatus{ + Hard: api.ResourceList{ + api.ResourceCPU: resource.MustParse("3"), + }, + Used: api.ResourceList{ + api.ResourceCPU: resource.MustParse("0"), + }, + }, + } + + expectedUsage := api.ResourceQuota{ + Status: api.ResourceQuotaStatus{ + Hard: api.ResourceList{ + api.ResourceCPU: resource.MustParse("4"), + }, + Used: api.ResourceList{ + api.ResourceCPU: resource.MustParse("0"), + }, + }, + } + + kubeClient := fake.NewSimpleClientset(&resourceQuota) + resourceQuotaControllerOptions := &ResourceQuotaControllerOptions{ + KubeClient: kubeClient, + ResyncPeriod: controller.NoResyncPeriodFunc, + Registry: install.NewRegistry(kubeClient), + GroupKindsToReplenish: []unversioned.GroupKind{ + api.Kind("Pod"), + api.Kind("Service"), + api.Kind("ReplicationController"), + api.Kind("PersistentVolumeClaim"), + }, + ControllerFactory: NewReplenishmentControllerFactoryFromClient(kubeClient), + ReplenishmentResyncPeriod: controller.NoResyncPeriodFunc, + } + quotaController := NewResourceQuotaController(resourceQuotaControllerOptions) + err := quotaController.syncResourceQuota(resourceQuota) + if err != nil { + t.Fatalf("Unexpected error %v", err) + } + + expectedActionSet := sets.NewString( + strings.Join([]string{"list", "replicationcontrollers", ""}, "-"), + strings.Join([]string{"list", "services", ""}, "-"), + strings.Join([]string{"list", "pods", ""}, "-"), + strings.Join([]string{"list", "resourcequotas", ""}, "-"), + strings.Join([]string{"list", "secrets", ""}, "-"), + strings.Join([]string{"list", "persistentvolumeclaims", ""}, "-"), + strings.Join([]string{"update", "resourcequotas", "status"}, "-"), + ) + actionSet := sets.NewString() + for _, action := range kubeClient.Actions() { + actionSet.Insert(strings.Join([]string{action.GetVerb(), action.GetResource().Resource, action.GetSubresource()}, "-")) + } + if !actionSet.HasAll(expectedActionSet.List()...) 
{ + t.Errorf("Expected actions:\n%v\n but got:\n%v\nDifference:\n%v", expectedActionSet, actionSet, expectedActionSet.Difference(actionSet)) + } + + lastActionIndex := len(kubeClient.Actions()) - 1 + usage := kubeClient.Actions()[lastActionIndex].(core.UpdateAction).GetObject().(*api.ResourceQuota) + + // ensure hard and used limits are what we expected + for k, v := range expectedUsage.Status.Hard { + actual := usage.Status.Hard[k] + actualValue := actual.String() + expectedValue := v.String() + if expectedValue != actualValue { + t.Errorf("Usage Hard: Key: %v, Expected: %v, Actual: %v", k, expectedValue, actualValue) + } + } + for k, v := range expectedUsage.Status.Used { + actual := usage.Status.Used[k] + actualValue := actual.String() + expectedValue := v.String() + if expectedValue != actualValue { + t.Errorf("Usage Used: Key: %v, Expected: %v, Actual: %v", k, expectedValue, actualValue) + } + } + +} + +func TestSyncResourceQuotaNoChange(t *testing.T) { + resourceQuota := api.ResourceQuota{ + Spec: api.ResourceQuotaSpec{ + Hard: api.ResourceList{ + api.ResourceCPU: resource.MustParse("4"), + }, + }, + Status: api.ResourceQuotaStatus{ + Hard: api.ResourceList{ + api.ResourceCPU: resource.MustParse("4"), + }, + Used: api.ResourceList{ + api.ResourceCPU: resource.MustParse("0"), + }, + }, + } + + kubeClient := fake.NewSimpleClientset(&api.PodList{}, &resourceQuota) + resourceQuotaControllerOptions := &ResourceQuotaControllerOptions{ + KubeClient: kubeClient, + ResyncPeriod: controller.NoResyncPeriodFunc, + Registry: install.NewRegistry(kubeClient), + GroupKindsToReplenish: []unversioned.GroupKind{ + api.Kind("Pod"), + api.Kind("Service"), + api.Kind("ReplicationController"), + api.Kind("PersistentVolumeClaim"), + }, + ControllerFactory: NewReplenishmentControllerFactoryFromClient(kubeClient), + ReplenishmentResyncPeriod: controller.NoResyncPeriodFunc, + } + quotaController := NewResourceQuotaController(resourceQuotaControllerOptions) + err := quotaController.syncResourceQuota(resourceQuota) + if err != nil { + t.Fatalf("Unexpected error %v", err) + } + expectedActionSet := sets.NewString( + strings.Join([]string{"list", "replicationcontrollers", ""}, "-"), + strings.Join([]string{"list", "services", ""}, "-"), + strings.Join([]string{"list", "pods", ""}, "-"), + strings.Join([]string{"list", "resourcequotas", ""}, "-"), + strings.Join([]string{"list", "secrets", ""}, "-"), + strings.Join([]string{"list", "persistentvolumeclaims", ""}, "-"), + ) + actionSet := sets.NewString() + for _, action := range kubeClient.Actions() { + actionSet.Insert(strings.Join([]string{action.GetVerb(), action.GetResource().Resource, action.GetSubresource()}, "-")) + } + if !actionSet.HasAll(expectedActionSet.List()...) 
{ + t.Errorf("Expected actions:\n%v\n but got:\n%v\nDifference:\n%v", expectedActionSet, actionSet, expectedActionSet.Difference(actionSet)) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/route/routecontroller.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/route/routecontroller.go index c297347cc56e..c9d72eb23b86 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/route/routecontroller.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/route/routecontroller.go @@ -24,11 +24,23 @@ import ( "github.com/golang/glog" "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/unversioned" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/cloudprovider" + "k8s.io/kubernetes/pkg/util/metrics" "k8s.io/kubernetes/pkg/util/wait" ) +const ( + // Maximal number of concurrent CreateRoute API calls. + // TODO: This should be per-provider. + maxConcurrentRouteCreations int = 200 + // Maximum number of retries of route creations. + maxRetries int = 5 + // Maximum number of retries of node status update. + updateNodeStatusMaxRetries int = 3 +) + type RouteController struct { routes cloudprovider.Routes kubeClient clientset.Interface @@ -37,6 +49,9 @@ type RouteController struct { } func New(routes cloudprovider.Routes, kubeClient clientset.Interface, clusterName string, clusterCIDR *net.IPNet) *RouteController { + if kubeClient != nil && kubeClient.Core().GetRESTClient().GetRateLimiter() != nil { + metrics.RegisterMetricAndTrackRateLimiterUsage("route_controller", kubeClient.Core().GetRESTClient().GetRateLimiter()) + } return &RouteController{ routes: routes, kubeClient: kubeClient, @@ -46,7 +61,12 @@ func New(routes cloudprovider.Routes, kubeClient clientset.Interface, clusterNam } func (rc *RouteController) Run(syncPeriod time.Duration) { - go wait.Until(func() { + // TODO: If we do just the full Resync every 5 minutes (default value) + // that means that we may wait up to 5 minutes before even starting + // creating a route for it. This is bad. + // We should have a watch on node and if we observe a new node (with CIDR?) + // trigger reconciliation for that node. + go wait.NonSlidingUntil(func() { if err := rc.reconcileNodeRoutes(); err != nil { glog.Errorf("Couldn't reconcile node routes: %v", err) } @@ -75,7 +95,10 @@ func (rc *RouteController) reconcile(nodes []api.Node, routes []*cloudprovider.R for _, route := range routes { routeMap[route.TargetInstance] = route } + wg := sync.WaitGroup{} + rateLimiter := make(chan struct{}, maxConcurrentRouteCreations) + for _, node := range nodes { // Skip if the node hasn't been assigned a CIDR yet. 
 		if node.Spec.PodCIDR == "" {
@@ -92,14 +115,27 @@ func (rc *RouteController) reconcile(nodes []api.Node, routes []*cloudprovider.R
 			nameHint := string(node.UID)
 			wg.Add(1)
 			glog.Infof("Creating route for node %s %s with hint %s", node.Name, route.DestinationCIDR, nameHint)
-			go func(nodeName string, nameHint string, route *cloudprovider.Route, startTime time.Time) {
-				if err := rc.routes.CreateRoute(rc.clusterName, nameHint, route); err != nil {
-					glog.Errorf("Could not create route %s %s for node %s after %v: %v", nameHint, route.DestinationCIDR, nodeName, time.Now().Sub(startTime), err)
-				} else {
-					glog.Infof("Created route for node %s %s with hint %s after %v", nodeName, route.DestinationCIDR, nameHint, time.Now().Sub(startTime))
+			go func(nodeName string, nameHint string, route *cloudprovider.Route) {
+				defer wg.Done()
+				for i := 0; i < maxRetries; i++ {
+					startTime := time.Now()
+					// Ensure that we don't have more than maxConcurrentRouteCreations
+					// CreateRoute calls in flight.
+					rateLimiter <- struct{}{}
+					err := rc.routes.CreateRoute(rc.clusterName, nameHint, route)
+					<-rateLimiter
+
+					rc.updateNetworkingCondition(nodeName, err == nil)
+					if err != nil {
+						glog.Errorf("Could not create route %s %s for node %s after %v: %v", nameHint, route.DestinationCIDR, nodeName, time.Now().Sub(startTime), err)
+					} else {
+						glog.Infof("Created route for node %s %s with hint %s after %v", nodeName, route.DestinationCIDR, nameHint, time.Now().Sub(startTime))
+						return
+					}
 				}
-				wg.Done()
-			}(node.Name, nameHint, route, time.Now())
+			}(node.Name, nameHint, route)
+		} else {
+			rc.updateNetworkingCondition(node.Name, true)
 		}
 		nodeCIDRs[node.Name] = node.Spec.PodCIDR
 	}
@@ -126,6 +162,65 @@ func (rc *RouteController) reconcile(nodes []api.Node, routes []*cloudprovider.R
 	return nil
 }
 
+func updateNetworkingCondition(node *api.Node, routeCreated bool) {
+	_, networkingCondition := api.GetNodeCondition(&node.Status, api.NodeNetworkUnavailable)
+	currentTime := unversioned.Now()
+	if routeCreated {
+		if networkingCondition != nil && networkingCondition.Status != api.ConditionFalse {
+			networkingCondition.Status = api.ConditionFalse
+			networkingCondition.Reason = "RouteCreated"
+			networkingCondition.Message = "RouteController created a route"
+			networkingCondition.LastTransitionTime = currentTime
+		} else if networkingCondition == nil {
+			node.Status.Conditions = append(node.Status.Conditions, api.NodeCondition{
+				Type:               api.NodeNetworkUnavailable,
+				Status:             api.ConditionFalse,
+				Reason:             "RouteCreated",
+				Message:            "RouteController created a route",
+				LastTransitionTime: currentTime,
+			})
+		}
+	} else {
+		if networkingCondition != nil && networkingCondition.Status != api.ConditionTrue {
+			networkingCondition.Status = api.ConditionTrue
+			networkingCondition.Reason = "NoRouteCreated"
+			networkingCondition.Message = "RouteController failed to create a route"
+			networkingCondition.LastTransitionTime = currentTime
+		} else if networkingCondition == nil {
+			node.Status.Conditions = append(node.Status.Conditions, api.NodeCondition{
+				Type:               api.NodeNetworkUnavailable,
+				Status:             api.ConditionTrue,
+				Reason:             "NoRouteCreated",
+				Message:            "RouteController failed to create a route",
+				LastTransitionTime: currentTime,
+			})
+		}
+	}
+}
+
+func (rc *RouteController) updateNetworkingCondition(nodeName string, routeCreated bool) error {
+	var err error
+	for i := 0; i < updateNodeStatusMaxRetries; i++ {
+		node, err := rc.kubeClient.Core().Nodes().Get(nodeName)
+		if err != nil {
+			glog.Errorf("Error getting node: %v", err)
+			continue
} + updateNetworkingCondition(node, routeCreated) + // TODO: Use Patch instead once #26381 is merged. + // See kubernetes/node-problem-detector#9 for details. + if _, err = rc.kubeClient.Core().Nodes().UpdateStatus(node); err == nil { + return nil + } + if i+1 < updateNodeStatusMaxRetries { + glog.Errorf("Error updating node %s, retrying: %v", node.Name, err) + } else { + glog.Errorf("Error updating node %s: %v", node.Name, err) + } + } + return err +} + func (rc *RouteController) isResponsibleForRoute(route *cloudprovider.Route) bool { _, cidr, err := net.ParseCIDR(route.DestinationCIDR) if err != nil { diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/route/routecontroller_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/route/routecontroller_test.go new file mode 100644 index 000000000000..398a494ee4db --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/route/routecontroller_test.go @@ -0,0 +1,266 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package route + +import ( + "net" + "testing" + "time" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" + "k8s.io/kubernetes/pkg/client/testing/core" + "k8s.io/kubernetes/pkg/cloudprovider" + fakecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/fake" +) + +func TestIsResponsibleForRoute(t *testing.T) { + myClusterName := "my-awesome-cluster" + myClusterRoute := "my-awesome-cluster-12345678-90ab-cdef-1234-567890abcdef" + testCases := []struct { + clusterCIDR string + routeName string + routeCIDR string + expectedResponsible bool + }{ + // Routes that belong to this cluster + {"10.244.0.0/16", myClusterRoute, "10.244.0.0/24", true}, + {"10.244.0.0/16", myClusterRoute, "10.244.10.0/24", true}, + {"10.244.0.0/16", myClusterRoute, "10.244.255.0/24", true}, + {"10.244.0.0/14", myClusterRoute, "10.244.0.0/24", true}, + {"10.244.0.0/14", myClusterRoute, "10.247.255.0/24", true}, + // Routes that match our naming/tagging scheme, but are outside our cidr + {"10.244.0.0/16", myClusterRoute, "10.224.0.0/24", false}, + {"10.244.0.0/16", myClusterRoute, "10.0.10.0/24", false}, + {"10.244.0.0/16", myClusterRoute, "10.255.255.0/24", false}, + {"10.244.0.0/14", myClusterRoute, "10.248.0.0/24", false}, + {"10.244.0.0/14", myClusterRoute, "10.243.255.0/24", false}, + } + for i, testCase := range testCases { + _, cidr, err := net.ParseCIDR(testCase.clusterCIDR) + if err != nil { + t.Errorf("%d. Error in test case: unparsable cidr %q", i, testCase.clusterCIDR) + } + rc := New(nil, nil, myClusterName, cidr) + route := &cloudprovider.Route{ + Name: testCase.routeName, + TargetInstance: "doesnt-matter-for-this-test", + DestinationCIDR: testCase.routeCIDR, + } + if resp := rc.isResponsibleForRoute(route); resp != testCase.expectedResponsible { + t.Errorf("%d. 
isResponsibleForRoute() = %t; want %t", i, resp, testCase.expectedResponsible) + } + } +} + +func TestReconcile(t *testing.T) { + cluster := "my-k8s" + node1 := api.Node{ObjectMeta: api.ObjectMeta{Name: "node-1", UID: "01"}, Spec: api.NodeSpec{PodCIDR: "10.120.0.0/24"}} + node2 := api.Node{ObjectMeta: api.ObjectMeta{Name: "node-2", UID: "02"}, Spec: api.NodeSpec{PodCIDR: "10.120.1.0/24"}} + nodeNoCidr := api.Node{ObjectMeta: api.ObjectMeta{Name: "node-2", UID: "02"}, Spec: api.NodeSpec{PodCIDR: ""}} + + testCases := []struct { + nodes []api.Node + initialRoutes []*cloudprovider.Route + expectedRoutes []*cloudprovider.Route + expectedNetworkUnavailable []bool + clientset *fake.Clientset + }{ + // 2 nodes, routes already there + { + nodes: []api.Node{ + node1, + node2, + }, + initialRoutes: []*cloudprovider.Route{ + {cluster + "-01", "node-1", "10.120.0.0/24"}, + {cluster + "-02", "node-2", "10.120.1.0/24"}, + }, + expectedRoutes: []*cloudprovider.Route{ + {cluster + "-01", "node-1", "10.120.0.0/24"}, + {cluster + "-02", "node-2", "10.120.1.0/24"}, + }, + expectedNetworkUnavailable: []bool{true, true}, + clientset: fake.NewSimpleClientset(&api.NodeList{Items: []api.Node{node1, node2}}), + }, + // 2 nodes, one route already there + { + nodes: []api.Node{ + node1, + node2, + }, + initialRoutes: []*cloudprovider.Route{ + {cluster + "-01", "node-1", "10.120.0.0/24"}, + }, + expectedRoutes: []*cloudprovider.Route{ + {cluster + "-01", "node-1", "10.120.0.0/24"}, + {cluster + "-02", "node-2", "10.120.1.0/24"}, + }, + expectedNetworkUnavailable: []bool{true, true}, + clientset: fake.NewSimpleClientset(&api.NodeList{Items: []api.Node{node1, node2}}), + }, + // 2 nodes, no routes yet + { + nodes: []api.Node{ + node1, + node2, + }, + initialRoutes: []*cloudprovider.Route{}, + expectedRoutes: []*cloudprovider.Route{ + {cluster + "-01", "node-1", "10.120.0.0/24"}, + {cluster + "-02", "node-2", "10.120.1.0/24"}, + }, + expectedNetworkUnavailable: []bool{true, true}, + clientset: fake.NewSimpleClientset(&api.NodeList{Items: []api.Node{node1, node2}}), + }, + // 2 nodes, a few too many routes + { + nodes: []api.Node{ + node1, + node2, + }, + initialRoutes: []*cloudprovider.Route{ + {cluster + "-01", "node-1", "10.120.0.0/24"}, + {cluster + "-02", "node-2", "10.120.1.0/24"}, + {cluster + "-03", "node-3", "10.120.2.0/24"}, + {cluster + "-04", "node-4", "10.120.3.0/24"}, + }, + expectedRoutes: []*cloudprovider.Route{ + {cluster + "-01", "node-1", "10.120.0.0/24"}, + {cluster + "-02", "node-2", "10.120.1.0/24"}, + }, + expectedNetworkUnavailable: []bool{true, true}, + clientset: fake.NewSimpleClientset(&api.NodeList{Items: []api.Node{node1, node2}}), + }, + // 2 nodes, 2 routes, but only 1 is right + { + nodes: []api.Node{ + node1, + node2, + }, + initialRoutes: []*cloudprovider.Route{ + {cluster + "-01", "node-1", "10.120.0.0/24"}, + {cluster + "-03", "node-3", "10.120.2.0/24"}, + }, + expectedRoutes: []*cloudprovider.Route{ + {cluster + "-01", "node-1", "10.120.0.0/24"}, + {cluster + "-02", "node-2", "10.120.1.0/24"}, + }, + expectedNetworkUnavailable: []bool{true, true}, + clientset: fake.NewSimpleClientset(&api.NodeList{Items: []api.Node{node1, node2}}), + }, + // 2 nodes, one node without CIDR assigned. 
+ { + nodes: []api.Node{ + node1, + nodeNoCidr, + }, + initialRoutes: []*cloudprovider.Route{}, + expectedRoutes: []*cloudprovider.Route{ + {cluster + "-01", "node-1", "10.120.0.0/24"}, + }, + expectedNetworkUnavailable: []bool{true, false}, + clientset: fake.NewSimpleClientset(&api.NodeList{Items: []api.Node{node1, nodeNoCidr}}), + }, + } + for i, testCase := range testCases { + cloud := &fakecloud.FakeCloud{RouteMap: make(map[string]*fakecloud.FakeRoute)} + for _, route := range testCase.initialRoutes { + fakeRoute := &fakecloud.FakeRoute{} + fakeRoute.ClusterName = cluster + fakeRoute.Route = *route + cloud.RouteMap[route.Name] = fakeRoute + } + routes, ok := cloud.Routes() + if !ok { + t.Error("Error in test: fakecloud doesn't support Routes()") + } + _, cidr, _ := net.ParseCIDR("10.120.0.0/16") + rc := New(routes, testCase.clientset, cluster, cidr) + if err := rc.reconcile(testCase.nodes, testCase.initialRoutes); err != nil { + t.Errorf("%d. Error from rc.reconcile(): %v", i, err) + } + for _, action := range testCase.clientset.Actions() { + if action.GetVerb() == "update" && action.GetResource().Resource == "nodes" { + node := action.(core.UpdateAction).GetObject().(*api.Node) + _, condition := api.GetNodeCondition(&node.Status, api.NodeNetworkUnavailable) + if condition == nil { + t.Errorf("%d. Missing NodeNetworkUnavailable condition for Node %v", i, node.Name) + } else { + check := func(index int) bool { + return (condition.Status == api.ConditionFalse) == testCase.expectedNetworkUnavailable[index] + } + index := -1 + for j := range testCase.nodes { + if testCase.nodes[j].Name == node.Name { + index = j + } + } + if index == -1 { + // Something's wrong + continue + } + if !check(index) { + t.Errorf("%d. Invalid NodeNetworkUnavailable condition for Node %v, expected %v, got %v", + i, node.Name, testCase.expectedNetworkUnavailable[index], (condition.Status == api.ConditionFalse)) + } + } + } + } + var finalRoutes []*cloudprovider.Route + var err error + timeoutChan := time.After(200 * time.Millisecond) + tick := time.NewTicker(10 * time.Millisecond) + defer tick.Stop() + poll: + for { + select { + case <-tick.C: + if finalRoutes, err = routes.ListRoutes(cluster); err == nil && routeListEqual(finalRoutes, testCase.expectedRoutes) { + break poll + } + case <-timeoutChan: + t.Errorf("%d. 
rc.reconcile() = %v, routes:\n%v\nexpected: nil, routes:\n%v\n", i, err, flatten(finalRoutes), flatten(testCase.expectedRoutes)) + break poll + } + } + } +} + +func routeListEqual(list1, list2 []*cloudprovider.Route) bool { + if len(list1) != len(list2) { + return false + } + routeMap1 := make(map[string]*cloudprovider.Route) + for _, route1 := range list1 { + routeMap1[route1.Name] = route1 + } + for _, route2 := range list2 { + if route1, exists := routeMap1[route2.Name]; !exists || *route1 != *route2 { + return false + } + } + return true +} + +func flatten(list []*cloudprovider.Route) []cloudprovider.Route { + var structList []cloudprovider.Route + for _, route := range list { + structList = append(structList, *route) + } + return structList +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/service/servicecontroller.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/service/servicecontroller.go index e8135247bd9c..0fb1820869e5 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/service/servicecontroller.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/service/servicecontroller.go @@ -18,7 +18,6 @@ package service import ( "fmt" - "net" "sort" "sync" "time" @@ -30,11 +29,12 @@ import ( "k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/client/cache" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" + unversioned_core "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned" "k8s.io/kubernetes/pkg/client/record" - unversionedcore "k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned" "k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/types" + "k8s.io/kubernetes/pkg/util/metrics" "k8s.io/kubernetes/pkg/util/runtime" ) @@ -90,9 +90,13 @@ type ServiceController struct { // (like load balancers) in sync with the registry. func New(cloud cloudprovider.Interface, kubeClient clientset.Interface, clusterName string) *ServiceController { broadcaster := record.NewBroadcaster() - broadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{kubeClient.Core().Events("")}) + broadcaster.StartRecordingToSink(&unversioned_core.EventSinkImpl{Interface: kubeClient.Core().Events("")}) recorder := broadcaster.NewRecorder(api.EventSource{Component: "service-controller"}) + if kubeClient != nil && kubeClient.Core().GetRESTClient().GetRateLimiter() != nil { + metrics.RegisterMetricAndTrackRateLimiterUsage("service_controller", kubeClient.Core().GetRESTClient().GetRateLimiter()) + } + return &ServiceController{ cloud: cloud, kubeClient: kubeClient, @@ -233,7 +237,7 @@ func (s *ServiceController) processDelta(delta *cache.Delta) (error, time.Durati namespacedName.Name = deltaService.Name cachedService = s.cache.getOrCreate(namespacedName.String()) } - glog.V(2).Infof("Got new %s delta for service: %+v", delta.Type, deltaService) + glog.V(2).Infof("Got new %s delta for service: %v", delta.Type, namespacedName) // Ensure that no other goroutine will interfere with our processing of the // service. 
@@ -250,8 +254,8 @@ func (s *ServiceController) processDelta(delta *cache.Delta) (error, time.Durati return err, cachedService.nextRetryDelay() } else if errors.IsNotFound(err) { glog.V(2).Infof("Service %v not found, ensuring load balancer is deleted", namespacedName) - s.eventRecorder.Event(service, api.EventTypeNormal, "DeletingLoadBalancer", "Deleting load balancer") - err := s.balancer.EnsureLoadBalancerDeleted(s.loadBalancerName(deltaService), s.zone.Region) + s.eventRecorder.Event(deltaService, api.EventTypeNormal, "DeletingLoadBalancer", "Deleting load balancer") + err := s.balancer.EnsureLoadBalancerDeleted(deltaService) if err != nil { message := "Error deleting load balancer (will retry): " + err.Error() s.eventRecorder.Event(deltaService, api.EventTypeWarning, "DeletingLoadBalancerFailed", message) @@ -315,7 +319,7 @@ func (s *ServiceController) createLoadBalancerIfNeeded(namespacedName types.Name // If we don't have any cached memory of the load balancer, we have to ask // the cloud provider for what it knows about it. // Technically EnsureLoadBalancerDeleted can cope, but we want to post meaningful events - _, exists, err := s.balancer.GetLoadBalancer(s.loadBalancerName(service), s.zone.Region) + _, exists, err := s.balancer.GetLoadBalancer(service) if err != nil { return fmt.Errorf("Error getting LB for service %s: %v", namespacedName, err), retryable } @@ -327,7 +331,7 @@ func (s *ServiceController) createLoadBalancerIfNeeded(namespacedName types.Name if needDelete { glog.Infof("Deleting existing load balancer for service %s that no longer needs a load balancer.", namespacedName) s.eventRecorder.Event(service, api.EventTypeNormal, "DeletingLoadBalancer", "Deleting load balancer") - if err := s.balancer.EnsureLoadBalancerDeleted(s.loadBalancerName(service), s.zone.Region); err != nil { + if err := s.balancer.EnsureLoadBalancerDeleted(service); err != nil { return err, retryable } s.eventRecorder.Event(service, api.EventTypeNormal, "DeletedLoadBalancer", "Deleted load balancer") @@ -341,8 +345,7 @@ func (s *ServiceController) createLoadBalancerIfNeeded(namespacedName types.Name // The load balancer doesn't exist yet, so create it. 
 	s.eventRecorder.Event(service, api.EventTypeNormal, "CreatingLoadBalancer", "Creating load balancer")
-
-	err := s.createLoadBalancer(service, namespacedName)
+	err := s.createLoadBalancer(service)
 	if err != nil {
 		return fmt.Errorf("Failed to create load balancer for service %s: %v", namespacedName, err), retryable
 	}
@@ -392,21 +395,16 @@ func (s *ServiceController) persistUpdate(service *api.Service) error {
 	return err
 }
 
-func (s *ServiceController) createLoadBalancer(service *api.Service, serviceName types.NamespacedName) error {
-	ports, err := getPortsForLB(service)
-	if err != nil {
-		return err
-	}
+func (s *ServiceController) createLoadBalancer(service *api.Service) error {
 	nodes, err := s.nodeLister.List()
 	if err != nil {
 		return err
 	}
-	name := s.loadBalancerName(service)
+
 	// - Only one protocol supported per service
 	// - Not all cloud providers support all protocols and the next step is expected to return
 	// an error for unsupported protocols
-	status, err := s.balancer.EnsureLoadBalancer(name, s.zone.Region, net.ParseIP(service.Spec.LoadBalancerIP),
-		ports, hostsFromNodeList(&nodes), serviceName, service.Spec.SessionAffinity, service.ObjectMeta.Annotations)
+	status, err := s.balancer.EnsureLoadBalancer(service, hostsFromNodeList(&nodes))
 	if err != nil {
 		return err
 	} else {
@@ -727,16 +725,15 @@ func (s *ServiceController) lockedUpdateLoadBalancerHosts(service *api.Service,
 	}
 
 	// This operation doesn't normally take very long (and happens pretty often), so we only record the final event
-	name := cloudprovider.GetLoadBalancerName(service)
-	err := s.balancer.UpdateLoadBalancer(name, s.zone.Region, hosts)
+	err := s.balancer.UpdateLoadBalancer(service, hosts)
 	if err == nil {
 		s.eventRecorder.Event(service, api.EventTypeNormal, "UpdatedLoadBalancer", "Updated load balancer with new hosts")
 		return nil
 	}
 
 	// It's only an actual error if the load balancer still exists.
-	if _, exists, err := s.balancer.GetLoadBalancer(name, s.zone.Region); err != nil {
-		glog.Errorf("External error while checking if load balancer %q exists: name, %v", name, err)
+	if _, exists, err := s.balancer.GetLoadBalancer(service); err != nil {
+		glog.Errorf("External error while checking if load balancer %q exists: %v", cloudprovider.GetLoadBalancerName(service), err)
 	} else if !exists {
 		return nil
 	}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/service/servicecontroller_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/service/servicecontroller_test.go
new file mode 100644
index 000000000000..8f58de10ae8b
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/service/servicecontroller_test.go
@@ -0,0 +1,329 @@
+/*
+Copyright 2015 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package service + +import ( + "reflect" + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" + fakecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/fake" + "k8s.io/kubernetes/pkg/types" +) + +const region = "us-central" + +func newService(name string, uid types.UID, serviceType api.ServiceType) *api.Service { + return &api.Service{ObjectMeta: api.ObjectMeta{Name: name, Namespace: "namespace", UID: uid}, Spec: api.ServiceSpec{Type: serviceType}} +} + +func TestCreateExternalLoadBalancer(t *testing.T) { + table := []struct { + service *api.Service + expectErr bool + expectCreateAttempt bool + }{ + { + service: &api.Service{ + ObjectMeta: api.ObjectMeta{ + Name: "no-external-balancer", + Namespace: "default", + }, + Spec: api.ServiceSpec{ + Type: api.ServiceTypeClusterIP, + }, + }, + expectErr: false, + expectCreateAttempt: false, + }, + { + service: &api.Service{ + ObjectMeta: api.ObjectMeta{ + Name: "udp-service", + Namespace: "default", + }, + Spec: api.ServiceSpec{ + Ports: []api.ServicePort{{ + Port: 80, + Protocol: api.ProtocolUDP, + }}, + Type: api.ServiceTypeLoadBalancer, + }, + }, + expectErr: false, + expectCreateAttempt: true, + }, + { + service: &api.Service{ + ObjectMeta: api.ObjectMeta{ + Name: "basic-service1", + Namespace: "default", + }, + Spec: api.ServiceSpec{ + Ports: []api.ServicePort{{ + Port: 80, + Protocol: api.ProtocolTCP, + }}, + Type: api.ServiceTypeLoadBalancer, + }, + }, + expectErr: false, + expectCreateAttempt: true, + }, + } + + for _, item := range table { + cloud := &fakecloud.FakeCloud{} + cloud.Region = region + client := &fake.Clientset{} + controller := New(cloud, client, "test-cluster") + controller.init() + cloud.Calls = nil // ignore any cloud calls made in init() + client.ClearActions() // ignore any client calls made in init() + err, _ := controller.createLoadBalancerIfNeeded(types.NamespacedName{Namespace: "foo", Name: "bar"}, item.service, nil) + if !item.expectErr && err != nil { + t.Errorf("unexpected error: %v", err) + } else if item.expectErr && err == nil { + t.Errorf("expected error creating %v, got nil", item.service) + } + actions := client.Actions() + if !item.expectCreateAttempt { + if len(cloud.Calls) > 0 { + t.Errorf("unexpected cloud provider calls: %v", cloud.Calls) + } + if len(actions) > 0 { + t.Errorf("unexpected client actions: %v", actions) + } + } else { + var balancer *fakecloud.FakeBalancer + for k := range cloud.Balancers { + if balancer == nil { + b := cloud.Balancers[k] + balancer = &b + } else { + t.Errorf("expected one load balancer to be created, got %v", cloud.Balancers) + break + } + } + if balancer == nil { + t.Errorf("expected one load balancer to be created, got none") + } else if balancer.Name != controller.loadBalancerName(item.service) || + balancer.Region != region || + balancer.Ports[0].Port != item.service.Spec.Ports[0].Port { + t.Errorf("created load balancer has incorrect parameters: %v", balancer) + } + actionFound := false + for _, action := range actions { + if action.GetVerb() == "update" && action.GetResource().Resource == "services" { + actionFound = true + } + } + if !actionFound { + t.Errorf("expected updated service to be sent to client, got these actions instead: %v", actions) + } + } + } +} + +// TODO: Finish converting and update comments +func TestUpdateNodesInExternalLoadBalancer(t *testing.T) { + hosts := []string{"node0", "node1", "node73"} + table := []struct { + services []*api.Service + expectedUpdateCalls 
[]fakecloud.FakeUpdateBalancerCall
+	}{
+		{
+			// No services present: no calls should be made.
+			services:            []*api.Service{},
+			expectedUpdateCalls: nil,
+		},
+		{
+			// Services do not have external load balancers: no calls should be made.
+			services: []*api.Service{
+				newService("s0", "111", api.ServiceTypeClusterIP),
+				newService("s1", "222", api.ServiceTypeNodePort),
+			},
+			expectedUpdateCalls: nil,
+		},
+		{
+			// One service has an external load balancer: one call should be made.
+			services: []*api.Service{
+				newService("s0", "333", api.ServiceTypeLoadBalancer),
+			},
+			expectedUpdateCalls: []fakecloud.FakeUpdateBalancerCall{
+				{newService("s0", "333", api.ServiceTypeLoadBalancer), hosts},
+			},
+		},
+		{
+			// Three services have an external load balancer: three calls.
+			services: []*api.Service{
+				newService("s0", "444", api.ServiceTypeLoadBalancer),
+				newService("s1", "555", api.ServiceTypeLoadBalancer),
+				newService("s2", "666", api.ServiceTypeLoadBalancer),
+			},
+			expectedUpdateCalls: []fakecloud.FakeUpdateBalancerCall{
+				{newService("s0", "444", api.ServiceTypeLoadBalancer), hosts},
+				{newService("s1", "555", api.ServiceTypeLoadBalancer), hosts},
+				{newService("s2", "666", api.ServiceTypeLoadBalancer), hosts},
+			},
+		},
+		{
+			// Two services have an external load balancer and two don't: two calls.
+			services: []*api.Service{
+				newService("s0", "777", api.ServiceTypeNodePort),
+				newService("s1", "888", api.ServiceTypeLoadBalancer),
+				newService("s3", "999", api.ServiceTypeLoadBalancer),
+				newService("s4", "123", api.ServiceTypeClusterIP),
+			},
+			expectedUpdateCalls: []fakecloud.FakeUpdateBalancerCall{
+				{newService("s1", "888", api.ServiceTypeLoadBalancer), hosts},
+				{newService("s3", "999", api.ServiceTypeLoadBalancer), hosts},
+			},
+		},
+		{
+			// One service has an external load balancer and one is nil: one call.
+ services: []*api.Service{ + newService("s0", "234", api.ServiceTypeLoadBalancer), + nil, + }, + expectedUpdateCalls: []fakecloud.FakeUpdateBalancerCall{ + {newService("s0", "234", api.ServiceTypeLoadBalancer), hosts}, + }, + }, + } + for _, item := range table { + cloud := &fakecloud.FakeCloud{} + + cloud.Region = region + client := &fake.Clientset{} + controller := New(cloud, client, "test-cluster2") + controller.init() + cloud.Calls = nil // ignore any cloud calls made in init() + + var services []*cachedService + for _, service := range item.services { + services = append(services, &cachedService{lastState: service, appliedState: service}) + } + if err := controller.updateLoadBalancerHosts(services, hosts); err != nil { + t.Errorf("unexpected error: %v", err) + } + if !reflect.DeepEqual(item.expectedUpdateCalls, cloud.UpdateCalls) { + t.Errorf("expected update calls mismatch, expected %+v, got %+v", item.expectedUpdateCalls, cloud.UpdateCalls) + } + } +} + +func TestHostsFromNodeList(t *testing.T) { + tests := []struct { + nodes *api.NodeList + expectedHosts []string + }{ + { + nodes: &api.NodeList{}, + expectedHosts: []string{}, + }, + { + nodes: &api.NodeList{ + Items: []api.Node{ + { + ObjectMeta: api.ObjectMeta{Name: "foo"}, + Status: api.NodeStatus{Phase: api.NodeRunning}, + }, + { + ObjectMeta: api.ObjectMeta{Name: "bar"}, + Status: api.NodeStatus{Phase: api.NodeRunning}, + }, + }, + }, + expectedHosts: []string{"foo", "bar"}, + }, + { + nodes: &api.NodeList{ + Items: []api.Node{ + { + ObjectMeta: api.ObjectMeta{Name: "foo"}, + Status: api.NodeStatus{Phase: api.NodeRunning}, + }, + { + ObjectMeta: api.ObjectMeta{Name: "bar"}, + Status: api.NodeStatus{Phase: api.NodeRunning}, + }, + { + ObjectMeta: api.ObjectMeta{Name: "unschedulable"}, + Spec: api.NodeSpec{Unschedulable: true}, + Status: api.NodeStatus{Phase: api.NodeRunning}, + }, + }, + }, + expectedHosts: []string{"foo", "bar"}, + }, + } + + for _, test := range tests { + hosts := hostsFromNodeList(test.nodes) + if !reflect.DeepEqual(hosts, test.expectedHosts) { + t.Errorf("expected: %v, saw: %v", test.expectedHosts, hosts) + } + } +} + +func TestGetNodeConditionPredicate(t *testing.T) { + tests := []struct { + node api.Node + expectAccept bool + name string + }{ + { + node: api.Node{}, + expectAccept: false, + name: "empty", + }, + { + node: api.Node{ + Status: api.NodeStatus{ + Conditions: []api.NodeCondition{ + {Type: api.NodeReady, Status: api.ConditionTrue}, + }, + }, + }, + expectAccept: true, + name: "basic", + }, + { + node: api.Node{ + Spec: api.NodeSpec{Unschedulable: true}, + Status: api.NodeStatus{ + Conditions: []api.NodeCondition{ + {Type: api.NodeReady, Status: api.ConditionTrue}, + }, + }, + }, + expectAccept: false, + name: "unschedulable", + }, + } + pred := getNodeConditionPredicate() + for _, test := range tests { + accept := pred(test.node) + if accept != test.expectAccept { + t.Errorf("Test failed for %s, expected %v, saw %v", test.name, test.expectAccept, accept) + } + } +} + +// TODO(a-robinson): Add tests for update/sync/delete. 
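Aside: TestHostsFromNodeList and TestGetNodeConditionPredicate above pin down two filtering rules (unschedulable nodes are dropped from the host list; the condition predicate additionally requires a Ready condition with status True). Below is a minimal, self-contained sketch of those rules, using simplified stand-in types and function names rather than the vendored k8s.io/kubernetes/pkg/api types or the controller's real helpers.

// Sketch only: a simplified model of the node filtering the tests above assert.
// nodeCondition, node, acceptNode, and hostNames are illustrative stand-ins.
package main

import "fmt"

type nodeCondition struct {
	Type   string // e.g. "Ready"
	Status string // e.g. "True"
}

type node struct {
	Name          string
	Unschedulable bool
	Conditions    []nodeCondition
}

// acceptNode mirrors the predicate test: reject unschedulable nodes and
// require a Ready condition with status True.
func acceptNode(n node) bool {
	if n.Unschedulable {
		return false
	}
	for _, c := range n.Conditions {
		if c.Type == "Ready" && c.Status == "True" {
			return true
		}
	}
	return false
}

// hostNames mirrors the hostsFromNodeList test: unschedulable nodes are
// dropped, every other node contributes its name to the host list.
func hostNames(nodes []node) []string {
	hosts := []string{}
	for _, n := range nodes {
		if n.Unschedulable {
			continue
		}
		hosts = append(hosts, n.Name)
	}
	return hosts
}

func main() {
	nodes := []node{
		{Name: "foo"},
		{Name: "bar", Conditions: []nodeCondition{{Type: "Ready", Status: "True"}}},
		{Name: "unschedulable", Unschedulable: true},
	}
	fmt.Println(hostNames(nodes))     // [foo bar]
	fmt.Println(acceptNode(nodes[0])) // false: no Ready condition
	fmt.Println(acceptNode(nodes[1])) // true
}

The real functions operate on api.NodeList and api.Node; the sketch only models the accept/reject behavior the tests check.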
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/serviceaccount/serviceaccounts_controller.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/serviceaccount/serviceaccounts_controller.go index dd3853127917..084ddae500a0 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/serviceaccount/serviceaccounts_controller.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/serviceaccount/serviceaccounts_controller.go @@ -29,6 +29,7 @@ import ( "k8s.io/kubernetes/pkg/controller/framework" "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/util/metrics" "k8s.io/kubernetes/pkg/watch" ) @@ -71,7 +72,9 @@ func NewServiceAccountsController(cl clientset.Interface, options ServiceAccount client: cl, serviceAccountsToEnsure: options.ServiceAccounts, } - + if cl != nil && cl.Core().GetRESTClient().GetRateLimiter() != nil { + metrics.RegisterMetricAndTrackRateLimiterUsage("serviceaccount_controller", cl.Core().GetRESTClient().GetRateLimiter()) + } accountSelector := fields.Everything() if len(options.ServiceAccounts) == 1 { // If we're maintaining a single account, we can scope the accounts we watch to just that name diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/serviceaccount/serviceaccounts_controller_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/serviceaccount/serviceaccounts_controller_test.go new file mode 100644 index 000000000000..e55ffeb51311 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/serviceaccount/serviceaccounts_controller_test.go @@ -0,0 +1,197 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package serviceaccount + +import ( + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" + "k8s.io/kubernetes/pkg/client/testing/core" + "k8s.io/kubernetes/pkg/util/sets" +) + +type serverResponse struct { + statusCode int + obj interface{} +} + +func TestServiceAccountCreation(t *testing.T) { + ns := api.NamespaceDefault + + defaultName := "default" + managedName := "managed" + + activeNS := &api.Namespace{ + ObjectMeta: api.ObjectMeta{Name: ns}, + Status: api.NamespaceStatus{ + Phase: api.NamespaceActive, + }, + } + terminatingNS := &api.Namespace{ + ObjectMeta: api.ObjectMeta{Name: ns}, + Status: api.NamespaceStatus{ + Phase: api.NamespaceTerminating, + }, + } + defaultServiceAccount := &api.ServiceAccount{ + ObjectMeta: api.ObjectMeta{ + Name: defaultName, + Namespace: ns, + ResourceVersion: "1", + }, + } + managedServiceAccount := &api.ServiceAccount{ + ObjectMeta: api.ObjectMeta{ + Name: managedName, + Namespace: ns, + ResourceVersion: "1", + }, + } + unmanagedServiceAccount := &api.ServiceAccount{ + ObjectMeta: api.ObjectMeta{ + Name: "other-unmanaged", + Namespace: ns, + ResourceVersion: "1", + }, + } + + testcases := map[string]struct { + ExistingNamespace *api.Namespace + ExistingServiceAccounts []*api.ServiceAccount + + AddedNamespace *api.Namespace + UpdatedNamespace *api.Namespace + DeletedServiceAccount *api.ServiceAccount + + ExpectCreatedServiceAccounts []string + }{ + "new active namespace missing serviceaccounts": { + ExistingServiceAccounts: []*api.ServiceAccount{}, + AddedNamespace: activeNS, + ExpectCreatedServiceAccounts: sets.NewString(defaultName, managedName).List(), + }, + "new active namespace missing serviceaccount": { + ExistingServiceAccounts: []*api.ServiceAccount{managedServiceAccount}, + AddedNamespace: activeNS, + ExpectCreatedServiceAccounts: []string{defaultName}, + }, + "new active namespace with serviceaccounts": { + ExistingServiceAccounts: []*api.ServiceAccount{defaultServiceAccount, managedServiceAccount}, + AddedNamespace: activeNS, + ExpectCreatedServiceAccounts: []string{}, + }, + + "new terminating namespace": { + ExistingServiceAccounts: []*api.ServiceAccount{}, + AddedNamespace: terminatingNS, + ExpectCreatedServiceAccounts: []string{}, + }, + + "updated active namespace missing serviceaccounts": { + ExistingServiceAccounts: []*api.ServiceAccount{}, + UpdatedNamespace: activeNS, + ExpectCreatedServiceAccounts: sets.NewString(defaultName, managedName).List(), + }, + "updated active namespace missing serviceaccount": { + ExistingServiceAccounts: []*api.ServiceAccount{defaultServiceAccount}, + UpdatedNamespace: activeNS, + ExpectCreatedServiceAccounts: []string{managedName}, + }, + "updated active namespace with serviceaccounts": { + ExistingServiceAccounts: []*api.ServiceAccount{defaultServiceAccount, managedServiceAccount}, + UpdatedNamespace: activeNS, + ExpectCreatedServiceAccounts: []string{}, + }, + "updated terminating namespace": { + ExistingServiceAccounts: []*api.ServiceAccount{}, + UpdatedNamespace: terminatingNS, + ExpectCreatedServiceAccounts: []string{}, + }, + + "deleted serviceaccount without namespace": { + DeletedServiceAccount: defaultServiceAccount, + ExpectCreatedServiceAccounts: []string{}, + }, + "deleted serviceaccount with active namespace": { + ExistingNamespace: activeNS, + DeletedServiceAccount: defaultServiceAccount, + ExpectCreatedServiceAccounts: []string{defaultName}, + }, + "deleted serviceaccount with terminating namespace": { + 
ExistingNamespace: terminatingNS, + DeletedServiceAccount: defaultServiceAccount, + ExpectCreatedServiceAccounts: []string{}, + }, + "deleted unmanaged serviceaccount with active namespace": { + ExistingNamespace: activeNS, + DeletedServiceAccount: unmanagedServiceAccount, + ExpectCreatedServiceAccounts: []string{}, + }, + "deleted unmanaged serviceaccount with terminating namespace": { + ExistingNamespace: terminatingNS, + DeletedServiceAccount: unmanagedServiceAccount, + ExpectCreatedServiceAccounts: []string{}, + }, + } + + for k, tc := range testcases { + client := fake.NewSimpleClientset(defaultServiceAccount, managedServiceAccount) + options := DefaultServiceAccountsControllerOptions() + options.ServiceAccounts = []api.ServiceAccount{ + {ObjectMeta: api.ObjectMeta{Name: defaultName}}, + {ObjectMeta: api.ObjectMeta{Name: managedName}}, + } + controller := NewServiceAccountsController(client, options) + + if tc.ExistingNamespace != nil { + controller.namespaces.Add(tc.ExistingNamespace) + } + for _, s := range tc.ExistingServiceAccounts { + controller.serviceAccounts.Add(s) + } + + if tc.AddedNamespace != nil { + controller.namespaces.Add(tc.AddedNamespace) + controller.namespaceAdded(tc.AddedNamespace) + } + if tc.UpdatedNamespace != nil { + controller.namespaces.Add(tc.UpdatedNamespace) + controller.namespaceUpdated(nil, tc.UpdatedNamespace) + } + if tc.DeletedServiceAccount != nil { + controller.serviceAccountDeleted(tc.DeletedServiceAccount) + } + + actions := client.Actions() + if len(tc.ExpectCreatedServiceAccounts) != len(actions) { + t.Errorf("%s: Expected to create accounts %#v. Actual actions were: %#v", k, tc.ExpectCreatedServiceAccounts, actions) + continue + } + for i, expectedName := range tc.ExpectCreatedServiceAccounts { + action := actions[i] + if !action.Matches("create", "serviceaccounts") { + t.Errorf("%s: Unexpected action %s", k, action) + break + } + createdAccount := action.(core.CreateAction).GetObject().(*api.ServiceAccount) + if createdAccount.Name != expectedName { + t.Errorf("%s: Expected %s to be created, got %s", k, expectedName, createdAccount.Name) + } + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/serviceaccount/tokens_controller.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/serviceaccount/tokens_controller.go index 168941493f4a..b660afc963de 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/serviceaccount/tokens_controller.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/serviceaccount/tokens_controller.go @@ -32,6 +32,7 @@ import ( "k8s.io/kubernetes/pkg/registry/secret" "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/serviceaccount" + "k8s.io/kubernetes/pkg/util/metrics" utilruntime "k8s.io/kubernetes/pkg/util/runtime" "k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/util/wait" @@ -68,7 +69,9 @@ func NewTokensController(cl clientset.Interface, options TokensControllerOptions token: options.TokenGenerator, rootCA: options.RootCA, } - + if cl != nil && cl.Core().GetRESTClient().GetRateLimiter() != nil { + metrics.RegisterMetricAndTrackRateLimiterUsage("serviceaccount_controller", cl.Core().GetRESTClient().GetRateLimiter()) + } e.serviceAccounts, e.serviceAccountController = framework.NewIndexerInformer( &cache.ListWatch{ ListFunc: func(options api.ListOptions) (runtime.Object, error) { diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/serviceaccount/tokens_controller_test.go 
b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/serviceaccount/tokens_controller_test.go
new file mode 100644
index 000000000000..4f2ff6fb53c7
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/serviceaccount/tokens_controller_test.go
@@ -0,0 +1,562 @@
+/*
+Copyright 2014 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package serviceaccount
+
+import (
+	"reflect"
+	"testing"
+
+	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/api/unversioned"
+	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
+	"k8s.io/kubernetes/pkg/client/testing/core"
+	"k8s.io/kubernetes/pkg/runtime"
+	utilrand "k8s.io/kubernetes/pkg/util/rand"
+)
+
+type testGenerator struct {
+	GeneratedServiceAccounts []api.ServiceAccount
+	GeneratedSecrets         []api.Secret
+	Token                    string
+	Err                      error
+}
+
+func (t *testGenerator) GenerateToken(serviceAccount api.ServiceAccount, secret api.Secret) (string, error) {
+	t.GeneratedSecrets = append(t.GeneratedSecrets, secret)
+	t.GeneratedServiceAccounts = append(t.GeneratedServiceAccounts, serviceAccount)
+	return t.Token, t.Err
+}
+
+// emptySecretReferences is used by a service account without any secrets
+func emptySecretReferences() []api.ObjectReference {
+	return []api.ObjectReference{}
+}
+
+// missingSecretReferences is used by a service account that references secrets which do not exist
+func missingSecretReferences() []api.ObjectReference {
+	return []api.ObjectReference{{Name: "missing-secret-1"}}
+}
+
+// regularSecretReferences is used by a service account that references secrets which are not ServiceAccountTokens
+func regularSecretReferences() []api.ObjectReference {
+	return []api.ObjectReference{{Name: "regular-secret-1"}}
+}
+
+// tokenSecretReferences is used by a service account that references a ServiceAccountToken secret
+func tokenSecretReferences() []api.ObjectReference {
+	return []api.ObjectReference{{Name: "token-secret-1"}}
+}
+
+// addTokenSecretReference adds a reference to the ServiceAccountToken that will be created
+func addTokenSecretReference(refs []api.ObjectReference) []api.ObjectReference {
+	return append(refs, api.ObjectReference{Name: "default-token-fplln"})
+}
+
+// serviceAccount returns a service account with the given secret refs
+func serviceAccount(secretRefs []api.ObjectReference) *api.ServiceAccount {
+	return &api.ServiceAccount{
+		ObjectMeta: api.ObjectMeta{
+			Name:            "default",
+			UID:             "12345",
+			Namespace:       "default",
+			ResourceVersion: "1",
+		},
+		Secrets: secretRefs,
+	}
+}
+
+// updatedServiceAccount returns a service account with the resource version modified
+func updatedServiceAccount(secretRefs []api.ObjectReference) *api.ServiceAccount {
+	sa := serviceAccount(secretRefs)
+	sa.ResourceVersion = "2"
+	return sa
+}
+
+// opaqueSecret returns a persisted non-ServiceAccountToken secret named "regular-secret-1"
+func opaqueSecret() *api.Secret {
+	return &api.Secret{
+		ObjectMeta: api.ObjectMeta{
+			Name:      "regular-secret-1",
+			Namespace: "default",
+
UID: "23456", + ResourceVersion: "1", + }, + Type: "Opaque", + Data: map[string][]byte{ + "mykey": []byte("mydata"), + }, + } +} + +// createdTokenSecret returns the ServiceAccountToken secret posted when creating a new token secret. +// Named "default-token-fplln", since that is the first generated name after rand.Seed(1) +func createdTokenSecret() *api.Secret { + return &api.Secret{ + ObjectMeta: api.ObjectMeta{ + Name: "default-token-fplln", + Namespace: "default", + Annotations: map[string]string{ + api.ServiceAccountNameKey: "default", + api.ServiceAccountUIDKey: "12345", + }, + }, + Type: api.SecretTypeServiceAccountToken, + Data: map[string][]byte{ + "token": []byte("ABC"), + "ca.crt": []byte("CA Data"), + "namespace": []byte("default"), + }, + } +} + +// serviceAccountTokenSecret returns an existing ServiceAccountToken secret named "token-secret-1" +func serviceAccountTokenSecret() *api.Secret { + return &api.Secret{ + ObjectMeta: api.ObjectMeta{ + Name: "token-secret-1", + Namespace: "default", + UID: "23456", + ResourceVersion: "1", + Annotations: map[string]string{ + api.ServiceAccountNameKey: "default", + api.ServiceAccountUIDKey: "12345", + }, + }, + Type: api.SecretTypeServiceAccountToken, + Data: map[string][]byte{ + "token": []byte("ABC"), + "ca.crt": []byte("CA Data"), + "namespace": []byte("default"), + }, + } +} + +// serviceAccountTokenSecretWithoutTokenData returns an existing ServiceAccountToken secret that lacks token data +func serviceAccountTokenSecretWithoutTokenData() *api.Secret { + secret := serviceAccountTokenSecret() + delete(secret.Data, api.ServiceAccountTokenKey) + return secret +} + +// serviceAccountTokenSecretWithoutCAData returns an existing ServiceAccountToken secret that lacks ca data +func serviceAccountTokenSecretWithoutCAData() *api.Secret { + secret := serviceAccountTokenSecret() + delete(secret.Data, api.ServiceAccountRootCAKey) + return secret +} + +// serviceAccountTokenSecretWithCAData returns an existing ServiceAccountToken secret with the specified ca data +func serviceAccountTokenSecretWithCAData(data []byte) *api.Secret { + secret := serviceAccountTokenSecret() + secret.Data[api.ServiceAccountRootCAKey] = data + return secret +} + +// serviceAccountTokenSecretWithoutNamespaceData returns an existing ServiceAccountToken secret that lacks namespace data +func serviceAccountTokenSecretWithoutNamespaceData() *api.Secret { + secret := serviceAccountTokenSecret() + delete(secret.Data, api.ServiceAccountNamespaceKey) + return secret +} + +// serviceAccountTokenSecretWithNamespaceData returns an existing ServiceAccountToken secret with the specified namespace data +func serviceAccountTokenSecretWithNamespaceData(data []byte) *api.Secret { + secret := serviceAccountTokenSecret() + secret.Data[api.ServiceAccountNamespaceKey] = data + return secret +} + +func TestTokenCreation(t *testing.T) { + testcases := map[string]struct { + ClientObjects []runtime.Object + + SecretsSyncPending bool + ServiceAccountsSyncPending bool + + ExistingServiceAccount *api.ServiceAccount + ExistingSecrets []*api.Secret + + AddedServiceAccount *api.ServiceAccount + UpdatedServiceAccount *api.ServiceAccount + DeletedServiceAccount *api.ServiceAccount + AddedSecret *api.Secret + UpdatedSecret *api.Secret + DeletedSecret *api.Secret + + ExpectedActions []core.Action + }{ + "new serviceaccount with no secrets": { + ClientObjects: []runtime.Object{serviceAccount(emptySecretReferences()), createdTokenSecret()}, + + AddedServiceAccount: serviceAccount(emptySecretReferences()), + 
ExpectedActions: []core.Action{ + core.NewGetAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, api.NamespaceDefault, "default"), + core.NewCreateAction(unversioned.GroupVersionResource{Resource: "secrets"}, api.NamespaceDefault, createdTokenSecret()), + core.NewUpdateAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, api.NamespaceDefault, serviceAccount(addTokenSecretReference(emptySecretReferences()))), + }, + }, + "new serviceaccount with no secrets with unsynced secret store": { + ClientObjects: []runtime.Object{serviceAccount(emptySecretReferences()), createdTokenSecret()}, + + SecretsSyncPending: true, + + AddedServiceAccount: serviceAccount(emptySecretReferences()), + ExpectedActions: []core.Action{ + core.NewGetAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, api.NamespaceDefault, "default"), + core.NewCreateAction(unversioned.GroupVersionResource{Resource: "secrets"}, api.NamespaceDefault, createdTokenSecret()), + core.NewUpdateAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, api.NamespaceDefault, serviceAccount(addTokenSecretReference(emptySecretReferences()))), + }, + }, + "new serviceaccount with missing secrets": { + ClientObjects: []runtime.Object{serviceAccount(missingSecretReferences()), createdTokenSecret()}, + + AddedServiceAccount: serviceAccount(missingSecretReferences()), + ExpectedActions: []core.Action{ + core.NewGetAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, api.NamespaceDefault, "default"), + core.NewCreateAction(unversioned.GroupVersionResource{Resource: "secrets"}, api.NamespaceDefault, createdTokenSecret()), + core.NewUpdateAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, api.NamespaceDefault, serviceAccount(addTokenSecretReference(missingSecretReferences()))), + }, + }, + "new serviceaccount with missing secrets with unsynced secret store": { + ClientObjects: []runtime.Object{serviceAccount(missingSecretReferences()), createdTokenSecret()}, + + SecretsSyncPending: true, + + AddedServiceAccount: serviceAccount(missingSecretReferences()), + ExpectedActions: []core.Action{}, + }, + "new serviceaccount with non-token secrets": { + ClientObjects: []runtime.Object{serviceAccount(regularSecretReferences()), createdTokenSecret(), opaqueSecret()}, + + AddedServiceAccount: serviceAccount(regularSecretReferences()), + ExpectedActions: []core.Action{ + core.NewGetAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, api.NamespaceDefault, "default"), + core.NewCreateAction(unversioned.GroupVersionResource{Resource: "secrets"}, api.NamespaceDefault, createdTokenSecret()), + core.NewUpdateAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, api.NamespaceDefault, serviceAccount(addTokenSecretReference(regularSecretReferences()))), + }, + }, + "new serviceaccount with token secrets": { + ClientObjects: []runtime.Object{serviceAccount(tokenSecretReferences()), serviceAccountTokenSecret()}, + ExistingSecrets: []*api.Secret{serviceAccountTokenSecret()}, + + AddedServiceAccount: serviceAccount(tokenSecretReferences()), + ExpectedActions: []core.Action{}, + }, + "new serviceaccount with no secrets with resource conflict": { + ClientObjects: []runtime.Object{updatedServiceAccount(emptySecretReferences()), createdTokenSecret()}, + + AddedServiceAccount: serviceAccount(emptySecretReferences()), + ExpectedActions: []core.Action{ + core.NewGetAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, 
api.NamespaceDefault, "default"), + }, + }, + + "updated serviceaccount with no secrets": { + ClientObjects: []runtime.Object{serviceAccount(emptySecretReferences()), createdTokenSecret()}, + + UpdatedServiceAccount: serviceAccount(emptySecretReferences()), + ExpectedActions: []core.Action{ + core.NewGetAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, api.NamespaceDefault, "default"), + core.NewCreateAction(unversioned.GroupVersionResource{Resource: "secrets"}, api.NamespaceDefault, createdTokenSecret()), + core.NewUpdateAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, api.NamespaceDefault, serviceAccount(addTokenSecretReference(emptySecretReferences()))), + }, + }, + "updated serviceaccount with no secrets with unsynced secret store": { + ClientObjects: []runtime.Object{serviceAccount(emptySecretReferences()), createdTokenSecret()}, + + SecretsSyncPending: true, + + UpdatedServiceAccount: serviceAccount(emptySecretReferences()), + ExpectedActions: []core.Action{ + core.NewGetAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, api.NamespaceDefault, "default"), + core.NewCreateAction(unversioned.GroupVersionResource{Resource: "secrets"}, api.NamespaceDefault, createdTokenSecret()), + core.NewUpdateAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, api.NamespaceDefault, serviceAccount(addTokenSecretReference(emptySecretReferences()))), + }, + }, + "updated serviceaccount with missing secrets": { + ClientObjects: []runtime.Object{serviceAccount(missingSecretReferences()), createdTokenSecret()}, + + UpdatedServiceAccount: serviceAccount(missingSecretReferences()), + ExpectedActions: []core.Action{ + core.NewGetAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, api.NamespaceDefault, "default"), + core.NewCreateAction(unversioned.GroupVersionResource{Resource: "secrets"}, api.NamespaceDefault, createdTokenSecret()), + core.NewUpdateAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, api.NamespaceDefault, serviceAccount(addTokenSecretReference(missingSecretReferences()))), + }, + }, + "updated serviceaccount with missing secrets with unsynced secret store": { + ClientObjects: []runtime.Object{serviceAccount(missingSecretReferences()), createdTokenSecret()}, + + SecretsSyncPending: true, + + UpdatedServiceAccount: serviceAccount(missingSecretReferences()), + ExpectedActions: []core.Action{}, + }, + "updated serviceaccount with non-token secrets": { + ClientObjects: []runtime.Object{serviceAccount(regularSecretReferences()), createdTokenSecret(), opaqueSecret()}, + + UpdatedServiceAccount: serviceAccount(regularSecretReferences()), + ExpectedActions: []core.Action{ + core.NewGetAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, api.NamespaceDefault, "default"), + core.NewCreateAction(unversioned.GroupVersionResource{Resource: "secrets"}, api.NamespaceDefault, createdTokenSecret()), + core.NewUpdateAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, api.NamespaceDefault, serviceAccount(addTokenSecretReference(regularSecretReferences()))), + }, + }, + "updated serviceaccount with token secrets": { + ExistingSecrets: []*api.Secret{serviceAccountTokenSecret()}, + + UpdatedServiceAccount: serviceAccount(tokenSecretReferences()), + ExpectedActions: []core.Action{}, + }, + "updated serviceaccount with no secrets with resource conflict": { + ClientObjects: []runtime.Object{updatedServiceAccount(emptySecretReferences()), createdTokenSecret()}, + + 
UpdatedServiceAccount: serviceAccount(emptySecretReferences()), + ExpectedActions: []core.Action{ + core.NewGetAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, api.NamespaceDefault, "default"), + }, + }, + + "deleted serviceaccount with no secrets": { + DeletedServiceAccount: serviceAccount(emptySecretReferences()), + ExpectedActions: []core.Action{}, + }, + "deleted serviceaccount with missing secrets": { + DeletedServiceAccount: serviceAccount(missingSecretReferences()), + ExpectedActions: []core.Action{}, + }, + "deleted serviceaccount with non-token secrets": { + ClientObjects: []runtime.Object{opaqueSecret()}, + + DeletedServiceAccount: serviceAccount(regularSecretReferences()), + ExpectedActions: []core.Action{}, + }, + "deleted serviceaccount with token secrets": { + ClientObjects: []runtime.Object{serviceAccountTokenSecret()}, + ExistingSecrets: []*api.Secret{serviceAccountTokenSecret()}, + + DeletedServiceAccount: serviceAccount(tokenSecretReferences()), + ExpectedActions: []core.Action{ + core.NewDeleteAction(unversioned.GroupVersionResource{Resource: "secrets"}, api.NamespaceDefault, "token-secret-1"), + }, + }, + + "added secret without serviceaccount": { + ClientObjects: []runtime.Object{serviceAccountTokenSecret()}, + + AddedSecret: serviceAccountTokenSecret(), + ExpectedActions: []core.Action{ + core.NewGetAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, api.NamespaceDefault, "default"), + core.NewDeleteAction(unversioned.GroupVersionResource{Resource: "secrets"}, api.NamespaceDefault, "token-secret-1"), + }, + }, + "added secret with serviceaccount": { + ExistingServiceAccount: serviceAccount(tokenSecretReferences()), + + AddedSecret: serviceAccountTokenSecret(), + ExpectedActions: []core.Action{}, + }, + "added token secret without token data": { + ClientObjects: []runtime.Object{serviceAccountTokenSecretWithoutTokenData()}, + ExistingServiceAccount: serviceAccount(tokenSecretReferences()), + + AddedSecret: serviceAccountTokenSecretWithoutTokenData(), + ExpectedActions: []core.Action{ + core.NewUpdateAction(unversioned.GroupVersionResource{Resource: "secrets"}, api.NamespaceDefault, serviceAccountTokenSecret()), + }, + }, + "added token secret without ca data": { + ClientObjects: []runtime.Object{serviceAccountTokenSecretWithoutCAData()}, + ExistingServiceAccount: serviceAccount(tokenSecretReferences()), + + AddedSecret: serviceAccountTokenSecretWithoutCAData(), + ExpectedActions: []core.Action{ + core.NewUpdateAction(unversioned.GroupVersionResource{Resource: "secrets"}, api.NamespaceDefault, serviceAccountTokenSecret()), + }, + }, + "added token secret with mismatched ca data": { + ClientObjects: []runtime.Object{serviceAccountTokenSecretWithCAData([]byte("mismatched"))}, + ExistingServiceAccount: serviceAccount(tokenSecretReferences()), + + AddedSecret: serviceAccountTokenSecretWithCAData([]byte("mismatched")), + ExpectedActions: []core.Action{ + core.NewUpdateAction(unversioned.GroupVersionResource{Resource: "secrets"}, api.NamespaceDefault, serviceAccountTokenSecret()), + }, + }, + "added token secret without namespace data": { + ClientObjects: []runtime.Object{serviceAccountTokenSecretWithoutNamespaceData()}, + ExistingServiceAccount: serviceAccount(tokenSecretReferences()), + + AddedSecret: serviceAccountTokenSecretWithoutNamespaceData(), + ExpectedActions: []core.Action{ + core.NewUpdateAction(unversioned.GroupVersionResource{Resource: "secrets"}, api.NamespaceDefault, serviceAccountTokenSecret()), + }, + }, + "added token 
secret with custom namespace data": { + ClientObjects: []runtime.Object{serviceAccountTokenSecretWithNamespaceData([]byte("custom"))}, + ExistingServiceAccount: serviceAccount(tokenSecretReferences()), + + AddedSecret: serviceAccountTokenSecretWithNamespaceData([]byte("custom")), + ExpectedActions: []core.Action{ + // no update is performed... the custom namespace is preserved + }, + }, + + "updated secret without serviceaccount": { + ClientObjects: []runtime.Object{serviceAccountTokenSecret()}, + + UpdatedSecret: serviceAccountTokenSecret(), + ExpectedActions: []core.Action{ + core.NewGetAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, api.NamespaceDefault, "default"), + core.NewDeleteAction(unversioned.GroupVersionResource{Resource: "secrets"}, api.NamespaceDefault, "token-secret-1"), + }, + }, + "updated secret with serviceaccount": { + ExistingServiceAccount: serviceAccount(tokenSecretReferences()), + + UpdatedSecret: serviceAccountTokenSecret(), + ExpectedActions: []core.Action{}, + }, + "updated token secret without token data": { + ClientObjects: []runtime.Object{serviceAccountTokenSecretWithoutTokenData()}, + ExistingServiceAccount: serviceAccount(tokenSecretReferences()), + + UpdatedSecret: serviceAccountTokenSecretWithoutTokenData(), + ExpectedActions: []core.Action{ + core.NewUpdateAction(unversioned.GroupVersionResource{Resource: "secrets"}, api.NamespaceDefault, serviceAccountTokenSecret()), + }, + }, + "updated token secret without ca data": { + ClientObjects: []runtime.Object{serviceAccountTokenSecretWithoutCAData()}, + ExistingServiceAccount: serviceAccount(tokenSecretReferences()), + + UpdatedSecret: serviceAccountTokenSecretWithoutCAData(), + ExpectedActions: []core.Action{ + core.NewUpdateAction(unversioned.GroupVersionResource{Resource: "secrets"}, api.NamespaceDefault, serviceAccountTokenSecret()), + }, + }, + "updated token secret with mismatched ca data": { + ClientObjects: []runtime.Object{serviceAccountTokenSecretWithCAData([]byte("mismatched"))}, + ExistingServiceAccount: serviceAccount(tokenSecretReferences()), + + UpdatedSecret: serviceAccountTokenSecretWithCAData([]byte("mismatched")), + ExpectedActions: []core.Action{ + core.NewUpdateAction(unversioned.GroupVersionResource{Resource: "secrets"}, api.NamespaceDefault, serviceAccountTokenSecret()), + }, + }, + "updated token secret without namespace data": { + ClientObjects: []runtime.Object{serviceAccountTokenSecretWithoutNamespaceData()}, + ExistingServiceAccount: serviceAccount(tokenSecretReferences()), + + UpdatedSecret: serviceAccountTokenSecretWithoutNamespaceData(), + ExpectedActions: []core.Action{ + core.NewUpdateAction(unversioned.GroupVersionResource{Resource: "secrets"}, api.NamespaceDefault, serviceAccountTokenSecret()), + }, + }, + "updated token secret with custom namespace data": { + ClientObjects: []runtime.Object{serviceAccountTokenSecretWithNamespaceData([]byte("custom"))}, + ExistingServiceAccount: serviceAccount(tokenSecretReferences()), + + UpdatedSecret: serviceAccountTokenSecretWithNamespaceData([]byte("custom")), + ExpectedActions: []core.Action{ + // no update is performed... 
the custom namespace is preserved + }, + }, + + "deleted secret without serviceaccount": { + DeletedSecret: serviceAccountTokenSecret(), + ExpectedActions: []core.Action{}, + }, + "deleted secret with serviceaccount with reference": { + ClientObjects: []runtime.Object{serviceAccount(tokenSecretReferences())}, + ExistingServiceAccount: serviceAccount(tokenSecretReferences()), + + DeletedSecret: serviceAccountTokenSecret(), + ExpectedActions: []core.Action{ + core.NewGetAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, api.NamespaceDefault, "default"), + core.NewUpdateAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, api.NamespaceDefault, serviceAccount(emptySecretReferences())), + }, + }, + "deleted secret with serviceaccount without reference": { + ExistingServiceAccount: serviceAccount(emptySecretReferences()), + + DeletedSecret: serviceAccountTokenSecret(), + ExpectedActions: []core.Action{ + core.NewGetAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, api.NamespaceDefault, "default"), + }, + }, + } + + for k, tc := range testcases { + + // Re-seed to reset name generation + utilrand.Seed(1) + + generator := &testGenerator{Token: "ABC"} + + client := fake.NewSimpleClientset(tc.ClientObjects...) + + controller := NewTokensController(client, TokensControllerOptions{TokenGenerator: generator, RootCA: []byte("CA Data")}) + + // Tell the token controller whether its stores have been synced + controller.serviceAccountsSynced = func() bool { return !tc.ServiceAccountsSyncPending } + controller.secretsSynced = func() bool { return !tc.SecretsSyncPending } + + if tc.ExistingServiceAccount != nil { + controller.serviceAccounts.Add(tc.ExistingServiceAccount) + } + for _, s := range tc.ExistingSecrets { + controller.secrets.Add(s) + } + + if tc.AddedServiceAccount != nil { + controller.serviceAccountAdded(tc.AddedServiceAccount) + } + if tc.UpdatedServiceAccount != nil { + controller.serviceAccountUpdated(nil, tc.UpdatedServiceAccount) + } + if tc.DeletedServiceAccount != nil { + controller.serviceAccountDeleted(tc.DeletedServiceAccount) + } + if tc.AddedSecret != nil { + controller.secretAdded(tc.AddedSecret) + } + if tc.UpdatedSecret != nil { + controller.secretUpdated(nil, tc.UpdatedSecret) + } + if tc.DeletedSecret != nil { + controller.secretDeleted(tc.DeletedSecret) + } + + actions := client.Actions() + for i, action := range actions { + if len(tc.ExpectedActions) < i+1 { + t.Errorf("%s: %d unexpected actions: %+v", k, len(actions)-len(tc.ExpectedActions), actions[i:]) + break + } + + expectedAction := tc.ExpectedActions[i] + if !reflect.DeepEqual(expectedAction, action) { + t.Errorf("%s: Expected\n\t%#v\ngot\n\t%#v", k, expectedAction, action) + continue + } + } + + if len(tc.ExpectedActions) > len(actions) { + t.Errorf("%s: %d additional expected actions:%+v", k, len(tc.ExpectedActions)-len(actions), tc.ExpectedActions[len(actions):]) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/volume/attach_detach_controller.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/volume/attach_detach_controller.go new file mode 100644 index 000000000000..234ac8044c95 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/volume/attach_detach_controller.go @@ -0,0 +1,532 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package volume implements a controller to manage volume attach and detach
+// operations.
+package volume
+
+import (
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/golang/glog"
+	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+	"k8s.io/kubernetes/pkg/cloudprovider"
+	"k8s.io/kubernetes/pkg/controller/framework"
+	"k8s.io/kubernetes/pkg/controller/volume/attacherdetacher"
+	"k8s.io/kubernetes/pkg/controller/volume/cache"
+	"k8s.io/kubernetes/pkg/controller/volume/reconciler"
+	"k8s.io/kubernetes/pkg/types"
+	"k8s.io/kubernetes/pkg/util/io"
+	"k8s.io/kubernetes/pkg/util/mount"
+	"k8s.io/kubernetes/pkg/util/runtime"
+	"k8s.io/kubernetes/pkg/volume"
+)
+
+const (
+	// ControllerManagedAnnotation is the key of the annotation on Node objects
+	// that indicates attach/detach operations for the node should be managed
+	// by the attach/detach controller
+	ControllerManagedAnnotation string = "volumes.kubernetes.io/controller-managed-attach"
+
+	// SafeToDetachAnnotation is the annotation added to the Node object by
+	// kubelet in the format "volumes.kubernetes.io/safetodetach-{volumename}"
+	// to indicate the volume has been unmounted and is safe to detach.
+	SafeToDetachAnnotation string = "volumes.kubernetes.io/safetodetach-"
+
+	// reconcilerLoopPeriod is the amount of time the reconciler loop waits
+	// between successive executions
+	reconcilerLoopPeriod time.Duration = 100 * time.Millisecond
+
+	// reconcilerMaxSafeToDetachDuration is the maximum amount of time the
+	// attach detach controller will wait for a volume to be safely detached
+	// from its node. Once this time has expired, the controller will assume
+	// the node or kubelet is unresponsive and will detach the volume anyway.
+	reconcilerMaxSafeToDetachDuration time.Duration = 10 * time.Minute
+)
+
+// AttachDetachController defines the operations supported by this controller.
+type AttachDetachController interface {
+	Run(stopCh <-chan struct{})
+}
+
+// NewAttachDetachController returns a new instance of AttachDetachController.
+func NewAttachDetachController(
+	kubeClient internalclientset.Interface,
+	podInformer framework.SharedInformer,
+	nodeInformer framework.SharedInformer,
+	pvcInformer framework.SharedInformer,
+	pvInformer framework.SharedInformer,
+	cloud cloudprovider.Interface,
+	plugins []volume.VolumePlugin) (AttachDetachController, error) {
+	// TODO: The default resyncPeriod for shared informers is 12 hours, which is
+	// unacceptable for the attach/detach controller. For example, if a pod is
+	// skipped because the node it is scheduled to didn't set its annotation in
+	// time, we don't want to have to wait 12hrs before processing the pod
+	// again.
+	// Luckily https://github.com/kubernetes/kubernetes/issues/23394 is being
+	// worked on and will split resync into resync and relist. Once that
+	// happens the resync period can be set to something much faster (30
+	// seconds).
+	// If that issue is not resolved in time, then this controller will have to
+	// consider some unappealing alternate options: use a non-shared informer
+	// and set a faster resync period even if it causes relist, or requeue
+	// dropped pods so they are continuously processed until they are accepted
+	// or deleted (probably can't do this with sharedInformer), etc.
+	adc := &attachDetachController{
+		kubeClient:  kubeClient,
+		pvcInformer: pvcInformer,
+		pvInformer:  pvInformer,
+		cloud:       cloud,
+	}
+
+	podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
+		AddFunc:    adc.podAdd,
+		UpdateFunc: adc.podUpdate,
+		DeleteFunc: adc.podDelete,
+	})
+
+	nodeInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
+		AddFunc:    adc.nodeAdd,
+		UpdateFunc: adc.nodeUpdate,
+		DeleteFunc: adc.nodeDelete,
+	})
+
+	if err := adc.volumePluginMgr.InitPlugins(plugins, adc); err != nil {
+		return nil, fmt.Errorf("Could not initialize volume plugins for Attach/Detach Controller: %+v", err)
+	}
+
+	adc.desiredStateOfWorld = cache.NewDesiredStateOfWorld(&adc.volumePluginMgr)
+	adc.actualStateOfWorld = cache.NewActualStateOfWorld(&adc.volumePluginMgr)
+	adc.attacherDetacher = attacherdetacher.NewAttacherDetacher(&adc.volumePluginMgr)
+	adc.reconciler = reconciler.NewReconciler(
+		reconcilerLoopPeriod,
+		reconcilerMaxSafeToDetachDuration,
+		adc.desiredStateOfWorld,
+		adc.actualStateOfWorld,
+		adc.attacherDetacher)
+
+	return adc, nil
+}
+
+type attachDetachController struct {
+	// kubeClient is the kube API client used by volumehost to communicate with
+	// the API server.
+	kubeClient internalclientset.Interface
+
+	// pvcInformer is the shared PVC informer used to fetch and store PVC
+	// objects from the API server. It is shared with other controllers and
+	// therefore the PVC objects in its store should be treated as immutable.
+	pvcInformer framework.SharedInformer
+
+	// pvInformer is the shared PV informer used to fetch and store PV objects
+	// from the API server. It is shared with other controllers and therefore
+	// the PV objects in its store should be treated as immutable.
+	pvInformer framework.SharedInformer
+
+	// cloud provider used by volume host
+	cloud cloudprovider.Interface
+
+	// volumePluginMgr used to initialize and fetch volume plugins
+	volumePluginMgr volume.VolumePluginMgr
+
+	// desiredStateOfWorld is a data structure containing the desired state of
+	// the world according to this controller: i.e. what nodes the controller
+	// is managing, what volumes it wants to be attached to these nodes, and
+	// which pods are scheduled to those nodes referencing the volumes.
+	// The data structure is populated by the controller using a stream of node
+	// and pod API server objects fetched by the informers.
+	desiredStateOfWorld cache.DesiredStateOfWorld
+
+	// actualStateOfWorld is a data structure containing the actual state of
+	// the world according to this controller: i.e. which volumes are attached
+	// to which nodes.
+	// The data structure is populated upon successful completion of attach and
+	// detach actions triggered by the controller and a periodic sync with
+	// storage providers for the "true" state of the world.
+	actualStateOfWorld cache.ActualStateOfWorld
+
+	// attacherDetacher is used to start asynchronous attach and detach operations
+	attacherDetacher attacherdetacher.AttacherDetacher
+
+	// reconciler is used to run an asynchronous periodic loop to reconcile the
+	// desiredStateOfWorld with the actualStateOfWorld by triggering attach
+	// detach operations using the attacherDetacher.
+	reconciler reconciler.Reconciler
+}
+
+func (adc *attachDetachController) Run(stopCh <-chan struct{}) {
+	defer runtime.HandleCrash()
+	glog.Infof("Starting Attach Detach Controller")
+
+	go adc.reconciler.Run(stopCh)
+
+	<-stopCh
+	glog.Infof("Shutting down Attach Detach Controller")
+}
+
+func (adc *attachDetachController) podAdd(obj interface{}) {
+	pod, ok := obj.(*api.Pod)
+	if pod == nil || !ok {
+		return
+	}
+
+	if pod.Spec.NodeName == "" {
+		// Ignore pods without NodeName, indicating they are not scheduled.
+		return
+	}
+
+	adc.processPodVolumes(pod, true /* addVolumes */)
+}
+
+func (adc *attachDetachController) podUpdate(oldObj, newObj interface{}) {
+	// The flow for update is the same as add.
+	adc.podAdd(newObj)
+}
+
+func (adc *attachDetachController) podDelete(obj interface{}) {
+	pod, ok := obj.(*api.Pod)
+	if pod == nil || !ok {
+		return
+	}
+
+	adc.processPodVolumes(pod, false /* addVolumes */)
+}
+
+func (adc *attachDetachController) nodeAdd(obj interface{}) {
+	node, ok := obj.(*api.Node)
+	if node == nil || !ok {
+		return
+	}
+
+	nodeName := node.Name
+	if _, exists := node.Annotations[ControllerManagedAnnotation]; exists {
+		// Node specifies annotation indicating it should be managed by attach
+		// detach controller. Add it to desired state of world.
+		adc.desiredStateOfWorld.AddNode(nodeName)
+	}
+
+	adc.processSafeToDetachAnnotations(nodeName, node.Annotations)
+}
+
+func (adc *attachDetachController) nodeUpdate(oldObj, newObj interface{}) {
+	// The flow for update is the same as add.
+	adc.nodeAdd(newObj)
+}
+
+func (adc *attachDetachController) nodeDelete(obj interface{}) {
+	node, ok := obj.(*api.Node)
+	if node == nil || !ok {
+		return
+	}
+
+	nodeName := node.Name
+	if err := adc.desiredStateOfWorld.DeleteNode(nodeName); err != nil {
+		glog.V(10).Infof("%v", err)
+	}
+
+	adc.processSafeToDetachAnnotations(nodeName, node.Annotations)
+}
+
+// processPodVolumes processes the volumes in the given pod and adds them to the
+// desired state of the world if addVolumes is true, otherwise it removes them.
+func (adc *attachDetachController) processPodVolumes(
+	pod *api.Pod, addVolumes bool) {
+	if pod == nil {
+		return
+	}
+
+	if len(pod.Spec.Volumes) <= 0 {
+		return
+	}
+
+	if !adc.desiredStateOfWorld.NodeExists(pod.Spec.NodeName) {
+		// If the node the pod is scheduled to does not exist in the desired
+		// state of the world data structure, that indicates the node is not
+		// yet managed by the controller. Therefore, ignore the pod.
+		// If the node is added to the list of managed nodes in the future,
+		// future adds and updates to the pod will be processed.
+		glog.V(10).Infof(
+			"Skipping processing of pod %q/%q: it is scheduled to node %q which is not managed by the controller.",
+			pod.Namespace,
+			pod.Name,
+			pod.Spec.NodeName)
+		return
+	}
+
+	// Process the volume spec for each volume defined in the pod
+	for _, podVolume := range pod.Spec.Volumes {
+		volumeSpec, err := adc.createVolumeSpec(podVolume, pod.Namespace)
+		if err != nil {
+			glog.V(10).Infof(
+				"Error processing volume %q for pod %q/%q: %v",
+				podVolume.Name,
+				pod.Namespace,
+				pod.Name,
+				err)
+			continue
+		}
+
+		attachableVolumePlugin, err :=
+			adc.volumePluginMgr.FindAttachablePluginBySpec(volumeSpec)
+		if err != nil || attachableVolumePlugin == nil {
+			glog.V(10).Infof(
+				"Skipping volume %q for pod %q/%q: it does not implement the attacher interface. err=%v",
+				podVolume.Name,
+				pod.Namespace,
+				pod.Name,
+				err)
+			continue
+		}
+
+		if addVolumes {
+			// Add volume to desired state of world
+			_, err := adc.desiredStateOfWorld.AddPod(
+				getUniquePodName(pod), volumeSpec, pod.Spec.NodeName)
+			if err != nil {
+				glog.V(10).Infof(
+					"Failed to add volume %q for pod %q/%q to desiredStateOfWorld. %v",
+					podVolume.Name,
+					pod.Namespace,
+					pod.Name,
+					err)
+			}
+
+		} else {
+			// Remove volume from desired state of world
+			uniqueVolumeName, err := attachableVolumePlugin.GetUniqueVolumeName(volumeSpec)
+			if err != nil {
+				glog.V(10).Infof(
+					"Failed to delete volume %q for pod %q/%q from desiredStateOfWorld. GetUniqueVolumeName failed with %v",
+					podVolume.Name,
+					pod.Namespace,
+					pod.Name,
+					err)
+				continue
+			}
+			adc.desiredStateOfWorld.DeletePod(
+				getUniquePodName(pod), uniqueVolumeName, pod.Spec.NodeName)
+		}
+	}
+
+	return
+}
+
+// createVolumeSpec creates and returns a mutable volume.Spec object for the
+// specified volume. It dereferences any PVC to get the PV object it is bound
+// to, if needed.
+func (adc *attachDetachController) createVolumeSpec(
+	podVolume api.Volume, podNamespace string) (*volume.Spec, error) {
+	if pvcSource := podVolume.VolumeSource.PersistentVolumeClaim; pvcSource != nil {
+		// If podVolume is a PVC, fetch the real PV behind the claim
+		pvName, pvcUID, err := adc.getPVCFromCacheExtractPV(
+			podNamespace, pvcSource.ClaimName)
+		if err != nil {
+			return nil, fmt.Errorf("error processing PVC %q: %v", pvcSource.ClaimName, err)
+		}
+
+		// Fetch the actual PV object
+		volumeSpec, err := adc.getPVSpecFromCache(
+			pvName, pvcSource.ReadOnly, pvcUID)
+		if err != nil {
+			return nil, fmt.Errorf("error processing PVC %q: %v", pvcSource.ClaimName, err)
+		}
+
+		return volumeSpec, nil
+	}
+
+	// Do not return the original volume object: since it came from the shared
+	// informer, it may be mutated by another consumer.
+	clonedPodVolumeObj, err := api.Scheme.DeepCopy(podVolume)
+	if err != nil || clonedPodVolumeObj == nil {
+		return nil, fmt.Errorf("failed to deep copy %q volume object", podVolume.Name)
+	}
+
+	clonedPodVolume, ok := clonedPodVolumeObj.(api.Volume)
+	if !ok {
+		return nil, fmt.Errorf("failed to cast clonedPodVolume %#v to api.Volume", clonedPodVolumeObj)
+	}
+
+	return volume.NewSpecFromVolume(&clonedPodVolume), nil
+}
+
+// getPVCFromCacheExtractPV fetches the PVC object with the given namespace and
+// name from the shared internal PVC store, extracts the name of the PV it is
+// bound to, and returns it.
+// This method returns an error if a PVC object does not exist in the cache
+// with the given namespace/name.
+// This method returns an error if the PVC object's phase is not "Bound".
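+// For example (illustrative values, not from the source), a claim named
+// "claim-1" in namespace "ns" is looked up under the store key "ns/claim-1",
+// mirroring the key construction below.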
+func (adc *attachDetachController) getPVCFromCacheExtractPV(
+	namespace string, name string) (string, types.UID, error) {
+	key := name
+	if len(namespace) > 0 {
+		key = namespace + "/" + name
+	}
+
+	pvcObj, exists, err := adc.pvcInformer.GetStore().Get(key)
+	if pvcObj == nil || !exists || err != nil {
+		return "", "", fmt.Errorf(
+			"failed to find PVC %q in PVCInformer cache. %v",
+			key,
+			err)
+	}
+
+	pvc, ok := pvcObj.(*api.PersistentVolumeClaim)
+	if !ok || pvc == nil {
+		return "", "", fmt.Errorf(
+			"failed to cast %q object %#v to PersistentVolumeClaim",
+			key,
+			pvcObj)
+	}
+
+	if pvc.Status.Phase != api.ClaimBound || pvc.Spec.VolumeName == "" {
+		return "", "", fmt.Errorf(
+			"PVC %q has non-bound phase (%q) or empty pvc.Spec.VolumeName (%q)",
+			key,
+			pvc.Status.Phase,
+			pvc.Spec.VolumeName)
+	}
+
+	return pvc.Spec.VolumeName, pvc.UID, nil
+}
+
+// getPVSpecFromCache fetches the PV object with the given name from the shared
+// internal PV store and returns a volume.Spec representing it.
+// This method returns an error if a PV object does not exist in the cache with
+// the given name.
+// This method deep copies the PV object so the caller may use the returned
+// volume.Spec object without worrying about it mutating unexpectedly.
+func (adc *attachDetachController) getPVSpecFromCache(
+	name string,
+	pvcReadOnly bool,
+	expectedClaimUID types.UID) (*volume.Spec, error) {
+	pvObj, exists, err := adc.pvInformer.GetStore().Get(name)
+	if pvObj == nil || !exists || err != nil {
+		return nil, fmt.Errorf(
+			"failed to find PV %q in PVInformer cache. %v", name, err)
+	}
+
+	pv, ok := pvObj.(*api.PersistentVolume)
+	if !ok || pv == nil {
+		return nil, fmt.Errorf(
+			"failed to cast %q object %#v to PersistentVolume", name, pvObj)
+	}
+
+	if pv.Spec.ClaimRef == nil {
+		return nil, fmt.Errorf(
+			"found PV object %q but it has a nil pv.Spec.ClaimRef indicating it is not yet bound to the claim",
+			name)
+	}
+
+	if pv.Spec.ClaimRef.UID != expectedClaimUID {
+		return nil, fmt.Errorf(
+			"found PV object %q but its pv.Spec.ClaimRef.UID (%q) does not point to claim.UID (%q)",
+			name,
+			pv.Spec.ClaimRef.UID,
+			expectedClaimUID)
+	}
+
+	// Do not return the object from the informer: since the store is shared,
+	// it may be mutated by another consumer.
+	clonedPVObj, err := api.Scheme.DeepCopy(pv)
+	if err != nil || clonedPVObj == nil {
+		return nil, fmt.Errorf("failed to deep copy %q PV object", name)
+	}
+
+	clonedPV, ok := clonedPVObj.(api.PersistentVolume)
+	if !ok {
+		return nil, fmt.Errorf(
+			"failed to cast %q clonedPV %#v to PersistentVolume", name, clonedPVObj)
+	}
+
+	return volume.NewSpecFromPersistentVolume(&clonedPV, pvcReadOnly), nil
+}
+
+// processSafeToDetachAnnotations processes the "safe to detach" annotations for
+// the given node. It collects for deletion any annotations referring to volumes
+// it is not aware of. For volumes it is aware of, it marks them safe to detach
+// in the "actual state of world" data structure.
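+// For example (illustrative only), an annotation key with the
+// SafeToDetachAnnotation prefix followed by a volume name marks that volume
+// as safe to detach from this node; keys are lowercased before matching.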
+func (adc *attachDetachController) processSafeToDetachAnnotations(
+	nodeName string, annotations map[string]string) {
+	var annotationsToRemove []string
+	for annotation := range annotations {
+		// Check annotations for "safe to detach" volumes
+		annotation = strings.ToLower(annotation)
+		if strings.HasPrefix(annotation, SafeToDetachAnnotation) {
+			// If the volume exists in the "actual state of world", mark it as
+			// safe to detach
+			safeToDetachVolume := strings.TrimPrefix(annotation, SafeToDetachAnnotation)
+			if err := adc.actualStateOfWorld.MarkVolumeNodeSafeToDetach(safeToDetachVolume, nodeName); err != nil {
+				// If the volume doesn't exist in the "actual state of world",
+				// remove the "safe to detach" annotation from the node
+				annotationsToRemove = append(annotationsToRemove, annotation)
+			}
+		}
+	}
+
+	// TODO: Call out to API server to delete annotationsToRemove from Node
+}
+
+// getUniquePodName returns a unique name to reference the pod by in in-memory
+// caches
+func getUniquePodName(pod *api.Pod) string {
+	return types.NamespacedName{Namespace: pod.Namespace, Name: pod.Name}.String()
+}
+
+// VolumeHost implementation
+// This is an unfortunate requirement of the current factoring of the volume
+// plugin initialization code. It requires kubelet-specific methods used by the
+// mounting code to be implemented by all initializers, even if the initializer
+// does not do mounting (like this attach/detach controller).
+// Issue kubernetes/kubernetes/issues/14217 tracks fixing this.
+func (adc *attachDetachController) GetPluginDir(podUID string) string {
+	return ""
+}
+
+func (adc *attachDetachController) GetPodVolumeDir(podUID types.UID, pluginName, volumeName string) string {
+	return ""
+}
+
+func (adc *attachDetachController) GetPodPluginDir(podUID types.UID, pluginName string) string {
+	return ""
+}
+
+func (adc *attachDetachController) GetKubeClient() internalclientset.Interface {
+	return adc.kubeClient
+}
+
+func (adc *attachDetachController) NewWrapperMounter(volName string, spec volume.Spec, pod *api.Pod, opts volume.VolumeOptions) (volume.Mounter, error) {
+	return nil, fmt.Errorf("NewWrapperMounter not supported by Attach/Detach controller's VolumeHost implementation")
+}
+
+func (adc *attachDetachController) NewWrapperUnmounter(volName string, spec volume.Spec, podUID types.UID) (volume.Unmounter, error) {
+	return nil, fmt.Errorf("NewWrapperUnmounter not supported by Attach/Detach controller's VolumeHost implementation")
+}
+
+func (adc *attachDetachController) GetCloudProvider() cloudprovider.Interface {
+	return adc.cloud
+}
+
+func (adc *attachDetachController) GetMounter() mount.Interface {
+	return nil
+}
+
+func (adc *attachDetachController) GetWriter() io.Writer {
+	return nil
+}
+
+func (adc *attachDetachController) GetHostName() string {
+	return ""
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/volume/attach_detach_controller_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/volume/attach_detach_controller_test.go
new file mode 100644
index 000000000000..2c372ce3640e
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/volume/attach_detach_controller_test.go
@@ -0,0 +1,114 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package volume
+
+import (
+	"fmt"
+	"testing"
+	"time"
+
+	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
+	"k8s.io/kubernetes/pkg/client/testing/core"
+	"k8s.io/kubernetes/pkg/controller/framework/informers"
+	"k8s.io/kubernetes/pkg/runtime"
+	"k8s.io/kubernetes/pkg/watch"
+)
+
+func Test_NewAttachDetachController_Positive(t *testing.T) {
+	// Arrange
+	fakeKubeClient := createTestClient()
+	resyncPeriod := 5 * time.Minute
+	podInformer := informers.CreateSharedPodIndexInformer(fakeKubeClient, resyncPeriod)
+	nodeInformer := informers.CreateSharedNodeIndexInformer(fakeKubeClient, resyncPeriod)
+	pvcInformer := informers.CreateSharedPVCIndexInformer(fakeKubeClient, resyncPeriod)
+	pvInformer := informers.CreateSharedPVIndexInformer(fakeKubeClient, resyncPeriod)
+
+	// Act
+	_, err := NewAttachDetachController(
+		fakeKubeClient,
+		podInformer,
+		nodeInformer,
+		pvcInformer,
+		pvInformer,
+		nil, /* cloud */
+		nil /* plugins */)
+
+	// Assert
+	if err != nil {
+		t.Fatalf("NewAttachDetachController failed. Expected: <no error> Actual: <%v>", err)
+	}
+}
+
+func createTestClient() *fake.Clientset {
+	fakeClient := &fake.Clientset{}
+
+	fakeClient.AddReactor("list", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
+		obj := &api.PodList{}
+		podNamePrefix := "mypod"
+		namespace := "mynamespace"
+		for i := 0; i < 5; i++ {
+			podName := fmt.Sprintf("%s-%d", podNamePrefix, i)
+			pod := api.Pod{
+				Status: api.PodStatus{
+					Phase: api.PodRunning,
+				},
+				ObjectMeta: api.ObjectMeta{
+					Name:      podName,
+					Namespace: namespace,
+					Labels: map[string]string{
+						"name": podName,
+					},
+				},
+				Spec: api.PodSpec{
+					Containers: []api.Container{
+						{
+							Name:  "containerName",
+							Image: "containerImage",
+							VolumeMounts: []api.VolumeMount{
+								{
+									Name:      "volumeMountName",
+									ReadOnly:  false,
+									MountPath: "/mnt",
+								},
+							},
+						},
+					},
+					Volumes: []api.Volume{
+						{
+							Name: "volumeName",
+							VolumeSource: api.VolumeSource{
+								GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{
+									PDName:   "pdName",
+									FSType:   "ext4",
+									ReadOnly: false,
+								},
+							},
+						},
+					},
+				},
+			}
+			obj.Items = append(obj.Items, pod)
+		}
+		return true, obj, nil
+	})
+
+	fakeWatch := watch.NewFake()
+	fakeClient.AddWatchReactor("*", core.DefaultWatchReactor(fakeWatch, nil))
+
+	return fakeClient
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/volume/attacherdetacher/attacher_detacher.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/volume/attacherdetacher/attacher_detacher.go
new file mode 100644
index 000000000000..fb7dd54f4643
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/volume/attacherdetacher/attacher_detacher.go
@@ -0,0 +1,183 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package attacherdetacher implements interfaces that enable triggering attach
+// and detach operations on volumes.
package attacherdetacher
+
+import (
+	"fmt"
+
+	"github.com/golang/glog"
+
+	"k8s.io/kubernetes/pkg/controller/volume/cache"
+	"k8s.io/kubernetes/pkg/util/goroutinemap"
+	"k8s.io/kubernetes/pkg/volume"
+)
+
+// AttacherDetacher defines a set of operations for attaching or detaching a
+// volume from a node.
+type AttacherDetacher interface {
+	// Spawns a new goroutine to execute volume-specific logic to attach the
+	// volume to the node specified in the volumeToAttach.
+	// Once attachment completes successfully, the actualStateOfWorld is updated
+	// to indicate the volume is attached to the node.
+	// If there is an error indicating the volume is already attached to the
+	// specified node, attachment is assumed to be successful (plugins are
+	// responsible for implementing this behavior).
+	// All other errors are logged and the goroutine terminates without updating
+	// actualStateOfWorld (caller is responsible for retrying as needed).
+	AttachVolume(volumeToAttach *cache.VolumeToAttach, actualStateOfWorld cache.ActualStateOfWorld) error
+
+	// Spawns a new goroutine to execute volume-specific logic to detach the
+	// volume from the node specified in volumeToDetach.
+	// Once detachment completes successfully, the actualStateOfWorld is updated
+	// to remove the volume/node combo.
+	// If there is an error indicating the volume is already detached from the
+	// specified node, detachment is assumed to be successful (plugins are
+	// responsible for implementing this behavior).
+	// All other errors are logged and the goroutine terminates without updating
+	// actualStateOfWorld (caller is responsible for retrying as needed).
+	DetachVolume(volumeToDetach *cache.AttachedVolume, actualStateOfWorld cache.ActualStateOfWorld) error
+}
+
+// NewAttacherDetacher returns a new instance of AttacherDetacher.
+func NewAttacherDetacher(volumePluginMgr *volume.VolumePluginMgr) AttacherDetacher {
+	return &attacherDetacher{
+		volumePluginMgr:   volumePluginMgr,
+		pendingOperations: goroutinemap.NewGoRoutineMap(),
+	}
+}
+
+type attacherDetacher struct {
+	// volumePluginMgr is the volume plugin manager used to create volume
+	// plugin objects.
+ volumePluginMgr *volume.VolumePluginMgr + // pendingOperations keeps track of pending attach and detach operations so + // multiple operations are not started on the same volume + pendingOperations goroutinemap.GoRoutineMap +} + +func (ad *attacherDetacher) AttachVolume( + volumeToAttach *cache.VolumeToAttach, + actualStateOfWorld cache.ActualStateOfWorld) error { + attachFunc, err := ad.generateAttachVolumeFunc(volumeToAttach, actualStateOfWorld) + if err != nil { + return err + } + + return ad.pendingOperations.Run(volumeToAttach.VolumeName, attachFunc) +} + +func (ad *attacherDetacher) DetachVolume( + volumeToDetach *cache.AttachedVolume, + actualStateOfWorld cache.ActualStateOfWorld) error { + detachFunc, err := ad.generateDetachVolumeFunc(volumeToDetach, actualStateOfWorld) + if err != nil { + return err + } + + return ad.pendingOperations.Run(volumeToDetach.VolumeName, detachFunc) +} + +func (ad *attacherDetacher) generateAttachVolumeFunc( + volumeToAttach *cache.VolumeToAttach, + actualStateOfWorld cache.ActualStateOfWorld) (func() error, error) { + // Get attacher plugin + attachableVolumePlugin, err := ad.volumePluginMgr.FindAttachablePluginBySpec(volumeToAttach.VolumeSpec) + if err != nil || attachableVolumePlugin == nil { + return nil, fmt.Errorf( + "failed to get AttachablePlugin from volumeSpec for volume %q err=%v", + volumeToAttach.VolumeSpec.Name(), + err) + } + + volumeAttacher, newAttacherErr := attachableVolumePlugin.NewAttacher() + if newAttacherErr != nil { + return nil, fmt.Errorf( + "failed to get NewAttacher from volumeSpec for volume %q err=%v", + volumeToAttach.VolumeSpec.Name(), + newAttacherErr) + } + + return func() error { + // Execute attach + attachErr := volumeAttacher.Attach(volumeToAttach.VolumeSpec, volumeToAttach.NodeName) + + if attachErr != nil { + // On failure, just log and exit. The controller will retry + glog.Errorf("Attach operation for %q failed with: %v", volumeToAttach.VolumeName, attachErr) + return attachErr + } + + // Update actual state of world + _, addVolumeNodeErr := actualStateOfWorld.AddVolumeNode(volumeToAttach.VolumeSpec, volumeToAttach.NodeName) + if addVolumeNodeErr != nil { + // On failure, just log and exit. 
The controller will retry
+			glog.Errorf("Attach operation for %q succeeded but updating actualStateOfWorld failed with: %v", volumeToAttach.VolumeName, addVolumeNodeErr)
+			return addVolumeNodeErr
+		}
+
+		return nil
+	}, nil
+}
+
+func (ad *attacherDetacher) generateDetachVolumeFunc(
+	volumeToDetach *cache.AttachedVolume,
+	actualStateOfWorld cache.ActualStateOfWorld) (func() error, error) {
+	// Get attacher plugin
+	attachableVolumePlugin, err := ad.volumePluginMgr.FindAttachablePluginBySpec(volumeToDetach.VolumeSpec)
+	if err != nil || attachableVolumePlugin == nil {
+		return nil, fmt.Errorf(
+			"failed to get AttachablePlugin from volumeSpec for volume %q err=%v",
+			volumeToDetach.VolumeSpec.Name(),
+			err)
+	}
+
+	deviceName, err := attachableVolumePlugin.GetDeviceName(volumeToDetach.VolumeSpec)
+	if err != nil {
+		return nil, fmt.Errorf(
+			"failed to GetDeviceName from AttachablePlugin for volumeSpec %q err=%v",
+			volumeToDetach.VolumeSpec.Name(),
+			err)
+	}
+
+	volumeDetacher, err := attachableVolumePlugin.NewDetacher()
+	if err != nil {
+		return nil, fmt.Errorf(
+			"failed to get NewDetacher from volumeSpec for volume %q err=%v",
+			volumeToDetach.VolumeSpec.Name(),
+			err)
+	}
+
+	return func() error {
+		// Execute detach
+		detachErr := volumeDetacher.Detach(deviceName, volumeToDetach.NodeName)
+
+		if detachErr != nil {
+			// On failure, just log and exit. The controller will retry
+			glog.Errorf("Detach operation for %q failed with: %v", volumeToDetach.VolumeName, detachErr)
+			return detachErr
+		}
+
+		// TODO: Reset "safe to detach" annotation on Node
+
+		// Update actual state of world
+		actualStateOfWorld.DeleteVolumeNode(volumeToDetach.VolumeName, volumeToDetach.NodeName)
+
+		return nil
+	}, nil
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/volume/cache/actual_state_of_world.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/volume/cache/actual_state_of_world.go
new file mode 100644
index 000000000000..d6d979671ed8
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/volume/cache/actual_state_of_world.go
@@ -0,0 +1,314 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Package cache implements data structures used by the attach/detach controller
+to keep track of volumes, the nodes they are attached to, and the pods that
+reference them.
+*/
+package cache
+
+import (
+	"fmt"
+	"sync"
+	"time"
+
+	"k8s.io/kubernetes/pkg/volume"
+)
+
+// ActualStateOfWorld defines a set of thread-safe operations supported on
+// the attach/detach controller's actual state of the world cache.
+// This cache contains volumes->nodes, i.e. the set of all volumes and the
+// nodes the attach/detach controller believes they are successfully attached
+// to.
+type ActualStateOfWorld interface {
+	// AddVolumeNode adds the given volume and node to the underlying store
+	// indicating the specified volume is attached to the specified node.
+	// A unique volumeName is generated from the volumeSpec and returned on
+	// success.
+	// If the volume/node combo already exists, this is a no-op.
+	// If volumeSpec is not an attachable volume plugin, an error is returned.
+	// If no volume with the name volumeName exists in the store, the volume is
+	// added.
+	// If no node with the name nodeName exists in the list of attached nodes
+	// for the specified volume, the node is added.
+	AddVolumeNode(volumeSpec *volume.Spec, nodeName string) (string, error)
+
+	// MarkVolumeNodeSafeToDetach marks the given volume as safe to detach from
+	// the given node.
+	// If no volume with the name volumeName exists in the store, an error is
+	// returned.
+	// If no node with the name nodeName exists in the list of attached nodes
+	// for the specified volume, an error is returned.
+	MarkVolumeNodeSafeToDetach(volumeName, nodeName string) error
+
+	// MarkDesireToDetach returns the difference between the current time and
+	// the DetachRequestedTime for the given volume/node combo. If the
+	// DetachRequestedTime is zero, it is set to the current time.
+	// If no volume with the name volumeName exists in the store, an error is
+	// returned.
+	// If no node with the name nodeName exists in the list of attached nodes
+	// for the specified volume, an error is returned.
+	MarkDesireToDetach(volumeName, nodeName string) (time.Duration, error)
+
+	// DeleteVolumeNode removes the given volume and node from the underlying
+	// store indicating the specified volume is no longer attached to the
+	// specified node.
+	// If the volume/node combo does not exist, this is a no-op.
+	// If after deleting the node, the specified volume contains no other child
+	// nodes, the volume is also deleted.
+	DeleteVolumeNode(volumeName, nodeName string)
+
+	// VolumeNodeExists returns true if the specified volume/node combo exists
+	// in the underlying store indicating the specified volume is attached to
+	// the specified node.
+	VolumeNodeExists(volumeName, nodeName string) bool
+
+	// GetAttachedVolumes generates and returns a list of volume/node pairs
+	// reflecting which volumes are attached to which nodes based on the
+	// current actual state of the world.
+	GetAttachedVolumes() []AttachedVolume
+}
+
+// AttachedVolume represents a volume that is attached to a node.
+type AttachedVolume struct {
+	// VolumeName is the unique identifier for the volume that is attached.
+	VolumeName string
+
+	// VolumeSpec is the volume spec containing the specification for the
+	// volume that is attached.
+	VolumeSpec *volume.Spec
+
+	// NodeName is the identifier for the node that the volume is attached to.
+	NodeName string
+
+	// SafeToDetach indicates that this volume has been unmounted from the
+	// node and is safe to detach.
+	// The value is set by MarkVolumeNodeSafeToDetach(...) and reset on
+	// AddVolumeNode(...) calls.
+	SafeToDetach bool
+
+	// DetachRequestedTime is used to capture the desire to detach this volume.
+	// When the volume is newly created this value is set to the zero time.
+	// It is set to the current time when MarkDesireToDetach(...) is called, if
+	// it was previously zero (otherwise its value remains the same).
+	// It is reset to zero on AddVolumeNode(...) calls.
+	DetachRequestedTime time.Time
+}
+
+// NewActualStateOfWorld returns a new instance of ActualStateOfWorld.
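+//
+// A minimal usage sketch (names are illustrative, not from the source):
+//
+//	asw := NewActualStateOfWorld(volumePluginMgr)
+//	volumeName, err := asw.AddVolumeNode(volumeSpec, "node-name")
+//	if err == nil && asw.VolumeNodeExists(volumeName, "node-name") {
+//		// the volume is now recorded as attached to "node-name"
+//	}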
+func NewActualStateOfWorld(volumePluginMgr *volume.VolumePluginMgr) ActualStateOfWorld {
+	return &actualStateOfWorld{
+		attachedVolumes: make(map[string]attachedVolume),
+		volumePluginMgr: volumePluginMgr,
+	}
+}
+
+type actualStateOfWorld struct {
+	// attachedVolumes is a map containing the set of volumes the attach/detach
+	// controller believes to be successfully attached to the nodes it is
+	// managing. The key in this map is the name of the volume and the value is
+	// an object containing more information about the attached volume.
+	attachedVolumes map[string]attachedVolume
+	// volumePluginMgr is the volume plugin manager used to create volume
+	// plugin objects.
+	volumePluginMgr *volume.VolumePluginMgr
+	sync.RWMutex
+}
+
+// The attachedVolume object represents a volume that the attach/detach
+// controller believes to be successfully attached to a node it is managing.
+type attachedVolume struct {
+	// volumeName contains the unique identifier for this volume.
+	volumeName string
+
+	// spec is the volume spec containing the specification for this volume.
+	// Used to generate the volume plugin object, and passed to attach/detach
+	// methods.
+	spec *volume.Spec
+
+	// nodesAttachedTo is a map containing the set of nodes this volume has
+	// successfully been attached to. The key in this map is the name of the
+	// node and the value is a node object containing more information about
+	// the node.
+	nodesAttachedTo map[string]nodeAttachedTo
+}
+
+// The nodeAttachedTo object represents a node that a volume is attached to.
+type nodeAttachedTo struct {
+	// nodeName contains the name of this node.
+	nodeName string
+
+	// safeToDetach indicates that this node/volume combo has been unmounted
+	// by the node and is safe to detach
+	safeToDetach bool
+
+	// detachRequestedTime used to capture the desire to detach this volume
+	detachRequestedTime time.Time
+}
+
+func (asw *actualStateOfWorld) AddVolumeNode(volumeSpec *volume.Spec, nodeName string) (string, error) {
+	asw.Lock()
+	defer asw.Unlock()
+
+	attachableVolumePlugin, err := asw.volumePluginMgr.FindAttachablePluginBySpec(volumeSpec)
+	if err != nil || attachableVolumePlugin == nil {
+		return "", fmt.Errorf(
+			"failed to get AttachablePlugin from volumeSpec for volume %q err=%v",
+			volumeSpec.Name(),
+			err)
+	}
+
+	volumeName, err := attachableVolumePlugin.GetUniqueVolumeName(volumeSpec)
+	if err != nil {
+		return "", fmt.Errorf(
+			"failed to GetUniqueVolumeName from AttachablePlugin for volumeSpec %q err=%v",
+			volumeSpec.Name(),
+			err)
+	}
+
+	volumeObj, volumeExists := asw.attachedVolumes[volumeName]
+	if !volumeExists {
+		volumeObj = attachedVolume{
+			volumeName:      volumeName,
+			spec:            volumeSpec,
+			nodesAttachedTo: make(map[string]nodeAttachedTo),
+		}
+		asw.attachedVolumes[volumeName] = volumeObj
+	}
+
+	nodeObj, nodeExists := volumeObj.nodesAttachedTo[nodeName]
+	if !nodeExists || nodeObj.safeToDetach || !nodeObj.detachRequestedTime.IsZero() {
+		// Create the object if it doesn't exist.
+		// Reset the safeToDetach and detachRequestedTime values if it does.
+		volumeObj.nodesAttachedTo[nodeName] = nodeAttachedTo{
+			nodeName:            nodeName,
+			safeToDetach:        false,
+			detachRequestedTime: time.Time{},
+		}
+	}
+
+	return volumeName, nil
+}
+
+func (asw *actualStateOfWorld) MarkVolumeNodeSafeToDetach(
+	volumeName, nodeName string) error {
+	asw.Lock()
+	defer asw.Unlock()
+	volumeObj, volumeExists := asw.attachedVolumes[volumeName]
+	if !volumeExists {
+		return fmt.Errorf(
+			"failed to MarkVolumeNodeSafeToDetach(volumeName=%q, nodeName=%q) volumeName does not exist",
+			volumeName,
+			nodeName)
+	}
+
+	nodeObj, nodeExists := volumeObj.nodesAttachedTo[nodeName]
+	if !nodeExists {
+		return fmt.Errorf(
+			"failed to MarkVolumeNodeSafeToDetach(volumeName=%q, nodeName=%q) nodeName does not exist",
+			volumeName,
+			nodeName)
+	}
+
+	// Mark the node/volume combo safe to detach
+	nodeObj.safeToDetach = true
+	volumeObj.nodesAttachedTo[nodeName] = nodeObj
+
+	return nil
+}
+
+func (asw *actualStateOfWorld) MarkDesireToDetach(
+	volumeName, nodeName string) (time.Duration, error) {
+	asw.Lock()
+	defer asw.Unlock()
+
+	volumeObj, volumeExists := asw.attachedVolumes[volumeName]
+	if !volumeExists {
+		return time.Millisecond * 0, fmt.Errorf(
+			"failed to MarkDesireToDetach(volumeName=%q, nodeName=%q) volumeName does not exist",
+			volumeName,
+			nodeName)
+	}
+
+	nodeObj, nodeExists := volumeObj.nodesAttachedTo[nodeName]
+	if !nodeExists {
+		return time.Millisecond * 0, fmt.Errorf(
+			"failed to MarkDesireToDetach(volumeName=%q, nodeName=%q) nodeName does not exist",
+			volumeName,
+			nodeName)
+	}
+
+	if nodeObj.detachRequestedTime.IsZero() {
+		nodeObj.detachRequestedTime = time.Now()
+		volumeObj.nodesAttachedTo[nodeName] = nodeObj
+	}
+
+	return time.Since(volumeObj.nodesAttachedTo[nodeName].detachRequestedTime), nil
+}
+
+func (asw *actualStateOfWorld) DeleteVolumeNode(volumeName, nodeName string) {
+	asw.Lock()
+	defer asw.Unlock()
+
+	volumeObj, volumeExists := asw.attachedVolumes[volumeName]
+	if !volumeExists {
+		return
+	}
+
+	_, nodeExists := volumeObj.nodesAttachedTo[nodeName]
+	if nodeExists {
+		delete(asw.attachedVolumes[volumeName].nodesAttachedTo, nodeName)
+	}
+
+	if len(volumeObj.nodesAttachedTo) == 0 {
+		delete(asw.attachedVolumes, volumeName)
+	}
+}
+
+func (asw *actualStateOfWorld) VolumeNodeExists(volumeName, nodeName string) bool {
+	asw.RLock()
+	defer asw.RUnlock()
+
+	volumeObj, volumeExists := asw.attachedVolumes[volumeName]
+	if volumeExists {
+		if _, nodeExists := volumeObj.nodesAttachedTo[nodeName]; nodeExists {
+			return true
+		}
+	}
+
+	return false
+}
+
+func (asw *actualStateOfWorld) GetAttachedVolumes() []AttachedVolume {
+	asw.RLock()
+	defer asw.RUnlock()
+
+	attachedVolumes := make([]AttachedVolume, 0 /* len */, len(asw.attachedVolumes) /* cap */)
+	for volumeName, volumeObj := range asw.attachedVolumes {
+		for nodeName, nodeObj := range volumeObj.nodesAttachedTo {
+			attachedVolumes = append(
+				attachedVolumes,
+				AttachedVolume{
+					NodeName:            nodeName,
+					VolumeName:          volumeName,
+					VolumeSpec:          volumeObj.spec,
+					SafeToDetach:        nodeObj.safeToDetach,
+					DetachRequestedTime: nodeObj.detachRequestedTime})
+		}
+	}
+
+	return attachedVolumes
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/volume/cache/actual_state_of_world_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/volume/cache/actual_state_of_world_test.go
new file mode 100644
index 000000000000..a9ccddc4ef45
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/volume/cache/actual_state_of_world_test.go
@@ -0,0 +1,682 @@
+/*
+Copyright
2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cache
+
+import (
+	"testing"
+
+	controllervolumetesting "k8s.io/kubernetes/pkg/controller/volume/testing"
+)
+
+func Test_AddVolumeNode_Positive_NewVolumeNewNode(t *testing.T) {
+	// Arrange
+	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr(t)
+	asw := NewActualStateOfWorld(volumePluginMgr)
+	volumeName := "volume-name"
+	volumeSpec := controllervolumetesting.GetTestVolumeSpec(volumeName, volumeName)
+
+	nodeName := "node-name"
+
+	// Act
+	generatedVolumeName, err := asw.AddVolumeNode(volumeSpec, nodeName)
+
+	// Assert
+	if err != nil {
+		t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", err)
+	}
+
+	volumeNodeComboExists := asw.VolumeNodeExists(generatedVolumeName, nodeName)
+	if !volumeNodeComboExists {
+		t.Fatalf("%q/%q volume/node combo does not exist, it should.", generatedVolumeName, nodeName)
+	}
+
+	attachedVolumes := asw.GetAttachedVolumes()
+	if len(attachedVolumes) != 1 {
+		t.Fatalf("len(attachedVolumes) Expected: <1> Actual: <%v>", len(attachedVolumes))
+	}
+
+	verifyAttachedVolume(t, attachedVolumes, generatedVolumeName, volumeName, nodeName, false /* expectedSafeToDetach */, false /* expectNonZeroDetachRequestedTime */)
+}
+
+func Test_AddVolumeNode_Positive_ExistingVolumeNewNode(t *testing.T) {
+	// Arrange
+	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr(t)
+	asw := NewActualStateOfWorld(volumePluginMgr)
+	volumeName := "volume-name"
+	volumeSpec := controllervolumetesting.GetTestVolumeSpec(volumeName, volumeName)
+	node1Name := "node1-name"
+	node2Name := "node2-name"
+
+	// Act
+	generatedVolumeName1, add1Err := asw.AddVolumeNode(volumeSpec, node1Name)
+	generatedVolumeName2, add2Err := asw.AddVolumeNode(volumeSpec, node2Name)
+
+	// Assert
+	if add1Err != nil {
+		t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", add1Err)
+	}
+	if add2Err != nil {
+		t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", add2Err)
+	}
+
+	if generatedVolumeName1 != generatedVolumeName2 {
+		t.Fatalf(
+			"Generated volume names for the same volume should be the same but they are not: %q and %q",
+			generatedVolumeName1,
+			generatedVolumeName2)
+	}
+
+	volumeNode1ComboExists := asw.VolumeNodeExists(generatedVolumeName1, node1Name)
+	if !volumeNode1ComboExists {
+		t.Fatalf("%q/%q volume/node combo does not exist, it should.", generatedVolumeName1, node1Name)
+	}
+
+	volumeNode2ComboExists := asw.VolumeNodeExists(generatedVolumeName1, node2Name)
+	if !volumeNode2ComboExists {
+		t.Fatalf("%q/%q volume/node combo does not exist, it should.", generatedVolumeName1, node2Name)
+	}
+
+	attachedVolumes := asw.GetAttachedVolumes()
+	if len(attachedVolumes) != 2 {
+		t.Fatalf("len(attachedVolumes) Expected: <2> Actual: <%v>", len(attachedVolumes))
+	}
+
+	verifyAttachedVolume(t, attachedVolumes, generatedVolumeName1, volumeName, node1Name, false /* expectedSafeToDetach */, false /* expectNonZeroDetachRequestedTime */)
+	verifyAttachedVolume(t, attachedVolumes, generatedVolumeName1, volumeName, node2Name, false /* expectedSafeToDetach */, false /* expectNonZeroDetachRequestedTime */)
+}
+
+func Test_AddVolumeNode_Positive_ExistingVolumeExistingNode(t *testing.T) {
+	// Arrange
+	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr(t)
+	asw := NewActualStateOfWorld(volumePluginMgr)
+	volumeName := "volume-name"
+	volumeSpec := controllervolumetesting.GetTestVolumeSpec(volumeName, volumeName)
+	nodeName := "node-name"
+
+	// Act
+	generatedVolumeName1, add1Err := asw.AddVolumeNode(volumeSpec, nodeName)
+	generatedVolumeName2, add2Err := asw.AddVolumeNode(volumeSpec, nodeName)
+
+	// Assert
+	if add1Err != nil {
+		t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", add1Err)
+	}
+	if add2Err != nil {
+		t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", add2Err)
+	}
+
+	if generatedVolumeName1 != generatedVolumeName2 {
+		t.Fatalf(
+			"Generated volume names for the same volume should be the same but they are not: %q and %q",
+			generatedVolumeName1,
+			generatedVolumeName2)
+	}
+
+	volumeNodeComboExists := asw.VolumeNodeExists(generatedVolumeName1, nodeName)
+	if !volumeNodeComboExists {
+		t.Fatalf("%q/%q volume/node combo does not exist, it should.", generatedVolumeName1, nodeName)
+	}
+
+	attachedVolumes := asw.GetAttachedVolumes()
+	if len(attachedVolumes) != 1 {
+		t.Fatalf("len(attachedVolumes) Expected: <1> Actual: <%v>", len(attachedVolumes))
+	}
+
+	verifyAttachedVolume(t, attachedVolumes, generatedVolumeName1, volumeName, nodeName, false /* expectedSafeToDetach */, false /* expectNonZeroDetachRequestedTime */)
+}
+
+func Test_DeleteVolumeNode_Positive_VolumeExistsNodeExists(t *testing.T) {
+	// Arrange
+	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr(t)
+	asw := NewActualStateOfWorld(volumePluginMgr)
+	volumeName := "volume-name"
+	volumeSpec := controllervolumetesting.GetTestVolumeSpec(volumeName, volumeName)
+	nodeName := "node-name"
+	generatedVolumeName, addErr := asw.AddVolumeNode(volumeSpec, nodeName)
+	if addErr != nil {
+		t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", addErr)
+	}
+
+	// Act
+	asw.DeleteVolumeNode(generatedVolumeName, nodeName)
+
+	// Assert
+	volumeNodeComboExists := asw.VolumeNodeExists(generatedVolumeName, nodeName)
+	if volumeNodeComboExists {
+		t.Fatalf("%q/%q volume/node combo exists, it should not.", generatedVolumeName, nodeName)
+	}
+
+	attachedVolumes := asw.GetAttachedVolumes()
+	if len(attachedVolumes) != 0 {
+		t.Fatalf("len(attachedVolumes) Expected: <0> Actual: <%v>", len(attachedVolumes))
+	}
+}
+
+func Test_DeleteVolumeNode_Positive_VolumeDoesntExistNodeDoesntExist(t *testing.T) {
+	// Arrange
+	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr(t)
+	asw := NewActualStateOfWorld(volumePluginMgr)
+	volumeName := "volume-name"
+	nodeName := "node-name"
+
+	// Act
+	asw.DeleteVolumeNode(volumeName, nodeName)
+
+	// Assert
+	volumeNodeComboExists := asw.VolumeNodeExists(volumeName, nodeName)
+	if volumeNodeComboExists {
+		t.Fatalf("%q/%q volume/node combo exists, it should not.", volumeName, nodeName)
+	}
+
+	attachedVolumes := asw.GetAttachedVolumes()
+	if len(attachedVolumes) != 0 {
+		t.Fatalf("len(attachedVolumes) Expected: <0> Actual: <%v>", len(attachedVolumes))
+	}
+}
+
+func Test_DeleteVolumeNode_Positive_TwoNodesOneDeleted(t *testing.T) {
+	// Arrange
+	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr(t)
+	asw := NewActualStateOfWorld(volumePluginMgr)
+	volumeName := "volume-name"
+	volumeSpec := controllervolumetesting.GetTestVolumeSpec(volumeName, volumeName)
+	node1Name := "node1-name"
+	node2Name := "node2-name"
+	generatedVolumeName1, add1Err := asw.AddVolumeNode(volumeSpec, node1Name)
+	if add1Err != nil {
+		t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", add1Err)
+	}
+	generatedVolumeName2, add2Err := asw.AddVolumeNode(volumeSpec, node2Name)
+	if add2Err != nil {
+		t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", add2Err)
+	}
+	if generatedVolumeName1 != generatedVolumeName2 {
+		t.Fatalf(
+			"Generated volume names for the same volume should be the same but they are not: %q and %q",
+			generatedVolumeName1,
+			generatedVolumeName2)
+	}
+
+	// Act
+	asw.DeleteVolumeNode(generatedVolumeName1, node1Name)
+
+	// Assert
+	volumeNodeComboExists := asw.VolumeNodeExists(generatedVolumeName1, node1Name)
+	if volumeNodeComboExists {
+		t.Fatalf("%q/%q volume/node combo exists, it should not.", generatedVolumeName1, node1Name)
+	}
+
+	volumeNodeComboExists = asw.VolumeNodeExists(generatedVolumeName1, node2Name)
+	if !volumeNodeComboExists {
+		t.Fatalf("%q/%q volume/node combo does not exist, it should.", generatedVolumeName1, node2Name)
+	}
+
+	attachedVolumes := asw.GetAttachedVolumes()
+	if len(attachedVolumes) != 1 {
+		t.Fatalf("len(attachedVolumes) Expected: <1> Actual: <%v>", len(attachedVolumes))
+	}
+
+	verifyAttachedVolume(t, attachedVolumes, generatedVolumeName1, volumeName, node2Name, false /* expectedSafeToDetach */, false /* expectNonZeroDetachRequestedTime */)
+}
+
+func Test_VolumeNodeExists_Positive_VolumeExistsNodeExists(t *testing.T) {
+	// Arrange
+	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr(t)
+	asw := NewActualStateOfWorld(volumePluginMgr)
+	volumeName := "volume-name"
+	volumeSpec := controllervolumetesting.GetTestVolumeSpec(volumeName, volumeName)
+	nodeName := "node-name"
+	generatedVolumeName, addErr := asw.AddVolumeNode(volumeSpec, nodeName)
+	if addErr != nil {
+		t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", addErr)
+	}
+
+	// Act
+	volumeNodeComboExists := asw.VolumeNodeExists(generatedVolumeName, nodeName)
+
+	// Assert
+	if !volumeNodeComboExists {
+		t.Fatalf("%q/%q volume/node combo does not exist, it should.", generatedVolumeName, nodeName)
+	}
+
+	attachedVolumes := asw.GetAttachedVolumes()
+	if len(attachedVolumes) != 1 {
+		t.Fatalf("len(attachedVolumes) Expected: <1> Actual: <%v>", len(attachedVolumes))
+	}
+
+	verifyAttachedVolume(t, attachedVolumes, generatedVolumeName, volumeName, nodeName, false /* expectedSafeToDetach */, false /* expectNonZeroDetachRequestedTime */)
+}
+
+func Test_VolumeNodeExists_Positive_VolumeExistsNodeDoesntExist(t *testing.T) {
+	// Arrange
+	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr(t)
+	asw := NewActualStateOfWorld(volumePluginMgr)
+	volumeName := "volume-name"
+	volumeSpec := controllervolumetesting.GetTestVolumeSpec(volumeName, volumeName)
+	node1Name := "node1-name"
+	node2Name := "node2-name"
+	generatedVolumeName, addErr := asw.AddVolumeNode(volumeSpec, node1Name)
+	if addErr != nil {
+		t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", addErr)
+	}
+
+	// Act
+	volumeNodeComboExists := asw.VolumeNodeExists(generatedVolumeName, node2Name)
+
+	// Assert
+	if volumeNodeComboExists {
+		t.Fatalf("%q/%q volume/node combo exists, it should not.", generatedVolumeName, node2Name)
+	}
+
+	attachedVolumes := asw.GetAttachedVolumes()
+	if len(attachedVolumes) != 1 {
+		t.Fatalf("len(attachedVolumes) Expected: <1> Actual: <%v>", len(attachedVolumes))
+	}
+
+	verifyAttachedVolume(t, attachedVolumes, generatedVolumeName, volumeName, node1Name, false /* expectedSafeToDetach */, false /* expectNonZeroDetachRequestedTime */)
+}
+
+func Test_VolumeNodeExists_Positive_VolumeAndNodeDontExist(t *testing.T) {
+	// Arrange
+	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr(t)
+	asw := NewActualStateOfWorld(volumePluginMgr)
+	volumeName := "volume-name"
+	nodeName := "node-name"
+
+	// Act
+	volumeNodeComboExists := asw.VolumeNodeExists(volumeName, nodeName)
+
+	// Assert
+	if volumeNodeComboExists {
+		t.Fatalf("%q/%q volume/node combo exists, it should not.", volumeName, nodeName)
+	}
+
+	attachedVolumes := asw.GetAttachedVolumes()
+	if len(attachedVolumes) != 0 {
+		t.Fatalf("len(attachedVolumes) Expected: <0> Actual: <%v>", len(attachedVolumes))
+	}
+}
+
+func Test_GetAttachedVolumes_Positive_NoVolumesOrNodes(t *testing.T) {
+	// Arrange
+	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr(t)
+	asw := NewActualStateOfWorld(volumePluginMgr)
+
+	// Act
+	attachedVolumes := asw.GetAttachedVolumes()
+
+	// Assert
+	if len(attachedVolumes) != 0 {
+		t.Fatalf("len(attachedVolumes) Expected: <0> Actual: <%v>", len(attachedVolumes))
+	}
+}
+
+func Test_GetAttachedVolumes_Positive_OneVolumeOneNode(t *testing.T) {
+	// Arrange
+	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr(t)
+	asw := NewActualStateOfWorld(volumePluginMgr)
+	volumeName := "volume-name"
+	volumeSpec := controllervolumetesting.GetTestVolumeSpec(volumeName, volumeName)
+	nodeName := "node-name"
+	generatedVolumeName, addErr := asw.AddVolumeNode(volumeSpec, nodeName)
+	if addErr != nil {
+		t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", addErr)
+	}
+
+	// Act
+	attachedVolumes := asw.GetAttachedVolumes()
+
+	// Assert
+	if len(attachedVolumes) != 1 {
+		t.Fatalf("len(attachedVolumes) Expected: <1> Actual: <%v>", len(attachedVolumes))
+	}
+
+	verifyAttachedVolume(t, attachedVolumes, generatedVolumeName, volumeName, nodeName, false /* expectedSafeToDetach */, false /* expectNonZeroDetachRequestedTime */)
+}
+
+func Test_GetAttachedVolumes_Positive_TwoVolumeTwoNodes(t *testing.T) {
+	// Arrange
+	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr(t)
+	asw := NewActualStateOfWorld(volumePluginMgr)
+	volume1Name := "volume1-name"
+	volume1Spec := controllervolumetesting.GetTestVolumeSpec(volume1Name, volume1Name)
+	node1Name := "node1-name"
+	generatedVolumeName1, add1Err := asw.AddVolumeNode(volume1Spec, node1Name)
+	if add1Err != nil {
+		t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", add1Err)
+	}
+	volume2Name := "volume2-name"
+	volume2Spec := controllervolumetesting.GetTestVolumeSpec(volume2Name, volume2Name)
+	node2Name := "node2-name"
+	generatedVolumeName2, add2Err := asw.AddVolumeNode(volume2Spec, node2Name)
+	if add2Err != nil {
+		t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", add2Err)
+	}
+
+	// Act
+	attachedVolumes := asw.GetAttachedVolumes()
+
+	// Assert
+	if len(attachedVolumes) != 2 {
+		t.Fatalf("len(attachedVolumes) Expected: <2> Actual: <%v>", len(attachedVolumes))
+	}
+
+	verifyAttachedVolume(t, attachedVolumes, generatedVolumeName1, volume1Name, node1Name, false /* expectedSafeToDetach */, false /* expectNonZeroDetachRequestedTime */)
+	verifyAttachedVolume(t, attachedVolumes, generatedVolumeName2, volume2Name, node2Name, false /* expectedSafeToDetach */, false /* expectNonZeroDetachRequestedTime */)
+}
+
+func Test_GetAttachedVolumes_Positive_OneVolumeTwoNodes(t *testing.T) {
+	// Arrange
+	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr(t)
+	asw := NewActualStateOfWorld(volumePluginMgr)
+	volumeName := "volume-name"
+	volumeSpec := controllervolumetesting.GetTestVolumeSpec(volumeName, volumeName)
+	node1Name := "node1-name"
+	generatedVolumeName1, add1Err := asw.AddVolumeNode(volumeSpec, node1Name)
+	if add1Err != nil {
+		t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", add1Err)
+	}
+	node2Name := "node2-name"
+	generatedVolumeName2, add2Err := asw.AddVolumeNode(volumeSpec, node2Name)
+	if add2Err != nil {
+		t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", add2Err)
+	}
+
+	if generatedVolumeName1 != generatedVolumeName2 {
+		t.Fatalf(
+			"Generated volume names for the same volume should be the same but they are not: %q and %q",
+			generatedVolumeName1,
+			generatedVolumeName2)
+	}
+
+	// Act
+	attachedVolumes := asw.GetAttachedVolumes()
+
+	// Assert
+	if len(attachedVolumes) != 2 {
+		t.Fatalf("len(attachedVolumes) Expected: <2> Actual: <%v>", len(attachedVolumes))
+	}
+
+	verifyAttachedVolume(t, attachedVolumes, generatedVolumeName1, volumeName, node1Name, false /* expectedSafeToDetach */, false /* expectNonZeroDetachRequestedTime */)
+	verifyAttachedVolume(t, attachedVolumes, generatedVolumeName1, volumeName, node2Name, false /* expectedSafeToDetach */, false /* expectNonZeroDetachRequestedTime */)
+}
+
+func Test_MarkVolumeNodeSafeToDetach_Positive_NotMarked(t *testing.T) {
+	// Arrange
+	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr(t)
+	asw := NewActualStateOfWorld(volumePluginMgr)
+	volumeName := "volume-name"
+	volumeSpec := controllervolumetesting.GetTestVolumeSpec(volumeName, volumeName)
+	nodeName := "node-name"
+	generatedVolumeName, addErr := asw.AddVolumeNode(volumeSpec, nodeName)
+	if addErr != nil {
+		t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", addErr)
+	}
+
+	// Act: do not mark -- test default value
+
+	// Assert
+	attachedVolumes := asw.GetAttachedVolumes()
+	if len(attachedVolumes) != 1 {
+		t.Fatalf("len(attachedVolumes) Expected: <1> Actual: <%v>", len(attachedVolumes))
+	}
+
+	verifyAttachedVolume(t, attachedVolumes, generatedVolumeName, volumeName, nodeName, false /* expectedSafeToDetach */, false /* expectNonZeroDetachRequestedTime */)
+}
+
+func Test_MarkVolumeNodeSafeToDetach_Positive_Marked(t *testing.T) {
+	// Arrange
+	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr(t)
+	asw := NewActualStateOfWorld(volumePluginMgr)
+	volumeName := "volume-name"
+	volumeSpec := controllervolumetesting.GetTestVolumeSpec(volumeName, volumeName)
+	nodeName := "node-name"
+	generatedVolumeName, addErr := asw.AddVolumeNode(volumeSpec, nodeName)
+	if addErr != nil {
+		t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", addErr)
+	}
+
+	// Act
+	markSafeToDetachErr := asw.MarkVolumeNodeSafeToDetach(generatedVolumeName, nodeName)
+
+	// Assert
+	if markSafeToDetachErr != nil {
+		t.Fatalf("MarkVolumeNodeSafeToDetach failed. Expected: <no error> Actual: <%v>", markSafeToDetachErr)
+	}
+
+	attachedVolumes := asw.GetAttachedVolumes()
+	if len(attachedVolumes) != 1 {
+		t.Fatalf("len(attachedVolumes) Expected: <1> Actual: <%v>", len(attachedVolumes))
+	}
+
+	verifyAttachedVolume(t, attachedVolumes, generatedVolumeName, volumeName, nodeName, true /* expectedSafeToDetach */, false /* expectNonZeroDetachRequestedTime */)
+}
+
+func Test_MarkVolumeNodeSafeToDetach_Positive_MarkedAddVolumeNodeReset(t *testing.T) {
+	// Arrange
+	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr(t)
+	asw := NewActualStateOfWorld(volumePluginMgr)
+	volumeName := "volume-name"
+	volumeSpec := controllervolumetesting.GetTestVolumeSpec(volumeName, volumeName)
+	nodeName := "node-name"
+	generatedVolumeName, addErr := asw.AddVolumeNode(volumeSpec, nodeName)
+	if addErr != nil {
+		t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", addErr)
+	}
+
+	// Act
+	markSafeToDetachErr := asw.MarkVolumeNodeSafeToDetach(generatedVolumeName, nodeName)
+	generatedVolumeName, addErr = asw.AddVolumeNode(volumeSpec, nodeName)
+
+	// Assert
+	if markSafeToDetachErr != nil {
+		t.Fatalf("MarkVolumeNodeSafeToDetach failed. Expected: <no error> Actual: <%v>", markSafeToDetachErr)
+	}
+	if addErr != nil {
+		t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", addErr)
+	}
+
+	attachedVolumes := asw.GetAttachedVolumes()
+	if len(attachedVolumes) != 1 {
+		t.Fatalf("len(attachedVolumes) Expected: <1> Actual: <%v>", len(attachedVolumes))
+	}
+
+	verifyAttachedVolume(t, attachedVolumes, generatedVolumeName, volumeName, nodeName, false /* expectedSafeToDetach */, false /* expectNonZeroDetachRequestedTime */)
+}
+
+func Test_MarkVolumeNodeSafeToDetach_Positive_MarkedVerifyDetachRequestedTimePreserved(t *testing.T) {
+	// Arrange
+	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr(t)
+	asw := NewActualStateOfWorld(volumePluginMgr)
+	volumeName := "volume-name"
+	volumeSpec := controllervolumetesting.GetTestVolumeSpec(volumeName, volumeName)
+	nodeName := "node-name"
+	generatedVolumeName, addErr := asw.AddVolumeNode(volumeSpec, nodeName)
+	if addErr != nil {
+		t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", addErr)
+	}
+	_, err := asw.MarkDesireToDetach(generatedVolumeName, nodeName)
+	if err != nil {
+		t.Fatalf("MarkDesireToDetach failed. Expected: <no error> Actual: <%v>", err)
+	}
+	expectedDetachRequestedTime := asw.GetAttachedVolumes()[0].DetachRequestedTime
+
+	// Act
+	markSafeToDetachErr := asw.MarkVolumeNodeSafeToDetach(generatedVolumeName, nodeName)
+
+	// Assert
+	if markSafeToDetachErr != nil {
+		t.Fatalf("MarkVolumeNodeSafeToDetach failed. Expected: <no error> Actual: <%v>", markSafeToDetachErr)
+	}
+
+	attachedVolumes := asw.GetAttachedVolumes()
+	if len(attachedVolumes) != 1 {
+		t.Fatalf("len(attachedVolumes) Expected: <1> Actual: <%v>", len(attachedVolumes))
+	}
+
+	verifyAttachedVolume(t, attachedVolumes, generatedVolumeName, volumeName, nodeName, true /* expectedSafeToDetach */, true /* expectNonZeroDetachRequestedTime */)
+	if !expectedDetachRequestedTime.Equal(attachedVolumes[0].DetachRequestedTime) {
+		t.Fatalf("DetachRequestedTime changed. Expected: <%v> Actual: <%v>", expectedDetachRequestedTime, attachedVolumes[0].DetachRequestedTime)
+	}
+}
+
+func Test_MarkDesireToDetach_Positive_NotMarked(t *testing.T) {
+	// Arrange
+	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr(t)
+	asw := NewActualStateOfWorld(volumePluginMgr)
+	volumeName := "volume-name"
+	volumeSpec := controllervolumetesting.GetTestVolumeSpec(volumeName, volumeName)
+	nodeName := "node-name"
+	generatedVolumeName, addErr := asw.AddVolumeNode(volumeSpec, nodeName)
+	if addErr != nil {
+		t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", addErr)
+	}
+
+	// Act: do not mark -- test default value
+
+	// Assert
+	attachedVolumes := asw.GetAttachedVolumes()
+	if len(attachedVolumes) != 1 {
+		t.Fatalf("len(attachedVolumes) Expected: <1> Actual: <%v>", len(attachedVolumes))
+	}
+
+	verifyAttachedVolume(t, attachedVolumes, generatedVolumeName, volumeName, nodeName, false /* expectedSafeToDetach */, false /* expectNonZeroDetachRequestedTime */)
+}
+
+func Test_MarkDesireToDetach_Positive_Marked(t *testing.T) {
+	// Arrange
+	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr(t)
+	asw := NewActualStateOfWorld(volumePluginMgr)
+	volumeName := "volume-name"
+	volumeSpec := controllervolumetesting.GetTestVolumeSpec(volumeName, volumeName)
+	nodeName := "node-name"
+	generatedVolumeName, addErr := asw.AddVolumeNode(volumeSpec, nodeName)
+	if addErr != nil {
+		t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", addErr)
+	}
+
+	// Act
+	_, markDesireToDetachErr := asw.MarkDesireToDetach(generatedVolumeName, nodeName)
+
+	// Assert
+	if markDesireToDetachErr != nil {
+		t.Fatalf("MarkDesireToDetach failed. Expected: <no error> Actual: <%v>", markDesireToDetachErr)
+	}
+
+	// Assert
+	attachedVolumes := asw.GetAttachedVolumes()
+	if len(attachedVolumes) != 1 {
+		t.Fatalf("len(attachedVolumes) Expected: <1> Actual: <%v>", len(attachedVolumes))
+	}
+
+	verifyAttachedVolume(t, attachedVolumes, generatedVolumeName, volumeName, nodeName, false /* expectedSafeToDetach */, true /* expectNonZeroDetachRequestedTime */)
+}
+
+func Test_MarkDesireToDetach_Positive_MarkedAddVolumeNodeReset(t *testing.T) {
+	// Arrange
+	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr(t)
+	asw := NewActualStateOfWorld(volumePluginMgr)
+	volumeName := "volume-name"
+	volumeSpec := controllervolumetesting.GetTestVolumeSpec(volumeName, volumeName)
+	nodeName := "node-name"
+	generatedVolumeName, addErr := asw.AddVolumeNode(volumeSpec, nodeName)
+	if addErr != nil {
+		t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", addErr)
+	}
+
+	// Act
+	_, markDesireToDetachErr := asw.MarkDesireToDetach(generatedVolumeName, nodeName)
+	generatedVolumeName, addErr = asw.AddVolumeNode(volumeSpec, nodeName)
+
+	// Assert
+	if markDesireToDetachErr != nil {
+		t.Fatalf("MarkDesireToDetach failed. Expected: <no error> Actual: <%v>", markDesireToDetachErr)
+	}
+	if addErr != nil {
+		t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", addErr)
+	}
+
+	// Assert
+	attachedVolumes := asw.GetAttachedVolumes()
+	if len(attachedVolumes) != 1 {
+		t.Fatalf("len(attachedVolumes) Expected: <1> Actual: <%v>", len(attachedVolumes))
+	}
+
+	verifyAttachedVolume(t, attachedVolumes, generatedVolumeName, volumeName, nodeName, false /* expectedSafeToDetach */, false /* expectNonZeroDetachRequestedTime */)
+}
+
+func Test_MarkDesireToDetach_Positive_MarkedVerifySafeToDetachPreserved(t *testing.T) {
+	// Arrange
+	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr(t)
+	asw := NewActualStateOfWorld(volumePluginMgr)
+	volumeName := "volume-name"
+	volumeSpec := controllervolumetesting.GetTestVolumeSpec(volumeName, volumeName)
+	nodeName := "node-name"
+	generatedVolumeName, addErr := asw.AddVolumeNode(volumeSpec, nodeName)
+	if addErr != nil {
+		t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", addErr)
+	}
+	markSafeToDetachErr := asw.MarkVolumeNodeSafeToDetach(generatedVolumeName, nodeName)
+	if markSafeToDetachErr != nil {
+		t.Fatalf("MarkVolumeNodeSafeToDetach failed. Expected: <no error> Actual: <%v>", markSafeToDetachErr)
+	}
+
+	// Act
+	_, markDesireToDetachErr := asw.MarkDesireToDetach(generatedVolumeName, nodeName)
+
+	// Assert
+	if markDesireToDetachErr != nil {
+		t.Fatalf("MarkDesireToDetach failed. Expected: <no error> Actual: <%v>", markDesireToDetachErr)
+	}
+
+	// Assert
+	attachedVolumes := asw.GetAttachedVolumes()
+	if len(attachedVolumes) != 1 {
+		t.Fatalf("len(attachedVolumes) Expected: <1> Actual: <%v>", len(attachedVolumes))
+	}
+
+	verifyAttachedVolume(t, attachedVolumes, generatedVolumeName, volumeName, nodeName, true /* expectedSafeToDetach */, true /* expectNonZeroDetachRequestedTime */)
+}
+
+func verifyAttachedVolume(
+	t *testing.T,
+	attachedVolumes []AttachedVolume,
+	expectedVolumeName,
+	expectedVolumeSpecName,
+	expectedNodeName string,
+	expectedSafeToDetach,
+	expectNonZeroDetachRequestedTime bool) {
+	for _, attachedVolume := range attachedVolumes {
+		if attachedVolume.VolumeName == expectedVolumeName &&
+			attachedVolume.VolumeSpec.Name() == expectedVolumeSpecName &&
+			attachedVolume.NodeName == expectedNodeName &&
+			attachedVolume.SafeToDetach == expectedSafeToDetach &&
+			attachedVolume.DetachRequestedTime.IsZero() == !expectNonZeroDetachRequestedTime {
+			return
+		}
+	}
+
+	t.Fatalf(
+		"attachedVolumes (%v) should contain the volume/node combo %q/%q with SafeToDetach=%v and NonZeroDetachRequestedTime=%v. It does not.",
+		attachedVolumes,
+		expectedVolumeName,
+		expectedNodeName,
+		expectedSafeToDetach,
+		expectNonZeroDetachRequestedTime)
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/volume/cache/desired_state_of_world.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/volume/cache/desired_state_of_world.go
new file mode 100644
index 000000000000..f2095150df24
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/volume/cache/desired_state_of_world.go
@@ -0,0 +1,301 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Package cache implements data structures used by the attach/detach controller
+to keep track of volumes, the nodes they are attached to, and the pods that
+reference them.
+*/
+package cache
+
+import (
+	"fmt"
+	"sync"
+
+	"k8s.io/kubernetes/pkg/volume"
+)
+
+// DesiredStateOfWorld defines a set of thread-safe operations supported on
+// the attach/detach controller's desired state of the world cache.
+// This cache contains nodes->volumes->pods where nodes are all the nodes
+// managed by the attach/detach controller, volumes are all the volumes that
+// should be attached to the specified node, and pods are the pods that
+// reference the volume and are scheduled to that node.
+type DesiredStateOfWorld interface {
+	// AddNode adds the given node to the list of nodes managed by the attach/
+	// detach controller.
+	// If the node already exists this is a no-op.
+ AddNode(nodeName string) + + // AddPod adds the given pod to the list of pods that reference the + // specified volume and are scheduled to the specified node. + // A unique volumeName is generated from the volumeSpec and returned on + // success. + // If the pod already exists under the specified volume, this is a no-op. + // If volumeSpec is not an attachable volume plugin, an error is returned. + // If no volume with the name volumeName exists in the list of volumes that + // should be attached to the specified node, the volume is implicitly added. + // If no node with the name nodeName exists in the list of nodes managed by + // the attach/detach controller, an error is returned. + AddPod(podName string, volumeSpec *volume.Spec, nodeName string) (string, error) + + // DeleteNode removes the given node from the list of nodes managed by the + // attach/detach controller. + // If the node does not exist this is a no-op. + // If the node exists but has 1 or more child volumes, an error is returned. + DeleteNode(nodeName string) error + + // DeletePod removes the given pod from the list of pods that reference the + // specified volume and are scheduled to the specified node. + // If no pod exists in the list of pods that reference the specified volume + // and are scheduled to the specified node, this is a no-op. + // If a node with the name nodeName does not exist in the list of nodes + // managed by the attach/detach controller, this is a no-op. + // If no volume with the name volumeName exists in the list of managed + // volumes under the specified node, this is a no-op. + // If after deleting the pod, the specified volume contains no other child + // pods, the volume is also deleted. + DeletePod(podName, volumeName, nodeName string) + + // NodeExists returns true if the node with the specified name exists in + // the list of nodes managed by the attach/detach controller. + NodeExists(nodeName string) bool + + // VolumeExists returns true if the volume with the specified name exists + // in the list of volumes that should be attached to the specified node by + // the attach/detach controller. + VolumeExists(volumeName, nodeName string) bool + + // GetVolumesToAttach generates and returns a list of volumes to attach + // and the nodes they should be attached to based on the current desired + // state of the world. + GetVolumesToAttach() []VolumeToAttach +} + +// VolumeToAttach represents a volume that should be attached to a node. +type VolumeToAttach struct { + // VolumeName is the unique identifier for the volume that should be + // attached. + VolumeName string + + // VolumeSpec is a volume spec containing the specification for the volume + // that should be attached. + VolumeSpec *volume.Spec + + // NodeName is the identifier for the node that the volume should be + // attached to. + NodeName string +} + +// NewDesiredStateOfWorld returns a new instance of DesiredStateOfWorld. +func NewDesiredStateOfWorld(volumePluginMgr *volume.VolumePluginMgr) DesiredStateOfWorld { + return &desiredStateOfWorld{ + nodesManaged: make(map[string]nodeManaged), + volumePluginMgr: volumePluginMgr, + } +} + +type desiredStateOfWorld struct { + // nodesManaged is a map containing the set of nodes managed by the attach/ + // detach controller. The key in this map is the name of the node and the + // value is a node object containing more information about the node.
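+ // All reads and writes of nodesManaged are guarded by the embedded + // sync.RWMutex below; every exported method takes the lock before touching + // this map.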
+ nodesManaged map[string]nodeManaged + // volumePluginMgr is the volume plugin manager used to create volume + // plugin objects. + volumePluginMgr *volume.VolumePluginMgr + sync.RWMutex +} + +// nodeManaged represents a node that is being managed by the attach/detach +// controller. +type nodeManaged struct { + // nodeName contains the name of this node. + nodeName string + + // volumesToAttach is a map containing the set of volumes that should be + // attached to this node. The key in the map is the name of the volume and + // the value is a volume object containing more information about the volume. + volumesToAttach map[string]volumeToAttach +} + +// The volume object represents a volume that should be attached to a node. +type volumeToAttach struct { + // volumeName contains the unique identifier for this volume. + volumeName string + + // spec is the volume spec containing the specification for this volume. + // Used to generate the volume plugin object, and passed to attach/detach + // methods. + spec *volume.Spec + + // scheduledPods is a map containing the set of pods that reference this + // volume and are scheduled to the underlying node. The key in the map is + // the name of the pod and the value is a pod object containing more + // information about the pod. + scheduledPods map[string]pod +} + +// The pod object represents a pod that references the underlying volume and is +// scheduled to the underlying node. +type pod struct { + // podName contains the name of this pod. + podName string +} + +func (dsw *desiredStateOfWorld) AddNode(nodeName string) { + dsw.Lock() + defer dsw.Unlock() + + if _, nodeExists := dsw.nodesManaged[nodeName]; !nodeExists { + dsw.nodesManaged[nodeName] = nodeManaged{ + nodeName: nodeName, + volumesToAttach: make(map[string]volumeToAttach), + } + } +} + +func (dsw *desiredStateOfWorld) AddPod(podName string, volumeSpec *volume.Spec, nodeName string) (string, error) { + dsw.Lock() + defer dsw.Unlock() + + nodeObj, nodeExists := dsw.nodesManaged[nodeName] + if !nodeExists { + return "", fmt.Errorf( + "no node with the name %q exists in the list of managed nodes", + nodeName) + } + + attachableVolumePlugin, err := dsw.volumePluginMgr.FindAttachablePluginBySpec(volumeSpec) + if err != nil || attachableVolumePlugin == nil { + return "", fmt.Errorf( + "failed to get AttachablePlugin from volumeSpec for volume %q err=%v", + volumeSpec.Name(), + err) + } + + volumeName, err := attachableVolumePlugin.GetUniqueVolumeName(volumeSpec) + if err != nil { + return "", fmt.Errorf( + "failed to GetUniqueVolumeName from AttachablePlugin for volumeSpec %q err=%v", + volumeSpec.Name(), + err) + } + + volumeObj, volumeExists := nodeObj.volumesToAttach[volumeName] + if !volumeExists { + volumeObj = volumeToAttach{ + volumeName: volumeName, + spec: volumeSpec, + scheduledPods: make(map[string]pod), + } + dsw.nodesManaged[nodeName].volumesToAttach[volumeName] = volumeObj + } + + if _, podExists := volumeObj.scheduledPods[podName]; !podExists { + dsw.nodesManaged[nodeName].volumesToAttach[volumeName].scheduledPods[podName] = + pod{ + podName: podName, + } + } + + return volumeName, nil +} + +func (dsw *desiredStateOfWorld) DeleteNode(nodeName string) error { + dsw.Lock() + defer dsw.Unlock() + + nodeObj, nodeExists := dsw.nodesManaged[nodeName] + if !nodeExists { + return nil + } + + if len(nodeObj.volumesToAttach) > 0 { + return fmt.Errorf( + "failed to delete node %q from list of nodes managed by attach/detach controller--the node still contains %v volumes in its list of
volumes to attach", + nodeName, + len(nodeObj.volumesToAttach)) + } + + delete( + dsw.nodesManaged, + nodeName) + return nil +} + +func (dsw *desiredStateOfWorld) DeletePod(podName, volumeName, nodeName string) { + dsw.Lock() + defer dsw.Unlock() + + nodeObj, nodeExists := dsw.nodesManaged[nodeName] + if !nodeExists { + return + } + + volumeObj, volumeExists := nodeObj.volumesToAttach[volumeName] + if !volumeExists { + return + } + if _, podExists := volumeObj.scheduledPods[podName]; !podExists { + return + } + + delete( + dsw.nodesManaged[nodeName].volumesToAttach[volumeName].scheduledPods, + podName) + + if len(volumeObj.scheduledPods) == 0 { + delete( + dsw.nodesManaged[nodeName].volumesToAttach, + volumeName) + } +} + +func (dsw *desiredStateOfWorld) NodeExists(nodeName string) bool { + dsw.RLock() + defer dsw.RUnlock() + + _, nodeExists := dsw.nodesManaged[nodeName] + return nodeExists +} + +func (dsw *desiredStateOfWorld) VolumeExists(volumeName, nodeName string) bool { + dsw.RLock() + defer dsw.RUnlock() + + nodeObj, nodeExists := dsw.nodesManaged[nodeName] + if nodeExists { + if _, volumeExists := nodeObj.volumesToAttach[volumeName]; volumeExists { + return true + } + } + + return false +} + +func (dsw *desiredStateOfWorld) GetVolumesToAttach() []VolumeToAttach { + dsw.RLock() + defer dsw.RUnlock() + + volumesToAttach := make([]VolumeToAttach, 0 /* len */, len(dsw.nodesManaged) /* cap */) + for nodeName, nodeObj := range dsw.nodesManaged { + for volumeName, volumeObj := range nodeObj.volumesToAttach { + volumesToAttach = append(volumesToAttach, VolumeToAttach{NodeName: nodeName, VolumeName: volumeName, VolumeSpec: volumeObj.spec}) + } + } + + return volumesToAttach +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/volume/cache/desired_state_of_world_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/volume/cache/desired_state_of_world_test.go new file mode 100644 index 000000000000..18bd2b838196 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/volume/cache/desired_state_of_world_test.go @@ -0,0 +1,974 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cache + +import ( + "testing" + + controllervolumetesting "k8s.io/kubernetes/pkg/controller/volume/testing" +) + +func Test_AddNode_Positive_NewNode(t *testing.T) { + // Arrange + volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr((t)) + dsw := NewDesiredStateOfWorld(volumePluginMgr) + nodeName := "node-name" + + // Act + dsw.AddNode(nodeName) + + // Assert + nodeExists := dsw.NodeExists(nodeName) + if !nodeExists { + t.Fatalf("Added node %q does not exist, it should.", nodeName) + } + + volumesToAttach := dsw.GetVolumesToAttach() + if len(volumesToAttach) != 0 { + t.Fatalf("len(volumesToAttach) Expected: <0> Actual: <%v>", len(volumesToAttach)) + } +} + +func Test_AddNode_Positive_ExistingVolume(t *testing.T) { + // Arrange + volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr((t)) + dsw := NewDesiredStateOfWorld(volumePluginMgr) + nodeName := "node-name" + dsw.AddNode(nodeName) + + // Act + dsw.AddNode(nodeName) + + // Assert + nodeExists := dsw.NodeExists(nodeName) + if !nodeExists { + t.Fatalf("Added node %q does not exist, it should.", nodeName) + } + + volumesToAttach := dsw.GetVolumesToAttach() + if len(volumesToAttach) != 0 { + t.Fatalf("len(volumesToAttach) Expected: <0> Actual: <%v>", len(volumesToAttach)) + } +} +func Test_AddNode_Positive_ExistingNode(t *testing.T) { + // Arrange + volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr((t)) + dsw := NewDesiredStateOfWorld(volumePluginMgr) + nodeName := "node-name" + + // Act + dsw.AddNode(nodeName) + + // Assert + nodeExists := dsw.NodeExists(nodeName) + if !nodeExists { + t.Fatalf("Added node %q does not exist, it should.", nodeName) + } + + // Act + dsw.AddNode(nodeName) + + // Assert + nodeExists = dsw.NodeExists(nodeName) + if !nodeExists { + t.Fatalf("Added node %q does not exist, it should.", nodeName) + } + + volumesToAttach := dsw.GetVolumesToAttach() + if len(volumesToAttach) != 0 { + t.Fatalf("len(volumesToAttach) Expected: <0> Actual: <%v>", len(volumesToAttach)) + } +} + +func Test_AddPod_Positive_NewPodNodeExistsVolumeDoesntExist(t *testing.T) { + // Arrange + volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr((t)) + dsw := NewDesiredStateOfWorld(volumePluginMgr) + podName := "pod-name" + volumeName := "volume-name" + volumeSpec := controllervolumetesting.GetTestVolumeSpec(volumeName, volumeName) + nodeName := "node-name" + dsw.AddNode(nodeName) + volumeExists := dsw.VolumeExists(volumeName, nodeName) + if volumeExists { + t.Fatalf( + "Volume %q/node %q should not exist, but it does.", + volumeName, + nodeName) + } + + // Act + generatedVolumeName, podErr := dsw.AddPod(podName, volumeSpec, nodeName) + + // Assert + if podErr != nil { + t.Fatalf("AddPod failed. Expected: Actual: <%v>", podErr) + } + + volumeExists = dsw.VolumeExists(generatedVolumeName, nodeName) + if !volumeExists { + t.Fatalf( + "Added pod %q to volume %q/node %q. 
Volume does not exist, it should.", + podName, + generatedVolumeName, + nodeName) + } + + volumesToAttach := dsw.GetVolumesToAttach() + if len(volumesToAttach) != 1 { + t.Fatalf("len(volumesToAttach) Expected: <1> Actual: <%v>", len(volumesToAttach)) + } + + verifyVolumeToAttach(t, volumesToAttach, nodeName, generatedVolumeName, volumeName) +} + +func Test_AddPod_Positive_NewPodNodeExistsVolumeExists(t *testing.T) { + // Arrange + volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr((t)) + dsw := NewDesiredStateOfWorld(volumePluginMgr) + pod1Name := "pod1-name" + pod2Name := "pod2-name" + volumeName := "volume-name" + volumeSpec := controllervolumetesting.GetTestVolumeSpec(volumeName, volumeName) + nodeName := "node-name" + dsw.AddNode(nodeName) + volumeExists := dsw.VolumeExists(volumeName, nodeName) + if volumeExists { + t.Fatalf( + "Volume %q/node %q should not exist, but it does.", + volumeName, + nodeName) + } + + // Act + generatedVolumeName, podErr := dsw.AddPod(pod1Name, volumeSpec, nodeName) + + // Assert + if podErr != nil { + t.Fatalf( + "AddPod failed for pod %q. Expected: Actual: <%v>", + pod1Name, + podErr) + } + + volumeExists = dsw.VolumeExists(generatedVolumeName, nodeName) + if !volumeExists { + t.Fatalf( + "Added pod %q to volume %q/node %q. Volume does not exist, it should.", + pod1Name, + generatedVolumeName, + nodeName) + } + + // Act + generatedVolumeName, podErr = dsw.AddPod(pod2Name, volumeSpec, nodeName) + + // Assert + if podErr != nil { + t.Fatalf("AddPod failed for pod %q. Expected: Actual: <%v>", + pod2Name, + podErr) + } + + volumeExists = dsw.VolumeExists(generatedVolumeName, nodeName) + if !volumeExists { + t.Fatalf( + "Added pod %q to volume %q/node %q. Volume does not exist, it should.", + pod1Name, + generatedVolumeName, + nodeName) + } + + volumesToAttach := dsw.GetVolumesToAttach() + if len(volumesToAttach) != 1 { + t.Fatalf("len(volumesToAttach) Expected: <1> Actual: <%v>", len(volumesToAttach)) + } + + verifyVolumeToAttach(t, volumesToAttach, nodeName, generatedVolumeName, volumeName) +} + +func Test_AddPod_Positive_PodExistsNodeExistsVolumeExists(t *testing.T) { + // Arrange + volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr((t)) + dsw := NewDesiredStateOfWorld(volumePluginMgr) + podName := "pod-name" + volumeName := "volume-name" + volumeSpec := controllervolumetesting.GetTestVolumeSpec(volumeName, volumeName) + nodeName := "node-name" + dsw.AddNode(nodeName) + volumeExists := dsw.VolumeExists(volumeName, nodeName) + if volumeExists { + t.Fatalf( + "Volume %q/node %q should not exist, but it does.", + volumeName, + nodeName) + } + + // Act + generatedVolumeName, podErr := dsw.AddPod(podName, volumeSpec, nodeName) + + // Assert + if podErr != nil { + t.Fatalf( + "AddPod failed for pod %q. Expected: Actual: <%v>", + podName, + podErr) + } + + volumeExists = dsw.VolumeExists(generatedVolumeName, nodeName) + if !volumeExists { + t.Fatalf( + "Added pod %q to volume %q/node %q. Volume does not exist, it should.", + podName, + generatedVolumeName, + nodeName) + } + + // Act + generatedVolumeName, podErr = dsw.AddPod(podName, volumeSpec, nodeName) + + // Assert + if podErr != nil { + t.Fatalf("AddPod failed for pod %q. Expected: Actual: <%v>", + podName, + podErr) + } + + volumeExists = dsw.VolumeExists(generatedVolumeName, nodeName) + if !volumeExists { + t.Fatalf( + "Added pod %q to volume %q/node %q. 
Volume does not exist, it should.", + podName, + generatedVolumeName, + nodeName) + } + + volumesToAttach := dsw.GetVolumesToAttach() + if len(volumesToAttach) != 1 { + t.Fatalf("len(volumesToAttach) Expected: <1> Actual: <%v>", len(volumesToAttach)) + } + + verifyVolumeToAttach(t, volumesToAttach, nodeName, generatedVolumeName, volumeName) +} + +func Test_AddPod_Negative_NewPodNodeDoesntExistVolumeDoesntExist(t *testing.T) { + // Arrange + volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr((t)) + dsw := NewDesiredStateOfWorld(volumePluginMgr) + podName := "pod-name" + volumeName := "volume-name" + volumeSpec := controllervolumetesting.GetTestVolumeSpec(volumeName, volumeName) + nodeName := "node-name" + volumeExists := dsw.VolumeExists(volumeName, nodeName) + if volumeExists { + t.Fatalf( + "Volume %q/node %q should not exist, but it does.", + volumeName, + nodeName) + } + + // Act + _, podErr := dsw.AddPod(podName, volumeSpec, nodeName) + + // Assert + if podErr == nil { + t.Fatalf("AddPod did not fail. Expected: <\"failed to add pod...no node with that name exists in the list of managed nodes\"> Actual: ") + } + + volumeExists = dsw.VolumeExists(volumeName, nodeName) + if volumeExists { + t.Fatalf( + "Volume %q/node %q should not exist, but it does.", + volumeName, + nodeName) + } + + volumesToAttach := dsw.GetVolumesToAttach() + if len(volumesToAttach) != 0 { + t.Fatalf("len(volumesToAttach) Expected: <0> Actual: <%v>", len(volumesToAttach)) + } +} + +func Test_DeleteNode_Positive_NodeExists(t *testing.T) { + // Arrange + volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr((t)) + dsw := NewDesiredStateOfWorld(volumePluginMgr) + nodeName := "node-name" + dsw.AddNode(nodeName) + + // Act + err := dsw.DeleteNode(nodeName) + + // Assert + if err != nil { + t.Fatalf("DeleteNode failed. Expected: Actual: <%v>", err) + } + + nodeExists := dsw.NodeExists(nodeName) + if nodeExists { + t.Fatalf("Deleted node %q still exists, it should not.", nodeName) + } + + volumesToAttach := dsw.GetVolumesToAttach() + if len(volumesToAttach) != 0 { + t.Fatalf("len(volumesToAttach) Expected: <0> Actual: <%v>", len(volumesToAttach)) + } +} + +func Test_DeleteNode_Positive_NodeDoesntExist(t *testing.T) { + // Arrange + volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr((t)) + dsw := NewDesiredStateOfWorld(volumePluginMgr) + notAddedNodeName := "node-not-added-name" + + // Act + err := dsw.DeleteNode(notAddedNodeName) + + // Assert + if err != nil { + t.Fatalf("DeleteNode failed. Expected: Actual: <%v>", err) + } + + nodeExists := dsw.NodeExists(notAddedNodeName) + if nodeExists { + t.Fatalf("Deleted node %q still exists, it should not.", notAddedNodeName) + } + + volumesToAttach := dsw.GetVolumesToAttach() + if len(volumesToAttach) != 0 { + t.Fatalf("len(volumesToAttach) Expected: <0> Actual: <%v>", len(volumesToAttach)) + } +} + +func Test_DeleteNode_Negative_NodeExistsHasChildVolumes(t *testing.T) { + // Arrange + volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr((t)) + dsw := NewDesiredStateOfWorld(volumePluginMgr) + nodeName := "node-name" + dsw.AddNode(nodeName) + podName := "pod-name" + volumeName := "volume-name" + volumeSpec := controllervolumetesting.GetTestVolumeSpec(volumeName, volumeName) + generatedVolumeName, podAddErr := dsw.AddPod(podName, volumeSpec, nodeName) + if podAddErr != nil { + t.Fatalf( + "AddPod failed for pod %q. 
Expected: Actual: <%v>", + podName, + podAddErr) + } + + // Act + err := dsw.DeleteNode(nodeName) + + // Assert + if err == nil { + t.Fatalf("DeleteNode did not fail. Expected: <\"\"> Actual: ") + } + + nodeExists := dsw.NodeExists(nodeName) + if !nodeExists { + t.Fatalf("Node %q no longer exists, it should.", nodeName) + } + + volumesToAttach := dsw.GetVolumesToAttach() + if len(volumesToAttach) != 1 { + t.Fatalf("len(volumesToAttach) Expected: <1> Actual: <%v>", len(volumesToAttach)) + } + + verifyVolumeToAttach(t, volumesToAttach, nodeName, generatedVolumeName, volumeName) +} + +func Test_DeletePod_Positive_PodExistsNodeExistsVolumeExists(t *testing.T) { + // Arrange + volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr((t)) + dsw := NewDesiredStateOfWorld(volumePluginMgr) + podName := "pod-name" + volumeName := "volume-name" + volumeSpec := controllervolumetesting.GetTestVolumeSpec(volumeName, volumeName) + nodeName := "node-name" + dsw.AddNode(nodeName) + generatedVolumeName, podAddErr := dsw.AddPod(podName, volumeSpec, nodeName) + if podAddErr != nil { + t.Fatalf( + "AddPod failed for pod %q. Expected: Actual: <%v>", + podName, + podAddErr) + } + volumeExists := dsw.VolumeExists(generatedVolumeName, nodeName) + if !volumeExists { + t.Fatalf( + "Added pod %q to volume %q/node %q. Volume does not exist, it should.", + podName, + generatedVolumeName, + nodeName) + } + + // Act + dsw.DeletePod(podName, generatedVolumeName, nodeName) + + // Assert + volumeExists = dsw.VolumeExists(generatedVolumeName, nodeName) + if volumeExists { + t.Fatalf( + "Deleted pod %q from volume %q/node %q. Volume should also be deleted but it still exists.", + podName, + generatedVolumeName, + nodeName) + } + + volumesToAttach := dsw.GetVolumesToAttach() + if len(volumesToAttach) != 0 { + t.Fatalf("len(volumesToAttach) Expected: <0> Actual: <%v>", len(volumesToAttach)) + } +} + +func Test_DeletePod_Positive_2PodsExistNodeExistsVolumesExist(t *testing.T) { + // Arrange + volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr((t)) + dsw := NewDesiredStateOfWorld(volumePluginMgr) + pod1Name := "pod1-name" + pod2Name := "pod2-name" + volumeName := "volume-name" + volumeSpec := controllervolumetesting.GetTestVolumeSpec(volumeName, volumeName) + nodeName := "node-name" + dsw.AddNode(nodeName) + generatedVolumeName1, pod1AddErr := dsw.AddPod(pod1Name, volumeSpec, nodeName) + if pod1AddErr != nil { + t.Fatalf( + "AddPod failed for pod %q. Expected: Actual: <%v>", + pod1Name, + pod1AddErr) + } + generatedVolumeName2, pod2AddErr := dsw.AddPod(pod2Name, volumeSpec, nodeName) + if pod2AddErr != nil { + t.Fatalf( + "AddPod failed for pod %q. 
Expected: Actual: <%v>", + pod2Name, + pod2AddErr) + } + if generatedVolumeName1 != generatedVolumeName2 { + t.Fatalf( + "Generated volume names for the same volume should be the same but they are not: %q and %q", + generatedVolumeName1, + generatedVolumeName2) + } + volumeExists := dsw.VolumeExists(generatedVolumeName1, nodeName) + if !volumeExists { + t.Fatalf( + "Volume %q does not exist under node %q, it should.", + generatedVolumeName1, + nodeName) + } + + // Act + dsw.DeletePod(pod1Name, generatedVolumeName1, nodeName) + + // Assert + volumeExists = dsw.VolumeExists(generatedVolumeName1, nodeName) + if !volumeExists { + t.Fatalf( + "Volume %q under node %q should still exist, but it does not.", + generatedVolumeName1, + nodeName) + } + + volumesToAttach := dsw.GetVolumesToAttach() + if len(volumesToAttach) != 1 { + t.Fatalf("len(volumesToAttach) Expected: <1> Actual: <%v>", len(volumesToAttach)) + } + + verifyVolumeToAttach(t, volumesToAttach, nodeName, generatedVolumeName1, volumeName) +} + +func Test_DeletePod_Positive_PodDoesNotExist(t *testing.T) { + // Arrange + volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr((t)) + dsw := NewDesiredStateOfWorld(volumePluginMgr) + pod1Name := "pod1-name" + pod2Name := "pod2-name" + volumeName := "volume-name" + volumeSpec := controllervolumetesting.GetTestVolumeSpec(volumeName, volumeName) + nodeName := "node-name" + dsw.AddNode(nodeName) + generatedVolumeName, pod1AddErr := dsw.AddPod(pod1Name, volumeSpec, nodeName) + if pod1AddErr != nil { + t.Fatalf( + "AddPod failed for pod %q. Expected: Actual: <%v>", + pod1Name, + pod1AddErr) + } + volumeExists := dsw.VolumeExists(generatedVolumeName, nodeName) + if !volumeExists { + t.Fatalf( + "Added pod %q to volume %q/node %q. Volume does not exist, it should.", + pod1Name, + generatedVolumeName, + nodeName) + } + + // Act + dsw.DeletePod(pod2Name, generatedVolumeName, nodeName) + + // Assert + volumeExists = dsw.VolumeExists(generatedVolumeName, nodeName) + if !volumeExists { + t.Fatalf( + "Volume %q/node %q does not exist, it should.", + generatedVolumeName, + nodeName) + } + + volumesToAttach := dsw.GetVolumesToAttach() + if len(volumesToAttach) != 1 { + t.Fatalf("len(volumesToAttach) Expected: <1> Actual: <%v>", len(volumesToAttach)) + } + + verifyVolumeToAttach(t, volumesToAttach, nodeName, generatedVolumeName, volumeName) +} + +func Test_DeletePod_Positive_NodeDoesNotExist(t *testing.T) { + // Arrange + volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr((t)) + dsw := NewDesiredStateOfWorld(volumePluginMgr) + podName := "pod-name" + volumeName := "volume-name" + volumeSpec := controllervolumetesting.GetTestVolumeSpec(volumeName, volumeName) + node1Name := "node1-name" + dsw.AddNode(node1Name) + generatedVolumeName, podAddErr := dsw.AddPod(podName, volumeSpec, node1Name) + if podAddErr != nil { + t.Fatalf( + "AddPod failed for pod %q. Expected: Actual: <%v>", + podName, + podAddErr) + } + volumeExists := dsw.VolumeExists(generatedVolumeName, node1Name) + if !volumeExists { + t.Fatalf( + "Added pod %q to volume %q/node %q. 
Volume does not exist, it should.", + podName, + generatedVolumeName, + node1Name) + } + node2Name := "node2-name" + + // Act + dsw.DeletePod(podName, generatedVolumeName, node2Name) + + // Assert + volumeExists = dsw.VolumeExists(generatedVolumeName, node1Name) + if !volumeExists { + t.Fatalf( + "Volume %q/node %q does not exist, it should.", + generatedVolumeName, + node1Name) + } + volumeExists = dsw.VolumeExists(generatedVolumeName, node2Name) + if volumeExists { + t.Fatalf( + "node %q exists, it should not.", + node2Name) + } + + volumesToAttach := dsw.GetVolumesToAttach() + if len(volumesToAttach) != 1 { + t.Fatalf("len(volumesToAttach) Expected: <1> Actual: <%v>", len(volumesToAttach)) + } + + verifyVolumeToAttach(t, volumesToAttach, node1Name, generatedVolumeName, volumeName) +} + +func Test_DeletePod_Positive_VolumeDoesNotExist(t *testing.T) { + // Arrange + volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr((t)) + dsw := NewDesiredStateOfWorld(volumePluginMgr) + podName := "pod-name" + volume1Name := "volume1-name" + volume1Spec := controllervolumetesting.GetTestVolumeSpec(volume1Name, volume1Name) + nodeName := "node-name" + dsw.AddNode(nodeName) + generatedVolume1Name, podAddErr := dsw.AddPod(podName, volume1Spec, nodeName) + if podAddErr != nil { + t.Fatalf( + "AddPod failed for pod %q. Expected: Actual: <%v>", + podName, + podAddErr) + } + volumeExists := dsw.VolumeExists(generatedVolume1Name, nodeName) + if !volumeExists { + t.Fatalf( + "Added pod %q to volume %q/node %q. Volume does not exist, it should.", + podName, + generatedVolume1Name, + nodeName) + } + volume2Name := "volume2-name" + + // Act + dsw.DeletePod(podName, volume2Name, nodeName) + + // Assert + volumeExists = dsw.VolumeExists(generatedVolume1Name, nodeName) + if !volumeExists { + t.Fatalf( + "Volume %q/node %q does not exist, it should.", + generatedVolume1Name, + nodeName) + } + volumeExists = dsw.VolumeExists(volume2Name, nodeName) + if volumeExists { + t.Fatalf( + "volume %q exists, it should not.", + volume2Name) + } + + volumesToAttach := dsw.GetVolumesToAttach() + if len(volumesToAttach) != 1 { + t.Fatalf("len(volumesToAttach) Expected: <1> Actual: <%v>", len(volumesToAttach)) + } + + verifyVolumeToAttach(t, volumesToAttach, nodeName, generatedVolume1Name, volume1Name) +} + +func Test_NodeExists_Positive_NodeExists(t *testing.T) { + // Arrange + volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr((t)) + dsw := NewDesiredStateOfWorld(volumePluginMgr) + notAddedNodeName := "node-not-added-name" + + // Act + notAddedNodeExists := dsw.NodeExists(notAddedNodeName) + + // Assert + if notAddedNodeExists { + t.Fatalf("Node %q exists, it should not.", notAddedNodeName) + } + + volumesToAttach := dsw.GetVolumesToAttach() + if len(volumesToAttach) != 0 { + t.Fatalf("len(volumesToAttach) Expected: <0> Actual: <%v>", len(volumesToAttach)) + } +} + +func Test_NodeExists_Positive_NodeDoesntExist(t *testing.T) { + // Arrange + volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr((t)) + dsw := NewDesiredStateOfWorld(volumePluginMgr) + nodeName := "node-name" + dsw.AddNode(nodeName) + + // Act + nodeExists := dsw.NodeExists(nodeName) + + // Assert + if !nodeExists { + t.Fatalf("Node %q does not exist, it should.", nodeName) + } + + volumesToAttach := dsw.GetVolumesToAttach() + if len(volumesToAttach) != 0 { + t.Fatalf("len(volumesToAttach) Expected: <0> Actual: <%v>", len(volumesToAttach)) + } +} + +func Test_VolumeExists_Positive_VolumeExistsNodeExists(t *testing.T) { 
+ // Arrange + volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr((t)) + dsw := NewDesiredStateOfWorld(volumePluginMgr) + nodeName := "node-name" + dsw.AddNode(nodeName) + podName := "pod-name" + volumeName := "volume-name" + volumeSpec := controllervolumetesting.GetTestVolumeSpec(volumeName, volumeName) + generatedVolumeName, _ := dsw.AddPod(podName, volumeSpec, nodeName) + + // Act + volumeExists := dsw.VolumeExists(generatedVolumeName, nodeName) + + // Assert + if !volumeExists { + t.Fatalf("Volume %q does not exist, it should.", generatedVolumeName) + } + + volumesToAttach := dsw.GetVolumesToAttach() + if len(volumesToAttach) != 1 { + t.Fatalf("len(volumesToAttach) Expected: <1> Actual: <%v>", len(volumesToAttach)) + } + + verifyVolumeToAttach(t, volumesToAttach, nodeName, generatedVolumeName, volumeName) +} + +func Test_VolumeExists_Positive_VolumeDoesntExistNodeExists(t *testing.T) { + // Arrange + volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr((t)) + dsw := NewDesiredStateOfWorld(volumePluginMgr) + nodeName := "node-name" + dsw.AddNode(nodeName) + podName := "pod-name" + volume1Name := "volume1-name" + volume1Spec := controllervolumetesting.GetTestVolumeSpec(volume1Name, volume1Name) + generatedVolume1Name, podAddErr := dsw.AddPod(podName, volume1Spec, nodeName) + if podAddErr != nil { + t.Fatalf( + "AddPod failed for pod %q. Expected: Actual: <%v>", + podName, + podAddErr) + } + volume2Name := "volume2-name" + + // Act + volumeExists := dsw.VolumeExists(volume2Name, nodeName) + + // Assert + if volumeExists { + t.Fatalf("Volume %q exists, it should not.", volume2Name) + } + + volumesToAttach := dsw.GetVolumesToAttach() + if len(volumesToAttach) != 1 { + t.Fatalf("len(volumesToAttach) Expected: <1> Actual: <%v>", len(volumesToAttach)) + } + + verifyVolumeToAttach(t, volumesToAttach, nodeName, generatedVolume1Name, volume1Name) +} + +func Test_VolumeExists_Positive_VolumeDoesntExistNodeDoesntExists(t *testing.T) { + // Arrange + volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr((t)) + dsw := NewDesiredStateOfWorld(volumePluginMgr) + nodeName := "node-name" + volumeName := "volume-name" + + // Act + volumeExists := dsw.VolumeExists(volumeName, nodeName) + + // Assert + if volumeExists { + t.Fatalf("Volume %q exists, it should not.", volumeName) + } + + volumesToAttach := dsw.GetVolumesToAttach() + if len(volumesToAttach) != 0 { + t.Fatalf("len(volumesToAttach) Expected: <0> Actual: <%v>", len(volumesToAttach)) + } +} + +func Test_GetVolumesToAttach_Positive_NoNodes(t *testing.T) { + // Arrange + volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr((t)) + dsw := NewDesiredStateOfWorld(volumePluginMgr) + + // Act + volumesToAttach := dsw.GetVolumesToAttach() + + // Assert + if len(volumesToAttach) > 0 { + t.Fatalf("len(volumesToAttach) Expected: <0> Actual: <%v>", len(volumesToAttach)) + } +} + +func Test_GetVolumesToAttach_Positive_TwoNodes(t *testing.T) { + // Arrange + volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr((t)) + dsw := NewDesiredStateOfWorld(volumePluginMgr) + node1Name := "node1-name" + node2Name := "node2-name" + dsw.AddNode(node1Name) + dsw.AddNode(node2Name) + + // Act + volumesToAttach := dsw.GetVolumesToAttach() + + // Assert + if len(volumesToAttach) != 0 { + t.Fatalf("len(volumesToAttach) Expected: <0> Actual: <%v>", len(volumesToAttach)) + } +} + +func Test_GetVolumesToAttach_Positive_TwoNodesOneVolumeEach(t *testing.T) { + // Arrange + volumePluginMgr, _ := 
controllervolumetesting.GetTestVolumePluginMgr((t)) + dsw := NewDesiredStateOfWorld(volumePluginMgr) + node1Name := "node1-name" + pod1Name := "pod1-name" + volume1Name := "volume1-name" + volume1Spec := controllervolumetesting.GetTestVolumeSpec(volume1Name, volume1Name) + dsw.AddNode(node1Name) + generatedVolume1Name, podAddErr := dsw.AddPod(pod1Name, volume1Spec, node1Name) + if podAddErr != nil { + t.Fatalf( + "AddPod failed for pod %q. Expected: Actual: <%v>", + pod1Name, + podAddErr) + } + node2Name := "node2-name" + pod2Name := "pod2-name" + volume2Name := "volume2-name" + volume2Spec := controllervolumetesting.GetTestVolumeSpec(volume2Name, volume2Name) + dsw.AddNode(node2Name) + generatedVolume2Name, podAddErr := dsw.AddPod(pod2Name, volume2Spec, node2Name) + if podAddErr != nil { + t.Fatalf( + "AddPod failed for pod %q. Expected: Actual: <%v>", + pod2Name, + podAddErr) + } + + // Act + volumesToAttach := dsw.GetVolumesToAttach() + + // Assert + if len(volumesToAttach) != 2 { + t.Fatalf("len(volumesToAttach) Expected: <2> Actual: <%v>", len(volumesToAttach)) + } + + verifyVolumeToAttach(t, volumesToAttach, node1Name, generatedVolume1Name, volume1Name) + verifyVolumeToAttach(t, volumesToAttach, node2Name, generatedVolume2Name, volume2Name) +} + +func Test_GetVolumesToAttach_Positive_TwoNodesOneVolumeEachExtraPod(t *testing.T) { + // Arrange + volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr((t)) + dsw := NewDesiredStateOfWorld(volumePluginMgr) + node1Name := "node1-name" + pod1Name := "pod1-name" + volume1Name := "volume1-name" + volume1Spec := controllervolumetesting.GetTestVolumeSpec(volume1Name, volume1Name) + dsw.AddNode(node1Name) + generatedVolume1Name, podAddErr := dsw.AddPod(pod1Name, volume1Spec, node1Name) + if podAddErr != nil { + t.Fatalf( + "AddPod failed for pod %q. Expected: Actual: <%v>", + pod1Name, + podAddErr) + } + node2Name := "node2-name" + pod2Name := "pod2-name" + volume2Name := "volume2-name" + volume2Spec := controllervolumetesting.GetTestVolumeSpec(volume2Name, volume2Name) + dsw.AddNode(node2Name) + generatedVolume2Name, podAddErr := dsw.AddPod(pod2Name, volume2Spec, node2Name) + if podAddErr != nil { + t.Fatalf( + "AddPod failed for pod %q. Expected: Actual: <%v>", + pod2Name, + podAddErr) + } + pod3Name := "pod3-name" + dsw.AddPod(pod3Name, volume2Spec, node2Name) + _, podAddErr = dsw.AddPod(pod3Name, volume2Spec, node2Name) + if podAddErr != nil { + t.Fatalf( + "AddPod failed for pod %q. Expected: Actual: <%v>", + pod3Name, + podAddErr) + } + + // Act + volumesToAttach := dsw.GetVolumesToAttach() + + // Assert + if len(volumesToAttach) != 2 { + t.Fatalf("len(volumesToAttach) Expected: <2> Actual: <%v>", len(volumesToAttach)) + } + + verifyVolumeToAttach(t, volumesToAttach, node1Name, generatedVolume1Name, volume1Name) + verifyVolumeToAttach(t, volumesToAttach, node2Name, generatedVolume2Name, volume2Name) +} + +func Test_GetVolumesToAttach_Positive_TwoNodesThreeVolumes(t *testing.T) { + // Arrange + volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr((t)) + dsw := NewDesiredStateOfWorld(volumePluginMgr) + node1Name := "node1-name" + pod1Name := "pod1-name" + volume1Name := "volume1-name" + volume1Spec := controllervolumetesting.GetTestVolumeSpec(volume1Name, volume1Name) + dsw.AddNode(node1Name) + generatedVolume1Name, podAddErr := dsw.AddPod(pod1Name, volume1Spec, node1Name) + if podAddErr != nil { + t.Fatalf( + "AddPod failed for pod %q. 
Expected: Actual: <%v>", + pod1Name, + podAddErr) + } + node2Name := "node2-name" + pod2aName := "pod2a-name" + volume2Name := "volume2-name" + volume2Spec := controllervolumetesting.GetTestVolumeSpec(volume2Name, volume2Name) + dsw.AddNode(node2Name) + generatedVolume2Name1, podAddErr := dsw.AddPod(pod2aName, volume2Spec, node2Name) + if podAddErr != nil { + t.Fatalf( + "AddPod failed for pod %q. Expected: Actual: <%v>", + pod2aName, + podAddErr) + } + pod2bName := "pod2b-name" + generatedVolume2Name2, podAddErr := dsw.AddPod(pod2bName, volume2Spec, node2Name) + if podAddErr != nil { + t.Fatalf( + "AddPod failed for pod %q. Expected: Actual: <%v>", + pod2bName, + podAddErr) + } + if generatedVolume2Name1 != generatedVolume2Name2 { + t.Fatalf( + "Generated volume names for the same volume should be the same but they are not: %q and %q", + generatedVolume2Name1, + generatedVolume2Name2) + } + pod3Name := "pod3-name" + volume3Name := "volume3-name" + volume3Spec := controllervolumetesting.GetTestVolumeSpec(volume3Name, volume3Name) + generatedVolume3Name, podAddErr := dsw.AddPod(pod3Name, volume3Spec, node1Name) + if podAddErr != nil { + t.Fatalf( + "AddPod failed for pod %q. Expected: Actual: <%v>", + pod3Name, + podAddErr) + } + + // Act + volumesToAttach := dsw.GetVolumesToAttach() + + // Assert + if len(volumesToAttach) != 3 { + t.Fatalf("len(volumesToAttach) Expected: <3> Actual: <%v>", len(volumesToAttach)) + } + + verifyVolumeToAttach(t, volumesToAttach, node1Name, generatedVolume1Name, volume1Name) + verifyVolumeToAttach(t, volumesToAttach, node2Name, generatedVolume2Name1, volume2Name) + verifyVolumeToAttach(t, volumesToAttach, node1Name, generatedVolume3Name, volume3Name) +} + +func verifyVolumeToAttach( + t *testing.T, + volumesToAttach []VolumeToAttach, + expectedNodeName, + expectedVolumeName, + expectedVolumeSpecName string) { + for _, volumeToAttach := range volumesToAttach { + if volumeToAttach.NodeName == expectedNodeName && + volumeToAttach.VolumeName == expectedVolumeName && + volumeToAttach.VolumeSpec.Name() == expectedVolumeSpecName { + return + } + } + + t.Fatalf("volumesToAttach (%v) should contain %q/%q. It does not.", volumesToAttach, expectedVolumeName, expectedNodeName) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/volume/reconciler/reconciler.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/volume/reconciler/reconciler.go new file mode 100644 index 000000000000..09812757e437 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/volume/reconciler/reconciler.go @@ -0,0 +1,118 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package reconciler implements interfaces that attempt to reconcile the +// desired state of the with the actual state of the world by triggering +// actions. 
+package reconciler + +import ( + "time" + + "github.com/golang/glog" + "k8s.io/kubernetes/pkg/controller/volume/attacherdetacher" + "k8s.io/kubernetes/pkg/controller/volume/cache" + "k8s.io/kubernetes/pkg/util/wait" +) + +// Reconciler runs a periodic loop to reconcile the desired state of the world +// with the actual state of the world by triggering attach/detach operations. +type Reconciler interface { + // Run starts the reconciliation loop, which executes periodically, checks + // if volumes that should be attached are attached and volumes that should + // be detached are detached. If not, it triggers attach/detach + // operations to rectify. + Run(stopCh <-chan struct{}) +} + +// NewReconciler returns a new instance of Reconciler that waits loopPeriod +// between successive executions. +// loopPeriod is the amount of time the reconciler loop waits between +// successive executions. +// maxSafeToDetachDuration is the max amount of time the reconciler will wait +// for the volume to detach; after this it will detach the volume anyway, +// assuming the node is unavailable. If during this time the volume becomes used +// by a new pod, the detach request will be aborted and the timer cleared. +func NewReconciler( + loopPeriod time.Duration, + maxSafeToDetachDuration time.Duration, + desiredStateOfWorld cache.DesiredStateOfWorld, + actualStateOfWorld cache.ActualStateOfWorld, + attacherDetacher attacherdetacher.AttacherDetacher) Reconciler { + return &reconciler{ + loopPeriod: loopPeriod, + maxSafeToDetachDuration: maxSafeToDetachDuration, + desiredStateOfWorld: desiredStateOfWorld, + actualStateOfWorld: actualStateOfWorld, + attacherDetacher: attacherDetacher, + } +} + +type reconciler struct { + loopPeriod time.Duration + maxSafeToDetachDuration time.Duration + desiredStateOfWorld cache.DesiredStateOfWorld + actualStateOfWorld cache.ActualStateOfWorld + attacherDetacher attacherdetacher.AttacherDetacher +} + +func (rc *reconciler) Run(stopCh <-chan struct{}) { + wait.Until(rc.reconciliationLoopFunc(), rc.loopPeriod, stopCh) +} + +func (rc *reconciler) reconciliationLoopFunc() func() { + return func() { + // Ensure volumes that should be attached are attached. + for _, volumeToAttach := range rc.desiredStateOfWorld.GetVolumesToAttach() { + if rc.actualStateOfWorld.VolumeNodeExists( + volumeToAttach.VolumeName, volumeToAttach.NodeName) { + // Volume/Node exists, touch it to reset "safe to detach" + glog.V(12).Infof("Volume %q/Node %q is attached--touching.", volumeToAttach.VolumeName, volumeToAttach.NodeName) + _, err := rc.actualStateOfWorld.AddVolumeNode( + volumeToAttach.VolumeSpec, volumeToAttach.NodeName) + if err != nil { + glog.Errorf("Unexpected error on actualStateOfWorld.AddVolumeNode(): %v", err) + } + } else { + // Volume/Node doesn't exist, spawn a goroutine to attach it + glog.V(5).Infof("Triggering AttachVolume for volume %q to node %q", volumeToAttach.VolumeName, volumeToAttach.NodeName) + rc.attacherDetacher.AttachVolume(&volumeToAttach, rc.actualStateOfWorld) + } + } + + // Ensure volumes that should be detached are detached.
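+ // An attached volume that is no longer desired is detached immediately if + // it has been marked safe to detach; otherwise the desire to detach is + // recorded and the volume is detached anyway once maxSafeToDetachDuration + // elapses.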
+ for _, attachedVolume := range rc.actualStateOfWorld.GetAttachedVolumes() { + if !rc.desiredStateOfWorld.VolumeExists( + attachedVolume.VolumeName, attachedVolume.NodeName) { + // Volume exists in actual state of world but not desired + if attachedVolume.SafeToDetach { + glog.V(5).Infof("Triggering DetachVolume for volume %q to node %q", attachedVolume.VolumeName, attachedVolume.NodeName) + rc.attacherDetacher.DetachVolume(&attachedVolume, rc.actualStateOfWorld) + } else { + // If the volume is not safe to detach, wait a max amount of time before detaching anyway. + timeElapsed, err := rc.actualStateOfWorld.MarkDesireToDetach(attachedVolume.VolumeName, attachedVolume.NodeName) + if err != nil { + glog.Errorf("Unexpected error on actualStateOfWorld.MarkDesireToDetach(): %v", err) + } + if timeElapsed > rc.maxSafeToDetachDuration { + glog.V(5).Infof("Triggering DetachVolume for volume %q to node %q. Volume is not safe to detach, but max wait time expired.", attachedVolume.VolumeName, attachedVolume.NodeName) + rc.attacherDetacher.DetachVolume(&attachedVolume, rc.actualStateOfWorld) + } + } + } + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/volume/reconciler/reconciler_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/volume/reconciler/reconciler_test.go new file mode 100644 index 000000000000..c0c513681c28 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/volume/reconciler/reconciler_test.go @@ -0,0 +1,365 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package reconciler + +import ( + "testing" + "time" + + "k8s.io/kubernetes/pkg/controller/volume/attacherdetacher" + "k8s.io/kubernetes/pkg/controller/volume/cache" + controllervolumetesting "k8s.io/kubernetes/pkg/controller/volume/testing" + "k8s.io/kubernetes/pkg/util/wait" + volumetesting "k8s.io/kubernetes/pkg/volume/testing" +) + +const ( + reconcilerLoopPeriod time.Duration = 0 * time.Millisecond + maxSafeToDetachDuration time.Duration = 50 * time.Millisecond +) + +func Test_Run_Positive_DoNothing(t *testing.T) { + // Arrange + volumePluginMgr, fakePlugin := controllervolumetesting.GetTestVolumePluginMgr((t)) + dsw := cache.NewDesiredStateOfWorld(volumePluginMgr) + asw := cache.NewActualStateOfWorld(volumePluginMgr) + ad := attacherdetacher.NewAttacherDetacher(volumePluginMgr) + reconciler := NewReconciler( + reconcilerLoopPeriod, maxSafeToDetachDuration, dsw, asw, ad) + + // Act + go reconciler.Run(wait.NeverStop) + + // Assert + waitForNewAttacherCallCount(t, 0 /* expectedCallCount */, fakePlugin) + verifyNewAttacherCallCount(t, true /* expectZeroNewAttacherCallCount */, fakePlugin) + verifyNewDetacherCallCount(t, true /* expectZeroNewDetacherCallCount */, fakePlugin) + waitForAttachCallCount(t, 0 /* expectedAttachCallCount */, fakePlugin) + waitForDetachCallCount(t, 0 /* expectedDetachCallCount */, fakePlugin) +} + +func Test_Run_Positive_OneDesiredVolumeAttach(t *testing.T) { + // Arrange + volumePluginMgr, fakePlugin := controllervolumetesting.GetTestVolumePluginMgr((t)) + dsw := cache.NewDesiredStateOfWorld(volumePluginMgr) + asw := cache.NewActualStateOfWorld(volumePluginMgr) + ad := attacherdetacher.NewAttacherDetacher(volumePluginMgr) + reconciler := NewReconciler( + reconcilerLoopPeriod, maxSafeToDetachDuration, dsw, asw, ad) + podName := "pod-name" + volumeName := "volume-name" + volumeSpec := controllervolumetesting.GetTestVolumeSpec(volumeName, volumeName) + nodeName := "node-name" + dsw.AddNode(nodeName) + volumeExists := dsw.VolumeExists(volumeName, nodeName) + if volumeExists { + t.Fatalf( + "Volume %q/node %q should not exist, but it does.", + volumeName, + nodeName) + } + + _, podErr := dsw.AddPod(podName, volumeSpec, nodeName) + if podErr != nil { + t.Fatalf("AddPod failed. Expected: Actual: <%v>", podErr) + } + + // Act + go reconciler.Run(wait.NeverStop) + + // Assert + waitForNewAttacherCallCount(t, 1 /* expectedCallCount */, fakePlugin) + waitForAttachCallCount(t, 1 /* expectedAttachCallCount */, fakePlugin) + verifyNewDetacherCallCount(t, true /* expectZeroNewDetacherCallCount */, fakePlugin) +} + +func Test_Run_Positive_OneDesiredVolumeAttachThenDetachWithMarkVolume(t *testing.T) { + // Arrange + volumePluginMgr, fakePlugin := controllervolumetesting.GetTestVolumePluginMgr((t)) + dsw := cache.NewDesiredStateOfWorld(volumePluginMgr) + asw := cache.NewActualStateOfWorld(volumePluginMgr) + ad := attacherdetacher.NewAttacherDetacher(volumePluginMgr) + reconciler := NewReconciler( + reconcilerLoopPeriod, maxSafeToDetachDuration, dsw, asw, ad) + podName := "pod-name" + volumeName := "volume-name" + volumeSpec := controllervolumetesting.GetTestVolumeSpec(volumeName, volumeName) + nodeName := "node-name" + dsw.AddNode(nodeName) + volumeExists := dsw.VolumeExists(volumeName, nodeName) + if volumeExists { + t.Fatalf( + "Volume %q/node %q should not exist, but it does.", + volumeName, + nodeName) + } + + generatedVolumeName, podAddErr := dsw.AddPod(podName, volumeSpec, nodeName) + if podAddErr != nil { + t.Fatalf("AddPod failed. 
Expected: Actual: <%v>", podAddErr) + } + + // Act + go reconciler.Run(wait.NeverStop) + + // Assert + waitForNewAttacherCallCount(t, 1 /* expectedCallCount */, fakePlugin) + verifyNewAttacherCallCount(t, false /* expectZeroNewAttacherCallCount */, fakePlugin) + waitForAttachCallCount(t, 1 /* expectedAttachCallCount */, fakePlugin) + verifyNewDetacherCallCount(t, true /* expectZeroNewDetacherCallCount */, fakePlugin) + waitForDetachCallCount(t, 0 /* expectedDetachCallCount */, fakePlugin) + + // Act + dsw.DeletePod(podName, generatedVolumeName, nodeName) + volumeExists = dsw.VolumeExists(generatedVolumeName, nodeName) + if volumeExists { + t.Fatalf( + "Deleted pod %q from volume %q/node %q. Volume should also be deleted but it still exists.", + podName, + generatedVolumeName, + nodeName) + } + asw.MarkVolumeNodeSafeToDetach(generatedVolumeName, nodeName) + + // Assert -- Marked SafeToDetach + waitForNewDetacherCallCount(t, 1 /* expectedCallCount */, fakePlugin) + verifyNewAttacherCallCount(t, false /* expectZeroNewAttacherCallCount */, fakePlugin) + waitForAttachCallCount(t, 1 /* expectedAttachCallCount */, fakePlugin) + verifyNewDetacherCallCount(t, false /* expectZeroNewDetacherCallCount */, fakePlugin) + waitForDetachCallCount(t, 1 /* expectedDetachCallCount */, fakePlugin) +} + +func Test_Run_Positive_OneDesiredVolumeAttachThenDetachWithoutMarkVolume(t *testing.T) { + // Arrange + volumePluginMgr, fakePlugin := controllervolumetesting.GetTestVolumePluginMgr((t)) + dsw := cache.NewDesiredStateOfWorld(volumePluginMgr) + asw := cache.NewActualStateOfWorld(volumePluginMgr) + ad := attacherdetacher.NewAttacherDetacher(volumePluginMgr) + reconciler := NewReconciler( + reconcilerLoopPeriod, maxSafeToDetachDuration, dsw, asw, ad) + podName := "pod-name" + volumeName := "volume-name" + volumeSpec := controllervolumetesting.GetTestVolumeSpec(volumeName, volumeName) + nodeName := "node-name" + dsw.AddNode(nodeName) + volumeExists := dsw.VolumeExists(volumeName, nodeName) + if volumeExists { + t.Fatalf( + "Volume %q/node %q should not exist, but it does.", + volumeName, + nodeName) + } + + generatedVolumeName, podAddErr := dsw.AddPod(podName, volumeSpec, nodeName) + if podAddErr != nil { + t.Fatalf("AddPod failed. Expected: Actual: <%v>", podAddErr) + } + + // Act + go reconciler.Run(wait.NeverStop) + + // Assert + waitForNewAttacherCallCount(t, 1 /* expectedCallCount */, fakePlugin) + verifyNewAttacherCallCount(t, false /* expectZeroNewAttacherCallCount */, fakePlugin) + waitForAttachCallCount(t, 1 /* expectedAttachCallCount */, fakePlugin) + verifyNewDetacherCallCount(t, true /* expectZeroNewDetacherCallCount */, fakePlugin) + waitForDetachCallCount(t, 0 /* expectedDetachCallCount */, fakePlugin) + + // Act + dsw.DeletePod(podName, generatedVolumeName, nodeName) + volumeExists = dsw.VolumeExists(generatedVolumeName, nodeName) + if volumeExists { + t.Fatalf( + "Deleted pod %q from volume %q/node %q. 
Volume should also be deleted but it still exists.", + podName, + generatedVolumeName, + nodeName) + } + + // Assert -- Timer will trigger detach + waitForNewDetacherCallCount(t, 1 /* expectedCallCount */, fakePlugin) + verifyNewAttacherCallCount(t, false /* expectZeroNewAttacherCallCount */, fakePlugin) + waitForAttachCallCount(t, 1 /* expectedAttachCallCount */, fakePlugin) + verifyNewDetacherCallCount(t, false /* expectZeroNewDetacherCallCount */, fakePlugin) + waitForDetachCallCount(t, 1 /* expectedDetachCallCount */, fakePlugin) +} + +func waitForNewAttacherCallCount( + t *testing.T, + expectedCallCount int, + fakePlugin *volumetesting.FakeVolumePlugin) { + err := retryWithExponentialBackOff( + time.Duration(5*time.Millisecond), + func() (bool, error) { + actualCallCount := fakePlugin.GetNewAttacherCallCount() + if actualCallCount >= expectedCallCount { + return true, nil + } + t.Logf( + "Warning: Wrong NewAttacherCallCount. Expected: <%v> Actual: <%v>. Will retry.", + expectedCallCount, + actualCallCount) + return false, nil + }, + ) + + if err != nil { + t.Fatalf( + "Timed out waiting for NewAttacherCallCount. Expected: <%v> Actual: <%v>", + expectedCallCount, + fakePlugin.GetNewAttacherCallCount()) + } +} + +func waitForNewDetacherCallCount( + t *testing.T, + expectedCallCount int, + fakePlugin *volumetesting.FakeVolumePlugin) { + err := retryWithExponentialBackOff( + time.Duration(5*time.Millisecond), + func() (bool, error) { + actualCallCount := fakePlugin.GetNewDetacherCallCount() + if actualCallCount >= expectedCallCount { + return true, nil + } + t.Logf( + "Warning: Wrong NewDetacherCallCount. Expected: <%v> Actual: <%v>. Will retry.", + expectedCallCount, + actualCallCount) + return false, nil + }, + ) + + if err != nil { + t.Fatalf( + "Timed out waiting for NewDetacherCallCount. Expected: <%v> Actual: <%v>", + expectedCallCount, + fakePlugin.GetNewDetacherCallCount()) + } +} + +func waitForAttachCallCount( + t *testing.T, + expectedAttachCallCount int, + fakePlugin *volumetesting.FakeVolumePlugin) { + if len(fakePlugin.GetAttachers()) == 0 && expectedAttachCallCount == 0 { + return + } + + err := retryWithExponentialBackOff( + time.Duration(5*time.Millisecond), + func() (bool, error) { + for i, attacher := range fakePlugin.GetAttachers() { + actualCallCount := attacher.GetAttachCallCount() + if actualCallCount == expectedAttachCallCount { + return true, nil + } + t.Logf( + "Warning: Wrong attacher[%v].GetAttachCallCount(). Expected: <%v> Actual: <%v>. Will try next attacher.", + i, + expectedAttachCallCount, + actualCallCount) + } + + t.Logf( + "Warning: No attachers have expected AttachCallCount. Expected: <%v>. Will retry.", + expectedAttachCallCount) + return false, nil + }, + ) + + if err != nil { + t.Fatalf( + "No attachers have expected AttachCallCount. Expected: <%v>", + expectedAttachCallCount) + } +} + +func waitForDetachCallCount( + t *testing.T, + expectedDetachCallCount int, + fakePlugin *volumetesting.FakeVolumePlugin) { + if len(fakePlugin.GetDetachers()) == 0 && expectedDetachCallCount == 0 { + return + } + + err := retryWithExponentialBackOff( + time.Duration(5*time.Millisecond), + func() (bool, error) { + for i, detacher := range fakePlugin.GetDetachers() { + actualCallCount := detacher.GetDetachCallCount() + if actualCallCount == expectedDetachCallCount { + return true, nil + } + t.Logf( + "Warning: Wrong detacher[%v].GetDetachCallCount(). Expected: <%v> Actual: <%v>.
Will try next detacher.", + i, + expectedDetachCallCount, + actualCallCount) + } + + t.Logf( + "Warning: No detachers have expected DetachCallCount. Expected: <%v>. Will retry.", + expectedDetachCallCount) + return false, nil + }, + ) + + if err != nil { + t.Fatalf( + "No detachers have expected DetachCallCount. Expected: <%v>", + expectedDetachCallCount) + } +} + +func verifyNewAttacherCallCount( + t *testing.T, + expectZeroNewAttacherCallCount bool, + fakePlugin *volumetesting.FakeVolumePlugin) { + + if expectZeroNewAttacherCallCount && + fakePlugin.GetNewAttacherCallCount() != 0 { + t.Fatalf( + "Wrong NewAttacherCallCount. Expected: <0> Actual: <%v>", + fakePlugin.GetNewAttacherCallCount()) + } +} + +func verifyNewDetacherCallCount( + t *testing.T, + expectZeroNewDetacherCallCount bool, + fakePlugin *volumetesting.FakeVolumePlugin) { + + if expectZeroNewDetacherCallCount && + fakePlugin.GetNewDetacherCallCount() != 0 { + t.Fatalf("Wrong NewDetacherCallCount. Expected: <0> Actual: <%v>", + fakePlugin.GetNewDetacherCallCount()) + } +} + +func retryWithExponentialBackOff(initialDuration time.Duration, fn wait.ConditionFunc) error { + backoff := wait.Backoff{ + Duration: initialDuration, + Factor: 3, + Jitter: 0, + Steps: 6, + } + return wait.ExponentialBackoff(backoff, fn) +} + +// t.Logf("asw: %v", asw.GetAttachedVolumes()) +// t.Logf("dsw: %v", dsw.GetVolumesToAttach()) diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/volume/testing/testvolumepluginmgr.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/volume/testing/testvolumepluginmgr.go new file mode 100644 index 000000000000..088360b535e5 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/volume/testing/testvolumepluginmgr.go @@ -0,0 +1,102 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testing + +import ( + "fmt" + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" + "k8s.io/kubernetes/pkg/cloudprovider" + "k8s.io/kubernetes/pkg/cloudprovider/providers/fake" + "k8s.io/kubernetes/pkg/types" + "k8s.io/kubernetes/pkg/util/io" + "k8s.io/kubernetes/pkg/util/mount" + "k8s.io/kubernetes/pkg/volume" + volumetesting "k8s.io/kubernetes/pkg/volume/testing" +) + +// GetTestVolumePluginMgr creates, initializes, and returns a test volume +// plugin manager. +func GetTestVolumePluginMgr(t *testing.T) (*volume.VolumePluginMgr, *volumetesting.FakeVolumePlugin) { + plugins := []volume.VolumePlugin{} + + // plugins = append(plugins, aws_ebs.ProbeVolumePlugins()...) + // plugins = append(plugins, gce_pd.ProbeVolumePlugins()...) + // plugins = append(plugins, cinder.ProbeVolumePlugins()...) + volumeTestingPlugins := volumetesting.ProbeVolumePlugins(volume.VolumeConfig{}) + plugins = append(plugins, volumeTestingPlugins...) 
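+ // Only the fake testing plugin is registered here, so tests can observe + // NewAttacher/NewDetacher and Attach/Detach call counts on it.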
+ + volumePluginMgr := testVolumePluginMgr{} + + if err := volumePluginMgr.InitPlugins(plugins, &volumePluginMgr); err != nil { + t.Fatalf("Could not initialize volume plugins for Attach/Detach Controller: %+v", err) + } + + return &volumePluginMgr.VolumePluginMgr, volumeTestingPlugins[0].(*volumetesting.FakeVolumePlugin) +} + +type testVolumePluginMgr struct { + volume.VolumePluginMgr +} + +// VolumeHost implementation +// This is an unfortunate requirement of the current factoring of volume plugin +// initializing code. It requires kubelet specific methods used by the mounting +// code to be implemented by all initializers even if the initializer does not +// do mounting (like this attach/detach controller). +// Issue kubernetes/kubernetes/issues/14217 to fix this. +func (vpm *testVolumePluginMgr) GetPluginDir(podUID string) string { + return "" +} + +func (vpm *testVolumePluginMgr) GetPodVolumeDir(podUID types.UID, pluginName, volumeName string) string { + return "" +} + +func (vpm *testVolumePluginMgr) GetPodPluginDir(podUID types.UID, pluginName string) string { + return "" +} + +func (vpm *testVolumePluginMgr) GetKubeClient() internalclientset.Interface { + return nil +} + +func (vpm *testVolumePluginMgr) NewWrapperMounter(volName string, spec volume.Spec, pod *api.Pod, opts volume.VolumeOptions) (volume.Mounter, error) { + return nil, fmt.Errorf("NewWrapperMounter not supported by Attach/Detach controller's VolumeHost implementation") +} + +func (vpm *testVolumePluginMgr) NewWrapperUnmounter(volName string, spec volume.Spec, podUID types.UID) (volume.Unmounter, error) { + return nil, fmt.Errorf("NewWrapperUnmounter not supported by Attach/Detach controller's VolumeHost implementation") +} + +func (vpm *testVolumePluginMgr) GetCloudProvider() cloudprovider.Interface { + return &fake.FakeCloud{} +} + +func (vpm *testVolumePluginMgr) GetMounter() mount.Interface { + return nil +} + +func (vpm *testVolumePluginMgr) GetWriter() io.Writer { + return nil +} + +func (vpm *testVolumePluginMgr) GetHostName() string { + return "" +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/volume/testing/testvolumespec.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/volume/testing/testvolumespec.go new file mode 100644 index 000000000000..f6678890501c --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/volume/testing/testvolumespec.go @@ -0,0 +1,38 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package testing + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/volume" +) + +// GetTestVolumeSpec returns a test volume spec +func GetTestVolumeSpec(volumeName, diskName string) *volume.Spec { + return &volume.Spec{ + Volume: &api.Volume{ + Name: volumeName, + VolumeSource: api.VolumeSource{ + GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{ + PDName: diskName, + FSType: "fake", + ReadOnly: false, + }, + }, + }, + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/conversion/converter.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/conversion/converter.go index 19cd4bef9d2f..e045dcd2f77c 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/conversion/converter.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/conversion/converter.go @@ -40,6 +40,8 @@ type NameFunc func(t reflect.Type) string var DefaultNameFunc = func(t reflect.Type) string { return t.Name() } +type GenericConversionFunc func(a, b interface{}, scope Scope) (bool, error) + // Converter knows how to convert one type to another. type Converter struct { // Map from the conversion pair to a function which can @@ -47,6 +49,11 @@ type Converter struct { conversionFuncs ConversionFuncs generatedConversionFuncs ConversionFuncs + // genericConversions are called during normal conversion to offer a "fast-path" + // that avoids all reflection. These methods are not called outside of the .Convert() + // method. + genericConversions []GenericConversionFunc + // Set of conversions that should be treated as a no-op ignoredConversions map[typePair]struct{} @@ -95,10 +102,18 @@ func NewConverter(nameFn NameFunc) *Converter { inputFieldMappingFuncs: make(map[reflect.Type]FieldMappingFunc), inputDefaultFlags: make(map[reflect.Type]FieldMatchingFlags), } - c.RegisterConversionFunc(ByteSliceCopy) + c.RegisterConversionFunc(Convert_Slice_byte_To_Slice_byte) return c } +// AddGenericConversionFunc adds a function that accepts the ConversionFunc call pattern +// (for two conversion types) to the converter. These functions are checked first during +// a normal conversion, but are otherwise not called. Use AddConversionFuncs when registering +// typed conversions. +func (c *Converter) AddGenericConversionFunc(fn GenericConversionFunc) { + c.genericConversions = append(c.genericConversions, fn) +} + // WithConversions returns a Converter that is a copy of c but with the additional // fns merged on top. func (c *Converter) WithConversions(fns ConversionFuncs) *Converter { @@ -114,8 +129,12 @@ func (c *Converter) DefaultMeta(t reflect.Type) (FieldMatchingFlags, *Meta) { } } -// ByteSliceCopy prevents recursing into every byte -func ByteSliceCopy(in *[]byte, out *[]byte, s Scope) error { +// Convert_Slice_byte_To_Slice_byte prevents recursing into every byte +func Convert_Slice_byte_To_Slice_byte(in *[]byte, out *[]byte, s Scope) error { + if *in == nil { + *out = nil + return nil + } *out = make([]byte, len(*in)) copy(*out, *in) return nil @@ -191,9 +210,6 @@ func (c ConversionFuncs) Merge(other ConversionFuncs) ConversionFuncs { // Meta is supplied by Scheme, when it calls Convert. type Meta struct { - SrcVersion string - DestVersion string - // KeyNameMapping is an optional function which may map the listed key (field name) // into a source and destination value. KeyNameMapping FieldMappingFunc @@ -496,6 +512,15 @@ func (f FieldMatchingFlags) IsSet(flag FieldMatchingFlags) bool { // it is not used by Convert() other than storing it in the scope. // Not safe for objects with cyclic references! 
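+// When generic conversion functions have been registered via
+// AddGenericConversionFunc, Convert consults them first, in registration
+// order, and falls back to the reflection-based path only if none of them
+// claims the src/dest pair.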
func (c *Converter) Convert(src, dest interface{}, flags FieldMatchingFlags, meta *Meta) error { + if len(c.genericConversions) > 0 { + // TODO: avoid scope allocation + s := &scope{converter: c, flags: flags, meta: meta} + for _, fn := range c.genericConversions { + if ok, err := fn(src, dest, s); ok { + return err + } + } + } return c.doConversion(src, dest, flags, meta, c.convert) } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/conversion/converter_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/conversion/converter_test.go new file mode 100644 index 000000000000..cdd61435fb72 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/conversion/converter_test.go @@ -0,0 +1,847 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package conversion + +import ( + "encoding/json" + "fmt" + "reflect" + "strconv" + "strings" + "testing" + + "github.com/google/gofuzz" + flag "github.com/spf13/pflag" + + "k8s.io/kubernetes/pkg/util/diff" +) + +var fuzzIters = flag.Int("fuzz-iters", 50, "How many fuzzing iterations to do.") + +// Test a weird version/kind embedding format. +type MyWeirdCustomEmbeddedVersionKindField struct { + ID string `json:"ID,omitempty"` + APIVersion string `json:"myVersionKey,omitempty"` + ObjectKind string `json:"myKindKey,omitempty"` + Z string `json:"Z,omitempty"` + Y uint64 `json:"Y,omitempty"` +} + +type TestType1 struct { + MyWeirdCustomEmbeddedVersionKindField `json:",inline"` + A string `json:"A,omitempty"` + B int `json:"B,omitempty"` + C int8 `json:"C,omitempty"` + D int16 `json:"D,omitempty"` + E int32 `json:"E,omitempty"` + F int64 `json:"F,omitempty"` + G uint `json:"G,omitempty"` + H uint8 `json:"H,omitempty"` + I uint16 `json:"I,omitempty"` + J uint32 `json:"J,omitempty"` + K uint64 `json:"K,omitempty"` + L bool `json:"L,omitempty"` + M map[string]int `json:"M,omitempty"` + N map[string]TestType2 `json:"N,omitempty"` + O *TestType2 `json:"O,omitempty"` + P []TestType2 `json:"Q,omitempty"` +} + +type TestType2 struct { + A string `json:"A,omitempty"` + B int `json:"B,omitempty"` +} + +type ExternalTestType2 struct { + A string `json:"A,omitempty"` + B int `json:"B,omitempty"` +} +type ExternalTestType1 struct { + MyWeirdCustomEmbeddedVersionKindField `json:",inline"` + A string `json:"A,omitempty"` + B int `json:"B,omitempty"` + C int8 `json:"C,omitempty"` + D int16 `json:"D,omitempty"` + E int32 `json:"E,omitempty"` + F int64 `json:"F,omitempty"` + G uint `json:"G,omitempty"` + H uint8 `json:"H,omitempty"` + I uint16 `json:"I,omitempty"` + J uint32 `json:"J,omitempty"` + K uint64 `json:"K,omitempty"` + L bool `json:"L,omitempty"` + M map[string]int `json:"M,omitempty"` + N map[string]ExternalTestType2 `json:"N,omitempty"` + O *ExternalTestType2 `json:"O,omitempty"` + P []ExternalTestType2 `json:"Q,omitempty"` +} + +func testLogger(t *testing.T) DebugLogger { + // We don't set logger to eliminate rubbish logs in tests. 
+ // If you want to switch it, simply switch it to: "return t" + return nil +} + +func TestConverter_byteSlice(t *testing.T) { + c := NewConverter(DefaultNameFunc) + src := []byte{1, 2, 3} + dest := []byte{} + err := c.Convert(&src, &dest, 0, nil) + if err != nil { + t.Fatalf("expected no error") + } + if e, a := src, dest; !reflect.DeepEqual(e, a) { + t.Errorf("expected %#v, got %#v", e, a) + } +} + +func TestConverter_MismatchedTypes(t *testing.T) { + c := NewConverter(DefaultNameFunc) + + err := c.RegisterConversionFunc( + func(in *[]string, out *int, s Scope) error { + if str, err := strconv.Atoi((*in)[0]); err != nil { + return err + } else { + *out = str + return nil + } + }, + ) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + + src := []string{"5"} + var dest *int + err = c.Convert(&src, &dest, 0, nil) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if e, a := 5, *dest; e != a { + t.Errorf("expected %#v, got %#v", e, a) + } +} + +func TestConverter_DefaultConvert(t *testing.T) { + type A struct { + Foo string + Baz int + } + type B struct { + Bar string + Baz int + } + c := NewConverter(DefaultNameFunc) + c.Debug = testLogger(t) + c.nameFunc = func(t reflect.Type) string { return "MyType" } + + // Ensure conversion funcs can call DefaultConvert to get default behavior, + // then fixup remaining fields manually + err := c.RegisterConversionFunc(func(in *A, out *B, s Scope) error { + if err := s.DefaultConvert(in, out, IgnoreMissingFields); err != nil { + return err + } + out.Bar = in.Foo + return nil + }) + if err != nil { + t.Fatalf("unexpected error %v", err) + } + + x := A{"hello, intrepid test reader!", 3} + y := B{} + + err = c.Convert(&x, &y, 0, nil) + if err != nil { + t.Fatalf("unexpected error %v", err) + } + if e, a := x.Foo, y.Bar; e != a { + t.Errorf("expected %v, got %v", e, a) + } + if e, a := x.Baz, y.Baz; e != a { + t.Errorf("expected %v, got %v", e, a) + } +} + +func TestConverter_DeepCopy(t *testing.T) { + type A struct { + Foo *string + Bar []string + Baz interface{} + Qux map[string]string + } + c := NewConverter(DefaultNameFunc) + c.Debug = testLogger(t) + + foo, baz := "foo", "baz" + x := A{ + Foo: &foo, + Bar: []string{"bar"}, + Baz: &baz, + Qux: map[string]string{"qux": "qux"}, + } + y := A{} + + if err := c.Convert(&x, &y, 0, nil); err != nil { + t.Fatalf("unexpected error %v", err) + } + *x.Foo = "foo2" + x.Bar[0] = "bar2" + *x.Baz.(*string) = "baz2" + x.Qux["qux"] = "qux2" + if e, a := *x.Foo, *y.Foo; e == a { + t.Errorf("expected difference between %v and %v", e, a) + } + if e, a := x.Bar, y.Bar; reflect.DeepEqual(e, a) { + t.Errorf("expected difference between %v and %v", e, a) + } + if e, a := *x.Baz.(*string), *y.Baz.(*string); e == a { + t.Errorf("expected difference between %v and %v", e, a) + } + if e, a := x.Qux, y.Qux; reflect.DeepEqual(e, a) { + t.Errorf("expected difference between %v and %v", e, a) + } +} + +func TestConverter_CallsRegisteredFunctions(t *testing.T) { + type A struct { + Foo string + Baz int + } + type B struct { + Bar string + Baz int + } + type C struct{} + c := NewConverter(DefaultNameFunc) + c.Debug = testLogger(t) + err := c.RegisterConversionFunc(func(in *A, out *B, s Scope) error { + out.Bar = in.Foo + return s.Convert(&in.Baz, &out.Baz, 0) + }) + if err != nil { + t.Fatalf("unexpected error %v", err) + } + err = c.RegisterConversionFunc(func(in *B, out *A, s Scope) error { + out.Foo = in.Bar + return s.Convert(&in.Baz, &out.Baz, 0) + }) + if err != nil { + t.Fatalf("unexpected error %v", 
err)
+	}
+
+	x := A{"hello, intrepid test reader!", 3}
+	y := B{}
+
+	err = c.Convert(&x, &y, 0, nil)
+	if err != nil {
+		t.Fatalf("unexpected error %v", err)
+	}
+	if e, a := x.Foo, y.Bar; e != a {
+		t.Errorf("expected %v, got %v", e, a)
+	}
+	if e, a := x.Baz, y.Baz; e != a {
+		t.Errorf("expected %v, got %v", e, a)
+	}
+
+	z := B{"all your test are belong to us", 42}
+	w := A{}
+
+	err = c.Convert(&z, &w, 0, nil)
+	if err != nil {
+		t.Fatalf("unexpected error %v", err)
+	}
+	if e, a := z.Bar, w.Foo; e != a {
+		t.Errorf("expected %v, got %v", e, a)
+	}
+	if e, a := z.Baz, w.Baz; e != a {
+		t.Errorf("expected %v, got %v", e, a)
+	}
+
+	err = c.RegisterConversionFunc(func(in *A, out *C, s Scope) error {
+		return fmt.Errorf("C can't store an A, silly")
+	})
+	if err != nil {
+		t.Fatalf("unexpected error %v", err)
+	}
+	err = c.Convert(&A{}, &C{}, 0, nil)
+	if err == nil {
+		t.Errorf("unexpected non-error")
+	}
+}
+
+func TestConverter_IgnoredConversion(t *testing.T) {
+	type A struct{}
+	type B struct{}
+
+	count := 0
+	c := NewConverter(DefaultNameFunc)
+	if err := c.RegisterConversionFunc(func(in *A, out *B, s Scope) error {
+		count++
+		return nil
+	}); err != nil {
+		t.Fatalf("unexpected error %v", err)
+	}
+	if err := c.RegisterIgnoredConversion(&A{}, &B{}); err != nil {
+		t.Fatal(err)
+	}
+	a := A{}
+	b := B{}
+	if err := c.Convert(&a, &b, 0, nil); err != nil {
+		t.Errorf("%v", err)
+	}
+	if count != 0 {
+		t.Errorf("unexpected number of conversion invocations")
+	}
+}
+
+func TestConverter_IgnoredConversionNested(t *testing.T) {
+	type C string
+	type A struct {
+		C C
+	}
+	type B struct {
+		C C
+	}
+
+	c := NewConverter(DefaultNameFunc)
+	typed := C("")
+	if err := c.RegisterIgnoredConversion(&typed, &typed); err != nil {
+		t.Fatal(err)
+	}
+	a := A{C: C("test")}
+	b := B{C: C("other")}
+	if err := c.Convert(&a, &b, AllowDifferentFieldTypeNames, nil); err != nil {
+		t.Errorf("%v", err)
+	}
+	if b.C != C("other") {
+		t.Errorf("expected no conversion of field C: %#v", b)
+	}
+}
+
+func TestConverter_GeneratedConversionOverridden(t *testing.T) {
+	type A struct{}
+	type B struct{}
+	c := NewConverter(DefaultNameFunc)
+	if err := c.RegisterConversionFunc(func(in *A, out *B, s Scope) error {
+		return nil
+	}); err != nil {
+		t.Fatalf("unexpected error %v", err)
+	}
+	if err := c.RegisterGeneratedConversionFunc(func(in *A, out *B, s Scope) error {
+		return fmt.Errorf("generated function should be overridden")
+	}); err != nil {
+		t.Fatalf("unexpected error %v", err)
+	}
+
+	a := A{}
+	b := B{}
+	if err := c.Convert(&a, &b, 0, nil); err != nil {
+		t.Errorf("%v", err)
+	}
+}
+
+func TestConverter_WithConversionOverridden(t *testing.T) {
+	type A struct{}
+	type B struct{}
+	c := NewConverter(DefaultNameFunc)
+	if err := c.RegisterConversionFunc(func(in *A, out *B, s Scope) error {
+		return fmt.Errorf("conversion function should be overridden")
+	}); err != nil {
+		t.Fatalf("unexpected error %v", err)
+	}
+	if err := c.RegisterGeneratedConversionFunc(func(in *A, out *B, s Scope) error {
+		return fmt.Errorf("generated function should be overridden")
+	}); err != nil {
+		t.Fatalf("unexpected error %v", err)
+	}
+
+	ext := NewConversionFuncs()
+	ext.Add(func(in *A, out *B, s Scope) error {
+		return nil
+	})
+	newc := c.WithConversions(ext)
+
+	a := A{}
+	b := B{}
+	if err := c.Convert(&a, &b, 0, nil); err == nil || err.Error() != "conversion function should be overridden" {
+		t.Errorf("unexpected error: %v", err)
+	}
+	if err := newc.Convert(&a, &b, 0, nil); err != nil {
+		t.Errorf("%v", err)
+	}
+}
+
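None of the tests above exercise the generic fast path added to Convert in this patch. A minimal sketch of such a test, assuming the TestType2/ExternalTestType2 types declared earlier in this file; the function below is illustrative and not part of the vendored source:

func TestConverter_GenericConversion(t *testing.T) {
	c := NewConverter(DefaultNameFunc)
	called := false
	c.AddGenericConversionFunc(func(a, b interface{}, s Scope) (bool, error) {
		in, okIn := a.(*TestType2)
		out, okOut := b.(*ExternalTestType2)
		if !okIn || !okOut {
			// Not our pair: fall through to the reflection-based path.
			return false, nil
		}
		called = true
		out.A = in.A
		out.B = in.B
		return true, nil
	})

	src := TestType2{A: "hello", B: 3}
	dest := ExternalTestType2{}
	if err := c.Convert(&src, &dest, 0, nil); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if !called {
		t.Errorf("expected the generic conversion function to be invoked")
	}
	if dest.A != src.A || dest.B != src.B {
		t.Errorf("unexpected result: %#v", dest)
	}
}

Returning ok == false for an unclaimed pair is what makes registration safe: conversion simply falls through to the reflective path for every other type.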
+func TestConverter_MapsStringArrays(t *testing.T) {
+	type A struct {
+		Foo   string
+		Baz   int
+		Other string
+	}
+	c := NewConverter(DefaultNameFunc)
+	c.Debug = testLogger(t)
+	if err := c.RegisterConversionFunc(func(input *[]string, out *string, s Scope) error {
+		if len(*input) == 0 {
+			*out = ""
+			return nil
+		}
+		*out = (*input)[0]
+		return nil
+	}); err != nil {
+		t.Fatalf("unexpected error %v", err)
+	}
+
+	x := map[string][]string{
+		"Foo":   {"bar"},
+		"Baz":   {"1"},
+		"Other": {"", "test"},
+		"other": {"wrong"},
+	}
+	y := A{"test", 2, "something"}
+
+	if err := c.Convert(&x, &y, AllowDifferentFieldTypeNames, nil); err == nil {
+		t.Error("unexpected non-error")
+	}
+
+	if err := c.RegisterConversionFunc(func(input *[]string, out *int, s Scope) error {
+		if len(*input) == 0 {
+			*out = 0
+			return nil
+		}
+		str := (*input)[0]
+		i, err := strconv.Atoi(str)
+		if err != nil {
+			return err
+		}
+		*out = i
+		return nil
+	}); err != nil {
+		t.Fatalf("unexpected error %v", err)
+	}
+
+	if err := c.Convert(&x, &y, AllowDifferentFieldTypeNames, nil); err != nil {
+		t.Fatalf("unexpected error %v", err)
+	}
+	if !reflect.DeepEqual(y, A{"bar", 1, ""}) {
+		t.Errorf("unexpected result: %#v", y)
+	}
+}
+
+func TestConverter_MapsStringArraysWithMappingKey(t *testing.T) {
+	type A struct {
+		Foo   string `json:"test"`
+		Baz   int
+		Other string
+	}
+	c := NewConverter(DefaultNameFunc)
+	c.Debug = testLogger(t)
+	if err := c.RegisterConversionFunc(func(input *[]string, out *string, s Scope) error {
+		if len(*input) == 0 {
+			*out = ""
+			return nil
+		}
+		*out = (*input)[0]
+		return nil
+	}); err != nil {
+		t.Fatalf("unexpected error %v", err)
+	}
+
+	x := map[string][]string{
+		"Foo":  {"bar"},
+		"test": {"baz"},
+	}
+	y := A{"", 0, ""}
+
+	if err := c.Convert(&x, &y, AllowDifferentFieldTypeNames|IgnoreMissingFields, &Meta{}); err != nil {
+		t.Fatalf("unexpected error %v", err)
+	}
+	if !reflect.DeepEqual(y, A{"bar", 0, ""}) {
+		t.Errorf("unexpected result: %#v", y)
+	}
+
+	mapping := func(key string, sourceTag, destTag reflect.StructTag) (source string, dest string) {
+		if s := destTag.Get("json"); len(s) > 0 {
+			return strings.SplitN(s, ",", 2)[0], key
+		}
+		return key, key
+	}
+
+	if err := c.Convert(&x, &y, AllowDifferentFieldTypeNames|IgnoreMissingFields, &Meta{KeyNameMapping: mapping}); err != nil {
+		t.Fatalf("unexpected error %v", err)
+	}
+	if !reflect.DeepEqual(y, A{"baz", 0, ""}) {
+		t.Errorf("unexpected result: %#v", y)
+	}
+}
+
+func TestConverter_fuzz(t *testing.T) {
+	// Use the same types from the scheme test.
+	table := []struct {
+		from, to, check interface{}
+	}{
+		{&TestType1{}, &ExternalTestType1{}, &TestType1{}},
+		{&ExternalTestType1{}, &TestType1{}, &ExternalTestType1{}},
+	}
+
+	f := fuzz.New().NilChance(.5).NumElements(0, 100)
+	c := NewConverter(DefaultNameFunc)
+	c.nameFunc = func(t reflect.Type) string {
+		// Hide the fact that we don't have separate packages for these things. 
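+		// Giving the internal and external types identical names lets the
+		// round-trip Convert calls below run with flags == 0; under the
+		// DefaultNameFunc the names would differ and conversion would require
+		// AllowDifferentFieldTypeNames.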
+ return map[reflect.Type]string{ + reflect.TypeOf(TestType1{}): "TestType1", + reflect.TypeOf(ExternalTestType1{}): "TestType1", + reflect.TypeOf(TestType2{}): "TestType2", + reflect.TypeOf(ExternalTestType2{}): "TestType2", + }[t] + } + c.Debug = testLogger(t) + + for i, item := range table { + for j := 0; j < *fuzzIters; j++ { + f.Fuzz(item.from) + err := c.Convert(item.from, item.to, 0, nil) + if err != nil { + t.Errorf("(%v, %v): unexpected error: %v", i, j, err) + continue + } + err = c.Convert(item.to, item.check, 0, nil) + if err != nil { + t.Errorf("(%v, %v): unexpected error: %v", i, j, err) + continue + } + if e, a := item.from, item.check; !reflect.DeepEqual(e, a) { + t.Errorf("(%v, %v): unexpected diff: %v", i, j, objDiff(e, a)) + } + } + } +} + +func TestConverter_MapElemAddr(t *testing.T) { + type Foo struct { + A map[int]int + } + type Bar struct { + A map[string]string + } + c := NewConverter(DefaultNameFunc) + c.Debug = testLogger(t) + err := c.RegisterConversionFunc( + func(in *int, out *string, s Scope) error { + *out = fmt.Sprintf("%v", *in) + return nil + }, + ) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + err = c.RegisterConversionFunc( + func(in *string, out *int, s Scope) error { + if str, err := strconv.Atoi(*in); err != nil { + return err + } else { + *out = str + return nil + } + }, + ) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + f := fuzz.New().NilChance(0).NumElements(3, 3) + first := Foo{} + second := Bar{} + f.Fuzz(&first) + err = c.Convert(&first, &second, AllowDifferentFieldTypeNames, nil) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + third := Foo{} + err = c.Convert(&second, &third, AllowDifferentFieldTypeNames, nil) + if e, a := first, third; !reflect.DeepEqual(e, a) { + t.Errorf("Unexpected diff: %v", objDiff(e, a)) + } +} + +func TestConverter_tags(t *testing.T) { + type Foo struct { + A string `test:"foo"` + } + type Bar struct { + A string `test:"bar"` + } + c := NewConverter(DefaultNameFunc) + c.Debug = testLogger(t) + err := c.RegisterConversionFunc( + func(in *string, out *string, s Scope) error { + if e, a := "foo", s.SrcTag().Get("test"); e != a { + t.Errorf("expected %v, got %v", e, a) + } + if e, a := "bar", s.DestTag().Get("test"); e != a { + t.Errorf("expected %v, got %v", e, a) + } + return nil + }, + ) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + err = c.Convert(&Foo{}, &Bar{}, AllowDifferentFieldTypeNames, nil) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } +} + +func TestConverter_meta(t *testing.T) { + type Foo struct{ A string } + type Bar struct{ A string } + c := NewConverter(DefaultNameFunc) + c.Debug = testLogger(t) + checks := 0 + err := c.RegisterConversionFunc( + func(in *Foo, out *Bar, s Scope) error { + if s.Meta() == nil { + t.Errorf("Meta did not get passed!") + } + checks++ + s.Convert(&in.A, &out.A, 0) + return nil + }, + ) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + err = c.RegisterConversionFunc( + func(in *string, out *string, s Scope) error { + if s.Meta() == nil { + t.Errorf("Meta did not get passed a second time!") + } + checks++ + return nil + }, + ) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + err = c.Convert(&Foo{}, &Bar{}, 0, &Meta{}) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + if checks != 2 { + t.Errorf("Registered functions did not get called.") + } +} + +func TestConverter_flags(t *testing.T) { + type Foo struct{ A string } + type Bar struct{ A string } + table 
:= []struct { + from, to interface{} + flags FieldMatchingFlags + shouldSucceed bool + }{ + // Check that DestFromSource allows extra fields only in source. + { + from: &struct{ A string }{}, + to: &struct{ A, B string }{}, + flags: DestFromSource, + shouldSucceed: false, + }, { + from: &struct{ A, B string }{}, + to: &struct{ A string }{}, + flags: DestFromSource, + shouldSucceed: true, + }, + + // Check that SourceToDest allows for extra fields only in dest. + { + from: &struct{ A string }{}, + to: &struct{ A, B string }{}, + flags: SourceToDest, + shouldSucceed: true, + }, { + from: &struct{ A, B string }{}, + to: &struct{ A string }{}, + flags: SourceToDest, + shouldSucceed: false, + }, + + // Check that IgnoreMissingFields makes the above failure cases pass. + { + from: &struct{ A string }{}, + to: &struct{ A, B string }{}, + flags: DestFromSource | IgnoreMissingFields, + shouldSucceed: true, + }, { + from: &struct{ A, B string }{}, + to: &struct{ A string }{}, + flags: SourceToDest | IgnoreMissingFields, + shouldSucceed: true, + }, + + // Check that the field type name must match unless + // AllowDifferentFieldTypeNames is specified. + { + from: &struct{ A, B Foo }{}, + to: &struct{ A Bar }{}, + flags: DestFromSource, + shouldSucceed: false, + }, { + from: &struct{ A Foo }{}, + to: &struct{ A, B Bar }{}, + flags: SourceToDest, + shouldSucceed: false, + }, { + from: &struct{ A, B Foo }{}, + to: &struct{ A Bar }{}, + flags: DestFromSource | AllowDifferentFieldTypeNames, + shouldSucceed: true, + }, { + from: &struct{ A Foo }{}, + to: &struct{ A, B Bar }{}, + flags: SourceToDest | AllowDifferentFieldTypeNames, + shouldSucceed: true, + }, + } + f := fuzz.New().NilChance(.5).NumElements(0, 100) + c := NewConverter(DefaultNameFunc) + c.Debug = testLogger(t) + + for i, item := range table { + for j := 0; j < *fuzzIters; j++ { + f.Fuzz(item.from) + err := c.Convert(item.from, item.to, item.flags, nil) + if item.shouldSucceed && err != nil { + t.Errorf("(%v, %v): unexpected error: %v", i, j, err) + continue + } + if !item.shouldSucceed && err == nil { + t.Errorf("(%v, %v): unexpected non-error", i, j) + continue + } + } + } +} + +func TestConverter_FieldRename(t *testing.T) { + type WeirdMeta struct { + Name string + Type string + } + type NameMeta struct { + Name string + } + type TypeMeta struct { + Type string + } + type A struct { + WeirdMeta + } + type B struct { + TypeMeta + NameMeta + } + + c := NewConverter(DefaultNameFunc) + err := c.SetStructFieldCopy(WeirdMeta{}, "WeirdMeta", TypeMeta{}, "TypeMeta") + if err != nil { + t.Fatalf("unexpected error %v", err) + } + err = c.SetStructFieldCopy(WeirdMeta{}, "WeirdMeta", NameMeta{}, "NameMeta") + if err != nil { + t.Fatalf("unexpected error %v", err) + } + err = c.SetStructFieldCopy(TypeMeta{}, "TypeMeta", WeirdMeta{}, "WeirdMeta") + if err != nil { + t.Fatalf("unexpected error %v", err) + } + err = c.SetStructFieldCopy(NameMeta{}, "NameMeta", WeirdMeta{}, "WeirdMeta") + if err != nil { + t.Fatalf("unexpected error %v", err) + } + c.Debug = testLogger(t) + + aVal := &A{ + WeirdMeta: WeirdMeta{ + Name: "Foo", + Type: "Bar", + }, + } + + bVal := &B{ + TypeMeta: TypeMeta{"Bar"}, + NameMeta: NameMeta{"Foo"}, + } + + table := map[string]struct { + from, to, expect interface{} + flags FieldMatchingFlags + }{ + "to": { + aVal, + &B{}, + bVal, + AllowDifferentFieldTypeNames | SourceToDest | IgnoreMissingFields, + }, + "from": { + bVal, + &A{}, + aVal, + AllowDifferentFieldTypeNames | SourceToDest, + }, + "toDestFirst": { + aVal, + &B{}, + bVal, + 
AllowDifferentFieldTypeNames, + }, + "fromDestFirst": { + bVal, + &A{}, + aVal, + AllowDifferentFieldTypeNames | IgnoreMissingFields, + }, + } + + for name, item := range table { + err := c.Convert(item.from, item.to, item.flags, nil) + if err != nil { + t.Errorf("%v: unexpected error: %v", name, err) + continue + } + if e, a := item.expect, item.to; !reflect.DeepEqual(e, a) { + t.Errorf("%v: unexpected diff: %v", name, objDiff(e, a)) + } + } +} + +func objDiff(a, b interface{}) string { + ab, err := json.Marshal(a) + if err != nil { + panic("a") + } + bb, err := json.Marshal(b) + if err != nil { + panic("b") + } + return diff.StringDiff(string(ab), string(bb)) + + // An alternate diff attempt, in case json isn't showing you + // the difference. (reflect.DeepEqual makes a distinction between + // nil and empty slices, for example.) + //return diff.StringDiff( + // fmt.Sprintf("%#v", a), + // fmt.Sprintf("%#v", b), + //) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/conversion/deep_copy_generated.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/conversion/deep_copy_generated.go new file mode 100644 index 000000000000..717feaf18f24 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/conversion/deep_copy_generated.go @@ -0,0 +1,185 @@ +// +build !ignore_autogenerated + +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! 
+ +package conversion + +import ( + forked_reflect "k8s.io/kubernetes/third_party/forked/reflect" + reflect "reflect" +) + +func DeepCopy_conversion_Cloner(in Cloner, out *Cloner, c *Cloner) error { + if in.deepCopyFuncs != nil { + in, out := in.deepCopyFuncs, &out.deepCopyFuncs + *out = make(map[reflect.Type]reflect.Value) + for range in { + // FIXME: Copying unassignable keys unsupported reflect.Type + } + } else { + out.deepCopyFuncs = nil + } + if in.generatedDeepCopyFuncs != nil { + in, out := in.generatedDeepCopyFuncs, &out.generatedDeepCopyFuncs + *out = make(map[reflect.Type]reflect.Value) + for range in { + // FIXME: Copying unassignable keys unsupported reflect.Type + } + } else { + out.generatedDeepCopyFuncs = nil + } + return nil +} + +func DeepCopy_conversion_ConversionFuncs(in ConversionFuncs, out *ConversionFuncs, c *Cloner) error { + if in.fns != nil { + in, out := in.fns, &out.fns + *out = make(map[typePair]reflect.Value) + for range in { + // FIXME: Copying unassignable keys unsupported typePair + } + } else { + out.fns = nil + } + return nil +} + +func DeepCopy_conversion_Converter(in Converter, out *Converter, c *Cloner) error { + if err := DeepCopy_conversion_ConversionFuncs(in.conversionFuncs, &out.conversionFuncs, c); err != nil { + return err + } + if err := DeepCopy_conversion_ConversionFuncs(in.generatedConversionFuncs, &out.generatedConversionFuncs, c); err != nil { + return err + } + if in.genericConversions != nil { + in, out := in.genericConversions, &out.genericConversions + *out = make([]GenericConversionFunc, len(in)) + for i := range in { + if newVal, err := c.DeepCopy(in[i]); err != nil { + return err + } else { + (*out)[i] = newVal.(GenericConversionFunc) + } + } + } else { + out.genericConversions = nil + } + if in.ignoredConversions != nil { + in, out := in.ignoredConversions, &out.ignoredConversions + *out = make(map[typePair]struct{}) + for range in { + // FIXME: Copying unassignable keys unsupported typePair + } + } else { + out.ignoredConversions = nil + } + if in.structFieldDests != nil { + in, out := in.structFieldDests, &out.structFieldDests + *out = make(map[typeNamePair][]typeNamePair) + for range in { + // FIXME: Copying unassignable keys unsupported typeNamePair + } + } else { + out.structFieldDests = nil + } + if in.structFieldSources != nil { + in, out := in.structFieldSources, &out.structFieldSources + *out = make(map[typeNamePair][]typeNamePair) + for range in { + // FIXME: Copying unassignable keys unsupported typeNamePair + } + } else { + out.structFieldSources = nil + } + if in.defaultingFuncs != nil { + in, out := in.defaultingFuncs, &out.defaultingFuncs + *out = make(map[reflect.Type]reflect.Value) + for range in { + // FIXME: Copying unassignable keys unsupported reflect.Type + } + } else { + out.defaultingFuncs = nil + } + if in.defaultingInterfaces != nil { + in, out := in.defaultingInterfaces, &out.defaultingInterfaces + *out = make(map[reflect.Type]interface{}) + for range in { + // FIXME: Copying unassignable keys unsupported reflect.Type + } + } else { + out.defaultingInterfaces = nil + } + if in.inputFieldMappingFuncs != nil { + in, out := in.inputFieldMappingFuncs, &out.inputFieldMappingFuncs + *out = make(map[reflect.Type]FieldMappingFunc) + for range in { + // FIXME: Copying unassignable keys unsupported reflect.Type + } + } else { + out.inputFieldMappingFuncs = nil + } + if in.inputDefaultFlags != nil { + in, out := in.inputDefaultFlags, &out.inputDefaultFlags + *out = make(map[reflect.Type]FieldMatchingFlags) + for 
range in { + // FIXME: Copying unassignable keys unsupported reflect.Type + } + } else { + out.inputDefaultFlags = nil + } + if in.Debug == nil { + out.Debug = nil + } else if newVal, err := c.DeepCopy(in.Debug); err != nil { + return err + } else { + out.Debug = newVal.(DebugLogger) + } + if in.nameFunc == nil { + out.nameFunc = nil + } else if newVal, err := c.DeepCopy(in.nameFunc); err != nil { + return err + } else { + out.nameFunc = newVal.(func(reflect.Type) string) + } + return nil +} + +func DeepCopy_conversion_Equalities(in Equalities, out *Equalities, c *Cloner) error { + if in.Equalities != nil { + in, out := in.Equalities, &out.Equalities + *out = make(forked_reflect.Equalities) + for range in { + // FIXME: Copying unassignable keys unsupported reflect.Type + } + } else { + out.Equalities = nil + } + return nil +} + +func DeepCopy_conversion_Meta(in Meta, out *Meta, c *Cloner) error { + if in.KeyNameMapping == nil { + out.KeyNameMapping = nil + } else if newVal, err := c.DeepCopy(in.KeyNameMapping); err != nil { + return err + } else { + out.KeyNameMapping = newVal.(FieldMappingFunc) + } + return nil +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/conversion/deep_copy_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/conversion/deep_copy_test.go new file mode 100644 index 000000000000..a1cd65308ad5 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/conversion/deep_copy_test.go @@ -0,0 +1,161 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package conversion + +import ( + "math/rand" + "reflect" + "testing" + + "github.com/google/gofuzz" +) + +func TestDeepCopy(t *testing.T) { + semantic := EqualitiesOrDie() + f := fuzz.New().NilChance(.5).NumElements(0, 100) + table := []interface{}{ + map[string]string{}, + int(5), + "hello world", + struct { + A, B, C struct { + D map[string]int + } + X []int + Y []byte + }{}, + } + for _, obj := range table { + obj2, err := NewCloner().DeepCopy(obj) + if err != nil { + t.Errorf("Error: couldn't copy %#v", obj) + continue + } + if e, a := obj, obj2; !semantic.DeepEqual(e, a) { + t.Errorf("expected %#v\ngot %#v", e, a) + } + + obj3 := reflect.New(reflect.TypeOf(obj)).Interface() + f.Fuzz(obj3) + obj4, err := NewCloner().DeepCopy(obj3) + if err != nil { + t.Errorf("Error: couldn't copy %#v", obj) + continue + } + if e, a := obj3, obj4; !semantic.DeepEqual(e, a) { + t.Errorf("expected %#v\ngot %#v", e, a) + } + f.Fuzz(obj3) + } +} + +func copyOrDie(t *testing.T, in interface{}) interface{} { + out, err := NewCloner().DeepCopy(in) + if err != nil { + t.Fatalf("DeepCopy failed: %#q: %v", in, err) + } + return out +} + +func TestDeepCopySliceSeparate(t *testing.T) { + x := []int{5} + y := copyOrDie(t, x).([]int) + x[0] = 3 + if y[0] == 3 { + t.Errorf("deep copy wasn't deep: %#q %#q", x, y) + } +} + +func TestDeepCopyArraySeparate(t *testing.T) { + x := [1]int{5} + y := copyOrDie(t, x).([1]int) + x[0] = 3 + if y[0] == 3 { + t.Errorf("deep copy wasn't deep: %#q %#q", x, y) + } +} + +func TestDeepCopyMapSeparate(t *testing.T) { + x := map[string]int{"foo": 5} + y := copyOrDie(t, x).(map[string]int) + x["foo"] = 3 + if y["foo"] == 3 { + t.Errorf("deep copy wasn't deep: %#q %#q", x, y) + } +} + +func TestDeepCopyPointerSeparate(t *testing.T) { + z := 5 + x := &z + y := copyOrDie(t, x).(*int) + *x = 3 + if *y == 3 { + t.Errorf("deep copy wasn't deep: %#q %#q", x, y) + } +} + +func TestDeepCopyStruct(t *testing.T) { + type Foo struct { + A int + } + type Bar struct { + Foo + F *Foo + } + a := &Bar{Foo{1}, &Foo{2}} + b := copyOrDie(t, a).(*Bar) + a.A = 3 + a.F.A = 4 + + if b.A != 1 || b.F.A != 2 { + t.Errorf("deep copy wasn't deep: %#v, %#v", a, b) + } +} + +var result interface{} + +func BenchmarkDeepCopy(b *testing.B) { + table := []interface{}{ + map[string]string{}, + int(5), + "hello world", + struct { + A, B, C struct { + D map[string]int + } + X []int + Y []byte + }{}, + } + + f := fuzz.New().RandSource(rand.NewSource(1)).NilChance(.5).NumElements(0, 100) + for i := range table { + out := table[i] + obj := reflect.New(reflect.TypeOf(out)).Interface() + f.Fuzz(obj) + table[i] = obj + } + + b.ResetTimer() + var r interface{} + for i := 0; i < b.N; i++ { + for j := range table { + r, _ = NewCloner().DeepCopy(table[j]) + } + } + result = r +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/conversion/helper_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/conversion/helper_test.go new file mode 100644 index 000000000000..69fef3334b16 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/conversion/helper_test.go @@ -0,0 +1,38 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package conversion + +import "testing" + +func TestInvalidPtrValueKind(t *testing.T) { + var simple interface{} + switch obj := simple.(type) { + default: + _, err := EnforcePtr(obj) + if err == nil { + t.Errorf("Expected error on invalid kind") + } + } +} + +func TestEnforceNilPtr(t *testing.T) { + var nilPtr *struct{} + _, err := EnforcePtr(nilPtr) + if err == nil { + t.Errorf("Expected error on nil pointer") + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/conversion/queryparams/convert_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/conversion/queryparams/convert_test.go new file mode 100644 index 000000000000..cbeeeca73944 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/conversion/queryparams/convert_test.go @@ -0,0 +1,211 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package queryparams_test + +import ( + "net/url" + "reflect" + "testing" + "time" + + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/conversion/queryparams" +) + +type namedString string +type namedBool bool + +type bar struct { + Float1 float32 `json:"float1"` + Float2 float64 `json:"float2"` + Int1 int64 `json:"int1,omitempty"` + Int2 int32 `json:"int2,omitempty"` + Int3 int16 `json:"int3,omitempty"` + Str1 string `json:"str1,omitempty"` + Ignored int + Ignored2 string +} + +func (obj *bar) GetObjectKind() unversioned.ObjectKind { return unversioned.EmptyObjectKind } + +type foo struct { + Str string `json:"str"` + Integer int `json:"integer,omitempty"` + Slice []string `json:"slice,omitempty"` + Boolean bool `json:"boolean,omitempty"` + NamedStr namedString `json:"namedStr,omitempty"` + NamedBool namedBool `json:"namedBool,omitempty"` + Foobar bar `json:"foobar,omitempty"` + Testmap map[string]string `json:"testmap,omitempty"` +} + +func (obj *foo) GetObjectKind() unversioned.ObjectKind { return unversioned.EmptyObjectKind } + +type baz struct { + Ptr *int `json:"ptr"` + Bptr *bool `json:"bptr,omitempty"` +} + +func (obj *baz) GetObjectKind() unversioned.ObjectKind { return unversioned.EmptyObjectKind } + +// childStructs tests some of the types we serialize to query params for log API calls +// notably, the nested time struct +type childStructs struct { + Container string `json:"container,omitempty"` + Follow bool `json:"follow,omitempty"` + Previous bool `json:"previous,omitempty"` + SinceSeconds *int64 `json:"sinceSeconds,omitempty"` + SinceTime *unversioned.Time `json:"sinceTime,omitempty"` + EmptyTime *unversioned.Time `json:"emptyTime"` +} + +func (obj *childStructs) GetObjectKind() unversioned.ObjectKind { return unversioned.EmptyObjectKind } + +func validateResult(t *testing.T, input interface{}, actual, expected url.Values) { + local := url.Values{} + for k, v := range expected { + local[k] = v + } + for k, v := range actual { + if ev, ok := local[k]; !ok || !reflect.DeepEqual(ev, v) { + if !ok { + t.Errorf("%#v: actual value key %s not found in expected map", input, k) + } else { + t.Errorf("%#v: values don't match: actual: %#v, expected: %#v", input, v, ev) + } + } + delete(local, k) + } + if len(local) > 0 { + t.Errorf("%#v: expected map has keys that were not found in actual map: %#v", input, local) + } +} + +func TestConvert(t *testing.T) { + sinceSeconds := int64(123) + sinceTime := unversioned.Date(2000, 1, 1, 12, 34, 56, 0, time.UTC) + + tests := []struct { + input interface{} + expected url.Values + }{ + { + input: &foo{ + Str: "hello", + }, + expected: url.Values{"str": {"hello"}}, + }, + { + input: &foo{ + Str: "test string", + Slice: []string{"one", "two", "three"}, + Integer: 234, + Boolean: true, + }, + expected: url.Values{"str": {"test string"}, "slice": {"one", "two", "three"}, "integer": {"234"}, "boolean": {"true"}}, + }, + { + input: &foo{ + Str: "named types", + NamedStr: "value1", + NamedBool: true, + }, + expected: url.Values{"str": {"named types"}, "namedStr": {"value1"}, "namedBool": {"true"}}, + }, + { + input: &foo{ + Str: "don't ignore embedded struct", + Foobar: bar{ + Float1: 5.0, + }, + }, + expected: url.Values{"str": {"don't ignore embedded struct"}, "float1": {"5"}, "float2": {"0"}}, + }, + { + // Ignore untagged fields + input: &bar{ + Float1: 23.5, + Float2: 100.7, + Int1: 1, + Int2: 2, + Int3: 3, + Ignored: 1, + Ignored2: "ignored", + }, + expected: url.Values{"float1": {"23.5"}, "float2": {"100.7"}, "int1": {"1"}, 
"int2": {"2"}, "int3": {"3"}}, + }, + { + // include fields that are not tagged omitempty + input: &foo{ + NamedStr: "named str", + }, + expected: url.Values{"str": {""}, "namedStr": {"named str"}}, + }, + { + input: &baz{ + Ptr: intp(5), + Bptr: boolp(true), + }, + expected: url.Values{"ptr": {"5"}, "bptr": {"true"}}, + }, + { + input: &baz{ + Bptr: boolp(true), + }, + expected: url.Values{"ptr": {""}, "bptr": {"true"}}, + }, + { + input: &baz{ + Ptr: intp(5), + }, + expected: url.Values{"ptr": {"5"}}, + }, + { + input: &childStructs{ + Container: "mycontainer", + Follow: true, + Previous: true, + SinceSeconds: &sinceSeconds, + SinceTime: &sinceTime, // test a custom marshaller + EmptyTime: nil, // test a nil custom marshaller without omitempty + }, + expected: url.Values{"container": {"mycontainer"}, "follow": {"true"}, "previous": {"true"}, "sinceSeconds": {"123"}, "sinceTime": {"2000-01-01T12:34:56Z"}, "emptyTime": {""}}, + }, + { + input: &childStructs{ + Container: "mycontainer", + Follow: true, + Previous: true, + SinceSeconds: &sinceSeconds, + SinceTime: nil, // test a nil custom marshaller with omitempty + }, + expected: url.Values{"container": {"mycontainer"}, "follow": {"true"}, "previous": {"true"}, "sinceSeconds": {"123"}, "emptyTime": {""}}, + }, + } + + for _, test := range tests { + result, err := queryparams.Convert(test.input) + if err != nil { + t.Errorf("Unexpected error while converting %#v: %v", test.input, err) + } + validateResult(t, test.input, result, test.expected) + } +} + +func intp(n int) *int { return &n } + +func boolp(b bool) *bool { return &b } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/credentialprovider/aws/aws_credentials.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/credentialprovider/aws/aws_credentials.go index 395c438edb77..3b9b5c9820d2 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/credentialprovider/aws/aws_credentials.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/credentialprovider/aws/aws_credentials.go @@ -14,10 +14,11 @@ See the License for the specific language governing permissions and limitations under the License. */ -package aws_credentials +package credentials import ( "encoding/base64" + "fmt" "strings" "time" @@ -26,23 +27,40 @@ import ( "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/ecr" "github.com/golang/glog" - "k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/credentialprovider" ) -var registryUrls = []string{"*.dkr.ecr.*.amazonaws.com"} +// AWSRegions is the complete list of regions known to the AWS cloudprovider +// and credentialprovider. +var AWSRegions = [...]string{ + "us-east-1", + "us-west-1", + "us-west-2", + "eu-west-1", + "eu-central-1", + "ap-southeast-1", + "ap-southeast-2", + "ap-northeast-1", + "ap-northeast-2", + "cn-north-1", + "us-gov-west-1", + "sa-east-1", +} + +const registryURLTemplate = "*.dkr.ecr.%s.amazonaws.com" // awsHandlerLogger is a handler that logs all AWS SDK requests // Copied from cloudprovider/aws/log_handler.go func awsHandlerLogger(req *request.Request) { service := req.ClientInfo.ServiceName + region := req.Config.Region name := "?" if req.Operation != nil { name = req.Operation.Name } - glog.V(4).Infof("AWS request: %s %s", service, name) + glog.V(3).Infof("AWS request: %s:%s in %s", service, name, *region) } // An interface for testing purposes. 
@@ -59,22 +77,86 @@ func (p *ecrTokenGetter) GetAuthorizationToken(input *ecr.GetAuthorizationTokenI return p.svc.GetAuthorizationToken(input) } +// lazyEcrProvider is a DockerConfigProvider that creates on demand an +// ecrProvider for a given region and then proxies requests to it. +type lazyEcrProvider struct { + region string + regionURL string + actualProvider *credentialprovider.CachingDockerConfigProvider +} + +var _ credentialprovider.DockerConfigProvider = &lazyEcrProvider{} + // ecrProvider is a DockerConfigProvider that gets and refreshes 12-hour tokens // from AWS to access ECR. type ecrProvider struct { - getter tokenGetter + region string + regionURL string + getter tokenGetter } +var _ credentialprovider.DockerConfigProvider = &ecrProvider{} + +// Init creates a lazy provider for each AWS region, in order to support +// cross-region ECR access. They have to be lazy because it's unlikely, but not +// impossible, that we'll use more than one. // Not using the package init() function: this module should be initialized only // if using the AWS cloud provider. This way, we avoid timeouts waiting for a // non-existent provider. func Init() { - credentialprovider.RegisterCredentialProvider("aws-ecr-key", - &credentialprovider.CachingDockerConfigProvider{ - Provider: &ecrProvider{}, - // Refresh credentials a little earlier before they expire + for _, region := range AWSRegions { + credentialprovider.RegisterCredentialProvider("aws-ecr-"+region, + &lazyEcrProvider{ + region: region, + regionURL: fmt.Sprintf(registryURLTemplate, region), + }) + } + +} + +// Enabled implements DockerConfigProvider.Enabled for the lazy provider. +// Since we perform no checks/work of our own and actualProvider is only created +// later at image pulling time (if ever), always return true. +func (p *lazyEcrProvider) Enabled() bool { + return true +} + +// LazyProvide implements DockerConfigProvider.LazyProvide. It will be called +// by the client when attempting to pull an image and it will create the actual +// provider only when we actually need it the first time. +func (p *lazyEcrProvider) LazyProvide() *credentialprovider.DockerConfigEntry { + if p.actualProvider == nil { + glog.V(2).Infof("Creating ecrProvider for %s", p.region) + p.actualProvider = &credentialprovider.CachingDockerConfigProvider{ + Provider: newEcrProvider(p.region, nil), + // Refresh credentials a little earlier than expiration time Lifetime: 11*time.Hour + 55*time.Minute, - }) + } + if !p.actualProvider.Enabled() { + return nil + } + } + entry := p.actualProvider.Provide()[p.regionURL] + return &entry +} + +// Provide implements DockerConfigProvider.Provide, creating dummy credentials. +// Client code will call Provider.LazyProvide() at image pulling time. +func (p *lazyEcrProvider) Provide() credentialprovider.DockerConfig { + entry := credentialprovider.DockerConfigEntry{ + Provider: p, + } + cfg := credentialprovider.DockerConfig{} + cfg[p.regionURL] = entry + return cfg +} + +func newEcrProvider(region string, getter tokenGetter) *ecrProvider { + return &ecrProvider{ + region: region, + regionURL: fmt.Sprintf(registryURLTemplate, region), + getter: getter, + } } // Enabled implements DockerConfigProvider.Enabled for the AWS token-based implementation. @@ -82,33 +164,14 @@ func Init() { // TODO: figure how to enable it manually for deployments that are not on AWS but still // use ECR somehow? 
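+// Enabled no longer discovers the region via the cloud provider: the region
+// is fixed at construction by newEcrProvider, so this method only validates
+// it and builds the region-scoped ECR client used by Provide.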
func (p *ecrProvider) Enabled() bool { - provider, err := cloudprovider.GetCloudProvider("aws", nil) - if err != nil { - glog.Errorf("while initializing AWS cloud provider %v", err) - return false - } - if provider == nil { - return false - } - - zones, ok := provider.Zones() - if !ok { - glog.Errorf("couldn't get Zones() interface") - return false - } - zone, err := zones.GetZone() - if err != nil { - glog.Errorf("while getting zone %v", err) - return false - } - if zone.Region == "" { - glog.Errorf("Region information is empty") + if p.region == "" { + glog.Errorf("Called ecrProvider.Enabled() with no region set") return false } getter := &ecrTokenGetter{svc: ecr.New(session.New(&aws.Config{ Credentials: nil, - Region: &zone.Region, + Region: &p.region, }))} getter.svc.Handlers.Sign.PushFrontNamed(request.NamedHandler{ Name: "k8s/logger", @@ -119,6 +182,11 @@ func (p *ecrProvider) Enabled() bool { return true } +// LazyProvide implements DockerConfigProvider.LazyProvide. Should never be called. +func (p *ecrProvider) LazyProvide() *credentialprovider.DockerConfigEntry { + return nil +} + // Provide implements DockerConfigProvider.Provide, refreshing ECR tokens on demand func (p *ecrProvider) Provide() credentialprovider.DockerConfig { cfg := credentialprovider.DockerConfig{} @@ -140,7 +208,7 @@ func (p *ecrProvider) Provide() credentialprovider.DockerConfig { data.AuthorizationToken != nil { decodedToken, err := base64.StdEncoding.DecodeString(aws.StringValue(data.AuthorizationToken)) if err != nil { - glog.Errorf("while decoding token for endpoint %s %v", data.ProxyEndpoint, err) + glog.Errorf("while decoding token for endpoint %v %v", data.ProxyEndpoint, err) return cfg } parts := strings.SplitN(string(decodedToken), ":", 2) @@ -153,10 +221,10 @@ func (p *ecrProvider) Provide() credentialprovider.DockerConfig { Email: "not@val.id", } - // Add our entry for each of the supported container registry URLs - for _, k := range registryUrls { - cfg[k] = entry - } + glog.V(3).Infof("Adding credentials for user %s in %s", user, p.region) + // Add our config entry for this region's registry URLs + cfg[p.regionURL] = entry + } } return cfg diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/credentialprovider/aws/aws_credentials_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/credentialprovider/aws/aws_credentials_test.go new file mode 100644 index 000000000000..b286c7d61d2e --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/credentialprovider/aws/aws_credentials_test.go @@ -0,0 +1,109 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package credentials + +import ( + "encoding/base64" + "fmt" + "path" + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ecr" + + "k8s.io/kubernetes/pkg/credentialprovider" +) + +const user = "foo" +const password = "1234567890abcdef" +const email = "not@val.id" + +// Mock implementation +type testTokenGetter struct { + user string + password string + endpoint string +} + +func (p *testTokenGetter) GetAuthorizationToken(input *ecr.GetAuthorizationTokenInput) (*ecr.GetAuthorizationTokenOutput, error) { + + expiration := time.Now().Add(1 * time.Hour) + creds := []byte(fmt.Sprintf("%s:%s", p.user, p.password)) + data := &ecr.AuthorizationData{ + AuthorizationToken: aws.String(base64.StdEncoding.EncodeToString(creds)), + ExpiresAt: &expiration, + ProxyEndpoint: aws.String(p.endpoint), + } + output := &ecr.GetAuthorizationTokenOutput{ + AuthorizationData: []*ecr.AuthorizationData{data}, + } + + return output, nil //p.svc.GetAuthorizationToken(input) +} + +func TestEcrProvide(t *testing.T) { + registry := "123456789012.dkr.ecr.lala-land-1.amazonaws.com" + otherRegistries := []string{ + "private.registry.com", + "gcr.io", + } + image := "foo/bar" + + provider := newEcrProvider("lala-land-1", + &testTokenGetter{ + user: user, + password: password, + endpoint: registry, + }) + + keyring := &credentialprovider.BasicDockerKeyring{} + keyring.Add(provider.Provide()) + + // Verify that we get the expected username/password combo for + // an ECR image name. + fullImage := path.Join(registry, image) + creds, ok := keyring.Lookup(fullImage) + if !ok { + t.Errorf("Didn't find expected URL: %s", fullImage) + return + } + if len(creds) > 1 { + t.Errorf("Got more hits than expected: %s", creds) + } + val := creds[0] + + if user != val.Username { + t.Errorf("Unexpected username value, want: _token, got: %s", val.Username) + } + if password != val.Password { + t.Errorf("Unexpected password value, want: %s, got: %s", password, val.Password) + } + if email != val.Email { + t.Errorf("Unexpected email value, want: %s, got: %s", email, val.Email) + } + + // Verify that we get an error for other images. + for _, otherRegistry := range otherRegistries { + fullImage = path.Join(otherRegistry, image) + creds, ok = keyring.Lookup(fullImage) + if ok { + t.Errorf("Unexpectedly found image: %s", fullImage) + return + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/credentialprovider/config.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/credentialprovider/config.go index f03bd26c3189..b80fa5945ca6 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/credentialprovider/config.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/credentialprovider/config.go @@ -46,6 +46,7 @@ type DockerConfigEntry struct { Username string Password string Email string + Provider DockerConfigProvider } var ( diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/credentialprovider/config_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/credentialprovider/config_test.go new file mode 100644 index 000000000000..587879fe9311 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/credentialprovider/config_test.go @@ -0,0 +1,225 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package credentialprovider + +import ( + "encoding/json" + "reflect" + "testing" +) + +func TestDockerConfigJsonJSONDecode(t *testing.T) { + input := []byte(`{"auths": {"http://foo.example.com":{"username": "foo", "password": "bar", "email": "foo@example.com"}, "http://bar.example.com":{"username": "bar", "password": "baz", "email": "bar@example.com"}}}`) + + expect := DockerConfigJson{ + Auths: DockerConfig(map[string]DockerConfigEntry{ + "http://foo.example.com": { + Username: "foo", + Password: "bar", + Email: "foo@example.com", + }, + "http://bar.example.com": { + Username: "bar", + Password: "baz", + Email: "bar@example.com", + }, + }), + } + + var output DockerConfigJson + err := json.Unmarshal(input, &output) + if err != nil { + t.Errorf("Received unexpected error: %v", err) + } + + if !reflect.DeepEqual(expect, output) { + t.Errorf("Received unexpected output. Expected %#v, got %#v", expect, output) + } +} + +func TestDockerConfigJSONDecode(t *testing.T) { + input := []byte(`{"http://foo.example.com":{"username": "foo", "password": "bar", "email": "foo@example.com"}, "http://bar.example.com":{"username": "bar", "password": "baz", "email": "bar@example.com"}}`) + + expect := DockerConfig(map[string]DockerConfigEntry{ + "http://foo.example.com": { + Username: "foo", + Password: "bar", + Email: "foo@example.com", + }, + "http://bar.example.com": { + Username: "bar", + Password: "baz", + Email: "bar@example.com", + }, + }) + + var output DockerConfig + err := json.Unmarshal(input, &output) + if err != nil { + t.Errorf("Received unexpected error: %v", err) + } + + if !reflect.DeepEqual(expect, output) { + t.Errorf("Received unexpected output. 
Expected %#v, got %#v", expect, output) + } +} + +func TestDockerConfigEntryJSONDecode(t *testing.T) { + tests := []struct { + input []byte + expect DockerConfigEntry + fail bool + }{ + // simple case, just decode the fields + { + input: []byte(`{"username": "foo", "password": "bar", "email": "foo@example.com"}`), + expect: DockerConfigEntry{ + Username: "foo", + Password: "bar", + Email: "foo@example.com", + }, + fail: false, + }, + + // auth field decodes to username & password + { + input: []byte(`{"auth": "Zm9vOmJhcg==", "email": "foo@example.com"}`), + expect: DockerConfigEntry{ + Username: "foo", + Password: "bar", + Email: "foo@example.com", + }, + fail: false, + }, + + // auth field overrides username & password + { + input: []byte(`{"username": "foo", "password": "bar", "auth": "cGluZzpwb25n", "email": "foo@example.com"}`), + expect: DockerConfigEntry{ + Username: "ping", + Password: "pong", + Email: "foo@example.com", + }, + fail: false, + }, + + // poorly-formatted auth causes failure + { + input: []byte(`{"auth": "pants", "email": "foo@example.com"}`), + expect: DockerConfigEntry{ + Username: "", + Password: "", + Email: "foo@example.com", + }, + fail: true, + }, + + // a non-string field value (valid JSON, wrong type) causes failure + { + input: []byte(`{"email": false}`), + expect: DockerConfigEntry{ + Username: "", + Password: "", + Email: "", + }, + fail: true, + }, + } + + for i, tt := range tests { + var output DockerConfigEntry + err := json.Unmarshal(tt.input, &output) + if (err != nil) != tt.fail { + t.Errorf("case %d: expected fail=%t, got err=%v", i, tt.fail, err) + } + + if !reflect.DeepEqual(tt.expect, output) { + t.Errorf("case %d: expected output %#v, got %#v", i, tt.expect, output) + } + } +} + +func TestDecodeDockerConfigFieldAuth(t *testing.T) { + tests := []struct { + input string + username string + password string + fail bool + }{ + // auth field decodes to username & password + { + input: "Zm9vOmJhcg==", + username: "foo", + password: "bar", + }, + + // good base64 data, but no colon separating username & password + { + input: "cGFudHM=", + fail: true, + }, + + // bad base64 data + { + input: "pants", + fail: true, + }, + } + + for i, tt := range tests { + username, password, err := decodeDockerConfigFieldAuth(tt.input) + if (err != nil) != tt.fail { + t.Errorf("case %d: expected fail=%t, got err=%v", i, tt.fail, err) + } + + if tt.username != username { + t.Errorf("case %d: expected username %q, got %q", i, tt.username, username) + } + + if tt.password != password { + t.Errorf("case %d: expected password %q, got %q", i, tt.password, password) + } + } +} + +func TestDockerConfigEntryJSONCompatibleEncode(t *testing.T) { + tests := []struct { + input DockerConfigEntry + expect []byte + }{ + // simple case, just encode the fields + { + expect: []byte(`{"username":"foo","password":"bar","email":"foo@example.com","auth":"Zm9vOmJhcg=="}`), + input: DockerConfigEntry{ + Username: "foo", + Password: "bar", + Email: "foo@example.com", + }, + }, + } + + for i, tt := range tests { + actual, err := json.Marshal(tt.input) + if err != nil { + t.Errorf("case %d: unexpected error: %v", i, err) + } + + if string(tt.expect) != string(actual) { + t.Errorf("case %d: expected %v, got %v", i, string(tt.expect), string(actual)) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/credentialprovider/gcp/jwt.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/credentialprovider/gcp/jwt.go index 3c1a05d4bae1..e4c16afa85fe 100644 ---
a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/credentialprovider/gcp/jwt.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/credentialprovider/gcp/jwt.go @@ -82,6 +82,11 @@ func (j *jwtProvider) Enabled() bool { return true } +// LazyProvide implements DockerConfigProvider. Should never be called. +func (j *jwtProvider) LazyProvide() *credentialprovider.DockerConfigEntry { + return nil +} + // Provide implements DockerConfigProvider func (j *jwtProvider) Provide() credentialprovider.DockerConfig { cfg := credentialprovider.DockerConfig{} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/credentialprovider/gcp/jwt_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/credentialprovider/gcp/jwt_test.go new file mode 100644 index 000000000000..4066d02af901 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/credentialprovider/gcp/jwt_test.go @@ -0,0 +1,126 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package gcp_credentials + +import ( + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "os" + "strings" + "testing" + + "k8s.io/kubernetes/pkg/credentialprovider" +) + +const email = "foo@bar.com" + +// From oauth2/jwt_test.go +var ( + dummyPrivateKey = `-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAx4fm7dngEmOULNmAs1IGZ9Apfzh+BkaQ1dzkmbUgpcoghucE +DZRnAGd2aPyB6skGMXUytWQvNYav0WTR00wFtX1ohWTfv68HGXJ8QXCpyoSKSSFY +fuP9X36wBSkSX9J5DVgiuzD5VBdzUISSmapjKm+DcbRALjz6OUIPEWi1Tjl6p5RK +1w41qdbmt7E5/kGhKLDuT7+M83g4VWhgIvaAXtnhklDAggilPPa8ZJ1IFe31lNlr +k4DRk38nc6sEutdf3RL7QoH7FBusI7uXV03DC6dwN1kP4GE7bjJhcRb/7jYt7CQ9 +/E9Exz3c0yAp0yrTg0Fwh+qxfH9dKwN52S7SBwIDAQABAoIBAQCaCs26K07WY5Jt +3a2Cw3y2gPrIgTCqX6hJs7O5ByEhXZ8nBwsWANBUe4vrGaajQHdLj5OKfsIDrOvn +2NI1MqflqeAbu/kR32q3tq8/Rl+PPiwUsW3E6Pcf1orGMSNCXxeducF2iySySzh3 +nSIhCG5uwJDWI7a4+9KiieFgK1pt/Iv30q1SQS8IEntTfXYwANQrfKUVMmVF9aIK +6/WZE2yd5+q3wVVIJ6jsmTzoDCX6QQkkJICIYwCkglmVy5AeTckOVwcXL0jqw5Kf +5/soZJQwLEyBoQq7Kbpa26QHq+CJONetPP8Ssy8MJJXBT+u/bSseMb3Zsr5cr43e +DJOhwsThAoGBAPY6rPKl2NT/K7XfRCGm1sbWjUQyDShscwuWJ5+kD0yudnT/ZEJ1 +M3+KS/iOOAoHDdEDi9crRvMl0UfNa8MAcDKHflzxg2jg/QI+fTBjPP5GOX0lkZ9g +z6VePoVoQw2gpPFVNPPTxKfk27tEzbaffvOLGBEih0Kb7HTINkW8rIlzAoGBAM9y +1yr+jvfS1cGFtNU+Gotoihw2eMKtIqR03Yn3n0PK1nVCDKqwdUqCypz4+ml6cxRK +J8+Pfdh7D+ZJd4LEG6Y4QRDLuv5OA700tUoSHxMSNn3q9As4+T3MUyYxWKvTeu3U +f2NWP9ePU0lV8ttk7YlpVRaPQmc1qwooBA/z/8AdAoGAW9x0HWqmRICWTBnpjyxx +QGlW9rQ9mHEtUotIaRSJ6K/F3cxSGUEkX1a3FRnp6kPLcckC6NlqdNgNBd6rb2rA +cPl/uSkZP42Als+9YMoFPU/xrrDPbUhu72EDrj3Bllnyb168jKLa4VBOccUvggxr +Dm08I1hgYgdN5huzs7y6GeUCgYEAj+AZJSOJ6o1aXS6rfV3mMRve9bQ9yt8jcKXw +5HhOCEmMtaSKfnOF1Ziih34Sxsb7O2428DiX0mV/YHtBnPsAJidL0SdLWIapBzeg +KHArByIRkwE6IvJvwpGMdaex1PIGhx5i/3VZL9qiq/ElT05PhIb+UXgoWMabCp84 +OgxDK20CgYAeaFo8BdQ7FmVX2+EEejF+8xSge6WVLtkaon8bqcn6P0O8lLypoOhd +mJAYH8WU+UAy9pecUnDZj14LAGNVmYcse8HFX71MoshnvCTFEPVo4rZxIAGwMpeJ +5jgQ3slYLpqrGlcbLgUXBUgzEO684Wk/UV9DFPlHALVqCfXQ9dpJPg== +-----END RSA PRIVATE KEY-----` + + jsonKey = fmt.Sprintf(`{"private_key":"%[1]s", "client_email":"%[2]s"}`, + 
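// (JSON string values cannot contain raw newlines, so the PEM key's newlines are escaped as \n below.) +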
strings.Replace(dummyPrivateKey, "\n", "\\n", -1), email) +) + +func TestJwtProvider(t *testing.T) { + token := "asdhflkjsdfkjhsdf" + + // Modeled after oauth2/jwt_test.go + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(fmt.Sprintf(`{ + "access_token": "%[1]s", + "scope": "user", + "token_type": "bearer", + "expires_in": 3600 + }`, token))) + })) + defer ts.Close() + + file, err := ioutil.TempFile(os.TempDir(), "temp") + if err != nil { + t.Fatalf("Error creating temp file: %v", err) + } + + filename := file.Name() + defer os.Remove(filename) + _, err = file.WriteString(jsonKey) + if err != nil { + t.Fatalf("Error writing temp file: %v", err) + } + file.Close() + + provider := &jwtProvider{ + path: &filename, + tokenUrl: ts.URL, + } + if !provider.Enabled() { + t.Fatalf("Provider is unexpectedly disabled") + } + + keyring := &credentialprovider.BasicDockerKeyring{} + keyring.Add(provider.Provide()) + + // Verify that we get the expected username/password combo for + // a gcr.io image name. + registryUrl := "gcr.io/foo/bar" + creds, ok := keyring.Lookup(registryUrl) + if !ok { + t.Errorf("Didn't find expected URL: %s", registryUrl) + return + } + if len(creds) > 1 { + t.Errorf("Got more hits than expected: %s", creds) + } + val := creds[0] + + if "_token" != val.Username { + t.Errorf("Unexpected username value, want: _token, got: %s", val.Username) + } + if token != val.Password { + t.Errorf("Unexpected password value, want: %s, got: %s", token, val.Password) + } + if email != val.Email { + t.Errorf("Unexpected email value, want: %s, got: %s", email, val.Email) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/credentialprovider/gcp/metadata.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/credentialprovider/gcp/metadata.go index 8ab929315f19..fb89b38c0cf6 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/credentialprovider/gcp/metadata.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/credentialprovider/gcp/metadata.go @@ -104,6 +104,11 @@ func (g *metadataProvider) Enabled() bool { return err == nil } +// LazyProvide implements DockerConfigProvider. Should never be called. +func (g *dockerConfigKeyProvider) LazyProvide() *credentialprovider.DockerConfigEntry { + return nil +} + // Provide implements DockerConfigProvider func (g *dockerConfigKeyProvider) Provide() credentialprovider.DockerConfig { // Read the contents of the google-dockercfg metadata key and @@ -117,6 +122,11 @@ func (g *dockerConfigKeyProvider) Provide() credentialprovider.DockerConfig { return credentialprovider.DockerConfig{} } +// LazyProvide implements DockerConfigProvider. Should never be called. +func (g *dockerConfigUrlKeyProvider) LazyProvide() *credentialprovider.DockerConfigEntry { + return nil +} + // Provide implements DockerConfigProvider func (g *dockerConfigUrlKeyProvider) Provide() credentialprovider.DockerConfig { // Read the contents of the google-dockercfg-url key and load a .dockercfg from there @@ -166,6 +176,11 @@ type tokenBlob struct { AccessToken string `json:"access_token"` } +// LazyProvide implements DockerConfigProvider. Should never be called.
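+// (These metadata-backed providers hand out fresh credentials from every +// Provide() call, so there is nothing to defer lazily; the nil return +// reflects that.)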
+func (g *containerRegistryProvider) LazyProvide() *credentialprovider.DockerConfigEntry { + return nil +} + // Provide implements DockerConfigProvider func (g *containerRegistryProvider) Provide() credentialprovider.DockerConfig { cfg := credentialprovider.DockerConfig{} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/credentialprovider/gcp/metadata_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/credentialprovider/gcp/metadata_test.go new file mode 100644 index 000000000000..b65b13a015f6 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/credentialprovider/gcp/metadata_test.go @@ -0,0 +1,342 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package gcp_credentials + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "net/http" + "net/http/httptest" + "net/url" + "reflect" + "strings" + "testing" + + "k8s.io/kubernetes/pkg/credentialprovider" + utilnet "k8s.io/kubernetes/pkg/util/net" +) + +func TestDockerKeyringFromGoogleDockerConfigMetadata(t *testing.T) { + registryUrl := "hello.kubernetes.io" + email := "foo@bar.baz" + username := "foo" + password := "bar" + auth := base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%s:%s", username, password))) + sampleDockerConfig := fmt.Sprintf(`{ + "https://%s": { + "email": %q, + "auth": %q + } +}`, registryUrl, email, auth) + + const probeEndpoint = "/computeMetadata/v1/" + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Only serve the one metadata key. 
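+ // Enabled() probes probeEndpoint, and Provide() then reads the + // google-dockercfg value via dockerConfigKey; any other path gets a 404.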
+ if probeEndpoint == r.URL.Path { + w.WriteHeader(http.StatusOK) + } else if strings.HasSuffix(dockerConfigKey, r.URL.Path) { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintln(w, sampleDockerConfig) + } else { + w.WriteHeader(http.StatusNotFound) + } + })) + defer server.Close() + + // Make a transport that reroutes all traffic to the example server + transport := utilnet.SetTransportDefaults(&http.Transport{ + Proxy: func(req *http.Request) (*url.URL, error) { + return url.Parse(server.URL + req.URL.Path) + }, + }) + + keyring := &credentialprovider.BasicDockerKeyring{} + provider := &dockerConfigKeyProvider{ + metadataProvider{Client: &http.Client{Transport: transport}}, + } + + if !provider.Enabled() { + t.Errorf("Provider is unexpectedly disabled") + } + + keyring.Add(provider.Provide()) + + creds, ok := keyring.Lookup(registryUrl) + if !ok { + t.Errorf("Didn't find expected URL: %s", registryUrl) + return + } + if len(creds) > 1 { + t.Errorf("Got more hits than expected: %s", creds) + } + val := creds[0] + + if username != val.Username { + t.Errorf("Unexpected username value, want: %s, got: %s", username, val.Username) + } + if password != val.Password { + t.Errorf("Unexpected password value, want: %s, got: %s", password, val.Password) + } + if email != val.Email { + t.Errorf("Unexpected email value, want: %s, got: %s", email, val.Email) + } +} + +func TestDockerKeyringFromGoogleDockerConfigMetadataUrl(t *testing.T) { + registryUrl := "hello.kubernetes.io" + email := "foo@bar.baz" + username := "foo" + password := "bar" + auth := base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%s:%s", username, password))) + sampleDockerConfig := fmt.Sprintf(`{ + "https://%s": { + "email": %q, + "auth": %q + } +}`, registryUrl, email, auth) + + const probeEndpoint = "/computeMetadata/v1/" + const valueEndpoint = "/my/value" + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Only serve the URL key and the value endpoint + if probeEndpoint == r.URL.Path { + w.WriteHeader(http.StatusOK) + } else if valueEndpoint == r.URL.Path { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintln(w, sampleDockerConfig) + } else if strings.HasSuffix(dockerConfigUrlKey, r.URL.Path) { + w.Header().Set("Content-Type", "text/plain") + w.WriteHeader(http.StatusOK) + fmt.Fprint(w, "http://foo.bar.com"+valueEndpoint) + } else { + w.WriteHeader(http.StatusNotFound) + } + })) + defer server.Close() + + // Make a transport that reroutes all traffic to the example server + transport := utilnet.SetTransportDefaults(&http.Transport{ + Proxy: func(req *http.Request) (*url.URL, error) { + return url.Parse(server.URL + req.URL.Path) + }, + }) + + keyring := &credentialprovider.BasicDockerKeyring{} + provider := &dockerConfigUrlKeyProvider{ + metadataProvider{Client: &http.Client{Transport: transport}}, + } + + if !provider.Enabled() { + t.Errorf("Provider is unexpectedly disabled") + } + + keyring.Add(provider.Provide()) + + creds, ok := keyring.Lookup(registryUrl) + if !ok { + t.Errorf("Didn't find expected URL: %s", registryUrl) + return + } + if len(creds) > 1 { + t.Errorf("Got more hits than expected: %s", creds) + } + val := creds[0] + + if username != val.Username { + t.Errorf("Unexpected username value, want: %s, got: %s", username, val.Username) + } + if password != val.Password { + t.Errorf("Unexpected password value, want: %s, got: %s", password, val.Password) + } + if
email != val.Email { + t.Errorf("Unexpected email value, want: %s, got: %s", email, val.Email) + } +} + +func TestContainerRegistryBasics(t *testing.T) { + registryUrl := "container.cloud.google.com" + email := "1234@project.gserviceaccount.com" + token := &tokenBlob{AccessToken: "ya26.lots-of-indiscernible-garbage"} + + const ( + defaultEndpoint = "/computeMetadata/v1/instance/service-accounts/default/" + scopeEndpoint = defaultEndpoint + "scopes" + emailEndpoint = defaultEndpoint + "email" + tokenEndpoint = defaultEndpoint + "token" + ) + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Serve the service-account scopes, email, and token endpoints + if scopeEndpoint == r.URL.Path { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, `["%s.read_write"]`, storageScopePrefix) + } else if emailEndpoint == r.URL.Path { + w.WriteHeader(http.StatusOK) + fmt.Fprint(w, email) + } else if tokenEndpoint == r.URL.Path { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + bytes, err := json.Marshal(token) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + fmt.Fprintln(w, string(bytes)) + } else { + w.WriteHeader(http.StatusNotFound) + } + })) + defer server.Close() + + // Make a transport that reroutes all traffic to the example server + transport := utilnet.SetTransportDefaults(&http.Transport{ + Proxy: func(req *http.Request) (*url.URL, error) { + return url.Parse(server.URL + req.URL.Path) + }, + }) + + keyring := &credentialprovider.BasicDockerKeyring{} + provider := &containerRegistryProvider{ + metadataProvider{Client: &http.Client{Transport: transport}}, + } + + if !provider.Enabled() { + t.Errorf("Provider is unexpectedly disabled") + } + + keyring.Add(provider.Provide()) + + creds, ok := keyring.Lookup(registryUrl) + if !ok { + t.Errorf("Didn't find expected URL: %s", registryUrl) + return + } + if len(creds) > 1 { + t.Errorf("Got more hits than expected: %s", creds) + } + val := creds[0] + + if "_token" != val.Username { + t.Errorf("Unexpected username value, want: %s, got: %s", "_token", val.Username) + } + if token.AccessToken != val.Password { + t.Errorf("Unexpected password value, want: %s, got: %s", token.AccessToken, val.Password) + } + if email != val.Email { + t.Errorf("Unexpected email value, want: %s, got: %s", email, val.Email) + } +} + +func TestContainerRegistryNoStorageScope(t *testing.T) { + const ( + defaultEndpoint = "/computeMetadata/v1/instance/service-accounts/default/" + scopeEndpoint = defaultEndpoint + "scopes" + ) + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Only serve the scopes endpoint + if scopeEndpoint == r.URL.Path { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprint(w, `["https://www.googleapis.com/auth/compute.read_write"]`) + } else { + w.WriteHeader(http.StatusNotFound) + } + })) + defer server.Close() + + // Make a transport that reroutes all traffic to the example server + transport := utilnet.SetTransportDefaults(&http.Transport{ + Proxy: func(req *http.Request) (*url.URL, error) { + return url.Parse(server.URL + req.URL.Path) + }, + }) + + provider := &containerRegistryProvider{ + metadataProvider{Client: &http.Client{Transport: transport}}, + } + + if provider.Enabled() { + t.Errorf("Provider is unexpectedly enabled") + } +} + +func TestComputePlatformScopeSubstitutesStorageScope(t *testing.T) { + const (
defaultEndpoint = "/computeMetadata/v1/instance/service-accounts/default/" + scopeEndpoint = defaultEndpoint + "scopes" + ) + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Only serve the URL key and the value endpoint + if scopeEndpoint == r.URL.Path { + w.WriteHeader(http.StatusOK) + w.Header().Set("Content-Type", "application/json") + fmt.Fprint(w, `["https://www.googleapis.com/auth/compute.read_write","https://www.googleapis.com/auth/cloud-platform.read-only"]`) + } else { + w.WriteHeader(http.StatusNotFound) + } + })) + defer server.Close() + + // Make a transport that reroutes all traffic to the example server + transport := utilnet.SetTransportDefaults(&http.Transport{ + Proxy: func(req *http.Request) (*url.URL, error) { + return url.Parse(server.URL + req.URL.Path) + }, + }) + + provider := &containerRegistryProvider{ + metadataProvider{Client: &http.Client{Transport: transport}}, + } + + if !provider.Enabled() { + t.Errorf("Provider is unexpectedly disabled") + } +} + +func TestAllProvidersNoMetadata(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNotFound) + })) + defer server.Close() + + // Make a transport that reroutes all traffic to the example server + transport := utilnet.SetTransportDefaults(&http.Transport{ + Proxy: func(req *http.Request) (*url.URL, error) { + return url.Parse(server.URL + req.URL.Path) + }, + }) + + providers := []credentialprovider.DockerConfigProvider{ + &dockerConfigKeyProvider{ + metadataProvider{Client: &http.Client{Transport: transport}}, + }, + &dockerConfigUrlKeyProvider{ + metadataProvider{Client: &http.Client{Transport: transport}}, + }, + &containerRegistryProvider{ + metadataProvider{Client: &http.Client{Transport: transport}}, + }, + } + + for _, provider := range providers { + if provider.Enabled() { + t.Errorf("Provider %s is unexpectedly enabled", reflect.TypeOf(provider).String()) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/credentialprovider/keyring.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/credentialprovider/keyring.go index 2378156dcd39..eedbee5ad8f7 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/credentialprovider/keyring.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/credentialprovider/keyring.go @@ -24,9 +24,9 @@ import ( "sort" "strings" - docker "github.com/fsouza/go-dockerclient" "github.com/golang/glog" + dockertypes "github.com/docker/engine-api/types" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/util/sets" ) @@ -39,13 +39,13 @@ import ( // most specific match for a given image // - iterating a map does not yield predictable results type DockerKeyring interface { - Lookup(image string) ([]docker.AuthConfiguration, bool) + Lookup(image string) ([]LazyAuthConfiguration, bool) } // BasicDockerKeyring is a trivial map-backed implementation of DockerKeyring type BasicDockerKeyring struct { index []string - creds map[string][]docker.AuthConfiguration + creds map[string][]LazyAuthConfiguration } // lazyDockerKeyring is an implementation of DockerKeyring that lazily @@ -54,17 +54,38 @@ type lazyDockerKeyring struct { Providers []DockerConfigProvider } +// LazyAuthConfiguration wraps dockertypes.AuthConfig, potentially deferring its +// binding. If Provider is non-nil, it will be used to obtain new credentials +// by calling LazyProvide() on it. 
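+// +// Illustrative use (a sketch, not part of the upstream change): callers +// resolve the deferred binding through the package-level helper, e.g. +// +// auth := LazyProvide(lazyCreds) // yields a plain dockertypes.AuthConfig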
+type LazyAuthConfiguration struct { + dockertypes.AuthConfig + Provider DockerConfigProvider +} + +func DockerConfigEntryToLazyAuthConfiguration(ident DockerConfigEntry) LazyAuthConfiguration { + return LazyAuthConfiguration{ + AuthConfig: dockertypes.AuthConfig{ + Username: ident.Username, + Password: ident.Password, + Email: ident.Email, + }, + } +} + func (dk *BasicDockerKeyring) Add(cfg DockerConfig) { if dk.index == nil { dk.index = make([]string, 0) - dk.creds = make(map[string][]docker.AuthConfiguration) + dk.creds = make(map[string][]LazyAuthConfiguration) } for loc, ident := range cfg { - creds := docker.AuthConfiguration{ - Username: ident.Username, - Password: ident.Password, - Email: ident.Email, + var creds LazyAuthConfiguration + if ident.Provider != nil { + creds = LazyAuthConfiguration{ + Provider: ident.Provider, + } + } else { + creds = DockerConfigEntryToLazyAuthConfiguration(ident) } value := loc @@ -215,9 +236,9 @@ func urlsMatch(globUrl *url.URL, targetUrl *url.URL) (bool, error) { // Lookup implements the DockerKeyring method for fetching credentials based on image name. // Multiple credentials may be returned if there are multiple potentially valid credentials // available. This allows for rotation. -func (dk *BasicDockerKeyring) Lookup(image string) ([]docker.AuthConfiguration, bool) { +func (dk *BasicDockerKeyring) Lookup(image string) ([]LazyAuthConfiguration, bool) { // range over the index as iterating over a map does not provide a predictable ordering - ret := []docker.AuthConfiguration{} + ret := []LazyAuthConfiguration{} for _, k := range dk.index { // both k and image are schemeless URLs because even though schemes are allowed // in the credential configurations, we remove them in Add. @@ -239,12 +260,12 @@ func (dk *BasicDockerKeyring) Lookup(image string) ([]docker.AuthConfiguration, } } - return []docker.AuthConfiguration{}, false + return []LazyAuthConfiguration{}, false } // Lookup implements the DockerKeyring method for fetching credentials // based on image name. -func (dk *lazyDockerKeyring) Lookup(image string) ([]docker.AuthConfiguration, bool) { +func (dk *lazyDockerKeyring) Lookup(image string) ([]LazyAuthConfiguration, bool) { keyring := &BasicDockerKeyring{} for _, p := range dk.Providers { @@ -255,11 +276,11 @@ func (dk *lazyDockerKeyring) Lookup(image string) ([]docker.AuthConfiguration, b } type FakeKeyring struct { - auth []docker.AuthConfiguration + auth []LazyAuthConfiguration ok bool } -func (f *FakeKeyring) Lookup(image string) ([]docker.AuthConfiguration, bool) { +func (f *FakeKeyring) Lookup(image string) ([]LazyAuthConfiguration, bool) { return f.auth, f.ok } @@ -268,9 +289,8 @@ type unionDockerKeyring struct { keyrings []DockerKeyring } -func (k *unionDockerKeyring) Lookup(image string) ([]docker.AuthConfiguration, bool) { - authConfigs := []docker.AuthConfiguration{} - +func (k *unionDockerKeyring) Lookup(image string) ([]LazyAuthConfiguration, bool) { + authConfigs := []LazyAuthConfiguration{} for _, subKeyring := range k.keyrings { if subKeyring == nil { continue diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/credentialprovider/keyring_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/credentialprovider/keyring_test.go new file mode 100644 index 000000000000..376e08017099 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/credentialprovider/keyring_test.go @@ -0,0 +1,501 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package credentialprovider + +import ( + "encoding/base64" + "fmt" + "testing" +) + +func TestUrlsMatch(t *testing.T) { + tests := []struct { + globUrl string + targetUrl string + matchExpected bool + }{ + // match when there is no path component + { + globUrl: "*.kubernetes.io", + targetUrl: "prefix.kubernetes.io", + matchExpected: true, + }, + { + globUrl: "prefix.*.io", + targetUrl: "prefix.kubernetes.io", + matchExpected: true, + }, + { + globUrl: "prefix.kubernetes.*", + targetUrl: "prefix.kubernetes.io", + matchExpected: true, + }, + { + globUrl: "*-good.kubernetes.io", + targetUrl: "prefix-good.kubernetes.io", + matchExpected: true, + }, + // match with path components + { + globUrl: "*.kubernetes.io/blah", + targetUrl: "prefix.kubernetes.io/blah", + matchExpected: true, + }, + { + globUrl: "prefix.*.io/foo", + targetUrl: "prefix.kubernetes.io/foo/bar", + matchExpected: true, + }, + // match with path components and ports + { + globUrl: "*.kubernetes.io:1111/blah", + targetUrl: "prefix.kubernetes.io:1111/blah", + matchExpected: true, + }, + { + globUrl: "prefix.*.io:1111/foo", + targetUrl: "prefix.kubernetes.io:1111/foo/bar", + matchExpected: true, + }, + // no match when number of parts mismatch + { + globUrl: "*.kubernetes.io", + targetUrl: "kubernetes.io", + matchExpected: false, + }, + { + globUrl: "*.*.kubernetes.io", + targetUrl: "prefix.kubernetes.io", + matchExpected: false, + }, + { + globUrl: "*.*.kubernetes.io", + targetUrl: "kubernetes.io", + matchExpected: false, + }, + // no match when some parts mismatch + { + globUrl: "kubernetes.io", + targetUrl: "kubernetes.com", + matchExpected: false, + }, + { + globUrl: "k*.io", + targetUrl: "quay.io", + matchExpected: false, + }, + // no match when ports mismatch + { + globUrl: "*.kubernetes.io:1234/blah", + targetUrl: "prefix.kubernetes.io:1111/blah", + matchExpected: false, + }, + { + globUrl: "prefix.*.io/foo", + targetUrl: "prefix.kubernetes.io:1111/foo/bar", + matchExpected: false, + }, + } + for _, test := range tests { + matched, _ := urlsMatchStr(test.globUrl, test.targetUrl) + if matched != test.matchExpected { + t.Errorf("Expected match result of %s and %s to be %t, but was %t", + test.globUrl, test.targetUrl, test.matchExpected, matched) + } + } +} + +func TestDockerKeyringForGlob(t *testing.T) { + tests := []struct { + globUrl string + targetUrl string + }{ + { + globUrl: "https://hello.kubernetes.io", + targetUrl: "hello.kubernetes.io", + }, + { + globUrl: "https://*.docker.io", + targetUrl: "prefix.docker.io", + }, + { + globUrl: "https://prefix.*.io", + targetUrl: "prefix.docker.io", + }, + { + globUrl: "https://prefix.docker.*", + targetUrl: "prefix.docker.io", + }, + { + globUrl: "https://*.docker.io/path", + targetUrl: "prefix.docker.io/path", + }, + { + globUrl: "https://prefix.*.io/path", + targetUrl: "prefix.docker.io/path/subpath", + }, + { + globUrl: "https://prefix.docker.*/path", + targetUrl: "prefix.docker.io/path", + }, + { + globUrl: "https://*.docker.io:8888", + 
targetUrl: "prefix.docker.io:8888", + }, + { + globUrl: "https://prefix.*.io:8888", + targetUrl: "prefix.docker.io:8888", + }, + { + globUrl: "https://prefix.docker.*:8888", + targetUrl: "prefix.docker.io:8888", + }, + { + globUrl: "https://*.docker.io/path:1111", + targetUrl: "prefix.docker.io/path:1111", + }, + { + globUrl: "https://*.docker.io/v1/", + targetUrl: "prefix.docker.io/path:1111", + }, + { + globUrl: "https://*.docker.io/v2/", + targetUrl: "prefix.docker.io/path:1111", + }, + { + globUrl: "https://prefix.docker.*/path:1111", + targetUrl: "prefix.docker.io/path:1111", + }, + { + globUrl: "prefix.docker.io:1111", + targetUrl: "prefix.docker.io:1111/path", + }, + { + globUrl: "*.docker.io:1111", + targetUrl: "prefix.docker.io:1111/path", + }, + } + for i, test := range tests { + email := "foo@bar.baz" + username := "foo" + password := "bar" + auth := base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%s:%s", username, password))) + sampleDockerConfig := fmt.Sprintf(`{ + "%s": { + "email": %q, + "auth": %q + } +}`, test.globUrl, email, auth) + + keyring := &BasicDockerKeyring{} + if cfg, err := readDockerConfigFileFromBytes([]byte(sampleDockerConfig)); err != nil { + t.Errorf("Error processing json blob %q, %v", sampleDockerConfig, err) + } else { + keyring.Add(cfg) + } + + creds, ok := keyring.Lookup(test.targetUrl + "/foo/bar") + if !ok { + t.Errorf("%d: Didn't find expected URL: %s", i, test.targetUrl) + continue + } + val := creds[0] + + if username != val.Username { + t.Errorf("Unexpected username value, want: %s, got: %s", username, val.Username) + } + if password != val.Password { + t.Errorf("Unexpected password value, want: %s, got: %s", password, val.Password) + } + if email != val.Email { + t.Errorf("Unexpected email value, want: %s, got: %s", email, val.Email) + } + } +} + +func TestKeyringMiss(t *testing.T) { + tests := []struct { + globUrl string + lookupUrl string + }{ + { + globUrl: "https://hello.kubernetes.io", + lookupUrl: "world.mesos.org/foo/bar", + }, + { + globUrl: "https://*.docker.com", + lookupUrl: "prefix.docker.io", + }, + { + globUrl: "https://suffix.*.io", + lookupUrl: "prefix.docker.io", + }, + { + globUrl: "https://prefix.docker.c*", + lookupUrl: "prefix.docker.io", + }, + { + globUrl: "https://prefix.*.io/path:1111", + lookupUrl: "prefix.docker.io/path/subpath:1111", + }, + { + globUrl: "suffix.*.io", + lookupUrl: "prefix.docker.io", + }, + } + for _, test := range tests { + email := "foo@bar.baz" + username := "foo" + password := "bar" + auth := base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%s:%s", username, password))) + sampleDockerConfig := fmt.Sprintf(`{ + "%s": { + "email": %q, + "auth": %q + } +}`, test.globUrl, email, auth) + + keyring := &BasicDockerKeyring{} + if cfg, err := readDockerConfigFileFromBytes([]byte(sampleDockerConfig)); err != nil { + t.Errorf("Error processing json blob %q, %v", sampleDockerConfig, err) + } else { + keyring.Add(cfg) + } + + _, ok := keyring.Lookup(test.lookupUrl + "/foo/bar") + if ok { + t.Errorf("Expected not to find URL %s, but found", test.lookupUrl) + } + } + +} + +func TestKeyringMissWithDockerHubCredentials(t *testing.T) { + url := defaultRegistryKey + email := "foo@bar.baz" + username := "foo" + password := "bar" + auth := base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%s:%s", username, password))) + sampleDockerConfig := fmt.Sprintf(`{ + "https://%s": { + "email": %q, + "auth": %q + } +}`, url, email, auth) + + keyring := &BasicDockerKeyring{} + if cfg, err := 
readDockerConfigFileFromBytes([]byte(sampleDockerConfig)); err != nil { + t.Errorf("Error processing json blob %q, %v", sampleDockerConfig, err) + } else { + keyring.Add(cfg) + } + + val, ok := keyring.Lookup("world.mesos.org/foo/bar") + if ok { + t.Errorf("Found unexpected credential: %+v", val) + } +} + +func TestKeyringHitWithUnqualifiedDockerHub(t *testing.T) { + url := defaultRegistryKey + email := "foo@bar.baz" + username := "foo" + password := "bar" + auth := base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%s:%s", username, password))) + sampleDockerConfig := fmt.Sprintf(`{ + "https://%s": { + "email": %q, + "auth": %q + } +}`, url, email, auth) + + keyring := &BasicDockerKeyring{} + if cfg, err := readDockerConfigFileFromBytes([]byte(sampleDockerConfig)); err != nil { + t.Errorf("Error processing json blob %q, %v", sampleDockerConfig, err) + } else { + keyring.Add(cfg) + } + + creds, ok := keyring.Lookup("google/docker-registry") + if !ok { + t.Errorf("Didn't find expected URL: %s", url) + return + } + if len(creds) > 1 { + t.Errorf("Got more hits than expected: %s", creds) + } + val := creds[0] + + if username != val.Username { + t.Errorf("Unexpected username value, want: %s, got: %s", username, val.Username) + } + if password != val.Password { + t.Errorf("Unexpected password value, want: %s, got: %s", password, val.Password) + } + if email != val.Email { + t.Errorf("Unexpected email value, want: %s, got: %s", email, val.Email) + } +} + +func TestKeyringHitWithUnqualifiedLibraryDockerHub(t *testing.T) { + url := defaultRegistryKey + email := "foo@bar.baz" + username := "foo" + password := "bar" + auth := base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%s:%s", username, password))) + sampleDockerConfig := fmt.Sprintf(`{ + "https://%s": { + "email": %q, + "auth": %q + } +}`, url, email, auth) + + keyring := &BasicDockerKeyring{} + if cfg, err := readDockerConfigFileFromBytes([]byte(sampleDockerConfig)); err != nil { + t.Errorf("Error processing json blob %q, %v", sampleDockerConfig, err) + } else { + keyring.Add(cfg) + } + + creds, ok := keyring.Lookup("jenkins") + if !ok { + t.Errorf("Didn't find expected URL: %s", url) + return + } + if len(creds) > 1 { + t.Errorf("Got more hits than expected: %s", creds) + } + val := creds[0] + + if username != val.Username { + t.Errorf("Unexpected username value, want: %s, got: %s", username, val.Username) + } + if password != val.Password { + t.Errorf("Unexpected password value, want: %s, got: %s", password, val.Password) + } + if email != val.Email { + t.Errorf("Unexpected email value, want: %s, got: %s", email, val.Email) + } +} + +func TestKeyringHitWithQualifiedDockerHub(t *testing.T) { + url := defaultRegistryKey + email := "foo@bar.baz" + username := "foo" + password := "bar" + auth := base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%s:%s", username, password))) + sampleDockerConfig := fmt.Sprintf(`{ + "https://%s": { + "email": %q, + "auth": %q + } +}`, url, email, auth) + + keyring := &BasicDockerKeyring{} + if cfg, err := readDockerConfigFileFromBytes([]byte(sampleDockerConfig)); err != nil { + t.Errorf("Error processing json blob %q, %v", sampleDockerConfig, err) + } else { + keyring.Add(cfg) + } + + creds, ok := keyring.Lookup(url + "/google/docker-registry") + if !ok { + t.Errorf("Didn't find expected URL: %s", url) + return + } + if len(creds) > 2 { + t.Errorf("Got more hits than expected: %s", creds) + } + val := creds[0] + + if username != val.Username { + t.Errorf("Unexpected username value, want: %s, got: %s", 
username, val.Username) + } + if password != val.Password { + t.Errorf("Unexpected password value, want: %s, got: %s", password, val.Password) + } + if email != val.Email { + t.Errorf("Unexpected email value, want: %s, got: %s", email, val.Email) + } +} + +func TestIsDefaultRegistryMatch(t *testing.T) { + samples := []map[bool]string{ + {true: "foo/bar"}, + {true: "docker.io/foo/bar"}, + {true: "index.docker.io/foo/bar"}, + {true: "foo"}, + {false: ""}, + {false: "registry.tld/foo/bar"}, + {false: "registry:5000/foo/bar"}, + {false: "myhostdocker.io/foo/bar"}, + } + for _, sample := range samples { + for expected, imageName := range sample { + if got := isDefaultRegistryMatch(imageName); got != expected { + t.Errorf("Expected '%s' to be %t, got %t", imageName, expected, got) + } + } + } +} + +type testProvider struct { + Count int +} + +// Enabled implements dockerConfigProvider +func (d *testProvider) Enabled() bool { + return true +} + +// LazyProvide implements dockerConfigProvider. Should never be called. +func (d *testProvider) LazyProvide() *DockerConfigEntry { + return nil +} + +// Provide implements dockerConfigProvider +func (d *testProvider) Provide() DockerConfig { + d.Count += 1 + return DockerConfig{} +} + +func TestLazyKeyring(t *testing.T) { + provider := &testProvider{ + Count: 0, + } + lazy := &lazyDockerKeyring{ + Providers: []DockerConfigProvider{ + provider, + }, + } + + if provider.Count != 0 { + t.Errorf("Unexpected number of Provide calls: %v", provider.Count) + } + lazy.Lookup("foo") + if provider.Count != 1 { + t.Errorf("Unexpected number of Provide calls: %v", provider.Count) + } + lazy.Lookup("foo") + if provider.Count != 2 { + t.Errorf("Unexpected number of Provide calls: %v", provider.Count) + } + lazy.Lookup("foo") + if provider.Count != 3 { + t.Errorf("Unexpected number of Provide calls: %v", provider.Count) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/credentialprovider/plugins.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/credentialprovider/plugins.go index cc29ffd83182..a871cc02bcd9 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/credentialprovider/plugins.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/credentialprovider/plugins.go @@ -38,7 +38,7 @@ func RegisterCredentialProvider(name string, provider DockerConfigProvider) { if found { glog.Fatalf("Credential provider %q was registered twice", name) } - glog.V(1).Infof("Registered credential provider %q", name) + glog.V(4).Infof("Registered credential provider %q", name) providers[name] = provider } @@ -53,7 +53,7 @@ func NewDockerKeyring() DockerKeyring { // introduce the notion of priorities for conflict resolution. 
for name, provider := range providers { if provider.Enabled() { - glog.Infof("Registering credential provider: %v", name) + glog.V(4).Infof("Registering credential provider: %v", name) keyring.Providers = append(keyring.Providers, provider) } } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/credentialprovider/provider.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/credentialprovider/provider.go index f4f52c8fd881..215650392bd8 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/credentialprovider/provider.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/credentialprovider/provider.go @@ -22,6 +22,7 @@ import ( "sync" "time" + dockertypes "github.com/docker/engine-api/types" "github.com/golang/glog" ) @@ -30,6 +31,19 @@ import ( type DockerConfigProvider interface { Enabled() bool Provide() DockerConfig + // LazyProvide is called only after URL matching has selected this provider, + // so the registry location that would normally key the DockerConfig is + // already known and would be redundant here. + LazyProvide() *DockerConfigEntry +} + +func LazyProvide(creds LazyAuthConfiguration) dockertypes.AuthConfig { + if creds.Provider != nil { + entry := *creds.Provider.LazyProvide() + return DockerConfigEntryToLazyAuthConfiguration(entry).AuthConfig + } else { + return creds.AuthConfig + } + } // A DockerConfigProvider that simply reads the .dockercfg file @@ -73,11 +87,21 @@ func (d *defaultDockerConfigProvider) Provide() DockerConfig { return DockerConfig{} } +// LazyProvide implements dockerConfigProvider. Should never be called. +func (d *defaultDockerConfigProvider) LazyProvide() *DockerConfigEntry { + return nil +} + // Enabled implements dockerConfigProvider func (d *CachingDockerConfigProvider) Enabled() bool { return d.Provider.Enabled() } +// LazyProvide implements dockerConfigProvider. Should never be called. +func (d *CachingDockerConfigProvider) LazyProvide() *DockerConfigEntry { + return nil +} + // Provide implements dockerConfigProvider func (d *CachingDockerConfigProvider) Provide() DockerConfig { d.mu.Lock() @@ -88,7 +112,7 @@ func (d *CachingDockerConfigProvider) Provide() DockerConfig { return d.cacheDockerConfig } - glog.Infof("Refreshing cache for provider: %v", reflect.TypeOf(d.Provider).String()) + glog.V(2).Infof("Refreshing cache for provider: %v", reflect.TypeOf(d.Provider).String()) d.cacheDockerConfig = d.Provider.Provide() d.expiration = time.Now().Add(d.Lifetime) return d.cacheDockerConfig diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/credentialprovider/provider_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/credentialprovider/provider_test.go new file mode 100644 index 000000000000..099d839fbf45 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/credentialprovider/provider_test.go @@ -0,0 +1,62 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package credentialprovider + +import ( + "testing" + "time" +) + +func TestCachingProvider(t *testing.T) { + provider := &testProvider{ + Count: 0, + } + + cache := &CachingDockerConfigProvider{ + Provider: provider, + Lifetime: 1 * time.Second, + } + + if provider.Count != 0 { + t.Errorf("Unexpected number of Provide calls: %v", provider.Count) + } + cache.Provide() + cache.Provide() + cache.Provide() + cache.Provide() + if provider.Count != 1 { + t.Errorf("Unexpected number of Provide calls: %v", provider.Count) + } + + time.Sleep(cache.Lifetime) + cache.Provide() + cache.Provide() + cache.Provide() + cache.Provide() + if provider.Count != 2 { + t.Errorf("Unexpected number of Provide calls: %v", provider.Count) + } + + time.Sleep(cache.Lifetime) + cache.Provide() + cache.Provide() + cache.Provide() + cache.Provide() + if provider.Count != 3 { + t.Errorf("Unexpected number of Provide calls: %v", provider.Count) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/fieldpath/fieldpath.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/fieldpath/fieldpath.go index 88c8a3131c45..30da22664767 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/fieldpath/fieldpath.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/fieldpath/fieldpath.go @@ -18,8 +18,12 @@ package fieldpath import ( "fmt" + "math" + "strconv" + "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/meta" + "k8s.io/kubernetes/pkg/api/resource" ) // formatMap formats map[string]string to a string. @@ -58,3 +62,61 @@ func ExtractFieldPathAsString(obj interface{}, fieldPath string) (string, error) return "", fmt.Errorf("Unsupported fieldPath: %v", fieldPath) } + +// ExtractResourceValueByContainerName extracts the value of a resource +// by container name +func ExtractResourceValueByContainerName(fs *api.ResourceFieldSelector, pod *api.Pod, containerName string) (string, error) { + container, err := findContainerInPod(pod, containerName) + if err != nil { + return "", err + } + return ExtractContainerResourceValue(fs, container) +} + +// ExtractContainerResourceValue extracts the value of a resource +// in an already known container +func ExtractContainerResourceValue(fs *api.ResourceFieldSelector, container *api.Container) (string, error) { + divisor := resource.Quantity{} + if divisor.Cmp(fs.Divisor) == 0 { + divisor = resource.MustParse("1") + } else { + divisor = fs.Divisor + } + + switch fs.Resource { + case "limits.cpu": + return convertResourceCPUToString(container.Resources.Limits.Cpu(), divisor) + case "limits.memory": + return convertResourceMemoryToString(container.Resources.Limits.Memory(), divisor) + case "requests.cpu": + return convertResourceCPUToString(container.Resources.Requests.Cpu(), divisor) + case "requests.memory": + return convertResourceMemoryToString(container.Resources.Requests.Memory(), divisor) + } + + return "", fmt.Errorf("Unsupported container resource: %v", fs.Resource) +} + +// findContainerInPod finds a container by its name in the provided pod +func findContainerInPod(pod *api.Pod, containerName string) (*api.Container, error) { + for _, container := range pod.Spec.Containers { + if container.Name == containerName { + return &container, nil + } + } + return nil, fmt.Errorf("container %s not found", containerName) +} + +// convertResourceCPUToString converts cpu value to the format of divisor and returns +// ceiling of the value.
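+// For example, a 500m CPU limit with the default divisor of "1" yields "1" +// (the ceiling of 0.5), while 2500m with a divisor of "1m" yields "2500".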
+func convertResourceCPUToString(cpu *resource.Quantity, divisor resource.Quantity) (string, error) { + c := int64(math.Ceil(float64(cpu.MilliValue()) / float64(divisor.MilliValue()))) + return strconv.FormatInt(c, 10), nil +} + +// convertResourceMemoryToString converts memory value to the format of divisor and returns +// ceiling of the value. +func convertResourceMemoryToString(memory *resource.Quantity, divisor resource.Quantity) (string, error) { + m := int64(math.Ceil(float64(memory.Value()) / float64(divisor.Value()))) + return strconv.FormatInt(m, 10), nil +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/fieldpath/fieldpath_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/fieldpath/fieldpath_test.go new file mode 100644 index 000000000000..d510426ee05d --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/fieldpath/fieldpath_test.go @@ -0,0 +1,117 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fieldpath + +import ( + "strings" + "testing" + + "k8s.io/kubernetes/pkg/api" +) + +func TestExtractFieldPathAsString(t *testing.T) { + cases := []struct { + name string + fieldPath string + obj interface{} + expectedValue string + expectedMessageFragment string + }{ + { + name: "not an API object", + fieldPath: "metadata.name", + obj: "", + expectedMessageFragment: "expected struct", + }, + { + name: "ok - namespace", + fieldPath: "metadata.namespace", + obj: &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Namespace: "object-namespace", + }, + }, + expectedValue: "object-namespace", + }, + { + name: "ok - name", + fieldPath: "metadata.name", + obj: &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: "object-name", + }, + }, + expectedValue: "object-name", + }, + { + name: "ok - labels", + fieldPath: "metadata.labels", + obj: &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Labels: map[string]string{"key": "value"}, + }, + }, + expectedValue: "key=\"value\"\n", + }, + { + name: "ok - labels bslash n", + fieldPath: "metadata.labels", + obj: &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Labels: map[string]string{"key": "value\n"}, + }, + }, + expectedValue: "key=\"value\\n\"\n", + }, + { + name: "ok - annotations", + fieldPath: "metadata.annotations", + obj: &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Annotations: map[string]string{"builder": "john-doe"}, + }, + }, + expectedValue: "builder=\"john-doe\"\n", + }, + + { + name: "invalid expression", + fieldPath: "metadata.whoops", + obj: &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Namespace: "object-namespace", + }, + }, + expectedMessageFragment: "Unsupported fieldPath", + }, + } + + for _, tc := range cases { + actual, err := ExtractFieldPathAsString(tc.obj, tc.fieldPath) + if err != nil { + if tc.expectedMessageFragment != "" { + if !strings.Contains(err.Error(), tc.expectedMessageFragment) { + t.Errorf("%v: Unexpected error message: %q, expected to contain %q", tc.name, err, tc.expectedMessageFragment) + } + } else { + t.Errorf("%v: unexpected error: %v", tc.name, err) + } + } else 
if e := tc.expectedValue; e != "" && e != actual { + t.Errorf("%v: Unexpected result; got %q, expected %q", tc.name, actual, e) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/fields/fields_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/fields/fields_test.go new file mode 100644 index 000000000000..9f6f3fc35773 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/fields/fields_test.go @@ -0,0 +1,57 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fields + +import ( + "testing" +) + +func matches(t *testing.T, ls Set, want string) { + if ls.String() != want { + t.Errorf("Expected '%s', but got '%s'", want, ls.String()) + } +} + +func TestSetString(t *testing.T) { + matches(t, Set{"x": "y"}, "x=y") + matches(t, Set{"foo": "bar"}, "foo=bar") + matches(t, Set{"foo": "bar", "baz": "qup"}, "baz=qup,foo=bar") +} + +func TestFieldHas(t *testing.T) { + fieldHasTests := []struct { + Ls Fields + Key string + Has bool + }{ + {Set{"x": "y"}, "x", true}, + {Set{"x": ""}, "x", true}, + {Set{"x": "y"}, "foo", false}, + } + for _, lh := range fieldHasTests { + if has := lh.Ls.Has(lh.Key); has != lh.Has { + t.Errorf("%#v.Has(%#v) => %v, expected %v", lh.Ls, lh.Key, has, lh.Has) + } + } +} + +func TestFieldGet(t *testing.T) { + ls := Set{"x": "y"} + if ls.Get("x") != "y" { + t.Errorf("Set.Get is broken") + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/fields/selector_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/fields/selector_test.go new file mode 100644 index 000000000000..7651ae6bb3eb --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/fields/selector_test.go @@ -0,0 +1,208 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package fields + +import ( + "testing" +) + +func TestSelectorParse(t *testing.T) { + testGoodStrings := []string{ + "x=a,y=b,z=c", + "", + "x!=a,y=b", + } + testBadStrings := []string{ + "x=a||y=b", + "x==a==b", + } + for _, test := range testGoodStrings { + lq, err := ParseSelector(test) + if err != nil { + t.Errorf("%v: error %v (%#v)\n", test, err, err) + } + if test != lq.String() { + t.Errorf("%v restring gave: %v\n", test, lq.String()) + } + } + for _, test := range testBadStrings { + _, err := ParseSelector(test) + if err == nil { + t.Errorf("%v: did not get expected error\n", test) + } + } +} + +func TestDeterministicParse(t *testing.T) { + s1, err := ParseSelector("x=a,a=x") + s2, err2 := ParseSelector("a=x,x=a") + if err != nil || err2 != nil { + t.Errorf("Unexpected parse error") + } + if s1.String() != s2.String() { + t.Errorf("Non-deterministic parse") + } +} + +func expectMatch(t *testing.T, selector string, ls Set) { + lq, err := ParseSelector(selector) + if err != nil { + t.Errorf("Unable to parse %v as a selector\n", selector) + return + } + if !lq.Matches(ls) { + t.Errorf("Wanted %s to match '%s', but it did not.\n", selector, ls) + } +} + +func expectNoMatch(t *testing.T, selector string, ls Set) { + lq, err := ParseSelector(selector) + if err != nil { + t.Errorf("Unable to parse %v as a selector\n", selector) + return + } + if lq.Matches(ls) { + t.Errorf("Wanted '%s' to not match '%s', but it did.", selector, ls) + } +} + +func TestEverything(t *testing.T) { + if !Everything().Matches(Set{"x": "y"}) { + t.Errorf("Nil selector didn't match") + } + if !Everything().Empty() { + t.Errorf("Everything was not empty") + } +} + +func TestSelectorMatches(t *testing.T) { + expectMatch(t, "", Set{"x": "y"}) + expectMatch(t, "x=y", Set{"x": "y"}) + expectMatch(t, "x=y,z=w", Set{"x": "y", "z": "w"}) + expectMatch(t, "x!=y,z!=w", Set{"x": "z", "z": "a"}) + expectMatch(t, "notin=in", Set{"notin": "in"}) // in and notin in exactMatch + expectNoMatch(t, "x=y", Set{"x": "z"}) + expectNoMatch(t, "x=y,z=w", Set{"x": "w", "z": "w"}) + expectNoMatch(t, "x!=y,z!=w", Set{"x": "z", "z": "w"}) + + labelset := Set{ + "foo": "bar", + "baz": "blah", + } + expectMatch(t, "foo=bar", labelset) + expectMatch(t, "baz=blah", labelset) + expectMatch(t, "foo=bar,baz=blah", labelset) + expectNoMatch(t, "foo=blah", labelset) + expectNoMatch(t, "baz=bar", labelset) + expectNoMatch(t, "foo=bar,foobar=bar,baz=blah", labelset) +} + +func TestOneTermEqualSelector(t *testing.T) { + if !OneTermEqualSelector("x", "y").Matches(Set{"x": "y"}) { + t.Errorf("No match when match expected.") + } + if OneTermEqualSelector("x", "y").Matches(Set{"x": "z"}) { + t.Errorf("Match when none expected.") + } +} + +func expectMatchDirect(t *testing.T, selector, ls Set) { + if !SelectorFromSet(selector).Matches(ls) { + t.Errorf("Wanted %s to match '%s', but it did not.\n", selector, ls) + } +} + +func expectNoMatchDirect(t *testing.T, selector, ls Set) { + if SelectorFromSet(selector).Matches(ls) { + t.Errorf("Wanted '%s' to not match '%s', but it did.", selector, ls) + } +} + +func TestSetMatches(t *testing.T) { + labelset := Set{ + "foo": "bar", + "baz": "blah", + } + expectMatchDirect(t, Set{}, labelset) + expectMatchDirect(t, Set{"foo": "bar"}, labelset) + expectMatchDirect(t, Set{"baz": "blah"}, labelset) + expectMatchDirect(t, Set{"foo": "bar", "baz": "blah"}, labelset) + expectNoMatchDirect(t, Set{"foo": "=blah"}, labelset) + expectNoMatchDirect(t, Set{"baz": "=bar"}, labelset) + expectNoMatchDirect(t, Set{"foo": 
"=bar", "foobar": "bar", "baz": "blah"}, labelset) +} + +func TestNilMapIsValid(t *testing.T) { + selector := Set(nil).AsSelector() + if selector == nil { + t.Errorf("Selector for nil set should be Everything") + } + if !selector.Empty() { + t.Errorf("Selector for nil set should be Empty") + } +} + +func TestSetIsEmpty(t *testing.T) { + if !(Set{}).AsSelector().Empty() { + t.Errorf("Empty set should be empty") + } + if !(andTerm(nil)).Empty() { + t.Errorf("Nil andTerm should be empty") + } + if (&hasTerm{}).Empty() { + t.Errorf("hasTerm should not be empty") + } + if (¬HasTerm{}).Empty() { + t.Errorf("notHasTerm should not be empty") + } + if !(andTerm{andTerm{}}).Empty() { + t.Errorf("Nested andTerm should be empty") + } + if (andTerm{&hasTerm{"a", "b"}}).Empty() { + t.Errorf("Nested andTerm should not be empty") + } +} + +func TestRequiresExactMatch(t *testing.T) { + testCases := map[string]struct { + S Selector + Label string + Value string + Found bool + }{ + "empty set": {Set{}.AsSelector(), "test", "", false}, + "nil andTerm": {andTerm(nil), "test", "", false}, + "empty hasTerm": {&hasTerm{}, "test", "", false}, + "skipped hasTerm": {&hasTerm{"a", "b"}, "test", "", false}, + "valid hasTerm": {&hasTerm{"test", "b"}, "test", "b", true}, + "valid hasTerm no value": {&hasTerm{"test", ""}, "test", "", true}, + "valid notHasTerm": {¬HasTerm{"test", "b"}, "test", "", false}, + "valid notHasTerm no value": {¬HasTerm{"test", ""}, "test", "", false}, + "nested andTerm": {andTerm{andTerm{}}, "test", "", false}, + "nested andTerm matches": {andTerm{&hasTerm{"test", "b"}}, "test", "b", true}, + "andTerm with non-match": {andTerm{&hasTerm{}, &hasTerm{"test", "b"}}, "test", "b", true}, + } + for k, v := range testCases { + value, found := v.S.RequiresExactMatch(v.Label) + if value != v.Value { + t.Errorf("%s: expected value %s, got %s", k, v.Value, value) + } + if found != v.Found { + t.Errorf("%s: expected found %t, got %t", k, v.Found, found) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/apply.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/apply.go index 3149bcb29daa..c75c4f8a0124 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/apply.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/apply.go @@ -19,6 +19,7 @@ package kubectl import ( "encoding/json" + "k8s.io/kubernetes/pkg/api/annotations" "k8s.io/kubernetes/pkg/api/meta" "k8s.io/kubernetes/pkg/kubectl/resource" "k8s.io/kubernetes/pkg/runtime" @@ -28,23 +29,19 @@ type debugError interface { DebugError() (msg string, args []interface{}) } -// LastAppliedConfigAnnotation is the annotation used to store the previous -// configuration of a resource for use in a three way diff by UpdateApplyAnnotation. -const LastAppliedConfigAnnotation = kubectlAnnotationPrefix + "last-applied-configuration" - // GetOriginalConfiguration retrieves the original configuration of the object // from the annotation, or nil if no annotation was found. 
func GetOriginalConfiguration(info *resource.Info) ([]byte, error) { - annotations, err := info.Mapping.MetadataAccessor.Annotations(info.Object) + annots, err := info.Mapping.MetadataAccessor.Annotations(info.Object) if err != nil { return nil, err } - if annotations == nil { + if annots == nil { return nil, nil } - original, ok := annotations[LastAppliedConfigAnnotation] + original, ok := annots[annotations.LastAppliedConfigAnnotation] if !ok { return nil, nil } @@ -60,17 +57,17 @@ func SetOriginalConfiguration(info *resource.Info, original []byte) error { } accessor := info.Mapping.MetadataAccessor - annotations, err := accessor.Annotations(info.Object) + annots, err := accessor.Annotations(info.Object) if err != nil { return err } - if annotations == nil { - annotations = map[string]string{} + if annots == nil { + annots = map[string]string{} } - annotations[LastAppliedConfigAnnotation] = string(original) - if err := info.Mapping.MetadataAccessor.SetAnnotations(info.Object, annotations); err != nil { + annots[annotations.LastAppliedConfigAnnotation] = string(original) + if err := info.Mapping.MetadataAccessor.SetAnnotations(info.Object, annots); err != nil { return err } @@ -93,14 +90,14 @@ func GetModifiedConfiguration(info *resource.Info, annotate bool, codec runtime. } // Get the current annotations from the object. - annotations := accessor.GetAnnotations() - if annotations == nil { - annotations = map[string]string{} + annots := accessor.GetAnnotations() + if annots == nil { + annots = map[string]string{} } - original := annotations[LastAppliedConfigAnnotation] - delete(annotations, LastAppliedConfigAnnotation) - accessor.SetAnnotations(annotations) + original := annots[annotations.LastAppliedConfigAnnotation] + delete(annots, annotations.LastAppliedConfigAnnotation) + accessor.SetAnnotations(annots) // TODO: this needs to be abstracted - there should be no assumption that versioned object // can be marshalled to JSON. modified, err = json.Marshal(info.VersionedObject) @@ -109,8 +106,8 @@ func GetModifiedConfiguration(info *resource.Info, annotate bool, codec runtime. } if annotate { - annotations[LastAppliedConfigAnnotation] = string(modified) - accessor.SetAnnotations(annotations) + annots[annotations.LastAppliedConfigAnnotation] = string(modified) + accessor.SetAnnotations(annots) // TODO: this needs to be abstracted - there should be no assumption that versioned object // can be marshalled to JSON. modified, err = json.Marshal(info.VersionedObject) @@ -120,24 +117,24 @@ func GetModifiedConfiguration(info *resource.Info, annotate bool, codec runtime. } // Restore the object to its original condition. - annotations[LastAppliedConfigAnnotation] = original - accessor.SetAnnotations(annotations) + annots[annotations.LastAppliedConfigAnnotation] = original + accessor.SetAnnotations(annots) } else { // Otherwise, use the server side version of the object. accessor := info.Mapping.MetadataAccessor // Get the current annotations from the object. 
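// Why every local `annotations` in these hunks becomes `annots`: the file
// now imports the package k8s.io/kubernetes/pkg/api/annotations, and in Go
// a local variable shadows an imported package of the same name for the
// rest of the scope. Keeping the old name would make exactly the lookup
// these functions need, e.g.
//
//	original := annotations[annotations.LastAppliedConfigAnnotation]
//
// fail to compile, because `annotations` would already denote the map.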
- annotations, err := accessor.Annotations(info.Object) + annots, err := accessor.Annotations(info.Object) if err != nil { return nil, err } - if annotations == nil { - annotations = map[string]string{} + if annots == nil { + annots = map[string]string{} } - original := annotations[LastAppliedConfigAnnotation] - delete(annotations, LastAppliedConfigAnnotation) - if err := accessor.SetAnnotations(info.Object, annotations); err != nil { + original := annots[annotations.LastAppliedConfigAnnotation] + delete(annots, annotations.LastAppliedConfigAnnotation) + if err := accessor.SetAnnotations(info.Object, annots); err != nil { return nil, err } @@ -147,8 +144,8 @@ func GetModifiedConfiguration(info *resource.Info, annotate bool, codec runtime. } if annotate { - annotations[LastAppliedConfigAnnotation] = string(modified) - if err := info.Mapping.MetadataAccessor.SetAnnotations(info.Object, annotations); err != nil { + annots[annotations.LastAppliedConfigAnnotation] = string(modified) + if err := info.Mapping.MetadataAccessor.SetAnnotations(info.Object, annots); err != nil { return nil, err } @@ -159,8 +156,8 @@ func GetModifiedConfiguration(info *resource.Info, annotate bool, codec runtime. } // Restore the object to its original condition. - annotations[LastAppliedConfigAnnotation] = original - if err := info.Mapping.MetadataAccessor.SetAnnotations(info.Object, annotations); err != nil { + annots[annotations.LastAppliedConfigAnnotation] = original + if err := info.Mapping.MetadataAccessor.SetAnnotations(info.Object, annots); err != nil { return nil, err } } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/autoscale.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/autoscale.go index e831bca70ca3..e3a7fc088a91 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/autoscale.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/autoscale.go @@ -21,7 +21,7 @@ import ( "strconv" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/pkg/apis/autoscaling" "k8s.io/kubernetes/pkg/runtime" ) @@ -86,25 +86,26 @@ func (HorizontalPodAutoscalerV1Beta1) Generate(genericParams map[string]interfac } } - scaler := extensions.HorizontalPodAutoscaler{ + scaler := autoscaling.HorizontalPodAutoscaler{ ObjectMeta: api.ObjectMeta{ Name: name, }, - Spec: extensions.HorizontalPodAutoscalerSpec{ - ScaleRef: extensions.SubresourceReference{ - Kind: params["scaleRef-kind"], - Name: params["scaleRef-name"], - APIVersion: params["scaleRef-apiVersion"], - Subresource: scaleSubResource, + Spec: autoscaling.HorizontalPodAutoscalerSpec{ + ScaleTargetRef: autoscaling.CrossVersionObjectReference{ + Kind: params["scaleRef-kind"], + Name: params["scaleRef-name"], + APIVersion: params["scaleRef-apiVersion"], }, - MaxReplicas: max, + MaxReplicas: int32(max), }, } if min > 0 { - scaler.Spec.MinReplicas = &min + v := int32(min) + scaler.Spec.MinReplicas = &v } if cpu >= 0 { - scaler.Spec.CPUUtilization = &extensions.CPUTargetUtilization{cpu} + c := int32(cpu) + scaler.Spec.TargetCPUUtilizationPercentage = &c } return &scaler, nil } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/bash_comp_utils.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/bash_comp_utils.go index a89fef07a060..e3eaf30e4bb7 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/bash_comp_utils.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/bash_comp_utils.go @@ -28,7 +28,7 @@ import ( func AddJsonFilenameFlag(cmd *cobra.Command, value 
*[]string, usage string) { cmd.Flags().StringSliceVarP(value, "filename", "f", *value, usage) - annotations := []string{} + annotations := make([]string, 0, len(resource.FileExtensions)) for _, ext := range resource.FileExtensions { annotations = append(annotations, strings.TrimLeft(ext, ".")) } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/annotate.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/annotate.go index e4273c0a4e38..b2e4e3051d49 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/annotate.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/annotate.go @@ -21,11 +21,13 @@ import ( "encoding/json" "fmt" "io" + "regexp" "strings" "github.com/golang/glog" "github.com/spf13/cobra" "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/meta" "k8s.io/kubernetes/pkg/kubectl" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/resource" @@ -52,9 +54,18 @@ type AnnotateOptions struct { f *cmdutil.Factory out io.Writer cmd *cobra.Command + + recursive bool } const ( + annotate_resources = ` + pod (po), service (svc), replicationcontroller (rc), + node (no), event (ev), componentstatus (cs), + limitrange (limits), persistentvolume (pv), persistentvolumeclaim (pvc), + horizontalpodautoscaler (hpa), resourcequota (quota), secret +` + annotate_long = `Update the annotations on one or more resources. An annotation is a key/value pair that can hold larger (compared to a label), and possibly not human-readable, data. @@ -62,10 +73,8 @@ It is intended to store non-identifying auxiliary data, especially data manipula If --overwrite is true, then existing annotations can be overwritten, otherwise attempting to overwrite an annotation will result in an error. If --resource-version is specified, then updates will use this resource version, otherwise the existing resource-version will be used. -Possible resources include (case insensitive): pods (po), services (svc), -replicationcontrollers (rc), nodes (no), events (ev), componentstatuses (cs), -limitranges (limits), persistentvolumes (pv), persistentvolumeclaims (pvc), -horizontalpodautoscalers (hpa), resourcequotas (quota) or secrets.` +Possible resources include (case insensitive):` + annotate_resources + annotate_example = `# Update pod 'foo' with the annotation 'description' and the value 'my frontend'. # If the same annotation is set multiple times, only the last value will be applied kubectl annotate pods foo description='my frontend' @@ -90,6 +99,13 @@ kubectl annotate pods foo description-` func NewCmdAnnotate(f *cmdutil.Factory, out io.Writer) *cobra.Command { options := &AnnotateOptions{} + validArgs, argAliases := []string{}, []string{} + resources := regexp.MustCompile(`\s*,`).Split(annotate_resources, -1) + for _, r := range resources { + validArgs = append(validArgs, strings.Fields(r)[0]) + argAliases = kubectl.ResourceAliases(validArgs) + } + cmd := &cobra.Command{ Use: "annotate [--overwrite] (-f FILENAME | TYPE NAME) KEY_1=VAL_1 ...
KEY_N=VAL_N [--resource-version=version]", Short: "Update the annotations on a resource", @@ -106,14 +122,18 @@ func NewCmdAnnotate(f *cmdutil.Factory, out io.Writer) *cobra.Command { cmdutil.CheckErr(err) } }, + ValidArgs: validArgs, + ArgAliases: argAliases, } cmdutil.AddPrinterFlags(cmd) + cmdutil.AddInclude3rdPartyFlags(cmd) cmd.Flags().StringVarP(&options.selector, "selector", "l", "", "Selector (label query) to filter on") cmd.Flags().BoolVar(&options.overwrite, "overwrite", false, "If true, allow annotations to be overwritten, otherwise reject annotation updates that overwrite existing annotations.") cmd.Flags().BoolVar(&options.all, "all", false, "select all resources in the namespace of the specified resource types") cmd.Flags().StringVar(&options.resourceVersion, "resource-version", "", "If non-empty, the annotation update will only succeed if this is the current resource-version for the object. Only valid when specifying a single resource.") usage := "Filename, directory, or URL to a file identifying the resource to update the annotation" kubectl.AddJsonFilenameFlag(cmd, &options.filenames, usage) + cmdutil.AddRecursiveFlag(cmd, &options.recursive) cmdutil.AddRecordFlag(cmd) return cmd } @@ -128,22 +148,11 @@ func (o *AnnotateOptions) Complete(f *cmdutil.Factory, out io.Writer, cmd *cobra // retrieves resource and annotation args from args // also checks args to verify that all resources are specified before annotations - annotationArgs := []string{} - metAnnotaionArg := false - for _, s := range args { - isAnnotation := strings.Contains(s, "=") || strings.HasSuffix(s, "-") - switch { - case !metAnnotaionArg && isAnnotation: - metAnnotaionArg = true - fallthrough - case metAnnotaionArg && isAnnotation: - annotationArgs = append(annotationArgs, s) - case !metAnnotaionArg && !isAnnotation: - o.resources = append(o.resources, s) - case metAnnotaionArg && !isAnnotation: - return fmt.Errorf("all resources must be specified before annotation changes: %s", s) - } + resources, annotationArgs, err := cmdutil.GetResourcesAndPairs(args, "annotation") + if err != nil { + return err } + o.resources = resources if len(o.resources) < 1 && len(o.filenames) == 0 { return fmt.Errorf("one or more resources must be specified as <resource> <name> or <resource>/<name>") } @@ -158,11 +167,11 @@ func (o *AnnotateOptions) Complete(f *cmdutil.Factory, out io.Writer, cmd *cobra o.recordChangeCause = cmdutil.GetRecordFlag(cmd) o.changeCause = f.Command() - mapper, typer := f.Object() + mapper, typer := f.Object(cmdutil.GetIncludeThirdPartyAPIs(cmd)) o.builder = resource.NewBuilder(mapper, typer, resource.ClientMapperFunc(f.ClientForMapping), f.Decoder(true)). ContinueOnError(). NamespaceParam(namespace).DefaultNamespace(). - FilenameParam(enforceNamespace, o.filenames...). + FilenameParam(enforceNamespace, o.recursive, o.filenames...). SelectorParam(o.selector). ResourceTypeOrNameArgs(o.all, o.resources...). Flatten().
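The inline classification loop deleted above moves behind cmdutil.GetResourcesAndPairs; the pairType argument suggests the helper is shared with other key=value commands such as label. Its body is not part of this diff, so the following is a minimal sketch that preserves the deleted loop's contract (all resource arguments first, then key=value and key- pair arguments, in that order only), not the vendored implementation:

package cmdutil

import (
	"fmt"
	"strings"
)

// getResourcesAndPairs splits args into resource names and key=value /
// key- pair arguments, rejecting any resource that appears after the
// first pair (hypothetical stand-in for the vendored helper).
func getResourcesAndPairs(args []string, pairType string) (resources, pairArgs []string, err error) {
	metPairArg := false
	for _, s := range args {
		isPair := strings.Contains(s, "=") || strings.HasSuffix(s, "-")
		switch {
		case isPair:
			metPairArg = true
			pairArgs = append(pairArgs, s)
		case !metPairArg:
			resources = append(resources, s)
		default:
			return nil, nil, fmt.Errorf("all resources must be specified before %s changes: %s", pairType, s)
		}
	}
	return resources, pairArgs, nil
}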
@@ -201,7 +210,7 @@ func (o AnnotateOptions) RunAnnotate() error { return err } - obj, err := info.Mapping.ConvertToVersion(info.Object, info.Mapping.GroupVersionKind.GroupVersion().String()) + obj, err := info.Mapping.ConvertToVersion(info.Object, info.Mapping.GroupVersionKind.GroupVersion()) if err != nil { return err } @@ -243,11 +252,13 @@ func (o AnnotateOptions) RunAnnotate() error { if err != nil { return err } + + mapper, _ := o.f.Object(cmdutil.GetIncludeThirdPartyAPIs(o.cmd)) outputFormat := cmdutil.GetFlagString(o.cmd, "output") if outputFormat != "" { - return o.f.PrintObject(o.cmd, outputObj, o.out) + return o.f.PrintObject(o.cmd, mapper, outputObj, o.out) } - mapper, _ := o.f.Object() + cmdutil.PrintSuccess(mapper, false, o.out, info.Mapping.Resource, info.Name, "annotated") return nil }) @@ -255,34 +266,7 @@ func (o AnnotateOptions) RunAnnotate() error { // parseAnnotations retrieves new and remove annotations from annotation args func parseAnnotations(annotationArgs []string) (map[string]string, []string, error) { - var invalidBuf bytes.Buffer - newAnnotations := map[string]string{} - removeAnnotations := []string{} - for _, annotationArg := range annotationArgs { - if strings.Index(annotationArg, "=") != -1 { - parts := strings.SplitN(annotationArg, "=", 2) - if len(parts) != 2 || len(parts[1]) == 0 { - if invalidBuf.Len() > 0 { - invalidBuf.WriteString(", ") - } - invalidBuf.WriteString(fmt.Sprintf(annotationArg)) - } else { - newAnnotations[parts[0]] = parts[1] - } - } else if strings.HasSuffix(annotationArg, "-") { - removeAnnotations = append(removeAnnotations, annotationArg[:len(annotationArg)-1]) - } else { - if invalidBuf.Len() > 0 { - invalidBuf.WriteString(", ") - } - invalidBuf.WriteString(fmt.Sprintf(annotationArg)) - } - } - if invalidBuf.Len() > 0 { - return newAnnotations, removeAnnotations, fmt.Errorf("invalid annotation format: %s", invalidBuf.String()) - } - - return newAnnotations, removeAnnotations, nil + return cmdutil.ParsePairs(annotationArgs, "annotation", true) } // validateAnnotations checks the format of annotation args and checks removed annotations aren't in the new annotations map @@ -304,14 +288,14 @@ func validateAnnotations(removeAnnotations []string, newAnnotations map[string]s } // validateNoAnnotationOverwrites validates that when overwrite is false, to-be-updated annotations don't exist in the object annotation map (yet) -func validateNoAnnotationOverwrites(meta *api.ObjectMeta, annotations map[string]string) error { +func validateNoAnnotationOverwrites(accessor meta.Object, annotations map[string]string) error { var buf bytes.Buffer for key := range annotations { // change-cause annotation can always be overwritten if key == kubectl.ChangeCauseAnnotation { continue } - if value, found := meta.Annotations[key]; found { + if value, found := accessor.GetAnnotations()[key]; found { if buf.Len() > 0 { buf.WriteString("; ") } @@ -326,29 +310,31 @@ func validateNoAnnotationOverwrites(meta *api.ObjectMeta, annotations map[string // updateAnnotations updates annotations of obj func (o AnnotateOptions) updateAnnotations(obj runtime.Object) error { - meta, err := api.ObjectMetaFor(obj) + accessor, err := meta.Accessor(obj) if err != nil { return err } if !o.overwrite { - if err := validateNoAnnotationOverwrites(meta, o.newAnnotations); err != nil { + if err := validateNoAnnotationOverwrites(accessor, o.newAnnotations); err != nil { return err } } - if meta.Annotations == nil { - meta.Annotations = make(map[string]string) + annotations := 
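// Note the shape of this rewrite: read the annotation map through the
// generic meta.Accessor, mutate the local copy, then write it back with
// SetAnnotations. Unlike poking api.ObjectMeta fields directly, this
// works for any object kind the accessor understands.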
accessor.GetAnnotations() + if annotations == nil { + annotations = make(map[string]string) } for key, value := range o.newAnnotations { - meta.Annotations[key] = value + annotations[key] = value } for _, annotation := range o.removeAnnotations { - delete(meta.Annotations, annotation) + delete(annotations, annotation) } + accessor.SetAnnotations(annotations) if len(o.resourceVersion) != 0 { - meta.ResourceVersion = o.resourceVersion + accessor.SetResourceVersion(o.resourceVersion) } return nil } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/annotate_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/annotate_test.go new file mode 100644 index 000000000000..58730662ab0a --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/annotate_test.go @@ -0,0 +1,571 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cmd + +import ( + "bytes" + "net/http" + "reflect" + "strings" + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/client/restclient" + "k8s.io/kubernetes/pkg/client/unversioned/fake" + "k8s.io/kubernetes/pkg/runtime" +) + +func TestValidateAnnotationOverwrites(t *testing.T) { + tests := []struct { + meta *api.ObjectMeta + annotations map[string]string + expectErr bool + scenario string + }{ + { + meta: &api.ObjectMeta{ + Annotations: map[string]string{ + "a": "A", + "b": "B", + }, + }, + annotations: map[string]string{ + "a": "a", + "c": "C", + }, + scenario: "share first annotation", + expectErr: true, + }, + { + meta: &api.ObjectMeta{ + Annotations: map[string]string{ + "a": "A", + "c": "C", + }, + }, + annotations: map[string]string{ + "b": "B", + "c": "c", + }, + scenario: "share second annotation", + expectErr: true, + }, + { + meta: &api.ObjectMeta{ + Annotations: map[string]string{ + "a": "A", + "c": "C", + }, + }, + annotations: map[string]string{ + "b": "B", + "d": "D", + }, + scenario: "no overlap", + }, + { + meta: &api.ObjectMeta{}, + annotations: map[string]string{ + "a": "A", + "b": "B", + }, + scenario: "no annotations", + }, + } + for _, test := range tests { + err := validateNoAnnotationOverwrites(test.meta, test.annotations) + if test.expectErr && err == nil { + t.Errorf("%s: unexpected non-error", test.scenario) + } else if !test.expectErr && err != nil { + t.Errorf("%s: unexpected error: %v", test.scenario, err) + } + } +} + +func TestParseAnnotations(t *testing.T) { + testURL := "https://test.com/index.htm?id=123#u=user-name" + testJSON := `'{"kind":"SerializedReference","apiVersion":"v1","reference":{"kind":"ReplicationController","namespace":"default","name":"my-nginx","uid":"c544ee78-2665-11e5-8051-42010af0c213","apiVersion":"v1","resourceVersion":"61368"}}'` + tests := []struct { + annotations []string + expected map[string]string + expectedRemove []string + scenario string + expectedErr string + expectErr bool + }{ + { + annotations: []string{"a=b", "c=d"}, + expected: map[string]string{"a": "b", 
"c": "d"}, + expectedRemove: []string{}, + scenario: "add two annotations", + expectErr: false, + }, + { + annotations: []string{"url=" + testURL, "kubernetes.io/created-by=" + testJSON}, + expected: map[string]string{"url": testURL, "kubernetes.io/created-by": testJSON}, + expectedRemove: []string{}, + scenario: "add annotations with special characters", + expectErr: false, + }, + { + annotations: []string{}, + expected: map[string]string{}, + expectedRemove: []string{}, + scenario: "add no annotations", + expectErr: false, + }, + { + annotations: []string{"a=b", "c=d", "e-"}, + expected: map[string]string{"a": "b", "c": "d"}, + expectedRemove: []string{"e"}, + scenario: "add two annotations, remove one", + expectErr: false, + }, + { + annotations: []string{"ab", "c=d"}, + expectedErr: "invalid annotation format: ab", + scenario: "incorrect annotation input (missing =value)", + expectErr: true, + }, + { + annotations: []string{"a="}, + expectedErr: "invalid annotation format: a=", + scenario: "incorrect annotation input (missing value)", + expectErr: true, + }, + { + annotations: []string{"ab", "a="}, + expectedErr: "invalid annotation format: ab, a=", + scenario: "incorrect multiple annotation input (missing value)", + expectErr: true, + }, + } + for _, test := range tests { + annotations, remove, err := parseAnnotations(test.annotations) + switch { + case test.expectErr && err == nil: + t.Errorf("%s: unexpected non-error, should return %v", test.scenario, test.expectedErr) + case test.expectErr && err.Error() != test.expectedErr: + t.Errorf("%s: unexpected error %v, expected %v", test.scenario, err, test.expectedErr) + case !test.expectErr && err != nil: + t.Errorf("%s: unexpected error %v", test.scenario, err) + case !test.expectErr && !reflect.DeepEqual(annotations, test.expected): + t.Errorf("%s: expected %v, got %v", test.scenario, test.expected, annotations) + case !test.expectErr && !reflect.DeepEqual(remove, test.expectedRemove): + t.Errorf("%s: expected %v, got %v", test.scenario, test.expectedRemove, remove) + } + } +} + +func TestValidateAnnotations(t *testing.T) { + tests := []struct { + removeAnnotations []string + newAnnotations map[string]string + expectedErr string + scenario string + }{ + { + expectedErr: "can not both modify and remove the following annotation(s) in the same command: a", + removeAnnotations: []string{"a"}, + newAnnotations: map[string]string{"a": "b", "c": "d"}, + scenario: "remove an added annotation", + }, + { + expectedErr: "can not both modify and remove the following annotation(s) in the same command: a, c", + removeAnnotations: []string{"a", "c"}, + newAnnotations: map[string]string{"a": "b", "c": "d"}, + scenario: "remove added annotations", + }, + } + for _, test := range tests { + if err := validateAnnotations(test.removeAnnotations, test.newAnnotations); err == nil { + t.Errorf("%s: unexpected non-error", test.scenario) + } else if err.Error() != test.expectedErr { + t.Errorf("%s: expected error %s, got %s", test.scenario, test.expectedErr, err.Error()) + } + } +} + +func TestUpdateAnnotations(t *testing.T) { + tests := []struct { + obj runtime.Object + overwrite bool + version string + annotations map[string]string + remove []string + expected runtime.Object + expectErr bool + }{ + { + obj: &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Annotations: map[string]string{"a": "b"}, + }, + }, + annotations: map[string]string{"a": "b"}, + expectErr: true, + }, + { + obj: &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Annotations: map[string]string{"a": "b"}, 
+ }, + }, + annotations: map[string]string{"a": "c"}, + overwrite: true, + expected: &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Annotations: map[string]string{"a": "c"}, + }, + }, + }, + { + obj: &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Annotations: map[string]string{"a": "b"}, + }, + }, + annotations: map[string]string{"c": "d"}, + expected: &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Annotations: map[string]string{"a": "b", "c": "d"}, + }, + }, + }, + { + obj: &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Annotations: map[string]string{"a": "b"}, + }, + }, + annotations: map[string]string{"c": "d"}, + version: "2", + expected: &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Annotations: map[string]string{"a": "b", "c": "d"}, + ResourceVersion: "2", + }, + }, + }, + { + obj: &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Annotations: map[string]string{"a": "b"}, + }, + }, + annotations: map[string]string{}, + remove: []string{"a"}, + expected: &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Annotations: map[string]string{}, + }, + }, + }, + { + obj: &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Annotations: map[string]string{"a": "b", "c": "d"}, + }, + }, + annotations: map[string]string{"e": "f"}, + remove: []string{"a"}, + expected: &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Annotations: map[string]string{ + "c": "d", + "e": "f", + }, + }, + }, + }, + { + obj: &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Annotations: map[string]string{"a": "b", "c": "d"}, + }, + }, + annotations: map[string]string{"e": "f"}, + remove: []string{"g"}, + expected: &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Annotations: map[string]string{ + "a": "b", + "c": "d", + "e": "f", + }, + }, + }, + }, + { + obj: &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Annotations: map[string]string{"a": "b", "c": "d"}, + }, + }, + remove: []string{"e"}, + expected: &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Annotations: map[string]string{ + "a": "b", + "c": "d", + }, + }, + }, + }, + { + obj: &api.Pod{ + ObjectMeta: api.ObjectMeta{}, + }, + annotations: map[string]string{"a": "b"}, + expected: &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Annotations: map[string]string{"a": "b"}, + }, + }, + }, + } + for _, test := range tests { + options := &AnnotateOptions{ + overwrite: test.overwrite, + newAnnotations: test.annotations, + removeAnnotations: test.remove, + resourceVersion: test.version, + } + err := options.updateAnnotations(test.obj) + if test.expectErr { + if err == nil { + t.Errorf("unexpected non-error: %v", test) + } + continue + } + if !test.expectErr && err != nil { + t.Errorf("unexpected error: %v %v", err, test) + } + if !reflect.DeepEqual(test.obj, test.expected) { + t.Errorf("expected: %v, got %v", test.expected, test.obj) + } + } +} + +func TestAnnotateErrors(t *testing.T) { + testCases := map[string]struct { + args []string + flags map[string]string + errFn func(error) bool + }{ + "no args": { + args: []string{}, + errFn: func(err error) bool { return strings.Contains(err.Error(), "one or more resources must be specified") }, + }, + "not enough annotations": { + args: []string{"pods"}, + errFn: func(err error) bool { + return strings.Contains(err.Error(), "at least one annotation update is required") + }, + }, + "no resources remove annotations": { + args: []string{"pods-"}, + errFn: func(err error) bool { return strings.Contains(err.Error(), "one or more resources must be specified") }, + }, + "no resources add annotations": { + args: []string{"pods=bar"}, + errFn: func(err error) bool { return strings.Contains(err.Error(), "one or more resources 
must be specified") }, + }, + } + + for k, testCase := range testCases { + f, tf, _ := NewAPIFactory() + tf.Printer = &testPrinter{} + tf.Namespace = "test" + tf.ClientConfig = &restclient.Config{ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}} + + buf := bytes.NewBuffer([]byte{}) + cmd := NewCmdAnnotate(f, buf) + cmd.SetOutput(buf) + + for k, v := range testCase.flags { + cmd.Flags().Set(k, v) + } + options := &AnnotateOptions{} + err := options.Complete(f, buf, cmd, testCase.args) + if !testCase.errFn(err) { + t.Errorf("%s: unexpected error: %v", k, err) + continue + } + if tf.Printer.(*testPrinter).Objects != nil { + t.Errorf("unexpected print to default printer") + } + if buf.Len() > 0 { + t.Errorf("buffer should be empty: %s", string(buf.Bytes())) + } + } +} + +func TestAnnotateObject(t *testing.T) { + pods, _, _ := testData() + + f, tf, codec := NewAPIFactory() + tf.Printer = &testPrinter{} + tf.Client = &fake.RESTClient{ + Codec: codec, + Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { + switch req.Method { + case "GET": + switch req.URL.Path { + case "/namespaces/test/pods/foo": + return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, &pods.Items[0])}, nil + default: + t.Fatalf("unexpected request: %#v\n%#v", req.URL, req) + return nil, nil + } + case "PATCH": + switch req.URL.Path { + case "/namespaces/test/pods/foo": + return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, &pods.Items[0])}, nil + default: + t.Fatalf("unexpected request: %#v\n%#v", req.URL, req) + return nil, nil + } + default: + t.Fatalf("unexpected request: %s %#v\n%#v", req.Method, req.URL, req) + return nil, nil + } + }), + } + tf.Namespace = "test" + tf.ClientConfig = &restclient.Config{ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}} + + buf := bytes.NewBuffer([]byte{}) + cmd := NewCmdAnnotate(f, buf) + cmd.SetOutput(buf) + options := &AnnotateOptions{} + args := []string{"pods/foo", "a=b", "c-"} + if err := options.Complete(f, buf, cmd, args); err != nil { + t.Fatalf("unexpected error: %v", err) + } + if err := options.Validate(args); err != nil { + t.Fatalf("unexpected error: %v", err) + } + if err := options.RunAnnotate(); err != nil { + t.Fatalf("unexpected error: %v", err) + } +} + +func TestAnnotateObjectFromFile(t *testing.T) { + pods, _, _ := testData() + + f, tf, codec := NewAPIFactory() + tf.Printer = &testPrinter{} + tf.Client = &fake.RESTClient{ + Codec: codec, + Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { + switch req.Method { + case "GET": + switch req.URL.Path { + case "/namespaces/test/replicationcontrollers/cassandra": + return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, &pods.Items[0])}, nil + default: + t.Fatalf("unexpected request: %#v\n%#v", req.URL, req) + return nil, nil + } + case "PATCH": + switch req.URL.Path { + case "/namespaces/test/replicationcontrollers/cassandra": + return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, &pods.Items[0])}, nil + default: + t.Fatalf("unexpected request: %#v\n%#v", req.URL, req) + return nil, nil + } + default: + t.Fatalf("unexpected request: %s %#v\n%#v", req.Method, req.URL, req) + return nil, nil + } + }), + } + tf.Namespace = "test" + tf.ClientConfig = &restclient.Config{ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}} + + buf := 
bytes.NewBuffer([]byte{}) + cmd := NewCmdAnnotate(f, buf) + cmd.SetOutput(buf) + options := &AnnotateOptions{} + options.filenames = []string{"../../../examples/cassandra/cassandra-controller.yaml"} + args := []string{"a=b", "c-"} + if err := options.Complete(f, buf, cmd, args); err != nil { + t.Fatalf("unexpected error: %v", err) + } + if err := options.Validate(args); err != nil { + t.Fatalf("unexpected error: %v", err) + } + if err := options.RunAnnotate(); err != nil { + t.Fatalf("unexpected error: %v", err) + } +} + +func TestAnnotateMultipleObjects(t *testing.T) { + pods, _, _ := testData() + + f, tf, codec := NewAPIFactory() + tf.Printer = &testPrinter{} + tf.Client = &fake.RESTClient{ + Codec: codec, + Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { + switch req.Method { + case "GET": + switch req.URL.Path { + case "/namespaces/test/pods": + return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, pods)}, nil + default: + t.Fatalf("unexpected request: %#v\n%#v", req.URL, req) + return nil, nil + } + case "PATCH": + switch req.URL.Path { + case "/namespaces/test/pods/foo": + return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, &pods.Items[0])}, nil + case "/namespaces/test/pods/bar": + return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, &pods.Items[1])}, nil + default: + t.Fatalf("unexpected request: %#v\n%#v", req.URL, req) + return nil, nil + } + default: + t.Fatalf("unexpected request: %s %#v\n%#v", req.Method, req.URL, req) + return nil, nil + } + }), + } + tf.Namespace = "test" + tf.ClientConfig = &restclient.Config{ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}} + + buf := bytes.NewBuffer([]byte{}) + cmd := NewCmdAnnotate(f, buf) + cmd.SetOutput(buf) + options := &AnnotateOptions{} + options.all = true + args := []string{"pods", "a=b", "c-"} + if err := options.Complete(f, buf, cmd, args); err != nil { + t.Fatalf("unexpected error: %v", err) + } + if err := options.Validate(args); err != nil { + t.Fatalf("unexpected error: %v", err) + } + if err := options.RunAnnotate(); err != nil { + t.Fatalf("unexpected error: %v", err) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/apply.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/apply.go index 3ca6c8e4f706..d63b79200c22 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/apply.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/apply.go @@ -35,11 +35,13 @@ import ( // add them here instead of referencing the cmd.Flags() type ApplyOptions struct { Filenames []string + Recursive bool } const ( apply_long = `Apply a configuration to a resource by filename or stdin. The resource will be created if it doesn't exist yet. +To use 'apply', always create the resource initially with either 'apply' or 'create --save-config'. JSON and YAML formats are accepted.` apply_example = `# Apply the configuration in pod.json to a pod. 
@@ -68,8 +70,10 @@ func NewCmdApply(f *cmdutil.Factory, out io.Writer) *cobra.Command { kubectl.AddJsonFilenameFlag(cmd, &options.Filenames, usage) cmd.MarkFlagRequired("filename") cmdutil.AddValidateFlags(cmd) + cmdutil.AddRecursiveFlag(cmd, &options.Recursive) cmdutil.AddOutputFlagsForMutation(cmd) cmdutil.AddRecordFlag(cmd) + cmdutil.AddInclude3rdPartyFlags(cmd) return cmd } @@ -93,12 +97,12 @@ func RunApply(f *cmdutil.Factory, cmd *cobra.Command, out io.Writer, options *Ap return err } - mapper, typer := f.Object() + mapper, typer := f.Object(cmdutil.GetIncludeThirdPartyAPIs(cmd)) r := resource.NewBuilder(mapper, typer, resource.ClientMapperFunc(f.ClientForMapping), f.Decoder(true)). Schema(schema). ContinueOnError(). NamespaceParam(cmdNamespace).DefaultNamespace(). - FilenameParam(enforceNamespace, options.Filenames...). + FilenameParam(enforceNamespace, options.Recursive, options.Filenames...). Flatten(). Do() err = r.Err() diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/apply_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/apply_test.go new file mode 100644 index 000000000000..a711802d7be4 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/apply_test.go @@ -0,0 +1,317 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cmd + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + "os" + "testing" + + "github.com/ghodss/yaml" + "github.com/spf13/cobra" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/annotations" + "k8s.io/kubernetes/pkg/api/meta" + "k8s.io/kubernetes/pkg/client/unversioned/fake" + cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" + "k8s.io/kubernetes/pkg/runtime" +) + +func TestApplyExtraArgsFail(t *testing.T) { + buf := bytes.NewBuffer([]byte{}) + + f, _, _ := NewAPIFactory() + c := NewCmdApply(f, buf) + if validateApplyArgs(c, []string{"rc"}) == nil { + t.Fatalf("unexpected non-error") + } +} + +func validateApplyArgs(cmd *cobra.Command, args []string) error { + if len(args) != 0 { + return cmdutil.UsageError(cmd, "Unexpected args: %v", args) + } + return nil +} + +const ( + filenameRC = "../../../test/fixtures/pkg/kubectl/cmd/apply/rc.yaml" + filenameSVC = "../../../test/fixtures/pkg/kubectl/cmd/apply/service.yaml" + filenameRCSVC = "../../../test/fixtures/pkg/kubectl/cmd/apply/rc-service.yaml" +) + +func readBytesFromFile(t *testing.T, filename string) []byte { + file, err := os.Open(filename) + if err != nil { + t.Fatal(err) + } + + data, err := ioutil.ReadAll(file) + if err != nil { + t.Fatal(err) + } + + return data +} + +func readReplicationControllerFromFile(t *testing.T, filename string) *api.ReplicationController { + data := readBytesFromFile(t, filename) + rc := api.ReplicationController{} + // TODO(jackgr): Replace with a call to testapi.Codec().Decode(). 
+ if err := yaml.Unmarshal(data, &rc); err != nil { + t.Fatal(err) + } + + return &rc +} + +func readServiceFromFile(t *testing.T, filename string) *api.Service { + data := readBytesFromFile(t, filename) + svc := api.Service{} + // TODO(jackgr): Replace with a call to testapi.Codec().Decode(). + if err := yaml.Unmarshal(data, &svc); err != nil { + t.Fatal(err) + } + + return &svc +} + +func annotateRuntimeObject(t *testing.T, originalObj, currentObj runtime.Object, kind string) (string, []byte) { + originalAccessor, err := meta.Accessor(originalObj) + if err != nil { + t.Fatal(err) + } + + originalLabels := originalAccessor.GetLabels() + originalLabels["DELETE_ME"] = "DELETE_ME" + originalAccessor.SetLabels(originalLabels) + original, err := json.Marshal(originalObj) + if err != nil { + t.Fatal(err) + } + + currentAccessor, err := meta.Accessor(currentObj) + if err != nil { + t.Fatal(err) + } + + currentAnnotations := currentAccessor.GetAnnotations() + if currentAnnotations == nil { + currentAnnotations = make(map[string]string) + } + currentAnnotations[annotations.LastAppliedConfigAnnotation] = string(original) + currentAccessor.SetAnnotations(currentAnnotations) + current, err := json.Marshal(currentObj) + if err != nil { + t.Fatal(err) + } + + return currentAccessor.GetName(), current +} + +func readAndAnnotateReplicationController(t *testing.T, filename string) (string, []byte) { + rc1 := readReplicationControllerFromFile(t, filename) + rc2 := readReplicationControllerFromFile(t, filename) + return annotateRuntimeObject(t, rc1, rc2, "ReplicationController") +} + +func readAndAnnotateService(t *testing.T, filename string) (string, []byte) { + svc1 := readServiceFromFile(t, filename) + svc2 := readServiceFromFile(t, filename) + return annotateRuntimeObject(t, svc1, svc2, "Service") +} + +func validatePatchApplication(t *testing.T, req *http.Request) { + patch, err := ioutil.ReadAll(req.Body) + if err != nil { + t.Fatal(err) + } + + patchMap := map[string]interface{}{} + if err := json.Unmarshal(patch, &patchMap); err != nil { + t.Fatal(err) + } + + annotationsMap := walkMapPath(t, patchMap, []string{"metadata", "annotations"}) + if _, ok := annotationsMap[annotations.LastAppliedConfigAnnotation]; !ok { + t.Fatalf("patch does not contain annotation:\n%s\n", patch) + } + + labelMap := walkMapPath(t, patchMap, []string{"metadata", "labels"}) + if deleteMe, ok := labelMap["DELETE_ME"]; !ok || deleteMe != nil { + t.Fatalf("patch does not remove deleted key: DELETE_ME:\n%s\n", patch) + } +} + +func walkMapPath(t *testing.T, start map[string]interface{}, path []string) map[string]interface{} { + finish := start + for i := 0; i < len(path); i++ { + var ok bool + finish, ok = finish[path[i]].(map[string]interface{}) + if !ok { + t.Fatalf("key:%s of path:%v not found in map:%v", path[i], path, start) + } + } + + return finish +} + +func TestApplyObject(t *testing.T) { + initTestErrorHandler(t) + nameRC, currentRC := readAndAnnotateReplicationController(t, filenameRC) + pathRC := "/namespaces/test/replicationcontrollers/" + nameRC + + f, tf, codec := NewAPIFactory() + tf.Printer = &testPrinter{} + tf.Client = &fake.RESTClient{ + Codec: codec, + Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { + switch p, m := req.URL.Path, req.Method; { + case p == pathRC && m == "GET": + bodyRC := ioutil.NopCloser(bytes.NewReader(currentRC)) + return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: bodyRC}, nil + case p == pathRC && m == "PATCH": + 
validatePatchApplication(t, req) + bodyRC := ioutil.NopCloser(bytes.NewReader(currentRC)) + return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: bodyRC}, nil + default: + t.Fatalf("unexpected request: %#v\n%#v", req.URL, req) + return nil, nil + } + }), + } + tf.Namespace = "test" + buf := bytes.NewBuffer([]byte{}) + + cmd := NewCmdApply(f, buf) + cmd.Flags().Set("filename", filenameRC) + cmd.Flags().Set("output", "name") + cmd.Run(cmd, []string{}) + + // uses the name from the file, not the response + expectRC := "replicationcontroller/" + nameRC + "\n" + if buf.String() != expectRC { + t.Fatalf("unexpected output: %s\nexpected: %s", buf.String(), expectRC) + } +} + +func TestApplyNonExistObject(t *testing.T) { + nameRC, currentRC := readAndAnnotateReplicationController(t, filenameRC) + pathRC := "/namespaces/test/replicationcontrollers" + pathNameRC := pathRC + "/" + nameRC + + f, tf, codec := NewAPIFactory() + tf.Printer = &testPrinter{} + tf.Client = &fake.RESTClient{ + Codec: codec, + Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { + switch p, m := req.URL.Path, req.Method; { + case p == pathNameRC && m == "GET": + return &http.Response{StatusCode: 404, Header: defaultHeader(), Body: ioutil.NopCloser(bytes.NewReader(nil))}, nil + case p == pathRC && m == "POST": + bodyRC := ioutil.NopCloser(bytes.NewReader(currentRC)) + return &http.Response{StatusCode: 201, Header: defaultHeader(), Body: bodyRC}, nil + default: + t.Fatalf("unexpected request: %#v\n%#v", req.URL, req) + return nil, nil + } + }), + } + tf.Namespace = "test" + buf := bytes.NewBuffer([]byte{}) + + cmd := NewCmdApply(f, buf) + cmd.Flags().Set("filename", filenameRC) + cmd.Flags().Set("output", "name") + cmd.Run(cmd, []string{}) + + // uses the name from the file, not the response + expectRC := "replicationcontroller/" + nameRC + "\n" + if buf.String() != expectRC { + t.Errorf("unexpected output: %s\nexpected: %s", buf.String(), expectRC) + } +} + +func TestApplyMultipleObjectsAsList(t *testing.T) { + testApplyMultipleObjects(t, true) +} + +func TestApplyMultipleObjectsAsFiles(t *testing.T) { + testApplyMultipleObjects(t, false) +} + +func testApplyMultipleObjects(t *testing.T, asList bool) { + nameRC, currentRC := readAndAnnotateReplicationController(t, filenameRC) + pathRC := "/namespaces/test/replicationcontrollers/" + nameRC + + nameSVC, currentSVC := readAndAnnotateService(t, filenameSVC) + pathSVC := "/namespaces/test/services/" + nameSVC + + f, tf, codec := NewAPIFactory() + tf.Printer = &testPrinter{} + tf.Client = &fake.RESTClient{ + Codec: codec, + Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { + switch p, m := req.URL.Path, req.Method; { + case p == pathRC && m == "GET": + bodyRC := ioutil.NopCloser(bytes.NewReader(currentRC)) + return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: bodyRC}, nil + case p == pathRC && m == "PATCH": + validatePatchApplication(t, req) + bodyRC := ioutil.NopCloser(bytes.NewReader(currentRC)) + return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: bodyRC}, nil + case p == pathSVC && m == "GET": + bodySVC := ioutil.NopCloser(bytes.NewReader(currentSVC)) + return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: bodySVC}, nil + case p == pathSVC && m == "PATCH": + validatePatchApplication(t, req) + bodySVC := ioutil.NopCloser(bytes.NewReader(currentSVC)) + return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: bodySVC}, nil + default: + 
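// Any request that is not one of the fixture GET/PATCH pairs handled
// above means apply issued an unexpected call, so the handler fails
// the test immediately.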
t.Fatalf("unexpected request: %#v\n%#v", req.URL, req) + return nil, nil + } + }), + } + tf.Namespace = "test" + buf := bytes.NewBuffer([]byte{}) + + cmd := NewCmdApply(f, buf) + if asList { + cmd.Flags().Set("filename", filenameRCSVC) + } else { + cmd.Flags().Set("filename", filenameRC) + cmd.Flags().Set("filename", filenameSVC) + } + cmd.Flags().Set("output", "name") + + cmd.Run(cmd, []string{}) + + // Names should come from the REST response, NOT the files + expectRC := "replicationcontroller/" + nameRC + "\n" + expectSVC := "service/" + nameSVC + "\n" + // Test both possible orders since output is non-deterministic. + expectOne := expectRC + expectSVC + expectTwo := expectSVC + expectRC + if buf.String() != expectOne && buf.String() != expectTwo { + t.Fatalf("unexpected output: %s\nexpected: %s OR %s", buf.String(), expectOne, expectTwo) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/attach.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/attach.go index e05032c128f3..3150bccaee55 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/attach.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/attach.go @@ -20,19 +20,19 @@ import ( "fmt" "io" "net/url" - "os" - "os/signal" - "syscall" - "github.com/docker/docker/pkg/term" "github.com/golang/glog" "github.com/spf13/cobra" + "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/client/restclient" client "k8s.io/kubernetes/pkg/client/unversioned" "k8s.io/kubernetes/pkg/client/unversioned/remotecommand" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" + remotecommandserver "k8s.io/kubernetes/pkg/kubelet/server/remotecommand" utilerrors "k8s.io/kubernetes/pkg/util/errors" + "k8s.io/kubernetes/pkg/util/interrupt" + "k8s.io/kubernetes/pkg/util/term" ) const ( @@ -53,6 +53,8 @@ func NewCmdAttach(f *cmdutil.Factory, cmdIn io.Reader, cmdOut, cmdErr io.Writer) Out: cmdOut, Err: cmdErr, + CommandName: "kubectl attach", + Attach: &DefaultRemoteAttach{}, } cmd := &cobra.Command{ @@ -86,7 +88,7 @@ func (*DefaultRemoteAttach) Attach(method string, url *url.URL, config *restclie if err != nil { return err } - return exec.Stream(stdin, stdout, stderr, tty) + return exec.Stream(remotecommandserver.SupportedStreamingProtocols, stdin, stdout, stderr, tty) } // AttachOptions declare the arguments accepted by the Exec command @@ -96,11 +98,17 @@ type AttachOptions struct { ContainerName string Stdin bool TTY bool + CommandName string + + // InterruptParent, if set, is used to handle interrupts while attached + InterruptParent *interrupt.Handler In io.Reader Out io.Writer Err io.Writer + Pod *api.Pod + Attach RemoteAttach Client *client.Client Config *restclient.Config @@ -154,80 +162,65 @@ func (p *AttachOptions) Validate() error { // Run executes a validated remote execution against a pod. 
func (p *AttachOptions) Run() error { - pod, err := p.Client.Pods(p.Namespace).Get(p.PodName) - if err != nil { - return err + if p.Pod == nil { + pod, err := p.Client.Pods(p.Namespace).Get(p.PodName) + if err != nil { + return err + } + if pod.Status.Phase != api.PodRunning { + return fmt.Errorf("pod %s is not running and cannot be attached to; current phase is %s", p.PodName, pod.Status.Phase) + } + p.Pod = pod + // TODO: convert this to a clean "wait" behavior } + pod := p.Pod - if pod.Status.Phase != api.PodRunning { - return fmt.Errorf("pod %s is not running and cannot be attached to; current phase is %s", p.PodName, pod.Status.Phase) - } + // ensure we can recover the terminal while attached + t := term.TTY{Parent: p.InterruptParent} - var stdin io.Reader + // check for TTY tty := p.TTY - containerToAttach := p.GetContainer(pod) if tty && !containerToAttach.TTY { tty = false - fmt.Fprintf(p.Err, "Unable to use a TTY - container %s doesn't allocate one\n", containerToAttach.Name) + fmt.Fprintf(p.Err, "Unable to use a TTY - container %s did not allocate one\n", containerToAttach.Name) } - - // TODO: refactor with terminal helpers from the edit utility once that is merged if p.Stdin { - stdin = p.In - if tty { - if file, ok := stdin.(*os.File); ok { - inFd := file.Fd() - if term.IsTerminal(inFd) { - oldState, err := term.SetRawTerminal(inFd) - if err != nil { - glog.Fatal(err) - } - fmt.Fprintln(p.Out, "\nHit enter for command prompt") - // this handles a clean exit, where the command finished - defer term.RestoreTerminal(inFd, oldState) - - // SIGINT is handled by term.SetRawTerminal (it runs a goroutine that listens - // for SIGINT and restores the terminal before exiting) - - // this handles SIGTERM - sigChan := make(chan os.Signal, 1) - signal.Notify(sigChan, syscall.SIGTERM) - go func() { - <-sigChan - term.RestoreTerminal(inFd, oldState) - os.Exit(0) - }() - } else { - fmt.Fprintln(p.Err, "STDIN is not a terminal") - } - } else { - tty = false - fmt.Fprintln(p.Err, "Unable to use a TTY - input is not the right kind of file") - } + t.In = p.In + if tty && !t.IsTerminal() { + tty = false + fmt.Fprintln(p.Err, "Unable to use a TTY - input is not a terminal or the right kind of file") } } + t.Raw = tty - // TODO: consider abstracting into a client invocation or client helper - req := p.Client.RESTClient.Post(). - Resource("pods"). - Name(pod.Name). - Namespace(pod.Namespace). - SubResource("attach") - req.VersionedParams(&api.PodAttachOptions{ - Container: containerToAttach.Name, - Stdin: stdin != nil, - Stdout: p.Out != nil, - Stderr: p.Err != nil, - TTY: tty, - }, api.ParameterCodec) - - err = p.Attach.Attach("POST", req.URL(), p.Config, stdin, p.Out, p.Err, tty) - if err != nil { + fn := func() error { + if tty { + fmt.Fprintln(p.Out, "\nHit enter for command prompt") + } + // TODO: consider abstracting into a client invocation or client helper + req := p.Client.RESTClient.Post(). + Resource("pods"). + Name(pod.Name). + Namespace(pod.Namespace). 
+ SubResource("attach") + req.VersionedParams(&api.PodAttachOptions{ + Container: containerToAttach.Name, + Stdin: p.In != nil, + Stdout: p.Out != nil, + Stderr: p.Err != nil, + TTY: tty, + }, api.ParameterCodec) + + return p.Attach.Attach("POST", req.URL(), p.Config, p.In, p.Out, p.Err, tty) + } + + if err := t.Safe(fn); err != nil { return err } + if p.Stdin && tty && pod.Spec.RestartPolicy == api.RestartPolicyAlways { - fmt.Fprintf(p.Out, "Session ended, resume using 'kubectl attach %s -c %s -i -t' command when the pod is running\n", pod.Name, containerToAttach.Name) + fmt.Fprintf(p.Out, "Session ended, resume using '%s %s -c %s -i -t' command when the pod is running\n", p.CommandName, pod.Name, containerToAttach.Name) } return nil } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/attach_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/attach_test.go new file mode 100644 index 000000000000..29128ae51881 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/attach_test.go @@ -0,0 +1,279 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cmd + +import ( + "bytes" + "fmt" + "io" + "net/http" + "net/url" + "strings" + "testing" + + "github.com/spf13/cobra" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/client/restclient" + "k8s.io/kubernetes/pkg/client/unversioned/fake" +) + +type fakeRemoteAttach struct { + method string + url *url.URL + attachErr error +} + +func (f *fakeRemoteAttach) Attach(method string, url *url.URL, config *restclient.Config, stdin io.Reader, stdout, stderr io.Writer, tty bool) error { + f.method = method + f.url = url + return f.attachErr +} + +func TestPodAndContainerAttach(t *testing.T) { + tests := []struct { + args []string + p *AttachOptions + name string + expectError bool + expectedPod string + expectedContainer string + }{ + { + p: &AttachOptions{}, + expectError: true, + name: "empty", + }, + { + p: &AttachOptions{}, + args: []string{"foo", "bar"}, + expectError: true, + name: "too many args", + }, + { + p: &AttachOptions{}, + args: []string{"foo"}, + expectedPod: "foo", + name: "no container, no flags", + }, + { + p: &AttachOptions{ContainerName: "bar"}, + args: []string{"foo"}, + expectedPod: "foo", + expectedContainer: "bar", + name: "container in flag", + }, + } + for _, test := range tests { + f, tf, codec := NewAPIFactory() + tf.Client = &fake.RESTClient{ + Codec: codec, + Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { return nil, nil }), + } + tf.Namespace = "test" + tf.ClientConfig = &restclient.Config{} + + cmd := &cobra.Command{} + options := test.p + err := options.Complete(f, cmd, test.args) + if test.expectError && err == nil { + t.Errorf("unexpected non-error (%s)", test.name) + } + if !test.expectError && err != nil { + t.Errorf("unexpected error: %v (%s)", err, test.name) + } + if err != nil { + continue + } 
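// Error scenarios were fully handled above (note the continue); only
// successful Complete calls fall through to the pod and container
// assertions below.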
+ if options.PodName != test.expectedPod { + t.Errorf("expected: %s, got: %s (%s)", test.expectedPod, options.PodName, test.name) + } + if options.ContainerName != test.expectedContainer { + t.Errorf("expected: %s, got: %s (%s)", test.expectedContainer, options.ContainerName, test.name) + } + } +} + +func TestAttach(t *testing.T) { + version := testapi.Default.GroupVersion().Version + tests := []struct { + name, version, podPath, attachPath, container string + pod *api.Pod + attachErr bool + }{ + { + name: "pod attach", + version: version, + podPath: "/api/" + version + "/namespaces/test/pods/foo", + attachPath: "/api/" + version + "/namespaces/test/pods/foo/attach", + pod: attachPod(), + }, + { + name: "pod attach error", + version: version, + podPath: "/api/" + version + "/namespaces/test/pods/foo", + attachPath: "/api/" + version + "/namespaces/test/pods/foo/attach", + pod: attachPod(), + attachErr: true, + }, + } + for _, test := range tests { + f, tf, codec := NewAPIFactory() + tf.Client = &fake.RESTClient{ + Codec: codec, + Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { + switch p, m := req.URL.Path, req.Method; { + case p == test.podPath && m == "GET": + body := objBody(codec, test.pod) + return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: body}, nil + default: + // Ensures no GET is performed when deleting by name + t.Errorf("%s: unexpected request: %s %#v\n%#v", test.name, req.Method, req.URL, req) + return nil, fmt.Errorf("unexpected request") + } + }), + } + tf.Namespace = "test" + tf.ClientConfig = &restclient.Config{ContentConfig: restclient.ContentConfig{GroupVersion: &unversioned.GroupVersion{Version: test.version}}} + bufOut := bytes.NewBuffer([]byte{}) + bufErr := bytes.NewBuffer([]byte{}) + bufIn := bytes.NewBuffer([]byte{}) + ex := &fakeRemoteAttach{} + if test.attachErr { + ex.attachErr = fmt.Errorf("attach error") + } + params := &AttachOptions{ + ContainerName: "bar", + In: bufIn, + Out: bufOut, + Err: bufErr, + Attach: ex, + } + cmd := &cobra.Command{} + if err := params.Complete(f, cmd, []string{"foo"}); err != nil { + t.Fatal(err) + } + err := params.Run() + if test.attachErr && err != ex.attachErr { + t.Errorf("%s: Unexpected exec error: %v", test.name, err) + continue + } + if !test.attachErr && err != nil { + t.Errorf("%s: Unexpected error: %v", test.name, err) + continue + } + if test.attachErr { + continue + } + if ex.url.Path != test.attachPath { + t.Errorf("%s: Did not get expected path for exec request", test.name) + continue + } + if ex.method != "POST" { + t.Errorf("%s: Did not get method for attach request: %s", test.name, ex.method) + } + if ex.url.Query().Get("container") != "bar" { + t.Errorf("%s: Did not have query parameters: %s", test.name, ex.url.Query()) + } + } +} + +func TestAttachWarnings(t *testing.T) { + version := testapi.Default.GroupVersion().Version + tests := []struct { + name, container, version, podPath, expectedErr, expectedOut string + pod *api.Pod + stdin, tty bool + }{ + { + name: "fallback tty if not supported", + version: version, + podPath: "/api/" + version + "/namespaces/test/pods/foo", + pod: attachPod(), + stdin: true, + tty: true, + expectedErr: "Unable to use a TTY - container bar did not allocate one", + }, + } + for _, test := range tests { + f, tf, codec := NewAPIFactory() + tf.Client = &fake.RESTClient{ + Codec: codec, + Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { + switch p, m := req.URL.Path, req.Method; { + case p == test.podPath 
&& m == "GET": + body := objBody(codec, test.pod) + return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: body}, nil + default: + t.Errorf("%s: unexpected request: %s %#v\n%#v", test.name, req.Method, req.URL, req) + return nil, fmt.Errorf("unexpected request") + } + }), + } + tf.Namespace = "test" + tf.ClientConfig = &restclient.Config{ContentConfig: restclient.ContentConfig{GroupVersion: &unversioned.GroupVersion{Version: test.version}}} + bufOut := bytes.NewBuffer([]byte{}) + bufErr := bytes.NewBuffer([]byte{}) + bufIn := bytes.NewBuffer([]byte{}) + ex := &fakeRemoteAttach{} + params := &AttachOptions{ + ContainerName: test.container, + In: bufIn, + Out: bufOut, + Err: bufErr, + Stdin: test.stdin, + TTY: test.tty, + Attach: ex, + } + cmd := &cobra.Command{} + if err := params.Complete(f, cmd, []string{"foo"}); err != nil { + t.Fatal(err) + } + if err := params.Run(); err != nil { + t.Fatal(err) + } + + if test.stdin && test.tty { + if !test.pod.Spec.Containers[0].TTY { + if !strings.Contains(bufErr.String(), test.expectedErr) { + t.Errorf("%s: Expected TTY fallback warning for attach request: %s", test.name, bufErr.String()) + continue + } + } + } + } +} + +func attachPod() *api.Pod { + return &api.Pod{ + ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: "test", ResourceVersion: "10"}, + Spec: api.PodSpec{ + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + Containers: []api.Container{ + { + Name: "bar", + }, + }, + }, + Status: api.PodStatus{ + Phase: api.PodRunning, + }, + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/autoscale.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/autoscale.go index 2e54c3c0e56f..75bf13daef03 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/autoscale.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/autoscale.go @@ -23,15 +23,22 @@ import ( "k8s.io/kubernetes/pkg/kubectl" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/resource" - "k8s.io/kubernetes/pkg/util/errors" + utilerrors "k8s.io/kubernetes/pkg/util/errors" "github.com/spf13/cobra" ) +// AutoscaleOptions is the start of the data required to perform the operation. As new fields are added, add them here instead of +// referencing the cmd.Flags() +type AutoscaleOptions struct { + Filenames []string + Recursive bool +} + const ( autoscaleLong = `Creates an autoscaler that automatically chooses and sets the number of pods that run in a kubernetes cluster. -Looks up a deployment or replication controller by name and creates an autoscaler that uses this deployment or replication controller as a reference. +Looks up a Deployment, ReplicaSet, or ReplicationController by name and creates an autoscaler that uses the given resource as a reference. 
An autoscaler can automatically increase or decrease number of pods deployed within the system as needed.` autoscaleExample = `# Auto scale a deployment "foo", with the number of pods between 2 to 10, target CPU utilization at a default value that server applies: @@ -42,14 +49,15 @@ kubectl autoscale rc foo --max=5 --cpu-percent=80` ) func NewCmdAutoscale(f *cmdutil.Factory, out io.Writer) *cobra.Command { - filenames := []string{} + options := &AutoscaleOptions{} + cmd := &cobra.Command{ Use: "autoscale (-f FILENAME | TYPE NAME | TYPE/NAME) [--min=MINPODS] --max=MAXPODS [--cpu-percent=CPU] [flags]", - Short: "Auto-scale a deployment or replication controller", + Short: "Auto-scale a Deployment, ReplicaSet, or ReplicationController", Long: autoscaleLong, Example: autoscaleExample, Run: func(cmd *cobra.Command, args []string) { - err := RunAutoscale(f, out, cmd, args, filenames) + err := RunAutoscale(f, out, cmd, args, options) cmdutil.CheckErr(err) }, } @@ -60,15 +68,17 @@ func NewCmdAutoscale(f *cmdutil.Factory, out io.Writer) *cobra.Command { cmd.MarkFlagRequired("max") cmd.Flags().Int("cpu-percent", -1, fmt.Sprintf("The target average CPU utilization (represented as a percent of requested CPU) over all the pods. If it's not specified or negative, the server will apply a default value.")) cmd.Flags().String("name", "", "The name for the newly created object. If not specified, the name of the input resource will be used.") - cmd.Flags().Bool("dry-run", false, "If true, only print the object that would be sent, without creating it.") + cmdutil.AddDryRunFlag(cmd) usage := "Filename, directory, or URL to a file identifying the resource to autoscale." - kubectl.AddJsonFilenameFlag(cmd, &filenames, usage) + kubectl.AddJsonFilenameFlag(cmd, &options.Filenames, usage) + cmdutil.AddRecursiveFlag(cmd, &options.Recursive) cmdutil.AddApplyAnnotationFlags(cmd) cmdutil.AddRecordFlag(cmd) + cmdutil.AddInclude3rdPartyFlags(cmd) return cmd } -func RunAutoscale(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []string, filenames []string) error { +func RunAutoscale(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []string, options *AutoscaleOptions) error { namespace, enforceNamespace, err := f.DefaultNamespace() if err != nil { return err @@ -79,26 +89,18 @@ func RunAutoscale(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args [] return err } - mapper, typer := f.Object() + mapper, typer := f.Object(cmdutil.GetIncludeThirdPartyAPIs(cmd)) r := resource.NewBuilder(mapper, typer, resource.ClientMapperFunc(f.ClientForMapping), f.Decoder(true)). ContinueOnError(). NamespaceParam(namespace).DefaultNamespace(). - FilenameParam(enforceNamespace, filenames...). + FilenameParam(enforceNamespace, options.Recursive, options.Filenames...). ResourceTypeOrNameArgs(false, args...). Flatten(). 
Do() - infos, err := r.Infos() + err = r.Err() if err != nil { return err } - if len(infos) > 1 { - return fmt.Errorf("multiple resources provided: %v", args) - } - info := infos[0] - mapping := info.ResourceMapping() - if err := f.CanBeAutoscaled(mapping.GroupVersionKind.GroupKind()); err != nil { - return err - } // Get the generator, setup and validate all required parameters generatorName := cmdutil.GetFlagString(cmd, "generator") @@ -108,62 +110,83 @@ func RunAutoscale(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args [] return cmdutil.UsageError(cmd, fmt.Sprintf("generator %q not found.", generatorName)) } names := generator.ParamNames() - params := kubectl.MakeParams(cmd, names) - name := info.Name - params["default-name"] = name - params["scaleRef-kind"] = mapping.GroupVersionKind.Kind - params["scaleRef-name"] = name - params["scaleRef-apiVersion"] = mapping.GroupVersionKind.GroupVersion().String() + count := 0 + err = r.Visit(func(info *resource.Info, err error) error { + if err != nil { + return err + } - if err = kubectl.ValidateParams(names, params); err != nil { - return err - } - // Check for invalid flags used against the present generator. - if err := kubectl.EnsureFlagsValid(cmd, generators, generatorName); err != nil { - return err - } + mapping := info.ResourceMapping() + if err := f.CanBeAutoscaled(mapping.GroupVersionKind.GroupKind()); err != nil { + return err + } - // Generate new object - object, err := generator.Generate(params) - if err != nil { - return err - } + name := info.Name + params := kubectl.MakeParams(cmd, names) + params["default-name"] = name - resourceMapper := &resource.Mapper{ - ObjectTyper: typer, - RESTMapper: mapper, - ClientMapper: resource.ClientMapperFunc(f.ClientForMapping), - Decoder: f.Decoder(true), - } - hpa, err := resourceMapper.InfoForObject(object, nil) - if err != nil { - return err - } - if cmdutil.ShouldRecord(cmd, hpa) { - if err := cmdutil.RecordChangeCause(hpa.Object, f.Command()); err != nil { + params["scaleRef-kind"] = mapping.GroupVersionKind.Kind + params["scaleRef-name"] = name + params["scaleRef-apiVersion"] = mapping.GroupVersionKind.GroupVersion().String() + + if err = kubectl.ValidateParams(names, params); err != nil { + return err + } + // Check for invalid flags used against the present generator. + if err := kubectl.EnsureFlagsValid(cmd, generators, generatorName); err != nil { return err } - object = hpa.Object - } - // TODO: extract this flag to a central location, when such a location exists. 
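// The TODO above is what this hunk resolves: the dry-run flag moves behind the
// shared cmdutil helpers (AddDryRunFlag/GetDryRunFlag) that the new code calls.
// A minimal sketch of what such helpers plausibly look like; their real bodies
// are not shown in this diff, so treat the following as an assumption:
//
//	func AddDryRunFlag(cmd *cobra.Command) {
//		cmd.Flags().Bool("dry-run", false, "If true, only print the object that would be sent, without sending it.")
//	}
//
//	func GetDryRunFlag(cmd *cobra.Command) bool {
//		return GetFlagBool(cmd, "dry-run")
//	}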
- if cmdutil.GetFlagBool(cmd, "dry-run") { - return f.PrintObject(cmd, object, out) - } - if err := kubectl.CreateOrUpdateAnnotation(cmdutil.GetFlagBool(cmd, cmdutil.ApplyAnnotationsFlag), hpa, f.JSONEncoder()); err != nil { - return err - } + // Generate new object + object, err := generator.Generate(params) + if err != nil { + return err + } + + resourceMapper := &resource.Mapper{ + ObjectTyper: typer, + RESTMapper: mapper, + ClientMapper: resource.ClientMapperFunc(f.ClientForMapping), + Decoder: f.Decoder(true), + } + hpa, err := resourceMapper.InfoForObject(object, nil) + if err != nil { + return err + } + if cmdutil.ShouldRecord(cmd, hpa) { + if err := cmdutil.RecordChangeCause(hpa.Object, f.Command()); err != nil { + return err + } + object = hpa.Object + } + if cmdutil.GetDryRunFlag(cmd) { + return f.PrintObject(cmd, mapper, object, out) + } + + if err := kubectl.CreateOrUpdateAnnotation(cmdutil.GetFlagBool(cmd, cmdutil.ApplyAnnotationsFlag), hpa, f.JSONEncoder()); err != nil { + return err + } - object, err = resource.NewHelper(hpa.Client, hpa.Mapping).Create(namespace, false, object) + object, err = resource.NewHelper(hpa.Client, hpa.Mapping).Create(namespace, false, object) + if err != nil { + return err + } + + count++ + if len(cmdutil.GetFlagString(cmd, "output")) > 0 { + return f.PrintObject(cmd, mapper, object, out) + } + + cmdutil.PrintSuccess(mapper, false, out, info.Mapping.Resource, info.Name, "autoscaled") + return nil + }) if err != nil { return err } - - if len(cmdutil.GetFlagString(cmd, "output")) > 0 { - return f.PrintObject(cmd, object, out) + if count == 0 { + return fmt.Errorf("no objects passed to autoscale") } - cmdutil.PrintSuccess(mapper, false, out, info.Mapping.Resource, info.Name, "autoscaled") return nil } @@ -176,5 +199,5 @@ func validateFlags(cmd *cobra.Command) error { if cpu > 100 { errs = append(errs, fmt.Errorf("CPU utilization (%%) cannot exceed 100")) } - return errors.NewAggregate(errs) + return utilerrors.NewAggregate(errs) } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/clusterinfo.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/clusterinfo.go index c76e28f7c63f..a333eef3a647 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/clusterinfo.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/clusterinfo.go @@ -30,18 +30,23 @@ import ( "github.com/spf13/cobra" ) +var longDescr = `Display addresses of the master and services with label kubernetes.io/cluster-service=true +To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.` + func NewCmdClusterInfo(f *cmdutil.Factory, out io.Writer) *cobra.Command { cmd := &cobra.Command{ Use: "cluster-info", // clusterinfo is deprecated. 
 			Aliases: []string{"clusterinfo"},
 			Short:   "Display cluster info",
-			Long:    "Display addresses of the master and services with label kubernetes.io/cluster-service=true",
+			Long:    longDescr,
 			Run: func(cmd *cobra.Command, args []string) {
 				err := RunClusterInfo(f, out, cmd)
 				cmdutil.CheckErr(err)
 			},
 		}
+	cmdutil.AddInclude3rdPartyFlags(cmd)
+	cmd.AddCommand(NewCmdClusterInfoDump(f, out))
 	return cmd
 }
@@ -56,7 +61,7 @@ func RunClusterInfo(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command) error
 	}
 	printService(out, "Kubernetes master", client.Host)
 
-	mapper, typer := f.Object()
+	mapper, typer := f.Object(cmdutil.GetIncludeThirdPartyAPIs(cmd))
 	cmdNamespace := cmdutil.GetFlagString(cmd, "namespace")
 	if cmdNamespace == "" {
 		cmdNamespace = api.NamespaceSystem
@@ -82,7 +87,7 @@ func RunClusterInfo(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command) error
 					ip = ingress.Hostname
 				}
 				for _, port := range service.Spec.Ports {
-					link += "http://" + ip + ":" + strconv.Itoa(port.Port) + " "
+					link += "http://" + ip + ":" + strconv.Itoa(int(port.Port)) + " "
 				}
 			} else {
 				if len(client.GroupVersion.Group) == 0 {
@@ -100,6 +105,7 @@ func RunClusterInfo(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command) error
 		}
 		return nil
 	})
+	out.Write([]byte("\nTo further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.\n"))
 	return nil
 
 	// TODO consider printing more information about cluster
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/clusterinfo_dump.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/clusterinfo_dump.go
new file mode 100644
index 000000000000..9a1865902a7b
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/clusterinfo_dump.go
@@ -0,0 +1,215 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+	"fmt"
+	"io"
+	"os"
+	"path"
+
+	"github.com/spf13/cobra"
+
+	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/client/unversioned"
+	"k8s.io/kubernetes/pkg/kubectl"
+	cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
+)
+
+// NewCmdClusterInfoDump creates the `cluster-info dump` command, which dumps
+// relevant cluster state for debugging and diagnosis.
+func NewCmdClusterInfoDump(f *cmdutil.Factory, cmdOut io.Writer) *cobra.Command {
+	cmd := &cobra.Command{
+		Use:     "dump",
+		Short:   "Dump relevant info for debugging and diagnosis.",
+		Long:    dumpLong,
+		Example: dumpExample,
+		Run: func(cmd *cobra.Command, args []string) {
+			cmdutil.CheckErr(dumpClusterInfo(f, cmd, args, cmdOut))
+		},
+	}
+	cmd.Flags().String("output-directory", "", "Where to output the files. If empty or '-', uses stdout; otherwise, creates a directory hierarchy in that directory")
+	cmd.Flags().StringSlice("namespaces", []string{}, "A comma-separated list of namespaces to dump.")
+	cmd.Flags().Bool("all-namespaces", false, "If true, dump all namespaces; the --namespaces flag is then ignored.")
+	return cmd
+}
+
+const (
+	dumpLong = `
+Dumps cluster information suitable for debugging and diagnosing cluster problems. By default, dumps everything to
+stdout. You can optionally specify a directory with --output-directory. If you specify a directory, Kubernetes will
+build a set of files in that directory. By default, only things in the 'kube-system' namespace are dumped, but you can
+switch to a different namespace with the --namespaces flag, or specify --all-namespaces to dump all namespaces.
+
+The command also dumps the logs of all of the pods in the cluster; these logs are dumped into different directories
+based on namespace and pod name.
+`
+
+	dumpExample = `# Dump current cluster state to stdout
+kubectl cluster-info dump
+
+# Dump current cluster state to /path/to/cluster-state
+kubectl cluster-info dump --output-directory=/path/to/cluster-state
+
+# Dump all namespaces to stdout
+kubectl cluster-info dump --all-namespaces
+
+# Dump a set of namespaces to /path/to/cluster-state
+kubectl cluster-info dump --namespaces default,kube-system --output-directory=/path/to/cluster-state`
+)
+
+// setupOutputWriter returns defaultWriter unless an output directory was
+// requested, in which case it creates (and returns) the named file under it.
+func setupOutputWriter(cmd *cobra.Command, defaultWriter io.Writer, filename string) io.Writer {
+	dir := cmdutil.GetFlagString(cmd, "output-directory")
+	if len(dir) == 0 || dir == "-" {
+		return defaultWriter
+	}
+	fullFile := path.Join(dir, filename)
+	parent := path.Dir(fullFile)
+	cmdutil.CheckErr(os.MkdirAll(parent, 0755))
+
+	file, err := os.Create(fullFile)
+	cmdutil.CheckErr(err)
+	return file
+}
+
+func dumpClusterInfo(f *cmdutil.Factory, cmd *cobra.Command, args []string, out io.Writer) error {
+	var c *unversioned.Client
+	var err error
+	if c, err = f.Client(); err != nil {
+		return err
+	}
+	printer, _, err := kubectl.GetPrinter("json", "")
+	if err != nil {
+		return err
+	}
+
+	nodes, err := c.Nodes().List(api.ListOptions{})
+	if err != nil {
+		return err
+	}
+
+	if err := printer.PrintObj(nodes, setupOutputWriter(cmd, out, "nodes.json")); err != nil {
+		return err
+	}
+
+	var namespaces []string
+	if cmdutil.GetFlagBool(cmd, "all-namespaces") {
+		namespaceList, err := c.Namespaces().List(api.ListOptions{})
+		if err != nil {
+			return err
+		}
+		for ix := range namespaceList.Items {
+			namespaces = append(namespaces, namespaceList.Items[ix].Name)
+		}
+	} else {
+		namespaces = cmdutil.GetFlagStringSlice(cmd, "namespaces")
+		if len(namespaces) == 0 {
+			cmdNamespace, _, err := f.DefaultNamespace()
+			if err != nil {
+				return err
+			}
+			namespaces = []string{
+				api.NamespaceSystem,
+				cmdNamespace,
+			}
+		}
+	}
+	for _, namespace := range namespaces {
+		// TODO: this is repetitive in the extreme. Use reflection or
+		// something to make this a for loop.
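// A hedged sketch of the for-loop the TODO above asks for: collect each list
// call and its output file in a small table and iterate, instead of repeating
// the fetch-then-print pair once per resource type. The listDump type and the
// closure shape are illustrative assumptions (and would need the
// k8s.io/kubernetes/pkg/runtime import), not helpers that exist in this package:
//
//	type listDump struct {
//		file string
//		list func() (runtime.Object, error)
//	}
//	dumps := []listDump{
//		{"events.json", func() (runtime.Object, error) { return c.Events(namespace).List(api.ListOptions{}) }},
//		{"services.json", func() (runtime.Object, error) { return c.Services(namespace).List(api.ListOptions{}) }},
//		{"pods.json", func() (runtime.Object, error) { return c.Pods(namespace).List(api.ListOptions{}) }},
//	}
//	for _, d := range dumps {
//		obj, err := d.list()
//		if err != nil {
//			return err
//		}
//		if err := printer.PrintObj(obj, setupOutputWriter(cmd, out, path.Join(namespace, d.file))); err != nil {
//			return err
//		}
//	}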
+ events, err := c.Events(namespace).List(api.ListOptions{}) + if err != nil { + return err + } + if err := printer.PrintObj(events, setupOutputWriter(cmd, out, path.Join(namespace, "events.json"))); err != nil { + return err + } + + rcs, err := c.ReplicationControllers(namespace).List(api.ListOptions{}) + if err != nil { + return err + } + if err := printer.PrintObj(rcs, setupOutputWriter(cmd, out, path.Join(namespace, "replication-controllers.json"))); err != nil { + return err + } + + svcs, err := c.Services(namespace).List(api.ListOptions{}) + if err != nil { + return err + } + if err := printer.PrintObj(svcs, setupOutputWriter(cmd, out, path.Join(namespace, "services.json"))); err != nil { + return err + } + + sets, err := c.DaemonSets(namespace).List(api.ListOptions{}) + if err != nil { + return err + } + if err := printer.PrintObj(sets, setupOutputWriter(cmd, out, path.Join(namespace, "daemonsets.json"))); err != nil { + return err + } + + deps, err := c.Deployments(namespace).List(api.ListOptions{}) + if err != nil { + return err + } + if err := printer.PrintObj(deps, setupOutputWriter(cmd, out, path.Join(namespace, "deployments.json"))); err != nil { + return err + } + + rps, err := c.ReplicaSets(namespace).List(api.ListOptions{}) + if err != nil { + return err + } + if err := printer.PrintObj(rps, setupOutputWriter(cmd, out, path.Join(namespace, "replicasets.json"))); err != nil { + return err + } + + pods, err := c.Pods(namespace).List(api.ListOptions{}) + if err != nil { + return err + } + + if err := printer.PrintObj(pods, setupOutputWriter(cmd, out, path.Join(namespace, "pods.json"))); err != nil { + return err + } + + for ix := range pods.Items { + pod := &pods.Items[ix] + writer := setupOutputWriter(cmd, out, path.Join(namespace, pod.Name, "logs.txt")) + writer.Write([]byte(fmt.Sprintf("==== START logs for %s/%s ====\n", pod.Namespace, pod.Name))) + request, err := f.LogsForObject(pod, &api.PodLogOptions{}) + if err != nil { + return err + } + + data, err := request.DoRaw() + if err != nil { + return err + } + writer.Write(data) + writer.Write([]byte(fmt.Sprintf("==== END logs for %s/%s ====\n", pod.Namespace, pod.Name))) + } + } + dir := cmdutil.GetFlagString(cmd, "output-directory") + if len(dir) == 0 { + dir = "." + } + if dir != "-" { + fmt.Fprintf(out, "Cluster info dumped to %s", dir) + } + return nil +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/clusterinfo_dump_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/clusterinfo_dump_test.go new file mode 100644 index 000000000000..35314525895c --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/clusterinfo_dump_test.go @@ -0,0 +1,68 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cmd + +import ( + "bytes" + "io/ioutil" + "os" + "path" + "testing" +) + +func TestSetupOutputWriterNoOp(t *testing.T) { + tests := []string{"", "-"} + for _, test := range tests { + out := &bytes.Buffer{} + f, _, _ := NewAPIFactory() + cmd := NewCmdClusterInfoDump(f, os.Stdout) + cmd.Flag("output-directory").Value.Set(test) + writer := setupOutputWriter(cmd, out, "/some/file/that/should/be/ignored") + if writer != out { + t.Errorf("expected: %v, saw: %v", out, writer) + } + } +} + +func TestSetupOutputWriterFile(t *testing.T) { + file := "output.json" + dir, err := ioutil.TempDir(os.TempDir(), "out") + if err != nil { + t.Errorf("unexpected error: %v", err) + } + fullPath := path.Join(dir, file) + defer os.RemoveAll(dir) + + out := &bytes.Buffer{} + f, _, _ := NewAPIFactory() + cmd := NewCmdClusterInfoDump(f, os.Stdout) + cmd.Flag("output-directory").Value.Set(dir) + writer := setupOutputWriter(cmd, out, file) + if writer == out { + t.Errorf("expected: %v, saw: %v", out, writer) + } + output := "some data here" + writer.Write([]byte(output)) + + data, err := ioutil.ReadFile(fullPath) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + if string(data) != output { + t.Errorf("expected: %v, saw: %v", output, data) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/cmd.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/cmd.go index 790b211512b0..75b935b881d0 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/cmd.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/cmd.go @@ -20,22 +20,59 @@ import ( "io" "github.com/golang/glog" + "k8s.io/kubernetes/pkg/client/unversioned/clientcmd" cmdconfig "k8s.io/kubernetes/pkg/kubectl/cmd/config" "k8s.io/kubernetes/pkg/kubectl/cmd/rollout" + "k8s.io/kubernetes/pkg/kubectl/cmd/set" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" - "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/flag" "github.com/spf13/cobra" ) const ( bash_completion_func = `# call kubectl get $1, +__kubectl_namespace_flag() +{ + local ret two_word_ns + ret="" + two_word_ns=false + for w in "${words[@]}"; do + if [ "$two_word_ns" = true ]; then + ret="--namespace=${w}" + two_word_ns=false + continue + fi + case "${w}" in + --namespace=*) + ret=${w} + ;; + --namespace) + two_word_ns=true + ;; + --all-namespaces) + ret=${w} + ;; + esac + done + echo $ret +} + +__kubectl_get_namespaces() +{ + local template kubectl_out + template="{{ range .items }}{{ .metadata.name }} {{ end }}" + if kubectl_out=$(kubectl get -o template --template="${template}" namespace 2>/dev/null); then + COMPREPLY=( $( compgen -W "${kubectl_out[*]}" -- "$cur" ) ) + fi +} + __kubectl_parse_get() { local template template="{{ range .items }}{{ .metadata.name }} {{ end }}" local kubectl_out - if kubectl_out=$(kubectl get -o template --template="${template}" "$1" 2>/dev/null); then + if kubectl_out=$(kubectl get $(__kubectl_namespace_flag) -o template --template="${template}" "$1" 2>/dev/null); then COMPREPLY=( $( compgen -W "${kubectl_out[*]}" -- "$cur" ) ) fi } @@ -71,7 +108,7 @@ __kubectl_get_containers() fi local last=${nouns[${len} -1]} local kubectl_out - if kubectl_out=$(kubectl get -o template --template="${template}" pods "${last}" 2>/dev/null); then + if kubectl_out=$(kubectl get $(__kubectl_namespace_flag) -o template --template="${template}" pods "${last}" 2>/dev/null); then COMPREPLY=( $( compgen -W "${kubectl_out[*]}" -- "$cur" ) ) fi } @@ -89,7 +126,8 @@ __kubectl_require_pod_and_container() 
__custom_func() { case ${last_command} in - kubectl_get | kubectl_describe | kubectl_delete | kubectl_label | kubectl_stop) + kubectl_get | kubectl_describe | kubectl_delete | kubectl_label | kubectl_stop | kubectl_edit | kubectl_patch |\ + kubectl_annotate | kubectl_expose) __kubectl_get_resource return ;; @@ -134,7 +172,7 @@ __custom_func() { * replicasets (aka 'rs') * replicationcontrollers (aka 'rc') * secrets - * serviceaccounts + * serviceaccounts (aka 'sa') * services (aka 'svc') ` ) @@ -156,15 +194,16 @@ Find more information at https://github.com/kubernetes/kubernetes.`, f.BindExternalFlags(cmds.PersistentFlags()) // From this point and forward we get warnings on flags that contain "_" separators - cmds.SetGlobalNormalizationFunc(util.WarnWordSepNormalizeFunc) + cmds.SetGlobalNormalizationFunc(flag.WarnWordSepNormalizeFunc) cmds.AddCommand(NewCmdGet(f, out)) + cmds.AddCommand(set.NewCmdSet(f, out)) cmds.AddCommand(NewCmdDescribe(f, out)) cmds.AddCommand(NewCmdCreate(f, out)) cmds.AddCommand(NewCmdReplace(f, out)) cmds.AddCommand(NewCmdPatch(f, out)) cmds.AddCommand(NewCmdDelete(f, out)) - cmds.AddCommand(NewCmdEdit(f, out)) + cmds.AddCommand(NewCmdEdit(f, out, err)) cmds.AddCommand(NewCmdApply(f, out)) cmds.AddCommand(NewCmdNamespace(out)) @@ -177,7 +216,7 @@ Find more information at https://github.com/kubernetes/kubernetes.`, cmds.AddCommand(NewCmdAttach(f, in, out, err)) cmds.AddCommand(NewCmdExec(f, in, out, err)) - cmds.AddCommand(NewCmdPortForward(f)) + cmds.AddCommand(NewCmdPortForward(f, out, err)) cmds.AddCommand(NewCmdProxy(f, out)) cmds.AddCommand(NewCmdRun(f, in, out, err)) @@ -188,13 +227,25 @@ Find more information at https://github.com/kubernetes/kubernetes.`, cmds.AddCommand(NewCmdLabel(f, out)) cmds.AddCommand(NewCmdAnnotate(f, out)) + cmds.AddCommand(NewCmdTaint(f, out)) - cmds.AddCommand(cmdconfig.NewCmdConfig(cmdconfig.NewDefaultPathOptions(), out)) + cmds.AddCommand(cmdconfig.NewCmdConfig(clientcmd.NewDefaultPathOptions(), out)) cmds.AddCommand(NewCmdClusterInfo(f, out)) cmds.AddCommand(NewCmdApiVersions(f, out)) cmds.AddCommand(NewCmdVersion(f, out)) cmds.AddCommand(NewCmdExplain(f, out)) cmds.AddCommand(NewCmdConvert(f, out)) + cmds.AddCommand(NewCmdCompletion(f, out)) + + if cmds.Flag("namespace") != nil { + if cmds.Flag("namespace").Annotations == nil { + cmds.Flag("namespace").Annotations = map[string][]string{} + } + cmds.Flag("namespace").Annotations[cobra.BashCompCustom] = append( + cmds.Flag("namespace").Annotations[cobra.BashCompCustom], + "__kubectl_get_namespaces", + ) + } return cmds } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/cmd_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/cmd_test.go new file mode 100644 index 000000000000..729827772a1e --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/cmd_test.go @@ -0,0 +1,818 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cmd + +import ( + "bytes" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "os" + "reflect" + "testing" + "time" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/meta" + "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/api/validation" + "k8s.io/kubernetes/pkg/client/restclient" + client "k8s.io/kubernetes/pkg/client/unversioned" + "k8s.io/kubernetes/pkg/client/unversioned/fake" + "k8s.io/kubernetes/pkg/kubectl" + cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" + "k8s.io/kubernetes/pkg/kubectl/resource" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/runtime/serializer" + "k8s.io/kubernetes/pkg/util" +) + +func initTestErrorHandler(t *testing.T) { + cmdutil.BehaviorOnFatal(func(str string) { + t.Errorf("Error running command: %s", str) + }) +} + +func defaultHeader() http.Header { + header := http.Header{} + header.Set("Content-Type", runtime.ContentTypeJSON) + return header +} + +func defaultClientConfig() *restclient.Config { + return &restclient.Config{ + ContentConfig: restclient.ContentConfig{ + ContentType: runtime.ContentTypeJSON, + GroupVersion: testapi.Default.GroupVersion(), + }, + } +} + +type internalType struct { + Kind string + APIVersion string + + Name string +} + +type externalType struct { + Kind string `json:"kind"` + APIVersion string `json:"apiVersion"` + + Name string `json:"name"` +} + +type ExternalType2 struct { + Kind string `json:"kind"` + APIVersion string `json:"apiVersion"` + + Name string `json:"name"` +} + +func (obj *internalType) GetObjectKind() unversioned.ObjectKind { return obj } +func (obj *internalType) SetGroupVersionKind(gvk unversioned.GroupVersionKind) { + obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind() +} +func (obj *internalType) GroupVersionKind() unversioned.GroupVersionKind { + return unversioned.FromAPIVersionAndKind(obj.APIVersion, obj.Kind) +} +func (obj *externalType) GetObjectKind() unversioned.ObjectKind { return obj } +func (obj *externalType) SetGroupVersionKind(gvk unversioned.GroupVersionKind) { + obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind() +} +func (obj *externalType) GroupVersionKind() unversioned.GroupVersionKind { + return unversioned.FromAPIVersionAndKind(obj.APIVersion, obj.Kind) +} +func (obj *ExternalType2) GetObjectKind() unversioned.ObjectKind { return obj } +func (obj *ExternalType2) SetGroupVersionKind(gvk unversioned.GroupVersionKind) { + obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind() +} +func (obj *ExternalType2) GroupVersionKind() unversioned.GroupVersionKind { + return unversioned.FromAPIVersionAndKind(obj.APIVersion, obj.Kind) +} + +var versionErr = errors.New("not a version") + +func versionErrIfFalse(b bool) error { + if b { + return nil + } + return versionErr +} + +var validVersion = testapi.Default.GroupVersion().Version +var internalGV = unversioned.GroupVersion{Group: "apitest", Version: runtime.APIVersionInternal} +var unlikelyGV = unversioned.GroupVersion{Group: "apitest", Version: "unlikelyversion"} +var validVersionGV = unversioned.GroupVersion{Group: "apitest", Version: validVersion} + +func newExternalScheme() (*runtime.Scheme, meta.RESTMapper, runtime.Codec) { + scheme := runtime.NewScheme() + scheme.AddKnownTypeWithName(internalGV.WithKind("Type"), &internalType{}) + scheme.AddKnownTypeWithName(unlikelyGV.WithKind("Type"), &externalType{}) + //This tests that kubectl will not confuse the external scheme with the internal scheme, even when they accidentally have versions of the same 
name. + scheme.AddKnownTypeWithName(validVersionGV.WithKind("Type"), &ExternalType2{}) + + codecs := serializer.NewCodecFactory(scheme) + codec := codecs.LegacyCodec(unlikelyGV) + mapper := meta.NewDefaultRESTMapper([]unversioned.GroupVersion{unlikelyGV, validVersionGV}, func(version unversioned.GroupVersion) (*meta.VersionInterfaces, error) { + return &meta.VersionInterfaces{ + ObjectConvertor: scheme, + MetadataAccessor: meta.NewAccessor(), + }, versionErrIfFalse(version == validVersionGV || version == unlikelyGV) + }) + for _, gv := range []unversioned.GroupVersion{unlikelyGV, validVersionGV} { + for kind := range scheme.KnownTypes(gv) { + gvk := gv.WithKind(kind) + + scope := meta.RESTScopeNamespace + mapper.Add(gvk, scope) + } + } + + return scheme, mapper, codec +} + +type testPrinter struct { + Objects []runtime.Object + Err error +} + +func (t *testPrinter) PrintObj(obj runtime.Object, out io.Writer) error { + t.Objects = append(t.Objects, obj) + fmt.Fprintf(out, "%#v", obj) + return t.Err +} + +// TODO: implement HandledResources() +func (t *testPrinter) HandledResources() []string { + return []string{} +} + +type testDescriber struct { + Name, Namespace string + Settings kubectl.DescriberSettings + Output string + Err error +} + +func (t *testDescriber) Describe(namespace, name string, describerSettings kubectl.DescriberSettings) (output string, err error) { + t.Namespace, t.Name = namespace, name + t.Settings = describerSettings + return t.Output, t.Err +} + +type testFactory struct { + Mapper meta.RESTMapper + Typer runtime.ObjectTyper + Client kubectl.RESTClient + Describer kubectl.Describer + Printer kubectl.ResourcePrinter + Validator validation.Schema + Namespace string + ClientConfig *restclient.Config + Err error +} + +func NewTestFactory() (*cmdutil.Factory, *testFactory, runtime.Codec) { + scheme, mapper, codec := newExternalScheme() + t := &testFactory{ + Validator: validation.NullSchema{}, + Mapper: mapper, + Typer: scheme, + } + return &cmdutil.Factory{ + Object: func(discovery bool) (meta.RESTMapper, runtime.ObjectTyper) { + priorityRESTMapper := meta.PriorityRESTMapper{ + Delegate: t.Mapper, + ResourcePriority: []unversioned.GroupVersionResource{ + {Group: meta.AnyGroup, Version: "v1", Resource: meta.AnyResource}, + }, + KindPriority: []unversioned.GroupVersionKind{ + {Group: meta.AnyGroup, Version: "v1", Kind: meta.AnyKind}, + }, + } + return priorityRESTMapper, t.Typer + }, + ClientForMapping: func(*meta.RESTMapping) (resource.RESTClient, error) { + return t.Client, t.Err + }, + Decoder: func(bool) runtime.Decoder { + return codec + }, + JSONEncoder: func() runtime.Encoder { + return codec + }, + Describer: func(*meta.RESTMapping) (kubectl.Describer, error) { + return t.Describer, t.Err + }, + Printer: func(mapping *meta.RESTMapping, noHeaders, withNamespace bool, wide bool, showAll bool, showLabels bool, absoluteTimestamps bool, columnLabels []string) (kubectl.ResourcePrinter, error) { + return t.Printer, t.Err + }, + Validator: func(validate bool, cacheDir string) (validation.Schema, error) { + return t.Validator, t.Err + }, + DefaultNamespace: func() (string, bool, error) { + return t.Namespace, false, t.Err + }, + ClientConfig: func() (*restclient.Config, error) { + return t.ClientConfig, t.Err + }, + }, t, codec +} + +func NewMixedFactory(apiClient resource.RESTClient) (*cmdutil.Factory, *testFactory, runtime.Codec) { + f, t, c := NewTestFactory() + var multiRESTMapper meta.MultiRESTMapper + multiRESTMapper = append(multiRESTMapper, t.Mapper) + 
multiRESTMapper = append(multiRESTMapper, testapi.Default.RESTMapper()) + f.Object = func(discovery bool) (meta.RESTMapper, runtime.ObjectTyper) { + priorityRESTMapper := meta.PriorityRESTMapper{ + Delegate: multiRESTMapper, + ResourcePriority: []unversioned.GroupVersionResource{ + {Group: meta.AnyGroup, Version: "v1", Resource: meta.AnyResource}, + }, + KindPriority: []unversioned.GroupVersionKind{ + {Group: meta.AnyGroup, Version: "v1", Kind: meta.AnyKind}, + }, + } + return priorityRESTMapper, runtime.MultiObjectTyper{t.Typer, api.Scheme} + } + f.ClientForMapping = func(m *meta.RESTMapping) (resource.RESTClient, error) { + if m.ObjectConvertor == api.Scheme { + return apiClient, t.Err + } + return t.Client, t.Err + } + return f, t, c +} + +func NewAPIFactory() (*cmdutil.Factory, *testFactory, runtime.Codec) { + t := &testFactory{ + Validator: validation.NullSchema{}, + } + + f := &cmdutil.Factory{ + Object: func(discovery bool) (meta.RESTMapper, runtime.ObjectTyper) { + return testapi.Default.RESTMapper(), api.Scheme + }, + Client: func() (*client.Client, error) { + // Swap out the HTTP client out of the client with the fake's version. + fakeClient := t.Client.(*fake.RESTClient) + c := client.NewOrDie(t.ClientConfig) + c.Client = fakeClient.Client + c.ExtensionsClient.Client = fakeClient.Client + return c, t.Err + }, + ClientForMapping: func(*meta.RESTMapping) (resource.RESTClient, error) { + return t.Client, t.Err + }, + Decoder: func(bool) runtime.Decoder { + return testapi.Default.Codec() + }, + JSONEncoder: func() runtime.Encoder { + return testapi.Default.Codec() + }, + Describer: func(*meta.RESTMapping) (kubectl.Describer, error) { + return t.Describer, t.Err + }, + Printer: func(mapping *meta.RESTMapping, noHeaders, withNamespace bool, wide bool, showAll bool, showLabels bool, absoluteTimestamps bool, columnLabels []string) (kubectl.ResourcePrinter, error) { + return t.Printer, t.Err + }, + Validator: func(validate bool, cacheDir string) (validation.Schema, error) { + return t.Validator, t.Err + }, + DefaultNamespace: func() (string, bool, error) { + return t.Namespace, false, t.Err + }, + ClientConfig: func() (*restclient.Config, error) { + return t.ClientConfig, t.Err + }, + Generators: func(cmdName string) map[string]kubectl.Generator { + return cmdutil.DefaultGenerators(cmdName) + }, + LogsForObject: func(object, options runtime.Object) (*restclient.Request, error) { + fakeClient := t.Client.(*fake.RESTClient) + c := client.NewOrDie(t.ClientConfig) + c.Client = fakeClient.Client + + switch t := object.(type) { + case *api.Pod: + opts, ok := options.(*api.PodLogOptions) + if !ok { + return nil, errors.New("provided options object is not a PodLogOptions") + } + return c.Pods(t.Namespace).GetLogs(t.Name, opts), nil + default: + fqKinds, _, err := api.Scheme.ObjectKinds(object) + if err != nil { + return nil, err + } + return nil, fmt.Errorf("cannot get the logs from %v", fqKinds[0]) + } + }, + } + rf := cmdutil.NewFactory(nil) + f.MapBasedSelectorForObject = rf.MapBasedSelectorForObject + f.PortsForObject = rf.PortsForObject + f.ProtocolsForObject = rf.ProtocolsForObject + f.LabelsForObject = rf.LabelsForObject + f.CanBeExposed = rf.CanBeExposed + f.PrintObjectSpecificMessage = rf.PrintObjectSpecificMessage + return f, t, testapi.Default.Codec() +} + +func objBody(codec runtime.Codec, obj runtime.Object) io.ReadCloser { + return ioutil.NopCloser(bytes.NewReader([]byte(runtime.EncodeOrDie(codec, obj)))) +} + +func stringBody(body string) io.ReadCloser { + return 
ioutil.NopCloser(bytes.NewReader([]byte(body))) +} + +// TODO(jlowdermilk): refactor the Factory so we can test client versions properly, +// with different client/server version skew scenarios. +// Verify that resource.RESTClients constructed from a factory respect mapping.APIVersion +//func TestClientVersions(t *testing.T) { +// f := cmdutil.NewFactory(nil) +// +// version := testapi.Default.Version() +// mapping := &meta.RESTMapping{ +// APIVersion: version, +// } +// c, err := f.ClientForMapping(mapping) +// if err != nil { +// t.Errorf("unexpected error: %v", err) +// } +// client := c.(*client.RESTClient) +// if client.APIVersion() != version { +// t.Errorf("unexpected Client APIVersion: %s %v", client.APIVersion, client) +// } +//} + +func Example_printReplicationControllerWithNamespace() { + f, tf, codec := NewAPIFactory() + tf.Printer = kubectl.NewHumanReadablePrinter(false, true, false, false, false, false, []string{}) + tf.Client = &fake.RESTClient{ + Codec: codec, + Client: nil, + } + cmd := NewCmdRun(f, os.Stdin, os.Stdout, os.Stderr) + ctrl := &api.ReplicationController{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Namespace: "beep", + Labels: map[string]string{"foo": "bar"}, + CreationTimestamp: unversioned.Time{Time: time.Now().AddDate(-10, 0, 0)}, + }, + Spec: api.ReplicationControllerSpec{ + Replicas: 1, + Selector: map[string]string{"foo": "bar"}, + Template: &api.PodTemplateSpec{ + ObjectMeta: api.ObjectMeta{ + Labels: map[string]string{"foo": "bar"}, + }, + Spec: api.PodSpec{ + Containers: []api.Container{ + { + Name: "foo", + Image: "someimage", + }, + }, + }, + }, + }, + Status: api.ReplicationControllerStatus{ + Replicas: 1, + }, + } + mapper, _ := f.Object(false) + err := f.PrintObject(cmd, mapper, ctrl, os.Stdout) + if err != nil { + fmt.Printf("Unexpected error: %v", err) + } + // Output: + // NAMESPACE NAME DESIRED CURRENT AGE + // beep foo 1 1 10y +} + +func Example_printMultiContainersReplicationControllerWithWide() { + f, tf, codec := NewAPIFactory() + tf.Printer = kubectl.NewHumanReadablePrinter(false, false, true, false, false, false, []string{}) + tf.Client = &fake.RESTClient{ + Codec: codec, + Client: nil, + } + cmd := NewCmdRun(f, os.Stdin, os.Stdout, os.Stderr) + ctrl := &api.ReplicationController{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Labels: map[string]string{"foo": "bar"}, + CreationTimestamp: unversioned.Time{Time: time.Now().AddDate(-10, 0, 0)}, + }, + Spec: api.ReplicationControllerSpec{ + Replicas: 1, + Selector: map[string]string{"foo": "bar"}, + Template: &api.PodTemplateSpec{ + ObjectMeta: api.ObjectMeta{ + Labels: map[string]string{"foo": "bar"}, + }, + Spec: api.PodSpec{ + Containers: []api.Container{ + { + Name: "foo", + Image: "someimage", + }, + { + Name: "foo2", + Image: "someimage2", + }, + }, + }, + }, + }, + Status: api.ReplicationControllerStatus{ + Replicas: 1, + }, + } + mapper, _ := f.Object(false) + err := f.PrintObject(cmd, mapper, ctrl, os.Stdout) + if err != nil { + fmt.Printf("Unexpected error: %v", err) + } + // Output: + // NAME DESIRED CURRENT AGE CONTAINER(S) IMAGE(S) SELECTOR + // foo 1 1 10y foo,foo2 someimage,someimage2 foo=bar +} + +func Example_printReplicationController() { + f, tf, codec := NewAPIFactory() + tf.Printer = kubectl.NewHumanReadablePrinter(false, false, false, false, false, false, []string{}) + tf.Client = &fake.RESTClient{ + Codec: codec, + Client: nil, + } + cmd := NewCmdRun(f, os.Stdin, os.Stdout, os.Stderr) + ctrl := &api.ReplicationController{ + ObjectMeta: api.ObjectMeta{ + Name: 
"foo", + Labels: map[string]string{"foo": "bar"}, + CreationTimestamp: unversioned.Time{Time: time.Now().AddDate(-10, 0, 0)}, + }, + Spec: api.ReplicationControllerSpec{ + Replicas: 1, + Selector: map[string]string{"foo": "bar"}, + Template: &api.PodTemplateSpec{ + ObjectMeta: api.ObjectMeta{ + Labels: map[string]string{"foo": "bar"}, + }, + Spec: api.PodSpec{ + Containers: []api.Container{ + { + Name: "foo", + Image: "someimage", + }, + { + Name: "foo2", + Image: "someimage", + }, + }, + }, + }, + }, + Status: api.ReplicationControllerStatus{ + Replicas: 1, + }, + } + mapper, _ := f.Object(false) + err := f.PrintObject(cmd, mapper, ctrl, os.Stdout) + if err != nil { + fmt.Printf("Unexpected error: %v", err) + } + // Output: + // NAME DESIRED CURRENT AGE + // foo 1 1 10y +} + +func Example_printPodWithWideFormat() { + f, tf, codec := NewAPIFactory() + tf.Printer = kubectl.NewHumanReadablePrinter(false, false, true, false, false, false, []string{}) + tf.Client = &fake.RESTClient{ + Codec: codec, + Client: nil, + } + nodeName := "kubernetes-minion-abcd" + cmd := NewCmdRun(f, os.Stdin, os.Stdout, os.Stderr) + pod := &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: "test1", + CreationTimestamp: unversioned.Time{Time: time.Now().AddDate(-10, 0, 0)}, + }, + Spec: api.PodSpec{ + Containers: make([]api.Container, 2), + NodeName: nodeName, + }, + Status: api.PodStatus{ + Phase: "podPhase", + ContainerStatuses: []api.ContainerStatus{ + {Ready: true, RestartCount: 3, State: api.ContainerState{Running: &api.ContainerStateRunning{}}}, + {RestartCount: 3}, + }, + PodIP: "10.1.1.3", + }, + } + mapper, _ := f.Object(false) + err := f.PrintObject(cmd, mapper, pod, os.Stdout) + if err != nil { + fmt.Printf("Unexpected error: %v", err) + } + // Output: + // NAME READY STATUS RESTARTS AGE IP NODE + // test1 1/2 podPhase 6 10y 10.1.1.3 kubernetes-minion-abcd +} + +func Example_printPodWithShowLabels() { + f, tf, codec := NewAPIFactory() + tf.Printer = kubectl.NewHumanReadablePrinter(false, false, false, false, true, false, []string{}) + tf.Client = &fake.RESTClient{ + Codec: codec, + Client: nil, + } + nodeName := "kubernetes-minion-abcd" + cmd := NewCmdRun(f, os.Stdin, os.Stdout, os.Stderr) + pod := &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: "test1", + CreationTimestamp: unversioned.Time{Time: time.Now().AddDate(-10, 0, 0)}, + Labels: map[string]string{ + "l1": "key", + "l2": "value", + }, + }, + Spec: api.PodSpec{ + Containers: make([]api.Container, 2), + NodeName: nodeName, + }, + Status: api.PodStatus{ + Phase: "podPhase", + ContainerStatuses: []api.ContainerStatus{ + {Ready: true, RestartCount: 3, State: api.ContainerState{Running: &api.ContainerStateRunning{}}}, + {RestartCount: 3}, + }, + }, + } + mapper, _ := f.Object(false) + err := f.PrintObject(cmd, mapper, pod, os.Stdout) + if err != nil { + fmt.Printf("Unexpected error: %v", err) + } + // Output: + // NAME READY STATUS RESTARTS AGE LABELS + // test1 1/2 podPhase 6 10y l1=key,l2=value +} + +func newAllPhasePodList() *api.PodList { + nodeName := "kubernetes-minion-abcd" + return &api.PodList{ + Items: []api.Pod{ + { + ObjectMeta: api.ObjectMeta{ + Name: "test1", + CreationTimestamp: unversioned.Time{Time: time.Now().AddDate(-10, 0, 0)}, + }, + Spec: api.PodSpec{ + Containers: make([]api.Container, 2), + NodeName: nodeName, + }, + Status: api.PodStatus{ + Phase: api.PodPending, + ContainerStatuses: []api.ContainerStatus{ + {Ready: true, RestartCount: 3, State: api.ContainerState{Running: &api.ContainerStateRunning{}}}, + {RestartCount: 3}, + }, + 
}, + }, + { + ObjectMeta: api.ObjectMeta{ + Name: "test2", + CreationTimestamp: unversioned.Time{Time: time.Now().AddDate(-10, 0, 0)}, + }, + Spec: api.PodSpec{ + Containers: make([]api.Container, 2), + NodeName: nodeName, + }, + Status: api.PodStatus{ + Phase: api.PodRunning, + ContainerStatuses: []api.ContainerStatus{ + {Ready: true, RestartCount: 3, State: api.ContainerState{Running: &api.ContainerStateRunning{}}}, + {RestartCount: 3}, + }, + }, + }, + { + ObjectMeta: api.ObjectMeta{ + Name: "test3", + CreationTimestamp: unversioned.Time{Time: time.Now().AddDate(-10, 0, 0)}, + }, + Spec: api.PodSpec{ + Containers: make([]api.Container, 2), + NodeName: nodeName, + }, + Status: api.PodStatus{ + Phase: api.PodSucceeded, + ContainerStatuses: []api.ContainerStatus{ + {Ready: true, RestartCount: 3, State: api.ContainerState{Running: &api.ContainerStateRunning{}}}, + {RestartCount: 3}, + }, + }, + }, + { + ObjectMeta: api.ObjectMeta{ + Name: "test4", + CreationTimestamp: unversioned.Time{Time: time.Now().AddDate(-10, 0, 0)}, + }, + Spec: api.PodSpec{ + Containers: make([]api.Container, 2), + NodeName: nodeName, + }, + Status: api.PodStatus{ + Phase: api.PodFailed, + ContainerStatuses: []api.ContainerStatus{ + {Ready: true, RestartCount: 3, State: api.ContainerState{Running: &api.ContainerStateRunning{}}}, + {RestartCount: 3}, + }, + }, + }, + { + ObjectMeta: api.ObjectMeta{ + Name: "test5", + CreationTimestamp: unversioned.Time{Time: time.Now().AddDate(-10, 0, 0)}, + }, + Spec: api.PodSpec{ + Containers: make([]api.Container, 2), + NodeName: nodeName, + }, + Status: api.PodStatus{ + Phase: api.PodUnknown, + ContainerStatuses: []api.ContainerStatus{ + {Ready: true, RestartCount: 3, State: api.ContainerState{Running: &api.ContainerStateRunning{}}}, + {RestartCount: 3}, + }, + }, + }}, + } +} + +func Example_printPodHideTerminated() { + f, tf, codec := NewAPIFactory() + tf.Printer = kubectl.NewHumanReadablePrinter(false, false, false, false, false, false, []string{}) + tf.Client = &fake.RESTClient{ + Codec: codec, + Client: nil, + } + cmd := NewCmdRun(f, os.Stdin, os.Stdout, os.Stderr) + podList := newAllPhasePodList() + mapper, _ := f.Object(false) + err := f.PrintObject(cmd, mapper, podList, os.Stdout) + if err != nil { + fmt.Printf("Unexpected error: %v", err) + } + // Output: + // NAME READY STATUS RESTARTS AGE + // test1 1/2 Pending 6 10y + // test2 1/2 Running 6 10y + // test5 1/2 Unknown 6 10y +} + +func Example_printPodShowAll() { + f, tf, codec := NewAPIFactory() + tf.Printer = kubectl.NewHumanReadablePrinter(false, false, false, true, false, false, []string{}) + tf.Client = &fake.RESTClient{ + Codec: codec, + Client: nil, + } + cmd := NewCmdRun(f, os.Stdin, os.Stdout, os.Stderr) + podList := newAllPhasePodList() + mapper, _ := f.Object(false) + err := f.PrintObject(cmd, mapper, podList, os.Stdout) + if err != nil { + fmt.Printf("Unexpected error: %v", err) + } + // Output: + // NAME READY STATUS RESTARTS AGE + // test1 1/2 Pending 6 10y + // test2 1/2 Running 6 10y + // test3 1/2 Succeeded 6 10y + // test4 1/2 Failed 6 10y + // test5 1/2 Unknown 6 10y +} + +func Example_printServiceWithNamespacesAndLabels() { + f, tf, codec := NewAPIFactory() + tf.Printer = kubectl.NewHumanReadablePrinter(false, true, false, false, false, false, []string{"l1"}) + tf.Client = &fake.RESTClient{ + Codec: codec, + Client: nil, + } + cmd := NewCmdRun(f, os.Stdin, os.Stdout, os.Stderr) + svc := &api.ServiceList{ + Items: []api.Service{ + { + ObjectMeta: api.ObjectMeta{ + Name: "svc1", + Namespace: "ns1", + 
+				CreationTimestamp: unversioned.Time{Time: time.Now().AddDate(-10, 0, 0)},
+				Labels: map[string]string{
+					"l1": "value",
+				},
+			},
+			Spec: api.ServiceSpec{
+				Ports: []api.ServicePort{
+					{Protocol: "UDP", Port: 53},
+					{Protocol: "TCP", Port: 53},
+				},
+				Selector: map[string]string{
+					"s": "magic",
+				},
+				ClusterIP: "10.1.1.1",
+			},
+			Status: api.ServiceStatus{},
+		},
+		{
+			ObjectMeta: api.ObjectMeta{
+				Name:              "svc2",
+				Namespace:         "ns2",
+				CreationTimestamp: unversioned.Time{Time: time.Now().AddDate(-10, 0, 0)},
+				Labels: map[string]string{
+					"l1": "dolla-bill-yall",
+				},
+			},
+			Spec: api.ServiceSpec{
+				Ports: []api.ServicePort{
+					{Protocol: "TCP", Port: 80},
+					{Protocol: "TCP", Port: 8080},
+				},
+				Selector: map[string]string{
+					"s": "kazam",
+				},
+				ClusterIP: "10.1.1.2",
+			},
+			Status: api.ServiceStatus{},
+		}},
+	}
+	ld := util.NewLineDelimiter(os.Stdout, "|")
+	defer ld.Flush()
+
+	mapper, _ := f.Object(false)
+	err := f.PrintObject(cmd, mapper, svc, ld)
+	if err != nil {
+		fmt.Printf("Unexpected error: %v", err)
+	}
+	// Output:
+	// |NAMESPACE   NAME   CLUSTER-IP   EXTERNAL-IP   PORT(S)           AGE   L1|
+	// |ns1         svc1   10.1.1.1                   53/UDP,53/TCP     10y   value|
+	// |ns2         svc2   10.1.1.2                   80/TCP,8080/TCP   10y   dolla-bill-yall|
+	// ||
+}
+
+func TestNormalizationFuncGlobalExistence(t *testing.T) {
+	// This test can be safely deleted when we no longer support multiple flag formats.
+	root := NewKubectlCommand(cmdutil.NewFactory(nil), os.Stdin, os.Stdout, os.Stderr)
+
+	if root.Parent() != nil {
+		t.Fatal("We expect the root command to be returned")
+	}
+	if root.GlobalNormalizationFunc() == nil {
+		t.Fatal("We expect the root command to have a global normalization function")
+	}
+
+	if reflect.ValueOf(root.GlobalNormalizationFunc()).Pointer() != reflect.ValueOf(root.Flags().GetNormalizeFunc()).Pointer() {
+		t.Fatal("the root command has the wrong normalization function")
+	}
+
+	sub := root
+	for sub.HasSubCommands() {
+		sub = sub.Commands()[0]
+	}
+
+	// If this test fails, check this PR: spf13/cobra#110
+	if reflect.ValueOf(sub.Flags().GetNormalizeFunc()).Pointer() != reflect.ValueOf(root.Flags().GetNormalizeFunc()).Pointer() {
+		t.Fatal("child and root commands should have the same normalization functions")
+	}
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/completion.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/completion.go
new file mode 100644
index 000000000000..678a0791779c
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/completion.go
@@ -0,0 +1,272 @@
+/*
+Copyright 2014 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+	"bytes"
+	"io"
+
+	"github.com/spf13/cobra"
+
+	cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
+)
+
+const (
+	completion_long = `Output shell completion code for the given shell (bash or zsh).
+
+This command prints shell code which must be evaluated to provide interactive
+completion of kubectl commands.
+` + completion_example = ` +$ source <(kubectl completion bash) + +will load the kubectl completion code for bash. Note that this depends on the bash-completion +framework. It must be sourced before sourcing the kubectl completion, i.e. on the Mac: + +$ brew install bash-completion +$ source $(brew --prefix)/etc/bash_completion +$ source <(kubectl completion bash) + +If you use zsh, the following will load kubectl zsh completion: + +$ source <(kubectl completion zsh) +` +) + +var ( + completion_shells = map[string]func(out io.Writer, cmd *cobra.Command) error{ + "bash": runCompletionBash, + "zsh": runCompletionZsh, + } +) + +func NewCmdCompletion(f *cmdutil.Factory, out io.Writer) *cobra.Command { + shells := []string{} + for s := range completion_shells { + shells = append(shells, s) + } + + cmd := &cobra.Command{ + Use: "completion SHELL", + Short: "Output shell completion code for the given shell (bash or zsh)", + Long: completion_long, + Example: completion_example, + Run: func(cmd *cobra.Command, args []string) { + err := RunCompletion(f, out, cmd, args) + cmdutil.CheckErr(err) + }, + ValidArgs: shells, + } + + return cmd +} + +func RunCompletion(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []string) error { + if len(args) == 0 { + return cmdutil.UsageError(cmd, "Shell not specified.") + } + if len(args) > 1 { + return cmdutil.UsageError(cmd, "Too many arguments. Expected only the shell type.") + } + run, found := completion_shells[args[0]] + if !found { + return cmdutil.UsageError(cmd, "Unsupported shell type %q.", args[0]) + } + + return run(out, cmd.Parent()) +} + +func runCompletionBash(out io.Writer, kubectl *cobra.Command) error { + return kubectl.GenBashCompletion(out) +} + +func runCompletionZsh(out io.Writer, kubectl *cobra.Command) error { + zsh_initialilzation := `# Copyright 2016 The Kubernetes Authors All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +__kubectl_bash_source() { + alias shopt=':' + alias _expand=_bash_expand + alias _complete=_bash_comp + emulate -L sh + setopt kshglob noshglob braceexpand + + source "$@" +} + +__kubectl_type() { + # -t is not supported by zsh + if [ "$1" == "-t" ]; then + shift + + # fake Bash 4 to disable "complete -o nospace". Instead + # "compopt +-o nospace" is used in the code to toggle trailing + # spaces. We don't support that, but leave trailing spaces on + # all the time + if [ "$1" = "__kubectl_compopt" ]; then + echo builtin + return 0 + fi + fi + type "$@" +} + +__kubectl_compgen() { + local completions w + completions=( $(compgen "$@") ) || return $? + + # filter by given word as prefix + while [[ "$1" = -* && "$1" != -- ]]; do + shift + shift + done + if [[ "$1" == -- ]]; then + shift + fi + for w in "${completions[@]}"; do + if [[ "${w}" = "$1"* ]]; then + echo "${w}" + fi + done +} + +__kubectl_compopt() { + true # don't do anything. 
Not supported by bashcompinit in zsh +} + +__kubectl_declare() { + if [ "$1" == "-F" ]; then + whence -w "$@" + else + builtin declare "$@" + fi +} + +__kubectl_ltrim_colon_completions() +{ + if [[ "$1" == *:* && "$COMP_WORDBREAKS" == *:* ]]; then + # Remove colon-word prefix from COMPREPLY items + local colon_word=${1%${1##*:}} + local i=${#COMPREPLY[*]} + while [[ $((--i)) -ge 0 ]]; do + COMPREPLY[$i]=${COMPREPLY[$i]#"$colon_word"} + done + fi +} + +__kubectl_get_comp_words_by_ref() { + cur="${COMP_WORDS[COMP_CWORD]}" + prev="${COMP_WORDS[${COMP_CWORD}-1]}" + words=("${COMP_WORDS[@]}") + cword=("${COMP_CWORD[@]}") +} + +__kubectl_filedir() { + local RET OLD_IFS w qw + + __debug "_filedir $@ cur=$cur" + if [[ "$1" = \~* ]]; then + # somehow does not work. Maybe, zsh does not call this at all + eval echo "$1" + return 0 + fi + + OLD_IFS="$IFS" + IFS=$'\n' + if [ "$1" = "-d" ]; then + shift + RET=( $(compgen -d) ) + else + RET=( $(compgen -f) ) + fi + IFS="$OLD_IFS" + + IFS="," __debug "RET=${RET[@]} len=${#RET[@]}" + + for w in ${RET[@]}; do + if [[ ! "${w}" = "${cur}"* ]]; then + continue + fi + if eval "[[ \"\${w}\" = *.$1 || -d \"\${w}\" ]]"; then + qw="$(__kubectl_quote "${w}")" + if [ -d "${w}" ]; then + COMPREPLY+=("${qw}/") + else + COMPREPLY+=("${qw}") + fi + fi + done +} + +__kubectl_quote() { + if [[ $1 == \'* || $1 == \"* ]]; then + # Leave out first character + printf %q "${1:1}" + else + printf %q "$1" + fi +} + +autoload -U +X compinit && compinit +autoload -U +X bashcompinit && bashcompinit + +# use word boundary patterns for BSD or GNU sed +LWORD='[[:<:]]' +RWORD='[[:>:]]' +if sed --help 2>&1 | grep -q GNU; then + LWORD='\<' + RWORD='\>' +fi + +__kubectl_bash_source <(sed \ + -e 's/declare -F/whence -w/' \ + -e 's/local \([a-zA-Z0-9_]*\)=/local \1; \1=/' \ + -e 's/flags+=("\(--.*\)=")/flags+=("\1"); two_word_flags+=("\1")/' \ + -e 's/must_have_one_flag+=("\(--.*\)=")/must_have_one_flag+=("\1")/' \ + -e "s/${LWORD}_filedir${RWORD}/__kubectl_filedir/g" \ + -e "s/${LWORD}_get_comp_words_by_ref${RWORD}/__kubectl_get_comp_words_by_ref/g" \ + -e "s/${LWORD}__ltrim_colon_completions${RWORD}/__kubectl_ltrim_colon_completions/g" \ + -e "s/${LWORD}compgen${RWORD}/__kubectl_compgen/g" \ + -e "s/${LWORD}compopt${RWORD}/__kubectl_compopt/g" \ + -e "s/${LWORD}declare${RWORD}/__kubectl_declare/g" \ + -e "s/\\\$(type${RWORD}/\$(__kubectl_type/g" \ + <<'BASH_COMPLETION_EOF' +` + out.Write([]byte(zsh_initialilzation)) + + buf := new(bytes.Buffer) + kubectl.GenBashCompletion(buf) + out.Write(buf.Bytes()) + + zsh_tail := ` +BASH_COMPLETION_EOF +) +` + out.Write([]byte(zsh_tail)) + return nil +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/config/config.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/config/config.go index 47eeecf5c072..a28563468381 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/config/config.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/config/config.go @@ -17,50 +17,16 @@ limitations under the License. 
package config import ( - "errors" "io" - "os" "path" - "path/filepath" - "reflect" "strconv" - "github.com/golang/glog" "github.com/spf13/cobra" "k8s.io/kubernetes/pkg/client/unversioned/clientcmd" - clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api" ) -type PathOptions struct { - // GlobalFile is the full path to the file to load as the global (final) option - GlobalFile string - // EnvVar is the env var name that points to the list of kubeconfig files to load - EnvVar string - // ExplicitFileFlag is the name of the flag to use for prompting for the kubeconfig file - ExplicitFileFlag string - - // GlobalFileSubpath is an optional value used for displaying help - GlobalFileSubpath string - - LoadingRules *clientcmd.ClientConfigLoadingRules -} - -// ConfigAccess is used by subcommands and methods in this package to load and modify the appropriate config files -type ConfigAccess interface { - // GetLoadingPrecedence returns the slice of files that should be used for loading and inspecting the config - GetLoadingPrecedence() []string - // GetStartingConfig returns the config that subcommands should being operating against. It may or may not be merged depending on loading rules - GetStartingConfig() (*clientcmdapi.Config, error) - // GetDefaultFilename returns the name of the file you should write into (create if necessary), if you're trying to create a new stanza as opposed to updating an existing one. - GetDefaultFilename() string - // IsExplicitFile indicates whether or not this command is interested in exactly one file. This implementation only ever does that via a flag, but implementations that handle local, global, and flags may have more - IsExplicitFile() bool - // GetExplicitFile returns the particular file this command is operating against. 
This implementation only ever has one, but implementations that handle local, global, and flags may have more - GetExplicitFile() string -} - -func NewCmdConfig(pathOptions *clientcmd.PathOptions, out io.Writer) *cobra.Command { if len(pathOptions.ExplicitFileFlag) == 0 { pathOptions.ExplicitFileFlag = clientcmd.RecommendedConfigPathFlag } @@ -95,345 +61,6 @@ The loading order follows these rules: return cmd } -func NewDefaultPathOptions() *PathOptions { - ret := &PathOptions{ - GlobalFile: clientcmd.RecommendedHomeFile, - EnvVar: clientcmd.RecommendedConfigPathEnvVar, - ExplicitFileFlag: clientcmd.RecommendedConfigPathFlag, - - GlobalFileSubpath: path.Join(clientcmd.RecommendedHomeDir, clientcmd.RecommendedFileName), - - LoadingRules: clientcmd.NewDefaultClientConfigLoadingRules(), - } - ret.LoadingRules.DoNotResolvePaths = true - - return ret -} - -func (o *PathOptions) GetEnvVarFiles() []string { - if len(o.EnvVar) == 0 { - return []string{} - } - - envVarValue := os.Getenv(o.EnvVar) - if len(envVarValue) == 0 { - return []string{} - } - - return filepath.SplitList(envVarValue) -} - -func (o *PathOptions) GetLoadingPrecedence() []string { - if envVarFiles := o.GetEnvVarFiles(); len(envVarFiles) > 0 { - return envVarFiles - } - - return []string{o.GlobalFile} -} - -func (o *PathOptions) GetStartingConfig() (*clientcmdapi.Config, error) { - // don't mutate the original - loadingRules := *o.LoadingRules - loadingRules.Precedence = o.GetLoadingPrecedence() - - clientConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(&loadingRules, &clientcmd.ConfigOverrides{}) - rawConfig, err := clientConfig.RawConfig() - if os.IsNotExist(err) { - return clientcmdapi.NewConfig(), nil - } - if err != nil { - return nil, err - } - - return &rawConfig, nil -} - -func (o *PathOptions) GetDefaultFilename() string { - if o.IsExplicitFile() { - return o.GetExplicitFile() - } - - if envVarFiles := o.GetEnvVarFiles(); len(envVarFiles) > 0 { - if len(envVarFiles) == 1 { - return envVarFiles[0] - } - - // if any of the envvar files already exists, return it - for _, envVarFile := range envVarFiles { - if _, err := os.Stat(envVarFile); err == nil { - return envVarFile - } - } - - // otherwise, return the last one in the list - return envVarFiles[len(envVarFiles)-1] - } - - return o.GlobalFile -} - -func (o *PathOptions) IsExplicitFile() bool { - if len(o.LoadingRules.ExplicitPath) > 0 { - return true - } - - return false -} - -func (o *PathOptions) GetExplicitFile() string { - return o.LoadingRules.ExplicitPath -} - -// ModifyConfig takes a Config object, iterates through Clusters, AuthInfos, and Contexts, uses the LocationOfOrigin if specified or -// uses the default destination file to write the results into. This results in multiple file reads, but it's very easy to follow. -// Preferences and CurrentContext should always be set in the default destination file. Since we can't distinguish between empty and missing values -// (no nil strings), we're forced to have separate handling for them. In the kubeconfig cases, newConfig should have at most one difference, -// that means that this code will only write into a single file. If you want to relativizePaths, you must provide a fully qualified path in any -// modified element.
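With PathOptions and ConfigAccess moving out of this package, callers now obtain both from clientcmd. A minimal sketch of the new wiring (import paths taken from the hunks in this file; the printed fields use the accessor methods shown above):

package main

import (
	"fmt"
	"os"

	"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
	"k8s.io/kubernetes/pkg/kubectl/cmd/config"
)

func main() {
	// PathOptions satisfies the ConfigAccess interface consumed by the
	// config subcommands.
	pathOptions := clientcmd.NewDefaultPathOptions()
	fmt.Println("default write target:", pathOptions.GetDefaultFilename())
	fmt.Println("loading precedence:", pathOptions.GetLoadingPrecedence())

	cmd := config.NewCmdConfig(pathOptions, os.Stdout)
	_ = cmd // wire into a root command as needed
}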
-func ModifyConfig(configAccess ConfigAccess, newConfig clientcmdapi.Config, relativizePaths bool) error { - startingConfig, err := configAccess.GetStartingConfig() - if err != nil { - return err - } - - // We need to find all differences, locate their original files, read a partial config to modify only that stanza and write out the file. - // Special case the test for current context and preferences since those always write to the default file. - if reflect.DeepEqual(*startingConfig, newConfig) { - // nothing to do - return nil - } - - if startingConfig.CurrentContext != newConfig.CurrentContext { - if err := writeCurrentContext(configAccess, newConfig.CurrentContext); err != nil { - return err - } - } - - if !reflect.DeepEqual(startingConfig.Preferences, newConfig.Preferences) { - if err := writePreferences(configAccess, newConfig.Preferences); err != nil { - return err - } - } - - // Search every cluster, authInfo, and context. First from new to old for differences, then from old to new for deletions - for key, cluster := range newConfig.Clusters { - startingCluster, exists := startingConfig.Clusters[key] - if !reflect.DeepEqual(cluster, startingCluster) || !exists { - destinationFile := cluster.LocationOfOrigin - if len(destinationFile) == 0 { - destinationFile = configAccess.GetDefaultFilename() - } - - configToWrite := getConfigFromFileOrDie(destinationFile) - t := *cluster - - configToWrite.Clusters[key] = &t - configToWrite.Clusters[key].LocationOfOrigin = destinationFile - if relativizePaths { - if err := clientcmd.RelativizeClusterLocalPaths(configToWrite.Clusters[key]); err != nil { - return err - } - } - - if err := clientcmd.WriteToFile(*configToWrite, destinationFile); err != nil { - return err - } - } - } - - for key, context := range newConfig.Contexts { - startingContext, exists := startingConfig.Contexts[key] - if !reflect.DeepEqual(context, startingContext) || !exists { - destinationFile := context.LocationOfOrigin - if len(destinationFile) == 0 { - destinationFile = configAccess.GetDefaultFilename() - } - - configToWrite := getConfigFromFileOrDie(destinationFile) - configToWrite.Contexts[key] = context - - if err := clientcmd.WriteToFile(*configToWrite, destinationFile); err != nil { - return err - } - } - } - - for key, authInfo := range newConfig.AuthInfos { - startingAuthInfo, exists := startingConfig.AuthInfos[key] - if !reflect.DeepEqual(authInfo, startingAuthInfo) || !exists { - destinationFile := authInfo.LocationOfOrigin - if len(destinationFile) == 0 { - destinationFile = configAccess.GetDefaultFilename() - } - - configToWrite := getConfigFromFileOrDie(destinationFile) - t := *authInfo - configToWrite.AuthInfos[key] = &t - configToWrite.AuthInfos[key].LocationOfOrigin = destinationFile - if relativizePaths { - if err := clientcmd.RelativizeAuthInfoLocalPaths(configToWrite.AuthInfos[key]); err != nil { - return err - } - } - - if err := clientcmd.WriteToFile(*configToWrite, destinationFile); err != nil { - return err - } - } - } - - for key, cluster := range startingConfig.Clusters { - if _, exists := newConfig.Clusters[key]; !exists { - destinationFile := cluster.LocationOfOrigin - if len(destinationFile) == 0 { - destinationFile = configAccess.GetDefaultFilename() - } - - configToWrite := getConfigFromFileOrDie(destinationFile) - delete(configToWrite.Clusters, key) - - if err := clientcmd.WriteToFile(*configToWrite, destinationFile); err != nil { - return err - } - } - } - - for key, context := range startingConfig.Contexts { - if _, exists := 
newConfig.Contexts[key]; !exists { - destinationFile := context.LocationOfOrigin - if len(destinationFile) == 0 { - destinationFile = configAccess.GetDefaultFilename() - } - - configToWrite := getConfigFromFileOrDie(destinationFile) - delete(configToWrite.Contexts, key) - - if err := clientcmd.WriteToFile(*configToWrite, destinationFile); err != nil { - return err - } - } - } - - for key, authInfo := range startingConfig.AuthInfos { - if _, exists := newConfig.AuthInfos[key]; !exists { - destinationFile := authInfo.LocationOfOrigin - if len(destinationFile) == 0 { - destinationFile = configAccess.GetDefaultFilename() - } - - configToWrite := getConfigFromFileOrDie(destinationFile) - delete(configToWrite.AuthInfos, key) - - if err := clientcmd.WriteToFile(*configToWrite, destinationFile); err != nil { - return err - } - } - } - - return nil -} - -// writeCurrentContext takes three possible paths. -// If newCurrentContext is the same as the startingConfig's current context, then we exit. -// If newCurrentContext has a value, then that value is written into the default destination file. -// If newCurrentContext is empty, then we find the config file that is setting the CurrentContext and clear the value from that file -func writeCurrentContext(configAccess ConfigAccess, newCurrentContext string) error { - if startingConfig, err := configAccess.GetStartingConfig(); err != nil { - return err - } else if startingConfig.CurrentContext == newCurrentContext { - return nil - } - - if configAccess.IsExplicitFile() { - file := configAccess.GetExplicitFile() - currConfig := getConfigFromFileOrDie(file) - currConfig.CurrentContext = newCurrentContext - if err := clientcmd.WriteToFile(*currConfig, file); err != nil { - return err - } - - return nil - } - - if len(newCurrentContext) > 0 { - destinationFile := configAccess.GetDefaultFilename() - config := getConfigFromFileOrDie(destinationFile) - config.CurrentContext = newCurrentContext - - if err := clientcmd.WriteToFile(*config, destinationFile); err != nil { - return err - } - - return nil - } - - // we're supposed to be clearing the current context. 
We need to find the first spot in the chain that is setting it and clear it - for _, file := range configAccess.GetLoadingPrecedence() { - if _, err := os.Stat(file); err == nil { - currConfig := getConfigFromFileOrDie(file) - - if len(currConfig.CurrentContext) > 0 { - currConfig.CurrentContext = newCurrentContext - if err := clientcmd.WriteToFile(*currConfig, file); err != nil { - return err - } - - return nil - } - } - } - - return errors.New("no config found to write context") -} - -func writePreferences(configAccess ConfigAccess, newPrefs clientcmdapi.Preferences) error { - if startingConfig, err := configAccess.GetStartingConfig(); err != nil { - return err - } else if reflect.DeepEqual(startingConfig.Preferences, newPrefs) { - return nil - } - - if configAccess.IsExplicitFile() { - file := configAccess.GetExplicitFile() - currConfig := getConfigFromFileOrDie(file) - currConfig.Preferences = newPrefs - if err := clientcmd.WriteToFile(*currConfig, file); err != nil { - return err - } - - return nil - } - - for _, file := range configAccess.GetLoadingPrecedence() { - currConfig := getConfigFromFileOrDie(file) - - if !reflect.DeepEqual(currConfig.Preferences, newPrefs) { - currConfig.Preferences = newPrefs - if err := clientcmd.WriteToFile(*currConfig, file); err != nil { - return err - } - - return nil - } - } - - return errors.New("no config found to write preferences") -} - -// getConfigFromFileOrDie tries to read a kubeconfig file and if it can't, it calls exit. One exception, missing files result in empty configs, not an exit -func getConfigFromFileOrDie(filename string) *clientcmdapi.Config { - config, err := clientcmd.LoadFromFile(filename) - if err != nil && !os.IsNotExist(err) { - glog.FatalDepth(1, err) - } - - if config == nil { - return clientcmdapi.NewConfig() - } - - return config -} - func toBool(propertyValue string) (bool, error) { boolValue := false if len(propertyValue) != 0 { diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/config/config_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/config/config_test.go new file mode 100644 index 000000000000..33ba5f4759f8 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/config/config_test.go @@ -0,0 +1,851 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
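The relocated clientcmd.ModifyConfig keeps the contract documented above: load the merged starting config, mutate it, and let ModifyConfig route each changed stanza back to the file it came from. A sketch of the typical caller, mirroring the use-context hunk later in this diff (assumes the clientcmd import shown there):

func switchContext(access clientcmd.ConfigAccess, contextName string) error {
	// GetStartingConfig returns the merged view of all kubeconfig files;
	// ModifyConfig diffs it against disk and writes only what changed.
	config, err := access.GetStartingConfig()
	if err != nil {
		return err
	}
	config.CurrentContext = contextName
	return clientcmd.ModifyConfig(access, *config, true)
}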
+*/ + +package config + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "path" + "reflect" + "strings" + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/client/unversioned/clientcmd" + clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api" + "k8s.io/kubernetes/pkg/util/diff" +) + +func newRedFederalCowHammerConfig() clientcmdapi.Config { + return clientcmdapi.Config{ + AuthInfos: map[string]*clientcmdapi.AuthInfo{ + "red-user": {Token: "red-token"}}, + Clusters: map[string]*clientcmdapi.Cluster{ + "cow-cluster": {Server: "http://cow.org:8080"}}, + Contexts: map[string]*clientcmdapi.Context{ + "federal-context": {AuthInfo: "red-user", Cluster: "cow-cluster"}}, + CurrentContext: "federal-context", + } +} + +func Example_view() { + expectedConfig := newRedFederalCowHammerConfig() + test := configCommandTest{ + args: []string{"view"}, + startingConfig: newRedFederalCowHammerConfig(), + expectedConfig: expectedConfig, + } + + output := test.run(nil) + fmt.Printf("%v", output) + // Output: + // apiVersion: v1 + // clusters: + // - cluster: + // server: http://cow.org:8080 + // name: cow-cluster + // contexts: + // - context: + // cluster: cow-cluster + // user: red-user + // name: federal-context + // current-context: federal-context + // kind: Config + // preferences: {} + // users: + // - name: red-user + // user: + // token: red-token +} + +func TestCurrentContext(t *testing.T) { + startingConfig := newRedFederalCowHammerConfig() + test := configCommandTest{ + args: []string{"current-context"}, + startingConfig: startingConfig, + expectedConfig: startingConfig, + expectedOutputs: []string{startingConfig.CurrentContext}, + } + test.run(t) +} + +func TestSetCurrentContext(t *testing.T) { + expectedConfig := newRedFederalCowHammerConfig() + startingConfig := newRedFederalCowHammerConfig() + + newContextName := "the-new-context" + + startingConfig.Contexts[newContextName] = clientcmdapi.NewContext() + expectedConfig.Contexts[newContextName] = clientcmdapi.NewContext() + + expectedConfig.CurrentContext = newContextName + + test := configCommandTest{ + args: []string{"use-context", "the-new-context"}, + startingConfig: startingConfig, + expectedConfig: expectedConfig, + } + + test.run(t) +} + +func TestSetNonExistentContext(t *testing.T) { + expectedConfig := newRedFederalCowHammerConfig() + test := configCommandTest{ + args: []string{"use-context", "non-existent-config"}, + startingConfig: expectedConfig, + expectedConfig: expectedConfig, + expectedOutputs: []string{`no context exists with the name: "non-existent-config"`}, + } + test.run(t) +} + +func TestSetIntoExistingStruct(t *testing.T) { + expectedConfig := newRedFederalCowHammerConfig() + expectedConfig.AuthInfos["red-user"].Password = "new-path-value" + test := configCommandTest{ + args: []string{"set", "users.red-user.password", "new-path-value"}, + startingConfig: newRedFederalCowHammerConfig(), + expectedConfig: expectedConfig, + } + + test.run(t) +} + +func TestSetWithPathPrefixIntoExistingStruct(t *testing.T) { + expectedConfig := newRedFederalCowHammerConfig() + expectedConfig.Clusters["cow-cluster"].Server = "http://cow.org:8080/foo/baz" + test := configCommandTest{ + args: []string{"set", "clusters.cow-cluster.server", "http://cow.org:8080/foo/baz"}, + startingConfig: newRedFederalCowHammerConfig(), + expectedConfig: expectedConfig, + } + + test.run(t) + + dc := clientcmd.NewDefaultClientConfig(expectedConfig, &clientcmd.ConfigOverrides{}) + dcc, err := dc.ClientConfig() + if err != nil { + 
t.Fatalf("unexpected error: %v", err) + } + expectedHost := "http://cow.org:8080/foo/baz" + if expectedHost != dcc.Host { + t.Fatalf("expected client.Config.Host = %q instead of %q", expectedHost, dcc.Host) + } +} + +func TestUnsetStruct(t *testing.T) { + expectedConfig := newRedFederalCowHammerConfig() + delete(expectedConfig.AuthInfos, "red-user") + test := configCommandTest{ + args: []string{"unset", "users.red-user"}, + startingConfig: newRedFederalCowHammerConfig(), + expectedConfig: expectedConfig, + } + + test.run(t) +} + +func TestUnsetField(t *testing.T) { + expectedConfig := newRedFederalCowHammerConfig() + expectedConfig.AuthInfos["red-user"] = clientcmdapi.NewAuthInfo() + test := configCommandTest{ + args: []string{"unset", "users.red-user.token"}, + startingConfig: newRedFederalCowHammerConfig(), + expectedConfig: expectedConfig, + } + + test.run(t) +} + +func TestSetIntoNewStruct(t *testing.T) { + expectedConfig := newRedFederalCowHammerConfig() + cluster := clientcmdapi.NewCluster() + cluster.Server = "new-server-value" + expectedConfig.Clusters["big-cluster"] = cluster + test := configCommandTest{ + args: []string{"set", "clusters.big-cluster.server", "new-server-value"}, + startingConfig: newRedFederalCowHammerConfig(), + expectedConfig: expectedConfig, + } + + test.run(t) +} + +func TestSetBoolean(t *testing.T) { + expectedConfig := newRedFederalCowHammerConfig() + cluster := clientcmdapi.NewCluster() + cluster.InsecureSkipTLSVerify = true + expectedConfig.Clusters["big-cluster"] = cluster + test := configCommandTest{ + args: []string{"set", "clusters.big-cluster.insecure-skip-tls-verify", "true"}, + startingConfig: newRedFederalCowHammerConfig(), + expectedConfig: expectedConfig, + } + + test.run(t) +} + +func TestSetIntoNewConfig(t *testing.T) { + expectedConfig := *clientcmdapi.NewConfig() + context := clientcmdapi.NewContext() + context.AuthInfo = "fake-user" + expectedConfig.Contexts["new-context"] = context + test := configCommandTest{ + args: []string{"set", "contexts.new-context.user", "fake-user"}, + startingConfig: *clientcmdapi.NewConfig(), + expectedConfig: expectedConfig, + } + + test.run(t) +} + +func TestNewEmptyAuth(t *testing.T) { + expectedConfig := *clientcmdapi.NewConfig() + expectedConfig.AuthInfos["the-user-name"] = clientcmdapi.NewAuthInfo() + test := configCommandTest{ + args: []string{"set-credentials", "the-user-name"}, + startingConfig: *clientcmdapi.NewConfig(), + expectedConfig: expectedConfig, + } + + test.run(t) +} + +func TestAdditionalAuth(t *testing.T) { + expectedConfig := newRedFederalCowHammerConfig() + authInfo := clientcmdapi.NewAuthInfo() + authInfo.Token = "token" + expectedConfig.AuthInfos["another-user"] = authInfo + test := configCommandTest{ + args: []string{"set-credentials", "another-user", "--" + clientcmd.FlagBearerToken + "=token"}, + startingConfig: newRedFederalCowHammerConfig(), + expectedConfig: expectedConfig, + } + + test.run(t) +} + +func TestEmbedClientCert(t *testing.T) { + fakeCertFile, _ := ioutil.TempFile("", "") + defer os.Remove(fakeCertFile.Name()) + fakeData := []byte("fake-data") + ioutil.WriteFile(fakeCertFile.Name(), fakeData, 0600) + expectedConfig := newRedFederalCowHammerConfig() + authInfo := clientcmdapi.NewAuthInfo() + authInfo.ClientCertificateData = fakeData + expectedConfig.AuthInfos["another-user"] = authInfo + + test := configCommandTest{ + args: []string{"set-credentials", "another-user", "--" + clientcmd.FlagCertFile + "=" + fakeCertFile.Name(), "--" + clientcmd.FlagEmbedCerts + "=true"}, + 
startingConfig: newRedFederalCowHammerConfig(), + expectedConfig: expectedConfig, + } + + test.run(t) +} + +func TestEmbedClientKey(t *testing.T) { + fakeKeyFile, _ := ioutil.TempFile("", "") + defer os.Remove(fakeKeyFile.Name()) + fakeData := []byte("fake-data") + ioutil.WriteFile(fakeKeyFile.Name(), fakeData, 0600) + expectedConfig := newRedFederalCowHammerConfig() + authInfo := clientcmdapi.NewAuthInfo() + authInfo.ClientKeyData = fakeData + expectedConfig.AuthInfos["another-user"] = authInfo + + test := configCommandTest{ + args: []string{"set-credentials", "another-user", "--" + clientcmd.FlagKeyFile + "=" + fakeKeyFile.Name(), "--" + clientcmd.FlagEmbedCerts + "=true"}, + startingConfig: newRedFederalCowHammerConfig(), + expectedConfig: expectedConfig, + } + + test.run(t) +} + +func TestEmbedNoKeyOrCertDisallowed(t *testing.T) { + expectedConfig := newRedFederalCowHammerConfig() + test := configCommandTest{ + args: []string{"set-credentials", "another-user", "--" + clientcmd.FlagEmbedCerts + "=true"}, + startingConfig: newRedFederalCowHammerConfig(), + expectedConfig: expectedConfig, + expectedOutputs: []string{"--client-certificate", "--client-key", "embed"}, + } + + test.run(t) +} + +func TestEmptyTokenAndCertAllowed(t *testing.T) { + fakeCertFile, _ := ioutil.TempFile("", "cert-file") + + expectedConfig := newRedFederalCowHammerConfig() + authInfo := clientcmdapi.NewAuthInfo() + authInfo.ClientCertificate = path.Base(fakeCertFile.Name()) + expectedConfig.AuthInfos["another-user"] = authInfo + + test := configCommandTest{ + args: []string{"set-credentials", "another-user", "--" + clientcmd.FlagCertFile + "=" + fakeCertFile.Name(), "--" + clientcmd.FlagBearerToken + "="}, + startingConfig: newRedFederalCowHammerConfig(), + expectedConfig: expectedConfig, + } + + test.run(t) +} + +func TestTokenAndCertAllowed(t *testing.T) { + expectedConfig := newRedFederalCowHammerConfig() + authInfo := clientcmdapi.NewAuthInfo() + authInfo.Token = "token" + authInfo.ClientCertificate = "/cert-file" + expectedConfig.AuthInfos["another-user"] = authInfo + test := configCommandTest{ + args: []string{"set-credentials", "another-user", "--" + clientcmd.FlagCertFile + "=/cert-file", "--" + clientcmd.FlagBearerToken + "=token"}, + startingConfig: newRedFederalCowHammerConfig(), + expectedConfig: expectedConfig, + } + + test.run(t) +} + +func TestTokenAndBasicDisallowed(t *testing.T) { + expectedConfig := newRedFederalCowHammerConfig() + test := configCommandTest{ + args: []string{"set-credentials", "another-user", "--" + clientcmd.FlagUsername + "=myuser", "--" + clientcmd.FlagBearerToken + "=token"}, + startingConfig: newRedFederalCowHammerConfig(), + expectedConfig: expectedConfig, + expectedOutputs: []string{"--token", "--username"}, + } + + test.run(t) +} + +func TestBasicClearsToken(t *testing.T) { + authInfoWithToken := clientcmdapi.NewAuthInfo() + authInfoWithToken.Token = "token" + + authInfoWithBasic := clientcmdapi.NewAuthInfo() + authInfoWithBasic.Username = "myuser" + authInfoWithBasic.Password = "mypass" + + startingConfig := newRedFederalCowHammerConfig() + startingConfig.AuthInfos["another-user"] = authInfoWithToken + + expectedConfig := newRedFederalCowHammerConfig() + expectedConfig.AuthInfos["another-user"] = authInfoWithBasic + + test := configCommandTest{ + args: []string{"set-credentials", "another-user", "--" + clientcmd.FlagUsername + "=myuser", "--" + clientcmd.FlagPassword + "=mypass"}, + startingConfig: startingConfig, + expectedConfig: expectedConfig, + } + + test.run(t) +} + 
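Each of these cases drives the same configCommandTest harness: build the expected config by hand, run the CLI args against a temp kubeconfig, and deep-compare the result. A hypothetical extra case (the test name and namespace value are invented for illustration) follows the same shape:

func TestSetContextNamespaceSketch(t *testing.T) {
	// Contexts holds pointers, so the expected field can be set in place.
	expectedConfig := newRedFederalCowHammerConfig()
	expectedConfig.Contexts["federal-context"].Namespace = "hammer-ns"

	test := configCommandTest{
		args:           []string{"set", "contexts.federal-context.namespace", "hammer-ns"},
		startingConfig: newRedFederalCowHammerConfig(),
		expectedConfig: expectedConfig,
	}
	test.run(t)
}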
+func TestTokenClearsBasic(t *testing.T) { + authInfoWithBasic := clientcmdapi.NewAuthInfo() + authInfoWithBasic.Username = "myuser" + authInfoWithBasic.Password = "mypass" + + authInfoWithToken := clientcmdapi.NewAuthInfo() + authInfoWithToken.Token = "token" + + startingConfig := newRedFederalCowHammerConfig() + startingConfig.AuthInfos["another-user"] = authInfoWithBasic + + expectedConfig := newRedFederalCowHammerConfig() + expectedConfig.AuthInfos["another-user"] = authInfoWithToken + + test := configCommandTest{ + args: []string{"set-credentials", "another-user", "--" + clientcmd.FlagBearerToken + "=token"}, + startingConfig: startingConfig, + expectedConfig: expectedConfig, + } + + test.run(t) +} + +func TestTokenLeavesCert(t *testing.T) { + authInfoWithCerts := clientcmdapi.NewAuthInfo() + authInfoWithCerts.ClientCertificate = "cert" + authInfoWithCerts.ClientCertificateData = []byte("certdata") + authInfoWithCerts.ClientKey = "key" + authInfoWithCerts.ClientKeyData = []byte("keydata") + + authInfoWithTokenAndCerts := clientcmdapi.NewAuthInfo() + authInfoWithTokenAndCerts.Token = "token" + authInfoWithTokenAndCerts.ClientCertificate = "cert" + authInfoWithTokenAndCerts.ClientCertificateData = []byte("certdata") + authInfoWithTokenAndCerts.ClientKey = "key" + authInfoWithTokenAndCerts.ClientKeyData = []byte("keydata") + + startingConfig := newRedFederalCowHammerConfig() + startingConfig.AuthInfos["another-user"] = authInfoWithCerts + + expectedConfig := newRedFederalCowHammerConfig() + expectedConfig.AuthInfos["another-user"] = authInfoWithTokenAndCerts + + test := configCommandTest{ + args: []string{"set-credentials", "another-user", "--" + clientcmd.FlagBearerToken + "=token"}, + startingConfig: startingConfig, + expectedConfig: expectedConfig, + } + + test.run(t) +} + +func TestCertLeavesToken(t *testing.T) { + authInfoWithToken := clientcmdapi.NewAuthInfo() + authInfoWithToken.Token = "token" + + authInfoWithTokenAndCerts := clientcmdapi.NewAuthInfo() + authInfoWithTokenAndCerts.Token = "token" + authInfoWithTokenAndCerts.ClientCertificate = "/cert" + authInfoWithTokenAndCerts.ClientKey = "/key" + + startingConfig := newRedFederalCowHammerConfig() + startingConfig.AuthInfos["another-user"] = authInfoWithToken + + expectedConfig := newRedFederalCowHammerConfig() + expectedConfig.AuthInfos["another-user"] = authInfoWithTokenAndCerts + + test := configCommandTest{ + args: []string{"set-credentials", "another-user", "--" + clientcmd.FlagCertFile + "=/cert", "--" + clientcmd.FlagKeyFile + "=/key"}, + startingConfig: startingConfig, + expectedConfig: expectedConfig, + } + + test.run(t) +} + +func TestSetBytesBad(t *testing.T) { + startingConfig := newRedFederalCowHammerConfig() + startingConfig.Clusters["another-cluster"] = clientcmdapi.NewCluster() + + test := configCommandTest{ + args: []string{"set", "clusters.another-cluster.certificate-authority-data", "cadata"}, + startingConfig: startingConfig, + expectedConfig: startingConfig, + } + + test.run(t) +} + +func TestSetBytes(t *testing.T) { + clusterInfoWithCAData := clientcmdapi.NewCluster() + clusterInfoWithCAData.CertificateAuthorityData = []byte("cadata") + + startingConfig := newRedFederalCowHammerConfig() + startingConfig.Clusters["another-cluster"] = clientcmdapi.NewCluster() + + expectedConfig := newRedFederalCowHammerConfig() + expectedConfig.Clusters["another-cluster"] = clusterInfoWithCAData + + test := configCommandTest{ + args: []string{"set", "clusters.another-cluster.certificate-authority-data", "cadata", 
"--set-raw-bytes"}, + startingConfig: startingConfig, + expectedConfig: expectedConfig, + } + + test.run(t) +} + +func TestSetBase64Bytes(t *testing.T) { + clusterInfoWithCAData := clientcmdapi.NewCluster() + clusterInfoWithCAData.CertificateAuthorityData = []byte("cadata") + + startingConfig := newRedFederalCowHammerConfig() + startingConfig.Clusters["another-cluster"] = clientcmdapi.NewCluster() + + expectedConfig := newRedFederalCowHammerConfig() + expectedConfig.Clusters["another-cluster"] = clusterInfoWithCAData + + test := configCommandTest{ + args: []string{"set", "clusters.another-cluster.certificate-authority-data", "Y2FkYXRh"}, + startingConfig: startingConfig, + expectedConfig: expectedConfig, + } + + test.run(t) +} + +func TestUnsetBytes(t *testing.T) { + clusterInfoWithCAData := clientcmdapi.NewCluster() + clusterInfoWithCAData.CertificateAuthorityData = []byte("cadata") + + startingConfig := newRedFederalCowHammerConfig() + startingConfig.Clusters["another-cluster"] = clusterInfoWithCAData + + expectedConfig := newRedFederalCowHammerConfig() + expectedConfig.Clusters["another-cluster"] = clientcmdapi.NewCluster() + + test := configCommandTest{ + args: []string{"unset", "clusters.another-cluster.certificate-authority-data"}, + startingConfig: startingConfig, + expectedConfig: expectedConfig, + } + + test.run(t) +} + +func TestCAClearsInsecure(t *testing.T) { + fakeCAFile, _ := ioutil.TempFile("", "ca-file") + + clusterInfoWithInsecure := clientcmdapi.NewCluster() + clusterInfoWithInsecure.InsecureSkipTLSVerify = true + + clusterInfoWithCA := clientcmdapi.NewCluster() + clusterInfoWithCA.CertificateAuthority = path.Base(fakeCAFile.Name()) + + startingConfig := newRedFederalCowHammerConfig() + startingConfig.Clusters["another-cluster"] = clusterInfoWithInsecure + + expectedConfig := newRedFederalCowHammerConfig() + expectedConfig.Clusters["another-cluster"] = clusterInfoWithCA + + test := configCommandTest{ + args: []string{"set-cluster", "another-cluster", "--" + clientcmd.FlagCAFile + "=" + fakeCAFile.Name()}, + startingConfig: startingConfig, + expectedConfig: expectedConfig, + } + + test.run(t) +} + +func TestCAClearsCAData(t *testing.T) { + clusterInfoWithCAData := clientcmdapi.NewCluster() + clusterInfoWithCAData.CertificateAuthorityData = []byte("cadata") + + clusterInfoWithCA := clientcmdapi.NewCluster() + clusterInfoWithCA.CertificateAuthority = "/cafile" + + startingConfig := newRedFederalCowHammerConfig() + startingConfig.Clusters["another-cluster"] = clusterInfoWithCAData + + expectedConfig := newRedFederalCowHammerConfig() + expectedConfig.Clusters["another-cluster"] = clusterInfoWithCA + + test := configCommandTest{ + args: []string{"set-cluster", "another-cluster", "--" + clientcmd.FlagCAFile + "=/cafile", "--" + clientcmd.FlagInsecure + "=false"}, + startingConfig: startingConfig, + expectedConfig: expectedConfig, + } + + test.run(t) +} + +func TestInsecureClearsCA(t *testing.T) { + clusterInfoWithInsecure := clientcmdapi.NewCluster() + clusterInfoWithInsecure.InsecureSkipTLSVerify = true + + clusterInfoWithCA := clientcmdapi.NewCluster() + clusterInfoWithCA.CertificateAuthority = "cafile" + clusterInfoWithCA.CertificateAuthorityData = []byte("cadata") + + startingConfig := newRedFederalCowHammerConfig() + startingConfig.Clusters["another-cluster"] = clusterInfoWithCA + + expectedConfig := newRedFederalCowHammerConfig() + expectedConfig.Clusters["another-cluster"] = clusterInfoWithInsecure + + test := configCommandTest{ + args: []string{"set-cluster", 
"another-cluster", "--" + clientcmd.FlagInsecure + "=true"}, + startingConfig: startingConfig, + expectedConfig: expectedConfig, + } + + test.run(t) +} + +func TestCADataClearsCA(t *testing.T) { + fakeCAFile, _ := ioutil.TempFile("", "") + defer os.Remove(fakeCAFile.Name()) + fakeData := []byte("cadata") + ioutil.WriteFile(fakeCAFile.Name(), fakeData, 0600) + + clusterInfoWithCAData := clientcmdapi.NewCluster() + clusterInfoWithCAData.CertificateAuthorityData = fakeData + + clusterInfoWithCA := clientcmdapi.NewCluster() + clusterInfoWithCA.CertificateAuthority = "cafile" + + startingConfig := newRedFederalCowHammerConfig() + startingConfig.Clusters["another-cluster"] = clusterInfoWithCA + + expectedConfig := newRedFederalCowHammerConfig() + expectedConfig.Clusters["another-cluster"] = clusterInfoWithCAData + + test := configCommandTest{ + args: []string{"set-cluster", "another-cluster", "--" + clientcmd.FlagCAFile + "=" + fakeCAFile.Name(), "--" + clientcmd.FlagEmbedCerts + "=true"}, + startingConfig: startingConfig, + expectedConfig: expectedConfig, + } + + test.run(t) +} + +func TestEmbedNoCADisallowed(t *testing.T) { + expectedConfig := newRedFederalCowHammerConfig() + test := configCommandTest{ + args: []string{"set-cluster", "another-cluster", "--" + clientcmd.FlagEmbedCerts + "=true"}, + startingConfig: newRedFederalCowHammerConfig(), + expectedConfig: expectedConfig, + expectedOutputs: []string{"--certificate-authority", "embed"}, + } + + test.run(t) +} + +func TestCAAndInsecureDisallowed(t *testing.T) { + test := configCommandTest{ + args: []string{"set-cluster", "another-cluster", "--" + clientcmd.FlagCAFile + "=cafile", "--" + clientcmd.FlagInsecure + "=true"}, + startingConfig: newRedFederalCowHammerConfig(), + expectedConfig: newRedFederalCowHammerConfig(), + expectedOutputs: []string{"certificate", "insecure"}, + } + + test.run(t) +} + +func TestMergeExistingAuth(t *testing.T) { + expectedConfig := newRedFederalCowHammerConfig() + authInfo := expectedConfig.AuthInfos["red-user"] + authInfo.ClientKey = "/key" + expectedConfig.AuthInfos["red-user"] = authInfo + test := configCommandTest{ + args: []string{"set-credentials", "red-user", "--" + clientcmd.FlagKeyFile + "=/key"}, + startingConfig: newRedFederalCowHammerConfig(), + expectedConfig: expectedConfig, + } + + test.run(t) +} + +func TestNewEmptyCluster(t *testing.T) { + expectedConfig := *clientcmdapi.NewConfig() + expectedConfig.Clusters["new-cluster"] = clientcmdapi.NewCluster() + test := configCommandTest{ + args: []string{"set-cluster", "new-cluster"}, + startingConfig: *clientcmdapi.NewConfig(), + expectedConfig: expectedConfig, + } + + test.run(t) +} + +func TestAdditionalCluster(t *testing.T) { + expectedConfig := newRedFederalCowHammerConfig() + cluster := clientcmdapi.NewCluster() + cluster.CertificateAuthority = "/ca-location" + cluster.InsecureSkipTLSVerify = false + cluster.Server = "serverlocation" + expectedConfig.Clusters["different-cluster"] = cluster + test := configCommandTest{ + args: []string{"set-cluster", "different-cluster", "--" + clientcmd.FlagAPIServer + "=serverlocation", "--" + clientcmd.FlagInsecure + "=false", "--" + clientcmd.FlagCAFile + "=/ca-location"}, + startingConfig: newRedFederalCowHammerConfig(), + expectedConfig: expectedConfig, + } + + test.run(t) +} + +func TestOverwriteExistingCluster(t *testing.T) { + expectedConfig := newRedFederalCowHammerConfig() + cluster := clientcmdapi.NewCluster() + cluster.Server = "serverlocation" + expectedConfig.Clusters["cow-cluster"] = cluster + + 
test := configCommandTest{ + args: []string{"set-cluster", "cow-cluster", "--" + clientcmd.FlagAPIServer + "=serverlocation"}, + startingConfig: newRedFederalCowHammerConfig(), + expectedConfig: expectedConfig, + } + + test.run(t) +} + +func TestNewEmptyContext(t *testing.T) { + expectedConfig := *clientcmdapi.NewConfig() + expectedConfig.Contexts["new-context"] = clientcmdapi.NewContext() + test := configCommandTest{ + args: []string{"set-context", "new-context"}, + startingConfig: *clientcmdapi.NewConfig(), + expectedConfig: expectedConfig, + } + + test.run(t) +} + +func TestAdditionalContext(t *testing.T) { + expectedConfig := newRedFederalCowHammerConfig() + context := clientcmdapi.NewContext() + context.Cluster = "some-cluster" + context.AuthInfo = "some-user" + context.Namespace = "different-namespace" + expectedConfig.Contexts["different-context"] = context + test := configCommandTest{ + args: []string{"set-context", "different-context", "--" + clientcmd.FlagClusterName + "=some-cluster", "--" + clientcmd.FlagAuthInfoName + "=some-user", "--" + clientcmd.FlagNamespace + "=different-namespace"}, + startingConfig: newRedFederalCowHammerConfig(), + expectedConfig: expectedConfig, + } + + test.run(t) +} + +func TestMergeExistingContext(t *testing.T) { + expectedConfig := newRedFederalCowHammerConfig() + context := expectedConfig.Contexts["federal-context"] + context.Namespace = "hammer" + expectedConfig.Contexts["federal-context"] = context + + test := configCommandTest{ + args: []string{"set-context", "federal-context", "--" + clientcmd.FlagNamespace + "=hammer"}, + startingConfig: newRedFederalCowHammerConfig(), + expectedConfig: expectedConfig, + } + + test.run(t) +} + +func TestToBool(t *testing.T) { + type test struct { + in string + out bool + err string + } + + tests := []test{ + {"", false, ""}, + {"true", true, ""}, + {"on", false, `strconv.ParseBool: parsing "on": invalid syntax`}, + } + + for _, curr := range tests { + b, err := toBool(curr.in) + if (len(curr.err) != 0) && err == nil { + t.Errorf("Expected error: %v, but got nil", curr.err) + } + if (len(curr.err) == 0) && err != nil { + t.Errorf("Unexpected error: %v", err) + } + if (err != nil) && (err.Error() != curr.err) { + t.Errorf("Expected %v, got %v", curr.err, err) + + } + if b != curr.out { + t.Errorf("Expected %v, got %v", curr.out, b) + } + } + +} + +func testConfigCommand(args []string, startingConfig clientcmdapi.Config, t *testing.T) (string, clientcmdapi.Config) { + fakeKubeFile, _ := ioutil.TempFile("", "") + defer os.Remove(fakeKubeFile.Name()) + err := clientcmd.WriteToFile(startingConfig, fakeKubeFile.Name()) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + argsToUse := make([]string, 0, 2+len(args)) + argsToUse = append(argsToUse, "--kubeconfig="+fakeKubeFile.Name()) + argsToUse = append(argsToUse, args...) 
+ + buf := bytes.NewBuffer([]byte{}) + + cmd := NewCmdConfig(clientcmd.NewDefaultPathOptions(), buf) + cmd.SetArgs(argsToUse) + cmd.Execute() + + // outBytes, _ := ioutil.ReadFile(fakeKubeFile.Name()) + config := clientcmd.GetConfigFromFileOrDie(fakeKubeFile.Name()) + + return buf.String(), *config +} + +type configCommandTest struct { + args []string + startingConfig clientcmdapi.Config + expectedConfig clientcmdapi.Config + expectedOutputs []string +} + +func (test configCommandTest) run(t *testing.T) string { + out, actualConfig := testConfigCommand(test.args, test.startingConfig, t) + + testSetNilMapsToEmpties(reflect.ValueOf(&test.expectedConfig)) + testSetNilMapsToEmpties(reflect.ValueOf(&actualConfig)) + testClearLocationOfOrigin(&actualConfig) + + if !api.Semantic.DeepEqual(test.expectedConfig, actualConfig) { + t.Errorf("diff: %v", diff.ObjectDiff(test.expectedConfig, actualConfig)) + t.Errorf("expected: %#v\n actual: %#v", test.expectedConfig, actualConfig) + } + + for _, expectedOutput := range test.expectedOutputs { + if !strings.Contains(out, expectedOutput) { + t.Errorf("expected '%s' in output, got '%s'", expectedOutput, out) + } + } + + return out +} +func testClearLocationOfOrigin(config *clientcmdapi.Config) { + for key, obj := range config.AuthInfos { + obj.LocationOfOrigin = "" + config.AuthInfos[key] = obj + } + for key, obj := range config.Clusters { + obj.LocationOfOrigin = "" + config.Clusters[key] = obj + } + for key, obj := range config.Contexts { + obj.LocationOfOrigin = "" + config.Contexts[key] = obj + } +} +func testSetNilMapsToEmpties(curr reflect.Value) { + actualCurrValue := curr + if curr.Kind() == reflect.Ptr { + actualCurrValue = curr.Elem() + } + + switch actualCurrValue.Kind() { + case reflect.Map: + for _, mapKey := range actualCurrValue.MapKeys() { + currMapValue := actualCurrValue.MapIndex(mapKey) + testSetNilMapsToEmpties(currMapValue) + } + + case reflect.Struct: + for fieldIndex := 0; fieldIndex < actualCurrValue.NumField(); fieldIndex++ { + currFieldValue := actualCurrValue.Field(fieldIndex) + + if currFieldValue.Kind() == reflect.Map && currFieldValue.IsNil() { + newValue := reflect.MakeMap(currFieldValue.Type()) + currFieldValue.Set(newValue) + } else { + testSetNilMapsToEmpties(currFieldValue.Addr()) + } + } + + } + +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/config/create_authinfo.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/config/create_authinfo.go index 0f8aa3b6fff9..2fd8cf2cb180 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/config/create_authinfo.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/config/create_authinfo.go @@ -29,10 +29,11 @@ import ( "k8s.io/kubernetes/pkg/client/unversioned/clientcmd" clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api" "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/flag" ) type createAuthInfoOptions struct { - configAccess ConfigAccess + configAccess clientcmd.ConfigAccess name string authPath util.StringFlag clientCertificate util.StringFlag @@ -40,7 +41,7 @@ type createAuthInfoOptions struct { token util.StringFlag username util.StringFlag password util.StringFlag - embedCertData util.BoolFlag + embedCertData flag.Tristate } var create_authinfo_long = fmt.Sprintf(`Sets a user entry in kubeconfig @@ -68,7 +69,7 @@ kubectl config set-credentials cluster-admin --username=admin --password=uXFGweU # Embed client certificate data in the "cluster-admin" entry kubectl config set-credentials 
cluster-admin --client-certificate=~/.kube/admin.crt --embed-certs=true` -func NewCmdConfigSetAuthInfo(out io.Writer, configAccess ConfigAccess) *cobra.Command { +func NewCmdConfigSetAuthInfo(out io.Writer, configAccess clientcmd.ConfigAccess) *cobra.Command { options := &createAuthInfoOptions{configAccess: configAccess} cmd := &cobra.Command{ @@ -90,8 +91,10 @@ func NewCmdConfigSetAuthInfo(out io.Writer, configAccess ConfigAccess) *cobra.Co }, } - cmd.Flags().Var(&options.clientCertificate, clientcmd.FlagCertFile, "path to "+clientcmd.FlagCertFile+" for the user entry in kubeconfig") - cmd.Flags().Var(&options.clientKey, clientcmd.FlagKeyFile, "path to "+clientcmd.FlagKeyFile+" for the user entry in kubeconfig") + cmd.Flags().Var(&options.clientCertificate, clientcmd.FlagCertFile, "path to "+clientcmd.FlagCertFile+" file for the user entry in kubeconfig") + cmd.MarkFlagFilename(clientcmd.FlagCertFile) + cmd.Flags().Var(&options.clientKey, clientcmd.FlagKeyFile, "path to "+clientcmd.FlagKeyFile+" file for the user entry in kubeconfig") + cmd.MarkFlagFilename(clientcmd.FlagKeyFile) cmd.Flags().Var(&options.token, clientcmd.FlagBearerToken, clientcmd.FlagBearerToken+" for the user entry in kubeconfig") cmd.Flags().Var(&options.username, clientcmd.FlagUsername, clientcmd.FlagUsername+" for the user entry in kubeconfig") cmd.Flags().Var(&options.password, clientcmd.FlagPassword, clientcmd.FlagPassword+" for the user entry in kubeconfig") @@ -119,7 +122,7 @@ func (o createAuthInfoOptions) run() error { authInfo := o.modifyAuthInfo(*startingStanza) config.AuthInfos[o.name] = &authInfo - if err := ModifyConfig(o.configAccess, *config, true); err != nil { + if err := clientcmd.ModifyConfig(o.configAccess, *config, true); err != nil { return err } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/config/create_cluster.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/config/create_cluster.go index 79edfd0cf379..dc9de40a09cb 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/config/create_cluster.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/config/create_cluster.go @@ -28,16 +28,17 @@ import ( "k8s.io/kubernetes/pkg/client/unversioned/clientcmd" clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api" "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/flag" ) type createClusterOptions struct { - configAccess ConfigAccess + configAccess clientcmd.ConfigAccess name string server util.StringFlag apiVersion util.StringFlag - insecureSkipTLSVerify util.BoolFlag + insecureSkipTLSVerify flag.Tristate certificateAuthority util.StringFlag - embedCAData util.BoolFlag + embedCAData flag.Tristate } const ( @@ -53,7 +54,7 @@ kubectl config set-cluster e2e --certificate-authority=~/.kube/e2e/kubernetes.ca kubectl config set-cluster e2e --insecure-skip-tls-verify=true` ) -func NewCmdConfigSetCluster(out io.Writer, configAccess ConfigAccess) *cobra.Command { +func NewCmdConfigSetCluster(out io.Writer, configAccess clientcmd.ConfigAccess) *cobra.Command { options := &createClusterOptions{configAccess: configAccess} cmd := &cobra.Command{ @@ -81,7 +82,8 @@ func NewCmdConfigSetCluster(out io.Writer, configAccess ConfigAccess) *cobra.Com cmd.Flags().Var(&options.apiVersion, clientcmd.FlagAPIVersion, clientcmd.FlagAPIVersion+" for the cluster entry in kubeconfig") f := cmd.Flags().VarPF(&options.insecureSkipTLSVerify, clientcmd.FlagInsecure, "", clientcmd.FlagInsecure+" for the cluster entry in kubeconfig") f.NoOptDefVal = "true" - 
cmd.Flags().Var(&options.certificateAuthority, clientcmd.FlagCAFile, "path to "+clientcmd.FlagCAFile+" for the cluster entry in kubeconfig") + cmd.Flags().Var(&options.certificateAuthority, clientcmd.FlagCAFile, "path to "+clientcmd.FlagCAFile+" file for the cluster entry in kubeconfig") + cmd.MarkFlagFilename(clientcmd.FlagCAFile) f = cmd.Flags().VarPF(&options.embedCAData, clientcmd.FlagEmbedCerts, "", clientcmd.FlagEmbedCerts+" for the cluster entry in kubeconfig") f.NoOptDefVal = "true" @@ -106,7 +108,7 @@ func (o createClusterOptions) run() error { cluster := o.modifyCluster(*startingStanza) config.Clusters[o.name] = &cluster - if err := ModifyConfig(o.configAccess, *config, true); err != nil { + if err := clientcmd.ModifyConfig(o.configAccess, *config, true); err != nil { return err } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/config/create_context.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/config/create_context.go index e3d165e07c2a..7f0ca2170dbd 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/config/create_context.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/config/create_context.go @@ -29,7 +29,7 @@ import ( ) type createContextOptions struct { - configAccess ConfigAccess + configAccess clientcmd.ConfigAccess name string cluster util.StringFlag authInfo util.StringFlag @@ -43,7 +43,7 @@ Specifying a name that already exists will merge new fields on top of existing v kubectl config set-context gce --user=cluster-admin` ) -func NewCmdConfigSetContext(out io.Writer, configAccess ConfigAccess) *cobra.Command { +func NewCmdConfigSetContext(out io.Writer, configAccess clientcmd.ConfigAccess) *cobra.Command { options := &createContextOptions{configAccess: configAccess} cmd := &cobra.Command{ @@ -90,7 +90,7 @@ func (o createContextOptions) run() error { context := o.modifyContext(*startingStanza) config.Contexts[o.name] = &context - if err := ModifyConfig(o.configAccess, *config, true); err != nil { + if err := clientcmd.ModifyConfig(o.configAccess, *config, true); err != nil { return err } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/config/current_context.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/config/current_context.go index fe5bcff69ab3..f2941c6dbddd 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/config/current_context.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/config/current_context.go @@ -19,13 +19,15 @@ package config import ( "fmt" "io" - cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "github.com/spf13/cobra" + + "k8s.io/kubernetes/pkg/client/unversioned/clientcmd" + cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" ) type CurrentContextOptions struct { - ConfigAccess ConfigAccess + ConfigAccess clientcmd.ConfigAccess } const ( @@ -34,7 +36,7 @@ const ( kubectl config current-context` ) -func NewCmdConfigCurrentContext(out io.Writer, configAccess ConfigAccess) *cobra.Command { +func NewCmdConfigCurrentContext(out io.Writer, configAccess clientcmd.ConfigAccess) *cobra.Command { options := &CurrentContextOptions{ConfigAccess: configAccess} cmd := &cobra.Command{ diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/config/current_context_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/config/current_context_test.go new file mode 100644 index 000000000000..7a68415f6a64 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/config/current_context_test.go @@ 
-0,0 +1,90 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +import ( + "bytes" + "io/ioutil" + "os" + "strings" + "testing" + + "k8s.io/kubernetes/pkg/client/unversioned/clientcmd" + clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api" +) + +type currentContextTest struct { + startingConfig clientcmdapi.Config + expectedError string +} + +func newFederalContextConfig() clientcmdapi.Config { + return clientcmdapi.Config{ + CurrentContext: "federal-context", + } +} + +func TestCurrentContextWithSetContext(t *testing.T) { + test := currentContextTest{ + startingConfig: newFederalContextConfig(), + expectedError: "", + } + + test.run(t) +} + +func TestCurrentContextWithUnsetContext(t *testing.T) { + test := currentContextTest{ + startingConfig: *clientcmdapi.NewConfig(), + expectedError: "current-context is not set", + } + + test.run(t) +} + +func (test currentContextTest) run(t *testing.T) { + fakeKubeFile, _ := ioutil.TempFile("", "") + defer os.Remove(fakeKubeFile.Name()) + err := clientcmd.WriteToFile(test.startingConfig, fakeKubeFile.Name()) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + pathOptions := clientcmd.NewDefaultPathOptions() + pathOptions.GlobalFile = fakeKubeFile.Name() + pathOptions.EnvVar = "" + options := CurrentContextOptions{ + ConfigAccess: pathOptions, + } + + buf := bytes.NewBuffer([]byte{}) + err = RunCurrentContext(buf, []string{}, &options) + if len(test.expectedError) != 0 { + if err == nil { + t.Errorf("Did not get %v", test.expectedError) + } else { + if !strings.Contains(err.Error(), test.expectedError) { + t.Errorf("Expected %v, but got %v", test.expectedError, err) + } + } + return + } + + if err != nil { + t.Errorf("Unexpected error: %v", err) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/config/navigation_step_parser_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/config/navigation_step_parser_test.go new file mode 100644 index 000000000000..2bca8d08920d --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/config/navigation_step_parser_test.go @@ -0,0 +1,96 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
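The parser tests below pin down the tricky part of dotted property paths: map keys may themselves contain dots, so newNavigationSteps has to consume the leading token greedily against the known attribute names instead of splitting blindly on '.'. A standalone sketch of that greedy-first-token idea (not the patch's actual parser):

package main

import (
	"fmt"
	"strings"
)

// firstStep peels one navigation step off a dotted path: if the path begins
// with a known attribute name, that name is the step and the remainder (for
// example a map key like "10.12.12.12") is returned intact.
func firstStep(path string, legal []string) (step, rest string) {
	for _, name := range legal {
		if path == name || strings.HasPrefix(path, name+".") {
			return name, strings.TrimPrefix(strings.TrimPrefix(path, name), ".")
		}
	}
	return "", path
}

func main() {
	step, rest := firstStep("contexts.10.12.12.12", []string{"clusters", "contexts", "users"})
	fmt.Println(step, "->", rest) // contexts -> 10.12.12.12
}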
+*/ + +package config + +import ( + "reflect" + "strings" + "testing" + + clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api" + "k8s.io/kubernetes/pkg/util/diff" +) + +type stepParserTest struct { + path string + expectedNavigationSteps navigationSteps + expectedError string +} + +func TestParseWithDots(t *testing.T) { + test := stepParserTest{ + path: "clusters.my.dot.delimited.name.server", + expectedNavigationSteps: navigationSteps{ + steps: []navigationStep{ + {"clusters", reflect.TypeOf(make(map[string]*clientcmdapi.Cluster))}, + {"my.dot.delimited.name", reflect.TypeOf(clientcmdapi.Cluster{})}, + {"server", reflect.TypeOf("")}, + }, + }, + } + + test.run(t) +} + +func TestParseWithDotsEndingWithName(t *testing.T) { + test := stepParserTest{ + path: "contexts.10.12.12.12", + expectedNavigationSteps: navigationSteps{ + steps: []navigationStep{ + {"contexts", reflect.TypeOf(make(map[string]*clientcmdapi.Context))}, + {"10.12.12.12", reflect.TypeOf(clientcmdapi.Context{})}, + }, + }, + } + + test.run(t) +} + +func TestParseWithBadValue(t *testing.T) { + test := stepParserTest{ + path: "user.bad", + expectedNavigationSteps: navigationSteps{ + steps: []navigationStep{}, + }, + expectedError: "unable to parse user.bad after [] at api.Config", + } + + test.run(t) +} + +func (test stepParserTest) run(t *testing.T) { + actualSteps, err := newNavigationSteps(test.path) + if len(test.expectedError) != 0 { + if err == nil { + t.Errorf("Did not get %v", test.expectedError) + } else { + if !strings.Contains(err.Error(), test.expectedError) { + t.Errorf("Expected %v, but got %v", test.expectedError, err) + } + } + return + } + + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + + if !reflect.DeepEqual(test.expectedNavigationSteps, *actualSteps) { + t.Errorf("diff: %v", diff.ObjectDiff(test.expectedNavigationSteps, *actualSteps)) + t.Errorf("expected: %#v\n actual: %#v", test.expectedNavigationSteps, *actualSteps) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/config/set.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/config/set.go index c175a23c2f1a..c1c078bcb86a 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/config/set.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/config/set.go @@ -17,6 +17,7 @@ limitations under the License. package config import ( + "encoding/base64" "errors" "fmt" "io" @@ -24,6 +25,9 @@ import ( "strings" "github.com/spf13/cobra" + + "k8s.io/kubernetes/pkg/client/unversioned/clientcmd" + "k8s.io/kubernetes/pkg/util/flag" ) const ( @@ -32,16 +36,17 @@ const ( ) type setOptions struct { - configAccess ConfigAccess + configAccess clientcmd.ConfigAccess propertyName string propertyValue string + setRawBytes flag.Tristate } const set_long = `Sets an individual value in a kubeconfig file PROPERTY_NAME is a dot delimited name where each token represents either a attribute name or a map key. Map keys may not contain dots. -PROPERTY_VALUE is the new value you wish to set.` +PROPERTY_VALUE is the new value you wish to set. 
Binary fields such as 'certificate-authority-data' expect a base64 encoded string unless the --set-raw-bytes flag is used.` -func NewCmdConfigSet(out io.Writer, configAccess ConfigAccess) *cobra.Command { +func NewCmdConfigSet(out io.Writer, configAccess clientcmd.ConfigAccess) *cobra.Command { options := &setOptions{configAccess: configAccess} cmd := &cobra.Command{ @@ -62,6 +67,8 @@ func NewCmdConfigSet(out io.Writer, configAccess ConfigAccess) *cobra.Command { }, } + f := cmd.Flags().VarPF(&options.setRawBytes, "set-raw-bytes", "", "When writing a []byte PROPERTY_VALUE, write the given string directly without base64 decoding.") + f.NoOptDefVal = "true" return cmd } @@ -79,12 +86,18 @@ func (o setOptions) run() error { if err != nil { return err } - err = modifyConfig(reflect.ValueOf(config), steps, o.propertyValue, false) + + setRawBytes := false + if o.setRawBytes.Provided() { + setRawBytes = o.setRawBytes.Value() + } + + err = modifyConfig(reflect.ValueOf(config), steps, o.propertyValue, false, setRawBytes) if err != nil { return err } - if err := ModifyConfig(o.configAccess, *config, false); err != nil { + if err := clientcmd.ModifyConfig(o.configAccess, *config, false); err != nil { return err } @@ -115,7 +128,7 @@ func (o setOptions) validate() error { return nil } -func modifyConfig(curr reflect.Value, steps *navigationSteps, propertyValue string, unset bool) error { +func modifyConfig(curr reflect.Value, steps *navigationSteps, propertyValue string, unset bool, setRawBytes bool) error { currStep := steps.pop() actualCurrValue := curr @@ -145,7 +158,7 @@ func modifyConfig(curr reflect.Value, steps *navigationSteps, propertyValue stri actualCurrValue.SetMapIndex(mapKey, currMapValue) } - err := modifyConfig(currMapValue, steps, propertyValue, unset) + err := modifyConfig(currMapValue, steps, propertyValue, unset, setRawBytes) if err != nil { return err } @@ -159,6 +172,31 @@ func modifyConfig(curr reflect.Value, steps *navigationSteps, propertyValue stri actualCurrValue.SetString(propertyValue) return nil + case reflect.Slice: + if steps.moreStepsRemaining() { + return fmt.Errorf("can't have more steps after bytes. %v", steps) + } + innerKind := actualCurrValue.Type().Elem().Kind() + if innerKind != reflect.Uint8 { + return fmt.Errorf("unrecognized slice type. %v", innerKind) + } + + if unset { + actualCurrValue.Set(reflect.Zero(actualCurrValue.Type())) + return nil + } + + if setRawBytes { + actualCurrValue.SetBytes([]byte(propertyValue)) + } else { + val, err := base64.StdEncoding.DecodeString(propertyValue) + if err != nil { + return fmt.Errorf("error decoding input value: %v", err) + } + actualCurrValue.SetBytes(val) + } + return nil + case reflect.Bool: if steps.moreStepsRemaining() { return fmt.Errorf("can't have more steps after a bool. 
%v", steps) @@ -196,7 +234,7 @@ func modifyConfig(curr reflect.Value, steps *navigationSteps, propertyValue stri return nil } - return modifyConfig(currFieldValue.Addr(), steps, propertyValue, unset) + return modifyConfig(currFieldValue.Addr(), steps, propertyValue, unset, setRawBytes) } } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/config/unset.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/config/unset.go index 555309ccbe5d..f9446df51639 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/config/unset.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/config/unset.go @@ -23,17 +23,19 @@ import ( "reflect" "github.com/spf13/cobra" + + "k8s.io/kubernetes/pkg/client/unversioned/clientcmd" ) type unsetOptions struct { - configAccess ConfigAccess + configAccess clientcmd.ConfigAccess propertyName string } const unset_long = `Unsets an individual value in a kubeconfig file PROPERTY_NAME is a dot delimited name where each token represents either a attribute name or a map key. Map keys may not contain dots.` -func NewCmdConfigUnset(out io.Writer, configAccess ConfigAccess) *cobra.Command { +func NewCmdConfigUnset(out io.Writer, configAccess clientcmd.ConfigAccess) *cobra.Command { options := &unsetOptions{configAccess: configAccess} cmd := &cobra.Command{ @@ -72,12 +74,12 @@ func (o unsetOptions) run() error { if err != nil { return err } - err = modifyConfig(reflect.ValueOf(config), steps, "", true) + err = modifyConfig(reflect.ValueOf(config), steps, "", true, true) if err != nil { return err } - if err := ModifyConfig(o.configAccess, *config, false); err != nil { + if err := clientcmd.ModifyConfig(o.configAccess, *config, false); err != nil { return err } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/config/use_context.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/config/use_context.go index a6a5c26d557c..abfe8bbf91f8 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/config/use_context.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/config/use_context.go @@ -23,15 +23,16 @@ import ( "github.com/spf13/cobra" + "k8s.io/kubernetes/pkg/client/unversioned/clientcmd" clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api" ) type useContextOptions struct { - configAccess ConfigAccess + configAccess clientcmd.ConfigAccess contextName string } -func NewCmdConfigUseContext(out io.Writer, configAccess ConfigAccess) *cobra.Command { +func NewCmdConfigUseContext(out io.Writer, configAccess clientcmd.ConfigAccess) *cobra.Command { options := &useContextOptions{configAccess: configAccess} cmd := &cobra.Command{ @@ -68,7 +69,7 @@ func (o useContextOptions) run() error { config.CurrentContext = o.contextName - if err := ModifyConfig(o.configAccess, *config, true); err != nil { + if err := clientcmd.ModifyConfig(o.configAccess, *config, true); err != nil { return err } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/config/view.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/config/view.go index 5d253a118fae..1c1ae5df3a09 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/config/view.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/config/view.go @@ -28,12 +28,12 @@ import ( "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/latest" "k8s.io/kubernetes/pkg/kubectl" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" - "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/flag" ) 
type ViewOptions struct { - ConfigAccess ConfigAccess - Merge util.BoolFlag + ConfigAccess clientcmd.ConfigAccess + Merge flag.Tristate Flatten bool Minify bool RawByteData bool @@ -50,7 +50,7 @@ kubectl config view kubectl config view -o jsonpath='{.users[?(@.name == "e2e")].user.password}'` ) -func NewCmdConfigView(out io.Writer, ConfigAccess ConfigAccess) *cobra.Command { +func NewCmdConfigView(out io.Writer, ConfigAccess clientcmd.ConfigAccess) *cobra.Command { options := &ViewOptions{ConfigAccess: ConfigAccess} // Default to yaml defaultOutputFormat := "yaml" @@ -67,6 +67,10 @@ func NewCmdConfigView(out io.Writer, ConfigAccess ConfigAccess) *cobra.Command { fmt.Printf("--output wide is not available in kubectl config view; reset to default output format (%s)\n\n", defaultOutputFormat) cmd.Flags().Set("output", defaultOutputFormat) } + if outputFormat == "" { + fmt.Printf("reset to default output format (%s) as --output is empty\n\n", defaultOutputFormat) + cmd.Flags().Set("output", defaultOutputFormat) + } printer, _, err := cmdutil.PrinterForCommand(cmd) cmdutil.CheckErr(err) diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/convert.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/convert.go index 0f1b10a9fafb..e6810aa21fd0 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/convert.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/convert.go @@ -74,11 +74,12 @@ func NewCmdConvert(f *cmdutil.Factory, out io.Writer) *cobra.Command { usage := "Filename, directory, or URL to the file that needs to be converted." kubectl.AddJsonFilenameFlag(cmd, &options.filenames, usage) + cmdutil.AddRecursiveFlag(cmd, &options.recursive) cmd.MarkFlagRequired("filename") cmdutil.AddValidateFlags(cmd) cmdutil.AddPrinterFlags(cmd) cmd.Flags().BoolVar(&options.local, "local", true, "If true, convert will NOT try to contact api-server but run locally.") - + cmdutil.AddInclude3rdPartyFlags(cmd) return cmd } @@ -93,6 +94,8 @@ type ConvertOptions struct { printer kubectl.ResourcePrinter outputVersion unversioned.GroupVersion + + recursive bool } // Complete collects information required to run Convert command from command line. @@ -106,11 +109,12 @@ func (o *ConvertOptions) Complete(f *cmdutil.Factory, out io.Writer, cmd *cobra. } // build the builder - mapper, typer := f.Object() + mapper, typer := f.Object(cmdutil.GetIncludeThirdPartyAPIs(cmd)) clientMapper := resource.ClientMapperFunc(f.ClientForMapping) + if o.local { fmt.Fprintln(out, "running in local mode...") - o.builder = resource.NewBuilder(mapper, typer, resource.DisabledClientForMapping{clientMapper}, f.Decoder(true)) + o.builder = resource.NewBuilder(mapper, typer, resource.DisabledClientForMapping{ClientMapper: clientMapper}, f.Decoder(true)) } else { o.builder = resource.NewBuilder(mapper, typer, clientMapper, f.Decoder(true)) schema, err := f.Validator(cmdutil.GetFlagBool(cmd, "validate"), cmdutil.GetFlagString(cmd, "schema-cache-dir")) @@ -125,7 +129,7 @@ func (o *ConvertOptions) Complete(f *cmdutil.Factory, out io.Writer, cmd *cobra. } o.builder = o.builder.NamespaceParam(cmdNamespace). ContinueOnError(). - FilenameParam(false, o.filenames...). + FilenameParam(false, o.recursive, o.filenames...). Flatten() // build the printer @@ -150,15 +154,32 @@ func (o *ConvertOptions) Complete(f *cmdutil.Factory, out io.Writer, cmd *cobra.
// RunConvert implements the generic Convert command func (o *ConvertOptions) RunConvert() error { - infos, err := o.builder.Do().Infos() + r := o.builder.Do() + err := r.Err() if err != nil { return err } - objects, err := resource.AsVersionedObject(infos, false, o.outputVersion.String(), o.encoder) + count := 0 + err = r.Visit(func(info *resource.Info, err error) error { + if err != nil { + return err + } + + infos := []*resource.Info{info} + objects, err := resource.AsVersionedObject(infos, false, o.outputVersion, o.encoder) + if err != nil { + return err + } + + count++ + return o.printer.PrintObj(objects, o.out) + }) if err != nil { return err } - - return o.printer.PrintObj(objects, o.out) + if count == 0 { + return fmt.Errorf("no objects passed to convert") + } + return nil } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/create.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/create.go index 422a97eb1a39..1fd923aacb1c 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/create.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/create.go @@ -19,22 +19,20 @@ package cmd import ( "fmt" "io" - "strings" "github.com/spf13/cobra" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/kubectl" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/resource" - "k8s.io/kubernetes/pkg/runtime" ) // CreateOptions is the start of the data required to perform the operation. As new fields are added, add them here instead of // referencing the cmd.Flags() type CreateOptions struct { Filenames []string + Recursive bool } const ( @@ -71,9 +69,11 @@ func NewCmdCreate(f *cmdutil.Factory, out io.Writer) *cobra.Command { kubectl.AddJsonFilenameFlag(cmd, &options.Filenames, usage) cmd.MarkFlagRequired("filename") cmdutil.AddValidateFlags(cmd) + cmdutil.AddRecursiveFlag(cmd, &options.Recursive) cmdutil.AddOutputFlagsForMutation(cmd) cmdutil.AddApplyAnnotationFlags(cmd) cmdutil.AddRecordFlag(cmd) + cmdutil.AddInclude3rdPartyFlags(cmd) // create subcommands cmd.AddCommand(NewCmdCreateNamespace(f, out)) @@ -101,12 +101,12 @@ func RunCreate(f *cmdutil.Factory, cmd *cobra.Command, out io.Writer, options *C return err } - mapper, typer := f.Object() + mapper, typer := f.Object(cmdutil.GetIncludeThirdPartyAPIs(cmd)) r := resource.NewBuilder(mapper, typer, resource.ClientMapperFunc(f.ClientForMapping), f.Decoder(true)). Schema(schema). ContinueOnError(). NamespaceParam(cmdNamespace).DefaultNamespace(). - FilenameParam(enforceNamespace, options.Filenames...). + FilenameParam(enforceNamespace, options.Recursive, options.Filenames...). Flatten(). Do() err = r.Err() @@ -136,7 +136,7 @@ func RunCreate(f *cmdutil.Factory, cmd *cobra.Command, out io.Writer, options *C count++ shortOutput := cmdutil.GetFlagString(cmd, "output") == "name" if !shortOutput { - printObjectSpecificMessage(info.Object, out) + f.PrintObjectSpecificMessage(info.Object, out) } cmdutil.PrintSuccess(mapper, shortOutput, out, info.Mapping.Resource, info.Name, "created") return nil @@ -150,37 +150,6 @@ func RunCreate(f *cmdutil.Factory, cmd *cobra.Command, out io.Writer, options *C return nil } -func printObjectSpecificMessage(obj runtime.Object, out io.Writer) { - switch obj := obj.(type) { - case *api.Service: - if obj.Spec.Type == api.ServiceTypeNodePort { - msg := fmt.Sprintf( - `You have exposed your service on an external port on all nodes in your -cluster. 
If you want to expose this service to the external internet, you may -need to set up firewall rules for the service port(s) (%s) to serve traffic. - -See http://releases.k8s.io/release-1.2/docs/user-guide/services-firewalls.md for more details. -`, - makePortsString(obj.Spec.Ports, true)) - out.Write([]byte(msg)) - } - } -} - -func makePortsString(ports []api.ServicePort, useNodePort bool) string { - pieces := make([]string, len(ports)) - for ix := range ports { - var port int - if useNodePort { - port = ports[ix].NodePort - } else { - port = ports[ix].Port - } - pieces[ix] = fmt.Sprintf("%s:%d", strings.ToLower(string(ports[ix].Protocol)), port) - } - return strings.Join(pieces, ",") -} - // createAndRefresh creates an object from input info and refreshes info with that object func createAndRefresh(info *resource.Info) error { obj, err := resource.NewHelper(info.Client, info.Mapping).Create(info.Namespace, true, info.Object) @@ -221,8 +190,12 @@ func RunCreateSubcommand(f *cmdutil.Factory, cmd *cobra.Command, out io.Writer, if err != nil { return err } - mapper, typer := f.Object() - gvk, err := typer.ObjectKind(obj) + mapper, typer := f.Object(cmdutil.GetIncludeThirdPartyAPIs(cmd)) + gvks, _, err := typer.ObjectKinds(obj) + if err != nil { + return err + } + gvk := gvks[0] mapping, err := mapper.RESTMapping(unversioned.GroupKind{Group: gvk.Group, Kind: gvk.Kind}, gvk.Version) if err != nil { return err @@ -255,5 +228,5 @@ func RunCreateSubcommand(f *cmdutil.Factory, cmd *cobra.Command, out io.Writer, return nil } - return f.PrintObject(cmd, obj, out) + return f.PrintObject(cmd, mapper, obj, out) } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/create_configmap.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/create_configmap.go index 729ac3983968..a81e4c19665d 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/create_configmap.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/create_configmap.go @@ -40,19 +40,19 @@ symlinks, devices, pipes, etc). ` configMapExample = ` # Create a new configmap named my-config with keys for each file in folder bar - kubectl create configmap generic my-config --from-file=path/to/bar + kubectl create configmap my-config --from-file=path/to/bar # Create a new configmap named my-config with specified keys instead of names on disk - kubectl create configmap generic my-config --from-file=ssh-privatekey=~/.ssh/id_rsa --from-file=ssh-publickey=~/.ssh/id_rsa.pub + kubectl create configmap my-config --from-file=key1=/path/to/bar/file1.txt --from-file=key2=/path/to/bar/file2.txt # Create a new configMap named my-config with key1=config1 and key2=config2 - kubectl create configmap generic my-config --from-literal=key1=config1 --from-literal=key2=config2` + kubectl create configmap my-config --from-literal=key1=config1 --from-literal=key2=config2` ) // ConfigMap is a command to ease creating ConfigMaps. 
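The --from-literal flow shown in the examples above comes down to parsing key=value pairs into the map that becomes ConfigMap.Data. A self-contained sketch of that parsing step; handleLiteralSource is a hypothetical stand-in for the generator's literal handling, not the vendored helper:

    package main

    import (
        "fmt"
        "strings"
    )

    // handleLiteralSource splits one --from-literal argument of the form key=value.
    func handleLiteralSource(source string, data map[string]string) error {
        items := strings.SplitN(source, "=", 2)
        if len(items) != 2 {
            return fmt.Errorf("invalid literal source %v, expected key=value", source)
        }
        data[items[0]] = items[1]
        return nil
    }

    func main() {
        data := map[string]string{}
        for _, lit := range []string{"key1=config1", "key2=config2"} {
            if err := handleLiteralSource(lit, data); err != nil {
                fmt.Println(err)
                return
            }
        }
        fmt.Println(data) // map[key1:config1 key2:config2]
    }

The constructor below wires those --from-file and --from-literal sources into flags on the configmap subcommand.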
func NewCmdCreateConfigMap(f *cmdutil.Factory, cmdOut io.Writer) *cobra.Command { cmd := &cobra.Command{ - Use: "configmap NAME [--type=string] [--from-file=[key=]source] [--from-literal=key1=value1] [--dry-run]", + Use: "configmap NAME [--from-file=[key=]source] [--from-literal=key1=value1] [--dry-run]", Short: "Create a configMap from a local file, directory or literal value.", Long: configMapLong, Example: configMapExample, @@ -70,7 +70,7 @@ func NewCmdCreateConfigMap(f *cmdutil.Factory, cmdOut io.Writer) *cobra.Command return cmd } -// CreateConfigMap is the implementation of the create configmap generic command. +// CreateConfigMap is the implementation of the create configmap command. func CreateConfigMap(f *cmdutil.Factory, cmdOut io.Writer, cmd *cobra.Command, args []string) error { name, err := NameFromCommandArgs(cmd, args) if err != nil { @@ -90,7 +90,7 @@ func CreateConfigMap(f *cmdutil.Factory, cmdOut io.Writer, cmd *cobra.Command, a return RunCreateSubcommand(f, cmd, cmdOut, &CreateSubcommandOptions{ Name: name, StructuredGenerator: generator, - DryRun: cmdutil.GetFlagBool(cmd, "dry-run"), + DryRun: cmdutil.GetDryRunFlag(cmd), OutputFormat: cmdutil.GetFlagString(cmd, "output"), }) } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/create_configmap_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/create_configmap_test.go new file mode 100644 index 000000000000..fff769a48df7 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/create_configmap_test.go @@ -0,0 +1,54 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cmd + +import ( + "bytes" + "net/http" + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/client/unversioned/fake" +) + +func TestCreateConfigMap(t *testing.T) { + configMap := &api.ConfigMap{} + configMap.Name = "my-configmap" + f, tf, codec := NewAPIFactory() + tf.Printer = &testPrinter{} + tf.Client = &fake.RESTClient{ + Codec: codec, + Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { + switch p, m := req.URL.Path, req.Method; { + case p == "/namespaces/test/configmaps" && m == "POST": + return &http.Response{StatusCode: 201, Header: defaultHeader(), Body: objBody(codec, configMap)}, nil + default: + t.Fatalf("unexpected request: %#v\n%#v", req.URL, req) + return nil, nil + } + }), + } + tf.Namespace = "test" + buf := bytes.NewBuffer([]byte{}) + cmd := NewCmdCreateConfigMap(f, buf) + cmd.Flags().Set("output", "name") + cmd.Run(cmd, []string{configMap.Name}) + expectedOutput := "configmap/" + configMap.Name + "\n" + if buf.String() != expectedOutput { + t.Errorf("expected output: %s, but got: %s", expectedOutput, buf.String()) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/create_namespace.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/create_namespace.go index fcb4be75c7f9..49da9518f6e4 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/create_namespace.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/create_namespace.go @@ -51,6 +51,7 @@ func NewCmdCreateNamespace(f *cmdutil.Factory, cmdOut io.Writer) *cobra.Command cmdutil.AddValidateFlags(cmd) cmdutil.AddPrinterFlags(cmd) cmdutil.AddGeneratorFlags(cmd, cmdutil.NamespaceV1GeneratorName) + return cmd } @@ -70,7 +71,7 @@ func CreateNamespace(f *cmdutil.Factory, cmdOut io.Writer, cmd *cobra.Command, a return RunCreateSubcommand(f, cmd, cmdOut, &CreateSubcommandOptions{ Name: name, StructuredGenerator: generator, - DryRun: cmdutil.GetFlagBool(cmd, "dry-run"), + DryRun: cmdutil.GetDryRunFlag(cmd), OutputFormat: cmdutil.GetFlagString(cmd, "output"), }) } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/create_namespace_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/create_namespace_test.go new file mode 100644 index 000000000000..80483f2f588a --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/create_namespace_test.go @@ -0,0 +1,53 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cmd + +import ( + "bytes" + "net/http" + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/client/unversioned/fake" +) + +func TestCreateNamespace(t *testing.T) { + namespaceObject := &api.Namespace{} + namespaceObject.Name = "my-namespace" + f, tf, codec := NewAPIFactory() + tf.Printer = &testPrinter{} + tf.Client = &fake.RESTClient{ + Codec: codec, + Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { + switch p, m := req.URL.Path, req.Method; { + case p == "/namespaces" && m == "POST": + return &http.Response{StatusCode: 201, Header: defaultHeader(), Body: objBody(codec, namespaceObject)}, nil + default: + t.Fatalf("unexpected request: %#v\n%#v", req.URL, req) + return nil, nil + } + }), + } + buf := bytes.NewBuffer([]byte{}) + cmd := NewCmdCreateNamespace(f, buf) + cmd.Flags().Set("output", "name") + cmd.Run(cmd, []string{namespaceObject.Name}) + expectedOutput := "namespace/" + namespaceObject.Name + "\n" + if buf.String() != expectedOutput { + t.Errorf("expected output: %s, but got: %s", expectedOutput, buf.String()) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/create_secret.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/create_secret.go index 3028da743dfa..227c1e0ce40b 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/create_secret.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/create_secret.go @@ -37,7 +37,9 @@ func NewCmdCreateSecret(f *cmdutil.Factory, cmdOut io.Writer) *cobra.Command { }, } cmd.AddCommand(NewCmdCreateSecretDockerRegistry(f, cmdOut)) + cmd.AddCommand(NewCmdCreateSecretTLS(f, cmdOut)) cmd.AddCommand(NewCmdCreateSecretGeneric(f, cmdOut)) + return cmd } @@ -108,7 +110,7 @@ func CreateSecretGeneric(f *cmdutil.Factory, cmdOut io.Writer, cmd *cobra.Comman return RunCreateSubcommand(f, cmd, cmdOut, &CreateSubcommandOptions{ Name: name, StructuredGenerator: generator, - DryRun: cmdutil.GetFlagBool(cmd, "dry-run"), + DryRun: cmdutil.GetDryRunFlag(cmd), OutputFormat: cmdutil.GetFlagString(cmd, "output"), }) } @@ -129,7 +131,7 @@ nodes to pull images on your behalf, they have to have the credentials. 
You can provide this information by creating a dockercfg secret and attaching it to your service account.` secretForDockerRegistryExample = ` # If you don't already have a .dockercfg file, you can create a dockercfg secret directly by using: - $ kubectl create secret docker-registry my-secret --docker-server=DOCKER_REGISTRY_SERVER --docker-username=DOCKER_USER --docker-password=DOCKER_PASSWORD --docker-email=DOCKER_EMAIL + kubectl create secret docker-registry my-secret --docker-server=DOCKER_REGISTRY_SERVER --docker-username=DOCKER_USER --docker-password=DOCKER_PASSWORD --docker-email=DOCKER_EMAIL` ) // NewCmdCreateSecretDockerRegistry is a macro command for creating secrets to work with Docker registries @@ -155,6 +157,7 @@ func NewCmdCreateSecretDockerRegistry(f *cmdutil.Factory, cmdOut io.Writer) *cob cmd.Flags().String("docker-email", "", "Email for Docker registry") cmd.MarkFlagRequired("docker-email") cmd.Flags().String("docker-server", "https://index.docker.io/v1/", "Server location for Docker registry") + cmdutil.AddInclude3rdPartyFlags(cmd) return cmd } @@ -183,6 +186,68 @@ func CreateSecretDockerRegistry(f *cmdutil.Factory, cmdOut io.Writer, cmd *cobra default: return cmdutil.UsageError(cmd, fmt.Sprintf("Generator: %s not supported.", generatorName)) } + return RunCreateSubcommand(f, cmd, cmdOut, &CreateSubcommandOptions{ + Name: name, + StructuredGenerator: generator, + DryRun: cmdutil.GetDryRunFlag(cmd), + OutputFormat: cmdutil.GetFlagString(cmd, "output"), + }) +} + +const ( + secretForTLSLong = ` +Create a TLS secret from the given public/private key pair. + +The public/private key pair must exist beforehand. The public key certificate must be PEM encoded and match the given private key.` + + secretForTLSExample = ` # Create a new TLS secret named tls-secret with the given key pair: + kubectl create secret tls tls-secret --cert=path/to/tls.cert --key=path/to/tls.key` +) + +// NewCmdCreateSecretTLS is a macro command for creating TLS secrets +func NewCmdCreateSecretTLS(f *cmdutil.Factory, cmdOut io.Writer) *cobra.Command { + cmd := &cobra.Command{ + Use: "tls NAME --cert=path/to/cert/file --key=path/to/key/file [--dry-run]", + Short: "Create a TLS secret.", + Long: secretForTLSLong, + Example: secretForTLSExample, + Run: func(cmd *cobra.Command, args []string) { + err := CreateSecretTLS(f, cmdOut, cmd, args) + cmdutil.CheckErr(err) + }, + } + cmdutil.AddApplyAnnotationFlags(cmd) + cmdutil.AddValidateFlags(cmd) + cmdutil.AddPrinterFlags(cmd) + cmdutil.AddGeneratorFlags(cmd, cmdutil.SecretForTLSV1GeneratorName) + cmd.Flags().String("cert", "", "Path to PEM encoded public key certificate.") + cmd.Flags().String("key", "", "Path to private key associated with given certificate.") + return cmd +} + +// CreateSecretTLS is the implementation of the create secret tls command +func CreateSecretTLS(f *cmdutil.Factory, cmdOut io.Writer, cmd *cobra.Command, args []string) error { + name, err := NameFromCommandArgs(cmd, args) + if err != nil { + return err + } + requiredFlags := []string{"cert", "key"} + for _, requiredFlag := range requiredFlags { + if value := cmdutil.GetFlagString(cmd, requiredFlag); len(value) == 0 { + return cmdutil.UsageError(cmd, "flag %s is required", requiredFlag) + } + } + var generator kubectl.StructuredGenerator + switch generatorName := cmdutil.GetFlagString(cmd, "generator"); generatorName { + case cmdutil.SecretForTLSV1GeneratorName: + generator = &kubectl.SecretForTLSGeneratorV1{ + Name: name, + Key: cmdutil.GetFlagString(cmd, "key"), + Cert: 
cmdutil.GetFlagString(cmd, "cert"), + } + default: + return cmdutil.UsageError(cmd, fmt.Sprintf("Generator: %s not supported.", generatorName)) + } return RunCreateSubcommand(f, cmd, cmdOut, &CreateSubcommandOptions{ Name: name, StructuredGenerator: generator, diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/create_secret_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/create_secret_test.go new file mode 100644 index 000000000000..ff3dcf162ab5 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/create_secret_test.go @@ -0,0 +1,85 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cmd + +import ( + "bytes" + "net/http" + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/client/unversioned/fake" +) + +func TestCreateSecretGeneric(t *testing.T) { + secretObject := &api.Secret{} + secretObject.Name = "my-secret" + f, tf, codec := NewAPIFactory() + tf.Printer = &testPrinter{} + tf.Client = &fake.RESTClient{ + Codec: codec, + Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { + switch p, m := req.URL.Path, req.Method; { + case p == "/namespaces/test/secrets" && m == "POST": + return &http.Response{StatusCode: 201, Header: defaultHeader(), Body: objBody(codec, secretObject)}, nil + default: + t.Fatalf("unexpected request: %#v\n%#v", req.URL, req) + return nil, nil + } + }), + } + tf.Namespace = "test" + buf := bytes.NewBuffer([]byte{}) + cmd := NewCmdCreateSecretGeneric(f, buf) + cmd.Flags().Set("output", "name") + cmd.Run(cmd, []string{secretObject.Name}) + expectedOutput := "secret/" + secretObject.Name + "\n" + if buf.String() != expectedOutput { + t.Errorf("expected output: %s, but got: %s", buf.String(), expectedOutput) + } +} + +func TestCreateSecretDockerRegistry(t *testing.T) { + secretObject := &api.Secret{} + secretObject.Name = "my-secret" + f, tf, codec := NewAPIFactory() + tf.Printer = &testPrinter{} + tf.Client = &fake.RESTClient{ + Codec: codec, + Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { + switch p, m := req.URL.Path, req.Method; { + case p == "/namespaces/test/secrets" && m == "POST": + return &http.Response{StatusCode: 201, Header: defaultHeader(), Body: objBody(codec, secretObject)}, nil + default: + t.Fatalf("unexpected request: %#v\n%#v", req.URL, req) + return nil, nil + } + }), + } + tf.Namespace = "test" + buf := bytes.NewBuffer([]byte{}) + cmd := NewCmdCreateSecretDockerRegistry(f, buf) + cmd.Flags().Set("docker-username", "test-user") + cmd.Flags().Set("docker-password", "test-pass") + cmd.Flags().Set("docker-email", "test-email") + cmd.Flags().Set("output", "name") + cmd.Run(cmd, []string{secretObject.Name}) + expectedOutput := "secret/" + secretObject.Name + "\n" + if buf.String() != expectedOutput { + t.Errorf("expected output: %s, but got: %s", buf.String(), expectedOutput) + } +} diff --git 
a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/create_serviceaccount.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/create_serviceaccount.go index bd6f7eb8f5b5..89d1882c0b10 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/create_serviceaccount.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/create_serviceaccount.go @@ -50,6 +50,7 @@ func NewCmdCreateServiceAccount(f *cmdutil.Factory, cmdOut io.Writer) *cobra.Com cmdutil.AddApplyAnnotationFlags(cmd) cmdutil.AddValidateFlags(cmd) cmdutil.AddPrinterFlags(cmd) + cmdutil.AddInclude3rdPartyFlags(cmd) cmdutil.AddGeneratorFlags(cmd, cmdutil.ServiceAccountV1GeneratorName) return cmd } @@ -70,7 +71,7 @@ func CreateServiceAccount(f *cmdutil.Factory, cmdOut io.Writer, cmd *cobra.Comma return RunCreateSubcommand(f, cmd, cmdOut, &CreateSubcommandOptions{ Name: name, StructuredGenerator: generator, - DryRun: cmdutil.GetFlagBool(cmd, "dry-run"), + DryRun: cmdutil.GetDryRunFlag(cmd), OutputFormat: cmdutil.GetFlagString(cmd, "output"), }) } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/create_serviceaccount_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/create_serviceaccount_test.go new file mode 100644 index 000000000000..927e6132ae88 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/create_serviceaccount_test.go @@ -0,0 +1,54 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cmd + +import ( + "bytes" + "net/http" + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/client/unversioned/fake" +) + +func TestCreateServiceAccount(t *testing.T) { + serviceAccountObject := &api.ServiceAccount{} + serviceAccountObject.Name = "my-service-account" + f, tf, codec := NewAPIFactory() + tf.Printer = &testPrinter{} + tf.Client = &fake.RESTClient{ + Codec: codec, + Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { + switch p, m := req.URL.Path, req.Method; { + case p == "/namespaces/test/serviceaccounts" && m == "POST": + return &http.Response{StatusCode: 201, Header: defaultHeader(), Body: objBody(codec, serviceAccountObject)}, nil + default: + t.Fatalf("unexpected request: %#v\n%#v", req.URL, req) + return nil, nil + } + }), + } + tf.Namespace = "test" + buf := bytes.NewBuffer([]byte{}) + cmd := NewCmdCreateServiceAccount(f, buf) + cmd.Flags().Set("output", "name") + cmd.Run(cmd, []string{serviceAccountObject.Name}) + expectedOutput := "serviceaccount/" + serviceAccountObject.Name + "\n" + if buf.String() != expectedOutput { + t.Errorf("expected output: %s, but got: %s", expectedOutput, buf.String()) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/create_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/create_test.go new file mode 100644 index 000000000000..19653ebeeb54 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/create_test.go @@ -0,0 +1,136 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cmd + +import ( + "bytes" + "net/http" + "testing" + + "k8s.io/kubernetes/pkg/client/unversioned/fake" +) + +func TestExtraArgsFail(t *testing.T) { + initTestErrorHandler(t) + buf := bytes.NewBuffer([]byte{}) + + f, _, _ := NewAPIFactory() + c := NewCmdCreate(f, buf) + if ValidateArgs(c, []string{"rc"}) == nil { + t.Errorf("unexpected non-error") + } +} + +func TestCreateObject(t *testing.T) { + initTestErrorHandler(t) + _, _, rc := testData() + rc.Items[0].Name = "redis-master-controller" + + f, tf, codec := NewAPIFactory() + tf.Printer = &testPrinter{} + tf.Client = &fake.RESTClient{ + Codec: codec, + Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { + switch p, m := req.URL.Path, req.Method; { + case p == "/namespaces/test/replicationcontrollers" && m == "POST": + return &http.Response{StatusCode: 201, Header: defaultHeader(), Body: objBody(codec, &rc.Items[0])}, nil + default: + t.Fatalf("unexpected request: %#v\n%#v", req.URL, req) + return nil, nil + } + }), + } + tf.Namespace = "test" + buf := bytes.NewBuffer([]byte{}) + + cmd := NewCmdCreate(f, buf) + cmd.Flags().Set("filename", "../../../examples/guestbook/legacy/redis-master-controller.yaml") + cmd.Flags().Set("output", "name") + cmd.Run(cmd, []string{}) + + // uses the name from the file, not the response + if buf.String() != "replicationcontroller/redis-master-controller\n" { + t.Errorf("unexpected output: %s", buf.String()) + } +} + +func TestCreateMultipleObject(t *testing.T) { + initTestErrorHandler(t) + _, svc, rc := testData() + + f, tf, codec := NewAPIFactory() + tf.Printer = &testPrinter{} + tf.Client = &fake.RESTClient{ + Codec: codec, + Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { + switch p, m := req.URL.Path, req.Method; { + case p == "/namespaces/test/services" && m == "POST": + return &http.Response{StatusCode: 201, Header: defaultHeader(), Body: objBody(codec, &svc.Items[0])}, nil + case p == "/namespaces/test/replicationcontrollers" && m == "POST": + return &http.Response{StatusCode: 201, Header: defaultHeader(), Body: objBody(codec, &rc.Items[0])}, nil + default: + t.Fatalf("unexpected request: %#v\n%#v", req.URL, req) + return nil, nil + } + }), + } + tf.Namespace = "test" + buf := bytes.NewBuffer([]byte{}) + + cmd := NewCmdCreate(f, buf) + cmd.Flags().Set("filename", "../../../examples/guestbook/legacy/redis-master-controller.yaml") + cmd.Flags().Set("filename", "../../../examples/guestbook/frontend-service.yaml") + cmd.Flags().Set("output", "name") + cmd.Run(cmd, []string{}) + + // Names should come from the REST response, NOT the files + if buf.String() != "replicationcontroller/rc1\nservice/baz\n" { + t.Errorf("unexpected output: %s", buf.String()) + } +} + +func TestCreateDirectory(t *testing.T) { + initTestErrorHandler(t) + _, _, rc := testData() + rc.Items[0].Name = "name" + + f, tf, codec := NewAPIFactory() + tf.Printer = &testPrinter{} + tf.Client = &fake.RESTClient{ + Codec: codec, + Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { + switch p, m := req.URL.Path, req.Method; { + case p == "/namespaces/test/replicationcontrollers" && m == "POST": + return &http.Response{StatusCode: 201, Header: defaultHeader(), Body: objBody(codec, &rc.Items[0])}, nil + default: + t.Fatalf("unexpected request: %#v\n%#v", req.URL, req) + return nil, nil + } + }), + } + tf.Namespace = "test" + buf := bytes.NewBuffer([]byte{}) + + cmd := NewCmdCreate(f, buf) + cmd.Flags().Set("filename", 
"../../../examples/guestbook/legacy") + cmd.Flags().Set("output", "name") + cmd.Run(cmd, []string{}) + + if buf.String() != "replicationcontroller/name\nreplicationcontroller/name\nreplicationcontroller/name\n" { + t.Errorf("unexpected output: %s", buf.String()) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/delete.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/delete.go index 99f65d8eac07..1e5aadfe251d 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/delete.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/delete.go @@ -35,6 +35,7 @@ import ( // referencing the cmd.Flags() type DeleteOptions struct { Filenames []string + Recursive bool } const ( @@ -59,6 +60,9 @@ kubectl delete pod,service baz foo # Delete pods and services with label name=myLabel. kubectl delete pods,services -l name=myLabel +# Delete a pod immediately (no graceful shutdown) +kubectl delete pod foo --now + # Delete a pod with UID 1234-56-7890-234234-456456. kubectl delete pod 1234-56-7890-234234-456456 @@ -70,11 +74,12 @@ func NewCmdDelete(f *cmdutil.Factory, out io.Writer) *cobra.Command { options := &DeleteOptions{} // retrieve a list of handled resources from printer as valid args - validArgs := []string{} + validArgs, argAliases := []string{}, []string{} p, err := f.Printer(nil, false, false, false, false, false, false, []string{}) cmdutil.CheckErr(err) if p != nil { validArgs = p.HandledResources() + argAliases = kubectl.ResourceAliases(validArgs) } cmd := &cobra.Command{ @@ -87,17 +92,22 @@ func NewCmdDelete(f *cmdutil.Factory, out io.Writer) *cobra.Command { err := RunDelete(f, out, cmd, args, options) cmdutil.CheckErr(err) }, - ValidArgs: validArgs, + SuggestFor: []string{"rm"}, + ValidArgs: validArgs, + ArgAliases: argAliases, } usage := "Filename, directory, or URL to a file containing the resource to delete." kubectl.AddJsonFilenameFlag(cmd, &options.Filenames, usage) + cmdutil.AddRecursiveFlag(cmd, &options.Recursive) cmd.Flags().StringP("selector", "l", "", "Selector (label query) to filter on.") cmd.Flags().Bool("all", false, "[-all] to select all the specified resources.") cmd.Flags().Bool("ignore-not-found", false, "Treat \"resource not found\" as a successful delete. Defaults to \"true\" when --all is specified.") cmd.Flags().Bool("cascade", true, "If true, cascade the deletion of the resources managed by this resource (e.g. Pods created by a ReplicationController). Default true.") cmd.Flags().Int("grace-period", -1, "Period of time in seconds given to the resource to terminate gracefully. Ignored if negative.") + cmd.Flags().Bool("now", false, "If true, resources are force terminated without graceful deletion (same as --grace-period=0).") cmd.Flags().Duration("timeout", 0, "The length of time to wait before giving up on a delete, zero means determine a timeout from the size of the object") cmdutil.AddOutputFlagsForMutation(cmd) + cmdutil.AddInclude3rdPartyFlags(cmd) return cmd } @@ -107,11 +117,11 @@ func RunDelete(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []str return err } deleteAll := cmdutil.GetFlagBool(cmd, "all") - mapper, typer := f.Object() + mapper, typer := f.Object(cmdutil.GetIncludeThirdPartyAPIs(cmd)) r := resource.NewBuilder(mapper, typer, resource.ClientMapperFunc(f.ClientForMapping), f.Decoder(true)). ContinueOnError(). NamespaceParam(cmdNamespace).DefaultNamespace(). - FilenameParam(enforceNamespace, options.Filenames...). 
+ FilenameParam(enforceNamespace, options.Recursive, options.Filenames...). SelectorParam(cmdutil.GetFlagString(cmd, "selector")). SelectAllParam(deleteAll). ResourceTypeOrNameArgs(false, args...).RequireObject(false). @@ -135,10 +145,18 @@ func RunDelete(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []str } } + gracePeriod := cmdutil.GetFlagInt(cmd, "grace-period") + if cmdutil.GetFlagBool(cmd, "now") { + if gracePeriod != -1 { + return fmt.Errorf("--now and --grace-period cannot be specified together") + } + gracePeriod = 0 + } + shortOutput := cmdutil.GetFlagString(cmd, "output") == "name" // By default use a reaper to delete all related resources. if cmdutil.GetFlagBool(cmd, "cascade") { - return ReapResult(r, f, out, cmdutil.GetFlagBool(cmd, "cascade"), ignoreNotFound, cmdutil.GetFlagDuration(cmd, "timeout"), cmdutil.GetFlagInt(cmd, "grace-period"), shortOutput, mapper) + return ReapResult(r, f, out, cmdutil.GetFlagBool(cmd, "cascade"), ignoreNotFound, cmdutil.GetFlagDuration(cmd, "timeout"), gracePeriod, shortOutput, mapper) } return DeleteResult(r, out, ignoreNotFound, shortOutput, mapper) } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/delete_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/delete_test.go new file mode 100644 index 000000000000..1e8c964838a4 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/delete_test.go @@ -0,0 +1,449 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cmd + +import ( + "bytes" + "net/http" + "strings" + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/errors" + "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/client/unversioned/fake" +) + +func TestDeleteObjectByTuple(t *testing.T) { + _, _, rc := testData() + + f, tf, codec := NewAPIFactory() + tf.Printer = &testPrinter{} + tf.Client = &fake.RESTClient{ + Codec: codec, + Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { + switch p, m := req.URL.Path, req.Method; { + case p == "/namespaces/test/replicationcontrollers/redis-master-controller" && m == "DELETE": + return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, &rc.Items[0])}, nil + default: + // Ensures no GET is performed when deleting by name + t.Fatalf("unexpected request: %#v\n%#v", req.URL, req) + return nil, nil + } + }), + } + tf.Namespace = "test" + buf := bytes.NewBuffer([]byte{}) + + cmd := NewCmdDelete(f, buf) + cmd.Flags().Set("namespace", "test") + cmd.Flags().Set("cascade", "false") + cmd.Flags().Set("output", "name") + cmd.Run(cmd, []string{"replicationcontrollers/redis-master-controller"}) + + if buf.String() != "replicationcontroller/redis-master-controller\n" { + t.Errorf("unexpected output: %s", buf.String()) + } +} + +func TestDeleteNamedObject(t *testing.T) { + _, _, rc := testData() + + f, tf, codec := NewAPIFactory() + tf.Printer = &testPrinter{} + tf.Client = &fake.RESTClient{ + Codec: codec, + Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { + switch p, m := req.URL.Path, req.Method; { + case p == "/namespaces/test/replicationcontrollers/redis-master-controller" && m == "DELETE": + return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, &rc.Items[0])}, nil + default: + // Ensures no GET is performed when deleting by name + t.Fatalf("unexpected request: %#v\n%#v", req.URL, req) + return nil, nil + } + }), + } + tf.Namespace = "test" + buf := bytes.NewBuffer([]byte{}) + + cmd := NewCmdDelete(f, buf) + cmd.Flags().Set("namespace", "test") + cmd.Flags().Set("cascade", "false") + cmd.Flags().Set("output", "name") + cmd.Run(cmd, []string{"replicationcontrollers", "redis-master-controller"}) + + if buf.String() != "replicationcontroller/redis-master-controller\n" { + t.Errorf("unexpected output: %s", buf.String()) + } +} + +func TestDeleteObject(t *testing.T) { + _, _, rc := testData() + + f, tf, codec := NewAPIFactory() + tf.Printer = &testPrinter{} + tf.Client = &fake.RESTClient{ + Codec: codec, + Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { + switch p, m := req.URL.Path, req.Method; { + case p == "/namespaces/test/replicationcontrollers/redis-master" && m == "DELETE": + return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, &rc.Items[0])}, nil + default: + t.Fatalf("unexpected request: %#v\n%#v", req.URL, req) + return nil, nil + } + }), + } + tf.Namespace = "test" + buf := bytes.NewBuffer([]byte{}) + + cmd := NewCmdDelete(f, buf) + cmd.Flags().Set("filename", "../../../examples/guestbook/legacy/redis-master-controller.yaml") + cmd.Flags().Set("cascade", "false") + cmd.Flags().Set("output", "name") + cmd.Run(cmd, []string{}) + + // uses the name from the file, not the response + if buf.String() != "replicationcontroller/redis-master\n" { + t.Errorf("unexpected output: %s", buf.String()) + } +} + +func TestDeleteObjectNotFound(t *testing.T) 
{ + f, tf, codec := NewAPIFactory() + tf.Printer = &testPrinter{} + tf.Client = &fake.RESTClient{ + Codec: codec, + Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { + switch p, m := req.URL.Path, req.Method; { + case p == "/namespaces/test/replicationcontrollers/redis-master" && m == "DELETE": + return &http.Response{StatusCode: 404, Header: defaultHeader(), Body: stringBody("")}, nil + default: + t.Fatalf("unexpected request: %#v\n%#v", req.URL, req) + return nil, nil + } + }), + } + tf.Namespace = "test" + buf := bytes.NewBuffer([]byte{}) + + cmd := NewCmdDelete(f, buf) + options := &DeleteOptions{ + Filenames: []string{"../../../examples/guestbook/legacy/redis-master-controller.yaml"}, + } + cmd.Flags().Set("cascade", "false") + cmd.Flags().Set("output", "name") + err := RunDelete(f, buf, cmd, []string{}, options) + if err == nil || !errors.IsNotFound(err) { + t.Errorf("unexpected error: expected NotFound, got %v", err) + } +} + +func TestDeleteObjectIgnoreNotFound(t *testing.T) { + f, tf, codec := NewAPIFactory() + tf.Printer = &testPrinter{} + tf.Client = &fake.RESTClient{ + Codec: codec, + Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { + switch p, m := req.URL.Path, req.Method; { + case p == "/namespaces/test/replicationcontrollers/redis-master" && m == "DELETE": + return &http.Response{StatusCode: 404, Header: defaultHeader(), Body: stringBody("")}, nil + default: + t.Fatalf("unexpected request: %#v\n%#v", req.URL, req) + return nil, nil + } + }), + } + tf.Namespace = "test" + buf := bytes.NewBuffer([]byte{}) + + cmd := NewCmdDelete(f, buf) + cmd.Flags().Set("filename", "../../../examples/guestbook/legacy/redis-master-controller.yaml") + cmd.Flags().Set("cascade", "false") + cmd.Flags().Set("ignore-not-found", "true") + cmd.Flags().Set("output", "name") + cmd.Run(cmd, []string{}) + + if buf.String() != "" { + t.Errorf("unexpected output: %s", buf.String()) + } +} + +func TestDeleteAllNotFound(t *testing.T) { + _, svc, _ := testData() + + f, tf, codec := NewAPIFactory() + + // Add an item to the list which will result in a 404 on delete + svc.Items = append(svc.Items, api.Service{ObjectMeta: api.ObjectMeta{Name: "foo"}}) + notFoundError := &errors.NewNotFound(api.Resource("services"), "foo").ErrStatus + + tf.Printer = &testPrinter{} + tf.Client = &fake.RESTClient{ + Codec: codec, + Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { + switch p, m := req.URL.Path, req.Method; { + case p == "/namespaces/test/services" && m == "GET": + return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, svc)}, nil + case p == "/namespaces/test/services/foo" && m == "DELETE": + return &http.Response{StatusCode: 404, Header: defaultHeader(), Body: objBody(codec, notFoundError)}, nil + case p == "/namespaces/test/services/baz" && m == "DELETE": + return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, &svc.Items[0])}, nil + default: + t.Fatalf("unexpected request: %#v\n%#v", req.URL, req) + return nil, nil + } + }), + } + tf.Namespace = "test" + buf := bytes.NewBuffer([]byte{}) + + cmd := NewCmdDelete(f, buf) + cmd.Flags().Set("all", "true") + cmd.Flags().Set("cascade", "false") + // Make sure we can explicitly choose to fail on NotFound errors, even with --all + cmd.Flags().Set("ignore-not-found", "false") + cmd.Flags().Set("output", "name") + + err := RunDelete(f, buf, cmd, []string{"services"}, &DeleteOptions{}) + if err == nil || 
!errors.IsNotFound(err) { + t.Errorf("unexpected error: expected NotFound, got %v", err) + } +} + +func TestDeleteAllIgnoreNotFound(t *testing.T) { + _, svc, _ := testData() + + f, tf, codec := NewAPIFactory() + + // Add an item to the list which will result in a 404 on delete + svc.Items = append(svc.Items, api.Service{ObjectMeta: api.ObjectMeta{Name: "foo"}}) + notFoundError := &errors.NewNotFound(api.Resource("services"), "foo").ErrStatus + + tf.Printer = &testPrinter{} + tf.Client = &fake.RESTClient{ + Codec: codec, + Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { + switch p, m := req.URL.Path, req.Method; { + case p == "/namespaces/test/services" && m == "GET": + return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, svc)}, nil + case p == "/namespaces/test/services/foo" && m == "DELETE": + return &http.Response{StatusCode: 404, Header: defaultHeader(), Body: objBody(codec, notFoundError)}, nil + case p == "/namespaces/test/services/baz" && m == "DELETE": + return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, &svc.Items[0])}, nil + default: + t.Fatalf("unexpected request: %#v\n%#v", req.URL, req) + return nil, nil + } + }), + } + tf.Namespace = "test" + buf := bytes.NewBuffer([]byte{}) + + cmd := NewCmdDelete(f, buf) + cmd.Flags().Set("all", "true") + cmd.Flags().Set("cascade", "false") + cmd.Flags().Set("output", "name") + cmd.Run(cmd, []string{"services"}) + + if buf.String() != "service/baz\n" { + t.Errorf("unexpected output: %s", buf.String()) + } +} + +func TestDeleteMultipleObject(t *testing.T) { + _, svc, rc := testData() + + f, tf, codec := NewAPIFactory() + tf.Printer = &testPrinter{} + tf.Client = &fake.RESTClient{ + Codec: codec, + Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { + switch p, m := req.URL.Path, req.Method; { + case p == "/namespaces/test/replicationcontrollers/redis-master" && m == "DELETE": + return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, &rc.Items[0])}, nil + case p == "/namespaces/test/services/frontend" && m == "DELETE": + return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, &svc.Items[0])}, nil + default: + t.Fatalf("unexpected request: %#v\n%#v", req.URL, req) + return nil, nil + } + }), + } + tf.Namespace = "test" + buf := bytes.NewBuffer([]byte{}) + + cmd := NewCmdDelete(f, buf) + cmd.Flags().Set("filename", "../../../examples/guestbook/legacy/redis-master-controller.yaml") + cmd.Flags().Set("filename", "../../../examples/guestbook/frontend-service.yaml") + cmd.Flags().Set("cascade", "false") + cmd.Flags().Set("output", "name") + cmd.Run(cmd, []string{}) + + if buf.String() != "replicationcontroller/redis-master\nservice/frontend\n" { + t.Errorf("unexpected output: %s", buf.String()) + } +} + +func TestDeleteMultipleObjectContinueOnMissing(t *testing.T) { + _, svc, _ := testData() + + f, tf, codec := NewAPIFactory() + tf.Printer = &testPrinter{} + tf.Client = &fake.RESTClient{ + Codec: codec, + Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { + switch p, m := req.URL.Path, req.Method; { + case p == "/namespaces/test/replicationcontrollers/redis-master" && m == "DELETE": + return &http.Response{StatusCode: 404, Header: defaultHeader(), Body: stringBody("")}, nil + case p == "/namespaces/test/services/frontend" && m == "DELETE": + return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, 
&svc.Items[0])}, nil + default: + t.Fatalf("unexpected request: %#v\n%#v", req.URL, req) + return nil, nil + } + }), + } + tf.Namespace = "test" + buf := bytes.NewBuffer([]byte{}) + + cmd := NewCmdDelete(f, buf) + options := &DeleteOptions{ + Filenames: []string{"../../../examples/guestbook/legacy/redis-master-controller.yaml", "../../../examples/guestbook/frontend-service.yaml"}, + } + cmd.Flags().Set("cascade", "false") + cmd.Flags().Set("output", "name") + err := RunDelete(f, buf, cmd, []string{}, options) + if err == nil || !errors.IsNotFound(err) { + t.Errorf("unexpected error: expected NotFound, got %v", err) + } + + if buf.String() != "service/frontend\n" { + t.Errorf("unexpected output: %s", buf.String()) + } +} + +func TestDeleteMultipleResourcesWithTheSameName(t *testing.T) { + _, svc, rc := testData() + f, tf, codec := NewAPIFactory() + tf.Printer = &testPrinter{} + tf.Client = &fake.RESTClient{ + Codec: codec, + Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { + switch p, m := req.URL.Path, req.Method; { + case p == "/namespaces/test/replicationcontrollers/baz" && m == "DELETE": + return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, &rc.Items[0])}, nil + case p == "/namespaces/test/replicationcontrollers/foo" && m == "DELETE": + return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, &rc.Items[0])}, nil + case p == "/namespaces/test/services/baz" && m == "DELETE": + return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, &svc.Items[0])}, nil + case p == "/namespaces/test/services/foo" && m == "DELETE": + return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, &svc.Items[0])}, nil + default: + // Ensures no GET is performed when deleting by name + t.Fatalf("unexpected request: %#v\n%#v", req.URL, req) + return nil, nil + } + }), + } + tf.Namespace = "test" + buf := bytes.NewBuffer([]byte{}) + cmd := NewCmdDelete(f, buf) + cmd.Flags().Set("namespace", "test") + cmd.Flags().Set("cascade", "false") + cmd.Flags().Set("output", "name") + cmd.Run(cmd, []string{"replicationcontrollers,services", "baz", "foo"}) + if buf.String() != "replicationcontroller/baz\nreplicationcontroller/foo\nservice/baz\nservice/foo\n" { + t.Errorf("unexpected output: %s", buf.String()) + } +} + +func TestDeleteDirectory(t *testing.T) { + _, _, rc := testData() + + f, tf, codec := NewAPIFactory() + tf.Printer = &testPrinter{} + tf.Client = &fake.RESTClient{ + Codec: codec, + Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { + switch p, m := req.URL.Path, req.Method; { + case strings.HasPrefix(p, "/namespaces/test/replicationcontrollers/") && m == "DELETE": + return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, &rc.Items[0])}, nil + default: + t.Fatalf("unexpected request: %#v\n%#v", req.URL, req) + return nil, nil + } + }), + } + tf.Namespace = "test" + buf := bytes.NewBuffer([]byte{}) + + cmd := NewCmdDelete(f, buf) + cmd.Flags().Set("filename", "../../../examples/guestbook/legacy") + cmd.Flags().Set("cascade", "false") + cmd.Flags().Set("output", "name") + cmd.Run(cmd, []string{}) + + if buf.String() != "replicationcontroller/frontend\nreplicationcontroller/redis-master\nreplicationcontroller/redis-slave\n" { + t.Errorf("unexpected output: %s", buf.String()) + } +} + +func TestDeleteMultipleSelector(t *testing.T) { + pods, svc, _ := testData() + + f, tf, codec := NewAPIFactory() + tf.Printer = 
&testPrinter{} + tf.Client = &fake.RESTClient{ + Codec: codec, + Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { + switch p, m := req.URL.Path, req.Method; { + case p == "/namespaces/test/pods" && m == "GET": + if req.URL.Query().Get(unversioned.LabelSelectorQueryParam(testapi.Default.GroupVersion().String())) != "a=b" { + t.Fatalf("unexpected request: %#v\n%#v", req.URL, req) + } + return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, pods)}, nil + case p == "/namespaces/test/services" && m == "GET": + if req.URL.Query().Get(unversioned.LabelSelectorQueryParam(testapi.Default.GroupVersion().String())) != "a=b" { + t.Fatalf("unexpected request: %#v\n%#v", req.URL, req) + } + return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, svc)}, nil + case strings.HasPrefix(p, "/namespaces/test/pods/") && m == "DELETE": + return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, &pods.Items[0])}, nil + case strings.HasPrefix(p, "/namespaces/test/services/") && m == "DELETE": + return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, &svc.Items[0])}, nil + default: + t.Fatalf("unexpected request: %#v\n%#v", req.URL, req) + return nil, nil + } + }), + } + tf.Namespace = "test" + buf := bytes.NewBuffer([]byte{}) + + cmd := NewCmdDelete(f, buf) + cmd.Flags().Set("selector", "a=b") + cmd.Flags().Set("cascade", "false") + cmd.Flags().Set("output", "name") + cmd.Run(cmd, []string{"pods,services"}) + + if buf.String() != "pod/foo\npod/bar\nservice/baz\n" { + t.Errorf("unexpected output: %s", buf.String()) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/describe.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/describe.go index 466caa6c8073..e16c5756d274 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/describe.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/describe.go @@ -36,6 +36,7 @@ import ( // referencing the cmd.Flags() type DescribeOptions struct { Filenames []string + Recursive bool } const ( @@ -49,11 +50,7 @@ $ kubectl describe TYPE NAME_PREFIX will first check for an exact match on TYPE and NAME_PREFIX. 
If no such resource exists, it will output details for every resource that has a name prefixed with NAME_PREFIX -Possible resource types include (case insensitive): pods (po), services (svc), -replicationcontrollers (rc), nodes (no), events (ev), limitranges (limits), -persistentvolumes (pv), persistentvolumeclaims (pvc), resourcequotas (quota), -namespaces (ns), serviceaccounts, horizontalpodautoscalers (hpa), -endpoints (ep) or secrets.` +` + kubectl.PossibleResourceTypes describe_example = `# Describe a node kubectl describe nodes kubernetes-minion-emt8.c.myproject.internal @@ -76,6 +73,10 @@ kubectl describe pods frontend` func NewCmdDescribe(f *cmdutil.Factory, out io.Writer) *cobra.Command { options := &DescribeOptions{} + describerSettings := &kubectl.DescriberSettings{} + + validArgs := kubectl.DescribableResources() + argAliases := kubectl.ResourceAliases(validArgs) cmd := &cobra.Command{ Use: "describe (-f FILENAME | TYPE [NAME_PREFIX | -l label] | TYPE/NAME)", @@ -83,18 +84,22 @@ func NewCmdDescribe(f *cmdutil.Factory, out io.Writer) *cobra.Command { Long: describe_long, Example: describe_example, Run: func(cmd *cobra.Command, args []string) { - err := RunDescribe(f, out, cmd, args, options) + err := RunDescribe(f, out, cmd, args, options, describerSettings) cmdutil.CheckErr(err) }, - ValidArgs: kubectl.DescribableResources(), + ValidArgs: validArgs, + ArgAliases: argAliases, } usage := "Filename, directory, or URL to a file containing the resource to describe" kubectl.AddJsonFilenameFlag(cmd, &options.Filenames, usage) + cmdutil.AddRecursiveFlag(cmd, &options.Recursive) cmd.Flags().StringP("selector", "l", "", "Selector (label query) to filter on") + cmd.Flags().BoolVar(&describerSettings.ShowEvents, "show-events", true, "If true, display events related to the described object.") + cmdutil.AddInclude3rdPartyFlags(cmd) return cmd } -func RunDescribe(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []string, options *DescribeOptions) error { +func RunDescribe(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []string, options *DescribeOptions, describerSettings *kubectl.DescriberSettings) error { selector := cmdutil.GetFlagString(cmd, "selector") cmdNamespace, enforceNamespace, err := f.DefaultNamespace() if err != nil { @@ -105,11 +110,11 @@ func RunDescribe(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []s return cmdutil.UsageError(cmd, "Required resource not specified.") } - mapper, typer := f.Object() + mapper, typer := f.Object(cmdutil.GetIncludeThirdPartyAPIs(cmd)) r := resource.NewBuilder(mapper, typer, resource.ClientMapperFunc(f.ClientForMapping), f.Decoder(true)). ContinueOnError(). NamespaceParam(cmdNamespace).DefaultNamespace(). - FilenameParam(enforceNamespace, options.Filenames...). + FilenameParam(enforceNamespace, options.Recursive, options.Filenames...). SelectorParam(selector). ResourceTypeOrNameArgs(true, args...). Flatten(). 
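The describerSettings plumbing introduced above reduces to one idea: the command owns a DescriberSettings value, binds its ShowEvents field to the --show-events flag, and hands the settings to every Describe call. A reduced model of that flow under simplified types (this is only the shape of the vendored Describer interface, not the real one):

    package main

    import "fmt"

    // DescriberSettings carries display toggles, mirroring the --show-events flag bound above.
    type DescriberSettings struct {
        ShowEvents bool
    }

    // describer is a cut-down stand-in for the interface the command resolves per resource mapping.
    type describer interface {
        Describe(namespace, name string, settings DescriberSettings) (string, error)
    }

    type podDescriber struct{}

    func (podDescriber) Describe(namespace, name string, settings DescriberSettings) (string, error) {
        out := fmt.Sprintf("Name: %s\nNamespace: %s\n", name, namespace)
        if settings.ShowEvents {
            out += "Events: <none>\n"
        }
        return out, nil
    }

    func main() {
        var d describer = podDescriber{}
        s, _ := d.Describe("test", "redis-master", DescriberSettings{ShowEvents: true})
        fmt.Print(s)
    }

The remaining hunks below thread *describerSettings through RunDescribe and DescribeMatchingResources accordingly.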
@@ -123,7 +128,7 @@ func RunDescribe(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []s infos, err := r.Infos() if err != nil { if apierrors.IsNotFound(err) && len(args) == 2 { - return DescribeMatchingResources(mapper, typer, f, cmdNamespace, args[0], args[1], out, err) + return DescribeMatchingResources(mapper, typer, f, cmdNamespace, args[0], args[1], describerSettings, out, err) } allErrs = append(allErrs, err) } @@ -135,7 +140,7 @@ func RunDescribe(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []s allErrs = append(allErrs, err) continue } - s, err := describer.Describe(info.Namespace, info.Name) + s, err := describer.Describe(info.Namespace, info.Name, *describerSettings) if err != nil { allErrs = append(allErrs, err) continue @@ -146,7 +151,7 @@ func RunDescribe(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []s return utilerrors.NewAggregate(allErrs) } -func DescribeMatchingResources(mapper meta.RESTMapper, typer runtime.ObjectTyper, f *cmdutil.Factory, namespace, rsrc, prefix string, out io.Writer, originalError error) error { +func DescribeMatchingResources(mapper meta.RESTMapper, typer runtime.ObjectTyper, f *cmdutil.Factory, namespace, rsrc, prefix string, describerSettings *kubectl.DescriberSettings, out io.Writer, originalError error) error { r := resource.NewBuilder(mapper, typer, resource.ClientMapperFunc(f.ClientForMapping), f.Decoder(true)). NamespaceParam(namespace).DefaultNamespace(). ResourceTypeOrNameArgs(true, rsrc). @@ -170,7 +175,7 @@ func DescribeMatchingResources(mapper meta.RESTMapper, typer runtime.ObjectTyper info := infos[ix] if strings.HasPrefix(info.Name, prefix) { isFound = true - s, err := describer.Describe(info.Namespace, info.Name) + s, err := describer.Describe(info.Namespace, info.Name, *describerSettings) if err != nil { return err } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/describe_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/describe_test.go new file mode 100644 index 000000000000..54f6ce5dbe00 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/describe_test.go @@ -0,0 +1,142 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cmd + +import ( + "bytes" + "fmt" + "net/http" + "testing" + + "k8s.io/kubernetes/pkg/client/unversioned/fake" +) + +// Verifies that schemas that are not in the master tree of Kubernetes can be retrieved via Get. 
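Each test in the new *_test.go files above relies on the same fixture: a fake REST client whose round-trip function switches on request path and method, returning a canned response for the expected call and failing the test otherwise. That fixture is ordinary http.RoundTripper plumbing; a standard-library-only sketch of the idea (the vendored fake.CreateHTTPClient helper is built on the same trick):

    package main

    import (
        "fmt"
        "net/http"
    )

    // roundTripFunc lets a plain function act as an http.RoundTripper.
    type roundTripFunc func(req *http.Request) (*http.Response, error)

    func (f roundTripFunc) RoundTrip(req *http.Request) (*http.Response, error) { return f(req) }

    func main() {
        client := &http.Client{Transport: roundTripFunc(func(req *http.Request) (*http.Response, error) {
            // Dispatch on path and method, as the switch statements in the tests do.
            if req.URL.Path == "/namespaces/test/configmaps" && req.Method == "POST" {
                return &http.Response{StatusCode: 201, Body: http.NoBody}, nil
            }
            return nil, fmt.Errorf("unexpected request: %s %s", req.Method, req.URL.Path)
        })}
        resp, err := client.Post("http://example.invalid/namespaces/test/configmaps", "application/json", nil)
        if err != nil {
            fmt.Println(err)
            return
        }
        fmt.Println("status:", resp.StatusCode)
    }

TestDescribeUnknownSchemaObject below follows the same shape through the describe command's fake factory.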
+func TestDescribeUnknownSchemaObject(t *testing.T) { + d := &testDescriber{Output: "test output"} + f, tf, codec := NewTestFactory() + tf.Describer = d + tf.Client = &fake.RESTClient{ + Codec: codec, + Resp: &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, &internalType{Name: "foo"})}, + } + tf.Namespace = "non-default" + buf := bytes.NewBuffer([]byte{}) + + cmd := NewCmdDescribe(f, buf) + cmd.Run(cmd, []string{"type", "foo"}) + + if d.Name != "foo" || d.Namespace != "non-default" { + t.Errorf("unexpected describer: %#v", d) + } + + if buf.String() != fmt.Sprintf("%s\n\n", d.Output) { + t.Errorf("unexpected output: %s", buf.String()) + } +} + +func TestDescribeObject(t *testing.T) { + _, _, rc := testData() + f, tf, codec := NewAPIFactory() + d := &testDescriber{Output: "test output"} + tf.Describer = d + tf.Client = &fake.RESTClient{ + Codec: codec, + Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { + switch p, m := req.URL.Path, req.Method; { + case p == "/namespaces/test/replicationcontrollers/redis-master" && m == "GET": + return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, &rc.Items[0])}, nil + default: + t.Fatalf("unexpected request: %#v\n%#v", req.URL, req) + return nil, nil + } + }), + } + tf.Namespace = "test" + buf := bytes.NewBuffer([]byte{}) + + cmd := NewCmdDescribe(f, buf) + cmd.Flags().Set("filename", "../../../examples/guestbook/legacy/redis-master-controller.yaml") + cmd.Run(cmd, []string{}) + + if d.Name != "redis-master" || d.Namespace != "test" { + t.Errorf("unexpected describer: %#v", d) + } + + if buf.String() != fmt.Sprintf("%s\n\n", d.Output) { + t.Errorf("unexpected output: %s", buf.String()) + } +} + +func TestDescribeListObjects(t *testing.T) { + pods, _, _ := testData() + f, tf, codec := NewAPIFactory() + d := &testDescriber{Output: "test output"} + tf.Describer = d + tf.Client = &fake.RESTClient{ + Codec: codec, + Resp: &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, pods)}, + } + + tf.Namespace = "test" + buf := bytes.NewBuffer([]byte{}) + cmd := NewCmdDescribe(f, buf) + cmd.Run(cmd, []string{"pods"}) + if buf.String() != fmt.Sprintf("%s\n\n%s\n\n", d.Output, d.Output) { + t.Errorf("unexpected output: %s", buf.String()) + } +} + +func TestDescribeObjectShowEvents(t *testing.T) { + pods, _, _ := testData() + f, tf, codec := NewAPIFactory() + d := &testDescriber{Output: "test output"} + tf.Describer = d + tf.Client = &fake.RESTClient{ + Codec: codec, + Resp: &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, pods)}, + } + + tf.Namespace = "test" + buf := bytes.NewBuffer([]byte{}) + cmd := NewCmdDescribe(f, buf) + cmd.Flags().Set("show-events", "true") + cmd.Run(cmd, []string{"pods"}) + if d.Settings.ShowEvents != true { + t.Errorf("ShowEvents = true expected, got ShowEvents = %v", d.Settings.ShowEvents) + } +} + +func TestDescribeObjectSkipEvents(t *testing.T) { + pods, _, _ := testData() + f, tf, codec := NewAPIFactory() + d := &testDescriber{Output: "test output"} + tf.Describer = d + tf.Client = &fake.RESTClient{ + Codec: codec, + Resp: &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, pods)}, + } + + tf.Namespace = "test" + buf := bytes.NewBuffer([]byte{}) + cmd := NewCmdDescribe(f, buf) + cmd.Flags().Set("show-events", "false") + cmd.Run(cmd, []string{"pods"}) + if d.Settings.ShowEvents != false { + t.Errorf("ShowEvents = false expected, got ShowEvents = %v", d.Settings.ShowEvents) 
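These tests stub the API server through fake.RESTClient, which is ultimately just an http.RoundTripper returning canned responses keyed on method and path. A self-contained version of that trick using only the standard library (stand-in payload and URL, not the vendored fake package):

```go
package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"net/http"
)

// roundTripFunc adapts a plain function to http.RoundTripper, which is
// essentially what fake.CreateHTTPClient does for these tests.
type roundTripFunc func(*http.Request) (*http.Response, error)

func (f roundTripFunc) RoundTrip(req *http.Request) (*http.Response, error) {
	return f(req)
}

func main() {
	client := &http.Client{
		Transport: roundTripFunc(func(req *http.Request) (*http.Response, error) {
			// Route on method and path, like the switch blocks above;
			// the body here is a stand-in payload.
			fmt.Println("intercepted:", req.Method, req.URL.Path)
			body := ioutil.NopCloser(bytes.NewBufferString(`{"kind":"Pod"}`))
			return &http.Response{StatusCode: 200, Header: http.Header{}, Body: body}, nil
		}),
	}
	resp, err := client.Get("http://example.invalid/namespaces/test/pods/foo")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.StatusCode)
}
```

Because RoundTrip intercepts the request before any dialing happens, the test never touches the network.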
+ } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/drain.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/drain.go index 8314c183e844..04ba88732d66 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/drain.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/drain.go @@ -60,7 +60,7 @@ kubectl cordon foo func NewCmdCordon(f *cmdutil.Factory, out io.Writer) *cobra.Command { options := &DrainOptions{factory: f, out: out} - return &cobra.Command{ + cmd := &cobra.Command{ Use: "cordon NODE", Short: "Mark node as unschedulable", Long: cordon_long, @@ -70,6 +70,7 @@ func NewCmdCordon(f *cmdutil.Factory, out io.Writer) *cobra.Command { cmdutil.CheckErr(options.RunCordonOrUncordon(true)) }, } + return cmd } const ( @@ -83,7 +84,7 @@ $ kubectl uncordon foo func NewCmdUncordon(f *cmdutil.Factory, out io.Writer) *cobra.Command { options := &DrainOptions{factory: f, out: out} - return &cobra.Command{ + cmd := &cobra.Command{ Use: "uncordon NODE", Short: "Mark node as schedulable", Long: uncordon_long, @@ -93,6 +94,7 @@ func NewCmdUncordon(f *cmdutil.Factory, out io.Writer) *cobra.Command { cmdutil.CheckErr(options.RunCordonOrUncordon(false)) }, } + return cmd } const ( @@ -103,17 +105,18 @@ Then drain deletes all pods except mirror pods (which cannot be deleted through the API server). If there are DaemonSet-managed pods, drain will not proceed without --ignore-daemonsets, and regardless it will not delete any DaemonSet-managed pods, because those pods would be immediately replaced by the -DaemonSet controller, which ignores unschedulable marknigs. If there are any +DaemonSet controller, which ignores unschedulable markings. If there are any pods that are neither mirror pods nor managed--by ReplicationController, -DaemonSet or Job--, then drain will not delete any pods unless you use --force. +ReplicaSet, DaemonSet or Job--, then drain will not delete any pods unless you +use --force. When you are ready to put the node back into service, use kubectl uncordon, which will make the node schedulable again. ` - drain_example = `# Drain node "foo", even if there are pods not managed by a ReplicationController, Job, or DaemonSet on it. + drain_example = `# Drain node "foo", even if there are pods not managed by a ReplicationController, ReplicaSet, Job, or DaemonSet on it. $ kubectl drain foo --force -# As above, but abort if there are pods not managed by a ReplicationController, Job, or DaemonSet, and use a grace period of 15 minutes. +# As above, but abort if there are pods not managed by a ReplicationController, ReplicaSet, Job, or DaemonSet, and use a grace period of 15 minutes. $ kubectl drain foo --grace-period=900 ` ) @@ -131,7 +134,7 @@ func NewCmdDrain(f *cmdutil.Factory, out io.Writer) *cobra.Command { cmdutil.CheckErr(options.RunDrain()) }, } - cmd.Flags().BoolVar(&options.Force, "force", false, "Continue even if there are pods not managed by a ReplicationController, Job, or DaemonSet.") + cmd.Flags().BoolVar(&options.Force, "force", false, "Continue even if there are pods not managed by a ReplicationController, ReplicaSet, Job, or DaemonSet.") cmd.Flags().BoolVar(&options.IgnoreDaemonsets, "ignore-daemonsets", false, "Ignore DaemonSet-managed pods.") cmd.Flags().IntVar(&options.GracePeriodSeconds, "grace-period", -1, "Period of time in seconds given to each pod to terminate gracefully. 
If negative, the default value specified in the pod will be used.") return cmd @@ -149,14 +152,14 @@ func (o *DrainOptions) SetupDrain(cmd *cobra.Command, args []string) error { return err } - o.mapper, o.typer = o.factory.Object() + o.mapper, o.typer = o.factory.Object(false) cmdNamespace, _, err := o.factory.DefaultNamespace() if err != nil { return err } - r := o.factory.NewBuilder(). + r := o.factory.NewBuilder(cmdutil.GetIncludeThirdPartyAPIs(cmd)). NamespaceParam(cmdNamespace).DefaultNamespace(). ResourceNames("node", args[0]). Do() @@ -196,13 +199,51 @@ func (o *DrainOptions) RunDrain() error { // any unmanaged pods and the user didn't pass --force, we return that list in // an error. func (o *DrainOptions) getPodsForDeletion() ([]api.Pod, error) { - pods := []api.Pod{} - podList, err := o.client.Pods(api.NamespaceAll).List(api.ListOptions{FieldSelector: fields.SelectorFromSet(fields.Set{"spec.nodeName": o.nodeInfo.Name})}) + pods, unreplicatedPodNames, daemonSetPodNames, err := GetPodsForDeletionOnNodeDrain( + o.client, + o.nodeInfo.Name, + o.factory.Decoder(true), + o.Force, + o.IgnoreDaemonsets, + ) + if err != nil { + return []api.Pod{}, err + } + + daemonSetErrors := !o.IgnoreDaemonsets && len(daemonSetPodNames) > 0 + unreplicatedErrors := !o.Force && len(unreplicatedPodNames) > 0 + + switch { + case daemonSetErrors && unreplicatedErrors: + return []api.Pod{}, errors.New(unmanagedMsg(unreplicatedPodNames, daemonSetPodNames, true)) + case daemonSetErrors && !unreplicatedErrors: + return []api.Pod{}, errors.New(unmanagedMsg([]string{}, daemonSetPodNames, true)) + case unreplicatedErrors && !daemonSetErrors: + return []api.Pod{}, errors.New(unmanagedMsg(unreplicatedPodNames, []string{}, true)) + } + + if len(unreplicatedPodNames) > 0 { + fmt.Fprintf(o.out, "WARNING: About to delete these %s\n", unmanagedMsg(unreplicatedPodNames, []string{}, false)) + } + if len(daemonSetPodNames) > 0 { + fmt.Fprintf(o.out, "WARNING: Skipping %s\n", unmanagedMsg([]string{}, daemonSetPodNames, false)) + } + + return pods, nil +} + +// GetPodsForDeletionOnNodeDrain returns pods that should be deleted on node drain as well as some extra information +// about possibly problematic pods (unreplicated and daemon sets). +func GetPodsForDeletionOnNodeDrain(client *client.Client, nodename string, decoder runtime.Decoder, force bool, + ignoreDaemonSet bool) (pods []api.Pod, unreplicatedPodNames []string, daemonSetPodNames []string, finalError error) { + + pods = []api.Pod{} + unreplicatedPodNames = []string{} + daemonSetPodNames = []string{} + podList, err := client.Pods(api.NamespaceAll).List(api.ListOptions{FieldSelector: fields.SelectorFromSet(fields.Set{"spec.nodeName": nodename})}) if err != nil { - return pods, err + return []api.Pod{}, []string{}, []string{}, err } - unreplicatedPodNames := []string{} - daemonSetPodNames := []string{} for _, pod := range podList.Items { _, found := pod.ObjectMeta.Annotations[types.ConfigMirrorAnnotationKey] @@ -217,11 +258,11 @@ func (o *DrainOptions) getPodsForDeletion() ([]api.Pod, error) { if found { // Now verify that the specified creator actually exists.
var sr api.SerializedReference - if err := runtime.DecodeInto(o.factory.Decoder(true), []byte(creatorRef), &sr); err != nil { - return pods, err + if err := runtime.DecodeInto(decoder, []byte(creatorRef), &sr); err != nil { + return []api.Pod{}, []string{}, []string{}, err } if sr.Reference.Kind == "ReplicationController" { - rc, err := o.client.ReplicationControllers(sr.Reference.Namespace).Get(sr.Reference.Name) + rc, err := client.ReplicationControllers(sr.Reference.Namespace).Get(sr.Reference.Name) // Assume the only reason for an error is because the RC is // gone/missing, not for any other cause. TODO(mml): something more // sophisticated than this @@ -229,7 +270,7 @@ func (o *DrainOptions) getPodsForDeletion() ([]api.Pod, error) { replicated = true } } else if sr.Reference.Kind == "DaemonSet" { - ds, err := o.client.DaemonSets(sr.Reference.Namespace).Get(sr.Reference.Name) + ds, err := client.DaemonSets(sr.Reference.Namespace).Get(sr.Reference.Name) // Assume the only reason for an error is because the DaemonSet is // gone/missing, not for any other cause. TODO(mml): something more @@ -242,7 +283,7 @@ func (o *DrainOptions) getPodsForDeletion() ([]api.Pod, error) { daemonset_pod = true } } else if sr.Reference.Kind == "Job" { - job, err := o.client.ExtensionsClient.Jobs(sr.Reference.Namespace).Get(sr.Reference.Name) + job, err := client.ExtensionsClient.Jobs(sr.Reference.Namespace).Get(sr.Reference.Name) // Assume the only reason for an error is because the Job is // gone/missing, not for any other cause. TODO(mml): something more @@ -250,6 +291,15 @@ func (o *DrainOptions) getPodsForDeletion() ([]api.Pod, error) { if err == nil && job != nil { replicated = true } + } else if sr.Reference.Kind == "ReplicaSet" { + rs, err := client.ExtensionsClient.ReplicaSets(sr.Reference.Namespace).Get(sr.Reference.Name) + + // Assume the only reason for an error is because the RS is + // gone/missing, not for any other cause. TODO(mml): something more + // sophisticated than this + if err == nil && rs != nil { + replicated = true + } } } @@ -258,41 +308,21 @@ func (o *DrainOptions) getPodsForDeletion() ([]api.Pod, error) { daemonSetPodNames = append(daemonSetPodNames, pod.Name) case !replicated: unreplicatedPodNames = append(unreplicatedPodNames, pod.Name) - if o.Force { + if force { pods = append(pods, pod) } default: pods = append(pods, pod) } } - - daemonSetErrors := !o.IgnoreDaemonsets && len(daemonSetPodNames) > 0 - unreplicatedErrors := !o.Force && len(unreplicatedPodNames) > 0 - - switch { - case daemonSetErrors && unreplicatedErrors: - return []api.Pod{}, errors.New(unmanagedMsg(unreplicatedPodNames, daemonSetPodNames, true)) - case daemonSetErrors && !unreplicatedErrors: - return []api.Pod{}, errors.New(unmanagedMsg([]string{}, daemonSetPodNames, true)) - case unreplicatedErrors && !daemonSetErrors: - return []api.Pod{}, errors.New(unmanagedMsg(unreplicatedPodNames, []string{}, true)) - } - - if len(unreplicatedPodNames) > 0 { - fmt.Fprintf(o.out, "WARNING: About to delete these %s\n", unmanagedMsg(unreplicatedPodNames, []string{}, false)) - } - if len(daemonSetPodNames) > 0 { - fmt.Fprintf(o.out, "WARNING: Skipping %s\n", unmanagedMsg([]string{}, daemonSetPodNames, false)) - } - - return pods, nil + return pods, unreplicatedPodNames, daemonSetPodNames, nil } // Helper for generating errors or warnings about unmanaged pods. 
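GetPodsForDeletionOnNodeDrain decides each pod's fate by decoding its created-by annotation into an api.SerializedReference and branching on the creator's kind; the real code also re-queries the API server to confirm the creator still exists. A rough standalone sketch of the decode-and-branch step, with a trimmed stand-in type and a hypothetical annotation payload:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// serializedRef is a trimmed-down stand-in for api.SerializedReference:
// just enough of the created-by annotation to read the creator's kind.
type serializedRef struct {
	Reference struct {
		Kind      string `json:"kind"`
		Namespace string `json:"namespace"`
		Name      string `json:"name"`
	} `json:"reference"`
}

func main() {
	// A hypothetical created-by annotation value (real payloads also
	// carry apiVersion, uid, resourceVersion, and more).
	raw := `{"reference":{"kind":"ReplicaSet","namespace":"default","name":"rs"}}`

	var sr serializedRef
	if err := json.Unmarshal([]byte(raw), &sr); err != nil {
		fmt.Println("annotation unreadable:", err)
		return
	}
	// The real code additionally GETs the named creator and only treats
	// the pod as replicated when that object still exists.
	switch sr.Reference.Kind {
	case "ReplicationController", "ReplicaSet", "Job":
		fmt.Println("replicated pod: safe to delete on drain")
	case "DaemonSet":
		fmt.Println("DaemonSet pod: skipped, and an error without --ignore-daemonsets")
	default:
		fmt.Println("unmanaged pod: refused unless --force")
	}
}
```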
func unmanagedMsg(unreplicatedNames []string, daemonSetNames []string, include_guidance bool) string { msgs := []string{} if len(unreplicatedNames) > 0 { - msg := fmt.Sprintf("pods not managed by ReplicationController, Job, or DaemonSet: %s", strings.Join(unreplicatedNames, ",")) + msg := fmt.Sprintf("pods not managed by ReplicationController, ReplicaSet, Job, or DaemonSet: %s", strings.Join(unreplicatedNames, ",")) if include_guidance { msg += " (use --force to override)" } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/drain_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/drain_test.go new file mode 100644 index 000000000000..eb79b0b262e4 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/drain_test.go @@ -0,0 +1,536 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cmd + +import ( + "bytes" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + "reflect" + "strings" + "testing" + "time" + + "github.com/spf13/cobra" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/apis/batch" + "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/pkg/client/unversioned/fake" + "k8s.io/kubernetes/pkg/controller" + "k8s.io/kubernetes/pkg/conversion" + cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" + "k8s.io/kubernetes/pkg/runtime" +) + +var node *api.Node +var cordoned_node *api.Node + +func TestMain(m *testing.M) { + // Create a node. + node = &api.Node{ + ObjectMeta: api.ObjectMeta{ + Name: "node", + CreationTimestamp: unversioned.Time{Time: time.Now()}, + }, + Spec: api.NodeSpec{ + ExternalID: "node", + }, + Status: api.NodeStatus{}, + } + clone, _ := conversion.NewCloner().DeepCopy(node) + + // A copy of the same node, but cordoned. 
+ cordoned_node = clone.(*api.Node) + cordoned_node.Spec.Unschedulable = true + os.Exit(m.Run()) +} + +func TestCordon(t *testing.T) { + tests := []struct { + description string + node *api.Node + expected *api.Node + cmd func(*cmdutil.Factory, io.Writer) *cobra.Command + arg string + expectFatal bool + }{ + { + description: "node/node syntax", + node: cordoned_node, + expected: node, + cmd: NewCmdUncordon, + arg: "node/node", + expectFatal: false, + }, + { + description: "uncordon for real", + node: cordoned_node, + expected: node, + cmd: NewCmdUncordon, + arg: "node", + expectFatal: false, + }, + { + description: "uncordon does nothing", + node: node, + expected: node, + cmd: NewCmdUncordon, + arg: "node", + expectFatal: false, + }, + { + description: "cordon does nothing", + node: cordoned_node, + expected: cordoned_node, + cmd: NewCmdCordon, + arg: "node", + expectFatal: false, + }, + { + description: "cordon for real", + node: node, + expected: cordoned_node, + cmd: NewCmdCordon, + arg: "node", + expectFatal: false, + }, + { + description: "cordon missing node", + node: node, + expected: node, + cmd: NewCmdCordon, + arg: "bar", + expectFatal: true, + }, + { + description: "uncordon missing node", + node: node, + expected: node, + cmd: NewCmdUncordon, + arg: "bar", + expectFatal: true, + }, + } + + for _, test := range tests { + f, tf, codec := NewAPIFactory() + new_node := &api.Node{} + updated := false + tf.Client = &fake.RESTClient{ + Codec: codec, + Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { + m := &MyReq{req} + switch { + case m.isFor("GET", "/nodes/node"): + return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, test.node)}, nil + case m.isFor("GET", "/nodes/bar"): + return &http.Response{StatusCode: 404, Header: defaultHeader(), Body: stringBody("nope")}, nil + case m.isFor("PUT", "/nodes/node"): + data, err := ioutil.ReadAll(req.Body) + if err != nil { + t.Fatalf("%s: unexpected error: %v", test.description, err) + } + defer req.Body.Close() + if err := runtime.DecodeInto(codec, data, new_node); err != nil { + t.Fatalf("%s: unexpected error: %v", test.description, err) + } + if !reflect.DeepEqual(test.expected.Spec, new_node.Spec) { + t.Fatalf("%s: expected:\n%v\nsaw:\n%v\n", test.description, test.expected.Spec, new_node.Spec) + } + updated = true + return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, new_node)}, nil + default: + t.Fatalf("%s: unexpected request: %v %#v\n%#v", test.description, req.Method, req.URL, req) + return nil, nil + } + }), + } + tf.ClientConfig = defaultClientConfig() + + buf := bytes.NewBuffer([]byte{}) + cmd := test.cmd(f, buf) + + saw_fatal := false + func() { + defer func() { + // Recover from the panic below. 
+ _ = recover() + // Restore cmdutil behavior + cmdutil.DefaultBehaviorOnFatal() + }() + cmdutil.BehaviorOnFatal(func(e string) { saw_fatal = true; panic(e) }) + cmd.SetArgs([]string{test.arg}) + cmd.Execute() + }() + + if test.expectFatal { + if !saw_fatal { + t.Fatalf("%s: unexpected non-error", test.description) + } + if updated { + t.Fatalf("%s: unexpected update", test.description) + } + } + + if !test.expectFatal && saw_fatal { + t.Fatalf("%s: unexpected error", test.description) + } + if !reflect.DeepEqual(test.expected.Spec, test.node.Spec) && !updated { + t.Fatalf("%s: node never updated", test.description) + } + } +} + +func TestDrain(t *testing.T) { + labels := make(map[string]string) + labels["my_key"] = "my_value" + + rc := api.ReplicationController{ + ObjectMeta: api.ObjectMeta{ + Name: "rc", + Namespace: "default", + CreationTimestamp: unversioned.Time{Time: time.Now()}, + Labels: labels, + SelfLink: testapi.Default.SelfLink("replicationcontrollers", "rc"), + }, + Spec: api.ReplicationControllerSpec{ + Selector: labels, + }, + } + + rc_anno := make(map[string]string) + rc_anno[controller.CreatedByAnnotation] = refJson(t, &rc) + + rc_pod := api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: "bar", + Namespace: "default", + CreationTimestamp: unversioned.Time{Time: time.Now()}, + Labels: labels, + Annotations: rc_anno, + }, + Spec: api.PodSpec{ + NodeName: "node", + }, + } + + ds := extensions.DaemonSet{ + ObjectMeta: api.ObjectMeta{ + Name: "ds", + Namespace: "default", + CreationTimestamp: unversioned.Time{Time: time.Now()}, + SelfLink: "/apis/extensions/v1beta1/namespaces/default/daemonsets/ds", + }, + Spec: extensions.DaemonSetSpec{ + Selector: &unversioned.LabelSelector{MatchLabels: labels}, + }, + } + + ds_anno := make(map[string]string) + ds_anno[controller.CreatedByAnnotation] = refJson(t, &ds) + + ds_pod := api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: "bar", + Namespace: "default", + CreationTimestamp: unversioned.Time{Time: time.Now()}, + Labels: labels, + Annotations: ds_anno, + }, + Spec: api.PodSpec{ + NodeName: "node", + }, + } + + job := batch.Job{ + ObjectMeta: api.ObjectMeta{ + Name: "job", + Namespace: "default", + CreationTimestamp: unversioned.Time{Time: time.Now()}, + SelfLink: "/apis/extensions/v1beta1/namespaces/default/jobs/job", + }, + Spec: batch.JobSpec{ + Selector: &unversioned.LabelSelector{MatchLabels: labels}, + }, + } + + job_pod := api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: "bar", + Namespace: "default", + CreationTimestamp: unversioned.Time{Time: time.Now()}, + Labels: labels, + Annotations: map[string]string{controller.CreatedByAnnotation: refJson(t, &job)}, + }, + } + + rs := extensions.ReplicaSet{ + ObjectMeta: api.ObjectMeta{ + Name: "rs", + Namespace: "default", + CreationTimestamp: unversioned.Time{Time: time.Now()}, + Labels: labels, + SelfLink: testapi.Default.SelfLink("replicasets", "rs"), + }, + Spec: extensions.ReplicaSetSpec{ + Selector: &unversioned.LabelSelector{MatchLabels: labels}, + }, + } + + rs_anno := make(map[string]string) + rs_anno[controller.CreatedByAnnotation] = refJson(t, &rs) + + rs_pod := api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: "bar", + Namespace: "default", + CreationTimestamp: unversioned.Time{Time: time.Now()}, + Labels: labels, + Annotations: rs_anno, + }, + Spec: api.PodSpec{ + NodeName: "node", + }, + } + + naked_pod := api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: "bar", + Namespace: "default", + CreationTimestamp: unversioned.Time{Time: time.Now()}, + Labels: labels, + }, + Spec: api.PodSpec{ +
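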
NodeName: "node", + }, + } + + tests := []struct { + description string + node *api.Node + expected *api.Node + pods []api.Pod + rcs []api.ReplicationController + replicaSets []extensions.ReplicaSet + args []string + expectFatal bool + expectDelete bool + }{ + { + description: "RC-managed pod", + node: node, + expected: cordoned_node, + pods: []api.Pod{rc_pod}, + rcs: []api.ReplicationController{rc}, + args: []string{"node"}, + expectFatal: false, + expectDelete: true, + }, + { + description: "DS-managed pod", + node: node, + expected: cordoned_node, + pods: []api.Pod{ds_pod}, + rcs: []api.ReplicationController{rc}, + args: []string{"node"}, + expectFatal: true, + expectDelete: false, + }, + { + description: "DS-managed pod with --ignore-daemonsets", + node: node, + expected: cordoned_node, + pods: []api.Pod{ds_pod}, + rcs: []api.ReplicationController{rc}, + args: []string{"node", "--ignore-daemonsets"}, + expectFatal: false, + expectDelete: false, + }, + { + description: "Job-managed pod", + node: node, + expected: cordoned_node, + pods: []api.Pod{job_pod}, + rcs: []api.ReplicationController{rc}, + args: []string{"node"}, + expectFatal: false, + expectDelete: true, + }, + { + description: "RS-managed pod", + node: node, + expected: cordoned_node, + pods: []api.Pod{rs_pod}, + replicaSets: []extensions.ReplicaSet{rs}, + args: []string{"node"}, + expectFatal: false, + expectDelete: true, + }, + { + description: "naked pod", + node: node, + expected: cordoned_node, + pods: []api.Pod{naked_pod}, + rcs: []api.ReplicationController{}, + args: []string{"node"}, + expectFatal: true, + expectDelete: false, + }, + { + description: "naked pod with --force", + node: node, + expected: cordoned_node, + pods: []api.Pod{naked_pod}, + rcs: []api.ReplicationController{}, + args: []string{"node", "--force"}, + expectFatal: false, + expectDelete: true, + }, + { + description: "empty node", + node: node, + expected: cordoned_node, + pods: []api.Pod{}, + rcs: []api.ReplicationController{rc}, + args: []string{"node"}, + expectFatal: false, + expectDelete: false, + }, + } + + for _, test := range tests { + new_node := &api.Node{} + deleted := false + f, tf, codec := NewAPIFactory() + + tf.Client = &fake.RESTClient{ + Codec: codec, + Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { + m := &MyReq{req} + switch { + case m.isFor("GET", "/nodes/node"): + return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, test.node)}, nil + case m.isFor("GET", "/namespaces/default/replicationcontrollers/rc"): + return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, &test.rcs[0])}, nil + case m.isFor("GET", "/namespaces/default/daemonsets/ds"): + return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(testapi.Extensions.Codec(), &ds)}, nil + case m.isFor("GET", "/namespaces/default/jobs/job"): + return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(testapi.Extensions.Codec(), &job)}, nil + case m.isFor("GET", "/namespaces/default/replicasets/rs"): + return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(testapi.Extensions.Codec(), &test.replicaSets[0])}, nil + case m.isFor("GET", "/pods"): + values, err := url.ParseQuery(req.URL.RawQuery) + if err != nil { + t.Fatalf("%s: unexpected error: %v", test.description, err) + } + get_params := make(url.Values) + get_params["fieldSelector"] = []string{"spec.nodeName=node"} + if !reflect.DeepEqual(get_params, values) { + t.Fatalf("%s: 
expected:\n%v\nsaw:\n%v\n", test.description, get_params, values) + } + return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, &api.PodList{Items: test.pods})}, nil + case m.isFor("GET", "/replicationcontrollers"): + return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, &api.ReplicationControllerList{Items: test.rcs})}, nil + case m.isFor("PUT", "/nodes/node"): + data, err := ioutil.ReadAll(req.Body) + if err != nil { + t.Fatalf("%s: unexpected error: %v", test.description, err) + } + defer req.Body.Close() + if err := runtime.DecodeInto(codec, data, new_node); err != nil { + t.Fatalf("%s: unexpected error: %v", test.description, err) + } + if !reflect.DeepEqual(test.expected.Spec, new_node.Spec) { + t.Fatalf("%s: expected:\n%v\nsaw:\n%v\n", test.description, test.expected.Spec, new_node.Spec) + } + return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, new_node)}, nil + case m.isFor("DELETE", "/namespaces/default/pods/bar"): + deleted = true + return &http.Response{StatusCode: 204, Header: defaultHeader(), Body: objBody(codec, &test.pods[0])}, nil + default: + t.Fatalf("%s: unexpected request: %v %#v\n%#v", test.description, req.Method, req.URL, req) + return nil, nil + } + }), + } + tf.ClientConfig = defaultClientConfig() + + buf := bytes.NewBuffer([]byte{}) + cmd := NewCmdDrain(f, buf) + + saw_fatal := false + func() { + defer func() { + // Recover from the panic below. + _ = recover() + // Restore cmdutil behavior + cmdutil.DefaultBehaviorOnFatal() + }() + cmdutil.BehaviorOnFatal(func(e string) { saw_fatal = true; panic(e) }) + cmd.SetArgs(test.args) + cmd.Execute() + }() + + if test.expectFatal { + if !saw_fatal { + t.Fatalf("%s: unexpected non-error", test.description) + } + } + + if test.expectDelete { + if !deleted { + t.Fatalf("%s: pod never deleted", test.description) + } + } + if !test.expectDelete { + if deleted { + t.Fatalf("%s: unexpected delete", test.description) + } + } + } +} + +type MyReq struct { + Request *http.Request +} + +func (m *MyReq) isFor(method string, path string) bool { + req := m.Request + + return method == req.Method && (req.URL.Path == path || req.URL.Path == strings.Join([]string{"/api/v1", path}, "") || req.URL.Path == strings.Join([]string{"/apis/extensions/v1beta1", path}, "")) +} + +func refJson(t *testing.T, o runtime.Object) string { + ref, err := api.GetReference(o) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + _, _, codec := NewAPIFactory() + json, err := runtime.Encode(codec, &api.SerializedReference{Reference: *ref}) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + return string(json) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/edit.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/edit.go index f43a662c76a1..bd5098b8747f 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/edit.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/edit.go @@ -22,12 +22,14 @@ import ( "fmt" "io" "os" - "path" + "path/filepath" + "reflect" gruntime "runtime" "strings" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/errors" + "k8s.io/kubernetes/pkg/api/meta" "k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/kubectl" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" @@ -35,7 +37,7 @@ import ( "k8s.io/kubernetes/pkg/kubectl/cmd/util/jsonmerge" "k8s.io/kubernetes/pkg/kubectl/resource" "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/util" + 
"k8s.io/kubernetes/pkg/util/crlf" "k8s.io/kubernetes/pkg/util/strategicpatch" "k8s.io/kubernetes/pkg/util/yaml" @@ -74,34 +76,55 @@ saved copy to include the latest resource version.` kubectl edit svc/docker-registry --output-version=v1 -o json` ) +// EditOptions is the start of the data required to perform the operation. As new fields are added, add them here instead of +// referencing the cmd.Flags() +type EditOptions struct { + Filenames []string + Recursive bool +} + var errExit = fmt.Errorf("exit directly") -func NewCmdEdit(f *cmdutil.Factory, out io.Writer) *cobra.Command { - filenames := []string{} +func NewCmdEdit(f *cmdutil.Factory, out, errOut io.Writer) *cobra.Command { + options := &EditOptions{} + + // retrieve a list of handled resources from printer as valid args + validArgs, argAliases := []string{}, []string{} + p, err := f.Printer(nil, false, false, false, false, false, false, []string{}) + cmdutil.CheckErr(err) + if p != nil { + validArgs = p.HandledResources() + argAliases = kubectl.ResourceAliases(validArgs) + } + cmd := &cobra.Command{ Use: "edit (RESOURCE/NAME | -f FILENAME)", Short: "Edit a resource on the server", Long: editLong, Example: fmt.Sprintf(editExample), Run: func(cmd *cobra.Command, args []string) { - err := RunEdit(f, out, cmd, args, filenames) + err := RunEdit(f, out, errOut, cmd, args, options) if err == errExit { os.Exit(1) } cmdutil.CheckErr(err) }, + ValidArgs: validArgs, + ArgAliases: argAliases, } usage := "Filename, directory, or URL to file to use to edit the resource" - kubectl.AddJsonFilenameFlag(cmd, &filenames, usage) + kubectl.AddJsonFilenameFlag(cmd, &options.Filenames, usage) + cmdutil.AddRecursiveFlag(cmd, &options.Recursive) cmd.Flags().StringP("output", "o", "yaml", "Output format. One of: yaml|json.") cmd.Flags().String("output-version", "", "Output the formatted object with the given group version (for ex: 'extensions/v1beta1').") cmd.Flags().Bool("windows-line-endings", gruntime.GOOS == "windows", "Use Windows line-endings (default Unix line-endings)") cmdutil.AddApplyAnnotationFlags(cmd) cmdutil.AddRecordFlag(cmd) + cmdutil.AddInclude3rdPartyFlags(cmd) return cmd } -func RunEdit(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []string, filenames []string) error { +func RunEdit(f *cmdutil.Factory, out, errOut io.Writer, cmd *cobra.Command, args []string, options *EditOptions) error { var printer kubectl.ResourcePrinter var ext string switch format := cmdutil.GetFlagString(cmd, "output"); format { @@ -120,20 +143,26 @@ func RunEdit(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []strin return err } - mapper, typer := f.Object() + mapper, typer := f.Object(cmdutil.GetIncludeThirdPartyAPIs(cmd)) resourceMapper := &resource.Mapper{ ObjectTyper: typer, RESTMapper: mapper, ClientMapper: resource.ClientMapperFunc(f.ClientForMapping), - Decoder: f.Decoder(true), + + // NB: we use `f.Decoder(false)` to get a plain deserializer for + // the resourceMapper, since it's used to read in edits and + // we don't want to convert into the internal version when + // reading in edits (this would cause us to potentially try to + // compare two different GroupVersions). + Decoder: f.Decoder(false), } r := resource.NewBuilder(mapper, typer, resource.ClientMapperFunc(f.ClientForMapping), f.Decoder(true)). NamespaceParam(cmdNamespace).DefaultNamespace(). - FilenameParam(enforceNamespace, filenames...). + FilenameParam(enforceNamespace, options.Recursive, options.Filenames...). ResourceTypeOrNameArgs(true, args...). - Latest(). 
Flatten(). + Latest(). Do() err = r.Err() if err != nil { @@ -155,7 +184,7 @@ func RunEdit(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []strin if err != nil { return err } - objs, err := resource.AsVersionedObjects(infos, defaultVersion.String(), encoder) + originalObj, err := resource.AsVersionedObject(infos, false, defaultVersion, encoder) if err != nil { return err } @@ -169,126 +198,189 @@ func RunEdit(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []strin file string ) -outter: - for i := range objs { - obj := objs[i] - // some bookkeeping - results.header.flush() - containsError := false - - for { - // generate the file to edit - buf := &bytes.Buffer{} - var w io.Writer = buf - if windowsLineEndings { - w = util.NewCRLFWriter(w) + containsError := false + + for { + // infos mutates over time to be the list of things we've tried and failed to edit + // this means that our overall list changes over time. + objToEdit, err := resource.AsVersionedObject(infos, false, defaultVersion, encoder) + if err != nil { + return err + } + + // generate the file to edit + buf := &bytes.Buffer{} + var w io.Writer = buf + if windowsLineEndings { + w = crlf.NewCRLFWriter(w) + } + if err := results.header.writeTo(w); err != nil { + return preservedFile(err, results.file, errOut) + } + if !containsError { + if err := printer.PrintObj(objToEdit, w); err != nil { + return preservedFile(err, results.file, errOut) } - if err := results.header.writeTo(w); err != nil { - return preservedFile(err, results.file, out) + original = buf.Bytes() + } else { + // In case of an error, preserve the edited file. + // Remove the comments (header) from it since we already + // have included the latest header in the buffer above. + buf.Write(manualStrip(edited)) + } + + // launch the editor + editedDiff := edited + edited, file, err = edit.LaunchTempFile(fmt.Sprintf("%s-edit-", filepath.Base(os.Args[0])), ext, buf) + if err != nil { + return preservedFile(err, results.file, errOut) + } + if bytes.Equal(stripComments(editedDiff), stripComments(edited)) { + // Ugly hack right here. We will hit this either (1) when we try to + // save the same changes we tried to save in the previous iteration + // which means our changes are invalid or (2) when we exit the second + // time. The second case is more usual so we can probably live with it. + // TODO: A less hacky fix would be welcome :) + fmt.Fprintln(errOut, "Edit cancelled, no valid changes were saved.") + return nil + } + + // cleanup any file from the previous pass + if len(results.file) > 0 { + os.Remove(results.file) + } + glog.V(4).Infof("User edited:\n%s", string(edited)) + + // Compare content without comments + if bytes.Equal(stripComments(original), stripComments(edited)) { + os.Remove(file) + fmt.Fprintln(errOut, "Edit cancelled, no changes made.") + return nil + } + lines, err := hasLines(bytes.NewBuffer(edited)) + if err != nil { + return preservedFile(err, file, errOut) + } + if !lines { + os.Remove(file) + fmt.Fprintln(errOut, "Edit cancelled, saved file was empty.") + return nil + } + + results = editResults{ + file: file, + } + + // parse the edited file + updates, err := resourceMapper.InfoForData(edited, "edited-file") + if err != nil { + // syntax error + containsError = true + results.header.reasons = append(results.header.reasons, editReason{head: fmt.Sprintf("The edited file had a syntax error: %v", err)}) + continue + } + // not a syntax error as it turns out... 
+ containsError = false + + namespaceVisitor := resource.NewFlattenListVisitor(updates, resourceMapper) + // need to make sure the original namespace wasn't changed while editing + if err = namespaceVisitor.Visit(resource.RequireNamespace(cmdNamespace)); err != nil { + return preservedFile(err, file, errOut) + } + + mutatedObjects := []runtime.Object{} + annotationVisitor := resource.NewFlattenListVisitor(updates, resourceMapper) + // iterate through all items to apply annotations + if err = annotationVisitor.Visit(func(info *resource.Info, incomingErr error) error { + // put configuration annotation in "updates" + if err := kubectl.CreateOrUpdateAnnotation(cmdutil.GetFlagBool(cmd, cmdutil.ApplyAnnotationsFlag), info, encoder); err != nil { + return err } - if !containsError { - if err := printer.PrintObj(obj, w); err != nil { - return preservedFile(err, results.file, out) + if cmdutil.ShouldRecord(cmd, info) { + if err := cmdutil.RecordChangeCause(info.Object, f.Command()); err != nil { + return err } - original = buf.Bytes() - } else { - // In case of an error, preserve the edited file. - // Remove the comments (header) from it since we already - // have included the latest header in the buffer above. - buf.Write(manualStrip(edited)) } + mutatedObjects = append(mutatedObjects, info.Object) - // launch the editor - editedDiff := edited - edited, file, err = edit.LaunchTempFile(fmt.Sprintf("%s-edit-", path.Base(os.Args[0])), ext, buf) - if err != nil { - return preservedFile(err, results.file, out) - } - if bytes.Equal(stripComments(editedDiff), stripComments(edited)) { - // Ugly hack right here. We will hit this either (1) when we try to - // save the same changes we tried to save in the previous iteration - // which means our changes are invalid or (2) when we exit the second - // time. The second case is more usual so we can probably live with it. - // TODO: A less hacky fix would be welcome :) - fmt.Fprintln(out, "Edit cancelled, no valid changes were saved.") - continue outter - } + return nil - // cleanup any file from the previous pass - if len(results.file) > 0 { - os.Remove(results.file) - } - glog.V(4).Infof("User edited:\n%s", string(edited)) + }); err != nil { + return preservedFile(err, file, errOut) + } - // Compare content without comments - if bytes.Equal(stripComments(original), stripComments(edited)) { - os.Remove(file) - fmt.Fprintln(out, "Edit cancelled, no changes made.") - continue outter - } - lines, err := hasLines(bytes.NewBuffer(edited)) - if err != nil { - return preservedFile(err, file, out) - } - if !lines { - os.Remove(file) - fmt.Fprintln(out, "Edit cancelled, saved file was empty.") - continue outter - } + // if we mutated a list in the visitor, persist the changes on the overall object + if meta.IsListType(updates.Object) { + meta.SetList(updates.Object, mutatedObjects) + } - results = editResults{ - file: file, - } + patchVisitor := resource.NewFlattenListVisitor(updates, resourceMapper) + err = patchVisitor.Visit(func(info *resource.Info, incomingErr error) error { + currOriginalObj := originalObj - // parse the edited file - updates, err := resourceMapper.InfoForData(edited, "edited-file") - if err != nil { - // syntax error - containsError = true - results.header.reasons = append(results.header.reasons, editReason{head: fmt.Sprintf("The edited file had a syntax error: %v", err)}) - continue - } - // not a syntax error as it turns out... 
- containsError = false + // if we're editing a list, then navigate the list to find the item that we're currently trying to edit + if meta.IsListType(originalObj) { + currOriginalObj = nil + editObjUID, err := meta.NewAccessor().UID(info.Object) + if err != nil { + return err + } - // put configuration annotation in "updates" - if err := kubectl.CreateOrUpdateAnnotation(cmdutil.GetFlagBool(cmd, cmdutil.ApplyAnnotationsFlag), updates, encoder); err != nil { - return preservedFile(err, file, out) - } - if cmdutil.ShouldRecord(cmd, updates) { - err = cmdutil.RecordChangeCause(updates.Object, f.Command()) + listItems, err := meta.ExtractList(originalObj) if err != nil { return err } - } - editedCopy := edited - if editedCopy, err = runtime.Encode(encoder, updates.Object); err != nil { - return preservedFile(err, file, out) - } - visitor := resource.NewFlattenListVisitor(updates, resourceMapper) + // iterate through the list to find the item with the matching UID + for i := range listItems { + originalObjUID, err := meta.NewAccessor().UID(listItems[i]) + if err != nil { + return err + } + if editObjUID == originalObjUID { + currOriginalObj = listItems[i] + break + } + } + if currOriginalObj == nil { + return fmt.Errorf("no original object found for %#v", info.Object) + } + + } - // need to make sure the original namespace wasn't changed while editing - if err = visitor.Visit(resource.RequireNamespace(cmdNamespace)); err != nil { - return preservedFile(err, file, out) + originalSerialization, err := runtime.Encode(encoder, currOriginalObj) + if err != nil { + return err + } + editedSerialization, err := runtime.Encode(encoder, info.Object) + if err != nil { + return err } + // compute the patch on a per-item basis // use strategic merge to create a patch - originalJS, err := yaml.ToJSON(original) + originalJS, err := yaml.ToJSON(originalSerialization) if err != nil { - return preservedFile(err, file, out) + return err } - editedJS, err := yaml.ToJSON(editedCopy) + editedJS, err := yaml.ToJSON(editedSerialization) if err != nil { - return preservedFile(err, file, out) + return err + } + + if reflect.DeepEqual(originalJS, editedJS) { + // no edit, so just skip it. 
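The reflect.DeepEqual guard here skips items whose re-encoded edit is byte-for-byte identical to the original serialization, so no patch is sent for them. A looser standalone illustration that compares the decoded JSON instead, making key order and whitespace irrelevant (a variant for illustration, not what the vendored code does):

```go
package main

import (
	"encoding/json"
	"fmt"
	"reflect"
)

// unchanged reports whether two JSON serializations describe the same
// object, ignoring key order and whitespace.
func unchanged(a, b []byte) (bool, error) {
	var av, bv interface{}
	if err := json.Unmarshal(a, &av); err != nil {
		return false, err
	}
	if err := json.Unmarshal(b, &bv); err != nil {
		return false, err
	}
	return reflect.DeepEqual(av, bv), nil
}

func main() {
	orig := []byte(`{"kind":"Service","spec":{"port":80}}`)
	edited := []byte(`{"spec":{"port":80},"kind":"Service"}`)
	same, err := unchanged(orig, edited)
	if err != nil {
		panic(err)
	}
	fmt.Println("skip patch:", same) // true: nothing to send to the server
}
```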
+ cmdutil.PrintSuccess(mapper, false, out, info.Mapping.Resource, info.Name, "skipped") + return nil } - patch, err := strategicpatch.CreateStrategicMergePatch(originalJS, editedJS, obj) + + patch, err := strategicpatch.CreateStrategicMergePatch(originalJS, editedJS, currOriginalObj) // TODO: change all jsonmerge to strategicpatch // for checking preconditions preconditions := []jsonmerge.PreconditionFunc{} if err != nil { glog.V(4).Infof("Unable to calculate diff, no merge is possible: %v", err) - return preservedFile(err, file, out) + return err } else { preconditions = append(preconditions, jsonmerge.RequireKeyUnchanged("apiVersion")) preconditions = append(preconditions, jsonmerge.RequireKeyUnchanged("kind")) @@ -297,42 +389,49 @@ outter: } if hold, msg := jsonmerge.TestPreconditionsHold(patch, preconditions); !hold { - fmt.Fprintf(out, "error: %s", msg) - return preservedFile(nil, file, out) + fmt.Fprintf(errOut, "error: %s", msg) + return preservedFile(nil, file, errOut) } - err = visitor.Visit(func(info *resource.Info, err error) error { - patched, err := resource.NewHelper(info.Client, info.Mapping).Patch(info.Namespace, info.Name, api.StrategicMergePatchType, patch) - if err != nil { - glog.V(4).Infof(results.addError(err, info)) - return err - } - info.Refresh(patched, true) - cmdutil.PrintSuccess(mapper, false, out, info.Mapping.Resource, info.Name, "edited") + patched, err := resource.NewHelper(info.Client, info.Mapping).Patch(info.Namespace, info.Name, api.StrategicMergePatchType, patch) + if err != nil { + fmt.Fprintln(out, results.addError(err, info)) return nil - }) - if err == nil { - os.Remove(file) - continue outter - } - // Handle all possible errors - // - // 1. retryable: propose kubectl replace -f - // 2. notfound: indicate the location of the saved configuration of the deleted resource - // 3. invalid: retry those on the spot by looping ie. reloading the editor - if results.retryable > 0 { - fmt.Fprintf(out, "You can run `%s replace -f %s` to try this update again.\n", os.Args[0], file) - continue outter } - if results.notfound > 0 { + info.Refresh(patched, true) + cmdutil.PrintSuccess(mapper, false, out, info.Mapping.Resource, info.Name, "edited") + return nil + }) + if err != nil { + return preservedFile(err, results.file, errOut) + } + + // Handle all possible errors + // + // 1. retryable: propose kubectl replace -f + // 2. notfound: indicate the location of the saved configuration of the deleted resource + // 3. invalid: retry those on the spot by looping ie. 
reloading the editor + if results.retryable > 0 { + fmt.Fprintf(errOut, "You can run `%s replace -f %s` to try this update again.\n", filepath.Base(os.Args[0]), file) + return errExit + } + if results.notfound > 0 { + fmt.Fprintf(errOut, "The edits you made on deleted resources have been saved to %q\n", file) + return errExit + } + + if len(results.edit) == 0 { + if results.notfound == 0 { + os.Remove(file) + } else { fmt.Fprintf(out, "The edits you made on deleted resources have been saved to %q\n", file) - continue outter } - // validation error - containsError = true + return nil } + + // loop again and edit the remaining items + infos = results.edit } - return nil } // editReason preserves a message about the reason this file must be edited again @@ -397,13 +496,13 @@ func (r *editResults) addError(err error, info *resource.Info) string { } } r.header.reasons = append(r.header.reasons, reason) - return fmt.Sprintf("Error: %s %q is invalid", info.Mapping.Resource, info.Name) + return fmt.Sprintf("error: %s %q is invalid", info.Mapping.Resource, info.Name) case errors.IsNotFound(err): r.notfound++ - return fmt.Sprintf("Error: %s %q could not be found on the server", info.Mapping.Resource, info.Name) + return fmt.Sprintf("error: %s %q could not be found on the server", info.Mapping.Resource, info.Name) default: r.retryable++ - return fmt.Sprintf("Error: %s %q could not be patched: %v", info.Mapping.Resource, info.Name, err) + return fmt.Sprintf("error: %s %q could not be patched: %v", info.Mapping.Resource, info.Name, err) } } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/exec.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/exec.go index ca981b44e5da..e59925873535 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/exec.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/exec.go @@ -32,6 +32,7 @@ import ( client "k8s.io/kubernetes/pkg/client/unversioned" "k8s.io/kubernetes/pkg/client/unversioned/remotecommand" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" + remotecommandserver "k8s.io/kubernetes/pkg/kubelet/server/remotecommand" ) const ( @@ -87,7 +88,7 @@ func (*DefaultRemoteExecutor) Execute(method string, url *url.URL, config *restc if err != nil { return err } - return exec.Stream(stdin, stdout, stderr, tty) + return exec.Stream(remotecommandserver.SupportedStreamingProtocols, stdin, stdout, stderr, tty) } // ExecOptions declare the arguments accepted by the Exec command diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/exec_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/exec_test.go new file mode 100644 index 000000000000..9b77322a3053 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/exec_test.go @@ -0,0 +1,258 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
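The argsLenAtDash values driven through Complete in the exec tests below come, in the real command, from cobra's ArgsLenAtDash, which reports how many positional arguments appeared before a literal "--" (or -1 when none was given). A small demonstration (assumes spf13/cobra):

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	cmd := &cobra.Command{
		Use: "exec POD -- COMMAND [args...]",
		Run: func(cmd *cobra.Command, args []string) {
			// ArgsLenAtDash returns -1 when no "--" was given, otherwise
			// the number of positional arguments that preceded it.
			fmt.Printf("args=%v argsLenAtDash=%d\n", args, cmd.ArgsLenAtDash())
		},
	}
	cmd.SetArgs([]string{"mypod", "--", "ls", "-l"})
	_ = cmd.Execute() // prints: args=[mypod ls -l] argsLenAtDash=1
}
```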
+*/ + +package cmd + +import ( + "bytes" + "fmt" + "io" + "net/http" + "net/url" + "reflect" + "testing" + + "github.com/spf13/cobra" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/client/restclient" + "k8s.io/kubernetes/pkg/client/unversioned/fake" +) + +type fakeRemoteExecutor struct { + method string + url *url.URL + execErr error +} + +func (f *fakeRemoteExecutor) Execute(method string, url *url.URL, config *restclient.Config, stdin io.Reader, stdout, stderr io.Writer, tty bool) error { + f.method = method + f.url = url + return f.execErr +} + +func TestPodAndContainer(t *testing.T) { + tests := []struct { + args []string + argsLenAtDash int + p *ExecOptions + name string + expectError bool + expectedPod string + expectedContainer string + expectedArgs []string + }{ + { + p: &ExecOptions{}, + argsLenAtDash: -1, + expectError: true, + name: "empty", + }, + { + p: &ExecOptions{PodName: "foo"}, + argsLenAtDash: -1, + expectError: true, + name: "no cmd", + }, + { + p: &ExecOptions{PodName: "foo", ContainerName: "bar"}, + argsLenAtDash: -1, + expectError: true, + name: "no cmd, w/ container", + }, + { + p: &ExecOptions{PodName: "foo"}, + args: []string{"cmd"}, + argsLenAtDash: -1, + expectedPod: "foo", + expectedArgs: []string{"cmd"}, + name: "pod in flags", + }, + { + p: &ExecOptions{}, + args: []string{"foo", "cmd"}, + argsLenAtDash: 0, + expectError: true, + name: "no pod, pod name is behind dash", + }, + { + p: &ExecOptions{}, + args: []string{"foo"}, + argsLenAtDash: -1, + expectError: true, + name: "no cmd, w/o flags", + }, + { + p: &ExecOptions{}, + args: []string{"foo", "cmd"}, + argsLenAtDash: -1, + expectedPod: "foo", + expectedArgs: []string{"cmd"}, + name: "cmd, w/o flags", + }, + { + p: &ExecOptions{}, + args: []string{"foo", "cmd"}, + argsLenAtDash: 1, + expectedPod: "foo", + expectedArgs: []string{"cmd"}, + name: "cmd, cmd is behind dash", + }, + { + p: &ExecOptions{ContainerName: "bar"}, + args: []string{"foo", "cmd"}, + argsLenAtDash: -1, + expectedPod: "foo", + expectedContainer: "bar", + expectedArgs: []string{"cmd"}, + name: "cmd, container in flag", + }, + } + for _, test := range tests { + f, tf, codec := NewAPIFactory() + tf.Client = &fake.RESTClient{ + Codec: codec, + Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { return nil, nil }), + } + tf.Namespace = "test" + tf.ClientConfig = &restclient.Config{} + + cmd := &cobra.Command{} + options := test.p + err := options.Complete(f, cmd, test.args, test.argsLenAtDash) + if test.expectError && err == nil { + t.Errorf("unexpected non-error (%s)", test.name) + } + if !test.expectError && err != nil { + t.Errorf("unexpected error: %v (%s)", err, test.name) + } + if err != nil { + continue + } + if options.PodName != test.expectedPod { + t.Errorf("expected: %s, got: %s (%s)", test.expectedPod, options.PodName, test.name) + } + if options.ContainerName != test.expectedContainer { + t.Errorf("expected: %s, got: %s (%s)", test.expectedContainer, options.ContainerName, test.name) + } + if !reflect.DeepEqual(test.expectedArgs, options.Command) { + t.Errorf("expected: %v, got %v (%s)", test.expectedArgs, options.Command, test.name) + } + } +} + +func TestExec(t *testing.T) { + version := testapi.Default.GroupVersion().Version + tests := []struct { + name, version, podPath, execPath, container string + pod *api.Pod + execErr bool + }{ + { + name: "pod exec", + version: version, + podPath: "/api/" + version + 
"/namespaces/test/pods/foo", + execPath: "/api/" + version + "/namespaces/test/pods/foo/exec", + pod: execPod(), + }, + { + name: "pod exec error", + version: version, + podPath: "/api/" + version + "/namespaces/test/pods/foo", + execPath: "/api/" + version + "/namespaces/test/pods/foo/exec", + pod: execPod(), + execErr: true, + }, + } + for _, test := range tests { + f, tf, codec := NewAPIFactory() + tf.Client = &fake.RESTClient{ + Codec: codec, + Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { + switch p, m := req.URL.Path, req.Method; { + case p == test.podPath && m == "GET": + body := objBody(codec, test.pod) + return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: body}, nil + default: + // Ensures no GET is performed when deleting by name + t.Errorf("%s: unexpected request: %s %#v\n%#v", test.name, req.Method, req.URL, req) + return nil, fmt.Errorf("unexpected request") + } + }), + } + tf.Namespace = "test" + tf.ClientConfig = &restclient.Config{ContentConfig: restclient.ContentConfig{GroupVersion: &unversioned.GroupVersion{Version: test.version}}} + bufOut := bytes.NewBuffer([]byte{}) + bufErr := bytes.NewBuffer([]byte{}) + bufIn := bytes.NewBuffer([]byte{}) + ex := &fakeRemoteExecutor{} + if test.execErr { + ex.execErr = fmt.Errorf("exec error") + } + params := &ExecOptions{ + PodName: "foo", + ContainerName: "bar", + In: bufIn, + Out: bufOut, + Err: bufErr, + Executor: ex, + } + cmd := &cobra.Command{} + args := []string{"test", "command"} + if err := params.Complete(f, cmd, args, -1); err != nil { + t.Fatal(err) + } + err := params.Run() + if test.execErr && err != ex.execErr { + t.Errorf("%s: Unexpected exec error: %v", test.name, err) + continue + } + if !test.execErr && err != nil { + t.Errorf("%s: Unexpected error: %v", test.name, err) + continue + } + if test.execErr { + continue + } + if ex.url.Path != test.execPath { + t.Errorf("%s: Did not get expected path for exec request", test.name) + continue + } + if ex.method != "POST" { + t.Errorf("%s: Did not get method for exec request: %s", test.name, ex.method) + } + } +} + +func execPod() *api.Pod { + return &api.Pod{ + ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: "test", ResourceVersion: "10"}, + Spec: api.PodSpec{ + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + Containers: []api.Container{ + { + Name: "bar", + }, + }, + }, + Status: api.PodStatus{ + Phase: api.PodRunning, + }, + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/explain.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/explain.go index 2fe12fc12da7..ce959e394083 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/explain.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/explain.go @@ -36,11 +36,7 @@ kubectl explain pods.spec.containers` explainLong = `Documentation of resources. 
-Possible resource types include: pods (po), services (svc), -replicationcontrollers (rc), nodes (no), events (ev), componentstatuses (cs), -limitranges (limits), persistentvolumes (pv), persistentvolumeclaims (pvc), -resourcequotas (quota), namespaces (ns), horizontalpodautoscalers (hpa) -or endpoints (ep).` +` + kubectl.PossibleResourceTypes ) // NewCmdExplain returns a cobra command for swagger docs @@ -56,6 +52,7 @@ func NewCmdExplain(f *cmdutil.Factory, out io.Writer) *cobra.Command { }, } cmd.Flags().Bool("recursive", false, "Print the fields of fields (Currently only 1 level deep)") + cmdutil.AddInclude3rdPartyFlags(cmd) return cmd } @@ -69,7 +66,7 @@ func RunExplain(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []st apiVersionString := cmdutil.GetFlagString(cmd, "api-version") apiVersion := unversioned.GroupVersion{} - mapper, _ := f.Object() + mapper, _ := f.Object(cmdutil.GetIncludeThirdPartyAPIs(cmd)) // TODO: After we figured out the new syntax to separate group and resource, allow // the users to use it in explain (kubectl explain ). // Refer to issue #16039 for why we do this. Refer to PR #15808 that used "/" syntax. diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/expose.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/expose.go index af7793b2c80d..7ed7529c34b6 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/expose.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/expose.go @@ -19,6 +19,7 @@ package cmd import ( "fmt" "io" + "regexp" "strings" "github.com/spf13/cobra" @@ -34,17 +35,25 @@ import ( // referencing the cmd.Flags() type ExposeOptions struct { Filenames []string + Recursive bool } const ( - expose_long = `Take a replication controller, service, replica set or pod and expose it as a new Kubernetes service. + expose_resources = ` + pod (po), service (svc), replicationcontroller (rc), + deployment, replicaset (rs) +` -Looks up a replication controller, service, replica set or pod by name and uses the selector for that -resource as the selector for a new service on the specified port. A replica set will be exposed as a -service only if it's selector is convertible to a selector that service supports, i.e. when the -replica set selector contains only the matchLabels component. Note that if no port is specified -via --port and the exposed resource has multiple ports, all will be re-used by the new service. Also -if no labels are specified, the new service will re-use the labels from the resource it exposes.` + expose_long = `Expose a resource as a new Kubernetes service. + +Looks up a deployment, service, replica set, replication controller or pod by name and uses the selector +for that resource as the selector for a new service on the specified port. A deployment or replica set +will be exposed as a service only if its selector is convertible to a selector that service supports, +i.e. when the selector contains only the matchLabels component. Note that if no port is specified via +--port and the exposed resource has multiple ports, all will be re-used by the new service. Also if no +labels are specified, the new service will re-use the labels from the resource it exposes. + +Possible resources include (case insensitive):` + expose_resources expose_example = `# Create a service for a replicated nginx, which serves on port 80 and connects to the containers on port 8000. 
kubectl expose rc nginx --port=80 --target-port=8000 @@ -62,25 +71,37 @@ kubectl expose service nginx --port=443 --target-port=8443 --name=nginx-https kubectl expose rc streamer --port=4100 --protocol=udp --name=video-stream # Create a service for a replicated nginx using replica set, which serves on port 80 and connects to the containers on port 8000. -kubectl expose rs nginx --port=80 --target-port=8000` +kubectl expose rs nginx --port=80 --target-port=8000 + +# Create a service for an nginx deployment, which serves on port 80 and connects to the containers on port 8000. +kubectl expose deployment nginx --port=80 --target-port=8000` ) func NewCmdExposeService(f *cmdutil.Factory, out io.Writer) *cobra.Command { options := &ExposeOptions{} + validArgs, argAliases := []string{}, []string{} + resources := regexp.MustCompile(`\s*,`).Split(expose_resources, -1) + for _, r := range resources { + validArgs = append(validArgs, strings.Fields(r)[0]) + argAliases = kubectl.ResourceAliases(validArgs) + } + cmd := &cobra.Command{ Use: "expose (-f FILENAME | TYPE NAME) [--port=port] [--protocol=TCP|UDP] [--target-port=number-or-name] [--name=name] [--external-ip=external-ip-of-service] [--type=type]", - Short: "Take a replication controller, service or pod and expose it as a new Kubernetes Service", + Short: "Take a replication controller, service, deployment or pod and expose it as a new Kubernetes Service", Long: expose_long, Example: expose_example, Run: func(cmd *cobra.Command, args []string) { err := RunExpose(f, out, cmd, args, options) cmdutil.CheckErr(err) }, + ValidArgs: validArgs, + ArgAliases: argAliases, } cmdutil.AddPrinterFlags(cmd) cmd.Flags().String("generator", "service/v2", "The name of the API generator to use. There are 2 generators: 'service/v1' and 'service/v2'. The only difference between them is that service port in v1 is named 'default', while it is left unnamed in v2. Default is 'service/v2'.") - cmd.Flags().String("protocol", "TCP", "The network protocol for the service to be created. Default is 'tcp'.") + cmd.Flags().String("protocol", "", "The network protocol for the service to be created. Default is 'TCP'.") cmd.Flags().String("port", "", "The port that the service should serve on. Copied from the resource being exposed, if unspecified") cmd.Flags().String("type", "", "Type for this service: ClusterIP, NodePort, or LoadBalancer. Default is 'ClusterIP'.") // TODO: remove create-external-load-balancer in code on or after Aug 25, 2016. @@ -89,8 +110,8 @@ func NewCmdExposeService(f *cmdutil.Factory, out io.Writer) *cobra.Command { cmd.Flags().String("load-balancer-ip", "", "IP to assign to the Load Balancer. If empty, an ephemeral IP will be created and used (cloud-provider specific).") cmd.Flags().String("selector", "", "A label selector to use for this service. Only equality-based selector requirements are supported. If empty (the default) infer the selector from the replication controller or replica set.") cmd.Flags().StringP("labels", "l", "", "Labels to apply to the service created by this call.") - cmd.Flags().Bool("dry-run", false, "If true, only print the object that would be sent, without creating it.") cmd.Flags().String("container-port", "", "Synonym for --target-port") + cmd.Flags().MarkDeprecated("container-port", "--container-port will be removed in the future, please use --target-port instead") cmd.Flags().String("target-port", "", "Name or number for the port on the container that the service should direct traffic to.
Optional.") cmd.Flags().String("external-ip", "", "Additional external IP address (not managed by Kubernetes) to accept for the service. If this IP is routed to a node, the service can be accessed by this IP in addition to its generated service IP.") cmd.Flags().String("overrides", "", "An inline JSON override for the generated object. If this is non-empty, it is used to override the generated object. Requires that the object supply a valid apiVersion field.") @@ -99,6 +120,8 @@ func NewCmdExposeService(f *cmdutil.Factory, out io.Writer) *cobra.Command { usage := "Filename, directory, or URL to a file identifying the resource to expose a service" kubectl.AddJsonFilenameFlag(cmd, &options.Filenames, usage) + cmdutil.AddDryRunFlag(cmd) + cmdutil.AddRecursiveFlag(cmd, &options.Recursive) cmdutil.AddApplyAnnotationFlags(cmd) cmdutil.AddRecordFlag(cmd) return cmd @@ -110,28 +133,15 @@ func RunExpose(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []str return err } - mapper, typer := f.Object() + mapper, typer := f.Object(false) r := resource.NewBuilder(mapper, typer, resource.ClientMapperFunc(f.ClientForMapping), f.Decoder(true)). ContinueOnError(). NamespaceParam(namespace).DefaultNamespace(). - FilenameParam(enforceNamespace, options.Filenames...). + FilenameParam(enforceNamespace, options.Recursive, options.Filenames...). ResourceTypeOrNameArgs(false, args...). Flatten(). Do() - infos, err := r.Infos() - if err != nil { - return err - } - if len(infos) > 1 { - return fmt.Errorf("multiple resources provided: %v", args) - } - info := infos[0] - mapping := info.ResourceMapping() - if err := f.CanBeExposed(mapping.GroupVersionKind.GroupKind()); err != nil { - return err - } - // Get the input object - inputObject, err := r.Object() + err = r.Err() if err != nil { return err } @@ -144,101 +154,130 @@ func RunExpose(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []str return cmdutil.UsageError(cmd, fmt.Sprintf("generator %q not found.", generatorName)) } names := generator.ParamNames() - params := kubectl.MakeParams(cmd, names) - name := info.Name - if len(name) > validation.DNS952LabelMaxLength { - name = name[:validation.DNS952LabelMaxLength] - } - params["default-name"] = name - // For objects that need a pod selector, derive it from the exposed object in case a user - // didn't explicitly specify one via --selector - if s, found := params["selector"]; found && kubectl.IsZero(s) { - s, err := f.MapBasedSelectorForObject(inputObject) + err = r.Visit(func(info *resource.Info, err error) error { if err != nil { - return cmdutil.UsageError(cmd, fmt.Sprintf("couldn't retrieve selectors via --selector flag or introspection: %s", err)) + return err } - params["selector"] = s - } - // For objects that need a port, derive it from the exposed object in case a user - // didn't explicitly specify one via --port - if port, found := params["port"]; found && kubectl.IsZero(port) { - ports, err := f.PortsForObject(inputObject) - if err != nil { - return cmdutil.UsageError(cmd, fmt.Sprintf("couldn't find port via --port flag or introspection: %s", err)) + mapping := info.ResourceMapping() + if err := f.CanBeExposed(mapping.GroupVersionKind.GroupKind()); err != nil { + return err } - switch len(ports) { - case 0: - return cmdutil.UsageError(cmd, "couldn't find port via --port flag or introspection") - case 1: - params["port"] = ports[0] - default: - params["ports"] = strings.Join(ports, ",") + + params := kubectl.MakeParams(cmd, names) + name := info.Name + if len(name) > 
validation.DNS952LabelMaxLength { + name = name[:validation.DNS952LabelMaxLength] } - } - if kubectl.IsZero(params["labels"]) { - labels, err := f.LabelsForObject(inputObject) + params["default-name"] = name + + // For objects that need a pod selector, derive it from the exposed object in case a user + // didn't explicitly specify one via --selector + if s, found := params["selector"]; found && kubectl.IsZero(s) { + s, err := f.MapBasedSelectorForObject(info.Object) + if err != nil { + return cmdutil.UsageError(cmd, fmt.Sprintf("couldn't retrieve selectors via --selector flag or introspection: %s", err)) + } + params["selector"] = s + } + + // For objects that need a port, derive it from the exposed object in case a user + // didn't explicitly specify one via --port + if port, found := params["port"]; found && kubectl.IsZero(port) { + ports, err := f.PortsForObject(info.Object) + if err != nil { + return cmdutil.UsageError(cmd, fmt.Sprintf("couldn't find port via --port flag or introspection: %s", err)) + } + switch len(ports) { + case 0: + return cmdutil.UsageError(cmd, "couldn't find port via --port flag or introspection") + case 1: + params["port"] = ports[0] + default: + params["ports"] = strings.Join(ports, ",") + } + } + + // Always try to derive protocols from the exposed object, may use + // different protocols for different ports. + if _, found := params["protocol"]; found { + protocolsMap, err := f.ProtocolsForObject(info.Object) + if err != nil { + return cmdutil.UsageError(cmd, fmt.Sprintf("couldn't find protocol via introspection: %s", err)) + } + if protocols := kubectl.MakeProtocols(protocolsMap); !kubectl.IsZero(protocols) { + params["protocols"] = protocols + } + } + + if kubectl.IsZero(params["labels"]) { + labels, err := f.LabelsForObject(info.Object) + if err != nil { + return err + } + params["labels"] = kubectl.MakeLabels(labels) + } + if err = kubectl.ValidateParams(names, params); err != nil { + return err + } + // Check for invalid flags used against the present generator. + if err := kubectl.EnsureFlagsValid(cmd, generators, generatorName); err != nil { + return err + } + + // Generate new object + object, err := generator.Generate(params) if err != nil { return err } - params["labels"] = kubectl.MakeLabels(labels) - } - if err = kubectl.ValidateParams(names, params); err != nil { - return err - } - // Check for invalid flags used against the present generator. 
- if err := kubectl.EnsureFlagsValid(cmd, generators, generatorName); err != nil { - return err - } - // Generate new object - object, err := generator.Generate(params) - if err != nil { - return err - } + if inline := cmdutil.GetFlagString(cmd, "overrides"); len(inline) > 0 { + codec := runtime.NewCodec(f.JSONEncoder(), f.Decoder(true)) + object, err = cmdutil.Merge(codec, object, inline, mapping.GroupVersionKind.Kind) + if err != nil { + return err + } + } - if inline := cmdutil.GetFlagString(cmd, "overrides"); len(inline) > 0 { - codec := runtime.NewCodec(f.JSONEncoder(), f.Decoder(true)) - object, err = cmdutil.Merge(codec, object, inline, mapping.GroupVersionKind.Kind) + resourceMapper := &resource.Mapper{ + ObjectTyper: typer, + RESTMapper: mapper, + ClientMapper: resource.ClientMapperFunc(f.ClientForMapping), + Decoder: f.Decoder(true), + } + info, err = resourceMapper.InfoForObject(object, nil) if err != nil { return err } - } + if cmdutil.ShouldRecord(cmd, info) { + if err := cmdutil.RecordChangeCause(object, f.Command()); err != nil { + return err + } + } + info.Refresh(object, true) + if cmdutil.GetDryRunFlag(cmd) { + return f.PrintObject(cmd, mapper, object, out) + } + if err := kubectl.CreateOrUpdateAnnotation(cmdutil.GetFlagBool(cmd, cmdutil.ApplyAnnotationsFlag), info, f.JSONEncoder()); err != nil { + return err + } - resourceMapper := &resource.Mapper{ - ObjectTyper: typer, - RESTMapper: mapper, - ClientMapper: resource.ClientMapperFunc(f.ClientForMapping), - Decoder: f.Decoder(true), - } - info, err = resourceMapper.InfoForObject(object, nil) - if err != nil { - return err - } - if cmdutil.ShouldRecord(cmd, info) { - if err := cmdutil.RecordChangeCause(object, f.Command()); err != nil { + // Serialize the object with the annotation applied. + object, err = resource.NewHelper(info.Client, info.Mapping).Create(namespace, false, object) + if err != nil { return err } - } - info.Refresh(object, true) - // TODO: extract this flag to a central location, when such a location exists. - if cmdutil.GetFlagBool(cmd, "dry-run") { - return f.PrintObject(cmd, object, out) - } - if err := kubectl.CreateOrUpdateAnnotation(cmdutil.GetFlagBool(cmd, cmdutil.ApplyAnnotationsFlag), info, f.JSONEncoder()); err != nil { - return err - } - // Serialize the object with the annotation applied. - object, err = resource.NewHelper(info.Client, info.Mapping).Create(namespace, false, object) + if len(cmdutil.GetFlagString(cmd, "output")) > 0 { + return f.PrintObject(cmd, mapper, object, out) + } + + cmdutil.PrintSuccess(mapper, false, out, info.Mapping.Resource, info.Name, "exposed") + return nil + }) if err != nil { return err } - - if len(cmdutil.GetFlagString(cmd, "output")) > 0 { - return f.PrintObject(cmd, object, out) - } - cmdutil.PrintSuccess(mapper, false, out, info.Mapping.Resource, info.Name, "exposed") return nil } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/expose_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/expose_test.go new file mode 100644 index 000000000000..5ad4cc73131c --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/expose_test.go @@ -0,0 +1,410 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cmd + +import ( + "bytes" + "net/http" + "strings" + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/client/unversioned/fake" + "k8s.io/kubernetes/pkg/kubectl" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/util/intstr" +) + +func TestRunExposeService(t *testing.T) { + tests := []struct { + name string + args []string + ns string + calls map[string]string + input runtime.Object + flags map[string]string + output runtime.Object + expected string + status int + }{ + { + name: "expose-service-from-service-no-selector-defined", + args: []string{"service", "baz"}, + ns: "test", + calls: map[string]string{ + "GET": "/namespaces/test/services/baz", + "POST": "/namespaces/test/services", + }, + input: &api.Service{ + ObjectMeta: api.ObjectMeta{Name: "baz", Namespace: "test", ResourceVersion: "12"}, + Spec: api.ServiceSpec{ + Selector: map[string]string{"app": "go"}, + }, + }, + flags: map[string]string{"protocol": "UDP", "port": "14", "name": "foo", "labels": "svc=test"}, + output: &api.Service{ + ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: "", Labels: map[string]string{"svc": "test"}}, + Spec: api.ServiceSpec{ + Ports: []api.ServicePort{ + { + Protocol: api.ProtocolUDP, + Port: 14, + TargetPort: intstr.FromInt(14), + }, + }, + Selector: map[string]string{"app": "go"}, + }, + }, + expected: "service \"foo\" exposed", + status: 200, + }, + { + name: "expose-service-from-service", + args: []string{"service", "baz"}, + ns: "test", + calls: map[string]string{ + "GET": "/namespaces/test/services/baz", + "POST": "/namespaces/test/services", + }, + input: &api.Service{ + ObjectMeta: api.ObjectMeta{Name: "baz", Namespace: "test", ResourceVersion: "12"}, + Spec: api.ServiceSpec{ + Selector: map[string]string{"app": "go"}, + }, + }, + flags: map[string]string{"selector": "func=stream", "protocol": "UDP", "port": "14", "name": "foo", "labels": "svc=test"}, + output: &api.Service{ + ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: "", Labels: map[string]string{"svc": "test"}}, + Spec: api.ServiceSpec{ + Ports: []api.ServicePort{ + { + Protocol: api.ProtocolUDP, + Port: 14, + TargetPort: intstr.FromInt(14), + }, + }, + Selector: map[string]string{"func": "stream"}, + }, + }, + expected: "service \"foo\" exposed", + status: 200, + }, + { + name: "no-name-passed-from-the-cli", + args: []string{"service", "mayor"}, + ns: "default", + calls: map[string]string{ + "GET": "/namespaces/default/services/mayor", + "POST": "/namespaces/default/services", + }, + input: &api.Service{ + ObjectMeta: api.ObjectMeta{Name: "mayor", Namespace: "default", ResourceVersion: "12"}, + Spec: api.ServiceSpec{ + Selector: map[string]string{"run": "this"}, + }, + }, + // No --name flag specified below. 
Service will use the rc's name passed via the 'default-name' parameter + flags: map[string]string{"selector": "run=this", "port": "80", "labels": "runas=amayor"}, + output: &api.Service{ + ObjectMeta: api.ObjectMeta{Name: "mayor", Namespace: "", Labels: map[string]string{"runas": "amayor"}}, + Spec: api.ServiceSpec{ + Ports: []api.ServicePort{ + { + Protocol: api.ProtocolTCP, + Port: 80, + TargetPort: intstr.FromInt(80), + }, + }, + Selector: map[string]string{"run": "this"}, + }, + }, + expected: "service \"mayor\" exposed", + status: 200, + }, + { + name: "expose-service", + args: []string{"service", "baz"}, + ns: "test", + calls: map[string]string{ + "GET": "/namespaces/test/services/baz", + "POST": "/namespaces/test/services", + }, + input: &api.Service{ + ObjectMeta: api.ObjectMeta{Name: "baz", Namespace: "test", ResourceVersion: "12"}, + Spec: api.ServiceSpec{ + Selector: map[string]string{"app": "go"}, + }, + }, + flags: map[string]string{"selector": "func=stream", "protocol": "UDP", "port": "14", "name": "foo", "labels": "svc=test", "type": "LoadBalancer", "dry-run": "true"}, + output: &api.Service{ + ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: "", Labels: map[string]string{"svc": "test"}}, + Spec: api.ServiceSpec{ + Ports: []api.ServicePort{ + { + Protocol: api.ProtocolUDP, + Port: 14, + TargetPort: intstr.FromInt(14), + }, + }, + Selector: map[string]string{"func": "stream"}, + Type: api.ServiceTypeLoadBalancer, + }, + }, + status: 200, + }, + { + name: "expose-affinity-service", + args: []string{"service", "baz"}, + ns: "test", + calls: map[string]string{ + "GET": "/namespaces/test/services/baz", + "POST": "/namespaces/test/services", + }, + input: &api.Service{ + ObjectMeta: api.ObjectMeta{Name: "baz", Namespace: "test", ResourceVersion: "12"}, + Spec: api.ServiceSpec{ + Selector: map[string]string{"app": "go"}, + }, + }, + flags: map[string]string{"selector": "func=stream", "protocol": "UDP", "port": "14", "name": "foo", "labels": "svc=test", "type": "LoadBalancer", "session-affinity": "ClientIP", "dry-run": "true"}, + output: &api.Service{ + ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: "", Labels: map[string]string{"svc": "test"}}, + Spec: api.ServiceSpec{ + Ports: []api.ServicePort{ + { + Protocol: api.ProtocolUDP, + Port: 14, + TargetPort: intstr.FromInt(14), + }, + }, + Selector: map[string]string{"func": "stream"}, + Type: api.ServiceTypeLoadBalancer, + SessionAffinity: api.ServiceAffinityClientIP, + }, + }, + status: 200, + }, + { + name: "expose-from-file", + args: []string{}, + ns: "test", + calls: map[string]string{ + "GET": "/namespaces/test/services/redis-master", + "POST": "/namespaces/test/services", + }, + input: &api.Service{ + ObjectMeta: api.ObjectMeta{Name: "redis-master", Namespace: "test", ResourceVersion: "12"}, + Spec: api.ServiceSpec{ + Selector: map[string]string{"app": "go"}, + }, + }, + flags: map[string]string{"filename": "../../../examples/guestbook/redis-master-service.yaml", "selector": "func=stream", "protocol": "UDP", "port": "14", "name": "foo", "labels": "svc=test", "dry-run": "true"}, + output: &api.Service{ + ObjectMeta: api.ObjectMeta{Name: "foo", Labels: map[string]string{"svc": "test"}}, + Spec: api.ServiceSpec{ + Ports: []api.ServicePort{ + { + Protocol: api.ProtocolUDP, + Port: 14, + TargetPort: intstr.FromInt(14), + }, + }, + Selector: map[string]string{"func": "stream"}, + }, + }, + status: 200, + }, + { + name: "truncate-name", + args: []string{"pod", "a-name-that-is-toooo-big-for-a-service"}, + ns: "test", + calls: 
map[string]string{ + "GET": "/namespaces/test/pods/a-name-that-is-toooo-big-for-a-service", + "POST": "/namespaces/test/services", + }, + input: &api.Pod{ + ObjectMeta: api.ObjectMeta{Name: "baz", Namespace: "test", ResourceVersion: "12"}, + }, + flags: map[string]string{"selector": "svc=frompod", "port": "90", "labels": "svc=frompod", "generator": "service/v2"}, + output: &api.Service{ + ObjectMeta: api.ObjectMeta{Name: "a-name-that-is-toooo-big", Namespace: "", Labels: map[string]string{"svc": "frompod"}}, + Spec: api.ServiceSpec{ + Ports: []api.ServicePort{ + { + Protocol: api.ProtocolTCP, + Port: 90, + TargetPort: intstr.FromInt(90), + }, + }, + Selector: map[string]string{"svc": "frompod"}, + }, + }, + expected: "service \"a-name-that-is-toooo-big\" exposed", + status: 200, + }, + { + name: "expose-multiport-object", + args: []string{"service", "foo"}, + ns: "test", + calls: map[string]string{ + "GET": "/namespaces/test/services/foo", + "POST": "/namespaces/test/services", + }, + input: &api.Service{ + ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: "", Labels: map[string]string{"svc": "multiport"}}, + Spec: api.ServiceSpec{ + Ports: []api.ServicePort{ + { + Protocol: api.ProtocolTCP, + Port: 80, + TargetPort: intstr.FromInt(80), + }, + { + Protocol: api.ProtocolTCP, + Port: 443, + TargetPort: intstr.FromInt(443), + }, + }, + }, + }, + flags: map[string]string{"selector": "svc=fromfoo", "generator": "service/v2", "name": "fromfoo", "dry-run": "true"}, + output: &api.Service{ + ObjectMeta: api.ObjectMeta{Name: "fromfoo", Namespace: "", Labels: map[string]string{"svc": "multiport"}}, + Spec: api.ServiceSpec{ + Ports: []api.ServicePort{ + { + Name: "port-1", + Protocol: api.ProtocolTCP, + Port: 80, + TargetPort: intstr.FromInt(80), + }, + { + Name: "port-2", + Protocol: api.ProtocolTCP, + Port: 443, + TargetPort: intstr.FromInt(443), + }, + }, + Selector: map[string]string{"svc": "fromfoo"}, + }, + }, + status: 200, + }, + { + name: "expose-multiprotocol-object", + args: []string{"service", "foo"}, + ns: "test", + calls: map[string]string{ + "GET": "/namespaces/test/services/foo", + "POST": "/namespaces/test/services", + }, + input: &api.Service{ + ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: "", Labels: map[string]string{"svc": "multiport"}}, + Spec: api.ServiceSpec{ + Ports: []api.ServicePort{ + { + Protocol: api.ProtocolTCP, + Port: 80, + TargetPort: intstr.FromInt(80), + }, + { + Protocol: api.ProtocolUDP, + Port: 8080, + TargetPort: intstr.FromInt(8080), + }, + { + Protocol: api.ProtocolUDP, + Port: 8081, + TargetPort: intstr.FromInt(8081), + }, + }, + }, + }, + flags: map[string]string{"selector": "svc=fromfoo", "generator": "service/v2", "name": "fromfoo", "dry-run": "true"}, + output: &api.Service{ + ObjectMeta: api.ObjectMeta{Name: "fromfoo", Namespace: "", Labels: map[string]string{"svc": "multiport"}}, + Spec: api.ServiceSpec{ + Ports: []api.ServicePort{ + { + Name: "port-1", + Protocol: api.ProtocolTCP, + Port: 80, + TargetPort: intstr.FromInt(80), + }, + { + Name: "port-2", + Protocol: api.ProtocolUDP, + Port: 8080, + TargetPort: intstr.FromInt(8080), + }, + { + Name: "port-3", + Protocol: api.ProtocolUDP, + Port: 8081, + TargetPort: intstr.FromInt(8081), + }, + }, + Selector: map[string]string{"svc": "fromfoo"}, + }, + }, + status: 200, + }, + } + + for _, test := range tests { + f, tf, codec := NewAPIFactory() + tf.Printer = &kubectl.JSONPrinter{} + tf.Client = &fake.RESTClient{ + Codec: codec, + Client: fake.CreateHTTPClient(func(req *http.Request) 
(*http.Response, error) { + switch p, m := req.URL.Path, req.Method; { + case p == test.calls[m] && m == "GET": + return &http.Response{StatusCode: test.status, Header: defaultHeader(), Body: objBody(codec, test.input)}, nil + case p == test.calls[m] && m == "POST": + return &http.Response{StatusCode: test.status, Header: defaultHeader(), Body: objBody(codec, test.output)}, nil + default: + t.Fatalf("unexpected request: %#v\n%#v", req.URL, req) + return nil, nil + } + }), + } + tf.Namespace = test.ns + buf := bytes.NewBuffer([]byte{}) + + cmd := NewCmdExposeService(f, buf) + cmd.SetOutput(buf) + for flag, value := range test.flags { + cmd.Flags().Set(flag, value) + } + cmd.Run(cmd, test.args) + + out := buf.String() + if _, ok := test.flags["dry-run"]; ok { + buf.Reset() + if err := tf.Printer.PrintObj(test.output, buf); err != nil { + t.Errorf("%s: Unexpected error: %v", test.name, err) + continue + } + + test.expected = buf.String() + } + + if !strings.Contains(out, test.expected) { + t.Errorf("%s: Unexpected output! Expected\n%s\ngot\n%s", test.name, test.expected, out) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/get.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/get.go index 5ba06cdbbe5d..f6fbbb1182a8 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/get.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/get.go @@ -26,6 +26,7 @@ import ( cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/resource" "k8s.io/kubernetes/pkg/runtime" + utilerrors "k8s.io/kubernetes/pkg/util/errors" "k8s.io/kubernetes/pkg/watch" ) @@ -33,16 +34,13 @@ import ( // referencing the cmd.Flags() type GetOptions struct { Filenames []string + Recursive bool } const ( get_long = `Display one or many resources. -Possible resource types include (case insensitive): pods (po), services (svc), -replicationcontrollers (rc), nodes (no), events (ev), componentstatuses (cs), -limitranges (limits), persistentvolumes (pv), persistentvolumeclaims (pvc), -resourcequotas (quota), namespaces (ns), endpoints (ep), -horizontalpodautoscalers (hpa), serviceaccounts or secrets. +` + kubectl.PossibleResourceTypes + ` By specifying the output as 'template' and providing a Go template as the value of the --template flag, you can filter the attributes of the fetched resource(s).` @@ -77,11 +75,12 @@ func NewCmdGet(f *cmdutil.Factory, out io.Writer) *cobra.Command { options := &GetOptions{} // retrieve a list of handled resources from printer as valid args - validArgs := []string{} + validArgs, argAliases := []string{}, []string{} p, err := f.Printer(nil, false, false, false, false, false, false, []string{}) cmdutil.CheckErr(err) if p != nil { validArgs = p.HandledResources() + argAliases = kubectl.ResourceAliases(validArgs) } cmd := &cobra.Command{ @@ -93,7 +92,9 @@ func NewCmdGet(f *cmdutil.Factory, out io.Writer) *cobra.Command { err := RunGet(f, out, cmd, args, options) cmdutil.CheckErr(err) }, - ValidArgs: validArgs, + SuggestFor: []string{"list", "ps"}, + ValidArgs: validArgs, + ArgAliases: argAliases, } cmdutil.AddPrinterFlags(cmd) cmd.Flags().StringP("selector", "l", "", "Selector (label query) to filter on") @@ -104,6 +105,8 @@ func NewCmdGet(f *cmdutil.Factory, out io.Writer) *cobra.Command { cmd.Flags().Bool("export", false, "If true, use 'export' for the resources. 
Exported resources are stripped of cluster-specific information.") usage := "Filename, directory, or URL to a file identifying the resource to get from a server." kubectl.AddJsonFilenameFlag(cmd, &options.Filenames, usage) + cmdutil.AddRecursiveFlag(cmd, &options.Recursive) + cmdutil.AddInclude3rdPartyFlags(cmd) return cmd } @@ -112,7 +115,7 @@ func NewCmdGet(f *cmdutil.Factory, out io.Writer) *cobra.Command { func RunGet(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []string, options *GetOptions) error { selector := cmdutil.GetFlagString(cmd, "selector") allNamespaces := cmdutil.GetFlagBool(cmd, "all-namespaces") - mapper, typer := f.Object() + mapper, typer := f.Object(cmdutil.GetIncludeThirdPartyAPIs(cmd)) cmdNamespace, enforceNamespace, err := f.DefaultNamespace() if err != nil { @@ -143,7 +146,7 @@ func RunGet(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []string if isWatch || isWatchOnly { r := resource.NewBuilder(mapper, typer, resource.ClientMapperFunc(f.ClientForMapping), f.Decoder(true)). NamespaceParam(cmdNamespace).DefaultNamespace().AllNamespaces(allNamespaces). - FilenameParam(enforceNamespace, options.Filenames...). + FilenameParam(enforceNamespace, options.Recursive, options.Filenames...). SelectorParam(selector). ExportParam(export). ResourceTypeOrNameArgs(true, args...). @@ -196,39 +199,55 @@ func RunGet(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []string return nil } - b := resource.NewBuilder(mapper, typer, resource.ClientMapperFunc(f.ClientForMapping), f.Decoder(true)). + r := resource.NewBuilder(mapper, typer, resource.ClientMapperFunc(f.ClientForMapping), f.Decoder(true)). NamespaceParam(cmdNamespace).DefaultNamespace().AllNamespaces(allNamespaces). - FilenameParam(enforceNamespace, options.Filenames...). + FilenameParam(enforceNamespace, options.Recursive, options.Filenames...). SelectorParam(selector). ExportParam(export). ResourceTypeOrNameArgs(true, args...). ContinueOnError(). - Latest() + Latest(). + Flatten(). 
+ Do() + err = r.Err() + if err != nil { + return err + } + printer, generic, err := cmdutil.PrinterForCommand(cmd) if err != nil { return err } - if generic { - clientConfig, err := f.ClientConfig() + infos := []*resource.Info{} + allErrs := []error{} + err = r.Visit(func(info *resource.Info, err error) error { if err != nil { return err } + infos = append(infos, info) + return nil + }) + if err != nil { + allErrs = append(allErrs, err) + } - singular := false - r := b.Flatten().Do() - infos, err := r.IntoSingular(&singular).Infos() + if generic { + clientConfig, err := f.ClientConfig() if err != nil { return err } + singular := false + r.IntoSingular(&singular) + // the outermost object will be converted to the output-version, but inner // objects can use their mappings version, err := cmdutil.OutputVersion(cmd, clientConfig.GroupVersion) if err != nil { return err } - obj, err := resource.AsVersionedObject(infos, !singular, version.String(), f.JSONEncoder()) + obj, err := resource.AsVersionedObject(infos, !singular, version, f.JSONEncoder()) if err != nil { return err } @@ -236,10 +255,6 @@ func RunGet(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []string return printer.PrintObj(obj, out) } - infos, err := b.Flatten().Do().Infos() - if err != nil { - return err - } objs := make([]runtime.Object, len(infos)) for ix := range infos { objs[ix] = infos[ix].Object @@ -259,9 +274,10 @@ func RunGet(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []string } for ix := range infos { - objs[ix], err = infos[ix].Mapping.ConvertToVersion(infos[ix].Object, version.String()) + objs[ix], err = infos[ix].Mapping.ConvertToVersion(infos[ix].Object, version) if err != nil { - return err + allErrs = append(allErrs, err) + continue } } @@ -290,19 +306,21 @@ func RunGet(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []string if printer == nil || lastMapping == nil || mapping == nil || mapping.Resource != lastMapping.Resource { printer, err = f.PrinterForMapping(cmd, mapping, allNamespaces) if err != nil { - return err + allErrs = append(allErrs, err) + continue } lastMapping = mapping } if _, found := printer.(*kubectl.HumanReadablePrinter); found { if err := printer.PrintObj(original, w); err != nil { - return err + allErrs = append(allErrs, err) } continue } if err := printer.PrintObj(original, w); err != nil { - return err + allErrs = append(allErrs, err) + continue } } - return nil + return utilerrors.NewAggregate(allErrs) } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/get_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/get_test.go new file mode 100644 index 000000000000..6f18031c4a87 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/get_test.go @@ -0,0 +1,869 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cmd + +import ( + "bytes" + encjson "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "reflect" + "strings" + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/meta" + "k8s.io/kubernetes/pkg/api/testapi" + apitesting "k8s.io/kubernetes/pkg/api/testing" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/client/restclient" + "k8s.io/kubernetes/pkg/client/unversioned/fake" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/runtime/serializer/json" + "k8s.io/kubernetes/pkg/runtime/serializer/streaming" + "k8s.io/kubernetes/pkg/util/diff" + "k8s.io/kubernetes/pkg/watch" + "k8s.io/kubernetes/pkg/watch/versioned" +) + +func testData() (*api.PodList, *api.ServiceList, *api.ReplicationControllerList) { + pods := &api.PodList{ + ListMeta: unversioned.ListMeta{ + ResourceVersion: "15", + }, + Items: []api.Pod{ + { + ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: "test", ResourceVersion: "10"}, + Spec: apitesting.DeepEqualSafePodSpec(), + }, + { + ObjectMeta: api.ObjectMeta{Name: "bar", Namespace: "test", ResourceVersion: "11"}, + Spec: apitesting.DeepEqualSafePodSpec(), + }, + }, + } + svc := &api.ServiceList{ + ListMeta: unversioned.ListMeta{ + ResourceVersion: "16", + }, + Items: []api.Service{ + { + ObjectMeta: api.ObjectMeta{Name: "baz", Namespace: "test", ResourceVersion: "12"}, + Spec: api.ServiceSpec{ + SessionAffinity: "None", + Type: api.ServiceTypeClusterIP, + }, + }, + }, + } + rc := &api.ReplicationControllerList{ + ListMeta: unversioned.ListMeta{ + ResourceVersion: "17", + }, + Items: []api.ReplicationController{ + { + ObjectMeta: api.ObjectMeta{Name: "rc1", Namespace: "test", ResourceVersion: "18"}, + Spec: api.ReplicationControllerSpec{ + Replicas: 1, + }, + }, + }, + } + return pods, svc, rc +} + +func testComponentStatusData() *api.ComponentStatusList { + good := api.ComponentStatus{ + Conditions: []api.ComponentCondition{ + {Type: api.ComponentHealthy, Status: api.ConditionTrue, Message: "ok"}, + }, + ObjectMeta: api.ObjectMeta{Name: "servergood"}, + } + + bad := api.ComponentStatus{ + Conditions: []api.ComponentCondition{ + {Type: api.ComponentHealthy, Status: api.ConditionFalse, Message: "", Error: "bad status: 500"}, + }, + ObjectMeta: api.ObjectMeta{Name: "serverbad"}, + } + + unknown := api.ComponentStatus{ + Conditions: []api.ComponentCondition{ + {Type: api.ComponentHealthy, Status: api.ConditionUnknown, Message: "", Error: "fizzbuzz error"}, + }, + ObjectMeta: api.ObjectMeta{Name: "serverunknown"}, + } + + return &api.ComponentStatusList{ + Items: []api.ComponentStatus{good, bad, unknown}, + } +} + +// Verifies that schemas that are not in the master tree of Kubernetes can be retrieved via Get. 
+func TestGetUnknownSchemaObject(t *testing.T) { + f, tf, codec := NewTestFactory() + tf.Printer = &testPrinter{} + tf.Client = &fake.RESTClient{ + Codec: codec, + Resp: &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, &internalType{Name: "foo"})}, + } + tf.Namespace = "test" + tf.ClientConfig = &restclient.Config{ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}} + buf := bytes.NewBuffer([]byte{}) + + cmd := NewCmdGet(f, buf) + cmd.SetOutput(buf) + cmd.Run(cmd, []string{"type", "foo"}) + + expected := &internalType{Name: "foo"} + actual := tf.Printer.(*testPrinter).Objects[0] + if !reflect.DeepEqual(expected, actual) { + t.Errorf("unexpected object: %#v", actual) + } + if buf.String() != fmt.Sprintf("%#v", expected) { + t.Errorf("unexpected output: %s", buf.String()) + } +} + +// Verifies that schemas that are not in the master tree of Kubernetes can be retrieved via Get. +// Because api.List is part of the Kube API, resource.Builder has to perform a conversion on +// api.Scheme, which may not have access to all objects, and not all objects are at the same +// internal versioning scheme. This test verifies that two isolated schemes (Test, and api.Scheme) +// can be conjoined into a single output object. +// +// The expected behavior of the `kubectl get` command is: +// 1. objects using unrecognized schemes will always be returned using that scheme/version, "unlikelyversion" in this test; +// 2. if the specified output-version is a recognized, valid Scheme, then the list should use that scheme, and otherwise it will default to the client version, testapi.Default.GroupVersion().String() in this test; +// 3a. if the specified output-version is a recognized, valid Scheme, in which the requested object (replicationcontroller) can be represented, then the object should be returned using that version; +// 3b. otherwise if the specified output-version is unrecognized, but the requested object (replicationcontroller) is recognized by the client's codec, then it will be converted to the client version, testapi.Default.GroupVersion().String() in this test. 
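To make the numbered expectations above concrete, here is a minimal, self-contained Go sketch of the fallback order the comment describes. It is illustrative only and not part of the vendored patch: the helper pickVersion and its boolean arguments are hypothetical stand-ins, and the real conversion is performed by resource.AsVersionedObject against the registered schemes.

package main

import "fmt"

// pickVersion sketches the fallback order described in the comment above.
// The boolean arguments stand in for "does this codec/scheme recognize the
// object's kind"; the vendored code answers that via the registered schemes.
func pickVersion(objVersion, outputVersion, clientVersion string, knownToOutput, knownToClient bool) string {
	switch {
	case knownToOutput:
		return outputVersion // behavior 3a: convert to the requested output-version
	case knownToClient:
		return clientVersion // behavior 3b: fall back to the client version
	default:
		return objVersion // behavior 1: an unrecognized scheme keeps its own version
	}
}

func main() {
	// A replicationcontroller requested with an unrecognized output-version
	// falls back to the client version (behavior 3b).
	fmt.Println(pickVersion("v1", "unlikely.group/unlikelyversion", "v1", false, true))
	// An object from an isolated scheme keeps "unlikelyversion" (behavior 1).
	fmt.Println(pickVersion("unlikely.group/unlikelyversion", "v1", "v1", false, false))
}

Running the sketch prints v1 and then unlikely.group/unlikelyversion, matching behaviors 3b and 1 exercised by the test below.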
+func TestGetUnknownSchemaObjectListGeneric(t *testing.T) { + testCases := map[string]struct { + outputVersion string + listVersion string + testtypeVersion string + rcVersion string + }{ + "handles specific version": { + outputVersion: testapi.Default.GroupVersion().String(), + listVersion: testapi.Default.GroupVersion().String(), + testtypeVersion: unlikelyGV.String(), + rcVersion: testapi.Default.GroupVersion().String(), + }, + "handles second specific version": { + outputVersion: "unlikely.group/unlikelyversion", + listVersion: testapi.Default.GroupVersion().String(), + testtypeVersion: unlikelyGV.String(), + rcVersion: testapi.Default.GroupVersion().String(), // see expected behavior 3b + }, + "handles common version": { + outputVersion: testapi.Default.GroupVersion().String(), + listVersion: testapi.Default.GroupVersion().String(), + testtypeVersion: unlikelyGV.String(), + rcVersion: testapi.Default.GroupVersion().String(), + }, + } + for k, test := range testCases { + apiCodec := testapi.Default.Codec() + regularClient := &fake.RESTClient{ + Codec: apiCodec, + Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { + return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(apiCodec, &api.ReplicationController{ObjectMeta: api.ObjectMeta{Name: "foo"}})}, nil + }), + } + + f, tf, codec := NewMixedFactory(regularClient) + tf.Printer = &testPrinter{} + tf.Client = &fake.RESTClient{ + Codec: codec, + Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { + return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, &internalType{Name: "foo"})}, nil + }), + } + tf.Namespace = "test" + tf.ClientConfig = &restclient.Config{ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}} + buf := bytes.NewBuffer([]byte{}) + cmd := NewCmdGet(f, buf) + cmd.SetOutput(buf) + cmd.Flags().Set("output", "json") + + cmd.Flags().Set("output-version", test.outputVersion) + err := RunGet(f, buf, cmd, []string{"type/foo", "replicationcontrollers/foo"}, &GetOptions{}) + if err != nil { + t.Errorf("%s: unexpected error: %v", k, err) + continue + } + out := make(map[string]interface{}) + if err := encjson.Unmarshal(buf.Bytes(), &out); err != nil { + t.Errorf("%s: unexpected error: %v\n%s", k, err, buf.String()) + continue + } + if out["apiVersion"] != test.listVersion { + t.Errorf("%s: unexpected list: %#v", k, out) + } + arr := out["items"].([]interface{}) + if arr[0].(map[string]interface{})["apiVersion"] != test.testtypeVersion { + t.Errorf("%s: unexpected list: %#v", k, out) + } + if arr[1].(map[string]interface{})["apiVersion"] != test.rcVersion { + t.Errorf("%s: unexpected list: %#v", k, out) + } + } +} + +// Verifies that schemas that are not in the master tree of Kubernetes can be retrieved via Get. 
+func TestGetSchemaObject(t *testing.T) { + f, tf, _ := NewTestFactory() + tf.Mapper = testapi.Default.RESTMapper() + tf.Typer = api.Scheme + codec := testapi.Default.Codec() + tf.Printer = &testPrinter{} + tf.Client = &fake.RESTClient{ + Codec: codec, + Resp: &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, &api.ReplicationController{ObjectMeta: api.ObjectMeta{Name: "foo"}})}, + } + tf.Namespace = "test" + tf.ClientConfig = &restclient.Config{ContentConfig: restclient.ContentConfig{GroupVersion: &unversioned.GroupVersion{Version: "v1"}}} + buf := bytes.NewBuffer([]byte{}) + + cmd := NewCmdGet(f, buf) + cmd.Run(cmd, []string{"replicationcontrollers", "foo"}) + + if !strings.Contains(buf.String(), "\"foo\"") { + t.Errorf("unexpected output: %s", buf.String()) + } +} + +func TestGetObjects(t *testing.T) { + pods, _, _ := testData() + + f, tf, codec := NewAPIFactory() + tf.Printer = &testPrinter{} + tf.Client = &fake.RESTClient{ + Codec: codec, + Resp: &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, &pods.Items[0])}, + } + tf.Namespace = "test" + buf := bytes.NewBuffer([]byte{}) + + cmd := NewCmdGet(f, buf) + cmd.SetOutput(buf) + cmd.Run(cmd, []string{"pods", "foo"}) + + expected := []runtime.Object{&pods.Items[0]} + actual := tf.Printer.(*testPrinter).Objects + if !reflect.DeepEqual(expected, actual) { + t.Errorf("unexpected object: %#v", actual) + } + if len(buf.String()) == 0 { + t.Errorf("unexpected empty output") + } +} + +func TestGetSortedObjects(t *testing.T) { + pods := &api.PodList{ + ListMeta: unversioned.ListMeta{ + ResourceVersion: "15", + }, + Items: []api.Pod{ + { + ObjectMeta: api.ObjectMeta{Name: "c", Namespace: "test", ResourceVersion: "10"}, + Spec: apitesting.DeepEqualSafePodSpec(), + }, + { + ObjectMeta: api.ObjectMeta{Name: "b", Namespace: "test", ResourceVersion: "11"}, + Spec: apitesting.DeepEqualSafePodSpec(), + }, + { + ObjectMeta: api.ObjectMeta{Name: "a", Namespace: "test", ResourceVersion: "9"}, + Spec: apitesting.DeepEqualSafePodSpec(), + }, + }, + } + + f, tf, codec := NewAPIFactory() + tf.Printer = &testPrinter{} + tf.Client = &fake.RESTClient{ + Codec: codec, + Resp: &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, pods)}, + } + tf.Namespace = "test" + tf.ClientConfig = &restclient.Config{ContentConfig: restclient.ContentConfig{GroupVersion: &unversioned.GroupVersion{Version: "v1"}}} + + buf := bytes.NewBuffer([]byte{}) + cmd := NewCmdGet(f, buf) + cmd.SetOutput(buf) + + // sorting with metadata.name + cmd.Flags().Set("sort-by", ".metadata.name") + cmd.Run(cmd, []string{"pods"}) + + // expect sorted: a,b,c + expected := []runtime.Object{&pods.Items[2], &pods.Items[1], &pods.Items[0]} + actual := tf.Printer.(*testPrinter).Objects + if !reflect.DeepEqual(expected, actual) { + t.Errorf("unexpected object: %#v", actual) + } + if len(buf.String()) == 0 { + t.Errorf("unexpected empty output") + } + +} + +func TestGetObjectsIdentifiedByFile(t *testing.T) { + pods, _, _ := testData() + + f, tf, codec := NewAPIFactory() + tf.Printer = &testPrinter{} + tf.Client = &fake.RESTClient{ + Codec: codec, + Resp: &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, &pods.Items[0])}, + } + tf.Namespace = "test" + buf := bytes.NewBuffer([]byte{}) + + cmd := NewCmdGet(f, buf) + cmd.SetOutput(buf) + cmd.Flags().Set("filename", "../../../examples/cassandra/cassandra-controller.yaml") + cmd.Run(cmd, []string{}) + + expected := []runtime.Object{&pods.Items[0]} + actual :=
tf.Printer.(*testPrinter).Objects + if !reflect.DeepEqual(expected, actual) { + t.Errorf("unexpected object: %#v", actual) + } + if len(buf.String()) == 0 { + t.Errorf("unexpected empty output") + } +} + +func TestGetListObjects(t *testing.T) { + pods, _, _ := testData() + + f, tf, codec := NewAPIFactory() + tf.Printer = &testPrinter{} + tf.Client = &fake.RESTClient{ + Codec: codec, + Resp: &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, pods)}, + } + tf.Namespace = "test" + buf := bytes.NewBuffer([]byte{}) + + cmd := NewCmdGet(f, buf) + cmd.SetOutput(buf) + cmd.Run(cmd, []string{"pods"}) + + expected, err := extractResourceList([]runtime.Object{pods}) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + actual := tf.Printer.(*testPrinter).Objects + if !reflect.DeepEqual(expected, actual) { + t.Errorf("unexpected object: expected %#v, got %#v", expected, actual) + } + if len(buf.String()) == 0 { + t.Errorf("unexpected empty output") + } +} + +func extractResourceList(objs []runtime.Object) ([]runtime.Object, error) { + finalObjs := []runtime.Object{} + for _, obj := range objs { + items, err := meta.ExtractList(obj) + if err != nil { + return nil, err + } + for _, item := range items { + finalObjs = append(finalObjs, item) + } + } + return finalObjs, nil +} + +func TestGetAllListObjects(t *testing.T) { + pods, _, _ := testData() + + f, tf, codec := NewAPIFactory() + tf.Printer = &testPrinter{} + tf.Client = &fake.RESTClient{ + Codec: codec, + Resp: &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, pods)}, + } + tf.Namespace = "test" + buf := bytes.NewBuffer([]byte{}) + + cmd := NewCmdGet(f, buf) + cmd.SetOutput(buf) + cmd.Flags().Set("show-all", "true") + cmd.Run(cmd, []string{"pods"}) + + expected, err := extractResourceList([]runtime.Object{pods}) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + actual := tf.Printer.(*testPrinter).Objects + if !reflect.DeepEqual(expected, actual) { + t.Errorf("unexpected object: %#v %#v", expected, actual) + } + if len(buf.String()) == 0 { + t.Errorf("unexpected empty output") + } +} + +func TestGetListComponentStatus(t *testing.T) { + statuses := testComponentStatusData() + + f, tf, codec := NewAPIFactory() + tf.Printer = &testPrinter{} + tf.Client = &fake.RESTClient{ + Codec: codec, + Resp: &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, statuses)}, + } + tf.Namespace = "test" + buf := bytes.NewBuffer([]byte{}) + + cmd := NewCmdGet(f, buf) + cmd.SetOutput(buf) + cmd.Run(cmd, []string{"componentstatuses"}) + + expected, err := extractResourceList([]runtime.Object{statuses}) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + actual := tf.Printer.(*testPrinter).Objects + if !reflect.DeepEqual(expected, actual) { + t.Errorf("unexpected object: expected %#v, got %#v", expected, actual) + } + if len(buf.String()) == 0 { + t.Errorf("unexpected empty output") + } +} + +func TestGetMultipleTypeObjects(t *testing.T) { + pods, svc, _ := testData() + + f, tf, codec := NewAPIFactory() + tf.Printer = &testPrinter{} + tf.Client = &fake.RESTClient{ + Codec: codec, + Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { + switch req.URL.Path { + case "/namespaces/test/pods": + return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, pods)}, nil + case "/namespaces/test/services": + return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, svc)}, nil + default: + 
t.Fatalf("unexpected request: %#v\n%#v", req.URL, req) + return nil, nil + } + }), + } + tf.Namespace = "test" + buf := bytes.NewBuffer([]byte{}) + + cmd := NewCmdGet(f, buf) + cmd.SetOutput(buf) + cmd.Run(cmd, []string{"pods,services"}) + + expected, err := extractResourceList([]runtime.Object{pods, svc}) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + actual := tf.Printer.(*testPrinter).Objects + if !reflect.DeepEqual(expected, actual) { + t.Errorf("unexpected object: %#v", actual) + } + if len(buf.String()) == 0 { + t.Errorf("unexpected empty output") + } +} + +func TestGetMultipleTypeObjectsAsList(t *testing.T) { + pods, svc, _ := testData() + + f, tf, codec := NewAPIFactory() + tf.Printer = &testPrinter{} + tf.Client = &fake.RESTClient{ + Codec: codec, + Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { + switch req.URL.Path { + case "/namespaces/test/pods": + return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, pods)}, nil + case "/namespaces/test/services": + return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, svc)}, nil + default: + t.Fatalf("unexpected request: %#v\n%#v", req.URL, req) + return nil, nil + } + }), + } + tf.Namespace = "test" + tf.ClientConfig = &restclient.Config{ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}} + buf := bytes.NewBuffer([]byte{}) + + cmd := NewCmdGet(f, buf) + cmd.SetOutput(buf) + + cmd.Flags().Set("output", "json") + cmd.Run(cmd, []string{"pods,services"}) + + if tf.Printer.(*testPrinter).Objects != nil { + t.Errorf("unexpected print to default printer") + } + + out, err := runtime.Decode(codec, buf.Bytes()) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + list, err := meta.ExtractList(out) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if errs := runtime.DecodeList(list, codec); len(errs) > 0 { + t.Fatalf("unexpected error: %v", errs) + } + if err := meta.SetList(out, list); err != nil { + t.Fatalf("unexpected error: %v", err) + } + + expected := &api.List{ + Items: []runtime.Object{ + &pods.Items[0], + &pods.Items[1], + &svc.Items[0], + }, + } + if !reflect.DeepEqual(expected, out) { + t.Errorf("unexpected output: %#v", out) + } +} + +func TestGetMultipleTypeObjectsWithSelector(t *testing.T) { + pods, svc, _ := testData() + + f, tf, codec := NewAPIFactory() + tf.Printer = &testPrinter{} + tf.Client = &fake.RESTClient{ + Codec: codec, + Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { + if req.URL.Query().Get(unversioned.LabelSelectorQueryParam(testapi.Default.GroupVersion().String())) != "a=b" { + t.Fatalf("unexpected request: %#v\n%#v", req.URL, req) + } + switch req.URL.Path { + case "/namespaces/test/pods": + return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, pods)}, nil + case "/namespaces/test/services": + return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, svc)}, nil + default: + t.Fatalf("unexpected request: %#v\n%#v", req.URL, req) + return nil, nil + } + }), + } + tf.Namespace = "test" + buf := bytes.NewBuffer([]byte{}) + + cmd := NewCmdGet(f, buf) + cmd.SetOutput(buf) + + cmd.Flags().Set("selector", "a=b") + cmd.Run(cmd, []string{"pods,services"}) + + expected, err := extractResourceList([]runtime.Object{pods, svc}) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + actual := tf.Printer.(*testPrinter).Objects + if !reflect.DeepEqual(expected, actual) 
{ + t.Errorf("unexpected object: %#v", actual) + } + if len(buf.String()) == 0 { + t.Errorf("unexpected empty output") + } +} + +func TestGetMultipleTypeObjectsWithDirectReference(t *testing.T) { + _, svc, _ := testData() + node := &api.Node{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + }, + Spec: api.NodeSpec{ + ExternalID: "ext", + }, + } + + f, tf, codec := NewAPIFactory() + tf.Printer = &testPrinter{} + tf.Client = &fake.RESTClient{ + Codec: codec, + Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { + switch req.URL.Path { + case "/nodes/foo": + return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, node)}, nil + case "/namespaces/test/services/bar": + return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, &svc.Items[0])}, nil + default: + t.Fatalf("unexpected request: %#v\n%#v", req.URL, req) + return nil, nil + } + }), + } + tf.Namespace = "test" + buf := bytes.NewBuffer([]byte{}) + + cmd := NewCmdGet(f, buf) + cmd.SetOutput(buf) + + cmd.Run(cmd, []string{"services/bar", "node/foo"}) + + expected := []runtime.Object{&svc.Items[0], node} + actual := tf.Printer.(*testPrinter).Objects + if !api.Semantic.DeepEqual(expected, actual) { + t.Errorf("unexpected object: %s", diff.ObjectDiff(expected, actual)) + } + if len(buf.String()) == 0 { + t.Errorf("unexpected empty output") + } +} + +func TestGetByNameForcesFlag(t *testing.T) { + pods, _, _ := testData() + + f, tf, codec := NewAPIFactory() + tf.Printer = &testPrinter{} + tf.Client = &fake.RESTClient{ + Codec: codec, + Resp: &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, &pods.Items[0])}, + } + tf.Namespace = "test" + buf := bytes.NewBuffer([]byte{}) + + cmd := NewCmdGet(f, buf) + cmd.SetOutput(buf) + cmd.Run(cmd, []string{"pods", "foo"}) + + showAllFlag, _ := cmd.Flags().GetBool("show-all") + if !showAllFlag { + t.Errorf("expected showAll to be true when getting resource by name") + } +} + +func watchTestData() ([]api.Pod, []watch.Event) { + pods := []api.Pod{ + { + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Namespace: "test", + ResourceVersion: "10", + }, + Spec: apitesting.DeepEqualSafePodSpec(), + }, + } + events := []watch.Event{ + { + Type: watch.Modified, + Object: &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Namespace: "test", + ResourceVersion: "11", + }, + Spec: apitesting.DeepEqualSafePodSpec(), + }, + }, + { + Type: watch.Deleted, + Object: &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Namespace: "test", + ResourceVersion: "12", + }, + Spec: apitesting.DeepEqualSafePodSpec(), + }, + }, + } + return pods, events +} + +func TestWatchSelector(t *testing.T) { + pods, events := watchTestData() + + f, tf, codec := NewAPIFactory() + tf.Printer = &testPrinter{} + tf.Client = &fake.RESTClient{ + Codec: codec, + Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { + if req.URL.Query().Get(unversioned.LabelSelectorQueryParam(testapi.Default.GroupVersion().String())) != "a=b" { + t.Fatalf("unexpected request: %#v\n%#v", req.URL, req) + } + switch req.URL.Path { + case "/namespaces/test/pods": + return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, &api.PodList{Items: pods})}, nil + case "/watch/namespaces/test/pods": + return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: watchBody(codec, events)}, nil + default: + t.Fatalf("unexpected request: %#v\n%#v", req.URL, req) + return nil, nil + } + }), + } + tf.Namespace = 
"test" + buf := bytes.NewBuffer([]byte{}) + + cmd := NewCmdGet(f, buf) + cmd.SetOutput(buf) + + cmd.Flags().Set("watch", "true") + cmd.Flags().Set("selector", "a=b") + cmd.Run(cmd, []string{"pods"}) + + expected := []runtime.Object{&api.PodList{Items: pods}, events[0].Object, events[1].Object} + actual := tf.Printer.(*testPrinter).Objects + if !reflect.DeepEqual(expected, actual) { + t.Errorf("unexpected object:\nExpected: %#v\n\nGot: %#v\n\n", expected[0], actual[0]) + } + if len(buf.String()) == 0 { + t.Errorf("unexpected empty output") + } +} + +func TestWatchResource(t *testing.T) { + pods, events := watchTestData() + + f, tf, codec := NewAPIFactory() + tf.Printer = &testPrinter{} + tf.Client = &fake.RESTClient{ + Codec: codec, + Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { + switch req.URL.Path { + case "/namespaces/test/pods/foo": + return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, &pods[0])}, nil + case "/watch/namespaces/test/pods/foo": + return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: watchBody(codec, events)}, nil + default: + t.Fatalf("unexpected request: %#v\n%#v", req.URL, req) + return nil, nil + } + }), + } + tf.Namespace = "test" + buf := bytes.NewBuffer([]byte{}) + + cmd := NewCmdGet(f, buf) + cmd.SetOutput(buf) + + cmd.Flags().Set("watch", "true") + cmd.Run(cmd, []string{"pods", "foo"}) + + expected := []runtime.Object{&pods[0], events[0].Object, events[1].Object} + actual := tf.Printer.(*testPrinter).Objects + if !reflect.DeepEqual(expected, actual) { + t.Errorf("unexpected object:\nExpected: %#v\n\nGot: %#v\n\n", expected, actual) + } + if len(buf.String()) == 0 { + t.Errorf("unexpected empty output") + } +} + +func TestWatchResourceIdentifiedByFile(t *testing.T) { + pods, events := watchTestData() + + f, tf, codec := NewAPIFactory() + tf.Printer = &testPrinter{} + tf.Client = &fake.RESTClient{ + Codec: codec, + Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { + switch req.URL.Path { + case "/namespaces/test/replicationcontrollers/cassandra": + return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, &pods[0])}, nil + case "/watch/namespaces/test/replicationcontrollers/cassandra": + return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: watchBody(codec, events)}, nil + default: + t.Fatalf("unexpected request: %#v\n%#v", req.URL, req) + return nil, nil + } + }), + } + tf.Namespace = "test" + buf := bytes.NewBuffer([]byte{}) + cmd := NewCmdGet(f, buf) + cmd.SetOutput(buf) + + cmd.Flags().Set("watch", "true") + cmd.Flags().Set("filename", "../../../examples/cassandra/cassandra-controller.yaml") + cmd.Run(cmd, []string{}) + + expected := []runtime.Object{&pods[0], events[0].Object, events[1].Object} + actual := tf.Printer.(*testPrinter).Objects + if !reflect.DeepEqual(expected, actual) { + t.Errorf("expected object: %#v unexpected object: %#v", expected, actual) + } + + if len(buf.String()) == 0 { + t.Errorf("unexpected empty output") + } +} + +func TestWatchOnlyResource(t *testing.T) { + pods, events := watchTestData() + + f, tf, codec := NewAPIFactory() + tf.Printer = &testPrinter{} + tf.Client = &fake.RESTClient{ + Codec: codec, + Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { + switch req.URL.Path { + case "/namespaces/test/pods/foo": + return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, &pods[0])}, nil + case 
"/watch/namespaces/test/pods/foo": + return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: watchBody(codec, events)}, nil + default: + t.Fatalf("unexpected request: %#v\n%#v", req.URL, req) + return nil, nil + } + }), + } + tf.Namespace = "test" + buf := bytes.NewBuffer([]byte{}) + + cmd := NewCmdGet(f, buf) + cmd.SetOutput(buf) + + cmd.Flags().Set("watch-only", "true") + cmd.Run(cmd, []string{"pods", "foo"}) + + expected := []runtime.Object{events[0].Object, events[1].Object} + actual := tf.Printer.(*testPrinter).Objects + if !reflect.DeepEqual(expected, actual) { + t.Errorf("unexpected object: %#v", actual) + } + if len(buf.String()) == 0 { + t.Errorf("unexpected empty output") + } +} + +func watchBody(codec runtime.Codec, events []watch.Event) io.ReadCloser { + buf := bytes.NewBuffer([]byte{}) + enc := versioned.NewEncoder(streaming.NewEncoder(buf, codec), codec) + for i := range events { + enc.Encode(&events[i]) + } + return json.Framer.NewFrameReader(ioutil.NopCloser(buf)) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/label.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/label.go index 44db5f87b203..8d87b913963e 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/label.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/label.go @@ -26,6 +26,7 @@ import ( "github.com/golang/glog" "github.com/spf13/cobra" "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/meta" "k8s.io/kubernetes/pkg/kubectl" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/resource" @@ -39,6 +40,7 @@ import ( // referencing the cmd.Flags() type LabelOptions struct { Filenames []string + Recursive bool } const ( @@ -71,11 +73,12 @@ func NewCmdLabel(f *cmdutil.Factory, out io.Writer) *cobra.Command { options := &LabelOptions{} // retrieve a list of handled resources from printer as valid args - validArgs := []string{} + validArgs, argAliases := []string{}, []string{} p, err := f.Printer(nil, false, false, false, false, false, false, []string{}) cmdutil.CheckErr(err) if p != nil { validArgs = p.HandledResources() + argAliases = kubectl.ResourceAliases(validArgs) } cmd := &cobra.Command{ @@ -87,7 +90,8 @@ func NewCmdLabel(f *cmdutil.Factory, out io.Writer) *cobra.Command { err := RunLabel(f, out, cmd, args, options) cmdutil.CheckErr(err) }, - ValidArgs: validArgs, + ValidArgs: validArgs, + ArgAliases: argAliases, } cmdutil.AddPrinterFlags(cmd) cmd.Flags().Bool("overwrite", false, "If true, allow labels to be overwritten, otherwise reject label updates that overwrite existing labels.") @@ -96,16 +100,18 @@ func NewCmdLabel(f *cmdutil.Factory, out io.Writer) *cobra.Command { cmd.Flags().String("resource-version", "", "If non-empty, the labels update will only succeed if this is the current resource-version for the object. 
Only valid when specifying a single resource.") usage := "Filename, directory, or URL to a file identifying the resource to update the labels" kubectl.AddJsonFilenameFlag(cmd, &options.Filenames, usage) - cmd.Flags().Bool("dry-run", false, "If true, only print the object that would be sent, without sending it.") + cmdutil.AddRecursiveFlag(cmd, &options.Recursive) + cmdutil.AddDryRunFlag(cmd) cmdutil.AddRecordFlag(cmd) + cmdutil.AddInclude3rdPartyFlags(cmd) return cmd } -func validateNoOverwrites(meta *api.ObjectMeta, labels map[string]string) error { +func validateNoOverwrites(accessor meta.Object, labels map[string]string) error { allErrs := []error{} for key := range labels { - if value, found := meta.Labels[key]; found { + if value, found := accessor.GetLabels()[key]; found { allErrs = append(allErrs, fmt.Errorf("'%s' already has a value (%s), and --overwrite is false", key, value)) } } @@ -118,9 +124,12 @@ func parseLabels(spec []string) (map[string]string, []string, error) { for _, labelSpec := range spec { if strings.Index(labelSpec, "=") != -1 { parts := strings.Split(labelSpec, "=") - if len(parts) != 2 || len(parts[1]) == 0 || !validation.IsValidLabelValue(parts[1]) { + if len(parts) != 2 || len(parts[1]) == 0 { return nil, nil, fmt.Errorf("invalid label spec: %v", labelSpec) } + if errs := validation.IsValidLabelValue(parts[1]); len(errs) != 0 { + return nil, nil, fmt.Errorf("invalid label value: %q: %s", labelSpec, strings.Join(errs, ";")) + } labels[parts[0]] = parts[1] } else if strings.HasSuffix(labelSpec, "-") { remove = append(remove, labelSpec[:len(labelSpec)-1]) @@ -137,49 +146,39 @@ func parseLabels(spec []string) (map[string]string, []string, error) { } func labelFunc(obj runtime.Object, overwrite bool, resourceVersion string, labels map[string]string, remove []string) error { - meta, err := api.ObjectMetaFor(obj) + accessor, err := meta.Accessor(obj) if err != nil { return err } if !overwrite { - if err := validateNoOverwrites(meta, labels); err != nil { + if err := validateNoOverwrites(accessor, labels); err != nil { return err } } - if meta.Labels == nil { - meta.Labels = make(map[string]string) + objLabels := accessor.GetLabels() + if objLabels == nil { + objLabels = make(map[string]string) } for key, value := range labels { - meta.Labels[key] = value + objLabels[key] = value } for _, label := range remove { - delete(meta.Labels, label) + delete(objLabels, label) } + accessor.SetLabels(objLabels) if len(resourceVersion) != 0 { - meta.ResourceVersion = resourceVersion + accessor.SetResourceVersion(resourceVersion) } return nil } func RunLabel(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []string, options *LabelOptions) error { - resources, labelArgs := []string{}, []string{} - first := true - for _, s := range args { - isLabel := strings.Contains(s, "=") || strings.HasSuffix(s, "-") - switch { - case first && isLabel: - first = false - fallthrough - case !first && isLabel: - labelArgs = append(labelArgs, s) - case first && !isLabel: - resources = append(resources, s) - case !first && !isLabel: - return cmdutil.UsageError(cmd, "all resources must be specified before label changes: %s", s) - } + resources, labelArgs, err := cmdutil.GetResourcesAndPairs(args, "label") + if err != nil { + return err } if len(resources) < 1 && len(options.Filenames) == 0 { return cmdutil.UsageError(cmd, "one or more resources must be specified as or /") @@ -202,11 +201,11 @@ func RunLabel(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []stri if err != nil { 
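// [Editorial aside] parseLabels above accepts "key=value" pairs to add and a
// trailing "-" to mark removals, rejecting empty values and keys that are both
// set and removed in the same invocation. A compact stand-alone sketch of
// those parsing rules; label-syntax validation is deliberately omitted, and
// the exact error wording is illustrative.
package main

import (
	"fmt"
	"strings"
)

func parseLabelSpecs(specs []string) (add map[string]string, remove []string, err error) {
	add = map[string]string{}
	for _, s := range specs {
		switch {
		case strings.Contains(s, "="):
			parts := strings.SplitN(s, "=", 2)
			if parts[1] == "" {
				return nil, nil, fmt.Errorf("invalid label spec: %q", s)
			}
			add[parts[0]] = parts[1]
		case strings.HasSuffix(s, "-"):
			remove = append(remove, strings.TrimSuffix(s, "-"))
		default:
			return nil, nil, fmt.Errorf("unknown label spec: %q", s)
		}
	}
	// Reject a key that is both modified and removed, as the real parseLabels does.
	for _, k := range remove {
		if _, found := add[k]; found {
			return nil, nil, fmt.Errorf("cannot modify and remove label %q in the same command", k)
		}
	}
	return add, remove, nil
}

func main() {
	add, remove, err := parseLabelSpecs([]string{"a=b", "c=d", "e-"})
	fmt.Println(add, remove, err) // map[a:b c:d] [e] <nil>
}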
return cmdutil.UsageError(cmd, err.Error()) } - mapper, typer := f.Object() + mapper, typer := f.Object(cmdutil.GetIncludeThirdPartyAPIs(cmd)) b := resource.NewBuilder(mapper, typer, resource.ClientMapperFunc(f.ClientForMapping), f.Decoder(true)). ContinueOnError(). NamespaceParam(cmdNamespace).DefaultNamespace(). - FilenameParam(enforceNamespace, options.Filenames...). + FilenameParam(enforceNamespace, options.Recursive, options.Filenames...). SelectorParam(selector). ResourceTypeOrNameArgs(all, resources...). Flatten(). @@ -231,14 +230,14 @@ func RunLabel(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []stri var outputObj runtime.Object dataChangeMsg := "not labeled" - if cmdutil.GetFlagBool(cmd, "dry-run") { + if cmdutil.GetDryRunFlag(cmd) { err = labelFunc(info.Object, overwrite, resourceVersion, lbls, remove) if err != nil { return err } outputObj = info.Object } else { - obj, err := info.Mapping.ConvertToVersion(info.Object, info.Mapping.GroupVersionKind.GroupVersion().String()) + obj, err := info.Mapping.ConvertToVersion(info.Object, info.Mapping.GroupVersionKind.GroupVersion()) if err != nil { return err } @@ -247,9 +246,12 @@ func RunLabel(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []stri if err != nil { return err } - meta, err := api.ObjectMetaFor(obj) + accessor, err := meta.Accessor(obj) + if err != nil { + return err + } for _, label := range remove { - if _, ok := meta.Labels[label]; !ok { + if _, ok := accessor.GetLabels()[label]; !ok { fmt.Fprintf(out, "label %q not found.\n", label) } } @@ -293,7 +295,7 @@ func RunLabel(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []stri } outputFormat := cmdutil.GetFlagString(cmd, "output") if outputFormat != "" { - return f.PrintObject(cmd, outputObj, out) + return f.PrintObject(cmd, mapper, outputObj, out) } cmdutil.PrintSuccess(mapper, false, out, info.Mapping.Resource, info.Name, dataChangeMsg) return nil diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/label_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/label_test.go new file mode 100644 index 000000000000..24c2cff20ea8 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/label_test.go @@ -0,0 +1,418 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cmd + +import ( + "bytes" + "net/http" + "reflect" + "strings" + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/client/restclient" + "k8s.io/kubernetes/pkg/client/unversioned/fake" + "k8s.io/kubernetes/pkg/runtime" +) + +func TestValidateLabels(t *testing.T) { + tests := []struct { + meta *api.ObjectMeta + labels map[string]string + expectErr bool + test string + }{ + { + meta: &api.ObjectMeta{ + Labels: map[string]string{ + "a": "b", + "c": "d", + }, + }, + labels: map[string]string{ + "a": "c", + "d": "b", + }, + test: "one shared", + expectErr: true, + }, + { + meta: &api.ObjectMeta{ + Labels: map[string]string{ + "a": "b", + "c": "d", + }, + }, + labels: map[string]string{ + "b": "d", + "c": "a", + }, + test: "second shared", + expectErr: true, + }, + { + meta: &api.ObjectMeta{ + Labels: map[string]string{ + "a": "b", + "c": "d", + }, + }, + labels: map[string]string{ + "b": "a", + "d": "c", + }, + test: "no overlap", + }, + { + meta: &api.ObjectMeta{}, + labels: map[string]string{ + "b": "a", + "d": "c", + }, + test: "no labels", + }, + } + for _, test := range tests { + err := validateNoOverwrites(test.meta, test.labels) + if test.expectErr && err == nil { + t.Errorf("%s: unexpected non-error", test.test) + } + if !test.expectErr && err != nil { + t.Errorf("%s: unexpected error: %v", test.test, err) + } + } +} + +func TestParseLabels(t *testing.T) { + tests := []struct { + labels []string + expected map[string]string + expectedRemove []string + expectErr bool + }{ + { + labels: []string{"a=b", "c=d"}, + expected: map[string]string{"a": "b", "c": "d"}, + }, + { + labels: []string{}, + expected: map[string]string{}, + }, + { + labels: []string{"a=b", "c=d", "e-"}, + expected: map[string]string{"a": "b", "c": "d"}, + expectedRemove: []string{"e"}, + }, + { + labels: []string{"ab", "c=d"}, + expectErr: true, + }, + { + labels: []string{"a=b", "c=d", "a-"}, + expectErr: true, + }, + { + labels: []string{"a="}, + expectErr: true, + }, + { + labels: []string{"a=%^$"}, + expectErr: true, + }, + } + for _, test := range tests { + labels, remove, err := parseLabels(test.labels) + if test.expectErr && err == nil { + t.Errorf("unexpected non-error: %v", test) + } + if !test.expectErr && err != nil { + t.Errorf("unexpected error: %v %v", err, test) + } + if !reflect.DeepEqual(labels, test.expected) { + t.Errorf("expected: %v, got %v", test.expected, labels) + } + if !reflect.DeepEqual(remove, test.expectedRemove) { + t.Errorf("expected: %v, got %v", test.expectedRemove, remove) + } + } +} + +func TestLabelFunc(t *testing.T) { + tests := []struct { + obj runtime.Object + overwrite bool + version string + labels map[string]string + remove []string + expected runtime.Object + expectErr bool + }{ + { + obj: &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Labels: map[string]string{"a": "b"}, + }, + }, + labels: map[string]string{"a": "b"}, + expectErr: true, + }, + { + obj: &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Labels: map[string]string{"a": "b"}, + }, + }, + labels: map[string]string{"a": "c"}, + overwrite: true, + expected: &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Labels: map[string]string{"a": "c"}, + }, + }, + }, + { + obj: &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Labels: map[string]string{"a": "b"}, + }, + }, + labels: map[string]string{"c": "d"}, + expected: &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Labels: map[string]string{"a": "b", "c": "d"}, + }, + }, + }, + { + obj: &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Labels: 
map[string]string{"a": "b"}, + }, + }, + labels: map[string]string{"c": "d"}, + version: "2", + expected: &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Labels: map[string]string{"a": "b", "c": "d"}, + ResourceVersion: "2", + }, + }, + }, + { + obj: &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Labels: map[string]string{"a": "b"}, + }, + }, + labels: map[string]string{}, + remove: []string{"a"}, + expected: &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Labels: map[string]string{}, + }, + }, + }, + { + obj: &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Labels: map[string]string{"a": "b", "c": "d"}, + }, + }, + labels: map[string]string{"e": "f"}, + remove: []string{"a"}, + expected: &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Labels: map[string]string{ + "c": "d", + "e": "f", + }, + }, + }, + }, + { + obj: &api.Pod{ + ObjectMeta: api.ObjectMeta{}, + }, + labels: map[string]string{"a": "b"}, + expected: &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Labels: map[string]string{"a": "b"}, + }, + }, + }, + } + for _, test := range tests { + err := labelFunc(test.obj, test.overwrite, test.version, test.labels, test.remove) + if test.expectErr { + if err == nil { + t.Errorf("unexpected non-error: %v", test) + } + continue + } + if !test.expectErr && err != nil { + t.Errorf("unexpected error: %v %v", err, test) + } + if !reflect.DeepEqual(test.obj, test.expected) { + t.Errorf("expected: %v, got %v", test.expected, test.obj) + } + } +} + +func TestLabelErrors(t *testing.T) { + testCases := map[string]struct { + args []string + flags map[string]string + errFn func(error) bool + }{ + "no args": { + args: []string{}, + errFn: func(err error) bool { return strings.Contains(err.Error(), "one or more resources must be specified") }, + }, + "not enough labels": { + args: []string{"pods"}, + errFn: func(err error) bool { return strings.Contains(err.Error(), "at least one label update is required") }, + }, + "no resources": { + args: []string{"pods-"}, + errFn: func(err error) bool { return strings.Contains(err.Error(), "one or more resources must be specified") }, + }, + "no resources 2": { + args: []string{"pods=bar"}, + errFn: func(err error) bool { return strings.Contains(err.Error(), "one or more resources must be specified") }, + }, + } + + for k, testCase := range testCases { + f, tf, _ := NewAPIFactory() + tf.Printer = &testPrinter{} + tf.Namespace = "test" + tf.ClientConfig = &restclient.Config{ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}} + + buf := bytes.NewBuffer([]byte{}) + cmd := NewCmdLabel(f, buf) + cmd.SetOutput(buf) + + for k, v := range testCase.flags { + cmd.Flags().Set(k, v) + } + err := RunLabel(f, buf, cmd, testCase.args, &LabelOptions{}) + if !testCase.errFn(err) { + t.Errorf("%s: unexpected error: %v", k, err) + continue + } + if tf.Printer.(*testPrinter).Objects != nil { + t.Errorf("unexpected print to default printer") + } + if buf.Len() > 0 { + t.Errorf("buffer should be empty: %s", string(buf.Bytes())) + } + } +} + +func TestLabelForResourceFromFile(t *testing.T) { + pods, _, _ := testData() + f, tf, codec := NewAPIFactory() + tf.Client = &fake.RESTClient{ + Codec: codec, + Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { + switch req.Method { + case "GET": + switch req.URL.Path { + case "/namespaces/test/replicationcontrollers/cassandra": + return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, &pods.Items[0])}, nil + default: + t.Fatalf("unexpected request: %#v\n%#v", req.URL, req) + return nil, nil + 
} + case "PATCH": + switch req.URL.Path { + case "/namespaces/test/replicationcontrollers/cassandra": + return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, &pods.Items[0])}, nil + default: + t.Fatalf("unexpected request: %#v\n%#v", req.URL, req) + return nil, nil + } + default: + t.Fatalf("unexpected request: %s %#v\n%#v", req.Method, req.URL, req) + return nil, nil + } + }), + } + tf.Namespace = "test" + tf.ClientConfig = &restclient.Config{ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}} + + buf := bytes.NewBuffer([]byte{}) + cmd := NewCmdLabel(f, buf) + options := &LabelOptions{ + Filenames: []string{"../../../examples/cassandra/cassandra-controller.yaml"}, + } + + err := RunLabel(f, buf, cmd, []string{"a=b"}, options) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if !strings.Contains(buf.String(), "labeled") { + t.Errorf("did not set labels: %s", buf.String()) + } +} + +func TestLabelMultipleObjects(t *testing.T) { + pods, _, _ := testData() + f, tf, codec := NewAPIFactory() + tf.Client = &fake.RESTClient{ + Codec: codec, + Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { + switch req.Method { + case "GET": + switch req.URL.Path { + case "/namespaces/test/pods": + return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, pods)}, nil + default: + t.Fatalf("unexpected request: %#v\n%#v", req.URL, req) + return nil, nil + } + case "PATCH": + switch req.URL.Path { + case "/namespaces/test/pods/foo": + return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, &pods.Items[0])}, nil + case "/namespaces/test/pods/bar": + return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, &pods.Items[1])}, nil + default: + t.Fatalf("unexpected request: %#v\n%#v", req.URL, req) + return nil, nil + } + default: + t.Fatalf("unexpected request: %s %#v\n%#v", req.Method, req.URL, req) + return nil, nil + } + }), + } + tf.Namespace = "test" + tf.ClientConfig = &restclient.Config{ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}} + + buf := bytes.NewBuffer([]byte{}) + cmd := NewCmdLabel(f, buf) + cmd.Flags().Set("all", "true") + + if err := RunLabel(f, buf, cmd, []string{"pods", "a=b"}, &LabelOptions{}); err != nil { + t.Fatalf("unexpected error: %v", err) + } + if strings.Count(buf.String(), "labeled") != len(pods.Items) { + t.Errorf("not all labels are set: %s", buf.String()) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/logs.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/logs.go index fbbc2b802804..b79d8c07d124 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/logs.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/logs.go @@ -61,6 +61,7 @@ type LogsOptions struct { ClientMapper resource.ClientMapper Decoder runtime.Decoder + Object runtime.Object LogsForObject func(object, options runtime.Object) (*restclient.Request, error) Out io.Writer @@ -100,6 +101,7 @@ func NewCmdLogs(f *cmdutil.Factory, out io.Writer) *cobra.Command { cmd.Flags().Bool("interactive", false, "If true, prompt the user for input when required.") cmd.Flags().MarkDeprecated("interactive", "This flag is no longer respected and there is no replacement.") + cmdutil.AddInclude3rdPartyFlags(cmd) return cmd } @@ -150,14 +152,27 @@ func (o *LogsOptions) Complete(f *cmdutil.Factory, out io.Writer, cmd *cobra.Com logOptions.SinceSeconds = 
&sec } o.Options = logOptions - - o.Mapper, o.Typer = f.Object() - o.Decoder = f.Decoder(true) - o.ClientMapper = resource.ClientMapperFunc(f.ClientForMapping) o.LogsForObject = f.LogsForObject - + o.ClientMapper = resource.ClientMapperFunc(f.ClientForMapping) o.Out = out + mapper, typer := f.Object(cmdutil.GetIncludeThirdPartyAPIs(cmd)) + decoder := f.Decoder(true) + if o.Object == nil { + infos, err := resource.NewBuilder(mapper, typer, o.ClientMapper, decoder). + NamespaceParam(o.Namespace).DefaultNamespace(). + ResourceNames("pods", o.ResourceArg). + SingleResourceType(). + Do().Infos() + if err != nil { + return err + } + if len(infos) != 1 { + return errors.New("expected a resource") + } + o.Object = infos[0].Object + } + return nil } @@ -178,20 +193,7 @@ func (o LogsOptions) Validate() error { // RunLogs retrieves a pod log func (o LogsOptions) RunLogs() (int64, error) { - infos, err := resource.NewBuilder(o.Mapper, o.Typer, o.ClientMapper, o.Decoder). - NamespaceParam(o.Namespace).DefaultNamespace(). - ResourceNames("pods", o.ResourceArg). - SingleResourceType(). - Do().Infos() - if err != nil { - return 0, err - } - if len(infos) != 1 { - return 0, errors.New("expected a resource") - } - info := infos[0] - - req, err := o.LogsForObject(info.Object, o.Options) + req, err := o.LogsForObject(o.Object, o.Options) if err != nil { return 0, err } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/logs_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/logs_test.go new file mode 100644 index 000000000000..3eaa4e59a23e --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/logs_test.go @@ -0,0 +1,140 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cmd + +import ( + "bytes" + "io/ioutil" + "net/http" + "os" + "strings" + "testing" + + "github.com/spf13/cobra" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/client/restclient" + "k8s.io/kubernetes/pkg/client/unversioned/fake" +) + +func TestLog(t *testing.T) { + tests := []struct { + name, version, podPath, logPath, container string + pod *api.Pod + }{ + { + name: "v1 - pod log", + version: "v1", + podPath: "/namespaces/test/pods/foo", + logPath: "/api/v1/namespaces/test/pods/foo/log", + pod: testPod(), + }, + } + for _, test := range tests { + logContent := "test log content" + f, tf, codec := NewAPIFactory() + tf.Client = &fake.RESTClient{ + Codec: codec, + Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { + switch p, m := req.URL.Path, req.Method; { + case p == test.podPath && m == "GET": + body := objBody(codec, test.pod) + return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: body}, nil + case p == test.logPath && m == "GET": + body := ioutil.NopCloser(bytes.NewBufferString(logContent)) + return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: body}, nil + default: + // Ensures no GET is performed when deleting by name + t.Errorf("%s: unexpected request: %#v\n%#v", test.name, req.URL, req) + return nil, nil + } + }), + } + tf.Namespace = "test" + tf.ClientConfig = &restclient.Config{ContentConfig: restclient.ContentConfig{GroupVersion: &unversioned.GroupVersion{Version: test.version}}} + buf := bytes.NewBuffer([]byte{}) + + cmd := NewCmdLogs(f, buf) + cmd.Flags().Set("namespace", "test") + cmd.Run(cmd, []string{"foo"}) + + if buf.String() != logContent { + t.Errorf("%s: did not get expected log content. Got: %s", test.name, buf.String()) + } + } +} + +func testPod() *api.Pod { + return &api.Pod{ + ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: "test", ResourceVersion: "10"}, + Spec: api.PodSpec{ + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + Containers: []api.Container{ + { + Name: "bar", + }, + }, + }, + } +} + +func TestValidateLogFlags(t *testing.T) { + f, _, _ := NewAPIFactory() + + tests := []struct { + name string + flags map[string]string + expected string + }{ + { + name: "since & since-time", + flags: map[string]string{"since": "1h", "since-time": "2006-01-02T15:04:05Z"}, + expected: "at most one of `sinceTime` or `sinceSeconds` may be specified", + }, + { + name: "negative limit-bytes", + flags: map[string]string{"limit-bytes": "-100"}, + expected: "must be greater than 0", + }, + { + name: "negative tail", + flags: map[string]string{"tail": "-100"}, + expected: "must be greater than or equal to 0", + }, + } + for _, test := range tests { + cmd := NewCmdLogs(f, bytes.NewBuffer([]byte{})) + out := "" + for flag, value := range test.flags { + cmd.Flags().Set(flag, value) + } + // checkErr breaks tests in case of errors, plus we just + // need to check errors returned by the command validation + o := &LogsOptions{} + cmd.Run = func(cmd *cobra.Command, args []string) { + o.Complete(f, os.Stdout, cmd, args) + out = o.Validate().Error() + } + cmd.Run(cmd, []string{"foo"}) + + if !strings.Contains(out, test.expected) { + t.Errorf("%s: expected to find:\n\t%s\nfound:\n\t%s\n", test.name, test.expected, out) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/patch.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/patch.go index 80925a4e9e76..6f892133cc6e 100644 --- 
a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/patch.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/patch.go @@ -37,6 +37,7 @@ var patchTypes = map[string]api.PatchType{"json": api.JSONPatchType, "merge": ap // referencing the cmd.Flags() type PatchOptions struct { Filenames []string + Recursive bool } const ( @@ -44,7 +45,7 @@ const ( JSON and YAML formats are accepted. -Please refer to the models in https://htmlpreview.github.io/?https://github.com/kubernetes/kubernetes/blob/release-1.2/docs/api-reference/v1/definitions.html to find if a field is mutable.` +Please refer to the models in https://htmlpreview.github.io/?https://github.com/kubernetes/kubernetes/blob/HEAD/docs/api-reference/v1/definitions.html to find if a field is mutable.` patch_example = ` # Partially update a node using strategic merge patch kubectl patch node k8s-node-1 -p '{"spec":{"unschedulable":true}}' @@ -62,6 +63,15 @@ kubectl patch pod valid-pod -type='json' -p='[{"op": "replace", "path": "/spec/c func NewCmdPatch(f *cmdutil.Factory, out io.Writer) *cobra.Command { options := &PatchOptions{} + // retrieve a list of handled resources from printer as valid args + validArgs, argAliases := []string{}, []string{} + p, err := f.Printer(nil, false, false, false, false, false, false, []string{}) + cmdutil.CheckErr(err) + if p != nil { + validArgs = p.HandledResources() + argAliases = kubectl.ResourceAliases(validArgs) + } + cmd := &cobra.Command{ Use: "patch (-f FILENAME | TYPE NAME) -p PATCH", Short: "Update field(s) of a resource using strategic merge patch.", @@ -73,15 +83,19 @@ func NewCmdPatch(f *cmdutil.Factory, out io.Writer) *cobra.Command { err := RunPatch(f, out, cmd, args, shortOutput, options) cmdutil.CheckErr(err) }, + ValidArgs: validArgs, + ArgAliases: argAliases, } cmd.Flags().StringP("patch", "p", "", "The patch to be applied to the resource JSON file.") cmd.MarkFlagRequired("patch") cmd.Flags().String("type", "strategic", fmt.Sprintf("The type of patch being provided; one of %v", sets.StringKeySet(patchTypes).List())) cmdutil.AddOutputFlagsForMutation(cmd) cmdutil.AddRecordFlag(cmd) + cmdutil.AddInclude3rdPartyFlags(cmd) usage := "Filename, directory, or URL to a file identifying the resource to update" kubectl.AddJsonFilenameFlag(cmd, &options.Filenames, usage) + cmdutil.AddRecursiveFlag(cmd, &options.Recursive) return cmd } @@ -110,11 +124,11 @@ func RunPatch(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []stri return fmt.Errorf("unable to parse %q: %v", patch, err) } - mapper, typer := f.Object() + mapper, typer := f.Object(cmdutil.GetIncludeThirdPartyAPIs(cmd)) r := resource.NewBuilder(mapper, typer, resource.ClientMapperFunc(f.ClientForMapping), f.Decoder(true)). ContinueOnError(). NamespaceParam(cmdNamespace).DefaultNamespace(). - FilenameParam(enforceNamespace, options.Filenames...). + FilenameParam(enforceNamespace, options.Recursive, options.Filenames...). ResourceTypeOrNameArgs(false, args...). Flatten(). 
Do() @@ -123,35 +137,41 @@ func RunPatch(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []stri return err } - infos, err := r.Infos() - if err != nil { - return err - } - if len(infos) > 1 { - return fmt.Errorf("multiple resources provided") - } - info := infos[0] - name, namespace := info.Name, info.Namespace - mapping := info.ResourceMapping() - client, err := f.ClientForMapping(mapping) - if err != nil { - return err - } + count := 0 + err = r.Visit(func(info *resource.Info, err error) error { + if err != nil { + return err + } + name, namespace := info.Name, info.Namespace + mapping := info.ResourceMapping() + client, err := f.ClientForMapping(mapping) + if err != nil { + return err + } - helper := resource.NewHelper(client, mapping) - patchedObject, err := helper.Patch(namespace, name, patchType, patchBytes) + helper := resource.NewHelper(client, mapping) + patchedObject, err := helper.Patch(namespace, name, patchType, patchBytes) + if err != nil { + return err + } + if cmdutil.ShouldRecord(cmd, info) { + if err := cmdutil.RecordChangeCause(patchedObject, f.Command()); err == nil { + // don't return an error on failure. The patch itself succeeded, its only the hint for that change that failed + // don't bother checking for failures of this replace, because a failure to indicate the hint doesn't fail the command + // also, don't force the replacement. If the replacement fails on a resourceVersion conflict, then it means this + // record hint is likely to be invalid anyway, so avoid the bad hint + resource.NewHelper(client, mapping).Replace(namespace, name, false, patchedObject) + } + } + count++ + cmdutil.PrintSuccess(mapper, shortOutput, out, "", name, "patched") + return nil + }) if err != nil { return err } - if cmdutil.ShouldRecord(cmd, info) { - if err := cmdutil.RecordChangeCause(patchedObject, f.Command()); err == nil { - // don't return an error on failure. The patch itself succeeded, its only the hint for that change that failed - // don't bother checking for failures of this replace, because a failure to indicate the hint doesn't fail the command - // also, don't force the replacement. If the replacement fails on a resourceVersion conflict, then it means this - // record hint is likely to be invalid anyway, so avoid the bad hint - resource.NewHelper(client, mapping).Replace(namespace, name, false, patchedObject) - } + if count == 0 { + return fmt.Errorf("no objects passed to patch") } - cmdutil.PrintSuccess(mapper, shortOutput, out, "", name, "patched") return nil } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/patch_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/patch_test.go new file mode 100644 index 000000000000..a6e2a5432f27 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/patch_test.go @@ -0,0 +1,90 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
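// [Editorial aside] The patch.go hunk above replaces "fetch all infos, require
// exactly one" with a streaming r.Visit callback plus a counter, which is what
// lets kubectl patch handle multiple objects and report "no objects passed to
// patch". A stand-alone sketch of that visitor-with-count shape; the info and
// result types are toy stand-ins for resource.Info and resource.Result.
package main

import (
	"errors"
	"fmt"
)

type info struct{ name string }

type result struct{ items []info }

// visit mirrors resource.Result.Visit: it calls fn once per item and stops on
// the first error.
func (r *result) visit(fn func(info) error) error {
	for _, i := range r.items {
		if err := fn(i); err != nil {
			return err
		}
	}
	return nil
}

func patchAll(r *result) error {
	count := 0
	err := r.visit(func(i info) error {
		fmt.Println(i.name, "patched") // the real code calls helper.Patch here
		count++
		return nil
	})
	if err != nil {
		return err
	}
	if count == 0 {
		return errors.New("no objects passed to patch")
	}
	return nil
}

func main() {
	if err := patchAll(&result{items: []info{{"services/frontend"}}}); err != nil {
		panic(err)
	}
}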
+*/ + +package cmd + +import ( + "bytes" + "net/http" + "testing" + + "k8s.io/kubernetes/pkg/client/unversioned/fake" +) + +func TestPatchObject(t *testing.T) { + _, svc, _ := testData() + + f, tf, codec := NewAPIFactory() + tf.Printer = &testPrinter{} + tf.Client = &fake.RESTClient{ + Codec: codec, + Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { + switch p, m := req.URL.Path, req.Method; { + case p == "/namespaces/test/services/frontend" && (m == "PATCH" || m == "GET"): + return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, &svc.Items[0])}, nil + default: + t.Fatalf("unexpected request: %#v\n%#v", req.URL, req) + return nil, nil + } + }), + } + tf.Namespace = "test" + buf := bytes.NewBuffer([]byte{}) + + cmd := NewCmdPatch(f, buf) + cmd.Flags().Set("namespace", "test") + cmd.Flags().Set("patch", `{"spec":{"type":"NodePort"}}`) + cmd.Flags().Set("output", "name") + cmd.Run(cmd, []string{"services/frontend"}) + + // uses the name from the file, not the response + if buf.String() != "frontend\n" { + t.Errorf("unexpected output: %s", buf.String()) + } +} + +func TestPatchObjectFromFile(t *testing.T) { + _, svc, _ := testData() + + f, tf, codec := NewAPIFactory() + tf.Printer = &testPrinter{} + tf.Client = &fake.RESTClient{ + Codec: codec, + Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { + switch p, m := req.URL.Path, req.Method; { + case p == "/namespaces/test/services/frontend" && (m == "PATCH" || m == "GET"): + return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, &svc.Items[0])}, nil + default: + t.Fatalf("unexpected request: %#v\n%#v", req.URL, req) + return nil, nil + } + }), + } + tf.Namespace = "test" + buf := bytes.NewBuffer([]byte{}) + + cmd := NewCmdPatch(f, buf) + cmd.Flags().Set("namespace", "test") + cmd.Flags().Set("patch", `{"spec":{"type":"NodePort"}}`) + cmd.Flags().Set("output", "name") + cmd.Flags().Set("filename", "../../../examples/guestbook/frontend-service.yaml") + cmd.Run(cmd, []string{}) + + // uses the name from the file, not the response + if buf.String() != "frontend\n" { + t.Errorf("unexpected output: %s", buf.String()) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/portforward.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/portforward.go index 3fb2d3a4dbeb..3d705466f164 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/portforward.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/portforward.go @@ -17,6 +17,7 @@ limitations under the License. 
package cmd import ( + "io" "net/url" "os" "os/signal" @@ -45,14 +46,18 @@ kubectl port-forward mypod :5000 kubectl port-forward mypod 0:5000` ) -func NewCmdPortForward(f *cmdutil.Factory) *cobra.Command { +func NewCmdPortForward(f *cmdutil.Factory, cmdOut, cmdErr io.Writer) *cobra.Command { cmd := &cobra.Command{ Use: "port-forward POD [LOCAL_PORT:]REMOTE_PORT [...[LOCAL_PORT_N:]REMOTE_PORT_N]", Short: "Forward one or more local ports to a pod.", Long: "Forward one or more local ports to a pod.", Example: portforward_example, Run: func(cmd *cobra.Command, args []string) { - err := RunPortForward(f, cmd, args, &defaultPortForwarder{}) + pf := &defaultPortForwarder{ + cmdOut: cmdOut, + cmdErr: cmdErr, + } + err := RunPortForward(f, cmd, args, pf) cmdutil.CheckErr(err) }, } @@ -65,14 +70,16 @@ type portForwarder interface { ForwardPorts(method string, url *url.URL, config *restclient.Config, ports []string, stopChan <-chan struct{}) error } -type defaultPortForwarder struct{} +type defaultPortForwarder struct { + cmdOut, cmdErr io.Writer +} -func (*defaultPortForwarder) ForwardPorts(method string, url *url.URL, config *restclient.Config, ports []string, stopChan <-chan struct{}) error { +func (f *defaultPortForwarder) ForwardPorts(method string, url *url.URL, config *restclient.Config, ports []string, stopChan <-chan struct{}) error { dialer, err := remotecommand.NewExecutor(config, method, url) if err != nil { return err } - fw, err := portforward.New(dialer, ports, stopChan) + fw, err := portforward.New(dialer, ports, stopChan, f.cmdOut, f.cmdErr) if err != nil { return err } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/portforward_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/portforward_test.go new file mode 100644 index 000000000000..75363d2ca8cc --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/portforward_test.go @@ -0,0 +1,176 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cmd + +import ( + "fmt" + "net/http" + "net/url" + "testing" + + "github.com/spf13/cobra" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/client/restclient" + "k8s.io/kubernetes/pkg/client/unversioned/fake" +) + +type fakePortForwarder struct { + method string + url *url.URL + pfErr error +} + +func (f *fakePortForwarder) ForwardPorts(method string, url *url.URL, config *restclient.Config, ports []string, stopChan <-chan struct{}) error { + f.method = method + f.url = url + return f.pfErr +} + +func TestPortForward(t *testing.T) { + version := testapi.Default.GroupVersion().Version + + tests := []struct { + name, version, podPath, pfPath, container string + pod *api.Pod + pfErr bool + }{ + { + name: "pod portforward", + version: version, + podPath: "/api/" + version + "/namespaces/test/pods/foo", + pfPath: "/api/" + version + "/namespaces/test/pods/foo/portforward", + pod: execPod(), + }, + { + name: "pod portforward error", + version: version, + podPath: "/api/" + version + "/namespaces/test/pods/foo", + pfPath: "/api/" + version + "/namespaces/test/pods/foo/portforward", + pod: execPod(), + pfErr: true, + }, + } + for _, test := range tests { + f, tf, codec := NewAPIFactory() + tf.Client = &fake.RESTClient{ + Codec: codec, + Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { + switch p, m := req.URL.Path, req.Method; { + case p == test.podPath && m == "GET": + body := objBody(codec, test.pod) + return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: body}, nil + default: + // Ensures no GET is performed when deleting by name + t.Errorf("%s: unexpected request: %#v\n%#v", test.name, req.URL, req) + return nil, nil + } + }), + } + tf.Namespace = "test" + tf.ClientConfig = &restclient.Config{ContentConfig: restclient.ContentConfig{GroupVersion: &unversioned.GroupVersion{Version: test.version}}} + ff := &fakePortForwarder{} + if test.pfErr { + ff.pfErr = fmt.Errorf("pf error") + } + cmd := &cobra.Command{} + cmd.Flags().StringP("pod", "p", "", "Pod name") + err := RunPortForward(f, cmd, []string{"foo", ":5000", ":1000"}, ff) + + if test.pfErr && err != ff.pfErr { + t.Errorf("%s: Unexpected exec error: %v", test.name, err) + } + if !test.pfErr && err != nil { + t.Errorf("%s: Unexpected error: %v", test.name, err) + } + if test.pfErr { + continue + } + + if ff.url.Path != test.pfPath { + t.Errorf("%s: Did not get expected path for portforward request", test.name) + } + if ff.method != "POST" { + t.Errorf("%s: Did not get method for attach request: %s", test.name, ff.method) + } + + } +} + +func TestPortForwardWithPFlag(t *testing.T) { + version := testapi.Default.GroupVersion().Version + + tests := []struct { + name, version, podPath, pfPath, container string + pod *api.Pod + pfErr bool + }{ + { + name: "pod portforward", + version: version, + podPath: "/api/" + version + "/namespaces/test/pods/foo", + pfPath: "/api/" + version + "/namespaces/test/pods/foo/portforward", + pod: execPod(), + }, + { + name: "pod portforward error", + version: version, + podPath: "/api/" + version + "/namespaces/test/pods/foo", + pfPath: "/api/" + version + "/namespaces/test/pods/foo/portforward", + pod: execPod(), + pfErr: true, + }, + } + for _, test := range tests { + f, tf, codec := NewAPIFactory() + tf.Client = &fake.RESTClient{ + Codec: codec, + Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { + switch p, m := req.URL.Path, 
req.Method; { + case p == test.podPath && m == "GET": + body := objBody(codec, test.pod) + return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: body}, nil + default: + // Ensures no GET is performed when deleting by name + t.Errorf("%s: unexpected request: %#v\n%#v", test.name, req.URL, req) + return nil, nil + } + }), + } + tf.Namespace = "test" + tf.ClientConfig = &restclient.Config{ContentConfig: restclient.ContentConfig{GroupVersion: &unversioned.GroupVersion{Version: test.version}}} + ff := &fakePortForwarder{} + if test.pfErr { + ff.pfErr = fmt.Errorf("pf error") + } + cmd := &cobra.Command{} + podPtr := cmd.Flags().StringP("pod", "p", "", "Pod name") + *podPtr = "foo" + err := RunPortForward(f, cmd, []string{":5000", ":1000"}, ff) + if test.pfErr && err != ff.pfErr { + t.Errorf("%s: Unexpected exec error: %v", test.name, err) + } + if !test.pfErr && ff.url.Path != test.pfPath { + t.Errorf("%s: Did not get expected path for portforward request", test.name) + } + if !test.pfErr && err != nil { + t.Errorf("%s: Unexpected error: %v", test.name, err) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/replace.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/replace.go index 78a999df04fb..8b98402aaa8d 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/replace.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/replace.go @@ -35,6 +35,7 @@ import ( // referencing the cmd.Flags() type ReplaceOptions struct { Filenames []string + Recursive bool } const ( @@ -44,7 +45,7 @@ JSON and YAML formats are accepted. If replacing an existing resource, the complete resource spec must be provided. This can be obtained by $ kubectl get TYPE NAME -o yaml -Please refer to the models in https://htmlpreview.github.io/?https://github.com/kubernetes/kubernetes/blob/release-1.2/docs/api-reference/v1/definitions.html to find if a field is mutable.` +Please refer to the models in https://htmlpreview.github.io/?https://github.com/kubernetes/kubernetes/blob/HEAD/docs/api-reference/v1/definitions.html to find if a field is mutable.` replace_example = `# Replace a pod using the data in pod.json. kubectl replace -f ./pod.json @@ -82,9 +83,12 @@ func NewCmdReplace(f *cmdutil.Factory, out io.Writer) *cobra.Command { cmd.Flags().Int("grace-period", -1, "Only relevant during a force replace. Period of time in seconds given to the old resource to terminate gracefully. Ignored if negative.") cmd.Flags().Duration("timeout", 0, "Only relevant during a force replace. The length of time to wait before giving up on a delete of the old resource, zero means determine a timeout from the size of the object") cmdutil.AddValidateFlags(cmd) + cmdutil.AddRecursiveFlag(cmd, &options.Recursive) cmdutil.AddOutputFlagsForMutation(cmd) cmdutil.AddApplyAnnotationFlags(cmd) cmdutil.AddRecordFlag(cmd) + cmdutil.AddInclude3rdPartyFlags(cmd) + return cmd } @@ -112,12 +116,12 @@ func RunReplace(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []st return forceReplace(f, out, cmd, args, shortOutput, options) } - mapper, typer := f.Object() + mapper, typer := f.Object(cmdutil.GetIncludeThirdPartyAPIs(cmd)) r := resource.NewBuilder(mapper, typer, resource.ClientMapperFunc(f.ClientForMapping), f.Decoder(true)). Schema(schema). ContinueOnError(). NamespaceParam(cmdNamespace).DefaultNamespace(). - FilenameParam(enforceNamespace, options.Filenames...). + FilenameParam(enforceNamespace, options.Recursive, options.Filenames...). Flatten(). 
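// [Editorial aside] The port-forward change above threads cmdOut/cmdErr
// writers through a small portForwarder interface, which is exactly the seam
// that makes the fakePortForwarder test double possible: the test records the
// method and URL instead of opening a connection. A minimal stdlib-only
// sketch of that seam; the real signature also takes a rest config, ports
// list, and stop channel.
package main

import (
	"fmt"
	"io"
	"net/url"
	"os"
)

type portForwarder interface {
	ForwardPorts(method string, url *url.URL, ports []string) error
}

// defaultForwarder would dial the server; here it just logs to its writers.
type defaultForwarder struct{ out, errOut io.Writer }

func (f *defaultForwarder) ForwardPorts(method string, u *url.URL, ports []string) error {
	fmt.Fprintf(f.out, "forwarding %v via %s %s\n", ports, method, u)
	return nil
}

// fakeForwarder records the call for later assertions, like fakePortForwarder above.
type fakeForwarder struct {
	method string
	url    *url.URL
	err    error
}

func (f *fakeForwarder) ForwardPorts(method string, u *url.URL, ports []string) error {
	f.method, f.url = method, u
	return f.err
}

func main() {
	u, _ := url.Parse("https://example.invalid/api/v1/namespaces/test/pods/foo/portforward")
	var pf portForwarder = &defaultForwarder{out: os.Stdout, errOut: os.Stderr}
	_ = pf.ForwardPorts("POST", u, []string{":5000"})
}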
Do() err = r.Err() @@ -147,7 +151,7 @@ func RunReplace(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []st } info.Refresh(obj, true) - printObjectSpecificMessage(obj, out) + f.PrintObjectSpecificMessage(obj, out) cmdutil.PrintSuccess(mapper, shortOutput, out, info.Mapping.Resource, info.Name, "replaced") return nil }) @@ -180,11 +184,11 @@ func forceReplace(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args [] } } - mapper, typer := f.Object() + mapper, typer := f.Object(cmdutil.GetIncludeThirdPartyAPIs(cmd)) r := resource.NewBuilder(mapper, typer, resource.ClientMapperFunc(f.ClientForMapping), f.Decoder(true)). ContinueOnError(). NamespaceParam(cmdNamespace).DefaultNamespace(). - FilenameParam(enforceNamespace, options.Filenames...). + FilenameParam(enforceNamespace, options.Recursive, options.Filenames...). ResourceTypeOrNameArgs(false, args...).RequireObject(false). Flatten(). Do() @@ -209,7 +213,7 @@ func forceReplace(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args [] Schema(schema). ContinueOnError(). NamespaceParam(cmdNamespace).DefaultNamespace(). - FilenameParam(enforceNamespace, options.Filenames...). + FilenameParam(enforceNamespace, options.Recursive, options.Filenames...). Flatten(). Do() err = r.Err() @@ -240,7 +244,7 @@ func forceReplace(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args [] count++ info.Refresh(obj, true) - printObjectSpecificMessage(obj, out) + f.PrintObjectSpecificMessage(obj, out) cmdutil.PrintSuccess(mapper, shortOutput, out, info.Mapping.Resource, info.Name, "replaced") return nil }) diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/replace_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/replace_test.go new file mode 100644 index 000000000000..47ed751a7348 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/replace_test.go @@ -0,0 +1,193 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cmd + +import ( + "bytes" + "net/http" + "strings" + "testing" + + "k8s.io/kubernetes/pkg/client/unversioned/fake" +) + +func TestReplaceObject(t *testing.T) { + _, _, rc := testData() + + f, tf, codec := NewAPIFactory() + tf.Printer = &testPrinter{} + tf.Client = &fake.RESTClient{ + Codec: codec, + Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { + switch p, m := req.URL.Path, req.Method; { + case p == "/namespaces/test/replicationcontrollers/redis-master" && (m == "GET" || m == "PUT" || m == "DELETE"): + return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, &rc.Items[0])}, nil + case p == "/namespaces/test/replicationcontrollers" && m == "POST": + return &http.Response{StatusCode: 201, Header: defaultHeader(), Body: objBody(codec, &rc.Items[0])}, nil + default: + t.Fatalf("unexpected request: %#v\n%#v", req.URL, req) + return nil, nil + } + }), + } + tf.Namespace = "test" + buf := bytes.NewBuffer([]byte{}) + + cmd := NewCmdReplace(f, buf) + cmd.Flags().Set("filename", "../../../examples/guestbook/legacy/redis-master-controller.yaml") + cmd.Flags().Set("output", "name") + cmd.Run(cmd, []string{}) + + // uses the name from the file, not the response + if buf.String() != "replicationcontroller/rc1\n" { + t.Errorf("unexpected output: %s", buf.String()) + } + + buf.Reset() + cmd.Flags().Set("force", "true") + cmd.Flags().Set("cascade", "false") + cmd.Flags().Set("output", "name") + cmd.Run(cmd, []string{}) + + if buf.String() != "replicationcontroller/redis-master\nreplicationcontroller/rc1\n" { + t.Errorf("unexpected output: %s", buf.String()) + } +} + +func TestReplaceMultipleObject(t *testing.T) { + _, svc, rc := testData() + + f, tf, codec := NewAPIFactory() + tf.Printer = &testPrinter{} + tf.Client = &fake.RESTClient{ + Codec: codec, + Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { + switch p, m := req.URL.Path, req.Method; { + case p == "/namespaces/test/replicationcontrollers/redis-master" && (m == "GET" || m == "PUT" || m == "DELETE"): + return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, &rc.Items[0])}, nil + case p == "/namespaces/test/replicationcontrollers" && m == "POST": + return &http.Response{StatusCode: 201, Header: defaultHeader(), Body: objBody(codec, &rc.Items[0])}, nil + case p == "/namespaces/test/services/frontend" && (m == "GET" || m == "PUT" || m == "DELETE"): + return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, &svc.Items[0])}, nil + case p == "/namespaces/test/services" && m == "POST": + return &http.Response{StatusCode: 201, Header: defaultHeader(), Body: objBody(codec, &svc.Items[0])}, nil + default: + t.Fatalf("unexpected request: %#v\n%#v", req.URL, req) + return nil, nil + } + }), + } + tf.Namespace = "test" + buf := bytes.NewBuffer([]byte{}) + + cmd := NewCmdReplace(f, buf) + cmd.Flags().Set("filename", "../../../examples/guestbook/legacy/redis-master-controller.yaml") + cmd.Flags().Set("filename", "../../../examples/guestbook/frontend-service.yaml") + cmd.Flags().Set("output", "name") + cmd.Run(cmd, []string{}) + + if buf.String() != "replicationcontroller/rc1\nservice/baz\n" { + t.Errorf("unexpected output: %s", buf.String()) + } + + buf.Reset() + cmd.Flags().Set("force", "true") + cmd.Flags().Set("cascade", "false") + cmd.Flags().Set("output", "name") + cmd.Run(cmd, []string{}) + + if buf.String() != 
"replicationcontroller/redis-master\nservice/frontend\nreplicationcontroller/rc1\nservice/baz\n" { + t.Errorf("unexpected output: %s", buf.String()) + } +} + +func TestReplaceDirectory(t *testing.T) { + _, _, rc := testData() + + f, tf, codec := NewAPIFactory() + tf.Printer = &testPrinter{} + tf.Client = &fake.RESTClient{ + Codec: codec, + Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { + switch p, m := req.URL.Path, req.Method; { + case strings.HasPrefix(p, "/namespaces/test/replicationcontrollers/") && (m == "GET" || m == "PUT" || m == "DELETE"): + return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, &rc.Items[0])}, nil + case strings.HasPrefix(p, "/namespaces/test/replicationcontrollers") && m == "POST": + return &http.Response{StatusCode: 201, Header: defaultHeader(), Body: objBody(codec, &rc.Items[0])}, nil + default: + t.Fatalf("unexpected request: %#v\n%#v", req.URL, req) + return nil, nil + } + }), + } + tf.Namespace = "test" + buf := bytes.NewBuffer([]byte{}) + + cmd := NewCmdReplace(f, buf) + cmd.Flags().Set("filename", "../../../examples/guestbook/legacy") + cmd.Flags().Set("namespace", "test") + cmd.Flags().Set("output", "name") + cmd.Run(cmd, []string{}) + + if buf.String() != "replicationcontroller/rc1\nreplicationcontroller/rc1\nreplicationcontroller/rc1\n" { + t.Errorf("unexpected output: %s", buf.String()) + } + + buf.Reset() + cmd.Flags().Set("force", "true") + cmd.Flags().Set("cascade", "false") + cmd.Run(cmd, []string{}) + + if buf.String() != "replicationcontroller/frontend\nreplicationcontroller/redis-master\nreplicationcontroller/redis-slave\n"+ + "replicationcontroller/rc1\nreplicationcontroller/rc1\nreplicationcontroller/rc1\n" { + t.Errorf("unexpected output: %s", buf.String()) + } +} + +func TestForceReplaceObjectNotFound(t *testing.T) { + _, _, rc := testData() + + f, tf, codec := NewAPIFactory() + tf.Printer = &testPrinter{} + tf.Client = &fake.RESTClient{ + Codec: codec, + Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { + switch p, m := req.URL.Path, req.Method; { + case p == "/namespaces/test/replicationcontrollers/redis-master" && m == "DELETE": + return &http.Response{StatusCode: 404, Header: defaultHeader(), Body: stringBody("")}, nil + case p == "/namespaces/test/replicationcontrollers" && m == "POST": + return &http.Response{StatusCode: 201, Header: defaultHeader(), Body: objBody(codec, &rc.Items[0])}, nil + default: + t.Fatalf("unexpected request: %#v\n%#v", req.URL, req) + return nil, nil + } + }), + } + tf.Namespace = "test" + buf := bytes.NewBuffer([]byte{}) + + cmd := NewCmdReplace(f, buf) + cmd.Flags().Set("filename", "../../../examples/guestbook/legacy/redis-master-controller.yaml") + cmd.Flags().Set("force", "true") + cmd.Flags().Set("cascade", "false") + cmd.Flags().Set("output", "name") + cmd.Run(cmd, []string{}) + + if buf.String() != "replicationcontroller/rc1\n" { + t.Errorf("unexpected output: %s", buf.String()) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/rollingupdate.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/rollingupdate.go index 221a01bcc116..d47bfaf0aef0 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/rollingupdate.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/rollingupdate.go @@ -97,10 +97,13 @@ func NewCmdRollingUpdate(f *cmdutil.Factory, out io.Writer) *cobra.Command { cmd.MarkFlagRequired("image") cmd.Flags().String("deployment-label-key", 
"deployment", "The key to use to differentiate between two different controllers, default 'deployment'. Only relevant when --image is specified, ignored otherwise") cmd.Flags().String("container", "", "Container name which will have its image upgraded. Only relevant when --image is specified, ignored otherwise. Required when using --image on a multi-container pod") - cmd.Flags().Bool("dry-run", false, "If true, print out the changes that would be made, but don't actually make them.") + cmd.Flags().String("image-pull-policy", "", "Explicit policy for when to pull container images. Required when --image is same as existing image, ignored otherwise.") cmd.Flags().Bool("rollback", false, "If true, this is a request to abort an existing rollout that is partially rolled out. It effectively reverses current and next and runs a rollout") + cmdutil.AddDryRunFlag(cmd) cmdutil.AddValidateFlags(cmd) cmdutil.AddPrinterFlags(cmd) + cmdutil.AddInclude3rdPartyFlags(cmd) + return cmd } @@ -148,12 +151,13 @@ func RunRollingUpdate(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, arg deploymentKey := cmdutil.GetFlagString(cmd, "deployment-label-key") filename := "" image := cmdutil.GetFlagString(cmd, "image") + pullPolicy := cmdutil.GetFlagString(cmd, "image-pull-policy") oldName := args[0] rollback := cmdutil.GetFlagBool(cmd, "rollback") period := cmdutil.GetFlagDuration(cmd, "update-period") interval := cmdutil.GetFlagDuration(cmd, "poll-interval") timeout := cmdutil.GetFlagDuration(cmd, "timeout") - dryrun := cmdutil.GetFlagBool(cmd, "dry-run") + dryrun := cmdutil.GetDryRunFlag(cmd) outputFormat := cmdutil.GetFlagString(cmd, "output") container := cmdutil.GetFlagString(cmd, "container") @@ -189,7 +193,7 @@ func RunRollingUpdate(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, arg var keepOldName bool var replicasDefaulted bool - mapper, typer := f.Object() + mapper, typer := f.Object(cmdutil.GetIncludeThirdPartyAPIs(cmd)) if len(filename) != 0 { schema, err := f.Validator(cmdutil.GetFlagBool(cmd, "validate"), cmdutil.GetFlagString(cmd, "schema-cache-dir")) @@ -200,7 +204,7 @@ func RunRollingUpdate(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, arg request := resource.NewBuilder(mapper, typer, resource.ClientMapperFunc(f.ClientForMapping), f.Decoder(true)). Schema(schema). NamespaceParam(cmdNamespace).DefaultNamespace(). - FilenameParam(enforceNamespace, filename). + FilenameParam(enforceNamespace, false, filename). Do() obj, err := request.Object() if err != nil { @@ -217,8 +221,8 @@ func RunRollingUpdate(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, arg } newRc, ok = obj.(*api.ReplicationController) if !ok { - if gvk, err := typer.ObjectKind(obj); err == nil { - return cmdutil.UsageError(cmd, "%s contains a %v not a ReplicationController", filename, gvk) + if gvks, _, err := typer.ObjectKinds(obj); err == nil { + return cmdutil.UsageError(cmd, "%s contains a %v not a ReplicationController", filename, gvks[0]) } glog.V(4).Infof("Object %#v is not a ReplicationController", obj) return cmdutil.UsageError(cmd, "%s does not specify a valid ReplicationController", filename) @@ -231,8 +235,8 @@ func RunRollingUpdate(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, arg } } // If the --image option is specified, we need to create a new rc with at least one different selector - // than the old rc. This selector is the hash of the rc, which will differ because the new rc has a - // different image. + // than the old rc. 
This selector is the hash of the rc, with a suffix to provide uniqueness for + // same-image updates. if len(image) != 0 { codec := api.Codecs.LegacyCodec(client.APIVersion()) keepOldName = len(args) == 1 @@ -246,10 +250,21 @@ } fmt.Fprintf(out, "Found existing update in progress (%s), resuming.\n", newRc.Name) } else { + config := &kubectl.NewControllerConfig{ + Namespace: cmdNamespace, + OldName: oldName, + NewName: newName, + Image: image, + Container: container, + DeploymentKey: deploymentKey, + } if oldRc.Spec.Template.Spec.Containers[0].Image == image { - return cmdutil.UsageError(cmd, "Specified --image must be distinct from existing container image") + if len(pullPolicy) == 0 { + return cmdutil.UsageError(cmd, "--image-pull-policy (Always|Never|IfNotPresent) must be provided when --image is the same as the existing container image") + } + config.PullPolicy = api.PullPolicy(pullPolicy) } - newRc, err = kubectl.CreateNewControllerFromCurrentController(client, codec, cmdNamespace, oldName, newName, image, container, deploymentKey) + newRc, err = kubectl.CreateNewControllerFromCurrentController(client, codec, config) if err != nil { return err } @@ -260,6 +275,8 @@ func RunRollingUpdate(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, arg if err != nil { return err } + // If the new image is the same as the old one, the hash may not be distinct, so add a suffix. + oldHash += "-orig" oldRc, err = kubectl.UpdateExistingReplicationController(client, oldRc, cmdNamespace, newRc.Name, deploymentKey, oldHash, out) if err != nil { return err } @@ -310,10 +327,10 @@ func RunRollingUpdate(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, arg oldRcData.WriteString(oldRc.Name) newRcData.WriteString(newRc.Name) } else { - if err := f.PrintObject(cmd, oldRc, oldRcData); err != nil { + if err := f.PrintObject(cmd, mapper, oldRc, oldRcData); err != nil { return err } - if err := f.PrintObject(cmd, newRc, newRcData); err != nil { + if err := f.PrintObject(cmd, mapper, newRc, newRcData); err != nil { return err } } @@ -358,13 +375,13 @@ func RunRollingUpdate(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, arg return err } if outputFormat != "" { - return f.PrintObject(cmd, newRc, out) + return f.PrintObject(cmd, mapper, newRc, out) } - kind, err := api.Scheme.ObjectKind(newRc) + kinds, _, err := api.Scheme.ObjectKinds(newRc) if err != nil { return err } - _, res := meta.KindToResource(kind) + _, res := meta.KindToResource(kinds[0]) cmdutil.PrintSuccess(mapper, false, out, res.Resource, oldName, message) return nil } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/rollingupdate_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/rollingupdate_test.go new file mode 100644 index 000000000000..bf2204b27312 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/rollingupdate_test.go @@ -0,0 +1,90 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cmd + +import ( + "bytes" + "testing" +) + +func TestValidateArgs(t *testing.T) { + f, _, _ := NewAPIFactory() + + tests := []struct { + flags map[string]string + filenames []string + args []string + expectErr bool + testName string + }{ + { + expectErr: true, + testName: "nothing", + }, + { + flags: map[string]string{}, + args: []string{"foo"}, + expectErr: true, + testName: "no file, no image", + }, + { + filenames: []string{"bar.yaml"}, + args: []string{"foo"}, + testName: "valid file example", + }, + { + flags: map[string]string{ + "image": "foo:v2", + }, + args: []string{"foo"}, + testName: "missing second image name", + }, + { + flags: map[string]string{ + "image": "foo:v2", + }, + args: []string{"foo", "foo-v2"}, + testName: "valid image example", + }, + { + flags: map[string]string{ + "image": "foo:v2", + }, + filenames: []string{"bar.yaml"}, + args: []string{"foo", "foo-v2"}, + expectErr: true, + testName: "both filename and image example", + }, + } + for _, test := range tests { + out := &bytes.Buffer{} + cmd := NewCmdRollingUpdate(f, out) + + if test.flags != nil { + for key, val := range test.flags { + cmd.Flags().Set(key, val) + } + } + err := validateArguments(cmd, test.filenames, test.args) + if err != nil && !test.expectErr { + t.Errorf("unexpected error: %v (%s)", err, test.testName) + } + if err == nil && test.expectErr { + t.Errorf("unexpected non-error (%s)", test.testName) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/rollout/rollout.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/rollout/rollout.go index 2397a10f4115..998edfb64e29 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/rollout/rollout.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/rollout/rollout.go @@ -24,7 +24,7 @@ import ( ) const ( - rollout_long = `rollout manages a deployment using subcommands like "kubectl rollout undo deployment/abc"` + rollout_long = `Manages a deployment using subcommands like "kubectl rollout undo deployment/abc"` rollout_example = `# Rollback to the previous deployment kubectl rollout undo deployment/abc` rollout_valid_resources = `Valid resource types include: @@ -43,12 +43,13 @@ func NewCmdRollout(f *cmdutil.Factory, out io.Writer) *cobra.Command { cmd.Help() }, } - // subcommands cmd.AddCommand(NewCmdRolloutHistory(f, out)) cmd.AddCommand(NewCmdRolloutPause(f, out)) cmd.AddCommand(NewCmdRolloutResume(f, out)) cmd.AddCommand(NewCmdRolloutUndo(f, out)) + cmd.AddCommand(NewCmdRolloutStatus(f, out)) + return cmd } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/rollout/rollout_history.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/rollout/rollout_history.go index 5b487acda984..febb204500f7 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/rollout/rollout_history.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/rollout/rollout_history.go @@ -23,7 +23,6 @@ import ( "k8s.io/kubernetes/pkg/kubectl" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/resource" - "k8s.io/kubernetes/pkg/util/errors" "github.com/spf13/cobra" ) @@ -32,12 +31,16 @@ import ( // referencing the cmd.Flags() type HistoryOptions struct { Filenames []string + Recursive bool } const ( - history_long = `view previous rollout revisions and configurations.` + history_long = `View previous rollout revisions and 
configurations.` history_example = `# View the rollout history of a deployment -kubectl rollout history deployment/abc` +kubectl rollout history deployment/abc + +# View the details of deployment revision 3 +kubectl rollout history deployment/abc --revision=3` ) func NewCmdRolloutHistory(f *cmdutil.Factory, out io.Writer) *cobra.Command { @@ -56,6 +59,7 @@ func NewCmdRolloutHistory(f *cmdutil.Factory, out io.Writer) *cobra.Command { cmd.Flags().Int64("revision", 0, "See the details, including podTemplate of the revision specified") usage := "Filename, directory, or URL to a file identifying the resource to get from a server." kubectl.AddJsonFilenameFlag(cmd, &options.Filenames, usage) + cmdutil.AddRecursiveFlag(cmd, &options.Recursive) return cmd } @@ -65,40 +69,40 @@ func RunHistory(f *cmdutil.Factory, cmd *cobra.Command, out io.Writer, args []st } revisionDetail := cmdutil.GetFlagInt64(cmd, "revision") - mapper, typer := f.Object() + mapper, typer := f.Object(false) cmdNamespace, enforceNamespace, err := f.DefaultNamespace() if err != nil { return err } - infos, err := resource.NewBuilder(mapper, typer, resource.ClientMapperFunc(f.ClientForMapping), f.Decoder(true)). + r := resource.NewBuilder(mapper, typer, resource.ClientMapperFunc(f.ClientForMapping), f.Decoder(true)). NamespaceParam(cmdNamespace).DefaultNamespace(). - FilenameParam(enforceNamespace, options.Filenames...). + FilenameParam(enforceNamespace, options.Recursive, options.Filenames...). ResourceTypeOrNameArgs(true, args...). + ContinueOnError(). Latest(). Flatten(). - Do(). - Infos() + Do() + err = r.Err() if err != nil { return err } - errs := []error{} - for _, info := range infos { + err = r.Visit(func(info *resource.Info, err error) error { + if err != nil { + return err + } mapping := info.ResourceMapping() historyViewer, err := f.HistoryViewer(mapping) if err != nil { - errs = append(errs, err) - continue + return err } historyInfo, err := historyViewer.History(info.Namespace, info.Name) if err != nil { - errs = append(errs, err) - continue + return err } - formattedOutput := "" if revisionDetail > 0 { // Print details of a specific revision template, ok := historyInfo.RevisionToTemplate[revisionDetail] @@ -106,17 +110,16 @@ func RunHistory(f *cmdutil.Factory, cmd *cobra.Command, out io.Writer, args []st return fmt.Errorf("unable to find revision %d of %s %q", revisionDetail, mapping.Resource, info.Name) } fmt.Fprintf(out, "%s %q revision %d\n", mapping.Resource, info.Name, revisionDetail) - formattedOutput, err = kubectl.DescribePodTemplate(template) + kubectl.DescribePodTemplate(template, out) } else { // Print all revisions - formattedOutput, err = kubectl.PrintRolloutHistory(historyInfo, mapping.Resource, info.Name) - } - if err != nil { - errs = append(errs, err) - continue + formattedOutput, printErr := kubectl.PrintRolloutHistory(historyInfo, mapping.Resource, info.Name) + if printErr != nil { + return printErr + } + fmt.Fprintf(out, "%s\n", formattedOutput) } - fmt.Fprintf(out, "%s\n", formattedOutput) - } - - return errors.NewAggregate(errs) + return nil + }) + return err } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/rollout/rollout_pause.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/rollout/rollout_pause.go index 5575ab714fae..734f62ef3868 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/rollout/rollout_pause.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/rollout/rollout_pause.go @@ -17,7 +17,6 @@ limitations under the License. 
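// Editor's note — an illustrative sketch, not part of this diff. The RunHistory rewrite just
// above, and the pause/resume/undo changes that follow, all converge on one traversal shape:
// build a resource.Result, surface the builder error once via r.Err(), then Visit each
// resource.Info so per-item failures are aggregated instead of aborting the whole walk.
// A minimal sketch of that shared pattern, assuming only the packages these files already
// import (fmt, io, resource, utilerrors); visitAndReport is a hypothetical name, not a
// kubectl helper:
//
//	func visitAndReport(r *resource.Result, out io.Writer) error {
//		allErrs := []error{}
//		if err := r.Err(); err != nil {
//			return err // builder-level error: nothing to visit
//		}
//		err := r.Visit(func(info *resource.Info, err error) error {
//			if err != nil {
//				return err // per-item error; with ContinueOnError() the walk goes on
//			}
//			fmt.Fprintf(out, "%s/%s\n", info.Mapping.Resource, info.Name)
//			return nil
//		})
//		if err != nil {
//			allErrs = append(allErrs, err)
//		}
//		return utilerrors.NewAggregate(allErrs)
//	}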
package rollout import ( - "fmt" "io" "github.com/spf13/cobra" @@ -27,6 +26,7 @@ import ( cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/resource" "k8s.io/kubernetes/pkg/runtime" + utilerrors "k8s.io/kubernetes/pkg/util/errors" ) // PauseConfig is the start of the data required to perform the operation. As new fields are added, add them here instead of @@ -35,10 +35,11 @@ type PauseConfig struct { PauseObject func(object runtime.Object) (bool, error) Mapper meta.RESTMapper Typer runtime.ObjectTyper - Info *resource.Info + Infos []*resource.Info Out io.Writer Filenames []string + Recursive bool } const ( @@ -63,13 +64,22 @@ func NewCmdRolloutPause(f *cmdutil.Factory, out io.Writer) *cobra.Command { Long: pause_long, Example: pause_example, Run: func(cmd *cobra.Command, args []string) { - cmdutil.CheckErr(opts.CompletePause(f, cmd, out, args)) - cmdutil.CheckErr(opts.RunPause()) + allErrs := []error{} + err := opts.CompletePause(f, cmd, out, args) + if err != nil { + allErrs = append(allErrs, err) + } + err = opts.RunPause() + if err != nil { + allErrs = append(allErrs, err) + } + cmdutil.CheckErr(utilerrors.Flatten(utilerrors.NewAggregate(allErrs))) }, } usage := "Filename, directory, or URL to a file identifying the resource to get from a server." kubectl.AddJsonFilenameFlag(cmd, &opts.Filenames, usage) + cmdutil.AddRecursiveFlag(cmd, &opts.Recursive) return cmd } @@ -78,7 +88,7 @@ func (o *PauseConfig) CompletePause(f *cmdutil.Factory, cmd *cobra.Command, out return cmdutil.UsageError(cmd, cmd.Use) } - o.Mapper, o.Typer = f.Object() + o.Mapper, o.Typer = f.Object(false) o.PauseObject = f.PauseObject o.Out = out @@ -87,32 +97,39 @@ func (o *PauseConfig) CompletePause(f *cmdutil.Factory, cmd *cobra.Command, out return err } - infos, err := resource.NewBuilder(o.Mapper, o.Typer, resource.ClientMapperFunc(f.ClientForMapping), f.Decoder(true)). + r := resource.NewBuilder(o.Mapper, o.Typer, resource.ClientMapperFunc(f.ClientForMapping), f.Decoder(true)). NamespaceParam(cmdNamespace).DefaultNamespace(). - FilenameParam(enforceNamespace, o.Filenames...). + FilenameParam(enforceNamespace, o.Recursive, o.Filenames...). ResourceTypeOrNameArgs(true, args...). - SingleResourceType(). + ContinueOnError(). Latest(). - Do().Infos() + Flatten(). 
+ Do() + err = r.Err() if err != nil { return err } - if len(infos) != 1 { - return fmt.Errorf("rollout pause is only supported on individual resources - %d resources were found", len(infos)) + + o.Infos, err = r.Infos() + if err != nil { + return err } - o.Info = infos[0] return nil } func (o PauseConfig) RunPause() error { - isAlreadyPaused, err := o.PauseObject(o.Info.Object) - if err != nil { - return err - } - if isAlreadyPaused { - cmdutil.PrintSuccess(o.Mapper, false, o.Out, o.Info.Mapping.Resource, o.Info.Name, "already paused") - return nil + allErrs := []error{} + for _, info := range o.Infos { + isAlreadyPaused, err := o.PauseObject(info.Object) + if err != nil { + allErrs = append(allErrs, cmdutil.AddSourceToErr("pausing", info.Source, err)) + continue + } + if isAlreadyPaused { + cmdutil.PrintSuccess(o.Mapper, false, o.Out, info.Mapping.Resource, info.Name, "already paused") + continue + } + cmdutil.PrintSuccess(o.Mapper, false, o.Out, info.Mapping.Resource, info.Name, "paused") } - cmdutil.PrintSuccess(o.Mapper, false, o.Out, o.Info.Mapping.Resource, o.Info.Name, "paused") - return nil + return utilerrors.NewAggregate(allErrs) } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/rollout/rollout_resume.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/rollout/rollout_resume.go index dbcd8774ccdd..101d3e67cd28 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/rollout/rollout_resume.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/rollout/rollout_resume.go @@ -17,7 +17,6 @@ limitations under the License. package rollout import ( - "fmt" "io" "github.com/spf13/cobra" @@ -27,6 +26,7 @@ import ( cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/resource" "k8s.io/kubernetes/pkg/runtime" + utilerrors "k8s.io/kubernetes/pkg/util/errors" ) // ResumeConfig is the start of the data required to perform the operation. As new fields are added, add them here instead of @@ -35,10 +35,11 @@ type ResumeConfig struct { ResumeObject func(object runtime.Object) (bool, error) Mapper meta.RESTMapper Typer runtime.ObjectTyper - Info *resource.Info + Infos []*resource.Info Out io.Writer Filenames []string + Recursive bool } const ( @@ -61,13 +62,22 @@ func NewCmdRolloutResume(f *cmdutil.Factory, out io.Writer) *cobra.Command { Long: resume_long, Example: resume_example, Run: func(cmd *cobra.Command, args []string) { - cmdutil.CheckErr(opts.CompleteResume(f, cmd, out, args)) - cmdutil.CheckErr(opts.RunResume()) + allErrs := []error{} + err := opts.CompleteResume(f, cmd, out, args) + if err != nil { + allErrs = append(allErrs, err) + } + err = opts.RunResume() + if err != nil { + allErrs = append(allErrs, err) + } + cmdutil.CheckErr(utilerrors.Flatten(utilerrors.NewAggregate(allErrs))) }, } usage := "Filename, directory, or URL to a file identifying the resource to get from a server." kubectl.AddJsonFilenameFlag(cmd, &opts.Filenames, usage) + cmdutil.AddRecursiveFlag(cmd, &opts.Recursive) return cmd } @@ -76,7 +86,7 @@ func (o *ResumeConfig) CompleteResume(f *cmdutil.Factory, cmd *cobra.Command, ou return cmdutil.UsageError(cmd, cmd.Use) } - o.Mapper, o.Typer = f.Object() + o.Mapper, o.Typer = f.Object(false) o.ResumeObject = f.ResumeObject o.Out = out @@ -85,32 +95,45 @@ func (o *ResumeConfig) CompleteResume(f *cmdutil.Factory, cmd *cobra.Command, ou return err } - infos, err := resource.NewBuilder(o.Mapper, o.Typer, resource.ClientMapperFunc(f.ClientForMapping), f.Decoder(true)). 
+ r := resource.NewBuilder(o.Mapper, o.Typer, resource.ClientMapperFunc(f.ClientForMapping), f.Decoder(true)). NamespaceParam(cmdNamespace).DefaultNamespace(). - FilenameParam(enforceNamespace, o.Filenames...). + FilenameParam(enforceNamespace, o.Recursive, o.Filenames...). ResourceTypeOrNameArgs(true, args...). - SingleResourceType(). + ContinueOnError(). Latest(). - Do().Infos() + Flatten(). + Do() + err = r.Err() if err != nil { return err } - if len(infos) != 1 { - return fmt.Errorf("rollout resume is only supported on individual resources - %d resources were found", len(infos)) + + err = r.Visit(func(info *resource.Info, err error) error { + if err != nil { + return err + } + o.Infos = append(o.Infos, info) + return nil + }) + if err != nil { + return err } - o.Info = infos[0] return nil } func (o ResumeConfig) RunResume() error { - isAlreadyResumed, err := o.ResumeObject(o.Info.Object) - if err != nil { - return err - } - if isAlreadyResumed { - cmdutil.PrintSuccess(o.Mapper, false, o.Out, o.Info.Mapping.Resource, o.Info.Name, "already resumed") - return nil + allErrs := []error{} + for _, info := range o.Infos { + isAlreadyResumed, err := o.ResumeObject(info.Object) + if err != nil { + allErrs = append(allErrs, cmdutil.AddSourceToErr("resuming", info.Source, err)) + continue + } + if isAlreadyResumed { + cmdutil.PrintSuccess(o.Mapper, false, o.Out, info.Mapping.Resource, info.Name, "already resumed") + continue + } + cmdutil.PrintSuccess(o.Mapper, false, o.Out, info.Mapping.Resource, info.Name, "resumed") } - cmdutil.PrintSuccess(o.Mapper, false, o.Out, o.Info.Mapping.Resource, o.Info.Name, "resumed") - return nil + return utilerrors.NewAggregate(allErrs) } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/rollout/rollout_status.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/rollout/rollout_status.go new file mode 100644 index 000000000000..e0cf32716e46 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/rollout/rollout_status.go @@ -0,0 +1,142 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rollout + +import ( + "fmt" + "io" + + "k8s.io/kubernetes/pkg/kubectl" + cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" + "k8s.io/kubernetes/pkg/kubectl/resource" + "k8s.io/kubernetes/pkg/watch" + + "github.com/spf13/cobra" +) + +// StatusOptions is the start of the data required to perform the operation. 
As new fields are added, add them here instead of +// referencing the cmd.Flags() +type StatusOptions struct { + Filenames []string + Recursive bool +} + +const ( + status_long = `Watch the status of the current rollout until it's done.` + status_example = `# Watch the rollout status of a deployment +kubectl rollout status deployment/nginx` +) + +func NewCmdRolloutStatus(f *cmdutil.Factory, out io.Writer) *cobra.Command { + options := &StatusOptions{} + + cmd := &cobra.Command{ + Use: "status (TYPE NAME | TYPE/NAME) [flags]", + Short: "Watch rollout status until it's done", + Long: status_long, + Example: status_example, + Run: func(cmd *cobra.Command, args []string) { + cmdutil.CheckErr(RunStatus(f, cmd, out, args, options)) + }, + } + + usage := "Filename, directory, or URL to a file identifying the resource to get from a server." + kubectl.AddJsonFilenameFlag(cmd, &options.Filenames, usage) + cmdutil.AddRecursiveFlag(cmd, &options.Recursive) + return cmd +} + +func RunStatus(f *cmdutil.Factory, cmd *cobra.Command, out io.Writer, args []string, options *StatusOptions) error { + if len(args) == 0 && len(options.Filenames) == 0 { + return cmdutil.UsageError(cmd, "Required resource not specified.") + } + + mapper, typer := f.Object(false) + + cmdNamespace, enforceNamespace, err := f.DefaultNamespace() + if err != nil { + return err + } + + r := resource.NewBuilder(mapper, typer, resource.ClientMapperFunc(f.ClientForMapping), f.Decoder(true)). + NamespaceParam(cmdNamespace).DefaultNamespace(). + FilenameParam(enforceNamespace, options.Recursive, options.Filenames...). + ResourceTypeOrNameArgs(true, args...). + SingleResourceType(). + Latest(). + Do() + err = r.Err() + if err != nil { + return err + } + + infos, err := r.Infos() + if err != nil { + return err + } + if len(infos) != 1 { + return fmt.Errorf("rollout status is only supported on individual resources - %d resources were found", len(infos)) + } + info := infos[0] + mapping := info.ResourceMapping() + + obj, err := r.Object() + if err != nil { + return err + } + rv, err := mapping.MetadataAccessor.ResourceVersion(obj) + if err != nil { + return err + } + + statusViewer, err := f.StatusViewer(mapping) + if err != nil { + return err + } + + // check whether the deployment has finished its rollout + status, done, err := statusViewer.Status(cmdNamespace, info.Name) + if err != nil { + return err + } + fmt.Fprintf(out, "%s", status) + if done { + return nil + } + + // watch for changes to the deployment + w, err := r.Watch(rv) + if err != nil { + return err + } + + // if the rollout isn't done yet, keep watching deployment status + kubectl.WatchLoop(w, func(e watch.Event) error { + // print the deployment's status + status, done, err := statusViewer.Status(cmdNamespace, info.Name) + if err != nil { + return err + } + fmt.Fprintf(out, "%s", status) + // Quit waiting if the rollout is done + if done { + w.Stop() + } + return nil + }) + return nil +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/rollout/rollout_undo.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/rollout/rollout_undo.go index 4dba41275f21..b0c84a717e05 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/rollout/rollout_undo.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/rollout/rollout_undo.go @@ -17,7 +17,6 @@ limitations under the License.
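// Editor's note — an illustrative sketch, not part of this diff. RunStatus above terminates
// by re-running the status probe on every watch event and calling w.Stop() once the viewer
// reports done; stopping the watch closes the event channel, which ends the loop. The same
// shape written against a bare watch.Interface (imported by rollout_status.go above);
// watchUntilDone and probe are hypothetical names, everything else mirrors the code above:
//
//	func watchUntilDone(w watch.Interface, out io.Writer, probe func() (string, bool, error)) error {
//		for range w.ResultChan() { // each event triggers a fresh status check
//			status, done, err := probe()
//			if err != nil {
//				return err
//			}
//			fmt.Fprintf(out, "%s", status)
//			if done {
//				w.Stop() // closes the event stream, which ends this loop
//				return nil
//			}
//		}
//		return nil
//	}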
package rollout import ( - "fmt" "io" "k8s.io/kubernetes/pkg/api/meta" @@ -25,6 +24,7 @@ import ( cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/resource" "k8s.io/kubernetes/pkg/runtime" + utilerrors "k8s.io/kubernetes/pkg/util/errors" "github.com/spf13/cobra" ) @@ -32,23 +32,28 @@ import ( // UndoOptions is the start of the data required to perform the operation. As new fields are added, add them here instead of // referencing the cmd.Flags() type UndoOptions struct { - Rollbacker kubectl.Rollbacker - Mapper meta.RESTMapper - Typer runtime.ObjectTyper - Info *resource.Info - ToRevision int64 - Out io.Writer - Filenames []string + Rollbackers []kubectl.Rollbacker + Mapper meta.RESTMapper + Typer runtime.ObjectTyper + Infos []*resource.Info + ToRevision int64 + + Out io.Writer + Filenames []string + Recursive bool } const ( - undo_long = `undo rolls back to a previous rollout.` + undo_long = `Roll back to a previous rollout.` undo_example = `# Rollback to the previous deployment -kubectl rollout undo deployment/abc` +kubectl rollout undo deployment/abc + +# Roll back to deployment revision 3 +kubectl rollout undo deployment/abc --to-revision=3` ) func NewCmdRolloutUndo(f *cmdutil.Factory, out io.Writer) *cobra.Command { - options := &UndoOptions{} + opts := &UndoOptions{} cmd := &cobra.Command{ Use: "undo (TYPE NAME | TYPE/NAME) [flags]", @@ -56,14 +61,23 @@ func NewCmdRolloutUndo(f *cmdutil.Factory, out io.Writer) *cobra.Command { Long: undo_long, Example: undo_example, Run: func(cmd *cobra.Command, args []string) { - cmdutil.CheckErr(options.CompleteUndo(f, cmd, out, args)) - cmdutil.CheckErr(options.RunUndo()) + allErrs := []error{} + err := opts.CompleteUndo(f, cmd, out, args) + if err != nil { + allErrs = append(allErrs, err) + } + err = opts.RunUndo() + if err != nil { + allErrs = append(allErrs, err) + } + cmdutil.CheckErr(utilerrors.Flatten(utilerrors.NewAggregate(allErrs))) }, } cmd.Flags().Int64("to-revision", 0, "The revision to rollback to. Default to 0 (last revision).") usage := "Filename, directory, or URL to a file identifying the resource to get from a server." - kubectl.AddJsonFilenameFlag(cmd, &options.Filenames, usage) + kubectl.AddJsonFilenameFlag(cmd, &opts.Filenames, usage) + cmdutil.AddRecursiveFlag(cmd, &opts.Recursive) return cmd } @@ -73,7 +87,7 @@ func (o *UndoOptions) CompleteUndo(f *cmdutil.Factory, cmd *cobra.Command, out i } o.ToRevision = cmdutil.GetFlagInt64(cmd, "to-revision") - o.Mapper, o.Typer = f.Object() + o.Mapper, o.Typer = f.Object(false) o.Out = out cmdNamespace, enforceNamespace, err := f.DefaultNamespace() @@ -81,31 +95,43 @@ func (o *UndoOptions) CompleteUndo(f *cmdutil.Factory, cmd *cobra.Command, out i return err } - infos, err := resource.NewBuilder(o.Mapper, o.Typer, resource.ClientMapperFunc(f.ClientForMapping), f.Decoder(true)). + r := resource.NewBuilder(o.Mapper, o.Typer, resource.ClientMapperFunc(f.ClientForMapping), f.Decoder(true)). NamespaceParam(cmdNamespace).DefaultNamespace(). - FilenameParam(enforceNamespace, o.Filenames...). + FilenameParam(enforceNamespace, o.Recursive, o.Filenames...). ResourceTypeOrNameArgs(true, args...). + ContinueOnError(). Latest(). Flatten(). - Do().
- Infos() + Do() + err = r.Err() if err != nil { return err } - if len(infos) != 1 { - return fmt.Errorf("rollout undo is only supported on individual resources - %d resources were found", len(infos)) - } - o.Info = infos[0] - o.Rollbacker, err = f.Rollbacker(o.Info.ResourceMapping()) + err = r.Visit(func(info *resource.Info, err error) error { + if err != nil { + return err + } + rollbacker, err := f.Rollbacker(info.ResourceMapping()) + if err != nil { + return err + } + o.Infos = append(o.Infos, info) + o.Rollbackers = append(o.Rollbackers, rollbacker) + return nil + }) return err } func (o *UndoOptions) RunUndo() error { - result, err := o.Rollbacker.Rollback(o.Info.Namespace, o.Info.Name, nil, o.ToRevision, o.Info.Object) - if err != nil { - return err + allErrs := []error{} + for ix, info := range o.Infos { + result, err := o.Rollbackers[ix].Rollback(info.Namespace, info.Name, nil, o.ToRevision, info.Object) + if err != nil { + allErrs = append(allErrs, cmdutil.AddSourceToErr("undoing", info.Source, err)) + continue + } + cmdutil.PrintSuccess(o.Mapper, false, o.Out, info.Mapping.Resource, info.Name, result) } - cmdutil.PrintSuccess(o.Mapper, false, o.Out, o.Info.Mapping.Resource, o.Info.Name, result) - return nil + return utilerrors.NewAggregate(allErrs) } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/run.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/run.go index 526d18d5ddbb..548cb76256a3 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/run.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/run.go @@ -57,7 +57,7 @@ kubectl run nginx --image=nginx --dry-run kubectl run nginx --image=nginx --overrides='{ "apiVersion": "v1", "spec": { ... } }' # Start a single instance of busybox and keep it in the foreground, don't restart it if it exits. -kubectl run -i --tty busybox --image=busybox --restart=Never +kubectl run -i -t busybox --image=busybox --restart=Never # Start the nginx container using the default command, but use custom arguments (arg1 .. argN) for that command. kubectl run nginx --image=nginx -- ... @@ -87,29 +87,30 @@ func NewCmdRun(f *cmdutil.Factory, cmdIn io.Reader, cmdOut, cmdErr io.Writer) *c addRunFlags(cmd) cmdutil.AddApplyAnnotationFlags(cmd) cmdutil.AddRecordFlag(cmd) + cmdutil.AddInclude3rdPartyFlags(cmd) return cmd } func addRunFlags(cmd *cobra.Command) { + cmdutil.AddDryRunFlag(cmd) cmd.Flags().String("generator", "", "The name of the API generator to use. Default is 'deployment/v1beta1' if --restart=Always, otherwise the default is 'job/v1'. This will happen only for cluster version at least 1.2, for olders we will fallback to 'run/v1' for --restart=Always, 'run-pod/v1' for others.") cmd.Flags().String("image", "", "The image for the container to run.") cmd.MarkFlagRequired("image") cmd.Flags().IntP("replicas", "r", 1, "Number of replicas to create for this container. Default is 1.") cmd.Flags().Bool("rm", false, "If true, delete resources created in this command for attached containers.") - cmd.Flags().Bool("dry-run", false, "If true, only print the object that would be sent, without sending it.") cmd.Flags().String("overrides", "", "An inline JSON override for the generated object. If this is non-empty, it is used to override the generated object. Requires that the object supply a valid apiVersion field.") cmd.Flags().StringSlice("env", []string{}, "Environment variables to set in the container") cmd.Flags().Int("port", -1, "The port that this container exposes. 
If --expose is true, this is also the port used by the service that is created.") cmd.Flags().Int("hostport", -1, "The host port mapping for the container port. To demonstrate a single-machine container.") cmd.Flags().StringP("labels", "l", "", "Labels to apply to the pod(s).") cmd.Flags().BoolP("stdin", "i", false, "Keep stdin open on the container(s) in the pod, even if nothing is attached.") - cmd.Flags().Bool("tty", false, "Allocated a TTY for each container in the pod. Because -t is currently shorthand for --template, -t is not supported for --tty. This shorthand is deprecated and we expect to adopt -t for --tty soon.") + cmd.Flags().BoolP("tty", "t", false, "Allocate a TTY for each container in the pod.") cmd.Flags().Bool("attach", false, "If true, wait for the Pod to start running, and then attach to the Pod as if 'kubectl attach ...' were called. Default false, unless '-i/--interactive' is set, in which case the default is true.") cmd.Flags().Bool("leave-stdin-open", false, "If the pod is started in interactive mode or with stdin, leave stdin open after the first attach completes. By default, stdin will be closed after the first attach completes.") cmd.Flags().String("restart", "Always", "The restart policy for this Pod. Legal values [Always, OnFailure, Never]. If set to 'Always' a deployment is created for this pod, if set to OnFailure or Never, a job is created for this pod and --replicas must be 1. Default 'Always'") cmd.Flags().Bool("command", false, "If true and extra arguments are present, use them as the 'command' field in the container, rather than the 'args' field which is the default.") - cmd.Flags().String("requests", "", "The resource requirement requests for this container. For example, 'cpu=100m,memory=256Mi'") - cmd.Flags().String("limits", "", "The resource requirement limits for this container. For example, 'cpu=200m,memory=512Mi'") + cmd.Flags().String("requests", "", "The resource requirement requests for this container. For example, 'cpu=100m,memory=256Mi'. Note that server-side components may assign requests depending on the server configuration, such as limit ranges.") + cmd.Flags().String("limits", "", "The resource requirement limits for this container. For example, 'cpu=200m,memory=512Mi'. Note that server-side components may assign limits depending on the server configuration, such as limit ranges.") cmd.Flags().Bool("expose", false, "If true, a public, external service is created for the container(s) which are run") cmd.Flags().String("service-generator", "service/v2", "The name of the generator to use for creating a service. Only used if --expose is true") cmd.Flags().String("service-overrides", "", "An inline JSON override for the generated service object. If this is non-empty, it is used to override the generated object. Requires that the object supply a valid apiVersion field.
Only used if --expose is true.") @@ -128,7 +129,7 @@ func Run(f *cmdutil.Factory, cmdIn io.Reader, cmdOut, cmdErr io.Writer, cmd *cob interactive := cmdutil.GetFlagBool(cmd, "stdin") tty := cmdutil.GetFlagBool(cmd, "tty") if tty && !interactive { - return cmdutil.UsageError(cmd, "-i/--stdin is required for containers with --tty=true") + return cmdutil.UsageError(cmd, "-i/--stdin is required for containers with -t/--tty=true") } replicas := cmdutil.GetFlagInt(cmd, "replicas") if interactive && replicas != 1 { @@ -256,7 +257,7 @@ func Run(f *cmdutil.Factory, cmdIn io.Reader, cmdOut, cmdErr io.Writer, cmd *cob if err != nil { return err } - _, typer := f.Object() + _, typer := f.Object(cmdutil.GetIncludeThirdPartyAPIs(cmd)) r := resource.NewBuilder(mapper, typer, resource.ClientMapperFunc(f.ClientForMapping), f.Decoder(true)). ContinueOnError(). NamespaceParam(namespace).DefaultNamespace(). @@ -270,7 +271,7 @@ func Run(f *cmdutil.Factory, cmdIn io.Reader, cmdOut, cmdErr io.Writer, cmd *cob outputFormat := cmdutil.GetFlagString(cmd, "output") if outputFormat != "" { - return f.PrintObject(cmd, obj, cmdOut) + return f.PrintObject(cmd, mapper, obj, cmdOut) } cmdutil.PrintSuccess(mapper, false, cmdOut, mapping.Resource, args[0], "created") return nil @@ -342,6 +343,7 @@ func handleAttachPod(f *cmdutil.Factory, c *client.Client, pod *api.Pod, opts *A opts.Client = c opts.PodName = pod.Name opts.Namespace = pod.Namespace + opts.CommandName = "kubectl attach" if err := opts.Run(); err != nil { fmt.Fprintf(opts.Out, "Error attaching, falling back to logs: %v\n", err) req, err := f.LogsForObject(pod, &api.PodLogOptions{Container: opts.GetContainerName(pod)}) @@ -421,7 +423,7 @@ func generateService(f *cmdutil.Factory, cmd *cobra.Command, args []string, serv } if cmdutil.GetFlagString(cmd, "output") != "" { - return f.PrintObject(cmd, obj, out) + return f.PrintObject(cmd, mapper, obj, out) } cmdutil.PrintSuccess(mapper, false, out, mapping.Resource, args[0], "created") @@ -440,11 +442,12 @@ func createGeneratedObject(f *cmdutil.Factory, cmd *cobra.Command, generator kub return nil, "", nil, nil, err } - mapper, typer := f.Object() - groupVersionKind, err := typer.ObjectKind(obj) + mapper, typer := f.Object(cmdutil.GetIncludeThirdPartyAPIs(cmd)) + groupVersionKinds, _, err := typer.ObjectKinds(obj) if err != nil { return nil, "", nil, nil, err } + groupVersionKind := groupVersionKinds[0] if len(overrides) > 0 { codec := runtime.NewCodec(f.JSONEncoder(), f.Decoder(true)) @@ -472,8 +475,7 @@ func createGeneratedObject(f *cmdutil.Factory, cmd *cobra.Command, generator kub return nil, "", nil, nil, err } } - // TODO: extract this flag to a central location, when such a location exists. - if !cmdutil.GetFlagBool(cmd, "dry-run") { + if !cmdutil.GetDryRunFlag(cmd) { resourceMapper := &resource.Mapper{ ObjectTyper: typer, RESTMapper: mapper, diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/run_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/run_test.go new file mode 100644 index 000000000000..13cbf036401f --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/run_test.go @@ -0,0 +1,332 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cmd + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "os" + "reflect" + "testing" + + "github.com/spf13/cobra" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/client/restclient" + "k8s.io/kubernetes/pkg/client/unversioned/fake" + cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/util/intstr" +) + +func TestGetRestartPolicy(t *testing.T) { + tests := []struct { + input string + interactive bool + expected api.RestartPolicy + expectErr bool + }{ + { + input: "", + expected: api.RestartPolicyAlways, + }, + { + input: "", + interactive: true, + expected: api.RestartPolicyOnFailure, + }, + { + input: string(api.RestartPolicyAlways), + interactive: true, + expected: api.RestartPolicyAlways, + }, + { + input: string(api.RestartPolicyNever), + interactive: true, + expected: api.RestartPolicyNever, + }, + { + input: string(api.RestartPolicyAlways), + expected: api.RestartPolicyAlways, + }, + { + input: string(api.RestartPolicyNever), + expected: api.RestartPolicyNever, + }, + { + input: "foo", + expectErr: true, + }, + } + for _, test := range tests { + cmd := &cobra.Command{} + cmd.Flags().String("restart", "", "dummy restart flag") + cmd.Flags().Lookup("restart").Value.Set(test.input) + policy, err := getRestartPolicy(cmd, test.interactive) + if test.expectErr && err == nil { + t.Error("unexpected non-error") + } + if !test.expectErr && err != nil { + t.Errorf("unexpected error: %v", err) + } + if !test.expectErr && policy != test.expected { + t.Errorf("expected: %s, saw: %s (%s:%v)", test.expected, policy, test.input, test.interactive) + } + } +} + +func TestGetEnv(t *testing.T) { + test := struct { + input []string + expected []string + }{ + input: []string{"a=b", "c=d"}, + expected: []string{"a=b", "c=d"}, + } + cmd := &cobra.Command{} + cmd.Flags().StringSlice("env", test.input, "") + + envStrings := cmdutil.GetFlagStringSlice(cmd, "env") + if len(envStrings) != 2 || !reflect.DeepEqual(envStrings, test.expected) { + t.Errorf("expected: %s, saw: %s", test.expected, envStrings) + } +} + +func TestRunArgsFollowDashRules(t *testing.T) { + _, _, rc := testData() + + tests := []struct { + args []string + argsLenAtDash int + expectError bool + name string + }{ + { + args: []string{}, + argsLenAtDash: -1, + expectError: true, + name: "empty", + }, + { + args: []string{"foo"}, + argsLenAtDash: -1, + expectError: false, + name: "no cmd", + }, + { + args: []string{"foo", "sleep"}, + argsLenAtDash: -1, + expectError: false, + name: "cmd no dash", + }, + { + args: []string{"foo", "sleep"}, + argsLenAtDash: 1, + expectError: false, + name: "cmd has dash", + }, + { + args: []string{"foo", "sleep"}, + argsLenAtDash: 0, + expectError: true, + name: "no name", + }, + } + for _, test := range tests { + f, tf, codec := NewAPIFactory() + tf.Client = &fake.RESTClient{ + Codec: codec, + Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { + return &http.Response{StatusCode: 201, Header: defaultHeader(), Body: objBody(codec, &rc.Items[0])}, nil + }), + } + tf.Namespace = 
"test" + tf.ClientConfig = &restclient.Config{} + cmd := NewCmdRun(f, os.Stdin, os.Stdout, os.Stderr) + cmd.Flags().Set("image", "nginx") + cmd.Flags().Set("generator", "run/v1") + err := Run(f, os.Stdin, os.Stdout, os.Stderr, cmd, test.args, test.argsLenAtDash) + if test.expectError && err == nil { + t.Errorf("unexpected non-error (%s)", test.name) + } + if !test.expectError && err != nil { + t.Errorf("unexpected error: %v (%s)", err, test.name) + } + } +} + +func TestGenerateService(t *testing.T) { + + tests := []struct { + port string + args []string + serviceGenerator string + params map[string]interface{} + expectErr bool + name string + service api.Service + expectPOST bool + }{ + { + port: "80", + args: []string{"foo"}, + serviceGenerator: "service/v2", + params: map[string]interface{}{ + "name": "foo", + }, + expectErr: false, + name: "basic", + service: api.Service{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + }, + Spec: api.ServiceSpec{ + Ports: []api.ServicePort{ + { + Port: 80, + Protocol: "TCP", + TargetPort: intstr.FromInt(80), + }, + }, + Selector: map[string]string{ + "run": "foo", + }, + Type: api.ServiceTypeClusterIP, + SessionAffinity: api.ServiceAffinityNone, + }, + }, + expectPOST: true, + }, + { + port: "80", + args: []string{"foo"}, + serviceGenerator: "service/v2", + params: map[string]interface{}{ + "name": "foo", + "labels": "app=bar", + }, + expectErr: false, + name: "custom labels", + service: api.Service{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Labels: map[string]string{"app": "bar"}, + }, + Spec: api.ServiceSpec{ + Ports: []api.ServicePort{ + { + Port: 80, + Protocol: "TCP", + TargetPort: intstr.FromInt(80), + }, + }, + Selector: map[string]string{ + "app": "bar", + }, + Type: api.ServiceTypeClusterIP, + SessionAffinity: api.ServiceAffinityNone, + }, + }, + expectPOST: true, + }, + { + expectErr: true, + name: "missing port", + expectPOST: false, + }, + { + port: "80", + args: []string{"foo"}, + serviceGenerator: "service/v2", + params: map[string]interface{}{ + "name": "foo", + }, + expectErr: false, + name: "dry-run", + expectPOST: false, + }, + } + for _, test := range tests { + sawPOST := false + f, tf, codec := NewAPIFactory() + tf.ClientConfig = &restclient.Config{ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}} + tf.Client = &fake.RESTClient{ + Codec: codec, + Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { + switch p, m := req.URL.Path, req.Method; { + case test.expectPOST && m == "POST" && p == "/namespaces/namespace/services": + sawPOST = true + body := objBody(codec, &test.service) + data, err := ioutil.ReadAll(req.Body) + if err != nil { + t.Errorf("unexpected error: %v", err) + t.FailNow() + } + defer req.Body.Close() + svc := &api.Service{} + if err := runtime.DecodeInto(codec, data, svc); err != nil { + t.Errorf("unexpected error: %v", err) + t.FailNow() + } + // Copy things that are defaulted by the system + test.service.Annotations = svc.Annotations + + if !reflect.DeepEqual(&test.service, svc) { + t.Errorf("expected:\n%v\nsaw:\n%v\n", &test.service, svc) + } + return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: body}, nil + default: + // Ensures no GET is performed when deleting by name + t.Errorf("%s: unexpected request: %s %#v\n%#v", test.name, req.Method, req.URL, req) + return nil, fmt.Errorf("unexpected request") + } + }), + } + cmd := &cobra.Command{} + cmd.Flags().String("output", "", "") + cmd.Flags().Bool(cmdutil.ApplyAnnotationsFlag, 
false, "") + cmd.Flags().Bool("record", false, "Record current kubectl command in the resource annotation.") + cmdutil.AddInclude3rdPartyFlags(cmd) + addRunFlags(cmd) + + if !test.expectPOST { + cmd.Flags().Set("dry-run", "true") + } + + if len(test.port) > 0 { + cmd.Flags().Set("port", test.port) + test.params["port"] = test.port + } + + buff := &bytes.Buffer{} + err := generateService(f, cmd, test.args, test.serviceGenerator, test.params, "namespace", buff) + if test.expectErr { + if err == nil { + t.Error("unexpected non-error") + } + continue + } + if err != nil { + t.Errorf("unexpected error: %v", err) + } + if test.expectPOST != sawPOST { + t.Errorf("expectPost: %v, sawPost: %v", test.expectPOST, sawPOST) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/scale.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/scale.go index b16f90cd9c59..bbd629a7d54d 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/scale.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/scale.go @@ -27,24 +27,24 @@ import ( "k8s.io/kubernetes/pkg/kubectl" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/resource" - utilerrors "k8s.io/kubernetes/pkg/util/errors" ) // ScaleOptions is the start of the data required to perform the operation. As new fields are added, add them here instead of // referencing the cmd.Flags() type ScaleOptions struct { Filenames []string + Recursive bool } const ( - scale_long = `Set a new size for a Replication Controller, Job, or Deployment. + scale_long = `Set a new size for a Deployment, ReplicaSet, Replication Controller, or Job. Scale also allows users to specify one or more preconditions for the scale action. If --current-replicas or --resource-version is specified, it is validated before the scale is attempted, and it is guaranteed that the precondition holds true when the scale is sent to the server.` - scale_example = `# Scale replication controller named 'foo' to 3. -kubectl scale --replicas=3 rc/foo + scale_example = `# Scale a replicaset named 'foo' to 3. +kubectl scale --replicas=3 rs/foo # Scale a resource identified by type and name specified in "foo.yaml" to 3. 
kubectl scale --replicas=3 -f foo.yaml @@ -67,7 +67,7 @@ func NewCmdScale(f *cmdutil.Factory, out io.Writer) *cobra.Command { Use: "scale [--resource-version=version] [--current-replicas=count] --replicas=COUNT (-f FILENAME | TYPE NAME)", // resize is deprecated Aliases: []string{"resize"}, - Short: "Set a new size for a Replication Controller, Job, or Deployment.", + Short: "Set a new size for a Deployment, ReplicaSet, Replication Controller, or Job.", Long: scale_long, Example: scale_example, Run: func(cmd *cobra.Command, args []string) { @@ -84,9 +84,11 @@ func NewCmdScale(f *cmdutil.Factory, out io.Writer) *cobra.Command { cmd.Flags().Duration("timeout", 0, "The length of time to wait before giving up on a scale operation, zero means don't wait.") cmdutil.AddOutputFlagsForMutation(cmd) cmdutil.AddRecordFlag(cmd) + cmdutil.AddInclude3rdPartyFlags(cmd) usage := "Filename, directory, or URL to a file identifying the resource to set a new size" kubectl.AddJsonFilenameFlag(cmd, &options.Filenames, usage) + cmdutil.AddRecursiveFlag(cmd, &options.Recursive) return cmd } @@ -106,11 +108,11 @@ func RunScale(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []stri return err } - mapper, typer := f.Object() + mapper, typer := f.Object(cmdutil.GetIncludeThirdPartyAPIs(cmd)) r := resource.NewBuilder(mapper, typer, resource.ClientMapperFunc(f.ClientForMapping), f.Decoder(true)). ContinueOnError(). NamespaceParam(cmdNamespace).DefaultNamespace(). - FilenameParam(enforceNamespace, options.Filenames...). + FilenameParam(enforceNamespace, options.Recursive, options.Filenames...). ResourceTypeOrNameArgs(false, args...). Flatten(). Do() @@ -119,43 +121,47 @@ func RunScale(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []stri return err } - infos, err := r.Infos() - if err != nil { - return err - } - info := infos[0] - mapping := info.ResourceMapping() - scaler, err := f.Scaler(mapping) - if err != nil { - return err - } + infos := []*resource.Info{} + err = r.Visit(func(info *resource.Info, err error) error { + if err == nil { + infos = append(infos, info) + } + return nil + }) resourceVersion := cmdutil.GetFlagString(cmd, "resource-version") if len(resourceVersion) != 0 && len(infos) > 1 { return fmt.Errorf("cannot use --resource-version with multiple resources") } - currentSize := cmdutil.GetFlagInt(cmd, "current-replicas") - if currentSize != -1 && len(infos) > 1 { - return fmt.Errorf("cannot use --current-replicas with multiple resources") - } - precondition := &kubectl.ScalePrecondition{Size: currentSize, ResourceVersion: resourceVersion} - retry := kubectl.NewRetryParams(kubectl.Interval, kubectl.Timeout) - var waitForReplicas *kubectl.RetryParams - if timeout := cmdutil.GetFlagDuration(cmd, "timeout"); timeout != 0 { - waitForReplicas = kubectl.NewRetryParams(kubectl.Interval, timeout) - } - errs := []error{} - for _, info := range infos { + counter := 0 + err = r.Visit(func(info *resource.Info, err error) error { + if err != nil { + return err + } + + mapping := info.ResourceMapping() + scaler, err := f.Scaler(mapping) + if err != nil { + return err + } + + currentSize := cmdutil.GetFlagInt(cmd, "current-replicas") + precondition := &kubectl.ScalePrecondition{Size: currentSize, ResourceVersion: resourceVersion} + retry := kubectl.NewRetryParams(kubectl.Interval, kubectl.Timeout) + + var waitForReplicas *kubectl.RetryParams + if timeout := cmdutil.GetFlagDuration(cmd, "timeout"); timeout != 0 { + waitForReplicas = kubectl.NewRetryParams(kubectl.Interval, timeout) + } + if err 
:= scaler.Scale(info.Namespace, info.Name, uint(count), precondition, retry, waitForReplicas); err != nil { - errs = append(errs, err) - continue + return err } if cmdutil.ShouldRecord(cmd, info) { patchBytes, err := cmdutil.ChangeResourcePatch(info, f.Command()) if err != nil { - errs = append(errs, err) - continue + return err } mapping := info.ResourceMapping() client, err := f.ClientForMapping(mapping) @@ -165,12 +171,18 @@ func RunScale(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []stri helper := resource.NewHelper(client, mapping) _, err = helper.Patch(info.Namespace, info.Name, api.StrategicMergePatchType, patchBytes) if err != nil { - errs = append(errs, err) - continue + return err } } + counter++ cmdutil.PrintSuccess(mapper, shortOutput, out, info.Mapping.Resource, info.Name, "scaled") + return nil + }) + if err != nil { + return err } - - return utilerrors.NewAggregate(errs) + if counter == 0 { + return fmt.Errorf("no objects passed to scale") + } + return nil } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/set/helper.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/set/helper.go new file mode 100644 index 000000000000..7a3a04a3d1fe --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/set/helper.go @@ -0,0 +1,151 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package set + +import ( + "fmt" + "io" + "strings" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/errors" + "k8s.io/kubernetes/pkg/kubectl/resource" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/util/strategicpatch" +) + +// selectContainers allows one or more containers to be matched against a string or wildcard +func selectContainers(containers []api.Container, spec string) ([]*api.Container, []*api.Container) { + out := []*api.Container{} + skipped := []*api.Container{} + for i, c := range containers { + if selectString(c.Name, spec) { + out = append(out, &containers[i]) + } else { + skipped = append(skipped, &containers[i]) + } + } + return out, skipped +} + +// handlePodUpdateError prints a more useful error to the end user when mutating a pod. +func handlePodUpdateError(out io.Writer, err error, resource string) { + if statusError, ok := err.(*errors.StatusError); ok && errors.IsInvalid(err) { + errorDetails := statusError.Status().Details + if errorDetails.Kind == "Pod" { + all, match := true, false + for _, cause := range errorDetails.Causes { + if cause.Field == "spec" && strings.Contains(cause.Message, "may not update fields other than") { + fmt.Fprintf(out, "error: may not update %s in pod %q directly\n", resource, errorDetails.Name) + match = true + } else { + all = false + } + } + if all && match { + return + } + } + } + + fmt.Fprintf(out, "error: %v\n", err) +} + +// selectString returns true if the provided string matches spec, where spec is a string with +// a non-greedy '*' wildcard operator. 
+// TODO: turn into a regex and handle greedy matches and backtracking. +func selectString(s, spec string) bool { + if spec == "*" { + return true + } + if !strings.Contains(spec, "*") { + return s == spec + } + + pos := 0 + match := true + parts := strings.Split(spec, "*") + for i, part := range parts { + if len(part) == 0 { + continue + } + next := strings.Index(s[pos:], part) + switch { + // next part not in string + case next < pos: + fallthrough + // first part does not match start of string + case i == 0 && pos != 0: + fallthrough + // last part does not exactly match remaining part of string + case i == (len(parts)-1) && len(s) != (len(part)+next): + match = false + break + default: + pos = next + } + } + return match +} + +// Patch represents the result of a mutation to an object. +type Patch struct { + Info *resource.Info + Err error + + Before []byte + After []byte + Patch []byte +} + +// CalculatePatches calls the mutation function on each provided info object, and generates a strategic merge patch for +// the changes in the object. Encoder must be able to encode the info into the appropriate destination type. If mutateFn +// returns false, the object is not included in the final list of patches. +func CalculatePatches(infos []*resource.Info, encoder runtime.Encoder, mutateFn func(*resource.Info) (bool, error)) []*Patch { + var patches []*Patch + for _, info := range infos { + patch := &Patch{Info: info} + patch.Before, patch.Err = runtime.Encode(encoder, info.Object) + + ok, err := mutateFn(info) + if !ok { + continue + } + if err != nil { + patch.Err = err + } + patches = append(patches, patch) + if patch.Err != nil { + continue + } + + patch.After, patch.Err = runtime.Encode(encoder, info.Object) + if patch.Err != nil { + continue + } + + // TODO: should be via New + versioned, err := info.Mapping.ConvertToVersion(info.Object, info.Mapping.GroupVersionKind.GroupVersion()) + if err != nil { + patch.Err = err + continue + } + + patch.Patch, patch.Err = strategicpatch.CreateTwoWayMergePatch(patch.Before, patch.After, versioned) + } + return patches +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/set/set.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/set/set.go new file mode 100644 index 000000000000..10d3d3790132 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/set/set.go @@ -0,0 +1,49 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package set + +import ( + "io" + + "github.com/spf13/cobra" + cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" +) + +const ( + set_long = `Configure application resources + +These commands help you make changes to existing application resources.` + set_example = `` +) + +func NewCmdSet(f *cmdutil.Factory, out io.Writer) *cobra.Command { + + cmd := &cobra.Command{ + Use: "set SUBCOMMAND", + Short: "Set specific features on objects", + Long: set_long, + Example: set_example, + Run: func(cmd *cobra.Command, args []string) { + cmd.Help() + }, + } + + // add subcommands + cmd.AddCommand(NewCmdImage(f, out)) + + return cmd +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/set/set_image.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/set/set_image.go new file mode 100644 index 000000000000..afe65adf4d5c --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/set/set_image.go @@ -0,0 +1,239 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package set + +import ( + "fmt" + "io" + + "github.com/spf13/cobra" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/meta" + "k8s.io/kubernetes/pkg/kubectl" + cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" + "k8s.io/kubernetes/pkg/kubectl/resource" + "k8s.io/kubernetes/pkg/runtime" + utilerrors "k8s.io/kubernetes/pkg/util/errors" +) + +// ImageOptions is the start of the data required to perform the operation. As new fields are added, add them here instead of +// referencing the cmd.Flags() +type ImageOptions struct { + Mapper meta.RESTMapper + Typer runtime.ObjectTyper + Infos []*resource.Info + Encoder runtime.Encoder + Selector string + Out io.Writer + Err io.Writer + Filenames []string + Recursive bool + ShortOutput bool + All bool + Record bool + ChangeCause string + Local bool + Cmd *cobra.Command + + PrintObject func(cmd *cobra.Command, mapper meta.RESTMapper, obj runtime.Object, out io.Writer) error + UpdatePodSpecForObject func(obj runtime.Object, fn func(*api.PodSpec) error) (bool, error) + Resources []string + ContainerImages map[string]string +} + +const ( + image_resources = ` + pod (po), replicationcontroller (rc), deployment, daemonset (ds), job, replicaset (rs)` + + image_long = `Update existing container image(s) of resources. + +Possible resources include (case insensitive):` + image_resources + + image_example = `# Set a deployment's nginx container image to 'nginx:1.9.1', and its busybox container image to 'busybox'. 
+kubectl set image deployment/nginx busybox=busybox nginx=nginx:1.9.1 + +# Update all deployments' and rc's nginx container's image to 'nginx:1.9.1' +kubectl set image deployments,rc nginx=nginx:1.9.1 --all + +# Update image of all containers of daemonset abc to 'nginx:1.9.1' +kubectl set image daemonset abc *=nginx:1.9.1 + +# Print result (in yaml format) of updating nginx container image from local file, without hitting the server +kubectl set image -f path/to/file.yaml nginx=nginx:1.9.1 --local -o yaml` +) + +func NewCmdImage(f *cmdutil.Factory, out io.Writer) *cobra.Command { + options := &ImageOptions{ + Out: out, + } + + cmd := &cobra.Command{ + Use: "image (-f FILENAME | TYPE NAME) CONTAINER_NAME_1=CONTAINER_IMAGE_1 ... CONTAINER_NAME_N=CONTAINER_IMAGE_N", + Short: "Update image of a pod template", + Long: image_long, + Example: image_example, + Run: func(cmd *cobra.Command, args []string) { + cmdutil.CheckErr(options.Complete(f, cmd, args)) + cmdutil.CheckErr(options.Validate()) + cmdutil.CheckErr(options.Run()) + }, + } + + cmdutil.AddPrinterFlags(cmd) + usage := "Filename, directory, or URL to a file identifying the resource to get from a server." + kubectl.AddJsonFilenameFlag(cmd, &options.Filenames, usage) + cmd.Flags().BoolVar(&options.All, "all", false, "select all resources in the namespace of the specified resource types") + cmd.Flags().StringVarP(&options.Selector, "selector", "l", "", "Selector (label query) to filter on") + cmd.Flags().BoolVar(&options.Local, "local", false, "If true, set image will NOT contact api-server but run locally.") + cmdutil.AddRecordFlag(cmd) + cmdutil.AddRecursiveFlag(cmd, &options.Recursive) + return cmd +} + +func (o *ImageOptions) Complete(f *cmdutil.Factory, cmd *cobra.Command, args []string) error { + o.Mapper, o.Typer = f.Object(cmdutil.GetIncludeThirdPartyAPIs(cmd)) + o.UpdatePodSpecForObject = f.UpdatePodSpecForObject + o.Encoder = f.JSONEncoder() + o.ShortOutput = cmdutil.GetFlagString(cmd, "output") == "name" + o.Record = cmdutil.GetRecordFlag(cmd) + o.ChangeCause = f.Command() + o.PrintObject = f.PrintObject + o.Cmd = cmd + + cmdNamespace, enforceNamespace, err := f.DefaultNamespace() + if err != nil { + return err + } + + o.Resources, o.ContainerImages, err = getResourcesAndImages(args) + if err != nil { + return err + } + + builder := resource.NewBuilder(o.Mapper, o.Typer, resource.ClientMapperFunc(f.ClientForMapping), f.Decoder(true)). + ContinueOnError(). + NamespaceParam(cmdNamespace).DefaultNamespace(). + FilenameParam(enforceNamespace, o.Recursive, o.Filenames...). + Flatten() + if !o.Local { + builder = builder. + SelectorParam(o.Selector). + ResourceTypeOrNameArgs(o.All, o.Resources...). 
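+			// Latest() below re-fetches each object from the server so the patch is
+			// computed against the current state; with --local this whole branch is
+			// skipped and only the file-based infos above are used.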
+			Latest()
+	}
+	o.Infos, err = builder.Do().Infos()
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (o *ImageOptions) Validate() error {
+	if len(o.Resources) < 1 && len(o.Filenames) == 0 {
+		return fmt.Errorf("one or more resources must be specified as <resource> <name> or <resource>/<name>")
+	}
+	if len(o.ContainerImages) < 1 {
+		return fmt.Errorf("at least one image update is required")
+	} else if len(o.ContainerImages) > 1 && hasWildcardKey(o.ContainerImages) {
+		return fmt.Errorf("all containers are already specified by *, but saw more than one container_name=container_image pair")
+	}
+	return nil
+}
+
+func (o *ImageOptions) Run() error {
+	allErrs := []error{}
+
+	patches := CalculatePatches(o.Infos, o.Encoder, func(info *resource.Info) (bool, error) {
+		transformed := false
+		_, err := o.UpdatePodSpecForObject(info.Object, func(spec *api.PodSpec) error {
+			for name, image := range o.ContainerImages {
+				containerFound := false
+				// Find the container to update, and update its image
+				for i, c := range spec.Containers {
+					if c.Name == name || name == "*" {
+						spec.Containers[i].Image = image
+						containerFound = true
+						// Perform updates
+						transformed = true
+					}
+				}
+				// Record an error if no container matched
+				if !containerFound {
+					allErrs = append(allErrs, fmt.Errorf("error: unable to find container named %q", name))
+				}
+			}
+			return nil
+		})
+		return transformed, err
+	})
+
+	for _, patch := range patches {
+		info := patch.Info
+		if patch.Err != nil {
+			allErrs = append(allErrs, fmt.Errorf("error: %s/%s %v\n", info.Mapping.Resource, info.Name, patch.Err))
+			continue
+		}
+
+		// no changes
+		if string(patch.Patch) == "{}" || len(patch.Patch) == 0 {
+			continue
+		}
+
+		if o.Local {
+			fmt.Fprintln(o.Out, "running in local mode...")
+			return o.PrintObject(o.Cmd, o.Mapper, info.Object, o.Out)
+		}
+
+		// patch the change
+		obj, err := resource.NewHelper(info.Client, info.Mapping).Patch(info.Namespace, info.Name, api.StrategicMergePatchType, patch.Patch)
+		if err != nil {
+			allErrs = append(allErrs, fmt.Errorf("failed to patch image update to pod template: %v\n", err))
+			continue
+		}
+		info.Refresh(obj, true)
+
+		// record this change (for rollout history)
+		if o.Record || cmdutil.ContainsChangeCause(info) {
+			if err := cmdutil.RecordChangeCause(obj, o.ChangeCause); err == nil {
+				if obj, err = resource.NewHelper(info.Client, info.Mapping).Replace(info.Namespace, info.Name, false, obj); err != nil {
+					allErrs = append(allErrs, fmt.Errorf("changes to %s/%s can't be recorded: %v\n", info.Mapping.Resource, info.Name, err))
+				}
+			}
+		}
+
+		info.Refresh(obj, true)
+		cmdutil.PrintSuccess(o.Mapper, o.ShortOutput, o.Out, info.Mapping.Resource, info.Name, "image updated")
+	}
+	return utilerrors.NewAggregate(allErrs)
+}
+
+// getResourcesAndImages retrieves resources and container name:image pairs from the given args
+func getResourcesAndImages(args []string) (resources []string, containerImages map[string]string, err error) {
+	pairType := "image"
+	resources, imageArgs, err := cmdutil.GetResourcesAndPairs(args, pairType)
+	if err != nil {
+		return
+	}
+	containerImages, _, err = cmdutil.ParsePairs(imageArgs, pairType, false)
+	return
+}
+
+func hasWildcardKey(containerImages map[string]string) bool {
+	_, ok := containerImages["*"]
+	return ok
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/stop.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/stop.go
index c147259d4f7c..fc25be9aebf0 100644
--- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/stop.go
+++
b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/stop.go @@ -30,6 +30,7 @@ import ( // referencing the cmd.Flags() type StopOptions struct { Filenames []string + Recursive bool } const ( @@ -69,12 +70,14 @@ func NewCmdStop(f *cmdutil.Factory, out io.Writer) *cobra.Command { } usage := "Filename, directory, or URL to file of resource(s) to be stopped." kubectl.AddJsonFilenameFlag(cmd, &options.Filenames, usage) + cmdutil.AddRecursiveFlag(cmd, &options.Recursive) cmd.Flags().StringP("selector", "l", "", "Selector (label query) to filter on.") cmd.Flags().Bool("all", false, "[-all] to select all the specified resources.") cmd.Flags().Bool("ignore-not-found", false, "Treat \"resource not found\" as a successful stop.") cmd.Flags().Int("grace-period", -1, "Period of time in seconds given to the resource to terminate gracefully. Ignored if negative.") cmd.Flags().Duration("timeout", 0, "The length of time to wait before giving up on a delete, zero means determine a timeout from the size of the object") cmdutil.AddOutputFlagsForMutation(cmd) + cmdutil.AddInclude3rdPartyFlags(cmd) return cmd } @@ -84,12 +87,12 @@ func RunStop(f *cmdutil.Factory, cmd *cobra.Command, args []string, out io.Write return err } - mapper, typer := f.Object() + mapper, typer := f.Object(cmdutil.GetIncludeThirdPartyAPIs(cmd)) r := resource.NewBuilder(mapper, typer, resource.ClientMapperFunc(f.ClientForMapping), f.Decoder(true)). ContinueOnError(). NamespaceParam(cmdNamespace).DefaultNamespace(). ResourceTypeOrNameArgs(false, args...). - FilenameParam(enforceNamespace, options.Filenames...). + FilenameParam(enforceNamespace, options.Recursive, options.Filenames...). SelectorParam(cmdutil.GetFlagString(cmd, "selector")). SelectAllParam(cmdutil.GetFlagBool(cmd, "all")). Flatten(). diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/taint.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/taint.go new file mode 100644 index 000000000000..8f85b88f65bc --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/taint.go @@ -0,0 +1,397 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package cmd
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"strings"
+
+	"github.com/golang/glog"
+	"github.com/spf13/cobra"
+	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/api/meta"
+	cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
+	"k8s.io/kubernetes/pkg/kubectl/resource"
+	"k8s.io/kubernetes/pkg/runtime"
+	utilerrors "k8s.io/kubernetes/pkg/util/errors"
+	"k8s.io/kubernetes/pkg/util/sets"
+	"k8s.io/kubernetes/pkg/util/strategicpatch"
+	"k8s.io/kubernetes/pkg/util/validation"
+)
+
+// TaintOptions has the data required to perform the taint operation
+type TaintOptions struct {
+	resources       []string
+	taintsToAdd     []api.Taint
+	removeTaintKeys []string
+	builder         *resource.Builder
+	selector        string
+	overwrite       bool
+	all             bool
+	f               *cmdutil.Factory
+	out             io.Writer
+	cmd             *cobra.Command
+}
+
+const (
+	taint_long = `Update the taints on one or more nodes.
+
+A taint consists of a key, value, and effect. As an argument here, it is expressed as key=value:effect.
+The key must begin with a letter or number, and may contain letters, numbers, hyphens, dots, and underscores, up to %[1]d characters.
+The value must begin with a letter or number, and may contain letters, numbers, hyphens, dots, and underscores, up to %[2]d characters.
+The effect must be NoSchedule or PreferNoSchedule.
+Currently, a taint can only be applied to a node.`
+	taint_example = `# Update node 'foo' with a taint with key 'dedicated' and value 'special-user' and effect 'NoSchedule'.
+# If a taint with that key already exists, its value and effect are replaced as specified.
+kubectl taint nodes foo dedicated=special-user:NoSchedule
+# Remove from node 'foo' the taint with key 'dedicated' if one exists.
+kubectl taint nodes foo dedicated-`
+)
+
+func NewCmdTaint(f *cmdutil.Factory, out io.Writer) *cobra.Command {
+	options := &TaintOptions{}
+
+	// retrieve a list of handled resources from printer as valid args
+	validArgs := []string{}
+	p, err := f.Printer(nil, false, false, false, false, false, false, []string{})
+	cmdutil.CheckErr(err)
+	if p != nil {
+		validArgs = p.HandledResources()
+	}
+
+	cmd := &cobra.Command{
+		Use: "taint NODE NAME KEY_1=VAL_1:TAINT_EFFECT_1 ...
KEY_N=VAL_N:TAINT_EFFECT_N", + Short: "Update the taints on one or more nodes", + Long: fmt.Sprintf(taint_long, validation.DNS1123SubdomainMaxLength, validation.LabelValueMaxLength), + Example: taint_example, + Run: func(cmd *cobra.Command, args []string) { + if err := options.Complete(f, out, cmd, args); err != nil { + cmdutil.CheckErr(err) + } + if err := options.Validate(args); err != nil { + cmdutil.CheckErr(cmdutil.UsageError(cmd, err.Error())) + } + if err := options.RunTaint(); err != nil { + cmdutil.CheckErr(err) + } + }, + ValidArgs: validArgs, + } + cmdutil.AddValidateFlags(cmd) + + cmdutil.AddPrinterFlags(cmd) + cmdutil.AddInclude3rdPartyFlags(cmd) + cmd.Flags().StringVarP(&options.selector, "selector", "l", "", "Selector (label query) to filter on") + cmd.Flags().BoolVar(&options.overwrite, "overwrite", false, "If true, allow taints to be overwritten, otherwise reject taint updates that overwrite existing taints.") + cmd.Flags().BoolVar(&options.all, "all", false, "select all nodes in the cluster") + return cmd +} + +func deleteTaintByKey(taints []api.Taint, key string) ([]api.Taint, error) { + newTaints := []api.Taint{} + found := false + for _, taint := range taints { + if taint.Key == key { + found = true + continue + } + newTaints = append(newTaints, taint) + } + + if !found { + return nil, fmt.Errorf("taint key=\"%s\" not found.", key) + } + return newTaints, nil +} + +// reorganizeTaints returns the updated set of taints, taking into account old taints that were not updated, +// old taints that were updated, old taints that were deleted, and new taints. +func reorganizeTaints(accessor meta.Object, overwrite bool, taintsToAdd []api.Taint, removeKeys []string) ([]api.Taint, error) { + newTaints := append([]api.Taint{}, taintsToAdd...) 
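+	// newTaints starts as a copy of the taints being added; existing taints whose
+	// keys are not being updated are appended below, and removals are applied
+	// last against the merged list.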
+
+	var oldTaints []api.Taint
+	var err error
+	annotations := accessor.GetAnnotations()
+	if annotations != nil {
+		if oldTaints, err = api.GetTaintsFromNodeAnnotations(annotations); err != nil {
+			return nil, err
+		}
+	}
+
+	// add taints that already exist but were not updated to newTaints
+	for _, oldTaint := range oldTaints {
+		existsInNew := false
+		for _, taint := range newTaints {
+			if taint.Key == oldTaint.Key {
+				existsInNew = true
+				break
+			}
+		}
+		if !existsInNew {
+			newTaints = append(newTaints, oldTaint)
+		}
+	}
+
+	allErrs := []error{}
+	for _, taintToRemove := range removeKeys {
+		newTaints, err = deleteTaintByKey(newTaints, taintToRemove)
+		if err != nil {
+			allErrs = append(allErrs, err)
+		}
+	}
+	return newTaints, utilerrors.NewAggregate(allErrs)
+}
+
+func parseTaints(spec []string) ([]api.Taint, []string, error) {
+	var taints []api.Taint
+	var remove []string
+	for _, taintSpec := range spec {
+		if strings.Contains(taintSpec, "=") && strings.Contains(taintSpec, ":") {
+			parts := strings.Split(taintSpec, "=")
+			if len(parts) != 2 || len(parts[1]) == 0 || len(validation.IsQualifiedName(parts[0])) > 0 {
+				return nil, nil, fmt.Errorf("invalid taint spec: %v", taintSpec)
+			}
+
+			parts2 := strings.Split(parts[1], ":")
+			errs := validation.IsValidLabelValue(parts2[0])
+			if len(parts2) != 2 || len(errs) != 0 {
+				return nil, nil, fmt.Errorf("invalid taint spec: %v, %s", taintSpec, strings.Join(errs, "; "))
+			}
+
+			if parts2[1] != string(api.TaintEffectNoSchedule) && parts2[1] != string(api.TaintEffectPreferNoSchedule) {
+				return nil, nil, fmt.Errorf("invalid taint spec: %v, unsupported taint effect", taintSpec)
+			}
+
+			effect := api.TaintEffect(parts2[1])
+			newTaint := api.Taint{
+				Key:    parts[0],
+				Value:  parts2[0],
+				Effect: effect,
+			}
+
+			taints = append(taints, newTaint)
+		} else if strings.HasSuffix(taintSpec, "-") {
+			remove = append(remove, taintSpec[:len(taintSpec)-1])
+		} else {
+			return nil, nil, fmt.Errorf("unknown taint spec: %v", taintSpec)
+		}
+	}
+	return taints, remove, nil
+}
+
+// Complete adapts from the command line args and factory to the data required.
+func (o *TaintOptions) Complete(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []string) (err error) {
+	namespace, _, err := f.DefaultNamespace()
+	if err != nil {
+		return err
+	}
+
+	// retrieves resource and taint args from args
+	// also checks args to verify that all resources are specified before taints
+	taintArgs := []string{}
+	metTaintArg := false
+	for _, s := range args {
+		isTaint := strings.Contains(s, "=") || strings.HasSuffix(s, "-")
+		switch {
+		case !metTaintArg && isTaint:
+			metTaintArg = true
+			fallthrough
+		case metTaintArg && isTaint:
+			taintArgs = append(taintArgs, s)
+		case !metTaintArg && !isTaint:
+			o.resources = append(o.resources, s)
+		case metTaintArg && !isTaint:
+			return fmt.Errorf("all resources must be specified before taint changes: %s", s)
+		}
+	}
+
+	if len(o.resources) < 1 {
+		return fmt.Errorf("one or more resources must be specified as <resource> <name>")
+	}
+	if len(taintArgs) < 1 {
+		return fmt.Errorf("at least one taint update is required")
+	}
+
+	if o.taintsToAdd, o.removeTaintKeys, err = parseTaints(taintArgs); err != nil {
+		return cmdutil.UsageError(cmd, err.Error())
+	}
+
+	mapper, typer := f.Object(cmdutil.GetIncludeThirdPartyAPIs(cmd))
+	o.builder = resource.NewBuilder(mapper, typer, resource.ClientMapperFunc(f.ClientForMapping), f.Decoder(true)).
+		ContinueOnError().
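+		// Nodes are cluster-scoped, but the builder still wants a default
+		// namespace; ContinueOnError lets Visit report per-node failures
+		// without aborting the whole walk.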
+		NamespaceParam(namespace).DefaultNamespace()
+	if o.all {
+		o.builder = o.builder.SelectAllParam(o.all).ResourceTypes("node")
+	} else {
+		if len(o.resources) < 2 {
+			return fmt.Errorf("at least one resource name must be specified since 'all' parameter is not set")
+		}
+		o.builder = o.builder.ResourceNames("node", o.resources[1:]...)
+	}
+	o.builder = o.builder.SelectorParam(o.selector).
+		Flatten().
+		Latest()
+
+	o.f = f
+	o.out = out
+	o.cmd = cmd
+
+	return nil
+}
+
+// Validate checks the TaintOptions to see if there is sufficient information to run the command.
+func (o TaintOptions) Validate(args []string) error {
+	resourceType := strings.ToLower(o.resources[0])
+	if resourceType != "node" && resourceType != "nodes" {
+		return fmt.Errorf("invalid resource type %s, only node(s) are supported", o.resources[0])
+	}
+
+	// verify that no taint key is both updated and removed in the same command
+	conflictKeys := []string{}
+	removeTaintKeysSet := sets.NewString(o.removeTaintKeys...)
+	for _, taint := range o.taintsToAdd {
+		if removeTaintKeysSet.Has(taint.Key) {
+			conflictKeys = append(conflictKeys, taint.Key)
+		}
+	}
+	if len(conflictKeys) > 0 {
+		return fmt.Errorf("cannot both modify and remove the following taint(s) in the same command: %s", strings.Join(conflictKeys, ", "))
+	}
+
+	return nil
+}
+
+// RunTaint does the work
+func (o TaintOptions) RunTaint() error {
+	r := o.builder.Do()
+	if err := r.Err(); err != nil {
+		return err
+	}
+
+	return r.Visit(func(info *resource.Info, err error) error {
+		if err != nil {
+			return err
+		}
+
+		obj, err := info.Mapping.ConvertToVersion(info.Object, info.Mapping.GroupVersionKind.GroupVersion())
+		if err != nil {
+			return err
+		}
+		name, namespace := info.Name, info.Namespace
+		oldData, err := json.Marshal(obj)
+		if err != nil {
+			return err
+		}
+
+		if err := o.updateTaints(obj); err != nil {
+			return err
+		}
+		newData, err := json.Marshal(obj)
+		if err != nil {
+			return err
+		}
+		patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, obj)
+		createdPatch := err == nil
+		if err != nil {
+			glog.V(2).Infof("couldn't compute patch: %v", err)
+		}
+
+		mapping := info.ResourceMapping()
+		client, err := o.f.ClientForMapping(mapping)
+		if err != nil {
+			return err
+		}
+		helper := resource.NewHelper(client, mapping)
+
+		var outputObj runtime.Object
+		if createdPatch {
+			outputObj, err = helper.Patch(namespace, name, api.StrategicMergePatchType, patchBytes)
+		} else {
+			outputObj, err = helper.Replace(namespace, name, false, obj)
+		}
+		if err != nil {
+			return err
+		}
+
+		mapper, _ := o.f.Object(cmdutil.GetIncludeThirdPartyAPIs(o.cmd))
+		outputFormat := cmdutil.GetFlagString(o.cmd, "output")
+		if outputFormat != "" {
+			return o.f.PrintObject(o.cmd, mapper, outputObj, o.out)
+		}
+
+		cmdutil.PrintSuccess(mapper, false, o.out, info.Mapping.Resource, info.Name, "tainted")
+		return nil
+	})
+}
+
+// validateNoTaintOverwrites validates that when overwrite is false, to-be-updated taints don't exist in the node taint list (yet)
+func validateNoTaintOverwrites(accessor meta.Object, taints []api.Taint) error {
+	annotations := accessor.GetAnnotations()
+	if annotations == nil {
+		return nil
+	}
+
+	allErrs := []error{}
+	oldTaints, err := api.GetTaintsFromNodeAnnotations(annotations)
+	if err != nil {
+		allErrs = append(allErrs, err)
+		return utilerrors.NewAggregate(allErrs)
+	}
+
+	for _, taint := range taints {
+		for _, oldTaint := range oldTaints {
+			if taint.Key == oldTaint.Key {
+				allErrs = append(allErrs,
fmt.Errorf("Node '%s' already has a taint (%+v), and --overwrite is false", accessor.GetName(), taint)) + break + } + } + } + return utilerrors.NewAggregate(allErrs) +} + +// updateTaints updates taints of obj +func (o TaintOptions) updateTaints(obj runtime.Object) error { + accessor, err := meta.Accessor(obj) + if err != nil { + return err + } + if !o.overwrite { + if err := validateNoTaintOverwrites(accessor, o.taintsToAdd); err != nil { + return err + } + } + + annotations := accessor.GetAnnotations() + if annotations == nil { + annotations = make(map[string]string) + } + + newTaints, err := reorganizeTaints(accessor, o.overwrite, o.taintsToAdd, o.removeTaintKeys) + if err != nil { + return err + } + taintsData, err := json.Marshal(newTaints) + if err != nil { + return err + } + annotations[api.TaintsAnnotationKey] = string(taintsData) + accessor.SetAnnotations(annotations) + + return nil +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/taint_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/taint_test.go new file mode 100644 index 000000000000..d57b3d2d7e98 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/taint_test.go @@ -0,0 +1,299 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cmd + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + "reflect" + "testing" + "time" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/client/unversioned/fake" + "k8s.io/kubernetes/pkg/conversion" + cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" + "k8s.io/kubernetes/pkg/runtime" +) + +func generateNodeAndTaintedNode(oldTaints []api.Taint, newTaints []api.Taint) (*api.Node, *api.Node) { + var taintedNode *api.Node + + oldTaintsData, _ := json.Marshal(oldTaints) + // Create a node. + node := &api.Node{ + ObjectMeta: api.ObjectMeta{ + Name: "node-name", + CreationTimestamp: unversioned.Time{Time: time.Now()}, + Annotations: map[string]string{ + api.TaintsAnnotationKey: string(oldTaintsData), + }, + }, + Spec: api.NodeSpec{ + ExternalID: "node-name", + }, + Status: api.NodeStatus{}, + } + clone, _ := conversion.NewCloner().DeepCopy(node) + + newTaintsData, _ := json.Marshal(newTaints) + // A copy of the same node, but tainted. 
+ taintedNode = clone.(*api.Node) + taintedNode.Annotations = map[string]string{ + api.TaintsAnnotationKey: string(newTaintsData), + } + + return node, taintedNode +} + +func AnnotationsHaveEqualTaints(annotationA map[string]string, annotationB map[string]string) bool { + taintsA, err := api.GetTaintsFromNodeAnnotations(annotationA) + if err != nil { + return false + } + taintsB, err := api.GetTaintsFromNodeAnnotations(annotationB) + if err != nil { + return false + } + + if len(taintsA) != len(taintsB) { + return false + } + + for _, taintA := range taintsA { + found := false + for _, taintB := range taintsB { + if reflect.DeepEqual(taintA, taintB) { + found = true + break + } + } + if !found { + return false + } + } + return true +} + +func TestTaint(t *testing.T) { + tests := []struct { + description string + oldTaints []api.Taint + newTaints []api.Taint + args []string + expectFatal bool + expectTaint bool + }{ + // success cases + { + description: "taints a node with effect NoSchedule", + newTaints: []api.Taint{{ + Key: "foo", + Value: "bar", + Effect: "NoSchedule", + }}, + args: []string{"node", "node-name", "foo=bar:NoSchedule"}, + expectFatal: false, + expectTaint: true, + }, + { + description: "taints a node with effect PreferNoSchedule", + newTaints: []api.Taint{{ + Key: "foo", + Value: "bar", + Effect: "PreferNoSchedule", + }}, + args: []string{"node", "node-name", "foo=bar:PreferNoSchedule"}, + expectFatal: false, + expectTaint: true, + }, + { + description: "update an existing taint on the node, change the effect from NoSchedule to PreferNoSchedule", + oldTaints: []api.Taint{{ + Key: "foo", + Value: "bar", + Effect: "NoSchedule", + }}, + newTaints: []api.Taint{{ + Key: "foo", + Value: "bar", + Effect: "PreferNoSchedule", + }}, + args: []string{"node", "node-name", "foo=bar:PreferNoSchedule", "--overwrite"}, + expectFatal: false, + expectTaint: true, + }, + { + description: "taints a node with two taints", + newTaints: []api.Taint{{ + Key: "dedicated", + Value: "namespaceA", + Effect: "NoSchedule", + }, { + Key: "foo", + Value: "bar", + Effect: "PreferNoSchedule", + }}, + args: []string{"node", "node-name", "dedicated=namespaceA:NoSchedule", "foo=bar:PreferNoSchedule"}, + expectFatal: false, + expectTaint: true, + }, + { + description: "node has two taints, remove one of them", + oldTaints: []api.Taint{{ + Key: "dedicated", + Value: "namespaceA", + Effect: "NoSchedule", + }, { + Key: "foo", + Value: "bar", + Effect: "PreferNoSchedule", + }}, + newTaints: []api.Taint{{ + Key: "foo", + Value: "bar", + Effect: "PreferNoSchedule", + }}, + args: []string{"node", "node-name", "dedicated-"}, + expectFatal: false, + expectTaint: true, + }, + { + description: "node has two taints, update one of them and remove the other", + oldTaints: []api.Taint{{ + Key: "dedicated", + Value: "namespaceA", + Effect: "NoSchedule", + }, { + Key: "foo", + Value: "bar", + Effect: "PreferNoSchedule", + }}, + newTaints: []api.Taint{{ + Key: "foo", + Value: "bar", + Effect: "NoSchedule", + }}, + args: []string{"node", "node-name", "dedicated-", "foo=bar:NoSchedule", "--overwrite"}, + expectFatal: false, + expectTaint: true, + }, + + // error cases + { + description: "invalid taint key", + args: []string{"node", "node-name", "nospecialchars^@=banana:NoSchedule"}, + expectFatal: true, + expectTaint: false, + }, + { + description: "invalid taint effect", + args: []string{"node", "node-name", "foo=bar:NoExcute"}, + expectFatal: true, + expectTaint: false, + }, + { + description: "can't update existing taint on 
the node, since 'overwrite' flag is not set", + oldTaints: []api.Taint{{ + Key: "foo", + Value: "bar", + Effect: "NoSchedule", + }}, + newTaints: []api.Taint{{ + Key: "foo", + Value: "bar", + Effect: "NoSchedule", + }}, + args: []string{"node", "node-name", "foo=bar:PreferNoSchedule"}, + expectFatal: true, + expectTaint: false, + }, + } + + for _, test := range tests { + oldNode, expectNewNode := generateNodeAndTaintedNode(test.oldTaints, test.newTaints) + + new_node := &api.Node{} + tainted := false + f, tf, codec := NewAPIFactory() + + tf.Client = &fake.RESTClient{ + Codec: codec, + Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { + m := &MyReq{req} + switch { + case m.isFor("GET", "/nodes/node-name"): + return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, oldNode)}, nil + case m.isFor("PATCH", "/nodes/node-name"), m.isFor("PUT", "/nodes/node-name"): + tainted = true + data, err := ioutil.ReadAll(req.Body) + if err != nil { + t.Fatalf("%s: unexpected error: %v", test.description, err) + } + defer req.Body.Close() + if err := runtime.DecodeInto(codec, data, new_node); err != nil { + t.Fatalf("%s: unexpected error: %v", test.description, err) + } + if !AnnotationsHaveEqualTaints(expectNewNode.Annotations, new_node.Annotations) { + t.Fatalf("%s: expected:\n%v\nsaw:\n%v\n", test.description, expectNewNode.Annotations, new_node.Annotations) + } + return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, new_node)}, nil + default: + t.Fatalf("%s: unexpected request: %v %#v\n%#v", test.description, req.Method, req.URL, req) + return nil, nil + } + }), + } + tf.ClientConfig = defaultClientConfig() + + buf := bytes.NewBuffer([]byte{}) + cmd := NewCmdTaint(f, buf) + + saw_fatal := false + func() { + defer func() { + // Recover from the panic below. + _ = recover() + // Restore cmdutil behavior + cmdutil.DefaultBehaviorOnFatal() + }() + cmdutil.BehaviorOnFatal(func(e string) { saw_fatal = true; panic(e) }) + cmd.SetArgs(test.args) + cmd.Execute() + }() + + if test.expectFatal { + if !saw_fatal { + t.Fatalf("%s: unexpected non-error", test.description) + } + } + + if test.expectTaint { + if !tainted { + t.Fatalf("%s: node not tainted", test.description) + } + } + if !test.expectTaint { + if tainted { + t.Fatalf("%s: unexpected taint", test.description) + } + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/util/clientcache.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/util/clientcache.go index 5e6551cdee7d..43ddf3e9823b 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/util/clientcache.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/util/clientcache.go @@ -17,6 +17,7 @@ limitations under the License. 
package util import ( + fed_clientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset" "k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/apimachinery/registered" "k8s.io/kubernetes/pkg/client/restclient" @@ -26,9 +27,10 @@ import ( func NewClientCache(loader clientcmd.ClientConfig) *ClientCache { return &ClientCache{ - clients: make(map[unversioned.GroupVersion]*client.Client), - configs: make(map[unversioned.GroupVersion]*restclient.Config), - loader: loader, + clients: make(map[unversioned.GroupVersion]*client.Client), + configs: make(map[unversioned.GroupVersion]*restclient.Config), + fedClientSets: make(map[unversioned.GroupVersion]fed_clientset.Interface), + loader: loader, } } @@ -37,6 +39,7 @@ func NewClientCache(loader clientcmd.ClientConfig) *ClientCache { type ClientCache struct { loader clientcmd.ClientConfig clients map[unversioned.GroupVersion]*client.Client + fedClientSets map[unversioned.GroupVersion]fed_clientset.Interface configs map[unversioned.GroupVersion]*restclient.Config defaultConfig *restclient.Config defaultClient *client.Client @@ -125,3 +128,41 @@ func (c *ClientCache) ClientForVersion(version *unversioned.GroupVersion) (*clie return kubeclient, nil } + +func (c *ClientCache) FederationClientSetForVersion(version *unversioned.GroupVersion) (fed_clientset.Interface, error) { + if version != nil { + if clientSet, found := c.fedClientSets[*version]; found { + return clientSet, nil + } + } + config, err := c.ClientConfigForVersion(version) + if err != nil { + return nil, err + } + + // TODO: support multi versions of client with clientset + clientSet, err := fed_clientset.NewForConfig(config) + if err != nil { + return nil, err + } + c.fedClientSets[*config.GroupVersion] = clientSet + + if version != nil { + configCopy := *config + clientSet, err := fed_clientset.NewForConfig(&configCopy) + if err != nil { + return nil, err + } + c.fedClientSets[*version] = clientSet + } + + return clientSet, nil +} + +func (c *ClientCache) FederationClientForVersion(version *unversioned.GroupVersion) (*restclient.RESTClient, error) { + fedClientSet, err := c.FederationClientSetForVersion(version) + if err != nil { + return nil, err + } + return fedClientSet.(*fed_clientset.Clientset).FederationClient.RESTClient, nil +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/util/editor/editor.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/util/editor/editor.go index f77f8e1372db..1c58d846b546 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/util/editor/editor.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/util/editor/editor.go @@ -23,13 +23,13 @@ import ( "math/rand" "os" "os/exec" - "os/signal" "path/filepath" "runtime" "strings" - "github.com/docker/docker/pkg/term" "github.com/golang/glog" + + "k8s.io/kubernetes/pkg/util/term" ) const ( @@ -125,7 +125,7 @@ func (e Editor) Launch(path string) error { cmd.Stderr = os.Stderr cmd.Stdin = os.Stdin glog.V(5).Infof("Opening file with editor %v", args) - if err := withSafeTTYAndInterrupts(cmd.Run); err != nil { + if err := (term.TTY{In: os.Stdin, TryDev: true}).Safe(cmd.Run); err != nil { if err, ok := err.(*exec.Error); ok { if err.Err == exec.ErrNotFound { return fmt.Errorf("unable to launch the editor %q", strings.Join(e.Args, " ")) @@ -160,40 +160,6 @@ func (e Editor) LaunchTempFile(prefix, suffix string, r io.Reader) ([]byte, stri return bytes, path, err } -// withSafeTTYAndInterrupts invokes the provided function 
after the terminal -// state has been stored, and then on any error or termination attempts to -// restore the terminal state to its prior behavior. It also eats signals -// for the duration of the function. -func withSafeTTYAndInterrupts(fn func() error) error { - ch := make(chan os.Signal, 1) - signal.Notify(ch, childSignals...) - defer signal.Stop(ch) - - inFd := os.Stdin.Fd() - if !term.IsTerminal(inFd) { - if f, err := os.Open("/dev/tty"); err == nil { - defer f.Close() - inFd = f.Fd() - } - } - - if term.IsTerminal(inFd) { - state, err := term.SaveState(inFd) - if err != nil { - return err - } - go func() { - if _, ok := <-ch; !ok { - return - } - term.RestoreTerminal(inFd, state) - }() - defer term.RestoreTerminal(inFd, state) - return fn() - } - return fn() -} - func tempFile(prefix, suffix string) (f *os.File, err error) { dir := os.TempDir() diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/util/editor/editor_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/util/editor/editor_test.go new file mode 100644 index 000000000000..9be83a04276d --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/util/editor/editor_test.go @@ -0,0 +1,63 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package editor + +import ( + "bytes" + "io/ioutil" + "os" + "reflect" + "strings" + "testing" +) + +func TestArgs(t *testing.T) { + if e, a := []string{"/bin/bash", "-c \"test\""}, (Editor{Args: []string{"/bin/bash", "-c"}, Shell: true}).args("test"); !reflect.DeepEqual(e, a) { + t.Errorf("unexpected args: %v", a) + } + if e, a := []string{"/bin/bash", "-c", "test"}, (Editor{Args: []string{"/bin/bash", "-c"}, Shell: false}).args("test"); !reflect.DeepEqual(e, a) { + t.Errorf("unexpected args: %v", a) + } + if e, a := []string{"/bin/bash", "-i -c \"test\""}, (Editor{Args: []string{"/bin/bash", "-i -c"}, Shell: true}).args("test"); !reflect.DeepEqual(e, a) { + t.Errorf("unexpected args: %v", a) + } + if e, a := []string{"/test", "test"}, (Editor{Args: []string{"/test"}}).args("test"); !reflect.DeepEqual(e, a) { + t.Errorf("unexpected args: %v", a) + } +} + +func TestEditor(t *testing.T) { + edit := Editor{Args: []string{"cat"}} + testStr := "test something\n" + contents, path, err := edit.LaunchTempFile("", "someprefix", bytes.NewBufferString(testStr)) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if _, err := os.Stat(path); err != nil { + t.Fatalf("no temp file: %s", path) + } + defer os.Remove(path) + if disk, err := ioutil.ReadFile(path); err != nil || !bytes.Equal(contents, disk) { + t.Errorf("unexpected file on disk: %v %s", err, string(disk)) + } + if !bytes.Equal(contents, []byte(testStr)) { + t.Errorf("unexpected contents: %s", string(contents)) + } + if !strings.Contains(path, "someprefix") { + t.Errorf("path not expected: %s", path) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/util/factory.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/util/factory.go index 78b2503b40b8..3f1c73ed41a9 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/util/factory.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/util/factory.go @@ -27,6 +27,7 @@ import ( "os/user" "path" "path/filepath" + "sort" "strconv" "strings" "time" @@ -35,25 +36,34 @@ import ( "github.com/spf13/cobra" "github.com/spf13/pflag" + "k8s.io/kubernetes/federation/apis/federation" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/meta" + "k8s.io/kubernetes/pkg/api/service" "k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/validation" + "k8s.io/kubernetes/pkg/apimachinery" "k8s.io/kubernetes/pkg/apimachinery/registered" + "k8s.io/kubernetes/pkg/apis/apps" "k8s.io/kubernetes/pkg/apis/autoscaling" "k8s.io/kubernetes/pkg/apis/batch" "k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/apis/metrics" - clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" + "k8s.io/kubernetes/pkg/apis/policy" + "k8s.io/kubernetes/pkg/apis/rbac" "k8s.io/kubernetes/pkg/client/restclient" client "k8s.io/kubernetes/pkg/client/unversioned" + clientset "k8s.io/kubernetes/pkg/client/unversioned/adapters/internalclientset" "k8s.io/kubernetes/pkg/client/unversioned/clientcmd" + "k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/kubectl" "k8s.io/kubernetes/pkg/kubectl/resource" "k8s.io/kubernetes/pkg/labels" + "k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata" "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime/serializer/json" - "k8s.io/kubernetes/pkg/util" + utilflag "k8s.io/kubernetes/pkg/util/flag" + "k8s.io/kubernetes/pkg/watch" ) const ( @@ -69,8 +79,9 @@ type Factory struct { clients *ClientCache flags *pflag.FlagSet - // Returns interfaces for dealing with arbitrary 
runtime.Objects.
-	Object func() (meta.RESTMapper, runtime.ObjectTyper)
+	// Returns interfaces for dealing with arbitrary runtime.Objects. If thirdPartyDiscovery is true, performs API calls
+	// to discover dynamic API objects registered by third parties.
+	Object func(thirdPartyDiscovery bool) (meta.RESTMapper, runtime.ObjectTyper)
 	// Returns interfaces for decoding objects - if toInternal is set, decoded objects will be converted
 	// into their internal form (if possible). Eventually the internal form will be removed as an option,
 	// and only versioned objects will be returned.
@@ -96,14 +107,16 @@ type Factory struct {
 	HistoryViewer func(mapping *meta.RESTMapping) (kubectl.HistoryViewer, error)
 	// Returns a Rollbacker for changing the rollback version of the specified RESTMapping type or an error
 	Rollbacker func(mapping *meta.RESTMapping) (kubectl.Rollbacker, error)
-	// PodSelectorForObject returns the pod selector associated with the provided object
-	PodSelectorForObject func(object runtime.Object) (string, error)
+	// Returns a StatusViewer for printing rollout status.
+	StatusViewer func(mapping *meta.RESTMapping) (kubectl.StatusViewer, error)
 	// MapBasedSelectorForObject returns the map-based selector associated with the provided object. If a
 	// new set-based selector is provided, an error is returned if the selector cannot be converted to a
 	// map-based selector
 	MapBasedSelectorForObject func(object runtime.Object) (string, error)
 	// PortsForObject returns the ports associated with the provided object
 	PortsForObject func(object runtime.Object) ([]string, error)
+	// ProtocolsForObject returns the <port, protocol> mapping associated with the provided object
+	ProtocolsForObject func(object runtime.Object) (map[string]string, error)
 	// LabelsForObject returns the labels associated with the provided object
 	LabelsForObject func(object runtime.Object) (map[string]string, error)
 	// LogsForObject returns a request for the logs associated with the provided object
@@ -118,7 +131,7 @@ type Factory struct {
 	SwaggerSchema func(unversioned.GroupVersionKind) (*swagger.ApiDeclaration, error)
 	// Returns the default namespace to use in cases where no
 	// other namespace is specified and whether the namespace was
-	// overriden.
+	// overridden.
 	DefaultNamespace func() (string, bool, error)
 	// Generators returns the generators for the provided command
 	Generators func(cmdName string) map[string]kubectl.Generator
@@ -128,10 +141,15 @@ type Factory struct {
 	CanBeAutoscaled func(kind unversioned.GroupKind) error
 	// AttachablePodForObject returns the pod to which to attach given an object.
 	AttachablePodForObject func(object runtime.Object) (*api.Pod, error)
+	// UpdatePodSpecForObject will call the provided function on the pod spec this object supports,
+	// return false if no pod spec is supported, or return an error.
+	UpdatePodSpecForObject func(obj runtime.Object, fn func(*api.PodSpec) error) (bool, error)
 	// EditorEnvs returns a group of environment variables that the edit command
 	// can range over in order to determine if the user has specified an editor
 	// of their choice.
EditorEnvs func() []string + // PrintObjectSpecificMessage prints object-specific messages on the provided writer + PrintObjectSpecificMessage func(obj runtime.Object, out io.Writer) } const ( @@ -147,6 +165,7 @@ const ( NamespaceV1GeneratorName = "namespace/v1" SecretV1GeneratorName = "secret/v1" SecretForDockerRegistryV1GeneratorName = "secret-for-docker-registry/v1" + SecretForTLSV1GeneratorName = "secret-for-tls/v1" ConfigMapV1GeneratorName = "configmap/v1" ) @@ -176,17 +195,46 @@ func DefaultGenerators(cmdName string) map[string]kubectl.Generator { generators["secret-for-docker-registry"] = map[string]kubectl.Generator{ SecretForDockerRegistryV1GeneratorName: kubectl.SecretForDockerRegistryGeneratorV1{}, } + generators["secret-for-tls"] = map[string]kubectl.Generator{ + SecretForTLSV1GeneratorName: kubectl.SecretForTLSGeneratorV1{}, + } + return generators[cmdName] } +func getGroupVersionKinds(gvks []unversioned.GroupVersionKind, group string) []unversioned.GroupVersionKind { + result := []unversioned.GroupVersionKind{} + for ix := range gvks { + if gvks[ix].Group == group { + result = append(result, gvks[ix]) + } + } + return result +} + +func makeInterfacesFor(versionList []unversioned.GroupVersion) func(version unversioned.GroupVersion) (*meta.VersionInterfaces, error) { + accessor := meta.NewAccessor() + return func(version unversioned.GroupVersion) (*meta.VersionInterfaces, error) { + for ix := range versionList { + if versionList[ix].String() == version.String() { + return &meta.VersionInterfaces{ + ObjectConvertor: thirdpartyresourcedata.NewThirdPartyObjectConverter(api.Scheme), + MetadataAccessor: accessor, + }, nil + } + } + return nil, fmt.Errorf("unsupported storage version: %s (valid: %v)", version, versionList) + } +} + // NewFactory creates a factory with the default Kubernetes resources defined // if optionalClientConfig is nil, then flags will be bound to a new clientcmd.ClientConfig. // if optionalClientConfig is not nil, then this factory will make use of it. 
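// A typical caller obtains the mapper and typer from Object() and hands them to
// resource.NewBuilder, as the set image and taint commands above do in Complete().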
func NewFactory(optionalClientConfig clientcmd.ClientConfig) *Factory { - mapper := kubectl.ShortcutExpander{RESTMapper: api.RESTMapper} + mapper := kubectl.ShortcutExpander{RESTMapper: registered.RESTMapper()} flags := pflag.NewFlagSet("", pflag.ContinueOnError) - flags.SetNormalizeFunc(util.WarnWordSepNormalizeFunc) // Warn for "_" flags + flags.SetNormalizeFunc(utilflag.WarnWordSepNormalizeFunc) // Warn for "_" flags clientConfig := optionalClientConfig if optionalClientConfig == nil { @@ -199,31 +247,76 @@ func NewFactory(optionalClientConfig clientcmd.ClientConfig) *Factory { clients: clients, flags: flags, - Object: func() (meta.RESTMapper, runtime.ObjectTyper) { + // If discoverDynamicAPIs is true, make API calls to the discovery service to find APIs that + // have been dynamically added to the apiserver + Object: func(discoverDynamicAPIs bool) (meta.RESTMapper, runtime.ObjectTyper) { cfg, err := clientConfig.ClientConfig() CheckErr(err) cmdApiVersion := unversioned.GroupVersion{} if cfg.GroupVersion != nil { cmdApiVersion = *cfg.GroupVersion } + if discoverDynamicAPIs { + client, err := clients.ClientForVersion(&unversioned.GroupVersion{Version: "v1"}) + CheckErr(err) - outputRESTMapper := kubectl.OutputVersionMapper{RESTMapper: mapper, OutputVersions: []unversioned.GroupVersion{cmdApiVersion}} + versions, gvks, err := GetThirdPartyGroupVersions(client.Discovery()) + CheckErr(err) + if len(versions) > 0 { + priorityMapper, ok := mapper.RESTMapper.(meta.PriorityRESTMapper) + if !ok { + CheckErr(fmt.Errorf("expected PriorityMapper, saw: %v", mapper.RESTMapper)) + return nil, nil + } + multiMapper, ok := priorityMapper.Delegate.(meta.MultiRESTMapper) + if !ok { + CheckErr(fmt.Errorf("unexpected type: %v", mapper.RESTMapper)) + return nil, nil + } + groupsMap := map[string][]unversioned.GroupVersion{} + for _, version := range versions { + groupsMap[version.Group] = append(groupsMap[version.Group], version) + } + for group, versionList := range groupsMap { + preferredExternalVersion := versionList[0] + + thirdPartyMapper, err := kubectl.NewThirdPartyResourceMapper(versionList, getGroupVersionKinds(gvks, group)) + CheckErr(err) + accessor := meta.NewAccessor() + groupMeta := apimachinery.GroupMeta{ + GroupVersion: preferredExternalVersion, + GroupVersions: versionList, + RESTMapper: thirdPartyMapper, + SelfLinker: runtime.SelfLinker(accessor), + InterfacesFor: makeInterfacesFor(versionList), + } - // eventually this should allow me choose a group priority based on the order of the discovery doc, for now hardcode a given order + CheckErr(registered.RegisterGroup(groupMeta)) + registered.AddThirdPartyAPIGroupVersions(versionList...) + multiMapper = append(meta.MultiRESTMapper{thirdPartyMapper}, multiMapper...) 
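+						// Prepending the third-party mapper (rather than appending)
+						// means dynamically discovered kinds take priority over the
+						// statically registered ones when names are ambiguous.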
+					}
+					priorityMapper.Delegate = multiMapper
+					// Re-assign to the RESTMapper here because priorityMapper is actually a copy, so if we
+					// don't re-assign, the above assignment won't actually update mapper.RESTMapper
+					mapper.RESTMapper = priorityMapper
+				}
+			}
+			outputRESTMapper := kubectl.OutputVersionMapper{RESTMapper: mapper, OutputVersions: []unversioned.GroupVersion{cmdApiVersion}}
 			priorityRESTMapper := meta.PriorityRESTMapper{
 				Delegate: outputRESTMapper,
 				ResourcePriority: []unversioned.GroupVersionResource{
 					{Group: api.GroupName, Version: meta.AnyVersion, Resource: meta.AnyResource},
 					{Group: extensions.GroupName, Version: meta.AnyVersion, Resource: meta.AnyResource},
 					{Group: metrics.GroupName, Version: meta.AnyVersion, Resource: meta.AnyResource},
+					{Group: federation.GroupName, Version: meta.AnyVersion, Resource: meta.AnyResource},
 				},
 				KindPriority: []unversioned.GroupVersionKind{
 					{Group: api.GroupName, Version: meta.AnyVersion, Kind: meta.AnyKind},
 					{Group: extensions.GroupName, Version: meta.AnyVersion, Kind: meta.AnyKind},
 					{Group: metrics.GroupName, Version: meta.AnyVersion, Kind: meta.AnyKind},
+					{Group: federation.GroupName, Version: meta.AnyVersion, Kind: meta.AnyKind},
 				},
 			}
-
 			return priorityRESTMapper, api.Scheme
 		},
 		Client: func() (*client.Client, error) {
@@ -233,25 +326,59 @@ func NewFactory(optionalClientConfig clientcmd.ClientConfig) *Factory {
 			return clients.ClientConfigForVersion(nil)
 		},
 		ClientForMapping: func(mapping *meta.RESTMapping) (resource.RESTClient, error) {
+			gvk := mapping.GroupVersionKind
 			mappingVersion := mapping.GroupVersionKind.GroupVersion()
-			client, err := clients.ClientForVersion(&mappingVersion)
+			c, err := clients.ClientForVersion(&mappingVersion)
 			if err != nil {
 				return nil, err
 			}
-			switch mapping.GroupVersionKind.Group {
+			switch gvk.Group {
 			case api.GroupName:
-				return client.RESTClient, nil
+				return c.RESTClient, nil
 			case autoscaling.GroupName:
-				return client.AutoscalingClient.RESTClient, nil
+				return c.AutoscalingClient.RESTClient, nil
 			case batch.GroupName:
-				return client.BatchClient.RESTClient, nil
+				return c.BatchClient.RESTClient, nil
+			case policy.GroupName:
+				return c.PolicyClient.RESTClient, nil
+			case apps.GroupName:
+				return c.AppsClient.RESTClient, nil
 			case extensions.GroupName:
-				return client.ExtensionsClient.RESTClient, nil
+				return c.ExtensionsClient.RESTClient, nil
+			case api.SchemeGroupVersion.Group:
+				return c.RESTClient, nil
+			case extensions.SchemeGroupVersion.Group:
+				return c.ExtensionsClient.RESTClient, nil
+			case federation.GroupName:
+				return clients.FederationClientForVersion(&mappingVersion)
+			case rbac.GroupName:
+				return c.RbacClient.RESTClient, nil
+			default:
+				if !registered.IsThirdPartyAPIGroupVersion(gvk.GroupVersion()) {
+					return nil, fmt.Errorf("unknown api group/version: %s", gvk.String())
+				}
+				cfg, err := clientConfig.ClientConfig()
+				if err != nil {
+					return nil, err
+				}
+				gv := gvk.GroupVersion()
+				cfg.GroupVersion = &gv
+				cfg.APIPath = "/apis"
+				cfg.Codec = thirdpartyresourcedata.NewCodec(c.ExtensionsClient.RESTClient.Codec(), gvk.Kind)
+				return restclient.RESTClientFor(cfg)
 			}
-
-			return nil, fmt.Errorf("unable to get RESTClient for resource '%s'", mapping.Resource)
 		},
 		Describer: func(mapping *meta.RESTMapping) (kubectl.Describer, error) {
 			mappingVersion := mapping.GroupVersionKind.GroupVersion()
+			if mapping.GroupVersionKind.Group == federation.GroupName {
+				fedClientSet, err := clients.FederationClientSetForVersion(&mappingVersion)
+				if err != nil {
+					return nil, err
+				}
+				if mapping.GroupVersionKind.Kind
== "Cluster" { + return &kubectl.ClusterDescriber{Interface: fedClientSet}, nil + } + } client, err := clients.ClientForVersion(&mappingVersion) if err != nil { return nil, err @@ -273,41 +400,6 @@ func NewFactory(optionalClientConfig clientcmd.ClientConfig) *Factory { Printer: func(mapping *meta.RESTMapping, noHeaders, withNamespace bool, wide bool, showAll bool, showLabels bool, absoluteTimestamps bool, columnLabels []string) (kubectl.ResourcePrinter, error) { return kubectl.NewHumanReadablePrinter(noHeaders, withNamespace, wide, showAll, showLabels, absoluteTimestamps, columnLabels), nil }, - PodSelectorForObject: func(object runtime.Object) (string, error) { - // TODO: replace with a swagger schema based approach (identify pod selector via schema introspection) - switch t := object.(type) { - case *api.ReplicationController: - return kubectl.MakeLabels(t.Spec.Selector), nil - case *api.Pod: - if len(t.Labels) == 0 { - return "", fmt.Errorf("the pod has no labels and cannot be exposed") - } - return kubectl.MakeLabels(t.Labels), nil - case *api.Service: - if t.Spec.Selector == nil { - return "", fmt.Errorf("the service has no pod selector set") - } - return kubectl.MakeLabels(t.Spec.Selector), nil - case *extensions.Deployment: - selector, err := unversioned.LabelSelectorAsSelector(t.Spec.Selector) - if err != nil { - return "", fmt.Errorf("invalid label selector: %v", err) - } - return selector.String(), nil - case *extensions.ReplicaSet: - selector, err := unversioned.LabelSelectorAsSelector(t.Spec.Selector) - if err != nil { - return "", fmt.Errorf("failed to convert label selector to selector: %v", err) - } - return selector.String(), nil - default: - gvk, err := api.Scheme.ObjectKind(object) - if err != nil { - return "", err - } - return "", fmt.Errorf("cannot extract pod selector from %v", gvk) - } - }, MapBasedSelectorForObject: func(object runtime.Object) (string, error) { // TODO: replace with a swagger schema based approach (identify pod selector via schema introspection) switch t := object.(type) { @@ -327,22 +419,22 @@ func NewFactory(optionalClientConfig clientcmd.ClientConfig) *Factory { // TODO(madhusudancs): Make this smarter by admitting MatchExpressions with Equals // operator, DoubleEquals operator and In operator with only one element in the set. if len(t.Spec.Selector.MatchExpressions) > 0 { - return "", fmt.Errorf("couldn't convert expressions - \"%+v\" to map-based selector format") + return "", fmt.Errorf("couldn't convert expressions - \"%+v\" to map-based selector format", t.Spec.Selector.MatchExpressions) } return kubectl.MakeLabels(t.Spec.Selector.MatchLabels), nil case *extensions.ReplicaSet: // TODO(madhusudancs): Make this smarter by admitting MatchExpressions with Equals // operator, DoubleEquals operator and In operator with only one element in the set. 
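 			// For example, {key: tier, operator: In, values: [frontend]} is
 			// equivalent to the map entry tier=frontend, but selectors using
 			// Exists, NotIn, or multiple values have no map-based form.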
if len(t.Spec.Selector.MatchExpressions) > 0 { - return "", fmt.Errorf("couldn't convert expressions - \"%+v\" to map-based selector format") + return "", fmt.Errorf("couldn't convert expressions - \"%+v\" to map-based selector format", t.Spec.Selector.MatchExpressions) } return kubectl.MakeLabels(t.Spec.Selector.MatchLabels), nil default: - gvk, err := api.Scheme.ObjectKind(object) + gvks, _, err := api.Scheme.ObjectKinds(object) if err != nil { return "", err } - return "", fmt.Errorf("cannot extract pod selector from %v", gvk) + return "", fmt.Errorf("cannot extract pod selector from %v", gvks[0]) } }, PortsForObject: func(object runtime.Object) ([]string, error) { @@ -359,11 +451,32 @@ func NewFactory(optionalClientConfig clientcmd.ClientConfig) *Factory { case *extensions.ReplicaSet: return getPorts(t.Spec.Template.Spec), nil default: - gvk, err := api.Scheme.ObjectKind(object) + gvks, _, err := api.Scheme.ObjectKinds(object) if err != nil { return nil, err } - return nil, fmt.Errorf("cannot extract ports from %v", gvk) + return nil, fmt.Errorf("cannot extract ports from %v", gvks[0]) + } + }, + ProtocolsForObject: func(object runtime.Object) (map[string]string, error) { + // TODO: replace with a swagger schema based approach (identify pod selector via schema introspection) + switch t := object.(type) { + case *api.ReplicationController: + return getProtocols(t.Spec.Template.Spec), nil + case *api.Pod: + return getProtocols(t.Spec), nil + case *api.Service: + return getServiceProtocols(t.Spec), nil + case *extensions.Deployment: + return getProtocols(t.Spec.Template.Spec), nil + case *extensions.ReplicaSet: + return getProtocols(t.Spec.Template.Spec), nil + default: + gvks, _, err := api.Scheme.ObjectKinds(object) + if err != nil { + return nil, err + } + return nil, fmt.Errorf("cannot extract protocols from %v", gvks[0]) } }, LabelsForObject: func(object runtime.Object) (map[string]string, error) { @@ -389,7 +502,8 @@ func NewFactory(optionalClientConfig clientcmd.ClientConfig) *Factory { return nil, errors.New("provided options object is not a PodLogOptions") } selector := labels.SelectorFromSet(t.Spec.Selector) - pod, numPods, err := GetFirstPod(c, t.Namespace, selector) + sortBy := func(pods []*api.Pod) sort.Interface { return controller.ActivePods(pods) } + pod, numPods, err := GetFirstPod(c, t.Namespace, selector, 20*time.Second, sortBy) if err != nil { return nil, err } @@ -408,7 +522,8 @@ func NewFactory(optionalClientConfig clientcmd.ClientConfig) *Factory { if err != nil { return nil, fmt.Errorf("invalid label selector: %v", err) } - pod, numPods, err := GetFirstPod(c, t.Namespace, selector) + sortBy := func(pods []*api.Pod) sort.Interface { return controller.ActivePods(pods) } + pod, numPods, err := GetFirstPod(c, t.Namespace, selector, 20*time.Second, sortBy) if err != nil { return nil, err } @@ -419,11 +534,11 @@ func NewFactory(optionalClientConfig clientcmd.ClientConfig) *Factory { return c.Pods(pod.Namespace).GetLogs(pod.Name, opts), nil default: - gvk, err := api.Scheme.ObjectKind(object) + gvks, _, err := api.Scheme.ObjectKinds(object) if err != nil { return nil, err } - return nil, fmt.Errorf("cannot get the logs from %v", gvk) + return nil, fmt.Errorf("cannot get the logs from %v", gvks[0]) } }, PauseObject: func(object runtime.Object) (bool, error) { @@ -441,11 +556,11 @@ func NewFactory(optionalClientConfig clientcmd.ClientConfig) *Factory { _, err := c.Extensions().Deployments(t.Namespace).Update(t) return false, err default: - gvk, err := 
api.Scheme.ObjectKind(object)
+				gvks, _, err := api.Scheme.ObjectKinds(object)
 				if err != nil {
 					return false, err
 				}
-				return false, fmt.Errorf("cannot pause %v", gvk)
+				return false, fmt.Errorf("cannot pause %v", gvks[0])
 			}
 		},
 		ResumeObject: func(object runtime.Object) (bool, error) {
@@ -463,11 +578,11 @@ func NewFactory(optionalClientConfig clientcmd.ClientConfig) *Factory {
 			_, err := c.Extensions().Deployments(t.Namespace).Update(t)
 			return false, err
 		default:
-			gvk, err := api.Scheme.ObjectKind(object)
+			gvks, _, err := api.Scheme.ObjectKinds(object)
 			if err != nil {
 				return false, err
 			}
-			return false, fmt.Errorf("cannot resume %v", gvk)
+			return false, fmt.Errorf("cannot resume %v", gvks[0])
 		}
 	},
 	Scaler: func(mapping *meta.RESTMapping) (kubectl.Scaler, error) {
@@ -503,6 +618,14 @@ func NewFactory(optionalClientConfig clientcmd.ClientConfig) *Factory {
 			}
 			return kubectl.RollbackerFor(mapping.GroupVersionKind.GroupKind(), client)
 		},
+		StatusViewer: func(mapping *meta.RESTMapping) (kubectl.StatusViewer, error) {
+			mappingVersion := mapping.GroupVersionKind.GroupVersion()
+			client, err := clients.ClientForVersion(&mappingVersion)
+			if err != nil {
+				return nil, err
+			}
+			return kubectl.StatusViewerFor(mapping.GroupVersionKind.GroupKind(), client)
+		},
 		Validator: func(validate bool, cacheDir string) (validation.Schema, error) {
 			if validate {
 				client, err := clients.ClientForVersion(nil)
@@ -517,8 +640,13 @@ func NewFactory(optionalClientConfig clientcmd.ClientConfig) *Factory {
 				}
 				dir = path.Join(cacheDir, version.String())
 			}
+			fedClient, err := clients.FederationClientForVersion(nil)
+			if err != nil {
+				return nil, err
+			}
 			return &clientSwaggerSchema{
 				c:        client,
+				fedc:     fedClient,
 				cacheDir: dir,
 				mapper:   api.RESTMapper,
 			}, nil
@@ -565,53 +693,132 @@ func NewFactory(optionalClientConfig clientcmd.ClientConfig) *Factory {
 			switch t := object.(type) {
 			case *api.ReplicationController:
 				selector := labels.SelectorFromSet(t.Spec.Selector)
-				pod, _, err := GetFirstPod(client, t.Namespace, selector)
+				sortBy := func(pods []*api.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) }
+				pod, _, err := GetFirstPod(client, t.Namespace, selector, 1*time.Minute, sortBy)
 				return pod, err
 			case *extensions.Deployment:
 				selector, err := unversioned.LabelSelectorAsSelector(t.Spec.Selector)
 				if err != nil {
 					return nil, fmt.Errorf("invalid label selector: %v", err)
 				}
-				pod, _, err := GetFirstPod(client, t.Namespace, selector)
+				sortBy := func(pods []*api.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) }
+				pod, _, err := GetFirstPod(client, t.Namespace, selector, 1*time.Minute, sortBy)
 				return pod, err
-			case *extensions.Job:
+			case *batch.Job:
 				selector, err := unversioned.LabelSelectorAsSelector(t.Spec.Selector)
 				if err != nil {
 					return nil, fmt.Errorf("invalid label selector: %v", err)
 				}
-				pod, _, err := GetFirstPod(client, t.Namespace, selector)
+				sortBy := func(pods []*api.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) }
+				pod, _, err := GetFirstPod(client, t.Namespace, selector, 1*time.Minute, sortBy)
 				return pod, err
 			case *api.Pod:
 				return t, nil
 			default:
-				gvk, err := api.Scheme.ObjectKind(object)
+				gvks, _, err := api.Scheme.ObjectKinds(object)
 				if err != nil {
 					return nil, err
 				}
-				return nil, fmt.Errorf("cannot attach to %v: not implemented", gvk)
+				return nil, fmt.Errorf("cannot attach to %v: not implemented", gvks[0])
+			}
+		},
+		// UpdatePodSpecForObject updates the pod specification for the provided object
+		UpdatePodSpecForObject: func(obj
runtime.Object, fn func(*api.PodSpec) error) (bool, error) { + // TODO: replace with a swagger schema based approach (identify pod template via schema introspection) + switch t := obj.(type) { + case *api.Pod: + return true, fn(&t.Spec) + case *api.ReplicationController: + if t.Spec.Template == nil { + t.Spec.Template = &api.PodTemplateSpec{} + } + return true, fn(&t.Spec.Template.Spec) + case *extensions.Deployment: + return true, fn(&t.Spec.Template.Spec) + case *extensions.DaemonSet: + return true, fn(&t.Spec.Template.Spec) + case *extensions.ReplicaSet: + return true, fn(&t.Spec.Template.Spec) + case *apps.PetSet: + return true, fn(&t.Spec.Template.Spec) + case *batch.Job: + return true, fn(&t.Spec.Template.Spec) + default: + return false, fmt.Errorf("the object is not a pod or does not have a pod template") } }, EditorEnvs: func() []string { return []string{"KUBE_EDITOR", "EDITOR"} }, + PrintObjectSpecificMessage: func(obj runtime.Object, out io.Writer) { + switch obj := obj.(type) { + case *api.Service: + if obj.Spec.Type == api.ServiceTypeNodePort { + msg := fmt.Sprintf( + `You have exposed your service on an external port on all nodes in your +cluster. If you want to expose this service to the external internet, you may +need to set up firewall rules for the service port(s) (%s) to serve traffic. + +See http://releases.k8s.io/HEAD/docs/user-guide/services-firewalls.md for more details. +`, + makePortsString(obj.Spec.Ports, true)) + out.Write([]byte(msg)) + } + + if _, ok := obj.Annotations[service.AnnotationLoadBalancerSourceRangesKey]; ok { + msg := fmt.Sprintf( + `You are using service annotation [service.beta.kubernetes.io/load-balancer-source-ranges]. +It has been promoted to field [loadBalancerSourceRanges] in service spec. This annotation will be deprecated in the future. +Please use the loadBalancerSourceRanges field instead. + +See http://releases.k8s.io/HEAD/docs/user-guide/services-firewalls.md for more details. +`) + out.Write([]byte(msg)) + } + } + }, } } -// GetFirstPod returns the first pod of an object from its namespace and selector and the number of matching pods -func GetFirstPod(client *client.Client, namespace string, selector labels.Selector) (*api.Pod, int, error) { - var pods *api.PodList - for pods == nil || len(pods.Items) == 0 { - var err error - options := api.ListOptions{LabelSelector: selector} - if pods, err = client.Pods(namespace).List(options); err != nil { - return nil, 0, err - } - if len(pods.Items) == 0 { - time.Sleep(2 * time.Second) - } +// GetFirstPod returns a pod matching the namespace and label selector +// and the number of all pods that match the label selector. 
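+// The matching pods are ordered by sortBy and the first one is returned; if no pod matches yet, +// the call watches until one appears or the timeout expires. Illustrative use (hypothetical +// namespace and selector values): pod, n, err := GetFirstPod(client, "default", selector, 20*time.Second, sortBy)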
+func GetFirstPod(client client.Interface, namespace string, selector labels.Selector, timeout time.Duration, sortBy func([]*api.Pod) sort.Interface) (*api.Pod, int, error) { + options := api.ListOptions{LabelSelector: selector} + + podList, err := client.Pods(namespace).List(options) + if err != nil { + return nil, 0, err + } + pods := []*api.Pod{} + for i := range podList.Items { + pod := podList.Items[i] + pods = append(pods, &pod) + } + if len(pods) > 0 { + sort.Sort(sortBy(pods)) + return pods[0], len(podList.Items), nil } - pod := &pods.Items[0] - return pod, len(pods.Items), nil + + // Watch until we observe a pod + options.ResourceVersion = podList.ResourceVersion + w, err := client.Pods(namespace).Watch(options) + if err != nil { + return nil, 0, err + } + defer w.Stop() + + condition := func(event watch.Event) (bool, error) { + return event.Type == watch.Added || event.Type == watch.Modified, nil + } + event, err := watch.Until(timeout, w, condition) + if err != nil { + return nil, 0, err + } + pod, ok := event.Object.(*api.Pod) + if !ok { + return nil, 0, fmt.Errorf("%#v is not a pod event", event) + } + return pod, 1, nil } // Command will stringify and return all environment arguments ie. a command run by a client @@ -639,7 +846,7 @@ func (f *Factory) BindFlags(flags *pflag.FlagSet) { // Normalize all flags that are coming from other packages or pre-configurations // a.k.a. change all "_" to "-". e.g. glog package - flags.SetNormalizeFunc(util.WordSepNormalizeFunc) + flags.SetNormalizeFunc(utilflag.WordSepNormalizeFunc) } // BindCommonFlags adds any flags defined by external projects (not part of pflags) @@ -648,11 +855,35 @@ func (f *Factory) BindExternalFlags(flags *pflag.FlagSet) { flags.AddGoFlagSet(flag.CommandLine) } +func makePortsString(ports []api.ServicePort, useNodePort bool) string { + pieces := make([]string, len(ports)) + for ix := range ports { + var port int32 + if useNodePort { + port = ports[ix].NodePort + } else { + port = ports[ix].Port + } + pieces[ix] = fmt.Sprintf("%s:%d", strings.ToLower(string(ports[ix].Protocol)), port) + } + return strings.Join(pieces, ",") +} + func getPorts(spec api.PodSpec) []string { result := []string{} for _, container := range spec.Containers { for _, port := range container.Ports { - result = append(result, strconv.Itoa(port.ContainerPort)) + result = append(result, strconv.Itoa(int(port.ContainerPort))) + } + } + return result +} + +func getProtocols(spec api.PodSpec) map[string]string { + result := make(map[string]string) + for _, container := range spec.Containers { + for _, port := range container.Ports { + result[strconv.Itoa(int(port.ContainerPort))] = string(port.Protocol) } } return result @@ -662,13 +893,23 @@ func getPorts(spec api.PodSpec) []string { func getServicePorts(spec api.ServiceSpec) []string { result := []string{} for _, servicePort := range spec.Ports { - result = append(result, strconv.Itoa(servicePort.Port)) + result = append(result, strconv.Itoa(int(servicePort.Port))) + } + return result +} + +// Extracts the protocols exposed by a service from the given service spec. 
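+// For example, a spec exposing ports 80/TCP and 53/UDP yields map[string]string{"80": "TCP", "53": "UDP"}.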
+func getServiceProtocols(spec api.ServiceSpec) map[string]string { + result := make(map[string]string) + for _, servicePort := range spec.Ports { + result[strconv.Itoa(int(servicePort.Port))] = string(servicePort.Protocol) } return result } type clientSwaggerSchema struct { c *client.Client + fedc *restclient.RESTClient cacheDir string mapper meta.RESTMapper } @@ -729,7 +970,7 @@ func writeSchemaFile(schemaData []byte, cacheDir, cacheFile, prefix, groupVersio return nil } -func getSchemaAndValidate(c schemaClient, data []byte, prefix, groupVersion, cacheDir string) (err error) { +func getSchemaAndValidate(c schemaClient, data []byte, prefix, groupVersion, cacheDir string, delegate validation.Schema) (err error) { var schemaData []byte var firstSeen bool fullDir, err := substituteUserHome(cacheDir) @@ -750,7 +991,7 @@ func getSchemaAndValidate(c schemaClient, data []byte, prefix, groupVersion, cac return err } } - schema, err := validation.NewSwaggerSchemaFromBytes(schemaData) + schema, err := validation.NewSwaggerSchemaFromBytes(schemaData, delegate) if err != nil { return err } @@ -763,7 +1004,7 @@ func getSchemaAndValidate(c schemaClient, data []byte, prefix, groupVersion, cac if err != nil { return err } - schema, err := validation.NewSwaggerSchemaFromBytes(schemaData) + schema, err := validation.NewSwaggerSchemaFromBytes(schemaData, delegate) if err != nil { return err } @@ -802,21 +1043,50 @@ func (c *clientSwaggerSchema) ValidateBytes(data []byte) error { if c.c.AutoscalingClient == nil { return errors.New("unable to validate: no autoscaling client") } - return getSchemaAndValidate(c.c.AutoscalingClient.RESTClient, data, "apis/", gvk.GroupVersion().String(), c.cacheDir) + return getSchemaAndValidate(c.c.AutoscalingClient.RESTClient, data, "apis/", gvk.GroupVersion().String(), c.cacheDir, c) + } + if gvk.Group == policy.GroupName { + if c.c.PolicyClient == nil { + return errors.New("unable to validate: no policy client") + } + return getSchemaAndValidate(c.c.PolicyClient.RESTClient, data, "apis/", gvk.GroupVersion().String(), c.cacheDir, c) } + if gvk.Group == apps.GroupName { + if c.c.AppsClient == nil { + return errors.New("unable to validate: no apps client") + } + return getSchemaAndValidate(c.c.AppsClient.RESTClient, data, "apis/", gvk.GroupVersion().String(), c.cacheDir, c) + } + if gvk.Group == batch.GroupName { if c.c.BatchClient == nil { return errors.New("unable to validate: no batch client") } - return getSchemaAndValidate(c.c.BatchClient.RESTClient, data, "apis/", gvk.GroupVersion().String(), c.cacheDir) + return getSchemaAndValidate(c.c.BatchClient.RESTClient, data, "apis/", gvk.GroupVersion().String(), c.cacheDir, c) + } + if gvk.Group == rbac.GroupName { + if c.c.RbacClient == nil { + return errors.New("unable to validate: no rbac client") + } + return getSchemaAndValidate(c.c.RbacClient.RESTClient, data, "apis/", gvk.GroupVersion().String(), c.cacheDir, c) + } + if registered.IsThirdPartyAPIGroupVersion(gvk.GroupVersion()) { + // Don't attempt to validate third party objects + return nil } if gvk.Group == extensions.GroupName { if c.c.ExtensionsClient == nil { return errors.New("unable to validate: no experimental client") } - return getSchemaAndValidate(c.c.ExtensionsClient.RESTClient, data, "apis/", gvk.GroupVersion().String(), c.cacheDir) + return getSchemaAndValidate(c.c.ExtensionsClient.RESTClient, data, "apis/", gvk.GroupVersion().String(), c.cacheDir, c) + } + if gvk.Group == federation.GroupName { + if c.fedc == nil { + return errors.New("unable to
validate: no federation client") + } + return getSchemaAndValidate(c.fedc, data, "apis/", gvk.GroupVersion().String(), c.cacheDir, c) } - return getSchemaAndValidate(c.c.RESTClient, data, "api", gvk.GroupVersion().String(), c.cacheDir) + return getSchemaAndValidate(c.c.RESTClient, data, "api", gvk.GroupVersion().String(), c.cacheDir, c) } // DefaultClientConfig creates a clientcmd.ClientConfig with the following hierarchy: @@ -874,14 +1144,13 @@ func DefaultClientConfig(flags *pflag.FlagSet) clientcmd.ClientConfig { } // PrintObject prints an api object given command line flags to modify the output format -func (f *Factory) PrintObject(cmd *cobra.Command, obj runtime.Object, out io.Writer) error { - mapper, _ := f.Object() - gvk, err := api.Scheme.ObjectKind(obj) +func (f *Factory) PrintObject(cmd *cobra.Command, mapper meta.RESTMapper, obj runtime.Object, out io.Writer) error { + gvks, _, err := api.Scheme.ObjectKinds(obj) if err != nil { return err } - mapping, err := mapper.RESTMapping(gvk.GroupKind()) + mapping, err := mapper.RESTMapping(gvks[0].GroupKind()) if err != nil { return err } @@ -936,8 +1205,8 @@ func (f *Factory) PrinterForMapping(cmd *cobra.Command, mapping *meta.RESTMappin } // One stop shopping for a Builder -func (f *Factory) NewBuilder() *resource.Builder { - mapper, typer := f.Object() +func (f *Factory) NewBuilder(thirdPartyDiscovery bool) *resource.Builder { + mapper, typer := f.Object(thirdPartyDiscovery) return resource.NewBuilder(mapper, typer, resource.ClientMapperFunc(f.ClientForMapping), f.Decoder(true)) } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/util/factory_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/util/factory_test.go new file mode 100644 index 000000000000..4af5fcb88831 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/util/factory_test.go @@ -0,0 +1,716 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package util + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "os" + "os/user" + "path" + "reflect" + "sort" + "strings" + "testing" + "time" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/api/validation" + "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/pkg/client/unversioned/clientcmd" + clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api" + "k8s.io/kubernetes/pkg/client/unversioned/fake" + "k8s.io/kubernetes/pkg/client/unversioned/testclient" + "k8s.io/kubernetes/pkg/controller" + "k8s.io/kubernetes/pkg/kubectl" + "k8s.io/kubernetes/pkg/labels" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/util/flag" + "k8s.io/kubernetes/pkg/watch" +) + +func TestNewFactoryDefaultFlagBindings(t *testing.T) { + factory := NewFactory(nil) + + if !factory.flags.HasFlags() { + t.Errorf("Expected flags, but didn't get any") + } +} + +func TestNewFactoryNoFlagBindings(t *testing.T) { + clientConfig := clientcmd.NewDefaultClientConfig(*clientcmdapi.NewConfig(), &clientcmd.ConfigOverrides{}) + factory := NewFactory(clientConfig) + + if factory.flags.HasFlags() { + t.Errorf("Expected zero flags, but got %v", factory.flags) + } +} + +func TestPortsForObject(t *testing.T) { + f := NewFactory(nil) + + pod := &api.Pod{ + ObjectMeta: api.ObjectMeta{Name: "baz", Namespace: "test", ResourceVersion: "12"}, + Spec: api.PodSpec{ + Containers: []api.Container{ + { + Ports: []api.ContainerPort{ + { + ContainerPort: 101, + }, + }, + }, + }, + }, + } + + expected := []string{"101"} + got, err := f.PortsForObject(pod) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + if len(expected) != len(got) { + t.Fatalf("Ports size mismatch! Expected %d, got %d", len(expected), len(got)) + } + + sort.Strings(expected) + sort.Strings(got) + + for i, port := range got { + if port != expected[i] { + t.Fatalf("Port mismatch! Expected %s, got %s", expected[i], port) + } + } +} + +func TestProtocolsForObject(t *testing.T) { + f := NewFactory(nil) + + pod := &api.Pod{ + ObjectMeta: api.ObjectMeta{Name: "baz", Namespace: "test", ResourceVersion: "12"}, + Spec: api.PodSpec{ + Containers: []api.Container{ + { + Ports: []api.ContainerPort{ + { + ContainerPort: 101, + Protocol: api.ProtocolTCP, + }, + { + ContainerPort: 102, + Protocol: api.ProtocolUDP, + }, + }, + }, + }, + }, + } + + expected := "101/TCP,102/UDP" + protocolsMap, err := f.ProtocolsForObject(pod) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + got := kubectl.MakeProtocols(protocolsMap) + expectedSlice := strings.Split(expected, ",") + gotSlice := strings.Split(got, ",") + + sort.Strings(expectedSlice) + sort.Strings(gotSlice) + + for i, protocol := range gotSlice { + if protocol != expectedSlice[i] { + t.Fatalf("Protocols mismatch! 
Expected %s, got %s", expectedSlice[i], protocol) + } + } +} + +func TestLabelsForObject(t *testing.T) { + f := NewFactory(nil) + + tests := []struct { + name string + object runtime.Object + expected string + err error + }{ + { + name: "successful re-use of labels", + object: &api.Service{ + ObjectMeta: api.ObjectMeta{Name: "baz", Namespace: "test", Labels: map[string]string{"svc": "test"}}, + TypeMeta: unversioned.TypeMeta{Kind: "Service", APIVersion: "v1"}, + }, + expected: "svc=test", + err: nil, + }, + { + name: "empty labels", + object: &api.Service{ + ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: "test", Labels: map[string]string{}}, + TypeMeta: unversioned.TypeMeta{Kind: "Service", APIVersion: "v1"}, + }, + expected: "", + err: nil, + }, + { + name: "nil labels", + object: &api.Service{ + ObjectMeta: api.ObjectMeta{Name: "zen", Namespace: "test", Labels: nil}, + TypeMeta: unversioned.TypeMeta{Kind: "Service", APIVersion: "v1"}, + }, + expected: "", + err: nil, + }, + } + + for _, test := range tests { + gotLabels, err := f.LabelsForObject(test.object) + if err != test.err { + t.Fatalf("%s: Error mismatch: Expected %v, got %v", test.name, test.err, err) + } + got := kubectl.MakeLabels(gotLabels) + if test.expected != got { + t.Fatalf("%s: Labels mismatch! Expected %s, got %s", test.name, test.expected, got) + } + + } +} + +func TestCanBeExposed(t *testing.T) { + factory := NewFactory(nil) + tests := []struct { + kind unversioned.GroupKind + expectErr bool + }{ + { + kind: api.Kind("ReplicationController"), + expectErr: false, + }, + { + kind: api.Kind("Node"), + expectErr: true, + }, + } + + for _, test := range tests { + err := factory.CanBeExposed(test.kind) + if test.expectErr && err == nil { + t.Error("unexpected non-error") + } + if !test.expectErr && err != nil { + t.Errorf("unexpected error: %v", err) + } + } +} + +func TestFlagUnderscoreRenaming(t *testing.T) { + factory := NewFactory(nil) + + factory.flags.SetNormalizeFunc(flag.WordSepNormalizeFunc) + factory.flags.Bool("valid_flag", false, "bool value") + + // In case of failure of this test check this PR: spf13/pflag#23 + if factory.flags.Lookup("valid_flag").Name != "valid-flag" { + t.Fatalf("Expected flag name to be valid-flag, got %s", factory.flags.Lookup("valid_flag").Name) + } +} + +func loadSchemaForTest() (validation.Schema, error) { + pathToSwaggerSpec := "../../../../api/swagger-spec/" + testapi.Default.GroupVersion().Version + ".json" + data, err := ioutil.ReadFile(pathToSwaggerSpec) + if err != nil { + return nil, err + } + return validation.NewSwaggerSchemaFromBytes(data, nil) +} + +func header() http.Header { + header := http.Header{} + header.Set("Content-Type", runtime.ContentTypeJSON) + return header +} + +func TestRefetchSchemaWhenValidationFails(t *testing.T) { + schema, err := loadSchemaForTest() + if err != nil { + t.Errorf("Error loading schema: %v", err) + t.FailNow() + } + output, err := json.Marshal(schema) + if err != nil { + t.Errorf("Error serializing schema: %v", err) + t.FailNow() + } + requests := map[string]int{} + + c := &fake.RESTClient{ + Codec: testapi.Default.Codec(), + Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { + switch p, m := req.URL.Path, req.Method; { + case strings.HasPrefix(p, "/swaggerapi") && m == "GET": + requests[p] = requests[p] + 1 + return &http.Response{StatusCode: 200, Header: header(), Body: ioutil.NopCloser(bytes.NewBuffer(output))}, nil + default: + t.Fatalf("unexpected request: %#v\n%#v", req.URL, req) + return nil, nil + } 
+ }), + } + dir := os.TempDir() + "/schemaCache" + os.RemoveAll(dir) + + fullDir, err := substituteUserHome(dir) + if err != nil { + t.Errorf("Error getting fullDir: %v", err) + t.FailNow() + } + cacheFile := path.Join(fullDir, "foo", "bar", schemaFileName) + err = writeSchemaFile(output, fullDir, cacheFile, "foo", "bar") + if err != nil { + t.Errorf("Error building old cache schema: %v", err) + t.FailNow() + } + + obj := &extensions.Deployment{} + data, err := runtime.Encode(testapi.Extensions.Codec(), obj) + if err != nil { + t.Errorf("unexpected error: %v", err) + t.FailNow() + } + + // Re-get request, should use HTTP and write + if err := getSchemaAndValidate(c, data, "foo", "bar", dir, nil); err != nil { + t.Errorf("unexpected error validating: %v", err) + } + if requests["/swaggerapi/foo/bar"] != 1 { + t.Errorf("expected 1 schema request, saw: %d", requests["/swaggerapi/foo/bar"]) + } +} + +func TestValidateCachesSchema(t *testing.T) { + schema, err := loadSchemaForTest() + if err != nil { + t.Errorf("Error loading schema: %v", err) + t.FailNow() + } + output, err := json.Marshal(schema) + if err != nil { + t.Errorf("Error serializing schema: %v", err) + t.FailNow() + } + requests := map[string]int{} + + c := &fake.RESTClient{ + Codec: testapi.Default.Codec(), + Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { + switch p, m := req.URL.Path, req.Method; { + case strings.HasPrefix(p, "/swaggerapi") && m == "GET": + requests[p] = requests[p] + 1 + return &http.Response{StatusCode: 200, Header: header(), Body: ioutil.NopCloser(bytes.NewBuffer(output))}, nil + default: + t.Fatalf("unexpected request: %#v\n%#v", req.URL, req) + return nil, nil + } + }), + } + dir := os.TempDir() + "/schemaCache" + os.RemoveAll(dir) + + obj := &api.Pod{} + data, err := runtime.Encode(testapi.Default.Codec(), obj) + if err != nil { + t.Errorf("unexpected error: %v", err) + t.FailNow() + } + + // Initial request, should use HTTP and write + if err := getSchemaAndValidate(c, data, "foo", "bar", dir, nil); err != nil { + t.Errorf("unexpected error validating: %v", err) + } + if _, err := os.Stat(path.Join(dir, "foo", "bar", schemaFileName)); err != nil { + t.Errorf("unexpected missing cache file: %v", err) + } + if requests["/swaggerapi/foo/bar"] != 1 { + t.Errorf("expected 1 schema request, saw: %d", requests["/swaggerapi/foo/bar"]) + } + + // Same version and group; the cached copy fails validation, so expect a refetch + if err := getSchemaAndValidate(c, data, "foo", "bar", dir, nil); err != nil { + t.Errorf("unexpected error validating: %v", err) + } + if requests["/swaggerapi/foo/bar"] != 2 { + t.Errorf("expected 2 schema requests, saw: %d", requests["/swaggerapi/foo/bar"]) + } + + // Different API group, should go to HTTP and write + if err := getSchemaAndValidate(c, data, "foo", "baz", dir, nil); err != nil { + t.Errorf("unexpected error validating: %v", err) + } + if _, err := os.Stat(path.Join(dir, "foo", "baz", schemaFileName)); err != nil { + t.Errorf("unexpected missing cache file: %v", err) + } + if requests["/swaggerapi/foo/baz"] != 1 { + t.Errorf("expected 1 schema request, saw: %d", requests["/swaggerapi/foo/baz"]) + } + + // Different version, should go to HTTP and write + if err := getSchemaAndValidate(c, data, "foo2", "bar", dir, nil); err != nil { + t.Errorf("unexpected error validating: %v", err) + } + if _, err := os.Stat(path.Join(dir, "foo2", "bar", schemaFileName)); err != nil { + t.Errorf("unexpected missing cache file: %v", err) + } + if requests["/swaggerapi/foo2/bar"] != 1 { + t.Errorf("expected 1 schema request, saw: %d", requests["/swaggerapi/foo2/bar"]) + } + + // No cache dir, should go straight to HTTP and not write + if err := getSchemaAndValidate(c, data, "foo", "blah", "", nil); err != nil { + t.Errorf("unexpected error validating: %v", err) + } + if requests["/swaggerapi/foo/blah"] != 1 { + t.Errorf("expected 1 schema request, saw: %d", requests["/swaggerapi/foo/blah"]) + } + if _, err := os.Stat(path.Join(dir, "foo", "blah", schemaFileName)); err == nil || !os.IsNotExist(err) { + t.Errorf("unexpected cache file error: %v", err) + } +} + +func TestSubstituteUser(t *testing.T) { + usr, err := user.Current() + if err != nil { + t.Logf("SKIPPING TEST: unexpected error: %v", err) + return + } + tests := []struct { + input string + expected string + expectErr bool + }{ + {input: "~/foo", expected: path.Join(os.Getenv("HOME"), "foo")}, + {input: "~" + usr.Username + "/bar", expected: usr.HomeDir + "/bar"}, + {input: "/foo/bar", expected: "/foo/bar"}, + {input: "~doesntexist/bar", expectErr: true}, + } + for _, test := range tests { + output, err := substituteUserHome(test.input) + if test.expectErr { + if err == nil { + t.Error("unexpected non-error") + } + continue + } + if err != nil { + t.Errorf("unexpected error: %v", err) + } + if output != test.expected { + t.Errorf("expected: %s, saw: %s", test.expected, output) + } + } +} + +func newPodList(count, isUnready, isUnhealthy int, labels map[string]string) *api.PodList { + pods := []api.Pod{} + for i := 0; i < count; i++ { + newPod := api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: fmt.Sprintf("pod-%d", i+1), + Namespace: api.NamespaceDefault, + CreationTimestamp: unversioned.Date(2016, time.April, 1, 1, 0, i, 0, time.UTC), + Labels: labels, + }, + Status: api.PodStatus{ + Conditions: []api.PodCondition{ + { + Status: api.ConditionTrue, + Type: api.PodReady, + }, + }, + }, + } + pods = append(pods, newPod) + } + if isUnready > -1 && isUnready < count { + pods[isUnready].Status.Conditions[0].Status = api.ConditionFalse + } + if isUnhealthy > -1 && isUnhealthy < count { + pods[isUnhealthy].Status.ContainerStatuses = []api.ContainerStatus{{RestartCount: 5}} + } + return &api.PodList{ + Items: pods, + } +} + +func TestGetFirstPod(t *testing.T) { + labelSet := map[string]string{"test": "selector"} + tests := []struct { + name string + + podList *api.PodList + watching []watch.Event + sortBy func([]*api.Pod) sort.Interface + + expected *api.Pod + expectedNum int + expectedErr bool + }{ + { + name: "kubectl logs - two ready pods", + podList: newPodList(2, -1, -1, labelSet), + sortBy: func(pods []*api.Pod) sort.Interface { return controller.ActivePods(pods) }, + expected: &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: "pod-2", + Namespace: api.NamespaceDefault, + CreationTimestamp: unversioned.Date(2016, time.April, 1, 1, 0, 1, 0, time.UTC), + Labels: map[string]string{"test": "selector"}, + }, + Status: api.PodStatus{ + Conditions: []api.PodCondition{ + { + Status: api.ConditionTrue, + Type: api.PodReady, + }, + }, + }, + }, + expectedNum: 2, + }, + { + name: "kubectl logs - one unhealthy, one healthy", + podList: newPodList(2, -1, 1, labelSet), + sortBy: func(pods []*api.Pod) sort.Interface { return controller.ActivePods(pods) }, + expected: &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: "pod-2", + Namespace: api.NamespaceDefault, + CreationTimestamp: unversioned.Date(2016, time.April, 1, 1, 0, 1, 0, time.UTC), + Labels: map[string]string{"test": "selector"}, + }, + Status: api.PodStatus{ + Conditions: []api.PodCondition{ + { + Status: api.ConditionTrue, + Type:
api.PodReady, + }, + }, + ContainerStatuses: []api.ContainerStatus{{RestartCount: 5}}, + }, + }, + expectedNum: 2, + }, + { + name: "kubectl attach - two ready pods", + podList: newPodList(2, -1, -1, labelSet), + sortBy: func(pods []*api.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) }, + expected: &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: "pod-1", + Namespace: api.NamespaceDefault, + CreationTimestamp: unversioned.Date(2016, time.April, 1, 1, 0, 0, 0, time.UTC), + Labels: map[string]string{"test": "selector"}, + }, + Status: api.PodStatus{ + Conditions: []api.PodCondition{ + { + Status: api.ConditionTrue, + Type: api.PodReady, + }, + }, + }, + }, + expectedNum: 2, + }, + { + name: "kubectl attach - wait for ready pod", + podList: newPodList(1, 1, -1, labelSet), + watching: []watch.Event{ + { + Type: watch.Modified, + Object: &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: "pod-1", + Namespace: api.NamespaceDefault, + CreationTimestamp: unversioned.Date(2016, time.April, 1, 1, 0, 0, 0, time.UTC), + Labels: map[string]string{"test": "selector"}, + }, + Status: api.PodStatus{ + Conditions: []api.PodCondition{ + { + Status: api.ConditionTrue, + Type: api.PodReady, + }, + }, + }, + }, + }, + }, + sortBy: func(pods []*api.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) }, + expected: &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: "pod-1", + Namespace: api.NamespaceDefault, + CreationTimestamp: unversioned.Date(2016, time.April, 1, 1, 0, 0, 0, time.UTC), + Labels: map[string]string{"test": "selector"}, + }, + Status: api.PodStatus{ + Conditions: []api.PodCondition{ + { + Status: api.ConditionTrue, + Type: api.PodReady, + }, + }, + }, + }, + expectedNum: 1, + }, + } + + for i := range tests { + test := tests[i] + client := &testclient.Fake{} + client.PrependReactor("list", "pods", func(action testclient.Action) (handled bool, ret runtime.Object, err error) { + return true, test.podList, nil + }) + if len(test.watching) > 0 { + watcher := watch.NewFake() + for _, event := range test.watching { + switch event.Type { + case watch.Added: + go watcher.Add(event.Object) + case watch.Modified: + go watcher.Modify(event.Object) + } + } + client.PrependWatchReactor("pods", testclient.DefaultWatchReactor(watcher, nil)) + } + selector := labels.Set(labelSet).AsSelector() + + pod, numPods, err := GetFirstPod(client, api.NamespaceDefault, selector, 1*time.Minute, test.sortBy) + if !test.expectedErr && err != nil { + t.Errorf("%s: unexpected error: %v", test.name, err) + continue + } + if test.expectedErr && err == nil { + t.Errorf("%s: expected an error", test.name) + continue + } + if test.expectedNum != numPods { + t.Errorf("%s: expected %d pods, got %d", test.name, test.expectedNum, numPods) + continue + } + if !reflect.DeepEqual(test.expected, pod) { + t.Errorf("%s:\nexpected pod:\n%#v\ngot:\n%#v\n\n", test.name, test.expected, pod) + } + } +} + +func TestPrintObjectSpecificMessage(t *testing.T) { + f := NewFactory(nil) + tests := []struct { + obj runtime.Object + expectOutput bool + }{ + { + obj: &api.Service{}, + expectOutput: false, + }, + { + obj: &api.Pod{}, + expectOutput: false, + }, + { + obj: &api.Service{Spec: api.ServiceSpec{Type: api.ServiceTypeLoadBalancer}}, + expectOutput: false, + }, + { + obj: &api.Service{Spec: api.ServiceSpec{Type: api.ServiceTypeNodePort}}, + expectOutput: true, + }, + } + for _, test := range tests { + buff := &bytes.Buffer{} + f.PrintObjectSpecificMessage(test.obj, buff) + if test.expectOutput && buff.Len() == 0 { 
+ t.Errorf("Expected output, saw none for %v", test.obj) + } + if !test.expectOutput && buff.Len() > 0 { + t.Errorf("Expected no output, saw %s for %v", buff.String(), test.obj) + } + } +} + +func TestMakePortsString(t *testing.T) { + tests := []struct { + ports []api.ServicePort + useNodePort bool + expectedOutput string + }{ + {ports: nil, expectedOutput: ""}, + {ports: []api.ServicePort{}, expectedOutput: ""}, + {ports: []api.ServicePort{ + { + Port: 80, + Protocol: "TCP", + }, + }, + expectedOutput: "tcp:80", + }, + {ports: []api.ServicePort{ + { + Port: 80, + Protocol: "TCP", + }, + { + Port: 8080, + Protocol: "UDP", + }, + { + Port: 9000, + Protocol: "TCP", + }, + }, + expectedOutput: "tcp:80,udp:8080,tcp:9000", + }, + {ports: []api.ServicePort{ + { + Port: 80, + NodePort: 9090, + Protocol: "TCP", + }, + { + Port: 8080, + NodePort: 80, + Protocol: "UDP", + }, + }, + useNodePort: true, + expectedOutput: "tcp:9090,udp:80", + }, + } + for _, test := range tests { + output := makePortsString(test.ports, test.useNodePort) + if output != test.expectedOutput { + t.Errorf("expected: %s, saw: %s.", test.expectedOutput, output) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/util/helpers.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/util/helpers.go index b1b1ac03ce70..d65841e2f387 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/util/helpers.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/util/helpers.go @@ -22,16 +22,16 @@ import ( "fmt" "io" "io/ioutil" - "net/http" "net/url" "os" "strings" "time" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/api/meta" "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/apimachinery/registered" + "k8s.io/kubernetes/pkg/client/typed/discovery" "k8s.io/kubernetes/pkg/client/unversioned/clientcmd" "k8s.io/kubernetes/pkg/kubectl" "k8s.io/kubernetes/pkg/kubectl/resource" @@ -171,10 +171,15 @@ func StandardErrorMessage(err error) (string, bool) { if debugErr, ok := err.(debugError); ok { glog.V(4).Infof(debugErr.DebugError()) } - _, isStatus := err.(errors.APIStatus) + status, isStatus := err.(errors.APIStatus) switch { case isStatus: - return fmt.Sprintf("Error from server: %s", err.Error()), true + switch s := status.Status(); { + case s.Reason == "Unauthorized": + return fmt.Sprintf("error: You must be logged in to the server (%s)", s.Message), true + default: + return fmt.Sprintf("Error from server: %s", err.Error()), true + } case errors.IsUnexpectedObjectError(err): return fmt.Sprintf("Server returned an unexpected response: %s", err.Error()), true } @@ -324,6 +329,16 @@ func GetFlagDuration(cmd *cobra.Command, flag string) time.Duration { func AddValidateFlags(cmd *cobra.Command) { cmd.Flags().Bool("validate", true, "If true, use a schema to validate the input before sending it") cmd.Flags().String("schema-cache-dir", fmt.Sprintf("~/%s/%s", clientcmd.RecommendedHomeDir, clientcmd.RecommendedSchemaName), fmt.Sprintf("If non-empty, load/store cached API schemas in this directory, default is '$HOME/%s/%s'", clientcmd.RecommendedHomeDir, clientcmd.RecommendedSchemaName)) + cmd.MarkFlagFilename("schema-cache-dir") +} + +func AddRecursiveFlag(cmd *cobra.Command, value *bool) { + cmd.Flags().BoolVarP(value, "recursive", "R", *value, "If true, process directory recursively.") +} + +// AddDryRunFlag adds dry-run flag to a command. Usually used by mutations. 
+func AddDryRunFlag(cmd *cobra.Command) { + cmd.Flags().Bool("dry-run", false, "If true, only print the object that would be sent, without sending it.") } func AddApplyAnnotationFlags(cmd *cobra.Command) { @@ -334,7 +349,7 @@ func AddApplyAnnotationFlags(cmd *cobra.Command) { // TODO: need to take a pass at other generator commands to use this set of flags func AddGeneratorFlags(cmd *cobra.Command, defaultGenerator string) { cmd.Flags().String("generator", defaultGenerator, "The name of the API generator to use.") - cmd.Flags().Bool("dry-run", false, "If true, only print the object that would be sent, without sending it.") + AddDryRunFlag(cmd) } func ReadConfigDataFromReader(reader io.Reader, source string) ([]byte, error) { @@ -350,45 +365,6 @@ func ReadConfigDataFromReader(reader io.Reader, source string) ([]byte, error) { return data, nil } -// ReadConfigData reads the bytes from the specified filesystem or network -// location or from stdin if location == "-". -// TODO: replace with resource.Builder -func ReadConfigData(location string) ([]byte, error) { - if len(location) == 0 { - return nil, fmt.Errorf("location given but empty") - } - - if location == "-" { - // Read from stdin. - return ReadConfigDataFromReader(os.Stdin, "stdin ('-')") - } - - // Use the location as a file path or URL. - return ReadConfigDataFromLocation(location) -} - -// TODO: replace with resource.Builder -func ReadConfigDataFromLocation(location string) ([]byte, error) { - // we look for http:// or https:// to determine if valid URL, otherwise do normal file IO - if strings.Index(location, "http://") == 0 || strings.Index(location, "https://") == 0 { - resp, err := http.Get(location) - if err != nil { - return nil, fmt.Errorf("unable to access URL %s: %v\n", location, err) - } - defer resp.Body.Close() - if resp.StatusCode != 200 { - return nil, fmt.Errorf("unable to read URL, server reported %d %s", resp.StatusCode, resp.Status) - } - return ReadConfigDataFromReader(resp.Body, location) - } else { - file, err := os.Open(location) - if err != nil { - return nil, fmt.Errorf("unable to read %s: %v\n", location, err) - } - return ReadConfigDataFromReader(file, location) - } -} - // Merge requires JSON serialization // TODO: merge assumes JSON serialization, and does not properly abstract API retrieval func Merge(codec runtime.Codec, dst runtime.Object, fragment, kind string) (runtime.Object, error) { @@ -462,16 +438,22 @@ func GetRecordFlag(cmd *cobra.Command) bool { return GetFlagBool(cmd, "record") } +func GetDryRunFlag(cmd *cobra.Command) bool { + return GetFlagBool(cmd, "dry-run") +} + // RecordChangeCause annotate change-cause to input runtime object. 
func RecordChangeCause(obj runtime.Object, changeCause string) error { - meta, err := api.ObjectMetaFor(obj) + accessor, err := meta.Accessor(obj) if err != nil { return err } - if meta.Annotations == nil { - meta.Annotations = make(map[string]string) + annotations := accessor.GetAnnotations() + if annotations == nil { + annotations = make(map[string]string) } - meta.Annotations[kubectl.ChangeCauseAnnotation] = changeCause + annotations[kubectl.ChangeCauseAnnotation] = changeCause + accessor.SetAnnotations(annotations) return nil } @@ -505,3 +487,110 @@ func ContainsChangeCause(info *resource.Info) bool { func ShouldRecord(cmd *cobra.Command, info *resource.Info) bool { return GetRecordFlag(cmd) || ContainsChangeCause(info) } + +func GetThirdPartyGroupVersions(discovery discovery.DiscoveryInterface) ([]unversioned.GroupVersion, []unversioned.GroupVersionKind, error) { + result := []unversioned.GroupVersion{} + gvks := []unversioned.GroupVersionKind{} + + groupList, err := discovery.ServerGroups() + if err != nil { + // On forbidden or not found, just return empty lists. + if errors.IsForbidden(err) || errors.IsNotFound(err) { + return result, gvks, nil + } + + return nil, nil, err + } + + for ix := range groupList.Groups { + group := &groupList.Groups[ix] + for jx := range group.Versions { + gv, err2 := unversioned.ParseGroupVersion(group.Versions[jx].GroupVersion) + if err2 != nil { + return nil, nil, err2 + } + // Skip GroupVersionKinds that have been statically registered. + if registered.IsRegisteredVersion(gv) { + continue + } + result = append(result, gv) + + resourceList, err := discovery.ServerResourcesForGroupVersion(group.Versions[jx].GroupVersion) + if err != nil { + return nil, nil, err + } + for kx := range resourceList.APIResources { + gvks = append(gvks, gv.WithKind(resourceList.APIResources[kx].Kind)) + } + } + } + return result, gvks, nil +} + +func GetIncludeThirdPartyAPIs(cmd *cobra.Command) bool { + if cmd.Flags().Lookup("include-extended-apis") == nil { + return false + } + return GetFlagBool(cmd, "include-extended-apis") +} + +func AddInclude3rdPartyFlags(cmd *cobra.Command) { + cmd.Flags().Bool("include-extended-apis", true, "If true, include definitions of new APIs via calls to the API server. [default true]") +} + +// GetResourcesAndPairs retrieves resources and "KEY=VALUE or KEY-" pair args from given args +func GetResourcesAndPairs(args []string, pairType string) (resources []string, pairArgs []string, err error) { + foundPair := false + for _, s := range args { + nonResource := strings.Contains(s, "=") || strings.HasSuffix(s, "-") + switch { + case !foundPair && nonResource: + foundPair = true + fallthrough + case foundPair && nonResource: + pairArgs = append(pairArgs, s) + case !foundPair && !nonResource: + resources = append(resources, s) + case foundPair && !nonResource: + err = fmt.Errorf("all resources must be specified before %s changes: %s", pairType, s) + return + } + } + return +} + +// ParsePairs retrieves new and remove pairs (if supportRemove is true) from "KEY=VALUE or KEY-" pair args +func ParsePairs(pairArgs []string, pairType string, supportRemove bool) (newPairs map[string]string, removePairs []string, err error) { + newPairs = map[string]string{} + if supportRemove { + removePairs = []string{} + } + var invalidBuf bytes.Buffer + + for _, pairArg := range pairArgs { + if strings.Contains(pairArg, "=") { + parts := strings.SplitN(pairArg, "=", 2) + if len(parts) != 2 || len(parts[1]) == 0 { + if invalidBuf.Len() > 0 { + invalidBuf.WriteString(", ") + } + invalidBuf.WriteString(pairArg) + } else { + newPairs[parts[0]] = parts[1] + } + } else if supportRemove && strings.HasSuffix(pairArg, "-") { + removePairs = append(removePairs, pairArg[:len(pairArg)-1]) + } else { + if invalidBuf.Len() > 0 { + invalidBuf.WriteString(", ") + } + invalidBuf.WriteString(pairArg) + } + } + if invalidBuf.Len() > 0 { + err = fmt.Errorf("invalid %s format: %s", pairType, invalidBuf.String()) + return + } + + return +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/util/helpers_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/util/helpers_test.go new file mode 100644 index 000000000000..95c68070d9a1 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/util/helpers_test.go @@ -0,0 +1,304 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package util + +import ( + "fmt" + "io/ioutil" + "net/http" + "reflect" + "strings" + "syscall" + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/errors" + "k8s.io/kubernetes/pkg/api/meta" + "k8s.io/kubernetes/pkg/api/testapi" + apitesting "k8s.io/kubernetes/pkg/api/testing" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/util/validation/field" +) + +func TestMerge(t *testing.T) { + grace := int64(30) + tests := []struct { + obj runtime.Object + fragment string + expected runtime.Object + expectErr bool + kind string + }{ + { + kind: "Pod", + obj: &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + }, + }, + fragment: fmt.Sprintf(`{ "apiVersion": "%s" }`, testapi.Default.GroupVersion().String()), + expected: &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + }, + Spec: apitesting.DeepEqualSafePodSpec(), + }, + }, + /* TODO: uncomment this test once Merge is updated to use + strategic-merge-patch. See #8449. + { + kind: "Pod", + obj: &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + }, + Spec: api.PodSpec{ + Containers: []api.Container{ + api.Container{ + Name: "c1", + Image: "red-image", + }, + api.Container{ + Name: "c2", + Image: "blue-image", + }, + }, + }, + }, + fragment: fmt.Sprintf(`{ "apiVersion": "%s", "spec": { "containers": [ { "name": "c1", "image": "green-image" } ] } }`, testapi.Default.GroupVersion().String()), + expected: &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + }, + Spec: api.PodSpec{ + Containers: []api.Container{ + api.Container{ + Name: "c1", + Image: "green-image", + }, + api.Container{ + Name: "c2", + Image: "blue-image", + }, + }, + }, + }, + }, */ + { + kind: "Pod", + obj: &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + }, + }, + fragment: fmt.Sprintf(`{ "apiVersion": "%s", "spec": { "volumes": [ {"name": "v1"}, {"name": "v2"} ] } }`, testapi.Default.GroupVersion().String()), + expected: &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + }, + Spec: api.PodSpec{ + Volumes: []api.Volume{ + { + Name: "v1", + VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{}}, + }, + { + Name: "v2", + VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{}}, + }, + }, + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + TerminationGracePeriodSeconds: &grace, + SecurityContext: &api.PodSecurityContext{}, + }, + }, + }, + { + kind: "Pod", + obj: &api.Pod{}, + fragment: "invalid json", + expected: &api.Pod{}, + expectErr: true, + }, + { + kind: "Service", + obj: &api.Service{}, + fragment: `{ "apiVersion": "badVersion" }`, + expectErr: true, + }, + { + kind: "Service", + obj: &api.Service{ + Spec: api.ServiceSpec{}, + }, + fragment: fmt.Sprintf(`{ "apiVersion": "%s", "spec": { "ports": [ { "port": 0 } ] } }`, testapi.Default.GroupVersion().String()), + expected: &api.Service{ + Spec: api.ServiceSpec{ + SessionAffinity: "None", + Type: api.ServiceTypeClusterIP, + Ports: []api.ServicePort{ + { + Protocol: api.ProtocolTCP, + Port: 0, + }, + }, + }, + }, + }, + { + kind: "Service", + obj: &api.Service{ + Spec: api.ServiceSpec{ + Selector: map[string]string{ + "version": "v1", + }, + }, + }, + fragment: fmt.Sprintf(`{ "apiVersion": "%s", "spec": { "selector": { "version": "v2" } } }`, testapi.Default.GroupVersion().String()), + expected: &api.Service{ + Spec: api.ServiceSpec{ + SessionAffinity: "None", + Type: api.ServiceTypeClusterIP, + Selector: map[string]string{ + "version": "v2", + }, + }, + }, + }, + } 
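+ + // The loop below round-trips each (obj, fragment) case through Merge and deep-compares the result against expected, or asserts that an error is returned.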
+ for i, test := range tests { + out, err := Merge(testapi.Default.Codec(), test.obj, test.fragment, test.kind) + if !test.expectErr { + if err != nil { + t.Errorf("testcase[%d], unexpected error: %v", i, err) + } else if !reflect.DeepEqual(out, test.expected) { + t.Errorf("\n\ntestcase[%d]\nexpected:\n%+v\nsaw:\n%+v", i, test.expected, out) + } + } + if test.expectErr && err == nil { + t.Errorf("testcase[%d], unexpected non-error", i) + } + } +} + +type fileHandler struct { + data []byte +} + +func (f *fileHandler) ServeHTTP(res http.ResponseWriter, req *http.Request) { + if req.URL.Path == "/error" { + res.WriteHeader(http.StatusNotFound) + return + } + res.WriteHeader(http.StatusOK) + res.Write(f.data) +} + +func TestCheckInvalidErr(t *testing.T) { + tests := []struct { + err error + expected string + }{ + { + errors.NewInvalid(api.Kind("Invalid1"), "invalidation", field.ErrorList{field.Invalid(field.NewPath("field"), "single", "details")}), + `Error from server: Invalid1 "invalidation" is invalid: field: Invalid value: "single": details`, + }, + { + errors.NewInvalid(api.Kind("Invalid2"), "invalidation", field.ErrorList{field.Invalid(field.NewPath("field1"), "multi1", "details"), field.Invalid(field.NewPath("field2"), "multi2", "details")}), + `Error from server: Invalid2 "invalidation" is invalid: [field1: Invalid value: "multi1": details, field2: Invalid value: "multi2": details]`, + }, + { + errors.NewInvalid(api.Kind("Invalid3"), "invalidation", field.ErrorList{}), + `Error from server: Invalid3 "invalidation" is invalid: `, + }, + } + + var errReturned string + errHandle := func(err string) { + errReturned = err + } + + for _, test := range tests { + checkErr(test.err, errHandle) + + if errReturned != test.expected { + t.Fatalf("Got: %s, expected: %s", errReturned, test.expected) + } + } +} + +func TestCheckNoResourceMatchError(t *testing.T) { + tests := []struct { + err error + expected string + }{ + { + &meta.NoResourceMatchError{PartialResource: unversioned.GroupVersionResource{Resource: "foo"}}, + `the server doesn't have a resource type "foo"`, + }, + { + &meta.NoResourceMatchError{PartialResource: unversioned.GroupVersionResource{Version: "theversion", Resource: "foo"}}, + `the server doesn't have a resource type "foo" in version "theversion"`, + }, + { + &meta.NoResourceMatchError{PartialResource: unversioned.GroupVersionResource{Group: "thegroup", Version: "theversion", Resource: "foo"}}, + `the server doesn't have a resource type "foo" in group "thegroup" and version "theversion"`, + }, + { + &meta.NoResourceMatchError{PartialResource: unversioned.GroupVersionResource{Group: "thegroup", Resource: "foo"}}, + `the server doesn't have a resource type "foo" in group "thegroup"`, + }, + } + + var errReturned string + errHandle := func(err string) { + errReturned = err + } + + for _, test := range tests { + checkErr(test.err, errHandle) + + if errReturned != test.expected { + t.Fatalf("Got: %s, expected: %s", errReturned, test.expected) + } + } +} + +func TestDumpReaderToFile(t *testing.T) { + testString := "TEST STRING" + tempFile, err := ioutil.TempFile("", "helpers_test_dump_") + if err != nil { + t.Errorf("unexpected error setting up a temporary file: %v", err) + } + defer syscall.Unlink(tempFile.Name()) + defer tempFile.Close() + err = DumpReaderToFile(strings.NewReader(testString), tempFile.Name()) + if err != nil { + t.Errorf("error in DumpReaderToFile: %v", err) + } + data, err := ioutil.ReadFile(tempFile.Name()) + if err != nil { + t.Errorf("error when reading %s: %v",
tempFile.Name(), err) + } + stringData := string(data) + if stringData != testString { + t.Fatalf("Wrong file content %s != %s", testString, stringData) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/util/printing.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/util/printing.go index c9533725344d..f1153b6cd61c 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/util/printing.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/util/printing.go @@ -30,14 +30,12 @@ import ( // AddPrinterFlags adds printing related flags to a command (e.g. output format, no headers, template path) func AddPrinterFlags(cmd *cobra.Command) { - cmd.Flags().StringP("output", "o", "", "Output format. One of: json|yaml|wide|name|go-template=...|go-template-file=...|jsonpath=...|jsonpath-file=... See golang template [http://golang.org/pkg/text/template/#pkg-overview] and jsonpath template [http://releases.k8s.io/release-1.2/docs/user-guide/jsonpath.md].") + cmd.Flags().StringP("output", "o", "", "Output format. One of: json|yaml|wide|name|go-template=...|go-template-file=...|jsonpath=...|jsonpath-file=... See golang template [http://golang.org/pkg/text/template/#pkg-overview] and jsonpath template [http://releases.k8s.io/HEAD/docs/user-guide/jsonpath.md].") cmd.Flags().String("output-version", "", "Output the formatted object with the given group version (for ex: 'extensions/v1beta1').") cmd.Flags().Bool("no-headers", false, "When using the default output, don't print headers.") cmd.Flags().Bool("show-labels", false, "When printing, show all labels as the last column (default hide labels column)") - // template shorthand -t is deprecated to support -t for --tty - // TODO: remove template flag shorthand -t - cmd.Flags().StringP("template", "t", "", "Template string or path to template file to use when -o=go-template, -o=go-template-file. The template format is golang templates [http://golang.org/pkg/text/template/#pkg-overview].") - cmd.Flags().MarkShorthandDeprecated("template", "please use --template instead") + cmd.Flags().String("template", "", "Template string or path to template file to use when -o=go-template, -o=go-template-file. The template format is golang templates [http://golang.org/pkg/text/template/#pkg-overview].") + cmd.MarkFlagFilename("template") cmd.Flags().String("sort-by", "", "If non-empty, sort list types using this field specification. The field specification is expressed as a JSONPath expression (e.g. '{.metadata.name}'). The field in the API resource specified by this JSONPath expression must be an integer or a string.") cmd.Flags().BoolP("show-all", "a", false, "When printing, show all resources (default hide terminated pods.)") } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/configmap_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/configmap_test.go new file mode 100644 index 000000000000..d4458bdd0ff6 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/configmap_test.go @@ -0,0 +1,123 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubectl + +import ( + "reflect" + "testing" + + "k8s.io/kubernetes/pkg/api" +) + +func TestConfigMapGenerate(t *testing.T) { + tests := []struct { + params map[string]interface{} + expected *api.ConfigMap + expectErr bool + }{ + { + params: map[string]interface{}{ + "name": "foo", + }, + expected: &api.ConfigMap{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + }, + Data: map[string]string{}, + }, + expectErr: false, + }, + { + params: map[string]interface{}{ + "name": "foo", + "type": "my-type", + }, + expected: &api.ConfigMap{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + }, + Data: map[string]string{}, + }, + expectErr: false, + }, + { + params: map[string]interface{}{ + "name": "foo", + "from-literal": []string{"key1=value1", "key2=value2"}, + }, + expected: &api.ConfigMap{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + }, + Data: map[string]string{ + "key1": "value1", + "key2": "value2", + }, + }, + expectErr: false, + }, + { + params: map[string]interface{}{ + "name": "foo", + "from-literal": []string{"key1value1"}, + }, + expectErr: true, + }, + { + params: map[string]interface{}{ + "name": "foo", + "from-file": []string{"key1=/file=2"}, + }, + expectErr: true, + }, + { + params: map[string]interface{}{ + "name": "foo", + "from-file": []string{"key1==value"}, + }, + expectErr: true, + }, + { + params: map[string]interface{}{ + "name": "foo", + "from-literal": []string{"key1==value1"}, + }, + expected: &api.ConfigMap{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + }, + Data: map[string]string{ + "key1": "=value1", + }, + }, + expectErr: false, + }, + } + generator := ConfigMapGeneratorV1{} + for _, test := range tests { + obj, err := generator.Generate(test.params) + if !test.expectErr && err != nil { + t.Errorf("unexpected error: %v", err) + } + if test.expectErr && err != nil { + continue + } + if !reflect.DeepEqual(obj.(*api.ConfigMap), test.expected) { + t.Errorf("\nexpected:\n%#v\nsaw:\n%#v", test.expected, obj.(*api.ConfigMap)) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/custom_column_printer.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/custom_column_printer.go index fb29a0a7ed28..255ad1de84d1 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/custom_column_printer.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/custom_column_printer.go @@ -191,10 +191,10 @@ func (s *CustomColumnsPrinter) printOneObject(obj runtime.Object, parsers []*jso columns := make([]string, len(parsers)) switch u := obj.(type) { case *runtime.Unknown: - if len(u.RawJSON) > 0 { + if len(u.Raw) > 0 { var err error - if obj, err = runtime.Decode(s.Decoder, u.RawJSON); err != nil { - return fmt.Errorf("can't decode object for printing: %v (%s)", err, u.RawJSON) + if obj, err = runtime.Decode(s.Decoder, u.Raw); err != nil { + return fmt.Errorf("can't decode object for printing: %v (%s)", err, u.Raw) } } } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/custom_column_printer_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/custom_column_printer_test.go new file mode 100644 index 000000000000..531881a73271 
--- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/custom_column_printer_test.go @@ -0,0 +1,276 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubectl + +import ( + "bytes" + "reflect" + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/runtime" +) + +func TestMassageJSONPath(t *testing.T) { + tests := []struct { + input string + expectedOutput string + expectErr bool + }{ + {input: "foo.bar", expectedOutput: "{.foo.bar}"}, + {input: "{foo.bar}", expectedOutput: "{.foo.bar}"}, + {input: ".foo.bar", expectedOutput: "{.foo.bar}"}, + {input: "{.foo.bar}", expectedOutput: "{.foo.bar}"}, + {input: "", expectedOutput: ""}, + {input: "{foo.bar", expectErr: true}, + {input: "foo.bar}", expectErr: true}, + {input: "{foo.bar}}", expectErr: true}, + {input: "{{foo.bar}", expectErr: true}, + } + for _, test := range tests { + output, err := massageJSONPath(test.input) + if err != nil && !test.expectErr { + t.Errorf("unexpected error: %v", err) + continue + } + if test.expectErr { + if err == nil { + t.Error("unexpected non-error") + } + continue + } + if output != test.expectedOutput { + t.Errorf("input: %s, expected: %s, saw: %s", test.input, test.expectedOutput, output) + } + } +} + +func TestNewColumnPrinterFromSpec(t *testing.T) { + tests := []struct { + spec string + expectedColumns []Column + expectErr bool + name string + }{ + { + spec: "", + expectErr: true, + name: "empty", + }, + { + spec: "invalid", + expectErr: true, + name: "invalid1", + }, + { + spec: "invalid=foobar", + expectErr: true, + name: "invalid2", + }, + { + spec: "invalid,foobar:blah", + expectErr: true, + name: "invalid3", + }, + { + spec: "NAME:metadata.name,API_VERSION:apiVersion", + name: "ok", + expectedColumns: []Column{ + { + Header: "NAME", + FieldSpec: "{.metadata.name}", + }, + { + Header: "API_VERSION", + FieldSpec: "{.apiVersion}", + }, + }, + }, + } + for _, test := range tests { + printer, err := NewCustomColumnsPrinterFromSpec(test.spec, api.Codecs.UniversalDecoder()) + if test.expectErr { + if err == nil { + t.Errorf("[%s] unexpected non-error", test.name) + } + continue + } + if !test.expectErr && err != nil { + t.Errorf("[%s] unexpected error: %v", test.name, err) + continue + } + + if !reflect.DeepEqual(test.expectedColumns, printer.Columns) { + t.Errorf("[%s]\nexpected:\n%v\nsaw:\n%v\n", test.name, test.expectedColumns, printer.Columns) + } + + } +} + +const exampleTemplateOne = `NAME API_VERSION +{metadata.name} {apiVersion}` + +const exampleTemplateTwo = `NAME API_VERSION + {metadata.name} {apiVersion}` + +func TestNewColumnPrinterFromTemplate(t *testing.T) { + tests := []struct { + spec string + expectedColumns []Column + expectErr bool + name string + }{ + { + spec: "", + expectErr: true, + name: "empty", + }, + { + spec: "invalid", + expectErr: true, + name: "invalid1", + }, + { + spec: "invalid=foobar", + expectErr: true, + name: 
"invalid2", + }, + { + spec: "invalid,foobar:blah", + expectErr: true, + name: "invalid3", + }, + { + spec: exampleTemplateOne, + name: "ok", + expectedColumns: []Column{ + { + Header: "NAME", + FieldSpec: "{.metadata.name}", + }, + { + Header: "API_VERSION", + FieldSpec: "{.apiVersion}", + }, + }, + }, + { + spec: exampleTemplateTwo, + name: "ok-2", + expectedColumns: []Column{ + { + Header: "NAME", + FieldSpec: "{.metadata.name}", + }, + { + Header: "API_VERSION", + FieldSpec: "{.apiVersion}", + }, + }, + }, + } + for _, test := range tests { + reader := bytes.NewBufferString(test.spec) + printer, err := NewCustomColumnsPrinterFromTemplate(reader, api.Codecs.UniversalDecoder()) + if test.expectErr { + if err == nil { + t.Errorf("[%s] unexpected non-error", test.name) + } + continue + } + if !test.expectErr && err != nil { + t.Errorf("[%s] unexpected error: %v", test.name, err) + continue + } + + if !reflect.DeepEqual(test.expectedColumns, printer.Columns) { + t.Errorf("[%s]\nexpected:\n%v\nsaw:\n%v\n", test.name, test.expectedColumns, printer.Columns) + } + + } +} + +func TestColumnPrint(t *testing.T) { + tests := []struct { + columns []Column + obj runtime.Object + expectedOutput string + }{ + { + columns: []Column{ + { + Header: "NAME", + FieldSpec: "{.metadata.name}", + }, + }, + obj: &v1.Pod{ObjectMeta: v1.ObjectMeta{Name: "foo"}}, + expectedOutput: `NAME +foo +`, + }, + { + columns: []Column{ + { + Header: "NAME", + FieldSpec: "{.metadata.name}", + }, + }, + obj: &v1.PodList{ + Items: []v1.Pod{ + {ObjectMeta: v1.ObjectMeta{Name: "foo"}}, + {ObjectMeta: v1.ObjectMeta{Name: "bar"}}, + }, + }, + expectedOutput: `NAME +foo +bar +`, + }, + { + columns: []Column{ + { + Header: "NAME", + FieldSpec: "{.metadata.name}", + }, + { + Header: "API_VERSION", + FieldSpec: "{.apiVersion}", + }, + }, + obj: &v1.Pod{ObjectMeta: v1.ObjectMeta{Name: "foo"}, TypeMeta: unversioned.TypeMeta{APIVersion: "baz"}}, + expectedOutput: `NAME API_VERSION +foo baz +`, + }, + } + + for _, test := range tests { + printer := &CustomColumnsPrinter{ + Columns: test.columns, + Decoder: api.Codecs.UniversalDecoder(), + } + buffer := &bytes.Buffer{} + if err := printer.PrintObj(test.obj, buffer); err != nil { + t.Errorf("unexpected error: %v", err) + } + if buffer.String() != test.expectedOutput { + t.Errorf("\nexpected:\n'%s'\nsaw\n'%s'\n", test.expectedOutput, buffer.String()) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/describe.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/describe.go index 0a09b3a069c5..c73e405347bc 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/describe.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/describe.go @@ -29,15 +29,19 @@ import ( "time" "github.com/golang/glog" + "k8s.io/kubernetes/federation/apis/federation" + fed_clientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/apis/apps" "k8s.io/kubernetes/pkg/apis/autoscaling" "k8s.io/kubernetes/pkg/apis/batch" "k8s.io/kubernetes/pkg/apis/extensions" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" client "k8s.io/kubernetes/pkg/client/unversioned" + adapter "k8s.io/kubernetes/pkg/client/unversioned/adapters/internalclientset" "k8s.io/kubernetes/pkg/fieldpath" "k8s.io/kubernetes/pkg/fields" qosutil "k8s.io/kubernetes/pkg/kubelet/qos/util" @@ 
-52,7 +56,13 @@ import ( // if the output could not be generated. Implementers typically // abstract the retrieval of the named object from a remote server. type Describer interface { - Describe(namespace, name string) (output string, err error) + Describe(namespace, name string, describerSettings DescriberSettings) (output string, err error) +} + +// DescriberSettings holds display configuration for each object +// describer to control what is printed. +type DescriberSettings struct { + ShowEvents bool } // ObjectDescriber is an interface for displaying arbitrary objects with extra @@ -93,11 +103,13 @@ func describerMap(c *client.Client) map[unversioned.GroupKind]Describer { extensions.Kind("ReplicaSet"): &ReplicaSetDescriber{c}, extensions.Kind("HorizontalPodAutoscaler"): &HorizontalPodAutoscalerDescriber{c}, + extensions.Kind("NetworkPolicy"): &NetworkPolicyDescriber{c}, autoscaling.Kind("HorizontalPodAutoscaler"): &HorizontalPodAutoscalerDescriber{c}, extensions.Kind("DaemonSet"): &DaemonSetDescriber{c}, - extensions.Kind("Deployment"): &DeploymentDescriber{clientset.FromUnversionedClient(c)}, + extensions.Kind("Deployment"): &DeploymentDescriber{adapter.FromUnversionedClient(c)}, extensions.Kind("Job"): &JobDescriber{c}, batch.Kind("Job"): &JobDescriber{c}, + apps.Kind("PetSet"): &PetSetDescriber{c}, extensions.Kind("Ingress"): &IngressDescriber{c}, } @@ -148,7 +160,7 @@ type NamespaceDescriber struct { client.Interface } -func (d *NamespaceDescriber) Describe(namespace, name string) (string, error) { +func (d *NamespaceDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { ns, err := d.Namespaces().Get(name) if err != nil { return "", err @@ -168,7 +180,7 @@ func (d *NamespaceDescriber) Describe(namespace, name string) (string, error) { func describeNamespace(namespace *api.Namespace, resourceQuotaList *api.ResourceQuotaList, limitRangeList *api.LimitRangeList) (string, error) { return tabbedString(func(out io.Writer) error { fmt.Fprintf(out, "Name:\t%s\n", namespace.Name) - fmt.Fprintf(out, "Labels:\t%s\n", labels.FormatLabels(namespace.Labels)) + printLabelsMultiline(out, "Labels", namespace.Labels) fmt.Fprintf(out, "Status:\t%s\n", string(namespace.Status.Phase)) if resourceQuotaList != nil { fmt.Fprintf(out, "\n") @@ -268,7 +280,7 @@ func DescribeResourceQuotas(quotas *api.ResourceQuotaList, w io.Writer) { for _, q := range quotas.Items { fmt.Fprintf(w, "\n Name:\t%s\n", q.Name) if len(q.Spec.Scopes) > 0 { - scopes := []string{} + scopes := make([]string, 0, len(q.Spec.Scopes)) for _, scope := range q.Spec.Scopes { scopes = append(scopes, string(scope)) } @@ -285,7 +297,7 @@ func DescribeResourceQuotas(quotas *api.ResourceQuotaList, w io.Writer) { fmt.Fprintf(w, " Resource\tUsed\tHard\n") fmt.Fprint(w, " --------\t---\t---\n") - resources := []api.ResourceName{} + resources := make([]api.ResourceName, 0, len(q.Status.Hard)) for resource := range q.Status.Hard { resources = append(resources, resource) } @@ -304,7 +316,7 @@ type LimitRangeDescriber struct { client.Interface } -func (d *LimitRangeDescriber) Describe(namespace, name string) (string, error) { +func (d *LimitRangeDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { lr := d.LimitRanges(namespace) limitRange, err := lr.Get(name) @@ -391,7 +403,7 @@ type ResourceQuotaDescriber struct { client.Interface } -func (d *ResourceQuotaDescriber) Describe(namespace, name string) (string, error) { +func (d *ResourceQuotaDescriber) 
Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { rq := d.ResourceQuotas(namespace) resourceQuota, err := rq.Get(name) @@ -421,7 +433,7 @@ func describeQuota(resourceQuota *api.ResourceQuota) (string, error) { fmt.Fprintf(out, "Name:\t%s\n", resourceQuota.Name) fmt.Fprintf(out, "Namespace:\t%s\n", resourceQuota.Namespace) if len(resourceQuota.Spec.Scopes) > 0 { - scopes := []string{} + scopes := make([]string, 0, len(resourceQuota.Spec.Scopes)) for _, scope := range resourceQuota.Spec.Scopes { scopes = append(scopes, string(scope)) } @@ -437,7 +449,7 @@ func describeQuota(resourceQuota *api.ResourceQuota) (string, error) { fmt.Fprintf(out, "Resource\tUsed\tHard\n") fmt.Fprintf(out, "--------\t----\t----\n") - resources := []api.ResourceName{} + resources := make([]api.ResourceName, 0, len(resourceQuota.Status.Hard)) for resource := range resourceQuota.Status.Hard { resources = append(resources, resource) } @@ -460,29 +472,33 @@ type PodDescriber struct { client.Interface } -func (d *PodDescriber) Describe(namespace, name string) (string, error) { +func (d *PodDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { pod, err := d.Pods(namespace).Get(name) if err != nil { - eventsInterface := d.Events(namespace) - selector := eventsInterface.GetFieldSelector(&name, &namespace, nil, nil) - options := api.ListOptions{FieldSelector: selector} - events, err2 := eventsInterface.List(options) - if err2 == nil && len(events.Items) > 0 { - return tabbedString(func(out io.Writer) error { - fmt.Fprintf(out, "Pod '%v': error '%v', but found events.\n", name, err) - DescribeEvents(events, out) - return nil - }) + if describerSettings.ShowEvents { + eventsInterface := d.Events(namespace) + selector := eventsInterface.GetFieldSelector(&name, &namespace, nil, nil) + options := api.ListOptions{FieldSelector: selector} + events, err2 := eventsInterface.List(options) + if describerSettings.ShowEvents && err2 == nil && len(events.Items) > 0 { + return tabbedString(func(out io.Writer) error { + fmt.Fprintf(out, "Pod '%v': error '%v', but found events.\n", name, err) + DescribeEvents(events, out) + return nil + }) + } } return "", err } var events *api.EventList - if ref, err := api.GetReference(pod); err != nil { - glog.Errorf("Unable to construct reference to '%#v': %v", pod, err) - } else { - ref.Kind = "" - events, _ = d.Events(namespace).Search(ref) + if describerSettings.ShowEvents { + if ref, err := api.GetReference(pod); err != nil { + glog.Errorf("Unable to construct reference to '%#v': %v", pod, err) + } else { + ref.Kind = "" + events, _ = d.Events(namespace).Search(ref) + } } return describePod(pod, events) @@ -496,7 +512,7 @@ func describePod(pod *api.Pod, events *api.EventList) (string, error) { if pod.Status.StartTime != nil { fmt.Fprintf(out, "Start Time:\t%s\n", pod.Status.StartTime.Time.Format(time.RFC1123Z)) } - fmt.Fprintf(out, "Labels:\t%s\n", labels.FormatLabels(pod.Labels)) + printLabelsMultiline(out, "Labels", pod.Labels) if pod.DeletionTimestamp != nil { fmt.Fprintf(out, "Status:\tTerminating (expires %s)\n", pod.DeletionTimestamp.Time.Format(time.RFC1123Z)) fmt.Fprintf(out, "Termination Grace Period:\t%ds\n", *pod.DeletionGracePeriodSeconds) @@ -511,8 +527,10 @@ func describePod(pod *api.Pod, events *api.EventList) (string, error) { } fmt.Fprintf(out, "IP:\t%s\n", pod.Status.PodIP) fmt.Fprintf(out, "Controllers:\t%s\n", printControllers(pod.Annotations)) - fmt.Fprintf(out, "Containers:\n") - 
DescribeContainers(pod.Spec.Containers, pod.Status.ContainerStatuses, EnvValueRetriever(pod), out) + if len(pod.Spec.InitContainers) > 0 { + describeContainers("Init Containers", pod.Spec.InitContainers, pod.Status.InitContainerStatuses, EnvValueRetriever(pod), out, "") + } + describeContainers("Containers", pod.Spec.Containers, pod.Status.ContainerStatuses, EnvValueRetriever(pod), out, "") if len(pod.Status.Conditions) > 0 { fmt.Fprint(out, "Conditions:\n Type\tStatus\n") for _, c := range pod.Status.Conditions { @@ -521,7 +539,7 @@ func describePod(pod *api.Pod, events *api.EventList) (string, error) { c.Status) } } - describeVolumes(pod.Spec.Volumes, out) + describeVolumes(pod.Spec.Volumes, out, "") if events != nil { DescribeEvents(events, out) } @@ -541,14 +559,19 @@ func printControllers(annotation map[string]string) string { return "<none>" } -func describeVolumes(volumes []api.Volume, out io.Writer) { +// TODO: Do a better job at indenting, maybe by using a prefix writer +func describeVolumes(volumes []api.Volume, out io.Writer, space string) { if volumes == nil || len(volumes) == 0 { - fmt.Fprint(out, "No volumes.\n") + fmt.Fprintf(out, "%sNo volumes.\n", space) return } - fmt.Fprint(out, "Volumes:\n") + fmt.Fprintf(out, "%sVolumes:\n", space) for _, volume := range volumes { - fmt.Fprintf(out, " %v:\n", volume.Name) + nameIndent := "" + if len(space) > 0 { + nameIndent = " " + } + fmt.Fprintf(out, " %s%v:\n", nameIndent, volume.Name) switch { case volume.VolumeSource.HostPath != nil: printHostPathVolumeSource(volume.VolumeSource.HostPath, out) @@ -677,7 +700,12 @@ func printRBDVolumeSource(rbd *api.RBDVolumeSource, out io.Writer) { func printDownwardAPIVolumeSource(d *api.DownwardAPIVolumeSource, out io.Writer) { fmt.Fprintf(out, " Type:\tDownwardAPI (a volume populated by information about the pod)\n Items:\n") for _, mapping := range d.Items { - fmt.Fprintf(out, " %v -> %v\n", mapping.FieldRef.FieldPath, mapping.Path) + if mapping.FieldRef != nil { + fmt.Fprintf(out, " %v -> %v\n", mapping.FieldRef.FieldPath, mapping.Path) + } + if mapping.ResourceFieldRef != nil { + fmt.Fprintf(out, " %v -> %v\n", mapping.ResourceFieldRef.Resource, mapping.Path) + } } } @@ -685,7 +713,7 @@ type PersistentVolumeDescriber struct { client.Interface } -func (d *PersistentVolumeDescriber) Describe(namespace, name string) (string, error) { +func (d *PersistentVolumeDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { c := d.PersistentVolumes() pv, err := c.Get(name) @@ -697,7 +725,7 @@ func (d *PersistentVolumeDescriber) Describe(namespace, name string) (string, er return tabbedString(func(out io.Writer) error { fmt.Fprintf(out, "Name:\t%s\n", pv.Name) - fmt.Fprintf(out, "Labels:\t%s\n", labels.FormatLabels(pv.Labels)) + printLabelsMultiline(out, "Labels", pv.Labels) fmt.Fprintf(out, "Status:\t%s\n", pv.Status.Phase) if pv.Spec.ClaimRef != nil { fmt.Fprintf(out, "Claim:\t%s\n", pv.Spec.ClaimRef.Namespace+"/"+pv.Spec.ClaimRef.Name) @@ -735,7 +763,7 @@ type PersistentVolumeClaimDescriber struct { client.Interface } -func (d *PersistentVolumeClaimDescriber) Describe(namespace, name string) (string, error) { +func (d *PersistentVolumeClaimDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { c := d.PersistentVolumeClaims(namespace) pvc, err := c.Get(name) @@ -743,7 +771,6 @@ func (d *PersistentVolumeClaimDescriber) Describe(namespace, name string) (strin return "", err } - labels := labels.FormatLabels(pvc.Labels)
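// NOTE (editorial, not part of the patch): the "prefix writer" that the TODOs on
// describeVolumes/describeContainers mention would replace the threaded `space` and
// `nameIndent` strings with a writer that owns the indentation. A minimal sketch of
// that idea, using a hypothetical prefixWriter type (an assumption for illustration,
// not code from this change):
//
//	type prefixWriter struct {
//		out    io.Writer
//		prefix string
//	}
//
//	// Write emits one line, indented two spaces per nesting level below the fixed prefix.
//	func (w prefixWriter) Write(level int, format string, a ...interface{}) {
//		fmt.Fprintf(w.out, w.prefix+strings.Repeat("  ", level)+format, a...)
//	}
//
// Helpers would then take a prefixWriter instead of an (out io.Writer, space string)
// pair, e.g. describeVolumes(volumes, prefixWriter{out, "  "}) rather than the
// describeVolumes(volumes, out, "  ") form this patch uses.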
storage := pvc.Spec.Resources.Requests[api.ResourceStorage] capacity := "" accessModes := "" @@ -753,29 +780,42 @@ func (d *PersistentVolumeClaimDescriber) Describe(namespace, name string) (strin capacity = storage.String() } + events, _ := d.Events(namespace).Search(pvc) + return tabbedString(func(out io.Writer) error { fmt.Fprintf(out, "Name:\t%s\n", pvc.Name) fmt.Fprintf(out, "Namespace:\t%s\n", pvc.Namespace) fmt.Fprintf(out, "Status:\t%v\n", pvc.Status.Phase) fmt.Fprintf(out, "Volume:\t%s\n", pvc.Spec.VolumeName) - fmt.Fprintf(out, "Labels:\t%s\n", labels) + printLabelsMultiline(out, "Labels", pvc.Labels) fmt.Fprintf(out, "Capacity:\t%s\n", capacity) fmt.Fprintf(out, "Access Modes:\t%s\n", accessModes) + if events != nil { + DescribeEvents(events, out) + } + return nil }) } -// DescribeContainers is exported for consumers in other API groups that have container templates -func DescribeContainers(containers []api.Container, containerStatuses []api.ContainerStatus, resolverFn EnvVarResolverFunc, out io.Writer) { +// TODO: Do a better job at indenting, maybe by using a prefix writer +func describeContainers(label string, containers []api.Container, containerStatuses []api.ContainerStatus, resolverFn EnvVarResolverFunc, out io.Writer, space string) { statuses := map[string]api.ContainerStatus{} for _, status := range containerStatuses { statuses[status.Name] = status } - + if len(containers) == 0 { + fmt.Fprintf(out, "%s%s: <none>\n", space, label) + } else { + fmt.Fprintf(out, "%s%s:\n", space, label) + } for _, container := range containers { status, ok := statuses[container.Name] - - fmt.Fprintf(out, " %v:\n", container.Name) + nameIndent := "" + if len(space) > 0 { + nameIndent = " " + } + fmt.Fprintf(out, " %s%v:\n", nameIndent, container.Name) if ok { fmt.Fprintf(out, " Container ID:\t%s\n", status.ContainerID) } @@ -807,21 +847,25 @@ func DescribeContainers(containers []api.Container, containerStatuses []api.Cont if len(resourceToQoS) > 0 { fmt.Fprintf(out, " QoS Tier:\n") } - for resource, qos := range resourceToQoS { + for _, resource := range SortedQoSResourceNames(resourceToQoS) { + qos := resourceToQoS[resource] fmt.Fprintf(out, " %s:\t%s\n", resource, qos) } - if len(container.Resources.Limits) > 0 { + resources := container.Resources + if len(resources.Limits) > 0 { fmt.Fprintf(out, " Limits:\n") } - for name, quantity := range container.Resources.Limits { + for _, name := range SortedResourceNames(resources.Limits) { + quantity := resources.Limits[name] fmt.Fprintf(out, " %s:\t%s\n", name, quantity.String()) } - if len(container.Resources.Requests) > 0 { + if len(resources.Requests) > 0 { fmt.Fprintf(out, " Requests:\n") } - for name, quantity := range container.Resources.Requests { + for _, name := range SortedResourceNames(resources.Requests) { + quantity := resources.Requests[name] fmt.Fprintf(out, " %s:\t%s\n", name, quantity.String()) } @@ -842,23 +886,41 @@ func DescribeContainers(containers []api.Container, containerStatuses []api.Cont probe := DescribeProbe(container.ReadinessProbe) fmt.Fprintf(out, " Readiness:\t%s\n", probe) } - fmt.Fprintf(out, " Environment Variables:\n") + none := "" + if len(container.Env) == 0 { + none = "\t<none>" + } + fmt.Fprintf(out, " Environment Variables:%s\n", none) for _, e := range container.Env { - if e.ValueFrom != nil && e.ValueFrom.FieldRef != nil { + if e.ValueFrom == nil { + fmt.Fprintf(out, " %s:\t%s\n", e.Name, e.Value) + continue + } + + switch { + case e.ValueFrom.FieldRef != nil: var valueFrom string if resolverFn != nil { valueFrom
= resolverFn(e) } fmt.Fprintf(out, " %s:\t%s (%s:%s)\n", e.Name, valueFrom, e.ValueFrom.FieldRef.APIVersion, e.ValueFrom.FieldRef.FieldPath) - } else { - fmt.Fprintf(out, " %s:\t%s\n", e.Name, e.Value) + case e.ValueFrom.ResourceFieldRef != nil: + valueFrom, err := fieldpath.ExtractContainerResourceValue(e.ValueFrom.ResourceFieldRef, &container) + if err != nil { + valueFrom = "" + } + fmt.Fprintf(out, " %s:\t%s (%s)\n", e.Name, valueFrom, e.ValueFrom.ResourceFieldRef.Resource) + case e.ValueFrom.SecretKeyRef != nil: + fmt.Fprintf(out, " %s:\t<set to the key '%s' in secret '%s'>\n", e.Name, e.ValueFrom.SecretKeyRef.Key, e.ValueFrom.SecretKeyRef.Name) + case e.ValueFrom.ConfigMapKeyRef != nil: + fmt.Fprintf(out, " %s:\t<set to the key '%s' of config map '%s'>\n", e.Name, e.ValueFrom.ConfigMapKeyRef.Key, e.ValueFrom.ConfigMapKeyRef.Name) } } } } func describeContainerPorts(cPorts []api.ContainerPort) string { - ports := []string{} + ports := make([]string, 0, len(cPorts)) for _, cPort := range cPorts { ports = append(ports, fmt.Sprintf("%d/%s", cPort.ContainerPort, cPort.Protocol)) } @@ -949,7 +1011,7 @@ type ReplicationControllerDescriber struct { client.Interface } -func (d *ReplicationControllerDescriber) Describe(namespace, name string) (string, error) { +func (d *ReplicationControllerDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { rc := d.ReplicationControllers(namespace) pc := d.Pods(namespace) @@ -963,7 +1025,10 @@ func (d *ReplicationControllerDescriber) Describe(namespace, name string) (strin return "", err } - events, _ := d.Events(namespace).Search(controller) + var events *api.EventList + if describerSettings.ShowEvents { + events, _ = d.Events(namespace).Search(controller) + } return describeReplicationController(controller, events, running, waiting, succeeded, failed) } @@ -978,11 +1043,11 @@ func describeReplicationController(controller *api.ReplicationController, events fmt.Fprintf(out, "Image(s):\t%s\n", "<unset>") } fmt.Fprintf(out, "Selector:\t%s\n", labels.FormatLabels(controller.Spec.Selector)) - fmt.Fprintf(out, "Labels:\t%s\n", labels.FormatLabels(controller.Labels)) + printLabelsMultiline(out, "Labels", controller.Labels) fmt.Fprintf(out, "Replicas:\t%d current / %d desired\n", controller.Status.Replicas, controller.Spec.Replicas) fmt.Fprintf(out, "Pods Status:\t%d Running / %d Waiting / %d Succeeded / %d Failed\n", running, waiting, succeeded, failed) if controller.Spec.Template != nil { - describeVolumes(controller.Spec.Template.Spec.Volumes, out) + describeVolumes(controller.Spec.Template.Spec.Volumes, out, "") } if events != nil { DescribeEvents(events, out) @@ -991,18 +1056,23 @@ func describeReplicationController(controller *api.ReplicationController, events }) } -func DescribePodTemplate(template *api.PodTemplateSpec) (string, error) { - return tabbedString(func(out io.Writer) error { - if template == nil { - fmt.Fprintf(out, "<unset>") - return nil - } - fmt.Fprintf(out, "Labels:\t%s\n", labels.FormatLabels(template.Labels)) - fmt.Fprintf(out, "Annotations:\t%s\n", labels.FormatLabels(template.Annotations)) - fmt.Fprintf(out, "Image(s):\t%s\n", makeImageList(&template.Spec)) - describeVolumes(template.Spec.Volumes, out) - return nil - }) +func DescribePodTemplate(template *api.PodTemplateSpec, out io.Writer) { + if template == nil { + fmt.Fprintf(out, " <unset>") + return + } + printLabelsMultiline(out, " Labels", template.Labels) + if len(template.Annotations) > 0 { + printLabelsMultiline(out, " Annotations", template.Annotations) + } + if len(template.Spec.ServiceAccountName) > 0 { + fmt.Fprintf(out, " 
Service Account:\t%s\n", template.Spec.ServiceAccountName) + } + if len(template.Spec.InitContainers) > 0 { + describeContainers("Init Containers", template.Spec.InitContainers, nil, nil, out, " ") + } + describeContainers("Containers", template.Spec.Containers, nil, nil, out, " ") + describeVolumes(template.Spec.Volumes, out, " ") } // ReplicaSetDescriber generates information about a ReplicaSet and the pods it has created. @@ -1010,7 +1080,7 @@ type ReplicaSetDescriber struct { client.Interface } -func (d *ReplicaSetDescriber) Describe(namespace, name string) (string, error) { +func (d *ReplicaSetDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { rsc := d.Extensions().ReplicaSets(namespace) pc := d.Pods(namespace) @@ -1029,7 +1099,10 @@ func (d *ReplicaSetDescriber) Describe(namespace, name string) (string, error) { return "", err } - events, _ := d.Events(namespace).Search(rs) + var events *api.EventList + if describerSettings.ShowEvents { + events, _ = d.Events(namespace).Search(rs) + } return describeReplicaSet(rs, events, running, waiting, succeeded, failed) } @@ -1040,10 +1113,10 @@ func describeReplicaSet(rs *extensions.ReplicaSet, events *api.EventList, runnin fmt.Fprintf(out, "Namespace:\t%s\n", rs.Namespace) fmt.Fprintf(out, "Image(s):\t%s\n", makeImageList(&rs.Spec.Template.Spec)) fmt.Fprintf(out, "Selector:\t%s\n", unversioned.FormatLabelSelector(rs.Spec.Selector)) - fmt.Fprintf(out, "Labels:\t%s\n", labels.FormatLabels(rs.Labels)) + printLabelsMultiline(out, "Labels", rs.Labels) fmt.Fprintf(out, "Replicas:\t%d current / %d desired\n", rs.Status.Replicas, rs.Spec.Replicas) fmt.Fprintf(out, "Pods Status:\t%d Running / %d Waiting / %d Succeeded / %d Failed\n", running, waiting, succeeded, failed) - describeVolumes(rs.Spec.Template.Spec.Volumes, out) + describeVolumes(rs.Spec.Template.Spec.Volumes, out, "") if events != nil { DescribeEvents(events, out) } @@ -1056,18 +1129,21 @@ type JobDescriber struct { client *client.Client } -func (d *JobDescriber) Describe(namespace, name string) (string, error) { +func (d *JobDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { job, err := d.client.Extensions().Jobs(namespace).Get(name) if err != nil { return "", err } - events, _ := d.client.Events(namespace).Search(job) + var events *api.EventList + if describerSettings.ShowEvents { + events, _ = d.client.Events(namespace).Search(job) + } return describeJob(job, events) } -func describeJob(job *extensions.Job, events *api.EventList) (string, error) { +func describeJob(job *batch.Job, events *api.EventList) (string, error) { return tabbedString(func(out io.Writer) error { fmt.Fprintf(out, "Name:\t%s\n", job.Name) fmt.Fprintf(out, "Namespace:\t%s\n", job.Namespace) @@ -1086,9 +1162,9 @@ func describeJob(job *extensions.Job, events *api.EventList) (string, error) { if job.Spec.ActiveDeadlineSeconds != nil { fmt.Fprintf(out, "Active Deadline Seconds:\t%ds\n", *job.Spec.ActiveDeadlineSeconds) } - fmt.Fprintf(out, "Labels:\t%s\n", labels.FormatLabels(job.Labels)) + printLabelsMultiline(out, "Labels", job.Labels) fmt.Fprintf(out, "Pods Statuses:\t%d Running / %d Succeeded / %d Failed\n", job.Status.Active, job.Status.Succeeded, job.Status.Failed) - describeVolumes(job.Spec.Template.Spec.Volumes, out) + describeVolumes(job.Spec.Template.Spec.Volumes, out, "") if events != nil { DescribeEvents(events, out) } @@ -1101,7 +1177,7 @@ type DaemonSetDescriber struct { client.Interface } -func (d 
*DaemonSetDescriber) Describe(namespace, name string) (string, error) { +func (d *DaemonSetDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { dc := d.Extensions().DaemonSets(namespace) pc := d.Pods(namespace) @@ -1119,7 +1195,10 @@ func (d *DaemonSetDescriber) Describe(namespace, name string) (string, error) { return "", err } - events, _ := d.Events(namespace).Search(daemon) + var events *api.EventList + if describerSettings.ShowEvents { + events, _ = d.Events(namespace).Search(daemon) + } return describeDaemonSet(daemon, events, running, waiting, succeeded, failed) } @@ -1135,7 +1214,7 @@ func describeDaemonSet(daemon *extensions.DaemonSet, events *api.EventList, runn } fmt.Fprintf(out, "Selector:\t%s\n", selector) fmt.Fprintf(out, "Node-Selector:\t%s\n", labels.FormatLabels(daemon.Spec.Template.Spec.NodeSelector)) - fmt.Fprintf(out, "Labels:\t%s\n", labels.FormatLabels(daemon.Labels)) + printLabelsMultiline(out, "Labels", daemon.Labels) fmt.Fprintf(out, "Desired Number of Nodes Scheduled: %d\n", daemon.Status.DesiredNumberScheduled) fmt.Fprintf(out, "Current Number of Nodes Scheduled: %d\n", daemon.Status.CurrentNumberScheduled) fmt.Fprintf(out, "Number of Nodes Misscheduled: %d\n", daemon.Status.NumberMisscheduled) @@ -1152,7 +1231,7 @@ type SecretDescriber struct { client.Interface } -func (d *SecretDescriber) Describe(namespace, name string) (string, error) { +func (d *SecretDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { c := d.Secrets(namespace) secret, err := c.Get(name) @@ -1167,8 +1246,8 @@ func describeSecret(secret *api.Secret) (string, error) { return tabbedString(func(out io.Writer) error { fmt.Fprintf(out, "Name:\t%s\n", secret.Name) fmt.Fprintf(out, "Namespace:\t%s\n", secret.Namespace) - fmt.Fprintf(out, "Labels:\t%s\n", labels.FormatLabels(secret.Labels)) - fmt.Fprintf(out, "Annotations:\t%s\n", labels.FormatLabels(secret.Annotations)) + printLabelsMultiline(out, "Labels", secret.Labels) + printLabelsMultiline(out, "Annotations", secret.Annotations) fmt.Fprintf(out, "\nType:\t%s\n", secret.Type) @@ -1190,13 +1269,13 @@ type IngressDescriber struct { client.Interface } -func (i *IngressDescriber) Describe(namespace, name string) (string, error) { +func (i *IngressDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { c := i.Extensions().Ingress(namespace) ing, err := c.Get(name) if err != nil { return "", err } - return i.describeIngress(ing) + return i.describeIngress(ing, describerSettings) } func (i *IngressDescriber) describeBackend(ns string, backend *extensions.IngressBackend) string { @@ -1211,7 +1290,7 @@ func (i *IngressDescriber) describeBackend(ns string, backend *extensions.Ingres spName = sp.Name } case intstr.Int: - if int(backend.ServicePort.IntVal) == sp.Port { + if int32(backend.ServicePort.IntVal) == sp.Port { spName = sp.Name } } @@ -1219,7 +1298,7 @@ func (i *IngressDescriber) describeBackend(ns string, backend *extensions.Ingres return formatEndpoints(endpoints, sets.NewString(spName)) } -func (i *IngressDescriber) describeIngress(ing *extensions.Ingress) (string, error) { +func (i *IngressDescriber) describeIngress(ing *extensions.Ingress, describerSettings DescriberSettings) (string, error) { return tabbedString(func(out io.Writer) error { fmt.Fprintf(out, "Name:\t%v\n", ing.Name) fmt.Fprintf(out, "Namespace:\t%v\n", ing.Namespace) @@ -1252,9 +1331,11 @@ func (i *IngressDescriber) describeIngress(ing 
*extensions.Ingress) (string, err } describeIngressAnnotations(out, ing.Annotations) - events, _ := i.Events(ing.Namespace).Search(ing) - if events != nil { - DescribeEvents(events, out) + if describerSettings.ShowEvents { + events, _ := i.Events(ing.Namespace).Search(ing) + if events != nil { + DescribeEvents(events, out) + } } return nil }) @@ -1263,7 +1344,11 @@ func (i *IngressDescriber) describeIngress(ing *extensions.Ingress) (string, err func describeIngressTLS(out io.Writer, ingTLS []extensions.IngressTLS) { fmt.Fprintf(out, "TLS:\n") for _, t := range ingTLS { - fmt.Fprintf(out, " %v terminates %v\n", t.SecretName, strings.Join(t.Hosts, ",")) + if t.SecretName == "" { + fmt.Fprintf(out, " SNI routes %v\n", strings.Join(t.Hosts, ",")) + } else { + fmt.Fprintf(out, " %v terminates %v\n", t.SecretName, strings.Join(t.Hosts, ",")) + } } return } @@ -1287,7 +1372,7 @@ type ServiceDescriber struct { client.Interface } -func (d *ServiceDescriber) Describe(namespace, name string) (string, error) { +func (d *ServiceDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { c := d.Services(namespace) service, err := c.Get(name) @@ -1296,8 +1381,10 @@ func (d *ServiceDescriber) Describe(namespace, name string) (string, error) { } endpoints, _ := d.Endpoints(namespace).Get(name) - events, _ := d.Events(namespace).Search(service) - + var events *api.EventList + if describerSettings.ShowEvents { + events, _ = d.Events(namespace).Search(service) + } return describeService(service, endpoints, events) } @@ -1324,7 +1411,7 @@ func describeService(service *api.Service, endpoints *api.Endpoints, events *api return tabbedString(func(out io.Writer) error { fmt.Fprintf(out, "Name:\t%s\n", service.Name) fmt.Fprintf(out, "Namespace:\t%s\n", service.Namespace) - fmt.Fprintf(out, "Labels:\t%s\n", labels.FormatLabels(service.Labels)) + printLabelsMultiline(out, "Labels", service.Labels) fmt.Fprintf(out, "Selector:\t%s\n", labels.FormatLabels(service.Spec.Selector)) fmt.Fprintf(out, "Type:\t%s\n", service.Spec.Type) fmt.Fprintf(out, "IP:\t%s\n", service.Spec.ClusterIP) @@ -1358,7 +1445,7 @@ type EndpointsDescriber struct { client.Interface } -func (d *EndpointsDescriber) Describe(namespace, name string) (string, error) { +func (d *EndpointsDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { c := d.Endpoints(namespace) ep, err := c.Get(name) @@ -1366,7 +1453,10 @@ func (d *EndpointsDescriber) Describe(namespace, name string) (string, error) { return "", err } - events, _ := d.Events(namespace).Search(ep) + var events *api.EventList + if describerSettings.ShowEvents { + events, _ = d.Events(namespace).Search(ep) + } return describeEndpoints(ep, events) } @@ -1375,13 +1465,13 @@ func describeEndpoints(ep *api.Endpoints, events *api.EventList) (string, error) return tabbedString(func(out io.Writer) error { fmt.Fprintf(out, "Name:\t%s\n", ep.Name) fmt.Fprintf(out, "Namespace:\t%s\n", ep.Namespace) - fmt.Fprintf(out, "Labels:\t%s\n", labels.FormatLabels(ep.Labels)) + printLabelsMultiline(out, "Labels", ep.Labels) fmt.Fprintf(out, "Subsets:\n") for i := range ep.Subsets { subset := &ep.Subsets[i] - addresses := []string{} + addresses := make([]string, 0, len(subset.Addresses)) for _, addr := range subset.Addresses { addresses = append(addresses, addr.IP) } @@ -1391,7 +1481,7 @@ func describeEndpoints(ep *api.Endpoints, events *api.EventList) (string, error) } fmt.Fprintf(out, " Addresses:\t%s\n", addressesString) - 
notReadyAddresses := []string{} + notReadyAddresses := make([]string, 0, len(subset.NotReadyAddresses)) for _, addr := range subset.NotReadyAddresses { notReadyAddresses = append(notReadyAddresses, addr.IP) } @@ -1428,7 +1518,7 @@ type ServiceAccountDescriber struct { client.Interface } -func (d *ServiceAccountDescriber) Describe(namespace, name string) (string, error) { +func (d *ServiceAccountDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { c := d.ServiceAccounts(namespace) serviceAccount, err := c.Get(name) @@ -1458,7 +1548,7 @@ func describeServiceAccount(serviceAccount *api.ServiceAccount, tokens []api.Sec return tabbedString(func(out io.Writer) error { fmt.Fprintf(out, "Name:\t%s\n", serviceAccount.Name) fmt.Fprintf(out, "Namespace:\t%s\n", serviceAccount.Namespace) - fmt.Fprintf(out, "Labels:\t%s\n", labels.FormatLabels(serviceAccount.Labels)) + printLabelsMultiline(out, "Labels", serviceAccount.Labels) fmt.Fprintln(out) var ( @@ -1509,7 +1599,7 @@ type NodeDescriber struct { client.Interface } -func (d *NodeDescriber) Describe(namespace, name string) (string, error) { +func (d *NodeDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { mc := d.Nodes() node, err := mc.Get(name) if err != nil { @@ -1532,12 +1622,14 @@ func (d *NodeDescriber) Describe(namespace, name string) (string, error) { } var events *api.EventList - if ref, err := api.GetReference(node); err != nil { - glog.Errorf("Unable to construct reference to '%#v': %v", node, err) - } else { - // TODO: We haven't decided the namespace for Node object yet. - ref.UID = types.UID(ref.Name) - events, _ = d.Events("").Search(ref) + if describerSettings.ShowEvents { + if ref, err := api.GetReference(node); err != nil { + glog.Errorf("Unable to construct reference to '%#v': %v", node, err) + } else { + // TODO: We haven't decided the namespace for Node object yet. 
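// NOTE (editorial, not part of the patch): the UID override below appears to exist
// because node events are recorded with the node's name in involvedObject.uid rather
// than a real object UID, so the event search only matches when the reference UID is
// set to the name; this is an inference from the surrounding code and the TODO above,
// not something the patch itself states.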
+ ref.UID = types.UID(ref.Name) + events, _ = d.Events("").Search(ref) + } } return describeNode(node, nodeNonTerminatedPodsList, events, canViewPods) @@ -1546,7 +1638,8 @@ func (d *NodeDescriber) Describe(namespace, name string) (string, error) { func describeNode(node *api.Node, nodeNonTerminatedPodsList *api.PodList, events *api.EventList, canViewPods bool) (string, error) { return tabbedString(func(out io.Writer) error { fmt.Fprintf(out, "Name:\t%s\n", node.Name) - fmt.Fprintf(out, "Labels:\t%s\n", labels.FormatLabels(node.Labels)) + printLabelsMultiline(out, "Labels", node.Labels) + printTaintsInAnnotationMultiline(out, "Taints", node.Annotations) fmt.Fprintf(out, "CreationTimestamp:\t%s\n", node.CreationTimestamp.Time.Format(time.RFC1123Z)) fmt.Fprintf(out, "Phase:\t%v\n", node.Status.Phase) if len(node.Status.Conditions) > 0 { @@ -1562,7 +1655,7 @@ func describeNode(node *api.Node, nodeNonTerminatedPodsList *api.PodList, events c.Message) } } - var addresses []string + addresses := make([]string, 0, len(node.Status.Addresses)) for _, address := range node.Status.Addresses { addresses = append(addresses, address.Address) } @@ -1580,6 +1673,8 @@ func describeNode(node *api.Node, nodeNonTerminatedPodsList *api.PodList, events fmt.Fprintf(out, " Boot ID:\t%s\n", node.Status.NodeInfo.BootID) fmt.Fprintf(out, " Kernel Version:\t%s\n", node.Status.NodeInfo.KernelVersion) fmt.Fprintf(out, " OS Image:\t%s\n", node.Status.NodeInfo.OSImage) + fmt.Fprintf(out, " Operating System:\t%s\n", node.Status.NodeInfo.OperatingSystem) + fmt.Fprintf(out, " Architecture:\t%s\n", node.Status.NodeInfo.Architecture) fmt.Fprintf(out, " Container Runtime Version:\t%s\n", node.Status.NodeInfo.ContainerRuntimeVersion) fmt.Fprintf(out, " Kubelet Version:\t%s\n", node.Status.NodeInfo.KubeletVersion) fmt.Fprintf(out, " Kube-Proxy Version:\t%s\n", node.Status.NodeInfo.KubeProxyVersion) @@ -1604,28 +1699,69 @@ func describeNode(node *api.Node, nodeNonTerminatedPodsList *api.PodList, events }) } +type PetSetDescriber struct { + client *client.Client +} + +func (p *PetSetDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { + ps, err := p.client.Apps().PetSets(namespace).Get(name) + if err != nil { + return "", err + } + pc := p.client.Pods(namespace) + + selector, err := unversioned.LabelSelectorAsSelector(ps.Spec.Selector) + if err != nil { + return "", err + } + + running, waiting, succeeded, failed, err := getPodStatusForController(pc, selector) + if err != nil { + return "", err + } + + return tabbedString(func(out io.Writer) error { + fmt.Fprintf(out, "Name:\t%s\n", ps.Name) + fmt.Fprintf(out, "Namespace:\t%s\n", ps.Namespace) + fmt.Fprintf(out, "Image(s):\t%s\n", makeImageList(&ps.Spec.Template.Spec)) + fmt.Fprintf(out, "Selector:\t%s\n", unversioned.FormatLabelSelector(ps.Spec.Selector)) + fmt.Fprintf(out, "Labels:\t%s\n", labels.FormatLabels(ps.Labels)) + fmt.Fprintf(out, "Replicas:\t%d current / %d desired\n", ps.Status.Replicas, ps.Spec.Replicas) + fmt.Fprintf(out, "Annotations:\t%s\n", labels.FormatLabels(ps.Annotations)) + fmt.Fprintf(out, "CreationTimestamp:\t%s\n", ps.CreationTimestamp.Time.Format(time.RFC1123Z)) + fmt.Fprintf(out, "Pods Status:\t%d Running / %d Waiting / %d Succeeded / %d Failed\n", running, waiting, succeeded, failed) + describeVolumes(ps.Spec.Template.Spec.Volumes, out, "") + if describerSettings.ShowEvents { + events, _ := p.client.Events(namespace).Search(ps) + if events != nil { + DescribeEvents(events, out) + } + } + return nil + }) +} + 
// HorizontalPodAutoscalerDescriber generates information about a horizontal pod autoscaler. type HorizontalPodAutoscalerDescriber struct { client *client.Client } -func (d *HorizontalPodAutoscalerDescriber) Describe(namespace, name string) (string, error) { - hpa, err := d.client.Extensions().HorizontalPodAutoscalers(namespace).Get(name) +func (d *HorizontalPodAutoscalerDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { + hpa, err := d.client.Autoscaling().HorizontalPodAutoscalers(namespace).Get(name) if err != nil { return "", err } return tabbedString(func(out io.Writer) error { fmt.Fprintf(out, "Name:\t%s\n", hpa.Name) fmt.Fprintf(out, "Namespace:\t%s\n", hpa.Namespace) - fmt.Fprintf(out, "Labels:\t%s\n", labels.FormatLabels(hpa.Labels)) - fmt.Fprintf(out, "Annotations:\t%s\n", labels.FormatLabels(hpa.Annotations)) + printLabelsMultiline(out, "Labels", hpa.Labels) + printLabelsMultiline(out, "Annotations", hpa.Annotations) fmt.Fprintf(out, "CreationTimestamp:\t%s\n", hpa.CreationTimestamp.Time.Format(time.RFC1123Z)) - fmt.Fprintf(out, "Reference:\t%s/%s/%s\n", - hpa.Spec.ScaleRef.Kind, - hpa.Spec.ScaleRef.Name, - hpa.Spec.ScaleRef.Subresource) - if hpa.Spec.CPUUtilization != nil { - fmt.Fprintf(out, "Target CPU utilization:\t%d%%\n", hpa.Spec.CPUUtilization.TargetPercentage) + fmt.Fprintf(out, "Reference:\t%s/%s\n", + hpa.Spec.ScaleTargetRef.Kind, + hpa.Spec.ScaleTargetRef.Name) + if hpa.Spec.TargetCPUUtilizationPercentage != nil { + fmt.Fprintf(out, "Target CPU utilization:\t%d%%\n", *hpa.Spec.TargetCPUUtilizationPercentage) fmt.Fprintf(out, "Current CPU utilization:\t") if hpa.Status.CurrentCPUUtilizationPercentage != nil { fmt.Fprintf(out, "%d%%\n", *hpa.Status.CurrentCPUUtilizationPercentage) @@ -1641,9 +1777,9 @@ func (d *HorizontalPodAutoscalerDescriber) Describe(namespace, name string) (str fmt.Fprintf(out, "Max replicas:\t%d\n", hpa.Spec.MaxReplicas) // TODO: switch to scale subresource once the required code is submitted. - if strings.ToLower(hpa.Spec.ScaleRef.Kind) == "replicationcontroller" { + if strings.ToLower(hpa.Spec.ScaleTargetRef.Kind) == "replicationcontroller" { fmt.Fprintf(out, "ReplicationController pods:\t") - rc, err := d.client.ReplicationControllers(hpa.Namespace).Get(hpa.Spec.ScaleRef.Name) + rc, err := d.client.ReplicationControllers(hpa.Namespace).Get(hpa.Spec.ScaleTargetRef.Name) if err == nil { fmt.Fprintf(out, "%d current / %d desired\n", rc.Status.Replicas, rc.Spec.Replicas) } else { @@ -1651,9 +1787,11 @@ func (d *HorizontalPodAutoscalerDescriber) Describe(namespace, name string) (str } } - events, _ := d.client.Events(namespace).Search(hpa) - if events != nil { - DescribeEvents(events, out) + if describerSettings.ShowEvents { + events, _ := d.client.Events(namespace).Search(hpa) + if events != nil { + DescribeEvents(events, out) + } } return nil }) @@ -1678,7 +1816,7 @@ func describeNodeResource(nodeNonTerminatedPodsList *api.PodList, node *api.Node memoryReq.String(), int64(fractionMemoryReq), memoryLimit.String(), int64(fractionMemoryLimit)) } - fmt.Fprint(out, "Allocated resources:\n (Total limits may be over 100%, i.e., overcommitted. More info: http://releases.k8s.io/HEAD/docs/user-guide/compute-resources.md)\n CPU Requests\tCPU Limits\tMemory Requests\tMemory Limits\n") + fmt.Fprint(out, "Allocated resources:\n (Total limits may be over 100 percent, i.e., overcommitted. 
More info: http://releases.k8s.io/HEAD/docs/user-guide/compute-resources.md)\n CPU Requests\tCPU Limits\tMemory Requests\tMemory Limits\n") fmt.Fprint(out, " ------------\t----------\t---------------\t-------------\n") reqs, limits, err := getPodsTotalRequestsAndLimits(nodeNonTerminatedPodsList) if err != nil { @@ -1719,15 +1857,17 @@ func getPodsTotalRequestsAndLimits(podList *api.PodList) (reqs map[api.ResourceN for podReqName, podReqValue := range podReqs { if value, ok := reqs[podReqName]; !ok { reqs[podReqName] = *podReqValue.Copy() - } else if err = value.Add(podReqValue); err != nil { - return nil, nil, err + } else { + value.Add(podReqValue) + reqs[podReqName] = value } } for podLimitName, podLimitValue := range podLimits { if value, ok := limits[podLimitName]; !ok { limits[podLimitName] = *podLimitValue.Copy() - } else if err = value.Add(podLimitValue); err != nil { - return nil, nil, err + } else { + value.Add(podLimitValue) + limits[podLimitName] = value } } } @@ -1760,7 +1900,7 @@ type DeploymentDescriber struct { clientset.Interface } -func (dd *DeploymentDescriber) Describe(namespace, name string) (string, error) { +func (dd *DeploymentDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { d, err := dd.Extensions().Deployments(namespace).Get(name) if err != nil { return "", err @@ -1773,7 +1913,7 @@ func (dd *DeploymentDescriber) Describe(namespace, name string) (string, error) fmt.Fprintf(out, "Name:\t%s\n", d.ObjectMeta.Name) fmt.Fprintf(out, "Namespace:\t%s\n", d.ObjectMeta.Namespace) fmt.Fprintf(out, "CreationTimestamp:\t%s\n", d.CreationTimestamp.Time.Format(time.RFC1123Z)) - fmt.Fprintf(out, "Labels:\t%s\n", labels.FormatLabels(d.Labels)) + printLabelsMultiline(out, "Labels", d.Labels) fmt.Fprintf(out, "Selector:\t%s\n", selector) fmt.Fprintf(out, "Replicas:\t%d updated | %d total | %d available | %d unavailable\n", d.Status.UpdatedReplicas, d.Spec.Replicas, d.Status.AvailableReplicas, d.Status.UnavailableReplicas) fmt.Fprintf(out, "StrategyType:\t%s\n", d.Spec.Strategy.Type) @@ -1794,9 +1934,11 @@ func (dd *DeploymentDescriber) Describe(namespace, name string) (string, error) } fmt.Fprintf(out, "NewReplicaSet:\t%s\n", printReplicaSetsByLabels(newRSs)) } - events, err := dd.Core().Events(namespace).Search(d) - if err == nil && events != nil { - DescribeEvents(events, out) + if describerSettings.ShowEvents { + events, err := dd.Core().Events(namespace).Search(d) + if err == nil && events != nil { + DescribeEvents(events, out) + } } return nil }) @@ -1832,7 +1974,7 @@ func getDaemonSetsForLabels(c client.DaemonSetInterface, labelsToMatch labels.La func printReplicationControllersByLabels(matchingRCs []*api.ReplicationController) string { // Format the matching RC's into strings. - var rcStrings []string + rcStrings := make([]string, 0, len(matchingRCs)) for _, controller := range matchingRCs { rcStrings = append(rcStrings, fmt.Sprintf("%s (%d/%d replicas created)", controller.Name, controller.Status.Replicas, controller.Spec.Replicas)) } @@ -1846,7 +1988,7 @@ func printReplicationControllersByLabels(matchingRCs []*api.ReplicationControlle func printReplicaSetsByLabels(matchingRSs []*extensions.ReplicaSet) string { // Format the matching ReplicaSets into strings. 
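// NOTE (editorial, not part of the patch): the change below repeats a pattern applied
// throughout this diff: `var s []T` or `s := []T{}` becomes `make([]T, 0, n)`. Length 0
// keeps the append-based code identical, while the capacity hint lets append fill the
// backing array without the grow-and-copy reallocations an empty slice would incur.
// A sketch of the idiom (names mirror the surrounding code):
//
//	rsStrings := make([]string, 0, len(matchingRSs)) // one allocation up front
//	for _, rs := range matchingRSs {
//		rsStrings = append(rsStrings, rs.Name) // appends never reallocate
//	}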
- var rsStrings []string + rsStrings := make([]string, 0, len(matchingRSs)) for _, rs := range matchingRSs { rsStrings = append(rsStrings, fmt.Sprintf("%s (%d/%d replicas created)", rs.Name, rs.Status.Replicas, rs.Spec.Replicas)) } @@ -1884,7 +2026,7 @@ type ConfigMapDescriber struct { client.Interface } -func (d *ConfigMapDescriber) Describe(namespace, name string) (string, error) { +func (d *ConfigMapDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { c := d.ConfigMaps(namespace) configMap, err := c.Get(name) @@ -1899,8 +2041,8 @@ func describeConfigMap(configMap *api.ConfigMap) (string, error) { return tabbedString(func(out io.Writer) error { fmt.Fprintf(out, "Name:\t%s\n", configMap.Name) fmt.Fprintf(out, "Namespace:\t%s\n", configMap.Namespace) - fmt.Fprintf(out, "Labels:\t%s\n", labels.FormatLabels(configMap.Labels)) - fmt.Fprintf(out, "Annotations:\t%s\n", labels.FormatLabels(configMap.Annotations)) + printLabelsMultiline(out, "Labels", configMap.Labels) + printLabelsMultiline(out, "Annotations", configMap.Annotations) fmt.Fprintf(out, "\nData\n====\n") for k, v := range configMap.Data { @@ -1911,9 +2053,92 @@ func describeConfigMap(configMap *api.ConfigMap) (string, error) { }) } +type ClusterDescriber struct { + fed_clientset.Interface +} + +func (d *ClusterDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { + cluster, err := d.Federation().Clusters().Get(name) + if err != nil { + return "", err + } + return describeCluster(cluster) +} + +func describeCluster(cluster *federation.Cluster) (string, error) { + return tabbedString(func(out io.Writer) error { + fmt.Fprintf(out, "Name:\t%s\n", cluster.Name) + fmt.Fprintf(out, "Labels:\t%s\n", labels.FormatLabels(cluster.Labels)) + + fmt.Fprintf(out, "ServerAddressByClientCIDRs:\n ClientCIDR\tServerAddress\n") + fmt.Fprintf(out, " ----\t----\n") + for _, cidrAddr := range cluster.Spec.ServerAddressByClientCIDRs { + fmt.Fprintf(out, " %v \t%v\n\n", cidrAddr.ClientCIDR, cidrAddr.ServerAddress) + } + + if len(cluster.Status.Conditions) > 0 { + fmt.Fprint(out, "Conditions:\n Type\tStatus\tLastUpdateTime\tLastTransitionTime\tReason\tMessage\n") + fmt.Fprint(out, " ----\t------\t-----------------\t------------------\t------\t-------\n") + for _, c := range cluster.Status.Conditions { + fmt.Fprintf(out, " %v \t%v \t%s \t%s \t%v \t%v\n", + c.Type, + c.Status, + c.LastProbeTime.Time.Format(time.RFC1123Z), + c.LastTransitionTime.Time.Format(time.RFC1123Z), + c.Reason, + c.Message) + } + } + + fmt.Fprintf(out, "Version:\t%s\n", cluster.Status.Version) + + if len(cluster.Status.Capacity) > 0 { + fmt.Fprintf(out, "Capacity:\n") + for resource, value := range cluster.Status.Capacity { + fmt.Fprintf(out, " %s:\t%s\n", resource, value.String()) + } + } + + if len(cluster.Status.Allocatable) > 0 { + fmt.Fprintf(out, "Allocatable:\n") + for resource, value := range cluster.Status.Allocatable { + fmt.Fprintf(out, " %s:\t%s\n", resource, value.String()) + } + } + return nil + }) +} + +// NetworkPolicyDescriber generates information about a NetworkPolicy +type NetworkPolicyDescriber struct { + client.Interface +} + +func (d *NetworkPolicyDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { + c := d.Extensions().NetworkPolicies(namespace) + + networkPolicy, err := c.Get(name) + if err != nil { + return "", err + } + + return describeNetworkPolicy(networkPolicy) +} + +func describeNetworkPolicy(networkPolicy 
*extensions.NetworkPolicy) (string, error) { + return tabbedString(func(out io.Writer) error { + fmt.Fprintf(out, "Name:\t%s\n", networkPolicy.Name) + fmt.Fprintf(out, "Namespace:\t%s\n", networkPolicy.Namespace) + printLabelsMultiline(out, "Labels", networkPolicy.Labels) + printLabelsMultiline(out, "Annotations", networkPolicy.Annotations) + + return nil + }) +} + // newErrNoDescriber creates a new ErrNoDescriber with the names of the provided types. func newErrNoDescriber(types ...reflect.Type) error { - names := []string{} + names := make([]string, 0, len(types)) for _, t := range types { names = append(names, t.String()) } @@ -1952,7 +2177,7 @@ func (d *Describers) DescribeObject(exact interface{}, extra ...interface{}) (st return fns[0].Describe(exact, extra...) } - types := []reflect.Type{} + types := make([]reflect.Type, 0, len(extra)) for _, obj := range extra { types = append(types, reflect.TypeOf(obj)) } @@ -1977,14 +2202,15 @@ func (d *Describers) Add(fns ...interface{}) error { if ft.Kind() != reflect.Func { return fmt.Errorf("expected func, got: %v", ft) } - if ft.NumIn() == 0 { + numIn := ft.NumIn() + if numIn == 0 { return fmt.Errorf("expected at least one 'in' params, got: %v", ft) } if ft.NumOut() != 2 { return fmt.Errorf("expected two 'out' params - (string, error), got: %v", ft) } - types := []reflect.Type{} - for i := 0; i < ft.NumIn(); i++ { + types := make([]reflect.Type, 0, numIn) + for i := 0; i < numIn; i++ { types = append(types, ft.In(i)) } if ft.Out(0) != reflect.TypeOf(string("")) { @@ -2050,3 +2276,74 @@ func (fn typeFunc) Describe(exact interface{}, extra ...interface{}) (string, er } return s, err } + +// printLabelsMultiline prints multiple labels with a proper alignment. +func printLabelsMultiline(out io.Writer, title string, labels map[string]string) { + printLabelsMultilineWithIndent(out, "", title, "\t", labels) +} + +// printLabelsMultilineWithIndent prints multiple labels with a user-defined alignment. +func printLabelsMultilineWithIndent(out io.Writer, initialIndent, title, innerIndent string, labels map[string]string) { + + fmt.Fprintf(out, "%s%s:%s", initialIndent, title, innerIndent) + + if labels == nil || len(labels) == 0 { + fmt.Fprintln(out, "<none>") + return + } + + // to print labels in the sorted order + keys := make([]string, 0, len(labels)) + for key := range labels { + keys = append(keys, key) + } + sort.Strings(keys) + + for i, key := range keys { + if i != 0 { + fmt.Fprint(out, initialIndent) + fmt.Fprint(out, innerIndent) + } + fmt.Fprintf(out, "%s=%s\n", key, labels[key]) + i++ + } +} + +// printTaintsInAnnotationMultiline prints multiple taints with a proper alignment. +func printTaintsInAnnotationMultiline(out io.Writer, title string, annotations map[string]string) { + taints, err := api.GetTaintsFromNodeAnnotations(annotations) + if err != nil { + taints = []api.Taint{} + } + printTaintsMultilineWithIndent(out, "", title, "\t", taints) +} + +// printTaintsMultilineWithIndent prints multiple taints with a user-defined alignment. 
+func printTaintsMultilineWithIndent(out io.Writer, initialIndent, title, innerIndent string, taints []api.Taint) { + fmt.Fprintf(out, "%s%s:%s", initialIndent, title, innerIndent) + + if taints == nil || len(taints) == 0 { + fmt.Fprintln(out, "<none>") + return + } + + // to print taints in the sorted order + keys := make([]string, 0, len(taints)) + for _, taint := range taints { + keys = append(keys, taint.Key) + } + sort.Strings(keys) + + for i, key := range keys { + for _, taint := range taints { + if taint.Key == key { + if i != 0 { + fmt.Fprint(out, initialIndent) + fmt.Fprint(out, innerIndent) + } + fmt.Fprintf(out, "%s=%s:%s\n", taint.Key, taint.Value, taint.Effect) + i++ + } + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/describe_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/describe_test.go new file mode 100644 index 000000000000..a9e0a1be0b26 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/describe_test.go @@ -0,0 +1,700 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubectl + +import ( + "bytes" + "fmt" + "reflect" + "strings" + "testing" + "time" + + "k8s.io/kubernetes/federation/apis/federation" + fed_fake "k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/fake" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/resource" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" + client "k8s.io/kubernetes/pkg/client/unversioned" + "k8s.io/kubernetes/pkg/client/unversioned/testclient" +) + +type describeClient struct { + T *testing.T + Namespace string + Err error + client.Interface +} + +func TestDescribePod(t *testing.T) { + fake := testclient.NewSimpleFake(&api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: "bar", + Namespace: "foo", + }, + }) + c := &describeClient{T: t, Namespace: "foo", Interface: fake} + d := PodDescriber{c} + out, err := d.Describe("foo", "bar", DescriberSettings{ShowEvents: true}) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + if !strings.Contains(out, "bar") || !strings.Contains(out, "Status:") { + t.Errorf("unexpected out: %s", out) + } +} + +func TestDescribeService(t *testing.T) { + fake := testclient.NewSimpleFake(&api.Service{ + ObjectMeta: api.ObjectMeta{ + Name: "bar", + Namespace: "foo", + }, + }) + c := &describeClient{T: t, Namespace: "foo", Interface: fake} + d := ServiceDescriber{c} + out, err := d.Describe("foo", "bar", DescriberSettings{ShowEvents: true}) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + if !strings.Contains(out, "Labels:") || !strings.Contains(out, "bar") { + t.Errorf("unexpected out: %s", out) + } +} + +func TestPodDescribeResultsSorted(t *testing.T) { + // Arrange + fake := testclient.NewSimpleFake(&api.EventList{ + Items: []api.Event{ + { + Source: api.EventSource{Component: "kubelet"}, + Message: "Item 1", + FirstTimestamp: 
unversioned.NewTime(time.Date(2014, time.January, 15, 0, 0, 0, 0, time.UTC)), + LastTimestamp: unversioned.NewTime(time.Date(2014, time.January, 15, 0, 0, 0, 0, time.UTC)), + Count: 1, + Type: api.EventTypeNormal, + }, + { + Source: api.EventSource{Component: "scheduler"}, + Message: "Item 2", + FirstTimestamp: unversioned.NewTime(time.Date(1987, time.June, 17, 0, 0, 0, 0, time.UTC)), + LastTimestamp: unversioned.NewTime(time.Date(1987, time.June, 17, 0, 0, 0, 0, time.UTC)), + Count: 1, + Type: api.EventTypeNormal, + }, + { + Source: api.EventSource{Component: "kubelet"}, + Message: "Item 3", + FirstTimestamp: unversioned.NewTime(time.Date(2002, time.December, 25, 0, 0, 0, 0, time.UTC)), + LastTimestamp: unversioned.NewTime(time.Date(2002, time.December, 25, 0, 0, 0, 0, time.UTC)), + Count: 1, + Type: api.EventTypeNormal, + }, + }, + }) + c := &describeClient{T: t, Namespace: "foo", Interface: fake} + d := PodDescriber{c} + + // Act + out, err := d.Describe("foo", "bar", DescriberSettings{ShowEvents: true}) + + // Assert + if err != nil { + t.Errorf("unexpected error: %v", err) + } + VerifyDatesInOrder(out, "\n" /* rowDelimiter */, "\t" /* columnDelimiter */, t) +} + +func TestDescribeContainers(t *testing.T) { + testCases := []struct { + container api.Container + status api.ContainerStatus + expectedElements []string + }{ + // Running state. + { + container: api.Container{Name: "test", Image: "image"}, + status: api.ContainerStatus{ + Name: "test", + State: api.ContainerState{ + Running: &api.ContainerStateRunning{ + StartedAt: unversioned.NewTime(time.Now()), + }, + }, + Ready: true, + RestartCount: 7, + }, + expectedElements: []string{"test", "State", "Running", "Ready", "True", "Restart Count", "7", "Image", "image", "Started"}, + }, + // Waiting state. + { + container: api.Container{Name: "test", Image: "image"}, + status: api.ContainerStatus{ + Name: "test", + State: api.ContainerState{ + Waiting: &api.ContainerStateWaiting{ + Reason: "potato", + }, + }, + Ready: true, + RestartCount: 7, + }, + expectedElements: []string{"test", "State", "Waiting", "Ready", "True", "Restart Count", "7", "Image", "image", "Reason", "potato"}, + }, + // Terminated state. + { + container: api.Container{Name: "test", Image: "image"}, + status: api.ContainerStatus{ + Name: "test", + State: api.ContainerState{ + Terminated: &api.ContainerStateTerminated{ + StartedAt: unversioned.NewTime(time.Now()), + FinishedAt: unversioned.NewTime(time.Now()), + Reason: "potato", + ExitCode: 2, + }, + }, + Ready: true, + RestartCount: 7, + }, + expectedElements: []string{"test", "State", "Terminated", "Ready", "True", "Restart Count", "7", "Image", "image", "Reason", "potato", "Started", "Finished", "Exit Code", "2"}, + }, + // Last Terminated + { + container: api.Container{Name: "test", Image: "image"}, + status: api.ContainerStatus{ + Name: "test", + State: api.ContainerState{ + Running: &api.ContainerStateRunning{ + StartedAt: unversioned.NewTime(time.Now()), + }, + }, + LastTerminationState: api.ContainerState{ + Terminated: &api.ContainerStateTerminated{ + StartedAt: unversioned.NewTime(time.Now().Add(time.Second * 3)), + FinishedAt: unversioned.NewTime(time.Now()), + Reason: "crashing", + ExitCode: 3, + }, + }, + Ready: true, + RestartCount: 7, + }, + expectedElements: []string{"test", "State", "Terminated", "Ready", "True", "Restart Count", "7", "Image", "image", "Started", "Finished", "Exit Code", "2", "crashing", "3"}, + }, + // No state defaults to waiting. 
+ { + container: api.Container{Name: "test", Image: "image"}, + status: api.ContainerStatus{ + Name: "test", + Ready: true, + RestartCount: 7, + }, + expectedElements: []string{"test", "State", "Waiting", "Ready", "True", "Restart Count", "7", "Image", "image"}, + }, + // Env + { + container: api.Container{Name: "test", Image: "image", Env: []api.EnvVar{{Name: "envname", Value: "xyz"}}}, + status: api.ContainerStatus{ + Name: "test", + Ready: true, + RestartCount: 7, + }, + expectedElements: []string{"test", "State", "Waiting", "Ready", "True", "Restart Count", "7", "Image", "image", "envname", "xyz"}, + }, + // Command + { + container: api.Container{Name: "test", Image: "image", Command: []string{"sleep", "1000"}}, + status: api.ContainerStatus{ + Name: "test", + Ready: true, + RestartCount: 7, + }, + expectedElements: []string{"test", "State", "Waiting", "Ready", "True", "Restart Count", "7", "Image", "image", "sleep", "1000"}, + }, + // Args + { + container: api.Container{Name: "test", Image: "image", Args: []string{"time", "1000"}}, + status: api.ContainerStatus{ + Name: "test", + Ready: true, + RestartCount: 7, + }, + expectedElements: []string{"test", "State", "Waiting", "Ready", "True", "Restart Count", "7", "Image", "image", "time", "1000"}, + }, + // QoS classes + { + container: api.Container{ + Name: "test", + Image: "image", + }, + status: api.ContainerStatus{ + Name: "test", + Ready: true, + RestartCount: 7, + }, + expectedElements: []string{"cpu", "BestEffort", "memory", "BestEffort"}, + }, + // Using limits. + { + container: api.Container{ + Name: "test", + Image: "image", + Resources: api.ResourceRequirements{ + Limits: api.ResourceList{ + api.ResourceName(api.ResourceCPU): resource.MustParse("1000"), + api.ResourceName(api.ResourceMemory): resource.MustParse("4G"), + api.ResourceName(api.ResourceStorage): resource.MustParse("20G"), + }, + }, + }, + status: api.ContainerStatus{ + Name: "test", + Ready: true, + RestartCount: 7, + }, + expectedElements: []string{"cpu", "1k", "memory", "4G", "storage", "20G"}, + }, + // Using requests. 
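// (As in the limits case above, the expected strings "1k" and "4G" rely on
// resource.Quantity canonicalization: resource.MustParse("1000") CPU renders
// as "1k" when printed, while "4G" and "20G" survive unchanged, so the
// strings.Contains checks after the test table find them in the output.)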
+ { + container: api.Container{ + Name: "test", + Image: "image", + Resources: api.ResourceRequirements{ + Requests: api.ResourceList{ + api.ResourceName(api.ResourceCPU): resource.MustParse("1000"), + api.ResourceName(api.ResourceMemory): resource.MustParse("4G"), + api.ResourceName(api.ResourceStorage): resource.MustParse("20G"), + }, + }, + }, + expectedElements: []string{"cpu", "1k", "memory", "4G", "storage", "20G"}, + }, + } + + for i, testCase := range testCases { + out := new(bytes.Buffer) + pod := api.Pod{ + Spec: api.PodSpec{ + Containers: []api.Container{testCase.container}, + }, + Status: api.PodStatus{ + ContainerStatuses: []api.ContainerStatus{testCase.status}, + }, + } + describeContainers("Containers", pod.Spec.Containers, pod.Status.ContainerStatuses, EnvValueRetriever(&pod), out, "") + output := out.String() + for _, expected := range testCase.expectedElements { + if !strings.Contains(output, expected) { + t.Errorf("Test case %d: expected to find %q in output: %q", i, expected, output) + } + } + } +} + +func TestDescribers(t *testing.T) { + first := &api.Event{} + second := &api.Pod{} + var third *api.Pod + testErr := fmt.Errorf("test") + d := Describers{} + d.Add( + func(e *api.Event, p *api.Pod) (string, error) { + if e != first { + t.Errorf("first argument not equal: %#v", e) + } + if p != second { + t.Errorf("second argument not equal: %#v", p) + } + return "test", testErr + }, + ) + if out, err := d.DescribeObject(first, second); out != "test" || err != testErr { + t.Errorf("unexpected result: %s %v", out, err) + } + + if out, err := d.DescribeObject(first, second, third); out != "" || err == nil { + t.Errorf("unexpected result: %s %v", out, err) + } else { + if noDescriber, ok := err.(ErrNoDescriber); ok { + if !reflect.DeepEqual(noDescriber.Types, []string{"*api.Event", "*api.Pod", "*api.Pod"}) { + t.Errorf("unexpected describer: %v", err) + } + } else { + t.Errorf("unexpected error type: %v", err) + } + } + + d.Add( + func(e *api.Event) (string, error) { + if e != first { + t.Errorf("first argument not equal: %#v", e) + } + return "simpler", testErr + }, + ) + if out, err := d.DescribeObject(first); out != "simpler" || err != testErr { + t.Errorf("unexpected result: %s %v", out, err) + } +} + +func TestDefaultDescribers(t *testing.T) { + out, err := DefaultObjectDescriber.DescribeObject(&api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}}) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if !strings.Contains(out, "foo") { + t.Errorf("unexpected output: %s", out) + } + + out, err = DefaultObjectDescriber.DescribeObject(&api.Service{ObjectMeta: api.ObjectMeta{Name: "foo"}}) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if !strings.Contains(out, "foo") { + t.Errorf("unexpected output: %s", out) + } + + out, err = DefaultObjectDescriber.DescribeObject(&api.ReplicationController{ObjectMeta: api.ObjectMeta{Name: "foo"}}) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if !strings.Contains(out, "foo") { + t.Errorf("unexpected output: %s", out) + } + + out, err = DefaultObjectDescriber.DescribeObject(&api.Node{ObjectMeta: api.ObjectMeta{Name: "foo"}}) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if !strings.Contains(out, "foo") { + t.Errorf("unexpected output: %s", out) + } +} + +func TestGetPodsTotalRequests(t *testing.T) { + testCases := []struct { + pods *api.PodList + expectedReqs, expectedLimits map[api.ResourceName]resource.Quantity + }{ + { + pods: &api.PodList{ + Items: []api.Pod{ + { + Spec: 
api.PodSpec{ + Containers: []api.Container{ + { + Resources: api.ResourceRequirements{ + Requests: api.ResourceList{ + api.ResourceName(api.ResourceCPU): resource.MustParse("1"), + api.ResourceName(api.ResourceMemory): resource.MustParse("300Mi"), + api.ResourceName(api.ResourceStorage): resource.MustParse("1G"), + }, + }, + }, + { + Resources: api.ResourceRequirements{ + Requests: api.ResourceList{ + api.ResourceName(api.ResourceCPU): resource.MustParse("90m"), + api.ResourceName(api.ResourceMemory): resource.MustParse("120Mi"), + api.ResourceName(api.ResourceStorage): resource.MustParse("200M"), + }, + }, + }, + }, + }, + }, + { + Spec: api.PodSpec{ + Containers: []api.Container{ + { + Resources: api.ResourceRequirements{ + Requests: api.ResourceList{ + api.ResourceName(api.ResourceCPU): resource.MustParse("60m"), + api.ResourceName(api.ResourceMemory): resource.MustParse("43Mi"), + api.ResourceName(api.ResourceStorage): resource.MustParse("500M"), + }, + }, + }, + { + Resources: api.ResourceRequirements{ + Requests: api.ResourceList{ + api.ResourceName(api.ResourceCPU): resource.MustParse("34m"), + api.ResourceName(api.ResourceMemory): resource.MustParse("83Mi"), + api.ResourceName(api.ResourceStorage): resource.MustParse("700M"), + }, + }, + }, + }, + }, + }, + }, + }, + expectedReqs: map[api.ResourceName]resource.Quantity{ + api.ResourceName(api.ResourceCPU): resource.MustParse("1.184"), + api.ResourceName(api.ResourceMemory): resource.MustParse("546Mi"), + api.ResourceName(api.ResourceStorage): resource.MustParse("2.4G"), + }, + }, + } + + for _, testCase := range testCases { + reqs, _, err := getPodsTotalRequestsAndLimits(testCase.pods) + if err != nil { + t.Errorf("Unexpected error %v", err) + } + if !api.Semantic.DeepEqual(reqs, testCase.expectedReqs) { + t.Errorf("Expected %v, got %v", testCase.expectedReqs, reqs) + } + } +} + +func TestPersistentVolumeDescriber(t *testing.T) { + tests := map[string]*api.PersistentVolume{ + + "hostpath": { + Spec: api.PersistentVolumeSpec{ + PersistentVolumeSource: api.PersistentVolumeSource{ + HostPath: &api.HostPathVolumeSource{}, + }, + }, + }, + "gce": { + Spec: api.PersistentVolumeSpec{ + PersistentVolumeSource: api.PersistentVolumeSource{ + GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{}, + }, + }, + }, + "ebs": { + Spec: api.PersistentVolumeSpec{ + PersistentVolumeSource: api.PersistentVolumeSource{ + AWSElasticBlockStore: &api.AWSElasticBlockStoreVolumeSource{}, + }, + }, + }, + "nfs": { + Spec: api.PersistentVolumeSpec{ + PersistentVolumeSource: api.PersistentVolumeSource{ + NFS: &api.NFSVolumeSource{}, + }, + }, + }, + "iscsi": { + Spec: api.PersistentVolumeSpec{ + PersistentVolumeSource: api.PersistentVolumeSource{ + ISCSI: &api.ISCSIVolumeSource{}, + }, + }, + }, + "gluster": { + Spec: api.PersistentVolumeSpec{ + PersistentVolumeSource: api.PersistentVolumeSource{ + Glusterfs: &api.GlusterfsVolumeSource{}, + }, + }, + }, + "rbd": { + Spec: api.PersistentVolumeSpec{ + PersistentVolumeSource: api.PersistentVolumeSource{ + RBD: &api.RBDVolumeSource{}, + }, + }, + }, + } + + for name, pv := range tests { + fake := testclient.NewSimpleFake(pv) + c := PersistentVolumeDescriber{fake} + str, err := c.Describe("foo", "bar", DescriberSettings{ShowEvents: true}) + if err != nil { + t.Errorf("Unexpected error for test %s: %v", name, err) + } + if str == "" { + t.Errorf("Unexpected empty string for test %s. 
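// Arithmetic behind the expected totals in TestGetPodsTotalRequests above
// (a reading note, not upstream code): cpu 1 + 90m + 60m + 34m = 1.184,
// memory (300+120+43+83)Mi = 546Mi, and storage 1G + 200M + 500M + 700M = 2.4G;
// getPodsTotalRequestsAndLimits sums each named resource across every
// container in every pod of the list.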
Expected PV Describer output", name) + } + } +} + +func TestDescribeDeployment(t *testing.T) { + fake := fake.NewSimpleClientset(&extensions.Deployment{ + ObjectMeta: api.ObjectMeta{ + Name: "bar", + Namespace: "foo", + }, + Spec: extensions.DeploymentSpec{ + Template: api.PodTemplateSpec{}, + }, + }) + d := DeploymentDescriber{fake} + out, err := d.Describe("foo", "bar", DescriberSettings{ShowEvents: true}) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + if !strings.Contains(out, "bar") || !strings.Contains(out, "foo") { + t.Errorf("unexpected out: %s", out) + } +} + +func TestDescribeCluster(t *testing.T) { + cluster := federation.Cluster{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + ResourceVersion: "4", + Labels: map[string]string{ + "name": "foo", + }, + }, + Spec: federation.ClusterSpec{ + ServerAddressByClientCIDRs: []federation.ServerAddressByClientCIDR{ + { + ClientCIDR: "0.0.0.0/0", + ServerAddress: "localhost:8888", + }, + }, + }, + Status: federation.ClusterStatus{ + Conditions: []federation.ClusterCondition{ + {Type: federation.ClusterReady, Status: api.ConditionTrue}, + }, + }, + } + fake := fed_fake.NewSimpleClientset(&cluster) + d := ClusterDescriber{Interface: fake} + out, err := d.Describe("any", "foo", DescriberSettings{ShowEvents: true}) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + if !strings.Contains(out, "foo") || !strings.Contains(out, "Version:") { + t.Errorf("unexpected out: %s", out) + } +} + +func TestDescribeEvents(t *testing.T) { + + events := &api.EventList{ + Items: []api.Event{ + { + Source: api.EventSource{Component: "kubelet"}, + Message: "Item 1", + FirstTimestamp: unversioned.NewTime(time.Date(2014, time.January, 15, 0, 0, 0, 0, time.UTC)), + LastTimestamp: unversioned.NewTime(time.Date(2014, time.January, 15, 0, 0, 0, 0, time.UTC)), + Count: 1, + Type: api.EventTypeNormal, + }, + }, + } + + m := map[string]Describer{ + "DaemonSetDescriber": &DaemonSetDescriber{ + testclient.NewSimpleFake(&extensions.DaemonSet{ + ObjectMeta: api.ObjectMeta{ + Name: "bar", + Namespace: "foo", + }, + }, events), + }, + "DeploymentDescriber": &DeploymentDescriber{ + fake.NewSimpleClientset(&extensions.Deployment{ + ObjectMeta: api.ObjectMeta{ + Name: "bar", + Namespace: "foo", + }, + }, events), + }, + "EndpointsDescriber": &EndpointsDescriber{ + testclient.NewSimpleFake(&api.Endpoints{ + ObjectMeta: api.ObjectMeta{ + Name: "bar", + Namespace: "foo", + }, + }, events), + }, + // TODO(jchaloup): add tests for: + // - HorizontalPodAutoscalerDescriber + // - IngressDescriber + // - JobDescriber + "NodeDescriber": &NodeDescriber{ + testclient.NewSimpleFake(&api.Node{ + ObjectMeta: api.ObjectMeta{ + Name: "bar", + Namespace: "foo", + SelfLink: "url/url/url", + }, + }, events), + }, + "PodDescriber": &PodDescriber{ + testclient.NewSimpleFake(&api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: "bar", + Namespace: "foo", + SelfLink: "url/url/url", + }, + }, events), + }, + "ReplicaSetDescriber": &ReplicaSetDescriber{ + testclient.NewSimpleFake(&extensions.ReplicaSet{ + ObjectMeta: api.ObjectMeta{ + Name: "bar", + Namespace: "foo", + }, + }, events), + }, + "ReplicationControllerDescriber": &ReplicationControllerDescriber{ + testclient.NewSimpleFake(&api.ReplicationController{ + ObjectMeta: api.ObjectMeta{ + Name: "bar", + Namespace: "foo", + }, + }, events), + }, + "Service": &ServiceDescriber{ + testclient.NewSimpleFake(&api.Service{ + ObjectMeta: api.ObjectMeta{ + Name: "bar", + Namespace: "foo", + }, + }, events), + }, + } + + for name, d := 
range m { + out, err := d.Describe("foo", "bar", DescriberSettings{ShowEvents: true}) + if err != nil { + t.Errorf("unexpected error for %q: %v", name, err) + } + if !strings.Contains(out, "bar") { + t.Errorf("unexpected out for %q: %s", name, out) + } + if !strings.Contains(out, "Events:") { + t.Errorf("events not found for %q when ShowEvents=true: %s", name, out) + } + + out, err = d.Describe("foo", "bar", DescriberSettings{ShowEvents: false}) + if err != nil { + t.Errorf("unexpected error for %q: %s", name, err) + } + if !strings.Contains(out, "bar") { + t.Errorf("unexpected out for %q: %s", name, out) + } + if strings.Contains(out, "Events:") { + t.Errorf("events found for %q when ShowEvents=false: %s", name, out) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/generate.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/generate.go index 6a71b619d80e..ea254bcb5487 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/generate.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/generate.go @@ -134,6 +134,34 @@ func MakeParams(cmd *cobra.Command, params []GeneratorParam) map[string]interfac return result } +func MakeProtocols(protocols map[string]string) string { + out := []string{} + for key, value := range protocols { + out = append(out, fmt.Sprintf("%s/%s", key, value)) + } + return strings.Join(out, ",") +} + +func ParseProtocols(protocols interface{}) (map[string]string, error) { + protocolsString, isString := protocols.(string) + if !isString { + return nil, fmt.Errorf("expected string, found %v", protocols) + } + if len(protocolsString) == 0 { + return nil, fmt.Errorf("no protocols passed") + } + portProtocolMap := map[string]string{} + protocolsSlice := strings.Split(protocolsString, ",") + for ix := range protocolsSlice { + portProtocol := strings.Split(protocolsSlice[ix], "/") + if len(portProtocol) != 2 { + return nil, fmt.Errorf("unexpected port protocol mapping: %s", protocolsSlice[ix]) + } + portProtocolMap[portProtocol[0]] = portProtocol[1] + } + return portProtocolMap, nil +} + func MakeLabels(labels map[string]string) string { out := []string{} for key, value := range labels { diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/generate_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/generate_test.go new file mode 100644 index 000000000000..62823f9dc0de --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/generate_test.go @@ -0,0 +1,140 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
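The MakeProtocols/ParseProtocols pair added to generate.go round-trips a port-to-protocol mapping through the "port/protocol,port/protocol" wire form. A minimal sketch of that round trip (illustrative only, using the two functions above; note MakeProtocols iterates a Go map, so the comma order of its output is unspecified):

    m, err := ParseProtocols("8080/TCP,53/UDP")
    if err != nil {
        // "no protocols passed" or a malformed "port/protocol" pair
        panic(err)
    }
    // m == map[string]string{"8080": "TCP", "53": "UDP"}
    s := MakeProtocols(m) // "8080/TCP,53/UDP" or "53/UDP,8080/TCP"
    _ = s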
+*/ + +package kubectl + +import ( + "reflect" + "testing" + + "github.com/spf13/cobra" +) + +type TestStruct struct { + val int +} + +func TestIsZero(t *testing.T) { + tests := []struct { + val interface{} + expectZero bool + }{ + {"", true}, + {nil, true}, + {0, true}, + {TestStruct{}, true}, + {"foo", false}, + {1, false}, + {TestStruct{val: 2}, false}, + } + + for _, test := range tests { + output := IsZero(test.val) + if output != test.expectZero { + t.Errorf("expected: %v, saw %v", test.expectZero, output) + } + } +} + +func TestValidateParams(t *testing.T) { + tests := []struct { + paramSpec []GeneratorParam + params map[string]interface{} + valid bool + }{ + { + paramSpec: []GeneratorParam{}, + params: map[string]interface{}{}, + valid: true, + }, + { + paramSpec: []GeneratorParam{ + {Name: "foo"}, + }, + params: map[string]interface{}{}, + valid: true, + }, + { + paramSpec: []GeneratorParam{ + {Name: "foo", Required: true}, + }, + params: map[string]interface{}{ + "foo": "bar", + }, + valid: true, + }, + { + paramSpec: []GeneratorParam{ + {Name: "foo", Required: true}, + }, + params: map[string]interface{}{ + "baz": "blah", + "foo": "bar", + }, + valid: true, + }, + { + paramSpec: []GeneratorParam{ + {Name: "foo", Required: true}, + {Name: "baz", Required: true}, + }, + params: map[string]interface{}{ + "baz": "blah", + "foo": "bar", + }, + valid: true, + }, + { + paramSpec: []GeneratorParam{ + {Name: "foo", Required: true}, + {Name: "baz", Required: true}, + }, + params: map[string]interface{}{ + "foo": "bar", + }, + valid: false, + }, + } + for _, test := range tests { + err := ValidateParams(test.paramSpec, test.params) + if test.valid && err != nil { + t.Errorf("unexpected error: %v", err) + } + if !test.valid && err == nil { + t.Errorf("unexpected non-error") + } + } +} + +func TestMakeParams(t *testing.T) { + cmd := &cobra.Command{} + cmd.Flags().String("foo", "bar", "") + cmd.Flags().String("baz", "", "") + cmd.Flags().Set("baz", "blah") + + paramSpec := []GeneratorParam{ + {Name: "foo", Required: true}, + {Name: "baz", Required: true}, + } + expected := map[string]interface{}{ + "foo": "bar", + "baz": "blah", + } + params := MakeParams(cmd, paramSpec) + if !reflect.DeepEqual(params, expected) { + t.Errorf("\nexpected:\n%v\nsaw:\n%v", expected, params) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/history.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/history.go index 37cb9e0b29a2..938f03295088 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/history.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/history.go @@ -19,16 +19,15 @@ package kubectl import ( "fmt" "io" - "sort" - "strconv" "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/meta" "k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/apis/extensions" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/runtime" deploymentutil "k8s.io/kubernetes/pkg/util/deployment" - "k8s.io/kubernetes/pkg/util/errors" + sliceutil "k8s.io/kubernetes/pkg/util/slice" ) const ( @@ -75,7 +74,10 @@ func (h *DeploymentHistoryViewer) History(namespace, name string) (HistoryInfo, if err != nil { return historyInfo, fmt.Errorf("failed to retrieve new replica set from deployment %s: %v", name, err) } - allRSs := append(allOldRSs, newRS) + allRSs := allOldRSs + if newRS != nil { + allRSs = append(allRSs, newRS) + } for _, rs := range allRSs { v, err := deploymentutil.Revision(rs) if err != nil { @@ -86,7 +88,9 @@ 
func (h *DeploymentHistoryViewer) History(namespace, name string) (HistoryInfo, if historyInfo.RevisionToTemplate[v].Annotations == nil { historyInfo.RevisionToTemplate[v].Annotations = make(map[string]string) } - historyInfo.RevisionToTemplate[v].Annotations[ChangeCauseAnnotation] = changeCause + if len(changeCause) > 0 { + historyInfo.RevisionToTemplate[v].Annotations[ChangeCauseAnnotation] = changeCause + } } return historyInfo, nil } @@ -97,38 +101,32 @@ func PrintRolloutHistory(historyInfo HistoryInfo, resource, name string) (string return fmt.Sprintf("No rollout history found in %s %q", resource, name), nil } // Sort the revisionToChangeCause map by revision - var revisions []string - for k := range historyInfo.RevisionToTemplate { - revisions = append(revisions, strconv.FormatInt(k, 10)) + revisions := make([]int64, 0, len(historyInfo.RevisionToTemplate)) + for r := range historyInfo.RevisionToTemplate { + revisions = append(revisions, r) } - sort.Strings(revisions) + sliceutil.SortInts64(revisions) return tabbedString(func(out io.Writer) error { fmt.Fprintf(out, "%s %q:\n", resource, name) fmt.Fprintf(out, "REVISION\tCHANGE-CAUSE\n") - errs := []error{} for _, r := range revisions { // Find the change-cause of revision r - r64, err := strconv.ParseInt(r, 10, 64) - if err != nil { - errs = append(errs, err) - continue - } - changeCause := historyInfo.RevisionToTemplate[r64].Annotations[ChangeCauseAnnotation] + changeCause := historyInfo.RevisionToTemplate[r].Annotations[ChangeCauseAnnotation] if len(changeCause) == 0 { changeCause = "" } - fmt.Fprintf(out, "%s\t%s\n", r, changeCause) + fmt.Fprintf(out, "%d\t%s\n", r, changeCause) } - return errors.NewAggregate(errs) + return nil }) } // getChangeCause returns the change-cause annotation of the input object func getChangeCause(obj runtime.Object) string { - meta, err := api.ObjectMetaFor(obj) + accessor, err := meta.Accessor(obj) if err != nil { return "" } - return meta.Annotations[ChangeCauseAnnotation] + return accessor.GetAnnotations()[ChangeCauseAnnotation] } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/kubectl.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/kubectl.go index 844a53781967..9f5cb22ff8e5 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/kubectl.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/kubectl.go @@ -28,14 +28,22 @@ import ( "k8s.io/kubernetes/pkg/api/unversioned" ) -const kubectlAnnotationPrefix = "kubectl.kubernetes.io/" +const ( + kubectlAnnotationPrefix = "kubectl.kubernetes.io/" + // TODO: auto-generate this + PossibleResourceTypes = `Possible resource types include (case insensitive): pods (po), services (svc), deployments, +replicasets (rs), replicationcontrollers (rc), nodes (no), events (ev), limitranges (limits), +persistentvolumes (pv), persistentvolumeclaims (pvc), resourcequotas (quota), namespaces (ns), +serviceaccounts (sa), ingresses (ing), horizontalpodautoscalers (hpa), daemonsets (ds), configmaps, +componentstatuses (cs), endpoints (ep), and secrets.` +) type NamespaceInfo struct { Namespace string } func listOfImages(spec *api.PodSpec) []string { - var images []string + images := make([]string, 0, len(spec.Containers)) for _, container := range spec.Containers { images = append(images, container.Image) } @@ -46,6 +54,28 @@ func makeImageList(spec *api.PodSpec) string { return strings.Join(listOfImages(spec), ",") } +func NewThirdPartyResourceMapper(gvs []unversioned.GroupVersion, gvks []unversioned.GroupVersionKind) (meta.RESTMapper, 
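// A reading note on the history.go hunk above (not upstream code): the old
// implementation formatted each revision with strconv.FormatInt and sorted
// with sort.Strings, which orders "10" before "2"; sorting the raw int64
// revisions via sliceutil.SortInts64 restores true numeric order and removes
// the ParseInt round-trip along with its error aggregation.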
error) { + mapper := meta.NewDefaultRESTMapper(gvs, func(gv unversioned.GroupVersion) (*meta.VersionInterfaces, error) { + for ix := range gvs { + if gvs[ix].Group == gv.Group && gvs[ix].Version == gv.Version { + return &meta.VersionInterfaces{ + ObjectConvertor: api.Scheme, + MetadataAccessor: meta.NewAccessor(), + }, nil + } + } + groupVersions := make([]string, 0, len(gvs)) + for ix := range gvs { + groupVersions = append(groupVersions, gvs[ix].String()) + } + return nil, fmt.Errorf("unsupported storage version: %s (valid: %s)", gv.String(), strings.Join(groupVersions, ", ")) + }) + for ix := range gvks { + mapper.Add(gvks[ix], meta.RESTScopeNamespace) + } + return mapper, nil +} + // OutputVersionMapper is a RESTMapper that will prefer mappings that // correspond to a preferred output version (if feasible) type OutputVersionMapper struct { @@ -130,6 +160,7 @@ var shortForms = map[string]string{ "quota": "resourcequotas", "rc": "replicationcontrollers", "rs": "replicasets", + "sa": "serviceaccounts", "svc": "services", } @@ -144,6 +175,35 @@ func expandResourceShortcut(resource unversioned.GroupVersionResource) unversion return resource } +// ResourceAliases returns the resource shortcuts and plural forms for the given resources. +func ResourceAliases(rs []string) []string { + as := make([]string, 0, len(rs)) + plurals := make(map[string]struct{}, len(rs)) + for _, r := range rs { + var plural string + switch { + case r == "endpoints": + plural = r // exception. "endpoint" does not exist. Why? + case strings.HasSuffix(r, "y"): + plural = r[0:len(r)-1] + "ies" + case strings.HasSuffix(r, "s"): + plural = r + "es" + default: + plural = r + "s" + } + as = append(as, plural) + + plurals[plural] = struct{}{} + } + + for sf, r := range shortForms { + if _, found := plurals[r]; found { + as = append(as, sf) + } + } + return as +} + // parseFileSource parses the source given. Acceptable formats include: // // 1. source-path: the basename will become the key name @@ -169,7 +229,12 @@ func parseFileSource(source string) (keyName, filePath string, err error) { // parseLiteralSource parses the source key=val pair func parseLiteralSource(source string) (keyName, value string, err error) { - items := strings.Split(source, "=") + // leading equal is invalid + if strings.Index(source, "=") == 0 { + return "", "", fmt.Errorf("invalid literal source %v, expected key=value", source) + } + // split after the first equal (so values can have the = character) + items := strings.SplitN(source, "=", 2) if len(items) != 2 { return "", "", fmt.Errorf("invalid literal source %v, expected key=value", source) } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/kubectl_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/kubectl_test.go new file mode 100644 index 000000000000..1cd69d11e9da --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/kubectl_test.go @@ -0,0 +1,200 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
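The move from strings.Split to strings.SplitN(source, "=", 2) in parseLiteralSource is what lets literal values contain "=" themselves; only a leading "=" is rejected outright. A short sketch of the resulting behavior (hypothetical inputs, exercised by TestParseLiteralSource below):

    k, v, err := parseLiteralSource("key=a=b") // k == "key", v == "a=b", err == nil
    _, _, err = parseLiteralSource("=key=v")   // err != nil: leading '=' is invalid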
+*/ + +package kubectl + +import ( + "testing" +) + +func TestParseFileSource(t *testing.T) { + cases := []struct { + name string + input string + key string + filepath string + err bool + }{ + { + name: "success 1", + input: "boo=zoo", + key: "boo", + filepath: "zoo", + err: false, + }, + { + name: "success 2", + input: "boo=/path/to/zoo", + key: "boo", + filepath: "/path/to/zoo", + err: false, + }, + { + name: "success 3", + input: "boo-2=/1/2/3/4/5/zab.txt", + key: "boo-2", + filepath: "/1/2/3/4/5/zab.txt", + err: false, + }, + { + name: "success 4", + input: "boo-=this/seems/weird.txt", + key: "boo-", + filepath: "this/seems/weird.txt", + err: false, + }, + { + name: "success 5", + input: "-key=some/path", + key: "-key", + filepath: "some/path", + err: false, + }, + { + name: "invalid 1", + input: "key==some/path", + err: true, + }, + { + name: "invalid 2", + input: "=key=some/path", + err: true, + }, + { + name: "invalid 3", + input: "==key=/some/other/path", + err: true, + }, + { + name: "invalid 4", + input: "=key", + err: true, + }, + { + name: "invalid 5", + input: "key=", + err: true, + }, + } + + for _, tc := range cases { + key, filepath, err := parseFileSource(tc.input) + if err != nil { + if tc.err { + continue + } + + t.Errorf("%v: unexpected error: %v", tc.name, err) + continue + } + + if tc.err { + t.Errorf("%v: unexpected success", tc.name) + continue + } + + if e, a := tc.key, key; e != a { + t.Errorf("%v: expected key %v; got %v", tc.name, e, a) + continue + } + + if e, a := tc.filepath, filepath; e != a { + t.Errorf("%v: expected filepath %v; got %v", tc.name, e, a) + } + } +} + +func TestParseLiteralSource(t *testing.T) { + cases := []struct { + name string + input string + key string + value string + err bool + }{ + { + name: "success 1", + input: "key=value", + key: "key", + value: "value", + err: false, + }, + { + name: "success 2", + input: "key=value/with/slashes", + key: "key", + value: "value/with/slashes", + err: false, + }, + { + name: "err 1", + input: "key==value", + key: "key", + value: "=value", + err: false, + }, + { + name: "err 2", + input: "key=value=", + key: "key", + value: "value=", + err: false, + }, + { + name: "err 3", + input: "key2=value==", + key: "key2", + value: "value==", + err: false, + }, + { + name: "err 4", + input: "==key", + err: true, + }, + { + name: "err 5", + input: "=key=", + err: true, + }, + } + + for _, tc := range cases { + key, value, err := parseLiteralSource(tc.input) + if err != nil { + if tc.err { + continue + } + + t.Errorf("%v: unexpected error: %v", tc.name, err) + continue + } + + if tc.err { + t.Errorf("%v: unexpected success", tc.name) + continue + } + + if e, a := tc.key, key; e != a { + t.Errorf("%v: expected key %v; got %v", tc.name, e, a) + continue + } + + if e, a := tc.value, value; e != a { + t.Errorf("%v: expected value %v; got %v", tc.name, e, a) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/namespace_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/namespace_test.go new file mode 100644 index 000000000000..70e961ccfc20 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/namespace_test.go @@ -0,0 +1,92 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubectl + +import ( + "reflect" + "testing" + + "k8s.io/kubernetes/pkg/api" +) + +func TestNamespaceGenerate(t *testing.T) { + tests := []struct { + params map[string]interface{} + expected *api.Namespace + expectErr bool + index int + }{ + { + params: map[string]interface{}{ + "name": "foo", + }, + expected: &api.Namespace{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + }, + }, + expectErr: false, + }, + { + params: map[string]interface{}{}, + expectErr: true, + }, + { + params: map[string]interface{}{ + "name": 1, + }, + expectErr: true, + }, + { + params: map[string]interface{}{ + "name": nil, + }, + expectErr: true, + }, + { + params: map[string]interface{}{ + "name_wrong_key": "some_value", + }, + expectErr: true, + }, + { + params: map[string]interface{}{ + "NAME": "some_value", + }, + expectErr: true, + }, + } + generator := NamespaceGeneratorV1{} + for index, test := range tests { + obj, err := generator.Generate(test.params) + switch { + case test.expectErr && err != nil: + continue // loop, since there's no output to check + case test.expectErr && err == nil: + t.Errorf("%v: expected error and didn't get one", index) + continue // loop, no expected output object + case !test.expectErr && err != nil: + t.Errorf("%v: unexpected error: %v", index, err) + continue // loop, no output object + case !test.expectErr && err == nil: + // do nothing and drop through + } + if !reflect.DeepEqual(obj.(*api.Namespace), test.expected) { + t.Errorf("\nexpected:\n%#v\nsaw:\n%#v", test.expected, obj.(*api.Namespace)) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/proxy_server_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/proxy_server_test.go new file mode 100644 index 000000000000..79d1365a33e7 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/proxy_server_test.go @@ -0,0 +1,333 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package kubectl + +import ( + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/url" + "path/filepath" + "strings" + "testing" + + "k8s.io/kubernetes/pkg/client/restclient" +) + +func TestAccept(t *testing.T) { + tests := []struct { + acceptPaths string + rejectPaths string + acceptHosts string + path string + host string + method string + expectAccept bool + }{ + + { + acceptPaths: DefaultPathAcceptRE, + rejectPaths: DefaultPathRejectRE, + acceptHosts: DefaultHostAcceptRE, + path: "/api/v1/pods", + host: "127.0.0.1", + method: "GET", + expectAccept: true, + }, + { + acceptPaths: DefaultPathAcceptRE, + rejectPaths: DefaultPathRejectRE, + acceptHosts: DefaultHostAcceptRE, + path: "/api/v1/pods", + host: "localhost", + method: "GET", + expectAccept: true, + }, + { + acceptPaths: DefaultPathAcceptRE, + rejectPaths: DefaultPathRejectRE, + acceptHosts: DefaultHostAcceptRE, + path: "/api/v1/pods/foo/exec", + host: "127.0.0.1", + method: "GET", + expectAccept: false, + }, + { + acceptPaths: DefaultPathAcceptRE, + rejectPaths: DefaultPathRejectRE, + acceptHosts: DefaultHostAcceptRE, + path: "/api/v1/pods/foo/attach", + host: "127.0.0.1", + method: "GET", + expectAccept: false, + }, + { + acceptPaths: DefaultPathAcceptRE, + rejectPaths: DefaultPathRejectRE, + acceptHosts: DefaultHostAcceptRE, + path: "/api/v1/pods", + host: "evil.com", + method: "GET", + expectAccept: false, + }, + { + acceptPaths: DefaultPathAcceptRE, + rejectPaths: DefaultPathRejectRE, + acceptHosts: DefaultHostAcceptRE, + path: "/api/v1/pods", + host: "localhost.evil.com", + method: "GET", + expectAccept: false, + }, + { + acceptPaths: DefaultPathAcceptRE, + rejectPaths: DefaultPathRejectRE, + acceptHosts: DefaultHostAcceptRE, + path: "/api/v1/pods", + host: "127a0b0c1", + method: "GET", + expectAccept: false, + }, + { + acceptPaths: DefaultPathAcceptRE, + rejectPaths: DefaultPathRejectRE, + acceptHosts: DefaultHostAcceptRE, + path: "/ui", + host: "localhost", + method: "GET", + expectAccept: true, + }, + { + acceptPaths: DefaultPathAcceptRE, + rejectPaths: DefaultPathRejectRE, + acceptHosts: DefaultHostAcceptRE, + path: "/api/v1/pods", + host: "localhost", + method: "POST", + expectAccept: false, + }, + { + acceptPaths: DefaultPathAcceptRE, + rejectPaths: DefaultPathRejectRE, + acceptHosts: DefaultHostAcceptRE, + path: "/api/v1/pods/somepod", + host: "localhost", + method: "PUT", + expectAccept: false, + }, + { + acceptPaths: DefaultPathAcceptRE, + rejectPaths: DefaultPathRejectRE, + acceptHosts: DefaultHostAcceptRE, + path: "/api/v1/pods/somepod", + host: "localhost", + method: "PATCH", + expectAccept: false, + }, + } + for _, test := range tests { + filter := &FilterServer{ + AcceptPaths: MakeRegexpArrayOrDie(test.acceptPaths), + RejectPaths: MakeRegexpArrayOrDie(test.rejectPaths), + AcceptHosts: MakeRegexpArrayOrDie(test.acceptHosts), + RejectMethods: MakeRegexpArrayOrDie(DefaultMethodRejectRE), + } + accept := filter.accept(test.method, test.path, test.host) + if accept != test.expectAccept { + t.Errorf("expected: %v, got %v for %#v", test.expectAccept, accept, test) + } + } +} + +func TestRegexpMatch(t *testing.T) { + tests := []struct { + str string + regexps string + expectMatch bool + }{ + { + str: "foo", + regexps: "bar,.*", + expectMatch: true, + }, + { + str: "foo", + regexps: "bar,fo.*", + expectMatch: true, + }, + { + str: "bar", + regexps: "bar,fo.*", + expectMatch: true, + }, + { + str: "baz", + regexps: "bar,fo.*", + expectMatch: false, + }, + } + for _, test := range tests { + match := 
matchesRegexp(test.str, MakeRegexpArrayOrDie(test.regexps)) + if test.expectMatch != match { + t.Errorf("expected: %v, found: %v, for %s and %v", test.expectMatch, match, test.str, test.regexps) + } + } +} + +func TestFileServing(t *testing.T) { + const ( + fname = "test.txt" + data = "This is test data" + ) + dir, err := ioutil.TempDir("", "data") + if err != nil { + t.Fatalf("error creating tmp dir: %v", err) + } + if err := ioutil.WriteFile(filepath.Join(dir, fname), []byte(data), 0755); err != nil { + t.Fatalf("error writing tmp file: %v", err) + } + + const prefix = "/foo/" + handler := newFileHandler(prefix, dir) + server := httptest.NewServer(handler) + defer server.Close() + + url := server.URL + prefix + fname + res, err := http.Get(url) + if err != nil { + t.Fatalf("http.Get(%q) error: %v", url, err) + } + defer res.Body.Close() + + if res.StatusCode != http.StatusOK { + t.Errorf("res.StatusCode = %d; want %d", res.StatusCode, http.StatusOK) + } + b, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Fatalf("error reading resp body: %v", err) + } + if string(b) != data { + t.Errorf("have %q; want %q", string(b), data) + } +} + +func TestAPIRequests(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + b, err := ioutil.ReadAll(r.Body) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + fmt.Fprintf(w, "%s %s %s", r.Method, r.RequestURI, string(b)) + })) + defer ts.Close() + + // httptest.NewServer should always generate a valid URL. + target, _ := url.Parse(ts.URL) + proxy := newProxy(target) + + tests := []struct{ method, body string }{ + {"GET", ""}, + {"DELETE", ""}, + {"POST", "test payload"}, + {"PUT", "test payload"}, + } + + const path = "/api/test?fields=ID%3Dfoo&labels=key%3Dvalue" + for i, tt := range tests { + r, err := http.NewRequest(tt.method, path, strings.NewReader(tt.body)) + if err != nil { + t.Errorf("error creating request: %v", err) + continue + } + w := httptest.NewRecorder() + proxy.ServeHTTP(w, r) + if w.Code != http.StatusOK { + t.Errorf("%d: proxy.ServeHTTP w.Code = %d; want %d", i, w.Code, http.StatusOK) + } + want := strings.Join([]string{tt.method, path, tt.body}, " ") + if w.Body.String() != want { + t.Errorf("%d: response body = %q; want %q", i, w.Body.String(), want) + } + } +} + +func TestPathHandling(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprint(w, r.URL.Path) + })) + defer ts.Close() + + table := []struct { + prefix string + reqPath string + expectPath string + }{ + {"/api/", "/metrics", "404 page not found\n"}, + {"/api/", "/api/metrics", "/api/metrics"}, + {"/api/", "/api/v1/pods/", "/api/v1/pods/"}, + {"/", "/metrics", "/metrics"}, + {"/", "/api/v1/pods/", "/api/v1/pods/"}, + {"/custom/", "/metrics", "404 page not found\n"}, + {"/custom/", "/api/metrics", "404 page not found\n"}, + {"/custom/", "/api/v1/pods/", "404 page not found\n"}, + {"/custom/", "/custom/api/metrics", "/api/metrics"}, + {"/custom/", "/custom/api/v1/pods/", "/api/v1/pods/"}, + } + + cc := &restclient.Config{ + Host: ts.URL, + } + + for _, item := range table { + func() { + p, err := NewProxyServer("", item.prefix, "/not/used/for/this/test", nil, cc) + if err != nil { + t.Fatalf("%#v: %v", item, err) + } + pts := httptest.NewServer(p.handler) + defer pts.Close() + + r, err := http.Get(pts.URL + item.reqPath) + if err != nil { + t.Fatalf("%#v: %v", item, err) + } + body, err := ioutil.ReadAll(r.Body) + 
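// The table above pins down the proxy's prefix handling: under the default
// "/api/" prefix requests are forwarded verbatim ("/api/metrics" stays
// "/api/metrics"), under a custom prefix the prefix is stripped before
// forwarding ("/custom/api/metrics" becomes "/api/metrics"), a bare "/"
// forwards every path untouched, and paths outside the prefix get the mux's
// "404 page not found" body.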
r.Body.Close() + if err != nil { + t.Fatalf("%#v: %v", item, err) + } + if e, a := item.expectPath, string(body); e != a { + t.Errorf("%#v: Wanted %q, got %q", item, e, a) + } + }() + } +} + +func TestExtractHost(t *testing.T) { + fixtures := map[string]string{ + "localhost:8085": "localhost", + "marmalade": "marmalade", + } + for header, expected := range fixtures { + host := extractHost(header) + if host != expected { + t.Fatalf("%s != %s", host, expected) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/resource/builder.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/resource/builder.go index 7b7bc150402f..8853be8a9f51 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/resource/builder.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/resource/builder.go @@ -36,6 +36,8 @@ import ( var FileExtensions = []string{".json", ".yaml", ".yml"} var InputExtensions = append(FileExtensions, "stdin") +const defaultHttpGetAttempts int = 3 + // Builder provides convenience functions for taking arguments and parameters // from the command line and converting them to a list of resources to iterate // over using the Visitor interface. @@ -98,7 +100,7 @@ func (b *Builder) Schema(schema validation.Schema) *Builder { // will cause an error. // If ContinueOnError() is set prior to this method, objects on the path that are not // recognized will be ignored (but logged at V(2)). -func (b *Builder) FilenameParam(enforceNamespace bool, paths ...string) *Builder { +func (b *Builder) FilenameParam(enforceNamespace, recursive bool, paths ...string) *Builder { for _, s := range paths { switch { case s == "-": @@ -109,9 +111,9 @@ func (b *Builder) FilenameParam(enforceNamespace bool, paths ...string) *Builder b.errs = append(b.errs, fmt.Errorf("the URL passed to filename %q is not valid: %v", s, err)) continue } - b.URL(url) + b.URL(defaultHttpGetAttempts, url) default: - b.Path(s) + b.Path(recursive, s) } } @@ -123,11 +125,12 @@ func (b *Builder) FilenameParam(enforceNamespace bool, paths ...string) *Builder } // URL accepts a number of URLs directly. -func (b *Builder) URL(urls ...*url.URL) *Builder { +func (b *Builder) URL(httpAttemptCount int, urls ...*url.URL) *Builder { for _, u := range urls { b.paths = append(b.paths, &URLVisitor{ - URL: u, - StreamVisitor: NewStreamVisitor(nil, b.mapper, u.String(), b.schema), + URL: u, + StreamVisitor: NewStreamVisitor(nil, b.mapper, u.String(), b.schema), + HttpAttemptCount: httpAttemptCount, }) } return b @@ -157,7 +160,7 @@ func (b *Builder) Stream(r io.Reader, name string) *Builder { // FileVisitor is streaming the content to a StreamVisitor. If ContinueOnError() is set // prior to this method being called, objects on the path that are unrecognized will be // ignored (but logged at V(2)). 
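The builder changes thread an explicit recursive flag from FilenameParam down to Path (and give URL a retry budget via defaultHttpGetAttempts). A hypothetical call site under the new signatures (mapper, client, and codec stand in for whatever the caller already has):

    b := NewBuilder(mapper, api.Scheme, client, codec).
        FilenameParam(false /* enforceNamespace */, true /* recursive */, "./manifests").
        NamespaceParam("test").DefaultNamespace()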
-func (b *Builder) Path(paths ...string) *Builder { +func (b *Builder) Path(recursive bool, paths ...string) *Builder { for _, p := range paths { _, err := os.Stat(p) if os.IsNotExist(err) { @@ -169,7 +172,7 @@ func (b *Builder) Path(paths ...string) *Builder { continue } - visitors, err := ExpandPathsToFileVisitors(b.mapper, p, false, FileExtensions, b.schema) + visitors, err := ExpandPathsToFileVisitors(b.mapper, p, recursive, FileExtensions, b.schema) if err != nil { b.errs = append(b.errs, fmt.Errorf("error reading %q: %v", p, err)) } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/resource/builder_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/resource/builder_test.go new file mode 100644 index 000000000000..66076f9e32dd --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/resource/builder_test.go @@ -0,0 +1,1246 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resource + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/http/httptest" + "os" + "reflect" + "strings" + "testing" + + "github.com/ghodss/yaml" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/meta" + "k8s.io/kubernetes/pkg/api/resource" + "k8s.io/kubernetes/pkg/api/testapi" + apitesting "k8s.io/kubernetes/pkg/api/testing" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/client/unversioned/fake" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/runtime/serializer/streaming" + utilerrors "k8s.io/kubernetes/pkg/util/errors" + utiltesting "k8s.io/kubernetes/pkg/util/testing" + "k8s.io/kubernetes/pkg/watch" + "k8s.io/kubernetes/pkg/watch/versioned" +) + +func stringBody(body string) io.ReadCloser { + return ioutil.NopCloser(bytes.NewReader([]byte(body))) +} + +func watchBody(events ...watch.Event) string { + buf := &bytes.Buffer{} + codec := testapi.Default.Codec() + enc := versioned.NewEncoder(streaming.NewEncoder(buf, codec), codec) + for _, e := range events { + enc.Encode(&e) + } + return buf.String() +} + +func fakeClient() ClientMapper { + return ClientMapperFunc(func(*meta.RESTMapping) (RESTClient, error) { + return &fake.RESTClient{}, nil + }) +} + +func fakeClientWith(testName string, t *testing.T, data map[string]string) ClientMapper { + return ClientMapperFunc(func(*meta.RESTMapping) (RESTClient, error) { + return &fake.RESTClient{ + Codec: testapi.Default.Codec(), + Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { + p := req.URL.Path + q := req.URL.RawQuery + if len(q) != 0 { + p = p + "?" 
+ q + } + body, ok := data[p] + if !ok { + t.Fatalf("%s: unexpected request: %s (%s)\n%#v", testName, p, req.URL, req) + } + header := http.Header{} + header.Set("Content-Type", runtime.ContentTypeJSON) + return &http.Response{ + StatusCode: http.StatusOK, + Header: header, + Body: stringBody(body), + }, nil + }), + }, nil + }) +} + +func testData() (*api.PodList, *api.ServiceList) { + pods := &api.PodList{ + ListMeta: unversioned.ListMeta{ + ResourceVersion: "15", + }, + Items: []api.Pod{ + { + ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: "test", ResourceVersion: "10"}, + Spec: apitesting.DeepEqualSafePodSpec(), + }, + { + ObjectMeta: api.ObjectMeta{Name: "bar", Namespace: "test", ResourceVersion: "11"}, + Spec: apitesting.DeepEqualSafePodSpec(), + }, + }, + } + svc := &api.ServiceList{ + ListMeta: unversioned.ListMeta{ + ResourceVersion: "16", + }, + Items: []api.Service{ + { + ObjectMeta: api.ObjectMeta{Name: "baz", Namespace: "test", ResourceVersion: "12"}, + Spec: api.ServiceSpec{ + Type: "ClusterIP", + SessionAffinity: "None", + }, + }, + }, + } + return pods, svc +} + +func streamTestData() (io.Reader, *api.PodList, *api.ServiceList) { + pods, svc := testData() + r, w := io.Pipe() + go func() { + defer w.Close() + w.Write([]byte(runtime.EncodeOrDie(testapi.Default.Codec(), pods))) + w.Write([]byte(runtime.EncodeOrDie(testapi.Default.Codec(), svc))) + }() + return r, pods, svc +} + +func JSONToYAMLOrDie(in []byte) []byte { + data, err := yaml.JSONToYAML(in) + if err != nil { + panic(err) + } + return data +} + +func streamYAMLTestData() (io.Reader, *api.PodList, *api.ServiceList) { + pods, svc := testData() + r, w := io.Pipe() + go func() { + defer w.Close() + w.Write(JSONToYAMLOrDie([]byte(runtime.EncodeOrDie(testapi.Default.Codec(), pods)))) + w.Write([]byte("\n---\n")) + w.Write(JSONToYAMLOrDie([]byte(runtime.EncodeOrDie(testapi.Default.Codec(), svc)))) + }() + return r, pods, svc +} + +func streamTestObject(obj runtime.Object) io.Reader { + r, w := io.Pipe() + go func() { + defer w.Close() + w.Write([]byte(runtime.EncodeOrDie(testapi.Default.Codec(), obj))) + }() + return r +} + +type testVisitor struct { + InjectErr error + Infos []*Info +} + +func (v *testVisitor) Handle(info *Info, err error) error { + if err != nil { + return err + } + v.Infos = append(v.Infos, info) + return v.InjectErr +} + +func (v *testVisitor) Objects() []runtime.Object { + objects := []runtime.Object{} + for i := range v.Infos { + objects = append(objects, v.Infos[i].Object) + } + return objects +} + +var aPod string = ` +{ + "kind": "Pod", + "apiVersion": "` + testapi.Default.GroupVersion().String() + `", + "metadata": { + "name": "busybox{id}", + "labels": { + "name": "busybox{id}" + } + }, + "spec": { + "containers": [ + { + "name": "busybox", + "image": "busybox", + "command": [ + "sleep", + "3600" + ], + "imagePullPolicy": "IfNotPresent" + } + ], + "restartPolicy": "Always" + } +} +` + +var aRC string = ` +{ + "kind": "ReplicationController", + "apiVersion": "` + testapi.Default.GroupVersion().String() + `", + "metadata": { + "name": "busybox{id}", + "labels": { + "app": "busybox" + } + }, + "spec": { + "replicas": 1, + "template": { + "metadata": { + "name": "busybox{id}", + "labels": { + "app": "busybox{id}" + } + }, + "spec": { + "containers": [ + { + "name": "busybox", + "image": "busybox", + "command": [ + "sleep", + "3600" + ], + "imagePullPolicy": "IfNotPresent" + } + ], + "restartPolicy": "Always" + } + } + } +} +` + +func TestPathBuilderAndVersionedObjectNotDefaulted(t *testing.T) { 
+ b := NewBuilder(testapi.Default.RESTMapper(), api.Scheme, fakeClient(), testapi.Default.Codec()). + FilenameParam(false, false, "../../../docs/user-guide/update-demo/kitten-rc.yaml") + + test := &testVisitor{} + singular := false + + err := b.Do().IntoSingular(&singular).Visit(test.Handle) + if err != nil || !singular || len(test.Infos) != 1 { + t.Fatalf("unexpected response: %v %t %#v", err, singular, test.Infos) + } + + info := test.Infos[0] + if info.Name != "update-demo-kitten" || info.Namespace != "" || info.Object == nil { + t.Errorf("unexpected info: %#v", info) + } + version, ok := info.VersionedObject.(*v1.ReplicationController) + // versioned object does not have defaulting applied + if info.VersionedObject == nil || !ok || version.Spec.Replicas != nil { + t.Errorf("unexpected versioned object: %#v", info.VersionedObject) + } +} + +func TestNodeBuilder(t *testing.T) { + node := &api.Node{ + ObjectMeta: api.ObjectMeta{Name: "node1", Namespace: "should-not-have", ResourceVersion: "10"}, + Spec: api.NodeSpec{}, + Status: api.NodeStatus{ + Capacity: api.ResourceList{ + api.ResourceCPU: resource.MustParse("1000m"), + api.ResourceMemory: resource.MustParse("1Mi"), + }, + }, + } + r, w := io.Pipe() + go func() { + defer w.Close() + w.Write([]byte(runtime.EncodeOrDie(testapi.Default.Codec(), node))) + }() + + b := NewBuilder(testapi.Default.RESTMapper(), api.Scheme, fakeClient(), testapi.Default.Codec()). + NamespaceParam("test").Stream(r, "STDIN") + + test := &testVisitor{} + + err := b.Do().Visit(test.Handle) + if err != nil || len(test.Infos) != 1 { + t.Fatalf("unexpected response: %v %#v", err, test.Infos) + } + info := test.Infos[0] + if info.Name != "node1" || info.Namespace != "" || info.Object == nil { + t.Errorf("unexpected info: %#v", info) + } +} + +func createTestDir(t *testing.T, path string) { + if err := os.MkdirAll(path, 0750); err != nil { + t.Fatalf("error creating test dir: %v", err) + } +} + +func writeTestFile(t *testing.T, path string, contents string) { + if err := ioutil.WriteFile(path, []byte(contents), 0644); err != nil { + t.Fatalf("error creating test file %#v", err) + } +} + +func TestPathBuilderWithMultiple(t *testing.T) { + // create test dirs + tmpDir, err := utiltesting.MkTmpdir("recursive_test_multiple") + if err != nil { + t.Fatalf("error creating temp dir: %v", err) + } + createTestDir(t, fmt.Sprintf("%s/%s", tmpDir, "recursive/pod/pod_1")) + createTestDir(t, fmt.Sprintf("%s/%s", tmpDir, "recursive/rc/rc_1")) + createTestDir(t, fmt.Sprintf("%s/%s", tmpDir, "inode/hardlink")) + defer os.RemoveAll(tmpDir) + + // create test files + writeTestFile(t, fmt.Sprintf("%s/recursive/pod/busybox.json", tmpDir), strings.Replace(aPod, "{id}", "0", -1)) + writeTestFile(t, fmt.Sprintf("%s/recursive/pod/pod_1/busybox.json", tmpDir), strings.Replace(aPod, "{id}", "1", -1)) + writeTestFile(t, fmt.Sprintf("%s/recursive/rc/busybox.json", tmpDir), strings.Replace(aRC, "{id}", "0", -1)) + writeTestFile(t, fmt.Sprintf("%s/recursive/rc/rc_1/busybox.json", tmpDir), strings.Replace(aRC, "{id}", "1", -1)) + writeTestFile(t, fmt.Sprintf("%s/inode/hardlink/busybox.json", tmpDir), strings.Replace(aPod, "{id}", "0", -1)) + if err := os.Link(fmt.Sprintf("%s/inode/hardlink/busybox.json", tmpDir), fmt.Sprintf("%s/inode/hardlink/busybox-link.json", tmpDir)); err != nil { + t.Fatalf("error creating test file: %v", err) + } + + tests := []struct { + name string + object runtime.Object + recursive bool + directory string + expectedNames []string + }{ + {"pod", &api.Pod{}, false, 
"../../../examples/pod", []string{"nginx"}}, + {"recursive-pod", &api.Pod{}, true, fmt.Sprintf("%s/recursive/pod", tmpDir), []string{"busybox0", "busybox1"}}, + {"rc", &api.ReplicationController{}, false, "../../../examples/guestbook/legacy/redis-master-controller.yaml", []string{"redis-master"}}, + {"recursive-rc", &api.ReplicationController{}, true, fmt.Sprintf("%s/recursive/rc", tmpDir), []string{"busybox0", "busybox1"}}, + {"hardlink", &api.Pod{}, false, fmt.Sprintf("%s/inode/hardlink/busybox-link.json", tmpDir), []string{"busybox0"}}, + {"hardlink", &api.Pod{}, true, fmt.Sprintf("%s/inode/hardlink/busybox-link.json", tmpDir), []string{"busybox0"}}, + } + + for _, test := range tests { + b := NewBuilder(testapi.Default.RESTMapper(), api.Scheme, fakeClient(), testapi.Default.Codec()). + FilenameParam(false, test.recursive, test.directory). + NamespaceParam("test").DefaultNamespace() + + testVisitor := &testVisitor{} + singular := false + + err := b.Do().IntoSingular(&singular).Visit(testVisitor.Handle) + if err != nil { + t.Fatalf("unexpected response: %v %t %#v %s", err, singular, testVisitor.Infos, test.name) + } + + info := testVisitor.Infos + + for i, v := range info { + switch test.object.(type) { + case *api.Pod: + if _, ok := v.Object.(*api.Pod); !ok || v.Name != test.expectedNames[i] || v.Namespace != "test" { + t.Errorf("unexpected info: %#v", v) + } + case *api.ReplicationController: + if _, ok := v.Object.(*api.ReplicationController); !ok || v.Name != test.expectedNames[i] || v.Namespace != "test" { + t.Errorf("unexpected info: %#v", v) + } + } + } + } +} + +func TestPathBuilderWithMultipleInvalid(t *testing.T) { + // create test dirs + tmpDir, err := utiltesting.MkTmpdir("recursive_test_multiple_invalid") + if err != nil { + t.Fatalf("error creating temp dir: %v", err) + } + createTestDir(t, fmt.Sprintf("%s/%s", tmpDir, "inode/symlink/pod")) + defer os.RemoveAll(tmpDir) + + // create test files + writeTestFile(t, fmt.Sprintf("%s/inode/symlink/pod/busybox.json", tmpDir), strings.Replace(aPod, "{id}", "0", -1)) + if err := os.Symlink(fmt.Sprintf("%s/inode/symlink/pod", tmpDir), fmt.Sprintf("%s/inode/symlink/pod-link", tmpDir)); err != nil { + t.Fatalf("error creating test file: %v", err) + } + if err := os.Symlink(fmt.Sprintf("%s/inode/symlink/loop", tmpDir), fmt.Sprintf("%s/inode/symlink/loop", tmpDir)); err != nil { + t.Fatalf("error creating test file: %v", err) + } + + tests := []struct { + name string + recursive bool + directory string + }{ + {"symlink", false, fmt.Sprintf("%s/inode/symlink/pod-link", tmpDir)}, + {"symlink", true, fmt.Sprintf("%s/inode/symlink/pod-link", tmpDir)}, + {"loop", false, fmt.Sprintf("%s/inode/symlink/loop", tmpDir)}, + {"loop", true, fmt.Sprintf("%s/inode/symlink/loop", tmpDir)}, + } + + for _, test := range tests { + b := NewBuilder(testapi.Default.RESTMapper(), api.Scheme, fakeClient(), testapi.Default.Codec()). + FilenameParam(false, test.recursive, test.directory). + NamespaceParam("test").DefaultNamespace() + + testVisitor := &testVisitor{} + singular := false + + err := b.Do().IntoSingular(&singular).Visit(testVisitor.Handle) + if err == nil { + t.Fatalf("unexpected response: %v %t %#v %s", err, singular, testVisitor.Infos, test.name) + } + } +} + +func TestDirectoryBuilder(t *testing.T) { + b := NewBuilder(testapi.Default.RESTMapper(), api.Scheme, fakeClient(), testapi.Default.Codec()). + FilenameParam(false, false, "../../../examples/guestbook/legacy"). 
+ NamespaceParam("test").DefaultNamespace() + + test := &testVisitor{} + singular := false + + err := b.Do().IntoSingular(&singular).Visit(test.Handle) + if err != nil || singular || len(test.Infos) < 3 { + t.Fatalf("unexpected response: %v %t %#v", err, singular, test.Infos) + } + + found := false + for _, info := range test.Infos { + if info.Name == "redis-master" && info.Namespace == "test" && info.Object != nil { + found = true + } + } + if !found { + t.Errorf("unexpected responses: %#v", test.Infos) + } +} + +func TestNamespaceOverride(t *testing.T) { + s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + w.WriteHeader(http.StatusOK) + w.Write([]byte(runtime.EncodeOrDie(testapi.Default.Codec(), &api.Pod{ObjectMeta: api.ObjectMeta{Namespace: "foo", Name: "test"}}))) + })) + defer s.Close() + + b := NewBuilder(testapi.Default.RESTMapper(), api.Scheme, fakeClient(), testapi.Default.Codec()). + FilenameParam(false, false, s.URL). + NamespaceParam("test") + + test := &testVisitor{} + + err := b.Do().Visit(test.Handle) + if err != nil || len(test.Infos) != 1 && test.Infos[0].Namespace != "foo" { + t.Fatalf("unexpected response: %v %#v", err, test.Infos) + } + + b = NewBuilder(testapi.Default.RESTMapper(), api.Scheme, fakeClient(), testapi.Default.Codec()). + FilenameParam(true, false, s.URL). + NamespaceParam("test") + + test = &testVisitor{} + + err = b.Do().Visit(test.Handle) + if err == nil { + t.Fatalf("expected namespace error. got: %#v", test.Infos) + } +} + +func TestURLBuilder(t *testing.T) { + s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + w.WriteHeader(http.StatusOK) + w.Write([]byte(runtime.EncodeOrDie(testapi.Default.Codec(), &api.Pod{ObjectMeta: api.ObjectMeta{Namespace: "foo", Name: "test"}}))) + w.Write([]byte(runtime.EncodeOrDie(testapi.Default.Codec(), &api.Pod{ObjectMeta: api.ObjectMeta{Namespace: "foo", Name: "test1"}}))) + })) + defer s.Close() + + b := NewBuilder(testapi.Default.RESTMapper(), api.Scheme, fakeClient(), testapi.Default.Codec()). + FilenameParam(false, false, s.URL). + NamespaceParam("foo") + + test := &testVisitor{} + + err := b.Do().Visit(test.Handle) + if err != nil || len(test.Infos) != 2 { + t.Fatalf("unexpected response: %v %#v", err, test.Infos) + } + info := test.Infos[0] + if info.Name != "test" || info.Namespace != "foo" || info.Object == nil { + t.Errorf("unexpected info: %#v", info) + } + + info = test.Infos[1] + if info.Name != "test1" || info.Namespace != "foo" || info.Object == nil { + t.Errorf("unexpected info: %#v", info) + } + +} + +func TestURLBuilderRequireNamespace(t *testing.T) { + s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + w.WriteHeader(http.StatusOK) + w.Write([]byte(runtime.EncodeOrDie(testapi.Default.Codec(), &api.Pod{ObjectMeta: api.ObjectMeta{Namespace: "foo", Name: "test"}}))) + })) + defer s.Close() + + b := NewBuilder(testapi.Default.RESTMapper(), api.Scheme, fakeClient(), testapi.Default.Codec()). + FilenameParam(false, false, s.URL). 
+ NamespaceParam("test").RequireNamespace() + + test := &testVisitor{} + singular := false + + err := b.Do().IntoSingular(&singular).Visit(test.Handle) + if err == nil || !singular || len(test.Infos) != 0 { + t.Fatalf("unexpected response: %v %t %#v", err, singular, test.Infos) + } +} + +func TestResourceByName(t *testing.T) { + pods, _ := testData() + b := NewBuilder(testapi.Default.RESTMapper(), api.Scheme, fakeClientWith("", t, map[string]string{ + "/namespaces/test/pods/foo": runtime.EncodeOrDie(testapi.Default.Codec(), &pods.Items[0]), + }), testapi.Default.Codec()). + NamespaceParam("test") + + test := &testVisitor{} + singular := false + + if b.Do().Err() == nil { + t.Errorf("unexpected non-error") + } + + b.ResourceTypeOrNameArgs(true, "pods", "foo") + + err := b.Do().IntoSingular(&singular).Visit(test.Handle) + if err != nil || !singular || len(test.Infos) != 1 { + t.Fatalf("unexpected response: %v %t %#v", err, singular, test.Infos) + } + if !reflect.DeepEqual(&pods.Items[0], test.Objects()[0]) { + t.Errorf("unexpected object: %#v", test.Objects()[0]) + } + + mapping, err := b.Do().ResourceMapping() + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if mapping.Resource != "pods" { + t.Errorf("unexpected resource mapping: %#v", mapping) + } +} + +func TestMultipleResourceByTheSameName(t *testing.T) { + pods, svcs := testData() + b := NewBuilder(testapi.Default.RESTMapper(), api.Scheme, fakeClientWith("", t, map[string]string{ + "/namespaces/test/pods/foo": runtime.EncodeOrDie(testapi.Default.Codec(), &pods.Items[0]), + "/namespaces/test/pods/baz": runtime.EncodeOrDie(testapi.Default.Codec(), &pods.Items[1]), + "/namespaces/test/services/foo": runtime.EncodeOrDie(testapi.Default.Codec(), &svcs.Items[0]), + "/namespaces/test/services/baz": runtime.EncodeOrDie(testapi.Default.Codec(), &svcs.Items[0]), + }), testapi.Default.Codec()). + NamespaceParam("test") + + test := &testVisitor{} + singular := false + + if b.Do().Err() == nil { + t.Errorf("unexpected non-error") + } + + b.ResourceTypeOrNameArgs(true, "pods,services", "foo", "baz") + + err := b.Do().IntoSingular(&singular).Visit(test.Handle) + if err != nil || singular || len(test.Infos) != 4 { + t.Fatalf("unexpected response: %v %t %#v", err, singular, test.Infos) + } + if !api.Semantic.DeepDerivative([]runtime.Object{&pods.Items[0], &pods.Items[1], &svcs.Items[0], &svcs.Items[0]}, test.Objects()) { + t.Errorf("unexpected visited objects: %#v", test.Objects()) + } + + if _, err := b.Do().ResourceMapping(); err == nil { + t.Errorf("unexpected non-error") + } +} + +func TestResourceNames(t *testing.T) { + pods, svc := testData() + b := NewBuilder(testapi.Default.RESTMapper(), api.Scheme, fakeClientWith("", t, map[string]string{ + "/namespaces/test/pods/foo": runtime.EncodeOrDie(testapi.Default.Codec(), &pods.Items[0]), + "/namespaces/test/services/baz": runtime.EncodeOrDie(testapi.Default.Codec(), &svc.Items[0]), + }), testapi.Default.Codec()). 
+ NamespaceParam("test") + + test := &testVisitor{} + + if b.Do().Err() == nil { + t.Errorf("unexpected non-error") + } + + b.ResourceNames("pods", "foo", "services/baz") + + err := b.Do().Visit(test.Handle) + if err != nil || len(test.Infos) != 2 { + t.Fatalf("unexpected response: %v %#v", err, test.Infos) + } + if !reflect.DeepEqual(&pods.Items[0], test.Objects()[0]) { + t.Errorf("unexpected object: \n%#v, expected: \n%#v", test.Objects()[0], &pods.Items[0]) + } + if !reflect.DeepEqual(&svc.Items[0], test.Objects()[1]) { + t.Errorf("unexpected object: \n%#v, expected: \n%#v", test.Objects()[1], &svc.Items[0]) + } +} + +func TestResourceByNameWithoutRequireObject(t *testing.T) { + b := NewBuilder(testapi.Default.RESTMapper(), api.Scheme, fakeClientWith("", t, map[string]string{}), testapi.Default.Codec()). + NamespaceParam("test") + + test := &testVisitor{} + singular := false + + if b.Do().Err() == nil { + t.Errorf("unexpected non-error") + } + + b.ResourceTypeOrNameArgs(true, "pods", "foo").RequireObject(false) + + err := b.Do().IntoSingular(&singular).Visit(test.Handle) + if err != nil || !singular || len(test.Infos) != 1 { + t.Fatalf("unexpected response: %v %t %#v", err, singular, test.Infos) + } + if test.Infos[0].Name != "foo" { + t.Errorf("unexpected name: %#v", test.Infos[0].Name) + } + if test.Infos[0].Object != nil { + t.Errorf("unexpected object: %#v", test.Infos[0].Object) + } + + mapping, err := b.Do().ResourceMapping() + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if mapping.GroupVersionKind.Kind != "Pod" || mapping.Resource != "pods" { + t.Errorf("unexpected resource mapping: %#v", mapping) + } +} + +func TestResourceByNameAndEmptySelector(t *testing.T) { + pods, _ := testData() + b := NewBuilder(testapi.Default.RESTMapper(), api.Scheme, fakeClientWith("", t, map[string]string{ + "/namespaces/test/pods/foo": runtime.EncodeOrDie(testapi.Default.Codec(), &pods.Items[0]), + }), testapi.Default.Codec()). + NamespaceParam("test"). + SelectorParam(""). + ResourceTypeOrNameArgs(true, "pods", "foo") + + singular := false + infos, err := b.Do().IntoSingular(&singular).Infos() + if err != nil || !singular || len(infos) != 1 { + t.Fatalf("unexpected response: %v %t %#v", err, singular, infos) + } + if !reflect.DeepEqual(&pods.Items[0], infos[0].Object) { + t.Errorf("unexpected object: %#v", infos[0]) + } + + mapping, err := b.Do().ResourceMapping() + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if mapping.Resource != "pods" { + t.Errorf("unexpected resource mapping: %#v", mapping) + } +} + +func TestSelector(t *testing.T) { + pods, svc := testData() + labelKey := unversioned.LabelSelectorQueryParam(testapi.Default.GroupVersion().String()) + b := NewBuilder(testapi.Default.RESTMapper(), api.Scheme, fakeClientWith("", t, map[string]string{ + "/namespaces/test/pods?" + labelKey + "=a%3Db": runtime.EncodeOrDie(testapi.Default.Codec(), pods), + "/namespaces/test/services?" + labelKey + "=a%3Db": runtime.EncodeOrDie(testapi.Default.Codec(), svc), + }), testapi.Default.Codec()). + SelectorParam("a=b"). + NamespaceParam("test"). 
+ Flatten() + + test := &testVisitor{} + singular := false + + if b.Do().Err() == nil { + t.Errorf("unexpected non-error") + } + + b.ResourceTypeOrNameArgs(true, "pods,service") + + err := b.Do().IntoSingular(&singular).Visit(test.Handle) + if err != nil || singular || len(test.Infos) != 3 { + t.Fatalf("unexpected response: %v %t %#v", err, singular, test.Infos) + } + if !api.Semantic.DeepDerivative([]runtime.Object{&pods.Items[0], &pods.Items[1], &svc.Items[0]}, test.Objects()) { + t.Errorf("unexpected visited objects: %#v", test.Objects()) + } + + if _, err := b.Do().ResourceMapping(); err == nil { + t.Errorf("unexpected non-error") + } +} + +func TestSelectorRequiresKnownTypes(t *testing.T) { + b := NewBuilder(testapi.Default.RESTMapper(), api.Scheme, fakeClient(), testapi.Default.Codec()). + SelectorParam("a=b"). + NamespaceParam("test"). + ResourceTypes("unknown") + + if b.Do().Err() == nil { + t.Errorf("unexpected non-error") + } +} + +func TestSingleResourceType(t *testing.T) { + b := NewBuilder(testapi.Default.RESTMapper(), api.Scheme, fakeClient(), testapi.Default.Codec()). + SelectorParam("a=b"). + SingleResourceType(). + ResourceTypeOrNameArgs(true, "pods,services") + + if b.Do().Err() == nil { + t.Errorf("unexpected non-error") + } +} + +func TestResourceTuple(t *testing.T) { + expectNoErr := func(err error) bool { return err == nil } + expectErr := func(err error) bool { return err != nil } + testCases := map[string]struct { + args []string + errFn func(error) bool + }{ + "valid": { + args: []string{"pods/foo"}, + errFn: expectNoErr, + }, + "valid multiple with name indirection": { + args: []string{"pods/foo", "pod/bar"}, + errFn: expectNoErr, + }, + "valid multiple with namespaced and non-namespaced types": { + args: []string{"nodes/foo", "pod/bar"}, + errFn: expectNoErr, + }, + "mixed arg types": { + args: []string{"pods/foo", "bar"}, + errFn: expectErr, + }, + /*"missing resource": { + args: []string{"pods/foo2"}, + errFn: expectNoErr, // not an error because resources are lazily visited + },*/ + "comma in resource": { + args: []string{",pods/foo"}, + errFn: expectErr, + }, + "multiple types in resource": { + args: []string{"pods,services/foo"}, + errFn: expectErr, + }, + "unknown resource type": { + args: []string{"unknown/foo"}, + errFn: expectErr, + }, + "leading slash": { + args: []string{"/bar"}, + errFn: expectErr, + }, + "trailing slash": { + args: []string{"bar/"}, + errFn: expectErr, + }, + } + for k, testCase := range testCases { + for _, requireObject := range []bool{true, false} { + expectedRequests := map[string]string{} + if requireObject { + pods, _ := testData() + expectedRequests = map[string]string{ + "/namespaces/test/pods/foo": runtime.EncodeOrDie(testapi.Default.Codec(), &pods.Items[0]), + "/namespaces/test/pods/bar": runtime.EncodeOrDie(testapi.Default.Codec(), &pods.Items[0]), + "/nodes/foo": runtime.EncodeOrDie(testapi.Default.Codec(), &api.Node{ObjectMeta: api.ObjectMeta{Name: "foo"}}), + } + } + + b := NewBuilder(testapi.Default.RESTMapper(), api.Scheme, fakeClientWith(k, t, expectedRequests), testapi.Default.Codec()). + NamespaceParam("test").DefaultNamespace(). 
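+ // Tuple parsing is exercised with and without object retrieval; names resolve lazily, so malformed tuples must fail in either mode.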
+ ResourceTypeOrNameArgs(true, testCase.args...).RequireObject(requireObject) + + r := b.Do() + + if !testCase.errFn(r.Err()) { + t.Errorf("%s: unexpected error: %v", k, r.Err()) + } + if r.Err() != nil { + continue + } + switch { + case (r.singular && len(testCase.args) != 1), + (!r.singular && len(testCase.args) == 1): + t.Errorf("%s: result had unexpected singular value", k) + } + info, err := r.Infos() + if err != nil { + // test error + continue + } + if len(info) != len(testCase.args) { + t.Errorf("%s: unexpected number of infos returned: %#v", k, info) + } + } + } +} + +func TestStream(t *testing.T) { + r, pods, rc := streamTestData() + b := NewBuilder(testapi.Default.RESTMapper(), api.Scheme, fakeClient(), testapi.Default.Codec()). + NamespaceParam("test").Stream(r, "STDIN").Flatten() + + test := &testVisitor{} + singular := false + + err := b.Do().IntoSingular(&singular).Visit(test.Handle) + if err != nil || singular || len(test.Infos) != 3 { + t.Fatalf("unexpected response: %v %t %#v", err, singular, test.Infos) + } + if !api.Semantic.DeepDerivative([]runtime.Object{&pods.Items[0], &pods.Items[1], &rc.Items[0]}, test.Objects()) { + t.Errorf("unexpected visited objects: %#v", test.Objects()) + } +} + +func TestYAMLStream(t *testing.T) { + r, pods, rc := streamYAMLTestData() + b := NewBuilder(testapi.Default.RESTMapper(), api.Scheme, fakeClient(), testapi.Default.Codec()). + NamespaceParam("test").Stream(r, "STDIN").Flatten() + + test := &testVisitor{} + singular := false + + err := b.Do().IntoSingular(&singular).Visit(test.Handle) + if err != nil || singular || len(test.Infos) != 3 { + t.Fatalf("unexpected response: %v %t %#v", err, singular, test.Infos) + } + if !api.Semantic.DeepDerivative([]runtime.Object{&pods.Items[0], &pods.Items[1], &rc.Items[0]}, test.Objects()) { + t.Errorf("unexpected visited objects: %#v", test.Objects()) + } +} + +func TestMultipleObject(t *testing.T) { + r, pods, svc := streamTestData() + obj, err := NewBuilder(testapi.Default.RESTMapper(), api.Scheme, fakeClient(), testapi.Default.Codec()). + NamespaceParam("test").Stream(r, "STDIN").Flatten(). + Do().Object() + + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + expected := &api.List{ + Items: []runtime.Object{ + &pods.Items[0], + &pods.Items[1], + &svc.Items[0], + }, + } + if !api.Semantic.DeepDerivative(expected, obj) { + t.Errorf("unexpected visited objects: %#v", obj) + } +} + +func TestContinueOnErrorVisitor(t *testing.T) { + r, _, _ := streamTestData() + req := NewBuilder(testapi.Default.RESTMapper(), api.Scheme, fakeClient(), testapi.Default.Codec()). + ContinueOnError(). + NamespaceParam("test").Stream(r, "STDIN").Flatten(). + Do() + count := 0 + testErr := fmt.Errorf("test error") + err := req.Visit(func(_ *Info, _ error) error { + count++ + if count > 1 { + return testErr + } + return nil + }) + if err == nil { + t.Fatalf("unexpected error: %v", err) + } + if count != 3 { + t.Fatalf("did not visit all infos: %d", count) + } + agg, ok := err.(utilerrors.Aggregate) + if !ok { + t.Fatalf("unexpected error: %v", err) + } + if len(agg.Errors()) != 2 || agg.Errors()[0] != testErr || agg.Errors()[1] != testErr { + t.Fatalf("unexpected error: %v", err) + } +} + +func TestSingularObject(t *testing.T) { + obj, err := NewBuilder(testapi.Default.RESTMapper(), api.Scheme, fakeClient(), testapi.Default.Codec()). + NamespaceParam("test").DefaultNamespace(). + FilenameParam(false, false, "../../../examples/guestbook/legacy/redis-master-controller.yaml"). + Flatten(). 
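+ // With exactly one resulting Info, Object() returns the object itself rather than wrapping it in an api.List.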
+ Do().Object() + + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + rc, ok := obj.(*api.ReplicationController) + if !ok { + t.Fatalf("unexpected object: %#v", obj) + } + if rc.Name != "redis-master" || rc.Namespace != "test" { + t.Errorf("unexpected controller: %#v", rc) + } +} + +func TestSingularObjectNoExtension(t *testing.T) { + obj, err := NewBuilder(testapi.Default.RESTMapper(), api.Scheme, fakeClient(), testapi.Default.Codec()). + NamespaceParam("test").DefaultNamespace(). + FilenameParam(false, false, "../../../examples/pod"). + Flatten(). + Do().Object() + + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + pod, ok := obj.(*api.Pod) + if !ok { + t.Fatalf("unexpected object: %#v", obj) + } + if pod.Name != "nginx" || pod.Namespace != "test" { + t.Errorf("unexpected pod: %#v", pod) + } +} + +func TestSingularRootScopedObject(t *testing.T) { + node := &api.Node{ObjectMeta: api.ObjectMeta{Name: "test"}, Spec: api.NodeSpec{ExternalID: "test"}} + r := streamTestObject(node) + infos, err := NewBuilder(testapi.Default.RESTMapper(), api.Scheme, fakeClient(), testapi.Default.Codec()). + NamespaceParam("test").DefaultNamespace(). + Stream(r, "STDIN"). + Flatten(). + Do().Infos() + + if err != nil || len(infos) != 1 { + t.Fatalf("unexpected error: %v", err) + } + + if infos[0].Namespace != "" { + t.Errorf("namespace should be empty: %#v", infos[0]) + } + n, ok := infos[0].Object.(*api.Node) + if !ok { + t.Fatalf("unexpected object: %#v", infos[0].Object) + } + if n.Name != "test" || n.Namespace != "" { + t.Errorf("unexpected object: %#v", n) + } +} + +func TestListObject(t *testing.T) { + pods, _ := testData() + labelKey := unversioned.LabelSelectorQueryParam(testapi.Default.GroupVersion().String()) + b := NewBuilder(testapi.Default.RESTMapper(), api.Scheme, fakeClientWith("", t, map[string]string{ + "/namespaces/test/pods?" + labelKey + "=a%3Db": runtime.EncodeOrDie(testapi.Default.Codec(), pods), + }), testapi.Default.Codec()). + SelectorParam("a=b"). + NamespaceParam("test"). + ResourceTypeOrNameArgs(true, "pods"). + Flatten() + + obj, err := b.Do().Object() + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + list, ok := obj.(*api.List) + if !ok { + t.Fatalf("unexpected object: %#v", obj) + } + if list.ResourceVersion != pods.ResourceVersion || len(list.Items) != 2 { + t.Errorf("unexpected list: %#v", list) + } + + mapping, err := b.Do().ResourceMapping() + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if mapping.Resource != "pods" { + t.Errorf("unexpected resource mapping: %#v", mapping) + } +} + +func TestListObjectWithDifferentVersions(t *testing.T) { + pods, svc := testData() + labelKey := unversioned.LabelSelectorQueryParam(testapi.Default.GroupVersion().String()) + obj, err := NewBuilder(testapi.Default.RESTMapper(), api.Scheme, fakeClientWith("", t, map[string]string{ + "/namespaces/test/pods?" + labelKey + "=a%3Db": runtime.EncodeOrDie(testapi.Default.Codec(), pods), + "/namespaces/test/services?" + labelKey + "=a%3Db": runtime.EncodeOrDie(testapi.Default.Codec(), svc), + }), testapi.Default.Codec()). + SelectorParam("a=b"). + NamespaceParam("test"). + ResourceTypeOrNameArgs(true, "pods,services"). + Flatten(). + Do().Object() + + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + list, ok := obj.(*api.List) + if !ok { + t.Fatalf("unexpected object: %#v", obj) + } + // resource version differs between type lists, so it's not possible to get a single version. 
+ if list.ResourceVersion != "" || len(list.Items) != 3 { + t.Errorf("unexpected list: %#v", list) + } +} + +func TestWatch(t *testing.T) { + _, svc := testData() + w, err := NewBuilder(testapi.Default.RESTMapper(), api.Scheme, fakeClientWith("", t, map[string]string{ + "/watch/namespaces/test/services/redis-master?resourceVersion=12": watchBody(watch.Event{ + Type: watch.Added, + Object: &svc.Items[0], + }), + }), testapi.Default.Codec()). + NamespaceParam("test").DefaultNamespace(). + FilenameParam(false, false, "../../../examples/guestbook/redis-master-service.yaml").Flatten(). + Do().Watch("12") + + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + defer w.Stop() + ch := w.ResultChan() + select { + case obj := <-ch: + if obj.Type != watch.Added { + t.Fatalf("unexpected watch event %#v", obj) + } + service, ok := obj.Object.(*api.Service) + if !ok { + t.Fatalf("unexpected object: %#v", obj) + } + if service.Name != "baz" || service.ResourceVersion != "12" { + t.Errorf("unexpected service: %#v", service) + } + } +} + +func TestWatchMultipleError(t *testing.T) { + _, err := NewBuilder(testapi.Default.RESTMapper(), api.Scheme, fakeClient(), testapi.Default.Codec()). + NamespaceParam("test").DefaultNamespace(). + FilenameParam(false, false, "../../../examples/guestbook/legacy/redis-master-controller.yaml").Flatten(). + FilenameParam(false, false, "../../../examples/guestbook/legacy/redis-master-controller.yaml").Flatten(). + Do().Watch("") + + if err == nil { + t.Fatalf("unexpected non-error") + } +} + +func TestLatest(t *testing.T) { + r, _, _ := streamTestData() + newPod := &api.Pod{ + ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: "test", ResourceVersion: "13"}, + } + newPod2 := &api.Pod{ + ObjectMeta: api.ObjectMeta{Name: "bar", Namespace: "test", ResourceVersion: "14"}, + } + newSvc := &api.Service{ + ObjectMeta: api.ObjectMeta{Name: "baz", Namespace: "test", ResourceVersion: "15"}, + } + + b := NewBuilder(testapi.Default.RESTMapper(), api.Scheme, fakeClientWith("", t, map[string]string{ + "/namespaces/test/pods/foo": runtime.EncodeOrDie(testapi.Default.Codec(), newPod), + "/namespaces/test/pods/bar": runtime.EncodeOrDie(testapi.Default.Codec(), newPod2), + "/namespaces/test/services/baz": runtime.EncodeOrDie(testapi.Default.Codec(), newSvc), + }), testapi.Default.Codec()). + NamespaceParam("other").Stream(r, "STDIN").Flatten().Latest() + + test := &testVisitor{} + singular := false + + err := b.Do().IntoSingular(&singular).Visit(test.Handle) + if err != nil || singular || len(test.Infos) != 3 { + t.Fatalf("unexpected response: %v %t %#v", err, singular, test.Infos) + } + if !api.Semantic.DeepDerivative([]runtime.Object{newPod, newPod2, newSvc}, test.Objects()) { + t.Errorf("unexpected visited objects: %#v", test.Objects()) + } +} + +func TestReceiveMultipleErrors(t *testing.T) { + pods, svc := testData() + + r, w := io.Pipe() + go func() { + defer w.Close() + w.Write([]byte(`{}`)) + w.Write([]byte(runtime.EncodeOrDie(testapi.Default.Codec(), &pods.Items[0]))) + }() + + r2, w2 := io.Pipe() + go func() { + defer w2.Close() + w2.Write([]byte(`{}`)) + w2.Write([]byte(runtime.EncodeOrDie(testapi.Default.Codec(), &svc.Items[0]))) + }() + + b := NewBuilder(testapi.Default.RESTMapper(), api.Scheme, fakeClient(), testapi.Default.Codec()). + Stream(r, "1").Stream(r2, "2"). 
+ ContinueOnError() + + test := &testVisitor{} + singular := false + + err := b.Do().IntoSingular(&singular).Visit(test.Handle) + if err == nil || singular || len(test.Infos) != 2 { + t.Fatalf("unexpected response: %v %t %#v", err, singular, test.Infos) + } + + errs, ok := err.(utilerrors.Aggregate) + if !ok { + t.Fatalf("unexpected error: %v", reflect.TypeOf(err)) + } + if len(errs.Errors()) != 2 { + t.Errorf("unexpected errors %v", errs) + } +} + +func TestReplaceAliases(t *testing.T) { + tests := []struct { + name string + arg string + expected string + }{ + { + name: "no-replacement", + arg: "service", + expected: "service", + }, + { + name: "all-replacement", + arg: "all", + expected: "rc,svc,pods,pvc", + }, + { + name: "alias-in-comma-separated-arg", + arg: "all,secrets", + expected: "rc,svc,pods,pvc,secrets", + }, + } + + b := NewBuilder(testapi.Default.RESTMapper(), api.Scheme, fakeClient(), testapi.Default.Codec()) + + for _, test := range tests { + replaced := b.replaceAliases(test.arg) + if replaced != test.expected { + t.Errorf("%s: unexpected argument: expected %s, got %s", test.name, test.expected, replaced) + } + } +} + +func TestHasNames(t *testing.T) { + tests := []struct { + args []string + expectedHasName bool + expectedError error + }{ + { + args: []string{""}, + expectedHasName: false, + expectedError: nil, + }, + { + args: []string{"rc"}, + expectedHasName: false, + expectedError: nil, + }, + { + args: []string{"rc,pod,svc"}, + expectedHasName: false, + expectedError: nil, + }, + { + args: []string{"rc/foo"}, + expectedHasName: true, + expectedError: nil, + }, + { + args: []string{"rc", "foo"}, + expectedHasName: true, + expectedError: nil, + }, + { + args: []string{"rc,pod,svc", "foo"}, + expectedHasName: true, + expectedError: nil, + }, + { + args: []string{"rc/foo", "rc/bar", "rc/zee"}, + expectedHasName: true, + expectedError: nil, + }, + { + args: []string{"rc/foo", "bar"}, + expectedHasName: false, + expectedError: fmt.Errorf("when passing arguments in resource/name form, all arguments must include the resource"), + }, + } + for _, test := range tests { + hasNames, err := HasNames(test.args) + if !reflect.DeepEqual(test.expectedError, err) { + t.Errorf("expected HasName to error %v, got %s", test.expectedError, err) + } + if hasNames != test.expectedHasName { + t.Errorf("expected HasName to return %v for %s", test.expectedHasName, test.args) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/resource/helper_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/resource/helper_test.go new file mode 100644 index 000000000000..0cd438b05939 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/resource/helper_test.go @@ -0,0 +1,514 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package resource + +import ( + "bytes" + "errors" + "io" + "io/ioutil" + "net/http" + "reflect" + "strings" + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/testapi" + apitesting "k8s.io/kubernetes/pkg/api/testing" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/client/unversioned/fake" + "k8s.io/kubernetes/pkg/labels" + "k8s.io/kubernetes/pkg/runtime" +) + +func objBody(obj runtime.Object) io.ReadCloser { + return ioutil.NopCloser(bytes.NewReader([]byte(runtime.EncodeOrDie(testapi.Default.Codec(), obj)))) +} + +func header() http.Header { + header := http.Header{} + header.Set("Content-Type", runtime.ContentTypeJSON) + return header +} + +// splitPath returns the segments for a URL path. +func splitPath(path string) []string { + path = strings.Trim(path, "/") + if path == "" { + return []string{} + } + return strings.Split(path, "/") +} + +func TestHelperDelete(t *testing.T) { + tests := []struct { + Err bool + Req func(*http.Request) bool + Resp *http.Response + HttpErr error + }{ + { + HttpErr: errors.New("failure"), + Err: true, + }, + { + Resp: &http.Response{ + StatusCode: http.StatusNotFound, + Header: header(), + Body: objBody(&unversioned.Status{Status: unversioned.StatusFailure}), + }, + Err: true, + }, + { + Resp: &http.Response{ + StatusCode: http.StatusOK, + Header: header(), + Body: objBody(&unversioned.Status{Status: unversioned.StatusSuccess}), + }, + Req: func(req *http.Request) bool { + if req.Method != "DELETE" { + t.Errorf("unexpected method: %#v", req) + return false + } + parts := splitPath(req.URL.Path) + if len(parts) < 3 { + t.Errorf("expected URL path to have 3 parts: %s", req.URL.Path) + return false + } + if parts[1] != "bar" { + t.Errorf("url doesn't contain namespace: %#v", req) + return false + } + if parts[2] != "foo" { + t.Errorf("url doesn't contain name: %#v", req) + return false + } + return true + }, + }, + } + for _, test := range tests { + client := &fake.RESTClient{ + Codec: testapi.Default.Codec(), + Resp: test.Resp, + Err: test.HttpErr, + } + modifier := &Helper{ + RESTClient: client, + NamespaceScoped: true, + } + err := modifier.Delete("bar", "foo") + if (err != nil) != test.Err { + t.Errorf("unexpected error: %t %v", test.Err, err) + } + if err != nil { + continue + } + if test.Req != nil && !test.Req(client.Req) { + t.Errorf("unexpected request: %#v", client.Req) + } + } +} + +func TestHelperCreate(t *testing.T) { + expectPost := func(req *http.Request) bool { + if req.Method != "POST" { + t.Errorf("unexpected method: %#v", req) + return false + } + parts := splitPath(req.URL.Path) + if parts[1] != "bar" { + t.Errorf("url doesn't contain namespace: %#v", req) + return false + } + return true + } + + tests := []struct { + Resp *http.Response + HttpErr error + Modify bool + Object runtime.Object + + ExpectObject runtime.Object + Err bool + Req func(*http.Request) bool + }{ + { + HttpErr: errors.New("failure"), + Err: true, + }, + { + Resp: &http.Response{ + StatusCode: http.StatusNotFound, + Header: header(), + Body: objBody(&unversioned.Status{Status: unversioned.StatusFailure}), + }, + Err: true, + }, + { + Resp: &http.Response{ + StatusCode: http.StatusOK, + Header: header(), + Body: objBody(&unversioned.Status{Status: unversioned.StatusSuccess}), + }, + Object: &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}}, + ExpectObject: &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}}, + Req: expectPost, + }, + { + Modify: false, + Object: &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo", 
ResourceVersion: "10"}}, + ExpectObject: &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo", ResourceVersion: "10"}}, + Resp: &http.Response{StatusCode: http.StatusOK, Header: header(), Body: objBody(&unversioned.Status{Status: unversioned.StatusSuccess})}, + Req: expectPost, + }, + { + Modify: true, + Object: &api.Pod{ + ObjectMeta: api.ObjectMeta{Name: "foo", ResourceVersion: "10"}, + Spec: apitesting.DeepEqualSafePodSpec(), + }, + ExpectObject: &api.Pod{ + ObjectMeta: api.ObjectMeta{Name: "foo"}, + Spec: apitesting.DeepEqualSafePodSpec(), + }, + Resp: &http.Response{StatusCode: http.StatusOK, Header: header(), Body: objBody(&unversioned.Status{Status: unversioned.StatusSuccess})}, + Req: expectPost, + }, + } + for i, test := range tests { + client := &fake.RESTClient{ + Codec: testapi.Default.Codec(), + Resp: test.Resp, + Err: test.HttpErr, + } + modifier := &Helper{ + RESTClient: client, + Versioner: testapi.Default.MetadataAccessor(), + NamespaceScoped: true, + } + _, err := modifier.Create("bar", test.Modify, test.Object) + if (err != nil) != test.Err { + t.Errorf("%d: unexpected error: %t %v", i, test.Err, err) + } + if err != nil { + continue + } + if test.Req != nil && !test.Req(client.Req) { + t.Errorf("%d: unexpected request: %#v", i, client.Req) + } + body, err := ioutil.ReadAll(client.Req.Body) + if err != nil { + t.Fatalf("%d: unexpected error: %#v", i, err) + } + t.Logf("got body: %s", string(body)) + expect := []byte{} + if test.ExpectObject != nil { + expect = []byte(runtime.EncodeOrDie(testapi.Default.Codec(), test.ExpectObject)) + } + if !reflect.DeepEqual(expect, body) { + t.Errorf("%d: unexpected body: %s (expected %s)", i, string(body), string(expect)) + } + + } +} + +func TestHelperGet(t *testing.T) { + tests := []struct { + Err bool + Req func(*http.Request) bool + Resp *http.Response + HttpErr error + }{ + { + HttpErr: errors.New("failure"), + Err: true, + }, + { + Resp: &http.Response{ + StatusCode: http.StatusNotFound, + Header: header(), + Body: objBody(&unversioned.Status{Status: unversioned.StatusFailure}), + }, + Err: true, + }, + { + Resp: &http.Response{ + StatusCode: http.StatusOK, + Header: header(), + Body: objBody(&api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}}), + }, + Req: func(req *http.Request) bool { + if req.Method != "GET" { + t.Errorf("unexpected method: %#v", req) + return false + } + parts := splitPath(req.URL.Path) + if parts[1] != "bar" { + t.Errorf("url doesn't contain namespace: %#v", req) + return false + } + if parts[2] != "foo" { + t.Errorf("url doesn't contain name: %#v", req) + return false + } + return true + }, + }, + } + for _, test := range tests { + client := &fake.RESTClient{ + Codec: testapi.Default.Codec(), + Resp: test.Resp, + Err: test.HttpErr, + } + modifier := &Helper{ + RESTClient: client, + NamespaceScoped: true, + } + obj, err := modifier.Get("bar", "foo", false) + if (err != nil) != test.Err { + t.Errorf("unexpected error: %t %v", test.Err, err) + } + if err != nil { + continue + } + if obj.(*api.Pod).Name != "foo" { + t.Errorf("unexpected object: %#v", obj) + } + if test.Req != nil && !test.Req(client.Req) { + t.Errorf("unexpected request: %#v", client.Req) + } + } +} + +func TestHelperList(t *testing.T) { + tests := []struct { + Err bool + Req func(*http.Request) bool + Resp *http.Response + HttpErr error + }{ + { + HttpErr: errors.New("failure"), + Err: true, + }, + { + Resp: &http.Response{ + StatusCode: http.StatusNotFound, + Header: header(), + Body: objBody(&unversioned.Status{Status: unversioned.StatusFailure}), 
+ }, + Err: true, + }, + { + Resp: &http.Response{ + StatusCode: http.StatusOK, + Header: header(), + Body: objBody(&api.PodList{ + Items: []api.Pod{{ + ObjectMeta: api.ObjectMeta{Name: "foo"}, + }, + }, + }), + }, + Req: func(req *http.Request) bool { + if req.Method != "GET" { + t.Errorf("unexpected method: %#v", req) + return false + } + if req.URL.Path != "/namespaces/bar" { + t.Errorf("url doesn't contain name: %#v", req.URL) + return false + } + if req.URL.Query().Get(unversioned.LabelSelectorQueryParam(testapi.Default.GroupVersion().String())) != labels.SelectorFromSet(labels.Set{"foo": "baz"}).String() { + t.Errorf("url doesn't contain query parameters: %#v", req.URL) + return false + } + return true + }, + }, + } + for _, test := range tests { + client := &fake.RESTClient{ + Codec: testapi.Default.Codec(), + Resp: test.Resp, + Err: test.HttpErr, + } + modifier := &Helper{ + RESTClient: client, + NamespaceScoped: true, + } + obj, err := modifier.List("bar", testapi.Default.GroupVersion().String(), labels.SelectorFromSet(labels.Set{"foo": "baz"}), false) + if (err != nil) != test.Err { + t.Errorf("unexpected error: %t %v", test.Err, err) + } + if err != nil { + continue + } + if obj.(*api.PodList).Items[0].Name != "foo" { + t.Errorf("unexpected object: %#v", obj) + } + if test.Req != nil && !test.Req(client.Req) { + t.Errorf("unexpected request: %#v", client.Req) + } + } +} + +func TestHelperReplace(t *testing.T) { + expectPut := func(path string, req *http.Request) bool { + if req.Method != "PUT" { + t.Errorf("unexpected method: %#v", req) + return false + } + if req.URL.Path != path { + t.Errorf("unexpected url: %v", req.URL) + return false + } + return true + } + + tests := []struct { + Resp *http.Response + HTTPClient *http.Client + HttpErr error + Overwrite bool + Object runtime.Object + Namespace string + NamespaceScoped bool + + ExpectPath string + ExpectObject runtime.Object + Err bool + Req func(string, *http.Request) bool + }{ + { + Namespace: "bar", + NamespaceScoped: true, + HttpErr: errors.New("failure"), + Err: true, + }, + { + Namespace: "bar", + NamespaceScoped: true, + Object: &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}}, + Resp: &http.Response{ + StatusCode: http.StatusNotFound, + Header: header(), + Body: objBody(&unversioned.Status{Status: unversioned.StatusFailure}), + }, + Err: true, + }, + { + Namespace: "bar", + NamespaceScoped: true, + Object: &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}}, + ExpectPath: "/namespaces/bar/foo", + ExpectObject: &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}}, + Resp: &http.Response{ + StatusCode: http.StatusOK, + Header: header(), + Body: objBody(&unversioned.Status{Status: unversioned.StatusSuccess}), + }, + Req: expectPut, + }, + // namespace scoped resource + { + Namespace: "bar", + NamespaceScoped: true, + Object: &api.Pod{ + ObjectMeta: api.ObjectMeta{Name: "foo"}, + Spec: apitesting.DeepEqualSafePodSpec(), + }, + ExpectPath: "/namespaces/bar/foo", + ExpectObject: &api.Pod{ + ObjectMeta: api.ObjectMeta{Name: "foo", ResourceVersion: "10"}, + Spec: apitesting.DeepEqualSafePodSpec(), + }, + Overwrite: true, + HTTPClient: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { + if req.Method == "PUT" { + return &http.Response{StatusCode: http.StatusOK, Header: header(), Body: objBody(&unversioned.Status{Status: unversioned.StatusSuccess})}, nil + } + return &http.Response{StatusCode: http.StatusOK, Header: header(), Body: objBody(&api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo", ResourceVersion: 
"10"}})}, nil + }), + Req: expectPut, + }, + // cluster scoped resource + { + Object: &api.Node{ + ObjectMeta: api.ObjectMeta{Name: "foo"}, + }, + ExpectObject: &api.Node{ + ObjectMeta: api.ObjectMeta{Name: "foo", ResourceVersion: "10"}, + }, + Overwrite: true, + ExpectPath: "/foo", + HTTPClient: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { + if req.Method == "PUT" { + return &http.Response{StatusCode: http.StatusOK, Header: header(), Body: objBody(&unversioned.Status{Status: unversioned.StatusSuccess})}, nil + } + return &http.Response{StatusCode: http.StatusOK, Header: header(), Body: objBody(&api.Node{ObjectMeta: api.ObjectMeta{Name: "foo", ResourceVersion: "10"}})}, nil + }), + Req: expectPut, + }, + { + Namespace: "bar", + NamespaceScoped: true, + Object: &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo", ResourceVersion: "10"}}, + ExpectPath: "/namespaces/bar/foo", + ExpectObject: &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo", ResourceVersion: "10"}}, + Resp: &http.Response{StatusCode: http.StatusOK, Header: header(), Body: objBody(&unversioned.Status{Status: unversioned.StatusSuccess})}, + Req: expectPut, + }, + } + for i, test := range tests { + client := &fake.RESTClient{ + Client: test.HTTPClient, + Codec: testapi.Default.Codec(), + Resp: test.Resp, + Err: test.HttpErr, + } + modifier := &Helper{ + RESTClient: client, + Versioner: testapi.Default.MetadataAccessor(), + NamespaceScoped: test.NamespaceScoped, + } + _, err := modifier.Replace(test.Namespace, "foo", test.Overwrite, test.Object) + if (err != nil) != test.Err { + t.Errorf("%d: unexpected error: %t %v", i, test.Err, err) + } + if err != nil { + continue + } + if test.Req != nil && !test.Req(test.ExpectPath, client.Req) { + t.Errorf("%d: unexpected request: %#v", i, client.Req) + } + body, err := ioutil.ReadAll(client.Req.Body) + if err != nil { + t.Fatalf("%d: unexpected error: %#v", i, err) + } + expect := []byte{} + if test.ExpectObject != nil { + expect = []byte(runtime.EncodeOrDie(testapi.Default.Codec(), test.ExpectObject)) + } + if !reflect.DeepEqual(expect, body) { + t.Errorf("%d: unexpected body: %s", i, string(body)) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/resource/mapper.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/resource/mapper.go index 25fd97d90bd9..7f9eec28f7cf 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/resource/mapper.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/resource/mapper.go @@ -22,6 +22,8 @@ import ( "k8s.io/kubernetes/pkg/api/meta" "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/apimachinery/registered" + "k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata" "k8s.io/kubernetes/pkg/runtime" ) @@ -53,6 +55,17 @@ func (m *Mapper) InfoForData(data []byte, source string) (*Info, error) { if err != nil { return nil, fmt.Errorf("unable to decode %q: %v", source, err) } + var obj runtime.Object + var versioned runtime.Object + if registered.IsThirdPartyAPIGroupVersion(gvk.GroupVersion()) { + obj, err = runtime.Decode(thirdpartyresourcedata.NewDecoder(nil, gvk.Kind), data) + versioned = obj + } else { + obj, versioned = versions.Last(), versions.First() + } + if err != nil { + return nil, fmt.Errorf("unable to decode %q: %v [%v]", source, err, gvk) + } mapping, err := m.RESTMapping(gvk.GroupKind(), gvk.Version) if err != nil { return nil, fmt.Errorf("unable to recognize %q: %v", source, err) @@ -63,10 +76,6 @@ func (m *Mapper) InfoForData(data []byte, source string) (*Info, 
error) { return nil, fmt.Errorf("unable to connect to a server to handle %q: %v", mapping.Resource, err) } - // TODO: decoding the version object is convenient, but questionable. This is used by apply - // and rolling-update today, but both of those cases should probably be requesting the raw - // object and performing their own decoding. - obj, versioned := versions.Last(), versions.First() name, _ := mapping.MetadataAccessor.Name(obj) namespace, _ := mapping.MetadataAccessor.Namespace(obj) resourceVersion, _ := mapping.MetadataAccessor.ResourceVersion(obj) @@ -87,7 +96,7 @@ func (m *Mapper) InfoForData(data []byte, source string) (*Info, error) { // if the object cannot be introspected. Name and namespace will be set into Info // if the mapping's MetadataAccessor can retrieve them. func (m *Mapper) InfoForObject(obj runtime.Object, preferredGVKs []unversioned.GroupVersionKind) (*Info, error) { - groupVersionKinds, err := m.ObjectKinds(obj) + groupVersionKinds, _, err := m.ObjectKinds(obj) if err != nil { return nil, fmt.Errorf("unable to get type info from the object %q: %v", reflect.TypeOf(obj), err) } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/resource/result.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/resource/result.go index 8d726ab7b732..562fc0cc3596 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/resource/result.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/resource/result.go @@ -24,6 +24,7 @@ import ( "k8s.io/kubernetes/pkg/api/meta" "k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/apimachinery/registered" + "k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/runtime" utilerrors "k8s.io/kubernetes/pkg/util/errors" "k8s.io/kubernetes/pkg/util/sets" @@ -210,7 +211,7 @@ func (r *Result) Watch(resourceVersion string) (watch.Interface, error) { // the objects as children, or if only a single Object is present, as that object. The provided // version will be preferred as the conversion target, but the Object's mapping version will be // used if that version is not present. -func AsVersionedObject(infos []*Info, forceList bool, version string, encoder runtime.Encoder) (runtime.Object, error) { +func AsVersionedObject(infos []*Info, forceList bool, version unversioned.GroupVersion, encoder runtime.Encoder) (runtime.Object, error) { objects, err := AsVersionedObjects(infos, version, encoder) if err != nil { return nil, err @@ -221,7 +222,7 @@ func AsVersionedObject(infos []*Info, forceList bool, version string, encoder ru object = objects[0] } else { object = &api.List{Items: objects} - converted, err := tryConvert(api.Scheme, object, version, registered.GroupOrDie(api.GroupName).GroupVersion.Version) + converted, err := tryConvert(api.Scheme, object, version, registered.GroupOrDie(api.GroupName).GroupVersion) if err != nil { return nil, err } @@ -233,7 +234,7 @@ func AsVersionedObject(infos []*Info, forceList bool, version string, encoder ru // AsVersionedObjects converts a list of infos into versioned objects. The provided // version will be preferred as the conversion target, but the Object's mapping version will be // used if that version is not present. 
-func AsVersionedObjects(infos []*Info, version string, encoder runtime.Encoder) ([]runtime.Object, error) { +func AsVersionedObjects(infos []*Info, version unversioned.GroupVersion, encoder runtime.Encoder) ([]runtime.Object, error) { objects := []runtime.Object{} for _, info := range infos { if info.Object == nil { @@ -241,22 +242,28 @@ func AsVersionedObjects(infos []*Info, version string, encoder runtime.Encoder) } // TODO: use info.VersionedObject as the value? + switch obj := info.Object.(type) { + case *extensions.ThirdPartyResourceData: + objects = append(objects, &runtime.Unknown{Raw: obj.Data}) + continue + } // objects that are not part of api.Scheme must be converted to JSON // TODO: convert to map[string]interface{}, attach to runtime.Unknown? - if len(version) > 0 { - if _, err := api.Scheme.ObjectKind(info.Object); runtime.IsNotRegisteredError(err) { + if !version.IsEmpty() { + if _, _, err := api.Scheme.ObjectKinds(info.Object); runtime.IsNotRegisteredError(err) { // TODO: ideally this would encode to version, but we don't expose multiple codecs here. data, err := runtime.Encode(encoder, info.Object) if err != nil { return nil, err } - objects = append(objects, &runtime.Unknown{RawJSON: data}) + // TODO: Set ContentEncoding and ContentType. + objects = append(objects, &runtime.Unknown{Raw: data}) continue } } - converted, err := tryConvert(info.Mapping.ObjectConvertor, info.Object, version, info.Mapping.GroupVersionKind.GroupVersion().String()) + converted, err := tryConvert(info.Mapping.ObjectConvertor, info.Object, version, info.Mapping.GroupVersionKind.GroupVersion()) if err != nil { return nil, err } @@ -267,10 +274,10 @@ func AsVersionedObjects(infos []*Info, version string, encoder runtime.Encoder) // tryConvert attempts to convert the given object to the provided versions in order. This function assumes // the object is in internal version. -func tryConvert(convertor runtime.ObjectConvertor, object runtime.Object, versions ...string) (runtime.Object, error) { +func tryConvert(convertor runtime.ObjectConvertor, object runtime.Object, versions ...unversioned.GroupVersion) (runtime.Object, error) { var last error for _, version := range versions { - if len(version) == 0 { + if version.IsEmpty() { return object, nil } obj, err := convertor.ConvertToVersion(object, version) diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/resource/visitor.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/resource/visitor.go index 76569389c7dd..288bfa85acad 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/resource/visitor.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/resource/visitor.go @@ -24,6 +24,7 @@ import ( "net/url" "os" "path/filepath" + "time" "k8s.io/kubernetes/pkg/api/meta" "k8s.io/kubernetes/pkg/api/unversioned" @@ -221,20 +222,67 @@ func ValidateSchema(data []byte, schema validation.Schema) error { type URLVisitor struct { URL *url.URL *StreamVisitor + HttpAttemptCount int } func (v *URLVisitor) Visit(fn VisitorFunc) error { - res, err := http.Get(v.URL.String()) + body, err := readHttpWithRetries(httpgetImpl, time.Second, v.URL.String(), v.HttpAttemptCount) if err != nil { return err } - defer res.Body.Close() - if res.StatusCode != 200 { - return fmt.Errorf("unable to read URL %q, server reported %d %s", v.URL, res.StatusCode, res.Status) + defer body.Close() + v.StreamVisitor.Reader = body + return v.StreamVisitor.Visit(fn) +} + +// readHttpWithRetries tries to http.Get the v.URL retries times before giving up. 
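+// Transport errors and HTTP 5xx responses are retried after sleeping for the given duration between attempts; any other non-200 status fails immediately.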
+func readHttpWithRetries(get httpget, duration time.Duration, u string, attempts int) (io.ReadCloser, error) { + var err error + var body io.ReadCloser + if attempts <= 0 { + return nil, fmt.Errorf("http attempts must be greater than 0, was %d", attempts) } + for i := 0; i < attempts; i++ { + var statusCode int + var status string + if i > 0 { + time.Sleep(duration) + } - v.StreamVisitor.Reader = res.Body - return v.StreamVisitor.Visit(fn) + // Try to get the URL + statusCode, status, body, err = get(u) + + // Retry Errors + if err != nil { + continue + } + + // Error - Set the error condition from the StatusCode + if statusCode != 200 { + err = fmt.Errorf("unable to read URL %q, server reported %d %s", u, statusCode, status) + } + + if statusCode >= 500 && statusCode < 600 { + // Retry 500's + continue + } else { + // Don't retry other StatusCodes + break + } + } + return body, err +} + +// httpget Defines function to retrieve a url and return the results. Exists for unit test stubbing. +type httpget func(url string) (int, string, io.ReadCloser, error) + +// httpgetImpl Implements a function to retrieve a url and return the results. +func httpgetImpl(url string) (int, string, io.ReadCloser, error) { + resp, err := http.Get(url) + if err != nil { + return 0, "", nil, err + } + return resp.StatusCode, resp.Status, resp.Body, nil } // DecoratedVisitor will invoke the decorators in order prior to invoking the visitor function @@ -474,14 +522,15 @@ func (v *StreamVisitor) Visit(fn VisitorFunc) error { } return err } - ext.RawJSON = bytes.TrimSpace(ext.RawJSON) - if len(ext.RawJSON) == 0 || bytes.Equal(ext.RawJSON, []byte("null")) { + // TODO: This needs to be able to handle object in other encodings and schemas. + ext.Raw = bytes.TrimSpace(ext.Raw) + if len(ext.Raw) == 0 || bytes.Equal(ext.Raw, []byte("null")) { continue } - if err := ValidateSchema(ext.RawJSON, v.Schema); err != nil { + if err := ValidateSchema(ext.Raw, v.Schema); err != nil { return fmt.Errorf("error validating %q: %v", v.Source, err) } - info, err := v.InfoForData(ext.RawJSON, v.Source) + info, err := v.InfoForData(ext.Raw, v.Source) if err != nil { if fnErr := fn(info, err); fnErr != nil { return fnErr diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/resource/visitor_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/resource/visitor_test.go new file mode 100644 index 000000000000..e781c90c2493 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/resource/visitor_test.go @@ -0,0 +1,102 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package resource + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestVisitorHttpGet(t *testing.T) { + // Test retries on errors + i := 0 + expectedErr := fmt.Errorf("Failed to get http") + actualBytes, actualErr := readHttpWithRetries(func(url string) (int, string, io.ReadCloser, error) { + assert.Equal(t, "hello", url) + i++ + if i > 2 { + return 0, "", nil, expectedErr + } + return 0, "", nil, fmt.Errorf("Unexpected error") + }, 0, "hello", 3) + assert.Equal(t, expectedErr, actualErr) + assert.Nil(t, actualBytes) + assert.Equal(t, 3, i) + + // Test that 500s are retried. + i = 0 + actualBytes, actualErr = readHttpWithRetries(func(url string) (int, string, io.ReadCloser, error) { + assert.Equal(t, "hello", url) + i++ + return 501, "Status", nil, nil + }, 0, "hello", 3) + assert.Error(t, actualErr) + assert.Nil(t, actualBytes) + assert.Equal(t, 3, i) + + // Test that 300s are not retried + i = 0 + actualBytes, actualErr = readHttpWithRetries(func(url string) (int, string, io.ReadCloser, error) { + assert.Equal(t, "hello", url) + i++ + return 300, "Status", nil, nil + }, 0, "hello", 3) + assert.Error(t, actualErr) + assert.Nil(t, actualBytes) + assert.Equal(t, 1, i) + + // Test attempt count is respected + i = 0 + actualBytes, actualErr = readHttpWithRetries(func(url string) (int, string, io.ReadCloser, error) { + assert.Equal(t, "hello", url) + i++ + return 501, "Status", nil, nil + }, 0, "hello", 1) + assert.Error(t, actualErr) + assert.Nil(t, actualBytes) + assert.Equal(t, 1, i) + + // Test attempts less than 1 results in an error + i = 0 + b := bytes.Buffer{} + actualBytes, actualErr = readHttpWithRetries(func(url string) (int, string, io.ReadCloser, error) { + return 200, "Status", ioutil.NopCloser(&b), nil + }, 0, "hello", 0) + assert.Error(t, actualErr) + assert.Nil(t, actualBytes) + assert.Equal(t, 0, i) + + // Test Success + i = 0 + b = bytes.Buffer{} + actualBytes, actualErr = readHttpWithRetries(func(url string) (int, string, io.ReadCloser, error) { + assert.Equal(t, "hello", url) + i++ + if i > 1 { + return 200, "Status", ioutil.NopCloser(&b), nil + } + return 501, "Status", nil, nil + }, 0, "hello", 3) + assert.Nil(t, actualErr) + assert.NotNil(t, actualBytes) + assert.Equal(t, 2, i) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/resource_printer.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/resource_printer.go index 7f9d4ab0ef73..b97267a49831 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/resource_printer.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/resource_printer.go @@ -32,9 +32,13 @@ import ( "github.com/ghodss/yaml" "github.com/golang/glog" + "k8s.io/kubernetes/federation/apis/federation" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/meta" "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/apis/apps" + "k8s.io/kubernetes/pkg/apis/autoscaling" + "k8s.io/kubernetes/pkg/apis/batch" "k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/runtime" @@ -65,7 +69,8 @@ func GetPrinter(format, formatArgument string) (ResourcePrinter, bool, error) { printer = &YAMLPrinter{} case "name": printer = &NamePrinter{ - Typer: runtime.ObjectTyperToTyper(api.Scheme), + // TODO: this is wrong, these should be provided as an argument to GetPrinter + Typer: api.Scheme, Decoder: api.Codecs.UniversalDecoder(), } case "template", "go-template": @@ -179,7 +184,7 @@ func (p *VersionedPrinter) 
PrintObj(obj runtime.Object, w io.Writer) error { if version.IsEmpty() { continue } - converted, err := p.convertor.ConvertToVersion(obj, version.String()) + converted, err := p.convertor.ConvertToVersion(obj, version) if runtime.IsNotRegisteredError(err) { continue } @@ -199,14 +204,12 @@ func (p *VersionedPrinter) HandledResources() []string { // NamePrinter is an implementation of ResourcePrinter which outputs "resource/name" pair of an object. type NamePrinter struct { Decoder runtime.Decoder - Typer runtime.Typer + Typer runtime.ObjectTyper } // PrintObj is an implementation of ResourcePrinter.PrintObj which decodes the object // and print "resource/name" pair. If the object is a List, print all items in it. func (p *NamePrinter) PrintObj(obj runtime.Object, w io.Writer) error { - gvk, _, _ := p.Typer.ObjectKind(obj) - if meta.IsListType(obj) { items, err := meta.ExtractList(obj) if err != nil { @@ -232,10 +235,9 @@ func (p *NamePrinter) PrintObj(obj runtime.Object, w io.Writer) error { } } - if gvk != nil { + if gvks, _, err := p.Typer.ObjectKinds(obj); err == nil { // TODO: this is wrong, it assumes that meta knows about all Kinds - should take a RESTMapper - _, resource := meta.KindToResource(*gvk) - + _, resource := meta.KindToResource(gvks[0]) fmt.Fprintf(w, "%s/%s\n", resource.Resource, name) } else { fmt.Fprintf(w, "/%s\n", name) @@ -255,6 +257,12 @@ type JSONPrinter struct { // PrintObj is an implementation of ResourcePrinter.PrintObj which simply writes the object to the Writer. func (p *JSONPrinter) PrintObj(obj runtime.Object, w io.Writer) error { + switch obj := obj.(type) { + case *runtime.Unknown: + _, err := w.Write(obj.Raw) + return err + } + data, err := json.Marshal(obj) if err != nil { return err @@ -281,6 +289,16 @@ type YAMLPrinter struct { // PrintObj prints the data as YAML. func (p *YAMLPrinter) PrintObj(obj runtime.Object, w io.Writer) error { + switch obj := obj.(type) { + case *runtime.Unknown: + data, err := yaml.JSONToYAML(obj.Raw) + if err != nil { + return err + } + _, err = w.Write(data) + return err + } + output, err := yaml.Marshal(obj) if err != nil { return err @@ -401,6 +419,7 @@ var replicaSetColumns = []string{"NAME", "DESIRED", "CURRENT", "AGE"} var jobColumns = []string{"NAME", "DESIRED", "SUCCESSFUL", "AGE"} var serviceColumns = []string{"NAME", "CLUSTER-IP", "EXTERNAL-IP", "PORT(S)", "AGE"} var ingressColumns = []string{"NAME", "RULE", "BACKEND", "ADDRESS", "AGE"} +var petSetColumns = []string{"NAME", "DESIRED", "CURRENT", "AGE"} var endpointColumns = []string{"NAME", "ENDPOINTS", "AGE"} var nodeColumns = []string{"NAME", "STATUS", "AGE"} var daemonSetColumns = []string{"NAME", "DESIRED", "CURRENT", "NODE-SELECTOR", "AGE"} @@ -414,11 +433,16 @@ var persistentVolumeColumns = []string{"NAME", "CAPACITY", "ACCESSMODES", "STATU var persistentVolumeClaimColumns = []string{"NAME", "STATUS", "VOLUME", "CAPACITY", "ACCESSMODES", "AGE"} var componentStatusColumns = []string{"NAME", "STATUS", "MESSAGE", "ERROR"} var thirdPartyResourceColumns = []string{"NAME", "DESCRIPTION", "VERSION(S)"} + +// TODO: consider having 'KIND' for third party resource data +var thirdPartyResourceDataColumns = []string{"NAME", "LABELS", "DATA"} var horizontalPodAutoscalerColumns = []string{"NAME", "REFERENCE", "TARGET", "CURRENT", "MINPODS", "MAXPODS", "AGE"} var withNamespacePrefixColumns = []string{"NAMESPACE"} // TODO(erictune): print cluster name too. 
var deploymentColumns = []string{"NAME", "DESIRED", "CURRENT", "UP-TO-DATE", "AVAILABLE", "AGE"} var configMapColumns = []string{"NAME", "DATA", "AGE"} var podSecurityPolicyColumns = []string{"NAME", "PRIV", "CAPS", "VOLUMEPLUGINS", "SELINUX", "RUNASUSER"} +var clusterColumns = []string{"NAME", "STATUS", "VERSION", "AGE"} +var networkPolicyColumns = []string{"NAME", "POD-SELECTOR", "AGE"} // addDefaultHandlers adds print handlers for default Kubernetes types. func (h *HumanReadablePrinter) addDefaultHandlers() { @@ -438,6 +462,8 @@ func (h *HumanReadablePrinter) addDefaultHandlers() { h.Handler(serviceColumns, printServiceList) h.Handler(ingressColumns, printIngress) h.Handler(ingressColumns, printIngressList) + h.Handler(petSetColumns, printPetSet) + h.Handler(petSetColumns, printPetSetList) h.Handler(endpointColumns, printEndpoints) h.Handler(endpointColumns, printEndpointsList) h.Handler(nodeColumns, printNode) @@ -470,6 +496,12 @@ func (h *HumanReadablePrinter) addDefaultHandlers() { h.Handler(configMapColumns, printConfigMapList) h.Handler(podSecurityPolicyColumns, printPodSecurityPolicy) h.Handler(podSecurityPolicyColumns, printPodSecurityPolicyList) + h.Handler(thirdPartyResourceDataColumns, printThirdPartyResourceData) + h.Handler(thirdPartyResourceDataColumns, printThirdPartyResourceDataList) + h.Handler(clusterColumns, printCluster) + h.Handler(clusterColumns, printClusterList) + h.Handler(networkPolicyColumns, printNetworkPolicy) + h.Handler(networkPolicyColumns, printNetworkPolicyList) } func (h *HumanReadablePrinter) unknown(data []byte, w io.Writer) error { @@ -567,22 +599,51 @@ func printPodBase(pod *api.Pod, w io.Writer, options PrintOptions) error { reason = pod.Status.Reason } - for i := len(pod.Status.ContainerStatuses) - 1; i >= 0; i-- { - container := pod.Status.ContainerStatuses[i] - - restarts += container.RestartCount - if container.State.Waiting != nil && container.State.Waiting.Reason != "" { - reason = container.State.Waiting.Reason - } else if container.State.Terminated != nil && container.State.Terminated.Reason != "" { - reason = container.State.Terminated.Reason - } else if container.State.Terminated != nil && container.State.Terminated.Reason == "" { - if container.State.Terminated.Signal != 0 { - reason = fmt.Sprintf("Signal:%d", container.State.Terminated.Signal) + initializing := false + for i := range pod.Status.InitContainerStatuses { + container := pod.Status.InitContainerStatuses[i] + switch { + case container.State.Terminated != nil && container.State.Terminated.ExitCode == 0: + continue + case container.State.Terminated != nil: + // initialization is failed + if len(container.State.Terminated.Reason) == 0 { + if container.State.Terminated.Signal != 0 { + reason = fmt.Sprintf("Init:Signal:%d", container.State.Terminated.Signal) + } else { + reason = fmt.Sprintf("Init:ExitCode:%d", container.State.Terminated.ExitCode) + } } else { - reason = fmt.Sprintf("ExitCode:%d", container.State.Terminated.ExitCode) + reason = "Init:" + container.State.Terminated.Reason + } + initializing = true + case container.State.Waiting != nil && len(container.State.Waiting.Reason) > 0 && container.State.Waiting.Reason != "PodInitializing": + reason = "Init:" + container.State.Waiting.Reason + initializing = true + default: + reason = fmt.Sprintf("Init:%d/%d", i, len(pod.Spec.InitContainers)) + initializing = true + } + break + } + if !initializing { + for i := len(pod.Status.ContainerStatuses) - 1; i >= 0; i-- { + container := pod.Status.ContainerStatuses[i] + + restarts 
+= int(container.RestartCount) + if container.State.Waiting != nil && container.State.Waiting.Reason != "" { + reason = container.State.Waiting.Reason + } else if container.State.Terminated != nil && container.State.Terminated.Reason != "" { + reason = container.State.Terminated.Reason + } else if container.State.Terminated != nil && container.State.Terminated.Reason == "" { + if container.State.Terminated.Signal != 0 { + reason = fmt.Sprintf("Signal:%d", container.State.Terminated.Signal) + } else { + reason = fmt.Sprintf("ExitCode:%d", container.State.Terminated.ExitCode) + } + } else if container.Ready && container.State.Running != nil { + readyContainers++ + } - } else if container.Ready && container.State.Running != nil { - readyContainers++ } } if pod.DeletionTimestamp != nil { @@ -607,7 +668,12 @@ func printPodBase(pod *api.Pod, w io.Writer, options PrintOptions) error { if options.Wide { nodeName := pod.Spec.NodeName - if _, err := fmt.Fprintf(w, "\t%s", + podIP := pod.Status.PodIP + if podIP == "" { + podIP = "<none>" + } + if _, err := fmt.Fprintf(w, "\t%s\t%s", + podIP, nodeName, ); err != nil { return err @@ -770,7 +836,39 @@ func printReplicaSetList(list *extensions.ReplicaSetList, w io.Writer, options P return nil } -func printJob(job *extensions.Job, w io.Writer, options PrintOptions) error { +func printCluster(c *federation.Cluster, w io.Writer, options PrintOptions) error { + var statuses []string + for _, condition := range c.Status.Conditions { + if condition.Status == api.ConditionTrue { + statuses = append(statuses, string(condition.Type)) + } else { + statuses = append(statuses, "Not"+string(condition.Type)) + } + } + if len(statuses) == 0 { + statuses = append(statuses, "Unknown") + } + + if _, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s\n", + c.Name, + strings.Join(statuses, ","), + c.Status.Version, + translateTimestamp(c.CreationTimestamp), + ); err != nil { + return err + } + return nil +} +func printClusterList(list *federation.ClusterList, w io.Writer, options PrintOptions) error { + for _, rs := range list.Items { + if err := printCluster(&rs, w, options); err != nil { + return err + } + } + return nil +} + +func printJob(job *batch.Job, w io.Writer, options PrintOptions) error { name := job.Name namespace := job.Namespace containers := job.Spec.Template.Spec.Containers @@ -823,7 +921,7 @@ func printJob(job *extensions.Job, w io.Writer, options PrintOptions) error { return nil } -func printJobList(list *extensions.JobList, w io.Writer, options PrintOptions) error { +func printJobList(list *batch.JobList, w io.Writer, options PrintOptions) error { for _, job := range list.Items { if err := printJob(&job, w, options); err != nil { return err @@ -855,16 +953,19 @@ func getServiceExternalIP(svc *api.Service) string { if len(svc.Spec.ExternalIPs) > 0 { return strings.Join(svc.Spec.ExternalIPs, ",") } - return "nodes" + return "<nodes>" case api.ServiceTypeLoadBalancer: lbIps := loadBalancerStatusStringer(svc.Status.LoadBalancer) if len(svc.Spec.ExternalIPs) > 0 { result := append(strings.Split(lbIps, ","), svc.Spec.ExternalIPs...)
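
The init-container branch added to printPodBase in the hunk above encodes a small decision table over container states. A condensed, self-contained sketch of that table, using simplified stand-in types rather than the vendored api structs (the branch order differs slightly from the patch, but the outcomes should match):

```go
package main

import "fmt"

// Simplified stand-ins for the api container state types used above.
type terminated struct {
	ExitCode int
	Signal   int
	Reason   string
}
type waiting struct{ Reason string }
type state struct {
	Terminated *terminated
	Waiting    *waiting
}

// initReason reproduces the decision table: a succeeded init container is
// skipped, a failed one reports Init:Signal/Init:ExitCode or its reason,
// a waiting one reports its reason, and otherwise progress shows as Init:i/n.
// The bool reports whether the pod is still initializing.
func initReason(i, total int, s state) (string, bool) {
	switch {
	case s.Terminated != nil && s.Terminated.ExitCode == 0:
		return "", false // this init container succeeded; inspect the next one
	case s.Terminated != nil && s.Terminated.Reason != "":
		return "Init:" + s.Terminated.Reason, true
	case s.Terminated != nil && s.Terminated.Signal != 0:
		return fmt.Sprintf("Init:Signal:%d", s.Terminated.Signal), true
	case s.Terminated != nil:
		return fmt.Sprintf("Init:ExitCode:%d", s.Terminated.ExitCode), true
	case s.Waiting != nil && s.Waiting.Reason != "" && s.Waiting.Reason != "PodInitializing":
		return "Init:" + s.Waiting.Reason, true
	default:
		return fmt.Sprintf("Init:%d/%d", i, total), true
	}
}

func main() {
	fmt.Println(initReason(0, 2, state{Terminated: &terminated{ExitCode: 1}}))          // Init:ExitCode:1 true
	fmt.Println(initReason(1, 2, state{Waiting: &waiting{Reason: "CrashLoopBackOff"}})) // Init:CrashLoopBackOff true
	fmt.Println(initReason(0, 2, state{}))                                              // Init:0/2 true
}
```
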
return strings.Join(result, ",") } - return lbIps + if len(lbIps) > 0 { + return lbIps + } + return "<pending>" } - return "unknown" + return "<unknown>" } func makePortString(ports []api.ServicePort) string { @@ -994,6 +1095,53 @@ func printIngressList(ingressList *extensions.IngressList, w io.Writer, options return nil } +func printPetSet(ps *apps.PetSet, w io.Writer, options PrintOptions) error { + name := ps.Name + namespace := ps.Namespace + containers := ps.Spec.Template.Spec.Containers + + if options.WithNamespace { + if _, err := fmt.Fprintf(w, "%s\t", namespace); err != nil { + return err + } + } + desiredReplicas := ps.Spec.Replicas + currentReplicas := ps.Status.Replicas + if _, err := fmt.Fprintf(w, "%s\t%d\t%d\t%s", + name, + desiredReplicas, + currentReplicas, + translateTimestamp(ps.CreationTimestamp), + ); err != nil { + return err + } + if options.Wide { + if err := layoutContainers(containers, w); err != nil { + return err + } + if _, err := fmt.Fprintf(w, "\t%s", unversioned.FormatLabelSelector(ps.Spec.Selector)); err != nil { + return err + } + } + if _, err := fmt.Fprint(w, appendLabels(ps.Labels, options.ColumnLabels)); err != nil { + return err + } + if _, err := fmt.Fprint(w, appendAllLabels(options.ShowLabels, ps.Labels)); err != nil { + return err + } + + return nil +} + +func printPetSetList(petSetList *apps.PetSetList, w io.Writer, options PrintOptions) error { + for _, ps := range petSetList.Items { + if err := printPetSet(&ps, w, options); err != nil { + return err + } + } + return nil +} + func printDaemonSet(ds *extensions.DaemonSet, w io.Writer, options PrintOptions) error { name := ds.Name namespace := ds.Namespace @@ -1452,7 +1600,7 @@ func printThirdPartyResource(rsrc *extensions.ThirdPartyResource, w io.Writer, o versions := make([]string, len(rsrc.Versions)) for ix := range rsrc.Versions { version := &rsrc.Versions[ix] - versions[ix] = fmt.Sprintf("%s/%s", version.APIGroup, version.Name) + versions[ix] = fmt.Sprintf("%s", version.Name) } versionsString := strings.Join(versions, ",") if _, err := fmt.Fprintf(w, "%s\t%s\t%s\n", rsrc.Name, rsrc.Description, versionsString); err != nil { @@ -1471,6 +1619,35 @@ func printThirdPartyResourceList(list *extensions.ThirdPartyResourceList, w io.W return nil } +func truncate(str string, maxLen int) string { + if len(str) > maxLen { + return str[0:maxLen] + "..."
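
Taken together, the getServiceExternalIP hunk above derives the EXTERNAL-IP cell from the service type, any explicit external IPs, and resolved load-balancer ingress. A self-contained sketch with simplified types (the placeholder strings match the restored hunk; note the sketch deliberately inherits the quirk that an empty lbIps still contributes a leading empty element through strings.Split):

```go
package main

import (
	"fmt"
	"strings"
)

// service is a toy stand-in for api.Service, reduced to the fields the
// EXTERNAL-IP derivation reads.
type service struct {
	Type        string
	ExternalIPs []string
	LBIngress   []string // resolved load-balancer IPs/hostnames
}

func externalIP(svc service) string {
	switch svc.Type {
	case "NodePort":
		if len(svc.ExternalIPs) > 0 {
			return strings.Join(svc.ExternalIPs, ",")
		}
		return "<nodes>"
	case "LoadBalancer":
		lbIps := strings.Join(svc.LBIngress, ",")
		if len(svc.ExternalIPs) > 0 {
			result := append(strings.Split(lbIps, ","), svc.ExternalIPs...)
			return strings.Join(result, ",")
		}
		if len(lbIps) > 0 {
			return lbIps
		}
		return "<pending>" // load balancer requested but not yet provisioned
	}
	return "<unknown>"
}

func main() {
	fmt.Println(externalIP(service{Type: "LoadBalancer"}))                                 // <pending>
	fmt.Println(externalIP(service{Type: "LoadBalancer", LBIngress: []string{"2.3.4.5"}})) // 2.3.4.5
	fmt.Println(externalIP(service{Type: "NodePort"}))                                     // <nodes>
}
```
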
+ } + return str +} + +func printThirdPartyResourceData(rsrc *extensions.ThirdPartyResourceData, w io.Writer, options PrintOptions) error { + l := labels.FormatLabels(rsrc.Labels) + truncateCols := 50 + if options.Wide { + truncateCols = 100 + } + if _, err := fmt.Fprintf(w, "%s\t%s\t%s\n", rsrc.Name, l, truncate(string(rsrc.Data), truncateCols)); err != nil { + return err + } + return nil +} + +func printThirdPartyResourceDataList(list *extensions.ThirdPartyResourceDataList, w io.Writer, options PrintOptions) error { + for _, item := range list.Items { + if err := printThirdPartyResourceData(&item, w, options); err != nil { + return err + } + } + + return nil +} + func printDeployment(deployment *extensions.Deployment, w io.Writer, options PrintOptions) error { if options.WithNamespace { if _, err := fmt.Fprintf(w, "%s\t", deployment.Namespace); err != nil { @@ -1502,16 +1679,15 @@ func printDeploymentList(list *extensions.DeploymentList, w io.Writer, options P return nil } -func printHorizontalPodAutoscaler(hpa *extensions.HorizontalPodAutoscaler, w io.Writer, options PrintOptions) error { +func printHorizontalPodAutoscaler(hpa *autoscaling.HorizontalPodAutoscaler, w io.Writer, options PrintOptions) error { namespace := hpa.Namespace name := hpa.Name - reference := fmt.Sprintf("%s/%s/%s", - hpa.Spec.ScaleRef.Kind, - hpa.Spec.ScaleRef.Name, - hpa.Spec.ScaleRef.Subresource) + reference := fmt.Sprintf("%s/%s", + hpa.Spec.ScaleTargetRef.Kind, + hpa.Spec.ScaleTargetRef.Name) target := "" - if hpa.Spec.CPUUtilization != nil { - target = fmt.Sprintf("%d%%", hpa.Spec.CPUUtilization.TargetPercentage) + if hpa.Spec.TargetCPUUtilizationPercentage != nil { + target = fmt.Sprintf("%d%%", *hpa.Spec.TargetCPUUtilizationPercentage) } current := "" if hpa.Status.CurrentCPUUtilizationPercentage != nil { @@ -1546,7 +1722,7 @@ func printHorizontalPodAutoscaler(hpa *extensions.HorizontalPodAutoscaler, w io. 
return err } -func printHorizontalPodAutoscalerList(list *extensions.HorizontalPodAutoscalerList, w io.Writer, options PrintOptions) error { +func printHorizontalPodAutoscalerList(list *autoscaling.HorizontalPodAutoscalerList, w io.Writer, options PrintOptions) error { for i := range list.Items { if err := printHorizontalPodAutoscaler(&list.Items[i], w, options); err != nil { return err @@ -1584,9 +1760,9 @@ func printConfigMapList(list *api.ConfigMapList, w io.Writer, options PrintOptio } func printPodSecurityPolicy(item *extensions.PodSecurityPolicy, w io.Writer, options PrintOptions) error { - _, err := fmt.Fprintf(w, "%s\t%t\t%v\t%t\t%s\t%s\n", item.Name, item.Spec.Privileged, - item.Spec.Capabilities, item.Spec.Volumes, item.Spec.SELinux.Rule, - item.Spec.RunAsUser.Rule) + _, err := fmt.Fprintf(w, "%s\t%t\t%v\t%s\t%s\t%s\t%s\t%t\t%v\n", item.Name, item.Spec.Privileged, + item.Spec.AllowedCapabilities, item.Spec.SELinux.Rule, + item.Spec.RunAsUser.Rule, item.Spec.FSGroup.Rule, item.Spec.SupplementalGroups.Rule, item.Spec.ReadOnlyRootFilesystem, item.Spec.Volumes) return err } @@ -1600,6 +1776,34 @@ func printPodSecurityPolicyList(list *extensions.PodSecurityPolicyList, w io.Wri return nil } +func printNetworkPolicy(networkPolicy *extensions.NetworkPolicy, w io.Writer, options PrintOptions) error { + name := networkPolicy.Name + namespace := networkPolicy.Namespace + + if options.WithNamespace { + if _, err := fmt.Fprintf(w, "%s\t", namespace); err != nil { + return err + } + } + if _, err := fmt.Fprintf(w, "%s\t%v\t%s", name, unversioned.FormatLabelSelector(&networkPolicy.Spec.PodSelector), translateTimestamp(networkPolicy.CreationTimestamp)); err != nil { + return err + } + if _, err := fmt.Fprint(w, appendLabels(networkPolicy.Labels, options.ColumnLabels)); err != nil { + return err + } + _, err := fmt.Fprint(w, appendAllLabels(options.ShowLabels, networkPolicy.Labels)) + return err +} + +func printNetworkPolicyList(list *extensions.NetworkPolicyList, w io.Writer, options PrintOptions) error { + for i := range list.Items { + if err := printNetworkPolicy(&list.Items[i], w, options); err != nil { + return err + } + } + return nil +} + func appendLabels(itemLabels map[string]string, columnLabels []string) string { var buffer bytes.Buffer @@ -1634,10 +1838,7 @@ func appendAllLabels(showLabels bool, itemLabels map[string]string) string { func appendLabelTabs(columnLabels []string) string { var buffer bytes.Buffer - for i := range columnLabels { - // NB: This odd dance is to make the loop both compatible with go 1.3 and - // pass `gofmt -s` - _ = i + for range columnLabels { buffer.WriteString("\t") } buffer.WriteString("\n") @@ -1678,12 +1879,12 @@ func formatLabelHeaders(columnLabels []string) []string { func formatWideHeaders(wide bool, t reflect.Type) []string { if wide { if t.String() == "*api.Pod" || t.String() == "*api.PodList" { - return []string{"NODE"} + return []string{"IP", "NODE"} } if t.String() == "*api.ReplicationController" || t.String() == "*api.ReplicationControllerList" { return []string{"CONTAINER(S)", "IMAGE(S)", "SELECTOR"} } - if t.String() == "*extensions.Job" || t.String() == "*extensions.JobList" { + if t.String() == "*batch.Job" || t.String() == "*batch.JobList" { return []string{"CONTAINER(S)", "IMAGE(S)", "SELECTOR"} } if t.String() == "*api.Service" || t.String() == "*api.ServiceList" { diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/resource_printer_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/resource_printer_test.go new 
file mode 100644 index 000000000000..e9e1f1cd74ab --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/resource_printer_test.go @@ -0,0 +1,1450 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubectl + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "reflect" + "strings" + "testing" + "time" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/apis/batch" + "k8s.io/kubernetes/pkg/apis/extensions" + kubectltesting "k8s.io/kubernetes/pkg/kubectl/testing" + "k8s.io/kubernetes/pkg/runtime" + yamlserializer "k8s.io/kubernetes/pkg/runtime/serializer/yaml" + "k8s.io/kubernetes/pkg/util/diff" + "k8s.io/kubernetes/pkg/util/intstr" + "k8s.io/kubernetes/pkg/util/sets" + + "github.com/ghodss/yaml" +) + +func init() { + api.Scheme.AddKnownTypes(testapi.Default.InternalGroupVersion(), &kubectltesting.TestStruct{}) + api.Scheme.AddKnownTypes(*testapi.Default.GroupVersion(), &kubectltesting.TestStruct{}) +} + +var testData = kubectltesting.TestStruct{ + Key: "testValue", + Map: map[string]int{"TestSubkey": 1}, + StringList: []string{"a", "b", "c"}, + IntList: []int{1, 2, 3}, +} + +func TestVersionedPrinter(t *testing.T) { + original := &kubectltesting.TestStruct{Key: "value"} + p := NewVersionedPrinter( + ResourcePrinterFunc(func(obj runtime.Object, w io.Writer) error { + if obj == original { + t.Fatalf("object should not be identical: %#v", obj) + } + if obj.(*kubectltesting.TestStruct).Key != "value" { + t.Fatalf("object was not converted: %#v", obj) + } + return nil + }), + api.Scheme, + *testapi.Default.GroupVersion(), + ) + if err := p.PrintObj(original, nil); err != nil { + t.Errorf("unexpected error: %v", err) + } +} + +func TestPrintDefault(t *testing.T) { + printer, found, err := GetPrinter("", "") + if err != nil { + t.Fatalf("unexpected error: %#v", err) + } + if found { + t.Errorf("no printer should have been found: %#v / %v", printer, err) + } +} + +type TestPrintType struct { + Data string +} + +func (obj *TestPrintType) GetObjectKind() unversioned.ObjectKind { return unversioned.EmptyObjectKind } + +type TestUnknownType struct{} + +func (obj *TestUnknownType) GetObjectKind() unversioned.ObjectKind { return unversioned.EmptyObjectKind } + +func TestPrinter(t *testing.T) { + //test inputs + simpleTest := &TestPrintType{"foo"} + podTest := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}} + podListTest := &api.PodList{ + Items: []api.Pod{ + {ObjectMeta: api.ObjectMeta{Name: "foo"}}, + {ObjectMeta: api.ObjectMeta{Name: "bar"}}, + }, + } + emptyListTest := &api.PodList{} + testapi, err := api.Scheme.ConvertToVersion(podTest, *testapi.Default.GroupVersion()) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + printerTests := []struct { + Name string + Format string + FormatArgument string + Input runtime.Object + Expect string + }{ + {"test json", "json", "", simpleTest, "{\n \"Data\": 
\"foo\"\n}\n"}, + {"test yaml", "yaml", "", simpleTest, "Data: foo\n"}, + {"test template", "template", "{{if .id}}{{.id}}{{end}}{{if .metadata.name}}{{.metadata.name}}{{end}}", + podTest, "foo"}, + {"test jsonpath", "jsonpath", "{.metadata.name}", podTest, "foo"}, + {"test jsonpath list", "jsonpath", "{.items[*].metadata.name}", podListTest, "foo bar"}, + {"test jsonpath empty list", "jsonpath", "{.items[*].metadata.name}", emptyListTest, ""}, + {"test name", "name", "", podTest, "pod/foo\n"}, + {"emits versioned objects", "template", "{{.kind}}", testapi, "Pod"}, + } + for _, test := range printerTests { + buf := bytes.NewBuffer([]byte{}) + printer, found, err := GetPrinter(test.Format, test.FormatArgument) + if err != nil || !found { + t.Errorf("in %s, unexpected error: %#v", test.Name, err) + } + if err := printer.PrintObj(test.Input, buf); err != nil { + t.Errorf("in %s, unexpected error: %#v", test.Name, err) + } + if buf.String() != test.Expect { + t.Errorf("in %s, expect %q, got %q", test.Name, test.Expect, buf.String()) + } + } + +} + +func TestBadPrinter(t *testing.T) { + badPrinterTests := []struct { + Name string + Format string + FormatArgument string + Error error + }{ + {"empty template", "template", "", fmt.Errorf("template format specified but no template given")}, + {"bad template", "template", "{{ .Name", fmt.Errorf("error parsing template {{ .Name, template: output:1: unclosed action\n")}, + {"bad templatefile", "templatefile", "", fmt.Errorf("templatefile format specified but no template file given")}, + {"bad jsonpath", "jsonpath", "{.Name", fmt.Errorf("error parsing jsonpath {.Name, unclosed action\n")}, + } + for _, test := range badPrinterTests { + _, _, err := GetPrinter(test.Format, test.FormatArgument) + if err == nil || err.Error() != test.Error.Error() { + t.Errorf("in %s, expect %s, got %s", test.Name, test.Error, err) + } + } +} + +func testPrinter(t *testing.T, printer ResourcePrinter, unmarshalFunc func(data []byte, v interface{}) error) { + buf := bytes.NewBuffer([]byte{}) + + err := printer.PrintObj(&testData, buf) + if err != nil { + t.Fatal(err) + } + var poutput kubectltesting.TestStruct + // Verify that given function runs without error. + err = unmarshalFunc(buf.Bytes(), &poutput) + if err != nil { + t.Fatal(err) + } + // Use real decode function to undo the versioning process. + poutput = kubectltesting.TestStruct{} + s := yamlserializer.NewDecodingSerializer(testapi.Default.Codec()) + if err := runtime.DecodeInto(s, buf.Bytes(), &poutput); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(testData, poutput) { + t.Errorf("Test data and unmarshaled data are not equal: %v", diff.ObjectDiff(poutput, testData)) + } + + obj := &api.Pod{ + ObjectMeta: api.ObjectMeta{Name: "foo"}, + } + buf.Reset() + printer.PrintObj(obj, buf) + var objOut api.Pod + // Verify that given function runs without error. + err = unmarshalFunc(buf.Bytes(), &objOut) + if err != nil { + t.Fatalf("unexpected error: %#v", err) + } + // Use real decode function to undo the versioning process. 
+ objOut = api.Pod{} + if err := runtime.DecodeInto(s, buf.Bytes(), &objOut); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(obj, &objOut) { + t.Errorf("Unexpected inequality:\n%v", diff.ObjectDiff(obj, &objOut)) + } +} + +func TestYAMLPrinter(t *testing.T) { + testPrinter(t, &YAMLPrinter{}, yaml.Unmarshal) +} + +func TestJSONPrinter(t *testing.T) { + testPrinter(t, &JSONPrinter{}, json.Unmarshal) +} + +func PrintCustomType(obj *TestPrintType, w io.Writer, options PrintOptions) error { + _, err := fmt.Fprintf(w, "%s", obj.Data) + return err +} + +func ErrorPrintHandler(obj *TestPrintType, w io.Writer, options PrintOptions) error { + return fmt.Errorf("ErrorPrintHandler error") +} + +func TestCustomTypePrinting(t *testing.T) { + columns := []string{"Data"} + printer := NewHumanReadablePrinter(false, false, false, false, false, false, []string{}) + printer.Handler(columns, PrintCustomType) + + obj := TestPrintType{"test object"} + buffer := &bytes.Buffer{} + err := printer.PrintObj(&obj, buffer) + if err != nil { + t.Fatalf("An error occurred printing the custom type: %#v", err) + } + expectedOutput := "Data\ntest object" + if buffer.String() != expectedOutput { + t.Errorf("The data was not printed as expected. Expected:\n%s\nGot:\n%s", expectedOutput, buffer.String()) + } +} + +func TestPrintHandlerError(t *testing.T) { + columns := []string{"Data"} + printer := NewHumanReadablePrinter(false, false, false, false, false, false, []string{}) + printer.Handler(columns, ErrorPrintHandler) + obj := TestPrintType{"test object"} + buffer := &bytes.Buffer{} + err := printer.PrintObj(&obj, buffer) + if err == nil || err.Error() != "ErrorPrintHandler error" { + t.Errorf("Did not get the expected error: %#v", err) + } +} + +func TestUnknownTypePrinting(t *testing.T) { + printer := NewHumanReadablePrinter(false, false, false, false, false, false, []string{}) + buffer := &bytes.Buffer{} + err := printer.PrintObj(&TestUnknownType{}, buffer) + if err == nil { + t.Errorf("An error was expected from printing unknown type") + } +} + +func TestTemplatePanic(t *testing.T) { + tmpl := `{{and ((index .currentState.info "foo").state.running.startedAt) .currentState.info.net.state.running.startedAt}}` + printer, err := NewTemplatePrinter([]byte(tmpl)) + if err != nil { + t.Fatalf("tmpl fail: %v", err) + } + buffer := &bytes.Buffer{} + err = printer.PrintObj(&api.Pod{}, buffer) + if err == nil { + t.Fatalf("expected that template to crash") + } + if buffer.String() == "" { + t.Errorf("no debugging info was printed") + } +} + +func TestNamePrinter(t *testing.T) { + tests := map[string]struct { + obj runtime.Object + expect string + }{ + "singleObject": { + &api.Pod{ + TypeMeta: unversioned.TypeMeta{ + Kind: "Pod", + }, + ObjectMeta: api.ObjectMeta{ + Name: "foo", + }, + }, + "pod/foo\n"}, + "List": { + &v1.List{ + TypeMeta: unversioned.TypeMeta{ + Kind: "List", + }, + Items: []runtime.RawExtension{ + { + Raw: []byte(`{"kind": "Pod", "apiVersion": "v1", "metadata": { "name": "foo"}}`), + }, + { + Raw: []byte(`{"kind": "Pod", "apiVersion": "v1", "metadata": { "name": "bar"}}`), + }, + }, + }, + "pod/foo\npod/bar\n"}, + } + printer, _, _ := GetPrinter("name", "") + for name, item := range tests { + buff := &bytes.Buffer{} + err := printer.PrintObj(item.obj, buff) + if err != nil { + t.Errorf("%v: unexpected err: %v", name, err) + continue + } + got := buff.String() + if item.expect != got { + t.Errorf("%v: expected %v, got %v", name, item.expect, got) + } + } +} + +func TestTemplateStrings(t *testing.T) { + // 
This unit tests the "exists" function as well as the template from update.sh + table := map[string]struct { + pod api.Pod + expect string + }{ + "nilInfo": {api.Pod{}, "false"}, + "emptyInfo": {api.Pod{Status: api.PodStatus{ContainerStatuses: []api.ContainerStatus{}}}, "false"}, + "fooExists": { + api.Pod{ + Status: api.PodStatus{ + ContainerStatuses: []api.ContainerStatus{ + { + Name: "foo", + }, + }, + }, + }, + "false", + }, + "barExists": { + api.Pod{ + Status: api.PodStatus{ + ContainerStatuses: []api.ContainerStatus{ + { + Name: "bar", + }, + }, + }, + }, + "false", + }, + "bothExist": { + api.Pod{ + Status: api.PodStatus{ + ContainerStatuses: []api.ContainerStatus{ + { + Name: "foo", + }, + { + Name: "bar", + }, + }, + }, + }, + "false", + }, + "barValid": { + api.Pod{ + Status: api.PodStatus{ + ContainerStatuses: []api.ContainerStatus{ + { + Name: "foo", + }, + { + Name: "bar", + State: api.ContainerState{ + Running: &api.ContainerStateRunning{ + StartedAt: unversioned.Time{}, + }, + }, + }, + }, + }, + }, + "false", + }, + "bothValid": { + api.Pod{ + Status: api.PodStatus{ + ContainerStatuses: []api.ContainerStatus{ + { + Name: "foo", + State: api.ContainerState{ + Running: &api.ContainerStateRunning{ + StartedAt: unversioned.Time{}, + }, + }, + }, + { + Name: "bar", + State: api.ContainerState{ + Running: &api.ContainerStateRunning{ + StartedAt: unversioned.Time{}, + }, + }, + }, + }, + }, + }, + "true", + }, + } + // The point of this test is to verify that the below template works. + tmpl := `{{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "foo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}` + p, err := NewTemplatePrinter([]byte(tmpl)) + if err != nil { + t.Fatalf("tmpl fail: %v", err) + } + + printer := NewVersionedPrinter(p, api.Scheme, *testapi.Default.GroupVersion()) + + for name, item := range table { + buffer := &bytes.Buffer{} + err = printer.PrintObj(&item.pod, buffer) + if err != nil { + t.Errorf("%v: unexpected err: %v", name, err) + continue + } + actual := buffer.String() + if len(actual) == 0 { + actual = "false" + } + if e := item.expect; e != actual { + t.Errorf("%v: expected %v, got %v", name, e, actual) + } + } +} + +func TestPrinters(t *testing.T) { + om := func(name string) api.ObjectMeta { return api.ObjectMeta{Name: name} } + templatePrinter, err := NewTemplatePrinter([]byte("{{.name}}")) + if err != nil { + t.Fatal(err) + } + templatePrinter2, err := NewTemplatePrinter([]byte("{{len .items}}")) + if err != nil { + t.Fatal(err) + } + jsonpathPrinter, err := NewJSONPathPrinter("{.metadata.name}") + if err != nil { + t.Fatal(err) + } + printers := map[string]ResourcePrinter{ + "humanReadable": NewHumanReadablePrinter(true, false, false, false, false, false, []string{}), + "humanReadableHeaders": NewHumanReadablePrinter(false, false, false, false, false, false, []string{}), + "json": &JSONPrinter{}, + "yaml": &YAMLPrinter{}, + "template": templatePrinter, + "template2": templatePrinter2, + "jsonpath": jsonpathPrinter, + "name": &NamePrinter{ + Typer: api.Scheme, + Decoder: api.Codecs.UniversalDecoder(), + }, + } + objects := map[string]runtime.Object{ + "pod": &api.Pod{ObjectMeta: om("pod")}, + "emptyPodList": &api.PodList{}, + "nonEmptyPodList": &api.PodList{Items: []api.Pod{{}}}, + "endpoints": &api.Endpoints{ + Subsets: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}, {IP: "localhost"}}, + Ports: []api.EndpointPort{{Port: 8080}}, + }}}, + } + // map of printer name 
to set of objects it should fail on. + expectedErrors := map[string]sets.String{ + "template2": sets.NewString("pod", "emptyPodList", "endpoints"), + "jsonpath": sets.NewString("emptyPodList", "nonEmptyPodList", "endpoints"), + } + + for pName, p := range printers { + for oName, obj := range objects { + b := &bytes.Buffer{} + if err := p.PrintObj(obj, b); err != nil { + if set, found := expectedErrors[pName]; found && set.Has(oName) { + // expected error + continue + } + t.Errorf("printer '%v', object '%v'; error: '%v'", pName, oName, err) + } + } + } +} + +func TestPrintEventsResultSorted(t *testing.T) { + // Arrange + printer := NewHumanReadablePrinter(false /* noHeaders */, false, false, false, false, false, []string{}) + + obj := api.EventList{ + Items: []api.Event{ + { + Source: api.EventSource{Component: "kubelet"}, + Message: "Item 1", + FirstTimestamp: unversioned.NewTime(time.Date(2014, time.January, 15, 0, 0, 0, 0, time.UTC)), + LastTimestamp: unversioned.NewTime(time.Date(2014, time.January, 15, 0, 0, 0, 0, time.UTC)), + Count: 1, + Type: api.EventTypeNormal, + }, + { + Source: api.EventSource{Component: "scheduler"}, + Message: "Item 2", + FirstTimestamp: unversioned.NewTime(time.Date(1987, time.June, 17, 0, 0, 0, 0, time.UTC)), + LastTimestamp: unversioned.NewTime(time.Date(1987, time.June, 17, 0, 0, 0, 0, time.UTC)), + Count: 1, + Type: api.EventTypeNormal, + }, + { + Source: api.EventSource{Component: "kubelet"}, + Message: "Item 3", + FirstTimestamp: unversioned.NewTime(time.Date(2002, time.December, 25, 0, 0, 0, 0, time.UTC)), + LastTimestamp: unversioned.NewTime(time.Date(2002, time.December, 25, 0, 0, 0, 0, time.UTC)), + Count: 1, + Type: api.EventTypeNormal, + }, + }, + } + buffer := &bytes.Buffer{} + + // Act + err := printer.PrintObj(&obj, buffer) + + // Assert + if err != nil { + t.Fatalf("An error occurred printing the EventList: %#v", err) + } + out := buffer.String() + VerifyDatesInOrder(out, "\n" /* rowDelimiter */, " " /* columnDelimiter */, t) +} + +func TestPrintNodeStatus(t *testing.T) { + printer := NewHumanReadablePrinter(false, false, false, false, false, false, []string{}) + table := []struct { + node api.Node + status string + }{ + { + node: api.Node{ + ObjectMeta: api.ObjectMeta{Name: "foo1"}, + Status: api.NodeStatus{Conditions: []api.NodeCondition{{Type: api.NodeReady, Status: api.ConditionTrue}}}, + }, + status: "Ready", + }, + { + node: api.Node{ + ObjectMeta: api.ObjectMeta{Name: "foo2"}, + Spec: api.NodeSpec{Unschedulable: true}, + Status: api.NodeStatus{Conditions: []api.NodeCondition{{Type: api.NodeReady, Status: api.ConditionTrue}}}, + }, + status: "Ready,SchedulingDisabled", + }, + { + node: api.Node{ + ObjectMeta: api.ObjectMeta{Name: "foo3"}, + Status: api.NodeStatus{Conditions: []api.NodeCondition{ + {Type: api.NodeReady, Status: api.ConditionTrue}, + {Type: api.NodeReady, Status: api.ConditionTrue}}}, + }, + status: "Ready", + }, + { + node: api.Node{ + ObjectMeta: api.ObjectMeta{Name: "foo4"}, + Status: api.NodeStatus{Conditions: []api.NodeCondition{{Type: api.NodeReady, Status: api.ConditionFalse}}}, + }, + status: "NotReady", + }, + { + node: api.Node{ + ObjectMeta: api.ObjectMeta{Name: "foo5"}, + Spec: api.NodeSpec{Unschedulable: true}, + Status: api.NodeStatus{Conditions: []api.NodeCondition{{Type: api.NodeReady, Status: api.ConditionFalse}}}, + }, + status: "NotReady,SchedulingDisabled", + }, + { + node: api.Node{ + ObjectMeta: api.ObjectMeta{Name: "foo6"}, + Status: api.NodeStatus{Conditions: []api.NodeCondition{{Type: 
"InvalidValue", Status: api.ConditionTrue}}}, + }, + status: "Unknown", + }, + { + node: api.Node{ + ObjectMeta: api.ObjectMeta{Name: "foo7"}, + Status: api.NodeStatus{Conditions: []api.NodeCondition{{}}}, + }, + status: "Unknown", + }, + { + node: api.Node{ + ObjectMeta: api.ObjectMeta{Name: "foo8"}, + Spec: api.NodeSpec{Unschedulable: true}, + Status: api.NodeStatus{Conditions: []api.NodeCondition{{Type: "InvalidValue", Status: api.ConditionTrue}}}, + }, + status: "Unknown,SchedulingDisabled", + }, + { + node: api.Node{ + ObjectMeta: api.ObjectMeta{Name: "foo9"}, + Spec: api.NodeSpec{Unschedulable: true}, + Status: api.NodeStatus{Conditions: []api.NodeCondition{{}}}, + }, + status: "Unknown,SchedulingDisabled", + }, + } + + for _, test := range table { + buffer := &bytes.Buffer{} + err := printer.PrintObj(&test.node, buffer) + if err != nil { + t.Fatalf("An error occurred printing Node: %#v", err) + } + if !contains(strings.Fields(buffer.String()), test.status) { + t.Fatalf("Expect printing node %s with status %#v, got: %#v", test.node.Name, test.status, buffer.String()) + } + } +} + +func contains(fields []string, field string) bool { + for _, v := range fields { + if v == field { + return true + } + } + return false +} + +func TestPrintHunmanReadableIngressWithColumnLabels(t *testing.T) { + ingress := extensions.Ingress{ + ObjectMeta: api.ObjectMeta{ + Name: "test1", + CreationTimestamp: unversioned.Time{Time: time.Now().AddDate(-10, 0, 0)}, + Labels: map[string]string{ + "app_name": "kubectl_test_ingress", + }, + }, + Spec: extensions.IngressSpec{ + Backend: &extensions.IngressBackend{ + ServiceName: "svc", + ServicePort: intstr.FromInt(93), + }, + }, + Status: extensions.IngressStatus{ + LoadBalancer: api.LoadBalancerStatus{ + Ingress: []api.LoadBalancerIngress{ + { + IP: "2.3.4.5", + Hostname: "localhost.localdomain", + }, + }, + }, + }, + } + buff := bytes.Buffer{} + printIngress(&ingress, &buff, PrintOptions{false, false, false, false, false, false, []string{"app_name"}}) + output := string(buff.Bytes()) + appName := ingress.ObjectMeta.Labels["app_name"] + if !strings.Contains(output, appName) { + t.Errorf("expected to container app_name label value %s, but doesn't %s", appName, output) + } +} + +func TestPrintHumanReadableService(t *testing.T) { + tests := []api.Service{ + { + Spec: api.ServiceSpec{ + ClusterIP: "1.2.3.4", + Type: "LoadBalancer", + Ports: []api.ServicePort{ + { + Port: 80, + Protocol: "TCP", + }, + }, + }, + Status: api.ServiceStatus{ + LoadBalancer: api.LoadBalancerStatus{ + Ingress: []api.LoadBalancerIngress{ + { + IP: "2.3.4.5", + }, + { + IP: "3.4.5.6", + }, + }, + }, + }, + }, + { + Spec: api.ServiceSpec{ + ClusterIP: "1.2.3.4", + Ports: []api.ServicePort{ + { + Port: 80, + Protocol: "TCP", + }, + { + Port: 8090, + Protocol: "UDP", + }, + { + Port: 8000, + Protocol: "TCP", + }, + }, + }, + }, + { + Spec: api.ServiceSpec{ + ClusterIP: "1.2.3.4", + Type: "LoadBalancer", + Ports: []api.ServicePort{ + { + Port: 80, + Protocol: "TCP", + }, + { + Port: 8090, + Protocol: "UDP", + }, + { + Port: 8000, + Protocol: "TCP", + }, + }, + }, + Status: api.ServiceStatus{ + LoadBalancer: api.LoadBalancerStatus{ + Ingress: []api.LoadBalancerIngress{ + { + IP: "2.3.4.5", + }, + }, + }, + }, + }, + { + Spec: api.ServiceSpec{ + ClusterIP: "1.2.3.4", + Type: "LoadBalancer", + Ports: []api.ServicePort{ + { + Port: 80, + Protocol: "TCP", + }, + { + Port: 8090, + Protocol: "UDP", + }, + { + Port: 8000, + Protocol: "TCP", + }, + }, + }, + Status: api.ServiceStatus{ + LoadBalancer: 
api.LoadBalancerStatus{ + Ingress: []api.LoadBalancerIngress{ + { + IP: "2.3.4.5", + }, + { + IP: "3.4.5.6", + }, + { + IP: "5.6.7.8", + Hostname: "host5678", + }, + }, + }, + }, + }, + } + + for _, svc := range tests { + buff := bytes.Buffer{} + printService(&svc, &buff, PrintOptions{false, false, false, false, false, false, []string{}}) + output := string(buff.Bytes()) + ip := svc.Spec.ClusterIP + if !strings.Contains(output, ip) { + t.Errorf("expected to contain ClusterIP %s, but doesn't: %s", ip, output) + } + + for _, ingress := range svc.Status.LoadBalancer.Ingress { + ip = ingress.IP + if !strings.Contains(output, ip) { + t.Errorf("expected to contain ingress ip %s, but doesn't: %s", ip, output) + } + } + + for _, port := range svc.Spec.Ports { + portSpec := fmt.Sprintf("%d/%s", port.Port, port.Protocol) + if !strings.Contains(output, portSpec) { + t.Errorf("expected to contain port: %s, but doesn't: %s", portSpec, output) + } + } + // Each service should print on one line + if 1 != strings.Count(output, "\n") { + t.Errorf("expected a single newline, found %d", strings.Count(output, "\n")) + } + } +} + +func TestPrintHumanReadableWithNamespace(t *testing.T) { + namespaceName := "testnamespace" + name := "test" + table := []struct { + obj runtime.Object + isNamespaced bool + }{ + { + obj: &api.Pod{ + ObjectMeta: api.ObjectMeta{Name: name, Namespace: namespaceName}, + }, + isNamespaced: true, + }, + { + obj: &api.ReplicationController{ + ObjectMeta: api.ObjectMeta{Name: name, Namespace: namespaceName}, + Spec: api.ReplicationControllerSpec{ + Replicas: 2, + Template: &api.PodTemplateSpec{ + ObjectMeta: api.ObjectMeta{ + Labels: map[string]string{ + "name": "foo", + "type": "production", + }, + }, + Spec: api.PodSpec{ + Containers: []api.Container{ + { + Image: "foo/bar", + TerminationMessagePath: api.TerminationMessagePathDefault, + ImagePullPolicy: api.PullIfNotPresent, + }, + }, + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSDefault, + NodeSelector: map[string]string{ + "baz": "blah", + }, + }, + }, + }, + }, + isNamespaced: true, + }, + { + obj: &api.Service{ + ObjectMeta: api.ObjectMeta{Name: name, Namespace: namespaceName}, + Spec: api.ServiceSpec{ + ClusterIP: "1.2.3.4", + Ports: []api.ServicePort{ + { + Port: 80, + Protocol: "TCP", + }, + }, + }, + Status: api.ServiceStatus{ + LoadBalancer: api.LoadBalancerStatus{ + Ingress: []api.LoadBalancerIngress{ + { + IP: "2.3.4.5", + }, + }, + }, + }, + }, + isNamespaced: true, + }, + { + obj: &api.Endpoints{ + ObjectMeta: api.ObjectMeta{Name: name, Namespace: namespaceName}, + Subsets: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}, {IP: "localhost"}}, + Ports: []api.EndpointPort{{Port: 8080}}, + }, + }}, + isNamespaced: true, + }, + { + obj: &api.Namespace{ + ObjectMeta: api.ObjectMeta{Name: name}, + }, + isNamespaced: false, + }, + { + obj: &api.Secret{ + ObjectMeta: api.ObjectMeta{Name: name, Namespace: namespaceName}, + }, + isNamespaced: true, + }, + { + obj: &api.ServiceAccount{ + ObjectMeta: api.ObjectMeta{Name: name, Namespace: namespaceName}, + Secrets: []api.ObjectReference{}, + }, + isNamespaced: true, + }, + { + obj: &api.Node{ + ObjectMeta: api.ObjectMeta{Name: name}, + Status: api.NodeStatus{}, + }, + isNamespaced: false, + }, + { + obj: &api.PersistentVolume{ + ObjectMeta: api.ObjectMeta{Name: name, Namespace: namespaceName}, + Spec: api.PersistentVolumeSpec{}, + }, + isNamespaced: false, + }, + { + obj: &api.PersistentVolumeClaim{ + ObjectMeta: api.ObjectMeta{Name: name, 
Namespace: namespaceName}, + Spec: api.PersistentVolumeClaimSpec{}, + }, + isNamespaced: true, + }, + { + obj: &api.Event{ + ObjectMeta: api.ObjectMeta{Name: name, Namespace: namespaceName}, + Source: api.EventSource{Component: "kubelet"}, + Message: "Item 1", + FirstTimestamp: unversioned.NewTime(time.Date(2014, time.January, 15, 0, 0, 0, 0, time.UTC)), + LastTimestamp: unversioned.NewTime(time.Date(2014, time.January, 15, 0, 0, 0, 0, time.UTC)), + Count: 1, + Type: api.EventTypeNormal, + }, + isNamespaced: true, + }, + { + obj: &api.LimitRange{ + ObjectMeta: api.ObjectMeta{Name: name, Namespace: namespaceName}, + }, + isNamespaced: true, + }, + { + obj: &api.ResourceQuota{ + ObjectMeta: api.ObjectMeta{Name: name, Namespace: namespaceName}, + }, + isNamespaced: true, + }, + { + obj: &api.ComponentStatus{ + Conditions: []api.ComponentCondition{ + {Type: api.ComponentHealthy, Status: api.ConditionTrue, Message: "ok", Error: ""}, + }, + }, + isNamespaced: false, + }, + } + + for _, test := range table { + if test.isNamespaced { + // Expect output to include namespace when requested. + printer := NewHumanReadablePrinter(false, true, false, false, false, false, []string{}) + buffer := &bytes.Buffer{} + err := printer.PrintObj(test.obj, buffer) + if err != nil { + t.Fatalf("An error occurred printing object: %#v", err) + } + matched := contains(strings.Fields(buffer.String()), fmt.Sprintf("%s", namespaceName)) + if !matched { + t.Errorf("Expect printing object to contain namespace: %#v", test.obj) + } + } else { + // Expect error when trying to get all namespaces for un-namespaced object. + printer := NewHumanReadablePrinter(false, true, false, false, false, false, []string{}) + buffer := &bytes.Buffer{} + err := printer.PrintObj(test.obj, buffer) + if err == nil { + t.Errorf("Expected error when printing un-namespaced type") + } + } + } +} + +func TestPrintPod(t *testing.T) { + tests := []struct { + pod api.Pod + expect string + }{ + { + // Test name, num of containers, restarts, container ready status + api.Pod{ + ObjectMeta: api.ObjectMeta{Name: "test1"}, + Spec: api.PodSpec{Containers: make([]api.Container, 2)}, + Status: api.PodStatus{ + Phase: "podPhase", + ContainerStatuses: []api.ContainerStatus{ + {Ready: true, RestartCount: 3, State: api.ContainerState{Running: &api.ContainerStateRunning{}}}, + {RestartCount: 3}, + }, + }, + }, + "test1\t1/2\tpodPhase\t6\t", + }, + { + // Test container error overwrites pod phase + api.Pod{ + ObjectMeta: api.ObjectMeta{Name: "test2"}, + Spec: api.PodSpec{Containers: make([]api.Container, 2)}, + Status: api.PodStatus{ + Phase: "podPhase", + ContainerStatuses: []api.ContainerStatus{ + {Ready: true, RestartCount: 3, State: api.ContainerState{Running: &api.ContainerStateRunning{}}}, + {State: api.ContainerState{Waiting: &api.ContainerStateWaiting{Reason: "ContainerWaitingReason"}}, RestartCount: 3}, + }, + }, + }, + "test2\t1/2\tContainerWaitingReason\t6\t", + }, + { + // Test the same as the above but with Terminated state and the first container overwrites the rest + api.Pod{ + ObjectMeta: api.ObjectMeta{Name: "test3"}, + Spec: api.PodSpec{Containers: make([]api.Container, 2)}, + Status: api.PodStatus{ + Phase: "podPhase", + ContainerStatuses: []api.ContainerStatus{ + {State: api.ContainerState{Waiting: &api.ContainerStateWaiting{Reason: "ContainerWaitingReason"}}, RestartCount: 3}, + {State: api.ContainerState{Terminated: &api.ContainerStateTerminated{Reason: "ContainerTerminatedReason"}}, RestartCount: 3}, + }, + }, + }, + 
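
The TestPrintPod cases here pin down the arithmetic behind the READY and RESTARTS cells: restarts are summed over all container statuses, and READY counts only containers that are both marked ready and actually running. A toy reduction consistent with those expectations (simplified types; the real printPodBase additionally folds waiting/terminated reasons into the status cell, as in the earlier hunk):

```go
package main

import "fmt"

type containerStatus struct {
	Ready    bool
	Running  bool
	Restarts int32
}

// podCells renders "NAME\tREADY\tSTATUS\tRESTARTS\t" the way the tests
// above expect: restarts summed, ready counted only for running containers.
func podCells(name, phase string, statuses []containerStatus, total int) string {
	restarts, ready := 0, 0
	for _, s := range statuses {
		restarts += int(s.Restarts)
		if s.Ready && s.Running {
			ready++
		}
	}
	return fmt.Sprintf("%s\t%d/%d\t%s\t%d\t", name, ready, total, phase, restarts)
}

func main() {
	statuses := []containerStatus{
		{Ready: true, Running: true, Restarts: 3},
		{Restarts: 3},
	}
	fmt.Printf("%q\n", podCells("test1", "podPhase", statuses, 2)) // "test1\t1/2\tpodPhase\t6\t"
}
```
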
"test3\t0/2\tContainerWaitingReason\t6\t", + }, + { + // Test ready is not enough for reporting running + api.Pod{ + ObjectMeta: api.ObjectMeta{Name: "test4"}, + Spec: api.PodSpec{Containers: make([]api.Container, 2)}, + Status: api.PodStatus{ + Phase: "podPhase", + ContainerStatuses: []api.ContainerStatus{ + {Ready: true, RestartCount: 3, State: api.ContainerState{Running: &api.ContainerStateRunning{}}}, + {Ready: true, RestartCount: 3}, + }, + }, + }, + "test4\t1/2\tpodPhase\t6\t", + }, + { + // Test ready is not enough for reporting running + api.Pod{ + ObjectMeta: api.ObjectMeta{Name: "test5"}, + Spec: api.PodSpec{Containers: make([]api.Container, 2)}, + Status: api.PodStatus{ + Reason: "OutOfDisk", + Phase: "podPhase", + ContainerStatuses: []api.ContainerStatus{ + {Ready: true, RestartCount: 3, State: api.ContainerState{Running: &api.ContainerStateRunning{}}}, + {Ready: true, RestartCount: 3}, + }, + }, + }, + "test5\t1/2\tOutOfDisk\t6\t", + }, + } + + buf := bytes.NewBuffer([]byte{}) + for _, test := range tests { + printPod(&test.pod, buf, PrintOptions{false, false, false, true, false, false, []string{}}) + // We ignore time + if !strings.HasPrefix(buf.String(), test.expect) { + t.Fatalf("Expected: %s, got: %s", test.expect, buf.String()) + } + buf.Reset() + } +} + +func TestPrintNonTerminatedPod(t *testing.T) { + tests := []struct { + pod api.Pod + expect string + }{ + { + // Test pod phase Running should be printed + api.Pod{ + ObjectMeta: api.ObjectMeta{Name: "test1"}, + Spec: api.PodSpec{Containers: make([]api.Container, 2)}, + Status: api.PodStatus{ + Phase: api.PodRunning, + ContainerStatuses: []api.ContainerStatus{ + {Ready: true, RestartCount: 3, State: api.ContainerState{Running: &api.ContainerStateRunning{}}}, + {RestartCount: 3}, + }, + }, + }, + "test1\t1/2\tRunning\t6\t", + }, + { + // Test pod phase Pending should be printed + api.Pod{ + ObjectMeta: api.ObjectMeta{Name: "test2"}, + Spec: api.PodSpec{Containers: make([]api.Container, 2)}, + Status: api.PodStatus{ + Phase: api.PodPending, + ContainerStatuses: []api.ContainerStatus{ + {Ready: true, RestartCount: 3, State: api.ContainerState{Running: &api.ContainerStateRunning{}}}, + {RestartCount: 3}, + }, + }, + }, + "test2\t1/2\tPending\t6\t", + }, + { + // Test pod phase Unknown should be printed + api.Pod{ + ObjectMeta: api.ObjectMeta{Name: "test3"}, + Spec: api.PodSpec{Containers: make([]api.Container, 2)}, + Status: api.PodStatus{ + Phase: api.PodUnknown, + ContainerStatuses: []api.ContainerStatus{ + {Ready: true, RestartCount: 3, State: api.ContainerState{Running: &api.ContainerStateRunning{}}}, + {RestartCount: 3}, + }, + }, + }, + "test3\t1/2\tUnknown\t6\t", + }, + { + // Test pod phase Succeeded shouldn't be printed + api.Pod{ + ObjectMeta: api.ObjectMeta{Name: "test4"}, + Spec: api.PodSpec{Containers: make([]api.Container, 2)}, + Status: api.PodStatus{ + Phase: api.PodSucceeded, + ContainerStatuses: []api.ContainerStatus{ + {Ready: true, RestartCount: 3, State: api.ContainerState{Running: &api.ContainerStateRunning{}}}, + {RestartCount: 3}, + }, + }, + }, + "", + }, + { + // Test pod phase Failed shouldn't be printed + api.Pod{ + ObjectMeta: api.ObjectMeta{Name: "test5"}, + Spec: api.PodSpec{Containers: make([]api.Container, 2)}, + Status: api.PodStatus{ + Phase: api.PodFailed, + ContainerStatuses: []api.ContainerStatus{ + {Ready: true, RestartCount: 3, State: api.ContainerState{Running: &api.ContainerStateRunning{}}}, + {Ready: true, RestartCount: 3}, + }, + }, + }, + "", + }, + } + + buf := 
bytes.NewBuffer([]byte{}) + for _, test := range tests { + printPod(&test.pod, buf, PrintOptions{false, false, false, false, false, false, []string{}}) + // We ignore time + if !strings.HasPrefix(buf.String(), test.expect) { + t.Fatalf("Expected: %s, got: %s", test.expect, buf.String()) + } + buf.Reset() + } +} + +func TestPrintPodWithLabels(t *testing.T) { + tests := []struct { + pod api.Pod + labelColumns []string + startsWith string + endsWith string + }{ + { + // Test name, num of containers, restarts, container ready status + api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: "test1", + Labels: map[string]string{"col1": "asd", "COL2": "zxc"}, + }, + Spec: api.PodSpec{Containers: make([]api.Container, 2)}, + Status: api.PodStatus{ + Phase: "podPhase", + ContainerStatuses: []api.ContainerStatus{ + {Ready: true, RestartCount: 3, State: api.ContainerState{Running: &api.ContainerStateRunning{}}}, + {RestartCount: 3}, + }, + }, + }, + []string{"col1", "COL2"}, + "test1\t1/2\tpodPhase\t6\t", + "\tasd\tzxc\n", + }, + { + // Test name, num of containers, restarts, container ready status + api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: "test1", + Labels: map[string]string{"col1": "asd", "COL2": "zxc"}, + }, + Spec: api.PodSpec{Containers: make([]api.Container, 2)}, + Status: api.PodStatus{ + Phase: "podPhase", + ContainerStatuses: []api.ContainerStatus{ + {Ready: true, RestartCount: 3, State: api.ContainerState{Running: &api.ContainerStateRunning{}}}, + {RestartCount: 3}, + }, + }, + }, + []string{}, + "test1\t1/2\tpodPhase\t6\t", + "\n", + }, + } + + buf := bytes.NewBuffer([]byte{}) + for _, test := range tests { + printPod(&test.pod, buf, PrintOptions{false, false, false, false, false, false, test.labelColumns}) + // We ignore time + if !strings.HasPrefix(buf.String(), test.startsWith) || !strings.HasSuffix(buf.String(), test.endsWith) { + t.Fatalf("Expected to start with: %s and end with: %s, but got: %s", test.startsWith, test.endsWith, buf.String()) + } + buf.Reset() + } +} + +type stringTestList []struct { + name, got, exp string +} + +func TestTranslateTimestamp(t *testing.T) { + tl := stringTestList{ + {"a while from now", translateTimestamp(unversioned.Time{Time: time.Now().Add(2.1e9)}), "<invalid>"}, + {"almost now", translateTimestamp(unversioned.Time{Time: time.Now().Add(1.9e9)}), "0s"}, + {"now", translateTimestamp(unversioned.Time{Time: time.Now()}), "0s"}, + {"unknown", translateTimestamp(unversioned.Time{}), "<unknown>"}, + {"30 seconds ago", translateTimestamp(unversioned.Time{Time: time.Now().Add(-3e10)}), "30s"}, + {"5 minutes ago", translateTimestamp(unversioned.Time{Time: time.Now().Add(-3e11)}), "5m"}, + {"an hour ago", translateTimestamp(unversioned.Time{Time: time.Now().Add(-6e12)}), "1h"}, + {"2 days ago", translateTimestamp(unversioned.Time{Time: time.Now().UTC().AddDate(0, 0, -2)}), "2d"}, + {"months ago", translateTimestamp(unversioned.Time{Time: time.Now().UTC().AddDate(0, 0, -90)}), "90d"}, + {"10 years ago", translateTimestamp(unversioned.Time{Time: time.Now().UTC().AddDate(-10, 0, 0)}), "10y"}, + } + for _, test := range tl { + if test.got != test.exp { + t.Errorf("On %v, expected '%v', but got '%v'", + test.name, test.exp, test.got) + } + } +} + +func TestPrintDeployment(t *testing.T) { + tests := []struct { + deployment extensions.Deployment + expect string + }{ + { + extensions.Deployment{ + ObjectMeta: api.ObjectMeta{ + Name: "test1", + CreationTimestamp: unversioned.Time{Time: time.Now().Add(1.9e9)}, + }, + Spec: extensions.DeploymentSpec{ + Replicas: 5, + Template:
api.PodTemplateSpec{ + Spec: api.PodSpec{Containers: make([]api.Container, 2)}, + }, + }, + Status: extensions.DeploymentStatus{ + Replicas: 10, + UpdatedReplicas: 2, + AvailableReplicas: 1, + UnavailableReplicas: 4, + }, + }, + "test1\t5\t10\t2\t1\t0s\n", + }, + } + + buf := bytes.NewBuffer([]byte{}) + for _, test := range tests { + printDeployment(&test.deployment, buf, PrintOptions{false, false, false, true, false, false, []string{}}) + if buf.String() != test.expect { + t.Fatalf("Expected: %s, got: %s", test.expect, buf.String()) + } + buf.Reset() + } +} + +func TestPrintDaemonSet(t *testing.T) { + tests := []struct { + ds extensions.DaemonSet + startsWith string + }{ + { + extensions.DaemonSet{ + ObjectMeta: api.ObjectMeta{ + Name: "test1", + CreationTimestamp: unversioned.Time{Time: time.Now().Add(1.9e9)}, + }, + Spec: extensions.DaemonSetSpec{ + Template: api.PodTemplateSpec{ + Spec: api.PodSpec{Containers: make([]api.Container, 2)}, + }, + }, + Status: extensions.DaemonSetStatus{ + CurrentNumberScheduled: 2, + DesiredNumberScheduled: 3, + }, + }, + "test1\t3\t2\t<none>\t0s\n", + }, + } + + buf := bytes.NewBuffer([]byte{}) + for _, test := range tests { + printDaemonSet(&test.ds, buf, PrintOptions{false, false, false, false, false, false, []string{}}) + if !strings.HasPrefix(buf.String(), test.startsWith) { + t.Fatalf("Expected to start with %s but got %s", test.startsWith, buf.String()) + } + buf.Reset() + } +} + +func TestPrintJob(t *testing.T) { + completions := int32(2) + tests := []struct { + job batch.Job + expect string + }{ + { + batch.Job{ + ObjectMeta: api.ObjectMeta{ + Name: "job1", + CreationTimestamp: unversioned.Time{Time: time.Now().Add(1.9e9)}, + }, + Spec: batch.JobSpec{ + Completions: &completions, + }, + Status: batch.JobStatus{ + Succeeded: 1, + }, + }, + "job1\t2\t1\t0s\n", + }, + { + batch.Job{ + ObjectMeta: api.ObjectMeta{ + Name: "job2", + CreationTimestamp: unversioned.Time{Time: time.Now().AddDate(-10, 0, 0)}, + }, + Spec: batch.JobSpec{ + Completions: nil, + }, + Status: batch.JobStatus{ + Succeeded: 0, + }, + }, + "job2\t<none>\t0\t10y\n", + }, + } + + buf := bytes.NewBuffer([]byte{}) + for _, test := range tests { + printJob(&test.job, buf, PrintOptions{false, false, false, true, false, false, []string{}}) + if buf.String() != test.expect { + t.Fatalf("Expected: %s, got: %s", test.expect, buf.String()) + } + buf.Reset() + } +} + +func TestPrintPodShowLabels(t *testing.T) { + tests := []struct { + pod api.Pod + startsWith string + endsWith string + showLabels bool + }{ + { + // Test name, num of containers, restarts, container ready status + api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: "test1", + Labels: map[string]string{"col1": "asd", "COL2": "zxc"}, + }, + Spec: api.PodSpec{Containers: make([]api.Container, 2)}, + Status: api.PodStatus{ + Phase: "podPhase", + ContainerStatuses: []api.ContainerStatus{ + {Ready: true, RestartCount: 3, State: api.ContainerState{Running: &api.ContainerStateRunning{}}}, + {RestartCount: 3}, + }, + }, + }, + "test1\t1/2\tpodPhase\t6\t", + "\tCOL2=zxc,col1=asd\n", + true, + }, + { + // Test name, num of containers, restarts, container ready status + api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: "test1", + Labels: map[string]string{"col3": "asd", "COL4": "zxc"}, + }, + Spec: api.PodSpec{Containers: make([]api.Container, 2)}, + Status: api.PodStatus{ + Phase: "podPhase", + ContainerStatuses: []api.ContainerStatus{ + {Ready: true, RestartCount: 3, State: api.ContainerState{Running: &api.ContainerStateRunning{}}}, + {RestartCount: 3}, +
}, + }, + }, + "test1\t1/2\tpodPhase\t6\t", + "\n", + false, + }, + } + + buf := bytes.NewBuffer([]byte{}) + for _, test := range tests { + printPod(&test.pod, buf, PrintOptions{false, false, false, false, test.showLabels, false, []string{}}) + // We ignore time + if !strings.HasPrefix(buf.String(), test.startsWith) || !strings.HasSuffix(buf.String(), test.endsWith) { + t.Fatalf("Expected to start with: %s and end with: %s, but got: %s", test.startsWith, test.endsWith, buf.String()) + } + buf.Reset() + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/rollback.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/rollback.go index 9f53b25af6f3..2e4f92b3050f 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/rollback.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/rollback.go @@ -49,6 +49,10 @@ type DeploymentRollbacker struct { } func (r *DeploymentRollbacker) Rollback(namespace, name string, updatedAnnotations map[string]string, toRevision int64, obj runtime.Object) (string, error) { + d := obj.(*extensions.Deployment) + if d.Spec.Paused { + return "", fmt.Errorf("you cannot rollback a paused deployment; resume it first with 'kubectl rollout resume' and try again") + } deploymentRollback := &extensions.DeploymentRollback{ Name: name, UpdatedAnnotations: updatedAnnotations, diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/rolling_updater.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/rolling_updater.go index e69d889a73f4..ea5bb887c378 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/rolling_updater.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/rolling_updater.go @@ -83,6 +83,10 @@ type RollingUpdaterConfig struct { // further, ensuring that total number of pods running at any time during // the update is atmost 130% of desired pods. MaxSurge intstr.IntOrString + // OnProgress is invoked if set during each scale cycle, to allow the caller to perform additional logic or + // abort the scale. If an error is returned the cleanup method will not be invoked. The percentage value + // is a synthetic "progress" calculation that represents the approximate percentage completion. + OnProgress func(oldRc, newRc *api.ReplicationController, percentage int) error } // RollingUpdaterCleanupPolicy is a cleanup action to take after the @@ -114,7 +118,7 @@ type RollingUpdater struct { // cleanup performs post deployment cleanup tasks for newRc and oldRc. cleanup func(oldRc, newRc *api.ReplicationController, config *RollingUpdaterConfig) error // getReadyPods returns the amount of old and new ready pods. - getReadyPods func(oldRc, newRc *api.ReplicationController) (int, int, error) + getReadyPods func(oldRc, newRc *api.ReplicationController) (int32, int32, error) } // NewRollingUpdater creates a RollingUpdater from a client. @@ -169,11 +173,12 @@ func (r *RollingUpdater) Update(config *RollingUpdaterConfig) error { fmt.Fprintf(out, "Created %s\n", newRc.Name) } // Extract the desired replica count from the controller. - desired, err := strconv.Atoi(newRc.Annotations[desiredReplicasAnnotation]) + desiredAnnotation, err := strconv.Atoi(newRc.Annotations[desiredReplicasAnnotation]) if err != nil { return fmt.Errorf("Unable to parse annotation for %s: %s=%s", newRc.Name, desiredReplicasAnnotation, newRc.Annotations[desiredReplicasAnnotation]) } + desired := int32(desiredAnnotation) // Extract the original replica count from the old controller, adding the // annotation if it doesn't yet exist. 
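
One observation on the Rollback guard added above: obj.(*extensions.Deployment) is an unchecked type assertion, which panics if obj is ever not a Deployment. The callers here presumably guarantee the type, but where that invariant is not certain, the comma-ok form degrades to an error instead. A generic illustration with a hypothetical local type, not the vendored code:

```go
package main

import (
	"errors"
	"fmt"
)

type deployment struct{ Paused bool }

// rollbackGuard shows the comma-ok alternative: a wrong dynamic type becomes
// an error value rather than a runtime panic.
func rollbackGuard(obj interface{}) error {
	d, ok := obj.(*deployment)
	if !ok {
		return fmt.Errorf("expected *deployment, got %T", obj)
	}
	if d.Paused {
		return errors.New("cannot rollback a paused deployment; resume it first")
	}
	return nil
}

func main() {
	fmt.Println(rollbackGuard(&deployment{Paused: true})) // paused error
	fmt.Println(rollbackGuard("not a deployment"))        // type error, no panic
	fmt.Println(rollbackGuard(&deployment{}))             // <nil>
}
```
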
_, hasOriginalAnnotation := oldRc.Annotations[originalReplicasAnnotation] @@ -185,7 +190,7 @@ func (r *RollingUpdater) Update(config *RollingUpdaterConfig) error { if existing.Annotations == nil { existing.Annotations = map[string]string{} } - existing.Annotations[originalReplicasAnnotation] = strconv.Itoa(existing.Spec.Replicas) + existing.Annotations[originalReplicasAnnotation] = strconv.Itoa(int(existing.Spec.Replicas)) updated, err := r.c.ReplicationControllers(existing.Namespace).Update(existing) if err != nil { return err @@ -204,7 +209,7 @@ func (r *RollingUpdater) Update(config *RollingUpdaterConfig) error { } // The minumum pods which must remain available througout the update // calculated for internal convenience. - minAvailable := integer.IntMax(0, desired-maxUnavailable) + minAvailable := int32(integer.IntMax(0, int(desired-maxUnavailable))) // If the desired new scale is 0, then the max unavailable is necessarily // the effective scale of the old RC regardless of the configuration // (equivalent to 100% maxUnavailable). @@ -216,6 +221,26 @@ func (r *RollingUpdater) Update(config *RollingUpdaterConfig) error { fmt.Fprintf(out, "Scaling up %s from %d to %d, scaling down %s from %d to 0 (keep %d pods available, don't exceed %d pods)\n", newRc.Name, newRc.Spec.Replicas, desired, oldRc.Name, oldRc.Spec.Replicas, minAvailable, desired+maxSurge) + // give a caller incremental notification and allow them to exit early + goal := desired - newRc.Spec.Replicas + if goal < 0 { + goal = -goal + } + progress := func(complete bool) error { + if config.OnProgress == nil { + return nil + } + progress := desired - newRc.Spec.Replicas + if progress < 0 { + progress = -progress + } + percentage := 100 + if !complete && goal > 0 { + percentage = int((goal - progress) * 100 / goal) + } + return config.OnProgress(oldRc, newRc, percentage) + } + // Scale newRc and oldRc until newRc has the desired number of replicas and // oldRc has 0 replicas. progressDeadline := time.Now().UnixNano() + config.Timeout.Nanoseconds() @@ -231,6 +256,11 @@ func (r *RollingUpdater) Update(config *RollingUpdaterConfig) error { } newRc = scaledRc + // notify the caller if necessary + if err := progress(false); err != nil { + return err + } + // Wait between scaling operations for things to settle. time.Sleep(config.UpdatePeriod) @@ -241,6 +271,11 @@ func (r *RollingUpdater) Update(config *RollingUpdaterConfig) error { } oldRc = scaledRc + // notify the caller if necessary + if err := progress(false); err != nil { + return err + } + // If we are making progress, continue to advance the progress deadline. // Otherwise, time out with an error. progressMade := (newRc.Spec.Replicas != newReplicas) || (oldRc.Spec.Replicas != oldReplicas) @@ -251,6 +286,11 @@ func (r *RollingUpdater) Update(config *RollingUpdaterConfig) error { } } + // notify the caller if necessary + if err := progress(true); err != nil { + return err + } + // Housekeeping and cleanup policy execution. return r.cleanup(oldRc, newRc, config) } @@ -258,7 +298,7 @@ func (r *RollingUpdater) Update(config *RollingUpdaterConfig) error { // scaleUp scales up newRc to desired by whatever increment is possible given // the configured surge threshold. scaleUp will safely no-op as necessary when // it detects redundancy or other relevant conditions. 
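
The progress closure introduced above reports a synthetic completion percentage: goal is the initial distance between newRc's replica count and the desired count, progress the remaining distance at each call, both taken as absolute values, with completion (or a zero goal) forcing 100. The arithmetic in isolation, with an illustrative signature:

```go
package main

import "fmt"

// percentage mirrors the progress computation above: how much of the initial
// replica-count gap has been closed, clamped to 100 on completion.
func percentage(desired, initial, current int32, complete bool) int {
	abs := func(v int32) int32 {
		if v < 0 {
			return -v
		}
		return v
	}
	goal := abs(desired - initial)
	if complete || goal == 0 {
		return 100
	}
	remaining := abs(desired - current)
	return int((goal - remaining) * 100 / goal)
}

func main() {
	fmt.Println(percentage(10, 0, 0, false))   // 0
	fmt.Println(percentage(10, 0, 5, false))   // 50
	fmt.Println(percentage(10, 0, 10, false))  // 100
	fmt.Println(percentage(10, 10, 10, false)) // 100 (zero goal treated as done)
}
```
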
-func (r *RollingUpdater) scaleUp(newRc, oldRc *api.ReplicationController, desired, maxSurge, maxUnavailable int, scaleRetryParams *RetryParams, config *RollingUpdaterConfig) (*api.ReplicationController, error) { +func (r *RollingUpdater) scaleUp(newRc, oldRc *api.ReplicationController, desired, maxSurge, maxUnavailable int32, scaleRetryParams *RetryParams, config *RollingUpdaterConfig) (*api.ReplicationController, error) { // If we're already at the desired, do nothing. if newRc.Spec.Replicas == desired { return newRc, nil @@ -291,7 +331,7 @@ func (r *RollingUpdater) scaleUp(newRc, oldRc *api.ReplicationController, desire // scaleDown scales down oldRc to 0 at whatever decrement possible given the // thresholds defined on the config. scaleDown will safely no-op as necessary // when it detects redundancy or other relevant conditions. -func (r *RollingUpdater) scaleDown(newRc, oldRc *api.ReplicationController, desired, minAvailable, maxUnavailable, maxSurge int, config *RollingUpdaterConfig) (*api.ReplicationController, error) { +func (r *RollingUpdater) scaleDown(newRc, oldRc *api.ReplicationController, desired, minAvailable, maxUnavailable, maxSurge int32, config *RollingUpdaterConfig) (*api.ReplicationController, error) { // Already scaled down; do nothing. if oldRc.Spec.Replicas == 0 { return oldRc, nil @@ -356,10 +396,10 @@ func (r *RollingUpdater) scaleAndWaitWithScaler(rc *api.ReplicationController, r // readyPods returns the old and new ready counts for their pods. // If a pod is observed as being ready, it's considered ready even // if it later becomes notReady. -func (r *RollingUpdater) readyPods(oldRc, newRc *api.ReplicationController) (int, int, error) { +func (r *RollingUpdater) readyPods(oldRc, newRc *api.ReplicationController) (int32, int32, error) { controllers := []*api.ReplicationController{oldRc, newRc} - oldReady := 0 - newReady := 0 + oldReady := int32(0) + newReady := int32(0) for i := range controllers { controller := controllers[i] @@ -504,19 +544,28 @@ func LoadExistingNextReplicationController(c client.ReplicationControllersNamesp return newRc, err } -func CreateNewControllerFromCurrentController(c client.Interface, codec runtime.Codec, namespace, oldName, newName, image, container, deploymentKey string) (*api.ReplicationController, error) { +type NewControllerConfig struct { + Namespace string + OldName, NewName string + Image string + Container string + DeploymentKey string + PullPolicy api.PullPolicy +} + +func CreateNewControllerFromCurrentController(c client.Interface, codec runtime.Codec, cfg *NewControllerConfig) (*api.ReplicationController, error) { containerIndex := 0 // load the old RC into the "new" RC - newRc, err := c.ReplicationControllers(namespace).Get(oldName) + newRc, err := c.ReplicationControllers(cfg.Namespace).Get(cfg.OldName) if err != nil { return nil, err } - if len(container) != 0 { + if len(cfg.Container) != 0 { containerFound := false for i, c := range newRc.Spec.Template.Spec.Containers { - if c.Name == container { + if c.Name == cfg.Container { containerIndex = i containerFound = true break @@ -524,31 +573,34 @@ func CreateNewControllerFromCurrentController(c client.Interface, codec runtime. 
} if !containerFound { - return nil, fmt.Errorf("container %s not found in pod", container) + return nil, fmt.Errorf("container %s not found in pod", cfg.Container) } } - if len(newRc.Spec.Template.Spec.Containers) > 1 && len(container) == 0 { + if len(newRc.Spec.Template.Spec.Containers) > 1 && len(cfg.Container) == 0 { return nil, goerrors.New("Must specify container to update when updating a multi-container pod") } if len(newRc.Spec.Template.Spec.Containers) == 0 { return nil, goerrors.New(fmt.Sprintf("Pod has no containers! (%v)", newRc)) } - newRc.Spec.Template.Spec.Containers[containerIndex].Image = image + newRc.Spec.Template.Spec.Containers[containerIndex].Image = cfg.Image + if len(cfg.PullPolicy) != 0 { + newRc.Spec.Template.Spec.Containers[containerIndex].ImagePullPolicy = cfg.PullPolicy + } newHash, err := api.HashObject(newRc, codec) if err != nil { return nil, err } - if len(newName) == 0 { - newName = fmt.Sprintf("%s-%s", newRc.Name, newHash) + if len(cfg.NewName) == 0 { + cfg.NewName = fmt.Sprintf("%s-%s", newRc.Name, newHash) } - newRc.Name = newName + newRc.Name = cfg.NewName - newRc.Spec.Selector[deploymentKey] = newHash - newRc.Spec.Template.Labels[deploymentKey] = newHash + newRc.Spec.Selector[cfg.DeploymentKey] = newHash + newRc.Spec.Template.Labels[cfg.DeploymentKey] = newHash // Clear resource version after hashing so that identical updates get different hashes. newRc.ResourceVersion = "" return newRc, nil diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/rolling_updater_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/rolling_updater_test.go new file mode 100644 index 000000000000..d55dd7dbb4b2 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/rolling_updater_test.go @@ -0,0 +1,1664 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package kubectl + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "reflect" + "testing" + "time" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/testapi" + apitesting "k8s.io/kubernetes/pkg/api/testing" + "k8s.io/kubernetes/pkg/client/restclient" + client "k8s.io/kubernetes/pkg/client/unversioned" + "k8s.io/kubernetes/pkg/client/unversioned/fake" + "k8s.io/kubernetes/pkg/client/unversioned/testclient" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/util/intstr" + "k8s.io/kubernetes/pkg/util/sets" +) + +func oldRc(replicas int, original int) *api.ReplicationController { + return &api.ReplicationController{ + ObjectMeta: api.ObjectMeta{ + Name: "foo-v1", + UID: "7764ae47-9092-11e4-8393-42010af018ff", + Annotations: map[string]string{ + originalReplicasAnnotation: fmt.Sprintf("%d", original), + }, + }, + Spec: api.ReplicationControllerSpec{ + Replicas: int32(replicas), + Selector: map[string]string{"version": "v1"}, + Template: &api.PodTemplateSpec{ + ObjectMeta: api.ObjectMeta{ + Name: "foo-v1", + Labels: map[string]string{"version": "v1"}, + }, + }, + }, + Status: api.ReplicationControllerStatus{ + Replicas: int32(replicas), + }, + } +} + +func newRc(replicas int, desired int) *api.ReplicationController { + rc := oldRc(replicas, replicas) + rc.Spec.Template = &api.PodTemplateSpec{ + ObjectMeta: api.ObjectMeta{ + Name: "foo-v2", + Labels: map[string]string{"version": "v2"}, + }, + } + rc.Spec.Selector = map[string]string{"version": "v2"} + rc.ObjectMeta = api.ObjectMeta{ + Name: "foo-v2", + Annotations: map[string]string{ + desiredReplicasAnnotation: fmt.Sprintf("%d", desired), + sourceIdAnnotation: "foo-v1:7764ae47-9092-11e4-8393-42010af018ff", + }, + } + return rc +} + +// TestUpdate performs complex scenario testing for rolling updates. It +// provides fine grained control over the states for each update interval to +// allow the expression of as many edge cases as possible. +func TestUpdate(t *testing.T) { + // up represents a simulated scale up event and expectation + type up struct { + // to is the expected replica count for a scale-up + to int + } + // down represents a simulated scale down event and expectation + type down struct { + // oldReady is the number of oldRc replicas which will be seen + // as ready during the scale down attempt + oldReady int + // newReady is the number of newRc replicas which will be seen + // as ready during the scale up attempt + newReady int + // to is the expected replica count for the scale down + to int + // noop and to are mutually exclusive; if noop is true, that means for + // this down event, no scaling attempt should be made (for example, if + // by scaling down, the readiness minimum would be crossed.) 
+ noop bool + } + + tests := []struct { + name string + // oldRc is the "from" deployment + oldRc *api.ReplicationController + // newRc is the "to" deployment + newRc *api.ReplicationController + // whether newRc existed (false means it was created) + newRcExists bool + maxUnavail intstr.IntOrString + maxSurge intstr.IntOrString + // expected is the sequence of up/down events that will be simulated and + // verified + expected []interface{} + // output is the expected textual output written + output string + }{ + { + name: "10->10 30/0 fast readiness", + oldRc: oldRc(10, 10), + newRc: newRc(0, 10), + newRcExists: false, + maxUnavail: intstr.FromString("30%"), + maxSurge: intstr.FromString("0%"), + expected: []interface{}{ + down{oldReady: 10, newReady: 0, to: 7}, + up{3}, + down{oldReady: 7, newReady: 3, to: 4}, + up{6}, + down{oldReady: 4, newReady: 6, to: 1}, + up{9}, + down{oldReady: 1, newReady: 9, to: 0}, + up{10}, + }, + output: `Created foo-v2 +Scaling up foo-v2 from 0 to 10, scaling down foo-v1 from 10 to 0 (keep 7 pods available, don't exceed 10 pods) +Scaling foo-v1 down to 7 +Scaling foo-v2 up to 3 +Scaling foo-v1 down to 4 +Scaling foo-v2 up to 6 +Scaling foo-v1 down to 1 +Scaling foo-v2 up to 9 +Scaling foo-v1 down to 0 +Scaling foo-v2 up to 10 +`, + }, + { + name: "10->10 30/0 delayed readiness", + oldRc: oldRc(10, 10), + newRc: newRc(0, 10), + newRcExists: false, + maxUnavail: intstr.FromString("30%"), + maxSurge: intstr.FromString("0%"), + expected: []interface{}{ + down{oldReady: 10, newReady: 0, to: 7}, + up{3}, + down{oldReady: 7, newReady: 0, noop: true}, + down{oldReady: 7, newReady: 1, to: 6}, + up{4}, + down{oldReady: 6, newReady: 4, to: 3}, + up{7}, + down{oldReady: 3, newReady: 7, to: 0}, + up{10}, + }, + output: `Created foo-v2 +Scaling up foo-v2 from 0 to 10, scaling down foo-v1 from 10 to 0 (keep 7 pods available, don't exceed 10 pods) +Scaling foo-v1 down to 7 +Scaling foo-v2 up to 3 +Scaling foo-v1 down to 6 +Scaling foo-v2 up to 4 +Scaling foo-v1 down to 3 +Scaling foo-v2 up to 7 +Scaling foo-v1 down to 0 +Scaling foo-v2 up to 10 +`, + }, { + name: "10->10 30/0 fast readiness, continuation", + oldRc: oldRc(7, 10), + newRc: newRc(3, 10), + newRcExists: false, + maxUnavail: intstr.FromString("30%"), + maxSurge: intstr.FromString("0%"), + expected: []interface{}{ + down{oldReady: 7, newReady: 3, to: 4}, + up{6}, + down{oldReady: 4, newReady: 6, to: 1}, + up{9}, + down{oldReady: 1, newReady: 9, to: 0}, + up{10}, + }, + output: `Created foo-v2 +Scaling up foo-v2 from 3 to 10, scaling down foo-v1 from 7 to 0 (keep 7 pods available, don't exceed 10 pods) +Scaling foo-v1 down to 4 +Scaling foo-v2 up to 6 +Scaling foo-v1 down to 1 +Scaling foo-v2 up to 9 +Scaling foo-v1 down to 0 +Scaling foo-v2 up to 10 +`, + }, { + name: "10->10 30/0 fast readiness, continued after restart which prevented first scale-up", + oldRc: oldRc(7, 10), + newRc: newRc(0, 10), + newRcExists: false, + maxUnavail: intstr.FromString("30%"), + maxSurge: intstr.FromString("0%"), + expected: []interface{}{ + down{oldReady: 7, newReady: 0, noop: true}, + up{3}, + down{oldReady: 7, newReady: 3, to: 4}, + up{6}, + down{oldReady: 4, newReady: 6, to: 1}, + up{9}, + down{oldReady: 1, newReady: 9, to: 0}, + up{10}, + }, + output: `Created foo-v2 +Scaling up foo-v2 from 0 to 10, scaling down foo-v1 from 7 to 0 (keep 7 pods available, don't exceed 10 pods) +Scaling foo-v2 up to 3 +Scaling foo-v1 down to 4 +Scaling foo-v2 up to 6 +Scaling foo-v1 down to 1 +Scaling foo-v2 up to 9 +Scaling foo-v1 down to 0 
+Scaling foo-v2 up to 10 +`, + }, { + name: "10->10 0/30 fast readiness", + oldRc: oldRc(10, 10), + newRc: newRc(0, 10), + newRcExists: false, + maxUnavail: intstr.FromString("0%"), + maxSurge: intstr.FromString("30%"), + expected: []interface{}{ + up{3}, + down{oldReady: 10, newReady: 3, to: 7}, + up{6}, + down{oldReady: 7, newReady: 6, to: 4}, + up{9}, + down{oldReady: 4, newReady: 9, to: 1}, + up{10}, + down{oldReady: 1, newReady: 10, to: 0}, + }, + output: `Created foo-v2 +Scaling up foo-v2 from 0 to 10, scaling down foo-v1 from 10 to 0 (keep 10 pods available, don't exceed 13 pods) +Scaling foo-v2 up to 3 +Scaling foo-v1 down to 7 +Scaling foo-v2 up to 6 +Scaling foo-v1 down to 4 +Scaling foo-v2 up to 9 +Scaling foo-v1 down to 1 +Scaling foo-v2 up to 10 +Scaling foo-v1 down to 0 +`, + }, { + name: "10->10 0/30 delayed readiness", + oldRc: oldRc(10, 10), + newRc: newRc(0, 10), + newRcExists: false, + maxUnavail: intstr.FromString("0%"), + maxSurge: intstr.FromString("30%"), + expected: []interface{}{ + up{3}, + down{oldReady: 10, newReady: 0, noop: true}, + down{oldReady: 10, newReady: 1, to: 9}, + up{4}, + down{oldReady: 9, newReady: 3, to: 7}, + up{6}, + down{oldReady: 7, newReady: 6, to: 4}, + up{9}, + down{oldReady: 4, newReady: 9, to: 1}, + up{10}, + down{oldReady: 1, newReady: 9, noop: true}, + down{oldReady: 1, newReady: 10, to: 0}, + }, + output: `Created foo-v2 +Scaling up foo-v2 from 0 to 10, scaling down foo-v1 from 10 to 0 (keep 10 pods available, don't exceed 13 pods) +Scaling foo-v2 up to 3 +Scaling foo-v1 down to 9 +Scaling foo-v2 up to 4 +Scaling foo-v1 down to 7 +Scaling foo-v2 up to 6 +Scaling foo-v1 down to 4 +Scaling foo-v2 up to 9 +Scaling foo-v1 down to 1 +Scaling foo-v2 up to 10 +Scaling foo-v1 down to 0 +`, + }, { + name: "10->10 10/20 fast readiness", + oldRc: oldRc(10, 10), + newRc: newRc(0, 10), + newRcExists: false, + maxUnavail: intstr.FromString("10%"), + maxSurge: intstr.FromString("20%"), + expected: []interface{}{ + up{2}, + down{oldReady: 10, newReady: 2, to: 7}, + up{5}, + down{oldReady: 7, newReady: 5, to: 4}, + up{8}, + down{oldReady: 4, newReady: 8, to: 1}, + up{10}, + down{oldReady: 1, newReady: 10, to: 0}, + }, + output: `Created foo-v2 +Scaling up foo-v2 from 0 to 10, scaling down foo-v1 from 10 to 0 (keep 9 pods available, don't exceed 12 pods) +Scaling foo-v2 up to 2 +Scaling foo-v1 down to 7 +Scaling foo-v2 up to 5 +Scaling foo-v1 down to 4 +Scaling foo-v2 up to 8 +Scaling foo-v1 down to 1 +Scaling foo-v2 up to 10 +Scaling foo-v1 down to 0 +`, + }, { + name: "10->10 10/20 delayed readiness", + oldRc: oldRc(10, 10), + newRc: newRc(0, 10), + newRcExists: false, + maxUnavail: intstr.FromString("10%"), + maxSurge: intstr.FromString("20%"), + expected: []interface{}{ + up{2}, + down{oldReady: 10, newReady: 2, to: 7}, + up{5}, + down{oldReady: 7, newReady: 4, to: 5}, + up{7}, + down{oldReady: 5, newReady: 4, noop: true}, + down{oldReady: 5, newReady: 7, to: 2}, + up{10}, + down{oldReady: 2, newReady: 9, to: 0}, + }, + output: `Created foo-v2 +Scaling up foo-v2 from 0 to 10, scaling down foo-v1 from 10 to 0 (keep 9 pods available, don't exceed 12 pods) +Scaling foo-v2 up to 2 +Scaling foo-v1 down to 7 +Scaling foo-v2 up to 5 +Scaling foo-v1 down to 5 +Scaling foo-v2 up to 7 +Scaling foo-v1 down to 2 +Scaling foo-v2 up to 10 +Scaling foo-v1 down to 0 +`, + }, { + name: "10->10 10/20 fast readiness continued after restart which prevented first scale-down", + oldRc: oldRc(10, 10), + newRc: newRc(2, 10), + newRcExists: false, + maxUnavail: 
intstr.FromString("10%"), + maxSurge: intstr.FromString("20%"), + expected: []interface{}{ + down{oldReady: 10, newReady: 2, to: 7}, + up{5}, + down{oldReady: 7, newReady: 5, to: 4}, + up{8}, + down{oldReady: 4, newReady: 8, to: 1}, + up{10}, + down{oldReady: 1, newReady: 10, to: 0}, + }, + output: `Created foo-v2 +Scaling up foo-v2 from 2 to 10, scaling down foo-v1 from 10 to 0 (keep 9 pods available, don't exceed 12 pods) +Scaling foo-v1 down to 7 +Scaling foo-v2 up to 5 +Scaling foo-v1 down to 4 +Scaling foo-v2 up to 8 +Scaling foo-v1 down to 1 +Scaling foo-v2 up to 10 +Scaling foo-v1 down to 0 +`, + }, { + name: "10->10 0/100 fast readiness", + oldRc: oldRc(10, 10), + newRc: newRc(0, 10), + newRcExists: false, + maxUnavail: intstr.FromString("0%"), + maxSurge: intstr.FromString("100%"), + expected: []interface{}{ + up{10}, + down{oldReady: 10, newReady: 10, to: 0}, + }, + output: `Created foo-v2 +Scaling up foo-v2 from 0 to 10, scaling down foo-v1 from 10 to 0 (keep 10 pods available, don't exceed 20 pods) +Scaling foo-v2 up to 10 +Scaling foo-v1 down to 0 +`, + }, { + name: "10->10 0/100 delayed readiness", + oldRc: oldRc(10, 10), + newRc: newRc(0, 10), + newRcExists: false, + maxUnavail: intstr.FromString("0%"), + maxSurge: intstr.FromString("100%"), + expected: []interface{}{ + up{10}, + down{oldReady: 10, newReady: 0, noop: true}, + down{oldReady: 10, newReady: 2, to: 8}, + down{oldReady: 8, newReady: 7, to: 3}, + down{oldReady: 3, newReady: 10, to: 0}, + }, + output: `Created foo-v2 +Scaling up foo-v2 from 0 to 10, scaling down foo-v1 from 10 to 0 (keep 10 pods available, don't exceed 20 pods) +Scaling foo-v2 up to 10 +Scaling foo-v1 down to 8 +Scaling foo-v1 down to 3 +Scaling foo-v1 down to 0 +`, + }, { + name: "10->10 100/0 fast readiness", + oldRc: oldRc(10, 10), + newRc: newRc(0, 10), + newRcExists: false, + maxUnavail: intstr.FromString("100%"), + maxSurge: intstr.FromString("0%"), + expected: []interface{}{ + down{oldReady: 10, newReady: 0, to: 0}, + up{10}, + }, + output: `Created foo-v2 +Scaling up foo-v2 from 0 to 10, scaling down foo-v1 from 10 to 0 (keep 0 pods available, don't exceed 10 pods) +Scaling foo-v1 down to 0 +Scaling foo-v2 up to 10 +`, + }, { + name: "1->1 25/25 maintain minimum availability", + oldRc: oldRc(1, 1), + newRc: newRc(0, 1), + newRcExists: false, + maxUnavail: intstr.FromString("25%"), + maxSurge: intstr.FromString("25%"), + expected: []interface{}{ + up{1}, + down{oldReady: 1, newReady: 0, noop: true}, + down{oldReady: 1, newReady: 1, to: 0}, + }, + output: `Created foo-v2 +Scaling up foo-v2 from 0 to 1, scaling down foo-v1 from 1 to 0 (keep 1 pods available, don't exceed 2 pods) +Scaling foo-v2 up to 1 +Scaling foo-v1 down to 0 +`, + }, { + name: "1->1 0/10 delayed readiness", + oldRc: oldRc(1, 1), + newRc: newRc(0, 1), + newRcExists: false, + maxUnavail: intstr.FromString("0%"), + maxSurge: intstr.FromString("10%"), + expected: []interface{}{ + up{1}, + down{oldReady: 1, newReady: 0, noop: true}, + down{oldReady: 1, newReady: 1, to: 0}, + }, + output: `Created foo-v2 +Scaling up foo-v2 from 0 to 1, scaling down foo-v1 from 1 to 0 (keep 1 pods available, don't exceed 2 pods) +Scaling foo-v2 up to 1 +Scaling foo-v1 down to 0 +`, + }, { + name: "1->1 10/10 delayed readiness", + oldRc: oldRc(1, 1), + newRc: newRc(0, 1), + newRcExists: false, + maxUnavail: intstr.FromString("10%"), + maxSurge: intstr.FromString("10%"), + expected: []interface{}{ + up{1}, + down{oldReady: 1, newReady: 0, noop: true}, + down{oldReady: 1, newReady: 1, to: 0}, + }, + 
output: `Created foo-v2 +Scaling up foo-v2 from 0 to 1, scaling down foo-v1 from 1 to 0 (keep 1 pods available, don't exceed 2 pods) +Scaling foo-v2 up to 1 +Scaling foo-v1 down to 0 +`, + }, { + name: "3->3 1/1 fast readiness (absolute values)", + oldRc: oldRc(3, 3), + newRc: newRc(0, 3), + newRcExists: false, + maxUnavail: intstr.FromInt(0), + maxSurge: intstr.FromInt(1), + expected: []interface{}{ + up{1}, + down{oldReady: 3, newReady: 1, to: 2}, + up{2}, + down{oldReady: 2, newReady: 2, to: 1}, + up{3}, + down{oldReady: 1, newReady: 3, to: 0}, + }, + output: `Created foo-v2 +Scaling up foo-v2 from 0 to 3, scaling down foo-v1 from 3 to 0 (keep 3 pods available, don't exceed 4 pods) +Scaling foo-v2 up to 1 +Scaling foo-v1 down to 2 +Scaling foo-v2 up to 2 +Scaling foo-v1 down to 1 +Scaling foo-v2 up to 3 +Scaling foo-v1 down to 0 +`, + }, { + name: "10->10 0/20 fast readiness, continued after restart which resulted in partial first scale-up", + oldRc: oldRc(6, 10), + newRc: newRc(5, 10), + newRcExists: false, + maxUnavail: intstr.FromString("0%"), + maxSurge: intstr.FromString("20%"), + expected: []interface{}{ + up{6}, + down{oldReady: 6, newReady: 6, to: 4}, + up{8}, + down{oldReady: 4, newReady: 8, to: 2}, + up{10}, + down{oldReady: 1, newReady: 10, to: 0}, + }, + output: `Created foo-v2 +Scaling up foo-v2 from 5 to 10, scaling down foo-v1 from 6 to 0 (keep 10 pods available, don't exceed 12 pods) +Scaling foo-v2 up to 6 +Scaling foo-v1 down to 4 +Scaling foo-v2 up to 8 +Scaling foo-v1 down to 2 +Scaling foo-v2 up to 10 +Scaling foo-v1 down to 0 +`, + }, { + name: "10->20 0/300 fast readiness", + oldRc: oldRc(10, 10), + newRc: newRc(0, 20), + newRcExists: false, + maxUnavail: intstr.FromString("0%"), + maxSurge: intstr.FromString("300%"), + expected: []interface{}{ + up{20}, + down{oldReady: 10, newReady: 20, to: 0}, + }, + output: `Created foo-v2 +Scaling up foo-v2 from 0 to 20, scaling down foo-v1 from 10 to 0 (keep 20 pods available, don't exceed 80 pods) +Scaling foo-v2 up to 20 +Scaling foo-v1 down to 0 +`, + }, { + name: "1->1 0/1 scale down unavailable rc to a ready rc (rollback)", + oldRc: oldRc(1, 1), + newRc: newRc(1, 1), + newRcExists: true, + maxUnavail: intstr.FromInt(0), + maxSurge: intstr.FromInt(1), + expected: []interface{}{ + up{1}, + down{oldReady: 0, newReady: 1, to: 0}, + }, + output: `Continuing update with existing controller foo-v2. +Scaling up foo-v2 from 1 to 1, scaling down foo-v1 from 1 to 0 (keep 1 pods available, don't exceed 2 pods) +Scaling foo-v1 down to 0 +`, + }, + { + name: "3->0 1/1 desired 0 (absolute values)", + oldRc: oldRc(3, 3), + newRc: newRc(0, 0), + newRcExists: true, + maxUnavail: intstr.FromInt(1), + maxSurge: intstr.FromInt(1), + expected: []interface{}{ + down{oldReady: 3, newReady: 0, to: 0}, + }, + output: `Continuing update with existing controller foo-v2. +Scaling up foo-v2 from 0 to 0, scaling down foo-v1 from 3 to 0 (keep 0 pods available, don't exceed 1 pods) +Scaling foo-v1 down to 0 +`, + }, + { + name: "3->0 10/10 desired 0 (percentages)", + oldRc: oldRc(3, 3), + newRc: newRc(0, 0), + newRcExists: true, + maxUnavail: intstr.FromString("10%"), + maxSurge: intstr.FromString("10%"), + expected: []interface{}{ + down{oldReady: 3, newReady: 0, to: 0}, + }, + output: `Continuing update with existing controller foo-v2. 
+Scaling up foo-v2 from 0 to 0, scaling down foo-v1 from 3 to 0 (keep 0 pods available, don't exceed 0 pods) +Scaling foo-v1 down to 0 +`, + }, + { + name: "3->0 10/10 desired 0 (create new RC)", + oldRc: oldRc(3, 3), + newRc: newRc(0, 0), + newRcExists: false, + maxUnavail: intstr.FromString("10%"), + maxSurge: intstr.FromString("10%"), + expected: []interface{}{ + down{oldReady: 3, newReady: 0, to: 0}, + }, + output: `Created foo-v2 +Scaling up foo-v2 from 0 to 0, scaling down foo-v1 from 3 to 0 (keep 0 pods available, don't exceed 0 pods) +Scaling foo-v1 down to 0 +`, + }, + { + name: "0->0 1/1 desired 0 (absolute values)", + oldRc: oldRc(0, 0), + newRc: newRc(0, 0), + newRcExists: true, + maxUnavail: intstr.FromInt(1), + maxSurge: intstr.FromInt(1), + expected: []interface{}{ + down{oldReady: 0, newReady: 0, to: 0}, + }, + output: `Continuing update with existing controller foo-v2. +Scaling up foo-v2 from 0 to 0, scaling down foo-v1 from 0 to 0 (keep 0 pods available, don't exceed 1 pods) +`, + }, { + name: "30->2 50%/0", + oldRc: oldRc(30, 30), + newRc: newRc(0, 2), + newRcExists: false, + maxUnavail: intstr.FromString("50%"), + maxSurge: intstr.FromInt(0), + expected: []interface{}{ + down{oldReady: 30, newReady: 0, to: 1}, + up{1}, + down{oldReady: 1, newReady: 2, to: 0}, + up{2}, + }, + output: `Created foo-v2 +Scaling up foo-v2 from 0 to 2, scaling down foo-v1 from 30 to 0 (keep 1 pods available, don't exceed 2 pods) +Scaling foo-v1 down to 1 +Scaling foo-v2 up to 1 +Scaling foo-v1 down to 0 +Scaling foo-v2 up to 2 +`, + }, + { + name: "2->2 1/0 blocked oldRc", + oldRc: oldRc(2, 2), + newRc: newRc(0, 2), + newRcExists: false, + maxUnavail: intstr.FromInt(1), + maxSurge: intstr.FromInt(0), + expected: []interface{}{ + down{oldReady: 1, newReady: 0, to: 1}, + up{1}, + down{oldReady: 1, newReady: 1, to: 0}, + up{2}, + }, + output: `Created foo-v2 +Scaling up foo-v2 from 0 to 2, scaling down foo-v1 from 2 to 0 (keep 1 pods available, don't exceed 2 pods) +Scaling foo-v1 down to 1 +Scaling foo-v2 up to 1 +Scaling foo-v1 down to 0 +Scaling foo-v2 up to 2 +`, + }, + { + name: "1->1 1/0 allow maxUnavailability", + oldRc: oldRc(1, 1), + newRc: newRc(0, 1), + newRcExists: false, + maxUnavail: intstr.FromString("1%"), + maxSurge: intstr.FromInt(0), + expected: []interface{}{ + down{oldReady: 1, newReady: 0, to: 0}, + up{1}, + }, + output: `Created foo-v2 +Scaling up foo-v2 from 0 to 1, scaling down foo-v1 from 1 to 0 (keep 0 pods available, don't exceed 1 pods) +Scaling foo-v1 down to 0 +Scaling foo-v2 up to 1 +`, + }, + { + name: "1->2 25/25 complex asymmetric deployment", + oldRc: oldRc(1, 1), + newRc: newRc(0, 2), + newRcExists: false, + maxUnavail: intstr.FromString("25%"), + maxSurge: intstr.FromString("25%"), + expected: []interface{}{ + up{2}, + down{oldReady: 1, newReady: 2, to: 0}, + }, + output: `Created foo-v2 +Scaling up foo-v2 from 0 to 2, scaling down foo-v1 from 1 to 0 (keep 2 pods available, don't exceed 3 pods) +Scaling foo-v2 up to 2 +Scaling foo-v1 down to 0 +`, + }, + { + name: "2->2 25/1 maxSurge trumps maxUnavailable", + oldRc: oldRc(2, 2), + newRc: newRc(0, 2), + newRcExists: false, + maxUnavail: intstr.FromString("25%"), + maxSurge: intstr.FromString("1%"), + expected: []interface{}{ + up{1}, + down{oldReady: 2, newReady: 1, to: 1}, + up{2}, + down{oldReady: 1, newReady: 2, to: 0}, + }, + output: `Created foo-v2 +Scaling up foo-v2 from 0 to 2, scaling down foo-v1 from 2 to 0 (keep 2 pods available, don't exceed 3 pods) +Scaling foo-v2 up to 1 +Scaling foo-v1 down to 1
+Scaling foo-v2 up to 2 +Scaling foo-v1 down to 0 +`, + }, + { + name: "2->2 25/0 maxUnavailable resolves to zero, then one", + oldRc: oldRc(2, 2), + newRc: newRc(0, 2), + newRcExists: false, + maxUnavail: intstr.FromString("25%"), + maxSurge: intstr.FromString("0%"), + expected: []interface{}{ + down{oldReady: 2, newReady: 0, to: 1}, + up{1}, + down{oldReady: 1, newReady: 1, to: 0}, + up{2}, + }, + output: `Created foo-v2 +Scaling up foo-v2 from 0 to 2, scaling down foo-v1 from 2 to 0 (keep 1 pods available, don't exceed 2 pods) +Scaling foo-v1 down to 1 +Scaling foo-v2 up to 1 +Scaling foo-v1 down to 0 +Scaling foo-v2 up to 2 +`, + }, + } + + for i, test := range tests { + // Extract expectations into some makeshift FIFOs so they can be returned + // in the correct order from the right places. This lets scale downs be + // expressed as a single event even though the data is used from multiple + // interface calls. + oldReady := []int{} + newReady := []int{} + upTo := []int{} + downTo := []int{} + for _, event := range test.expected { + switch e := event.(type) { + case down: + oldReady = append(oldReady, e.oldReady) + newReady = append(newReady, e.newReady) + if !e.noop { + downTo = append(downTo, e.to) + } + case up: + upTo = append(upTo, e.to) + } + } + + // Make a way to get the next item from our FIFOs. Returns -1 if the array + // is empty. + next := func(s *[]int) int { + slice := *s + v := -1 + if len(slice) > 0 { + v = slice[0] + if len(slice) > 1 { + *s = slice[1:] + } else { + *s = []int{} + } + } + return v + } + t.Logf("running test %d (%s) (up: %v, down: %v, oldReady: %v, newReady: %v)", i, test.name, upTo, downTo, oldReady, newReady) + updater := &RollingUpdater{ + ns: "default", + scaleAndWait: func(rc *api.ReplicationController, retry *RetryParams, wait *RetryParams) (*api.ReplicationController, error) { + // Return a scale up or scale down expectation depending on the rc, + // and throw errors if there is no expectation expressed for this + // call. + expected := -1 + switch { + case rc == test.newRc: + t.Logf("scaling up %s to %d", rc.Name, rc.Spec.Replicas) + expected = next(&upTo) + case rc == test.oldRc: + t.Logf("scaling down %s to %d", rc.Name, rc.Spec.Replicas) + expected = next(&downTo) + } + if expected == -1 { + t.Fatalf("unexpected scale of %s to %d", rc.Name, rc.Spec.Replicas) + } else if e, a := expected, int(rc.Spec.Replicas); e != a { + t.Fatalf("expected scale of %s to %d, got %d", rc.Name, e, a) + } + // Simulate the scale. + rc.Status.Replicas = rc.Spec.Replicas + return rc, nil + }, + getOrCreateTargetController: func(controller *api.ReplicationController, sourceId string) (*api.ReplicationController, bool, error) { + // Simulate a create vs. update of an existing controller. + return test.newRc, test.newRcExists, nil + }, + cleanup: func(oldRc, newRc *api.ReplicationController, config *RollingUpdaterConfig) error { + return nil + }, + } + // Set up a mock readiness check which handles the test assertions. + updater.getReadyPods = func(oldRc, newRc *api.ReplicationController) (int32, int32, error) { + // Return simulated readiness, and throw an error if this call has no + // expectations defined.
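+ // (next pops the front of each FIFO and returns -1 once a queue is
+ // empty, so -1 below means readiness was polled more times than the
+ // scenario declared expectations for.)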
+ oldReady := next(&oldReady) + newReady := next(&newReady) + if oldReady == -1 || newReady == -1 { + t.Fatalf("unexpected getReadyPods call for:\noldRc: %+v\nnewRc: %+v", oldRc, newRc) + } + return int32(oldReady), int32(newReady), nil + } + var buffer bytes.Buffer + config := &RollingUpdaterConfig{ + Out: &buffer, + OldRc: test.oldRc, + NewRc: test.newRc, + UpdatePeriod: 0, + Interval: time.Millisecond, + Timeout: time.Millisecond, + CleanupPolicy: DeleteRollingUpdateCleanupPolicy, + MaxUnavailable: test.maxUnavail, + MaxSurge: test.maxSurge, + } + err := updater.Update(config) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + if buffer.String() != test.output { + t.Errorf("Bad output. expected:\n%s\ngot:\n%s", test.output, buffer.String()) + } + } +} + +// TestUpdate_progressTimeout ensures that an update which isn't making any +// progress will eventually time out with a specified error. +func TestUpdate_progressTimeout(t *testing.T) { + oldRc := oldRc(2, 2) + newRc := newRc(0, 2) + updater := &RollingUpdater{ + ns: "default", + scaleAndWait: func(rc *api.ReplicationController, retry *RetryParams, wait *RetryParams) (*api.ReplicationController, error) { + // Do nothing. + return rc, nil + }, + getOrCreateTargetController: func(controller *api.ReplicationController, sourceId string) (*api.ReplicationController, bool, error) { + return newRc, false, nil + }, + cleanup: func(oldRc, newRc *api.ReplicationController, config *RollingUpdaterConfig) error { + return nil + }, + } + updater.getReadyPods = func(oldRc, newRc *api.ReplicationController) (int32, int32, error) { + // Coerce a timeout by pods never becoming ready. + return 0, 0, nil + } + var buffer bytes.Buffer + config := &RollingUpdaterConfig{ + Out: &buffer, + OldRc: oldRc, + NewRc: newRc, + UpdatePeriod: 0, + Interval: time.Millisecond, + Timeout: time.Millisecond, + CleanupPolicy: DeleteRollingUpdateCleanupPolicy, + MaxUnavailable: intstr.FromInt(0), + MaxSurge: intstr.FromInt(1), + } + err := updater.Update(config) + if err == nil { + t.Fatalf("expected an error") + } + if e, a := "timed out waiting for any update progress to be made", err.Error(); e != a { + t.Fatalf("expected error message: %s, got: %s", e, a) + } +} + +func TestUpdate_assignOriginalAnnotation(t *testing.T) { + oldRc := oldRc(1, 1) + delete(oldRc.Annotations, originalReplicasAnnotation) + newRc := newRc(1, 1) + var updatedOldRc *api.ReplicationController + fake := &testclient.Fake{} + fake.AddReactor("*", "*", func(action testclient.Action) (handled bool, ret runtime.Object, err error) { + switch a := action.(type) { + case testclient.GetAction: + return true, oldRc, nil + case testclient.UpdateAction: + updatedOldRc = a.GetObject().(*api.ReplicationController) + return true, updatedOldRc, nil + } + return false, nil, nil + }) + updater := &RollingUpdater{ + c: fake, + ns: "default", + scaleAndWait: func(rc *api.ReplicationController, retry *RetryParams, wait *RetryParams) (*api.ReplicationController, error) { + return rc, nil + }, + getOrCreateTargetController: func(controller *api.ReplicationController, sourceId string) (*api.ReplicationController, bool, error) { + return newRc, false, nil + }, + cleanup: func(oldRc, newRc *api.ReplicationController, config *RollingUpdaterConfig) error { + return nil + }, + getReadyPods: func(oldRc, newRc *api.ReplicationController) (int32, int32, error) { + return 1, 1, nil + }, + } + var buffer bytes.Buffer + config := &RollingUpdaterConfig{ + Out: &buffer, + OldRc: oldRc, + NewRc: newRc, + UpdatePeriod: 0, 
+ Interval: time.Millisecond, + Timeout: time.Millisecond, + CleanupPolicy: DeleteRollingUpdateCleanupPolicy, + MaxUnavailable: intstr.FromString("100%"), + } + err := updater.Update(config) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if updatedOldRc == nil { + t.Fatalf("expected rc to be updated") + } + if e, a := "1", updatedOldRc.Annotations[originalReplicasAnnotation]; e != a { + t.Fatalf("expected annotation value %s, got %s", e, a) + } +} + +func TestRollingUpdater_multipleContainersInPod(t *testing.T) { + tests := []struct { + oldRc *api.ReplicationController + newRc *api.ReplicationController + + container string + image string + deploymentKey string + }{ + { + oldRc: &api.ReplicationController{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + }, + Spec: api.ReplicationControllerSpec{ + Selector: map[string]string{ + "dk": "old", + }, + Template: &api.PodTemplateSpec{ + ObjectMeta: api.ObjectMeta{ + Labels: map[string]string{ + "dk": "old", + }, + }, + Spec: api.PodSpec{ + Containers: []api.Container{ + { + Name: "container1", + Image: "image1", + }, + { + Name: "container2", + Image: "image2", + }, + }, + }, + }, + }, + }, + newRc: &api.ReplicationController{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + }, + Spec: api.ReplicationControllerSpec{ + Selector: map[string]string{ + "dk": "old", + }, + Template: &api.PodTemplateSpec{ + ObjectMeta: api.ObjectMeta{ + Labels: map[string]string{ + "dk": "old", + }, + }, + Spec: api.PodSpec{ + Containers: []api.Container{ + { + Name: "container1", + Image: "newimage", + }, + { + Name: "container2", + Image: "image2", + }, + }, + }, + }, + }, + }, + container: "container1", + image: "newimage", + deploymentKey: "dk", + }, + { + oldRc: &api.ReplicationController{ + ObjectMeta: api.ObjectMeta{ + Name: "bar", + }, + Spec: api.ReplicationControllerSpec{ + Selector: map[string]string{ + "dk": "old", + }, + Template: &api.PodTemplateSpec{ + ObjectMeta: api.ObjectMeta{ + Labels: map[string]string{ + "dk": "old", + }, + }, + Spec: api.PodSpec{ + Containers: []api.Container{ + { + Name: "container1", + Image: "image1", + }, + }, + }, + }, + }, + }, + newRc: &api.ReplicationController{ + ObjectMeta: api.ObjectMeta{ + Name: "bar", + }, + Spec: api.ReplicationControllerSpec{ + Selector: map[string]string{ + "dk": "old", + }, + Template: &api.PodTemplateSpec{ + ObjectMeta: api.ObjectMeta{ + Labels: map[string]string{ + "dk": "old", + }, + }, + Spec: api.PodSpec{ + Containers: []api.Container{ + { + Name: "container1", + Image: "newimage", + }, + }, + }, + }, + }, + }, + container: "container1", + image: "newimage", + deploymentKey: "dk", + }, + } + + for _, test := range tests { + fake := &testclient.Fake{} + fake.AddReactor("*", "*", func(action testclient.Action) (handled bool, ret runtime.Object, err error) { + switch action.(type) { + case testclient.GetAction: + return true, test.oldRc, nil + } + return false, nil, nil + }) + + codec := testapi.Default.Codec() + + deploymentHash, err := api.HashObject(test.newRc, codec) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + + test.newRc.Spec.Selector[test.deploymentKey] = deploymentHash + test.newRc.Spec.Template.Labels[test.deploymentKey] = deploymentHash + test.newRc.Name = fmt.Sprintf("%s-%s", test.newRc.Name, deploymentHash) + + config := &NewControllerConfig{ + OldName: test.oldRc.ObjectMeta.Name, + NewName: test.newRc.ObjectMeta.Name, + Image: test.image, + Container: test.container, + DeploymentKey: test.deploymentKey, + } + updatedRc, err := 
CreateNewControllerFromCurrentController(fake, codec, config) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + if !reflect.DeepEqual(updatedRc, test.newRc) { + t.Errorf("expected:\n%#v\ngot:\n%#v\n", test.newRc, updatedRc) + } + } +} + +// TestRollingUpdater_cleanupWithClients ensures that the cleanup policy is +// correctly implemented. +func TestRollingUpdater_cleanupWithClients(t *testing.T) { + rc := oldRc(2, 2) + rcExisting := newRc(1, 3) + + tests := []struct { + name string + policy RollingUpdaterCleanupPolicy + responses []runtime.Object + expected []string + }{ + { + name: "preserve", + policy: PreserveRollingUpdateCleanupPolicy, + responses: []runtime.Object{rcExisting}, + expected: []string{ + "get", + "update", + "get", + "get", + }, + }, + { + name: "delete", + policy: DeleteRollingUpdateCleanupPolicy, + responses: []runtime.Object{rcExisting}, + expected: []string{ + "get", + "update", + "get", + "get", + "delete", + }, + }, + { + name: "rename", + policy: RenameRollingUpdateCleanupPolicy, + responses: []runtime.Object{rcExisting}, + expected: []string{ + "get", + "update", + "get", + "get", + "delete", + "create", + "delete", + }, + }, + } + + for _, test := range tests { + fake := testclient.NewSimpleFake(test.responses...) + updater := &RollingUpdater{ + ns: "default", + c: fake, + } + config := &RollingUpdaterConfig{ + Out: ioutil.Discard, + OldRc: rc, + NewRc: rcExisting, + UpdatePeriod: 0, + Interval: time.Millisecond, + Timeout: time.Millisecond, + CleanupPolicy: test.policy, + } + err := updater.cleanupWithClients(rc, rcExisting, config) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + if len(fake.Actions()) != len(test.expected) { + t.Fatalf("%s: unexpected actions: %v, expected %v", test.name, fake.Actions(), test.expected) + } + for j, action := range fake.Actions() { + if e, a := test.expected[j], action.GetVerb(); e != a { + t.Errorf("%s: unexpected action: expected %s, got %s", test.name, e, a) + } + } + } +} + +func TestFindSourceController(t *testing.T) { + ctrl1 := api.ReplicationController{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Annotations: map[string]string{ + sourceIdAnnotation: "bar:1234", + }, + }, + } + ctrl2 := api.ReplicationController{ + ObjectMeta: api.ObjectMeta{ + Name: "bar", + Annotations: map[string]string{ + sourceIdAnnotation: "foo:12345", + }, + }, + } + ctrl3 := api.ReplicationController{ + ObjectMeta: api.ObjectMeta{ + Annotations: map[string]string{ + sourceIdAnnotation: "baz:45667", + }, + }, + } + tests := []struct { + list *api.ReplicationControllerList + expectedController *api.ReplicationController + err error + name string + expectError bool + }{ + { + list: &api.ReplicationControllerList{}, + expectError: true, + }, + { + list: &api.ReplicationControllerList{ + Items: []api.ReplicationController{ctrl1}, + }, + name: "foo", + expectError: true, + }, + { + list: &api.ReplicationControllerList{ + Items: []api.ReplicationController{ctrl1}, + }, + name: "bar", + expectedController: &ctrl1, + }, + { + list: &api.ReplicationControllerList{ + Items: []api.ReplicationController{ctrl1, ctrl2}, + }, + name: "bar", + expectedController: &ctrl1, + }, + { + list: &api.ReplicationControllerList{ + Items: []api.ReplicationController{ctrl1, ctrl2}, + }, + name: "foo", + expectedController: &ctrl2, + }, + { + list: &api.ReplicationControllerList{ + Items: []api.ReplicationController{ctrl1, ctrl2, ctrl3}, + }, + name: "baz", + expectedController: &ctrl3, + }, + } + for _, test := range tests { + fakeClient := 
testclient.NewSimpleFake(test.list) + ctrl, err := FindSourceController(fakeClient, "default", test.name) + if test.expectError && err == nil { + t.Errorf("unexpected non-error") + } + if !test.expectError && err != nil { + t.Errorf("unexpected error") + } + if !reflect.DeepEqual(ctrl, test.expectedController) { + t.Errorf("expected:\n%v\ngot:\n%v\n", test.expectedController, ctrl) + } + } +} + +func TestUpdateExistingReplicationController(t *testing.T) { + tests := []struct { + rc *api.ReplicationController + name string + deploymentKey string + deploymentValue string + + expectedRc *api.ReplicationController + expectErr bool + }{ + { + rc: &api.ReplicationController{ + Spec: api.ReplicationControllerSpec{ + Template: &api.PodTemplateSpec{}, + }, + }, + name: "foo", + deploymentKey: "dk", + deploymentValue: "some-hash", + + expectedRc: &api.ReplicationController{ + ObjectMeta: api.ObjectMeta{ + Annotations: map[string]string{ + "kubectl.kubernetes.io/next-controller-id": "foo", + }, + }, + Spec: api.ReplicationControllerSpec{ + Selector: map[string]string{ + "dk": "some-hash", + }, + Template: &api.PodTemplateSpec{ + ObjectMeta: api.ObjectMeta{ + Labels: map[string]string{ + "dk": "some-hash", + }, + }, + }, + }, + }, + }, + { + rc: &api.ReplicationController{ + Spec: api.ReplicationControllerSpec{ + Template: &api.PodTemplateSpec{ + ObjectMeta: api.ObjectMeta{ + Labels: map[string]string{ + "dk": "some-other-hash", + }, + }, + }, + Selector: map[string]string{ + "dk": "some-other-hash", + }, + }, + }, + name: "foo", + deploymentKey: "dk", + deploymentValue: "some-hash", + + expectedRc: &api.ReplicationController{ + ObjectMeta: api.ObjectMeta{ + Annotations: map[string]string{ + "kubectl.kubernetes.io/next-controller-id": "foo", + }, + }, + Spec: api.ReplicationControllerSpec{ + Selector: map[string]string{ + "dk": "some-other-hash", + }, + Template: &api.PodTemplateSpec{ + ObjectMeta: api.ObjectMeta{ + Labels: map[string]string{ + "dk": "some-other-hash", + }, + }, + }, + }, + }, + }, + } + for _, test := range tests { + buffer := &bytes.Buffer{} + fakeClient := testclient.NewSimpleFake(test.expectedRc) + rc, err := UpdateExistingReplicationController(fakeClient, test.rc, "default", test.name, test.deploymentKey, test.deploymentValue, buffer) + if !reflect.DeepEqual(rc, test.expectedRc) { + t.Errorf("expected:\n%#v\ngot:\n%#v\n", test.expectedRc, rc) + } + if test.expectErr && err == nil { + t.Errorf("unexpected non-error") + } + if !test.expectErr && err != nil { + t.Errorf("unexpected error: %v", err) + } + } +} + +func TestUpdateWithRetries(t *testing.T) { + codec := testapi.Default.Codec() + rc := &api.ReplicationController{ + ObjectMeta: api.ObjectMeta{Name: "rc", + Labels: map[string]string{ + "foo": "bar", + }, + }, + Spec: api.ReplicationControllerSpec{ + Selector: map[string]string{ + "foo": "bar", + }, + Template: &api.PodTemplateSpec{ + ObjectMeta: api.ObjectMeta{ + Labels: map[string]string{ + "foo": "bar", + }, + }, + Spec: apitesting.DeepEqualSafePodSpec(), + }, + }, + } + + // Test end-to-end updating of the rc with retries. Essentially make sure the update handler + // sees the right updates, failures in update/get are handled properly, and that the updated + // rc with new resource version is returned to the caller. Without any of these, rollingupdate + // will fail cryptically.
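+ // The updates FIFO below feeds two failed PUTs (500) before a success
+ // (200), and gets feeds one failed GET before succeeding, so the retry
+ // loop must absorb both failure modes; the trailing length checks prove
+ // that every canned response was consumed.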
+ newRc := *rc + newRc.ResourceVersion = "2" + newRc.Spec.Selector["baz"] = "foobar" + header := http.Header{} + header.Set("Content-Type", runtime.ContentTypeJSON) + updates := []*http.Response{ + {StatusCode: 500, Header: header, Body: objBody(codec, &api.ReplicationController{})}, + {StatusCode: 500, Header: header, Body: objBody(codec, &api.ReplicationController{})}, + {StatusCode: 200, Header: header, Body: objBody(codec, &newRc)}, + } + gets := []*http.Response{ + {StatusCode: 500, Header: header, Body: objBody(codec, &api.ReplicationController{})}, + {StatusCode: 200, Header: header, Body: objBody(codec, rc)}, + } + fakeClient := &fake.RESTClient{ + Codec: codec, + Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { + switch p, m := req.URL.Path, req.Method; { + case p == testapi.Default.ResourcePath("replicationcontrollers", "default", "rc") && m == "PUT": + update := updates[0] + updates = updates[1:] + // We should always get an update with a valid rc even when the get fails. The rc should always + // contain the update. + if c, ok := readOrDie(t, req, codec).(*api.ReplicationController); !ok || !reflect.DeepEqual(rc, c) { + t.Errorf("Unexpected update body, got %+v expected %+v", c, rc) + } else if sel, ok := c.Spec.Selector["baz"]; !ok || sel != "foobar" { + t.Errorf("Expected selector label update, got %+v", c.Spec.Selector) + } else { + delete(c.Spec.Selector, "baz") + } + return update, nil + case p == testapi.Default.ResourcePath("replicationcontrollers", "default", "rc") && m == "GET": + get := gets[0] + gets = gets[1:] + return get, nil + default: + t.Fatalf("unexpected request: %#v\n%#v", req.URL, req) + return nil, nil + } + }), + } + clientConfig := &restclient.Config{ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}} + client := client.NewOrDie(clientConfig) + client.Client = fakeClient.Client + + if rc, err := updateWithRetries( + client.ReplicationControllers("default"), rc, func(c *api.ReplicationController) { + c.Spec.Selector["baz"] = "foobar" + }); err != nil { + t.Errorf("unexpected error: %v", err) + } else if sel, ok := rc.Spec.Selector["baz"]; !ok || sel != "foobar" || rc.ResourceVersion != "2" { + t.Errorf("Expected updated rc, got %+v", rc) + } + if len(updates) != 0 || len(gets) != 0 { + t.Errorf("Remaining updates %+v gets %+v", updates, gets) + } +} + +func readOrDie(t *testing.T, req *http.Request, codec runtime.Codec) runtime.Object { + data, err := ioutil.ReadAll(req.Body) + if err != nil { + t.Errorf("Error reading: %v", err) + t.FailNow() + } + obj, err := runtime.Decode(codec, data) + if err != nil { + t.Errorf("error decoding: %v", err) + t.FailNow() + } + return obj +} + +func objBody(codec runtime.Codec, obj runtime.Object) io.ReadCloser { + return ioutil.NopCloser(bytes.NewReader([]byte(runtime.EncodeOrDie(codec, obj)))) +} + +func TestAddDeploymentHash(t *testing.T) { + buf := &bytes.Buffer{} + codec := testapi.Default.Codec() + rc := &api.ReplicationController{ + ObjectMeta: api.ObjectMeta{Name: "rc"}, + Spec: api.ReplicationControllerSpec{ + Selector: map[string]string{ + "foo": "bar", + }, + Template: &api.PodTemplateSpec{ + ObjectMeta: api.ObjectMeta{ + Labels: map[string]string{ + "foo": "bar", + }, + }, + }, + }, + } + + podList := &api.PodList{ + Items: []api.Pod{ + {ObjectMeta: api.ObjectMeta{Name: "foo"}}, + {ObjectMeta: api.ObjectMeta{Name: "bar"}}, + {ObjectMeta: api.ObjectMeta{Name: "baz"}}, + }, + } + + seen := sets.String{} + updatedRc := false + fakeClient := 
&fake.RESTClient{ + Codec: codec, + Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { + header := http.Header{} + header.Set("Content-Type", runtime.ContentTypeJSON) + switch p, m := req.URL.Path, req.Method; { + case p == testapi.Default.ResourcePath("pods", "default", "") && m == "GET": + if req.URL.RawQuery != "labelSelector=foo%3Dbar" { + t.Errorf("Unexpected query string: %s", req.URL.RawQuery) + } + return &http.Response{StatusCode: 200, Header: header, Body: objBody(codec, podList)}, nil + case p == testapi.Default.ResourcePath("pods", "default", "foo") && m == "PUT": + seen.Insert("foo") + obj := readOrDie(t, req, codec) + podList.Items[0] = *(obj.(*api.Pod)) + return &http.Response{StatusCode: 200, Header: header, Body: objBody(codec, &podList.Items[0])}, nil + case p == testapi.Default.ResourcePath("pods", "default", "bar") && m == "PUT": + seen.Insert("bar") + obj := readOrDie(t, req, codec) + podList.Items[1] = *(obj.(*api.Pod)) + return &http.Response{StatusCode: 200, Header: header, Body: objBody(codec, &podList.Items[1])}, nil + case p == testapi.Default.ResourcePath("pods", "default", "baz") && m == "PUT": + seen.Insert("baz") + obj := readOrDie(t, req, codec) + podList.Items[2] = *(obj.(*api.Pod)) + return &http.Response{StatusCode: 200, Header: header, Body: objBody(codec, &podList.Items[2])}, nil + case p == testapi.Default.ResourcePath("replicationcontrollers", "default", "rc") && m == "PUT": + updatedRc = true + return &http.Response{StatusCode: 200, Header: header, Body: objBody(codec, rc)}, nil + default: + t.Fatalf("unexpected request: %#v\n%#v", req.URL, req) + return nil, nil + } + }), + } + clientConfig := &restclient.Config{ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}} + client := client.NewOrDie(clientConfig) + client.Client = fakeClient.Client + + if _, err := AddDeploymentKeyToReplicationController(rc, client, "dk", "hash", api.NamespaceDefault, buf); err != nil { + t.Errorf("unexpected error: %v", err) + } + for _, pod := range podList.Items { + if !seen.Has(pod.Name) { + t.Errorf("Missing update for pod: %s", pod.Name) + } + } + if !updatedRc { + t.Errorf("Failed to update replication controller with new labels") + } +} + +func TestRollingUpdater_readyPods(t *testing.T) { + mkpod := func(owner *api.ReplicationController, ready bool) *api.Pod { + labels := map[string]string{} + for k, v := range owner.Spec.Selector { + labels[k] = v + } + status := api.ConditionTrue + if !ready { + status = api.ConditionFalse + } + return &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: "pod", + Labels: labels, + }, + Status: api.PodStatus{ + Conditions: []api.PodCondition{ + { + Type: api.PodReady, + Status: status, + }, + }, + }, + } + } + + tests := []struct { + oldRc *api.ReplicationController + newRc *api.ReplicationController + // expected old/new ready counts + oldReady int32 + newReady int32 + // pods owned by the rcs; indicate whether they're ready + oldPods []bool + newPods []bool + }{ + { + oldRc: oldRc(4, 4), + newRc: newRc(4, 4), + oldReady: 4, + newReady: 2, + oldPods: []bool{ + true, + true, + true, + true, + }, + newPods: []bool{ + true, + false, + true, + false, + }, + }, + { + oldRc: oldRc(4, 4), + newRc: newRc(4, 4), + oldReady: 0, + newReady: 1, + oldPods: []bool{ + false, + }, + newPods: []bool{ + true, + }, + }, + { + oldRc: oldRc(4, 4), + newRc: newRc(4, 4), + oldReady: 1, + newReady: 0, + oldPods: []bool{ + true, + }, + newPods: []bool{ + false, + }, + }, + } + + for i, test :=
range tests { + t.Logf("evaluating test %d", i) + // Populate the fake client with pods associated with their owners. + pods := []runtime.Object{} + for _, ready := range test.oldPods { + pods = append(pods, mkpod(test.oldRc, ready)) + } + for _, ready := range test.newPods { + pods = append(pods, mkpod(test.newRc, ready)) + } + client := testclient.NewSimpleFake(pods...) + + updater := &RollingUpdater{ + ns: "default", + c: client, + } + oldReady, newReady, err := updater.readyPods(test.oldRc, test.newRc) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + if e, a := test.oldReady, oldReady; e != a { + t.Errorf("expected old ready %d, got %d", e, a) + } + if e, a := test.newReady, newReady; e != a { + t.Errorf("expected new ready %d, got %d", e, a) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/rollout_status.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/rollout_status.go new file mode 100644 index 000000000000..dc39865d450d --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/rollout_status.go @@ -0,0 +1,57 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubectl + +import ( + "fmt" + + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/apis/extensions" + client "k8s.io/kubernetes/pkg/client/unversioned" +) + +// StatusViewer provides an interface for resources that provide rollout status.
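A hedged sketch of consuming the viewer defined below; the polling loop, namespace, deployment name, and out writer are illustrative assumptions, not part of this file:

    viewer, err := StatusViewerFor(extensions.Kind("Deployment"), c) // c is a client.Interface
    if err != nil {
        return err
    }
    for {
        msg, done, err := viewer.Status("default", "nginx")
        if err != nil {
            return err
        }
        fmt.Fprint(out, msg) // Status messages already end in a newline
        if done {
            return nil
        }
        time.Sleep(time.Second) // illustrative poll interval; time import assumed
    }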
+type StatusViewer interface { + Status(namespace, name string) (string, bool, error) +} + +func StatusViewerFor(kind unversioned.GroupKind, c client.Interface) (StatusViewer, error) { + switch kind { + case extensions.Kind("Deployment"): + return &DeploymentStatusViewer{c.Extensions()}, nil + } + return nil, fmt.Errorf("no status viewer has been implemented for %v", kind) +} + +type DeploymentStatusViewer struct { + c client.ExtensionsInterface +} + +// Status returns a message describing deployment status, and a bool value indicating if the status is considered done +func (s *DeploymentStatusViewer) Status(namespace, name string) (string, bool, error) { + deployment, err := s.c.Deployments(namespace).Get(name) + if err != nil { + return "", false, err + } + if deployment.Generation <= deployment.Status.ObservedGeneration { + if deployment.Status.UpdatedReplicas == deployment.Spec.Replicas { + return fmt.Sprintf("deployment %s successfully rolled out\n", name), true, nil + } + return fmt.Sprintf("Waiting for rollout to finish: %d out of %d new replicas have been updated...\n", deployment.Status.UpdatedReplicas, deployment.Spec.Replicas), false, nil + } + return fmt.Sprintf("Waiting for deployment spec update to be observed...\n"), false, nil +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/run.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/run.go index 688b570e7d18..4e076c07195f 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/run.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/run.go @@ -25,6 +25,7 @@ import ( "k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/apis/batch" batchv1 "k8s.io/kubernetes/pkg/apis/batch/v1" "k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/runtime" @@ -104,7 +105,7 @@ func (DeploymentV1Beta1) Generate(genericParams map[string]interface{}) (runtime Labels: labels, }, Spec: extensions.DeploymentSpec{ - Replicas: count, + Replicas: int32(count), Selector: &unversioned.LabelSelector{MatchLabels: labels}, Template: api.PodTemplateSpec{ ObjectMeta: api.ObjectMeta{ @@ -280,12 +281,12 @@ func (JobV1Beta1) Generate(genericParams map[string]interface{}) (runtime.Object } podSpec.RestartPolicy = restartPolicy - job := extensions.Job{ + job := batch.Job{ ObjectMeta: api.ObjectMeta{ Name: name, Labels: labels, }, - Spec: extensions.JobSpec{ + Spec: batch.JobSpec{ Selector: &unversioned.LabelSelector{ MatchLabels: labels, }, @@ -433,7 +434,7 @@ func populateResourceList(spec string) (api.ResourceList, error) { if err != nil { return nil, err } - result[resourceName] = *resourceQuantity + result[resourceName] = resourceQuantity } return result, nil } @@ -457,7 +458,7 @@ func populateV1ResourceList(spec string) (v1.ResourceList, error) { if err != nil { return nil, err } - result[resourceName] = *resourceQuantity + result[resourceName] = resourceQuantity } return result, nil } @@ -604,7 +605,7 @@ func (BasicReplicationController) Generate(genericParams map[string]interface{}) Labels: labels, }, Spec: api.ReplicationControllerSpec{ - Replicas: count, + Replicas: int32(count), Selector: labels, Template: &api.PodTemplateSpec{ ObjectMeta: api.ObjectMeta{ @@ -679,11 +680,11 @@ func updatePodPorts(params map[string]string, podSpec *api.PodSpec) (err error) if port > 0 { podSpec.Containers[0].Ports = []api.ContainerPort{ { - ContainerPort: port, + ContainerPort: int32(port), }, } if hostPort > 0 { - 
podSpec.Containers[0].Ports[0].HostPort = hostPort + podSpec.Containers[0].Ports[0].HostPort = int32(hostPort) } } return nil @@ -826,7 +827,7 @@ func (BasicPod) Generate(genericParams map[string]interface{}) (runtime.Object, } func parseEnvs(envArray []string) ([]api.EnvVar, error) { - envs := []api.EnvVar{} + envs := make([]api.EnvVar, 0, len(envArray)) for _, env := range envArray { pos := strings.Index(env, "=") if pos == -1 { diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/run_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/run_test.go new file mode 100644 index 000000000000..88238a8fb05c --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/run_test.go @@ -0,0 +1,887 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubectl + +import ( + "reflect" + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/resource" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/apis/batch" + "k8s.io/kubernetes/pkg/apis/extensions" +) + +func TestGenerate(t *testing.T) { + tests := []struct { + params map[string]interface{} + expected *api.ReplicationController + expectErr bool + }{ + { + params: map[string]interface{}{ + "name": "foo", + "image": "someimage", + "replicas": "1", + "port": "-1", + }, + expected: &api.ReplicationController{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Labels: map[string]string{"run": "foo"}, + }, + Spec: api.ReplicationControllerSpec{ + Replicas: 1, + Selector: map[string]string{"run": "foo"}, + Template: &api.PodTemplateSpec{ + ObjectMeta: api.ObjectMeta{ + Labels: map[string]string{"run": "foo"}, + }, + Spec: api.PodSpec{ + Containers: []api.Container{ + { + Name: "foo", + Image: "someimage", + }, + }, + }, + }, + }, + }, + }, + + { + params: map[string]interface{}{ + "name": "foo", + "image": "someimage", + "replicas": "1", + "port": "-1", + "env": []string{"a=b", "c=d"}, + }, + expected: &api.ReplicationController{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Labels: map[string]string{"run": "foo"}, + }, + Spec: api.ReplicationControllerSpec{ + Replicas: 1, + Selector: map[string]string{"run": "foo"}, + Template: &api.PodTemplateSpec{ + ObjectMeta: api.ObjectMeta{ + Labels: map[string]string{"run": "foo"}, + }, + Spec: api.PodSpec{ + Containers: []api.Container{ + { + Name: "foo", + Image: "someimage", + Env: []api.EnvVar{ + { + Name: "a", + Value: "b", + }, + { + Name: "c", + Value: "d", + }, + }, + }, + }, + }, + }, + }, + }, + }, + + { + params: map[string]interface{}{ + "name": "foo", + "image": "someimage", + "replicas": "1", + "port": "-1", + "args": []string{"bar", "baz", "blah"}, + }, + expected: &api.ReplicationController{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Labels: map[string]string{"run": "foo"}, + }, + Spec: api.ReplicationControllerSpec{ + Replicas: 1, + Selector: map[string]string{"run": "foo"}, + Template: &api.PodTemplateSpec{ + ObjectMeta: api.ObjectMeta{ + Labels: map[string]string{"run": "foo"}, 
+ }, + Spec: api.PodSpec{ + Containers: []api.Container{ + { + Name: "foo", + Image: "someimage", + Args: []string{"bar", "baz", "blah"}, + }, + }, + }, + }, + }, + }, + }, + { + params: map[string]interface{}{ + "name": "foo", + "image": "someimage", + "replicas": "1", + "port": "-1", + "args": []string{"bar", "baz", "blah"}, + "command": "true", + }, + expected: &api.ReplicationController{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Labels: map[string]string{"run": "foo"}, + }, + Spec: api.ReplicationControllerSpec{ + Replicas: 1, + Selector: map[string]string{"run": "foo"}, + Template: &api.PodTemplateSpec{ + ObjectMeta: api.ObjectMeta{ + Labels: map[string]string{"run": "foo"}, + }, + Spec: api.PodSpec{ + Containers: []api.Container{ + { + Name: "foo", + Image: "someimage", + Command: []string{"bar", "baz", "blah"}, + }, + }, + }, + }, + }, + }, + }, + { + params: map[string]interface{}{ + "name": "foo", + "image": "someimage", + "replicas": "1", + "port": "80", + }, + expected: &api.ReplicationController{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Labels: map[string]string{"run": "foo"}, + }, + Spec: api.ReplicationControllerSpec{ + Replicas: 1, + Selector: map[string]string{"run": "foo"}, + Template: &api.PodTemplateSpec{ + ObjectMeta: api.ObjectMeta{ + Labels: map[string]string{"run": "foo"}, + }, + Spec: api.PodSpec{ + Containers: []api.Container{ + { + Name: "foo", + Image: "someimage", + Ports: []api.ContainerPort{ + { + ContainerPort: 80, + }, + }, + }, + }, + }, + }, + }, + }, + }, + { + params: map[string]interface{}{ + "name": "foo", + "image": "someimage", + "replicas": "1", + "port": "80", + "hostport": "80", + }, + expected: &api.ReplicationController{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Labels: map[string]string{"run": "foo"}, + }, + Spec: api.ReplicationControllerSpec{ + Replicas: 1, + Selector: map[string]string{"run": "foo"}, + Template: &api.PodTemplateSpec{ + ObjectMeta: api.ObjectMeta{ + Labels: map[string]string{"run": "foo"}, + }, + Spec: api.PodSpec{ + Containers: []api.Container{ + { + Name: "foo", + Image: "someimage", + Ports: []api.ContainerPort{ + { + ContainerPort: 80, + HostPort: 80, + }, + }, + }, + }, + }, + }, + }, + }, + }, + { + params: map[string]interface{}{ + "name": "foo", + "image": "someimage", + "replicas": "1", + "hostport": "80", + }, + expected: nil, + expectErr: true, + }, + { + params: map[string]interface{}{ + "name": "foo", + "image": "someimage", + "replicas": "1", + "labels": "foo=bar,baz=blah", + }, + expected: &api.ReplicationController{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Labels: map[string]string{"foo": "bar", "baz": "blah"}, + }, + Spec: api.ReplicationControllerSpec{ + Replicas: 1, + Selector: map[string]string{"foo": "bar", "baz": "blah"}, + Template: &api.PodTemplateSpec{ + ObjectMeta: api.ObjectMeta{ + Labels: map[string]string{"foo": "bar", "baz": "blah"}, + }, + Spec: api.PodSpec{ + Containers: []api.Container{ + { + Name: "foo", + Image: "someimage", + }, + }, + }, + }, + }, + }, + }, + { + params: map[string]interface{}{ + "name": "foo", + "image": "someimage", + "replicas": "1", + "hostport": "80", + }, + expected: nil, + expectErr: true, + }, + { + params: map[string]interface{}{ + "name": "foo", + "image": "someimage", + "replicas": "1", + "labels": "foo=bar,baz=blah", + "requests": "cpu100m,memory=100Mi", + }, + expected: nil, + expectErr: true, + }, + { + params: map[string]interface{}{ + "name": "foo", + "image": "someimage", + "replicas": "1", + "labels": "foo=bar,baz=blah", + 
"requests": "cpu=100m&memory=100Mi", + }, + expected: nil, + expectErr: true, + }, + { + params: map[string]interface{}{ + "name": "foo", + "image": "someimage", + "replicas": "1", + "labels": "foo=bar,baz=blah", + "requests": "cpu=", + }, + expected: nil, + expectErr: true, + }, + { + params: map[string]interface{}{ + "name": "foo", + "image": "someimage", + "replicas": "1", + "labels": "foo=bar,baz=blah", + "requests": "cpu=100m,memory=100Mi", + "limits": "cpu=400m,memory=200Mi", + }, + expected: &api.ReplicationController{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Labels: map[string]string{"foo": "bar", "baz": "blah"}, + }, + Spec: api.ReplicationControllerSpec{ + Replicas: 1, + Selector: map[string]string{"foo": "bar", "baz": "blah"}, + Template: &api.PodTemplateSpec{ + ObjectMeta: api.ObjectMeta{ + Labels: map[string]string{"foo": "bar", "baz": "blah"}, + }, + Spec: api.PodSpec{ + Containers: []api.Container{ + { + Name: "foo", + Image: "someimage", + Resources: api.ResourceRequirements{ + Requests: api.ResourceList{ + api.ResourceCPU: resource.MustParse("100m"), + api.ResourceMemory: resource.MustParse("100Mi"), + }, + Limits: api.ResourceList{ + api.ResourceCPU: resource.MustParse("400m"), + api.ResourceMemory: resource.MustParse("200Mi"), + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + generator := BasicReplicationController{} + for i, test := range tests { + obj, err := generator.Generate(test.params) + t.Logf("%d: %#v", i, obj) + if !test.expectErr && err != nil { + t.Errorf("unexpected error: %v", err) + continue + } + if test.expectErr && err != nil { + continue + } + if !reflect.DeepEqual(obj.(*api.ReplicationController).Spec.Template, test.expected.Spec.Template) { + t.Errorf("\nexpected:\n%#v\nsaw:\n%#v", test.expected.Spec.Template, obj.(*api.ReplicationController).Spec.Template) + } + } +} + +func TestGeneratePod(t *testing.T) { + tests := []struct { + params map[string]interface{} + expected *api.Pod + expectErr bool + }{ + { + params: map[string]interface{}{ + "name": "foo", + "image": "someimage", + "port": "-1", + }, + expected: &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + }, + Spec: api.PodSpec{ + Containers: []api.Container{ + { + Name: "foo", + Image: "someimage", + ImagePullPolicy: api.PullIfNotPresent, + }, + }, + DNSPolicy: api.DNSClusterFirst, + RestartPolicy: api.RestartPolicyAlways, + }, + }, + }, + { + params: map[string]interface{}{ + "name": "foo", + "image": "someimage", + "env": []string{"a", "c"}, + }, + + expected: nil, + expectErr: true, + }, + { + params: map[string]interface{}{ + "name": "foo", + "image": "someimage", + "env": []string{"a=b", "c=d"}, + }, + expected: &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + }, + Spec: api.PodSpec{ + Containers: []api.Container{ + { + Name: "foo", + Image: "someimage", + ImagePullPolicy: api.PullIfNotPresent, + Env: []api.EnvVar{ + { + Name: "a", + Value: "b", + }, + { + Name: "c", + Value: "d", + }, + }, + }, + }, + DNSPolicy: api.DNSClusterFirst, + RestartPolicy: api.RestartPolicyAlways, + }, + }, + }, + { + params: map[string]interface{}{ + "name": "foo", + "image": "someimage", + "port": "80", + }, + expected: &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + }, + Spec: api.PodSpec{ + Containers: []api.Container{ + { + Name: "foo", + Image: "someimage", + ImagePullPolicy: api.PullIfNotPresent, + Ports: []api.ContainerPort{ + { + ContainerPort: 80, + }, + }, + }, + }, + DNSPolicy: api.DNSClusterFirst, + RestartPolicy: api.RestartPolicyAlways, + }, + }, + }, + { + params: 
map[string]interface{}{ + "name": "foo", + "image": "someimage", + "port": "80", + "hostport": "80", + }, + expected: &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + }, + Spec: api.PodSpec{ + Containers: []api.Container{ + { + Name: "foo", + Image: "someimage", + ImagePullPolicy: api.PullIfNotPresent, + Ports: []api.ContainerPort{ + { + ContainerPort: 80, + HostPort: 80, + }, + }, + }, + }, + DNSPolicy: api.DNSClusterFirst, + RestartPolicy: api.RestartPolicyAlways, + }, + }, + }, + { + params: map[string]interface{}{ + "name": "foo", + "image": "someimage", + "hostport": "80", + }, + expected: nil, + expectErr: true, + }, + { + params: map[string]interface{}{ + "name": "foo", + "image": "someimage", + "replicas": "1", + "labels": "foo=bar,baz=blah", + }, + expected: &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Labels: map[string]string{"foo": "bar", "baz": "blah"}, + }, + Spec: api.PodSpec{ + Containers: []api.Container{ + { + Name: "foo", + Image: "someimage", + ImagePullPolicy: api.PullIfNotPresent, + }, + }, + DNSPolicy: api.DNSClusterFirst, + RestartPolicy: api.RestartPolicyAlways, + }, + }, + }, + { + params: map[string]interface{}{ + "name": "foo", + "image": "someimage", + "replicas": "1", + "labels": "foo=bar,baz=blah", + "stdin": "true", + }, + expected: &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Labels: map[string]string{"foo": "bar", "baz": "blah"}, + }, + Spec: api.PodSpec{ + Containers: []api.Container{ + { + Name: "foo", + Image: "someimage", + ImagePullPolicy: api.PullIfNotPresent, + Stdin: true, + StdinOnce: true, + }, + }, + DNSPolicy: api.DNSClusterFirst, + RestartPolicy: api.RestartPolicyAlways, + }, + }, + }, + { + params: map[string]interface{}{ + "name": "foo", + "image": "someimage", + "replicas": "1", + "labels": "foo=bar,baz=blah", + "stdin": "true", + "leave-stdin-open": "true", + }, + expected: &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Labels: map[string]string{"foo": "bar", "baz": "blah"}, + }, + Spec: api.PodSpec{ + Containers: []api.Container{ + { + Name: "foo", + Image: "someimage", + ImagePullPolicy: api.PullIfNotPresent, + Stdin: true, + StdinOnce: false, + }, + }, + DNSPolicy: api.DNSClusterFirst, + RestartPolicy: api.RestartPolicyAlways, + }, + }, + }, + } + generator := BasicPod{} + for _, test := range tests { + obj, err := generator.Generate(test.params) + if !test.expectErr && err != nil { + t.Errorf("unexpected error: %v", err) + } + if test.expectErr && err != nil { + continue + } + if !reflect.DeepEqual(obj.(*api.Pod), test.expected) { + t.Errorf("\nexpected:\n%#v\nsaw:\n%#v", test.expected, obj.(*api.Pod)) + } + } +} + +func TestGenerateDeployment(t *testing.T) { + tests := []struct { + params map[string]interface{} + expected *extensions.Deployment + expectErr bool + }{ + { + params: map[string]interface{}{ + "labels": "foo=bar,baz=blah", + "name": "foo", + "replicas": "3", + "image": "someimage", + "port": "80", + "hostport": "80", + "stdin": "true", + "command": "true", + "args": []string{"bar", "baz", "blah"}, + "env": []string{"a=b", "c=d"}, + "requests": "cpu=100m,memory=100Mi", + "limits": "cpu=400m,memory=200Mi", + }, + expected: &extensions.Deployment{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Labels: map[string]string{"foo": "bar", "baz": "blah"}, + }, + Spec: extensions.DeploymentSpec{ + Replicas: 3, + Selector: &unversioned.LabelSelector{MatchLabels: map[string]string{"foo": "bar", "baz": "blah"}}, + Template: api.PodTemplateSpec{ + ObjectMeta: api.ObjectMeta{ + Labels: 
map[string]string{"foo": "bar", "baz": "blah"}, + }, + Spec: api.PodSpec{ + Containers: []api.Container{ + { + Name: "foo", + Image: "someimage", + Stdin: true, + Ports: []api.ContainerPort{ + { + ContainerPort: 80, + HostPort: 80, + }, + }, + Command: []string{"bar", "baz", "blah"}, + Env: []api.EnvVar{ + { + Name: "a", + Value: "b", + }, + { + Name: "c", + Value: "d", + }, + }, + Resources: api.ResourceRequirements{ + Requests: api.ResourceList{ + api.ResourceCPU: resource.MustParse("100m"), + api.ResourceMemory: resource.MustParse("100Mi"), + }, + Limits: api.ResourceList{ + api.ResourceCPU: resource.MustParse("400m"), + api.ResourceMemory: resource.MustParse("200Mi"), + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + generator := DeploymentV1Beta1{} + for _, test := range tests { + obj, err := generator.Generate(test.params) + if !test.expectErr && err != nil { + t.Errorf("unexpected error: %v", err) + } + if test.expectErr && err != nil { + continue + } + if !reflect.DeepEqual(obj.(*extensions.Deployment), test.expected) { + t.Errorf("\nexpected:\n%#v\nsaw:\n%#v", test.expected, obj.(*extensions.Deployment)) + } + } +} + +func TestGenerateJob(t *testing.T) { + tests := []struct { + params map[string]interface{} + expected *batch.Job + expectErr bool + }{ + { + params: map[string]interface{}{ + "labels": "foo=bar,baz=blah", + "name": "foo", + "image": "someimage", + "port": "80", + "hostport": "80", + "stdin": "true", + "leave-stdin-open": "true", + "command": "true", + "args": []string{"bar", "baz", "blah"}, + "env": []string{"a=b", "c=d"}, + "requests": "cpu=100m,memory=100Mi", + "limits": "cpu=400m,memory=200Mi", + "restart": "OnFailure", + }, + expected: &batch.Job{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Labels: map[string]string{"foo": "bar", "baz": "blah"}, + }, + Spec: batch.JobSpec{ + Selector: &unversioned.LabelSelector{ + MatchLabels: map[string]string{"foo": "bar", "baz": "blah"}, + }, + ManualSelector: newBool(true), + Template: api.PodTemplateSpec{ + ObjectMeta: api.ObjectMeta{ + Labels: map[string]string{"foo": "bar", "baz": "blah"}, + }, + Spec: api.PodSpec{ + RestartPolicy: api.RestartPolicyOnFailure, + Containers: []api.Container{ + { + Name: "foo", + Image: "someimage", + Stdin: true, + StdinOnce: false, + Ports: []api.ContainerPort{ + { + ContainerPort: 80, + HostPort: 80, + }, + }, + Command: []string{"bar", "baz", "blah"}, + Env: []api.EnvVar{ + { + Name: "a", + Value: "b", + }, + { + Name: "c", + Value: "d", + }, + }, + Resources: api.ResourceRequirements{ + Requests: api.ResourceList{ + api.ResourceCPU: resource.MustParse("100m"), + api.ResourceMemory: resource.MustParse("100Mi"), + }, + Limits: api.ResourceList{ + api.ResourceCPU: resource.MustParse("400m"), + api.ResourceMemory: resource.MustParse("200Mi"), + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + generator := JobV1Beta1{} + for _, test := range tests { + obj, err := generator.Generate(test.params) + if !test.expectErr && err != nil { + t.Errorf("unexpected error: %v", err) + } + if test.expectErr && err != nil { + continue + } + if !reflect.DeepEqual(obj.(*batch.Job), test.expected) { + t.Errorf("\nexpected:\n%#v\nsaw:\n%#v", test.expected, obj.(*batch.Job)) + } + } +} + +func TestParseEnv(t *testing.T) { + tests := []struct { + envArray []string + expected []api.EnvVar + expectErr bool + test string + }{ + { + envArray: []string{ + "THIS_ENV=isOK", + "HAS_COMMAS=foo,bar", + "HAS_EQUALS=jJnro54iUu75xNy==", + }, + expected: []api.EnvVar{ + { + Name: "THIS_ENV", + Value: "isOK", + }, + { + 
Name: "HAS_COMMAS", + Value: "foo,bar", + }, + { + Name: "HAS_EQUALS", + Value: "jJnro54iUu75xNy==", + }, + }, + expectErr: false, + test: "test case 1", + }, + { + envArray: []string{ + "WITH_OUT_EQUALS", + }, + expected: []api.EnvVar{}, + expectErr: true, + test: "test case 2", + }, + { + envArray: []string{ + "WITH_OUT_VALUES=", + }, + expected: []api.EnvVar{}, + expectErr: true, + test: "test case 3", + }, + { + envArray: []string{ + "=WITH_OUT_NAME", + }, + expected: []api.EnvVar{}, + expectErr: true, + test: "test case 4", + }, + } + + for _, test := range tests { + envs, err := parseEnvs(test.envArray) + if !test.expectErr && err != nil { + t.Errorf("unexpected error: %v (%s)", err, test.test) + } + if test.expectErr && err != nil { + continue + } + if !reflect.DeepEqual(envs, test.expected) { + t.Errorf("\nexpected:\n%#v\nsaw:\n%#v (%s)", test.expected, envs, test.test) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/scale.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/scale.go index 14951c028733..bef93d9909bf 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/scale.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/scale.go @@ -48,7 +48,7 @@ func ScalerFor(kind unversioned.GroupKind, c client.Interface) (Scaler, error) { case extensions.Kind("ReplicaSet"): return &ReplicaSetScaler{c.Extensions()}, nil case extensions.Kind("Job"), batch.Kind("Job"): - return &JobScaler{c.Extensions()}, nil // Either kind of job can be scaled with Extensions interface. + return &JobScaler{c.Batch()}, nil // Either kind of job can be scaled with Batch interface. case extensions.Kind("Deployment"): return &DeploymentScaler{c.Extensions()}, nil } @@ -129,8 +129,8 @@ func ScaleCondition(r Scaler, precondition *ScalePrecondition, namespace, name s // ValidateReplicationController ensures that the preconditions match. Returns nil if they are valid, an error otherwise func (precondition *ScalePrecondition) ValidateReplicationController(controller *api.ReplicationController) error { - if precondition.Size != -1 && controller.Spec.Replicas != precondition.Size { - return PreconditionError{"replicas", strconv.Itoa(precondition.Size), strconv.Itoa(controller.Spec.Replicas)} + if precondition.Size != -1 && int(controller.Spec.Replicas) != precondition.Size { + return PreconditionError{"replicas", strconv.Itoa(precondition.Size), strconv.Itoa(int(controller.Spec.Replicas))} } if len(precondition.ResourceVersion) != 0 && controller.ResourceVersion != precondition.ResourceVersion { return PreconditionError{"resource version", precondition.ResourceVersion, controller.ResourceVersion} @@ -152,7 +152,7 @@ func (scaler *ReplicationControllerScaler) ScaleSimple(namespace, name string, p return err } } - controller.Spec.Replicas = int(newSize) + controller.Spec.Replicas = int32(newSize) // TODO: do retry on 409 errors here? if _, err := scaler.c.ReplicationControllers(namespace).Update(controller); err != nil { if errors.IsInvalid(err) { @@ -191,8 +191,8 @@ func (scaler *ReplicationControllerScaler) Scale(namespace, name string, newSize // ValidateReplicaSet ensures that the preconditions match. 
Returns nil if they are valid, an error otherwise func (precondition *ScalePrecondition) ValidateReplicaSet(replicaSet *extensions.ReplicaSet) error { - if precondition.Size != -1 && replicaSet.Spec.Replicas != precondition.Size { - return PreconditionError{"replicas", strconv.Itoa(precondition.Size), strconv.Itoa(replicaSet.Spec.Replicas)} + if precondition.Size != -1 && int(replicaSet.Spec.Replicas) != precondition.Size { + return PreconditionError{"replicas", strconv.Itoa(precondition.Size), strconv.Itoa(int(replicaSet.Spec.Replicas))} } if len(precondition.ResourceVersion) != 0 && replicaSet.ResourceVersion != precondition.ResourceVersion { return PreconditionError{"resource version", precondition.ResourceVersion, replicaSet.ResourceVersion} @@ -214,7 +214,7 @@ func (scaler *ReplicaSetScaler) ScaleSimple(namespace, name string, precondition return err } } - rs.Spec.Replicas = int(newSize) + rs.Spec.Replicas = int32(newSize) // TODO: do retry on 409 errors here? if _, err := scaler.c.ReplicaSets(namespace).Update(rs); err != nil { if errors.IsInvalid(err) { @@ -252,12 +252,12 @@ func (scaler *ReplicaSetScaler) Scale(namespace, name string, newSize uint, prec } // ValidateJob ensures that the preconditions match. Returns nil if they are valid, an error otherwise. -func (precondition *ScalePrecondition) ValidateJob(job *extensions.Job) error { +func (precondition *ScalePrecondition) ValidateJob(job *batch.Job) error { if precondition.Size != -1 && job.Spec.Parallelism == nil { return PreconditionError{"parallelism", strconv.Itoa(precondition.Size), "nil"} } - if precondition.Size != -1 && *job.Spec.Parallelism != precondition.Size { - return PreconditionError{"parallelism", strconv.Itoa(precondition.Size), strconv.Itoa(*job.Spec.Parallelism)} + if precondition.Size != -1 && int(*job.Spec.Parallelism) != precondition.Size { + return PreconditionError{"parallelism", strconv.Itoa(precondition.Size), strconv.Itoa(int(*job.Spec.Parallelism))} } if len(precondition.ResourceVersion) != 0 && job.ResourceVersion != precondition.ResourceVersion { return PreconditionError{"resource version", precondition.ResourceVersion, job.ResourceVersion} @@ -266,7 +266,7 @@ func (precondition *ScalePrecondition) ValidateJob(job *extensions.Job) error { } type JobScaler struct { - c client.ExtensionsInterface + c client.BatchInterface } // ScaleSimple is responsible for updating job's parallelism. @@ -280,7 +280,7 @@ func (scaler *JobScaler) ScaleSimple(namespace, name string, preconditions *Scal return err } } - parallelism := int(newSize) + parallelism := int32(newSize) job.Spec.Parallelism = ¶llelism if _, err := scaler.c.Jobs(namespace).Update(job); err != nil { if errors.IsInvalid(err) { @@ -319,8 +319,8 @@ func (scaler *JobScaler) Scale(namespace, name string, newSize uint, preconditio // ValidateDeployment ensures that the preconditions match. Returns nil if they are valid, an error otherwise. 
func (precondition *ScalePrecondition) ValidateDeployment(deployment *extensions.Deployment) error { - if precondition.Size != -1 && deployment.Spec.Replicas != precondition.Size { - return PreconditionError{"replicas", strconv.Itoa(precondition.Size), strconv.Itoa(deployment.Spec.Replicas)} + if precondition.Size != -1 && int(deployment.Spec.Replicas) != precondition.Size { + return PreconditionError{"replicas", strconv.Itoa(precondition.Size), strconv.Itoa(int(deployment.Spec.Replicas))} } if len(precondition.ResourceVersion) != 0 && deployment.ResourceVersion != precondition.ResourceVersion { return PreconditionError{"resource version", precondition.ResourceVersion, deployment.ResourceVersion} @@ -346,7 +346,7 @@ func (scaler *DeploymentScaler) ScaleSimple(namespace, name string, precondition // TODO(madhusudancs): Fix this when Scale group issues are resolved (see issue #18528). // For now I'm falling back to regular Deployment update operation. - deployment.Spec.Replicas = int(newSize) + deployment.Spec.Replicas = int32(newSize) if _, err := scaler.c.Deployments(namespace).Update(deployment); err != nil { if errors.IsInvalid(err) { return ScaleError{ScaleUpdateInvalidFailure, deployment.ResourceVersion, err} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/scale_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/scale_test.go new file mode 100644 index 000000000000..0dc55cda18ed --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/scale_test.go @@ -0,0 +1,729 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
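Note on the scale.go hunks above: the int32 migration repeats one conversion pattern throughout. The API specs now carry sizes as int32 while ScalePrecondition keeps a plain int (with -1 meaning "unchecked"), so every comparison and strconv.Itoa call converts explicitly. A minimal, self-contained sketch of that pattern, using illustrative names rather than the vendored types:

package main

import (
	"fmt"
	"strconv"
)

// sketchPrecondition mirrors the shape of ScalePrecondition: a desired size
// (-1 disables the check) and a resource version ("" disables the check).
type sketchPrecondition struct {
	Size            int
	ResourceVersion string
}

// validate compares an int32 spec field against the int precondition,
// converting explicitly, as the vendored Validate* methods now do.
func (p sketchPrecondition) validate(replicas int32, resourceVersion string) error {
	if p.Size != -1 && int(replicas) != p.Size {
		return fmt.Errorf("replicas: expected %s, saw %s",
			strconv.Itoa(p.Size), strconv.Itoa(int(replicas)))
	}
	if len(p.ResourceVersion) != 0 && resourceVersion != p.ResourceVersion {
		return fmt.Errorf("resource version: expected %s, saw %s",
			p.ResourceVersion, resourceVersion)
	}
	return nil
}

func main() {
	p := sketchPrecondition{Size: 3, ResourceVersion: "foo"}
	fmt.Println(p.validate(10, "foo")) // replicas: expected 3, saw 10
}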
+*/ + +package kubectl + +import ( + "errors" + "testing" + + "k8s.io/kubernetes/pkg/api" + kerrors "k8s.io/kubernetes/pkg/api/errors" + "k8s.io/kubernetes/pkg/apis/batch" + "k8s.io/kubernetes/pkg/apis/extensions" + client "k8s.io/kubernetes/pkg/client/unversioned" + "k8s.io/kubernetes/pkg/client/unversioned/testclient" +) + +type ErrorReplicationControllers struct { + testclient.FakeReplicationControllers + invalid bool +} + +func (c *ErrorReplicationControllers) Update(controller *api.ReplicationController) (*api.ReplicationController, error) { + if c.invalid { + return nil, kerrors.NewInvalid(api.Kind(controller.Kind), controller.Name, nil) + } + return nil, errors.New("Replication controller update failure") +} + +type ErrorReplicationControllerClient struct { + testclient.Fake + invalid bool +} + +func (c *ErrorReplicationControllerClient) ReplicationControllers(namespace string) client.ReplicationControllerInterface { + return &ErrorReplicationControllers{testclient.FakeReplicationControllers{Fake: &c.Fake, Namespace: namespace}, c.invalid} +} + +func TestReplicationControllerScaleRetry(t *testing.T) { + fake := &ErrorReplicationControllerClient{Fake: testclient.Fake{}, invalid: false} + scaler := ReplicationControllerScaler{fake} + preconditions := ScalePrecondition{-1, ""} + count := uint(3) + name := "foo" + namespace := "default" + + scaleFunc := ScaleCondition(&scaler, &preconditions, namespace, name, count) + pass, err := scaleFunc() + if pass { + t.Errorf("Expected an update failure to return pass = false, got pass = %v", pass) + } + if err != nil { + t.Errorf("Did not expect an error on update failure, got %v", err) + } + preconditions = ScalePrecondition{3, ""} + scaleFunc = ScaleCondition(&scaler, &preconditions, namespace, name, count) + pass, err = scaleFunc() + if err == nil { + t.Errorf("Expected error on precondition failure") + } +} + +func TestReplicationControllerScaleInvalid(t *testing.T) { + fake := &ErrorReplicationControllerClient{Fake: testclient.Fake{}, invalid: true} + scaler := ReplicationControllerScaler{fake} + preconditions := ScalePrecondition{-1, ""} + count := uint(3) + name := "foo" + namespace := "default" + + scaleFunc := ScaleCondition(&scaler, &preconditions, namespace, name, count) + pass, err := scaleFunc() + if pass { + t.Errorf("Expected an update failure to return pass = false, got pass = %v", pass) + } + e, ok := err.(ScaleError) + if err == nil || !ok || e.FailureType != ScaleUpdateInvalidFailure { + t.Errorf("Expected error on invalid update failure, got %v", err) + } +} + +func TestReplicationControllerScale(t *testing.T) { + fake := &testclient.Fake{} + scaler := ReplicationControllerScaler{fake} + preconditions := ScalePrecondition{-1, ""} + count := uint(3) + name := "foo" + scaler.Scale("default", name, count, &preconditions, nil, nil) + + actions := fake.Actions() + if len(actions) != 2 { + t.Errorf("unexpected actions: %v, expected 2 actions (get, update)", actions) + } + if action, ok := actions[0].(testclient.GetAction); !ok || action.GetResource() != "replicationcontrollers" || action.GetName() != name { + t.Errorf("unexpected action: %v, expected get-replicationController %s", actions[0], name) + } + if action, ok := actions[1].(testclient.UpdateAction); !ok || action.GetResource() != "replicationcontrollers" || action.GetObject().(*api.ReplicationController).Spec.Replicas != int32(count) { + t.Errorf("unexpected action %v, expected update-replicationController with replicas = %d", actions[1], count) + } +} + +func 
TestReplicationControllerScaleFailsPreconditions(t *testing.T) { + fake := testclient.NewSimpleFake(&api.ReplicationController{ + Spec: api.ReplicationControllerSpec{ + Replicas: 10, + }, + }) + scaler := ReplicationControllerScaler{fake} + preconditions := ScalePrecondition{2, ""} + count := uint(3) + name := "foo" + scaler.Scale("default", name, count, &preconditions, nil, nil) + + actions := fake.Actions() + if len(actions) != 1 { + t.Errorf("unexpected actions: %v, expected 1 action (get)", actions) + } + if action, ok := actions[0].(testclient.GetAction); !ok || action.GetResource() != "replicationcontrollers" || action.GetName() != name { + t.Errorf("unexpected action: %v, expected get-replicationController %s", actions[0], name) + } +} + +func TestValidateReplicationController(t *testing.T) { + tests := []struct { + preconditions ScalePrecondition + controller api.ReplicationController + expectError bool + test string + }{ + { + preconditions: ScalePrecondition{-1, ""}, + expectError: false, + test: "defaults", + }, + { + preconditions: ScalePrecondition{-1, ""}, + controller: api.ReplicationController{ + ObjectMeta: api.ObjectMeta{ + ResourceVersion: "foo", + }, + Spec: api.ReplicationControllerSpec{ + Replicas: 10, + }, + }, + expectError: false, + test: "defaults 2", + }, + { + preconditions: ScalePrecondition{0, ""}, + controller: api.ReplicationController{ + ObjectMeta: api.ObjectMeta{ + ResourceVersion: "foo", + }, + Spec: api.ReplicationControllerSpec{ + Replicas: 0, + }, + }, + expectError: false, + test: "size matches", + }, + { + preconditions: ScalePrecondition{-1, "foo"}, + controller: api.ReplicationController{ + ObjectMeta: api.ObjectMeta{ + ResourceVersion: "foo", + }, + Spec: api.ReplicationControllerSpec{ + Replicas: 10, + }, + }, + expectError: false, + test: "resource version matches", + }, + { + preconditions: ScalePrecondition{10, "foo"}, + controller: api.ReplicationController{ + ObjectMeta: api.ObjectMeta{ + ResourceVersion: "foo", + }, + Spec: api.ReplicationControllerSpec{ + Replicas: 10, + }, + }, + expectError: false, + test: "both match", + }, + { + preconditions: ScalePrecondition{10, "foo"}, + controller: api.ReplicationController{ + ObjectMeta: api.ObjectMeta{ + ResourceVersion: "foo", + }, + Spec: api.ReplicationControllerSpec{ + Replicas: 20, + }, + }, + expectError: true, + test: "size different", + }, + { + preconditions: ScalePrecondition{10, "foo"}, + controller: api.ReplicationController{ + ObjectMeta: api.ObjectMeta{ + ResourceVersion: "bar", + }, + Spec: api.ReplicationControllerSpec{ + Replicas: 10, + }, + }, + expectError: true, + test: "version different", + }, + { + preconditions: ScalePrecondition{10, "foo"}, + controller: api.ReplicationController{ + ObjectMeta: api.ObjectMeta{ + ResourceVersion: "bar", + }, + Spec: api.ReplicationControllerSpec{ + Replicas: 20, + }, + }, + expectError: true, + test: "both different", + }, + } + for _, test := range tests { + err := test.preconditions.ValidateReplicationController(&test.controller) + if err != nil && !test.expectError { + t.Errorf("unexpected error: %v (%s)", err, test.test) + } + if err == nil && test.expectError { + t.Errorf("unexpected non-error: %v (%s)", err, test.test) + } + } +} + +type ErrorJobs struct { + testclient.FakeJobsV1 + invalid bool +} + +func (c *ErrorJobs) Update(job *batch.Job) (*batch.Job, error) { + if c.invalid { + return nil, kerrors.NewInvalid(batch.Kind(job.Kind), job.Name, nil) + } + return nil, errors.New("Job update failure") +} + +func (c *ErrorJobs) 
Get(name string) (*batch.Job, error) {
+	zero := int32(0)
+	return &batch.Job{
+		Spec: batch.JobSpec{
+			Parallelism: &zero,
+		},
+	}, nil
+}
+
+type ErrorJobClient struct {
+	testclient.FakeBatch
+	invalid bool
+}
+
+func (c *ErrorJobClient) Jobs(namespace string) client.JobInterface {
+	return &ErrorJobs{testclient.FakeJobsV1{Fake: &c.FakeBatch, Namespace: namespace}, c.invalid}
+}
+
+func TestJobScaleRetry(t *testing.T) {
+	fake := &ErrorJobClient{FakeBatch: testclient.FakeBatch{}, invalid: false}
+	scaler := JobScaler{fake}
+	preconditions := ScalePrecondition{-1, ""}
+	count := uint(3)
+	name := "foo"
+	namespace := "default"
+
+	scaleFunc := ScaleCondition(&scaler, &preconditions, namespace, name, count)
+	pass, err := scaleFunc()
+	if pass {
+		t.Errorf("Expected an update failure to return pass = false, got pass = %v", pass)
+	}
+	if err != nil {
+		t.Errorf("Did not expect an error on update failure, got %v", err)
+	}
+	preconditions = ScalePrecondition{3, ""}
+	scaleFunc = ScaleCondition(&scaler, &preconditions, namespace, name, count)
+	pass, err = scaleFunc()
+	if err == nil {
+		t.Errorf("Expected error on precondition failure")
+	}
+}
+
+func TestJobScale(t *testing.T) {
+	fake := &testclient.FakeBatch{Fake: &testclient.Fake{}}
+	scaler := JobScaler{fake}
+	preconditions := ScalePrecondition{-1, ""}
+	count := uint(3)
+	name := "foo"
+	scaler.Scale("default", name, count, &preconditions, nil, nil)
+
+	actions := fake.Actions()
+	if len(actions) != 2 {
+		t.Errorf("unexpected actions: %v, expected 2 actions (get, update)", actions)
+	}
+	if action, ok := actions[0].(testclient.GetAction); !ok || action.GetResource() != "jobs" || action.GetName() != name {
+		t.Errorf("unexpected action: %v, expected get-job %s", actions[0], name)
+	}
+	if action, ok := actions[1].(testclient.UpdateAction); !ok || action.GetResource() != "jobs" || *action.GetObject().(*batch.Job).Spec.Parallelism != int32(count) {
+		t.Errorf("unexpected action %v, expected update-job with parallelism = %d", actions[1], count)
+	}
+}
+
+func TestJobScaleInvalid(t *testing.T) {
+	fake := &ErrorJobClient{FakeBatch: testclient.FakeBatch{}, invalid: true}
+	scaler := JobScaler{fake}
+	preconditions := ScalePrecondition{-1, ""}
+	count := uint(3)
+	name := "foo"
+	namespace := "default"
+
+	scaleFunc := ScaleCondition(&scaler, &preconditions, namespace, name, count)
+	pass, err := scaleFunc()
+	if pass {
+		t.Errorf("Expected an update failure to return pass = false, got pass = %v", pass)
+	}
+	e, ok := err.(ScaleError)
+	if err == nil || !ok || e.FailureType != ScaleUpdateInvalidFailure {
+		t.Errorf("Expected error on invalid update failure, got %v", err)
+	}
+}
+
+func TestJobScaleFailsPreconditions(t *testing.T) {
+	ten := int32(10)
+	fake := testclient.NewSimpleFake(&batch.Job{
+		Spec: batch.JobSpec{
+			Parallelism: &ten,
+		},
+	})
+	scaler := JobScaler{&testclient.FakeBatch{Fake: fake}}
+	preconditions := ScalePrecondition{2, ""}
+	count := uint(3)
+	name := "foo"
+	scaler.Scale("default", name, count, &preconditions, nil, nil)
+
+	actions := fake.Actions()
+	if len(actions) != 1 {
+		t.Errorf("unexpected actions: %v, expected 1 action (get)", actions)
+	}
+	if action, ok := actions[0].(testclient.GetAction); !ok || action.GetResource() != "jobs" || action.GetName() != name {
+		t.Errorf("unexpected action: %v, expected get-job %s", actions[0], name)
+	}
+}
+
+func TestValidateJob(t *testing.T) {
+	zero, ten, twenty := int32(0), int32(10), int32(20)
+	tests := []struct {
+		preconditions 
ScalePrecondition + job batch.Job + expectError bool + test string + }{ + { + preconditions: ScalePrecondition{-1, ""}, + expectError: false, + test: "defaults", + }, + { + preconditions: ScalePrecondition{-1, ""}, + job: batch.Job{ + ObjectMeta: api.ObjectMeta{ + ResourceVersion: "foo", + }, + Spec: batch.JobSpec{ + Parallelism: &ten, + }, + }, + expectError: false, + test: "defaults 2", + }, + { + preconditions: ScalePrecondition{0, ""}, + job: batch.Job{ + ObjectMeta: api.ObjectMeta{ + ResourceVersion: "foo", + }, + Spec: batch.JobSpec{ + Parallelism: &zero, + }, + }, + expectError: false, + test: "size matches", + }, + { + preconditions: ScalePrecondition{-1, "foo"}, + job: batch.Job{ + ObjectMeta: api.ObjectMeta{ + ResourceVersion: "foo", + }, + Spec: batch.JobSpec{ + Parallelism: &ten, + }, + }, + expectError: false, + test: "resource version matches", + }, + { + preconditions: ScalePrecondition{10, "foo"}, + job: batch.Job{ + ObjectMeta: api.ObjectMeta{ + ResourceVersion: "foo", + }, + Spec: batch.JobSpec{ + Parallelism: &ten, + }, + }, + expectError: false, + test: "both match", + }, + { + preconditions: ScalePrecondition{10, "foo"}, + job: batch.Job{ + ObjectMeta: api.ObjectMeta{ + ResourceVersion: "foo", + }, + Spec: batch.JobSpec{ + Parallelism: &twenty, + }, + }, + expectError: true, + test: "size different", + }, + { + preconditions: ScalePrecondition{10, "foo"}, + job: batch.Job{ + ObjectMeta: api.ObjectMeta{ + ResourceVersion: "foo", + }, + }, + expectError: true, + test: "parallelism nil", + }, + { + preconditions: ScalePrecondition{10, "foo"}, + job: batch.Job{ + ObjectMeta: api.ObjectMeta{ + ResourceVersion: "bar", + }, + Spec: batch.JobSpec{ + Parallelism: &ten, + }, + }, + expectError: true, + test: "version different", + }, + { + preconditions: ScalePrecondition{10, "foo"}, + job: batch.Job{ + ObjectMeta: api.ObjectMeta{ + ResourceVersion: "bar", + }, + Spec: batch.JobSpec{ + Parallelism: &twenty, + }, + }, + expectError: true, + test: "both different", + }, + } + for _, test := range tests { + err := test.preconditions.ValidateJob(&test.job) + if err != nil && !test.expectError { + t.Errorf("unexpected error: %v (%s)", err, test.test) + } + if err == nil && test.expectError { + t.Errorf("unexpected non-error: %v (%s)", err, test.test) + } + } +} + +type ErrorDeployments struct { + testclient.FakeDeployments + invalid bool +} + +func (c *ErrorDeployments) Update(deployment *extensions.Deployment) (*extensions.Deployment, error) { + if c.invalid { + return nil, kerrors.NewInvalid(extensions.Kind(deployment.Kind), deployment.Name, nil) + } + return nil, errors.New("deployment update failure") +} + +func (c *ErrorDeployments) Get(name string) (*extensions.Deployment, error) { + return &extensions.Deployment{ + Spec: extensions.DeploymentSpec{ + Replicas: 0, + }, + }, nil +} + +type ErrorDeploymentClient struct { + testclient.FakeExperimental + invalid bool +} + +func (c *ErrorDeploymentClient) Deployments(namespace string) client.DeploymentInterface { + return &ErrorDeployments{testclient.FakeDeployments{Fake: &c.FakeExperimental, Namespace: namespace}, c.invalid} +} + +func TestDeploymentScaleRetry(t *testing.T) { + fake := &ErrorDeploymentClient{FakeExperimental: testclient.FakeExperimental{Fake: &testclient.Fake{}}, invalid: false} + scaler := &DeploymentScaler{fake} + preconditions := &ScalePrecondition{-1, ""} + count := uint(3) + name := "foo" + namespace := "default" + + scaleFunc := ScaleCondition(scaler, preconditions, namespace, name, count) + pass, err := 
scaleFunc()
+	if pass {
+		t.Errorf("Expected an update failure to return pass = false, got pass = %v", pass)
+	}
+	if err != nil {
+		t.Errorf("Did not expect an error on update failure, got %v", err)
+	}
+	preconditions = &ScalePrecondition{3, ""}
+	scaleFunc = ScaleCondition(scaler, preconditions, namespace, name, count)
+	pass, err = scaleFunc()
+	if err == nil {
+		t.Errorf("Expected error on precondition failure")
+	}
+}
+
+func TestDeploymentScale(t *testing.T) {
+	fake := &testclient.FakeExperimental{Fake: &testclient.Fake{}}
+	scaler := DeploymentScaler{fake}
+	preconditions := ScalePrecondition{-1, ""}
+	count := uint(3)
+	name := "foo"
+	scaler.Scale("default", name, count, &preconditions, nil, nil)
+
+	actions := fake.Actions()
+	if len(actions) != 2 {
+		t.Errorf("unexpected actions: %v, expected 2 actions (get, update)", actions)
+	}
+	if action, ok := actions[0].(testclient.GetAction); !ok || action.GetResource() != "deployments" || action.GetName() != name {
+		t.Errorf("unexpected action: %v, expected get-deployment %s", actions[0], name)
+	}
+	if action, ok := actions[1].(testclient.UpdateAction); !ok || action.GetResource() != "deployments" || action.GetObject().(*extensions.Deployment).Spec.Replicas != int32(count) {
+		t.Errorf("unexpected action %v, expected update-deployment with replicas = %d", actions[1], count)
+	}
+}
+
+func TestDeploymentScaleInvalid(t *testing.T) {
+	fake := &ErrorDeploymentClient{FakeExperimental: testclient.FakeExperimental{Fake: &testclient.Fake{}}, invalid: true}
+	scaler := DeploymentScaler{fake}
+	preconditions := ScalePrecondition{-1, ""}
+	count := uint(3)
+	name := "foo"
+	namespace := "default"
+
+	scaleFunc := ScaleCondition(&scaler, &preconditions, namespace, name, count)
+	pass, err := scaleFunc()
+	if pass {
+		t.Errorf("Expected an update failure to return pass = false, got pass = %v", pass)
+	}
+	e, ok := err.(ScaleError)
+	if err == nil || !ok || e.FailureType != ScaleUpdateInvalidFailure {
+		t.Errorf("Expected error on invalid update failure, got %v", err)
+	}
+}
+
+func TestDeploymentScaleFailsPreconditions(t *testing.T) {
+	fake := testclient.NewSimpleFake(&extensions.Deployment{
+		Spec: extensions.DeploymentSpec{
+			Replicas: 10,
+		},
+	})
+	scaler := DeploymentScaler{&testclient.FakeExperimental{Fake: fake}}
+	preconditions := ScalePrecondition{2, ""}
+	count := uint(3)
+	name := "foo"
+	scaler.Scale("default", name, count, &preconditions, nil, nil)
+
+	actions := fake.Actions()
+	if len(actions) != 1 {
+		t.Errorf("unexpected actions: %v, expected 1 action (get)", actions)
+	}
+	if action, ok := actions[0].(testclient.GetAction); !ok || action.GetResource() != "deployments" || action.GetName() != name {
+		t.Errorf("unexpected action: %v, expected get-deployment %s", actions[0], name)
+	}
+}
+
+func TestValidateDeployment(t *testing.T) {
+	zero, ten, twenty := int32(0), int32(10), int32(20)
+	tests := []struct {
+		preconditions ScalePrecondition
+		deployment    extensions.Deployment
+		expectError   bool
+		test          string
+	}{
+		{
+			preconditions: ScalePrecondition{-1, ""},
+			expectError:   false,
+			test:          "defaults",
+		},
+		{
+			preconditions: ScalePrecondition{-1, ""},
+			deployment: extensions.Deployment{
+				ObjectMeta: api.ObjectMeta{
+					ResourceVersion: "foo",
+				},
+				Spec: extensions.DeploymentSpec{
+					Replicas: ten,
+				},
+			},
+			expectError: false,
+			test:        "defaults 2",
+		},
+		{
+			preconditions: ScalePrecondition{0, ""},
+			deployment: extensions.Deployment{
+				ObjectMeta: api.ObjectMeta{
+					ResourceVersion: "foo",
+				},
+				
Spec: extensions.DeploymentSpec{ + Replicas: zero, + }, + }, + expectError: false, + test: "size matches", + }, + { + preconditions: ScalePrecondition{-1, "foo"}, + deployment: extensions.Deployment{ + ObjectMeta: api.ObjectMeta{ + ResourceVersion: "foo", + }, + Spec: extensions.DeploymentSpec{ + Replicas: ten, + }, + }, + expectError: false, + test: "resource version matches", + }, + { + preconditions: ScalePrecondition{10, "foo"}, + deployment: extensions.Deployment{ + ObjectMeta: api.ObjectMeta{ + ResourceVersion: "foo", + }, + Spec: extensions.DeploymentSpec{ + Replicas: ten, + }, + }, + expectError: false, + test: "both match", + }, + { + preconditions: ScalePrecondition{10, "foo"}, + deployment: extensions.Deployment{ + ObjectMeta: api.ObjectMeta{ + ResourceVersion: "foo", + }, + Spec: extensions.DeploymentSpec{ + Replicas: twenty, + }, + }, + expectError: true, + test: "size different", + }, + { + preconditions: ScalePrecondition{10, "foo"}, + deployment: extensions.Deployment{ + ObjectMeta: api.ObjectMeta{ + ResourceVersion: "foo", + }, + }, + expectError: true, + test: "no replicas", + }, + { + preconditions: ScalePrecondition{10, "foo"}, + deployment: extensions.Deployment{ + ObjectMeta: api.ObjectMeta{ + ResourceVersion: "bar", + }, + Spec: extensions.DeploymentSpec{ + Replicas: ten, + }, + }, + expectError: true, + test: "version different", + }, + { + preconditions: ScalePrecondition{10, "foo"}, + deployment: extensions.Deployment{ + ObjectMeta: api.ObjectMeta{ + ResourceVersion: "bar", + }, + Spec: extensions.DeploymentSpec{ + Replicas: twenty, + }, + }, + expectError: true, + test: "both different", + }, + } + for _, test := range tests { + err := test.preconditions.ValidateDeployment(&test.deployment) + if err != nil && !test.expectError { + t.Errorf("unexpected error: %v (%s)", err, test.test) + } + if err == nil && test.expectError { + t.Errorf("unexpected non-error: %v (%s)", err, test.test) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/secret_for_docker_registry_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/secret_for_docker_registry_test.go new file mode 100644 index 000000000000..65d8d397d7f2 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/secret_for_docker_registry_test.go @@ -0,0 +1,81 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
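The docker-registry secret test below derives its expected Data bytes from the vendored helper handleDockercfgContent. The legacy .dockercfg payload that kind of helper produces is, in rough shape, a JSON map keyed by registry server; a hand-rolled sketch of that shape (the field layout here is an assumption for illustration, not the vendored helper):

package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
)

// dockercfgEntry approximates one legacy .dockercfg entry; the auth field
// is base64("username:password"). Assumed layout, for illustration only.
type dockercfgEntry struct {
	Username string `json:"username"`
	Password string `json:"password"`
	Email    string `json:"email"`
	Auth     string `json:"auth"`
}

func main() {
	user, pass := "test-user", "test-password"
	cfg := map[string]dockercfgEntry{
		"https://index.docker.io/v1/": {
			Username: user,
			Password: pass,
			Email:    "test-user@example.org",
			Auth:     base64.StdEncoding.EncodeToString([]byte(user + ":" + pass)),
		},
	}
	out, _ := json.Marshal(cfg)
	fmt.Println(string(out)) // roughly the bytes stored under api.DockerConfigKey
}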
+*/ + +package kubectl + +import ( + "reflect" + "testing" + + "k8s.io/kubernetes/pkg/api" +) + +func TestSecretForDockerRegistryGenerate(t *testing.T) { + username, password, email, server := "test-user", "test-password", "test-user@example.org", "https://index.docker.io/v1/" + secretData, err := handleDockercfgContent(username, password, email, server) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + + tests := map[string]struct { + params map[string]interface{} + expected *api.Secret + expectErr bool + }{ + "test-valid-use": { + params: map[string]interface{}{ + "name": "foo", + "docker-server": server, + "docker-username": username, + "docker-password": password, + "docker-email": email, + }, + expected: &api.Secret{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + }, + Data: map[string][]byte{ + api.DockerConfigKey: secretData, + }, + Type: api.SecretTypeDockercfg, + }, + expectErr: false, + }, + "test-missing-required-param": { + params: map[string]interface{}{ + "name": "foo", + "docker-server": server, + "docker-password": password, + "docker-email": email, + }, + expectErr: true, + }, + } + + generator := SecretForDockerRegistryGeneratorV1{} + for _, test := range tests { + obj, err := generator.Generate(test.params) + if !test.expectErr && err != nil { + t.Errorf("unexpected error: %v", err) + } + if test.expectErr && err != nil { + continue + } + if !reflect.DeepEqual(obj.(*api.Secret), test.expected) { + t.Errorf("\nexpected:\n%#v\nsaw:\n%#v", test.expected, obj.(*api.Secret)) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/secret_for_tls.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/secret_for_tls.go new file mode 100644 index 000000000000..05061d25974b --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/secret_for_tls.go @@ -0,0 +1,124 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubectl + +import ( + "crypto/tls" + "fmt" + "io/ioutil" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/runtime" +) + +// SecretForTLSGeneratorV1 supports stable generation of a TLS secret. +type SecretForTLSGeneratorV1 struct { + // Name is the name of this TLS secret. + Name string + // Key is the path to the user's private key. + Key string + // Cert is the path to the user's public key certificate. 
+	Cert string
+}
+
+// Ensure it supports the generator pattern that uses parameter injection
+var _ Generator = &SecretForTLSGeneratorV1{}
+
+// Ensure it supports the generator pattern that uses parameters specified during construction
+var _ StructuredGenerator = &SecretForTLSGeneratorV1{}
+
+// Generate returns a secret using the specified parameters
+func (s SecretForTLSGeneratorV1) Generate(genericParams map[string]interface{}) (runtime.Object, error) {
+	err := ValidateParams(s.ParamNames(), genericParams)
+	if err != nil {
+		return nil, err
+	}
+	params := map[string]string{}
+	for key, value := range genericParams {
+		strVal, isString := value.(string)
+		if !isString {
+			return nil, fmt.Errorf("expected string, saw %v for '%s'", value, key)
+		}
+		params[key] = strVal
+	}
+	delegate := &SecretForTLSGeneratorV1{
+		Name: params["name"],
+		Key:  params["key"],
+		Cert: params["cert"],
+	}
+	return delegate.StructuredGenerate()
+}
+
+// StructuredGenerate outputs a secret object using the configured fields
+func (s SecretForTLSGeneratorV1) StructuredGenerate() (runtime.Object, error) {
+	if err := s.validate(); err != nil {
+		return nil, err
+	}
+	tlsCrt, err := readFile(s.Cert)
+	if err != nil {
+		return nil, err
+	}
+	tlsKey, err := readFile(s.Key)
+	if err != nil {
+		return nil, err
+	}
+	secret := &api.Secret{}
+	secret.Name = s.Name
+	secret.Type = api.SecretTypeTLS
+	secret.Data = map[string][]byte{}
+	secret.Data[api.TLSCertKey] = []byte(tlsCrt)
+	secret.Data[api.TLSPrivateKeyKey] = []byte(tlsKey)
+	return secret, nil
+}
+
+// readFile just reads a file into a byte slice.
+func readFile(file string) ([]byte, error) {
+	b, err := ioutil.ReadFile(file)
+	if err != nil {
+		return []byte{}, fmt.Errorf("cannot read file %v, %v", file, err)
+	}
+	return b, nil
+}
+
+// ParamNames returns the set of supported input parameters when using the parameter injection generator pattern
+func (s SecretForTLSGeneratorV1) ParamNames() []GeneratorParam {
+	return []GeneratorParam{
+		{"name", true},
+		{"key", true},
+		{"cert", true},
+	}
+}
+
+// validate validates required fields are set to support structured generation
+func (s SecretForTLSGeneratorV1) validate() error {
+	// TODO: This is not strictly necessary. We can generate a self-signed cert
+	// if no key/cert is given. The only requirement is that we either get both
+	// or none. See test/e2e/ingress_utils for self-signed cert generation.
+	if len(s.Key) == 0 {
+		return fmt.Errorf("key must be specified")
+	}
+	if len(s.Cert) == 0 {
+		return fmt.Errorf("certificate must be specified")
+	}
+	if _, err := tls.LoadX509KeyPair(s.Cert, s.Key); err != nil {
+		return fmt.Errorf("failed to load key pair: %v", err)
+	}
+	// TODO: Add more validation.
+	// 1. If the certificate contains intermediates, it is a valid chain.
+	// 2. Format etc.
+	return nil
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/secret_for_tls_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/secret_for_tls_test.go
new file mode 100644
index 000000000000..f24403fd2214
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/secret_for_tls_test.go
@@ -0,0 +1,204 @@
+/*
+Copyright 2015 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
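With validate and StructuredGenerate in place, the construction-time path has a compact contract: both file paths must be set and must load as a matching pair before any Secret is built. A hypothetical call site inside the kubectl package (names and paths are placeholders):

func buildTLSSecretExample() (*api.Secret, error) {
	gen := SecretForTLSGeneratorV1{
		Name: "my-tls-secret", // hypothetical name
		Key:  "/tmp/tls.key",  // placeholder path
		Cert: "/tmp/tls.crt",  // placeholder path
	}
	obj, err := gen.StructuredGenerate() // fails fast on a missing or mismatched pair
	if err != nil {
		return nil, err
	}
	// Type is api.SecretTypeTLS; Data carries both PEM blobs under
	// api.TLSCertKey and api.TLSPrivateKeyKey.
	return obj.(*api.Secret), nil
}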
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubectl + +import ( + "fmt" + "os" + "path" + "reflect" + "testing" + + "k8s.io/kubernetes/pkg/api" + utiltesting "k8s.io/kubernetes/pkg/util/testing" +) + +var rsaCertPEM = `-----BEGIN CERTIFICATE----- +MIIB0zCCAX2gAwIBAgIJAI/M7BYjwB+uMA0GCSqGSIb3DQEBBQUAMEUxCzAJBgNV +BAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEwHwYDVQQKDBhJbnRlcm5ldCBX +aWRnaXRzIFB0eSBMdGQwHhcNMTIwOTEyMjE1MjAyWhcNMTUwOTEyMjE1MjAyWjBF +MQswCQYDVQQGEwJBVTETMBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50 +ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBANLJ +hPHhITqQbPklG3ibCVxwGMRfp/v4XqhfdQHdcVfHap6NQ5Wok/4xIA+ui35/MmNa +rtNuC+BdZ1tMuVCPFZcCAwEAAaNQME4wHQYDVR0OBBYEFJvKs8RfJaXTH08W+SGv +zQyKn0H8MB8GA1UdIwQYMBaAFJvKs8RfJaXTH08W+SGvzQyKn0H8MAwGA1UdEwQF +MAMBAf8wDQYJKoZIhvcNAQEFBQADQQBJlffJHybjDGxRMqaRmDhX0+6v02TUKZsW +r5QuVbpQhH6u+0UgcW0jp9QwpxoPTLTWGXEWBBBurxFwiCBhkQ+V +-----END CERTIFICATE----- +` + +var rsaKeyPEM = `-----BEGIN RSA PRIVATE KEY----- +MIIBOwIBAAJBANLJhPHhITqQbPklG3ibCVxwGMRfp/v4XqhfdQHdcVfHap6NQ5Wo +k/4xIA+ui35/MmNartNuC+BdZ1tMuVCPFZcCAwEAAQJAEJ2N+zsR0Xn8/Q6twa4G +6OB1M1WO+k+ztnX/1SvNeWu8D6GImtupLTYgjZcHufykj09jiHmjHx8u8ZZB/o1N +MQIhAPW+eyZo7ay3lMz1V01WVjNKK9QSn1MJlb06h/LuYv9FAiEA25WPedKgVyCW +SmUwbPw8fnTcpqDWE3yTO3vKcebqMSsCIBF3UmVue8YU3jybC3NxuXq3wNm34R8T +xVLHwDXh/6NJAiEAl2oHGGLz64BuAfjKrqwz7qMYr9HCLIe/YsoWq/olzScCIQDi +D2lWusoe2/nEqfDVVWGWlyJ7yOmqaVm/iNUN9B2N2g== +-----END RSA PRIVATE KEY----- +` + +const mismatchRSAKeyPEM = `-----BEGIN PRIVATE KEY----- +MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQC/665h55hWD4V2 +kiQ+B/G9NNfBw69eBibEhI9vWkPUyn36GO2r3HPtRE63wBfFpV486ns9DoZnnAYE +JaGjVNCCqS5tQyMBWp843o66KBrEgBpuddChigvyul33FhD1ImFnN+Vy0ajOJ+1/ +Zai28zBXWbxCWEbqz7s8e2UsPlBd0Caj4gcd32yD2BwiHqzB8odToWRUT7l+pS8R +qA1BruQvtjEIrcoWVlE170ZYe7+Apm96A+WvtVRkozPynxHF8SuEiw4hAh0lXR6b +4zZz4tZVV8ev2HpffveV/68GiCyeFDbglqd4sZ/Iga/rwu7bVY/BzFApHwu2hmmV +XLnaa3uVAgMBAAECggEAG+kvnCdtPR7Wvw6z3J2VJ3oW4qQNzfPBEZVhssUC1mB4 +f7W+Yt8VsOzdMdXq3yCUmvFS6OdC3rCPI21Bm5pLFKV8DgHUhm7idwfO4/3PHsKu +lV/m7odAA5Xc8oEwCCZu2e8EHHWnQgwGex+SsMCfSCTRvyhNb/qz9TDQ3uVVFL9e +9a4OKqZl/GlRspJSuXhy+RSVulw9NjeX1VRjIbhqpdXAmQNXgShA+gZSQh8T/tgv +XQYsMtg+FUDvcunJQf4OW5BY7IenYBV/GvsnJU8L7oD0wjNSAwe/iLKqV/NpYhre +QR4DsGnmoRYlUlHdHFTTJpReDjWm+vH3T756yDdFAQKBgQD2/sP5dM/aEW7Z1TgS +TG4ts1t8Rhe9escHxKZQR81dfOxBeCJMBDm6ySfR8rvyUM4VsogxBL/RhRQXsjJM +7wN08MhdiXG0J5yy/oNo8W6euD8m8Mk1UmqcZjSgV4vA7zQkvkr6DRJdybKsT9mE +jouEwev8sceS6iBpPw/+Ws8z1QKBgQDG6uYHMfMcS844xKQQWhargdN2XBzeG6TV +YXfNFstNpD84d9zIbpG/AKJF8fKrseUhXkJhkDjFGJTriD3QQsntOFaDOrHMnveV +zGzvC4OTFUUFHe0SVJ0HuLf8YCHoZ+DXEeCKCN6zBXnUue+bt3NvLOf2yN5o9kYx +SIa8O1vIwQKBgEdONXWG65qg/ceVbqKZvhUjen3eHmxtTZhIhVsX34nlzq73567a +aXArMnvB/9Bs05IgAIFmRZpPOQW+RBdByVWxTabzTwgbh3mFUJqzWKQpvNGZIf1q +1axhNUA1BfulEwCojyyxKWQ6HoLwanOCU3T4JxDEokEfpku8EPn1bWwhAoGAAN8A +eOGYHfSbB5ac3VF3rfKYmXkXy0U1uJV/r888vq9Mc5PazKnnS33WOBYyKNxTk4zV +H5ZBGWPdKxbipmnUdox7nIGCS9IaZXaKt5VGUzuRnM8fvafPNDxz2dAV9e2Wh3qV +kCUvzHrmqK7TxMvN3pvEvEju6GjDr+2QYXylD0ECgYAGK5r+y+EhtKkYFLeYReUt +znvSsWq+JCQH/cmtZLaVOldCaMRL625hSl3XPPcMIHE14xi3d4njoXWzvzPcg8L6 +vNXk3GiNldACS+vwk4CwEqe5YlZRm5doD07wIdsg2zRlnKsnXNM152OwgmcchDul 
+rLTt0TTazzwBCgCD0Jkoqg== +-----END PRIVATE KEY-----` + +func tearDown(tmpDir string) { + err := os.RemoveAll(tmpDir) + if err != nil { + fmt.Printf("Error in cleaning up test: %v", err) + } +} + +func write(path, contents string, t *testing.T) { + f, err := os.Create(path) + if err != nil { + t.Fatalf("Failed to create %v.", path) + } + _, err = f.WriteString(contents) + if err != nil { + t.Fatalf("Failed to write to %v.", path) + } +} + +func writeKeyPair(tmpDirPath, key, cert string, t *testing.T) (keyPath, certPath string) { + keyPath = path.Join(tmpDirPath, "tls.key") + certPath = path.Join(tmpDirPath, "tls.cert") + write(keyPath, key, t) + write(certPath, cert, t) + return +} + +func TestSecretForTLSGenerate(t *testing.T) { + invalidCertTmpDir := utiltesting.MkTmpdirOrDie("tls-test") + defer tearDown(invalidCertTmpDir) + invalidKeyPath, invalidCertPath := writeKeyPair(invalidCertTmpDir, "test", "test", t) + + validCertTmpDir := utiltesting.MkTmpdirOrDie("tls-test") + defer tearDown(validCertTmpDir) + validKeyPath, validCertPath := writeKeyPair(validCertTmpDir, rsaKeyPEM, rsaCertPEM, t) + + mismatchCertTmpDir := utiltesting.MkTmpdirOrDie("tls-mismatch-test") + defer tearDown(mismatchCertTmpDir) + mismatchKeyPath, mismatchCertPath := writeKeyPair(mismatchCertTmpDir, mismatchRSAKeyPEM, rsaCertPEM, t) + + tests := map[string]struct { + params map[string]interface{} + expected *api.Secret + expectErr bool + }{ + "test-valid-tls-secret": { + params: map[string]interface{}{ + "name": "foo", + "key": validKeyPath, + "cert": validCertPath, + }, + expected: &api.Secret{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + }, + Data: map[string][]byte{ + api.TLSCertKey: []byte(rsaCertPEM), + api.TLSPrivateKeyKey: []byte(rsaKeyPEM), + }, + Type: api.SecretTypeTLS, + }, + expectErr: false, + }, + "test-invalid-key-pair": { + params: map[string]interface{}{ + "name": "foo", + "key": invalidKeyPath, + "cert": invalidCertPath, + }, + expected: &api.Secret{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + }, + Data: map[string][]byte{ + api.TLSCertKey: []byte("test"), + api.TLSPrivateKeyKey: []byte("test"), + }, + Type: api.SecretTypeTLS, + }, + expectErr: true, + }, + "test-mismatched-key-pair": { + params: map[string]interface{}{ + "name": "foo", + "key": mismatchKeyPath, + "cert": mismatchCertPath, + }, + expected: &api.Secret{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + }, + Data: map[string][]byte{ + api.TLSCertKey: []byte(rsaCertPEM), + api.TLSPrivateKeyKey: []byte(mismatchRSAKeyPEM), + }, + Type: api.SecretTypeTLS, + }, + expectErr: true, + }, + "test-missing-required-param": { + params: map[string]interface{}{ + "name": "foo", + "key": "/tmp/foo.key", + }, + expectErr: true, + }, + } + + generator := SecretForTLSGeneratorV1{} + for _, test := range tests { + obj, err := generator.Generate(test.params) + if !test.expectErr && err != nil { + t.Errorf("unexpected error: %v", err) + } + if test.expectErr && err != nil { + continue + } + if !reflect.DeepEqual(obj.(*api.Secret), test.expected) { + t.Errorf("\nexpected:\n%#v\nsaw:\n%#v", test.expected, obj.(*api.Secret)) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/secret_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/secret_test.go new file mode 100644 index 000000000000..944c42356b3c --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/secret_test.go @@ -0,0 +1,124 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubectl + +import ( + "reflect" + "testing" + + "k8s.io/kubernetes/pkg/api" +) + +func TestSecretGenerate(t *testing.T) { + tests := []struct { + params map[string]interface{} + expected *api.Secret + expectErr bool + }{ + { + params: map[string]interface{}{ + "name": "foo", + }, + expected: &api.Secret{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + }, + Data: map[string][]byte{}, + }, + expectErr: false, + }, + { + params: map[string]interface{}{ + "name": "foo", + "type": "my-type", + }, + expected: &api.Secret{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + }, + Data: map[string][]byte{}, + Type: "my-type", + }, + expectErr: false, + }, + { + params: map[string]interface{}{ + "name": "foo", + "from-literal": []string{"key1=value1", "key2=value2"}, + }, + expected: &api.Secret{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + }, + Data: map[string][]byte{ + "key1": []byte("value1"), + "key2": []byte("value2"), + }, + }, + expectErr: false, + }, + { + params: map[string]interface{}{ + "name": "foo", + "from-literal": []string{"key1value1"}, + }, + expectErr: true, + }, + { + params: map[string]interface{}{ + "name": "foo", + "from-file": []string{"key1=/file=2"}, + }, + expectErr: true, + }, + { + params: map[string]interface{}{ + "name": "foo", + "from-file": []string{"key1==value"}, + }, + expectErr: true, + }, + { + params: map[string]interface{}{ + "name": "foo", + "from-literal": []string{"key1==value1"}, + }, + expected: &api.Secret{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + }, + Data: map[string][]byte{ + "key1": []byte("=value1"), + }, + }, + expectErr: false, + }, + } + generator := SecretGeneratorV1{} + for _, test := range tests { + obj, err := generator.Generate(test.params) + if !test.expectErr && err != nil { + t.Errorf("unexpected error: %v", err) + } + if test.expectErr && err != nil { + continue + } + if !reflect.DeepEqual(obj.(*api.Secret), test.expected) { + t.Errorf("\nexpected:\n%#v\nsaw:\n%#v", test.expected, obj.(*api.Secret)) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/service.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/service.go index 57249e463a1b..f67b3a11b49a 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/service.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/service.go @@ -65,6 +65,9 @@ func paramNames() []GeneratorParam { {"load-balancer-ip", false}, {"type", false}, {"protocol", false}, + // protocols will be used to keep port-protocol mapping derived from + // exposed object + {"protocols", false}, {"container-port", false}, // alias of target-port {"target-port", false}, {"port-name", false}, @@ -112,6 +115,15 @@ func generate(genericParams map[string]interface{}) (runtime.Object, error) { // Leave the port unnamed. 
servicePortName = "" } + + protocolsString, found := params["protocols"] + var portProtocolMap map[string]string + if found && len(protocolsString) > 0 { + portProtocolMap, err = ParseProtocols(protocolsString) + if err != nil { + return nil, err + } + } // ports takes precedence over port since it will be // specified only when the user hasn't specified a port // via --port and the exposed object has multiple ports. @@ -122,6 +134,7 @@ func generate(genericParams map[string]interface{}) (runtime.Object, error) { return nil, fmt.Errorf("'port' is a required parameter.") } } + portStringSlice := strings.Split(portString, ",") for i, stillPortString := range portStringSlice { port, err := strconv.Atoi(stillPortString) @@ -134,10 +147,26 @@ func generate(genericParams map[string]interface{}) (runtime.Object, error) { if len(portStringSlice) > 1 { name = fmt.Sprintf("port-%d", i+1) } + protocol := params["protocol"] + + switch { + case len(protocol) == 0 && len(portProtocolMap) == 0: + // Default to TCP, what the flag was doing previously. + protocol = "TCP" + case len(protocol) > 0 && len(portProtocolMap) > 0: + // User has specified the --protocol while exposing a multiprotocol resource + // We should stomp multiple protocols with the one specified ie. do nothing + case len(protocol) == 0 && len(portProtocolMap) > 0: + // no --protocol and we expose a multiprotocol resource + protocol = "TCP" // have the default so we can stay sane + if exposeProtocol, found := portProtocolMap[stillPortString]; found { + protocol = exposeProtocol + } + } ports = append(ports, api.ServicePort{ Name: name, - Port: port, - Protocol: api.Protocol(params["protocol"]), + Port: int32(port), + Protocol: api.Protocol(protocol), }) } @@ -171,7 +200,7 @@ func generate(genericParams map[string]interface{}) (runtime.Object, error) { // should be the same as Port for i := range service.Spec.Ports { port := service.Spec.Ports[i].Port - service.Spec.Ports[i].TargetPort = intstr.FromInt(port) + service.Spec.Ports[i].TargetPort = intstr.FromInt(int(port)) } } if params["create-external-load-balancer"] == "true" { diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/service_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/service_test.go new file mode 100644 index 000000000000..e65df05b5b38 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/service_test.go @@ -0,0 +1,489 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package kubectl + +import ( + "reflect" + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/util/intstr" +) + +func TestGenerateService(t *testing.T) { + tests := []struct { + generator Generator + params map[string]interface{} + expected api.Service + }{ + { + generator: ServiceGeneratorV2{}, + params: map[string]interface{}{ + "selector": "foo=bar,baz=blah", + "name": "test", + "port": "80", + "protocol": "TCP", + "container-port": "1234", + }, + expected: api.Service{ + ObjectMeta: api.ObjectMeta{ + Name: "test", + }, + Spec: api.ServiceSpec{ + Selector: map[string]string{ + "foo": "bar", + "baz": "blah", + }, + Ports: []api.ServicePort{ + { + Port: 80, + Protocol: "TCP", + TargetPort: intstr.FromInt(1234), + }, + }, + }, + }, + }, + { + + generator: ServiceGeneratorV2{}, + params: map[string]interface{}{ + "selector": "foo=bar,baz=blah", + "name": "test", + "port": "80", + "protocol": "UDP", + "container-port": "foobar", + }, + expected: api.Service{ + ObjectMeta: api.ObjectMeta{ + Name: "test", + }, + Spec: api.ServiceSpec{ + Selector: map[string]string{ + "foo": "bar", + "baz": "blah", + }, + Ports: []api.ServicePort{ + { + Port: 80, + Protocol: "UDP", + TargetPort: intstr.FromString("foobar"), + }, + }, + }, + }, + }, + { + generator: ServiceGeneratorV2{}, + params: map[string]interface{}{ + "selector": "foo=bar,baz=blah", + "labels": "key1=value1,key2=value2", + "name": "test", + "port": "80", + "protocol": "TCP", + "container-port": "1234", + }, + expected: api.Service{ + ObjectMeta: api.ObjectMeta{ + Name: "test", + Labels: map[string]string{ + "key1": "value1", + "key2": "value2", + }, + }, + Spec: api.ServiceSpec{ + Selector: map[string]string{ + "foo": "bar", + "baz": "blah", + }, + Ports: []api.ServicePort{ + { + Port: 80, + Protocol: "TCP", + TargetPort: intstr.FromInt(1234), + }, + }, + }, + }, + }, + { + generator: ServiceGeneratorV2{}, + params: map[string]interface{}{ + "selector": "foo=bar,baz=blah", + "name": "test", + "port": "80", + "protocol": "UDP", + "container-port": "foobar", + "external-ip": "1.2.3.4", + }, + expected: api.Service{ + ObjectMeta: api.ObjectMeta{ + Name: "test", + }, + Spec: api.ServiceSpec{ + Selector: map[string]string{ + "foo": "bar", + "baz": "blah", + }, + Ports: []api.ServicePort{ + { + Port: 80, + Protocol: "UDP", + TargetPort: intstr.FromString("foobar"), + }, + }, + ExternalIPs: []string{"1.2.3.4"}, + }, + }, + }, + { + generator: ServiceGeneratorV2{}, + params: map[string]interface{}{ + "selector": "foo=bar,baz=blah", + "name": "test", + "port": "80", + "protocol": "UDP", + "container-port": "foobar", + "external-ip": "1.2.3.4", + "create-external-load-balancer": "true", + }, + expected: api.Service{ + ObjectMeta: api.ObjectMeta{ + Name: "test", + }, + Spec: api.ServiceSpec{ + Selector: map[string]string{ + "foo": "bar", + "baz": "blah", + }, + Ports: []api.ServicePort{ + { + Port: 80, + Protocol: "UDP", + TargetPort: intstr.FromString("foobar"), + }, + }, + Type: api.ServiceTypeLoadBalancer, + ExternalIPs: []string{"1.2.3.4"}, + }, + }, + }, + { + generator: ServiceGeneratorV2{}, + params: map[string]interface{}{ + "selector": "foo=bar,baz=blah", + "name": "test", + "port": "80", + "protocol": "UDP", + "container-port": "foobar", + "type": string(api.ServiceTypeNodePort), + }, + expected: api.Service{ + ObjectMeta: api.ObjectMeta{ + Name: "test", + }, + Spec: api.ServiceSpec{ + Selector: map[string]string{ + "foo": "bar", + "baz": "blah", + }, + Ports: []api.ServicePort{ + { + Port: 80, + Protocol: "UDP", + 
TargetPort: intstr.FromString("foobar"), + }, + }, + Type: api.ServiceTypeNodePort, + }, + }, + }, + { + generator: ServiceGeneratorV2{}, + params: map[string]interface{}{ + "selector": "foo=bar,baz=blah", + "name": "test", + "port": "80", + "protocol": "UDP", + "container-port": "foobar", + "create-external-load-balancer": "true", // ignored when type is present + "type": string(api.ServiceTypeNodePort), + }, + expected: api.Service{ + ObjectMeta: api.ObjectMeta{ + Name: "test", + }, + Spec: api.ServiceSpec{ + Selector: map[string]string{ + "foo": "bar", + "baz": "blah", + }, + Ports: []api.ServicePort{ + { + Port: 80, + Protocol: "UDP", + TargetPort: intstr.FromString("foobar"), + }, + }, + Type: api.ServiceTypeNodePort, + }, + }, + }, + { + generator: ServiceGeneratorV1{}, + params: map[string]interface{}{ + "selector": "foo=bar,baz=blah", + "name": "test", + "port": "80", + "protocol": "TCP", + "container-port": "1234", + }, + expected: api.Service{ + ObjectMeta: api.ObjectMeta{ + Name: "test", + }, + Spec: api.ServiceSpec{ + Selector: map[string]string{ + "foo": "bar", + "baz": "blah", + }, + Ports: []api.ServicePort{ + { + Name: "default", + Port: 80, + Protocol: "TCP", + TargetPort: intstr.FromInt(1234), + }, + }, + }, + }, + }, + { + generator: ServiceGeneratorV1{}, + params: map[string]interface{}{ + "selector": "foo=bar,baz=blah", + "name": "test", + "port": "80", + "protocol": "TCP", + "container-port": "1234", + "session-affinity": "ClientIP", + }, + expected: api.Service{ + ObjectMeta: api.ObjectMeta{ + Name: "test", + }, + Spec: api.ServiceSpec{ + Selector: map[string]string{ + "foo": "bar", + "baz": "blah", + }, + Ports: []api.ServicePort{ + { + Name: "default", + Port: 80, + Protocol: "TCP", + TargetPort: intstr.FromInt(1234), + }, + }, + SessionAffinity: api.ServiceAffinityClientIP, + }, + }, + }, + { + generator: ServiceGeneratorV1{}, + params: map[string]interface{}{ + "selector": "foo=bar", + "name": "test", + "ports": "80,443", + "protocol": "TCP", + "container-port": "foobar", + }, + expected: api.Service{ + ObjectMeta: api.ObjectMeta{ + Name: "test", + }, + Spec: api.ServiceSpec{ + Selector: map[string]string{ + "foo": "bar", + }, + Ports: []api.ServicePort{ + { + Name: "port-1", + Port: 80, + Protocol: api.ProtocolTCP, + TargetPort: intstr.FromString("foobar"), + }, + { + Name: "port-2", + Port: 443, + Protocol: api.ProtocolTCP, + TargetPort: intstr.FromString("foobar"), + }, + }, + }, + }, + }, + { + generator: ServiceGeneratorV2{}, + params: map[string]interface{}{ + "selector": "foo=bar", + "name": "test", + "ports": "80,443", + "protocol": "UDP", + "target-port": "1234", + }, + expected: api.Service{ + ObjectMeta: api.ObjectMeta{ + Name: "test", + }, + Spec: api.ServiceSpec{ + Selector: map[string]string{ + "foo": "bar", + }, + Ports: []api.ServicePort{ + { + Name: "port-1", + Port: 80, + Protocol: api.ProtocolUDP, + TargetPort: intstr.FromInt(1234), + }, + { + Name: "port-2", + Port: 443, + Protocol: api.ProtocolUDP, + TargetPort: intstr.FromInt(1234), + }, + }, + }, + }, + }, + { + generator: ServiceGeneratorV2{}, + params: map[string]interface{}{ + "selector": "foo=bar", + "name": "test", + "ports": "80,443", + "protocol": "TCP", + }, + expected: api.Service{ + ObjectMeta: api.ObjectMeta{ + Name: "test", + }, + Spec: api.ServiceSpec{ + Selector: map[string]string{ + "foo": "bar", + }, + Ports: []api.ServicePort{ + { + Name: "port-1", + Port: 80, + Protocol: api.ProtocolTCP, + TargetPort: intstr.FromInt(80), + }, + { + Name: "port-2", + Port: 443, + Protocol: 
api.ProtocolTCP, + TargetPort: intstr.FromInt(443), + }, + }, + }, + }, + }, + { + generator: ServiceGeneratorV2{}, + params: map[string]interface{}{ + "selector": "foo=bar", + "name": "test", + "ports": "80,8080", + "protocols": "8080/UDP", + }, + expected: api.Service{ + ObjectMeta: api.ObjectMeta{ + Name: "test", + }, + Spec: api.ServiceSpec{ + Selector: map[string]string{ + "foo": "bar", + }, + Ports: []api.ServicePort{ + { + Name: "port-1", + Port: 80, + Protocol: api.ProtocolTCP, + TargetPort: intstr.FromInt(80), + }, + { + Name: "port-2", + Port: 8080, + Protocol: api.ProtocolUDP, + TargetPort: intstr.FromInt(8080), + }, + }, + }, + }, + }, + { + generator: ServiceGeneratorV2{}, + params: map[string]interface{}{ + "selector": "foo=bar", + "name": "test", + "ports": "80,8080,8081", + "protocols": "8080/UDP,8081/TCP", + }, + expected: api.Service{ + ObjectMeta: api.ObjectMeta{ + Name: "test", + }, + Spec: api.ServiceSpec{ + Selector: map[string]string{ + "foo": "bar", + }, + Ports: []api.ServicePort{ + { + Name: "port-1", + Port: 80, + Protocol: api.ProtocolTCP, + TargetPort: intstr.FromInt(80), + }, + { + Name: "port-2", + Port: 8080, + Protocol: api.ProtocolUDP, + TargetPort: intstr.FromInt(8080), + }, + { + Name: "port-3", + Port: 8081, + Protocol: api.ProtocolTCP, + TargetPort: intstr.FromInt(8081), + }, + }, + }, + }, + }, + } + for _, test := range tests { + obj, err := test.generator.Generate(test.params) + if !reflect.DeepEqual(obj, &test.expected) { + t.Errorf("expected:\n%#v\ngot\n%#v\n", &test.expected, obj) + } + if err != nil { + t.Errorf("unexpected error: %v", err) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/serviceaccount_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/serviceaccount_test.go new file mode 100644 index 000000000000..ca000f85f2e5 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/serviceaccount_test.go @@ -0,0 +1,60 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package kubectl + +import ( + "reflect" + "testing" + + "k8s.io/kubernetes/pkg/api" +) + +func TestServiceAccountGenerate(t *testing.T) { + tests := []struct { + name string + expected *api.ServiceAccount + expectErr bool + }{ + { + name: "foo", + expected: &api.ServiceAccount{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + }, + }, + expectErr: false, + }, + { + expectErr: true, + }, + } + for _, test := range tests { + generator := ServiceAccountGeneratorV1{ + Name: test.name, + } + obj, err := generator.StructuredGenerate() + if !test.expectErr && err != nil { + t.Errorf("unexpected error: %v", err) + } + if test.expectErr && err != nil { + continue + } + if !reflect.DeepEqual(obj.(*api.ServiceAccount), test.expected) { + t.Errorf("\nexpected:\n%#v\nsaw:\n%#v", test.expected, obj.(*api.ServiceAccount)) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/sorted_event_list_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/sorted_event_list_test.go new file mode 100644 index 000000000000..471069ed212a --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/sorted_event_list_test.go @@ -0,0 +1,92 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubectl + +import ( + "sort" + "strings" + "testing" + "time" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/unversioned" +) + +// VerifyDatesInOrder checks the start of each line for an RFC1123Z date +// and reports an error if the dates are not equal or increasing +func VerifyDatesInOrder( + resultToTest, rowDelimiter, columnDelimiter string, t *testing.T) { + lines := strings.Split(resultToTest, rowDelimiter) + var previousTime time.Time + for _, str := range lines { + columns := strings.Split(str, columnDelimiter) + if len(columns) > 0 { + currentTime, err := time.Parse(time.RFC1123Z, columns[0]) + if err == nil { + if previousTime.After(currentTime) { + t.Errorf( + "Output is not sorted by time. %s should be listed after %s.
Complete output: %s", + previousTime.Format(time.RFC1123Z), + currentTime.Format(time.RFC1123Z), + resultToTest) + } + previousTime = currentTime + } + } + } + +} + +func TestSortableEvents(t *testing.T) { + // Arrange + list := SortableEvents([]api.Event{ + { + Source: api.EventSource{Component: "kubelet"}, + Message: "Item 1", + FirstTimestamp: unversioned.NewTime(time.Date(2014, time.January, 15, 0, 0, 0, 0, time.UTC)), + LastTimestamp: unversioned.NewTime(time.Date(2014, time.January, 15, 0, 0, 0, 0, time.UTC)), + Count: 1, + Type: api.EventTypeNormal, + }, + { + Source: api.EventSource{Component: "scheduler"}, + Message: "Item 2", + FirstTimestamp: unversioned.NewTime(time.Date(1987, time.June, 17, 0, 0, 0, 0, time.UTC)), + LastTimestamp: unversioned.NewTime(time.Date(1987, time.June, 17, 0, 0, 0, 0, time.UTC)), + Count: 1, + Type: api.EventTypeNormal, + }, + { + Source: api.EventSource{Component: "kubelet"}, + Message: "Item 3", + FirstTimestamp: unversioned.NewTime(time.Date(2002, time.December, 25, 0, 0, 0, 0, time.UTC)), + LastTimestamp: unversioned.NewTime(time.Date(2002, time.December, 25, 0, 0, 0, 0, time.UTC)), + Count: 1, + Type: api.EventTypeNormal, + }, + }) + + // Act + sort.Sort(list) + + // Assert + if list[0].Message != "Item 2" || + list[1].Message != "Item 3" || + list[2].Message != "Item 1" { + t.Fatal("List is not sorted by time. List: ", list) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/sorted_resource_name_list.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/sorted_resource_name_list.go index ffaa08ee46f3..98c67344a3e3 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/sorted_resource_name_list.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/sorted_resource_name_list.go @@ -17,7 +17,10 @@ limitations under the License. package kubectl import ( + "sort" + "k8s.io/kubernetes/pkg/api" + qosutil "k8s.io/kubernetes/pkg/kubelet/qos/util" ) type SortableResourceNames []api.ResourceName @@ -34,6 +37,16 @@ func (list SortableResourceNames) Less(i, j int) bool { return list[i] < list[j] } +// SortedResourceNames returns the sorted resource names of a resource list. +func SortedResourceNames(list api.ResourceList) []api.ResourceName { + resources := make([]api.ResourceName, 0, len(list)) + for res := range list { + resources = append(resources, res) + } + sort.Sort(SortableResourceNames(resources)) + return resources +} + type SortableResourceQuotas []api.ResourceQuota func (list SortableResourceQuotas) Len() int { @@ -47,3 +60,13 @@ func (list SortableResourceQuotas) Swap(i, j int) { func (list SortableResourceQuotas) Less(i, j int) bool { return list[i].Name < list[j].Name } + +// SortedQoSResourceNames returns the sorted resource names of a QoS list. +func SortedQoSResourceNames(list qosutil.QoSList) []api.ResourceName { + resources := make([]api.ResourceName, 0, len(list)) + for res := range list { + resources = append(resources, res) + } + sort.Sort(SortableResourceNames(resources)) + return resources +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/sorted_resource_name_list_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/sorted_resource_name_list_test.go new file mode 100644 index 000000000000..c0e4855584f1 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/sorted_resource_name_list_test.go @@ -0,0 +1,50 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubectl + +import ( + "reflect" + "sort" + "testing" + + "k8s.io/kubernetes/pkg/api" +) + +func TestSortableResourceNamesSorting(t *testing.T) { + want := SortableResourceNames{ + api.ResourceName(""), + api.ResourceName("42"), + api.ResourceName("bar"), + api.ResourceName("foo"), + api.ResourceName("foo"), + api.ResourceName("foobar"), + } + + in := SortableResourceNames{ + api.ResourceName("foo"), + api.ResourceName("42"), + api.ResourceName("foobar"), + api.ResourceName("foo"), + api.ResourceName("bar"), + api.ResourceName(""), + } + + sort.Sort(in) + if !reflect.DeepEqual(in, want) { + t.Errorf("got %v, want %v", in, want) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/sorting_printer.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/sorting_printer.go index 49e36b882e9b..577ee0d9ae66 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/sorting_printer.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/sorting_printer.go @@ -23,8 +23,10 @@ import ( "sort" "k8s.io/kubernetes/pkg/api/meta" + "k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/util/integer" "k8s.io/kubernetes/pkg/util/jsonpath" "github.com/golang/glog" @@ -97,7 +99,7 @@ func SortObjects(decoder runtime.Decoder, objs []runtime.Object, fieldInput stri switch u := item.(type) { case *runtime.Unknown: var err error - if objs[ix], _, err = decoder.Decode(u.RawJSON, nil, nil); err != nil { + if objs[ix], _, err = decoder.Decode(u.Raw, nil, nil); err != nil { return nil, err } } @@ -153,6 +155,29 @@ func isLess(i, j reflect.Value) (bool, error) { return i.String() < j.String(), nil case reflect.Ptr: return isLess(i.Elem(), j.Elem()) + case reflect.Struct: + // sort unversioned.Time + in := i.Interface() + if t, ok := in.(unversioned.Time); ok { + return t.Before(j.Interface().(unversioned.Time)), nil + } + // fall back to field-by-field comparison + for idx := 0; idx < i.NumField(); idx++ { + less, err := isLess(i.Field(idx), j.Field(idx)) + if err != nil || !less { + return less, err + } + } + return true, nil + case reflect.Array, reflect.Slice: + // note: the length of i and j may be different + for idx := 0; idx < integer.IntMin(i.Len(), j.Len()); idx++ { + less, err := isLess(i.Index(idx), j.Index(idx)) + if err != nil || !less { + return less, err + } + } + return true, nil default: return false, fmt.Errorf("unsortable type: %v", i.Kind()) } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/sorting_printer_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/sorting_printer_test.go new file mode 100644 index 000000000000..b9f9f1be9849 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/sorting_printer_test.go @@ -0,0 +1,279 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved.
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubectl + +import ( + "reflect" + "testing" + + internal "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/unversioned" + api "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/runtime" +) + +func encodeOrDie(obj runtime.Object) []byte { + data, err := runtime.Encode(internal.Codecs.LegacyCodec(api.SchemeGroupVersion), obj) + if err != nil { + panic(err.Error()) + } + return data +} + +func TestSortingPrinter(t *testing.T) { + intPtr := func(val int32) *int32 { return &val } + + a := &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: "a", + }, + } + + b := &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: "b", + }, + } + + c := &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: "c", + }, + } + + tests := []struct { + obj runtime.Object + sort runtime.Object + field string + name string + }{ + { + name: "in-order-already", + obj: &api.PodList{ + Items: []api.Pod{ + { + ObjectMeta: api.ObjectMeta{ + Name: "a", + }, + }, + { + ObjectMeta: api.ObjectMeta{ + Name: "b", + }, + }, + { + ObjectMeta: api.ObjectMeta{ + Name: "c", + }, + }, + }, + }, + sort: &api.PodList{ + Items: []api.Pod{ + { + ObjectMeta: api.ObjectMeta{ + Name: "a", + }, + }, + { + ObjectMeta: api.ObjectMeta{ + Name: "b", + }, + }, + { + ObjectMeta: api.ObjectMeta{ + Name: "c", + }, + }, + }, + }, + field: "{.metadata.name}", + }, + { + name: "reverse-order", + obj: &api.PodList{ + Items: []api.Pod{ + { + ObjectMeta: api.ObjectMeta{ + Name: "b", + }, + }, + { + ObjectMeta: api.ObjectMeta{ + Name: "c", + }, + }, + { + ObjectMeta: api.ObjectMeta{ + Name: "a", + }, + }, + }, + }, + sort: &api.PodList{ + Items: []api.Pod{ + { + ObjectMeta: api.ObjectMeta{ + Name: "a", + }, + }, + { + ObjectMeta: api.ObjectMeta{ + Name: "b", + }, + }, + { + ObjectMeta: api.ObjectMeta{ + Name: "c", + }, + }, + }, + }, + field: "{.metadata.name}", + }, + { + name: "random-order-timestamp", + obj: &api.PodList{ + Items: []api.Pod{ + { + ObjectMeta: api.ObjectMeta{ + CreationTimestamp: unversioned.Unix(300, 0), + }, + }, + { + ObjectMeta: api.ObjectMeta{ + CreationTimestamp: unversioned.Unix(100, 0), + }, + }, + { + ObjectMeta: api.ObjectMeta{ + CreationTimestamp: unversioned.Unix(200, 0), + }, + }, + }, + }, + sort: &api.PodList{ + Items: []api.Pod{ + { + ObjectMeta: api.ObjectMeta{ + CreationTimestamp: unversioned.Unix(100, 0), + }, + }, + { + ObjectMeta: api.ObjectMeta{ + CreationTimestamp: unversioned.Unix(200, 0), + }, + }, + { + ObjectMeta: api.ObjectMeta{ + CreationTimestamp: unversioned.Unix(300, 0), + }, + }, + }, + }, + field: "{.metadata.creationTimestamp}", + }, + { + name: "random-order-numbers", + obj: &api.ReplicationControllerList{ + Items: []api.ReplicationController{ + { + Spec: api.ReplicationControllerSpec{ + Replicas: intPtr(5), + }, + }, + { + Spec: api.ReplicationControllerSpec{ + Replicas: intPtr(1), + }, + }, + { + Spec: api.ReplicationControllerSpec{ + Replicas: intPtr(9), + }, + }, + }, + }, + sort: &api.ReplicationControllerList{ + Items: []api.ReplicationController{ + { + Spec: 
api.ReplicationControllerSpec{ + Replicas: intPtr(1), + }, + }, + { + Spec: api.ReplicationControllerSpec{ + Replicas: intPtr(5), + }, + }, + { + Spec: api.ReplicationControllerSpec{ + Replicas: intPtr(9), + }, + }, + }, + }, + field: "{.spec.replicas}", + }, + { + name: "v1.List in order", + obj: &api.List{ + Items: []runtime.RawExtension{ + {Raw: encodeOrDie(a)}, + {Raw: encodeOrDie(b)}, + {Raw: encodeOrDie(c)}, + }, + }, + sort: &api.List{ + Items: []runtime.RawExtension{ + {Raw: encodeOrDie(a)}, + {Raw: encodeOrDie(b)}, + {Raw: encodeOrDie(c)}, + }, + }, + field: "{.metadata.name}", + }, + { + name: "v1.List in reverse", + obj: &api.List{ + Items: []runtime.RawExtension{ + {Raw: encodeOrDie(c)}, + {Raw: encodeOrDie(b)}, + {Raw: encodeOrDie(a)}, + }, + }, + sort: &api.List{ + Items: []runtime.RawExtension{ + {Raw: encodeOrDie(a)}, + {Raw: encodeOrDie(b)}, + {Raw: encodeOrDie(c)}, + }, + }, + field: "{.metadata.name}", + }, + } + for _, test := range tests { + sort := &SortingPrinter{SortField: test.field, Decoder: internal.Codecs.UniversalDecoder()} + if err := sort.sortObj(test.obj); err != nil { + t.Errorf("unexpected error: %v (%s)", err, test.name) + continue + } + if !reflect.DeepEqual(test.obj, test.sort) { + t.Errorf("[%s]\nexpected:\n%v\nsaw:\n%v", test.name, test.sort, test.obj) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/stop.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/stop.go index 9112decf7ed5..d784ef24ce2b 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/stop.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/stop.go @@ -314,9 +314,9 @@ func (reaper *DaemonSetReaper) Stop(namespace, name string, timeout time.Duratio } func (reaper *JobReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) error { - jobs := reaper.Extensions().Jobs(namespace) + jobs := reaper.Batch().Jobs(namespace) pods := reaper.Pods(namespace) - scaler, err := ScalerFor(extensions.Kind("Job"), *reaper) + scaler, err := ScalerFor(batch.Kind("Job"), *reaper) if err != nil { return err } @@ -367,7 +367,7 @@ func (reaper *DeploymentReaper) Stop(namespace, name string, timeout time.Durati deployment, err := reaper.updateDeploymentWithRetries(namespace, name, func(d *extensions.Deployment) { // set deployment's history and scale to 0 // TODO replace with patch when available: https://github.com/kubernetes/kubernetes/issues/20527 - d.Spec.RevisionHistoryLimit = util.IntPtr(0) + d.Spec.RevisionHistoryLimit = util.Int32Ptr(0) d.Spec.Replicas = 0 d.Spec.Paused = true }) @@ -378,7 +378,7 @@ func (reaper *DeploymentReaper) Stop(namespace, name string, timeout time.Durati // Use observedGeneration to determine if the deployment controller noticed the pause. 
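+ // Poll once per second (raised from the previous 10ms) so this wait does not hammer the API server.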
if err := deploymentutil.WaitForObservedDeployment(func() (*extensions.Deployment, error) { return deployments.Get(name) - }, deployment.Generation, 10*time.Millisecond, 1*time.Minute); err != nil { + }, deployment.Generation, 1*time.Second, 1*time.Minute); err != nil { return err } @@ -396,7 +396,8 @@ func (reaper *DeploymentReaper) Stop(namespace, name string, timeout time.Durati errList := []error{} for _, rc := range rsList.Items { if err := rsReaper.Stop(rc.Namespace, rc.Name, timeout, gracePeriod); err != nil { - if !errors.IsNotFound(err) { + scaleGetErr, ok := err.(*ScaleError) + if !errors.IsNotFound(err) || ok && !errors.IsNotFound(scaleGetErr.ActualError) { errList = append(errList, err) } } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/stop_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/stop_test.go new file mode 100644 index 000000000000..dfc161051e73 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/stop_test.go @@ -0,0 +1,717 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubectl + +import ( + "fmt" + "reflect" + "strings" + "testing" + "time" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/apis/batch" + "k8s.io/kubernetes/pkg/apis/extensions" + client "k8s.io/kubernetes/pkg/client/unversioned" + "k8s.io/kubernetes/pkg/client/unversioned/testclient" + "k8s.io/kubernetes/pkg/runtime" + deploymentutil "k8s.io/kubernetes/pkg/util/deployment" +) + +func TestReplicationControllerStop(t *testing.T) { + name := "foo" + ns := "default" + tests := []struct { + Name string + Objs []runtime.Object + StopError error + ExpectedActions []string + }{ + { + Name: "OnlyOneRC", + Objs: []runtime.Object{ + &api.ReplicationController{ // GET + ObjectMeta: api.ObjectMeta{ + Name: name, + Namespace: ns, + }, + Spec: api.ReplicationControllerSpec{ + Replicas: 0, + Selector: map[string]string{"k1": "v1"}}, + }, + &api.ReplicationControllerList{ // LIST + Items: []api.ReplicationController{ + { + ObjectMeta: api.ObjectMeta{ + Name: name, + Namespace: ns, + }, + Spec: api.ReplicationControllerSpec{ + Replicas: 0, + Selector: map[string]string{"k1": "v1"}}, + }, + }, + }, + }, + StopError: nil, + ExpectedActions: []string{"get", "list", "get", "update", "get", "get", "delete"}, + }, + { + Name: "NoOverlapping", + Objs: []runtime.Object{ + &api.ReplicationController{ // GET + ObjectMeta: api.ObjectMeta{ + Name: name, + Namespace: ns, + }, + Spec: api.ReplicationControllerSpec{ + Replicas: 0, + Selector: map[string]string{"k1": "v1"}}, + }, + &api.ReplicationControllerList{ // LIST + Items: []api.ReplicationController{ + { + ObjectMeta: api.ObjectMeta{ + Name: "baz", + Namespace: ns, + }, + Spec: api.ReplicationControllerSpec{ + Replicas: 0, + Selector: map[string]string{"k3": "v3"}}, + }, + { + ObjectMeta: api.ObjectMeta{ + Name: name, + Namespace: ns, + }, + Spec: api.ReplicationControllerSpec{ + Replicas: 0, + Selector: 
map[string]string{"k1": "v1"}}, + }, + }, + }, + }, + StopError: nil, + ExpectedActions: []string{"get", "list", "get", "update", "get", "get", "delete"}, + }, + { + Name: "OverlappingError", + Objs: []runtime.Object{ + + &api.ReplicationController{ // GET + ObjectMeta: api.ObjectMeta{ + Name: name, + Namespace: ns, + }, + Spec: api.ReplicationControllerSpec{ + Replicas: 0, + Selector: map[string]string{"k1": "v1"}}, + }, + &api.ReplicationControllerList{ // LIST + Items: []api.ReplicationController{ + { + ObjectMeta: api.ObjectMeta{ + Name: "baz", + Namespace: ns, + }, + Spec: api.ReplicationControllerSpec{ + Replicas: 0, + Selector: map[string]string{"k1": "v1", "k2": "v2"}}, + }, + { + ObjectMeta: api.ObjectMeta{ + Name: name, + Namespace: ns, + }, + Spec: api.ReplicationControllerSpec{ + Replicas: 0, + Selector: map[string]string{"k1": "v1"}}, + }, + }, + }, + }, + StopError: fmt.Errorf("Detected overlapping controllers for rc foo: baz, please manage deletion individually with --cascade=false."), + ExpectedActions: []string{"get", "list"}, + }, + + { + Name: "OverlappingButSafeDelete", + Objs: []runtime.Object{ + + &api.ReplicationController{ // GET + ObjectMeta: api.ObjectMeta{ + Name: name, + Namespace: ns, + }, + Spec: api.ReplicationControllerSpec{ + Replicas: 0, + Selector: map[string]string{"k1": "v1", "k2": "v2"}}, + }, + &api.ReplicationControllerList{ // LIST + Items: []api.ReplicationController{ + { + ObjectMeta: api.ObjectMeta{ + Name: "baz", + Namespace: ns, + }, + Spec: api.ReplicationControllerSpec{ + Replicas: 0, + Selector: map[string]string{"k1": "v1", "k2": "v2", "k3": "v3"}}, + }, + { + ObjectMeta: api.ObjectMeta{ + Name: "zaz", + Namespace: ns, + }, + Spec: api.ReplicationControllerSpec{ + Replicas: 0, + Selector: map[string]string{"k1": "v1"}}, + }, + { + ObjectMeta: api.ObjectMeta{ + Name: name, + Namespace: ns, + }, + Spec: api.ReplicationControllerSpec{ + Replicas: 0, + Selector: map[string]string{"k1": "v1", "k2": "v2"}}, + }, + }, + }, + }, + + StopError: fmt.Errorf("Detected overlapping controllers for rc foo: baz,zaz, please manage deletion individually with --cascade=false."), + ExpectedActions: []string{"get", "list"}, + }, + + { + Name: "TwoExactMatchRCs", + Objs: []runtime.Object{ + + &api.ReplicationController{ // GET + ObjectMeta: api.ObjectMeta{ + Name: name, + Namespace: ns, + }, + Spec: api.ReplicationControllerSpec{ + Replicas: 0, + Selector: map[string]string{"k1": "v1"}}, + }, + &api.ReplicationControllerList{ // LIST + Items: []api.ReplicationController{ + { + ObjectMeta: api.ObjectMeta{ + Name: "zaz", + Namespace: ns, + }, + Spec: api.ReplicationControllerSpec{ + Replicas: 0, + Selector: map[string]string{"k1": "v1"}}, + }, + { + ObjectMeta: api.ObjectMeta{ + Name: name, + Namespace: ns, + }, + Spec: api.ReplicationControllerSpec{ + Replicas: 0, + Selector: map[string]string{"k1": "v1"}}, + }, + }, + }, + }, + + StopError: nil, + ExpectedActions: []string{"get", "list", "delete"}, + }, + } + + for _, test := range tests { + fake := testclient.NewSimpleFake(test.Objs...) 
+ reaper := ReplicationControllerReaper{fake, time.Millisecond, time.Millisecond} + err := reaper.Stop(ns, name, 0, nil) + if !reflect.DeepEqual(err, test.StopError) { + t.Errorf("%s unexpected error: %v", test.Name, err) + continue + } + + actions := fake.Actions() + if len(actions) != len(test.ExpectedActions) { + t.Errorf("%s unexpected actions: %v, expected %d actions got %d", test.Name, actions, len(test.ExpectedActions), len(actions)) + continue + } + for i, verb := range test.ExpectedActions { + if actions[i].GetResource() != "replicationcontrollers" { + t.Errorf("%s unexpected action: %+v, expected %s-replicationController", test.Name, actions[i], verb) + } + if actions[i].GetVerb() != verb { + t.Errorf("%s unexpected action: %+v, expected %s-replicationController", test.Name, actions[i], verb) + } + } + } +} + +func TestReplicaSetStop(t *testing.T) { + name := "foo" + ns := "default" + tests := []struct { + Name string + Objs []runtime.Object + StopError error + ExpectedActions []string + }{ + { + Name: "OnlyOneRS", + Objs: []runtime.Object{ + &extensions.ReplicaSet{ // GET + ObjectMeta: api.ObjectMeta{ + Name: name, + Namespace: ns, + }, + Spec: extensions.ReplicaSetSpec{ + Replicas: 0, + Selector: &unversioned.LabelSelector{MatchLabels: map[string]string{"k1": "v1"}}, + }, + }, + &extensions.ReplicaSetList{ // LIST + Items: []extensions.ReplicaSet{ + { + ObjectMeta: api.ObjectMeta{ + Name: name, + Namespace: ns, + }, + Spec: extensions.ReplicaSetSpec{ + Replicas: 0, + Selector: &unversioned.LabelSelector{MatchLabels: map[string]string{"k1": "v1"}}, + }, + }, + }, + }, + }, + StopError: nil, + ExpectedActions: []string{"get", "get", "update", "get", "get", "delete"}, + }, + { + Name: "NoOverlapping", + Objs: []runtime.Object{ + &extensions.ReplicaSet{ // GET + ObjectMeta: api.ObjectMeta{ + Name: name, + Namespace: ns, + }, + Spec: extensions.ReplicaSetSpec{ + Replicas: 0, + Selector: &unversioned.LabelSelector{MatchLabels: map[string]string{"k1": "v1"}}, + }, + }, + &extensions.ReplicaSetList{ // LIST + Items: []extensions.ReplicaSet{ + { + ObjectMeta: api.ObjectMeta{ + Name: "baz", + Namespace: ns, + }, + Spec: extensions.ReplicaSetSpec{ + Replicas: 0, + Selector: &unversioned.LabelSelector{MatchLabels: map[string]string{"k3": "v3"}}, + }, + }, + { + ObjectMeta: api.ObjectMeta{ + Name: name, + Namespace: ns, + }, + Spec: extensions.ReplicaSetSpec{ + Replicas: 0, + Selector: &unversioned.LabelSelector{MatchLabels: map[string]string{"k1": "v1"}}, + }, + }, + }, + }, + }, + StopError: nil, + ExpectedActions: []string{"get", "get", "update", "get", "get", "delete"}, + }, + // TODO: Implement tests for overlapping replica sets, similar to replication controllers, + // when the overlapping checks are implemented for replica sets. + } + + for _, test := range tests { + fake := testclient.NewSimpleFake(test.Objs...) 
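+ // Same pattern as the replication controller test above: run the reaper against a recording fake and verify the actions it took.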
+ reaper := ReplicaSetReaper{fake, time.Millisecond, time.Millisecond} + err := reaper.Stop(ns, name, 0, nil) + if !reflect.DeepEqual(err, test.StopError) { + t.Errorf("%s unexpected error: %v", test.Name, err) + continue + } + + actions := fake.Actions() + if len(actions) != len(test.ExpectedActions) { + t.Errorf("%s unexpected actions: %v, expected %d actions got %d", test.Name, actions, len(test.ExpectedActions), len(actions)) + continue + } + for i, verb := range test.ExpectedActions { + if actions[i].GetResource() != "replicasets" { + t.Errorf("%s unexpected action: %+v, expected %s-replicaSet", test.Name, actions[i], verb) + } + if actions[i].GetVerb() != verb { + t.Errorf("%s unexpected action: %+v, expected %s-replicaSet", test.Name, actions[i], verb) + } + } + } +} + +func TestJobStop(t *testing.T) { + name := "foo" + ns := "default" + zero := int32(0) + tests := []struct { + Name string + Objs []runtime.Object + StopError error + ExpectedActions []string + }{ + { + Name: "OnlyOneJob", + Objs: []runtime.Object{ + &batch.Job{ // GET + ObjectMeta: api.ObjectMeta{ + Name: name, + Namespace: ns, + }, + Spec: batch.JobSpec{ + Parallelism: &zero, + Selector: &unversioned.LabelSelector{ + MatchLabels: map[string]string{"k1": "v1"}, + }, + }, + }, + &batch.JobList{ // LIST + Items: []batch.Job{ + { + ObjectMeta: api.ObjectMeta{ + Name: name, + Namespace: ns, + }, + Spec: batch.JobSpec{ + Parallelism: &zero, + Selector: &unversioned.LabelSelector{ + MatchLabels: map[string]string{"k1": "v1"}, + }, + }, + }, + }, + }, + }, + StopError: nil, + ExpectedActions: []string{"get:jobs", "get:jobs", "update:jobs", + "get:jobs", "get:jobs", "list:pods", "delete:jobs"}, + }, + { + Name: "JobWithDeadPods", + Objs: []runtime.Object{ + &batch.Job{ // GET + ObjectMeta: api.ObjectMeta{ + Name: name, + Namespace: ns, + }, + Spec: batch.JobSpec{ + Parallelism: &zero, + Selector: &unversioned.LabelSelector{ + MatchLabels: map[string]string{"k1": "v1"}, + }, + }, + }, + &batch.JobList{ // LIST + Items: []batch.Job{ + { + ObjectMeta: api.ObjectMeta{ + Name: name, + Namespace: ns, + }, + Spec: batch.JobSpec{ + Parallelism: &zero, + Selector: &unversioned.LabelSelector{ + MatchLabels: map[string]string{"k1": "v1"}, + }, + }, + }, + }, + }, + &api.PodList{ // LIST + Items: []api.Pod{ + { + ObjectMeta: api.ObjectMeta{ + Name: "pod1", + Namespace: ns, + Labels: map[string]string{"k1": "v1"}, + }, + }, + }, + }, + }, + StopError: nil, + ExpectedActions: []string{"get:jobs", "get:jobs", "update:jobs", + "get:jobs", "get:jobs", "list:pods", "delete:pods", "delete:jobs"}, + }, + } + + for _, test := range tests { + fake := testclient.NewSimpleFake(test.Objs...) 
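+ // JobReaper now goes through the batch API group (see the reaper.Batch().Jobs change in stop.go above), so the expectations are verb:resource pairs over jobs and pods.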
+ reaper := JobReaper{fake, time.Millisecond, time.Millisecond} + err := reaper.Stop(ns, name, 0, nil) + if !reflect.DeepEqual(err, test.StopError) { + t.Errorf("%s unexpected error: %v", test.Name, err) + continue + } + + actions := fake.Actions() + if len(actions) != len(test.ExpectedActions) { + t.Errorf("%s unexpected actions: %v, expected %d actions got %d", test.Name, actions, len(test.ExpectedActions), len(actions)) + continue + } + for i, expAction := range test.ExpectedActions { + action := strings.Split(expAction, ":") + if actions[i].GetVerb() != action[0] { + t.Errorf("%s unexpected verb: %+v, expected %s", test.Name, actions[i], expAction) + } + if actions[i].GetResource() != action[1] { + t.Errorf("%s unexpected resource: %+v, expected %s", test.Name, actions[i], expAction) + } + } + } +} + +func TestDeploymentStop(t *testing.T) { + name := "foo" + ns := "default" + deployment := extensions.Deployment{ + ObjectMeta: api.ObjectMeta{ + Name: name, + Namespace: ns, + }, + Spec: extensions.DeploymentSpec{ + Replicas: 0, + Selector: &unversioned.LabelSelector{MatchLabels: map[string]string{"k1": "v1"}}, + }, + Status: extensions.DeploymentStatus{ + Replicas: 0, + }, + } + template := deploymentutil.GetNewReplicaSetTemplate(&deployment) + tests := []struct { + Name string + Objs []runtime.Object + StopError error + ExpectedActions []string + }{ + { + Name: "SimpleDeployment", + Objs: []runtime.Object{ + &extensions.Deployment{ // GET + ObjectMeta: api.ObjectMeta{ + Name: name, + Namespace: ns, + }, + Spec: extensions.DeploymentSpec{ + Replicas: 0, + Selector: &unversioned.LabelSelector{MatchLabels: map[string]string{"k1": "v1"}}, + }, + Status: extensions.DeploymentStatus{ + Replicas: 0, + }, + }, + }, + StopError: nil, + ExpectedActions: []string{"get:deployments", "update:deployments", + "get:deployments", "list:replicasets", "delete:deployments"}, + }, + { + Name: "Deployment with single replicaset", + Objs: []runtime.Object{ + &deployment, // GET + &extensions.ReplicaSetList{ // LIST + Items: []extensions.ReplicaSet{ + { + ObjectMeta: api.ObjectMeta{ + Name: name, + Namespace: ns, + }, + Spec: extensions.ReplicaSetSpec{ + Template: template, + }, + }, + }, + }, + }, + StopError: nil, + ExpectedActions: []string{"get:deployments", "update:deployments", + "get:deployments", "list:replicasets", "get:replicasets", + "get:replicasets", "update:replicasets", "get:replicasets", + "get:replicasets", "delete:replicasets", "delete:deployments"}, + }, + } + + for _, test := range tests { + fake := testclient.NewSimpleFake(test.Objs...) 
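+ // DeploymentReaper pauses and scales down the deployment, reaps its replica sets, and only then deletes the deployment itself, hence the longer action lists here.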
+ reaper := DeploymentReaper{fake, time.Millisecond, time.Millisecond} + err := reaper.Stop(ns, name, 0, nil) + if !reflect.DeepEqual(err, test.StopError) { + t.Errorf("%s unexpected error: %v", test.Name, err) + continue + } + + actions := fake.Actions() + if len(actions) != len(test.ExpectedActions) { + t.Errorf("%s unexpected actions: %v, expected %d actions got %d", test.Name, actions, len(test.ExpectedActions), len(actions)) + continue + } + for i, expAction := range test.ExpectedActions { + action := strings.Split(expAction, ":") + if actions[i].GetVerb() != action[0] { + t.Errorf("%s unexpected verb: %+v, expected %s", test.Name, actions[i], expAction) + } + if actions[i].GetResource() != action[1] { + t.Errorf("%s unexpected resource: %+v, expected %s", test.Name, actions[i], expAction) + } + if len(action) == 3 && actions[i].GetSubresource() != action[2] { + t.Errorf("%s unexpected subresource: %+v, expected %s", test.Name, actions[i], expAction) + } + } + } +} + +type noSuchPod struct { + *testclient.FakePods +} + +func (c *noSuchPod) Get(name string) (*api.Pod, error) { + return nil, fmt.Errorf("%s does not exist", name) +} + +type noDeleteService struct { + *testclient.FakeServices +} + +func (c *noDeleteService) Delete(service string) error { + return fmt.Errorf("I'm afraid I can't do that, Dave") +} + +type reaperFake struct { + *testclient.Fake + noSuchPod, noDeleteService bool +} + +func (c *reaperFake) Pods(namespace string) client.PodInterface { + pods := &testclient.FakePods{Fake: c.Fake, Namespace: namespace} + if c.noSuchPod { + return &noSuchPod{pods} + } + return pods +} + +func (c *reaperFake) Services(namespace string) client.ServiceInterface { + services := &testclient.FakeServices{Fake: c.Fake, Namespace: namespace} + if c.noDeleteService { + return &noDeleteService{services} + } + return services +} + +func TestSimpleStop(t *testing.T) { + tests := []struct { + fake *reaperFake + kind unversioned.GroupKind + actions []testclient.Action + expectError bool + test string + }{ + { + fake: &reaperFake{ + Fake: &testclient.Fake{}, + }, + kind: api.Kind("Pod"), + actions: []testclient.Action{ + testclient.NewGetAction("pods", api.NamespaceDefault, "foo"), + testclient.NewDeleteAction("pods", api.NamespaceDefault, "foo"), + }, + expectError: false, + test: "stop pod succeeds", + }, + { + fake: &reaperFake{ + Fake: &testclient.Fake{}, + }, + kind: api.Kind("Service"), + actions: []testclient.Action{ + testclient.NewGetAction("services", api.NamespaceDefault, "foo"), + testclient.NewDeleteAction("services", api.NamespaceDefault, "foo"), + }, + expectError: false, + test: "stop service succeeds", + }, + { + fake: &reaperFake{ + Fake: &testclient.Fake{}, + noSuchPod: true, + }, + kind: api.Kind("Pod"), + actions: []testclient.Action{}, + expectError: true, + test: "stop pod fails, no pod", + }, + { + fake: &reaperFake{ + Fake: &testclient.Fake{}, + noDeleteService: true, + }, + kind: api.Kind("Service"), + actions: []testclient.Action{ + testclient.NewGetAction("services", api.NamespaceDefault, "foo"), + }, + expectError: true, + test: "stop service fails, can't delete", + }, + } + for _, test := range tests { + fake := test.fake + reaper, err := ReaperFor(test.kind, fake) + if err != nil { + t.Errorf("unexpected error: %v (%s)", err, test.test) + } + err = reaper.Stop("default", "foo", 0, nil) + if err != nil && !test.expectError { + t.Errorf("unexpected error: %v (%s)", err, test.test) + } + if err == nil { + if test.expectError { + t.Errorf("unexpected non-error: %v 
(%s)", err, test.test) + } + } + actions := fake.Actions() + if len(test.actions) != len(actions) { + t.Errorf("unexpected actions: %v; expected %v (%s)", actions, test.actions, test.test) + } + for i, action := range actions { + testAction := test.actions[i] + if action != testAction { + t.Errorf("unexpected action: %#v; expected %v (%s)", action, testAction, test.test) + } + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/testing/types.generated.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/testing/types.generated.go index b3dd3628da06..6b0facba0889 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/testing/types.generated.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/testing/types.generated.go @@ -1,5 +1,5 @@ /* -Copyright 2015 The Kubernetes Authors All rights reserved. +Copyright 2016 The Kubernetes Authors All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/OWNERS b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/OWNERS new file mode 100644 index 000000000000..c0b8d3ac6db0 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/OWNERS @@ -0,0 +1,4 @@ +assignees: + - dchen1107 + - vishh + - yujuhong diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/stats/types.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/stats/types.go new file mode 100644 index 000000000000..90c566d9d3a7 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/stats/types.go @@ -0,0 +1,215 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package stats + +import ( + "k8s.io/kubernetes/pkg/api/unversioned" +) + +// Summary is a top-level container for holding NodeStats and PodStats. +type Summary struct { + // Overall node stats. + Node NodeStats `json:"node"` + // Per-pod stats. + Pods []PodStats `json:"pods"` +} + +// NodeStats holds node-level unprocessed sample stats. +type NodeStats struct { + // Reference to the measured Node. + NodeName string `json:"nodeName"` + // Stats of system daemons tracked as raw containers. + // The system containers are named according to the SystemContainer* constants. + SystemContainers []ContainerStats `json:"systemContainers,omitempty" patchStrategy:"merge" patchMergeKey:"name"` + // The time at which data collection for the node-scoped (i.e. aggregate) stats was (re)started. + StartTime unversioned.Time `json:"startTime"` + // Stats pertaining to CPU resources. + CPU *CPUStats `json:"cpu,omitempty"` + // Stats pertaining to memory (RAM) resources. + Memory *MemoryStats `json:"memory,omitempty"` + // Stats pertaining to network resources. + Network *NetworkStats `json:"network,omitempty"` + // Stats pertaining to total usage of filesystem resources on the rootfs used by node k8s components. 
+ // NodeFs.Used is the total bytes used on the filesystem. + Fs *FsStats `json:"fs,omitempty"` + // Stats about the underlying container runtime. + Runtime *RuntimeStats `json:"runtime,omitempty"` +} + +// Stats pertaining to the underlying container runtime. +type RuntimeStats struct { + // Stats about the underlying filesystem where container images are stored. + // This filesystem could be the same as the primary (root) filesystem. + // Usage here refers to the total number of bytes occupied by images on the filesystem. + ImageFs *FsStats `json:"imageFs,omitempty"` +} + +const ( + // Container name for the system container tracking Kubelet usage. + SystemContainerKubelet = "kubelet" + // Container name for the system container tracking the runtime (e.g. docker or rkt) usage. + SystemContainerRuntime = "runtime" + // Container name for the system container tracking non-kubernetes processes. + SystemContainerMisc = "misc" +) + +// PodStats holds pod-level unprocessed sample stats. +type PodStats struct { + // Reference to the measured Pod. + PodRef PodReference `json:"podRef"` + // The time at which data collection for the pod-scoped (e.g. network) stats was (re)started. + StartTime unversioned.Time `json:"startTime"` + // Stats of containers in the measured pod. + Containers []ContainerStats `json:"containers" patchStrategy:"merge" patchMergeKey:"name"` + // Stats pertaining to network resources. + Network *NetworkStats `json:"network,omitempty"` + // Stats pertaining to volume usage of filesystem resources. + // VolumeStats.UsedBytes is the number of bytes used by the Volume. + VolumeStats []VolumeStats `json:"volume,omitempty" patchStrategy:"merge" patchMergeKey:"name"` +} + +// ContainerStats holds container-level unprocessed sample stats. +type ContainerStats struct { + // Reference to the measured container. + Name string `json:"name"` + // The time at which data collection for this container was (re)started. + StartTime unversioned.Time `json:"startTime"` + // Stats pertaining to CPU resources. + CPU *CPUStats `json:"cpu,omitempty"` + // Stats pertaining to memory (RAM) resources. + Memory *MemoryStats `json:"memory,omitempty"` + // Stats pertaining to container rootfs usage of filesystem resources. + // Rootfs.UsedBytes is the number of bytes used for the container write layer. + Rootfs *FsStats `json:"rootfs,omitempty"` + // Stats pertaining to container logs usage of filesystem resources. + // Logs.UsedBytes is the number of bytes used for the container logs. + Logs *FsStats `json:"logs,omitempty"` + // User defined metrics that are exposed by containers in the pod. Typically, we expect only one container in the pod to be exposing user defined metrics. In the event of multiple containers exposing metrics, they will be combined here. + UserDefinedMetrics []UserDefinedMetric `json:"userDefinedMetrics,omitempty" patchStrategy:"merge" patchMergeKey:"name"` +} + +// PodReference contains enough information to locate the referenced pod. +type PodReference struct { + Name string `json:"name"` + Namespace string `json:"namespace"` + UID string `json:"uid"` +} + +// NetworkStats contains data about network resources. +type NetworkStats struct { + // The time at which these stats were updated. + Time unversioned.Time `json:"time"` + // Cumulative count of bytes received. + RxBytes *uint64 `json:"rxBytes,omitempty"` + // Cumulative count of receive errors encountered. + RxErrors *uint64 `json:"rxErrors,omitempty"` + // Cumulative count of bytes transmitted.
+ TxBytes *uint64 `json:"txBytes,omitempty"` + // Cumulative count of transmit errors encountered. + TxErrors *uint64 `json:"txErrors,omitempty"` +} + +// CPUStats contains data about CPU usage. +type CPUStats struct { + // The time at which these stats were updated. + Time unversioned.Time `json:"time"` + // Total CPU usage (sum of all cores) averaged over the sample window. + // The "core" unit can be interpreted as CPU core-nanoseconds per second. + UsageNanoCores *uint64 `json:"usageNanoCores,omitempty"` + // Cumulative CPU usage (sum of all cores) since object creation. + UsageCoreNanoSeconds *uint64 `json:"usageCoreNanoSeconds,omitempty"` +} + +// MemoryStats contains data about memory usage. +type MemoryStats struct { + // The time at which these stats were updated. + Time unversioned.Time `json:"time"` + // Available memory for use. This is defined as the memory limit - workingSetBytes. + // If the memory limit is undefined, the available bytes field is omitted. + AvailableBytes *uint64 `json:"availableBytes,omitempty"` + // Total memory in use. This includes all memory regardless of when it was accessed. + UsageBytes *uint64 `json:"usageBytes,omitempty"` + // The amount of working set memory. This includes recently accessed memory, + // dirty memory, and kernel memory. WorkingSetBytes is <= UsageBytes. + WorkingSetBytes *uint64 `json:"workingSetBytes,omitempty"` + // The amount of anonymous and swap cache memory (includes transparent + // hugepages). + RSSBytes *uint64 `json:"rssBytes,omitempty"` + // Cumulative number of minor page faults. + PageFaults *uint64 `json:"pageFaults,omitempty"` + // Cumulative number of major page faults. + MajorPageFaults *uint64 `json:"majorPageFaults,omitempty"` +} + +// VolumeStats contains data about Volume filesystem usage. +type VolumeStats struct { + // Embedded FsStats + FsStats + // Name is the name given to the Volume. + Name string `json:"name,omitempty"` +} + +// FsStats contains data about filesystem usage. +type FsStats struct { + // AvailableBytes represents the storage space available (bytes) for the filesystem. + AvailableBytes *uint64 `json:"availableBytes,omitempty"` + // CapacityBytes represents the total capacity (bytes) of the filesystem's underlying storage. + CapacityBytes *uint64 `json:"capacityBytes,omitempty"` + // UsedBytes represents the bytes used for a specific task on the filesystem. + // This may differ from the total bytes used on the filesystem and may not equal CapacityBytes - AvailableBytes. + // e.g. For ContainerStats.Rootfs this is the bytes used by the container rootfs on the filesystem. + UsedBytes *uint64 `json:"usedBytes,omitempty"` +} + +// UserDefinedMetricType defines how the metric should be interpreted by the user. +type UserDefinedMetricType string + +const ( + // Instantaneous value. May increase or decrease. + MetricGauge UserDefinedMetricType = "gauge" + + // A counter-like value that is only expected to increase. + MetricCumulative UserDefinedMetricType = "cumulative" + + // Rate over a time period. + MetricDelta UserDefinedMetricType = "delta" +) + +// UserDefinedMetricDescriptor contains metadata that describes a user defined metric. +type UserDefinedMetricDescriptor struct { + // The name of the metric. + Name string `json:"name"` + + // Type of the metric. + Type UserDefinedMetricType `json:"type"` + + // Display Units for the stats. + Units string `json:"units"` + + // Metadata labels associated with this metric.
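+ // Hypothetical example: {"app": "frontend", "tier": "web"}.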
+ Labels map[string]string `json:"labels,omitempty"` +} + +// UserDefinedMetric represents a metric defined and generated by users. +type UserDefinedMetric struct { + UserDefinedMetricDescriptor `json:",inline"` + // The time at which these stats were updated. + Time unversioned.Time `json:"time"` + // Value of the metric. Float64s have 53-bit precision. + // We do not foresee any metrics exceeding that value. + Value float64 `json:"value"` +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/cadvisor/cadvisor_linux.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/cadvisor/cadvisor_linux.go new file mode 100644 index 000000000000..6ba118b37198 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/cadvisor/cadvisor_linux.go @@ -0,0 +1,216 @@ +// +build cgo,linux + +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cadvisor + +import ( + "flag" + "fmt" + "net/http" + "regexp" + "time" + + "github.com/golang/glog" + "github.com/google/cadvisor/cache/memory" + cadvisorMetrics "github.com/google/cadvisor/container" + "github.com/google/cadvisor/events" + cadvisorfs "github.com/google/cadvisor/fs" + cadvisorhttp "github.com/google/cadvisor/http" + cadvisorapi "github.com/google/cadvisor/info/v1" + cadvisorapiv2 "github.com/google/cadvisor/info/v2" + "github.com/google/cadvisor/manager" + "github.com/google/cadvisor/utils/sysfs" + "k8s.io/kubernetes/pkg/util/runtime" +) + +type cadvisorClient struct { + runtime string + manager.Manager +} + +var _ Interface = new(cadvisorClient) + +// TODO(vmarmol): Make configurable. +// The amount of time for which to keep stats in memory. +const statsCacheDuration = 2 * time.Minute +const maxHousekeepingInterval = 15 * time.Second +const defaultHousekeepingInterval = 10 * time.Second +const allowDynamicHousekeeping = true + +func init() { + // Override cAdvisor flag defaults. + flagOverrides := map[string]string{ + // Override the default cAdvisor housekeeping interval. + "housekeeping_interval": defaultHousekeepingInterval.String(), + // Disable event storage by default. + "event_storage_event_limit": "default=0", + "event_storage_age_limit": "default=0", + } + for name, defaultValue := range flagOverrides { + if f := flag.Lookup(name); f != nil { + f.DefValue = defaultValue + f.Value.Set(defaultValue) + } else { + glog.Errorf("Expected cAdvisor flag %q not found", name) + } + } +} + +// Creates a cAdvisor and exports its API on the specified port if port > 0. +func New(port uint, runtime string) (Interface, error) { + sysFs, err := sysfs.NewRealSysFs() + if err != nil { + return nil, err + } + + // Create and start the cAdvisor container manager.
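+ // Samples are cached in memory for statsCacheDuration; TCP usage collection is opted into explicitly via the MetricSet argument.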
+	m, err := manager.New(memory.New(statsCacheDuration, nil), sysFs, maxHousekeepingInterval, allowDynamicHousekeeping, cadvisorMetrics.MetricSet{cadvisorMetrics.NetworkTcpUsageMetrics: struct{}{}})
+	if err != nil {
+		return nil, err
+	}
+
+	cadvisorClient := &cadvisorClient{
+		runtime: runtime,
+		Manager: m,
+	}
+
+	err = cadvisorClient.exportHTTP(port)
+	if err != nil {
+		return nil, err
+	}
+	return cadvisorClient, nil
+}
+
+func (cc *cadvisorClient) Start() error {
+	return cc.Manager.Start()
+}
+
+func (cc *cadvisorClient) exportHTTP(port uint) error {
+	// Register the handlers regardless as this registers the prometheus
+	// collector properly.
+	mux := http.NewServeMux()
+	err := cadvisorhttp.RegisterHandlers(mux, cc, "", "", "", "")
+	if err != nil {
+		return err
+	}
+
+	re := regexp.MustCompile(`^k8s_(?P<kubernetes_container_name>[^_\.]+)[^_]+_(?P<kubernetes_pod_name>[^_]+)_(?P<kubernetes_namespace>[^_]+)`)
+	reCaptureNames := re.SubexpNames()
+	cadvisorhttp.RegisterPrometheusHandler(mux, cc, "/metrics", func(name string) map[string]string {
+		extraLabels := map[string]string{}
+		matches := re.FindStringSubmatch(name)
+		for i, match := range matches {
+			if len(reCaptureNames[i]) > 0 {
+				extraLabels[reCaptureNames[i]] = match
+			}
+		}
+		return extraLabels
+	})
+
+	// Only start the http server if port > 0.
+	if port > 0 {
+		serv := &http.Server{
+			Addr:    fmt.Sprintf(":%d", port),
+			Handler: mux,
+		}
+
+		// TODO(vmarmol): Remove this when the cAdvisor port is once again free.
+		// If export failed, retry in the background until we are able to bind.
+		// This allows an existing cAdvisor to be killed before this one registers.
+		go func() {
+			defer runtime.HandleCrash()
+
+			err := serv.ListenAndServe()
+			for err != nil {
+				glog.Infof("Failed to register cAdvisor on port %d, retrying. Error: %v", port, err)
+				time.Sleep(time.Minute)
+				err = serv.ListenAndServe()
+			}
+		}()
+	}
+
+	return nil
+}
+
+func (cc *cadvisorClient) ContainerInfo(name string, req *cadvisorapi.ContainerInfoRequest) (*cadvisorapi.ContainerInfo, error) {
+	return cc.GetContainerInfo(name, req)
+}
+
+func (cc *cadvisorClient) ContainerInfoV2(name string, options cadvisorapiv2.RequestOptions) (map[string]cadvisorapiv2.ContainerInfo, error) {
+	return cc.GetContainerInfoV2(name, options)
+}
+
+func (cc *cadvisorClient) VersionInfo() (*cadvisorapi.VersionInfo, error) {
+	return cc.GetVersionInfo()
+}
+
+func (cc *cadvisorClient) SubcontainerInfo(name string, req *cadvisorapi.ContainerInfoRequest) (map[string]*cadvisorapi.ContainerInfo, error) {
+	infos, err := cc.SubcontainersInfo(name, req)
+	if err != nil && len(infos) == 0 {
+		return nil, err
+	}
+
+	result := make(map[string]*cadvisorapi.ContainerInfo, len(infos))
+	for _, info := range infos {
+		result[info.Name] = info
+	}
+	return result, err
+}
+
+func (cc *cadvisorClient) MachineInfo() (*cadvisorapi.MachineInfo, error) {
+	return cc.GetMachineInfo()
+}
+
+func (cc *cadvisorClient) ImagesFsInfo() (cadvisorapiv2.FsInfo, error) {
+	var label string
+
+	switch cc.runtime {
+	case "docker":
+		label = cadvisorfs.LabelDockerImages
+	case "rkt":
+		label = cadvisorfs.LabelRktImages
+	default:
+		return cadvisorapiv2.FsInfo{}, fmt.Errorf("ImagesFsInfo: unknown runtime: %v", cc.runtime)
+	}
+
+	return cc.getFsInfo(label)
+}
+
+func (cc *cadvisorClient) RootFsInfo() (cadvisorapiv2.FsInfo, error) {
+	return cc.getFsInfo(cadvisorfs.LabelSystemRoot)
+}
+
+func (cc *cadvisorClient) getFsInfo(label string) (cadvisorapiv2.FsInfo, error) {
+	res, err := cc.GetFsInfo(label)
+	if err != nil {
+		return cadvisorapiv2.FsInfo{}, err
+	}
+	if len(res) == 0
{ + return cadvisorapiv2.FsInfo{}, fmt.Errorf("failed to find information for the filesystem labeled %q", label) + } + // TODO(vmarmol): Handle this better when a label has more than one image filesystem. + if len(res) > 1 { + glog.Warningf("More than one filesystem labeled %q: %#v. Only using the first one", label, res) + } + + return res[0], nil +} + +func (cc *cadvisorClient) WatchEvents(request *events.Request) (*events.EventChannel, error) { + return cc.WatchForEvents(request) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/cadvisor/cadvisor_unsupported.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/cadvisor/cadvisor_unsupported.go new file mode 100644 index 000000000000..e5f281d64699 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/cadvisor/cadvisor_unsupported.go @@ -0,0 +1,78 @@ +// +build !cgo !linux + +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cadvisor + +import ( + "errors" + + "github.com/google/cadvisor/events" + cadvisorapi "github.com/google/cadvisor/info/v1" + cadvisorapiv2 "github.com/google/cadvisor/info/v2" +) + +type cadvisorUnsupported struct { +} + +var _ Interface = new(cadvisorUnsupported) + +func New(port uint, runtime string) (Interface, error) { + return &cadvisorUnsupported{}, nil +} + +var unsupportedErr = errors.New("cAdvisor is unsupported in this build") + +func (cu *cadvisorUnsupported) Start() error { + return unsupportedErr +} + +func (cu *cadvisorUnsupported) DockerContainer(name string, req *cadvisorapi.ContainerInfoRequest) (cadvisorapi.ContainerInfo, error) { + return cadvisorapi.ContainerInfo{}, unsupportedErr +} + +func (cu *cadvisorUnsupported) ContainerInfo(name string, req *cadvisorapi.ContainerInfoRequest) (*cadvisorapi.ContainerInfo, error) { + return nil, unsupportedErr +} + +func (cu *cadvisorUnsupported) ContainerInfoV2(name string, options cadvisorapiv2.RequestOptions) (map[string]cadvisorapiv2.ContainerInfo, error) { + return nil, unsupportedErr +} + +func (cu *cadvisorUnsupported) SubcontainerInfo(name string, req *cadvisorapi.ContainerInfoRequest) (map[string]*cadvisorapi.ContainerInfo, error) { + return nil, unsupportedErr +} + +func (cu *cadvisorUnsupported) MachineInfo() (*cadvisorapi.MachineInfo, error) { + return nil, unsupportedErr +} + +func (cu *cadvisorUnsupported) VersionInfo() (*cadvisorapi.VersionInfo, error) { + return nil, unsupportedErr +} + +func (cu *cadvisorUnsupported) ImagesFsInfo() (cadvisorapiv2.FsInfo, error) { + return cadvisorapiv2.FsInfo{}, unsupportedErr +} + +func (cu *cadvisorUnsupported) RootFsInfo() (cadvisorapiv2.FsInfo, error) { + return cadvisorapiv2.FsInfo{}, unsupportedErr +} + +func (cu *cadvisorUnsupported) WatchEvents(request *events.Request) (*events.EventChannel, error) { + return nil, unsupportedErr +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/cadvisor/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/cadvisor/doc.go new file mode 100644 index 
000000000000..8e1c076c68db
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/cadvisor/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright 2015 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package cadvisor provides the Kubelet's interactions with cAdvisor.
+package cadvisor
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/cadvisor/testing/cadvisor_fake.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/cadvisor/testing/cadvisor_fake.go
new file mode 100644
index 000000000000..f73597ea69b4
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/cadvisor/testing/cadvisor_fake.go
@@ -0,0 +1,75 @@
+/*
+Copyright 2015 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package testing
+
+import (
+	"github.com/google/cadvisor/events"
+	cadvisorapi "github.com/google/cadvisor/info/v1"
+	cadvisorapiv2 "github.com/google/cadvisor/info/v2"
+	"k8s.io/kubernetes/pkg/kubelet/cadvisor"
+)
+
+// Fake is a fake cAdvisor implementation.
+type Fake struct {
+}
+
+var _ cadvisor.Interface = new(Fake)
+
+func (c *Fake) Start() error {
+	return nil
+}
+
+func (c *Fake) ContainerInfo(name string, req *cadvisorapi.ContainerInfoRequest) (*cadvisorapi.ContainerInfo, error) {
+	return new(cadvisorapi.ContainerInfo), nil
+}
+
+func (c *Fake) ContainerInfoV2(name string, options cadvisorapiv2.RequestOptions) (map[string]cadvisorapiv2.ContainerInfo, error) {
+	return map[string]cadvisorapiv2.ContainerInfo{}, nil
+}
+
+func (c *Fake) SubcontainerInfo(name string, req *cadvisorapi.ContainerInfoRequest) (map[string]*cadvisorapi.ContainerInfo, error) {
+	return map[string]*cadvisorapi.ContainerInfo{}, nil
+}
+
+func (c *Fake) DockerContainer(name string, req *cadvisorapi.ContainerInfoRequest) (cadvisorapi.ContainerInfo, error) {
+	return cadvisorapi.ContainerInfo{}, nil
+}
+
+func (c *Fake) MachineInfo() (*cadvisorapi.MachineInfo, error) {
+	// Simulate a machine with 1 core and 3.75 GiB of memory.
+	// We set it to non-zero values to make non-zero-capacity machines in Kubemark.
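+	// 4026531840 bytes = 3.75 * 2^30 bytes, i.e. 3.75 GiB.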
+ return &cadvisorapi.MachineInfo{ + NumCores: 1, + MemoryCapacity: 4026531840, + }, nil +} + +func (c *Fake) VersionInfo() (*cadvisorapi.VersionInfo, error) { + return new(cadvisorapi.VersionInfo), nil +} + +func (c *Fake) ImagesFsInfo() (cadvisorapiv2.FsInfo, error) { + return cadvisorapiv2.FsInfo{}, nil +} + +func (c *Fake) RootFsInfo() (cadvisorapiv2.FsInfo, error) { + return cadvisorapiv2.FsInfo{}, nil +} + +func (c *Fake) WatchEvents(request *events.Request) (*events.EventChannel, error) { + return new(events.EventChannel), nil +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/cadvisor/testing/cadvisor_mock.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/cadvisor/testing/cadvisor_mock.go new file mode 100644 index 000000000000..97b5091ed6a8 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/cadvisor/testing/cadvisor_mock.go @@ -0,0 +1,85 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testing + +import ( + "github.com/google/cadvisor/events" + cadvisorapi "github.com/google/cadvisor/info/v1" + cadvisorapiv2 "github.com/google/cadvisor/info/v2" + "github.com/stretchr/testify/mock" + "k8s.io/kubernetes/pkg/kubelet/cadvisor" +) + +type Mock struct { + mock.Mock +} + +var _ cadvisor.Interface = new(Mock) + +func (c *Mock) Start() error { + args := c.Called() + return args.Error(0) +} + +// ContainerInfo is a mock implementation of Interface.ContainerInfo. +func (c *Mock) ContainerInfo(name string, req *cadvisorapi.ContainerInfoRequest) (*cadvisorapi.ContainerInfo, error) { + args := c.Called(name, req) + return args.Get(0).(*cadvisorapi.ContainerInfo), args.Error(1) +} + +// ContainerInfoV2 is a mock implementation of Interface.ContainerInfoV2. +func (c *Mock) ContainerInfoV2(name string, options cadvisorapiv2.RequestOptions) (map[string]cadvisorapiv2.ContainerInfo, error) { + args := c.Called(name, options) + return args.Get(0).(map[string]cadvisorapiv2.ContainerInfo), args.Error(1) +} + +func (c *Mock) SubcontainerInfo(name string, req *cadvisorapi.ContainerInfoRequest) (map[string]*cadvisorapi.ContainerInfo, error) { + args := c.Called(name, req) + return args.Get(0).(map[string]*cadvisorapi.ContainerInfo), args.Error(1) +} + +// DockerContainer is a mock implementation of Interface.DockerContainer. +func (c *Mock) DockerContainer(name string, req *cadvisorapi.ContainerInfoRequest) (cadvisorapi.ContainerInfo, error) { + args := c.Called(name, req) + return args.Get(0).(cadvisorapi.ContainerInfo), args.Error(1) +} + +// MachineInfo is a mock implementation of Interface.MachineInfo. 
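+// A typical stub in a test (hypothetical values) would be:
+//
+//	mockCadvisor := new(Mock)
+//	mockCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{NumCores: 2}, nil)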
+func (c *Mock) MachineInfo() (*cadvisorapi.MachineInfo, error) {
+	args := c.Called()
+	return args.Get(0).(*cadvisorapi.MachineInfo), args.Error(1)
+}
+
+func (c *Mock) VersionInfo() (*cadvisorapi.VersionInfo, error) {
+	args := c.Called()
+	return args.Get(0).(*cadvisorapi.VersionInfo), args.Error(1)
+}
+
+func (c *Mock) ImagesFsInfo() (cadvisorapiv2.FsInfo, error) {
+	args := c.Called()
+	return args.Get(0).(cadvisorapiv2.FsInfo), args.Error(1)
+}
+
+func (c *Mock) RootFsInfo() (cadvisorapiv2.FsInfo, error) {
+	args := c.Called()
+	return args.Get(0).(cadvisorapiv2.FsInfo), args.Error(1)
+}
+
+func (c *Mock) WatchEvents(request *events.Request) (*events.EventChannel, error) {
+	args := c.Called()
+	return args.Get(0).(*events.EventChannel), args.Error(1)
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/cadvisor/types.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/cadvisor/types.go
new file mode 100644
index 000000000000..fbf7b9e058c5
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/cadvisor/types.go
@@ -0,0 +1,44 @@
+/*
+Copyright 2015 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cadvisor
+
+import (
+	"github.com/google/cadvisor/events"
+	cadvisorapi "github.com/google/cadvisor/info/v1"
+	cadvisorapiv2 "github.com/google/cadvisor/info/v2"
+)
+
+// Interface abstracts the cAdvisor API so that it can be faked or mocked for testability.
+type Interface interface {
+	Start() error
+	DockerContainer(name string, req *cadvisorapi.ContainerInfoRequest) (cadvisorapi.ContainerInfo, error)
+	ContainerInfo(name string, req *cadvisorapi.ContainerInfoRequest) (*cadvisorapi.ContainerInfo, error)
+	ContainerInfoV2(name string, options cadvisorapiv2.RequestOptions) (map[string]cadvisorapiv2.ContainerInfo, error)
+	SubcontainerInfo(name string, req *cadvisorapi.ContainerInfoRequest) (map[string]*cadvisorapi.ContainerInfo, error)
+	MachineInfo() (*cadvisorapi.MachineInfo, error)
+
+	VersionInfo() (*cadvisorapi.VersionInfo, error)
+
+	// Returns usage information about the filesystem holding Docker images.
+	ImagesFsInfo() (cadvisorapiv2.FsInfo, error)
+
+	// Returns usage information about the root filesystem.
+	RootFsInfo() (cadvisorapiv2.FsInfo, error)
+
+	// WatchEvents streams events matching the request through the returned channel.
+	WatchEvents(request *events.Request) (*events.EventChannel, error)
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/cadvisor/util.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/cadvisor/util.go
new file mode 100644
index 000000000000..2dac21756521
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/cadvisor/util.go
@@ -0,0 +1,35 @@
+/*
+Copyright 2015 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cadvisor
+
+import (
+	cadvisorApi "github.com/google/cadvisor/info/v1"
+	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/api/resource"
+)
+
+// CapacityFromMachineInfo converts a cAdvisor MachineInfo into a Kubernetes
+// ResourceList of CPU (millicores) and memory (bytes).
+func CapacityFromMachineInfo(info *cadvisorApi.MachineInfo) api.ResourceList {
+	c := api.ResourceList{
+		api.ResourceCPU: *resource.NewMilliQuantity(
+			int64(info.NumCores*1000),
+			resource.DecimalSI),
+		api.ResourceMemory: *resource.NewQuantity(
+			int64(info.MemoryCapacity),
+			resource.BinarySI),
+	}
+	return c
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/client/kubelet_client.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/client/kubelet_client.go
new file mode 100644
index 000000000000..bbaade4eeead
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/client/kubelet_client.go
@@ -0,0 +1,138 @@
+/*
+Copyright 2014 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package client
+
+import (
+	"errors"
+	"fmt"
+	"net"
+	"net/http"
+	"strings"
+	"time"
+
+	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/api/validation"
+	"k8s.io/kubernetes/pkg/client/restclient"
+	"k8s.io/kubernetes/pkg/client/transport"
+	utilnet "k8s.io/kubernetes/pkg/util/net"
+)
+
+type KubeletClientConfig struct {
+	// Default port - used if no information about the Kubelet port can be found in Node.NodeStatus.DaemonEndpoints.
+	Port        uint
+	EnableHttps bool
+
+	// TLSClientConfig contains settings to enable transport layer security.
+	restclient.TLSClientConfig
+
+	// BearerToken is sent if the server requires Bearer authentication.
+	BearerToken string
+
+	// HTTPTimeout is used by the client to time out http requests to the Kubelet.
+	HTTPTimeout time.Duration
+
+	// Dial is a custom dialer used for the client.
+	Dial func(net, addr string) (net.Conn, error)
+}
+
+// KubeletClient is an interface for all kubelet functionality.
+type KubeletClient interface {
+	ConnectionInfoGetter
+}
+
+type ConnectionInfoGetter interface {
+	GetConnectionInfo(ctx api.Context, nodeName string) (scheme string, port uint, transport http.RoundTripper, err error)
+}
+
+// HTTPKubeletClient is the default implementation of KubeletClient; it accesses the kubelet over HTTP.
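+// Construct it with NewStaticKubeletClient rather than by hand so the
+// transport honors the TLS and timeout settings in the config; a minimal
+// sketch (hypothetical values) would be:
+//
+//	config := &KubeletClientConfig{Port: 10250, EnableHttps: true, HTTPTimeout: 5 * time.Second}
+//	client, err := NewStaticKubeletClient(config)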
+type HTTPKubeletClient struct {
+	Client *http.Client
+	Config *KubeletClientConfig
+}
+
+func MakeTransport(config *KubeletClientConfig) (http.RoundTripper, error) {
+	tlsConfig, err := transport.TLSConfigFor(config.transportConfig())
+	if err != nil {
+		return nil, err
+	}
+
+	rt := http.DefaultTransport
+	if config.Dial != nil || tlsConfig != nil {
+		rt = utilnet.SetTransportDefaults(&http.Transport{
+			Dial:            config.Dial,
+			TLSClientConfig: tlsConfig,
+		})
+	}
+
+	return transport.HTTPWrappersForConfig(config.transportConfig(), rt)
+}
+
+// TODO: this structure is questionable, it should be using client.Config and overriding defaults.
+func NewStaticKubeletClient(config *KubeletClientConfig) (KubeletClient, error) {
+	transport, err := MakeTransport(config)
+	if err != nil {
+		return nil, err
+	}
+	c := &http.Client{
+		Transport: transport,
+		Timeout:   config.HTTPTimeout,
+	}
+	return &HTTPKubeletClient{
+		Client: c,
+		Config: config,
+	}, nil
+}
+
+// In the default HTTPKubeletClient, ctx is unused.
+func (c *HTTPKubeletClient) GetConnectionInfo(ctx api.Context, nodeName string) (string, uint, http.RoundTripper, error) {
+	if errs := validation.ValidateNodeName(nodeName, false); len(errs) != 0 {
+		return "", 0, nil, fmt.Errorf("invalid node name: %s", strings.Join(errs, ";"))
+	}
+	scheme := "http"
+	if c.Config.EnableHttps {
+		scheme = "https"
+	}
+	return scheme, c.Config.Port, c.Client.Transport, nil
+}
+
+// FakeKubeletClient is a fake implementation of KubeletClient which returns an error
+// when called. It is useful to pass to the master in a test configuration with
+// no kubelets.
+type FakeKubeletClient struct{}
+
+func (c FakeKubeletClient) GetConnectionInfo(ctx api.Context, nodeName string) (string, uint, http.RoundTripper, error) {
+	return "", 0, nil, errors.New("Not Implemented")
+}
+
+// transportConfig converts a client config to an appropriate transport config.
+func (c *KubeletClientConfig) transportConfig() *transport.Config {
+	cfg := &transport.Config{
+		TLS: transport.TLSConfig{
+			CAFile:   c.CAFile,
+			CAData:   c.CAData,
+			CertFile: c.CertFile,
+			CertData: c.CertData,
+			KeyFile:  c.KeyFile,
+			KeyData:  c.KeyData,
+		},
+		BearerToken: c.BearerToken,
+	}
+	if c.EnableHttps && !cfg.HasCA() {
+		cfg.TLS.Insecure = true
+	}
+	return cfg
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/client/kubelet_client_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/client/kubelet_client_test.go
new file mode 100644
index 000000000000..a8352a9d8a54
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/client/kubelet_client_test.go
@@ -0,0 +1,126 @@
+/*
+Copyright 2014 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package client + +import ( + "encoding/json" + "net/http/httptest" + "net/url" + "testing" + + "k8s.io/kubernetes/pkg/client/restclient" + "k8s.io/kubernetes/pkg/probe" + utiltesting "k8s.io/kubernetes/pkg/util/testing" +) + +func TestHTTPKubeletClient(t *testing.T) { + expectObj := probe.Success + body, err := json.Marshal(expectObj) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + + fakeHandler := utiltesting.FakeHandler{ + StatusCode: 200, + ResponseBody: string(body), + } + testServer := httptest.NewServer(&fakeHandler) + defer testServer.Close() + + if _, err := url.Parse(testServer.URL); err != nil { + t.Errorf("unexpected error: %v", err) + } +} + +func TestNewKubeletClient(t *testing.T) { + config := &KubeletClientConfig{ + EnableHttps: false, + } + + client, err := NewStaticKubeletClient(config) + if err != nil { + t.Errorf("Error while trying to create a client: %v", err) + } + if client == nil { + t.Error("client is nil.") + } +} + +func TestNewKubeletClientTLSInvalid(t *testing.T) { + config := &KubeletClientConfig{ + EnableHttps: true, + //Invalid certificate and key path + TLSClientConfig: restclient.TLSClientConfig{ + CertFile: "../../client/testdata/mycertinvalid.cer", + KeyFile: "../../client/testdata/mycertinvalid.key", + CAFile: "../../client/testdata/myCA.cer", + }, + } + + client, err := NewStaticKubeletClient(config) + if err == nil { + t.Errorf("Expected an error") + } + if client != nil { + t.Error("client should be nil as we provided invalid cert file") + } +} + +func TestNewKubeletClientTLSValid(t *testing.T) { + config := &KubeletClientConfig{ + Port: 1234, + EnableHttps: true, + TLSClientConfig: restclient.TLSClientConfig{ + CertFile: "../../client/testdata/mycertvalid.cer", + // TLS Configuration, only applies if EnableHttps is true. + KeyFile: "../../client/testdata/mycertvalid.key", + // TLS Configuration, only applies if EnableHttps is true. + CAFile: "../../client/testdata/myCA.cer", + }, + } + + client, err := NewStaticKubeletClient(config) + if err != nil { + t.Errorf("Not expecting an error #%v", err) + } + if client == nil { + t.Error("client should not be nil") + } + + { + scheme, port, transport, err := client.GetConnectionInfo(nil, "foo") + if err != nil { + t.Errorf("Error getting info: %v", err) + } + if scheme != "https" { + t.Errorf("Expected https, got %s", scheme) + } + if port != 1234 { + t.Errorf("Expected 1234, got %d", port) + } + if transport == nil { + t.Errorf("Expected transport, got nil") + } + } + + { + _, _, _, err := client.GetConnectionInfo(nil, "foo bar") + if err == nil { + t.Errorf("Expected error getting connection info for invalid node name, got none") + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/cm/container_manager.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/cm/container_manager.go new file mode 100644 index 000000000000..e18bc6865e27 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/cm/container_manager.go @@ -0,0 +1,51 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cm + +import ( + "k8s.io/kubernetes/pkg/api" +) + +// Manages the containers running on a machine. +type ContainerManager interface { + // Runs the container manager's housekeeping. + // - Ensures that the Docker daemon is in a container. + // - Creates the system container where all non-containerized processes run. + Start() error + + // Returns resources allocated to system cgroups in the machine. + // These cgroups include the system and Kubernetes services. + SystemCgroupsLimit() api.ResourceList + + // Returns a NodeConfig that is being used by the container manager. + GetNodeConfig() NodeConfig + + // Returns internal Status. + Status() Status +} + +type NodeConfig struct { + RuntimeCgroupsName string + SystemCgroupsName string + KubeletCgroupsName string + ContainerRuntime string +} + +type Status struct { + // Any soft requirements that were unsatisfied. + SoftRequirements error +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_linux.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_linux.go new file mode 100644 index 000000000000..a548bf7834db --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_linux.go @@ -0,0 +1,628 @@ +// +build linux + +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cm + +import ( + "fmt" + "io/ioutil" + "os" + "os/exec" + "path" + "strconv" + "strings" + "sync" + "time" + + "github.com/blang/semver" + "github.com/golang/glog" + "github.com/opencontainers/runc/libcontainer/cgroups" + "github.com/opencontainers/runc/libcontainer/cgroups/fs" + "github.com/opencontainers/runc/libcontainer/configs" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/resource" + "k8s.io/kubernetes/pkg/kubelet/cadvisor" + "k8s.io/kubernetes/pkg/util" + utilerrors "k8s.io/kubernetes/pkg/util/errors" + "k8s.io/kubernetes/pkg/util/mount" + "k8s.io/kubernetes/pkg/util/oom" + "k8s.io/kubernetes/pkg/util/runtime" + "k8s.io/kubernetes/pkg/util/sets" + utilsysctl "k8s.io/kubernetes/pkg/util/sysctl" + "k8s.io/kubernetes/pkg/util/wait" +) + +const ( + // The percent of the machine memory capacity. The value is used to calculate + // docker memory resource container's hardlimit to workaround docker memory + // leakage issue. Please see kubernetes/issues/9881 for more detail. + DockerMemoryLimitThresholdPercent = 70 + // The minimum memory limit allocated to docker container: 150Mi + MinDockerMemoryLimit = 150 * 1024 * 1024 + + dockerProcessName = "docker" + dockerPidFile = "/var/run/docker.pid" + containerdProcessName = "docker-containerd" + containerdPidFile = "/run/docker/libcontainerd/docker-containerd.pid" +) + +var ( + // The docker version in which containerd was introduced. + containerdVersion = semver.MustParse("1.11.0") +) + +// A non-user container tracked by the Kubelet. 
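+// Examples include the container-runtime cgroup (RuntimeCgroupsName), the
+// system-services cgroup (SystemCgroupsName), and the kubelet's own cgroup
+// (KubeletCgroupsName).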
+type systemContainer struct {
+	// Absolute name of the container.
+	name string
+
+	// CPU limit in millicores.
+	cpuMillicores int64
+
+	// Function that ensures the state of the container.
+	// m is the cgroup manager for the specified container.
+	ensureStateFunc func(m *fs.Manager) error
+
+	// Manager for the cgroups of the external container.
+	manager *fs.Manager
+}
+
+func newSystemCgroups(containerName string) *systemContainer {
+	return &systemContainer{
+		name:    containerName,
+		manager: createManager(containerName),
+	}
+}
+
+type containerManagerImpl struct {
+	sync.RWMutex
+	cadvisorInterface cadvisor.Interface
+	mountUtil         mount.Interface
+	NodeConfig
+	status Status
+	// External containers being managed.
+	systemContainers []*systemContainer
+	periodicTasks    []func()
+}
+
+type features struct {
+	cpuHardcapping bool
+}
+
+var _ ContainerManager = &containerManagerImpl{}
+
+// validateSystemRequirements checks whether the required cgroup subsystems are mounted.
+// As of now, 'cpu', 'cpuacct', 'cpuset', and 'memory' are required;
+// cpu quota is a soft requirement.
+func validateSystemRequirements(mountUtil mount.Interface) (features, error) {
+	const (
+		cgroupMountType = "cgroup"
+		localErr        = "system validation failed"
+	)
+	var (
+		cpuMountPoint string
+		f             features
+	)
+	mountPoints, err := mountUtil.List()
+	if err != nil {
+		return f, fmt.Errorf("%s - %v", localErr, err)
+	}
+
+	expectedCgroups := sets.NewString("cpu", "cpuacct", "cpuset", "memory")
+	for _, mountPoint := range mountPoints {
+		if mountPoint.Type == cgroupMountType {
+			for _, opt := range mountPoint.Opts {
+				if expectedCgroups.Has(opt) {
+					expectedCgroups.Delete(opt)
+				}
+				if opt == "cpu" {
+					cpuMountPoint = mountPoint.Path
+				}
+			}
+		}
+	}
+
+	if expectedCgroups.Len() > 0 {
+		return f, fmt.Errorf("%s - following cgroup subsystems not mounted: %v", localErr, expectedCgroups.List())
+	}
+
+	// Check if cpu quota is available.
+	// The CPU cgroup is required and so is expected to be mounted at this point.
+	periodExists, err := util.FileExists(path.Join(cpuMountPoint, "cpu.cfs_period_us"))
+	if err != nil {
+		glog.Errorf("failed to detect if CPU cgroup cpu.cfs_period_us is available - %v", err)
+	}
+	quotaExists, err := util.FileExists(path.Join(cpuMountPoint, "cpu.cfs_quota_us"))
+	if err != nil {
+		glog.Errorf("failed to detect if CPU cgroup cpu.cfs_quota_us is available - %v", err)
+	}
+	if quotaExists && periodExists {
+		f.cpuHardcapping = true
+	}
+	return f, nil
+}
+
+// TODO(vmarmol): Add limits to the system containers.
+// NewContainerManager takes the absolute names of the specified containers.
+// An empty container name disables use of the specified container.
+func NewContainerManager(mountUtil mount.Interface, cadvisorInterface cadvisor.Interface, nodeConfig NodeConfig) (ContainerManager, error) {
+	return &containerManagerImpl{
+		cadvisorInterface: cadvisorInterface,
+		mountUtil:         mountUtil,
+		NodeConfig:        nodeConfig,
+	}, nil
+}
+
+// Create a cgroup container manager.
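+// The returned fs.Manager carries no resource limits of its own; it is used
+// by ensureStateFunc callbacks to place processes (via manager.Apply) rather
+// than to enforce quotas.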
+func createManager(containerName string) *fs.Manager {
+	return &fs.Manager{
+		Cgroups: &configs.Cgroup{
+			Parent: "/",
+			Name:   containerName,
+			Resources: &configs.Resources{
+				AllowAllDevices: true,
+			},
+		},
+	}
+}
+
+// TODO: plumb this up as a flag to Kubelet in a future PR
+type KernelTunableBehavior string
+
+const (
+	KernelTunableWarn   KernelTunableBehavior = "warn"
+	KernelTunableError  KernelTunableBehavior = "error"
+	KernelTunableModify KernelTunableBehavior = "modify"
+)
+
+// setupKernelTunables validates that kernel tunable flags are set as expected.
+// Depending upon the specified option, it will either warn, error, or modify
+// the kernel tunable flags.
+func setupKernelTunables(option KernelTunableBehavior) error {
+	desiredState := map[string]int{
+		utilsysctl.VmOvercommitMemory: utilsysctl.VmOvercommitMemoryAlways,
+		utilsysctl.VmPanicOnOOM:       utilsysctl.VmPanicOnOOMInvokeOOMKiller,
+		utilsysctl.KernelPanic:        utilsysctl.KernelPanicRebootTimeout,
+		utilsysctl.KernelPanicOnOops:  utilsysctl.KernelPanicOnOopsAlways,
+	}
+
+	errList := []error{}
+	for flag, expectedValue := range desiredState {
+		val, err := utilsysctl.GetSysctl(flag)
+		if err != nil {
+			errList = append(errList, err)
+			continue
+		}
+		if val == expectedValue {
+			continue
+		}
+
+		switch option {
+		case KernelTunableError:
+			errList = append(errList, fmt.Errorf("Invalid kernel flag: %v, expected value: %v, actual value: %v", flag, expectedValue, val))
+		case KernelTunableWarn:
+			glog.V(2).Infof("Invalid kernel flag: %v, expected value: %v, actual value: %v", flag, expectedValue, val)
+		case KernelTunableModify:
+			glog.V(2).Infof("Updating kernel flag: %v, expected value: %v, actual value: %v", flag, expectedValue, val)
+			err = utilsysctl.SetSysctl(flag, expectedValue)
+			if err != nil {
+				errList = append(errList, err)
+			}
+		}
+	}
+	return utilerrors.NewAggregate(errList)
+}
+
+func (cm *containerManagerImpl) setupNode() error {
+	f, err := validateSystemRequirements(cm.mountUtil)
+	if err != nil {
+		return err
+	}
+	if !f.cpuHardcapping {
+		cm.status.SoftRequirements = fmt.Errorf("CPU hardcapping unsupported")
+	}
+	// TODO: plumb kernel tunable options into container manager; right now, we modify by default.
+	if err := setupKernelTunables(KernelTunableModify); err != nil {
+		return err
+	}
+
+	systemContainers := []*systemContainer{}
+	if cm.ContainerRuntime == "docker" {
+		if cm.RuntimeCgroupsName != "" {
+			cont := newSystemCgroups(cm.RuntimeCgroupsName)
+			capacity := api.ResourceList{}
+			if info, err := cm.cadvisorInterface.MachineInfo(); err == nil {
+				capacity = cadvisor.CapacityFromMachineInfo(info)
+			}
+			memoryLimit := capacity.Memory().Value() * DockerMemoryLimitThresholdPercent / 100
+			if memoryLimit < MinDockerMemoryLimit {
+				glog.Warningf("Memory limit %d for container %s is too small, reset it to %d", memoryLimit, cm.RuntimeCgroupsName, MinDockerMemoryLimit)
+				memoryLimit = MinDockerMemoryLimit
+			}
+
+			glog.V(2).Infof("Configure resource-only container %s with memory limit: %d", cm.RuntimeCgroupsName, memoryLimit)
+
+			dockerContainer := &fs.Manager{
+				Cgroups: &configs.Cgroup{
+					Parent: "/",
+					Name:   cm.RuntimeCgroupsName,
+					Resources: &configs.Resources{
+						Memory:          memoryLimit,
+						MemorySwap:      -1,
+						AllowAllDevices: true,
+					},
+				},
+			}
+			dockerVersion := getDockerVersion(cm.cadvisorInterface)
+			cont.ensureStateFunc = func(manager *fs.Manager) error {
+				return ensureDockerInContainer(dockerVersion, -900, dockerContainer)
+			}
+			systemContainers = append(systemContainers, cont)
+		} else {
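+			// RuntimeCgroupsName is unset: discover the docker daemon's
+			// cgroup in the background instead of creating one.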
+			cm.periodicTasks = append(cm.periodicTasks, func() {
+				cont, err := getContainerNameForProcess(dockerProcessName, dockerPidFile)
+				if err != nil {
+					glog.Error(err)
+					return
+				}
+				glog.V(2).Infof("Discovered runtime cgroups name: %s", cont)
+				cm.Lock()
+				defer cm.Unlock()
+				cm.RuntimeCgroupsName = cont
+			})
+		}
+	}
+
+	if cm.SystemCgroupsName != "" {
+		if cm.SystemCgroupsName == "/" {
+			return fmt.Errorf("system container cannot be root (\"/\")")
+		}
+		cont := newSystemCgroups(cm.SystemCgroupsName)
+		rootContainer := &fs.Manager{
+			Cgroups: &configs.Cgroup{
+				Parent: "/",
+				Name:   "/",
+			},
+		}
+		cont.ensureStateFunc = func(manager *fs.Manager) error {
+			return ensureSystemCgroups(rootContainer, manager)
+		}
+		systemContainers = append(systemContainers, cont)
+	}
+
+	if cm.KubeletCgroupsName != "" {
+		cont := newSystemCgroups(cm.KubeletCgroupsName)
+		manager := fs.Manager{
+			Cgroups: &configs.Cgroup{
+				Parent: "/",
+				Name:   cm.KubeletCgroupsName,
+				Resources: &configs.Resources{
+					AllowAllDevices: true,
+				},
+			},
+		}
+		cont.ensureStateFunc = func(_ *fs.Manager) error {
+			return manager.Apply(os.Getpid())
+		}
+		systemContainers = append(systemContainers, cont)
+	} else {
+		cm.periodicTasks = append(cm.periodicTasks, func() {
+			cont, err := getContainer(os.Getpid())
+			if err != nil {
+				glog.Errorf("failed to find cgroups of kubelet - %v", err)
+				return
+			}
+			cm.Lock()
+			defer cm.Unlock()
+
+			cm.KubeletCgroupsName = cont
+		})
+	}
+
+	cm.systemContainers = systemContainers
+	return nil
+}
+
+func getContainerNameForProcess(name, pidFile string) (string, error) {
+	pids, err := getPidsForProcess(name, pidFile)
+	if err != nil {
+		return "", fmt.Errorf("failed to detect process id for %q - %v", name, err)
+	}
+	if len(pids) == 0 {
+		return "", nil
+	}
+	cont, err := getContainer(pids[0])
+	if err != nil {
+		return "", err
+	}
+	return cont, nil
+}
+
+func (cm *containerManagerImpl) GetNodeConfig() NodeConfig {
+	cm.RLock()
+	defer cm.RUnlock()
+	return cm.NodeConfig
+}
+
+func (cm *containerManagerImpl) Status() Status {
+	cm.RLock()
+	defer cm.RUnlock()
+	return cm.status
+}
+
+func (cm *containerManagerImpl) Start() error {
+	// Set up the node.
+	if err := cm.setupNode(); err != nil {
+		return err
+	}
+	// Don't run a background thread if there are no ensureStateFuncs.
+	numEnsureStateFuncs := 0
+	for _, cont := range cm.systemContainers {
+		if cont.ensureStateFunc != nil {
+			numEnsureStateFuncs++
+		}
+	}
+	if numEnsureStateFuncs > 0 {
+		// Run ensure state functions every minute.
+		go wait.Until(func() {
+			for _, cont := range cm.systemContainers {
+				if cont.ensureStateFunc != nil {
+					if err := cont.ensureStateFunc(cont.manager); err != nil {
+						glog.Warningf("[ContainerManager] Failed to ensure state of %q: %v", cont.name, err)
+					}
+				}
+			}
+		}, time.Minute, wait.NeverStop)
+	}
+
+	if len(cm.periodicTasks) > 0 {
+		go wait.Until(func() {
+			for _, task := range cm.periodicTasks {
+				if task != nil {
+					task()
+				}
+			}
+		}, 5*time.Minute, wait.NeverStop)
+	}
+
+	return nil
+}
+
+func (cm *containerManagerImpl) SystemCgroupsLimit() api.ResourceList {
+	cpuLimit := int64(0)
+
+	// Sum up resources of all external containers.
+	for _, cont := range cm.systemContainers {
+		cpuLimit += cont.cpuMillicores
+	}
+
+	return api.ResourceList{
+		api.ResourceCPU: *resource.NewMilliQuantity(
+			cpuLimit,
+			resource.DecimalSI),
+	}
+}
+
+func isProcessRunningInHost(pid int) (bool, error) {
+	// Get the init mount namespace. Mount namespaces are unique per container.
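+	// A process that shares init's mount namespace is running on the host.
+	// /proc/<pid>/ns/mnt is a symlink whose target names the namespace,
+	// e.g. "mnt:[4026531840]", so comparing link targets compares namespaces.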
+	initMntNs, err := os.Readlink("/proc/1/ns/mnt")
+	if err != nil {
+		return false, fmt.Errorf("failed to find mount namespace of init process")
+	}
+	processMntNs, err := os.Readlink(fmt.Sprintf("/proc/%d/ns/mnt", pid))
+	if err != nil {
+		return false, fmt.Errorf("failed to find mount namespace of process %d", pid)
+	}
+	return initMntNs == processMntNs, nil
+}
+
+func getPidFromPidFile(pidFile string) (int, error) {
+	file, err := os.Open(pidFile)
+	if err != nil {
+		return 0, fmt.Errorf("error opening pid file %s: %v", pidFile, err)
+	}
+	defer file.Close()
+
+	data, err := ioutil.ReadAll(file)
+	if err != nil {
+		return 0, fmt.Errorf("error reading pid file %s: %v", pidFile, err)
+	}
+
+	// Trim surrounding whitespace; pid files commonly end with a newline.
+	pid, err := strconv.Atoi(strings.TrimSpace(string(data)))
+	if err != nil {
+		return 0, fmt.Errorf("error parsing %s as a number: %v", string(data), err)
+	}
+
+	return pid, nil
+}
+
+func getPidsForProcess(name, pidFile string) ([]int, error) {
+	if len(pidFile) > 0 {
+		if pid, err := getPidFromPidFile(pidFile); err == nil {
+			return []int{pid}, nil
+		} else {
+			// Log the error and fall back to pidof.
+			runtime.HandleError(err)
+		}
+	}
+
+	out, err := exec.Command("pidof", name).Output()
+	if err != nil {
+		return []int{}, fmt.Errorf("failed to find pid of %q: %v", name, err)
+	}
+
+	// The output of pidof is a space-separated list of pids.
+	pids := []int{}
+	for _, pidStr := range strings.Split(strings.TrimSpace(string(out)), " ") {
+		pid, err := strconv.Atoi(pidStr)
+		if err != nil {
+			continue
+		}
+		pids = append(pids, pid)
+	}
+	return pids, nil
+}
+
+// Ensures that the Docker daemon is in the desired container.
+func ensureDockerInContainer(dockerVersion semver.Version, oomScoreAdj int, manager *fs.Manager) error {
+	type process struct{ name, file string }
+	dockerProcs := []process{{dockerProcessName, dockerPidFile}}
+	if dockerVersion.GTE(containerdVersion) {
+		dockerProcs = append(dockerProcs, process{containerdProcessName, containerdPidFile})
+	}
+
+	var errs []error
+	for _, proc := range dockerProcs {
+		pids, err := getPidsForProcess(proc.name, proc.file)
+		if err != nil {
+			errs = append(errs, fmt.Errorf("failed to get pids for %q: %v", proc.name, err))
+			continue
+		}
+
+		// Move if the pid is not already in the desired container.
+		for _, pid := range pids {
+			if err := ensureProcessInContainer(pid, oomScoreAdj, manager); err != nil {
+				errs = append(errs, fmt.Errorf("errors moving %q pid: %v", proc.name, err))
+			}
+		}
+	}
+	return utilerrors.NewAggregate(errs)
+}
+
+func ensureProcessInContainer(pid int, oomScoreAdj int, manager *fs.Manager) error {
+	if runningInHost, err := isProcessRunningInHost(pid); err != nil {
+		// Err on the side of caution. Avoid moving the docker daemon unless we are able to identify its context.
+		return err
+	} else if !runningInHost {
+		// Process is running inside a container. Don't touch that.
+		return nil
+	}
+
+	var errs []error
+	cont, err := getContainer(pid)
+	if err != nil {
+		errs = append(errs, fmt.Errorf("failed to find container of PID %d: %v", pid, err))
+	}
+
+	if cont != manager.Cgroups.Name {
+		err = manager.Apply(pid)
+		if err != nil {
+			errs = append(errs, fmt.Errorf("failed to move PID %d (in %q) to %q: %v", pid, cont, manager.Cgroups.Name, err))
+		}
+	}
+
+	// Also apply the oom-score-adj to the process.
+	oomAdjuster := oom.NewOOMAdjuster()
+	if err := oomAdjuster.ApplyOOMScoreAdj(pid, oomScoreAdj); err != nil {
+		errs = append(errs, fmt.Errorf("failed to apply oom score %d to PID %d", oomScoreAdj, pid))
+	}
+	return utilerrors.NewAggregate(errs)
+}
+
+// Gets the (CPU) container the specified pid is in.
+func getContainer(pid int) (string, error) {
+	cgs, err := cgroups.ParseCgroupFile(fmt.Sprintf("/proc/%d/cgroup", pid))
+	if err != nil {
+		return "", err
+	}
+
+	cg, ok := cgs["cpu"]
+	if ok {
+		return cg, nil
+	}
+
+	return "", cgroups.NewNotFoundError("cpu")
+}
+
+// Ensures the system container is created, and that all non-kernel threads and
+// process 1 that are not already in a container are moved to it.
+//
+// Kernel threads are left in the root cgroup because we don't want to tie their
+// execution to the to-be-defined /system quota, which could create priority
+// inversions.
+func ensureSystemCgroups(rootContainer *fs.Manager, manager *fs.Manager) error {
+	// Move non-kernel PIDs to the system container.
+	attemptsRemaining := 10
+	var errs []error
+	for attemptsRemaining >= 0 {
+		// Only keep errors on the latest attempt.
+		errs = []error{}
+		attemptsRemaining--
+
+		allPids, err := rootContainer.GetPids()
+		if err != nil {
+			errs = append(errs, fmt.Errorf("failed to list PIDs for root: %v", err))
+			continue
+		}
+
+		// Remove kernel pids and other protected PIDs (pid 1, PIDs already in system & kubelet containers).
+		pids := make([]int, 0, len(allPids))
+		for _, pid := range allPids {
+			if pid == 1 || isKernelPid(pid) {
+				continue
+			}
+
+			pids = append(pids, pid)
+		}
+		glog.Infof("Found %d PIDs in root, %d of them are not to be moved", len(allPids), len(allPids)-len(pids))
+
+		// Check if we have moved all the non-kernel PIDs.
+		if len(pids) == 0 {
+			break
+		}
+
+		glog.Infof("Moving non-kernel processes: %v", pids)
+		for _, pid := range pids {
+			err := manager.Apply(pid)
+			if err != nil {
+				errs = append(errs, fmt.Errorf("failed to move PID %d into the system container %q: %v", pid, manager.Cgroups.Name, err))
+			}
+		}
+	}
+	if attemptsRemaining < 0 {
+		errs = append(errs, fmt.Errorf("ran out of attempts to create system containers %q", manager.Cgroups.Name))
+	}
+
+	return utilerrors.NewAggregate(errs)
+}
+
+// Determines whether the specified PID is a kernel PID.
+func isKernelPid(pid int) bool {
+	// Kernel threads have no associated executable.
+	_, err := os.Readlink(fmt.Sprintf("/proc/%d/exe", pid))
+	return err != nil
+}
+
+// Helper for getting the docker version.
+func getDockerVersion(cadvisor cadvisor.Interface) semver.Version {
+	var fallback semver.Version // Fallback to zero-value by default.
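+	// The zero version (0.0.0) is below containerdVersion, so on failure the
+	// caller simply skips the containerd-specific handling.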
+	versions, err := cadvisor.VersionInfo()
+	if err != nil {
+		glog.Errorf("Error requesting cAdvisor VersionInfo: %v", err)
+		return fallback
+	}
+	dockerVersion, err := semver.Parse(versions.DockerVersion)
+	if err != nil {
+		glog.Errorf("Error parsing docker version %q: %v", versions.DockerVersion, err)
+		return fallback
+	}
+	return dockerVersion
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_linux_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_linux_test.go
new file mode 100644
index 000000000000..34e91c83b69c
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_linux_test.go
@@ -0,0 +1,164 @@
+// +build linux
+
+/*
+Copyright 2015 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cm
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"k8s.io/kubernetes/pkg/util/mount"
+)
+
+type fakeMountInterface struct {
+	mountPoints []mount.MountPoint
+}
+
+func (mi *fakeMountInterface) Mount(source string, target string, fstype string, options []string) error {
+	return fmt.Errorf("unsupported")
+}
+
+func (mi *fakeMountInterface) Unmount(target string) error {
+	return fmt.Errorf("unsupported")
+}
+
+func (mi *fakeMountInterface) List() ([]mount.MountPoint, error) {
+	return mi.mountPoints, nil
+}
+
+func (mi *fakeMountInterface) IsLikelyNotMountPoint(file string) (bool, error) {
+	return false, fmt.Errorf("unsupported")
+}
+
+func fakeContainerMgrMountInt() mount.Interface {
+	return &fakeMountInterface{
+		[]mount.MountPoint{
+			{
+				Device: "cgroup",
+				Type:   "cgroup",
+				Opts:   []string{"rw", "relatime", "cpuset"},
+			},
+			{
+				Device: "cgroup",
+				Type:   "cgroup",
+				Opts:   []string{"rw", "relatime", "cpu"},
+			},
+			{
+				Device: "cgroup",
+				Type:   "cgroup",
+				Opts:   []string{"rw", "relatime", "cpuacct"},
+			},
+			{
+				Device: "cgroup",
+				Type:   "cgroup",
+				Opts:   []string{"rw", "relatime", "memory"},
+			},
+		},
+	}
+}
+
+func TestCgroupMountValidationSuccess(t *testing.T) {
+	f, err := validateSystemRequirements(fakeContainerMgrMountInt())
+	assert.Nil(t, err)
+	assert.False(t, f.cpuHardcapping, "cpu hardcapping is expected to be disabled")
+}
+
+func TestCgroupMountValidationMemoryMissing(t *testing.T) {
+	mountInt := &fakeMountInterface{
+		[]mount.MountPoint{
+			{
+				Device: "cgroup",
+				Type:   "cgroup",
+				Opts:   []string{"rw", "relatime", "cpuset"},
+			},
+			{
+				Device: "cgroup",
+				Type:   "cgroup",
+				Opts:   []string{"rw", "relatime", "cpu"},
+			},
+			{
+				Device: "cgroup",
+				Type:   "cgroup",
+				Opts:   []string{"rw", "relatime", "cpuacct"},
+			},
+		},
+	}
+	_, err := validateSystemRequirements(mountInt)
+	assert.Error(t, err)
+}
+
+func TestCgroupMountValidationMultipleSubsystem(t *testing.T) {
+	mountInt := &fakeMountInterface{
+		[]mount.MountPoint{
+			{
+				Device: "cgroup",
+				Type:   "cgroup",
+				Opts:   []string{"rw", "relatime", "cpuset", "memory"},
+			},
+			{
+				Device: "cgroup",
+				Type:
"cgroup", + Opts: []string{"rw", "relatime", "cpu"}, + }, + { + Device: "cgroup", + Type: "cgroup", + Opts: []string{"rw", "relatime", "cpuacct"}, + }, + }, + } + _, err := validateSystemRequirements(mountInt) + assert.Nil(t, err) +} + +func TestSoftRequirementsValidationSuccess(t *testing.T) { + req := require.New(t) + tempDir, err := ioutil.TempDir("", "") + req.NoError(err) + req.NoError(ioutil.WriteFile(path.Join(tempDir, "cpu.cfs_period_us"), []byte("0"), os.ModePerm)) + req.NoError(ioutil.WriteFile(path.Join(tempDir, "cpu.cfs_quota_us"), []byte("0"), os.ModePerm)) + mountInt := &fakeMountInterface{ + []mount.MountPoint{ + { + Device: "cgroup", + Type: "cgroup", + Opts: []string{"rw", "relatime", "cpuset"}, + }, + { + Device: "cgroup", + Type: "cgroup", + Opts: []string{"rw", "relatime", "cpu"}, + Path: tempDir, + }, + { + Device: "cgroup", + Type: "cgroup", + Opts: []string{"rw", "relatime", "cpuacct", "memory"}, + }, + }, + } + f, err := validateSystemRequirements(mountInt) + assert.NoError(t, err) + assert.True(t, f.cpuHardcapping, "cpu hardcapping is expected to be enabled") +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_stub.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_stub.go new file mode 100644 index 000000000000..4bca506c2fab --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_stub.go @@ -0,0 +1,47 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cm + +import ( + "github.com/golang/glog" + "k8s.io/kubernetes/pkg/api" +) + +type containerManagerStub struct{} + +var _ ContainerManager = &containerManagerStub{} + +func (cm *containerManagerStub) Start() error { + glog.V(2).Infof("Starting stub container manager") + return nil +} + +func (cm *containerManagerStub) SystemCgroupsLimit() api.ResourceList { + return api.ResourceList{} +} + +func (cm *containerManagerStub) GetNodeConfig() NodeConfig { + return NodeConfig{} +} + +func (cm *containerManagerStub) Status() Status { + return Status{} +} + +func NewStubContainerManager() ContainerManager { + return &containerManagerStub{} +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_unsupported.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_unsupported.go new file mode 100644 index 000000000000..426c95ca4cc9 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_unsupported.go @@ -0,0 +1,52 @@ +// +build !linux + +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cm + +import ( + "fmt" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/kubelet/cadvisor" + "k8s.io/kubernetes/pkg/util/mount" +) + +type unsupportedContainerManager struct { +} + +var _ ContainerManager = &unsupportedContainerManager{} + +func (unsupportedContainerManager) Start() error { + return fmt.Errorf("Container Manager is unsupported in this build") +} + +func (unsupportedContainerManager) SystemCgroupsLimit() api.ResourceList { + return api.ResourceList{} +} + +func (unsupportedContainerManager) GetNodeConfig() NodeConfig { + return NodeConfig{} +} + +func (cm *unsupportedContainerManager) Status() Status { + return Status{} +} + +func NewContainerManager(_ mount.Interface, _ cadvisor.Interface, _ NodeConfig) (ContainerManager, error) { + return &unsupportedContainerManager{}, nil +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_unsupported_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_unsupported_test.go new file mode 100644 index 000000000000..48a4f04fdb13 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_unsupported_test.go @@ -0,0 +1,72 @@ +// +build !linux + +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package cm
+
+import (
+	"fmt"
+
+	"k8s.io/kubernetes/pkg/util/mount"
+)
+
+type fakeMountInterface struct {
+	mountPoints []mount.MountPoint
+}
+
+func (mi *fakeMountInterface) Mount(source string, target string, fstype string, options []string) error {
+	return fmt.Errorf("unsupported")
+}
+
+func (mi *fakeMountInterface) Unmount(target string) error {
+	return fmt.Errorf("unsupported")
+}
+
+func (mi *fakeMountInterface) List() ([]mount.MountPoint, error) {
+	return mi.mountPoints, nil
+}
+
+func (mi *fakeMountInterface) IsLikelyNotMountPoint(file string) (bool, error) {
+	return false, fmt.Errorf("unsupported")
+}
+
+func fakeContainerMgrMountInt() mount.Interface {
+	return &fakeMountInterface{
+		[]mount.MountPoint{
+			{
+				Device: "cgroup",
+				Type:   "cgroup",
+				Opts:   []string{"rw", "relatime", "cpuset"},
+			},
+			{
+				Device: "cgroup",
+				Type:   "cgroup",
+				Opts:   []string{"rw", "relatime", "cpu"},
+			},
+			{
+				Device: "cgroup",
+				Type:   "cgroup",
+				Opts:   []string{"rw", "relatime", "cpuacct"},
+			},
+			{
+				Device: "cgroup",
+				Type:   "cgroup",
+				Opts:   []string{"rw", "relatime", "memory"},
+			},
+		},
+	}
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/config/apiserver.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/config/apiserver.go
new file mode 100644
index 000000000000..c3baed8d0421
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/config/apiserver.go
@@ -0,0 +1,44 @@
+/*
+Copyright 2015 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Reads the pod configuration from the Kubernetes apiserver.
+package config
+
+import (
+	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/client/cache"
+	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+	"k8s.io/kubernetes/pkg/fields"
+	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
+)
+
+// NewSourceApiserver creates a config source that watches and pulls from the apiserver.
+func NewSourceApiserver(c *clientset.Clientset, nodeName string, updates chan<- interface{}) {
+	lw := cache.NewListWatchFromClient(c.CoreClient, "pods", api.NamespaceAll, fields.OneTermEqualSelector(api.PodHostField, nodeName))
+	newSourceApiserverFromLW(lw, updates)
+}
+
+// newSourceApiserverFromLW creates a config source that watches and pulls from the apiserver.
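+// Because the reflector feeds an UndeltaStore, every apiserver change results
+// in the complete, current pod set being pushed to updates as a single
+// kubetypes.SET operation rather than an incremental add or delete.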
+func newSourceApiserverFromLW(lw cache.ListerWatcher, updates chan<- interface{}) { + send := func(objs []interface{}) { + var pods []*api.Pod + for _, o := range objs { + pods = append(pods, o.(*api.Pod)) + } + updates <- kubetypes.PodUpdate{Pods: pods, Op: kubetypes.SET, Source: kubetypes.ApiserverSource} + } + cache.NewReflector(lw, &api.Pod{}, cache.NewUndeltaStore(send, cache.MetaNamespaceKeyFunc), 0).Run() +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/config/apiserver_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/config/apiserver_test.go new file mode 100644 index 000000000000..d7be8a7fed5f --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/config/apiserver_test.go @@ -0,0 +1,192 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +import ( + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/client/cache" + kubetypes "k8s.io/kubernetes/pkg/kubelet/types" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/watch" +) + +type fakePodLW struct { + listResp runtime.Object + watchResp watch.Interface +} + +func (lw fakePodLW) List(options api.ListOptions) (runtime.Object, error) { + return lw.listResp, nil +} + +func (lw fakePodLW) Watch(options api.ListOptions) (watch.Interface, error) { + return lw.watchResp, nil +} + +var _ cache.ListerWatcher = fakePodLW{} + +func TestNewSourceApiserver_UpdatesAndMultiplePods(t *testing.T) { + pod1v1 := &api.Pod{ + ObjectMeta: api.ObjectMeta{Name: "p"}, + Spec: api.PodSpec{Containers: []api.Container{{Image: "image/one"}}}} + pod1v2 := &api.Pod{ + ObjectMeta: api.ObjectMeta{Name: "p"}, + Spec: api.PodSpec{Containers: []api.Container{{Image: "image/two"}}}} + pod2 := &api.Pod{ + ObjectMeta: api.ObjectMeta{Name: "q"}, + Spec: api.PodSpec{Containers: []api.Container{{Image: "image/blah"}}}} + + // Setup fake api client. 
+ fakeWatch := watch.NewFake() + lw := fakePodLW{ + listResp: &api.PodList{Items: []api.Pod{*pod1v1}}, + watchResp: fakeWatch, + } + + ch := make(chan interface{}) + + newSourceApiserverFromLW(lw, ch) + + got, ok := <-ch + if !ok { + t.Errorf("Unable to read from channel when expected") + } + update := got.(kubetypes.PodUpdate) + expected := CreatePodUpdate(kubetypes.SET, kubetypes.ApiserverSource, pod1v1) + if !api.Semantic.DeepEqual(expected, update) { + t.Errorf("Expected %#v; Got %#v", expected, update) + } + + // Add another pod + fakeWatch.Add(pod2) + got, ok = <-ch + if !ok { + t.Errorf("Unable to read from channel when expected") + } + update = got.(kubetypes.PodUpdate) + // Could be sorted either of these two ways: + expectedA := CreatePodUpdate(kubetypes.SET, kubetypes.ApiserverSource, pod1v1, pod2) + expectedB := CreatePodUpdate(kubetypes.SET, kubetypes.ApiserverSource, pod2, pod1v1) + + if !api.Semantic.DeepEqual(expectedA, update) && !api.Semantic.DeepEqual(expectedB, update) { + t.Errorf("Expected %#v or %#v, Got %#v", expectedA, expectedB, update) + } + + // Modify pod1 + fakeWatch.Modify(pod1v2) + got, ok = <-ch + if !ok { + t.Errorf("Unable to read from channel when expected") + } + update = got.(kubetypes.PodUpdate) + expectedA = CreatePodUpdate(kubetypes.SET, kubetypes.ApiserverSource, pod1v2, pod2) + expectedB = CreatePodUpdate(kubetypes.SET, kubetypes.ApiserverSource, pod2, pod1v2) + + if !api.Semantic.DeepEqual(expectedA, update) && !api.Semantic.DeepEqual(expectedB, update) { + t.Errorf("Expected %#v or %#v, Got %#v", expectedA, expectedB, update) + } + + // Delete pod1 + fakeWatch.Delete(pod1v2) + got, ok = <-ch + if !ok { + t.Errorf("Unable to read from channel when expected") + } + update = got.(kubetypes.PodUpdate) + expected = CreatePodUpdate(kubetypes.SET, kubetypes.ApiserverSource, pod2) + if !api.Semantic.DeepEqual(expected, update) { + t.Errorf("Expected %#v, Got %#v", expected, update) + } + + // Delete pod2 + fakeWatch.Delete(pod2) + got, ok = <-ch + if !ok { + t.Errorf("Unable to read from channel when expected") + } + update = got.(kubetypes.PodUpdate) + expected = CreatePodUpdate(kubetypes.SET, kubetypes.ApiserverSource) + if !api.Semantic.DeepEqual(expected, update) { + t.Errorf("Expected %#v, Got %#v", expected, update) + } +} + +func TestNewSourceApiserver_TwoNamespacesSameName(t *testing.T) { + pod1 := api.Pod{ + ObjectMeta: api.ObjectMeta{Name: "p", Namespace: "one"}, + Spec: api.PodSpec{Containers: []api.Container{{Image: "image/one"}}}} + pod2 := api.Pod{ + ObjectMeta: api.ObjectMeta{Name: "p", Namespace: "two"}, + Spec: api.PodSpec{Containers: []api.Container{{Image: "image/blah"}}}} + + // Setup fake api client. + fakeWatch := watch.NewFake() + lw := fakePodLW{ + listResp: &api.PodList{Items: []api.Pod{pod1, pod2}}, + watchResp: fakeWatch, + } + + ch := make(chan interface{}) + + newSourceApiserverFromLW(lw, ch) + + got, ok := <-ch + if !ok { + t.Errorf("Unable to read from channel when expected") + } + update := got.(kubetypes.PodUpdate) + // Make sure that we get both pods. Catches bug #2294. 
+ if !(len(update.Pods) == 2) { + t.Errorf("Expected %d, Got %d", 2, len(update.Pods)) + } + + // Delete pod1 + fakeWatch.Delete(&pod1) + got, ok = <-ch + if !ok { + t.Errorf("Unable to read from channel when expected") + } + update = got.(kubetypes.PodUpdate) + if !(len(update.Pods) == 1) { + t.Errorf("Expected %d, Got %d", 1, len(update.Pods)) + } +} + +func TestNewSourceApiserverInitialEmptySendsEmptyPodUpdate(t *testing.T) { + // Setup fake api client. + fakeWatch := watch.NewFake() + lw := fakePodLW{ + listResp: &api.PodList{Items: []api.Pod{}}, + watchResp: fakeWatch, + } + + ch := make(chan interface{}) + + newSourceApiserverFromLW(lw, ch) + + got, ok := <-ch + if !ok { + t.Errorf("Unable to read from channel when expected") + } + update := got.(kubetypes.PodUpdate) + expected := CreatePodUpdate(kubetypes.SET, kubetypes.ApiserverSource) + if !api.Semantic.DeepEqual(expected, update) { + t.Errorf("Expected %#v; Got %#v", expected, update) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/config/common.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/config/common.go new file mode 100644 index 000000000000..0838699ec262 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/config/common.go @@ -0,0 +1,141 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Common logic used by both http and file channels. +package config + +import ( + "crypto/md5" + "encoding/hex" + "fmt" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/validation" + "k8s.io/kubernetes/pkg/apimachinery/registered" + kubetypes "k8s.io/kubernetes/pkg/kubelet/types" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/types" + "k8s.io/kubernetes/pkg/util/hash" + utilyaml "k8s.io/kubernetes/pkg/util/yaml" + + "github.com/golang/glog" +) + +// Generate a pod name that is unique among nodes by appending the nodeName. +func generatePodName(name, nodeName string) string { + return fmt.Sprintf("%s-%s", name, nodeName) +} + +func applyDefaults(pod *api.Pod, source string, isFile bool, nodeName string) error { + if len(pod.UID) == 0 { + hasher := md5.New() + if isFile { + fmt.Fprintf(hasher, "host:%s", nodeName) + fmt.Fprintf(hasher, "file:%s", source) + } else { + fmt.Fprintf(hasher, "url:%s", source) + } + hash.DeepHashObject(hasher, pod) + pod.UID = types.UID(hex.EncodeToString(hasher.Sum(nil)[0:])) + glog.V(5).Infof("Generated UID %q pod %q from %s", pod.UID, pod.Name, source) + } + + pod.Name = generatePodName(pod.Name, nodeName) + glog.V(5).Infof("Generated Name %q for UID %q from URL %s", pod.Name, pod.UID, source) + + if pod.Namespace == "" { + pod.Namespace = kubetypes.NamespaceDefault + } + glog.V(5).Infof("Using namespace %q for pod %q from %s", pod.Namespace, pod.Name, source) + + // Set the Host field to indicate this pod is scheduled on the current node. 
+ pod.Spec.NodeName = nodeName + + pod.ObjectMeta.SelfLink = getSelfLink(pod.Name, pod.Namespace) + + if pod.Annotations == nil { + pod.Annotations = make(map[string]string) + } + // The generated UID is the hash of the file. + pod.Annotations[kubetypes.ConfigHashAnnotationKey] = string(pod.UID) + + // Set the default status to pending. + pod.Status.Phase = api.PodPending + return nil +} + +func getSelfLink(name, namespace string) string { + var selfLink string + if len(namespace) == 0 { + namespace = api.NamespaceDefault + } + selfLink = fmt.Sprintf("/api/"+registered.GroupOrDie(api.GroupName).GroupVersion.Version+"/pods/namespaces/%s/%s", name, namespace) + return selfLink +} + +type defaultFunc func(pod *api.Pod) error + +func tryDecodeSinglePod(data []byte, defaultFn defaultFunc) (parsed bool, pod *api.Pod, err error) { + // JSON is valid YAML, so this should work for everything. + json, err := utilyaml.ToJSON(data) + if err != nil { + return false, nil, err + } + obj, err := runtime.Decode(api.Codecs.UniversalDecoder(), json) + if err != nil { + return false, pod, err + } + // Check whether the object could be converted to single pod. + if _, ok := obj.(*api.Pod); !ok { + err = fmt.Errorf("invalid pod: %+v", obj) + return false, pod, err + } + newPod := obj.(*api.Pod) + // Apply default values and validate the pod. + if err = defaultFn(newPod); err != nil { + return true, pod, err + } + if errs := validation.ValidatePod(newPod); len(errs) > 0 { + err = fmt.Errorf("invalid pod: %v", errs) + return true, pod, err + } + return true, newPod, nil +} + +func tryDecodePodList(data []byte, defaultFn defaultFunc) (parsed bool, pods api.PodList, err error) { + obj, err := runtime.Decode(api.Codecs.UniversalDecoder(), data) + if err != nil { + return false, pods, err + } + // Check whether the object could be converted to list of pods. + if _, ok := obj.(*api.PodList); !ok { + err = fmt.Errorf("invalid pods list: %#v", obj) + return false, pods, err + } + newPods := obj.(*api.PodList) + // Apply default values and validate pods. + for i := range newPods.Items { + newPod := &newPods.Items[i] + if err = defaultFn(newPod); err != nil { + return true, pods, err + } + if errs := validation.ValidatePod(newPod); len(errs) > 0 { + err = fmt.Errorf("invalid pod: %v", errs) + return true, pods, err + } + } + return true, *newPods, err +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/config/common_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/config/common_test.go new file mode 100644 index 000000000000..80eab2f4e402 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/config/common_test.go @@ -0,0 +1,157 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package config + +import ( + "reflect" + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/apimachinery/registered" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/securitycontext" +) + +func noDefault(*api.Pod) error { return nil } + +func TestDecodeSinglePod(t *testing.T) { + grace := int64(30) + pod := &api.Pod{ + TypeMeta: unversioned.TypeMeta{ + APIVersion: "", + }, + ObjectMeta: api.ObjectMeta{ + Name: "test", + UID: "12345", + Namespace: "mynamespace", + }, + Spec: api.PodSpec{ + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + TerminationGracePeriodSeconds: &grace, + Containers: []api.Container{{ + Name: "image", + Image: "test/image", + ImagePullPolicy: "IfNotPresent", + TerminationMessagePath: "/dev/termination-log", + SecurityContext: securitycontext.ValidSecurityContextWithContainerDefaults(), + }}, + SecurityContext: &api.PodSecurityContext{}, + }, + } + json, err := runtime.Encode(testapi.Default.Codec(), pod) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + parsed, podOut, err := tryDecodeSinglePod(json, noDefault) + if !parsed { + t.Errorf("expected to have parsed file: (%s)", string(json)) + } + if err != nil { + t.Errorf("unexpected error: %v (%s)", err, string(json)) + } + if !reflect.DeepEqual(pod, podOut) { + t.Errorf("expected:\n%#v\ngot:\n%#v\n%s", pod, podOut, string(json)) + } + + for _, gv := range registered.EnabledVersionsForGroup(api.GroupName) { + s, _ := api.Codecs.SerializerForFileExtension("yaml") + encoder := api.Codecs.EncoderForVersion(s, gv) + yaml, err := runtime.Encode(encoder, pod) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + parsed, podOut, err = tryDecodeSinglePod(yaml, noDefault) + if !parsed { + t.Errorf("expected to have parsed file: (%s)", string(yaml)) + } + if err != nil { + t.Errorf("unexpected error: %v (%s)", err, string(yaml)) + } + if !reflect.DeepEqual(pod, podOut) { + t.Errorf("expected:\n%#v\ngot:\n%#v\n%s", pod, podOut, string(yaml)) + } + } +} + +func TestDecodePodList(t *testing.T) { + grace := int64(30) + pod := &api.Pod{ + TypeMeta: unversioned.TypeMeta{ + APIVersion: "", + }, + ObjectMeta: api.ObjectMeta{ + Name: "test", + UID: "12345", + Namespace: "mynamespace", + }, + Spec: api.PodSpec{ + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + TerminationGracePeriodSeconds: &grace, + Containers: []api.Container{{ + Name: "image", + Image: "test/image", + ImagePullPolicy: "IfNotPresent", + TerminationMessagePath: "/dev/termination-log", + SecurityContext: securitycontext.ValidSecurityContextWithContainerDefaults(), + }}, + SecurityContext: &api.PodSecurityContext{}, + }, + } + podList := &api.PodList{ + Items: []api.Pod{*pod}, + } + json, err := runtime.Encode(testapi.Default.Codec(), podList) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + parsed, podListOut, err := tryDecodePodList(json, noDefault) + if !parsed { + t.Errorf("expected to have parsed file: (%s)", string(json)) + } + if err != nil { + t.Errorf("unexpected error: %v (%s)", err, string(json)) + } + if !reflect.DeepEqual(podList, &podListOut) { + t.Errorf("expected:\n%#v\ngot:\n%#v\n%s", podList, &podListOut, string(json)) + } + + for _, gv := range registered.EnabledVersionsForGroup(api.GroupName) { + s, _ := api.Codecs.SerializerForFileExtension("yaml") + encoder := api.Codecs.EncoderForVersion(s, gv) + yaml, err := runtime.Encode(encoder, podList) + if 
err != nil { + t.Errorf("unexpected error: %v", err) + } + + parsed, podListOut, err = tryDecodePodList(yaml, noDefault) + if !parsed { + t.Errorf("expected to have parsed file: (%s): %v", string(yaml), err) + continue + } + if err != nil { + t.Errorf("unexpected error: %v (%s)", err, string(yaml)) + continue + } + if !reflect.DeepEqual(podList, &podListOut) { + t.Errorf("expected:\n%#v\ngot:\n%#v\n%s", pod, &podListOut, string(yaml)) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/config/config.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/config/config.go new file mode 100644 index 000000000000..0d190e01e043 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/config/config.go @@ -0,0 +1,497 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +import ( + "fmt" + "reflect" + "sync" + + "github.com/golang/glog" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/validation" + "k8s.io/kubernetes/pkg/client/record" + kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" + kubetypes "k8s.io/kubernetes/pkg/kubelet/types" + "k8s.io/kubernetes/pkg/kubelet/util/format" + "k8s.io/kubernetes/pkg/util/config" + "k8s.io/kubernetes/pkg/util/sets" + "k8s.io/kubernetes/pkg/util/validation/field" +) + +// PodConfigNotificationMode describes how changes are sent to the update channel. +type PodConfigNotificationMode int + +const ( + // PodConfigNotificationUnknown is the default value for + // PodConfigNotificationMode when uninitialized. + PodConfigNotificationUnknown = iota + // PodConfigNotificationSnapshot delivers the full configuration as a SET whenever + // any change occurs. + PodConfigNotificationSnapshot + // PodConfigNotificationSnapshotAndUpdates delivers an UPDATE message whenever pods are + // changed, and a SET message if there are any additions or removals. + PodConfigNotificationSnapshotAndUpdates + // PodConfigNotificationIncremental delivers ADD, UPDATE, REMOVE, RECONCILE to the update channel. + PodConfigNotificationIncremental +) + +// PodConfig is a configuration mux that merges many sources of pod configuration into a single +// consistent structure, and then delivers incremental change notifications to listeners +// in order. +type PodConfig struct { + pods *podStorage + mux *config.Mux + + // the channel of denormalized changes passed to listeners + updates chan kubetypes.PodUpdate + + // contains the list of all configured sources + sourcesLock sync.Mutex + sources sets.String +} + +// NewPodConfig creates an object that can merge many configuration sources into a stream +// of normalized updates to a pod configuration. 
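Taken together, NewPodConfig, Channel, and Updates give the following usage shape; a minimal sketch, assuming the vendored import paths, with an empty SET standing in for a real source payload:

package main

import (
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client/record"
	"k8s.io/kubernetes/pkg/kubelet/config"
	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
)

func main() {
	recorder := record.NewBroadcaster().NewRecorder(api.EventSource{Component: "example"})
	podConfig := config.NewPodConfig(config.PodConfigNotificationIncremental, recorder)

	// Each named source gets its own channel into the mux.
	fileCh := podConfig.Channel(kubetypes.FileSource)
	fileCh <- kubetypes.PodUpdate{Op: kubetypes.SET, Source: kubetypes.FileSource}

	// The merged, ordered stream comes out of Updates(); an empty first SET
	// surfaces as an empty ADD in incremental mode, marking the source ready.
	<-podConfig.Updates()
}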
+func NewPodConfig(mode PodConfigNotificationMode, recorder record.EventRecorder) *PodConfig { + updates := make(chan kubetypes.PodUpdate, 50) + storage := newPodStorage(updates, mode, recorder) + podConfig := &PodConfig{ + pods: storage, + mux: config.NewMux(storage), + updates: updates, + sources: sets.String{}, + } + return podConfig +} + +// Channel creates or returns a config source channel. The channel +// only accepts PodUpdates +func (c *PodConfig) Channel(source string) chan<- interface{} { + c.sourcesLock.Lock() + defer c.sourcesLock.Unlock() + c.sources.Insert(source) + return c.mux.Channel(source) +} + +// SeenAllSources returns true if seenSources contains all sources in the +// config, and also this config has received a SET message from each source. +func (c *PodConfig) SeenAllSources(seenSources sets.String) bool { + if c.pods == nil { + return false + } + glog.V(6).Infof("Looking for %v, have seen %v", c.sources.List(), seenSources) + return seenSources.HasAll(c.sources.List()...) && c.pods.seenSources(c.sources.List()...) +} + +// Updates returns a channel of updates to the configuration, properly denormalized. +func (c *PodConfig) Updates() <-chan kubetypes.PodUpdate { + return c.updates +} + +// Sync requests the full configuration be delivered to the update channel. +func (c *PodConfig) Sync() { + c.pods.Sync() +} + +// podStorage manages the current pod state at any point in time and ensures updates +// to the channel are delivered in order. Note that this object is an in-memory source of +// "truth" and on creation contains zero entries. Once all previously read sources are +// available, then this object should be considered authoritative. +type podStorage struct { + podLock sync.RWMutex + // map of source name to pod name to pod reference + pods map[string]map[string]*api.Pod + mode PodConfigNotificationMode + + // ensures that updates are delivered in strict order + // on the updates channel + updateLock sync.Mutex + updates chan<- kubetypes.PodUpdate + + // contains the set of all sources that have sent at least one SET + sourcesSeenLock sync.Mutex + sourcesSeen sets.String + + // the EventRecorder to use + recorder record.EventRecorder +} + +// TODO: PodConfigNotificationMode could be handled by a listener to the updates channel +// in the future, especially with multiple listeners. +// TODO: allow initialization of the current state of the store with snapshotted version. +func newPodStorage(updates chan<- kubetypes.PodUpdate, mode PodConfigNotificationMode, recorder record.EventRecorder) *podStorage { + return &podStorage{ + pods: make(map[string]map[string]*api.Pod), + mode: mode, + updates: updates, + sourcesSeen: sets.String{}, + recorder: recorder, + } +} + +// Merge normalizes a set of incoming changes from different sources into a map of all Pods +// and ensures that redundant changes are filtered out, and then pushes zero or more minimal +// updates onto the update channel. Ensures that updates are delivered in order. 
+func (s *podStorage) Merge(source string, change interface{}) error {
+	s.updateLock.Lock()
+	defer s.updateLock.Unlock()
+
+	seenBefore := s.sourcesSeen.Has(source)
+	adds, updates, deletes, reconciles := s.merge(source, change)
+	firstSet := !seenBefore && s.sourcesSeen.Has(source)
+
+	// deliver update notifications
+	switch s.mode {
+	case PodConfigNotificationIncremental:
+		if len(deletes.Pods) > 0 {
+			s.updates <- *deletes
+		}
+		if len(adds.Pods) > 0 {
+			s.updates <- *adds
+		}
+		if len(updates.Pods) > 0 {
+			s.updates <- *updates
+		}
+		if firstSet && len(adds.Pods) == 0 && len(updates.Pods) == 0 {
+			// Send an empty update when first seeing the source and there are
+			// no ADD or UPDATE pods from the source. This signals kubelet that
+			// the source is ready.
+			s.updates <- *adds
+		}
+		// Reconcile is only supported here, because kubelet does not support
+		// Snapshot updates yet.
+		if len(reconciles.Pods) > 0 {
+			s.updates <- *reconciles
+		}
+
+	case PodConfigNotificationSnapshotAndUpdates:
+		if len(deletes.Pods) > 0 || len(adds.Pods) > 0 || firstSet {
+			s.updates <- kubetypes.PodUpdate{Pods: s.MergedState().([]*api.Pod), Op: kubetypes.SET, Source: source}
+		}
+		if len(updates.Pods) > 0 {
+			s.updates <- *updates
+		}
+
+	case PodConfigNotificationSnapshot:
+		if len(updates.Pods) > 0 || len(deletes.Pods) > 0 || len(adds.Pods) > 0 || firstSet {
+			s.updates <- kubetypes.PodUpdate{Pods: s.MergedState().([]*api.Pod), Op: kubetypes.SET, Source: source}
+		}
+
+	case PodConfigNotificationUnknown:
+		fallthrough
+	default:
+		panic(fmt.Sprintf("unsupported PodConfigNotificationMode: %#v", s.mode))
+	}
+
+	return nil
+}
+
+func (s *podStorage) merge(source string, change interface{}) (adds, updates, deletes, reconciles *kubetypes.PodUpdate) {
+	s.podLock.Lock()
+	defer s.podLock.Unlock()
+
+	addPods := []*api.Pod{}
+	updatePods := []*api.Pod{}
+	deletePods := []*api.Pod{}
+	reconcilePods := []*api.Pod{}
+
+	pods := s.pods[source]
+	if pods == nil {
+		pods = make(map[string]*api.Pod)
+	}
+
+	// updatePodsFunc is a local function that updates the pod cache *pods* with
+	// the new pods in *newPods*, carrying entries over from the previous cache
+	// *oldPods*. Note that *pods* and *oldPods* may refer to the same cache.
+	updatePodsFunc := func(newPods []*api.Pod, oldPods, pods map[string]*api.Pod) {
+		filtered := filterInvalidPods(newPods, source, s.recorder)
+		for _, ref := range filtered {
+			name := kubecontainer.GetPodFullName(ref)
+			// Annotate the pod with the source before any comparison.
+ if ref.Annotations == nil { + ref.Annotations = make(map[string]string) + } + ref.Annotations[kubetypes.ConfigSourceAnnotationKey] = source + if existing, found := oldPods[name]; found { + pods[name] = existing + needUpdate, needReconcile := checkAndUpdatePod(existing, ref) + if needUpdate { + updatePods = append(updatePods, existing) + } else if needReconcile { + reconcilePods = append(reconcilePods, existing) + } + continue + } + recordFirstSeenTime(ref) + pods[name] = ref + addPods = append(addPods, ref) + } + } + + update := change.(kubetypes.PodUpdate) + switch update.Op { + case kubetypes.ADD, kubetypes.UPDATE: + if update.Op == kubetypes.ADD { + glog.V(4).Infof("Adding new pods from source %s : %v", source, update.Pods) + } else { + glog.V(4).Infof("Updating pods from source %s : %v", source, update.Pods) + } + updatePodsFunc(update.Pods, pods, pods) + + case kubetypes.REMOVE: + glog.V(4).Infof("Removing a pod %v", update) + for _, value := range update.Pods { + name := kubecontainer.GetPodFullName(value) + if existing, found := pods[name]; found { + // this is a delete + delete(pods, name) + deletePods = append(deletePods, existing) + continue + } + // this is a no-op + } + + case kubetypes.SET: + glog.V(4).Infof("Setting pods for source %s", source) + s.markSourceSet(source) + // Clear the old map entries by just creating a new map + oldPods := pods + pods = make(map[string]*api.Pod) + updatePodsFunc(update.Pods, oldPods, pods) + for name, existing := range oldPods { + if _, found := pods[name]; !found { + // this is a delete + deletePods = append(deletePods, existing) + } + } + + default: + glog.Warningf("Received invalid update type: %v", update) + + } + + s.pods[source] = pods + + adds = &kubetypes.PodUpdate{Op: kubetypes.ADD, Pods: copyPods(addPods), Source: source} + updates = &kubetypes.PodUpdate{Op: kubetypes.UPDATE, Pods: copyPods(updatePods), Source: source} + deletes = &kubetypes.PodUpdate{Op: kubetypes.REMOVE, Pods: copyPods(deletePods), Source: source} + reconciles = &kubetypes.PodUpdate{Op: kubetypes.RECONCILE, Pods: copyPods(reconcilePods), Source: source} + + return adds, updates, deletes, reconciles +} + +func (s *podStorage) markSourceSet(source string) { + s.sourcesSeenLock.Lock() + defer s.sourcesSeenLock.Unlock() + s.sourcesSeen.Insert(source) +} + +func (s *podStorage) seenSources(sources ...string) bool { + s.sourcesSeenLock.Lock() + defer s.sourcesSeenLock.Unlock() + return s.sourcesSeen.HasAll(sources...) +} + +func filterInvalidPods(pods []*api.Pod, source string, recorder record.EventRecorder) (filtered []*api.Pod) { + names := sets.String{} + for i, pod := range pods { + var errlist field.ErrorList + if errs := validation.ValidatePod(pod); len(errs) != 0 { + errlist = append(errlist, errs...) + // If validation fails, don't trust it any further - + // even Name could be bad. + } else { + name := kubecontainer.GetPodFullName(pod) + if names.Has(name) { + // TODO: when validation becomes versioned, this gets a bit + // more complicated. 
+				errlist = append(errlist, field.Duplicate(field.NewPath("metadata", "name"), pod.Name))
+			} else {
+				names.Insert(name)
+			}
+		}
+		if len(errlist) > 0 {
+			name := bestPodIdentString(pod)
+			err := errlist.ToAggregate()
+			glog.Warningf("Pod[%d] (%s) from %s failed validation, ignoring: %v", i+1, name, source, err)
+			recorder.Eventf(pod, api.EventTypeWarning, kubecontainer.FailedValidation, "Error validating pod %s from %s, ignoring: %v", name, source, err)
+			continue
+		}
+		filtered = append(filtered, pod)
+	}
+	return
+}
+
+// Annotations that the kubelet adds to the pod.
+var localAnnotations = []string{
+	kubetypes.ConfigSourceAnnotationKey,
+	kubetypes.ConfigMirrorAnnotationKey,
+	kubetypes.ConfigFirstSeenAnnotationKey,
+}
+
+func isLocalAnnotationKey(key string) bool {
+	for _, localKey := range localAnnotations {
+		if key == localKey {
+			return true
+		}
+	}
+	return false
+}
+
+// isAnnotationMapEqual returns true if the existing annotation map is equal to the
+// candidate map, ignoring local annotations.
+func isAnnotationMapEqual(existingMap, candidateMap map[string]string) bool {
+	if candidateMap == nil {
+		candidateMap = make(map[string]string)
+	}
+	for k, v := range candidateMap {
+		if isLocalAnnotationKey(k) {
+			continue
+		}
+		if existingValue, ok := existingMap[k]; ok && existingValue == v {
+			continue
+		}
+		return false
+	}
+	for k := range existingMap {
+		if isLocalAnnotationKey(k) {
+			continue
+		}
+		// stale entry in existing map.
+		if _, exists := candidateMap[k]; !exists {
+			return false
+		}
+	}
+	return true
+}
+
+// recordFirstSeenTime records the first seen time of this pod.
+func recordFirstSeenTime(pod *api.Pod) {
+	glog.V(4).Infof("Receiving a new pod %q", format.Pod(pod))
+	pod.Annotations[kubetypes.ConfigFirstSeenAnnotationKey] = kubetypes.NewTimestamp().GetString()
+}
+
+// updateAnnotations overwrites the existing pod's annotations with those from ref,
+// preserving the locally managed annotations listed above.
+func updateAnnotations(existing, ref *api.Pod) {
+	annotations := make(map[string]string, len(ref.Annotations)+len(localAnnotations))
+	for k, v := range ref.Annotations {
+		annotations[k] = v
+	}
+	for _, k := range localAnnotations {
+		if v, ok := existing.Annotations[k]; ok {
+			annotations[k] = v
+		}
+	}
+	existing.Annotations = annotations
+}
+
+func podsDifferSemantically(existing, ref *api.Pod) bool {
+	if reflect.DeepEqual(existing.Spec, ref.Spec) &&
+		reflect.DeepEqual(existing.Labels, ref.Labels) &&
+		reflect.DeepEqual(existing.DeletionTimestamp, ref.DeletionTimestamp) &&
+		reflect.DeepEqual(existing.DeletionGracePeriodSeconds, ref.DeletionGracePeriodSeconds) &&
+		isAnnotationMapEqual(existing.Annotations, ref.Annotations) {
+		return false
+	}
+	return true
+}
+
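For intuition, a small sketch (illustrative, not part of the vendored sources) of what the helpers above imply: the kubelet's own bookkeeping annotations never count as a semantic difference between two versions of a pod.

package config

import (
	"k8s.io/kubernetes/pkg/api"
	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
)

// exampleLocalAnnotationsIgnored returns false: the config-source annotation
// is locally managed, so it alone does not make the pods differ semantically.
func exampleLocalAnnotationsIgnored() bool {
	existing := &api.Pod{ObjectMeta: api.ObjectMeta{Annotations: map[string]string{
		kubetypes.ConfigSourceAnnotationKey: "file",
		"user/key":                          "v",
	}}}
	ref := &api.Pod{ObjectMeta: api.ObjectMeta{Annotations: map[string]string{
		"user/key": "v",
	}}}
	return podsDifferSemantically(existing, ref)
}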
+// checkAndUpdatePod updates existing, and:
+//   * if ref makes a meaningful change, returns needUpdate=true
+//   * if ref makes no meaningful change, but changes the pod status, returns needReconcile=true
+//   * else returns both false
+// Note: needUpdate and needReconcile should never both be true.
+func checkAndUpdatePod(existing, ref *api.Pod) (needUpdate, needReconcile bool) {
+	// TODO: it would be better to update the whole object and only preserve certain things
+	// like the source annotation or the UID (to ensure safety)
+	if !podsDifferSemantically(existing, ref) {
+		// this is not an update
+		// Only check reconcile when it is not an update, because if the pod is going to
+		// be updated, an extra reconcile is unnecessary
+		if !reflect.DeepEqual(existing.Status, ref.Status) {
+			// A pod with a changed status needs a reconcile, because kubelet
+			// should be the source of truth for pod status.
+			existing.Status = ref.Status
+			needReconcile = true
+		}
+		return
+	}
+	// this is an update
+
+	// Overwrite the first-seen time with the existing one. This is our own
+	// internal annotation, there is no need to update.
+	ref.Annotations[kubetypes.ConfigFirstSeenAnnotationKey] = existing.Annotations[kubetypes.ConfigFirstSeenAnnotationKey]
+
+	existing.Spec = ref.Spec
+	existing.Labels = ref.Labels
+	existing.DeletionTimestamp = ref.DeletionTimestamp
+	existing.DeletionGracePeriodSeconds = ref.DeletionGracePeriodSeconds
+	existing.Status = ref.Status
+	updateAnnotations(existing, ref)
+	needUpdate = true
+	return
+}
+
+// Sync sends a copy of the current state through the update channel.
+func (s *podStorage) Sync() {
+	s.updateLock.Lock()
+	defer s.updateLock.Unlock()
+	s.updates <- kubetypes.PodUpdate{Pods: s.MergedState().([]*api.Pod), Op: kubetypes.SET, Source: kubetypes.AllSource}
+}
+
+// MergedState implements config.Accessor.
+func (s *podStorage) MergedState() interface{} {
+	s.podLock.RLock()
+	defer s.podLock.RUnlock()
+	pods := make([]*api.Pod, 0)
+	for _, sourcePods := range s.pods {
+		for _, podRef := range sourcePods {
+			pod, err := api.Scheme.Copy(podRef)
+			if err != nil {
+				glog.Errorf("unable to copy pod: %v", err)
+			}
+			pods = append(pods, pod.(*api.Pod))
+		}
+	}
+	return pods
+}
+
+func bestPodIdentString(pod *api.Pod) string {
+	namespace := pod.Namespace
+	if namespace == "" {
+		namespace = "<empty-namespace>"
+	}
+	name := pod.Name
+	if name == "" {
+		name = "<empty-name>"
+	}
+	return fmt.Sprintf("%s.%s", name, namespace)
+}
+
+func copyPods(sourcePods []*api.Pod) []*api.Pod {
+	pods := []*api.Pod{}
+	for _, source := range sourcePods {
+		// Use a deep copy here just in case
+		pod, err := api.Scheme.Copy(source)
+		if err != nil {
+			glog.Errorf("unable to copy pod: %v", err)
+		}
+		pods = append(pods, pod.(*api.Pod))
+	}
+	return pods
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/config/config_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/config/config_test.go
new file mode 100644
index 000000000000..5bc6a114a5ae
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/config/config_test.go
@@ -0,0 +1,396 @@
+/*
+Copyright 2014 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package config + +import ( + "math/rand" + "reflect" + "sort" + "strconv" + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/client/record" + "k8s.io/kubernetes/pkg/conversion" + kubetypes "k8s.io/kubernetes/pkg/kubelet/types" + "k8s.io/kubernetes/pkg/securitycontext" + "k8s.io/kubernetes/pkg/types" +) + +const ( + TestSource = "test" +) + +func expectEmptyChannel(t *testing.T, ch <-chan interface{}) { + select { + case update := <-ch: + t.Errorf("Expected no update in channel, Got %v", update) + default: + } +} + +type sortedPods []*api.Pod + +func (s sortedPods) Len() int { + return len(s) +} +func (s sortedPods) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} +func (s sortedPods) Less(i, j int) bool { + return s[i].Namespace < s[j].Namespace +} + +func CreateValidPod(name, namespace string) *api.Pod { + return &api.Pod{ + ObjectMeta: api.ObjectMeta{ + UID: types.UID(name), // for the purpose of testing, this is unique enough + Name: name, + Namespace: namespace, + }, + Spec: api.PodSpec{ + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + Containers: []api.Container{ + { + Name: "ctr", + Image: "image", + ImagePullPolicy: "IfNotPresent", + SecurityContext: securitycontext.ValidSecurityContextWithContainerDefaults(), + }, + }, + }, + } +} + +func CreatePodUpdate(op kubetypes.PodOperation, source string, pods ...*api.Pod) kubetypes.PodUpdate { + return kubetypes.PodUpdate{Pods: pods, Op: op, Source: source} +} + +func createPodConfigTester(mode PodConfigNotificationMode) (chan<- interface{}, <-chan kubetypes.PodUpdate, *PodConfig) { + eventBroadcaster := record.NewBroadcaster() + config := NewPodConfig(mode, eventBroadcaster.NewRecorder(api.EventSource{Component: "kubelet"})) + channel := config.Channel(TestSource) + ch := config.Updates() + return channel, ch, config +} + +func expectPodUpdate(t *testing.T, ch <-chan kubetypes.PodUpdate, expected ...kubetypes.PodUpdate) { + for i := range expected { + update := <-ch + sort.Sort(sortedPods(update.Pods)) + sort.Sort(sortedPods(expected[i].Pods)) + // Make copies of the expected/actual update to compare all fields + // except for "Pods", which are compared separately below. + expectedCopy, updateCopy := expected[i], update + expectedCopy.Pods, updateCopy.Pods = nil, nil + if !api.Semantic.DeepEqual(expectedCopy, updateCopy) { + t.Fatalf("Expected %#v, Got %#v", expectedCopy, updateCopy) + } + + if len(expected[i].Pods) != len(update.Pods) { + t.Fatalf("Expected %#v, Got %#v", expected[i], update) + } + // Compare pods one by one. This is necessary because we don't want to + // compare local annotations. 
+		for j := range expected[i].Pods {
+			if podsDifferSemantically(expected[i].Pods[j], update.Pods[j]) || !reflect.DeepEqual(expected[i].Pods[j].Status, update.Pods[j].Status) {
+				t.Fatalf("Expected %#v, Got %#v", expected[i].Pods[j], update.Pods[j])
+			}
+		}
+	}
+	expectNoPodUpdate(t, ch)
+}
+
+func expectNoPodUpdate(t *testing.T, ch <-chan kubetypes.PodUpdate) {
+	select {
+	case update := <-ch:
+		t.Errorf("Expected no update in channel, Got %#v", update)
+	default:
+	}
+}
+
+func TestNewPodAdded(t *testing.T) {
+	channel, ch, config := createPodConfigTester(PodConfigNotificationIncremental)
+
+	// see an update
+	podUpdate := CreatePodUpdate(kubetypes.ADD, TestSource, CreateValidPod("foo", "new"))
+	channel <- podUpdate
+	expectPodUpdate(t, ch, CreatePodUpdate(kubetypes.ADD, TestSource, CreateValidPod("foo", "new")))
+
+	config.Sync()
+	expectPodUpdate(t, ch, CreatePodUpdate(kubetypes.SET, kubetypes.AllSource, CreateValidPod("foo", "new")))
+}
+
+func TestNewPodAddedInvalidNamespace(t *testing.T) {
+	channel, ch, config := createPodConfigTester(PodConfigNotificationIncremental)
+
+	// see an update
+	podUpdate := CreatePodUpdate(kubetypes.ADD, TestSource, CreateValidPod("foo", ""))
+	channel <- podUpdate
+	config.Sync()
+	expectPodUpdate(t, ch, CreatePodUpdate(kubetypes.SET, kubetypes.AllSource))
+}
+
+func TestNewPodAddedDefaultNamespace(t *testing.T) {
+	channel, ch, config := createPodConfigTester(PodConfigNotificationIncremental)
+
+	// see an update
+	podUpdate := CreatePodUpdate(kubetypes.ADD, TestSource, CreateValidPod("foo", "default"))
+	channel <- podUpdate
+	expectPodUpdate(t, ch, CreatePodUpdate(kubetypes.ADD, TestSource, CreateValidPod("foo", "default")))
+
+	config.Sync()
+	expectPodUpdate(t, ch, CreatePodUpdate(kubetypes.SET, kubetypes.AllSource, CreateValidPod("foo", "default")))
+}
+
+func TestNewPodAddedDifferentNamespaces(t *testing.T) {
+	channel, ch, config := createPodConfigTester(PodConfigNotificationIncremental)
+
+	// see an update
+	podUpdate := CreatePodUpdate(kubetypes.ADD, TestSource, CreateValidPod("foo", "default"))
+	channel <- podUpdate
+	expectPodUpdate(t, ch, CreatePodUpdate(kubetypes.ADD, TestSource, CreateValidPod("foo", "default")))
+
+	// see an update in another namespace
+	podUpdate = CreatePodUpdate(kubetypes.ADD, TestSource, CreateValidPod("foo", "new"))
+	channel <- podUpdate
+	expectPodUpdate(t, ch, CreatePodUpdate(kubetypes.ADD, TestSource, CreateValidPod("foo", "new")))
+
+	config.Sync()
+	expectPodUpdate(t, ch, CreatePodUpdate(kubetypes.SET, kubetypes.AllSource, CreateValidPod("foo", "default"), CreateValidPod("foo", "new")))
+}
+
+func TestInvalidPodFiltered(t *testing.T) {
+	channel, ch, _ := createPodConfigTester(PodConfigNotificationIncremental)
+
+	// see an update
+	podUpdate := CreatePodUpdate(kubetypes.ADD, TestSource, CreateValidPod("foo", "new"))
+	channel <- podUpdate
+	expectPodUpdate(t, ch, CreatePodUpdate(kubetypes.ADD, TestSource, CreateValidPod("foo", "new")))
+
+	// add an invalid update
+	podUpdate = CreatePodUpdate(kubetypes.UPDATE, TestSource, &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}})
+	channel <- podUpdate
+	expectNoPodUpdate(t, ch)
+}
+
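The incremental mode exercised in these tests delivers changes as separate operations, so a consumer typically dispatches on Op. A minimal sketch, assuming the vendored import paths:

package main

import (
	"k8s.io/kubernetes/pkg/kubelet/config"
	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
)

// handlePodUpdates shows the shape of a PodConfig consumer loop.
func handlePodUpdates(c *config.PodConfig) {
	for u := range c.Updates() {
		switch u.Op {
		case kubetypes.ADD:
			// new pods to start
		case kubetypes.UPDATE:
			// semantically changed pods to sync
		case kubetypes.REMOVE:
			// pods to tear down
		case kubetypes.RECONCILE:
			// status-only drift; reassert the kubelet's view of status
		case kubetypes.SET:
			// the full desired state from one source
		}
	}
}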
+func TestNewPodAddedSnapshotAndUpdates(t *testing.T) {
+	channel, ch, config := createPodConfigTester(PodConfigNotificationSnapshotAndUpdates)
+
+	// see a set
+	podUpdate := CreatePodUpdate(kubetypes.ADD, TestSource, CreateValidPod("foo", "new"))
+	channel <- podUpdate
+	expectPodUpdate(t, ch, CreatePodUpdate(kubetypes.SET, TestSource, CreateValidPod("foo", "new")))
+
+	config.Sync()
+	expectPodUpdate(t, ch, CreatePodUpdate(kubetypes.SET, kubetypes.AllSource, CreateValidPod("foo", "new")))
+
+	// container updates are separated as UPDATE
+	pod := *podUpdate.Pods[0]
+	pod.Spec.Containers = []api.Container{{Name: "bar", Image: "test", ImagePullPolicy: api.PullIfNotPresent}}
+	channel <- CreatePodUpdate(kubetypes.ADD, TestSource, &pod)
+	expectPodUpdate(t, ch, CreatePodUpdate(kubetypes.UPDATE, TestSource, &pod))
+}
+
+func TestNewPodAddedSnapshot(t *testing.T) {
+	channel, ch, config := createPodConfigTester(PodConfigNotificationSnapshot)
+
+	// see a set
+	podUpdate := CreatePodUpdate(kubetypes.ADD, TestSource, CreateValidPod("foo", "new"))
+	channel <- podUpdate
+	expectPodUpdate(t, ch, CreatePodUpdate(kubetypes.SET, TestSource, CreateValidPod("foo", "new")))
+
+	config.Sync()
+	expectPodUpdate(t, ch, CreatePodUpdate(kubetypes.SET, kubetypes.AllSource, CreateValidPod("foo", "new")))
+
+	// container updates are separated as UPDATE
+	pod := *podUpdate.Pods[0]
+	pod.Spec.Containers = []api.Container{{Name: "bar", Image: "test", ImagePullPolicy: api.PullIfNotPresent}}
+	channel <- CreatePodUpdate(kubetypes.ADD, TestSource, &pod)
+	expectPodUpdate(t, ch, CreatePodUpdate(kubetypes.SET, TestSource, &pod))
+}
+
+func TestNewPodAddedUpdatedRemoved(t *testing.T) {
+	channel, ch, _ := createPodConfigTester(PodConfigNotificationIncremental)
+
+	// should register an add
+	podUpdate := CreatePodUpdate(kubetypes.ADD, TestSource, CreateValidPod("foo", "new"))
+	channel <- podUpdate
+	expectPodUpdate(t, ch, CreatePodUpdate(kubetypes.ADD, TestSource, CreateValidPod("foo", "new")))
+
+	// should ignore ADDs that are identical
+	expectNoPodUpdate(t, ch)
+
+	// a kubetypes.ADD should be converted to kubetypes.UPDATE
+	pod := CreateValidPod("foo", "new")
+	pod.Spec.Containers = []api.Container{{Name: "bar", Image: "test", ImagePullPolicy: api.PullIfNotPresent}}
+	podUpdate = CreatePodUpdate(kubetypes.ADD, TestSource, pod)
+	channel <- podUpdate
+	expectPodUpdate(t, ch, CreatePodUpdate(kubetypes.UPDATE, TestSource, pod))
+
+	podUpdate = CreatePodUpdate(kubetypes.REMOVE, TestSource, &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: "new"}})
+	channel <- podUpdate
+	expectPodUpdate(t, ch, CreatePodUpdate(kubetypes.REMOVE, TestSource, pod))
+}
+
+func TestNewPodAddedUpdatedSet(t *testing.T) {
+	channel, ch, _ := createPodConfigTester(PodConfigNotificationIncremental)
+
+	// should register an add
+	podUpdate := CreatePodUpdate(kubetypes.ADD, TestSource, CreateValidPod("foo", "new"), CreateValidPod("foo2", "new"), CreateValidPod("foo3", "new"))
+	channel <- podUpdate
+	expectPodUpdate(t, ch, CreatePodUpdate(kubetypes.ADD, TestSource, CreateValidPod("foo", "new"), CreateValidPod("foo2", "new"), CreateValidPod("foo3", "new")))
+
+	// should ignore ADDs that are identical
+	expectNoPodUpdate(t, ch)
+
+	// should be converted to a kubetypes.ADD, kubetypes.REMOVE, and kubetypes.UPDATE
+	pod := CreateValidPod("foo2", "new")
+	pod.Spec.Containers = []api.Container{{Name: "bar", Image: "test", ImagePullPolicy: api.PullIfNotPresent}}
+	podUpdate = CreatePodUpdate(kubetypes.SET, TestSource, pod, CreateValidPod("foo3", "new"), CreateValidPod("foo4", "new"))
+	channel <- podUpdate
+	expectPodUpdate(t, ch,
+		CreatePodUpdate(kubetypes.REMOVE, TestSource, CreateValidPod("foo", "new")),
+		CreatePodUpdate(kubetypes.ADD, TestSource, CreateValidPod("foo4", "new")),
+		CreatePodUpdate(kubetypes.UPDATE, TestSource, pod))
+}
+
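The reconcile-versus-update distinction exercised next can also be seen directly on checkAndUpdatePod; an illustrative test-style sketch (not part of the vendored patch), using the CreateValidPod helper defined above:

package config

import (
	"testing"

	"k8s.io/kubernetes/pkg/api"
)

func TestCheckAndUpdatePodSketch(t *testing.T) {
	existing := CreateValidPod("p", "ns")
	existing.Annotations = map[string]string{}

	// A status-only change: semantically equal, so only a reconcile is needed.
	statusOnly := CreateValidPod("p", "ns")
	statusOnly.Annotations = map[string]string{}
	statusOnly.Status = api.PodStatus{Message: "changed"}
	if needUpdate, needReconcile := checkAndUpdatePod(existing, statusOnly); needUpdate || !needReconcile {
		t.Errorf("expected reconcile only, got update=%v reconcile=%v", needUpdate, needReconcile)
	}

	// A spec change: semantically different, so an update is needed instead.
	specChange := CreateValidPod("p", "ns")
	specChange.Annotations = map[string]string{}
	specChange.Spec.Containers[0].Image = "other/image"
	if needUpdate, needReconcile := checkAndUpdatePod(existing, specChange); !needUpdate || needReconcile {
		t.Errorf("expected update only, got update=%v reconcile=%v", needUpdate, needReconcile)
	}
}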
+func TestNewPodAddedSetReconciled(t *testing.T) {
+	// Create and touch new test pods, returning the new pods and the touched pod.
+	// We create the new pod list before touching it to avoid a data race.
+	newTestPods := func(touchStatus, touchSpec bool) ([]*api.Pod, *api.Pod) {
+		pods := []*api.Pod{
+			CreateValidPod("changable-pod-0", "new"),
+			CreateValidPod("constant-pod-1", "new"),
+			CreateValidPod("constant-pod-2", "new"),
+		}
+		if touchStatus {
+			pods[0].Status = api.PodStatus{Message: strconv.Itoa(rand.Int())}
+		}
+		if touchSpec {
+			pods[0].Spec.Containers[0].Name = strconv.Itoa(rand.Int())
+		}
+		return pods, pods[0]
+	}
+	for _, op := range []kubetypes.PodOperation{
+		kubetypes.ADD,
+		kubetypes.SET,
+	} {
+		var podWithStatusChange *api.Pod
+		pods, _ := newTestPods(false, false)
+		channel, ch, _ := createPodConfigTester(PodConfigNotificationIncremental)
+
+		// Use SET to initialize the config, especially initialize the source set
+		channel <- CreatePodUpdate(kubetypes.SET, TestSource, pods...)
+		expectPodUpdate(t, ch, CreatePodUpdate(kubetypes.ADD, TestSource, pods...))
+
+		// If status is not changed, no reconcile should be triggered
+		channel <- CreatePodUpdate(op, TestSource, pods...)
+		expectNoPodUpdate(t, ch)
+
+		// If the pod status is changed and not updated, a reconcile should be triggered
+		pods, podWithStatusChange = newTestPods(true, false)
+		channel <- CreatePodUpdate(op, TestSource, pods...)
+		expectPodUpdate(t, ch, CreatePodUpdate(kubetypes.RECONCILE, TestSource, podWithStatusChange))
+
+		// If the pod status is changed, but the pod is also updated, no reconcile should be triggered
+		pods, podWithStatusChange = newTestPods(true, true)
+		channel <- CreatePodUpdate(op, TestSource, pods...)
+		expectPodUpdate(t, ch, CreatePodUpdate(kubetypes.UPDATE, TestSource, podWithStatusChange))
+	}
+}
+
+func TestInitialEmptySet(t *testing.T) {
+	for _, test := range []struct {
+		mode PodConfigNotificationMode
+		op   kubetypes.PodOperation
+	}{
+		{PodConfigNotificationIncremental, kubetypes.ADD},
+		{PodConfigNotificationSnapshot, kubetypes.SET},
+		{PodConfigNotificationSnapshotAndUpdates, kubetypes.SET},
+	} {
+		channel, ch, _ := createPodConfigTester(test.mode)
+
+		// should register an empty PodUpdate operation
+		podUpdate := CreatePodUpdate(kubetypes.SET, TestSource)
+		channel <- podUpdate
+		expectPodUpdate(t, ch, CreatePodUpdate(test.op, TestSource))
+
+		// should ignore following empty sets
+		podUpdate = CreatePodUpdate(kubetypes.SET, TestSource)
+		channel <- podUpdate
+		podUpdate = CreatePodUpdate(kubetypes.ADD, TestSource, CreateValidPod("foo", "new"))
+		channel <- podUpdate
+		expectPodUpdate(t, ch, CreatePodUpdate(test.op, TestSource, CreateValidPod("foo", "new")))
+	}
+}
+
+func TestPodUpdateAnnotations(t *testing.T) {
+	channel, ch, _ := createPodConfigTester(PodConfigNotificationIncremental)
+
+	pod := CreateValidPod("foo2", "new")
+	pod.Annotations = make(map[string]string, 0)
+	pod.Annotations["kubernetes.io/blah"] = "blah"
+
+	clone, err := conversion.NewCloner().DeepCopy(pod)
+	if err != nil {
+		t.Fatalf("%v", err)
+	}
+
+	podUpdate := CreatePodUpdate(kubetypes.SET, TestSource, CreateValidPod("foo1", "new"), clone.(*api.Pod), CreateValidPod("foo3", "new"))
+	channel <- podUpdate
+	expectPodUpdate(t, ch, CreatePodUpdate(kubetypes.ADD, TestSource, CreateValidPod("foo1", "new"), pod, CreateValidPod("foo3", "new")))
+
+	pod.Annotations["kubernetes.io/blah"] = "superblah"
+	podUpdate = CreatePodUpdate(kubetypes.SET, TestSource, CreateValidPod("foo1", "new"), pod, CreateValidPod("foo3", "new"))
+	channel <- podUpdate
+	expectPodUpdate(t, ch,
CreatePodUpdate(kubetypes.UPDATE, TestSource, pod)) + + pod.Annotations["kubernetes.io/otherblah"] = "doh" + podUpdate = CreatePodUpdate(kubetypes.SET, TestSource, CreateValidPod("foo1", "new"), pod, CreateValidPod("foo3", "new")) + channel <- podUpdate + expectPodUpdate(t, ch, CreatePodUpdate(kubetypes.UPDATE, TestSource, pod)) + + delete(pod.Annotations, "kubernetes.io/blah") + podUpdate = CreatePodUpdate(kubetypes.SET, TestSource, CreateValidPod("foo1", "new"), pod, CreateValidPod("foo3", "new")) + channel <- podUpdate + expectPodUpdate(t, ch, CreatePodUpdate(kubetypes.UPDATE, TestSource, pod)) +} + +func TestPodUpdateLabels(t *testing.T) { + channel, ch, _ := createPodConfigTester(PodConfigNotificationIncremental) + + pod := CreateValidPod("foo2", "new") + pod.Labels = make(map[string]string, 0) + pod.Labels["key"] = "value" + + clone, err := conversion.NewCloner().DeepCopy(pod) + if err != nil { + t.Fatalf("%v", err) + } + + podUpdate := CreatePodUpdate(kubetypes.SET, TestSource, clone.(*api.Pod)) + channel <- podUpdate + expectPodUpdate(t, ch, CreatePodUpdate(kubetypes.ADD, TestSource, pod)) + + pod.Labels["key"] = "newValue" + podUpdate = CreatePodUpdate(kubetypes.SET, TestSource, pod) + channel <- podUpdate + expectPodUpdate(t, ch, CreatePodUpdate(kubetypes.UPDATE, TestSource, pod)) + +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/config/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/config/doc.go new file mode 100644 index 000000000000..511d05522086 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/config/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package config implements the pod configuration readers. +package config diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/config/file.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/config/file.go new file mode 100644 index 000000000000..da5cd74007d2 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/config/file.go @@ -0,0 +1,161 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Reads the pod configuration from file or a directory of files. 
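A minimal sketch (illustrative only) of pointing this source at a static-pod manifest directory; the path, node name, and period are placeholders:

package main

import (
	"time"

	"k8s.io/kubernetes/pkg/kubelet/config"
	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
)

func main() {
	updates := make(chan interface{})
	// Re-read the directory every 20 seconds; each pass emits a full SET.
	config.NewSourceFile("/etc/kubernetes/manifests", "node-1", 20*time.Second, updates)
	for u := range updates {
		if pu := u.(kubetypes.PodUpdate); pu.Source == kubetypes.FileSource {
			_ = pu.Pods // full desired set of static pods
		}
	}
}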
+package config + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "sort" + "time" + + "k8s.io/kubernetes/pkg/api" + kubetypes "k8s.io/kubernetes/pkg/kubelet/types" + "k8s.io/kubernetes/pkg/util/wait" + + "github.com/golang/glog" +) + +type sourceFile struct { + path string + nodeName string + updates chan<- interface{} +} + +func NewSourceFile(path string, nodeName string, period time.Duration, updates chan<- interface{}) { + config := &sourceFile{ + path: path, + nodeName: nodeName, + updates: updates, + } + glog.V(1).Infof("Watching path %q", path) + go wait.Until(config.run, period, wait.NeverStop) +} + +func (s *sourceFile) run() { + if err := s.extractFromPath(); err != nil { + glog.Errorf("Unable to read config path %q: %v", s.path, err) + } +} + +func (s *sourceFile) applyDefaults(pod *api.Pod, source string) error { + return applyDefaults(pod, source, true, s.nodeName) +} + +func (s *sourceFile) extractFromPath() error { + path := s.path + statInfo, err := os.Stat(path) + if err != nil { + if !os.IsNotExist(err) { + return err + } + // Emit an update with an empty PodList to allow FileSource to be marked as seen + s.updates <- kubetypes.PodUpdate{Pods: []*api.Pod{}, Op: kubetypes.SET, Source: kubetypes.FileSource} + return fmt.Errorf("path does not exist, ignoring") + } + + switch { + case statInfo.Mode().IsDir(): + pods, err := s.extractFromDir(path) + if err != nil { + return err + } + s.updates <- kubetypes.PodUpdate{Pods: pods, Op: kubetypes.SET, Source: kubetypes.FileSource} + + case statInfo.Mode().IsRegular(): + pod, err := s.extractFromFile(path) + if err != nil { + return err + } + s.updates <- kubetypes.PodUpdate{Pods: []*api.Pod{pod}, Op: kubetypes.SET, Source: kubetypes.FileSource} + + default: + return fmt.Errorf("path is not a directory or file") + } + + return nil +} + +// Get as many pod configs as we can from a directory. Return an error if and only if something +// prevented us from reading anything at all. Do not return an error if only some files +// were problematic. 
+func (s *sourceFile) extractFromDir(name string) ([]*api.Pod, error) { + dirents, err := filepath.Glob(filepath.Join(name, "[^.]*")) + if err != nil { + return nil, fmt.Errorf("glob failed: %v", err) + } + + pods := make([]*api.Pod, 0) + if len(dirents) == 0 { + return pods, nil + } + + sort.Strings(dirents) + for _, path := range dirents { + statInfo, err := os.Stat(path) + if err != nil { + glog.V(1).Infof("Can't get metadata for %q: %v", path, err) + continue + } + + switch { + case statInfo.Mode().IsDir(): + glog.V(1).Infof("Not recursing into config path %q", path) + case statInfo.Mode().IsRegular(): + pod, err := s.extractFromFile(path) + if err != nil { + glog.V(1).Infof("Can't process config file %q: %v", path, err) + } else { + pods = append(pods, pod) + } + default: + glog.V(1).Infof("Config path %q is not a directory or file: %v", path, statInfo.Mode()) + } + } + return pods, nil +} + +func (s *sourceFile) extractFromFile(filename string) (pod *api.Pod, err error) { + glog.V(3).Infof("Reading config file %q", filename) + file, err := os.Open(filename) + if err != nil { + return pod, err + } + defer file.Close() + + data, err := ioutil.ReadAll(file) + if err != nil { + return pod, err + } + + defaultFn := func(pod *api.Pod) error { + return s.applyDefaults(pod, filename) + } + + parsed, pod, podErr := tryDecodeSinglePod(data, defaultFn) + if parsed { + if podErr != nil { + return pod, podErr + } + return pod, nil + } + + return pod, fmt.Errorf("%v: read '%v', but couldn't parse as pod(%v).\n", + filename, string(data), podErr) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/config/file_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/config/file_test.go new file mode 100644 index 000000000000..fad1f227b991 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/config/file_test.go @@ -0,0 +1,196 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package config + +import ( + "io/ioutil" + "os" + "testing" + "time" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/api/validation" + kubetypes "k8s.io/kubernetes/pkg/kubelet/types" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/securitycontext" + utiltesting "k8s.io/kubernetes/pkg/util/testing" + "k8s.io/kubernetes/pkg/util/wait" +) + +func TestExtractFromNonExistentFile(t *testing.T) { + ch := make(chan interface{}, 1) + c := sourceFile{"/some/fake/file", "localhost", ch} + err := c.extractFromPath() + if err == nil { + t.Errorf("Expected error") + } +} + +func TestUpdateOnNonExistentFile(t *testing.T) { + ch := make(chan interface{}) + NewSourceFile("random_non_existent_path", "localhost", time.Millisecond, ch) + select { + case got := <-ch: + update := got.(kubetypes.PodUpdate) + expected := CreatePodUpdate(kubetypes.SET, kubetypes.FileSource) + if !api.Semantic.DeepDerivative(expected, update) { + t.Fatalf("Expected %#v, Got %#v", expected, update) + } + + case <-time.After(wait.ForeverTestTimeout): + t.Errorf("Expected update, timeout instead") + } +} + +func writeTestFile(t *testing.T, dir, name string, contents string) *os.File { + file, err := ioutil.TempFile(os.TempDir(), "test_pod_config") + if err != nil { + t.Fatalf("Unable to create test file %#v", err) + } + file.Close() + if err := ioutil.WriteFile(file.Name(), []byte(contents), 0555); err != nil { + t.Fatalf("Unable to write test file %#v", err) + } + return file +} + +func TestReadPodsFromFile(t *testing.T) { + hostname := "random-test-hostname" + grace := int64(30) + var testCases = []struct { + desc string + pod runtime.Object + expected kubetypes.PodUpdate + }{ + { + desc: "Simple pod", + pod: &api.Pod{ + TypeMeta: unversioned.TypeMeta{ + Kind: "Pod", + APIVersion: "", + }, + ObjectMeta: api.ObjectMeta{ + Name: "test", + UID: "12345", + Namespace: "mynamespace", + }, + Spec: api.PodSpec{ + Containers: []api.Container{{Name: "image", Image: "test/image", SecurityContext: securitycontext.ValidSecurityContextWithContainerDefaults()}}, + SecurityContext: &api.PodSecurityContext{}, + }, + Status: api.PodStatus{ + Phase: api.PodPending, + }, + }, + expected: CreatePodUpdate(kubetypes.SET, kubetypes.FileSource, &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: "test-" + hostname, + UID: "12345", + Namespace: "mynamespace", + Annotations: map[string]string{kubetypes.ConfigHashAnnotationKey: "12345"}, + SelfLink: getSelfLink("test-"+hostname, "mynamespace"), + }, + Spec: api.PodSpec{ + NodeName: hostname, + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + TerminationGracePeriodSeconds: &grace, + Containers: []api.Container{{ + Name: "image", + Image: "test/image", + TerminationMessagePath: "/dev/termination-log", + ImagePullPolicy: "Always", + SecurityContext: securitycontext.ValidSecurityContextWithContainerDefaults()}}, + SecurityContext: &api.PodSecurityContext{}, + }, + Status: api.PodStatus{ + Phase: api.PodPending, + }, + }), + }, + } + + for _, testCase := range testCases { + func() { + var versionedPod runtime.Object + err := testapi.Default.Converter().Convert(&testCase.pod, &versionedPod) + if err != nil { + t.Fatalf("%s: error in versioning the pod: %v", testCase.desc, err) + } + fileContents, err := runtime.Encode(testapi.Default.Codec(), versionedPod) + if err != nil { + t.Fatalf("%s: error in encoding the pod: %v", testCase.desc, err) + } + + file := writeTestFile(t, os.TempDir(), 
"test_pod_config", string(fileContents)) + defer os.Remove(file.Name()) + + ch := make(chan interface{}) + NewSourceFile(file.Name(), hostname, time.Millisecond, ch) + select { + case got := <-ch: + update := got.(kubetypes.PodUpdate) + for _, pod := range update.Pods { + if errs := validation.ValidatePod(pod); len(errs) > 0 { + t.Errorf("%s: Invalid pod %#v, %#v", testCase.desc, pod, errs) + } + } + if !api.Semantic.DeepEqual(testCase.expected, update) { + t.Errorf("%s: Expected %#v, Got %#v", testCase.desc, testCase.expected, update) + } + case <-time.After(wait.ForeverTestTimeout): + t.Errorf("%s: Expected update, timeout instead", testCase.desc) + } + }() + } +} + +func TestExtractFromBadDataFile(t *testing.T) { + file := writeTestFile(t, os.TempDir(), "test_pod_config", string([]byte{1, 2, 3})) + defer os.Remove(file.Name()) + + ch := make(chan interface{}, 1) + c := sourceFile{file.Name(), "localhost", ch} + err := c.extractFromPath() + if err == nil { + t.Fatalf("Expected error") + } + expectEmptyChannel(t, ch) +} + +func TestExtractFromEmptyDir(t *testing.T) { + dirName, err := utiltesting.MkTmpdir("file-test") + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + defer os.RemoveAll(dirName) + + ch := make(chan interface{}, 1) + c := sourceFile{dirName, "localhost", ch} + err = c.extractFromPath() + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + + update := (<-ch).(kubetypes.PodUpdate) + expected := CreatePodUpdate(kubetypes.SET, kubetypes.FileSource) + if !api.Semantic.DeepEqual(expected, update) { + t.Errorf("Expected %#v, Got %#v", expected, update) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/config/http.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/config/http.go new file mode 100644 index 000000000000..0752e5fa3021 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/config/http.go @@ -0,0 +1,141 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Reads the pod configuration from an HTTP GET response. +package config + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "time" + + "k8s.io/kubernetes/pkg/api" + kubetypes "k8s.io/kubernetes/pkg/kubelet/types" + "k8s.io/kubernetes/pkg/util/wait" + + "github.com/golang/glog" +) + +type sourceURL struct { + url string + header http.Header + nodeName string + updates chan<- interface{} + data []byte + failureLogs int + client *http.Client +} + +func NewSourceURL(url string, header http.Header, nodeName string, period time.Duration, updates chan<- interface{}) { + config := &sourceURL{ + url: url, + header: header, + nodeName: nodeName, + updates: updates, + data: nil, + // Timing out requests leads to retries. This client is only used to + // read the the manifest URL passed to kubelet. 
+		client: &http.Client{Timeout: 10 * time.Second},
+	}
+	glog.V(1).Infof("Watching URL %s", url)
+	go wait.Until(config.run, period, wait.NeverStop)
+}
+
+func (s *sourceURL) run() {
+	if err := s.extractFromURL(); err != nil {
+		// Don't log this multiple times per minute. The first few entries should be
+		// enough to get the point across.
+		if s.failureLogs < 3 {
+			glog.Warningf("Failed to read pods from URL: %v", err)
+		} else if s.failureLogs == 3 {
+			glog.Warningf("Failed to read pods from URL. Dropping verbosity of this message to V(4): %v", err)
+		} else {
+			glog.V(4).Infof("Failed to read pods from URL: %v", err)
+		}
+		s.failureLogs++
+	} else {
+		if s.failureLogs > 0 {
+			glog.Info("Successfully read pods from URL.")
+			s.failureLogs = 0
+		}
+	}
+}
+
+func (s *sourceURL) applyDefaults(pod *api.Pod) error {
+	return applyDefaults(pod, s.url, false, s.nodeName)
+}
+
+func (s *sourceURL) extractFromURL() error {
+	req, err := http.NewRequest("GET", s.url, nil)
+	if err != nil {
+		return err
+	}
+	req.Header = s.header
+	resp, err := s.client.Do(req)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	data, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return err
+	}
+	if resp.StatusCode != 200 {
+		return fmt.Errorf("%v: %v", s.url, resp.Status)
+	}
+	if len(data) == 0 {
+		// Emit an update with an empty PodList to allow HTTPSource to be marked as seen
+		s.updates <- kubetypes.PodUpdate{Pods: []*api.Pod{}, Op: kubetypes.SET, Source: kubetypes.HTTPSource}
+		return fmt.Errorf("zero-length data received from %v", s.url)
+	}
+	// Short circuit if the data has not changed since the last time it was read.
+	if bytes.Equal(data, s.data) {
+		return nil
+	}
+	s.data = data
+
+	// First try to parse the data as a single pod.
+	parsed, pod, singlePodErr := tryDecodeSinglePod(data, s.applyDefaults)
+	if parsed {
+		if singlePodErr != nil {
+			// It parsed but could not be used.
+			return singlePodErr
+		}
+		s.updates <- kubetypes.PodUpdate{Pods: []*api.Pod{pod}, Op: kubetypes.SET, Source: kubetypes.HTTPSource}
+		return nil
+	}
+
+	// That didn't work, so try a list of pods.
+	parsed, podList, multiPodErr := tryDecodePodList(data, s.applyDefaults)
+	if parsed {
+		if multiPodErr != nil {
+			// It parsed but could not be used.
+			return multiPodErr
+		}
+		pods := make([]*api.Pod, 0)
+		for i := range podList.Items {
+			pods = append(pods, &podList.Items[i])
+		}
+		s.updates <- kubetypes.PodUpdate{Pods: pods, Op: kubetypes.SET, Source: kubetypes.HTTPSource}
+		return nil
+	}
+
+	return fmt.Errorf("%v: received '%v', but couldn't parse as "+
+		"single (%v) or multiple pods (%v)",
+		s.url, string(data), singlePodErr, multiPodErr)
+}
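+
+// A hypothetical wiring sketch (not part of the upstream file): a kubelet-like
+// caller would point this source at a manifest URL and read SET updates from
+// the channel. The URL, node name, and period below are placeholders:
+//
+//	updates := make(chan interface{})
+//	header := make(http.Header)
+//	NewSourceURL("http://example.com/manifests", header, "node-1", 10*time.Second, updates)
+//	update := (<-updates).(kubetypes.PodUpdate)
+//	_ = update // each update carries the full set of pods from this source
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/config/http_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/config/http_test.go
new file mode 100644
index 000000000000..b30bf6ab0ca3
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/config/http_test.go
@@ -0,0 +1,354 @@
+/*
+Copyright 2014 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.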
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +import ( + "encoding/json" + "net/http" + "net/http/httptest" + "testing" + "time" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/api/validation" + kubetypes "k8s.io/kubernetes/pkg/kubelet/types" + "k8s.io/kubernetes/pkg/runtime" + utiltesting "k8s.io/kubernetes/pkg/util/testing" +) + +func TestURLErrorNotExistNoUpdate(t *testing.T) { + ch := make(chan interface{}) + NewSourceURL("http://localhost:49575/_not_found_", http.Header{}, "localhost", time.Millisecond, ch) + select { + case got := <-ch: + t.Errorf("Expected no update, Got %#v", got) + case <-time.After(2 * time.Millisecond): + } +} + +func TestExtractFromHttpBadness(t *testing.T) { + ch := make(chan interface{}, 1) + c := sourceURL{"http://localhost:49575/_not_found_", http.Header{}, "other", ch, nil, 0, http.DefaultClient} + if err := c.extractFromURL(); err == nil { + t.Errorf("Expected error") + } + expectEmptyChannel(t, ch) +} + +func TestExtractInvalidPods(t *testing.T) { + var testCases = []struct { + desc string + pod *api.Pod + }{ + { + desc: "No version", + pod: &api.Pod{TypeMeta: unversioned.TypeMeta{APIVersion: ""}}, + }, + { + desc: "Invalid version", + pod: &api.Pod{TypeMeta: unversioned.TypeMeta{APIVersion: "v1betta2"}}, + }, + { + desc: "Invalid volume name", + pod: &api.Pod{ + TypeMeta: unversioned.TypeMeta{APIVersion: testapi.Default.GroupVersion().String()}, + Spec: api.PodSpec{ + Volumes: []api.Volume{{Name: "_INVALID_"}}, + }, + }, + }, + { + desc: "Duplicate volume names", + pod: &api.Pod{ + TypeMeta: unversioned.TypeMeta{APIVersion: testapi.Default.GroupVersion().String()}, + Spec: api.PodSpec{ + Volumes: []api.Volume{{Name: "repeated"}, {Name: "repeated"}}, + }, + }, + }, + { + desc: "Unspecified container name", + pod: &api.Pod{ + TypeMeta: unversioned.TypeMeta{APIVersion: testapi.Default.GroupVersion().String()}, + Spec: api.PodSpec{ + Containers: []api.Container{{Name: ""}}, + }, + }, + }, + { + desc: "Invalid container name", + pod: &api.Pod{ + TypeMeta: unversioned.TypeMeta{APIVersion: testapi.Default.GroupVersion().String()}, + Spec: api.PodSpec{ + Containers: []api.Container{{Name: "_INVALID_"}}, + }, + }, + }, + } + for _, testCase := range testCases { + data, err := json.Marshal(testCase.pod) + if err != nil { + t.Fatalf("%s: Some weird json problem: %v", testCase.desc, err) + } + fakeHandler := utiltesting.FakeHandler{ + StatusCode: 200, + ResponseBody: string(data), + } + testServer := httptest.NewServer(&fakeHandler) + defer testServer.Close() + ch := make(chan interface{}, 1) + c := sourceURL{testServer.URL, http.Header{}, "localhost", ch, nil, 0, http.DefaultClient} + if err := c.extractFromURL(); err == nil { + t.Errorf("%s: Expected error", testCase.desc) + } + } +} + +func TestExtractPodsFromHTTP(t *testing.T) { + hostname := "different-value" + + grace := int64(30) + var testCases = []struct { + desc string + pods runtime.Object + expected kubetypes.PodUpdate + }{ + { + desc: "Single pod", + pods: &api.Pod{ + TypeMeta: unversioned.TypeMeta{ + Kind: "Pod", + APIVersion: "", + }, + ObjectMeta: api.ObjectMeta{ + Name: "foo", + UID: "111", + Namespace: "mynamespace", + }, + Spec: api.PodSpec{ + NodeName: hostname, + Containers: []api.Container{{Name: "1", Image: "foo", ImagePullPolicy: api.PullAlways}}, + SecurityContext: &api.PodSecurityContext{}, + }, + Status: api.PodStatus{ + 
Phase: api.PodPending, + }, + }, + expected: CreatePodUpdate(kubetypes.SET, + kubetypes.HTTPSource, + &api.Pod{ + ObjectMeta: api.ObjectMeta{ + UID: "111", + Name: "foo" + "-" + hostname, + Namespace: "mynamespace", + Annotations: map[string]string{kubetypes.ConfigHashAnnotationKey: "111"}, + SelfLink: getSelfLink("foo-"+hostname, "mynamespace"), + }, + Spec: api.PodSpec{ + NodeName: hostname, + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + SecurityContext: &api.PodSecurityContext{}, + TerminationGracePeriodSeconds: &grace, + + Containers: []api.Container{{ + Name: "1", + Image: "foo", + TerminationMessagePath: "/dev/termination-log", + ImagePullPolicy: "Always", + }}, + }, + Status: api.PodStatus{ + Phase: api.PodPending, + }, + }), + }, + { + desc: "Multiple pods", + pods: &api.PodList{ + TypeMeta: unversioned.TypeMeta{ + Kind: "PodList", + APIVersion: "", + }, + Items: []api.Pod{ + { + ObjectMeta: api.ObjectMeta{ + Name: "foo", + UID: "111", + }, + Spec: api.PodSpec{ + NodeName: hostname, + Containers: []api.Container{{Name: "1", Image: "foo", ImagePullPolicy: api.PullAlways}}, + SecurityContext: &api.PodSecurityContext{}, + }, + Status: api.PodStatus{ + Phase: api.PodPending, + }, + }, + { + ObjectMeta: api.ObjectMeta{ + Name: "bar", + UID: "222", + }, + Spec: api.PodSpec{ + NodeName: hostname, + Containers: []api.Container{{Name: "2", Image: "bar:bartag", ImagePullPolicy: ""}}, + SecurityContext: &api.PodSecurityContext{}, + }, + Status: api.PodStatus{ + Phase: api.PodPending, + }, + }, + }, + }, + expected: CreatePodUpdate(kubetypes.SET, + kubetypes.HTTPSource, + &api.Pod{ + ObjectMeta: api.ObjectMeta{ + UID: "111", + Name: "foo" + "-" + hostname, + Namespace: "default", + Annotations: map[string]string{kubetypes.ConfigHashAnnotationKey: "111"}, + SelfLink: getSelfLink("foo-"+hostname, kubetypes.NamespaceDefault), + }, + Spec: api.PodSpec{ + NodeName: hostname, + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + TerminationGracePeriodSeconds: &grace, + SecurityContext: &api.PodSecurityContext{}, + + Containers: []api.Container{{ + Name: "1", + Image: "foo", + TerminationMessagePath: "/dev/termination-log", + ImagePullPolicy: "Always", + }}, + }, + Status: api.PodStatus{ + Phase: api.PodPending, + }, + }, + &api.Pod{ + ObjectMeta: api.ObjectMeta{ + UID: "222", + Name: "bar" + "-" + hostname, + Namespace: "default", + Annotations: map[string]string{kubetypes.ConfigHashAnnotationKey: "222"}, + SelfLink: getSelfLink("bar-"+hostname, kubetypes.NamespaceDefault), + }, + Spec: api.PodSpec{ + NodeName: hostname, + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + TerminationGracePeriodSeconds: &grace, + SecurityContext: &api.PodSecurityContext{}, + + Containers: []api.Container{{ + Name: "2", + Image: "bar:bartag", + TerminationMessagePath: "/dev/termination-log", + ImagePullPolicy: "IfNotPresent", + }}, + }, + Status: api.PodStatus{ + Phase: api.PodPending, + }, + }), + }, + } + + for _, testCase := range testCases { + var versionedPods runtime.Object + err := testapi.Default.Converter().Convert(&testCase.pods, &versionedPods) + if err != nil { + t.Fatalf("%s: error in versioning the pods: %s", testCase.desc, err) + } + data, err := runtime.Encode(testapi.Default.Codec(), versionedPods) + if err != nil { + t.Fatalf("%s: error in encoding the pod: %v", testCase.desc, err) + } + fakeHandler := utiltesting.FakeHandler{ + StatusCode: 200, + ResponseBody: string(data), + } + testServer := 
httptest.NewServer(&fakeHandler)
+		defer testServer.Close()
+		ch := make(chan interface{}, 1)
+		c := sourceURL{testServer.URL, http.Header{}, hostname, ch, nil, 0, http.DefaultClient}
+		if err := c.extractFromURL(); err != nil {
+			t.Errorf("%s: Unexpected error: %v", testCase.desc, err)
+			continue
+		}
+		update := (<-ch).(kubetypes.PodUpdate)
+
+		if !api.Semantic.DeepEqual(testCase.expected, update) {
+			t.Errorf("%s: Expected: %#v, Got: %#v", testCase.desc, testCase.expected, update)
+		}
+		for _, pod := range update.Pods {
+			if errs := validation.ValidatePod(pod); len(errs) != 0 {
+				t.Errorf("%s: Expected no validation errors on %#v, Got %v", testCase.desc, pod, errs.ToAggregate())
+			}
+		}
+	}
+}
+
+func TestURLWithHeader(t *testing.T) {
+	pod := &api.Pod{
+		TypeMeta: unversioned.TypeMeta{
+			APIVersion: testapi.Default.GroupVersion().String(),
+			Kind:       "Pod",
+		},
+		ObjectMeta: api.ObjectMeta{
+			Name:      "foo",
+			UID:       "111",
+			Namespace: "mynamespace",
+		},
+		Spec: api.PodSpec{
+			NodeName:   "localhost",
+			Containers: []api.Container{{Name: "1", Image: "foo", ImagePullPolicy: api.PullAlways}},
+		},
+	}
+	data, err := json.Marshal(pod)
+	if err != nil {
+		t.Fatalf("Unexpected json marshalling error: %v", err)
+	}
+	fakeHandler := utiltesting.FakeHandler{
+		StatusCode:   200,
+		ResponseBody: string(data),
+	}
+	testServer := httptest.NewServer(&fakeHandler)
+	defer testServer.Close()
+	ch := make(chan interface{}, 1)
+	header := make(http.Header)
+	header.Set("Metadata-Flavor", "Google")
+	c := sourceURL{testServer.URL, header, "localhost", ch, nil, 0, http.DefaultClient}
+	if err := c.extractFromURL(); err != nil {
+		t.Fatalf("Unexpected error extracting from URL: %v", err)
+	}
+	update := (<-ch).(kubetypes.PodUpdate)
+
+	headerVal := fakeHandler.RequestReceived.Header["Metadata-Flavor"]
+	if len(headerVal) != 1 || headerVal[0] != "Google" {
+		t.Errorf("Header missing expected entry %v. Got %v", header, fakeHandler.RequestReceived.Header)
+	}
+	if len(update.Pods) != 1 {
+		t.Errorf("Received wrong number of pods, expected one: %v", update.Pods)
+	}
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/config/sources.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/config/sources.go
new file mode 100644
index 000000000000..e26b9de28a49
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/config/sources.go
@@ -0,0 +1,67 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package config implements the pod configuration readers.
+package config
+
+import (
+	"sync"
+
+	"k8s.io/kubernetes/pkg/util/sets"
+)
+
+// SourcesReadyFn is a function that returns true if the specified sources have been seen.
+type SourcesReadyFn func(sourcesSeen sets.String) bool
+
+// SourcesReady tracks the set of configured sources seen by the kubelet.
+type SourcesReady interface {
+	// AddSource adds the specified source to the set of sources managed.
+	AddSource(source string)
+	// AllReady returns true if the currently configured sources have all been seen.
+	AllReady() bool
+}
+
+// NewSourcesReady returns a SourcesReady with the specified function.
+func NewSourcesReady(sourcesReadyFn SourcesReadyFn) SourcesReady {
+	return &sourcesImpl{
+		sourcesSeen:    sets.NewString(),
+		sourcesReadyFn: sourcesReadyFn,
+	}
+}
+
+// sourcesImpl implements SourcesReady. It is thread-safe.
+type sourcesImpl struct {
+	// lock protects access to sources seen.
+	lock sync.RWMutex
+	// set of sources seen.
+	sourcesSeen sets.String
+	// sourcesReady is a function that evaluates if the sources are ready.
+	sourcesReadyFn SourcesReadyFn
+}
+
+// AddSource adds the specified source to the set of sources managed.
+func (s *sourcesImpl) AddSource(source string) {
+	s.lock.Lock()
+	defer s.lock.Unlock()
+	s.sourcesSeen.Insert(source)
+}
+
+// AllReady returns true if each configured source is ready.
+func (s *sourcesImpl) AllReady() bool {
+	s.lock.RLock()
+	defer s.lock.RUnlock()
+	return s.sourcesReadyFn(s.sourcesSeen)
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/container/cache.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/container/cache.go
new file mode 100644
index 000000000000..219ad49f397a
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/container/cache.go
@@ -0,0 +1,199 @@
+/*
+Copyright 2015 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package container
+
+import (
+	"sync"
+	"time"
+
+	"k8s.io/kubernetes/pkg/types"
+)
+
+// Cache stores the PodStatus for the pods. It represents *all* the visible
+// pods/containers in the container runtime. All cache entries are at least as
+// new or newer than the global timestamp (set by UpdateTime()), while
+// individual entries may be slightly newer than the global timestamp. If a pod
+// has no states known by the runtime, Cache returns an empty PodStatus object
+// with ID populated.
+//
+// Cache provides two methods to retrieve the PodStatus: the non-blocking Get()
+// and the blocking GetNewerThan() method. The component responsible for
+// populating the cache is expected to call Delete() to explicitly free the
+// cache entries.
+type Cache interface {
+	Get(types.UID) (*PodStatus, error)
+	Set(types.UID, *PodStatus, error, time.Time)
+	// GetNewerThan is a blocking call that only returns the status
+	// when it is newer than the given time.
+	GetNewerThan(types.UID, time.Time) (*PodStatus, error)
+	Delete(types.UID)
+	UpdateTime(time.Time)
+}
+
+type data struct {
+	// Status of the pod.
+	status *PodStatus
+	// Error got when trying to inspect the pod.
+	err error
+	// Time when the data was last modified.
+	modified time.Time
+}
+
+type subRecord struct {
+	time time.Time
+	ch   chan *data
+}
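+
+// An illustrative (hypothetical) use of the Cache interface, showing the
+// Get/GetNewerThan contract described above; the UID and times are made up:
+//
+//	c := NewCache()
+//	c.Set("pod-uid", &PodStatus{ID: "pod-uid"}, nil, time.Now())
+//	status, _ := c.Get("pod-uid")                     // non-blocking read
+//	status, _ = c.GetNewerThan("pod-uid", time.Now()) // blocks until a newer
+//	_ = status                                        // status arrives or UpdateTime advances
+
+// cache implements Cache.
+type cache struct {
+	// Lock which guards all internal data structures.
+	lock sync.RWMutex
+	// Map that stores the pod statuses.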
+	pods map[types.UID]*data
+	// A global timestamp represents how fresh the cached data is. All
+	// cache content is at the least newer than this timestamp. Note that the
+	// timestamp is nil after initialization, and will only become non-nil when
+	// it is ready to serve the cached statuses.
+	timestamp *time.Time
+	// Map that stores the subscriber records.
+	subscribers map[types.UID][]*subRecord
+}
+
+// NewCache creates a pod cache.
+func NewCache() Cache {
+	return &cache{pods: map[types.UID]*data{}, subscribers: map[types.UID][]*subRecord{}}
+}
+
+// Get returns the PodStatus for the pod; callers are expected not to
+// modify the objects returned.
+func (c *cache) Get(id types.UID) (*PodStatus, error) {
+	c.lock.RLock()
+	defer c.lock.RUnlock()
+	d := c.get(id)
+	return d.status, d.err
+}
+
+func (c *cache) GetNewerThan(id types.UID, minTime time.Time) (*PodStatus, error) {
+	ch := c.subscribe(id, minTime)
+	d := <-ch
+	return d.status, d.err
+}
+
+// Set sets the PodStatus for the pod.
+func (c *cache) Set(id types.UID, status *PodStatus, err error, timestamp time.Time) {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+	defer c.notify(id, timestamp)
+	c.pods[id] = &data{status: status, err: err, modified: timestamp}
+}
+
+// Delete removes the entry of the pod.
+func (c *cache) Delete(id types.UID) {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+	delete(c.pods, id)
+}
+
+// UpdateTime modifies the global timestamp of the cache and notifies
+// subscribers if needed.
+func (c *cache) UpdateTime(timestamp time.Time) {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+	c.timestamp = &timestamp
+	// Notify all the subscribers if the condition is met.
+	for id := range c.subscribers {
+		c.notify(id, *c.timestamp)
+	}
+}
+
+func makeDefaultData(id types.UID) *data {
+	return &data{status: &PodStatus{ID: id}, err: nil}
+}
+
+func (c *cache) get(id types.UID) *data {
+	d, ok := c.pods[id]
+	if !ok {
+		// Cache should store *all* pod/container information known by the
+		// container runtime. A cache miss indicates that there are no states
+		// regarding the pod last time we queried the container runtime.
+		// What this *really* means is that there are no visible pod/containers
+		// associated with this pod. Simply return a default (mostly empty)
+		// PodStatus to reflect this.
+		return makeDefaultData(id)
+	}
+	return d
+}
+
+// getIfNewerThan returns the data if it is newer than the given time.
+// Otherwise, it returns nil. The caller should acquire the lock.
+func (c *cache) getIfNewerThan(id types.UID, minTime time.Time) *data {
+	d, ok := c.pods[id]
+	globalTimestampIsNewer := (c.timestamp != nil && c.timestamp.After(minTime))
+	if !ok && globalTimestampIsNewer {
+		// Status is not cached, but the global timestamp is newer than
+		// minTime, return the default status.
+		return makeDefaultData(id)
+	}
+	if ok && (d.modified.After(minTime) || globalTimestampIsNewer) {
+		// Status is cached, return status if either of the following is true.
+		//   * status was modified after minTime
+		//   * the global timestamp of the cache is newer than minTime.
+		return d
+	}
+	// The pod status is not ready.
+	return nil
+}
+
+// notify sends notifications for pod with the given id, if the requirements
+// are met. Note that the caller should acquire the lock.
+func (c *cache) notify(id types.UID, timestamp time.Time) {
+	list, ok := c.subscribers[id]
+	if !ok {
+		// No one to notify.
+		return
+	}
+	newList := []*subRecord{}
+	for i, r := range list {
+		if timestamp.Before(r.time) {
+			// Doesn't meet the time requirement; keep the record.
+			newList = append(newList, list[i])
+			continue
+		}
+		r.ch <- c.get(id)
+		close(r.ch)
+	}
+	if len(newList) == 0 {
+		delete(c.subscribers, id)
+	} else {
+		c.subscribers[id] = newList
+	}
+}
+
+func (c *cache) subscribe(id types.UID, timestamp time.Time) chan *data {
+	ch := make(chan *data, 1)
+	c.lock.Lock()
+	defer c.lock.Unlock()
+	d := c.getIfNewerThan(id, timestamp)
+	if d != nil {
+		// If the cache entry is ready, send the data and return immediately.
+		ch <- d
+		return ch
+	}
+	// Add the subscription record.
+	c.subscribers[id] = append(c.subscribers[id], &subRecord{time: timestamp, ch: ch})
+	return ch
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/container/cache_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/container/cache_test.go
new file mode 100644
index 000000000000..5755005d841a
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/container/cache_test.go
@@ -0,0 +1,210 @@
+/*
+Copyright 2015 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package container
+
+import (
+	"fmt"
+	"strconv"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+	"k8s.io/kubernetes/pkg/types"
+)
+
+func newTestCache() *cache {
+	c := NewCache()
+	return c.(*cache)
+}
+
+func TestCacheNotInitialized(t *testing.T) {
+	cache := newTestCache()
+	// If the global timestamp is not set, always return nil.
+	d := cache.getIfNewerThan(types.UID("1234"), time.Time{})
+	assert.True(t, d == nil, "should return nil since cache is not initialized")
+}
+
+func getTestPodIDAndStatus(numContainers int) (types.UID, *PodStatus) {
+	id := types.UID(strconv.FormatInt(time.Now().UnixNano(), 10))
+	name := fmt.Sprintf("cache-foo-%s", string(id))
+	namespace := "ns"
+	var status *PodStatus
+	if numContainers > 0 {
+		status = &PodStatus{ID: id, Name: name, Namespace: namespace}
+	} else {
+		status = &PodStatus{ID: id}
+	}
+	for i := 0; i < numContainers; i++ {
+		status.ContainerStatuses = append(status.ContainerStatuses, &ContainerStatus{Name: strconv.Itoa(i)})
+	}
+	return id, status
+}
+
+func TestGetIfNewerThanWhenPodExists(t *testing.T) {
+	cache := newTestCache()
+	timestamp := time.Now()
+
+	cases := []struct {
+		cacheTime time.Time
+		modified  time.Time
+		expected  bool
+	}{
+		{
+			// Both the global cache timestamp and the modified time are newer
+			// than the timestamp.
+			cacheTime: timestamp.Add(time.Second),
+			modified:  timestamp,
+			expected:  true,
+		},
+		{
+			// Global cache timestamp is newer, but the pod entry modified
+			// time is older than the given timestamp. This means that the
+			// entry is up-to-date even though it hasn't changed for a while.
+			cacheTime: timestamp.Add(time.Second),
+			modified:  timestamp.Add(-time.Second * 10),
+			expected:  true,
+		},
+		{
+			// Global cache timestamp is older, but the pod entry modified
+			// time is newer than the given timestamp. This means that the
+			// entry is up-to-date but the rest of the cache is still being
+			// updated.
+			cacheTime: timestamp.Add(-time.Second),
+			modified:  timestamp.Add(time.Second * 3),
+			expected:  true,
+		},
+		{
+			// Both the global cache timestamp and the modified time are older
+			// than the given timestamp.
+			cacheTime: timestamp.Add(-time.Second),
+			modified:  timestamp.Add(-time.Second),
+			expected:  false,
+		},
+	}
+	for i, c := range cases {
+		podID, status := getTestPodIDAndStatus(2)
+		cache.UpdateTime(c.cacheTime)
+		cache.Set(podID, status, nil, c.modified)
+		d := cache.getIfNewerThan(podID, timestamp)
+		assert.Equal(t, c.expected, d != nil, "test[%d]", i)
+	}
+}
+
+func TestGetPodNewerThanWhenPodDoesNotExist(t *testing.T) {
+	cache := newTestCache()
+	cacheTime := time.Now()
+	cache.UpdateTime(cacheTime)
+	podID := types.UID("1234")
+
+	cases := []struct {
+		timestamp time.Time
+		expected  bool
+	}{
+		{
+			timestamp: cacheTime.Add(-time.Second),
+			expected:  true,
+		},
+		{
+			timestamp: cacheTime.Add(time.Second),
+			expected:  false,
+		},
+	}
+	for i, c := range cases {
+		d := cache.getIfNewerThan(podID, c.timestamp)
+		assert.Equal(t, c.expected, d != nil, "test[%d]", i)
+	}
+}
+
+func TestCacheSetAndGet(t *testing.T) {
+	cache := NewCache()
+	cases := []struct {
+		numContainers int
+		error         error
+	}{
+		{numContainers: 3, error: nil},
+		{numContainers: 2, error: fmt.Errorf("unable to get status")},
+		{numContainers: 0, error: nil},
+	}
+	for i, c := range cases {
+		podID, status := getTestPodIDAndStatus(c.numContainers)
+		cache.Set(podID, status, c.error, time.Time{})
+		// Read back the status and error stored in cache and make sure they
+		// match the original ones.
+		actualStatus, actualErr := cache.Get(podID)
+		assert.Equal(t, status, actualStatus, "test[%d]", i)
+		assert.Equal(t, c.error, actualErr, "test[%d]", i)
+	}
+}
+
+func TestCacheGetPodDoesNotExist(t *testing.T) {
+	cache := NewCache()
+	podID, status := getTestPodIDAndStatus(0)
+	// If the pod does not exist in cache, cache should return a status
+	// object with id filled.
+	actualStatus, actualErr := cache.Get(podID)
+	assert.Equal(t, status, actualStatus)
+	assert.Equal(t, nil, actualErr)
+}
+
+func TestDelete(t *testing.T) {
+	cache := &cache{pods: map[types.UID]*data{}}
+	// Write a new pod status into the cache.
+	podID, status := getTestPodIDAndStatus(3)
+	cache.Set(podID, status, nil, time.Time{})
+	actualStatus, actualErr := cache.Get(podID)
+	assert.Equal(t, status, actualStatus)
+	assert.Equal(t, nil, actualErr)
+	// Delete the pod from cache, and verify that we get an empty status.
+	cache.Delete(podID)
+	expectedStatus := &PodStatus{ID: podID}
+	actualStatus, actualErr = cache.Get(podID)
+	assert.Equal(t, expectedStatus, actualStatus)
+	assert.Equal(t, nil, actualErr)
+}
+
+func verifyNotification(t *testing.T, ch chan *data, expectNotification bool) {
+	if expectNotification {
+		assert.True(t, len(ch) > 0, "Did not receive notification")
+	} else {
+		assert.True(t, len(ch) < 1, "Should not have triggered the notification")
+	}
+	// Drain the channel.
+	for i := 0; i < len(ch); i++ {
+		<-ch
+	}
+}
+
+func TestRegisterNotification(t *testing.T) {
+	cache := newTestCache()
+	cacheTime := time.Now()
+	cache.UpdateTime(cacheTime)
+
+	podID, status := getTestPodIDAndStatus(1)
+	ch := cache.subscribe(podID, cacheTime.Add(time.Second))
+	verifyNotification(t, ch, false)
+	cache.Set(podID, status, nil, cacheTime.Add(time.Second))
+	// The Set operation should've triggered the notification.
+	verifyNotification(t, ch, true)
+
+	podID, _ = getTestPodIDAndStatus(1)
+
+	ch = cache.subscribe(podID, cacheTime.Add(time.Second))
+	verifyNotification(t, ch, false)
+	cache.UpdateTime(cacheTime.Add(time.Second * 2))
+	// The advance of cache timestamp should've triggered the notification.
+	verifyNotification(t, ch, true)
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/container/container_gc.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/container/container_gc.go
new file mode 100644
index 000000000000..cd69c1ab45e4
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/container/container_gc.go
@@ -0,0 +1,68 @@
+/*
+Copyright 2014 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package container
+
+import (
+	"fmt"
+	"time"
+)
+
+// ContainerGCPolicy specifies a policy for garbage collecting containers.
+type ContainerGCPolicy struct {
+	// Minimum age at which a container can be garbage collected, zero for no limit.
+	MinAge time.Duration
+
+	// Max number of dead containers any single pod (UID, container name) pair is
+	// allowed to have, less than zero for no limit.
+	MaxPerPodContainer int
+
+	// Max number of total dead containers, less than zero for no limit.
+	MaxContainers int
+}
+
+// ContainerGC manages garbage collection of dead containers.
+//
+// Implementation is thread-compatible.
+type ContainerGC interface {
+	// Garbage collect containers.
+	GarbageCollect() error
+}
+
+// TODO(vmarmol): Preferentially remove pod infra containers.
+type realContainerGC struct {
+	// Container runtime
+	runtime Runtime
+
+	// Policy for garbage collection.
+	policy ContainerGCPolicy
+}
+
+// NewContainerGC creates a new ContainerGC instance with the specified policy.
+func NewContainerGC(runtime Runtime, policy ContainerGCPolicy) (ContainerGC, error) {
+	if policy.MinAge < 0 {
+		return nil, fmt.Errorf("invalid minimum garbage collection age: %v", policy.MinAge)
+	}
+
+	return &realContainerGC{
+		runtime: runtime,
+		policy:  policy,
+	}, nil
+}
+
+func (cgc *realContainerGC) GarbageCollect() error {
+	return cgc.runtime.GarbageCollect(cgc.policy)
+}
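+
+// A hypothetical configuration sketch (not from the upstream sources): a
+// kubelet-like caller might construct the GC with a policy such as the one
+// below and invoke it periodically; the concrete values are placeholders:
+//
+//	policy := ContainerGCPolicy{
+//		MinAge:             time.Minute, // leave containers younger than 1m alone
+//		MaxPerPodContainer: 2,           // keep at most 2 dead containers per (pod, name)
+//		MaxContainers:      100,         // keep at most 100 dead containers overall
+//	}
+//	gc, err := NewContainerGC(runtime, policy)
+//	if err == nil {
+//		_ = gc.GarbageCollect()
+//	}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/container/container_reference_manager.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/container/container_reference_manager.go
new file mode 100644
index 000000000000..1f44389c7f17
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/container/container_reference_manager.go
@@ -0,0 +1,60 @@
+/*
+Copyright 2015 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.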
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package container
+
+import (
+	"sync"
+
+	"k8s.io/kubernetes/pkg/api"
+)
+
+// RefManager manages the references for the containers.
+// The references are used for reporting events such as creation,
+// failure, etc. This manager is thread-safe; no locks are necessary
+// for the caller.
+type RefManager struct {
+	sync.RWMutex
+	containerIDToRef map[ContainerID]*api.ObjectReference
+}
+
+// NewRefManager creates and returns a container reference manager
+// with empty contents.
+func NewRefManager() *RefManager {
+	return &RefManager{containerIDToRef: make(map[ContainerID]*api.ObjectReference)}
+}
+
+// SetRef stores a reference to a pod's container, associating it with the given container ID.
+func (c *RefManager) SetRef(id ContainerID, ref *api.ObjectReference) {
+	c.Lock()
+	defer c.Unlock()
+	c.containerIDToRef[id] = ref
+}
+
+// ClearRef forgets the given container id and its associated container reference.
+func (c *RefManager) ClearRef(id ContainerID) {
+	c.Lock()
+	defer c.Unlock()
+	delete(c.containerIDToRef, id)
+}
+
+// GetRef returns the container reference of the given ID, or (nil, false) if none is stored.
+func (c *RefManager) GetRef(id ContainerID) (ref *api.ObjectReference, ok bool) {
+	c.RLock()
+	defer c.RUnlock()
+	ref, ok = c.containerIDToRef[id]
+	return ref, ok
+}
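+
+// A brief illustrative flow (hypothetical IDs and refs, not upstream code):
+// a runtime integration records a reference when it starts a container, looks
+// it up when it needs to emit an event, and clears it when the container dies:
+//
+//	rm := NewRefManager()
+//	rm.SetRef(containerID, ref) // after the container is created
+//	if ref, ok := rm.GetRef(containerID); ok {
+//		_ = ref             // target for recorder.Event(...)
+//	}
+//	rm.ClearRef(containerID)    // once the container is gone
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/container/event.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/container/event.go
new file mode 100644
index 000000000000..949dfcccc0e7
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/container/event.go
@@ -0,0 +1,69 @@
+/*
+Copyright 2014 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.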
+*/
+
+package container
+
+const (
+	// Container event reason list
+	CreatedContainer        = "Created"
+	StartedContainer        = "Started"
+	FailedToCreateContainer = "Failed"
+	FailedToStartContainer  = "Failed"
+	KillingContainer        = "Killing"
+	BackOffStartContainer   = "BackOff"
+
+	// Image event reason list
+	PullingImage            = "Pulling"
+	PulledImage             = "Pulled"
+	FailedToPullImage       = "Failed"
+	FailedToInspectImage    = "InspectFailed"
+	ErrImageNeverPullPolicy = "ErrImageNeverPull"
+	BackOffPullImage        = "BackOff"
+
+	// Kubelet event reason list
+	NodeReady               = "NodeReady"
+	NodeNotReady            = "NodeNotReady"
+	NodeSchedulable         = "NodeSchedulable"
+	NodeNotSchedulable      = "NodeNotSchedulable"
+	StartingKubelet         = "Starting"
+	KubeletSetupFailed      = "KubeletSetupFailed"
+	FailedMountVolume       = "FailedMount"
+	HostPortConflict        = "HostPortConflict"
+	NodeSelectorMismatching = "NodeSelectorMismatching"
+	InsufficientFreeCPU     = "InsufficientFreeCPU"
+	InsufficientFreeMemory  = "InsufficientFreeMemory"
+	OutOfDisk               = "OutOfDisk"
+	HostNetworkNotSupported = "HostNetworkNotSupported"
+	UndefinedShaper         = "NilShaper"
+	NodeRebooted            = "Rebooted"
+
+	// Image manager event reason list
+	InvalidDiskCapacity = "InvalidDiskCapacity"
+	FreeDiskSpaceFailed = "FreeDiskSpaceFailed"
+
+	// Probe event reason list
+	ContainerUnhealthy = "Unhealthy"
+
+	// Pod worker event reason list
+	FailedSync = "FailedSync"
+
+	// Config event reason list
+	FailedValidation = "FailedValidation"
+
+	// Lifecycle hooks
+	FailedPostStartHook = "FailedPostStartHook"
+	FailedPreStopHook   = "FailedPreStopHook"
+)
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/container/helpers.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/container/helpers.go
new file mode 100644
index 000000000000..ed0311df2a8b
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/container/helpers.go
@@ -0,0 +1,187 @@
+/*
+Copyright 2015 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package container
+
+import (
+	"hash/adler32"
+	"strings"
+
+	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/api/unversioned"
+	"k8s.io/kubernetes/pkg/client/record"
+	"k8s.io/kubernetes/pkg/kubelet/util/format"
+	"k8s.io/kubernetes/pkg/runtime"
+	"k8s.io/kubernetes/pkg/types"
+	hashutil "k8s.io/kubernetes/pkg/util/hash"
+	"k8s.io/kubernetes/third_party/golang/expansion"
+
+	"github.com/golang/glog"
+)
+
+// HandlerRunner runs a lifecycle handler for a container.
+type HandlerRunner interface {
+	Run(containerID ContainerID, pod *api.Pod, container *api.Container, handler *api.Handler) (string, error)
+}
+
+// RuntimeHelper wraps kubelet to make the container runtime able to get
+// necessary information, like the RunContainerOptions and DNS settings.
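+//
+// For instance (an illustrative sketch, not upstream code), a runtime
+// implementation would typically call it while setting up a container:
+//
+//	opts, err := helper.GenerateRunContainerOptions(pod, &pod.Spec.Containers[0], podIP)
+//	servers, searches, err := helper.GetClusterDNS(pod)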
+type RuntimeHelper interface {
+	GenerateRunContainerOptions(pod *api.Pod, container *api.Container, podIP string) (*RunContainerOptions, error)
+	GetClusterDNS(pod *api.Pod) (dnsServers []string, dnsSearches []string, err error)
+	GetPodDir(podUID types.UID) string
+	GeneratePodHostNameAndDomain(pod *api.Pod) (hostname string, hostDomain string, err error)
+}
+
+// ShouldContainerBeRestarted checks whether a container needs to be restarted.
+// TODO(yifan): Think about how to refactor this.
+func ShouldContainerBeRestarted(container *api.Container, pod *api.Pod, podStatus *PodStatus) bool {
+	// Get latest container status.
+	status := podStatus.FindContainerStatusByName(container.Name)
+	// If the container was never started before, we should start it.
+	// NOTE(random-liu): If all historical containers were GC'd, we'll also return true here.
+	if status == nil {
+		return true
+	}
+	// Check whether container is running
+	if status.State == ContainerStateRunning {
+		return false
+	}
+	// Always restart container in unknown state now
+	if status.State == ContainerStateUnknown {
+		return true
+	}
+	// Check RestartPolicy for dead container
+	if pod.Spec.RestartPolicy == api.RestartPolicyNever {
+		glog.V(4).Infof("Already ran container %q of pod %q, do nothing", container.Name, format.Pod(pod))
+		return false
+	}
+	if pod.Spec.RestartPolicy == api.RestartPolicyOnFailure {
+		// Check the exit code.
+		if status.ExitCode == 0 {
+			glog.V(4).Infof("Already successfully ran container %q of pod %q, do nothing", container.Name, format.Pod(pod))
+			return false
+		}
+	}
+	return true
+}
+
+// ConvertPodStatusToRunningPod converts a PodStatus into a running Pod.
+// TODO(random-liu): This should be deprecated soon.
+func ConvertPodStatusToRunningPod(podStatus *PodStatus) Pod {
+	runningPod := Pod{
+		ID:        podStatus.ID,
+		Name:      podStatus.Name,
+		Namespace: podStatus.Namespace,
+	}
+	for _, containerStatus := range podStatus.ContainerStatuses {
+		if containerStatus.State != ContainerStateRunning {
+			continue
+		}
+		container := &Container{
+			ID:    containerStatus.ID,
+			Name:  containerStatus.Name,
+			Image: containerStatus.Image,
+			Hash:  containerStatus.Hash,
+			State: containerStatus.State,
+		}
+		runningPod.Containers = append(runningPod.Containers, container)
+	}
+	return runningPod
+}
+
+// HashContainer returns the hash of the container. It is used to compare
+// the running container with its desired spec.
+func HashContainer(container *api.Container) uint64 {
+	hash := adler32.New()
+	hashutil.DeepHashObject(hash, *container)
+	return uint64(hash.Sum32())
+}
+
+// EnvVarsToMap constructs a map of environment name to value from a slice
+// of env vars.
+func EnvVarsToMap(envs []EnvVar) map[string]string {
+	result := map[string]string{}
+	for _, env := range envs {
+		result[env.Name] = env.Value
+	}
+
+	return result
+}
+
+func ExpandContainerCommandAndArgs(container *api.Container, envs []EnvVar) (command []string, args []string) {
+	mapping := expansion.MappingFuncFor(EnvVarsToMap(envs))
+
+	if len(container.Command) != 0 {
+		for _, cmd := range container.Command {
+			command = append(command, expansion.Expand(cmd, mapping))
+		}
+	}
+
+	if len(container.Args) != 0 {
+		for _, arg := range container.Args {
+			args = append(args, expansion.Expand(arg, mapping))
+		}
+	}
+
+	return command, args
+}
+
+// FilterEventRecorder creates an event recorder that records an object's
+// events, but filters out those belonging to implicitly required containers
+// (like the pod infra container).
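+// A sketch of the intended use (the variable names are hypothetical):
+//
+//	recorder := FilterEventRecorder(kubeletRecorder)
+//	recorder.Event(containerRef, api.EventTypeNormal, StartedContainer, "Started container")
+//	// Events whose FieldPath carries the ImplicitContainerPrefix are dropped.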
+func FilterEventRecorder(recorder record.EventRecorder) record.EventRecorder { + return &innerEventRecorder{ + recorder: recorder, + } +} + +type innerEventRecorder struct { + recorder record.EventRecorder +} + +func (irecorder *innerEventRecorder) shouldRecordEvent(object runtime.Object) (*api.ObjectReference, bool) { + if object == nil { + return nil, false + } + if ref, ok := object.(*api.ObjectReference); ok { + if !strings.HasPrefix(ref.FieldPath, ImplicitContainerPrefix) { + return ref, true + } + } + return nil, false +} + +func (irecorder *innerEventRecorder) Event(object runtime.Object, eventtype, reason, message string) { + if ref, ok := irecorder.shouldRecordEvent(object); ok { + irecorder.recorder.Event(ref, eventtype, reason, message) + } +} + +func (irecorder *innerEventRecorder) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) { + if ref, ok := irecorder.shouldRecordEvent(object); ok { + irecorder.recorder.Eventf(ref, eventtype, reason, messageFmt, args...) + } + +} + +func (irecorder *innerEventRecorder) PastEventf(object runtime.Object, timestamp unversioned.Time, eventtype, reason, messageFmt string, args ...interface{}) { + if ref, ok := irecorder.shouldRecordEvent(object); ok { + irecorder.recorder.PastEventf(ref, timestamp, eventtype, reason, messageFmt, args...) + } +} + +// Pod must not be nil. +func IsHostNetworkPod(pod *api.Pod) bool { + return pod.Spec.SecurityContext != nil && pod.Spec.SecurityContext.HostNetwork +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/container/helpers_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/container/helpers_test.go new file mode 100644 index 000000000000..435790c954c6 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/container/helpers_test.go @@ -0,0 +1,213 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package container + +import ( + "reflect" + "testing" + + "k8s.io/kubernetes/pkg/api" +) + +func TestEnvVarsToMap(t *testing.T) { + vars := []EnvVar{ + { + Name: "foo", + Value: "bar", + }, + { + Name: "zoo", + Value: "baz", + }, + } + + varMap := EnvVarsToMap(vars) + + if e, a := len(vars), len(varMap); e != a { + t.Errorf("Unexpected map length; expected: %d, got %d", e, a) + } + + if a := varMap["foo"]; a != "bar" { + t.Errorf("Unexpected value of key 'foo': %v", a) + } + + if a := varMap["zoo"]; a != "baz" { + t.Errorf("Unexpected value of key 'zoo': %v", a) + } +} + +func TestExpandCommandAndArgs(t *testing.T) { + cases := []struct { + name string + container *api.Container + envs []EnvVar + expectedCommand []string + expectedArgs []string + }{ + { + name: "none", + container: &api.Container{}, + }, + { + name: "command expanded", + container: &api.Container{ + Command: []string{"foo", "$(VAR_TEST)", "$(VAR_TEST2)"}, + }, + envs: []EnvVar{ + { + Name: "VAR_TEST", + Value: "zoo", + }, + { + Name: "VAR_TEST2", + Value: "boo", + }, + }, + expectedCommand: []string{"foo", "zoo", "boo"}, + }, + { + name: "args expanded", + container: &api.Container{ + Args: []string{"zap", "$(VAR_TEST)", "$(VAR_TEST2)"}, + }, + envs: []EnvVar{ + { + Name: "VAR_TEST", + Value: "hap", + }, + { + Name: "VAR_TEST2", + Value: "trap", + }, + }, + expectedArgs: []string{"zap", "hap", "trap"}, + }, + { + name: "both expanded", + container: &api.Container{ + Command: []string{"$(VAR_TEST2)--$(VAR_TEST)", "foo", "$(VAR_TEST3)"}, + Args: []string{"foo", "$(VAR_TEST)", "$(VAR_TEST2)"}, + }, + envs: []EnvVar{ + { + Name: "VAR_TEST", + Value: "zoo", + }, + { + Name: "VAR_TEST2", + Value: "boo", + }, + { + Name: "VAR_TEST3", + Value: "roo", + }, + }, + expectedCommand: []string{"boo--zoo", "foo", "roo"}, + expectedArgs: []string{"foo", "zoo", "boo"}, + }, + } + + for _, tc := range cases { + actualCommand, actualArgs := ExpandContainerCommandAndArgs(tc.container, tc.envs) + + if e, a := tc.expectedCommand, actualCommand; !reflect.DeepEqual(e, a) { + t.Errorf("%v: unexpected command; expected %v, got %v", tc.name, e, a) + } + + if e, a := tc.expectedArgs, actualArgs; !reflect.DeepEqual(e, a) { + t.Errorf("%v: unexpected args; expected %v, got %v", tc.name, e, a) + } + + } +} + +func TestShouldContainerBeRestarted(t *testing.T) { + pod := &api.Pod{ + ObjectMeta: api.ObjectMeta{ + UID: "12345678", + Name: "foo", + Namespace: "new", + }, + Spec: api.PodSpec{ + Containers: []api.Container{ + {Name: "no-history"}, + {Name: "alive"}, + {Name: "succeed"}, + {Name: "failed"}, + {Name: "unknown"}, + }, + }, + } + podStatus := &PodStatus{ + ID: pod.UID, + Name: pod.Name, + Namespace: pod.Namespace, + ContainerStatuses: []*ContainerStatus{ + { + Name: "alive", + State: ContainerStateRunning, + }, + { + Name: "succeed", + State: ContainerStateExited, + ExitCode: 0, + }, + { + Name: "failed", + State: ContainerStateExited, + ExitCode: 1, + }, + { + Name: "alive", + State: ContainerStateExited, + ExitCode: 2, + }, + { + Name: "unknown", + State: ContainerStateUnknown, + }, + { + Name: "failed", + State: ContainerStateExited, + ExitCode: 3, + }, + }, + } + policies := []api.RestartPolicy{ + api.RestartPolicyNever, + api.RestartPolicyOnFailure, + api.RestartPolicyAlways, + } + expected := map[string][]bool{ + "no-history": {true, true, true}, + "alive": {false, false, false}, + "succeed": {false, false, true}, + "failed": {false, true, true}, + "unknown": {true, true, true}, + } + for _, c := range pod.Spec.Containers { + for i, 
policy := range policies {
+			pod.Spec.RestartPolicy = policy
+			e := expected[c.Name][i]
+			r := ShouldContainerBeRestarted(&c, pod, podStatus)
+			if r != e {
+				t.Errorf("Restart for container %q with restart policy %q expected %t, got %t",
+					c.Name, policy, e, r)
+			}
+		}
+	}
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/container/image_puller.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/container/image_puller.go
new file mode 100644
index 000000000000..60f6ee4a2408
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/container/image_puller.go
@@ -0,0 +1,123 @@
+/*
+Copyright 2015 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package container
+
+import (
+	"fmt"
+
+	"github.com/golang/glog"
+	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/client/record"
+	"k8s.io/kubernetes/pkg/util/flowcontrol"
+)
+
+// imagePuller pulls the image using Runtime.PullImage().
+// It will check the presence of the image, and report the 'image pulling',
+// 'image pulled' events accordingly.
+type imagePuller struct {
+	recorder record.EventRecorder
+	runtime  Runtime
+	backOff  *flowcontrol.Backoff
+}
+
+// enforce compatibility.
+var _ ImagePuller = &imagePuller{}
+
+// NewImagePuller takes an event recorder and container runtime to create an
+// image puller that wraps the container runtime's PullImage interface.
+func NewImagePuller(recorder record.EventRecorder, runtime Runtime, imageBackOff *flowcontrol.Backoff) ImagePuller {
+	return &imagePuller{
+		recorder: recorder,
+		runtime:  runtime,
+		backOff:  imageBackOff,
+	}
+}
+
+// shouldPullImage returns whether we should pull an image according to
+// the presence and pull policy of the image.
+func shouldPullImage(container *api.Container, imagePresent bool) bool {
+	if container.ImagePullPolicy == api.PullNever {
+		return false
+	}
+
+	if container.ImagePullPolicy == api.PullAlways ||
+		(container.ImagePullPolicy == api.PullIfNotPresent && (!imagePresent)) {
+		return true
+	}
+
+	return false
+}
+
+// logIt records an event using ref and msg if ref is non-nil; otherwise it
+// logs the message via logFn, prefixed with prefix.
+func (puller *imagePuller) logIt(ref *api.ObjectReference, eventtype, event, prefix, msg string, logFn func(args ...interface{})) {
+	if ref != nil {
+		puller.recorder.Event(ref, eventtype, event, msg)
+	} else {
+		logFn(fmt.Sprint(prefix, " ", msg))
+	}
+}
+
+// PullImage pulls the image for the specified pod and container.
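+// It returns an error classification (e.g. ErrImagePull, ErrImagePullBackOff)
+// together with a human-readable message; a nil error and empty message mean
+// the image is ready. A rough caller sketch (with hypothetical names):
+//
+//	if err, msg := puller.PullImage(pod, container, pullSecrets); err != nil {
+//		recordFailure(pod, container, err, msg)
+//	}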
+func (puller *imagePuller) PullImage(pod *api.Pod, container *api.Container, pullSecrets []api.Secret) (error, string) { + logPrefix := fmt.Sprintf("%s/%s", pod.Name, container.Image) + ref, err := GenerateContainerRef(pod, container) + if err != nil { + glog.Errorf("Couldn't make a ref to pod %v, container %v: '%v'", pod.Name, container.Name, err) + } + + spec := ImageSpec{container.Image} + present, err := puller.runtime.IsImagePresent(spec) + if err != nil { + msg := fmt.Sprintf("Failed to inspect image %q: %v", container.Image, err) + puller.logIt(ref, api.EventTypeWarning, FailedToInspectImage, logPrefix, msg, glog.Warning) + return ErrImageInspect, msg + } + + if !shouldPullImage(container, present) { + if present { + msg := fmt.Sprintf("Container image %q already present on machine", container.Image) + puller.logIt(ref, api.EventTypeNormal, PulledImage, logPrefix, msg, glog.Info) + return nil, "" + } else { + msg := fmt.Sprintf("Container image %q is not present with pull policy of Never", container.Image) + puller.logIt(ref, api.EventTypeWarning, ErrImageNeverPullPolicy, logPrefix, msg, glog.Warning) + return ErrImageNeverPull, msg + } + } + + backOffKey := fmt.Sprintf("%s_%s", pod.UID, container.Image) + if puller.backOff.IsInBackOffSinceUpdate(backOffKey, puller.backOff.Clock.Now()) { + msg := fmt.Sprintf("Back-off pulling image %q", container.Image) + puller.logIt(ref, api.EventTypeNormal, BackOffPullImage, logPrefix, msg, glog.Info) + return ErrImagePullBackOff, msg + } + puller.logIt(ref, api.EventTypeNormal, PullingImage, logPrefix, fmt.Sprintf("pulling image %q", container.Image), glog.Info) + if err := puller.runtime.PullImage(spec, pullSecrets); err != nil { + puller.logIt(ref, api.EventTypeWarning, FailedToPullImage, logPrefix, fmt.Sprintf("Failed to pull image %q: %v", container.Image, err), glog.Warning) + puller.backOff.Next(backOffKey, puller.backOff.Clock.Now()) + if err == RegistryUnavailable { + msg := fmt.Sprintf("image pull failed for %s because the registry is unavailable.", container.Image) + return err, msg + } else { + return ErrImagePull, err.Error() + } + } + puller.logIt(ref, api.EventTypeNormal, PulledImage, logPrefix, fmt.Sprintf("Successfully pulled image %q", container.Image), glog.Info) + puller.backOff.DeleteEntry(backOffKey) + puller.backOff.GC() + return nil, "" +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/container/image_puller_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/container/image_puller_test.go new file mode 100644 index 000000000000..baabc0012c44 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/container/image_puller_test.go @@ -0,0 +1,123 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package container_test + +import ( + "errors" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/client/record" + . 
"k8s.io/kubernetes/pkg/kubelet/container" + ctest "k8s.io/kubernetes/pkg/kubelet/container/testing" + "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/flowcontrol" +) + +func TestPuller(t *testing.T) { + pod := &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: "test_pod", + Namespace: "test-ns", + UID: "bar", + ResourceVersion: "42", + SelfLink: "/api/v1/pods/foo", + }} + + cases := []struct { + containerImage string + policy api.PullPolicy + calledFunctions []string + inspectErr error + pullerErr error + expectedErr []error + }{ + { // pull missing image + containerImage: "missing_image", + policy: api.PullIfNotPresent, + calledFunctions: []string{"IsImagePresent", "PullImage"}, + inspectErr: nil, + pullerErr: nil, + expectedErr: []error{nil}}, + + { // image present, dont pull + containerImage: "present_image", + policy: api.PullIfNotPresent, + calledFunctions: []string{"IsImagePresent"}, + inspectErr: nil, + pullerErr: nil, + expectedErr: []error{nil, nil, nil}}, + // image present, pull it + {containerImage: "present_image", + policy: api.PullAlways, + calledFunctions: []string{"IsImagePresent", "PullImage"}, + inspectErr: nil, + pullerErr: nil, + expectedErr: []error{nil, nil, nil}}, + // missing image, error PullNever + {containerImage: "missing_image", + policy: api.PullNever, + calledFunctions: []string{"IsImagePresent"}, + inspectErr: nil, + pullerErr: nil, + expectedErr: []error{ErrImageNeverPull, ErrImageNeverPull, ErrImageNeverPull}}, + // missing image, unable to inspect + {containerImage: "missing_image", + policy: api.PullIfNotPresent, + calledFunctions: []string{"IsImagePresent"}, + inspectErr: errors.New("unknown inspectError"), + pullerErr: nil, + expectedErr: []error{ErrImageInspect, ErrImageInspect, ErrImageInspect}}, + // missing image, unable to fetch + {containerImage: "typo_image", + policy: api.PullIfNotPresent, + calledFunctions: []string{"IsImagePresent", "PullImage"}, + inspectErr: nil, + pullerErr: errors.New("404"), + expectedErr: []error{ErrImagePull, ErrImagePull, ErrImagePullBackOff, ErrImagePull, ErrImagePullBackOff, ErrImagePullBackOff}}, + } + + for i, c := range cases { + container := &api.Container{ + Name: "container_name", + Image: c.containerImage, + ImagePullPolicy: c.policy, + } + + backOff := flowcontrol.NewBackOff(time.Second, time.Minute) + fakeClock := util.NewFakeClock(time.Now()) + backOff.Clock = fakeClock + + fakeRuntime := &ctest.FakeRuntime{} + fakeRecorder := &record.FakeRecorder{} + puller := NewImagePuller(fakeRecorder, fakeRuntime, backOff) + + fakeRuntime.ImageList = []Image{{"present_image", nil, nil, 1}} + fakeRuntime.Err = c.pullerErr + fakeRuntime.InspectErr = c.inspectErr + + for tick, expected := range c.expectedErr { + fakeClock.Step(time.Second) + err, _ := puller.PullImage(pod, container, nil) + fakeRuntime.AssertCalls(c.calledFunctions) + assert.Equal(t, expected, err, "in test %d tick=%d", i, tick) + } + + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/container/os.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/container/os.go new file mode 100644 index 000000000000..21c0264e2af4 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/container/os.go @@ -0,0 +1,86 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package container + +import ( + "io/ioutil" + "os" + "time" +) + +// OSInterface collects system level operations that need to be mocked out +// during tests. +type OSInterface interface { + MkdirAll(path string, perm os.FileMode) error + Symlink(oldname string, newname string) error + Stat(path string) (os.FileInfo, error) + Remove(path string) error + Create(path string) (*os.File, error) + Hostname() (name string, err error) + Chtimes(path string, atime time.Time, mtime time.Time) error + Pipe() (r *os.File, w *os.File, err error) + ReadDir(dirname string) ([]os.FileInfo, error) +} + +// RealOS is used to dispatch the real system level operations. +type RealOS struct{} + +// MkdirAll will call os.MkdirAll to create a directory and any missing parents. +func (RealOS) MkdirAll(path string, perm os.FileMode) error { + return os.MkdirAll(path, perm) +} + +// Symlink will call os.Symlink to create a symbolic link. +func (RealOS) Symlink(oldname string, newname string) error { + return os.Symlink(oldname, newname) +} + +// Stat will call os.Stat to get the FileInfo for a given path. +func (RealOS) Stat(path string) (os.FileInfo, error) { + return os.Stat(path) +} + +// Remove will call os.Remove to remove the path. +func (RealOS) Remove(path string) error { + return os.Remove(path) +} + +// Create will call os.Create to create and return a file +// at path. +func (RealOS) Create(path string) (*os.File, error) { + return os.Create(path) +} + +// Hostname will call os.Hostname to return the hostname. +func (RealOS) Hostname() (name string, err error) { + return os.Hostname() +} + +// Chtimes will call os.Chtimes to change the atime and mtime of the path. +func (RealOS) Chtimes(path string, atime time.Time, mtime time.Time) error { + return os.Chtimes(path, atime, mtime) +} + +// Pipe will call os.Pipe to return a connected pair of pipes. +func (RealOS) Pipe() (r *os.File, w *os.File, err error) { + return os.Pipe() +} + +// ReadDir will call ioutil.ReadDir to return the files under the directory. +func (RealOS) ReadDir(dirname string) ([]os.FileInfo, error) { + return ioutil.ReadDir(dirname) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/util/editor/term_unsupported.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/container/pty_linux.go similarity index 78% rename from Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/util/editor/term_unsupported.go rename to Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/container/pty_linux.go index 4c0b788166d4..cbc36f6d3ff7 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/util/editor/term_unsupported.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/container/pty_linux.go @@ -1,4 +1,4 @@ -// +build windows +// +build linux /* Copyright 2015 The Kubernetes Authors All rights reserved. @@ -16,11 +16,15 @@ See the License for the specific language governing permissions and limitations under the License.
*/ -package editor +package container import ( "os" + "os/exec" + + "github.com/kr/pty" ) -// childSignals are the allowed signals that can be sent to children in Windows to terminate -var childSignals = []os.Signal{os.Interrupt} +func StartPty(c *exec.Cmd) (*os.File, error) { + return pty.Start(c) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/container/pty_unsupported.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/container/pty_unsupported.go new file mode 100644 index 000000000000..b48a999b0a96 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/container/pty_unsupported.go @@ -0,0 +1,28 @@ +// +build !linux + +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package container + +import ( + "os" + "os/exec" +) + +func StartPty(c *exec.Cmd) (pty *os.File, err error) { + return nil, nil +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/container/ref.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/container/ref.go new file mode 100644 index 000000000000..ebfff2ebf703 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/container/ref.go @@ -0,0 +1,71 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package container + +import ( + "fmt" + + "k8s.io/kubernetes/pkg/api" +) + +var ImplicitContainerPrefix string = "implicitly required container " + +// GenerateContainerRef returns an *api.ObjectReference which references the given container +// within the given pod. Returns an error if the reference can't be constructed or the +// container doesn't actually belong to the pod. +// +// This function will return an error if the provided Pod does not have a selfLink, +// but we expect selfLink to be populated at all call sites for the function. +func GenerateContainerRef(pod *api.Pod, container *api.Container) (*api.ObjectReference, error) { + fieldPath, err := fieldPath(pod, container) + if err != nil { + // TODO: figure out intelligent way to refer to containers that we implicitly + // start (like the pod infra container). This is not a good way, ugh. + fieldPath = ImplicitContainerPrefix + container.Name + } + ref, err := api.GetPartialReference(pod, fieldPath) + if err != nil { + return nil, err + } + return ref, nil +} + +// fieldPath returns a fieldPath locating container within pod. +// Returns an error if the container isn't part of the pod. 
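+// For example, a container named "foo" in the pod's spec yields "spec.containers{foo}", while an unnamed container at index 2 yields "spec.containers[2]" (see TestFieldPath below).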
+func fieldPath(pod *api.Pod, container *api.Container) (string, error) { + for i := range pod.Spec.Containers { + here := &pod.Spec.Containers[i] + if here.Name == container.Name { + if here.Name == "" { + return fmt.Sprintf("spec.containers[%d]", i), nil + } else { + return fmt.Sprintf("spec.containers{%s}", here.Name), nil + } + } + } + for i := range pod.Spec.InitContainers { + here := &pod.Spec.InitContainers[i] + if here.Name == container.Name { + if here.Name == "" { + return fmt.Sprintf("spec.initContainers[%d]", i), nil + } else { + return fmt.Sprintf("spec.initContainers{%s}", here.Name), nil + } + } + } + return "", fmt.Errorf("container %#v not found in pod %#v", container, pod) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/container/ref_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/container/ref_test.go new file mode 100644 index 000000000000..18cc6672e920 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/container/ref_test.go @@ -0,0 +1,212 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package container + +import ( + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/api/unversioned" +) + +func TestFieldPath(t *testing.T) { + pod := &api.Pod{Spec: api.PodSpec{Containers: []api.Container{ + {Name: "foo"}, + {Name: "bar"}, + {Name: ""}, + {Name: "baz"}, + }}} + table := map[string]struct { + pod *api.Pod + container *api.Container + path string + success bool + }{ + "basic": {pod, &api.Container{Name: "foo"}, "spec.containers{foo}", true}, + "basic2": {pod, &api.Container{Name: "baz"}, "spec.containers{baz}", true}, + "emptyName": {pod, &api.Container{Name: ""}, "spec.containers[2]", true}, + "basicSamePointer": {pod, &pod.Spec.Containers[0], "spec.containers{foo}", true}, + "missing": {pod, &api.Container{Name: "qux"}, "", false}, + } + + for name, item := range table { + res, err := fieldPath(item.pod, item.container) + if item.success == false { + if err == nil { + t.Errorf("%v: unexpected non-error", name) + } + continue + } + if err != nil { + t.Errorf("%v: unexpected error: %v", name, err) + continue + } + if e, a := item.path, res; e != a { + t.Errorf("%v: wanted %v, got %v", name, e, a) + } + } +} + +func TestGenerateContainerRef(t *testing.T) { + var ( + okPod = api.Pod{ + TypeMeta: unversioned.TypeMeta{ + Kind: "Pod", + APIVersion: testapi.Default.GroupVersion().String(), + }, + ObjectMeta: api.ObjectMeta{ + Name: "ok", + Namespace: "test-ns", + UID: "bar", + ResourceVersion: "42", + SelfLink: "/api/" + testapi.Default.GroupVersion().String() + "/pods/foo", + }, + Spec: api.PodSpec{ + Containers: []api.Container{ + { + Name: "by-name", + }, + {}, + }, + }, + } + noSelfLinkPod = okPod + defaultedSelfLinkPod = okPod + ) + noSelfLinkPod.Kind = "" + noSelfLinkPod.APIVersion = "" + noSelfLinkPod.ObjectMeta.SelfLink = "" + defaultedSelfLinkPod.ObjectMeta.SelfLink = "/api/" + 
testapi.Default.GroupVersion().String() + "/pods/ok" + + cases := []struct { + name string + pod *api.Pod + container *api.Container + expected *api.ObjectReference + success bool + }{ + { + name: "by-name", + pod: &okPod, + container: &api.Container{ + Name: "by-name", + }, + expected: &api.ObjectReference{ + Kind: "Pod", + APIVersion: testapi.Default.GroupVersion().String(), + Name: "ok", + Namespace: "test-ns", + UID: "bar", + ResourceVersion: "42", + FieldPath: ".spec.containers{by-name}", + }, + success: true, + }, + { + name: "no-name", + pod: &okPod, + container: &api.Container{}, + expected: &api.ObjectReference{ + Kind: "Pod", + APIVersion: testapi.Default.GroupVersion().String(), + Name: "ok", + Namespace: "test-ns", + UID: "bar", + ResourceVersion: "42", + FieldPath: ".spec.containers[1]", + }, + success: true, + }, + { + name: "no-selflink", + pod: &noSelfLinkPod, + container: &api.Container{}, + expected: nil, + success: false, + }, + { + name: "defaulted-selflink", + pod: &defaultedSelfLinkPod, + container: &api.Container{ + Name: "by-name", + }, + expected: &api.ObjectReference{ + Kind: "Pod", + APIVersion: testapi.Default.GroupVersion().String(), + Name: "ok", + Namespace: "test-ns", + UID: "bar", + ResourceVersion: "42", + FieldPath: ".spec.containers{by-name}", + }, + success: true, + }, + { + name: "implicitly-required", + pod: &okPod, + container: &api.Container{ + Name: "net", + }, + expected: &api.ObjectReference{ + Kind: "Pod", + APIVersion: testapi.Default.GroupVersion().String(), + Name: "ok", + Namespace: "test-ns", + UID: "bar", + ResourceVersion: "42", + FieldPath: "implicitly required container net", + }, + success: true, + }, + } + + for _, tc := range cases { + actual, err := GenerateContainerRef(tc.pod, tc.container) + if err != nil { + if tc.success { + t.Errorf("%v: unexpected error: %v", tc.name, err) + } + + continue + } + + if !tc.success { + t.Errorf("%v: unexpected success", tc.name) + continue + } + + if e, a := tc.expected.Kind, actual.Kind; e != a { + t.Errorf("%v: kind: expected %v, got %v", tc.name, e, a) + } + if e, a := tc.expected.APIVersion, actual.APIVersion; e != a { + t.Errorf("%v: apiVersion: expected %v, got %v", tc.name, e, a) + } + if e, a := tc.expected.Name, actual.Name; e != a { + t.Errorf("%v: name: expected %v, got %v", tc.name, e, a) + } + if e, a := tc.expected.Namespace, actual.Namespace; e != a { + t.Errorf("%v: namespace: expected %v, got %v", tc.name, e, a) + } + if e, a := tc.expected.UID, actual.UID; e != a { + t.Errorf("%v: uid: expected %v, got %v", tc.name, e, a) + } + if e, a := tc.expected.ResourceVersion, actual.ResourceVersion; e != a { + t.Errorf("%v: kind: expected %v, got %v", tc.name, e, a) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/container/runtime.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/container/runtime.go new file mode 100644 index 000000000000..e7510359c394 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/container/runtime.go @@ -0,0 +1,505 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package container + +import ( + "fmt" + "io" + "reflect" + "strings" + "time" + + "github.com/golang/glog" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/types" + "k8s.io/kubernetes/pkg/util/flowcontrol" + "k8s.io/kubernetes/pkg/volume" +) + +type Version interface { + // Compare compares two versions of the runtime. On success it returns -1 + // if the version is less than the other, 1 if it is greater than the other, + // or 0 if they are equal. + Compare(other string) (int, error) + // String returns a string that represents the version. + String() string +} + +// ImageSpec is an internal representation of an image. Currently, it wraps the +// value of a Container's Image field, but in the future it will include more detailed +// information about the different image types. +type ImageSpec struct { + Image string +} + +// ImageStats contains statistics about all the images currently available. +type ImageStats struct { + // Total amount of storage consumed by existing images. + TotalStorageBytes uint64 +} + +// Runtime interface defines the interfaces that should be implemented +// by a container runtime. +// Thread safety is required from implementations of this interface. +type Runtime interface { + // Type returns the type of the container runtime. + Type() string + + // Version returns the version information of the container runtime. + Version() (Version, error) + + // APIVersion returns the cached API version information of the container + // runtime. Implementation is expected to update this cache periodically. + // This may be different from the runtime engine's version. + // TODO(random-liu): We should fold this into Version() + APIVersion() (Version, error) + // Status returns error if the runtime is unhealthy; nil otherwise. + Status() error + // GetPods returns a list of containers grouped by pods. The boolean parameter + // specifies whether the runtime returns all containers including those already + // exited and dead containers (used for garbage collection). + GetPods(all bool) ([]*Pod, error) + // GarbageCollect removes dead containers using the specified container gc policy. + GarbageCollect(gcPolicy ContainerGCPolicy) error + // SyncPod syncs the running pod into the desired pod. + SyncPod(pod *api.Pod, apiPodStatus api.PodStatus, podStatus *PodStatus, pullSecrets []api.Secret, backOff *flowcontrol.Backoff) PodSyncResult + // KillPod kills all the containers of a pod. Pod may be nil, running pod must not be. + // TODO(random-liu): Return PodSyncResult in KillPod. + // gracePeriodOverride, if specified, allows the caller to override the pod default grace period. + // Only hard kill paths are allowed to specify a gracePeriodOverride in the kubelet in order to not corrupt user data. + // It is useful when doing SIGKILL for hard eviction scenarios, or max grace period during soft eviction scenarios. + KillPod(pod *api.Pod, runningPod Pod, gracePeriodOverride *int64) error + // GetPodStatus retrieves the status of the pod, including the + // information of all containers in the pod that are visible in Runtime. + GetPodStatus(uid types.UID, name, namespace string) (*PodStatus, error) + // PullImage pulls an image from the network to local storage using the supplied + // secrets if necessary. + PullImage(image ImageSpec, pullSecrets []api.Secret) error + // IsImagePresent checks whether the container image is already in the local storage.
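+ // A (false, nil) result means the image is simply absent and may need to be pulled; a non-nil error means the presence check itself failed.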
+ IsImagePresent(image ImageSpec) (bool, error) + // Gets all images currently on the machine. + ListImages() ([]Image, error) + // Removes the specified image. + RemoveImage(image ImageSpec) error + // Returns Image statistics. + ImageStats() (*ImageStats, error) + // Returns the filesystem path of the pod's network namespace; if the + // runtime does not handle namespace creation itself, or cannot return + // the network namespace path, it should return an error. + // TODO: Change ContainerID to a Pod ID since the namespace is shared + // by all containers in the pod. + GetNetNS(containerID ContainerID) (string, error) + // TODO(vmarmol): Unify pod and containerID args. + // GetContainerLogs returns logs of a specific container. By + // default, it returns a snapshot of the container log. Set 'follow' to true to + // stream the log. Set 'follow' to false and specify the number of lines (e.g. + // "100" or "all") to tail the log. + GetContainerLogs(pod *api.Pod, containerID ContainerID, logOptions *api.PodLogOptions, stdout, stderr io.Writer) (err error) + // ContainerCommandRunner encapsulates the command runner interfaces for testability. + ContainerCommandRunner + // ContainerAttacher encapsulates attaching to containers for testability. + ContainerAttacher +} + +type ContainerAttacher interface { + AttachContainer(id ContainerID, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool) (err error) +} + +// ContainerCommandRunner encapsulates the command runner interfaces for testability. +type ContainerCommandRunner interface { + // Runs the command in the container of the specified pod using nsenter. + // Attaches the process's stdin, stdout, and stderr. Optionally uses a + // tty. + ExecInContainer(containerID ContainerID, cmd []string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool) error + // Forward the specified port from the specified pod to the stream. + PortForward(pod *Pod, port uint16, stream io.ReadWriteCloser) error +} + +// ImagePuller wraps Runtime.PullImage() to pull a container image. +// It will check the presence of the image, and report the 'image pulling', +// 'image pulled' events correspondingly. +type ImagePuller interface { + PullImage(pod *api.Pod, container *api.Container, pullSecrets []api.Secret) (error, string) +} + +// Pod is a group of containers. +type Pod struct { + // The ID of the pod, which can be used to retrieve a particular pod + // from the pod list returned by GetPods(). + ID types.UID + // The name and namespace of the pod, which are readable by humans. + Name string + Namespace string + // List of containers that belong to this pod. It may contain only + // running containers, or mixed with dead ones (when GetPods(true)). + Containers []*Container +} + +// PodPair contains both runtime#Pod and api#Pod +type PodPair struct { + // APIPod is the api.Pod + APIPod *api.Pod + // RunningPod is the pod defined in pkg/kubelet/container/runtime#Pod + RunningPod *Pod +} + +// ContainerID is a type that identifies a container. +type ContainerID struct { + // The type of the container runtime. e.g. 'docker', 'rkt'. + Type string + // The identification of the container, this is consumable by + // the underlying container runtime. (Note that the container + // runtime interface still takes the whole struct as input). + ID string +} + +func BuildContainerID(typ, ID string) ContainerID { + return ContainerID{Type: typ, ID: ID} +} + +// ParseContainerID is a convenience method for creating a ContainerID from an ID string.
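+// For example, ParseContainerID("docker://abc123") yields ContainerID{Type: "docker", ID: "abc123"}; input that is not of the form TYPE://ID is logged as an error and the zero ContainerID is returned.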
+func ParseContainerID(containerID string) ContainerID { + var id ContainerID + if err := id.ParseString(containerID); err != nil { + glog.Error(err) + } + return id +} + +func (c *ContainerID) ParseString(data string) error { + // Trim the quotes and split the type and ID. + parts := strings.Split(strings.Trim(data, "\""), "://") + if len(parts) != 2 { + return fmt.Errorf("invalid container ID: %q", data) + } + c.Type, c.ID = parts[0], parts[1] + return nil +} + +func (c *ContainerID) String() string { + return fmt.Sprintf("%s://%s", c.Type, c.ID) +} + +func (c *ContainerID) IsEmpty() bool { + return *c == ContainerID{} +} + +func (c *ContainerID) MarshalJSON() ([]byte, error) { + return []byte(fmt.Sprintf("%q", c.String())), nil +} + +func (c *ContainerID) UnmarshalJSON(data []byte) error { + return c.ParseString(string(data)) +} + +// DockerID is the ID of a docker container. It is a distinct type that makes it clear when we're working with docker container IDs. +type DockerID string + +func (id DockerID) ContainerID() ContainerID { + return ContainerID{ + Type: "docker", + ID: string(id), + } +} + +type ContainerState string + +const ( + ContainerStateRunning ContainerState = "running" + ContainerStateExited ContainerState = "exited" + // ContainerStateUnknown encompasses all the states that we currently don't care about. + ContainerStateUnknown ContainerState = "unknown" +) + +// Container provides the runtime information for a container, such as ID, hash, +// state of the container. +type Container struct { + // The ID of the container, used by the container runtime to identify + // a container. + ID ContainerID + // The name of the container, which should be the same as specified by + // api.Container. + Name string + // The image name of the container, this also includes the tag of the image, + // the expected form is "NAME:TAG". + Image string + // Hash of the container, used for comparison. Optional for containers + // not managed by kubelet. + Hash uint64 + // State is the state of the container. + State ContainerState +} + +// PodStatus represents the status of the pod and its containers. +// api.PodStatus can be derived from examining PodStatus and api.Pod. +type PodStatus struct { + // ID of the pod. + ID types.UID + // Name of the pod. + Name string + // Namespace of the pod. + Namespace string + // IP of the pod. + IP string + // Status of containers in the pod. + ContainerStatuses []*ContainerStatus +} + +// ContainerStatus represents the status of a container. +type ContainerStatus struct { + // ID of the container. + ID ContainerID + // Name of the container. + Name string + // Status of the container. + State ContainerState + // Creation time of the container. + CreatedAt time.Time + // Start time of the container. + StartedAt time.Time + // Finish time of the container. + FinishedAt time.Time + // Exit code of the container. + ExitCode int + // Name of the image, this also includes the tag of the image, + // the expected form is "NAME:TAG". + Image string + // ID of the image. + ImageID string + // Hash of the container, used for comparison. + Hash uint64 + // Number of times that the container has been restarted. + RestartCount int + // A string that explains why the container is in this state. + Reason string + // Message written by the container before exiting (stored in + // TerminationMessagePath). + Message string +} + +// FindContainerStatusByName returns container status in the pod status with the given name.
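+// It returns nil when no status with the given name exists.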
+// When there are multiple containers' statuses with the same name, the first match will be returned. +func (podStatus *PodStatus) FindContainerStatusByName(containerName string) *ContainerStatus { + for _, containerStatus := range podStatus.ContainerStatuses { + if containerStatus.Name == containerName { + return containerStatus + } + } + return nil +} + +// GetRunningContainerStatuses returns the statuses of all the running containers in a pod. +func (podStatus *PodStatus) GetRunningContainerStatuses() []*ContainerStatus { + runningContainerStatuses := []*ContainerStatus{} + for _, containerStatus := range podStatus.ContainerStatuses { + if containerStatus.State == ContainerStateRunning { + runningContainerStatuses = append(runningContainerStatuses, containerStatus) + } + } + return runningContainerStatuses +} + +// Basic information about a container image. +type Image struct { + // ID of the image. + ID string + // Other names by which this image is known. + RepoTags []string + // Digests by which this image is known. + RepoDigests []string + // The size of the image in bytes. + Size int64 +} + +type EnvVar struct { + Name string + Value string +} + +type Mount struct { + // Name of the volume mount. + Name string + // Path of the mount within the container. + ContainerPath string + // Path of the mount on the host. + HostPath string + // Whether the mount is read-only. + ReadOnly bool + // Whether the mount needs SELinux relabeling. + SELinuxRelabel bool +} + +type PortMapping struct { + // Name of the port mapping. + Name string + // Protocol of the port mapping. + Protocol api.Protocol + // The port number within the container. + ContainerPort int + // The port number on the host. + HostPort int + // The host IP. + HostIP string +} + +// RunContainerOptions specifies the options which are necessary for running containers. +type RunContainerOptions struct { + // The environment variables list. + Envs []EnvVar + // The mounts for the containers. + Mounts []Mount + // The host devices mapped into the containers. + Devices []string + // The port mappings for the containers. + PortMappings []PortMapping + // If the container has specified the TerminationMessagePath, then + // this directory will be used to create and mount the log file to + // container.TerminationMessagePath + PodContainerDir string + // The list of DNS servers for the container to use. + DNS []string + // The list of DNS search domains. + DNSSearch []string + // The parent cgroup to pass to Docker + CgroupParent string + // Whether the container rootfs is read-only. + ReadOnly bool + // Hostname for pod containers. + Hostname string +} + +// VolumeInfo contains information about the volume. +type VolumeInfo struct { + // Mounter is the volume's mounter + Mounter volume.Mounter + // SELinuxLabeled indicates whether this volume has had the + // pod's SELinux label applied to it or not + SELinuxLabeled bool +} + +type VolumeMap map[string]VolumeInfo + +type Pods []*Pod + +// FindPodByID finds and returns a pod in the pod list by UID. It will return an empty pod +// if not found. +func (p Pods) FindPodByID(podUID types.UID) Pod { + for i := range p { + if p[i].ID == podUID { + return *p[i] + } + } + return Pod{} +} + +// FindPodByFullName finds and returns a pod in the pod list by the full name. +// It will return an empty pod if not found.
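+// A full name is the "<name>_<namespace>" string produced by BuildPodFullName, so, for example, "nginx_default" refers to a pod named "nginx" in the "default" namespace.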
+func (p Pods) FindPodByFullName(podFullName string) Pod { + for i := range p { + if BuildPodFullName(p[i].Name, p[i].Namespace) == podFullName { + return *p[i] + } + } + return Pod{} +} + +// FindPod combines FindPodByID and FindPodByFullName, it finds and returns a pod in the +// pod list either by the full name or the pod ID. It will return an empty pod +// if not found. +func (p Pods) FindPod(podFullName string, podUID types.UID) Pod { + if len(podFullName) > 0 { + return p.FindPodByFullName(podFullName) + } + return p.FindPodByID(podUID) +} + +// FindContainerByName returns a container in the pod with the given name. +// When there are multiple containers with the same name, the first match will +// be returned. +func (p *Pod) FindContainerByName(containerName string) *Container { + for _, c := range p.Containers { + if c.Name == containerName { + return c + } + } + return nil +} + +func (p *Pod) FindContainerByID(id ContainerID) *Container { + for _, c := range p.Containers { + if c.ID == id { + return c + } + } + return nil +} + +// ToAPIPod converts Pod to api.Pod. Note that if a field in api.Pod has no +// corresponding field in Pod, the field would not be populated. +func (p *Pod) ToAPIPod() *api.Pod { + var pod api.Pod + pod.UID = p.ID + pod.Name = p.Name + pod.Namespace = p.Namespace + + for _, c := range p.Containers { + var container api.Container + container.Name = c.Name + container.Image = c.Image + pod.Spec.Containers = append(pod.Spec.Containers, container) + } + return &pod +} + +// IsEmpty returns true if the pod is empty. +func (p *Pod) IsEmpty() bool { + return reflect.DeepEqual(p, &Pod{}) +} + +// GetPodFullName returns a name that uniquely identifies a pod. +func GetPodFullName(pod *api.Pod) string { + // Use underscore as the delimiter because it is not allowed in pod name + // (DNS subdomain format), while allowed in the container name format. + return pod.Name + "_" + pod.Namespace +} + +// Build the pod full name from pod name and namespace. +func BuildPodFullName(name, namespace string) string { + return name + "_" + namespace +} + +// Parse the pod full name. +func ParsePodFullName(podFullName string) (string, string, error) { + parts := strings.Split(podFullName, "_") + if len(parts) != 2 { + return "", "", fmt.Errorf("failed to parse the pod full name %q", podFullName) + } + return parts[0], parts[1], nil +} + +// Option is a functional option type for Runtime, useful for +// completely optional settings. +type Option func(Runtime) + +// Sort the container statuses by creation time. +type SortContainerStatusesByCreationTime []*ContainerStatus + +func (s SortContainerStatusesByCreationTime) Len() int { return len(s) } +func (s SortContainerStatusesByCreationTime) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s SortContainerStatusesByCreationTime) Less(i, j int) bool { + return s[i].CreatedAt.Before(s[j].CreatedAt) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/container/runtime_cache.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/container/runtime_cache.go new file mode 100644 index 000000000000..0926107da5a0 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/container/runtime_cache.go @@ -0,0 +1,96 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package container + +import ( + "sync" + "time" +) + +var ( + // TODO(yifan): Maybe set them as parameters for NewCache(). + defaultCachePeriod = time.Second * 2 +) + +type RuntimeCache interface { + GetPods() ([]*Pod, error) + ForceUpdateIfOlder(time.Time) error +} + +type podsGetter interface { + GetPods(bool) ([]*Pod, error) +} + +// NewRuntimeCache creates a container runtime cache. +func NewRuntimeCache(getter podsGetter) (RuntimeCache, error) { + return &runtimeCache{ + getter: getter, + }, nil +} + +// runtimeCache caches a list of pods. It records a timestamp (cacheTime) right +// before updating the pods, so the timestamp is at most as new as the pods +// (and can be slightly older). The timestamp always moves forward. Callers are +// expected not to modify the pods returned from GetPods. +type runtimeCache struct { + sync.Mutex + // The underlying container runtime used to update the cache. + getter podsGetter + // Last time when cache was updated. + cacheTime time.Time + // The content of the cache. + pods []*Pod +} + +// GetPods returns the cached pods if they are not outdated; otherwise, it +// retrieves the latest pods and returns them. +func (r *runtimeCache) GetPods() ([]*Pod, error) { + r.Lock() + defer r.Unlock() + if time.Since(r.cacheTime) > defaultCachePeriod { + if err := r.updateCache(); err != nil { + return nil, err + } + } + return r.pods, nil +} + +func (r *runtimeCache) ForceUpdateIfOlder(minExpectedCacheTime time.Time) error { + r.Lock() + defer r.Unlock() + if r.cacheTime.Before(minExpectedCacheTime) { + return r.updateCache() + } + return nil +} + +func (r *runtimeCache) updateCache() error { + pods, timestamp, err := r.getPodsWithTimestamp() + if err != nil { + return err + } + r.pods, r.cacheTime = pods, timestamp + return nil +} + +// getPodsWithTimestamp records a timestamp and retrieves pods from the getter. +func (r *runtimeCache) getPodsWithTimestamp() ([]*Pod, time.Time, error) { + // Always record the timestamp before getting the pods to avoid stale pods. + timestamp := time.Now() + pods, err := r.getter.GetPods(false) + return pods, timestamp, err +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/container/runtime_cache_fake.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/container/runtime_cache_fake.go new file mode 100644 index 000000000000..0b6e7d868bfd --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/container/runtime_cache_fake.go @@ -0,0 +1,42 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package container + +// TestRuntimeCache embeds runtimeCache with some additional methods for testing. +// It must be declared in the container package to have visibility to runtimeCache. +// It cannot be in a "..._test.go" file in order for runtime_cache_test.go to have cross-package visibility to it. +// (cross-package declarations in test files cannot be used from dot imports if this package is vendored) +type TestRuntimeCache struct { + runtimeCache +} + +func (r *TestRuntimeCache) UpdateCacheWithLock() error { + r.Lock() + defer r.Unlock() + return r.updateCache() +} + +func (r *TestRuntimeCache) GetCachedPods() []*Pod { + r.Lock() + defer r.Unlock() + return r.pods +} + +func NewTestRuntimeCache(getter podsGetter) *TestRuntimeCache { + c, _ := NewRuntimeCache(getter) + return &TestRuntimeCache{*c.(*runtimeCache)} +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/container/runtime_cache_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/container/runtime_cache_test.go new file mode 100644 index 000000000000..d66e07bfc58b --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/container/runtime_cache_test.go @@ -0,0 +1,68 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package container_test + +import ( + "reflect" + "testing" + "time" + + . "k8s.io/kubernetes/pkg/kubelet/container" + ctest "k8s.io/kubernetes/pkg/kubelet/container/testing" +) + +func TestGetPods(t *testing.T) { + runtime := &ctest.FakeRuntime{} + expected := []*Pod{{ID: "1111"}, {ID: "2222"}, {ID: "3333"}} + runtime.PodList = expected + cache := NewTestRuntimeCache(runtime) + actual, err := cache.GetPods() + if err != nil { + t.Errorf("unexpected error %v", err) + } + if !reflect.DeepEqual(expected, actual) { + t.Errorf("expected %#v, got %#v", expected, actual) + } +} + +func TestForceUpdateIfOlder(t *testing.T) { + runtime := &ctest.FakeRuntime{} + cache := NewTestRuntimeCache(runtime) + + // Cache old pods. + oldpods := []*Pod{{ID: "1111"}} + runtime.PodList = oldpods + cache.UpdateCacheWithLock() + + // Update the runtime to new pods. + newpods := []*Pod{{ID: "1111"}, {ID: "2222"}, {ID: "3333"}} + runtime.PodList = newpods + + // An older timestamp should not force an update. + cache.ForceUpdateIfOlder(time.Now().Add(-20 * time.Minute)) + actual := cache.GetCachedPods() + if !reflect.DeepEqual(oldpods, actual) { + t.Errorf("expected %#v, got %#v", oldpods, actual) + } + + // A newer timestamp should force an update.
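+ // (The fake runtime's pod list was swapped to newpods above, so this refresh should pick them up.)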
+ cache.ForceUpdateIfOlder(time.Now().Add(20 * time.Second)) + actual = cache.GetCachedPods() + if !reflect.DeepEqual(newpods, actual) { + t.Errorf("expected %#v, got %#v", newpods, actual) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/container/serialized_image_puller.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/container/serialized_image_puller.go new file mode 100644 index 000000000000..3b5c4689f549 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/container/serialized_image_puller.go @@ -0,0 +1,141 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package container + +import ( + "fmt" + "time" + + "github.com/golang/glog" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/client/record" + "k8s.io/kubernetes/pkg/util/flowcontrol" + "k8s.io/kubernetes/pkg/util/wait" +) + +type imagePullRequest struct { + spec ImageSpec + container *api.Container + pullSecrets []api.Secret + logPrefix string + ref *api.ObjectReference + returnChan chan<- error +} + +// serializedImagePuller pulls the image using Runtime.PullImage(). +// It will check the presence of the image, and report the 'image pulling', +// 'image pulled' events correspondingly. +type serializedImagePuller struct { + recorder record.EventRecorder + runtime Runtime + backOff *flowcontrol.Backoff + pullRequests chan *imagePullRequest +} + +// enforce interface compatibility. +var _ ImagePuller = &serializedImagePuller{} + +// NewSerializedImagePuller takes an event recorder and container runtime to create an +// image puller that wraps the container runtime's PullImage interface. +// Pulls one image at a time. +// Issue #10959 has the rationale behind serializing image pulls. +func NewSerializedImagePuller(recorder record.EventRecorder, runtime Runtime, imageBackOff *flowcontrol.Backoff) ImagePuller { + imagePuller := &serializedImagePuller{ + recorder: recorder, + runtime: runtime, + backOff: imageBackOff, + pullRequests: make(chan *imagePullRequest, 10), + } + go wait.Until(imagePuller.pullImages, time.Second, wait.NeverStop) + return imagePuller +} + +// logIt records an event for ref with the given msg; when ref is nil, it logs prefix and msg via logFn instead. +func (puller *serializedImagePuller) logIt(ref *api.ObjectReference, eventtype, event, prefix, msg string, logFn func(args ...interface{})) { + if ref != nil { + puller.recorder.Event(ref, eventtype, event, msg) + } else { + logFn(fmt.Sprint(prefix, " ", msg)) + } +} + +// PullImage pulls the image for the specified pod and container.
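+// Rather than pulling inline, it enqueues an imagePullRequest on puller.pullRequests and blocks on the request's returnChan; the single pullImages goroutine services the queue, so at most one pull is in flight at a time.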
+func (puller *serializedImagePuller) PullImage(pod *api.Pod, container *api.Container, pullSecrets []api.Secret) (error, string) { + logPrefix := fmt.Sprintf("%s/%s", pod.Name, container.Image) + ref, err := GenerateContainerRef(pod, container) + if err != nil { + glog.Errorf("Couldn't make a ref to pod %v, container %v: '%v'", pod.Name, container.Name, err) + } + + spec := ImageSpec{container.Image} + present, err := puller.runtime.IsImagePresent(spec) + if err != nil { + msg := fmt.Sprintf("Failed to inspect image %q: %v", container.Image, err) + puller.logIt(ref, api.EventTypeWarning, FailedToInspectImage, logPrefix, msg, glog.Warning) + return ErrImageInspect, msg + } + + if !shouldPullImage(container, present) { + if present { + msg := fmt.Sprintf("Container image %q already present on machine", container.Image) + puller.logIt(ref, api.EventTypeNormal, PulledImage, logPrefix, msg, glog.Info) + return nil, "" + } else { + msg := fmt.Sprintf("Container image %q is not present with pull policy of Never", container.Image) + puller.logIt(ref, api.EventTypeWarning, ErrImageNeverPullPolicy, logPrefix, msg, glog.Warning) + return ErrImageNeverPull, msg + } + } + + backOffKey := fmt.Sprintf("%s_%s", pod.Name, container.Image) + if puller.backOff.IsInBackOffSinceUpdate(backOffKey, puller.backOff.Clock.Now()) { + msg := fmt.Sprintf("Back-off pulling image %q", container.Image) + puller.logIt(ref, api.EventTypeNormal, BackOffPullImage, logPrefix, msg, glog.Info) + return ErrImagePullBackOff, msg + } + + // enqueue image pull request and wait for response. + returnChan := make(chan error) + puller.pullRequests <- &imagePullRequest{ + spec: spec, + container: container, + pullSecrets: pullSecrets, + logPrefix: logPrefix, + ref: ref, + returnChan: returnChan, + } + if err = <-returnChan; err != nil { + puller.logIt(ref, api.EventTypeWarning, FailedToPullImage, logPrefix, fmt.Sprintf("Failed to pull image %q: %v", container.Image, err), glog.Warning) + puller.backOff.Next(backOffKey, puller.backOff.Clock.Now()) + if err == RegistryUnavailable { + msg := fmt.Sprintf("image pull failed for %s because the registry is unavailable.", container.Image) + return err, msg + } else { + return ErrImagePull, err.Error() + } + } + puller.logIt(ref, api.EventTypeNormal, PulledImage, logPrefix, fmt.Sprintf("Successfully pulled image %q", container.Image), glog.Info) + puller.backOff.GC() + return nil, "" +} + +func (puller *serializedImagePuller) pullImages() { + for pullRequest := range puller.pullRequests { + puller.logIt(pullRequest.ref, api.EventTypeNormal, PullingImage, pullRequest.logPrefix, fmt.Sprintf("pulling image %q", pullRequest.container.Image), glog.Info) + pullRequest.returnChan <- puller.runtime.PullImage(pullRequest.spec, pullRequest.pullSecrets) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/container/serialized_image_puller_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/container/serialized_image_puller_test.go new file mode 100644 index 000000000000..4fea2b274825 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/container/serialized_image_puller_test.go @@ -0,0 +1,123 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package container_test + +import ( + "errors" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/client/record" + . "k8s.io/kubernetes/pkg/kubelet/container" + ctest "k8s.io/kubernetes/pkg/kubelet/container/testing" + "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/flowcontrol" +) + +func TestSerializedPuller(t *testing.T) { + pod := &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: "test_pod", + Namespace: "test-ns", + UID: "bar", + ResourceVersion: "42", + SelfLink: "/api/v1/pods/foo", + }} + + cases := []struct { + containerImage string + policy api.PullPolicy + calledFunctions []string + inspectErr error + pullerErr error + expectedErr []error + }{ + { // pull missing image + containerImage: "missing_image", + policy: api.PullIfNotPresent, + calledFunctions: []string{"IsImagePresent", "PullImage"}, + inspectErr: nil, + pullerErr: nil, + expectedErr: []error{nil}}, + + { // image present, dont pull + containerImage: "present_image", + policy: api.PullIfNotPresent, + calledFunctions: []string{"IsImagePresent"}, + inspectErr: nil, + pullerErr: nil, + expectedErr: []error{nil, nil, nil}}, + // image present, pull it + {containerImage: "present_image", + policy: api.PullAlways, + calledFunctions: []string{"IsImagePresent", "PullImage"}, + inspectErr: nil, + pullerErr: nil, + expectedErr: []error{nil, nil, nil}}, + // missing image, error PullNever + {containerImage: "missing_image", + policy: api.PullNever, + calledFunctions: []string{"IsImagePresent"}, + inspectErr: nil, + pullerErr: nil, + expectedErr: []error{ErrImageNeverPull, ErrImageNeverPull, ErrImageNeverPull}}, + // missing image, unable to inspect + {containerImage: "missing_image", + policy: api.PullIfNotPresent, + calledFunctions: []string{"IsImagePresent"}, + inspectErr: errors.New("unknown inspectError"), + pullerErr: nil, + expectedErr: []error{ErrImageInspect, ErrImageInspect, ErrImageInspect}}, + // missing image, unable to fetch + {containerImage: "typo_image", + policy: api.PullIfNotPresent, + calledFunctions: []string{"IsImagePresent", "PullImage"}, + inspectErr: nil, + pullerErr: errors.New("404"), + expectedErr: []error{ErrImagePull, ErrImagePull, ErrImagePullBackOff, ErrImagePull, ErrImagePullBackOff, ErrImagePullBackOff}}, + } + + for i, c := range cases { + container := &api.Container{ + Name: "container_name", + Image: c.containerImage, + ImagePullPolicy: c.policy, + } + + backOff := flowcontrol.NewBackOff(time.Second, time.Minute) + fakeClock := util.NewFakeClock(time.Now()) + backOff.Clock = fakeClock + + fakeRuntime := &ctest.FakeRuntime{} + fakeRecorder := &record.FakeRecorder{} + puller := NewSerializedImagePuller(fakeRecorder, fakeRuntime, backOff) + + fakeRuntime.ImageList = []Image{{"present_image", nil, nil, 0}} + fakeRuntime.Err = c.pullerErr + fakeRuntime.InspectErr = c.inspectErr + + for tick, expected := range c.expectedErr { + fakeClock.Step(time.Second) + err, _ := puller.PullImage(pod, container, nil) + fakeRuntime.AssertCalls(c.calledFunctions) + assert.Equal(t, expected, err, "in test %d tick=%d", i, tick) + } + + } +} diff 
--git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/container/sync_result.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/container/sync_result.go new file mode 100644 index 000000000000..6a196f602b2a --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/container/sync_result.go @@ -0,0 +1,137 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package container + +import ( + "errors" + "fmt" + + utilerrors "k8s.io/kubernetes/pkg/util/errors" +) + +// TODO(random-liu): We need to better organize runtime errors for introspection. + +// Container terminated and kubelet is backing off the restart. +var ErrCrashLoopBackOff = errors.New("CrashLoopBackOff") + +var ( + // Container image pull failed, kubelet is backing off image pull. + ErrImagePullBackOff = errors.New("ImagePullBackOff") + + // Unable to inspect image. + ErrImageInspect = errors.New("ImageInspectError") + + // General image pull error. + ErrImagePull = errors.New("ErrImagePull") + + // Required image is absent on host and PullPolicy is NeverPullImage. + ErrImageNeverPull = errors.New("ErrImageNeverPull") + + // ErrContainerNotFound returned when a container in the given pod with the + // given container name was not found, amongst those managed by the kubelet. + ErrContainerNotFound = errors.New("no matching container") + + // Got an HTTP error when pulling image from the registry. + RegistryUnavailable = errors.New("RegistryUnavailable") +) + +var ( + ErrRunContainer = errors.New("RunContainerError") + ErrKillContainer = errors.New("KillContainerError") + ErrVerifyNonRoot = errors.New("VerifyNonRootError") + ErrRunInitContainer = errors.New("RunInitContainerError") +) + +var ( + ErrSetupNetwork = errors.New("SetupNetworkError") + ErrTeardownNetwork = errors.New("TeardownNetworkError") +) + +// SyncAction indicates the different kinds of actions in SyncPod() and KillPod(). Currently +// there are only actions for starting/killing containers and setting up/tearing down the network. +type SyncAction string + +const ( + StartContainer SyncAction = "StartContainer" + KillContainer SyncAction = "KillContainer" + SetupNetwork SyncAction = "SetupNetwork" + TeardownNetwork SyncAction = "TeardownNetwork" + InitContainer SyncAction = "InitContainer" +) + +// SyncResult is the result of a sync action.
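+// A minimal sketch of intended use (startContainer here is a hypothetical helper): +// +// r := NewSyncResult(StartContainer, "app") +// if err := startContainer(); err != nil { +// r.Fail(ErrRunContainer, err.Error()) +// } +// podResult.AddSyncResult(r)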
+type SyncResult struct { + // The associated action of the result. + Action SyncAction + // The target of the action. Currently the target can be: + // * Container: the target should be the container name + // * Network: the target is unused for now; we just set it to the pod full name + Target interface{} + // Brief error reason. + Error error + // Human-readable error reason. + Message string +} + +// NewSyncResult generates new SyncResult with specific Action and Target +func NewSyncResult(action SyncAction, target interface{}) *SyncResult { + return &SyncResult{Action: action, Target: target} +} + +// Fail fails the SyncResult with specific error and message +func (r *SyncResult) Fail(err error, msg string) { + r.Error, r.Message = err, msg +} + +// PodSyncResult is the summary result of SyncPod() and KillPod() +type PodSyncResult struct { + // Result of different sync actions + SyncResults []*SyncResult + // Error encountered in SyncPod() and KillPod() that is not already included in SyncResults + SyncError error +} + +// AddSyncResult adds multiple SyncResult to current PodSyncResult +func (p *PodSyncResult) AddSyncResult(result ...*SyncResult) { + p.SyncResults = append(p.SyncResults, result...) +} + +// AddPodSyncResult merges a PodSyncResult to current one +func (p *PodSyncResult) AddPodSyncResult(result PodSyncResult) { + p.AddSyncResult(result.SyncResults...) + p.SyncError = result.SyncError +} + +// Fail fails the PodSyncResult with an error that occurred in SyncPod() and KillPod() itself +func (p *PodSyncResult) Fail(err error) { + p.SyncError = err +} + +// Error returns an error summarizing all the errors in PodSyncResult +func (p *PodSyncResult) Error() error { + errlist := []error{} + if p.SyncError != nil { + errlist = append(errlist, fmt.Errorf("failed to SyncPod: %v\n", p.SyncError)) + } + for _, result := range p.SyncResults { + if result.Error != nil { + errlist = append(errlist, fmt.Errorf("failed to %q for %q with %v: %q\n", result.Action, result.Target, + result.Error, result.Message)) + } + } + return utilerrors.NewAggregate(errlist) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/container/sync_result_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/container/sync_result_test.go new file mode 100644 index 000000000000..a510d8a92204 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/container/sync_result_test.go @@ -0,0 +1,68 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package container + +import ( + "errors" + "testing" +) + +func TestPodSyncResult(t *testing.T) { + okResults := []*SyncResult{ + NewSyncResult(StartContainer, "container_0"), + NewSyncResult(SetupNetwork, "pod"), + } + errResults := []*SyncResult{ + NewSyncResult(KillContainer, "container_1"), + NewSyncResult(TeardownNetwork, "pod"), + } + errResults[0].Fail(errors.New("error_0"), "message_0") + errResults[1].Fail(errors.New("error_1"), "message_1") + + // If the PodSyncResult contains no error results, Error() should return nil. + result := PodSyncResult{} + result.AddSyncResult(okResults...) + if result.Error() != nil { + t.Errorf("PodSyncResult should not be error: %v", result) + } + + // If the PodSyncResult contains an error result, Error() should be non-nil. + result = PodSyncResult{} + result.AddSyncResult(okResults...) + result.AddSyncResult(errResults...) + if result.Error() == nil { + t.Errorf("PodSyncResult should be error: %q", result) + } + + // If the PodSyncResult has been failed, Error() should be non-nil. + result = PodSyncResult{} + result.AddSyncResult(okResults...) + result.Fail(errors.New("error")) + if result.Error() == nil { + t.Errorf("PodSyncResult should be error: %q", result) + } + + // If an error PodSyncResult is merged in, Error() should be non-nil. + errResult := PodSyncResult{} + errResult.AddSyncResult(errResults...) + result = PodSyncResult{} + result.AddSyncResult(okResults...) + result.AddPodSyncResult(errResult) + if result.Error() == nil { + t.Errorf("PodSyncResult should be error: %q", result) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/container/testing/fake_cache.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/container/testing/fake_cache.go new file mode 100644 index 000000000000..db7a82e5a18f --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/container/testing/fake_cache.go @@ -0,0 +1,49 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package testing + +import ( + "time" + + "k8s.io/kubernetes/pkg/kubelet/container" + "k8s.io/kubernetes/pkg/types" +) + +type fakeCache struct { + runtime container.Runtime +} + +func NewFakeCache(runtime container.Runtime) container.Cache { + return &fakeCache{runtime: runtime} +} + +func (c *fakeCache) Get(id types.UID) (*container.PodStatus, error) { + return c.runtime.GetPodStatus(id, "", "") +} + +func (c *fakeCache) GetNewerThan(id types.UID, minTime time.Time) (*container.PodStatus, error) { + return c.Get(id) +} + +func (c *fakeCache) Set(id types.UID, status *container.PodStatus, err error, timestamp time.Time) { +} + +func (c *fakeCache) Delete(id types.UID) { +} + +func (c *fakeCache) UpdateTime(_ time.Time) { +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/container/testing/fake_runtime.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/container/testing/fake_runtime.go new file mode 100644 index 000000000000..db2aeacf3e5b --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/container/testing/fake_runtime.go @@ -0,0 +1,363 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testing + +import ( + "fmt" + "io" + "reflect" + "sync" + "time" + + "k8s.io/kubernetes/pkg/api" + . "k8s.io/kubernetes/pkg/kubelet/container" + "k8s.io/kubernetes/pkg/types" + "k8s.io/kubernetes/pkg/util/flowcontrol" + "k8s.io/kubernetes/pkg/volume" +) + +// FakeRuntime is a fake container runtime for testing. +type FakeRuntime struct { + sync.Mutex + CalledFunctions []string + PodList []*Pod + AllPodList []*Pod + ImageList []Image + APIPodStatus api.PodStatus + PodStatus PodStatus + StartedPods []string + KilledPods []string + StartedContainers []string + KilledContainers []string + VersionInfo string + APIVersionInfo string + RuntimeType string + Err error + InspectErr error + StatusErr error +} + +// FakeRuntime should implement Runtime. +var _ Runtime = &FakeRuntime{} + +type FakeVersion struct { + Version string +} + +func (fv *FakeVersion) String() string { + return fv.Version +} + +func (fv *FakeVersion) Compare(other string) (int, error) { + result := 0 + if fv.Version > other { + result = 1 + } else if fv.Version < other { + result = -1 + } + return result, nil +} + +type podsGetter interface { + GetPods(bool) ([]*Pod, error) +} + +type FakeRuntimeCache struct { + getter podsGetter +} + +func NewFakeRuntimeCache(getter podsGetter) RuntimeCache { + return &FakeRuntimeCache{getter} +} + +func (f *FakeRuntimeCache) GetPods() ([]*Pod, error) { + return f.getter.GetPods(false) +} + +func (f *FakeRuntimeCache) ForceUpdateIfOlder(time.Time) error { + return nil +} + +// ClearCalls resets the FakeRuntime to the initial state. 
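+// Tests that reuse one FakeRuntime across cases typically call ClearCalls between cases so that AssertCalls and the Started*/Killed* lists reflect only the step under test.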
+func (f *FakeRuntime) ClearCalls() { + f.Lock() + defer f.Unlock() + + f.CalledFunctions = []string{} + f.PodList = []*Pod{} + f.AllPodList = []*Pod{} + f.APIPodStatus = api.PodStatus{} + f.StartedPods = []string{} + f.KilledPods = []string{} + f.StartedContainers = []string{} + f.KilledContainers = []string{} + f.VersionInfo = "" + f.RuntimeType = "" + f.Err = nil + f.InspectErr = nil + f.StatusErr = nil +} + +func (f *FakeRuntime) assertList(expect []string, test []string) error { + if !reflect.DeepEqual(expect, test) { + return fmt.Errorf("expected %#v, got %#v", expect, test) + } + return nil +} + +// AssertCalls test if the invoked functions are as expected. +func (f *FakeRuntime) AssertCalls(calls []string) error { + f.Lock() + defer f.Unlock() + return f.assertList(calls, f.CalledFunctions) +} + +func (f *FakeRuntime) AssertStartedPods(pods []string) error { + f.Lock() + defer f.Unlock() + return f.assertList(pods, f.StartedPods) +} + +func (f *FakeRuntime) AssertKilledPods(pods []string) error { + f.Lock() + defer f.Unlock() + return f.assertList(pods, f.KilledPods) +} + +func (f *FakeRuntime) AssertStartedContainers(containers []string) error { + f.Lock() + defer f.Unlock() + return f.assertList(containers, f.StartedContainers) +} + +func (f *FakeRuntime) AssertKilledContainers(containers []string) error { + f.Lock() + defer f.Unlock() + return f.assertList(containers, f.KilledContainers) +} + +func (f *FakeRuntime) Type() string { + return f.RuntimeType +} + +func (f *FakeRuntime) Version() (Version, error) { + f.Lock() + defer f.Unlock() + + f.CalledFunctions = append(f.CalledFunctions, "Version") + return &FakeVersion{Version: f.VersionInfo}, f.Err +} + +func (f *FakeRuntime) APIVersion() (Version, error) { + f.Lock() + defer f.Unlock() + + f.CalledFunctions = append(f.CalledFunctions, "APIVersion") + return &FakeVersion{Version: f.APIVersionInfo}, f.Err +} + +func (f *FakeRuntime) Status() error { + f.Lock() + defer f.Unlock() + + f.CalledFunctions = append(f.CalledFunctions, "Status") + return f.StatusErr +} + +func (f *FakeRuntime) GetPods(all bool) ([]*Pod, error) { + f.Lock() + defer f.Unlock() + + f.CalledFunctions = append(f.CalledFunctions, "GetPods") + if all { + return f.AllPodList, f.Err + } + return f.PodList, f.Err +} + +func (f *FakeRuntime) SyncPod(pod *api.Pod, _ api.PodStatus, _ *PodStatus, _ []api.Secret, backOff *flowcontrol.Backoff) (result PodSyncResult) { + f.Lock() + defer f.Unlock() + + f.CalledFunctions = append(f.CalledFunctions, "SyncPod") + f.StartedPods = append(f.StartedPods, string(pod.UID)) + for _, c := range pod.Spec.Containers { + f.StartedContainers = append(f.StartedContainers, c.Name) + } + // TODO(random-liu): Add SyncResult for starting and killing containers + if f.Err != nil { + result.Fail(f.Err) + } + return +} + +func (f *FakeRuntime) KillPod(pod *api.Pod, runningPod Pod, gracePeriodOverride *int64) error { + f.Lock() + defer f.Unlock() + + f.CalledFunctions = append(f.CalledFunctions, "KillPod") + f.KilledPods = append(f.KilledPods, string(runningPod.ID)) + for _, c := range runningPod.Containers { + f.KilledContainers = append(f.KilledContainers, c.Name) + } + return f.Err +} + +func (f *FakeRuntime) RunContainerInPod(container api.Container, pod *api.Pod, volumeMap map[string]volume.VolumePlugin) error { + f.Lock() + defer f.Unlock() + + f.CalledFunctions = append(f.CalledFunctions, "RunContainerInPod") + f.StartedContainers = append(f.StartedContainers, container.Name) + + pod.Spec.Containers = append(pod.Spec.Containers, 
container)
+ return f.Err
+}
+
+func (f *FakeRuntime) KillContainerInPod(container api.Container, pod *api.Pod) error {
+ f.Lock()
+ defer f.Unlock()
+
+ f.CalledFunctions = append(f.CalledFunctions, "KillContainerInPod")
+ f.KilledContainers = append(f.KilledContainers, container.Name)
+
+ var containers []api.Container
+ for _, c := range pod.Spec.Containers {
+ if c.Name == container.Name {
+ continue
+ }
+ containers = append(containers, c)
+ }
+ pod.Spec.Containers = containers
+ return f.Err
+}
+
+func (f *FakeRuntime) GetPodStatus(uid types.UID, name, namespace string) (*PodStatus, error) {
+ f.Lock()
+ defer f.Unlock()
+
+ f.CalledFunctions = append(f.CalledFunctions, "GetPodStatus")
+ status := f.PodStatus
+ return &status, f.Err
+}
+
+func (f *FakeRuntime) ExecInContainer(containerID ContainerID, cmd []string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool) error {
+ f.Lock()
+ defer f.Unlock()
+
+ f.CalledFunctions = append(f.CalledFunctions, "ExecInContainer")
+ return f.Err
+}
+
+func (f *FakeRuntime) AttachContainer(containerID ContainerID, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool) error {
+ f.Lock()
+ defer f.Unlock()
+
+ f.CalledFunctions = append(f.CalledFunctions, "AttachContainer")
+ return f.Err
+}
+
+func (f *FakeRuntime) GetContainerLogs(pod *api.Pod, containerID ContainerID, logOptions *api.PodLogOptions, stdout, stderr io.Writer) (err error) {
+ f.Lock()
+ defer f.Unlock()
+
+ f.CalledFunctions = append(f.CalledFunctions, "GetContainerLogs")
+ return f.Err
+}
+
+func (f *FakeRuntime) PullImage(image ImageSpec, pullSecrets []api.Secret) error {
+ f.Lock()
+ defer f.Unlock()
+
+ f.CalledFunctions = append(f.CalledFunctions, "PullImage")
+ return f.Err
+}
+
+func (f *FakeRuntime) IsImagePresent(image ImageSpec) (bool, error) {
+ f.Lock()
+ defer f.Unlock()
+
+ f.CalledFunctions = append(f.CalledFunctions, "IsImagePresent")
+ for _, i := range f.ImageList {
+ if i.ID == image.Image {
+ return true, nil
+ }
+ }
+ return false, f.InspectErr
+}
+
+func (f *FakeRuntime) ListImages() ([]Image, error) {
+ f.Lock()
+ defer f.Unlock()
+
+ f.CalledFunctions = append(f.CalledFunctions, "ListImages")
+ return f.ImageList, f.Err
+}
+
+func (f *FakeRuntime) RemoveImage(image ImageSpec) error {
+ f.Lock()
+ defer f.Unlock()
+
+ f.CalledFunctions = append(f.CalledFunctions, "RemoveImage")
+ index := -1
+ for i := range f.ImageList {
+ if f.ImageList[i].ID == image.Image {
+ index = i
+ break
+ }
+ }
+ if index < 0 {
+ // Image not found; nothing to remove.
+ return f.Err
+ }
+ f.ImageList = append(f.ImageList[:index], f.ImageList[index+1:]...)
+
+ return f.Err
+}
+
+func (f *FakeRuntime) PortForward(pod *Pod, port uint16, stream io.ReadWriteCloser) error {
+ f.Lock()
+ defer f.Unlock()
+
+ f.CalledFunctions = append(f.CalledFunctions, "PortForward")
+ return f.Err
+}
+
+func (f *FakeRuntime) GetNetNS(containerID ContainerID) (string, error) {
+ f.Lock()
+ defer f.Unlock()
+
+ f.CalledFunctions = append(f.CalledFunctions, "GetNetNS")
+ return "", f.Err
+}
+
+func (f *FakeRuntime) GarbageCollect(gcPolicy ContainerGCPolicy) error {
+ f.Lock()
+ defer f.Unlock()
+
+ f.CalledFunctions = append(f.CalledFunctions, "GarbageCollect")
+ return f.Err
+}
+
+func (f *FakeRuntime) ImageStats() (*ImageStats, error) {
+ f.Lock()
+ defer f.Unlock()
+
+ f.CalledFunctions = append(f.CalledFunctions, "ImageStats")
+ return nil, f.Err
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/container/testing/os.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/container/testing/os.go
new file mode 100644
index 000000000000..f926661944ef
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/container/testing/os.go
@@ -0,0 +1,93 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package testing
+
+import (
+ "errors"
+ "os"
+ "time"
+)
+
+// FakeOS mocks out certain OS calls to avoid perturbing the filesystem.
+// If a member of the form `*Fn` is set, that function will be called in place
+// of the real call.
+type FakeOS struct {
+ StatFn func(string) (os.FileInfo, error)
+ ReadDirFn func(string) ([]os.FileInfo, error)
+ HostName string
+ Removes []string
+ Files map[string][]*os.FileInfo
+}
+
+func NewFakeOS() *FakeOS {
+ return &FakeOS{
+ Removes: []string{},
+ Files: make(map[string][]*os.FileInfo),
+ }
+}
+
+// MkdirAll is a fake call that just returns nil.
+func (FakeOS) MkdirAll(path string, perm os.FileMode) error {
+ return nil
+}
+
+// Symlink is a fake call that just returns nil.
+func (FakeOS) Symlink(oldname string, newname string) error {
+ return nil
+}
+
+// Stat delegates to StatFn if it is set; otherwise it returns an error.
+func (f FakeOS) Stat(path string) (os.FileInfo, error) {
+ if f.StatFn != nil {
+ return f.StatFn(path)
+ }
+ return nil, errors.New("unimplemented testing mock")
+}
+
+// Remove records the removed path and returns nil.
+func (f *FakeOS) Remove(path string) error {
+ f.Removes = append(f.Removes, path)
+ return nil
+}
+
+// Create is a fake call that returns a nil file and a nil error.
+func (FakeOS) Create(path string) (*os.File, error) {
+ return nil, nil
+}
+
+// Hostname is a fake call that returns the configured HostName.
+func (f *FakeOS) Hostname() (name string, err error) {
+ return f.HostName, nil
+}
+
+// Chtimes is a fake call that just returns nil.
+func (FakeOS) Chtimes(path string, atime time.Time, mtime time.Time) error {
+ return nil
+}
+
+// Pipe is a fake call that returns nil files and a nil error.
+func (FakeOS) Pipe() (r *os.File, w *os.File, err error) {
+ return nil, nil, nil
+}
+
+// ReadDir delegates to ReadDirFn if it is set; otherwise it returns an error.
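+//
+// A hedged example of wiring the hook (the fake directory listing below is
+// hypothetical, purely for illustration):
+//
+//	fos := NewFakeOS()
+//	fos.ReadDirFn = func(dirname string) ([]os.FileInfo, error) {
+//		return []os.FileInfo{}, nil // pretend the directory is empty
+//	}
+//	infos, err := fos.ReadDir("/var/log/containers")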
+func (f *FakeOS) ReadDir(dirname string) ([]os.FileInfo, error) { + if f.ReadDirFn != nil { + return f.ReadDirFn(dirname) + } + return nil, errors.New("unimplemented testing mock") +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/container/testing/runtime_mock.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/container/testing/runtime_mock.go new file mode 100644 index 000000000000..3f269249b399 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/container/testing/runtime_mock.go @@ -0,0 +1,144 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testing + +import ( + "io" + + "github.com/stretchr/testify/mock" + "k8s.io/kubernetes/pkg/api" + . "k8s.io/kubernetes/pkg/kubelet/container" + "k8s.io/kubernetes/pkg/types" + "k8s.io/kubernetes/pkg/util/flowcontrol" + "k8s.io/kubernetes/pkg/volume" +) + +type Mock struct { + mock.Mock +} + +var _ Runtime = new(Mock) + +func (r *Mock) Start() error { + args := r.Called() + return args.Error(0) +} + +func (r *Mock) Type() string { + args := r.Called() + return args.Get(0).(string) +} + +func (r *Mock) Version() (Version, error) { + args := r.Called() + return args.Get(0).(Version), args.Error(1) +} + +func (r *Mock) APIVersion() (Version, error) { + args := r.Called() + return args.Get(0).(Version), args.Error(1) +} + +func (r *Mock) Status() error { + args := r.Called() + return args.Error(0) +} + +func (r *Mock) GetPods(all bool) ([]*Pod, error) { + args := r.Called(all) + return args.Get(0).([]*Pod), args.Error(1) +} + +func (r *Mock) SyncPod(pod *api.Pod, apiStatus api.PodStatus, status *PodStatus, secrets []api.Secret, backOff *flowcontrol.Backoff) PodSyncResult { + args := r.Called(pod, apiStatus, status, secrets, backOff) + return args.Get(0).(PodSyncResult) +} + +func (r *Mock) KillPod(pod *api.Pod, runningPod Pod, gracePeriodOverride *int64) error { + args := r.Called(pod, runningPod, gracePeriodOverride) + return args.Error(0) +} + +func (r *Mock) RunContainerInPod(container api.Container, pod *api.Pod, volumeMap map[string]volume.VolumePlugin) error { + args := r.Called(pod, pod, volumeMap) + return args.Error(0) +} + +func (r *Mock) KillContainerInPod(container api.Container, pod *api.Pod) error { + args := r.Called(pod, pod) + return args.Error(0) +} + +func (r *Mock) GetPodStatus(uid types.UID, name, namespace string) (*PodStatus, error) { + args := r.Called(uid, name, namespace) + return args.Get(0).(*PodStatus), args.Error(1) +} + +func (r *Mock) ExecInContainer(containerID ContainerID, cmd []string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool) error { + args := r.Called(containerID, cmd, stdin, stdout, stderr, tty) + return args.Error(0) +} + +func (r *Mock) AttachContainer(containerID ContainerID, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool) error { + args := r.Called(containerID, stdin, stdout, stderr, tty) + return args.Error(0) +} + +func (r *Mock) GetContainerLogs(pod *api.Pod, containerID ContainerID, 
logOptions *api.PodLogOptions, stdout, stderr io.Writer) (err error) { + args := r.Called(pod, containerID, logOptions, stdout, stderr) + return args.Error(0) +} + +func (r *Mock) PullImage(image ImageSpec, pullSecrets []api.Secret) error { + args := r.Called(image, pullSecrets) + return args.Error(0) +} + +func (r *Mock) IsImagePresent(image ImageSpec) (bool, error) { + args := r.Called(image) + return args.Get(0).(bool), args.Error(1) +} + +func (r *Mock) ListImages() ([]Image, error) { + args := r.Called() + return args.Get(0).([]Image), args.Error(1) +} + +func (r *Mock) RemoveImage(image ImageSpec) error { + args := r.Called(image) + return args.Error(0) +} + +func (r *Mock) PortForward(pod *Pod, port uint16, stream io.ReadWriteCloser) error { + args := r.Called(pod, port, stream) + return args.Error(0) +} + +func (r *Mock) GetNetNS(containerID ContainerID) (string, error) { + args := r.Called(containerID) + return "", args.Error(0) +} + +func (r *Mock) GarbageCollect(gcPolicy ContainerGCPolicy) error { + args := r.Called(gcPolicy) + return args.Error(0) +} + +func (r *Mock) ImageStats() (*ImageStats, error) { + args := r.Called() + return args.Get(0).(*ImageStats), args.Error(1) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/container_bridge.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/container_bridge.go new file mode 100644 index 000000000000..e151dc709b47 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/container_bridge.go @@ -0,0 +1,167 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubelet + +import ( + "bytes" + "net" + "os" + "os/exec" + "regexp" + + "github.com/golang/glog" + "k8s.io/kubernetes/pkg/util" +) + +var cidrRegexp = regexp.MustCompile(`inet ([0-9a-fA-F.:]*/[0-9]*)`) + +func createCBR0(wantCIDR *net.IPNet, babysitDaemons bool) error { + // recreate cbr0 with wantCIDR + if err := exec.Command("brctl", "addbr", "cbr0").Run(); err != nil { + glog.Error(err) + return err + } + if err := exec.Command("ip", "addr", "add", wantCIDR.String(), "dev", "cbr0").Run(); err != nil { + glog.Error(err) + return err + } + if err := exec.Command("ip", "link", "set", "dev", "cbr0", "mtu", "1460", "up").Run(); err != nil { + glog.Error(err) + return err + } + // Stop docker so that babysitter process can restart it again with proper configurations and + // checkpoint file (https://github.com/docker/docker/issues/18283). It is safe to kill docker + // process here since CIDR can be changed only once for a given node object, and node is marked + // as NotReady until the docker daemon is restarted with the newly configured custom bridge. + // TODO (dawnchen): Remove this once corrupted checkpoint issue is fixed. + // + // For now just log the error. The containerRuntime check will catch docker failures. + // TODO (dawnchen) figure out what we should do for rkt here. 
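+ // The three restart paths below, in order: hard-kill docker and let its
+ // babysitter process bring it back; restart via systemd on systemd-managed
+ // hosts; otherwise fall back to the SysV-style service wrapper.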
+ if babysitDaemons {
+ if err := exec.Command("pkill", "-KILL", "docker").Run(); err != nil {
+ glog.Error(err)
+ }
+ } else if util.UsingSystemdInitSystem() {
+ if err := exec.Command("systemctl", "restart", "docker").Run(); err != nil {
+ glog.Error(err)
+ }
+ } else {
+ if err := exec.Command("service", "docker", "restart").Run(); err != nil {
+ glog.Error(err)
+ }
+ }
+ glog.V(2).Info("Recreated cbr0 and restarted docker")
+ return nil
+}
+
+func ensureCbr0(wantCIDR *net.IPNet, promiscuous, babysitDaemons bool) error {
+ exists, err := cbr0Exists()
+ if err != nil {
+ return err
+ }
+ if !exists {
+ glog.V(2).Infof("CBR0 doesn't exist, attempting to create it with range: %s", wantCIDR)
+ return createCBR0(wantCIDR, babysitDaemons)
+ }
+ if !cbr0CidrCorrect(wantCIDR) {
+ glog.V(2).Infof("Attempting to recreate cbr0 with address range: %s", wantCIDR)
+
+ // delete cbr0
+ if err := exec.Command("ip", "link", "set", "dev", "cbr0", "down").Run(); err != nil {
+ glog.Error(err)
+ return err
+ }
+ if err := exec.Command("brctl", "delbr", "cbr0").Run(); err != nil {
+ glog.Error(err)
+ return err
+ }
+ if err := createCBR0(wantCIDR, babysitDaemons); err != nil {
+ glog.Error(err)
+ return err
+ }
+ }
+ // Put the container bridge into promiscuous mode to force it to accept hairpin packets.
+ // TODO: Remove this once the kernel bug (#20096) is fixed.
+ if promiscuous {
+ // Checking whether the bridge is already in promiscuous mode is as expensive as, and
+ // more brittle than, simply setting the flag every time.
+ if err := exec.Command("ip", "link", "set", "cbr0", "promisc", "on").Run(); err != nil {
+ glog.Error(err)
+ return err
+ }
+ }
+ return nil
+}
+
+// cbr0Exists checks whether the cbr0 network interface exists. A missing
+// interface is reported as (false, nil) so that the caller can create it;
+// any other error is propagated to the kubelet to handle.
+func cbr0Exists() (bool, error) {
+ if _, err := os.Stat("/sys/class/net/cbr0"); err != nil {
+ if os.IsNotExist(err) {
+ return false, nil
+ }
+ return false, err
+ }
+ return true, nil
+}
+
+func cbr0CidrCorrect(wantCIDR *net.IPNet) bool {
+ output, err := exec.Command("ip", "addr", "show", "cbr0").Output()
+ if err != nil {
+ return false
+ }
+ match := cidrRegexp.FindSubmatch(output)
+ if len(match) < 2 {
+ return false
+ }
+ cbr0IP, cbr0CIDR, err := net.ParseCIDR(string(match[1]))
+ if err != nil {
+ glog.Errorf("Couldn't parse CIDR: %q", match[1])
+ return false
+ }
+ cbr0CIDR.IP = cbr0IP
+
+ glog.V(5).Infof("Want cbr0 CIDR: %s, have cbr0 CIDR: %s", wantCIDR, cbr0CIDR)
+ return wantCIDR.IP.Equal(cbr0IP) && bytes.Equal(wantCIDR.Mask, cbr0CIDR.Mask)
+}
+
+// TODO(dawnchen): Using pkg/util/iptables
+// nonMasqueradeCIDR is the CIDR for our internal IP range; traffic to IPs outside this range will use IP masquerade.
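+// The rule managed below is equivalent to running (sketch; 10.0.0.0/8 is a
+// sample CIDR, not a default):
+//
+//	iptables -t nat -A POSTROUTING ! -d 10.0.0.0/8 \
+//	    -m addrtype ! --dst-type LOCAL -j MASQUERADE
+//
+// i.e. masquerade traffic leaving the node for any non-cluster, non-local
+// destination.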
+func ensureIPTablesMasqRule(nonMasqueradeCIDR string) error {
+ // Check whether the MASQUERADE rule already exists
+ if err := exec.Command("iptables",
+ "-t", "nat",
+ "-C", "POSTROUTING",
+ "!", "-d", nonMasqueradeCIDR,
+ "-m", "addrtype", "!", "--dst-type", "LOCAL",
+ "-j", "MASQUERADE").Run(); err == nil {
+ // The MASQUERADE rule exists
+ return nil
+ }
+
+ glog.Infof("MASQUERADE rule doesn't exist, recreating it (with nonMasqueradeCIDR %s)", nonMasqueradeCIDR)
+ if err := exec.Command("iptables",
+ "-t", "nat",
+ "-A", "POSTROUTING",
+ "!", "-d", nonMasqueradeCIDR,
+ "-m", "addrtype", "!", "--dst-type", "LOCAL",
+ "-j", "MASQUERADE").Run(); err != nil {
+ return err
+ }
+ return nil
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/custommetrics/custom_metrics.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/custommetrics/custom_metrics.go
new file mode 100644
index 000000000000..05a628a440bc
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/custommetrics/custom_metrics.go
@@ -0,0 +1,48 @@
+/*
+Copyright 2015 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package custommetrics contains support for instrumenting cAdvisor to gather custom metrics from pods.
+package custommetrics
+
+import (
+ "path"
+
+ "k8s.io/kubernetes/pkg/api"
+)
+
+const (
+ CustomMetricsDefinitionContainerFile = "definition.json"
+
+ CustomMetricsDefinitionDir = "/etc/custom-metrics"
+)
+
+// GetCAdvisorCustomMetricsDefinitionPath returns a path to a cAdvisor-specific
+// custom metrics configuration (alpha implementation).
+func GetCAdvisorCustomMetricsDefinitionPath(container *api.Container) (*string, error) {
+ // Assumes that the container has Custom Metrics enabled if it has the
+ // "/etc/custom-metrics" directory mounted as a volume. The Custom Metrics
+ // definition is expected to be in "definition.json".
+ if container.VolumeMounts != nil {
+ for _, volumeMount := range container.VolumeMounts {
+ if path.Clean(volumeMount.MountPath) == path.Clean(CustomMetricsDefinitionDir) {
+ // TODO: add definition file validation.
+ definitionPath := path.Clean(path.Join(volumeMount.MountPath, CustomMetricsDefinitionContainerFile))
+ return &definitionPath, nil
+ }
+ }
+ }
+ // No Custom Metrics definition available.
+ return nil, nil
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/custommetrics/custom_metrics_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/custommetrics/custom_metrics_test.go
new file mode 100644
index 000000000000..54892ad1bd0a
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/custommetrics/custom_metrics_test.go
@@ -0,0 +1,48 @@
+/*
+Copyright 2015 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package custommetrics + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "k8s.io/kubernetes/pkg/api" +) + +func TestGetCAdvisorCustomMetricsDefinitionPath(t *testing.T) { + + regularContainer := &api.Container{ + Name: "test_container", + } + + cmContainer := &api.Container{ + Name: "test_container", + VolumeMounts: []api.VolumeMount{ + { + Name: "cm", + MountPath: CustomMetricsDefinitionDir, + }, + }, + } + path, err := GetCAdvisorCustomMetricsDefinitionPath(regularContainer) + assert.Nil(t, path) + assert.NoError(t, err) + + path, err = GetCAdvisorCustomMetricsDefinitionPath(cmContainer) + assert.NotEmpty(t, *path) + assert.NoError(t, err) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/disk_manager.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/disk_manager.go new file mode 100644 index 000000000000..edf749b4f8b8 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/disk_manager.go @@ -0,0 +1,137 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubelet + +import ( + "fmt" + "sync" + "time" + + "github.com/golang/glog" + cadvisorapi "github.com/google/cadvisor/info/v2" + "k8s.io/kubernetes/pkg/kubelet/cadvisor" +) + +// Manages policy for diskspace management for disks holding docker images and root fs. + +// mb is used to easily convert an int to an mb +const mb = 1024 * 1024 + +// Implementation is thread-safe. +type diskSpaceManager interface { + // Checks the available disk space + IsRootDiskSpaceAvailable() (bool, error) + IsRuntimeDiskSpaceAvailable() (bool, error) +} + +type DiskSpacePolicy struct { + // free disk space threshold for filesystem holding docker images. + DockerFreeDiskMB int + // free disk space threshold for root filesystem. Host volumes are created on root fs. + RootFreeDiskMB int +} + +type fsInfo struct { + Usage int64 + Capacity int64 + Available int64 + Timestamp time.Time +} + +type realDiskSpaceManager struct { + cadvisor cadvisor.Interface + cachedInfo map[string]fsInfo // cache of filesystem info. + lock sync.Mutex // protecting cachedInfo. + policy DiskSpacePolicy // thresholds. Set at creation time. 
+} + +func (dm *realDiskSpaceManager) getFsInfo(fsType string, f func() (cadvisorapi.FsInfo, error)) (fsInfo, error) { + dm.lock.Lock() + defer dm.lock.Unlock() + fsi := fsInfo{} + if info, ok := dm.cachedInfo[fsType]; ok { + timeLimit := time.Now().Add(-2 * time.Second) + if info.Timestamp.After(timeLimit) { + fsi = info + } + } + if fsi.Timestamp.IsZero() { + fs, err := f() + if err != nil { + return fsInfo{}, err + } + fsi.Timestamp = time.Now() + fsi.Usage = int64(fs.Usage) + fsi.Capacity = int64(fs.Capacity) + fsi.Available = int64(fs.Available) + dm.cachedInfo[fsType] = fsi + } + return fsi, nil +} + +func (dm *realDiskSpaceManager) IsRuntimeDiskSpaceAvailable() (bool, error) { + return dm.isSpaceAvailable("runtime", dm.policy.DockerFreeDiskMB, dm.cadvisor.ImagesFsInfo) +} + +func (dm *realDiskSpaceManager) IsRootDiskSpaceAvailable() (bool, error) { + return dm.isSpaceAvailable("root", dm.policy.RootFreeDiskMB, dm.cadvisor.RootFsInfo) +} + +func (dm *realDiskSpaceManager) isSpaceAvailable(fsType string, threshold int, f func() (cadvisorapi.FsInfo, error)) (bool, error) { + fsInfo, err := dm.getFsInfo(fsType, f) + if err != nil { + return true, fmt.Errorf("failed to get fs info for %q: %v", fsType, err) + } + if fsInfo.Capacity == 0 { + return true, fmt.Errorf("could not determine capacity for %q fs. Info: %+v", fsType, fsInfo) + } + if fsInfo.Available < 0 { + return true, fmt.Errorf("wrong available space for %q: %+v", fsType, fsInfo) + } + + if fsInfo.Available < int64(threshold)*mb { + glog.Infof("Running out of space on disk for %q: available %d MB, threshold %d MB", fsType, fsInfo.Available/mb, threshold) + return false, nil + } + return true, nil +} + +func validatePolicy(policy DiskSpacePolicy) error { + if policy.DockerFreeDiskMB < 0 { + return fmt.Errorf("free disk space should be non-negative. Invalid value %d for docker disk space threshold.", policy.DockerFreeDiskMB) + } + if policy.RootFreeDiskMB < 0 { + return fmt.Errorf("free disk space should be non-negative. Invalid value %d for root disk space threshold.", policy.RootFreeDiskMB) + } + return nil +} + +func newDiskSpaceManager(cadvisorInterface cadvisor.Interface, policy DiskSpacePolicy) (diskSpaceManager, error) { + // validate policy + err := validatePolicy(policy) + if err != nil { + return nil, err + } + + dm := &realDiskSpaceManager{ + cadvisor: cadvisorInterface, + policy: policy, + cachedInfo: map[string]fsInfo{}, + } + + return dm, nil +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/disk_manager_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/disk_manager_test.go new file mode 100644 index 000000000000..f9dba0bf1c30 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/disk_manager_test.go @@ -0,0 +1,295 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package kubelet + +import ( + "fmt" + "testing" + + cadvisorapi "github.com/google/cadvisor/info/v2" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + cadvisortest "k8s.io/kubernetes/pkg/kubelet/cadvisor/testing" +) + +func testPolicy() DiskSpacePolicy { + return DiskSpacePolicy{ + DockerFreeDiskMB: 250, + RootFreeDiskMB: 250, + } +} + +func setUp(t *testing.T) (*assert.Assertions, DiskSpacePolicy, *cadvisortest.Mock) { + assert := assert.New(t) + policy := testPolicy() + c := new(cadvisortest.Mock) + return assert, policy, c +} + +func TestValidPolicy(t *testing.T) { + assert, policy, c := setUp(t) + _, err := newDiskSpaceManager(c, policy) + assert.NoError(err) + + policy = testPolicy() + policy.DockerFreeDiskMB = -1 + _, err = newDiskSpaceManager(c, policy) + assert.Error(err) + + policy = testPolicy() + policy.RootFreeDiskMB = -1 + _, err = newDiskSpaceManager(c, policy) + assert.Error(err) +} + +func TestSpaceAvailable(t *testing.T) { + assert, policy, mockCadvisor := setUp(t) + dm, err := newDiskSpaceManager(mockCadvisor, policy) + assert.NoError(err) + + mockCadvisor.On("ImagesFsInfo").Return(cadvisorapi.FsInfo{ + Usage: 400 * mb, + Capacity: 1000 * mb, + Available: 600 * mb, + }, nil) + mockCadvisor.On("RootFsInfo").Return(cadvisorapi.FsInfo{ + Usage: 9 * mb, + Capacity: 10 * mb, + }, nil) + + ok, err := dm.IsRuntimeDiskSpaceAvailable() + assert.NoError(err) + assert.True(ok) + + ok, err = dm.IsRootDiskSpaceAvailable() + assert.NoError(err) + assert.False(ok) +} + +// TestIsRuntimeDiskSpaceAvailableWithSpace verifies IsRuntimeDiskSpaceAvailable results when +// space is available. +func TestIsRuntimeDiskSpaceAvailableWithSpace(t *testing.T) { + assert, policy, mockCadvisor := setUp(t) + dm, err := newDiskSpaceManager(mockCadvisor, policy) + require.NoError(t, err) + + // 500MB available + mockCadvisor.On("ImagesFsInfo").Return(cadvisorapi.FsInfo{ + Usage: 9500 * mb, + Capacity: 10000 * mb, + Available: 500 * mb, + }, nil) + + ok, err := dm.IsRuntimeDiskSpaceAvailable() + assert.NoError(err) + assert.True(ok) +} + +// TestIsRuntimeDiskSpaceAvailableWithoutSpace verifies IsRuntimeDiskSpaceAvailable results when +// space is not available. +func TestIsRuntimeDiskSpaceAvailableWithoutSpace(t *testing.T) { + // 1MB available + assert, policy, mockCadvisor := setUp(t) + mockCadvisor.On("ImagesFsInfo").Return(cadvisorapi.FsInfo{ + Usage: 999 * mb, + Capacity: 1000 * mb, + Available: 1 * mb, + }, nil) + + dm, err := newDiskSpaceManager(mockCadvisor, policy) + require.NoError(t, err) + + ok, err := dm.IsRuntimeDiskSpaceAvailable() + assert.NoError(err) + assert.False(ok) +} + +// TestIsRootDiskSpaceAvailableWithSpace verifies IsRootDiskSpaceAvailable results when +// space is available. +func TestIsRootDiskSpaceAvailableWithSpace(t *testing.T) { + assert, policy, mockCadvisor := setUp(t) + policy.RootFreeDiskMB = 10 + dm, err := newDiskSpaceManager(mockCadvisor, policy) + assert.NoError(err) + + // 999MB available + mockCadvisor.On("RootFsInfo").Return(cadvisorapi.FsInfo{ + Usage: 1 * mb, + Capacity: 1000 * mb, + Available: 999 * mb, + }, nil) + + ok, err := dm.IsRootDiskSpaceAvailable() + assert.NoError(err) + assert.True(ok) +} + +// TestIsRootDiskSpaceAvailableWithoutSpace verifies IsRootDiskSpaceAvailable results when +// space is not available. 
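+// The arithmetic exercised here: the policy threshold is 10 MB while the
+// mocked filesystem reports only 9 MB available, so 9*mb < 10*mb and the
+// manager must report that space is not available.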
+func TestIsRootDiskSpaceAvailableWithoutSpace(t *testing.T) { + assert, policy, mockCadvisor := setUp(t) + policy.RootFreeDiskMB = 10 + dm, err := newDiskSpaceManager(mockCadvisor, policy) + assert.NoError(err) + + // 9MB available + mockCadvisor.On("RootFsInfo").Return(cadvisorapi.FsInfo{ + Usage: 990 * mb, + Capacity: 1000 * mb, + Available: 9 * mb, + }, nil) + + ok, err := dm.IsRootDiskSpaceAvailable() + assert.NoError(err) + assert.False(ok) +} + +// TestCache verifies that caching works properly with DiskSpaceAvailable calls +func TestCache(t *testing.T) { + assert, policy, mockCadvisor := setUp(t) + dm, err := newDiskSpaceManager(mockCadvisor, policy) + assert.NoError(err) + + mockCadvisor.On("ImagesFsInfo").Return(cadvisorapi.FsInfo{ + Usage: 400 * mb, + Capacity: 1000 * mb, + Available: 300 * mb, + }, nil).Once() + mockCadvisor.On("RootFsInfo").Return(cadvisorapi.FsInfo{ + Usage: 500 * mb, + Capacity: 1000 * mb, + Available: 500 * mb, + }, nil).Once() + + // Initial calls which should be recorded in mockCadvisor + ok, err := dm.IsRuntimeDiskSpaceAvailable() + assert.NoError(err) + assert.True(ok) + + ok, err = dm.IsRootDiskSpaceAvailable() + assert.NoError(err) + assert.True(ok) + + // Get the current count of calls to mockCadvisor + cadvisorCallCount := len(mockCadvisor.Calls) + + // Checking for space again shouldn't need to mock as cache would serve it. + ok, err = dm.IsRuntimeDiskSpaceAvailable() + assert.NoError(err) + assert.True(ok) + + ok, err = dm.IsRootDiskSpaceAvailable() + assert.NoError(err) + assert.True(ok) + + // Ensure no more calls to the mockCadvisor occurred + assert.Equal(cadvisorCallCount, len(mockCadvisor.Calls)) +} + +// TestFsInfoError verifies errors are returned by DiskSpaceAvailable calls +// when FsInfo calls return an error +func TestFsInfoError(t *testing.T) { + assert, policy, mockCadvisor := setUp(t) + policy.RootFreeDiskMB = 10 + dm, err := newDiskSpaceManager(mockCadvisor, policy) + assert.NoError(err) + + mockCadvisor.On("ImagesFsInfo").Return(cadvisorapi.FsInfo{}, fmt.Errorf("can't find fs")) + mockCadvisor.On("RootFsInfo").Return(cadvisorapi.FsInfo{}, fmt.Errorf("EBUSY")) + ok, err := dm.IsRuntimeDiskSpaceAvailable() + assert.Error(err) + assert.True(ok) + ok, err = dm.IsRootDiskSpaceAvailable() + assert.Error(err) + assert.True(ok) +} + +// Test_getFSInfo verifies multiple possible cases for getFsInfo. 
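+// It walks the cases labeled in the body: the sunny-day read, the
+// at-threshold filesystem, the frozen case (inconsistent filesystem numbers),
+// and the zero-capacity error.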
+func Test_getFsInfo(t *testing.T) { + assert, policy, mockCadvisor := setUp(t) + + // Sunny day case + mockCadvisor.On("RootFsInfo").Return(cadvisorapi.FsInfo{ + Usage: 10 * mb, + Capacity: 100 * mb, + Available: 90 * mb, + }, nil).Once() + + dm := &realDiskSpaceManager{ + cadvisor: mockCadvisor, + policy: policy, + cachedInfo: map[string]fsInfo{}, + } + + available, err := dm.isSpaceAvailable("root", 10, dm.cadvisor.RootFsInfo) + assert.True(available) + assert.NoError(err) + + // Threshold case + mockCadvisor = new(cadvisortest.Mock) + mockCadvisor.On("RootFsInfo").Return(cadvisorapi.FsInfo{ + Usage: 9 * mb, + Capacity: 100 * mb, + Available: 9 * mb, + }, nil).Once() + + dm = &realDiskSpaceManager{ + cadvisor: mockCadvisor, + policy: policy, + cachedInfo: map[string]fsInfo{}, + } + available, err = dm.isSpaceAvailable("root", 10, dm.cadvisor.RootFsInfo) + assert.False(available) + assert.NoError(err) + + // Frozen case + mockCadvisor.On("RootFsInfo").Return(cadvisorapi.FsInfo{ + Usage: 9 * mb, + Capacity: 10 * mb, + Available: 500 * mb, + }, nil).Once() + + dm = &realDiskSpaceManager{ + cadvisor: mockCadvisor, + policy: policy, + cachedInfo: map[string]fsInfo{}, + } + available, err = dm.isSpaceAvailable("root", 10, dm.cadvisor.RootFsInfo) + assert.True(available) + assert.NoError(err) + + // Capacity error case + mockCadvisor = new(cadvisortest.Mock) + mockCadvisor.On("RootFsInfo").Return(cadvisorapi.FsInfo{ + Usage: 9 * mb, + Capacity: 0, + Available: 500 * mb, + }, nil).Once() + + dm = &realDiskSpaceManager{ + cadvisor: mockCadvisor, + policy: policy, + cachedInfo: map[string]fsInfo{}, + } + available, err = dm.isSpaceAvailable("root", 10, dm.cadvisor.RootFsInfo) + assert.True(available) + assert.Error(err) + assert.Contains(fmt.Sprintf("%s", err), "could not determine capacity") + + // Available error case skipped as v2.FSInfo uses uint64 and this + // can not be less than 0 +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/doc.go new file mode 100644 index 000000000000..8fd7b3b3df37 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package kubelet is the package that contains the libraries that drive the Kubelet binary. +// The kubelet is responsible for node level pod management. It runs on each worker in the cluster. +package kubelet diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/dockertools/container_gc.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/dockertools/container_gc.go new file mode 100644 index 000000000000..22319f7fdbe4 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/dockertools/container_gc.go @@ -0,0 +1,257 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dockertools + +import ( + "fmt" + "os" + "path" + "path/filepath" + "sort" + "time" + + dockertypes "github.com/docker/engine-api/types" + "github.com/golang/glog" + kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" + "k8s.io/kubernetes/pkg/types" +) + +type containerGC struct { + client DockerInterface + podGetter podGetter + containerLogsDir string +} + +func NewContainerGC(client DockerInterface, podGetter podGetter, containerLogsDir string) *containerGC { + return &containerGC{ + client: client, + podGetter: podGetter, + containerLogsDir: containerLogsDir, + } +} + +// Internal information kept for containers being considered for GC. +type containerGCInfo struct { + // Docker ID of the container. + id string + + // Docker name of the container. + name string + + // Creation time for the container. + createTime time.Time + + // Full pod name, including namespace in the format `namespace_podName`. + // This comes from dockertools.ParseDockerName(...) + podNameWithNamespace string + + // Container name in pod + containerName string +} + +// Containers are considered for eviction as units of (UID, container name) pair. +type evictUnit struct { + // UID of the pod. + uid types.UID + + // Name of the container in the pod. + name string +} + +type containersByEvictUnit map[evictUnit][]containerGCInfo + +// Returns the number of containers in this map. +func (cu containersByEvictUnit) NumContainers() int { + num := 0 + for key := range cu { + num += len(cu[key]) + } + + return num +} + +// Returns the number of pod in this map. +func (cu containersByEvictUnit) NumEvictUnits() int { + return len(cu) +} + +// Newest first. +type byCreated []containerGCInfo + +func (a byCreated) Len() int { return len(a) } +func (a byCreated) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byCreated) Less(i, j int) bool { return a[i].createTime.After(a[j].createTime) } + +func (cgc *containerGC) enforceMaxContainersPerEvictUnit(evictUnits containersByEvictUnit, MaxContainers int) { + for uid := range evictUnits { + toRemove := len(evictUnits[uid]) - MaxContainers + + if toRemove > 0 { + evictUnits[uid] = cgc.removeOldestN(evictUnits[uid], toRemove) + } + } +} + +// Removes the oldest toRemove containers and returns the resulting slice. +func (cgc *containerGC) removeOldestN(containers []containerGCInfo, toRemove int) []containerGCInfo { + // Remove from oldest to newest (last to first). 
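+ // Illustrative: with len(containers) == 5 and toRemove == 2, numToKeep is 3;
+ // because each slice is sorted newest-first, entries 3 and 4 (the two
+ // oldest) are removed and containers[:3] is returned.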
+ numToKeep := len(containers) - toRemove + for i := numToKeep; i < len(containers); i++ { + err := cgc.client.RemoveContainer(containers[i].id, dockertypes.ContainerRemoveOptions{RemoveVolumes: true}) + if err != nil { + glog.Warningf("Failed to remove dead container %q: %v", containers[i].name, err) + } + symlinkPath := LogSymlink(cgc.containerLogsDir, containers[i].podNameWithNamespace, containers[i].containerName, containers[i].id) + err = os.Remove(symlinkPath) + if err != nil && !os.IsNotExist(err) { + glog.Warningf("Failed to remove container %q log symlink %q: %v", containers[i].name, symlinkPath, err) + } + } + + // Assume we removed the containers so that we're not too aggressive. + return containers[:numToKeep] +} + +// Get all containers that are evictable. Evictable containers are: not running +// and created more than MinAge ago. +func (cgc *containerGC) evictableContainers(minAge time.Duration) (containersByEvictUnit, []containerGCInfo, error) { + containers, err := GetKubeletDockerContainers(cgc.client, true) + if err != nil { + return containersByEvictUnit{}, []containerGCInfo{}, err + } + + unidentifiedContainers := make([]containerGCInfo, 0) + evictUnits := make(containersByEvictUnit) + newestGCTime := time.Now().Add(-minAge) + for _, container := range containers { + // Prune out running containers. + data, err := cgc.client.InspectContainer(container.ID) + if err != nil { + // Container may have been removed already, skip. + continue + } else if data.State.Running { + continue + } + + created, err := parseDockerTimestamp(data.Created) + if err != nil { + glog.Errorf("Failed to parse Created timestamp %q for container %q", data.Created, container.ID) + } + if newestGCTime.Before(created) { + continue + } + + containerInfo := containerGCInfo{ + id: container.ID, + name: container.Names[0], + createTime: created, + } + + containerName, _, err := ParseDockerName(container.Names[0]) + + if err != nil { + unidentifiedContainers = append(unidentifiedContainers, containerInfo) + } else { + key := evictUnit{ + uid: containerName.PodUID, + name: containerName.ContainerName, + } + containerInfo.podNameWithNamespace = containerName.PodFullName + containerInfo.containerName = containerName.ContainerName + evictUnits[key] = append(evictUnits[key], containerInfo) + } + } + + // Sort the containers by age. + for uid := range evictUnits { + sort.Sort(byCreated(evictUnits[uid])) + } + + return evictUnits, unidentifiedContainers, nil +} + +// GarbageCollect removes dead containers using the specified container gc policy +func (cgc *containerGC) GarbageCollect(gcPolicy kubecontainer.ContainerGCPolicy) error { + // Separate containers by evict units. + evictUnits, unidentifiedContainers, err := cgc.evictableContainers(gcPolicy.MinAge) + if err != nil { + return err + } + + // Remove unidentified containers. + for _, container := range unidentifiedContainers { + glog.Infof("Removing unidentified dead container %q with ID %q", container.name, container.id) + err = cgc.client.RemoveContainer(container.id, dockertypes.ContainerRemoveOptions{RemoveVolumes: true}) + if err != nil { + glog.Warningf("Failed to remove unidentified dead container %q: %v", container.name, err) + } + } + + // Remove deleted pod containers. + for key, unit := range evictUnits { + if cgc.isPodDeleted(key.uid) { + cgc.removeOldestN(unit, len(unit)) // Remove all. + delete(evictUnits, key) + } + } + + // Enforce max containers per evict unit. 
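+ // Illustrative: with MaxPerPodContainer == 2, an evict unit holding five
+ // dead containers keeps only its two newest; removeOldestN drops the rest.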
+ if gcPolicy.MaxPerPodContainer >= 0 { + cgc.enforceMaxContainersPerEvictUnit(evictUnits, gcPolicy.MaxPerPodContainer) + } + + // Enforce max total number of containers. + if gcPolicy.MaxContainers >= 0 && evictUnits.NumContainers() > gcPolicy.MaxContainers { + // Leave an equal number of containers per evict unit (min: 1). + numContainersPerEvictUnit := gcPolicy.MaxContainers / evictUnits.NumEvictUnits() + if numContainersPerEvictUnit < 1 { + numContainersPerEvictUnit = 1 + } + cgc.enforceMaxContainersPerEvictUnit(evictUnits, numContainersPerEvictUnit) + + // If we still need to evict, evict oldest first. + numContainers := evictUnits.NumContainers() + if numContainers > gcPolicy.MaxContainers { + flattened := make([]containerGCInfo, 0, numContainers) + for uid := range evictUnits { + flattened = append(flattened, evictUnits[uid]...) + } + sort.Sort(byCreated(flattened)) + + cgc.removeOldestN(flattened, numContainers-gcPolicy.MaxContainers) + } + } + + // Remove dead symlinks - should only happen on upgrade + // from a k8s version without proper log symlink cleanup + logSymlinks, _ := filepath.Glob(path.Join(cgc.containerLogsDir, fmt.Sprintf("*.%s", LogSuffix))) + for _, logSymlink := range logSymlinks { + if _, err = os.Stat(logSymlink); os.IsNotExist(err) { + err = os.Remove(logSymlink) + if err != nil { + glog.Warningf("Failed to remove container log dead symlink %q: %v", logSymlink, err) + } + } + } + + return nil +} + +func (cgc *containerGC) isPodDeleted(podUID types.UID) bool { + _, found := cgc.podGetter.GetPodByUID(podUID) + return !found +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/dockertools/container_gc_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/dockertools/container_gc_test.go new file mode 100644 index 000000000000..36630ebed340 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/dockertools/container_gc_test.go @@ -0,0 +1,246 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dockertools + +import ( + "fmt" + "reflect" + "sort" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "k8s.io/kubernetes/pkg/api" + kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" + "k8s.io/kubernetes/pkg/types" +) + +func newTestContainerGC(t *testing.T) (*containerGC, *FakeDockerClient) { + fakeDocker := new(FakeDockerClient) + fakePodGetter := newFakePodGetter() + gc := NewContainerGC(fakeDocker, fakePodGetter, "") + return gc, fakeDocker +} + +// Makes a stable time object, lower id is earlier time. +func makeTime(id int) time.Time { + var zero time.Time + return zero.Add(time.Duration(id) * time.Second) +} + +// Makes a container with the specified properties. 
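+// The generated docker name follows the dockertools convention
+// /k8s_<container>_<podName>_<namespace>_<podUID>_<attempt>; the pod name,
+// namespace, and attempt are fixed here to "bar", "new", and 42.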
+func makeContainer(id, uid, name string, running bool, created time.Time) *FakeContainer { + return &FakeContainer{ + Name: fmt.Sprintf("/k8s_%s_bar_new_%s_42", name, uid), + Running: running, + ID: id, + CreatedAt: created, + } +} + +// Makes a container with unidentified name and specified properties. +func makeUndefinedContainer(id string, running bool, created time.Time) *FakeContainer { + return &FakeContainer{ + Name: "/k8s_unidentified", + Running: running, + ID: id, + CreatedAt: created, + } +} + +func addPods(podGetter podGetter, podUIDs ...types.UID) { + fakePodGetter := podGetter.(*fakePodGetter) + for _, uid := range podUIDs { + fakePodGetter.pods[uid] = &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: "pod" + string(uid), + Namespace: "test", + UID: uid, + }, + } + } +} + +func verifyStringArrayEqualsAnyOrder(t *testing.T, actual, expected []string) { + act := make([]string, len(actual)) + exp := make([]string, len(expected)) + copy(act, actual) + copy(exp, expected) + + sort.StringSlice(act).Sort() + sort.StringSlice(exp).Sort() + + if !reflect.DeepEqual(exp, act) { + t.Errorf("Expected(sorted): %#v, Actual(sorted): %#v", exp, act) + } +} + +func TestGarbageCollectZeroMaxContainers(t *testing.T) { + gc, fakeDocker := newTestContainerGC(t) + fakeDocker.SetFakeContainers([]*FakeContainer{ + makeContainer("1876", "foo", "POD", false, makeTime(0)), + }) + addPods(gc.podGetter, "foo") + + assert.Nil(t, gc.GarbageCollect(kubecontainer.ContainerGCPolicy{MinAge: time.Minute, MaxPerPodContainer: 1, MaxContainers: 0})) + assert.Len(t, fakeDocker.Removed, 1) +} + +func TestGarbageCollectNoMaxPerPodContainerLimit(t *testing.T) { + gc, fakeDocker := newTestContainerGC(t) + fakeDocker.SetFakeContainers([]*FakeContainer{ + makeContainer("1876", "foo", "POD", false, makeTime(0)), + makeContainer("2876", "foo1", "POD", false, makeTime(1)), + makeContainer("3876", "foo2", "POD", false, makeTime(2)), + makeContainer("4876", "foo3", "POD", false, makeTime(3)), + makeContainer("5876", "foo4", "POD", false, makeTime(4)), + }) + addPods(gc.podGetter, "foo", "foo1", "foo2", "foo3", "foo4") + + assert.Nil(t, gc.GarbageCollect(kubecontainer.ContainerGCPolicy{MinAge: time.Minute, MaxPerPodContainer: -1, MaxContainers: 4})) + assert.Len(t, fakeDocker.Removed, 1) +} + +func TestGarbageCollectNoMaxLimit(t *testing.T) { + gc, fakeDocker := newTestContainerGC(t) + fakeDocker.SetFakeContainers([]*FakeContainer{ + makeContainer("1876", "foo", "POD", false, makeTime(0)), + makeContainer("2876", "foo1", "POD", false, makeTime(0)), + makeContainer("3876", "foo2", "POD", false, makeTime(0)), + makeContainer("4876", "foo3", "POD", false, makeTime(0)), + makeContainer("5876", "foo4", "POD", false, makeTime(0)), + }) + addPods(gc.podGetter, "foo", "foo1", "foo2", "foo3", "foo4") + + assert.Len(t, fakeDocker.Removed, 0) +} + +func TestGarbageCollect(t *testing.T) { + tests := []struct { + containers []*FakeContainer + expectedRemoved []string + }{ + // Don't remove containers started recently. + { + containers: []*FakeContainer{ + makeContainer("1876", "foo", "POD", false, time.Now()), + makeContainer("2876", "foo", "POD", false, time.Now()), + makeContainer("3876", "foo", "POD", false, time.Now()), + }, + }, + // Remove oldest containers. 
+ { + containers: []*FakeContainer{ + makeContainer("1876", "foo", "POD", false, makeTime(0)), + makeContainer("2876", "foo", "POD", false, makeTime(1)), + makeContainer("3876", "foo", "POD", false, makeTime(2)), + }, + expectedRemoved: []string{"1876"}, + }, + // Only remove non-running containers. + { + containers: []*FakeContainer{ + makeContainer("1876", "foo", "POD", true, makeTime(0)), + makeContainer("2876", "foo", "POD", false, makeTime(1)), + makeContainer("3876", "foo", "POD", false, makeTime(2)), + makeContainer("4876", "foo", "POD", false, makeTime(3)), + }, + expectedRemoved: []string{"2876"}, + }, + // Less than maxContainerCount doesn't delete any. + { + containers: []*FakeContainer{ + makeContainer("1876", "foo", "POD", false, makeTime(0)), + }, + }, + // maxContainerCount applies per (UID,container) pair. + { + containers: []*FakeContainer{ + makeContainer("1876", "foo", "POD", false, makeTime(0)), + makeContainer("2876", "foo", "POD", false, makeTime(1)), + makeContainer("3876", "foo", "POD", false, makeTime(2)), + makeContainer("1076", "foo", "bar", false, makeTime(0)), + makeContainer("2076", "foo", "bar", false, makeTime(1)), + makeContainer("3076", "foo", "bar", false, makeTime(2)), + makeContainer("1176", "foo2", "POD", false, makeTime(0)), + makeContainer("2176", "foo2", "POD", false, makeTime(1)), + makeContainer("3176", "foo2", "POD", false, makeTime(2)), + }, + expectedRemoved: []string{"1076", "1176", "1876"}, + }, + // Remove non-running unidentified Kubernetes containers. + { + containers: []*FakeContainer{ + makeUndefinedContainer("1876", true, makeTime(0)), + makeUndefinedContainer("2876", false, makeTime(0)), + makeContainer("3876", "foo", "POD", false, makeTime(0)), + }, + expectedRemoved: []string{"2876"}, + }, + // Max limit applied and tries to keep from every pod. + { + containers: []*FakeContainer{ + makeContainer("1876", "foo", "POD", false, makeTime(0)), + makeContainer("2876", "foo", "POD", false, makeTime(1)), + makeContainer("3876", "foo1", "POD", false, makeTime(0)), + makeContainer("4876", "foo1", "POD", false, makeTime(1)), + makeContainer("5876", "foo2", "POD", false, makeTime(0)), + makeContainer("6876", "foo2", "POD", false, makeTime(1)), + makeContainer("7876", "foo3", "POD", false, makeTime(0)), + makeContainer("8876", "foo3", "POD", false, makeTime(1)), + makeContainer("9876", "foo4", "POD", false, makeTime(0)), + makeContainer("10876", "foo4", "POD", false, makeTime(1)), + }, + expectedRemoved: []string{"1876", "3876", "5876", "7876", "9876"}, + }, + // If more pods than limit allows, evicts oldest pod. + { + containers: []*FakeContainer{ + makeContainer("1876", "foo", "POD", false, makeTime(1)), + makeContainer("2876", "foo", "POD", false, makeTime(2)), + makeContainer("3876", "foo1", "POD", false, makeTime(1)), + makeContainer("4876", "foo1", "POD", false, makeTime(2)), + makeContainer("5876", "foo2", "POD", false, makeTime(0)), + makeContainer("6876", "foo3", "POD", false, makeTime(1)), + makeContainer("7876", "foo4", "POD", false, makeTime(0)), + makeContainer("8876", "foo5", "POD", false, makeTime(1)), + makeContainer("9876", "foo6", "POD", false, makeTime(2)), + makeContainer("10876", "foo7", "POD", false, makeTime(1)), + }, + expectedRemoved: []string{"1876", "3876", "5876", "7876"}, + }, + // Containers for deleted pods should be GC'd. 
+ { + containers: []*FakeContainer{ + makeContainer("1876", "foo", "POD", false, makeTime(1)), + makeContainer("2876", "foo", "POD", false, makeTime(2)), + makeContainer("3876", "deleted", "POD", false, makeTime(1)), + makeContainer("4876", "deleted", "POD", false, makeTime(2)), + makeContainer("5876", "deleted", "POD", false, time.Now()), // Deleted pods still respect MinAge. + }, + expectedRemoved: []string{"3876", "4876"}, + }, + } + for i, test := range tests { + t.Logf("Running test case with index %d", i) + gc, fakeDocker := newTestContainerGC(t) + fakeDocker.SetFakeContainers(test.containers) + addPods(gc.podGetter, "foo", "foo1", "foo2", "foo3", "foo4", "foo5", "foo6", "foo7") + assert.Nil(t, gc.GarbageCollect(kubecontainer.ContainerGCPolicy{MinAge: time.Hour, MaxPerPodContainer: 2, MaxContainers: 6})) + verifyStringArrayEqualsAnyOrder(t, fakeDocker.Removed, test.expectedRemoved) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/dockertools/convert.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/dockertools/convert.go new file mode 100644 index 000000000000..48c594ff1af2 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/dockertools/convert.go @@ -0,0 +1,83 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dockertools + +import ( + "fmt" + "strings" + + dockertypes "github.com/docker/engine-api/types" + kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" +) + +// This file contains helper functions to convert docker API types to runtime +// (kubecontainer) types. +const ( + statusRunningPrefix = "Up" + statusExitedPrefix = "Exited" +) + +func mapState(state string) kubecontainer.ContainerState { + // Parse the state string in dockertypes.Container. This could break when + // we upgrade docker. + switch { + case strings.HasPrefix(state, statusRunningPrefix): + return kubecontainer.ContainerStateRunning + case strings.HasPrefix(state, statusExitedPrefix): + return kubecontainer.ContainerStateExited + default: + return kubecontainer.ContainerStateUnknown + } +} + +// Converts dockertypes.Container to kubecontainer.Container. +func toRuntimeContainer(c *dockertypes.Container) (*kubecontainer.Container, error) { + if c == nil { + return nil, fmt.Errorf("unable to convert a nil pointer to a runtime container") + } + + dockerName, hash, err := getDockerContainerNameInfo(c) + if err != nil { + return nil, err + } + + return &kubecontainer.Container{ + ID: kubecontainer.DockerID(c.ID).ContainerID(), + Name: dockerName.ContainerName, + Image: c.Image, + Hash: hash, + // (random-liu) docker uses status to indicate whether a container is running or exited. + // However, in kubernetes we usually use state to indicate whether a container is running or exited, + // while use status to indicate the comprehensive status of the container. So we have different naming + // norm here. + State: mapState(c.Status), + }, nil +} + +// Converts dockertypes.Image to kubecontainer.Image. 
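+// A sketch of the field mapping (mirrored by TestToRuntimeImage in
+// convert_test.go): ID, RepoTags, and RepoDigests carry over unchanged, while
+// docker's VirtualSize becomes the runtime image's Size.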
+func toRuntimeImage(image *dockertypes.Image) (*kubecontainer.Image, error) { + if image == nil { + return nil, fmt.Errorf("unable to convert a nil pointer to a runtime image") + } + + return &kubecontainer.Image{ + ID: image.ID, + RepoTags: image.RepoTags, + RepoDigests: image.RepoDigests, + Size: image.VirtualSize, + }, nil +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/dockertools/convert_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/dockertools/convert_test.go new file mode 100644 index 000000000000..fbc398553068 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/dockertools/convert_test.go @@ -0,0 +1,90 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dockertools + +import ( + "reflect" + "testing" + + dockertypes "github.com/docker/engine-api/types" + kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" +) + +func TestMapState(t *testing.T) { + testCases := []struct { + input string + expected kubecontainer.ContainerState + }{ + {input: "Up 5 hours", expected: kubecontainer.ContainerStateRunning}, + {input: "Exited (0) 2 hours ago", expected: kubecontainer.ContainerStateExited}, + {input: "Created", expected: kubecontainer.ContainerStateUnknown}, + {input: "Random string", expected: kubecontainer.ContainerStateUnknown}, + } + + for i, test := range testCases { + if actual := mapState(test.input); actual != test.expected { + t.Errorf("Test[%d]: expected %q, got %q", i, test.expected, actual) + } + } +} + +func TestToRuntimeContainer(t *testing.T) { + original := &dockertypes.Container{ + ID: "ab2cdf", + Image: "bar_image", + Names: []string{"/k8s_bar.5678_foo_ns_1234_42"}, + Status: "Up 5 hours", + } + expected := &kubecontainer.Container{ + ID: kubecontainer.ContainerID{Type: "docker", ID: "ab2cdf"}, + Name: "bar", + Image: "bar_image", + Hash: 0x5678, + State: kubecontainer.ContainerStateRunning, + } + + actual, err := toRuntimeContainer(original) + if err != nil { + t.Fatalf("unexpected error %v", err) + } + if !reflect.DeepEqual(expected, actual) { + t.Errorf("expected %#v, got %#v", expected, actual) + } +} + +func TestToRuntimeImage(t *testing.T) { + original := &dockertypes.Image{ + ID: "aeeea", + RepoTags: []string{"abc", "def"}, + RepoDigests: []string{"123", "456"}, + VirtualSize: 1234, + } + expected := &kubecontainer.Image{ + ID: "aeeea", + RepoTags: []string{"abc", "def"}, + RepoDigests: []string{"123", "456"}, + Size: 1234, + } + + actual, err := toRuntimeImage(original) + if err != nil { + t.Fatalf("unexpected error %v", err) + } + if !reflect.DeepEqual(expected, actual) { + t.Errorf("expected %#v, got %#v", expected, actual) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/dockertools/docker.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/dockertools/docker.go new file mode 100644 index 000000000000..d265db512500 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/dockertools/docker.go @@ -0,0 +1,393 
@@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dockertools + +import ( + "fmt" + "math/rand" + "net/http" + "path" + "strconv" + "strings" + + dockerref "github.com/docker/distribution/reference" + "github.com/docker/docker/pkg/jsonmessage" + dockerapi "github.com/docker/engine-api/client" + dockertypes "github.com/docker/engine-api/types" + "github.com/golang/glog" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/credentialprovider" + kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" + "k8s.io/kubernetes/pkg/kubelet/leaky" + "k8s.io/kubernetes/pkg/types" + utilerrors "k8s.io/kubernetes/pkg/util/errors" + "k8s.io/kubernetes/pkg/util/flowcontrol" + "k8s.io/kubernetes/pkg/util/parsers" +) + +const ( + PodInfraContainerName = leaky.PodInfraContainerName + DockerPrefix = "docker://" + LogSuffix = "log" +) + +const ( + // Taken from lmctfy https://github.com/google/lmctfy/blob/master/lmctfy/controllers/cpu_controller.cc + minShares = 2 + sharesPerCPU = 1024 + milliCPUToCPU = 1000 + + // 100000 is equivalent to 100ms + quotaPeriod = 100000 + minQuotaPerod = 1000 +) + +// DockerInterface is an abstract interface for testability. It abstracts the interface of docker client. +type DockerInterface interface { + ListContainers(options dockertypes.ContainerListOptions) ([]dockertypes.Container, error) + InspectContainer(id string) (*dockertypes.ContainerJSON, error) + CreateContainer(dockertypes.ContainerCreateConfig) (*dockertypes.ContainerCreateResponse, error) + StartContainer(id string) error + StopContainer(id string, timeout int) error + RemoveContainer(id string, opts dockertypes.ContainerRemoveOptions) error + InspectImage(image string) (*dockertypes.ImageInspect, error) + ListImages(opts dockertypes.ImageListOptions) ([]dockertypes.Image, error) + PullImage(image string, auth dockertypes.AuthConfig, opts dockertypes.ImagePullOptions) error + RemoveImage(image string, opts dockertypes.ImageRemoveOptions) ([]dockertypes.ImageDelete, error) + ImageHistory(id string) ([]dockertypes.ImageHistory, error) + Logs(string, dockertypes.ContainerLogsOptions, StreamOptions) error + Version() (*dockertypes.Version, error) + Info() (*dockertypes.Info, error) + CreateExec(string, dockertypes.ExecConfig) (*dockertypes.ContainerExecCreateResponse, error) + StartExec(string, dockertypes.ExecStartCheck, StreamOptions) error + InspectExec(id string) (*dockertypes.ContainerExecInspect, error) + AttachToContainer(string, dockertypes.ContainerAttachOptions, StreamOptions) error +} + +// KubeletContainerName encapsulates a pod name and a Kubernetes container name. +type KubeletContainerName struct { + PodFullName string + PodUID types.UID + ContainerName string +} + +// containerNamePrefix is used to identify the containers on the node managed by this +// process. +var containerNamePrefix = "k8s" + +// SetContainerNamePrefix allows the container prefix name for this process to be changed. 
+// This is intended to support testing and bootstrapping experimentation. It cannot be +// changed once the Kubelet starts. +func SetContainerNamePrefix(prefix string) { + containerNamePrefix = prefix +} + +// DockerPuller is an abstract interface for testability. It abstracts image pull operations. +type DockerPuller interface { + Pull(image string, secrets []api.Secret) error + IsImagePresent(image string) (bool, error) +} + +// dockerPuller is the default implementation of DockerPuller. +type dockerPuller struct { + client DockerInterface + keyring credentialprovider.DockerKeyring +} + +type throttledDockerPuller struct { + puller dockerPuller + limiter flowcontrol.RateLimiter +} + +// newDockerPuller creates a new instance of the default implementation of DockerPuller. +func newDockerPuller(client DockerInterface, qps float32, burst int) DockerPuller { + dp := dockerPuller{ + client: client, + keyring: credentialprovider.NewDockerKeyring(), + } + + if qps == 0.0 { + return dp + } + return &throttledDockerPuller{ + puller: dp, + limiter: flowcontrol.NewTokenBucketRateLimiter(qps, burst), + } +} + +func filterHTTPError(err error, image string) error { + // docker/docker/pull/11314 prints detailed error info for docker pull. + // When it hits 502, it returns a verbose html output including an inline svg, + // which makes the output of kubectl get pods much harder to parse. + // Here converts such verbose output to a concise one. + jerr, ok := err.(*jsonmessage.JSONError) + if ok && (jerr.Code == http.StatusBadGateway || + jerr.Code == http.StatusServiceUnavailable || + jerr.Code == http.StatusGatewayTimeout) { + glog.V(2).Infof("Pulling image %q failed: %v", image, err) + return kubecontainer.RegistryUnavailable + } else { + return err + } +} + +// applyDefaultImageTag parses a docker image string, if it doesn't contain any tag or digest, +// a default tag will be applied. +func applyDefaultImageTag(image string) (string, error) { + named, err := dockerref.ParseNamed(image) + if err != nil { + return "", fmt.Errorf("couldn't parse image reference %q: %v", image, err) + } + _, isTagged := named.(dockerref.Tagged) + _, isDigested := named.(dockerref.Digested) + if !isTagged && !isDigested { + named, err := dockerref.WithTag(named, parsers.DefaultImageTag) + if err != nil { + return "", fmt.Errorf("failed to apply default image tag %q: %v", image, err) + } + image = named.String() + } + return image, nil +} + +func (p dockerPuller) Pull(image string, secrets []api.Secret) error { + // If the image contains no tag or digest, a default tag should be applied. + image, err := applyDefaultImageTag(image) + if err != nil { + return err + } + + keyring, err := credentialprovider.MakeDockerKeyring(secrets, p.keyring) + if err != nil { + return err + } + + // The only used image pull option RegistryAuth will be set in kube_docker_client + opts := dockertypes.ImagePullOptions{} + + creds, haveCredentials := keyring.Lookup(image) + if !haveCredentials { + glog.V(1).Infof("Pulling image %s without credentials", image) + + err := p.client.PullImage(image, dockertypes.AuthConfig{}, opts) + if err == nil { + // Sometimes PullImage failed with no error returned. 
+            exist, ierr := p.IsImagePresent(image)
+            if ierr != nil {
+                glog.Warningf("Failed to inspect image %s: %v", image, ierr)
+            }
+            if !exist {
+                return fmt.Errorf("image pull failed for unknown error")
+            }
+            return nil
+        }
+
+        // Image spec: [<registry>/]<repository>/<image>[:<version>], so we count '/'
+        explicitRegistry := (strings.Count(image, "/") == 2)
+        // Hack, look for a private registry, and decorate the error with the lack of
+        // credentials. This is heuristic, and really probably could be done better
+        // by talking to the registry API directly from the kubelet here.
+        if explicitRegistry {
+            return fmt.Errorf("image pull failed for %s, this may be because there are no credentials on this request. details: (%v)", image, err)
+        }
+
+        return filterHTTPError(err, image)
+    }
+
+    var pullErrs []error
+    for _, currentCreds := range creds {
+        err := p.client.PullImage(image, credentialprovider.LazyProvide(currentCreds), opts)
+        // If there was no error, return success
+        if err == nil {
+            return nil
+        }
+
+        pullErrs = append(pullErrs, filterHTTPError(err, image))
+    }
+
+    return utilerrors.NewAggregate(pullErrs)
+}
+
+func (p throttledDockerPuller) Pull(image string, secrets []api.Secret) error {
+    if p.limiter.TryAccept() {
+        return p.puller.Pull(image, secrets)
+    }
+    return fmt.Errorf("pull QPS exceeded.")
+}
+
+func (p dockerPuller) IsImagePresent(image string) (bool, error) {
+    _, err := p.client.InspectImage(image)
+    if err == nil {
+        return true, nil
+    }
+    if _, ok := err.(imageNotFoundError); ok {
+        return false, nil
+    }
+    return false, err
+}
+
+// Creates a name which can be reversed to identify both full pod name and container name.
+// This function returns stable name, unique name and a unique id.
+// Although rand.Uint32() is not really unique, it's enough for us because an error will
+// only occur when instances of the same container in the same pod have the same UID. The
+// chance is really slim.
+func BuildDockerName(dockerName KubeletContainerName, container *api.Container) (string, string, string) {
+    containerName := dockerName.ContainerName + "." + strconv.FormatUint(kubecontainer.HashContainer(container), 16)
+    stableName := fmt.Sprintf("%s_%s_%s_%s",
+        containerNamePrefix,
+        containerName,
+        dockerName.PodFullName,
+        dockerName.PodUID)
+    UID := fmt.Sprintf("%08x", rand.Uint32())
+    return stableName, fmt.Sprintf("%s_%s", stableName, UID), UID
+}
+
+// Unpacks a container name, returning the pod full name and container name we would have
+// used to construct the docker name. If we are unable to parse the name, an error is returned.
+func ParseDockerName(name string) (dockerName *KubeletContainerName, hash uint64, err error) {
+    // For some reason docker appears to be appending '/' to names.
+    // If it's there, strip it.
+    name = strings.TrimPrefix(name, "/")
+
+    parts := strings.Split(name, "_")
+    if len(parts) == 0 || parts[0] != containerNamePrefix {
+        err = fmt.Errorf("failed to parse Docker container name %q into parts", name)
+        return nil, 0, err
+    }
+    if len(parts) < 6 {
+        // We have at least 5 fields. We may have more in the future.
+        // Anything with fewer fields than this is not something we can
+        // manage.
+        glog.Warningf("found a container with the %q prefix, but too few fields (%d): %q", containerNamePrefix, len(parts), name)
+        err = fmt.Errorf("Docker container name %q has fewer parts than expected %v", name, parts)
+        return nil, 0, err
+    }
+
+    nameParts := strings.Split(parts[1], ".")
+    containerName := nameParts[0]
+    if len(nameParts) > 1 {
+        hash, err = strconv.ParseUint(nameParts[1], 16, 32)
+        if err != nil {
+            glog.Warningf("invalid container hash %q in container %q", nameParts[1], name)
+        }
+    }
+
+    podFullName := parts[2] + "_" + parts[3]
+    podUID := types.UID(parts[4])
+
+    return &KubeletContainerName{podFullName, podUID, containerName}, hash, nil
+}
+
+func LogSymlink(containerLogsDir, podFullName, containerName, dockerId string) string {
+    return path.Join(containerLogsDir, fmt.Sprintf("%s_%s-%s.%s", podFullName, containerName, dockerId, LogSuffix))
+}
+
+// Get a *dockerapi.Client, either using the endpoint passed in, or using
+// DOCKER_HOST, DOCKER_TLS_VERIFY, and DOCKER_CERT path per their spec
+func getDockerClient(dockerEndpoint string) (*dockerapi.Client, error) {
+    if len(dockerEndpoint) > 0 {
+        glog.Infof("Connecting to docker on %s", dockerEndpoint)
+        return dockerapi.NewClient(dockerEndpoint, "", nil, nil)
+    }
+    return dockerapi.NewEnvClient()
+}
+
+// ConnectToDockerOrDie creates a docker client connecting to the docker daemon.
+// If the endpoint passed in is "fake://", a fake docker client
+// will be returned. The program exits if an error occurs.
+func ConnectToDockerOrDie(dockerEndpoint string) DockerInterface {
+    if dockerEndpoint == "fake://" {
+        return NewFakeDockerClient()
+    }
+    client, err := getDockerClient(dockerEndpoint)
+    if err != nil {
+        glog.Fatalf("Couldn't connect to docker: %v", err)
+    }
+    return newKubeDockerClient(client)
+}
+
+// milliCPUToQuota converts milliCPU to CFS quota and period values
+func milliCPUToQuota(milliCPU int64) (quota int64, period int64) {
+    // CFS quota is measured in two values:
+    //  - cfs_period_us=100ms (the amount of time to measure usage across)
+    //  - cfs_quota=20ms (the amount of cpu time allowed to be used across a period)
+    // so in the above example, you are limited to 20% of a single CPU
+    // for multi-cpu environments, you just scale equivalent amounts
+
+    if milliCPU == 0 {
+        // take the default behavior from docker
+        return
+    }
+
+    // we set the period to 100ms by default
+    period = quotaPeriod
+
+    // we then convert your milliCPU to a value normalized over a period
+    quota = (milliCPU * quotaPeriod) / milliCPUToCPU
+
+    // quota needs to be a minimum of 1ms.
+    if quota < minQuotaPerod {
+        quota = minQuotaPerod
+    }
+
+    return
+}
+
+func milliCPUToShares(milliCPU int64) int64 {
+    if milliCPU == 0 {
+        // Docker converts zero milliCPU to unset, which maps to kernel default
+        // for unset: 1024. Return 2 here to really match kernel default for
+        // zero milliCPU.
+        return minShares
+    }
+    // Conceptually (milliCPU / milliCPUToCPU) * sharesPerCPU, but factored to improve rounding.
+    shares := (milliCPU * sharesPerCPU) / milliCPUToCPU
+    if shares < minShares {
+        return minShares
+    }
+    return shares
+}
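// Worked numbers for the two conversions above (a sketch that inlines the
// formulas rather than calling the unexported functions; constants match the
// ones defined earlier in this file):
//
//   milliCPU=5    -> quota = 5*100000/1000    = 500, clamped up to 1000 (minQuotaPerod)
//   milliCPU=200  -> quota = 200*100000/1000  = 20000 (20ms of each 100ms period)
//   milliCPU=1500 -> shares = 1500*1024/1000  = 1536
//   milliCPU=0    -> quota/period stay 0 and shares fall back to minShares (2)
package main

import "fmt"

func main() {
	const quotaPeriod, milliCPUToCPU, sharesPerCPU = 100000, 1000, 1024
	for _, m := range []int64{5, 200, 1000, 1500} {
		quota := m * quotaPeriod / milliCPUToCPU
		if quota < 1000 { // minQuotaPerod
			quota = 1000
		}
		shares := m * sharesPerCPU / milliCPUToCPU
		if shares < 2 { // minShares
			shares = 2
		}
		fmt.Printf("milliCPU=%-5d quota=%-7d period=%d shares=%d\n", m, quota, quotaPeriod, shares)
	}
}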
+// GetKubeletDockerContainers lists all containers or just the running ones.
+// Returns a list of docker containers that we manage.
+// TODO: Move this function with dockerCache to DockerManager.
+func GetKubeletDockerContainers(client DockerInterface, allContainers bool) ([]*dockertypes.Container, error) {
+    result := []*dockertypes.Container{}
+    containers, err := client.ListContainers(dockertypes.ContainerListOptions{All: allContainers})
+    if err != nil {
+        return nil, err
+    }
+    for i := range containers {
+        container := &containers[i]
+        if len(container.Names) == 0 {
+            continue
+        }
+        // Skip containers that we didn't create to allow users to manually
+        // spin up their own containers if they want.
+        // TODO(dchen1107): Remove the old separator "--" by end of Oct
+        if !strings.HasPrefix(container.Names[0], "/"+containerNamePrefix+"_") &&
+            !strings.HasPrefix(container.Names[0], "/"+containerNamePrefix+"--") {
+            glog.V(3).Infof("Docker Container: %s is not managed by kubelet.", container.Names[0])
+            continue
+        }
+        result = append(result, container)
+    }
+    return result, nil
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/dockertools/docker_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/dockertools/docker_test.go
new file mode 100644
index 000000000000..25825b522911
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/dockertools/docker_test.go
@@ -0,0 +1,804 @@
+/*
+Copyright 2014 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package dockertools + +import ( + "encoding/json" + "fmt" + "hash/adler32" + "reflect" + "sort" + "strconv" + "strings" + "testing" + + "github.com/docker/docker/pkg/jsonmessage" + dockertypes "github.com/docker/engine-api/types" + dockernat "github.com/docker/go-connections/nat" + cadvisorapi "github.com/google/cadvisor/info/v1" + "k8s.io/kubernetes/cmd/kubelet/app/options" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/componentconfig" + "k8s.io/kubernetes/pkg/client/record" + "k8s.io/kubernetes/pkg/credentialprovider" + kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" + containertest "k8s.io/kubernetes/pkg/kubelet/container/testing" + "k8s.io/kubernetes/pkg/kubelet/network" + nettest "k8s.io/kubernetes/pkg/kubelet/network/testing" + "k8s.io/kubernetes/pkg/types" + hashutil "k8s.io/kubernetes/pkg/util/hash" +) + +func verifyCalls(t *testing.T, fakeDocker *FakeDockerClient, calls []string) { + fakeDocker.Lock() + defer fakeDocker.Unlock() + verifyStringArrayEquals(t, fakeDocker.called, calls) +} + +func verifyStringArrayEquals(t *testing.T, actual, expected []string) { + invalid := len(actual) != len(expected) + if !invalid { + for ix, value := range actual { + if expected[ix] != value { + invalid = true + } + } + } + if invalid { + t.Errorf("Expected: %#v, Actual: %#v", expected, actual) + } +} + +func findPodContainer(dockerContainers []*dockertypes.Container, podFullName string, uid types.UID, containerName string) (*dockertypes.Container, bool, uint64) { + for _, dockerContainer := range dockerContainers { + if len(dockerContainer.Names) == 0 { + continue + } + dockerName, hash, err := ParseDockerName(dockerContainer.Names[0]) + if err != nil { + continue + } + if dockerName.PodFullName == podFullName && + (uid == "" || dockerName.PodUID == uid) && + dockerName.ContainerName == containerName { + return dockerContainer, true, hash + } + } + return nil, false, 0 +} + +func TestGetContainerID(t *testing.T) { + fakeDocker := NewFakeDockerClient() + fakeDocker.SetFakeRunningContainers([]*FakeContainer{ + { + ID: "foobar", + Name: "/k8s_foo_qux_ns_1234_42", + }, + { + ID: "barbar", + Name: "/k8s_bar_qux_ns_2565_42", + }, + }) + + dockerContainers, err := GetKubeletDockerContainers(fakeDocker, false) + if err != nil { + t.Errorf("Expected no error, Got %#v", err) + } + if len(dockerContainers) != 2 { + t.Errorf("Expected %#v, Got %#v", fakeDocker.RunningContainerList, dockerContainers) + } + verifyCalls(t, fakeDocker, []string{"list"}) + + dockerContainer, found, _ := findPodContainer(dockerContainers, "qux_ns", "", "foo") + if dockerContainer == nil || !found { + t.Errorf("Failed to find container %#v", dockerContainer) + } + + fakeDocker.ClearCalls() + dockerContainer, found, _ = findPodContainer(dockerContainers, "foobar", "", "foo") + verifyCalls(t, fakeDocker, []string{}) + if dockerContainer != nil || found { + t.Errorf("Should not have found container %#v", dockerContainer) + } +} + +func verifyPackUnpack(t *testing.T, podNamespace, podUID, podName, containerName string) { + container := &api.Container{Name: containerName} + hasher := adler32.New() + hashutil.DeepHashObject(hasher, *container) + computedHash := uint64(hasher.Sum32()) + podFullName := fmt.Sprintf("%s_%s", podName, podNamespace) + _, name, _ := BuildDockerName(KubeletContainerName{podFullName, types.UID(podUID), container.Name}, container) + returned, hash, err := ParseDockerName(name) + if err != nil { + t.Errorf("Failed to parse Docker container name %q: %v", name, err) + } + if podFullName 
!= returned.PodFullName || podUID != string(returned.PodUID) || containerName != returned.ContainerName || computedHash != hash {
+        t.Errorf("For (%s, %s, %s, %d), unpacked (%s, %s, %s, %d)", podFullName, podUID, containerName, computedHash, returned.PodFullName, returned.PodUID, returned.ContainerName, hash)
+    }
+}
+
+func TestContainerNaming(t *testing.T) {
+    podUID := "12345678"
+    verifyPackUnpack(t, "file", podUID, "name", "container")
+    verifyPackUnpack(t, "file", podUID, "name-with-dashes", "container")
+    // UID is same as pod name
+    verifyPackUnpack(t, "file", podUID, podUID, "container")
+    // No Container name
+    verifyPackUnpack(t, "other", podUID, "name", "")
+
+    container := &api.Container{Name: "container"}
+    podName := "foo"
+    podNamespace := "test"
+    name := fmt.Sprintf("k8s_%s_%s_%s_%s_42", container.Name, podName, podNamespace, podUID)
+    podFullName := fmt.Sprintf("%s_%s", podName, podNamespace)
+
+    returned, hash, err := ParseDockerName(name)
+    if err != nil {
+        t.Errorf("Failed to parse Docker container name %q: %v", name, err)
+    }
+    if returned.PodFullName != podFullName || string(returned.PodUID) != podUID || returned.ContainerName != container.Name || hash != 0 {
+        t.Errorf("unexpected parse: %s %s %s %d", returned.PodFullName, returned.PodUID, returned.ContainerName, hash)
+    }
+}
+
+func TestApplyDefaultImageTag(t *testing.T) {
+    for _, testCase := range []struct {
+        Input  string
+        Output string
+    }{
+        {Input: "root", Output: "root:latest"},
+        {Input: "root:tag", Output: "root:tag"},
+        {Input: "root@sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", Output: "root@sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},
+    } {
+        image, err := applyDefaultImageTag(testCase.Input)
+        if err != nil {
+            t.Errorf("applyDefaultImageTag(%s) failed: %v", testCase.Input, err)
+        } else if image != testCase.Output {
+            t.Errorf("Expected image reference: %q, got %q", testCase.Output, image)
+        }
+    }
+}
+
+func TestPullWithNoSecrets(t *testing.T) {
+    tests := []struct {
+        imageName     string
+        expectedImage string
+    }{
+        {"ubuntu", "ubuntu:latest using {}"},
+        {"ubuntu:2342", "ubuntu:2342 using {}"},
+        {"ubuntu:latest", "ubuntu:latest using {}"},
+        {"foo/bar:445566", "foo/bar:445566 using {}"},
+        {"registry.example.com:5000/foobar", "registry.example.com:5000/foobar:latest using {}"},
+        {"registry.example.com:5000/foobar:5342", "registry.example.com:5000/foobar:5342 using {}"},
+        {"registry.example.com:5000/foobar:latest", "registry.example.com:5000/foobar:latest using {}"},
+    }
+    for _, test := range tests {
+        fakeKeyring := &credentialprovider.FakeKeyring{}
+        fakeClient := NewFakeDockerClient()
+
+        dp := dockerPuller{
+            client:  fakeClient,
+            keyring: fakeKeyring,
+        }
+
+        err := dp.Pull(test.imageName, []api.Secret{})
+        if err != nil {
+            t.Errorf("unexpected non-nil err: %s", err)
+            continue
+        }
+
+        if e, a := 1, len(fakeClient.pulled); e != a {
+            t.Errorf("%s: expected 1 pulled image, got %d: %v", test.imageName, a, fakeClient.pulled)
+            continue
+        }
+
+        if e, a := test.expectedImage, fakeClient.pulled[0]; e != a {
+            t.Errorf("%s: expected pull of %q, but got %q", test.imageName, e, a)
+        }
+    }
+}
+
+func TestPullWithJSONError(t *testing.T) {
+    tests := map[string]struct {
+        imageName     string
+        err           error
+        expectedError string
+    }{
+        "Json error": {
+            "ubuntu",
+            &jsonmessage.JSONError{Code: 50, Message: "Json error"},
+            "Json error",
+        },
+        "Bad gateway": {
+            "ubuntu",
+            &jsonmessage.JSONError{Code: 502, Message: "<!doctype html>\n<html class=\"no-js\" lang=\"\">\n    <head></head>\n    <body>\n        <h1>Oops, there was an error!</h1>\n        <p>We have been contacted of this error, feel free to check out <a href=\"http://status.docker.com/\">status.docker.com</a>\n           to see if there is a bigger issue.</p>\n    </body>\n</html>"},
+            kubecontainer.RegistryUnavailable.Error(),
+        },
+    }
+    for i, test := range tests {
+        fakeKeyring := &credentialprovider.FakeKeyring{}
+        fakeClient := NewFakeDockerClient()
+        fakeClient.InjectError("pull", test.err)
+
+        puller := &dockerPuller{
+            client:  fakeClient,
+            keyring: fakeKeyring,
+        }
+        err := puller.Pull(test.imageName, []api.Secret{})
+        if err == nil || !strings.Contains(err.Error(), test.expectedError) {
+            t.Errorf("%s: expect error %s, got : %s", i, test.expectedError, err)
+            continue
+        }
+    }
+}
+
+func TestPullWithSecrets(t *testing.T) {
+    // auth value is equivalent to: "username":"passed-user","password":"passed-password"
+    dockerCfg := map[string]map[string]string{"index.docker.io/v1/": {"email": "passed-email", "auth": "cGFzc2VkLXVzZXI6cGFzc2VkLXBhc3N3b3Jk"}}
+    dockercfgContent, err := json.Marshal(dockerCfg)
+    if err != nil {
+        t.Errorf("unexpected error: %v", err)
+    }
+
+    dockerConfigJson := map[string]map[string]map[string]string{"auths": dockerCfg}
+    dockerConfigJsonContent, err := json.Marshal(dockerConfigJson)
+    if err != nil {
+        t.Errorf("unexpected error: %v", err)
+    }
+
+    tests := map[string]struct {
+        imageName           string
+        passedSecrets       []api.Secret
+        builtInDockerConfig credentialprovider.DockerConfig
+        expectedPulls       []string
+    }{
+        "no matching secrets": {
+            "ubuntu",
+            []api.Secret{},
+            credentialprovider.DockerConfig(map[string]credentialprovider.DockerConfigEntry{}),
+            []string{"ubuntu:latest using {}"},
+        },
+        "default keyring secrets": {
+            "ubuntu",
+            []api.Secret{},
+            credentialprovider.DockerConfig(map[string]credentialprovider.DockerConfigEntry{"index.docker.io/v1/": {"built-in", "password", "email", nil}}),
+            []string{`ubuntu:latest using {"username":"built-in","password":"password","email":"email"}`},
+        },
+        "default keyring secrets unused": {
+            "ubuntu",
+            []api.Secret{},
+            credentialprovider.DockerConfig(map[string]credentialprovider.DockerConfigEntry{"extraneous": {"built-in", "password", "email", nil}}),
+            []string{`ubuntu:latest using {}`},
+        },
+        "builtin keyring secrets, but use passed": {
+            "ubuntu",
+            []api.Secret{{Type: api.SecretTypeDockercfg, Data: map[string][]byte{api.DockerConfigKey: dockercfgContent}}},
+            credentialprovider.DockerConfig(map[string]credentialprovider.DockerConfigEntry{"index.docker.io/v1/": {"built-in", "password", "email", nil}}),
+            []string{`ubuntu:latest using {"username":"passed-user","password":"passed-password","email":"passed-email"}`},
+        },
+        "builtin keyring secrets, but use passed with new docker config": {
+            "ubuntu",
+            []api.Secret{{Type: api.SecretTypeDockerConfigJson, Data: map[string][]byte{api.DockerConfigJsonKey: dockerConfigJsonContent}}},
+            credentialprovider.DockerConfig(map[string]credentialprovider.DockerConfigEntry{"index.docker.io/v1/": {"built-in", "password", "email", nil}}),
+            []string{`ubuntu:latest using {"username":"passed-user","password":"passed-password","email":"passed-email"}`},
+        },
+    }
+    for i, test := range tests {
+        builtInKeyRing := &credentialprovider.BasicDockerKeyring{}
+        builtInKeyRing.Add(test.builtInDockerConfig)
+
+        fakeClient := NewFakeDockerClient()
+
+        dp := dockerPuller{
+            client:  fakeClient,
+            keyring: builtInKeyRing,
+        }
+
+        err := dp.Pull(test.imageName, test.passedSecrets)
+        if err != nil {
+            t.Errorf("%s: unexpected non-nil err: %s", i, err)
+            continue
+        }
+
+        if e, a := 1, len(fakeClient.pulled); e != a {
+            t.Errorf("%s: expected 1 pulled image, got %d: %v", i, a, fakeClient.pulled)
+            continue
+        }
+
+        if e, a := test.expectedPulls, fakeClient.pulled;
!reflect.DeepEqual(e, a) { + t.Errorf("%s: expected pull of %v, but got %v", i, e, a) + } + } +} + +func TestDockerKeyringLookupFails(t *testing.T) { + fakeKeyring := &credentialprovider.FakeKeyring{} + fakeClient := NewFakeDockerClient() + fakeClient.InjectError("pull", fmt.Errorf("test error")) + + dp := dockerPuller{ + client: fakeClient, + keyring: fakeKeyring, + } + + err := dp.Pull("host/repository/image:version", []api.Secret{}) + if err == nil { + t.Errorf("unexpected non-error") + } + msg := "image pull failed for host/repository/image:version, this may be because there are no credentials on this request. details: (test error)" + if err.Error() != msg { + t.Errorf("expected: %s, saw: %s", msg, err.Error()) + } +} + +func TestDockerKeyringLookup(t *testing.T) { + ada := credentialprovider.LazyAuthConfiguration{ + AuthConfig: dockertypes.AuthConfig{ + Username: "ada", + Password: "smash", + Email: "ada@example.com", + }, + } + + grace := credentialprovider.LazyAuthConfiguration{ + AuthConfig: dockertypes.AuthConfig{ + Username: "grace", + Password: "squash", + Email: "grace@example.com", + }, + } + + dk := &credentialprovider.BasicDockerKeyring{} + dk.Add(credentialprovider.DockerConfig{ + "bar.example.com/pong": credentialprovider.DockerConfigEntry{ + Username: grace.Username, + Password: grace.Password, + Email: grace.Email, + }, + "bar.example.com": credentialprovider.DockerConfigEntry{ + Username: ada.Username, + Password: ada.Password, + Email: ada.Email, + }, + }) + + tests := []struct { + image string + match []credentialprovider.LazyAuthConfiguration + ok bool + }{ + // direct match + {"bar.example.com", []credentialprovider.LazyAuthConfiguration{ada}, true}, + + // direct match deeper than other possible matches + {"bar.example.com/pong", []credentialprovider.LazyAuthConfiguration{grace, ada}, true}, + + // no direct match, deeper path ignored + {"bar.example.com/ping", []credentialprovider.LazyAuthConfiguration{ada}, true}, + + // match first part of path token + {"bar.example.com/pongz", []credentialprovider.LazyAuthConfiguration{grace, ada}, true}, + + // match regardless of sub-path + {"bar.example.com/pong/pang", []credentialprovider.LazyAuthConfiguration{grace, ada}, true}, + + // no host match + {"example.com", []credentialprovider.LazyAuthConfiguration{}, false}, + {"foo.example.com", []credentialprovider.LazyAuthConfiguration{}, false}, + } + + for i, tt := range tests { + match, ok := dk.Lookup(tt.image) + if tt.ok != ok { + t.Errorf("case %d: expected ok=%t, got %t", i, tt.ok, ok) + } + + if !reflect.DeepEqual(tt.match, match) { + t.Errorf("case %d: expected match=%#v, got %#v", i, tt.match, match) + } + } +} + +// This validates that dockercfg entries with a scheme and url path are properly matched +// by images that only match the hostname. +// NOTE: the above covers the case of a more specific match trumping just hostname. 
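// The keyring tests above pin down the matching rules: an image reference
// matches every configured entry whose (normalized) URL is a string prefix of
// it, so "bar.example.com/pong" also matches "bar.example.com/pongz", and
// matches come back most-specific (longest) first. A rough sketch of that
// lookup; the real BasicDockerKeyring additionally strips schemes and handles
// the index.docker.io default registry, which this toy version omits:
package main

import (
	"fmt"
	"sort"
	"strings"
)

func lookup(index map[string]string, image string) []string {
	keys := make([]string, 0, len(index))
	for k := range index {
		keys = append(keys, k)
	}
	// Longest key first, so deeper paths beat bare hostnames.
	sort.Slice(keys, func(i, j int) bool { return len(keys[i]) > len(keys[j]) })

	var creds []string
	for _, k := range keys {
		if strings.HasPrefix(image, k) {
			creds = append(creds, index[k])
		}
	}
	return creds
}

func main() {
	index := map[string]string{
		"bar.example.com":      "ada",
		"bar.example.com/pong": "grace",
	}
	fmt.Println(lookup(index, "bar.example.com/pong/pang")) // [grace ada]
	fmt.Println(lookup(index, "bar.example.com/ping"))      // [ada]
}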
+func TestIssue3797(t *testing.T) { + rex := credentialprovider.LazyAuthConfiguration{ + AuthConfig: dockertypes.AuthConfig{ + Username: "rex", + Password: "tiny arms", + Email: "rex@example.com", + }, + } + + dk := &credentialprovider.BasicDockerKeyring{} + dk.Add(credentialprovider.DockerConfig{ + "https://quay.io/v1/": credentialprovider.DockerConfigEntry{ + Username: rex.Username, + Password: rex.Password, + Email: rex.Email, + }, + }) + + tests := []struct { + image string + match []credentialprovider.LazyAuthConfiguration + ok bool + }{ + // direct match + {"quay.io", []credentialprovider.LazyAuthConfiguration{rex}, true}, + + // partial matches + {"quay.io/foo", []credentialprovider.LazyAuthConfiguration{rex}, true}, + {"quay.io/foo/bar", []credentialprovider.LazyAuthConfiguration{rex}, true}, + } + + for i, tt := range tests { + match, ok := dk.Lookup(tt.image) + if tt.ok != ok { + t.Errorf("case %d: expected ok=%t, got %t", i, tt.ok, ok) + } + + if !reflect.DeepEqual(tt.match, match) { + t.Errorf("case %d: expected match=%#v, got %#v", i, tt.match, match) + } + } +} + +type imageTrackingDockerClient struct { + *FakeDockerClient + imageName string +} + +func (f *imageTrackingDockerClient) InspectImage(name string) (image *dockertypes.ImageInspect, err error) { + image, err = f.FakeDockerClient.InspectImage(name) + f.imageName = name + return +} + +func TestIsImagePresent(t *testing.T) { + cl := &imageTrackingDockerClient{NewFakeDockerClient(), ""} + puller := &dockerPuller{ + client: cl, + } + _, _ = puller.IsImagePresent("abc:123") + if cl.imageName != "abc:123" { + t.Errorf("expected inspection of image abc:123, instead inspected image %v", cl.imageName) + } +} + +type podsByID []*kubecontainer.Pod + +func (b podsByID) Len() int { return len(b) } +func (b podsByID) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b podsByID) Less(i, j int) bool { return b[i].ID < b[j].ID } + +type containersByID []*kubecontainer.Container + +func (b containersByID) Len() int { return len(b) } +func (b containersByID) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b containersByID) Less(i, j int) bool { return b[i].ID.ID < b[j].ID.ID } + +func TestFindContainersByPod(t *testing.T) { + tests := []struct { + runningContainerList []dockertypes.Container + exitedContainerList []dockertypes.Container + all bool + expectedPods []*kubecontainer.Pod + }{ + + { + []dockertypes.Container{ + { + ID: "foobar", + Names: []string{"/k8s_foobar.1234_qux_ns_1234_42"}, + }, + { + ID: "barbar", + Names: []string{"/k8s_barbar.1234_qux_ns_2343_42"}, + }, + { + ID: "baz", + Names: []string{"/k8s_baz.1234_qux_ns_1234_42"}, + }, + }, + []dockertypes.Container{ + { + ID: "barfoo", + Names: []string{"/k8s_barfoo.1234_qux_ns_1234_42"}, + }, + { + ID: "bazbaz", + Names: []string{"/k8s_bazbaz.1234_qux_ns_5678_42"}, + }, + }, + false, + []*kubecontainer.Pod{ + { + ID: "1234", + Name: "qux", + Namespace: "ns", + Containers: []*kubecontainer.Container{ + { + ID: kubecontainer.DockerID("foobar").ContainerID(), + Name: "foobar", + Hash: 0x1234, + State: kubecontainer.ContainerStateUnknown, + }, + { + ID: kubecontainer.DockerID("baz").ContainerID(), + Name: "baz", + Hash: 0x1234, + State: kubecontainer.ContainerStateUnknown, + }, + }, + }, + { + ID: "2343", + Name: "qux", + Namespace: "ns", + Containers: []*kubecontainer.Container{ + { + ID: kubecontainer.DockerID("barbar").ContainerID(), + Name: "barbar", + Hash: 0x1234, + State: kubecontainer.ContainerStateUnknown, + }, + }, + }, + }, + }, + { + []dockertypes.Container{ + { + 
ID: "foobar", + Names: []string{"/k8s_foobar.1234_qux_ns_1234_42"}, + }, + { + ID: "barbar", + Names: []string{"/k8s_barbar.1234_qux_ns_2343_42"}, + }, + { + ID: "baz", + Names: []string{"/k8s_baz.1234_qux_ns_1234_42"}, + }, + }, + []dockertypes.Container{ + { + ID: "barfoo", + Names: []string{"/k8s_barfoo.1234_qux_ns_1234_42"}, + }, + { + ID: "bazbaz", + Names: []string{"/k8s_bazbaz.1234_qux_ns_5678_42"}, + }, + }, + true, + []*kubecontainer.Pod{ + { + ID: "1234", + Name: "qux", + Namespace: "ns", + Containers: []*kubecontainer.Container{ + { + ID: kubecontainer.DockerID("foobar").ContainerID(), + Name: "foobar", + Hash: 0x1234, + State: kubecontainer.ContainerStateUnknown, + }, + { + ID: kubecontainer.DockerID("barfoo").ContainerID(), + Name: "barfoo", + Hash: 0x1234, + State: kubecontainer.ContainerStateUnknown, + }, + { + ID: kubecontainer.DockerID("baz").ContainerID(), + Name: "baz", + Hash: 0x1234, + State: kubecontainer.ContainerStateUnknown, + }, + }, + }, + { + ID: "2343", + Name: "qux", + Namespace: "ns", + Containers: []*kubecontainer.Container{ + { + ID: kubecontainer.DockerID("barbar").ContainerID(), + Name: "barbar", + Hash: 0x1234, + State: kubecontainer.ContainerStateUnknown, + }, + }, + }, + { + ID: "5678", + Name: "qux", + Namespace: "ns", + Containers: []*kubecontainer.Container{ + { + ID: kubecontainer.DockerID("bazbaz").ContainerID(), + Name: "bazbaz", + Hash: 0x1234, + State: kubecontainer.ContainerStateUnknown, + }, + }, + }, + }, + }, + { + []dockertypes.Container{}, + []dockertypes.Container{}, + true, + nil, + }, + } + fakeClient := NewFakeDockerClient() + np, _ := network.InitNetworkPlugin([]network.NetworkPlugin{}, "", nettest.NewFakeHost(nil), componentconfig.HairpinNone) + // image back-off is set to nil, this test should not pull images + containerManager := NewFakeDockerManager(fakeClient, &record.FakeRecorder{}, nil, nil, &cadvisorapi.MachineInfo{}, options.GetDefaultPodInfraContainerImage(), 0, 0, "", &containertest.FakeOS{}, np, nil, nil, nil) + for i, test := range tests { + fakeClient.RunningContainerList = test.runningContainerList + fakeClient.ExitedContainerList = test.exitedContainerList + + result, _ := containerManager.GetPods(test.all) + for i := range result { + sort.Sort(containersByID(result[i].Containers)) + } + for i := range test.expectedPods { + sort.Sort(containersByID(test.expectedPods[i].Containers)) + } + sort.Sort(podsByID(result)) + sort.Sort(podsByID(test.expectedPods)) + if !reflect.DeepEqual(test.expectedPods, result) { + t.Errorf("%d: expected: %#v, saw: %#v", i, test.expectedPods, result) + } + } +} + +func TestMakePortsAndBindings(t *testing.T) { + portMapping := func(container, host int, protocol api.Protocol, ip string) kubecontainer.PortMapping { + return kubecontainer.PortMapping{ + ContainerPort: container, + HostPort: host, + Protocol: protocol, + HostIP: ip, + } + } + + portBinding := func(port, ip string) dockernat.PortBinding { + return dockernat.PortBinding{ + HostPort: port, + HostIP: ip, + } + } + + ports := []kubecontainer.PortMapping{ + portMapping(80, 8080, "", "127.0.0.1"), + portMapping(443, 443, "tcp", ""), + portMapping(444, 444, "udp", ""), + portMapping(445, 445, "foobar", ""), + portMapping(443, 446, "tcp", ""), + portMapping(443, 446, "udp", ""), + } + + exposedPorts, bindings := makePortsAndBindings(ports) + + // Count the expected exposed ports and bindings + expectedExposedPorts := map[string]struct{}{} + + for _, binding := range ports { + dockerKey := strconv.Itoa(binding.ContainerPort) + "/" + 
string(binding.Protocol)
+        expectedExposedPorts[dockerKey] = struct{}{}
+    }
+
+    // Should expose right ports in docker
+    if len(expectedExposedPorts) != len(exposedPorts) {
+        t.Errorf("Unexpected ports and bindings, %#v %#v %#v", ports, exposedPorts, bindings)
+    }
+
+    // Construct expected bindings
+    expectPortBindings := map[string][]dockernat.PortBinding{
+        "80/tcp": {
+            portBinding("8080", "127.0.0.1"),
+        },
+        "443/tcp": {
+            portBinding("443", ""),
+            portBinding("446", ""),
+        },
+        "443/udp": {
+            portBinding("446", ""),
+        },
+        "444/udp": {
+            portBinding("444", ""),
+        },
+        "445/tcp": {
+            portBinding("445", ""),
+        },
+    }
+
+    // iterate the bindings by dockerPort, and check its portBindings
+    for dockerPort, portBindings := range bindings {
+        switch dockerPort {
+        case "80/tcp", "443/tcp", "443/udp", "444/udp", "445/tcp":
+            if !reflect.DeepEqual(expectPortBindings[string(dockerPort)], portBindings) {
+                t.Errorf("Unexpected portbindings for %#v, expected: %#v, but got: %#v",
+                    dockerPort, expectPortBindings[string(dockerPort)], portBindings)
+            }
+        default:
+            t.Errorf("Unexpected docker port: %#v with portbindings: %#v", dockerPort, portBindings)
+        }
+    }
+}
+
+func TestMilliCPUToQuota(t *testing.T) {
+    testCases := []struct {
+        input  int64
+        quota  int64
+        period int64
+    }{
+        {
+            input:  int64(0),
+            quota:  int64(0),
+            period: int64(0),
+        },
+        {
+            input:  int64(5),
+            quota:  int64(1000),
+            period: int64(100000),
+        },
+        {
+            input:  int64(9),
+            quota:  int64(1000),
+            period: int64(100000),
+        },
+        {
+            input:  int64(10),
+            quota:  int64(1000),
+            period: int64(100000),
+        },
+        {
+            input:  int64(200),
+            quota:  int64(20000),
+            period: int64(100000),
+        },
+        {
+            input:  int64(500),
+            quota:  int64(50000),
+            period: int64(100000),
+        },
+        {
+            input:  int64(1000),
+            quota:  int64(100000),
+            period: int64(100000),
+        },
+        {
+            input:  int64(1500),
+            quota:  int64(150000),
+            period: int64(100000),
+        },
+    }
+    for _, testCase := range testCases {
+        quota, period := milliCPUToQuota(testCase.input)
+        if quota != testCase.quota || period != testCase.period {
+            t.Errorf("Input %v, expected quota %v period %v, but got quota %v period %v", testCase.input, testCase.quota, testCase.period, quota, period)
+        }
+    }
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/dockertools/exec.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/dockertools/exec.go
new file mode 100644
index 000000000000..2568b60b2d4f
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/dockertools/exec.go
@@ -0,0 +1,149 @@
+/*
+Copyright 2015 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package dockertools
+
+import (
+    "fmt"
+    "io"
+    "os"
+    "os/exec"
+    "time"
+
+    dockertypes "github.com/docker/engine-api/types"
+    "github.com/golang/glog"
+    kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
+)
+
+// ExecHandler knows how to execute a command in a running Docker container.
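// TestMakePortsAndBindings above pins the expected shape: each PortMapping
// becomes a "containerPort/protocol" key, empty or unrecognized protocols
// fall back to tcp, and host ports for the same key accumulate into a list.
// A stand-alone sketch of that grouping, using plain maps instead of the
// dockernat types:
package main

import (
	"fmt"
	"strconv"
)

type mapping struct {
	containerPort, hostPort int
	protocol, hostIP        string
}

func group(ms []mapping) map[string][]string {
	bindings := map[string][]string{}
	for _, m := range ms {
		proto := m.protocol
		if proto != "tcp" && proto != "udp" {
			proto = "tcp" // "", "foobar", ... all default to tcp, as the test expects
		}
		key := strconv.Itoa(m.containerPort) + "/" + proto
		bindings[key] = append(bindings[key], m.hostIP+":"+strconv.Itoa(m.hostPort))
	}
	return bindings
}

func main() {
	fmt.Println(group([]mapping{
		{80, 8080, "", "127.0.0.1"},
		{443, 443, "tcp", ""},
		{443, 446, "udp", ""},
		{445, 445, "foobar", ""},
	}))
	// map[443/tcp:[:443] 443/udp:[:446] 445/tcp:[:445] 80/tcp:[127.0.0.1:8080]]
}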
+type ExecHandler interface { + ExecInContainer(client DockerInterface, container *dockertypes.ContainerJSON, cmd []string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool) error +} + +// NsenterExecHandler executes commands in Docker containers using nsenter. +type NsenterExecHandler struct{} + +// TODO should we support nsenter in a container, running with elevated privs and --pid=host? +func (*NsenterExecHandler) ExecInContainer(client DockerInterface, container *dockertypes.ContainerJSON, cmd []string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool) error { + nsenter, err := exec.LookPath("nsenter") + if err != nil { + return fmt.Errorf("exec unavailable - unable to locate nsenter") + } + + containerPid := container.State.Pid + + // TODO what if the container doesn't have `env`??? + args := []string{"-t", fmt.Sprintf("%d", containerPid), "-m", "-i", "-u", "-n", "-p", "--", "env", "-i"} + args = append(args, fmt.Sprintf("HOSTNAME=%s", container.Config.Hostname)) + args = append(args, container.Config.Env...) + args = append(args, cmd...) + command := exec.Command(nsenter, args...) + if tty { + p, err := kubecontainer.StartPty(command) + if err != nil { + return err + } + defer p.Close() + + // make sure to close the stdout stream + defer stdout.Close() + + if stdin != nil { + go io.Copy(p, stdin) + } + + if stdout != nil { + go io.Copy(stdout, p) + } + + return command.Wait() + } else { + if stdin != nil { + // Use an os.Pipe here as it returns true *os.File objects. + // This way, if you run 'kubectl exec -i bash' (no tty) and type 'exit', + // the call below to command.Run() can unblock because its Stdin is the read half + // of the pipe. + r, w, err := os.Pipe() + if err != nil { + return err + } + go io.Copy(w, stdin) + + command.Stdin = r + } + if stdout != nil { + command.Stdout = stdout + } + if stderr != nil { + command.Stderr = stderr + } + + return command.Run() + } +} + +// NativeExecHandler executes commands in Docker containers using Docker's exec API. 
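// For reference, the nsenter invocation assembled by NsenterExecHandler above
// ends up looking like the following (a sketch with a hypothetical PID and
// env; printed rather than executed, since nsenter needs root and a live
// target process):
package main

import (
	"fmt"
	"os/exec"
)

func main() {
	containerPid := 12345 // hypothetical; the handler reads container.State.Pid
	args := []string{"-t", fmt.Sprintf("%d", containerPid), "-m", "-i", "-u", "-n", "-p", "--", "env", "-i"}
	args = append(args, "HOSTNAME=mypod", "PATH=/usr/sbin:/usr/bin") // container env
	args = append(args, "ls", "-l", "/")                             // the user's cmd goes last
	cmd := exec.Command("nsenter", args...)
	fmt.Println(cmd.Args)
	// [nsenter -t 12345 -m -i -u -n -p -- env -i HOSTNAME=mypod PATH=/usr/sbin:/usr/bin ls -l /]
}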
+type NativeExecHandler struct{} + +func (*NativeExecHandler) ExecInContainer(client DockerInterface, container *dockertypes.ContainerJSON, cmd []string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool) error { + createOpts := dockertypes.ExecConfig{ + Cmd: cmd, + AttachStdin: stdin != nil, + AttachStdout: stdout != nil, + AttachStderr: stderr != nil, + Tty: tty, + } + execObj, err := client.CreateExec(container.ID, createOpts) + if err != nil { + return fmt.Errorf("failed to exec in container - Exec setup failed - %v", err) + } + startOpts := dockertypes.ExecStartCheck{Detach: false, Tty: tty} + streamOpts := StreamOptions{ + InputStream: stdin, + OutputStream: stdout, + ErrorStream: stderr, + RawTerminal: tty, + } + err = client.StartExec(execObj.ID, startOpts, streamOpts) + if err != nil { + return err + } + ticker := time.NewTicker(2 * time.Second) + defer ticker.Stop() + count := 0 + for { + inspect, err2 := client.InspectExec(execObj.ID) + if err2 != nil { + return err2 + } + if !inspect.Running { + if inspect.ExitCode != 0 { + err = &dockerExitError{inspect} + } + break + } + + count++ + if count == 5 { + glog.Errorf("Exec session %s in container %s terminated but process still running!", execObj.ID, container.ID) + break + } + + <-ticker.C + } + + return err +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/dockertools/fake_docker_client.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/dockertools/fake_docker_client.go new file mode 100644 index 000000000000..686a2ccd76eb --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/dockertools/fake_docker_client.go @@ -0,0 +1,558 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dockertools + +import ( + "encoding/json" + "fmt" + "math/rand" + "os" + "reflect" + "sort" + "sync" + "time" + + dockertypes "github.com/docker/engine-api/types" + dockercontainer "github.com/docker/engine-api/types/container" + + "k8s.io/kubernetes/pkg/api" +) + +// FakeDockerClient is a simple fake docker client, so that kubelet can be run for testing without requiring a real docker setup. +type FakeDockerClient struct { + sync.Mutex + RunningContainerList []dockertypes.Container + ExitedContainerList []dockertypes.Container + ContainerMap map[string]*dockertypes.ContainerJSON + Image *dockertypes.ImageInspect + Images []dockertypes.Image + Errors map[string]error + called []string + pulled []string + + // Created, Stopped and Removed all container docker ID + Created []string + Stopped []string + Removed []string + VersionInfo dockertypes.Version + Information dockertypes.Info + ExecInspect *dockertypes.ContainerExecInspect + execCmd []string + EnableSleep bool + ImageHistoryMap map[string][]dockertypes.ImageHistory +} + +// We don't check docker version now, just set the docker version of fake docker client to 1.8.1. +// Notice that if someday we also have minimum docker version requirement, this should also be updated. 
+const fakeDockerVersion = "1.8.1" + +func NewFakeDockerClient() *FakeDockerClient { + return NewFakeDockerClientWithVersion(fakeDockerVersion, minimumDockerAPIVersion) +} + +func NewFakeDockerClientWithVersion(version, apiVersion string) *FakeDockerClient { + return &FakeDockerClient{ + VersionInfo: dockertypes.Version{Version: version, APIVersion: apiVersion}, + Errors: make(map[string]error), + ContainerMap: make(map[string]*dockertypes.ContainerJSON), + } +} + +func (f *FakeDockerClient) InjectError(fn string, err error) { + f.Lock() + defer f.Unlock() + f.Errors[fn] = err +} + +func (f *FakeDockerClient) InjectErrors(errs map[string]error) { + f.Lock() + defer f.Unlock() + for fn, err := range errs { + f.Errors[fn] = err + } +} + +func (f *FakeDockerClient) ClearErrors() { + f.Lock() + defer f.Unlock() + f.Errors = map[string]error{} +} + +func (f *FakeDockerClient) ClearCalls() { + f.Lock() + defer f.Unlock() + f.called = []string{} + f.Stopped = []string{} + f.pulled = []string{} + f.Created = []string{} + f.Removed = []string{} +} + +// Because the new data type returned by engine-api is too complex to manually initialize, we need a +// fake container which is easier to initialize. +type FakeContainer struct { + ID string + Name string + Running bool + ExitCode int + Pid int + CreatedAt time.Time + StartedAt time.Time + FinishedAt time.Time + Config *dockercontainer.Config + HostConfig *dockercontainer.HostConfig +} + +// convertFakeContainer converts the fake container to real container +func convertFakeContainer(f *FakeContainer) *dockertypes.ContainerJSON { + if f.Config == nil { + f.Config = &dockercontainer.Config{} + } + if f.HostConfig == nil { + f.HostConfig = &dockercontainer.HostConfig{} + } + return &dockertypes.ContainerJSON{ + ContainerJSONBase: &dockertypes.ContainerJSONBase{ + ID: f.ID, + Name: f.Name, + State: &dockertypes.ContainerState{ + Running: f.Running, + ExitCode: f.ExitCode, + Pid: f.Pid, + StartedAt: dockerTimestampToString(f.StartedAt), + FinishedAt: dockerTimestampToString(f.FinishedAt), + }, + Created: dockerTimestampToString(f.CreatedAt), + HostConfig: f.HostConfig, + }, + Config: f.Config, + NetworkSettings: &dockertypes.NetworkSettings{}, + } +} + +func (f *FakeDockerClient) SetFakeContainers(containers []*FakeContainer) { + f.Lock() + defer f.Unlock() + // Reset the lists and the map. 
+ f.ContainerMap = map[string]*dockertypes.ContainerJSON{} + f.RunningContainerList = []dockertypes.Container{} + f.ExitedContainerList = []dockertypes.Container{} + + for i := range containers { + c := containers[i] + f.ContainerMap[c.ID] = convertFakeContainer(c) + container := dockertypes.Container{ + Names: []string{c.Name}, + ID: c.ID, + } + if c.Running { + f.RunningContainerList = append(f.RunningContainerList, container) + } else { + f.ExitedContainerList = append(f.ExitedContainerList, container) + } + } +} + +func (f *FakeDockerClient) SetFakeRunningContainers(containers []*FakeContainer) { + for _, c := range containers { + c.Running = true + } + f.SetFakeContainers(containers) +} + +func (f *FakeDockerClient) AssertCalls(calls []string) (err error) { + f.Lock() + defer f.Unlock() + + if !reflect.DeepEqual(calls, f.called) { + err = fmt.Errorf("expected %#v, got %#v", calls, f.called) + } + + return +} + +func (f *FakeDockerClient) AssertCreated(created []string) error { + f.Lock() + defer f.Unlock() + + actualCreated := []string{} + for _, c := range f.Created { + dockerName, _, err := ParseDockerName(c) + if err != nil { + return fmt.Errorf("unexpected error: %v", err) + } + actualCreated = append(actualCreated, dockerName.ContainerName) + } + sort.StringSlice(created).Sort() + sort.StringSlice(actualCreated).Sort() + if !reflect.DeepEqual(created, actualCreated) { + return fmt.Errorf("expected %#v, got %#v", created, actualCreated) + } + return nil +} + +func (f *FakeDockerClient) AssertStopped(stopped []string) error { + f.Lock() + defer f.Unlock() + sort.StringSlice(stopped).Sort() + sort.StringSlice(f.Stopped).Sort() + if !reflect.DeepEqual(stopped, f.Stopped) { + return fmt.Errorf("expected %#v, got %#v", stopped, f.Stopped) + } + return nil +} + +func (f *FakeDockerClient) AssertUnorderedCalls(calls []string) (err error) { + f.Lock() + defer f.Unlock() + + expected := make([]string, len(calls)) + actual := make([]string, len(f.called)) + copy(expected, calls) + copy(actual, f.called) + + sort.StringSlice(expected).Sort() + sort.StringSlice(actual).Sort() + + if !reflect.DeepEqual(actual, expected) { + err = fmt.Errorf("expected(sorted) %#v, got(sorted) %#v", expected, actual) + } + return +} + +func (f *FakeDockerClient) popError(op string) error { + if f.Errors == nil { + return nil + } + err, ok := f.Errors[op] + if ok { + delete(f.Errors, op) + return err + } else { + return nil + } +} + +// ListContainers is a test-spy implementation of DockerInterface.ListContainers. +// It adds an entry "list" to the internal method call record. +func (f *FakeDockerClient) ListContainers(options dockertypes.ContainerListOptions) ([]dockertypes.Container, error) { + f.Lock() + defer f.Unlock() + f.called = append(f.called, "list") + err := f.popError("list") + containerList := append([]dockertypes.Container{}, f.RunningContainerList...) + if options.All { + // Although the container is not sorted, but the container with the same name should be in order, + // that is enough for us now. + // TODO(random-liu): Is a fully sorted array needed? + containerList = append(containerList, f.ExitedContainerList...) + } + return containerList, err +} + +// InspectContainer is a test-spy implementation of DockerInterface.InspectContainer. +// It adds an entry "inspect" to the internal method call record. 
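// FakeDockerClient above is a classic test spy: every method records its name
// into `called` under the lock, and popError lets a test arm a one-shot
// failure per operation. The pattern in isolation (a sketch, not the fake's
// real API surface):
package main

import (
	"errors"
	"fmt"
	"sync"
)

type spy struct {
	sync.Mutex
	called []string
	errs   map[string]error
}

// popError returns the armed error for op, if any, and disarms it.
func (s *spy) popError(op string) error {
	err := s.errs[op]
	delete(s.errs, op) // one-shot: the next call succeeds again
	return err
}

func (s *spy) List() error {
	s.Lock()
	defer s.Unlock()
	s.called = append(s.called, "list")
	return s.popError("list")
}

func main() {
	s := &spy{errs: map[string]error{"list": errors.New("boom")}}
	fmt.Println(s.List()) // boom (injected)
	fmt.Println(s.List()) // <nil>
	fmt.Println(s.called) // [list list]
}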
+func (f *FakeDockerClient) InspectContainer(id string) (*dockertypes.ContainerJSON, error) { + f.Lock() + defer f.Unlock() + f.called = append(f.called, "inspect_container") + err := f.popError("inspect_container") + if container, ok := f.ContainerMap[id]; ok { + return container, err + } + return nil, err +} + +// InspectImage is a test-spy implementation of DockerInterface.InspectImage. +// It adds an entry "inspect" to the internal method call record. +func (f *FakeDockerClient) InspectImage(name string) (*dockertypes.ImageInspect, error) { + f.Lock() + defer f.Unlock() + f.called = append(f.called, "inspect_image") + err := f.popError("inspect_image") + return f.Image, err +} + +// Sleeps random amount of time with the normal distribution with given mean and stddev +// (in milliseconds), we never sleep less than cutOffMillis +func (f *FakeDockerClient) normalSleep(mean, stdDev, cutOffMillis int) { + if !f.EnableSleep { + return + } + cutoff := (time.Duration)(cutOffMillis) * time.Millisecond + delay := (time.Duration)(rand.NormFloat64()*float64(stdDev)+float64(mean)) * time.Millisecond + if delay < cutoff { + delay = cutoff + } + time.Sleep(delay) +} + +// CreateContainer is a test-spy implementation of DockerInterface.CreateContainer. +// It adds an entry "create" to the internal method call record. +func (f *FakeDockerClient) CreateContainer(c dockertypes.ContainerCreateConfig) (*dockertypes.ContainerCreateResponse, error) { + f.Lock() + defer f.Unlock() + f.called = append(f.called, "create") + if err := f.popError("create"); err != nil { + return nil, err + } + // This is not a very good fake. We'll just add this container's name to the list. + // Docker likes to add a '/', so copy that behavior. + name := "/" + c.Name + id := name + f.Created = append(f.Created, name) + // The newest container should be in front, because we assume so in GetPodStatus() + f.RunningContainerList = append([]dockertypes.Container{ + {ID: name, Names: []string{name}, Image: c.Config.Image, Labels: c.Config.Labels}, + }, f.RunningContainerList...) + f.ContainerMap[name] = convertFakeContainer(&FakeContainer{ID: id, Name: name, Config: c.Config, HostConfig: c.HostConfig}) + f.normalSleep(100, 25, 25) + return &dockertypes.ContainerCreateResponse{ID: id}, nil +} + +// StartContainer is a test-spy implementation of DockerInterface.StartContainer. +// It adds an entry "start" to the internal method call record. +func (f *FakeDockerClient) StartContainer(id string) error { + f.Lock() + defer f.Unlock() + f.called = append(f.called, "start") + if err := f.popError("start"); err != nil { + return err + } + container, ok := f.ContainerMap[id] + if !ok { + container = convertFakeContainer(&FakeContainer{ID: id, Name: id}) + } + container.State.Running = true + container.State.Pid = os.Getpid() + container.State.StartedAt = dockerTimestampToString(time.Now()) + container.NetworkSettings.IPAddress = "2.3.4.5" + f.ContainerMap[id] = container + f.updateContainerStatus(id, statusRunningPrefix) + f.normalSleep(200, 50, 50) + return nil +} + +// StopContainer is a test-spy implementation of DockerInterface.StopContainer. +// It adds an entry "stop" to the internal method call record. 
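// normalSleep above draws its delay from a normal distribution and clamps it
// at a floor, so the fake can imitate a jittery daemon without ever returning
// implausibly fast. The sampling step in isolation:
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// jitter samples N(mean, stdDev) milliseconds, never below cutoffMillis.
func jitter(mean, stdDev, cutoffMillis int) time.Duration {
	cutoff := time.Duration(cutoffMillis) * time.Millisecond
	d := time.Duration(rand.NormFloat64()*float64(stdDev)+float64(mean)) * time.Millisecond
	if d < cutoff {
		d = cutoff
	}
	return d
}

func main() {
	for i := 0; i < 3; i++ {
		fmt.Println(jitter(100, 25, 25)) // e.g. 113ms, 87ms, 25ms (floor)
	}
}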
+func (f *FakeDockerClient) StopContainer(id string, timeout int) error { + f.Lock() + defer f.Unlock() + f.called = append(f.called, "stop") + if err := f.popError("stop"); err != nil { + return err + } + f.Stopped = append(f.Stopped, id) + // Container status should be Updated before container moved to ExitedContainerList + f.updateContainerStatus(id, statusExitedPrefix) + var newList []dockertypes.Container + for _, container := range f.RunningContainerList { + if container.ID == id { + // The newest exited container should be in front. Because we assume so in GetPodStatus() + f.ExitedContainerList = append([]dockertypes.Container{container}, f.ExitedContainerList...) + continue + } + newList = append(newList, container) + } + f.RunningContainerList = newList + container, ok := f.ContainerMap[id] + if !ok { + container = convertFakeContainer(&FakeContainer{ + ID: id, + Name: id, + Running: false, + StartedAt: time.Now().Add(-time.Second), + FinishedAt: time.Now(), + }) + } else { + container.State.FinishedAt = dockerTimestampToString(time.Now()) + container.State.Running = false + } + f.ContainerMap[id] = container + f.normalSleep(200, 50, 50) + return nil +} + +func (f *FakeDockerClient) RemoveContainer(id string, opts dockertypes.ContainerRemoveOptions) error { + f.Lock() + defer f.Unlock() + f.called = append(f.called, "remove") + err := f.popError("remove") + if err != nil { + return err + } + for i := range f.ExitedContainerList { + if f.ExitedContainerList[i].ID == id { + delete(f.ContainerMap, id) + f.ExitedContainerList = append(f.ExitedContainerList[:i], f.ExitedContainerList[i+1:]...) + f.Removed = append(f.Removed, id) + return nil + } + + } + // To be a good fake, report error if container is not stopped. + return fmt.Errorf("container not stopped") +} + +// Logs is a test-spy implementation of DockerInterface.Logs. +// It adds an entry "logs" to the internal method call record. +func (f *FakeDockerClient) Logs(id string, opts dockertypes.ContainerLogsOptions, sopts StreamOptions) error { + f.Lock() + defer f.Unlock() + f.called = append(f.called, "logs") + return f.popError("logs") +} + +// PullImage is a test-spy implementation of DockerInterface.PullImage. +// It adds an entry "pull" to the internal method call record. 
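// StopContainer above prepends the stopped container to ExitedContainerList
// because, per its comment, GetPodStatus assumes newest-exited-first ordering.
// The list shuffle on its own (a sketch with string IDs instead of
// dockertypes.Container values):
package main

import "fmt"

func stop(running, exited []string, id string) (newRunning, newExited []string) {
	newExited = exited
	for _, c := range running {
		if c == id {
			newExited = append([]string{c}, newExited...) // newest in front
			continue
		}
		newRunning = append(newRunning, c)
	}
	return newRunning, newExited
}

func main() {
	running, exited := []string{"a", "b", "c"}, []string{"old"}
	running, exited = stop(running, exited, "b")
	fmt.Println(running, exited) // [a c] [b old]
}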
+func (f *FakeDockerClient) PullImage(image string, auth dockertypes.AuthConfig, opts dockertypes.ImagePullOptions) error { + f.Lock() + defer f.Unlock() + f.called = append(f.called, "pull") + err := f.popError("pull") + if err == nil { + authJson, _ := json.Marshal(auth) + f.pulled = append(f.pulled, fmt.Sprintf("%s using %s", image, string(authJson))) + } + return err +} + +func (f *FakeDockerClient) Version() (*dockertypes.Version, error) { + f.Lock() + defer f.Unlock() + return &f.VersionInfo, f.popError("version") +} + +func (f *FakeDockerClient) Info() (*dockertypes.Info, error) { + return &f.Information, nil +} + +func (f *FakeDockerClient) CreateExec(id string, opts dockertypes.ExecConfig) (*dockertypes.ContainerExecCreateResponse, error) { + f.Lock() + defer f.Unlock() + f.execCmd = opts.Cmd + f.called = append(f.called, "create_exec") + return &dockertypes.ContainerExecCreateResponse{ID: "12345678"}, nil +} + +func (f *FakeDockerClient) StartExec(startExec string, opts dockertypes.ExecStartCheck, sopts StreamOptions) error { + f.Lock() + defer f.Unlock() + f.called = append(f.called, "start_exec") + return nil +} + +func (f *FakeDockerClient) AttachToContainer(id string, opts dockertypes.ContainerAttachOptions, sopts StreamOptions) error { + f.Lock() + defer f.Unlock() + f.called = append(f.called, "attach") + return nil +} + +func (f *FakeDockerClient) InspectExec(id string) (*dockertypes.ContainerExecInspect, error) { + return f.ExecInspect, f.popError("inspect_exec") +} + +func (f *FakeDockerClient) ListImages(opts dockertypes.ImageListOptions) ([]dockertypes.Image, error) { + f.called = append(f.called, "list_images") + err := f.popError("list_images") + return f.Images, err +} + +func (f *FakeDockerClient) RemoveImage(image string, opts dockertypes.ImageRemoveOptions) ([]dockertypes.ImageDelete, error) { + err := f.popError("remove_image") + if err == nil { + for i := range f.Images { + if f.Images[i].ID == image { + f.Images = append(f.Images[:i], f.Images[i+1:]...) + break + } + } + } + return []dockertypes.ImageDelete{{Deleted: image}}, err +} + +func (f *FakeDockerClient) InjectImages(images []dockertypes.Image) { + f.Lock() + defer f.Unlock() + f.Images = append(f.Images, images...) +} + +func (f *FakeDockerClient) updateContainerStatus(id, status string) { + for i := range f.RunningContainerList { + if f.RunningContainerList[i].ID == id { + f.RunningContainerList[i].Status = status + } + } +} + +// FakeDockerPuller is a stub implementation of DockerPuller. +type FakeDockerPuller struct { + sync.Mutex + + HasImages []string + ImagesPulled []string + + // Every pull will return the first error here, and then reslice + // to remove it. Will give nil errors if this slice is empty. + ErrorsToInject []error +} + +// Pull records the image pull attempt, and optionally injects an error. 
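// FakeDockerPuller's ErrorsToInject is consumed front-first: each Pull pops
// one error if any remain, then succeeds once the queue drains. The reslice
// idiom by itself (sketch):
package main

import (
	"errors"
	"fmt"
)

type fakePuller struct{ errorsToInject []error }

func (f *fakePuller) pull() (err error) {
	if len(f.errorsToInject) > 0 {
		err = f.errorsToInject[0]
		f.errorsToInject = f.errorsToInject[1:]
	}
	return err
}

func main() {
	f := &fakePuller{errorsToInject: []error{errors.New("transient")}}
	fmt.Println(f.pull()) // transient
	fmt.Println(f.pull()) // <nil>
}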
+func (f *FakeDockerPuller) Pull(image string, secrets []api.Secret) (err error) { + f.Lock() + defer f.Unlock() + f.ImagesPulled = append(f.ImagesPulled, image) + + if len(f.ErrorsToInject) > 0 { + err = f.ErrorsToInject[0] + f.ErrorsToInject = f.ErrorsToInject[1:] + } + return err +} + +func (f *FakeDockerPuller) IsImagePresent(name string) (bool, error) { + f.Lock() + defer f.Unlock() + if f.HasImages == nil { + return true, nil + } + for _, s := range f.HasImages { + if s == name { + return true, nil + } + } + return false, nil +} +func (f *FakeDockerClient) ImageHistory(id string) ([]dockertypes.ImageHistory, error) { + f.Lock() + defer f.Unlock() + f.called = append(f.called, "image_history") + history := f.ImageHistoryMap[id] + return history, nil +} + +func (f *FakeDockerClient) InjectImageHistory(data map[string][]dockertypes.ImageHistory) { + f.Lock() + defer f.Unlock() + f.ImageHistoryMap = data +} + +// dockerTimestampToString converts the timestamp to string +func dockerTimestampToString(t time.Time) string { + return t.Format(time.RFC3339Nano) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/dockertools/fake_manager.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/dockertools/fake_manager.go new file mode 100644 index 000000000000..38c801e35a8d --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/dockertools/fake_manager.go @@ -0,0 +1,78 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package dockertools + +import ( + cadvisorapi "github.com/google/cadvisor/info/v1" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/client/record" + kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" + "k8s.io/kubernetes/pkg/kubelet/network" + proberesults "k8s.io/kubernetes/pkg/kubelet/prober/results" + kubetypes "k8s.io/kubernetes/pkg/kubelet/types" + "k8s.io/kubernetes/pkg/kubelet/util/cache" + "k8s.io/kubernetes/pkg/types" + "k8s.io/kubernetes/pkg/util/flowcontrol" + "k8s.io/kubernetes/pkg/util/oom" + "k8s.io/kubernetes/pkg/util/procfs" +) + +func NewFakeDockerManager( + client DockerInterface, + recorder record.EventRecorder, + livenessManager proberesults.Manager, + containerRefManager *kubecontainer.RefManager, + machineInfo *cadvisorapi.MachineInfo, + podInfraContainerImage string, + qps float32, + burst int, + containerLogsDir string, + osInterface kubecontainer.OSInterface, + networkPlugin network.NetworkPlugin, + runtimeHelper kubecontainer.RuntimeHelper, + httpClient kubetypes.HttpGetter, imageBackOff *flowcontrol.Backoff) *DockerManager { + + fakeOOMAdjuster := oom.NewFakeOOMAdjuster() + fakeProcFs := procfs.NewFakeProcFS() + fakePodGetter := &fakePodGetter{} + dm := NewDockerManager(client, recorder, livenessManager, containerRefManager, fakePodGetter, machineInfo, podInfraContainerImage, qps, + burst, containerLogsDir, osInterface, networkPlugin, runtimeHelper, httpClient, &NativeExecHandler{}, + fakeOOMAdjuster, fakeProcFs, false, imageBackOff, false, false, true, "/var/lib/kubelet/seccomp") + dm.dockerPuller = &FakeDockerPuller{} + + // ttl of version cache is set to 0 so we always call version api directly in tests. + dm.versionCache = cache.NewObjectCache( + func() (interface{}, error) { + return dm.getVersionInfo() + }, + 0, + ) + return dm +} + +type fakePodGetter struct { + pods map[types.UID]*api.Pod +} + +func newFakePodGetter() *fakePodGetter { + return &fakePodGetter{make(map[types.UID]*api.Pod)} +} + +func (f *fakePodGetter) GetPodByUID(uid types.UID) (*api.Pod, bool) { + pod, found := f.pods[uid] + return pod, found +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/dockertools/images.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/dockertools/images.go new file mode 100644 index 000000000000..6f7be9e0eb2b --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/dockertools/images.go @@ -0,0 +1,102 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dockertools + +import ( + "fmt" + "sync" + + "github.com/golang/glog" + + dockertypes "github.com/docker/engine-api/types" + runtime "k8s.io/kubernetes/pkg/kubelet/container" +) + +// imageStatsProvider exposes stats about all images currently available. +type imageStatsProvider struct { + sync.Mutex + // layers caches the current layers, key is the layer ID. + layers map[string]*dockertypes.ImageHistory + // imageToLayerIDs maps image to its layer IDs. 
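+	// Layer keys are normally the layer IDs reported by docker; layers whose
+	// ID is empty or "<missing>" get the CreatedBy string appended to form a
+	// usable key (see ImageStats below), so shared layers are still counted
+	// only once.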
+	imageToLayerIDs map[string][]string
+	// Docker remote API client
+	c DockerInterface
+}
+
+func newImageStatsProvider(c DockerInterface) *imageStatsProvider {
+	return &imageStatsProvider{
+		layers:          make(map[string]*dockertypes.ImageHistory),
+		imageToLayerIDs: make(map[string][]string),
+		c:               c,
+	}
+}
+
+func (isp *imageStatsProvider) ImageStats() (*runtime.ImageStats, error) {
+	images, err := isp.c.ListImages(dockertypes.ImageListOptions{})
+	if err != nil {
+		return nil, fmt.Errorf("failed to list docker images - %v", err)
+	}
+	// Take the lock to protect the cache
+	isp.Lock()
+	defer isp.Unlock()
+	// Create a new cache each time; this consumes a little more memory, but:
+	// * ImageStats is only called every 10 seconds
+	// * We use pointers and references to copy cache elements.
+	// The memory usage should be acceptable.
+	// TODO(random-liu): Add more logic to implement an in-place cache update.
+	newLayers := make(map[string]*dockertypes.ImageHistory)
+	newImageToLayerIDs := make(map[string][]string)
+	for _, image := range images {
+		layerIDs, ok := isp.imageToLayerIDs[image.ID]
+		if !ok {
+			// Get information about the various layers of the given docker image.
+			history, err := isp.c.ImageHistory(image.ID)
+			if err != nil {
+				// Skip the image and inspect it again in the next ImageStats call if the image is still there.
+				glog.V(2).Infof("failed to get history of docker image %+v - %v", image, err)
+				continue
+			}
+			// Cache each layer
+			for i := range history {
+				layer := &history[i]
+				key := layer.ID
+				// Some of the layers are empty.
+				// We are hoping that these layers are unique to each image.
+				// Still keying with the CreatedBy field to be safe.
+				if key == "" || key == "<missing>" {
+					key = key + layer.CreatedBy
+				}
+				layerIDs = append(layerIDs, key)
+				newLayers[key] = layer
+			}
+		} else {
+			for _, layerID := range layerIDs {
+				newLayers[layerID] = isp.layers[layerID]
+			}
+		}
+		newImageToLayerIDs[image.ID] = layerIDs
+	}
+	ret := &runtime.ImageStats{}
+	// Calculate the total storage bytes
+	for _, layer := range newLayers {
+		ret.TotalStorageBytes += uint64(layer.Size)
+	}
+	// Update the current cache
+	isp.layers = newLayers
+	isp.imageToLayerIDs = newImageToLayerIDs
+	return ret, nil
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/dockertools/images_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/dockertools/images_test.go
new file mode 100644
index 000000000000..6ef355629589
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/dockertools/images_test.go
@@ -0,0 +1,334 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package dockertools + +import ( + "testing" + + dockertypes "github.com/docker/engine-api/types" + "github.com/stretchr/testify/assert" +) + +func TestImageStatsNoImages(t *testing.T) { + fakeDockerClient := NewFakeDockerClientWithVersion("1.2.3", "1.2") + isp := newImageStatsProvider(fakeDockerClient) + st, err := isp.ImageStats() + as := assert.New(t) + as.NoError(err) + as.NoError(fakeDockerClient.AssertCalls([]string{"list_images"})) + as.Equal(st.TotalStorageBytes, uint64(0)) +} + +func TestImageStatsWithImages(t *testing.T) { + fakeDockerClient := NewFakeDockerClientWithVersion("1.2.3", "1.2") + fakeHistoryData := map[string][]dockertypes.ImageHistory{ + "busybox": { + { + ID: "0123456", + CreatedBy: "foo", + Size: 100, + }, + { + ID: "0123457", + CreatedBy: "duplicate", + Size: 200, + }, + { + ID: "", + CreatedBy: "baz", + Size: 300, + }, + }, + "kubelet": { + { + ID: "1123456", + CreatedBy: "foo", + Size: 200, + }, + { + ID: "", + CreatedBy: "1baz", + Size: 400, + }, + }, + "busybox-new": { + { + ID: "01234567", + CreatedBy: "foo", + Size: 100, + }, + { + ID: "0123457", + CreatedBy: "duplicate", + Size: 200, + }, + { + ID: "", + CreatedBy: "baz", + Size: 300, + }, + }, + } + fakeDockerClient.InjectImageHistory(fakeHistoryData) + fakeDockerClient.InjectImages([]dockertypes.Image{ + { + ID: "busybox", + }, + { + ID: "kubelet", + }, + { + ID: "busybox-new", + }, + }) + isp := newImageStatsProvider(fakeDockerClient) + st, err := isp.ImageStats() + as := assert.New(t) + as.NoError(err) + as.NoError(fakeDockerClient.AssertCalls([]string{"list_images", "image_history", "image_history", "image_history"})) + const expectedOutput uint64 = 1300 + as.Equal(expectedOutput, st.TotalStorageBytes, "expected %d, got %d", expectedOutput, st.TotalStorageBytes) +} + +func TestImageStatsWithCachedImages(t *testing.T) { + for _, test := range []struct { + oldLayers map[string]*dockertypes.ImageHistory + oldImageToLayerIDs map[string][]string + images []dockertypes.Image + history map[string][]dockertypes.ImageHistory + expectedCalls []string + expectedLayers map[string]*dockertypes.ImageHistory + expectedImageToLayerIDs map[string][]string + expectedTotalStorageSize uint64 + }{ + { + // No cache + oldLayers: make(map[string]*dockertypes.ImageHistory), + oldImageToLayerIDs: make(map[string][]string), + images: []dockertypes.Image{ + { + ID: "busybox", + }, + { + ID: "kubelet", + }, + }, + history: map[string][]dockertypes.ImageHistory{ + "busybox": { + { + ID: "0123456", + CreatedBy: "foo", + Size: 100, + }, + { + ID: "", + CreatedBy: "baz", + Size: 300, + }, + }, + "kubelet": { + { + ID: "1123456", + CreatedBy: "foo", + Size: 200, + }, + { + ID: "", + CreatedBy: "1baz", + Size: 400, + }, + }, + }, + expectedCalls: []string{"list_images", "image_history", "image_history"}, + expectedLayers: map[string]*dockertypes.ImageHistory{ + "0123456": { + ID: "0123456", + CreatedBy: "foo", + Size: 100, + }, + "1123456": { + ID: "1123456", + CreatedBy: "foo", + Size: 200, + }, + "baz": { + ID: "", + CreatedBy: "baz", + Size: 300, + }, + "1baz": { + ID: "", + CreatedBy: "1baz", + Size: 400, + }, + }, + expectedImageToLayerIDs: map[string][]string{ + "busybox": {"0123456", "baz"}, + "kubelet": {"1123456", "1baz"}, + }, + expectedTotalStorageSize: 1000, + }, + { + // Use cache value + oldLayers: map[string]*dockertypes.ImageHistory{ + "0123456": { + ID: "0123456", + CreatedBy: "foo", + Size: 100, + }, + "baz": { + ID: "", + CreatedBy: "baz", + Size: 300, + }, + }, + oldImageToLayerIDs: map[string][]string{ + 
"busybox": {"0123456", "baz"}, + }, + images: []dockertypes.Image{ + { + ID: "busybox", + }, + { + ID: "kubelet", + }, + }, + history: map[string][]dockertypes.ImageHistory{ + "busybox": { + { + ID: "0123456", + CreatedBy: "foo", + Size: 100, + }, + { + ID: "", + CreatedBy: "baz", + Size: 300, + }, + }, + "kubelet": { + { + ID: "1123456", + CreatedBy: "foo", + Size: 200, + }, + { + ID: "", + CreatedBy: "1baz", + Size: 400, + }, + }, + }, + expectedCalls: []string{"list_images", "image_history"}, + expectedLayers: map[string]*dockertypes.ImageHistory{ + "0123456": { + ID: "0123456", + CreatedBy: "foo", + Size: 100, + }, + "1123456": { + ID: "1123456", + CreatedBy: "foo", + Size: 200, + }, + "baz": { + ID: "", + CreatedBy: "baz", + Size: 300, + }, + "1baz": { + ID: "", + CreatedBy: "1baz", + Size: 400, + }, + }, + expectedImageToLayerIDs: map[string][]string{ + "busybox": {"0123456", "baz"}, + "kubelet": {"1123456", "1baz"}, + }, + expectedTotalStorageSize: 1000, + }, + { + // Unused cache value + oldLayers: map[string]*dockertypes.ImageHistory{ + "0123456": { + ID: "0123456", + CreatedBy: "foo", + Size: 100, + }, + "baz": { + ID: "", + CreatedBy: "baz", + Size: 300, + }, + }, + oldImageToLayerIDs: map[string][]string{ + "busybox": {"0123456", "baz"}, + }, + images: []dockertypes.Image{ + { + ID: "kubelet", + }, + }, + history: map[string][]dockertypes.ImageHistory{ + "kubelet": { + { + ID: "1123456", + CreatedBy: "foo", + Size: 200, + }, + { + ID: "", + CreatedBy: "1baz", + Size: 400, + }, + }, + }, + expectedCalls: []string{"list_images", "image_history"}, + expectedLayers: map[string]*dockertypes.ImageHistory{ + "1123456": { + ID: "1123456", + CreatedBy: "foo", + Size: 200, + }, + "1baz": { + ID: "", + CreatedBy: "1baz", + Size: 400, + }, + }, + expectedImageToLayerIDs: map[string][]string{ + "kubelet": {"1123456", "1baz"}, + }, + expectedTotalStorageSize: 600, + }, + } { + fakeDockerClient := NewFakeDockerClientWithVersion("1.2.3", "1.2") + fakeDockerClient.InjectImages(test.images) + fakeDockerClient.InjectImageHistory(test.history) + isp := newImageStatsProvider(fakeDockerClient) + isp.layers = test.oldLayers + isp.imageToLayerIDs = test.oldImageToLayerIDs + st, err := isp.ImageStats() + as := assert.New(t) + as.NoError(err) + as.NoError(fakeDockerClient.AssertCalls(test.expectedCalls)) + as.Equal(test.expectedLayers, isp.layers, "expected %+v, got %+v", test.expectedLayers, isp.layers) + as.Equal(test.expectedImageToLayerIDs, isp.imageToLayerIDs, "expected %+v, got %+v", test.expectedImageToLayerIDs, isp.imageToLayerIDs) + as.Equal(test.expectedTotalStorageSize, st.TotalStorageBytes, "expected %d, got %d", test.expectedTotalStorageSize, st.TotalStorageBytes) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/dockertools/instrumented_docker.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/dockertools/instrumented_docker.go new file mode 100644 index 000000000000..36e3fdd637c1 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/dockertools/instrumented_docker.go @@ -0,0 +1,215 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dockertools + +import ( + "time" + + dockertypes "github.com/docker/engine-api/types" + "k8s.io/kubernetes/pkg/kubelet/metrics" +) + +// instrumentedDockerInterface wraps the DockerInterface and records the operations +// and errors metrics. +type instrumentedDockerInterface struct { + client DockerInterface +} + +// Creates an instrumented DockerInterface from an existing DockerInterface. +func newInstrumentedDockerInterface(dockerClient DockerInterface) DockerInterface { + return instrumentedDockerInterface{ + client: dockerClient, + } +} + +// recordOperation records the duration of the operation. +func recordOperation(operation string, start time.Time) { + metrics.DockerOperations.WithLabelValues(operation).Inc() + metrics.DockerOperationsLatency.WithLabelValues(operation).Observe(metrics.SinceInMicroseconds(start)) +} + +// recordError records error for metric if an error occurred. +func recordError(operation string, err error) { + if err != nil { + if _, ok := err.(operationTimeout); ok { + metrics.DockerOperationsTimeout.WithLabelValues(operation).Inc() + } + // Docker operation timeout error is also a docker error, so we don't add else here. + metrics.DockerOperationsErrors.WithLabelValues(operation).Inc() + } +} + +func (in instrumentedDockerInterface) ListContainers(options dockertypes.ContainerListOptions) ([]dockertypes.Container, error) { + const operation = "list_containers" + defer recordOperation(operation, time.Now()) + + out, err := in.client.ListContainers(options) + recordError(operation, err) + return out, err +} + +func (in instrumentedDockerInterface) InspectContainer(id string) (*dockertypes.ContainerJSON, error) { + const operation = "inspect_container" + defer recordOperation(operation, time.Now()) + + out, err := in.client.InspectContainer(id) + recordError(operation, err) + return out, err +} + +func (in instrumentedDockerInterface) CreateContainer(opts dockertypes.ContainerCreateConfig) (*dockertypes.ContainerCreateResponse, error) { + const operation = "create_container" + defer recordOperation(operation, time.Now()) + + out, err := in.client.CreateContainer(opts) + recordError(operation, err) + return out, err +} + +func (in instrumentedDockerInterface) StartContainer(id string) error { + const operation = "start_container" + defer recordOperation(operation, time.Now()) + + err := in.client.StartContainer(id) + recordError(operation, err) + return err +} + +func (in instrumentedDockerInterface) StopContainer(id string, timeout int) error { + const operation = "stop_container" + defer recordOperation(operation, time.Now()) + + err := in.client.StopContainer(id, timeout) + recordError(operation, err) + return err +} + +func (in instrumentedDockerInterface) RemoveContainer(id string, opts dockertypes.ContainerRemoveOptions) error { + const operation = "remove_container" + defer recordOperation(operation, time.Now()) + + err := in.client.RemoveContainer(id, opts) + recordError(operation, err) + return err +} + +func (in instrumentedDockerInterface) InspectImage(image string) (*dockertypes.ImageInspect, error) { + const operation = "inspect_image" + defer 
recordOperation(operation, time.Now()) + + out, err := in.client.InspectImage(image) + recordError(operation, err) + return out, err +} + +func (in instrumentedDockerInterface) ListImages(opts dockertypes.ImageListOptions) ([]dockertypes.Image, error) { + const operation = "list_images" + defer recordOperation(operation, time.Now()) + + out, err := in.client.ListImages(opts) + recordError(operation, err) + return out, err +} + +func (in instrumentedDockerInterface) PullImage(imageID string, auth dockertypes.AuthConfig, opts dockertypes.ImagePullOptions) error { + const operation = "pull_image" + defer recordOperation(operation, time.Now()) + err := in.client.PullImage(imageID, auth, opts) + recordError(operation, err) + return err +} + +func (in instrumentedDockerInterface) RemoveImage(image string, opts dockertypes.ImageRemoveOptions) ([]dockertypes.ImageDelete, error) { + const operation = "remove_image" + defer recordOperation(operation, time.Now()) + + imageDelete, err := in.client.RemoveImage(image, opts) + recordError(operation, err) + return imageDelete, err +} + +func (in instrumentedDockerInterface) Logs(id string, opts dockertypes.ContainerLogsOptions, sopts StreamOptions) error { + const operation = "logs" + defer recordOperation(operation, time.Now()) + + err := in.client.Logs(id, opts, sopts) + recordError(operation, err) + return err +} + +func (in instrumentedDockerInterface) Version() (*dockertypes.Version, error) { + const operation = "version" + defer recordOperation(operation, time.Now()) + + out, err := in.client.Version() + recordError(operation, err) + return out, err +} + +func (in instrumentedDockerInterface) Info() (*dockertypes.Info, error) { + const operation = "info" + defer recordOperation(operation, time.Now()) + + out, err := in.client.Info() + recordError(operation, err) + return out, err +} + +func (in instrumentedDockerInterface) CreateExec(id string, opts dockertypes.ExecConfig) (*dockertypes.ContainerExecCreateResponse, error) { + const operation = "create_exec" + defer recordOperation(operation, time.Now()) + + out, err := in.client.CreateExec(id, opts) + recordError(operation, err) + return out, err +} + +func (in instrumentedDockerInterface) StartExec(startExec string, opts dockertypes.ExecStartCheck, sopts StreamOptions) error { + const operation = "start_exec" + defer recordOperation(operation, time.Now()) + + err := in.client.StartExec(startExec, opts, sopts) + recordError(operation, err) + return err +} + +func (in instrumentedDockerInterface) InspectExec(id string) (*dockertypes.ContainerExecInspect, error) { + const operation = "inspect_exec" + defer recordOperation(operation, time.Now()) + + out, err := in.client.InspectExec(id) + recordError(operation, err) + return out, err +} + +func (in instrumentedDockerInterface) AttachToContainer(id string, opts dockertypes.ContainerAttachOptions, sopts StreamOptions) error { + const operation = "attach" + defer recordOperation(operation, time.Now()) + + err := in.client.AttachToContainer(id, opts, sopts) + recordError(operation, err) + return err +} + +func (in instrumentedDockerInterface) ImageHistory(id string) ([]dockertypes.ImageHistory, error) { + const operation = "image_history" + defer recordOperation(operation, time.Now()) + + out, err := in.client.ImageHistory(id) + recordError(operation, err) + return out, err +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/dockertools/kube_docker_client.go 
b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/dockertools/kube_docker_client.go
new file mode 100644
index 000000000000..8a0905269a6c
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/dockertools/kube_docker_client.go
@@ -0,0 +1,517 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package dockertools
+
+import (
+	"bytes"
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"sync"
+	"time"
+
+	"github.com/golang/glog"
+
+	dockermessage "github.com/docker/docker/pkg/jsonmessage"
+	dockerstdcopy "github.com/docker/docker/pkg/stdcopy"
+	dockerapi "github.com/docker/engine-api/client"
+	dockertypes "github.com/docker/engine-api/types"
+	"golang.org/x/net/context"
+)
+
+// kubeDockerClient is a wrapped layer of docker client for kubelet internal use. This layer is added to:
+//	1) Redirect streams for exec and attach operations.
+//	2) Wrap the context in this layer to make the DockerInterface cleaner.
+//	3) Stabilize the DockerInterface. The engine-api is still under active development and its
+//	   interface is not yet stable. However, DockerInterface is used in many files across
+//	   Kubernetes, so we do not want to change it frequently. With this layer, we can adapt the
+//	   engine-api to the DockerInterface and avoid changing DockerInterface as much as possible.
+// (See
+//  * https://github.com/docker/engine-api/issues/89
+//  * https://github.com/docker/engine-api/issues/137
+//  * https://github.com/docker/engine-api/pull/140)
+// TODO(random-liu): Switch to the new docker interface by refactoring the functions in the old
+// DockerInterface one by one.
+type kubeDockerClient struct {
+	client *dockerapi.Client
+}
+
+// Make sure that kubeDockerClient implements the DockerInterface.
+var _ DockerInterface = &kubeDockerClient{}
+
+const (
+	// defaultTimeout is the default timeout of all docker operations.
+	defaultTimeout = 2 * time.Minute
+
+	// defaultShmSize is the default ShmSize to use (in bytes) if not specified.
+	defaultShmSize = int64(1024 * 1024 * 64)
+
+	// defaultImagePullingProgressReportInterval is the default interval of image pulling progress reporting.
+	defaultImagePullingProgressReportInterval = 10 * time.Second
+)
+
+// newKubeDockerClient creates a kubeDockerClient from an existing docker client.
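+//
+// Callers typically build the underlying engine-api client first and then
+// wrap it; a sketch (assuming engine-api's NewClient of this era takes the
+// host, the API version, an optional *http.Client, and extra headers):
+//
+//	cli, err := dockerapi.NewClient("unix:///var/run/docker.sock", "1.22", nil, nil)
+//	if err != nil {
+//		return err
+//	}
+//	var client DockerInterface = newKubeDockerClient(cli)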
+func newKubeDockerClient(dockerClient *dockerapi.Client) DockerInterface { + return &kubeDockerClient{ + client: dockerClient, + } +} + +func (k *kubeDockerClient) ListContainers(options dockertypes.ContainerListOptions) ([]dockertypes.Container, error) { + ctx, cancel := getDefaultContext() + defer cancel() + containers, err := k.client.ContainerList(ctx, options) + if ctxErr := contextError(ctx); ctxErr != nil { + return nil, ctxErr + } + if err != nil { + return nil, err + } + return containers, nil +} + +func (d *kubeDockerClient) InspectContainer(id string) (*dockertypes.ContainerJSON, error) { + ctx, cancel := getDefaultContext() + defer cancel() + containerJSON, err := d.client.ContainerInspect(ctx, id) + if ctxErr := contextError(ctx); ctxErr != nil { + return nil, ctxErr + } + if err != nil { + if dockerapi.IsErrContainerNotFound(err) { + return nil, containerNotFoundError{ID: id} + } + return nil, err + } + return &containerJSON, nil +} + +func (d *kubeDockerClient) CreateContainer(opts dockertypes.ContainerCreateConfig) (*dockertypes.ContainerCreateResponse, error) { + ctx, cancel := getDefaultContext() + defer cancel() + // we provide an explicit default shm size as to not depend on docker daemon. + // TODO: evaluate exposing this as a knob in the API + if opts.HostConfig != nil && opts.HostConfig.ShmSize <= 0 { + opts.HostConfig.ShmSize = defaultShmSize + } + createResp, err := d.client.ContainerCreate(ctx, opts.Config, opts.HostConfig, opts.NetworkingConfig, opts.Name) + if ctxErr := contextError(ctx); ctxErr != nil { + return nil, ctxErr + } + if err != nil { + return nil, err + } + return &createResp, nil +} + +func (d *kubeDockerClient) StartContainer(id string) error { + ctx, cancel := getDefaultContext() + defer cancel() + err := d.client.ContainerStart(ctx, id) + if ctxErr := contextError(ctx); ctxErr != nil { + return ctxErr + } + return err +} + +// Stopping an already stopped container will not cause an error in engine-api. 
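+// Like every method in this file, StopContainer follows the same timeout idiom:
+// create a context bounded by defaultTimeout, run the engine-api call, then check
+// contextError first so a deadline shows up as operationTimeout rather than a
+// generic API error. In sketch form (apiCall is a placeholder):
+//
+//	ctx, cancel := getDefaultContext()
+//	defer cancel()
+//	out, err := apiCall(ctx)
+//	if ctxErr := contextError(ctx); ctxErr != nil {
+//		return ctxErr // operation timed out
+//	}
+//	return err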
+func (d *kubeDockerClient) StopContainer(id string, timeout int) error {
+	ctx, cancel := getDefaultContext()
+	defer cancel()
+	err := d.client.ContainerStop(ctx, id, timeout)
+	if ctxErr := contextError(ctx); ctxErr != nil {
+		return ctxErr
+	}
+	return err
+}
+
+func (d *kubeDockerClient) RemoveContainer(id string, opts dockertypes.ContainerRemoveOptions) error {
+	ctx, cancel := getDefaultContext()
+	defer cancel()
+	err := d.client.ContainerRemove(ctx, id, opts)
+	if ctxErr := contextError(ctx); ctxErr != nil {
+		return ctxErr
+	}
+	return err
+}
+
+func (d *kubeDockerClient) InspectImage(image string) (*dockertypes.ImageInspect, error) {
+	ctx, cancel := getDefaultContext()
+	defer cancel()
+	resp, _, err := d.client.ImageInspectWithRaw(ctx, image, true)
+	if ctxErr := contextError(ctx); ctxErr != nil {
+		return nil, ctxErr
+	}
+	if err != nil {
+		if dockerapi.IsErrImageNotFound(err) {
+			err = imageNotFoundError{ID: image}
+		}
+		return nil, err
+	}
+	return &resp, nil
+}
+
+func (d *kubeDockerClient) ImageHistory(id string) ([]dockertypes.ImageHistory, error) {
+	ctx, cancel := getDefaultContext()
+	defer cancel()
+	resp, err := d.client.ImageHistory(ctx, id)
+	if ctxErr := contextError(ctx); ctxErr != nil {
+		return nil, ctxErr
+	}
+	return resp, err
+}
+
+func (d *kubeDockerClient) ListImages(opts dockertypes.ImageListOptions) ([]dockertypes.Image, error) {
+	ctx, cancel := getDefaultContext()
+	defer cancel()
+	images, err := d.client.ImageList(ctx, opts)
+	if ctxErr := contextError(ctx); ctxErr != nil {
+		return nil, ctxErr
+	}
+	if err != nil {
+		return nil, err
+	}
+	return images, nil
+}
+
+func base64EncodeAuth(auth dockertypes.AuthConfig) (string, error) {
+	var buf bytes.Buffer
+	if err := json.NewEncoder(&buf).Encode(auth); err != nil {
+		return "", err
+	}
+	return base64.URLEncoding.EncodeToString(buf.Bytes()), nil
+}
+
+// progress is a wrapper of dockermessage.JSONMessage with a lock protecting it.
+type progress struct {
+	sync.RWMutex
+	// message stores the latest docker json message.
+	message *dockermessage.JSONMessage
+}
+
+func (p *progress) set(msg *dockermessage.JSONMessage) {
+	p.Lock()
+	defer p.Unlock()
+	p.message = msg
+}
+
+func (p *progress) get() string {
+	p.RLock()
+	defer p.RUnlock()
+	if p.message == nil {
+		return "No progress"
+	}
+	var prefix string
+	if p.message.ID != "" {
+		prefix = fmt.Sprintf("%s: ", p.message.ID)
+	}
+	if p.message.Progress == nil {
+		return fmt.Sprintf("%s%s", prefix, p.message.Status)
+	}
+	return fmt.Sprintf("%s%s %s", prefix, p.message.Status, p.message.Progress.String())
+}
+
+// progressReporter keeps the newest image pulling progress and periodically reports it.
+type progressReporter struct {
+	progress
+	image    string
+	interval time.Duration
+	stopCh   chan struct{}
+}
+
+// newProgressReporter creates a new progressReporter for a specific image with the specified reporting interval.
+func newProgressReporter(image string, interval time.Duration) *progressReporter {
+	return &progressReporter{image: image, interval: interval, stopCh: make(chan struct{})}
+}
+
+// start starts the progressReporter.
+func (p *progressReporter) start() {
+	go func() {
+		ticker := time.NewTicker(p.interval)
+		defer ticker.Stop()
+		for {
+			// TODO(random-liu): Report as events.
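+			// The select below wakes on each tick to log a snapshot of the
+			// newest progress, and exits when stop() closes stopCh; the
+			// deferred ticker.Stop() above then releases the ticker.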
+			select {
+			case <-ticker.C:
+				glog.V(2).Infof("Pulling image %q: %q", p.image, p.progress.get())
+			case <-p.stopCh:
+				glog.V(2).Infof("Stop pulling image %q: %q", p.image, p.progress.get())
+				return
+			}
+		}
+	}()
+}
+
+// stop stops the progressReporter.
+func (p *progressReporter) stop() {
+	close(p.stopCh)
+}
+
+func (d *kubeDockerClient) PullImage(image string, auth dockertypes.AuthConfig, opts dockertypes.ImagePullOptions) error {
+	// RegistryAuth is the base64 encoded credentials for the registry
+	base64Auth, err := base64EncodeAuth(auth)
+	if err != nil {
+		return err
+	}
+	opts.RegistryAuth = base64Auth
+	// Don't set a timeout for the context, because image pulling can take an
+	// arbitrarily long time.
+	resp, err := d.client.ImagePull(context.Background(), image, opts)
+	if err != nil {
+		return err
+	}
+	defer resp.Close()
+	reporter := newProgressReporter(image, defaultImagePullingProgressReportInterval)
+	reporter.start()
+	defer reporter.stop()
+	decoder := json.NewDecoder(resp)
+	for {
+		var msg dockermessage.JSONMessage
+		err := decoder.Decode(&msg)
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			return err
+		}
+		if msg.Error != nil {
+			return msg.Error
+		}
+		reporter.set(&msg)
+	}
+	return nil
+}
+
+func (d *kubeDockerClient) RemoveImage(image string, opts dockertypes.ImageRemoveOptions) ([]dockertypes.ImageDelete, error) {
+	ctx, cancel := getDefaultContext()
+	defer cancel()
+	resp, err := d.client.ImageRemove(ctx, image, opts)
+	if ctxErr := contextError(ctx); ctxErr != nil {
+		return nil, ctxErr
+	}
+	return resp, err
+}
+
+func (d *kubeDockerClient) Logs(id string, opts dockertypes.ContainerLogsOptions, sopts StreamOptions) error {
+	ctx, cancel := getDefaultContext()
+	defer cancel()
+	resp, err := d.client.ContainerLogs(ctx, id, opts)
+	if ctxErr := contextError(ctx); ctxErr != nil {
+		return ctxErr
+	}
+	if err != nil {
+		return err
+	}
+	defer resp.Close()
+	return d.redirectResponseToOutputStream(sopts.RawTerminal, sopts.OutputStream, sopts.ErrorStream, resp)
+}
+
+func (d *kubeDockerClient) Version() (*dockertypes.Version, error) {
+	ctx, cancel := getDefaultContext()
+	defer cancel()
+	resp, err := d.client.ServerVersion(ctx)
+	if ctxErr := contextError(ctx); ctxErr != nil {
+		return nil, ctxErr
+	}
+	if err != nil {
+		return nil, err
+	}
+	return &resp, nil
+}
+
+func (d *kubeDockerClient) Info() (*dockertypes.Info, error) {
+	ctx, cancel := getDefaultContext()
+	defer cancel()
+	resp, err := d.client.Info(ctx)
+	if ctxErr := contextError(ctx); ctxErr != nil {
+		return nil, ctxErr
+	}
+	if err != nil {
+		return nil, err
+	}
+	return &resp, nil
+}
+
+// TODO(random-liu): Add unit tests for the exec and attach functions, just like what go-dockerclient did.
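+//
+// The exec flow below spans three calls: CreateExec registers the command
+// and returns an exec ID, StartExec runs it (attaching streams unless
+// Detach is set), and InspectExec reads the exit status afterwards. A
+// sketch, with containerID standing in for a real container:
+//
+//	resp, _ := client.CreateExec(containerID, dockertypes.ExecConfig{Cmd: []string{"ls"}})
+//	_ = client.StartExec(resp.ID, dockertypes.ExecStartCheck{}, StreamOptions{OutputStream: os.Stdout})
+//	inspect, _ := client.InspectExec(resp.ID)
+//	// inspect.ExitCode holds the command's exit status.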
+func (d *kubeDockerClient) CreateExec(id string, opts dockertypes.ExecConfig) (*dockertypes.ContainerExecCreateResponse, error) {
+	ctx, cancel := getDefaultContext()
+	defer cancel()
+	resp, err := d.client.ContainerExecCreate(ctx, id, opts)
+	if ctxErr := contextError(ctx); ctxErr != nil {
+		return nil, ctxErr
+	}
+	if err != nil {
+		return nil, err
+	}
+	return &resp, nil
+}
+
+func (d *kubeDockerClient) StartExec(startExec string, opts dockertypes.ExecStartCheck, sopts StreamOptions) error {
+	ctx, cancel := getDefaultContext()
+	defer cancel()
+	if opts.Detach {
+		err := d.client.ContainerExecStart(ctx, startExec, opts)
+		if ctxErr := contextError(ctx); ctxErr != nil {
+			return ctxErr
+		}
+		return err
+	}
+	resp, err := d.client.ContainerExecAttach(ctx, startExec, dockertypes.ExecConfig{
+		Detach: opts.Detach,
+		Tty:    opts.Tty,
+	})
+	if ctxErr := contextError(ctx); ctxErr != nil {
+		return ctxErr
+	}
+	if err != nil {
+		return err
+	}
+	defer resp.Close()
+	return d.holdHijackedConnection(sopts.RawTerminal || opts.Tty, sopts.InputStream, sopts.OutputStream, sopts.ErrorStream, resp)
+}
+
+func (d *kubeDockerClient) InspectExec(id string) (*dockertypes.ContainerExecInspect, error) {
+	ctx, cancel := getDefaultContext()
+	defer cancel()
+	resp, err := d.client.ContainerExecInspect(ctx, id)
+	if ctxErr := contextError(ctx); ctxErr != nil {
+		return nil, ctxErr
+	}
+	if err != nil {
+		return nil, err
+	}
+	return &resp, nil
+}
+
+func (d *kubeDockerClient) AttachToContainer(id string, opts dockertypes.ContainerAttachOptions, sopts StreamOptions) error {
+	ctx, cancel := getDefaultContext()
+	defer cancel()
+	resp, err := d.client.ContainerAttach(ctx, id, opts)
+	if ctxErr := contextError(ctx); ctxErr != nil {
+		return ctxErr
+	}
+	if err != nil {
+		return err
+	}
+	defer resp.Close()
+	return d.holdHijackedConnection(sopts.RawTerminal, sopts.InputStream, sopts.OutputStream, sopts.ErrorStream, resp)
+}
+
+// redirectResponseToOutputStream redirects the response stream to stdout and stderr. When tty is true,
+// all output is redirected to stdout only.
+func (d *kubeDockerClient) redirectResponseToOutputStream(tty bool, outputStream, errorStream io.Writer, resp io.Reader) error {
+	if outputStream == nil {
+		outputStream = ioutil.Discard
+	}
+	if errorStream == nil {
+		errorStream = ioutil.Discard
+	}
+	var err error
+	if tty {
+		_, err = io.Copy(outputStream, resp)
+	} else {
+		_, err = dockerstdcopy.StdCopy(outputStream, errorStream, resp)
+	}
+	return err
+}
+
+// holdHijackedConnection holds the HijackedResponse, redirects the inputStream to the connection, and
+// redirects the response stream to stdout and stderr. NOTE: If needed, we could also add a context to
+// this function.
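+//
+// When tty is false, docker multiplexes stdout and stderr over a single
+// connection using 8-byte frame headers, and dockerstdcopy.StdCopy splits
+// them back apart; when tty is true there is only one raw stream. The two
+// paths in redirectResponseToOutputStream above therefore reduce to:
+//
+//	io.Copy(outputStream, resp.Reader)                            // tty: raw, single stream
+//	dockerstdcopy.StdCopy(outputStream, errorStream, resp.Reader) // no tty: framed, demultiplexed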
+func (d *kubeDockerClient) holdHijackedConnection(tty bool, inputStream io.Reader, outputStream, errorStream io.Writer, resp dockertypes.HijackedResponse) error {
+	receiveStdout := make(chan error)
+	if outputStream != nil || errorStream != nil {
+		go func() {
+			receiveStdout <- d.redirectResponseToOutputStream(tty, outputStream, errorStream, resp.Reader)
+		}()
+	}
+
+	stdinDone := make(chan struct{})
+	go func() {
+		if inputStream != nil {
+			io.Copy(resp.Conn, inputStream)
+		}
+		resp.CloseWrite()
+		close(stdinDone)
+	}()
+
+	select {
+	case err := <-receiveStdout:
+		return err
+	case <-stdinDone:
+		if outputStream != nil || errorStream != nil {
+			return <-receiveStdout
+		}
+	}
+	return nil
+}
+
+// parseDockerTimestamp parses the timestamp returned by DockerInterface from string to time.Time.
+func parseDockerTimestamp(s string) (time.Time, error) {
+	// Timestamps returned by Docker are in time.RFC3339Nano format.
+	return time.Parse(time.RFC3339Nano, s)
+}
+
+func getDefaultContext() (context.Context, context.CancelFunc) {
+	return context.WithTimeout(context.Background(), defaultTimeout)
+}
+
+// contextError checks the context and returns an error if the context has timed out.
+func contextError(ctx context.Context) error {
+	if ctx.Err() == context.DeadlineExceeded {
+		return operationTimeout{err: ctx.Err()}
+	}
+	return ctx.Err()
+}
+
+// StreamOptions are the options used to configure the stream redirection.
+type StreamOptions struct {
+	RawTerminal  bool
+	InputStream  io.Reader
+	OutputStream io.Writer
+	ErrorStream  io.Writer
+}
+
+// operationTimeout is the error returned when a docker operation times out.
+type operationTimeout struct {
+	err error
+}
+
+func (e operationTimeout) Error() string {
+	return fmt.Sprintf("operation timeout: %v", e.err)
+}
+
+// containerNotFoundError is the error returned by InspectContainer when the container is not found. We
+// add this error type for testability. We don't use the original error returned by engine-api
+// because dockertypes.containerNotFoundError is private and we can't create and inject it in our tests.
+type containerNotFoundError struct {
+	ID string
+}
+
+func (e containerNotFoundError) Error() string {
+	return fmt.Sprintf("no such container: %q", e.ID)
+}
+
+// imageNotFoundError is the error returned by InspectImage when the image is not found.
+type imageNotFoundError struct {
+	ID string
+}
+
+func (e imageNotFoundError) Error() string {
+	return fmt.Sprintf("no such image: %q", e.ID)
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/dockertools/labels.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/dockertools/labels.go
new file mode 100644
index 000000000000..9d2bd68b50dd
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/dockertools/labels.go
@@ -0,0 +1,229 @@
+/*
+Copyright 2015 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package dockertools
+
+import (
+	"encoding/json"
+	"strconv"
+
+	"github.com/golang/glog"
+	"k8s.io/kubernetes/pkg/api"
+	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
+	"k8s.io/kubernetes/pkg/kubelet/custommetrics"
+	"k8s.io/kubernetes/pkg/kubelet/types"
+	"k8s.io/kubernetes/pkg/kubelet/util/format"
+	"k8s.io/kubernetes/pkg/runtime"
+	kubetypes "k8s.io/kubernetes/pkg/types"
+)
+
+// This file contains all docker label related constants and functions, including:
+//  * label setters and getters
+//  * label filters (maybe in the future)
+
+const (
+	kubernetesPodDeletionGracePeriodLabel    = "io.kubernetes.pod.deletionGracePeriod"
+	kubernetesPodTerminationGracePeriodLabel = "io.kubernetes.pod.terminationGracePeriod"
+
+	kubernetesContainerHashLabel                   = "io.kubernetes.container.hash"
+	kubernetesContainerRestartCountLabel           = "io.kubernetes.container.restartCount"
+	kubernetesContainerTerminationMessagePathLabel = "io.kubernetes.container.terminationMessagePath"
+	kubernetesContainerPreStopHandlerLabel         = "io.kubernetes.container.preStopHandler"
+
+	// TODO(random-liu): Keep this for old containers, remove this when we drop support for v1.1.
+	kubernetesPodLabel = "io.kubernetes.pod.data"
+
+	cadvisorPrometheusMetricsLabel = "io.cadvisor.metric.prometheus"
+)
+
+// labelledContainerInfo is the container information that has been labelled on each docker container.
+// TODO(random-liu): The type of Hash should be made compliant with the kubelet container status.
+type labelledContainerInfo struct {
+	PodName                   string
+	PodNamespace              string
+	PodUID                    kubetypes.UID
+	PodDeletionGracePeriod    *int64
+	PodTerminationGracePeriod *int64
+	Name                      string
+	Hash                      string
+	RestartCount              int
+	TerminationMessagePath    string
+	PreStopHandler            *api.Handler
+}
+
+func newLabels(container *api.Container, pod *api.Pod, restartCount int, enableCustomMetrics bool) map[string]string {
+	labels := map[string]string{}
+	labels[types.KubernetesPodNameLabel] = pod.Name
+	labels[types.KubernetesPodNamespaceLabel] = pod.Namespace
+	labels[types.KubernetesPodUIDLabel] = string(pod.UID)
+	if pod.DeletionGracePeriodSeconds != nil {
+		labels[kubernetesPodDeletionGracePeriodLabel] = strconv.FormatInt(*pod.DeletionGracePeriodSeconds, 10)
+	}
+	if pod.Spec.TerminationGracePeriodSeconds != nil {
+		labels[kubernetesPodTerminationGracePeriodLabel] = strconv.FormatInt(*pod.Spec.TerminationGracePeriodSeconds, 10)
+	}
+
+	labels[types.KubernetesContainerNameLabel] = container.Name
+	labels[kubernetesContainerHashLabel] = strconv.FormatUint(kubecontainer.HashContainer(container), 16)
+	labels[kubernetesContainerRestartCountLabel] = strconv.Itoa(restartCount)
+	labels[kubernetesContainerTerminationMessagePathLabel] = container.TerminationMessagePath
+	if container.Lifecycle != nil && container.Lifecycle.PreStop != nil {
+		// Use JSON encoding so that the PreStop handler object is readable after being written as a label.
+		rawPreStop, err := json.Marshal(container.Lifecycle.PreStop)
+		if err != nil {
+			glog.Errorf("Unable to marshal lifecycle PreStop handler for container %q of pod %q: %v", container.Name, format.Pod(pod), err)
+		} else {
+			labels[kubernetesContainerPreStopHandlerLabel] = string(rawPreStop)
+		}
+	}
+
+	if enableCustomMetrics {
+		path, err := custommetrics.GetCAdvisorCustomMetricsDefinitionPath(container)
+		if path != nil && err == nil {
+			labels[cadvisorPrometheusMetricsLabel] = *path
+		}
+	}
+
+	return labels
+}
+
+func getContainerInfoFromLabel(labels map[string]string) *labelledContainerInfo {
+	var err error
+	containerInfo := &labelledContainerInfo{
+		PodName:                getStringValueFromLabel(labels, types.KubernetesPodNameLabel),
+		PodNamespace:           getStringValueFromLabel(labels, types.KubernetesPodNamespaceLabel),
+		PodUID:                 kubetypes.UID(getStringValueFromLabel(labels, types.KubernetesPodUIDLabel)),
+		Name:                   getStringValueFromLabel(labels, types.KubernetesContainerNameLabel),
+		Hash:                   getStringValueFromLabel(labels, kubernetesContainerHashLabel),
+		TerminationMessagePath: getStringValueFromLabel(labels, kubernetesContainerTerminationMessagePathLabel),
+	}
+	if containerInfo.RestartCount, err = getIntValueFromLabel(labels, kubernetesContainerRestartCountLabel); err != nil {
+		logError(containerInfo, kubernetesContainerRestartCountLabel, err)
+	}
+	if containerInfo.PodDeletionGracePeriod, err = getInt64PointerFromLabel(labels, kubernetesPodDeletionGracePeriodLabel); err != nil {
+		logError(containerInfo, kubernetesPodDeletionGracePeriodLabel, err)
+	}
+	if containerInfo.PodTerminationGracePeriod, err = getInt64PointerFromLabel(labels, kubernetesPodTerminationGracePeriodLabel); err != nil {
+		logError(containerInfo, kubernetesPodTerminationGracePeriodLabel, err)
+	}
+	preStopHandler := &api.Handler{}
+	if found, err := getJsonObjectFromLabel(labels, kubernetesContainerPreStopHandlerLabel, preStopHandler); err != nil {
+		logError(containerInfo, kubernetesContainerPreStopHandlerLabel, err)
+	} else if found {
+		containerInfo.PreStopHandler = preStopHandler
+	}
+	supplyContainerInfoWithOldLabel(labels, containerInfo)
+	return containerInfo
+}
+
+func getStringValueFromLabel(labels map[string]string, label string) string {
+	if value, found := labels[label]; found {
+		return value
+	}
+	// Do not report an error, because there are still many old containers without this label.
+	glog.V(3).Infof("Container doesn't have label %s, it may be an old or invalid container", label)
+	// Return the empty string "" for these containers; the caller will get the value in other ways.
+	return ""
+}
+
+func getIntValueFromLabel(labels map[string]string, label string) (int, error) {
+	if strValue, found := labels[label]; found {
+		intValue, err := strconv.Atoi(strValue)
+		if err != nil {
+			// This really should not happen. Just set the value to 0 to handle this abnormal case.
+			return 0, err
+		}
+		return intValue, nil
+	}
+	// Do not report an error, because there are still many old containers without this label.
+	glog.V(3).Infof("Container doesn't have label %s, it may be an old or invalid container", label)
+	// Just set the value to 0.
+	return 0, nil
+}
+
+func getInt64PointerFromLabel(labels map[string]string, label string) (*int64, error) {
+	if strValue, found := labels[label]; found {
+		int64Value, err := strconv.ParseInt(strValue, 10, 64)
+		if err != nil {
+			return nil, err
+		}
+		return &int64Value, nil
+	}
+	// Because it's normal that a container has no PodDeletionGracePeriod and PodTerminationGracePeriod
+	// labels, don't report any error here.
+	return nil, nil
+}
+
+// getJsonObjectFromLabel returns a bool value indicating whether an object was found.
+func getJsonObjectFromLabel(labels map[string]string, label string, value interface{}) (bool, error) {
+	if strValue, found := labels[label]; found {
+		err := json.Unmarshal([]byte(strValue), value)
+		return found, err
+	}
+	// Because it's normal that a container has no PreStopHandler label, don't report any error here.
+	return false, nil
+}
+
+// The label kubernetesPodLabel was added a long time ago (#7421); it serialized the whole api.Pod into a
+// docker label. We want to remove this label because it serialized too much useless information. However,
+// kubelet may still have to work with old containers that only carry this label for a long time, until we
+// completely deprecate it. Until then, to ensure correctness, we have to supply information from the old
+// label when the newly added labels are not available.
+// TODO(random-liu): Remove this function when we can completely remove the label kubernetesPodLabel,
+// probably after dropping support for v1.1.
+func supplyContainerInfoWithOldLabel(labels map[string]string, containerInfo *labelledContainerInfo) {
+	// Get the api.Pod from the old label.
+	var pod *api.Pod
+	data, found := labels[kubernetesPodLabel]
+	if !found {
+		// Don't report any error here; it's normal that a container has no pod label, especially
+		// while we gradually deprecate the old label.
+		return
+	}
+	pod = &api.Pod{}
+	if err := runtime.DecodeInto(api.Codecs.UniversalDecoder(), []byte(data), pod); err != nil {
+		// If the pod label can't be parsed, we should report an error.
+		logError(containerInfo, kubernetesPodLabel, err)
+		return
+	}
+	if containerInfo.PodDeletionGracePeriod == nil {
+		containerInfo.PodDeletionGracePeriod = pod.DeletionGracePeriodSeconds
+	}
+	if containerInfo.PodTerminationGracePeriod == nil {
+		containerInfo.PodTerminationGracePeriod = pod.Spec.TerminationGracePeriodSeconds
+	}
+
+	// Get the api.Container from the api.Pod.
+	var container *api.Container
+	for i := range pod.Spec.Containers {
+		if pod.Spec.Containers[i].Name == containerInfo.Name {
+			container = &pod.Spec.Containers[i]
+			break
+		}
+	}
+	if container == nil {
+		glog.Errorf("Unable to find container %q in pod %q", containerInfo.Name, format.Pod(pod))
+		return
+	}
+	if containerInfo.PreStopHandler == nil && container.Lifecycle != nil {
+		containerInfo.PreStopHandler = container.Lifecycle.PreStop
+	}
+}
+
+func logError(containerInfo *labelledContainerInfo, label string, err error) {
+	glog.Errorf("Unable to get %q for container %q of pod %q: %v", label, containerInfo.Name,
+		kubecontainer.BuildPodFullName(containerInfo.PodName, containerInfo.PodNamespace), err)
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/dockertools/labels_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/dockertools/labels_test.go
new file mode 100644
index 000000000000..48eaa8059c56
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/dockertools/labels_test.go
@@ -0,0 +1,125 @@
+/*
+Copyright 2015 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package dockertools + +import ( + "reflect" + "strconv" + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/testapi" + kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" + "k8s.io/kubernetes/pkg/kubelet/util/format" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/util/intstr" +) + +func TestLabels(t *testing.T) { + restartCount := 5 + deletionGracePeriod := int64(10) + terminationGracePeriod := int64(10) + lifecycle := &api.Lifecycle{ + // Left PostStart as nil + PreStop: &api.Handler{ + Exec: &api.ExecAction{ + Command: []string{"action1", "action2"}, + }, + HTTPGet: &api.HTTPGetAction{ + Path: "path", + Host: "host", + Port: intstr.FromInt(8080), + Scheme: "scheme", + }, + TCPSocket: &api.TCPSocketAction{ + Port: intstr.FromString("80"), + }, + }, + } + container := &api.Container{ + Name: "test_container", + TerminationMessagePath: "/somepath", + Lifecycle: lifecycle, + } + pod := &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: "test_pod", + Namespace: "test_pod_namespace", + UID: "test_pod_uid", + DeletionGracePeriodSeconds: &deletionGracePeriod, + }, + Spec: api.PodSpec{ + Containers: []api.Container{*container}, + TerminationGracePeriodSeconds: &terminationGracePeriod, + }, + } + expected := &labelledContainerInfo{ + PodName: pod.Name, + PodNamespace: pod.Namespace, + PodUID: pod.UID, + PodDeletionGracePeriod: pod.DeletionGracePeriodSeconds, + PodTerminationGracePeriod: pod.Spec.TerminationGracePeriodSeconds, + Name: container.Name, + Hash: strconv.FormatUint(kubecontainer.HashContainer(container), 16), + RestartCount: restartCount, + TerminationMessagePath: container.TerminationMessagePath, + PreStopHandler: container.Lifecycle.PreStop, + } + + // Test whether we can get right information from label + labels := newLabels(container, pod, restartCount, false) + containerInfo := getContainerInfoFromLabel(labels) + if !reflect.DeepEqual(containerInfo, expected) { + t.Errorf("expected %v, got %v", expected, containerInfo) + } + + // Test when DeletionGracePeriodSeconds, TerminationGracePeriodSeconds and Lifecycle are nil, + // the information got from label should also be nil + container.Lifecycle = nil + pod.DeletionGracePeriodSeconds = nil + pod.Spec.TerminationGracePeriodSeconds = nil + expected.PodDeletionGracePeriod = nil + expected.PodTerminationGracePeriod = nil + expected.PreStopHandler = nil + // Because container is changed, the Hash should be updated + expected.Hash = strconv.FormatUint(kubecontainer.HashContainer(container), 16) + labels = newLabels(container, pod, restartCount, false) + containerInfo = getContainerInfoFromLabel(labels) + if !reflect.DeepEqual(containerInfo, expected) { + t.Errorf("expected %v, got %v", expected, containerInfo) + } + + // Test when DeletionGracePeriodSeconds, TerminationGracePeriodSeconds and Lifecycle are nil, + // but the old label kubernetesPodLabel is set, the information got from label should also be set + pod.DeletionGracePeriodSeconds = &deletionGracePeriod + pod.Spec.TerminationGracePeriodSeconds = &terminationGracePeriod + container.Lifecycle = lifecycle + data, err := runtime.Encode(testapi.Default.Codec(), pod) + if err != nil { + t.Fatalf("Failed to encode pod %q into string: %v", format.Pod(pod), err) + } + labels[kubernetesPodLabel] = string(data) + expected.PodDeletionGracePeriod = pod.DeletionGracePeriodSeconds + expected.PodTerminationGracePeriod = pod.Spec.TerminationGracePeriodSeconds + expected.PreStopHandler = container.Lifecycle.PreStop + // Do not update expected.Hash here, 
because we directly reuse the labels from the last case and never
+	// changed the kubernetesContainerHashLabel, so expected.Hash shouldn't change.
+	containerInfo = getContainerInfoFromLabel(labels)
+	if !reflect.DeepEqual(containerInfo, expected) {
+		t.Errorf("expected %v, got %v", expected, containerInfo)
+	}
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/dockertools/manager.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/dockertools/manager.go
new file mode 100644
index 000000000000..282afd87a403
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/dockertools/manager.go
@@ -0,0 +1,2439 @@
+/*
+Copyright 2015 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package dockertools
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"os/exec"
+	"path"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	dockertypes "github.com/docker/engine-api/types"
+	dockercontainer "github.com/docker/engine-api/types/container"
+	dockerstrslice "github.com/docker/engine-api/types/strslice"
+	dockerversion "github.com/docker/engine-api/types/versions"
+	dockernat "github.com/docker/go-connections/nat"
+	"github.com/golang/glog"
+	cadvisorapi "github.com/google/cadvisor/info/v1"
+
+	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/api/unversioned"
+	"k8s.io/kubernetes/pkg/client/record"
+	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
+	"k8s.io/kubernetes/pkg/kubelet/lifecycle"
+	"k8s.io/kubernetes/pkg/kubelet/metrics"
+	"k8s.io/kubernetes/pkg/kubelet/network"
+	"k8s.io/kubernetes/pkg/kubelet/network/hairpin"
+	proberesults "k8s.io/kubernetes/pkg/kubelet/prober/results"
+	"k8s.io/kubernetes/pkg/kubelet/qos"
+	"k8s.io/kubernetes/pkg/kubelet/types"
+	"k8s.io/kubernetes/pkg/kubelet/util/cache"
+	"k8s.io/kubernetes/pkg/kubelet/util/format"
+	"k8s.io/kubernetes/pkg/runtime"
+	"k8s.io/kubernetes/pkg/securitycontext"
+	kubetypes "k8s.io/kubernetes/pkg/types"
+	"k8s.io/kubernetes/pkg/util/flowcontrol"
+	"k8s.io/kubernetes/pkg/util/oom"
+	"k8s.io/kubernetes/pkg/util/procfs"
+	utilruntime "k8s.io/kubernetes/pkg/util/runtime"
+	"k8s.io/kubernetes/pkg/util/sets"
+	utilstrings "k8s.io/kubernetes/pkg/util/strings"
+)
+
+const (
+	DockerType = "docker"
+
+	minimumDockerAPIVersion = "1.20"
+
+	dockerv110APIVersion = "1.21"
+
+	// ndots specifies the minimum number of dots that a domain name must contain for the resolver to
+	// consider it a fully-qualified domain name (FQDN). We want SRV lookup names like
+	// _dns._udp.kube-dns.default.svc to be considered relative, hence ndots is set to 5.
+	ndotsDNSOption = "options ndots:5\n"
+	// In order to avoid unnecessary SIGKILLs, give every container a minimum grace
+	// period after SIGTERM. Docker will guarantee the termination, but SIGTERM is
+	// potentially dangerous.
+ // TODO: evaluate whether there are scenarios in which SIGKILL is preferable to + // SIGTERM for certain process types, which may justify setting this to 0. + minimumGracePeriodInSeconds = 2 + + DockerNetnsFmt = "/proc/%v/ns/net" + + // String used to detect docker host mode for various namespaces (e.g. + // networking). Must match the value returned by docker inspect -f + // '{{.HostConfig.NetworkMode}}'. + namespaceModeHost = "host" + + // Remote API version for docker daemon version v1.10 + // https://docs.docker.com/engine/reference/api/docker_remote_api/ + dockerV110APIVersion = "1.22" + + // The expiration time of version cache. + versionCacheTTL = 60 * time.Second +) + +var ( + // DockerManager implements the Runtime interface. + _ kubecontainer.Runtime = &DockerManager{} + + // TODO: make this a TTL based pull (if image older than X policy, pull) + podInfraContainerImagePullPolicy = api.PullIfNotPresent + + // Default set of security options. + defaultSecurityOpt = []string{"seccomp:unconfined"} +) + +type DockerManager struct { + client DockerInterface + recorder record.EventRecorder + containerRefManager *kubecontainer.RefManager + os kubecontainer.OSInterface + machineInfo *cadvisorapi.MachineInfo + + // The image name of the pod infra container. + podInfraContainerImage string + // (Optional) Additional environment variables to be set for the pod infra container. + podInfraContainerEnv []api.EnvVar + + // TODO(yifan): Record the pull failure so we can eliminate the image checking? + // Lower level docker image puller. + dockerPuller DockerPuller + + // wrapped image puller. + imagePuller kubecontainer.ImagePuller + + // Root of the Docker runtime. + dockerRoot string + + // Directory of container logs. + containerLogsDir string + + // Network plugin. + networkPlugin network.NetworkPlugin + + // Health check results. + livenessManager proberesults.Manager + + // RuntimeHelper that wraps kubelet to generate runtime container options. + runtimeHelper kubecontainer.RuntimeHelper + + // Runner of lifecycle events. + runner kubecontainer.HandlerRunner + + // Handler used to execute commands in containers. + execHandler ExecHandler + + // Used to set OOM scores of processes. + oomAdjuster *oom.OOMAdjuster + + // Get information from /proc mount. + procFs procfs.ProcFSInterface + + // If true, enforce container cpu limits with CFS quota support + cpuCFSQuota bool + + // Container GC manager + containerGC *containerGC + + // Support for gathering custom metrics. + enableCustomMetrics bool + + // If true, the "hairpin mode" flag is set on container interfaces. + // A false value means the kubelet just backs off from setting it, + // it might already be true. + configureHairpinMode bool + + // Provides image stats + *imageStatsProvider + + // The version cache of docker daemon. + versionCache *cache.ObjectCache + + // Directory to host local seccomp profiles. + seccompProfileRoot string +} + +// A subset of the pod.Manager interface extracted for testing purposes. 
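+// Tests can therefore substitute a small stub for the full pod manager; the
+// fakePodGetter in fake_manager.go is one such stub, and a minimal inline
+// version (hypothetical, for illustration only) is just:
+//
+//	type stubPodGetter struct{ pod *api.Pod }
+//
+//	func (g stubPodGetter) GetPodByUID(kubetypes.UID) (*api.Pod, bool) {
+//		return g.pod, g.pod != nil
+//	}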
+type podGetter interface { + GetPodByUID(kubetypes.UID) (*api.Pod, bool) +} + +func PodInfraContainerEnv(env map[string]string) kubecontainer.Option { + return func(rt kubecontainer.Runtime) { + dm := rt.(*DockerManager) + for k, v := range env { + dm.podInfraContainerEnv = append(dm.podInfraContainerEnv, api.EnvVar{ + Name: k, + Value: v, + }) + } + } +} + +func NewDockerManager( + client DockerInterface, + recorder record.EventRecorder, + livenessManager proberesults.Manager, + containerRefManager *kubecontainer.RefManager, + podGetter podGetter, + machineInfo *cadvisorapi.MachineInfo, + podInfraContainerImage string, + qps float32, + burst int, + containerLogsDir string, + osInterface kubecontainer.OSInterface, + networkPlugin network.NetworkPlugin, + runtimeHelper kubecontainer.RuntimeHelper, + httpClient types.HttpGetter, + execHandler ExecHandler, + oomAdjuster *oom.OOMAdjuster, + procFs procfs.ProcFSInterface, + cpuCFSQuota bool, + imageBackOff *flowcontrol.Backoff, + serializeImagePulls bool, + enableCustomMetrics bool, + hairpinMode bool, + seccompProfileRoot string, + options ...kubecontainer.Option) *DockerManager { + // Wrap the docker client with instrumentedDockerInterface + client = newInstrumentedDockerInterface(client) + + // Work out the location of the Docker runtime, defaulting to /var/lib/docker + // if there are any problems. + dockerRoot := "/var/lib/docker" + dockerInfo, err := client.Info() + if err != nil { + glog.Errorf("Failed to execute Info() call to the Docker client: %v", err) + glog.Warningf("Using fallback default of /var/lib/docker for location of Docker runtime") + } else { + dockerRoot = dockerInfo.DockerRootDir + glog.Infof("Setting dockerRoot to %s", dockerRoot) + } + + dm := &DockerManager{ + client: client, + recorder: recorder, + containerRefManager: containerRefManager, + os: osInterface, + machineInfo: machineInfo, + podInfraContainerImage: podInfraContainerImage, + dockerPuller: newDockerPuller(client, qps, burst), + dockerRoot: dockerRoot, + containerLogsDir: containerLogsDir, + networkPlugin: networkPlugin, + livenessManager: livenessManager, + runtimeHelper: runtimeHelper, + execHandler: execHandler, + oomAdjuster: oomAdjuster, + procFs: procFs, + cpuCFSQuota: cpuCFSQuota, + enableCustomMetrics: enableCustomMetrics, + configureHairpinMode: hairpinMode, + imageStatsProvider: newImageStatsProvider(client), + seccompProfileRoot: seccompProfileRoot, + } + dm.runner = lifecycle.NewHandlerRunner(httpClient, dm, dm) + if serializeImagePulls { + dm.imagePuller = kubecontainer.NewSerializedImagePuller(kubecontainer.FilterEventRecorder(recorder), dm, imageBackOff) + } else { + dm.imagePuller = kubecontainer.NewImagePuller(kubecontainer.FilterEventRecorder(recorder), dm, imageBackOff) + } + dm.containerGC = NewContainerGC(client, podGetter, containerLogsDir) + + dm.versionCache = cache.NewObjectCache( + func() (interface{}, error) { + return dm.getVersionInfo() + }, + versionCacheTTL, + ) + + // apply optional settings.. + for _, optf := range options { + optf(dm) + } + + return dm +} + +// GetContainerLogs returns logs of a specific container. By +// default, it returns a snapshot of the container log. Set 'follow' to true to +// stream the log. Set 'follow' to false and specify the number of lines (e.g. +// "100" or "all") to tail the log. +// TODO: Make 'RawTerminal' option flagable. 
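+// As a rough caller-side sketch (the pod, container ID and writers are
+// assumed to exist), tailing the last 100 lines to stdout would look like:
+//
+//	tail := int64(100)
+//	err := dm.GetContainerLogs(pod, id, &api.PodLogOptions{TailLines: &tail}, os.Stdout, os.Stderr)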
+func (dm *DockerManager) GetContainerLogs(pod *api.Pod, containerID kubecontainer.ContainerID, logOptions *api.PodLogOptions, stdout, stderr io.Writer) (err error) { + var since int64 + if logOptions.SinceSeconds != nil { + t := unversioned.Now().Add(-time.Duration(*logOptions.SinceSeconds) * time.Second) + since = t.Unix() + } + if logOptions.SinceTime != nil { + since = logOptions.SinceTime.Unix() + } + opts := dockertypes.ContainerLogsOptions{ + ShowStdout: true, + ShowStderr: true, + Since: strconv.FormatInt(since, 10), + Timestamps: logOptions.Timestamps, + Follow: logOptions.Follow, + } + if logOptions.TailLines != nil { + opts.Tail = strconv.FormatInt(*logOptions.TailLines, 10) + } + sopts := StreamOptions{ + OutputStream: stdout, + ErrorStream: stderr, + RawTerminal: false, + } + err = dm.client.Logs(containerID.ID, opts, sopts) + return +} + +var ( + // ErrNoContainersInPod is returned when there are no containers for a given pod + ErrNoContainersInPod = errors.New("NoContainersInPod") + + // ErrNoPodInfraContainerInPod is returned when there is no pod infra container for a given pod + ErrNoPodInfraContainerInPod = errors.New("NoPodInfraContainerInPod") + + // ErrContainerCannotRun is returned when a container is created, but cannot run properly + ErrContainerCannotRun = errors.New("ContainerCannotRun") +) + +// determineContainerIP determines the IP address of the given container. It is expected +// that the container passed is the infrastructure container of a pod and the responsibility +// of the caller to ensure that the correct container is passed. +func (dm *DockerManager) determineContainerIP(podNamespace, podName string, container *dockertypes.ContainerJSON) string { + result := "" + + if container.NetworkSettings != nil { + result = container.NetworkSettings.IPAddress + + // Fall back to IPv6 address if no IPv4 address is present + if result == "" { + result = container.NetworkSettings.GlobalIPv6Address + } + } + + if dm.networkPlugin.Name() != network.DefaultPluginName { + netStatus, err := dm.networkPlugin.GetPodNetworkStatus(podNamespace, podName, kubecontainer.DockerID(container.ID).ContainerID()) + if err != nil { + glog.Errorf("NetworkPlugin %s failed on the status hook for pod '%s' - %v", dm.networkPlugin.Name(), podName, err) + } else if netStatus != nil { + result = netStatus.IP.String() + } + } + + return result +} + +func (dm *DockerManager) inspectContainer(id string, podName, podNamespace string) (*kubecontainer.ContainerStatus, string, error) { + var ip string + iResult, err := dm.client.InspectContainer(id) + if err != nil { + return nil, ip, err + } + glog.V(4).Infof("Container inspect result: %+v", *iResult) + + // TODO: Get k8s container name by parsing the docker name. This will be + // replaced by checking docker labels eventually. 
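+ // For orientation, a hedged sketch of what is parsed here: names produced
+ // by BuildDockerName encode the container name, a hash of the container
+ // spec, the pod full name and the pod UID, roughly of the shape
+ // "k8s_<container>.<hash>_<podname>_<namespace>_<uid>_<suffix>"; the exact
+ // format is owned by BuildDockerName/ParseDockerName in this package.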
+ dockerName, hash, err := ParseDockerName(iResult.Name)
+ if err != nil {
+ return nil, ip, fmt.Errorf("Unable to parse docker name %q", iResult.Name)
+ }
+ containerName := dockerName.ContainerName
+
+ containerInfo := getContainerInfoFromLabel(iResult.Config.Labels)
+
+ parseTimestampError := func(label, s string) {
+ glog.Errorf("Failed to parse %q timestamp %q for container %q of pod %q", label, s, id, kubecontainer.BuildPodFullName(podName, podNamespace))
+ }
+ var createdAt, startedAt, finishedAt time.Time
+ if createdAt, err = parseDockerTimestamp(iResult.Created); err != nil {
+ parseTimestampError("Created", iResult.Created)
+ }
+ if startedAt, err = parseDockerTimestamp(iResult.State.StartedAt); err != nil {
+ parseTimestampError("StartedAt", iResult.State.StartedAt)
+ }
+ if finishedAt, err = parseDockerTimestamp(iResult.State.FinishedAt); err != nil {
+ parseTimestampError("FinishedAt", iResult.State.FinishedAt)
+ }
+
+ status := kubecontainer.ContainerStatus{
+ Name: containerName,
+ RestartCount: containerInfo.RestartCount,
+ Image: iResult.Config.Image,
+ ImageID: DockerPrefix + iResult.Image,
+ ID: kubecontainer.DockerID(id).ContainerID(),
+ ExitCode: iResult.State.ExitCode,
+ CreatedAt: createdAt,
+ Hash: hash,
+ }
+ if iResult.State.Running {
+ // Containers that are running, restarting or paused
+ status.State = kubecontainer.ContainerStateRunning
+ status.StartedAt = startedAt
+ if containerName == PodInfraContainerName {
+ ip = dm.determineContainerIP(podNamespace, podName, iResult)
+ }
+ return &status, ip, nil
+ }
+
+ // Find containers that have exited or failed to start.
+ if !finishedAt.IsZero() || iResult.State.ExitCode != 0 {
+ // Containers that are exited, dead or created (docker failed to start container)
+ // When a container fails to start, State.ExitCode is non-zero while FinishedAt and StartedAt are both zero
+ reason := ""
+ message := iResult.State.Error
+
+ // Note: An application might handle OOMKilled gracefully.
+ // In that case, the container is oom killed, but the exit
+ // code could be 0.
+ if iResult.State.OOMKilled {
+ reason = "OOMKilled"
+ } else if iResult.State.ExitCode == 0 {
+ reason = "Completed"
+ } else if !finishedAt.IsZero() {
+ reason = "Error"
+ } else {
+ // A zero finishedAt with a non-zero ExitCode occurs when docker fails to start the container
+ reason = ErrContainerCannotRun.Error()
+ // Adjust time to the time docker attempted to run the container, otherwise startedAt and finishedAt will be set to epoch, which is misleading
+ finishedAt = createdAt
+ startedAt = createdAt
+ }
+
+ terminationMessagePath := containerInfo.TerminationMessagePath
+ if terminationMessagePath != "" {
+ for _, mount := range iResult.Mounts {
+ if mount.Destination == terminationMessagePath {
+ path := mount.Source
+ if data, err := ioutil.ReadFile(path); err != nil {
+ message = fmt.Sprintf("Error on reading termination-log %s: %v", path, err)
+ } else {
+ message = string(data)
+ }
+ }
+ }
+ }
+ status.State = kubecontainer.ContainerStateExited
+ status.Message = message
+ status.Reason = reason
+ status.StartedAt = startedAt
+ status.FinishedAt = finishedAt
+ } else {
+ // Non-running containers that are created (either not yet started, or the kubelet failed
+ // before calling the start-container function, etc.). Kubelet doesn't handle these scenarios yet.
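+ // Informal summary of the state mapping implemented by this function:
+ //
+ //	docker reports State.Running                -> ContainerStateRunning
+ //	finishedAt set, or a non-zero exit code     -> ContainerStateExited
+ //	everything else (created but never started) -> ContainerStateUnknown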
+ status.State = kubecontainer.ContainerStateUnknown
+ }
+ return &status, "", nil
+}
+
+// makeEnvList converts EnvVar list to a list of strings, in the form of
+// '<key>=<value>', which can be understood by docker.
+func makeEnvList(envs []kubecontainer.EnvVar) (result []string) {
+ for _, env := range envs {
+ result = append(result, fmt.Sprintf("%s=%s", env.Name, env.Value))
+ }
+ return
+}
+
+// makeMountBindings converts the mount list to a list of strings that
+// can be understood by docker.
+// Each element in the list is in the form of:
+// '<HostPath>:<ContainerPath>', or
+// '<HostPath>:<ContainerPath>:ro', if the path is read only, or
+// '<HostPath>:<ContainerPath>:Z', if the volume requires SELinux
+// relabeling and the pod provides an SELinux label
+func makeMountBindings(mounts []kubecontainer.Mount, podHasSELinuxLabel bool) (result []string) {
+ for _, m := range mounts {
+ bind := fmt.Sprintf("%s:%s", m.HostPath, m.ContainerPath)
+ if m.ReadOnly {
+ bind += ":ro"
+ }
+ // Only request relabeling if the pod provides an
+ // SELinux context. If the pod does not provide an
+ // SELinux context relabeling will label the volume
+ // with the container's randomly allocated MCS label.
+ // This would restrict access to the volume to the
+ // container which mounts it first.
+ if m.SELinuxRelabel && podHasSELinuxLabel {
+ if m.ReadOnly {
+ bind += ",Z"
+ } else {
+ bind += ":Z"
+ }
+ }
+ result = append(result, bind)
+ }
+ return
+}
+
+func makePortsAndBindings(portMappings []kubecontainer.PortMapping) (map[dockernat.Port]struct{}, map[dockernat.Port][]dockernat.PortBinding) {
+ exposedPorts := map[dockernat.Port]struct{}{}
+ portBindings := map[dockernat.Port][]dockernat.PortBinding{}
+ for _, port := range portMappings {
+ exteriorPort := port.HostPort
+ if exteriorPort == 0 {
+ // No need to do port binding when HostPort is not specified
+ continue
+ }
+ interiorPort := port.ContainerPort
+ // Some of this port stuff is under-documented voodoo.
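+ // As an illustrative example (values assumed): a single PortMapping of
+ // {ContainerPort: 80, HostPort: 8080, Protocol: "TCP"} produces
+ //
+ //	exposedPorts: map["80/tcp"]struct{}{}
+ //	portBindings: map["80/tcp"][]PortBinding{{HostIP: "", HostPort: "8080"}}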
+ // See http://stackoverflow.com/questions/20428302/binding-a-port-to-a-host-interface-using-the-rest-api
+ var protocol string
+ switch strings.ToUpper(string(port.Protocol)) {
+ case "UDP":
+ protocol = "/udp"
+ case "TCP":
+ protocol = "/tcp"
+ default:
+ glog.Warningf("Unknown protocol %q: defaulting to TCP", port.Protocol)
+ protocol = "/tcp"
+ }
+
+ dockerPort := dockernat.Port(strconv.Itoa(interiorPort) + protocol)
+ exposedPorts[dockerPort] = struct{}{}
+
+ hostBinding := dockernat.PortBinding{
+ HostPort: strconv.Itoa(exteriorPort),
+ HostIP: port.HostIP,
+ }
+
+ // Allow multiple host ports to bind to the same docker port
+ if existedBindings, ok := portBindings[dockerPort]; ok {
+ // If a docker port already maps to a host port, just append the new host port
+ portBindings[dockerPort] = append(existedBindings, hostBinding)
+ } else {
+ // Otherwise, it's a brand new port binding
+ portBindings[dockerPort] = []dockernat.PortBinding{
+ hostBinding,
+ }
+ }
+ }
+ return exposedPorts, portBindings
+}
+
+func (dm *DockerManager) runContainer(
+ pod *api.Pod,
+ container *api.Container,
+ opts *kubecontainer.RunContainerOptions,
+ ref *api.ObjectReference,
+ netMode string,
+ ipcMode string,
+ utsMode string,
+ pidMode string,
+ restartCount int,
+ oomScoreAdj int) (kubecontainer.ContainerID, error) {
+
+ dockerName := KubeletContainerName{
+ PodFullName: kubecontainer.GetPodFullName(pod),
+ PodUID: pod.UID,
+ ContainerName: container.Name,
+ }
+
+ securityOpts, err := dm.getSecurityOpt(pod, container.Name)
+ if err != nil {
+ return kubecontainer.ContainerID{}, err
+ }
+
+ // Pod information is recorded on the container as labels to preserve it in the event the pod is deleted
+ // while the Kubelet is down and there is no information available to recover the pod.
+ // TODO: keep these labels up to date if the pod changes
+ labels := newLabels(container, pod, restartCount, dm.enableCustomMetrics)
+
+ // TODO(random-liu): Remove this when we start to use new labels for KillContainerInPod
+ if container.Lifecycle != nil && container.Lifecycle.PreStop != nil {
+ // TODO: This is kind of hacky, we should really just encode the bits we need.
+ // TODO: This is hacky because the Kubelet should be parameterized to encode a specific version
+ // and needs to be able to migrate this whenever we deprecate v1. Should be a member of DockerManager.
+ if data, err := runtime.Encode(api.Codecs.LegacyCodec(unversioned.GroupVersion{Group: api.GroupName, Version: "v1"}), pod); err == nil {
+ labels[kubernetesPodLabel] = string(data)
+ } else {
+ glog.Errorf("Failed to encode pod %q for prestop hook", pod.Name)
+ }
+ }
+ memoryLimit := container.Resources.Limits.Memory().Value()
+ cpuRequest := container.Resources.Requests.Cpu()
+ cpuLimit := container.Resources.Limits.Cpu()
+ nvidiaGPULimit := container.Resources.Limits.NvidiaGPU()
+ var cpuShares int64
+ // If request is not specified, but limit is, we want request to default to limit.
+ // The API server does this for new containers, but we repeat this logic in the Kubelet
+ // for containers running on existing Kubernetes clusters.
+ if cpuRequest.IsZero() && !cpuLimit.IsZero() {
+ cpuShares = milliCPUToShares(cpuLimit.MilliValue())
+ } else {
+ // if cpuRequest.Amount is nil, then milliCPUToShares will return the minimal number
+ // of CPU shares.
+ cpuShares = milliCPUToShares(cpuRequest.MilliValue())
+ }
+ var devices []dockercontainer.DeviceMapping
+ if nvidiaGPULimit.Value() != 0 {
+ // Experimental.
+ // For now, we hardcode /dev/nvidia0 no matter what the user asks for
+ // (we only support one device per node).
+ devices = []dockercontainer.DeviceMapping{
+ {PathOnHost: "/dev/nvidia0", PathInContainer: "/dev/nvidia0", CgroupPermissions: "mrw"},
+ {PathOnHost: "/dev/nvidiactl", PathInContainer: "/dev/nvidiactl", CgroupPermissions: "mrw"},
+ {PathOnHost: "/dev/nvidia-uvm", PathInContainer: "/dev/nvidia-uvm", CgroupPermissions: "mrw"},
+ }
+ }
+ podHasSELinuxLabel := pod.Spec.SecurityContext != nil && pod.Spec.SecurityContext.SELinuxOptions != nil
+ binds := makeMountBindings(opts.Mounts, podHasSELinuxLabel)
+ // The reason we create and mount the log file in here (not in kubelet) is because
+ // the file's location depends on the ID of the container, and we need to create and
+ // mount the file before actually starting the container.
+ // TODO(yifan): Consider pulling this logic out, since we might need to reuse it in
+ // other container runtimes.
+ _, containerName, cid := BuildDockerName(dockerName, container)
+ if opts.PodContainerDir != "" && len(container.TerminationMessagePath) != 0 {
+ // Because the PodContainerDir contains the pod UID and container name, which is unique enough,
+ // here we just add a unique container ID to make the path unique for different instances
+ // of the same container.
+ containerLogPath := path.Join(opts.PodContainerDir, cid)
+ fs, err := os.Create(containerLogPath)
+ if err != nil {
+ // TODO: Clean up the previously created dir? Return the error?
+ glog.Errorf("Error on creating termination-log file %q: %v", containerLogPath, err)
+ } else {
+ fs.Close() // Close immediately; we're just doing a `touch` here
+ b := fmt.Sprintf("%s:%s", containerLogPath, container.TerminationMessagePath)
+ binds = append(binds, b)
+ }
+ }
+
+ hc := &dockercontainer.HostConfig{
+ Binds: binds,
+ NetworkMode: dockercontainer.NetworkMode(netMode),
+ IpcMode: dockercontainer.IpcMode(ipcMode),
+ UTSMode: dockercontainer.UTSMode(utsMode),
+ PidMode: dockercontainer.PidMode(pidMode),
+ ReadonlyRootfs: readOnlyRootFilesystem(container),
+ Resources: dockercontainer.Resources{
+ Memory: memoryLimit,
+ MemorySwap: -1,
+ CPUShares: cpuShares,
+ Devices: devices,
+ },
+ SecurityOpt: securityOpts,
+ }
+
+ // If the current API version is at least the docker 1.10 API, set OomScoreAdj on the HostConfig.
+ result, err := dm.checkDockerAPIVersion(dockerv110APIVersion)
+ if err != nil {
+ glog.Errorf("Failed to check docker api version: %v", err)
+ } else if result >= 0 {
+ hc.OomScoreAdj = oomScoreAdj
+ }
+
+ if dm.cpuCFSQuota {
+ // if cpuLimit.Amount is nil, then the appropriate default value is returned to allow full usage of cpu resource.
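+ // Illustrative numbers only (assuming the kubelet's conventional 100ms
+ // CFS period inside milliCPUToQuota): a 500m CPU limit would map to
+ //
+ //	cpuQuota  = 50000  // CPU-microseconds usable per period
+ //	cpuPeriod = 100000 // period length in microseconds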
+ cpuQuota, cpuPeriod := milliCPUToQuota(cpuLimit.MilliValue()) + + hc.CPUQuota = cpuQuota + hc.CPUPeriod = cpuPeriod + } + + if len(opts.CgroupParent) > 0 { + hc.CgroupParent = opts.CgroupParent + } + + dockerOpts := dockertypes.ContainerCreateConfig{ + Name: containerName, + Config: &dockercontainer.Config{ + Env: makeEnvList(opts.Envs), + Image: container.Image, + WorkingDir: container.WorkingDir, + Labels: labels, + // Interactive containers: + OpenStdin: container.Stdin, + StdinOnce: container.StdinOnce, + Tty: container.TTY, + }, + HostConfig: hc, + } + + // Set network configuration for infra-container + if container.Name == PodInfraContainerName { + setInfraContainerNetworkConfig(pod, netMode, opts, &dockerOpts) + } + + setEntrypointAndCommand(container, opts, dockerOpts) + + glog.V(3).Infof("Container %v/%v/%v: setting entrypoint \"%v\" and command \"%v\"", pod.Namespace, pod.Name, container.Name, dockerOpts.Config.Entrypoint, dockerOpts.Config.Cmd) + + securityContextProvider := securitycontext.NewSimpleSecurityContextProvider() + securityContextProvider.ModifyContainerConfig(pod, container, dockerOpts.Config) + securityContextProvider.ModifyHostConfig(pod, container, dockerOpts.HostConfig) + createResp, err := dm.client.CreateContainer(dockerOpts) + if err != nil { + dm.recorder.Eventf(ref, api.EventTypeWarning, kubecontainer.FailedToCreateContainer, "Failed to create docker container with error: %v", err) + return kubecontainer.ContainerID{}, err + } + if len(createResp.Warnings) != 0 { + glog.V(2).Infof("Container %q of pod %q created with warnings: %v", container.Name, format.Pod(pod), createResp.Warnings) + } + dm.recorder.Eventf(ref, api.EventTypeNormal, kubecontainer.CreatedContainer, "Created container with docker id %v", utilstrings.ShortenString(createResp.ID, 12)) + + if err = dm.client.StartContainer(createResp.ID); err != nil { + dm.recorder.Eventf(ref, api.EventTypeWarning, kubecontainer.FailedToStartContainer, + "Failed to start container with docker id %v with error: %v", utilstrings.ShortenString(createResp.ID, 12), err) + return kubecontainer.ContainerID{}, err + } + dm.recorder.Eventf(ref, api.EventTypeNormal, kubecontainer.StartedContainer, "Started container with docker id %v", utilstrings.ShortenString(createResp.ID, 12)) + + return kubecontainer.DockerID(createResp.ID).ContainerID(), nil +} + +// setInfraContainerNetworkConfig sets the network configuration for the infra-container. We only set network configuration for infra-container, all +// the user containers will share the same network namespace with infra-container. 
+func setInfraContainerNetworkConfig(pod *api.Pod, netMode string, opts *kubecontainer.RunContainerOptions, dockerOpts *dockertypes.ContainerCreateConfig) { + exposedPorts, portBindings := makePortsAndBindings(opts.PortMappings) + dockerOpts.Config.ExposedPorts = exposedPorts + dockerOpts.HostConfig.PortBindings = dockernat.PortMap(portBindings) + + if netMode != namespaceModeHost { + dockerOpts.Config.Hostname = opts.Hostname + if len(opts.DNS) > 0 { + dockerOpts.HostConfig.DNS = opts.DNS + } + if len(opts.DNSSearch) > 0 { + dockerOpts.HostConfig.DNSSearch = opts.DNSSearch + } + } +} + +func setEntrypointAndCommand(container *api.Container, opts *kubecontainer.RunContainerOptions, dockerOpts dockertypes.ContainerCreateConfig) { + command, args := kubecontainer.ExpandContainerCommandAndArgs(container, opts.Envs) + + dockerOpts.Config.Entrypoint = dockerstrslice.StrSlice(command) + dockerOpts.Config.Cmd = dockerstrslice.StrSlice(args) +} + +// A helper function to get the KubeletContainerName and hash from a docker +// container. +func getDockerContainerNameInfo(c *dockertypes.Container) (*KubeletContainerName, uint64, error) { + if len(c.Names) == 0 { + return nil, 0, fmt.Errorf("cannot parse empty docker container name: %#v", c.Names) + } + dockerName, hash, err := ParseDockerName(c.Names[0]) + if err != nil { + return nil, 0, fmt.Errorf("parse docker container name %q error: %v", c.Names[0], err) + } + return dockerName, hash, nil +} + +// Get pod UID, name, and namespace by examining the container names. +func getPodInfoFromContainer(c *dockertypes.Container) (kubetypes.UID, string, string, error) { + dockerName, _, err := getDockerContainerNameInfo(c) + if err != nil { + return kubetypes.UID(""), "", "", err + } + name, namespace, err := kubecontainer.ParsePodFullName(dockerName.PodFullName) + if err != nil { + return kubetypes.UID(""), "", "", fmt.Errorf("parse pod full name %q error: %v", dockerName.PodFullName, err) + } + return dockerName.PodUID, name, namespace, nil +} + +// GetContainers returns a list of running containers if |all| is false; +// otherwise, it returns all containers. +func (dm *DockerManager) GetContainers(all bool) ([]*kubecontainer.Container, error) { + containers, err := GetKubeletDockerContainers(dm.client, all) + if err != nil { + return nil, err + } + // Convert DockerContainers to []*kubecontainer.Container + result := make([]*kubecontainer.Container, 0, len(containers)) + for _, c := range containers { + converted, err := toRuntimeContainer(c) + if err != nil { + glog.Errorf("Error examining the container: %v", err) + continue + } + result = append(result, converted) + } + return result, nil +} + +func (dm *DockerManager) GetPods(all bool) ([]*kubecontainer.Pod, error) { + start := time.Now() + defer func() { + metrics.ContainerManagerLatency.WithLabelValues("GetPods").Observe(metrics.SinceInMicroseconds(start)) + }() + pods := make(map[kubetypes.UID]*kubecontainer.Pod) + var result []*kubecontainer.Pod + + containers, err := GetKubeletDockerContainers(dm.client, all) + if err != nil { + return nil, err + } + + // Group containers by pod. 
+ for _, c := range containers { + converted, err := toRuntimeContainer(c) + if err != nil { + glog.Errorf("Error examining the container: %v", err) + continue + } + + podUID, podName, podNamespace, err := getPodInfoFromContainer(c) + if err != nil { + glog.Errorf("Error examining the container: %v", err) + continue + } + + pod, found := pods[podUID] + if !found { + pod = &kubecontainer.Pod{ + ID: podUID, + Name: podName, + Namespace: podNamespace, + } + pods[podUID] = pod + } + pod.Containers = append(pod.Containers, converted) + } + + // Convert map to list. + for _, p := range pods { + result = append(result, p) + } + return result, nil +} + +// List all images in the local storage. +func (dm *DockerManager) ListImages() ([]kubecontainer.Image, error) { + var images []kubecontainer.Image + + dockerImages, err := dm.client.ListImages(dockertypes.ImageListOptions{}) + if err != nil { + return images, err + } + + for _, di := range dockerImages { + image, err := toRuntimeImage(&di) + if err != nil { + continue + } + images = append(images, *image) + } + return images, nil +} + +// TODO(vmarmol): Consider unexporting. +// PullImage pulls an image from network to local storage. +func (dm *DockerManager) PullImage(image kubecontainer.ImageSpec, secrets []api.Secret) error { + return dm.dockerPuller.Pull(image.Image, secrets) +} + +// IsImagePresent checks whether the container image is already in the local storage. +func (dm *DockerManager) IsImagePresent(image kubecontainer.ImageSpec) (bool, error) { + return dm.dockerPuller.IsImagePresent(image.Image) +} + +// Removes the specified image. +func (dm *DockerManager) RemoveImage(image kubecontainer.ImageSpec) error { + // TODO(harryz) currently Runtime interface does not provide other remove options. + _, err := dm.client.RemoveImage(image.Image, dockertypes.ImageRemoveOptions{}) + return err +} + +// podInfraContainerChanged returns true if the pod infra container has changed. +func (dm *DockerManager) podInfraContainerChanged(pod *api.Pod, podInfraContainerStatus *kubecontainer.ContainerStatus) (bool, error) { + var ports []api.ContainerPort + + // Check network mode. + if kubecontainer.IsHostNetworkPod(pod) { + dockerPodInfraContainer, err := dm.client.InspectContainer(podInfraContainerStatus.ID.ID) + if err != nil { + return false, err + } + + networkMode := getDockerNetworkMode(dockerPodInfraContainer) + if networkMode != namespaceModeHost { + glog.V(4).Infof("host: %v, %v", pod.Spec.SecurityContext.HostNetwork, networkMode) + return true, nil + } + } else if dm.networkPlugin.Name() != "cni" && dm.networkPlugin.Name() != "kubenet" { + // Docker only exports ports from the pod infra container. Let's + // collect all of the relevant ports and export them. + for _, container := range pod.Spec.InitContainers { + ports = append(ports, container.Ports...) + } + for _, container := range pod.Spec.Containers { + ports = append(ports, container.Ports...) + } + } + expectedPodInfraContainer := &api.Container{ + Name: PodInfraContainerName, + Image: dm.podInfraContainerImage, + Ports: ports, + ImagePullPolicy: podInfraContainerImagePullPolicy, + Env: dm.podInfraContainerEnv, + } + return podInfraContainerStatus.Hash != kubecontainer.HashContainer(expectedPodInfraContainer), nil +} + +// determine if the container root should be a read only filesystem. 
+func readOnlyRootFilesystem(container *api.Container) bool {
+ return container.SecurityContext != nil && container.SecurityContext.ReadOnlyRootFilesystem != nil && *container.SecurityContext.ReadOnlyRootFilesystem
+}
+
+// container must not be nil
+func getDockerNetworkMode(container *dockertypes.ContainerJSON) string {
+ if container.HostConfig != nil {
+ return string(container.HostConfig.NetworkMode)
+ }
+ return ""
+}
+
+// dockerVersion implements the kubecontainer.Version interface by implementing
+// Compare() and String(). It can hold either the server version or the API version.
+type dockerVersion string
+
+func (v dockerVersion) String() string {
+ return string(v)
+}
+
+func (v dockerVersion) Compare(other string) (int, error) {
+ if dockerversion.LessThan(string(v), other) {
+ return -1, nil
+ } else if dockerversion.GreaterThan(string(v), other) {
+ return 1, nil
+ }
+ return 0, nil
+}
+
+func (dm *DockerManager) Type() string {
+ return DockerType
+}
+
+func (dm *DockerManager) Version() (kubecontainer.Version, error) {
+ v, err := dm.client.Version()
+ if err != nil {
+ return nil, fmt.Errorf("docker: failed to get docker version: %v", err)
+ }
+
+ return dockerVersion(v.Version), nil
+}
+
+func (dm *DockerManager) APIVersion() (kubecontainer.Version, error) {
+ v, err := dm.client.Version()
+ if err != nil {
+ return nil, fmt.Errorf("docker: failed to get docker version: %v", err)
+ }
+
+ return dockerVersion(v.APIVersion), nil
+}
+
+// Status returns an error if the docker daemon is unhealthy, nil otherwise.
+// Currently we do this by checking whether:
+// 1) `docker version` works
+// 2) the docker version is compatible with the minimum requirement
+func (dm *DockerManager) Status() error {
+ return dm.checkVersionCompatibility()
+}
+
+func (dm *DockerManager) checkVersionCompatibility() error {
+ version, err := dm.APIVersion()
+ if err != nil {
+ return err
+ }
+ // Verify the docker version.
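+ // Worked example of the Compare semantics defined above: with the minimum
+ // set to "1.20", an API version of "1.19" compares as -1 and is rejected
+ // below, while "1.20" (0) and "1.22" (1) both pass.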
+ result, err := version.Compare(minimumDockerAPIVersion) + if err != nil { + return fmt.Errorf("failed to compare current docker version %v with minimum support Docker version %q - %v", version, minimumDockerAPIVersion, err) + } + if result < 0 { + return fmt.Errorf("container runtime version is older than %s", minimumDockerAPIVersion) + } + return nil +} + +func (dm *DockerManager) getSecurityOpt(pod *api.Pod, ctrName string) ([]string, error) { + version, err := dm.APIVersion() + if err != nil { + return nil, err + } + + // seccomp is only on docker versions >= v1.10 + result, err := version.Compare(dockerV110APIVersion) + if err != nil { + return nil, err + } + if result < 0 { + // return early for old versions + return nil, nil + } + + profile, profileOK := pod.ObjectMeta.Annotations["security.alpha.kubernetes.io/seccomp/container/"+ctrName] + if !profileOK { + // try the pod profile + profile, profileOK = pod.ObjectMeta.Annotations["security.alpha.kubernetes.io/seccomp/pod"] + if !profileOK { + // return early the default + return defaultSecurityOpt, nil + } + } + + if profile == "unconfined" { + // return early the default + return defaultSecurityOpt, nil + } + + if profile == "docker/default" { + // return nil so docker will load the default seccomp profile + return nil, nil + } + + if !strings.HasPrefix(profile, "localhost") { + return nil, fmt.Errorf("unknown seccomp profile option: %s", profile) + } + + file, err := ioutil.ReadFile(filepath.Join(dm.seccompProfileRoot, strings.TrimPrefix(profile, "localhost/"))) + if err != nil { + return nil, err + } + + b := bytes.NewBuffer(nil) + if err := json.Compact(b, file); err != nil { + return nil, err + } + + return []string{fmt.Sprintf("seccomp=%s", b.Bytes())}, nil +} + +type dockerExitError struct { + Inspect *dockertypes.ContainerExecInspect +} + +func (d *dockerExitError) String() string { + return d.Error() +} + +func (d *dockerExitError) Error() string { + return fmt.Sprintf("Error executing in Docker Container: %d", d.Inspect.ExitCode) +} + +func (d *dockerExitError) Exited() bool { + return !d.Inspect.Running +} + +func (d *dockerExitError) ExitStatus() int { + return d.Inspect.ExitCode +} + +// ExecInContainer runs the command inside the container identified by containerID. +func (dm *DockerManager) ExecInContainer(containerID kubecontainer.ContainerID, cmd []string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool) error { + if dm.execHandler == nil { + return errors.New("unable to exec without an exec handler") + } + + container, err := dm.client.InspectContainer(containerID.ID) + if err != nil { + return err + } + if !container.State.Running { + return fmt.Errorf("container not running (%s)", container.ID) + } + + return dm.execHandler.ExecInContainer(dm.client, container, cmd, stdin, stdout, stderr, tty) +} + +func (dm *DockerManager) AttachContainer(containerID kubecontainer.ContainerID, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool) error { + // TODO(random-liu): Do we really use the *Logs* field here? 
+ opts := dockertypes.ContainerAttachOptions{ + Stream: true, + Stdin: stdin != nil, + Stdout: stdout != nil, + Stderr: stderr != nil, + } + sopts := StreamOptions{ + InputStream: stdin, + OutputStream: stdout, + ErrorStream: stderr, + RawTerminal: tty, + } + return dm.client.AttachToContainer(containerID.ID, opts, sopts) +} + +func noPodInfraContainerError(podName, podNamespace string) error { + return fmt.Errorf("cannot find pod infra container in pod %q", kubecontainer.BuildPodFullName(podName, podNamespace)) +} + +// PortForward executes socat in the pod's network namespace and copies +// data between stream (representing the user's local connection on their +// computer) and the specified port in the container. +// +// TODO: +// - match cgroups of container +// - should we support nsenter + socat on the host? (current impl) +// - should we support nsenter + socat in a container, running with elevated privs and --pid=host? +func (dm *DockerManager) PortForward(pod *kubecontainer.Pod, port uint16, stream io.ReadWriteCloser) error { + podInfraContainer := pod.FindContainerByName(PodInfraContainerName) + if podInfraContainer == nil { + return noPodInfraContainerError(pod.Name, pod.Namespace) + } + container, err := dm.client.InspectContainer(podInfraContainer.ID.ID) + if err != nil { + return err + } + + if !container.State.Running { + return fmt.Errorf("container not running (%s)", container.ID) + } + + containerPid := container.State.Pid + socatPath, lookupErr := exec.LookPath("socat") + if lookupErr != nil { + return fmt.Errorf("unable to do port forwarding: socat not found.") + } + + args := []string{"-t", fmt.Sprintf("%d", containerPid), "-n", socatPath, "-", fmt.Sprintf("TCP4:localhost:%d", port)} + + nsenterPath, lookupErr := exec.LookPath("nsenter") + if lookupErr != nil { + return fmt.Errorf("unable to do port forwarding: nsenter not found.") + } + + commandString := fmt.Sprintf("%s %s", nsenterPath, strings.Join(args, " ")) + glog.V(4).Infof("executing port forwarding command: %s", commandString) + + command := exec.Command(nsenterPath, args...) + command.Stdout = stream + + stderr := new(bytes.Buffer) + command.Stderr = stderr + + // If we use Stdin, command.Run() won't return until the goroutine that's copying + // from stream finishes. Unfortunately, if you have a client like telnet connected + // via port forwarding, as long as the user's telnet client is connected to the user's + // local listener that port forwarding sets up, the telnet session never exits. This + // means that even if socat has finished running, command.Run() won't ever return + // (because the client still has the connection and stream open). + // + // The work around is to use StdinPipe(), as Wait() (called by Run()) closes the pipe + // when the command (socat) exits. 
+ inPipe, err := command.StdinPipe()
+ if err != nil {
+ return fmt.Errorf("unable to do port forwarding: error creating stdin pipe: %v", err)
+ }
+ go func() {
+ io.Copy(inPipe, stream)
+ inPipe.Close()
+ }()
+
+ if err := command.Run(); err != nil {
+ return fmt.Errorf("%v: %s", err, stderr.String())
+ }
+
+ return nil
+}
+
+// Get the IP address of a container's interface using nsenter
+func (dm *DockerManager) GetContainerIP(containerID, interfaceName string) (string, error) {
+ _, lookupErr := exec.LookPath("nsenter")
+ if lookupErr != nil {
+ return "", fmt.Errorf("Unable to obtain IP address of container: missing nsenter.")
+ }
+ container, err := dm.client.InspectContainer(containerID)
+ if err != nil {
+ return "", err
+ }
+
+ if !container.State.Running {
+ return "", fmt.Errorf("container not running (%s)", container.ID)
+ }
+
+ containerPid := container.State.Pid
+ extractIPCmd := fmt.Sprintf("ip -4 addr show %s | grep inet | awk -F\" \" '{print $2}'", interfaceName)
+ args := []string{"-t", fmt.Sprintf("%d", containerPid), "-n", "--", "bash", "-c", extractIPCmd}
+ command := exec.Command("nsenter", args...)
+ out, err := command.CombinedOutput()
+
+ // Fall back to IPv6 address if no IPv4 address is present
+ if err == nil && string(out) == "" {
+ extractIPCmd = fmt.Sprintf("ip -6 addr show %s scope global | grep inet6 | awk -F\" \" '{print $2}'", interfaceName)
+ args = []string{"-t", fmt.Sprintf("%d", containerPid), "-n", "--", "bash", "-c", extractIPCmd}
+ command = exec.Command("nsenter", args...)
+ out, err = command.CombinedOutput()
+ }
+
+ if err != nil {
+ return "", err
+ }
+ return string(out), nil
+}
+
+// TODO(random-liu): Change running pod to pod status in the future. We can't do it now, because kubelet also uses this function without pod status.
+// We can only deprecate this after refactoring kubelet.
+// TODO(random-liu): After using pod status for KillPod(), we can also remove the kubernetesPodLabel, because all the needed information should have
+// been extracted from the new labels and stored in pod status.
+// Only hard eviction scenarios should provide a grace period override; all other code paths must pass nil.
+func (dm *DockerManager) KillPod(pod *api.Pod, runningPod kubecontainer.Pod, gracePeriodOverride *int64) error {
+ result := dm.killPodWithSyncResult(pod, runningPod, gracePeriodOverride)
+ return result.Error()
+}
+
+// TODO(random-liu): This is just a temporary function, which will be removed when we actually add PodSyncResult
+// NOTE(random-liu): The pod passed in could be *nil* when kubelet restarted.
+func (dm *DockerManager) killPodWithSyncResult(pod *api.Pod, runningPod kubecontainer.Pod, gracePeriodOverride *int64) (result kubecontainer.PodSyncResult) {
+ // Send the kills in parallel since they may take a long time.
+ // There may be len(runningPod.Containers) or len(runningPod.Containers)-1 of result in the channel + containerResults := make(chan *kubecontainer.SyncResult, len(runningPod.Containers)) + wg := sync.WaitGroup{} + var ( + networkContainer *kubecontainer.Container + networkSpec *api.Container + ) + wg.Add(len(runningPod.Containers)) + for _, container := range runningPod.Containers { + go func(container *kubecontainer.Container) { + defer utilruntime.HandleCrash() + defer wg.Done() + + var containerSpec *api.Container + if pod != nil { + for i, c := range pod.Spec.Containers { + if c.Name == container.Name { + containerSpec = &pod.Spec.Containers[i] + break + } + } + if containerSpec == nil { + for i, c := range pod.Spec.InitContainers { + if c.Name == container.Name { + containerSpec = &pod.Spec.InitContainers[i] + break + } + } + } + } + + // TODO: Handle this without signaling the pod infra container to + // adapt to the generic container runtime. + if container.Name == PodInfraContainerName { + // Store the container runtime for later deletion. + // We do this so that PreStop handlers can run in the network namespace. + networkContainer = container + networkSpec = containerSpec + return + } + + killContainerResult := kubecontainer.NewSyncResult(kubecontainer.KillContainer, container.Name) + err := dm.KillContainerInPod(container.ID, containerSpec, pod, "Need to kill pod.", gracePeriodOverride) + if err != nil { + killContainerResult.Fail(kubecontainer.ErrKillContainer, err.Error()) + glog.Errorf("Failed to delete container: %v; Skipping pod %q", err, runningPod.ID) + } + containerResults <- killContainerResult + }(container) + } + wg.Wait() + close(containerResults) + for containerResult := range containerResults { + result.AddSyncResult(containerResult) + } + if networkContainer != nil { + ins, err := dm.client.InspectContainer(networkContainer.ID.ID) + if err != nil { + err = fmt.Errorf("Error inspecting container %v: %v", networkContainer.ID.ID, err) + glog.Error(err) + result.Fail(err) + return + } + if getDockerNetworkMode(ins) != namespaceModeHost { + teardownNetworkResult := kubecontainer.NewSyncResult(kubecontainer.TeardownNetwork, kubecontainer.BuildPodFullName(runningPod.Name, runningPod.Namespace)) + result.AddSyncResult(teardownNetworkResult) + glog.V(3).Infof("Calling network plugin %s to tear down pod for %s", dm.networkPlugin.Name(), kubecontainer.BuildPodFullName(runningPod.Name, runningPod.Namespace)) + if err := dm.networkPlugin.TearDownPod(runningPod.Namespace, runningPod.Name, networkContainer.ID); err != nil { + message := fmt.Sprintf("Failed to teardown network for pod %q using network plugins %q: %v", runningPod.ID, dm.networkPlugin.Name(), err) + teardownNetworkResult.Fail(kubecontainer.ErrTeardownNetwork, message) + glog.Error(message) + } + } + killContainerResult := kubecontainer.NewSyncResult(kubecontainer.KillContainer, networkContainer.Name) + result.AddSyncResult(killContainerResult) + if err := dm.KillContainerInPod(networkContainer.ID, networkSpec, pod, "Need to kill pod.", gracePeriodOverride); err != nil { + killContainerResult.Fail(kubecontainer.ErrKillContainer, err.Error()) + glog.Errorf("Failed to delete container: %v; Skipping pod %q", err, runningPod.ID) + } + } + return +} + +// KillContainerInPod kills a container in the pod. It must be passed either a container ID or a container and pod, +// and will attempt to lookup the other information if missing. 
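+// Both call shapes below are valid; this is an illustrative, caller-side
+// sketch and the surrounding variables are assumed:
+//
+//	// ID known; spec and pod are recovered from the container's labels:
+//	err = dm.KillContainerInPod(id, nil, nil, "Need to kill pod.", nil)
+//	// ID unknown; the container is located by name within the given pod:
+//	err = dm.KillContainerInPod(kubecontainer.ContainerID{}, &container, pod, "Need to kill pod.", nil)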
+func (dm *DockerManager) KillContainerInPod(containerID kubecontainer.ContainerID, container *api.Container, pod *api.Pod, message string, gracePeriodOverride *int64) error {
+ switch {
+ case containerID.IsEmpty():
+ // Locate the container.
+ pods, err := dm.GetPods(false)
+ if err != nil {
+ return err
+ }
+ targetPod := kubecontainer.Pods(pods).FindPod(kubecontainer.GetPodFullName(pod), pod.UID)
+ targetContainer := targetPod.FindContainerByName(container.Name)
+ if targetContainer == nil {
+ return fmt.Errorf("unable to find container %q in pod %q", container.Name, targetPod.Name)
+ }
+ containerID = targetContainer.ID
+
+ case container == nil || pod == nil:
+ // Read information about the container from labels
+ inspect, err := dm.client.InspectContainer(containerID.ID)
+ if err != nil {
+ return err
+ }
+ storedPod, storedContainer, cerr := containerAndPodFromLabels(inspect)
+ if cerr != nil {
+ glog.Errorf("unable to access pod data from container: %v", cerr)
+ }
+ if container == nil {
+ container = storedContainer
+ }
+ if pod == nil {
+ pod = storedPod
+ }
+ }
+ return dm.killContainer(containerID, container, pod, message, gracePeriodOverride)
+}
+
+// killContainer accepts a containerID and an optional container or pod containing shutdown policies. Invoke
+// KillContainerInPod if information must be retrieved first. It is only valid to provide a grace period override
+// during hard eviction scenarios. All other code paths in kubelet must never provide a grace period override, otherwise
+// data corruption could occur in the end-user application.
+func (dm *DockerManager) killContainer(containerID kubecontainer.ContainerID, container *api.Container, pod *api.Pod, reason string, gracePeriodOverride *int64) error {
+ ID := containerID.ID
+ name := ID
+ if container != nil {
+ name = fmt.Sprintf("%s %s", name, container.Name)
+ }
+ if pod != nil {
+ name = fmt.Sprintf("%s %s/%s", name, pod.Namespace, pod.Name)
+ }
+
+ gracePeriod := int64(minimumGracePeriodInSeconds)
+ if pod != nil {
+ switch {
+ case pod.DeletionGracePeriodSeconds != nil:
+ gracePeriod = *pod.DeletionGracePeriodSeconds
+ case pod.Spec.TerminationGracePeriodSeconds != nil:
+ gracePeriod = *pod.Spec.TerminationGracePeriodSeconds
+ }
+ }
+ glog.V(2).Infof("Killing container %q with %d second grace period", name, gracePeriod)
+ start := unversioned.Now()
+
+ if pod != nil && container != nil && container.Lifecycle != nil && container.Lifecycle.PreStop != nil {
+ glog.V(4).Infof("Running preStop hook for container %q", name)
+ done := make(chan struct{})
+ go func() {
+ defer close(done)
+ defer utilruntime.HandleCrash()
+ if msg, err := dm.runner.Run(containerID, pod, container, container.Lifecycle.PreStop); err != nil {
+ glog.Errorf("preStop hook for container %q failed: %v", name, err)
+ dm.generateFailedContainerEvent(containerID, pod.Name, kubecontainer.FailedPreStopHook, msg)
+ }
+ }()
+ select {
+ case <-time.After(time.Duration(gracePeriod) * time.Second):
+ glog.V(2).Infof("preStop hook for container %q did not complete in %d seconds", name, gracePeriod)
+ case <-done:
+ glog.V(4).Infof("preStop hook for container %q completed", name)
+ }
+ gracePeriod -= int64(unversioned.Now().Sub(start.Time).Seconds())
+ }
+
+ // If the caller did not specify a grace period override, we ensure that the grace period
+ // is not less than the minimal shutdown window to avoid unnecessary SIGKILLs. If a caller
+ // did provide an override, we always set the gracePeriod to that value.
+ // The only valid time to send an override is during eviction scenarios, where we want to do a hard kill
+ // of a container because of resource exhaustion for incompressible resources (i.e. disk, memory).
+ if gracePeriodOverride == nil {
+ if gracePeriod < minimumGracePeriodInSeconds {
+ gracePeriod = minimumGracePeriodInSeconds
+ }
+ } else {
+ gracePeriod = *gracePeriodOverride
+ glog.V(2).Infof("Killing container %q, but using %d second grace period override", name, gracePeriod)
+ }
+
+ err := dm.client.StopContainer(ID, int(gracePeriod))
+ if err == nil {
+ glog.V(2).Infof("Container %q exited after %s", name, unversioned.Now().Sub(start.Time))
+ } else {
+ glog.V(2).Infof("Container %q termination failed after %s: %v", name, unversioned.Now().Sub(start.Time), err)
+ }
+ ref, ok := dm.containerRefManager.GetRef(containerID)
+ if !ok {
+ glog.Warningf("No ref for pod %q", name)
+ } else {
+ message := fmt.Sprintf("Killing container with docker id %v", utilstrings.ShortenString(ID, 12))
+ if reason != "" {
+ message = fmt.Sprint(message, ": ", reason)
+ }
+ dm.recorder.Event(ref, api.EventTypeNormal, kubecontainer.KillingContainer, message)
+ dm.containerRefManager.ClearRef(containerID)
+ }
+ return err
+}
+
+func (dm *DockerManager) generateFailedContainerEvent(containerID kubecontainer.ContainerID, podName, reason, message string) {
+ ref, ok := dm.containerRefManager.GetRef(containerID)
+ if !ok {
+ glog.Warningf("No ref for pod %q", podName)
+ return
+ }
+ dm.recorder.Event(ref, api.EventTypeWarning, reason, message)
+}
+
+var errNoPodOnContainer = fmt.Errorf("no pod information labels on Docker container")
+
+// containerAndPodFromLabels tries to load the appropriate container info off of a Docker container's labels
+func containerAndPodFromLabels(inspect *dockertypes.ContainerJSON) (pod *api.Pod, container *api.Container, err error) {
+ if inspect == nil || inspect.Config == nil || inspect.Config.Labels == nil {
+ return nil, nil, errNoPodOnContainer
+ }
+ labels := inspect.Config.Labels
+
+ // the pod data may not be set
+ if body, found := labels[kubernetesPodLabel]; found {
+ pod = &api.Pod{}
+ if err = runtime.DecodeInto(api.Codecs.UniversalDecoder(), []byte(body), pod); err == nil {
+ name := labels[types.KubernetesContainerNameLabel]
+ for ix := range pod.Spec.Containers {
+ if pod.Spec.Containers[ix].Name == name {
+ container = &pod.Spec.Containers[ix]
+ break
+ }
+ }
+ if container == nil {
+ for ix := range pod.Spec.InitContainers {
+ if pod.Spec.InitContainers[ix].Name == name {
+ container = &pod.Spec.InitContainers[ix]
+ break
+ }
+ }
+ }
+ if container == nil {
+ err = fmt.Errorf("unable to find container %s in pod %v", name, pod)
+ }
+ } else {
+ pod = nil
+ }
+ }
+
+ // If we could not decode a full pod, still attempt to recover the termination grace period
+ // from the generic metadata label (the one consumed by kill).
+ if pod == nil {
+ if period, ok := labels[kubernetesPodTerminationGracePeriodLabel]; ok {
+ if seconds, err := strconv.ParseInt(period, 10, 64); err == nil {
+ pod = &api.Pod{}
+ pod.DeletionGracePeriodSeconds = &seconds
+ }
+ }
+ }
+
+ return
+}
+
+func (dm *DockerManager) applyOOMScoreAdj(pod *api.Pod, container *api.Container, containerInfo *dockertypes.ContainerJSON) error {
+ if containerInfo.State.Pid == 0 {
+ // Container exited. We cannot do anything about it. Ignore this error.
+ glog.V(2).Infof("Failed to apply OOM score adj on container %q with ID %q. Init process does not exist.", containerInfo.Name, containerInfo.ID)
+ return nil
+ }
+
+ cgroupName, err := dm.procFs.GetFullContainerName(containerInfo.State.Pid)
+ if err != nil {
+ if err == os.ErrNotExist {
+ // Container exited. We cannot do anything about it. Ignore this error.
+ glog.V(2).Infof("Failed to apply OOM score adj on container %q with ID %q. Init process does not exist.", containerInfo.Name, containerInfo.ID)
+ return nil
+ }
+ return err
+ }
+ oomScoreAdj := dm.calculateOomScoreAdj(pod, container)
+ if err = dm.oomAdjuster.ApplyOOMScoreAdjContainer(cgroupName, oomScoreAdj, 5); err != nil {
+ if err == os.ErrNotExist {
+ // Container exited. We cannot do anything about it. Ignore this error.
+ glog.V(2).Infof("Failed to apply OOM score adj on container %q with ID %q. Init process does not exist.", containerInfo.Name, containerInfo.ID)
+ return nil
+ }
+ return err
+ }
+ return nil
+}
+
+// Run a single container from a pod. Returns the docker container ID.
+// If you do not need to pass labels, just pass nil.
+func (dm *DockerManager) runContainerInPod(pod *api.Pod, container *api.Container, netMode, ipcMode, pidMode, podIP string, restartCount int) (kubecontainer.ContainerID, error) {
+ start := time.Now()
+ defer func() {
+ metrics.ContainerManagerLatency.WithLabelValues("runContainerInPod").Observe(metrics.SinceInMicroseconds(start))
+ }()
+
+ ref, err := kubecontainer.GenerateContainerRef(pod, container)
+ if err != nil {
+ glog.Errorf("Can't make a ref to pod %v, container %v: '%v'", pod.Name, container.Name, err)
+ }
+ glog.Infof("Generating ref for container %s: %#v", container.Name, ref)
+
+ opts, err := dm.runtimeHelper.GenerateRunContainerOptions(pod, container, podIP)
+ if err != nil {
+ return kubecontainer.ContainerID{}, fmt.Errorf("GenerateRunContainerOptions: %v", err)
+ }
+
+ utsMode := ""
+ if kubecontainer.IsHostNetworkPod(pod) {
+ utsMode = namespaceModeHost
+ }
+
+ oomScoreAdj := dm.calculateOomScoreAdj(pod, container)
+
+ id, err := dm.runContainer(pod, container, opts, ref, netMode, ipcMode, utsMode, pidMode, restartCount, oomScoreAdj)
+ if err != nil {
+ return kubecontainer.ContainerID{}, fmt.Errorf("runContainer: %v", err)
+ }
+
+ // Remember this reference so we can report events about this container
+ if ref != nil {
+ dm.containerRefManager.SetRef(id, ref)
+ }
+
+ if container.Lifecycle != nil && container.Lifecycle.PostStart != nil {
+ msg, handlerErr := dm.runner.Run(id, pod, container, container.Lifecycle.PostStart)
+ if handlerErr != nil {
+ err := fmt.Errorf("PostStart handler: %v", handlerErr)
+ dm.generateFailedContainerEvent(id, pod.Name, kubecontainer.FailedPostStartHook, msg)
+ dm.KillContainerInPod(id, container, pod, err.Error(), nil)
+ return kubecontainer.ContainerID{}, err
+ }
+ }
+
+ // Container information is used in adjusting OOM scores, adding ndots and getting the logPath.
+ containerInfo, err := dm.client.InspectContainer(id.ID)
+ if err != nil {
+ return kubecontainer.ContainerID{}, fmt.Errorf("InspectContainer: %v", err)
+ }
+
+ // Create a symbolic link to the Docker container log file using a name which captures the
+ // full pod name, the container name and the Docker container ID. Cluster level logging will
+ // capture these symbolic filenames which can be used for search terms in Elasticsearch or for
+ // labels for Cloud Logging.
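+ // A hedged sketch of the resulting layout (symlink name format assumed
+ // from LogSymlink; target path assumes docker's default json-file driver):
+ //
+ //	<containerLogsDir>/<podfullname>_<container>-<dockerID>.log
+ //	    -> /var/lib/docker/containers/<dockerID>/<dockerID>-json.log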
+ containerLogFile := containerInfo.LogPath
+ symlinkFile := LogSymlink(dm.containerLogsDir, kubecontainer.GetPodFullName(pod), container.Name, id.ID)
+ if err = dm.os.Symlink(containerLogFile, symlinkFile); err != nil {
+ glog.Errorf("Failed to create symbolic link to the log file of pod %q container %q: %v", format.Pod(pod), container.Name, err)
+ }
+
+ // Check if the current docker version is higher than 1.10. Otherwise, we have to apply OOMScoreAdj ourselves instead of using the docker API.
+ // TODO: Remove this logic after we stop supporting docker version < 1.10.
+ if err = dm.applyOOMScoreAdjIfNeeded(pod, container, containerInfo); err != nil {
+ return kubecontainer.ContainerID{}, err
+ }
+
+ // The addNDotsOption call appends the ndots option to the resolv.conf file generated by docker.
+ // This resolv.conf file is shared by all containers of the same pod, and needs to be modified only once per pod.
+ // We modify it when the pause container is created, since it is the first container created in the pod
+ // and it holds the networking namespace.
+ if container.Name == PodInfraContainerName && utsMode != namespaceModeHost {
+ err = addNDotsOption(containerInfo.ResolvConfPath)
+ if err != nil {
+ return kubecontainer.ContainerID{}, fmt.Errorf("addNDotsOption: %v", err)
+ }
+ }
+
+ return id, err
+}
+
+func (dm *DockerManager) applyOOMScoreAdjIfNeeded(pod *api.Pod, container *api.Container, containerInfo *dockertypes.ContainerJSON) error {
+ // Compare the current API version with the expected API version.
+ result, err := dm.checkDockerAPIVersion(dockerv110APIVersion)
+ if err != nil {
+ return fmt.Errorf("Failed to check docker api version: %v", err)
+ }
+ // If the current API version is older than the one that supports OOMScoreAdj, use the old way.
+ if result < 0 {
+ if err := dm.applyOOMScoreAdj(pod, container, containerInfo); err != nil {
+ return fmt.Errorf("Failed to apply oom-score-adj to container %q: %v", containerInfo.Name, err)
+ }
+ }
+
+ return nil
+}
+
+func (dm *DockerManager) calculateOomScoreAdj(pod *api.Pod, container *api.Container) int {
+ // Set the OOM score of the container based on the priority of the container.
+ // Processes in lower-priority pods should be killed first if the system runs out of memory.
+ // The main pod infrastructure container is considered high priority, since if it is killed the
+ // whole pod will die.
+ var oomScoreAdj int
+ if container.Name == PodInfraContainerName {
+ oomScoreAdj = qos.PodInfraOOMAdj
+ } else {
+ oomScoreAdj = qos.GetContainerOOMScoreAdjust(pod, container, int64(dm.machineInfo.MemoryCapacity))
+ }
+
+ return oomScoreAdj
+}
+
+// versionInfo wraps api version and daemon version.
+type versionInfo struct {
+ apiVersion kubecontainer.Version
+ daemonVersion kubecontainer.Version
+}
+
+// checkDockerAPIVersion checks the current docker API version against an expected version.
+// Return:
+// 1 : newer than expected version
+// -1: older than expected version
+// 0 : same version
+func (dm *DockerManager) checkDockerAPIVersion(expectedVersion string) (int, error) {
+	value, err := dm.versionCache.Get(dm.machineInfo.MachineID)
+	if err != nil {
+		return 0, err
+	}
+	apiVersion := value.(versionInfo).apiVersion
+	result, err := apiVersion.Compare(expectedVersion)
+	if err != nil {
+		return 0, fmt.Errorf("Failed to compare current docker api version %v with OOMScoreAdj supported Docker version %q - %v",
+			apiVersion, expectedVersion, err)
+	}
+	return result, nil
+}
+
+func addNDotsOption(resolvFilePath string) error {
+	if len(resolvFilePath) == 0 {
+		glog.Errorf("ResolvConfPath is empty.")
+		return nil
+	}
+
+	if _, err := os.Stat(resolvFilePath); os.IsNotExist(err) {
+		return fmt.Errorf("ResolvConfPath %q does not exist", resolvFilePath)
+	}
+
+	glog.V(4).Infof("DNS ResolvConfPath exists: %s. Will attempt to add ndots option: %s", resolvFilePath, ndotsDNSOption)
+
+	if err := appendToFile(resolvFilePath, ndotsDNSOption); err != nil {
+		glog.Errorf("resolv.conf could not be updated: %v", err)
+		return err
+	}
+	return nil
+}
+
+func appendToFile(filePath, stringToAppend string) error {
+	f, err := os.OpenFile(filePath, os.O_APPEND|os.O_WRONLY, 0644)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+
+	_, err = f.WriteString(stringToAppend)
+	return err
+}
+
+// createPodInfraContainer starts the pod infra container for a pod. Returns the docker container ID of the newly created container.
+// If any error occurs in this function, it will return a brief error and a detailed error message.
+func (dm *DockerManager) createPodInfraContainer(pod *api.Pod) (kubecontainer.DockerID, error, string) {
+	start := time.Now()
+	defer func() {
+		metrics.ContainerManagerLatency.WithLabelValues("createPodInfraContainer").Observe(metrics.SinceInMicroseconds(start))
+	}()
+	// Use host networking if specified.
+	netNamespace := ""
+	var ports []api.ContainerPort
+
+	if kubecontainer.IsHostNetworkPod(pod) {
+		netNamespace = namespaceModeHost
+	} else if dm.networkPlugin.Name() == "cni" || dm.networkPlugin.Name() == "kubenet" {
+		netNamespace = "none"
+	} else {
+		// Docker only exports ports from the pod infra container. Let's
+		// collect all of the relevant ports and export them.
+		for _, container := range pod.Spec.InitContainers {
+			ports = append(ports, container.Ports...)
+		}
+		for _, container := range pod.Spec.Containers {
+			ports = append(ports, container.Ports...)
+		}
+	}
+
+	container := &api.Container{
+		Name:            PodInfraContainerName,
+		Image:           dm.podInfraContainerImage,
+		Ports:           ports,
+		ImagePullPolicy: podInfraContainerImagePullPolicy,
+		Env:             dm.podInfraContainerEnv,
+	}
+
+	// No pod secrets for the infra container.
+	// The message isn't needed for the Infra container.
+	if err, msg := dm.imagePuller.PullImage(pod, container, nil); err != nil {
+		return "", err, msg
+	}
+
+	// Currently we don't care about the restart count of the infra container, just set it to 0.
+	id, err := dm.runContainerInPod(pod, container, netNamespace, getIPCMode(pod), getPidMode(pod), "", 0)
+	if err != nil {
+		return "", kubecontainer.ErrRunContainer, err.Error()
+	}
+
+	return kubecontainer.DockerID(id.ID), nil, ""
+}
+
+// Structure keeping information on changes that need to happen for a pod. The semantics are as follows:
+// - startInfraContainer is true if a new Infra Container has to be started and the old one (if running) killed.
+//   Additionally, if it is true then containersToKeep has to be empty.
+// - infraContainerId has to be set if and only if startInfraContainer is false. It stores the dockerID of the running Infra Container.
+// - containersToStart keeps indices of Specs of containers that have to be started and reasons why containers will be started.
+// - containersToKeep stores a mapping from dockerIDs of running containers to indices of their Specs for containers that
+//   should be kept running. If startInfraContainer is false then it contains an entry for infraContainerId (mapped to -1).
+//   It shouldn't be the case that containersToStart is empty and containersToKeep contains only infraContainerId. In such a case
+//   the Infra Container should be killed, hence it's removed from this map.
+// - all init containers are stored in initContainersToKeep
+// - all running containers which are NOT contained in containersToKeep and initContainersToKeep should be killed.
+type podContainerChangesSpec struct {
+	StartInfraContainer  bool
+	InfraChanged         bool
+	InfraContainerId     kubecontainer.DockerID
+	InitFailed           bool
+	InitContainersToKeep map[kubecontainer.DockerID]int
+	ContainersToStart    map[int]string
+	ContainersToKeep     map[kubecontainer.DockerID]int
+}
+
+func (dm *DockerManager) computePodContainerChanges(pod *api.Pod, podStatus *kubecontainer.PodStatus) (podContainerChangesSpec, error) {
+	start := time.Now()
+	defer func() {
+		metrics.ContainerManagerLatency.WithLabelValues("computePodContainerChanges").Observe(metrics.SinceInMicroseconds(start))
+	}()
+	glog.V(5).Infof("Syncing Pod %q: %+v", format.Pod(pod), pod)
+
+	containersToStart := make(map[int]string)
+	containersToKeep := make(map[kubecontainer.DockerID]int)
+
+	var err error
+	var podInfraContainerID kubecontainer.DockerID
+	var changed bool
+	podInfraContainerStatus := podStatus.FindContainerStatusByName(PodInfraContainerName)
+	if podInfraContainerStatus != nil && podInfraContainerStatus.State == kubecontainer.ContainerStateRunning {
+		glog.V(4).Infof("Found pod infra container for %q", format.Pod(pod))
+		changed, err = dm.podInfraContainerChanged(pod, podInfraContainerStatus)
+		if err != nil {
+			return podContainerChangesSpec{}, err
+		}
+	}
+
+	createPodInfraContainer := true
+	if podInfraContainerStatus == nil || podInfraContainerStatus.State != kubecontainer.ContainerStateRunning {
+		glog.V(2).Infof("Need to restart pod infra container for %q because it is not found", format.Pod(pod))
+	} else if changed {
+		glog.V(2).Infof("Need to restart pod infra container for %q because it has changed", format.Pod(pod))
+	} else {
+		glog.V(4).Infof("Pod infra container looks good, keep it %q", format.Pod(pod))
+		createPodInfraContainer = false
+		podInfraContainerID = kubecontainer.DockerID(podInfraContainerStatus.ID.ID)
+		containersToKeep[podInfraContainerID] = -1
+	}
+
+	// check the status of the init containers
+	initFailed := false
+	initContainersToKeep := make(map[kubecontainer.DockerID]int)
+	// always reset the init containers if the pod is reset
+	if !createPodInfraContainer {
+		// keep all successfully completed containers up to and including the first failing container
+	Containers:
+		for i, container := range pod.Spec.InitContainers {
+			containerStatus := podStatus.FindContainerStatusByName(container.Name)
+			switch {
+			case containerStatus == nil:
+				continue
+			case containerStatus.State == kubecontainer.ContainerStateRunning:
+				initContainersToKeep[kubecontainer.DockerID(containerStatus.ID.ID)] = i
+			case
containerStatus.State == kubecontainer.ContainerStateExited: + initContainersToKeep[kubecontainer.DockerID(containerStatus.ID.ID)] = i + // TODO: should we abstract the "did the init container fail" check? + if containerStatus.ExitCode != 0 { + initFailed = true + break Containers + } + } + } + } + + // check the status of the containers + for index, container := range pod.Spec.Containers { + expectedHash := kubecontainer.HashContainer(&container) + + containerStatus := podStatus.FindContainerStatusByName(container.Name) + if containerStatus == nil || containerStatus.State != kubecontainer.ContainerStateRunning { + if kubecontainer.ShouldContainerBeRestarted(&container, pod, podStatus) { + // If we are here it means that the container is dead and should be restarted, or never existed and should + // be created. We may be inserting this ID again if the container has changed and it has + // RestartPolicy::Always, but it's not a big deal. + message := fmt.Sprintf("Container %+v is dead, but RestartPolicy says that we should restart it.", container) + glog.V(3).Info(message) + containersToStart[index] = message + } + continue + } + + containerID := kubecontainer.DockerID(containerStatus.ID.ID) + hash := containerStatus.Hash + glog.V(3).Infof("pod %q container %q exists as %v", format.Pod(pod), container.Name, containerID) + + if createPodInfraContainer { + // createPodInfraContainer == true and Container exists + // If we're creating infra container everything will be killed anyway + // If RestartPolicy is Always or OnFailure we restart containers that were running before we + // killed them when restarting Infra Container. + if pod.Spec.RestartPolicy != api.RestartPolicyNever { + message := fmt.Sprintf("Infra Container is being recreated. %q will be restarted.", container.Name) + glog.V(1).Info(message) + containersToStart[index] = message + } + continue + } + + if initFailed { + // initialization failed and Container exists + // If we have an initialization failure everything will be killed anyway + // If RestartPolicy is Always or OnFailure we restart containers that were running before we + // killed them when re-running initialization + if pod.Spec.RestartPolicy != api.RestartPolicyNever { + message := fmt.Sprintf("Failed to initialize pod. %q will be restarted.", container.Name) + glog.V(1).Info(message) + containersToStart[index] = message + } + continue + } + + // At this point, the container is running and pod infra container is good. + // We will look for changes and check healthiness for the container. + containerChanged := hash != 0 && hash != expectedHash + if containerChanged { + message := fmt.Sprintf("pod %q container %q hash changed (%d vs %d), it will be killed and re-created.", format.Pod(pod), container.Name, hash, expectedHash) + glog.Info(message) + containersToStart[index] = message + continue + } + + liveness, found := dm.livenessManager.Get(containerStatus.ID) + if !found || liveness == proberesults.Success { + containersToKeep[containerID] = index + continue + } + if pod.Spec.RestartPolicy != api.RestartPolicyNever { + message := fmt.Sprintf("pod %q container %q is unhealthy, it will be killed and re-created.", format.Pod(pod), container.Name) + glog.Info(message) + containersToStart[index] = message + } + } + + // After the loop one of the following should be true: + // - createPodInfraContainer is true and containersToKeep is empty. + // (In fact, when createPodInfraContainer is false, containersToKeep will not be touched). 
+ // - createPodInfraContainer is false and containersToKeep contains at least ID of Infra Container + + // If Infra container is the last running one, we don't want to keep it, and we don't want to + // keep any init containers. + if !createPodInfraContainer && len(containersToStart) == 0 && len(containersToKeep) == 1 { + containersToKeep = make(map[kubecontainer.DockerID]int) + initContainersToKeep = make(map[kubecontainer.DockerID]int) + } + + return podContainerChangesSpec{ + StartInfraContainer: createPodInfraContainer, + InfraChanged: changed, + InfraContainerId: podInfraContainerID, + InitFailed: initFailed, + InitContainersToKeep: initContainersToKeep, + ContainersToStart: containersToStart, + ContainersToKeep: containersToKeep, + }, nil +} + +// Sync the running pod to match the specified desired pod. +func (dm *DockerManager) SyncPod(pod *api.Pod, _ api.PodStatus, podStatus *kubecontainer.PodStatus, pullSecrets []api.Secret, backOff *flowcontrol.Backoff) (result kubecontainer.PodSyncResult) { + start := time.Now() + defer func() { + metrics.ContainerManagerLatency.WithLabelValues("SyncPod").Observe(metrics.SinceInMicroseconds(start)) + }() + + containerChanges, err := dm.computePodContainerChanges(pod, podStatus) + if err != nil { + result.Fail(err) + return + } + glog.V(3).Infof("Got container changes for pod %q: %+v", format.Pod(pod), containerChanges) + + if containerChanges.InfraChanged { + ref, err := api.GetReference(pod) + if err != nil { + glog.Errorf("Couldn't make a ref to pod %q: '%v'", format.Pod(pod), err) + } + dm.recorder.Eventf(ref, api.EventTypeNormal, "InfraChanged", "Pod infrastructure changed, it will be killed and re-created.") + } + if containerChanges.StartInfraContainer || (len(containerChanges.ContainersToKeep) == 0 && len(containerChanges.ContainersToStart) == 0) { + if len(containerChanges.ContainersToKeep) == 0 && len(containerChanges.ContainersToStart) == 0 { + glog.V(4).Infof("Killing Infra Container for %q because all other containers are dead.", format.Pod(pod)) + } else { + glog.V(4).Infof("Killing Infra Container for %q, will start new one", format.Pod(pod)) + } + + // Killing phase: if we want to start new infra container, or nothing is running kill everything (including infra container) + // TODO(random-liu): We'll use pod status directly in the future + killResult := dm.killPodWithSyncResult(pod, kubecontainer.ConvertPodStatusToRunningPod(podStatus), nil) + result.AddPodSyncResult(killResult) + if killResult.Error() != nil { + return + } + } else { + // Otherwise kill any running containers in this pod which are not specified as ones to keep. 
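+		// A running container survives this sweep only if its docker ID appears in
+		// ContainersToKeep or InitContainersToKeep; when a restart is planned, the
+		// kill message is looked up in ContainersToStart under the same spec index.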
+	runningContainerStatuses := podStatus.GetRunningContainerStatuses()
+	for _, containerStatus := range runningContainerStatuses {
+		_, keep := containerChanges.ContainersToKeep[kubecontainer.DockerID(containerStatus.ID.ID)]
+		_, keepInit := containerChanges.InitContainersToKeep[kubecontainer.DockerID(containerStatus.ID.ID)]
+		if !keep && !keepInit {
+			glog.V(3).Infof("Killing unwanted container %q(id=%q) for pod %q", containerStatus.Name, containerStatus.ID, format.Pod(pod))
+			// attempt to find the appropriate container policy
+			var podContainer *api.Container
+			var killMessage string
+			for i, c := range pod.Spec.Containers {
+				if c.Name == containerStatus.Name {
+					podContainer = &pod.Spec.Containers[i]
+					killMessage = containerChanges.ContainersToStart[i]
+					break
+				}
+			}
+			killContainerResult := kubecontainer.NewSyncResult(kubecontainer.KillContainer, containerStatus.Name)
+			result.AddSyncResult(killContainerResult)
+			if err := dm.KillContainerInPod(containerStatus.ID, podContainer, pod, killMessage, nil); err != nil {
+				killContainerResult.Fail(kubecontainer.ErrKillContainer, err.Error())
+				glog.Errorf("Error killing container %q(id=%q) for pod %q: %v", containerStatus.Name, containerStatus.ID, format.Pod(pod), err)
+				return
+			}
+		}
+	}
+	}
+
+	// Keep terminated init containers fairly aggressively controlled
+	dm.pruneInitContainersBeforeStart(pod, podStatus, containerChanges.InitContainersToKeep)
+
+	// We pass the value of the podIP down to runContainerInPod, which in turn
+	// passes it to various other functions, in order to facilitate
+	// functionality that requires this value (hosts file and downward API)
+	// and avoid races determining the pod IP in cases where a container
+	// requires restart but the podIP isn't in the status manager yet.
+	//
+	// We default to the IP in the passed-in pod status, and overwrite it if the
+	// infra container needs to be (re)started.
+	podIP := ""
+	if podStatus != nil {
+		podIP = podStatus.IP
+	}
+
+	// If we should create the infra container then we do it first.
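+	// Creating the infra container below also drives network setup: the network
+	// plugin's SetUpPod hook runs against the new container, and the pod IP is
+	// then re-read from the freshly started infra container.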
+ podInfraContainerID := containerChanges.InfraContainerId + if containerChanges.StartInfraContainer && (len(containerChanges.ContainersToStart) > 0) { + glog.V(4).Infof("Creating pod infra container for %q", format.Pod(pod)) + startContainerResult := kubecontainer.NewSyncResult(kubecontainer.StartContainer, PodInfraContainerName) + result.AddSyncResult(startContainerResult) + var msg string + podInfraContainerID, err, msg = dm.createPodInfraContainer(pod) + if err != nil { + startContainerResult.Fail(err, msg) + glog.Errorf("Failed to create pod infra container: %v; Skipping pod %q", err, format.Pod(pod)) + return + } + + setupNetworkResult := kubecontainer.NewSyncResult(kubecontainer.SetupNetwork, kubecontainer.GetPodFullName(pod)) + result.AddSyncResult(setupNetworkResult) + if !kubecontainer.IsHostNetworkPod(pod) { + glog.V(3).Infof("Calling network plugin %s to setup pod for %s", dm.networkPlugin.Name(), format.Pod(pod)) + err = dm.networkPlugin.SetUpPod(pod.Namespace, pod.Name, podInfraContainerID.ContainerID()) + if err != nil { + // TODO: (random-liu) There shouldn't be "Skipping pod" in sync result message + message := fmt.Sprintf("Failed to setup network for pod %q using network plugins %q: %v; Skipping pod", format.Pod(pod), dm.networkPlugin.Name(), err) + setupNetworkResult.Fail(kubecontainer.ErrSetupNetwork, message) + glog.Error(message) + + // Delete infra container + killContainerResult := kubecontainer.NewSyncResult(kubecontainer.KillContainer, PodInfraContainerName) + result.AddSyncResult(killContainerResult) + if delErr := dm.KillContainerInPod(kubecontainer.ContainerID{ + ID: string(podInfraContainerID), + Type: "docker"}, nil, pod, message, nil); delErr != nil { + killContainerResult.Fail(kubecontainer.ErrKillContainer, delErr.Error()) + glog.Warningf("Clear infra container failed for pod %q: %v", format.Pod(pod), delErr) + } + return + } + + // Setup the host interface unless the pod is on the host's network (FIXME: move to networkPlugin when ready) + podInfraContainer, err := dm.client.InspectContainer(string(podInfraContainerID)) + if err != nil { + glog.Errorf("Failed to inspect pod infra container: %v; Skipping pod %q", err, format.Pod(pod)) + result.Fail(err) + return + } + + if dm.configureHairpinMode { + if err = hairpin.SetUpContainerPid(podInfraContainer.State.Pid, network.DefaultInterfaceName); err != nil { + glog.Warningf("Hairpin setup failed for pod %q: %v", format.Pod(pod), err) + } + } + + // Overwrite the podIP passed in the pod status, since we just started the infra container. 
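+			// The IP is re-read from the just-started infra container rather than
+			// trusted from the possibly stale status passed into SyncPod.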
+ podIP = dm.determineContainerIP(pod.Name, pod.Namespace, podInfraContainer) + } + } + + next, status, done := findActiveInitContainer(pod, podStatus) + if status != nil { + if status.ExitCode != 0 { + // container initialization has failed, flag the pod as failed + initContainerResult := kubecontainer.NewSyncResult(kubecontainer.InitContainer, status.Name) + initContainerResult.Fail(kubecontainer.ErrRunInitContainer, fmt.Sprintf("init container %q exited with %d", status.Name, status.ExitCode)) + result.AddSyncResult(initContainerResult) + if pod.Spec.RestartPolicy == api.RestartPolicyNever { + utilruntime.HandleError(fmt.Errorf("error running pod %q init container %q, restart=Never: %+v", format.Pod(pod), status.Name, status)) + return + } + utilruntime.HandleError(fmt.Errorf("Error running pod %q init container %q, restarting: %+v", format.Pod(pod), status.Name, status)) + } + } + + // Note: when configuring the pod's containers anything that can be configured by pointing + // to the namespace of the infra container should use namespaceMode. This includes things like the net namespace + // and IPC namespace. PID mode cannot point to another container right now. + // See createPodInfraContainer for infra container setup. + namespaceMode := fmt.Sprintf("container:%v", podInfraContainerID) + pidMode := getPidMode(pod) + + if next != nil { + if len(containerChanges.ContainersToStart) == 0 { + glog.V(4).Infof("No containers to start, stopping at init container %+v in pod %v", next.Name, format.Pod(pod)) + return + } + + // If we need to start the next container, do so now then exit + container := next + startContainerResult := kubecontainer.NewSyncResult(kubecontainer.StartContainer, container.Name) + result.AddSyncResult(startContainerResult) + + // containerChanges.StartInfraContainer causes the containers to be restarted for config reasons + if !containerChanges.StartInfraContainer { + isInBackOff, err, msg := dm.doBackOff(pod, container, podStatus, backOff) + if isInBackOff { + startContainerResult.Fail(err, msg) + glog.V(4).Infof("Backing Off restarting init container %+v in pod %v", container, format.Pod(pod)) + return + } + } + + glog.V(4).Infof("Creating init container %+v in pod %v", container, format.Pod(pod)) + if err, msg := dm.tryContainerStart(container, pod, podStatus, pullSecrets, namespaceMode, pidMode, podIP); err != nil { + startContainerResult.Fail(err, msg) + utilruntime.HandleError(fmt.Errorf("container start failed: %v: %s", err, msg)) + return + } + + // Successfully started the container; clear the entry in the failure + glog.V(4).Infof("Completed init container %q for pod %q", container.Name, format.Pod(pod)) + return + } + if !done { + // init container still running + glog.V(4).Infof("An init container is still running in pod %v", format.Pod(pod)) + return + } + if containerChanges.InitFailed { + // init container still running + glog.V(4).Infof("Not all init containers have succeeded for pod %v", format.Pod(pod)) + return + } + + // Start regular containers + for idx := range containerChanges.ContainersToStart { + container := &pod.Spec.Containers[idx] + startContainerResult := kubecontainer.NewSyncResult(kubecontainer.StartContainer, container.Name) + result.AddSyncResult(startContainerResult) + + // containerChanges.StartInfraContainer causes the containers to be restarted for config reasons + if !containerChanges.StartInfraContainer { + isInBackOff, err, msg := dm.doBackOff(pod, container, podStatus, backOff) + if isInBackOff { + 
startContainerResult.Fail(err, msg)
+				glog.V(4).Infof("Backing Off restarting container %+v in pod %v", container, format.Pod(pod))
+				continue
+			}
+		}
+
+		glog.V(4).Infof("Creating container %+v in pod %v", container, format.Pod(pod))
+		if err, msg := dm.tryContainerStart(container, pod, podStatus, pullSecrets, namespaceMode, pidMode, podIP); err != nil {
+			startContainerResult.Fail(err, msg)
+			utilruntime.HandleError(fmt.Errorf("container start failed: %v: %s", err, msg))
+			continue
+		}
+	}
+	return
+}
+
+// tryContainerStart attempts to pull and start the container, returning an error and a reason string if the start
+// was not successful.
+func (dm *DockerManager) tryContainerStart(container *api.Container, pod *api.Pod, podStatus *kubecontainer.PodStatus, pullSecrets []api.Secret, namespaceMode, pidMode, podIP string) (err error, reason string) {
+	err, msg := dm.imagePuller.PullImage(pod, container, pullSecrets)
+	if err != nil {
+		return err, msg
+	}
+
+	if container.SecurityContext != nil && container.SecurityContext.RunAsNonRoot != nil && *container.SecurityContext.RunAsNonRoot {
+		err := dm.verifyNonRoot(container)
+		if err != nil {
+			return kubecontainer.ErrVerifyNonRoot, err.Error()
+		}
+	}
+
+	// For a new container, the RestartCount should be 0
+	restartCount := 0
+	containerStatus := podStatus.FindContainerStatusByName(container.Name)
+	if containerStatus != nil {
+		restartCount = containerStatus.RestartCount + 1
+	}
+
+	// TODO(dawnchen): Check RestartPolicy.DelaySeconds before restarting a container
+	_, err = dm.runContainerInPod(pod, container, namespaceMode, namespaceMode, pidMode, podIP, restartCount)
+	if err != nil {
+		// TODO(bburns): Perhaps blacklist a container after N failures?
+		return kubecontainer.ErrRunContainer, err.Error()
+	}
+	return nil, ""
+}
+
+// pruneInitContainersBeforeStart ensures that before we begin creating init containers, we have reduced the number
+// of outstanding init containers still present. This reduces load on the container garbage collector
+// by only preserving the most recent terminated init container.
+func (dm *DockerManager) pruneInitContainersBeforeStart(pod *api.Pod, podStatus *kubecontainer.PodStatus, initContainersToKeep map[kubecontainer.DockerID]int) {
+	// only the last execution of each init container should be preserved, and only preserve it if it is in the
+	// list of init containers to keep.
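+	// podStatus.ContainerStatuses is assumed to be sorted newest-first (see the
+	// comment in GetPodStatus), so the first exited status seen for a name is
+	// the one that is kept.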
+ initContainerNames := sets.NewString() + for _, container := range pod.Spec.InitContainers { + initContainerNames.Insert(container.Name) + } + for name := range initContainerNames { + count := 0 + for _, status := range podStatus.ContainerStatuses { + if status.Name != name || !initContainerNames.Has(status.Name) || status.State != kubecontainer.ContainerStateExited { + continue + } + count++ + // keep the first init container for this name + if count == 1 { + continue + } + // if there is a reason to preserve the older container, do so + if _, ok := initContainersToKeep[kubecontainer.DockerID(status.ID.ID)]; ok { + continue + } + + // prune all other init containers that match this container name + // TODO: we may not need aggressive pruning + glog.V(4).Infof("Removing init container %q instance %q %d", status.Name, status.ID.ID, count) + if err := dm.client.RemoveContainer(status.ID.ID, dockertypes.ContainerRemoveOptions{RemoveVolumes: true}); err != nil { + if _, ok := err.(containerNotFoundError); ok { + count-- + continue + } + utilruntime.HandleError(fmt.Errorf("failed to remove pod init container %q: %v; Skipping pod %q", status.Name, err, format.Pod(pod))) + // TODO: report serious errors + continue + } + + // remove any references to this container + if _, ok := dm.containerRefManager.GetRef(status.ID); ok { + dm.containerRefManager.ClearRef(status.ID) + } else { + glog.Warningf("No ref for pod '%q'", pod.Name) + } + } + } +} + +// findActiveInitContainer returns the status of the last failed container, the next init container to +// start, or done if there are no further init containers. Status is only returned if an init container +// failed, in which case next will point to the current container. +func findActiveInitContainer(pod *api.Pod, podStatus *kubecontainer.PodStatus) (next *api.Container, status *kubecontainer.ContainerStatus, done bool) { + if len(pod.Spec.InitContainers) == 0 { + return nil, nil, true + } + + for i := len(pod.Spec.InitContainers) - 1; i >= 0; i-- { + container := &pod.Spec.InitContainers[i] + status := podStatus.FindContainerStatusByName(container.Name) + switch { + case status == nil: + continue + case status.State == kubecontainer.ContainerStateRunning: + return nil, nil, false + case status.State == kubecontainer.ContainerStateExited: + switch { + // the container has failed, we'll have to retry + case status.ExitCode != 0: + return &pod.Spec.InitContainers[i], status, false + // all init containers successful + case i == (len(pod.Spec.InitContainers) - 1): + return nil, nil, true + // all containers up to i successful, go to i+1 + default: + return &pod.Spec.InitContainers[i+1], nil, false + } + } + } + + return &pod.Spec.InitContainers[0], nil, false +} + +// verifyNonRoot returns an error if the container or image will run as the root user. +func (dm *DockerManager) verifyNonRoot(container *api.Container) error { + if securitycontext.HasRunAsUser(container) { + if securitycontext.HasRootRunAsUser(container) { + return fmt.Errorf("container's runAsUser breaks non-root policy") + } + return nil + } + + imgRoot, err := dm.isImageRoot(container.Image) + if err != nil { + return fmt.Errorf("can't tell if image runs as root: %v", err) + } + if imgRoot { + return fmt.Errorf("container has no runAsUser and image will run as root") + } + + return nil +} + +// isImageRoot returns true if the user directive is not set on the image, the user is set to 0 +// or the user is set to root. 
If there is an error inspecting the image this method will return +// false and return the error. +func (dm *DockerManager) isImageRoot(image string) (bool, error) { + img, err := dm.client.InspectImage(image) + if err != nil { + return false, err + } + if img == nil || img.Config == nil { + return false, fmt.Errorf("unable to inspect image %s, nil Config", image) + } + + user := getUidFromUser(img.Config.User) + // if no user is defined container will run as root + if user == "" { + return true, nil + } + // do not allow non-numeric user directives + uid, err := strconv.Atoi(user) + if err != nil { + return false, fmt.Errorf("non-numeric user (%s) is not allowed", user) + } + // user is numeric, check for 0 + return uid == 0, nil +} + +// getUidFromUser splits the uid out of a uid:gid string. +func getUidFromUser(id string) string { + if id == "" { + return id + } + // split instances where the id may contain uid:gid + if strings.Contains(id, ":") { + return strings.Split(id, ":")[0] + } + // no gid, just return the id + return id +} + +// If all instances of a container are garbage collected, doBackOff will also return false, which means the container may be restarted before the +// backoff deadline. However, because that won't cause error and the chance is really slim, we can just ignore it for now. +// If a container is still in backoff, the function will return a brief backoff error and a detailed error message. +func (dm *DockerManager) doBackOff(pod *api.Pod, container *api.Container, podStatus *kubecontainer.PodStatus, backOff *flowcontrol.Backoff) (bool, error, string) { + var cStatus *kubecontainer.ContainerStatus + // Use the finished time of the latest exited container as the start point to calculate whether to do back-off. + // TODO(random-liu): Better define backoff start point; add unit and e2e test after we finalize this. (See github issue #22240) + for _, c := range podStatus.ContainerStatuses { + if c.Name == container.Name && c.State == kubecontainer.ContainerStateExited { + cStatus = c + break + } + } + if cStatus != nil { + glog.Infof("checking backoff for container %q in pod %q", container.Name, pod.Name) + ts := cStatus.FinishedAt + // found a container that requires backoff + dockerName := KubeletContainerName{ + PodFullName: kubecontainer.GetPodFullName(pod), + PodUID: pod.UID, + ContainerName: container.Name, + } + stableName, _, _ := BuildDockerName(dockerName, container) + if backOff.IsInBackOffSince(stableName, ts) { + if ref, err := kubecontainer.GenerateContainerRef(pod, container); err == nil { + dm.recorder.Eventf(ref, api.EventTypeWarning, kubecontainer.BackOffStartContainer, "Back-off restarting failed docker container") + } + err := fmt.Errorf("Back-off %s restarting failed container=%s pod=%s", backOff.Get(stableName), container.Name, format.Pod(pod)) + glog.Infof("%s", err.Error()) + return true, kubecontainer.ErrCrashLoopBackOff, err.Error() + } + backOff.Next(stableName, ts) + } + return false, nil, "" +} + +// getPidMode returns the pid mode to use on the docker container based on pod.Spec.HostPID. +func getPidMode(pod *api.Pod) string { + pidMode := "" + if pod.Spec.SecurityContext != nil && pod.Spec.SecurityContext.HostPID { + pidMode = namespaceModeHost + } + return pidMode +} + +// getIPCMode returns the ipc mode to use on the docker container based on pod.Spec.HostIPC. 
+func getIPCMode(pod *api.Pod) string { + ipcMode := "" + if pod.Spec.SecurityContext != nil && pod.Spec.SecurityContext.HostIPC { + ipcMode = namespaceModeHost + } + return ipcMode +} + +// GetNetNS returns the network namespace path for the given container +func (dm *DockerManager) GetNetNS(containerID kubecontainer.ContainerID) (string, error) { + inspectResult, err := dm.client.InspectContainer(containerID.ID) + if err != nil { + glog.Errorf("Error inspecting container: '%v'", err) + return "", err + } + netnsPath := fmt.Sprintf(DockerNetnsFmt, inspectResult.State.Pid) + return netnsPath, nil +} + +// Garbage collection of dead containers +func (dm *DockerManager) GarbageCollect(gcPolicy kubecontainer.ContainerGCPolicy) error { + return dm.containerGC.GarbageCollect(gcPolicy) +} + +func (dm *DockerManager) GetPodStatus(uid kubetypes.UID, name, namespace string) (*kubecontainer.PodStatus, error) { + podStatus := &kubecontainer.PodStatus{ID: uid, Name: name, Namespace: namespace} + // Now we retain restart count of container as a docker label. Each time a container + // restarts, pod will read the restart count from the registered dead container, increment + // it to get the new restart count, and then add a label with the new restart count on + // the newly started container. + // However, there are some limitations of this method: + // 1. When all dead containers were garbage collected, the container status could + // not get the historical value and would be *inaccurate*. Fortunately, the chance + // is really slim. + // 2. When working with old version containers which have no restart count label, + // we can only assume their restart count is 0. + // Anyhow, we only promised "best-effort" restart count reporting, we can just ignore + // these limitations now. + var containerStatuses []*kubecontainer.ContainerStatus + // We have added labels like pod name and pod namespace, it seems that we can do filtered list here. + // However, there may be some old containers without these labels, so at least now we can't do that. + // TODO(random-liu): Do only one list and pass in the list result in the future + // TODO(random-liu): Add filter when we are sure that all the containers have the labels + containers, err := dm.client.ListContainers(dockertypes.ContainerListOptions{All: true}) + if err != nil { + return podStatus, err + } + // Loop through list of running and exited docker containers to construct + // the statuses. We assume docker returns a list of containers sorted in + // reverse by time. + // TODO: optimization: set maximum number of containers per container name to examine. + for _, c := range containers { + if len(c.Names) == 0 { + continue + } + dockerName, _, err := ParseDockerName(c.Names[0]) + if err != nil { + continue + } + if dockerName.PodUID != uid { + continue + } + result, ip, err := dm.inspectContainer(c.ID, name, namespace) + if err != nil { + if _, ok := err.(containerNotFoundError); ok { + // https://github.com/kubernetes/kubernetes/issues/22541 + // Sometimes when docker's state is corrupt, a container can be listed + // but couldn't be inspected. We fake a status for this container so + // that we can still return a status for the pod to sync. 
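+				// The fabricated status carries only the ID and name; its state is
+				// left as ContainerStateUnknown so callers can distinguish it from
+				// a real inspect result.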
+				result = &kubecontainer.ContainerStatus{
+					ID:    kubecontainer.DockerID(c.ID).ContainerID(),
+					Name:  dockerName.ContainerName,
+					State: kubecontainer.ContainerStateUnknown,
+				}
+				glog.Errorf("Unable to inspect container %q: %v", c.ID, err)
+			} else {
+				return podStatus, err
+			}
+		}
+		containerStatuses = append(containerStatuses, result)
+		if ip != "" {
+			podStatus.IP = ip
+		}
+	}
+
+	podStatus.ContainerStatuses = containerStatuses
+	return podStatus, nil
+}
+
+// getVersionInfo returns apiVersion & daemonVersion of docker runtime
+func (dm *DockerManager) getVersionInfo() (versionInfo, error) {
+	apiVersion, err := dm.APIVersion()
+	if err != nil {
+		return versionInfo{}, err
+	}
+	daemonVersion, err := dm.Version()
+	if err != nil {
+		return versionInfo{}, err
+	}
+	return versionInfo{
+		apiVersion:    apiVersion,
+		daemonVersion: daemonVersion,
+	}, nil
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/dockertools/manager_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/dockertools/manager_test.go
new file mode 100644
index 000000000000..63c3bca13ea8
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/dockertools/manager_test.go
@@ -0,0 +1,2056 @@
+/*
+Copyright 2014 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package dockertools
+
+import (
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"os"
+	"reflect"
+	"regexp"
+	"sort"
+	"strconv"
+	"strings"
+	"testing"
+	"time"
+
+	dockertypes "github.com/docker/engine-api/types"
+	dockercontainer "github.com/docker/engine-api/types/container"
+	dockerstrslice "github.com/docker/engine-api/types/strslice"
+	cadvisorapi "github.com/google/cadvisor/info/v1"
+	"github.com/stretchr/testify/assert"
+	"k8s.io/kubernetes/cmd/kubelet/app/options"
+	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/api/testapi"
+	"k8s.io/kubernetes/pkg/apis/componentconfig"
+	"k8s.io/kubernetes/pkg/client/record"
+	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
+	containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
+	"k8s.io/kubernetes/pkg/kubelet/network"
+	nettest "k8s.io/kubernetes/pkg/kubelet/network/testing"
+	proberesults "k8s.io/kubernetes/pkg/kubelet/prober/results"
+	"k8s.io/kubernetes/pkg/kubelet/types"
+	"k8s.io/kubernetes/pkg/runtime"
+	kubetypes "k8s.io/kubernetes/pkg/types"
+	"k8s.io/kubernetes/pkg/util"
+	uexec "k8s.io/kubernetes/pkg/util/exec"
+	"k8s.io/kubernetes/pkg/util/flowcontrol"
+	"k8s.io/kubernetes/pkg/util/intstr"
+	"k8s.io/kubernetes/pkg/util/sets"
+)
+
+type fakeHTTP struct {
+	url string
+	err error
+}
+
+func (f *fakeHTTP) Get(url string) (*http.Response, error) {
+	f.url = url
+	return nil, f.err
+}
+
+// fakeRuntimeHelper implements the kubecontainer.RuntimeHelper interface
+// for testing purposes.
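+// Only the hooks these tests exercise return meaningful values; the remaining
+// methods are stubs.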
+type fakeRuntimeHelper struct{} + +var _ kubecontainer.RuntimeHelper = &fakeRuntimeHelper{} + +var testPodContainerDir string + +func (f *fakeRuntimeHelper) GenerateRunContainerOptions(pod *api.Pod, container *api.Container, podIP string) (*kubecontainer.RunContainerOptions, error) { + var opts kubecontainer.RunContainerOptions + var err error + if len(container.TerminationMessagePath) != 0 { + testPodContainerDir, err = ioutil.TempDir("", "fooPodContainerDir") + if err != nil { + return nil, err + } + opts.PodContainerDir = testPodContainerDir + } + return &opts, nil +} + +func (f *fakeRuntimeHelper) GetClusterDNS(pod *api.Pod) ([]string, []string, error) { + return nil, nil, fmt.Errorf("not implemented") +} + +// This is not used by docker runtime. +func (f *fakeRuntimeHelper) GeneratePodHostNameAndDomain(pod *api.Pod) (string, string, error) { + return "", "", nil +} + +func (f *fakeRuntimeHelper) GetPodDir(kubetypes.UID) string { + return "" +} + +func createTestDockerManager(fakeHTTPClient *fakeHTTP, fakeDocker *FakeDockerClient) (*DockerManager, *FakeDockerClient) { + if fakeHTTPClient == nil { + fakeHTTPClient = &fakeHTTP{} + } + if fakeDocker == nil { + fakeDocker = NewFakeDockerClient() + } + fakeRecorder := &record.FakeRecorder{} + containerRefManager := kubecontainer.NewRefManager() + networkPlugin, _ := network.InitNetworkPlugin([]network.NetworkPlugin{}, "", nettest.NewFakeHost(nil), componentconfig.HairpinNone) + dockerManager := NewFakeDockerManager( + fakeDocker, + fakeRecorder, + proberesults.NewManager(), + containerRefManager, + &cadvisorapi.MachineInfo{}, + options.GetDefaultPodInfraContainerImage(), + 0, 0, "", + &containertest.FakeOS{}, + networkPlugin, + &fakeRuntimeHelper{}, + fakeHTTPClient, + flowcontrol.NewBackOff(time.Second, 300*time.Second)) + + return dockerManager, fakeDocker +} + +func newTestDockerManagerWithHTTPClient(fakeHTTPClient *fakeHTTP) (*DockerManager, *FakeDockerClient) { + return createTestDockerManager(fakeHTTPClient, nil) +} + +func newTestDockerManagerWithVersion(version, apiVersion string) (*DockerManager, *FakeDockerClient) { + fakeDocker := NewFakeDockerClientWithVersion(version, apiVersion) + return createTestDockerManager(nil, fakeDocker) +} + +func newTestDockerManager() (*DockerManager, *FakeDockerClient) { + return createTestDockerManager(nil, nil) +} + +func matchString(t *testing.T, pattern, str string) bool { + match, err := regexp.MatchString(pattern, str) + if err != nil { + t.Logf("unexpected error: %v", err) + } + return match +} + +func TestSetEntrypointAndCommand(t *testing.T) { + cases := []struct { + name string + container *api.Container + envs []kubecontainer.EnvVar + expected *dockertypes.ContainerCreateConfig + }{ + { + name: "none", + container: &api.Container{}, + expected: &dockertypes.ContainerCreateConfig{ + Config: &dockercontainer.Config{}, + }, + }, + { + name: "command", + container: &api.Container{ + Command: []string{"foo", "bar"}, + }, + expected: &dockertypes.ContainerCreateConfig{ + Config: &dockercontainer.Config{ + Entrypoint: dockerstrslice.StrSlice([]string{"foo", "bar"}), + }, + }, + }, + { + name: "command expanded", + container: &api.Container{ + Command: []string{"foo", "$(VAR_TEST)", "$(VAR_TEST2)"}, + }, + envs: []kubecontainer.EnvVar{ + { + Name: "VAR_TEST", + Value: "zoo", + }, + { + Name: "VAR_TEST2", + Value: "boo", + }, + }, + expected: &dockertypes.ContainerCreateConfig{ + Config: &dockercontainer.Config{ + Entrypoint: dockerstrslice.StrSlice([]string{"foo", "zoo", "boo"}), + }, + }, + }, 
+ { + name: "args", + container: &api.Container{ + Args: []string{"foo", "bar"}, + }, + expected: &dockertypes.ContainerCreateConfig{ + Config: &dockercontainer.Config{ + Cmd: []string{"foo", "bar"}, + }, + }, + }, + { + name: "args expanded", + container: &api.Container{ + Args: []string{"zap", "$(VAR_TEST)", "$(VAR_TEST2)"}, + }, + envs: []kubecontainer.EnvVar{ + { + Name: "VAR_TEST", + Value: "hap", + }, + { + Name: "VAR_TEST2", + Value: "trap", + }, + }, + expected: &dockertypes.ContainerCreateConfig{ + Config: &dockercontainer.Config{ + Cmd: dockerstrslice.StrSlice([]string{"zap", "hap", "trap"}), + }, + }, + }, + { + name: "both", + container: &api.Container{ + Command: []string{"foo"}, + Args: []string{"bar", "baz"}, + }, + expected: &dockertypes.ContainerCreateConfig{ + Config: &dockercontainer.Config{ + Entrypoint: dockerstrslice.StrSlice([]string{"foo"}), + Cmd: dockerstrslice.StrSlice([]string{"bar", "baz"}), + }, + }, + }, + { + name: "both expanded", + container: &api.Container{ + Command: []string{"$(VAR_TEST2)--$(VAR_TEST)", "foo", "$(VAR_TEST3)"}, + Args: []string{"foo", "$(VAR_TEST)", "$(VAR_TEST2)"}, + }, + envs: []kubecontainer.EnvVar{ + { + Name: "VAR_TEST", + Value: "zoo", + }, + { + Name: "VAR_TEST2", + Value: "boo", + }, + { + Name: "VAR_TEST3", + Value: "roo", + }, + }, + expected: &dockertypes.ContainerCreateConfig{ + Config: &dockercontainer.Config{ + Entrypoint: dockerstrslice.StrSlice([]string{"boo--zoo", "foo", "roo"}), + Cmd: dockerstrslice.StrSlice([]string{"foo", "zoo", "boo"}), + }, + }, + }, + } + + for _, tc := range cases { + opts := &kubecontainer.RunContainerOptions{ + Envs: tc.envs, + } + + actualOpts := dockertypes.ContainerCreateConfig{ + Config: &dockercontainer.Config{}, + } + setEntrypointAndCommand(tc.container, opts, actualOpts) + + if e, a := tc.expected.Config.Entrypoint, actualOpts.Config.Entrypoint; !api.Semantic.DeepEqual(e, a) { + t.Errorf("%v: unexpected entrypoint: expected %v, got %v", tc.name, e, a) + } + if e, a := tc.expected.Config.Cmd, actualOpts.Config.Cmd; !api.Semantic.DeepEqual(e, a) { + t.Errorf("%v: unexpected command: expected %v, got %v", tc.name, e, a) + } + } +} + +// verifyPods returns true if the two pod slices are equal. +func verifyPods(a, b []*kubecontainer.Pod) bool { + if len(a) != len(b) { + return false + } + + // Sort the containers within a pod. + for i := range a { + sort.Sort(containersByID(a[i].Containers)) + } + for i := range b { + sort.Sort(containersByID(b[i].Containers)) + } + + // Sort the pods by UID. + sort.Sort(podsByID(a)) + sort.Sort(podsByID(b)) + + return reflect.DeepEqual(a, b) +} + +func TestGetPods(t *testing.T) { + manager, fakeDocker := newTestDockerManager() + dockerContainers := []*FakeContainer{ + { + ID: "1111", + Name: "/k8s_foo_qux_new_1234_42", + }, + { + ID: "2222", + Name: "/k8s_bar_qux_new_1234_42", + }, + { + ID: "3333", + Name: "/k8s_bar_jlk_wen_5678_42", + }, + } + + // Convert the docker containers. 
This does not affect the test coverage + // because the conversion is tested separately in convert_test.go + containers := make([]*kubecontainer.Container, len(dockerContainers)) + for i := range containers { + c, err := toRuntimeContainer(&dockertypes.Container{ + ID: dockerContainers[i].ID, + Names: []string{dockerContainers[i].Name}, + }) + if err != nil { + t.Fatalf("unexpected error %v", err) + } + containers[i] = c + } + + expected := []*kubecontainer.Pod{ + { + ID: kubetypes.UID("1234"), + Name: "qux", + Namespace: "new", + Containers: []*kubecontainer.Container{containers[0], containers[1]}, + }, + { + ID: kubetypes.UID("5678"), + Name: "jlk", + Namespace: "wen", + Containers: []*kubecontainer.Container{containers[2]}, + }, + } + + fakeDocker.SetFakeRunningContainers(dockerContainers) + actual, err := manager.GetPods(false) + if err != nil { + t.Fatalf("unexpected error %v", err) + } + if !verifyPods(expected, actual) { + t.Errorf("expected %#v, got %#v", expected, actual) + } +} + +func TestListImages(t *testing.T) { + manager, fakeDocker := newTestDockerManager() + dockerImages := []dockertypes.Image{{ID: "1111"}, {ID: "2222"}, {ID: "3333"}} + expected := sets.NewString([]string{"1111", "2222", "3333"}...) + + fakeDocker.Images = dockerImages + actualImages, err := manager.ListImages() + if err != nil { + t.Fatalf("unexpected error %v", err) + } + actual := sets.NewString() + for _, i := range actualImages { + actual.Insert(i.ID) + } + // We can compare the two sets directly because util.StringSet.List() + // returns a "sorted" list. + if !reflect.DeepEqual(expected.List(), actual.List()) { + t.Errorf("expected %#v, got %#v", expected.List(), actual.List()) + } +} + +func TestKillContainerInPod(t *testing.T) { + manager, fakeDocker := newTestDockerManager() + + pod := &api.Pod{ + ObjectMeta: api.ObjectMeta{ + UID: "12345678", + Name: "qux", + Namespace: "new", + }, + Spec: api.PodSpec{Containers: []api.Container{{Name: "foo"}, {Name: "bar"}}}, + } + containers := []*FakeContainer{ + { + ID: "1111", + Name: "/k8s_foo_qux_new_1234_42", + }, + { + ID: "2222", + Name: "/k8s_bar_qux_new_1234_42", + }, + } + containerToKill := containers[0] + containerToSpare := containers[1] + + fakeDocker.SetFakeRunningContainers(containers) + + if err := manager.KillContainerInPod(kubecontainer.ContainerID{}, &pod.Spec.Containers[0], pod, "test kill container in pod.", nil); err != nil { + t.Errorf("unexpected error: %v", err) + } + // Assert the container has been stopped. + if err := fakeDocker.AssertStopped([]string{containerToKill.ID}); err != nil { + t.Errorf("container was not stopped correctly: %v", err) + } + // Assert the container has been spared. 
+ if err := fakeDocker.AssertStopped([]string{containerToSpare.ID}); err == nil { + t.Errorf("container unexpectedly stopped: %v", containerToSpare.ID) + } +} + +func TestKillContainerInPodWithPreStop(t *testing.T) { + manager, fakeDocker := newTestDockerManager() + fakeDocker.ExecInspect = &dockertypes.ContainerExecInspect{ + Running: false, + ExitCode: 0, + } + expectedCmd := []string{"foo.sh", "bar"} + pod := &api.Pod{ + ObjectMeta: api.ObjectMeta{ + UID: "12345678", + Name: "qux", + Namespace: "new", + }, + Spec: api.PodSpec{ + Containers: []api.Container{ + { + Name: "foo", + Lifecycle: &api.Lifecycle{ + PreStop: &api.Handler{ + Exec: &api.ExecAction{ + Command: expectedCmd, + }, + }, + }, + }, + {Name: "bar"}}}, + } + podString, err := runtime.Encode(testapi.Default.Codec(), pod) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + containers := []*FakeContainer{ + { + ID: "1111", + Name: "/k8s_foo_qux_new_1234_42", + Config: &dockercontainer.Config{ + Labels: map[string]string{ + kubernetesPodLabel: string(podString), + types.KubernetesContainerNameLabel: "foo", + }, + }, + }, + { + ID: "2222", + Name: "/k8s_bar_qux_new_1234_42", + }, + } + containerToKill := containers[0] + fakeDocker.SetFakeRunningContainers(containers) + + if err := manager.KillContainerInPod(kubecontainer.ContainerID{}, &pod.Spec.Containers[0], pod, "test kill container with preStop.", nil); err != nil { + t.Errorf("unexpected error: %v", err) + } + // Assert the container has been stopped. + if err := fakeDocker.AssertStopped([]string{containerToKill.ID}); err != nil { + t.Errorf("container was not stopped correctly: %v", err) + } + verifyCalls(t, fakeDocker, []string{"list", "inspect_container", "create_exec", "start_exec", "stop"}) + if !reflect.DeepEqual(expectedCmd, fakeDocker.execCmd) { + t.Errorf("expected: %v, got %v", expectedCmd, fakeDocker.execCmd) + } +} + +func TestKillContainerInPodWithError(t *testing.T) { + manager, fakeDocker := newTestDockerManager() + + pod := &api.Pod{ + ObjectMeta: api.ObjectMeta{ + UID: "12345678", + Name: "qux", + Namespace: "new", + }, + Spec: api.PodSpec{Containers: []api.Container{{Name: "foo"}, {Name: "bar"}}}, + } + containers := []*FakeContainer{ + { + ID: "1111", + Name: "/k8s_foo_qux_new_1234_42", + }, + { + ID: "2222", + Name: "/k8s_bar_qux_new_1234_42", + }, + } + fakeDocker.SetFakeRunningContainers(containers) + fakeDocker.InjectError("stop", fmt.Errorf("sample error")) + + if err := manager.KillContainerInPod(kubecontainer.ContainerID{}, &pod.Spec.Containers[0], pod, "test kill container with error.", nil); err == nil { + t.Errorf("expected error, found nil") + } +} + +func TestIsAExitError(t *testing.T) { + var err error + err = &dockerExitError{nil} + _, ok := err.(uexec.ExitError) + if !ok { + t.Error("couldn't cast dockerExitError to exec.ExitError") + } +} + +func generatePodInfraContainerHash(pod *api.Pod) uint64 { + var ports []api.ContainerPort + if pod.Spec.SecurityContext == nil || !pod.Spec.SecurityContext.HostNetwork { + for _, container := range pod.Spec.Containers { + ports = append(ports, container.Ports...) + } + } + + container := &api.Container{ + Name: PodInfraContainerName, + Image: options.GetDefaultPodInfraContainerImage(), + Ports: ports, + ImagePullPolicy: podInfraContainerImagePullPolicy, + } + return kubecontainer.HashContainer(container) +} + +// runSyncPod is a helper function to retrieve the running pods from the fake +// docker client and runs SyncPod for the given pod. 
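+// When expectErr is true the sync must fail; when it is false, any sync error
+// fails the test.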
+func runSyncPod(t *testing.T, dm *DockerManager, fakeDocker *FakeDockerClient, pod *api.Pod, backOff *flowcontrol.Backoff, expectErr bool) kubecontainer.PodSyncResult { + podStatus, err := dm.GetPodStatus(pod.UID, pod.Name, pod.Namespace) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + fakeDocker.ClearCalls() + if backOff == nil { + backOff = flowcontrol.NewBackOff(time.Second, time.Minute) + } + // api.PodStatus is not used in SyncPod now, pass in an empty one. + result := dm.SyncPod(pod, api.PodStatus{}, podStatus, []api.Secret{}, backOff) + err = result.Error() + if err != nil && !expectErr { + t.Errorf("unexpected error: %v", err) + } else if err == nil && expectErr { + t.Errorf("expected error didn't occur") + } + return result +} + +func TestSyncPodCreateNetAndContainer(t *testing.T) { + dm, fakeDocker := newTestDockerManager() + dm.podInfraContainerImage = "pod_infra_image" + + pod := &api.Pod{ + ObjectMeta: api.ObjectMeta{ + UID: "12345678", + Name: "foo", + Namespace: "new", + }, + Spec: api.PodSpec{ + Containers: []api.Container{ + {Name: "bar"}, + }, + }, + } + + runSyncPod(t, dm, fakeDocker, pod, nil, false) + verifyCalls(t, fakeDocker, []string{ + // Create pod infra container. + "create", "start", "inspect_container", "inspect_container", + // Create container. + "create", "start", "inspect_container", + }) + fakeDocker.Lock() + + found := false + for _, c := range fakeDocker.RunningContainerList { + if c.Image == "pod_infra_image" && strings.HasPrefix(c.Names[0], "/k8s_POD") { + found = true + } + } + if !found { + t.Errorf("Custom pod infra container not found: %v", fakeDocker.RunningContainerList) + } + + if len(fakeDocker.Created) != 2 || + !matchString(t, "/k8s_POD\\.[a-f0-9]+_foo_new_", fakeDocker.Created[0]) || + !matchString(t, "/k8s_bar\\.[a-f0-9]+_foo_new_", fakeDocker.Created[1]) { + t.Errorf("unexpected containers created %v", fakeDocker.Created) + } + fakeDocker.Unlock() +} + +func TestSyncPodCreatesNetAndContainerPullsImage(t *testing.T) { + dm, fakeDocker := newTestDockerManager() + dm.podInfraContainerImage = "pod_infra_image" + puller := dm.dockerPuller.(*FakeDockerPuller) + puller.HasImages = []string{} + dm.podInfraContainerImage = "pod_infra_image" + pod := &api.Pod{ + ObjectMeta: api.ObjectMeta{ + UID: "12345678", + Name: "foo", + Namespace: "new", + }, + Spec: api.PodSpec{ + Containers: []api.Container{ + {Name: "bar", Image: "something", ImagePullPolicy: "IfNotPresent"}, + }, + }, + } + + runSyncPod(t, dm, fakeDocker, pod, nil, false) + + verifyCalls(t, fakeDocker, []string{ + // Create pod infra container. + "create", "start", "inspect_container", "inspect_container", + // Create container. 
+ "create", "start", "inspect_container", + }) + + fakeDocker.Lock() + + if !reflect.DeepEqual(puller.ImagesPulled, []string{"pod_infra_image", "something"}) { + t.Errorf("unexpected pulled containers: %v", puller.ImagesPulled) + } + + if len(fakeDocker.Created) != 2 || + !matchString(t, "/k8s_POD\\.[a-f0-9]+_foo_new_", fakeDocker.Created[0]) || + !matchString(t, "/k8s_bar\\.[a-f0-9]+_foo_new_", fakeDocker.Created[1]) { + t.Errorf("unexpected containers created %v", fakeDocker.Created) + } + fakeDocker.Unlock() +} + +func TestSyncPodWithPodInfraCreatesContainer(t *testing.T) { + dm, fakeDocker := newTestDockerManager() + pod := &api.Pod{ + ObjectMeta: api.ObjectMeta{ + UID: "12345678", + Name: "foo", + Namespace: "new", + }, + Spec: api.PodSpec{ + Containers: []api.Container{ + {Name: "bar"}, + }, + }, + } + + fakeDocker.SetFakeRunningContainers([]*FakeContainer{{ + ID: "9876", + // Pod infra container. + Name: "/k8s_POD." + strconv.FormatUint(generatePodInfraContainerHash(pod), 16) + "_foo_new_12345678_0", + }}) + runSyncPod(t, dm, fakeDocker, pod, nil, false) + + verifyCalls(t, fakeDocker, []string{ + // Create container. + "create", "start", "inspect_container", + }) + + fakeDocker.Lock() + if len(fakeDocker.Created) != 1 || + !matchString(t, "/k8s_bar\\.[a-f0-9]+_foo_new_", fakeDocker.Created[0]) { + t.Errorf("unexpected containers created %v", fakeDocker.Created) + } + fakeDocker.Unlock() +} + +func TestSyncPodDeletesWithNoPodInfraContainer(t *testing.T) { + dm, fakeDocker := newTestDockerManager() + pod := &api.Pod{ + ObjectMeta: api.ObjectMeta{ + UID: "12345678", + Name: "foo1", + Namespace: "new", + }, + Spec: api.PodSpec{ + Containers: []api.Container{ + {Name: "bar1"}, + }, + }, + } + fakeDocker.SetFakeRunningContainers([]*FakeContainer{{ + ID: "1234", + Name: "/k8s_bar1_foo1_new_12345678_0", + }}) + + runSyncPod(t, dm, fakeDocker, pod, nil, false) + + verifyCalls(t, fakeDocker, []string{ + // Kill the container since pod infra container is not running. + "stop", + // Create pod infra container. + "create", "start", "inspect_container", "inspect_container", + // Create container. + "create", "start", "inspect_container", + }) + + // A map iteration is used to delete containers, so must not depend on + // order here. + expectedToStop := map[string]bool{ + "1234": true, + } + fakeDocker.Lock() + if len(fakeDocker.Stopped) != 1 || !expectedToStop[fakeDocker.Stopped[0]] { + t.Errorf("Wrong containers were stopped: %v", fakeDocker.Stopped) + } + fakeDocker.Unlock() +} + +func TestSyncPodDeletesDuplicate(t *testing.T) { + dm, fakeDocker := newTestDockerManager() + pod := &api.Pod{ + ObjectMeta: api.ObjectMeta{ + UID: "12345678", + Name: "bar", + Namespace: "new", + }, + Spec: api.PodSpec{ + Containers: []api.Container{ + {Name: "foo"}, + }, + }, + } + + fakeDocker.SetFakeRunningContainers([]*FakeContainer{ + { + ID: "1234", + Name: "/k8s_foo_bar_new_12345678_1111", + }, + { + ID: "9876", + Name: "/k8s_POD." + strconv.FormatUint(generatePodInfraContainerHash(pod), 16) + "_bar_new_12345678_2222", + }, + { + ID: "4567", + Name: "/k8s_foo_bar_new_12345678_3333", + }}) + + runSyncPod(t, dm, fakeDocker, pod, nil, false) + + verifyCalls(t, fakeDocker, []string{ + // Kill the duplicated container. + "stop", + }) + // Expect one of the duplicates to be killed. 
+ if len(fakeDocker.Stopped) != 1 || (fakeDocker.Stopped[0] != "1234" && fakeDocker.Stopped[0] != "4567") { + t.Errorf("Wrong containers were stopped: %v", fakeDocker.Stopped) + } +} + +func TestSyncPodBadHash(t *testing.T) { + dm, fakeDocker := newTestDockerManager() + pod := &api.Pod{ + ObjectMeta: api.ObjectMeta{ + UID: "12345678", + Name: "foo", + Namespace: "new", + }, + Spec: api.PodSpec{ + Containers: []api.Container{ + {Name: "bar"}, + }, + }, + } + + fakeDocker.SetFakeRunningContainers([]*FakeContainer{ + { + ID: "1234", + Name: "/k8s_bar.1234_foo_new_12345678_42", + }, + { + ID: "9876", + Name: "/k8s_POD." + strconv.FormatUint(generatePodInfraContainerHash(pod), 16) + "_foo_new_12345678_42", + }}) + runSyncPod(t, dm, fakeDocker, pod, nil, false) + + verifyCalls(t, fakeDocker, []string{ + // Kill and restart the bad hash container. + "stop", "create", "start", "inspect_container", + }) + + if err := fakeDocker.AssertStopped([]string{"1234"}); err != nil { + t.Errorf("%v", err) + } +} + +func TestSyncPodsUnhealthy(t *testing.T) { + const ( + unhealthyContainerID = "1234" + infraContainerID = "9876" + ) + dm, fakeDocker := newTestDockerManager() + pod := &api.Pod{ + ObjectMeta: api.ObjectMeta{ + UID: "12345678", + Name: "foo", + Namespace: "new", + }, + Spec: api.PodSpec{ + Containers: []api.Container{{Name: "unhealthy"}}, + }, + } + + fakeDocker.SetFakeRunningContainers([]*FakeContainer{ + { + ID: unhealthyContainerID, + Name: "/k8s_unhealthy_foo_new_12345678_42", + }, + { + ID: infraContainerID, + Name: "/k8s_POD." + strconv.FormatUint(generatePodInfraContainerHash(pod), 16) + "_foo_new_12345678_42", + }}) + dm.livenessManager.Set(kubecontainer.DockerID(unhealthyContainerID).ContainerID(), proberesults.Failure, pod) + + runSyncPod(t, dm, fakeDocker, pod, nil, false) + + verifyCalls(t, fakeDocker, []string{ + // Kill the unhealthy container. + "stop", + // Restart the unhealthy container. + "create", "start", "inspect_container", + }) + + if err := fakeDocker.AssertStopped([]string{unhealthyContainerID}); err != nil { + t.Errorf("%v", err) + } +} + +func TestSyncPodsDoesNothing(t *testing.T) { + dm, fakeDocker := newTestDockerManager() + container := api.Container{Name: "bar"} + pod := &api.Pod{ + ObjectMeta: api.ObjectMeta{ + UID: "12345678", + Name: "foo", + Namespace: "new", + }, + Spec: api.PodSpec{ + Containers: []api.Container{ + container, + }, + }, + } + fakeDocker.SetFakeRunningContainers([]*FakeContainer{ + { + ID: "1234", + Name: "/k8s_bar." + strconv.FormatUint(kubecontainer.HashContainer(&container), 16) + "_foo_new_12345678_0", + }, + { + ID: "9876", + Name: "/k8s_POD." + strconv.FormatUint(generatePodInfraContainerHash(pod), 16) + "_foo_new_12345678_0", + }}) + + runSyncPod(t, dm, fakeDocker, pod, nil, false) + + verifyCalls(t, fakeDocker, []string{}) +} + +func TestSyncPodWithRestartPolicy(t *testing.T) { + dm, fakeDocker := newTestDockerManager() + containers := []api.Container{ + {Name: "succeeded"}, + {Name: "failed"}, + } + pod := &api.Pod{ + ObjectMeta: api.ObjectMeta{ + UID: "12345678", + Name: "foo", + Namespace: "new", + }, + Spec: api.PodSpec{ + Containers: containers, + }, + } + dockerContainers := []*FakeContainer{ + { + ID: "9876", + Name: "/k8s_POD." + strconv.FormatUint(generatePodInfraContainerHash(pod), 16) + "_foo_new_12345678_0", + StartedAt: time.Now(), + Running: true, + }, + { + ID: "1234", + Name: "/k8s_succeeded." 
+ strconv.FormatUint(kubecontainer.HashContainer(&containers[0]), 16) + "_foo_new_12345678_0", + ExitCode: 0, + StartedAt: time.Now(), + FinishedAt: time.Now(), + }, + { + ID: "5678", + Name: "/k8s_failed." + strconv.FormatUint(kubecontainer.HashContainer(&containers[1]), 16) + "_foo_new_12345678_0", + ExitCode: 42, + StartedAt: time.Now(), + FinishedAt: time.Now(), + }} + + tests := []struct { + policy api.RestartPolicy + calls []string + created []string + stopped []string + }{ + { + api.RestartPolicyAlways, + []string{ + // Restart both containers. + "create", "start", "inspect_container", "create", "start", "inspect_container", + }, + []string{"succeeded", "failed"}, + []string{}, + }, + { + api.RestartPolicyOnFailure, + []string{ + // Restart the failed container. + "create", "start", "inspect_container", + }, + []string{"failed"}, + []string{}, + }, + { + api.RestartPolicyNever, + []string{ + // Check the pod infra container. + "inspect_container", "inspect_container", + // Stop the last pod infra container. + "stop", + }, + []string{}, + []string{"9876"}, + }, + } + + for i, tt := range tests { + fakeDocker.SetFakeContainers(dockerContainers) + pod.Spec.RestartPolicy = tt.policy + runSyncPod(t, dm, fakeDocker, pod, nil, false) + // 'stop' is because the pod infra container is killed when no container is running. + verifyCalls(t, fakeDocker, tt.calls) + + if err := fakeDocker.AssertCreated(tt.created); err != nil { + t.Errorf("case [%d]: %v", i, err) + } + if err := fakeDocker.AssertStopped(tt.stopped); err != nil { + t.Errorf("case [%d]: %v", i, err) + } + } +} + +func TestSyncPodBackoff(t *testing.T) { + var fakeClock = util.NewFakeClock(time.Now()) + startTime := fakeClock.Now() + + dm, fakeDocker := newTestDockerManager() + containers := []api.Container{ + {Name: "good"}, + {Name: "bad"}, + } + pod := &api.Pod{ + ObjectMeta: api.ObjectMeta{ + UID: "12345678", + Name: "podfoo", + Namespace: "nsnew", + }, + Spec: api.PodSpec{ + Containers: containers, + }, + } + + stableId := "k8s_bad." + strconv.FormatUint(kubecontainer.HashContainer(&containers[1]), 16) + "_podfoo_nsnew_12345678" + dockerContainers := []*FakeContainer{ + { + ID: "9876", + Name: "/k8s_POD." + strconv.FormatUint(generatePodInfraContainerHash(pod), 16) + "_podfoo_nsnew_12345678_0", + StartedAt: startTime, + Running: true, + }, + { + ID: "1234", + Name: "/k8s_good." + strconv.FormatUint(kubecontainer.HashContainer(&containers[0]), 16) + "_podfoo_nsnew_12345678_0", + StartedAt: startTime, + Running: true, + }, + { + ID: "5678", + Name: "/k8s_bad." 
+ strconv.FormatUint(kubecontainer.HashContainer(&containers[1]), 16) + "_podfoo_nsnew_12345678_0",
+			ExitCode:   42,
+			StartedAt:  startTime,
+			FinishedAt: fakeClock.Now(),
+		},
+	}
+
+	startCalls := []string{"create", "start", "inspect_container"}
+	backOffCalls := []string{}
+	startResult := &kubecontainer.SyncResult{Action: kubecontainer.StartContainer, Target: "bad", Error: nil, Message: ""}
+	backoffResult := &kubecontainer.SyncResult{Action: kubecontainer.StartContainer, Target: "bad", Error: kubecontainer.ErrCrashLoopBackOff, Message: ""}
+	tests := []struct {
+		tick      int
+		backoff   int
+		killDelay int
+		result    []string
+		expectErr bool
+	}{
+		{1, 1, 1, startCalls, false},
+		{2, 2, 2, startCalls, false},
+		{3, 2, 3, backOffCalls, true},
+		{4, 4, 4, startCalls, false},
+		{5, 4, 5, backOffCalls, true},
+		{6, 4, 6, backOffCalls, true},
+		{7, 4, 7, backOffCalls, true},
+		{8, 8, 129, startCalls, false},
+		{130, 1, 0, startCalls, false},
+	}
+
+	backOff := flowcontrol.NewBackOff(time.Second, time.Minute)
+	backOff.Clock = fakeClock
+	for _, c := range tests {
+		fakeDocker.SetFakeContainers(dockerContainers)
+		fakeClock.SetTime(startTime.Add(time.Duration(c.tick) * time.Second))
+
+		result := runSyncPod(t, dm, fakeDocker, pod, backOff, c.expectErr)
+		verifyCalls(t, fakeDocker, c.result)
+
+		// Verify whether the correct sync pod result is generated
+		if c.expectErr {
+			verifySyncResults(t, []*kubecontainer.SyncResult{backoffResult}, result)
+		} else {
+			verifySyncResults(t, []*kubecontainer.SyncResult{startResult}, result)
+		}
+
+		if backOff.Get(stableId) != time.Duration(c.backoff)*time.Second {
+			t.Errorf("At tick %s expected backoff=%s got=%s", time.Duration(c.tick)*time.Second, time.Duration(c.backoff)*time.Second, backOff.Get(stableId))
+		}
+
+		if len(fakeDocker.Created) > 0 {
+			// pretend to kill the container
+			fakeDocker.Created = nil
+			dockerContainers[2].FinishedAt = startTime.Add(time.Duration(c.killDelay) * time.Second)
+		}
+	}
+}
+
+func TestGetRestartCount(t *testing.T) {
+	dm, fakeDocker := newTestDockerManager()
+	containerName := "bar"
+	pod := api.Pod{
+		ObjectMeta: api.ObjectMeta{
+			UID:       "12345678",
+			Name:      "foo",
+			Namespace: "new",
+		},
+		Spec: api.PodSpec{
+			Containers: []api.Container{
+				{Name: containerName},
+			},
+			RestartPolicy: "Always",
+		},
+		Status: api.PodStatus{
+			ContainerStatuses: []api.ContainerStatus{
+				{
+					Name:         containerName,
+					RestartCount: 3,
+				},
+			},
+		},
+	}
+
+	// Helper function for verifying the restart count.
+	verifyRestartCount := func(pod *api.Pod, expectedCount int) {
+		runSyncPod(t, dm, fakeDocker, pod, nil, false)
+		status, err := dm.GetPodStatus(pod.UID, pod.Name, pod.Namespace)
+		if err != nil {
+			t.Fatalf("unexpected error %v", err)
+		}
+		cs := status.FindContainerStatusByName(containerName)
+		if cs == nil {
+			t.Fatalf("Can't find status for container %q", containerName)
+		}
+		restartCount := cs.RestartCount
+		if restartCount != expectedCount {
+			t.Errorf("expected %d restart count, got %d", expectedCount, restartCount)
+		}
+	}
+
+	killOneContainer := func(pod *api.Pod) {
+		status, err := dm.GetPodStatus(pod.UID, pod.Name, pod.Namespace)
+		if err != nil {
+			t.Fatalf("unexpected error %v", err)
+		}
+		cs := status.FindContainerStatusByName(containerName)
+		if cs == nil {
+			t.Fatalf("Can't find status for container %q", containerName)
+		}
+		dm.KillContainerInPod(cs.ID, &pod.Spec.Containers[0], pod, "test container restart count.", nil)
+	}
+	// Container "bar" starts the first time.
+	// TODO: container lists are expected to be sorted in reverse chronological order.
+	// We should fix FakeDockerClient to sort the list before returning.
+	// (random-liu) Just partially sorted now.
+	verifyRestartCount(&pod, 0)
+	killOneContainer(&pod)
+
+	// Poor container "bar" has been killed, and should be restarted with restart count 1
+	verifyRestartCount(&pod, 1)
+	killOneContainer(&pod)
+
+	// Poor container "bar" has been killed again, and should be restarted with restart count 2
+	verifyRestartCount(&pod, 2)
+	killOneContainer(&pod)
+
+	// Poor container "bar" has been killed again and again, and should be restarted with restart count 3
+	verifyRestartCount(&pod, 3)
+
+	// The oldest container has been garbage collected
+	exitedContainers := fakeDocker.ExitedContainerList
+	fakeDocker.ExitedContainerList = exitedContainers[:len(exitedContainers)-1]
+	verifyRestartCount(&pod, 3)
+
+	// The last two oldest containers have been garbage collected
+	fakeDocker.ExitedContainerList = exitedContainers[:len(exitedContainers)-2]
+	verifyRestartCount(&pod, 3)
+
+	// All exited containers have been garbage collected, so the restart count should be obtained from the old API pod status
+	fakeDocker.ExitedContainerList = []dockertypes.Container{}
+	verifyRestartCount(&pod, 3)
+	killOneContainer(&pod)
+
+	// Poor container "bar" has been killed again and again and again, and should be restarted with restart count 4
+	verifyRestartCount(&pod, 4)
+}
+
+func TestGetTerminationMessagePath(t *testing.T) {
+	dm, fakeDocker := newTestDockerManager()
+	containers := []api.Container{
+		{
+			Name: "bar",
+			TerminationMessagePath: "/dev/somepath",
+		},
+	}
+	pod := &api.Pod{
+		ObjectMeta: api.ObjectMeta{
+			UID:       "12345678",
+			Name:      "foo",
+			Namespace: "new",
+		},
+		Spec: api.PodSpec{
+			Containers: containers,
+		},
+	}
+
+	runSyncPod(t, dm, fakeDocker, pod, nil, false)
+
+	containerList := fakeDocker.RunningContainerList
+	if len(containerList) != 2 {
+		// One for infra container, one for container "bar"
+		t.Fatalf("unexpected container list length %d", len(containerList))
+	}
+	inspectResult, err := fakeDocker.InspectContainer(containerList[0].ID)
+	if err != nil {
+		t.Fatalf("unexpected inspect error: %v", err)
+	}
+	containerInfo := getContainerInfoFromLabel(inspectResult.Config.Labels)
+	terminationMessagePath := containerInfo.TerminationMessagePath
+	if terminationMessagePath != containers[0].TerminationMessagePath {
+		t.Errorf("expected termination message path %s, got %s", containers[0].TerminationMessagePath, terminationMessagePath)
+	}
+}
+
+func TestSyncPodWithPodInfraCreatesContainerCallsHandler(t *testing.T) {
+	fakeHTTPClient := &fakeHTTP{}
+	dm, fakeDocker := newTestDockerManagerWithHTTPClient(fakeHTTPClient)
+
+	pod := &api.Pod{
+		ObjectMeta: api.ObjectMeta{
+			UID:       "12345678",
+			Name:      "foo",
+			Namespace: "new",
+		},
+		Spec: api.PodSpec{
+			Containers: []api.Container{
+				{
+					Name: "bar",
+					Lifecycle: &api.Lifecycle{
+						PostStart: &api.Handler{
+							HTTPGet: &api.HTTPGetAction{
+								Host: "foo",
+								Port: intstr.FromInt(8080),
+								Path: "bar",
+							},
+						},
+					},
+				},
+			},
+		},
+	}
+	fakeDocker.SetFakeRunningContainers([]*FakeContainer{{
+		ID:   "9876",
+		Name: "/k8s_POD." + strconv.FormatUint(generatePodInfraContainerHash(pod), 16) + "_foo_new_12345678_0",
+	}})
+	runSyncPod(t, dm, fakeDocker, pod, nil, false)
+
+	verifyCalls(t, fakeDocker, []string{
+		// Create container.
+ "create", "start", "inspect_container", + }) + + fakeDocker.Lock() + if len(fakeDocker.Created) != 1 || + !matchString(t, "/k8s_bar\\.[a-f0-9]+_foo_new_", fakeDocker.Created[0]) { + t.Errorf("unexpected containers created %v", fakeDocker.Created) + } + fakeDocker.Unlock() + if fakeHTTPClient.url != "http://foo:8080/bar" { + t.Errorf("unexpected handler: %q", fakeHTTPClient.url) + } +} + +func TestSyncPodEventHandlerFails(t *testing.T) { + // Simulate HTTP failure. + fakeHTTPClient := &fakeHTTP{err: fmt.Errorf("test error")} + dm, fakeDocker := newTestDockerManagerWithHTTPClient(fakeHTTPClient) + + pod := &api.Pod{ + ObjectMeta: api.ObjectMeta{ + UID: "12345678", + Name: "foo", + Namespace: "new", + }, + Spec: api.PodSpec{ + Containers: []api.Container{ + {Name: "bar", + Lifecycle: &api.Lifecycle{ + PostStart: &api.Handler{ + HTTPGet: &api.HTTPGetAction{ + Host: "does.no.exist", + Port: intstr.FromInt(8080), + Path: "bar", + }, + }, + }, + }, + }, + }, + } + + fakeDocker.SetFakeRunningContainers([]*FakeContainer{{ + ID: "9876", + Name: "/k8s_POD." + strconv.FormatUint(generatePodInfraContainerHash(pod), 16) + "_foo_new_12345678_0", + }}) + runSyncPod(t, dm, fakeDocker, pod, nil, true) + + verifyCalls(t, fakeDocker, []string{ + // Create the container. + "create", "start", + // Kill the container since event handler fails. + "stop", + }) + + // TODO(yifan): Check the stopped container's name. + if len(fakeDocker.Stopped) != 1 { + t.Fatalf("Wrong containers were stopped: %v", fakeDocker.Stopped) + } + dockerName, _, err := ParseDockerName(fakeDocker.Stopped[0]) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + if dockerName.ContainerName != "bar" { + t.Errorf("Wrong stopped container, expected: bar, get: %q", dockerName.ContainerName) + } +} + +type fakeReadWriteCloser struct{} + +func (*fakeReadWriteCloser) Read([]byte) (int, error) { return 0, nil } +func (*fakeReadWriteCloser) Write([]byte) (int, error) { return 0, nil } +func (*fakeReadWriteCloser) Close() error { return nil } + +func TestPortForwardNoSuchContainer(t *testing.T) { + dm, _ := newTestDockerManager() + + podName, podNamespace := "podName", "podNamespace" + err := dm.PortForward( + &kubecontainer.Pod{ + ID: "podID", + Name: podName, + Namespace: podNamespace, + Containers: nil, + }, + 5000, + // need a valid io.ReadWriteCloser here + &fakeReadWriteCloser{}, + ) + if err == nil { + t.Fatal("unexpected non-error") + } + expectedErr := noPodInfraContainerError(podName, podNamespace) + if !reflect.DeepEqual(err, expectedErr) { + t.Fatalf("expected %v, but saw %v", expectedErr, err) + } +} + +func TestSyncPodWithTerminationLog(t *testing.T) { + dm, fakeDocker := newTestDockerManager() + container := api.Container{ + Name: "bar", + TerminationMessagePath: "/dev/somepath", + } + pod := &api.Pod{ + ObjectMeta: api.ObjectMeta{ + UID: "12345678", + Name: "foo", + Namespace: "new", + }, + Spec: api.PodSpec{ + Containers: []api.Container{ + container, + }, + }, + } + + runSyncPod(t, dm, fakeDocker, pod, nil, false) + verifyCalls(t, fakeDocker, []string{ + // Create pod infra container. + "create", "start", "inspect_container", "inspect_container", + // Create container. 
+ "create", "start", "inspect_container", + }) + + defer os.Remove(testPodContainerDir) + + fakeDocker.Lock() + if len(fakeDocker.Created) != 2 || + !matchString(t, "/k8s_POD\\.[a-f0-9]+_foo_new_", fakeDocker.Created[0]) || + !matchString(t, "/k8s_bar\\.[a-f0-9]+_foo_new_", fakeDocker.Created[1]) { + t.Errorf("unexpected containers created %v", fakeDocker.Created) + } + fakeDocker.Unlock() + newContainer, err := fakeDocker.InspectContainer(fakeDocker.Created[1]) + if err != nil { + t.Fatalf("unexpected error %v", err) + } + parts := strings.Split(newContainer.HostConfig.Binds[0], ":") + if !matchString(t, testPodContainerDir+"/[a-f0-9]", parts[0]) { + t.Errorf("unexpected host path: %s", parts[0]) + } + if parts[1] != "/dev/somepath" { + t.Errorf("unexpected container path: %s", parts[1]) + } +} + +func TestSyncPodWithHostNetwork(t *testing.T) { + dm, fakeDocker := newTestDockerManager() + pod := &api.Pod{ + ObjectMeta: api.ObjectMeta{ + UID: "12345678", + Name: "foo", + Namespace: "new", + }, + Spec: api.PodSpec{ + Containers: []api.Container{ + {Name: "bar"}, + }, + SecurityContext: &api.PodSecurityContext{ + HostNetwork: true, + }, + }, + } + + runSyncPod(t, dm, fakeDocker, pod, nil, false) + + verifyCalls(t, fakeDocker, []string{ + // Create pod infra container. + "create", "start", "inspect_container", + // Create container. + "create", "start", "inspect_container", + }) + + fakeDocker.Lock() + if len(fakeDocker.Created) != 2 || + !matchString(t, "/k8s_POD\\.[a-f0-9]+_foo_new_", fakeDocker.Created[0]) || + !matchString(t, "/k8s_bar\\.[a-f0-9]+_foo_new_", fakeDocker.Created[1]) { + t.Errorf("unexpected containers created %v", fakeDocker.Created) + } + fakeDocker.Unlock() + + newContainer, err := fakeDocker.InspectContainer(fakeDocker.Created[1]) + if err != nil { + t.Fatalf("unexpected error %v", err) + } + utsMode := newContainer.HostConfig.UTSMode + if utsMode != "host" { + t.Errorf("Pod with host network must have \"host\" utsMode, actual: \"%v\"", utsMode) + } +} + +func TestVerifyNonRoot(t *testing.T) { + dm, fakeDocker := newTestDockerManager() + + // setup test cases. 
+	var rootUid int64 = 0
+	var nonRootUid int64 = 1
+
+	tests := map[string]struct {
+		container     *api.Container
+		inspectImage  *dockertypes.ImageInspect
+		expectedError string
+	}{
+		// success cases
+		"non-root runAsUser": {
+			container: &api.Container{
+				SecurityContext: &api.SecurityContext{
+					RunAsUser: &nonRootUid,
+				},
+			},
+		},
+		"numeric non-root image user": {
+			container: &api.Container{},
+			inspectImage: &dockertypes.ImageInspect{
+				Config: &dockercontainer.Config{
+					User: "1",
+				},
+			},
+		},
+		"numeric non-root image user with gid": {
+			container: &api.Container{},
+			inspectImage: &dockertypes.ImageInspect{
+				Config: &dockercontainer.Config{
+					User: "1:2",
+				},
+			},
+		},
+
+		// failure cases
+		"root runAsUser": {
+			container: &api.Container{
+				SecurityContext: &api.SecurityContext{
+					RunAsUser: &rootUid,
+				},
+			},
+			expectedError: "container's runAsUser breaks non-root policy",
+		},
+		"non-numeric image user": {
+			container: &api.Container{},
+			inspectImage: &dockertypes.ImageInspect{
+				Config: &dockercontainer.Config{
+					User: "foo",
+				},
+			},
+			expectedError: "non-numeric user",
+		},
+		"numeric root image user": {
+			container: &api.Container{},
+			inspectImage: &dockertypes.ImageInspect{
+				Config: &dockercontainer.Config{
+					User: "0",
+				},
+			},
+			expectedError: "container has no runAsUser and image will run as root",
+		},
+		"numeric root image user with gid": {
+			container: &api.Container{},
+			inspectImage: &dockertypes.ImageInspect{
+				Config: &dockercontainer.Config{
+					User: "0:1",
+				},
+			},
+			expectedError: "container has no runAsUser and image will run as root",
+		},
+		"nil image in inspect": {
+			container:     &api.Container{},
+			expectedError: "unable to inspect image",
+		},
+		"nil config in image inspect": {
+			container:     &api.Container{},
+			inspectImage:  &dockertypes.ImageInspect{},
+			expectedError: "unable to inspect image",
+		},
+	}
+
+	for k, v := range tests {
+		fakeDocker.Image = v.inspectImage
+		err := dm.verifyNonRoot(v.container)
+		if v.expectedError == "" && err != nil {
+			t.Errorf("case[%q]: unexpected error: %v", k, err)
+		}
+		if v.expectedError != "" && !strings.Contains(err.Error(), v.expectedError) {
+			t.Errorf("case[%q]: expected: %q, got: %q", k, v.expectedError, err.Error())
+		}
+	}
+}
+
+func TestGetUidFromUser(t *testing.T) {
+	tests := map[string]struct {
+		input  string
+		expect string
+	}{
+		"no gid": {
+			input:  "0",
+			expect: "0",
+		},
+		"uid/gid": {
+			input:  "0:1",
+			expect: "0",
+		},
+		"empty input": {
+			input:  "",
+			expect: "",
+		},
+		"multiple separators": {
+			input:  "1:2:3",
+			expect: "1",
+		},
+	}
+	for k, v := range tests {
+		actual := getUidFromUser(v.input)
+		if actual != v.expect {
+			t.Errorf("%s failed.
Expected %s but got %s", k, v.expect, actual) + } + } +} + +func TestGetPidMode(t *testing.T) { + // test false + pod := &api.Pod{} + pidMode := getPidMode(pod) + + if pidMode != "" { + t.Errorf("expected empty pid mode for pod but got %v", pidMode) + } + + // test true + pod.Spec.SecurityContext = &api.PodSecurityContext{} + pod.Spec.SecurityContext.HostPID = true + pidMode = getPidMode(pod) + if pidMode != "host" { + t.Errorf("expected host pid mode for pod but got %v", pidMode) + } +} + +func TestGetIPCMode(t *testing.T) { + // test false + pod := &api.Pod{} + ipcMode := getIPCMode(pod) + + if ipcMode != "" { + t.Errorf("expected empty ipc mode for pod but got %v", ipcMode) + } + + // test true + pod.Spec.SecurityContext = &api.PodSecurityContext{} + pod.Spec.SecurityContext.HostIPC = true + ipcMode = getIPCMode(pod) + if ipcMode != "host" { + t.Errorf("expected host ipc mode for pod but got %v", ipcMode) + } +} + +func TestSyncPodWithPullPolicy(t *testing.T) { + dm, fakeDocker := newTestDockerManager() + puller := dm.dockerPuller.(*FakeDockerPuller) + puller.HasImages = []string{"existing_one", "want:latest"} + dm.podInfraContainerImage = "pod_infra_image" + + pod := &api.Pod{ + ObjectMeta: api.ObjectMeta{ + UID: "12345678", + Name: "foo", + Namespace: "new", + }, + Spec: api.PodSpec{ + Containers: []api.Container{ + {Name: "bar", Image: "pull_always_image", ImagePullPolicy: api.PullAlways}, + {Name: "bar2", Image: "pull_if_not_present_image", ImagePullPolicy: api.PullIfNotPresent}, + {Name: "bar3", Image: "existing_one", ImagePullPolicy: api.PullIfNotPresent}, + {Name: "bar4", Image: "want:latest", ImagePullPolicy: api.PullIfNotPresent}, + {Name: "bar5", Image: "pull_never_image", ImagePullPolicy: api.PullNever}, + }, + }, + } + + expectedResults := []*kubecontainer.SyncResult{ + //Sync result for infra container + {kubecontainer.StartContainer, PodInfraContainerName, nil, ""}, + {kubecontainer.SetupNetwork, kubecontainer.GetPodFullName(pod), nil, ""}, + //Sync result for user containers + {kubecontainer.StartContainer, "bar", nil, ""}, + {kubecontainer.StartContainer, "bar2", nil, ""}, + {kubecontainer.StartContainer, "bar3", nil, ""}, + {kubecontainer.StartContainer, "bar4", nil, ""}, + {kubecontainer.StartContainer, "bar5", kubecontainer.ErrImageNeverPull, + "Container image \"pull_never_image\" is not present with pull policy of Never"}, + } + + result := runSyncPod(t, dm, fakeDocker, pod, nil, true) + verifySyncResults(t, expectedResults, result) + + fakeDocker.Lock() + defer fakeDocker.Unlock() + + pulledImageSorted := puller.ImagesPulled[:] + sort.Strings(pulledImageSorted) + assert.Equal(t, []string{"pod_infra_image", "pull_always_image", "pull_if_not_present_image"}, pulledImageSorted) + + if len(fakeDocker.Created) != 5 { + t.Errorf("unexpected containers created %v", fakeDocker.Created) + } +} + +// This test only covers SyncPod with PullImageFailure, CreateContainerFailure and StartContainerFailure. +// There are still quite a few failure cases not covered. +// TODO(random-liu): Better way to test the SyncPod failures. 
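+// Each case below injects its failure either via fakeDocker.InjectErrors,
+// keyed by the Docker call name (e.g. "create" or "start"), or via the fake
+// puller's ErrorsToInject, and then asserts the resulting SyncResult.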
+func TestSyncPodWithFailure(t *testing.T) {
+	pod := &api.Pod{
+		ObjectMeta: api.ObjectMeta{
+			UID:       "12345678",
+			Name:      "foo",
+			Namespace: "new",
+		},
+	}
+	tests := map[string]struct {
+		container   api.Container
+		dockerError map[string]error
+		pullerError []error
+		expected    []*kubecontainer.SyncResult
+	}{
+		"PullImageFailure": {
+			api.Container{Name: "bar", Image: "realImage", ImagePullPolicy: api.PullAlways},
+			map[string]error{},
+			[]error{fmt.Errorf("can't pull image")},
+			[]*kubecontainer.SyncResult{{kubecontainer.StartContainer, "bar", kubecontainer.ErrImagePull, "can't pull image"}},
+		},
+		"CreateContainerFailure": {
+			api.Container{Name: "bar", Image: "alreadyPresent"},
+			map[string]error{"create": fmt.Errorf("can't create container")},
+			[]error{},
+			[]*kubecontainer.SyncResult{{kubecontainer.StartContainer, "bar", kubecontainer.ErrRunContainer, "can't create container"}},
+		},
+		"StartContainerFailure": {
+			api.Container{Name: "bar", Image: "alreadyPresent"},
+			map[string]error{"start": fmt.Errorf("can't start container")},
+			[]error{},
+			[]*kubecontainer.SyncResult{{kubecontainer.StartContainer, "bar", kubecontainer.ErrRunContainer, "can't start container"}},
+		},
+	}
+
+	for _, test := range tests {
+		dm, fakeDocker := newTestDockerManager()
+		puller := dm.dockerPuller.(*FakeDockerPuller)
+		puller.HasImages = []string{test.container.Image}
+		// Pretend that the pod infra container has already been created, so that
+		// we can run the user containers.
+		fakeDocker.SetFakeRunningContainers([]*FakeContainer{{
+			ID:   "9876",
+			Name: "/k8s_POD." + strconv.FormatUint(generatePodInfraContainerHash(pod), 16) + "_foo_new_12345678_0",
+		}})
+		fakeDocker.InjectErrors(test.dockerError)
+		puller.ErrorsToInject = test.pullerError
+		pod.Spec.Containers = []api.Container{test.container}
+		result := runSyncPod(t, dm, fakeDocker, pod, nil, true)
+		verifySyncResults(t, test.expected, result)
+	}
+}
+
+// Verify that each expected result appears exactly once in the real result.
+func verifySyncResults(t *testing.T, expectedResults []*kubecontainer.SyncResult, realResult kubecontainer.PodSyncResult) {
+	if len(expectedResults) != len(realResult.SyncResults) {
+		t.Errorf("expected sync result number %d, got %d", len(expectedResults), len(realResult.SyncResults))
+		for _, r := range expectedResults {
+			t.Errorf("expected result: %+v", r)
+		}
+		for _, r := range realResult.SyncResults {
+			t.Errorf("real result: %+v", r)
+		}
+		return
+	}
+	// The container start order is not fixed, because SyncPod() uses a map to store the containers to start.
+	// Here we should make sure each expected result appears only once in the real result.
+	for _, expectR := range expectedResults {
+		found := 0
+		for _, realR := range realResult.SyncResults {
+			// For the same action of the same container, the result should be the same
+			if realR.Target == expectR.Target && realR.Action == expectR.Action {
+				// We use Contains() here because the message format may be changed, but at least we should
+				// make sure that the expected message is contained.
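+				// For example, an expected message "can't pull image" should also
+				// match a longer real message that merely contains it.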
+				if realR.Error != expectR.Error || !strings.Contains(realR.Message, expectR.Message) {
+					t.Errorf("expected sync result %+v, got %+v", expectR, realR)
+				}
+				found++
+			}
+		}
+		if found == 0 {
+			t.Errorf("expected result %+v not found", expectR)
+		}
+		if found > 1 {
+			t.Errorf("got %d duplicate expected result %+v", found, expectR)
+		}
+	}
+}
+
+func TestSeccompIsUnconfinedByDefaultWithDockerV110(t *testing.T) {
+	dm, fakeDocker := newTestDockerManagerWithVersion("1.10.1", "1.22")
+	pod := &api.Pod{
+		ObjectMeta: api.ObjectMeta{
+			UID:       "12345678",
+			Name:      "foo",
+			Namespace: "new",
+		},
+		Spec: api.PodSpec{
+			Containers: []api.Container{
+				{Name: "bar"},
+			},
+		},
+	}
+
+	runSyncPod(t, dm, fakeDocker, pod, nil, false)
+
+	verifyCalls(t, fakeDocker, []string{
+		// Create pod infra container.
+		"create", "start", "inspect_container", "inspect_container",
+		// Create container.
+		"create", "start", "inspect_container",
+	})
+
+	fakeDocker.Lock()
+	if len(fakeDocker.Created) != 2 ||
+		!matchString(t, "/k8s_POD\\.[a-f0-9]+_foo_new_", fakeDocker.Created[0]) ||
+		!matchString(t, "/k8s_bar\\.[a-f0-9]+_foo_new_", fakeDocker.Created[1]) {
+		t.Errorf("unexpected containers created %v", fakeDocker.Created)
+	}
+	fakeDocker.Unlock()
+
+	newContainer, err := fakeDocker.InspectContainer(fakeDocker.Created[1])
+	if err != nil {
+		t.Fatalf("unexpected error %v", err)
+	}
+	assert.Contains(t, newContainer.HostConfig.SecurityOpt, "seccomp:unconfined", "Pods with Docker versions >= 1.10 must have seccomp unconfined by default")
+}
+
+func TestUnconfinedSeccompProfileWithDockerV110(t *testing.T) {
+	dm, fakeDocker := newTestDockerManagerWithVersion("1.10.1", "1.22")
+	pod := &api.Pod{
+		ObjectMeta: api.ObjectMeta{
+			UID:       "12345678",
+			Name:      "foo4",
+			Namespace: "new",
+			Annotations: map[string]string{
+				"security.alpha.kubernetes.io/seccomp/pod": "unconfined",
+			},
+		},
+		Spec: api.PodSpec{
+			Containers: []api.Container{
+				{Name: "bar4"},
+			},
+		},
+	}
+
+	runSyncPod(t, dm, fakeDocker, pod, nil, false)
+
+	verifyCalls(t, fakeDocker, []string{
+		// Create pod infra container.
+		"create", "start", "inspect_container", "inspect_container",
+		// Create container.
+		"create", "start", "inspect_container",
+	})
+
+	fakeDocker.Lock()
+	if len(fakeDocker.Created) != 2 ||
+		!matchString(t, "/k8s_POD\\.[a-f0-9]+_foo4_new_", fakeDocker.Created[0]) ||
+		!matchString(t, "/k8s_bar4\\.[a-f0-9]+_foo4_new_", fakeDocker.Created[1]) {
+		t.Errorf("unexpected containers created %v", fakeDocker.Created)
+	}
+	fakeDocker.Unlock()
+
+	newContainer, err := fakeDocker.InspectContainer(fakeDocker.Created[1])
+	if err != nil {
+		t.Fatalf("unexpected error %v", err)
+	}
+	assert.Contains(t, newContainer.HostConfig.SecurityOpt, "seccomp:unconfined", "Pods created with a seccomp annotation of unconfined should have seccomp:unconfined.")
+}
+
+func TestDefaultSeccompProfileWithDockerV110(t *testing.T) {
+	dm, fakeDocker := newTestDockerManagerWithVersion("1.10.1", "1.22")
+	pod := &api.Pod{
+		ObjectMeta: api.ObjectMeta{
+			UID:       "12345678",
+			Name:      "foo1",
+			Namespace: "new",
+			Annotations: map[string]string{
+				"security.alpha.kubernetes.io/seccomp/pod": "docker/default",
+			},
+		},
+		Spec: api.PodSpec{
+			Containers: []api.Container{
+				{Name: "bar1"},
+			},
+		},
+	}
+
+	runSyncPod(t, dm, fakeDocker, pod, nil, false)
+
+	verifyCalls(t, fakeDocker, []string{
+		// Create pod infra container.
+		"create", "start", "inspect_container", "inspect_container",
+		// Create container.
+ "create", "start", "inspect_container", + }) + + fakeDocker.Lock() + if len(fakeDocker.Created) != 2 || + !matchString(t, "/k8s_POD\\.[a-f0-9]+_foo1_new_", fakeDocker.Created[0]) || + !matchString(t, "/k8s_bar1\\.[a-f0-9]+_foo1_new_", fakeDocker.Created[1]) { + t.Errorf("unexpected containers created %v", fakeDocker.Created) + } + fakeDocker.Unlock() + + newContainer, err := fakeDocker.InspectContainer(fakeDocker.Created[1]) + if err != nil { + t.Fatalf("unexpected error %v", err) + } + assert.NotContains(t, newContainer.HostConfig.SecurityOpt, "seccomp:unconfined", "Pods created with a secccomp annotation of docker/default should have empty security opt.") +} + +func TestSeccompContainerAnnotationTrumpsPod(t *testing.T) { + dm, fakeDocker := newTestDockerManagerWithVersion("1.10.1", "1.22") + pod := &api.Pod{ + ObjectMeta: api.ObjectMeta{ + UID: "12345678", + Name: "foo2", + Namespace: "new", + Annotations: map[string]string{ + "security.alpha.kubernetes.io/seccomp/pod": "unconfined", + "security.alpha.kubernetes.io/seccomp/container/bar2": "docker/default", + }, + }, + Spec: api.PodSpec{ + Containers: []api.Container{ + {Name: "bar2"}, + }, + }, + } + + runSyncPod(t, dm, fakeDocker, pod, nil, false) + + verifyCalls(t, fakeDocker, []string{ + // Create pod infra container. + "create", "start", "inspect_container", "inspect_container", + // Create container. + "create", "start", "inspect_container", + }) + + fakeDocker.Lock() + if len(fakeDocker.Created) != 2 || + !matchString(t, "/k8s_POD\\.[a-f0-9]+_foo2_new_", fakeDocker.Created[0]) || + !matchString(t, "/k8s_bar2\\.[a-f0-9]+_foo2_new_", fakeDocker.Created[1]) { + t.Errorf("unexpected containers created %v", fakeDocker.Created) + } + fakeDocker.Unlock() + + newContainer, err := fakeDocker.InspectContainer(fakeDocker.Created[1]) + if err != nil { + t.Fatalf("unexpected error %v", err) + } + assert.NotContains(t, newContainer.HostConfig.SecurityOpt, "seccomp:unconfined", "Container annotation should trump the pod annotation for seccomp.") +} + +func TestSecurityOptsAreNilWithDockerV19(t *testing.T) { + dm, fakeDocker := newTestDockerManagerWithVersion("1.9.1", "1.21") + pod := &api.Pod{ + ObjectMeta: api.ObjectMeta{ + UID: "12345678", + Name: "foo", + Namespace: "new", + }, + Spec: api.PodSpec{ + Containers: []api.Container{ + {Name: "bar"}, + }, + }, + } + + runSyncPod(t, dm, fakeDocker, pod, nil, false) + + verifyCalls(t, fakeDocker, []string{ + // Create pod infra container. + "create", "start", "inspect_container", "inspect_container", + // Create container. 
+ "create", "start", "inspect_container", + }) + + fakeDocker.Lock() + if len(fakeDocker.Created) != 2 || + !matchString(t, "/k8s_POD\\.[a-f0-9]+_foo_new_", fakeDocker.Created[0]) || + !matchString(t, "/k8s_bar\\.[a-f0-9]+_foo_new_", fakeDocker.Created[1]) { + t.Errorf("unexpected containers created %v", fakeDocker.Created) + } + fakeDocker.Unlock() + + newContainer, err := fakeDocker.InspectContainer(fakeDocker.Created[1]) + if err != nil { + t.Fatalf("unexpected error %v", err) + } + assert.NotContains(t, newContainer.HostConfig.SecurityOpt, "seccomp:unconfined", "Pods with Docker versions < 1.10 must not have seccomp disabled by default") +} + +func TestCheckVersionCompatibility(t *testing.T) { + type test struct { + version string + compatible bool + } + tests := []test{ + // Minimum apiversion + {minimumDockerAPIVersion, true}, + // Invalid apiversion + {"invalid_api_version", false}, + // Older apiversion + {"1.0.0", false}, + // Newer apiversion + // NOTE(random-liu): We need to bump up the newer apiversion, + // if docker apiversion really reaches "9.9.9" someday. But I + // really doubt whether the test could live that long. + {"9.9.9", true}, + } + for i, tt := range tests { + testCase := fmt.Sprintf("test case #%d test version %q", i, tt.version) + dm, fakeDocker := newTestDockerManagerWithVersion("", tt.version) + err := dm.checkVersionCompatibility() + assert.Equal(t, tt.compatible, err == nil, testCase) + if tt.compatible == true { + // Get docker version error + fakeDocker.InjectError("version", fmt.Errorf("injected version error")) + err := dm.checkVersionCompatibility() + assert.NotNil(t, err, testCase+" version error check") + } + } +} + +func TestVersion(t *testing.T) { + expectedVersion := "1.8.1" + expectedAPIVersion := "1.20" + dm, _ := newTestDockerManagerWithVersion(expectedVersion, expectedAPIVersion) + version, err := dm.Version() + if err != nil { + t.Errorf("got error while getting docker server version - %v", err) + } + if e, a := expectedVersion, version.String(); e != a { + t.Errorf("expect docker server version %q, got %q", e, a) + } + + apiVersion, err := dm.APIVersion() + if err != nil { + t.Errorf("got error while getting docker api version - %v", err) + } + if e, a := expectedAPIVersion, apiVersion.String(); e != a { + t.Errorf("expect docker api version %q, got %q", e, a) + } +} + +func TestGetPodStatusNoSuchContainer(t *testing.T) { + const ( + noSuchContainerID = "nosuchcontainer" + infraContainerID = "9876" + ) + dm, fakeDocker := newTestDockerManager() + pod := &api.Pod{ + ObjectMeta: api.ObjectMeta{ + UID: "12345678", + Name: "foo", + Namespace: "new", + }, + Spec: api.PodSpec{ + Containers: []api.Container{{Name: "nosuchcontainer"}}, + }, + } + + fakeDocker.SetFakeContainers([]*FakeContainer{ + { + ID: noSuchContainerID, + Name: "/k8s_nosuchcontainer_foo_new_12345678_42", + ExitCode: 0, + StartedAt: time.Now(), + FinishedAt: time.Now(), + Running: false, + }, + { + ID: infraContainerID, + Name: "/k8s_POD." + strconv.FormatUint(generatePodInfraContainerHash(pod), 16) + "_foo_new_12345678_42", + ExitCode: 0, + StartedAt: time.Now(), + FinishedAt: time.Now(), + Running: false, + }, + }) + fakeDocker.InjectErrors(map[string]error{"inspect_container": containerNotFoundError{}}) + runSyncPod(t, dm, fakeDocker, pod, nil, false) + + // Verify that we will try to start new contrainers even if the inspections + // failed. + verifyCalls(t, fakeDocker, []string{ + // Start a new infra container. 
+ "create", "start", "inspect_container", "inspect_container", + // Start a new container. + "create", "start", "inspect_container", + }) +} + +func TestPruneInitContainers(t *testing.T) { + dm, fake := newTestDockerManager() + pod := &api.Pod{ + Spec: api.PodSpec{ + InitContainers: []api.Container{ + {Name: "init1"}, + {Name: "init2"}, + }, + }, + } + status := &kubecontainer.PodStatus{ + ContainerStatuses: []*kubecontainer.ContainerStatus{ + {Name: "init2", ID: kubecontainer.ContainerID{ID: "init2-new-1"}, State: kubecontainer.ContainerStateExited}, + {Name: "init1", ID: kubecontainer.ContainerID{ID: "init1-new-1"}, State: kubecontainer.ContainerStateExited}, + {Name: "init1", ID: kubecontainer.ContainerID{ID: "init1-new-2"}, State: kubecontainer.ContainerStateExited}, + {Name: "init1", ID: kubecontainer.ContainerID{ID: "init1-old-1"}, State: kubecontainer.ContainerStateExited}, + {Name: "init2", ID: kubecontainer.ContainerID{ID: "init2-old-1"}, State: kubecontainer.ContainerStateExited}, + }, + } + fake.ExitedContainerList = []dockertypes.Container{ + {ID: "init1-new-1"}, + {ID: "init1-new-2"}, + {ID: "init1-old-1"}, + {ID: "init2-new-1"}, + {ID: "init2-old-1"}, + } + keep := map[kubecontainer.DockerID]int{} + dm.pruneInitContainersBeforeStart(pod, status, keep) + sort.Sort(sort.StringSlice(fake.Removed)) + if !reflect.DeepEqual([]string{"init1-new-2", "init1-old-1", "init2-old-1"}, fake.Removed) { + t.Fatal(fake.Removed) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/envvars/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/envvars/doc.go new file mode 100644 index 000000000000..22f57e80a885 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/envvars/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package envvars is the package that build the environment variables that kubernetes provides +// to the containers run by it. +package envvars diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/envvars/envvars.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/envvars/envvars.go new file mode 100644 index 000000000000..31e82eb781b4 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/envvars/envvars.go @@ -0,0 +1,108 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package envvars
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+
+	"k8s.io/kubernetes/pkg/api"
+)
+
+// FromServices builds the environment variables that a container is started with,
+// telling it where to find the services it may need, which are provided as an
+// argument.
+func FromServices(services *api.ServiceList) []api.EnvVar {
+	var result []api.EnvVar
+	for i := range services.Items {
+		service := &services.Items[i]
+
+		// ignore services where ClusterIP is "None" or empty
+		// the services passed to this method should be pre-filtered
+		// only services that have the cluster IP set should be included here
+		if !api.IsServiceIPSet(service) {
+			continue
+		}
+
+		// Host
+		name := makeEnvVariableName(service.Name) + "_SERVICE_HOST"
+		result = append(result, api.EnvVar{Name: name, Value: service.Spec.ClusterIP})
+		// First port - give it the backwards-compatible name
+		name = makeEnvVariableName(service.Name) + "_SERVICE_PORT"
+		result = append(result, api.EnvVar{Name: name, Value: strconv.Itoa(int(service.Spec.Ports[0].Port))})
+		// All named ports (only the first may be unnamed, checked in validation)
+		for i := range service.Spec.Ports {
+			sp := &service.Spec.Ports[i]
+			if sp.Name != "" {
+				pn := name + "_" + makeEnvVariableName(sp.Name)
+				result = append(result, api.EnvVar{Name: pn, Value: strconv.Itoa(int(sp.Port))})
+			}
+		}
+		// Docker-compatible vars.
+		result = append(result, makeLinkVariables(service)...)
+	}
+	return result
+}
+
+func makeEnvVariableName(str string) string {
+	// TODO: If we simplify to "all names are DNS1123Subdomains" this
+	// will need two tweaks:
+	//   1) Handle leading digits
+	//   2) Handle dots
+	return strings.ToUpper(strings.Replace(str, "-", "_", -1))
+}
+
+func makeLinkVariables(service *api.Service) []api.EnvVar {
+	prefix := makeEnvVariableName(service.Name)
+	all := []api.EnvVar{}
+	for i := range service.Spec.Ports {
+		sp := &service.Spec.Ports[i]
+
+		protocol := string(api.ProtocolTCP)
+		if sp.Protocol != "" {
+			protocol = string(sp.Protocol)
+		}
+		if i == 0 {
+			// Docker special-cases the first port.
+			all = append(all, api.EnvVar{
+				Name:  prefix + "_PORT",
+				Value: fmt.Sprintf("%s://%s:%d", strings.ToLower(protocol), service.Spec.ClusterIP, sp.Port),
+			})
+		}
+		portPrefix := fmt.Sprintf("%s_PORT_%d_%s", prefix, sp.Port, strings.ToUpper(protocol))
+		all = append(all, []api.EnvVar{
+			{
+				Name:  portPrefix,
+				Value: fmt.Sprintf("%s://%s:%d", strings.ToLower(protocol), service.Spec.ClusterIP, sp.Port),
+			},
+			{
+				Name:  portPrefix + "_PROTO",
+				Value: strings.ToLower(protocol),
+			},
+			{
+				Name:  portPrefix + "_PORT",
+				Value: strconv.Itoa(int(sp.Port)),
+			},
+			{
+				Name:  portPrefix + "_ADDR",
+				Value: service.Spec.ClusterIP,
+			},
+		}...)
+	}
+	return all
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/envvars/envvars_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/envvars/envvars_test.go
new file mode 100644
index 000000000000..7feaf065da5d
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/envvars/envvars_test.go
@@ -0,0 +1,128 @@
+/*
+Copyright 2014 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package envvars_test + +import ( + "reflect" + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/kubelet/envvars" +) + +func TestFromServices(t *testing.T) { + sl := api.ServiceList{ + Items: []api.Service{ + { + ObjectMeta: api.ObjectMeta{Name: "foo-bar"}, + Spec: api.ServiceSpec{ + Selector: map[string]string{"bar": "baz"}, + ClusterIP: "1.2.3.4", + Ports: []api.ServicePort{ + {Port: 8080, Protocol: "TCP"}, + }, + }, + }, + { + ObjectMeta: api.ObjectMeta{Name: "abc-123"}, + Spec: api.ServiceSpec{ + Selector: map[string]string{"bar": "baz"}, + ClusterIP: "5.6.7.8", + Ports: []api.ServicePort{ + {Name: "u-d-p", Port: 8081, Protocol: "UDP"}, + {Name: "t-c-p", Port: 8081, Protocol: "TCP"}, + }, + }, + }, + { + ObjectMeta: api.ObjectMeta{Name: "q-u-u-x"}, + Spec: api.ServiceSpec{ + Selector: map[string]string{"bar": "baz"}, + ClusterIP: "9.8.7.6", + Ports: []api.ServicePort{ + {Port: 8082, Protocol: "TCP"}, + {Name: "8083", Port: 8083, Protocol: "TCP"}, + }, + }, + }, + { + ObjectMeta: api.ObjectMeta{Name: "svrc-clusterip-none"}, + Spec: api.ServiceSpec{ + Selector: map[string]string{"bar": "baz"}, + ClusterIP: "None", + Ports: []api.ServicePort{ + {Port: 8082, Protocol: "TCP"}, + }, + }, + }, + { + ObjectMeta: api.ObjectMeta{Name: "svrc-clusterip-empty"}, + Spec: api.ServiceSpec{ + Selector: map[string]string{"bar": "baz"}, + ClusterIP: "", + Ports: []api.ServicePort{ + {Port: 8082, Protocol: "TCP"}, + }, + }, + }, + }, + } + vars := envvars.FromServices(&sl) + expected := []api.EnvVar{ + {Name: "FOO_BAR_SERVICE_HOST", Value: "1.2.3.4"}, + {Name: "FOO_BAR_SERVICE_PORT", Value: "8080"}, + {Name: "FOO_BAR_PORT", Value: "tcp://1.2.3.4:8080"}, + {Name: "FOO_BAR_PORT_8080_TCP", Value: "tcp://1.2.3.4:8080"}, + {Name: "FOO_BAR_PORT_8080_TCP_PROTO", Value: "tcp"}, + {Name: "FOO_BAR_PORT_8080_TCP_PORT", Value: "8080"}, + {Name: "FOO_BAR_PORT_8080_TCP_ADDR", Value: "1.2.3.4"}, + {Name: "ABC_123_SERVICE_HOST", Value: "5.6.7.8"}, + {Name: "ABC_123_SERVICE_PORT", Value: "8081"}, + {Name: "ABC_123_SERVICE_PORT_U_D_P", Value: "8081"}, + {Name: "ABC_123_SERVICE_PORT_T_C_P", Value: "8081"}, + {Name: "ABC_123_PORT", Value: "udp://5.6.7.8:8081"}, + {Name: "ABC_123_PORT_8081_UDP", Value: "udp://5.6.7.8:8081"}, + {Name: "ABC_123_PORT_8081_UDP_PROTO", Value: "udp"}, + {Name: "ABC_123_PORT_8081_UDP_PORT", Value: "8081"}, + {Name: "ABC_123_PORT_8081_UDP_ADDR", Value: "5.6.7.8"}, + {Name: "ABC_123_PORT_8081_TCP", Value: "tcp://5.6.7.8:8081"}, + {Name: "ABC_123_PORT_8081_TCP_PROTO", Value: "tcp"}, + {Name: "ABC_123_PORT_8081_TCP_PORT", Value: "8081"}, + {Name: "ABC_123_PORT_8081_TCP_ADDR", Value: "5.6.7.8"}, + {Name: "Q_U_U_X_SERVICE_HOST", Value: "9.8.7.6"}, + {Name: "Q_U_U_X_SERVICE_PORT", Value: "8082"}, + {Name: "Q_U_U_X_SERVICE_PORT_8083", Value: "8083"}, + {Name: "Q_U_U_X_PORT", Value: "tcp://9.8.7.6:8082"}, + {Name: "Q_U_U_X_PORT_8082_TCP", Value: "tcp://9.8.7.6:8082"}, + {Name: "Q_U_U_X_PORT_8082_TCP_PROTO", Value: "tcp"}, + {Name: "Q_U_U_X_PORT_8082_TCP_PORT", Value: "8082"}, + {Name: "Q_U_U_X_PORT_8082_TCP_ADDR", Value: "9.8.7.6"}, + {Name: "Q_U_U_X_PORT_8083_TCP", Value: 
"tcp://9.8.7.6:8083"}, + {Name: "Q_U_U_X_PORT_8083_TCP_PROTO", Value: "tcp"}, + {Name: "Q_U_U_X_PORT_8083_TCP_PORT", Value: "8083"}, + {Name: "Q_U_U_X_PORT_8083_TCP_ADDR", Value: "9.8.7.6"}, + } + if len(vars) != len(expected) { + t.Errorf("Expected %d env vars, got: %+v", len(expected), vars) + return + } + for i := range expected { + if !reflect.DeepEqual(vars[i], expected[i]) { + t.Errorf("expected %#v, got %#v", vars[i], expected[i]) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/eviction/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/eviction/doc.go new file mode 100644 index 000000000000..d46bb277968d --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/eviction/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package eviction is responsible for enforcing eviction thresholds to maintain +// node stability. +package eviction diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/eviction/helpers.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/eviction/helpers.go new file mode 100644 index 000000000000..d5b95ab19755 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/eviction/helpers.go @@ -0,0 +1,566 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package eviction + +import ( + "fmt" + "sort" + "strings" + "time" + + "github.com/golang/glog" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/resource" + statsapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/stats" + qosutil "k8s.io/kubernetes/pkg/kubelet/qos/util" + "k8s.io/kubernetes/pkg/kubelet/server/stats" + "k8s.io/kubernetes/pkg/quota/evaluator/core" + "k8s.io/kubernetes/pkg/util/sets" +) + +const ( + unsupportedEvictionSignal = "unsupported eviction signal %v" + // the reason reported back in status. + reason = "Evicted" + // the message associated with the reason. + message = "The node was low on compute resources." + // disk, in bytes. internal to this module, used to account for local disk usage. + resourceDisk api.ResourceName = "disk" +) + +// resourceToRankFunc maps a resource to ranking function for that resource. +var resourceToRankFunc = map[api.ResourceName]rankFunc{ + api.ResourceMemory: rankMemoryPressure, +} + +// signalToNodeCondition maps a signal to the node condition to report if threshold is met. 
+var signalToNodeCondition = map[Signal]api.NodeConditionType{ + SignalMemoryAvailable: api.NodeMemoryPressure, +} + +// signalToResource maps a Signal to its associated Resource. +var signalToResource = map[Signal]api.ResourceName{ + SignalMemoryAvailable: api.ResourceMemory, +} + +// validSignal returns true if the signal is supported. +func validSignal(signal Signal) bool { + _, found := signalToResource[signal] + return found +} + +// ParseThresholdConfig parses the flags for thresholds. +func ParseThresholdConfig(evictionHard, evictionSoft, evictionSoftGracePeriod string) ([]Threshold, error) { + results := []Threshold{} + + hardThresholds, err := parseThresholdStatements(evictionHard) + if err != nil { + return nil, err + } + results = append(results, hardThresholds...) + + softThresholds, err := parseThresholdStatements(evictionSoft) + if err != nil { + return nil, err + } + gracePeriods, err := parseGracePeriods(evictionSoftGracePeriod) + if err != nil { + return nil, err + } + for i := range softThresholds { + signal := softThresholds[i].Signal + period, found := gracePeriods[signal] + if !found { + return nil, fmt.Errorf("grace period must be specified for the soft eviction threshold %v", signal) + } + softThresholds[i].GracePeriod = period + } + results = append(results, softThresholds...) + return results, nil +} + +// parseThresholdStatements parses the input statements into a list of Threshold objects. +func parseThresholdStatements(expr string) ([]Threshold, error) { + if len(expr) == 0 { + return nil, nil + } + results := []Threshold{} + statements := strings.Split(expr, ",") + signalsFound := sets.NewString() + for _, statement := range statements { + result, err := parseThresholdStatement(statement) + if err != nil { + return nil, err + } + if signalsFound.Has(string(result.Signal)) { + return nil, fmt.Errorf("found duplicate eviction threshold for signal %v", result.Signal) + } + signalsFound.Insert(string(result.Signal)) + results = append(results, result) + } + return results, nil +} + +// parseThresholdStatement parses a threshold statement. +func parseThresholdStatement(statement string) (Threshold, error) { + tokens2Operator := map[string]ThresholdOperator{ + "<": OpLessThan, + } + var ( + operator ThresholdOperator + parts []string + ) + for token := range tokens2Operator { + parts = strings.Split(statement, token) + // if we got a token, we know this was the operator... 
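+		// e.g. "memory.available<100Mi" splits on "<" into
+		// ["memory.available", "100Mi"].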
+		if len(parts) > 1 {
+			operator = tokens2Operator[token]
+			break
+		}
+	}
+	if len(operator) == 0 || len(parts) != 2 {
+		return Threshold{}, fmt.Errorf("invalid eviction threshold syntax %v, expected <signal><operator><value>", statement)
+	}
+	signal := Signal(parts[0])
+	if !validSignal(signal) {
+		return Threshold{}, fmt.Errorf(unsupportedEvictionSignal, signal)
+	}
+
+	quantity, err := resource.ParseQuantity(parts[1])
+	if err != nil {
+		return Threshold{}, err
+	}
+	return Threshold{
+		Signal:   signal,
+		Operator: operator,
+		Value:    &quantity,
+	}, nil
+}
+
+// parseGracePeriods parses the grace period statements
+func parseGracePeriods(expr string) (map[Signal]time.Duration, error) {
+	if len(expr) == 0 {
+		return nil, nil
+	}
+	results := map[Signal]time.Duration{}
+	statements := strings.Split(expr, ",")
+	for _, statement := range statements {
+		parts := strings.Split(statement, "=")
+		if len(parts) != 2 {
+			return nil, fmt.Errorf("invalid eviction grace period syntax %v, expected <signal>=<duration>", statement)
+		}
+		signal := Signal(parts[0])
+		if !validSignal(signal) {
+			return nil, fmt.Errorf(unsupportedEvictionSignal, signal)
+		}
+
+		gracePeriod, err := time.ParseDuration(parts[1])
+		if err != nil {
+			return nil, err
+		}
+		if gracePeriod < 0 {
+			return nil, fmt.Errorf("invalid eviction grace period specified: %v, must be a positive value", parts[1])
+		}
+
+		// check against duplicate statements
+		if _, found := results[signal]; found {
+			return nil, fmt.Errorf("duplicate eviction grace period specified for %v", signal)
+		}
+		results[signal] = gracePeriod
+	}
+	return results, nil
+}
+
+// diskUsage converts used bytes into a resource quantity.
+func diskUsage(fsStats *statsapi.FsStats) *resource.Quantity {
+	if fsStats == nil || fsStats.UsedBytes == nil {
+		return &resource.Quantity{Format: resource.BinarySI}
+	}
+	usage := int64(*fsStats.UsedBytes)
+	return resource.NewQuantity(usage, resource.BinarySI)
+}
+
+// memoryUsage converts working set into a resource quantity.
+func memoryUsage(memStats *statsapi.MemoryStats) *resource.Quantity {
+	if memStats == nil || memStats.WorkingSetBytes == nil {
+		return &resource.Quantity{Format: resource.BinarySI}
+	}
+	usage := int64(*memStats.WorkingSetBytes)
+	return resource.NewQuantity(usage, resource.BinarySI)
+}
+
+// podUsage aggregates usage of compute resources.
+// it supports the following: memory and disk.
+func podUsage(podStats statsapi.PodStats) (api.ResourceList, error) {
+	disk := resource.Quantity{Format: resource.BinarySI}
+	memory := resource.Quantity{Format: resource.BinarySI}
+	for _, container := range podStats.Containers {
+		// disk usage (if known)
+		// TODO: need to handle volumes
+		for _, fsStats := range []*statsapi.FsStats{container.Rootfs, container.Logs} {
+			disk.Add(*diskUsage(fsStats))
+		}
+		// memory usage (if known)
+		memory.Add(*memoryUsage(container.Memory))
+	}
+	return api.ResourceList{
+		api.ResourceMemory: memory,
+		resourceDisk:       disk,
+	}, nil
+}
+
+// formatThreshold formats a threshold for logging.
+func formatThreshold(threshold Threshold) string {
+	return fmt.Sprintf("threshold(signal=%v, operator=%v, value=%v, gracePeriod=%v)", threshold.Signal, threshold.Operator, threshold.Value.String(), threshold.GracePeriod)
+}
+
+// cachedStatsFunc returns a statsFunc based on the provided pod stats.
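+// For illustration, makeSignalObservations below builds this closure once per
+// summary fetch and hands it to the ranking functions, roughly:
+//
+//	statsFn := cachedStatsFunc(summary.Pods)
+//	podStats, found := statsFn(pod) // lookup keyed by pod UID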
+func cachedStatsFunc(podStats []statsapi.PodStats) statsFunc {
+	uid2PodStats := map[string]statsapi.PodStats{}
+	for i := range podStats {
+		uid2PodStats[podStats[i].PodRef.UID] = podStats[i]
+	}
+	return func(pod *api.Pod) (statsapi.PodStats, bool) {
+		stats, found := uid2PodStats[string(pod.UID)]
+		return stats, found
+	}
+}
+
+// Cmp compares p1 and p2 and returns:
+//
+//   -1 if p1 < p2
+//    0 if p1 == p2
+//   +1 if p1 > p2
+//
+type cmpFunc func(p1, p2 *api.Pod) int
+
+// multiSorter implements the Sort interface, sorting the pods within.
+type multiSorter struct {
+	pods []*api.Pod
+	cmp  []cmpFunc
+}
+
+// Sort sorts the argument slice according to the less functions passed to OrderedBy.
+func (ms *multiSorter) Sort(pods []*api.Pod) {
+	ms.pods = pods
+	sort.Sort(ms)
+}
+
+// OrderedBy returns a Sorter that sorts using the cmp functions, in order.
+// Call its Sort method to sort the data.
+func orderedBy(cmp ...cmpFunc) *multiSorter {
+	return &multiSorter{
+		cmp: cmp,
+	}
+}
+
+// Len is part of sort.Interface.
+func (ms *multiSorter) Len() int {
+	return len(ms.pods)
+}
+
+// Swap is part of sort.Interface.
+func (ms *multiSorter) Swap(i, j int) {
+	ms.pods[i], ms.pods[j] = ms.pods[j], ms.pods[i]
+}
+
+// Less is part of sort.Interface.
+func (ms *multiSorter) Less(i, j int) bool {
+	p1, p2 := ms.pods[i], ms.pods[j]
+	var k int
+	for k = 0; k < len(ms.cmp)-1; k++ {
+		cmpResult := ms.cmp[k](p1, p2)
+		// p1 is less than p2
+		if cmpResult < 0 {
+			return true
+		}
+		// p1 is greater than p2
+		if cmpResult > 0 {
+			return false
+		}
+		// we don't know yet
+	}
+	// the last cmp func is the final decider
+	return ms.cmp[k](p1, p2) < 0
+}
+
+// qos compares pods by QoS (BestEffort < Burstable < Guaranteed)
+func qos(p1, p2 *api.Pod) int {
+	qosP1 := qosutil.GetPodQos(p1)
+	qosP2 := qosutil.GetPodQos(p2)
+	// it's a tie
+	if qosP1 == qosP2 {
+		return 0
+	}
+	// if p1 is best effort, we know p2 is burstable or guaranteed
+	if qosP1 == qosutil.BestEffort {
+		return -1
+	}
+	// we know p1 and p2 are not besteffort, so if p1 is burstable, p2 must be guaranteed
+	if qosP1 == qosutil.Burstable {
+		if qosP2 == qosutil.Guaranteed {
+			return -1
+		}
+		return 1
+	}
+	// ok, p1 must be guaranteed.
+	return 1
+}
+
+// memory compares pods by largest consumer of memory relative to request.
+func memory(stats statsFunc) cmpFunc {
+	return func(p1, p2 *api.Pod) int {
+		p1Stats, found := stats(p1)
+		// if we have no usage stats for p1, we want p2 first
+		if !found {
+			return -1
+		}
+		// if we have no usage stats for p2, but p1 has usage, we want p1 first.
+		p2Stats, found := stats(p2)
+		if !found {
+			return 1
+		}
+		// if we can't get measured usage for p1, we want p2 first
+		p1Usage, err := podUsage(p1Stats)
+		if err != nil {
+			return -1
+		}
+		// if we can't get measured usage for p2, we want p1 first
+		p2Usage, err := podUsage(p2Stats)
+		if err != nil {
+			return 1
+		}
+
+		// adjust p1, p2 usage relative to the request (if any)
+		p1Memory := p1Usage[api.ResourceMemory]
+		p1Spec := core.PodUsageFunc(p1)
+		p1Request := p1Spec[api.ResourceRequestsMemory]
+		p1Memory.Sub(p1Request)
+
+		p2Memory := p2Usage[api.ResourceMemory]
+		p2Spec := core.PodUsageFunc(p2)
+		p2Request := p2Spec[api.ResourceRequestsMemory]
+		p2Memory.Sub(p2Request)
+
+		// if p2 is using more than p1, we want p2 first
+		return p2Memory.Cmp(p1Memory)
+	}
+}
+
+// disk compares pods by largest consumer of disk; since disk is a best-effort
+// resource for now, usage is not adjusted relative to requests (see the TODO
+// in the body).
+func disk(stats statsFunc) cmpFunc {
+	return func(p1, p2 *api.Pod) int {
+		p1Stats, found := stats(p1)
+		// if we have no usage stats for p1, we want p2 first
+		if !found {
+			return -1
+		}
+		// if we have no usage stats for p2, but p1 has usage, we want p1 first.
+		p2Stats, found := stats(p2)
+		if !found {
+			return 1
+		}
+		// if we can't get measured usage for p1, we want p2 first
+		p1Usage, err := podUsage(p1Stats)
+		if err != nil {
+			return -1
+		}
+		// if we can't get measured usage for p2, we want p1 first
+		p2Usage, err := podUsage(p2Stats)
+		if err != nil {
+			return 1
+		}
+
+		// disk is best effort, so we don't measure relative to a request.
+		// TODO: add disk as a guaranteed resource
+		p1Disk := p1Usage[resourceDisk]
+		p2Disk := p2Usage[resourceDisk]
+		// if p2 is using more than p1, we want p2 first
+		return p2Disk.Cmp(p1Disk)
+	}
+}
+
+// rankMemoryPressure orders the input pods for eviction in response to memory pressure.
+func rankMemoryPressure(pods []*api.Pod, stats statsFunc) {
+	orderedBy(qos, memory(stats)).Sort(pods)
+}
+
+// rankDiskPressure orders the input pods for eviction in response to disk pressure.
+func rankDiskPressure(pods []*api.Pod, stats statsFunc) {
+	orderedBy(qos, disk(stats)).Sort(pods)
+}
+
+// byEvictionPriority implements sort.Interface for []api.ResourceName.
+type byEvictionPriority []api.ResourceName
+
+func (a byEvictionPriority) Len() int      { return len(a) }
+func (a byEvictionPriority) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+
+// Less ranks memory before all other resources.
+func (a byEvictionPriority) Less(i, j int) bool {
+	return a[i] == api.ResourceMemory
+}
+
+// makeSignalObservations derives observations using the specified summary provider.
+func makeSignalObservations(summaryProvider stats.SummaryProvider) (signalObservations, statsFunc, error) {
+	summary, err := summaryProvider.Get()
+	if err != nil {
+		return nil, nil, err
+	}
+	// build the function to work against for pod stats
+	statsFunc := cachedStatsFunc(summary.Pods)
+	// build an evaluation context for current eviction signals
+	result := signalObservations{}
+	result[SignalMemoryAvailable] = resource.NewQuantity(int64(*summary.Node.Memory.AvailableBytes), resource.BinarySI)
+	return result, statsFunc, nil
+}
+
+// thresholdsMet returns the set of thresholds that were met independent of grace period
+func thresholdsMet(thresholds []Threshold, observations signalObservations) []Threshold {
+	results := []Threshold{}
+	for i := range thresholds {
+		threshold := thresholds[i]
+		observed, found := observations[threshold.Signal]
+		if !found {
+			glog.Warningf("eviction manager: no observation found for eviction signal %v", threshold.Signal)
+			continue
+		}
+		// determine if we have met the specified threshold
+		thresholdMet := false
+		thresholdResult := threshold.Value.Cmp(*observed)
+		switch threshold.Operator {
+		case OpLessThan:
+			thresholdMet = thresholdResult > 0
+		}
+		if thresholdMet {
+			results = append(results, threshold)
+		}
+	}
+	return results
+}
+
+// thresholdsFirstObservedAt merges the input set of thresholds with the previous observation to determine when the active set of thresholds was initially met.
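+// A threshold already present in lastObservedAt keeps its original timestamp,
+// a newly met threshold is stamped with now, and thresholds that are no longer
+// met simply drop out of the result.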
+func thresholdsFirstObservedAt(thresholds []Threshold, lastObservedAt thresholdsObservedAt, now time.Time) thresholdsObservedAt {
+	results := thresholdsObservedAt{}
+	for i := range thresholds {
+		observedAt, found := lastObservedAt[thresholds[i]]
+		if !found {
+			observedAt = now
+		}
+		results[thresholds[i]] = observedAt
+	}
+	return results
+}
+
+// thresholdsMetGracePeriod returns the set of thresholds that have satisfied their associated grace period
+func thresholdsMetGracePeriod(observedAt thresholdsObservedAt, now time.Time) []Threshold {
+	results := []Threshold{}
+	for threshold, at := range observedAt {
+		duration := now.Sub(at)
+		if duration < threshold.GracePeriod {
+			glog.V(2).Infof("eviction manager: eviction criteria not yet met for %v, duration: %v", formatThreshold(threshold), duration)
+			continue
+		}
+		results = append(results, threshold)
+	}
+	return results
+}
+
+// nodeConditions returns the set of node conditions associated with the given thresholds
+func nodeConditions(thresholds []Threshold) []api.NodeConditionType {
+	results := []api.NodeConditionType{}
+	for _, threshold := range thresholds {
+		if nodeCondition, found := signalToNodeCondition[threshold.Signal]; found {
+			results = append(results, nodeCondition)
+		}
+	}
+	return results
+}
+
+// nodeConditionsLastObservedAt merges the input with the previous observation to determine when a condition was most recently met.
+func nodeConditionsLastObservedAt(nodeConditions []api.NodeConditionType, lastObservedAt nodeConditionsObservedAt, now time.Time) nodeConditionsObservedAt {
+	results := nodeConditionsObservedAt{}
+	// the input conditions were observed "now"
+	for i := range nodeConditions {
+		results[nodeConditions[i]] = now
+	}
+	// the conditions that were not observed now are merged in with their old time
+	for key, value := range lastObservedAt {
+		_, found := results[key]
+		if !found {
+			results[key] = value
+		}
+	}
+	return results
+}
+
+// nodeConditionsObservedSince returns the set of conditions that have been observed within the specified period
+func nodeConditionsObservedSince(observedAt nodeConditionsObservedAt, period time.Duration, now time.Time) []api.NodeConditionType {
+	results := []api.NodeConditionType{}
+	for nodeCondition, at := range observedAt {
+		duration := now.Sub(at)
+		if duration < period {
+			results = append(results, nodeCondition)
+		}
+	}
+	return results
+}
+
+// hasNodeCondition returns true if the node condition is in the input list
+func hasNodeCondition(inputs []api.NodeConditionType, item api.NodeConditionType) bool {
+	for _, input := range inputs {
+		if input == item {
+			return true
+		}
+	}
+	return false
+}
+
+// hasThreshold returns true if the given threshold is in the input list
+func hasThreshold(inputs []Threshold, item Threshold) bool {
+	for _, input := range inputs {
+		if input.GracePeriod == item.GracePeriod && input.Operator == item.Operator && input.Signal == item.Signal && input.Value.Cmp(*item.Value) == 0 {
+			return true
+		}
+	}
+	return false
+}
+
+// reclaimResources returns the set of resources that are starved based on thresholds met.
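+// For example, a met SignalMemoryAvailable threshold maps via signalToResource
+// to api.ResourceMemory as the starved resource.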
+func reclaimResources(thresholds []Threshold) []api.ResourceName { + results := []api.ResourceName{} + for _, threshold := range thresholds { + if starvedResource, found := signalToResource[threshold.Signal]; found { + results = append(results, starvedResource) + } + } + return results +} + +// isSoftEviction returns true if the thresholds met for the starved resource are only soft thresholds +func isSoftEviction(thresholds []Threshold, starvedResource api.ResourceName) bool { + for _, threshold := range thresholds { + if resourceToCheck := signalToResource[threshold.Signal]; resourceToCheck != starvedResource { + continue + } + if threshold.GracePeriod == time.Duration(0) { + return false + } + } + return true +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/eviction/helpers_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/eviction/helpers_test.go new file mode 100644 index 000000000000..6d412c9d1c79 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/eviction/helpers_test.go @@ -0,0 +1,725 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package eviction + +import ( + "fmt" + "reflect" + "testing" + "time" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/resource" + "k8s.io/kubernetes/pkg/api/unversioned" + statsapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/stats" + "k8s.io/kubernetes/pkg/quota" + "k8s.io/kubernetes/pkg/types" +) + +func quantityMustParse(value string) *resource.Quantity { + q := resource.MustParse(value) + return &q +} + +func TestParseThresholdConfig(t *testing.T) { + gracePeriod, _ := time.ParseDuration("30s") + testCases := map[string]struct { + evictionHard string + evictionSoft string + evictionSoftGracePeriod string + expectErr bool + expectThresholds []Threshold + }{ + "no values": { + evictionHard: "", + evictionSoft: "", + evictionSoftGracePeriod: "", + expectErr: false, + expectThresholds: []Threshold{}, + }, + "all flag values": { + evictionHard: "memory.available<150Mi", + evictionSoft: "memory.available<300Mi", + evictionSoftGracePeriod: "memory.available=30s", + expectErr: false, + expectThresholds: []Threshold{ + { + Signal: SignalMemoryAvailable, + Operator: OpLessThan, + Value: quantityMustParse("150Mi"), + }, + { + Signal: SignalMemoryAvailable, + Operator: OpLessThan, + Value: quantityMustParse("300Mi"), + GracePeriod: gracePeriod, + }, + }, + }, + "invalid-signal": { + evictionHard: "mem.available<150Mi", + evictionSoft: "", + evictionSoftGracePeriod: "", + expectErr: true, + expectThresholds: []Threshold{}, + }, + "duplicate-signal": { + evictionHard: "memory.available<150Mi,memory.available<100Mi", + evictionSoft: "", + evictionSoftGracePeriod: "", + expectErr: true, + expectThresholds: []Threshold{}, + }, + "valid-and-invalid-signal": { + evictionHard: "memory.available<150Mi,invalid.foo<150Mi", + evictionSoft: "", + evictionSoftGracePeriod: "", + expectErr: true, + expectThresholds: []Threshold{}, + }, + "soft-no-grace-period": { + 
evictionHard: "", + evictionSoft: "memory.available<150Mi", + evictionSoftGracePeriod: "", + expectErr: true, + expectThresholds: []Threshold{}, + }, + "soft-neg-grace-period": { + evictionHard: "", + evictionSoft: "memory.available<150Mi", + evictionSoftGracePeriod: "memory.available=-30s", + expectErr: true, + expectThresholds: []Threshold{}, + }, + } + for testName, testCase := range testCases { + thresholds, err := ParseThresholdConfig(testCase.evictionHard, testCase.evictionSoft, testCase.evictionSoftGracePeriod) + if testCase.expectErr != (err != nil) { + t.Errorf("Err not as expected, test: %v, error expected: %v, actual: %v", testName, testCase.expectErr, err) + } + if !thresholdsEqual(testCase.expectThresholds, thresholds) { + t.Errorf("thresholds not as expected, test: %v, expected: %v, actual: %v", testName, testCase.expectThresholds, thresholds) + } + } +} + +func thresholdsEqual(expected []Threshold, actual []Threshold) bool { + if len(expected) != len(actual) { + return false + } + for _, aThreshold := range expected { + equal := false + for _, bThreshold := range actual { + if thresholdEqual(aThreshold, bThreshold) { + equal = true + } + } + if !equal { + return false + } + } + for _, aThreshold := range actual { + equal := false + for _, bThreshold := range expected { + if thresholdEqual(aThreshold, bThreshold) { + equal = true + } + } + if !equal { + return false + } + } + return true +} + +func thresholdEqual(a Threshold, b Threshold) bool { + return a.GracePeriod == b.GracePeriod && + a.Operator == b.Operator && + a.Signal == b.Signal && + a.Value.Cmp(*b.Value) == 0 +} + +// TestOrderedByQoS ensures we order BestEffort < Burstable < Guaranteed +func TestOrderedByQoS(t *testing.T) { + bestEffort := newPod("best-effort", []api.Container{ + newContainer("best-effort", newResourceList("", ""), newResourceList("", "")), + }) + burstable := newPod("burstable", []api.Container{ + newContainer("burstable", newResourceList("100m", "100Mi"), newResourceList("200m", "200Mi")), + }) + guaranteed := newPod("guaranteed", []api.Container{ + newContainer("guaranteed", newResourceList("200m", "200Mi"), newResourceList("200m", "200Mi")), + }) + + pods := []*api.Pod{guaranteed, burstable, bestEffort} + orderedBy(qos).Sort(pods) + + expected := []*api.Pod{bestEffort, burstable, guaranteed} + for i := range expected { + if pods[i] != expected[i] { + t.Errorf("Expected pod: %s, but got: %s", expected[i].Name, pods[i].Name) + } + } +} + +// TestOrderedByMemory ensures we order pods by greediest memory consumer relative to request. 
+func TestOrderedByMemory(t *testing.T) { + pod1 := newPod("best-effort-high", []api.Container{ + newContainer("best-effort-high", newResourceList("", ""), newResourceList("", "")), + }) + pod2 := newPod("best-effort-low", []api.Container{ + newContainer("best-effort-low", newResourceList("", ""), newResourceList("", "")), + }) + pod3 := newPod("burstable-high", []api.Container{ + newContainer("burstable-high", newResourceList("100m", "100Mi"), newResourceList("200m", "1Gi")), + }) + pod4 := newPod("burstable-low", []api.Container{ + newContainer("burstable-low", newResourceList("100m", "100Mi"), newResourceList("200m", "1Gi")), + }) + pod5 := newPod("guaranteed-high", []api.Container{ + newContainer("guaranteed-high", newResourceList("100m", "1Gi"), newResourceList("100m", "1Gi")), + }) + pod6 := newPod("guaranteed-low", []api.Container{ + newContainer("guaranteed-low", newResourceList("100m", "1Gi"), newResourceList("100m", "1Gi")), + }) + stats := map[*api.Pod]statsapi.PodStats{ + pod1: newPodMemoryStats(pod1, resource.MustParse("500Mi")), // 500 relative to request + pod2: newPodMemoryStats(pod2, resource.MustParse("300Mi")), // 300 relative to request + pod3: newPodMemoryStats(pod3, resource.MustParse("800Mi")), // 700 relative to request + pod4: newPodMemoryStats(pod4, resource.MustParse("300Mi")), // 200 relative to request + pod5: newPodMemoryStats(pod5, resource.MustParse("800Mi")), // -200 relative to request + pod6: newPodMemoryStats(pod6, resource.MustParse("200Mi")), // -800 relative to request + } + statsFn := func(pod *api.Pod) (statsapi.PodStats, bool) { + result, found := stats[pod] + return result, found + } + pods := []*api.Pod{pod1, pod2, pod3, pod4, pod5, pod6} + orderedBy(memory(statsFn)).Sort(pods) + expected := []*api.Pod{pod3, pod1, pod2, pod4, pod5, pod6} + for i := range expected { + if pods[i] != expected[i] { + t.Errorf("Expected pod[%d]: %s, but got: %s", i, expected[i].Name, pods[i].Name) + } + } +} + +// TestOrderedByQoSMemory ensures we order by qos and then memory consumption relative to request. 
+func TestOrderedByQoSMemory(t *testing.T) { + pod1 := newPod("best-effort-high", []api.Container{ + newContainer("best-effort-high", newResourceList("", ""), newResourceList("", "")), + }) + pod2 := newPod("best-effort-low", []api.Container{ + newContainer("best-effort-low", newResourceList("", ""), newResourceList("", "")), + }) + pod3 := newPod("burstable-high", []api.Container{ + newContainer("burstable-high", newResourceList("100m", "100Mi"), newResourceList("200m", "1Gi")), + }) + pod4 := newPod("burstable-low", []api.Container{ + newContainer("burstable-low", newResourceList("100m", "100Mi"), newResourceList("200m", "1Gi")), + }) + pod5 := newPod("guaranteed-high", []api.Container{ + newContainer("guaranteed-high", newResourceList("100m", "1Gi"), newResourceList("100m", "1Gi")), + }) + pod6 := newPod("guaranteed-low", []api.Container{ + newContainer("guaranteed-low", newResourceList("100m", "1Gi"), newResourceList("100m", "1Gi")), + }) + stats := map[*api.Pod]statsapi.PodStats{ + pod1: newPodMemoryStats(pod1, resource.MustParse("500Mi")), // 500 relative to request + pod2: newPodMemoryStats(pod2, resource.MustParse("50Mi")), // 50 relative to request + pod3: newPodMemoryStats(pod3, resource.MustParse("50Mi")), // -50 relative to request + pod4: newPodMemoryStats(pod4, resource.MustParse("300Mi")), // 200 relative to request + pod5: newPodMemoryStats(pod5, resource.MustParse("800Mi")), // -200 relative to request + pod6: newPodMemoryStats(pod6, resource.MustParse("200Mi")), // -800 relative to request + } + statsFn := func(pod *api.Pod) (statsapi.PodStats, bool) { + result, found := stats[pod] + return result, found + } + pods := []*api.Pod{pod1, pod2, pod3, pod4, pod5, pod6} + expected := []*api.Pod{pod1, pod2, pod4, pod3, pod5, pod6} + orderedBy(qos, memory(statsFn)).Sort(pods) + for i := range expected { + if pods[i] != expected[i] { + t.Errorf("Expected pod[%d]: %s, but got: %s", i, expected[i].Name, pods[i].Name) + } + } +} + +type fakeSummaryProvider struct { + result *statsapi.Summary +} + +func (f *fakeSummaryProvider) Get() (*statsapi.Summary, error) { + return f.result, nil +} + +// newPodStats returns a pod stat where each container is using the specified working set +// each pod must have a Name, UID, Namespace +func newPodStats(pod *api.Pod, containerWorkingSetBytes int64) statsapi.PodStats { + result := statsapi.PodStats{ + PodRef: statsapi.PodReference{ + Name: pod.Name, + Namespace: pod.Namespace, + UID: string(pod.UID), + }, + } + val := uint64(containerWorkingSetBytes) + for range pod.Spec.Containers { + result.Containers = append(result.Containers, statsapi.ContainerStats{ + Memory: &statsapi.MemoryStats{ + WorkingSetBytes: &val, + }, + }) + } + return result +} + +func TestMakeSignalObservations(t *testing.T) { + podMaker := func(name, namespace, uid string, numContainers int) *api.Pod { + pod := &api.Pod{} + pod.Name = name + pod.Namespace = namespace + pod.UID = types.UID(uid) + pod.Spec = api.PodSpec{} + for i := 0; i < numContainers; i++ { + pod.Spec.Containers = append(pod.Spec.Containers, api.Container{ + Name: fmt.Sprintf("ctr%v", i), + }) + } + return pod + } + nodeAvailableBytes := uint64(1024 * 1024 * 1024) + fakeStats := &statsapi.Summary{ + Node: statsapi.NodeStats{ + Memory: &statsapi.MemoryStats{ + AvailableBytes: &nodeAvailableBytes, + }, + }, + Pods: []statsapi.PodStats{}, + } + provider := &fakeSummaryProvider{ + result: fakeStats, + } + pods := []*api.Pod{ + podMaker("pod1", "ns1", "uuid1", 1), + podMaker("pod1", "ns2", "uuid2", 1), + 
podMaker("pod3", "ns3", "uuid3", 1), + } + containerWorkingSetBytes := int64(1024 * 1024) + for _, pod := range pods { + fakeStats.Pods = append(fakeStats.Pods, newPodStats(pod, containerWorkingSetBytes)) + } + actualObservations, statsFunc, err := makeSignalObservations(provider) + if err != nil { + t.Errorf("Unexpected err: %v", err) + } + quantity, found := actualObservations[SignalMemoryAvailable] + if !found { + t.Errorf("Expected available memory observation: %v", err) + } + if expectedBytes := int64(nodeAvailableBytes); quantity.Value() != expectedBytes { + t.Errorf("Expected %v, actual: %v", expectedBytes, quantity.Value()) + } + for _, pod := range pods { + podStats, found := statsFunc(pod) + if !found { + t.Errorf("Pod stats were not found for pod %v", pod.UID) + } + for _, container := range podStats.Containers { + actual := int64(*container.Memory.WorkingSetBytes) + if containerWorkingSetBytes != actual { + t.Errorf("Container working set expected %v, actual: %v", containerWorkingSetBytes, actual) + } + } + } +} + +func TestThresholdsMet(t *testing.T) { + hardThreshold := Threshold{ + Signal: SignalMemoryAvailable, + Operator: OpLessThan, + Value: quantityMustParse("1Gi"), + } + testCases := map[string]struct { + thresholds []Threshold + observations signalObservations + result []Threshold + }{ + "empty": { + thresholds: []Threshold{}, + observations: signalObservations{}, + result: []Threshold{}, + }, + "threshold-met": { + thresholds: []Threshold{hardThreshold}, + observations: signalObservations{ + SignalMemoryAvailable: quantityMustParse("500Mi"), + }, + result: []Threshold{hardThreshold}, + }, + "threshold-not-met": { + thresholds: []Threshold{hardThreshold}, + observations: signalObservations{ + SignalMemoryAvailable: quantityMustParse("2Gi"), + }, + result: []Threshold{}, + }, + } + for testName, testCase := range testCases { + actual := thresholdsMet(testCase.thresholds, testCase.observations) + if !thresholdList(actual).Equal(thresholdList(testCase.result)) { + t.Errorf("Test case: %s, expected: %v, actual: %v", testName, testCase.result, actual) + } + } +} + +func TestThresholdsFirstObservedAt(t *testing.T) { + hardThreshold := Threshold{ + Signal: SignalMemoryAvailable, + Operator: OpLessThan, + Value: quantityMustParse("1Gi"), + } + now := unversioned.Now() + oldTime := unversioned.NewTime(now.Time.Add(-1 * time.Minute)) + testCases := map[string]struct { + thresholds []Threshold + lastObservedAt thresholdsObservedAt + now time.Time + result thresholdsObservedAt + }{ + "empty": { + thresholds: []Threshold{}, + lastObservedAt: thresholdsObservedAt{}, + now: now.Time, + result: thresholdsObservedAt{}, + }, + "no-previous-observation": { + thresholds: []Threshold{hardThreshold}, + lastObservedAt: thresholdsObservedAt{}, + now: now.Time, + result: thresholdsObservedAt{ + hardThreshold: now.Time, + }, + }, + "previous-observation": { + thresholds: []Threshold{hardThreshold}, + lastObservedAt: thresholdsObservedAt{ + hardThreshold: oldTime.Time, + }, + now: now.Time, + result: thresholdsObservedAt{ + hardThreshold: oldTime.Time, + }, + }, + } + for testName, testCase := range testCases { + actual := thresholdsFirstObservedAt(testCase.thresholds, testCase.lastObservedAt, testCase.now) + if !reflect.DeepEqual(actual, testCase.result) { + t.Errorf("Test case: %s, expected: %v, actual: %v", testName, testCase.result, actual) + } + } +} + +func TestThresholdsMetGracePeriod(t *testing.T) { + now := unversioned.Now() + hardThreshold := Threshold{ + Signal: SignalMemoryAvailable, 
+ Operator: OpLessThan, + Value: quantityMustParse("1Gi"), + } + softThreshold := Threshold{ + Signal: SignalMemoryAvailable, + Operator: OpLessThan, + Value: quantityMustParse("2Gi"), + GracePeriod: 1 * time.Minute, + } + oldTime := unversioned.NewTime(now.Time.Add(-2 * time.Minute)) + testCases := map[string]struct { + observedAt thresholdsObservedAt + now time.Time + result []Threshold + }{ + "empty": { + observedAt: thresholdsObservedAt{}, + now: now.Time, + result: []Threshold{}, + }, + "hard-threshold-met": { + observedAt: thresholdsObservedAt{ + hardThreshold: now.Time, + }, + now: now.Time, + result: []Threshold{hardThreshold}, + }, + "soft-threshold-not-met": { + observedAt: thresholdsObservedAt{ + softThreshold: now.Time, + }, + now: now.Time, + result: []Threshold{}, + }, + "soft-threshold-met": { + observedAt: thresholdsObservedAt{ + softThreshold: oldTime.Time, + }, + now: now.Time, + result: []Threshold{softThreshold}, + }, + } + for testName, testCase := range testCases { + actual := thresholdsMetGracePeriod(testCase.observedAt, now.Time) + if !thresholdList(actual).Equal(thresholdList(testCase.result)) { + t.Errorf("Test case: %s, expected: %v, actual: %v", testName, testCase.result, actual) + } + } +} + +func TestNodeConditions(t *testing.T) { + testCases := map[string]struct { + inputs []Threshold + result []api.NodeConditionType + }{ + "empty-list": { + inputs: []Threshold{}, + result: []api.NodeConditionType{}, + }, + "memory.available": { + inputs: []Threshold{ + {Signal: SignalMemoryAvailable}, + }, + result: []api.NodeConditionType{api.NodeMemoryPressure}, + }, + } + for testName, testCase := range testCases { + actual := nodeConditions(testCase.inputs) + if !nodeConditionList(actual).Equal(nodeConditionList(testCase.result)) { + t.Errorf("Test case: %s, expected: %v, actual: %v", testName, testCase.result, actual) + } + } +} + +func TestNodeConditionsLastObservedAt(t *testing.T) { + now := unversioned.Now() + oldTime := unversioned.NewTime(now.Time.Add(-1 * time.Minute)) + testCases := map[string]struct { + nodeConditions []api.NodeConditionType + lastObservedAt nodeConditionsObservedAt + now time.Time + result nodeConditionsObservedAt + }{ + "no-previous-observation": { + nodeConditions: []api.NodeConditionType{api.NodeMemoryPressure}, + lastObservedAt: nodeConditionsObservedAt{}, + now: now.Time, + result: nodeConditionsObservedAt{ + api.NodeMemoryPressure: now.Time, + }, + }, + "previous-observation": { + nodeConditions: []api.NodeConditionType{api.NodeMemoryPressure}, + lastObservedAt: nodeConditionsObservedAt{ + api.NodeMemoryPressure: oldTime.Time, + }, + now: now.Time, + result: nodeConditionsObservedAt{ + api.NodeMemoryPressure: now.Time, + }, + }, + "old-observation": { + nodeConditions: []api.NodeConditionType{}, + lastObservedAt: nodeConditionsObservedAt{ + api.NodeMemoryPressure: oldTime.Time, + }, + now: now.Time, + result: nodeConditionsObservedAt{ + api.NodeMemoryPressure: oldTime.Time, + }, + }, + } + for testName, testCase := range testCases { + actual := nodeConditionsLastObservedAt(testCase.nodeConditions, testCase.lastObservedAt, testCase.now) + if !reflect.DeepEqual(actual, testCase.result) { + t.Errorf("Test case: %s, expected: %v, actual: %v", testName, testCase.result, actual) + } + } +} + +func TestNodeConditionsObservedSince(t *testing.T) { + now := unversioned.Now() + observedTime := unversioned.NewTime(now.Time.Add(-1 * time.Minute)) + testCases := map[string]struct { + observedAt nodeConditionsObservedAt + period time.Duration + now 
time.Time + result []api.NodeConditionType + }{ + "in-period": { + observedAt: nodeConditionsObservedAt{ + api.NodeMemoryPressure: observedTime.Time, + }, + period: 2 * time.Minute, + now: now.Time, + result: []api.NodeConditionType{api.NodeMemoryPressure}, + }, + "out-of-period": { + observedAt: nodeConditionsObservedAt{ + api.NodeMemoryPressure: observedTime.Time, + }, + period: 30 * time.Second, + now: now.Time, + result: []api.NodeConditionType{}, + }, + } + for testName, testCase := range testCases { + actual := nodeConditionsObservedSince(testCase.observedAt, testCase.period, testCase.now) + if !nodeConditionList(actual).Equal(nodeConditionList(testCase.result)) { + t.Errorf("Test case: %s, expected: %v, actual: %v", testName, testCase.result, actual) + } + } +} + +func TestHasNodeConditions(t *testing.T) { + testCases := map[string]struct { + inputs []api.NodeConditionType + item api.NodeConditionType + result bool + }{ + "has-condition": { + inputs: []api.NodeConditionType{api.NodeReady, api.NodeOutOfDisk, api.NodeMemoryPressure}, + item: api.NodeMemoryPressure, + result: true, + }, + "does-not-have-condition": { + inputs: []api.NodeConditionType{api.NodeReady, api.NodeOutOfDisk}, + item: api.NodeMemoryPressure, + result: false, + }, + } + for testName, testCase := range testCases { + if actual := hasNodeCondition(testCase.inputs, testCase.item); actual != testCase.result { + t.Errorf("Test case: %s, expected: %v, actual: %v", testName, testCase.result, actual) + } + } +} + +func TestReclaimResources(t *testing.T) { + testCases := map[string]struct { + inputs []Threshold + result []api.ResourceName + }{ + "memory.available": { + inputs: []Threshold{ + {Signal: SignalMemoryAvailable}, + }, + result: []api.ResourceName{api.ResourceMemory}, + }, + } + for testName, testCase := range testCases { + actual := reclaimResources(testCase.inputs) + actualSet := quota.ToSet(actual) + expectedSet := quota.ToSet(testCase.result) + if !actualSet.Equal(expectedSet) { + t.Errorf("Test case: %s, expected: %v, actual: %v", testName, expectedSet, actualSet) + } + } +} + +func newPodMemoryStats(pod *api.Pod, workingSet resource.Quantity) statsapi.PodStats { + result := statsapi.PodStats{ + PodRef: statsapi.PodReference{ + Name: pod.Name, Namespace: pod.Namespace, UID: string(pod.UID), + }, + } + for range pod.Spec.Containers { + workingSetBytes := uint64(workingSet.Value()) + result.Containers = append(result.Containers, statsapi.ContainerStats{ + Memory: &statsapi.MemoryStats{ + WorkingSetBytes: &workingSetBytes, + }, + }) + } + return result +} + +func newResourceList(cpu, memory string) api.ResourceList { + res := api.ResourceList{} + if cpu != "" { + res[api.ResourceCPU] = resource.MustParse(cpu) + } + if memory != "" { + res[api.ResourceMemory] = resource.MustParse(memory) + } + return res +} + +func newResourceRequirements(requests, limits api.ResourceList) api.ResourceRequirements { + res := api.ResourceRequirements{} + res.Requests = requests + res.Limits = limits + return res +} + +func newContainer(name string, requests api.ResourceList, limits api.ResourceList) api.Container { + return api.Container{ + Name: name, + Resources: newResourceRequirements(requests, limits), + } +} + +func newPod(name string, containers []api.Container) *api.Pod { + return &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: name, + }, + Spec: api.PodSpec{ + Containers: containers, + }, + } +} + +// nodeConditionList is a simple alias to support equality checking independent of order +type nodeConditionList 
[]api.NodeConditionType
+
+// Equal adds the ability to check equality between two lists of node conditions.
+func (s1 nodeConditionList) Equal(s2 nodeConditionList) bool {
+	if len(s1) != len(s2) {
+		return false
+	}
+	for _, item := range s1 {
+		if !hasNodeCondition(s2, item) {
+			return false
+		}
+	}
+	return true
+}
+
+// thresholdList is a simple alias to support equality checking independent of order
+type thresholdList []Threshold
+
+// Equal adds the ability to check equality between two lists of thresholds.
+func (s1 thresholdList) Equal(s2 thresholdList) bool {
+	if len(s1) != len(s2) {
+		return false
+	}
+	for _, item := range s1 {
+		if !hasThreshold(s2, item) {
+			return false
+		}
+	}
+	return true
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/eviction/manager.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/eviction/manager.go
new file mode 100644
index 000000000000..a672ebb9b887
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/eviction/manager.go
@@ -0,0 +1,220 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package eviction
+
+import (
+	"sort"
+	"sync"
+	"time"
+
+	"github.com/golang/glog"
+	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/client/record"
+	"k8s.io/kubernetes/pkg/kubelet/lifecycle"
+	qosutil "k8s.io/kubernetes/pkg/kubelet/qos/util"
+	"k8s.io/kubernetes/pkg/kubelet/server/stats"
+	"k8s.io/kubernetes/pkg/kubelet/util/format"
+	"k8s.io/kubernetes/pkg/util"
+	"k8s.io/kubernetes/pkg/util/wait"
+)
+
+// managerImpl implements Manager
+type managerImpl struct {
+	// used to track time
+	clock util.Clock
+	// config is how the manager is configured
+	config Config
+	// the function to invoke to kill a pod
+	killPodFunc KillPodFunc
+	// protects access to internal state
+	sync.RWMutex
+	// node conditions are the set of conditions present
+	nodeConditions []api.NodeConditionType
+	// captures when a node condition was last observed based on a threshold being met
+	nodeConditionsLastObservedAt nodeConditionsObservedAt
+	// nodeRef is a reference to the node
+	nodeRef *api.ObjectReference
+	// used to record events about the node
+	recorder record.EventRecorder
+	// used to measure usage stats on system
+	summaryProvider stats.SummaryProvider
+	// records when a threshold was first observed
+	thresholdsFirstObservedAt thresholdsObservedAt
+}
+
+// ensure it implements the required interface
+var _ Manager = &managerImpl{}
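Before the constructor below, a hedged sketch of how a caller might consume the two values NewManager returns; every surrounding variable here (summaryProvider, killPodFunc, recorder, nodeRef, activePodsFunc, oneGi, podAdmitHandlers) is an assumption standing in for kubelet plumbing, not code from this diff:

```go
// Sketch only: the same managerImpl is returned both as the eviction
// control loop (Manager) and as an admission handler that rejects new
// best-effort pods while the node is under pressure.
manager, admitHandler, err := eviction.NewManager(
	summaryProvider, // stats.SummaryProvider used for observations
	eviction.Config{
		PressureTransitionPeriod: 5 * time.Minute,
		MaxPodGracePeriodSeconds: 30,
		Thresholds: []eviction.Threshold{
			// hard threshold: zero grace period, evicted pods get a 0s override
			{Signal: eviction.SignalMemoryAvailable, Operator: eviction.OpLessThan, Value: oneGi},
		},
	},
	killPodFunc, // blocking eviction.KillPodFunc adapter
	recorder, nodeRef, util.RealClock{})
if err != nil {
	glog.Fatalf("failed to construct eviction manager: %v", err)
}
podAdmitHandlers.AddPodAdmitHandler(admitHandler) // assumed registration hook
manager.Start(activePodsFunc, 10*time.Second)
```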
+// NewManager returns a configured Manager and an associated admission handler to enforce eviction configuration.
+func NewManager(
+	summaryProvider stats.SummaryProvider,
+	config Config,
+	killPodFunc KillPodFunc,
+	recorder record.EventRecorder,
+	nodeRef *api.ObjectReference,
+	clock util.Clock) (Manager, lifecycle.PodAdmitHandler, error) {
+	manager := &managerImpl{
+		clock:           clock,
+		killPodFunc:     killPodFunc,
+		config:          config,
+		recorder:        recorder,
+		summaryProvider: summaryProvider,
+		nodeRef:         nodeRef,
+		nodeConditionsLastObservedAt: nodeConditionsObservedAt{},
+		thresholdsFirstObservedAt:    thresholdsObservedAt{},
+	}
+	return manager, manager, nil
+}
+
+// Admit rejects a pod if it's not safe to admit for node stability.
+func (m *managerImpl) Admit(attrs *lifecycle.PodAdmitAttributes) lifecycle.PodAdmitResult {
+	m.RLock()
+	defer m.RUnlock()
+	if len(m.nodeConditions) == 0 {
+		return lifecycle.PodAdmitResult{Admit: true}
+	}
+	notBestEffort := qosutil.BestEffort != qosutil.GetPodQos(attrs.Pod)
+	if notBestEffort {
+		return lifecycle.PodAdmitResult{Admit: true}
+	}
+	glog.Warningf("Failed to admit pod %v - node has conditions: %v", format.Pod(attrs.Pod), m.nodeConditions)
+	// we reject all best effort pods until we are stable.
+	return lifecycle.PodAdmitResult{
+		Admit:   false,
+		Reason:  reason,
+		Message: message,
+	}
+}
+
+// Start starts the control loop to observe and respond to low compute resources.
+func (m *managerImpl) Start(podFunc ActivePodsFunc, monitoringInterval time.Duration) {
+	go wait.Until(func() { m.synchronize(podFunc) }, monitoringInterval, wait.NeverStop)
+}
+
+// IsUnderMemoryPressure returns true if the node is under memory pressure.
+func (m *managerImpl) IsUnderMemoryPressure() bool {
+	m.RLock()
+	defer m.RUnlock()
+	return hasNodeCondition(m.nodeConditions, api.NodeMemoryPressure)
+}
+
+// synchronize is the main control loop that enforces eviction thresholds.
+func (m *managerImpl) synchronize(podFunc ActivePodsFunc) {
+	// if we have nothing to do, just return
+	thresholds := m.config.Thresholds
+	if len(thresholds) == 0 {
+		return
+	}
+
+	// make observations and get a function to derive pod usage stats relative to those observations.
+	observations, statsFunc, err := makeSignalObservations(m.summaryProvider)
+	if err != nil {
+		glog.Errorf("eviction manager: unexpected err: %v", err)
+		return
+	}
+
+	// record the time at which these observations were made
+	now := m.clock.Now()
+
+	// determine the set of thresholds met independent of grace period
+	thresholds = thresholdsMet(thresholds, observations)
+
+	// track when a threshold was first observed
+	thresholdsFirstObservedAt := thresholdsFirstObservedAt(thresholds, m.thresholdsFirstObservedAt, now)
+
+	// the set of node conditions that are triggered by currently observed thresholds
+	nodeConditions := nodeConditions(thresholds)
+
+	// track when a node condition was last observed
+	nodeConditionsLastObservedAt := nodeConditionsLastObservedAt(nodeConditions, m.nodeConditionsLastObservedAt, now)
+
+	// a node condition reports true if it has been observed within the transition period window
+	nodeConditions = nodeConditionsObservedSince(nodeConditionsLastObservedAt, m.config.PressureTransitionPeriod, now)
+
+	// determine the set of thresholds we need to drive eviction behavior (i.e. all grace periods are met)
+	thresholds = thresholdsMetGracePeriod(thresholdsFirstObservedAt, now)
+
+	// update internal state
+	m.Lock()
+	m.nodeConditions = nodeConditions
+	m.thresholdsFirstObservedAt = thresholdsFirstObservedAt
+	m.nodeConditionsLastObservedAt = nodeConditionsLastObservedAt
+	m.Unlock()
+
+	// determine the set of resources under starvation
+	starvedResources := reclaimResources(thresholds)
+	if len(starvedResources) == 0 {
+		glog.V(3).Infof("eviction manager: no resources are starved")
+		return
+	}
+
+	// rank the resources to reclaim by eviction priority
+	sort.Sort(byEvictionPriority(starvedResources))
+	resourceToReclaim := starvedResources[0]
+	glog.Warningf("eviction manager: attempting to reclaim %v", resourceToReclaim)
+
+	// determine if this is a soft or hard eviction associated with the resource
+	softEviction := isSoftEviction(thresholds, resourceToReclaim)
+
+	// record an event about the resources we are now attempting to reclaim via eviction
+	m.recorder.Eventf(m.nodeRef, api.EventTypeWarning, "EvictionThresholdMet", "Attempting to reclaim %s", resourceToReclaim)
+
+	// rank the pods for eviction
+	rank, ok := resourceToRankFunc[resourceToReclaim]
+	if !ok {
+		glog.Errorf("eviction manager: no ranking function for resource %s", resourceToReclaim)
+		return
+	}
+
+	// the only candidates viable for eviction are pods that are currently active.
+	activePods := podFunc()
+	if len(activePods) == 0 {
+		glog.Errorf("eviction manager: eviction thresholds have been met, but no pods are active to evict")
+		return
+	}
+
+	// rank the running pods for eviction for the specified resource
+	rank(activePods, statsFunc)
+
+	glog.Infof("eviction manager: pods ranked for eviction: %s", format.Pods(activePods))
+
+	// we kill at most a single pod during each eviction interval
+	for i := range activePods {
+		pod := activePods[i]
+		status := api.PodStatus{
+			Phase:   api.PodFailed,
+			Message: message,
+			Reason:  reason,
+		}
+		// record that we are evicting the pod
+		m.recorder.Eventf(pod, api.EventTypeWarning, reason, message)
+		gracePeriodOverride := int64(0)
+		if softEviction {
+			gracePeriodOverride = m.config.MaxPodGracePeriodSeconds
+		}
+		// this is a blocking call and should only return when the pod and its containers are killed.
+		err := m.killPodFunc(pod, status, &gracePeriodOverride)
+		if err != nil {
+			glog.Infof("eviction manager: pod %s failed to evict: %v", format.Pod(pod), err)
+			continue
+		}
+		// success, so we wait until the next housekeeping interval
+		glog.Infof("eviction manager: pod %s evicted successfully", format.Pod(pod))
+		return
+	}
+	glog.Infof("eviction manager: unable to evict any pods from the node")
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/eviction/manager_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/eviction/manager_test.go
new file mode 100644
index 000000000000..c51dbd8200de
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/eviction/manager_test.go
@@ -0,0 +1,273 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package eviction
+
+import (
+	"testing"
+	"time"
+
+	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/api/resource"
+	"k8s.io/kubernetes/pkg/client/record"
+	statsapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/stats"
+	"k8s.io/kubernetes/pkg/kubelet/lifecycle"
+	"k8s.io/kubernetes/pkg/types"
+	"k8s.io/kubernetes/pkg/util"
+)
+
+// mockPodKiller is used to record which pod was killed
+type mockPodKiller struct {
+	pod                 *api.Pod
+	status              api.PodStatus
+	gracePeriodOverride *int64
+}
+
+// killPodNow records the pod that was killed
+func (m *mockPodKiller) killPodNow(pod *api.Pod, status api.PodStatus, gracePeriodOverride *int64) error {
+	m.pod = pod
+	m.status = status
+	m.gracePeriodOverride = gracePeriodOverride
+	return nil
+}
+
+// TestMemoryPressure exercises the eviction manager end-to-end as memory pressure rises and falls.
+func TestMemoryPressure(t *testing.T) {
+	podMaker := func(name string, requests api.ResourceList, limits api.ResourceList, memoryWorkingSet string) (*api.Pod, statsapi.PodStats) {
+		pod := newPod(name, []api.Container{
+			newContainer(name, requests, api.ResourceList{}),
+		})
+		podStats := newPodMemoryStats(pod, resource.MustParse(memoryWorkingSet))
+		return pod, podStats
+	}
+	summaryStatsMaker := func(nodeAvailableBytes string, podStats map[*api.Pod]statsapi.PodStats) *statsapi.Summary {
+		val := resource.MustParse(nodeAvailableBytes)
+		availableBytes := uint64(val.Value())
+		result := &statsapi.Summary{
+			Node: statsapi.NodeStats{
+				Memory: &statsapi.MemoryStats{
+					AvailableBytes: &availableBytes,
+				},
+			},
+			Pods: []statsapi.PodStats{},
+		}
+		for _, podStat := range podStats {
+			result.Pods = append(result.Pods, podStat)
+		}
+		return result
+	}
+	podsToMake := []struct {
+		name             string
+		requests         api.ResourceList
+		limits           api.ResourceList
+		memoryWorkingSet string
+	}{
+		{name: "best-effort-high", requests: newResourceList("", ""), limits: newResourceList("", ""), memoryWorkingSet: "500Mi"},
+		{name: "best-effort-low", requests: newResourceList("", ""), limits: newResourceList("", ""), memoryWorkingSet: "300Mi"},
+		{name: "burstable-high", requests: newResourceList("100m", "100Mi"), limits: newResourceList("200m", "1Gi"), memoryWorkingSet: "800Mi"},
+		{name: "burstable-low", requests: newResourceList("100m", "100Mi"), limits: newResourceList("200m", "1Gi"), memoryWorkingSet: "300Mi"},
+		{name: "guaranteed-high", requests: newResourceList("100m", "1Gi"), limits: newResourceList("100m", "1Gi"), memoryWorkingSet: "800Mi"},
+		{name: "guaranteed-low", requests: newResourceList("100m", "1Gi"), limits: newResourceList("100m", "1Gi"), memoryWorkingSet: "200Mi"},
+	}
+	pods := []*api.Pod{}
+	podStats := map[*api.Pod]statsapi.PodStats{}
+	for _, podToMake := range podsToMake {
+		pod, podStat := podMaker(podToMake.name, podToMake.requests, podToMake.limits, podToMake.memoryWorkingSet)
+		pods = append(pods, pod)
+		podStats[pod] = podStat
+	}
+	activePodsFunc := func() []*api.Pod {
+		return pods
+	}
+
+	fakeClock := util.NewFakeClock(time.Now())
+	podKiller := &mockPodKiller{}
+	nodeRef := &api.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""}
+
+	config := Config{
+		MaxPodGracePeriodSeconds: 5,
+		PressureTransitionPeriod: time.Minute * 5,
+		Thresholds: []Threshold{
+			{
+				Signal:   SignalMemoryAvailable,
+				Operator: OpLessThan,
+				Value:    quantityMustParse("1Gi"),
+			},
+			{
+				Signal:      SignalMemoryAvailable,
+				Operator:    OpLessThan,
+				Value:       quantityMustParse("2Gi"),
+				GracePeriod: time.Minute * 2,
+			},
+		},
+	}
+	summaryProvider := &fakeSummaryProvider{result: summaryStatsMaker("2Gi", podStats)}
+	manager := &managerImpl{
+		clock:           fakeClock,
+		killPodFunc:     podKiller.killPodNow,
+		config:          config,
+		recorder:        &record.FakeRecorder{},
+		summaryProvider: summaryProvider,
+		nodeRef:         nodeRef,
+		nodeConditionsLastObservedAt: nodeConditionsObservedAt{},
+		thresholdsFirstObservedAt:    thresholdsObservedAt{},
+	}
+
+	// create a best effort pod to test admission
+	bestEffortPodToAdmit, _ := podMaker("best-admit", newResourceList("", ""), newResourceList("", ""), "0Gi")
+	burstablePodToAdmit, _ := podMaker("burst-admit", newResourceList("100m", "100Mi"), newResourceList("200m", "200Mi"), "0Gi")
+
+	// synchronize
+	manager.synchronize(activePodsFunc)
+
+	// we should not have memory pressure
+	if manager.IsUnderMemoryPressure() {
+		t.Errorf("Manager should not report memory pressure")
+	}
+
+	// try to admit our pods (they should succeed)
+	expected := []bool{true, true}
+	for i, pod := range []*api.Pod{bestEffortPodToAdmit, burstablePodToAdmit} {
+		if result := manager.Admit(&lifecycle.PodAdmitAttributes{Pod: pod}); expected[i] != result.Admit {
+			t.Errorf("Admit pod: %v, expected: %v, actual: %v", pod, expected[i], result.Admit)
+		}
+	}
+
+	// induce soft threshold
+	fakeClock.Step(1 * time.Minute)
+	summaryProvider.result = summaryStatsMaker("1500Mi", podStats)
+	manager.synchronize(activePodsFunc)
+
+	// we should have memory pressure
+	if !manager.IsUnderMemoryPressure() {
+		t.Errorf("Manager should report memory pressure since soft threshold was met")
+	}
+
+	// verify no pod was killed yet because the soft threshold's grace period has not elapsed
+	if podKiller.pod != nil {
+		t.Errorf("Manager should not have killed a pod yet, but killed: %v", podKiller.pod)
+	}
+
+	// step forward in time past the grace period
+	fakeClock.Step(3 * time.Minute)
+	summaryProvider.result = summaryStatsMaker("1500Mi", podStats)
+	manager.synchronize(activePodsFunc)
+
+	// we should have memory pressure
+	if !manager.IsUnderMemoryPressure() {
+		t.Errorf("Manager should report memory pressure since soft threshold was met")
+	}
+
+	// verify the right pod was killed with the right grace period.
+	if podKiller.pod != pods[0] {
+		t.Errorf("Manager chose to kill pod: %v, but should have chosen %v", podKiller.pod, pods[0])
+	}
+	if podKiller.gracePeriodOverride == nil {
+		t.Errorf("Manager chose to kill pod but should have had a grace period override.")
+	}
+	observedGracePeriod := *podKiller.gracePeriodOverride
+	if observedGracePeriod != manager.config.MaxPodGracePeriodSeconds {
+		t.Errorf("Manager chose to kill pod with incorrect grace period. Expected: %d, actual: %d", manager.config.MaxPodGracePeriodSeconds, observedGracePeriod)
+	}
+	// reset state
+	podKiller.pod = nil
+	podKiller.gracePeriodOverride = nil
+
+	// remove memory pressure
+	fakeClock.Step(20 * time.Minute)
+	summaryProvider.result = summaryStatsMaker("3Gi", podStats)
+	manager.synchronize(activePodsFunc)
+
+	// we should not have memory pressure
+	if manager.IsUnderMemoryPressure() {
+		t.Errorf("Manager should not report memory pressure")
+	}
+
+	// induce memory pressure!
+ fakeClock.Step(1 * time.Minute) + summaryProvider.result = summaryStatsMaker("500Mi", podStats) + manager.synchronize(activePodsFunc) + + // we should have memory pressure + if !manager.IsUnderMemoryPressure() { + t.Errorf("Manager should report memory pressure") + } + + // check the right pod was killed + if podKiller.pod != pods[0] { + t.Errorf("Manager chose to kill pod: %v, but should have chosen %v", podKiller.pod, pods[0]) + } + observedGracePeriod = *podKiller.gracePeriodOverride + if observedGracePeriod != int64(0) { + t.Errorf("Manager chose to kill pod with incorrect grace period. Expected: %d, actual: %d", 0, observedGracePeriod) + } + + // the best-effort pod should not admit, burstable should + expected = []bool{false, true} + for i, pod := range []*api.Pod{bestEffortPodToAdmit, burstablePodToAdmit} { + if result := manager.Admit(&lifecycle.PodAdmitAttributes{Pod: pod}); expected[i] != result.Admit { + t.Errorf("Admit pod: %v, expected: %v, actual: %v", pod, expected[i], result.Admit) + } + } + + // reduce memory pressure + fakeClock.Step(1 * time.Minute) + summaryProvider.result = summaryStatsMaker("2Gi", podStats) + podKiller.pod = nil // reset state + manager.synchronize(activePodsFunc) + + // we should have memory pressure (because transition period not yet met) + if !manager.IsUnderMemoryPressure() { + t.Errorf("Manager should report memory pressure") + } + + // no pod should have been killed + if podKiller.pod != nil { + t.Errorf("Manager chose to kill pod: %v when no pod should have been killed", podKiller.pod) + } + + // the best-effort pod should not admit, burstable should + expected = []bool{false, true} + for i, pod := range []*api.Pod{bestEffortPodToAdmit, burstablePodToAdmit} { + if result := manager.Admit(&lifecycle.PodAdmitAttributes{Pod: pod}); expected[i] != result.Admit { + t.Errorf("Admit pod: %v, expected: %v, actual: %v", pod, expected[i], result.Admit) + } + } + + // move the clock past transition period to ensure that we stop reporting pressure + fakeClock.Step(5 * time.Minute) + summaryProvider.result = summaryStatsMaker("2Gi", podStats) + podKiller.pod = nil // reset state + manager.synchronize(activePodsFunc) + + // we should not have memory pressure (because transition period met) + if manager.IsUnderMemoryPressure() { + t.Errorf("Manager should not report memory pressure") + } + + // no pod should have been killed + if podKiller.pod != nil { + t.Errorf("Manager chose to kill pod: %v when no pod should have been killed", podKiller.pod) + } + + // all pods should admit now + expected = []bool{true, true} + for i, pod := range []*api.Pod{bestEffortPodToAdmit, burstablePodToAdmit} { + if result := manager.Admit(&lifecycle.PodAdmitAttributes{Pod: pod}); expected[i] != result.Admit { + t.Errorf("Admit pod: %v, expected: %v, actual: %v", pod, expected[i], result.Admit) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/eviction/types.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/eviction/types.go new file mode 100644 index 000000000000..3b4470d7c7b4 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/eviction/types.go @@ -0,0 +1,99 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package eviction
+
+import (
+	"time"
+
+	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/api/resource"
+	statsapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/stats"
+)
+
+// Signal defines a signal that can trigger eviction of pods on a node.
+type Signal string
+
+const (
+	// SignalMemoryAvailable is memory available (i.e. capacity - workingSet), in bytes.
+	SignalMemoryAvailable Signal = "memory.available"
+)
+
+// ThresholdOperator is the operator used to express a Threshold.
+type ThresholdOperator string
+
+const (
+	// OpLessThan is the operator that expresses a less-than relationship.
+	OpLessThan ThresholdOperator = "LessThan"
+)
+
+// Config holds information about how eviction is configured.
+type Config struct {
+	// PressureTransitionPeriod is the duration the kubelet has to wait before transitioning out of a pressure condition.
+	PressureTransitionPeriod time.Duration
+	// MaxPodGracePeriodSeconds is the maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met.
+	MaxPodGracePeriodSeconds int64
+	// Thresholds define the set of conditions monitored to trigger eviction.
+	Thresholds []Threshold
+}
+
+// Threshold defines a metric for when eviction should occur.
+type Threshold struct {
+	// Signal defines the entity that was measured.
+	Signal Signal
+	// Operator represents a relationship of a signal to a value.
+	Operator ThresholdOperator
+	// Value is a quantity associated with the signal that is evaluated against the specified operator.
+	Value *resource.Quantity
+	// GracePeriod represents the amount of time that a threshold must be met before eviction is triggered.
+	GracePeriod time.Duration
+}
+
+// Manager evaluates when an eviction threshold for node stability has been met on the node.
+type Manager interface {
+	// Start starts the control loop to monitor eviction thresholds at the specified interval.
+	Start(podFunc ActivePodsFunc, monitoringInterval time.Duration)
+
+	// IsUnderMemoryPressure returns true if the node is under memory pressure.
+	IsUnderMemoryPressure() bool
+}
+
+// KillPodFunc kills a pod.
+// The pod status is updated, and then it is killed with the specified grace period.
+// This function must block until either the pod is killed or an error is encountered.
+// Arguments:
+// pod - the pod to kill
+// status - the desired status to associate with the pod (i.e. why it's killed)
+// gracePeriodOverride - the grace period override to use instead of what is on the pod spec
+type KillPodFunc func(pod *api.Pod, status api.PodStatus, gracePeriodOverride *int64) error
+
+// ActivePodsFunc returns pods bound to the kubelet that are active (i.e. non-terminal state)
+type ActivePodsFunc func() []*api.Pod
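To make the mapping concrete, a hedged sketch (not from this diff) of how the hard and soft threshold strings exercised by the ParseThresholdConfig tests earlier in this diff would populate Threshold values; `quantityMustParse` is the test helper defined above:

```go
// Hard threshold "memory.available<150Mi": met as soon as the observed
// memory.available drops below 150Mi; GracePeriod stays zero.
hard := Threshold{
	Signal:   SignalMemoryAvailable,
	Operator: OpLessThan,
	Value:    quantityMustParse("150Mi"),
}

// Soft threshold "memory.available<300Mi" with grace period
// "memory.available=30s": must stay met for 30s before eviction triggers.
soft := Threshold{
	Signal:      SignalMemoryAvailable,
	Operator:    OpLessThan,
	Value:       quantityMustParse("300Mi"),
	GracePeriod: 30 * time.Second,
}
```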
+// statsFunc returns the usage stats if known for an input pod.
+type statsFunc func(pod *api.Pod) (statsapi.PodStats, bool)
+
+// rankFunc sorts the pods in eviction order
+type rankFunc func(pods []*api.Pod, stats statsFunc)
+
+// signalObservations maps a signal to an observed quantity
+type signalObservations map[Signal]*resource.Quantity
+
+// thresholdsObservedAt maps a threshold to a time that it was observed
+type thresholdsObservedAt map[Threshold]time.Time
+
+// nodeConditionsObservedAt maps a node condition to a time that it was observed
+type nodeConditionsObservedAt map[api.NodeConditionType]time.Time
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/flannel_helper.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/flannel_helper.go
new file mode 100644
index 000000000000..c81cb594fec8
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/flannel_helper.go
@@ -0,0 +1,168 @@
+/*
+Copyright 2015 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kubelet
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"strconv"
+	"strings"
+
+	utildbus "k8s.io/kubernetes/pkg/util/dbus"
+	utilexec "k8s.io/kubernetes/pkg/util/exec"
+	utiliptables "k8s.io/kubernetes/pkg/util/iptables"
+
+	"github.com/golang/glog"
+)
+
+// TODO: Move all this to a network plugin.
+const (
+	// TODO: The location of default docker options is distro specific, so this
+	// probably won't work on anything other than debian/ubuntu. This is a
+	// short-term compromise till we've moved overlay setup into a plugin.
+	dockerOptsFile    = "/etc/default/docker"
+	flannelSubnetKey  = "FLANNEL_SUBNET"
+	flannelNetworkKey = "FLANNEL_NETWORK"
+	flannelMtuKey     = "FLANNEL_MTU"
+	dockerOptsKey     = "DOCKER_OPTS"
+	flannelSubnetFile = "/var/run/flannel/subnet.env"
+)
+
+// FlannelHelper is a kubelet-to-flannel bridging helper.
+type FlannelHelper struct {
+	subnetFile     string
+	iptablesHelper utiliptables.Interface
+}
+
+// NewFlannelHelper creates a new flannel helper.
+func NewFlannelHelper() *FlannelHelper {
+	return &FlannelHelper{
+		subnetFile:     flannelSubnetFile,
+		iptablesHelper: utiliptables.New(utilexec.New(), utildbus.New(), utiliptables.ProtocolIpv4),
+	}
+}
+
+// ensureFlannelMasqRule ensures the required MASQUERADE rule exists for the given network/cidr.
+func (f *FlannelHelper) ensureFlannelMasqRule(kubeNetwork, podCIDR string) error {
+	// TODO: Investigate delegation to flannel via -ip-masq=true once flannel
+	// issue #374 is resolved.
+	comment := "Flannel masquerade facilitates pod<->node traffic."
+	args := []string{
+		"-m", "comment", "--comment", comment,
+		"!", "-d", kubeNetwork, "-s", podCIDR, "-j", "MASQUERADE",
+	}
+	_, err := f.iptablesHelper.EnsureRule(
+		utiliptables.Append,
+		utiliptables.TableNAT,
+		utiliptables.ChainPostrouting,
+		args...)
+	return err
+}
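For context, the subnet file consumed by Handshake below is a flat KEY=VALUE env file written by flannel; the keys match the constants above, while the values here are illustrative, not taken from this diff:

```
FLANNEL_NETWORK=10.244.0.0/16
FLANNEL_SUBNET=10.244.1.1/24
FLANNEL_MTU=1450
```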
+// Handshake waits for the flannel subnet file and installs a few IPTables
+// rules, returning the pod CIDR allocated for this node.
+func (f *FlannelHelper) Handshake() (podCIDR string, err error) {
+	// TODO: Using a file to communicate is brittle
+	if _, err = os.Stat(f.subnetFile); err != nil {
+		return "", fmt.Errorf("Waiting for subnet file %v", f.subnetFile)
+	}
+	glog.Infof("Found flannel subnet file %v", f.subnetFile)
+
+	config, err := parseKVConfig(f.subnetFile)
+	if err != nil {
+		return "", err
+	}
+	if err = writeDockerOptsFromFlannelConfig(config); err != nil {
+		return "", err
+	}
+	podCIDR, ok := config[flannelSubnetKey]
+	if !ok {
+		return "", fmt.Errorf("No flannel subnet, config %+v", config)
+	}
+	kubeNetwork, ok := config[flannelNetworkKey]
+	if !ok {
+		return "", fmt.Errorf("No flannel network, config %+v", config)
+	}
+	// note: the original dropped this error by omitting the assignment
+	if err = f.ensureFlannelMasqRule(kubeNetwork, podCIDR); err != nil {
+		return "", fmt.Errorf("Unable to install flannel masquerade %v", err)
+	}
+	return podCIDR, nil
+}
+
+// writeDockerOptsFromFlannelConfig takes env variables from the flannel subnet env and writes them to /etc/default/docker.
+func writeDockerOptsFromFlannelConfig(flannelConfig map[string]string) error {
+	// TODO: Write dockeropts to unit file on systemd machines
+	// https://github.com/docker/docker/issues/9889
+	mtu, ok := flannelConfig[flannelMtuKey]
+	if !ok {
+		return fmt.Errorf("No flannel mtu, flannel config %+v", flannelConfig)
+	}
+	dockerOpts, err := parseKVConfig(dockerOptsFile)
+	if err != nil {
+		return err
+	}
+	opts, ok := dockerOpts[dockerOptsKey]
+	if !ok {
+		glog.Errorf("Did not find docker opts, writing them")
+		opts = " --bridge=cbr0 --iptables=false --ip-masq=false"
+	} else {
+		opts, _ = strconv.Unquote(opts)
+	}
+	dockerOpts[dockerOptsKey] = fmt.Sprintf("\"%v --mtu=%v\"", opts, mtu)
+	if err = writeKVConfig(dockerOptsFile, dockerOpts); err != nil {
+		return err
+	}
+	return nil
+}
+
+// parseKVConfig takes a file with key-value env variables and returns a dictionary mapping the same.
+func parseKVConfig(filename string) (map[string]string, error) {
+	config := map[string]string{}
+	if _, err := os.Stat(filename); err != nil {
+		return config, err
+	}
+	buff, err := ioutil.ReadFile(filename)
+	if err != nil {
+		return config, err
+	}
+	str := string(buff)
+	glog.Infof("Read kv options %+v from %v", str, filename)
+	for _, line := range strings.Split(str, "\n") {
+		kv := strings.Split(line, "=")
+		if len(kv) != 2 {
+			glog.Warningf("Ignoring non key-value pair %v", kv)
+			continue
+		}
+		config[kv[0]] = kv[1]
+	}
+	return config, nil
+}
+
+// writeKVConfig writes a kv map as env variables into the given file.
+func writeKVConfig(filename string, kv map[string]string) error {
+	if _, err := os.Stat(filename); err != nil {
+		return err
+	}
+	content := ""
+	for k, v := range kv {
+		content += fmt.Sprintf("%v=%v\n", k, v)
+	}
+	glog.Warningf("Writing kv options %+v to %v", content, filename)
+	return ioutil.WriteFile(filename, []byte(content), 0644)
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/image_manager.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/image_manager.go
new file mode 100644
index 000000000000..6f521647d658
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/image_manager.go
@@ -0,0 +1,337 @@
+/*
+Copyright 2015 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kubelet
+
+import (
+	"fmt"
+	"sort"
+	"sync"
+	"time"
+
+	"github.com/golang/glog"
+	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/client/record"
+	"k8s.io/kubernetes/pkg/kubelet/cadvisor"
+	"k8s.io/kubernetes/pkg/kubelet/container"
+	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
+	"k8s.io/kubernetes/pkg/util/sets"
+	"k8s.io/kubernetes/pkg/util/wait"
+)
+
+// imageManager manages the lifecycle of all images.
+//
+// Implementation is thread-safe.
+type imageManager interface {
+	// GarbageCollect applies the garbage collection policy. Errors include being unable to free
+	// enough space as per the garbage collection policy.
+	GarbageCollect() error
+
+	// Start async garbage collection of images.
+	Start() error
+
+	GetImageList() ([]kubecontainer.Image, error)
+
+	// TODO(vmarmol): Have this subsume pulls as well.
+}
+
+// ImageGCPolicy is a policy for garbage collecting images. Policy defines an allowed band in
+// which garbage collection will be run.
+type ImageGCPolicy struct {
+	// Any usage above this threshold will always trigger garbage collection.
+	// This is the highest usage we will allow.
+	HighThresholdPercent int
+
+	// Any usage below this threshold will never trigger garbage collection.
+	// This is the lowest threshold we will try to garbage collect to.
+	LowThresholdPercent int
+
+	// Minimum age at which an image can be garbage collected.
+	MinAge time.Duration
+}
+
+type realImageManager struct {
+	// Container runtime
+	runtime container.Runtime
+
+	// Records of images and their use.
+	imageRecords     map[string]*imageRecord
+	imageRecordsLock sync.Mutex
+
+	// The image garbage collection policy in use.
+	policy ImageGCPolicy
+
+	// cAdvisor instance.
+	cadvisor cadvisor.Interface
+
+	// Recorder for Kubernetes events.
+	recorder record.EventRecorder
+
+	// Reference to this node.
+	nodeRef *api.ObjectReference
+
+	// Track initialization
+	initialized bool
+}
+
+// imageRecord holds information about the images we track.
+type imageRecord struct {
+	// Time when this image was first detected.
+	firstDetected time.Time
+
+	// Time when we last saw this image being used.
+	lastUsed time.Time
+
+	// Size of the image in bytes.
+	size int64
+}
+
+func newImageManager(runtime container.Runtime, cadvisorInterface cadvisor.Interface, recorder record.EventRecorder, nodeRef *api.ObjectReference, policy ImageGCPolicy) (imageManager, error) {
+	// Validate policy.
+	if policy.HighThresholdPercent < 0 || policy.HighThresholdPercent > 100 {
+		return nil, fmt.Errorf("invalid HighThresholdPercent %d, must be in range [0-100]", policy.HighThresholdPercent)
+	}
+	if policy.LowThresholdPercent < 0 || policy.LowThresholdPercent > 100 {
+		return nil, fmt.Errorf("invalid LowThresholdPercent %d, must be in range [0-100]", policy.LowThresholdPercent)
+	}
+	if policy.LowThresholdPercent > policy.HighThresholdPercent {
+		return nil, fmt.Errorf("LowThresholdPercent %d can not be higher than HighThresholdPercent %d", policy.LowThresholdPercent, policy.HighThresholdPercent)
+	}
+	im := &realImageManager{
+		runtime:      runtime,
+		policy:       policy,
+		imageRecords: make(map[string]*imageRecord),
+		cadvisor:     cadvisorInterface,
+		recorder:     recorder,
+		nodeRef:      nodeRef,
+		initialized:  false,
+	}
+
+	return im, nil
+}
+
+func (im *realImageManager) Start() error {
+	go wait.Until(func() {
+		// On the initial detection, use the zero time so images already present
+		// are treated as detected in the unknown past.
+		var ts time.Time
+		if im.initialized {
+			ts = time.Now()
+		}
+		err := im.detectImages(ts)
+		if err != nil {
+			glog.Warningf("[ImageManager] Failed to monitor images: %v", err)
+		} else {
+			im.initialized = true
+		}
+	}, 5*time.Minute, wait.NeverStop)
+
+	return nil
+}
+
+// GetImageList returns a list of images on this node.
+func (im *realImageManager) GetImageList() ([]kubecontainer.Image, error) {
+	images, err := im.runtime.ListImages()
+	if err != nil {
+		return nil, err
+	}
+	return images, nil
+}
+
+func (im *realImageManager) detectImages(detectTime time.Time) error {
+	images, err := im.runtime.ListImages()
+	if err != nil {
+		return err
+	}
+	pods, err := im.runtime.GetPods(true)
+	if err != nil {
+		return err
+	}
+
+	// Make a set of images in use by containers.
+	imagesInUse := sets.NewString()
+	for _, pod := range pods {
+		for _, container := range pod.Containers {
+			glog.V(5).Infof("Pod %s/%s, container %s uses image %s", pod.Namespace, pod.Name, container.Name, container.Image)
+			imagesInUse.Insert(container.Image)
+		}
+	}
+
+	// Add new images and record those being used.
+	now := time.Now()
+	currentImages := sets.NewString()
+	im.imageRecordsLock.Lock()
+	defer im.imageRecordsLock.Unlock()
+	for _, image := range images {
+		glog.V(5).Infof("Adding image ID %s to currentImages", image.ID)
+		currentImages.Insert(image.ID)
+
+		// New image, set it as detected now.
+		if _, ok := im.imageRecords[image.ID]; !ok {
+			glog.V(5).Infof("Image ID %s is new", image.ID)
+			im.imageRecords[image.ID] = &imageRecord{
+				firstDetected: detectTime,
+			}
+		}
+
+		// Set last used time to now if the image is being used.
+		if isImageUsed(image, imagesInUse) {
+			glog.V(5).Infof("Setting Image ID %s lastUsed to %v", image.ID, now)
+			im.imageRecords[image.ID].lastUsed = now
+		}
+
+		glog.V(5).Infof("Image ID %s has size %d", image.ID, image.Size)
+		im.imageRecords[image.ID].size = image.Size
+	}
+
+	// Remove old images from our records.
+	for image := range im.imageRecords {
+		if !currentImages.Has(image) {
+			glog.V(5).Infof("Image ID %s is no longer present; removing from imageRecords", image)
+			delete(im.imageRecords, image)
+		}
+	}
+
+	return nil
+}
+
+func (im *realImageManager) GarbageCollect() error {
+	// Get disk usage on the disk holding images.
+	fsInfo, err := im.cadvisor.ImagesFsInfo()
+	if err != nil {
+		return err
+	}
+	usage := int64(fsInfo.Usage)
+	capacity := int64(fsInfo.Capacity)
+
+	// Check valid capacity.
+	if capacity == 0 {
+		err := fmt.Errorf("invalid capacity %d on device %q at mount point %q", capacity, fsInfo.Device, fsInfo.Mountpoint)
+		im.recorder.Eventf(im.nodeRef, api.EventTypeWarning, container.InvalidDiskCapacity, err.Error())
+		return err
+	}
+
+	// If over the max threshold, free enough to place us at the lower threshold.
+	usagePercent := int(usage * 100 / capacity)
+	if usagePercent >= im.policy.HighThresholdPercent {
+		amountToFree := usage - (int64(im.policy.LowThresholdPercent) * capacity / 100)
+		glog.Infof("[ImageManager]: Disk usage on %q (%s) is at %d%% which is over the high threshold (%d%%). Trying to free %d bytes", fsInfo.Device, fsInfo.Mountpoint, usagePercent, im.policy.HighThresholdPercent, amountToFree)
+		freed, err := im.freeSpace(amountToFree, time.Now())
+		if err != nil {
+			return err
+		}
+
+		if freed < amountToFree {
+			err := fmt.Errorf("failed to garbage collect required amount of images. Wanted to free %d, but freed %d", amountToFree, freed)
+			im.recorder.Eventf(im.nodeRef, api.EventTypeWarning, container.FreeDiskSpaceFailed, err.Error())
+			return err
+		}
+	}
+
+	return nil
+}
+
+// Tries to free bytesToFree worth of images on the disk.
+//
+// Returns the number of bytes freed and an error if any occurred. The number
+// of bytes freed is always returned.
+// Note that the error may be nil and the number of bytes freed may be less
+// than bytesToFree.
+func (im *realImageManager) freeSpace(bytesToFree int64, freeTime time.Time) (int64, error) {
+	err := im.detectImages(freeTime)
+	if err != nil {
+		return 0, err
+	}
+
+	im.imageRecordsLock.Lock()
+	defer im.imageRecordsLock.Unlock()
+
+	// Get all images in eviction order.
+	images := make([]evictionInfo, 0, len(im.imageRecords))
+	for image, record := range im.imageRecords {
+		images = append(images, evictionInfo{
+			id:          image,
+			imageRecord: *record,
+		})
+	}
+	sort.Sort(byLastUsedAndDetected(images))
+
+	// Delete unused images until we've freed up enough space.
+	var lastErr error
+	spaceFreed := int64(0)
+	for _, image := range images {
+		glog.V(5).Infof("Evaluating image ID %s for possible garbage collection", image.id)
+		// Images that are currently in use were given a newer lastUsed.
+		if image.lastUsed.Equal(freeTime) || image.lastUsed.After(freeTime) {
+			glog.V(5).Infof("Image ID %s has lastUsed=%v which is >= freeTime=%v, not eligible for garbage collection", image.id, image.lastUsed, freeTime)
+			break
+		}
+
+		// Avoid garbage collecting the image if it is not old enough.
+		// In such a case, the image may have just been pulled down, and will be
+		// used by a container right away.
+		if freeTime.Sub(image.firstDetected) < im.policy.MinAge {
+			glog.V(5).Infof("Image ID %s has age %v which is less than the policy's minAge of %v, not eligible for garbage collection", image.id, freeTime.Sub(image.firstDetected), im.policy.MinAge)
+			continue
+		}
+
+		// Remove the image. Continue despite errors.
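+		// (A failed removal only records lastErr and moves on, so one bad image
+		// does not block eviction of the remaining candidates.)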
+ glog.Infof("[ImageManager]: Removing image %q to free %d bytes", image.id, image.size) + err := im.runtime.RemoveImage(container.ImageSpec{Image: image.id}) + if err != nil { + lastErr = err + continue + } + delete(im.imageRecords, image.id) + spaceFreed += image.size + + if spaceFreed >= bytesToFree { + break + } + } + + return spaceFreed, lastErr +} + +type evictionInfo struct { + id string + imageRecord +} + +type byLastUsedAndDetected []evictionInfo + +func (ev byLastUsedAndDetected) Len() int { return len(ev) } +func (ev byLastUsedAndDetected) Swap(i, j int) { ev[i], ev[j] = ev[j], ev[i] } +func (ev byLastUsedAndDetected) Less(i, j int) bool { + // Sort by last used, break ties by detected. + if ev[i].lastUsed.Equal(ev[j].lastUsed) { + return ev[i].firstDetected.Before(ev[j].firstDetected) + } else { + return ev[i].lastUsed.Before(ev[j].lastUsed) + } +} + +func isImageUsed(image container.Image, imagesInUse sets.String) bool { + // Check the image ID and all the RepoTags and RepoDigests. + if _, ok := imagesInUse[image.ID]; ok { + return true + } + for _, tag := range append(image.RepoTags, image.RepoDigests...) { + if _, ok := imagesInUse[tag]; ok { + return true + } + } + return false +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/image_manager_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/image_manager_test.go new file mode 100644 index 000000000000..cee3fb1bf0e7 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/image_manager_test.go @@ -0,0 +1,479 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubelet + +import ( + "fmt" + "testing" + "time" + + cadvisorapiv2 "github.com/google/cadvisor/info/v2" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "k8s.io/kubernetes/pkg/client/record" + cadvisortest "k8s.io/kubernetes/pkg/kubelet/cadvisor/testing" + "k8s.io/kubernetes/pkg/kubelet/container" + containertest "k8s.io/kubernetes/pkg/kubelet/container/testing" + "k8s.io/kubernetes/pkg/util" +) + +var zero time.Time + +func newRealImageManager(policy ImageGCPolicy) (*realImageManager, *containertest.FakeRuntime, *cadvisortest.Mock) { + fakeRuntime := &containertest.FakeRuntime{} + mockCadvisor := new(cadvisortest.Mock) + return &realImageManager{ + runtime: fakeRuntime, + policy: policy, + imageRecords: make(map[string]*imageRecord), + cadvisor: mockCadvisor, + recorder: &record.FakeRecorder{}, + }, fakeRuntime, mockCadvisor +} + +// Accessors used for thread-safe testing. +func (im *realImageManager) imageRecordsLen() int { + im.imageRecordsLock.Lock() + defer im.imageRecordsLock.Unlock() + return len(im.imageRecords) +} +func (im *realImageManager) getImageRecord(name string) (*imageRecord, bool) { + im.imageRecordsLock.Lock() + defer im.imageRecordsLock.Unlock() + v, ok := im.imageRecords[name] + vCopy := *v + return &vCopy, ok +} + +// Returns the name of the image with the given ID. 
+func imageName(id int) string { + return fmt.Sprintf("image-%d", id) +} + +// Make an image with the specified ID. +func makeImage(id int, size int64) container.Image { + return container.Image{ + ID: imageName(id), + Size: size, + } +} + +// Make a container with the specified ID. It will use the image with the same ID. +func makeContainer(id int) *container.Container { + return &container.Container{ + ID: container.ContainerID{Type: "test", ID: fmt.Sprintf("container-%d", id)}, + Image: imageName(id), + } +} + +func TestDetectImagesInitialDetect(t *testing.T) { + manager, fakeRuntime, _ := newRealImageManager(ImageGCPolicy{}) + fakeRuntime.ImageList = []container.Image{ + makeImage(0, 1024), + makeImage(1, 2048), + } + fakeRuntime.AllPodList = []*container.Pod{ + { + Containers: []*container.Container{ + makeContainer(1), + }, + }, + } + + startTime := time.Now().Add(-time.Millisecond) + err := manager.detectImages(zero) + assert := assert.New(t) + require.NoError(t, err) + assert.Equal(manager.imageRecordsLen(), 2) + noContainer, ok := manager.getImageRecord(imageName(0)) + require.True(t, ok) + assert.Equal(zero, noContainer.firstDetected) + assert.Equal(zero, noContainer.lastUsed) + withContainer, ok := manager.getImageRecord(imageName(1)) + require.True(t, ok) + assert.Equal(zero, withContainer.firstDetected) + assert.True(withContainer.lastUsed.After(startTime)) +} + +func TestDetectImagesWithNewImage(t *testing.T) { + // Just one image initially. + manager, fakeRuntime, _ := newRealImageManager(ImageGCPolicy{}) + fakeRuntime.ImageList = []container.Image{ + makeImage(0, 1024), + makeImage(1, 2048), + } + fakeRuntime.AllPodList = []*container.Pod{ + { + Containers: []*container.Container{ + makeContainer(1), + }, + }, + } + + err := manager.detectImages(zero) + assert := assert.New(t) + require.NoError(t, err) + assert.Equal(manager.imageRecordsLen(), 2) + + // Add a new image. + fakeRuntime.ImageList = []container.Image{ + makeImage(0, 1024), + makeImage(1, 1024), + makeImage(2, 1024), + } + + detectedTime := zero.Add(time.Second) + startTime := time.Now().Add(-time.Millisecond) + err = manager.detectImages(detectedTime) + require.NoError(t, err) + assert.Equal(manager.imageRecordsLen(), 3) + noContainer, ok := manager.getImageRecord(imageName(0)) + require.True(t, ok) + assert.Equal(zero, noContainer.firstDetected) + assert.Equal(zero, noContainer.lastUsed) + withContainer, ok := manager.getImageRecord(imageName(1)) + require.True(t, ok) + assert.Equal(zero, withContainer.firstDetected) + assert.True(withContainer.lastUsed.After(startTime)) + newContainer, ok := manager.getImageRecord(imageName(2)) + require.True(t, ok) + assert.Equal(detectedTime, newContainer.firstDetected) + assert.Equal(zero, noContainer.lastUsed) +} + +func TestDetectImagesContainerStopped(t *testing.T) { + manager, fakeRuntime, _ := newRealImageManager(ImageGCPolicy{}) + fakeRuntime.ImageList = []container.Image{ + makeImage(0, 1024), + makeImage(1, 2048), + } + fakeRuntime.AllPodList = []*container.Pod{ + { + Containers: []*container.Container{ + makeContainer(1), + }, + }, + } + + err := manager.detectImages(zero) + assert := assert.New(t) + require.NoError(t, err) + assert.Equal(manager.imageRecordsLen(), 2) + withContainer, ok := manager.getImageRecord(imageName(1)) + require.True(t, ok) + + // Simulate container being stopped. 
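+	// The image's record should survive the container going away: only the
+	// in-use set changes, so lastUsed must keep the value captured above.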
+ fakeRuntime.AllPodList = []*container.Pod{} + err = manager.detectImages(time.Now()) + require.NoError(t, err) + assert.Equal(manager.imageRecordsLen(), 2) + container1, ok := manager.getImageRecord(imageName(0)) + require.True(t, ok) + assert.Equal(zero, container1.firstDetected) + assert.Equal(zero, container1.lastUsed) + container2, ok := manager.getImageRecord(imageName(1)) + require.True(t, ok) + assert.Equal(zero, container2.firstDetected) + assert.True(container2.lastUsed.Equal(withContainer.lastUsed)) +} + +func TestDetectImagesWithRemovedImages(t *testing.T) { + manager, fakeRuntime, _ := newRealImageManager(ImageGCPolicy{}) + fakeRuntime.ImageList = []container.Image{ + makeImage(0, 1024), + makeImage(1, 2048), + } + fakeRuntime.AllPodList = []*container.Pod{ + { + Containers: []*container.Container{ + makeContainer(1), + }, + }, + } + + err := manager.detectImages(zero) + assert := assert.New(t) + require.NoError(t, err) + assert.Equal(manager.imageRecordsLen(), 2) + + // Simulate both images being removed. + fakeRuntime.ImageList = []container.Image{} + err = manager.detectImages(time.Now()) + require.NoError(t, err) + assert.Equal(manager.imageRecordsLen(), 0) +} + +func TestFreeSpaceImagesInUseContainersAreIgnored(t *testing.T) { + manager, fakeRuntime, _ := newRealImageManager(ImageGCPolicy{}) + fakeRuntime.ImageList = []container.Image{ + makeImage(0, 1024), + makeImage(1, 2048), + } + fakeRuntime.AllPodList = []*container.Pod{ + { + Containers: []*container.Container{ + makeContainer(1), + }, + }, + } + + spaceFreed, err := manager.freeSpace(2048, time.Now()) + assert := assert.New(t) + require.NoError(t, err) + assert.EqualValues(1024, spaceFreed) + assert.Len(fakeRuntime.ImageList, 1) +} + +func TestFreeSpaceRemoveByLeastRecentlyUsed(t *testing.T) { + manager, fakeRuntime, _ := newRealImageManager(ImageGCPolicy{}) + fakeRuntime.ImageList = []container.Image{ + makeImage(0, 1024), + makeImage(1, 2048), + } + fakeRuntime.AllPodList = []*container.Pod{ + { + Containers: []*container.Container{ + makeContainer(0), + makeContainer(1), + }, + }, + } + + // Make 1 be more recently used than 0. + require.NoError(t, manager.detectImages(zero)) + fakeRuntime.AllPodList = []*container.Pod{ + { + Containers: []*container.Container{ + makeContainer(1), + }, + }, + } + require.NoError(t, manager.detectImages(time.Now())) + fakeRuntime.AllPodList = []*container.Pod{ + { + Containers: []*container.Container{}, + }, + } + require.NoError(t, manager.detectImages(time.Now())) + require.Equal(t, manager.imageRecordsLen(), 2) + + spaceFreed, err := manager.freeSpace(1024, time.Now()) + assert := assert.New(t) + require.NoError(t, err) + assert.EqualValues(1024, spaceFreed) + assert.Len(fakeRuntime.ImageList, 1) +} + +func TestFreeSpaceTiesBrokenByDetectedTime(t *testing.T) { + manager, fakeRuntime, _ := newRealImageManager(ImageGCPolicy{}) + fakeRuntime.ImageList = []container.Image{ + makeImage(0, 1024), + } + fakeRuntime.AllPodList = []*container.Pod{ + { + Containers: []*container.Container{ + makeContainer(0), + }, + }, + } + + // Make 1 more recently detected but used at the same time as 0. 
+ require.NoError(t, manager.detectImages(zero)) + fakeRuntime.ImageList = []container.Image{ + makeImage(0, 1024), + makeImage(1, 2048), + } + require.NoError(t, manager.detectImages(time.Now())) + fakeRuntime.AllPodList = []*container.Pod{} + require.NoError(t, manager.detectImages(time.Now())) + require.Equal(t, manager.imageRecordsLen(), 2) + + spaceFreed, err := manager.freeSpace(1024, time.Now()) + assert := assert.New(t) + require.NoError(t, err) + assert.EqualValues(2048, spaceFreed) + assert.Len(fakeRuntime.ImageList, 1) +} + +func TestFreeSpaceImagesAlsoDoesLookupByRepoTags(t *testing.T) { + manager, fakeRuntime, _ := newRealImageManager(ImageGCPolicy{}) + fakeRuntime.ImageList = []container.Image{ + makeImage(0, 1024), + { + ID: "5678", + RepoTags: []string{"potato", "salad"}, + Size: 2048, + }, + } + fakeRuntime.AllPodList = []*container.Pod{ + { + Containers: []*container.Container{ + { + ID: container.ContainerID{Type: "test", ID: "c5678"}, + Image: "salad", + }, + }, + }, + } + + spaceFreed, err := manager.freeSpace(1024, time.Now()) + assert := assert.New(t) + require.NoError(t, err) + assert.EqualValues(1024, spaceFreed) + assert.Len(fakeRuntime.ImageList, 1) +} + +func TestFreeSpaceImagesAlsoDoesLookupByRepoDigests(t *testing.T) { + manager, fakeRuntime, _ := newRealImageManager(ImageGCPolicy{}) + fakeRuntime.ImageList = []container.Image{ + makeImage(0, 1024), + { + ID: "5678", + RepoDigests: []string{"potato", "salad"}, + Size: 2048, + }, + } + fakeRuntime.AllPodList = []*container.Pod{ + { + Containers: []*container.Container{ + { + ID: container.ContainerID{Type: "test", ID: "c5678"}, + Image: "salad", + }, + }, + }, + } + + spaceFreed, err := manager.freeSpace(1024, time.Now()) + assert := assert.New(t) + require.NoError(t, err) + assert.EqualValues(1024, spaceFreed) + assert.Len(fakeRuntime.ImageList, 1) +} + +func TestGarbageCollectBelowLowThreshold(t *testing.T) { + policy := ImageGCPolicy{ + HighThresholdPercent: 90, + LowThresholdPercent: 80, + } + manager, _, mockCadvisor := newRealImageManager(policy) + + // Expect 40% usage. + mockCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{ + Usage: 400, + Capacity: 1000, + }, nil) + + assert.NoError(t, manager.GarbageCollect()) +} + +func TestGarbageCollectCadvisorFailure(t *testing.T) { + policy := ImageGCPolicy{ + HighThresholdPercent: 90, + LowThresholdPercent: 80, + } + manager, _, mockCadvisor := newRealImageManager(policy) + + mockCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, fmt.Errorf("error")) + assert.NotNil(t, manager.GarbageCollect()) +} + +func TestGarbageCollectBelowSuccess(t *testing.T) { + policy := ImageGCPolicy{ + HighThresholdPercent: 90, + LowThresholdPercent: 80, + } + manager, fakeRuntime, mockCadvisor := newRealImageManager(policy) + + // Expect 95% usage and most of it gets freed. + mockCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{ + Usage: 950, + Capacity: 1000, + }, nil) + fakeRuntime.ImageList = []container.Image{ + makeImage(0, 450), + } + + assert.NoError(t, manager.GarbageCollect()) +} + +func TestGarbageCollectNotEnoughFreed(t *testing.T) { + policy := ImageGCPolicy{ + HighThresholdPercent: 90, + LowThresholdPercent: 80, + } + manager, fakeRuntime, mockCadvisor := newRealImageManager(policy) + + // Expect 95% usage and little of it gets freed. 
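+	// (With usage=950 and LowThresholdPercent=80, GarbageCollect must free 150
+	// bytes, but the only image is 50 bytes, so it returns an error and records
+	// a FreeDiskSpaceFailed event.)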
+	mockCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{
+		Usage:    950,
+		Capacity: 1000,
+	}, nil)
+	fakeRuntime.ImageList = []container.Image{
+		makeImage(0, 50),
+	}
+
+	assert.NotNil(t, manager.GarbageCollect())
+}
+
+func TestGarbageCollectImageNotOldEnough(t *testing.T) {
+	policy := ImageGCPolicy{
+		HighThresholdPercent: 90,
+		LowThresholdPercent:  80,
+		MinAge:               time.Minute * 1,
+	}
+	fakeRuntime := &containertest.FakeRuntime{}
+	mockCadvisor := new(cadvisortest.Mock)
+	manager := &realImageManager{
+		runtime:      fakeRuntime,
+		policy:       policy,
+		imageRecords: make(map[string]*imageRecord),
+		cadvisor:     mockCadvisor,
+		recorder:     &record.FakeRecorder{},
+	}
+
+	fakeRuntime.ImageList = []container.Image{
+		makeImage(0, 1024),
+		makeImage(1, 2048),
+	}
+	// One image is in use, and the other is not old enough.
+	fakeRuntime.AllPodList = []*container.Pod{
+		{
+			Containers: []*container.Container{
+				makeContainer(1),
+			},
+		},
+	}
+
+	fakeClock := util.NewFakeClock(time.Now())
+	t.Log(fakeClock.Now())
+	require.NoError(t, manager.detectImages(fakeClock.Now()))
+	require.Equal(t, manager.imageRecordsLen(), 2)
+	// No space is freed since one image is in use and the other is not old enough.
+	spaceFreed, err := manager.freeSpace(1024, fakeClock.Now())
+	assert := assert.New(t)
+	require.NoError(t, err)
+	assert.EqualValues(0, spaceFreed)
+	assert.Len(fakeRuntime.ImageList, 2)
+
+	// Move the clock forward by MinAge; then one image can be garbage collected.
+	fakeClock.Step(policy.MinAge)
+	spaceFreed, err = manager.freeSpace(1024, fakeClock.Now())
+	require.NoError(t, err)
+	assert.EqualValues(1024, spaceFreed)
+	assert.Len(fakeRuntime.ImageList, 1)
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/kubelet.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/kubelet.go
new file mode 100644
index 000000000000..167831cb0eed
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/kubelet.go
@@ -0,0 +1,3935 @@
+/*
+Copyright 2015 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package kubelet + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "math" + "net" + "net/http" + "os" + "path" + "path/filepath" + goRuntime "runtime" + "sort" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/golang/glog" + cadvisorapi "github.com/google/cadvisor/info/v1" + "k8s.io/kubernetes/pkg/api" + apierrors "k8s.io/kubernetes/pkg/api/errors" + utilpod "k8s.io/kubernetes/pkg/api/pod" + "k8s.io/kubernetes/pkg/api/resource" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/api/validation" + "k8s.io/kubernetes/pkg/apis/componentconfig" + "k8s.io/kubernetes/pkg/client/cache" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" + "k8s.io/kubernetes/pkg/client/record" + "k8s.io/kubernetes/pkg/cloudprovider" + "k8s.io/kubernetes/pkg/fieldpath" + "k8s.io/kubernetes/pkg/fields" + "k8s.io/kubernetes/pkg/kubelet/cadvisor" + "k8s.io/kubernetes/pkg/kubelet/cm" + "k8s.io/kubernetes/pkg/kubelet/config" + kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" + "k8s.io/kubernetes/pkg/kubelet/dockertools" + "k8s.io/kubernetes/pkg/kubelet/envvars" + "k8s.io/kubernetes/pkg/kubelet/eviction" + "k8s.io/kubernetes/pkg/kubelet/lifecycle" + "k8s.io/kubernetes/pkg/kubelet/metrics" + "k8s.io/kubernetes/pkg/kubelet/network" + "k8s.io/kubernetes/pkg/kubelet/pleg" + kubepod "k8s.io/kubernetes/pkg/kubelet/pod" + "k8s.io/kubernetes/pkg/kubelet/prober" + proberesults "k8s.io/kubernetes/pkg/kubelet/prober/results" + "k8s.io/kubernetes/pkg/kubelet/rkt" + "k8s.io/kubernetes/pkg/kubelet/server" + "k8s.io/kubernetes/pkg/kubelet/server/stats" + "k8s.io/kubernetes/pkg/kubelet/status" + kubetypes "k8s.io/kubernetes/pkg/kubelet/types" + "k8s.io/kubernetes/pkg/kubelet/util/format" + "k8s.io/kubernetes/pkg/kubelet/util/ioutils" + "k8s.io/kubernetes/pkg/kubelet/util/queue" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/securitycontext" + "k8s.io/kubernetes/pkg/types" + "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/bandwidth" + utilerrors "k8s.io/kubernetes/pkg/util/errors" + utilexec "k8s.io/kubernetes/pkg/util/exec" + "k8s.io/kubernetes/pkg/util/flowcontrol" + kubeio "k8s.io/kubernetes/pkg/util/io" + "k8s.io/kubernetes/pkg/util/mount" + utilnet "k8s.io/kubernetes/pkg/util/net" + "k8s.io/kubernetes/pkg/util/oom" + "k8s.io/kubernetes/pkg/util/procfs" + utilruntime "k8s.io/kubernetes/pkg/util/runtime" + "k8s.io/kubernetes/pkg/util/selinux" + "k8s.io/kubernetes/pkg/util/sets" + utilvalidation "k8s.io/kubernetes/pkg/util/validation" + "k8s.io/kubernetes/pkg/util/validation/field" + "k8s.io/kubernetes/pkg/util/wait" + "k8s.io/kubernetes/pkg/version" + "k8s.io/kubernetes/pkg/volume" + "k8s.io/kubernetes/pkg/watch" + "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates" + "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" + "k8s.io/kubernetes/third_party/golang/expansion" +) + +const ( + // Max amount of time to wait for the container runtime to come up. + maxWaitForContainerRuntime = 5 * time.Minute + + // nodeStatusUpdateRetry specifies how many times kubelet retries when posting node status failed. + nodeStatusUpdateRetry = 5 + + // Location of container logs. + containerLogsDir = "/var/log/containers" + + // max backoff period, exported for the e2e test + MaxContainerBackOff = 300 * time.Second + + // Capacity of the channel for storing pods to kill. A small number should + // suffice because a goroutine is dedicated to check the channel and does + // not block on anything else. 
+	podKillingChannelCapacity = 50
+
+	// Period for performing global cleanup tasks.
+	housekeepingPeriod = time.Second * 2
+
+	// Period for performing eviction monitoring.
+	// TODO ensure this is in sync with internal cadvisor housekeeping.
+	evictionMonitoringPeriod = time.Second * 10
+
+	// The path in containers' filesystems where the hosts file is mounted.
+	etcHostsPath = "/etc/hosts"
+
+	// Capacity of the channel for receiving pod lifecycle events. This number
+	// is a bit arbitrary and may be adjusted in the future.
+	plegChannelCapacity = 1000
+
+	// Generic PLEG relies on relisting for discovering container events.
+	// A longer period means that kubelet will take longer to detect container
+	// changes and to update pod status. On the other hand, a shorter period
+	// will cause more frequent relisting (e.g., container runtime operations),
+	// leading to higher cpu usage.
+	// Note that even though we set the period to 1s, the relisting itself can
+	// take more than 1s to finish if the container runtime responds slowly
+	// and/or when there are many container changes in one cycle.
+	plegRelistPeriod = time.Second * 1
+
+	// backOffPeriod is the period to back off when pod syncing results in an
+	// error. It is also used as the base period for the exponential backoff
+	// of container restarts and image pulls.
+	backOffPeriod = time.Second * 10
+
+	// Period for performing container garbage collection.
+	ContainerGCPeriod = time.Minute
+	// Period for performing image garbage collection.
+	ImageGCPeriod = 5 * time.Minute
+
+	// Maximum period to wait for pod volume setup operations
+	maxWaitForVolumeOps = 20 * time.Minute
+
+	// maxImagesInNodeStatus is the maximum number of images we store in a node's status.
+	maxImagesInNodeStatus = 50
+)
+
+// SyncHandler is an interface implemented by Kubelet, for testability
+type SyncHandler interface {
+	HandlePodAdditions(pods []*api.Pod)
+	HandlePodUpdates(pods []*api.Pod)
+	HandlePodDeletions(pods []*api.Pod)
+	HandlePodReconcile(pods []*api.Pod)
+	HandlePodSyncs(pods []*api.Pod)
+	HandlePodCleanups() error
+}
+
+// Option is a functional option type for Kubelet
+type Option func(*Kubelet)
+
+// NewMainKubelet instantiates a new Kubelet object along with all the required internal modules.
+// No initialization of Kubelet and its modules should happen here.
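+// All collaborators (container runtime, cadvisor, event recorder, and so on)
+// are passed in explicitly, which is what lets tests substitute fakes for them.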
+func NewMainKubelet( + hostname string, + nodeName string, + dockerClient dockertools.DockerInterface, + kubeClient clientset.Interface, + rootDirectory string, + seccompProfileRoot string, + podInfraContainerImage string, + resyncInterval time.Duration, + pullQPS float32, + pullBurst int, + eventQPS float32, + eventBurst int, + containerGCPolicy kubecontainer.ContainerGCPolicy, + sourcesReadyFn config.SourcesReadyFn, + registerNode bool, + registerSchedulable bool, + standaloneMode bool, + clusterDomain string, + clusterDNS net.IP, + masterServiceNamespace string, + volumePlugins []volume.VolumePlugin, + networkPlugins []network.NetworkPlugin, + networkPluginName string, + streamingConnectionIdleTimeout time.Duration, + recorder record.EventRecorder, + cadvisorInterface cadvisor.Interface, + imageGCPolicy ImageGCPolicy, + diskSpacePolicy DiskSpacePolicy, + cloud cloudprovider.Interface, + nodeLabels map[string]string, + nodeStatusUpdateFrequency time.Duration, + osInterface kubecontainer.OSInterface, + cgroupRoot string, + containerRuntime string, + rktPath string, + rktAPIEndpoint string, + rktStage1Image string, + mounter mount.Interface, + writer kubeio.Writer, + configureCBR0 bool, + nonMasqueradeCIDR string, + podCIDR string, + reconcileCIDR bool, + maxPods int, + podsPerCore int, + nvidiaGPUs int, + dockerExecHandler dockertools.ExecHandler, + resolverConfig string, + cpuCFSQuota bool, + daemonEndpoints *api.NodeDaemonEndpoints, + oomAdjuster *oom.OOMAdjuster, + serializeImagePulls bool, + containerManager cm.ContainerManager, + outOfDiskTransitionFrequency time.Duration, + flannelExperimentalOverlay bool, + nodeIP net.IP, + reservation kubetypes.Reservation, + enableCustomMetrics bool, + volumeStatsAggPeriod time.Duration, + containerRuntimeOptions []kubecontainer.Option, + hairpinMode string, + babysitDaemons bool, + evictionConfig eviction.Config, + kubeOptions []Option, +) (*Kubelet, error) { + if rootDirectory == "" { + return nil, fmt.Errorf("invalid root directory %q", rootDirectory) + } + if resyncInterval <= 0 { + return nil, fmt.Errorf("invalid sync frequency %d", resyncInterval) + } + + serviceStore := cache.NewStore(cache.MetaNamespaceKeyFunc) + if kubeClient != nil { + // TODO: cache.NewListWatchFromClient is limited as it takes a client implementation rather + // than an interface. There is no way to construct a list+watcher using resource name. + listWatch := &cache.ListWatch{ + ListFunc: func(options api.ListOptions) (runtime.Object, error) { + return kubeClient.Core().Services(api.NamespaceAll).List(options) + }, + WatchFunc: func(options api.ListOptions) (watch.Interface, error) { + return kubeClient.Core().Services(api.NamespaceAll).Watch(options) + }, + } + cache.NewReflector(listWatch, &api.Service{}, serviceStore, 0).Run() + } + serviceLister := &cache.StoreToServiceLister{Store: serviceStore} + + nodeStore := cache.NewStore(cache.MetaNamespaceKeyFunc) + if kubeClient != nil { + // TODO: cache.NewListWatchFromClient is limited as it takes a client implementation rather + // than an interface. There is no way to construct a list+watcher using resource name. 
+ fieldSelector := fields.Set{api.ObjectNameField: nodeName}.AsSelector() + listWatch := &cache.ListWatch{ + ListFunc: func(options api.ListOptions) (runtime.Object, error) { + options.FieldSelector = fieldSelector + return kubeClient.Core().Nodes().List(options) + }, + WatchFunc: func(options api.ListOptions) (watch.Interface, error) { + options.FieldSelector = fieldSelector + return kubeClient.Core().Nodes().Watch(options) + }, + } + cache.NewReflector(listWatch, &api.Node{}, nodeStore, 0).Run() + } + nodeLister := &cache.StoreToNodeLister{Store: nodeStore} + nodeInfo := &predicates.CachedNodeInfo{StoreToNodeLister: nodeLister} + + // TODO: get the real node object of ourself, + // and use the real node name and UID. + // TODO: what is namespace for node? + nodeRef := &api.ObjectReference{ + Kind: "Node", + Name: nodeName, + UID: types.UID(nodeName), + Namespace: "", + } + + diskSpaceManager, err := newDiskSpaceManager(cadvisorInterface, diskSpacePolicy) + if err != nil { + return nil, fmt.Errorf("failed to initialize disk manager: %v", err) + } + containerRefManager := kubecontainer.NewRefManager() + + volumeManager := newVolumeManager() + + oomWatcher := NewOOMWatcher(cadvisorInterface, recorder) + + // TODO: remove when internal cbr0 implementation gets removed in favor + // of the kubenet network plugin + if networkPluginName == "kubenet" { + configureCBR0 = false + flannelExperimentalOverlay = false + } + + klet := &Kubelet{ + hostname: hostname, + nodeName: nodeName, + dockerClient: dockerClient, + kubeClient: kubeClient, + rootDirectory: rootDirectory, + resyncInterval: resyncInterval, + containerRefManager: containerRefManager, + httpClient: &http.Client{}, + sourcesReady: config.NewSourcesReady(sourcesReadyFn), + registerNode: registerNode, + registerSchedulable: registerSchedulable, + standaloneMode: standaloneMode, + clusterDomain: clusterDomain, + clusterDNS: clusterDNS, + serviceLister: serviceLister, + nodeLister: nodeLister, + nodeInfo: nodeInfo, + masterServiceNamespace: masterServiceNamespace, + streamingConnectionIdleTimeout: streamingConnectionIdleTimeout, + recorder: recorder, + cadvisor: cadvisorInterface, + diskSpaceManager: diskSpaceManager, + volumeManager: volumeManager, + cloud: cloud, + nodeRef: nodeRef, + nodeLabels: nodeLabels, + nodeStatusUpdateFrequency: nodeStatusUpdateFrequency, + os: osInterface, + oomWatcher: oomWatcher, + cgroupRoot: cgroupRoot, + mounter: mounter, + writer: writer, + configureCBR0: configureCBR0, + nonMasqueradeCIDR: nonMasqueradeCIDR, + reconcileCIDR: reconcileCIDR, + maxPods: maxPods, + podsPerCore: podsPerCore, + nvidiaGPUs: nvidiaGPUs, + syncLoopMonitor: atomic.Value{}, + resolverConfig: resolverConfig, + cpuCFSQuota: cpuCFSQuota, + daemonEndpoints: daemonEndpoints, + containerManager: containerManager, + flannelExperimentalOverlay: flannelExperimentalOverlay, + flannelHelper: nil, + nodeIP: nodeIP, + clock: util.RealClock{}, + outOfDiskTransitionFrequency: outOfDiskTransitionFrequency, + reservation: reservation, + enableCustomMetrics: enableCustomMetrics, + babysitDaemons: babysitDaemons, + } + + if klet.flannelExperimentalOverlay { + klet.flannelHelper = NewFlannelHelper() + glog.Infof("Flannel is in charge of podCIDR and overlay networking.") + } + if klet.nodeIP != nil { + if err := klet.validateNodeIP(); err != nil { + return nil, err + } + glog.Infof("Using node IP: %q", klet.nodeIP.String()) + } + + if mode, err := effectiveHairpinMode(componentconfig.HairpinMode(hairpinMode), containerRuntime, configureCBR0, 
networkPluginName); err != nil { + // This is a non-recoverable error. Returning it up the callstack will just + // lead to retries of the same failure, so just fail hard. + glog.Fatalf("Invalid hairpin mode: %v", err) + } else { + klet.hairpinMode = mode + } + glog.Infof("Hairpin mode set to %q", klet.hairpinMode) + + if plug, err := network.InitNetworkPlugin(networkPlugins, networkPluginName, &networkHost{klet}, klet.hairpinMode); err != nil { + return nil, err + } else { + klet.networkPlugin = plug + } + + machineInfo, err := klet.GetCachedMachineInfo() + if err != nil { + return nil, err + } + + procFs := procfs.NewProcFS() + imageBackOff := flowcontrol.NewBackOff(backOffPeriod, MaxContainerBackOff) + + klet.livenessManager = proberesults.NewManager() + + klet.podCache = kubecontainer.NewCache() + klet.podManager = kubepod.NewBasicPodManager(kubepod.NewBasicMirrorClient(klet.kubeClient)) + + // Initialize the runtime. + switch containerRuntime { + case "docker": + // Only supported one for now, continue. + klet.containerRuntime = dockertools.NewDockerManager( + dockerClient, + kubecontainer.FilterEventRecorder(recorder), + klet.livenessManager, + containerRefManager, + klet.podManager, + machineInfo, + podInfraContainerImage, + pullQPS, + pullBurst, + containerLogsDir, + osInterface, + klet.networkPlugin, + klet, + klet.httpClient, + dockerExecHandler, + oomAdjuster, + procFs, + klet.cpuCFSQuota, + imageBackOff, + serializeImagePulls, + enableCustomMetrics, + klet.hairpinMode == componentconfig.HairpinVeth, + seccompProfileRoot, + containerRuntimeOptions..., + ) + case "rkt": + // TODO: Include hairpin mode settings in rkt? + conf := &rkt.Config{ + Path: rktPath, + Stage1Image: rktStage1Image, + InsecureOptions: "image,ondisk", + } + rktRuntime, err := rkt.New( + rktAPIEndpoint, + conf, + klet, + recorder, + containerRefManager, + klet.podManager, + klet.livenessManager, + klet.httpClient, + klet.networkPlugin, + klet.hairpinMode == componentconfig.HairpinVeth, + utilexec.New(), + kubecontainer.RealOS{}, + imageBackOff, + serializeImagePulls, + ) + if err != nil { + return nil, err + } + klet.containerRuntime = rktRuntime + default: + return nil, fmt.Errorf("unsupported container runtime %q specified", containerRuntime) + } + + // TODO: Factor out "StatsProvider" from Kubelet so we don't have a cyclic dependency + klet.resourceAnalyzer = stats.NewResourceAnalyzer(klet, volumeStatsAggPeriod, klet.containerRuntime) + + klet.pleg = pleg.NewGenericPLEG(klet.containerRuntime, plegChannelCapacity, plegRelistPeriod, klet.podCache, util.RealClock{}) + klet.runtimeState = newRuntimeState(maxWaitForContainerRuntime) + klet.updatePodCIDR(podCIDR) + + // setup containerGC + containerGC, err := kubecontainer.NewContainerGC(klet.containerRuntime, containerGCPolicy) + if err != nil { + return nil, err + } + klet.containerGC = containerGC + + // setup imageManager + imageManager, err := newImageManager(klet.containerRuntime, cadvisorInterface, recorder, nodeRef, imageGCPolicy) + if err != nil { + return nil, fmt.Errorf("failed to initialize image manager: %v", err) + } + klet.imageManager = imageManager + + klet.runner = klet.containerRuntime + klet.statusManager = status.NewManager(kubeClient, klet.podManager) + + klet.probeManager = prober.NewManager( + klet.statusManager, + klet.livenessManager, + klet.runner, + containerRefManager, + recorder) + + if err := klet.volumePluginMgr.InitPlugins(volumePlugins, &volumeHost{klet}); err != nil { + return nil, err + } + + runtimeCache, err := 
kubecontainer.NewRuntimeCache(klet.containerRuntime) + if err != nil { + return nil, err + } + klet.runtimeCache = runtimeCache + klet.reasonCache = NewReasonCache() + klet.workQueue = queue.NewBasicWorkQueue(klet.clock) + klet.podWorkers = newPodWorkers(klet.syncPod, recorder, klet.workQueue, klet.resyncInterval, backOffPeriod, klet.podCache) + + klet.backOff = flowcontrol.NewBackOff(backOffPeriod, MaxContainerBackOff) + klet.podKillingCh = make(chan *kubecontainer.PodPair, podKillingChannelCapacity) + klet.setNodeStatusFuncs = klet.defaultNodeStatusFuncs() + + // setup eviction manager + evictionManager, evictionAdmitHandler, err := eviction.NewManager(klet.resourceAnalyzer, evictionConfig, killPodNow(klet.podWorkers), recorder, nodeRef, klet.clock) + if err != nil { + return nil, fmt.Errorf("failed to initialize eviction manager: %v", err) + } + klet.evictionManager = evictionManager + klet.AddPodAdmitHandler(evictionAdmitHandler) + + // apply functional Option's + for _, opt := range kubeOptions { + opt(klet) + } + return klet, nil +} + +// effectiveHairpinMode determines the effective hairpin mode given the +// configured mode, container runtime, and whether cbr0 should be configured. +func effectiveHairpinMode(hairpinMode componentconfig.HairpinMode, containerRuntime string, configureCBR0 bool, networkPlugin string) (componentconfig.HairpinMode, error) { + // The hairpin mode setting doesn't matter if: + // - We're not using a bridge network. This is hard to check because we might + // be using a plugin. It matters if --configure-cbr0=true, and we currently + // don't pipe it down to any plugins. + // - It's set to hairpin-veth for a container runtime that doesn't know how + // to set the hairpin flag on the veth's of containers. Currently the + // docker runtime is the only one that understands this. + // - It's set to "none". + if hairpinMode == componentconfig.PromiscuousBridge || hairpinMode == componentconfig.HairpinVeth { + // Only on docker. + if containerRuntime != "docker" { + glog.Warningf("Hairpin mode set to %q but container runtime is %q, ignoring", hairpinMode, containerRuntime) + return componentconfig.HairpinNone, nil + } + if hairpinMode == componentconfig.PromiscuousBridge && !configureCBR0 && networkPlugin != "kubenet" { + // This is not a valid combination. Users might be using the + // default values (from before the hairpin-mode flag existed) and we + // should keep the old behavior. + glog.Warningf("Hairpin mode set to %q but configureCBR0 is false, falling back to %q", hairpinMode, componentconfig.HairpinVeth) + return componentconfig.HairpinVeth, nil + } + } else if hairpinMode == componentconfig.HairpinNone { + if configureCBR0 { + glog.Warningf("Hairpin mode set to %q and configureCBR0 is true, this might result in loss of hairpin packets", hairpinMode) + } + } else { + return "", fmt.Errorf("unknown value: %q", hairpinMode) + } + return hairpinMode, nil +} + +type serviceLister interface { + List() (api.ServiceList, error) +} + +type nodeLister interface { + List() (machines api.NodeList, err error) +} + +// Kubelet is the main kubelet implementation. +type Kubelet struct { + hostname string + nodeName string + dockerClient dockertools.DockerInterface + runtimeCache kubecontainer.RuntimeCache + kubeClient clientset.Interface + rootDirectory string + + // podWorkers handle syncing Pods in response to events. + podWorkers PodWorkers + + // resyncInterval is the interval between periodic full reconciliations of + // pods on this node. 
+	resyncInterval time.Duration
+
+	// sourcesReady records the sources seen by the kubelet; it is thread-safe.
+	sourcesReady config.SourcesReady
+
+	// podManager is a facade that abstracts away the various sources of pods
+	// this Kubelet services.
+	podManager kubepod.Manager
+
+	// Needed to observe and respond to situations that could impact node stability
+	evictionManager eviction.Manager
+
+	// Needed to report events for containers belonging to deleted/modified pods.
+	// Tracks references for reporting events
+	containerRefManager *kubecontainer.RefManager
+
+	// Optional, defaults to /logs/ from /var/log
+	logServer http.Handler
+	// Optional, defaults to simple Docker implementation
+	runner kubecontainer.ContainerCommandRunner
+	// Optional, client for http requests, defaults to empty client
+	httpClient kubetypes.HttpGetter
+
+	// cAdvisor used for container information.
+	cadvisor cadvisor.Interface
+
+	// Set to true to have the node register itself with the apiserver.
+	registerNode bool
+	// Set to true to have the node register itself as schedulable.
+	registerSchedulable bool
+	// for internal book keeping; access only from within registerWithApiserver
+	registrationCompleted bool
+
+	// Set to true if the kubelet is in standalone mode (i.e. set up without an apiserver)
+	standaloneMode bool
+
+	// If non-empty, use this for container DNS search.
+	clusterDomain string
+
+	// If non-nil, use this for container DNS server.
+	clusterDNS net.IP
+
+	// masterServiceNamespace is the namespace that the master service is exposed in.
+	masterServiceNamespace string
+	// serviceLister knows how to list services
+	serviceLister serviceLister
+	// nodeLister knows how to list nodes
+	nodeLister nodeLister
+	// nodeInfo knows how to get information about the node for this kubelet.
+	nodeInfo predicates.NodeInfo
+
+	// a list of node labels to register
+	nodeLabels map[string]string
+
+	// Last timestamp when runtime responded on ping.
+	// Mutex is used to protect this value.
+	runtimeState *runtimeState
+
+	// Volume plugins.
+	volumePluginMgr volume.VolumePluginMgr
+
+	// Network plugin.
+	networkPlugin network.NetworkPlugin
+
+	// Handles container probing.
+	probeManager prober.Manager
+	// Manages container health check results.
+	livenessManager proberesults.Manager
+
+	// How long to keep idle streaming command execution/port forwarding
+	// connections open before terminating them
+	streamingConnectionIdleTimeout time.Duration
+
+	// The EventRecorder to use
+	recorder record.EventRecorder
+
+	// Policy for handling garbage collection of dead containers.
+	containerGC kubecontainer.ContainerGC
+
+	// Manager for images.
+	imageManager imageManager
+
+	// Diskspace manager.
+	diskSpaceManager diskSpaceManager
+
+	// Cached MachineInfo returned by cadvisor.
+	machineInfo *cadvisorapi.MachineInfo
+
+	// Syncs pods statuses with apiserver; also used as a cache of statuses.
+	statusManager status.Manager
+
+	// Manager for the volume maps for the pods.
+	volumeManager *volumeManager
+
+	// Cloud provider interface.
+	cloud cloudprovider.Interface
+
+	// Reference to this node.
+	nodeRef *api.ObjectReference
+
+	// Container runtime.
+	containerRuntime kubecontainer.Runtime
+
+	// reasonCache caches the failure reason of the last creation of all containers, which is
+	// used for generating ContainerStatus.
+	reasonCache *ReasonCache
+
+	// nodeStatusUpdateFrequency specifies how often kubelet posts node status to master.
+	// Note: be cautious when changing the constant, it must work with nodeMonitorGracePeriod
+	// in nodecontroller. There are several constraints:
+	// 1. nodeMonitorGracePeriod must be N times more than nodeStatusUpdateFrequency, where
+	// N means number of retries allowed for kubelet to post node status. It is pointless
+	// to make nodeMonitorGracePeriod be less than nodeStatusUpdateFrequency, since there
+	// will only be fresh values from Kubelet at an interval of nodeStatusUpdateFrequency.
+	// The constant must be less than podEvictionTimeout.
+	// 2. nodeStatusUpdateFrequency needs to be large enough for kubelet to generate node
+	// status. Kubelet may fail to update node status reliably if the value is too small,
+	// as it takes time to gather all necessary node information.
+	nodeStatusUpdateFrequency time.Duration
+
+	// Generates pod events.
+	pleg pleg.PodLifecycleEventGenerator
+
+	// Store kubecontainer.PodStatus for all pods.
+	podCache kubecontainer.Cache
+
+	// os is a facade for various syscalls that need to be mocked during testing.
+	os kubecontainer.OSInterface
+
+	// Watcher of out of memory events.
+	oomWatcher OOMWatcher
+
+	// Monitor resource usage
+	resourceAnalyzer stats.ResourceAnalyzer
+
+	// If non-empty, pass this to the container runtime as the root cgroup.
+	cgroupRoot string
+
+	// Mounter to use for volumes.
+	mounter mount.Interface
+
+	// Writer interface to use for volumes.
+	writer kubeio.Writer
+
+	// Manager of non-Runtime containers.
+	containerManager cm.ContainerManager
+	nodeConfig       cm.NodeConfig
+
+	// Whether or not kubelet should take responsibility for keeping cbr0 in
+	// the correct state.
+	configureCBR0 bool
+	reconcileCIDR bool
+
+	// Traffic to IPs outside this range will use IP masquerade.
+	nonMasqueradeCIDR string
+
+	// Maximum number of pods which can be run by this Kubelet
+	maxPods int
+
+	// Number of NVIDIA GPUs on this node
+	nvidiaGPUs int
+
+	// Monitor Kubelet's sync loop
+	syncLoopMonitor atomic.Value
+
+	// Container restart backoff
+	backOff *flowcontrol.Backoff
+
+	// Channel for sending pods to kill.
+	podKillingCh chan *kubecontainer.PodPair
+
+	// The configuration file used as the base to generate the container's
+	// DNS resolver configuration file. This can be used in conjunction with
+	// clusterDomain and clusterDNS.
+	resolverConfig string
+
+	// Optionally shape the bandwidth of a pod
+	// TODO: remove when kubenet plugin is ready
+	shaper bandwidth.BandwidthShaper
+
+	// True if container cpu limits should be enforced via cgroup CFS quota
+	cpuCFSQuota bool
+
+	// Information about the ports which are opened by daemons on Node running this Kubelet server.
+	daemonEndpoints *api.NodeDaemonEndpoints
+
+	// A queue used to trigger pod workers.
+	workQueue queue.WorkQueue
+
+	// oneTimeInitializer is used to initialize modules that are dependent on the runtime to be up.
+	oneTimeInitializer sync.Once
+
+	flannelExperimentalOverlay bool
+
+	// TODO: FlannelHelper doesn't store any state, we can instantiate it
+	// on the fly if we're confident the dbus connections it opens don't
+	// put the system under duress.
+	flannelHelper *FlannelHelper
+
+	// If non-nil, use this IP address for the node
+	nodeIP net.IP
+
+	// clock is an interface that provides time related functionality in a way that makes it
+	// easy to test the code.
+	clock util.Clock
+
+	// outOfDiskTransitionFrequency specifies the amount of time the kubelet has to be actually
+	// not out of disk before it can transition the node condition status from out-of-disk to
+	// not-out-of-disk. This prevents a pod that causes out-of-disk condition from repeatedly
+	// getting rescheduled onto the node.
+	outOfDiskTransitionFrequency time.Duration
+
+	// reservation specifies resources which are reserved for non-pod usage, including kubernetes and
+	// non-kubernetes system processes.
+	reservation kubetypes.Reservation
+
+	// support gathering custom metrics.
+	enableCustomMetrics bool
+
+	// How the Kubelet should setup hairpin NAT. Can take the values: "promiscuous-bridge"
+	// (make cbr0 promiscuous), "hairpin-veth" (set the hairpin flag on veth interfaces)
+	// or "none" (do nothing).
+	hairpinMode componentconfig.HairpinMode
+
+	// The node has a babysitter process monitoring docker and kubelet.
+	babysitDaemons bool
+
+	// handlers called during the tryUpdateNodeStatus cycle
+	setNodeStatusFuncs []func(*api.Node) error
+
+	// TODO: think about moving this to be centralized in PodWorkers in follow-on.
+	// the list of handlers to call during pod admission.
+	lifecycle.PodAdmitHandlers
+
+	// the list of handlers to call during pod sync loop.
+	lifecycle.PodSyncLoopHandlers
+
+	// the list of handlers to call during pod sync.
+	lifecycle.PodSyncHandlers
+
+	// the number of allowed pods per core
+	podsPerCore int
+}
+
+// validateNodeIP ensures the given node IP belongs to the current host.
+func (kl *Kubelet) validateNodeIP() error {
+	if kl.nodeIP == nil {
+		return nil
+	}
+
+	// Honor IP limitations set in setNodeStatus()
+	if kl.nodeIP.IsLoopback() {
+		return fmt.Errorf("nodeIP can't be loopback address")
+	}
+	if kl.nodeIP.To4() == nil {
+		return fmt.Errorf("nodeIP must be IPv4 address")
+	}
+
+	addrs, err := net.InterfaceAddrs()
+	if err != nil {
+		return err
+	}
+	for _, addr := range addrs {
+		var ip net.IP
+		switch v := addr.(type) {
+		case *net.IPNet:
+			ip = v.IP
+		case *net.IPAddr:
+			ip = v.IP
+		}
+		if ip != nil && ip.Equal(kl.nodeIP) {
+			return nil
+		}
+	}
+	return fmt.Errorf("Node IP: %q not found in the host's network interfaces", kl.nodeIP.String())
+}
+
+// dirExists returns true if the path exists and represents a directory.
+func dirExists(path string) bool {
+	s, err := os.Stat(path)
+	if err != nil {
+		return false
+	}
+	return s.IsDir()
+}
+
+// setupDataDirs creates:
+// 1. the root directory
+// 2. the pods directory
+// 3. the plugins directory
+func (kl *Kubelet) setupDataDirs() error {
+	kl.rootDirectory = path.Clean(kl.rootDirectory)
+	if err := os.MkdirAll(kl.getRootDir(), 0750); err != nil {
+		return fmt.Errorf("error creating root directory: %v", err)
+	}
+	if err := os.MkdirAll(kl.getPodsDir(), 0750); err != nil {
+		return fmt.Errorf("error creating pods directory: %v", err)
+	}
+	if err := os.MkdirAll(kl.getPluginsDir(), 0750); err != nil {
+		return fmt.Errorf("error creating plugins directory: %v", err)
+	}
+	return nil
+}
+
+// Get a list of pods that have data directories.
+func (kl *Kubelet) listPodsFromDisk() ([]types.UID, error) {
+	podInfos, err := ioutil.ReadDir(kl.getPodsDir())
+	if err != nil {
+		return nil, err
+	}
+	pods := []types.UID{}
+	for i := range podInfos {
+		if podInfos[i].IsDir() {
+			pods = append(pods, types.UID(podInfos[i].Name()))
+		}
+	}
+	return pods, nil
+}
+
+// Starts garbage collection threads.
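+// Container GC runs every ContainerGCPeriod (one minute) and image GC every
+// ImageGCPeriod (five minutes); see the constants near the top of this file.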
+func (kl *Kubelet) StartGarbageCollection() {
+	go wait.Until(func() {
+		if err := kl.containerGC.GarbageCollect(); err != nil {
+			glog.Errorf("Container garbage collection failed: %v", err)
+		}
+	}, ContainerGCPeriod, wait.NeverStop)
+
+	go wait.Until(func() {
+		if err := kl.imageManager.GarbageCollect(); err != nil {
+			glog.Errorf("Image garbage collection failed: %v", err)
+		}
+	}, ImageGCPeriod, wait.NeverStop)
+}
+
+// initializeModules will initialize internal modules that do not require the container runtime to be up.
+// Note that the modules here must not depend on modules that are not initialized here.
+func (kl *Kubelet) initializeModules() error {
+	// Step 1: Prometheus metrics.
+	metrics.Register(kl.runtimeCache)
+
+	// Step 2: Setup filesystem directories.
+	if err := kl.setupDataDirs(); err != nil {
+		return err
+	}
+
+	// Step 3: If the container logs directory does not exist, create it.
+	if _, err := os.Stat(containerLogsDir); err != nil {
+		if err := kl.os.MkdirAll(containerLogsDir, 0755); err != nil {
+			glog.Errorf("Failed to create directory %q: %v", containerLogsDir, err)
+		}
+	}
+
+	// Step 4: Start the image manager.
+	if err := kl.imageManager.Start(); err != nil {
+		return fmt.Errorf("Failed to start ImageManager, images may not be garbage collected: %v", err)
+	}
+
+	// Step 5: Start container manager.
+	if err := kl.containerManager.Start(); err != nil {
+		return fmt.Errorf("Failed to start ContainerManager %v", err)
+	}
+
+	// Step 6: Start out of memory watcher.
+	if err := kl.oomWatcher.Start(kl.nodeRef); err != nil {
+		return fmt.Errorf("Failed to start OOM watcher %v", err)
+	}
+
+	// Step 7: Start resource analyzer
+	kl.resourceAnalyzer.Start()
+
+	return nil
+}
+
+// initializeRuntimeDependentModules will initialize internal modules that require the container runtime to be up.
+func (kl *Kubelet) initializeRuntimeDependentModules() {
+	if err := kl.cadvisor.Start(); err != nil {
+		kl.runtimeState.setInternalError(fmt.Errorf("Failed to start cAdvisor %v", err))
+	}
+}
+
+// Run starts the kubelet reacting to config updates
+func (kl *Kubelet) Run(updates <-chan kubetypes.PodUpdate) {
+	if kl.logServer == nil {
+		kl.logServer = http.StripPrefix("/logs/", http.FileServer(http.Dir("/var/log/")))
+	}
+	if kl.kubeClient == nil {
+		glog.Warning("No api server defined - no node status update will be sent.")
+	}
+	if err := kl.initializeModules(); err != nil {
+		kl.recorder.Eventf(kl.nodeRef, api.EventTypeWarning, kubecontainer.KubeletSetupFailed, err.Error())
+		glog.Error(err)
+		kl.runtimeState.setInitError(err)
+	}
+
+	if kl.kubeClient != nil {
+		// Start syncing node status immediately, this may set up things the runtime needs to run.
+		go wait.Until(kl.syncNodeStatus, kl.nodeStatusUpdateFrequency, wait.NeverStop)
+	}
+	go wait.Until(kl.syncNetworkStatus, 30*time.Second, wait.NeverStop)
+	go wait.Until(kl.updateRuntimeUp, 5*time.Second, wait.NeverStop)
+
+	// Start a goroutine responsible for killing pods (that are not properly
+	// handled by pod workers).
+	go wait.Until(kl.podKiller, 1*time.Second, wait.NeverStop)
+
+	// Start component sync loops.
+	kl.statusManager.Start()
+	kl.probeManager.Start()
+	kl.evictionManager.Start(kl.getActivePods, evictionMonitoringPeriod)
+
+	// Start the pod lifecycle event generator.
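+	// (The generic PLEG relists the runtime every plegRelistPeriod and buffers
+	// up to plegChannelCapacity events; see the constants above.)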
+	kl.pleg.Start()
+	kl.syncLoop(updates, kl)
+}
+
+// getActivePods returns non-terminal pods
+func (kl *Kubelet) getActivePods() []*api.Pod {
+	allPods := kl.podManager.GetPods()
+	activePods := kl.filterOutTerminatedPods(allPods)
+	return activePods
+}
+
+// initialNodeStatus determines the initial node status, incorporating node
+// labels and information from the cloud provider.
+func (kl *Kubelet) initialNodeStatus() (*api.Node, error) {
+	node := &api.Node{
+		ObjectMeta: api.ObjectMeta{
+			Name: kl.nodeName,
+			Labels: map[string]string{
+				unversioned.LabelHostname: kl.hostname,
+				unversioned.LabelOS:       goRuntime.GOOS,
+				unversioned.LabelArch:     goRuntime.GOARCH,
+			},
+		},
+		Spec: api.NodeSpec{
+			Unschedulable: !kl.registerSchedulable,
+		},
+	}
+	// Initially, set NodeNetworkUnavailable to true.
+	if kl.providerRequiresNetworkingConfiguration() {
+		node.Status.Conditions = append(node.Status.Conditions, api.NodeCondition{
+			Type:               api.NodeNetworkUnavailable,
+			Status:             api.ConditionTrue,
+			Reason:             "NoRouteCreated",
+			Message:            "Node created without a route",
+			LastTransitionTime: unversioned.NewTime(kl.clock.Now()),
+		})
+	}
+
+	// @question: should this be placed after the call to the cloud provider, which also applies labels?
+	for k, v := range kl.nodeLabels {
+		if cv, found := node.ObjectMeta.Labels[k]; found {
+			glog.Warningf("the node label %s=%s will overwrite default setting %s", k, v, cv)
+		}
+		node.ObjectMeta.Labels[k] = v
+	}
+
+	if kl.cloud != nil {
+		instances, ok := kl.cloud.Instances()
+		if !ok {
+			return nil, fmt.Errorf("failed to get instances from cloud provider")
+		}
+
+		// TODO(roberthbailey): Can we do this without having credentials to talk
+		// to the cloud provider?
+		// TODO: ExternalID is deprecated, we'll have to drop this code
+		externalID, err := instances.ExternalID(kl.nodeName)
+		if err != nil {
+			return nil, fmt.Errorf("failed to get external ID from cloud provider: %v", err)
+		}
+		node.Spec.ExternalID = externalID
+
+		// TODO: We can't assume that the node has credentials to talk to the
+		// cloudprovider from arbitrary nodes. At most, we should talk to a
+		// local metadata server here.
+ node.Spec.ProviderID, err = cloudprovider.GetInstanceProviderID(kl.cloud, kl.nodeName) + if err != nil { + return nil, err + } + + instanceType, err := instances.InstanceType(kl.nodeName) + if err != nil { + return nil, err + } + if instanceType != "" { + glog.Infof("Adding node label from cloud provider: %s=%s", unversioned.LabelInstanceType, instanceType) + node.ObjectMeta.Labels[unversioned.LabelInstanceType] = instanceType + } + // If the cloud has zone information, label the node with the zone information + zones, ok := kl.cloud.Zones() + if ok { + zone, err := zones.GetZone() + if err != nil { + return nil, fmt.Errorf("failed to get zone from cloud provider: %v", err) + } + if zone.FailureDomain != "" { + glog.Infof("Adding node label from cloud provider: %s=%s", unversioned.LabelZoneFailureDomain, zone.FailureDomain) + node.ObjectMeta.Labels[unversioned.LabelZoneFailureDomain] = zone.FailureDomain + } + if zone.Region != "" { + glog.Infof("Adding node label from cloud provider: %s=%s", unversioned.LabelZoneRegion, zone.Region) + node.ObjectMeta.Labels[unversioned.LabelZoneRegion] = zone.Region + } + } + } else { + node.Spec.ExternalID = kl.hostname + // If no cloud provider is defined - use the one detected by cadvisor + info, err := kl.GetCachedMachineInfo() + if err == nil { + kl.updateCloudProviderFromMachineInfo(node, info) + } + } + if err := kl.setNodeStatus(node); err != nil { + return nil, err + } + return node, nil +} + +func (kl *Kubelet) providerRequiresNetworkingConfiguration() bool { + if kl.cloud == nil || kl.flannelExperimentalOverlay { + return false + } + _, supported := kl.cloud.Routes() + return supported +} + +// registerWithApiserver registers the node with the cluster master. It is safe +// to call multiple times, but not concurrently (kl.registrationCompleted is +// not locked). +func (kl *Kubelet) registerWithApiserver() { + if kl.registrationCompleted { + return + } + step := 100 * time.Millisecond + for { + time.Sleep(step) + step = step * 2 + if step >= 7*time.Second { + step = 7 * time.Second + } + + node, err := kl.initialNodeStatus() + if err != nil { + glog.Errorf("Unable to construct api.Node object for kubelet: %v", err) + continue + } + glog.V(2).Infof("Attempting to register node %s", node.Name) + if _, err := kl.kubeClient.Core().Nodes().Create(node); err != nil { + if !apierrors.IsAlreadyExists(err) { + glog.V(2).Infof("Unable to register %s with the apiserver: %v", node.Name, err) + continue + } + currentNode, err := kl.kubeClient.Core().Nodes().Get(kl.nodeName) + if err != nil { + glog.Errorf("error getting node %q: %v", kl.nodeName, err) + continue + } + if currentNode == nil { + glog.Errorf("no node instance returned for %q", kl.nodeName) + continue + } + if currentNode.Spec.ExternalID == node.Spec.ExternalID { + glog.Infof("Node %s was previously registered", node.Name) + kl.registrationCompleted = true + return + } + glog.Errorf( + "Previously %q had externalID %q; now it is %q; will delete and recreate.", + kl.nodeName, node.Spec.ExternalID, currentNode.Spec.ExternalID, + ) + if err := kl.kubeClient.Core().Nodes().Delete(node.Name, nil); err != nil { + glog.Errorf("Unable to delete old node: %v", err) + } else { + glog.Errorf("Deleted old node object %q", kl.nodeName) + } + continue + } + glog.Infof("Successfully registered node %s", node.Name) + kl.registrationCompleted = true + return + } +} + +// syncNodeStatus should be called periodically from a goroutine. 
+// It synchronizes node status to master, registering the kubelet first if
+// necessary.
+func (kl *Kubelet) syncNodeStatus() {
+	if kl.kubeClient == nil {
+		return
+	}
+	if kl.registerNode {
+		// This will exit immediately if it doesn't need to do anything.
+		kl.registerWithApiserver()
+	}
+	if err := kl.updateNodeStatus(); err != nil {
+		glog.Errorf("Unable to update node status: %v", err)
+	}
+}
+
+// relabelVolumes relabels SELinux volumes to match the pod's
+// SELinuxOptions specification. This is only needed if the pod uses
+// hostPID or hostIPC. Otherwise relabeling is delegated to docker.
+func (kl *Kubelet) relabelVolumes(pod *api.Pod, volumes kubecontainer.VolumeMap) error {
+	if pod.Spec.SecurityContext.SELinuxOptions == nil {
+		return nil
+	}
+
+	rootDirContext, err := kl.getRootDirContext()
+	if err != nil {
+		return err
+	}
+
+	chconRunner := selinux.NewChconRunner()
+	// Apply the pod's Level to the rootDirContext
+	rootDirSELinuxOptions, err := securitycontext.ParseSELinuxOptions(rootDirContext)
+	if err != nil {
+		return err
+	}
+
+	rootDirSELinuxOptions.Level = pod.Spec.SecurityContext.SELinuxOptions.Level
+	volumeContext := fmt.Sprintf("%s:%s:%s:%s", rootDirSELinuxOptions.User, rootDirSELinuxOptions.Role, rootDirSELinuxOptions.Type, rootDirSELinuxOptions.Level)
+
+	for _, vol := range volumes {
+		if vol.Mounter.GetAttributes().Managed && vol.Mounter.GetAttributes().SupportsSELinux {
+			// Relabel the volume and its content to match the 'Level' of the pod
+			err := filepath.Walk(vol.Mounter.GetPath(), func(path string, info os.FileInfo, err error) error {
+				if err != nil {
+					return err
+				}
+				return chconRunner.SetContext(path, volumeContext)
+			})
+			if err != nil {
+				return err
+			}
+			vol.SELinuxLabeled = true
+		}
+	}
+	return nil
+}
+
+// makeMounts determines the mount points for the given container.
+func makeMounts(pod *api.Pod, podDir string, container *api.Container, hostName, hostDomain, podIP string, podVolumes kubecontainer.VolumeMap) ([]kubecontainer.Mount, error) {
+	// Kubernetes only mounts /etc/hosts if:
+	// - the container does not use hostNetwork, and
+	// - the container is not an infrastructure (pause) container, and
+	// - the container is not already mounting on /etc/hosts.
+	// When the pause container is being created, its IP is still unknown. Hence, PodIP will not have been set.
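+	// For example (illustrative): a hostNetwork pod, or a pod whose IP has not
+	// been assigned yet, skips the managed /etc/hosts mount, and a container
+	// that already declares a VolumeMount at /etc/hosts opts out via the loop
+	// below.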
+	mountEtcHostsFile := (pod.Spec.SecurityContext == nil || !pod.Spec.SecurityContext.HostNetwork) && len(podIP) > 0
+	glog.V(3).Infof("container: %v/%v/%v podIP: %q creating hosts mount: %v", pod.Namespace, pod.Name, container.Name, podIP, mountEtcHostsFile)
+	mounts := []kubecontainer.Mount{}
+	for _, mount := range container.VolumeMounts {
+		mountEtcHostsFile = mountEtcHostsFile && (mount.MountPath != etcHostsPath)
+		vol, ok := podVolumes[mount.Name]
+		if !ok {
+			glog.Warningf("Mount cannot be satisfied for container %q, because the volume is missing: %q", container.Name, mount)
+			continue
+		}
+
+		relabelVolume := false
+		// If the volume supports SELinux and it has not been
+		// relabeled already and it is not a read-only volume,
+		// relabel it and mark it as labeled
+		if vol.Mounter.GetAttributes().Managed && vol.Mounter.GetAttributes().SupportsSELinux && !vol.SELinuxLabeled {
+			vol.SELinuxLabeled = true
+			relabelVolume = true
+		}
+		hostPath := vol.Mounter.GetPath()
+		if mount.SubPath != "" {
+			hostPath = filepath.Join(hostPath, mount.SubPath)
+		}
+		mounts = append(mounts, kubecontainer.Mount{
+			Name:           mount.Name,
+			ContainerPath:  mount.MountPath,
+			HostPath:       hostPath,
+			ReadOnly:       mount.ReadOnly,
+			SELinuxRelabel: relabelVolume,
+		})
+	}
+	if mountEtcHostsFile {
+		hostsMount, err := makeHostsMount(podDir, podIP, hostName, hostDomain)
+		if err != nil {
+			return nil, err
+		}
+		mounts = append(mounts, *hostsMount)
+	}
+	return mounts, nil
+}
+
+// makeHostsMount makes the mountpoint for the hosts file that the containers
+// in a pod are injected with.
+func makeHostsMount(podDir, podIP, hostName, hostDomainName string) (*kubecontainer.Mount, error) {
+	hostsFilePath := path.Join(podDir, "etc-hosts")
+	if err := ensureHostsFile(hostsFilePath, podIP, hostName, hostDomainName); err != nil {
+		return nil, err
+	}
+	return &kubecontainer.Mount{
+		Name:          "k8s-managed-etc-hosts",
+		ContainerPath: etcHostsPath,
+		HostPath:      hostsFilePath,
+		ReadOnly:      false,
+	}, nil
+}
+
+// ensureHostsFile ensures that the given host file has an up-to-date ip, host
+// name, and domain name.
+func ensureHostsFile(fileName, hostIP, hostName, hostDomainName string) error {
+	// Note: os.Stat returns a nil error when the file exists, so check err == nil
+	// rather than os.IsExist(err), which is never true for a nil error.
+	if _, err := os.Stat(fileName); err == nil {
+		glog.V(4).Infof("kubernetes-managed etc-hosts file exists. Will not be recreated: %q", fileName)
+		return nil
+	}
+	var buffer bytes.Buffer
+	buffer.WriteString("# Kubernetes-managed hosts file.\n")
+	buffer.WriteString("127.0.0.1\tlocalhost\n")                      // ipv4 localhost
+	buffer.WriteString("::1\tlocalhost ip6-localhost ip6-loopback\n") // ipv6 localhost
+	buffer.WriteString("fe00::0\tip6-localnet\n")
+	buffer.WriteString("fe00::0\tip6-mcastprefix\n")
+	buffer.WriteString("fe00::1\tip6-allnodes\n")
+	buffer.WriteString("fe00::2\tip6-allrouters\n")
+	if len(hostDomainName) > 0 {
+		buffer.WriteString(fmt.Sprintf("%s\t%s.%s\t%s\n", hostIP, hostName, hostDomainName, hostName))
+	} else {
+		buffer.WriteString(fmt.Sprintf("%s\t%s\n", hostIP, hostName))
+	}
+	return ioutil.WriteFile(fileName, buffer.Bytes(), 0644)
+}
+
+func makePortMappings(container *api.Container) (ports []kubecontainer.PortMapping) {
+	names := make(map[string]struct{})
+	for _, p := range container.Ports {
+		pm := kubecontainer.PortMapping{
+			HostPort:      int(p.HostPort),
+			ContainerPort: int(p.ContainerPort),
+			Protocol:      p.Protocol,
+			HostIP:        p.HostIP,
+		}
+
+		// We need to create some default port name if it's not specified, since
+		// this is necessary for rkt.
+		// http://issue.k8s.io/7710
+		if p.Name == "" {
+			pm.Name = fmt.Sprintf("%s-%s:%d", container.Name, p.Protocol, p.ContainerPort)
+		} else {
+			pm.Name = fmt.Sprintf("%s-%s", container.Name, p.Name)
+		}
+
+		// Protect against exposing the same protocol-port more than once in a container.
+		if _, ok := names[pm.Name]; ok {
+			glog.Warningf("Port name conflicted, %q is defined more than once", pm.Name)
+			continue
+		}
+		ports = append(ports, pm)
+		names[pm.Name] = struct{}{}
+	}
+	return
+}
+
+func (kl *Kubelet) GeneratePodHostNameAndDomain(pod *api.Pod) (string, string, error) {
+	// TODO(vmarmol): Handle better.
+	// Cap the hostname at 63 chars (the specification is 64 bytes, which is 63
+	// chars plus the null terminating char).
+	clusterDomain := kl.clusterDomain
+	const hostnameMaxLen = 63
+	podAnnotations := pod.Annotations
+	if podAnnotations == nil {
+		podAnnotations = make(map[string]string)
+	}
+	hostname := pod.Name
+	if len(pod.Spec.Hostname) > 0 {
+		if msgs := utilvalidation.IsDNS1123Label(pod.Spec.Hostname); len(msgs) != 0 {
+			return "", "", fmt.Errorf("Pod Hostname %q is not a valid DNS label: %s", pod.Spec.Hostname, strings.Join(msgs, ";"))
+		}
+		hostname = pod.Spec.Hostname
+	} else {
+		hostnameCandidate := podAnnotations[utilpod.PodHostnameAnnotation]
+		if len(utilvalidation.IsDNS1123Label(hostnameCandidate)) == 0 {
+			// use hostname annotation, if specified.
+			hostname = hostnameCandidate
+		}
+	}
+	if len(hostname) > hostnameMaxLen {
+		hostname = hostname[:hostnameMaxLen]
+		glog.Errorf("hostname for pod %q was longer than %d; truncated to %q", pod.Name, hostnameMaxLen, hostname)
+	}
+
+	hostDomain := ""
+	if len(pod.Spec.Subdomain) > 0 {
+		if msgs := utilvalidation.IsDNS1123Label(pod.Spec.Subdomain); len(msgs) != 0 {
+			return "", "", fmt.Errorf("Pod Subdomain %q is not a valid DNS label: %s", pod.Spec.Subdomain, strings.Join(msgs, ";"))
+		}
+		hostDomain = fmt.Sprintf("%s.%s.svc.%s", pod.Spec.Subdomain, pod.Namespace, clusterDomain)
+	} else {
+		subdomainCandidate := pod.Annotations[utilpod.PodSubdomainAnnotation]
+		if len(utilvalidation.IsDNS1123Label(subdomainCandidate)) == 0 {
+			hostDomain = fmt.Sprintf("%s.%s.svc.%s", subdomainCandidate, pod.Namespace, clusterDomain)
+		}
+	}
+	return hostname, hostDomain, nil
+}
+
+// GenerateRunContainerOptions generates the RunContainerOptions, which can be used by
+// the container runtime to set parameters for launching a container.
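+//
+// A minimal call sketch (an assumed caller shape, not taken from this change):
+//
+//	opts, err := kl.GenerateRunContainerOptions(pod, &pod.Spec.Containers[0], podIP)
+//	// opts then carries the hostname, mounts, port mappings, env vars and DNS
+//	// settings computed below.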
+func (kl *Kubelet) GenerateRunContainerOptions(pod *api.Pod, container *api.Container, podIP string) (*kubecontainer.RunContainerOptions, error) {
+	var err error
+	opts := &kubecontainer.RunContainerOptions{CgroupParent: kl.cgroupRoot}
+	hostname, hostDomainName, err := kl.GeneratePodHostNameAndDomain(pod)
+	if err != nil {
+		return nil, err
+	}
+	opts.Hostname = hostname
+	vol, ok := kl.volumeManager.GetVolumes(pod.UID)
+	if !ok {
+		return nil, fmt.Errorf("impossible: cannot find the mounted volumes for pod %q", format.Pod(pod))
+	}
+
+	opts.PortMappings = makePortMappings(container)
+	// Docker does not relabel volumes if the container is running
+	// in the host pid or ipc namespaces so the kubelet must
+	// relabel the volumes
+	if pod.Spec.SecurityContext != nil && (pod.Spec.SecurityContext.HostIPC || pod.Spec.SecurityContext.HostPID) {
+		err = kl.relabelVolumes(pod, vol)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	opts.Mounts, err = makeMounts(pod, kl.getPodDir(pod.UID), container, hostname, hostDomainName, podIP, vol)
+	if err != nil {
+		return nil, err
+	}
+	opts.Envs, err = kl.makeEnvironmentVariables(pod, container, podIP)
+	if err != nil {
+		return nil, err
+	}
+
+	if len(container.TerminationMessagePath) != 0 {
+		p := kl.getPodContainerDir(pod.UID, container.Name)
+		if err := os.MkdirAll(p, 0750); err != nil {
+			glog.Errorf("Error on creating %q: %v", p, err)
+		} else {
+			opts.PodContainerDir = p
+		}
+	}
+
+	opts.DNS, opts.DNSSearch, err = kl.GetClusterDNS(pod)
+	if err != nil {
+		return nil, err
+	}
+
+	return opts, nil
+}
+
+var masterServices = sets.NewString("kubernetes")
+
+// getServiceEnvVarMap makes a map[string]string of env vars for services a pod in namespace ns should see
+func (kl *Kubelet) getServiceEnvVarMap(ns string) (map[string]string, error) {
+	var (
+		serviceMap = make(map[string]api.Service)
+		m          = make(map[string]string)
+	)
+
+	// Get all service resources from the master (via a cache),
+	// and populate them into service environment variables.
+	if kl.serviceLister == nil {
+		// Kubelets without masters (e.g. plain GCE ContainerVM) don't set env vars.
+		return m, nil
+	}
+	services, err := kl.serviceLister.List()
+	if err != nil {
+		return m, fmt.Errorf("failed to list services when setting up env vars")
+	}
+
+	// project the services in namespace ns onto the master services
+	for _, service := range services.Items {
+		// ignore services where ClusterIP is "None" or empty
+		if !api.IsServiceIPSet(&service) {
+			continue
+		}
+		serviceName := service.Name
+
+		switch service.Namespace {
+		// for the case where the master service namespace is the namespace the pod
+		// is in, the pod should receive all the services in the namespace.
+		//
+		// ordering of the case clauses below enforces this
+		case ns:
+			serviceMap[serviceName] = service
+		case kl.masterServiceNamespace:
+			if masterServices.Has(serviceName) {
+				if _, exists := serviceMap[serviceName]; !exists {
+					serviceMap[serviceName] = service
+				}
+			}
+		}
+	}
+	services.Items = []api.Service{}
+	for _, service := range serviceMap {
+		services.Items = append(services.Items, service)
+	}
+
+	for _, e := range envvars.FromServices(&services) {
+		m[e.Name] = e.Value
+	}
+	return m, nil
+}
+
+// Make the environment variables for a pod in the given namespace.
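+// For example (illustrative, assuming the conventional variables emitted by
+// envvars.FromServices): a visible service "redis-master" with ClusterIP
+// 10.0.0.11 and port 6379 contributes env vars such as
+// REDIS_MASTER_SERVICE_HOST=10.0.0.11 and REDIS_MASTER_SERVICE_PORT=6379.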
+func (kl *Kubelet) makeEnvironmentVariables(pod *api.Pod, container *api.Container, podIP string) ([]kubecontainer.EnvVar, error) {
+	var result []kubecontainer.EnvVar
+	// Note: These are added to the docker Config, but are not included in the checksum computed
+	// by dockertools.BuildDockerName(...). That way, we can still determine whether an
+	// api.Container is already running by its hash. (We don't want to restart a container just
+	// because some service changed.)
+	//
+	// Note that there is a race between Kubelet seeing the pod and kubelet seeing the service.
+	// To avoid this, users can: (1) wait between starting a service and starting the pods that
+	// use it; or (2) detect a missing service env var and exit, and be restarted; or (3) use
+	// DNS instead of env vars and keep trying to resolve the DNS name of the service
+	// (recommended).
+	serviceEnv, err := kl.getServiceEnvVarMap(pod.Namespace)
+	if err != nil {
+		return result, err
+	}
+
+	// Determine the final values of variables:
+	//
+	// 1. Determine the final value of each variable:
+	//    a. If the variable's Value is set, expand the `$(var)` references to other
+	//       variables in the .Value field; the sources of variables are the declared
+	//       variables of the container and the service environment variables
+	//    b. If a source is defined for an environment variable, resolve the source
+	// 2. Create the container's environment in the order variables are declared
+	// 3. Add remaining service environment vars
+	var (
+		tmpEnv      = make(map[string]string)
+		configMaps  = make(map[string]*api.ConfigMap)
+		secrets     = make(map[string]*api.Secret)
+		mappingFunc = expansion.MappingFuncFor(tmpEnv, serviceEnv)
+	)
+	for _, envVar := range container.Env {
+		// Accesses apiserver+Pods.
+		// So, the master may set service env vars, or kubelet may. In case both are doing
+		// it, we delete the key from the kubelet-generated ones so we don't have duplicate
+		// env vars.
+		// TODO: remove this next line once all platforms use apiserver+Pods.
+		delete(serviceEnv, envVar.Name)
+
+		runtimeVal := envVar.Value
+		if runtimeVal != "" {
+			// Step 1a: expand variable references
+			runtimeVal = expansion.Expand(runtimeVal, mappingFunc)
+		} else if envVar.ValueFrom != nil {
+			// Step 1b: resolve alternate env var sources
+			switch {
+			case envVar.ValueFrom.FieldRef != nil:
+				runtimeVal, err = kl.podFieldSelectorRuntimeValue(envVar.ValueFrom.FieldRef, pod, podIP)
+				if err != nil {
+					return result, err
+				}
+			case envVar.ValueFrom.ResourceFieldRef != nil:
+				runtimeVal, err = containerResourceRuntimeValue(envVar.ValueFrom.ResourceFieldRef, pod, container)
+				if err != nil {
+					return result, err
+				}
+			case envVar.ValueFrom.ConfigMapKeyRef != nil:
+				name := envVar.ValueFrom.ConfigMapKeyRef.Name
+				key := envVar.ValueFrom.ConfigMapKeyRef.Key
+				configMap, ok := configMaps[name]
+				if !ok {
+					configMap, err = kl.kubeClient.Core().ConfigMaps(pod.Namespace).Get(name)
+					if err != nil {
+						return result, err
+					}
+					// Cache the ConfigMap so later env vars referencing it avoid
+					// another apiserver round trip.
+					configMaps[name] = configMap
+				}
+				runtimeVal, ok = configMap.Data[key]
+				if !ok {
+					return result, fmt.Errorf("Couldn't find key %v in ConfigMap %v/%v", key, pod.Namespace, name)
+				}
+			case envVar.ValueFrom.SecretKeyRef != nil:
+				name := envVar.ValueFrom.SecretKeyRef.Name
+				key := envVar.ValueFrom.SecretKeyRef.Key
+				secret, ok := secrets[name]
+				if !ok {
+					secret, err = kl.kubeClient.Core().Secrets(pod.Namespace).Get(name)
+					if err != nil {
+						return result, err
+					}
+					// Cache the Secret for the same reason.
+					secrets[name] = secret
+				}
+				runtimeValBytes, ok := secret.Data[key]
+				if !ok {
+					return result, fmt.Errorf("Couldn't find key %v in Secret %v/%v", key, pod.Namespace, name)
+				}
+				runtimeVal = string(runtimeValBytes)
+			}
+		}
+
+		tmpEnv[envVar.Name] = runtimeVal
+		result = append(result, kubecontainer.EnvVar{Name: envVar.Name, Value: tmpEnv[envVar.Name]})
+	}
+
+	// Append remaining service env vars.
+	for k, v := range serviceEnv {
+		result = append(result, kubecontainer.EnvVar{Name: k, Value: v})
+	}
+	return result, nil
+}
+
+// podFieldSelectorRuntimeValue returns the runtime value of the given
+// selector for a pod.
+func (kl *Kubelet) podFieldSelectorRuntimeValue(fs *api.ObjectFieldSelector, pod *api.Pod, podIP string) (string, error) {
+	internalFieldPath, _, err := api.Scheme.ConvertFieldLabel(fs.APIVersion, "Pod", fs.FieldPath, "")
+	if err != nil {
+		return "", err
+	}
+	switch internalFieldPath {
+	case "status.podIP":
+		return podIP, nil
+	}
+	return fieldpath.ExtractFieldPathAsString(pod, internalFieldPath)
+}
+
+// containerResourceRuntimeValue returns the value of the provided container resource
+func containerResourceRuntimeValue(fs *api.ResourceFieldSelector, pod *api.Pod, container *api.Container) (string, error) {
+	containerName := fs.ContainerName
+	if len(containerName) == 0 {
+		return fieldpath.ExtractContainerResourceValue(fs, container)
+	} else {
+		return fieldpath.ExtractResourceValueByContainerName(fs, pod, containerName)
+	}
+}
+
+// GetClusterDNS returns a list of the DNS servers and a list of the DNS search
+// domains of the cluster.
+func (kl *Kubelet) GetClusterDNS(pod *api.Pod) ([]string, []string, error) {
+	var hostDNS, hostSearch []string
+	// Get host DNS settings
+	if kl.resolverConfig != "" {
+		f, err := os.Open(kl.resolverConfig)
+		if err != nil {
+			return nil, nil, err
+		}
+		defer f.Close()
+
+		hostDNS, hostSearch, err = kl.parseResolvConf(f)
+		if err != nil {
+			return nil, nil, err
+		}
+	}
+	useClusterFirstPolicy := pod.Spec.DNSPolicy == api.DNSClusterFirst
+	if useClusterFirstPolicy && kl.clusterDNS == nil {
+		// clusterDNS is not known.
+		// Pods with the DNSClusterFirst policy cannot be created in this state.
+		kl.recorder.Eventf(pod, api.EventTypeWarning, "MissingClusterDNS", "kubelet does not have ClusterDNS IP configured and cannot create Pod using %q policy. Falling back to DNSDefault policy.", pod.Spec.DNSPolicy)
+		log := fmt.Sprintf("kubelet does not have ClusterDNS IP configured and cannot create Pod using %q policy. pod: %q. Falling back to DNSDefault policy.", pod.Spec.DNSPolicy, format.Pod(pod))
+		kl.recorder.Eventf(kl.nodeRef, api.EventTypeWarning, "MissingClusterDNS", log)
+
+		// fallback to DNSDefault
+		useClusterFirstPolicy = false
+	}
+
+	if !useClusterFirstPolicy {
+		// When the kubelet --resolv-conf flag is set to the empty string, use
+		// DNS settings that override the docker default (which is to use
+		// /etc/resolv.conf) and effectively disable DNS lookups. According to
+		// the bind documentation, the behavior of the DNS client library when
+		// "nameservers" are not specified is to "use the nameserver on the
+		// local machine". A nameserver setting of localhost is equivalent to
+		// this documented behavior.
+		if kl.resolverConfig == "" {
+			hostDNS = []string{"127.0.0.1"}
+			hostSearch = []string{"."}
+		}
+		return hostDNS, hostSearch, nil
+	}
+
+	// For a pod with the DNSClusterFirst policy, the cluster DNS server is the
+	// only nameserver configured for the pod. The cluster DNS server itself will
+	// forward queries to other nameservers that it is configured to use, in case
+	// the cluster DNS server cannot resolve the DNS query itself.
+	dns := []string{kl.clusterDNS.String()}
+
+	var dnsSearch []string
+	if kl.clusterDomain != "" {
+		nsSvcDomain := fmt.Sprintf("%s.svc.%s", pod.Namespace, kl.clusterDomain)
+		svcDomain := fmt.Sprintf("svc.%s", kl.clusterDomain)
+		dnsSearch = append([]string{nsSvcDomain, svcDomain, kl.clusterDomain}, hostSearch...)
+	} else {
+		dnsSearch = hostSearch
+	}
+	return dns, dnsSearch, nil
+}
+
+// Returns the list of DNS servers and DNS search domains.
+func (kl *Kubelet) parseResolvConf(reader io.Reader) (nameservers []string, searches []string, err error) {
+	var scrubber dnsScrubber
+	if kl.cloud != nil {
+		scrubber = kl.cloud
+	}
+	return parseResolvConf(reader, scrubber)
+}
+
+// A helper for testing.
+type dnsScrubber interface {
+	ScrubDNS(nameservers, searches []string) (nsOut, srchOut []string)
+}
+
+// parseResolvConf reads a resolv.conf file from the given reader, and parses
+// it into nameservers and searches, possibly returning an error. The given
+// dnsScrubber allows cloud providers to post-process DNS names.
+// TODO: move to utility package
+func parseResolvConf(reader io.Reader, dnsScrubber dnsScrubber) (nameservers []string, searches []string, err error) {
+	file, err := ioutil.ReadAll(reader)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	// Lines of the form "nameserver 1.2.3.4" accumulate.
+	nameservers = []string{}
+
+	// Lines of the form "search example.com" overrule - last one wins.
+	searches = []string{}
+
+	lines := strings.Split(string(file), "\n")
+	for l := range lines {
+		trimmed := strings.TrimSpace(lines[l])
+		if strings.HasPrefix(trimmed, "#") {
+			continue
+		}
+		fields := strings.Fields(trimmed)
+		if len(fields) == 0 {
+			continue
+		}
+		if fields[0] == "nameserver" {
+			nameservers = append(nameservers, fields[1:]...)
+		}
+		if fields[0] == "search" {
+			searches = fields[1:]
+		}
+	}
+
+	// Give the cloud-provider a chance to post-process DNS settings.
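+	// (Illustrative: a provider's ScrubDNS may rewrite or filter both lists,
+	// e.g. dropping provider-internal search domains; with no scrubber, the
+	// parsed values pass through unchanged.)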
+	if dnsScrubber != nil {
+		nameservers, searches = dnsScrubber.ScrubDNS(nameservers, searches)
+	}
+	return nameservers, searches, nil
+}
+
+// One of the following arguments must be non-nil: runningPod, status.
+// TODO: Modify containerRuntime.KillPod() to accept the right arguments.
+func (kl *Kubelet) killPod(pod *api.Pod, runningPod *kubecontainer.Pod, status *kubecontainer.PodStatus, gracePeriodOverride *int64) error {
+	var p kubecontainer.Pod
+	if runningPod != nil {
+		p = *runningPod
+	} else if status != nil {
+		p = kubecontainer.ConvertPodStatusToRunningPod(status)
+	}
+	return kl.containerRuntime.KillPod(pod, p, gracePeriodOverride)
+}
+
+// makePodDataDirs creates the directories for the pod data.
+func (kl *Kubelet) makePodDataDirs(pod *api.Pod) error {
+	uid := pod.UID
+	if err := os.MkdirAll(kl.getPodDir(uid), 0750); err != nil && !os.IsExist(err) {
+		return err
+	}
+	if err := os.MkdirAll(kl.getPodVolumesDir(uid), 0750); err != nil && !os.IsExist(err) {
+		return err
+	}
+	if err := os.MkdirAll(kl.getPodPluginsDir(uid), 0750); err != nil && !os.IsExist(err) {
+		return err
+	}
+	return nil
+}
+
+// syncPod is the transaction script for the sync of a single pod.
+//
+// Arguments:
+//
+// pod - the pod to sync
+// mirrorPod - the mirror pod for the pod to sync, if it is a static pod
+// podStatus - the current status (TODO: always from the status manager?)
+// updateType - the type of update (ADD, UPDATE, REMOVE, RECONCILE)
+//
+// The workflow is:
+// * If the pod is being created, record pod worker start latency
+// * Call generateAPIPodStatus to prepare an api.PodStatus for the pod
+// * If the pod is being seen as running for the first time, record pod
+//   start latency
+// * Update the status of the pod in the status manager
+// * Kill the pod if it should not be running
+// * Create a mirror pod if the pod is a static pod, and does not
+//   already have a mirror pod
+// * Create the data directories for the pod if they do not exist
+// * Mount volumes and update the volume manager
+// * Fetch the pull secrets for the pod
+// * Call the container runtime's SyncPod callback
+// * Update the traffic shaping for the pod's ingress and egress limits
+//
+// If any step of this workflow errors, the error is returned, and is repeated
+// on the next syncPod call.
+func (kl *Kubelet) syncPod(o syncPodOptions) error {
+	// pull out the required options
+	pod := o.pod
+	mirrorPod := o.mirrorPod
+	podStatus := o.podStatus
+	updateType := o.updateType
+
+	// if we want to kill a pod, do it now!
+	if updateType == kubetypes.SyncPodKill {
+		killPodOptions := o.killPodOptions
+		if killPodOptions == nil || killPodOptions.PodStatusFunc == nil {
+			return fmt.Errorf("kill pod options are required if update type is kill")
+		}
+		apiPodStatus := killPodOptions.PodStatusFunc(pod, podStatus)
+		kl.statusManager.SetPodStatus(pod, apiPodStatus)
+		// we kill the pod with the specified grace period since this is a termination
+		if err := kl.killPod(pod, nil, podStatus, killPodOptions.PodTerminationGracePeriodSecondsOverride); err != nil {
+			// there was an error killing the pod, so we return that error directly
+			utilruntime.HandleError(err)
+			return err
+		}
+		return nil
+	}
+
+	// Latency measurements for the main workflow are relative to the
+	// first time the pod was seen by the API server.
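+	// (The first-seen time is stamped by the config sources into the
+	// kubetypes.ConfigFirstSeenAnnotationKey annotation, parsed just below.)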
+ var firstSeenTime time.Time + if firstSeenTimeStr, ok := pod.Annotations[kubetypes.ConfigFirstSeenAnnotationKey]; ok { + firstSeenTime = kubetypes.ConvertToTimestamp(firstSeenTimeStr).Get() + } + + // Record pod worker start latency if being created + // TODO: make pod workers record their own latencies + if updateType == kubetypes.SyncPodCreate { + if !firstSeenTime.IsZero() { + // This is the first time we are syncing the pod. Record the latency + // since kubelet first saw the pod if firstSeenTime is set. + metrics.PodWorkerStartLatency.Observe(metrics.SinceInMicroseconds(firstSeenTime)) + } else { + glog.V(3).Infof("First seen time not recorded for pod %q", pod.UID) + } + } + + // Generate final API pod status with pod and status manager status + apiPodStatus := kl.generateAPIPodStatus(pod, podStatus) + // The pod IP may be changed in generateAPIPodStatus if the pod is using host network. (See #24576) + // TODO(random-liu): After writing pod spec into container labels, check whether pod is using host network, and + // set pod IP to hostIP directly in runtime.GetPodStatus + podStatus.IP = apiPodStatus.PodIP + + // Record the time it takes for the pod to become running. + existingStatus, ok := kl.statusManager.GetPodStatus(pod.UID) + if !ok || existingStatus.Phase == api.PodPending && apiPodStatus.Phase == api.PodRunning && + !firstSeenTime.IsZero() { + metrics.PodStartLatency.Observe(metrics.SinceInMicroseconds(firstSeenTime)) + } + + // Update status in the status manager + kl.statusManager.SetPodStatus(pod, apiPodStatus) + + // Kill pod if it should not be running + if errOuter := canRunPod(pod); errOuter != nil || pod.DeletionTimestamp != nil || apiPodStatus.Phase == api.PodFailed { + if errInner := kl.killPod(pod, nil, podStatus, nil); errInner != nil { + errOuter = fmt.Errorf("error killing pod: %v", errInner) + utilruntime.HandleError(errOuter) + } + // there was no error killing the pod, but the pod cannot be run, so we return that err (if any) + return errOuter + } + + // Create Mirror Pod for Static Pod if it doesn't already exist + if kubepod.IsStaticPod(pod) { + podFullName := kubecontainer.GetPodFullName(pod) + deleted := false + if mirrorPod != nil { + if mirrorPod.DeletionTimestamp != nil || !kl.podManager.IsMirrorPodOf(mirrorPod, pod) { + // The mirror pod is semantically different from the static pod. Remove + // it. The mirror pod will get recreated later. 
+				glog.Errorf("Deleting mirror pod %q because it is outdated", format.Pod(mirrorPod))
+				if err := kl.podManager.DeleteMirrorPod(podFullName); err != nil {
+					glog.Errorf("Failed deleting mirror pod %q: %v", format.Pod(mirrorPod), err)
+				} else {
+					deleted = true
+				}
+			}
+		}
+		if mirrorPod == nil || deleted {
+			glog.V(3).Infof("Creating a mirror pod for static pod %q", format.Pod(pod))
+			if err := kl.podManager.CreateMirrorPod(pod); err != nil {
+				glog.Errorf("Failed creating a mirror pod for %q: %v", format.Pod(pod), err)
+			}
+		}
+	}
+
+	// Make data directories for the pod
+	if err := kl.makePodDataDirs(pod); err != nil {
+		glog.Errorf("Unable to make pod data directories for pod %q: %v", format.Pod(pod), err)
+		return err
+	}
+
+	// Mount volumes and update the volume manager
+	podVolumes, err := kl.mountExternalVolumes(pod)
+	if err != nil {
+		ref, errGetRef := api.GetReference(pod)
+		if errGetRef == nil && ref != nil {
+			kl.recorder.Eventf(ref, api.EventTypeWarning, kubecontainer.FailedMountVolume, "Unable to mount volumes for pod %q: %v", format.Pod(pod), err)
+		}
+		// Return the mount error even if an object reference could not be built
+		// for the event; the pod cannot run without its volumes.
+		glog.Errorf("Unable to mount volumes for pod %q: %v; skipping pod", format.Pod(pod), err)
+		return err
+	}
+	kl.volumeManager.SetVolumes(pod.UID, podVolumes)
+
+	// Fetch the pull secrets for the pod
+	pullSecrets, err := kl.getPullSecretsForPod(pod)
+	if err != nil {
+		glog.Errorf("Unable to get pull secrets for pod %q: %v", format.Pod(pod), err)
+		return err
+	}
+
+	// Call the container runtime's SyncPod callback
+	result := kl.containerRuntime.SyncPod(pod, apiPodStatus, podStatus, pullSecrets, kl.backOff)
+	kl.reasonCache.Update(pod.UID, result)
+	if err = result.Error(); err != nil {
+		return err
+	}
+
+	// early successful exit if pod is not bandwidth-constrained
+	if !kl.shapingEnabled() {
+		return nil
+	}
+
+	// Update the traffic shaping for the pod's ingress and egress limits
+	ingress, egress, err := bandwidth.ExtractPodBandwidthResources(pod.Annotations)
+	if err != nil {
+		return err
+	}
+	if egress != nil || ingress != nil {
+		if podUsesHostNetwork(pod) {
+			kl.recorder.Event(pod, api.EventTypeWarning, kubecontainer.HostNetworkNotSupported, "Bandwidth shaping is not currently supported on the host network")
+		} else if kl.shaper != nil {
+			if len(apiPodStatus.PodIP) > 0 {
+				err = kl.shaper.ReconcileCIDR(fmt.Sprintf("%s/32", apiPodStatus.PodIP), egress, ingress)
+			}
+		} else {
+			kl.recorder.Event(pod, api.EventTypeWarning, kubecontainer.UndefinedShaper, "Pod requests bandwidth shaping, but the shaper is undefined")
+		}
+	}
+
+	return nil
+}
+
+// returns whether the pod uses the host network namespace.
+func podUsesHostNetwork(pod *api.Pod) bool {
+	return pod.Spec.SecurityContext != nil && pod.Spec.SecurityContext.HostNetwork
+}
+
+// getPullSecretsForPod inspects the Pod and retrieves the referenced pull secrets
+// TODO duplicate secrets are being retrieved multiple times and there is no cache. Creating and using a secret manager interface will make this easier to address.
+func (kl *Kubelet) getPullSecretsForPod(pod *api.Pod) ([]api.Secret, error) {
+	pullSecrets := []api.Secret{}
+
+	for _, secretRef := range pod.Spec.ImagePullSecrets {
+		secret, err := kl.kubeClient.Core().Secrets(pod.Namespace).Get(secretRef.Name)
+		if err != nil {
+			glog.Warningf("Unable to retrieve pull secret %s/%s for %s/%s due to %v. The image pull may not succeed.", pod.Namespace, secretRef.Name, pod.Namespace, pod.Name, err)
+			continue
+		}
+
+		pullSecrets = append(pullSecrets, *secret)
+	}
+
+	return pullSecrets, nil
+}
+
+// resolveVolumeName returns the name of the persistent volume (PV) claimed by
+// a persistent volume claim (PVC) or an error if the claim is not bound.
+// Returns the volume's own name if the volume does not use a PVC.
+func (kl *Kubelet) resolveVolumeName(pod *api.Pod, volume *api.Volume) (string, error) {
+	claimSource := volume.VolumeSource.PersistentVolumeClaim
+	if claimSource != nil {
+		// resolve real volume behind the claim
+		claim, err := kl.kubeClient.Core().PersistentVolumeClaims(pod.Namespace).Get(claimSource.ClaimName)
+		if err != nil {
+			return "", fmt.Errorf("Cannot find claim %s/%s for volume %s", pod.Namespace, claimSource.ClaimName, volume.Name)
+		}
+		if claim.Status.Phase != api.ClaimBound {
+			return "", fmt.Errorf("Claim for volume %s/%s is not bound yet", pod.Namespace, claimSource.ClaimName)
+		}
+		// Use the real bound volume instead of PersistentVolume.Name
+		return claim.Spec.VolumeName, nil
+	}
+	return volume.Name, nil
+}
+
+// Stores all volumes defined by the set of pods into a map.
+// It stores real volumes there, i.e. persistent volume claims are resolved
+// to volumes that are bound to them.
+// Keys for each entry are in the format (POD_ID)/(VOLUME_NAME)
+func (kl *Kubelet) getDesiredVolumes(pods []*api.Pod) map[string]api.Volume {
+	desiredVolumes := make(map[string]api.Volume)
+	for _, pod := range pods {
+		for _, volume := range pod.Spec.Volumes {
+			volumeName, err := kl.resolveVolumeName(pod, &volume)
+			if err != nil {
+				glog.V(3).Infof("%v", err)
+				// Ignore the error and hope it's resolved next time
+				continue
+			}
+			identifier := path.Join(string(pod.UID), volumeName)
+			desiredVolumes[identifier] = volume
+		}
+	}
+	return desiredVolumes
+}
+
+// cleanupOrphanedPodDirs removes the volumes of pods that should not be
+// running and that have no containers running.
+func (kl *Kubelet) cleanupOrphanedPodDirs(pods []*api.Pod, runningPods []*kubecontainer.Pod) error {
+	active := sets.NewString()
+	for _, pod := range pods {
+		active.Insert(string(pod.UID))
+	}
+	for _, pod := range runningPods {
+		active.Insert(string(pod.ID))
+	}
+
+	found, err := kl.listPodsFromDisk()
+	if err != nil {
+		return err
+	}
+	errlist := []error{}
+	for _, uid := range found {
+		if active.Has(string(uid)) {
+			continue
+		}
+		if volumes, err := kl.getPodVolumes(uid); err != nil || len(volumes) != 0 {
+			glog.V(3).Infof("Orphaned pod %q found, but volumes are not cleaned up; err: %v, volumes: %v", uid, err, volumes)
+			continue
+		}
+
+		glog.V(3).Infof("Orphaned pod %q found, removing", uid)
+		if err := os.RemoveAll(kl.getPodDir(uid)); err != nil {
+			errlist = append(errlist, err)
+		}
+	}
+	return utilerrors.NewAggregate(errlist)
+}
+
+// cleanupBandwidthLimits updates the status of bandwidth-limited containers
+// and ensures that only the appropriate CIDRs are active on the node.
+func (kl *Kubelet) cleanupBandwidthLimits(allPods []*api.Pod) error {
+	if kl.shaper == nil {
+		return nil
+	}
+	currentCIDRs, err := kl.shaper.GetCIDRs()
+	if err != nil {
+		return err
+	}
+	possibleCIDRs := sets.String{}
+	for ix := range allPods {
+		pod := allPods[ix]
+		ingress, egress, err := bandwidth.ExtractPodBandwidthResources(pod.Annotations)
+		if err != nil {
+			return err
+		}
+		if ingress == nil && egress == nil {
+			glog.V(8).Infof("Not a bandwidth limited container...")
+			continue
+		}
+		status, found := kl.statusManager.GetPodStatus(pod.UID)
+		if !found {
+			// TODO(random-liu): Cleanup status get functions. (issue #20477)
+			s, err := kl.containerRuntime.GetPodStatus(pod.UID, pod.Name, pod.Namespace)
+			if err != nil {
+				return err
+			}
+			status = kl.generateAPIPodStatus(pod, s)
+		}
+		if status.Phase == api.PodRunning {
+			possibleCIDRs.Insert(fmt.Sprintf("%s/32", status.PodIP))
+		}
+	}
+	for _, cidr := range currentCIDRs {
+		if !possibleCIDRs.Has(cidr) {
+			glog.V(2).Infof("Removing CIDR: %s (%v)", cidr, possibleCIDRs)
+			if err := kl.shaper.Reset(cidr); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+// Compares the map of current volumes to the map of desired volumes.
+// If an active volume does not have a respective desired volume, clean it up.
+// This method is blocking:
+// 1) it talks to API server to find volumes bound to persistent volume claims
+// 2) it talks to cloud to detach volumes
+func (kl *Kubelet) cleanupOrphanedVolumes(pods []*api.Pod, runningPods []*kubecontainer.Pod) error {
+	desiredVolumes := kl.getDesiredVolumes(pods)
+	currentVolumes := kl.getPodVolumesFromDisk()
+
+	runningSet := sets.String{}
+	for _, pod := range runningPods {
+		runningSet.Insert(string(pod.ID))
+	}
+
+	for name, cleaner := range currentVolumes {
+		if _, ok := desiredVolumes[name]; !ok {
+			parts := strings.Split(name, "/")
+			if runningSet.Has(parts[0]) {
+				glog.Infof("volume %q still has a container running (%q); skipping teardown", name, parts[0])
+				continue
+			}
+			//TODO (jonesdl) We should somehow differentiate between volumes that are supposed
+			//to be deleted and volumes that are leftover after a crash.
+			glog.V(3).Infof("Orphaned volume %q found, tearing down volume", name)
+			// TODO(yifan): Refactor this hacky string manipulation.
+			kl.volumeManager.DeleteVolumes(types.UID(parts[0]))
+			// Get path reference count
+			volumePath := cleaner.Unmounter.GetPath()
+			refs, err := mount.GetMountRefs(kl.mounter, volumePath)
+			if err != nil {
+				glog.Errorf("Could not get mount path references for %q: %v", volumePath, err)
+			}
+			//TODO (jonesdl) This should not block other kubelet synchronization procedures
+			err = cleaner.Unmounter.TearDown()
+			if err != nil {
+				glog.Errorf("Could not tear down volume %q at %q: %v", name, volumePath, err)
+			}
+
+			// The volume is unmounted. Some volumes also require detachment from the node.
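+			// (Illustrative: for an attachable volume whose global mount has a
+			// single remaining reference, the device is unmounted, detached from
+			// this node, and the detach is then awaited asynchronously below.)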
+ if cleaner.Detacher != nil && len(refs) == 1 { + + detacher := *cleaner.Detacher + devicePath, _, err := mount.GetDeviceNameFromMount(kl.mounter, refs[0]) + if err != nil { + glog.Errorf("Could not find device path %v", err) + } + + if err = detacher.UnmountDevice(refs[0], kl.mounter); err != nil { + glog.Errorf("Could not unmount the global mount for %q: %v", name, err) + } + + pdName := path.Base(refs[0]) + err = detacher.Detach(pdName, kl.hostname) + if err != nil { + glog.Errorf("Could not detach volume %q at %q: %v", name, volumePath, err) + } + + go func() { + if err = detacher.WaitForDetach(devicePath, maxWaitForVolumeOps); err != nil { + glog.Errorf("Error while waiting for detach: %v", err) + } + }() + } + } + } + return nil +} + +// pastActiveDeadline returns true if the pod has been active for more than +// ActiveDeadlineSeconds. +func (kl *Kubelet) pastActiveDeadline(pod *api.Pod) bool { + if pod.Spec.ActiveDeadlineSeconds != nil { + podStatus, ok := kl.statusManager.GetPodStatus(pod.UID) + if !ok { + podStatus = pod.Status + } + if !podStatus.StartTime.IsZero() { + startTime := podStatus.StartTime.Time + duration := kl.clock.Since(startTime) + allowedDuration := time.Duration(*pod.Spec.ActiveDeadlineSeconds) * time.Second + if duration >= allowedDuration { + return true + } + } + } + return false +} + +// Get pods which should be resynchronized. Currently, the following pod should be resynchronized: +// * pod whose work is ready. +// * pod past the active deadline. +// * internal modules that request sync of a pod. +func (kl *Kubelet) getPodsToSync() []*api.Pod { + allPods := kl.podManager.GetPods() + podUIDs := kl.workQueue.GetWork() + podUIDSet := sets.NewString() + for _, podUID := range podUIDs { + podUIDSet.Insert(string(podUID)) + } + var podsToSync []*api.Pod + for _, pod := range allPods { + // TODO: move active deadline code into a sync/evict pattern + if kl.pastActiveDeadline(pod) { + // The pod has passed the active deadline + podsToSync = append(podsToSync, pod) + continue + } + if podUIDSet.Has(string(pod.UID)) { + // The work of the pod is ready + podsToSync = append(podsToSync, pod) + continue + } + for _, podSyncLoopHandler := range kl.PodSyncLoopHandlers { + if podSyncLoopHandler.ShouldSync(pod) { + podsToSync = append(podsToSync, pod) + break + } + } + } + return podsToSync +} + +// Returns true if pod is in the terminated state ("Failed" or "Succeeded"). +func (kl *Kubelet) podIsTerminated(pod *api.Pod) bool { + var status api.PodStatus + // Check the cached pod status which was set after the last sync. + status, ok := kl.statusManager.GetPodStatus(pod.UID) + if !ok { + // If there is no cached status, use the status from the + // apiserver. This is useful if kubelet has recently been + // restarted. + status = pod.Status + } + if status.Phase == api.PodFailed || status.Phase == api.PodSucceeded { + return true + } + + return false +} + +// filterOutTerminatedPods returns the given pods which the status manager +// does not consider failed or succeeded. +func (kl *Kubelet) filterOutTerminatedPods(pods []*api.Pod) []*api.Pod { + var filteredPods []*api.Pod + for _, p := range pods { + if kl.podIsTerminated(p) { + continue + } + filteredPods = append(filteredPods, p) + } + return filteredPods +} + +// removeOrphanedPodStatuses removes obsolete entries in podStatus where +// the pod is no longer considered bound to this node. 
+func (kl *Kubelet) removeOrphanedPodStatuses(pods []*api.Pod, mirrorPods []*api.Pod) {
+	podUIDs := make(map[types.UID]bool)
+	for _, pod := range pods {
+		podUIDs[pod.UID] = true
+	}
+	for _, pod := range mirrorPods {
+		podUIDs[pod.UID] = true
+	}
+	kl.statusManager.RemoveOrphanedStatuses(podUIDs)
+}
+
+// deletePod deletes the pod from the internal state of the kubelet by:
+// 1. stopping the associated pod worker asynchronously
+// 2. signaling to kill the pod by sending on the podKillingCh channel
+//
+// deletePod returns an error if not all sources are ready or the pod is not
+// found in the runtime cache.
+func (kl *Kubelet) deletePod(pod *api.Pod) error {
+	if pod == nil {
+		return fmt.Errorf("deletePod does not allow nil pod")
+	}
+	if !kl.sourcesReady.AllReady() {
+		// If the sources aren't ready, skip deletion, as we may accidentally delete pods
+		// for sources that haven't reported yet.
+		return fmt.Errorf("skipping delete because sources aren't ready yet")
+	}
+	kl.podWorkers.ForgetWorker(pod.UID)
+
+	// Runtime cache may not have been updated with the pod, but it's okay
+	// because the periodic cleanup routine will attempt to delete again later.
+	runningPods, err := kl.runtimeCache.GetPods()
+	if err != nil {
+		return fmt.Errorf("error listing containers: %v", err)
+	}
+	runningPod := kubecontainer.Pods(runningPods).FindPod("", pod.UID)
+	if runningPod.IsEmpty() {
+		return fmt.Errorf("pod not found")
+	}
+	podPair := kubecontainer.PodPair{APIPod: pod, RunningPod: &runningPod}
+
+	kl.podKillingCh <- &podPair
+	// TODO: delete the mirror pod here?
+
+	// We leave the volume/directory cleanup to the periodic cleanup routine.
+	return nil
+}
+
+// empty is a placeholder type used to implement a set
+type empty struct{}
+
+// HandlePodCleanups performs a series of cleanup work, including terminating
+// pod workers, killing unwanted pods, and removing orphaned volumes/pod
+// directories.
+// TODO(yujuhong): This function is executed by the main sync loop, so it
+// should not contain any blocking calls. Re-examine the function and decide
+// whether or not we should move it into a separate goroutine.
+func (kl *Kubelet) HandlePodCleanups() error {
+	allPods, mirrorPods := kl.podManager.GetPodsAndMirrorPods()
+	// Pod phase progresses monotonically. Once a pod has reached a final state,
+	// it should never leave regardless of the restart policy. The statuses
+	// of such pods should not be changed, and there is no need to sync them.
+	// TODO: the logic here does not handle two cases:
+	// 1. If the containers were removed immediately after they died, kubelet
+	//    may fail to generate correct statuses, let alone filtering correctly.
+	// 2. If kubelet restarted before writing the terminated status for a pod
+	//    to the apiserver, it could still restart the terminated pod (even
+	//    though the pod was not considered terminated by the apiserver).
+	// These two conditions could be alleviated by checkpointing kubelet.
+	activePods := kl.filterOutTerminatedPods(allPods)
+
+	desiredPods := make(map[types.UID]empty)
+	for _, pod := range activePods {
+		desiredPods[pod.UID] = empty{}
+	}
+	// Stop the workers for no-longer existing pods.
+	// TODO: is here the best place to forget pod workers?
+	kl.podWorkers.ForgetNonExistingPodWorkers(desiredPods)
+	kl.probeManager.CleanupPods(activePods)
+
+	runningPods, err := kl.runtimeCache.GetPods()
+	if err != nil {
+		glog.Errorf("Error listing containers: %#v", err)
+		return err
+	}
+	for _, pod := range runningPods {
+		if _, found := desiredPods[pod.ID]; !found {
+			kl.podKillingCh <- &kubecontainer.PodPair{APIPod: nil, RunningPod: pod}
+		}
+	}
+
+	kl.removeOrphanedPodStatuses(allPods, mirrorPods)
+	// Note that we just killed the unwanted pods. This may not have reflected
+	// in the cache. We need to bypass the cache to get the latest set of
+	// running pods to clean up the volumes.
+	// TODO: Evaluate the performance impact of bypassing the runtime cache.
+	runningPods, err = kl.containerRuntime.GetPods(false)
+	if err != nil {
+		glog.Errorf("Error listing containers: %#v", err)
+		return err
+	}
+
+	// Remove any orphaned volumes.
+	// Note that we pass all pods (including terminated pods) to the function,
+	// so that we don't remove volumes associated with terminated but not yet
+	// deleted pods.
+	err = kl.cleanupOrphanedVolumes(allPods, runningPods)
+	if err != nil {
+		glog.Errorf("Failed cleaning up orphaned volumes: %v", err)
+		return err
+	}
+
+	// Remove any orphaned pod directories.
+	// Note that we pass all pods (including terminated pods) to the function,
+	// so that we don't remove directories associated with terminated but not yet
+	// deleted pods.
+	err = kl.cleanupOrphanedPodDirs(allPods, runningPods)
+	if err != nil {
+		glog.Errorf("Failed cleaning up orphaned pod directories: %v", err)
+		return err
+	}
+
+	// Remove any orphaned mirror pods.
+	kl.podManager.DeleteOrphanedMirrorPods()
+
+	// Clear out any old bandwidth rules
+	if err = kl.cleanupBandwidthLimits(allPods); err != nil {
+		return err
+	}
+
+	kl.backOff.GC()
+	return err
+}
+
+// podKiller launches a goroutine to kill a pod received from the channel if
+// another goroutine isn't already in action.
+func (kl *Kubelet) podKiller() {
+	killing := sets.NewString()
+	resultCh := make(chan types.UID)
+	defer close(resultCh)
+	for {
+		select {
+		case podPair, ok := <-kl.podKillingCh:
+			// Check ok before dereferencing podPair: receiving on a closed
+			// channel yields a nil pair.
+			if !ok {
+				return
+			}
+			runningPod := podPair.RunningPod
+			apiPod := podPair.APIPod
+			if killing.Has(string(runningPod.ID)) {
+				// The pod is already being killed.
+				break
+			}
+			killing.Insert(string(runningPod.ID))
+			go func(apiPod *api.Pod, runningPod *kubecontainer.Pod, ch chan types.UID) {
+				defer func() {
+					ch <- runningPod.ID
+				}()
+				glog.V(2).Infof("Killing unwanted pod %q", runningPod.Name)
+				err := kl.killPod(apiPod, runningPod, nil, nil)
+				if err != nil {
+					glog.Errorf("Failed killing the pod %q: %v", runningPod.Name, err)
+				}
+			}(apiPod, runningPod, resultCh)
+
+		case podID := <-resultCh:
+			killing.Delete(string(podID))
+		}
+	}
+}
+
+// podsByCreationTime makes an array of pods sortable by their creation
+// timestamps.
+// TODO: move into util package
+type podsByCreationTime []*api.Pod
+
+func (s podsByCreationTime) Len() int {
+	return len(s)
+}
+
+func (s podsByCreationTime) Swap(i, j int) {
+	s[i], s[j] = s[j], s[i]
+}
+
+func (s podsByCreationTime) Less(i, j int) bool {
+	return s[i].CreationTimestamp.Before(s[j].CreationTimestamp)
+}
+
+// hasHostPortConflicts detects pods with conflicted host ports.
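+// For example (illustrative): if two pods in the list both declare hostPort 80,
+// the accumulated-ports check fails for the second pod scanned and the
+// function reports a conflict.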
+func hasHostPortConflicts(pods []*api.Pod) bool {
+	ports := sets.String{}
+	for _, pod := range pods {
+		if errs := validation.AccumulateUniqueHostPorts(pod.Spec.Containers, &ports, field.NewPath("spec", "containers")); len(errs) > 0 {
+			glog.Errorf("Pod %q: HostPort is already allocated, ignoring: %v", format.Pod(pod), errs)
+			return true
+		}
+		if errs := validation.AccumulateUniqueHostPorts(pod.Spec.InitContainers, &ports, field.NewPath("spec", "initContainers")); len(errs) > 0 {
+			glog.Errorf("Pod %q: HostPort is already allocated, ignoring: %v", format.Pod(pod), errs)
+			return true
+		}
+	}
+	return false
+}
+
+// isOutOfDisk detects if pods can't fit due to lack of disk space.
+func (kl *Kubelet) isOutOfDisk() bool {
+	outOfDockerDisk := false
+	outOfRootDisk := false
+	// Check disk space once globally and reject or accept all new pods.
+	withinBounds, err := kl.diskSpaceManager.IsRuntimeDiskSpaceAvailable()
+	// Assume enough space in case of errors.
+	if err == nil && !withinBounds {
+		outOfDockerDisk = true
+	}
+
+	withinBounds, err = kl.diskSpaceManager.IsRootDiskSpaceAvailable()
+	// Assume enough space in case of errors.
+	if err == nil && !withinBounds {
+		outOfRootDisk = true
+	}
+	return outOfDockerDisk || outOfRootDisk
+}
+
+// matchesNodeSelector returns true if pod matches node's labels.
+func (kl *Kubelet) matchesNodeSelector(pod *api.Pod) bool {
+	if kl.standaloneMode {
+		return true
+	}
+	node, err := kl.GetNode()
+	if err != nil {
+		glog.Errorf("error getting node: %v", err)
+		return true
+	}
+	return predicates.PodMatchesNodeLabels(pod, node)
+}
+
+// rejectPod records an event about the pod with the given reason and message,
+// and updates the pod to the failed phase in the status manager.
+func (kl *Kubelet) rejectPod(pod *api.Pod, reason, message string) {
+	kl.recorder.Eventf(pod, api.EventTypeWarning, reason, message)
+	kl.statusManager.SetPodStatus(pod, api.PodStatus{
+		Phase:   api.PodFailed,
+		Reason:  reason,
+		Message: "Pod " + message})
+}
+
+// canAdmitPod determines if a pod can be admitted, and gives a reason if it
+// cannot. "pod" is the new pod, while "pods" includes all admitted pods plus
+// the new pod. The function returns a boolean value indicating whether the pod
+// can be admitted, a brief single-word reason and a message explaining why
+// the pod cannot be admitted.
+func (kl *Kubelet) canAdmitPod(pods []*api.Pod, pod *api.Pod) (bool, string, string) {
+	node, err := kl.getNodeAnyWay()
+	if err != nil {
+		glog.Errorf("Cannot get Node info: %v", err)
+		return false, "InvalidNodeInfo", "Kubelet cannot get node info."
+	}
+	otherPods := []*api.Pod{}
+	for _, p := range pods {
+		if p != pod {
+			otherPods = append(otherPods, p)
+		}
+	}
+
+	// the kubelet will invoke each pod admit handler in sequence
+	// if any handler rejects, the pod is rejected.
+	// TODO: move predicate check into a pod admitter
+	// TODO: move out of disk check into a pod admitter
+	// TODO: out of resource eviction should have a pod admitter call-out
+	attrs := &lifecycle.PodAdmitAttributes{Pod: pod, OtherPods: otherPods}
+	for _, podAdmitHandler := range kl.PodAdmitHandlers {
+		if result := podAdmitHandler.Admit(attrs); !result.Admit {
+			return false, result.Reason, result.Message
+		}
+	}
+	nodeInfo := schedulercache.NewNodeInfo(otherPods...)
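+	// (Illustrative note, summarizing the scheduler library: GeneralPredicates
+	// runs node-level fit checks such as resource requests, host ports, node
+	// selector and host name; failures surface as the typed predicate errors
+	// handled below.)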
+	nodeInfo.SetNode(node)
+	fit, err := predicates.GeneralPredicates(pod, nodeInfo)
+	if !fit {
+		if re, ok := err.(*predicates.PredicateFailureError); ok {
+			reason := re.PredicateName
+			message := re.Error()
+			glog.V(2).Infof("Predicate failed on Pod: %v, for reason: %v", format.Pod(pod), message)
+			return fit, reason, message
+		}
+		if re, ok := err.(*predicates.InsufficientResourceError); ok {
+			reason := fmt.Sprintf("OutOf%s", re.ResourceName)
+			message := re.Error()
+			glog.V(2).Infof("Predicate failed on Pod: %v, for reason: %v", format.Pod(pod), message)
+			return fit, reason, message
+		}
+		reason := "UnexpectedPredicateFailureType"
+		message := fmt.Sprintf("GeneralPredicates failed due to %v, which is unexpected.", err)
+		glog.Warningf("Failed to admit pod %v - %s", format.Pod(pod), message)
+		return fit, reason, message
+	}
+	// TODO: When disk space scheduling is implemented (#11976), remove the out-of-disk check here and
+	// add the disk space predicate to predicates.GeneralPredicates.
+	if kl.isOutOfDisk() {
+		glog.Warningf("Failed to admit pod %v - %s", format.Pod(pod), "predicate fails due to isOutOfDisk")
+		return false, "OutOfDisk", "cannot be started due to lack of disk space."
+	}
+
+	return true, "", ""
+}
+
+// syncLoop is the main loop for processing changes. It watches for changes from
+// three channels (file, apiserver, and http) and creates a union of them. For
+// any new change seen, it runs a sync against the desired state and the running
+// state. If no changes are seen to the configuration, it synchronizes the last
+// known desired state every sync-frequency seconds. Never returns.
+func (kl *Kubelet) syncLoop(updates <-chan kubetypes.PodUpdate, handler SyncHandler) {
+	glog.Info("Starting kubelet main sync loop.")
+	// The syncTicker wakes up the kubelet to check if there are any pod workers
+	// that need to be synced. A one-second period is sufficient because the
+	// sync interval is defaulted to 10s.
+	syncTicker := time.NewTicker(time.Second)
+	housekeepingTicker := time.NewTicker(housekeepingPeriod)
+	plegCh := kl.pleg.Watch()
+	for {
+		if rs := kl.runtimeState.errors(); len(rs) != 0 {
+			glog.Infof("skipping pod synchronization - %v", rs)
+			time.Sleep(5 * time.Second)
+			continue
+		}
+		if !kl.syncLoopIteration(updates, handler, syncTicker.C, housekeepingTicker.C, plegCh) {
+			break
+		}
+	}
+}
+
+// syncLoopIteration reads from various channels and dispatches pods to the
+// given handler.
+//
+// Arguments:
+// 1. configCh: a channel to read config events from
+// 2. handler: the SyncHandler to dispatch pods to
+// 3. syncCh: a channel to read periodic sync events from
+// 4. houseKeepingCh: a channel to read housekeeping events from
+// 5. plegCh: a channel to read PLEG updates from
+//
+// Events are also read from the kubelet liveness manager's update channel.
+//
+// The workflow is to read from one of the channels, handle that event, and
+// update the timestamp in the sync loop monitor.
+//
+// Here is an appropriate place to note that despite the syntactical
+// similarity to the switch statement, the case statements in a select are
+// evaluated in a pseudorandom order if there are multiple channels ready to
+// read from when the select is evaluated. In other words, case statements
+// are evaluated in random order, and you cannot assume that the case
+// statements evaluate in order if multiple channels have events.
+//
+// With that in mind, in truly no particular order, the different channels
+// are handled as follows:
+//
+// * configCh: dispatch the pods for the config change to the appropriate
+//   handler callback for the event type
+// * plegCh: update the runtime cache; sync pod
+// * syncCh: sync all pods waiting for sync
+// * houseKeepingCh: trigger cleanup of pods
+// * liveness manager: sync pods that have failed or in which one or more
+//   containers have failed liveness checks
+func (kl *Kubelet) syncLoopIteration(configCh <-chan kubetypes.PodUpdate, handler SyncHandler,
+	syncCh <-chan time.Time, housekeepingCh <-chan time.Time, plegCh <-chan *pleg.PodLifecycleEvent) bool {
+	kl.syncLoopMonitor.Store(kl.clock.Now())
+	select {
+	case u, open := <-configCh:
+		// Update from a config source; dispatch it to the right handler
+		// callback.
+		if !open {
+			glog.Errorf("Update channel is closed. Exiting the sync loop.")
+			return false
+		}
+		kl.sourcesReady.AddSource(u.Source)
+
+		switch u.Op {
+		case kubetypes.ADD:
+			glog.V(2).Infof("SyncLoop (ADD, %q): %q", u.Source, format.Pods(u.Pods))
+			// After restarting, kubelet will get all existing pods through
+			// ADD as if they are new pods. These pods will then go through the
+			// admission process and *may* be rejected. This can be resolved
+			// once we have checkpointing.
+			handler.HandlePodAdditions(u.Pods)
+		case kubetypes.UPDATE:
+			glog.V(2).Infof("SyncLoop (UPDATE, %q): %q", u.Source, format.Pods(u.Pods))
+			handler.HandlePodUpdates(u.Pods)
+		case kubetypes.REMOVE:
+			glog.V(2).Infof("SyncLoop (REMOVE, %q): %q", u.Source, format.Pods(u.Pods))
+			handler.HandlePodDeletions(u.Pods)
+		case kubetypes.RECONCILE:
+			glog.V(4).Infof("SyncLoop (RECONCILE, %q): %q", u.Source, format.Pods(u.Pods))
+			handler.HandlePodReconcile(u.Pods)
+		case kubetypes.SET:
+			// TODO: Do we want to support this?
+			glog.Errorf("Kubelet does not support snapshot update")
+		}
+	case e := <-plegCh:
+		// PLEG event for a pod; sync it.
+		pod, ok := kl.podManager.GetPodByUID(e.ID)
+		if !ok {
+			// If the pod no longer exists, ignore the event.
+			glog.V(4).Infof("SyncLoop (PLEG): ignore irrelevant event: %#v", e)
+			break
+		}
+		glog.V(2).Infof("SyncLoop (PLEG): %q, event: %#v", format.Pod(pod), e)
+		handler.HandlePodSyncs([]*api.Pod{pod})
+	case <-syncCh:
+		// Sync pods waiting for sync
+		podsToSync := kl.getPodsToSync()
+		if len(podsToSync) == 0 {
+			break
+		}
+		glog.V(4).Infof("SyncLoop (SYNC): %d pods; %s", len(podsToSync), format.Pods(podsToSync))
+		kl.HandlePodSyncs(podsToSync)
+	case update := <-kl.livenessManager.Updates():
+		if update.Result == proberesults.Failure {
+			// The liveness manager detected a failure; sync the pod.
+
+			// We should not use the pod from livenessManager, because it is never updated after
+			// initialization.
+			pod, ok := kl.podManager.GetPodByUID(update.PodUID)
+			if !ok {
+				// If the pod no longer exists, ignore the update.
+				glog.V(4).Infof("SyncLoop (container unhealthy): ignore irrelevant update: %#v", update)
+				break
+			}
+			glog.V(1).Infof("SyncLoop (container unhealthy): %q", format.Pod(pod))
+			handler.HandlePodSyncs([]*api.Pod{pod})
+		}
+	case <-housekeepingCh:
+		if !kl.sourcesReady.AllReady() {
+			// If the sources aren't ready, skip housekeeping, as we may
+			// accidentally delete pods from unready sources.
+			glog.V(4).Infof("SyncLoop (housekeeping, skipped): sources aren't ready yet.")
+		} else {
+			glog.V(4).Infof("SyncLoop (housekeeping)")
+			if err := handler.HandlePodCleanups(); err != nil {
+				glog.Errorf("Failed cleaning pods: %v", err)
+			}
+		}
+	}
+	kl.syncLoopMonitor.Store(kl.clock.Now())
+	return true
+}
+
+// dispatchWork starts the asynchronous sync of the pod in a pod worker.
+// If the pod is terminated, dispatchWork does not start a pod worker; it only
+// forces a status update (to trigger a deletion request to the apiserver) when
+// the pod has a DeletionTimestamp set.
+func (kl *Kubelet) dispatchWork(pod *api.Pod, syncType kubetypes.SyncPodType, mirrorPod *api.Pod, start time.Time) {
+	if kl.podIsTerminated(pod) {
+		if pod.DeletionTimestamp != nil {
+			// If the pod is in a terminated state, there is no pod worker to
+			// handle the work item. Check if the DeletionTimestamp has been
+			// set, and force a status update to trigger a pod deletion request
+			// to the apiserver.
+			kl.statusManager.TerminatePod(pod)
+		}
+		return
+	}
+	// Run the sync in an async worker.
+	kl.podWorkers.UpdatePod(&UpdatePodOptions{
+		Pod:        pod,
+		MirrorPod:  mirrorPod,
+		UpdateType: syncType,
+		OnCompleteFunc: func(err error) {
+			if err != nil {
+				metrics.PodWorkerLatency.WithLabelValues(syncType.String()).Observe(metrics.SinceInMicroseconds(start))
+			}
+		},
+	})
+	// Note the number of containers for new pods.
+	if syncType == kubetypes.SyncPodCreate {
+		metrics.ContainersPerPodCount.Observe(float64(len(pod.Spec.Containers)))
+	}
+}
+
+// TODO: Consider handling all mirror pods updates in a separate component.
+func (kl *Kubelet) handleMirrorPod(mirrorPod *api.Pod, start time.Time) {
+	// Mirror pod ADD/UPDATE/DELETE operations are considered an UPDATE to the
+	// corresponding static pod. Send update to the pod worker if the static
+	// pod exists.
+	if pod, ok := kl.podManager.GetPodByMirrorPod(mirrorPod); ok {
+		kl.dispatchWork(pod, kubetypes.SyncPodUpdate, mirrorPod, start)
+	}
+}
+
+// HandlePodAdditions is the callback in SyncHandler for pods being added from
+// a config source.
+func (kl *Kubelet) HandlePodAdditions(pods []*api.Pod) {
+	start := kl.clock.Now()
+	sort.Sort(podsByCreationTime(pods))
+	for _, pod := range pods {
+		kl.podManager.AddPod(pod)
+		if kubepod.IsMirrorPod(pod) {
+			kl.handleMirrorPod(pod, start)
+			continue
+		}
+		// Note that allPods includes the new pod since we added it at the
+		// beginning of the loop.
+		allPods := kl.podManager.GetPods()
+		// We fail pods that we have rejected, so activePods includes all admitted
+		// pods that are alive, plus the new pod.
+		activePods := kl.filterOutTerminatedPods(allPods)
+		// Check if we can admit the pod; if not, reject it.
+		if ok, reason, message := kl.canAdmitPod(activePods, pod); !ok {
+			kl.rejectPod(pod, reason, message)
+			continue
+		}
+		mirrorPod, _ := kl.podManager.GetMirrorPodByPod(pod)
+		kl.dispatchWork(pod, kubetypes.SyncPodCreate, mirrorPod, start)
+		kl.probeManager.AddPod(pod)
+	}
+}
+
+// HandlePodUpdates is the callback in the SyncHandler interface for pods
+// being updated from a config source.
+func (kl *Kubelet) HandlePodUpdates(pods []*api.Pod) {
+	start := kl.clock.Now()
+	for _, pod := range pods {
+		kl.podManager.UpdatePod(pod)
+		if kubepod.IsMirrorPod(pod) {
+			kl.handleMirrorPod(pod, start)
+			continue
+		}
+		// TODO: Evaluate if we need to validate and reject updates.
+
+		mirrorPod, _ := kl.podManager.GetMirrorPodByPod(pod)
+		kl.dispatchWork(pod, kubetypes.SyncPodUpdate, mirrorPod, start)
+	}
+}
+
+// HandlePodDeletions is the callback in the SyncHandler interface for pods
+// being deleted from a config source.
+func (kl *Kubelet) HandlePodDeletions(pods []*api.Pod) { + start := kl.clock.Now() + for _, pod := range pods { + kl.podManager.DeletePod(pod) + if kubepod.IsMirrorPod(pod) { + kl.handleMirrorPod(pod, start) + continue + } + // Deletion is allowed to fail because the periodic cleanup routine + // will trigger deletion again. + if err := kl.deletePod(pod); err != nil { + glog.V(2).Infof("Failed to delete pod %q, err: %v", format.Pod(pod), err) + } + kl.probeManager.RemovePod(pod) + } +} + +// HandlePodReconcile is the callback in the SyncHandler interface for pods +// that should be reconciled. +func (kl *Kubelet) HandlePodReconcile(pods []*api.Pod) { + for _, pod := range pods { + // Update the pod in the pod manager; the status manager will + // periodically reconcile against the pod manager. + kl.podManager.UpdatePod(pod) + } +} + +// HandlePodSyncs is the callback in the SyncHandler interface for pods +// that should be dispatched to pod workers for sync. +func (kl *Kubelet) HandlePodSyncs(pods []*api.Pod) { + start := kl.clock.Now() + for _, pod := range pods { + mirrorPod, _ := kl.podManager.GetMirrorPodByPod(pod) + kl.dispatchWork(pod, kubetypes.SyncPodSync, mirrorPod, start) + } +} + +// LatestLoopEntryTime returns the last time in the sync loop monitor. +func (kl *Kubelet) LatestLoopEntryTime() time.Time { + val := kl.syncLoopMonitor.Load() + if val == nil { + return time.Time{} + } + return val.(time.Time) +} + +// PLEGHealthCheck returns whether the PLEG is healthy. +func (kl *Kubelet) PLEGHealthCheck() (bool, error) { + return kl.pleg.Healthy() +} + +// validateContainerLogStatus returns the container ID for the desired container to retrieve logs for, based on the state +// of the container. When the previous flag is set, only the logs of the last terminated container are returned; otherwise, +// the current running container is preferred over a previous termination. If info about the container is not available then +// a specific error is returned to the end user.
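+// +// Hedged usage sketch (pod and container names are hypothetical): +// +//	id, err := kl.validateContainerLogStatus("mypod", &pod.Status, "app", true) +// +// With previous=true this succeeds only if LastTerminationState records a +// termination; with previous=false a running container's ID takes precedence.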
+func (kl *Kubelet) validateContainerLogStatus(podName string, podStatus *api.PodStatus, containerName string, previous bool) (containerID kubecontainer.ContainerID, err error) { + var cID string + + cStatus, found := api.GetContainerStatus(podStatus.ContainerStatuses, containerName) + if !found { + return kubecontainer.ContainerID{}, fmt.Errorf("container %q in pod %q is not available", containerName, podName) + } + lastState := cStatus.LastTerminationState + waiting, running, terminated := cStatus.State.Waiting, cStatus.State.Running, cStatus.State.Terminated + + switch { + case previous: + if lastState.Terminated == nil { + return kubecontainer.ContainerID{}, fmt.Errorf("previous terminated container %q in pod %q not found", containerName, podName) + } + cID = lastState.Terminated.ContainerID + + case running != nil: + cID = cStatus.ContainerID + + case terminated != nil: + cID = terminated.ContainerID + + case lastState.Terminated != nil: + cID = lastState.Terminated.ContainerID + + case waiting != nil: + // output some info for the most common pending failures + switch reason := waiting.Reason; reason { + case kubecontainer.ErrImagePull.Error(): + return kubecontainer.ContainerID{}, fmt.Errorf("container %q in pod %q is waiting to start: image can't be pulled", containerName, podName) + case kubecontainer.ErrImagePullBackOff.Error(): + return kubecontainer.ContainerID{}, fmt.Errorf("container %q in pod %q is waiting to start: trying and failing to pull image", containerName, podName) + default: + return kubecontainer.ContainerID{}, fmt.Errorf("container %q in pod %q is waiting to start: %v", containerName, podName, reason) + } + default: + // unrecognized state + return kubecontainer.ContainerID{}, fmt.Errorf("container %q in pod %q is waiting to start - no logs yet", containerName, podName) + } + + return kubecontainer.ParseContainerID(cID), nil +} + +// GetKubeletContainerLogs returns logs from the container +// TODO: this method is returning logs of random container attempts, when it should be returning the most recent attempt +// or all of them. +func (kl *Kubelet) GetKubeletContainerLogs(podFullName, containerName string, logOptions *api.PodLogOptions, stdout, stderr io.Writer) error { + // TODO(vmarmol): Refactor to not need the pod status and verification. + // Pod workers periodically write status to statusManager. If status is not + // cached there, something is wrong (or kubelet just restarted and hasn't + // caught up yet). Just assume the pod is not ready yet. + name, namespace, err := kubecontainer.ParsePodFullName(podFullName) + if err != nil { + return fmt.Errorf("unable to parse pod full name %q: %v", podFullName, err) + } + + pod, ok := kl.GetPodByName(namespace, name) + if !ok { + return fmt.Errorf("pod %q cannot be found - no logs available", name) + } + + podUID := pod.UID + if mirrorPod, ok := kl.podManager.GetMirrorPodByPod(pod); ok { + podUID = mirrorPod.UID + } + podStatus, found := kl.statusManager.GetPodStatus(podUID) + if !found { + // If there is no cached status, use the status from the + // apiserver. This is useful if kubelet has recently been + // restarted. 
+ podStatus = pod.Status + } + + containerID, err := kl.validateContainerLogStatus(pod.Name, &podStatus, containerName, logOptions.Previous) + if err != nil { + return err + } + return kl.containerRuntime.GetContainerLogs(pod, containerID, logOptions, stdout, stderr) +} + +// updateRuntimeUp calls the container runtime status callback, initializing +// the runtime dependent modules when the container runtime first comes up, +// and logs an error if the status check fails. If the status check is OK, +// it updates the container runtime uptime in the kubelet runtimeState. +func (kl *Kubelet) updateRuntimeUp() { + if err := kl.containerRuntime.Status(); err != nil { + glog.Errorf("Container runtime sanity check failed: %v", err) + return + } + kl.oneTimeInitializer.Do(kl.initializeRuntimeDependentModules) + kl.runtimeState.setRuntimeSync(kl.clock.Now()) +} + +// TODO: remove when kubenet plugin is ready +// NOTE!!! if you make changes here, also make them to kubenet +func (kl *Kubelet) reconcileCBR0(podCIDR string) error { + if podCIDR == "" { + glog.V(5).Info("PodCIDR not set. Will not configure cbr0.") + return nil + } + glog.V(5).Infof("PodCIDR is set to %q", podCIDR) + _, cidr, err := net.ParseCIDR(podCIDR) + if err != nil { + return err + } + // Set cbr0 interface address to first address in IPNet + cidr.IP.To4()[3] += 1 + if err := ensureCbr0(cidr, kl.hairpinMode == componentconfig.PromiscuousBridge, kl.babysitDaemons); err != nil { + return err + } + if kl.shapingEnabled() { + if kl.shaper == nil { + glog.V(5).Info("Shaper is nil, creating") + kl.shaper = bandwidth.NewTCShaper("cbr0") + } + return kl.shaper.ReconcileInterface() + } + return nil +} + +// updateNodeStatus updates node status to master with retries. +func (kl *Kubelet) updateNodeStatus() error { + for i := 0; i < nodeStatusUpdateRetry; i++ { + if err := kl.tryUpdateNodeStatus(); err != nil { + glog.Errorf("Error updating node status, will retry: %v", err) + } else { + return nil + } + } + return fmt.Errorf("update node status exceeds retry count") +} + +// recordNodeStatusEvent records an event of the given type with the given +// message for the node. +func (kl *Kubelet) recordNodeStatusEvent(eventtype, event string) { + glog.V(2).Infof("Recording %s event message for node %s", event, kl.nodeName) + // TODO: This requires a transaction, either both node status is updated + // and event is recorded or neither should happen, see issue #6055. + kl.recorder.Eventf(kl.nodeRef, eventtype, event, "Node %s status is now: %s", kl.nodeName, event) +} + +// syncNetworkStatus updates the network state, ensuring that the network is +// configured correctly if the kubelet is set to configure cbr0: +// * handshake with the flannel helper if the flannel experimental overlay is being used. +// * ensure that iptables masq rules are set up +// * reconcile cbr0 with the pod CIDR +func (kl *Kubelet) syncNetworkStatus() { + var err error + if kl.configureCBR0 { + if kl.flannelExperimentalOverlay { + podCIDR, err := kl.flannelHelper.Handshake() + if err != nil { + glog.Infof("Flannel server handshake failed %v", err) + return + } + kl.updatePodCIDR(podCIDR) + } + if err := ensureIPTablesMasqRule(kl.nonMasqueradeCIDR); err != nil { + err = fmt.Errorf("Error on adding ip table rules: %v", err) + glog.Error(err) + kl.runtimeState.setNetworkState(err) + return + } + podCIDR := kl.runtimeState.podCIDR() + if len(podCIDR) == 0 { + err = fmt.Errorf("ConfigureCBR0 requested, but PodCIDR not set.
Will not configure CBR0 right now") + glog.Warning(err) + } else if err = kl.reconcileCBR0(podCIDR); err != nil { + err = fmt.Errorf("Error configuring cbr0: %v", err) + glog.Error(err) + } + if err != nil { + kl.runtimeState.setNetworkState(err) + return + } + } + + kl.runtimeState.setNetworkState(kl.networkPlugin.Status()) +} + +// Set addresses for the node. +func (kl *Kubelet) setNodeAddress(node *api.Node) error { + // Set addresses for the node. + if kl.cloud != nil { + instances, ok := kl.cloud.Instances() + if !ok { + return fmt.Errorf("failed to get instances from cloud provider") + } + // TODO(roberthbailey): Can we do this without having credentials to talk + // to the cloud provider? + // TODO(justinsb): We can if CurrentNodeName() was actually CurrentNode() and returned an interface + nodeAddresses, err := instances.NodeAddresses(kl.nodeName) + if err != nil { + return fmt.Errorf("failed to get node address from cloud provider: %v", err) + } + node.Status.Addresses = nodeAddresses + } else { + if kl.nodeIP != nil { + node.Status.Addresses = []api.NodeAddress{ + {Type: api.NodeLegacyHostIP, Address: kl.nodeIP.String()}, + {Type: api.NodeInternalIP, Address: kl.nodeIP.String()}, + } + } else if addr := net.ParseIP(kl.hostname); addr != nil { + node.Status.Addresses = []api.NodeAddress{ + {Type: api.NodeLegacyHostIP, Address: addr.String()}, + {Type: api.NodeInternalIP, Address: addr.String()}, + } + } else { + addrs, err := net.LookupIP(node.Name) + if err != nil { + return fmt.Errorf("can't get ip address of node %s: %v", node.Name, err) + } else if len(addrs) == 0 { + return fmt.Errorf("no ip address for node %v", node.Name) + } else { + // check all ip addresses for this node.Name and try to find the first non-loopback IPv4 address. + // If no match is found, it uses the IP of the interface with gateway on it. + for _, ip := range addrs { + if ip.IsLoopback() { + continue + } + + if ip.To4() != nil { + node.Status.Addresses = []api.NodeAddress{ + {Type: api.NodeLegacyHostIP, Address: ip.String()}, + {Type: api.NodeInternalIP, Address: ip.String()}, + } + break + } + } + + if len(node.Status.Addresses) == 0 { + ip, err := utilnet.ChooseHostInterface() + if err != nil { + return err + } + + node.Status.Addresses = []api.NodeAddress{ + {Type: api.NodeLegacyHostIP, Address: ip.String()}, + {Type: api.NodeInternalIP, Address: ip.String()}, + } + } + } + } + } + return nil +} + +func (kl *Kubelet) updateCloudProviderFromMachineInfo(node *api.Node, info *cadvisorapi.MachineInfo) { + if info.CloudProvider != cadvisorapi.UnknownProvider && + info.CloudProvider != cadvisorapi.Baremetal { + // The cloud providers from pkg/cloudprovider/providers/* that update ProviderID + // will use the format of cloudprovider://project/availability_zone/instance_name + // here we only have the cloudprovider and the instance name so we leave project + // and availability zone empty for compatibility. + node.Spec.ProviderID = strings.ToLower(string(info.CloudProvider)) + + ":////" + string(info.InstanceID) + } +} + +func (kl *Kubelet) setNodeStatusMachineInfo(node *api.Node) { + // TODO: Post NotReady if we cannot get MachineInfo from cAdvisor. This needs to start + // cAdvisor locally, e.g. for test-cmd.sh, and in integration test. + info, err := kl.GetCachedMachineInfo() + if err != nil { + // TODO(roberthbailey): This is required for test-cmd.sh to pass. + // See if the test should be updated instead. 
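+ // Fall back to a zero-capacity view of the node (while keeping the + // configured pod and GPU limits below) so the Node object stays + // well-formed even while cAdvisor is unavailable.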
+ node.Status.Capacity = api.ResourceList{ + api.ResourceCPU: *resource.NewMilliQuantity(0, resource.DecimalSI), + api.ResourceMemory: resource.MustParse("0Gi"), + api.ResourcePods: *resource.NewQuantity(int64(kl.maxPods), resource.DecimalSI), + api.ResourceNvidiaGPU: *resource.NewQuantity(int64(kl.nvidiaGPUs), resource.DecimalSI), + } + glog.Errorf("Error getting machine info: %v", err) + } else { + node.Status.NodeInfo.MachineID = info.MachineID + node.Status.NodeInfo.SystemUUID = info.SystemUUID + node.Status.Capacity = cadvisor.CapacityFromMachineInfo(info) + if kl.podsPerCore > 0 { + node.Status.Capacity[api.ResourcePods] = *resource.NewQuantity( + int64(math.Min(float64(info.NumCores*kl.podsPerCore), float64(kl.maxPods))), resource.DecimalSI) + } else { + node.Status.Capacity[api.ResourcePods] = *resource.NewQuantity( + int64(kl.maxPods), resource.DecimalSI) + } + node.Status.Capacity[api.ResourceNvidiaGPU] = *resource.NewQuantity( + int64(kl.nvidiaGPUs), resource.DecimalSI) + if node.Status.NodeInfo.BootID != "" && + node.Status.NodeInfo.BootID != info.BootID { + // TODO: This requires a transaction, either both node status is updated + // and event is recorded or neither should happen, see issue #6055. + kl.recorder.Eventf(kl.nodeRef, api.EventTypeWarning, kubecontainer.NodeRebooted, + "Node %s has been rebooted, boot id: %s", kl.nodeName, info.BootID) + } + node.Status.NodeInfo.BootID = info.BootID + } + + // Set Allocatable. + node.Status.Allocatable = make(api.ResourceList) + for k, v := range node.Status.Capacity { + value := *(v.Copy()) + if kl.reservation.System != nil { + value.Sub(kl.reservation.System[k]) + } + if kl.reservation.Kubernetes != nil { + value.Sub(kl.reservation.Kubernetes[k]) + } + if value.Sign() < 0 { + // Negative Allocatable resources don't make sense. + value.Set(0) + } + node.Status.Allocatable[k] = value + } +} + +// Set versioninfo for the node. +func (kl *Kubelet) setNodeStatusVersionInfo(node *api.Node) { + verinfo, err := kl.cadvisor.VersionInfo() + if err != nil { + glog.Errorf("Error getting version info: %v", err) + } else { + node.Status.NodeInfo.KernelVersion = verinfo.KernelVersion + node.Status.NodeInfo.OSImage = verinfo.ContainerOsVersion + + runtimeVersion := "Unknown" + if runtimeVer, err := kl.containerRuntime.Version(); err == nil { + runtimeVersion = runtimeVer.String() + } + node.Status.NodeInfo.ContainerRuntimeVersion = fmt.Sprintf("%s://%s", kl.containerRuntime.Type(), runtimeVersion) + + node.Status.NodeInfo.KubeletVersion = version.Get().String() + // TODO: kube-proxy might be different version from kubelet in the future + node.Status.NodeInfo.KubeProxyVersion = version.Get().String() + } + +} + +// Set daemonEndpoints for the node. +func (kl *Kubelet) setNodeStatusDaemonEndpoints(node *api.Node) { + node.Status.DaemonEndpoints = *kl.daemonEndpoints +} + +// Set images list for the node +func (kl *Kubelet) setNodeStatusImages(node *api.Node) { + // Update image list of this node + var imagesOnNode []api.ContainerImage + containerImages, err := kl.imageManager.GetImageList() + if err != nil { + glog.Errorf("Error getting image list: %v", err) + } else { + // sort the images from max to min, and only set top N images into the node status. 
+ sort.Sort(byImageSize(containerImages)) + if maxImagesInNodeStatus < len(containerImages) { + containerImages = containerImages[0:maxImagesInNodeStatus] + } + + for _, image := range containerImages { + imagesOnNode = append(imagesOnNode, api.ContainerImage{ + Names: append(image.RepoTags, image.RepoDigests...), + SizeBytes: image.Size, + }) + } + } + node.Status.Images = imagesOnNode +} + +// Set the GOOS and GOARCH for this node +func (kl *Kubelet) setNodeStatusGoRuntime(node *api.Node) { + node.Status.NodeInfo.OperatingSystem = goRuntime.GOOS + node.Status.NodeInfo.Architecture = goRuntime.GOARCH +} + +type byImageSize []kubecontainer.Image + +// Sort from max to min +func (a byImageSize) Less(i, j int) bool { + return a[i].Size > a[j].Size +} +func (a byImageSize) Len() int { return len(a) } +func (a byImageSize) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +// Set status for the node. +func (kl *Kubelet) setNodeStatusInfo(node *api.Node) { + kl.setNodeStatusMachineInfo(node) + kl.setNodeStatusVersionInfo(node) + kl.setNodeStatusDaemonEndpoints(node) + kl.setNodeStatusImages(node) + kl.setNodeStatusGoRuntime(node) +} + +// Set Readycondition for the node. +func (kl *Kubelet) setNodeReadyCondition(node *api.Node) { + // NOTE(aaronlevy): NodeReady condition needs to be the last in the list of node conditions. + // This is due to an issue with version skewed kubelet and master components. + // ref: https://github.com/kubernetes/kubernetes/issues/16961 + currentTime := unversioned.NewTime(kl.clock.Now()) + var newNodeReadyCondition api.NodeCondition + if rs := kl.runtimeState.errors(); len(rs) == 0 { + newNodeReadyCondition = api.NodeCondition{ + Type: api.NodeReady, + Status: api.ConditionTrue, + Reason: "KubeletReady", + Message: "kubelet is posting ready status", + LastHeartbeatTime: currentTime, + } + } else { + newNodeReadyCondition = api.NodeCondition{ + Type: api.NodeReady, + Status: api.ConditionFalse, + Reason: "KubeletNotReady", + Message: strings.Join(rs, ","), + LastHeartbeatTime: currentTime, + } + } + + // Record any soft requirements that were not met in the container manager. + status := kl.containerManager.Status() + if status.SoftRequirements != nil { + newNodeReadyCondition.Message = fmt.Sprintf("%s. WARNING: %s", newNodeReadyCondition.Message, status.SoftRequirements.Error()) + } + + readyConditionUpdated := false + needToRecordEvent := false + for i := range node.Status.Conditions { + if node.Status.Conditions[i].Type == api.NodeReady { + if node.Status.Conditions[i].Status == newNodeReadyCondition.Status { + newNodeReadyCondition.LastTransitionTime = node.Status.Conditions[i].LastTransitionTime + } else { + newNodeReadyCondition.LastTransitionTime = currentTime + needToRecordEvent = true + } + node.Status.Conditions[i] = newNodeReadyCondition + readyConditionUpdated = true + break + } + } + if !readyConditionUpdated { + newNodeReadyCondition.LastTransitionTime = currentTime + node.Status.Conditions = append(node.Status.Conditions, newNodeReadyCondition) + } + if needToRecordEvent { + if newNodeReadyCondition.Status == api.ConditionTrue { + kl.recordNodeStatusEvent(api.EventTypeNormal, kubecontainer.NodeReady) + } else { + kl.recordNodeStatusEvent(api.EventTypeNormal, kubecontainer.NodeNotReady) + } + } +} + +// setNodeMemoryPressureCondition for the node. +// TODO: this needs to move somewhere centralized... 
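+// +// For illustration, when the eviction manager reports memory pressure the +// condition this function produces looks roughly like: +// +//	api.NodeCondition{ +//	    Type:    api.NodeMemoryPressure, +//	    Status:  api.ConditionTrue, +//	    Reason:  "KubeletHasInsufficientMemory", +//	    Message: "kubelet has insufficient memory available", +//	}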
+func (kl *Kubelet) setNodeMemoryPressureCondition(node *api.Node) { + currentTime := unversioned.NewTime(kl.clock.Now()) + var condition *api.NodeCondition + + // Check if NodeMemoryPressure condition already exists and if it does, just pick it up for update. + for i := range node.Status.Conditions { + if node.Status.Conditions[i].Type == api.NodeMemoryPressure { + condition = &node.Status.Conditions[i] + } + } + + newCondition := false + // If the NodeMemoryPressure condition doesn't exist, create one + if condition == nil { + condition = &api.NodeCondition{ + Type: api.NodeMemoryPressure, + Status: api.ConditionUnknown, + } + // the condition cannot be appended to node.Status.Conditions here, + // because append would copy the value into the slice and none of the + // updates we make below would be reflected in that copy. + newCondition = true + } + + // Update the heartbeat time + condition.LastHeartbeatTime = currentTime + + // Note: The conditions below take care of the case when a new NodeMemoryPressure condition is + // created, as well as the case when the condition already exists. When a new condition + // is created its status is set to api.ConditionUnknown which matches either + // condition.Status != api.ConditionTrue or + // condition.Status != api.ConditionFalse in the conditions below depending on whether + // the kubelet is under memory pressure or not. + if kl.evictionManager.IsUnderMemoryPressure() { + if condition.Status != api.ConditionTrue { + condition.Status = api.ConditionTrue + condition.Reason = "KubeletHasInsufficientMemory" + condition.Message = "kubelet has insufficient memory available" + condition.LastTransitionTime = currentTime + kl.recordNodeStatusEvent(api.EventTypeNormal, "NodeHasInsufficientMemory") + } + } else { + if condition.Status != api.ConditionFalse { + condition.Status = api.ConditionFalse + condition.Reason = "KubeletHasSufficientMemory" + condition.Message = "kubelet has sufficient memory available" + condition.LastTransitionTime = currentTime + kl.recordNodeStatusEvent(api.EventTypeNormal, "NodeHasSufficientMemory") + } + } + + if newCondition { + node.Status.Conditions = append(node.Status.Conditions, *condition) + } +} + +// Set OODcondition for the node. +func (kl *Kubelet) setNodeOODCondition(node *api.Node) { + currentTime := unversioned.NewTime(kl.clock.Now()) + var nodeOODCondition *api.NodeCondition + + // Check if NodeOutOfDisk condition already exists and if it does, just pick it up for update. + for i := range node.Status.Conditions { + if node.Status.Conditions[i].Type == api.NodeOutOfDisk { + nodeOODCondition = &node.Status.Conditions[i] + } + } + + newOODCondition := false + // If the NodeOutOfDisk condition doesn't exist, create one. + if nodeOODCondition == nil { + nodeOODCondition = &api.NodeCondition{ + Type: api.NodeOutOfDisk, + Status: api.ConditionUnknown, + } + // nodeOODCondition cannot be appended to node.Status.Conditions here, + // because append would copy the value into the slice and none of the + // updates we make to nodeOODCondition below would be reflected in that copy. + newOODCondition = true + } + + // Update the heartbeat time irrespective of all the conditions. + nodeOODCondition.LastHeartbeatTime = currentTime + + // Note: The conditions below take care of the case when a new NodeOutOfDisk condition is + // created, as well as the case when the condition already exists.
When a new condition + // is created its status is set to api.ConditionUnknown which matches either + // nodeOODCondition.Status != api.ConditionTrue or + // nodeOODCondition.Status != api.ConditionFalse in the conditions below depending on whether + // the kubelet is out of disk or not. + if kl.isOutOfDisk() { + if nodeOODCondition.Status != api.ConditionTrue { + nodeOODCondition.Status = api.ConditionTrue + nodeOODCondition.Reason = "KubeletOutOfDisk" + nodeOODCondition.Message = "out of disk space" + nodeOODCondition.LastTransitionTime = currentTime + kl.recordNodeStatusEvent(api.EventTypeNormal, "NodeOutOfDisk") + } + } else { + if nodeOODCondition.Status != api.ConditionFalse { + // Update the out of disk condition when the condition status is unknown even if we + // are within the outOfDiskTransitionFrequency duration. We do this to set the + // condition status correctly at kubelet startup. + if nodeOODCondition.Status == api.ConditionUnknown || kl.clock.Since(nodeOODCondition.LastTransitionTime.Time) >= kl.outOfDiskTransitionFrequency { + nodeOODCondition.Status = api.ConditionFalse + nodeOODCondition.Reason = "KubeletHasSufficientDisk" + nodeOODCondition.Message = "kubelet has sufficient disk space available" + nodeOODCondition.LastTransitionTime = currentTime + kl.recordNodeStatusEvent(api.EventTypeNormal, "NodeHasSufficientDisk") + } else { + glog.Infof("Node condition status for OutOfDisk is false, but last transition time is less than %s", kl.outOfDiskTransitionFrequency) + } + } + } + + if newOODCondition { + node.Status.Conditions = append(node.Status.Conditions, *nodeOODCondition) + } +} + +// Maintains Node.Spec.Unschedulable value from previous run of tryUpdateNodeStatus() +var oldNodeUnschedulable bool + +// record an event if the node's schedulable state changed. +func (kl *Kubelet) recordNodeSchedulableEvent(node *api.Node) { + if oldNodeUnschedulable != node.Spec.Unschedulable { + if node.Spec.Unschedulable { + kl.recordNodeStatusEvent(api.EventTypeNormal, kubecontainer.NodeNotSchedulable) + } else { + kl.recordNodeStatusEvent(api.EventTypeNormal, kubecontainer.NodeSchedulable) + } + oldNodeUnschedulable = node.Spec.Unschedulable + } +} + +// setNodeStatus fills in the Status fields of the given Node, overwriting +// any fields that are currently set. +// TODO(madhusudancs): Simplify the logic for setting node conditions and +// refactor the node status condition code out to a different file.
+func (kl *Kubelet) setNodeStatus(node *api.Node) error { + for _, f := range kl.setNodeStatusFuncs { + if err := f(node); err != nil { + return err + } + } + return nil +} + +// defaultNodeStatusFuncs is a factory that generates the default set of setNodeStatus funcs +func (kl *Kubelet) defaultNodeStatusFuncs() []func(*api.Node) error { + // initial set of node status update handlers, can be modified by Options + withoutError := func(f func(*api.Node)) func(*api.Node) error { + return func(n *api.Node) error { + f(n) + return nil + } + } + return []func(*api.Node) error{ + kl.setNodeAddress, + withoutError(kl.setNodeStatusInfo), + withoutError(kl.setNodeOODCondition), + withoutError(kl.setNodeMemoryPressureCondition), + withoutError(kl.setNodeReadyCondition), + withoutError(kl.recordNodeSchedulableEvent), + } +} + +// SetNodeStatus returns a functional Option that adds the given node status update handler to the Kubelet +func SetNodeStatus(f func(*api.Node) error) Option { + return func(k *Kubelet) { + k.setNodeStatusFuncs = append(k.setNodeStatusFuncs, f) + } +} + +// tryUpdateNodeStatus tries to update node status to master. If ReconcileCBR0 +// is set, this function will also confirm that cbr0 is configured correctly. +func (kl *Kubelet) tryUpdateNodeStatus() error { + node, err := kl.kubeClient.Core().Nodes().Get(kl.nodeName) + if err != nil { + return fmt.Errorf("error getting node %q: %v", kl.nodeName, err) + } + if node == nil { + return fmt.Errorf("no node instance returned for %q", kl.nodeName) + } + // Flannel is the authoritative source of pod CIDR, if it's running. + // This is a short term compromise till we get flannel working in + // reservation mode. + if kl.flannelExperimentalOverlay { + flannelPodCIDR := kl.runtimeState.podCIDR() + if node.Spec.PodCIDR != flannelPodCIDR { + node.Spec.PodCIDR = flannelPodCIDR + glog.Infof("Updating podcidr to %v", node.Spec.PodCIDR) + if updatedNode, err := kl.kubeClient.Core().Nodes().Update(node); err != nil { + glog.Warningf("Failed to update podCIDR: %v", err) + } else { + // Update the node resourceVersion so the status update doesn't fail. + node = updatedNode + } + } + } else if kl.reconcileCIDR { + kl.updatePodCIDR(node.Spec.PodCIDR) + } + + if err := kl.setNodeStatus(node); err != nil { + return err + } + // Update the current status on the API server + _, err = kl.kubeClient.Core().Nodes().UpdateStatus(node) + return err +} + +// GetPhase returns the phase of a pod given its container info. +// This func is exported to simplify integration with third-party kubelet +// integrations like kubernetes-mesos.
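+// +// Two worked cases that follow from the rules below: with RestartPolicy=Never, +// a pod whose only container terminated with exit code 0 yields api.PodSucceeded +// and a non-zero exit code yields api.PodFailed; with RestartPolicy=Always the +// same terminated containers yield api.PodRunning, because they are considered +// to be restarting.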
+func GetPhase(spec *api.PodSpec, info []api.ContainerStatus) api.PodPhase { + initialized := 0 + pendingInitialization := 0 + failedInitialization := 0 + for _, container := range spec.InitContainers { + containerStatus, ok := api.GetContainerStatus(info, container.Name) + if !ok { + pendingInitialization++ + continue + } + + switch { + case containerStatus.State.Running != nil: + pendingInitialization++ + case containerStatus.State.Terminated != nil: + if containerStatus.State.Terminated.ExitCode == 0 { + initialized++ + } else { + failedInitialization++ + } + case containerStatus.State.Waiting != nil: + if containerStatus.LastTerminationState.Terminated != nil { + if containerStatus.LastTerminationState.Terminated.ExitCode == 0 { + initialized++ + } else { + failedInitialization++ + } + } else { + pendingInitialization++ + } + default: + pendingInitialization++ + } + } + + unknown := 0 + running := 0 + waiting := 0 + stopped := 0 + failed := 0 + succeeded := 0 + for _, container := range spec.Containers { + containerStatus, ok := api.GetContainerStatus(info, container.Name) + if !ok { + unknown++ + continue + } + + switch { + case containerStatus.State.Running != nil: + running++ + case containerStatus.State.Terminated != nil: + stopped++ + if containerStatus.State.Terminated.ExitCode == 0 { + succeeded++ + } else { + failed++ + } + case containerStatus.State.Waiting != nil: + if containerStatus.LastTerminationState.Terminated != nil { + stopped++ + } else { + waiting++ + } + default: + unknown++ + } + } + + if failedInitialization > 0 && spec.RestartPolicy == api.RestartPolicyNever { + return api.PodFailed + } + + switch { + case pendingInitialization > 0: + fallthrough + case waiting > 0: + glog.V(5).Infof("pod waiting > 0, pending") + // One or more containers have not been started + return api.PodPending + case running > 0 && unknown == 0: + // All containers have been started, and at least + // one container is running + return api.PodRunning + case running == 0 && stopped > 0 && unknown == 0: + // All containers are terminated + if spec.RestartPolicy == api.RestartPolicyAlways { + // All containers are in the process of restarting + return api.PodRunning + } + if stopped == succeeded { + // RestartPolicy is not Always, and all + // containers terminated successfully + return api.PodSucceeded + } + if spec.RestartPolicy == api.RestartPolicyNever { + // RestartPolicy is Never, and all containers are + // terminated with at least one in failure + return api.PodFailed + } + // RestartPolicy is OnFailure, and at least one container + // failed and is in the process of restarting + return api.PodRunning + default: + glog.V(5).Infof("pod default case, pending") + return api.PodPending + } +} + +// generateAPIPodStatus creates the final API pod status for a pod, given the +// internal pod status. +func (kl *Kubelet) generateAPIPodStatus(pod *api.Pod, podStatus *kubecontainer.PodStatus) api.PodStatus { + glog.V(3).Infof("Generating status for %q", format.Pod(pod)) + + // check if an internal module has requested that the pod be evicted. + for _, podSyncHandler := range kl.PodSyncHandlers { + if result := podSyncHandler.ShouldEvict(pod); result.Evict { + return api.PodStatus{ + Phase: api.PodFailed, + Reason: result.Reason, + Message: result.Message, + } + } + } + + // TODO: Consider including the container information.
+ // TODO: move this into a sync/evictor + if kl.pastActiveDeadline(pod) { + reason := "DeadlineExceeded" + kl.recorder.Eventf(pod, api.EventTypeNormal, reason, "Pod was active on the node longer than specified deadline") + return api.PodStatus{ + Phase: api.PodFailed, + Reason: reason, + Message: "Pod was active on the node longer than specified deadline"} + } + + s := kl.convertStatusToAPIStatus(pod, podStatus) + + // Assume info is ready to process + spec := &pod.Spec + allStatus := append(append([]api.ContainerStatus{}, s.ContainerStatuses...), s.InitContainerStatuses...) + s.Phase = GetPhase(spec, allStatus) + kl.probeManager.UpdatePodStatus(pod.UID, s) + s.Conditions = append(s.Conditions, status.GeneratePodInitializedCondition(spec, s.InitContainerStatuses, s.Phase)) + s.Conditions = append(s.Conditions, status.GeneratePodReadyCondition(spec, s.ContainerStatuses, s.Phase)) + // s (the PodStatus we are creating) will not have a PodScheduled condition yet, because convertStatusToAPIStatus() + // does not create one. If the existing PodStatus has a PodScheduled condition, then copy it into s and make sure + // it is set to true. If the existing PodStatus does not have a PodScheduled condition, then create one that is set to true. + if _, oldPodScheduled := api.GetPodCondition(&pod.Status, api.PodScheduled); oldPodScheduled != nil { + s.Conditions = append(s.Conditions, *oldPodScheduled) + } + api.UpdatePodCondition(&pod.Status, &api.PodCondition{ + Type: api.PodScheduled, + Status: api.ConditionTrue, + }) + + if !kl.standaloneMode { + hostIP, err := kl.GetHostIP() + if err != nil { + glog.V(4).Infof("Cannot get host IP: %v", err) + } else { + s.HostIP = hostIP.String() + if podUsesHostNetwork(pod) && s.PodIP == "" { + s.PodIP = hostIP.String() + } + } + } + + return *s +} + +// convertStatusToAPIStatus creates an api PodStatus for the given pod from +// the given internal pod status. It is purely transformative and does not +// alter the kubelet state at all.
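+// +// Simplified sketch of how generateAPIPodStatus above consumes it: +// +//	s := kl.convertStatusToAPIStatus(pod, podStatus) +//	all := append(append([]api.ContainerStatus{}, s.ContainerStatuses...), s.InitContainerStatuses...) +//	s.Phase = GetPhase(&pod.Spec, all)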
+func (kl *Kubelet) convertStatusToAPIStatus(pod *api.Pod, podStatus *kubecontainer.PodStatus) *api.PodStatus { + var apiPodStatus api.PodStatus + apiPodStatus.PodIP = podStatus.IP + + apiPodStatus.ContainerStatuses = kl.convertToAPIContainerStatuses( + pod, podStatus, + pod.Status.ContainerStatuses, + pod.Spec.Containers, + len(pod.Spec.InitContainers) > 0, + false, + ) + apiPodStatus.InitContainerStatuses = kl.convertToAPIContainerStatuses( + pod, podStatus, + pod.Status.InitContainerStatuses, + pod.Spec.InitContainers, + len(pod.Spec.InitContainers) > 0, + true, + ) + + return &apiPodStatus +} + +func (kl *Kubelet) convertToAPIContainerStatuses(pod *api.Pod, podStatus *kubecontainer.PodStatus, previousStatus []api.ContainerStatus, containers []api.Container, hasInitContainers, isInitContainer bool) []api.ContainerStatus { + convertContainerStatus := func(cs *kubecontainer.ContainerStatus) *api.ContainerStatus { + cid := cs.ID.String() + status := &api.ContainerStatus{ + Name: cs.Name, + RestartCount: int32(cs.RestartCount), + Image: cs.Image, + ImageID: cs.ImageID, + ContainerID: cid, + } + switch cs.State { + case kubecontainer.ContainerStateRunning: + status.State.Running = &api.ContainerStateRunning{StartedAt: unversioned.NewTime(cs.StartedAt)} + case kubecontainer.ContainerStateExited: + status.State.Terminated = &api.ContainerStateTerminated{ + ExitCode: int32(cs.ExitCode), + Reason: cs.Reason, + Message: cs.Message, + StartedAt: unversioned.NewTime(cs.StartedAt), + FinishedAt: unversioned.NewTime(cs.FinishedAt), + ContainerID: cid, + } + default: + status.State.Waiting = &api.ContainerStateWaiting{} + } + return status + } + + // Fetch old container statuses from the old pod status. + oldStatuses := make(map[string]api.ContainerStatus, len(containers)) + for _, status := range previousStatus { + oldStatuses[status.Name] = status + } + + // Set all container statuses to default waiting state + statuses := make(map[string]*api.ContainerStatus, len(containers)) + defaultWaitingState := api.ContainerState{Waiting: &api.ContainerStateWaiting{Reason: "ContainerCreating"}} + if hasInitContainers { + defaultWaitingState = api.ContainerState{Waiting: &api.ContainerStateWaiting{Reason: "PodInitializing"}} + } + + for _, container := range containers { + status := &api.ContainerStatus{ + Name: container.Name, + Image: container.Image, + State: defaultWaitingState, + } + // Apply some values from the old statuses as the default values. + if oldStatus, found := oldStatuses[container.Name]; found { + status.RestartCount = oldStatus.RestartCount + status.LastTerminationState = oldStatus.LastTerminationState + } + statuses[container.Name] = status + } + + // Make the latest container status come first. + sort.Sort(sort.Reverse(kubecontainer.SortContainerStatusesByCreationTime(podStatus.ContainerStatuses))) + // Set container statuses according to the statuses seen in pod status + containerSeen := map[string]int{} + for _, cStatus := range podStatus.ContainerStatuses { + cName := cStatus.Name + if _, ok := statuses[cName]; !ok { + // This would also ignore the infra container. + continue + } + if containerSeen[cName] >= 2 { + continue + } + status := convertContainerStatus(cStatus) + if containerSeen[cName] == 0 { + statuses[cName] = status + } else { + statuses[cName].LastTerminationState = status.State + } + containerSeen[cName] = containerSeen[cName] + 1 + } + + // Handle containers that failed to start; they should be in the Waiting state.
+ for _, container := range containers { + if isInitContainer { + // If the init container is terminated with exit code 0, it won't be restarted. + // TODO(random-liu): Handle this in a cleaner way. + s := podStatus.FindContainerStatusByName(container.Name) + if s != nil && s.State == kubecontainer.ContainerStateExited && s.ExitCode == 0 { + continue + } + } + // If a container should be restarted in the next syncPod iteration, it is *Waiting*. + if !kubecontainer.ShouldContainerBeRestarted(&container, pod, podStatus) { + continue + } + status := statuses[container.Name] + reason, message, ok := kl.reasonCache.Get(pod.UID, container.Name) + if !ok { + // In fact, we could also apply Waiting state here, but it is less informative, + // and the container will be restarted soon, so we prefer the original state here. + // Note that with the current implementation of ShouldContainerBeRestarted the original state here + // could be: + // * Waiting: There is no associated historical container and start failure reason record. + // * Terminated: The container is terminated. + continue + } + if status.State.Terminated != nil { + status.LastTerminationState = status.State + } + status.State = api.ContainerState{ + Waiting: &api.ContainerStateWaiting{ + Reason: reason.Error(), + Message: message, + }, + } + statuses[container.Name] = status + } + + var containerStatuses []api.ContainerStatus + for _, status := range statuses { + containerStatuses = append(containerStatuses, *status) + } + + // Sort the container statuses since clients of this interface expect the list + // of containers in a pod to have a deterministic order. + sort.Sort(kubetypes.SortedContainerStatuses(containerStatuses)) + return containerStatuses +} + +// Returns logs of current machine. +func (kl *Kubelet) ServeLogs(w http.ResponseWriter, req *http.Request) { + // TODO: whitelist logs we are willing to serve + kl.logServer.ServeHTTP(w, req) +} + +// findContainer finds and returns the container with the given pod ID, full name, and container name. +// It returns nil if not found. +func (kl *Kubelet) findContainer(podFullName string, podUID types.UID, containerName string) (*kubecontainer.Container, error) { + pods, err := kl.containerRuntime.GetPods(false) + if err != nil { + return nil, err + } + pod := kubecontainer.Pods(pods).FindPod(podFullName, podUID) + return pod.FindContainerByName(containerName), nil +} + +// Run a command in a container; returns the combined stdout and stderr as a slice of bytes +func (kl *Kubelet) RunInContainer(podFullName string, podUID types.UID, containerName string, cmd []string) ([]byte, error) { + podUID = kl.podManager.TranslatePodUID(podUID) + + container, err := kl.findContainer(podFullName, podUID, containerName) + if err != nil { + return nil, err + } + if container == nil { + return nil, fmt.Errorf("container not found (%q)", containerName) + } + + var buffer bytes.Buffer + output := ioutils.WriteCloserWrapper(&buffer) + err = kl.runner.ExecInContainer(container.ID, cmd, nil, output, output, false) + if err != nil { + return nil, err + } + + return buffer.Bytes(), nil +} + +// ExecInContainer executes a command in a container, connecting the supplied +// stdin/stdout/stderr to the command's IO streams.
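+// +// Hedged usage sketch (pod and container names are hypothetical): +// +//	var out bytes.Buffer +//	w := ioutils.WriteCloserWrapper(&out) +//	err := kl.ExecInContainer("mypod_default", "", "app", []string{"uname", "-a"}, nil, w, w, false) +// +// A nil stdin and tty=false run the command non-interactively, much as +// RunInContainer above does.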
+func (kl *Kubelet) ExecInContainer(podFullName string, podUID types.UID, containerName string, cmd []string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool) error { + podUID = kl.podManager.TranslatePodUID(podUID) + + container, err := kl.findContainer(podFullName, podUID, containerName) + if err != nil { + return err + } + if container == nil { + return fmt.Errorf("container not found (%q)", containerName) + } + return kl.runner.ExecInContainer(container.ID, cmd, stdin, stdout, stderr, tty) +} + +// AttachContainer uses the container runtime to attach the given streams to +// the given container. +func (kl *Kubelet) AttachContainer(podFullName string, podUID types.UID, containerName string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool) error { + podUID = kl.podManager.TranslatePodUID(podUID) + + container, err := kl.findContainer(podFullName, podUID, containerName) + if err != nil { + return err + } + if container == nil { + return fmt.Errorf("container not found (%q)", containerName) + } + return kl.containerRuntime.AttachContainer(container.ID, stdin, stdout, stderr, tty) +} + +// PortForward connects to the pod's port and copies data between the port +// and the stream. +func (kl *Kubelet) PortForward(podFullName string, podUID types.UID, port uint16, stream io.ReadWriteCloser) error { + podUID = kl.podManager.TranslatePodUID(podUID) + + pods, err := kl.containerRuntime.GetPods(false) + if err != nil { + return err + } + pod := kubecontainer.Pods(pods).FindPod(podFullName, podUID) + if pod.IsEmpty() { + return fmt.Errorf("pod not found (%q)", podFullName) + } + return kl.runner.PortForward(&pod, port, stream) +} + +// BirthCry sends an event that the kubelet has started up. +func (kl *Kubelet) BirthCry() { + // Make an event that kubelet restarted. + kl.recorder.Eventf(kl.nodeRef, api.EventTypeNormal, kubecontainer.StartingKubelet, "Starting kubelet.") +} + +// StreamingConnectionIdleTimeout returns the timeout for streaming connections to the HTTP server. +func (kl *Kubelet) StreamingConnectionIdleTimeout() time.Duration { + return kl.streamingConnectionIdleTimeout +} + +// ResyncInterval returns the interval used for periodic syncs. +func (kl *Kubelet) ResyncInterval() time.Duration { + return kl.resyncInterval +} + +// ListenAndServe runs the kubelet HTTP server. +func (kl *Kubelet) ListenAndServe(address net.IP, port uint, tlsOptions *server.TLSOptions, auth server.AuthInterface, enableDebuggingHandlers bool) { + server.ListenAndServeKubeletServer(kl, kl.resourceAnalyzer, address, port, tlsOptions, auth, enableDebuggingHandlers, kl.containerRuntime) +} + +// ListenAndServeReadOnly runs the kubelet HTTP server in read-only mode. +func (kl *Kubelet) ListenAndServeReadOnly(address net.IP, port uint) { + server.ListenAndServeKubeletReadOnlyServer(kl, kl.resourceAnalyzer, address, port, kl.containerRuntime) +} + +// updatePodCIDR updates the pod CIDR in the runtime state if it is different +// from the current CIDR. +func (kl *Kubelet) updatePodCIDR(cidr string) { + if kl.runtimeState.podCIDR() == cidr { + return + } + + glog.Infof("Setting Pod CIDR: %v -> %v", kl.runtimeState.podCIDR(), cidr) + kl.runtimeState.setPodCIDR(cidr) + + if kl.networkPlugin != nil { + details := make(map[string]interface{}) + details[network.NET_PLUGIN_EVENT_POD_CIDR_CHANGE_DETAIL_CIDR] = cidr + kl.networkPlugin.Event(network.NET_PLUGIN_EVENT_POD_CIDR_CHANGE, details) + } +} + +// shapingEnabled returns whether traffic shaping is enabled. 
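+// +// Shaping is deferred to the network plugin whenever the plugin advertises +// the shaping capability; only then does the kubelet skip its own cbr0 +// traffic shaper (see reconcileCBR0 above).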
+func (kl *Kubelet) shapingEnabled() bool { + // Disable shaping if a network plugin is defined and supports shaping + if kl.networkPlugin != nil && kl.networkPlugin.Capabilities().Has(network.NET_PLUGIN_CAPABILITY_SHAPING) { + return false + } + return true +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/kubelet_cadvisor.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/kubelet_cadvisor.go new file mode 100644 index 000000000000..76bf6ceb343c --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/kubelet_cadvisor.go @@ -0,0 +1,89 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubelet + +import ( + cadvisorapi "github.com/google/cadvisor/info/v1" + cadvisorapiv2 "github.com/google/cadvisor/info/v2" + kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" + "k8s.io/kubernetes/pkg/types" +) + +// GetContainerInfo returns stats (from Cadvisor) for a container. +func (kl *Kubelet) GetContainerInfo(podFullName string, podUID types.UID, containerName string, req *cadvisorapi.ContainerInfoRequest) (*cadvisorapi.ContainerInfo, error) { + + podUID = kl.podManager.TranslatePodUID(podUID) + + pods, err := kl.runtimeCache.GetPods() + if err != nil { + return nil, err + } + pod := kubecontainer.Pods(pods).FindPod(podFullName, podUID) + container := pod.FindContainerByName(containerName) + if container == nil { + return nil, kubecontainer.ErrContainerNotFound + } + + ci, err := kl.cadvisor.DockerContainer(container.ID.ID, req) + if err != nil { + return nil, err + } + return &ci, nil +} + +// GetContainerInfoV2 returns stats (from Cadvisor) for containers. +func (kl *Kubelet) GetContainerInfoV2(name string, options cadvisorapiv2.RequestOptions) (map[string]cadvisorapiv2.ContainerInfo, error) { + return kl.cadvisor.ContainerInfoV2(name, options) +} + +// ImagesFsInfo returns information about docker image fs usage from +// cadvisor. +func (kl *Kubelet) ImagesFsInfo() (cadvisorapiv2.FsInfo, error) { + return kl.cadvisor.ImagesFsInfo() +} + +// RootFsInfo returns info about the root fs from cadvisor. +func (kl *Kubelet) RootFsInfo() (cadvisorapiv2.FsInfo, error) { + return kl.cadvisor.RootFsInfo() +} + +// Returns stats (from Cadvisor) for a non-Kubernetes container. 
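+// +// Illustrative call (the container name is an example; req is a +// *cadvisorapi.ContainerInfoRequest): +// +//	infos, err := kl.GetRawContainerInfo("/kubelet", req, true) +// +// With subcontainers=true the returned map covers the named container and its +// descendants; with false it holds a single entry keyed by the container name.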
+func (kl *Kubelet) GetRawContainerInfo(containerName string, req *cadvisorapi.ContainerInfoRequest, subcontainers bool) (map[string]*cadvisorapi.ContainerInfo, error) { + if subcontainers { + return kl.cadvisor.SubcontainerInfo(containerName, req) + } else { + containerInfo, err := kl.cadvisor.ContainerInfo(containerName, req) + if err != nil { + return nil, err + } + return map[string]*cadvisorapi.ContainerInfo{ + containerInfo.Name: containerInfo, + }, nil + } +} + +// GetCachedMachineInfo assumes that the machine info can't change without a reboot +func (kl *Kubelet) GetCachedMachineInfo() (*cadvisorapi.MachineInfo, error) { + if kl.machineInfo == nil { + info, err := kl.cadvisor.MachineInfo() + if err != nil { + return nil, err + } + kl.machineInfo = info + } + return kl.machineInfo, nil +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/kubelet_cadvisor_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/kubelet_cadvisor_test.go new file mode 100644 index 000000000000..4b15e45aac4f --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/kubelet_cadvisor_test.go @@ -0,0 +1,232 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubelet + +import ( + "fmt" + "testing" + + cadvisorapi "github.com/google/cadvisor/info/v1" + kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" +) + +func TestGetContainerInfo(t *testing.T) { + containerID := "ab2cdf" + containerPath := fmt.Sprintf("/docker/%v", containerID) + containerInfo := cadvisorapi.ContainerInfo{ + ContainerReference: cadvisorapi.ContainerReference{ + Name: containerPath, + }, + } + + testKubelet := newTestKubelet(t) + fakeRuntime := testKubelet.fakeRuntime + kubelet := testKubelet.kubelet + cadvisorReq := &cadvisorapi.ContainerInfoRequest{} + mockCadvisor := testKubelet.fakeCadvisor + mockCadvisor.On("DockerContainer", containerID, cadvisorReq).Return(containerInfo, nil) + fakeRuntime.PodList = []*kubecontainer.Pod{ + { + ID: "12345678", + Name: "qux", + Namespace: "ns", + Containers: []*kubecontainer.Container{ + { + Name: "foo", + ID: kubecontainer.ContainerID{Type: "test", ID: containerID}, + }, + }, + }, + } + stats, err := kubelet.GetContainerInfo("qux_ns", "", "foo", cadvisorReq) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + if stats == nil { + t.Fatalf("stats should not be nil") + } + mockCadvisor.AssertExpectations(t) +} + +func TestGetRawContainerInfoRoot(t *testing.T) { + containerPath := "/" + containerInfo := &cadvisorapi.ContainerInfo{ + ContainerReference: cadvisorapi.ContainerReference{ + Name: containerPath, + }, + } + testKubelet := newTestKubelet(t) + kubelet := testKubelet.kubelet + mockCadvisor := testKubelet.fakeCadvisor + cadvisorReq := &cadvisorapi.ContainerInfoRequest{} + mockCadvisor.On("ContainerInfo", containerPath, cadvisorReq).Return(containerInfo, nil) + + _, err := kubelet.GetRawContainerInfo(containerPath, cadvisorReq, false) + if err != nil { + t.Errorf("unexpected error: %v", err) + 
} + mockCadvisor.AssertExpectations(t) +} + +func TestGetRawContainerInfoSubcontainers(t *testing.T) { + containerPath := "/kubelet" + containerInfo := map[string]*cadvisorapi.ContainerInfo{ + containerPath: { + ContainerReference: cadvisorapi.ContainerReference{ + Name: containerPath, + }, + }, + "/kubelet/sub": { + ContainerReference: cadvisorapi.ContainerReference{ + Name: "/kubelet/sub", + }, + }, + } + testKubelet := newTestKubelet(t) + kubelet := testKubelet.kubelet + mockCadvisor := testKubelet.fakeCadvisor + cadvisorReq := &cadvisorapi.ContainerInfoRequest{} + mockCadvisor.On("SubcontainerInfo", containerPath, cadvisorReq).Return(containerInfo, nil) + + result, err := kubelet.GetRawContainerInfo(containerPath, cadvisorReq, true) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + if len(result) != 2 { + t.Errorf("Expected 2 elements, received: %+v", result) + } + mockCadvisor.AssertExpectations(t) +} + +func TestGetContainerInfoWhenCadvisorFailed(t *testing.T) { + containerID := "ab2cdf" + testKubelet := newTestKubelet(t) + kubelet := testKubelet.kubelet + mockCadvisor := testKubelet.fakeCadvisor + fakeRuntime := testKubelet.fakeRuntime + cadvisorApiFailure := fmt.Errorf("cAdvisor failure") + containerInfo := cadvisorapi.ContainerInfo{} + cadvisorReq := &cadvisorapi.ContainerInfoRequest{} + mockCadvisor.On("DockerContainer", containerID, cadvisorReq).Return(containerInfo, cadvisorApiFailure) + fakeRuntime.PodList = []*kubecontainer.Pod{ + { + ID: "uuid", + Name: "qux", + Namespace: "ns", + Containers: []*kubecontainer.Container{ + {Name: "foo", + ID: kubecontainer.ContainerID{Type: "test", ID: containerID}, + }, + }, + }, + } + stats, err := kubelet.GetContainerInfo("qux_ns", "uuid", "foo", cadvisorReq) + if stats != nil { + t.Errorf("non-nil stats on error") + } + if err == nil { + t.Errorf("expect error but received nil error") + return + } + if err.Error() != cadvisorApiFailure.Error() { + t.Errorf("wrong error message. 
expect %v, got %v", cadvisorApiFailure, err) + } + mockCadvisor.AssertExpectations(t) +} + +func TestGetContainerInfoOnNonExistContainer(t *testing.T) { + testKubelet := newTestKubelet(t) + kubelet := testKubelet.kubelet + mockCadvisor := testKubelet.fakeCadvisor + fakeRuntime := testKubelet.fakeRuntime + fakeRuntime.PodList = []*kubecontainer.Pod{} + + stats, _ := kubelet.GetContainerInfo("qux", "", "foo", nil) + if stats != nil { + t.Errorf("non-nil stats on non exist container") + } + mockCadvisor.AssertExpectations(t) +} + +func TestGetContainerInfoWhenContainerRuntimeFailed(t *testing.T) { + testKubelet := newTestKubelet(t) + kubelet := testKubelet.kubelet + mockCadvisor := testKubelet.fakeCadvisor + fakeRuntime := testKubelet.fakeRuntime + expectedErr := fmt.Errorf("List containers error") + fakeRuntime.Err = expectedErr + + stats, err := kubelet.GetContainerInfo("qux", "", "foo", nil) + if err == nil { + t.Errorf("expected error from dockertools, got none") + } + if err.Error() != expectedErr.Error() { + t.Errorf("expected error %v got %v", expectedErr.Error(), err.Error()) + } + if stats != nil { + t.Errorf("non-nil stats when dockertools failed") + } + mockCadvisor.AssertExpectations(t) +} + +func TestGetContainerInfoWithNoContainers(t *testing.T) { + testKubelet := newTestKubelet(t) + kubelet := testKubelet.kubelet + mockCadvisor := testKubelet.fakeCadvisor + + stats, err := kubelet.GetContainerInfo("qux_ns", "", "foo", nil) + if err == nil { + t.Errorf("expected error from cadvisor client, got none") + } + if err != kubecontainer.ErrContainerNotFound { + t.Errorf("expected error %v, got %v", kubecontainer.ErrContainerNotFound.Error(), err.Error()) + } + if stats != nil { + t.Errorf("non-nil stats when dockertools returned no containers") + } + mockCadvisor.AssertExpectations(t) +} + +func TestGetContainerInfoWithNoMatchingContainers(t *testing.T) { + testKubelet := newTestKubelet(t) + fakeRuntime := testKubelet.fakeRuntime + kubelet := testKubelet.kubelet + mockCadvisor := testKubelet.fakeCadvisor + fakeRuntime.PodList = []*kubecontainer.Pod{ + { + ID: "12345678", + Name: "qux", + Namespace: "ns", + Containers: []*kubecontainer.Container{ + {Name: "bar", + ID: kubecontainer.ContainerID{Type: "test", ID: "fakeID"}, + }, + }}, + } + + stats, err := kubelet.GetContainerInfo("qux_ns", "", "foo", nil) + if err == nil { + t.Errorf("Expected error from cadvisor client, got none") + } + if err != kubecontainer.ErrContainerNotFound { + t.Errorf("Expected error %v, got %v", kubecontainer.ErrContainerNotFound.Error(), err.Error()) + } + if stats != nil { + t.Errorf("non-nil stats when dockertools returned no containers") + } + mockCadvisor.AssertExpectations(t) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/kubelet_getters.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/kubelet_getters.go new file mode 100644 index 000000000000..6c594f537339 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/kubelet_getters.go @@ -0,0 +1,220 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubelet + +import ( + "fmt" + "net" + "path" + + "github.com/golang/glog" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/kubelet/cm" + kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" + "k8s.io/kubernetes/pkg/types" + nodeutil "k8s.io/kubernetes/pkg/util/node" +) + +// getRootDir returns the full path to the directory under which kubelet can +// store data. These functions are useful to pass interfaces to other modules +// that may need to know where to write data without getting a whole kubelet +// instance. +func (kl *Kubelet) getRootDir() string { + return kl.rootDirectory +} + +// getPodsDir returns the full path to the directory under which pod +// directories are created. +func (kl *Kubelet) getPodsDir() string { + return path.Join(kl.getRootDir(), "pods") +} + +// getPluginsDir returns the full path to the directory under which plugin +// directories are created. Plugins can use these directories for data that +// they need to persist. Plugins should create subdirectories under this named +// after their own names. +func (kl *Kubelet) getPluginsDir() string { + return path.Join(kl.getRootDir(), "plugins") +} + +// getPluginDir returns a data directory name for a given plugin name. +// Plugins can use these directories to store data that they need to persist. +// For per-pod plugin data, see getPodPluginDir. +func (kl *Kubelet) getPluginDir(pluginName string) string { + return path.Join(kl.getPluginsDir(), pluginName) +} + +// GetPodDir returns the full path to the per-pod data directory for the +// specified pod. This directory may not exist if the pod does not exist. +func (kl *Kubelet) GetPodDir(podUID types.UID) string { + return kl.getPodDir(podUID) +} + +// getPodDir returns the full path to the per-pod directory for the pod with +// the given UID. +func (kl *Kubelet) getPodDir(podUID types.UID) string { + // Backwards compat. The "old" stuff should be removed before 1.0 + // release. The thinking here is this: + // !old && !new = use new + // !old && new = use new + // old && !new = use old + // old && new = use new (but warn) + oldPath := path.Join(kl.getRootDir(), string(podUID)) + oldExists := dirExists(oldPath) + newPath := path.Join(kl.getPodsDir(), string(podUID)) + newExists := dirExists(newPath) + if oldExists && !newExists { + return oldPath + } + if oldExists { + glog.Warningf("Data dir for pod %q exists in both old and new form, using new", podUID) + } + return newPath +} + +// getPodVolumesDir returns the full path to the per-pod data directory under +// which volumes are created for the specified pod. This directory may not +// exist if the pod does not exist. +func (kl *Kubelet) getPodVolumesDir(podUID types.UID) string { + return path.Join(kl.getPodDir(podUID), "volumes") +} + +// getPodVolumeDir returns the full path to the directory which represents the +// named volume under the named plugin for specified pod. This directory may not +// exist if the pod does not exist. +func (kl *Kubelet) getPodVolumeDir(podUID types.UID, pluginName string, volumeName string) string { + return path.Join(kl.getPodVolumesDir(podUID), pluginName, volumeName) +} + +// getPodPluginsDir returns the full path to the per-pod data directory under +// which plugins may store data for the specified pod. This directory may not +// exist if the pod does not exist. 
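+// +// Relative to the kubelet root directory the layout is, for example: +// +//	pods/<podUID>/plugins/<pluginName> +// +// mirroring the top-level plugins/<pluginName> layout that getPluginDir +// provides for non-per-pod data.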
+func (kl *Kubelet) getPodPluginsDir(podUID types.UID) string { + return path.Join(kl.getPodDir(podUID), "plugins") +} + +// getPodPluginDir returns a data directory name for a given plugin name for a +// given pod UID. Plugins can use these directories to store data that they +// need to persist. For non-per-pod plugin data, see getPluginDir. +func (kl *Kubelet) getPodPluginDir(podUID types.UID, pluginName string) string { + return path.Join(kl.getPodPluginsDir(podUID), pluginName) +} + +// getPodContainerDir returns the full path to the per-pod data directory under +// which container data is held for the specified pod. This directory may not +// exist if the pod or container does not exist. +func (kl *Kubelet) getPodContainerDir(podUID types.UID, ctrName string) string { + // Backwards compat. The "old" stuff should be removed before 1.0 + // release. The thinking here is this: + // !old && !new = use new + // !old && new = use new + // old && !new = use old + // old && new = use new (but warn) + oldPath := path.Join(kl.getPodDir(podUID), ctrName) + oldExists := dirExists(oldPath) + newPath := path.Join(kl.getPodDir(podUID), "containers", ctrName) + newExists := dirExists(newPath) + if oldExists && !newExists { + return oldPath + } + if oldExists { + glog.Warningf("Data dir for pod %q, container %q exists in both old and new form, using new", podUID, ctrName) + } + return newPath +} + +// GetPods returns all pods bound to the kubelet and their spec, and the mirror +// pods. +func (kl *Kubelet) GetPods() []*api.Pod { + return kl.podManager.GetPods() +} + +// GetRunningPods returns all pods running on kubelet from looking at the +// container runtime cache. This function converts kubecontainer.Pod to +// api.Pod, so only the fields that exist in both kubecontainer.Pod and +// api.Pod are considered meaningful. +func (kl *Kubelet) GetRunningPods() ([]*api.Pod, error) { + pods, err := kl.runtimeCache.GetPods() + if err != nil { + return nil, err + } + + apiPods := make([]*api.Pod, 0, len(pods)) + for _, pod := range pods { + apiPods = append(apiPods, pod.ToAPIPod()) + } + return apiPods, nil +} + +// GetPodByFullName gets the pod with the given 'full' name, which +// incorporates the namespace, and returns whether the pod was found. +func (kl *Kubelet) GetPodByFullName(podFullName string) (*api.Pod, bool) { + return kl.podManager.GetPodByFullName(podFullName) +} + +// GetPodByName provides the first pod that matches namespace and name, as well +// as whether the pod was found. +func (kl *Kubelet) GetPodByName(namespace, name string) (*api.Pod, bool) { + return kl.podManager.GetPodByName(namespace, name) +} + +// GetHostname returns the hostname as the kubelet sees it. +func (kl *Kubelet) GetHostname() string { + return kl.hostname +} + +// GetRuntime returns the current Runtime implementation in use by the kubelet. This func +// is exported to simplify integration with third party kubelet extensions (e.g. kubernetes-mesos). +func (kl *Kubelet) GetRuntime() kubecontainer.Runtime { + return kl.containerRuntime +} + +// GetNode returns the node info for the configured node name of this Kubelet. +func (kl *Kubelet) GetNode() (*api.Node, error) { + if kl.standaloneMode { + return kl.initialNodeStatus() + } + return kl.nodeInfo.GetNodeInfo(kl.nodeName) +} + +// getNodeAnyWay() must return a *api.Node which is required by RunGeneralPredicates(). 
+// The *api.Node is obtained as follows: +// Return kubelet's nodeInfo for this node, except on error or if in standalone mode, +// in which case return a manufactured nodeInfo representing a node with no pods, +// zero capacity, and the default labels. +func (kl *Kubelet) getNodeAnyWay() (*api.Node, error) { + if !kl.standaloneMode { + if n, err := kl.nodeInfo.GetNodeInfo(kl.nodeName); err == nil { + return n, nil + } + } + return kl.initialNodeStatus() +} + +// GetNodeConfig returns the container manager node config. +func (kl *Kubelet) GetNodeConfig() cm.NodeConfig { + return kl.containerManager.GetNodeConfig() +} + +// GetHostIP returns the host IP, or nil and an error if it cannot be determined. +func (kl *Kubelet) GetHostIP() (net.IP, error) { + node, err := kl.GetNode() + if err != nil { + return nil, fmt.Errorf("cannot get node: %v", err) + } + return nodeutil.GetNodeHostIP(node) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/kubelet_getters_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/kubelet_getters_test.go new file mode 100644 index 000000000000..b3cd0d9adc3a --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/kubelet_getters_test.go @@ -0,0 +1,179 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package kubelet + +import ( + "fmt" + "os" + "path" + "testing" +) + +func TestKubeletDirs(t *testing.T) { + testKubelet := newTestKubelet(t) + kubelet := testKubelet.kubelet + root := kubelet.rootDirectory + + var exp, got string + + got = kubelet.getPodsDir() + exp = path.Join(root, "pods") + if got != exp { + t.Errorf("expected %q, got %q", exp, got) + } + + got = kubelet.getPluginsDir() + exp = path.Join(root, "plugins") + if got != exp { + t.Errorf("expected %q, got %q", exp, got) + } + + got = kubelet.getPluginDir("foobar") + exp = path.Join(root, "plugins/foobar") + if got != exp { + t.Errorf("expected %q, got %q", exp, got) + } + + got = kubelet.getPodDir("abc123") + exp = path.Join(root, "pods/abc123") + if got != exp { + t.Errorf("expected %q, got %q", exp, got) + } + + got = kubelet.getPodVolumesDir("abc123") + exp = path.Join(root, "pods/abc123/volumes") + if got != exp { + t.Errorf("expected %q, got %q", exp, got) + } + + got = kubelet.getPodVolumeDir("abc123", "plugin", "foobar") + exp = path.Join(root, "pods/abc123/volumes/plugin/foobar") + if got != exp { + t.Errorf("expected %q, got %q", exp, got) + } + + got = kubelet.getPodPluginsDir("abc123") + exp = path.Join(root, "pods/abc123/plugins") + if got != exp { + t.Errorf("expected %q, got %q", exp, got) + } + + got = kubelet.getPodPluginDir("abc123", "foobar") + exp = path.Join(root, "pods/abc123/plugins/foobar") + if got != exp { + t.Errorf("expected %q, got %q", exp, got) + } + + got = kubelet.getPodContainerDir("abc123", "def456") + exp = path.Join(root, "pods/abc123/containers/def456") + if got != exp { + t.Errorf("expected %q, got %q", exp, got) + } +} + +func TestKubeletDirsCompat(t *testing.T) { + testKubelet := newTestKubelet(t) + kubelet := testKubelet.kubelet + root := kubelet.rootDirectory + if err := os.MkdirAll(root, 0750); err != nil { + t.Fatalf("can't mkdir(%q): %s", root, err) + } + + var exp, got string + + // Old-style pod dir. + if err := os.MkdirAll(fmt.Sprintf("%s/oldpod", root), 0750); err != nil { + t.Fatalf("can't mkdir(%q): %s", root, err) + } + // New-style pod dir. + if err := os.MkdirAll(fmt.Sprintf("%s/pods/newpod", root), 0750); err != nil { + t.Fatalf("can't mkdir(%q): %s", root, err) + } + // Both-style pod dir. + if err := os.MkdirAll(fmt.Sprintf("%s/bothpod", root), 0750); err != nil { + t.Fatalf("can't mkdir(%q): %s", root, err) + } + if err := os.MkdirAll(fmt.Sprintf("%s/pods/bothpod", root), 0750); err != nil { + t.Fatalf("can't mkdir(%q): %s", root, err) + } + + got = kubelet.getPodDir("oldpod") + exp = path.Join(root, "oldpod") + if got != exp { + t.Errorf("expected %q, got %q", exp, got) + } + + got = kubelet.getPodDir("newpod") + exp = path.Join(root, "pods/newpod") + if got != exp { + t.Errorf("expected %q, got %q", exp, got) + } + + got = kubelet.getPodDir("bothpod") + exp = path.Join(root, "pods/bothpod") + if got != exp { + t.Errorf("expected %q, got %q", exp, got) + } + + got = kubelet.getPodDir("neitherpod") + exp = path.Join(root, "pods/neitherpod") + if got != exp { + t.Errorf("expected %q, got %q", exp, got) + } + + root = kubelet.getPodDir("newpod") + + // Old-style container dir. + if err := os.MkdirAll(fmt.Sprintf("%s/oldctr", root), 0750); err != nil { + t.Fatalf("can't mkdir(%q): %s", root, err) + } + // New-style container dir. + if err := os.MkdirAll(fmt.Sprintf("%s/containers/newctr", root), 0750); err != nil { + t.Fatalf("can't mkdir(%q): %s", root, err) + } + // Both-style container dir. 
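+ // (With both layouts present below, getPodContainerDir should prefer the new + // layout and log a warning, mirroring the pod-directory cases above; this note + // restates the resolution table in getPodContainerDir, not an extra assertion.)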
+ if err := os.MkdirAll(fmt.Sprintf("%s/bothctr", root), 0750); err != nil { + t.Fatalf("can't mkdir(%q): %s", root, err) + } + if err := os.MkdirAll(fmt.Sprintf("%s/containers/bothctr", root), 0750); err != nil { + t.Fatalf("can't mkdir(%q): %s", root, err) + } + + got = kubelet.getPodContainerDir("newpod", "oldctr") + exp = path.Join(root, "oldctr") + if got != exp { + t.Errorf("expected %q', got %q", exp, got) + } + + got = kubelet.getPodContainerDir("newpod", "newctr") + exp = path.Join(root, "containers/newctr") + if got != exp { + t.Errorf("expected %q', got %q", exp, got) + } + + got = kubelet.getPodContainerDir("newpod", "bothctr") + exp = path.Join(root, "containers/bothctr") + if got != exp { + t.Errorf("expected %q', got %q", exp, got) + } + + got = kubelet.getPodContainerDir("newpod", "neitherctr") + exp = path.Join(root, "containers/neitherctr") + if got != exp { + t.Errorf("expected %q', got %q", exp, got) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/kubelet_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/kubelet_test.go new file mode 100644 index 000000000000..347fcd801bc3 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/kubelet_test.go @@ -0,0 +1,4776 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package kubelet + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "os" + "reflect" + "sort" + "strconv" + "strings" + "testing" + "time" + + cadvisorapi "github.com/google/cadvisor/info/v1" + cadvisorapiv2 "github.com/google/cadvisor/info/v2" + "github.com/stretchr/testify/assert" + "k8s.io/kubernetes/pkg/api" + apierrors "k8s.io/kubernetes/pkg/api/errors" + "k8s.io/kubernetes/pkg/api/resource" + "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/apis/componentconfig" + "k8s.io/kubernetes/pkg/capabilities" + "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" + "k8s.io/kubernetes/pkg/client/record" + "k8s.io/kubernetes/pkg/client/testing/core" + cadvisortest "k8s.io/kubernetes/pkg/kubelet/cadvisor/testing" + "k8s.io/kubernetes/pkg/kubelet/cm" + "k8s.io/kubernetes/pkg/kubelet/config" + kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" + containertest "k8s.io/kubernetes/pkg/kubelet/container/testing" + "k8s.io/kubernetes/pkg/kubelet/eviction" + "k8s.io/kubernetes/pkg/kubelet/lifecycle" + "k8s.io/kubernetes/pkg/kubelet/network" + nettest "k8s.io/kubernetes/pkg/kubelet/network/testing" + "k8s.io/kubernetes/pkg/kubelet/pleg" + kubepod "k8s.io/kubernetes/pkg/kubelet/pod" + podtest "k8s.io/kubernetes/pkg/kubelet/pod/testing" + proberesults "k8s.io/kubernetes/pkg/kubelet/prober/results" + probetest "k8s.io/kubernetes/pkg/kubelet/prober/testing" + "k8s.io/kubernetes/pkg/kubelet/server/stats" + "k8s.io/kubernetes/pkg/kubelet/status" + kubetypes "k8s.io/kubernetes/pkg/kubelet/types" + "k8s.io/kubernetes/pkg/kubelet/util/queue" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/types" + "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/bandwidth" + "k8s.io/kubernetes/pkg/util/diff" + "k8s.io/kubernetes/pkg/util/flowcontrol" + "k8s.io/kubernetes/pkg/util/mount" + "k8s.io/kubernetes/pkg/util/rand" + utilruntime "k8s.io/kubernetes/pkg/util/runtime" + "k8s.io/kubernetes/pkg/util/sets" + "k8s.io/kubernetes/pkg/util/wait" + "k8s.io/kubernetes/pkg/version" + "k8s.io/kubernetes/pkg/volume" + _ "k8s.io/kubernetes/pkg/volume/host_path" + volumetest "k8s.io/kubernetes/pkg/volume/testing" +) + +func init() { + utilruntime.ReallyCrash = true +} + +const ( + testKubeletHostname = "127.0.0.1" + + testReservationCPU = "200m" + testReservationMemory = "100M" + + maxImageTagsForTest = 3 + + // TODO(harry) any global place for these two? + // Reasonable size range of all container images. The 90th percentile of images on Docker Hub falls into this range. + minImgSize int64 = 23 * 1024 * 1024 + maxImgSize int64 = 1000 * 1024 * 1024 +) + +type fakeHTTP struct { + url string + err error +} + +func (f *fakeHTTP) Get(url string) (*http.Response, error) { + f.url = url + return nil, f.err +} + +type TestKubelet struct { + kubelet *Kubelet + fakeRuntime *containertest.FakeRuntime + fakeCadvisor *cadvisortest.Mock + fakeKubeClient *fake.Clientset + fakeMirrorClient *podtest.FakeMirrorClient + fakeClock *util.FakeClock + mounter mount.Interface +} + +// newTestKubelet returns a test kubelet with two images. 
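+// A typical call pattern in the tests below (a sketch for orientation only): +// testKubelet := newTestKubelet(t) +// kubelet := testKubelet.kubelet +// fakeRuntime := testKubelet.fakeRuntime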
+func newTestKubelet(t *testing.T) *TestKubelet { + imageList := []kubecontainer.Image{ + { + ID: "abc", + RepoTags: []string{"gcr.io/google_containers:v1", "gcr.io/google_containers:v2"}, + Size: 123, + }, + { + ID: "efg", + RepoTags: []string{"gcr.io/google_containers:v3", "gcr.io/google_containers:v4"}, + Size: 456, + }, + } + return newTestKubeletWithImageList(t, imageList) +} + +// generateTestingImageList generates a random image list and the corresponding expectedImageList. +func generateTestingImageList(count int) ([]kubecontainer.Image, []api.ContainerImage) { + // imageList is a randomly generated image list + var imageList []kubecontainer.Image + for ; count > 0; count-- { + imageItem := kubecontainer.Image{ + ID: string(util.NewUUID()), + RepoTags: generateImageTags(), + Size: rand.Int63nRange(minImgSize, maxImgSize+1), + } + imageList = append(imageList, imageItem) + } + + // expectedImageList is derived from imageList according to size and maxImagesInNodeStatus + // 1. sort the imageList by size + sort.Sort(byImageSize(imageList)) + // 2. convert sorted imageList to api.ContainerImage list + var expectedImageList []api.ContainerImage + for _, kubeImage := range imageList { + apiImage := api.ContainerImage{ + Names: kubeImage.RepoTags, + SizeBytes: kubeImage.Size, + } + + expectedImageList = append(expectedImageList, apiImage) + } + // 3. return only the top maxImagesInNodeStatus images from expectedImageList + return imageList, expectedImageList[0:maxImagesInNodeStatus] +} + +func generateImageTags() []string { + var tagList []string + count := rand.IntnRange(1, maxImageTagsForTest+1) + for ; count > 0; count-- { + tagList = append(tagList, "gcr.io/google_containers:v"+strconv.Itoa(count)) + } + return tagList +} + +func newTestKubeletWithImageList(t *testing.T, imageList []kubecontainer.Image) *TestKubelet { + fakeRuntime := &containertest.FakeRuntime{} + fakeRuntime.RuntimeType = "test" + fakeRuntime.VersionInfo = "1.5.0" + fakeRuntime.ImageList = imageList + fakeRecorder := &record.FakeRecorder{} + fakeKubeClient := &fake.Clientset{} + kubelet := &Kubelet{} + kubelet.kubeClient = fakeKubeClient + kubelet.os = &containertest.FakeOS{} + + kubelet.hostname = testKubeletHostname + kubelet.nodeName = testKubeletHostname + kubelet.runtimeState = newRuntimeState(maxWaitForContainerRuntime) + kubelet.runtimeState.setNetworkState(nil) + kubelet.networkPlugin, _ = network.InitNetworkPlugin([]network.NetworkPlugin{}, "", nettest.NewFakeHost(nil), componentconfig.HairpinNone) + if tempDir, err := ioutil.TempDir("/tmp", "kubelet_test."); err != nil { + t.Fatalf("can't make a temp rootdir: %v", err) + } else { + kubelet.rootDirectory = tempDir + } + if err := os.MkdirAll(kubelet.rootDirectory, 0750); err != nil { + t.Fatalf("can't mkdir(%q): %v", kubelet.rootDirectory, err) + } + kubelet.sourcesReady = config.NewSourcesReady(func(_ sets.String) bool { return true }) + kubelet.masterServiceNamespace = api.NamespaceDefault + kubelet.serviceLister = testServiceLister{} + kubelet.nodeLister = testNodeLister{} + kubelet.nodeInfo = testNodeInfo{} + kubelet.recorder = fakeRecorder + if err := kubelet.setupDataDirs(); err != nil { + t.Fatalf("can't initialize kubelet data dirs: %v", err) + } + kubelet.daemonEndpoints = &api.NodeDaemonEndpoints{} + mockCadvisor := &cadvisortest.Mock{} + kubelet.cadvisor = mockCadvisor + fakeMirrorClient := podtest.NewFakeMirrorClient() + kubelet.podManager = kubepod.NewBasicPodManager(fakeMirrorClient) + kubelet.statusManager = 
status.NewManager(fakeKubeClient, kubelet.podManager) + kubelet.containerRefManager = kubecontainer.NewRefManager() + diskSpaceManager, err := newDiskSpaceManager(mockCadvisor, DiskSpacePolicy{}) + if err != nil { + t.Fatalf("can't initialize disk space manager: %v", err) + } + kubelet.diskSpaceManager = diskSpaceManager + + kubelet.containerRuntime = fakeRuntime + kubelet.runtimeCache = containertest.NewFakeRuntimeCache(kubelet.containerRuntime) + kubelet.reasonCache = NewReasonCache() + kubelet.podCache = containertest.NewFakeCache(kubelet.containerRuntime) + kubelet.podWorkers = &fakePodWorkers{ + syncPodFn: kubelet.syncPod, + cache: kubelet.podCache, + t: t, + } + + kubelet.probeManager = probetest.FakeManager{} + kubelet.livenessManager = proberesults.NewManager() + + kubelet.volumeManager = newVolumeManager() + kubelet.containerManager = cm.NewStubContainerManager() + fakeNodeRef := &api.ObjectReference{ + Kind: "Node", + Name: testKubeletHostname, + UID: types.UID(testKubeletHostname), + Namespace: "", + } + fakeImageGCPolicy := ImageGCPolicy{ + HighThresholdPercent: 90, + LowThresholdPercent: 80, + } + kubelet.imageManager, err = newImageManager(fakeRuntime, mockCadvisor, fakeRecorder, fakeNodeRef, fakeImageGCPolicy) + fakeClock := util.NewFakeClock(time.Now()) + kubelet.backOff = flowcontrol.NewBackOff(time.Second, time.Minute) + kubelet.backOff.Clock = fakeClock + kubelet.podKillingCh = make(chan *kubecontainer.PodPair, 20) + kubelet.resyncInterval = 10 * time.Second + kubelet.reservation = kubetypes.Reservation{ + Kubernetes: api.ResourceList{ + api.ResourceCPU: resource.MustParse(testReservationCPU), + api.ResourceMemory: resource.MustParse(testReservationMemory), + }, + } + kubelet.workQueue = queue.NewBasicWorkQueue(fakeClock) + // Relist period does not affect the tests. 
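+ // (A reading of the values below, not a claim from the source: the one-hour relist period is presumably long enough that relisting never fires within a test, and the event-channel capacity of 100 is arbitrary for these tests.)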
+ kubelet.pleg = pleg.NewGenericPLEG(fakeRuntime, 100, time.Hour, nil, util.RealClock{}) + kubelet.clock = fakeClock + kubelet.setNodeStatusFuncs = kubelet.defaultNodeStatusFuncs() + + // TODO: Factor out "StatsProvider" from Kubelet so we don't have a cyclic dependency + volumeStatsAggPeriod := time.Second * 10 + kubelet.resourceAnalyzer = stats.NewResourceAnalyzer(kubelet, volumeStatsAggPeriod, kubelet.containerRuntime) + nodeRef := &api.ObjectReference{ + Kind: "Node", + Name: kubelet.nodeName, + UID: types.UID(kubelet.nodeName), + Namespace: "", + } + // setup eviction manager + evictionManager, evictionAdmitHandler, err := eviction.NewManager(kubelet.resourceAnalyzer, eviction.Config{}, killPodNow(kubelet.podWorkers), fakeRecorder, nodeRef, kubelet.clock) + if err != nil { + t.Fatalf("failed to initialize eviction manager: %v", err) + } + kubelet.evictionManager = evictionManager + kubelet.AddPodAdmitHandler(evictionAdmitHandler) + + return &TestKubelet{kubelet, fakeRuntime, mockCadvisor, fakeKubeClient, fakeMirrorClient, fakeClock, nil} +} + +func newTestPods(count int) []*api.Pod { + pods := make([]*api.Pod, count) + for i := 0; i < count; i++ { + pods[i] = &api.Pod{ + Spec: api.PodSpec{ + SecurityContext: &api.PodSecurityContext{ + HostNetwork: true, + }, + }, + ObjectMeta: api.ObjectMeta{ + UID: types.UID(10000 + i), + Name: fmt.Sprintf("pod%d", i), + }, + } + } + return pods +} + +var emptyPodUIDs map[types.UID]kubetypes.SyncPodType + +func TestSyncLoopTimeUpdate(t *testing.T) { + testKubelet := newTestKubelet(t) + testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil) + kubelet := testKubelet.kubelet + + loopTime1 := kubelet.LatestLoopEntryTime() + if !loopTime1.IsZero() { + t.Errorf("Unexpected sync loop time: %s, expected 0", loopTime1) + } + + // Start sync ticker. + syncCh := make(chan time.Time, 1) + housekeepingCh := make(chan time.Time, 1) + plegCh := make(chan *pleg.PodLifecycleEvent) + syncCh <- time.Now() + kubelet.syncLoopIteration(make(chan kubetypes.PodUpdate), kubelet, syncCh, housekeepingCh, plegCh) + loopTime2 := kubelet.LatestLoopEntryTime() + if loopTime2.IsZero() { + t.Errorf("Unexpected sync loop time: 0, expected non-zero value.") + } + + syncCh <- time.Now() + kubelet.syncLoopIteration(make(chan kubetypes.PodUpdate), kubelet, syncCh, housekeepingCh, plegCh) + loopTime3 := kubelet.LatestLoopEntryTime() + if !loopTime3.After(loopTime1) { + t.Errorf("Sync Loop Time was not updated correctly. 
Second update timestamp should be greater than first update timestamp") + } +} + +func TestSyncLoopAbort(t *testing.T) { + testKubelet := newTestKubelet(t) + testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil) + kubelet := testKubelet.kubelet + kubelet.runtimeState.setRuntimeSync(time.Now()) + // The syncLoop waits on time.After(resyncInterval), set it really big so that we don't race for + // the channel close + kubelet.resyncInterval = time.Second * 30 + + ch := make(chan kubetypes.PodUpdate) + close(ch) + + // sanity check (also prevent this test from hanging in the next step) + ok := kubelet.syncLoopIteration(ch, kubelet, make(chan time.Time), make(chan time.Time), make(chan *pleg.PodLifecycleEvent, 1)) + if ok { + t.Fatalf("expected syncLoopIteration to return !ok since update chan was closed") + } + + // this should terminate immediately; if it hangs then the syncLoopIteration isn't aborting properly + kubelet.syncLoop(ch, kubelet) +} + +func TestSyncPodsStartPod(t *testing.T) { + testKubelet := newTestKubelet(t) + testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil) + testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil) + testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil) + kubelet := testKubelet.kubelet + fakeRuntime := testKubelet.fakeRuntime + pods := []*api.Pod{ + podWithUidNameNsSpec("12345678", "foo", "new", api.PodSpec{ + Containers: []api.Container{ + {Name: "bar"}, + }, + }), + } + kubelet.podManager.SetPods(pods) + kubelet.HandlePodSyncs(pods) + fakeRuntime.AssertStartedPods([]string{string(pods[0].UID)}) +} + +func TestSyncPodsDeletesWhenSourcesAreReady(t *testing.T) { + ready := false + + testKubelet := newTestKubelet(t) + fakeRuntime := testKubelet.fakeRuntime + testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil) + testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil) + testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil) + kubelet := testKubelet.kubelet + kubelet.sourcesReady = config.NewSourcesReady(func(_ sets.String) bool { return ready }) + + fakeRuntime.PodList = []*kubecontainer.Pod{ + { + ID: "12345678", + Name: "foo", + Namespace: "new", + Containers: []*kubecontainer.Container{ + {Name: "bar"}, + }, + }, + } + kubelet.HandlePodCleanups() + // Sources are not ready yet. Don't remove any pods. + fakeRuntime.AssertKilledPods([]string{}) + + ready = true + kubelet.HandlePodCleanups() + + // Sources are ready. Remove unwanted pods. + fakeRuntime.AssertKilledPods([]string{"12345678"}) +} + +func TestMountExternalVolumes(t *testing.T) { + testKubelet := newTestKubelet(t) + kubelet := testKubelet.kubelet + kubelet.mounter = &mount.FakeMounter{} + plug := &volumetest.FakeVolumePlugin{PluginName: "fake", Host: nil} + kubelet.volumePluginMgr.InitPlugins([]volume.VolumePlugin{plug}, &volumeHost{kubelet}) + + pod := podWithUidNameNsSpec("12345678", "foo", "test", api.PodSpec{ + Volumes: []api.Volume{ + { + Name: "vol1", + VolumeSource: api.VolumeSource{}, + }, + }, + }) + + podVolumes, err := kubelet.mountExternalVolumes(pod) + if err != nil { + t.Errorf("Expected success: %v", err) + } + expectedPodVolumes := []string{"vol1"} + if len(expectedPodVolumes) != len(podVolumes) { + t.Errorf("Unexpected volumes. Expected %#v got %#v. 
Manifest was: %#v", expectedPodVolumes, podVolumes, pod) + } + for _, name := range expectedPodVolumes { + if _, ok := podVolumes[name]; !ok { + t.Errorf("api.Pod volumes map is missing key: %s. %#v", name, podVolumes) + } + } + if plug.NewAttacherCallCount != 1 { + t.Errorf("Expected plugin NewAttacher to be called %d times but got %d", 1, plug.NewAttacherCallCount) + } + + attacher := plug.Attachers[0] + if attacher.AttachCallCount != 1 { + t.Errorf("Expected Attach to be called") + } + if attacher.WaitForAttachCallCount != 1 { + t.Errorf("Expected WaitForAttach to be called") + } + if attacher.MountDeviceCallCount != 1 { + t.Errorf("Expected MountDevice to be called") + } +} + +func TestGetPodVolumesFromDisk(t *testing.T) { + testKubelet := newTestKubelet(t) + kubelet := testKubelet.kubelet + plug := &volumetest.FakeVolumePlugin{PluginName: "fake", Host: nil} + kubelet.volumePluginMgr.InitPlugins([]volume.VolumePlugin{plug}, &volumeHost{kubelet}) + + volsOnDisk := []struct { + podUID types.UID + volName string + }{ + {"pod1", "vol1"}, + {"pod1", "vol2"}, + {"pod2", "vol1"}, + } + + expectedPaths := []string{} + for i := range volsOnDisk { + fv := volumetest.FakeVolume{PodUID: volsOnDisk[i].podUID, VolName: volsOnDisk[i].volName, Plugin: plug} + fv.SetUp(nil) + expectedPaths = append(expectedPaths, fv.GetPath()) + } + + volumesFound := kubelet.getPodVolumesFromDisk() + if len(volumesFound) != len(expectedPaths) { + t.Errorf("Expected to find %d unmounters, got %d", len(expectedPaths), len(volumesFound)) + } + for _, ep := range expectedPaths { + found := false + for _, cl := range volumesFound { + if ep == cl.Unmounter.GetPath() { + found = true + break + } + } + if !found { + t.Errorf("Could not find a volume with path %s", ep) + } + } + if plug.NewDetacherCallCount != len(volsOnDisk) { + t.Errorf("Expected plugin NewDetacher to be called %d times but got %d", len(volsOnDisk), plug.NewDetacherCallCount) + } +} + +// Test for https://github.com/kubernetes/kubernetes/pull/19600 +func TestCleanupOrphanedVolumes(t *testing.T) { + testKubelet := newTestKubelet(t) + kubelet := testKubelet.kubelet + kubelet.mounter = &mount.FakeMounter{} + kubeClient := testKubelet.fakeKubeClient + plug := &volumetest.FakeVolumePlugin{PluginName: "fake", Host: nil} + kubelet.volumePluginMgr.InitPlugins([]volume.VolumePlugin{plug}, &volumeHost{kubelet}) + + // create a volume "on disk" + volsOnDisk := []struct { + podUID types.UID + volName string + }{ + {"podUID", "myrealvol"}, + } + + pathsOnDisk := []string{} + for i := range volsOnDisk { + fv := volumetest.FakeVolume{PodUID: volsOnDisk[i].podUID, VolName: volsOnDisk[i].volName, Plugin: plug} + fv.SetUp(nil) + pathsOnDisk = append(pathsOnDisk, fv.GetPath()) + + // Simulate the global mount so that the fakeMounter returns the + // expected number of refs for the attached disk. 
+ kubelet.mounter.Mount(fv.GetPath(), fv.GetPath(), "fakefs", nil) + kubelet.mounter.Mount(fv.GetPath(), "/path/fake/device", "fake", nil) + } + + // store the claim in fake kubelet database + claim := api.PersistentVolumeClaim{ + ObjectMeta: api.ObjectMeta{ + Name: "myclaim", + Namespace: "test", + }, + Spec: api.PersistentVolumeClaimSpec{ + VolumeName: "myrealvol", + }, + Status: api.PersistentVolumeClaimStatus{ + Phase: api.ClaimBound, + }, + } + kubeClient.ReactionChain = fake.NewSimpleClientset(&api.PersistentVolumeClaimList{Items: []api.PersistentVolumeClaim{ + claim, + }}).ReactionChain + + // Create a pod referencing the volume via a PersistentVolumeClaim + pod := podWithUidNameNsSpec("podUID", "pod", "test", api.PodSpec{ + Volumes: []api.Volume{ + { + Name: "myvolumeclaim", + VolumeSource: api.VolumeSource{ + PersistentVolumeClaim: &api.PersistentVolumeClaimVolumeSource{ + ClaimName: "myclaim", + }, + }, + }, + }, + }) + + // The pod is pending and not running yet. Test that cleanupOrphanedVolumes + // won't remove the volume from disk if the volume is referenced only + // indirectly by a claim. + err := kubelet.cleanupOrphanedVolumes([]*api.Pod{pod}, []*kubecontainer.Pod{}) + if err != nil { + t.Errorf("cleanupOrphanedVolumes failed: %v", err) + } + + if len(plug.Unmounters) != len(volsOnDisk) { + t.Errorf("Unexpected number of unmounters created. Expected %d got %d", len(volsOnDisk), len(plug.Unmounters)) + } + for _, unmounter := range plug.Unmounters { + if unmounter.TearDownCallCount != 0 { + t.Errorf("Unexpected number of calls to TearDown() %d for volume %v", unmounter.TearDownCallCount, unmounter) + } + } + volumesFound := kubelet.getPodVolumesFromDisk() + if len(volumesFound) != len(pathsOnDisk) { + t.Errorf("Expected to find %d unmounters, got %d", len(pathsOnDisk), len(volumesFound)) + } + for _, ep := range pathsOnDisk { + found := false + for _, cl := range volumesFound { + if ep == cl.Unmounter.GetPath() { + found = true + break + } + } + if !found { + t.Errorf("Could not find a volume with path %s", ep) + } + } + + // The pod is deleted -> kubelet should delete the volume + err = kubelet.cleanupOrphanedVolumes([]*api.Pod{}, []*kubecontainer.Pod{}) + if err != nil { + t.Errorf("cleanupOrphanedVolumes failed: %v", err) + } + volumesFound = kubelet.getPodVolumesFromDisk() + if len(volumesFound) != 0 { + t.Errorf("Expected to find 0 unmounters, got %d", len(volumesFound)) + } + for _, cl := range volumesFound { + t.Errorf("Found unexpected volume %s", cl.Unmounter.GetPath()) + } + + // Two unmounters created by the previous calls to cleanupOrphanedVolumes and getPodVolumesFromDisk + expectedUnmounters := len(volsOnDisk) + 2 + if len(plug.Unmounters) != expectedUnmounters { + t.Errorf("Unexpected number of unmounters created. Expected %d got %d", expectedUnmounters, len(plug.Unmounters)) + } + + // This is the unmounter which was actually used to perform a tear down. 
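+ // (Index 2 because, per the comment above, the unmounters at indices 0 and 1 were created by the earlier cleanupOrphanedVolumes and getPodVolumesFromDisk calls.)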
+ unmounter := plug.Unmounters[2] + + if unmounter.TearDownCallCount != 1 { + t.Errorf("Unexpected number of calls to TearDown() %d for volume %v", unmounter.TearDownCallCount, unmounter) + } + + if plug.NewDetacherCallCount != expectedUnmounters { + t.Errorf("Expected plugin NewDetacher to be called %d times but got %d", expectedUnmounters, plug.NewDetacherCallCount) + } + + detacher := plug.Detachers[2] + if detacher.DetachCallCount != 1 { + t.Errorf("Expected Detach to be called") + } + if detacher.UnmountDeviceCallCount != 1 { + t.Errorf("Expected UnmountDevice to be called") + } +} + +func TestGetPersistentVolumeBySpec(t *testing.T) { + testKubelet := newTestKubelet(t) + kubelet := testKubelet.kubelet + kubeClient := testKubelet.fakeKubeClient + kubeClient.ReactionChain = fake.NewSimpleClientset(&api.PersistentVolumeClaimList{Items: []api.PersistentVolumeClaim{}}).ReactionChain + + // Test claim does not exist + _, err := kubelet.getPersistentVolumeByClaimName("myclaim", "test") + if err == nil { + t.Errorf("Expected claim to not be found") + } + + // Test claim found but not bound. + claim := api.PersistentVolumeClaim{ + ObjectMeta: api.ObjectMeta{ + Name: "myclaim", + Namespace: "test", + }, + } + kubeClient.ReactionChain = fake.NewSimpleClientset(&api.PersistentVolumeClaimList{Items: []api.PersistentVolumeClaim{ + claim, + }}).ReactionChain + + _, err = kubelet.getPersistentVolumeByClaimName("myclaim", "test") + if err == nil { + t.Errorf("Expected a claim not bound error to occur") + } + + // Test claim found, bound but PV does not exist + claim = api.PersistentVolumeClaim{ + ObjectMeta: api.ObjectMeta{ + Name: "myclaim", + Namespace: "test", + UID: types.UID("myclaimUID123"), + }, + Spec: api.PersistentVolumeClaimSpec{ + VolumeName: "myrealvol", + }, + } + kubeClient.ReactionChain = fake.NewSimpleClientset(&api.PersistentVolumeClaimList{Items: []api.PersistentVolumeClaim{ + claim, + }}).ReactionChain + + _, err = kubelet.getPersistentVolumeByClaimName("myclaim", "test") + if err == nil { + t.Errorf("Expected PV not found error to occur") + } + + // Test volume found but not bound. 
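+ // (The PV constructed below has no ClaimRef, so the lookup is expected to fail with the "not bound" error asserted afterwards.)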
+ volume := api.PersistentVolume{ + ObjectMeta: api.ObjectMeta{ + Name: "myrealvol", + }, + } + kubeClient.ReactionChain = fake.NewSimpleClientset( + &api.PersistentVolumeClaimList{Items: []api.PersistentVolumeClaim{ + claim, + }}, + &api.PersistentVolumeList{Items: []api.PersistentVolume{ + volume, + }}, + ).ReactionChain + _, err = kubelet.getPersistentVolumeByClaimName("myclaim", "test") + if err == nil { + t.Errorf("Expected PV not bound error to occur") + } + + // Test volume found and incorrectly bound + volume = api.PersistentVolume{ + ObjectMeta: api.ObjectMeta{ + Name: "myrealvol", + }, + Spec: api.PersistentVolumeSpec{ + ClaimRef: &api.ObjectReference{ + Name: "myclaim", + Namespace: "test", + UID: types.UID("veryWrongUID"), + }, + }, + } + kubeClient.ReactionChain = fake.NewSimpleClientset( + &api.PersistentVolumeClaimList{Items: []api.PersistentVolumeClaim{ + claim, + }}, + &api.PersistentVolumeList{Items: []api.PersistentVolume{ + volume, + }}, + ).ReactionChain + _, err = kubelet.getPersistentVolumeByClaimName("myclaim", "test") + if err == nil { + t.Errorf("Expected wrong UID error to occur") + } + + // Test volume found and correctly bound + volume = api.PersistentVolume{ + ObjectMeta: api.ObjectMeta{ + Name: "myrealvol", + }, + Spec: api.PersistentVolumeSpec{ + ClaimRef: &api.ObjectReference{ + Name: "myclaim", + Namespace: "test", + UID: types.UID("myclaimUID123"), + }, + }, + } + kubeClient.ReactionChain = fake.NewSimpleClientset( + &api.PersistentVolumeClaimList{Items: []api.PersistentVolumeClaim{ + claim, + }}, + &api.PersistentVolumeList{Items: []api.PersistentVolume{ + volume, + }}, + ).ReactionChain + + foundVolume, err := kubelet.getPersistentVolumeByClaimName("myclaim", "test") + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + + if foundVolume.Name != volume.Name { + t.Errorf("Found incorrect volume: expected %v, but got %v", volume, foundVolume) + } +} + +func TestApplyPersistentVolumeAnnotations(t *testing.T) { + testKubelet := newTestKubelet(t) + kubelet := testKubelet.kubelet + + pod := &api.Pod{} + + pv := &api.PersistentVolume{ + ObjectMeta: api.ObjectMeta{ + Name: "pv", + Annotations: map[string]string{ + volumeGidAnnotationKey: "12345", + }, + }, + Spec: api.PersistentVolumeSpec{ + PersistentVolumeSource: api.PersistentVolumeSource{ + GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{}, + }, + ClaimRef: &api.ObjectReference{ + Name: "claim", + UID: types.UID("abc123"), + }, + }, + } + + kubelet.applyPersistentVolumeAnnotations(pv, pod) + + if pod.Spec.SecurityContext == nil { + t.Errorf("Pod SecurityContext was not set") + } + + if pod.Spec.SecurityContext.SupplementalGroups[0] != 12345 { + t.Errorf("Pod's SupplementalGroups list does not contain the expected group") + } +} + +type stubVolume struct { + path string + volume.MetricsNil +} + +func (f *stubVolume) GetPath() string { + return f.path +} + +func (f *stubVolume) GetAttributes() volume.Attributes { + return volume.Attributes{} +} + +func (f *stubVolume) SetUp(fsGroup *int64) error { + return nil +} + +func (f *stubVolume) SetUpAt(dir string, fsGroup *int64) error { + return nil +} + +func TestMakeVolumeMounts(t *testing.T) { + container := api.Container{ + VolumeMounts: []api.VolumeMount{ + { + MountPath: "/etc/hosts", + Name: "disk", + ReadOnly: false, + }, + { + MountPath: "/mnt/path3", + Name: "disk", + ReadOnly: true, + }, + { + MountPath: "/mnt/path4", + Name: "disk4", + ReadOnly: false, + }, + { + MountPath: "/mnt/path5", + Name: "disk5", + ReadOnly: false, + }, + }, + } + + 
podVolumes := kubecontainer.VolumeMap{ + "disk": kubecontainer.VolumeInfo{Mounter: &stubVolume{path: "/mnt/disk"}}, + "disk4": kubecontainer.VolumeInfo{Mounter: &stubVolume{path: "/mnt/host"}}, + "disk5": kubecontainer.VolumeInfo{Mounter: &stubVolume{path: "/var/lib/kubelet/podID/volumes/empty/disk5"}}, + } + + pod := api.Pod{ + Spec: api.PodSpec{ + SecurityContext: &api.PodSecurityContext{ + HostNetwork: true, + }, + }, + } + + mounts, _ := makeMounts(&pod, "/pod", &container, "fakepodname", "", "", podVolumes) + + expectedMounts := []kubecontainer.Mount{ + { + "disk", + "/etc/hosts", + "/mnt/disk", + false, + false, + }, + { + "disk", + "/mnt/path3", + "/mnt/disk", + true, + false, + }, + { + "disk4", + "/mnt/path4", + "/mnt/host", + false, + false, + }, + { + "disk5", + "/mnt/path5", + "/var/lib/kubelet/podID/volumes/empty/disk5", + false, + false, + }, + } + if !reflect.DeepEqual(mounts, expectedMounts) { + t.Errorf("Unexpected mounts: Expected %#v got %#v. Container was: %#v", expectedMounts, mounts, container) + } +} + +func TestNodeIPParam(t *testing.T) { + testKubelet := newTestKubelet(t) + kubelet := testKubelet.kubelet + tests := []struct { + nodeIP string + success bool + testName string + }{ + { + nodeIP: "", + success: true, + testName: "IP not set", + }, + { + nodeIP: "127.0.0.1", + success: false, + testName: "loopback address", + }, + { + nodeIP: "FE80::0202:B3FF:FE1E:8329", + success: false, + testName: "IPv6 address", + }, + { + nodeIP: "1.2.3.4", + success: false, + testName: "IPv4 address that doesn't belong to host", + }, + } + for _, test := range tests { + kubelet.nodeIP = net.ParseIP(test.nodeIP) + err := kubelet.validateNodeIP() + if err != nil && test.success { + t.Errorf("Test: %s, expected no error but got: %v", test.testName, err) + } else if err == nil && !test.success { + t.Errorf("Test: %s, expected an error", test.testName) + } + } +} + +type fakeContainerCommandRunner struct { + Cmd []string + ID kubecontainer.ContainerID + PodID types.UID + E error + Stdin io.Reader + Stdout io.WriteCloser + Stderr io.WriteCloser + TTY bool + Port uint16 + Stream io.ReadWriteCloser +} + +func (f *fakeContainerCommandRunner) ExecInContainer(id kubecontainer.ContainerID, cmd []string, in io.Reader, out, err io.WriteCloser, tty bool) error { + f.Cmd = cmd + f.ID = id + f.Stdin = in + f.Stdout = out + f.Stderr = err + f.TTY = tty + return f.E +} + +func (f *fakeContainerCommandRunner) PortForward(pod *kubecontainer.Pod, port uint16, stream io.ReadWriteCloser) error { + f.PodID = pod.ID + f.Port = port + f.Stream = stream + return nil +} + +func TestRunInContainerNoSuchPod(t *testing.T) { + testKubelet := newTestKubelet(t) + kubelet := testKubelet.kubelet + fakeRuntime := testKubelet.fakeRuntime + fakeRuntime.PodList = []*kubecontainer.Pod{} + + podName := "podFoo" + podNamespace := "nsFoo" + containerName := "containerFoo" + output, err := kubelet.RunInContainer( + kubecontainer.GetPodFullName(&api.Pod{ObjectMeta: api.ObjectMeta{Name: podName, Namespace: podNamespace}}), + "", + containerName, + []string{"ls"}) + if output != nil { + t.Errorf("unexpected non-nil command: %v", output) + } + if err == nil { + t.Error("unexpected non-error") + } +} + +func TestRunInContainer(t *testing.T) { + testKubelet := newTestKubelet(t) + kubelet := testKubelet.kubelet + fakeRuntime := testKubelet.fakeRuntime + fakeCommandRunner := fakeContainerCommandRunner{} + kubelet.runner = &fakeCommandRunner + + containerID := kubecontainer.ContainerID{Type: "test", ID: "abc1234"} + 
fakeRuntime.PodList = []*kubecontainer.Pod{ + { + ID: "12345678", + Name: "podFoo", + Namespace: "nsFoo", + Containers: []*kubecontainer.Container{ + {Name: "containerFoo", + ID: containerID, + }, + }, + }, + } + cmd := []string{"ls"} + _, err := kubelet.RunInContainer("podFoo_nsFoo", "", "containerFoo", cmd) + if fakeCommandRunner.ID != containerID { + t.Errorf("unexpected Name: %s", fakeCommandRunner.ID) + } + if !reflect.DeepEqual(fakeCommandRunner.Cmd, cmd) { + t.Errorf("unexpected command: %s", fakeCommandRunner.Cmd) + } + if err != nil { + t.Errorf("unexpected error: %v", err) + } +} + +type countingDNSScrubber struct { + counter *int +} + +func (cds countingDNSScrubber) ScrubDNS(nameservers, searches []string) (nsOut, srchOut []string) { + (*cds.counter)++ + return nameservers, searches +} + +func TestParseResolvConf(t *testing.T) { + testCases := []struct { + data string + nameservers []string + searches []string + }{ + {"", []string{}, []string{}}, + {" ", []string{}, []string{}}, + {"\n", []string{}, []string{}}, + {"\t\n\t", []string{}, []string{}}, + {"#comment\n", []string{}, []string{}}, + {" #comment\n", []string{}, []string{}}, + {"#comment\n#comment", []string{}, []string{}}, + {"#comment\nnameserver", []string{}, []string{}}, + {"#comment\nnameserver\nsearch", []string{}, []string{}}, + {"nameserver 1.2.3.4", []string{"1.2.3.4"}, []string{}}, + {" nameserver 1.2.3.4", []string{"1.2.3.4"}, []string{}}, + {"\tnameserver 1.2.3.4", []string{"1.2.3.4"}, []string{}}, + {"nameserver\t1.2.3.4", []string{"1.2.3.4"}, []string{}}, + {"nameserver \t 1.2.3.4", []string{"1.2.3.4"}, []string{}}, + {"nameserver 1.2.3.4\nnameserver 5.6.7.8", []string{"1.2.3.4", "5.6.7.8"}, []string{}}, + {"search foo", []string{}, []string{"foo"}}, + {"search foo bar", []string{}, []string{"foo", "bar"}}, + {"search foo bar bat\n", []string{}, []string{"foo", "bar", "bat"}}, + {"search foo\nsearch bar", []string{}, []string{"bar"}}, + {"nameserver 1.2.3.4\nsearch foo bar", []string{"1.2.3.4"}, []string{"foo", "bar"}}, + {"nameserver 1.2.3.4\nsearch foo\nnameserver 5.6.7.8\nsearch bar", []string{"1.2.3.4", "5.6.7.8"}, []string{"bar"}}, + {"#comment\nnameserver 1.2.3.4\n#comment\nsearch foo\ncomment", []string{"1.2.3.4"}, []string{"foo"}}, + } + for i, tc := range testCases { + ns, srch, err := parseResolvConf(strings.NewReader(tc.data), nil) + if err != nil { + t.Errorf("expected success, got %v", err) + continue + } + if !reflect.DeepEqual(ns, tc.nameservers) { + t.Errorf("[%d] expected nameservers %#v, got %#v", i, tc.nameservers, ns) + } + if !reflect.DeepEqual(srch, tc.searches) { + t.Errorf("[%d] expected searches %#v, got %#v", i, tc.searches, srch) + } + + counter := 0 + cds := countingDNSScrubber{&counter} + ns, srch, err = parseResolvConf(strings.NewReader(tc.data), cds) + if err != nil { + t.Errorf("expected success, got %v", err) + continue + } + if !reflect.DeepEqual(ns, tc.nameservers) { + t.Errorf("[%d] expected nameservers %#v, got %#v", i, tc.nameservers, ns) + } + if !reflect.DeepEqual(srch, tc.searches) { + t.Errorf("[%d] expected searches %#v, got %#v", i, tc.searches, srch) + } + if counter != 1 { + t.Errorf("[%d] expected dnsScrubber to have been called: got %d", i, counter) + } + } +} + +func TestDNSConfigurationParams(t *testing.T) { + testKubelet := newTestKubelet(t) + kubelet := testKubelet.kubelet + + clusterNS := "203.0.113.1" + kubelet.clusterDomain = "kubernetes.io" + kubelet.clusterDNS = net.ParseIP(clusterNS) + + pods := newTestPods(2) + pods[0].Spec.DNSPolicy = 
api.DNSClusterFirst + pods[1].Spec.DNSPolicy = api.DNSDefault + + options := make([]*kubecontainer.RunContainerOptions, 2) + for i, pod := range pods { + var err error + kubelet.volumeManager.SetVolumes(pod.UID, make(kubecontainer.VolumeMap, 0)) + options[i], err = kubelet.GenerateRunContainerOptions(pod, &api.Container{}, "") + if err != nil { + t.Fatalf("failed to generate container options: %v", err) + } + } + if len(options[0].DNS) != 1 || options[0].DNS[0] != clusterNS { + t.Errorf("expected nameserver %s, got %+v", clusterNS, options[0].DNS) + } + if len(options[0].DNSSearch) == 0 || options[0].DNSSearch[0] != ".svc."+kubelet.clusterDomain { + t.Errorf("expected search %s, got %+v", ".svc."+kubelet.clusterDomain, options[0].DNSSearch) + } + if len(options[1].DNS) != 1 || options[1].DNS[0] != "127.0.0.1" { + t.Errorf("expected nameserver 127.0.0.1, got %+v", options[1].DNS) + } + if len(options[1].DNSSearch) != 1 || options[1].DNSSearch[0] != "." { + t.Errorf("expected search \".\", got %+v", options[1].DNSSearch) + } + + kubelet.resolverConfig = "/etc/resolv.conf" + for i, pod := range pods { + var err error + options[i], err = kubelet.GenerateRunContainerOptions(pod, &api.Container{}, "") + if err != nil { + t.Fatalf("failed to generate container options: %v", err) + } + } + t.Logf("nameservers %+v", options[1].DNS) + if len(options[0].DNS) != 1 { + t.Errorf("expected cluster nameserver only, got %+v", options[0].DNS) + } else if options[0].DNS[0] != clusterNS { + t.Errorf("expected nameserver %s, got %v", clusterNS, options[0].DNS[0]) + } + if len(options[0].DNSSearch) != len(options[1].DNSSearch)+3 { + t.Errorf("expected prepend of cluster domain, got %+v", options[0].DNSSearch) + } else if options[0].DNSSearch[0] != ".svc."+kubelet.clusterDomain { + t.Errorf("expected domain %s, got %s", ".svc."+kubelet.clusterDomain, options[0].DNSSearch) + } +} + +type testServiceLister struct { + services []api.Service +} + +func (ls testServiceLister) List() (api.ServiceList, error) { + return api.ServiceList{ + Items: ls.services, + }, nil +} + +type testNodeLister struct { + nodes []api.Node +} + +type testNodeInfo struct { + nodes []api.Node +} + +func (ls testNodeInfo) GetNodeInfo(id string) (*api.Node, error) { + for _, node := range ls.nodes { + if node.Name == id { + return &node, nil + } + } + return nil, fmt.Errorf("Node with name: %s does not exist", id) +} + +func (ls testNodeLister) List() (api.NodeList, error) { + return api.NodeList{ + Items: ls.nodes, + }, nil +} + +type envs []kubecontainer.EnvVar + +func (e envs) Len() int { + return len(e) +} + +func (e envs) Swap(i, j int) { e[i], e[j] = e[j], e[i] } + +func (e envs) Less(i, j int) bool { return e[i].Name < e[j].Name } + +func buildService(name, namespace, clusterIP, protocol string, port int) api.Service { + return api.Service{ + ObjectMeta: api.ObjectMeta{Name: name, Namespace: namespace}, + Spec: api.ServiceSpec{ + Ports: []api.ServicePort{{ + Protocol: api.Protocol(protocol), + Port: int32(port), + }}, + ClusterIP: clusterIP, + }, + } +} + +func TestMakeEnvironmentVariables(t *testing.T) { + services := []api.Service{ + buildService("kubernetes", api.NamespaceDefault, "1.2.3.1", "TCP", 8081), + buildService("test", "test1", "1.2.3.3", "TCP", 8083), + buildService("kubernetes", "test2", "1.2.3.4", "TCP", 8084), + buildService("test", "test2", "1.2.3.5", "TCP", 8085), + buildService("test", "test2", "None", "TCP", 8085), + buildService("test", "test2", "", "TCP", 8085), + buildService("kubernetes", "kubernetes", 
"1.2.3.6", "TCP", 8086), + buildService("not-special", "kubernetes", "1.2.3.8", "TCP", 8088), + buildService("not-special", "kubernetes", "None", "TCP", 8088), + buildService("not-special", "kubernetes", "", "TCP", 8088), + } + + testCases := []struct { + name string // the name of the test case + ns string // the namespace to generate environment for + container *api.Container // the container to use + masterServiceNs string // the namespace to read master service info from + nilLister bool // whether the lister should be nil + expectedEnvs []kubecontainer.EnvVar // a set of expected environment vars + }{ + { + name: "api server = Y, kubelet = Y", + ns: "test1", + container: &api.Container{ + Env: []api.EnvVar{ + {Name: "FOO", Value: "BAR"}, + {Name: "TEST_SERVICE_HOST", Value: "1.2.3.3"}, + {Name: "TEST_SERVICE_PORT", Value: "8083"}, + {Name: "TEST_PORT", Value: "tcp://1.2.3.3:8083"}, + {Name: "TEST_PORT_8083_TCP", Value: "tcp://1.2.3.3:8083"}, + {Name: "TEST_PORT_8083_TCP_PROTO", Value: "tcp"}, + {Name: "TEST_PORT_8083_TCP_PORT", Value: "8083"}, + {Name: "TEST_PORT_8083_TCP_ADDR", Value: "1.2.3.3"}, + }, + }, + masterServiceNs: api.NamespaceDefault, + nilLister: false, + expectedEnvs: []kubecontainer.EnvVar{ + {Name: "FOO", Value: "BAR"}, + {Name: "TEST_SERVICE_HOST", Value: "1.2.3.3"}, + {Name: "TEST_SERVICE_PORT", Value: "8083"}, + {Name: "TEST_PORT", Value: "tcp://1.2.3.3:8083"}, + {Name: "TEST_PORT_8083_TCP", Value: "tcp://1.2.3.3:8083"}, + {Name: "TEST_PORT_8083_TCP_PROTO", Value: "tcp"}, + {Name: "TEST_PORT_8083_TCP_PORT", Value: "8083"}, + {Name: "TEST_PORT_8083_TCP_ADDR", Value: "1.2.3.3"}, + {Name: "KUBERNETES_SERVICE_PORT", Value: "8081"}, + {Name: "KUBERNETES_SERVICE_HOST", Value: "1.2.3.1"}, + {Name: "KUBERNETES_PORT", Value: "tcp://1.2.3.1:8081"}, + {Name: "KUBERNETES_PORT_8081_TCP", Value: "tcp://1.2.3.1:8081"}, + {Name: "KUBERNETES_PORT_8081_TCP_PROTO", Value: "tcp"}, + {Name: "KUBERNETES_PORT_8081_TCP_PORT", Value: "8081"}, + {Name: "KUBERNETES_PORT_8081_TCP_ADDR", Value: "1.2.3.1"}, + }, + }, + { + name: "api server = Y, kubelet = N", + ns: "test1", + container: &api.Container{ + Env: []api.EnvVar{ + {Name: "FOO", Value: "BAR"}, + {Name: "TEST_SERVICE_HOST", Value: "1.2.3.3"}, + {Name: "TEST_SERVICE_PORT", Value: "8083"}, + {Name: "TEST_PORT", Value: "tcp://1.2.3.3:8083"}, + {Name: "TEST_PORT_8083_TCP", Value: "tcp://1.2.3.3:8083"}, + {Name: "TEST_PORT_8083_TCP_PROTO", Value: "tcp"}, + {Name: "TEST_PORT_8083_TCP_PORT", Value: "8083"}, + {Name: "TEST_PORT_8083_TCP_ADDR", Value: "1.2.3.3"}, + }, + }, + masterServiceNs: api.NamespaceDefault, + nilLister: true, + expectedEnvs: []kubecontainer.EnvVar{ + {Name: "FOO", Value: "BAR"}, + {Name: "TEST_SERVICE_HOST", Value: "1.2.3.3"}, + {Name: "TEST_SERVICE_PORT", Value: "8083"}, + {Name: "TEST_PORT", Value: "tcp://1.2.3.3:8083"}, + {Name: "TEST_PORT_8083_TCP", Value: "tcp://1.2.3.3:8083"}, + {Name: "TEST_PORT_8083_TCP_PROTO", Value: "tcp"}, + {Name: "TEST_PORT_8083_TCP_PORT", Value: "8083"}, + {Name: "TEST_PORT_8083_TCP_ADDR", Value: "1.2.3.3"}, + }, + }, + { + name: "api server = N; kubelet = Y", + ns: "test1", + container: &api.Container{ + Env: []api.EnvVar{ + {Name: "FOO", Value: "BAZ"}, + }, + }, + masterServiceNs: api.NamespaceDefault, + nilLister: false, + expectedEnvs: []kubecontainer.EnvVar{ + {Name: "FOO", Value: "BAZ"}, + {Name: "TEST_SERVICE_HOST", Value: "1.2.3.3"}, + {Name: "TEST_SERVICE_PORT", Value: "8083"}, + {Name: "TEST_PORT", Value: "tcp://1.2.3.3:8083"}, + {Name: "TEST_PORT_8083_TCP", Value: 
"tcp://1.2.3.3:8083"}, + {Name: "TEST_PORT_8083_TCP_PROTO", Value: "tcp"}, + {Name: "TEST_PORT_8083_TCP_PORT", Value: "8083"}, + {Name: "TEST_PORT_8083_TCP_ADDR", Value: "1.2.3.3"}, + {Name: "KUBERNETES_SERVICE_HOST", Value: "1.2.3.1"}, + {Name: "KUBERNETES_SERVICE_PORT", Value: "8081"}, + {Name: "KUBERNETES_PORT", Value: "tcp://1.2.3.1:8081"}, + {Name: "KUBERNETES_PORT_8081_TCP", Value: "tcp://1.2.3.1:8081"}, + {Name: "KUBERNETES_PORT_8081_TCP_PROTO", Value: "tcp"}, + {Name: "KUBERNETES_PORT_8081_TCP_PORT", Value: "8081"}, + {Name: "KUBERNETES_PORT_8081_TCP_ADDR", Value: "1.2.3.1"}, + }, + }, + { + name: "master service in pod ns", + ns: "test2", + container: &api.Container{ + Env: []api.EnvVar{ + {Name: "FOO", Value: "ZAP"}, + }, + }, + masterServiceNs: "kubernetes", + nilLister: false, + expectedEnvs: []kubecontainer.EnvVar{ + {Name: "FOO", Value: "ZAP"}, + {Name: "TEST_SERVICE_HOST", Value: "1.2.3.5"}, + {Name: "TEST_SERVICE_PORT", Value: "8085"}, + {Name: "TEST_PORT", Value: "tcp://1.2.3.5:8085"}, + {Name: "TEST_PORT_8085_TCP", Value: "tcp://1.2.3.5:8085"}, + {Name: "TEST_PORT_8085_TCP_PROTO", Value: "tcp"}, + {Name: "TEST_PORT_8085_TCP_PORT", Value: "8085"}, + {Name: "TEST_PORT_8085_TCP_ADDR", Value: "1.2.3.5"}, + {Name: "KUBERNETES_SERVICE_HOST", Value: "1.2.3.4"}, + {Name: "KUBERNETES_SERVICE_PORT", Value: "8084"}, + {Name: "KUBERNETES_PORT", Value: "tcp://1.2.3.4:8084"}, + {Name: "KUBERNETES_PORT_8084_TCP", Value: "tcp://1.2.3.4:8084"}, + {Name: "KUBERNETES_PORT_8084_TCP_PROTO", Value: "tcp"}, + {Name: "KUBERNETES_PORT_8084_TCP_PORT", Value: "8084"}, + {Name: "KUBERNETES_PORT_8084_TCP_ADDR", Value: "1.2.3.4"}, + }, + }, + { + name: "pod in master service ns", + ns: "kubernetes", + container: &api.Container{}, + masterServiceNs: "kubernetes", + nilLister: false, + expectedEnvs: []kubecontainer.EnvVar{ + {Name: "NOT_SPECIAL_SERVICE_HOST", Value: "1.2.3.8"}, + {Name: "NOT_SPECIAL_SERVICE_PORT", Value: "8088"}, + {Name: "NOT_SPECIAL_PORT", Value: "tcp://1.2.3.8:8088"}, + {Name: "NOT_SPECIAL_PORT_8088_TCP", Value: "tcp://1.2.3.8:8088"}, + {Name: "NOT_SPECIAL_PORT_8088_TCP_PROTO", Value: "tcp"}, + {Name: "NOT_SPECIAL_PORT_8088_TCP_PORT", Value: "8088"}, + {Name: "NOT_SPECIAL_PORT_8088_TCP_ADDR", Value: "1.2.3.8"}, + {Name: "KUBERNETES_SERVICE_HOST", Value: "1.2.3.6"}, + {Name: "KUBERNETES_SERVICE_PORT", Value: "8086"}, + {Name: "KUBERNETES_PORT", Value: "tcp://1.2.3.6:8086"}, + {Name: "KUBERNETES_PORT_8086_TCP", Value: "tcp://1.2.3.6:8086"}, + {Name: "KUBERNETES_PORT_8086_TCP_PROTO", Value: "tcp"}, + {Name: "KUBERNETES_PORT_8086_TCP_PORT", Value: "8086"}, + {Name: "KUBERNETES_PORT_8086_TCP_ADDR", Value: "1.2.3.6"}, + }, + }, + { + name: "downward api pod", + ns: "downward-api", + container: &api.Container{ + Env: []api.EnvVar{ + { + Name: "POD_NAME", + ValueFrom: &api.EnvVarSource{ + FieldRef: &api.ObjectFieldSelector{ + APIVersion: testapi.Default.GroupVersion().String(), + FieldPath: "metadata.name", + }, + }, + }, + { + Name: "POD_NAMESPACE", + ValueFrom: &api.EnvVarSource{ + FieldRef: &api.ObjectFieldSelector{ + APIVersion: testapi.Default.GroupVersion().String(), + FieldPath: "metadata.namespace", + }, + }, + }, + { + Name: "POD_IP", + ValueFrom: &api.EnvVarSource{ + FieldRef: &api.ObjectFieldSelector{ + APIVersion: testapi.Default.GroupVersion().String(), + FieldPath: "status.podIP", + }, + }, + }, + }, + }, + masterServiceNs: "nothing", + nilLister: true, + expectedEnvs: []kubecontainer.EnvVar{ + {Name: "POD_NAME", Value: "dapi-test-pod-name"}, + {Name: "POD_NAMESPACE", Value: 
"downward-api"}, + {Name: "POD_IP", Value: "1.2.3.4"}, + }, + }, + { + name: "env expansion", + ns: "test1", + container: &api.Container{ + Env: []api.EnvVar{ + { + Name: "TEST_LITERAL", + Value: "test-test-test", + }, + { + Name: "POD_NAME", + ValueFrom: &api.EnvVarSource{ + FieldRef: &api.ObjectFieldSelector{ + APIVersion: testapi.Default.GroupVersion().String(), + FieldPath: "metadata.name", + }, + }, + }, + { + Name: "OUT_OF_ORDER_TEST", + Value: "$(OUT_OF_ORDER_TARGET)", + }, + { + Name: "OUT_OF_ORDER_TARGET", + Value: "FOO", + }, + { + Name: "EMPTY_VAR", + }, + { + Name: "EMPTY_TEST", + Value: "foo-$(EMPTY_VAR)", + }, + { + Name: "POD_NAME_TEST2", + Value: "test2-$(POD_NAME)", + }, + { + Name: "POD_NAME_TEST3", + Value: "$(POD_NAME_TEST2)-3", + }, + { + Name: "LITERAL_TEST", + Value: "literal-$(TEST_LITERAL)", + }, + { + Name: "SERVICE_VAR_TEST", + Value: "$(TEST_SERVICE_HOST):$(TEST_SERVICE_PORT)", + }, + { + Name: "TEST_UNDEFINED", + Value: "$(UNDEFINED_VAR)", + }, + }, + }, + masterServiceNs: "nothing", + nilLister: false, + expectedEnvs: []kubecontainer.EnvVar{ + { + Name: "TEST_LITERAL", + Value: "test-test-test", + }, + { + Name: "POD_NAME", + Value: "dapi-test-pod-name", + }, + { + Name: "POD_NAME_TEST2", + Value: "test2-dapi-test-pod-name", + }, + { + Name: "POD_NAME_TEST3", + Value: "test2-dapi-test-pod-name-3", + }, + { + Name: "LITERAL_TEST", + Value: "literal-test-test-test", + }, + { + Name: "TEST_SERVICE_HOST", + Value: "1.2.3.3", + }, + { + Name: "TEST_SERVICE_PORT", + Value: "8083", + }, + { + Name: "TEST_PORT", + Value: "tcp://1.2.3.3:8083", + }, + { + Name: "TEST_PORT_8083_TCP", + Value: "tcp://1.2.3.3:8083", + }, + { + Name: "TEST_PORT_8083_TCP_PROTO", + Value: "tcp", + }, + { + Name: "TEST_PORT_8083_TCP_PORT", + Value: "8083", + }, + { + Name: "TEST_PORT_8083_TCP_ADDR", + Value: "1.2.3.3", + }, + { + Name: "SERVICE_VAR_TEST", + Value: "1.2.3.3:8083", + }, + { + Name: "OUT_OF_ORDER_TEST", + Value: "$(OUT_OF_ORDER_TARGET)", + }, + { + Name: "OUT_OF_ORDER_TARGET", + Value: "FOO", + }, + { + Name: "TEST_UNDEFINED", + Value: "$(UNDEFINED_VAR)", + }, + { + Name: "EMPTY_VAR", + }, + { + Name: "EMPTY_TEST", + Value: "foo-", + }, + }, + }, + } + + for i, tc := range testCases { + testKubelet := newTestKubelet(t) + kl := testKubelet.kubelet + kl.masterServiceNamespace = tc.masterServiceNs + if tc.nilLister { + kl.serviceLister = nil + } else { + kl.serviceLister = testServiceLister{services} + } + + testPod := &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Namespace: tc.ns, + Name: "dapi-test-pod-name", + }, + } + podIP := "1.2.3.4" + + result, err := kl.makeEnvironmentVariables(testPod, tc.container, podIP) + if err != nil { + t.Errorf("[%v] Unexpected error: %v", tc.name, err) + } + + sort.Sort(envs(result)) + sort.Sort(envs(tc.expectedEnvs)) + + if !reflect.DeepEqual(result, tc.expectedEnvs) { + t.Errorf("%d: [%v] Unexpected env entries; expected {%v}, got {%v}", i, tc.name, tc.expectedEnvs, result) + } + } +} + +func waitingState(cName string) api.ContainerStatus { + return api.ContainerStatus{ + Name: cName, + State: api.ContainerState{ + Waiting: &api.ContainerStateWaiting{}, + }, + } +} +func waitingStateWithLastTermination(cName string) api.ContainerStatus { + return api.ContainerStatus{ + Name: cName, + State: api.ContainerState{ + Waiting: &api.ContainerStateWaiting{}, + }, + LastTerminationState: api.ContainerState{ + Terminated: &api.ContainerStateTerminated{ + ExitCode: 0, + }, + }, + } +} +func runningState(cName string) api.ContainerStatus { + return 
api.ContainerStatus{ + Name: cName, + State: api.ContainerState{ + Running: &api.ContainerStateRunning{}, + }, + } +} +func stoppedState(cName string) api.ContainerStatus { + return api.ContainerStatus{ + Name: cName, + State: api.ContainerState{ + Terminated: &api.ContainerStateTerminated{}, + }, + } +} +func succeededState(cName string) api.ContainerStatus { + return api.ContainerStatus{ + Name: cName, + State: api.ContainerState{ + Terminated: &api.ContainerStateTerminated{ + ExitCode: 0, + }, + }, + } +} +func failedState(cName string) api.ContainerStatus { + return api.ContainerStatus{ + Name: cName, + State: api.ContainerState{ + Terminated: &api.ContainerStateTerminated{ + ExitCode: -1, + }, + }, + } +} + +func TestPodPhaseWithRestartAlways(t *testing.T) { + desiredState := api.PodSpec{ + NodeName: "machine", + Containers: []api.Container{ + {Name: "containerA"}, + {Name: "containerB"}, + }, + RestartPolicy: api.RestartPolicyAlways, + } + + tests := []struct { + pod *api.Pod + status api.PodPhase + test string + }{ + {&api.Pod{Spec: desiredState, Status: api.PodStatus{}}, api.PodPending, "waiting"}, + { + &api.Pod{ + Spec: desiredState, + Status: api.PodStatus{ + ContainerStatuses: []api.ContainerStatus{ + runningState("containerA"), + runningState("containerB"), + }, + }, + }, + api.PodRunning, + "all running", + }, + { + &api.Pod{ + Spec: desiredState, + Status: api.PodStatus{ + ContainerStatuses: []api.ContainerStatus{ + stoppedState("containerA"), + stoppedState("containerB"), + }, + }, + }, + api.PodRunning, + "all stopped with restart always", + }, + { + &api.Pod{ + Spec: desiredState, + Status: api.PodStatus{ + ContainerStatuses: []api.ContainerStatus{ + runningState("containerA"), + stoppedState("containerB"), + }, + }, + }, + api.PodRunning, + "mixed state #1 with restart always", + }, + { + &api.Pod{ + Spec: desiredState, + Status: api.PodStatus{ + ContainerStatuses: []api.ContainerStatus{ + runningState("containerA"), + }, + }, + }, + api.PodPending, + "mixed state #2 with restart always", + }, + { + &api.Pod{ + Spec: desiredState, + Status: api.PodStatus{ + ContainerStatuses: []api.ContainerStatus{ + runningState("containerA"), + waitingState("containerB"), + }, + }, + }, + api.PodPending, + "mixed state #3 with restart always", + }, + { + &api.Pod{ + Spec: desiredState, + Status: api.PodStatus{ + ContainerStatuses: []api.ContainerStatus{ + runningState("containerA"), + waitingStateWithLastTermination("containerB"), + }, + }, + }, + api.PodRunning, + "backoff crashloop container with restart always", + }, + } + for _, test := range tests { + if status := GetPhase(&test.pod.Spec, test.pod.Status.ContainerStatuses); status != test.status { + t.Errorf("In test %s, expected %v, got %v", test.test, test.status, status) + } + } +} + +func TestPodPhaseWithRestartNever(t *testing.T) { + desiredState := api.PodSpec{ + NodeName: "machine", + Containers: []api.Container{ + {Name: "containerA"}, + {Name: "containerB"}, + }, + RestartPolicy: api.RestartPolicyNever, + } + + tests := []struct { + pod *api.Pod + status api.PodPhase + test string + }{ + {&api.Pod{Spec: desiredState, Status: api.PodStatus{}}, api.PodPending, "waiting"}, + { + &api.Pod{ + Spec: desiredState, + Status: api.PodStatus{ + ContainerStatuses: []api.ContainerStatus{ + runningState("containerA"), + runningState("containerB"), + }, + }, + }, + api.PodRunning, + "all running with restart never", + }, + { + &api.Pod{ + Spec: desiredState, + Status: api.PodStatus{ + ContainerStatuses: []api.ContainerStatus{ + 
succeededState("containerA"), + succeededState("containerB"), + }, + }, + }, + api.PodSucceeded, + "all succeeded with restart never", + }, + { + &api.Pod{ + Spec: desiredState, + Status: api.PodStatus{ + ContainerStatuses: []api.ContainerStatus{ + failedState("containerA"), + failedState("containerB"), + }, + }, + }, + api.PodFailed, + "all failed with restart never", + }, + { + &api.Pod{ + Spec: desiredState, + Status: api.PodStatus{ + ContainerStatuses: []api.ContainerStatus{ + runningState("containerA"), + succeededState("containerB"), + }, + }, + }, + api.PodRunning, + "mixed state #1 with restart never", + }, + { + &api.Pod{ + Spec: desiredState, + Status: api.PodStatus{ + ContainerStatuses: []api.ContainerStatus{ + runningState("containerA"), + }, + }, + }, + api.PodPending, + "mixed state #2 with restart never", + }, + { + &api.Pod{ + Spec: desiredState, + Status: api.PodStatus{ + ContainerStatuses: []api.ContainerStatus{ + runningState("containerA"), + waitingState("containerB"), + }, + }, + }, + api.PodPending, + "mixed state #3 with restart never", + }, + } + for _, test := range tests { + if status := GetPhase(&test.pod.Spec, test.pod.Status.ContainerStatuses); status != test.status { + t.Errorf("In test %s, expected %v, got %v", test.test, test.status, status) + } + } +} + +func TestPodPhaseWithRestartOnFailure(t *testing.T) { + desiredState := api.PodSpec{ + NodeName: "machine", + Containers: []api.Container{ + {Name: "containerA"}, + {Name: "containerB"}, + }, + RestartPolicy: api.RestartPolicyOnFailure, + } + + tests := []struct { + pod *api.Pod + status api.PodPhase + test string + }{ + {&api.Pod{Spec: desiredState, Status: api.PodStatus{}}, api.PodPending, "waiting"}, + { + &api.Pod{ + Spec: desiredState, + Status: api.PodStatus{ + ContainerStatuses: []api.ContainerStatus{ + runningState("containerA"), + runningState("containerB"), + }, + }, + }, + api.PodRunning, + "all running with restart onfailure", + }, + { + &api.Pod{ + Spec: desiredState, + Status: api.PodStatus{ + ContainerStatuses: []api.ContainerStatus{ + succeededState("containerA"), + succeededState("containerB"), + }, + }, + }, + api.PodSucceeded, + "all succeeded with restart onfailure", + }, + { + &api.Pod{ + Spec: desiredState, + Status: api.PodStatus{ + ContainerStatuses: []api.ContainerStatus{ + failedState("containerA"), + failedState("containerB"), + }, + }, + }, + api.PodRunning, + "all failed with restart never", + }, + { + &api.Pod{ + Spec: desiredState, + Status: api.PodStatus{ + ContainerStatuses: []api.ContainerStatus{ + runningState("containerA"), + succeededState("containerB"), + }, + }, + }, + api.PodRunning, + "mixed state #1 with restart onfailure", + }, + { + &api.Pod{ + Spec: desiredState, + Status: api.PodStatus{ + ContainerStatuses: []api.ContainerStatus{ + runningState("containerA"), + }, + }, + }, + api.PodPending, + "mixed state #2 with restart onfailure", + }, + { + &api.Pod{ + Spec: desiredState, + Status: api.PodStatus{ + ContainerStatuses: []api.ContainerStatus{ + runningState("containerA"), + waitingState("containerB"), + }, + }, + }, + api.PodPending, + "mixed state #3 with restart onfailure", + }, + { + &api.Pod{ + Spec: desiredState, + Status: api.PodStatus{ + ContainerStatuses: []api.ContainerStatus{ + runningState("containerA"), + waitingStateWithLastTermination("containerB"), + }, + }, + }, + api.PodRunning, + "backoff crashloop container with restart onfailure", + }, + } + for _, test := range tests { + if status := GetPhase(&test.pod.Spec, 
test.pod.Status.ContainerStatuses); status != test.status { + t.Errorf("In test %s, expected %v, got %v", test.test, test.status, status) + } + } +} + +func TestExecInContainerNoSuchPod(t *testing.T) { + testKubelet := newTestKubelet(t) + kubelet := testKubelet.kubelet + fakeRuntime := testKubelet.fakeRuntime + fakeCommandRunner := fakeContainerCommandRunner{} + kubelet.runner = &fakeCommandRunner + fakeRuntime.PodList = []*kubecontainer.Pod{} + + podName := "podFoo" + podNamespace := "nsFoo" + containerID := "containerFoo" + err := kubelet.ExecInContainer( + kubecontainer.GetPodFullName(&api.Pod{ObjectMeta: api.ObjectMeta{Name: podName, Namespace: podNamespace}}), + "", + containerID, + []string{"ls"}, + nil, + nil, + nil, + false, + ) + if err == nil { + t.Fatal("unexpected non-error") + } + if !fakeCommandRunner.ID.IsEmpty() { + t.Fatal("unexpected invocation of runner.ExecInContainer") + } +} + +func TestExecInContainerNoSuchContainer(t *testing.T) { + testKubelet := newTestKubelet(t) + kubelet := testKubelet.kubelet + fakeRuntime := testKubelet.fakeRuntime + fakeCommandRunner := fakeContainerCommandRunner{} + kubelet.runner = &fakeCommandRunner + + podName := "podFoo" + podNamespace := "nsFoo" + containerID := "containerFoo" + fakeRuntime.PodList = []*kubecontainer.Pod{ + { + ID: "12345678", + Name: podName, + Namespace: podNamespace, + Containers: []*kubecontainer.Container{ + {Name: "bar", + ID: kubecontainer.ContainerID{Type: "test", ID: "barID"}}, + }, + }, + } + + err := kubelet.ExecInContainer( + kubecontainer.GetPodFullName(&api.Pod{ObjectMeta: api.ObjectMeta{ + UID: "12345678", + Name: podName, + Namespace: podNamespace, + }}), + "", + containerID, + []string{"ls"}, + nil, + nil, + nil, + false, + ) + if err == nil { + t.Fatal("unexpected non-error") + } + if !fakeCommandRunner.ID.IsEmpty() { + t.Fatal("unexpected invocation of runner.ExecInContainer") + } +} + +type fakeReadWriteCloser struct{} + +func (f *fakeReadWriteCloser) Write(data []byte) (int, error) { + return 0, nil +} + +func (f *fakeReadWriteCloser) Read(data []byte) (int, error) { + return 0, nil +} + +func (f *fakeReadWriteCloser) Close() error { + return nil +} + +func TestExecInContainer(t *testing.T) { + testKubelet := newTestKubelet(t) + kubelet := testKubelet.kubelet + fakeRuntime := testKubelet.fakeRuntime + fakeCommandRunner := fakeContainerCommandRunner{} + kubelet.runner = &fakeCommandRunner + + podName := "podFoo" + podNamespace := "nsFoo" + containerID := "containerFoo" + command := []string{"ls"} + stdin := &bytes.Buffer{} + stdout := &fakeReadWriteCloser{} + stderr := &fakeReadWriteCloser{} + tty := true + fakeRuntime.PodList = []*kubecontainer.Pod{ + { + ID: "12345678", + Name: podName, + Namespace: podNamespace, + Containers: []*kubecontainer.Container{ + {Name: containerID, + ID: kubecontainer.ContainerID{Type: "test", ID: containerID}, + }, + }, + }, + } + + err := kubelet.ExecInContainer( + kubecontainer.GetPodFullName(podWithUidNameNs("12345678", podName, podNamespace)), + "", + containerID, + []string{"ls"}, + stdin, + stdout, + stderr, + tty, + ) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + if e, a := containerID, fakeCommandRunner.ID.ID; e != a { + t.Fatalf("container name: expected %q, got %q", e, a) + } + if e, a := command, fakeCommandRunner.Cmd; !reflect.DeepEqual(e, a) { + t.Fatalf("command: expected '%v', got '%v'", e, a) + } + if e, a := stdin, fakeCommandRunner.Stdin; e != a { + t.Fatalf("stdin: expected %#v, got %#v", e, a) + } + if e, a := stdout, 
fakeCommandRunner.Stdout; e != a {
+ t.Fatalf("stdout: expected %#v, got %#v", e, a)
+ }
+ if e, a := stderr, fakeCommandRunner.Stderr; e != a {
+ t.Fatalf("stderr: expected %#v, got %#v", e, a)
+ }
+ if e, a := tty, fakeCommandRunner.TTY; e != a {
+ t.Fatalf("tty: expected %t, got %t", e, a)
+ }
+}
+
+func TestPortForwardNoSuchPod(t *testing.T) {
+ testKubelet := newTestKubelet(t)
+ kubelet := testKubelet.kubelet
+ fakeRuntime := testKubelet.fakeRuntime
+ fakeRuntime.PodList = []*kubecontainer.Pod{}
+ fakeCommandRunner := fakeContainerCommandRunner{}
+ kubelet.runner = &fakeCommandRunner
+
+ podName := "podFoo"
+ podNamespace := "nsFoo"
+ var port uint16 = 5000
+
+ err := kubelet.PortForward(
+ kubecontainer.GetPodFullName(&api.Pod{ObjectMeta: api.ObjectMeta{Name: podName, Namespace: podNamespace}}),
+ "",
+ port,
+ nil,
+ )
+ if err == nil {
+ t.Fatal("unexpected non-error")
+ }
+ if !fakeCommandRunner.ID.IsEmpty() {
+ t.Fatal("unexpected invocation of runner.PortForward")
+ }
+}
+
+func TestPortForward(t *testing.T) {
+ testKubelet := newTestKubelet(t)
+ kubelet := testKubelet.kubelet
+ fakeRuntime := testKubelet.fakeRuntime
+
+ podName := "podFoo"
+ podNamespace := "nsFoo"
+ podID := types.UID("12345678")
+ fakeRuntime.PodList = []*kubecontainer.Pod{
+ {
+ ID: podID,
+ Name: podName,
+ Namespace: podNamespace,
+ Containers: []*kubecontainer.Container{
+ {
+ Name: "foo",
+ ID: kubecontainer.ContainerID{Type: "test", ID: "containerFoo"},
+ },
+ },
+ },
+ }
+ fakeCommandRunner := fakeContainerCommandRunner{}
+ kubelet.runner = &fakeCommandRunner
+
+ var port uint16 = 5000
+ stream := &fakeReadWriteCloser{}
+ err := kubelet.PortForward(
+ kubecontainer.GetPodFullName(&api.Pod{ObjectMeta: api.ObjectMeta{
+ UID: "12345678",
+ Name: podName,
+ Namespace: podNamespace,
+ }}),
+ "",
+ port,
+ stream,
+ )
+ if err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+ if e, a := podID, fakeCommandRunner.PodID; e != a {
+ t.Fatalf("pod id: expected %q, got %q", e, a)
+ }
+ if e, a := port, fakeCommandRunner.Port; e != a {
+ t.Fatalf("port: expected %v, got %v", e, a)
+ }
+ if e, a := stream, fakeCommandRunner.Stream; e != a {
+ t.Fatalf("stream: expected %v, got %v", e, a)
+ }
+}
+
+// Tests that host port conflicts are detected correctly.
+func TestGetHostPortConflicts(t *testing.T) {
+ pods := []*api.Pod{
+ {Spec: api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 80}}}}}},
+ {Spec: api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 81}}}}}},
+ {Spec: api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 82}}}}}},
+ {Spec: api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 83}}}}}},
+ }
+ // Pods should not cause any conflict.
+ if hasHostPortConflicts(pods) {
+ t.Errorf("expected no conflicts, got conflicts")
+ }
+
+ expected := &api.Pod{
+ Spec: api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 81}}}}},
+ }
+ // The new pod should cause conflict and be reported.
+ pods = append(pods, expected)
+ if !hasHostPortConflicts(pods) {
+ t.Errorf("expected conflict, got no conflicts")
+ }
+}
+
+// Tests that we handle port conflicts correctly by setting the failed status in the status map;
+// a sketch of the assumed rejection path follows.
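+// (Hedged sketch, not part of this change: the handler tests below assume that
+// HandlePodAdditions runs each pod through an admission check such as
+// hasHostPortConflicts and, when a pod does not fit, records a Failed status
+// instead of starting the pod. A hypothetical reconstruction of that path:
+//
+//	func (kl *Kubelet) rejectPod(pod *api.Pod, reason, message string) {
+//		kl.statusManager.SetPodStatus(pod, api.PodStatus{
+//			Phase:   api.PodFailed, // what these tests read back via GetPodStatus
+//			Reason:  reason,
+//			Message: message,
+//		})
+//	}
+// )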
+func TestHandlePortConflicts(t *testing.T) { + testKubelet := newTestKubelet(t) + kl := testKubelet.kubelet + testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil) + testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil) + testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil) + + kl.nodeLister = testNodeLister{nodes: []api.Node{ + { + ObjectMeta: api.ObjectMeta{Name: kl.nodeName}, + Status: api.NodeStatus{ + Allocatable: api.ResourceList{ + api.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI), + }, + }, + }, + }} + kl.nodeInfo = testNodeInfo{nodes: []api.Node{ + { + ObjectMeta: api.ObjectMeta{Name: kl.nodeName}, + Status: api.NodeStatus{ + Allocatable: api.ResourceList{ + api.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI), + }, + }, + }, + }} + + spec := api.PodSpec{NodeName: kl.nodeName, Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 80}}}}} + pods := []*api.Pod{ + podWithUidNameNsSpec("123456789", "newpod", "foo", spec), + podWithUidNameNsSpec("987654321", "oldpod", "foo", spec), + } + // Make sure the Pods are in the reverse order of creation time. + pods[1].CreationTimestamp = unversioned.NewTime(time.Now()) + pods[0].CreationTimestamp = unversioned.NewTime(time.Now().Add(1 * time.Second)) + // The newer pod should be rejected. + notfittingPod := pods[0] + fittingPod := pods[1] + + kl.HandlePodAdditions(pods) + // Check pod status stored in the status map. + // notfittingPod should be Failed + status, found := kl.statusManager.GetPodStatus(notfittingPod.UID) + if !found { + t.Fatalf("status of pod %q is not found in the status map", notfittingPod.UID) + } + if status.Phase != api.PodFailed { + t.Fatalf("expected pod status %q. Got %q.", api.PodFailed, status.Phase) + } + // fittingPod should be Pending + status, found = kl.statusManager.GetPodStatus(fittingPod.UID) + if !found { + t.Fatalf("status of pod %q is not found in the status map", fittingPod.UID) + } + if status.Phase != api.PodPending { + t.Fatalf("expected pod status %q. Got %q.", api.PodPending, status.Phase) + } +} + +// Tests that we handle host name conflicts correctly by setting the failed status in status map. +func TestHandleHostNameConflicts(t *testing.T) { + testKubelet := newTestKubelet(t) + kl := testKubelet.kubelet + testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil) + testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil) + testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil) + + kl.nodeLister = testNodeLister{nodes: []api.Node{ + { + ObjectMeta: api.ObjectMeta{Name: "127.0.0.1"}, + Status: api.NodeStatus{ + Allocatable: api.ResourceList{ + api.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI), + }, + }, + }, + }} + kl.nodeInfo = testNodeInfo{nodes: []api.Node{ + { + ObjectMeta: api.ObjectMeta{Name: "127.0.0.1"}, + Status: api.NodeStatus{ + Allocatable: api.ResourceList{ + api.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI), + }, + }, + }, + }} + + // default NodeName in test is 127.0.0.1 + pods := []*api.Pod{ + podWithUidNameNsSpec("123456789", "notfittingpod", "foo", api.PodSpec{NodeName: "127.0.0.2"}), + podWithUidNameNsSpec("987654321", "fittingpod", "foo", api.PodSpec{NodeName: "127.0.0.1"}), + } + + notfittingPod := pods[0] + fittingPod := pods[1] + + kl.HandlePodAdditions(pods) + // Check pod status stored in the status map. 
+ // notfittingPod should be Failed
+ status, found := kl.statusManager.GetPodStatus(notfittingPod.UID)
+ if !found {
+ t.Fatalf("status of pod %q is not found in the status map", notfittingPod.UID)
+ }
+ if status.Phase != api.PodFailed {
+ t.Fatalf("expected pod status %q. Got %q.", api.PodFailed, status.Phase)
+ }
+ // fittingPod should be Pending
+ status, found = kl.statusManager.GetPodStatus(fittingPod.UID)
+ if !found {
+ t.Fatalf("status of pod %q is not found in the status map", fittingPod.UID)
+ }
+ if status.Phase != api.PodPending {
+ t.Fatalf("expected pod status %q. Got %q.", api.PodPending, status.Phase)
+ }
+}
+
+// Tests that we handle a non-matching node selector correctly by setting the failed status in the status map.
+func TestHandleNodeSelector(t *testing.T) {
+ testKubelet := newTestKubelet(t)
+ kl := testKubelet.kubelet
+ nodes := []api.Node{
+ {
+ ObjectMeta: api.ObjectMeta{Name: testKubeletHostname, Labels: map[string]string{"key": "B"}},
+ Status: api.NodeStatus{
+ Allocatable: api.ResourceList{
+ api.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
+ },
+ },
+ },
+ }
+ kl.nodeLister = testNodeLister{nodes: nodes}
+ kl.nodeInfo = testNodeInfo{nodes: nodes}
+ testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
+ testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
+ testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
+ pods := []*api.Pod{
+ podWithUidNameNsSpec("123456789", "podA", "foo", api.PodSpec{NodeSelector: map[string]string{"key": "A"}}),
+ podWithUidNameNsSpec("987654321", "podB", "foo", api.PodSpec{NodeSelector: map[string]string{"key": "B"}}),
+ }
+ // The first pod should be rejected.
+ notfittingPod := pods[0]
+ fittingPod := pods[1]
+
+ kl.HandlePodAdditions(pods)
+ // Check pod status stored in the status map.
+ // notfittingPod should be Failed
+ status, found := kl.statusManager.GetPodStatus(notfittingPod.UID)
+ if !found {
+ t.Fatalf("status of pod %q is not found in the status map", notfittingPod.UID)
+ }
+ if status.Phase != api.PodFailed {
+ t.Fatalf("expected pod status %q. Got %q.", api.PodFailed, status.Phase)
+ }
+ // fittingPod should be Pending
+ status, found = kl.statusManager.GetPodStatus(fittingPod.UID)
+ if !found {
+ t.Fatalf("status of pod %q is not found in the status map", fittingPod.UID)
+ }
+ if status.Phase != api.PodPending {
+ t.Fatalf("expected pod status %q. Got %q.", api.PodPending, status.Phase)
+ }
+}
+
+// Tests that we handle exceeded resources correctly by setting the failed status in the status map.
+// The pod helpers these tests rely on are sketched below.
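+// (For reference, a hypothetical reconstruction of the podWithUidNameNs and
+// podWithUidNameNsSpec helpers used throughout these tests; assumed shape, not
+// part of this change:
+//
+//	func podWithUidNameNs(uid types.UID, name, namespace string) *api.Pod {
+//		return &api.Pod{
+//			ObjectMeta: api.ObjectMeta{
+//				UID:         uid,
+//				Name:        name,
+//				Namespace:   namespace,
+//				Annotations: map[string]string{}, // non-nil so tests can set annotations directly
+//			},
+//		}
+//	}
+//
+//	func podWithUidNameNsSpec(uid types.UID, name, namespace string, spec api.PodSpec) *api.Pod {
+//		pod := podWithUidNameNs(uid, name, namespace)
+//		pod.Spec = spec
+//		return pod
+//	}
+// )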
+func TestHandleMemExceeded(t *testing.T) { + testKubelet := newTestKubelet(t) + kl := testKubelet.kubelet + nodes := []api.Node{ + {ObjectMeta: api.ObjectMeta{Name: testKubeletHostname}, + Status: api.NodeStatus{Capacity: api.ResourceList{}, Allocatable: api.ResourceList{ + api.ResourceCPU: *resource.NewMilliQuantity(10, resource.DecimalSI), + api.ResourceMemory: *resource.NewQuantity(100, resource.BinarySI), + api.ResourcePods: *resource.NewQuantity(40, resource.DecimalSI), + }}}, + } + kl.nodeLister = testNodeLister{nodes: nodes} + kl.nodeInfo = testNodeInfo{nodes: nodes} + testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil) + testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil) + testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil) + + spec := api.PodSpec{NodeName: kl.nodeName, + Containers: []api.Container{{Resources: api.ResourceRequirements{ + Requests: api.ResourceList{ + "memory": resource.MustParse("90"), + }, + }}}} + pods := []*api.Pod{ + podWithUidNameNsSpec("123456789", "newpod", "foo", spec), + podWithUidNameNsSpec("987654321", "oldpod", "foo", spec), + } + // Make sure the Pods are in the reverse order of creation time. + pods[1].CreationTimestamp = unversioned.NewTime(time.Now()) + pods[0].CreationTimestamp = unversioned.NewTime(time.Now().Add(1 * time.Second)) + // The newer pod should be rejected. + notfittingPod := pods[0] + fittingPod := pods[1] + + kl.HandlePodAdditions(pods) + // Check pod status stored in the status map. + // notfittingPod should be Failed + status, found := kl.statusManager.GetPodStatus(notfittingPod.UID) + if !found { + t.Fatalf("status of pod %q is not found in the status map", notfittingPod.UID) + } + if status.Phase != api.PodFailed { + t.Fatalf("expected pod status %q. Got %q.", api.PodFailed, status.Phase) + } + // fittingPod should be Pending + status, found = kl.statusManager.GetPodStatus(fittingPod.UID) + if !found { + t.Fatalf("status of pod %q is not found in the status map", fittingPod.UID) + } + if status.Phase != api.PodPending { + t.Fatalf("expected pod status %q. Got %q.", api.PodPending, status.Phase) + } +} + +// TODO(filipg): This test should be removed once StatusSyncer can do garbage collection without external signal. +func TestPurgingObsoleteStatusMapEntries(t *testing.T) { + testKubelet := newTestKubelet(t) + testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil) + testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil) + testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil) + versionInfo := &cadvisorapi.VersionInfo{ + KernelVersion: "3.16.0-0.bpo.4-amd64", + ContainerOsVersion: "Debian GNU/Linux 7 (wheezy)", + DockerVersion: "1.5.0", + } + testKubelet.fakeCadvisor.On("VersionInfo").Return(versionInfo, nil) + + kl := testKubelet.kubelet + pods := []*api.Pod{ + {ObjectMeta: api.ObjectMeta{Name: "pod1", UID: "1234"}, Spec: api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 80}}}}}}, + {ObjectMeta: api.ObjectMeta{Name: "pod2", UID: "4567"}, Spec: api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 80}}}}}}, + } + podToTest := pods[1] + // Run once to populate the status map. + kl.HandlePodAdditions(pods) + if _, found := kl.statusManager.GetPodStatus(podToTest.UID); !found { + t.Fatalf("expected to have status cached for pod2") + } + // Sync with empty pods so that the entry in status map will be removed. 
+ kl.podManager.SetPods([]*api.Pod{})
+ kl.HandlePodCleanups()
+ if _, found := kl.statusManager.GetPodStatus(podToTest.UID); found {
+ t.Fatalf("expected to not have status cached for pod2")
+ }
+}
+
+func TestValidateContainerLogStatus(t *testing.T) {
+ testKubelet := newTestKubelet(t)
+ kubelet := testKubelet.kubelet
+ containerName := "x"
+ testCases := []struct {
+ statuses []api.ContainerStatus
+ success bool
+ }{
+ {
+ statuses: []api.ContainerStatus{
+ {
+ Name: containerName,
+ State: api.ContainerState{
+ Running: &api.ContainerStateRunning{},
+ },
+ LastTerminationState: api.ContainerState{
+ Terminated: &api.ContainerStateTerminated{},
+ },
+ },
+ },
+ success: true,
+ },
+ {
+ statuses: []api.ContainerStatus{
+ {
+ Name: containerName,
+ State: api.ContainerState{
+ Running: &api.ContainerStateRunning{},
+ },
+ },
+ },
+ success: true,
+ },
+ {
+ statuses: []api.ContainerStatus{
+ {
+ Name: containerName,
+ State: api.ContainerState{
+ Terminated: &api.ContainerStateTerminated{},
+ },
+ },
+ },
+ success: true,
+ },
+ {
+ statuses: []api.ContainerStatus{
+ {
+ Name: containerName,
+ State: api.ContainerState{
+ Waiting: &api.ContainerStateWaiting{},
+ },
+ },
+ },
+ success: false,
+ },
+ {
+ statuses: []api.ContainerStatus{
+ {
+ Name: containerName,
+ State: api.ContainerState{Waiting: &api.ContainerStateWaiting{Reason: "ErrImagePull"}},
+ },
+ },
+ success: false,
+ },
+ {
+ statuses: []api.ContainerStatus{
+ {
+ Name: containerName,
+ State: api.ContainerState{Waiting: &api.ContainerStateWaiting{Reason: "ErrImagePullBackOff"}},
+ },
+ },
+ success: false,
+ },
+ }
+
+ for i, tc := range testCases {
+ _, err := kubelet.validateContainerLogStatus("podName", &api.PodStatus{
+ ContainerStatuses: tc.statuses,
+ }, containerName, false)
+ if tc.success {
+ if err != nil {
+ t.Errorf("[case %d]: unexpected failure - %v", i, err)
+ }
+ } else if err == nil {
+ t.Errorf("[case %d]: unexpected success", i)
+ }
+ }
+ if _, err := kubelet.validateContainerLogStatus("podName", &api.PodStatus{
+ ContainerStatuses: testCases[0].statuses,
+ }, "blah", false); err == nil {
+ t.Errorf("expected error for an invalid container name")
+ }
+ if _, err := kubelet.validateContainerLogStatus("podName", &api.PodStatus{
+ ContainerStatuses: testCases[0].statuses,
+ }, containerName, true); err != nil {
+ t.Errorf("unexpected error for previous terminated container - %v", err)
+ }
+ if _, err := kubelet.validateContainerLogStatus("podName", &api.PodStatus{
+ ContainerStatuses: testCases[0].statuses,
+ }, containerName, false); err != nil {
+ t.Errorf("unexpected error for most recent container - %v", err)
+ }
+ if _, err := kubelet.validateContainerLogStatus("podName", &api.PodStatus{
+ ContainerStatuses: testCases[1].statuses,
+ }, containerName, true); err == nil {
+ t.Errorf("expected error for previous terminated container")
+ }
+ if _, err := kubelet.validateContainerLogStatus("podName", &api.PodStatus{
+ ContainerStatuses: testCases[1].statuses,
+ }, containerName, false); err != nil {
+ t.Errorf("unexpected error for most recent container")
+ }
+}
+
+// updateDiskSpacePolicy creates a new DiskSpaceManager with a new policy. This new manager along
+// with the mock FsInfo values added to Cadvisor should make the kubelet report either that it has
+// sufficient disk space or that it is out of disk, depending on the capacity, availability, and
+// threshold values. A usage sketch follows.
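+// For example (all values in MB; a hedged illustration based on the calls made
+// by the tests below, not an authoritative contract):
+//
+//	// report sufficient disk space: 200MB available on each filesystem vs. 100MB thresholds
+//	updateDiskSpacePolicy(kubelet, mockCadvisor, 500, 500, 200, 200, 100, 100)
+//
+//	// report out of disk: only 50MB available vs. 100MB thresholds
+//	updateDiskSpacePolicy(kubelet, mockCadvisor, 500, 500, 50, 50, 100, 100)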
+func updateDiskSpacePolicy(kubelet *Kubelet, mockCadvisor *cadvisortest.Mock, rootCap, dockerCap, rootAvail, dockerAvail uint64, rootThreshold, dockerThreshold int) error {
+ // Feed the docker-image values to ImagesFsInfo and the root values to RootFsInfo,
+ // so the mocks match the parameter names.
+ dockerimagesFsInfo := cadvisorapiv2.FsInfo{Capacity: dockerCap * mb, Available: dockerAvail * mb}
+ rootFsInfo := cadvisorapiv2.FsInfo{Capacity: rootCap * mb, Available: rootAvail * mb}
+ mockCadvisor.On("ImagesFsInfo").Return(dockerimagesFsInfo, nil)
+ mockCadvisor.On("RootFsInfo").Return(rootFsInfo, nil)
+
+ dsp := DiskSpacePolicy{DockerFreeDiskMB: dockerThreshold, RootFreeDiskMB: rootThreshold}
+ diskSpaceManager, err := newDiskSpaceManager(mockCadvisor, dsp)
+ if err != nil {
+ return err
+ }
+ kubelet.diskSpaceManager = diskSpaceManager
+ return nil
+}
+
+func TestUpdateNewNodeStatus(t *testing.T) {
+ // generate one more than maxImagesInNodeStatus in inputImageList
+ inputImageList, expectedImageList := generateTestingImageList(maxImagesInNodeStatus + 1)
+ testKubelet := newTestKubeletWithImageList(t, inputImageList)
+ kubelet := testKubelet.kubelet
+ kubeClient := testKubelet.fakeKubeClient
+ kubeClient.ReactionChain = fake.NewSimpleClientset(&api.NodeList{Items: []api.Node{
+ {ObjectMeta: api.ObjectMeta{Name: testKubeletHostname}},
+ }}).ReactionChain
+ machineInfo := &cadvisorapi.MachineInfo{
+ MachineID: "123",
+ SystemUUID: "abc",
+ BootID: "1b3",
+ NumCores: 2,
+ MemoryCapacity: 10E9, // 10G
+ }
+ mockCadvisor := testKubelet.fakeCadvisor
+ mockCadvisor.On("Start").Return(nil)
+ mockCadvisor.On("MachineInfo").Return(machineInfo, nil)
+ versionInfo := &cadvisorapi.VersionInfo{
+ KernelVersion: "3.16.0-0.bpo.4-amd64",
+ ContainerOsVersion: "Debian GNU/Linux 7 (wheezy)",
+ }
+ mockCadvisor.On("VersionInfo").Return(versionInfo, nil)
+
+ // Make kubelet report that it has sufficient disk space.
+ if err := updateDiskSpacePolicy(kubelet, mockCadvisor, 500, 500, 200, 200, 100, 100); err != nil { + t.Fatalf("can't update disk space manager: %v", err) + } + + expectedNode := &api.Node{ + ObjectMeta: api.ObjectMeta{Name: testKubeletHostname}, + Spec: api.NodeSpec{}, + Status: api.NodeStatus{ + Conditions: []api.NodeCondition{ + { + Type: api.NodeOutOfDisk, + Status: api.ConditionFalse, + Reason: "KubeletHasSufficientDisk", + Message: fmt.Sprintf("kubelet has sufficient disk space available"), + LastHeartbeatTime: unversioned.Time{}, + LastTransitionTime: unversioned.Time{}, + }, + { + Type: api.NodeMemoryPressure, + Status: api.ConditionFalse, + Reason: "KubeletHasSufficientMemory", + Message: fmt.Sprintf("kubelet has sufficient memory available"), + LastHeartbeatTime: unversioned.Time{}, + LastTransitionTime: unversioned.Time{}, + }, + { + Type: api.NodeReady, + Status: api.ConditionTrue, + Reason: "KubeletReady", + Message: fmt.Sprintf("kubelet is posting ready status"), + LastHeartbeatTime: unversioned.Time{}, + LastTransitionTime: unversioned.Time{}, + }, + }, + NodeInfo: api.NodeSystemInfo{ + MachineID: "123", + SystemUUID: "abc", + BootID: "1b3", + KernelVersion: "3.16.0-0.bpo.4-amd64", + OSImage: "Debian GNU/Linux 7 (wheezy)", + OperatingSystem: "linux", + Architecture: "amd64", + ContainerRuntimeVersion: "test://1.5.0", + KubeletVersion: version.Get().String(), + KubeProxyVersion: version.Get().String(), + }, + Capacity: api.ResourceList{ + api.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI), + api.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI), + api.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI), + api.ResourceNvidiaGPU: *resource.NewQuantity(0, resource.DecimalSI), + }, + Allocatable: api.ResourceList{ + api.ResourceCPU: *resource.NewMilliQuantity(1800, resource.DecimalSI), + api.ResourceMemory: *resource.NewQuantity(9900E6, resource.BinarySI), + api.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI), + api.ResourceNvidiaGPU: *resource.NewQuantity(0, resource.DecimalSI), + }, + Addresses: []api.NodeAddress{ + {Type: api.NodeLegacyHostIP, Address: "127.0.0.1"}, + {Type: api.NodeInternalIP, Address: "127.0.0.1"}, + }, + Images: expectedImageList, + }, + } + + kubelet.updateRuntimeUp() + if err := kubelet.updateNodeStatus(); err != nil { + t.Errorf("unexpected error: %v", err) + } + actions := kubeClient.Actions() + if len(actions) != 2 { + t.Fatalf("unexpected actions: %v", actions) + } + if !actions[1].Matches("update", "nodes") || actions[1].GetSubresource() != "status" { + t.Fatalf("unexpected actions: %v", actions) + } + updatedNode, ok := actions[1].(core.UpdateAction).GetObject().(*api.Node) + if !ok { + t.Errorf("unexpected object type") + } + for i, cond := range updatedNode.Status.Conditions { + if cond.LastHeartbeatTime.IsZero() { + t.Errorf("unexpected zero last probe timestamp for %v condition", cond.Type) + } + if cond.LastTransitionTime.IsZero() { + t.Errorf("unexpected zero last transition timestamp for %v condition", cond.Type) + } + updatedNode.Status.Conditions[i].LastHeartbeatTime = unversioned.Time{} + updatedNode.Status.Conditions[i].LastTransitionTime = unversioned.Time{} + } + + // Version skew workaround. See: https://github.com/kubernetes/kubernetes/issues/16961 + if updatedNode.Status.Conditions[len(updatedNode.Status.Conditions)-1].Type != api.NodeReady { + t.Errorf("unexpected node condition order. 
NodeReady should be last.") + } + + if maxImagesInNodeStatus != len(updatedNode.Status.Images) { + t.Errorf("unexpected image list length in node status, expected: %v, got: %v", maxImagesInNodeStatus, len(updatedNode.Status.Images)) + } else { + if !api.Semantic.DeepEqual(expectedNode, updatedNode) { + t.Errorf("unexpected objects: %s", diff.ObjectDiff(expectedNode, updatedNode)) + } + } + +} + +func TestUpdateNewNodeOutOfDiskStatusWithTransitionFrequency(t *testing.T) { + testKubelet := newTestKubelet(t) + kubelet := testKubelet.kubelet + kubeClient := testKubelet.fakeKubeClient + kubeClient.ReactionChain = fake.NewSimpleClientset(&api.NodeList{Items: []api.Node{ + {ObjectMeta: api.ObjectMeta{Name: testKubeletHostname}}, + }}).ReactionChain + machineInfo := &cadvisorapi.MachineInfo{ + MachineID: "123", + SystemUUID: "abc", + BootID: "1b3", + NumCores: 2, + MemoryCapacity: 1024, + } + mockCadvisor := testKubelet.fakeCadvisor + mockCadvisor.On("Start").Return(nil) + mockCadvisor.On("MachineInfo").Return(machineInfo, nil) + versionInfo := &cadvisorapi.VersionInfo{ + KernelVersion: "3.16.0-0.bpo.4-amd64", + ContainerOsVersion: "Debian GNU/Linux 7 (wheezy)", + } + mockCadvisor.On("VersionInfo").Return(versionInfo, nil) + + // Make Kubelet report that it has sufficient disk space. + if err := updateDiskSpacePolicy(kubelet, mockCadvisor, 500, 500, 200, 200, 100, 100); err != nil { + t.Fatalf("can't update disk space manager: %v", err) + } + + kubelet.outOfDiskTransitionFrequency = 10 * time.Second + + expectedNodeOutOfDiskCondition := api.NodeCondition{ + Type: api.NodeOutOfDisk, + Status: api.ConditionFalse, + Reason: "KubeletHasSufficientDisk", + Message: fmt.Sprintf("kubelet has sufficient disk space available"), + LastHeartbeatTime: unversioned.Time{}, + LastTransitionTime: unversioned.Time{}, + } + + kubelet.updateRuntimeUp() + if err := kubelet.updateNodeStatus(); err != nil { + t.Errorf("unexpected error: %v", err) + } + actions := kubeClient.Actions() + if len(actions) != 2 { + t.Fatalf("unexpected actions: %v", actions) + } + if !actions[1].Matches("update", "nodes") || actions[1].GetSubresource() != "status" { + t.Fatalf("unexpected actions: %v", actions) + } + updatedNode, ok := actions[1].(core.UpdateAction).GetObject().(*api.Node) + if !ok { + t.Errorf("unexpected object type") + } + + var oodCondition api.NodeCondition + for i, cond := range updatedNode.Status.Conditions { + if cond.LastHeartbeatTime.IsZero() { + t.Errorf("unexpected zero last probe timestamp for %v condition", cond.Type) + } + if cond.LastTransitionTime.IsZero() { + t.Errorf("unexpected zero last transition timestamp for %v condition", cond.Type) + } + updatedNode.Status.Conditions[i].LastHeartbeatTime = unversioned.Time{} + updatedNode.Status.Conditions[i].LastTransitionTime = unversioned.Time{} + if cond.Type == api.NodeOutOfDisk { + oodCondition = updatedNode.Status.Conditions[i] + } + } + + if !reflect.DeepEqual(expectedNodeOutOfDiskCondition, oodCondition) { + t.Errorf("unexpected objects: %s", diff.ObjectDiff(expectedNodeOutOfDiskCondition, oodCondition)) + } +} + +func TestUpdateExistingNodeStatus(t *testing.T) { + testKubelet := newTestKubelet(t) + kubelet := testKubelet.kubelet + kubeClient := testKubelet.fakeKubeClient + kubeClient.ReactionChain = fake.NewSimpleClientset(&api.NodeList{Items: []api.Node{ + { + ObjectMeta: api.ObjectMeta{Name: testKubeletHostname}, + Spec: api.NodeSpec{}, + Status: api.NodeStatus{ + Conditions: []api.NodeCondition{ + { + Type: api.NodeOutOfDisk, + Status: 
api.ConditionTrue, + Reason: "KubeletOutOfDisk", + Message: "out of disk space", + LastHeartbeatTime: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), + LastTransitionTime: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), + }, + { + Type: api.NodeMemoryPressure, + Status: api.ConditionFalse, + Reason: "KubeletHasSufficientMemory", + Message: fmt.Sprintf("kubelet has sufficient memory available"), + LastHeartbeatTime: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), + LastTransitionTime: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), + }, + { + Type: api.NodeReady, + Status: api.ConditionTrue, + Reason: "KubeletReady", + Message: fmt.Sprintf("kubelet is posting ready status"), + LastHeartbeatTime: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), + LastTransitionTime: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), + }, + }, + Capacity: api.ResourceList{ + api.ResourceCPU: *resource.NewMilliQuantity(3000, resource.DecimalSI), + api.ResourceMemory: *resource.NewQuantity(20E9, resource.BinarySI), + api.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI), + }, + Allocatable: api.ResourceList{ + api.ResourceCPU: *resource.NewMilliQuantity(2800, resource.DecimalSI), + api.ResourceMemory: *resource.NewQuantity(19900E6, resource.BinarySI), + api.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI), + }, + }, + }, + }}).ReactionChain + mockCadvisor := testKubelet.fakeCadvisor + mockCadvisor.On("Start").Return(nil) + machineInfo := &cadvisorapi.MachineInfo{ + MachineID: "123", + SystemUUID: "abc", + BootID: "1b3", + NumCores: 2, + MemoryCapacity: 20E9, + } + mockCadvisor.On("MachineInfo").Return(machineInfo, nil) + versionInfo := &cadvisorapi.VersionInfo{ + KernelVersion: "3.16.0-0.bpo.4-amd64", + ContainerOsVersion: "Debian GNU/Linux 7 (wheezy)", + } + mockCadvisor.On("VersionInfo").Return(versionInfo, nil) + + // Make kubelet report that it is out of disk space. 
+ if err := updateDiskSpacePolicy(kubelet, mockCadvisor, 500, 500, 50, 50, 100, 100); err != nil { + t.Fatalf("can't update disk space manager: %v", err) + } + + expectedNode := &api.Node{ + ObjectMeta: api.ObjectMeta{Name: testKubeletHostname}, + Spec: api.NodeSpec{}, + Status: api.NodeStatus{ + Conditions: []api.NodeCondition{ + { + Type: api.NodeOutOfDisk, + Status: api.ConditionTrue, + Reason: "KubeletOutOfDisk", + Message: "out of disk space", + LastHeartbeatTime: unversioned.Time{}, // placeholder + LastTransitionTime: unversioned.Time{}, // placeholder + }, + { + Type: api.NodeMemoryPressure, + Status: api.ConditionFalse, + Reason: "KubeletHasSufficientMemory", + Message: fmt.Sprintf("kubelet has sufficient memory available"), + LastHeartbeatTime: unversioned.Time{}, + LastTransitionTime: unversioned.Time{}, + }, + { + Type: api.NodeReady, + Status: api.ConditionTrue, + Reason: "KubeletReady", + Message: fmt.Sprintf("kubelet is posting ready status"), + LastHeartbeatTime: unversioned.Time{}, // placeholder + LastTransitionTime: unversioned.Time{}, // placeholder + }, + }, + NodeInfo: api.NodeSystemInfo{ + MachineID: "123", + SystemUUID: "abc", + BootID: "1b3", + KernelVersion: "3.16.0-0.bpo.4-amd64", + OSImage: "Debian GNU/Linux 7 (wheezy)", + OperatingSystem: "linux", + Architecture: "amd64", + ContainerRuntimeVersion: "test://1.5.0", + KubeletVersion: version.Get().String(), + KubeProxyVersion: version.Get().String(), + }, + Capacity: api.ResourceList{ + api.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI), + api.ResourceMemory: *resource.NewQuantity(20E9, resource.BinarySI), + api.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI), + api.ResourceNvidiaGPU: *resource.NewQuantity(0, resource.DecimalSI), + }, + Allocatable: api.ResourceList{ + api.ResourceCPU: *resource.NewMilliQuantity(1800, resource.DecimalSI), + api.ResourceMemory: *resource.NewQuantity(19900E6, resource.BinarySI), + api.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI), + api.ResourceNvidiaGPU: *resource.NewQuantity(0, resource.DecimalSI), + }, + Addresses: []api.NodeAddress{ + {Type: api.NodeLegacyHostIP, Address: "127.0.0.1"}, + {Type: api.NodeInternalIP, Address: "127.0.0.1"}, + }, + // images will be sorted from max to min in node status. + Images: []api.ContainerImage{ + { + Names: []string{"gcr.io/google_containers:v3", "gcr.io/google_containers:v4"}, + SizeBytes: 456, + }, + { + Names: []string{"gcr.io/google_containers:v1", "gcr.io/google_containers:v2"}, + SizeBytes: 123, + }, + }, + }, + } + + kubelet.updateRuntimeUp() + if err := kubelet.updateNodeStatus(); err != nil { + t.Errorf("unexpected error: %v", err) + } + actions := kubeClient.Actions() + if len(actions) != 2 { + t.Errorf("unexpected actions: %v", actions) + } + updateAction, ok := actions[1].(core.UpdateAction) + if !ok { + t.Errorf("unexpected action type. expected UpdateAction, got %#v", actions[1]) + } + updatedNode, ok := updateAction.GetObject().(*api.Node) + if !ok { + t.Errorf("unexpected object type") + } + for i, cond := range updatedNode.Status.Conditions { + // Expect LastProbeTime to be updated to Now, while LastTransitionTime to be the same. 
+ if old := unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC).Time; reflect.DeepEqual(cond.LastHeartbeatTime.Rfc3339Copy().UTC(), old) { + t.Errorf("Condition %v LastProbeTime: expected \n%v\n, got \n%v", cond.Type, unversioned.Now(), old) + } + if got, want := cond.LastTransitionTime.Rfc3339Copy().UTC(), unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC).Time; !reflect.DeepEqual(got, want) { + t.Errorf("Condition %v LastTransitionTime: expected \n%#v\n, got \n%#v", cond.Type, want, got) + } + updatedNode.Status.Conditions[i].LastHeartbeatTime = unversioned.Time{} + updatedNode.Status.Conditions[i].LastTransitionTime = unversioned.Time{} + } + + // Version skew workaround. See: https://github.com/kubernetes/kubernetes/issues/16961 + if updatedNode.Status.Conditions[len(updatedNode.Status.Conditions)-1].Type != api.NodeReady { + t.Errorf("unexpected node condition order. NodeReady should be last.") + } + + if !api.Semantic.DeepEqual(expectedNode, updatedNode) { + t.Errorf("expected \n%v\n, got \n%v", expectedNode, updatedNode) + } +} + +func TestUpdateExistingNodeOutOfDiskStatusWithTransitionFrequency(t *testing.T) { + testKubelet := newTestKubelet(t) + kubelet := testKubelet.kubelet + clock := testKubelet.fakeClock + kubeClient := testKubelet.fakeKubeClient + kubeClient.ReactionChain = fake.NewSimpleClientset(&api.NodeList{Items: []api.Node{ + { + ObjectMeta: api.ObjectMeta{Name: testKubeletHostname}, + Spec: api.NodeSpec{}, + Status: api.NodeStatus{ + Conditions: []api.NodeCondition{ + { + Type: api.NodeReady, + Status: api.ConditionTrue, + Reason: "KubeletReady", + Message: fmt.Sprintf("kubelet is posting ready status"), + LastHeartbeatTime: unversioned.NewTime(clock.Now()), + LastTransitionTime: unversioned.NewTime(clock.Now()), + }, + { + + Type: api.NodeOutOfDisk, + Status: api.ConditionTrue, + Reason: "KubeletOutOfDisk", + Message: "out of disk space", + LastHeartbeatTime: unversioned.NewTime(clock.Now()), + LastTransitionTime: unversioned.NewTime(clock.Now()), + }, + }, + }, + }, + }}).ReactionChain + mockCadvisor := testKubelet.fakeCadvisor + machineInfo := &cadvisorapi.MachineInfo{ + MachineID: "123", + SystemUUID: "abc", + BootID: "1b3", + NumCores: 2, + MemoryCapacity: 1024, + } + mockCadvisor.On("Start").Return(nil) + mockCadvisor.On("MachineInfo").Return(machineInfo, nil) + versionInfo := &cadvisorapi.VersionInfo{ + KernelVersion: "3.16.0-0.bpo.4-amd64", + ContainerOsVersion: "Debian GNU/Linux 7 (wheezy)", + DockerVersion: "1.5.0", + } + mockCadvisor.On("VersionInfo").Return(versionInfo, nil) + + kubelet.outOfDiskTransitionFrequency = 5 * time.Second + + ood := api.NodeCondition{ + Type: api.NodeOutOfDisk, + Status: api.ConditionTrue, + Reason: "KubeletOutOfDisk", + Message: "out of disk space", + LastHeartbeatTime: unversioned.NewTime(clock.Now()), // placeholder + LastTransitionTime: unversioned.NewTime(clock.Now()), // placeholder + } + noOod := api.NodeCondition{ + Type: api.NodeOutOfDisk, + Status: api.ConditionFalse, + Reason: "KubeletHasSufficientDisk", + Message: fmt.Sprintf("kubelet has sufficient disk space available"), + LastHeartbeatTime: unversioned.NewTime(clock.Now()), // placeholder + LastTransitionTime: unversioned.NewTime(clock.Now()), // placeholder + } + + testCases := []struct { + rootFsAvail uint64 + dockerFsAvail uint64 + expected api.NodeCondition + }{ + { + // NodeOutOfDisk==false + rootFsAvail: 200, + dockerFsAvail: 200, + expected: ood, + }, + { + // NodeOutOfDisk==true + rootFsAvail: 50, + dockerFsAvail: 200, + expected: ood, + }, + { + // 
NodeOutOfDisk==false + rootFsAvail: 200, + dockerFsAvail: 200, + expected: ood, + }, + { + // NodeOutOfDisk==true + rootFsAvail: 200, + dockerFsAvail: 50, + expected: ood, + }, + { + // NodeOutOfDisk==false + rootFsAvail: 200, + dockerFsAvail: 200, + expected: noOod, + }, + } + + kubelet.updateRuntimeUp() + for tcIdx, tc := range testCases { + // Step by a second + clock.Step(1 * time.Second) + + // Setup expected times. + tc.expected.LastHeartbeatTime = unversioned.NewTime(clock.Now()) + // In the last case, there should be a status transition for NodeOutOfDisk + if tcIdx == len(testCases)-1 { + tc.expected.LastTransitionTime = unversioned.NewTime(clock.Now()) + } + + // Make kubelet report that it has sufficient disk space + if err := updateDiskSpacePolicy(kubelet, mockCadvisor, 500, 500, tc.rootFsAvail, tc.dockerFsAvail, 100, 100); err != nil { + t.Fatalf("can't update disk space manager: %v", err) + } + + if err := kubelet.updateNodeStatus(); err != nil { + t.Errorf("unexpected error: %v", err) + } + actions := kubeClient.Actions() + if len(actions) != 2 { + t.Errorf("%d. unexpected actions: %v", tcIdx, actions) + } + updateAction, ok := actions[1].(core.UpdateAction) + if !ok { + t.Errorf("%d. unexpected action type. expected UpdateAction, got %#v", tcIdx, actions[1]) + } + updatedNode, ok := updateAction.GetObject().(*api.Node) + if !ok { + t.Errorf("%d. unexpected object type", tcIdx) + } + kubeClient.ClearActions() + + var oodCondition api.NodeCondition + for i, cond := range updatedNode.Status.Conditions { + if cond.Type == api.NodeOutOfDisk { + oodCondition = updatedNode.Status.Conditions[i] + } + } + + if !reflect.DeepEqual(tc.expected, oodCondition) { + t.Errorf("%d.\nwant \n%v\n, got \n%v", tcIdx, tc.expected, oodCondition) + } + } +} + +func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) { + testKubelet := newTestKubelet(t) + kubelet := testKubelet.kubelet + clock := testKubelet.fakeClock + kubeClient := testKubelet.fakeKubeClient + kubeClient.ReactionChain = fake.NewSimpleClientset(&api.NodeList{Items: []api.Node{ + {ObjectMeta: api.ObjectMeta{Name: testKubeletHostname}}, + }}).ReactionChain + mockCadvisor := testKubelet.fakeCadvisor + mockCadvisor.On("Start").Return(nil) + machineInfo := &cadvisorapi.MachineInfo{ + MachineID: "123", + SystemUUID: "abc", + BootID: "1b3", + NumCores: 2, + MemoryCapacity: 10E9, + } + mockCadvisor.On("MachineInfo").Return(machineInfo, nil) + versionInfo := &cadvisorapi.VersionInfo{ + KernelVersion: "3.16.0-0.bpo.4-amd64", + ContainerOsVersion: "Debian GNU/Linux 7 (wheezy)", + } + mockCadvisor.On("VersionInfo").Return(versionInfo, nil) + + // Make kubelet report that it has sufficient disk space. 
+ if err := updateDiskSpacePolicy(kubelet, mockCadvisor, 500, 500, 200, 200, 100, 100); err != nil { + t.Fatalf("can't update disk space manager: %v", err) + } + + expectedNode := &api.Node{ + ObjectMeta: api.ObjectMeta{Name: testKubeletHostname}, + Spec: api.NodeSpec{}, + Status: api.NodeStatus{ + Conditions: []api.NodeCondition{ + { + Type: api.NodeOutOfDisk, + Status: api.ConditionFalse, + Reason: "KubeletHasSufficientDisk", + Message: "kubelet has sufficient disk space available", + LastHeartbeatTime: unversioned.Time{}, + LastTransitionTime: unversioned.Time{}, + }, + { + Type: api.NodeMemoryPressure, + Status: api.ConditionFalse, + Reason: "KubeletHasSufficientMemory", + Message: fmt.Sprintf("kubelet has sufficient memory available"), + LastHeartbeatTime: unversioned.Time{}, + LastTransitionTime: unversioned.Time{}, + }, + {}, //placeholder + }, + NodeInfo: api.NodeSystemInfo{ + MachineID: "123", + SystemUUID: "abc", + BootID: "1b3", + KernelVersion: "3.16.0-0.bpo.4-amd64", + OSImage: "Debian GNU/Linux 7 (wheezy)", + OperatingSystem: "linux", + Architecture: "amd64", + ContainerRuntimeVersion: "test://1.5.0", + KubeletVersion: version.Get().String(), + KubeProxyVersion: version.Get().String(), + }, + Capacity: api.ResourceList{ + api.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI), + api.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI), + api.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI), + api.ResourceNvidiaGPU: *resource.NewQuantity(0, resource.DecimalSI), + }, + Allocatable: api.ResourceList{ + api.ResourceCPU: *resource.NewMilliQuantity(1800, resource.DecimalSI), + api.ResourceMemory: *resource.NewQuantity(9900E6, resource.BinarySI), + api.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI), + api.ResourceNvidiaGPU: *resource.NewQuantity(0, resource.DecimalSI), + }, + Addresses: []api.NodeAddress{ + {Type: api.NodeLegacyHostIP, Address: "127.0.0.1"}, + {Type: api.NodeInternalIP, Address: "127.0.0.1"}, + }, + Images: []api.ContainerImage{ + { + Names: []string{"gcr.io/google_containers:v3", "gcr.io/google_containers:v4"}, + SizeBytes: 456, + }, + { + Names: []string{"gcr.io/google_containers:v1", "gcr.io/google_containers:v2"}, + SizeBytes: 123, + }, + }, + }, + } + + checkNodeStatus := func(status api.ConditionStatus, reason, message string) { + kubeClient.ClearActions() + if err := kubelet.updateNodeStatus(); err != nil { + t.Errorf("unexpected error: %v", err) + } + actions := kubeClient.Actions() + if len(actions) != 2 { + t.Fatalf("unexpected actions: %v", actions) + } + if !actions[1].Matches("update", "nodes") || actions[1].GetSubresource() != "status" { + t.Fatalf("unexpected actions: %v", actions) + } + updatedNode, ok := actions[1].(core.UpdateAction).GetObject().(*api.Node) + if !ok { + t.Errorf("unexpected action type. expected UpdateAction, got %#v", actions[1]) + } + + for i, cond := range updatedNode.Status.Conditions { + if cond.LastHeartbeatTime.IsZero() { + t.Errorf("unexpected zero last probe timestamp") + } + if cond.LastTransitionTime.IsZero() { + t.Errorf("unexpected zero last transition timestamp") + } + updatedNode.Status.Conditions[i].LastHeartbeatTime = unversioned.Time{} + updatedNode.Status.Conditions[i].LastTransitionTime = unversioned.Time{} + } + + // Version skew workaround. See: https://github.com/kubernetes/kubernetes/issues/16961 + lastIndex := len(updatedNode.Status.Conditions) - 1 + if updatedNode.Status.Conditions[lastIndex].Type != api.NodeReady { + t.Errorf("unexpected node condition order. 
NodeReady should be last.")
+ }
+ expectedNode.Status.Conditions[lastIndex] = api.NodeCondition{
+ Type: api.NodeReady,
+ Status: status,
+ Reason: reason,
+ Message: message,
+ LastHeartbeatTime: unversioned.Time{},
+ LastTransitionTime: unversioned.Time{},
+ }
+ if !api.Semantic.DeepEqual(expectedNode, updatedNode) {
+ t.Errorf("unexpected objects: %s", diff.ObjectDiff(expectedNode, updatedNode))
+ }
+ }
+
+ readyMessage := "kubelet is posting ready status"
+ downMessage := "container runtime is down"
+
+ // Should report kubelet not ready if the runtime check is out of date
+ clock.SetTime(time.Now().Add(-maxWaitForContainerRuntime))
+ kubelet.updateRuntimeUp()
+ checkNodeStatus(api.ConditionFalse, "KubeletNotReady", downMessage)
+
+ // Should report kubelet ready if the runtime check is updated
+ clock.SetTime(time.Now())
+ kubelet.updateRuntimeUp()
+ checkNodeStatus(api.ConditionTrue, "KubeletReady", readyMessage)
+
+ // Should report kubelet not ready if the runtime check is out of date
+ clock.SetTime(time.Now().Add(-maxWaitForContainerRuntime))
+ kubelet.updateRuntimeUp()
+ checkNodeStatus(api.ConditionFalse, "KubeletNotReady", downMessage)
+
+ // Should report kubelet not ready if the runtime check failed
+ fakeRuntime := testKubelet.fakeRuntime
+ // Inject error into fake runtime status check, node should be NotReady
+ fakeRuntime.StatusErr = fmt.Errorf("injected runtime status error")
+ clock.SetTime(time.Now())
+ kubelet.updateRuntimeUp()
+ checkNodeStatus(api.ConditionFalse, "KubeletNotReady", downMessage)
+}
+
+func TestUpdateNodeStatusError(t *testing.T) {
+ testKubelet := newTestKubelet(t)
+ kubelet := testKubelet.kubelet
+ // No matching node for the kubelet
+ testKubelet.fakeKubeClient.ReactionChain = fake.NewSimpleClientset(&api.NodeList{Items: []api.Node{}}).ReactionChain
+
+ if err := kubelet.updateNodeStatus(); err == nil {
+ t.Errorf("expected an error from updateNodeStatus, got nil")
+ }
+ if len(testKubelet.fakeKubeClient.Actions()) != nodeStatusUpdateRetry {
+ t.Errorf("unexpected actions: %v", testKubelet.fakeKubeClient.Actions())
+ }
+}
+
+func TestCreateMirrorPod(t *testing.T) {
+ for _, updateType := range []kubetypes.SyncPodType{kubetypes.SyncPodCreate, kubetypes.SyncPodUpdate} {
+ testKubelet := newTestKubelet(t)
+ kl := testKubelet.kubelet
+ manager := testKubelet.fakeMirrorClient
+ pod := podWithUidNameNs("12345678", "bar", "foo")
+ pod.Annotations[kubetypes.ConfigSourceAnnotationKey] = "file"
+ pods := []*api.Pod{pod}
+ kl.podManager.SetPods(pods)
+ err := kl.syncPod(syncPodOptions{
+ pod: pod,
+ podStatus: &kubecontainer.PodStatus{},
+ updateType: updateType,
+ })
+ if err != nil {
+ t.Errorf("unexpected error: %v", err)
+ }
+ podFullName := kubecontainer.GetPodFullName(pod)
+ if !manager.HasPod(podFullName) {
+ t.Errorf("expected mirror pod %q to be created", podFullName)
+ }
+ if manager.NumOfPods() != 1 || !manager.HasPod(podFullName) {
+ t.Errorf("expected one mirror pod %q, got %v", podFullName, manager.GetPods())
+ }
+ }
+}
+
+func TestDeleteOutdatedMirrorPod(t *testing.T) {
+ testKubelet := newTestKubelet(t)
+ testKubelet.fakeCadvisor.On("Start").Return(nil)
+ testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
+ testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
+ testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
+
+ kl := testKubelet.kubelet
+ manager := testKubelet.fakeMirrorClient
+ pod := podWithUidNameNsSpec("12345678", "foo", "ns", api.PodSpec{
+ Containers: 
[]api.Container{ + {Name: "1234", Image: "foo"}, + }, + }) + pod.Annotations[kubetypes.ConfigSourceAnnotationKey] = "file" + + // Mirror pod has an outdated spec. + mirrorPod := podWithUidNameNsSpec("11111111", "foo", "ns", api.PodSpec{ + Containers: []api.Container{ + {Name: "1234", Image: "bar"}, + }, + }) + mirrorPod.Annotations[kubetypes.ConfigSourceAnnotationKey] = "api" + mirrorPod.Annotations[kubetypes.ConfigMirrorAnnotationKey] = "mirror" + + pods := []*api.Pod{pod, mirrorPod} + kl.podManager.SetPods(pods) + err := kl.syncPod(syncPodOptions{ + pod: pod, + mirrorPod: mirrorPod, + podStatus: &kubecontainer.PodStatus{}, + updateType: kubetypes.SyncPodUpdate, + }) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + name := kubecontainer.GetPodFullName(pod) + creates, deletes := manager.GetCounts(name) + if creates != 1 || deletes != 1 { + t.Errorf("expected 1 creation and 1 deletion of %q, got %d, %d", name, creates, deletes) + } +} + +func TestDeleteOrphanedMirrorPods(t *testing.T) { + testKubelet := newTestKubelet(t) + testKubelet.fakeCadvisor.On("Start").Return(nil) + testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil) + testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil) + testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil) + + kl := testKubelet.kubelet + manager := testKubelet.fakeMirrorClient + orphanPods := []*api.Pod{ + { + ObjectMeta: api.ObjectMeta{ + UID: "12345678", + Name: "pod1", + Namespace: "ns", + Annotations: map[string]string{ + kubetypes.ConfigSourceAnnotationKey: "api", + kubetypes.ConfigMirrorAnnotationKey: "mirror", + }, + }, + }, + { + ObjectMeta: api.ObjectMeta{ + UID: "12345679", + Name: "pod2", + Namespace: "ns", + Annotations: map[string]string{ + kubetypes.ConfigSourceAnnotationKey: "api", + kubetypes.ConfigMirrorAnnotationKey: "mirror", + }, + }, + }, + } + + kl.podManager.SetPods(orphanPods) + // Sync with an empty pod list to delete all mirror pods. + kl.HandlePodCleanups() + if manager.NumOfPods() != 0 { + t.Errorf("expected zero mirror pods, got %v", manager.GetPods()) + } + for _, pod := range orphanPods { + name := kubecontainer.GetPodFullName(pod) + creates, deletes := manager.GetCounts(name) + if creates != 0 || deletes != 1 { + t.Errorf("expected 0 creation and one deletion of %q, got %d, %d", name, creates, deletes) + } + } +} + +func TestGetContainerInfoForMirrorPods(t *testing.T) { + // pods contain one static and one mirror pod with the same name but + // different UIDs. 
+ pods := []*api.Pod{ + { + ObjectMeta: api.ObjectMeta{ + UID: "1234", + Name: "qux", + Namespace: "ns", + Annotations: map[string]string{ + kubetypes.ConfigSourceAnnotationKey: "file", + }, + }, + Spec: api.PodSpec{ + Containers: []api.Container{ + {Name: "foo"}, + }, + }, + }, + { + ObjectMeta: api.ObjectMeta{ + UID: "5678", + Name: "qux", + Namespace: "ns", + Annotations: map[string]string{ + kubetypes.ConfigSourceAnnotationKey: "api", + kubetypes.ConfigMirrorAnnotationKey: "mirror", + }, + }, + Spec: api.PodSpec{ + Containers: []api.Container{ + {Name: "foo"}, + }, + }, + }, + } + + containerID := "ab2cdf" + containerPath := fmt.Sprintf("/docker/%v", containerID) + containerInfo := cadvisorapi.ContainerInfo{ + ContainerReference: cadvisorapi.ContainerReference{ + Name: containerPath, + }, + } + + testKubelet := newTestKubelet(t) + fakeRuntime := testKubelet.fakeRuntime + mockCadvisor := testKubelet.fakeCadvisor + cadvisorReq := &cadvisorapi.ContainerInfoRequest{} + mockCadvisor.On("DockerContainer", containerID, cadvisorReq).Return(containerInfo, nil) + kubelet := testKubelet.kubelet + + fakeRuntime.PodList = []*kubecontainer.Pod{ + { + ID: "1234", + Name: "qux", + Namespace: "ns", + Containers: []*kubecontainer.Container{ + { + Name: "foo", + ID: kubecontainer.ContainerID{Type: "test", ID: containerID}, + }, + }, + }, + } + + kubelet.podManager.SetPods(pods) + // Use the mirror pod UID to retrieve the stats. + stats, err := kubelet.GetContainerInfo("qux_ns", "5678", "foo", cadvisorReq) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + if stats == nil { + t.Fatalf("stats should not be nil") + } + mockCadvisor.AssertExpectations(t) +} + +func TestHostNetworkAllowed(t *testing.T) { + testKubelet := newTestKubelet(t) + kubelet := testKubelet.kubelet + + capabilities.SetForTests(capabilities.Capabilities{ + PrivilegedSources: capabilities.PrivilegedSources{ + HostNetworkSources: []string{kubetypes.ApiserverSource, kubetypes.FileSource}, + }, + }) + pod := podWithUidNameNsSpec("12345678", "foo", "new", api.PodSpec{ + Containers: []api.Container{ + {Name: "foo"}, + }, + SecurityContext: &api.PodSecurityContext{ + HostNetwork: true, + }, + }) + pod.Annotations[kubetypes.ConfigSourceAnnotationKey] = kubetypes.FileSource + + kubelet.podManager.SetPods([]*api.Pod{pod}) + err := kubelet.syncPod(syncPodOptions{ + pod: pod, + podStatus: &kubecontainer.PodStatus{}, + updateType: kubetypes.SyncPodUpdate, + }) + if err != nil { + t.Errorf("expected pod infra creation to succeed: %v", err) + } +} + +func TestHostNetworkDisallowed(t *testing.T) { + testKubelet := newTestKubelet(t) + kubelet := testKubelet.kubelet + + capabilities.SetForTests(capabilities.Capabilities{ + PrivilegedSources: capabilities.PrivilegedSources{ + HostNetworkSources: []string{}, + }, + }) + pod := podWithUidNameNsSpec("12345678", "foo", "new", api.PodSpec{ + Containers: []api.Container{ + {Name: "foo"}, + }, + SecurityContext: &api.PodSecurityContext{ + HostNetwork: true, + }, + }) + pod.Annotations[kubetypes.ConfigSourceAnnotationKey] = kubetypes.FileSource + + err := kubelet.syncPod(syncPodOptions{ + pod: pod, + podStatus: &kubecontainer.PodStatus{}, + updateType: kubetypes.SyncPodUpdate, + }) + if err == nil { + t.Errorf("expected pod infra creation to fail") + } +} + +func TestPrivilegeContainerAllowed(t *testing.T) { + testKubelet := newTestKubelet(t) + kubelet := testKubelet.kubelet + + capabilities.SetForTests(capabilities.Capabilities{ + AllowPrivileged: true, + }) + privileged := true + pod := 
podWithUidNameNsSpec("12345678", "foo", "new", api.PodSpec{ + Containers: []api.Container{ + {Name: "foo", SecurityContext: &api.SecurityContext{Privileged: &privileged}}, + }, + }) + + kubelet.podManager.SetPods([]*api.Pod{pod}) + err := kubelet.syncPod(syncPodOptions{ + pod: pod, + podStatus: &kubecontainer.PodStatus{}, + updateType: kubetypes.SyncPodUpdate, + }) + if err != nil { + t.Errorf("expected pod infra creation to succeed: %v", err) + } +} + +func TestPrivilegeContainerDisallowed(t *testing.T) { + testKubelet := newTestKubelet(t) + kubelet := testKubelet.kubelet + + capabilities.SetForTests(capabilities.Capabilities{ + AllowPrivileged: false, + }) + privileged := true + pod := podWithUidNameNsSpec("12345678", "foo", "new", api.PodSpec{ + Containers: []api.Container{ + {Name: "foo", SecurityContext: &api.SecurityContext{Privileged: &privileged}}, + }, + }) + + err := kubelet.syncPod(syncPodOptions{ + pod: pod, + podStatus: &kubecontainer.PodStatus{}, + updateType: kubetypes.SyncPodUpdate, + }) + if err == nil { + t.Errorf("expected pod infra creation to fail") + } +} + +func TestFilterOutTerminatedPods(t *testing.T) { + testKubelet := newTestKubelet(t) + kubelet := testKubelet.kubelet + pods := newTestPods(5) + pods[0].Status.Phase = api.PodFailed + pods[1].Status.Phase = api.PodSucceeded + pods[2].Status.Phase = api.PodRunning + pods[3].Status.Phase = api.PodPending + + expected := []*api.Pod{pods[2], pods[3], pods[4]} + kubelet.podManager.SetPods(pods) + actual := kubelet.filterOutTerminatedPods(pods) + if !reflect.DeepEqual(expected, actual) { + t.Errorf("expected %#v, got %#v", expected, actual) + } +} + +func TestRegisterExistingNodeWithApiserver(t *testing.T) { + testKubelet := newTestKubelet(t) + kubelet := testKubelet.kubelet + kubeClient := testKubelet.fakeKubeClient + kubeClient.AddReactor("create", "nodes", func(action core.Action) (bool, runtime.Object, error) { + // Return an error on create. + return true, &api.Node{}, &apierrors.StatusError{ + ErrStatus: unversioned.Status{Reason: unversioned.StatusReasonAlreadyExists}, + } + }) + kubeClient.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) { + // Return an existing (matching) node on get. 
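+ // registerWithApiserver should swallow the AlreadyExists error and fall back + // to fetching and reconciling the existing node object rather than failing.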
+ return true, &api.Node{ + ObjectMeta: api.ObjectMeta{Name: testKubeletHostname}, + Spec: api.NodeSpec{ExternalID: testKubeletHostname}, + }, nil + }) + kubeClient.AddReactor("*", "*", func(action core.Action) (bool, runtime.Object, error) { + return true, nil, fmt.Errorf("no reaction implemented for %s", action) + }) + machineInfo := &cadvisorapi.MachineInfo{ + MachineID: "123", + SystemUUID: "abc", + BootID: "1b3", + NumCores: 2, + MemoryCapacity: 1024, + } + mockCadvisor := testKubelet.fakeCadvisor + mockCadvisor.On("MachineInfo").Return(machineInfo, nil) + versionInfo := &cadvisorapi.VersionInfo{ + KernelVersion: "3.16.0-0.bpo.4-amd64", + ContainerOsVersion: "Debian GNU/Linux 7 (wheezy)", + DockerVersion: "1.5.0", + } + mockCadvisor.On("VersionInfo").Return(versionInfo, nil) + mockCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{ + Usage: 400 * mb, + Capacity: 1000 * mb, + Available: 600 * mb, + }, nil) + mockCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{ + Usage: 9 * mb, + Capacity: 10 * mb, + }, nil) + + done := make(chan struct{}) + go func() { + kubelet.registerWithApiserver() + done <- struct{}{} + }() + select { + case <-time.After(wait.ForeverTestTimeout): + t.Errorf("timed out waiting for registration") + case <-done: + return + } +} + +func TestMakePortMappings(t *testing.T) { + port := func(name string, protocol api.Protocol, containerPort, hostPort int32, ip string) api.ContainerPort { + return api.ContainerPort{ + Name: name, + Protocol: protocol, + ContainerPort: containerPort, + HostPort: hostPort, + HostIP: ip, + } + } + portMapping := func(name string, protocol api.Protocol, containerPort, hostPort int, ip string) kubecontainer.PortMapping { + return kubecontainer.PortMapping{ + Name: name, + Protocol: protocol, + ContainerPort: containerPort, + HostPort: hostPort, + HostIP: ip, + } + } + + tests := []struct { + container *api.Container + expectedPortMappings []kubecontainer.PortMapping + }{ + { + &api.Container{ + Name: "fooContainer", + Ports: []api.ContainerPort{ + port("", api.ProtocolTCP, 80, 8080, "127.0.0.1"), + port("", api.ProtocolTCP, 443, 4343, "192.168.0.1"), + port("foo", api.ProtocolUDP, 555, 5555, ""), + // Duplicated, should be ignored. + port("foo", api.ProtocolUDP, 888, 8888, ""), + // Duplicated, should be ignored. 
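+ // (the previous duplicate repeats the name "foo"; this one repeats TCP container port 80).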
+ port("", api.ProtocolTCP, 80, 8888, ""), + }, + }, + []kubecontainer.PortMapping{ + portMapping("fooContainer-TCP:80", api.ProtocolTCP, 80, 8080, "127.0.0.1"), + portMapping("fooContainer-TCP:443", api.ProtocolTCP, 443, 4343, "192.168.0.1"), + portMapping("fooContainer-foo", api.ProtocolUDP, 555, 5555, ""), + }, + }, + } + + for i, tt := range tests { + actual := makePortMappings(tt.container) + if !reflect.DeepEqual(tt.expectedPortMappings, actual) { + t.Errorf("%d: Expected: %#v, saw: %#v", i, tt.expectedPortMappings, actual) + } + } +} + +func TestIsPodPastActiveDeadline(t *testing.T) { + testKubelet := newTestKubelet(t) + kubelet := testKubelet.kubelet + pods := newTestPods(5) + + exceededActiveDeadlineSeconds := int64(30) + notYetActiveDeadlineSeconds := int64(120) + now := unversioned.Now() + startTime := unversioned.NewTime(now.Time.Add(-1 * time.Minute)) + pods[0].Status.StartTime = &startTime + pods[0].Spec.ActiveDeadlineSeconds = &exceededActiveDeadlineSeconds + pods[1].Status.StartTime = &startTime + pods[1].Spec.ActiveDeadlineSeconds = ¬YetActiveDeadlineSeconds + tests := []struct { + pod *api.Pod + expected bool + }{{pods[0], true}, {pods[1], false}, {pods[2], false}, {pods[3], false}, {pods[4], false}} + + kubelet.podManager.SetPods(pods) + for i, tt := range tests { + actual := kubelet.pastActiveDeadline(tt.pod) + if actual != tt.expected { + t.Errorf("[%d] expected %#v, got %#v", i, tt.expected, actual) + } + } +} + +func TestSyncPodsSetStatusToFailedForPodsThatRunTooLong(t *testing.T) { + testKubelet := newTestKubelet(t) + fakeRuntime := testKubelet.fakeRuntime + testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil) + kubelet := testKubelet.kubelet + + now := unversioned.Now() + startTime := unversioned.NewTime(now.Time.Add(-1 * time.Minute)) + exceededActiveDeadlineSeconds := int64(30) + + pods := []*api.Pod{ + { + ObjectMeta: api.ObjectMeta{ + UID: "12345678", + Name: "bar", + Namespace: "new", + }, + Spec: api.PodSpec{ + Containers: []api.Container{ + {Name: "foo"}, + }, + ActiveDeadlineSeconds: &exceededActiveDeadlineSeconds, + }, + Status: api.PodStatus{ + StartTime: &startTime, + }, + }, + } + + fakeRuntime.PodList = []*kubecontainer.Pod{ + { + ID: "12345678", + Name: "bar", + Namespace: "new", + Containers: []*kubecontainer.Container{ + {Name: "foo"}, + }, + }, + } + + // Let the pod worker sets the status to fail after this sync. 
+ kubelet.HandlePodUpdates(pods) + status, found := kubelet.statusManager.GetPodStatus(pods[0].UID) + if !found { + t.Errorf("expected to find status for pod %q", pods[0].UID) + } + if status.Phase != api.PodFailed { + t.Fatalf("expected pod status %q, got %q.", api.PodFailed, status.Phase) + } +} + +func TestSyncPodsDoesNotSetPodsThatDidNotRunTooLongToFailed(t *testing.T) { + testKubelet := newTestKubelet(t) + fakeRuntime := testKubelet.fakeRuntime + testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil) + kubelet := testKubelet.kubelet + + now := unversioned.Now() + startTime := unversioned.NewTime(now.Time.Add(-1 * time.Minute)) + notYetActiveDeadlineSeconds := int64(300) + + pods := []*api.Pod{ + { + ObjectMeta: api.ObjectMeta{ + UID: "12345678", + Name: "bar", + Namespace: "new", + }, + Spec: api.PodSpec{ + Containers: []api.Container{ + {Name: "foo"}, + }, + ActiveDeadlineSeconds: &notYetActiveDeadlineSeconds, + }, + Status: api.PodStatus{ + StartTime: &startTime, + }, + }, + } + + fakeRuntime.PodList = []*kubecontainer.Pod{ + { + ID: "12345678", + Name: "bar", + Namespace: "new", + Containers: []*kubecontainer.Container{ + {Name: "foo"}, + }, + }, + } + + kubelet.podManager.SetPods(pods) + kubelet.HandlePodUpdates(pods) + status, found := kubelet.statusManager.GetPodStatus(pods[0].UID) + if !found { + t.Errorf("expected to find status for pod %q", pods[0].UID) + } + if status.Phase == api.PodFailed { + t.Fatalf("expected pod status to not be %q", status.Phase) + } +} + +func podWithUidNameNs(uid types.UID, name, namespace string) *api.Pod { + return &api.Pod{ + ObjectMeta: api.ObjectMeta{ + UID: uid, + Name: name, + Namespace: namespace, + Annotations: map[string]string{}, + }, + } +} + +func podWithUidNameNsSpec(uid types.UID, name, namespace string, spec api.PodSpec) *api.Pod { + pod := podWithUidNameNs(uid, name, namespace) + pod.Spec = spec + return pod +} + +func TestDeletePodDirsForDeletedPods(t *testing.T) { + testKubelet := newTestKubelet(t) + testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil) + testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil) + testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil) + kl := testKubelet.kubelet + pods := []*api.Pod{ + podWithUidNameNs("12345678", "pod1", "ns"), + podWithUidNameNs("12345679", "pod2", "ns"), + } + + kl.podManager.SetPods(pods) + // Sync to create pod directories. + kl.HandlePodSyncs(kl.podManager.GetPods()) + for i := range pods { + if !dirExists(kl.getPodDir(pods[i].UID)) { + t.Errorf("expected directory to exist for pod %d", i) + } + } + + // Pod 1 has been deleted and no longer exists.
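+ // Keep only pods[0] in the pod manager; cleanup should remove the directory + // of pods[1] while leaving the directory of pods[0] intact.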
+ kl.podManager.SetPods([]*api.Pod{pods[0]}) + kl.HandlePodCleanups() + if !dirExists(kl.getPodDir(pods[0].UID)) { + t.Errorf("expected directory to exist for pod 0") + } + if dirExists(kl.getPodDir(pods[1].UID)) { + t.Errorf("expected directory to be deleted for pod 1") + } +} + +func syncAndVerifyPodDir(t *testing.T, testKubelet *TestKubelet, pods []*api.Pod, podsToCheck []*api.Pod, shouldExist bool) { + kl := testKubelet.kubelet + + kl.podManager.SetPods(pods) + kl.HandlePodSyncs(pods) + kl.HandlePodCleanups() + for i, pod := range podsToCheck { + exist := dirExists(kl.getPodDir(pod.UID)) + if shouldExist && !exist { + t.Errorf("expected directory to exist for pod %d", i) + } else if !shouldExist && exist { + t.Errorf("expected directory to be removed for pod %d", i) + } + } +} + +func TestDoesNotDeletePodDirsForTerminatedPods(t *testing.T) { + testKubelet := newTestKubelet(t) + testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil) + testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil) + testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil) + kl := testKubelet.kubelet + pods := []*api.Pod{ + podWithUidNameNs("12345678", "pod1", "ns"), + podWithUidNameNs("12345679", "pod2", "ns"), + podWithUidNameNs("12345680", "pod3", "ns"), + } + + syncAndVerifyPodDir(t, testKubelet, pods, pods, true) + // Pod 1 failed, and pod 2 succeeded. None of the pod directories should be + // deleted. + kl.statusManager.SetPodStatus(pods[1], api.PodStatus{Phase: api.PodFailed}) + kl.statusManager.SetPodStatus(pods[2], api.PodStatus{Phase: api.PodSucceeded}) + syncAndVerifyPodDir(t, testKubelet, pods, pods, true) +} + +func TestDoesNotDeletePodDirsIfContainerIsRunning(t *testing.T) { + testKubelet := newTestKubelet(t) + testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil) + testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil) + testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil) + runningPod := &kubecontainer.Pod{ + ID: "12345678", + Name: "pod1", + Namespace: "ns", + } + apiPod := podWithUidNameNs(runningPod.ID, runningPod.Name, runningPod.Namespace) + + // Sync once to create pod directory; confirm that the pod directory has + // already been created. + pods := []*api.Pod{apiPod} + syncAndVerifyPodDir(t, testKubelet, pods, []*api.Pod{apiPod}, true) + + // Pretend the pod is deleted from apiserver, but is still active on the node. + // The pod directory should not be removed. + pods = []*api.Pod{} + testKubelet.fakeRuntime.PodList = []*kubecontainer.Pod{runningPod} + syncAndVerifyPodDir(t, testKubelet, pods, []*api.Pod{apiPod}, true) + + // The pod is deleted and also not active on the node. The pod directory + // should be removed. + pods = []*api.Pod{} + testKubelet.fakeRuntime.PodList = []*kubecontainer.Pod{} + syncAndVerifyPodDir(t, testKubelet, pods, []*api.Pod{apiPod}, false) +} + +func TestCleanupBandwidthLimits(t *testing.T) { + testPod := func(name, ingress string) *api.Pod { + pod := podWithUidNameNs("", name, "") + + if len(ingress) != 0 { + pod.Annotations["kubernetes.io/ingress-bandwidth"] = ingress + } + + return pod + } + + // TODO(random-liu): We removed the test case for pod status not cached here. We should add a higher + // layer status getter function and test that function instead. 
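+ // Each case seeds a fake shaper with a fixed set of CIDRs and checks which + // ones cleanupBandwidthLimits resets: a limit is kept only when its pod is + // still running and still carries the ingress-bandwidth annotation.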
+ tests := []struct { + status *api.PodStatus + pods []*api.Pod + inputCIDRs []string + expectResetCIDRs []string + name string + }{ + { + status: &api.PodStatus{ + PodIP: "1.2.3.4", + Phase: api.PodRunning, + }, + pods: []*api.Pod{ + testPod("foo", "10M"), + testPod("bar", ""), + }, + inputCIDRs: []string{"1.2.3.4/32", "2.3.4.5/32", "5.6.7.8/32"}, + expectResetCIDRs: []string{"2.3.4.5/32", "5.6.7.8/32"}, + name: "pod running", + }, + { + status: &api.PodStatus{ + PodIP: "1.2.3.4", + Phase: api.PodFailed, + }, + pods: []*api.Pod{ + testPod("foo", "10M"), + testPod("bar", ""), + }, + inputCIDRs: []string{"1.2.3.4/32", "2.3.4.5/32", "5.6.7.8/32"}, + expectResetCIDRs: []string{"1.2.3.4/32", "2.3.4.5/32", "5.6.7.8/32"}, + name: "pod not running", + }, + { + status: &api.PodStatus{ + PodIP: "1.2.3.4", + Phase: api.PodFailed, + }, + pods: []*api.Pod{ + testPod("foo", ""), + testPod("bar", ""), + }, + inputCIDRs: []string{"1.2.3.4/32", "2.3.4.5/32", "5.6.7.8/32"}, + expectResetCIDRs: []string{"1.2.3.4/32", "2.3.4.5/32", "5.6.7.8/32"}, + name: "no bandwidth limits", + }, + } + for _, test := range tests { + shaper := &bandwidth.FakeShaper{ + CIDRs: test.inputCIDRs, + } + + testKube := newTestKubelet(t) + testKube.kubelet.shaper = shaper + + for _, pod := range test.pods { + testKube.kubelet.statusManager.SetPodStatus(pod, *test.status) + } + + err := testKube.kubelet.cleanupBandwidthLimits(test.pods) + if err != nil { + t.Errorf("unexpected error: %v (%s)", err, test.name) + } + if !reflect.DeepEqual(shaper.ResetCIDRs, test.expectResetCIDRs) { + t.Errorf("[%s]\nexpected: %v, saw: %v", test.name, test.expectResetCIDRs, shaper.ResetCIDRs) + } + } +} + +func TestExtractBandwidthResources(t *testing.T) { + four, _ := resource.ParseQuantity("4M") + ten, _ := resource.ParseQuantity("10M") + twenty, _ := resource.ParseQuantity("20M") + + testPod := func(ingress, egress string) *api.Pod { + pod := &api.Pod{ObjectMeta: api.ObjectMeta{Annotations: map[string]string{}}} + if len(ingress) != 0 { + pod.Annotations["kubernetes.io/ingress-bandwidth"] = ingress + } + if len(egress) != 0 { + pod.Annotations["kubernetes.io/egress-bandwidth"] = egress + } + return pod + } + + tests := []struct { + pod *api.Pod + expectedIngress *resource.Quantity + expectedEgress *resource.Quantity + expectError bool + }{ + { + pod: &api.Pod{}, + }, + { + pod: testPod("10M", ""), + expectedIngress: &ten, + }, + { + pod: testPod("", "10M"), + expectedEgress: &ten, + }, + { + pod: testPod("4M", "20M"), + expectedIngress: &four, + expectedEgress: &twenty, + }, + { + pod: testPod("foo", ""), + expectError: true, + }, + } + for _, test := range tests { + ingress, egress, err := bandwidth.ExtractPodBandwidthResources(test.pod.Annotations) + if test.expectError { + if err == nil { + t.Errorf("unexpected non-error") + } + continue + } + if err != nil { + t.Errorf("unexpected error: %v", err) + continue + } + if !reflect.DeepEqual(ingress, test.expectedIngress) { + t.Errorf("expected: %v, saw: %v", test.expectedIngress, ingress) + } + if !reflect.DeepEqual(egress, test.expectedEgress) { + t.Errorf("expected: %v, saw: %v", test.expectedEgress, egress) + } + } +} + +func TestGetPodsToSync(t *testing.T) { + testKubelet := newTestKubelet(t) + kubelet := testKubelet.kubelet + clock := testKubelet.fakeClock + pods := newTestPods(5) + + exceededActiveDeadlineSeconds := int64(30) + notYetActiveDeadlineSeconds := int64(120) + startTime := unversioned.NewTime(clock.Now()) + pods[0].Status.StartTime = &startTime + pods[0].Spec.ActiveDeadlineSeconds =
&exceededActiveDeadlineSeconds + pods[1].Status.StartTime = &startTime + pods[1].Spec.ActiveDeadlineSeconds = ¬YetActiveDeadlineSeconds + pods[2].Status.StartTime = &startTime + pods[2].Spec.ActiveDeadlineSeconds = &exceededActiveDeadlineSeconds + + kubelet.podManager.SetPods(pods) + kubelet.workQueue.Enqueue(pods[2].UID, 0) + kubelet.workQueue.Enqueue(pods[3].UID, 30*time.Second) + kubelet.workQueue.Enqueue(pods[4].UID, 2*time.Minute) + + clock.Step(1 * time.Minute) + + expectedPods := []*api.Pod{pods[0], pods[2], pods[3]} + + podsToSync := kubelet.getPodsToSync() + + if len(podsToSync) == len(expectedPods) { + for _, expect := range expectedPods { + var found bool + for _, got := range podsToSync { + if expect.UID == got.UID { + found = true + break + } + } + if !found { + t.Errorf("expected pod not found: %+v", expect) + } + } + } else { + t.Errorf("expected %d pods to sync, got %d", len(expectedPods), len(podsToSync)) + } +} + +func TestGenerateAPIPodStatusWithSortedContainers(t *testing.T) { + testKubelet := newTestKubelet(t) + kubelet := testKubelet.kubelet + numContainers := 10 + expectedOrder := []string{} + cStatuses := []*kubecontainer.ContainerStatus{} + specContainerList := []api.Container{} + for i := 0; i < numContainers; i++ { + id := fmt.Sprintf("%v", i) + containerName := fmt.Sprintf("%vcontainer", id) + expectedOrder = append(expectedOrder, containerName) + cStatus := &kubecontainer.ContainerStatus{ + ID: kubecontainer.BuildContainerID("test", id), + Name: containerName, + } + // Rearrange container statuses + if i%2 == 0 { + cStatuses = append(cStatuses, cStatus) + } else { + cStatuses = append([]*kubecontainer.ContainerStatus{cStatus}, cStatuses...) + } + specContainerList = append(specContainerList, api.Container{Name: containerName}) + } + pod := podWithUidNameNs("uid1", "foo", "test") + pod.Spec = api.PodSpec{ + Containers: specContainerList, + } + + status := &kubecontainer.PodStatus{ + ID: pod.UID, + Name: pod.Name, + Namespace: pod.Namespace, + ContainerStatuses: cStatuses, + } + for i := 0; i < 5; i++ { + apiStatus := kubelet.generateAPIPodStatus(pod, status) + for i, c := range apiStatus.ContainerStatuses { + if expectedOrder[i] != c.Name { + t.Fatalf("Container status not sorted, expected %v at index %d, but found %v", expectedOrder[i], i, c.Name) + } + } + } +} + +func verifyContainerStatuses(statuses []api.ContainerStatus, state, lastTerminationState map[string]api.ContainerState) error { + for _, s := range statuses { + if !reflect.DeepEqual(s.State, state[s.Name]) { + return fmt.Errorf("unexpected state: %s", diff.ObjectDiff(state[s.Name], s.State)) + } + if !reflect.DeepEqual(s.LastTerminationState, lastTerminationState[s.Name]) { + return fmt.Errorf("unexpected last termination state %s", diff.ObjectDiff( + lastTerminationState[s.Name], s.LastTerminationState)) + } + } + return nil +} + +// Test generateAPIPodStatus with different reason cache and old api pod status. 
+func TestGenerateAPIPodStatusWithReasonCache(t *testing.T) { + // The following waiting reason and message are generated in convertStatusToAPIStatus() + startWaitingReason := "ContainerCreating" + initWaitingReason := "PodInitializing" + testTimestamp := time.Unix(123456789, 987654321) + testErrorReason := fmt.Errorf("test-error") + emptyContainerID := (&kubecontainer.ContainerID{}).String() + testKubelet := newTestKubelet(t) + kubelet := testKubelet.kubelet + pod := podWithUidNameNs("12345678", "foo", "new") + pod.Spec = api.PodSpec{RestartPolicy: api.RestartPolicyOnFailure} + + podStatus := &kubecontainer.PodStatus{ + ID: pod.UID, + Name: pod.Name, + Namespace: pod.Namespace, + } + tests := []struct { + containers []api.Container + statuses []*kubecontainer.ContainerStatus + reasons map[string]error + oldStatuses []api.ContainerStatus + expectedState map[string]api.ContainerState + // Only set expectedInitState when it is different from expectedState + expectedInitState map[string]api.ContainerState + expectedLastTerminationState map[string]api.ContainerState + }{ + // For a container with no historical record, State should be Waiting, and LastTerminationState should be retrieved from + // the old status from the apiserver. + { + containers: []api.Container{{Name: "without-old-record"}, {Name: "with-old-record"}}, + statuses: []*kubecontainer.ContainerStatus{}, + reasons: map[string]error{}, + oldStatuses: []api.ContainerStatus{{ + Name: "with-old-record", + LastTerminationState: api.ContainerState{Terminated: &api.ContainerStateTerminated{}}, + }}, + expectedState: map[string]api.ContainerState{ + "without-old-record": {Waiting: &api.ContainerStateWaiting{ + Reason: startWaitingReason, + }}, + "with-old-record": {Waiting: &api.ContainerStateWaiting{ + Reason: startWaitingReason, + }}, + }, + expectedInitState: map[string]api.ContainerState{ + "without-old-record": {Waiting: &api.ContainerStateWaiting{ + Reason: initWaitingReason, + }}, + "with-old-record": {Waiting: &api.ContainerStateWaiting{ + Reason: initWaitingReason, + }}, + }, + expectedLastTerminationState: map[string]api.ContainerState{ + "with-old-record": {Terminated: &api.ContainerStateTerminated{}}, + }, + }, + // For a running container, State should be Running, and LastTerminationState should be retrieved from the latest terminated status. + { + containers: []api.Container{{Name: "running"}}, + statuses: []*kubecontainer.ContainerStatus{ + { + Name: "running", + State: kubecontainer.ContainerStateRunning, + StartedAt: testTimestamp, + }, + { + Name: "running", + State: kubecontainer.ContainerStateExited, + ExitCode: 1, + }, + }, + reasons: map[string]error{}, + oldStatuses: []api.ContainerStatus{}, + expectedState: map[string]api.ContainerState{ + "running": {Running: &api.ContainerStateRunning{ + StartedAt: unversioned.NewTime(testTimestamp), + }}, + }, + expectedLastTerminationState: map[string]api.ContainerState{ + "running": {Terminated: &api.ContainerStateTerminated{ + ExitCode: 1, + ContainerID: emptyContainerID, + }}, + }, + }, + // For a terminated container: + // * If there is no recent start error record, State should be Terminated, and LastTerminationState should be retrieved from the + // second latest terminated status; + // * If there is a recent start error record, State should be Waiting, and LastTerminationState should be retrieved from the latest + // terminated status; + // * If ExitCode = 0 and the restart policy is RestartPolicyOnFailure, the container shouldn't be restarted.
Whether or not there is a + // recent start error, State should be Terminated, and LastTerminationState should be retrieved from the second latest + // terminated status. + { + containers: []api.Container{{Name: "without-reason"}, {Name: "with-reason"}}, + statuses: []*kubecontainer.ContainerStatus{ + { + Name: "without-reason", + State: kubecontainer.ContainerStateExited, + ExitCode: 1, + }, + { + Name: "with-reason", + State: kubecontainer.ContainerStateExited, + ExitCode: 2, + }, + { + Name: "without-reason", + State: kubecontainer.ContainerStateExited, + ExitCode: 3, + }, + { + Name: "with-reason", + State: kubecontainer.ContainerStateExited, + ExitCode: 4, + }, + { + Name: "succeed", + State: kubecontainer.ContainerStateExited, + ExitCode: 0, + }, + { + Name: "succeed", + State: kubecontainer.ContainerStateExited, + ExitCode: 5, + }, + }, + reasons: map[string]error{"with-reason": testErrorReason, "succeed": testErrorReason}, + oldStatuses: []api.ContainerStatus{}, + expectedState: map[string]api.ContainerState{ + "without-reason": {Terminated: &api.ContainerStateTerminated{ + ExitCode: 1, + ContainerID: emptyContainerID, + }}, + "with-reason": {Waiting: &api.ContainerStateWaiting{Reason: testErrorReason.Error()}}, + "succeed": {Terminated: &api.ContainerStateTerminated{ + ExitCode: 0, + ContainerID: emptyContainerID, + }}, + }, + expectedLastTerminationState: map[string]api.ContainerState{ + "without-reason": {Terminated: &api.ContainerStateTerminated{ + ExitCode: 3, + ContainerID: emptyContainerID, + }}, + "with-reason": {Terminated: &api.ContainerStateTerminated{ + ExitCode: 2, + ContainerID: emptyContainerID, + }}, + "succeed": {Terminated: &api.ContainerStateTerminated{ + ExitCode: 5, + ContainerID: emptyContainerID, + }}, + }, + }, + } + + for i, test := range tests { + kubelet.reasonCache = NewReasonCache() + for n, e := range test.reasons { + kubelet.reasonCache.add(pod.UID, n, e, "") + } + pod.Spec.Containers = test.containers + pod.Status.ContainerStatuses = test.oldStatuses + podStatus.ContainerStatuses = test.statuses + apiStatus := kubelet.generateAPIPodStatus(pod, podStatus) + assert.NoError(t, verifyContainerStatuses(apiStatus.ContainerStatuses, test.expectedState, test.expectedLastTerminationState), "case %d", i) + } + + // Everything should be the same for init containers + for i, test := range tests { + kubelet.reasonCache = NewReasonCache() + for n, e := range test.reasons { + kubelet.reasonCache.add(pod.UID, n, e, "") + } + pod.Spec.InitContainers = test.containers + pod.Status.InitContainerStatuses = test.oldStatuses + podStatus.ContainerStatuses = test.statuses + apiStatus := kubelet.generateAPIPodStatus(pod, podStatus) + expectedState := test.expectedState + if test.expectedInitState != nil { + expectedState = test.expectedInitState + } + assert.NoError(t, verifyContainerStatuses(apiStatus.InitContainerStatuses, expectedState, test.expectedLastTerminationState), "case %d", i) + } +} + +// Test generateAPIPodStatus with different restart policies.
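+// A container that will be restarted should report Waiting with the cached start error, while a +// container that will not be restarted should report its most recent Terminated state.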
+func TestGenerateAPIPodStatusWithDifferentRestartPolicies(t *testing.T) { + testErrorReason := fmt.Errorf("test-error") + emptyContainerID := (&kubecontainer.ContainerID{}).String() + testKubelet := newTestKubelet(t) + kubelet := testKubelet.kubelet + pod := podWithUidNameNs("12345678", "foo", "new") + containers := []api.Container{{Name: "succeed"}, {Name: "failed"}} + podStatus := &kubecontainer.PodStatus{ + ID: pod.UID, + Name: pod.Name, + Namespace: pod.Namespace, + ContainerStatuses: []*kubecontainer.ContainerStatus{ + { + Name: "succeed", + State: kubecontainer.ContainerStateExited, + ExitCode: 0, + }, + { + Name: "failed", + State: kubecontainer.ContainerStateExited, + ExitCode: 1, + }, + { + Name: "succeed", + State: kubecontainer.ContainerStateExited, + ExitCode: 2, + }, + { + Name: "failed", + State: kubecontainer.ContainerStateExited, + ExitCode: 3, + }, + }, + } + kubelet.reasonCache.add(pod.UID, "succeed", testErrorReason, "") + kubelet.reasonCache.add(pod.UID, "failed", testErrorReason, "") + for c, test := range []struct { + restartPolicy api.RestartPolicy + expectedState map[string]api.ContainerState + expectedLastTerminationState map[string]api.ContainerState + // Only set expectedInitState when it is different from expectedState + expectedInitState map[string]api.ContainerState + // Only set expectedInitLastTerminationState when it is different from expectedLastTerminationState + expectedInitLastTerminationState map[string]api.ContainerState + }{ + { + restartPolicy: api.RestartPolicyNever, + expectedState: map[string]api.ContainerState{ + "succeed": {Terminated: &api.ContainerStateTerminated{ + ExitCode: 0, + ContainerID: emptyContainerID, + }}, + "failed": {Terminated: &api.ContainerStateTerminated{ + ExitCode: 1, + ContainerID: emptyContainerID, + }}, + }, + expectedLastTerminationState: map[string]api.ContainerState{ + "succeed": {Terminated: &api.ContainerStateTerminated{ + ExitCode: 2, + ContainerID: emptyContainerID, + }}, + "failed": {Terminated: &api.ContainerStateTerminated{ + ExitCode: 3, + ContainerID: emptyContainerID, + }}, + }, + }, + { + restartPolicy: api.RestartPolicyOnFailure, + expectedState: map[string]api.ContainerState{ + "succeed": {Terminated: &api.ContainerStateTerminated{ + ExitCode: 0, + ContainerID: emptyContainerID, + }}, + "failed": {Waiting: &api.ContainerStateWaiting{Reason: testErrorReason.Error()}}, + }, + expectedLastTerminationState: map[string]api.ContainerState{ + "succeed": {Terminated: &api.ContainerStateTerminated{ + ExitCode: 2, + ContainerID: emptyContainerID, + }}, + "failed": {Terminated: &api.ContainerStateTerminated{ + ExitCode: 1, + ContainerID: emptyContainerID, + }}, + }, + }, + { + restartPolicy: api.RestartPolicyAlways, + expectedState: map[string]api.ContainerState{ + "succeed": {Waiting: &api.ContainerStateWaiting{Reason: testErrorReason.Error()}}, + "failed": {Waiting: &api.ContainerStateWaiting{Reason: testErrorReason.Error()}}, + }, + expectedLastTerminationState: map[string]api.ContainerState{ + "succeed": {Terminated: &api.ContainerStateTerminated{ + ExitCode: 0, + ContainerID: emptyContainerID, + }}, + "failed": {Terminated: &api.ContainerStateTerminated{ + ExitCode: 1, + ContainerID: emptyContainerID, + }}, + }, + // If the init container is terminated with exit code 0, it won't be restarted even when the + // restart policy is RestartAlways. 
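+ // Init containers run to completion, so a zero exit code is terminal for + // them regardless of the pod-level restart policy.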
+ expectedInitState: map[string]api.ContainerState{ + "succeed": {Terminated: &api.ContainerStateTerminated{ + ExitCode: 0, + ContainerID: emptyContainerID, + }}, + "failed": {Waiting: &api.ContainerStateWaiting{Reason: testErrorReason.Error()}}, + }, + expectedInitLastTerminationState: map[string]api.ContainerState{ + "succeed": {Terminated: &api.ContainerStateTerminated{ + ExitCode: 2, + ContainerID: emptyContainerID, + }}, + "failed": {Terminated: &api.ContainerStateTerminated{ + ExitCode: 1, + ContainerID: emptyContainerID, + }}, + }, + }, + } { + pod.Spec.RestartPolicy = test.restartPolicy + // Test normal containers + pod.Spec.Containers = containers + apiStatus := kubelet.generateAPIPodStatus(pod, podStatus) + expectedState, expectedLastTerminationState := test.expectedState, test.expectedLastTerminationState + assert.NoError(t, verifyContainerStatuses(apiStatus.ContainerStatuses, expectedState, expectedLastTerminationState), "case %d", c) + pod.Spec.Containers = nil + + // Test init containers + pod.Spec.InitContainers = containers + apiStatus = kubelet.generateAPIPodStatus(pod, podStatus) + if test.expectedInitState != nil { + expectedState = test.expectedInitState + } + if test.expectedInitLastTerminationState != nil { + expectedLastTerminationState = test.expectedInitLastTerminationState + } + assert.NoError(t, verifyContainerStatuses(apiStatus.InitContainerStatuses, expectedState, expectedLastTerminationState), "case %d", c) + pod.Spec.InitContainers = nil + } +} + +// testPodAdmitHandler is a lifecycle.PodAdmitHandler for testing. +type testPodAdmitHandler struct { + // list of pods to reject. + podsToReject []*api.Pod +} + +// Admit rejects all pods in the podsToReject list with a matching UID. +func (a *testPodAdmitHandler) Admit(attrs *lifecycle.PodAdmitAttributes) lifecycle.PodAdmitResult { + for _, podToReject := range a.podsToReject { + if podToReject.UID == attrs.Pod.UID { + return lifecycle.PodAdmitResult{Admit: false, Reason: "Rejected", Message: "Pod is rejected"} + } + } + return lifecycle.PodAdmitResult{Admit: true} +} + +// Test verifies that the kubelet invokes an admission handler during HandlePodAdditions. +func TestHandlePodAdditionsInvokesPodAdmitHandlers(t *testing.T) { + testKubelet := newTestKubelet(t) + kl := testKubelet.kubelet + kl.nodeLister = testNodeLister{nodes: []api.Node{ + { + ObjectMeta: api.ObjectMeta{Name: kl.nodeName}, + Status: api.NodeStatus{ + Allocatable: api.ResourceList{ + api.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI), + }, + }, + }, + }} + kl.nodeInfo = testNodeInfo{nodes: []api.Node{ + { + ObjectMeta: api.ObjectMeta{Name: kl.nodeName}, + Status: api.NodeStatus{ + Allocatable: api.ResourceList{ + api.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI), + }, + }, + }, + }} + testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil) + testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil) + testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil) + + pods := []*api.Pod{ + { + ObjectMeta: api.ObjectMeta{ + UID: "123456789", + Name: "podA", + Namespace: "foo", + }, + }, + { + ObjectMeta: api.ObjectMeta{ + UID: "987654321", + Name: "podB", + Namespace: "foo", + }, + }, + } + podToReject := pods[0] + podToAdmit := pods[1] + podsToReject := []*api.Pod{podToReject} + + kl.AddPodAdmitHandler(&testPodAdmitHandler{podsToReject: podsToReject}) + + kl.HandlePodAdditions(pods) + // Check pod status stored in the status map. 
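+ // A rejected pod is never started; the kubelet instead records the admit + // handler's reason and message in a Failed status.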
+ // podToReject should be Failed + status, found := kl.statusManager.GetPodStatus(podToReject.UID) + if !found { + t.Fatalf("status of pod %q is not found in the status map", podToReject.UID) + } + if status.Phase != api.PodFailed { + t.Fatalf("expected pod status %q. Got %q.", api.PodFailed, status.Phase) + } + // podToAdmit should be Pending + status, found = kl.statusManager.GetPodStatus(podToAdmit.UID) + if !found { + t.Fatalf("status of pod %q is not found in the status map", podToAdmit.UID) + } + if status.Phase != api.PodPending { + t.Fatalf("expected pod status %q. Got %q.", api.PodPending, status.Phase) + } +} + +// testPodSyncLoopHandler is a lifecycle.PodSyncLoopHandler that is used for testing. +type testPodSyncLoopHandler struct { + // list of pods to sync + podsToSync []*api.Pod +} + +// ShouldSync evaluates if the pod should be synced from the kubelet. +func (a *testPodSyncLoopHandler) ShouldSync(pod *api.Pod) bool { + for _, podToSync := range a.podsToSync { + if podToSync.UID == pod.UID { + return true + } + } + return false +} + +// TestGetPodsToSyncInvokesPodSyncLoopHandlers ensures that the get pods to sync routine invokes the handler. +func TestGetPodsToSyncInvokesPodSyncLoopHandlers(t *testing.T) { + testKubelet := newTestKubelet(t) + kubelet := testKubelet.kubelet + pods := newTestPods(5) + podUIDs := []types.UID{} + for _, pod := range pods { + podUIDs = append(podUIDs, pod.UID) + } + podsToSync := []*api.Pod{pods[0]} + kubelet.AddPodSyncLoopHandler(&testPodSyncLoopHandler{podsToSync}) + + kubelet.podManager.SetPods(pods) + + expectedPodsUID := []types.UID{pods[0].UID} + + podsToSync = kubelet.getPodsToSync() + + if len(podsToSync) == len(expectedPodsUID) { + var rightNum int + for _, podUID := range expectedPodsUID { + for _, podToSync := range podsToSync { + if podToSync.UID == podUID { + rightNum++ + break + } + } + } + if rightNum != len(expectedPodsUID) { + // Just for error reporting. + podsToSyncUID := []types.UID{} + for _, podToSync := range podsToSync { + podsToSyncUID = append(podsToSyncUID, podToSync.UID) + } + t.Errorf("expected pods %v to sync, got %v", expectedPodsUID, podsToSyncUID) + } + + } else { + t.Errorf("expected %d pods to sync, got %d", len(expectedPodsUID), len(podsToSync)) + } +} + +// testPodSyncHandler is a lifecycle.PodSyncHandler that is used for testing. +type testPodSyncHandler struct { + // list of pods to evict. + podsToEvict []*api.Pod + // the reason for the eviction + reason string + // the message for the eviction + message string +} + +// ShouldEvict evaluates if the pod should be evicted from the kubelet.
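+// Only pods present in podsToEvict are evicted, using the configured reason and message.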
+func (a *testPodSyncHandler) ShouldEvict(pod *api.Pod) lifecycle.ShouldEvictResponse { + for _, podToEvict := range a.podsToEvict { + if podToEvict.UID == pod.UID { + return lifecycle.ShouldEvictResponse{Evict: true, Reason: a.reason, Message: a.message} + } + } + return lifecycle.ShouldEvictResponse{Evict: false} +} + +// TestGenerateAPIPodStatusInvokesPodSyncHandlers invokes the handlers and reports the proper status +func TestGenerateAPIPodStatusInvokesPodSyncHandlers(t *testing.T) { + testKubelet := newTestKubelet(t) + kubelet := testKubelet.kubelet + pod := newTestPods(1)[0] + podsToEvict := []*api.Pod{pod} + kubelet.AddPodSyncHandler(&testPodSyncHandler{podsToEvict, "Evicted", "because"}) + status := &kubecontainer.PodStatus{ + ID: pod.UID, + Name: pod.Name, + Namespace: pod.Namespace, + } + apiStatus := kubelet.generateAPIPodStatus(pod, status) + if apiStatus.Phase != api.PodFailed { + t.Fatalf("Expected phase %v, but got %v", api.PodFailed, apiStatus.Phase) + } + if apiStatus.Reason != "Evicted" { + t.Fatalf("Expected reason %v, but got %v", "Evicted", apiStatus.Reason) + } + if apiStatus.Message != "because" { + t.Fatalf("Expected message %v, but got %v", "because", apiStatus.Message) + } +} + +func TestSyncPodKillPod(t *testing.T) { + testKubelet := newTestKubelet(t) + kl := testKubelet.kubelet + pod := &api.Pod{ + ObjectMeta: api.ObjectMeta{ + UID: "12345678", + Name: "bar", + Namespace: "foo", + }, + } + pods := []*api.Pod{pod} + kl.podManager.SetPods(pods) + gracePeriodOverride := int64(0) + err := kl.syncPod(syncPodOptions{ + pod: pod, + podStatus: &kubecontainer.PodStatus{}, + updateType: kubetypes.SyncPodKill, + killPodOptions: &KillPodOptions{ + PodStatusFunc: func(p *api.Pod, podStatus *kubecontainer.PodStatus) api.PodStatus { + return api.PodStatus{ + Phase: api.PodFailed, + Reason: "reason", + Message: "message", + } + }, + PodTerminationGracePeriodSecondsOverride: &gracePeriodOverride, + }, + }) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + // Check pod status stored in the status map. + status, found := kl.statusManager.GetPodStatus(pod.UID) + if !found { + t.Fatalf("status of pod %q is not found in the status map", pod.UID) + } + if status.Phase != api.PodFailed { + t.Fatalf("expected pod status %q. Got %q.", api.PodFailed, status.Phase) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/leaky/leaky.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/leaky/leaky.go new file mode 100644 index 000000000000..dd4e6efb0345 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/leaky/leaky.go @@ -0,0 +1,25 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package leaky holds bits of kubelet that should be internal but have leaked +// out through bad abstractions. TODO: delete all of this. +package leaky + +const ( + // This is used in a few places outside of Kubelet, such as indexing + // into the container info. 
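+ // "POD" is the name the docker runtime gives the pod infra ("pause") container.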
+ PodInfraContainerName = "POD" +) diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/lifecycle/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/lifecycle/doc.go new file mode 100644 index 000000000000..f398ca060cce --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/lifecycle/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Handlers for pod lifecycle events and interfaces to integrate +// with kubelet admission, synchronization, and eviction of pods. +package lifecycle diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/lifecycle/fake_handler_runner.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/lifecycle/fake_handler_runner.go new file mode 100644 index 000000000000..501fb79caa2e --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/lifecycle/fake_handler_runner.go @@ -0,0 +1,62 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package lifecycle + +import ( + "fmt" + "sync" + + "k8s.io/kubernetes/pkg/api" + kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" + "k8s.io/kubernetes/pkg/kubelet/util/format" +) + +type FakeHandlerRunner struct { + sync.Mutex + HandlerRuns []string + Err error +} + +func NewFakeHandlerRunner() *FakeHandlerRunner { + return &FakeHandlerRunner{HandlerRuns: []string{}} +} + +func (hr *FakeHandlerRunner) Run(containerID kubecontainer.ContainerID, pod *api.Pod, container *api.Container, handler *api.Handler) (string, error) { + hr.Lock() + defer hr.Unlock() + + if hr.Err != nil { + return "", hr.Err + } + + switch { + case handler.Exec != nil: + hr.HandlerRuns = append(hr.HandlerRuns, fmt.Sprintf("exec on pod: %v, container: %v: %v", format.Pod(pod), container.Name, containerID.String())) + case handler.HTTPGet != nil: + hr.HandlerRuns = append(hr.HandlerRuns, fmt.Sprintf("http-get on pod: %v, container: %v: %v", format.Pod(pod), container.Name, containerID.String())) + case handler.TCPSocket != nil: + hr.HandlerRuns = append(hr.HandlerRuns, fmt.Sprintf("tcp-socket on pod: %v, container: %v: %v", format.Pod(pod), container.Name, containerID.String())) + default: + return "", fmt.Errorf("Invalid handler: %v", handler) + } + return "", nil +} + +func (hr *FakeHandlerRunner) Reset() { + hr.HandlerRuns = []string{} + hr.Err = nil +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/lifecycle/handlers.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/lifecycle/handlers.go new file mode 100644 index 000000000000..011a264ad000 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/lifecycle/handlers.go @@ -0,0 +1,144 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package lifecycle + +import ( + "bytes" + "fmt" + "io/ioutil" + "net" + "net/http" + "strconv" + + "github.com/golang/glog" + "k8s.io/kubernetes/pkg/api" + kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" + kubetypes "k8s.io/kubernetes/pkg/kubelet/types" + "k8s.io/kubernetes/pkg/kubelet/util/format" + "k8s.io/kubernetes/pkg/kubelet/util/ioutils" + "k8s.io/kubernetes/pkg/types" + "k8s.io/kubernetes/pkg/util/intstr" +) + +type HandlerRunner struct { + httpGetter kubetypes.HttpGetter + commandRunner kubecontainer.ContainerCommandRunner + containerManager podStatusProvider +} + +type podStatusProvider interface { + GetPodStatus(uid types.UID, name, namespace string) (*kubecontainer.PodStatus, error) +} + +func NewHandlerRunner(httpGetter kubetypes.HttpGetter, commandRunner kubecontainer.ContainerCommandRunner, containerManager podStatusProvider) kubecontainer.HandlerRunner { + return &HandlerRunner{ + httpGetter: httpGetter, + commandRunner: commandRunner, + containerManager: containerManager, + } +} + +func (hr *HandlerRunner) Run(containerID kubecontainer.ContainerID, pod *api.Pod, container *api.Container, handler *api.Handler) (string, error) { + switch { + case handler.Exec != nil: + var ( + buffer bytes.Buffer + msg string + ) + output := ioutils.WriteCloserWrapper(&buffer) + err := hr.commandRunner.ExecInContainer(containerID, handler.Exec.Command, nil, output, output, false) + if err != nil { + msg := fmt.Sprintf("Exec lifecycle hook (%v) for Container %q in Pod %q failed - %q", handler.Exec.Command, container.Name, format.Pod(pod), buffer.String()) + glog.V(1).Infof(msg) + } + return msg, err + case handler.HTTPGet != nil: + msg, err := hr.runHTTPHandler(pod, container, handler) + if err != nil { + msg := fmt.Sprintf("Http lifecycle hook (%s) for Container %q in Pod %q failed - %q", handler.HTTPGet.Path, container.Name, format.Pod(pod), msg) + glog.V(1).Infof(msg) + } + return msg, err + default: + err := fmt.Errorf("Invalid handler: %v", handler) + msg := fmt.Sprintf("Cannot run handler: %v", err) + glog.Errorf(msg) + return msg, err + } +} + +// resolvePort attempts to turn an IntOrString port reference into a concrete port number. +// If portReference has an int value, it is treated as a literal, and that value is simply returned. +// If portReference is a string, an attempt is first made to parse it as an integer. If that fails, +// an attempt is made to find a port with the same name in the container spec. +// If a port with the same name is found, its ContainerPort value is returned. If no matching +// port is found, an error is returned.
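+// For example, given Ports: []api.ContainerPort{{Name: "http", ContainerPort: 8080}}, both +// intstr.FromString("8080") and intstr.FromString("http") resolve to 8080, while +// intstr.FromInt(80) is returned as-is.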
+func resolvePort(portReference intstr.IntOrString, container *api.Container) (int, error) { + if portReference.Type == intstr.Int { + return portReference.IntValue(), nil + } + portName := portReference.StrVal + port, err := strconv.Atoi(portName) + if err == nil { + return port, nil + } + for _, portSpec := range container.Ports { + if portSpec.Name == portName { + return int(portSpec.ContainerPort), nil + } + } + return -1, fmt.Errorf("couldn't find port: %v in %v", portReference, container) +} + +func (hr *HandlerRunner) runHTTPHandler(pod *api.Pod, container *api.Container, handler *api.Handler) (string, error) { + host := handler.HTTPGet.Host + if len(host) == 0 { + status, err := hr.containerManager.GetPodStatus(pod.UID, pod.Name, pod.Namespace) + if err != nil { + glog.Errorf("Unable to get pod info, event handlers may be invalid.") + return "", err + } + if status.IP == "" { + return "", fmt.Errorf("failed to find networking container: %v", status) + } + host = status.IP + } + var port int + if handler.HTTPGet.Port.Type == intstr.String && len(handler.HTTPGet.Port.StrVal) == 0 { + port = 80 + } else { + var err error + port, err = resolvePort(handler.HTTPGet.Port, container) + if err != nil { + return "", err + } + } + url := fmt.Sprintf("http://%s/%s", net.JoinHostPort(host, strconv.Itoa(port)), handler.HTTPGet.Path) + resp, err := hr.httpGetter.Get(url) + return getHttpRespBody(resp), err +} + +func getHttpRespBody(resp *http.Response) string { + if resp == nil { + return "" + } + defer resp.Body.Close() + if bytes, err := ioutil.ReadAll(resp.Body); err == nil { + return string(bytes) + } + return "" +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/lifecycle/handlers_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/lifecycle/handlers_test.go new file mode 100644 index 000000000000..3fc2389ee8d5 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/lifecycle/handlers_test.go @@ -0,0 +1,227 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package lifecycle + +import ( + "fmt" + "io" + "io/ioutil" + "net/http" + "reflect" + "strings" + "testing" + + "k8s.io/kubernetes/pkg/api" + kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" + "k8s.io/kubernetes/pkg/util/intstr" +) + +func TestResolvePortInt(t *testing.T) { + expected := 80 + port, err := resolvePort(intstr.FromInt(expected), &api.Container{}) + if port != expected { + t.Errorf("expected: %d, saw: %d", expected, port) + } + if err != nil { + t.Errorf("unexpected error: %v", err) + } +} + +func TestResolvePortString(t *testing.T) { + expected := 80 + name := "foo" + container := &api.Container{ + Ports: []api.ContainerPort{ + {Name: name, ContainerPort: int32(expected)}, + }, + } + port, err := resolvePort(intstr.FromString(name), container) + if port != expected { + t.Errorf("expected: %d, saw: %d", expected, port) + } + if err != nil { + t.Errorf("unexpected error: %v", err) + } +} + +func TestResolvePortStringUnknown(t *testing.T) { + expected := int32(80) + name := "foo" + container := &api.Container{ + Ports: []api.ContainerPort{ + {Name: "bar", ContainerPort: expected}, + }, + } + port, err := resolvePort(intstr.FromString(name), container) + if port != -1 { + t.Errorf("expected: -1, saw: %d", port) + } + if err == nil { + t.Error("unexpected non-error") + } +} + +type fakeContainerCommandRunner struct { + Cmd []string + ID kubecontainer.ContainerID +} + +func (f *fakeContainerCommandRunner) ExecInContainer(id kubecontainer.ContainerID, cmd []string, in io.Reader, out, err io.WriteCloser, tty bool) error { + f.Cmd = cmd + f.ID = id + return nil +} + +func (f *fakeContainerCommandRunner) PortForward(pod *kubecontainer.Pod, port uint16, stream io.ReadWriteCloser) error { + return nil +} + +func TestRunHandlerExec(t *testing.T) { + fakeCommandRunner := fakeContainerCommandRunner{} + handlerRunner := NewHandlerRunner(&fakeHTTP{}, &fakeCommandRunner, nil) + + containerID := kubecontainer.ContainerID{Type: "test", ID: "abc1234"} + containerName := "containerFoo" + + container := api.Container{ + Name: containerName, + Lifecycle: &api.Lifecycle{ + PostStart: &api.Handler{ + Exec: &api.ExecAction{ + Command: []string{"ls", "-a"}, + }, + }, + }, + } + + pod := api.Pod{} + pod.ObjectMeta.Name = "podFoo" + pod.ObjectMeta.Namespace = "nsFoo" + pod.Spec.Containers = []api.Container{container} + _, err := handlerRunner.Run(containerID, &pod, &container, container.Lifecycle.PostStart) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + if fakeCommandRunner.ID != containerID || + !reflect.DeepEqual(container.Lifecycle.PostStart.Exec.Command, fakeCommandRunner.Cmd) { + t.Errorf("unexpected commands: %v", fakeCommandRunner) + } +} + +type fakeHTTP struct { + url string + err error + resp *http.Response +} + +func (f *fakeHTTP) Get(url string) (*http.Response, error) { + f.url = url + return f.resp, f.err +} + +func TestRunHandlerHttp(t *testing.T) { + fakeHttp := fakeHTTP{} + handlerRunner := NewHandlerRunner(&fakeHttp, &fakeContainerCommandRunner{}, nil) + + containerID := kubecontainer.ContainerID{Type: "test", ID: "abc1234"} + containerName := "containerFoo" + + container := api.Container{ + Name: containerName, + Lifecycle: &api.Lifecycle{ + PostStart: &api.Handler{ + HTTPGet: &api.HTTPGetAction{ + Host: "foo", + Port: intstr.FromInt(8080), + Path: "bar", + }, + }, + }, + } + pod := api.Pod{} + pod.ObjectMeta.Name = "podFoo" + pod.ObjectMeta.Namespace = "nsFoo" + pod.Spec.Containers = []api.Container{container} + _, err := handlerRunner.Run(containerID, 
&pod, &container, container.Lifecycle.PostStart) + + if err != nil { + t.Errorf("unexpected error: %v", err) + } + if fakeHttp.url != "http://foo:8080/bar" { + t.Errorf("unexpected url: %s", fakeHttp.url) + } +} + +func TestRunHandlerNil(t *testing.T) { + handlerRunner := NewHandlerRunner(&fakeHTTP{}, &fakeContainerCommandRunner{}, nil) + containerID := kubecontainer.ContainerID{Type: "test", ID: "abc1234"} + podName := "podFoo" + podNamespace := "nsFoo" + containerName := "containerFoo" + + container := api.Container{ + Name: containerName, + Lifecycle: &api.Lifecycle{ + PostStart: &api.Handler{}, + }, + } + pod := api.Pod{} + pod.ObjectMeta.Name = podName + pod.ObjectMeta.Namespace = podNamespace + pod.Spec.Containers = []api.Container{container} + _, err := handlerRunner.Run(containerID, &pod, &container, container.Lifecycle.PostStart) + if err == nil { + t.Errorf("expect error, but got nil") + } +} + +func TestRunHandlerHttpFailure(t *testing.T) { + expectedErr := fmt.Errorf("fake http error") + expectedResp := http.Response{ + Body: ioutil.NopCloser(strings.NewReader(expectedErr.Error())), + } + fakeHttp := fakeHTTP{err: expectedErr, resp: &expectedResp} + handlerRunner := NewHandlerRunner(&fakeHttp, &fakeContainerCommandRunner{}, nil) + containerName := "containerFoo" + containerID := kubecontainer.ContainerID{Type: "test", ID: "abc1234"} + container := api.Container{ + Name: containerName, + Lifecycle: &api.Lifecycle{ + PostStart: &api.Handler{ + HTTPGet: &api.HTTPGetAction{ + Host: "foo", + Port: intstr.FromInt(8080), + Path: "bar", + }, + }, + }, + } + pod := api.Pod{} + pod.ObjectMeta.Name = "podFoo" + pod.ObjectMeta.Namespace = "nsFoo" + pod.Spec.Containers = []api.Container{container} + msg, err := handlerRunner.Run(containerID, &pod, &container, container.Lifecycle.PostStart) + if err == nil { + t.Errorf("expected error: %v", expectedErr) + } + if msg != expectedErr.Error() { + t.Errorf("unexpected error message: %q; expected %q", msg, expectedErr) + } + if fakeHttp.url != "http://foo:8080/bar" { + t.Errorf("unexpected url: %s", fakeHttp.url) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/lifecycle/interfaces.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/lifecycle/interfaces.go new file mode 100644 index 000000000000..0dedd4510a71 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/lifecycle/interfaces.go @@ -0,0 +1,122 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package lifecycle + +import "k8s.io/kubernetes/pkg/api" + +// PodAdmitAttributes is the context for a pod admission decision. +// The member fields of this struct should never be mutated. +type PodAdmitAttributes struct { + // the pod to evaluate for admission + Pod *api.Pod + // all pods bound to the kubelet excluding the pod being evaluated + OtherPods []*api.Pod +} + +// PodAdmitResult provides the result of a pod admission decision. +type PodAdmitResult struct { + // if true, the pod should be admitted. 
+ Admit bool + // a brief single-word reason why the pod could not be admitted. + Reason string + // a brief message explaining why the pod could not be admitted. + Message string +} + +// PodAdmitHandler is notified during pod admission. +type PodAdmitHandler interface { + // Admit evaluates if a pod can be admitted. + Admit(attrs *PodAdmitAttributes) PodAdmitResult +} + +// PodAdmitTarget maintains a list of handlers to invoke. +type PodAdmitTarget interface { + // AddPodAdmitHandler adds the specified handler. + AddPodAdmitHandler(a PodAdmitHandler) +} + +// PodSyncLoopHandler is invoked during each sync loop iteration. +type PodSyncLoopHandler interface { + // ShouldSync returns true if the pod needs to be synced. + // This operation must return immediately as it's called for each pod. + // The provided pod should never be modified. + ShouldSync(pod *api.Pod) bool +} + +// PodSyncLoopTarget maintains a list of handlers for the pod sync loop. +type PodSyncLoopTarget interface { + // AddPodSyncLoopHandler adds the specified handler. + AddPodSyncLoopHandler(a PodSyncLoopHandler) +} + +// ShouldEvictResponse provides the result of a should evict request. +type ShouldEvictResponse struct { + // if true, the pod should be evicted. + Evict bool + // a brief CamelCase reason why the pod should be evicted. + Reason string + // a brief message why the pod should be evicted. + Message string +} + +// PodSyncHandler is invoked during each sync pod operation. +type PodSyncHandler interface { + // ShouldEvict is invoked during each sync pod operation to determine + // if the pod should be evicted from the kubelet. If so, the pod status + // is updated to mark its phase as failed with the provided reason and message, + // and the pod is immediately killed. + // This operation must return immediately as it's called for each sync pod. + // The provided pod should never be modified. + ShouldEvict(pod *api.Pod) ShouldEvictResponse +} + +// PodSyncTarget maintains a list of handlers for pod sync. +type PodSyncTarget interface { + // AddPodSyncHandler adds the specified handler. + AddPodSyncHandler(a PodSyncHandler) +} + +// PodLifecycleTarget groups a set of lifecycle interfaces for convenience. +type PodLifecycleTarget interface { + PodAdmitTarget + PodSyncLoopTarget + PodSyncTarget +} + +// PodAdmitHandlers maintains a list of handlers for pod admission. +type PodAdmitHandlers []PodAdmitHandler + +// AddPodAdmitHandler adds the specified handler. +func (handlers *PodAdmitHandlers) AddPodAdmitHandler(a PodAdmitHandler) { + *handlers = append(*handlers, a) +} + +// PodSyncLoopHandlers maintains a list of handlers for the pod sync loop. +type PodSyncLoopHandlers []PodSyncLoopHandler + +// AddPodSyncLoopHandler adds the specified handler. +func (handlers *PodSyncLoopHandlers) AddPodSyncLoopHandler(a PodSyncLoopHandler) { + *handlers = append(*handlers, a) +} + +// PodSyncHandlers maintains a list of handlers for pod sync. +type PodSyncHandlers []PodSyncHandler + +// AddPodSyncHandler adds the specified handler. +func (handlers *PodSyncHandlers) AddPodSyncHandler(a PodSyncHandler) { + *handlers = append(*handlers, a) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/metrics/metrics.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/metrics/metrics.go new file mode 100644 index 000000000000..7d8294c1503b --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/metrics/metrics.go @@ -0,0 +1,220 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved.
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "sync" + "time" + + "github.com/golang/glog" + "github.com/prometheus/client_golang/prometheus" + kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" +) + +const ( + KubeletSubsystem = "kubelet" + PodWorkerLatencyKey = "pod_worker_latency_microseconds" + SyncPodsLatencyKey = "sync_pods_latency_microseconds" + PodStartLatencyKey = "pod_start_latency_microseconds" + PodStatusLatencyKey = "generate_pod_status_latency_microseconds" + ContainerManagerOperationsKey = "container_manager_latency_microseconds" + DockerOperationsLatencyKey = "docker_operations_latency_microseconds" + DockerOperationsKey = "docker_operations" + DockerOperationsErrorsKey = "docker_operations_errors" + DockerOperationsTimeoutKey = "docker_operations_timeout" + PodWorkerStartLatencyKey = "pod_worker_start_latency_microseconds" + PLEGRelistLatencyKey = "pleg_relist_latency_microseconds" + PLEGRelistIntervalKey = "pleg_relist_interval_microseconds" +) + +var ( + ContainersPerPodCount = prometheus.NewSummary( + prometheus.SummaryOpts{ + Subsystem: KubeletSubsystem, + Name: "containers_per_pod_count", + Help: "The number of containers per pod.", + }, + ) + PodWorkerLatency = prometheus.NewSummaryVec( + prometheus.SummaryOpts{ + Subsystem: KubeletSubsystem, + Name: PodWorkerLatencyKey, + Help: "Latency in microseconds to sync a single pod. Broken down by operation type: create, update, or sync", + }, + []string{"operation_type"}, + ) + SyncPodsLatency = prometheus.NewSummary( + prometheus.SummaryOpts{ + Subsystem: KubeletSubsystem, + Name: SyncPodsLatencyKey, + Help: "Latency in microseconds to sync all pods.", + }, + ) + PodStartLatency = prometheus.NewSummary( + prometheus.SummaryOpts{ + Subsystem: KubeletSubsystem, + Name: PodStartLatencyKey, + Help: "Latency in microseconds for a single pod to go from pending to running. Broken down by podname.", + }, + ) + PodStatusLatency = prometheus.NewSummary( + prometheus.SummaryOpts{ + Subsystem: KubeletSubsystem, + Name: PodStatusLatencyKey, + Help: "Latency in microseconds to generate status for a single pod.", + }, + ) + ContainerManagerLatency = prometheus.NewSummaryVec( + prometheus.SummaryOpts{ + Subsystem: KubeletSubsystem, + Name: ContainerManagerOperationsKey, + Help: "Latency in microseconds for container manager operations. Broken down by method.", + }, + []string{"operation_type"}, + ) + PodWorkerStartLatency = prometheus.NewSummary( + prometheus.SummaryOpts{ + Subsystem: KubeletSubsystem, + Name: PodWorkerStartLatencyKey, + Help: "Latency in microseconds from seeing a pod to starting a worker.", + }, + ) + DockerOperationsLatency = prometheus.NewSummaryVec( + prometheus.SummaryOpts{ + Subsystem: KubeletSubsystem, + Name: DockerOperationsLatencyKey, + Help: "Latency in microseconds of Docker operations. 
Broken down by operation type.", + }, + []string{"operation_type"}, + ) + DockerOperations = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Subsystem: KubeletSubsystem, + Name: DockerOperationsKey, + Help: "Cumulative number of Docker operations by operation type.", + }, + []string{"operation_type"}, + ) + DockerOperationsErrors = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Subsystem: KubeletSubsystem, + Name: DockerOperationsErrorsKey, + Help: "Cumulative number of Docker operation errors by operation type.", + }, + []string{"operation_type"}, + ) + DockerOperationsTimeout = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Subsystem: KubeletSubsystem, + Name: DockerOperationsTimeoutKey, + Help: "Cumulative number of Docker operation timeout by operation type.", + }, + []string{"operation_type"}, + ) + PLEGRelistLatency = prometheus.NewSummary( + prometheus.SummaryOpts{ + Subsystem: KubeletSubsystem, + Name: PLEGRelistLatencyKey, + Help: "Latency in microseconds for relisting pods in PLEG.", + }, + ) + PLEGRelistInterval = prometheus.NewSummary( + prometheus.SummaryOpts{ + Subsystem: KubeletSubsystem, + Name: PLEGRelistIntervalKey, + Help: "Interval in microseconds between relisting in PLEG.", + }, + ) +) + +var registerMetrics sync.Once + +// Register all metrics. +func Register(containerCache kubecontainer.RuntimeCache) { + // Register the metrics. + registerMetrics.Do(func() { + prometheus.MustRegister(PodWorkerLatency) + prometheus.MustRegister(PodStartLatency) + prometheus.MustRegister(PodStatusLatency) + prometheus.MustRegister(DockerOperationsLatency) + prometheus.MustRegister(ContainerManagerLatency) + prometheus.MustRegister(SyncPodsLatency) + prometheus.MustRegister(PodWorkerStartLatency) + prometheus.MustRegister(ContainersPerPodCount) + prometheus.MustRegister(DockerOperations) + prometheus.MustRegister(DockerOperationsErrors) + prometheus.MustRegister(DockerOperationsTimeout) + prometheus.MustRegister(newPodAndContainerCollector(containerCache)) + prometheus.MustRegister(PLEGRelistLatency) + prometheus.MustRegister(PLEGRelistInterval) + }) +} + +// Gets the time since the specified start in microseconds. +func SinceInMicroseconds(start time.Time) float64 { + return float64(time.Since(start).Nanoseconds() / time.Microsecond.Nanoseconds()) +} + +func newPodAndContainerCollector(containerCache kubecontainer.RuntimeCache) *podAndContainerCollector { + return &podAndContainerCollector{ + containerCache: containerCache, + } +} + +// Custom collector for current pod and container counts. +type podAndContainerCollector struct { + // Cache for accessing information about running containers. + containerCache kubecontainer.RuntimeCache +} + +// TODO(vmarmol): Split by source? 
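+
+// Illustrative usage of this package (a sketch, not code from this file):
+// callers typically time an operation and record the latency against one of
+// the summaries defined above, e.g.
+//
+//	start := time.Now()
+//	// ... perform the pod sync ...
+//	metrics.PodWorkerLatency.WithLabelValues("sync").Observe(metrics.SinceInMicroseconds(start))
+//
+// The "sync" label value here is an assumed operation type, shown only for
+// illustration.
+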
+var ( + runningPodCountDesc = prometheus.NewDesc( + prometheus.BuildFQName("", KubeletSubsystem, "running_pod_count"), + "Number of pods currently running", + nil, nil) + runningContainerCountDesc = prometheus.NewDesc( + prometheus.BuildFQName("", KubeletSubsystem, "running_container_count"), + "Number of containers currently running", + nil, nil) +) + +func (pc *podAndContainerCollector) Describe(ch chan<- *prometheus.Desc) { + ch <- runningPodCountDesc + ch <- runningContainerCountDesc +} + +func (pc *podAndContainerCollector) Collect(ch chan<- prometheus.Metric) { + runningPods, err := pc.containerCache.GetPods() + if err != nil { + glog.Warningf("Failed to get running container information while collecting metrics: %v", err) + return + } + + runningContainers := 0 + for _, p := range runningPods { + runningContainers += len(p.Containers) + } + ch <- prometheus.MustNewConstMetric( + runningPodCountDesc, + prometheus.GaugeValue, + float64(len(runningPods))) + ch <- prometheus.MustNewConstMetric( + runningContainerCountDesc, + prometheus.GaugeValue, + float64(runningContainers)) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/network/cni/cni.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/network/cni/cni.go new file mode 100644 index 000000000000..5eac8772fb96 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/network/cni/cni.go @@ -0,0 +1,207 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cni + +import ( + "fmt" + "net" + "sort" + "strings" + + "github.com/appc/cni/libcni" + cnitypes "github.com/appc/cni/pkg/types" + "github.com/golang/glog" + "k8s.io/kubernetes/pkg/apis/componentconfig" + kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" + "k8s.io/kubernetes/pkg/kubelet/dockertools" + "k8s.io/kubernetes/pkg/kubelet/network" +) + +const ( + CNIPluginName = "cni" + DefaultNetDir = "/etc/cni/net.d" + DefaultCNIDir = "/opt/cni/bin" + VendorCNIDirTemplate = "%s/opt/%s/bin" +) + +type cniNetworkPlugin struct { + network.NoopNetworkPlugin + + defaultNetwork *cniNetwork + host network.Host +} + +type cniNetwork struct { + name string + NetworkConfig *libcni.NetworkConfig + CNIConfig *libcni.CNIConfig +} + +func probeNetworkPluginsWithVendorCNIDirPrefix(pluginDir, vendorCNIDirPrefix string) []network.NetworkPlugin { + configList := make([]network.NetworkPlugin, 0) + network, err := getDefaultCNINetwork(pluginDir, vendorCNIDirPrefix) + if err != nil { + return configList + } + return append(configList, &cniNetworkPlugin{defaultNetwork: network}) +} + +func ProbeNetworkPlugins(pluginDir string) []network.NetworkPlugin { + return probeNetworkPluginsWithVendorCNIDirPrefix(pluginDir, "") +} + +func getDefaultCNINetwork(pluginDir, vendorCNIDirPrefix string) (*cniNetwork, error) { + if pluginDir == "" { + pluginDir = DefaultNetDir + } + files, err := libcni.ConfFiles(pluginDir) + switch { + case err != nil: + return nil, err + case len(files) == 0: + return nil, fmt.Errorf("No networks found in %s", pluginDir) + } + + sort.Strings(files) + for _, confFile := range files { + conf, err := libcni.ConfFromFile(confFile) + if err != nil { + glog.Warningf("Error loading CNI config file %s: %v", confFile, err) + continue + } + // Search for vendor-specific plugins as well as default plugins in the CNI codebase. + vendorCNIDir := fmt.Sprintf(VendorCNIDirTemplate, vendorCNIDirPrefix, conf.Network.Type) + cninet := &libcni.CNIConfig{ + Path: []string{DefaultCNIDir, vendorCNIDir}, + } + network := &cniNetwork{name: conf.Network.Name, NetworkConfig: conf, CNIConfig: cninet} + return network, nil + } + return nil, fmt.Errorf("No valid networks found in %s", pluginDir) +} + +func (plugin *cniNetworkPlugin) Init(host network.Host, hairpinMode componentconfig.HairpinMode) error { + plugin.host = host + return nil +} + +func (plugin *cniNetworkPlugin) Name() string { + return CNIPluginName +} + +func (plugin *cniNetworkPlugin) SetUpPod(namespace string, name string, id kubecontainer.ContainerID) error { + runtime, ok := plugin.host.GetRuntime().(*dockertools.DockerManager) + if !ok { + return fmt.Errorf("CNI execution called on non-docker runtime") + } + netns, err := runtime.GetNetNS(id) + if err != nil { + return err + } + + _, err = plugin.defaultNetwork.addToNetwork(name, namespace, id, netns) + if err != nil { + glog.Errorf("Error while adding to cni network: %s", err) + return err + } + + return err +} + +func (plugin *cniNetworkPlugin) TearDownPod(namespace string, name string, id kubecontainer.ContainerID) error { + runtime, ok := plugin.host.GetRuntime().(*dockertools.DockerManager) + if !ok { + return fmt.Errorf("CNI execution called on non-docker runtime") + } + netns, err := runtime.GetNetNS(id) + if err != nil { + return err + } + + return plugin.defaultNetwork.deleteFromNetwork(name, namespace, id, netns) +} + +// TODO: Use the addToNetwork function to obtain the IP of the Pod. That will assume idempotent ADD call to the plugin. 
+// Also fix the runtime's call to Status function to be done only in the case that the IP is lost, no need to do periodic calls +func (plugin *cniNetworkPlugin) GetPodNetworkStatus(namespace string, name string, id kubecontainer.ContainerID) (*network.PodNetworkStatus, error) { + runtime, ok := plugin.host.GetRuntime().(*dockertools.DockerManager) + if !ok { + return nil, fmt.Errorf("CNI execution called on non-docker runtime") + } + ipStr, err := runtime.GetContainerIP(id.ID, network.DefaultInterfaceName) + if err != nil { + return nil, err + } + ip, _, err := net.ParseCIDR(strings.Trim(ipStr, "\n")) + if err != nil { + return nil, err + } + return &network.PodNetworkStatus{IP: ip}, nil +} + +func (network *cniNetwork) addToNetwork(podName string, podNamespace string, podInfraContainerID kubecontainer.ContainerID, podNetnsPath string) (*cnitypes.Result, error) { + rt, err := buildCNIRuntimeConf(podName, podNamespace, podInfraContainerID, podNetnsPath) + if err != nil { + glog.Errorf("Error adding network: %v", err) + return nil, err + } + + netconf, cninet := network.NetworkConfig, network.CNIConfig + glog.V(4).Infof("About to run with conf.Network.Type=%v, c.Path=%v", netconf.Network.Type, cninet.Path) + res, err := cninet.AddNetwork(netconf, rt) + if err != nil { + glog.Errorf("Error adding network: %v", err) + return nil, err + } + + return res, nil +} + +func (network *cniNetwork) deleteFromNetwork(podName string, podNamespace string, podInfraContainerID kubecontainer.ContainerID, podNetnsPath string) error { + rt, err := buildCNIRuntimeConf(podName, podNamespace, podInfraContainerID, podNetnsPath) + if err != nil { + glog.Errorf("Error deleting network: %v", err) + return err + } + + netconf, cninet := network.NetworkConfig, network.CNIConfig + glog.V(4).Infof("About to run with conf.Network.Type=%v, c.Path=%v", netconf.Network.Type, cninet.Path) + err = cninet.DelNetwork(netconf, rt) + if err != nil { + glog.Errorf("Error deleting network: %v", err) + return err + } + return nil +} + +func buildCNIRuntimeConf(podName string, podNs string, podInfraContainerID kubecontainer.ContainerID, podNetnsPath string) (*libcni.RuntimeConf, error) { + glog.V(4).Infof("Got netns path %v", podNetnsPath) + glog.V(4).Infof("Using netns path %v", podNs) + + rt := &libcni.RuntimeConf{ + ContainerID: podInfraContainerID.ID, + NetNS: podNetnsPath, + IfName: network.DefaultInterfaceName, + Args: [][2]string{ + {"K8S_POD_NAMESPACE", podNs}, + {"K8S_POD_NAME", podName}, + {"K8S_POD_INFRA_CONTAINER_ID", podInfraContainerID.ID}, + }, + } + + return rt, nil +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/network/cni/cni_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/network/cni/cni_test.go new file mode 100644 index 000000000000..32beb984898c --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/network/cni/cni_test.go @@ -0,0 +1,207 @@ +// +build linux + +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cni + +import ( + "bytes" + "fmt" + "io/ioutil" + "math/rand" + "os" + "path" + "testing" + "text/template" + + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" + + cadvisorapi "github.com/google/cadvisor/info/v1" + + "k8s.io/kubernetes/cmd/kubelet/app/options" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/componentconfig" + "k8s.io/kubernetes/pkg/client/record" + kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" + containertest "k8s.io/kubernetes/pkg/kubelet/container/testing" + "k8s.io/kubernetes/pkg/kubelet/dockertools" + "k8s.io/kubernetes/pkg/kubelet/network" + nettest "k8s.io/kubernetes/pkg/kubelet/network/testing" + proberesults "k8s.io/kubernetes/pkg/kubelet/prober/results" + utiltesting "k8s.io/kubernetes/pkg/util/testing" +) + +func installPluginUnderTest(t *testing.T, testVendorCNIDirPrefix, testNetworkConfigPath, vendorName string, plugName string) { + pluginDir := path.Join(testNetworkConfigPath, plugName) + err := os.MkdirAll(pluginDir, 0777) + if err != nil { + t.Fatalf("Failed to create plugin config dir: %v", err) + } + pluginConfig := path.Join(pluginDir, plugName+".conf") + f, err := os.Create(pluginConfig) + if err != nil { + t.Fatalf("Failed to install plugin") + } + networkConfig := fmt.Sprintf("{ \"name\": \"%s\", \"type\": \"%s\" }", plugName, vendorName) + + _, err = f.WriteString(networkConfig) + if err != nil { + t.Fatalf("Failed to write network config file (%v)", err) + } + f.Close() + + vendorCNIDir := fmt.Sprintf(VendorCNIDirTemplate, testVendorCNIDirPrefix, vendorName) + err = os.MkdirAll(vendorCNIDir, 0777) + if err != nil { + t.Fatalf("Failed to create plugin dir: %v", err) + } + pluginExec := path.Join(vendorCNIDir, vendorName) + f, err = os.Create(pluginExec) + + const execScriptTempl = `#!/bin/bash +read ignore +env > {{.OutputEnv}} +echo "%@" >> {{.OutputEnv}} +export $(echo ${CNI_ARGS} | sed 's/;/ /g') &> /dev/null +mkdir -p {{.OutputDir}} &> /dev/null +echo -n "$CNI_COMMAND $CNI_NETNS $K8S_POD_NAMESPACE $K8S_POD_NAME $K8S_POD_INFRA_CONTAINER_ID" >& {{.OutputFile}} +echo -n "{ \"ip4\": { \"ip\": \"10.1.0.23/24\" } }" +` + execTemplateData := &map[string]interface{}{ + "OutputFile": path.Join(pluginDir, plugName+".out"), + "OutputEnv": path.Join(pluginDir, plugName+".env"), + "OutputDir": pluginDir, + } + + tObj := template.Must(template.New("test").Parse(execScriptTempl)) + buf := &bytes.Buffer{} + if err := tObj.Execute(buf, *execTemplateData); err != nil { + t.Fatalf("Error in executing script template - %v", err) + } + execScript := buf.String() + _, err = f.WriteString(execScript) + if err != nil { + t.Fatalf("Failed to write plugin exec - %v", err) + } + + err = f.Chmod(0777) + if err != nil { + t.Fatalf("Failed to set exec perms on plugin") + } + + f.Close() +} + +func tearDownPlugin(tmpDir string) { + err := os.RemoveAll(tmpDir) + if err != nil { + fmt.Printf("Error in cleaning up test: %v", err) + } +} + +type fakeNetworkHost struct { + kubeClient clientset.Interface +} + +func NewFakeHost(kubeClient clientset.Interface) *fakeNetworkHost { + host := &fakeNetworkHost{kubeClient: kubeClient} + return host +} + +func (fnh *fakeNetworkHost) GetPodByName(name, namespace string) (*api.Pod, bool) { + return nil, false +} + +func (fnh *fakeNetworkHost) GetKubeClient() clientset.Interface { + return nil +} + +func (nh *fakeNetworkHost) GetRuntime() kubecontainer.Runtime { + dm, fakeDockerClient := newTestDockerManager() + 
fakeDockerClient.SetFakeRunningContainers([]*dockertools.FakeContainer{ + { + ID: "test_infra_container", + Pid: 12345, + }, + }) + return dm +} + +func newTestDockerManager() (*dockertools.DockerManager, *dockertools.FakeDockerClient) { + fakeDocker := dockertools.NewFakeDockerClient() + fakeRecorder := &record.FakeRecorder{} + containerRefManager := kubecontainer.NewRefManager() + networkPlugin, _ := network.InitNetworkPlugin([]network.NetworkPlugin{}, "", nettest.NewFakeHost(nil), componentconfig.HairpinNone) + dockerManager := dockertools.NewFakeDockerManager( + fakeDocker, + fakeRecorder, + proberesults.NewManager(), + containerRefManager, + &cadvisorapi.MachineInfo{}, + options.GetDefaultPodInfraContainerImage(), + 0, 0, "", + &containertest.FakeOS{}, + networkPlugin, + nil, + nil, + nil) + + return dockerManager, fakeDocker +} + +func TestCNIPlugin(t *testing.T) { + // install some random plugin + pluginName := fmt.Sprintf("test%d", rand.Intn(1000)) + vendorName := fmt.Sprintf("test_vendor%d", rand.Intn(1000)) + + tmpDir := utiltesting.MkTmpdirOrDie("cni-test") + testNetworkConfigPath := path.Join(tmpDir, "plugins", "net", "cni") + testVendorCNIDirPrefix := tmpDir + defer tearDownPlugin(tmpDir) + installPluginUnderTest(t, testVendorCNIDirPrefix, testNetworkConfigPath, vendorName, pluginName) + + np := probeNetworkPluginsWithVendorCNIDirPrefix(path.Join(testNetworkConfigPath, pluginName), testVendorCNIDirPrefix) + plug, err := network.InitNetworkPlugin(np, "cni", NewFakeHost(nil), componentconfig.HairpinNone) + if err != nil { + t.Fatalf("Failed to select the desired plugin: %v", err) + } + + err = plug.SetUpPod("podNamespace", "podName", kubecontainer.ContainerID{Type: "docker", ID: "test_infra_container"}) + if err != nil { + t.Errorf("Expected nil: %v", err) + } + outputEnv := path.Join(testNetworkConfigPath, pluginName, pluginName+".env") + eo, eerr := ioutil.ReadFile(outputEnv) + outputFile := path.Join(testNetworkConfigPath, pluginName, pluginName+".out") + output, err := ioutil.ReadFile(outputFile) + if err != nil { + t.Errorf("Failed to read output file %s: %v (env %s err %v)", outputFile, err, eo, eerr) + } + expectedOutput := "ADD /proc/12345/ns/net podNamespace podName test_infra_container" + if string(output) != expectedOutput { + t.Errorf("Mismatch in expected output for setup hook. Expected '%s', got '%s'", expectedOutput, string(output)) + } + err = plug.TearDownPod("podNamespace", "podName", kubecontainer.ContainerID{Type: "docker", ID: "test_infra_container"}) + if err != nil { + t.Errorf("Expected nil: %v", err) + } + output, err = ioutil.ReadFile(path.Join(testNetworkConfigPath, pluginName, pluginName+".out")) + expectedOutput = "DEL /proc/12345/ns/net podNamespace podName test_infra_container" + if string(output) != expectedOutput { + t.Errorf("Mismatch in expected output for setup hook. Expected '%s', got '%s'", expectedOutput, string(output)) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/network/cni/testing/mock_cni.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/network/cni/testing/mock_cni.go new file mode 100644 index 000000000000..622edefd5776 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/network/cni/testing/mock_cni.go @@ -0,0 +1,39 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// mock_cni is a mock of the `libcni.CNI` interface. It's a handwritten mock
+// because there are only two functions to deal with.
+package mock_cni
+
+import (
+	"github.com/appc/cni/libcni"
+	"github.com/appc/cni/pkg/types"
+	"github.com/stretchr/testify/mock"
+)
+
+type MockCNI struct {
+	mock.Mock
+}
+
+func (m *MockCNI) AddNetwork(net *libcni.NetworkConfig, rt *libcni.RuntimeConf) (*types.Result, error) {
+	args := m.Called(net, rt)
+	return args.Get(0).(*types.Result), args.Error(1)
+}
+
+func (m *MockCNI) DelNetwork(net *libcni.NetworkConfig, rt *libcni.RuntimeConf) error {
+	args := m.Called(net, rt)
+	return args.Error(0)
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/network/exec/exec.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/network/exec/exec.go
new file mode 100644
index 000000000000..685da4b36113
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/network/exec/exec.go
@@ -0,0 +1,182 @@
+/*
+Copyright 2014 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package exec scans and loads networking plugins that are installed
+// under /usr/libexec/kubernetes/kubelet-plugins/net/exec/
+// The layout convention for a plugin is:
+//   plugin-name/            (plugins have to be directories first)
+//   plugin-name/plugin-name (executable that will be called out, see "Vendoring Note" for more nuances)
+//   plugin-name/<other-files>
+// where 'executable' has the following requirements:
+//   - should have exec permissions
+//   - should give non-zero exit code on failure, and zero on success
+//   - the arguments will be <action> <pod_namespace> <pod_name> <docker_id_of_infra_container>,
+//     whereupon <action> will be one of:
+//     - init, called when the kubelet loads the plugin
+//     - setup, called after the infra container of a pod is
+//       created, but before other containers of the pod are created
+//     - teardown, called before the pod infra container is killed
+//     - status, called at regular intervals and is supposed to return a json
+//       formatted output indicating the pod's IPAddress(v4/v6). An empty string value or an erroneous output
+//       will mean the container runtime (docker) will be asked for the PodIP
+//       e.g. {
+//         "apiVersion" : "v1beta1",
+//         "kind" : "PodNetworkStatus",
+//         "ip" : "10.20.30.40"
+//       }
+//       The fields "apiVersion" and "kind" are optional in version v1beta1
+// As the executables are called, the file-descriptors stdin, stdout, stderr
+// remain open. The combined output of stdout/stderr is captured and logged.
+//
+// Note: If the pod infra container self-terminates (e.g. crashes or is killed),
+// the entire pod lifecycle will be restarted, but teardown will not be called.
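+//
+// A minimal plugin executable, sketched here in Go purely for illustration
+// (no such plugin ships with this package; the names and the IP below are
+// made up):
+//
+//	func main() {
+//		action := os.Args[1] // one of: init, setup, teardown, status
+//		if action == "status" {
+//			fmt.Print(`{"kind":"PodNetworkStatus","apiVersion":"v1beta1","ip":"10.20.30.40"}`)
+//		}
+//		os.Exit(0) // a non-zero exit code would signal failure to the kubelet
+//	}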
+// +// Vendoring Note: +// Plugin Names can be vendored also. Use '~' as the escaped name for plugin directories. +// And expect command line argument to call vendored plugins as 'vendor/pluginName' +// e.g. pluginName = mysdn +// vendorname = mycompany +// then, plugin layout should be +// mycompany~mysdn/ +// mycompany~mysdn/mysdn (this becomes the executable) +// mycompany~mysdn/ +// and, call the kubelet with '--network-plugin=mycompany/mysdn' +package exec + +import ( + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "path" + "strings" + + "github.com/golang/glog" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/apis/componentconfig" + kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" + "k8s.io/kubernetes/pkg/kubelet/network" + utilexec "k8s.io/kubernetes/pkg/util/exec" +) + +type execNetworkPlugin struct { + network.NoopNetworkPlugin + + execName string + execPath string + host network.Host +} + +const ( + initCmd = "init" + setUpCmd = "setup" + tearDownCmd = "teardown" + statusCmd = "status" +) + +func ProbeNetworkPlugins(pluginDir string) []network.NetworkPlugin { + execPlugins := []network.NetworkPlugin{} + + files, _ := ioutil.ReadDir(pluginDir) + for _, f := range files { + // only directories are counted as plugins + // and pluginDir/dirname/dirname should be an executable + // unless dirname contains '~' for escaping namespace + // e.g. dirname = vendor~ipvlan + // then, executable will be pluginDir/dirname/ipvlan + if f.IsDir() { + execPath := path.Join(pluginDir, f.Name()) + execPlugins = append(execPlugins, &execNetworkPlugin{execName: network.UnescapePluginName(f.Name()), execPath: execPath}) + } + } + return execPlugins +} + +func (plugin *execNetworkPlugin) Init(host network.Host, hairpinMode componentconfig.HairpinMode) error { + err := plugin.validate() + if err != nil { + return err + } + plugin.host = host + // call the init script + out, err := utilexec.New().Command(plugin.getExecutable(), initCmd).CombinedOutput() + glog.V(5).Infof("Init 'exec' network plugin output: %s, %v", string(out), err) + return err +} + +func (plugin *execNetworkPlugin) getExecutable() string { + parts := strings.Split(plugin.execName, "/") + execName := parts[len(parts)-1] + return path.Join(plugin.execPath, execName) +} + +func (plugin *execNetworkPlugin) Name() string { + return plugin.execName +} + +func (plugin *execNetworkPlugin) validate() error { + if !isExecutable(plugin.getExecutable()) { + errStr := fmt.Sprintf("Invalid exec plugin. 
Executable '%s' does not have correct permissions.", plugin.execName) + return errors.New(errStr) + } + return nil +} + +func (plugin *execNetworkPlugin) SetUpPod(namespace string, name string, id kubecontainer.ContainerID) error { + out, err := utilexec.New().Command(plugin.getExecutable(), setUpCmd, namespace, name, id.ID).CombinedOutput() + glog.V(5).Infof("SetUpPod 'exec' network plugin output: %s, %v", string(out), err) + return err +} + +func (plugin *execNetworkPlugin) TearDownPod(namespace string, name string, id kubecontainer.ContainerID) error { + out, err := utilexec.New().Command(plugin.getExecutable(), tearDownCmd, namespace, name, id.ID).CombinedOutput() + glog.V(5).Infof("TearDownPod 'exec' network plugin output: %s, %v", string(out), err) + return err +} + +func (plugin *execNetworkPlugin) GetPodNetworkStatus(namespace string, name string, id kubecontainer.ContainerID) (*network.PodNetworkStatus, error) { + out, err := utilexec.New().Command(plugin.getExecutable(), statusCmd, namespace, name, id.ID).CombinedOutput() + glog.V(5).Infof("Status 'exec' network plugin output: %s, %v", string(out), err) + if err != nil { + return nil, err + } + if string(out) == "" { + return nil, nil + } + findVersion := struct { + unversioned.TypeMeta `json:",inline"` + }{} + err = json.Unmarshal(out, &findVersion) + if err != nil { + return nil, err + } + + // check kind and version + if findVersion.Kind != "" && findVersion.Kind != "PodNetworkStatus" { + errStr := fmt.Sprintf("Invalid 'kind' returned in network status for pod '%s'. Valid value is 'PodNetworkStatus', got '%s'.", name, findVersion.Kind) + return nil, errors.New(errStr) + } + switch findVersion.APIVersion { + case "": + fallthrough + case "v1beta1": + networkStatus := &network.PodNetworkStatus{} + err = json.Unmarshal(out, networkStatus) + return networkStatus, err + } + errStr := fmt.Sprintf("Unknown version '%s' in network status for pod '%s'.", findVersion.APIVersion, name) + return nil, errors.New(errStr) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/network/exec/exec_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/network/exec/exec_test.go new file mode 100644 index 000000000000..75fa0c985d73 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/network/exec/exec_test.go @@ -0,0 +1,341 @@ +// +build linux + +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package exec + +import ( + "bytes" + "fmt" + "io/ioutil" + "math/rand" + "os" + "path" + "sync" + "testing" + "text/template" + + "k8s.io/kubernetes/pkg/apis/componentconfig" + kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" + "k8s.io/kubernetes/pkg/kubelet/network" + nettest "k8s.io/kubernetes/pkg/kubelet/network/testing" + "k8s.io/kubernetes/pkg/util/sets" + utiltesting "k8s.io/kubernetes/pkg/util/testing" +) + +func tmpDirOrDie() string { + dir, err := utiltesting.MkTmpdir("exec-test") + if err != nil { + panic(fmt.Sprintf("error creating tmp dir: %v", err)) + } + return path.Join(dir, "fake", "plugins", "net") +} + +var lock sync.Mutex +var namesInUse = sets.NewString() + +func selectName() string { + lock.Lock() + defer lock.Unlock() + for { + pluginName := fmt.Sprintf("test%d", rand.Intn(1000)) + if !namesInUse.Has(pluginName) { + namesInUse.Insert(pluginName) + return pluginName + } + } +} + +func releaseName(name string) { + lock.Lock() + defer lock.Unlock() + namesInUse.Delete(name) +} + +func installPluginUnderTest(t *testing.T, vendorName, testPluginPath, plugName string, execTemplateData *map[string]interface{}) { + vendoredName := plugName + if vendorName != "" { + vendoredName = fmt.Sprintf("%s~%s", vendorName, plugName) + } + pluginDir := path.Join(testPluginPath, vendoredName) + err := os.MkdirAll(pluginDir, 0777) + if err != nil { + t.Errorf("Failed to create plugin dir %q: %v", pluginDir, err) + } + pluginExec := path.Join(pluginDir, plugName) + f, err := os.Create(pluginExec) + if err != nil { + t.Errorf("Failed to install plugin %q: %v", pluginExec, err) + } + defer f.Close() + err = f.Chmod(0777) + if err != nil { + t.Errorf("Failed to set exec perms on plugin %q: %v", pluginExec, err) + } + const execScriptTempl = `#!/bin/bash + +# If status hook is called print the expected json to stdout +if [ "$1" == "status" ]; then + echo -n '{ + "ip" : "{{.IPAddress}}" +}' +fi + +# Direct the arguments to a file to be tested against later +echo -n "$@" &> {{.OutputFile}} +` + if execTemplateData == nil { + execTemplateData = &map[string]interface{}{ + "IPAddress": "10.20.30.40", + "OutputFile": path.Join(pluginDir, plugName+".out"), + } + } + + tObj := template.Must(template.New("test").Parse(execScriptTempl)) + buf := &bytes.Buffer{} + if err := tObj.Execute(buf, *execTemplateData); err != nil { + t.Errorf("Error in executing script template: %v", err) + } + execScript := buf.String() + _, err = f.WriteString(execScript) + if err != nil { + t.Errorf("Failed to write plugin %q: %v", pluginExec, err) + } +} + +func tearDownPlugin(testPluginPath string) { + err := os.RemoveAll(testPluginPath) + if err != nil { + fmt.Printf("Error in cleaning up test: %v", err) + } +} + +func TestSelectPlugin(t *testing.T) { + // The temp dir where test plugins will be stored. + testPluginPath := tmpDirOrDie() + + // install some random plugin under testPluginPath + pluginName := selectName() + defer tearDownPlugin(testPluginPath) + defer releaseName(pluginName) + + installPluginUnderTest(t, "", testPluginPath, pluginName, nil) + + plug, err := network.InitNetworkPlugin(ProbeNetworkPlugins(testPluginPath), pluginName, nettest.NewFakeHost(nil), componentconfig.HairpinNone) + if err != nil { + t.Errorf("Failed to select the desired plugin: %v", err) + } + if plug.Name() != pluginName { + t.Errorf("Wrong plugin selected, chose %s, got %s\n", pluginName, plug.Name()) + } +} + +func TestSelectVendoredPlugin(t *testing.T) { + // The temp dir where test plugins will be stored. 
+ testPluginPath := tmpDirOrDie() + + // install some random plugin under testPluginPath + pluginName := selectName() + defer tearDownPlugin(testPluginPath) + defer releaseName(pluginName) + + vendor := "mycompany" + installPluginUnderTest(t, vendor, testPluginPath, pluginName, nil) + + vendoredPluginName := fmt.Sprintf("%s/%s", vendor, pluginName) + plug, err := network.InitNetworkPlugin(ProbeNetworkPlugins(testPluginPath), vendoredPluginName, nettest.NewFakeHost(nil), componentconfig.HairpinNone) + if err != nil { + t.Errorf("Failed to select the desired plugin: %v", err) + } + if plug.Name() != vendoredPluginName { + t.Errorf("Wrong plugin selected, chose %s, got %s\n", vendoredPluginName, plug.Name()) + } +} + +func TestSelectWrongPlugin(t *testing.T) { + // The temp dir where test plugins will be stored. + testPluginPath := tmpDirOrDie() + + // install some random plugin under testPluginPath + pluginName := selectName() + defer tearDownPlugin(testPluginPath) + defer releaseName(pluginName) + + installPluginUnderTest(t, "", testPluginPath, pluginName, nil) + + wrongPlugin := "abcd" + plug, err := network.InitNetworkPlugin(ProbeNetworkPlugins(testPluginPath), wrongPlugin, nettest.NewFakeHost(nil), componentconfig.HairpinNone) + if plug != nil || err == nil { + t.Errorf("Expected to see an error. Wrong plugin selected.") + } +} + +func TestPluginValidation(t *testing.T) { + // The temp dir where test plugins will be stored. + testPluginPath := tmpDirOrDie() + + // install some random plugin under testPluginPath + pluginName := selectName() + defer tearDownPlugin(testPluginPath) + defer releaseName(pluginName) + + installPluginUnderTest(t, "", testPluginPath, pluginName, nil) + + // modify the perms of the pluginExecutable + f, err := os.Open(path.Join(testPluginPath, pluginName, pluginName)) + if err != nil { + t.Errorf("Nil value expected.") + } + err = f.Chmod(0444) + if err != nil { + t.Errorf("Failed to set perms on plugin exec") + } + f.Close() + + _, err = network.InitNetworkPlugin(ProbeNetworkPlugins(testPluginPath), pluginName, nettest.NewFakeHost(nil), componentconfig.HairpinNone) + if err == nil { + // we expected an error here because validation would have failed + t.Errorf("Expected non-nil value.") + } +} + +func TestPluginSetupHook(t *testing.T) { + // The temp dir where test plugins will be stored. + testPluginPath := tmpDirOrDie() + + // install some random plugin under testPluginPath + pluginName := selectName() + defer tearDownPlugin(testPluginPath) + defer releaseName(pluginName) + + installPluginUnderTest(t, "", testPluginPath, pluginName, nil) + + plug, err := network.InitNetworkPlugin(ProbeNetworkPlugins(testPluginPath), pluginName, nettest.NewFakeHost(nil), componentconfig.HairpinNone) + + err = plug.SetUpPod("podNamespace", "podName", kubecontainer.ContainerID{Type: "docker", ID: "dockerid2345"}) + if err != nil { + t.Errorf("Expected nil: %v", err) + } + // check output of setup hook + output, err := ioutil.ReadFile(path.Join(testPluginPath, pluginName, pluginName+".out")) + if err != nil { + t.Errorf("Expected nil") + } + expectedOutput := "setup podNamespace podName dockerid2345" + if string(output) != expectedOutput { + t.Errorf("Mismatch in expected output for setup hook. Expected '%s', got '%s'", expectedOutput, string(output)) + } +} + +func TestPluginTearDownHook(t *testing.T) { + // The temp dir where test plugins will be stored. 
+ testPluginPath := tmpDirOrDie() + + // install some random plugin under testPluginPath + pluginName := selectName() + defer tearDownPlugin(testPluginPath) + defer releaseName(pluginName) + + installPluginUnderTest(t, "", testPluginPath, pluginName, nil) + + plug, err := network.InitNetworkPlugin(ProbeNetworkPlugins(testPluginPath), pluginName, nettest.NewFakeHost(nil), componentconfig.HairpinNone) + + err = plug.TearDownPod("podNamespace", "podName", kubecontainer.ContainerID{Type: "docker", ID: "dockerid2345"}) + if err != nil { + t.Errorf("Expected nil") + } + // check output of setup hook + output, err := ioutil.ReadFile(path.Join(testPluginPath, pluginName, pluginName+".out")) + if err != nil { + t.Errorf("Expected nil") + } + expectedOutput := "teardown podNamespace podName dockerid2345" + if string(output) != expectedOutput { + t.Errorf("Mismatch in expected output for teardown hook. Expected '%s', got '%s'", expectedOutput, string(output)) + } +} + +func TestPluginStatusHook(t *testing.T) { + // The temp dir where test plugins will be stored. + testPluginPath := tmpDirOrDie() + + // install some random plugin under testPluginPath + pluginName := selectName() + defer tearDownPlugin(testPluginPath) + defer releaseName(pluginName) + + installPluginUnderTest(t, "", testPluginPath, pluginName, nil) + + plug, err := network.InitNetworkPlugin(ProbeNetworkPlugins(testPluginPath), pluginName, nettest.NewFakeHost(nil), componentconfig.HairpinNone) + + ip, err := plug.GetPodNetworkStatus("namespace", "name", kubecontainer.ContainerID{Type: "docker", ID: "dockerid2345"}) + if err != nil { + t.Errorf("Expected nil got %v", err) + } + // check output of status hook + output, err := ioutil.ReadFile(path.Join(testPluginPath, pluginName, pluginName+".out")) + if err != nil { + t.Errorf("Expected nil") + } + expectedOutput := "status namespace name dockerid2345" + if string(output) != expectedOutput { + t.Errorf("Mismatch in expected output for status hook. Expected '%s', got '%s'", expectedOutput, string(output)) + } + if ip.IP.String() != "10.20.30.40" { + t.Errorf("Mismatch in expected output for status hook. Expected '10.20.30.40', got '%s'", ip.IP.String()) + } +} + +func TestPluginStatusHookIPv6(t *testing.T) { + // The temp dir where test plugins will be stored. + testPluginPath := tmpDirOrDie() + + // install some random plugin under testPluginPath + pluginName := selectName() + defer tearDownPlugin(testPluginPath) + defer releaseName(pluginName) + + pluginDir := path.Join(testPluginPath, pluginName) + execTemplate := &map[string]interface{}{ + "IPAddress": "fe80::e2cb:4eff:fef9:6710", + "OutputFile": path.Join(pluginDir, pluginName+".out"), + } + installPluginUnderTest(t, "", testPluginPath, pluginName, execTemplate) + + plug, err := network.InitNetworkPlugin(ProbeNetworkPlugins(testPluginPath), pluginName, nettest.NewFakeHost(nil), componentconfig.HairpinNone) + if err != nil { + t.Errorf("InitNetworkPlugin() failed: %v", err) + } + + ip, err := plug.GetPodNetworkStatus("namespace", "name", kubecontainer.ContainerID{Type: "docker", ID: "dockerid2345"}) + if err != nil { + t.Errorf("Status() failed: %v", err) + } + // check output of status hook + outPath := path.Join(testPluginPath, pluginName, pluginName+".out") + output, err := ioutil.ReadFile(outPath) + if err != nil { + t.Errorf("ReadFile(%q) failed: %v", outPath, err) + } + expectedOutput := "status namespace name dockerid2345" + if string(output) != expectedOutput { + t.Errorf("Mismatch in expected output for status hook. 
Expected %q, got %q", expectedOutput, string(output)) + } + if ip.IP.String() != "fe80::e2cb:4eff:fef9:6710" { + t.Errorf("Mismatch in expected output for status hook. Expected 'fe80::e2cb:4eff:fef9:6710', got '%s'", ip.IP.String()) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/util/editor/term.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/network/exec/exec_unix.go similarity index 74% rename from Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/util/editor/term.go rename to Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/network/exec/exec_unix.go index 9db85fe4b062..26847fe768d6 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubectl/cmd/util/editor/term.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/network/exec/exec_unix.go @@ -16,12 +16,12 @@ See the License for the specific language governing permissions and limitations under the License. */ -package editor +package exec -import ( - "os" - "syscall" -) +import "syscall" -// childSignals are the allowed signals that can be sent to children in Unix variant OS's -var childSignals = []os.Signal{syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT} +const X_OK = 0x1 + +func isExecutable(path string) bool { + return syscall.Access(path, X_OK) == nil +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/network/exec/exec_unsupported.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/network/exec/exec_unsupported.go new file mode 100644 index 000000000000..e2d4969f7594 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/network/exec/exec_unsupported.go @@ -0,0 +1,23 @@ +// +build windows + +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package exec + +func isExecutable(path string) bool { + return false +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/network/hairpin/hairpin.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/network/hairpin/hairpin.go new file mode 100644 index 000000000000..b725fbba3af9 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/network/hairpin/hairpin.go @@ -0,0 +1,122 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package hairpin + +import ( + "fmt" + "io/ioutil" + "net" + "os" + "path" + "regexp" + "strconv" + + "github.com/golang/glog" + "k8s.io/kubernetes/pkg/util/exec" +) + +const ( + sysfsNetPath = "/sys/devices/virtual/net" + brportRelativePath = "brport" + hairpinModeRelativePath = "hairpin_mode" + hairpinEnable = "1" +) + +var ( + ethtoolOutputRegex = regexp.MustCompile("peer_ifindex: (\\d+)") +) + +func SetUpContainerPid(containerPid int, containerInterfaceName string) error { + pidStr := fmt.Sprintf("%d", containerPid) + nsenterArgs := []string{"-t", pidStr, "-n"} + return setUpContainerInternal(containerInterfaceName, pidStr, nsenterArgs) +} + +func SetUpContainerPath(netnsPath string, containerInterfaceName string) error { + if netnsPath[0] != '/' { + return fmt.Errorf("netnsPath path '%s' was invalid", netnsPath) + } + nsenterArgs := []string{"-n", netnsPath} + return setUpContainerInternal(containerInterfaceName, netnsPath, nsenterArgs) +} + +func setUpContainerInternal(containerInterfaceName, containerDesc string, nsenterArgs []string) error { + e := exec.New() + hostIfName, err := findPairInterfaceOfContainerInterface(e, containerInterfaceName, containerDesc, nsenterArgs) + if err != nil { + glog.Infof("Unable to find pair interface, setting up all interfaces: %v", err) + return setUpAllInterfaces() + } + return setUpInterface(hostIfName) +} + +func findPairInterfaceOfContainerInterface(e exec.Interface, containerInterfaceName, containerDesc string, nsenterArgs []string) (string, error) { + nsenterPath, err := e.LookPath("nsenter") + if err != nil { + return "", err + } + ethtoolPath, err := e.LookPath("ethtool") + if err != nil { + return "", err + } + + nsenterArgs = append(nsenterArgs, "-F", "--", ethtoolPath, "--statistics", containerInterfaceName) + output, err := e.Command(nsenterPath, nsenterArgs...).CombinedOutput() + if err != nil { + return "", fmt.Errorf("Unable to query interface %s of container %s: %v: %s", containerInterfaceName, containerDesc, err, string(output)) + } + // look for peer_ifindex + match := ethtoolOutputRegex.FindSubmatch(output) + if match == nil { + return "", fmt.Errorf("No peer_ifindex in interface statistics for %s of container %s", containerInterfaceName, containerDesc) + } + peerIfIndex, err := strconv.Atoi(string(match[1])) + if err != nil { // seems impossible (\d+ not numeric) + return "", fmt.Errorf("peer_ifindex wasn't numeric: %s: %v", match[1], err) + } + iface, err := net.InterfaceByIndex(peerIfIndex) + if err != nil { + return "", err + } + return iface.Name, nil +} + +func setUpAllInterfaces() error { + interfaces, err := net.Interfaces() + if err != nil { + return err + } + for _, netIf := range interfaces { + setUpInterface(netIf.Name) // ignore errors + } + return nil +} + +func setUpInterface(ifName string) error { + glog.V(3).Infof("Enabling hairpin on interface %s", ifName) + ifPath := path.Join(sysfsNetPath, ifName) + if _, err := os.Stat(ifPath); err != nil { + return err + } + brportPath := path.Join(ifPath, brportRelativePath) + if _, err := os.Stat(brportPath); err != nil && os.IsNotExist(err) { + // Device is not on a bridge, so doesn't need hairpin mode + return nil + } + hairpinModeFile := path.Join(brportPath, hairpinModeRelativePath) + return ioutil.WriteFile(hairpinModeFile, []byte(hairpinEnable), 0644) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/network/hairpin/hairpin_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/network/hairpin/hairpin_test.go new file mode 100644 index 
000000000000..b94c7b997cb3 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/network/hairpin/hairpin_test.go @@ -0,0 +1,108 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package hairpin + +import ( + "errors" + "fmt" + "net" + "os" + "strings" + "testing" + + "k8s.io/kubernetes/pkg/util/exec" +) + +func TestFindPairInterfaceOfContainerInterface(t *testing.T) { + // there should be at least "lo" on any system + interfaces, _ := net.Interfaces() + validOutput := fmt.Sprintf("garbage\n peer_ifindex: %d", interfaces[0].Index) + invalidOutput := fmt.Sprintf("garbage\n unknown: %d", interfaces[0].Index) + + tests := []struct { + output string + err error + expectedName string + expectErr bool + }{ + { + output: validOutput, + expectedName: interfaces[0].Name, + }, + { + output: invalidOutput, + expectErr: true, + }, + { + output: validOutput, + err: errors.New("error"), + expectErr: true, + }, + } + for _, test := range tests { + fcmd := exec.FakeCmd{ + CombinedOutputScript: []exec.FakeCombinedOutputAction{ + func() ([]byte, error) { return []byte(test.output), test.err }, + }, + } + fexec := exec.FakeExec{ + CommandScript: []exec.FakeCommandAction{ + func(cmd string, args ...string) exec.Cmd { + return exec.InitFakeCmd(&fcmd, cmd, args...) + }, + }, + LookPathFunc: func(file string) (string, error) { + return fmt.Sprintf("/fake-bin/%s", file), nil + }, + } + nsenterArgs := []string{"-t", "123", "-n"} + name, err := findPairInterfaceOfContainerInterface(&fexec, "eth0", "123", nsenterArgs) + if test.expectErr { + if err == nil { + t.Errorf("unexpected non-error") + } + } else { + if err != nil { + t.Errorf("unexpected error: %v", err) + } + } + if name != test.expectedName { + t.Errorf("unexpected name: %s (expected: %s)", name, test.expectedName) + } + } +} + +func TestSetUpInterfaceNonExistent(t *testing.T) { + err := setUpInterface("non-existent") + if err == nil { + t.Errorf("unexpected non-error") + } + deviceDir := fmt.Sprintf("%s/%s", sysfsNetPath, "non-existent") + if !strings.Contains(fmt.Sprintf("%v", err), deviceDir) { + t.Errorf("should have tried to open %s", deviceDir) + } +} + +func TestSetUpInterfaceNotBridged(t *testing.T) { + err := setUpInterface("lo") + if err != nil { + if os.IsNotExist(err) { + t.Skipf("'lo' device does not exist??? (%v)", err) + } + t.Errorf("unexpected error: %v", err) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/network/kubenet/kubenet_linux.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/network/kubenet/kubenet_linux.go new file mode 100644 index 000000000000..db22b3afd7b0 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/network/kubenet/kubenet_linux.go @@ -0,0 +1,824 @@ +// +build linux + +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubenet + +import ( + "bytes" + "crypto/sha256" + "encoding/base32" + "fmt" + "net" + "strings" + "sync" + "syscall" + "time" + + "github.com/appc/cni/libcni" + cnitypes "github.com/appc/cni/pkg/types" + "github.com/golang/glog" + "github.com/vishvananda/netlink" + "github.com/vishvananda/netlink/nl" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/componentconfig" + kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" + "k8s.io/kubernetes/pkg/kubelet/dockertools" + "k8s.io/kubernetes/pkg/kubelet/network" + iptablesproxy "k8s.io/kubernetes/pkg/proxy/iptables" + "k8s.io/kubernetes/pkg/util/bandwidth" + utildbus "k8s.io/kubernetes/pkg/util/dbus" + utilexec "k8s.io/kubernetes/pkg/util/exec" + utiliptables "k8s.io/kubernetes/pkg/util/iptables" + utilsets "k8s.io/kubernetes/pkg/util/sets" + utilsysctl "k8s.io/kubernetes/pkg/util/sysctl" +) + +const ( + KubenetPluginName = "kubenet" + BridgeName = "cbr0" + DefaultCNIDir = "/opt/cni/bin" + + sysctlBridgeCallIptables = "net/bridge/bridge-nf-call-iptables" + + // the hostport chain + kubenetHostportsChain utiliptables.Chain = "KUBENET-HOSTPORTS" + // prefix for kubenet hostport chains + kubenetHostportChainPrefix string = "KUBENET-HP-" +) + +type kubenetNetworkPlugin struct { + network.NoopNetworkPlugin + + host network.Host + netConfig *libcni.NetworkConfig + loConfig *libcni.NetworkConfig + cniConfig libcni.CNI + bandwidthShaper bandwidth.BandwidthShaper + mu sync.Mutex //Mutex for protecting podIPs map, netConfig, and shaper initialization + podIPs map[kubecontainer.ContainerID]string + MTU int + execer utilexec.Interface + nsenterPath string + hairpinMode componentconfig.HairpinMode + hostPortMap map[hostport]closeable + iptables utiliptables.Interface +} + +func NewPlugin() network.NetworkPlugin { + protocol := utiliptables.ProtocolIpv4 + execer := utilexec.New() + dbus := utildbus.New() + iptInterface := utiliptables.New(execer, dbus, protocol) + + return &kubenetNetworkPlugin{ + podIPs: make(map[kubecontainer.ContainerID]string), + hostPortMap: make(map[hostport]closeable), + MTU: 1460, //TODO: don't hardcode this + execer: utilexec.New(), + iptables: iptInterface, + } +} + +func (plugin *kubenetNetworkPlugin) Init(host network.Host, hairpinMode componentconfig.HairpinMode) error { + plugin.host = host + plugin.hairpinMode = hairpinMode + plugin.cniConfig = &libcni.CNIConfig{ + Path: []string{DefaultCNIDir}, + } + + if link, err := findMinMTU(); err == nil { + plugin.MTU = link.MTU + glog.V(5).Infof("Using interface %s MTU %d as bridge MTU", link.Name, link.MTU) + } else { + glog.Warningf("Failed to find default bridge MTU: %v", err) + } + + // Since this plugin uses a Linux bridge, set bridge-nf-call-iptables=1 + // is necessary to ensure kube-proxy functions correctly. + // + // This will return an error on older kernel version (< 3.18) as the module + // was built-in, we simply ignore the error here. A better thing to do is + // to check the kernel version in the future. 
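+	// A sketch of such a version check, purely illustrative (the file path and
+	// parsing below are assumptions, not part of this plugin):
+	//
+	//	raw, _ := ioutil.ReadFile("/proc/sys/kernel/osrelease") // e.g. "3.18.0-generic"
+	//	release := strings.TrimSpace(string(raw))
+	//	// compare the major.minor prefix of release against "3.18" before
+	//	// deciding whether the modprobe below is needed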
+ plugin.execer.Command("modprobe", "br-netfilter").CombinedOutput() + err := utilsysctl.SetSysctl(sysctlBridgeCallIptables, 1) + if err != nil { + glog.Warningf("can't set sysctl %s: %v", sysctlBridgeCallIptables, err) + } + + plugin.loConfig, err = libcni.ConfFromBytes([]byte(`{ + "cniVersion": "0.1.0", + "name": "kubenet-loopback", + "type": "loopback" +}`)) + if err != nil { + return fmt.Errorf("Failed to generate loopback config: %v", err) + } + + return nil +} + +func findMinMTU() (*net.Interface, error) { + intfs, err := net.Interfaces() + if err != nil { + return nil, err + } + + mtu := 999999 + defIntfIndex := -1 + for i, intf := range intfs { + if ((intf.Flags & net.FlagUp) != 0) && (intf.Flags&(net.FlagLoopback|net.FlagPointToPoint) == 0) { + if intf.MTU < mtu { + mtu = intf.MTU + defIntfIndex = i + } + } + } + + if mtu >= 999999 || mtu < 576 || defIntfIndex < 0 { + return nil, fmt.Errorf("no suitable interface: %v", BridgeName) + } + + return &intfs[defIntfIndex], nil +} + +const NET_CONFIG_TEMPLATE = `{ + "cniVersion": "0.1.0", + "name": "kubenet", + "type": "bridge", + "bridge": "%s", + "mtu": %d, + "addIf": "%s", + "isGateway": true, + "ipMasq": true, + "ipam": { + "type": "host-local", + "subnet": "%s", + "gateway": "%s", + "routes": [ + { "dst": "0.0.0.0/0" } + ] + } +}` + +func (plugin *kubenetNetworkPlugin) Event(name string, details map[string]interface{}) { + if name != network.NET_PLUGIN_EVENT_POD_CIDR_CHANGE { + return + } + + plugin.mu.Lock() + defer plugin.mu.Unlock() + + podCIDR, ok := details[network.NET_PLUGIN_EVENT_POD_CIDR_CHANGE_DETAIL_CIDR].(string) + if !ok { + glog.Warningf("%s event didn't contain pod CIDR", network.NET_PLUGIN_EVENT_POD_CIDR_CHANGE) + return + } + + if plugin.netConfig != nil { + glog.V(5).Infof("Ignoring subsequent pod CIDR update to %s", podCIDR) + return + } + + glog.V(5).Infof("PodCIDR is set to %q", podCIDR) + _, cidr, err := net.ParseCIDR(podCIDR) + if err == nil { + // Set bridge address to first address in IPNet + cidr.IP.To4()[3] += 1 + + json := fmt.Sprintf(NET_CONFIG_TEMPLATE, BridgeName, plugin.MTU, network.DefaultInterfaceName, podCIDR, cidr.IP.String()) + glog.V(2).Infof("CNI network config set to %v", json) + plugin.netConfig, err = libcni.ConfFromBytes([]byte(json)) + if err == nil { + glog.V(5).Infof("CNI network config:\n%s", json) + + // Ensure cbr0 has no conflicting addresses; CNI's 'bridge' + // plugin will bail out if the bridge has an unexpected one + plugin.clearBridgeAddressesExcept(cidr.IP.String()) + } + } + + if err != nil { + glog.Warningf("Failed to generate CNI network config: %v", err) + } +} + +func (plugin *kubenetNetworkPlugin) clearBridgeAddressesExcept(keep string) { + bridge, err := netlink.LinkByName(BridgeName) + if err != nil { + return + } + + addrs, err := netlink.AddrList(bridge, syscall.AF_INET) + if err != nil { + return + } + + for _, addr := range addrs { + if addr.IPNet.String() != keep { + glog.V(5).Infof("Removing old address %s from %s", addr.IPNet.String(), BridgeName) + netlink.AddrDel(bridge, &addr) + } + } +} + +// ensureBridgeTxQueueLen() ensures that the bridge interface's TX queue +// length is greater than zero. Due to a CNI <= 0.3.0 'bridge' plugin bug, +// the bridge is initially created with a TX queue length of 0, which gets +// used as the packet limit for FIFO traffic shapers, which drops packets. 
+// TODO: remove when we can depend on a fixed CNI +func (plugin *kubenetNetworkPlugin) ensureBridgeTxQueueLen() { + bridge, err := netlink.LinkByName(BridgeName) + if err != nil { + return + } + + if bridge.Attrs().TxQLen > 0 { + return + } + + req := nl.NewNetlinkRequest(syscall.RTM_NEWLINK, syscall.NLM_F_ACK) + msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) + req.AddData(msg) + + nameData := nl.NewRtAttr(syscall.IFLA_IFNAME, nl.ZeroTerminated(BridgeName)) + req.AddData(nameData) + + qlen := nl.NewRtAttr(syscall.IFLA_TXQLEN, nl.Uint32Attr(1000)) + req.AddData(qlen) + + _, err = req.Execute(syscall.NETLINK_ROUTE, 0) + if err != nil { + glog.V(5).Infof("Failed to set bridge tx queue length: %v", err) + } +} + +func (plugin *kubenetNetworkPlugin) Name() string { + return KubenetPluginName +} + +func (plugin *kubenetNetworkPlugin) Capabilities() utilsets.Int { + return utilsets.NewInt(network.NET_PLUGIN_CAPABILITY_SHAPING) +} + +func (plugin *kubenetNetworkPlugin) SetUpPod(namespace string, name string, id kubecontainer.ContainerID) error { + plugin.mu.Lock() + defer plugin.mu.Unlock() + + start := time.Now() + defer func() { + glog.V(4).Infof("SetUpPod took %v for %s/%s", time.Since(start), namespace, name) + }() + + pod, ok := plugin.host.GetPodByName(namespace, name) + if !ok { + return fmt.Errorf("pod %q cannot be found", name) + } + // try to open pod host port if specified + hostportMap, err := plugin.openPodHostports(pod) + if err != nil { + return err + } + if len(hostportMap) > 0 { + // defer to decide whether to keep the host port open based on the result of SetUpPod + defer plugin.syncHostportMap(id, hostportMap) + } + + ingress, egress, err := bandwidth.ExtractPodBandwidthResources(pod.Annotations) + if err != nil { + return fmt.Errorf("Error reading pod bandwidth annotations: %v", err) + } + + if err := plugin.Status(); err != nil { + return fmt.Errorf("Kubenet cannot SetUpPod: %v", err) + } + + // Bring up container loopback interface + if _, err := plugin.addContainerToNetwork(plugin.loConfig, "lo", namespace, name, id); err != nil { + return err + } + + // Hook container up with our bridge + res, err := plugin.addContainerToNetwork(plugin.netConfig, network.DefaultInterfaceName, namespace, name, id) + if err != nil { + return err + } + if res.IP4 == nil { + return fmt.Errorf("CNI plugin reported no IPv4 address for container %v.", id) + } + ip4 := res.IP4.IP.IP.To4() + if ip4 == nil { + return fmt.Errorf("CNI plugin reported an invalid IPv4 address for container %v: %+v.", id, res.IP4) + } + plugin.podIPs[id] = ip4.String() + + // Put the container bridge into promiscuous mode to force it to accept hairpin packets. + // TODO: Remove this once the kernel bug (#20096) is fixed. 
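+ // The check-and-set below is roughly equivalent to:
+ //   ip link show dev cbr0 | grep -q PROMISC || ip link set cbr0 promisc on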
+ // TODO: check and set promiscuous mode with netlink once vishvananda/netlink supports it + if plugin.hairpinMode == componentconfig.PromiscuousBridge { + output, err := plugin.execer.Command("ip", "link", "show", "dev", BridgeName).CombinedOutput() + if err != nil || strings.Index(string(output), "PROMISC") < 0 { + _, err := plugin.execer.Command("ip", "link", "set", BridgeName, "promisc", "on").CombinedOutput() + if err != nil { + return fmt.Errorf("Error setting promiscuous mode on %s: %v", BridgeName, err) + } + } + } + + // The first SetUpPod call creates the bridge; get a shaper for the sake of + // initialization + shaper := plugin.shaper() + + if egress != nil || ingress != nil { + ipAddr := plugin.podIPs[id] + if err := shaper.ReconcileCIDR(fmt.Sprintf("%s/32", ipAddr), egress, ingress); err != nil { + return fmt.Errorf("Failed to add pod to shaper: %v", err) + } + } + + plugin.syncHostportsRules() + return nil +} + +func (plugin *kubenetNetworkPlugin) TearDownPod(namespace string, name string, id kubecontainer.ContainerID) error { + plugin.mu.Lock() + defer plugin.mu.Unlock() + + start := time.Now() + defer func() { + glog.V(4).Infof("TearDownPod took %v for %s/%s", time.Since(start), namespace, name) + }() + + if plugin.netConfig == nil { + return fmt.Errorf("Kubenet needs a PodCIDR to tear down pods") + } + + // no cached IP is Ok during teardown + podIP, hasIP := plugin.podIPs[id] + if hasIP { + glog.V(5).Infof("Removing pod IP %s from shaper", podIP) + // shaper wants /32 + if err := plugin.shaper().Reset(fmt.Sprintf("%s/32", podIP)); err != nil { + // Possible bandwidth shaping wasn't enabled for this pod anyways + glog.V(4).Infof("Failed to remove pod IP %s from shaper: %v", podIP, err) + } + } + if err := plugin.delContainerFromNetwork(plugin.netConfig, network.DefaultInterfaceName, namespace, name, id); err != nil { + // This is to prevent returning error when TearDownPod is called twice on the same pod. This helps to reduce event pollution. + if !hasIP { + glog.Warningf("Failed to delete container from kubenet: %v", err) + return nil + } + return err + } + delete(plugin.podIPs, id) + + plugin.syncHostportsRules() + return nil +} + +// TODO: Use the addToNetwork function to obtain the IP of the Pod. That will assume idempotent ADD call to the plugin. +// Also fix the runtime's call to Status function to be done only in the case that the IP is lost, no need to do periodic calls +func (plugin *kubenetNetworkPlugin) GetPodNetworkStatus(namespace string, name string, id kubecontainer.ContainerID) (*network.PodNetworkStatus, error) { + plugin.mu.Lock() + defer plugin.mu.Unlock() + // Assuming the ip of pod does not change. Try to retrieve ip from kubenet map first. 
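+ // On a cache miss (e.g. after a kubelet restart) we fall back to running
+ // `ip -o -4 addr show dev eth0` inside the pod's network namespace via
+ // nsenter, as done below.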
+ if podIP, ok := plugin.podIPs[id]; ok { + return &network.PodNetworkStatus{IP: net.ParseIP(podIP)}, nil + } + + netnsPath, err := plugin.host.GetRuntime().GetNetNS(id) + if err != nil { + return nil, fmt.Errorf("Kubenet failed to retrieve network namespace path: %v", err) + } + nsenterPath, err := plugin.getNsenterPath() + if err != nil { + return nil, err + } + // Try to retrieve ip inside container network namespace + output, err := plugin.execer.Command(nsenterPath, fmt.Sprintf("--net=%s", netnsPath), "-F", "--", + "ip", "-o", "-4", "addr", "show", "dev", network.DefaultInterfaceName).CombinedOutput() + if err != nil { + return nil, fmt.Errorf("Unexpected command output %s with error: %v", output, err) + } + fields := strings.Fields(string(output)) + if len(fields) < 4 { + return nil, fmt.Errorf("Unexpected command output %s ", output) + } + ip, _, err := net.ParseCIDR(fields[3]) + if err != nil { + return nil, fmt.Errorf("Kubenet failed to parse ip from output %s due to %v", output, err) + } + plugin.podIPs[id] = ip.String() + return &network.PodNetworkStatus{IP: ip}, nil +} + +func (plugin *kubenetNetworkPlugin) Status() error { + // Can't set up pods if we don't have a PodCIDR yet + if plugin.netConfig == nil { + return fmt.Errorf("Kubenet does not have netConfig. This is most likely due to lack of PodCIDR") + } + return nil +} + +func (plugin *kubenetNetworkPlugin) buildCNIRuntimeConf(ifName string, id kubecontainer.ContainerID) (*libcni.RuntimeConf, error) { + netnsPath, err := plugin.host.GetRuntime().GetNetNS(id) + if err != nil { + return nil, fmt.Errorf("Kubenet failed to retrieve network namespace path: %v", err) + } + + return &libcni.RuntimeConf{ + ContainerID: id.ID, + NetNS: netnsPath, + IfName: ifName, + }, nil +} + +func (plugin *kubenetNetworkPlugin) addContainerToNetwork(config *libcni.NetworkConfig, ifName, namespace, name string, id kubecontainer.ContainerID) (*cnitypes.Result, error) { + rt, err := plugin.buildCNIRuntimeConf(ifName, id) + if err != nil { + return nil, fmt.Errorf("Error building CNI config: %v", err) + } + + glog.V(3).Infof("Adding %s/%s to '%s' with CNI '%s' plugin and runtime: %+v", namespace, name, config.Network.Name, config.Network.Type, rt) + res, err := plugin.cniConfig.AddNetwork(config, rt) + if err != nil { + return nil, fmt.Errorf("Error adding container to network: %v", err) + } + return res, nil +} + +func (plugin *kubenetNetworkPlugin) delContainerFromNetwork(config *libcni.NetworkConfig, ifName, namespace, name string, id kubecontainer.ContainerID) error { + rt, err := plugin.buildCNIRuntimeConf(ifName, id) + if err != nil { + return fmt.Errorf("Error building CNI config: %v", err) + } + + glog.V(3).Infof("Removing %s/%s from '%s' with CNI '%s' plugin and runtime: %+v", namespace, name, config.Network.Name, config.Network.Type, rt) + if err := plugin.cniConfig.DelNetwork(config, rt); err != nil { + return fmt.Errorf("Error removing container from network: %v", err) + } + return nil +} + +func (plugin *kubenetNetworkPlugin) getNsenterPath() (string, error) { + if plugin.nsenterPath == "" { + nsenterPath, err := plugin.execer.LookPath("nsenter") + if err != nil { + return "", err + } + plugin.nsenterPath = nsenterPath + } + return plugin.nsenterPath, nil +} + +type closeable interface { + Close() error +} + +type hostport struct { + port int32 + protocol string +} + +type targetPod struct { + podFullName string + podIP string +} + +func (hp *hostport) String() string { + return fmt.Sprintf("%s:%d", hp.protocol, hp.port) +} + 
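+// For example, hostport{port: 8080, protocol: "tcp"} renders as "tcp:8080"
+// via String() and is the key under which its socket is tracked in
+// plugin.hostPortMap.
+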
+// openPodHostports opens all hostports for the pod and returns a map of hostport to socket
+func (plugin *kubenetNetworkPlugin) openPodHostports(pod *api.Pod) (map[hostport]closeable, error) {
+ var retErr error
+ hostportMap := make(map[hostport]closeable)
+ for _, container := range pod.Spec.Containers {
+ for _, port := range container.Ports {
+ if port.HostPort <= 0 {
+ // Ignore
+ continue
+ }
+ hp := hostport{
+ port: port.HostPort,
+ protocol: strings.ToLower(string(port.Protocol)),
+ }
+ socket, err := openLocalPort(&hp)
+ if err != nil {
+ retErr = fmt.Errorf("Cannot open hostport %d for pod %s: %v", port.HostPort, kubecontainer.GetPodFullName(pod), err)
+ break
+ }
+ hostportMap[hp] = socket
+ }
+ if retErr != nil {
+ break
+ }
+ }
+ // If we encountered any error, close all hostports that were just opened.
+ if retErr != nil {
+ for hp, socket := range hostportMap {
+ if err := socket.Close(); err != nil {
+ glog.Errorf("Cannot clean up hostport %d for pod %s: %v", hp.port, kubecontainer.GetPodFullName(pod), err)
+ }
+ }
+ }
+ return hostportMap, retErr
+}
+
+// syncHostportMap syncs newly opened hostports into the plugin's map on successful pod setup; if pod setup failed, it cleans them up.
+func (plugin *kubenetNetworkPlugin) syncHostportMap(id kubecontainer.ContainerID, hostportMap map[hostport]closeable) {
+ // If the pod's IP was never recorded in podIPs, assume pod setup failed.
+ if _, ok := plugin.podIPs[id]; !ok {
+ for hp, socket := range hostportMap {
+ err := socket.Close()
+ if err != nil {
+ glog.Errorf("Failed to close socket for hostport %v", hp)
+ }
+ }
+ return
+ }
+ // add newly opened hostports
+ for hp, socket := range hostportMap {
+ plugin.hostPortMap[hp] = socket
+ }
+}
+
+// gatherAllHostports returns all hostports that should be present on the node
+func (plugin *kubenetNetworkPlugin) gatherAllHostports() (map[api.ContainerPort]targetPod, error) {
+ podHostportMap := make(map[api.ContainerPort]targetPod)
+ pods, err := plugin.host.GetRuntime().GetPods(false)
+ if err != nil {
+ return nil, fmt.Errorf("Failed to retrieve pods from runtime: %v", err)
+ }
+ for _, p := range pods {
+ var podInfraContainerId kubecontainer.ContainerID
+ for _, c := range p.Containers {
+ if c.Name == dockertools.PodInfraContainerName {
+ podInfraContainerId = c.ID
+ break
+ }
+ }
+ // Assuming that if kubenet has the pod's IP, the pod is alive and its host ports should be present.
+ podIP, ok := plugin.podIPs[podInfraContainerId]
+ if !ok {
+ // The pod has been deleted; ignore it.
+ continue
+ }
+ // Need the complete api.Pod object
+ pod, ok := plugin.host.GetPodByName(p.Namespace, p.Name)
+ if ok {
+ for _, container := range pod.Spec.Containers {
+ for _, port := range container.Ports {
+ if port.HostPort != 0 {
+ podHostportMap[port] = targetPod{podFullName: kubecontainer.GetPodFullName(pod), podIP: podIP}
+ }
+ }
+ }
+ }
+ }
+ return podHostportMap, nil
+}
+
+// Join all words with spaces, terminate with newline and write to buf.
+func writeLine(buf *bytes.Buffer, words ...string) {
+ buf.WriteString(strings.Join(words, " ") + "\n")
+}
+
+// hostportChainName takes a containerPort for a pod and returns the
+// associated iptables chain name. This is computed by hashing (sha256) the
+// port, protocol, and pod full name, encoding to base32, truncating, and
+// prepending the prefix "KUBENET-HP-". We do this because iptables chain
+// names must be <= 28 chars long, and the longer they are the harder they
+// are to read.
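+// For example, TCP hostport 8080 of pod "ns/pod" yields a chain name of the
+// form "KUBENET-HP-<16 base32 chars>" (e.g. "KUBENET-HP-ABCDEF0123456789";
+// illustrative value only).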
+func hostportChainName(cp api.ContainerPort, podFullName string) utiliptables.Chain {
+ // Note: fmt.Sprintf is used for the port; a plain string(int32) conversion
+ // would produce a single rune, not the decimal representation.
+ hash := sha256.Sum256([]byte(fmt.Sprintf("%d", cp.HostPort) + string(cp.Protocol) + podFullName))
+ encoded := base32.StdEncoding.EncodeToString(hash[:])
+ return utiliptables.Chain(kubenetHostportChainPrefix + encoded[:16])
+}
+
+// syncHostportsRules gathers all hostports on the node, sets up iptables rules to enable them, and finally cleans up stale hostports
+func (plugin *kubenetNetworkPlugin) syncHostportsRules() {
+ start := time.Now()
+ defer func() {
+ glog.V(4).Infof("syncHostportsRules took %v", time.Since(start))
+ }()
+
+ containerPortMap, err := plugin.gatherAllHostports()
+ if err != nil {
+ glog.Errorf("Failed to get hostports: %v", err)
+ return
+ }
+
+ glog.V(4).Info("Ensuring kubenet hostport chains")
+ // Ensure kubenetHostportChain
+ if _, err := plugin.iptables.EnsureChain(utiliptables.TableNAT, kubenetHostportsChain); err != nil {
+ glog.Errorf("Failed to ensure that %s chain %s exists: %v", utiliptables.TableNAT, kubenetHostportsChain, err)
+ return
+ }
+ tableChainsNeedJumpServices := []struct {
+ table utiliptables.Table
+ chain utiliptables.Chain
+ }{
+ {utiliptables.TableNAT, utiliptables.ChainOutput},
+ {utiliptables.TableNAT, utiliptables.ChainPrerouting},
+ }
+ args := []string{"-m", "comment", "--comment", "kubenet hostport portals",
+ "-m", "addrtype", "--dst-type", "LOCAL",
+ "-j", string(kubenetHostportsChain)}
+ for _, tc := range tableChainsNeedJumpServices {
+ if _, err := plugin.iptables.EnsureRule(utiliptables.Prepend, tc.table, tc.chain, args...); err != nil {
+ glog.Errorf("Failed to ensure that %s chain %s jumps to %s: %v", tc.table, tc.chain, kubenetHostportsChain, err)
+ return
+ }
+ }
+ // Need to SNAT traffic from localhost
+ args = []string{"-m", "comment", "--comment", "SNAT for localhost access to hostports", "-o", BridgeName, "-s", "127.0.0.0/8", "-j", "MASQUERADE"}
+ if _, err := plugin.iptables.EnsureRule(utiliptables.Append, utiliptables.TableNAT, utiliptables.ChainPostrouting, args...); err != nil {
+ glog.Errorf("Failed to ensure that %s chain %s jumps to MASQUERADE: %v", utiliptables.TableNAT, utiliptables.ChainPostrouting, err)
+ return
+ }
+
+ // Get iptables-save output so we can check for existing chains and rules.
+ // This will be a map of chain name to chain with rules as stored in iptables-save/iptables-restore
+ existingNATChains := make(map[utiliptables.Chain]string)
+ iptablesSaveRaw, err := plugin.iptables.Save(utiliptables.TableNAT)
+ if err != nil { // if we failed to get any rules
+ glog.Errorf("Failed to execute iptables-save, syncing all rules: %v", err)
+ } else { // otherwise parse the output
+ existingNATChains = utiliptables.GetChainLines(utiliptables.TableNAT, iptablesSaveRaw)
+ }
+
+ natChains := bytes.NewBuffer(nil)
+ natRules := bytes.NewBuffer(nil)
+ writeLine(natChains, "*nat")
+ // Make sure we keep stats for the top-level chains, if they existed
+ // (which most should have because we created them above).
+ if chain, ok := existingNATChains[kubenetHostportsChain]; ok {
+ writeLine(natChains, chain)
+ } else {
+ writeLine(natChains, utiliptables.MakeChainLine(kubenetHostportsChain))
+ }
+ // Assuming the node is running kube-proxy in iptables mode
+ // Reusing kube-proxy's KubeMarkMasqChain for SNAT
+ // TODO: let kubelet manage KubeMarkMasqChain; other components should just be able to use it
+ if chain, ok := existingNATChains[iptablesproxy.KubeMarkMasqChain]; ok {
+ writeLine(natChains, chain)
+ } else {
+ writeLine(natChains, utiliptables.MakeChainLine(iptablesproxy.KubeMarkMasqChain))
+ }
+
+ // Accumulate NAT chains to keep.
+ activeNATChains := map[utiliptables.Chain]bool{} // use a map as a set
+
+ for containerPort, target := range containerPortMap {
+ protocol := strings.ToLower(string(containerPort.Protocol))
+ hostportChain := hostportChainName(containerPort, target.podFullName)
+ if chain, ok := existingNATChains[hostportChain]; ok {
+ writeLine(natChains, chain)
+ } else {
+ writeLine(natChains, utiliptables.MakeChainLine(hostportChain))
+ }
+
+ activeNATChains[hostportChain] = true
+
+ // Redirect to hostport chain
+ args := []string{
+ "-A", string(kubenetHostportsChain),
+ "-m", "comment", "--comment", fmt.Sprintf(`"%s hostport %d"`, target.podFullName, containerPort.HostPort),
+ "-m", protocol, "-p", protocol,
+ "--dport", fmt.Sprintf("%d", containerPort.HostPort),
+ "-j", string(hostportChain),
+ }
+ writeLine(natRules, args...)
+
+ // If the request comes from the pod that is serving the hostport, then SNAT
+ args = []string{
+ "-A", string(hostportChain),
+ "-m", "comment", "--comment", fmt.Sprintf(`"%s hostport %d"`, target.podFullName, containerPort.HostPort),
+ "-s", target.podIP, "-j", string(iptablesproxy.KubeMarkMasqChain),
+ }
+ writeLine(natRules, args...)
+
+ // Create the hostport chain to DNAT traffic to its final destination.
+ // iptables will maintain the stats for this chain
+ args = []string{
+ "-A", string(hostportChain),
+ "-m", "comment", "--comment", fmt.Sprintf(`"%s hostport %d"`, target.podFullName, containerPort.HostPort),
+ "-m", protocol, "-p", protocol,
+ "-j", "DNAT", fmt.Sprintf("--to-destination=%s:%d", target.podIP, containerPort.ContainerPort),
+ }
+ writeLine(natRules, args...)
+ }
+
+ // Delete chains no longer in use.
+ for chain := range existingNATChains {
+ if !activeNATChains[chain] {
+ chainString := string(chain)
+ if !strings.HasPrefix(chainString, kubenetHostportChainPrefix) {
+ // Ignore chains that aren't ours.
+ continue
+ }
+ // We must (as per iptables) write a chain-line for it, which has
+ // the nice effect of flushing the chain. Then we can remove the
+ // chain.
+ writeLine(natChains, existingNATChains[chain])
+ writeLine(natRules, "-X", chainString)
+ }
+ }
+ writeLine(natRules, "COMMIT")
+
+ natLines := append(natChains.Bytes(), natRules.Bytes()...)
+ glog.V(3).Infof("Restoring iptables rules: %s", natLines)
+ err = plugin.iptables.RestoreAll(natLines, utiliptables.NoFlushTables, utiliptables.RestoreCounters)
+ if err != nil {
+ glog.Errorf("Failed to execute iptables-restore: %v", err)
+ return
+ }
+
+ plugin.cleanupHostportMap(containerPortMap)
+}
+
+func openLocalPort(hp *hostport) (closeable, error) {
+ // For ports on node IPs, open the actual port and hold it, even though we
+ // use iptables to redirect traffic.
+ // This ensures a) that it's safe to use that port and b) that (a) stays
+ // true. The risk is that some process on the node (e.g. sshd or kubelet)
+ // is using a port and we give that same port out to a Service. That would
+ // be bad because iptables would silently claim the traffic but the process
+ // would never know.
+ // NOTE: We should not need to have a real listen()ing socket - bind()
+ // should be enough, but I can't figure out a way to e2e test without
+ // it.
Tools like 'ss' and 'netstat' do not show sockets that are + // bind()ed but not listen()ed, and at least the default debian netcat + // has no way to avoid about 10 seconds of retries. + var socket closeable + switch hp.protocol { + case "tcp": + listener, err := net.Listen("tcp", fmt.Sprintf(":%d", hp.port)) + if err != nil { + return nil, err + } + socket = listener + case "udp": + addr, err := net.ResolveUDPAddr("udp", fmt.Sprintf(":%d", hp.port)) + if err != nil { + return nil, err + } + conn, err := net.ListenUDP("udp", addr) + if err != nil { + return nil, err + } + socket = conn + default: + return nil, fmt.Errorf("unknown protocol %q", hp.protocol) + } + glog.V(2).Infof("Opened local port %s", hp.String()) + return socket, nil +} + +// cleanupHostportMap closes obsolete hostports +func (plugin *kubenetNetworkPlugin) cleanupHostportMap(containerPortMap map[api.ContainerPort]targetPod) { + // compute hostports that are supposed to be open + currentHostports := make(map[hostport]bool) + for containerPort := range containerPortMap { + hp := hostport{ + port: containerPort.HostPort, + protocol: string(containerPort.Protocol), + } + currentHostports[hp] = true + } + + // close and delete obsolete hostports + for hp, socket := range plugin.hostPortMap { + if _, ok := currentHostports[hp]; !ok { + socket.Close() + delete(plugin.hostPortMap, hp) + } + } +} + +// shaper retrieves the bandwidth shaper and, if it hasn't been fetched before, +// initializes it and ensures the bridge is appropriately configured +// This function should only be called while holding the `plugin.mu` lock +func (plugin *kubenetNetworkPlugin) shaper() bandwidth.BandwidthShaper { + if plugin.bandwidthShaper == nil { + plugin.bandwidthShaper = bandwidth.NewTCShaper(BridgeName) + plugin.ensureBridgeTxQueueLen() + plugin.bandwidthShaper.ReconcileInterface() + } + return plugin.bandwidthShaper +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/network/kubenet/kubenet_linux_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/network/kubenet/kubenet_linux_test.go new file mode 100644 index 000000000000..051a0f2494fe --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/network/kubenet/kubenet_linux_test.go @@ -0,0 +1,158 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package kubenet
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/mock"
+
+ kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
+ "k8s.io/kubernetes/pkg/kubelet/network"
+ "k8s.io/kubernetes/pkg/kubelet/network/cni/testing"
+ nettest "k8s.io/kubernetes/pkg/kubelet/network/testing"
+ "k8s.io/kubernetes/pkg/util/bandwidth"
+ "k8s.io/kubernetes/pkg/util/exec"
+ ipttest "k8s.io/kubernetes/pkg/util/iptables/testing"
+)
+
+// test it fulfills the NetworkPlugin interface
+var _ network.NetworkPlugin = &kubenetNetworkPlugin{}
+
+func newFakeKubenetPlugin(initMap map[kubecontainer.ContainerID]string, execer exec.Interface, host network.Host) *kubenetNetworkPlugin {
+ return &kubenetNetworkPlugin{
+ podIPs: initMap,
+ execer: execer,
+ MTU: 1460,
+ host: host,
+ }
+}
+
+func TestGetPodNetworkStatus(t *testing.T) {
+ podIPMap := make(map[kubecontainer.ContainerID]string)
+ podIPMap[kubecontainer.ContainerID{ID: "1"}] = "10.245.0.2"
+ podIPMap[kubecontainer.ContainerID{ID: "2"}] = "10.245.0.3"
+
+ testCases := []struct {
+ id string
+ expectError bool
+ expectIP string
+ }{
+ // in the podIPs map
+ {
+ "1",
+ false,
+ "10.245.0.2",
+ },
+ {
+ "2",
+ false,
+ "10.245.0.3",
+ },
+ // not in the podIPs map
+ {
+ "3",
+ true,
+ "",
+ },
+ // TODO: add test cases for retrieving the IP inside the container network namespace
+ }
+
+ fakeCmds := make([]exec.FakeCommandAction, 0)
+ for _, tc := range testCases {
+ tc := tc // capture the range variable; the closure below runs after the loop
+ // each fake command returns its test case's IP, or an error if it has none
+ fCmd := exec.FakeCmd{
+ CombinedOutputScript: []exec.FakeCombinedOutputAction{
+ func() ([]byte, error) {
+ ip, ok := podIPMap[kubecontainer.ContainerID{ID: tc.id}]
+ if !ok {
+ return nil, fmt.Errorf("Pod IP %q not found", tc.id)
+ }
+ return []byte(ip), nil
+ },
+ },
+ }
+ fakeCmds = append(fakeCmds, func(cmd string, args ...string) exec.Cmd {
+ return exec.InitFakeCmd(&fCmd, cmd, args...)
+ })
+ }
+ fexec := exec.FakeExec{
+ CommandScript: fakeCmds,
+ LookPathFunc: func(file string) (string, error) {
+ return fmt.Sprintf("/fake-bin/%s", file), nil
+ },
+ }
+
+ fhost := nettest.NewFakeHost(nil)
+ fakeKubenet := newFakeKubenetPlugin(podIPMap, &fexec, fhost)
+
+ for i, tc := range testCases {
+ out, err := fakeKubenet.GetPodNetworkStatus("", "", kubecontainer.ContainerID{ID: tc.id})
+ if tc.expectError {
+ if err == nil {
+ t.Errorf("Test case %d expects error but got none", i)
+ }
+ continue
+ } else {
+ if err != nil {
+ t.Errorf("Test case %d expects no error but got error: %v", i, err)
+ }
+ }
+ if tc.expectIP != out.IP.String() {
+ t.Errorf("Test case %d expects ip %s but got %s", i, tc.expectIP, out.IP.String())
+ }
+ }
+}
+
+// TestTeardownCallsShaper tests that a `TearDown` call calls
+// `shaper.Reset`
+func TestTeardownCallsShaper(t *testing.T) {
+ fexec := &exec.FakeExec{
+ CommandScript: []exec.FakeCommandAction{},
+ LookPathFunc: func(file string) (string, error) {
+ return fmt.Sprintf("/fake-bin/%s", file), nil
+ },
+ }
+ fhost := nettest.NewFakeHost(nil)
+ fshaper := &bandwidth.FakeShaper{}
+ mockcni := &mock_cni.MockCNI{}
+ kubenet := newFakeKubenetPlugin(map[kubecontainer.ContainerID]string{}, fexec, fhost)
+ kubenet.cniConfig = mockcni
+ kubenet.iptables = ipttest.NewFake()
+ kubenet.bandwidthShaper = fshaper
+
+ mockcni.On("DelNetwork", mock.AnythingOfType("*libcni.NetworkConfig"), mock.AnythingOfType("*libcni.RuntimeConf")).Return(nil)
+
+ details := make(map[string]interface{})
+ details[network.NET_PLUGIN_EVENT_POD_CIDR_CHANGE_DETAIL_CIDR] = "10.0.0.1/24"
+ kubenet.Event(network.NET_PLUGIN_EVENT_POD_CIDR_CHANGE, details)
+
+ existingContainerID := kubecontainer.BuildContainerID("docker", "123")
+ kubenet.podIPs[existingContainerID] = "10.0.0.1"
+
+ if err := kubenet.TearDownPod("namespace", "name", existingContainerID); err != nil {
+ t.Fatalf("Unexpected error in TearDownPod: %v", err)
+ }
+ assert.Equal(t, []string{"10.0.0.1/32"}, fshaper.ResetCIDRs, "shaper.Reset should have been called")
+
+ mockcni.AssertExpectations(t)
+}
+
+// TODO: add unit tests for each implementation of the network plugin interface
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/network/kubenet/kubenet_unsupported.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/network/kubenet/kubenet_unsupported.go
new file mode 100644
index 000000000000..d2408bc25f38
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/network/kubenet/kubenet_unsupported.go
@@ -0,0 +1,55 @@
+// +build !linux
+
+/*
+Copyright 2014 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package kubenet + +import ( + "fmt" + + "k8s.io/kubernetes/pkg/apis/componentconfig" + kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" + "k8s.io/kubernetes/pkg/kubelet/network" +) + +type kubenetNetworkPlugin struct { + network.NoopNetworkPlugin +} + +func NewPlugin() network.NetworkPlugin { + return &kubenetNetworkPlugin{} +} + +func (plugin *kubenetNetworkPlugin) Init(host network.Host, hairpinMode componentconfig.HairpinMode) error { + return fmt.Errorf("Kubenet is not supported in this build") +} + +func (plugin *kubenetNetworkPlugin) Name() string { + return "kubenet" +} + +func (plugin *kubenetNetworkPlugin) SetUpPod(namespace string, name string, id kubecontainer.ContainerID) error { + return fmt.Errorf("Kubenet is not supported in this build") +} + +func (plugin *kubenetNetworkPlugin) TearDownPod(namespace string, name string, id kubecontainer.ContainerID) error { + return fmt.Errorf("Kubenet is not supported in this build") +} + +func (plugin *kubenetNetworkPlugin) GetPodNetworkStatus(namespace string, name string, id kubecontainer.ContainerID) (*network.PodNetworkStatus, error) { + return nil, fmt.Errorf("Kubenet is not supported in this build") +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/network/mock_network/network_plugins.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/network/mock_network/network_plugins.go new file mode 100644 index 000000000000..6055de6e218b --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/network/mock_network/network_plugins.go @@ -0,0 +1,129 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Generated code, generated via: `mockgen k8s.io/kubernetes/pkg/kubelet/network NetworkPlugin > $GOPATH/src/k8s.io/kubernetes/pkg/kubelet/network/mock_network/network_plugins.go` +// Edited by hand for boilerplate and gofmt. +// TODO, this should be autogenerated/autoupdated by scripts. 
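+//
+// Tests use the standard gomock pattern: register expectations through the
+// recorder, then exercise the mock, e.g. (illustrative):
+//   plug := NewMockNetworkPlugin(ctrl)
+//   plug.EXPECT().Name().Return("fake")
+//   name := plug.Name() // replayed via ctrl.Call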
+ +package mock_network + +import ( + gomock "github.com/golang/mock/gomock" + componentconfig "k8s.io/kubernetes/pkg/apis/componentconfig" + container "k8s.io/kubernetes/pkg/kubelet/container" + network "k8s.io/kubernetes/pkg/kubelet/network" + sets "k8s.io/kubernetes/pkg/util/sets" +) + +// Mock of NetworkPlugin interface +type MockNetworkPlugin struct { + ctrl *gomock.Controller + recorder *_MockNetworkPluginRecorder +} + +// Recorder for MockNetworkPlugin (not exported) +type _MockNetworkPluginRecorder struct { + mock *MockNetworkPlugin +} + +func NewMockNetworkPlugin(ctrl *gomock.Controller) *MockNetworkPlugin { + mock := &MockNetworkPlugin{ctrl: ctrl} + mock.recorder = &_MockNetworkPluginRecorder{mock} + return mock +} + +func (_m *MockNetworkPlugin) EXPECT() *_MockNetworkPluginRecorder { + return _m.recorder +} + +func (_m *MockNetworkPlugin) Capabilities() sets.Int { + ret := _m.ctrl.Call(_m, "Capabilities") + ret0, _ := ret[0].(sets.Int) + return ret0 +} + +func (_mr *_MockNetworkPluginRecorder) Capabilities() *gomock.Call { + return _mr.mock.ctrl.RecordCall(_mr.mock, "Capabilities") +} + +func (_m *MockNetworkPlugin) Event(_param0 string, _param1 map[string]interface{}) { + _m.ctrl.Call(_m, "Event", _param0, _param1) +} + +func (_mr *_MockNetworkPluginRecorder) Event(arg0, arg1 interface{}) *gomock.Call { + return _mr.mock.ctrl.RecordCall(_mr.mock, "Event", arg0, arg1) +} + +func (_m *MockNetworkPlugin) GetPodNetworkStatus(_param0 string, _param1 string, _param2 container.ContainerID) (*network.PodNetworkStatus, error) { + ret := _m.ctrl.Call(_m, "GetPodNetworkStatus", _param0, _param1, _param2) + ret0, _ := ret[0].(*network.PodNetworkStatus) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +func (_mr *_MockNetworkPluginRecorder) GetPodNetworkStatus(arg0, arg1, arg2 interface{}) *gomock.Call { + return _mr.mock.ctrl.RecordCall(_mr.mock, "GetPodNetworkStatus", arg0, arg1, arg2) +} + +func (_m *MockNetworkPlugin) Init(_param0 network.Host, _param1 componentconfig.HairpinMode) error { + ret := _m.ctrl.Call(_m, "Init", _param0, _param1) + ret0, _ := ret[0].(error) + return ret0 +} + +func (_mr *_MockNetworkPluginRecorder) Init(arg0, arg1 interface{}) *gomock.Call { + return _mr.mock.ctrl.RecordCall(_mr.mock, "Init", arg0, arg1) +} + +func (_m *MockNetworkPlugin) Name() string { + ret := _m.ctrl.Call(_m, "Name") + ret0, _ := ret[0].(string) + return ret0 +} + +func (_mr *_MockNetworkPluginRecorder) Name() *gomock.Call { + return _mr.mock.ctrl.RecordCall(_mr.mock, "Name") +} + +func (_m *MockNetworkPlugin) SetUpPod(_param0 string, _param1 string, _param2 container.ContainerID) error { + ret := _m.ctrl.Call(_m, "SetUpPod", _param0, _param1, _param2) + ret0, _ := ret[0].(error) + return ret0 +} + +func (_mr *_MockNetworkPluginRecorder) SetUpPod(arg0, arg1, arg2 interface{}) *gomock.Call { + return _mr.mock.ctrl.RecordCall(_mr.mock, "SetUpPod", arg0, arg1, arg2) +} + +func (_m *MockNetworkPlugin) Status() error { + ret := _m.ctrl.Call(_m, "Status") + ret0, _ := ret[0].(error) + return ret0 +} + +func (_mr *_MockNetworkPluginRecorder) Status() *gomock.Call { + return _mr.mock.ctrl.RecordCall(_mr.mock, "Status") +} + +func (_m *MockNetworkPlugin) TearDownPod(_param0 string, _param1 string, _param2 container.ContainerID) error { + ret := _m.ctrl.Call(_m, "TearDownPod", _param0, _param1, _param2) + ret0, _ := ret[0].(error) + return ret0 +} + +func (_mr *_MockNetworkPluginRecorder) TearDownPod(arg0, arg1, arg2 interface{}) *gomock.Call { + return _mr.mock.ctrl.RecordCall(_mr.mock, 
"TearDownPod", arg0, arg1, arg2) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/network/network.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/network/network.go new file mode 100644 index 000000000000..1396d4155853 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/network/network.go @@ -0,0 +1,20 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package network + +// TODO: Consider making this value configurable. +const DefaultInterfaceName = "eth0" diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/network/plugins.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/network/plugins.go new file mode 100644 index 000000000000..1d52415b22cb --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/network/plugins.go @@ -0,0 +1,201 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package network + +import ( + "fmt" + "net" + "strings" + + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" + + "github.com/golang/glog" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/apis/componentconfig" + kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" + utilerrors "k8s.io/kubernetes/pkg/util/errors" + utilexec "k8s.io/kubernetes/pkg/util/exec" + utilsets "k8s.io/kubernetes/pkg/util/sets" + utilsysctl "k8s.io/kubernetes/pkg/util/sysctl" + "k8s.io/kubernetes/pkg/util/validation" +) + +const DefaultPluginName = "kubernetes.io/no-op" + +// Called when the node's Pod CIDR is known when using the +// controller manager's --allocate-node-cidrs=true option +const NET_PLUGIN_EVENT_POD_CIDR_CHANGE = "pod-cidr-change" +const NET_PLUGIN_EVENT_POD_CIDR_CHANGE_DETAIL_CIDR = "pod-cidr" + +// Plugin capabilities +const ( + // Indicates the plugin handles Kubernetes bandwidth shaping annotations internally + NET_PLUGIN_CAPABILITY_SHAPING int = 1 +) + +// Plugin is an interface to network plugins for the kubelet +type NetworkPlugin interface { + // Init initializes the plugin. This will be called exactly once + // before any other methods are called. + Init(host Host, hairpinMode componentconfig.HairpinMode) error + + // Called on various events like: + // NET_PLUGIN_EVENT_POD_CIDR_CHANGE + Event(name string, details map[string]interface{}) + + // Name returns the plugin's name. This will be used when searching + // for a plugin by name, e.g. 
+ Name() string + + // Returns a set of NET_PLUGIN_CAPABILITY_* + Capabilities() utilsets.Int + + // SetUpPod is the method called after the infra container of + // the pod has been created but before the other containers of the + // pod are launched. + SetUpPod(namespace string, name string, podInfraContainerID kubecontainer.ContainerID) error + + // TearDownPod is the method called before a pod's infra container will be deleted + TearDownPod(namespace string, name string, podInfraContainerID kubecontainer.ContainerID) error + + // Status is the method called to obtain the ipv4 or ipv6 addresses of the container + GetPodNetworkStatus(namespace string, name string, podInfraContainerID kubecontainer.ContainerID) (*PodNetworkStatus, error) + + // NetworkStatus returns error if the network plugin is in error state + Status() error +} + +// PodNetworkStatus stores the network status of a pod (currently just the primary IP address) +// This struct represents version "v1beta1" +type PodNetworkStatus struct { + unversioned.TypeMeta `json:",inline"` + + // IP is the primary ipv4/ipv6 address of the pod. Among other things it is the address that - + // - kube expects to be reachable across the cluster + // - service endpoints are constructed with + // - will be reported in the PodStatus.PodIP field (will override the IP reported by docker) + IP net.IP `json:"ip" description:"Primary IP address of the pod"` +} + +// Host is an interface that plugins can use to access the kubelet. +type Host interface { + // Get the pod structure by its name, namespace + GetPodByName(namespace, name string) (*api.Pod, bool) + + // GetKubeClient returns a client interface + GetKubeClient() clientset.Interface + + // GetContainerRuntime returns the container runtime that implements the containers (e.g. docker/rkt) + GetRuntime() kubecontainer.Runtime +} + +// InitNetworkPlugin inits the plugin that matches networkPluginName. Plugins must have unique names. 
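+// For example, networkPluginName "kubenet" selects the kubenet plugin if it
+// is in the plugins list, while an empty name falls back to the no-op plugin.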
+func InitNetworkPlugin(plugins []NetworkPlugin, networkPluginName string, host Host, hairpinMode componentconfig.HairpinMode) (NetworkPlugin, error) { + if networkPluginName == "" { + // default to the no_op plugin + plug := &NoopNetworkPlugin{} + if err := plug.Init(host, hairpinMode); err != nil { + return nil, err + } + return plug, nil + } + + pluginMap := map[string]NetworkPlugin{} + + allErrs := []error{} + for _, plugin := range plugins { + name := plugin.Name() + if errs := validation.IsQualifiedName(name); len(errs) != 0 { + allErrs = append(allErrs, fmt.Errorf("network plugin has invalid name: %q: %s", name, strings.Join(errs, ";"))) + continue + } + + if _, found := pluginMap[name]; found { + allErrs = append(allErrs, fmt.Errorf("network plugin %q was registered more than once", name)) + continue + } + pluginMap[name] = plugin + } + + chosenPlugin := pluginMap[networkPluginName] + if chosenPlugin != nil { + err := chosenPlugin.Init(host, hairpinMode) + if err != nil { + allErrs = append(allErrs, fmt.Errorf("Network plugin %q failed init: %v", networkPluginName, err)) + } else { + glog.V(1).Infof("Loaded network plugin %q", networkPluginName) + } + } else { + allErrs = append(allErrs, fmt.Errorf("Network plugin %q not found.", networkPluginName)) + } + + return chosenPlugin, utilerrors.NewAggregate(allErrs) +} + +func UnescapePluginName(in string) string { + return strings.Replace(in, "~", "/", -1) +} + +type NoopNetworkPlugin struct { +} + +const sysctlBridgeCallIptables = "net/bridge/bridge-nf-call-iptables" + +func (plugin *NoopNetworkPlugin) Init(host Host, hairpinMode componentconfig.HairpinMode) error { + // Set bridge-nf-call-iptables=1 to maintain compatibility with older + // kubernetes versions to ensure the iptables-based kube proxy functions + // correctly. Other plugins are responsible for setting this correctly + // depending on whether or not they connect containers to Linux bridges + // or use some other mechanism (ie, SDN vswitch). + + // Ensure the netfilter module is loaded on kernel >= 3.18; previously + // it was built-in. + utilexec.New().Command("modprobe", "br-netfilter").CombinedOutput() + if err := utilsysctl.SetSysctl(sysctlBridgeCallIptables, 1); err != nil { + glog.Warningf("can't set sysctl %s: %v", sysctlBridgeCallIptables, err) + } + + return nil +} + +func (plugin *NoopNetworkPlugin) Event(name string, details map[string]interface{}) { +} + +func (plugin *NoopNetworkPlugin) Name() string { + return DefaultPluginName +} + +func (plugin *NoopNetworkPlugin) Capabilities() utilsets.Int { + return utilsets.NewInt() +} + +func (plugin *NoopNetworkPlugin) SetUpPod(namespace string, name string, id kubecontainer.ContainerID) error { + return nil +} + +func (plugin *NoopNetworkPlugin) TearDownPod(namespace string, name string, id kubecontainer.ContainerID) error { + return nil +} + +func (plugin *NoopNetworkPlugin) GetPodNetworkStatus(namespace string, name string, id kubecontainer.ContainerID) (*PodNetworkStatus, error) { + return nil, nil +} + +func (plugin *NoopNetworkPlugin) Status() error { + return nil +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/network/plugins_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/network/plugins_test.go new file mode 100644 index 000000000000..e6ea9ed283fb --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/network/plugins_test.go @@ -0,0 +1,38 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package network + +import ( + "testing" + + "k8s.io/kubernetes/pkg/apis/componentconfig" + nettest "k8s.io/kubernetes/pkg/kubelet/network/testing" +) + +func TestSelectDefaultPlugin(t *testing.T) { + all_plugins := []NetworkPlugin{} + plug, err := InitNetworkPlugin(all_plugins, "", nettest.NewFakeHost(nil), componentconfig.HairpinNone) + if err != nil { + t.Fatalf("Unexpected error in selecting default plugin: %v", err) + } + if plug == nil { + t.Fatalf("Failed to select the default plugin.") + } + if plug.Name() != DefaultPluginName { + t.Errorf("Failed to select the default plugin. Expected %s. Got %s", DefaultPluginName, plug.Name()) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/network/testing/fake_host.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/network/testing/fake_host.go new file mode 100644 index 000000000000..9b0f349ab2bc --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/network/testing/fake_host.go @@ -0,0 +1,48 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testing + +// helper for testing plugins +// a fake host is created here that can be used by plugins for testing + +import ( + "k8s.io/kubernetes/pkg/api" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" + kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" + containertest "k8s.io/kubernetes/pkg/kubelet/container/testing" +) + +type fakeNetworkHost struct { + kubeClient clientset.Interface +} + +func NewFakeHost(kubeClient clientset.Interface) *fakeNetworkHost { + host := &fakeNetworkHost{kubeClient: kubeClient} + return host +} + +func (fnh *fakeNetworkHost) GetPodByName(name, namespace string) (*api.Pod, bool) { + return nil, false +} + +func (fnh *fakeNetworkHost) GetKubeClient() clientset.Interface { + return nil +} + +func (nh *fakeNetworkHost) GetRuntime() kubecontainer.Runtime { + return &containertest.FakeRuntime{} +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/networks.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/networks.go new file mode 100644 index 000000000000..43674f804103 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/networks.go @@ -0,0 +1,41 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubelet + +import ( + "k8s.io/kubernetes/pkg/api" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" + kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" +) + +// This just exports required functions from kubelet proper, for use by network +// plugins. +type networkHost struct { + kubelet *Kubelet +} + +func (nh *networkHost) GetPodByName(name, namespace string) (*api.Pod, bool) { + return nh.kubelet.GetPodByName(name, namespace) +} + +func (nh *networkHost) GetKubeClient() clientset.Interface { + return nh.kubelet.kubeClient +} + +func (nh *networkHost) GetRuntime() kubecontainer.Runtime { + return nh.kubelet.GetRuntime() +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/oom_watcher.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/oom_watcher.go new file mode 100644 index 000000000000..12dd2c48f5d0 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/oom_watcher.go @@ -0,0 +1,72 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubelet + +import ( + "github.com/golang/glog" + "github.com/google/cadvisor/events" + cadvisorapi "github.com/google/cadvisor/info/v1" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/client/record" + "k8s.io/kubernetes/pkg/kubelet/cadvisor" + "k8s.io/kubernetes/pkg/util/runtime" +) + +type OOMWatcher interface { + Start(ref *api.ObjectReference) error +} + +type realOOMWatcher struct { + cadvisor cadvisor.Interface + recorder record.EventRecorder +} + +func NewOOMWatcher(cadvisor cadvisor.Interface, recorder record.EventRecorder) OOMWatcher { + return &realOOMWatcher{ + cadvisor: cadvisor, + recorder: recorder, + } +} + +const systemOOMEvent = "SystemOOM" + +// Watches cadvisor for system oom's and records an event for every system oom encountered. 
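+// Events are requested for the root container ("/") only, without
+// subcontainers; each one is recorded as a "SystemOOM" warning event against
+// the given ObjectReference (typically the node).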
+func (ow *realOOMWatcher) Start(ref *api.ObjectReference) error { + request := events.Request{ + EventType: map[cadvisorapi.EventType]bool{ + cadvisorapi.EventOom: true, + }, + ContainerName: "/", + IncludeSubcontainers: false, + } + eventChannel, err := ow.cadvisor.WatchEvents(&request) + if err != nil { + return err + } + + go func() { + defer runtime.HandleCrash() + + for event := range eventChannel.GetChannel() { + glog.V(2).Infof("Got sys oom event from cadvisor: %v", event) + ow.recorder.PastEventf(ref, unversioned.Time{Time: event.Timestamp}, api.EventTypeWarning, systemOOMEvent, "System OOM encountered") + } + glog.Errorf("Unexpectedly stopped receiving OOM notifications from cAdvisor") + }() + return nil +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/oom_watcher_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/oom_watcher_test.go new file mode 100644 index 000000000000..5928e87ee9e4 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/oom_watcher_test.go @@ -0,0 +1,39 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubelet + +import ( + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/client/record" + cadvisortest "k8s.io/kubernetes/pkg/kubelet/cadvisor/testing" +) + +func TestBasic(t *testing.T) { + fakeRecorder := &record.FakeRecorder{} + mockCadvisor := &cadvisortest.Fake{} + node := &api.ObjectReference{} + oomWatcher := NewOOMWatcher(mockCadvisor, fakeRecorder) + err := oomWatcher.Start(node) + if err != nil { + t.Errorf("Should not have failed: %v", err) + } + + // TODO: Improve this test once cadvisor exports events.EventChannel as an interface + // and thereby allow using a mock version of cadvisor. +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/pleg/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/pleg/doc.go new file mode 100644 index 000000000000..c8782ee89835 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/pleg/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package pleg contains types and a generic implementation of the pod +// lifecycle event generator. 
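+//
+// A PLEG watches the container runtime and turns observed container state
+// changes (started, died, etc.) into pod-scoped lifecycle events that the
+// kubelet sync loop consumes.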
+package pleg
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/pleg/generic.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/pleg/generic.go
new file mode 100644
index 000000000000..e0340bdd106f
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/pleg/generic.go
@@ -0,0 +1,399 @@
+/*
+Copyright 2015 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package pleg
+
+import (
+ "fmt"
+ "sync/atomic"
+ "time"
+
+ "github.com/golang/glog"
+ kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
+ "k8s.io/kubernetes/pkg/kubelet/metrics"
+ "k8s.io/kubernetes/pkg/types"
+ "k8s.io/kubernetes/pkg/util"
+ "k8s.io/kubernetes/pkg/util/sets"
+ "k8s.io/kubernetes/pkg/util/wait"
+)
+
+// GenericPLEG is an extremely simple generic PLEG that relies solely on
+// periodic listing to discover container changes. It is intended as a
+// temporary replacement for container runtimes that do not support a proper
+// event generator yet.
+//
+// Note that GenericPLEG assumes that a container would not be created,
+// terminated, and garbage collected within one relist period. If such an
+// incident happens, GenericPLEG would miss all events regarding this
+// container. In the case of relisting failure, the window may become longer.
+// Note that this assumption is not unique -- many kubelet internal components
+// rely on terminated containers as tombstones for bookkeeping purposes. The
+// garbage collector is implemented to work with such situations. However, to
+// guarantee that kubelet can handle missing container events, it is
+// recommended to keep the relist period short and to have an auxiliary,
+// longer periodic sync in the kubelet as a safety net.
+type GenericPLEG struct {
+ // The period for relisting.
+ relistPeriod time.Duration
+ // The container runtime.
+ runtime kubecontainer.Runtime
+ // The channel from which the subscriber listens for events.
+ eventChannel chan *PodLifecycleEvent
+ // The internal cache for pod/container information.
+ podRecords podRecords
+ // Time of the last relisting.
+ relistTime atomic.Value
+ // Cache for storing the runtime states required for syncing pods.
+ cache kubecontainer.Cache
+ // For testability.
+ clock util.Clock
+ // Pods that failed to have their status retrieved during a relist. These pods will be
+ // retried during the next relisting.
+ podsToReinspect map[types.UID]*kubecontainer.Pod
+}
+
+// plegContainerState has a one-to-one mapping to the
+// kubecontainer.ContainerState except for the non-existent state. This state
+// is introduced here to complete the state transition scenarios.
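+// For example, a container that was "running" in the previous relist and is
+// absent ("non-existent") in the current one yields a ContainerDied event
+// (see generateEvent below).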
+type plegContainerState string + +const ( + plegContainerRunning plegContainerState = "running" + plegContainerExited plegContainerState = "exited" + plegContainerUnknown plegContainerState = "unknown" + plegContainerNonExistent plegContainerState = "non-existent" +) + +func convertState(state kubecontainer.ContainerState) plegContainerState { + switch state { + case kubecontainer.ContainerStateRunning: + return plegContainerRunning + case kubecontainer.ContainerStateExited: + return plegContainerExited + case kubecontainer.ContainerStateUnknown: + return plegContainerUnknown + default: + panic(fmt.Sprintf("unrecognized container state: %v", state)) + } +} + +type podRecord struct { + old *kubecontainer.Pod + current *kubecontainer.Pod +} + +type podRecords map[types.UID]*podRecord + +func NewGenericPLEG(runtime kubecontainer.Runtime, channelCapacity int, + relistPeriod time.Duration, cache kubecontainer.Cache, clock util.Clock) PodLifecycleEventGenerator { + return &GenericPLEG{ + relistPeriod: relistPeriod, + runtime: runtime, + eventChannel: make(chan *PodLifecycleEvent, channelCapacity), + podRecords: make(podRecords), + cache: cache, + clock: clock, + } +} + +// Returns a channel from which the subscriber can receive PodLifecycleEvent +// events. +// TODO: support multiple subscribers. +func (g *GenericPLEG) Watch() chan *PodLifecycleEvent { + return g.eventChannel +} + +// Start spawns a goroutine to relist periodically. +func (g *GenericPLEG) Start() { + go wait.Until(g.relist, g.relistPeriod, wait.NeverStop) +} + +func (g *GenericPLEG) Healthy() (bool, error) { + relistTime := g.getRelistTime() + // TODO: Evaluate if we can reduce this threshold. + // The threshold needs to be greater than the relisting period + the + // relisting time, which can vary significantly. Set a conservative + // threshold so that we don't cause kubelet to be restarted unnecessarily. + threshold := 2 * time.Minute + if g.clock.Since(relistTime) > threshold { + return false, fmt.Errorf("pleg was last seen active at %v", relistTime) + } + return true, nil +} + +func generateEvent(podID types.UID, cid string, oldState, newState plegContainerState) *PodLifecycleEvent { + if newState == oldState { + return nil + } + glog.V(4).Infof("GenericPLEG: %v/%v: %v -> %v", podID, cid, oldState, newState) + switch newState { + case plegContainerRunning: + return &PodLifecycleEvent{ID: podID, Type: ContainerStarted, Data: cid} + case plegContainerExited: + return &PodLifecycleEvent{ID: podID, Type: ContainerDied, Data: cid} + case plegContainerUnknown: + return &PodLifecycleEvent{ID: podID, Type: ContainerChanged, Data: cid} + case plegContainerNonExistent: + // We report "ContainerDied" when container was stopped OR removed. We + // may want to distinguish the two cases in the future. + switch oldState { + case plegContainerExited: + // We already reported that the container died before. + return &PodLifecycleEvent{ID: podID, Type: ContainerRemoved, Data: cid} + default: + // TODO: We may want to generate a ContainerRemoved event as well. + // It's ok now because no one relies on the ContainerRemoved event. 
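+ // (e.g. a running container whose record vanishes is reported
+ // here as having died)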
+ return &PodLifecycleEvent{ID: podID, Type: ContainerDied, Data: cid} + } + default: + panic(fmt.Sprintf("unrecognized container state: %v", newState)) + } +} + +func (g *GenericPLEG) getRelistTime() time.Time { + val := g.relistTime.Load() + if val == nil { + return time.Time{} + } + return val.(time.Time) +} + +func (g *GenericPLEG) updateRelistTime(timestamp time.Time) { + g.relistTime.Store(timestamp) +} + +// relist queries the container runtime for the list of pods/containers, +// compares it with the internal pods/containers, and generates events +// accordingly. +func (g *GenericPLEG) relist() { + glog.V(5).Infof("GenericPLEG: Relisting") + + if lastRelistTime := g.getRelistTime(); !lastRelistTime.IsZero() { + metrics.PLEGRelistInterval.Observe(metrics.SinceInMicroseconds(lastRelistTime)) + } + + timestamp := g.clock.Now() + // Update the relist time. + g.updateRelistTime(timestamp) + defer func() { + metrics.PLEGRelistLatency.Observe(metrics.SinceInMicroseconds(timestamp)) + }() + + // Get all the pods. + podList, err := g.runtime.GetPods(true) + if err != nil { + glog.Errorf("GenericPLEG: Unable to retrieve pods: %v", err) + return + } + pods := kubecontainer.Pods(podList) + g.podRecords.setCurrent(pods) + + // Compare the old and the current pods, and generate events. + eventsByPodID := map[types.UID][]*PodLifecycleEvent{} + for pid := range g.podRecords { + oldPod := g.podRecords.getOld(pid) + pod := g.podRecords.getCurrent(pid) + // Get all containers in the old and the new pod. + allContainers := getContainersFromPods(oldPod, pod) + for _, container := range allContainers { + e := computeEvent(oldPod, pod, &container.ID) + updateEvents(eventsByPodID, e) + } + } + + var needsReinspection map[types.UID]*kubecontainer.Pod + if g.cacheEnabled() { + needsReinspection = make(map[types.UID]*kubecontainer.Pod) + } + + // If there are events associated with a pod, we should update the + // podCache. + for pid, events := range eventsByPodID { + pod := g.podRecords.getCurrent(pid) + if g.cacheEnabled() { + // updateCache() will inspect the pod and update the cache. If an + // error occurs during the inspection, we want PLEG to retry again + // in the next relist. To achieve this, we do not update the + // associated podRecord of the pod, so that the change will be + // detected again in the next relist. + // TODO: If many pods changed during the same relist period, + // inspecting the pod and getting the PodStatus to update the cache + // serially may take a while. We should be aware of this and + // parallelize if needed. + if err := g.updateCache(pod, pid); err != nil { + glog.Errorf("PLEG: Ignoring events for pod %s/%s: %v", pod.Name, pod.Namespace, err) + + // make sure we try to reinspect the pod during the next relisting + needsReinspection[pid] = pod + + continue + } else if _, found := g.podsToReinspect[pid]; found { + // this pod was in the list to reinspect and we did so because it had events, so remove it + // from the list (we don't want the reinspection code below to inspect it a second time in + // this relist execution) + delete(g.podsToReinspect, pid) + } + } + // Update the internal storage and send out the events. + g.podRecords.update(pid) + for i := range events { + // Filter out events that are not reliable and that no other components use yet.
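getRelistTime and updateRelistTime above publish the last relist timestamp through a sync/atomic Value so that Healthy can read it from another goroutine without locking. A small sketch of that watchdog pattern, using a hypothetical watchdog type:

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

type watchdog struct {
	last atomic.Value // always holds a time.Time once set
}

func (w *watchdog) markActive(t time.Time) { w.last.Store(t) }

func (w *watchdog) lastActive() time.Time {
	v := w.last.Load()
	if v == nil {
		// Never marked active; the zero time makes the check fail closed.
		return time.Time{}
	}
	return v.(time.Time)
}

// healthy reports whether the watched loop ran within the threshold.
func (w *watchdog) healthy(threshold time.Duration) (bool, error) {
	last := w.lastActive()
	if time.Since(last) > threshold {
		return false, fmt.Errorf("last seen active at %v", last)
	}
	return true, nil
}

func main() {
	var w watchdog
	w.markActive(time.Now())
	ok, err := w.healthy(2 * time.Minute)
	fmt.Println(ok, err) // true <nil>
}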
+ if events[i].Type == ContainerChanged || events[i].Type == ContainerRemoved { + continue + } + g.eventChannel <- events[i] + } + } + + if g.cacheEnabled() { + // reinspect any pods that failed inspection during the previous relist + if len(g.podsToReinspect) > 0 { + glog.V(5).Infof("GenericPLEG: Reinspecting pods that previously failed inspection") + for pid, pod := range g.podsToReinspect { + if err := g.updateCache(pod, pid); err != nil { + glog.Errorf("PLEG: pod %s/%s failed reinspection: %v", pod.Name, pod.Namespace, err) + needsReinspection[pid] = pod + } + } + } + + // Update the cache timestamp. This needs to happen *after* + // all pods have been properly updated in the cache. + g.cache.UpdateTime(timestamp) + } + + // make sure we retain the list of pods that need reinspecting the next time relist is called + g.podsToReinspect = needsReinspection +} + +func getContainersFromPods(pods ...*kubecontainer.Pod) []*kubecontainer.Container { + cidSet := sets.NewString() + var containers []*kubecontainer.Container + for _, p := range pods { + if p == nil { + continue + } + for _, c := range p.Containers { + cid := string(c.ID.ID) + if cidSet.Has(cid) { + continue + } + cidSet.Insert(cid) + containers = append(containers, c) + } + } + return containers +} + +func computeEvent(oldPod, newPod *kubecontainer.Pod, cid *kubecontainer.ContainerID) *PodLifecycleEvent { + var pid types.UID + if oldPod != nil { + pid = oldPod.ID + } else if newPod != nil { + pid = newPod.ID + } + oldState := getContainerState(oldPod, cid) + newState := getContainerState(newPod, cid) + return generateEvent(pid, cid.ID, oldState, newState) +} + +func (g *GenericPLEG) cacheEnabled() bool { + return g.cache != nil +} + +func (g *GenericPLEG) updateCache(pod *kubecontainer.Pod, pid types.UID) error { + if pod == nil { + // The pod is missing in the current relist. This means that + // the pod has no visible (active or inactive) containers. + glog.V(4).Infof("PLEG: Delete status for pod %q", string(pid)) + g.cache.Delete(pid) + return nil + } + timestamp := g.clock.Now() + // TODO: Consider adding a new runtime method + // GetPodStatus(pod *kubecontainer.Pod) so that Docker can avoid listing + // all containers again. + status, err := g.runtime.GetPodStatus(pod.ID, pod.Name, pod.Namespace) + glog.V(4).Infof("PLEG: Write status for %s/%s: %+v (err: %v)", pod.Name, pod.Namespace, status, err) + g.cache.Set(pod.ID, status, err, timestamp) + return err +} + +func updateEvents(eventsByPodID map[types.UID][]*PodLifecycleEvent, e *PodLifecycleEvent) { + if e == nil { + return + } + eventsByPodID[e.ID] = append(eventsByPodID[e.ID], e) +} + +func getContainerState(pod *kubecontainer.Pod, cid *kubecontainer.ContainerID) plegContainerState { + // Default to the non-existent state. 
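relist keeps the needsReinspection map so a pod whose status inspection failed is retried on the next pass instead of being silently dropped. A condensed sketch of that retry bookkeeping, with a hypothetical reinspector type and inspect function:

package main

import (
	"errors"
	"fmt"
)

// reinspector retries failed inspections on the next pass.
type reinspector struct {
	pending map[string]bool // pod IDs whose last inspection failed
	inspect func(id string) error
}

// pass inspects the given pods plus anything that failed last time;
// pods that fail again stay pending for the next pass.
func (r *reinspector) pass(ids []string) {
	next := make(map[string]bool)
	for id := range r.pending {
		ids = append(ids, id)
	}
	for _, id := range ids {
		if err := r.inspect(id); err != nil {
			next[id] = true
		}
	}
	r.pending = next
}

func main() {
	failures := map[string]int{"p2": 1} // p2 fails exactly once
	r := &reinspector{
		pending: map[string]bool{},
		inspect: func(id string) error {
			if failures[id] > 0 {
				failures[id]--
				return errors.New("inspection error")
			}
			return nil
		},
	}
	r.pass([]string{"p1", "p2"})
	fmt.Println(len(r.pending)) // 1 (p2 still pending)
	r.pass(nil)
	fmt.Println(len(r.pending)) // 0 (p2 recovered)
}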
+ state := plegContainerNonExistent + if pod == nil { + return state + } + container := pod.FindContainerByID(*cid) + if container == nil { + return state + } + return convertState(container.State) +} + +func (pr podRecords) getOld(id types.UID) *kubecontainer.Pod { + r, ok := pr[id] + if !ok { + return nil + } + return r.old +} + +func (pr podRecords) getCurrent(id types.UID) *kubecontainer.Pod { + r, ok := pr[id] + if !ok { + return nil + } + return r.current +} + +func (pr podRecords) setCurrent(pods []*kubecontainer.Pod) { + for i := range pr { + pr[i].current = nil + } + for _, pod := range pods { + if r, ok := pr[pod.ID]; ok { + r.current = pod + } else { + pr[pod.ID] = &podRecord{current: pod} + } + } +} + +func (pr podRecords) update(id types.UID) { + r, ok := pr[id] + if !ok { + return + } + pr.updateInternal(id, r) +} + +func (pr podRecords) updateInternal(id types.UID, r *podRecord) { + if r.current == nil { + // Pod no longer exists; delete the entry. + delete(pr, id) + return + } + r.old = r.current + r.current = nil +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/pleg/generic_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/pleg/generic_test.go new file mode 100644 index 000000000000..460547d85a1d --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/pleg/generic_test.go @@ -0,0 +1,427 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package pleg + +import ( + "errors" + "fmt" + "reflect" + "sort" + "testing" + "time" + + "github.com/stretchr/testify/assert" + kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" + containertest "k8s.io/kubernetes/pkg/kubelet/container/testing" + "k8s.io/kubernetes/pkg/types" + "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/diff" +) + +const ( + testContainerRuntimeType = "fooRuntime" +) + +type TestGenericPLEG struct { + pleg *GenericPLEG + runtime *containertest.FakeRuntime + clock *util.FakeClock +} + +func newTestGenericPLEG() *TestGenericPLEG { + fakeRuntime := &containertest.FakeRuntime{} + clock := util.NewFakeClock(time.Time{}) + // The channel capacity should be large enough to hold all events in a + // single test. 
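The podRecords helpers above keep exactly two generations per pod: setCurrent fills the current slot from the latest listing, and update promotes current to old, deleting records for pods that disappeared. A stripped-down sketch of the same bookkeeping, with plain strings standing in for pod snapshots:

package main

import "fmt"

type record struct{ old, current string }

type records map[string]*record

// setCurrent replaces the current generation with the latest listing.
func (rs records) setCurrent(listing map[string]string) {
	for _, r := range rs {
		r.current = ""
	}
	for id, v := range listing {
		if r, ok := rs[id]; ok {
			r.current = v
		} else {
			rs[id] = &record{current: v}
		}
	}
}

// update promotes current to old, deleting entries that disappeared.
func (rs records) update(id string) {
	r, ok := rs[id]
	if !ok {
		return
	}
	if r.current == "" {
		delete(rs, id) // pod no longer exists; drop the record
		return
	}
	r.old, r.current = r.current, ""
}

func main() {
	rs := records{}
	rs.setCurrent(map[string]string{"p1": "v1"})
	rs.update("p1")
	rs.setCurrent(map[string]string{}) // p1 disappeared
	rs.update("p1")
	fmt.Println(len(rs)) // 0
}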
+ pleg := &GenericPLEG{ + relistPeriod: time.Hour, + runtime: fakeRuntime, + eventChannel: make(chan *PodLifecycleEvent, 100), + podRecords: make(podRecords), + clock: clock, + } + return &TestGenericPLEG{pleg: pleg, runtime: fakeRuntime, clock: clock} +} + +func getEventsFromChannel(ch <-chan *PodLifecycleEvent) []*PodLifecycleEvent { + events := []*PodLifecycleEvent{} + for len(ch) > 0 { + e := <-ch + events = append(events, e) + } + return events +} + +func createTestContainer(ID string, state kubecontainer.ContainerState) *kubecontainer.Container { + return &kubecontainer.Container{ + ID: kubecontainer.ContainerID{Type: testContainerRuntimeType, ID: ID}, + State: state, + } +} + +type sortableEvents []*PodLifecycleEvent + +func (a sortableEvents) Len() int { return len(a) } +func (a sortableEvents) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a sortableEvents) Less(i, j int) bool { + if a[i].ID != a[j].ID { + return a[i].ID < a[j].ID + } + return a[i].Data.(string) < a[j].Data.(string) +} + +func verifyEvents(t *testing.T, expected, actual []*PodLifecycleEvent) { + sort.Sort(sortableEvents(expected)) + sort.Sort(sortableEvents(actual)) + if !reflect.DeepEqual(expected, actual) { + t.Errorf("Actual events differ from the expected; diff:\n %v", diff.ObjectDiff(expected, actual)) + } +} + +func TestRelisting(t *testing.T) { + testPleg := newTestGenericPLEG() + pleg, runtime := testPleg.pleg, testPleg.runtime + ch := pleg.Watch() + // Seed the runtime with pods for the first relist. + runtime.AllPodList = []*kubecontainer.Pod{ + { + ID: "1234", + Containers: []*kubecontainer.Container{ + createTestContainer("c1", kubecontainer.ContainerStateExited), + createTestContainer("c2", kubecontainer.ContainerStateRunning), + createTestContainer("c3", kubecontainer.ContainerStateUnknown), + }, + }, + { + ID: "4567", + Containers: []*kubecontainer.Container{ + createTestContainer("c1", kubecontainer.ContainerStateExited), + }, + }, + } + pleg.relist() + // Report every running/exited container if we see them for the first time. + expected := []*PodLifecycleEvent{ + {ID: "1234", Type: ContainerStarted, Data: "c2"}, + {ID: "4567", Type: ContainerDied, Data: "c1"}, + {ID: "1234", Type: ContainerDied, Data: "c1"}, + } + actual := getEventsFromChannel(ch) + verifyEvents(t, expected, actual) + + // The second relist should not send out any event because no container + // changed. + pleg.relist() + verifyEvents(t, expected, actual) + + runtime.AllPodList = []*kubecontainer.Pod{ + { + ID: "1234", + Containers: []*kubecontainer.Container{ + createTestContainer("c2", kubecontainer.ContainerStateExited), + createTestContainer("c3", kubecontainer.ContainerStateRunning), + }, + }, + { + ID: "4567", + Containers: []*kubecontainer.Container{ + createTestContainer("c4", kubecontainer.ContainerStateRunning), + }, + }, + } + pleg.relist() + // Only report containers that transitioned to running or exited status. + expected = []*PodLifecycleEvent{ + {ID: "1234", Type: ContainerDied, Data: "c2"}, + {ID: "1234", Type: ContainerStarted, Data: "c3"}, + {ID: "4567", Type: ContainerStarted, Data: "c4"}, + } + + actual = getEventsFromChannel(ch) + verifyEvents(t, expected, actual) +} + +func TestDetectingContainerDeaths(t *testing.T) { + // Vary the number of relists after the container started and before the + // container died to account for the changes in pleg's internal states.
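getEventsFromChannel above drains a buffered channel by polling len(ch); that is safe only because the test owns both ends of the channel and nothing sends concurrently. The pattern in isolation:

package main

import "fmt"

// drain empties a buffered channel that no one else is writing to.
// len(ch) is racy in general; it is reliable here only because the
// caller owns both ends of the channel.
func drain(ch <-chan string) []string {
	var out []string
	for len(ch) > 0 {
		out = append(out, <-ch)
	}
	return out
}

func main() {
	ch := make(chan string, 8)
	ch <- "a"
	ch <- "b"
	fmt.Println(drain(ch)) // [a b]
}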
+ testReportMissingContainers(t, 1) + testReportMissingPods(t, 1) + + testReportMissingContainers(t, 3) + testReportMissingPods(t, 3) +} + +func testReportMissingContainers(t *testing.T, numRelists int) { + testPleg := newTestGenericPLEG() + pleg, runtime := testPleg.pleg, testPleg.runtime + ch := pleg.Watch() + runtime.AllPodList = []*kubecontainer.Pod{ + { + ID: "1234", + Containers: []*kubecontainer.Container{ + createTestContainer("c1", kubecontainer.ContainerStateRunning), + createTestContainer("c2", kubecontainer.ContainerStateRunning), + createTestContainer("c3", kubecontainer.ContainerStateExited), + }, + }, + } + // Relist and drain the events from the channel. + for i := 0; i < numRelists; i++ { + pleg.relist() + getEventsFromChannel(ch) + } + + // Container c2 was stopped and removed between relists. We should report + // the event. The exited container c3 was garbage collected (i.e., removed) + // between relists. We should ignore that event. + runtime.AllPodList = []*kubecontainer.Pod{ + { + ID: "1234", + Containers: []*kubecontainer.Container{ + createTestContainer("c1", kubecontainer.ContainerStateRunning), + }, + }, + } + pleg.relist() + expected := []*PodLifecycleEvent{ + {ID: "1234", Type: ContainerDied, Data: "c2"}, + } + actual := getEventsFromChannel(ch) + verifyEvents(t, expected, actual) +} + +func testReportMissingPods(t *testing.T, numRelists int) { + testPleg := newTestGenericPLEG() + pleg, runtime := testPleg.pleg, testPleg.runtime + ch := pleg.Watch() + runtime.AllPodList = []*kubecontainer.Pod{ + { + ID: "1234", + Containers: []*kubecontainer.Container{ + createTestContainer("c2", kubecontainer.ContainerStateRunning), + }, + }, + } + // Relist and drain the events from the channel. + for i := 0; i < numRelists; i++ { + pleg.relist() + getEventsFromChannel(ch) + } + + // Container c2 was stopped and removed between relists. We should report + // the event. 
+ runtime.AllPodList = []*kubecontainer.Pod{} + pleg.relist() + expected := []*PodLifecycleEvent{ + {ID: "1234", Type: ContainerDied, Data: "c2"}, + } + actual := getEventsFromChannel(ch) + verifyEvents(t, expected, actual) +} + +func newTestGenericPLEGWithRuntimeMock() (*GenericPLEG, *containertest.Mock) { + runtimeMock := &containertest.Mock{} + pleg := &GenericPLEG{ + relistPeriod: time.Hour, + runtime: runtimeMock, + eventChannel: make(chan *PodLifecycleEvent, 100), + podRecords: make(podRecords), + cache: kubecontainer.NewCache(), + clock: util.RealClock{}, + } + return pleg, runtimeMock +} + +func createTestPodsStatusesAndEvents(num int) ([]*kubecontainer.Pod, []*kubecontainer.PodStatus, []*PodLifecycleEvent) { + var pods []*kubecontainer.Pod + var statuses []*kubecontainer.PodStatus + var events []*PodLifecycleEvent + for i := 0; i < num; i++ { + id := types.UID(fmt.Sprintf("test-pod-%d", i)) + cState := kubecontainer.ContainerStateRunning + container := createTestContainer(fmt.Sprintf("c%d", i), cState) + pod := &kubecontainer.Pod{ + ID: id, + Containers: []*kubecontainer.Container{container}, + } + status := &kubecontainer.PodStatus{ + ID: id, + ContainerStatuses: []*kubecontainer.ContainerStatus{{ID: container.ID, State: cState}}, + } + event := &PodLifecycleEvent{ID: pod.ID, Type: ContainerStarted, Data: container.ID.ID} + pods = append(pods, pod) + statuses = append(statuses, status) + events = append(events, event) + + } + return pods, statuses, events +} + +func TestRelistWithCache(t *testing.T) { + pleg, runtimeMock := newTestGenericPLEGWithRuntimeMock() + ch := pleg.Watch() + + pods, statuses, events := createTestPodsStatusesAndEvents(2) + runtimeMock.On("GetPods", true).Return(pods, nil) + runtimeMock.On("GetPodStatus", pods[0].ID, "", "").Return(statuses[0], nil).Once() + // Inject an error when querying runtime for the pod status for pods[1]. + statusErr := fmt.Errorf("unable to get status") + runtimeMock.On("GetPodStatus", pods[1].ID, "", "").Return(&kubecontainer.PodStatus{}, statusErr).Once() + + pleg.relist() + actualEvents := getEventsFromChannel(ch) + cases := []struct { + pod *kubecontainer.Pod + status *kubecontainer.PodStatus + error error + }{ + {pod: pods[0], status: statuses[0], error: nil}, + {pod: pods[1], status: &kubecontainer.PodStatus{}, error: statusErr}, + } + for i, c := range cases { + testStr := fmt.Sprintf("test[%d]", i) + actualStatus, actualErr := pleg.cache.Get(c.pod.ID) + assert.Equal(t, c.status, actualStatus, testStr) + assert.Equal(t, c.error, actualErr, testStr) + } + // pleg should not generate any event for pods[1] because of the error. + assert.Exactly(t, []*PodLifecycleEvent{events[0]}, actualEvents) + + // Return normal status for pods[1]. + runtimeMock.On("GetPodStatus", pods[1].ID, "", "").Return(statuses[1], nil).Once() + pleg.relist() + actualEvents = getEventsFromChannel(ch) + cases = []struct { + pod *kubecontainer.Pod + status *kubecontainer.PodStatus + error error + }{ + {pod: pods[0], status: statuses[0], error: nil}, + {pod: pods[1], status: statuses[1], error: nil}, + } + for i, c := range cases { + testStr := fmt.Sprintf("test[%d]", i) + actualStatus, actualErr := pleg.cache.Get(c.pod.ID) + assert.Equal(t, c.status, actualStatus, testStr) + assert.Equal(t, c.error, actualErr, testStr) + } + // Now that we are able to query status for pods[1], pleg should generate an event. 
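TestRelistWithCache exercises the property that the cache records the inspection error alongside the status, so a consumer reads back exactly the (status, err) pair the inspection produced. A toy cache with that shape (names are hypothetical; the real kubecontainer.Cache also tracks timestamps):

package main

import (
	"errors"
	"fmt"
)

type entry struct {
	status string
	err    error
}

// statusCache stores the last inspection result per pod, error included,
// so readers observe exactly what the inspector saw.
type statusCache map[string]entry

func (c statusCache) set(id, status string, err error) { c[id] = entry{status, err} }

func (c statusCache) get(id string) (string, error) {
	e, ok := c[id]
	if !ok {
		return "", nil // unknown pod: empty status, no error
	}
	return e.status, e.err
}

func main() {
	c := statusCache{}
	c.set("p1", "running", nil)
	c.set("p2", "", errors.New("inspection error"))
	s, err := c.get("p2")
	fmt.Printf("%q %v\n", s, err) // "" inspection error
}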
+ assert.Exactly(t, []*PodLifecycleEvent{events[1]}, actualEvents) +} + +func TestRemoveCacheEntry(t *testing.T) { + pleg, runtimeMock := newTestGenericPLEGWithRuntimeMock() + pods, statuses, _ := createTestPodsStatusesAndEvents(1) + runtimeMock.On("GetPods", true).Return(pods, nil).Once() + runtimeMock.On("GetPodStatus", pods[0].ID, "", "").Return(statuses[0], nil).Once() + // Do a relist to populate the cache. + pleg.relist() + // Delete the pod from runtime. Verify that the cache entry has been + // removed after relisting. + runtimeMock.On("GetPods", true).Return([]*kubecontainer.Pod{}, nil).Once() + pleg.relist() + actualStatus, actualErr := pleg.cache.Get(pods[0].ID) + assert.Equal(t, &kubecontainer.PodStatus{ID: pods[0].ID}, actualStatus) + assert.Equal(t, nil, actualErr) +} + +func TestHealthy(t *testing.T) { + testPleg := newTestGenericPLEG() + pleg, _, clock := testPleg.pleg, testPleg.runtime, testPleg.clock + ok, _ := pleg.Healthy() + assert.True(t, ok, "pleg should be healthy") + + // Advance the clock without any relisting. + clock.Step(time.Minute * 10) + ok, _ = pleg.Healthy() + assert.False(t, ok, "pleg should be unhealthy") + + // Relist and then advance the time by 1 minute. pleg should be healthy + // because this is within the allowed limit. + pleg.relist() + clock.Step(time.Minute * 1) + ok, _ = pleg.Healthy() + assert.True(t, ok, "pleg should be healthy") +} + +func TestRelistWithReinspection(t *testing.T) { + pleg, runtimeMock := newTestGenericPLEGWithRuntimeMock() + ch := pleg.Watch() + + infraContainer := createTestContainer("infra", kubecontainer.ContainerStateRunning) + + podID := types.UID("test-pod") + pods := []*kubecontainer.Pod{{ + ID: podID, + Containers: []*kubecontainer.Container{infraContainer}, + }} + runtimeMock.On("GetPods", true).Return(pods, nil).Once() + + goodStatus := &kubecontainer.PodStatus{ + ID: podID, + ContainerStatuses: []*kubecontainer.ContainerStatus{{ID: infraContainer.ID, State: infraContainer.State}}, + } + runtimeMock.On("GetPodStatus", podID, "", "").Return(goodStatus, nil).Once() + + goodEvent := &PodLifecycleEvent{ID: podID, Type: ContainerStarted, Data: infraContainer.ID.ID} + + // listing 1 - everything ok, infra container set up for pod + pleg.relist() + actualEvents := getEventsFromChannel(ch) + actualStatus, actualErr := pleg.cache.Get(podID) + assert.Equal(t, goodStatus, actualStatus) + assert.Equal(t, nil, actualErr) + assert.Exactly(t, []*PodLifecycleEvent{goodEvent}, actualEvents) + + // listing 2 - pretend runtime was in the middle of creating the non-infra container for the pod + // and return an error during inspection + transientContainer := createTestContainer("transient", kubecontainer.ContainerStateUnknown) + podsWithTransientContainer := []*kubecontainer.Pod{{ + ID: podID, + Containers: []*kubecontainer.Container{infraContainer, transientContainer}, + }} + runtimeMock.On("GetPods", true).Return(podsWithTransientContainer, nil).Once() + + badStatus := &kubecontainer.PodStatus{ + ID: podID, + ContainerStatuses: []*kubecontainer.ContainerStatus{}, + } + runtimeMock.On("GetPodStatus", podID, "", "").Return(badStatus, errors.New("inspection error")).Once() + + pleg.relist() + actualEvents = getEventsFromChannel(ch) + actualStatus, actualErr = pleg.cache.Get(podID) + assert.Equal(t, badStatus, actualStatus) + assert.Equal(t, errors.New("inspection error"), actualErr) + assert.Exactly(t, []*PodLifecycleEvent{}, actualEvents) + + // listing 3 - pretend the transient container has now disappeared, leaving just the infra
+ // container. Make sure the pod is reinspected for its status and the cache is updated. + runtimeMock.On("GetPods", true).Return(pods, nil).Once() + runtimeMock.On("GetPodStatus", podID, "", "").Return(goodStatus, nil).Once() + + pleg.relist() + actualEvents = getEventsFromChannel(ch) + actualStatus, actualErr = pleg.cache.Get(podID) + assert.Equal(t, goodStatus, actualStatus) + assert.Equal(t, nil, actualErr) + // no events are expected because relist #1 set the old pod record which has the infra container + // running. relist #2 had the inspection error and therefore didn't modify either old or new. + // relist #3 forced the reinspection of the pod to retrieve its status, but because the list of + // containers was the same as relist #1, nothing "changed", so there are no new events. + assert.Exactly(t, []*PodLifecycleEvent{}, actualEvents) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/pleg/pleg.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/pleg/pleg.go new file mode 100644 index 000000000000..01798237286e --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/pleg/pleg.go @@ -0,0 +1,52 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package pleg + +import ( + "k8s.io/kubernetes/pkg/types" +) + +type PodLifeCycleEventType string + +const ( + ContainerStarted PodLifeCycleEventType = "ContainerStarted" + ContainerDied PodLifeCycleEventType = "ContainerDied" + // PodSync is used to trigger syncing of a pod when the observed change of + // the state of the pod cannot be captured by any single event above. + PodSync PodLifeCycleEventType = "PodSync" + // Do not use the events below because they are disabled in GenericPLEG. + ContainerRemoved PodLifeCycleEventType = "ContainerRemoved" + ContainerChanged PodLifeCycleEventType = "ContainerChanged" +) + +// PodLifecycleEvent is an event that reflects the change of the pod state. +type PodLifecycleEvent struct { + // The pod ID. + ID types.UID + // The type of the event. + Type PodLifeCycleEventType + // The accompanying data, which varies based on the event type. + // - ContainerStarted/ContainerDied: the container ID (string). + // - All other event types: unused. + Data interface{} +} + +type PodLifecycleEventGenerator interface { + Start() + Watch() chan *PodLifecycleEvent + Healthy() (bool, error) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/pod/manager.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/pod/manager.go new file mode 100644 index 000000000000..57b418102a55 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/pod/manager.go @@ -0,0 +1,285 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License.
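pleg.go above exposes the generator side as a plain channel returned by Watch. A sketch of how a subscriber might select over such a channel alongside a periodic resync tick, in the spirit of the auxiliary sync the GenericPLEG comment recommends (hypothetical event type, not the kubelet's actual sync loop):

package main

import (
	"fmt"
	"time"
)

type event struct {
	podID string
	kind  string
}

// consume handles lifecycle events as they arrive and falls back to a
// periodic resync so missed events are eventually reconciled.
func consume(events <-chan *event, stop <-chan struct{}) {
	resync := time.NewTicker(50 * time.Millisecond)
	defer resync.Stop()
	for {
		select {
		case e := <-events:
			fmt.Printf("sync pod %s because %s\n", e.podID, e.kind)
		case <-resync.C:
			fmt.Println("periodic resync of all pods")
		case <-stop:
			return
		}
	}
}

func main() {
	events := make(chan *event, 1)
	stop := make(chan struct{})
	events <- &event{podID: "p1", kind: "ContainerStarted"}
	go consume(events, stop)
	time.Sleep(120 * time.Millisecond)
	close(stop)
}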
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package pod + +import ( + "sync" + + "k8s.io/kubernetes/pkg/api" + kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" + "k8s.io/kubernetes/pkg/types" +) + +// Pod manager stores and manages access to the pods. +// +// Kubelet discovers pod updates from 3 sources: file, http, and apiserver. +// Pods from non-apiserver sources are called static pods, and API server is +// not aware of the existence of static pods. In order to monitor the status of +// such pods, kubelet creates a mirror pod for each static pod via the API +// server. +// +// A mirror pod has the same pod full name (name and namespace) as its static +// counterpart (albeit different metadata such as UID, etc). By leveraging the +// fact that kubelet reports the pod status using the pod full name, the status +// of the mirror pod always reflects the actual status of the static pod. +// When a static pod gets deleted, the associated orphaned mirror pod will +// also be removed. + +type Manager interface { + GetPods() []*api.Pod + GetPodByFullName(podFullName string) (*api.Pod, bool) + GetPodByName(namespace, name string) (*api.Pod, bool) + GetPodByUID(types.UID) (*api.Pod, bool) + GetPodByMirrorPod(*api.Pod) (*api.Pod, bool) + GetMirrorPodByPod(*api.Pod) (*api.Pod, bool) + GetPodsAndMirrorPods() ([]*api.Pod, []*api.Pod) + + // SetPods replaces the internal pods with the new pods. + // It is currently only used for testing. + SetPods(pods []*api.Pod) + + // Methods that modify a single pod. + AddPod(pod *api.Pod) + UpdatePod(pod *api.Pod) + DeletePod(pod *api.Pod) + + DeleteOrphanedMirrorPods() + TranslatePodUID(uid types.UID) types.UID + GetUIDTranslations() (podToMirror, mirrorToPod map[types.UID]types.UID) + IsMirrorPodOf(mirrorPod, pod *api.Pod) bool + MirrorClient +} + +// All maps in basicManager should be set by calling SetPods(); +// individual arrays/maps are not immutable and no other methods should attempt +// to modify them. +type basicManager struct { + // Protects all internal maps. + lock sync.RWMutex + + // Regular pods indexed by UID. + podByUID map[types.UID]*api.Pod + // Mirror pods indexed by UID. + mirrorPodByUID map[types.UID]*api.Pod + + // Pods indexed by full name for easy access. + podByFullName map[string]*api.Pod + mirrorPodByFullName map[string]*api.Pod + + // Mirror pod UID to pod UID map. + translationByUID map[types.UID]types.UID + + // A mirror pod client to create/delete mirror pods. + MirrorClient +} + +func NewBasicPodManager(client MirrorClient) Manager { + pm := &basicManager{} + pm.MirrorClient = client + pm.SetPods(nil) + return pm +} + +// Set the internal pods based on the new pods. +func (pm *basicManager) SetPods(newPods []*api.Pod) { + pm.lock.Lock() + defer pm.lock.Unlock() + + pm.podByUID = make(map[types.UID]*api.Pod) + pm.podByFullName = make(map[string]*api.Pod) + pm.mirrorPodByUID = make(map[types.UID]*api.Pod) + pm.mirrorPodByFullName = make(map[string]*api.Pod) + pm.translationByUID = make(map[types.UID]types.UID) + + pm.updatePodsInternal(newPods...)
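The manager above keeps every pod addressable by two keys, UID and full name, which is what lets mirror pods and their static counterparts be correlated by full name. A reduced sketch of such a dual index (hypothetical types):

package main

import "fmt"

type pod struct {
	uid, name, namespace string
}

func fullName(p *pod) string { return p.name + "_" + p.namespace }

// index keeps pods addressable both by UID and by full name, the two
// lookup keys callers use.
type index struct {
	byUID      map[string]*pod
	byFullName map[string]*pod
}

func newIndex() *index {
	return &index{byUID: map[string]*pod{}, byFullName: map[string]*pod{}}
}

// add records the pod under both keys; delete removes both entries.
func (ix *index) add(p *pod) {
	ix.byUID[p.uid] = p
	ix.byFullName[fullName(p)] = p
}

func (ix *index) delete(p *pod) {
	delete(ix.byUID, p.uid)
	delete(ix.byFullName, fullName(p))
}

func main() {
	ix := newIndex()
	ix.add(&pod{uid: "123", name: "bar", namespace: "default"})
	p := ix.byFullName["bar_default"]
	fmt.Println(p.uid) // 123
}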
+} + +func (pm *basicManager) AddPod(pod *api.Pod) { + pm.UpdatePod(pod) +} + +func (pm *basicManager) UpdatePod(pod *api.Pod) { + pm.lock.Lock() + defer pm.lock.Unlock() + pm.updatePodsInternal(pod) +} + +func (pm *basicManager) updatePodsInternal(pods ...*api.Pod) { + for _, pod := range pods { + podFullName := kubecontainer.GetPodFullName(pod) + if IsMirrorPod(pod) { + pm.mirrorPodByUID[pod.UID] = pod + pm.mirrorPodByFullName[podFullName] = pod + if p, ok := pm.podByFullName[podFullName]; ok { + pm.translationByUID[pod.UID] = p.UID + } + } else { + pm.podByUID[pod.UID] = pod + pm.podByFullName[podFullName] = pod + if mirror, ok := pm.mirrorPodByFullName[podFullName]; ok { + pm.translationByUID[mirror.UID] = pod.UID + } + } + } +} + +func (pm *basicManager) DeletePod(pod *api.Pod) { + pm.lock.Lock() + defer pm.lock.Unlock() + podFullName := kubecontainer.GetPodFullName(pod) + if IsMirrorPod(pod) { + delete(pm.mirrorPodByUID, pod.UID) + delete(pm.mirrorPodByFullName, podFullName) + delete(pm.translationByUID, pod.UID) + } else { + delete(pm.podByUID, pod.UID) + delete(pm.podByFullName, podFullName) + } +} + +// GetPods returns the regular pods bound to the kubelet and their spec. +func (pm *basicManager) GetPods() []*api.Pod { + pm.lock.RLock() + defer pm.lock.RUnlock() + return podsMapToPods(pm.podByUID) +} + +// GetPodsAndMirrorPods returns both the regular and mirror pods. +func (pm *basicManager) GetPodsAndMirrorPods() ([]*api.Pod, []*api.Pod) { + pm.lock.RLock() + defer pm.lock.RUnlock() + pods := podsMapToPods(pm.podByUID) + mirrorPods := podsMapToPods(pm.mirrorPodByUID) + return pods, mirrorPods +} + +// Returns all pods (including mirror pods). +func (pm *basicManager) getAllPods() []*api.Pod { + return append(podsMapToPods(pm.podByUID), podsMapToPods(pm.mirrorPodByUID)...) +} + +// GetPodByUID provides the (non-mirror) pod that matches pod UID, as well as +// whether the pod is found. +func (pm *basicManager) GetPodByUID(uid types.UID) (*api.Pod, bool) { + pm.lock.RLock() + defer pm.lock.RUnlock() + pod, ok := pm.podByUID[uid] + return pod, ok +} + +// GetPodByName provides the (non-mirror) pod that matches namespace and name, +// as well as whether the pod was found. +func (pm *basicManager) GetPodByName(namespace, name string) (*api.Pod, bool) { + podFullName := kubecontainer.BuildPodFullName(name, namespace) + return pm.GetPodByFullName(podFullName) +} + +// GetPodByFullName returns the (non-mirror) pod that matches the full name, as +// well as whether the pod was found. +func (pm *basicManager) GetPodByFullName(podFullName string) (*api.Pod, bool) { + pm.lock.RLock() + defer pm.lock.RUnlock() + pod, ok := pm.podByFullName[podFullName] + return pod, ok +} + +// If the UID belongs to a mirror pod, maps it to the UID of its static pod. +// Otherwise, return the original UID. All public-facing functions should +// perform this translation for UIDs because a user may provide a mirror pod +// UID, which is not recognized by internal Kubelet functions.
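The translation described above is implemented next by TranslatePodUID and GetUIDTranslations: a single stored mirror-to-static map, with the forward view derived on demand. A sketch of that shape:

package main

import "fmt"

// translations maps mirror-pod UIDs to their static-pod UIDs.
type translations map[string]string

// translate maps a mirror UID to its static UID and returns any other
// UID unchanged, mirroring the manager's lookup behavior.
func (t translations) translate(uid string) string {
	if s, ok := t[uid]; ok {
		return s
	}
	return uid
}

// both derives the forward (static->mirror) view from the stored map.
func (t translations) both() (staticToMirror, mirrorToStatic map[string]string) {
	staticToMirror = make(map[string]string, len(t))
	mirrorToStatic = make(map[string]string, len(t))
	for mirror, static := range t {
		mirrorToStatic[mirror] = static
		staticToMirror[static] = mirror
	}
	return staticToMirror, mirrorToStatic
}

func main() {
	t := translations{"mirror-1": "static-1"}
	fmt.Println(t.translate("mirror-1")) // static-1
	fmt.Println(t.translate("static-1")) // static-1 (unchanged)
	s2m, _ := t.both()
	fmt.Println(s2m["static-1"]) // mirror-1
}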
+func (pm *basicManager) TranslatePodUID(uid types.UID) types.UID { + if uid == "" { + return uid + } + + pm.lock.RLock() + defer pm.lock.RUnlock() + if translated, ok := pm.translationByUID[uid]; ok { + return translated + } + return uid +} + +func (pm *basicManager) GetUIDTranslations() (podToMirror, mirrorToPod map[types.UID]types.UID) { + pm.lock.RLock() + defer pm.lock.RUnlock() + + podToMirror = make(map[types.UID]types.UID, len(pm.translationByUID)) + mirrorToPod = make(map[types.UID]types.UID, len(pm.translationByUID)) + for k, v := range pm.translationByUID { + mirrorToPod[k] = v + podToMirror[v] = k + } + return podToMirror, mirrorToPod +} + +func (pm *basicManager) getOrphanedMirrorPodNames() []string { + pm.lock.RLock() + defer pm.lock.RUnlock() + var podFullNames []string + for podFullName := range pm.mirrorPodByFullName { + if _, ok := pm.podByFullName[podFullName]; !ok { + podFullNames = append(podFullNames, podFullName) + } + } + return podFullNames +} + +// Delete all mirror pods which do not have associated static pods. This method +// sends deletion requests to the API server, but does NOT modify the internal +// pod storage in basicManager. +func (pm *basicManager) DeleteOrphanedMirrorPods() { + podFullNames := pm.getOrphanedMirrorPodNames() + for _, podFullName := range podFullNames { + pm.MirrorClient.DeleteMirrorPod(podFullName) + } +} + +// Returns true if mirrorPod is a correct representation of pod; false otherwise. +func (pm *basicManager) IsMirrorPodOf(mirrorPod, pod *api.Pod) bool { + // Check name and namespace first. + if pod.Name != mirrorPod.Name || pod.Namespace != mirrorPod.Namespace { + return false + } + hash, ok := getHashFromMirrorPod(mirrorPod) + if !ok { + return false + } + return hash == getPodHash(pod) +} + +func podsMapToPods(UIDMap map[types.UID]*api.Pod) []*api.Pod { + pods := make([]*api.Pod, 0, len(UIDMap)) + for _, pod := range UIDMap { + pods = append(pods, pod) + } + return pods +} + +func (pm *basicManager) GetMirrorPodByPod(pod *api.Pod) (*api.Pod, bool) { + pm.lock.RLock() + defer pm.lock.RUnlock() + mirrorPod, ok := pm.mirrorPodByFullName[kubecontainer.GetPodFullName(pod)] + return mirrorPod, ok +} + +func (pm *basicManager) GetPodByMirrorPod(mirrorPod *api.Pod) (*api.Pod, bool) { + pm.lock.RLock() + defer pm.lock.RUnlock() + pod, ok := pm.podByFullName[kubecontainer.GetPodFullName(mirrorPod)] + return pod, ok +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/pod/manager_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/pod/manager_test.go new file mode 100644 index 000000000000..965e24ef2d45 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/pod/manager_test.go @@ -0,0 +1,110 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package pod + +import ( + "reflect" + "testing" + + "k8s.io/kubernetes/pkg/api" + podtest "k8s.io/kubernetes/pkg/kubelet/pod/testing" + kubetypes "k8s.io/kubernetes/pkg/kubelet/types" +) + +// Stub out mirror client for testing purposes. +func newTestManager() (*basicManager, *podtest.FakeMirrorClient) { + fakeMirrorClient := podtest.NewFakeMirrorClient() + manager := NewBasicPodManager(fakeMirrorClient).(*basicManager) + return manager, fakeMirrorClient +} + +// Tests that pods/maps are properly set after the pod update, and the basic +// methods work correctly. +func TestGetSetPods(t *testing.T) { + mirrorPod := &api.Pod{ + ObjectMeta: api.ObjectMeta{ + UID: "987654321", + Name: "bar", + Namespace: "default", + Annotations: map[string]string{ + kubetypes.ConfigSourceAnnotationKey: "api", + kubetypes.ConfigMirrorAnnotationKey: "mirror", + }, + }, + } + staticPod := &api.Pod{ + ObjectMeta: api.ObjectMeta{ + UID: "123456789", + Name: "bar", + Namespace: "default", + Annotations: map[string]string{kubetypes.ConfigSourceAnnotationKey: "file"}, + }, + } + + expectedPods := []*api.Pod{ + { + ObjectMeta: api.ObjectMeta{ + UID: "999999999", + Name: "taco", + Namespace: "default", + Annotations: map[string]string{kubetypes.ConfigSourceAnnotationKey: "api"}, + }, + }, + staticPod, + } + updates := append(expectedPods, mirrorPod) + podManager, _ := newTestManager() + podManager.SetPods(updates) + + // Tests that all regular pods are recorded correctly. + actualPods := podManager.GetPods() + if len(actualPods) != len(expectedPods) { + t.Errorf("expected %d pods, got %d pods; expected pods %#v, got pods %#v", len(expectedPods), len(actualPods), + expectedPods, actualPods) + } + for _, expected := range expectedPods { + found := false + for _, actual := range actualPods { + if actual.UID == expected.UID { + if !reflect.DeepEqual(&expected, &actual) { + t.Errorf("pod was recorded incorrectly. expect: %#v, got: %#v", expected, actual) + } + found = true + break + } + } + if !found { + t.Errorf("pod %q was not found in %#v", expected.UID, actualPods) + } + } + // Tests UID translation works as expected. + if uid := podManager.TranslatePodUID(mirrorPod.UID); uid != staticPod.UID { + t.Errorf("unable to translate UID %q to the static pod's UID %q; %#v", + mirrorPod.UID, staticPod.UID, podManager.mirrorPodByUID) + } + + // Test the basic Get methods. + actualPod, ok := podManager.GetPodByFullName("bar_default") + if !ok || !reflect.DeepEqual(actualPod, staticPod) { + t.Errorf("unable to get pod by full name; expected: %#v, got: %#v", staticPod, actualPod) + } + actualPod, ok = podManager.GetPodByName("default", "bar") + if !ok || !reflect.DeepEqual(actualPod, staticPod) { + t.Errorf("unable to get pod by name; expected: %#v, got: %#v", staticPod, actualPod) + } + +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/pod/mirror_client.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/pod/mirror_client.go new file mode 100644 index 000000000000..b29867bfd832 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/pod/mirror_client.go @@ -0,0 +1,104 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package pod + +import ( + "github.com/golang/glog" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/errors" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" + kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" + kubetypes "k8s.io/kubernetes/pkg/kubelet/types" +) + +// Mirror client is used to create/delete a mirror pod. +type MirrorClient interface { + CreateMirrorPod(*api.Pod) error + DeleteMirrorPod(string) error +} + +type basicMirrorClient struct { + // mirror pods are stored in the kubelet directly because they need to be + // in sync with the internal pods. + apiserverClient clientset.Interface +} + +func NewBasicMirrorClient(apiserverClient clientset.Interface) MirrorClient { + return &basicMirrorClient{apiserverClient: apiserverClient} +} + +// Creates a mirror pod. +func (mc *basicMirrorClient) CreateMirrorPod(pod *api.Pod) error { + if mc.apiserverClient == nil { + return nil + } + // Make a copy of the pod. + copyPod := *pod + copyPod.Annotations = make(map[string]string) + + for k, v := range pod.Annotations { + copyPod.Annotations[k] = v + } + hash := getPodHash(pod) + copyPod.Annotations[kubetypes.ConfigMirrorAnnotationKey] = hash + apiPod, err := mc.apiserverClient.Core().Pods(copyPod.Namespace).Create(&copyPod) + if err != nil && errors.IsAlreadyExists(err) { + // Check if the existing pod is the same as the pod we want to create. + if h, ok := apiPod.Annotations[kubetypes.ConfigMirrorAnnotationKey]; ok && h == hash { + return nil + } + } + return err +} + +// Deletes a mirror pod. +func (mc *basicMirrorClient) DeleteMirrorPod(podFullName string) error { + if mc.apiserverClient == nil { + return nil + } + name, namespace, err := kubecontainer.ParsePodFullName(podFullName) + if err != nil { + glog.Errorf("Failed to parse a pod full name %q", podFullName) + return err + } + glog.V(4).Infof("Deleting a mirror pod %q", podFullName) + // TODO(random-liu): Delete the mirror pod with uid precondition in mirror pod manager + if err := mc.apiserverClient.Core().Pods(namespace).Delete(name, api.NewDeleteOptions(0)); err != nil && !errors.IsNotFound(err) { + glog.Errorf("Failed deleting a mirror pod %q: %v", podFullName, err) + } + return nil +} + +func IsStaticPod(pod *api.Pod) bool { + source, err := kubetypes.GetPodSource(pod) + return err == nil && source != kubetypes.ApiserverSource +} + +func IsMirrorPod(pod *api.Pod) bool { + _, ok := pod.Annotations[kubetypes.ConfigMirrorAnnotationKey] + return ok +} + +func getHashFromMirrorPod(pod *api.Pod) (string, bool) { + hash, ok := pod.Annotations[kubetypes.ConfigMirrorAnnotationKey] + return hash, ok +} + +func getPodHash(pod *api.Pod) string { + // The annotation exists for all static pods.
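CreateMirrorPod above treats an AlreadyExists error as success when the existing object carries the same config hash, which makes creation idempotent across kubelet restarts. The same check against a toy store (hypothetical store API, not client-go):

package main

import (
	"errors"
	"fmt"
)

var errAlreadyExists = errors.New("already exists")

type store map[string]string // name -> config hash

// create stores the hash, or returns the existing hash plus a conflict
// error if the name is already taken.
func (s store) create(name, hash string) (string, error) {
	if existing, ok := s[name]; ok {
		return existing, errAlreadyExists
	}
	s[name] = hash
	return hash, nil
}

// ensure creates the object, tolerating AlreadyExists when the stored
// hash matches what we wanted to write (idempotent create).
func ensure(s store, name, hash string) error {
	existing, err := s.create(name, hash)
	if errors.Is(err, errAlreadyExists) && existing == hash {
		return nil
	}
	return err
}

func main() {
	s := store{}
	fmt.Println(ensure(s, "pod-a", "h1")) // <nil>
	fmt.Println(ensure(s, "pod-a", "h1")) // <nil> (same hash, tolerated)
	fmt.Println(ensure(s, "pod-a", "h2")) // already exists (real conflict)
}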
+ return pod.Annotations[kubetypes.ConfigHashAnnotationKey] +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/pod/mirror_client_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/pod/mirror_client_test.go new file mode 100644 index 000000000000..d8baa05f805f --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/pod/mirror_client_test.go @@ -0,0 +1,54 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package pod + +import ( + "testing" + + kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" +) + +func TestParsePodFullName(t *testing.T) { + type nameTuple struct { + Name string + Namespace string + } + successfulCases := map[string]nameTuple{ + "bar_foo": {Name: "bar", Namespace: "foo"}, + "bar.org_foo.com": {Name: "bar.org", Namespace: "foo.com"}, + "bar-bar_foo": {Name: "bar-bar", Namespace: "foo"}, + } + failedCases := []string{"barfoo", "bar_foo_foo", ""} + + for podFullName, expected := range successfulCases { + name, namespace, err := kubecontainer.ParsePodFullName(podFullName) + if err != nil { + t.Errorf("unexpected error when parsing the full name: %v", err) + continue + } + if name != expected.Name || namespace != expected.Namespace { + t.Errorf("expected name %q, namespace %q; got name %q, namespace %q", + expected.Name, expected.Namespace, name, namespace) + } + } + for _, podFullName := range failedCases { + _, _, err := kubecontainer.ParsePodFullName(podFullName) + if err == nil { + t.Errorf("expected error when parsing the full name, got none") + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/pod/testing/fake_mirror_client.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/pod/testing/fake_mirror_client.go new file mode 100644 index 000000000000..64bfd2351b1a --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/pod/testing/fake_mirror_client.go @@ -0,0 +1,83 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testing + +import ( + "sync" + + "k8s.io/kubernetes/pkg/api" + kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" + "k8s.io/kubernetes/pkg/util/sets" +) + +type FakeMirrorClient struct { + mirrorPodLock sync.RWMutex + // Note that a real mirror manager does not store the mirror pods in + // itself. This fake manager does this to track calls. 
+ mirrorPods sets.String + createCounts map[string]int + deleteCounts map[string]int +} + +func NewFakeMirrorClient() *FakeMirrorClient { + m := FakeMirrorClient{} + m.mirrorPods = sets.NewString() + m.createCounts = make(map[string]int) + m.deleteCounts = make(map[string]int) + return &m +} + +func (fmc *FakeMirrorClient) CreateMirrorPod(pod *api.Pod) error { + fmc.mirrorPodLock.Lock() + defer fmc.mirrorPodLock.Unlock() + podFullName := kubecontainer.GetPodFullName(pod) + fmc.mirrorPods.Insert(podFullName) + fmc.createCounts[podFullName]++ + return nil +} + +func (fmc *FakeMirrorClient) DeleteMirrorPod(podFullName string) error { + fmc.mirrorPodLock.Lock() + defer fmc.mirrorPodLock.Unlock() + fmc.mirrorPods.Delete(podFullName) + fmc.deleteCounts[podFullName]++ + return nil +} + +func (fmc *FakeMirrorClient) HasPod(podFullName string) bool { + fmc.mirrorPodLock.RLock() + defer fmc.mirrorPodLock.RUnlock() + return fmc.mirrorPods.Has(podFullName) +} + +func (fmc *FakeMirrorClient) NumOfPods() int { + fmc.mirrorPodLock.RLock() + defer fmc.mirrorPodLock.RUnlock() + return fmc.mirrorPods.Len() +} + +func (fmc *FakeMirrorClient) GetPods() []string { + fmc.mirrorPodLock.RLock() + defer fmc.mirrorPodLock.RUnlock() + return fmc.mirrorPods.List() +} + +func (fmc *FakeMirrorClient) GetCounts(podFullName string) (int, int) { + fmc.mirrorPodLock.RLock() + defer fmc.mirrorPodLock.RUnlock() + return fmc.createCounts[podFullName], fmc.deleteCounts[podFullName] +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/pod_workers.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/pod_workers.go new file mode 100644 index 000000000000..90d1c2c3d842 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/pod_workers.go @@ -0,0 +1,330 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubelet + +import ( + "fmt" + "sync" + "time" + + "github.com/golang/glog" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/client/record" + kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" + "k8s.io/kubernetes/pkg/kubelet/eviction" + kubetypes "k8s.io/kubernetes/pkg/kubelet/types" + "k8s.io/kubernetes/pkg/kubelet/util/queue" + "k8s.io/kubernetes/pkg/types" + "k8s.io/kubernetes/pkg/util/runtime" + "k8s.io/kubernetes/pkg/util/wait" +) + +// OnCompleteFunc is a function that is invoked when an operation completes. +// If err is non-nil, the operation did not complete successfully. +type OnCompleteFunc func(err error) + +// PodStatusFunc is a function that is invoked to generate a pod status. +type PodStatusFunc func(pod *api.Pod, podStatus *kubecontainer.PodStatus) api.PodStatus + +// KillPodOptions are options when performing a pod update whose update type is kill. +type KillPodOptions struct { + // PodStatusFunc is the function to invoke to set pod status in response to a kill request. 
+ PodStatusFunc PodStatusFunc + // PodTerminationGracePeriodSecondsOverride is an optional override to use if a pod is being killed as part of a kill operation. + PodTerminationGracePeriodSecondsOverride *int64 +} + +// UpdatePodOptions is an options struct to pass to a UpdatePod operation. +type UpdatePodOptions struct { + // pod to update + Pod *api.Pod + // the mirror pod for the pod to update, if it is a static pod + MirrorPod *api.Pod + // the type of update (create, update, sync, kill) + UpdateType kubetypes.SyncPodType + // optional callback function when operation completes + // this callback is not guaranteed to be invoked, since a pod worker may + // drop update requests while it is fulfilling a previous request. it is + // only guaranteed to be invoked in response to a kill pod request, which is + // always delivered. + OnCompleteFunc OnCompleteFunc + // if update type is kill, use the specified options to kill the pod. + KillPodOptions *KillPodOptions +} + +// PodWorkers is an abstract interface for testability. +type PodWorkers interface { + UpdatePod(options *UpdatePodOptions) + ForgetNonExistingPodWorkers(desiredPods map[types.UID]empty) + ForgetWorker(uid types.UID) +} + +// syncPodOptions provides the arguments to a SyncPod operation. +type syncPodOptions struct { + // the mirror pod for the pod to sync, if it is a static pod + mirrorPod *api.Pod + // pod to sync + pod *api.Pod + // the type of update (create, update, sync) + updateType kubetypes.SyncPodType + // the current status + podStatus *kubecontainer.PodStatus + // if update type is kill, use the specified options to kill the pod. + killPodOptions *KillPodOptions +} + +// the function to invoke to perform a sync. +type syncPodFnType func(options syncPodOptions) error + +const ( + // jitter factor for resyncInterval + workerResyncIntervalJitterFactor = 0.5 + + // jitter factor for backOffPeriod + workerBackOffPeriodJitterFactor = 0.5 +) + +type podWorkers struct { + // Protects all per worker fields. + podLock sync.Mutex + + // Tracks all running per-pod goroutines - per-pod goroutine will be + // processing updates received through its corresponding channel. + podUpdates map[types.UID]chan UpdatePodOptions + // Tracks the current state of per-pod goroutines. + // Currently, update requests for a given pod that arrive while another + // update of that pod is being processed are coalesced: only the most + // recent undelivered update is kept. + isWorking map[types.UID]bool + // Tracks the last undelivered work item for this pod - a work item is + // undelivered if it comes in while the worker is working. + lastUndeliveredWorkUpdate map[types.UID]UpdatePodOptions + + workQueue queue.WorkQueue + + // This function is run to sync the desired state of the pod. + // NOTE: This function has to be thread-safe - it can be called for + // different pods at the same time. + syncPodFn syncPodFnType + + // The EventRecorder to use + recorder record.EventRecorder + + // backOffPeriod is the duration to back off when there is a sync error. + backOffPeriod time.Duration + + // resyncInterval is the duration to wait until the next sync. + resyncInterval time.Duration + + // podCache stores kubecontainer.PodStatus for all pods.
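The isWorking and lastUndeliveredWorkUpdate fields above implement per-pod coalescing: while a worker is busy, at most one pending update is kept, and newer updates overwrite it (except that a pending kill is never overwritten, as the UpdatePod body below shows). A reduced sketch of the busy flag plus single-slot pattern:

package main

import "fmt"

type coalescer struct {
	working bool
	pending *string // at most one undelivered update
	deliver func(string)
}

// submit delivers immediately when idle; otherwise it overwrites the
// single pending slot so only the newest update survives.
func (c *coalescer) submit(u string) {
	if !c.working {
		c.working = true
		c.deliver(u)
		return
	}
	c.pending = &u
}

// done marks the worker idle, handing over any pending update first.
func (c *coalescer) done() {
	if c.pending != nil {
		u := *c.pending
		c.pending = nil
		c.deliver(u)
		return
	}
	c.working = false
}

func main() {
	c := &coalescer{deliver: func(u string) { fmt.Println("deliver", u) }}
	c.submit("create")  // delivered immediately
	c.submit("update1") // queued while busy
	c.submit("update2") // overwrites update1
	c.done()            // delivers update2
	c.done()            // idle again
}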
+ podCache kubecontainer.Cache +} + +func newPodWorkers(syncPodFn syncPodFnType, recorder record.EventRecorder, workQueue queue.WorkQueue, + resyncInterval, backOffPeriod time.Duration, podCache kubecontainer.Cache) *podWorkers { + return &podWorkers{ + podUpdates: map[types.UID]chan UpdatePodOptions{}, + isWorking: map[types.UID]bool{}, + lastUndeliveredWorkUpdate: map[types.UID]UpdatePodOptions{}, + syncPodFn: syncPodFn, + recorder: recorder, + workQueue: workQueue, + resyncInterval: resyncInterval, + backOffPeriod: backOffPeriod, + podCache: podCache, + } +} + +func (p *podWorkers) managePodLoop(podUpdates <-chan UpdatePodOptions) { + var lastSyncTime time.Time + for update := range podUpdates { + err := func() error { + podUID := update.Pod.UID + // This is a blocking call that would return only if the cache + // has an entry for the pod that is newer than minRuntimeCacheTime. + // This ensures the worker doesn't start syncing until + // after the cache is at least newer than the finished time of + // the previous sync. + status, err := p.podCache.GetNewerThan(podUID, lastSyncTime) + if err != nil { + return err + } + err = p.syncPodFn(syncPodOptions{ + mirrorPod: update.MirrorPod, + pod: update.Pod, + podStatus: status, + killPodOptions: update.KillPodOptions, + updateType: update.UpdateType, + }) + lastSyncTime = time.Now() + if err != nil { + return err + } + return nil + }() + // notify the callback function if the operation succeeded or not + if update.OnCompleteFunc != nil { + update.OnCompleteFunc(err) + } + if err != nil { + glog.Errorf("Error syncing pod %s, skipping: %v", update.Pod.UID, err) + p.recorder.Eventf(update.Pod, api.EventTypeWarning, kubecontainer.FailedSync, "Error syncing pod, skipping: %v", err) + } + p.wrapUp(update.Pod.UID, err) + } +} + +// Apply the new setting to the specified pod. +// If the options provide an OnCompleteFunc, the function is invoked if the update is accepted. +// Update requests are ignored if a kill pod request is pending. +func (p *podWorkers) UpdatePod(options *UpdatePodOptions) { + pod := options.Pod + uid := pod.UID + var podUpdates chan UpdatePodOptions + var exists bool + + p.podLock.Lock() + defer p.podLock.Unlock() + if podUpdates, exists = p.podUpdates[uid]; !exists { + // We need to have a buffer here, because checkForUpdates() method that + // puts an update into channel is called from the same goroutine where + // the channel is consumed. However, it is guaranteed that in such case + // the channel is empty, so buffer of size 1 is enough. + podUpdates = make(chan UpdatePodOptions, 1) + p.podUpdates[uid] = podUpdates + + // Creating a new pod worker either means this is a new pod, or that the + // kubelet just restarted. In either case the kubelet is willing to believe + // the status of the pod for the first pod worker sync. See corresponding + // comment in syncPod. + go func() { + defer runtime.HandleCrash() + p.managePodLoop(podUpdates) + }() + } + if !p.isWorking[pod.UID] { + p.isWorking[pod.UID] = true + podUpdates <- *options + } else { + // if a request to kill a pod is pending, we do not let anything overwrite that request.
+ update, found := p.lastUndeliveredWorkUpdate[pod.UID] + if !found || update.UpdateType != kubetypes.SyncPodKill { + p.lastUndeliveredWorkUpdate[pod.UID] = *options + } + } +} + +func (p *podWorkers) removeWorker(uid types.UID) { + if ch, ok := p.podUpdates[uid]; ok { + close(ch) + delete(p.podUpdates, uid) + // If there is an undelivered work update for this pod we need to remove it + // since the per-pod goroutine won't be able to put it onto the already closed + // channel when it finishes processing the current work update. + if _, cached := p.lastUndeliveredWorkUpdate[uid]; cached { + delete(p.lastUndeliveredWorkUpdate, uid) + } + } +} +func (p *podWorkers) ForgetWorker(uid types.UID) { + p.podLock.Lock() + defer p.podLock.Unlock() + p.removeWorker(uid) +} + +func (p *podWorkers) ForgetNonExistingPodWorkers(desiredPods map[types.UID]empty) { + p.podLock.Lock() + defer p.podLock.Unlock() + for key := range p.podUpdates { + if _, exists := desiredPods[key]; !exists { + p.removeWorker(key) + } + } +} + +func (p *podWorkers) wrapUp(uid types.UID, syncErr error) { + // Requeue the last update if the last sync returned error. + switch { + case syncErr == nil: + // No error; requeue at the regular resync interval. + p.workQueue.Enqueue(uid, wait.Jitter(p.resyncInterval, workerResyncIntervalJitterFactor)) + default: + // Error occurred during the sync; back off and then retry. + p.workQueue.Enqueue(uid, wait.Jitter(p.backOffPeriod, workerBackOffPeriodJitterFactor)) + } + p.checkForUpdates(uid) +} + +func (p *podWorkers) checkForUpdates(uid types.UID) { + p.podLock.Lock() + defer p.podLock.Unlock() + if workUpdate, exists := p.lastUndeliveredWorkUpdate[uid]; exists { + p.podUpdates[uid] <- workUpdate + delete(p.lastUndeliveredWorkUpdate, uid) + } else { + p.isWorking[uid] = false + } +} + +// killPodNow returns a KillPodFunc that can be used to kill a pod. +// It is intended to be injected into other modules that need to kill a pod. +func killPodNow(podWorkers PodWorkers) eviction.KillPodFunc { + return func(pod *api.Pod, status api.PodStatus, gracePeriodOverride *int64) error { + // determine the grace period to use when killing the pod + gracePeriod := int64(0) + if gracePeriodOverride != nil { + gracePeriod = *gracePeriodOverride + } else if pod.Spec.TerminationGracePeriodSeconds != nil { + gracePeriod = *pod.Spec.TerminationGracePeriodSeconds + } + + // we time out and return an error if we don't get a callback within a reasonable time.
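killPodNow above turns an asynchronous worker callback into a synchronous call with a deadline by racing a response channel against time.After. The same shape in isolation (hypothetical await helper); note that buffering the channel by one lets a callback that arrives after the timeout complete without blocking its sender:

package main

import (
	"errors"
	"fmt"
	"time"
)

// await runs an async operation that reports completion through a
// callback, and converts it into a synchronous call with a timeout.
func await(start func(done func(error)), timeout time.Duration) error {
	ch := make(chan error, 1) // buffered so a late callback never blocks
	start(func(err error) { ch <- err })
	select {
	case err := <-ch:
		return err
	case <-time.After(timeout):
		return errors.New("timeout waiting for completion")
	}
}

func main() {
	err := await(func(done func(error)) {
		go func() {
			time.Sleep(10 * time.Millisecond)
			done(nil) // operation finished successfully
		}()
	}, time.Second)
	fmt.Println(err) // <nil>
}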
+ // the default timeout is relative to the grace period (we settle on 2s to wait for kubelet->runtime traffic to complete in sigkill) + timeout := int64(gracePeriod + (gracePeriod / 2)) + minTimeout := int64(2) + if timeout < minTimeout { + timeout = minTimeout + } + timeoutDuration := time.Duration(timeout) * time.Second + + // open a channel we block against until we get a result + type response struct { + err error + } + ch := make(chan response) + podWorkers.UpdatePod(&UpdatePodOptions{ + Pod: pod, + UpdateType: kubetypes.SyncPodKill, + OnCompleteFunc: func(err error) { + ch <- response{err: err} + }, + KillPodOptions: &KillPodOptions{ + PodStatusFunc: func(p *api.Pod, podStatus *kubecontainer.PodStatus) api.PodStatus { + return status + }, + PodTerminationGracePeriodSecondsOverride: gracePeriodOverride, + }, + }) + + // wait for either a response, or a timeout + select { + case r := <-ch: + return r.err + case <-time.After(timeoutDuration): + return fmt.Errorf("timeout waiting to kill pod") + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/pod_workers_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/pod_workers_test.go new file mode 100644 index 000000000000..655c835a55c7 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/pod_workers_test.go @@ -0,0 +1,353 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubelet + +import ( + "reflect" + "sync" + "testing" + "time" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/client/record" + kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" + containertest "k8s.io/kubernetes/pkg/kubelet/container/testing" + kubetypes "k8s.io/kubernetes/pkg/kubelet/types" + "k8s.io/kubernetes/pkg/kubelet/util/queue" + "k8s.io/kubernetes/pkg/types" + "k8s.io/kubernetes/pkg/util" +) + +// fakePodWorkers runs the sync pod function serially, so we can have +// deterministic behaviour in testing.
+type fakePodWorkers struct { + syncPodFn syncPodFnType + cache kubecontainer.Cache + t TestingInterface +} + +func (f *fakePodWorkers) UpdatePod(options *UpdatePodOptions) { + status, err := f.cache.Get(options.Pod.UID) + if err != nil { + f.t.Errorf("Unexpected error: %v", err) + } + if err := f.syncPodFn(syncPodOptions{ + mirrorPod: options.MirrorPod, + pod: options.Pod, + podStatus: status, + updateType: options.UpdateType, + killPodOptions: options.KillPodOptions, + }); err != nil { + f.t.Errorf("Unexpected error: %v", err) + } +} + +func (f *fakePodWorkers) ForgetNonExistingPodWorkers(desiredPods map[types.UID]empty) {} + +func (f *fakePodWorkers) ForgetWorker(uid types.UID) {} + +type TestingInterface interface { + Errorf(format string, args ...interface{}) +} + +func newPod(uid, name string) *api.Pod { + return &api.Pod{ + ObjectMeta: api.ObjectMeta{ + UID: types.UID(uid), + Name: name, + }, + } +} + +// syncPodRecord is a record of a sync pod call +type syncPodRecord struct { + name string + updateType kubetypes.SyncPodType +} + +func createPodWorkers() (*podWorkers, map[types.UID][]syncPodRecord) { + lock := sync.Mutex{} + processed := make(map[types.UID][]syncPodRecord) + fakeRecorder := &record.FakeRecorder{} + fakeRuntime := &containertest.FakeRuntime{} + fakeCache := containertest.NewFakeCache(fakeRuntime) + podWorkers := newPodWorkers( + func(options syncPodOptions) error { + func() { + lock.Lock() + defer lock.Unlock() + pod := options.pod + processed[pod.UID] = append(processed[pod.UID], syncPodRecord{ + name: pod.Name, + updateType: options.updateType, + }) + }() + return nil + }, + fakeRecorder, + queue.NewBasicWorkQueue(&util.RealClock{}), + time.Second, + time.Second, + fakeCache, + ) + return podWorkers, processed +} + +func drainWorkers(podWorkers *podWorkers, numPods int) { + for { + stillWorking := false + podWorkers.podLock.Lock() + for i := 0; i < numPods; i++ { + if podWorkers.isWorking[types.UID(string(i))] { + stillWorking = true + } + } + podWorkers.podLock.Unlock() + if !stillWorking { + break + } + time.Sleep(50 * time.Millisecond) + } +} + +func TestUpdatePod(t *testing.T) { + podWorkers, processed := createPodWorkers() + + // Check whether all pod updates will be processed. 
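+ // Pod i receives updates named 0..i; intermediate updates may be coalesced, but the first ("0") and last ("i") must be processed in order.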
+ numPods := 20 + for i := 0; i < numPods; i++ { + for j := i; j < numPods; j++ { + podWorkers.UpdatePod(&UpdatePodOptions{ + Pod: newPod(string(j), string(i)), + UpdateType: kubetypes.SyncPodCreate, + }) + } + } + drainWorkers(podWorkers, numPods) + + if len(processed) != numPods { + t.Errorf("Not all pods processed: %v", len(processed)) + return + } + for i := 0; i < numPods; i++ { + uid := types.UID(i) + if len(processed[uid]) < 1 || len(processed[uid]) > i+1 { + t.Errorf("Pod %v processed %v times", i, len(processed[uid])) + continue + } + + first := 0 + last := len(processed[uid]) - 1 + if processed[uid][first].name != string(0) { + t.Errorf("Pod %v: incorrect order %v, %v", i, first, processed[uid][first]) + + } + if processed[uid][last].name != string(i) { + t.Errorf("Pod %v: incorrect order %v, %v", i, last, processed[uid][last]) + } + } +} + +func TestUpdatePodDoesNotForgetSyncPodKill(t *testing.T) { + podWorkers, processed := createPodWorkers() + numPods := 20 + for i := 0; i < numPods; i++ { + pod := newPod(string(i), string(i)) + podWorkers.UpdatePod(&UpdatePodOptions{ + Pod: pod, + UpdateType: kubetypes.SyncPodCreate, + }) + podWorkers.UpdatePod(&UpdatePodOptions{ + Pod: pod, + UpdateType: kubetypes.SyncPodKill, + }) + podWorkers.UpdatePod(&UpdatePodOptions{ + Pod: pod, + UpdateType: kubetypes.SyncPodUpdate, + }) + } + drainWorkers(podWorkers, numPods) + if len(processed) != numPods { + t.Errorf("Not all pods processed: %v", len(processed)) + return + } + for i := 0; i < numPods; i++ { + uid := types.UID(i) + // each pod should be processed two times (create, kill, but not update) + syncPodRecords := processed[uid] + if len(syncPodRecords) < 2 { + t.Errorf("Pod %v processed %v times, but expected at least 2", i, len(syncPodRecords)) + continue + } + if syncPodRecords[0].updateType != kubetypes.SyncPodCreate { + t.Errorf("Pod %v event was %v, but expected %v", i, syncPodRecords[0].updateType, kubetypes.SyncPodCreate) + } + if syncPodRecords[1].updateType != kubetypes.SyncPodKill { + t.Errorf("Pod %v event was %v, but expected %v", i, syncPodRecords[1].updateType, kubetypes.SyncPodKill) + } + } +} + +func TestForgetNonExistingPodWorkers(t *testing.T) { + podWorkers, _ := createPodWorkers() + + numPods := 20 + for i := 0; i < numPods; i++ { + podWorkers.UpdatePod(&UpdatePodOptions{ + Pod: newPod(string(i), "name"), + UpdateType: kubetypes.SyncPodUpdate, + }) + } + drainWorkers(podWorkers, numPods) + + if len(podWorkers.podUpdates) != numPods { + t.Errorf("Incorrect number of open channels %v", len(podWorkers.podUpdates)) + } + + desiredPods := map[types.UID]empty{} + desiredPods[types.UID(2)] = empty{} + desiredPods[types.UID(14)] = empty{} + podWorkers.ForgetNonExistingPodWorkers(desiredPods) + if len(podWorkers.podUpdates) != 2 { + t.Errorf("Incorrect number of open channels %v", len(podWorkers.podUpdates)) + } + if _, exists := podWorkers.podUpdates[types.UID(2)]; !exists { + t.Errorf("No updates channel for pod 2") + } + if _, exists := podWorkers.podUpdates[types.UID(14)]; !exists { + t.Errorf("No updates channel for pod 14") + } + + podWorkers.ForgetNonExistingPodWorkers(map[types.UID]empty{}) + if len(podWorkers.podUpdates) != 0 { + t.Errorf("Incorrect number of open channels %v", len(podWorkers.podUpdates)) + } +} + +type simpleFakeKubelet struct { + pod *api.Pod + mirrorPod *api.Pod + podStatus *kubecontainer.PodStatus + wg sync.WaitGroup +} + +func (kl *simpleFakeKubelet) syncPod(options syncPodOptions) error { + kl.pod, kl.mirrorPod, kl.podStatus = options.pod, 
options.mirrorPod, options.podStatus + return nil +} + +func (kl *simpleFakeKubelet) syncPodWithWaitGroup(options syncPodOptions) error { + kl.pod, kl.mirrorPod, kl.podStatus = options.pod, options.mirrorPod, options.podStatus + kl.wg.Done() + return nil +} + +// byContainerName sorts the containers in a running pod by their names. +type byContainerName kubecontainer.Pod + +func (b byContainerName) Len() int { return len(b.Containers) } + +func (b byContainerName) Swap(i, j int) { + b.Containers[i], b.Containers[j] = b.Containers[j], b.Containers[i] +} + +func (b byContainerName) Less(i, j int) bool { + return b.Containers[i].Name < b.Containers[j].Name +} + +// TestFakePodWorkers verifies that the fakePodWorkers behaves the same way as the real podWorkers +// for their invocation of the syncPodFn. +func TestFakePodWorkers(t *testing.T) { + fakeRecorder := &record.FakeRecorder{} + fakeRuntime := &containertest.FakeRuntime{} + fakeCache := containertest.NewFakeCache(fakeRuntime) + + kubeletForRealWorkers := &simpleFakeKubelet{} + kubeletForFakeWorkers := &simpleFakeKubelet{} + + realPodWorkers := newPodWorkers(kubeletForRealWorkers.syncPodWithWaitGroup, fakeRecorder, queue.NewBasicWorkQueue(&util.RealClock{}), time.Second, time.Second, fakeCache) + fakePodWorkers := &fakePodWorkers{kubeletForFakeWorkers.syncPod, fakeCache, t} + + tests := []struct { + pod *api.Pod + mirrorPod *api.Pod + }{ + { + &api.Pod{}, + &api.Pod{}, + }, + { + podWithUidNameNs("12345678", "foo", "new"), + podWithUidNameNs("12345678", "fooMirror", "new"), + }, + { + podWithUidNameNs("98765", "bar", "new"), + podWithUidNameNs("98765", "barMirror", "new"), + }, + } + + for i, tt := range tests { + kubeletForRealWorkers.wg.Add(1) + realPodWorkers.UpdatePod(&UpdatePodOptions{ + Pod: tt.pod, + MirrorPod: tt.mirrorPod, + UpdateType: kubetypes.SyncPodUpdate, + }) + fakePodWorkers.UpdatePod(&UpdatePodOptions{ + Pod: tt.pod, + MirrorPod: tt.mirrorPod, + UpdateType: kubetypes.SyncPodUpdate, + }) + + kubeletForRealWorkers.wg.Wait() + + if !reflect.DeepEqual(kubeletForRealWorkers.pod, kubeletForFakeWorkers.pod) { + t.Errorf("%d: Expected: %#v, Actual: %#v", i, kubeletForRealWorkers.pod, kubeletForFakeWorkers.pod) + } + + if !reflect.DeepEqual(kubeletForRealWorkers.mirrorPod, kubeletForFakeWorkers.mirrorPod) { + t.Errorf("%d: Expected: %#v, Actual: %#v", i, kubeletForRealWorkers.mirrorPod, kubeletForFakeWorkers.mirrorPod) + } + + if !reflect.DeepEqual(kubeletForRealWorkers.podStatus, kubeletForFakeWorkers.podStatus) { + t.Errorf("%d: Expected: %#v, Actual: %#v", i, kubeletForRealWorkers.podStatus, kubeletForFakeWorkers.podStatus) + } + } +} + +// TestKillPodNowFunc tests that the blocking kill pod function works with pod workers as expected.
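+// killPodNow should deliver a single SyncPodKill update and block until the worker's OnCompleteFunc fires.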
+func TestKillPodNowFunc(t *testing.T) { + podWorkers, processed := createPodWorkers() + killPodFunc := killPodNow(podWorkers) + pod := newPod("test", "test") + gracePeriodOverride := int64(0) + err := killPodFunc(pod, api.PodStatus{Phase: api.PodFailed, Reason: "reason", Message: "message"}, &gracePeriodOverride) + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + if len(processed) != 1 { + t.Errorf("len(processed) expected: %v, actual: %v", 1, len(processed)) + return + } + syncPodRecords := processed[pod.UID] + if len(syncPodRecords) != 1 { + t.Errorf("Pod processed %v times, but expected %v", len(syncPodRecords), 1) + } + if syncPodRecords[0].updateType != kubetypes.SyncPodKill { + t.Errorf("Pod update type was %v, but expected %v", syncPodRecords[0].updateType, kubetypes.SyncPodKill) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/prober/common_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/prober/common_test.go new file mode 100644 index 000000000000..aeb61ca2174a --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/prober/common_test.go @@ -0,0 +1,147 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package prober + +import ( + "reflect" + "sync" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" + "k8s.io/kubernetes/pkg/client/record" + kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" + kubepod "k8s.io/kubernetes/pkg/kubelet/pod" + "k8s.io/kubernetes/pkg/kubelet/prober/results" + "k8s.io/kubernetes/pkg/kubelet/status" + "k8s.io/kubernetes/pkg/probe" + "k8s.io/kubernetes/pkg/util/exec" +) + +const ( + testContainerName = "cOnTaInEr_NaMe" + testPodUID = "pOd_UiD" +) + +var testContainerID = kubecontainer.ContainerID{Type: "test", ID: "cOnTaInEr_Id"} + +func getTestRunningStatus() api.PodStatus { + containerStatus := api.ContainerStatus{ + Name: testContainerName, + ContainerID: testContainerID.String(), + } + containerStatus.State.Running = &api.ContainerStateRunning{StartedAt: unversioned.Now()} + podStatus := api.PodStatus{ + Phase: api.PodRunning, + ContainerStatuses: []api.ContainerStatus{containerStatus}, + } + return podStatus +} + +func getTestPod() *api.Pod { + container := api.Container{ + Name: testContainerName, + } + pod := api.Pod{ + Spec: api.PodSpec{ + Containers: []api.Container{container}, + RestartPolicy: api.RestartPolicyNever, + }, + } + pod.Name = "testPod" + pod.UID = testPodUID + return &pod +} + +func setTestProbe(pod *api.Pod, probeType probeType, probeSpec api.Probe) { + // All tests rely on the fake exec prober. + probeSpec.Handler = api.Handler{ + Exec: &api.ExecAction{}, + } + + // Apply test defaults, overridden for test speed.
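+ // Reflection below only fills fields that are still zero, so explicit values from the test take precedence.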
+ defaults := map[string]int64{ + "TimeoutSeconds": 1, + "PeriodSeconds": 1, + "SuccessThreshold": 1, + "FailureThreshold": 1, + } + for field, value := range defaults { + f := reflect.ValueOf(&probeSpec).Elem().FieldByName(field) + if f.Int() == 0 { + f.SetInt(value) + } + } + + switch probeType { + case readiness: + pod.Spec.Containers[0].ReadinessProbe = &probeSpec + case liveness: + pod.Spec.Containers[0].LivenessProbe = &probeSpec + } +} + +func newTestManager() *manager { + refManager := kubecontainer.NewRefManager() + refManager.SetRef(testContainerID, &api.ObjectReference{}) // Suppress prober warnings. + podManager := kubepod.NewBasicPodManager(nil) + // Add test pod to pod manager, so that status manager can get the pod from pod manager if needed. + podManager.AddPod(getTestPod()) + m := NewManager( + status.NewManager(&fake.Clientset{}, podManager), + results.NewManager(), + nil, // runner + refManager, + &record.FakeRecorder{}, + ).(*manager) + // Don't actually execute probes. + m.prober.exec = fakeExecProber{probe.Success, nil} + return m +} + +func newTestWorker(m *manager, probeType probeType, probeSpec api.Probe) *worker { + pod := getTestPod() + setTestProbe(pod, probeType, probeSpec) + return newWorker(m, probeType, pod, pod.Spec.Containers[0]) +} + +type fakeExecProber struct { + result probe.Result + err error +} + +func (p fakeExecProber) Probe(_ exec.Cmd) (probe.Result, string, error) { + return p.result, "", p.err +} + +type syncExecProber struct { + sync.RWMutex + fakeExecProber +} + +func (p *syncExecProber) set(result probe.Result, err error) { + p.Lock() + defer p.Unlock() + p.result = result + p.err = err +} + +func (p *syncExecProber) Probe(cmd exec.Cmd) (probe.Result, string, error) { + p.RLock() + defer p.RUnlock() + return p.fakeExecProber.Probe(cmd) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/prober/manager.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/prober/manager.go new file mode 100644 index 000000000000..01218a75df15 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/prober/manager.go @@ -0,0 +1,247 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package prober + +import ( + "sync" + + "github.com/golang/glog" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/client/record" + kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" + "k8s.io/kubernetes/pkg/kubelet/prober/results" + "k8s.io/kubernetes/pkg/kubelet/status" + "k8s.io/kubernetes/pkg/kubelet/util/format" + "k8s.io/kubernetes/pkg/types" + "k8s.io/kubernetes/pkg/util/sets" + "k8s.io/kubernetes/pkg/util/wait" +) + +// Manager manages pod probing. It creates a probe "worker" for every container that specifies a +// probe (AddPod). The worker periodically probes its assigned container and caches the results. The +// manager uses the cached probe results to set the appropriate Ready state in the PodStatus when +// requested (UpdatePodStatus).
Updating probe parameters is not currently supported. +// TODO: Move liveness probing out of the runtime, to here. +type Manager interface { + // AddPod creates new probe workers for every container probe. This should be called for every + // pod created. + AddPod(pod *api.Pod) + + // RemovePod handles cleaning up the removed pod state, including terminating probe workers and + // deleting cached results. + RemovePod(pod *api.Pod) + + // CleanupPods handles cleaning up pods which should no longer be running. + // It takes a list of "active pods" which should not be cleaned up. + CleanupPods(activePods []*api.Pod) + + // UpdatePodStatus modifies the given PodStatus with the appropriate Ready state for each + // container based on container running status, cached probe results and worker states. + UpdatePodStatus(types.UID, *api.PodStatus) + + // Start starts the Manager sync loops. + Start() +} + +type manager struct { + // Map of active workers for probes + workers map[probeKey]*worker + // Lock for accessing & mutating workers + workerLock sync.RWMutex + + // The statusManager cache provides pod IP and container IDs for probing. + statusManager status.Manager + + // readinessManager manages the results of readiness probes + readinessManager results.Manager + + // livenessManager manages the results of liveness probes + livenessManager results.Manager + + // prober executes the probe actions. + prober *prober +} + +func NewManager( + statusManager status.Manager, + livenessManager results.Manager, + runner kubecontainer.ContainerCommandRunner, + refManager *kubecontainer.RefManager, + recorder record.EventRecorder) Manager { + + prober := newProber(runner, refManager, recorder) + readinessManager := results.NewManager() + return &manager{ + statusManager: statusManager, + prober: prober, + readinessManager: readinessManager, + livenessManager: livenessManager, + workers: make(map[probeKey]*worker), + } +} + +// Start syncing probe status. This should only be called once. +func (m *manager) Start() { + // Start syncing readiness. + go wait.Forever(m.updateReadiness, 0) +} + +// Key uniquely identifying container probes +type probeKey struct { + podUID types.UID + containerName string + probeType probeType +} + +// Type of probe (readiness or liveness) +type probeType int + +const ( + liveness probeType = iota + readiness +) + +// For debugging. +func (t probeType) String() string { + switch t { + case readiness: + return "Readiness" + case liveness: + return "Liveness" + default: + return "UNKNOWN" + } +} + +func (m *manager) AddPod(pod *api.Pod) { + m.workerLock.Lock() + defer m.workerLock.Unlock() + + key := probeKey{podUID: pod.UID} + for _, c := range pod.Spec.Containers { + key.containerName = c.Name + + if c.ReadinessProbe != nil { + key.probeType = readiness + if _, ok := m.workers[key]; ok { + glog.Errorf("Readiness probe already exists! %v - %v", + format.Pod(pod), c.Name) + return + } + w := newWorker(m, readiness, pod, c) + m.workers[key] = w + go w.run() + } + + if c.LivenessProbe != nil { + key.probeType = liveness + if _, ok := m.workers[key]; ok { + glog.Errorf("Liveness probe already exists! 
%v - %v", + format.Pod(pod), c.Name) + return + } + w := newWorker(m, liveness, pod, c) + m.workers[key] = w + go w.run() + } + } +} + +func (m *manager) RemovePod(pod *api.Pod) { + m.workerLock.RLock() + defer m.workerLock.RUnlock() + + key := probeKey{podUID: pod.UID} + for _, c := range pod.Spec.Containers { + key.containerName = c.Name + for _, probeType := range [...]probeType{readiness, liveness} { + key.probeType = probeType + if worker, ok := m.workers[key]; ok { + worker.stop() + } + } + } +} + +func (m *manager) CleanupPods(activePods []*api.Pod) { + desiredPods := make(map[types.UID]sets.Empty) + for _, pod := range activePods { + desiredPods[pod.UID] = sets.Empty{} + } + + m.workerLock.RLock() + defer m.workerLock.RUnlock() + + for key, worker := range m.workers { + if _, ok := desiredPods[key.podUID]; !ok { + worker.stop() + } + } +} + +func (m *manager) UpdatePodStatus(podUID types.UID, podStatus *api.PodStatus) { + for i, c := range podStatus.ContainerStatuses { + var ready bool + if c.State.Running == nil { + ready = false + } else if result, ok := m.readinessManager.Get(kubecontainer.ParseContainerID(c.ContainerID)); ok { + ready = result == results.Success + } else { + // Check whether there is a probe which hasn't run yet. + _, exists := m.getWorker(podUID, c.Name, readiness) + ready = !exists + } + podStatus.ContainerStatuses[i].Ready = ready + } + // init containers are ready if they have exited with success or if a readiness probe has + // succeeded. + for i, c := range podStatus.InitContainerStatuses { + var ready bool + if c.State.Terminated != nil && c.State.Terminated.ExitCode == 0 { + ready = true + } + podStatus.InitContainerStatuses[i].Ready = ready + } +} + +func (m *manager) getWorker(podUID types.UID, containerName string, probeType probeType) (*worker, bool) { + m.workerLock.RLock() + defer m.workerLock.RUnlock() + worker, ok := m.workers[probeKey{podUID, containerName, probeType}] + return worker, ok +} + +// Called by the worker after exiting. +func (m *manager) removeWorker(podUID types.UID, containerName string, probeType probeType) { + m.workerLock.Lock() + defer m.workerLock.Unlock() + delete(m.workers, probeKey{podUID, containerName, probeType}) +} + +// workerCount returns the total number of probe workers. For testing. +func (m *manager) workerCount() int { + m.workerLock.Lock() + defer m.workerLock.Unlock() + return len(m.workers) +} + +func (m *manager) updateReadiness() { + update := <-m.readinessManager.Updates() + + ready := update.Result == results.Success + m.statusManager.SetContainerReadiness(update.PodUID, update.ContainerID, ready) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/prober/manager_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/prober/manager_test.go new file mode 100644 index 000000000000..1cf6e5c94421 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/prober/manager_test.go @@ -0,0 +1,411 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package prober + +import ( + "fmt" + "strconv" + "testing" + "time" + + "github.com/golang/glog" + "k8s.io/kubernetes/pkg/api" + kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" + "k8s.io/kubernetes/pkg/kubelet/prober/results" + "k8s.io/kubernetes/pkg/probe" + "k8s.io/kubernetes/pkg/types" + "k8s.io/kubernetes/pkg/util/runtime" + "k8s.io/kubernetes/pkg/util/wait" +) + +func init() { + runtime.ReallyCrash = true +} + +var defaultProbe *api.Probe = &api.Probe{ + Handler: api.Handler{ + Exec: &api.ExecAction{}, + }, + TimeoutSeconds: 1, + PeriodSeconds: 1, + SuccessThreshold: 1, + FailureThreshold: 3, +} + +func TestAddRemovePods(t *testing.T) { + noProbePod := api.Pod{ + ObjectMeta: api.ObjectMeta{ + UID: "no_probe_pod", + }, + Spec: api.PodSpec{ + Containers: []api.Container{{ + Name: "no_probe1", + }, { + Name: "no_probe2", + }}, + }, + } + + probePod := api.Pod{ + ObjectMeta: api.ObjectMeta{ + UID: "probe_pod", + }, + Spec: api.PodSpec{ + Containers: []api.Container{{ + Name: "no_probe1", + }, { + Name: "readiness", + ReadinessProbe: defaultProbe, + }, { + Name: "no_probe2", + }, { + Name: "liveness", + LivenessProbe: defaultProbe, + }}, + }, + } + + m := newTestManager() + defer cleanup(t, m) + if err := expectProbes(m, nil); err != nil { + t.Error(err) + } + + // Adding a pod with no probes should be a no-op. + m.AddPod(&noProbePod) + if err := expectProbes(m, nil); err != nil { + t.Error(err) + } + + // Adding a pod with probes. + m.AddPod(&probePod) + probePaths := []probeKey{ + {"probe_pod", "readiness", readiness}, + {"probe_pod", "liveness", liveness}, + } + if err := expectProbes(m, probePaths); err != nil { + t.Error(err) + } + + // Removing un-probed pod. + m.RemovePod(&noProbePod) + if err := expectProbes(m, probePaths); err != nil { + t.Error(err) + } + + // Removing probed pod. + m.RemovePod(&probePod) + if err := waitForWorkerExit(m, probePaths); err != nil { + t.Fatal(err) + } + if err := expectProbes(m, nil); err != nil { + t.Error(err) + } + + // Removing already removed pods should be a no-op. 
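+ // (RemovePod only stops workers it can find, and worker.stop is safe to call repeatedly)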
+ m.RemovePod(&probePod) + if err := expectProbes(m, nil); err != nil { + t.Error(err) + } +} + +func TestCleanupPods(t *testing.T) { + m := newTestManager() + defer cleanup(t, m) + podToCleanup := api.Pod{ + ObjectMeta: api.ObjectMeta{ + UID: "pod_cleanup", + }, + Spec: api.PodSpec{ + Containers: []api.Container{{ + Name: "prober1", + ReadinessProbe: defaultProbe, + }, { + Name: "prober2", + LivenessProbe: defaultProbe, + }}, + }, + } + podToKeep := api.Pod{ + ObjectMeta: api.ObjectMeta{ + UID: "pod_keep", + }, + Spec: api.PodSpec{ + Containers: []api.Container{{ + Name: "prober1", + ReadinessProbe: defaultProbe, + }, { + Name: "prober2", + LivenessProbe: defaultProbe, + }}, + }, + } + m.AddPod(&podToCleanup) + m.AddPod(&podToKeep) + + m.CleanupPods([]*api.Pod{&podToKeep}) + + removedProbes := []probeKey{ + {"pod_cleanup", "prober1", readiness}, + {"pod_cleanup", "prober2", liveness}, + } + expectedProbes := []probeKey{ + {"pod_keep", "prober1", readiness}, + {"pod_keep", "prober2", liveness}, + } + if err := waitForWorkerExit(m, removedProbes); err != nil { + t.Fatal(err) + } + if err := expectProbes(m, expectedProbes); err != nil { + t.Error(err) + } +} + +func TestCleanupRepeated(t *testing.T) { + m := newTestManager() + defer cleanup(t, m) + podTemplate := api.Pod{ + Spec: api.PodSpec{ + Containers: []api.Container{{ + Name: "prober1", + ReadinessProbe: defaultProbe, + LivenessProbe: defaultProbe, + }}, + }, + } + + const numTestPods = 100 + for i := 0; i < numTestPods; i++ { + pod := podTemplate + pod.UID = types.UID(strconv.Itoa(i)) + m.AddPod(&pod) + } + + for i := 0; i < 10; i++ { + m.CleanupPods([]*api.Pod{}) + } +} + +func TestUpdatePodStatus(t *testing.T) { + unprobed := api.ContainerStatus{ + Name: "unprobed_container", + ContainerID: "test://unprobed_container_id", + State: api.ContainerState{ + Running: &api.ContainerStateRunning{}, + }, + } + probedReady := api.ContainerStatus{ + Name: "probed_container_ready", + ContainerID: "test://probed_container_ready_id", + State: api.ContainerState{ + Running: &api.ContainerStateRunning{}, + }, + } + probedPending := api.ContainerStatus{ + Name: "probed_container_pending", + ContainerID: "test://probed_container_pending_id", + State: api.ContainerState{ + Running: &api.ContainerStateRunning{}, + }, + } + probedUnready := api.ContainerStatus{ + Name: "probed_container_unready", + ContainerID: "test://probed_container_unready_id", + State: api.ContainerState{ + Running: &api.ContainerStateRunning{}, + }, + } + terminated := api.ContainerStatus{ + Name: "terminated_container", + ContainerID: "test://terminated_container_id", + State: api.ContainerState{ + Terminated: &api.ContainerStateTerminated{}, + }, + } + podStatus := api.PodStatus{ + Phase: api.PodRunning, + ContainerStatuses: []api.ContainerStatus{ + unprobed, probedReady, probedPending, probedUnready, terminated, + }, + } + + m := newTestManager() + // no cleanup: using fake workers. + + // Setup probe "workers" and cached results. 
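+ // The workers are empty stubs; UpdatePodStatus only checks for their presence in the map.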
+ m.workers = map[probeKey]*worker{ + probeKey{testPodUID, unprobed.Name, liveness}: {}, + probeKey{testPodUID, probedReady.Name, readiness}: {}, + probeKey{testPodUID, probedPending.Name, readiness}: {}, + probeKey{testPodUID, probedUnready.Name, readiness}: {}, + probeKey{testPodUID, terminated.Name, readiness}: {}, + } + m.readinessManager.Set(kubecontainer.ParseContainerID(probedReady.ContainerID), results.Success, &api.Pod{}) + m.readinessManager.Set(kubecontainer.ParseContainerID(probedUnready.ContainerID), results.Failure, &api.Pod{}) + m.readinessManager.Set(kubecontainer.ParseContainerID(terminated.ContainerID), results.Success, &api.Pod{}) + + m.UpdatePodStatus(testPodUID, &podStatus) + + expectedReadiness := map[probeKey]bool{ + probeKey{testPodUID, unprobed.Name, readiness}: true, + probeKey{testPodUID, probedReady.Name, readiness}: true, + probeKey{testPodUID, probedPending.Name, readiness}: false, + probeKey{testPodUID, probedUnready.Name, readiness}: false, + probeKey{testPodUID, terminated.Name, readiness}: false, + } + for _, c := range podStatus.ContainerStatuses { + expected, ok := expectedReadiness[probeKey{testPodUID, c.Name, readiness}] + if !ok { + t.Fatalf("Missing expectation for test case: %v", c.Name) + } + if expected != c.Ready { + t.Errorf("Unexpected readiness for container %v: Expected %v but got %v", + c.Name, expected, c.Ready) + } + } +} + +func TestUpdateReadiness(t *testing.T) { + testPod := getTestPod() + setTestProbe(testPod, readiness, api.Probe{}) + m := newTestManager() + defer cleanup(t, m) + + // Start syncing readiness without leaking goroutine. + stopCh := make(chan struct{}) + go wait.Until(m.updateReadiness, 0, stopCh) + defer func() { + close(stopCh) + // Send an update to exit updateReadiness() + m.readinessManager.Set(kubecontainer.ContainerID{}, results.Success, &api.Pod{}) + }() + + exec := syncExecProber{} + exec.set(probe.Success, nil) + m.prober.exec = &exec + + m.statusManager.SetPodStatus(testPod, getTestRunningStatus()) + + m.AddPod(testPod) + probePaths := []probeKey{{testPodUID, testContainerName, readiness}} + if err := expectProbes(m, probePaths); err != nil { + t.Error(err) + } + + // Wait for ready status. + if err := waitForReadyStatus(m, true); err != nil { + t.Error(err) + } + + // Prober fails. + exec.set(probe.Failure, nil) + + // Wait for failed status. + if err := waitForReadyStatus(m, false); err != nil { + t.Error(err) + } +} + +func expectProbes(m *manager, expectedProbes []probeKey) error { + m.workerLock.RLock() + defer m.workerLock.RUnlock() + + var unexpected []probeKey + missing := make([]probeKey, len(expectedProbes)) + copy(missing, expectedProbes) + +outer: + for probePath := range m.workers { + for i, expectedPath := range missing { + if probePath == expectedPath { + missing = append(missing[:i], missing[i+1:]...) + continue outer + } + } + unexpected = append(unexpected, probePath) + } + + if len(missing) == 0 && len(unexpected) == 0 { + return nil // Yay! + } + + return fmt.Errorf("Unexpected probes: %v; Missing probes: %v;", unexpected, missing) +} + +const interval = 1 * time.Second + +// Wait for the given workers to exit & clean up. +func waitForWorkerExit(m *manager, workerPaths []probeKey) error { + for _, w := range workerPaths { + condition := func() (bool, error) { + _, exists := m.getWorker(w.podUID, w.containerName, w.probeType) + return !exists, nil + } + if exited, _ := condition(); exited { + continue // Already exited, no need to poll. 
+ } + glog.Infof("Polling %v", w) + if err := wait.Poll(interval, wait.ForeverTestTimeout, condition); err != nil { + return err + } + } + + return nil +} + +// waitForReadyStatus polls until the test pod's single container reports the given ready state. +func waitForReadyStatus(m *manager, ready bool) error { + condition := func() (bool, error) { + status, ok := m.statusManager.GetPodStatus(testPodUID) + if !ok { + return false, fmt.Errorf("status not found: %q", testPodUID) + } + if len(status.ContainerStatuses) != 1 { + return false, fmt.Errorf("expected single container, found %d", len(status.ContainerStatuses)) + } + if status.ContainerStatuses[0].ContainerID != testContainerID.String() { + return false, fmt.Errorf("expected container %q, found %q", + testContainerID, status.ContainerStatuses[0].ContainerID) + } + return status.ContainerStatuses[0].Ready == ready, nil + } + glog.Infof("Polling for ready state %v", ready) + if err := wait.Poll(interval, wait.ForeverTestTimeout, condition); err != nil { + return err + } + + return nil +} + +// cleanup running probes to avoid leaking goroutines. +func cleanup(t *testing.T, m *manager) { + m.CleanupPods(nil) + + condition := func() (bool, error) { + workerCount := m.workerCount() + if workerCount > 0 { + glog.Infof("Waiting for %d workers to exit...", workerCount) + } + return workerCount == 0, nil + } + if exited, _ := condition(); exited { + return // Already exited, no need to poll. + } + if err := wait.Poll(interval, wait.ForeverTestTimeout, condition); err != nil { + t.Fatalf("Error during cleanup: %v", err) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/prober/prober.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/prober/prober.go new file mode 100644 index 000000000000..c244026c3031 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/prober/prober.go @@ -0,0 +1,245 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package prober + +import ( + "bytes" + "fmt" + "net" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/client/record" + kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" + "k8s.io/kubernetes/pkg/kubelet/prober/results" + "k8s.io/kubernetes/pkg/kubelet/util/format" + "k8s.io/kubernetes/pkg/kubelet/util/ioutils" + "k8s.io/kubernetes/pkg/probe" + execprobe "k8s.io/kubernetes/pkg/probe/exec" + httprobe "k8s.io/kubernetes/pkg/probe/http" + tcprobe "k8s.io/kubernetes/pkg/probe/tcp" + "k8s.io/kubernetes/pkg/util/exec" + "k8s.io/kubernetes/pkg/util/intstr" + + "github.com/golang/glog" +) + +const maxProbeRetries = 3 + +// Prober helps to check the liveness/readiness of a container. +type prober struct { + exec execprobe.ExecProber + http httprobe.HTTPProber + tcp tcprobe.TCPProber + runner kubecontainer.ContainerCommandRunner + + refManager *kubecontainer.RefManager + recorder record.EventRecorder +} + +// NewProber creates a Prober; it takes a command runner and
+func newProber( + runner kubecontainer.ContainerCommandRunner, + refManager *kubecontainer.RefManager, + recorder record.EventRecorder) *prober { + + return &prober{ + exec: execprobe.New(), + http: httprobe.New(), + tcp: tcprobe.New(), + runner: runner, + refManager: refManager, + recorder: recorder, + } +} + +// probe probes the container. +func (pb *prober) probe(probeType probeType, pod *api.Pod, status api.PodStatus, container api.Container, containerID kubecontainer.ContainerID) (results.Result, error) { + var probeSpec *api.Probe + switch probeType { + case readiness: + probeSpec = container.ReadinessProbe + case liveness: + probeSpec = container.LivenessProbe + default: + return results.Failure, fmt.Errorf("Unknown probe type: %q", probeType) + } + + ctrName := fmt.Sprintf("%s:%s", format.Pod(pod), container.Name) + if probeSpec == nil { + glog.Warningf("%s probe for %s is nil", probeType, ctrName) + return results.Success, nil + } + + result, output, err := pb.runProbeWithRetries(probeSpec, pod, status, container, containerID, maxProbeRetries) + if err != nil || result != probe.Success { + // Probe failed in one way or another. + ref, hasRef := pb.refManager.GetRef(containerID) + if !hasRef { + glog.Warningf("No ref for container %q (%s)", containerID.String(), ctrName) + } + if err != nil { + glog.V(1).Infof("%s probe for %q errored: %v", probeType, ctrName, err) + if hasRef { + pb.recorder.Eventf(ref, api.EventTypeWarning, kubecontainer.ContainerUnhealthy, "%s probe errored: %v", probeType, err) + } + } else { // result != probe.Success + glog.V(1).Infof("%s probe for %q failed (%v): %s", probeType, ctrName, result, output) + if hasRef { + pb.recorder.Eventf(ref, api.EventTypeWarning, kubecontainer.ContainerUnhealthy, "%s probe failed: %s", probeType, output) + } + } + return results.Failure, err + } + glog.V(3).Infof("%s probe for %q succeeded", probeType, ctrName) + return results.Success, nil +} + +// runProbeWithRetries tries to probe the container in a finite loop; it returns the last result +// if it never succeeds. +func (pb *prober) runProbeWithRetries(p *api.Probe, pod *api.Pod, status api.PodStatus, container api.Container, containerID kubecontainer.ContainerID, retries int) (probe.Result, string, error) { + var err error + var result probe.Result + var output string + for i := 0; i < retries; i++ { + result, output, err = pb.runProbe(p, pod, status, container, containerID) + if err == nil { + return result, output, nil + } + } + return result, output, err +} + +// buildHeader takes a list of HTTPHeader name/value +// pairs and returns a populated string->[]string http.Header map.
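+// Repeated header names accumulate into a single multi-valued entry.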
+func buildHeader(headerList []api.HTTPHeader) http.Header { + headers := make(http.Header) + for _, header := range headerList { + headers[header.Name] = append(headers[header.Name], header.Value) + } + return headers +} + +func (pb *prober) runProbe(p *api.Probe, pod *api.Pod, status api.PodStatus, container api.Container, containerID kubecontainer.ContainerID) (probe.Result, string, error) { + timeout := time.Duration(p.TimeoutSeconds) * time.Second + if p.Exec != nil { + glog.V(4).Infof("Exec-Probe Pod: %v, Container: %v, Command: %v", pod, container, p.Exec.Command) + return pb.exec.Probe(pb.newExecInContainer(container, containerID, p.Exec.Command)) + } + if p.HTTPGet != nil { + scheme := strings.ToLower(string(p.HTTPGet.Scheme)) + host := p.HTTPGet.Host + if host == "" { + host = status.PodIP + } + port, err := extractPort(p.HTTPGet.Port, container) + if err != nil { + return probe.Unknown, "", err + } + path := p.HTTPGet.Path + glog.V(4).Infof("HTTP-Probe Host: %v://%v, Port: %v, Path: %v", scheme, host, port, path) + url := formatURL(scheme, host, port, path) + headers := buildHeader(p.HTTPGet.HTTPHeaders) + glog.V(4).Infof("HTTP-Probe Headers: %v", headers) + return pb.http.Probe(url, headers, timeout) + } + if p.TCPSocket != nil { + port, err := extractPort(p.TCPSocket.Port, container) + if err != nil { + return probe.Unknown, "", err + } + glog.V(4).Infof("TCP-Probe PodIP: %v, Port: %v, Timeout: %v", status.PodIP, port, timeout) + return pb.tcp.Probe(status.PodIP, port, timeout) + } + glog.Warningf("Failed to find probe builder for container: %v", container) + return probe.Unknown, "", fmt.Errorf("Missing probe handler for %s:%s", format.Pod(pod), container.Name) +} + +func extractPort(param intstr.IntOrString, container api.Container) (int, error) { + port := -1 + var err error + switch param.Type { + case intstr.Int: + port = param.IntValue() + case intstr.String: + if port, err = findPortByName(container, param.StrVal); err != nil { + // Last ditch effort - maybe it was an int stored as string? + if port, err = strconv.Atoi(param.StrVal); err != nil { + return port, err + } + } + default: + return port, fmt.Errorf("IntOrString had no kind: %+v", param) + } + if port > 0 && port < 65536 { + return port, nil + } + return port, fmt.Errorf("invalid port number: %v", port) +} + +// findPortByName is a helper function to look up a port in a container by name. +func findPortByName(container api.Container, portName string) (int, error) { + for _, port := range container.Ports { + if port.Name == portName { + return int(port.ContainerPort), nil + } + } + return 0, fmt.Errorf("port %s not found", portName) +} + +// formatURL formats a URL from args. For testability. 
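+// net.JoinHostPort is used so that IPv6 hosts are bracketed correctly.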
+func formatURL(scheme string, host string, port int, path string) *url.URL { + return &url.URL{ + Scheme: scheme, + Host: net.JoinHostPort(host, strconv.Itoa(port)), + Path: path, + } +} + +type execInContainer struct { + run func() ([]byte, error) +} + +func (p *prober) newExecInContainer(container api.Container, containerID kubecontainer.ContainerID, cmd []string) exec.Cmd { + return execInContainer{func() ([]byte, error) { + var buffer bytes.Buffer + output := ioutils.WriteCloserWrapper(&buffer) + err := p.runner.ExecInContainer(containerID, cmd, nil, output, output, false) + if err != nil { + return nil, err + } + + return buffer.Bytes(), nil + }} +} + +func (eic execInContainer) CombinedOutput() ([]byte, error) { + return eic.run() +} + +func (eic execInContainer) Output() ([]byte, error) { + return nil, fmt.Errorf("unimplemented") +} + +func (eic execInContainer) SetDir(dir string) { + //unimplemented +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/prober/prober_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/prober/prober_test.go new file mode 100644 index 000000000000..add29804f22a --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/prober/prober_test.go @@ -0,0 +1,276 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package prober + +import ( + "errors" + "fmt" + "net/http" + "reflect" + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/client/record" + kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" + "k8s.io/kubernetes/pkg/kubelet/prober/results" + "k8s.io/kubernetes/pkg/probe" + "k8s.io/kubernetes/pkg/util/intstr" +) + +func TestFormatURL(t *testing.T) { + testCases := []struct { + scheme string + host string + port int + path string + result string + }{ + {"http", "localhost", 93, "", "http://localhost:93"}, + {"https", "localhost", 93, "/path", "https://localhost:93/path"}, + } + for _, test := range testCases { + url := formatURL(test.scheme, test.host, test.port, test.path) + if url.String() != test.result { + t.Errorf("Expected %s, got %s", test.result, url.String()) + } + } +} + +func TestFindPortByName(t *testing.T) { + container := api.Container{ + Ports: []api.ContainerPort{ + { + Name: "foo", + ContainerPort: 8080, + }, + { + Name: "bar", + ContainerPort: 9000, + }, + }, + } + want := 8080 + got, err := findPortByName(container, "foo") + if got != want || err != nil { + t.Errorf("Expected %v, got %v, err: %v", want, got, err) + } +} + +func TestGetURLParts(t *testing.T) { + testCases := []struct { + probe *api.HTTPGetAction + ok bool + host string + port int + path string + }{ + {&api.HTTPGetAction{Host: "", Port: intstr.FromInt(-1), Path: ""}, false, "", -1, ""}, + {&api.HTTPGetAction{Host: "", Port: intstr.FromString(""), Path: ""}, false, "", -1, ""}, + {&api.HTTPGetAction{Host: "", Port: intstr.FromString("-1"), Path: ""}, false, "", -1, ""}, + {&api.HTTPGetAction{Host: "", Port: intstr.FromString("not-found"), Path: ""}, false, "", -1, ""}, + {&api.HTTPGetAction{Host: "", Port: intstr.FromString("found"), Path: ""}, true, "127.0.0.1", 93, ""}, + {&api.HTTPGetAction{Host: "", Port: intstr.FromInt(76), Path: ""}, true, "127.0.0.1", 76, ""}, + {&api.HTTPGetAction{Host: "", Port: intstr.FromString("118"), Path: ""}, true, "127.0.0.1", 118, ""}, + {&api.HTTPGetAction{Host: "hostname", Port: intstr.FromInt(76), Path: "path"}, true, "hostname", 76, "path"}, + } + + for _, test := range testCases { + state := api.PodStatus{PodIP: "127.0.0.1"} + container := api.Container{ + Ports: []api.ContainerPort{{Name: "found", ContainerPort: 93}}, + LivenessProbe: &api.Probe{ + Handler: api.Handler{ + HTTPGet: test.probe, + }, + }, + } + + scheme := test.probe.Scheme + if scheme == "" { + scheme = api.URISchemeHTTP + } + host := test.probe.Host + if host == "" { + host = state.PodIP + } + port, err := extractPort(test.probe.Port, container) + if test.ok && err != nil { + t.Errorf("Unexpected error: %v", err) + } + path := test.probe.Path + + if !test.ok && err == nil { + t.Errorf("Expected error for %+v, got %s%s:%d/%s", test, scheme, host, port, path) + } + if test.ok { + if host != test.host || port != test.port || path != test.path { + t.Errorf("Expected %s:%d/%s, got %s:%d/%s", + test.host, test.port, test.path, host, port, path) + } + } + } +} + +func TestGetTCPAddrParts(t *testing.T) { + testCases := []struct { + probe *api.TCPSocketAction + ok bool + host string + port int + }{ + {&api.TCPSocketAction{Port: intstr.FromInt(-1)}, false, "", -1}, + {&api.TCPSocketAction{Port: intstr.FromString("")}, false, "", -1}, + {&api.TCPSocketAction{Port: intstr.FromString("-1")}, false, "", -1}, + {&api.TCPSocketAction{Port: intstr.FromString("not-found")}, false, "", -1}, + {&api.TCPSocketAction{Port: intstr.FromString("found")}, true, "1.2.3.4", 93}, + 
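+ // Numeric strings that match no named port fall back to strconv.Atoi (see the "118" case below).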
{&api.TCPSocketAction{Port: intstr.FromInt(76)}, true, "1.2.3.4", 76}, + {&api.TCPSocketAction{Port: intstr.FromString("118")}, true, "1.2.3.4", 118}, + } + + for _, test := range testCases { + host := "1.2.3.4" + container := api.Container{ + Ports: []api.ContainerPort{{Name: "found", ContainerPort: 93}}, + LivenessProbe: &api.Probe{ + Handler: api.Handler{ + TCPSocket: test.probe, + }, + }, + } + port, err := extractPort(test.probe.Port, container) + if !test.ok && err == nil { + t.Errorf("Expected error for %+v, got %s:%d", test, host, port) + } + if test.ok && err != nil { + t.Errorf("Unexpected error: %v", err) + } + if test.ok { + if host != test.host || port != test.port { + t.Errorf("Expected %s:%d, got %s:%d", test.host, test.port, host, port) + } + } + } +} + +func TestHTTPHeaders(t *testing.T) { + testCases := []struct { + input []api.HTTPHeader + output http.Header + }{ + {[]api.HTTPHeader{}, http.Header{}}, + {[]api.HTTPHeader{ + {"X-Muffins-Or-Cupcakes", "Muffins"}, + }, http.Header{"X-Muffins-Or-Cupcakes": {"Muffins"}}}, + {[]api.HTTPHeader{ + {"X-Muffins-Or-Cupcakes", "Muffins"}, + {"X-Muffins-Or-Plumcakes", "Muffins!"}, + }, http.Header{"X-Muffins-Or-Cupcakes": {"Muffins"}, + "X-Muffins-Or-Plumcakes": {"Muffins!"}}}, + {[]api.HTTPHeader{ + {"X-Muffins-Or-Cupcakes", "Muffins"}, + {"X-Muffins-Or-Cupcakes", "Cupcakes, too"}, + }, http.Header{"X-Muffins-Or-Cupcakes": {"Muffins", "Cupcakes, too"}}}, + } + for _, test := range testCases { + headers := buildHeader(test.input) + if !reflect.DeepEqual(test.output, headers) { + t.Errorf("Expected %#v, got %#v", test.output, headers) + } + } +} + +func TestProbe(t *testing.T) { + prober := &prober{ + refManager: kubecontainer.NewRefManager(), + recorder: &record.FakeRecorder{}, + } + containerID := kubecontainer.ContainerID{Type: "test", ID: "foobar"} + + execProbe := &api.Probe{ + Handler: api.Handler{ + Exec: &api.ExecAction{}, + }, + } + tests := []struct { + probe *api.Probe + execError bool + expectError bool + execResult probe.Result + expectedResult results.Result + }{ + { // No probe + probe: nil, + expectedResult: results.Success, + }, + { // No handler + probe: &api.Probe{}, + expectError: true, + expectedResult: results.Failure, + }, + { // Probe fails + probe: execProbe, + execResult: probe.Failure, + expectedResult: results.Failure, + }, + { // Probe succeeds + probe: execProbe, + execResult: probe.Success, + expectedResult: results.Success, + }, + { // Probe result is unknown + probe: execProbe, + execResult: probe.Unknown, + expectedResult: results.Failure, + }, + { // Probe has an error + probe: execProbe, + execError: true, + expectError: true, + execResult: probe.Unknown, + expectedResult: results.Failure, + }, + } + + for i, test := range tests { + for _, probeType := range [...]probeType{liveness, readiness} { + testID := fmt.Sprintf("%d-%s", i, probeType) + testContainer := api.Container{} + switch probeType { + case liveness: + testContainer.LivenessProbe = test.probe + case readiness: + testContainer.ReadinessProbe = test.probe + } + if test.execError { + prober.exec = fakeExecProber{test.execResult, errors.New("exec error")} + } else { + prober.exec = fakeExecProber{test.execResult, nil} + } + + result, err := prober.probe(probeType, &api.Pod{}, api.PodStatus{}, testContainer, containerID) + if test.expectError && err == nil { + t.Errorf("[%s] Expected probe error but no error was returned.", testID) + } + if !test.expectError && err != nil { + t.Errorf("[%s] Didn't expect probe error but got: %v", testID, 
err) + } + if test.expectedResult != result { + t.Errorf("[%s] Expected result to be %v but was %v", testID, test.expectedResult, result) + } + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/prober/results/results_manager.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/prober/results/results_manager.go new file mode 100644 index 000000000000..9f9b1938d6fe --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/prober/results/results_manager.go @@ -0,0 +1,121 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package results + +import ( + "sync" + + "k8s.io/kubernetes/pkg/api" + kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" + "k8s.io/kubernetes/pkg/types" +) + +// Manager provides a probe results cache and channel of updates. +type Manager interface { + // Get returns the cached result for the container with the given ID. + Get(kubecontainer.ContainerID) (Result, bool) + // Set sets the cached result for the container with the given ID. + // The pod is only included to be sent with the update. + Set(kubecontainer.ContainerID, Result, *api.Pod) + // Remove clears the cached result for the container with the given ID. + Remove(kubecontainer.ContainerID) + // Updates creates a channel that receives an Update whenever a result changes (but not + // when it is removed). + // NOTE: The current implementation only supports a single updates channel. + Updates() <-chan Update +} + +// Result is the type for probe results. +type Result bool + +const ( + Success Result = true + Failure Result = false +) + +func (r Result) String() string { + switch r { + case Success: + return "Success" + case Failure: + return "Failure" + default: + return "UNKNOWN" + } +} + +// Update describes a probe result change sent over the Updates channel. +type Update struct { + ContainerID kubecontainer.ContainerID + Result Result + PodUID types.UID +} + +// Manager implementation. +type manager struct { + // guards the cache + sync.RWMutex + // map of container ID -> probe Result + cache map[kubecontainer.ContainerID]Result + // channel of updates + updates chan Update +} + +var _ Manager = &manager{} + +// NewManager creates and returns an empty results manager. +func NewManager() Manager { + return &manager{ + cache: make(map[kubecontainer.ContainerID]Result), + updates: make(chan Update, 20), + } +} + +func (m *manager) Get(id kubecontainer.ContainerID) (Result, bool) { + m.RLock() + defer m.RUnlock() + result, found := m.cache[id] + return result, found +} + +func (m *manager) Set(id kubecontainer.ContainerID, result Result, pod *api.Pod) { + if m.setInternal(id, result) { + m.updates <- Update{id, result, pod.UID} + } +} + +// Internal helper for locked portion of set. Returns whether an update should be sent.
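+// An update is sent only when the result is new or has changed, keeping the updates channel quiet for steady states.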
+func (m *manager) setInternal(id kubecontainer.ContainerID, result Result) bool { + m.Lock() + defer m.Unlock() + prev, exists := m.cache[id] + if !exists || prev != result { + m.cache[id] = result + return true + } + return false +} + +func (m *manager) Remove(id kubecontainer.ContainerID) { + m.Lock() + defer m.Unlock() + delete(m.cache, id) +} + +func (m *manager) Updates() <-chan Update { + return m.updates +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/prober/results/results_manager_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/prober/results/results_manager_test.go new file mode 100644 index 000000000000..9cc513598750 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/prober/results/results_manager_test.go @@ -0,0 +1,98 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package results + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + "k8s.io/kubernetes/pkg/api" + kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" + "k8s.io/kubernetes/pkg/util/wait" +) + +func TestCacheOperations(t *testing.T) { + m := NewManager() + + unsetID := kubecontainer.ContainerID{Type: "test", ID: "unset"} + setID := kubecontainer.ContainerID{Type: "test", ID: "set"} + + _, found := m.Get(unsetID) + assert.False(t, found, "unset result found") + + m.Set(setID, Success, &api.Pod{}) + result, found := m.Get(setID) + assert.True(t, result == Success, "set result") + assert.True(t, found, "set result found") + + m.Remove(setID) + _, found = m.Get(setID) + assert.False(t, found, "removed result found") +} + +func TestUpdates(t *testing.T) { + m := NewManager() + + pod := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "test-pod"}} + fooID := kubecontainer.ContainerID{Type: "test", ID: "foo"} + barID := kubecontainer.ContainerID{Type: "test", ID: "bar"} + + expectUpdate := func(expected Update, msg string) { + select { + case u := <-m.Updates(): + if expected != u { + t.Errorf("Expected update %v, received %v: %s", expected, u, msg) + } + case <-time.After(wait.ForeverTestTimeout): + t.Errorf("Timed out waiting for update %v: %s", expected, msg) + } + } + + expectNoUpdate := func(msg string) { + // NOTE: Since updates are accumulated asynchronously, this method is not guaranteed to fail + // when it should. In the event it misses a failure, the following calls to expectUpdate should + // still fail. + select { + case u := <-m.Updates(): + t.Errorf("Unexpected update %v: %s", u, msg) + default: + // Pass + } + } + + // New result should always push an update. + m.Set(fooID, Success, pod) + expectUpdate(Update{fooID, Success, pod.UID}, "new success") + + m.Set(barID, Failure, pod) + expectUpdate(Update{barID, Failure, pod.UID}, "new failure") + + // Unchanged results should not send an update. + m.Set(fooID, Success, pod) + expectNoUpdate("unchanged foo") + + m.Set(barID, Failure, pod) + expectNoUpdate("unchanged bar") + + // Changed results should send an update.
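+ // (each ID flips from its previously cached result, so setInternal reports a change)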
+ m.Set(fooID, Failure, pod) + expectUpdate(Update{fooID, Failure, pod.UID}, "changed foo") + + m.Set(barID, Success, pod) + expectUpdate(Update{barID, Success, pod.UID}, "changed bar") +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/prober/testing/fake_manager.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/prober/testing/fake_manager.go new file mode 100644 index 000000000000..b0d4e558910b --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/prober/testing/fake_manager.go @@ -0,0 +1,36 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testing + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/types" +) + +type FakeManager struct{} + +// Unused methods. +func (_ FakeManager) AddPod(_ *api.Pod) {} +func (_ FakeManager) RemovePod(_ *api.Pod) {} +func (_ FakeManager) CleanupPods(_ []*api.Pod) {} +func (_ FakeManager) Start() {} + +func (_ FakeManager) UpdatePodStatus(_ types.UID, podStatus *api.PodStatus) { + for i := range podStatus.ContainerStatuses { + podStatus.ContainerStatuses[i].Ready = true + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/prober/worker.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/prober/worker.go new file mode 100644 index 000000000000..6edd3daa38e7 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/prober/worker.go @@ -0,0 +1,225 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package prober + +import ( + "math/rand" + "time" + + "github.com/golang/glog" + "k8s.io/kubernetes/pkg/api" + kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" + "k8s.io/kubernetes/pkg/kubelet/prober/results" + "k8s.io/kubernetes/pkg/kubelet/util/format" + "k8s.io/kubernetes/pkg/util/runtime" +) + +// worker handles the periodic probing of its assigned container. Each worker has a go-routine +// associated with it which runs the probe loop until the container permanently terminates, or the +// stop channel is closed. The worker uses the probe Manager's statusManager to get up-to-date +// container IDs. +type worker struct { + // Channel for stopping the probe. + stopCh chan struct{} + + // The pod containing this probe (read-only) + pod *api.Pod + + // The container to probe (read-only) + container api.Container + + // Describes the probe configuration (read-only) + spec *api.Probe + + // The type of the worker. 
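+ // It selects the probe spec, results manager, and initial value in newWorker below.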
+	probeType probeType
+
+	// The probe value during the initial delay.
+	initialValue results.Result
+
+	// Where to store this worker's results.
+	resultsManager results.Manager
+	probeManager   *manager
+
+	// The last known container ID for this worker.
+	containerID kubecontainer.ContainerID
+	// The last probe result for this worker.
+	lastResult results.Result
+	// How many times in a row the probe has returned the same result.
+	resultRun int
+
+	// If set, skip probing.
+	onHold bool
+}
+
+// newWorker creates a new probe worker. The caller is responsible for starting
+// the probe loop by calling run().
+func newWorker(
+	m *manager,
+	probeType probeType,
+	pod *api.Pod,
+	container api.Container) *worker {
+
+	w := &worker{
+		stopCh:       make(chan struct{}, 1), // Buffer so stop() can be non-blocking.
+		pod:          pod,
+		container:    container,
+		probeType:    probeType,
+		probeManager: m,
+	}
+
+	switch probeType {
+	case readiness:
+		w.spec = container.ReadinessProbe
+		w.resultsManager = m.readinessManager
+		w.initialValue = results.Failure
+	case liveness:
+		w.spec = container.LivenessProbe
+		w.resultsManager = m.livenessManager
+		w.initialValue = results.Success
+	}
+
+	return w
+}
+
+// run periodically probes the container.
+func (w *worker) run() {
+	probeTickerPeriod := time.Duration(w.spec.PeriodSeconds) * time.Second
+	probeTicker := time.NewTicker(probeTickerPeriod)
+
+	defer func() {
+		// Clean up.
+		probeTicker.Stop()
+		if !w.containerID.IsEmpty() {
+			w.resultsManager.Remove(w.containerID)
+		}
+
+		w.probeManager.removeWorker(w.pod.UID, w.container.Name, w.probeType)
+	}()
+
+	// If the kubelet restarted, the probes could be started in rapid succession.
+	// Let the worker wait for a random portion of probeTickerPeriod before probing.
+	time.Sleep(time.Duration(rand.Float64() * float64(probeTickerPeriod)))
+
+probeLoop:
+	for w.doProbe() {
+		// Wait for next probe tick.
+		select {
+		case <-w.stopCh:
+			break probeLoop
+		case <-probeTicker.C:
+			// continue
+		}
+	}
+}
+
+// stop stops the probe worker. The worker handles cleanup and removes itself from its manager.
+// It is safe to call stop multiple times.
+func (w *worker) stop() {
+	select {
+	case w.stopCh <- struct{}{}:
+	default: // Non-blocking.
+	}
+}
+
+// doProbe probes the container once and records the result.
+// Returns whether the worker should continue.
+func (w *worker) doProbe() (keepGoing bool) {
+	defer runtime.HandleCrash(func(_ interface{}) { keepGoing = true })
+
+	status, ok := w.probeManager.statusManager.GetPodStatus(w.pod.UID)
+	if !ok {
+		// Either the pod has not been created yet, or it was already deleted.
+		glog.V(3).Infof("No status for pod: %v", format.Pod(w.pod))
+		return true
+	}
+
+	// Worker should terminate if pod is terminated.
+	if status.Phase == api.PodFailed || status.Phase == api.PodSucceeded {
+		glog.V(3).Infof("Pod %v %v, exiting probe worker",
+			format.Pod(w.pod), status.Phase)
+		return false
+	}
+
+	c, ok := api.GetContainerStatus(status.ContainerStatuses, w.container.Name)
+	if !ok || len(c.ContainerID) == 0 {
+		// Either the container has not been created yet, or it was deleted.
+		glog.V(3).Infof("Probe target container not found: %v - %v",
+			format.Pod(w.pod), w.container.Name)
+		return true // Wait for more information.
+	}
+
+	if w.containerID.String() != c.ContainerID {
+		if !w.containerID.IsEmpty() {
+			w.resultsManager.Remove(w.containerID)
+		}
+		w.containerID = kubecontainer.ParseContainerID(c.ContainerID)
+		w.resultsManager.Set(w.containerID, w.initialValue, w.pod)
+		// We've got a new container; resume probing.
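+		// Editor's note: a changed container ID means the previous container was
+		// replaced (e.g. restarted after a liveness failure), so any hold placed
+		// on this worker is lifted below and probing resumes against the new
+		// container.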
+		w.onHold = false
+	}
+
+	if w.onHold {
+		// Worker is on hold until there is a new container.
+		return true
+	}
+
+	if c.State.Running == nil {
+		glog.V(3).Infof("Non-running container probed: %v - %v",
+			format.Pod(w.pod), w.container.Name)
+		if !w.containerID.IsEmpty() {
+			w.resultsManager.Set(w.containerID, results.Failure, w.pod)
+		}
+		// Abort if the container will not be restarted.
+		return c.State.Terminated == nil ||
+			w.pod.Spec.RestartPolicy != api.RestartPolicyNever
+	}
+
+	if int32(time.Since(c.State.Running.StartedAt.Time).Seconds()) < w.spec.InitialDelaySeconds {
+		return true
+	}
+
+	result, err := w.probeManager.prober.probe(w.probeType, w.pod, status, w.container, w.containerID)
+	if err != nil {
+		// Prober error, throw away the result.
+		return true
+	}
+
+	if w.lastResult == result {
+		w.resultRun++
+	} else {
+		w.lastResult = result
+		w.resultRun = 1
+	}
+
+	if (result == results.Failure && w.resultRun < int(w.spec.FailureThreshold)) ||
+		(result == results.Success && w.resultRun < int(w.spec.SuccessThreshold)) {
+		// Success or failure is below threshold - leave the probe state unchanged.
+		return true
+	}
+
+	w.resultsManager.Set(w.containerID, result, w.pod)
+
+	if w.probeType == liveness && result == results.Failure {
+		// The container failed a liveness check and will need to be restarted.
+		// Stop probing until we see a new container ID. This is to reduce the
+		// chance of hitting #21751, where running `docker exec` when a
+		// container is being stopped may lead to corrupted container state.
+		w.onHold = true
+	}
+
+	return true
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/prober/worker_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/prober/worker_test.go
new file mode 100644
index 000000000000..2b23ad36aa9b
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/prober/worker_test.go
@@ -0,0 +1,342 @@
+/*
+Copyright 2015 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package prober
+
+import (
+	"fmt"
+	"testing"
+	"time"
+
+	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/api/unversioned"
+	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
+	"k8s.io/kubernetes/pkg/client/record"
+	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
+	kubepod "k8s.io/kubernetes/pkg/kubelet/pod"
+	"k8s.io/kubernetes/pkg/kubelet/prober/results"
+	"k8s.io/kubernetes/pkg/kubelet/status"
+	"k8s.io/kubernetes/pkg/probe"
+	"k8s.io/kubernetes/pkg/util/exec"
+	"k8s.io/kubernetes/pkg/util/runtime"
+	"k8s.io/kubernetes/pkg/util/wait"
+)
+
+func init() {
+	runtime.ReallyCrash = true
+}
+
+func TestDoProbe(t *testing.T) {
+	m := newTestManager()
+
+	// Test statuses.
+ runningStatus := getTestRunningStatus() + pendingStatus := getTestRunningStatus() + pendingStatus.ContainerStatuses[0].State.Running = nil + terminatedStatus := getTestRunningStatus() + terminatedStatus.ContainerStatuses[0].State.Running = nil + terminatedStatus.ContainerStatuses[0].State.Terminated = &api.ContainerStateTerminated{ + StartedAt: unversioned.Now(), + } + otherStatus := getTestRunningStatus() + otherStatus.ContainerStatuses[0].Name = "otherContainer" + failedStatus := getTestRunningStatus() + failedStatus.Phase = api.PodFailed + + tests := []struct { + probe api.Probe + podStatus *api.PodStatus + expectContinue bool + expectSet bool + expectedResult results.Result + }{ + { // No status. + expectContinue: true, + }, + { // Pod failed + podStatus: &failedStatus, + }, + { // No container status + podStatus: &otherStatus, + expectContinue: true, + }, + { // Container waiting + podStatus: &pendingStatus, + expectContinue: true, + expectSet: true, + }, + { // Container terminated + podStatus: &terminatedStatus, + expectSet: true, + }, + { // Probe successful. + podStatus: &runningStatus, + expectContinue: true, + expectSet: true, + expectedResult: results.Success, + }, + { // Initial delay passed + podStatus: &runningStatus, + probe: api.Probe{ + InitialDelaySeconds: -100, + }, + expectContinue: true, + expectSet: true, + expectedResult: results.Success, + }, + } + + for _, probeType := range [...]probeType{liveness, readiness} { + for i, test := range tests { + w := newTestWorker(m, probeType, test.probe) + if test.podStatus != nil { + m.statusManager.SetPodStatus(w.pod, *test.podStatus) + } + if c := w.doProbe(); c != test.expectContinue { + t.Errorf("[%s-%d] Expected continue to be %v but got %v", probeType, i, test.expectContinue, c) + } + result, ok := resultsManager(m, probeType).Get(testContainerID) + if ok != test.expectSet { + t.Errorf("[%s-%d] Expected to have result: %v but got %v", probeType, i, test.expectSet, ok) + } + if result != test.expectedResult { + t.Errorf("[%s-%d] Expected result: %v but got %v", probeType, i, test.expectedResult, result) + } + + // Clean up. + m.statusManager = status.NewManager(&fake.Clientset{}, kubepod.NewBasicPodManager(nil)) + resultsManager(m, probeType).Remove(testContainerID) + } + } +} + +func TestInitialDelay(t *testing.T) { + m := newTestManager() + + for _, probeType := range [...]probeType{liveness, readiness} { + w := newTestWorker(m, probeType, api.Probe{ + InitialDelaySeconds: 10, + }) + m.statusManager.SetPodStatus(w.pod, getTestRunningStatus()) + + expectContinue(t, w, w.doProbe(), "during initial delay") + expectResult(t, w, results.Result(probeType == liveness), "during initial delay") + + // 100 seconds later... + laterStatus := getTestRunningStatus() + laterStatus.ContainerStatuses[0].State.Running.StartedAt.Time = + time.Now().Add(-100 * time.Second) + m.statusManager.SetPodStatus(w.pod, laterStatus) + + // Second call should succeed (already waited). + expectContinue(t, w, w.doProbe(), "after initial delay") + expectResult(t, w, results.Success, "after initial delay") + } +} + +func TestFailureThreshold(t *testing.T) { + m := newTestManager() + w := newTestWorker(m, readiness, api.Probe{SuccessThreshold: 1, FailureThreshold: 3}) + m.statusManager.SetPodStatus(w.pod, getTestRunningStatus()) + + for i := 0; i < 2; i++ { + // First probe should succeed. 
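+		// Editor's note: with FailureThreshold: 3 above, doProbe only flips the
+		// reported result to Failure after three consecutive failed probes, so the
+		// first two failing probes below must still report Success.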
+ m.prober.exec = fakeExecProber{probe.Success, nil} + + for j := 0; j < 3; j++ { + msg := fmt.Sprintf("%d success (%d)", j+1, i) + expectContinue(t, w, w.doProbe(), msg) + expectResult(t, w, results.Success, msg) + } + + // Prober starts failing :( + m.prober.exec = fakeExecProber{probe.Failure, nil} + + // Next 2 probes should still be "success". + for j := 0; j < 2; j++ { + msg := fmt.Sprintf("%d failing (%d)", j+1, i) + expectContinue(t, w, w.doProbe(), msg) + expectResult(t, w, results.Success, msg) + } + + // Third & following fail. + for j := 0; j < 3; j++ { + msg := fmt.Sprintf("%d failure (%d)", j+3, i) + expectContinue(t, w, w.doProbe(), msg) + expectResult(t, w, results.Failure, msg) + } + } +} + +func TestSuccessThreshold(t *testing.T) { + m := newTestManager() + w := newTestWorker(m, readiness, api.Probe{SuccessThreshold: 3, FailureThreshold: 1}) + m.statusManager.SetPodStatus(w.pod, getTestRunningStatus()) + + // Start out failure. + w.resultsManager.Set(testContainerID, results.Failure, &api.Pod{}) + + for i := 0; i < 2; i++ { + // Probe defaults to Failure. + for j := 0; j < 2; j++ { + msg := fmt.Sprintf("%d success (%d)", j+1, i) + expectContinue(t, w, w.doProbe(), msg) + expectResult(t, w, results.Failure, msg) + } + + // Continuing success! + for j := 0; j < 3; j++ { + msg := fmt.Sprintf("%d success (%d)", j+3, i) + expectContinue(t, w, w.doProbe(), msg) + expectResult(t, w, results.Success, msg) + } + + // Prober flakes :( + m.prober.exec = fakeExecProber{probe.Failure, nil} + msg := fmt.Sprintf("1 failure (%d)", i) + expectContinue(t, w, w.doProbe(), msg) + expectResult(t, w, results.Failure, msg) + + // Back to success. + m.prober.exec = fakeExecProber{probe.Success, nil} + } +} + +func TestCleanUp(t *testing.T) { + m := newTestManager() + + for _, probeType := range [...]probeType{liveness, readiness} { + key := probeKey{testPodUID, testContainerName, probeType} + w := newTestWorker(m, probeType, api.Probe{}) + m.statusManager.SetPodStatus(w.pod, getTestRunningStatus()) + go w.run() + m.workers[key] = w + + // Wait for worker to run. + condition := func() (bool, error) { + ready, _ := resultsManager(m, probeType).Get(testContainerID) + return ready == results.Success, nil + } + if ready, _ := condition(); !ready { + if err := wait.Poll(100*time.Millisecond, wait.ForeverTestTimeout, condition); err != nil { + t.Fatalf("[%s] Error waiting for worker ready: %v", probeType, err) + } + } + + for i := 0; i < 10; i++ { + w.stop() // Stop should be callable multiple times without consequence. + } + if err := waitForWorkerExit(m, []probeKey{key}); err != nil { + t.Fatalf("[%s] error waiting for worker exit: %v", probeType, err) + } + + if _, ok := resultsManager(m, probeType).Get(testContainerID); ok { + t.Errorf("[%s] Expected result to be cleared.", probeType) + } + if _, ok := m.workers[key]; ok { + t.Errorf("[%s] Expected worker to be cleared.", probeType) + } + } +} + +func TestHandleCrash(t *testing.T) { + runtime.ReallyCrash = false // Test that we *don't* really crash. + + m := newTestManager() + w := newTestWorker(m, readiness, api.Probe{}) + m.statusManager.SetPodStatus(w.pod, getTestRunningStatus()) + + expectContinue(t, w, w.doProbe(), "Initial successful probe.") + expectResult(t, w, results.Success, "Initial successful probe.") + + // Prober starts crashing. + m.prober = &prober{ + refManager: kubecontainer.NewRefManager(), + recorder: &record.FakeRecorder{}, + exec: crashingExecProber{}, + } + + // doProbe should recover from the crash, and keep going. 
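+	// Editor's note: doProbe declares a named return value (keepGoing bool) and
+	// defers runtime.HandleCrash(func(_ interface{}) { keepGoing = true }), so the
+	// panic from crashingExecProber is recovered and the worker keeps going, as
+	// asserted below.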
+ expectContinue(t, w, w.doProbe(), "Crashing probe.") + expectResult(t, w, results.Success, "Crashing probe unchanged.") +} + +func expectResult(t *testing.T, w *worker, expectedResult results.Result, msg string) { + result, ok := resultsManager(w.probeManager, w.probeType).Get(w.containerID) + if !ok { + t.Errorf("[%s - %s] Expected result to be set, but was not set", w.probeType, msg) + } else if result != expectedResult { + t.Errorf("[%s - %s] Expected result to be %v, but was %v", + w.probeType, msg, expectedResult, result) + } +} + +func expectContinue(t *testing.T, w *worker, c bool, msg string) { + if !c { + t.Errorf("[%s - %s] Expected to continue, but did not", w.probeType, msg) + } +} + +func resultsManager(m *manager, probeType probeType) results.Manager { + switch probeType { + case readiness: + return m.readinessManager + case liveness: + return m.livenessManager + } + panic(fmt.Errorf("Unhandled case: %v", probeType)) +} + +type crashingExecProber struct{} + +func (p crashingExecProber) Probe(_ exec.Cmd) (probe.Result, string, error) { + panic("Intentional Probe crash.") +} + +func TestOnHoldOnLivenessCheckFailure(t *testing.T) { + m := newTestManager() + w := newTestWorker(m, liveness, api.Probe{SuccessThreshold: 1, FailureThreshold: 1}) + status := getTestRunningStatus() + m.statusManager.SetPodStatus(w.pod, getTestRunningStatus()) + + // First probe should fail. + m.prober.exec = fakeExecProber{probe.Failure, nil} + msg := "first probe" + expectContinue(t, w, w.doProbe(), msg) + expectResult(t, w, results.Failure, msg) + if !w.onHold { + t.Errorf("Prober should be on hold due to liveness check failure") + } + // Set fakeExecProber to return success. However, the result will remain + // failure because the worker is on hold and won't probe. + m.prober.exec = fakeExecProber{probe.Success, nil} + msg = "while on hold" + expectContinue(t, w, w.doProbe(), msg) + expectResult(t, w, results.Failure, msg) + if !w.onHold { + t.Errorf("Prober should be on hold due to liveness check failure") + } + + // Set a new container ID to lift the hold. The next probe will succeed. + status.ContainerStatuses[0].ContainerID = "test://newCont_ID" + m.statusManager.SetPodStatus(w.pod, status) + msg = "hold lifted" + expectContinue(t, w, w.doProbe(), msg) + expectResult(t, w, results.Success, msg) + if w.onHold { + t.Errorf("Prober should not be on hold anymore") + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/qos/memory_policy.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/qos/memory_policy.go deleted file mode 100644 index b785ab676f40..000000000000 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/qos/memory_policy.go +++ /dev/null @@ -1,75 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package qos - -import ( - "k8s.io/kubernetes/pkg/api" -) - -const ( - PodInfraOOMAdj int = -999 - KubeletOOMScoreAdj int = -999 - KubeProxyOOMScoreAdj int = -999 -) - -// isMemoryBestEffort returns true if the container's memory requirements are best-effort. -func isMemoryBestEffort(container *api.Container) bool { - // A container is memory best-effort if its memory request is unspecified or 0. - // If a request is specified, then the user expects some kind of resource guarantee. - return container.Resources.Requests.Memory().Value() == 0 -} - -// isMemoryGuaranteed returns true if the container's memory requirements are Guaranteed. -func isMemoryGuaranteed(container *api.Container) bool { - // A container is memory guaranteed if its memory request == memory limit. - // If memory request == memory limit, the user is very confident of resource consumption. - memoryRequest := container.Resources.Requests.Memory() - memoryLimit := container.Resources.Limits.Memory() - return (*memoryRequest).Cmp(*memoryLimit) == 0 && memoryRequest.Value() != 0 -} - -// GetContainerOOMAdjust returns the amount by which the OOM score of all processes in the -// container should be adjusted. The OOM score of a process is the percentage of memory it consumes -// multiplied by 10 (barring exceptional cases) + a configurable quantity which is between -1000 -// and 1000. Containers with higher OOM scores are killed if the system runs out of memory. -// See https://lwn.net/Articles/391222/ for more information. -func GetContainerOOMScoreAdjust(container *api.Container, memoryCapacity int64) int { - if isMemoryGuaranteed(container) { - // Memory guaranteed containers should be the last to get killed. - return -999 - } else if isMemoryBestEffort(container) { - // Memory best-effort containers should be the first to be killed. - return 1000 - } else { - // Burstable containers are a middle tier, between Guaranteed and Best-Effort. Ideally, - // we want to protect Burstable containers that consume less memory than requested. - // The formula below is a heuristic. A container requesting for 10% of a system's - // memory will have an oom score adjust of 900. If a process in container Y - // uses over 10% of memory, its OOM score will be 1000. The idea is that containers - // which use more than their request will have an OOM score of 1000 and will be prime - // targets for OOM kills. - // Note that this is a heuristic, it won't work if a container has many small processes. - memoryRequest := container.Resources.Requests.Memory().Value() - oomScoreAdjust := 1000 - (1000*memoryRequest)/memoryCapacity - // A memory guaranteed container using 100% of memory can have an OOM score of 1. Ensure - // that memory burstable containers have a higher OOM score. - if oomScoreAdjust < 2 { - return 2 - } - return int(oomScoreAdjust) - } -} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/qos/policy.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/qos/policy.go new file mode 100644 index 000000000000..511e629fadfe --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/qos/policy.go @@ -0,0 +1,67 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package qos
+
+import (
+	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/kubelet/qos/util"
+)
+
+const (
+	PodInfraOOMAdj        int = -999
+	KubeletOOMScoreAdj    int = -999
+	KubeProxyOOMScoreAdj  int = -999
+	guaranteedOOMScoreAdj int = -998
+	besteffortOOMScoreAdj int = 1000
+)
+
+// GetContainerOOMScoreAdjust returns the amount by which the OOM score of all processes in the
+// container should be adjusted.
+// The OOM score of a process is the percentage of memory it consumes
+// multiplied by 10 (barring exceptional cases) + a configurable quantity which is between -1000
+// and 1000. Containers with higher OOM scores are killed if the system runs out of memory.
+// See https://lwn.net/Articles/391222/ for more information.
+func GetContainerOOMScoreAdjust(pod *api.Pod, container *api.Container, memoryCapacity int64) int {
+	switch util.GetPodQos(pod) {
+	case util.Guaranteed:
+		// Guaranteed containers should be the last to get killed.
+		return guaranteedOOMScoreAdj
+	case util.BestEffort:
+		return besteffortOOMScoreAdj
+	}
+
+	// Burstable containers are a middle tier, between Guaranteed and Best-Effort. Ideally,
+	// we want to protect Burstable containers that consume less memory than requested.
+	// The formula below is a heuristic. A container requesting 10% of a system's
+	// memory will have an OOM score adjust of 900. If a process in container Y
+	// uses over 10% of memory, its OOM score will be 1000. The idea is that containers
+	// which use more than their request will have an OOM score of 1000 and will be prime
+	// targets for OOM kills.
+	// Note that this is a heuristic; it won't work if a container has many small processes.
+	memoryRequest := container.Resources.Requests.Memory().Value()
+	oomScoreAdjust := 1000 - (1000*memoryRequest)/memoryCapacity
+	// A guaranteed pod using 100% of memory can have an OOM score of 1. Ensure
+	// that burstable pods have a higher OOM score adjustment.
+	if oomScoreAdjust < 2 {
+		return 2
+	}
+	// Give burstable pods a higher chance of survival over besteffort pods.
+	if int(oomScoreAdjust) == besteffortOOMScoreAdj {
+		return int(oomScoreAdjust - 1)
+	}
+	return int(oomScoreAdjust)
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/qos/policy_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/qos/policy_test.go
new file mode 100644
index 000000000000..e66b7b158ad8
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/qos/policy_test.go
@@ -0,0 +1,198 @@
+/*
+Copyright 2015 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package qos
+
+import (
+	"strconv"
+	"testing"
+
+	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/api/resource"
+)
+
+const (
+	standardMemoryAmount = 8000000000
+)
+
+var (
+	cpuLimit = api.Pod{
+		Spec: api.PodSpec{
+			Containers: []api.Container{
+				{
+					Resources: api.ResourceRequirements{
+						Limits: api.ResourceList{
+							api.ResourceName(api.ResourceCPU): resource.MustParse("10"),
+						},
+					},
+				},
+			},
+		},
+	}
+
+	memoryLimitCPURequest = api.Pod{
+		Spec: api.PodSpec{
+			Containers: []api.Container{
+				{
+					Resources: api.ResourceRequirements{
+						Requests: api.ResourceList{
+							api.ResourceName(api.ResourceCPU): resource.MustParse("0"),
+						},
+						Limits: api.ResourceList{
+							api.ResourceName(api.ResourceMemory): resource.MustParse("10G"),
+						},
+					},
+				},
+			},
+		},
+	}
+
+	zeroMemoryLimit = api.Pod{
+		Spec: api.PodSpec{
+			Containers: []api.Container{
+				{
+					Resources: api.ResourceRequirements{
+						Limits: api.ResourceList{
+							api.ResourceName(api.ResourceMemory): resource.MustParse("0"),
+						},
+					},
+				},
+			},
+		},
+	}
+
+	noRequestLimit = api.Pod{
+		Spec: api.PodSpec{
+			Containers: []api.Container{
+				{
+					Resources: api.ResourceRequirements{},
+				},
+			},
+		},
+	}
+
+	equalRequestLimitCPUMemory = api.Pod{
+		Spec: api.PodSpec{
+			Containers: []api.Container{
+				{
+					Resources: api.ResourceRequirements{
+						Requests: api.ResourceList{
+							api.ResourceName(api.ResourceMemory): resource.MustParse("10G"),
+							api.ResourceName(api.ResourceCPU):    resource.MustParse("5m"),
+						},
+						Limits: api.ResourceList{
+							api.ResourceName(api.ResourceCPU):    resource.MustParse("5m"),
+							api.ResourceName(api.ResourceMemory): resource.MustParse("10G"),
+						},
+					},
+				},
+			},
+		},
+	}
+
+	cpuUnlimitedMemoryLimitedWithRequests = api.Pod{
+		Spec: api.PodSpec{
+			Containers: []api.Container{
+				{
+					Resources: api.ResourceRequirements{
+						Requests: api.ResourceList{
+							api.ResourceName(api.ResourceMemory): resource.MustParse(strconv.Itoa(standardMemoryAmount / 2)),
+							api.ResourceName(api.ResourceCPU):    resource.MustParse("5m"),
+						},
+						Limits: api.ResourceList{
+							api.ResourceName(api.ResourceMemory): resource.MustParse("10G"),
+						},
+					},
+				},
+			},
+		},
+	}
+
+	requestNoLimit = api.Pod{
+		Spec: api.PodSpec{
+			Containers: []api.Container{
+				{
+					Resources: api.ResourceRequirements{
+						Requests: api.ResourceList{
+							api.ResourceName(api.ResourceMemory): resource.MustParse(strconv.Itoa(standardMemoryAmount - 1)),
+							api.ResourceName(api.ResourceCPU):    resource.MustParse("5m"),
+						},
+					},
+				},
+			},
+		},
+	}
+)
+
+type oomTest struct {
+	pod             *api.Pod
+	memoryCapacity  int64
+	lowOOMScoreAdj  int // The minimum oom_score_adj the container may be assigned.
+	highOOMScoreAdj int // The maximum oom_score_adj the container may be assigned.
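+	// Editor's note: low == high except for burstable pods, where the score is
+	// 1000 - (1000*memoryRequest)/memoryCapacity; e.g. requesting half of capacity
+	// yields 500, and the test below allows a few points of slack (495-505).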
+}
+
+func TestGetContainerOOMScoreAdjust(t *testing.T) {
+	oomTests := []oomTest{
+		{
+			pod:             &cpuLimit,
+			memoryCapacity:  4000000000,
+			lowOOMScoreAdj:  999,
+			highOOMScoreAdj: 999,
+		},
+		{
+			pod:             &memoryLimitCPURequest,
+			memoryCapacity:  8000000000,
+			lowOOMScoreAdj:  999,
+			highOOMScoreAdj: 999,
+		},
+		{
+			pod:             &zeroMemoryLimit,
+			memoryCapacity:  7230457451,
+			lowOOMScoreAdj:  1000,
+			highOOMScoreAdj: 1000,
+		},
+		{
+			pod:             &noRequestLimit,
+			memoryCapacity:  4000000000,
+			lowOOMScoreAdj:  1000,
+			highOOMScoreAdj: 1000,
+		},
+		{
+			pod:             &equalRequestLimitCPUMemory,
+			memoryCapacity:  123456789,
+			lowOOMScoreAdj:  -998,
+			highOOMScoreAdj: -998,
+		},
+		{
+			pod:             &cpuUnlimitedMemoryLimitedWithRequests,
+			memoryCapacity:  standardMemoryAmount,
+			lowOOMScoreAdj:  495,
+			highOOMScoreAdj: 505,
+		},
+		{
+			pod:             &requestNoLimit,
+			memoryCapacity:  standardMemoryAmount,
+			lowOOMScoreAdj:  2,
+			highOOMScoreAdj: 2,
+		},
+	}
+	for _, test := range oomTests {
+		oomScoreAdj := GetContainerOOMScoreAdjust(test.pod, &test.pod.Spec.Containers[0], test.memoryCapacity)
+		if oomScoreAdj < test.lowOOMScoreAdj || oomScoreAdj > test.highOOMScoreAdj {
+			t.Errorf("oom_score_adj should be between %d and %d, but was %d", test.lowOOMScoreAdj, test.highOOMScoreAdj, oomScoreAdj)
+		}
+	}
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/qos/util/qos.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/qos/util/qos.go
index af415c2acb40..9d7a5786a3b3 100644
--- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/qos/util/qos.go
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/qos/util/qos.go
@@ -18,6 +18,7 @@ package util
 
 import (
 	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/api/resource"
 )
 
 const (
@@ -46,9 +47,70 @@ func isResourceBestEffort(container *api.Container, resource api.ResourceName) b
 	return !hasReq || req.Value() == 0
 }
 
-// GetQos returns a mapping of resource name to QoS class of a container
-func GetQoS(container *api.Container) map[api.ResourceName]string {
-	resourceToQoS := map[api.ResourceName]string{}
+// GetPodQos returns the QoS class of a pod.
+// A pod is besteffort if none of its containers have specified any requests or limits.
+// A pod is guaranteed only when requests and limits are specified for all the containers and they are equal.
+// A pod is burstable if limits and requests do not match across all containers.
+func GetPodQos(pod *api.Pod) string {
+	requests := api.ResourceList{}
+	limits := api.ResourceList{}
+	zeroQuantity := resource.MustParse("0")
+	isGuaranteed := true
+	for _, container := range pod.Spec.Containers {
+		// process requests
+		for name, quantity := range container.Resources.Requests {
+			if quantity.Cmp(zeroQuantity) == 1 {
+				delta := quantity.Copy()
+				if _, exists := requests[name]; !exists {
+					requests[name] = *delta
+				} else {
+					delta.Add(requests[name])
+					requests[name] = *delta
+				}
+			}
+		}
+		// process limits
+		for name, quantity := range container.Resources.Limits {
+			if quantity.Cmp(zeroQuantity) == 1 {
+				delta := quantity.Copy()
+				if _, exists := limits[name]; !exists {
+					limits[name] = *delta
+				} else {
+					delta.Add(limits[name])
+					limits[name] = *delta
+				}
+			}
+		}
+		if len(container.Resources.Limits) != len(supportedComputeResources) {
+			isGuaranteed = false
+		}
+	}
+	if len(requests) == 0 && len(limits) == 0 {
+		return BestEffort
+	}
+	// Check if requests match limits for all resources.
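+	// Editor's note: a pod is therefore Guaranteed only when every container
+	// limits all supported compute resources (assuming supportedComputeResources
+	// covers cpu and memory) and the summed requests exactly equal the summed
+	// limits, which the checks below enforce.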
+ if isGuaranteed { + for name, req := range requests { + if lim, exists := limits[name]; !exists || lim.Cmp(req) != 0 { + isGuaranteed = false + break + } + } + } + if isGuaranteed && + len(requests) == len(limits) && + len(limits) == len(supportedComputeResources) { + return Guaranteed + } + return Burstable +} + +// QoSList is a set of (resource name, QoS class) pairs. +type QoSList map[api.ResourceName]string + +// GetQoS returns a mapping of resource name to QoS class of a container +func GetQoS(container *api.Container) QoSList { + resourceToQoS := QoSList{} for resource := range allResources(container) { switch { case isResourceGuaranteed(container, resource): diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/qos/util/qos_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/qos/util/qos_test.go new file mode 100644 index 000000000000..e7a060fec5b4 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/qos/util/qos_test.go @@ -0,0 +1,132 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/resource" +) + +func getResourceList(cpu, memory string) api.ResourceList { + res := api.ResourceList{} + if cpu != "" { + res[api.ResourceCPU] = resource.MustParse(cpu) + } + if memory != "" { + res[api.ResourceMemory] = resource.MustParse(memory) + } + return res +} + +func getResourceRequirements(requests, limits api.ResourceList) api.ResourceRequirements { + res := api.ResourceRequirements{} + res.Requests = requests + res.Limits = limits + return res +} + +func newContainer(name string, requests api.ResourceList, limits api.ResourceList) api.Container { + return api.Container{ + Name: name, + Resources: getResourceRequirements(requests, limits), + } +} + +func newPod(name string, containers []api.Container) *api.Pod { + return &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: name, + }, + Spec: api.PodSpec{ + Containers: containers, + }, + } +} + +func TestGetPodQos(t *testing.T) { + testCases := []struct { + pod *api.Pod + expected string + }{ + { + pod: newPod("guaranteed", []api.Container{ + newContainer("guaranteed", getResourceList("100m", "100Mi"), getResourceList("100m", "100Mi")), + }), + expected: Guaranteed, + }, + { + pod: newPod("guaranteed-guaranteed", []api.Container{ + newContainer("guaranteed", getResourceList("100m", "100Mi"), getResourceList("100m", "100Mi")), + newContainer("guaranteed", getResourceList("100m", "100Mi"), getResourceList("100m", "100Mi")), + }), + expected: Guaranteed, + }, + { + pod: newPod("best-effort-best-effort", []api.Container{ + newContainer("best-effort", getResourceList("", ""), getResourceList("", "")), + newContainer("best-effort", getResourceList("", ""), getResourceList("", "")), + }), + expected: BestEffort, + }, + { + pod: newPod("best-effort", []api.Container{ + newContainer("best-effort", getResourceList("", ""), getResourceList("", "")), + }), + expected: 
BestEffort,
+		},
+		{
+			pod: newPod("best-effort-burstable", []api.Container{
+				newContainer("best-effort", getResourceList("", ""), getResourceList("", "")),
+				newContainer("burstable", getResourceList("1", ""), getResourceList("2", "")),
+			}),
+			expected: Burstable,
+		},
+		{
+			pod: newPod("best-effort-guaranteed", []api.Container{
+				newContainer("best-effort", getResourceList("", ""), getResourceList("", "")),
+				newContainer("guaranteed", getResourceList("10m", "100Mi"), getResourceList("10m", "100Mi")),
+			}),
+			expected: Burstable,
+		},
+		{
+			pod: newPod("burstable-cpu-guaranteed-memory", []api.Container{
+				newContainer("burstable", getResourceList("", "100Mi"), getResourceList("", "100Mi")),
+			}),
+			expected: Burstable,
+		},
+		{
+			pod: newPod("burstable-guaranteed", []api.Container{
+				newContainer("burstable", getResourceList("1", "100Mi"), getResourceList("2", "100Mi")),
+				newContainer("guaranteed", getResourceList("100m", "100Mi"), getResourceList("100m", "100Mi")),
+			}),
+			expected: Burstable,
+		},
+		{
+			pod: newPod("burstable", []api.Container{
+				newContainer("burstable", getResourceList("10m", "100Mi"), getResourceList("100m", "200Mi")),
+			}),
+			expected: Burstable,
+		},
+	}
+	for _, testCase := range testCases {
+		if actual := GetPodQos(testCase.pod); testCase.expected != actual {
+			t.Errorf("invalid qos pod %s, expected: %s, actual: %s", testCase.pod.Name, testCase.expected, actual)
+		}
+	}
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/reason_cache.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/reason_cache.go
new file mode 100644
index 000000000000..6134ffe1b071
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/reason_cache.go
@@ -0,0 +1,104 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kubelet
+
+import (
+	"fmt"
+	"sync"
+
+	"github.com/golang/groupcache/lru"
+	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
+	"k8s.io/kubernetes/pkg/types"
+)
+
+// ReasonCache stores the failure reason of the latest container start
+// in a string, keyed by <pod_UID>_<container_name>. The goal is to
+// propagate this reason to the container status. This endeavor is
+// "best-effort" for two reasons:
+// 1. The cache is not persisted.
+// 2. We use an LRU cache to avoid extra garbage collection work. This
+//    means that some entries may be recycled before a pod has been
+//    deleted.
+// TODO(random-liu): Use a more reliable cache that can garbage-collect entries for failed pods.
+// TODO(random-liu): Move reason cache to somewhere better.
+type ReasonCache struct {
+	lock  sync.RWMutex
+	cache *lru.Cache
+}
+
+// reasonInfo is the cached item in ReasonCache
+type reasonInfo struct {
+	reason  error
+	message string
+}
+
+// maxReasonCacheEntries is the number of entries in the LRU cache. 1000 is a proper number
+// for our target of 100 pods per node. If we support more pods per node in the future, we
+// may want to increase the number.
+const maxReasonCacheEntries = 1000
+
+func NewReasonCache() *ReasonCache {
+	return &ReasonCache{cache: lru.New(maxReasonCacheEntries)}
+}
+
+func (c *ReasonCache) composeKey(uid types.UID, name string) string {
+	return fmt.Sprintf("%s_%s", uid, name)
+}
+
+// add adds an error reason into the cache
+func (c *ReasonCache) add(uid types.UID, name string, reason error, message string) {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+	c.cache.Add(c.composeKey(uid, name), reasonInfo{reason, message})
+}
+
+// Update updates the reason cache with the PodSyncResult. Only SyncResults with
+// a StartContainer action will change the cache.
+func (c *ReasonCache) Update(uid types.UID, result kubecontainer.PodSyncResult) {
+	for _, r := range result.SyncResults {
+		if r.Action != kubecontainer.StartContainer {
+			continue
+		}
+		name := r.Target.(string)
+		if r.Error != nil {
+			c.add(uid, name, r.Error, r.Message)
+		} else {
+			c.Remove(uid, name)
+		}
+	}
+}
+
+// Remove removes the error reason from the cache
+func (c *ReasonCache) Remove(uid types.UID, name string) {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+	c.cache.Remove(c.composeKey(uid, name))
+}
+
+// Get gets the error reason from the cache. The return values are the error reason, the error
+// message, and whether an error reason was found in the cache. If no error reason is found,
+// a nil error and an empty message are returned.
+func (c *ReasonCache) Get(uid types.UID, name string) (error, string, bool) {
+	c.lock.RLock()
+	defer c.lock.RUnlock()
+	value, ok := c.cache.Get(c.composeKey(uid, name))
+	if !ok {
+		return nil, "", ok
+	}
+	info := value.(reasonInfo)
+	return info.reason, info.message, ok
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/reason_cache_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/reason_cache_test.go
new file mode 100644
index 000000000000..cc77ded5786b
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/reason_cache_test.go
@@ -0,0 +1,69 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kubelet
+
+import (
+	"testing"
+
+	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
+	"k8s.io/kubernetes/pkg/types"
+)
+
+func TestReasonCache(t *testing.T) {
+	// Create test sync result
+	syncResult := kubecontainer.PodSyncResult{}
+	results := []*kubecontainer.SyncResult{
+		// reason cache should be set for SyncResult with StartContainer action and error
+		kubecontainer.NewSyncResult(kubecontainer.StartContainer, "container_1"),
+		// reason cache should not be set for SyncResult with StartContainer action but without error
+		kubecontainer.NewSyncResult(kubecontainer.StartContainer, "container_2"),
+		// reason cache should not be set for SyncResult with other actions
+		kubecontainer.NewSyncResult(kubecontainer.KillContainer, "container_3"),
+	}
+	results[0].Fail(kubecontainer.ErrRunContainer, "message_1")
+	results[2].Fail(kubecontainer.ErrKillContainer, "message_3")
+	syncResult.AddSyncResult(results...)
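+	// Editor's note: after Update below, only container_1 should have a cached
+	// entry (StartContainer action with an error); container_2's successful start
+	// removes any entry, and container_3's KillContainer result is ignored.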
+ uid := types.UID("pod_1") + + reasonCache := NewReasonCache() + reasonCache.Update(uid, syncResult) + assertReasonInfo(t, reasonCache, uid, results[0], true) + assertReasonInfo(t, reasonCache, uid, results[1], false) + assertReasonInfo(t, reasonCache, uid, results[2], false) + + reasonCache.Remove(uid, results[0].Target.(string)) + assertReasonInfo(t, reasonCache, uid, results[0], false) +} + +func assertReasonInfo(t *testing.T, cache *ReasonCache, uid types.UID, result *kubecontainer.SyncResult, found bool) { + name := result.Target.(string) + actualReason, actualMessage, ok := cache.Get(uid, name) + if ok && !found { + t.Fatalf("unexpected cache hit: %v, %q", actualReason, actualMessage) + } + if !ok && found { + t.Fatalf("corresponding reason info not found") + } + if !found { + return + } + reason := result.Error + message := result.Message + if actualReason != reason || actualMessage != message { + t.Errorf("expected %v %q, got %v %q", reason, message, actualReason, actualMessage) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/rkt/cap.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/rkt/cap.go new file mode 100644 index 000000000000..a00057f9e1c7 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/rkt/cap.go @@ -0,0 +1,110 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rkt + +// TODO(yifan): Export this to higher level package. +const ( + CAP_CHOWN = iota + CAP_DAC_OVERRIDE + CAP_DAC_READ_SEARCH + CAP_FOWNER + CAP_FSETID + CAP_KILL + CAP_SETGID + CAP_SETUID + CAP_SETPCAP + CAP_LINUX_IMMUTABLE + CAP_NET_BIND_SERVICE + CAP_NET_BROADCAST + CAP_NET_ADMIN + CAP_NET_RAW + CAP_IPC_LOCK + CAP_IPC_OWNER + CAP_SYS_MODULE + CAP_SYS_RAWIO + CAP_SYS_CHROOT + CAP_SYS_PTRACE + CAP_SYS_PACCT + CAP_SYS_ADMIN + CAP_SYS_BOOT + CAP_SYS_NICE + CAP_SYS_RESOURCE + CAP_SYS_TIME + CAP_SYS_TTY_CONFIG + CAP_MKNOD + CAP_LEASE + CAP_AUDIT_WRITE + CAP_AUDIT_CONTROL + CAP_SETFCAP + CAP_MAC_OVERRIDE + CAP_MAC_ADMIN + CAP_SYSLOG + CAP_WAKE_ALARM + CAP_BLOCK_SUSPEND + CAP_AUDIT_READ +) + +// TODO(yifan): Export this to higher level package. 
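+// Editor's note: allCapabilities (below) builds its result by ranging over this
+// map, and Go randomizes map iteration order, so the returned slice has no
+// stable order; callers needing deterministic output would have to sort it.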
+var capabilityList = map[int]string{ + CAP_CHOWN: "CAP_CHOWN", + CAP_DAC_OVERRIDE: "CAP_DAC_OVERRIDE", + CAP_DAC_READ_SEARCH: "CAP_DAC_READ_SEARCH", + CAP_FOWNER: "CAP_FOWNER", + CAP_FSETID: "CAP_FSETID", + CAP_KILL: "CAP_KILL", + CAP_SETGID: "CAP_SETGID", + CAP_SETUID: "CAP_SETUID", + CAP_SETPCAP: "CAP_SETPCAP", + CAP_LINUX_IMMUTABLE: "CAP_LINUX_IMMUTABLE", + CAP_NET_BIND_SERVICE: "CAP_NET_BIND_SERVICE", + CAP_NET_BROADCAST: "CAP_NET_BROADCAST", + CAP_NET_ADMIN: "CAP_NET_ADMIN", + CAP_NET_RAW: "CAP_NET_RAW", + CAP_IPC_LOCK: "CAP_IPC_LOCK", + CAP_IPC_OWNER: "CAP_IPC_OWNER", + CAP_SYS_MODULE: "CAP_SYS_MODULE", + CAP_SYS_RAWIO: "CAP_SYS_RAWIO", + CAP_SYS_CHROOT: "CAP_SYS_CHROOT", + CAP_SYS_PTRACE: "CAP_SYS_PTRACE", + CAP_SYS_PACCT: "CAP_SYS_PACCT", + CAP_SYS_ADMIN: "CAP_SYS_ADMIN", + CAP_SYS_BOOT: "CAP_SYS_BOOT", + CAP_SYS_NICE: "CAP_SYS_NICE", + CAP_SYS_RESOURCE: "CAP_SYS_RESOURCE", + CAP_SYS_TIME: "CAP_SYS_TIME", + CAP_SYS_TTY_CONFIG: "CAP_SYS_TTY_CONFIG", + CAP_MKNOD: "CAP_MKNOD", + CAP_LEASE: "CAP_LEASE", + CAP_AUDIT_WRITE: "CAP_AUDIT_WRITE", + CAP_AUDIT_CONTROL: "CAP_AUDIT_CONTROL", + CAP_SETFCAP: "CAP_SETFCAP", + CAP_MAC_OVERRIDE: "CAP_MAC_OVERRIDE", + CAP_MAC_ADMIN: "CAP_MAC_ADMIN", + CAP_SYSLOG: "CAP_SYSLOG", + CAP_WAKE_ALARM: "CAP_WAKE_ALARM", + CAP_BLOCK_SUSPEND: "CAP_BLOCK_SUSPEND", + CAP_AUDIT_READ: "CAP_AUDIT_READ", +} + +// allCapabilities returns the capability list with all capabilities. +func allCapabilities() []string { + var capabilities []string + for _, cap := range capabilityList { + capabilities = append(capabilities, cap) + } + return capabilities +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/rkt/config.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/rkt/config.go new file mode 100644 index 000000000000..809eefc54313 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/rkt/config.go @@ -0,0 +1,106 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rkt + +import ( + "fmt" + + rktapi "github.com/coreos/rkt/api/v1alpha" + "golang.org/x/net/context" +) + +// Config stores the global configuration for the rkt runtime. +// Detailed documents can be found at: +// https://github.com/coreos/rkt/blob/master/Documentation/commands.md#global-options +type Config struct { + // The absolute path to the binary, or leave empty to find it in $PATH. + Path string + // The rkt data directory. + Dir string + // The image to use as stage1. + Stage1Image string + // The debug flag for rkt. + Debug bool + // Comma-separated list of security features to disable. + // Allowed values: "none", "image", "tls", "ondisk", "http", "all". + InsecureOptions string + // The local config directory. + LocalConfigDir string + // The user config directory. + UserConfigDir string + // The system config directory. + SystemConfigDir string +} + +// buildGlobalOptions returns an array of global command line options. 
+func (c *Config) buildGlobalOptions() []string {
+	var result []string
+	if c == nil {
+		return result
+	}
+
+	if c.Debug {
+		result = append(result, "--debug=true")
+	}
+	if c.InsecureOptions != "" {
+		result = append(result, fmt.Sprintf("--insecure-options=%s", c.InsecureOptions))
+	}
+	if c.LocalConfigDir != "" {
+		result = append(result, fmt.Sprintf("--local-config=%s", c.LocalConfigDir))
+	}
+	if c.UserConfigDir != "" {
+		result = append(result, fmt.Sprintf("--user-config=%s", c.UserConfigDir))
+	}
+	if c.SystemConfigDir != "" {
+		result = append(result, fmt.Sprintf("--system-config=%s", c.SystemConfigDir))
+	}
+	if c.Dir != "" {
+		result = append(result, fmt.Sprintf("--dir=%s", c.Dir))
+	}
+	return result
+}
+
+// getConfig gets the configuration from the rkt API service
+// and merges it with the provided config. The merge rule is
+// that non-empty fields in the provided config override the
+// values returned by the rkt API service.
+func (r *Runtime) getConfig(cfg *Config) (*Config, error) {
+	resp, err := r.apisvc.GetInfo(context.Background(), &rktapi.GetInfoRequest{})
+	if err != nil {
+		return nil, err
+	}
+
+	flags := resp.Info.GlobalFlags
+
+	if cfg.Dir == "" {
+		cfg.Dir = flags.Dir
+	}
+	if cfg.InsecureOptions == "" {
+		cfg.InsecureOptions = flags.InsecureFlags
+	}
+	if cfg.LocalConfigDir == "" {
+		cfg.LocalConfigDir = flags.LocalConfigDir
+	}
+	if cfg.UserConfigDir == "" {
+		cfg.UserConfigDir = flags.UserConfigDir
+	}
+	if cfg.SystemConfigDir == "" {
+		cfg.SystemConfigDir = flags.SystemConfigDir
+	}
+
+	return cfg, nil
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/rkt/container_id.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/rkt/container_id.go
new file mode 100644
index 000000000000..73810ed70a54
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/rkt/container_id.go
@@ -0,0 +1,55 @@
+/*
+Copyright 2015 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rkt
+
+import (
+	"fmt"
+	"strings"
+
+	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
+)
+
+// containerID defines the ID of rkt containers. It is returned to the
+// kubelet, which uses it for container-level operations.
+type containerID struct {
+	uuid    string // rkt uuid of the pod.
+	appName string // Name of the app in that pod.
+}
+
+// buildContainerID constructs the container's ID using containerID,
+// which consists of the pod uuid and the container name.
+// The result can be used to uniquely identify a container.
+func buildContainerID(c *containerID) kubecontainer.ContainerID {
+	return kubecontainer.ContainerID{
+		Type: RktType,
+		ID:   fmt.Sprintf("%s:%s", c.uuid, c.appName),
+	}
+}
+
+// parseContainerID parses the containerID into the pod uuid and the container name. The
+// results can be used to get more information about the container.
+func parseContainerID(id kubecontainer.ContainerID) (*containerID, error) {
+	tuples := strings.Split(id.ID, ":")
+	if len(tuples) != 2 {
+		return nil, fmt.Errorf("rkt: cannot parse container ID for: %q, required format is [UUID:APPNAME]", id)
+	}
+	return &containerID{
+		uuid:    tuples[0],
+		appName: tuples[1],
+	}, nil
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/rkt/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/rkt/doc.go
new file mode 100644
index 000000000000..d45fb3f0e190
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/rkt/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright 2015 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package rkt contains the container runtime interface implementation for rkt.
+package rkt
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/rkt/fake_rkt_interface_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/rkt/fake_rkt_interface_test.go
new file mode 100644
index 000000000000..740701a331b4
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/rkt/fake_rkt_interface_test.go
@@ -0,0 +1,222 @@
+/*
+Copyright 2015 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rkt
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+	"sync"
+
+	"github.com/coreos/go-systemd/dbus"
+	rktapi "github.com/coreos/rkt/api/v1alpha"
+	"golang.org/x/net/context"
+	"google.golang.org/grpc"
+	"k8s.io/kubernetes/pkg/api"
+	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
+	"k8s.io/kubernetes/pkg/types"
+)
+
+// fakeRktInterface mocks the rktapi.PublicAPIClient interface for testing purposes.
+type fakeRktInterface struct {
+	sync.Mutex
+	info       rktapi.Info
+	images     []*rktapi.Image
+	podFilters []*rktapi.PodFilter
+	pods       []*rktapi.Pod
+	called     []string
+	err        error
+}
+
+func newFakeRktInterface() *fakeRktInterface {
+	return &fakeRktInterface{}
+}
+
+func (f *fakeRktInterface) CleanCalls() {
+	f.Lock()
+	defer f.Unlock()
+	f.called = nil
+}
+
+func (f *fakeRktInterface) GetInfo(ctx context.Context, in *rktapi.GetInfoRequest, opts ...grpc.CallOption) (*rktapi.GetInfoResponse, error) {
+	f.Lock()
+	defer f.Unlock()
+
+	f.called = append(f.called, "GetInfo")
+	return &rktapi.GetInfoResponse{Info: &f.info}, f.err
+}
+
+func (f *fakeRktInterface) ListPods(ctx context.Context, in *rktapi.ListPodsRequest, opts ...grpc.CallOption) (*rktapi.ListPodsResponse, error) {
+	f.Lock()
+	defer f.Unlock()
+
+	f.called = append(f.called, "ListPods")
+	f.podFilters = in.Filters
+	return &rktapi.ListPodsResponse{Pods: f.pods}, f.err
+}
+
+func (f *fakeRktInterface) InspectPod(ctx context.Context, in *rktapi.InspectPodRequest, opts ...grpc.CallOption) (*rktapi.InspectPodResponse, error) {
+	f.Lock()
+	defer f.Unlock()
+
+	f.called = append(f.called, "InspectPod")
+	for _, pod := range f.pods {
+		if pod.Id == in.Id {
+			return &rktapi.InspectPodResponse{Pod: pod}, f.err
+		}
+	}
+	return &rktapi.InspectPodResponse{}, fmt.Errorf("pod %q not found", in.Id)
+}
+
+func (f *fakeRktInterface) ListImages(ctx context.Context, in *rktapi.ListImagesRequest, opts ...grpc.CallOption) (*rktapi.ListImagesResponse, error) {
+	f.Lock()
+	defer f.Unlock()
+
+	f.called = append(f.called, "ListImages")
+	return &rktapi.ListImagesResponse{Images: f.images}, f.err
+}
+
+func (f *fakeRktInterface) InspectImage(ctx context.Context, in *rktapi.InspectImageRequest, opts ...grpc.CallOption) (*rktapi.InspectImageResponse, error) {
+	return nil, fmt.Errorf("Not implemented")
+}
+
+func (f *fakeRktInterface) ListenEvents(ctx context.Context, in *rktapi.ListenEventsRequest, opts ...grpc.CallOption) (rktapi.PublicAPI_ListenEventsClient, error) {
+	return nil, fmt.Errorf("Not implemented")
+}
+
+func (f *fakeRktInterface) GetLogs(ctx context.Context, in *rktapi.GetLogsRequest, opts ...grpc.CallOption) (rktapi.PublicAPI_GetLogsClient, error) {
+	return nil, fmt.Errorf("Not implemented")
+}
+
+// fakeSystemd mocks the systemdInterface for testing purposes.
+// TODO(yifan): Remove this once we have a package for launching rkt pods.
+// See https://github.com/coreos/rkt/issues/1769.
+type fakeSystemd struct {
+	sync.Mutex
+	called  []string
+	version string
+	err     error
+}
+
+func newFakeSystemd() *fakeSystemd {
+	return &fakeSystemd{}
+}
+
+func (f *fakeSystemd) CleanCalls() {
+	f.Lock()
+	defer f.Unlock()
+	f.called = nil
+}
+
+func (f *fakeSystemd) Version() (systemdVersion, error) {
+	f.Lock()
+	defer f.Unlock()
+
+	f.called = append(f.called, "Version")
+	v, _ := strconv.Atoi(f.version)
+	return systemdVersion(v), f.err
+}
+
+func (f *fakeSystemd) ListUnits() ([]dbus.UnitStatus, error) {
+	return nil, fmt.Errorf("Not implemented")
+}
+
+func (f *fakeSystemd) StopUnit(name string, mode string, ch chan<- string) (int, error) {
+	return 0, fmt.Errorf("Not implemented")
+}
+
+func (f *fakeSystemd) RestartUnit(name string, mode string, ch chan<- string) (int, error) {
+	return 0, fmt.Errorf("Not implemented")
+}
+
+func (f *fakeSystemd) Reload() error {
+	return fmt.Errorf("Not implemented")
+}
+
+func (f *fakeSystemd) ResetFailed() error {
+	f.Lock()
+	defer f.Unlock()
+
+	f.called = append(f.called, "ResetFailed")
+	return f.err
+}
+
+// fakeRuntimeHelper implements the kubecontainer.RuntimeHelper interface for testing purposes.
+type fakeRuntimeHelper struct {
+	dnsServers  []string
+	dnsSearches []string
+	hostName    string
+	hostDomain  string
+	err         error
+}
+
+func (f *fakeRuntimeHelper) GenerateRunContainerOptions(pod *api.Pod, container *api.Container, podIP string) (*kubecontainer.RunContainerOptions, error) {
+	return nil, fmt.Errorf("Not implemented")
+}
+
+func (f *fakeRuntimeHelper) GetClusterDNS(pod *api.Pod) ([]string, []string, error) {
+	return f.dnsServers, f.dnsSearches, f.err
+}
+
+func (f *fakeRuntimeHelper) GeneratePodHostNameAndDomain(pod *api.Pod) (string, string, error) {
+	return f.hostName, f.hostDomain, nil
+}
+
+func (f *fakeRuntimeHelper) GetPodDir(podUID types.UID) string {
+	return "/poddir/" + string(podUID)
+}
+
+type fakeRktCli struct {
+	sync.Mutex
+	cmds   []string
+	result []string
+	err    error
+}
+
+func newFakeRktCli() *fakeRktCli {
+	return &fakeRktCli{
+		cmds:   []string{},
+		result: []string{},
+	}
+}
+
+func (f *fakeRktCli) RunCommand(args ...string) (result []string, err error) {
+	f.Lock()
+	defer f.Unlock()
+	cmd := append([]string{"rkt"}, args...)
+	f.cmds = append(f.cmds, strings.Join(cmd, " "))
+	return f.result, f.err
+}
+
+func (f *fakeRktCli) Reset() {
+	f.cmds = []string{}
+	f.result = []string{}
+	f.err = nil
+}
+
+type fakePodGetter struct {
+	pods map[types.UID]*api.Pod
+}
+
+func newFakePodGetter() *fakePodGetter {
+	return &fakePodGetter{pods: make(map[types.UID]*api.Pod)}
+}
+
+func (f fakePodGetter) GetPodByUID(uid types.UID) (*api.Pod, bool) {
+	p, found := f.pods[uid]
+	return p, found
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/rkt/image.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/rkt/image.go
new file mode 100644
index 000000000000..3515b073ad39
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/rkt/image.go
@@ -0,0 +1,243 @@
+/*
+Copyright 2015 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This file contains all image related functions for the rkt runtime.
+package rkt
+
+import (
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path"
+	"sort"
+	"strings"
+
+	appcschema "github.com/appc/spec/schema"
+	rktapi "github.com/coreos/rkt/api/v1alpha"
+	dockertypes "github.com/docker/engine-api/types"
+	"github.com/golang/glog"
+	"golang.org/x/net/context"
+	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/credentialprovider"
+	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
+	"k8s.io/kubernetes/pkg/util/parsers"
+)
+
+// PullImage invokes 'rkt fetch' to download an ACI.
+// TODO(yifan): Now we only support Docker images; this should change
+// once the image format has landed, see:
+//
+// http://issue.k8s.io/7203
+//
+func (r *Runtime) PullImage(image kubecontainer.ImageSpec, pullSecrets []api.Secret) error {
+	img := image.Image
+	// TODO(yifan): The credential operation is copied from the dockertools
+	// package. Need to resolve the code duplication.
+	repoToPull, _, _, err := parsers.ParseImageName(img)
+	if err != nil {
+		return err
+	}
+
+	keyring, err := credentialprovider.MakeDockerKeyring(pullSecrets, r.dockerKeyring)
+	if err != nil {
+		return err
+	}
+
+	creds, ok := keyring.Lookup(repoToPull)
+	if !ok {
+		glog.V(1).Infof("Pulling image %s without credentials", img)
+	}
+
+	// Update the auth config json file.
+	// TODO(yifan): Find a way to feed this to rkt.
+	if err := r.writeDockerAuthConfig(img, creds); err != nil {
+		return err
+	}
+
+	if _, err := r.cli.RunCommand("fetch", dockerPrefix+img); err != nil {
+		glog.Errorf("Failed to fetch: %v", err)
+		return err
+	}
+	return nil
+}
+
+func (r *Runtime) IsImagePresent(image kubecontainer.ImageSpec) (bool, error) {
+	images, err := r.listImages(image.Image, false)
+	return len(images) > 0, err
+}
+
+// ListImages lists all the available appc images on the machine by invoking 'rkt image list'.
+func (r *Runtime) ListImages() ([]kubecontainer.Image, error) {
+	listResp, err := r.apisvc.ListImages(context.Background(), &rktapi.ListImagesRequest{})
+	if err != nil {
+		return nil, fmt.Errorf("couldn't list images: %v", err)
+	}
+
+	images := make([]kubecontainer.Image, len(listResp.Images))
+	for i, image := range listResp.Images {
+		images[i] = kubecontainer.Image{
+			ID:       image.Id,
+			RepoTags: []string{buildImageName(image)},
+			Size:     image.Size,
+		}
+	}
+	return images, nil
+}
+
+// RemoveImage removes an on-disk image using 'rkt image rm'.
+func (r *Runtime) RemoveImage(image kubecontainer.ImageSpec) error {
+	imageID, err := r.getImageID(image.Image)
+	if err != nil {
+		return err
+	}
+	if _, err := r.cli.RunCommand("image", "rm", imageID); err != nil {
+		return err
+	}
+	return nil
+}
+
+// buildImageName constructs the image name for kubecontainer.Image.
+func buildImageName(img *rktapi.Image) string {
+	return fmt.Sprintf("%s:%s", img.Name, img.Version)
+}
+
+// getImageID tries to find the image ID for the given image name.
+// imageName should be in the form of 'name[:version]', e.g., 'example.com/app:latest'.
+// The name should match the result of 'rkt image list'. If the version is empty,
+// then 'latest' is assumed.
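+// For example (hypothetical image), looking up "example.com/app" resolves to
+// "example.com/app:latest" and returns the ID of the most recently imported
+// image with that name, per the sorting done in listImages below.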
+func (r *Runtime) getImageID(imageName string) (string, error) {
+	images, err := r.listImages(imageName, false)
+	if err != nil {
+		return "", err
+	}
+	if len(images) == 0 {
+		return "", fmt.Errorf("cannot find the image %q", imageName)
+	}
+	return images[0].Id, nil
+}
+
+type sortByImportTime []*rktapi.Image
+
+func (s sortByImportTime) Len() int           { return len(s) }
+func (s sortByImportTime) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
+func (s sortByImportTime) Less(i, j int) bool { return s[i].ImportTimestamp < s[j].ImportTimestamp }
+
+// listImages lists the images that have the given name. If detail is true,
+// then the image manifest is also included in the result.
+// Note that there could be more than one image with the given name; we return
+// the result sorted in reverse by import time, so that the latest image comes
+// first.
+func (r *Runtime) listImages(image string, detail bool) ([]*rktapi.Image, error) {
+	repoToPull, tag, _, err := parsers.ParseImageName(image)
+	if err != nil {
+		return nil, err
+	}
+
+	listResp, err := r.apisvc.ListImages(context.Background(), &rktapi.ListImagesRequest{
+		Detail: detail,
+		Filters: []*rktapi.ImageFilter{
+			{
+				// TODO(yifan): Add a field in the ImageFilter to match the whole name,
+				// not just keywords.
+				// https://github.com/coreos/rkt/issues/1872#issuecomment-166456938
+				Keywords: []string{repoToPull},
+				Labels:   []*rktapi.KeyValue{{Key: "version", Value: tag}},
+			},
+		},
+	})
+	if err != nil {
+		return nil, fmt.Errorf("couldn't list images: %v", err)
+	}
+
+	// TODO(yifan): Let the API service sort the result:
+	// See https://github.com/coreos/rkt/issues/1911.
+	sort.Sort(sort.Reverse(sortByImportTime(listResp.Images)))
+	return listResp.Images, nil
+}
+
+// getImageManifest retrieves the image manifest for the given image.
+func (r *Runtime) getImageManifest(image string) (*appcschema.ImageManifest, error) {
+	var manifest appcschema.ImageManifest
+
+	images, err := r.listImages(image, true)
+	if err != nil {
+		return nil, err
+	}
+	if len(images) == 0 {
+		return nil, fmt.Errorf("cannot find the image %q", image)
+	}
+
+	return &manifest, json.Unmarshal(images[0].Manifest, &manifest)
+}
+
+// TODO(yifan): This is very racy, inefficient, and unsafe; we need to provide
+// different namespaces. See: https://github.com/coreos/rkt/issues/836.
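+// As a hedged sketch, the auth file written below follows the
+// dockerAuthTemplate defined in rkt.go (all values hypothetical):
+//
+//	{"rktKind":"dockerAuth","rktVersion":"v1","registries":["quay.io"],
+//	 "credentials":{"user":"alice","password":"secret"}}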
+func (r *Runtime) writeDockerAuthConfig(image string, credsSlice []credentialprovider.LazyAuthConfiguration) error {
+	if len(credsSlice) == 0 {
+		return nil
+	}
+
+	creds := dockertypes.AuthConfig{}
+	// TODO handle multiple creds
+	if len(credsSlice) >= 1 {
+		creds = credentialprovider.LazyProvide(credsSlice[0])
+	}
+
+	registry := "index.docker.io"
+	// Image spec: [<registry>/]<repository>/<image>[:<version>]
+	if since > 0 {
+		// Need to add the '-r' flag if we include '--since' and '-n' at the same time,
+		// see https://github.com/systemd/systemd/issues/1477
+		cmd.Args = append(cmd.Args, "--since", time.Unix(since, 0).Format(journalSinceLayout))
+		if logOptions.TailLines != nil {
+			cmd.Args = append(cmd.Args, "-r")
+		}
+	}
+
+	glog.V(4).Infof("rkt: getting logs with command %q", cmd.Args)
+	outPipe, err := cmd.StdoutPipe()
+	if err != nil {
+		glog.Errorf("rkt: cannot create pipe for journalctl's stdout: %v", err)
+		return err
+	}
+	errPipe, err := cmd.StderrPipe()
+	if err != nil {
+		glog.Errorf("rkt: cannot create pipe for journalctl's stderr: %v", err)
+		return err
+	}
+
+	if err := cmd.Start(); err != nil {
+		return err
+	}
+
+	var wg sync.WaitGroup
+
+	wg.Add(2)
+
+	go pipeLog(&wg, logOptions, outPipe, stdout)
+	go pipeLog(&wg, logOptions, errPipe, stderr)
+
+	// Wait until the logs are fed to stdout, stderr.
+	wg.Wait()
+	cmd.Wait()
+
+	return nil
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/rkt/mock_os/mockfileinfo.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/rkt/mock_os/mockfileinfo.go
new file mode 100644
index 000000000000..16761e2586b0
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/rkt/mock_os/mockfileinfo.go
@@ -0,0 +1,109 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +// Generated via: mockgen os FileInfo +// Edited to include required boilerplate +// Source: os (interfaces: FileInfo) + +package mock_os + +import ( + os "os" + time "time" + + gomock "github.com/golang/mock/gomock" +) + +// Mock of FileInfo interface +type MockFileInfo struct { + ctrl *gomock.Controller + recorder *_MockFileInfoRecorder +} + +// Recorder for MockFileInfo (not exported) +type _MockFileInfoRecorder struct { + mock *MockFileInfo +} + +func NewMockFileInfo(ctrl *gomock.Controller) *MockFileInfo { + mock := &MockFileInfo{ctrl: ctrl} + mock.recorder = &_MockFileInfoRecorder{mock} + return mock +} + +func (_m *MockFileInfo) EXPECT() *_MockFileInfoRecorder { + return _m.recorder +} + +func (_m *MockFileInfo) IsDir() bool { + ret := _m.ctrl.Call(_m, "IsDir") + ret0, _ := ret[0].(bool) + return ret0 +} + +func (_mr *_MockFileInfoRecorder) IsDir() *gomock.Call { + return _mr.mock.ctrl.RecordCall(_mr.mock, "IsDir") +} + +func (_m *MockFileInfo) ModTime() time.Time { + ret := _m.ctrl.Call(_m, "ModTime") + ret0, _ := ret[0].(time.Time) + return ret0 +} + +func (_mr *_MockFileInfoRecorder) ModTime() *gomock.Call { + return _mr.mock.ctrl.RecordCall(_mr.mock, "ModTime") +} + +func (_m *MockFileInfo) Mode() os.FileMode { + ret := _m.ctrl.Call(_m, "Mode") + ret0, _ := ret[0].(os.FileMode) + return ret0 +} + +func (_mr *_MockFileInfoRecorder) Mode() *gomock.Call { + return _mr.mock.ctrl.RecordCall(_mr.mock, "Mode") +} + +func (_m *MockFileInfo) Name() string { + ret := _m.ctrl.Call(_m, "Name") + ret0, _ := ret[0].(string) + return ret0 +} + +func (_mr *_MockFileInfoRecorder) Name() *gomock.Call { + return _mr.mock.ctrl.RecordCall(_mr.mock, "Name") +} + +func (_m *MockFileInfo) Size() int64 { + ret := _m.ctrl.Call(_m, "Size") + ret0, _ := ret[0].(int64) + return ret0 +} + +func (_mr *_MockFileInfoRecorder) Size() *gomock.Call { + return _mr.mock.ctrl.RecordCall(_mr.mock, "Size") +} + +func (_m *MockFileInfo) Sys() interface{} { + ret := _m.ctrl.Call(_m, "Sys") + ret0, _ := ret[0].(interface{}) + return ret0 +} + +func (_mr *_MockFileInfoRecorder) Sys() *gomock.Call { + return _mr.mock.ctrl.RecordCall(_mr.mock, "Sys") +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/rkt/rkt.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/rkt/rkt.go new file mode 100644 index 000000000000..9e06ddf82a60 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/rkt/rkt.go @@ -0,0 +1,2117 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package rkt + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path" + "path/filepath" + "sort" + "strconv" + "strings" + "sync" + "syscall" + "time" + + appcschema "github.com/appc/spec/schema" + appctypes "github.com/appc/spec/schema/types" + "github.com/coreos/go-systemd/unit" + rktapi "github.com/coreos/rkt/api/v1alpha" + "github.com/golang/glog" + "golang.org/x/net/context" + "google.golang.org/grpc" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/capabilities" + "k8s.io/kubernetes/pkg/client/record" + "k8s.io/kubernetes/pkg/credentialprovider" + kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" + "k8s.io/kubernetes/pkg/kubelet/lifecycle" + "k8s.io/kubernetes/pkg/kubelet/network" + "k8s.io/kubernetes/pkg/kubelet/network/hairpin" + proberesults "k8s.io/kubernetes/pkg/kubelet/prober/results" + "k8s.io/kubernetes/pkg/kubelet/types" + "k8s.io/kubernetes/pkg/kubelet/util/format" + "k8s.io/kubernetes/pkg/securitycontext" + kubetypes "k8s.io/kubernetes/pkg/types" + "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/errors" + utilexec "k8s.io/kubernetes/pkg/util/exec" + "k8s.io/kubernetes/pkg/util/flowcontrol" + utilstrings "k8s.io/kubernetes/pkg/util/strings" + utilwait "k8s.io/kubernetes/pkg/util/wait" +) + +const ( + RktType = "rkt" + DefaultRktAPIServiceEndpoint = "localhost:15441" + + minimumAppcVersion = "0.8.1" + minimumRktBinVersion = "1.6.0" + recommendedRktBinVersion = "1.6.0" + minimumRktApiVersion = "1.0.0-alpha" + minimumSystemdVersion = "219" + + systemdServiceDir = "/run/systemd/system" + rktDataDir = "/var/lib/rkt" + rktLocalConfigDir = "/etc/rkt" + + kubernetesUnitPrefix = "k8s_" + unitKubernetesSection = "X-Kubernetes" + unitPodUID = "PodUID" + unitPodName = "PodName" + unitPodNamespace = "PodNamespace" + unitRestartCount = "RestartCount" + + k8sRktKubeletAnno = "rkt.kubernetes.io/managed-by-kubelet" + k8sRktKubeletAnnoValue = "true" + k8sRktContainerHashAnno = "rkt.kubernetes.io/container-hash" + k8sRktRestartCountAnno = "rkt.kubernetes.io/restart-count" + k8sRktTerminationMessagePathAnno = "rkt.kubernetes.io/termination-message-path" + + // TODO(euank): This has significant security concerns as a stage1 image is + // effectively root. + // Furthermore, this (using an annotation) is a hack to pass an extra + // non-portable argument in. It should not be relied on to be stable. + // In the future, this might be subsumed by a first-class api object, or by a + // kitchen-sink params object (#17064). + // See discussion in #23944 + // Also, do we want more granularity than path-at-the-kubelet-level and + // image/name-at-the-pod-level? + k8sRktStage1NameAnno = "rkt.alpha.kubernetes.io/stage1-name-override" + dockerPrefix = "docker://" + + authDir = "auth.d" + dockerAuthTemplate = `{"rktKind":"dockerAuth","rktVersion":"v1","registries":[%q],"credentials":{"user":%q,"password":%q}}` + + defaultRktAPIServiceAddr = "localhost:15441" + + // ndots specifies the minimum number of dots that a domain name must contain for the resolver to consider it as FQDN (fully-qualified) + // we want to able to consider SRV lookup names like _dns._udp.kube-dns.default.svc to be considered relative. + // hence, setting ndots to be 5. + // TODO(yifan): Move this and dockertools.ndotsDNSOption to a common package. + defaultDNSOption = "ndots:5" + + // Annotations for the ENTRYPOINT and CMD for an ACI that's converted from Docker image. + // TODO(yifan): Import them from docker2aci. See https://github.com/appc/docker2aci/issues/133. 
+	appcDockerEntrypoint = "appc.io/docker/entrypoint"
+	appcDockerCmd        = "appc.io/docker/cmd"
+
+	// TODO(yifan): Reuse this const with Docker runtime.
+	minimumGracePeriodInSeconds = 2
+)
+
+// Runtime implements the container runtime interface for rkt. The
+// implementation uses systemd, so in order to run this runtime, systemd must
+// be installed on the machine.
type Runtime struct {
+	cli     cliInterface
+	systemd systemdInterface
+	// The grpc client for the rkt api-service.
+	apisvcConn *grpc.ClientConn
+	apisvc     rktapi.PublicAPIClient
+	config     *Config
+	// TODO(yifan): Refactor this to be a generic keyring.
+	dockerKeyring credentialprovider.DockerKeyring
+
+	containerRefManager *kubecontainer.RefManager
+	podGetter           podGetter
+	runtimeHelper       kubecontainer.RuntimeHelper
+	recorder            record.EventRecorder
+	livenessManager     proberesults.Manager
+	imagePuller         kubecontainer.ImagePuller
+	runner              kubecontainer.HandlerRunner
+	execer              utilexec.Interface
+	os                  kubecontainer.OSInterface
+
+	// Network plugin.
+	networkPlugin network.NetworkPlugin
+
+	// If true, the "hairpin mode" flag is set on container interfaces.
+	// A false value means the kubelet just backs off from setting it;
+	// it might already be true.
+	configureHairpinMode bool
+
+	// Used for a systemd Exec, which requires the full path.
+	touchPath   string
+	nsenterPath string
+
+	versions versions
+}
+
+var _ kubecontainer.Runtime = &Runtime{}
+
+// TODO(yifan): This duplicates the podGetter in dockertools.
+type podGetter interface {
+	GetPodByUID(kubetypes.UID) (*api.Pod, bool)
+}
+
+// cliInterface wraps the command line calls for testing purposes.
+type cliInterface interface {
+	// args are the arguments given to the 'rkt' command,
+	// e.g. args can be 'rm ${UUID}'.
+	RunCommand(args ...string) (result []string, err error)
+}
+
+// New creates the rkt container runtime which implements the container runtime interface.
+// It will test if the rkt binary is in the $PATH, and whether we can get the
+// version of it. If so, it creates the rkt container runtime, otherwise it returns an error.
+func New(
+	apiEndpoint string,
+	config *Config,
+	runtimeHelper kubecontainer.RuntimeHelper,
+	recorder record.EventRecorder,
+	containerRefManager *kubecontainer.RefManager,
+	podGetter podGetter,
+	livenessManager proberesults.Manager,
+	httpClient types.HttpGetter,
+	networkPlugin network.NetworkPlugin,
+	hairpinMode bool,
+	execer utilexec.Interface,
+	os kubecontainer.OSInterface,
+	imageBackOff *flowcontrol.Backoff,
+	serializeImagePulls bool,
+) (*Runtime, error) {
+	// Create dbus connection.
+	systemd, err := newSystemd()
+	if err != nil {
+		return nil, fmt.Errorf("rkt: cannot create systemd interface: %v", err)
+	}
+
+	// TODO(yifan): Use a secure connection.
+	apisvcConn, err := grpc.Dial(apiEndpoint, grpc.WithInsecure())
+	if err != nil {
+		return nil, fmt.Errorf("rkt: cannot connect to rkt api service: %v", err)
+	}
+
+	// TODO(yifan): Get the rkt path from the API service.
+	if config.Path == "" {
+		// No default rkt path was set, so try to find one in $PATH.
+ var err error + config.Path, err = execer.LookPath("rkt") + if err != nil { + return nil, fmt.Errorf("cannot find rkt binary: %v", err) + } + } + + touchPath, err := execer.LookPath("touch") + if err != nil { + return nil, fmt.Errorf("cannot find touch binary: %v", err) + } + + nsenterPath, err := execer.LookPath("nsenter") + if err != nil { + return nil, fmt.Errorf("cannot find nsenter binary: %v", err) + } + + rkt := &Runtime{ + os: kubecontainer.RealOS{}, + systemd: systemd, + apisvcConn: apisvcConn, + apisvc: rktapi.NewPublicAPIClient(apisvcConn), + config: config, + dockerKeyring: credentialprovider.NewDockerKeyring(), + containerRefManager: containerRefManager, + podGetter: podGetter, + runtimeHelper: runtimeHelper, + recorder: recorder, + livenessManager: livenessManager, + networkPlugin: networkPlugin, + execer: execer, + touchPath: touchPath, + nsenterPath: nsenterPath, + } + + rkt.config, err = rkt.getConfig(rkt.config) + if err != nil { + return nil, fmt.Errorf("rkt: cannot get config from rkt api service: %v", err) + } + + rkt.runner = lifecycle.NewHandlerRunner(httpClient, rkt, rkt) + + if serializeImagePulls { + rkt.imagePuller = kubecontainer.NewSerializedImagePuller(recorder, rkt, imageBackOff) + } else { + rkt.imagePuller = kubecontainer.NewImagePuller(recorder, rkt, imageBackOff) + } + + if err := rkt.getVersions(); err != nil { + return nil, fmt.Errorf("rkt: error getting version info: %v", err) + } + + rkt.cli = rkt + + return rkt, nil +} + +func (r *Runtime) buildCommand(args ...string) *exec.Cmd { + allArgs := append(r.config.buildGlobalOptions(), args...) + return exec.Command(r.config.Path, allArgs...) +} + +// convertToACName converts a string into ACName. +func convertToACName(name string) appctypes.ACName { + // Note that as the 'name' already matches 'DNS_LABEL' + // defined in pkg/api/types.go, there shouldn't be error or panic. + acname, _ := appctypes.SanitizeACName(name) + return *appctypes.MustACName(acname) +} + +// RunCommand invokes rkt binary with arguments and returns the result +// from stdout in a list of strings. Each string in the list is a line. +func (r *Runtime) RunCommand(args ...string) ([]string, error) { + glog.V(4).Info("rkt: Run command:", args) + + var stdout, stderr bytes.Buffer + cmd := r.buildCommand(args...) + cmd.Stdout = &stdout + cmd.Stderr = &stderr + if err := cmd.Run(); err != nil { + return nil, fmt.Errorf("failed to run %v: %v\nstdout: %v\nstderr: %v", args, err, stdout.String(), stderr.String()) + } + return strings.Split(strings.TrimSpace(stdout.String()), "\n"), nil +} + +// makePodServiceFileName constructs the unit file name for a pod using its rkt pod uuid. +func makePodServiceFileName(uuid string) string { + // TODO(yifan): Add name for readability? We need to consider the + // limit of the length. + return fmt.Sprintf("%s%s.service", kubernetesUnitPrefix, uuid) +} + +func getRktUUIDFromServiceFileName(filename string) string { + return strings.TrimPrefix(strings.TrimSuffix(filename, path.Ext(filename)), kubernetesUnitPrefix) +} + +// setIsolators sets the apps' isolators according to the security context and resource spec. +func setIsolators(app *appctypes.App, c *api.Container, ctx *api.SecurityContext) error { + var isolators []appctypes.Isolator + + // Capabilities isolators. 
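+	// As a hedged example (security context hypothetical): add=["NET_ADMIN"]
+	// with drop=["CHOWN"] yields a capabilities retain-set isolator for
+	// NET_ADMIN and a revoke-set isolator for CHOWN, via the appc schema
+	// helpers used below.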
+ if ctx != nil { + var addCaps, dropCaps []string + + if ctx.Capabilities != nil { + addCaps, dropCaps = securitycontext.MakeCapabilities(ctx.Capabilities.Add, ctx.Capabilities.Drop) + } + if ctx.Privileged != nil && *ctx.Privileged { + addCaps, dropCaps = allCapabilities(), []string{} + } + if len(addCaps) > 0 { + set, err := appctypes.NewLinuxCapabilitiesRetainSet(addCaps...) + if err != nil { + return err + } + isolators = append(isolators, set.AsIsolator()) + } + if len(dropCaps) > 0 { + set, err := appctypes.NewLinuxCapabilitiesRevokeSet(dropCaps...) + if err != nil { + return err + } + isolators = append(isolators, set.AsIsolator()) + } + } + + // Resources isolators. + type resource struct { + limit string + request string + } + + // If limit is empty, populate it with request and vice versa. + resources := make(map[api.ResourceName]*resource) + for name, quantity := range c.Resources.Limits { + resources[name] = &resource{limit: quantity.String(), request: quantity.String()} + } + for name, quantity := range c.Resources.Requests { + r, ok := resources[name] + if ok { + r.request = quantity.String() + continue + } + resources[name] = &resource{limit: quantity.String(), request: quantity.String()} + } + + for name, res := range resources { + switch name { + case api.ResourceCPU: + cpu, err := appctypes.NewResourceCPUIsolator(res.request, res.limit) + if err != nil { + return err + } + isolators = append(isolators, cpu.AsIsolator()) + case api.ResourceMemory: + memory, err := appctypes.NewResourceMemoryIsolator(res.request, res.limit) + if err != nil { + return err + } + isolators = append(isolators, memory.AsIsolator()) + default: + return fmt.Errorf("resource type not supported: %v", name) + } + } + + mergeIsolators(app, isolators) + return nil +} + +// mergeIsolators replaces the app.Isolators with isolators. +func mergeIsolators(app *appctypes.App, isolators []appctypes.Isolator) { + for _, is := range isolators { + found := false + for j, js := range app.Isolators { + if is.Name.Equals(js.Name) { + switch is.Name { + case appctypes.LinuxCapabilitiesRetainSetName: + // TODO(yifan): More fine grain merge for capability set instead of override. + fallthrough + case appctypes.LinuxCapabilitiesRevokeSetName: + fallthrough + case appctypes.ResourceCPUName: + fallthrough + case appctypes.ResourceMemoryName: + app.Isolators[j] = is + default: + panic(fmt.Sprintf("unexpected isolator name: %v", is.Name)) + } + found = true + break + } + } + if !found { + app.Isolators = append(app.Isolators, is) + } + } +} + +// mergeEnv merges the optEnv with the image's environments. +// The environments defined in the image will be overridden by +// the ones with the same name in optEnv. +func mergeEnv(app *appctypes.App, optEnv []kubecontainer.EnvVar) { + envMap := make(map[string]string) + for _, e := range app.Environment { + envMap[e.Name] = e.Value + } + for _, e := range optEnv { + envMap[e.Name] = e.Value + } + app.Environment = nil + for name, value := range envMap { + app.Environment = append(app.Environment, appctypes.EnvironmentVariable{ + Name: name, + Value: value, + }) + } +} + +// mergeMounts merges the optMounts with the image's mount points. +// The mount points defined in the image will be overridden by the ones +// with the same name in optMounts. 
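+// For example (hypothetical names), an image mount point named "data" is
+// replaced by an optMounts entry also named "data", while mount points unique
+// to either side are kept unchanged.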
+func mergeMounts(app *appctypes.App, optMounts []kubecontainer.Mount) { + mountMap := make(map[appctypes.ACName]appctypes.MountPoint) + for _, m := range app.MountPoints { + mountMap[m.Name] = m + } + for _, m := range optMounts { + mpName := convertToACName(m.Name) + mountMap[mpName] = appctypes.MountPoint{ + Name: mpName, + Path: m.ContainerPath, + ReadOnly: m.ReadOnly, + } + } + app.MountPoints = nil + for _, mount := range mountMap { + app.MountPoints = append(app.MountPoints, mount) + } +} + +// mergePortMappings merges the optPortMappings with the image's port mappings. +// The port mappings defined in the image will be overridden by the ones +// with the same name in optPortMappings. +func mergePortMappings(app *appctypes.App, optPortMappings []kubecontainer.PortMapping) { + portMap := make(map[appctypes.ACName]appctypes.Port) + for _, p := range app.Ports { + portMap[p.Name] = p + } + for _, p := range optPortMappings { + pName := convertToACName(p.Name) + portMap[pName] = appctypes.Port{ + Name: pName, + Protocol: string(p.Protocol), + Port: uint(p.ContainerPort), + } + } + app.Ports = nil + for _, port := range portMap { + app.Ports = append(app.Ports, port) + } +} + +func verifyNonRoot(app *appctypes.App, ctx *api.SecurityContext) error { + if ctx != nil && ctx.RunAsNonRoot != nil && *ctx.RunAsNonRoot { + if ctx.RunAsUser != nil && *ctx.RunAsUser == 0 { + return fmt.Errorf("container's runAsUser breaks non-root policy") + } + if ctx.RunAsUser == nil && app.User == "0" { + return fmt.Errorf("container has no runAsUser and image will run as root") + } + } + return nil +} + +func setSupplementaryGIDs(app *appctypes.App, podCtx *api.PodSecurityContext) { + if podCtx != nil { + app.SupplementaryGIDs = app.SupplementaryGIDs[:0] + for _, v := range podCtx.SupplementalGroups { + app.SupplementaryGIDs = append(app.SupplementaryGIDs, int(v)) + } + if podCtx.FSGroup != nil { + app.SupplementaryGIDs = append(app.SupplementaryGIDs, int(*podCtx.FSGroup)) + } + } +} + +// setApp merges the container spec with the image's manifest. +func setApp(imgManifest *appcschema.ImageManifest, c *api.Container, opts *kubecontainer.RunContainerOptions, ctx *api.SecurityContext, podCtx *api.PodSecurityContext) error { + app := imgManifest.App + + // Set up Exec. + var command, args []string + cmd, ok := imgManifest.Annotations.Get(appcDockerEntrypoint) + if ok { + err := json.Unmarshal([]byte(cmd), &command) + if err != nil { + return fmt.Errorf("cannot unmarshal ENTRYPOINT %q: %v", cmd, err) + } + } + ag, ok := imgManifest.Annotations.Get(appcDockerCmd) + if ok { + err := json.Unmarshal([]byte(ag), &args) + if err != nil { + return fmt.Errorf("cannot unmarshal CMD %q: %v", ag, err) + } + } + userCommand, userArgs := kubecontainer.ExpandContainerCommandAndArgs(c, opts.Envs) + + if len(userCommand) > 0 { + command = userCommand + args = nil // If 'command' is specified, then drop the default args. + } + if len(userArgs) > 0 { + args = userArgs + } + + exec := append(command, args...) + if len(exec) > 0 { + app.Exec = exec + } + + // Set UID and GIDs. + if err := verifyNonRoot(app, ctx); err != nil { + return err + } + if ctx != nil && ctx.RunAsUser != nil { + app.User = strconv.Itoa(int(*ctx.RunAsUser)) + } + setSupplementaryGIDs(app, podCtx) + + // If 'User' or 'Group' are still empty at this point, + // then apply the root UID and GID. + // TODO(yifan): Instead of using root GID, we should use + // the GID which the user is in. 
+	if app.User == "" {
+		app.User = "0"
+	}
+	if app.Group == "" {
+		app.Group = "0"
+	}
+
+	// Set working directory.
+	if len(c.WorkingDir) > 0 {
+		app.WorkingDirectory = c.WorkingDir
+	}
+
+	// Note that we don't create the Mounts section in the pod manifest here,
+	// as Mounts will be automatically generated by rkt.
+	mergeMounts(app, opts.Mounts)
+	mergeEnv(app, opts.Envs)
+	mergePortMappings(app, opts.PortMappings)
+
+	return setIsolators(app, c, ctx)
+}
+
+// makePodManifest transforms a kubelet pod spec to the rkt pod manifest.
+func (r *Runtime) makePodManifest(pod *api.Pod, podIP string, pullSecrets []api.Secret) (*appcschema.PodManifest, error) {
+	manifest := appcschema.BlankPodManifest()
+
+	listResp, err := r.apisvc.ListPods(context.Background(), &rktapi.ListPodsRequest{
+		Detail:  true,
+		Filters: kubernetesPodFilters(pod.UID),
+	})
+	if err != nil {
+		return nil, fmt.Errorf("couldn't list pods: %v", err)
+	}
+
+	restartCount := 0
+	for _, pod := range listResp.Pods {
+		manifest := &appcschema.PodManifest{}
+		err = json.Unmarshal(pod.Manifest, manifest)
+		if err != nil {
+			glog.Warningf("rkt: error unmarshaling pod manifest: %v", err)
+			continue
+		}
+
+		if countString, ok := manifest.Annotations.Get(k8sRktRestartCountAnno); ok {
+			num, err := strconv.Atoi(countString)
+			if err != nil {
+				glog.Warningf("rkt: error reading restart count on pod: %v", err)
+				continue
+			}
+			if num+1 > restartCount {
+				restartCount = num + 1
+			}
+		}
+	}
+
+	requiresPrivileged := false
+	manifest.Annotations.Set(*appctypes.MustACIdentifier(k8sRktKubeletAnno), k8sRktKubeletAnnoValue)
+	manifest.Annotations.Set(*appctypes.MustACIdentifier(types.KubernetesPodUIDLabel), string(pod.UID))
+	manifest.Annotations.Set(*appctypes.MustACIdentifier(types.KubernetesPodNameLabel), pod.Name)
+	manifest.Annotations.Set(*appctypes.MustACIdentifier(types.KubernetesPodNamespaceLabel), pod.Namespace)
+	manifest.Annotations.Set(*appctypes.MustACIdentifier(k8sRktRestartCountAnno), strconv.Itoa(restartCount))
+	if stage1Name, ok := pod.Annotations[k8sRktStage1NameAnno]; ok {
+		requiresPrivileged = true
+		manifest.Annotations.Set(*appctypes.MustACIdentifier(k8sRktStage1NameAnno), stage1Name)
+	}
+
+	for _, c := range pod.Spec.Containers {
+		err := r.newAppcRuntimeApp(pod, podIP, c, requiresPrivileged, pullSecrets, manifest)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	// TODO(yifan): Set pod-level isolators once it's supported in kubernetes.
+	return manifest, nil
+}
+
+// TODO(yifan): Can make rkt handle this when '--net=host'. See https://github.com/coreos/rkt/issues/2430.
+func makeHostNetworkMount(opts *kubecontainer.RunContainerOptions) (*kubecontainer.Mount, *kubecontainer.Mount) {
+	hostsMount := kubecontainer.Mount{
+		Name:          "kubernetes-hostnetwork-hosts-conf",
+		ContainerPath: "/etc/hosts",
+		HostPath:      "/etc/hosts",
+		ReadOnly:      true,
+	}
+	resolvMount := kubecontainer.Mount{
+		Name:          "kubernetes-hostnetwork-resolv-conf",
+		ContainerPath: "/etc/resolv.conf",
+		HostPath:      "/etc/resolv.conf",
+		ReadOnly:      true,
+	}
+	opts.Mounts = append(opts.Mounts, hostsMount, resolvMount)
+	return &hostsMount, &resolvMount
+}
+
+// podFinishedMarkerPath returns the path to a file which is used to indicate
+// that the pod exited, and the time thereof.
+// If the file does not exist, the pod has not exited. If it does exist, the
+// ctime of the file indicates the time the pod exited.
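+// The marker sits directly under the pod directory, e.g. (layout
+// illustrative): <podDir>/finished-<rkt-uuid>.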
+func podFinishedMarkerPath(podDir string, rktUID string) string {
+	return filepath.Join(podDir, "finished-"+rktUID)
+}
+
+func podFinishedMarkCommand(touchPath, podDir, rktUID string) string {
+	// TODO, if the path has a `'` character in it, this breaks.
+	return touchPath + " " + podFinishedMarkerPath(podDir, rktUID)
+}
+
+// podFinishedAt returns the time that a pod exited, or a zero time if it has
+// not.
+func (r *Runtime) podFinishedAt(podUID kubetypes.UID, rktUID string) time.Time {
+	markerFile := podFinishedMarkerPath(r.runtimeHelper.GetPodDir(podUID), rktUID)
+	stat, err := r.os.Stat(markerFile)
+	if err != nil {
+		if !os.IsNotExist(err) {
+			glog.Warningf("rkt: unexpected fs error checking pod finished marker: %v", err)
+		}
+		return time.Time{}
+	}
+	return stat.ModTime()
+}
+
+func (r *Runtime) makeContainerLogMount(opts *kubecontainer.RunContainerOptions, container *api.Container) (*kubecontainer.Mount, error) {
+	if opts.PodContainerDir == "" || container.TerminationMessagePath == "" {
+		return nil, nil
+	}
+
+	// In the docker runtime, the container log path contains the container ID.
+	// However, for the rkt runtime, we cannot get the container ID before the
+	// container is launched, so here we generate a random uuid to enable us to
+	// map a container's termination message path to a unique log file on disk.
+	randomUID := util.NewUUID()
+	containerLogPath := path.Join(opts.PodContainerDir, string(randomUID))
+	fs, err := r.os.Create(containerLogPath)
+	if err != nil {
+		return nil, err
+	}
+
+	if err := fs.Close(); err != nil {
+		return nil, err
+	}
+
+	mnt := kubecontainer.Mount{
+		// Use a random name for the termination message mount, so that
+		// when a container restarts, it will not overwrite the old termination
+		// message.
+		Name:          fmt.Sprintf("termination-message-%s", randomUID),
+		ContainerPath: container.TerminationMessagePath,
+		HostPath:      containerLogPath,
+		ReadOnly:      false,
+	}
+	opts.Mounts = append(opts.Mounts, mnt)
+
+	return &mnt, nil
+}
+
+func (r *Runtime) newAppcRuntimeApp(pod *api.Pod, podIP string, c api.Container, requiresPrivileged bool, pullSecrets []api.Secret, manifest *appcschema.PodManifest) error {
+	if requiresPrivileged && !capabilities.Get().AllowPrivileged {
+		return fmt.Errorf("cannot make %q: running a custom stage1 requires a privileged security context", format.Pod(pod))
+	}
+	if err, _ := r.imagePuller.PullImage(pod, &c, pullSecrets); err != nil {
+		return err
+	}
+	imgManifest, err := r.getImageManifest(c.Image)
+	if err != nil {
+		return err
+	}
+
+	if imgManifest.App == nil {
+		imgManifest.App = new(appctypes.App)
+	}
+
+	imageID, err := r.getImageID(c.Image)
+	if err != nil {
+		return err
+	}
+	hash, err := appctypes.NewHash(imageID)
+	if err != nil {
+		return err
+	}
+
+	// TODO: determine how this should be handled for rkt
+	opts, err := r.runtimeHelper.GenerateRunContainerOptions(pod, &c, podIP)
+	if err != nil {
+		return err
+	}
+
+	// Create the container log file and make a mount pair.
+	mnt, err := r.makeContainerLogMount(opts, &c)
+	if err != nil {
+		return err
+	}
+
+	// If run in 'hostnetwork' mode, then mount the host's /etc/resolv.conf and /etc/hosts,
+	// and add volumes.
+ var hostsMnt, resolvMnt *kubecontainer.Mount + if kubecontainer.IsHostNetworkPod(pod) { + hostsMnt, resolvMnt = makeHostNetworkMount(opts) + manifest.Volumes = append(manifest.Volumes, appctypes.Volume{ + Name: convertToACName(hostsMnt.Name), + Kind: "host", + Source: hostsMnt.HostPath, + }) + manifest.Volumes = append(manifest.Volumes, appctypes.Volume{ + Name: convertToACName(resolvMnt.Name), + Kind: "host", + Source: resolvMnt.HostPath, + }) + } + + ctx := securitycontext.DetermineEffectiveSecurityContext(pod, &c) + if err := setApp(imgManifest, &c, opts, ctx, pod.Spec.SecurityContext); err != nil { + return err + } + + for _, mnt := range opts.Mounts { + readOnly := mnt.ReadOnly + manifest.Volumes = append(manifest.Volumes, appctypes.Volume{ + Name: convertToACName(mnt.Name), + Source: mnt.HostPath, + Kind: "host", + ReadOnly: &readOnly, + }) + } + + ra := appcschema.RuntimeApp{ + Name: convertToACName(c.Name), + Image: appcschema.RuntimeImage{ID: *hash}, + App: imgManifest.App, + Annotations: []appctypes.Annotation{ + { + Name: *appctypes.MustACIdentifier(k8sRktContainerHashAnno), + Value: strconv.FormatUint(kubecontainer.HashContainer(&c), 10), + }, + { + Name: *appctypes.MustACIdentifier(types.KubernetesContainerNameLabel), + Value: c.Name, + }, + }, + } + + if c.SecurityContext != nil && c.SecurityContext.ReadOnlyRootFilesystem != nil { + ra.ReadOnlyRootFS = *c.SecurityContext.ReadOnlyRootFilesystem + } + + if mnt != nil { + ra.Annotations = append(ra.Annotations, appctypes.Annotation{ + Name: *appctypes.MustACIdentifier(k8sRktTerminationMessagePathAnno), + Value: mnt.HostPath, + }) + + manifest.Volumes = append(manifest.Volumes, appctypes.Volume{ + Name: convertToACName(mnt.Name), + Kind: "host", + Source: mnt.HostPath, + }) + } + + manifest.Apps = append(manifest.Apps, ra) + + // Set global ports. + for _, port := range opts.PortMappings { + if port.HostPort == 0 { + continue + } + manifest.Ports = append(manifest.Ports, appctypes.ExposedPort{ + Name: convertToACName(port.Name), + HostPort: uint(port.HostPort), + }) + } + + return nil +} + +func runningKubernetesPodFilters(uid kubetypes.UID) []*rktapi.PodFilter { + return []*rktapi.PodFilter{ + { + States: []rktapi.PodState{ + rktapi.PodState_POD_STATE_RUNNING, + }, + Annotations: []*rktapi.KeyValue{ + { + Key: k8sRktKubeletAnno, + Value: k8sRktKubeletAnnoValue, + }, + { + Key: types.KubernetesPodUIDLabel, + Value: string(uid), + }, + }, + }, + } +} + +func kubernetesPodFilters(uid kubetypes.UID) []*rktapi.PodFilter { + return []*rktapi.PodFilter{ + { + Annotations: []*rktapi.KeyValue{ + { + Key: k8sRktKubeletAnno, + Value: k8sRktKubeletAnnoValue, + }, + { + Key: types.KubernetesPodUIDLabel, + Value: string(uid), + }, + }, + }, + } +} + +func kubernetesPodsFilters() []*rktapi.PodFilter { + return []*rktapi.PodFilter{ + { + Annotations: []*rktapi.KeyValue{ + { + Key: k8sRktKubeletAnno, + Value: k8sRktKubeletAnnoValue, + }, + }, + }, + } +} + +func newUnitOption(section, name, value string) *unit.UnitOption { + return &unit.UnitOption{Section: section, Name: name, Value: value} +} + +// apiPodToruntimePod converts an api.Pod to kubelet/container.Pod. 
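+// Each kubecontainer.Container ID is derived from the rkt pod uuid plus the
+// app name via buildContainerID, so all containers of one rkt pod share that
+// uuid.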
+func apiPodToruntimePod(uuid string, pod *api.Pod) *kubecontainer.Pod { + p := &kubecontainer.Pod{ + ID: pod.UID, + Name: pod.Name, + Namespace: pod.Namespace, + } + for i := range pod.Spec.Containers { + c := &pod.Spec.Containers[i] + p.Containers = append(p.Containers, &kubecontainer.Container{ + ID: buildContainerID(&containerID{uuid, c.Name}), + Name: c.Name, + Image: c.Image, + Hash: kubecontainer.HashContainer(c), + }) + } + return p +} + +// serviceFilePath returns the absolute path of the service file. +func serviceFilePath(serviceName string) string { + return path.Join(systemdServiceDir, serviceName) +} + +// generateRunCommand crafts a 'rkt run-prepared' command with necessary parameters. +func (r *Runtime) generateRunCommand(pod *api.Pod, uuid, netnsName string) (string, error) { + runPrepared := r.buildCommand("run-prepared").Args + + // Network namespace set up in kubelet; rkt networking not used + runPrepared = append(runPrepared, "--net=host") + + var hostname string + var err error + // Setup DNS and hostname configuration. + if len(netnsName) == 0 { + // TODO(yifan): Let runtimeHelper.GeneratePodHostNameAndDomain() to handle this. + hostname, err = r.os.Hostname() + if err != nil { + return "", err + } + } else { + // Setup DNS. + dnsServers, dnsSearches, err := r.runtimeHelper.GetClusterDNS(pod) + if err != nil { + return "", err + } + for _, server := range dnsServers { + runPrepared = append(runPrepared, fmt.Sprintf("--dns=%s", server)) + } + for _, search := range dnsSearches { + runPrepared = append(runPrepared, fmt.Sprintf("--dns-search=%s", search)) + } + if len(dnsServers) > 0 || len(dnsSearches) > 0 { + runPrepared = append(runPrepared, fmt.Sprintf("--dns-opt=%s", defaultDNSOption)) + } + + // TODO(yifan): host domain is not being used. + hostname, _, err = r.runtimeHelper.GeneratePodHostNameAndDomain(pod) + if err != nil { + return "", err + } + + // Drop the `rkt run-prepared` into the network namespace we + // created. + // TODO: switch to 'ip netns exec' once we can depend on a new + // enough version that doesn't have bugs like + // https://bugzilla.redhat.com/show_bug.cgi?id=882047 + nsenterExec := []string{r.nsenterPath, "--net=\"" + netnsPathFromName(netnsName) + "\"", "--"} + runPrepared = append(nsenterExec, runPrepared...) 
+	}
+
+	runPrepared = append(runPrepared, fmt.Sprintf("--hostname=%s", hostname))
+	runPrepared = append(runPrepared, uuid)
+	return strings.Join(runPrepared, " "), nil
+}
+
+func (r *Runtime) cleanupPodNetwork(pod *api.Pod) error {
+	glog.V(3).Infof("Calling network plugin %s to tear down pod for %s", r.networkPlugin.Name(), format.Pod(pod))
+
+	var teardownErr error
+	containerID := kubecontainer.ContainerID{ID: string(pod.UID)}
+	if err := r.networkPlugin.TearDownPod(pod.Namespace, pod.Name, containerID); err != nil {
+		teardownErr = fmt.Errorf("rkt: failed to tear down network for pod %s: %v", format.Pod(pod), err)
+	}
+
+	if _, err := r.execer.Command("ip", "netns", "del", makePodNetnsName(pod.UID)).Output(); err != nil {
+		return fmt.Errorf("rkt: Failed to remove network namespace for pod %s: %v", format.Pod(pod), err)
+	}
+
+	return teardownErr
+}
+
+func (r *Runtime) preparePodArgs(manifest *appcschema.PodManifest, manifestFileName string) []string {
+	// Order of precedence for the stage1:
+	// 1) pod annotation (stage1 name)
+	// 2) kubelet configured stage1 (stage1 path)
+	// 3) empty; whatever rkt's compiled to default to
+	stage1ImageCmd := ""
+	if r.config.Stage1Image != "" {
+		stage1ImageCmd = "--stage1-path=" + r.config.Stage1Image
+	}
+	if stage1Name, ok := manifest.Annotations.Get(k8sRktStage1NameAnno); ok {
+		stage1ImageCmd = "--stage1-name=" + stage1Name
+	}
+
+	// Run 'rkt prepare' to get the rkt UUID.
+	cmds := []string{"prepare", "--quiet", "--pod-manifest", manifestFileName}
+	if stage1ImageCmd != "" {
+		cmds = append(cmds, stage1ImageCmd)
+	}
+	return cmds
+}
+
+// preparePod will:
+//
+// 1. Invoke 'rkt prepare' to prepare the pod, and get the rkt pod uuid.
+// 2. Create the unit file and save it under systemdUnitDir.
+//
+// On success, it will return a string that represents the name of the unit
+// file and the runtime pod.
+func (r *Runtime) preparePod(pod *api.Pod, podIP string, pullSecrets []api.Secret, netnsName string) (string, *kubecontainer.Pod, error) {
+	// Generate the appc pod manifest from the k8s pod spec.
+	manifest, err := r.makePodManifest(pod, podIP, pullSecrets)
+	if err != nil {
+		return "", nil, err
+	}
+	manifestFile, err := ioutil.TempFile("", fmt.Sprintf("manifest-%s-", pod.Name))
+	if err != nil {
+		return "", nil, err
+	}
+	defer func() {
+		manifestFile.Close()
+		if err := r.os.Remove(manifestFile.Name()); err != nil {
+			glog.Warningf("rkt: Cannot remove temp manifest file %q: %v", manifestFile.Name(), err)
+		}
+	}()
+
+	data, err := json.Marshal(manifest)
+	if err != nil {
+		return "", nil, err
+	}
+
+	glog.V(4).Infof("Generating pod manifest for pod %q: %v", format.Pod(pod), string(data))
+	// Since File.Write returns an error if the written length is less than
+	// len(data), checking the error is enough for us.
+	if _, err := manifestFile.Write(data); err != nil {
+		return "", nil, err
+	}
+
+	prepareCmd := r.preparePodArgs(manifest, manifestFile.Name())
+	output, err := r.RunCommand(prepareCmd...)
+	if err != nil {
+		return "", nil, err
+	}
+	if len(output) != 1 {
+		return "", nil, fmt.Errorf("invalid output from 'rkt prepare': %v", output)
+	}
+	uuid := output[0]
+	glog.V(4).Infof("'rkt prepare' returns %q", uuid)
+
+	// Create the systemd service file for the rkt pod.
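+	// As a hedged sketch, the unit written below is roughly of the form
+	// (paths and values illustrative):
+	//
+	//	[Service]
+	//	ExecStart=<rkt run-prepared command> <uuid>
+	//	ExecStopPost=<touch> <podDir>/finished-<uuid>
+	//	KillMode=mixed
+	//
+	//	[X-Kubernetes]
+	//	PodUID=<pod-uid>
+	//	PodName=<pod-name>
+	//	PodNamespace=<pod-namespace>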
+	runPrepared, err := r.generateRunCommand(pod, uuid, netnsName)
+	if err != nil {
+		return "", nil, fmt.Errorf("failed to generate 'rkt run-prepared' command: %v", err)
+	}
+
+	// TODO handle pod.Spec.HostPID
+	// TODO handle pod.Spec.HostIPC
+
+	// TODO per container finishedAt, not just per pod
+	markPodFinished := podFinishedMarkCommand(r.touchPath, r.runtimeHelper.GetPodDir(pod.UID), uuid)
+	units := []*unit.UnitOption{
+		newUnitOption("Service", "ExecStart", runPrepared),
+		newUnitOption("Service", "ExecStopPost", markPodFinished),
+		// This enables graceful stop.
+		newUnitOption("Service", "KillMode", "mixed"),
+		// Track pod info for garbage collection.
+		newUnitOption(unitKubernetesSection, unitPodUID, string(pod.UID)),
+		newUnitOption(unitKubernetesSection, unitPodName, pod.Name),
+		newUnitOption(unitKubernetesSection, unitPodNamespace, pod.Namespace),
+	}
+
+	serviceName := makePodServiceFileName(uuid)
+	glog.V(4).Infof("rkt: Creating service file %q for pod %q", serviceName, format.Pod(pod))
+	serviceFile, err := r.os.Create(serviceFilePath(serviceName))
+	if err != nil {
+		return "", nil, err
+	}
+	if _, err := io.Copy(serviceFile, unit.Serialize(units)); err != nil {
+		return "", nil, err
+	}
+	serviceFile.Close()
+
+	return serviceName, apiPodToruntimePod(uuid, pod), nil
+}
+
+// generateEvents is a helper function that generates some container
+// life cycle events for containers in a pod.
+func (r *Runtime) generateEvents(runtimePod *kubecontainer.Pod, reason string, failure error) {
+	// Set up container references.
+	for _, c := range runtimePod.Containers {
+		containerID := c.ID
+		id, err := parseContainerID(containerID)
+		if err != nil {
+			glog.Warningf("Invalid container ID %q", containerID)
+			continue
+		}
+
+		ref, ok := r.containerRefManager.GetRef(containerID)
+		if !ok {
+			glog.Warningf("No ref for container %q", containerID)
+			continue
+		}
+
+		// Note that the 'rkt id' is the pod id.
+		uuid := utilstrings.ShortenString(id.uuid, 8)
+		switch reason {
+		case "Created":
+			r.recorder.Eventf(ref, api.EventTypeNormal, kubecontainer.CreatedContainer, "Created with rkt id %v", uuid)
+		case "Started":
+			r.recorder.Eventf(ref, api.EventTypeNormal, kubecontainer.StartedContainer, "Started with rkt id %v", uuid)
+		case "Failed":
+			r.recorder.Eventf(ref, api.EventTypeWarning, kubecontainer.FailedToStartContainer, "Failed to start with rkt id %v with error %v", uuid, failure)
+		case "Killing":
+			r.recorder.Eventf(ref, api.EventTypeNormal, kubecontainer.KillingContainer, "Killing with rkt id %v", uuid)
+		default:
+			glog.Errorf("rkt: Unexpected event %q", reason)
+		}
+	}
+	return
+}
+
+func makePodNetnsName(podID kubetypes.UID) string {
+	return fmt.Sprintf("%s_%s", kubernetesUnitPrefix, string(podID))
+}
+
+func netnsPathFromName(netnsName string) string {
+	return fmt.Sprintf("/var/run/netns/%s", netnsName)
+}
+
+// setupPodNetwork creates a network namespace for the given pod and calls the
+// configured NetworkPlugin's setup function on it.
+// It returns the namespace name, the configured IP (if available), and an
+// error if one occurred.
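+// As a sketch of the underlying steps (flags illustrative): any stale
+// namespace is first removed with 'ip netns del <name>', a fresh one is
+// created with 'ip netns add <name>', and only then is the network plugin
+// invoked, where <name> is the value built by makePodNetnsName.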
+func (r *Runtime) setupPodNetwork(pod *api.Pod) (string, string, error) {
+	netnsName := makePodNetnsName(pod.UID)
+
+	// Create a new network namespace for the pod.
+	r.execer.Command("ip", "netns", "del", netnsName).Output()
+	_, err := r.execer.Command("ip", "netns", "add", netnsName).Output()
+	if err != nil {
+		return "", "", fmt.Errorf("failed to create pod network namespace: %v", err)
+	}
+
+	// Set up networking with the network plugin.
+	glog.V(3).Infof("Calling network plugin %s to setup pod for %s", r.networkPlugin.Name(), format.Pod(pod))
+	containerID := kubecontainer.ContainerID{ID: string(pod.UID)}
+	err = r.networkPlugin.SetUpPod(pod.Namespace, pod.Name, containerID)
+	if err != nil {
+		return "", "", fmt.Errorf("failed to set up pod network: %v", err)
+	}
+	status, err := r.networkPlugin.GetPodNetworkStatus(pod.Namespace, pod.Name, containerID)
+	if err != nil {
+		return "", "", fmt.Errorf("failed to get status of pod network: %v", err)
+	}
+
+	if r.configureHairpinMode {
+		if err = hairpin.SetUpContainerPath(netnsPathFromName(netnsName), network.DefaultInterfaceName); err != nil {
+			glog.Warningf("Hairpin setup failed for pod %q: %v", format.Pod(pod), err)
+		}
+	}
+
+	return netnsName, status.IP.String(), nil
+}
+
+// RunPod first creates the unit file for a pod, and then
+// starts the unit over d-bus.
+func (r *Runtime) RunPod(pod *api.Pod, pullSecrets []api.Secret) error {
+	glog.V(4).Infof("Rkt starts to run pod: name %q.", format.Pod(pod))
+
+	var err error
+	var netnsName string
+	var podIP string
+	if !kubecontainer.IsHostNetworkPod(pod) {
+		netnsName, podIP, err = r.setupPodNetwork(pod)
+		if err != nil {
+			r.cleanupPodNetwork(pod)
+			return err
+		}
+	}
+
+	name, runtimePod, prepareErr := r.preparePod(pod, podIP, pullSecrets, netnsName)
+
+	// Set container references and generate events.
+	// If preparePod fails, then send out 'failed' events for each container.
+	// Otherwise, store the container references so we can use them later to send events.
+	for i, c := range pod.Spec.Containers {
+		ref, err := kubecontainer.GenerateContainerRef(pod, &c)
+		if err != nil {
+			glog.Errorf("Couldn't make a ref to pod %q, container %v: '%v'", format.Pod(pod), c.Name, err)
+			continue
+		}
+		if prepareErr != nil {
+			r.recorder.Eventf(ref, api.EventTypeWarning, kubecontainer.FailedToCreateContainer, "Failed to create rkt container with error: %v", prepareErr)
+			continue
+		}
+		containerID := runtimePod.Containers[i].ID
+		r.containerRefManager.SetRef(containerID, ref)
+	}
+
+	if prepareErr != nil {
+		r.cleanupPodNetwork(pod)
+		return prepareErr
+	}
+
+	r.generateEvents(runtimePod, "Created", nil)
+
+	// RestartUnit has the same effect as StartUnit if the unit is not running;
+	// in addition, it can restart a unit if the unit file has changed and been
+	// reloaded.
+	reschan := make(chan string)
+	_, err = r.systemd.RestartUnit(name, "replace", reschan)
+	if err != nil {
+		r.generateEvents(runtimePod, "Failed", err)
+		r.cleanupPodNetwork(pod)
+		return err
+	}
+
+	res := <-reschan
+	if res != "done" {
+		err := fmt.Errorf("Failed to restart unit %q: %s", name, res)
+		r.generateEvents(runtimePod, "Failed", err)
+		r.cleanupPodNetwork(pod)
+		return err
+	}
+
+	r.generateEvents(runtimePod, "Started", nil)
+
+	// This is a temporary solution until we have a clean design on how
+	// kubelet handles events. See https://github.com/kubernetes/kubernetes/issues/23084.
+ if err := r.runLifecycleHooks(pod, runtimePod, lifecyclePostStartHook); err != nil { + if errKill := r.KillPod(pod, *runtimePod, nil); errKill != nil { + return errors.NewAggregate([]error{err, errKill}) + } + r.cleanupPodNetwork(pod) + return err + } + + return nil +} + +func (r *Runtime) runPreStopHook(containerID kubecontainer.ContainerID, pod *api.Pod, container *api.Container) error { + glog.V(4).Infof("rkt: Running pre-stop hook for container %q of pod %q", container.Name, format.Pod(pod)) + msg, err := r.runner.Run(containerID, pod, container, container.Lifecycle.PreStop) + if err != nil { + ref, ok := r.containerRefManager.GetRef(containerID) + if !ok { + glog.Warningf("No ref for container %q", containerID) + } else { + r.recorder.Eventf(ref, api.EventTypeWarning, kubecontainer.FailedPreStopHook, msg) + } + } + return err +} + +func (r *Runtime) runPostStartHook(containerID kubecontainer.ContainerID, pod *api.Pod, container *api.Container) error { + glog.V(4).Infof("rkt: Running post-start hook for container %q of pod %q", container.Name, format.Pod(pod)) + cid, err := parseContainerID(containerID) + if err != nil { + return fmt.Errorf("cannot parse container ID %v", containerID) + } + + isContainerRunning := func() (done bool, err error) { + resp, err := r.apisvc.InspectPod(context.Background(), &rktapi.InspectPodRequest{Id: cid.uuid}) + if err != nil { + return false, fmt.Errorf("failed to inspect rkt pod %q for pod %q", cid.uuid, format.Pod(pod)) + } + + for _, app := range resp.Pod.Apps { + if app.Name == cid.appName { + return app.State == rktapi.AppState_APP_STATE_RUNNING, nil + } + } + return false, fmt.Errorf("failed to find container %q in rkt pod %q", cid.appName, cid.uuid) + } + + // TODO(yifan): Polling the pod's state for now. + timeout := time.Second * 5 + pollInterval := time.Millisecond * 500 + if err := utilwait.Poll(pollInterval, timeout, isContainerRunning); err != nil { + return fmt.Errorf("rkt: Pod %q doesn't become running in %v: %v", format.Pod(pod), timeout, err) + } + + msg, err := r.runner.Run(containerID, pod, container, container.Lifecycle.PostStart) + if err != nil { + ref, ok := r.containerRefManager.GetRef(containerID) + if !ok { + glog.Warningf("No ref for container %q", containerID) + } else { + r.recorder.Eventf(ref, api.EventTypeWarning, kubecontainer.FailedPostStartHook, msg) + } + } + return err +} + +type lifecycleHookType string + +const ( + lifecyclePostStartHook lifecycleHookType = "post-start" + lifecyclePreStopHook lifecycleHookType = "pre-stop" +) + +func (r *Runtime) runLifecycleHooks(pod *api.Pod, runtimePod *kubecontainer.Pod, typ lifecycleHookType) error { + var wg sync.WaitGroup + var errlist []error + errCh := make(chan error, len(pod.Spec.Containers)) + + wg.Add(len(pod.Spec.Containers)) + + for i, c := range pod.Spec.Containers { + var hookFunc func(kubecontainer.ContainerID, *api.Pod, *api.Container) error + + switch typ { + case lifecyclePostStartHook: + if c.Lifecycle != nil && c.Lifecycle.PostStart != nil { + hookFunc = r.runPostStartHook + } + case lifecyclePreStopHook: + if c.Lifecycle != nil && c.Lifecycle.PreStop != nil { + hookFunc = r.runPreStopHook + } + default: + errCh <- fmt.Errorf("Unrecognized lifecycle hook type %q for container %q in pod %q", typ, c.Name, format.Pod(pod)) + } + + if hookFunc == nil { + wg.Done() + continue + } + + container := &pod.Spec.Containers[i] + runtimeContainer := runtimePod.FindContainerByName(container.Name) + if runtimeContainer == nil { + // Container already gone. 
+			wg.Done()
+			continue
+		}
+		containerID := runtimeContainer.ID
+
+		go func() {
+			defer wg.Done()
+			if err := hookFunc(containerID, pod, container); err != nil {
+				glog.Errorf("rkt: Failed to run %s hook for container %q of pod %q: %v", typ, container.Name, format.Pod(pod), err)
+				errCh <- err
+			} else {
+				glog.V(4).Infof("rkt: %s hook completed successfully for container %q of pod %q", typ, container.Name, format.Pod(pod))
+			}
+		}()
+	}
+
+	wg.Wait()
+	close(errCh)
+
+	for err := range errCh {
+		errlist = append(errlist, err)
+	}
+	return errors.NewAggregate(errlist)
+}
+
+// convertRktPod will convert a rktapi.Pod to a kubecontainer.Pod.
+func (r *Runtime) convertRktPod(rktpod *rktapi.Pod) (*kubecontainer.Pod, error) {
+	manifest := &appcschema.PodManifest{}
+	err := json.Unmarshal(rktpod.Manifest, manifest)
+	if err != nil {
+		return nil, err
+	}
+
+	podUID, ok := manifest.Annotations.Get(types.KubernetesPodUIDLabel)
+	if !ok {
+		return nil, fmt.Errorf("pod is missing annotation %s", types.KubernetesPodUIDLabel)
+	}
+	podName, ok := manifest.Annotations.Get(types.KubernetesPodNameLabel)
+	if !ok {
+		return nil, fmt.Errorf("pod is missing annotation %s", types.KubernetesPodNameLabel)
+	}
+	podNamespace, ok := manifest.Annotations.Get(types.KubernetesPodNamespaceLabel)
+	if !ok {
+		return nil, fmt.Errorf("pod is missing annotation %s", types.KubernetesPodNamespaceLabel)
+	}
+
+	kubepod := &kubecontainer.Pod{
+		ID:        kubetypes.UID(podUID),
+		Name:      podName,
+		Namespace: podNamespace,
+	}
+
+	for i, app := range rktpod.Apps {
+		// The order of the apps is determined by the rkt pod manifest.
+		// TODO(yifan): Let the server unmarshal the annotations? https://github.com/coreos/rkt/issues/1872
+		hashStr, ok := manifest.Apps[i].Annotations.Get(k8sRktContainerHashAnno)
+		if !ok {
+			return nil, fmt.Errorf("app %q is missing annotation %s", app.Name, k8sRktContainerHashAnno)
+		}
+		containerHash, err := strconv.ParseUint(hashStr, 10, 64)
+		if err != nil {
+			return nil, fmt.Errorf("couldn't parse container's hash %q: %v", hashStr, err)
+		}
+
+		kubepod.Containers = append(kubepod.Containers, &kubecontainer.Container{
+			ID:   buildContainerID(&containerID{rktpod.Id, app.Name}),
+			Name: app.Name,
+			// By default, the version returned by the rkt API service will be "latest" if not specified.
+			Image: fmt.Sprintf("%s:%s", app.Image.Name, app.Image.Version),
+			Hash:  containerHash,
+			State: appStateToContainerState(app.State),
+		})
+	}
+
+	return kubepod, nil
+}
+
+// GetPods queries the rkt API service for the list of rkt pods, and uses the
+// result to construct a list of container runtime pods.
+// If all is false, then only running pods will be returned, otherwise all
+// pods will be returned.
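+// The ListPods request filters on the kubelet-managed annotation, roughly
+// (sketch): rkt.kubernetes.io/managed-by-kubelet = "true", narrowed to
+// POD_STATE_RUNNING when all is false.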
+func (r *Runtime) GetPods(all bool) ([]*kubecontainer.Pod, error) {
+	glog.V(4).Infof("Rkt getting pods")
+
+	listReq := &rktapi.ListPodsRequest{
+		Detail: true,
+		Filters: []*rktapi.PodFilter{
+			{
+				Annotations: []*rktapi.KeyValue{
+					{
+						Key:   k8sRktKubeletAnno,
+						Value: k8sRktKubeletAnnoValue,
+					},
+				},
+			},
+		},
+	}
+	if !all {
+		listReq.Filters[0].States = []rktapi.PodState{rktapi.PodState_POD_STATE_RUNNING}
+	}
+	listResp, err := r.apisvc.ListPods(context.Background(), listReq)
+	if err != nil {
+		return nil, fmt.Errorf("couldn't list pods: %v", err)
+	}
+
+	pods := make(map[kubetypes.UID]*kubecontainer.Pod)
+	var podIDs []kubetypes.UID
+	for _, pod := range listResp.Pods {
+		pod, err := r.convertRktPod(pod)
+		if err != nil {
+			glog.Warningf("rkt: Cannot construct pod from rkt pod: %v.", err)
+			continue
+		}
+
+		// Group pods together.
+		oldPod, found := pods[pod.ID]
+		if !found {
+			pods[pod.ID] = pod
+			podIDs = append(podIDs, pod.ID)
+			continue
+		}
+
+		oldPod.Containers = append(oldPod.Containers, pod.Containers...)
+	}
+
+	// Convert the map to a list, using the consistent order from the podIDs array.
+	var result []*kubecontainer.Pod
+	for _, id := range podIDs {
+		result = append(result, pods[id])
+	}
+
+	return result, nil
+}
+
+func (r *Runtime) waitPreStopHooks(pod *api.Pod, runningPod *kubecontainer.Pod) {
+	gracePeriod := int64(minimumGracePeriodInSeconds)
+	switch {
+	case pod.DeletionGracePeriodSeconds != nil:
+		gracePeriod = *pod.DeletionGracePeriodSeconds
+	case pod.Spec.TerminationGracePeriodSeconds != nil:
+		gracePeriod = *pod.Spec.TerminationGracePeriodSeconds
+	}
+
+	done := make(chan struct{})
+	go func() {
+		if err := r.runLifecycleHooks(pod, runningPod, lifecyclePreStopHook); err != nil {
+			glog.Errorf("rkt: Some pre-stop hooks failed for pod %q: %v", format.Pod(pod), err)
+		}
+		close(done)
+	}()
+
+	select {
+	case <-time.After(time.Duration(gracePeriod) * time.Second):
+		glog.V(2).Infof("rkt: Some pre-stop hooks did not complete in %d seconds for pod %q", gracePeriod, format.Pod(pod))
+	case <-done:
+	}
+}
+
+// KillPod invokes 'systemctl kill' to kill the unit that runs the pod.
+// TODO: add support for gracePeriodOverride which is used in eviction scenarios.
+func (r *Runtime) KillPod(pod *api.Pod, runningPod kubecontainer.Pod, gracePeriodOverride *int64) error {
+	glog.V(4).Infof("Rkt is killing pod: name %q.", runningPod.Name)
+
+	if len(runningPod.Containers) == 0 {
+		glog.V(4).Infof("rkt: Pod %q is already being killed, no action will be taken", runningPod.Name)
+		return nil
+	}
+
+	if pod != nil {
+		r.waitPreStopHooks(pod, &runningPod)
+	}
+
+	containerID, err := parseContainerID(runningPod.Containers[0].ID)
+	if err != nil {
+		glog.Errorf("rkt: Failed to get rkt uuid of the pod %q: %v", runningPod.Name, err)
+		return err
+	}
+	serviceName := makePodServiceFileName(containerID.uuid)
+	r.generateEvents(&runningPod, "Killing", nil)
+	for _, c := range runningPod.Containers {
+		r.containerRefManager.ClearRef(c.ID)
+	}
+
+	// Touch the systemd service file to update the mod time so it will
+	// not be garbage collected too soon.
+	if err := r.os.Chtimes(serviceFilePath(serviceName), time.Now(), time.Now()); err != nil {
+		glog.Errorf("rkt: Failed to change the modification time of the service file %q: %v", serviceName, err)
+		return err
+	}
+
+	// Since all service files have 'KillMode=mixed', the processes in
+	// the unit's cgroup will receive a SIGKILL if the normal stop times out.
+	reschan := make(chan string)
+	if _, err = r.systemd.StopUnit(serviceName, "replace", reschan); err != nil {
+		glog.Errorf("rkt: Failed to stop unit %q: %v", serviceName, err)
+		return err
+	}
+
+	res := <-reschan
+	if res != "done" {
+		err := fmt.Errorf("invalid result: %s", res)
+		glog.Errorf("rkt: Failed to stop unit %q: %v", serviceName, err)
+		return err
+	}
+
+	// Clean up networking; use running pod details since 'pod' can be nil.
+	if pod == nil || !kubecontainer.IsHostNetworkPod(pod) {
+		err := r.cleanupPodNetwork(&api.Pod{
+			ObjectMeta: api.ObjectMeta{
+				UID:       runningPod.ID,
+				Name:      runningPod.Name,
+				Namespace: runningPod.Namespace,
+			},
+		})
+		if err != nil {
+			glog.Errorf("rkt: failed to tear down network for unit %q: %v", serviceName, err)
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (r *Runtime) Type() string {
+	return RktType
+}
+
+func (r *Runtime) Version() (kubecontainer.Version, error) {
+	r.versions.RLock()
+	defer r.versions.RUnlock()
+	return r.versions.binVersion, nil
+}
+
+func (r *Runtime) APIVersion() (kubecontainer.Version, error) {
+	r.versions.RLock()
+	defer r.versions.RUnlock()
+	return r.versions.apiVersion, nil
+}
+
+// Status returns an error if rkt is unhealthy, nil otherwise.
+func (r *Runtime) Status() error {
+	return r.checkVersion(minimumRktBinVersion, recommendedRktBinVersion, minimumAppcVersion, minimumRktApiVersion, minimumSystemdVersion)
+}
+
+// SyncPod syncs the running pod to match the specified desired pod.
+func (r *Runtime) SyncPod(pod *api.Pod, podStatus api.PodStatus, internalPodStatus *kubecontainer.PodStatus, pullSecrets []api.Secret, backOff *flowcontrol.Backoff) (result kubecontainer.PodSyncResult) {
+	var err error
+	defer func() {
+		if err != nil {
+			result.Fail(err)
+		}
+	}()
+	// TODO: (random-liu) Stop using running pod in SyncPod()
+	// TODO: (random-liu) Rename podStatus to apiPodStatus, rename internalPodStatus to podStatus, and use new pod status as much as possible,
+	// we may stop using apiPodStatus someday.
+	runningPod := kubecontainer.ConvertPodStatusToRunningPod(internalPodStatus)
+	// Add references to all containers.
+	unidentifiedContainers := make(map[kubecontainer.ContainerID]*kubecontainer.Container)
+	for _, c := range runningPod.Containers {
+		unidentifiedContainers[c.ID] = c
+	}
+
+	restartPod := false
+	for _, container := range pod.Spec.Containers {
+		expectedHash := kubecontainer.HashContainer(&container)
+
+		c := runningPod.FindContainerByName(container.Name)
+		if c == nil {
+			if kubecontainer.ShouldContainerBeRestarted(&container, pod, internalPodStatus) {
+				glog.V(3).Infof("Container %+v is dead, but RestartPolicy says that we should restart it.", container)
+				// TODO(yifan): Containers in one pod are fate-sharing at this moment, see:
+				// https://github.com/appc/spec/issues/276.
+				restartPod = true
+				break
+			}
+			continue
+		}
+
+		// TODO: check for non-root image directives. See ../docker/manager.go#SyncPod
+
+		// TODO(yifan): Take care of host network change.
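+		// A recorded hash of 0 means the hash is unknown, so only a non-zero
+		// mismatch with the expected hash counts as a container spec change.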
+		containerChanged := c.Hash != 0 && c.Hash != expectedHash
+		if containerChanged {
+			glog.Infof("Pod %q container %q hash changed (%d vs %d), it will be killed and re-created.", format.Pod(pod), container.Name, c.Hash, expectedHash)
+			restartPod = true
+			break
+		}
+
+		liveness, found := r.livenessManager.Get(c.ID)
+		if found && liveness != proberesults.Success && pod.Spec.RestartPolicy != api.RestartPolicyNever {
+			glog.Infof("Pod %q container %q is unhealthy, it will be killed and re-created.", format.Pod(pod), container.Name)
+			restartPod = true
+			break
+		}
+
+		delete(unidentifiedContainers, c.ID)
+	}
+
+	// If there are any unidentified containers, restart the pod.
+	if len(unidentifiedContainers) > 0 {
+		restartPod = true
+	}
+
+	if restartPod {
+		// Kill the pod only if the pod is actually running.
+		if len(runningPod.Containers) > 0 {
+			if err = r.KillPod(pod, runningPod, nil); err != nil {
+				return
+			}
+		}
+		if err = r.RunPod(pod, pullSecrets); err != nil {
+			return
+		}
+	}
+	return
+}
+
+// Sort rkt pods by creation time.
+type podsByCreatedAt []*rktapi.Pod
+
+func (s podsByCreatedAt) Len() int           { return len(s) }
+func (s podsByCreatedAt) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
+func (s podsByCreatedAt) Less(i, j int) bool { return s[i].CreatedAt < s[j].CreatedAt }
+
+// getPodUID returns the pod's API UID; it returns an
+// empty UID if the UID cannot be determined.
+func getPodUID(pod *rktapi.Pod) kubetypes.UID {
+	for _, anno := range pod.Annotations {
+		if anno.Key == types.KubernetesPodUIDLabel {
+			return kubetypes.UID(anno.Value)
+		}
+	}
+	return kubetypes.UID("")
+}
+
+// podIsActive returns true if the pod is embryo, preparing or running.
+// If a pod is prepared, it is not guaranteed to be active (e.g. the systemd
+// service might fail).
+func podIsActive(pod *rktapi.Pod) bool {
+	return pod.State == rktapi.PodState_POD_STATE_EMBRYO ||
+		pod.State == rktapi.PodState_POD_STATE_PREPARING ||
+		pod.State == rktapi.PodState_POD_STATE_RUNNING
+}
+
+// GetNetNS returns the network namespace path for the given container.
+func (r *Runtime) GetNetNS(containerID kubecontainer.ContainerID) (string, error) {
+	// This is a slight hack; kubenet shouldn't be asking us about a container id
+	// but a pod id. This is because it knows too much about the infra container.
+	// We pretend the pod.UID is an infra container ID.
+	// This deception is only possible because we played the same trick in
+	// `networkPlugin.SetUpPod` and `networkPlugin.TearDownPod`.
+	return netnsPathFromName(makePodNetnsName(kubetypes.UID(containerID.ID))), nil
+}
+
+func podDetailsFromServiceFile(serviceFilePath string) (string, string, string, error) {
+	f, err := os.Open(serviceFilePath)
+	if err != nil {
+		return "", "", "", err
+	}
+	defer f.Close()
+
+	opts, err := unit.Deserialize(f)
+	if err != nil {
+		return "", "", "", err
+	}
+
+	var id, name, namespace string
+	for _, o := range opts {
+		if o.Section != unitKubernetesSection {
+			continue
+		}
+		switch o.Name {
+		case unitPodUID:
+			id = o.Value
+		case unitPodName:
+			name = o.Value
+		case unitPodNamespace:
+			namespace = o.Value
+		}
+
+		if id != "" && name != "" && namespace != "" {
+			return id, name, namespace, nil
+		}
+	}
+
+	return "", "", "", fmt.Errorf("failed to parse pod from file %s", serviceFilePath)
+}
+
+// GarbageCollect collects the pods/containers.
+// After one GC iteration:
+// - The deleted pods will be removed.
+// - If the number of containers exceeds gcPolicy.MaxContainers,
+//   then containers whose ages are older than gcPolicy.MinAge will
+//   be removed.
+func (r *Runtime) GarbageCollect(gcPolicy kubecontainer.ContainerGCPolicy) error {
+	var errlist []error
+	var totalInactiveContainers int
+	var inactivePods []*rktapi.Pod
+	var removeCandidates []*rktapi.Pod
+	var allPods = map[string]*rktapi.Pod{}
+
+	glog.V(4).Infof("rkt: Garbage collecting triggered with policy %v", gcPolicy)
+
+	if err := r.systemd.ResetFailed(); err != nil {
+		glog.Errorf("rkt: Failed to reset failed systemd services: %v, continue to gc anyway...", err)
+	}
+
+	// GC all inactive systemd service files and pods.
+	files, err := r.os.ReadDir(systemdServiceDir)
+	if err != nil {
+		glog.Errorf("rkt: Failed to read the systemd service directory: %v", err)
+		return err
+	}
+
+	resp, err := r.apisvc.ListPods(context.Background(), &rktapi.ListPodsRequest{Filters: kubernetesPodsFilters()})
+	if err != nil {
+		glog.Errorf("rkt: Failed to list pods: %v", err)
+		return err
+	}
+
+	// Mark inactive pods.
+	for _, pod := range resp.Pods {
+		allPods[pod.Id] = pod
+		if !podIsActive(pod) {
+			uid := getPodUID(pod)
+			if uid == kubetypes.UID("") {
+				glog.Errorf("rkt: Cannot get the UID of pod %q, pod is broken, will remove it", pod.Id)
+				removeCandidates = append(removeCandidates, pod)
+				continue
+			}
+			_, found := r.podGetter.GetPodByUID(uid)
+			if !found {
+				removeCandidates = append(removeCandidates, pod)
+				continue
+			}
+
+			inactivePods = append(inactivePods, pod)
+			totalInactiveContainers = totalInactiveContainers + len(pod.Apps)
+		}
+	}
+
+	// Remove any orphan service files.
+	for _, f := range files {
+		serviceName := f.Name()
+		if strings.HasPrefix(serviceName, kubernetesUnitPrefix) {
+			rktUUID := getRktUUIDFromServiceFileName(serviceName)
+			if _, ok := allPods[rktUUID]; !ok {
+				glog.V(4).Infof("rkt: No rkt pod found for service file %q, will remove it", serviceName)
+
+				serviceFile := serviceFilePath(serviceName)
+
+				// Network may not be around anymore so errors are ignored
+				r.cleanupPodNetworkFromServiceFile(serviceFile)
+
+				if err := r.os.Remove(serviceFile); err != nil {
+					errlist = append(errlist, fmt.Errorf("rkt: Failed to remove service file %q: %v", serviceName, err))
+				}
+			}
+		}
+	}
+
+	sort.Sort(podsByCreatedAt(inactivePods))
+
+	// Enforce GCPolicy.MaxContainers.
+	for _, pod := range inactivePods {
+		if totalInactiveContainers <= gcPolicy.MaxContainers {
+			break
+		}
+		creationTime := time.Unix(0, pod.CreatedAt)
+		if creationTime.Add(gcPolicy.MinAge).Before(time.Now()) {
+			// The pod is old and we are exceeding the MaxContainers limit.
+			// Delete the pod.
+			removeCandidates = append(removeCandidates, pod)
+			totalInactiveContainers = totalInactiveContainers - len(pod.Apps)
+		}
+	}
+
+	// Remove pods and their service files.
+	for _, pod := range removeCandidates {
+		if err := r.removePod(pod.Id); err != nil {
+			errlist = append(errlist, fmt.Errorf("rkt: Failed to clean up rkt pod %q: %v", pod.Id, err))
+		}
+	}
+
+	return errors.NewAggregate(errlist)
+}
+
+// Read kubernetes pod UUID, namespace, and name from systemd service file and
+// use that to clean up any pod network that may still exist.
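+// This cleanup is best-effort: any teardown error is deliberately dropped,
+// since the network may already be gone.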
+func (r *Runtime) cleanupPodNetworkFromServiceFile(serviceFilePath string) {
+	id, name, namespace, err := podDetailsFromServiceFile(serviceFilePath)
+	if err == nil {
+		r.cleanupPodNetwork(&api.Pod{
+			ObjectMeta: api.ObjectMeta{
+				UID:       kubetypes.UID(id),
+				Name:      name,
+				Namespace: namespace,
+			},
+		})
+	}
+}
+
+// removePod calls 'rkt rm $UUID' to delete a rkt pod. It also removes the
+// systemd service file related to the pod.
+func (r *Runtime) removePod(uuid string) error {
+	var errlist []error
+	glog.V(4).Infof("rkt: GC is removing pod %q", uuid)
+
+	serviceName := makePodServiceFileName(uuid)
+	serviceFile := serviceFilePath(serviceName)
+
+	// Network may not be around anymore so errors are ignored
+	r.cleanupPodNetworkFromServiceFile(serviceFile)
+
+	if _, err := r.cli.RunCommand("rm", uuid); err != nil {
+		errlist = append(errlist, fmt.Errorf("rkt: Failed to remove pod %q: %v", uuid, err))
+	}
+
+	// GC systemd service files as well.
+	if err := r.os.Remove(serviceFile); err != nil {
+		errlist = append(errlist, fmt.Errorf("rkt: Failed to remove service file %q for pod %q: %v", serviceName, uuid, err))
+	}
+
+	return errors.NewAggregate(errlist)
+}
+
+// rktExitError implements the /pkg/util/exec.ExitError interface.
+type rktExitError struct{ *exec.ExitError }
+
+var _ utilexec.ExitError = &rktExitError{}
+
+func (r *rktExitError) ExitStatus() int {
+	if status, ok := r.Sys().(syscall.WaitStatus); ok {
+		return status.ExitStatus()
+	}
+	return 0
+}
+
+func (r *Runtime) AttachContainer(containerID kubecontainer.ContainerID, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool) error {
+	return fmt.Errorf("unimplemented")
+}
+
+// Note: In rkt, the container ID is in the form of "UUID:appName", where UUID is
+// the rkt UUID, and appName is the container name.
+// TODO(yifan): If the rkt is using lkvm as the stage1 image, then this function will fail.
+func (r *Runtime) ExecInContainer(containerID kubecontainer.ContainerID, cmd []string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool) error {
+	glog.V(4).Infof("Rkt execing in container.")
+
+	id, err := parseContainerID(containerID)
+	if err != nil {
+		return err
+	}
+	args := []string{"enter", fmt.Sprintf("--app=%s", id.appName), id.uuid}
+	args = append(args, cmd...)
+	command := r.buildCommand(args...)
+
+	if tty {
+		p, err := kubecontainer.StartPty(command)
+		if err != nil {
+			return err
+		}
+		defer p.Close()
+
+		// make sure to close the stdout stream
+		defer stdout.Close()
+
+		if stdin != nil {
+			go io.Copy(p, stdin)
+		}
+		if stdout != nil {
+			go io.Copy(stdout, p)
+		}
+		return command.Wait()
+	}
+	if stdin != nil {
+		// Use an os.Pipe here as it returns true *os.File objects.
+		// This way, if you run 'kubectl exec -i bash' (no tty) and type 'exit',
+		// the call below to command.Run() can unblock because its Stdin is the read half
+		// of the pipe.
+		r, w, err := r.os.Pipe()
+		if err != nil {
+			return err
+		}
+		go io.Copy(w, stdin)
+
+		command.Stdin = r
+	}
+	if stdout != nil {
+		command.Stdout = stdout
+	}
+	if stderr != nil {
+		command.Stderr = stderr
+	}
+	return command.Run()
+}
+
+// PortForward executes socat in the pod's network namespace and copies
+// data between stream (representing the user's local connection on their
+// computer) and the specified port in the container.
+//
+// TODO:
+// - match cgroups of container
+// - should we support nsenter + socat on the host? (current impl)
+// - should we support nsenter + socat in a container, running with elevated privs and --pid=host?
+//
+// TODO(yifan): Merge with the same function in dockertools.
+// TODO(yifan): If the rkt is using lkvm as the stage1 image, then this function will fail.
+func (r *Runtime) PortForward(pod *kubecontainer.Pod, port uint16, stream io.ReadWriteCloser) error {
+	glog.V(4).Infof("Rkt port forwarding in container.")
+
+	listResp, err := r.apisvc.ListPods(context.Background(), &rktapi.ListPodsRequest{
+		Detail:  true,
+		Filters: runningKubernetesPodFilters(pod.ID),
+	})
+	if err != nil {
+		return fmt.Errorf("couldn't list pods: %v", err)
+	}
+
+	if len(listResp.Pods) != 1 {
+		var podlist []string
+		for _, p := range listResp.Pods {
+			podlist = append(podlist, p.Id)
+		}
+		return fmt.Errorf("more than one running rkt pod for the kubernetes pod [%s]", strings.Join(podlist, ", "))
+	}
+
+	socatPath, lookupErr := exec.LookPath("socat")
+	if lookupErr != nil {
+		return fmt.Errorf("unable to do port forwarding: socat not found.")
+	}
+
+	args := []string{"-t", fmt.Sprintf("%d", listResp.Pods[0].Pid), "-n", socatPath, "-", fmt.Sprintf("TCP4:localhost:%d", port)}
+
+	nsenterPath, lookupErr := exec.LookPath("nsenter")
+	if lookupErr != nil {
+		return fmt.Errorf("unable to do port forwarding: nsenter not found.")
+	}
+
+	command := exec.Command(nsenterPath, args...)
+	command.Stdout = stream
+
+	// If we use Stdin, command.Run() won't return until the goroutine that's copying
+	// from stream finishes. Unfortunately, if you have a client like telnet connected
+	// via port forwarding, as long as the user's telnet client is connected to the user's
+	// local listener that port forwarding sets up, the telnet session never exits. This
+	// means that even if socat has finished running, command.Run() won't ever return
+	// (because the client still has the connection and stream open).
+	//
+	// The workaround is to use StdinPipe(), as Wait() (called by Run()) closes the pipe
+	// when the command (socat) exits.
+	inPipe, err := command.StdinPipe()
+	if err != nil {
+		return fmt.Errorf("unable to do port forwarding: error creating stdin pipe: %v", err)
+	}
+	go func() {
+		io.Copy(inPipe, stream)
+		inPipe.Close()
+	}()
+
+	return command.Run()
+}
+
+// appStateToContainerState converts rktapi.AppState to kubecontainer.ContainerState.
+func appStateToContainerState(state rktapi.AppState) kubecontainer.ContainerState {
+	switch state {
+	case rktapi.AppState_APP_STATE_RUNNING:
+		return kubecontainer.ContainerStateRunning
+	case rktapi.AppState_APP_STATE_EXITED:
+		return kubecontainer.ContainerStateExited
+	}
+	return kubecontainer.ContainerStateUnknown
+}
+
+// getPodInfo returns the pod manifest, creation time and restart count of the pod.
+func getPodInfo(pod *rktapi.Pod) (podManifest *appcschema.PodManifest, restartCount int, err error) {
+	// TODO(yifan): The manifest is only used for getting the annotations.
+	// Consider letting the server unmarshal the annotations.
+	var manifest appcschema.PodManifest
+	if err = json.Unmarshal(pod.Manifest, &manifest); err != nil {
+		return
+	}
+
+	if countString, ok := manifest.Annotations.Get(k8sRktRestartCountAnno); ok {
+		restartCount, err = strconv.Atoi(countString)
+		if err != nil {
+			return
+		}
+	}
+
+	return &manifest, restartCount, nil
+}
+
+// populateContainerStatus fills the container status according to the app's information.
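+// runtimeApp is the pod-manifest entry corresponding to app; it is only
+// consulted for the kubernetes-specific annotations (the container hash and
+// the termination message path).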
+func populateContainerStatus(pod rktapi.Pod, app rktapi.App, runtimeApp appcschema.RuntimeApp, restartCount int, finishedTime time.Time) (*kubecontainer.ContainerStatus, error) {
+	hashStr, ok := runtimeApp.Annotations.Get(k8sRktContainerHashAnno)
+	if !ok {
+		return nil, fmt.Errorf("No container hash in pod manifest")
+	}
+
+	hashNum, err := strconv.ParseUint(hashStr, 10, 64)
+	if err != nil {
+		return nil, err
+	}
+
+	var reason, message string
+	if app.State == rktapi.AppState_APP_STATE_EXITED {
+		if app.ExitCode == 0 {
+			reason = "Completed"
+		} else {
+			reason = "Error"
+		}
+	}
+
+	terminationMessagePath, ok := runtimeApp.Annotations.Get(k8sRktTerminationMessagePathAnno)
+	if ok {
+		if data, err := ioutil.ReadFile(terminationMessagePath); err != nil {
+			message = fmt.Sprintf("Error on reading termination-log %s: %v", terminationMessagePath, err)
+		} else {
+			message = string(data)
+		}
+	}
+
+	createdTime := time.Unix(0, pod.CreatedAt)
+	startedTime := time.Unix(0, pod.StartedAt)
+
+	return &kubecontainer.ContainerStatus{
+		ID:         buildContainerID(&containerID{uuid: pod.Id, appName: app.Name}),
+		Name:       app.Name,
+		State:      appStateToContainerState(app.State),
+		CreatedAt:  createdTime,
+		StartedAt:  startedTime,
+		FinishedAt: finishedTime,
+		ExitCode:   int(app.ExitCode),
+		// By default, the version returned by rkt API service will be "latest" if not specified.
+		Image:   fmt.Sprintf("%s:%s", app.Image.Name, app.Image.Version),
+		ImageID: "rkt://" + app.Image.Id, // TODO(yifan): Add the prefix only in api.PodStatus.
+		Hash:    hashNum,
+		// TODO(yifan): Note that now all apps share the same restart count, this might
+		// change once apps don't share the same lifecycle.
+		// See https://github.com/appc/spec/pull/547.
+		RestartCount: restartCount,
+		Reason:       reason,
+		Message:      message,
+	}, nil
+}
+
+// GetPodStatus returns the status for a pod specified by a given UID, name,
+// and namespace. It will attempt to find the pod's information via a request to
+// the rkt api server.
+// An error will be returned if the api server returns an error. If the api
+// server doesn't error, but doesn't provide meaningful information about the
+// pod, a status with no information (other than the passed in arguments) is
+// returned anyway.
+func (r *Runtime) GetPodStatus(uid kubetypes.UID, name, namespace string) (*kubecontainer.PodStatus, error) {
+	podStatus := &kubecontainer.PodStatus{
+		ID:        uid,
+		Name:      name,
+		Namespace: namespace,
+	}
+
+	listResp, err := r.apisvc.ListPods(context.Background(), &rktapi.ListPodsRequest{
+		Detail:  true,
+		Filters: kubernetesPodFilters(uid),
+	})
+	if err != nil {
+		return nil, fmt.Errorf("couldn't list pods: %v", err)
+	}
+
+	var latestRestartCount int = -1
+
+	// In this loop, we group all containers from all pods together;
+	// we also try to find the latest pod, so we can fill in other info of the pod below.
+	for _, pod := range listResp.Pods {
+		manifest, restartCount, err := getPodInfo(pod)
+		if err != nil {
+			glog.Warningf("rkt: Couldn't get necessary info from the rkt pod, (uuid %q): %v", pod.Id, err)
+			continue
+		}
+
+		if restartCount > latestRestartCount {
+			latestRestartCount = restartCount
+		}
+
+		finishedTime := r.podFinishedAt(uid, pod.Id)
+		for i, app := range pod.Apps {
+			// The order of the apps is determined by the rkt pod manifest.
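+			// manifest.Apps[i] is therefore the manifest entry for pod.Apps[i].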
+ cs, err := populateContainerStatus(*pod, *app, manifest.Apps[i], restartCount, finishedTime) + if err != nil { + glog.Warningf("rkt: Failed to populate container status(uuid %q, app %q): %v", pod.Id, app.Name, err) + continue + } + podStatus.ContainerStatuses = append(podStatus.ContainerStatuses, cs) + } + } + + // TODO(euank): this will not work in host networking mode + containerID := kubecontainer.ContainerID{ID: string(uid)} + if status, err := r.networkPlugin.GetPodNetworkStatus(namespace, name, containerID); err == nil { + podStatus.IP = status.IP.String() + } + + return podStatus, nil +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/rkt/rkt_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/rkt/rkt_test.go new file mode 100644 index 000000000000..d9e858b901d6 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/rkt/rkt_test.go @@ -0,0 +1,1767 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rkt + +import ( + "encoding/json" + "fmt" + "net" + "os" + "sort" + "testing" + "time" + + appcschema "github.com/appc/spec/schema" + appctypes "github.com/appc/spec/schema/types" + rktapi "github.com/coreos/rkt/api/v1alpha" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/resource" + kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" + containertesting "k8s.io/kubernetes/pkg/kubelet/container/testing" + kubetesting "k8s.io/kubernetes/pkg/kubelet/container/testing" + "k8s.io/kubernetes/pkg/kubelet/lifecycle" + "k8s.io/kubernetes/pkg/kubelet/network" + "k8s.io/kubernetes/pkg/kubelet/network/mock_network" + "k8s.io/kubernetes/pkg/kubelet/rkt/mock_os" + "k8s.io/kubernetes/pkg/kubelet/types" + kubetypes "k8s.io/kubernetes/pkg/types" + "k8s.io/kubernetes/pkg/util/errors" + utilexec "k8s.io/kubernetes/pkg/util/exec" + utiltesting "k8s.io/kubernetes/pkg/util/testing" +) + +func mustMarshalPodManifest(man *appcschema.PodManifest) []byte { + manblob, err := json.Marshal(man) + if err != nil { + panic(err) + } + return manblob +} + +func mustMarshalImageManifest(man *appcschema.ImageManifest) []byte { + manblob, err := json.Marshal(man) + if err != nil { + panic(err) + } + return manblob +} + +func mustRktHash(hash string) *appctypes.Hash { + h, err := appctypes.NewHash(hash) + if err != nil { + panic(err) + } + return h +} + +func makeRktPod(rktPodState rktapi.PodState, + rktPodID, podUID, podName, podNamespace string, podCreatedAt, podStartedAt int64, + podRestartCount string, appNames, imgIDs, imgNames, + containerHashes []string, appStates []rktapi.AppState, + exitcodes []int32) *rktapi.Pod { + + podManifest := &appcschema.PodManifest{ + ACKind: appcschema.PodManifestKind, + ACVersion: appcschema.AppContainerVersion, + Annotations: appctypes.Annotations{ + appctypes.Annotation{ + Name: *appctypes.MustACIdentifier(k8sRktKubeletAnno), + Value: k8sRktKubeletAnnoValue, + }, + appctypes.Annotation{ + Name: 
*appctypes.MustACIdentifier(types.KubernetesPodUIDLabel), + Value: podUID, + }, + appctypes.Annotation{ + Name: *appctypes.MustACIdentifier(types.KubernetesPodNameLabel), + Value: podName, + }, + appctypes.Annotation{ + Name: *appctypes.MustACIdentifier(types.KubernetesPodNamespaceLabel), + Value: podNamespace, + }, + appctypes.Annotation{ + Name: *appctypes.MustACIdentifier(k8sRktRestartCountAnno), + Value: podRestartCount, + }, + }, + } + + appNum := len(appNames) + if appNum != len(imgNames) || + appNum != len(imgIDs) || + appNum != len(containerHashes) || + appNum != len(appStates) { + panic("inconsistent app number") + } + + apps := make([]*rktapi.App, appNum) + for i := range appNames { + apps[i] = &rktapi.App{ + Name: appNames[i], + State: appStates[i], + Image: &rktapi.Image{ + Id: imgIDs[i], + Name: imgNames[i], + Version: "latest", + Manifest: mustMarshalImageManifest( + &appcschema.ImageManifest{ + ACKind: appcschema.ImageManifestKind, + ACVersion: appcschema.AppContainerVersion, + Name: *appctypes.MustACIdentifier(imgNames[i]), + Annotations: appctypes.Annotations{ + appctypes.Annotation{ + Name: *appctypes.MustACIdentifier(k8sRktContainerHashAnno), + Value: containerHashes[i], + }, + }, + }, + ), + }, + ExitCode: exitcodes[i], + } + podManifest.Apps = append(podManifest.Apps, appcschema.RuntimeApp{ + Name: *appctypes.MustACName(appNames[i]), + Image: appcschema.RuntimeImage{ID: *mustRktHash("sha512-foo")}, + Annotations: appctypes.Annotations{ + appctypes.Annotation{ + Name: *appctypes.MustACIdentifier(k8sRktContainerHashAnno), + Value: containerHashes[i], + }, + }, + }) + } + + return &rktapi.Pod{ + Id: rktPodID, + State: rktPodState, + Apps: apps, + Manifest: mustMarshalPodManifest(podManifest), + StartedAt: podStartedAt, + CreatedAt: podCreatedAt, + } +} + +func TestCheckVersion(t *testing.T) { + fr := newFakeRktInterface() + fs := newFakeSystemd() + r := &Runtime{apisvc: fr, systemd: fs} + + fr.info = rktapi.Info{ + RktVersion: "1.2.3+git", + AppcVersion: "1.2.4+git", + ApiVersion: "1.2.6-alpha", + } + fs.version = "100" + tests := []struct { + minimumRktBinVersion string + recommendedRktBinVersion string + minimumAppcVersion string + minimumRktApiVersion string + minimumSystemdVersion string + err error + calledGetInfo bool + calledSystemVersion bool + }{ + // Good versions. + { + "1.2.3", + "1.2.3", + "1.2.4", + "1.2.5", + "99", + nil, + true, + true, + }, + // Good versions. + { + "1.2.3+git", + "1.2.3+git", + "1.2.4+git", + "1.2.6-alpha", + "100", + nil, + true, + true, + }, + // Requires greater binary version. + { + "1.2.4", + "1.2.4", + "1.2.4", + "1.2.6-alpha", + "100", + fmt.Errorf("rkt: binary version is too old(%v), requires at least %v", fr.info.RktVersion, "1.2.4"), + true, + true, + }, + // Requires greater Appc version. + { + "1.2.3", + "1.2.3", + "1.2.5", + "1.2.6-alpha", + "100", + fmt.Errorf("rkt: appc version is too old(%v), requires at least %v", fr.info.AppcVersion, "1.2.5"), + true, + true, + }, + // Requires greater API version. + { + "1.2.3", + "1.2.3", + "1.2.4", + "1.2.6", + "100", + fmt.Errorf("rkt: API version is too old(%v), requires at least %v", fr.info.ApiVersion, "1.2.6"), + true, + true, + }, + // Requires greater API version. + { + "1.2.3", + "1.2.3", + "1.2.4", + "1.2.7", + "100", + fmt.Errorf("rkt: API version is too old(%v), requires at least %v", fr.info.ApiVersion, "1.2.7"), + true, + true, + }, + // Requires greater systemd version. 
+ { + "1.2.3", + "1.2.3", + "1.2.4", + "1.2.7", + "101", + fmt.Errorf("rkt: systemd version(%v) is too old, requires at least %v", fs.version, "101"), + false, + true, + }, + } + + for i, tt := range tests { + testCaseHint := fmt.Sprintf("test case #%d", i) + err := r.checkVersion(tt.minimumRktBinVersion, tt.recommendedRktBinVersion, tt.minimumAppcVersion, tt.minimumRktApiVersion, tt.minimumSystemdVersion) + assert.Equal(t, tt.err, err, testCaseHint) + + if tt.calledGetInfo { + assert.Equal(t, fr.called, []string{"GetInfo"}, testCaseHint) + } + if tt.calledSystemVersion { + assert.Equal(t, fs.called, []string{"Version"}, testCaseHint) + } + if err == nil { + assert.Equal(t, fr.info.RktVersion, r.versions.binVersion.String(), testCaseHint) + assert.Equal(t, fr.info.AppcVersion, r.versions.appcVersion.String(), testCaseHint) + assert.Equal(t, fr.info.ApiVersion, r.versions.apiVersion.String(), testCaseHint) + } + fr.CleanCalls() + fs.CleanCalls() + } +} + +func TestListImages(t *testing.T) { + fr := newFakeRktInterface() + fs := newFakeSystemd() + r := &Runtime{apisvc: fr, systemd: fs} + + tests := []struct { + images []*rktapi.Image + expected []kubecontainer.Image + }{ + {nil, []kubecontainer.Image{}}, + { + []*rktapi.Image{ + { + Id: "sha512-a2fb8f390702", + Name: "quay.io/coreos/alpine-sh", + Version: "latest", + }, + }, + []kubecontainer.Image{ + { + ID: "sha512-a2fb8f390702", + RepoTags: []string{"quay.io/coreos/alpine-sh:latest"}, + }, + }, + }, + { + []*rktapi.Image{ + { + Id: "sha512-a2fb8f390702", + Name: "quay.io/coreos/alpine-sh", + Version: "latest", + Size: 400, + }, + { + Id: "sha512-c6b597f42816", + Name: "coreos.com/rkt/stage1-coreos", + Version: "0.10.0", + Size: 400, + }, + }, + []kubecontainer.Image{ + { + ID: "sha512-a2fb8f390702", + RepoTags: []string{"quay.io/coreos/alpine-sh:latest"}, + Size: 400, + }, + { + ID: "sha512-c6b597f42816", + RepoTags: []string{"coreos.com/rkt/stage1-coreos:0.10.0"}, + Size: 400, + }, + }, + }, + } + + for i, tt := range tests { + fr.images = tt.images + + images, err := r.ListImages() + if err != nil { + t.Errorf("%v", err) + } + assert.Equal(t, tt.expected, images) + assert.Equal(t, fr.called, []string{"ListImages"}, fmt.Sprintf("test case %d: unexpected called list", i)) + + fr.CleanCalls() + } +} + +func TestGetPods(t *testing.T) { + fr := newFakeRktInterface() + fs := newFakeSystemd() + r := &Runtime{apisvc: fr, systemd: fs} + + ns := func(seconds int64) int64 { + return seconds * 1e9 + } + + tests := []struct { + pods []*rktapi.Pod + result []*kubecontainer.Pod + }{ + // No pods. + {}, + // One pod. + { + []*rktapi.Pod{ + makeRktPod(rktapi.PodState_POD_STATE_RUNNING, + "uuid-4002", "42", "guestbook", "default", + ns(10), ns(10), "7", + []string{"app-1", "app-2"}, + []string{"img-id-1", "img-id-2"}, + []string{"img-name-1", "img-name-2"}, + []string{"1001", "1002"}, + []rktapi.AppState{rktapi.AppState_APP_STATE_RUNNING, rktapi.AppState_APP_STATE_EXITED}, + []int32{0, 0}, + ), + }, + []*kubecontainer.Pod{ + { + ID: "42", + Name: "guestbook", + Namespace: "default", + Containers: []*kubecontainer.Container{ + { + ID: kubecontainer.BuildContainerID("rkt", "uuid-4002:app-1"), + Name: "app-1", + Image: "img-name-1:latest", + Hash: 1001, + State: "running", + }, + { + ID: kubecontainer.BuildContainerID("rkt", "uuid-4002:app-2"), + Name: "app-2", + Image: "img-name-2:latest", + Hash: 1002, + State: "exited", + }, + }, + }, + }, + }, + // Multiple pods. 
+ { + []*rktapi.Pod{ + makeRktPod(rktapi.PodState_POD_STATE_RUNNING, + "uuid-4002", "42", "guestbook", "default", + ns(10), ns(20), "7", + []string{"app-1", "app-2"}, + []string{"img-id-1", "img-id-2"}, + []string{"img-name-1", "img-name-2"}, + []string{"1001", "1002"}, + []rktapi.AppState{rktapi.AppState_APP_STATE_RUNNING, rktapi.AppState_APP_STATE_EXITED}, + []int32{0, 0}, + ), + makeRktPod(rktapi.PodState_POD_STATE_EXITED, + "uuid-4003", "43", "guestbook", "default", + ns(30), ns(40), "7", + []string{"app-11", "app-22"}, + []string{"img-id-11", "img-id-22"}, + []string{"img-name-11", "img-name-22"}, + []string{"10011", "10022"}, + []rktapi.AppState{rktapi.AppState_APP_STATE_EXITED, rktapi.AppState_APP_STATE_EXITED}, + []int32{0, 0}, + ), + makeRktPod(rktapi.PodState_POD_STATE_EXITED, + "uuid-4004", "43", "guestbook", "default", + ns(50), ns(60), "8", + []string{"app-11", "app-22"}, + []string{"img-id-11", "img-id-22"}, + []string{"img-name-11", "img-name-22"}, + []string{"10011", "10022"}, + []rktapi.AppState{rktapi.AppState_APP_STATE_RUNNING, rktapi.AppState_APP_STATE_RUNNING}, + []int32{0, 0}, + ), + }, + []*kubecontainer.Pod{ + { + ID: "42", + Name: "guestbook", + Namespace: "default", + Containers: []*kubecontainer.Container{ + { + ID: kubecontainer.BuildContainerID("rkt", "uuid-4002:app-1"), + Name: "app-1", + Image: "img-name-1:latest", + Hash: 1001, + State: "running", + }, + { + ID: kubecontainer.BuildContainerID("rkt", "uuid-4002:app-2"), + Name: "app-2", + Image: "img-name-2:latest", + Hash: 1002, + State: "exited", + }, + }, + }, + { + ID: "43", + Name: "guestbook", + Namespace: "default", + Containers: []*kubecontainer.Container{ + { + ID: kubecontainer.BuildContainerID("rkt", "uuid-4003:app-11"), + Name: "app-11", + Image: "img-name-11:latest", + Hash: 10011, + State: "exited", + }, + { + ID: kubecontainer.BuildContainerID("rkt", "uuid-4003:app-22"), + Name: "app-22", + Image: "img-name-22:latest", + Hash: 10022, + State: "exited", + }, + { + ID: kubecontainer.BuildContainerID("rkt", "uuid-4004:app-11"), + Name: "app-11", + Image: "img-name-11:latest", + Hash: 10011, + State: "running", + }, + { + ID: kubecontainer.BuildContainerID("rkt", "uuid-4004:app-22"), + Name: "app-22", + Image: "img-name-22:latest", + Hash: 10022, + State: "running", + }, + }, + }, + }, + }, + } + + for i, tt := range tests { + testCaseHint := fmt.Sprintf("test case #%d", i) + fr.pods = tt.pods + + pods, err := r.GetPods(true) + if err != nil { + t.Errorf("test case #%d: unexpected error: %v", i, err) + } + + assert.Equal(t, tt.result, pods, testCaseHint) + assert.Equal(t, []string{"ListPods"}, fr.called, fmt.Sprintf("test case %d: unexpected called list", i)) + + fr.CleanCalls() + } +} + +func TestGetPodsFilters(t *testing.T) { + fr := newFakeRktInterface() + fs := newFakeSystemd() + r := &Runtime{apisvc: fr, systemd: fs} + + for _, test := range []struct { + All bool + ExpectedFilters []*rktapi.PodFilter + }{ + { + true, + []*rktapi.PodFilter{ + { + Annotations: []*rktapi.KeyValue{ + { + Key: k8sRktKubeletAnno, + Value: k8sRktKubeletAnnoValue, + }, + }, + }, + }, + }, + { + false, + []*rktapi.PodFilter{ + { + States: []rktapi.PodState{rktapi.PodState_POD_STATE_RUNNING}, + Annotations: []*rktapi.KeyValue{ + { + Key: k8sRktKubeletAnno, + Value: k8sRktKubeletAnnoValue, + }, + }, + }, + }, + }, + } { + _, err := r.GetPods(test.All) + if err != nil { + t.Errorf("%v", err) + } + assert.Equal(t, test.ExpectedFilters, fr.podFilters, "filters didn't match when all=%b", test.All) + } +} + +func 
TestGetPodStatus(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + fr := newFakeRktInterface() + fs := newFakeSystemd() + fnp := mock_network.NewMockNetworkPlugin(ctrl) + fos := &containertesting.FakeOS{} + frh := &fakeRuntimeHelper{} + r := &Runtime{ + apisvc: fr, + systemd: fs, + runtimeHelper: frh, + os: fos, + networkPlugin: fnp, + } + + ns := func(seconds int64) int64 { + return seconds * 1e9 + } + + tests := []struct { + pods []*rktapi.Pod + result *kubecontainer.PodStatus + }{ + // No pods. + { + nil, + &kubecontainer.PodStatus{ID: "42", Name: "guestbook", Namespace: "default"}, + }, + // One pod. + { + []*rktapi.Pod{ + makeRktPod(rktapi.PodState_POD_STATE_RUNNING, + "uuid-4002", "42", "guestbook", "default", + ns(10), ns(20), "7", + []string{"app-1", "app-2"}, + []string{"img-id-1", "img-id-2"}, + []string{"img-name-1", "img-name-2"}, + []string{"1001", "1002"}, + []rktapi.AppState{rktapi.AppState_APP_STATE_RUNNING, rktapi.AppState_APP_STATE_EXITED}, + []int32{0, 0}, + ), + }, + &kubecontainer.PodStatus{ + ID: "42", + Name: "guestbook", + Namespace: "default", + IP: "10.10.10.42", + ContainerStatuses: []*kubecontainer.ContainerStatus{ + { + ID: kubecontainer.BuildContainerID("rkt", "uuid-4002:app-1"), + Name: "app-1", + State: kubecontainer.ContainerStateRunning, + CreatedAt: time.Unix(10, 0), + StartedAt: time.Unix(20, 0), + FinishedAt: time.Unix(0, 30), + Image: "img-name-1:latest", + ImageID: "rkt://img-id-1", + Hash: 1001, + RestartCount: 7, + }, + { + ID: kubecontainer.BuildContainerID("rkt", "uuid-4002:app-2"), + Name: "app-2", + State: kubecontainer.ContainerStateExited, + CreatedAt: time.Unix(10, 0), + StartedAt: time.Unix(20, 0), + FinishedAt: time.Unix(0, 30), + Image: "img-name-2:latest", + ImageID: "rkt://img-id-2", + Hash: 1002, + RestartCount: 7, + Reason: "Completed", + }, + }, + }, + }, + // Multiple pods. + { + []*rktapi.Pod{ + makeRktPod(rktapi.PodState_POD_STATE_EXITED, + "uuid-4002", "42", "guestbook", "default", + ns(10), ns(20), "7", + []string{"app-1", "app-2"}, + []string{"img-id-1", "img-id-2"}, + []string{"img-name-1", "img-name-2"}, + []string{"1001", "1002"}, + []rktapi.AppState{rktapi.AppState_APP_STATE_RUNNING, rktapi.AppState_APP_STATE_EXITED}, + []int32{0, 0}, + ), + makeRktPod(rktapi.PodState_POD_STATE_RUNNING, // The latest pod is running. + "uuid-4003", "42", "guestbook", "default", + ns(10), ns(20), "10", + []string{"app-1", "app-2"}, + []string{"img-id-1", "img-id-2"}, + []string{"img-name-1", "img-name-2"}, + []string{"1001", "1002"}, + []rktapi.AppState{rktapi.AppState_APP_STATE_RUNNING, rktapi.AppState_APP_STATE_EXITED}, + []int32{0, 1}, + ), + }, + &kubecontainer.PodStatus{ + ID: "42", + Name: "guestbook", + Namespace: "default", + IP: "10.10.10.42", + // Result should contain all containers. 
+ ContainerStatuses: []*kubecontainer.ContainerStatus{ + { + ID: kubecontainer.BuildContainerID("rkt", "uuid-4002:app-1"), + Name: "app-1", + State: kubecontainer.ContainerStateRunning, + CreatedAt: time.Unix(10, 0), + StartedAt: time.Unix(20, 0), + FinishedAt: time.Unix(0, 30), + Image: "img-name-1:latest", + ImageID: "rkt://img-id-1", + Hash: 1001, + RestartCount: 7, + }, + { + ID: kubecontainer.BuildContainerID("rkt", "uuid-4002:app-2"), + Name: "app-2", + State: kubecontainer.ContainerStateExited, + CreatedAt: time.Unix(10, 0), + StartedAt: time.Unix(20, 0), + FinishedAt: time.Unix(0, 30), + Image: "img-name-2:latest", + ImageID: "rkt://img-id-2", + Hash: 1002, + RestartCount: 7, + Reason: "Completed", + }, + { + ID: kubecontainer.BuildContainerID("rkt", "uuid-4003:app-1"), + Name: "app-1", + State: kubecontainer.ContainerStateRunning, + CreatedAt: time.Unix(10, 0), + StartedAt: time.Unix(20, 0), + FinishedAt: time.Unix(0, 30), + Image: "img-name-1:latest", + ImageID: "rkt://img-id-1", + Hash: 1001, + RestartCount: 10, + }, + { + ID: kubecontainer.BuildContainerID("rkt", "uuid-4003:app-2"), + Name: "app-2", + State: kubecontainer.ContainerStateExited, + CreatedAt: time.Unix(10, 0), + StartedAt: time.Unix(20, 0), + FinishedAt: time.Unix(0, 30), + Image: "img-name-2:latest", + ImageID: "rkt://img-id-2", + Hash: 1002, + RestartCount: 10, + ExitCode: 1, + Reason: "Error", + }, + }, + }, + }, + } + + for i, tt := range tests { + testCaseHint := fmt.Sprintf("test case #%d", i) + fr.pods = tt.pods + + podTimes := map[string]time.Time{} + for _, pod := range tt.pods { + podTimes[podFinishedMarkerPath(r.runtimeHelper.GetPodDir(tt.result.ID), pod.Id)] = tt.result.ContainerStatuses[0].FinishedAt + } + + r.os.(*containertesting.FakeOS).StatFn = func(name string) (os.FileInfo, error) { + podTime, ok := podTimes[name] + if !ok { + t.Errorf("osStat called with %v, but only knew about %#v", name, podTimes) + } + mockFI := mock_os.NewMockFileInfo(ctrl) + mockFI.EXPECT().ModTime().Return(podTime) + return mockFI, nil + } + + if tt.result.IP != "" { + fnp.EXPECT().GetPodNetworkStatus("default", "guestbook", kubecontainer.ContainerID{ID: "42"}). + Return(&network.PodNetworkStatus{IP: net.ParseIP(tt.result.IP)}, nil) + } else { + fnp.EXPECT().GetPodNetworkStatus("default", "guestbook", kubecontainer.ContainerID{ID: "42"}). + Return(nil, fmt.Errorf("no such network")) + } + + status, err := r.GetPodStatus("42", "guestbook", "default") + if err != nil { + t.Errorf("test case #%d: unexpected error: %v", i, err) + } + + assert.Equal(t, tt.result, status, testCaseHint) + assert.Equal(t, []string{"ListPods"}, fr.called, testCaseHint) + fr.CleanCalls() + } +} + +func generateCapRetainIsolator(t *testing.T, caps ...string) appctypes.Isolator { + retain, err := appctypes.NewLinuxCapabilitiesRetainSet(caps...) + if err != nil { + t.Fatalf("Error generating cap retain isolator: %v", err) + } + return retain.AsIsolator() +} + +func generateCapRevokeIsolator(t *testing.T, caps ...string) appctypes.Isolator { + revoke, err := appctypes.NewLinuxCapabilitiesRevokeSet(caps...) 
+ if err != nil { + t.Fatalf("Error generating cap revoke isolator: %v", err) + } + return revoke.AsIsolator() +} + +func generateCPUIsolator(t *testing.T, request, limit string) appctypes.Isolator { + cpu, err := appctypes.NewResourceCPUIsolator(request, limit) + if err != nil { + t.Fatalf("Error generating cpu resource isolator: %v", err) + } + return cpu.AsIsolator() +} + +func generateMemoryIsolator(t *testing.T, request, limit string) appctypes.Isolator { + memory, err := appctypes.NewResourceMemoryIsolator(request, limit) + if err != nil { + t.Fatalf("Error generating memory resource isolator: %v", err) + } + return memory.AsIsolator() +} + +func baseApp(t *testing.T) *appctypes.App { + return &appctypes.App{ + Exec: appctypes.Exec{"/bin/foo", "bar"}, + SupplementaryGIDs: []int{4, 5, 6}, + WorkingDirectory: "/foo", + Environment: []appctypes.EnvironmentVariable{ + {"env-foo", "bar"}, + }, + MountPoints: []appctypes.MountPoint{ + {Name: *appctypes.MustACName("mnt-foo"), Path: "/mnt-foo", ReadOnly: false}, + }, + Ports: []appctypes.Port{ + {Name: *appctypes.MustACName("port-foo"), Protocol: "TCP", Port: 4242}, + }, + Isolators: []appctypes.Isolator{ + generateCapRetainIsolator(t, "CAP_SYS_ADMIN"), + generateCapRevokeIsolator(t, "CAP_NET_ADMIN"), + generateCPUIsolator(t, "100m", "200m"), + generateMemoryIsolator(t, "10M", "20M"), + }, + } +} + +func baseImageManifest(t *testing.T) *appcschema.ImageManifest { + img := &appcschema.ImageManifest{App: baseApp(t)} + entrypoint, err := json.Marshal([]string{"/bin/foo"}) + if err != nil { + t.Fatal(err) + } + cmd, err := json.Marshal([]string{"bar"}) + if err != nil { + t.Fatal(err) + } + img.Annotations.Set(*appctypes.MustACIdentifier(appcDockerEntrypoint), string(entrypoint)) + img.Annotations.Set(*appctypes.MustACIdentifier(appcDockerCmd), string(cmd)) + return img +} + +func baseAppWithRootUserGroup(t *testing.T) *appctypes.App { + app := baseApp(t) + app.User, app.Group = "0", "0" + return app +} + +type envByName []appctypes.EnvironmentVariable + +func (s envByName) Len() int { return len(s) } +func (s envByName) Less(i, j int) bool { return s[i].Name < s[j].Name } +func (s envByName) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +type mountsByName []appctypes.MountPoint + +func (s mountsByName) Len() int { return len(s) } +func (s mountsByName) Less(i, j int) bool { return s[i].Name < s[j].Name } +func (s mountsByName) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +type portsByName []appctypes.Port + +func (s portsByName) Len() int { return len(s) } +func (s portsByName) Less(i, j int) bool { return s[i].Name < s[j].Name } +func (s portsByName) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +type isolatorsByName []appctypes.Isolator + +func (s isolatorsByName) Len() int { return len(s) } +func (s isolatorsByName) Less(i, j int) bool { return s[i].Name < s[j].Name } +func (s isolatorsByName) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +func sortAppFields(app *appctypes.App) { + sort.Sort(envByName(app.Environment)) + sort.Sort(mountsByName(app.MountPoints)) + sort.Sort(portsByName(app.Ports)) + sort.Sort(isolatorsByName(app.Isolators)) +} + +type sortedStringList []string + +func (s sortedStringList) Len() int { return len(s) } +func (s sortedStringList) Less(i, j int) bool { return s[i] < s[j] } +func (s sortedStringList) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +func TestSetApp(t *testing.T) { + tmpDir, err := utiltesting.MkTmpdir("rkt_test") + if err != nil { + t.Fatalf("error creating temp dir: %v", err) + } + defer 
os.RemoveAll(tmpDir) + + rootUser := int64(0) + nonRootUser := int64(42) + runAsNonRootTrue := true + fsgid := int64(3) + + tests := []struct { + container *api.Container + opts *kubecontainer.RunContainerOptions + ctx *api.SecurityContext + podCtx *api.PodSecurityContext + expect *appctypes.App + err error + }{ + // Nothing should change, but the "User" and "Group" should be filled. + { + container: &api.Container{}, + opts: &kubecontainer.RunContainerOptions{}, + ctx: nil, + podCtx: nil, + expect: baseAppWithRootUserGroup(t), + err: nil, + }, + + // error verifying non-root. + { + container: &api.Container{}, + opts: &kubecontainer.RunContainerOptions{}, + ctx: &api.SecurityContext{ + RunAsNonRoot: &runAsNonRootTrue, + RunAsUser: &rootUser, + }, + podCtx: nil, + expect: nil, + err: fmt.Errorf("container has no runAsUser and image will run as root"), + }, + + // app's args should be changed. + { + container: &api.Container{ + Args: []string{"foo"}, + }, + opts: &kubecontainer.RunContainerOptions{}, + ctx: nil, + podCtx: nil, + expect: &appctypes.App{ + Exec: appctypes.Exec{"/bin/foo", "foo"}, + User: "0", + Group: "0", + SupplementaryGIDs: []int{4, 5, 6}, + WorkingDirectory: "/foo", + Environment: []appctypes.EnvironmentVariable{ + {"env-foo", "bar"}, + }, + MountPoints: []appctypes.MountPoint{ + {Name: *appctypes.MustACName("mnt-foo"), Path: "/mnt-foo", ReadOnly: false}, + }, + Ports: []appctypes.Port{ + {Name: *appctypes.MustACName("port-foo"), Protocol: "TCP", Port: 4242}, + }, + Isolators: []appctypes.Isolator{ + generateCapRetainIsolator(t, "CAP_SYS_ADMIN"), + generateCapRevokeIsolator(t, "CAP_NET_ADMIN"), + generateCPUIsolator(t, "100m", "200m"), + generateMemoryIsolator(t, "10M", "20M"), + }, + }, + err: nil, + }, + + // app should be changed. 
+		{
+			container: &api.Container{
+				Command:    []string{"/bin/bar", "$(env-bar)"},
+				WorkingDir: tmpDir,
+				Resources: api.ResourceRequirements{
+					Limits:   api.ResourceList{"cpu": resource.MustParse("50m"), "memory": resource.MustParse("50M")},
+					Requests: api.ResourceList{"cpu": resource.MustParse("5m"), "memory": resource.MustParse("5M")},
+				},
+			},
+			opts: &kubecontainer.RunContainerOptions{
+				Envs: []kubecontainer.EnvVar{
+					{Name: "env-bar", Value: "foo"},
+				},
+				Mounts: []kubecontainer.Mount{
+					{Name: "mnt-bar", ContainerPath: "/mnt-bar", ReadOnly: true},
+				},
+				PortMappings: []kubecontainer.PortMapping{
+					{Name: "port-bar", Protocol: api.ProtocolTCP, ContainerPort: 1234},
+				},
+			},
+			ctx: &api.SecurityContext{
+				Capabilities: &api.Capabilities{
+					Add:  []api.Capability{"CAP_SYS_CHROOT", "CAP_SYS_BOOT"},
+					Drop: []api.Capability{"CAP_SETUID", "CAP_SETGID"},
+				},
+				RunAsUser:    &nonRootUser,
+				RunAsNonRoot: &runAsNonRootTrue,
+			},
+			podCtx: &api.PodSecurityContext{
+				SupplementalGroups: []int64{1, 2},
+				FSGroup:            &fsgid,
+			},
+			expect: &appctypes.App{
+				Exec:              appctypes.Exec{"/bin/bar", "foo"},
+				User:              "42",
+				Group:             "0",
+				SupplementaryGIDs: []int{1, 2, 3},
+				WorkingDirectory:  tmpDir,
+				Environment: []appctypes.EnvironmentVariable{
+					{"env-foo", "bar"},
+					{"env-bar", "foo"},
+				},
+				MountPoints: []appctypes.MountPoint{
+					{Name: *appctypes.MustACName("mnt-foo"), Path: "/mnt-foo", ReadOnly: false},
+					{Name: *appctypes.MustACName("mnt-bar"), Path: "/mnt-bar", ReadOnly: true},
+				},
+				Ports: []appctypes.Port{
+					{Name: *appctypes.MustACName("port-foo"), Protocol: "TCP", Port: 4242},
+					{Name: *appctypes.MustACName("port-bar"), Protocol: "TCP", Port: 1234},
+				},
+				Isolators: []appctypes.Isolator{
+					generateCapRetainIsolator(t, "CAP_SYS_CHROOT", "CAP_SYS_BOOT"),
+					generateCapRevokeIsolator(t, "CAP_SETUID", "CAP_SETGID"),
+					generateCPUIsolator(t, "5m", "50m"),
+					generateMemoryIsolator(t, "5M", "50M"),
+				},
+			},
+		},
+
+		// app should be changed. (env, mounts, ports are overridden).
+ { + container: &api.Container{ + Name: "hello-world", + Command: []string{"/bin/hello", "$(env-foo)"}, + Args: []string{"hello", "world", "$(env-bar)"}, + WorkingDir: tmpDir, + Resources: api.ResourceRequirements{ + Limits: api.ResourceList{"cpu": resource.MustParse("50m")}, + Requests: api.ResourceList{"memory": resource.MustParse("5M")}, + }, + }, + opts: &kubecontainer.RunContainerOptions{ + Envs: []kubecontainer.EnvVar{ + {Name: "env-foo", Value: "foo"}, + {Name: "env-bar", Value: "bar"}, + }, + Mounts: []kubecontainer.Mount{ + {Name: "mnt-foo", ContainerPath: "/mnt-bar", ReadOnly: true}, + }, + PortMappings: []kubecontainer.PortMapping{ + {Name: "port-foo", Protocol: api.ProtocolTCP, ContainerPort: 1234}, + }, + }, + ctx: &api.SecurityContext{ + Capabilities: &api.Capabilities{ + Add: []api.Capability{"CAP_SYS_CHROOT", "CAP_SYS_BOOT"}, + Drop: []api.Capability{"CAP_SETUID", "CAP_SETGID"}, + }, + RunAsUser: &nonRootUser, + RunAsNonRoot: &runAsNonRootTrue, + }, + podCtx: &api.PodSecurityContext{ + SupplementalGroups: []int64{1, 2}, + FSGroup: &fsgid, + }, + expect: &appctypes.App{ + Exec: appctypes.Exec{"/bin/hello", "foo", "hello", "world", "bar"}, + User: "42", + Group: "0", + SupplementaryGIDs: []int{1, 2, 3}, + WorkingDirectory: tmpDir, + Environment: []appctypes.EnvironmentVariable{ + {"env-foo", "foo"}, + {"env-bar", "bar"}, + }, + MountPoints: []appctypes.MountPoint{ + {Name: *appctypes.MustACName("mnt-foo"), Path: "/mnt-bar", ReadOnly: true}, + }, + Ports: []appctypes.Port{ + {Name: *appctypes.MustACName("port-foo"), Protocol: "TCP", Port: 1234}, + }, + Isolators: []appctypes.Isolator{ + generateCapRetainIsolator(t, "CAP_SYS_CHROOT", "CAP_SYS_BOOT"), + generateCapRevokeIsolator(t, "CAP_SETUID", "CAP_SETGID"), + generateCPUIsolator(t, "50m", "50m"), + generateMemoryIsolator(t, "5M", "5M"), + }, + }, + }, + } + + for i, tt := range tests { + testCaseHint := fmt.Sprintf("test case #%d", i) + img := baseImageManifest(t) + err := setApp(img, tt.container, tt.opts, tt.ctx, tt.podCtx) + if err == nil && tt.err != nil || err != nil && tt.err == nil { + t.Errorf("%s: expect %v, saw %v", testCaseHint, tt.err, err) + } + if err == nil { + sortAppFields(tt.expect) + sortAppFields(img.App) + assert.Equal(t, tt.expect, img.App, testCaseHint) + } + } +} + +func TestGenerateRunCommand(t *testing.T) { + hostName := "test-hostname" + tests := []struct { + pod *api.Pod + uuid string + netnsName string + + dnsServers []string + dnsSearches []string + hostName string + err error + + expect string + }{ + // Case #0, returns error. + { + &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: "pod-name-foo", + }, + Spec: api.PodSpec{}, + }, + "rkt-uuid-foo", + "default", + []string{}, + []string{}, + "", + fmt.Errorf("failed to get cluster dns"), + "", + }, + // Case #1, returns no dns, with private-net. + { + &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: "pod-name-foo", + }, + }, + "rkt-uuid-foo", + "default", + []string{}, + []string{}, + "pod-hostname-foo", + nil, + " --net=\"/var/run/netns/default\" -- /bin/rkt/rkt --insecure-options=image,ondisk --local-config=/var/rkt/local/data --dir=/var/data run-prepared --net=host --hostname=pod-hostname-foo rkt-uuid-foo", + }, + // Case #2, returns no dns, with host-net. 
+ { + &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: "pod-name-foo", + }, + Spec: api.PodSpec{ + SecurityContext: &api.PodSecurityContext{ + HostNetwork: true, + }, + }, + }, + "rkt-uuid-foo", + "", + []string{}, + []string{}, + "", + nil, + fmt.Sprintf("/bin/rkt/rkt --insecure-options=image,ondisk --local-config=/var/rkt/local/data --dir=/var/data run-prepared --net=host --hostname=%s rkt-uuid-foo", hostName), + }, + // Case #3, returns dns, dns searches, with private-net. + { + &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: "pod-name-foo", + }, + Spec: api.PodSpec{ + SecurityContext: &api.PodSecurityContext{ + HostNetwork: false, + }, + }, + }, + "rkt-uuid-foo", + "default", + []string{"127.0.0.1"}, + []string{"."}, + "pod-hostname-foo", + nil, + " --net=\"/var/run/netns/default\" -- /bin/rkt/rkt --insecure-options=image,ondisk --local-config=/var/rkt/local/data --dir=/var/data run-prepared --net=host --dns=127.0.0.1 --dns-search=. --dns-opt=ndots:5 --hostname=pod-hostname-foo rkt-uuid-foo", + }, + // Case #4, returns no dns, dns searches, with host-network. + { + &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: "pod-name-foo", + }, + Spec: api.PodSpec{ + SecurityContext: &api.PodSecurityContext{ + HostNetwork: true, + }, + }, + }, + "rkt-uuid-foo", + "", + []string{"127.0.0.1"}, + []string{"."}, + "pod-hostname-foo", + nil, + fmt.Sprintf("/bin/rkt/rkt --insecure-options=image,ondisk --local-config=/var/rkt/local/data --dir=/var/data run-prepared --net=host --hostname=%s rkt-uuid-foo", hostName), + }, + } + + rkt := &Runtime{ + os: &kubetesting.FakeOS{HostName: hostName}, + config: &Config{ + Path: "/bin/rkt/rkt", + Stage1Image: "/bin/rkt/stage1-coreos.aci", + Dir: "/var/data", + InsecureOptions: "image,ondisk", + LocalConfigDir: "/var/rkt/local/data", + }, + } + + for i, tt := range tests { + testCaseHint := fmt.Sprintf("test case #%d", i) + rkt.runtimeHelper = &fakeRuntimeHelper{tt.dnsServers, tt.dnsSearches, tt.hostName, "", tt.err} + rkt.execer = &utilexec.FakeExec{CommandScript: []utilexec.FakeCommandAction{func(cmd string, args ...string) utilexec.Cmd { + return utilexec.InitFakeCmd(&utilexec.FakeCmd{}, cmd, args...) + }}} + + // a command should be created of this form, but the returned command shouldn't be called (asserted by having no expectations on it) + + result, err := rkt.generateRunCommand(tt.pod, tt.uuid, tt.netnsName) + assert.Equal(t, tt.err, err, testCaseHint) + assert.Equal(t, tt.expect, result, testCaseHint) + } +} + +func TestLifeCycleHooks(t *testing.T) { + runner := lifecycle.NewFakeHandlerRunner() + fr := newFakeRktInterface() + fs := newFakeSystemd() + + rkt := &Runtime{ + runner: runner, + apisvc: fr, + systemd: fs, + containerRefManager: kubecontainer.NewRefManager(), + } + + tests := []struct { + pod *api.Pod + runtimePod *kubecontainer.Pod + postStartRuns []string + preStopRuns []string + err error + }{ + { + // Case 0, container without any hooks. + &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: "pod-1", + Namespace: "ns-1", + UID: "uid-1", + }, + Spec: api.PodSpec{ + Containers: []api.Container{ + {Name: "container-name-1"}, + }, + }, + }, + &kubecontainer.Pod{ + Containers: []*kubecontainer.Container{ + {ID: kubecontainer.BuildContainerID("rkt", "id-1")}, + }, + }, + []string{}, + []string{}, + nil, + }, + { + // Case 1, containers with post-start and pre-stop hooks. 
+ &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: "pod-1", + Namespace: "ns-1", + UID: "uid-1", + }, + Spec: api.PodSpec{ + Containers: []api.Container{ + { + Name: "container-name-1", + Lifecycle: &api.Lifecycle{ + PostStart: &api.Handler{ + Exec: &api.ExecAction{}, + }, + }, + }, + { + Name: "container-name-2", + Lifecycle: &api.Lifecycle{ + PostStart: &api.Handler{ + HTTPGet: &api.HTTPGetAction{}, + }, + }, + }, + { + Name: "container-name-3", + Lifecycle: &api.Lifecycle{ + PreStop: &api.Handler{ + Exec: &api.ExecAction{}, + }, + }, + }, + { + Name: "container-name-4", + Lifecycle: &api.Lifecycle{ + PreStop: &api.Handler{ + HTTPGet: &api.HTTPGetAction{}, + }, + }, + }, + }, + }, + }, + &kubecontainer.Pod{ + Containers: []*kubecontainer.Container{ + { + ID: kubecontainer.ParseContainerID("rkt://uuid:container-name-4"), + Name: "container-name-4", + }, + { + ID: kubecontainer.ParseContainerID("rkt://uuid:container-name-3"), + Name: "container-name-3", + }, + { + ID: kubecontainer.ParseContainerID("rkt://uuid:container-name-2"), + Name: "container-name-2", + }, + { + ID: kubecontainer.ParseContainerID("rkt://uuid:container-name-1"), + Name: "container-name-1", + }, + }, + }, + []string{ + "exec on pod: pod-1_ns-1(uid-1), container: container-name-1: rkt://uuid:container-name-1", + "http-get on pod: pod-1_ns-1(uid-1), container: container-name-2: rkt://uuid:container-name-2", + }, + []string{ + "exec on pod: pod-1_ns-1(uid-1), container: container-name-3: rkt://uuid:container-name-3", + "http-get on pod: pod-1_ns-1(uid-1), container: container-name-4: rkt://uuid:container-name-4", + }, + nil, + }, + { + // Case 2, one container with invalid hooks. + &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: "pod-1", + Namespace: "ns-1", + UID: "uid-1", + }, + Spec: api.PodSpec{ + Containers: []api.Container{ + { + Name: "container-name-1", + Lifecycle: &api.Lifecycle{ + PostStart: &api.Handler{}, + PreStop: &api.Handler{}, + }, + }, + }, + }, + }, + &kubecontainer.Pod{ + Containers: []*kubecontainer.Container{ + { + ID: kubecontainer.ParseContainerID("rkt://uuid:container-name-1"), + Name: "container-name-1", + }, + }, + }, + []string{}, + []string{}, + errors.NewAggregate([]error{fmt.Errorf("Invalid handler: %v", &api.Handler{})}), + }, + } + + for i, tt := range tests { + testCaseHint := fmt.Sprintf("test case #%d", i) + + pod := &rktapi.Pod{Id: "uuid"} + for _, c := range tt.runtimePod.Containers { + pod.Apps = append(pod.Apps, &rktapi.App{ + Name: c.Name, + State: rktapi.AppState_APP_STATE_RUNNING, + }) + } + fr.pods = []*rktapi.Pod{pod} + + // Run post-start hooks + err := rkt.runLifecycleHooks(tt.pod, tt.runtimePod, lifecyclePostStartHook) + assert.Equal(t, tt.err, err, testCaseHint) + + sort.Sort(sortedStringList(tt.postStartRuns)) + sort.Sort(sortedStringList(runner.HandlerRuns)) + + assert.Equal(t, tt.postStartRuns, runner.HandlerRuns, testCaseHint) + + runner.Reset() + + // Run pre-stop hooks. 
+		err = rkt.runLifecycleHooks(tt.pod, tt.runtimePod, lifecyclePreStopHook)
+		assert.Equal(t, tt.err, err, testCaseHint)
+
+		sort.Sort(sortedStringList(tt.preStopRuns))
+		sort.Sort(sortedStringList(runner.HandlerRuns))
+
+		assert.Equal(t, tt.preStopRuns, runner.HandlerRuns, testCaseHint)
+
+		runner.Reset()
+	}
+}
+
+func TestImageStats(t *testing.T) {
+	fr := newFakeRktInterface()
+	rkt := &Runtime{apisvc: fr}
+
+	fr.images = []*rktapi.Image{
+		{Size: 100},
+		{Size: 200},
+		{Size: 300},
+	}
+
+	result, err := rkt.ImageStats()
+	assert.NoError(t, err)
+	assert.Equal(t, result, &kubecontainer.ImageStats{TotalStorageBytes: 600})
+}
+
+func TestGarbageCollect(t *testing.T) {
+	fr := newFakeRktInterface()
+	fs := newFakeSystemd()
+	cli := newFakeRktCli()
+	fakeOS := kubetesting.NewFakeOS()
+	getter := newFakePodGetter()
+
+	rkt := &Runtime{
+		os:                  fakeOS,
+		cli:                 cli,
+		apisvc:              fr,
+		podGetter:           getter,
+		systemd:             fs,
+		containerRefManager: kubecontainer.NewRefManager(),
+	}
+
+	fakeApp := &rktapi.App{Name: "app-foo"}
+
+	tests := []struct {
+		gcPolicy             kubecontainer.ContainerGCPolicy
+		apiPods              []*api.Pod
+		pods                 []*rktapi.Pod
+		serviceFilesOnDisk   []string
+		expectedCommands     []string
+		expectedServiceFiles []string
+	}{
+		// All running pods, should not be gc'd.
+		// Dead, new pods should not be gc'd.
+		// Dead, old pods should be gc'd.
+		// Deleted pods should be gc'd.
+		// Service files without corresponding pods should be removed.
+		{
+			kubecontainer.ContainerGCPolicy{
+				MinAge:        0,
+				MaxContainers: 0,
+			},
+			[]*api.Pod{
+				{ObjectMeta: api.ObjectMeta{UID: "pod-uid-1"}},
+				{ObjectMeta: api.ObjectMeta{UID: "pod-uid-2"}},
+				{ObjectMeta: api.ObjectMeta{UID: "pod-uid-3"}},
+				{ObjectMeta: api.ObjectMeta{UID: "pod-uid-4"}},
+			},
+			[]*rktapi.Pod{
+				{
+					Id:        "deleted-foo",
+					State:     rktapi.PodState_POD_STATE_EXITED,
+					CreatedAt: time.Now().Add(time.Hour).UnixNano(),
+					StartedAt: time.Now().Add(time.Hour).UnixNano(),
+					Apps:      []*rktapi.App{fakeApp},
+					Annotations: []*rktapi.KeyValue{
+						{
+							Key:   types.KubernetesPodUIDLabel,
+							Value: "pod-uid-0",
+						},
+					},
+				},
+				{
+					Id:        "running-foo",
+					State:     rktapi.PodState_POD_STATE_RUNNING,
+					CreatedAt: 0,
+					StartedAt: 0,
+					Apps:      []*rktapi.App{fakeApp},
+					Annotations: []*rktapi.KeyValue{
+						{
+							Key:   types.KubernetesPodUIDLabel,
+							Value: "pod-uid-1",
+						},
+					},
+				},
+				{
+					Id:        "running-bar",
+					State:     rktapi.PodState_POD_STATE_RUNNING,
+					CreatedAt: 0,
+					StartedAt: 0,
+					Apps:      []*rktapi.App{fakeApp},
+					Annotations: []*rktapi.KeyValue{
+						{
+							Key:   types.KubernetesPodUIDLabel,
+							Value: "pod-uid-2",
+						},
+					},
+				},
+				{
+					Id:        "dead-old",
+					State:     rktapi.PodState_POD_STATE_EXITED,
+					CreatedAt: 0,
+					StartedAt: 0,
+					Apps:      []*rktapi.App{fakeApp},
+					Annotations: []*rktapi.KeyValue{
+						{
+							Key:   types.KubernetesPodUIDLabel,
+							Value: "pod-uid-3",
+						},
+					},
+				},
+				{
+					Id:        "dead-new",
+					State:     rktapi.PodState_POD_STATE_EXITED,
+					CreatedAt: time.Now().Add(time.Hour).UnixNano(),
+					StartedAt: time.Now().Add(time.Hour).UnixNano(),
+					Apps:      []*rktapi.App{fakeApp},
+					Annotations: []*rktapi.KeyValue{
+						{
+							Key:   types.KubernetesPodUIDLabel,
+							Value: "pod-uid-4",
+						},
+					},
+				},
+			},
+			[]string{"k8s_dead-old.service", "k8s_deleted-foo.service", "k8s_non-existing-bar.service"},
+			[]string{"rkt rm dead-old", "rkt rm deleted-foo"},
+			[]string{"/run/systemd/system/k8s_dead-old.service", "/run/systemd/system/k8s_deleted-foo.service", "/run/systemd/system/k8s_non-existing-bar.service"},
+		},
+		// gcPolicy.MaxContainers should be enforced.
+		// Oldest ones are removed first.
+ { + kubecontainer.ContainerGCPolicy{ + MinAge: 0, + MaxContainers: 1, + }, + []*api.Pod{ + {ObjectMeta: api.ObjectMeta{UID: "pod-uid-0"}}, + {ObjectMeta: api.ObjectMeta{UID: "pod-uid-1"}}, + {ObjectMeta: api.ObjectMeta{UID: "pod-uid-2"}}, + }, + []*rktapi.Pod{ + { + Id: "dead-2", + State: rktapi.PodState_POD_STATE_EXITED, + CreatedAt: 2, + StartedAt: 2, + Apps: []*rktapi.App{fakeApp}, + Annotations: []*rktapi.KeyValue{ + { + Key: types.KubernetesPodUIDLabel, + Value: "pod-uid-2", + }, + }, + }, + { + Id: "dead-1", + State: rktapi.PodState_POD_STATE_EXITED, + CreatedAt: 1, + StartedAt: 1, + Apps: []*rktapi.App{fakeApp}, + Annotations: []*rktapi.KeyValue{ + { + Key: types.KubernetesPodUIDLabel, + Value: "pod-uid-1", + }, + }, + }, + { + Id: "dead-0", + State: rktapi.PodState_POD_STATE_EXITED, + CreatedAt: 0, + StartedAt: 0, + Apps: []*rktapi.App{fakeApp}, + Annotations: []*rktapi.KeyValue{ + { + Key: types.KubernetesPodUIDLabel, + Value: "pod-uid-0", + }, + }, + }, + }, + []string{"k8s_dead-0.service", "k8s_dead-1.service", "k8s_dead-2.service"}, + []string{"rkt rm dead-0", "rkt rm dead-1"}, + []string{"/run/systemd/system/k8s_dead-0.service", "/run/systemd/system/k8s_dead-1.service"}, + }, + } + + for i, tt := range tests { + testCaseHint := fmt.Sprintf("test case #%d", i) + + ctrl := gomock.NewController(t) + + fakeOS.ReadDirFn = func(dirname string) ([]os.FileInfo, error) { + serviceFileNames := tt.serviceFilesOnDisk + var fileInfos []os.FileInfo + + for _, name := range serviceFileNames { + mockFI := mock_os.NewMockFileInfo(ctrl) + mockFI.EXPECT().Name().Return(name) + fileInfos = append(fileInfos, mockFI) + } + return fileInfos, nil + } + + fr.pods = tt.pods + for _, p := range tt.apiPods { + getter.pods[p.UID] = p + } + + err := rkt.GarbageCollect(tt.gcPolicy) + assert.NoError(t, err, testCaseHint) + + sort.Sort(sortedStringList(tt.expectedCommands)) + sort.Sort(sortedStringList(cli.cmds)) + + assert.Equal(t, tt.expectedCommands, cli.cmds, testCaseHint) + + sort.Sort(sortedStringList(tt.expectedServiceFiles)) + sort.Sort(sortedStringList(fakeOS.Removes)) + + assert.Equal(t, tt.expectedServiceFiles, fakeOS.Removes, testCaseHint) + + // Cleanup after each test. 
+ cli.Reset() + ctrl.Finish() + fakeOS.Removes = []string{} + getter.pods = make(map[kubetypes.UID]*api.Pod) + } +} + +type annotationsByName []appctypes.Annotation + +func (a annotationsByName) Len() int { return len(a) } +func (a annotationsByName) Less(x, y int) bool { return a[x].Name < a[y].Name } +func (a annotationsByName) Swap(x, y int) { a[x], a[y] = a[y], a[x] } + +func TestMakePodManifestAnnotations(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + fr := newFakeRktInterface() + fs := newFakeSystemd() + r := &Runtime{apisvc: fr, systemd: fs} + + testCases := []struct { + in *api.Pod + out *appcschema.PodManifest + outerr error + }{ + { + in: &api.Pod{ + ObjectMeta: api.ObjectMeta{ + UID: "uid-1", + Name: "name-1", + Namespace: "namespace-1", + Annotations: map[string]string{ + k8sRktStage1NameAnno: "stage1-override-img", + }, + }, + }, + out: &appcschema.PodManifest{ + Annotations: []appctypes.Annotation{ + { + Name: appctypes.ACIdentifier(k8sRktStage1NameAnno), + Value: "stage1-override-img", + }, + { + Name: appctypes.ACIdentifier(types.KubernetesPodUIDLabel), + Value: "uid-1", + }, + { + Name: appctypes.ACIdentifier(types.KubernetesPodNameLabel), + Value: "name-1", + }, + { + Name: appctypes.ACIdentifier(k8sRktKubeletAnno), + Value: "true", + }, + { + Name: appctypes.ACIdentifier(types.KubernetesPodNamespaceLabel), + Value: "namespace-1", + }, + { + Name: appctypes.ACIdentifier(k8sRktRestartCountAnno), + Value: "0", + }, + }, + }, + }, + } + + for i, testCase := range testCases { + hint := fmt.Sprintf("case #%d", i) + + result, err := r.makePodManifest(testCase.in, "", []api.Secret{}) + assert.Equal(t, err, testCase.outerr, hint) + if err == nil { + sort.Sort(annotationsByName(result.Annotations)) + sort.Sort(annotationsByName(testCase.out.Annotations)) + assert.Equal(t, result.Annotations, testCase.out.Annotations, hint) + } + } +} + +func TestPreparePodArgs(t *testing.T) { + r := &Runtime{ + config: &Config{}, + } + + testCases := []struct { + manifest appcschema.PodManifest + stage1Config string + cmd []string + }{ + { + appcschema.PodManifest{ + Annotations: appctypes.Annotations{ + { + Name: k8sRktStage1NameAnno, + Value: "stage1-image", + }, + }, + }, + "", + []string{"prepare", "--quiet", "--pod-manifest", "file", "--stage1-name=stage1-image"}, + }, + { + appcschema.PodManifest{ + Annotations: appctypes.Annotations{ + { + Name: k8sRktStage1NameAnno, + Value: "stage1-image", + }, + }, + }, + "stage1-path", + []string{"prepare", "--quiet", "--pod-manifest", "file", "--stage1-name=stage1-image"}, + }, + { + appcschema.PodManifest{ + Annotations: appctypes.Annotations{}, + }, + "stage1-path", + []string{"prepare", "--quiet", "--pod-manifest", "file", "--stage1-path=stage1-path"}, + }, + { + appcschema.PodManifest{ + Annotations: appctypes.Annotations{}, + }, + "", + []string{"prepare", "--quiet", "--pod-manifest", "file"}, + }, + } + + for i, testCase := range testCases { + r.config.Stage1Image = testCase.stage1Config + cmd := r.preparePodArgs(&testCase.manifest, "file") + assert.Equal(t, testCase.cmd, cmd, fmt.Sprintf("Test case #%d", i)) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/rkt/systemd.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/rkt/systemd.go new file mode 100644 index 000000000000..7151cfdce604 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/rkt/systemd.go @@ -0,0 +1,110 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. 
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rkt
+
+import (
+	"fmt"
+	"os/exec"
+	"strconv"
+	"strings"
+
+	"github.com/coreos/go-systemd/dbus"
+)
+
+// systemdVersion is a type that wraps an int to implement the kubecontainer.Version interface.
+type systemdVersion int
+
+func (s systemdVersion) String() string {
+	return fmt.Sprintf("%d", s)
+}
+
+func (s systemdVersion) Compare(other string) (int, error) {
+	v, err := strconv.Atoi(other)
+	if err != nil {
+		return -1, err
+	}
+	if int(s) < v {
+		return -1, nil
+	} else if int(s) > v {
+		return 1, nil
+	}
+	return 0, nil
+}
+
+// systemdInterface is an abstraction of the go-systemd/dbus package to make
+// it mockable for testing.
+// TODO(yifan): Eventually we should move these functionalities to:
+// 1. a package for launching/stopping rkt pods.
+// 2. rkt api-service interface for listing pods.
+// See https://github.com/coreos/rkt/issues/1769.
+type systemdInterface interface {
+	// Version returns the version of systemd.
+	Version() (systemdVersion, error)
+	// ListUnits lists all the loaded units.
+	ListUnits() ([]dbus.UnitStatus, error)
+	// StopUnit stops the unit with the given name.
+	StopUnit(name string, mode string, ch chan<- string) (int, error)
+	// RestartUnit restarts the unit with the given name.
+	RestartUnit(name string, mode string, ch chan<- string) (int, error)
+	// Reload is equivalent to 'systemctl daemon-reload'.
+	Reload() error
+	// ResetFailed is equivalent to 'systemctl reset-failed'.
+	ResetFailed() error
+}
+
+// systemd implements the systemdInterface using dbus and systemctl.
+// All the functions other than Version() are already implemented by go-systemd/dbus.
+type systemd struct {
+	*dbus.Conn
+}
+
+// newSystemd creates a systemd object that implements systemdInterface.
+func newSystemd() (*systemd, error) {
+	dbusConn, err := dbus.New()
+	if err != nil {
+		return nil, err
+	}
+	return &systemd{dbusConn}, nil
+}
+
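The Version implementation that follows shells out to `systemctl --version` and parses the first output line. A minimal standalone sketch of just that parsing step, assuming the first line looks like "systemd 215" (parseSystemdVersion is a hypothetical helper, and strings.Fields is used here where the vendored code splits on a single space):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseSystemdVersion extracts the numeric version from the first
// line of `systemctl --version` output, e.g. "systemd 215".
func parseSystemdVersion(output string) (int, error) {
	firstLine := strings.Split(output, "\n")[0]
	fields := strings.Fields(firstLine)
	if len(fields) != 2 {
		return -1, fmt.Errorf("unexpected version line: %q", firstLine)
	}
	return strconv.Atoi(fields[1])
}

func main() {
	// Sample output; the real code obtains this via exec.Command.
	v, err := parseSystemdVersion("systemd 215\n+PAM +AUDIT +SELINUX ...\n")
	fmt.Println(v, err) // 215 <nil>
}
```

+// Version returns the version of systemd.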
+func (s *systemd) Version() (systemdVersion, error) {
+	output, err := exec.Command("systemctl", "--version").Output()
+	if err != nil {
+		return -1, err
+	}
+	// Example output of 'systemctl --version':
+	//
+	// systemd 215
+	// +PAM +AUDIT +SELINUX +IMA +SYSVINIT +LIBCRYPTSETUP +GCRYPT +ACL +XZ -SECCOMP -APPARMOR
+	//
+	lines := strings.Split(string(output), "\n")
+	tuples := strings.Split(lines[0], " ")
+	if len(tuples) != 2 {
+		return -1, fmt.Errorf("rkt: failed to parse version %v", lines)
+	}
+	result, err := strconv.Atoi(tuples[1])
+	if err != nil {
+		return -1, err
+	}
+	return systemdVersion(result), nil
+}
+
+// ResetFailed calls 'systemctl reset-failed'.
+func (s *systemd) ResetFailed() error {
+	return exec.Command("systemctl", "reset-failed").Run()
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/rkt/version.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/rkt/version.go
new file mode 100644
index 000000000000..32b66b29bcdb
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/rkt/version.go
@@ -0,0 +1,158 @@
+/*
+Copyright 2015 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rkt
+
+import (
+	"fmt"
+	"sync"
+
+	"github.com/coreos/go-semver/semver"
+	rktapi "github.com/coreos/rkt/api/v1alpha"
+	"github.com/golang/glog"
+	"golang.org/x/net/context"
+)
+
+type versions struct {
+	sync.RWMutex
+	binVersion     rktVersion
+	apiVersion     rktVersion
+	appcVersion    rktVersion
+	systemdVersion systemdVersion
+}
+
+// rktVersion implements the kubecontainer.Version interface by implementing
+// Compare() and String() (the latter is provided by the embedded semver.Version).
+type rktVersion struct {
+	*semver.Version
+}
+
+func newRktVersion(version string) (rktVersion, error) {
+	sem, err := semver.NewVersion(version)
+	if err != nil {
+		return rktVersion{}, err
+	}
+	return rktVersion{sem}, nil
+}
+
+func (r rktVersion) Compare(other string) (int, error) {
+	v, err := semver.NewVersion(other)
+	if err != nil {
+		return -1, err
+	}
+
+	if r.LessThan(*v) {
+		return -1, nil
+	}
+	if v.LessThan(*r.Version) {
+		return 1, nil
+	}
+	return 0, nil
+}
+
+func (r *Runtime) getVersions() error {
+	r.versions.Lock()
+	defer r.versions.Unlock()
+
+	// Get systemd version.
+	var err error
+	r.versions.systemdVersion, err = r.systemd.Version()
+	if err != nil {
+		return err
+	}
+
+	// Example for the version strings returned by GetInfo():
+	// RktVersion:"0.10.0+gitb7349b1" AppcVersion:"0.7.1" ApiVersion:"1.0.0-alpha"
+	resp, err := r.apisvc.GetInfo(context.Background(), &rktapi.GetInfoRequest{})
+	if err != nil {
+		return err
+	}
+
+	// Get rkt binary version.
+	r.versions.binVersion, err = newRktVersion(resp.Info.RktVersion)
+	if err != nil {
+		return err
+	}
+
+	// Get Appc version.
+	r.versions.appcVersion, err = newRktVersion(resp.Info.AppcVersion)
+	if err != nil {
+		return err
+	}
+
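rktVersion.Compare above reduces two semver comparisons to a three-way result. The same logic as a standalone program (compare is an illustrative name; only semver.NewVersion and Version.LessThan from github.com/coreos/go-semver are assumed):

```go
package main

import (
	"fmt"

	"github.com/coreos/go-semver/semver"
)

// compare returns -1, 0, or 1 depending on how a relates to b,
// mirroring rktVersion.Compare above.
func compare(a, b string) (int, error) {
	va, err := semver.NewVersion(a)
	if err != nil {
		return -1, err
	}
	vb, err := semver.NewVersion(b)
	if err != nil {
		return -1, err
	}
	switch {
	case va.LessThan(*vb):
		return -1, nil
	case vb.LessThan(*va):
		return 1, nil
	default:
		return 0, nil
	}
}

func main() {
	fmt.Println(compare("0.10.0+gitb7349b1", "1.0.0-alpha")) // -1 <nil>
}
```

+	// Get rkt API version.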
+	r.versions.apiVersion, err = newRktVersion(resp.Info.ApiVersion)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// checkVersion checks whether rkt, systemd, and the rkt API service meet the version requirements.
+// If all version requirements are met, it returns nil.
+func (r *Runtime) checkVersion(minimumRktBinVersion, recommendedRktBinVersion, minimumAppcVersion, minimumRktApiVersion, minimumSystemdVersion string) error {
+	if err := r.getVersions(); err != nil {
+		return err
+	}
+
+	r.versions.RLock()
+	defer r.versions.RUnlock()
+
+	// Check systemd version.
+	result, err := r.versions.systemdVersion.Compare(minimumSystemdVersion)
+	if err != nil {
+		return err
+	}
+	if result < 0 {
+		return fmt.Errorf("rkt: systemd version (%v) is too old, requires at least %v", r.versions.systemdVersion, minimumSystemdVersion)
+	}
+
+	// Check rkt binary version.
+	result, err = r.versions.binVersion.Compare(minimumRktBinVersion)
+	if err != nil {
+		return err
+	}
+	if result < 0 {
+		return fmt.Errorf("rkt: binary version is too old (%v), requires at least %v", r.versions.binVersion, minimumRktBinVersion)
+	}
+	result, err = r.versions.binVersion.Compare(recommendedRktBinVersion)
+	if err != nil {
+		return err
+	}
+	if result != 0 {
+		// TODO(yifan): Record an event to expose the information.
+		glog.Warningf("rkt: current binary version %q is not recommended (recommended version %q)", r.versions.binVersion, recommendedRktBinVersion)
+	}
+
+	// Check Appc version.
+	result, err = r.versions.appcVersion.Compare(minimumAppcVersion)
+	if err != nil {
+		return err
+	}
+	if result < 0 {
+		return fmt.Errorf("rkt: appc version is too old (%v), requires at least %v", r.versions.appcVersion, minimumAppcVersion)
+	}
+
+	// Check rkt API version.
+	result, err = r.versions.apiVersion.Compare(minimumRktApiVersion)
+	if err != nil {
+		return err
+	}
+	if result < 0 {
+		return fmt.Errorf("rkt: API version is too old (%v), requires at least %v", r.versions.apiVersion, minimumRktApiVersion)
+	}
+	return nil
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/root_context_linux.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/root_context_linux.go
new file mode 100644
index 000000000000..a694d71ba119
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/root_context_linux.go
@@ -0,0 +1,35 @@
+// +build linux
+
+/*
+Copyright 2015 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kubelet
+
+import (
+	"github.com/opencontainers/runc/libcontainer/selinux"
+)
+
+// getRootDirContext gets the SELinux context of the kubelet rootDir
+// or returns an error.
+func (kl *Kubelet) getRootDirContext() (string, error) {
+	// If SELinux is not enabled, return an empty string.
+	if !selinux.SelinuxEnabled() {
+		return "", nil
+	}
+
+	// Get the SELinux context of the rootDir.
+	return selinux.Getfilecon(kl.getRootDir())
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/root_context_unsupported.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/root_context_unsupported.go
new file mode 100644
index 000000000000..826ac34f0489
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/root_context_unsupported.go
@@ -0,0 +1,24 @@
+// +build !linux
+
+/*
+Copyright 2015 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kubelet
+
+func (kl *Kubelet) getRootDirContext() (string, error) {
+	// For now, just return a blank security context on unsupported platforms.
+	return "", nil
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/runonce.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/runonce.go
new file mode 100644
index 000000000000..bf7ee7fcc57e
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/runonce.go
@@ -0,0 +1,155 @@
+/*
+Copyright 2014 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kubelet
+
+import (
+	"fmt"
+	"os"
+	"time"
+
+	"github.com/golang/glog"
+	"k8s.io/kubernetes/pkg/api"
+	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
+	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
+	"k8s.io/kubernetes/pkg/kubelet/util/format"
+)
+
+const (
+	runOnceManifestDelay     = 1 * time.Second
+	runOnceMaxRetries        = 10
+	runOnceRetryDelay        = 1 * time.Second
+	runOnceRetryDelayBackoff = 2
+)
+
+type RunPodResult struct {
+	Pod *api.Pod
+	Err error
+}
+
+// RunOnce polls from one configuration update and runs the associated pods.
+func (kl *Kubelet) RunOnce(updates <-chan kubetypes.PodUpdate) ([]RunPodResult, error) {
+	// Setup filesystem directories.
+	if err := kl.setupDataDirs(); err != nil {
+		return nil, err
+	}
+
+	// If the container logs directory does not exist, create it.
+	if _, err := os.Stat(containerLogsDir); err != nil {
+		if err := kl.os.MkdirAll(containerLogsDir, 0755); err != nil {
+			glog.Errorf("Failed to create directory %q: %v", containerLogsDir, err)
+		}
+	}
+
+	select {
+	case u := <-updates:
+		glog.Infof("processing manifest with %d pods", len(u.Pods))
+		result, err := kl.runOnce(u.Pods, runOnceRetryDelay)
+		glog.Infof("finished processing %d pods", len(u.Pods))
+		return result, err
+	case <-time.After(runOnceManifestDelay):
+		return nil, fmt.Errorf("no pod manifest update after %v", runOnceManifestDelay)
+	}
+}
+
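RunOnce above uses a one-shot select against time.After to bound how long it waits for the first configuration update. The same pattern in isolation (waitForUpdate and the string payload are illustrative):

```go
package main

import (
	"fmt"
	"time"
)

// waitForUpdate receives one value from updates or gives up after
// timeout, the same one-shot select pattern RunOnce uses.
func waitForUpdate(updates <-chan string, timeout time.Duration) (string, error) {
	select {
	case u := <-updates:
		return u, nil
	case <-time.After(timeout):
		return "", fmt.Errorf("no update after %v", timeout)
	}
}

func main() {
	ch := make(chan string, 1)
	ch <- "pod manifest"
	u, err := waitForUpdate(ch, time.Second)
	fmt.Println(u, err) // "pod manifest" <nil>
}
```

+// runOnce runs a given set of pods and returns their status.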
+func (kl *Kubelet) runOnce(pods []*api.Pod, retryDelay time.Duration) (results []RunPodResult, err error) { + ch := make(chan RunPodResult) + admitted := []*api.Pod{} + for _, pod := range pods { + // Check if we can admit the pod. + if ok, reason, message := kl.canAdmitPod(append(admitted, pod), pod); !ok { + kl.rejectPod(pod, reason, message) + } else { + admitted = append(admitted, pod) + } + go func(pod *api.Pod) { + err := kl.runPod(pod, retryDelay) + ch <- RunPodResult{pod, err} + }(pod) + } + + glog.Infof("waiting for %d pods", len(pods)) + failedPods := []string{} + for i := 0; i < len(pods); i++ { + res := <-ch + results = append(results, res) + if res.Err != nil { + // TODO(proppy): report which containers failed the pod. + glog.Infof("failed to start pod %q: %v", format.Pod(res.Pod), res.Err) + failedPods = append(failedPods, format.Pod(res.Pod)) + } else { + glog.Infof("started pod %q", format.Pod(res.Pod)) + } + } + if len(failedPods) > 0 { + return results, fmt.Errorf("error running pods: %v", failedPods) + } + glog.Infof("%d pods started", len(pods)) + return results, err +} + +// runPod runs a single pod and wait until all containers are running. +func (kl *Kubelet) runPod(pod *api.Pod, retryDelay time.Duration) error { + delay := retryDelay + retry := 0 + for { + status, err := kl.containerRuntime.GetPodStatus(pod.UID, pod.Name, pod.Namespace) + if err != nil { + return fmt.Errorf("Unable to get status for pod %q: %v", format.Pod(pod), err) + } + + if kl.isPodRunning(pod, status) { + glog.Infof("pod %q containers running", format.Pod(pod)) + return nil + } + glog.Infof("pod %q containers not running: syncing", format.Pod(pod)) + + glog.Infof("Creating a mirror pod for static pod %q", format.Pod(pod)) + if err := kl.podManager.CreateMirrorPod(pod); err != nil { + glog.Errorf("Failed creating a mirror pod %q: %v", format.Pod(pod), err) + } + mirrorPod, _ := kl.podManager.GetMirrorPodByPod(pod) + if err = kl.syncPod(syncPodOptions{ + pod: pod, + mirrorPod: mirrorPod, + podStatus: status, + updateType: kubetypes.SyncPodUpdate, + }); err != nil { + return fmt.Errorf("error syncing pod %q: %v", format.Pod(pod), err) + } + if retry >= runOnceMaxRetries { + return fmt.Errorf("timeout error: pod %q containers not running after %d retries", format.Pod(pod), runOnceMaxRetries) + } + // TODO(proppy): health checking would be better than waiting + checking the state at the next iteration. + glog.Infof("pod %q containers synced, waiting for %v", format.Pod(pod), delay) + time.Sleep(delay) + retry++ + delay *= runOnceRetryDelayBackoff + } +} + +// isPodRunning returns true if all containers of a manifest are running. +func (kl *Kubelet) isPodRunning(pod *api.Pod, status *kubecontainer.PodStatus) bool { + for _, c := range pod.Spec.Containers { + cs := status.FindContainerStatusByName(c.Name) + if cs == nil || cs.State != kubecontainer.ContainerStateRunning { + glog.Infof("Container %q for pod %q not running", c.Name, format.Pod(pod)) + return false + } + } + return true +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/runonce_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/runonce_test.go new file mode 100644 index 000000000000..e84e1e823ef8 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/runonce_test.go @@ -0,0 +1,149 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubelet + +import ( + "os" + "testing" + "time" + + cadvisorapi "github.com/google/cadvisor/info/v1" + cadvisorapiv2 "github.com/google/cadvisor/info/v2" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/componentconfig" + "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" + "k8s.io/kubernetes/pkg/client/record" + cadvisortest "k8s.io/kubernetes/pkg/kubelet/cadvisor/testing" + "k8s.io/kubernetes/pkg/kubelet/cm" + kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" + containertest "k8s.io/kubernetes/pkg/kubelet/container/testing" + "k8s.io/kubernetes/pkg/kubelet/eviction" + "k8s.io/kubernetes/pkg/kubelet/network" + nettest "k8s.io/kubernetes/pkg/kubelet/network/testing" + kubepod "k8s.io/kubernetes/pkg/kubelet/pod" + podtest "k8s.io/kubernetes/pkg/kubelet/pod/testing" + "k8s.io/kubernetes/pkg/kubelet/server/stats" + "k8s.io/kubernetes/pkg/kubelet/status" + "k8s.io/kubernetes/pkg/types" + "k8s.io/kubernetes/pkg/util" + utiltesting "k8s.io/kubernetes/pkg/util/testing" +) + +func TestRunOnce(t *testing.T) { + cadvisor := &cadvisortest.Mock{} + cadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil) + cadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{ + Usage: 400 * mb, + Capacity: 1000 * mb, + Available: 600 * mb, + }, nil) + cadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{ + Usage: 9 * mb, + Capacity: 10 * mb, + }, nil) + podManager := kubepod.NewBasicPodManager(podtest.NewFakeMirrorClient()) + diskSpaceManager, _ := newDiskSpaceManager(cadvisor, DiskSpacePolicy{}) + fakeRuntime := &containertest.FakeRuntime{} + basePath, err := utiltesting.MkTmpdir("kubelet") + if err != nil { + t.Fatalf("can't make a temp rootdir %v", err) + } + defer os.RemoveAll(basePath) + kb := &Kubelet{ + rootDirectory: basePath, + recorder: &record.FakeRecorder{}, + cadvisor: cadvisor, + nodeLister: testNodeLister{}, + nodeInfo: testNodeInfo{}, + statusManager: status.NewManager(nil, podManager), + containerRefManager: kubecontainer.NewRefManager(), + podManager: podManager, + os: &containertest.FakeOS{}, + volumeManager: newVolumeManager(), + diskSpaceManager: diskSpaceManager, + containerRuntime: fakeRuntime, + reasonCache: NewReasonCache(), + clock: util.RealClock{}, + kubeClient: &fake.Clientset{}, + hostname: testKubeletHostname, + nodeName: testKubeletHostname, + } + kb.containerManager = cm.NewStubContainerManager() + + kb.networkPlugin, _ = network.InitNetworkPlugin([]network.NetworkPlugin{}, "", nettest.NewFakeHost(nil), componentconfig.HairpinNone) + // TODO: Factor out "StatsProvider" from Kubelet so we don't have a cyclic dependency + volumeStatsAggPeriod := time.Second * 10 + kb.resourceAnalyzer = stats.NewResourceAnalyzer(kb, volumeStatsAggPeriod, kb.containerRuntime) + nodeRef := &api.ObjectReference{ + Kind: "Node", + Name: kb.nodeName, + UID: types.UID(kb.nodeName), + Namespace: "", + } + fakeKillPodFunc := func(pod *api.Pod, podStatus api.PodStatus, gracePeriodOverride *int64) error { + return nil + } + evictionManager, evictionAdmitHandler, err := eviction.NewManager(kb.resourceAnalyzer, eviction.Config{}, fakeKillPodFunc, 
kb.recorder, nodeRef, kb.clock)
+	if err != nil {
+		t.Fatalf("failed to initialize eviction manager: %v", err)
+	}
+	kb.evictionManager = evictionManager
+	kb.AddPodAdmitHandler(evictionAdmitHandler)
+	if err := kb.setupDataDirs(); err != nil {
+		t.Errorf("Failed to init data dirs: %v", err)
+	}
+
+	pods := []*api.Pod{
+		{
+			ObjectMeta: api.ObjectMeta{
+				UID:       "12345678",
+				Name:      "foo",
+				Namespace: "new",
+			},
+			Spec: api.PodSpec{
+				Containers: []api.Container{
+					{Name: "bar"},
+				},
+			},
+		},
+	}
+	podManager.SetPods(pods)
+	// The original test here was meaningless: the fake runtime always returns an empty pod status,
+	// and the original isPodRunning logic happened to return true for an empty status, so the test
+	// always passed. The isPodRunning logic has since changed, so to let the test pass we now set
+	// the pod status directly in the fake runtime. The test remains meaningless, because after this
+	// isPodRunning will still always return true; since run-once is currently unused in Kubernetes,
+	// the cleanup work is deprioritized.
+	// TODO(random-liu) Fix the test, make it meaningful.
+	fakeRuntime.PodStatus = kubecontainer.PodStatus{
+		ContainerStatuses: []*kubecontainer.ContainerStatus{
+			{
+				Name:  "bar",
+				State: kubecontainer.ContainerStateRunning,
+			},
+		},
+	}
+	results, err := kb.runOnce(pods, time.Millisecond)
+	if err != nil {
+		t.Errorf("unexpected error: %v", err)
+	}
+	if results[0].Err != nil {
+		t.Errorf("unexpected run pod error: %v", results[0].Err)
+	}
+	if results[0].Pod.Name != "foo" {
+		t.Errorf("unexpected pod: %q", results[0].Pod.Name)
+	}
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/runtime.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/runtime.go
new file mode 100644
index 000000000000..63dd0136363f
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/runtime.go
@@ -0,0 +1,99 @@
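For reference, runPod in runonce.go above retries the sync loop with a doubling delay (runOnceRetryDelayBackoff) and gives up after runOnceMaxRetries attempts. A self-contained sketch of that retry-with-backoff shape, with a hypothetical ready() probe standing in for the pod status check:

```go
package main

import (
	"fmt"
	"time"
)

const (
	maxRetries   = 10
	backoffRatio = 2
)

// retryWithBackoff polls ready() until it succeeds, doubling the delay
// between attempts, and fails after maxRetries attempts.
func retryWithBackoff(initialDelay time.Duration, ready func() bool) error {
	delay := initialDelay
	for retry := 0; ; retry++ {
		if ready() {
			return nil
		}
		if retry >= maxRetries {
			return fmt.Errorf("not ready after %d retries", maxRetries)
		}
		time.Sleep(delay)
		delay *= backoffRatio
	}
}

func main() {
	attempts := 0
	err := retryWithBackoff(time.Millisecond, func() bool {
		attempts++
		return attempts == 3
	})
	fmt.Println(attempts, err) // 3 <nil>
}
```

+/*
+Copyright 2015 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.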
+*/ + +package kubelet + +import ( + "fmt" + "sync" + "time" +) + +type runtimeState struct { + sync.Mutex + lastBaseRuntimeSync time.Time + baseRuntimeSyncThreshold time.Duration + networkError error + internalError error + cidr string + initError error +} + +func (s *runtimeState) setRuntimeSync(t time.Time) { + s.Lock() + defer s.Unlock() + s.lastBaseRuntimeSync = t +} + +func (s *runtimeState) setInternalError(err error) { + s.Lock() + defer s.Unlock() + s.internalError = err +} + +func (s *runtimeState) setNetworkState(err error) { + s.Lock() + defer s.Unlock() + s.networkError = err +} + +func (s *runtimeState) setPodCIDR(cidr string) { + s.Lock() + defer s.Unlock() + s.cidr = cidr +} + +func (s *runtimeState) podCIDR() string { + s.Lock() + defer s.Unlock() + return s.cidr +} + +func (s *runtimeState) setInitError(err error) { + s.Lock() + defer s.Unlock() + s.initError = err +} + +func (s *runtimeState) errors() []string { + s.Lock() + defer s.Unlock() + var ret []string + if s.initError != nil { + ret = append(ret, s.initError.Error()) + } + if s.networkError != nil { + ret = append(ret, s.networkError.Error()) + } + if !s.lastBaseRuntimeSync.Add(s.baseRuntimeSyncThreshold).After(time.Now()) { + ret = append(ret, "container runtime is down") + } + if s.internalError != nil { + ret = append(ret, s.internalError.Error()) + } + return ret +} + +func newRuntimeState( + runtimeSyncThreshold time.Duration, +) *runtimeState { + return &runtimeState{ + lastBaseRuntimeSync: time.Time{}, + baseRuntimeSyncThreshold: runtimeSyncThreshold, + networkError: fmt.Errorf("network state unknown"), + internalError: nil, + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/server/auth.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/server/auth.go new file mode 100644 index 000000000000..0ab25512c049 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/server/auth.go @@ -0,0 +1,37 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package server + +import ( + "k8s.io/kubernetes/pkg/auth/authenticator" + "k8s.io/kubernetes/pkg/auth/authorizer" +) + +// KubeletAuth implements AuthInterface +type KubeletAuth struct { + // authenticator identifies the user for requests to the Kubelet API + authenticator.Request + // authorizerAttributeGetter builds authorization.Attributes for a request to the Kubelet API + authorizer.RequestAttributesGetter + // authorizer determines whether a given authorization.Attributes is allowed + authorizer.Authorizer +} + +// NewKubeletAuth returns a kubelet.AuthInterface composed of the given authenticator, attribute getter, and authorizer +func NewKubeletAuth(authenticator authenticator.Request, authorizerAttributeGetter authorizer.RequestAttributesGetter, authorizer authorizer.Authorizer) AuthInterface { + return &KubeletAuth{authenticator, authorizerAttributeGetter, authorizer} +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/server/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/server/doc.go new file mode 100644 index 000000000000..edb357a8e1ae --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/server/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package server contains functions related to serving Kubelet's external interface. +package server diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/server/portforward/constants.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/server/portforward/constants.go new file mode 100644 index 000000000000..f438670675fe --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/server/portforward/constants.go @@ -0,0 +1,21 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// package portforward contains server-side logic for handling port forwarding requests. +package portforward + +// The subprotocol "portforward.k8s.io" is used for port forwarding. +const PortForwardProtocolV1Name = "portforward.k8s.io" diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/server/remotecommand/attach.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/server/remotecommand/attach.go new file mode 100644 index 000000000000..0f9ba7ff5e30 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/server/remotecommand/attach.go @@ -0,0 +1,53 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package remotecommand + +import ( + "errors" + "fmt" + "io" + "net/http" + "time" + + "k8s.io/kubernetes/pkg/types" + "k8s.io/kubernetes/pkg/util/runtime" +) + +// Attacher knows how to attach to a running container in a pod. +type Attacher interface { + // AttachContainer attaches to the running container in the pod, copying data between in/out/err + // and the container's stdin/stdout/stderr. + AttachContainer(name string, uid types.UID, container string, in io.Reader, out, err io.WriteCloser, tty bool) error +} + +// ServeAttach handles requests to attach to a container. After creating/receiving the required +// streams, it delegates the actual attaching to attacher. +func ServeAttach(w http.ResponseWriter, req *http.Request, attacher Attacher, podName string, uid types.UID, container string, idleTimeout, streamCreationTimeout time.Duration, supportedProtocols []string) { + ctx, ok := createStreams(req, w, supportedProtocols, idleTimeout, streamCreationTimeout) + if !ok { + // error is handled by createStreams + return + } + defer ctx.conn.Close() + + err := attacher.AttachContainer(podName, uid, container, ctx.stdinStream, ctx.stdoutStream, ctx.stderrStream, ctx.tty) + if err != nil { + msg := fmt.Sprintf("error attaching to container: %v", err) + runtime.HandleError(errors.New(msg)) + fmt.Fprint(ctx.errorStream, msg) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/server/remotecommand/contants.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/server/remotecommand/contants.go new file mode 100644 index 000000000000..f45cc644032a --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/server/remotecommand/contants.go @@ -0,0 +1,36 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package remotecommand + +import "time" + +const ( + DefaultStreamCreationTimeout = 30 * time.Second + + // The SPDY subprotocol "channel.k8s.io" is used for remote command + // attachment/execution. This represents the initial unversioned subprotocol, + // which has the known bugs http://issues.k8s.io/13394 and + // http://issues.k8s.io/13395. + StreamProtocolV1Name = "channel.k8s.io" + + // The SPDY subprotocol "v2.channel.k8s.io" is used for remote command + // attachment/execution. It is the second version of the subprotocol and + // resolves the issues present in the first version. 
+ StreamProtocolV2Name = "v2.channel.k8s.io" +) + +var SupportedStreamingProtocols = []string{StreamProtocolV2Name, StreamProtocolV1Name} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/server/remotecommand/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/server/remotecommand/doc.go new file mode 100644 index 000000000000..482e9afc1f62 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/server/remotecommand/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// package remotecommand contains functions related to executing commands in and attaching to pods. +package remotecommand diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/server/remotecommand/exec.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/server/remotecommand/exec.go new file mode 100644 index 000000000000..df9a4b5854e0 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/server/remotecommand/exec.go @@ -0,0 +1,57 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package remotecommand + +import ( + "errors" + "fmt" + "io" + "net/http" + "time" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/types" + "k8s.io/kubernetes/pkg/util/runtime" +) + +// Executor knows how to execute a command in a container in a pod. +type Executor interface { + // ExecInContainer executes a command in a container in the pod, copying data + // between in/out/err and the container's stdin/stdout/stderr. + ExecInContainer(name string, uid types.UID, container string, cmd []string, in io.Reader, out, err io.WriteCloser, tty bool) error +} + +// ServeExec handles requests to execute a command in a container. After +// creating/receiving the required streams, it delegates the actual execution +// to the executor. 
+func ServeExec(w http.ResponseWriter, req *http.Request, executor Executor, podName string, uid types.UID, container string, idleTimeout, streamCreationTimeout time.Duration, supportedProtocols []string) { + ctx, ok := createStreams(req, w, supportedProtocols, idleTimeout, streamCreationTimeout) + if !ok { + // error is handled by createStreams + return + } + defer ctx.conn.Close() + + cmd := req.URL.Query()[api.ExecCommandParamm] + + err := executor.ExecInContainer(podName, uid, container, cmd, ctx.stdinStream, ctx.stdoutStream, ctx.stderrStream, ctx.tty) + if err != nil { + msg := fmt.Sprintf("error executing command in container: %v", err) + runtime.HandleError(errors.New(msg)) + fmt.Fprint(ctx.errorStream, msg) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/server/remotecommand/httpstream.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/server/remotecommand/httpstream.go new file mode 100644 index 000000000000..4b0c588e9fa7 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/server/remotecommand/httpstream.go @@ -0,0 +1,277 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package remotecommand + +import ( + "errors" + "fmt" + "io" + "net/http" + "time" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/util/httpstream" + "k8s.io/kubernetes/pkg/util/httpstream/spdy" + "k8s.io/kubernetes/pkg/util/runtime" + "k8s.io/kubernetes/pkg/util/wsstream" + + "github.com/golang/glog" +) + +// options contains details about which streams are required for +// remote command execution. +type options struct { + stdin bool + stdout bool + stderr bool + tty bool + expectedStreams int +} + +// newOptions creates a new options from the Request. +func newOptions(req *http.Request) (*options, error) { + tty := req.FormValue(api.ExecTTYParam) == "1" + stdin := req.FormValue(api.ExecStdinParam) == "1" + stdout := req.FormValue(api.ExecStdoutParam) == "1" + stderr := req.FormValue(api.ExecStderrParam) == "1" + if tty && stderr { + // TODO: make this an error before we reach this method + glog.V(4).Infof("Access to exec with tty and stderr is not supported, bypassing stderr") + stderr = false + } + + // count the streams client asked for, starting with 1 + expectedStreams := 1 + if stdin { + expectedStreams++ + } + if stdout { + expectedStreams++ + } + if stderr { + expectedStreams++ + } + + if expectedStreams == 1 { + return nil, fmt.Errorf("you must specify at least 1 of stdin, stdout, stderr") + } + + return &options{ + stdin: stdin, + stdout: stdout, + stderr: stderr, + tty: tty, + expectedStreams: expectedStreams, + }, nil +} + +// context contains the connection and streams used when +// forwarding an attach or execute session into a container. 
+type context struct {
+	conn         io.Closer
+	stdinStream  io.ReadCloser
+	stdoutStream io.WriteCloser
+	stderrStream io.WriteCloser
+	errorStream  io.WriteCloser
+	tty          bool
+}
+
+// streamAndReply holds both a Stream and a channel that is closed when the stream's reply frame is
+// enqueued. Consumers can wait for replySent to be closed prior to proceeding, to ensure that the
+// replyFrame is enqueued before the connection's goaway frame is sent (e.g. if a stream was
+// received and right after, the connection gets closed).
+type streamAndReply struct {
+	httpstream.Stream
+	replySent <-chan struct{}
+}
+
+// waitStreamReply waits until either replySent or stop is closed. If replySent is closed, it sends
+// an empty struct to the notify channel.
+func waitStreamReply(replySent <-chan struct{}, notify chan<- struct{}, stop <-chan struct{}) {
+	select {
+	case <-replySent:
+		notify <- struct{}{}
+	case <-stop:
+	}
+}
+
+func createStreams(req *http.Request, w http.ResponseWriter, supportedStreamProtocols []string, idleTimeout, streamCreationTimeout time.Duration) (*context, bool) {
+	opts, err := newOptions(req)
+	if err != nil {
+		runtime.HandleError(err)
+		w.WriteHeader(http.StatusBadRequest)
+		fmt.Fprint(w, err.Error())
+		return nil, false
+	}
+
+	if wsstream.IsWebSocketRequest(req) {
+		return createWebSocketStreams(req, w, opts, idleTimeout)
+	}
+
+	protocol, err := httpstream.Handshake(req, w, supportedStreamProtocols)
+	if err != nil {
+		w.WriteHeader(http.StatusBadRequest)
+		fmt.Fprint(w, err.Error())
+		return nil, false
+	}
+
+	streamCh := make(chan streamAndReply)
+
+	upgrader := spdy.NewResponseUpgrader()
+	conn := upgrader.UpgradeResponse(w, req, func(stream httpstream.Stream, replySent <-chan struct{}) error {
+		streamCh <- streamAndReply{Stream: stream, replySent: replySent}
+		return nil
+	})
+	// From this point on, we can no longer call methods on the response.
+	if conn == nil {
+		// The upgrader is responsible for notifying the client of any errors that
+		// occurred during upgrading. All we can do is return here at this point
+		// if we weren't successful in upgrading.
+		return nil, false
+	}
+
+	conn.SetIdleTimeout(idleTimeout)
+
+	var handler protocolHandler
+	switch protocol {
+	case StreamProtocolV2Name:
+		handler = &v2ProtocolHandler{}
+	case "":
+		glog.V(4).Infof("Client did not request protocol negotiation. Falling back to %q", StreamProtocolV1Name)
+		fallthrough
+	case StreamProtocolV1Name:
+		handler = &v1ProtocolHandler{}
+	}
+
+	expired := time.NewTimer(streamCreationTimeout)
+
+	ctx, err := handler.waitForStreams(streamCh, opts.expectedStreams, expired.C)
+	if err != nil {
+		runtime.HandleError(err)
+		return nil, false
+	}
+
+	ctx.conn = conn
+	ctx.tty = opts.tty
+	return ctx, true
+}
+
+type protocolHandler interface {
+	// waitForStreams waits for the expected streams or a timeout, returning a
+	// context if all the streams were received, or an error if not.
+	waitForStreams(streams <-chan streamAndReply, expectedStreams int, expired <-chan time.Time) (*context, error)
+}
+
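Both protocol handlers below share the same shape: demultiplex incoming streams by type, then count reply notifications on a channel until the expected number arrives or a timer fires. A stripped-down sketch of that wait-or-expire accounting, using bare channels in place of SPDY streams (collect and notify are illustrative names):

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// collect waits until expected signals arrive on notify, or fails when
// timeout fires first, mirroring the waitForStreams bookkeeping.
func collect(notify <-chan struct{}, expected int, timeout <-chan time.Time) error {
	received := 0
	for received < expected {
		select {
		case <-notify:
			received++
		case <-timeout:
			return errors.New("timed out waiting for streams")
		}
	}
	return nil
}

func main() {
	notify := make(chan struct{}, 3)
	for i := 0; i < 3; i++ {
		notify <- struct{}{}
	}
	fmt.Println(collect(notify, 3, time.After(time.Second))) // <nil>
}
```

+// v2ProtocolHandler implements the V2 protocol version for streaming command execution.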
+type v2ProtocolHandler struct{} + +func (*v2ProtocolHandler) waitForStreams(streams <-chan streamAndReply, expectedStreams int, expired <-chan time.Time) (*context, error) { + ctx := &context{} + receivedStreams := 0 + replyChan := make(chan struct{}) + stop := make(chan struct{}) + defer close(stop) +WaitForStreams: + for { + select { + case stream := <-streams: + streamType := stream.Headers().Get(api.StreamType) + switch streamType { + case api.StreamTypeError: + ctx.errorStream = stream + go waitStreamReply(stream.replySent, replyChan, stop) + case api.StreamTypeStdin: + ctx.stdinStream = stream + go waitStreamReply(stream.replySent, replyChan, stop) + case api.StreamTypeStdout: + ctx.stdoutStream = stream + go waitStreamReply(stream.replySent, replyChan, stop) + case api.StreamTypeStderr: + ctx.stderrStream = stream + go waitStreamReply(stream.replySent, replyChan, stop) + default: + runtime.HandleError(fmt.Errorf("Unexpected stream type: %q", streamType)) + } + case <-replyChan: + receivedStreams++ + if receivedStreams == expectedStreams { + break WaitForStreams + } + case <-expired: + // TODO find a way to return the error to the user. Maybe use a separate + // stream to report errors? + return nil, errors.New("timed out waiting for client to create streams") + } + } + + return ctx, nil +} + +// v1ProtocolHandler implements the V1 protocol version for streaming command execution. +type v1ProtocolHandler struct{} + +func (*v1ProtocolHandler) waitForStreams(streams <-chan streamAndReply, expectedStreams int, expired <-chan time.Time) (*context, error) { + ctx := &context{} + receivedStreams := 0 + replyChan := make(chan struct{}) + stop := make(chan struct{}) + defer close(stop) +WaitForStreams: + for { + select { + case stream := <-streams: + streamType := stream.Headers().Get(api.StreamType) + switch streamType { + case api.StreamTypeError: + ctx.errorStream = stream + + // This defer statement shouldn't be here, but due to previous refactoring, it ended up in + // here. This is what 1.0.x kubelets do, so we're retaining that behavior. This is fixed in + // the v2ProtocolHandler. + defer stream.Reset() + + go waitStreamReply(stream.replySent, replyChan, stop) + case api.StreamTypeStdin: + ctx.stdinStream = stream + go waitStreamReply(stream.replySent, replyChan, stop) + case api.StreamTypeStdout: + ctx.stdoutStream = stream + go waitStreamReply(stream.replySent, replyChan, stop) + case api.StreamTypeStderr: + ctx.stderrStream = stream + go waitStreamReply(stream.replySent, replyChan, stop) + default: + runtime.HandleError(fmt.Errorf("Unexpected stream type: %q", streamType)) + } + case <-replyChan: + receivedStreams++ + if receivedStreams == expectedStreams { + break WaitForStreams + } + case <-expired: + // TODO find a way to return the error to the user. Maybe use a separate + // stream to report errors? + return nil, errors.New("timed out waiting for client to create streams") + } + } + + if ctx.stdinStream != nil { + ctx.stdinStream.Close() + } + + return ctx, nil +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/server/remotecommand/websocket.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/server/remotecommand/websocket.go new file mode 100644 index 000000000000..06a84c8e7d9e --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/server/remotecommand/websocket.go @@ -0,0 +1,77 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package remotecommand + +import ( + "net/http" + "time" + + "k8s.io/kubernetes/pkg/httplog" + "k8s.io/kubernetes/pkg/util/wsstream" + + "github.com/golang/glog" +) + +// standardShellChannels returns the standard channel types for a shell connection (STDIN 0, STDOUT 1, STDERR 2) +// along with the approximate duplex value. Supported subprotocols are "channel.k8s.io" and +// "base64.channel.k8s.io". +func standardShellChannels(stdin, stdout, stderr bool) []wsstream.ChannelType { + // open three half-duplex channels + channels := []wsstream.ChannelType{wsstream.ReadChannel, wsstream.WriteChannel, wsstream.WriteChannel} + if !stdin { + channels[0] = wsstream.IgnoreChannel + } + if !stdout { + channels[1] = wsstream.IgnoreChannel + } + if !stderr { + channels[2] = wsstream.IgnoreChannel + } + return channels +} + +// createWebSocketStreams returns a remoteCommandContext containing the websocket connection and +// streams needed to perform an exec or an attach. +func createWebSocketStreams(req *http.Request, w http.ResponseWriter, opts *options, idleTimeout time.Duration) (*context, bool) { + // open the requested channels, and always open the error channel + channels := append(standardShellChannels(opts.stdin, opts.stdout, opts.stderr), wsstream.WriteChannel) + conn := wsstream.NewConn(channels...) + conn.SetIdleTimeout(idleTimeout) + streams, err := conn.Open(httplog.Unlogged(w), req) + if err != nil { + glog.Errorf("Unable to upgrade websocket connection: %v", err) + return nil, false + } + // Send an empty message to the lowest writable channel to notify the client the connection is established + // TODO: make generic to SPDY and WebSockets and do it outside of this method? + switch { + case opts.stdout: + streams[1].Write([]byte{}) + case opts.stderr: + streams[2].Write([]byte{}) + default: + streams[3].Write([]byte{}) + } + return &context{ + conn: conn, + stdinStream: streams[0], + stdoutStream: streams[1], + stderrStream: streams[2], + errorStream: streams[3], + tty: opts.tty, + }, true +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/server/server.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/server/server.go new file mode 100644 index 000000000000..f490a5075b11 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/server/server.go @@ -0,0 +1,959 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package server + +import ( + "crypto/tls" + "errors" + "fmt" + "io" + "net" + "net/http" + "net/http/pprof" + "reflect" + "strconv" + "strings" + "sync" + "time" + + restful "github.com/emicklei/go-restful" + "github.com/golang/glog" + cadvisorapi "github.com/google/cadvisor/info/v1" + cadvisorapiv2 "github.com/google/cadvisor/info/v2" + "github.com/prometheus/client_golang/prometheus" + + "k8s.io/kubernetes/pkg/api" + apierrs "k8s.io/kubernetes/pkg/api/errors" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/api/validation" + "k8s.io/kubernetes/pkg/auth/authenticator" + "k8s.io/kubernetes/pkg/auth/authorizer" + "k8s.io/kubernetes/pkg/healthz" + "k8s.io/kubernetes/pkg/httplog" + "k8s.io/kubernetes/pkg/kubelet/cm" + kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" + "k8s.io/kubernetes/pkg/kubelet/server/portforward" + "k8s.io/kubernetes/pkg/kubelet/server/remotecommand" + "k8s.io/kubernetes/pkg/kubelet/server/stats" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/types" + "k8s.io/kubernetes/pkg/util/configz" + "k8s.io/kubernetes/pkg/util/flushwriter" + "k8s.io/kubernetes/pkg/util/httpstream" + "k8s.io/kubernetes/pkg/util/httpstream/spdy" + "k8s.io/kubernetes/pkg/util/limitwriter" + utilruntime "k8s.io/kubernetes/pkg/util/runtime" + "k8s.io/kubernetes/pkg/volume" +) + +// Server is a http.Handler which exposes kubelet functionality over HTTP. +type Server struct { + auth AuthInterface + host HostInterface + restfulCont containerInterface + resourceAnalyzer stats.ResourceAnalyzer + runtime kubecontainer.Runtime +} + +type TLSOptions struct { + Config *tls.Config + CertFile string + KeyFile string +} + +// containerInterface defines the restful.Container functions used on the root container +type containerInterface interface { + Add(service *restful.WebService) *restful.Container + Handle(path string, handler http.Handler) + Filter(filter restful.FilterFunction) + ServeHTTP(w http.ResponseWriter, r *http.Request) + RegisteredWebServices() []*restful.WebService + + // RegisteredHandlePaths returns the paths of handlers registered directly with the container (non-web-services) + // Used to test filters are being applied on non-web-service handlers + RegisteredHandlePaths() []string +} + +// filteringContainer delegates all Handle(...) calls to Container.HandleWithFilter(...), +// so we can ensure restful.FilterFunctions are used for all handlers +type filteringContainer struct { + *restful.Container + registeredHandlePaths []string +} + +func (a *filteringContainer) Handle(path string, handler http.Handler) { + a.HandleWithFilter(path, handler) + a.registeredHandlePaths = append(a.registeredHandlePaths, path) +} +func (a *filteringContainer) RegisteredHandlePaths() []string { + return a.registeredHandlePaths +} + +// ListenAndServeKubeletServer initializes a server to respond to HTTP network requests on the Kubelet. 
+func ListenAndServeKubeletServer(
+	host HostInterface,
+	resourceAnalyzer stats.ResourceAnalyzer,
+	address net.IP,
+	port uint,
+	tlsOptions *TLSOptions,
+	auth AuthInterface,
+	enableDebuggingHandlers bool,
+	runtime kubecontainer.Runtime) {
+	glog.Infof("Starting to listen on %s:%d", address, port)
+	handler := NewServer(host, resourceAnalyzer, auth, enableDebuggingHandlers, runtime)
+	s := &http.Server{
+		Addr:           net.JoinHostPort(address.String(), strconv.FormatUint(uint64(port), 10)),
+		Handler:        &handler,
+		MaxHeaderBytes: 1 << 20,
+	}
+	if tlsOptions != nil {
+		s.TLSConfig = tlsOptions.Config
+		glog.Fatal(s.ListenAndServeTLS(tlsOptions.CertFile, tlsOptions.KeyFile))
+	} else {
+		glog.Fatal(s.ListenAndServe())
+	}
+}
+
+// ListenAndServeKubeletReadOnlyServer initializes a server to respond to HTTP network requests on the Kubelet.
+func ListenAndServeKubeletReadOnlyServer(host HostInterface, resourceAnalyzer stats.ResourceAnalyzer, address net.IP, port uint, runtime kubecontainer.Runtime) {
+	glog.V(1).Infof("Starting to listen read-only on %s:%d", address, port)
+	s := NewServer(host, resourceAnalyzer, nil, false, runtime)
+
+	server := &http.Server{
+		Addr:           net.JoinHostPort(address.String(), strconv.FormatUint(uint64(port), 10)),
+		Handler:        &s,
+		MaxHeaderBytes: 1 << 20,
+	}
+	glog.Fatal(server.ListenAndServe())
+}
+
+// AuthInterface contains all methods required by the auth filters
+type AuthInterface interface {
+	authenticator.Request
+	authorizer.RequestAttributesGetter
+	authorizer.Authorizer
+}
+
+// HostInterface contains all the kubelet methods required by the server.
+// For testability.
+type HostInterface interface {
+	GetContainerInfo(podFullName string, uid types.UID, containerName string, req *cadvisorapi.ContainerInfoRequest) (*cadvisorapi.ContainerInfo, error)
+	GetContainerInfoV2(name string, options cadvisorapiv2.RequestOptions) (map[string]cadvisorapiv2.ContainerInfo, error)
+	GetRawContainerInfo(containerName string, req *cadvisorapi.ContainerInfoRequest, subcontainers bool) (map[string]*cadvisorapi.ContainerInfo, error)
+	GetCachedMachineInfo() (*cadvisorapi.MachineInfo, error)
+	GetPods() []*api.Pod
+	GetRunningPods() ([]*api.Pod, error)
+	GetPodByName(namespace, name string) (*api.Pod, bool)
+	RunInContainer(name string, uid types.UID, container string, cmd []string) ([]byte, error)
+	ExecInContainer(name string, uid types.UID, container string, cmd []string, in io.Reader, out, err io.WriteCloser, tty bool) error
+	AttachContainer(name string, uid types.UID, container string, in io.Reader, out, err io.WriteCloser, tty bool) error
+	GetKubeletContainerLogs(podFullName, containerName string, logOptions *api.PodLogOptions, stdout, stderr io.Writer) error
+	ServeLogs(w http.ResponseWriter, req *http.Request)
+	PortForward(name string, uid types.UID, port uint16, stream io.ReadWriteCloser) error
+	StreamingConnectionIdleTimeout() time.Duration
+	ResyncInterval() time.Duration
+	GetHostname() string
+	GetNode() (*api.Node, error)
+	GetNodeConfig() cm.NodeConfig
+	LatestLoopEntryTime() time.Time
+	ImagesFsInfo() (cadvisorapiv2.FsInfo, error)
+	RootFsInfo() (cadvisorapiv2.FsInfo, error)
+	ListVolumesForPod(podUID types.UID) (map[string]volume.Volume, bool)
+	PLEGHealthCheck() (bool, error)
+}
+
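+// A hedged sketch of a minimal AuthInterface implementation, mirroring the
+// fakeAuth used by server_test.go later in this patch: every request is
+// authenticated as a fixed user and authorized unconditionally. The names
+// here are illustrative only.
+//
+//	type allowAll struct{}
+//
+//	func (allowAll) AuthenticateRequest(req *http.Request) (user.Info, bool, error) {
+//		return &user.DefaultInfo{Name: "dev"}, true, nil
+//	}
+//
+//	func (allowAll) GetRequestAttributes(u user.Info, req *http.Request) authorizer.Attributes {
+//		return &authorizer.AttributesRecord{User: u}
+//	}
+//
+//	func (allowAll) Authorize(a authorizer.Attributes) error { return nil }
+//
+// Passing such a value as auth to NewServer causes the authn/authz filter
+// below to run on every registered route.
+
+// NewServer initializes and configures a kubelet.Server object to handle HTTP requests.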
+func NewServer(
+	host HostInterface,
+	resourceAnalyzer stats.ResourceAnalyzer,
+	auth AuthInterface,
+	enableDebuggingHandlers bool,
+	runtime kubecontainer.Runtime) Server {
+	server := Server{
+		host:             host,
+		resourceAnalyzer: resourceAnalyzer,
+		auth:             auth,
+		restfulCont:      &filteringContainer{Container: restful.NewContainer()},
+		runtime:          runtime,
+	}
+	if auth != nil {
+		server.InstallAuthFilter()
+	}
+	server.InstallDefaultHandlers()
+	if enableDebuggingHandlers {
+		server.InstallDebuggingHandlers()
+	}
+	return server
+}
+
+// InstallAuthFilter installs authentication filters with the restful Container.
+func (s *Server) InstallAuthFilter() {
+	s.restfulCont.Filter(func(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) {
+		// Authenticate
+		u, ok, err := s.auth.AuthenticateRequest(req.Request)
+		if err != nil {
+			glog.Errorf("Unable to authenticate the request due to an error: %v", err)
+			resp.WriteErrorString(http.StatusUnauthorized, "Unauthorized")
+			return
+		}
+		if !ok {
+			resp.WriteErrorString(http.StatusUnauthorized, "Unauthorized")
+			return
+		}
+
+		// Get authorization attributes
+		attrs := s.auth.GetRequestAttributes(u, req.Request)
+
+		// Authorize
+		if err := s.auth.Authorize(attrs); err != nil {
+			msg := fmt.Sprintf("Forbidden (user=%s, verb=%s, namespace=%s, resource=%s)", u.GetName(), attrs.GetVerb(), attrs.GetNamespace(), attrs.GetResource())
+			glog.V(2).Info(msg)
+			resp.WriteErrorString(http.StatusForbidden, msg)
+			return
+		}
+
+		// Continue
+		chain.ProcessFilter(req, resp)
+	})
+}
+
+// InstallDefaultHandlers registers the default set of supported HTTP request
+// patterns with the restful Container.
+func (s *Server) InstallDefaultHandlers() {
+	healthz.InstallHandler(s.restfulCont,
+		healthz.PingHealthz,
+		healthz.NamedCheck("syncloop", s.syncLoopHealthCheck),
+		healthz.NamedCheck("pleg", s.plegHealthCheck),
+	)
+	var ws *restful.WebService
+	ws = new(restful.WebService)
+	ws.
+		Path("/pods").
+		Produces(restful.MIME_JSON)
+	ws.Route(ws.GET("").
+		To(s.getPods).
+		Operation("getPods"))
+	s.restfulCont.Add(ws)
+
+	s.restfulCont.Add(stats.CreateHandlers(s.host, s.resourceAnalyzer))
+	s.restfulCont.Handle("/metrics", prometheus.Handler())
+
+	ws = new(restful.WebService)
+	ws.
+		Path("/spec/").
+		Produces(restful.MIME_JSON)
+	ws.Route(ws.GET("").
+		To(s.getSpec).
+		Operation("getSpec").
+		Writes(cadvisorapi.MachineInfo{}))
+	s.restfulCont.Add(ws)
+}
+
+const pprofBasePath = "/debug/pprof/"
+
+// InstallDebuggingHandlers registers the HTTP request patterns that serve logs or run commands/containers
+func (s *Server) InstallDebuggingHandlers() {
+	var ws *restful.WebService
+
+	ws = new(restful.WebService)
+	ws.
+		Path("/run")
+	ws.Route(ws.POST("/{podNamespace}/{podID}/{containerName}").
+		To(s.getRun).
+		Operation("getRun"))
+	ws.Route(ws.POST("/{podNamespace}/{podID}/{uid}/{containerName}").
+		To(s.getRun).
+		Operation("getRun"))
+	s.restfulCont.Add(ws)
+
+	ws = new(restful.WebService)
+	ws.
+		Path("/exec")
+	ws.Route(ws.GET("/{podNamespace}/{podID}/{containerName}").
+		To(s.getExec).
+		Operation("getExec"))
+	ws.Route(ws.POST("/{podNamespace}/{podID}/{containerName}").
+		To(s.getExec).
+		Operation("getExec"))
+	ws.Route(ws.GET("/{podNamespace}/{podID}/{uid}/{containerName}").
+		To(s.getExec).
+		Operation("getExec"))
+	ws.Route(ws.POST("/{podNamespace}/{podID}/{uid}/{containerName}").
+		To(s.getExec).
+		Operation("getExec"))
+	s.restfulCont.Add(ws)
+
+	ws = new(restful.WebService)
+	ws.
+		Path("/attach")
+	ws.Route(ws.GET("/{podNamespace}/{podID}/{containerName}").
+		To(s.getAttach).
+		Operation("getAttach"))
+	ws.Route(ws.POST("/{podNamespace}/{podID}/{containerName}").
+		To(s.getAttach).
+		Operation("getAttach"))
+	ws.Route(ws.GET("/{podNamespace}/{podID}/{uid}/{containerName}").
+		To(s.getAttach).
+		Operation("getAttach"))
+	ws.Route(ws.POST("/{podNamespace}/{podID}/{uid}/{containerName}").
+		To(s.getAttach).
+		Operation("getAttach"))
+	s.restfulCont.Add(ws)
+
+	ws = new(restful.WebService)
+	ws.
+		Path("/portForward")
+	ws.Route(ws.POST("/{podNamespace}/{podID}").
+		To(s.getPortForward).
+		Operation("getPortForward"))
+	ws.Route(ws.POST("/{podNamespace}/{podID}/{uid}").
+		To(s.getPortForward).
+		Operation("getPortForward"))
+	s.restfulCont.Add(ws)
+
+	ws = new(restful.WebService)
+	ws.
+		Path("/logs/")
+	ws.Route(ws.GET("").
+		To(s.getLogs).
+		Operation("getLogs"))
+	ws.Route(ws.GET("/{logpath:*}").
+		To(s.getLogs).
+		Operation("getLogs"))
+	s.restfulCont.Add(ws)
+
+	ws = new(restful.WebService)
+	ws.
+		Path("/containerLogs")
+	ws.Route(ws.GET("/{podNamespace}/{podID}/{containerName}").
+		To(s.getContainerLogs).
+		Operation("getContainerLogs"))
+	s.restfulCont.Add(ws)
+
+	configz.InstallHandler(s.restfulCont)
+
+	handlePprofEndpoint := func(req *restful.Request, resp *restful.Response) {
+		name := strings.TrimPrefix(req.Request.URL.Path, pprofBasePath)
+		switch name {
+		case "profile":
+			pprof.Profile(resp, req.Request)
+		case "symbol":
+			pprof.Symbol(resp, req.Request)
+		case "cmdline":
+			pprof.Cmdline(resp, req.Request)
+		default:
+			pprof.Index(resp, req.Request)
+		}
+	}
+
+	// Set up pprof handlers.
+	ws = new(restful.WebService).Path(pprofBasePath)
+	ws.Route(ws.GET("/{subpath:*}").To(func(req *restful.Request, resp *restful.Response) {
+		handlePprofEndpoint(req, resp)
+	})).Doc("pprof endpoint")
+	s.restfulCont.Add(ws)
+
+	// The /runningpods endpoint is used for testing only.
+	ws = new(restful.WebService)
+	ws.
+		Path("/runningpods/").
+		Produces(restful.MIME_JSON)
+	ws.Route(ws.GET("").
+		To(s.getRunningPods).
+		Operation("getRunningPods"))
+	s.restfulCont.Add(ws)
+}
+
+type httpHandler struct {
+	f func(w http.ResponseWriter, r *http.Request)
+}
+
+func (h *httpHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	h.f(w, r)
+}
+
+// Checks if kubelet's sync loop that updates containers is working.
+func (s *Server) syncLoopHealthCheck(req *http.Request) error {
+	duration := s.host.ResyncInterval() * 2
+	minDuration := time.Minute * 5
+	if duration < minDuration {
+		duration = minDuration
+	}
+	enterLoopTime := s.host.LatestLoopEntryTime()
+	if !enterLoopTime.IsZero() && time.Now().After(enterLoopTime.Add(duration)) {
+		return fmt.Errorf("Sync Loop took longer than expected.")
+	}
+	return nil
+}
+
+// Checks if pleg, which lists pods periodically, is healthy.
+func (s *Server) plegHealthCheck(req *http.Request) error {
+	if ok, err := s.host.PLEGHealthCheck(); !ok {
+		return fmt.Errorf("PLEG took longer than expected: %v", err)
+	}
+	return nil
+}
+
+// getContainerLogs handles containerLogs requests against the Kubelet
+func (s *Server) getContainerLogs(request *restful.Request, response *restful.Response) {
+	podNamespace := request.PathParameter("podNamespace")
+	podID := request.PathParameter("podID")
+	containerName := request.PathParameter("containerName")
+
+	if len(podID) == 0 {
+		// TODO: Why return JSON when the rest return plaintext errors?
+		// TODO: Why return plaintext errors?
+ response.WriteError(http.StatusBadRequest, fmt.Errorf(`{"message": "Missing podID."}`)) + return + } + if len(containerName) == 0 { + // TODO: Why return JSON when the rest return plaintext errors? + response.WriteError(http.StatusBadRequest, fmt.Errorf(`{"message": "Missing container name."}`)) + return + } + if len(podNamespace) == 0 { + // TODO: Why return JSON when the rest return plaintext errors? + response.WriteError(http.StatusBadRequest, fmt.Errorf(`{"message": "Missing podNamespace."}`)) + return + } + + query := request.Request.URL.Query() + // backwards compatibility for the "tail" query parameter + if tail := request.QueryParameter("tail"); len(tail) > 0 { + query["tailLines"] = []string{tail} + // "all" is the same as omitting tail + if tail == "all" { + delete(query, "tailLines") + } + } + // container logs on the kubelet are locked to the v1 API version of PodLogOptions + logOptions := &api.PodLogOptions{} + if err := api.ParameterCodec.DecodeParameters(query, v1.SchemeGroupVersion, logOptions); err != nil { + response.WriteError(http.StatusBadRequest, fmt.Errorf(`{"message": "Unable to decode query."}`)) + return + } + logOptions.TypeMeta = unversioned.TypeMeta{} + if errs := validation.ValidatePodLogOptions(logOptions); len(errs) > 0 { + response.WriteError(apierrs.StatusUnprocessableEntity, fmt.Errorf(`{"message": "Invalid request."}`)) + return + } + + pod, ok := s.host.GetPodByName(podNamespace, podID) + if !ok { + response.WriteError(http.StatusNotFound, fmt.Errorf("pod %q does not exist\n", podID)) + return + } + // Check if containerName is valid. + containerExists := false + for _, container := range pod.Spec.Containers { + if container.Name == containerName { + containerExists = true + } + } + if !containerExists { + for _, container := range pod.Spec.InitContainers { + if container.Name == containerName { + containerExists = true + } + } + } + if !containerExists { + response.WriteError(http.StatusNotFound, fmt.Errorf("container %q not found in pod %q\n", containerName, podID)) + return + } + + if _, ok := response.ResponseWriter.(http.Flusher); !ok { + response.WriteError(http.StatusInternalServerError, fmt.Errorf("unable to convert %v into http.Flusher, cannot show logs\n", reflect.TypeOf(response))) + return + } + fw := flushwriter.Wrap(response.ResponseWriter) + if logOptions.LimitBytes != nil { + fw = limitwriter.New(fw, *logOptions.LimitBytes) + } + response.Header().Set("Transfer-Encoding", "chunked") + if err := s.host.GetKubeletContainerLogs(kubecontainer.GetPodFullName(pod), containerName, logOptions, fw, fw); err != nil { + if err != limitwriter.ErrMaximumWrite { + response.WriteError(http.StatusBadRequest, err) + } + return + } +} + +// encodePods creates an api.PodList object from pods and returns the encoded +// PodList. +func encodePods(pods []*api.Pod) (data []byte, err error) { + podList := new(api.PodList) + for _, pod := range pods { + podList.Items = append(podList.Items, *pod) + } + // TODO: this needs to be parameterized to the kubelet, not hardcoded. Depends on Kubelet + // as API server refactor. + // TODO: Locked to v1, needs to be made generic + codec := api.Codecs.LegacyCodec(unversioned.GroupVersion{Group: api.GroupName, Version: "v1"}) + return runtime.Encode(codec, podList) +} + +// getPods returns a list of pods bound to the Kubelet and their spec. 
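+// A hypothetical client-side sketch of consuming this endpoint; the
+// read-only port 10255 is an assumption, not something defined in this file:
+//
+//	resp, err := http.Get("http://127.0.0.1:10255/pods")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer resp.Body.Close()
+//	var list v1.PodList
+//	if err := json.NewDecoder(resp.Body).Decode(&list); err != nil {
+//		log.Fatal(err)
+//	}
+//	for _, p := range list.Items {
+//		fmt.Printf("%s/%s\n", p.Namespace, p.Name)
+//	}
+//
+// The body decodes as a v1 PodList because encodePods above pins the legacy
+// v1 codec.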
+func (s *Server) getPods(request *restful.Request, response *restful.Response) { + pods := s.host.GetPods() + data, err := encodePods(pods) + if err != nil { + response.WriteError(http.StatusInternalServerError, err) + return + } + writeJsonResponse(response, data) +} + +// getRunningPods returns a list of pods running on Kubelet. The list is +// provided by the container runtime, and is different from the list returned +// by getPods, which is a set of desired pods to run. +func (s *Server) getRunningPods(request *restful.Request, response *restful.Response) { + pods, err := s.host.GetRunningPods() + if err != nil { + response.WriteError(http.StatusInternalServerError, err) + return + } + data, err := encodePods(pods) + if err != nil { + response.WriteError(http.StatusInternalServerError, err) + return + } + writeJsonResponse(response, data) +} + +// getLogs handles logs requests against the Kubelet. +func (s *Server) getLogs(request *restful.Request, response *restful.Response) { + s.host.ServeLogs(response, request.Request) +} + +// getSpec handles spec requests against the Kubelet. +func (s *Server) getSpec(request *restful.Request, response *restful.Response) { + info, err := s.host.GetCachedMachineInfo() + if err != nil { + response.WriteError(http.StatusInternalServerError, err) + return + } + response.WriteEntity(info) +} + +func getContainerCoordinates(request *restful.Request) (namespace, pod string, uid types.UID, container string) { + namespace = request.PathParameter("podNamespace") + pod = request.PathParameter("podID") + if uidStr := request.PathParameter("uid"); uidStr != "" { + uid = types.UID(uidStr) + } + container = request.PathParameter("containerName") + return +} + +// getAttach handles requests to attach to a container. +func (s *Server) getAttach(request *restful.Request, response *restful.Response) { + podNamespace, podID, uid, container := getContainerCoordinates(request) + pod, ok := s.host.GetPodByName(podNamespace, podID) + if !ok { + response.WriteError(http.StatusNotFound, fmt.Errorf("pod does not exist")) + return + } + + remotecommand.ServeAttach(response.ResponseWriter, + request.Request, + s.host, + kubecontainer.GetPodFullName(pod), + uid, + container, + s.host.StreamingConnectionIdleTimeout(), + remotecommand.DefaultStreamCreationTimeout, + remotecommand.SupportedStreamingProtocols) +} + +// getExec handles requests to run a command inside a container. +func (s *Server) getExec(request *restful.Request, response *restful.Response) { + podNamespace, podID, uid, container := getContainerCoordinates(request) + pod, ok := s.host.GetPodByName(podNamespace, podID) + if !ok { + response.WriteError(http.StatusNotFound, fmt.Errorf("pod does not exist")) + return + } + + remotecommand.ServeExec(response.ResponseWriter, + request.Request, + s.host, + kubecontainer.GetPodFullName(pod), + uid, + container, + s.host.StreamingConnectionIdleTimeout(), + remotecommand.DefaultStreamCreationTimeout, + remotecommand.SupportedStreamingProtocols) +} + +// getRun handles requests to run a command inside a container. 
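+// For illustration (names hypothetical), the /run route registered above
+// maps a request like
+//
+//	resp, err := http.Post(
+//		"http://127.0.0.1:10250/run/default/mypod/mycontainer?cmd=ls%20-a",
+//		"", nil)
+//
+// onto RunInContainer with cmd = []string{"ls", "-a"}. Note that the cmd
+// query parameter is split on single spaces, so arguments that themselves
+// contain spaces cannot be expressed.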
+func (s *Server) getRun(request *restful.Request, response *restful.Response) { + podNamespace, podID, uid, container := getContainerCoordinates(request) + pod, ok := s.host.GetPodByName(podNamespace, podID) + if !ok { + response.WriteError(http.StatusNotFound, fmt.Errorf("pod does not exist")) + return + } + command := strings.Split(request.QueryParameter("cmd"), " ") + data, err := s.host.RunInContainer(kubecontainer.GetPodFullName(pod), uid, container, command) + if err != nil { + response.WriteError(http.StatusInternalServerError, err) + return + } + writeJsonResponse(response, data) +} + +func getPodCoordinates(request *restful.Request) (namespace, pod string, uid types.UID) { + namespace = request.PathParameter("podNamespace") + pod = request.PathParameter("podID") + if uidStr := request.PathParameter("uid"); uidStr != "" { + uid = types.UID(uidStr) + } + return +} + +// Derived from go-restful writeJSON. +func writeJsonResponse(response *restful.Response, data []byte) { + if data == nil { + response.WriteHeader(http.StatusOK) + // do not write a nil representation + return + } + response.Header().Set(restful.HEADER_ContentType, restful.MIME_JSON) + response.WriteHeader(http.StatusOK) + if _, err := response.Write(data); err != nil { + glog.Errorf("Error writing response: %v", err) + } +} + +// PortForwarder knows how to forward content from a data stream to/from a port +// in a pod. +type PortForwarder interface { + // PortForwarder copies data between a data stream and a port in a pod. + PortForward(name string, uid types.UID, port uint16, stream io.ReadWriteCloser) error +} + +// getPortForward handles a new restful port forward request. It determines the +// pod name and uid and then calls ServePortForward. +func (s *Server) getPortForward(request *restful.Request, response *restful.Response) { + podNamespace, podID, uid := getPodCoordinates(request) + pod, ok := s.host.GetPodByName(podNamespace, podID) + if !ok { + response.WriteError(http.StatusNotFound, fmt.Errorf("pod does not exist")) + return + } + + podName := kubecontainer.GetPodFullName(pod) + + ServePortForward(response.ResponseWriter, request.Request, s.host, podName, uid, s.host.StreamingConnectionIdleTimeout(), remotecommand.DefaultStreamCreationTimeout) +} + +// ServePortForward handles a port forwarding request. A single request is +// kept alive as long as the client is still alive and the connection has not +// been timed out due to idleness. This function handles multiple forwarded +// connections; i.e., multiple `curl http://localhost:8888/` requests will be +// handled by a single invocation of ServePortForward. 
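+// Client side, each forwarded connection presents an error stream and a data
+// stream sharing a request ID. A hedged sketch, where conn is an assumed
+// httpstream.Connection obtained from the SPDY upgrade:
+//
+//	headers := http.Header{}
+//	headers.Set(api.PortHeader, "8080")
+//	headers.Set(api.PortForwardRequestIDHeader, "0")
+//	headers.Set(api.StreamType, api.StreamTypeError)
+//	errorStream, _ := conn.CreateStream(headers)
+//	headers.Set(api.StreamType, api.StreamTypeData)
+//	dataStream, _ := conn.CreateStream(headers)
+//
+// The header names and validation rules are those enforced by
+// portForwardStreamReceived below.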
+func ServePortForward(w http.ResponseWriter, req *http.Request, portForwarder PortForwarder, podName string, uid types.UID, idleTimeout time.Duration, streamCreationTimeout time.Duration) {
+	supportedPortForwardProtocols := []string{portforward.PortForwardProtocolV1Name}
+	_, err := httpstream.Handshake(req, w, supportedPortForwardProtocols)
+	// negotiated protocol isn't currently used server side, but could be in the future
+	if err != nil {
+		// Handshake writes the error to the client
+		utilruntime.HandleError(err)
+		return
+	}
+
+	streamChan := make(chan httpstream.Stream, 1)
+
+	glog.V(5).Infof("Upgrading port forward response")
+	upgrader := spdy.NewResponseUpgrader()
+	conn := upgrader.UpgradeResponse(w, req, portForwardStreamReceived(streamChan))
+	if conn == nil {
+		return
+	}
+	defer conn.Close()
+
+	glog.V(5).Infof("(conn=%p) setting port forwarding streaming connection idle timeout to %v", conn, idleTimeout)
+	conn.SetIdleTimeout(idleTimeout)
+
+	h := &portForwardStreamHandler{
+		conn:                  conn,
+		streamChan:            streamChan,
+		streamPairs:           make(map[string]*portForwardStreamPair),
+		streamCreationTimeout: streamCreationTimeout,
+		pod:                   podName,
+		uid:                   uid,
+		forwarder:             portForwarder,
+	}
+	h.run()
+}
+
+// portForwardStreamReceived is the httpstream.NewStreamHandler for port
+// forward streams. It checks each stream's port and stream type headers,
+// rejecting any streams with missing or invalid values. Each valid
+// stream is sent to the streams channel.
+func portForwardStreamReceived(streams chan httpstream.Stream) func(httpstream.Stream, <-chan struct{}) error {
+	return func(stream httpstream.Stream, replySent <-chan struct{}) error {
+		// make sure it has a valid port header
+		portString := stream.Headers().Get(api.PortHeader)
+		if len(portString) == 0 {
+			return fmt.Errorf("%q header is required", api.PortHeader)
+		}
+		port, err := strconv.ParseUint(portString, 10, 16)
+		if err != nil {
+			return fmt.Errorf("unable to parse %q as a port: %v", portString, err)
+		}
+		if port < 1 {
+			return fmt.Errorf("port %q must be > 0", portString)
+		}
+
+		// make sure it has a valid stream type header
+		streamType := stream.Headers().Get(api.StreamType)
+		if len(streamType) == 0 {
+			return fmt.Errorf("%q header is required", api.StreamType)
+		}
+		if streamType != api.StreamTypeError && streamType != api.StreamTypeData {
+			return fmt.Errorf("invalid stream type %q", streamType)
+		}
+
+		streams <- stream
+		return nil
+	}
+}
+
+// portForwardStreamHandler is capable of processing multiple port forward
+// requests over a single httpstream.Connection.
+type portForwardStreamHandler struct {
+	conn                  httpstream.Connection
+	streamChan            chan httpstream.Stream
+	streamPairsLock       sync.RWMutex
+	streamPairs           map[string]*portForwardStreamPair
+	streamCreationTimeout time.Duration
+	pod                   string
+	uid                   types.UID
+	forwarder             PortForwarder
+}
+
+// getStreamPair returns a portForwardStreamPair for requestID. This creates a
+// new pair if one does not yet exist for the requestID. The returned bool is
+// true if the pair was created.
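+// The returned flag lets the caller start exactly one monitor goroutine per
+// request ID, as run does below:
+//
+//	p, created := h.getStreamPair(requestID)
+//	if created {
+//		go h.monitorStreamPair(p, time.After(h.streamCreationTimeout))
+//	}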
+func (h *portForwardStreamHandler) getStreamPair(requestID string) (*portForwardStreamPair, bool) { + h.streamPairsLock.Lock() + defer h.streamPairsLock.Unlock() + + if p, ok := h.streamPairs[requestID]; ok { + glog.V(5).Infof("(conn=%p, request=%s) found existing stream pair", h.conn, requestID) + return p, false + } + + glog.V(5).Infof("(conn=%p, request=%s) creating new stream pair", h.conn, requestID) + + p := newPortForwardPair(requestID) + h.streamPairs[requestID] = p + + return p, true +} + +// monitorStreamPair waits for the pair to receive both its error and data +// streams, or for the timeout to expire (whichever happens first), and then +// removes the pair. +func (h *portForwardStreamHandler) monitorStreamPair(p *portForwardStreamPair, timeout <-chan time.Time) { + select { + case <-timeout: + err := fmt.Errorf("(conn=%v, request=%s) timed out waiting for streams", h.conn, p.requestID) + utilruntime.HandleError(err) + p.printError(err.Error()) + case <-p.complete: + glog.V(5).Infof("(conn=%v, request=%s) successfully received error and data streams", h.conn, p.requestID) + } + h.removeStreamPair(p.requestID) +} + +// hasStreamPair returns a bool indicating if a stream pair for requestID +// exists. +func (h *portForwardStreamHandler) hasStreamPair(requestID string) bool { + h.streamPairsLock.RLock() + defer h.streamPairsLock.RUnlock() + + _, ok := h.streamPairs[requestID] + return ok +} + +// removeStreamPair removes the stream pair identified by requestID from streamPairs. +func (h *portForwardStreamHandler) removeStreamPair(requestID string) { + h.streamPairsLock.Lock() + defer h.streamPairsLock.Unlock() + + delete(h.streamPairs, requestID) +} + +// requestID returns the request id for stream. +func (h *portForwardStreamHandler) requestID(stream httpstream.Stream) string { + requestID := stream.Headers().Get(api.PortForwardRequestIDHeader) + if len(requestID) == 0 { + glog.V(5).Infof("(conn=%p) stream received without %s header", h.conn, api.PortForwardRequestIDHeader) + // If we get here, it's because the connection came from an older client + // that isn't generating the request id header + // (https://github.com/kubernetes/kubernetes/blob/843134885e7e0b360eb5441e85b1410a8b1a7a0c/pkg/client/unversioned/portforward/portforward.go#L258-L287) + // + // This is a best-effort attempt at supporting older clients. + // + // When there aren't concurrent new forwarded connections, each connection + // will have a pair of streams (data, error), and the stream IDs will be + // consecutive odd numbers, e.g. 1 and 3 for the first connection. Convert + // the stream ID into a pseudo-request id by taking the stream type and + // using id = stream.Identifier() when the stream type is error, + // and id = stream.Identifier() - 2 when it's data. + // + // NOTE: this only works when there are not concurrent new streams from + // multiple forwarded connections; it's a best-effort attempt at supporting + // old clients that don't generate request ids. If there are concurrent + // new connections, it's possible that 1 connection gets streams whose IDs + // are not consecutive (e.g. 5 and 9 instead of 5 and 7). 
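+		//
+		// Concretely: if an old client's first connection uses stream ID 1
+		// for its error stream and 3 for its data stream, both derive the
+		// same pseudo-request ID "1" (1 for the error stream, 3-2=1 for the
+		// data stream), as the switch below computes.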
+ streamType := stream.Headers().Get(api.StreamType) + switch streamType { + case api.StreamTypeError: + requestID = strconv.Itoa(int(stream.Identifier())) + case api.StreamTypeData: + requestID = strconv.Itoa(int(stream.Identifier()) - 2) + } + + glog.V(5).Infof("(conn=%p) automatically assigning request ID=%q from stream type=%s, stream ID=%d", h.conn, requestID, streamType, stream.Identifier()) + } + return requestID +} + +// run is the main loop for the portForwardStreamHandler. It processes new +// streams, invoking portForward for each complete stream pair. The loop exits +// when the httpstream.Connection is closed. +func (h *portForwardStreamHandler) run() { + glog.V(5).Infof("(conn=%p) waiting for port forward streams", h.conn) +Loop: + for { + select { + case <-h.conn.CloseChan(): + glog.V(5).Infof("(conn=%p) upgraded connection closed", h.conn) + break Loop + case stream := <-h.streamChan: + requestID := h.requestID(stream) + streamType := stream.Headers().Get(api.StreamType) + glog.V(5).Infof("(conn=%p, request=%s) received new stream of type %s", h.conn, requestID, streamType) + + p, created := h.getStreamPair(requestID) + if created { + go h.monitorStreamPair(p, time.After(h.streamCreationTimeout)) + } + if complete, err := p.add(stream); err != nil { + msg := fmt.Sprintf("error processing stream for request %s: %v", requestID, err) + utilruntime.HandleError(errors.New(msg)) + p.printError(msg) + } else if complete { + go h.portForward(p) + } + } + } +} + +// portForward invokes the portForwardStreamHandler's forwarder.PortForward +// function for the given stream pair. +func (h *portForwardStreamHandler) portForward(p *portForwardStreamPair) { + defer p.dataStream.Close() + defer p.errorStream.Close() + + portString := p.dataStream.Headers().Get(api.PortHeader) + port, _ := strconv.ParseUint(portString, 10, 16) + + glog.V(5).Infof("(conn=%p, request=%s) invoking forwarder.PortForward for port %s", h.conn, p.requestID, portString) + err := h.forwarder.PortForward(h.pod, h.uid, uint16(port), p.dataStream) + glog.V(5).Infof("(conn=%p, request=%s) done invoking forwarder.PortForward for port %s", h.conn, p.requestID, portString) + + if err != nil { + msg := fmt.Errorf("error forwarding port %d to pod %s, uid %v: %v", port, h.pod, h.uid, err) + utilruntime.HandleError(msg) + fmt.Fprint(p.errorStream, msg.Error()) + } +} + +// portForwardStreamPair represents the error and data streams for a port +// forwarding request. +type portForwardStreamPair struct { + lock sync.RWMutex + requestID string + dataStream httpstream.Stream + errorStream httpstream.Stream + complete chan struct{} +} + +// newPortForwardPair creates a new portForwardStreamPair. +func newPortForwardPair(requestID string) *portForwardStreamPair { + return &portForwardStreamPair{ + requestID: requestID, + complete: make(chan struct{}), + } +} + +// add adds the stream to the portForwardStreamPair. If the pair already +// contains a stream for the new stream's type, an error is returned. add +// returns true if both the data and error streams for this pair have been +// received. 
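+// A sketch of the resulting lifecycle, with errorStream and dataStream
+// standing in for streams that carry valid StreamType headers:
+//
+//	p := newPortForwardPair("1")
+//	done, _ := p.add(errorStream) // done == false: only the error stream is set
+//	done, _ = p.add(dataStream)   // done == true: p.complete has been closed
+//	<-p.complete                  // returns immediately once both arrived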
+func (p *portForwardStreamPair) add(stream httpstream.Stream) (bool, error) { + p.lock.Lock() + defer p.lock.Unlock() + + switch stream.Headers().Get(api.StreamType) { + case api.StreamTypeError: + if p.errorStream != nil { + return false, errors.New("error stream already assigned") + } + p.errorStream = stream + case api.StreamTypeData: + if p.dataStream != nil { + return false, errors.New("data stream already assigned") + } + p.dataStream = stream + } + + complete := p.errorStream != nil && p.dataStream != nil + if complete { + close(p.complete) + } + return complete, nil +} + +// printError writes s to p.errorStream if p.errorStream has been set. +func (p *portForwardStreamPair) printError(s string) { + p.lock.RLock() + defer p.lock.RUnlock() + if p.errorStream != nil { + fmt.Fprint(p.errorStream, s) + } +} + +// ServeHTTP responds to HTTP requests on the Kubelet. +func (s *Server) ServeHTTP(w http.ResponseWriter, req *http.Request) { + defer httplog.NewLogged(req, &w).StacktraceWhen( + httplog.StatusIsNot( + http.StatusOK, + http.StatusMovedPermanently, + http.StatusTemporaryRedirect, + http.StatusNotFound, + http.StatusSwitchingProtocols, + ), + ).Log() + s.restfulCont.ServeHTTP(w, req) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/server/server_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/server/server_test.go new file mode 100644 index 000000000000..2df3088fda9b --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/server/server_test.go @@ -0,0 +1,1713 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package server + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "net/http/httptest" + "net/http/httputil" + "reflect" + "strconv" + "strings" + "testing" + "time" + + cadvisorapi "github.com/google/cadvisor/info/v1" + cadvisorapiv2 "github.com/google/cadvisor/info/v2" + "k8s.io/kubernetes/pkg/api" + apierrs "k8s.io/kubernetes/pkg/api/errors" + "k8s.io/kubernetes/pkg/auth/authorizer" + "k8s.io/kubernetes/pkg/auth/user" + "k8s.io/kubernetes/pkg/kubelet/cm" + kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" + kubecontainertesting "k8s.io/kubernetes/pkg/kubelet/container/testing" + "k8s.io/kubernetes/pkg/kubelet/server/stats" + kubetypes "k8s.io/kubernetes/pkg/kubelet/types" + "k8s.io/kubernetes/pkg/types" + "k8s.io/kubernetes/pkg/util/httpstream" + "k8s.io/kubernetes/pkg/util/httpstream/spdy" + "k8s.io/kubernetes/pkg/util/sets" + "k8s.io/kubernetes/pkg/volume" +) + +type fakeKubelet struct { + podByNameFunc func(namespace, name string) (*api.Pod, bool) + containerInfoFunc func(podFullName string, uid types.UID, containerName string, req *cadvisorapi.ContainerInfoRequest) (*cadvisorapi.ContainerInfo, error) + rawInfoFunc func(query *cadvisorapi.ContainerInfoRequest) (map[string]*cadvisorapi.ContainerInfo, error) + machineInfoFunc func() (*cadvisorapi.MachineInfo, error) + podsFunc func() []*api.Pod + runningPodsFunc func() ([]*api.Pod, error) + logFunc func(w http.ResponseWriter, req *http.Request) + runFunc func(podFullName string, uid types.UID, containerName string, cmd []string) ([]byte, error) + execFunc func(pod string, uid types.UID, container string, cmd []string, in io.Reader, out, err io.WriteCloser, tty bool) error + attachFunc func(pod string, uid types.UID, container string, in io.Reader, out, err io.WriteCloser, tty bool) error + portForwardFunc func(name string, uid types.UID, port uint16, stream io.ReadWriteCloser) error + containerLogsFunc func(podFullName, containerName string, logOptions *api.PodLogOptions, stdout, stderr io.Writer) error + streamingConnectionIdleTimeoutFunc func() time.Duration + hostnameFunc func() string + resyncInterval time.Duration + loopEntryTime time.Time + plegHealth bool +} + +func (fk *fakeKubelet) ResyncInterval() time.Duration { + return fk.resyncInterval +} + +func (fk *fakeKubelet) LatestLoopEntryTime() time.Time { + return fk.loopEntryTime +} + +func (fk *fakeKubelet) GetPodByName(namespace, name string) (*api.Pod, bool) { + return fk.podByNameFunc(namespace, name) +} + +func (fk *fakeKubelet) GetContainerInfo(podFullName string, uid types.UID, containerName string, req *cadvisorapi.ContainerInfoRequest) (*cadvisorapi.ContainerInfo, error) { + return fk.containerInfoFunc(podFullName, uid, containerName, req) +} + +func (fk *fakeKubelet) GetRawContainerInfo(containerName string, req *cadvisorapi.ContainerInfoRequest, subcontainers bool) (map[string]*cadvisorapi.ContainerInfo, error) { + return fk.rawInfoFunc(req) +} + +func (fk *fakeKubelet) GetCachedMachineInfo() (*cadvisorapi.MachineInfo, error) { + return fk.machineInfoFunc() +} + +func (fk *fakeKubelet) GetPods() []*api.Pod { + return fk.podsFunc() +} + +func (fk *fakeKubelet) GetRunningPods() ([]*api.Pod, error) { + return fk.runningPodsFunc() +} + +func (fk *fakeKubelet) ServeLogs(w http.ResponseWriter, req *http.Request) { + fk.logFunc(w, req) +} + +func (fk *fakeKubelet) GetKubeletContainerLogs(podFullName, containerName string, logOptions *api.PodLogOptions, stdout, stderr io.Writer) error { + return 
fk.containerLogsFunc(podFullName, containerName, logOptions, stdout, stderr)
+}
+
+func (fk *fakeKubelet) GetHostname() string {
+	return fk.hostnameFunc()
+}
+
+func (fk *fakeKubelet) RunInContainer(podFullName string, uid types.UID, containerName string, cmd []string) ([]byte, error) {
+	return fk.runFunc(podFullName, uid, containerName, cmd)
+}
+
+func (fk *fakeKubelet) ExecInContainer(name string, uid types.UID, container string, cmd []string, in io.Reader, out, err io.WriteCloser, tty bool) error {
+	return fk.execFunc(name, uid, container, cmd, in, out, err, tty)
+}
+
+func (fk *fakeKubelet) AttachContainer(name string, uid types.UID, container string, in io.Reader, out, err io.WriteCloser, tty bool) error {
+	return fk.attachFunc(name, uid, container, in, out, err, tty)
+}
+
+func (fk *fakeKubelet) PortForward(name string, uid types.UID, port uint16, stream io.ReadWriteCloser) error {
+	return fk.portForwardFunc(name, uid, port, stream)
+}
+
+func (fk *fakeKubelet) StreamingConnectionIdleTimeout() time.Duration {
+	return fk.streamingConnectionIdleTimeoutFunc()
+}
+
+func (fk *fakeKubelet) PLEGHealthCheck() (bool, error) { return fk.plegHealth, nil }
+
+// Unused functions
+func (_ *fakeKubelet) GetContainerInfoV2(_ string, _ cadvisorapiv2.RequestOptions) (map[string]cadvisorapiv2.ContainerInfo, error) {
+	return nil, nil
+}
+
+func (_ *fakeKubelet) ImagesFsInfo() (cadvisorapiv2.FsInfo, error) {
+	return cadvisorapiv2.FsInfo{}, fmt.Errorf("Unsupported Operation ImagesFsInfo")
+}
+
+func (_ *fakeKubelet) RootFsInfo() (cadvisorapiv2.FsInfo, error) {
+	return cadvisorapiv2.FsInfo{}, fmt.Errorf("Unsupported Operation RootFsInfo")
+}
+
+func (_ *fakeKubelet) GetNode() (*api.Node, error)  { return nil, nil }
+func (_ *fakeKubelet) GetNodeConfig() cm.NodeConfig { return cm.NodeConfig{} }
+
+func (fk *fakeKubelet) ListVolumesForPod(podUID types.UID) (map[string]volume.Volume, bool) {
+	return map[string]volume.Volume{}, true
+}
+
+type fakeAuth struct {
+	authenticateFunc func(*http.Request) (user.Info, bool, error)
+	attributesFunc   func(user.Info, *http.Request) authorizer.Attributes
+	authorizeFunc    func(authorizer.Attributes) (err error)
+}
+
+func (f *fakeAuth) AuthenticateRequest(req *http.Request) (user.Info, bool, error) {
+	return f.authenticateFunc(req)
+}
+func (f *fakeAuth) GetRequestAttributes(u user.Info, req *http.Request) authorizer.Attributes {
+	return f.attributesFunc(u, req)
+}
+func (f *fakeAuth) Authorize(a authorizer.Attributes) (err error) {
+	return f.authorizeFunc(a)
+}
+
+type serverTestFramework struct {
+	serverUnderTest *Server
+	fakeKubelet     *fakeKubelet
+	fakeAuth        *fakeAuth
+	testHTTPServer  *httptest.Server
+}
+
+func newServerTest() *serverTestFramework {
+	fw := &serverTestFramework{}
+	fw.fakeKubelet = &fakeKubelet{
+		hostnameFunc: func() string {
+			return "127.0.0.1"
+		},
+		podByNameFunc: func(namespace, name string) (*api.Pod, bool) {
+			return &api.Pod{
+				ObjectMeta: api.ObjectMeta{
+					Namespace: namespace,
+					Name:      name,
+				},
+			}, true
+		},
+		plegHealth: true,
+	}
+	fw.fakeAuth = &fakeAuth{
+		authenticateFunc: func(req *http.Request) (user.Info, bool, error) {
+			return &user.DefaultInfo{Name: "test"}, true, nil
+		},
+		attributesFunc: func(u user.Info, req *http.Request) authorizer.Attributes {
+			return &authorizer.AttributesRecord{User: u}
+		},
+		authorizeFunc: func(a authorizer.Attributes) (err error) {
+			return nil
+		},
+	}
+	server := NewServer(
+		fw.fakeKubelet,
+		stats.NewResourceAnalyzer(fw.fakeKubelet, time.Minute, &kubecontainertesting.FakeRuntime{}),
+		fw.fakeAuth,
+		true,
+		&kubecontainertesting.Mock{})
+	fw.serverUnderTest = &server
+	fw.testHTTPServer = httptest.NewServer(fw.serverUnderTest)
+	return fw
+}
+
+// encodeJSON returns obj marshalled as a JSON string, panicking on any errors
+func encodeJSON(obj interface{}) string {
+	data, err := json.Marshal(obj)
+	if err != nil {
+		panic(err)
+	}
+	return string(data)
+}
+
+func readResp(resp *http.Response) (string, error) {
+	defer resp.Body.Close()
+	body, err := ioutil.ReadAll(resp.Body)
+	return string(body), err
+}
+
+// A helper function to return the correct pod name.
+func getPodName(name, namespace string) string {
+	if namespace == "" {
+		namespace = kubetypes.NamespaceDefault
+	}
+	return name + "_" + namespace
+}
+
+func TestContainerInfo(t *testing.T) {
+	fw := newServerTest()
+	defer fw.testHTTPServer.Close()
+	expectedInfo := &cadvisorapi.ContainerInfo{}
+	podID := "somepod"
+	expectedPodID := getPodName(podID, "")
+	expectedContainerName := "goodcontainer"
+	fw.fakeKubelet.containerInfoFunc = func(podID string, uid types.UID, containerName string, req *cadvisorapi.ContainerInfoRequest) (*cadvisorapi.ContainerInfo, error) {
+		if podID != expectedPodID || containerName != expectedContainerName {
+			return nil, fmt.Errorf("bad podID or containerName: podID=%v; containerName=%v", podID, containerName)
+		}
+		return expectedInfo, nil
+	}
+
+	resp, err := http.Get(fw.testHTTPServer.URL + fmt.Sprintf("/stats/%v/%v", podID, expectedContainerName))
+	if err != nil {
+		t.Fatalf("Got error GETing: %v", err)
+	}
+	defer resp.Body.Close()
+	var receivedInfo cadvisorapi.ContainerInfo
+	err = json.NewDecoder(resp.Body).Decode(&receivedInfo)
+	if err != nil {
+		t.Fatalf("received invalid json data: %v", err)
+	}
+	if !receivedInfo.Eq(expectedInfo) {
+		t.Errorf("received wrong data: %#v", receivedInfo)
+	}
+}
+
+func TestContainerInfoWithUidNamespace(t *testing.T) {
+	fw := newServerTest()
+	defer fw.testHTTPServer.Close()
+	expectedInfo := &cadvisorapi.ContainerInfo{}
+	podID := "somepod"
+	expectedNamespace := "custom"
+	expectedPodID := getPodName(podID, expectedNamespace)
+	expectedContainerName := "goodcontainer"
+	expectedUid := "9b01b80f-8fb4-11e4-95ab-4200af06647"
+	fw.fakeKubelet.containerInfoFunc = func(podID string, uid types.UID, containerName string, req *cadvisorapi.ContainerInfoRequest) (*cadvisorapi.ContainerInfo, error) {
+		if podID != expectedPodID || string(uid) != expectedUid || containerName != expectedContainerName {
+			return nil, fmt.Errorf("bad podID or uid or containerName: podID=%v; uid=%v; containerName=%v", podID, uid, containerName)
+		}
+		return expectedInfo, nil
+	}
+
+	resp, err := http.Get(fw.testHTTPServer.URL + fmt.Sprintf("/stats/%v/%v/%v/%v", expectedNamespace, podID, expectedUid, expectedContainerName))
+	if err != nil {
+		t.Fatalf("Got error GETing: %v", err)
+	}
+	defer resp.Body.Close()
+	var receivedInfo cadvisorapi.ContainerInfo
+	err = json.NewDecoder(resp.Body).Decode(&receivedInfo)
+	if err != nil {
+		t.Fatalf("received invalid json data: %v", err)
+	}
+	if !receivedInfo.Eq(expectedInfo) {
+		t.Errorf("received wrong data: %#v", receivedInfo)
+	}
+}
+
+func TestContainerNotFound(t *testing.T) {
+	fw := newServerTest()
+	defer fw.testHTTPServer.Close()
+	podID := "somepod"
+	expectedNamespace := "custom"
+	expectedContainerName := "slowstartcontainer"
+	expectedUid := "9b01b80f-8fb4-11e4-95ab-4200af06647"
+	fw.fakeKubelet.containerInfoFunc = func(podID string, uid types.UID, containerName string, req *cadvisorapi.ContainerInfoRequest)
(*cadvisorapi.ContainerInfo, error) { + return nil, kubecontainer.ErrContainerNotFound + } + resp, err := http.Get(fw.testHTTPServer.URL + fmt.Sprintf("/stats/%v/%v/%v/%v", expectedNamespace, podID, expectedUid, expectedContainerName)) + if err != nil { + t.Fatalf("Got error GETing: %v", err) + } + if resp.StatusCode != http.StatusNotFound { + t.Fatalf("Received status %d expecting %d", resp.StatusCode, http.StatusNotFound) + } + defer resp.Body.Close() +} + +func TestRootInfo(t *testing.T) { + fw := newServerTest() + defer fw.testHTTPServer.Close() + expectedInfo := &cadvisorapi.ContainerInfo{ + ContainerReference: cadvisorapi.ContainerReference{ + Name: "/", + }, + } + fw.fakeKubelet.rawInfoFunc = func(req *cadvisorapi.ContainerInfoRequest) (map[string]*cadvisorapi.ContainerInfo, error) { + return map[string]*cadvisorapi.ContainerInfo{ + expectedInfo.Name: expectedInfo, + }, nil + } + + resp, err := http.Get(fw.testHTTPServer.URL + "/stats") + if err != nil { + t.Fatalf("Got error GETing: %v", err) + } + defer resp.Body.Close() + var receivedInfo cadvisorapi.ContainerInfo + err = json.NewDecoder(resp.Body).Decode(&receivedInfo) + if err != nil { + t.Fatalf("received invalid json data: %v", err) + } + if !receivedInfo.Eq(expectedInfo) { + t.Errorf("received wrong data: %#v, expected %#v", receivedInfo, expectedInfo) + } +} + +func TestSubcontainerContainerInfo(t *testing.T) { + fw := newServerTest() + defer fw.testHTTPServer.Close() + const kubeletContainer = "/kubelet" + const kubeletSubContainer = "/kubelet/sub" + expectedInfo := map[string]*cadvisorapi.ContainerInfo{ + kubeletContainer: { + ContainerReference: cadvisorapi.ContainerReference{ + Name: kubeletContainer, + }, + }, + kubeletSubContainer: { + ContainerReference: cadvisorapi.ContainerReference{ + Name: kubeletSubContainer, + }, + }, + } + fw.fakeKubelet.rawInfoFunc = func(req *cadvisorapi.ContainerInfoRequest) (map[string]*cadvisorapi.ContainerInfo, error) { + return expectedInfo, nil + } + + request := fmt.Sprintf("{\"containerName\":%q, \"subcontainers\": true}", kubeletContainer) + resp, err := http.Post(fw.testHTTPServer.URL+"/stats/container", "application/json", bytes.NewBuffer([]byte(request))) + if err != nil { + t.Fatalf("Got error GETing: %v", err) + } + defer resp.Body.Close() + var receivedInfo map[string]*cadvisorapi.ContainerInfo + err = json.NewDecoder(resp.Body).Decode(&receivedInfo) + if err != nil { + t.Fatalf("Received invalid json data: %v", err) + } + if len(receivedInfo) != len(expectedInfo) { + t.Errorf("Received wrong data: %#v, expected %#v", receivedInfo, expectedInfo) + } + + for _, containerName := range []string{kubeletContainer, kubeletSubContainer} { + if _, ok := receivedInfo[containerName]; !ok { + t.Errorf("Expected container %q to be present in result: %#v", containerName, receivedInfo) + } + if !receivedInfo[containerName].Eq(expectedInfo[containerName]) { + t.Errorf("Invalid result for %q: Expected %#v, received %#v", containerName, expectedInfo[containerName], receivedInfo[containerName]) + } + } +} + +func TestMachineInfo(t *testing.T) { + fw := newServerTest() + defer fw.testHTTPServer.Close() + expectedInfo := &cadvisorapi.MachineInfo{ + NumCores: 4, + MemoryCapacity: 1024, + } + fw.fakeKubelet.machineInfoFunc = func() (*cadvisorapi.MachineInfo, error) { + return expectedInfo, nil + } + + resp, err := http.Get(fw.testHTTPServer.URL + "/spec") + if err != nil { + t.Fatalf("Got error GETing: %v", err) + } + defer resp.Body.Close() + var receivedInfo cadvisorapi.MachineInfo + err = 
json.NewDecoder(resp.Body).Decode(&receivedInfo) + if err != nil { + t.Fatalf("received invalid json data: %v", err) + } + if !reflect.DeepEqual(&receivedInfo, expectedInfo) { + t.Errorf("received wrong data: %#v", receivedInfo) + } +} + +func TestServeLogs(t *testing.T) { + fw := newServerTest() + defer fw.testHTTPServer.Close() + + content := string(`
<pre><a href="kubelet.log">kubelet.log</a><a href="google.log">google.log</a></pre>
`) + + fw.fakeKubelet.logFunc = func(w http.ResponseWriter, req *http.Request) { + w.WriteHeader(http.StatusOK) + w.Header().Add("Content-Type", "text/html") + w.Write([]byte(content)) + } + + resp, err := http.Get(fw.testHTTPServer.URL + "/logs/") + if err != nil { + t.Fatalf("Got error GETing: %v", err) + } + defer resp.Body.Close() + + body, err := httputil.DumpResponse(resp, true) + if err != nil { + // copying the response body did not work + t.Errorf("Cannot copy resp: %#v", err) + } + result := string(body) + if !strings.Contains(result, "kubelet.log") || !strings.Contains(result, "google.log") { + t.Errorf("Received wrong data: %s", result) + } +} + +func TestServeRunInContainer(t *testing.T) { + fw := newServerTest() + defer fw.testHTTPServer.Close() + output := "foo bar" + podNamespace := "other" + podName := "foo" + expectedPodName := getPodName(podName, podNamespace) + expectedContainerName := "baz" + expectedCommand := "ls -a" + fw.fakeKubelet.runFunc = func(podFullName string, uid types.UID, containerName string, cmd []string) ([]byte, error) { + if podFullName != expectedPodName { + t.Errorf("expected %s, got %s", expectedPodName, podFullName) + } + if containerName != expectedContainerName { + t.Errorf("expected %s, got %s", expectedContainerName, containerName) + } + if strings.Join(cmd, " ") != expectedCommand { + t.Errorf("expected: %s, got %v", expectedCommand, cmd) + } + + return []byte(output), nil + } + + resp, err := http.Post(fw.testHTTPServer.URL+"/run/"+podNamespace+"/"+podName+"/"+expectedContainerName+"?cmd=ls%20-a", "", nil) + + if err != nil { + t.Fatalf("Got error POSTing: %v", err) + } + defer resp.Body.Close() + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + // copying the response body did not work + t.Errorf("Cannot copy resp: %#v", err) + } + result := string(body) + if result != output { + t.Errorf("expected %s, got %s", output, result) + } +} + +func TestServeRunInContainerWithUID(t *testing.T) { + fw := newServerTest() + defer fw.testHTTPServer.Close() + output := "foo bar" + podNamespace := "other" + podName := "foo" + expectedPodName := getPodName(podName, podNamespace) + expectedUID := "7e00838d_-_3523_-_11e4_-_8421_-_42010af0a720" + expectedContainerName := "baz" + expectedCommand := "ls -a" + fw.fakeKubelet.runFunc = func(podFullName string, uid types.UID, containerName string, cmd []string) ([]byte, error) { + if podFullName != expectedPodName { + t.Errorf("expected %s, got %s", expectedPodName, podFullName) + } + if string(uid) != expectedUID { + t.Errorf("expected %s, got %s", expectedUID, uid) + } + if containerName != expectedContainerName { + t.Errorf("expected %s, got %s", expectedContainerName, containerName) + } + if strings.Join(cmd, " ") != expectedCommand { + t.Errorf("expected: %s, got %v", expectedCommand, cmd) + } + + return []byte(output), nil + } + + resp, err := http.Post(fw.testHTTPServer.URL+"/run/"+podNamespace+"/"+podName+"/"+expectedUID+"/"+expectedContainerName+"?cmd=ls%20-a", "", nil) + + if err != nil { + t.Fatalf("Got error POSTing: %v", err) + } + defer resp.Body.Close() + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + // copying the response body did not work + t.Errorf("Cannot copy resp: %#v", err) + } + result := string(body) + if result != output { + t.Errorf("expected %s, got %s", output, result) + } +} + +func TestHealthCheck(t *testing.T) { + fw := newServerTest() + defer fw.testHTTPServer.Close() + fw.fakeKubelet.hostnameFunc = func() string { + return "127.0.0.1" + } + + // Test 
with correct hostname, Docker version + assertHealthIsOk(t, fw.testHTTPServer.URL+"/healthz") + + // Test with incorrect hostname + fw.fakeKubelet.hostnameFunc = func() string { + return "fake" + } + assertHealthIsOk(t, fw.testHTTPServer.URL+"/healthz") +} + +func assertHealthFails(t *testing.T, httpURL string, expectedErrorCode int) { + resp, err := http.Get(httpURL) + if err != nil { + t.Fatalf("Got error GETing: %v", err) + } + defer resp.Body.Close() + if resp.StatusCode != expectedErrorCode { + t.Errorf("expected status code %d, got %d", expectedErrorCode, resp.StatusCode) + } +} + +type authTestCase struct { + Method string + Path string +} + +func TestAuthFilters(t *testing.T) { + fw := newServerTest() + defer fw.testHTTPServer.Close() + + testcases := []authTestCase{} + + // This is a sanity check that the Handle->HandleWithFilter() delegation is working + // Ideally, these would move to registered web services and this list would get shorter + expectedPaths := []string{"/healthz", "/metrics"} + paths := sets.NewString(fw.serverUnderTest.restfulCont.RegisteredHandlePaths()...) + for _, expectedPath := range expectedPaths { + if !paths.Has(expectedPath) { + t.Errorf("Expected registered handle path %s was missing", expectedPath) + } + } + + // Test all the non-web-service handlers + for _, path := range fw.serverUnderTest.restfulCont.RegisteredHandlePaths() { + testcases = append(testcases, authTestCase{"GET", path}) + testcases = append(testcases, authTestCase{"POST", path}) + // Test subpaths for directory handlers + if strings.HasSuffix(path, "/") { + testcases = append(testcases, authTestCase{"GET", path + "foo"}) + testcases = append(testcases, authTestCase{"POST", path + "foo"}) + } + } + + // Test all the generated web-service paths + for _, ws := range fw.serverUnderTest.restfulCont.RegisteredWebServices() { + for _, r := range ws.Routes() { + testcases = append(testcases, authTestCase{r.Method, r.Path}) + } + } + + for _, tc := range testcases { + var ( + expectedUser = &user.DefaultInfo{Name: "test"} + expectedAttributes = &authorizer.AttributesRecord{User: expectedUser} + + calledAuthenticate = false + calledAuthorize = false + calledAttributes = false + ) + + fw.fakeAuth.authenticateFunc = func(req *http.Request) (user.Info, bool, error) { + calledAuthenticate = true + return expectedUser, true, nil + } + fw.fakeAuth.attributesFunc = func(u user.Info, req *http.Request) authorizer.Attributes { + calledAttributes = true + if u != expectedUser { + t.Fatalf("%s: expected user %v, got %v", tc.Path, expectedUser, u) + } + return expectedAttributes + } + fw.fakeAuth.authorizeFunc = func(a authorizer.Attributes) (err error) { + calledAuthorize = true + if a != expectedAttributes { + t.Fatalf("%s: expected attributes %v, got %v", tc.Path, expectedAttributes, a) + } + return errors.New("Forbidden") + } + + req, err := http.NewRequest(tc.Method, fw.testHTTPServer.URL+tc.Path, nil) + if err != nil { + t.Errorf("%s: unexpected error: %v", tc.Path, err) + continue + } + resp, err := http.DefaultClient.Do(req) + if err != nil { + t.Errorf("%s: unexpected error: %v", tc.Path, err) + continue + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusForbidden { + t.Errorf("%s: unexpected status code %d", tc.Path, resp.StatusCode) + continue + } + + if !calledAuthenticate { + t.Errorf("%s: Authenticate was not called", tc.Path) + continue + } + if !calledAttributes { + t.Errorf("%s: Attributes were not called", tc.Path) + continue + } + if !calledAuthorize { + t.Errorf("%s: 
Authorize was not called", tc.Path) + continue + } + } +} + +func TestAuthenticationFailure(t *testing.T) { + var ( + expectedUser = &user.DefaultInfo{Name: "test"} + expectedAttributes = &authorizer.AttributesRecord{User: expectedUser} + + calledAuthenticate = false + calledAuthorize = false + calledAttributes = false + ) + + fw := newServerTest() + defer fw.testHTTPServer.Close() + fw.fakeAuth.authenticateFunc = func(req *http.Request) (user.Info, bool, error) { + calledAuthenticate = true + return nil, false, nil + } + fw.fakeAuth.attributesFunc = func(u user.Info, req *http.Request) authorizer.Attributes { + calledAttributes = true + return expectedAttributes + } + fw.fakeAuth.authorizeFunc = func(a authorizer.Attributes) (err error) { + calledAuthorize = true + return errors.New("not allowed") + } + + assertHealthFails(t, fw.testHTTPServer.URL+"/healthz", http.StatusUnauthorized) + + if !calledAuthenticate { + t.Fatalf("Authenticate was not called") + } + if calledAttributes { + t.Fatalf("Attributes was called unexpectedly") + } + if calledAuthorize { + t.Fatalf("Authorize was called unexpectedly") + } +} + +func TestAuthorizationSuccess(t *testing.T) { + var ( + expectedUser = &user.DefaultInfo{Name: "test"} + expectedAttributes = &authorizer.AttributesRecord{User: expectedUser} + + calledAuthenticate = false + calledAuthorize = false + calledAttributes = false + ) + + fw := newServerTest() + defer fw.testHTTPServer.Close() + fw.fakeAuth.authenticateFunc = func(req *http.Request) (user.Info, bool, error) { + calledAuthenticate = true + return expectedUser, true, nil + } + fw.fakeAuth.attributesFunc = func(u user.Info, req *http.Request) authorizer.Attributes { + calledAttributes = true + return expectedAttributes + } + fw.fakeAuth.authorizeFunc = func(a authorizer.Attributes) (err error) { + calledAuthorize = true + return nil + } + + assertHealthIsOk(t, fw.testHTTPServer.URL+"/healthz") + + if !calledAuthenticate { + t.Fatalf("Authenticate was not called") + } + if !calledAttributes { + t.Fatalf("Attributes were not called") + } + if !calledAuthorize { + t.Fatalf("Authorize was not called") + } +} + +func TestSyncLoopCheck(t *testing.T) { + fw := newServerTest() + defer fw.testHTTPServer.Close() + fw.fakeKubelet.hostnameFunc = func() string { + return "127.0.0.1" + } + + fw.fakeKubelet.resyncInterval = time.Minute + fw.fakeKubelet.loopEntryTime = time.Now() + + // Test with correct hostname, Docker version + assertHealthIsOk(t, fw.testHTTPServer.URL+"/healthz") + + fw.fakeKubelet.loopEntryTime = time.Now().Add(time.Minute * -10) + assertHealthFails(t, fw.testHTTPServer.URL+"/healthz", http.StatusInternalServerError) +} + +func TestPLEGHealthCheck(t *testing.T) { + fw := newServerTest() + defer fw.testHTTPServer.Close() + fw.fakeKubelet.hostnameFunc = func() string { + return "127.0.0.1" + } + + // Test with failed pleg health check. 
+ fw.fakeKubelet.plegHealth = false + assertHealthFails(t, fw.testHTTPServer.URL+"/healthz", http.StatusInternalServerError) +} + +// returns http response status code from the HTTP GET +func assertHealthIsOk(t *testing.T, httpURL string) { + resp, err := http.Get(httpURL) + if err != nil { + t.Fatalf("Got error GETing: %v", err) + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + t.Errorf("expected status code %d, got %d", http.StatusOK, resp.StatusCode) + } + body, readErr := ioutil.ReadAll(resp.Body) + if readErr != nil { + // copying the response body did not work + t.Fatalf("Cannot copy resp: %#v", readErr) + } + result := string(body) + if !strings.Contains(result, "ok") { + t.Errorf("expected body contains ok, got %s", result) + } +} + +func setPodByNameFunc(fw *serverTestFramework, namespace, pod, container string) { + fw.fakeKubelet.podByNameFunc = func(namespace, name string) (*api.Pod, bool) { + return &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Namespace: namespace, + Name: pod, + }, + Spec: api.PodSpec{ + Containers: []api.Container{ + { + Name: container, + }, + }, + }, + }, true + } +} + +func setGetContainerLogsFunc(fw *serverTestFramework, t *testing.T, expectedPodName, expectedContainerName string, expectedLogOptions *api.PodLogOptions, output string) { + fw.fakeKubelet.containerLogsFunc = func(podFullName, containerName string, logOptions *api.PodLogOptions, stdout, stderr io.Writer) error { + if podFullName != expectedPodName { + t.Errorf("expected %s, got %s", expectedPodName, podFullName) + } + if containerName != expectedContainerName { + t.Errorf("expected %s, got %s", expectedContainerName, containerName) + } + if !reflect.DeepEqual(expectedLogOptions, logOptions) { + t.Errorf("expected %#v, got %#v", expectedLogOptions, logOptions) + } + + io.WriteString(stdout, output) + return nil + } +} + +// TODO: I really want to be a table driven test +func TestContainerLogs(t *testing.T) { + fw := newServerTest() + defer fw.testHTTPServer.Close() + output := "foo bar" + podNamespace := "other" + podName := "foo" + expectedPodName := getPodName(podName, podNamespace) + expectedContainerName := "baz" + setPodByNameFunc(fw, podNamespace, podName, expectedContainerName) + setGetContainerLogsFunc(fw, t, expectedPodName, expectedContainerName, &api.PodLogOptions{}, output) + resp, err := http.Get(fw.testHTTPServer.URL + "/containerLogs/" + podNamespace + "/" + podName + "/" + expectedContainerName) + if err != nil { + t.Errorf("Got error GETing: %v", err) + } + defer resp.Body.Close() + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + t.Errorf("Error reading container logs: %v", err) + } + result := string(body) + if result != output { + t.Errorf("Expected: '%v', got: '%v'", output, result) + } +} + +func TestContainerLogsWithLimitBytes(t *testing.T) { + fw := newServerTest() + defer fw.testHTTPServer.Close() + output := "foo bar" + podNamespace := "other" + podName := "foo" + expectedPodName := getPodName(podName, podNamespace) + expectedContainerName := "baz" + bytes := int64(3) + setPodByNameFunc(fw, podNamespace, podName, expectedContainerName) + setGetContainerLogsFunc(fw, t, expectedPodName, expectedContainerName, &api.PodLogOptions{LimitBytes: &bytes}, output) + resp, err := http.Get(fw.testHTTPServer.URL + "/containerLogs/" + podNamespace + "/" + podName + "/" + expectedContainerName + "?limitBytes=3") + if err != nil { + t.Errorf("Got error GETing: %v", err) + } + defer resp.Body.Close() + + body, err := ioutil.ReadAll(resp.Body) + if err != 
nil { + t.Errorf("Error reading container logs: %v", err) + } + result := string(body) + if result != output[:bytes] { + t.Errorf("Expected: '%v', got: '%v'", output[:bytes], result) + } +} + +func TestContainerLogsWithTail(t *testing.T) { + fw := newServerTest() + defer fw.testHTTPServer.Close() + output := "foo bar" + podNamespace := "other" + podName := "foo" + expectedPodName := getPodName(podName, podNamespace) + expectedContainerName := "baz" + expectedTail := int64(5) + setPodByNameFunc(fw, podNamespace, podName, expectedContainerName) + setGetContainerLogsFunc(fw, t, expectedPodName, expectedContainerName, &api.PodLogOptions{TailLines: &expectedTail}, output) + resp, err := http.Get(fw.testHTTPServer.URL + "/containerLogs/" + podNamespace + "/" + podName + "/" + expectedContainerName + "?tailLines=5") + if err != nil { + t.Errorf("Got error GETing: %v", err) + } + defer resp.Body.Close() + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + t.Errorf("Error reading container logs: %v", err) + } + result := string(body) + if result != output { + t.Errorf("Expected: '%v', got: '%v'", output, result) + } +} + +func TestContainerLogsWithLegacyTail(t *testing.T) { + fw := newServerTest() + defer fw.testHTTPServer.Close() + output := "foo bar" + podNamespace := "other" + podName := "foo" + expectedPodName := getPodName(podName, podNamespace) + expectedContainerName := "baz" + expectedTail := int64(5) + setPodByNameFunc(fw, podNamespace, podName, expectedContainerName) + setGetContainerLogsFunc(fw, t, expectedPodName, expectedContainerName, &api.PodLogOptions{TailLines: &expectedTail}, output) + resp, err := http.Get(fw.testHTTPServer.URL + "/containerLogs/" + podNamespace + "/" + podName + "/" + expectedContainerName + "?tail=5") + if err != nil { + t.Errorf("Got error GETing: %v", err) + } + defer resp.Body.Close() + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + t.Errorf("Error reading container logs: %v", err) + } + result := string(body) + if result != output { + t.Errorf("Expected: '%v', got: '%v'", output, result) + } +} + +func TestContainerLogsWithTailAll(t *testing.T) { + fw := newServerTest() + defer fw.testHTTPServer.Close() + output := "foo bar" + podNamespace := "other" + podName := "foo" + expectedPodName := getPodName(podName, podNamespace) + expectedContainerName := "baz" + setPodByNameFunc(fw, podNamespace, podName, expectedContainerName) + setGetContainerLogsFunc(fw, t, expectedPodName, expectedContainerName, &api.PodLogOptions{}, output) + resp, err := http.Get(fw.testHTTPServer.URL + "/containerLogs/" + podNamespace + "/" + podName + "/" + expectedContainerName + "?tail=all") + if err != nil { + t.Errorf("Got error GETing: %v", err) + } + defer resp.Body.Close() + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + t.Errorf("Error reading container logs: %v", err) + } + result := string(body) + if result != output { + t.Errorf("Expected: '%v', got: '%v'", output, result) + } +} + +func TestContainerLogsWithInvalidTail(t *testing.T) { + fw := newServerTest() + defer fw.testHTTPServer.Close() + output := "foo bar" + podNamespace := "other" + podName := "foo" + expectedPodName := getPodName(podName, podNamespace) + expectedContainerName := "baz" + setPodByNameFunc(fw, podNamespace, podName, expectedContainerName) + setGetContainerLogsFunc(fw, t, expectedPodName, expectedContainerName, &api.PodLogOptions{}, output) + resp, err := http.Get(fw.testHTTPServer.URL + "/containerLogs/" + podNamespace + "/" + podName + "/" + expectedContainerName + 
"?tail=-1") + if err != nil { + t.Errorf("Got error GETing: %v", err) + } + defer resp.Body.Close() + if resp.StatusCode != apierrs.StatusUnprocessableEntity { + t.Errorf("Unexpected non-error reading container logs: %#v", resp) + } +} + +func TestContainerLogsWithFollow(t *testing.T) { + fw := newServerTest() + defer fw.testHTTPServer.Close() + output := "foo bar" + podNamespace := "other" + podName := "foo" + expectedPodName := getPodName(podName, podNamespace) + expectedContainerName := "baz" + setPodByNameFunc(fw, podNamespace, podName, expectedContainerName) + setGetContainerLogsFunc(fw, t, expectedPodName, expectedContainerName, &api.PodLogOptions{Follow: true}, output) + resp, err := http.Get(fw.testHTTPServer.URL + "/containerLogs/" + podNamespace + "/" + podName + "/" + expectedContainerName + "?follow=1") + if err != nil { + t.Errorf("Got error GETing: %v", err) + } + defer resp.Body.Close() + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + t.Errorf("Error reading container logs: %v", err) + } + result := string(body) + if result != output { + t.Errorf("Expected: '%v', got: '%v'", output, result) + } +} + +func TestServeExecInContainerIdleTimeout(t *testing.T) { + fw := newServerTest() + defer fw.testHTTPServer.Close() + + fw.fakeKubelet.streamingConnectionIdleTimeoutFunc = func() time.Duration { + return 100 * time.Millisecond + } + + podNamespace := "other" + podName := "foo" + expectedContainerName := "baz" + + url := fw.testHTTPServer.URL + "/exec/" + podNamespace + "/" + podName + "/" + expectedContainerName + "?c=ls&c=-a&" + api.ExecStdinParam + "=1" + + upgradeRoundTripper := spdy.NewSpdyRoundTripper(nil) + c := &http.Client{Transport: upgradeRoundTripper} + + resp, err := c.Post(url, "", nil) + if err != nil { + t.Fatalf("Got error POSTing: %v", err) + } + defer resp.Body.Close() + + upgradeRoundTripper.Dialer = &net.Dialer{ + Deadline: time.Now().Add(60 * time.Second), + Timeout: 60 * time.Second, + } + conn, err := upgradeRoundTripper.NewConnection(resp) + if err != nil { + t.Fatalf("Unexpected error creating streaming connection: %s", err) + } + if conn == nil { + t.Fatal("Unexpected nil connection") + } + + <-conn.CloseChan() +} + +func testExecAttach(t *testing.T, verb string) { + tests := []struct { + stdin bool + stdout bool + stderr bool + tty bool + responseStatusCode int + uid bool + }{ + {responseStatusCode: http.StatusBadRequest}, + {stdin: true, responseStatusCode: http.StatusSwitchingProtocols}, + {stdout: true, responseStatusCode: http.StatusSwitchingProtocols}, + {stderr: true, responseStatusCode: http.StatusSwitchingProtocols}, + {stdout: true, stderr: true, responseStatusCode: http.StatusSwitchingProtocols}, + {stdout: true, stderr: true, tty: true, responseStatusCode: http.StatusSwitchingProtocols}, + {stdin: true, stdout: true, stderr: true, responseStatusCode: http.StatusSwitchingProtocols}, + } + + for i, test := range tests { + fw := newServerTest() + defer fw.testHTTPServer.Close() + + fw.fakeKubelet.streamingConnectionIdleTimeoutFunc = func() time.Duration { + return 0 + } + + podNamespace := "other" + podName := "foo" + expectedPodName := getPodName(podName, podNamespace) + expectedUid := "9b01b80f-8fb4-11e4-95ab-4200af06647" + expectedContainerName := "baz" + expectedCommand := "ls -a" + expectedStdin := "stdin" + expectedStdout := "stdout" + expectedStderr := "stderr" + done := make(chan struct{}) + clientStdoutReadDone := make(chan struct{}) + clientStderrReadDone := make(chan struct{}) + execInvoked := false + attachInvoked := 
false + + testStreamFunc := func(podFullName string, uid types.UID, containerName string, cmd []string, in io.Reader, out, stderr io.WriteCloser, tty bool, done chan struct{}) error { + defer close(done) + + if podFullName != expectedPodName { + t.Fatalf("%d: podFullName: expected %s, got %s", i, expectedPodName, podFullName) + } + if test.uid && string(uid) != expectedUid { + t.Fatalf("%d: uid: expected %v, got %v", i, expectedUid, uid) + } + if containerName != expectedContainerName { + t.Fatalf("%d: containerName: expected %s, got %s", i, expectedContainerName, containerName) + } + + if test.stdin { + if in == nil { + t.Fatalf("%d: stdin: expected non-nil", i) + } + b := make([]byte, 10) + n, err := in.Read(b) + if err != nil { + t.Fatalf("%d: error reading from stdin: %v", i, err) + } + if e, a := expectedStdin, string(b[0:n]); e != a { + t.Fatalf("%d: stdin: expected to read %v, got %v", i, e, a) + } + } else if in != nil { + t.Fatalf("%d: stdin: expected nil: %#v", i, in) + } + + if test.stdout { + if out == nil { + t.Fatalf("%d: stdout: expected non-nil", i) + } + _, err := out.Write([]byte(expectedStdout)) + if err != nil { + t.Fatalf("%d: error writing to stdout: %v", i, err) + } + out.Close() + <-clientStdoutReadDone + } else if out != nil { + t.Fatalf("%d: stdout: expected nil: %#v", i, out) + } + + if tty { + if stderr != nil { + t.Fatalf("%d: tty set but received non-nil stderr: %v", i, stderr) + } + } else if test.stderr { + if stderr == nil { + t.Fatalf("%d: stderr: expected non-nil", i) + } + _, err := stderr.Write([]byte(expectedStderr)) + if err != nil { + t.Fatalf("%d: error writing to stderr: %v", i, err) + } + stderr.Close() + <-clientStderrReadDone + } else if stderr != nil { + t.Fatalf("%d: stderr: expected nil: %#v", i, stderr) + } + + return nil + } + + fw.fakeKubelet.execFunc = func(podFullName string, uid types.UID, containerName string, cmd []string, in io.Reader, out, stderr io.WriteCloser, tty bool) error { + execInvoked = true + if strings.Join(cmd, " ") != expectedCommand { + t.Fatalf("%d: cmd: expected: %s, got %v", i, expectedCommand, cmd) + } + return testStreamFunc(podFullName, uid, containerName, cmd, in, out, stderr, tty, done) + } + + fw.fakeKubelet.attachFunc = func(podFullName string, uid types.UID, containerName string, in io.Reader, out, stderr io.WriteCloser, tty bool) error { + attachInvoked = true + return testStreamFunc(podFullName, uid, containerName, nil, in, out, stderr, tty, done) + } + + var url string + if test.uid { + url = fw.testHTTPServer.URL + "/" + verb + "/" + podNamespace + "/" + podName + "/" + expectedUid + "/" + expectedContainerName + "?ignore=1" + } else { + url = fw.testHTTPServer.URL + "/" + verb + "/" + podNamespace + "/" + podName + "/" + expectedContainerName + "?ignore=1" + } + if verb == "exec" { + url += "&command=ls&command=-a" + } + if test.stdin { + url += "&" + api.ExecStdinParam + "=1" + } + if test.stdout { + url += "&" + api.ExecStdoutParam + "=1" + } + if test.stderr && !test.tty { + url += "&" + api.ExecStderrParam + "=1" + } + if test.tty { + url += "&" + api.ExecTTYParam + "=1" + } + + var ( + resp *http.Response + err error + upgradeRoundTripper httpstream.UpgradeRoundTripper + c *http.Client + ) + + if test.responseStatusCode != http.StatusSwitchingProtocols { + c = &http.Client{} + } else { + upgradeRoundTripper = spdy.NewRoundTripper(nil) + c = &http.Client{Transport: upgradeRoundTripper} + } + + resp, err = c.Post(url, "", nil) + if err != nil { + t.Fatalf("%d: Got error POSTing: %v", i, err) + } +
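+ // The POST response is either a plain error response or a 101 Switching Protocols upgrade; + // its body is drained below, and in the upgrade case resp is then handed to + // upgradeRoundTripper.NewConnection to establish the SPDY streaming connection.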
defer resp.Body.Close() + + _, err = ioutil.ReadAll(resp.Body) + if err != nil { + t.Errorf("%d: Error reading response body: %v", i, err) + } + + if e, a := test.responseStatusCode, resp.StatusCode; e != a { + t.Fatalf("%d: response status: expected %v, got %v", i, e, a) + } + + if test.responseStatusCode != http.StatusSwitchingProtocols { + continue + } + + conn, err := upgradeRoundTripper.NewConnection(resp) + if err != nil { + t.Fatalf("Unexpected error creating streaming connection: %s", err) + } + if conn == nil { + t.Fatalf("%d: unexpected nil conn", i) + } + defer conn.Close() + + h := http.Header{} + h.Set(api.StreamType, api.StreamTypeError) + if _, err := conn.CreateStream(h); err != nil { + t.Fatalf("%d: error creating error stream: %v", i, err) + } + + if test.stdin { + h.Set(api.StreamType, api.StreamTypeStdin) + stream, err := conn.CreateStream(h) + if err != nil { + t.Fatalf("%d: error creating stdin stream: %v", i, err) + } + _, err = stream.Write([]byte(expectedStdin)) + if err != nil { + t.Fatalf("%d: error writing to stdin stream: %v", i, err) + } + } + + var stdoutStream httpstream.Stream + if test.stdout { + h.Set(api.StreamType, api.StreamTypeStdout) + stdoutStream, err = conn.CreateStream(h) + if err != nil { + t.Fatalf("%d: error creating stdout stream: %v", i, err) + } + } + + var stderrStream httpstream.Stream + if test.stderr && !test.tty { + h.Set(api.StreamType, api.StreamTypeStderr) + stderrStream, err = conn.CreateStream(h) + if err != nil { + t.Fatalf("%d: error creating stderr stream: %v", i, err) + } + } + + if test.stdout { + output := make([]byte, 10) + n, err := stdoutStream.Read(output) + close(clientStdoutReadDone) + if err != nil { + t.Fatalf("%d: error reading from stdout stream: %v", i, err) + } + if e, a := expectedStdout, string(output[0:n]); e != a { + t.Fatalf("%d: stdout: expected '%v', got '%v'", i, e, a) + } + } + + if test.stderr && !test.tty { + output := make([]byte, 10) + n, err := stderrStream.Read(output) + close(clientStderrReadDone) + if err != nil { + t.Fatalf("%d: error reading from stderr stream: %v", i, err) + } + if e, a := expectedStderr, string(output[0:n]); e != a { + t.Fatalf("%d: stderr: expected '%v', got '%v'", i, e, a) + } + } + + // wait for the server to finish before checking if the attach/exec funcs were invoked + <-done + + if verb == "exec" { + if !execInvoked { + t.Errorf("%d: exec was not invoked", i) + } + if attachInvoked { + t.Errorf("%d: attach should not have been invoked", i) + } + } else { + if !attachInvoked { + t.Errorf("%d: attach was not invoked", i) + } + if execInvoked { + t.Errorf("%d: exec should not have been invoked", i) + } + } + } +} + +func TestServeExecInContainer(t *testing.T) { + testExecAttach(t, "exec") +} + +func TestServeAttachContainer(t *testing.T) { + testExecAttach(t, "attach") +} + +func TestServePortForwardIdleTimeout(t *testing.T) { + fw := newServerTest() + defer fw.testHTTPServer.Close() + + fw.fakeKubelet.streamingConnectionIdleTimeoutFunc = func() time.Duration { + return 100 * time.Millisecond + } + + podNamespace := "other" + podName := "foo" + + url := fw.testHTTPServer.URL + "/portForward/" + podNamespace + "/" + podName + + upgradeRoundTripper := spdy.NewRoundTripper(nil) + c := &http.Client{Transport: upgradeRoundTripper} + + resp, err := c.Post(url, "", nil) + if err != nil { + t.Fatalf("Got error POSTing: %v", err) + } + defer resp.Body.Close() + + conn, err := upgradeRoundTripper.NewConnection(resp) + if err != nil { + t.Fatalf("Unexpected error creating streaming 
connection: %s", err) + } + if conn == nil { + t.Fatal("Unexpected nil connection") + } + defer conn.Close() + + <-conn.CloseChan() +} + +func TestServePortForward(t *testing.T) { + tests := []struct { + port string + uid bool + clientData string + containerData string + shouldError bool + }{ + {port: "", shouldError: true}, + {port: "abc", shouldError: true}, + {port: "-1", shouldError: true}, + {port: "65536", shouldError: true}, + {port: "0", shouldError: true}, + {port: "1", shouldError: false}, + {port: "8000", shouldError: false}, + {port: "8000", clientData: "client data", containerData: "container data", shouldError: false}, + {port: "65535", shouldError: false}, + {port: "65535", uid: true, shouldError: false}, + } + + podNamespace := "other" + podName := "foo" + expectedPodName := getPodName(podName, podNamespace) + expectedUid := "9b01b80f-8fb4-11e4-95ab-4200af06647" + + for i, test := range tests { + fw := newServerTest() + defer fw.testHTTPServer.Close() + + fw.fakeKubelet.streamingConnectionIdleTimeoutFunc = func() time.Duration { + return 0 + } + + portForwardFuncDone := make(chan struct{}) + + fw.fakeKubelet.portForwardFunc = func(name string, uid types.UID, port uint16, stream io.ReadWriteCloser) error { + defer close(portForwardFuncDone) + + if e, a := expectedPodName, name; e != a { + t.Fatalf("%d: pod name: expected '%v', got '%v'", i, e, a) + } + + if e, a := expectedUid, uid; test.uid && e != string(a) { + t.Fatalf("%d: uid: expected '%v', got '%v'", i, e, a) + } + + p, err := strconv.ParseUint(test.port, 10, 16) + if err != nil { + t.Fatalf("%d: error parsing port string '%s': %v", i, test.port, err) + } + if e, a := uint16(p), port; e != a { + t.Fatalf("%d: port: expected '%v', got '%v'", i, e, a) + } + + if test.clientData != "" { + fromClient := make([]byte, 32) + n, err := stream.Read(fromClient) + if err != nil { + t.Fatalf("%d: error reading client data: %v", i, err) + } + if e, a := test.clientData, string(fromClient[0:n]); e != a { + t.Fatalf("%d: client data: expected to receive '%v', got '%v'", i, e, a) + } + } + + if test.containerData != "" { + _, err := stream.Write([]byte(test.containerData)) + if err != nil { + t.Fatalf("%d: error writing container data: %v", i, err) + } + } + + return nil + } + + var url string + if test.uid { + url = fmt.Sprintf("%s/portForward/%s/%s/%s", fw.testHTTPServer.URL, podNamespace, podName, expectedUid) + } else { + url = fmt.Sprintf("%s/portForward/%s/%s", fw.testHTTPServer.URL, podNamespace, podName) + } + + upgradeRoundTripper := spdy.NewRoundTripper(nil) + c := &http.Client{Transport: upgradeRoundTripper} + + resp, err := c.Post(url, "", nil) + if err != nil { + t.Fatalf("%d: Got error POSTing: %v", i, err) + } + defer resp.Body.Close() + + conn, err := upgradeRoundTripper.NewConnection(resp) + if err != nil { + t.Fatalf("Unexpected error creating streaming connection: %s", err) + } + if conn == nil { + t.Fatalf("%d: Unexpected nil connection", i) + } + defer conn.Close() + + headers := http.Header{} + headers.Set("streamType", "error") + headers.Set("port", test.port) + errorStream, err := conn.CreateStream(headers) + _ = errorStream + haveErr := err != nil + if e, a := test.shouldError, haveErr; e != a { + t.Fatalf("%d: create stream: expected err=%t, got %t: %v", i, e, a, err) + } + + if test.shouldError { + continue + } + + headers.Set("streamType", "data") + headers.Set("port", test.port) + dataStream, err := conn.CreateStream(headers) + haveErr = err != nil + if e, a := test.shouldError, haveErr; e != a { + 
t.Fatalf("%d: create stream: expected err=%t, got %t: %v", i, e, a, err) + } + + if test.clientData != "" { + _, err := dataStream.Write([]byte(test.clientData)) + if err != nil { + t.Fatalf("%d: unexpected error writing client data: %v", i, err) + } + } + + if test.containerData != "" { + fromContainer := make([]byte, 32) + n, err := dataStream.Read(fromContainer) + if err != nil { + t.Fatalf("%d: unexpected error reading container data: %v", i, err) + } + if e, a := test.containerData, string(fromContainer[0:n]); e != a { + t.Fatalf("%d: expected to receive '%v' from container, got '%v'", i, e, a) + } + } + + <-portForwardFuncDone + } +} + +type fakeHttpStream struct { + headers http.Header + id uint32 +} + +func newFakeHttpStream() *fakeHttpStream { + return &fakeHttpStream{ + headers: make(http.Header), + } +} + +var _ httpstream.Stream = &fakeHttpStream{} + +func (s *fakeHttpStream) Read(data []byte) (int, error) { + return 0, nil +} + +func (s *fakeHttpStream) Write(data []byte) (int, error) { + return 0, nil +} + +func (s *fakeHttpStream) Close() error { + return nil +} + +func (s *fakeHttpStream) Reset() error { + return nil +} + +func (s *fakeHttpStream) Headers() http.Header { + return s.headers +} + +func (s *fakeHttpStream) Identifier() uint32 { + return s.id +} + +func TestPortForwardStreamReceived(t *testing.T) { + tests := map[string]struct { + port string + streamType string + expectedError string + }{ + "missing port": { + expectedError: `"port" header is required`, + }, + "unable to parse port": { + port: "abc", + expectedError: `unable to parse "abc" as a port: strconv.ParseUint: parsing "abc": invalid syntax`, + }, + "negative port": { + port: "-1", + expectedError: `unable to parse "-1" as a port: strconv.ParseUint: parsing "-1": invalid syntax`, + }, + "missing stream type": { + port: "80", + expectedError: `"streamType" header is required`, + }, + "valid port with error stream": { + port: "80", + streamType: "error", + }, + "valid port with data stream": { + port: "80", + streamType: "data", + }, + "invalid stream type": { + port: "80", + streamType: "foo", + expectedError: `invalid stream type "foo"`, + }, + } + for name, test := range tests { + streams := make(chan httpstream.Stream, 1) + f := portForwardStreamReceived(streams) + stream := newFakeHttpStream() + if len(test.port) > 0 { + stream.headers.Set("port", test.port) + } + if len(test.streamType) > 0 { + stream.headers.Set("streamType", test.streamType) + } + replySent := make(chan struct{}) + err := f(stream, replySent) + close(replySent) + if len(test.expectedError) > 0 { + if err == nil { + t.Errorf("%s: expected err=%q, but it was nil", name, test.expectedError) + } + if e, a := test.expectedError, err.Error(); e != a { + t.Errorf("%s: expected err=%q, got %q", name, e, a) + } + continue + } + if err != nil { + t.Errorf("%s: unexpected error %v", name, err) + continue + } + if s := <-streams; s != stream { + t.Errorf("%s: expected stream %#v, got %#v", name, stream, s) + } + } +} + +func TestGetStreamPair(t *testing.T) { + timeout := make(chan time.Time) + + h := &portForwardStreamHandler{ + streamPairs: make(map[string]*portForwardStreamPair), + } + + // test adding a new entry + p, created := h.getStreamPair("1") + if p == nil { + t.Fatalf("unexpected nil pair") + } + if !created { + t.Fatal("expected created=true") + } + if p.dataStream != nil { + t.Errorf("unexpected non-nil data stream") + } + if p.errorStream != nil { + t.Errorf("unexpected non-nil error stream") + } + + // start the monitor for 
this pair + monitorDone := make(chan struct{}) + go func() { + h.monitorStreamPair(p, timeout) + close(monitorDone) + }() + + if !h.hasStreamPair("1") { + t.Fatal("expected stream pair 1 to still be present while its monitor is running") + } + + // make sure we can retrieve an existing entry + p2, created := h.getStreamPair("1") + if created { + t.Fatal("expected created=false") + } + if p != p2 { + t.Fatalf("retrieving an existing pair: expected %#v, got %#v", p, p2) + } + + // pair removed when complete (data and error streams both received) + dataStream := newFakeHttpStream() + dataStream.headers.Set(api.StreamType, api.StreamTypeData) + complete, err := p.add(dataStream) + if err != nil { + t.Fatalf("unexpected error adding data stream to pair: %v", err) + } + if complete { + t.Fatalf("unexpected complete") + } + + errorStream := newFakeHttpStream() + errorStream.headers.Set(api.StreamType, api.StreamTypeError) + complete, err = p.add(errorStream) + if err != nil { + t.Fatalf("unexpected error adding error stream to pair: %v", err) + } + if !complete { + t.Fatal("unexpected incomplete") + } + + // make sure monitorStreamPair completed + <-monitorDone + + // make sure the pair was removed + if h.hasStreamPair("1") { + t.Fatal("expected removal of pair after both data and error streams received") + } + + // pair removed via timeout + p, created = h.getStreamPair("2") + if !created { + t.Fatal("expected created=true") + } + if p == nil { + t.Fatal("expected p not to be nil") + } + monitorDone = make(chan struct{}) + go func() { + h.monitorStreamPair(p, timeout) + close(monitorDone) + }() + // cause the timeout + close(timeout) + // make sure monitorStreamPair completed + <-monitorDone + if h.hasStreamPair("2") { + t.Fatal("expected stream pair to be removed") + } +} + +func TestRequestID(t *testing.T) { + h := &portForwardStreamHandler{} + + s := newFakeHttpStream() + s.headers.Set(api.StreamType, api.StreamTypeError) + s.id = 1 + if e, a := "1", h.requestID(s); e != a { + t.Errorf("expected %q, got %q", e, a) + } + + s.headers.Set(api.StreamType, api.StreamTypeData) + s.id = 3 + if e, a := "1", h.requestID(s); e != a { + t.Errorf("expected %q, got %q", e, a) + } + + s.id = 7 + s.headers.Set(api.PortForwardRequestIDHeader, "2") + if e, a := "2", h.requestID(s); e != a { + t.Errorf("expected %q, got %q", e, a) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/server/stats/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/server/stats/doc.go new file mode 100644 index 000000000000..289fdae7064f --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/server/stats/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package stats handles exporting Kubelet and container stats. +// NOTE: We intend to move this functionality into a standalone pod, so this package should be very +// loosely coupled to the rest of the Kubelet.
+package stats diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/server/stats/fs_resource_analyzer.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/server/stats/fs_resource_analyzer.go new file mode 100644 index 000000000000..c45e34694ada --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/server/stats/fs_resource_analyzer.go @@ -0,0 +1,107 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package stats + +import ( + "sync" + "sync/atomic" + "time" + + "k8s.io/kubernetes/pkg/types" + "k8s.io/kubernetes/pkg/util/wait" + + "github.com/golang/glog" +) + +// Cache maps pod UIDs to volumeStatCalculator pointers. Pointers are stored because map values +// are not addressable, which causes pain if we ever need a pointer to one of the values. +type Cache map[types.UID]*volumeStatCalculator + +// fsResourceAnalyzerInterface is for embedding fs functions into ResourceAnalyzer +type fsResourceAnalyzerInterface interface { + GetPodVolumeStats(uid types.UID) (PodVolumeStats, bool) +} + +// fsResourceAnalyzer provides stats about fs resource usage +type fsResourceAnalyzer struct { + statsProvider StatsProvider + calcPeriod time.Duration + cachedVolumeStats atomic.Value + startOnce sync.Once +} + +var _ fsResourceAnalyzerInterface = &fsResourceAnalyzer{} + +// newFsResourceAnalyzer returns a new fsResourceAnalyzer implementation +func newFsResourceAnalyzer(statsProvider StatsProvider, calcVolumePeriod time.Duration) *fsResourceAnalyzer { + r := &fsResourceAnalyzer{ + statsProvider: statsProvider, + calcPeriod: calcVolumePeriod, + } + r.cachedVolumeStats.Store(make(Cache)) + return r +} + +// Start eager background caching of volume stats. +func (s *fsResourceAnalyzer) Start() { + s.startOnce.Do(func() { + if s.calcPeriod <= 0 { + glog.Info("Volume stats collection disabled.") + return + } + glog.Info("Starting FS ResourceAnalyzer") + go wait.Forever(func() { s.updateCachedPodVolumeStats() }, s.calcPeriod) + }) +} + +// updateCachedPodVolumeStats calculates and caches the PodVolumeStats for every Pod known to the kubelet. +func (s *fsResourceAnalyzer) updateCachedPodVolumeStats() { + oldCache := s.cachedVolumeStats.Load().(Cache) + newCache := make(Cache) + + // Copy existing entries to new map, creating/starting new entries for pods missing from the cache + for _, pod := range s.statsProvider.GetPods() { + if value, found := oldCache[pod.GetUID()]; !found { + newCache[pod.GetUID()] = newVolumeStatCalculator(s.statsProvider, s.calcPeriod, pod).StartOnce() + } else { + newCache[pod.GetUID()] = value + } + } + + // Stop entries for pods that have been deleted + for uid, entry := range oldCache { + if _, found := newCache[uid]; !found { + entry.StopOnce() + } + } + + // Update the cache reference + s.cachedVolumeStats.Store(newCache) +} + +// GetPodVolumeStats returns the PodVolumeStats for a given pod. Results are looked up from a cache that +// is eagerly populated in the background, and never calculated on the fly.
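+// A miss therefore means either that the pod is unknown or that its stats have not been +// calculated yet; the two cases are not distinguished (see issue #20679).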
+func (s *fsResourceAnalyzer) GetPodVolumeStats(uid types.UID) (PodVolumeStats, bool) { + cache := s.cachedVolumeStats.Load().(Cache) + if statCalc, found := cache[uid]; !found { + // TODO: Differentiate between stats being empty + // See issue #20679 + return PodVolumeStats{}, false + } else { + return statCalc.GetLatest() + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/server/stats/handler.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/server/stats/handler.go new file mode 100644 index 000000000000..531d55350a0f --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/server/stats/handler.go @@ -0,0 +1,244 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package stats + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "path" + "time" + + "github.com/golang/glog" + cadvisorapi "github.com/google/cadvisor/info/v1" + cadvisorapiv2 "github.com/google/cadvisor/info/v2" + + "github.com/emicklei/go-restful" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/kubelet/cm" + kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" + "k8s.io/kubernetes/pkg/types" + "k8s.io/kubernetes/pkg/volume" +) + +// Host methods required by stats handlers. +type StatsProvider interface { + GetContainerInfo(podFullName string, uid types.UID, containerName string, req *cadvisorapi.ContainerInfoRequest) (*cadvisorapi.ContainerInfo, error) + GetContainerInfoV2(name string, options cadvisorapiv2.RequestOptions) (map[string]cadvisorapiv2.ContainerInfo, error) + GetRawContainerInfo(containerName string, req *cadvisorapi.ContainerInfoRequest, subcontainers bool) (map[string]*cadvisorapi.ContainerInfo, error) + GetPodByName(namespace, name string) (*api.Pod, bool) + GetNode() (*api.Node, error) + GetNodeConfig() cm.NodeConfig + ImagesFsInfo() (cadvisorapiv2.FsInfo, error) + RootFsInfo() (cadvisorapiv2.FsInfo, error) + ListVolumesForPod(podUID types.UID) (map[string]volume.Volume, bool) + GetPods() []*api.Pod +} + +type handler struct { + provider StatsProvider + summaryProvider SummaryProvider +} + +func CreateHandlers(provider StatsProvider, summaryProvider SummaryProvider) *restful.WebService { + h := &handler{provider, summaryProvider} + + ws := &restful.WebService{} + ws.Path("/stats/"). + Produces(restful.MIME_JSON) + + endpoints := []struct { + path string + handler restful.RouteFunction + }{ + {"", h.handleStats}, + {"/summary", h.handleSummary}, + {"/container", h.handleSystemContainer}, + {"/{podName}/{containerName}", h.handlePodContainer}, + {"/{namespace}/{podName}/{uid}/{containerName}", h.handlePodContainer}, + } + + for _, e := range endpoints { + for _, method := range []string{"GET", "POST"} { + ws.Route(ws. + Method(method). + Path(e.path). + To(e.handler)) + } + } + + return ws +} + +type StatsRequest struct { + // The name of the container for which to request stats. + // Default: / + ContainerName string `json:"containerName,omitempty"` + + // Max number of stats to return. 
+ // If start and end time are specified this limit is ignored. + // Default: 60 + NumStats int `json:"num_stats,omitempty"` + + // Start time for which to query information. + // If omitted, the beginning of time is assumed. + Start time.Time `json:"start,omitempty"` + + // End time for which to query information. + // If omitted, current time is assumed. + End time.Time `json:"end,omitempty"` + + // Whether to also include information from subcontainers. + // Default: false. + Subcontainers bool `json:"subcontainers,omitempty"` +} + +func (r *StatsRequest) cadvisorRequest() *cadvisorapi.ContainerInfoRequest { + return &cadvisorapi.ContainerInfoRequest{ + NumStats: r.NumStats, + Start: r.Start, + End: r.End, + } +} + +func parseStatsRequest(request *restful.Request) (StatsRequest, error) { + // Default request. + query := StatsRequest{ + NumStats: 60, + } + + err := json.NewDecoder(request.Request.Body).Decode(&query) + if err != nil && err != io.EOF { + return query, err + } + return query, nil +} + +// Handles root container stats requests to /stats +func (h *handler) handleStats(request *restful.Request, response *restful.Response) { + query, err := parseStatsRequest(request) + if err != nil { + handleError(response, "/stats", err) + return + } + + // Root container stats. + statsMap, err := h.provider.GetRawContainerInfo("/", query.cadvisorRequest(), false) + if err != nil { + handleError(response, fmt.Sprintf("/stats %v", query), err) + return + } + writeResponse(response, statsMap["/"]) +} + +// Handles stats summary requests to /stats/summary +func (h *handler) handleSummary(request *restful.Request, response *restful.Response) { + summary, err := h.summaryProvider.Get() + if err != nil { + handleError(response, "/stats/summary", err) + } else { + writeResponse(response, summary) + } +} + +// Handles non-kubernetes container stats requests to /stats/container/ +func (h *handler) handleSystemContainer(request *restful.Request, response *restful.Response) { + query, err := parseStatsRequest(request) + if err != nil { + handleError(response, "/stats/container", err) + return + } + + // Non-Kubernetes container stats. + containerName := path.Join("/", query.ContainerName) + stats, err := h.provider.GetRawContainerInfo( + containerName, query.cadvisorRequest(), query.Subcontainers) + if err != nil { + if _, ok := stats[containerName]; ok { + // If the failure is partial, log it and return a best-effort response. + glog.Errorf("Partial failure issuing GetRawContainerInfo(%v): %v", query, err) + } else { + handleError(response, fmt.Sprintf("/stats/container %v", query), err) + return + } + } + writeResponse(response, stats) +} + +// Handles kubernetes pod/container stats requests to: +// /stats/{podName}/{containerName} +// /stats/{namespace}/{podName}/{uid}/{containerName} +func (h *handler) handlePodContainer(request *restful.Request, response *restful.Response) { + query, err := parseStatsRequest(request) + if err != nil { + handleError(response, request.Request.URL.String(), err) + return + } + + // Default parameters.
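+ // The short /stats/{podName}/{containerName} route carries no namespace or uid segments, + // so namespace falls back to api.NamespaceDefault and uid to the empty string.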
+ params := map[string]string{ + "namespace": api.NamespaceDefault, + "uid": "", + } + for k, v := range request.PathParameters() { + params[k] = v + } + + if params["podName"] == "" || params["containerName"] == "" { + response.WriteErrorString(http.StatusBadRequest, + fmt.Sprintf("Invalid pod container request: %v", params)) + return + } + + pod, ok := h.provider.GetPodByName(params["namespace"], params["podName"]) + if !ok { + glog.V(4).Infof("Container not found: %v", params) + response.WriteError(http.StatusNotFound, kubecontainer.ErrContainerNotFound) + return + } + stats, err := h.provider.GetContainerInfo( + kubecontainer.GetPodFullName(pod), + types.UID(params["uid"]), + params["containerName"], + query.cadvisorRequest()) + + if err != nil { + handleError(response, fmt.Sprintf("%s %v", request.Request.URL.String(), query), err) + return + } + writeResponse(response, stats) +} + +func writeResponse(response *restful.Response, stats interface{}) { + if err := response.WriteAsJson(stats); err != nil { + glog.Errorf("Error writing response: %v", err) + } +} + +// handleError serializes an error object into an HTTP response. +// request is provided for logging. +func handleError(response *restful.Response, request string, err error) { + switch err { + case kubecontainer.ErrContainerNotFound: + response.WriteError(http.StatusNotFound, err) + default: + msg := fmt.Sprintf("Internal Error: %v", err) + glog.Errorf("HTTP InternalServerError serving %s: %s", request, msg) + response.WriteErrorString(http.StatusInternalServerError, msg) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/server/stats/mocks_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/server/stats/mocks_test.go new file mode 100644 index 000000000000..34495346099c --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/server/stats/mocks_test.go @@ -0,0 +1,244 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package stats + +import "github.com/stretchr/testify/mock" + +import cadvisorapi "github.com/google/cadvisor/info/v1" +import cadvisorapiv2 "github.com/google/cadvisor/info/v2" +import "k8s.io/kubernetes/pkg/api" +import "k8s.io/kubernetes/pkg/kubelet/cm" + +import "k8s.io/kubernetes/pkg/types" +import "k8s.io/kubernetes/pkg/volume" + +// DO NOT EDIT +// GENERATED BY mockery + +type MockStatsProvider struct { + mock.Mock +} + +// GetContainerInfo provides a mock function with given fields: podFullName, uid, containerName, req +func (_m *MockStatsProvider) GetContainerInfo(podFullName string, uid types.UID, containerName string, req *cadvisorapi.ContainerInfoRequest) (*cadvisorapi.ContainerInfo, error) { + ret := _m.Called(podFullName, uid, containerName, req) + + var r0 *cadvisorapi.ContainerInfo + if rf, ok := ret.Get(0).(func(string, types.UID, string, *cadvisorapi.ContainerInfoRequest) *cadvisorapi.ContainerInfo); ok { + r0 = rf(podFullName, uid, containerName, req) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*cadvisorapi.ContainerInfo) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(string, types.UID, string, *cadvisorapi.ContainerInfoRequest) error); ok { + r1 = rf(podFullName, uid, containerName, req) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetContainerInfoV2 provides a mock function with given fields: name, options +func (_m *MockStatsProvider) GetContainerInfoV2(name string, options cadvisorapiv2.RequestOptions) (map[string]cadvisorapiv2.ContainerInfo, error) { + ret := _m.Called(name, options) + + var r0 map[string]cadvisorapiv2.ContainerInfo + if rf, ok := ret.Get(0).(func(string, cadvisorapiv2.RequestOptions) map[string]cadvisorapiv2.ContainerInfo); ok { + r0 = rf(name, options) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(map[string]cadvisorapiv2.ContainerInfo) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(string, cadvisorapiv2.RequestOptions) error); ok { + r1 = rf(name, options) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetRawContainerInfo provides a mock function with given fields: containerName, req, subcontainers +func (_m *MockStatsProvider) GetRawContainerInfo(containerName string, req *cadvisorapi.ContainerInfoRequest, subcontainers bool) (map[string]*cadvisorapi.ContainerInfo, error) { + ret := _m.Called(containerName, req, subcontainers) + + var r0 map[string]*cadvisorapi.ContainerInfo + if rf, ok := ret.Get(0).(func(string, *cadvisorapi.ContainerInfoRequest, bool) map[string]*cadvisorapi.ContainerInfo); ok { + r0 = rf(containerName, req, subcontainers) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(map[string]*cadvisorapi.ContainerInfo) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(string, *cadvisorapi.ContainerInfoRequest, bool) error); ok { + r1 = rf(containerName, req, subcontainers) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetPodByName provides a mock function with given fields: namespace, name +func (_m *MockStatsProvider) GetPodByName(namespace string, name string) (*api.Pod, bool) { + ret := _m.Called(namespace, name) + + var r0 *api.Pod + if rf, ok := ret.Get(0).(func(string, string) *api.Pod); ok { + r0 = rf(namespace, name) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*api.Pod) + } + } + + var r1 bool + if rf, ok := ret.Get(1).(func(string, string) bool); ok { + r1 = rf(namespace, name) + } else { + r1 = ret.Get(1).(bool) + } + + return r0, r1 +} + +// GetNode provides a mock function with given fields: +func (_m 
*MockStatsProvider) GetNode() (*api.Node, error) { + ret := _m.Called() + + var r0 *api.Node + if rf, ok := ret.Get(0).(func() *api.Node); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*api.Node) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetNodeConfig provides a mock function with given fields: +func (_m *MockStatsProvider) GetNodeConfig() cm.NodeConfig { + ret := _m.Called() + + var r0 cm.NodeConfig + if rf, ok := ret.Get(0).(func() cm.NodeConfig); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(cm.NodeConfig) + } + + return r0 +} + +// ImagesFsInfo provides a mock function with given fields: +func (_m *MockStatsProvider) ImagesFsInfo() (cadvisorapiv2.FsInfo, error) { + ret := _m.Called() + + var r0 cadvisorapiv2.FsInfo + if rf, ok := ret.Get(0).(func() cadvisorapiv2.FsInfo); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(cadvisorapiv2.FsInfo) + } + + var r1 error + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// RootFsInfo provides a mock function with given fields: +func (_m *MockStatsProvider) RootFsInfo() (cadvisorapiv2.FsInfo, error) { + ret := _m.Called() + + var r0 cadvisorapiv2.FsInfo + if rf, ok := ret.Get(0).(func() cadvisorapiv2.FsInfo); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(cadvisorapiv2.FsInfo) + } + + var r1 error + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ListVolumesForPod provides a mock function with given fields: podUID +func (_m *MockStatsProvider) ListVolumesForPod(podUID types.UID) (map[string]volume.Volume, bool) { + ret := _m.Called(podUID) + + var r0 map[string]volume.Volume + if rf, ok := ret.Get(0).(func(types.UID) map[string]volume.Volume); ok { + r0 = rf(podUID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(map[string]volume.Volume) + } + } + + var r1 bool + if rf, ok := ret.Get(1).(func(types.UID) bool); ok { + r1 = rf(podUID) + } else { + r1 = ret.Get(1).(bool) + } + + return r0, r1 +} + +// GetPods provides a mock function with given fields: +func (_m *MockStatsProvider) GetPods() []*api.Pod { + ret := _m.Called() + + var r0 []*api.Pod + if rf, ok := ret.Get(0).(func() []*api.Pod); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*api.Pod) + } + } + + return r0 +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/server/stats/resource_analyzer.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/server/stats/resource_analyzer.go new file mode 100644 index 000000000000..a9e9dbbf0ace --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/server/stats/resource_analyzer.go @@ -0,0 +1,51 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package stats + +import ( + "time" + + "k8s.io/kubernetes/pkg/kubelet/container" +) + +// ResourceAnalyzer provides statistics on node resource consumption +type ResourceAnalyzer interface { + Start() + + fsResourceAnalyzerInterface + SummaryProvider +} + +// resourceAnalyzer implements ResourceAnalyzer +type resourceAnalyzer struct { + *fsResourceAnalyzer + SummaryProvider +} + +var _ ResourceAnalyzer = &resourceAnalyzer{} + +// NewResourceAnalyzer returns a new ResourceAnalyzer +func NewResourceAnalyzer(statsProvider StatsProvider, calVolumeFrequency time.Duration, runtime container.Runtime) ResourceAnalyzer { + fsAnalyzer := newFsResourceAnalyzer(statsProvider, calVolumeFrequency) + summaryProvider := NewSummaryProvider(statsProvider, fsAnalyzer, runtime) + return &resourceAnalyzer{fsAnalyzer, summaryProvider} +} + +// Start starts background functions necessary for the ResourceAnalyzer to function +func (ra *resourceAnalyzer) Start() { + ra.fsResourceAnalyzer.Start() +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/server/stats/summary.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/server/stats/summary.go new file mode 100644 index 000000000000..4b8f326dca54 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/server/stats/summary.go @@ -0,0 +1,398 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package stats + +import ( + "fmt" + "strings" + "time" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/stats" + "k8s.io/kubernetes/pkg/kubelet/cm" + "k8s.io/kubernetes/pkg/kubelet/container" + "k8s.io/kubernetes/pkg/kubelet/leaky" + "k8s.io/kubernetes/pkg/kubelet/network" + "k8s.io/kubernetes/pkg/kubelet/types" + kubetypes "k8s.io/kubernetes/pkg/types" + + "github.com/golang/glog" + + cadvisorapiv1 "github.com/google/cadvisor/info/v1" + cadvisorapiv2 "github.com/google/cadvisor/info/v2" +) + +type SummaryProvider interface { + // Get provides a new Summary using the latest results from cadvisor + Get() (*stats.Summary, error) +} + +type summaryProviderImpl struct { + provider StatsProvider + fsResourceAnalyzer fsResourceAnalyzerInterface + runtime container.Runtime +} + +var _ SummaryProvider = &summaryProviderImpl{} + +// NewSummaryProvider returns a new SummaryProvider +func NewSummaryProvider(statsProvider StatsProvider, fsResourceAnalyzer fsResourceAnalyzerInterface, cruntime container.Runtime) SummaryProvider { + return &summaryProviderImpl{statsProvider, fsResourceAnalyzer, cruntime} +} + +// Get implements the SummaryProvider interface. +// It queries cadvisor for the latest resource metrics and builds them into a summary. +func (sp *summaryProviderImpl) Get() (*stats.Summary, error) { + options := cadvisorapiv2.RequestOptions{ + IdType: cadvisorapiv2.TypeName, + Count: 2, // 2 samples are needed to compute "instantaneous" CPU + Recursive: true, + } + infos, err := sp.provider.GetContainerInfoV2("/", options) + if err != nil { + if _, ok := infos["/"]; ok { + // If the failure is partial, log it and return a best-effort response. + glog.Errorf("Partial failure issuing GetContainerInfoV2: %v", err) + } else { + return nil, fmt.Errorf("failed GetContainerInfoV2: %v", err) + } + } + + // TODO(timstclair): Consider returning a best-effort response if any of the following errors +// occur.
+ node, err := sp.provider.GetNode() + if err != nil { + return nil, fmt.Errorf("failed GetNode: %v", err) + } + + nodeConfig := sp.provider.GetNodeConfig() + rootFsInfo, err := sp.provider.RootFsInfo() + if err != nil { + return nil, fmt.Errorf("failed RootFsInfo: %v", err) + } + imageFsInfo, err := sp.provider.ImagesFsInfo() + if err != nil { + return nil, fmt.Errorf("failed ImagesFsInfo: %v", err) + } + imageStats, err := sp.runtime.ImageStats() + if err != nil || imageStats == nil { + return nil, fmt.Errorf("failed ImageStats: %v", err) + } + sb := &summaryBuilder{sp.fsResourceAnalyzer, node, nodeConfig, rootFsInfo, imageFsInfo, *imageStats, infos} + return sb.build() +} + +// summaryBuilder aggregates the data structures provided by cadvisor into a Summary result +type summaryBuilder struct { + fsResourceAnalyzer fsResourceAnalyzerInterface + node *api.Node + nodeConfig cm.NodeConfig + rootFsInfo cadvisorapiv2.FsInfo + imageFsInfo cadvisorapiv2.FsInfo + imageStats container.ImageStats + infos map[string]cadvisorapiv2.ContainerInfo +} + +// build returns a Summary from aggregating the input data +func (sb *summaryBuilder) build() (*stats.Summary, error) { + rootInfo, found := sb.infos["/"] + if !found { + return nil, fmt.Errorf("Missing stats for root container") + } + + rootStats := sb.containerInfoV2ToStats("", &rootInfo) + nodeStats := stats.NodeStats{ + NodeName: sb.node.Name, + CPU: rootStats.CPU, + Memory: rootStats.Memory, + Network: sb.containerInfoV2ToNetworkStats("node:"+sb.node.Name, &rootInfo), + Fs: &stats.FsStats{ + AvailableBytes: &sb.rootFsInfo.Available, + CapacityBytes: &sb.rootFsInfo.Capacity, + UsedBytes: &sb.rootFsInfo.Usage}, + StartTime: rootStats.StartTime, + Runtime: &stats.RuntimeStats{ + ImageFs: &stats.FsStats{ + AvailableBytes: &sb.imageFsInfo.Available, + CapacityBytes: &sb.imageFsInfo.Capacity, + UsedBytes: &sb.imageStats.TotalStorageBytes, + }, + }, + } + + systemContainers := map[string]string{ + stats.SystemContainerKubelet: sb.nodeConfig.KubeletCgroupsName, + stats.SystemContainerRuntime: sb.nodeConfig.RuntimeCgroupsName, + stats.SystemContainerMisc: sb.nodeConfig.SystemCgroupsName, + } + for sys, name := range systemContainers { + if info, ok := sb.infos[name]; ok { + nodeStats.SystemContainers = append(nodeStats.SystemContainers, sb.containerInfoV2ToStats(sys, &info)) + } + } + + summary := stats.Summary{ + Node: nodeStats, + Pods: sb.buildSummaryPods(), + } + return &summary, nil +} + +// containerInfoV2FsStats populates the container fs stats +func (sb *summaryBuilder) containerInfoV2FsStats( + info *cadvisorapiv2.ContainerInfo, + cs *stats.ContainerStats) { + + // The container logs live on the node rootfs device + cs.Logs = &stats.FsStats{ + AvailableBytes: &sb.rootFsInfo.Available, + CapacityBytes: &sb.rootFsInfo.Capacity, + } + + // The container rootFs lives on the imageFs devices (which may not be the node root fs) + cs.Rootfs = &stats.FsStats{ + AvailableBytes: &sb.imageFsInfo.Available, + CapacityBytes: &sb.imageFsInfo.Capacity, + } + lcs, found := sb.latestContainerStats(info) + if !found { + return + } + cfs := lcs.Filesystem + if cfs != nil && cfs.BaseUsageBytes != nil { + rootfsUsage := *cfs.BaseUsageBytes + cs.Rootfs.UsedBytes = &rootfsUsage + if cfs.TotalUsageBytes != nil { + logsUsage := *cfs.TotalUsageBytes - *cfs.BaseUsageBytes + cs.Logs.UsedBytes = &logsUsage + } + } +} + +// latestContainerStats returns the latest container stats from cadvisor, or nil if none exist +func (sb *summaryBuilder) latestContainerStats(info
*cadvisorapiv2.ContainerInfo) (*cadvisorapiv2.ContainerStats, bool) { + stats := info.Stats + if len(stats) < 1 { + return nil, false + } + latest := stats[len(stats)-1] + if latest == nil { + return nil, false + } + return latest, true +} + +// buildSummaryPods aggregates and returns the container stats in cinfos by the Pod managing the container. +// Containers not managed by a Pod are omitted. +func (sb *summaryBuilder) buildSummaryPods() []stats.PodStats { + // Map each container to a pod and update the PodStats with container data + podToStats := map[stats.PodReference]*stats.PodStats{} + for key, cinfo := range sb.infos { + // on systemd using devicemapper each mount into the container has an associated cgroup. + // we ignore them to ensure we do not get duplicate entries in our summary. + // for details on .mount units: http://man7.org/linux/man-pages/man5/systemd.mount.5.html + if strings.HasSuffix(key, ".mount") { + continue + } + // Build the Pod key if this container is managed by a Pod + if !sb.isPodManagedContainer(&cinfo) { + continue + } + ref := sb.buildPodRef(&cinfo) + + // Lookup the PodStats for the pod using the PodRef. If none exists, initialize a new entry. + podStats, found := podToStats[ref] + if !found { + podStats = &stats.PodStats{PodRef: ref} + podToStats[ref] = podStats + } + + // Update the PodStats entry with the stats from the container by adding it to stats.Containers + containerName := types.GetContainerName(cinfo.Spec.Labels) + if containerName == leaky.PodInfraContainerName { + // Special case for infrastructure container which is hidden from the user and has network stats + podStats.Network = sb.containerInfoV2ToNetworkStats("pod:"+ref.Namespace+"_"+ref.Name, &cinfo) + podStats.StartTime = unversioned.NewTime(cinfo.Spec.CreationTime) + } else { + podStats.Containers = append(podStats.Containers, sb.containerInfoV2ToStats(containerName, &cinfo)) + } + } + + // Add each PodStats to the result + result := make([]stats.PodStats, 0, len(podToStats)) + for _, podStats := range podToStats { + // Lookup the volume stats for each pod + podUID := kubetypes.UID(podStats.PodRef.UID) + if vstats, found := sb.fsResourceAnalyzer.GetPodVolumeStats(podUID); found { + podStats.VolumeStats = vstats.Volumes + } + result = append(result, *podStats) + } + return result +} + +// buildPodRef returns a PodReference that identifies the Pod managing cinfo +func (sb *summaryBuilder) buildPodRef(cinfo *cadvisorapiv2.ContainerInfo) stats.PodReference { + podName := types.GetPodName(cinfo.Spec.Labels) + podNamespace := types.GetPodNamespace(cinfo.Spec.Labels) + podUID := types.GetPodUID(cinfo.Spec.Labels) + return stats.PodReference{Name: podName, Namespace: podNamespace, UID: podUID} +} + +// isPodManagedContainer returns true if the cinfo container is managed by a Pod +func (sb *summaryBuilder) isPodManagedContainer(cinfo *cadvisorapiv2.ContainerInfo) bool { + podName := types.GetPodName(cinfo.Spec.Labels) + podNamespace := types.GetPodNamespace(cinfo.Spec.Labels) + managed := podName != "" && podNamespace != "" + if !managed && podName != podNamespace { + glog.Warningf( + "Expect container to have either both podName (%s) and podNamespace (%s) labels, or neither.", + podName, podNamespace) + } + return managed +} + +func (sb *summaryBuilder) containerInfoV2ToStats( + name string, + info *cadvisorapiv2.ContainerInfo) stats.ContainerStats { + cStats := stats.ContainerStats{ + StartTime: unversioned.NewTime(info.Spec.CreationTime), + Name: name, + } + cstat, found := 
sb.latestContainerStats(info) + if !found { + return cStats + } + if info.Spec.HasCpu { + cpuStats := stats.CPUStats{ + Time: unversioned.NewTime(cstat.Timestamp), + } + if cstat.CpuInst != nil { + cpuStats.UsageNanoCores = &cstat.CpuInst.Usage.Total + } + if cstat.Cpu != nil { + cpuStats.UsageCoreNanoSeconds = &cstat.Cpu.Usage.Total + } + cStats.CPU = &cpuStats + } + if info.Spec.HasMemory { + pageFaults := cstat.Memory.ContainerData.Pgfault + majorPageFaults := cstat.Memory.ContainerData.Pgmajfault + cStats.Memory = &stats.MemoryStats{ + Time: unversioned.NewTime(cstat.Timestamp), + UsageBytes: &cstat.Memory.Usage, + WorkingSetBytes: &cstat.Memory.WorkingSet, + RSSBytes: &cstat.Memory.RSS, + PageFaults: &pageFaults, + MajorPageFaults: &majorPageFaults, + } + // availableBytes = memory limit (if known) - workingset + if !isMemoryUnlimited(info.Spec.Memory.Limit) { + availableBytes := info.Spec.Memory.Limit - cstat.Memory.WorkingSet + cStats.Memory.AvailableBytes = &availableBytes + } + } + + sb.containerInfoV2FsStats(info, &cStats) + cStats.UserDefinedMetrics = sb.containerInfoV2ToUserDefinedMetrics(info) + return cStats +} + +// Size after which we consider memory to be "unlimited". This is not +// MaxInt64 due to rounding by the kernel. +// TODO: cadvisor should export this https://github.com/google/cadvisor/blob/master/metrics/prometheus.go#L596 +const maxMemorySize = uint64(1 << 62) + +func isMemoryUnlimited(v uint64) bool { + return v > maxMemorySize +} + +func (sb *summaryBuilder) containerInfoV2ToNetworkStats(name string, info *cadvisorapiv2.ContainerInfo) *stats.NetworkStats { + if !info.Spec.HasNetwork { + return nil + } + cstat, found := sb.latestContainerStats(info) + if !found { + return nil + } + for _, inter := range cstat.Network.Interfaces { + if inter.Name == network.DefaultInterfaceName { + return &stats.NetworkStats{ + Time: unversioned.NewTime(cstat.Timestamp), + RxBytes: &inter.RxBytes, + RxErrors: &inter.RxErrors, + TxBytes: &inter.TxBytes, + TxErrors: &inter.TxErrors, + } + } + } + glog.Warningf("Missing default interface %q for %s", network.DefaultInterfaceName, name) + return nil +} + +func (sb *summaryBuilder) containerInfoV2ToUserDefinedMetrics(info *cadvisorapiv2.ContainerInfo) []stats.UserDefinedMetric { + type specVal struct { + ref stats.UserDefinedMetricDescriptor + valType cadvisorapiv1.DataType + time time.Time + value float64 + } + udmMap := map[string]*specVal{} + for _, spec := range info.Spec.CustomMetrics { + udmMap[spec.Name] = &specVal{ + ref: stats.UserDefinedMetricDescriptor{ + Name: spec.Name, + Type: stats.UserDefinedMetricType(spec.Type), + Units: spec.Units, + }, + valType: spec.Format, + } + } + for _, stat := range info.Stats { + for name, values := range stat.CustomMetrics { + specVal, ok := udmMap[name] + if !ok { + glog.Warningf("spec for custom metric %q is missing from cAdvisor output. 
Spec: %+v, Metrics: %+v", name, info.Spec, stat.CustomMetrics) + continue + } + for _, value := range values { + // Pick the most recent value + if value.Timestamp.Before(specVal.time) { + continue + } + specVal.time = value.Timestamp + specVal.value = value.FloatValue + if specVal.valType == cadvisorapiv1.IntType { + specVal.value = float64(value.IntValue) + } + } + } + } + var udm []stats.UserDefinedMetric + for _, specVal := range udmMap { + udm = append(udm, stats.UserDefinedMetric{ + UserDefinedMetricDescriptor: specVal.ref, + Time: unversioned.NewTime(specVal.time), + Value: specVal.value, + }) + } + return udm +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/server/stats/summary_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/server/stats/summary_test.go new file mode 100644 index 000000000000..e78cbe278128 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/server/stats/summary_test.go @@ -0,0 +1,422 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package stats + +import ( + "testing" + "time" + + "github.com/google/cadvisor/info/v1" + "github.com/google/cadvisor/info/v2" + fuzz "github.com/google/gofuzz" + "github.com/stretchr/testify/assert" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/unversioned" + kubestats "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/stats" + "k8s.io/kubernetes/pkg/kubelet/cm" + "k8s.io/kubernetes/pkg/kubelet/container" + "k8s.io/kubernetes/pkg/kubelet/leaky" +) + +const ( + // Offsets from seed value in generated container stats. 
+ offsetCPUUsageCores = iota + offsetCPUUsageCoreSeconds + offsetMemPageFaults + offsetMemMajorPageFaults + offsetMemUsageBytes + offsetMemRSSBytes + offsetMemWorkingSetBytes + offsetNetRxBytes + offsetNetRxErrors + offsetNetTxBytes + offsetNetTxErrors +) + +var ( + timestamp = time.Now() + creationTime = timestamp.Add(-5 * time.Minute) +) + +func TestBuildSummary(t *testing.T) { + node := api.Node{} + node.Name = "FooNode" + nodeConfig := cm.NodeConfig{ + RuntimeCgroupsName: "/docker-daemon", + SystemCgroupsName: "/system", + KubeletCgroupsName: "/kubelet", + } + const ( + namespace0 = "test0" + namespace2 = "test2" + ) + const ( + seedRoot = 0 + seedRuntime = 100 + seedKubelet = 200 + seedMisc = 300 + seedPod0Infra = 1000 + seedPod0Container0 = 2000 + seedPod0Container1 = 2001 + seedPod1Infra = 3000 + seedPod1Container = 4000 + seedPod2Infra = 5000 + seedPod2Container = 6000 + ) + const ( + pName0 = "pod0" + pName1 = "pod1" + pName2 = "pod0" // ensure pName2 conflicts with pName0, but is in a different namespace + ) + const ( + cName00 = "c0" + cName01 = "c1" + cName10 = "c0" // ensure cName10 conflicts with cName00, but is in a different pod + cName20 = "c1" // ensure cName20 conflicts with cName01, but is in a different pod + namespace + ) + + prf0 := kubestats.PodReference{Name: pName0, Namespace: namespace0, UID: "UID" + pName0} + prf1 := kubestats.PodReference{Name: pName1, Namespace: namespace0, UID: "UID" + pName1} + prf2 := kubestats.PodReference{Name: pName2, Namespace: namespace2, UID: "UID" + pName2} + infos := map[string]v2.ContainerInfo{ + "/": summaryTestContainerInfo(seedRoot, "", "", ""), + "/docker-daemon": summaryTestContainerInfo(seedRuntime, "", "", ""), + "/kubelet": summaryTestContainerInfo(seedKubelet, "", "", ""), + "/system": summaryTestContainerInfo(seedMisc, "", "", ""), + // Pod0 - Namespace0 + "/pod0-i": summaryTestContainerInfo(seedPod0Infra, pName0, namespace0, leaky.PodInfraContainerName), + "/pod0-c0": summaryTestContainerInfo(seedPod0Container0, pName0, namespace0, cName00), + "/pod0-c1": summaryTestContainerInfo(seedPod0Container1, pName0, namespace0, cName01), + // Pod1 - Namespace0 + "/pod1-i": summaryTestContainerInfo(seedPod1Infra, pName1, namespace0, leaky.PodInfraContainerName), + "/pod1-c0": summaryTestContainerInfo(seedPod1Container, pName1, namespace0, cName10), + // Pod2 - Namespace2 + "/pod2-i": summaryTestContainerInfo(seedPod2Infra, pName2, namespace2, leaky.PodInfraContainerName), + "/pod2-c0": summaryTestContainerInfo(seedPod2Container, pName2, namespace2, cName20), + } + + rootfs := v2.FsInfo{} + imagefs := v2.FsInfo{} + + // memory limit overrides for each container (used to test available bytes if a memory limit is known) + memoryLimitOverrides := map[string]uint64{ + "/": uint64(1 << 30), + "/pod2-c0": uint64(1 << 15), + } + for name, memoryLimitOverride := range memoryLimitOverrides { + info, found := infos[name] + if !found { + t.Errorf("No container defined with name %v", name) + } + info.Spec.Memory.Limit = memoryLimitOverride + infos[name] = info + } + + sb := &summaryBuilder{ + newFsResourceAnalyzer(&MockStatsProvider{}, time.Minute*5), &node, nodeConfig, rootfs, imagefs, container.ImageStats{}, infos} + summary, err := sb.build() + + assert.NoError(t, err) + nodeStats := summary.Node + assert.Equal(t, "FooNode", nodeStats.NodeName) + assert.EqualValues(t, testTime(creationTime, seedRoot).Unix(), nodeStats.StartTime.Time.Unix()) + checkCPUStats(t, "Node", seedRoot, nodeStats.CPU) + checkMemoryStats(t, "Node", seedRoot, 
infos["/"], nodeStats.Memory) + checkNetworkStats(t, "Node", seedRoot, nodeStats.Network) + + systemSeeds := map[string]int{ + kubestats.SystemContainerRuntime: seedRuntime, + kubestats.SystemContainerKubelet: seedKubelet, + kubestats.SystemContainerMisc: seedMisc, + } + systemContainerToNodeCgroup := map[string]string{ + kubestats.SystemContainerRuntime: nodeConfig.RuntimeCgroupsName, + kubestats.SystemContainerKubelet: nodeConfig.KubeletCgroupsName, + kubestats.SystemContainerMisc: nodeConfig.SystemCgroupsName, + } + for _, sys := range nodeStats.SystemContainers { + name := sys.Name + info := infos[systemContainerToNodeCgroup[name]] + seed, found := systemSeeds[name] + if !found { + t.Errorf("Unknown SystemContainer: %q", name) + } + assert.EqualValues(t, testTime(creationTime, seed).Unix(), sys.StartTime.Time.Unix(), name+".StartTime") + checkCPUStats(t, name, seed, sys.CPU) + checkMemoryStats(t, name, seed, info, sys.Memory) + } + + assert.Equal(t, 3, len(summary.Pods)) + indexPods := make(map[kubestats.PodReference]kubestats.PodStats, len(summary.Pods)) + for _, pod := range summary.Pods { + indexPods[pod.PodRef] = pod + } + + // Validate Pod0 Results + ps, found := indexPods[prf0] + assert.True(t, found) + assert.Len(t, ps.Containers, 2) + indexCon := make(map[string]kubestats.ContainerStats, len(ps.Containers)) + for _, con := range ps.Containers { + indexCon[con.Name] = con + } + con := indexCon[cName00] + assert.EqualValues(t, testTime(creationTime, seedPod0Container0).Unix(), con.StartTime.Time.Unix()) + checkCPUStats(t, "Pod0Container0", seedPod0Container0, con.CPU) + checkMemoryStats(t, "Pod0Container0", seedPod0Container0, infos["/pod0-c0"], con.Memory) + + con = indexCon[cName01] + assert.EqualValues(t, testTime(creationTime, seedPod0Container1).Unix(), con.StartTime.Time.Unix()) + checkCPUStats(t, "Pod0Container1", seedPod0Container1, con.CPU) + checkMemoryStats(t, "Pod0Container1", seedPod0Container1, infos["/pod0-c1"], con.Memory) + + assert.EqualValues(t, testTime(creationTime, seedPod0Infra).Unix(), ps.StartTime.Time.Unix()) + checkNetworkStats(t, "Pod0", seedPod0Infra, ps.Network) + + // Validate Pod1 Results + ps, found = indexPods[prf1] + assert.True(t, found) + assert.Len(t, ps.Containers, 1) + con = ps.Containers[0] + assert.Equal(t, cName10, con.Name) + checkCPUStats(t, "Pod1Container0", seedPod1Container, con.CPU) + checkMemoryStats(t, "Pod1Container0", seedPod1Container, infos["/pod1-c0"], con.Memory) + checkNetworkStats(t, "Pod1", seedPod1Infra, ps.Network) + + // Validate Pod2 Results + ps, found = indexPods[prf2] + assert.True(t, found) + assert.Len(t, ps.Containers, 1) + con = ps.Containers[0] + assert.Equal(t, cName20, con.Name) + checkCPUStats(t, "Pod2Container0", seedPod2Container, con.CPU) + checkMemoryStats(t, "Pod2Container0", seedPod2Container, infos["/pod2-c0"], con.Memory) + checkNetworkStats(t, "Pod2", seedPod2Infra, ps.Network) +} + +func generateCustomMetricSpec() []v1.MetricSpec { + f := fuzz.New().NilChance(0).Funcs( + func(e *v1.MetricSpec, c fuzz.Continue) { + c.Fuzz(&e.Name) + switch c.Intn(3) { + case 0: + e.Type = v1.MetricGauge + case 1: + e.Type = v1.MetricCumulative + case 2: + e.Type = v1.MetricDelta + } + switch c.Intn(2) { + case 0: + e.Format = v1.IntType + case 1: + e.Format = v1.FloatType + } + c.Fuzz(&e.Units) + }) + var ret []v1.MetricSpec + f.Fuzz(&ret) + return ret +} + +func generateCustomMetrics(spec []v1.MetricSpec) map[string][]v1.MetricVal { + ret := map[string][]v1.MetricVal{} + for _, metricSpec := range spec { + f := 
fuzz.New().NilChance(0).Funcs( + func(e *v1.MetricVal, c fuzz.Continue) { + switch metricSpec.Format { + case v1.IntType: + c.Fuzz(&e.IntValue) + case v1.FloatType: + c.Fuzz(&e.FloatValue) + } + }) + + var metrics []v1.MetricVal + f.Fuzz(&metrics) + ret[metricSpec.Name] = metrics + } + return ret +} + +func summaryTestContainerInfo(seed int, podName string, podNamespace string, containerName string) v2.ContainerInfo { + labels := map[string]string{} + if podName != "" { + labels = map[string]string{ + "io.kubernetes.pod.name": podName, + "io.kubernetes.pod.uid": "UID" + podName, + "io.kubernetes.pod.namespace": podNamespace, + "io.kubernetes.container.name": containerName, + } + } + // by default, kernel will set memory.limit_in_bytes to 1 << 63 if not bounded + unlimitedMemory := uint64(1 << 63) + spec := v2.ContainerSpec{ + CreationTime: testTime(creationTime, seed), + HasCpu: true, + HasMemory: true, + HasNetwork: true, + Labels: labels, + Memory: v2.MemorySpec{ + Limit: unlimitedMemory, + }, + CustomMetrics: generateCustomMetricSpec(), + } + + stats := v2.ContainerStats{ + Timestamp: testTime(timestamp, seed), + Cpu: &v1.CpuStats{}, + CpuInst: &v2.CpuInstStats{}, + Memory: &v1.MemoryStats{ + Usage: uint64(seed + offsetMemUsageBytes), + WorkingSet: uint64(seed + offsetMemWorkingSetBytes), + RSS: uint64(seed + offsetMemRSSBytes), + ContainerData: v1.MemoryStatsMemoryData{ + Pgfault: uint64(seed + offsetMemPageFaults), + Pgmajfault: uint64(seed + offsetMemMajorPageFaults), + }, + }, + Network: &v2.NetworkStats{ + Interfaces: []v1.InterfaceStats{{ + Name: "eth0", + RxBytes: uint64(seed + offsetNetRxBytes), + RxErrors: uint64(seed + offsetNetRxErrors), + TxBytes: uint64(seed + offsetNetTxBytes), + TxErrors: uint64(seed + offsetNetTxErrors), + }, { + Name: "cbr0", + RxBytes: 100, + RxErrors: 100, + TxBytes: 100, + TxErrors: 100, + }}, + }, + CustomMetrics: generateCustomMetrics(spec.CustomMetrics), + } + stats.Cpu.Usage.Total = uint64(seed + offsetCPUUsageCoreSeconds) + stats.CpuInst.Usage.Total = uint64(seed + offsetCPUUsageCores) + return v2.ContainerInfo{ + Spec: spec, + Stats: []*v2.ContainerStats{&stats}, + } +} + +func testTime(base time.Time, seed int) time.Time { + return base.Add(time.Duration(seed) * time.Second) +} + +func checkNetworkStats(t *testing.T, label string, seed int, stats *kubestats.NetworkStats) { + assert.NotNil(t, stats) + assert.EqualValues(t, testTime(timestamp, seed).Unix(), stats.Time.Time.Unix(), label+".Net.Time") + assert.EqualValues(t, seed+offsetNetRxBytes, *stats.RxBytes, label+".Net.RxBytes") + assert.EqualValues(t, seed+offsetNetRxErrors, *stats.RxErrors, label+".Net.RxErrors") + assert.EqualValues(t, seed+offsetNetTxBytes, *stats.TxBytes, label+".Net.TxBytes") + assert.EqualValues(t, seed+offsetNetTxErrors, *stats.TxErrors, label+".Net.TxErrors") +} + +func checkCPUStats(t *testing.T, label string, seed int, stats *kubestats.CPUStats) { + assert.EqualValues(t, testTime(timestamp, seed).Unix(), stats.Time.Time.Unix(), label+".CPU.Time") + assert.EqualValues(t, seed+offsetCPUUsageCores, *stats.UsageNanoCores, label+".CPU.UsageCores") + assert.EqualValues(t, seed+offsetCPUUsageCoreSeconds, *stats.UsageCoreNanoSeconds, label+".CPU.UsageCoreSeconds") +} + +func checkMemoryStats(t *testing.T, label string, seed int, info v2.ContainerInfo, stats *kubestats.MemoryStats) { + assert.EqualValues(t, testTime(timestamp, seed).Unix(), stats.Time.Time.Unix(), label+".Mem.Time") + assert.EqualValues(t, seed+offsetMemUsageBytes, *stats.UsageBytes, 
label+".Mem.UsageBytes") + assert.EqualValues(t, seed+offsetMemWorkingSetBytes, *stats.WorkingSetBytes, label+".Mem.WorkingSetBytes") + assert.EqualValues(t, seed+offsetMemRSSBytes, *stats.RSSBytes, label+".Mem.RSSBytes") + assert.EqualValues(t, seed+offsetMemPageFaults, *stats.PageFaults, label+".Mem.PageFaults") + assert.EqualValues(t, seed+offsetMemMajorPageFaults, *stats.MajorPageFaults, label+".Mem.MajorPageFaults") + if !info.Spec.HasMemory || isMemoryUnlimited(info.Spec.Memory.Limit) { + assert.Nil(t, stats.AvailableBytes, label+".Mem.AvailableBytes") + } else { + expected := info.Spec.Memory.Limit - *stats.WorkingSetBytes + assert.EqualValues(t, expected, *stats.AvailableBytes, label+".Mem.AvailableBytes") + } +} + +func TestCustomMetrics(t *testing.T) { + spec := []v1.MetricSpec{ + { + Name: "qos", + Type: v1.MetricGauge, + Format: v1.IntType, + Units: "per second", + }, + { + Name: "cpuLoad", + Type: v1.MetricCumulative, + Format: v1.FloatType, + Units: "count", + }, + } + timestamp1 := time.Now() + timestamp2 := time.Now().Add(time.Minute) + metrics := map[string][]v1.MetricVal{ + "qos": { + { + Timestamp: timestamp1, + IntValue: 10, + }, + { + Timestamp: timestamp2, + IntValue: 100, + }, + }, + "cpuLoad": { + { + Timestamp: timestamp1, + FloatValue: 1.2, + }, + { + Timestamp: timestamp2, + FloatValue: 2.1, + }, + }, + } + cInfo := v2.ContainerInfo{ + Spec: v2.ContainerSpec{ + CustomMetrics: spec, + }, + Stats: []*v2.ContainerStats{ + { + CustomMetrics: metrics, + }, + }, + } + sb := &summaryBuilder{} + // assert.Contains checks for a single element; assert each expected metric separately so both are actually verified. + udm := sb.containerInfoV2ToUserDefinedMetrics(&cInfo) + assert.Contains(t, udm, + kubestats.UserDefinedMetric{ + UserDefinedMetricDescriptor: kubestats.UserDefinedMetricDescriptor{ + Name: "qos", + Type: kubestats.MetricGauge, + Units: "per second", + }, + Time: unversioned.NewTime(timestamp2), + Value: 100, + }) + assert.Contains(t, udm, + kubestats.UserDefinedMetric{ + UserDefinedMetricDescriptor: kubestats.UserDefinedMetricDescriptor{ + Name: "cpuLoad", + Type: kubestats.MetricCumulative, + Units: "count", + }, + Time: unversioned.NewTime(timestamp2), + Value: 2.1, + }) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/server/stats/volume_stat_caculator.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/server/stats/volume_stat_caculator.go new file mode 100644 index 000000000000..65bc6254ce1f --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/server/stats/volume_stat_caculator.go @@ -0,0 +1,122 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package stats + +import ( + "sync" + "sync/atomic" + "time" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/stats" + "k8s.io/kubernetes/pkg/kubelet/util/format" + "k8s.io/kubernetes/pkg/util/wait" + "k8s.io/kubernetes/pkg/volume" + + "github.com/golang/glog" +) + +// volumeStatCalculator calculates volume metrics for a given pod periodically in the background and caches the result +type volumeStatCalculator struct { + statsProvider StatsProvider + jitterPeriod time.Duration + pod *api.Pod + stopChannel chan struct{} + startO sync.Once + stopO sync.Once + latest atomic.Value +} + +// PodVolumeStats encapsulates all VolumeStats for a pod +type PodVolumeStats struct { + Volumes []stats.VolumeStats +} + +// newVolumeStatCalculator creates a new VolumeStatCalculator +func newVolumeStatCalculator(statsProvider StatsProvider, jitterPeriod time.Duration, pod *api.Pod) *volumeStatCalculator { + return &volumeStatCalculator{ + statsProvider: statsProvider, + jitterPeriod: jitterPeriod, + pod: pod, + stopChannel: make(chan struct{}), + } +} + +// StartOnce starts the pod volume stats calculation, which runs periodically in the background until s.StopOnce is called +func (s *volumeStatCalculator) StartOnce() *volumeStatCalculator { + s.startO.Do(func() { + go wait.JitterUntil(func() { + s.calcAndStoreStats() + }, s.jitterPeriod, 1.0, true, s.stopChannel) + }) + return s +} + +// StopOnce stops the background pod volume calculation. It will not stop a currently executing calculation until +// it completes its current iteration. +func (s *volumeStatCalculator) StopOnce() *volumeStatCalculator { + s.stopO.Do(func() { + close(s.stopChannel) + }) + return s +} + +// GetLatest returns the most recent PodVolumeStats from the cache +func (s *volumeStatCalculator) GetLatest() (PodVolumeStats, bool) { + if result := s.latest.Load(); result == nil { + return PodVolumeStats{}, false + } else { + return result.(PodVolumeStats), true + } +} + +// calcAndStoreStats calculates PodVolumeStats for a given pod and writes the result to the s.latest cache. 
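+// An illustrative lifecycle sketch (assumed usage; provider and pod are placeholders, not names defined in this file): +// +//	calc := newVolumeStatCalculator(provider, time.Minute, pod).StartOnce() +//	defer calc.StopOnce() +//	if vstats, ok := calc.GetLatest(); ok { +//		_ = vstats.Volumes // most recently cached []stats.VolumeStats +//	} 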
+func (s *volumeStatCalculator) calcAndStoreStats() { + // Find all Volumes for the Pod + volumes, found := s.statsProvider.ListVolumesForPod(s.pod.UID) + if !found { + return + } + + // Call GetMetrics on each Volume and copy the result to a new VolumeStats.FsStats + stats := make([]stats.VolumeStats, 0, len(volumes)) + for name, v := range volumes { + metric, err := v.GetMetrics() + if err != nil { + // Expected for Volumes that don't support Metrics + // TODO: Disambiguate unsupported from errors + // See issue #20676 + glog.V(4).Infof("Failed to calculate volume metrics for pod %s volume %s: %+v", format.Pod(s.pod), name, err) + continue + } + stats = append(stats, s.parsePodVolumeStats(name, metric)) + } + + // Store the new stats + s.latest.Store(PodVolumeStats{Volumes: stats}) +} + +// parsePodVolumeStats converts (internal) volume.Metrics to (external) stats.VolumeStats structures for the named volume +func (s *volumeStatCalculator) parsePodVolumeStats(volName string, metric *volume.Metrics) stats.VolumeStats { + available := uint64(metric.Available.Value()) + capacity := uint64(metric.Capacity.Value()) + used := uint64(metric.Used.Value()) + return stats.VolumeStats{ + Name: volName, + FsStats: stats.FsStats{AvailableBytes: &available, CapacityBytes: &capacity, UsedBytes: &used}, + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/status/generate.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/status/generate.go new file mode 100644 index 000000000000..cc000929a073 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/status/generate.go @@ -0,0 +1,134 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package status + +import ( + "fmt" + "strings" + + "k8s.io/kubernetes/pkg/api" +) + +// GeneratePodReadyCondition returns a ready condition if all containers in a pod are ready, else it +// returns an unready condition. +func GeneratePodReadyCondition(spec *api.PodSpec, containerStatuses []api.ContainerStatus, podPhase api.PodPhase) api.PodCondition { + // Find if all containers are ready or not. + if containerStatuses == nil { + return api.PodCondition{ + Type: api.PodReady, + Status: api.ConditionFalse, + Reason: "UnknownContainerStatuses", + } + } + unknownContainers := []string{} + unreadyContainers := []string{} + for _, container := range spec.Containers { + if containerStatus, ok := api.GetContainerStatus(containerStatuses, container.Name); ok { + if !containerStatus.Ready { + unreadyContainers = append(unreadyContainers, container.Name) + } + } else { + unknownContainers = append(unknownContainers, container.Name) + } + } + + // If all containers are known and succeeded, just return PodCompleted. 
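+	// For example, a pod from a completed batch Job whose containers all exited successfully is reported Ready=False with Reason=PodCompleted rather than ContainersNotReady. 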
+ if podPhase == api.PodSucceeded && len(unknownContainers) == 0 { + return api.PodCondition{ + Type: api.PodReady, + Status: api.ConditionFalse, + Reason: "PodCompleted", + } + } + + unreadyMessages := []string{} + if len(unknownContainers) > 0 { + unreadyMessages = append(unreadyMessages, fmt.Sprintf("containers with unknown status: %s", unknownContainers)) + } + if len(unreadyContainers) > 0 { + unreadyMessages = append(unreadyMessages, fmt.Sprintf("containers with unready status: %s", unreadyContainers)) + } + unreadyMessage := strings.Join(unreadyMessages, ", ") + if unreadyMessage != "" { + return api.PodCondition{ + Type: api.PodReady, + Status: api.ConditionFalse, + Reason: "ContainersNotReady", + Message: unreadyMessage, + } + } + + return api.PodCondition{ + Type: api.PodReady, + Status: api.ConditionTrue, + } +} + +// GeneratePodInitializedCondition returns an initialized condition if all init containers in a pod are ready, else it +// returns an uninitialized condition. +func GeneratePodInitializedCondition(spec *api.PodSpec, containerStatuses []api.ContainerStatus, podPhase api.PodPhase) api.PodCondition { + // Find if all init containers are ready or not. + if containerStatuses == nil && len(spec.InitContainers) > 0 { + return api.PodCondition{ + Type: api.PodInitialized, + Status: api.ConditionFalse, + Reason: "UnknownContainerStatuses", + } + } + unknownContainers := []string{} + unreadyContainers := []string{} + for _, container := range spec.InitContainers { + if containerStatus, ok := api.GetContainerStatus(containerStatuses, container.Name); ok { + if !containerStatus.Ready { + unreadyContainers = append(unreadyContainers, container.Name) + } + } else { + unknownContainers = append(unknownContainers, container.Name) + } + } + + // If all init containers are known and succeeded, just return PodCompleted. + if podPhase == api.PodSucceeded && len(unknownContainers) == 0 { + return api.PodCondition{ + Type: api.PodInitialized, + Status: api.ConditionTrue, + Reason: "PodCompleted", + } + } + + unreadyMessages := []string{} + if len(unknownContainers) > 0 { + unreadyMessages = append(unreadyMessages, fmt.Sprintf("containers with unknown status: %s", unknownContainers)) + } + if len(unreadyContainers) > 0 { + unreadyMessages = append(unreadyMessages, fmt.Sprintf("containers with incomplete status: %s", unreadyContainers)) + } + unreadyMessage := strings.Join(unreadyMessages, ", ") + if unreadyMessage != "" { + return api.PodCondition{ + Type: api.PodInitialized, + Status: api.ConditionFalse, + Reason: "ContainersNotInitialized", + Message: unreadyMessage, + } + } + + return api.PodCondition{ + Type: api.PodInitialized, + Status: api.ConditionTrue, + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/status/generate_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/status/generate_test.go new file mode 100644 index 000000000000..2d39c238d550 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/status/generate_test.go @@ -0,0 +1,143 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package status + +import ( + "reflect" + "testing" + + "k8s.io/kubernetes/pkg/api" +) + +func TestGeneratePodReadyCondition(t *testing.T) { + tests := []struct { + spec *api.PodSpec + containerStatuses []api.ContainerStatus + podPhase api.PodPhase + expected api.PodCondition + }{ + { + spec: nil, + containerStatuses: nil, + podPhase: api.PodRunning, + expected: getReadyCondition(false, "UnknownContainerStatuses", ""), + }, + { + spec: &api.PodSpec{}, + containerStatuses: []api.ContainerStatus{}, + podPhase: api.PodRunning, + expected: getReadyCondition(true, "", ""), + }, + { + spec: &api.PodSpec{ + Containers: []api.Container{ + {Name: "1234"}, + }, + }, + containerStatuses: []api.ContainerStatus{}, + podPhase: api.PodRunning, + expected: getReadyCondition(false, "ContainersNotReady", "containers with unknown status: [1234]"), + }, + { + spec: &api.PodSpec{ + Containers: []api.Container{ + {Name: "1234"}, + {Name: "5678"}, + }, + }, + containerStatuses: []api.ContainerStatus{ + getReadyStatus("1234"), + getReadyStatus("5678"), + }, + podPhase: api.PodRunning, + expected: getReadyCondition(true, "", ""), + }, + { + spec: &api.PodSpec{ + Containers: []api.Container{ + {Name: "1234"}, + {Name: "5678"}, + }, + }, + containerStatuses: []api.ContainerStatus{ + getReadyStatus("1234"), + }, + podPhase: api.PodRunning, + expected: getReadyCondition(false, "ContainersNotReady", "containers with unknown status: [5678]"), + }, + { + spec: &api.PodSpec{ + Containers: []api.Container{ + {Name: "1234"}, + {Name: "5678"}, + }, + }, + containerStatuses: []api.ContainerStatus{ + getReadyStatus("1234"), + getNotReadyStatus("5678"), + }, + podPhase: api.PodRunning, + expected: getReadyCondition(false, "ContainersNotReady", "containers with unready status: [5678]"), + }, + { + spec: &api.PodSpec{ + Containers: []api.Container{ + {Name: "1234"}, + }, + }, + containerStatuses: []api.ContainerStatus{ + getNotReadyStatus("1234"), + }, + podPhase: api.PodSucceeded, + expected: getReadyCondition(false, "PodCompleted", ""), + }, + } + + for i, test := range tests { + condition := GeneratePodReadyCondition(test.spec, test.containerStatuses, test.podPhase) + if !reflect.DeepEqual(condition, test.expected) { + t.Errorf("On test case %v, expected:\n%+v\ngot\n%+v\n", i, test.expected, condition) + } + } +} + +func getReadyCondition(ready bool, reason, message string) api.PodCondition { + status := api.ConditionFalse + if ready { + status = api.ConditionTrue + } + return api.PodCondition{ + Type: api.PodReady, + Status: status, + Reason: reason, + Message: message, + } +} + +func getReadyStatus(cName string) api.ContainerStatus { + return api.ContainerStatus{ + Name: cName, + Ready: true, + } +} + +func getNotReadyStatus(cName string) api.ContainerStatus { + return api.ContainerStatus{ + Name: cName, + Ready: false, + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/status/manager.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/status/manager.go new file mode 100644 index 000000000000..c9b5b1e4e2cd --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/status/manager.go @@ -0,0 +1,569 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package status + +import ( + "sort" + "sync" + "time" + + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" + + "github.com/golang/glog" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/errors" + "k8s.io/kubernetes/pkg/api/unversioned" + kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" + kubepod "k8s.io/kubernetes/pkg/kubelet/pod" + kubetypes "k8s.io/kubernetes/pkg/kubelet/types" + "k8s.io/kubernetes/pkg/kubelet/util/format" + "k8s.io/kubernetes/pkg/types" + "k8s.io/kubernetes/pkg/util/diff" + "k8s.io/kubernetes/pkg/util/wait" +) + +// A wrapper around api.PodStatus that includes a version to enforce that stale pod statuses are +// not sent to the API server. +type versionedPodStatus struct { + status api.PodStatus + // Monotonically increasing version number (per pod). + version uint64 + // Pod name & namespace, for sending updates to API server. + podName string + podNamespace string +} + +type podStatusSyncRequest struct { + podUID types.UID + status versionedPodStatus +} + +// Updates pod statuses in the apiserver. Writes only when the new status has changed. +// All methods are thread-safe. +type manager struct { + kubeClient clientset.Interface + podManager kubepod.Manager + // Map from pod UID to sync status of the corresponding pod. + podStatuses map[types.UID]versionedPodStatus + podStatusesLock sync.RWMutex + podStatusChannel chan podStatusSyncRequest + // Map from (mirror) pod UID to latest status version successfully sent to the API server. + // apiStatusVersions must only be accessed from the sync thread. + apiStatusVersions map[types.UID]uint64 +} + +// status.Manager is the source of truth for kubelet pod status, and should be kept up-to-date with +// the latest api.PodStatus. It also syncs updates back to the API server. +type Manager interface { + // Start the API server status sync loop. + Start() + + // GetPodStatus returns the cached status for the provided pod UID, as well as whether it + // was a cache hit. + GetPodStatus(uid types.UID) (api.PodStatus, bool) + + // SetPodStatus updates the cached status for the given pod, and triggers a status update. + SetPodStatus(pod *api.Pod, status api.PodStatus) + + // SetContainerReadiness updates the cached container status with the given readiness, and + // triggers a status update. + SetContainerReadiness(podUID types.UID, containerID kubecontainer.ContainerID, ready bool) + + // TerminatePod resets the container status for the provided pod to terminated and triggers + // a status update. + TerminatePod(pod *api.Pod) + + // RemoveOrphanedStatuses scans the status cache and removes any entries for pods not included in + // the provided podUIDs. 
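+	// Illustratively (assumed caller, not defined in this file), the kubelet computes the set of active pod UIDs during housekeeping and calls RemoveOrphanedStatuses(activePodUIDs) so entries for deleted pods do not accumulate in the cache. 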
+ RemoveOrphanedStatuses(podUIDs map[types.UID]bool) +} + +const syncPeriod = 10 * time.Second + +func NewManager(kubeClient clientset.Interface, podManager kubepod.Manager) Manager { + return &manager{ + kubeClient: kubeClient, + podManager: podManager, + podStatuses: make(map[types.UID]versionedPodStatus), + podStatusChannel: make(chan podStatusSyncRequest, 1000), // Buffer up to 1000 statuses + apiStatusVersions: make(map[types.UID]uint64), + } +} + +// isStatusEqual returns true if the given pod statuses are equal, false otherwise. +// Callers are expected to normalize both statuses beforehand (see normalizeStatus) so +// that meaningless changes are ignored. +func isStatusEqual(oldStatus, status *api.PodStatus) bool { + return api.Semantic.DeepEqual(status, oldStatus) +} + +func (m *manager) Start() { + // Don't start the status manager if we don't have a client. This will happen + // on the master, where the kubelet is responsible for bootstrapping the pods + // of the master components. + if m.kubeClient == nil { + glog.Infof("Kubernetes client is nil, not starting status manager.") + return + } + + glog.Info("Starting to sync pod status with apiserver") + syncTicker := time.Tick(syncPeriod) + // syncPod and syncBatch share the same goroutine to avoid sync races. + go wait.Forever(func() { + select { + case syncRequest := <-m.podStatusChannel: + m.syncPod(syncRequest.podUID, syncRequest.status) + case <-syncTicker: + m.syncBatch() + } + }, 0) +} + +func (m *manager) GetPodStatus(uid types.UID) (api.PodStatus, bool) { + m.podStatusesLock.RLock() + defer m.podStatusesLock.RUnlock() + status, ok := m.podStatuses[m.podManager.TranslatePodUID(uid)] + return status.status, ok +} + +func (m *manager) SetPodStatus(pod *api.Pod, status api.PodStatus) { + m.podStatusesLock.Lock() + defer m.podStatusesLock.Unlock() + // Make sure we're caching a deep copy. + status, err := copyStatus(&status) + if err != nil { + return + } + // Force a status update if deletion timestamp is set. This is necessary + // because if the pod is in the non-running state, the pod worker still + // needs to be able to trigger an update and/or deletion. + m.updateStatusInternal(pod, status, pod.DeletionTimestamp != nil) +} + +func (m *manager) SetContainerReadiness(podUID types.UID, containerID kubecontainer.ContainerID, ready bool) { + m.podStatusesLock.Lock() + defer m.podStatusesLock.Unlock() + + pod, ok := m.podManager.GetPodByUID(podUID) + if !ok { + glog.V(4).Infof("Pod %q has been deleted, no need to update readiness", string(podUID)) + return + } + + oldStatus, found := m.podStatuses[pod.UID] + if !found { + glog.Warningf("Container readiness changed before pod has synced: %q - %q", + format.Pod(pod), containerID.String()) + return + } + + // Find the container to update. + containerStatus, _, ok := findContainerStatus(&oldStatus.status, containerID.String()) + if !ok { + glog.Warningf("Container readiness changed for unknown container: %q - %q", + format.Pod(pod), containerID.String()) + return + } + + if containerStatus.Ready == ready { + glog.V(4).Infof("Container readiness unchanged (%v): %q - %q", ready, + format.Pod(pod), containerID.String()) + return + } + + // Make sure we're not updating the cached version. + status, err := copyStatus(&oldStatus.status) + if err != nil { + return + } + containerStatus, _, _ = findContainerStatus(&status, containerID.String()) + containerStatus.Ready = ready + + // Update pod condition. 
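+	// Recompute the PodReady condition from the updated container statuses and splice it into the copied status, replacing any existing PodReady entry. 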
+ readyConditionIndex := -1 + for i, condition := range status.Conditions { + if condition.Type == api.PodReady { + readyConditionIndex = i + break + } + } + readyCondition := GeneratePodReadyCondition(&pod.Spec, status.ContainerStatuses, status.Phase) + if readyConditionIndex != -1 { + status.Conditions[readyConditionIndex] = readyCondition + } else { + glog.Warningf("PodStatus missing PodReady condition: %+v", status) + status.Conditions = append(status.Conditions, readyCondition) + } + + m.updateStatusInternal(pod, status, false) +} + +func findContainerStatus(status *api.PodStatus, containerID string) (containerStatus *api.ContainerStatus, init bool, ok bool) { + // Find the container to update. + containerIndex := -1 + for i, c := range status.ContainerStatuses { + if c.ContainerID == containerID { + containerIndex = i + break + } + } + if containerIndex != -1 { + return &status.ContainerStatuses[containerIndex], false, true + } + + for i, c := range status.InitContainerStatuses { + if c.ContainerID == containerID { + containerIndex = i + break + } + } + if containerIndex != -1 { + return &status.InitContainerStatuses[containerIndex], true, true + } + return nil, false, false +} + +func (m *manager) TerminatePod(pod *api.Pod) { + m.podStatusesLock.Lock() + defer m.podStatusesLock.Unlock() + oldStatus := &pod.Status + if cachedStatus, ok := m.podStatuses[pod.UID]; ok { + oldStatus = &cachedStatus.status + } + status, err := copyStatus(oldStatus) + if err != nil { + return + } + for i := range status.ContainerStatuses { + status.ContainerStatuses[i].State = api.ContainerState{ + Terminated: &api.ContainerStateTerminated{}, + } + } + for i := range status.InitContainerStatuses { + status.InitContainerStatuses[i].State = api.ContainerState{ + Terminated: &api.ContainerStateTerminated{}, + } + } + // Pass the copied status with the terminated container states; passing pod.Status here would discard them. + m.updateStatusInternal(pod, status, true) +} + +// updateStatusInternal updates the internal status cache, and queues an update to the api server if +// necessary. Returns whether an update was triggered. +// This method IS NOT THREAD SAFE and must be called from a locked function. +func (m *manager) updateStatusInternal(pod *api.Pod, status api.PodStatus, forceUpdate bool) bool { + var oldStatus api.PodStatus + cachedStatus, isCached := m.podStatuses[pod.UID] + if isCached { + oldStatus = cachedStatus.status + } else if mirrorPod, ok := m.podManager.GetMirrorPodByPod(pod); ok { + oldStatus = mirrorPod.Status + } else { + oldStatus = pod.Status + } + + // Set ReadyCondition.LastTransitionTime. + if _, readyCondition := api.GetPodCondition(&status, api.PodReady); readyCondition != nil { + // Need to set LastTransitionTime. + lastTransitionTime := unversioned.Now() + _, oldReadyCondition := api.GetPodCondition(&oldStatus, api.PodReady) + if oldReadyCondition != nil && readyCondition.Status == oldReadyCondition.Status { + lastTransitionTime = oldReadyCondition.LastTransitionTime + } + readyCondition.LastTransitionTime = lastTransitionTime + } + + // Set InitializedCondition.LastTransitionTime. + if _, initCondition := api.GetPodCondition(&status, api.PodInitialized); initCondition != nil { + // Need to set LastTransitionTime. 
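+		// Carry the previous transition time forward when the condition status is unchanged; only a real status flip gets a fresh timestamp. 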
+ lastTransitionTime := unversioned.Now() + _, oldInitCondition := api.GetPodCondition(&oldStatus, api.PodInitialized) + if oldInitCondition != nil && initCondition.Status == oldInitCondition.Status { + lastTransitionTime = oldInitCondition.LastTransitionTime + } + initCondition.LastTransitionTime = lastTransitionTime + } + + // Ensure that the start time does not change across updates. + if oldStatus.StartTime != nil && !oldStatus.StartTime.IsZero() { + status.StartTime = oldStatus.StartTime + } else if status.StartTime.IsZero() { + // if the status has no start time, we need to set an initial time + now := unversioned.Now() + status.StartTime = &now + } + + normalizeStatus(&status) + // The intent here is to prevent concurrent updates to a pod's status from + // clobbering each other so the phase of a pod progresses monotonically. + if isCached && isStatusEqual(&cachedStatus.status, &status) && !forceUpdate { + glog.V(3).Infof("Ignoring same status for pod %q, status: %+v", format.Pod(pod), status) + return false // No new status. + } + + newStatus := versionedPodStatus{ + status: status, + version: cachedStatus.version + 1, + podName: pod.Name, + podNamespace: pod.Namespace, + } + m.podStatuses[pod.UID] = newStatus + + select { + case m.podStatusChannel <- podStatusSyncRequest{pod.UID, newStatus}: + return true + default: + // Let the periodic syncBatch handle the update if the channel is full. + // We can't block, since we hold the mutex lock. + glog.V(4).Infof("Skipping the status update for pod %q for now because the channel is full; status: %+v", + format.Pod(pod), status) + return false + } +} + +// deletePodStatus simply removes the given pod from the status cache. +func (m *manager) deletePodStatus(uid types.UID) { + m.podStatusesLock.Lock() + defer m.podStatusesLock.Unlock() + delete(m.podStatuses, uid) +} + +// TODO(filipg): It'd be cleaner if we can do this without signal from user. +func (m *manager) RemoveOrphanedStatuses(podUIDs map[types.UID]bool) { + m.podStatusesLock.Lock() + defer m.podStatusesLock.Unlock() + for key := range m.podStatuses { + if _, ok := podUIDs[key]; !ok { + glog.V(5).Infof("Removing %q from status map.", key) + delete(m.podStatuses, key) + } + } +} + +// syncBatch syncs pod statuses with the apiserver. +func (m *manager) syncBatch() { + var updatedStatuses []podStatusSyncRequest + podToMirror, mirrorToPod := m.podManager.GetUIDTranslations() + func() { // Critical section + m.podStatusesLock.RLock() + defer m.podStatusesLock.RUnlock() + + // Clean up orphaned versions. + for uid := range m.apiStatusVersions { + _, hasPod := m.podStatuses[uid] + _, hasMirror := mirrorToPod[uid] + if !hasPod && !hasMirror { + delete(m.apiStatusVersions, uid) + } + } + + for uid, status := range m.podStatuses { + syncedUID := uid + if mirrorUID, ok := podToMirror[uid]; ok { + syncedUID = mirrorUID + } + if m.needsUpdate(syncedUID, status) { + updatedStatuses = append(updatedStatuses, podStatusSyncRequest{uid, status}) + } else if m.needsReconcile(uid, status.status) { + // Delete the apiStatusVersions here to force an update on the pod status. + // In most cases the deleted apiStatusVersions here should be filled + // soon after the following syncPod() [if syncPod() syncs an update + // successfully]. 
+ delete(m.apiStatusVersions, syncedUID) + updatedStatuses = append(updatedStatuses, podStatusSyncRequest{uid, status}) + } + } + }() + + for _, update := range updatedStatuses { + m.syncPod(update.podUID, update.status) + } +} + +// syncPod syncs the given status with the API server. The caller must not hold the lock. +func (m *manager) syncPod(uid types.UID, status versionedPodStatus) { + if !m.needsUpdate(uid, status) { + glog.V(1).Infof("Status for pod %q is up-to-date; skipping", uid) + return + } + + // TODO: make me easier to express from client code + pod, err := m.kubeClient.Core().Pods(status.podNamespace).Get(status.podName) + if errors.IsNotFound(err) { + glog.V(3).Infof("Pod %q (%s) does not exist on the server", status.podName, uid) + // If the Pod is deleted the status will be cleared in + // RemoveOrphanedStatuses, so we just ignore the update here. + return + } + if err == nil { + translatedUID := m.podManager.TranslatePodUID(pod.UID) + if len(translatedUID) > 0 && translatedUID != uid { + glog.V(3).Infof("Pod %q was deleted and then recreated, skipping status update", format.Pod(pod)) + m.deletePodStatus(uid) + return + } + pod.Status = status.status + // TODO: handle conflict as a retry, make that easier too. + pod, err = m.kubeClient.Core().Pods(pod.Namespace).UpdateStatus(pod) + if err == nil { + glog.V(3).Infof("Status for pod %q updated successfully: %+v", format.Pod(pod), status) + m.apiStatusVersions[pod.UID] = status.version + if kubepod.IsMirrorPod(pod) { + // We don't handle graceful deletion of mirror pods. + return + } + if pod.DeletionTimestamp == nil { + return + } + if !notRunning(pod.Status.ContainerStatuses) { + glog.V(3).Infof("Pod %q is terminated, but some containers are still running", format.Pod(pod)) + return + } + deleteOptions := api.NewDeleteOptions(0) + // Use the pod UID as the precondition for deletion to prevent deleting a newly created pod with the same name and namespace. + deleteOptions.Preconditions = api.NewUIDPreconditions(string(pod.UID)) + if err := m.kubeClient.Core().Pods(pod.Namespace).Delete(pod.Name, deleteOptions); err == nil { + glog.V(3).Infof("Pod %q fully terminated and removed from etcd", format.Pod(pod)) + m.deletePodStatus(uid) + return + } + } + } + + // We failed to update status, wait for periodic sync to retry. + glog.Warningf("Failed to update status for pod %q: %v", format.Pod(pod), err) +} + +// needsUpdate returns whether the status is stale for the given pod UID. +// This method is not thread safe, and must only be accessed by the sync thread. +func (m *manager) needsUpdate(uid types.UID, status versionedPodStatus) bool { + latest, ok := m.apiStatusVersions[uid] + return !ok || latest < status.version +} + +// needsReconcile compares the given status with the status in the pod manager (which +// in fact comes from the apiserver), and returns whether the status needs to be reconciled with +// the apiserver. Currently, when the pod status is inconsistent between the apiserver and the kubelet, +// the kubelet should forcibly send an update to reconcile the inconsistency, because the kubelet +// should be the source of truth of pod status. +// NOTE(random-liu): It's simpler to pass in mirror pod uid and get mirror pod by uid, but +// now the pod manager only supports getting mirror pod by static pod, so we have to pass +// static pod uid here. +// TODO(random-liu): Simplify the logic when mirror pod manager is added. 
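+// For example: if something else mutates the pod status on the apiserver, the apiserver copy diverges from the kubelet's cached status even though the kubelet never sent an update; needsReconcile detects the drift so that syncBatch can force a fresh status write on the next sync. 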
+func (m *manager) needsReconcile(uid types.UID, status api.PodStatus) bool { + // The pod could be a static pod, so we should translate first. + pod, ok := m.podManager.GetPodByUID(uid) + if !ok { + glog.V(4).Infof("Pod %q has been deleted, no need to reconcile", string(uid)) + return false + } + // If the pod is a static pod, we should check its mirror pod, because only status in mirror pod is meaningful to us. + if kubepod.IsStaticPod(pod) { + mirrorPod, ok := m.podManager.GetMirrorPodByPod(pod) + if !ok { + glog.V(4).Infof("Static pod %q has no corresponding mirror pod, no need to reconcile", format.Pod(pod)) + return false + } + pod = mirrorPod + } + + podStatus, err := copyStatus(&pod.Status) + if err != nil { + return false + } + normalizeStatus(&podStatus) + + if isStatusEqual(&podStatus, &status) { + // If the status from the source is the same as the cached status, + // reconcile is not needed. Just return. + return false + } + glog.V(3).Infof("Pod status is inconsistent with cached status for pod %q, a reconciliation should be triggered:\n %+v", format.Pod(pod), + diff.ObjectDiff(podStatus, status)) + + return true +} + +// We add this function, because the apiserver only supports *RFC3339* now, which means that the timestamp returned by +// the apiserver has no nanosecond information. However, the timestamp returned by unversioned.Now() contains nanoseconds, +// so when we compare the status from the apiserver with the cached status, isStatusEqual() will always return false. +// There is related issue #15262 and PR #15263 about this. +// In fact, the best way to solve this is to do it on the API side. However, for now, we normalize the status locally in +// the kubelet temporarily. +// TODO(random-liu): Remove timestamp related logic after apiserver supports nanosecond or makes it consistent. +func normalizeStatus(status *api.PodStatus) *api.PodStatus { + normalizeTimeStamp := func(t *unversioned.Time) { + *t = t.Rfc3339Copy() + } + normalizeContainerState := func(c *api.ContainerState) { + if c.Running != nil { + normalizeTimeStamp(&c.Running.StartedAt) + } + if c.Terminated != nil { + normalizeTimeStamp(&c.Terminated.StartedAt) + normalizeTimeStamp(&c.Terminated.FinishedAt) + } + } + + if status.StartTime != nil { + normalizeTimeStamp(status.StartTime) + } + for i := range status.Conditions { + condition := &status.Conditions[i] + normalizeTimeStamp(&condition.LastProbeTime) + normalizeTimeStamp(&condition.LastTransitionTime) + } + + // update container statuses + for i := range status.ContainerStatuses { + cstatus := &status.ContainerStatuses[i] + normalizeContainerState(&cstatus.State) + normalizeContainerState(&cstatus.LastTerminationState) + } + // Sort the container statuses, so that the order won't affect the result of comparison + sort.Sort(kubetypes.SortedContainerStatuses(status.ContainerStatuses)) + + // update init container statuses + for i := range status.InitContainerStatuses { + cstatus := &status.InitContainerStatuses[i] + normalizeContainerState(&cstatus.State) + normalizeContainerState(&cstatus.LastTerminationState) + } + // Sort the container statuses, so that the order won't affect the result of comparison + sort.Sort(kubetypes.SortedContainerStatuses(status.InitContainerStatuses)) + return status +} + +// notRunning returns true if every status is terminated or waiting, or the status list +// is empty. 
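+// For example, a terminated pod whose container statuses are all Terminated yields true, which lets syncPod proceed with the final graceful deletion of the API object. 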
+func notRunning(statuses []api.ContainerStatus) bool { + for _, status := range statuses { + if status.State.Terminated == nil && status.State.Waiting == nil { + return false + } + } + return true +} + +func copyStatus(source *api.PodStatus) (api.PodStatus, error) { + clone, err := api.Scheme.DeepCopy(source) + if err != nil { + glog.Errorf("Failed to clone status %+v: %v", source, err) + return api.PodStatus{}, err + } + status := *clone.(*api.PodStatus) + return status, nil +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/status/manager_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/status/manager_test.go new file mode 100644 index 000000000000..a9875549928d --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/status/manager_test.go @@ -0,0 +1,780 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package status + +import ( + "fmt" + "math/rand" + "strconv" + "testing" + "time" + + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" + "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" + "k8s.io/kubernetes/pkg/client/testing/core" + + "github.com/stretchr/testify/assert" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/errors" + "k8s.io/kubernetes/pkg/api/unversioned" + kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" + kubepod "k8s.io/kubernetes/pkg/kubelet/pod" + podtest "k8s.io/kubernetes/pkg/kubelet/pod/testing" + kubetypes "k8s.io/kubernetes/pkg/kubelet/types" + "k8s.io/kubernetes/pkg/runtime" +) + +// Generate a new instance of the test pod with the same initial value. +func getTestPod() *api.Pod { + return &api.Pod{ + ObjectMeta: api.ObjectMeta{ + UID: "12345678", + Name: "foo", + Namespace: "new", + }, + } +} + +// After adding reconciliation, if the status in the pod manager is different from the cached status, a reconciliation +// will be triggered, which would mess up all the old unit tests. +// To simplify the implementation of the unit tests, we add testSyncBatch() here; it makes sure the statuses in +// the pod manager are the same as the cached ones before syncBatch() runs, so as to avoid reconciling. 
+func (m *manager) testSyncBatch() { + for uid, status := range m.podStatuses { + pod, ok := m.podManager.GetPodByUID(uid) + if ok { + pod.Status = status.status + } + pod, ok = m.podManager.GetMirrorPodByPod(pod) + if ok { + pod.Status = status.status + } + } + m.syncBatch() +} + +func newTestManager(kubeClient clientset.Interface) *manager { + podManager := kubepod.NewBasicPodManager(podtest.NewFakeMirrorClient()) + podManager.AddPod(getTestPod()) + return NewManager(kubeClient, podManager).(*manager) +} + +func generateRandomMessage() string { + return strconv.Itoa(rand.Int()) +} + +func getRandomPodStatus() api.PodStatus { + return api.PodStatus{ + Message: generateRandomMessage(), + } +} + +func verifyActions(t *testing.T, kubeClient clientset.Interface, expectedActions []core.Action) { + actions := kubeClient.(*fake.Clientset).Actions() + if len(actions) != len(expectedActions) { + t.Fatalf("unexpected actions, got: %+v expected: %+v", actions, expectedActions) + return + } + for i := 0; i < len(actions); i++ { + e := expectedActions[i] + a := actions[i] + if !a.Matches(e.GetVerb(), e.GetResource().Resource) || a.GetSubresource() != e.GetSubresource() { + t.Errorf("unexpected actions, got: %+v expected: %+v", actions, expectedActions) + } + } +} + +func verifyUpdates(t *testing.T, manager *manager, expectedUpdates int) { + // Consume all updates in the channel. + numUpdates := 0 + for { + hasUpdate := true + select { + case <-manager.podStatusChannel: + numUpdates++ + default: + hasUpdate = false + } + + if !hasUpdate { + break + } + } + + if numUpdates != expectedUpdates { + t.Errorf("unexpected number of updates %d, expected %d", numUpdates, expectedUpdates) + } +} + +func TestNewStatus(t *testing.T) { + syncer := newTestManager(&fake.Clientset{}) + testPod := getTestPod() + syncer.SetPodStatus(testPod, getRandomPodStatus()) + verifyUpdates(t, syncer, 1) + + status := expectPodStatus(t, syncer, testPod) + if status.StartTime.IsZero() { + t.Errorf("SetPodStatus did not set a proper start time value") + } +} + +func TestNewStatusPreservesPodStartTime(t *testing.T) { + syncer := newTestManager(&fake.Clientset{}) + pod := &api.Pod{ + ObjectMeta: api.ObjectMeta{ + UID: "12345678", + Name: "foo", + Namespace: "new", + }, + Status: api.PodStatus{}, + } + now := unversioned.Now() + startTime := unversioned.NewTime(now.Time.Add(-1 * time.Minute)) + pod.Status.StartTime = &startTime + syncer.SetPodStatus(pod, getRandomPodStatus()) + + status := expectPodStatus(t, syncer, pod) + if !status.StartTime.Time.Equal(startTime.Time) { + t.Errorf("Unexpected start time, expected %v, actual %v", startTime, status.StartTime) + } +} + +func getReadyPodStatus() api.PodStatus { + return api.PodStatus{ + Conditions: []api.PodCondition{ + { + Type: api.PodReady, + Status: api.ConditionTrue, + }, + }, + } +} + +func TestNewStatusSetsReadyTransitionTime(t *testing.T) { + syncer := newTestManager(&fake.Clientset{}) + podStatus := getReadyPodStatus() + pod := &api.Pod{ + ObjectMeta: api.ObjectMeta{ + UID: "12345678", + Name: "foo", + Namespace: "new", + }, + Status: api.PodStatus{}, + } + syncer.SetPodStatus(pod, podStatus) + verifyUpdates(t, syncer, 1) + status := expectPodStatus(t, syncer, pod) + readyCondition := api.GetPodReadyCondition(status) + if readyCondition.LastTransitionTime.IsZero() { + t.Errorf("Unexpected: last transition time not set") + } +} + +func TestChangedStatus(t *testing.T) { + syncer := newTestManager(&fake.Clientset{}) + testPod := getTestPod() + syncer.SetPodStatus(testPod, 
getRandomPodStatus()) + syncer.SetPodStatus(testPod, getRandomPodStatus()) + verifyUpdates(t, syncer, 2) +} + +func TestChangedStatusKeepsStartTime(t *testing.T) { + syncer := newTestManager(&fake.Clientset{}) + testPod := getTestPod() + now := unversioned.Now() + firstStatus := getRandomPodStatus() + firstStatus.StartTime = &now + syncer.SetPodStatus(testPod, firstStatus) + syncer.SetPodStatus(testPod, getRandomPodStatus()) + verifyUpdates(t, syncer, 2) + finalStatus := expectPodStatus(t, syncer, testPod) + if finalStatus.StartTime.IsZero() { + t.Errorf("StartTime should not be zero") + } + expected := now.Rfc3339Copy() + if !finalStatus.StartTime.Equal(expected) { + t.Errorf("Expected %v, but got %v", expected, finalStatus.StartTime) + } +} + +func TestChangedStatusUpdatesLastTransitionTime(t *testing.T) { + syncer := newTestManager(&fake.Clientset{}) + podStatus := getReadyPodStatus() + pod := &api.Pod{ + ObjectMeta: api.ObjectMeta{ + UID: "12345678", + Name: "foo", + Namespace: "new", + }, + Status: api.PodStatus{}, + } + syncer.SetPodStatus(pod, podStatus) + verifyUpdates(t, syncer, 1) + oldStatus := expectPodStatus(t, syncer, pod) + anotherStatus := getReadyPodStatus() + anotherStatus.Conditions[0].Status = api.ConditionFalse + syncer.SetPodStatus(pod, anotherStatus) + verifyUpdates(t, syncer, 1) + newStatus := expectPodStatus(t, syncer, pod) + + oldReadyCondition := api.GetPodReadyCondition(oldStatus) + newReadyCondition := api.GetPodReadyCondition(newStatus) + if newReadyCondition.LastTransitionTime.IsZero() { + t.Errorf("Unexpected: last transition time not set") + } + if newReadyCondition.LastTransitionTime.Before(oldReadyCondition.LastTransitionTime) { + t.Errorf("Unexpected: new transition time %s, is before old transition time %s", newReadyCondition.LastTransitionTime, oldReadyCondition.LastTransitionTime) + } +} + +func TestUnchangedStatus(t *testing.T) { + syncer := newTestManager(&fake.Clientset{}) + testPod := getTestPod() + podStatus := getRandomPodStatus() + syncer.SetPodStatus(testPod, podStatus) + syncer.SetPodStatus(testPod, podStatus) + verifyUpdates(t, syncer, 1) +} + +func TestUnchangedStatusPreservesLastTransitionTime(t *testing.T) { + syncer := newTestManager(&fake.Clientset{}) + podStatus := getReadyPodStatus() + pod := &api.Pod{ + ObjectMeta: api.ObjectMeta{ + UID: "12345678", + Name: "foo", + Namespace: "new", + }, + Status: api.PodStatus{}, + } + syncer.SetPodStatus(pod, podStatus) + verifyUpdates(t, syncer, 1) + oldStatus := expectPodStatus(t, syncer, pod) + anotherStatus := getReadyPodStatus() + syncer.SetPodStatus(pod, anotherStatus) + // No update. 
+ verifyUpdates(t, syncer, 0) + newStatus := expectPodStatus(t, syncer, pod) + + oldReadyCondition := api.GetPodReadyCondition(oldStatus) + newReadyCondition := api.GetPodReadyCondition(newStatus) + if newReadyCondition.LastTransitionTime.IsZero() { + t.Errorf("Unexpected: last transition time not set") + } + if !oldReadyCondition.LastTransitionTime.Equal(newReadyCondition.LastTransitionTime) { + t.Errorf("Unexpected: new transition time %s, is not equal to old transition time %s", newReadyCondition.LastTransitionTime, oldReadyCondition.LastTransitionTime) + } +} + +func TestSyncBatchIgnoresNotFound(t *testing.T) { + client := fake.Clientset{} + syncer := newTestManager(&client) + client.AddReactor("get", "pods", func(action core.Action) (bool, runtime.Object, error) { + return true, nil, errors.NewNotFound(api.Resource("pods"), "test-pod") + }) + syncer.SetPodStatus(getTestPod(), getRandomPodStatus()) + syncer.testSyncBatch() + + verifyActions(t, syncer.kubeClient, []core.Action{ + core.GetActionImpl{ActionImpl: core.ActionImpl{Verb: "get", Resource: unversioned.GroupVersionResource{Resource: "pods"}}}, + }) +} + +func TestSyncBatch(t *testing.T) { + syncer := newTestManager(&fake.Clientset{}) + testPod := getTestPod() + syncer.kubeClient = fake.NewSimpleClientset(testPod) + syncer.SetPodStatus(testPod, getRandomPodStatus()) + syncer.testSyncBatch() + verifyActions(t, syncer.kubeClient, []core.Action{ + core.GetActionImpl{ActionImpl: core.ActionImpl{Verb: "get", Resource: unversioned.GroupVersionResource{Resource: "pods"}}}, + core.UpdateActionImpl{ActionImpl: core.ActionImpl{Verb: "update", Resource: unversioned.GroupVersionResource{Resource: "pods"}, Subresource: "status"}}, + }, + ) +} + +func TestSyncBatchChecksMismatchedUID(t *testing.T) { + syncer := newTestManager(&fake.Clientset{}) + pod := getTestPod() + pod.UID = "first" + syncer.podManager.AddPod(pod) + differentPod := getTestPod() + differentPod.UID = "second" + syncer.podManager.AddPod(differentPod) + syncer.kubeClient = fake.NewSimpleClientset(pod) + syncer.SetPodStatus(differentPod, getRandomPodStatus()) + syncer.testSyncBatch() + verifyActions(t, syncer.kubeClient, []core.Action{ + core.GetActionImpl{ActionImpl: core.ActionImpl{Verb: "get", Resource: unversioned.GroupVersionResource{Resource: "pods"}}}, + }) +} + +func TestSyncBatchNoDeadlock(t *testing.T) { + client := &fake.Clientset{} + m := newTestManager(client) + pod := getTestPod() + + // Setup fake client. + var ret api.Pod + var err error + client.AddReactor("*", "pods", func(action core.Action) (bool, runtime.Object, error) { + switch action := action.(type) { + case core.GetAction: + assert.Equal(t, pod.Name, action.GetName(), "Unexpected GetAction: %+v", action) + case core.UpdateAction: + assert.Equal(t, pod.Name, action.GetObject().(*api.Pod).Name, "Unexpected UpdateAction: %+v", action) + default: + assert.Fail(t, "Unexpected Action: %+v", action) + } + return true, &ret, err + }) + + pod.Status.ContainerStatuses = []api.ContainerStatus{{State: api.ContainerState{Running: &api.ContainerStateRunning{}}}} + + getAction := core.GetActionImpl{ActionImpl: core.ActionImpl{Verb: "get", Resource: unversioned.GroupVersionResource{Resource: "pods"}}} + updateAction := core.UpdateActionImpl{ActionImpl: core.ActionImpl{Verb: "update", Resource: unversioned.GroupVersionResource{Resource: "pods"}, Subresource: "status"}} + + // Pod not found. 
+ ret = *pod + err = errors.NewNotFound(api.Resource("pods"), pod.Name) + m.SetPodStatus(pod, getRandomPodStatus()) + m.testSyncBatch() + verifyActions(t, client, []core.Action{getAction}) + client.ClearActions() + + // Pod was recreated. + ret.UID = "other_pod" + err = nil + m.SetPodStatus(pod, getRandomPodStatus()) + m.testSyncBatch() + verifyActions(t, client, []core.Action{getAction}) + client.ClearActions() + + // Pod not deleted (success case). + ret = *pod + m.SetPodStatus(pod, getRandomPodStatus()) + m.testSyncBatch() + verifyActions(t, client, []core.Action{getAction, updateAction}) + client.ClearActions() + + // Pod is terminated, but still running. + pod.DeletionTimestamp = new(unversioned.Time) + m.SetPodStatus(pod, getRandomPodStatus()) + m.testSyncBatch() + verifyActions(t, client, []core.Action{getAction, updateAction}) + client.ClearActions() + + // Pod is terminated successfully. + pod.Status.ContainerStatuses[0].State.Running = nil + pod.Status.ContainerStatuses[0].State.Terminated = &api.ContainerStateTerminated{} + m.SetPodStatus(pod, getRandomPodStatus()) + m.testSyncBatch() + verifyActions(t, client, []core.Action{getAction, updateAction}) + client.ClearActions() + + // Error case. + err = fmt.Errorf("intentional test error") + m.SetPodStatus(pod, getRandomPodStatus()) + m.testSyncBatch() + verifyActions(t, client, []core.Action{getAction}) + client.ClearActions() +} + +func TestStaleUpdates(t *testing.T) { + pod := getTestPod() + client := fake.NewSimpleClientset(pod) + m := newTestManager(client) + + status := api.PodStatus{Message: "initial status"} + m.SetPodStatus(pod, status) + status.Message = "first version bump" + m.SetPodStatus(pod, status) + status.Message = "second version bump" + m.SetPodStatus(pod, status) + verifyUpdates(t, m, 3) + + t.Logf("First sync pushes latest status.") + m.testSyncBatch() + verifyActions(t, m.kubeClient, []core.Action{ + core.GetActionImpl{ActionImpl: core.ActionImpl{Verb: "get", Resource: unversioned.GroupVersionResource{Resource: "pods"}}}, + core.UpdateActionImpl{ActionImpl: core.ActionImpl{Verb: "update", Resource: unversioned.GroupVersionResource{Resource: "pods"}, Subresource: "status"}}, + }) + client.ClearActions() + + for i := 0; i < 2; i++ { + t.Logf("Next 2 syncs should be ignored (%d).", i) + m.testSyncBatch() + verifyActions(t, m.kubeClient, []core.Action{}) + } + + t.Log("Unchanged status should not send an update.") + m.SetPodStatus(pod, status) + verifyUpdates(t, m, 0) + + t.Log("... unless it's stale.") + m.apiStatusVersions[pod.UID] = m.apiStatusVersions[pod.UID] - 1 + + m.SetPodStatus(pod, status) + m.testSyncBatch() + verifyActions(t, m.kubeClient, []core.Action{ + core.GetActionImpl{ActionImpl: core.ActionImpl{Verb: "get", Resource: unversioned.GroupVersionResource{Resource: "pods"}}}, + core.UpdateActionImpl{ActionImpl: core.ActionImpl{Verb: "update", Resource: unversioned.GroupVersionResource{Resource: "pods"}, Subresource: "status"}}, + }) + + // Nothing stuck in the pipe. + verifyUpdates(t, m, 0) +} + +// shuffle returns a new shuffled list of container statuses. 
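+// The input slice is not modified; rand.Perm supplies the permutation.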
+func shuffle(statuses []api.ContainerStatus) []api.ContainerStatus { + numStatuses := len(statuses) + randIndexes := rand.Perm(numStatuses) + shuffled := make([]api.ContainerStatus, numStatuses) + for i := 0; i < numStatuses; i++ { + shuffled[i] = statuses[randIndexes[i]] + } + return shuffled +} + +func TestStatusEquality(t *testing.T) { + containerStatus := []api.ContainerStatus{} + for i := 0; i < 10; i++ { + s := api.ContainerStatus{ + Name: fmt.Sprintf("container%d", i), + } + containerStatus = append(containerStatus, s) + } + podStatus := api.PodStatus{ + ContainerStatuses: containerStatus, + } + for i := 0; i < 10; i++ { + oldPodStatus := api.PodStatus{ + ContainerStatuses: shuffle(podStatus.ContainerStatuses), + } + normalizeStatus(&oldPodStatus) + normalizeStatus(&podStatus) + if !isStatusEqual(&oldPodStatus, &podStatus) { + t.Fatalf("Order of container statuses should not affect normalized equality.") + } + } +} + +func TestStaticPodStatus(t *testing.T) { + staticPod := getTestPod() + staticPod.Annotations = map[string]string{kubetypes.ConfigSourceAnnotationKey: "file"} + mirrorPod := getTestPod() + mirrorPod.UID = "mirror-12345678" + mirrorPod.Annotations = map[string]string{ + kubetypes.ConfigSourceAnnotationKey: "api", + kubetypes.ConfigMirrorAnnotationKey: "mirror", + } + client := fake.NewSimpleClientset(mirrorPod) + m := newTestManager(client) + m.podManager.AddPod(staticPod) + m.podManager.AddPod(mirrorPod) + // Verify setup. + assert.True(t, kubepod.IsStaticPod(staticPod), "SetUp error: staticPod") + assert.True(t, kubepod.IsMirrorPod(mirrorPod), "SetUp error: mirrorPod") + assert.Equal(t, m.podManager.TranslatePodUID(mirrorPod.UID), staticPod.UID) + + status := getRandomPodStatus() + now := unversioned.Now() + status.StartTime = &now + + m.SetPodStatus(staticPod, status) + retrievedStatus := expectPodStatus(t, m, staticPod) + normalizeStatus(&status) + assert.True(t, isStatusEqual(&status, &retrievedStatus), "Expected: %+v, Got: %+v", status, retrievedStatus) + retrievedStatus, _ = m.GetPodStatus(mirrorPod.UID) + assert.True(t, isStatusEqual(&status, &retrievedStatus), "Expected: %+v, Got: %+v", status, retrievedStatus) + // Should translate mirrorPod / staticPod UID. + m.testSyncBatch() + verifyActions(t, m.kubeClient, []core.Action{ + core.GetActionImpl{ActionImpl: core.ActionImpl{Verb: "get", Resource: unversioned.GroupVersionResource{Resource: "pods"}}}, + core.UpdateActionImpl{ActionImpl: core.ActionImpl{Verb: "update", Resource: unversioned.GroupVersionResource{Resource: "pods"}, Subresource: "status"}}, + }) + updateAction := client.Actions()[1].(core.UpdateActionImpl) + updatedPod := updateAction.Object.(*api.Pod) + assert.Equal(t, mirrorPod.UID, updatedPod.UID, "Expected mirrorPod (%q), but got %q", mirrorPod.UID, updatedPod.UID) + assert.True(t, isStatusEqual(&status, &updatedPod.Status), "Expected: %+v, Got: %+v", status, updatedPod.Status) + client.ClearActions() + + // No changes. + m.testSyncBatch() + verifyActions(t, m.kubeClient, []core.Action{}) + + // Mirror pod identity changes. + m.podManager.DeletePod(mirrorPod) + mirrorPod.UID = "new-mirror-pod" + mirrorPod.Status = api.PodStatus{} + m.podManager.AddPod(mirrorPod) + // Expect update to new mirrorPod. 
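+	// The recreated mirror pod has a new UID, so the version recorded in
+	// apiStatusVersions for the old UID no longer matches and the cached
+	// status must be written to the API server again.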
+ m.testSyncBatch() + verifyActions(t, m.kubeClient, []core.Action{ + core.GetActionImpl{ActionImpl: core.ActionImpl{Verb: "get", Resource: unversioned.GroupVersionResource{Resource: "pods"}}}, + core.UpdateActionImpl{ActionImpl: core.ActionImpl{Verb: "update", Resource: unversioned.GroupVersionResource{Resource: "pods"}, Subresource: "status"}}, + }) + updateAction = client.Actions()[1].(core.UpdateActionImpl) + updatedPod = updateAction.Object.(*api.Pod) + assert.Equal(t, mirrorPod.UID, updatedPod.UID, "Expected mirrorPod (%q), but got %q", mirrorPod.UID, updatedPod.UID) + assert.True(t, isStatusEqual(&status, &updatedPod.Status), "Expected: %+v, Got: %+v", status, updatedPod.Status) +} + +func TestSetContainerReadiness(t *testing.T) { + cID1 := kubecontainer.ContainerID{Type: "test", ID: "1"} + cID2 := kubecontainer.ContainerID{Type: "test", ID: "2"} + containerStatuses := []api.ContainerStatus{ + { + Name: "c1", + ContainerID: cID1.String(), + Ready: false, + }, { + Name: "c2", + ContainerID: cID2.String(), + Ready: false, + }, + } + status := api.PodStatus{ + ContainerStatuses: containerStatuses, + Conditions: []api.PodCondition{{ + Type: api.PodReady, + Status: api.ConditionFalse, + }}, + } + pod := getTestPod() + pod.Spec.Containers = []api.Container{{Name: "c1"}, {Name: "c2"}} + + // Verify expected readiness of containers & pod. + verifyReadiness := func(step string, status *api.PodStatus, c1Ready, c2Ready, podReady bool) { + for _, c := range status.ContainerStatuses { + switch c.ContainerID { + case cID1.String(): + if c.Ready != c1Ready { + t.Errorf("[%s] Expected readiness of c1 to be %v but was %v", step, c1Ready, c.Ready) + } + case cID2.String(): + if c.Ready != c2Ready { + t.Errorf("[%s] Expected readiness of c2 to be %v but was %v", step, c2Ready, c.Ready) + } + default: + t.Fatalf("[%s] Unexpected container: %+v", step, c) + } + } + if status.Conditions[0].Type != api.PodReady { + t.Fatalf("[%s] Unexpected condition: %+v", step, status.Conditions[0]) + } else if ready := (status.Conditions[0].Status == api.ConditionTrue); ready != podReady { + t.Errorf("[%s] Expected readiness of pod to be %v but was %v", step, podReady, ready) + } + } + + m := newTestManager(&fake.Clientset{}) + // Add test pod because the container spec has been changed. 
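+	// SetContainerReadiness looks the container up by ContainerID in the
+	// cached status and recomputes the pod-level Ready condition from the
+	// container statuses, which is what the steps below exercise.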
+	m.podManager.AddPod(pod)
+
+	t.Log("Setting readiness before status should fail.")
+	m.SetContainerReadiness(pod.UID, cID1, true)
+	verifyUpdates(t, m, 0)
+	if status, ok := m.GetPodStatus(pod.UID); ok {
+		t.Errorf("Unexpected PodStatus: %+v", status)
+	}
+
+	t.Log("Setting initial status.")
+	m.SetPodStatus(pod, status)
+	verifyUpdates(t, m, 1)
+	status = expectPodStatus(t, m, pod)
+	verifyReadiness("initial", &status, false, false, false)
+
+	t.Log("Setting unchanged readiness should do nothing.")
+	m.SetContainerReadiness(pod.UID, cID1, false)
+	verifyUpdates(t, m, 0)
+	status = expectPodStatus(t, m, pod)
+	verifyReadiness("unchanged", &status, false, false, false)
+
+	t.Log("Setting container readiness should generate update but not pod readiness.")
+	m.SetContainerReadiness(pod.UID, cID1, true)
+	verifyUpdates(t, m, 1)
+	status = expectPodStatus(t, m, pod)
+	verifyReadiness("c1 ready", &status, true, false, false)
+
+	t.Log("Setting both containers to ready should update pod readiness.")
+	m.SetContainerReadiness(pod.UID, cID2, true)
+	verifyUpdates(t, m, 1)
+	status = expectPodStatus(t, m, pod)
+	verifyReadiness("all ready", &status, true, true, true)
+
+	t.Log("Setting non-existent container readiness should fail.")
+	m.SetContainerReadiness(pod.UID, kubecontainer.ContainerID{Type: "test", ID: "foo"}, true)
+	verifyUpdates(t, m, 0)
+	status = expectPodStatus(t, m, pod)
+	verifyReadiness("ignore non-existent", &status, true, true, true)
+}
+
+func TestSyncBatchCleanupVersions(t *testing.T) {
+	m := newTestManager(&fake.Clientset{})
+	testPod := getTestPod()
+	mirrorPod := getTestPod()
+	mirrorPod.UID = "mirror-uid"
+	mirrorPod.Name = "mirror_pod"
+	mirrorPod.Annotations = map[string]string{
+		kubetypes.ConfigSourceAnnotationKey: "api",
+		kubetypes.ConfigMirrorAnnotationKey: "mirror",
+	}
+
+	// Orphaned pods should be removed.
+	m.apiStatusVersions[testPod.UID] = 100
+	m.apiStatusVersions[mirrorPod.UID] = 200
+	m.testSyncBatch()
+	if _, ok := m.apiStatusVersions[testPod.UID]; ok {
+		t.Errorf("Should have cleared status for testPod")
+	}
+	if _, ok := m.apiStatusVersions[mirrorPod.UID]; ok {
+		t.Errorf("Should have cleared status for mirrorPod")
+	}
+
+	// Non-orphaned pods should not be removed.
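+	// A version entry survives cleanup as long as the pod is still known,
+	// either through a cached status (testPod, via SetPodStatus) or through
+	// the mirror-to-static UID translation (mirrorPod, once its static pod
+	// is registered below).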
+	m.SetPodStatus(testPod, getRandomPodStatus())
+	m.podManager.AddPod(mirrorPod)
+	// Copy the mirror pod rather than aliasing the pointer; mutating the UID
+	// through a shared pointer would silently change mirrorPod.UID as well.
+	staticPod := *mirrorPod
+	staticPod.UID = "static-uid"
+	staticPod.Annotations = map[string]string{kubetypes.ConfigSourceAnnotationKey: "file"}
+	m.podManager.AddPod(&staticPod)
+	m.apiStatusVersions[testPod.UID] = 100
+	m.apiStatusVersions[mirrorPod.UID] = 200
+	m.testSyncBatch()
+	if _, ok := m.apiStatusVersions[testPod.UID]; !ok {
+		t.Errorf("Should not have cleared status for testPod")
+	}
+	if _, ok := m.apiStatusVersions[mirrorPod.UID]; !ok {
+		t.Errorf("Should not have cleared status for mirrorPod")
+	}
+}
+
+func TestReconcilePodStatus(t *testing.T) {
+	testPod := getTestPod()
+	client := fake.NewSimpleClientset(testPod)
+	syncer := newTestManager(client)
+	syncer.SetPodStatus(testPod, getRandomPodStatus())
+	// Call syncBatch directly to test reconcile.
+	syncer.syncBatch() // The apiStatusVersions should be set now.
+
+	podStatus, ok := syncer.GetPodStatus(testPod.UID)
+	if !ok {
+		t.Fatalf("Should find pod status for pod: %+v", testPod)
+	}
+	testPod.Status = podStatus
+
+	// If the pod status is the same, a reconciliation is not needed and
+	// syncBatch should do nothing.
+	syncer.podManager.UpdatePod(testPod)
+	if syncer.needsReconcile(testPod.UID, podStatus) {
+		t.Errorf("Pod status is the same, a reconciliation is not needed")
+	}
+	client.ClearActions()
+	syncer.syncBatch()
+	verifyActions(t, client, []core.Action{})
+
+	// If the pod status is the same except that the timestamp is in RFC3339
+	// format (lower precision, without nanoseconds), a reconciliation is not
+	// needed and syncBatch should do nothing.
+	// The StartTime should have been set in SetPodStatus().
+	// TODO(random-liu): Remove this later when api becomes consistent for timestamp.
+	normalizedStartTime := testPod.Status.StartTime.Rfc3339Copy()
+	testPod.Status.StartTime = &normalizedStartTime
+	syncer.podManager.UpdatePod(testPod)
+	if syncer.needsReconcile(testPod.UID, podStatus) {
+		t.Errorf("Pod status only differs in timestamp format, a reconciliation is not needed")
+	}
+	client.ClearActions()
+	syncer.syncBatch()
+	verifyActions(t, client, []core.Action{})
+
+	// If the pod status is different, a reconciliation is needed and
+	// syncBatch should trigger an update.
+	testPod.Status = getRandomPodStatus()
+	syncer.podManager.UpdatePod(testPod)
+	if !syncer.needsReconcile(testPod.UID, podStatus) {
+		t.Errorf("Pod status is different, a reconciliation is needed")
+	}
+	client.ClearActions()
+	syncer.syncBatch()
+	verifyActions(t, client, []core.Action{
+		core.GetActionImpl{ActionImpl: core.ActionImpl{Verb: "get", Resource: unversioned.GroupVersionResource{Resource: "pods"}}},
+		core.UpdateActionImpl{ActionImpl: core.ActionImpl{Verb: "update", Resource: unversioned.GroupVersionResource{Resource: "pods"}, Subresource: "status"}},
+	})
+}
+
+func expectPodStatus(t *testing.T, m *manager, pod *api.Pod) api.PodStatus {
+	status, ok := m.GetPodStatus(pod.UID)
+	if !ok {
+		t.Fatalf("Expected PodStatus for %q not found", pod.UID)
+	}
+	return status
+}
+
+func TestDeletePods(t *testing.T) {
+	pod := getTestPod()
+	// Set the deletion timestamp.
+	pod.DeletionTimestamp = new(unversioned.Time)
+	client := fake.NewSimpleClientset(pod)
+	m := newTestManager(client)
+	m.podManager.AddPod(pod)
+
+	status := getRandomPodStatus()
+	now := unversioned.Now()
+	status.StartTime = &now
+	m.SetPodStatus(pod, status)
+
+	m.testSyncBatch()
+	// Expect to see a delete action.
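+	// After the status update is written, the manager deletes API pods that
+	// carry a deletion timestamp and are no longer running, hence the delete
+	// action following the get/update pair.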
+	verifyActions(t, m.kubeClient, []core.Action{
+		core.GetActionImpl{ActionImpl: core.ActionImpl{Verb: "get", Resource: unversioned.GroupVersionResource{Resource: "pods"}}},
+		core.UpdateActionImpl{ActionImpl: core.ActionImpl{Verb: "update", Resource: unversioned.GroupVersionResource{Resource: "pods"}, Subresource: "status"}},
+		core.DeleteActionImpl{ActionImpl: core.ActionImpl{Verb: "delete", Resource: unversioned.GroupVersionResource{Resource: "pods"}}},
+	})
+}
+
+func TestDoNotDeleteMirrorPods(t *testing.T) {
+	staticPod := getTestPod()
+	staticPod.Annotations = map[string]string{kubetypes.ConfigSourceAnnotationKey: "file"}
+	mirrorPod := getTestPod()
+	mirrorPod.UID = "mirror-12345678"
+	mirrorPod.Annotations = map[string]string{
+		kubetypes.ConfigSourceAnnotationKey: "api",
+		kubetypes.ConfigMirrorAnnotationKey: "mirror",
+	}
+	// Set the deletion timestamp.
+	mirrorPod.DeletionTimestamp = new(unversioned.Time)
+	client := fake.NewSimpleClientset(mirrorPod)
+	m := newTestManager(client)
+	m.podManager.AddPod(staticPod)
+	m.podManager.AddPod(mirrorPod)
+	// Verify setup.
+	assert.True(t, kubepod.IsStaticPod(staticPod), "SetUp error: staticPod")
+	assert.True(t, kubepod.IsMirrorPod(mirrorPod), "SetUp error: mirrorPod")
+	assert.Equal(t, m.podManager.TranslatePodUID(mirrorPod.UID), staticPod.UID)
+
+	status := getRandomPodStatus()
+	now := unversioned.Now()
+	status.StartTime = &now
+	m.SetPodStatus(staticPod, status)
+
+	m.testSyncBatch()
+	// Expect not to see a delete action.
+	verifyActions(t, m.kubeClient, []core.Action{
+		core.GetActionImpl{ActionImpl: core.ActionImpl{Verb: "get", Resource: unversioned.GroupVersionResource{Resource: "pods"}}},
+		core.UpdateActionImpl{ActionImpl: core.ActionImpl{Verb: "update", Resource: unversioned.GroupVersionResource{Resource: "pods"}, Subresource: "status"}},
+	})
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/types/constants.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/types/constants.go
new file mode 100644
index 000000000000..060fec752abf
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/types/constants.go
@@ -0,0 +1,22 @@
+/*
+Copyright 2015 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package types
+
+const (
+	// system default DNS resolver configuration
+	ResolvConfDefault = "/etc/resolv.conf"
+)
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/types/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/types/doc.go
new file mode 100644
index 000000000000..104ff4e356f8
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/types/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright 2015 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Common types in the Kubelet. +package types diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/types/labels.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/types/labels.go new file mode 100644 index 000000000000..24f91f94978d --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/types/labels.go @@ -0,0 +1,40 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package types + +const ( + KubernetesPodNameLabel = "io.kubernetes.pod.name" + KubernetesPodNamespaceLabel = "io.kubernetes.pod.namespace" + KubernetesPodUIDLabel = "io.kubernetes.pod.uid" + KubernetesContainerNameLabel = "io.kubernetes.container.name" +) + +func GetContainerName(labels map[string]string) string { + return labels[KubernetesContainerNameLabel] +} + +func GetPodName(labels map[string]string) string { + return labels[KubernetesPodNameLabel] +} + +func GetPodUID(labels map[string]string) string { + return labels[KubernetesPodUIDLabel] +} + +func GetPodNamespace(labels map[string]string) string { + return labels[KubernetesPodNamespaceLabel] +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/types/pod_update.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/types/pod_update.go new file mode 100644 index 000000000000..88bcb54b32e1 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/types/pod_update.go @@ -0,0 +1,131 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package types + +import ( + "fmt" + + "k8s.io/kubernetes/pkg/api" +) + +const ConfigSourceAnnotationKey = "kubernetes.io/config.source" +const ConfigMirrorAnnotationKey = "kubernetes.io/config.mirror" +const ConfigFirstSeenAnnotationKey = "kubernetes.io/config.seen" +const ConfigHashAnnotationKey = "kubernetes.io/config.hash" + +// PodOperation defines what changes will be made on a pod configuration. 
+type PodOperation int
+
+const (
+	// This is the current pod configuration
+	SET PodOperation = iota
+	// Pods with the given ids are new to this source
+	ADD
+	// Pods with the given ids have been removed from this source
+	REMOVE
+	// Pods with the given ids have been updated in this source
+	UPDATE
+	// Pods with the given ids have unexpected status in this source,
+	// kubelet should reconcile status with this source
+	RECONCILE
+
+	// These constants identify the sources of pods
+	// Updates from a file
+	FileSource = "file"
+	// Updates from querying a web page
+	HTTPSource = "http"
+	// Updates from Kubernetes API Server
+	ApiserverSource = "api"
+	// Updates from all sources
+	AllSource = "*"
+
+	NamespaceDefault = api.NamespaceDefault
+)
+
+// PodUpdate defines an operation sent on the channel. You can add or remove single pods by
+// sending an array of size one and Op == ADD|REMOVE (with REMOVE, only the ID is required).
+// For setting the state of the system to a given state for this source configuration, set
+// Pods as desired and Op to SET, which will reset the system state to that specified in this
+// operation for this source channel. To remove all pods, set Pods to an empty slice and Op to SET.
+//
+// Additionally, Pods should never be nil - it should always point to an empty slice. While
+// functionally similar, this helps our unit tests properly check that the correct PodUpdates
+// are generated.
+type PodUpdate struct {
+	Pods   []*api.Pod
+	Op     PodOperation
+	Source string
+}
+
+// GetValidatedSources gets all validated sources from the specified sources.
+func GetValidatedSources(sources []string) ([]string, error) {
+	validated := make([]string, 0, len(sources))
+	for _, source := range sources {
+		switch source {
+		case AllSource:
+			return []string{FileSource, HTTPSource, ApiserverSource}, nil
+		case FileSource, HTTPSource, ApiserverSource:
+			validated = append(validated, source)
+		case "":
+			// Skip empty sources.
+		default:
+			return []string{}, fmt.Errorf("unknown pod source %q", source)
+		}
+	}
+	return validated, nil
+}
+
+// GetPodSource returns the source of the pod based on the annotation.
+func GetPodSource(pod *api.Pod) (string, error) {
+	if pod.Annotations != nil {
+		if source, ok := pod.Annotations[ConfigSourceAnnotationKey]; ok {
+			return source, nil
+		}
+	}
+	return "", fmt.Errorf("cannot get source of pod %q", pod.UID)
+}
+
+// SyncPodType classifies pod updates, e.g. create, update.
+type SyncPodType int
+
+const (
+	// SyncPodSync is when the pod is synced to ensure desired state
+	SyncPodSync SyncPodType = iota
+	// SyncPodUpdate is when the pod is updated from source
+	SyncPodUpdate
+	// SyncPodCreate is when the pod is created from source
+	SyncPodCreate
+	// SyncPodKill is when the pod is killed based on a trigger internal to the kubelet for eviction.
+	// If a SyncPodKill request is made to pod workers, the request is never dropped, and will always be processed.
+ SyncPodKill +) + +func (sp SyncPodType) String() string { + switch sp { + case SyncPodCreate: + return "create" + case SyncPodUpdate: + return "update" + case SyncPodSync: + return "sync" + case SyncPodKill: + return "kill" + default: + return "unknown" + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/types/pod_update_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/types/pod_update_test.go new file mode 100644 index 000000000000..a753bb587277 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/types/pod_update_test.go @@ -0,0 +1,44 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package types + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestGetValidatedSources(t *testing.T) { + // Empty. + sources, err := GetValidatedSources([]string{""}) + require.NoError(t, err) + require.Len(t, sources, 0) + + // Success. + sources, err = GetValidatedSources([]string{FileSource, ApiserverSource}) + require.NoError(t, err) + require.Len(t, sources, 2) + + // All. + sources, err = GetValidatedSources([]string{AllSource}) + require.NoError(t, err) + require.Len(t, sources, 3) + + // Unknown source. + sources, err = GetValidatedSources([]string{"taco"}) + require.Error(t, err) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/types/types.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/types/types.go new file mode 100644 index 000000000000..7776ee9e366c --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/types/types.go @@ -0,0 +1,77 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package types + +import ( + "net/http" + "time" + + "k8s.io/kubernetes/pkg/api" +) + +// TODO: Reconcile custom types in kubelet/types and this subpackage + +type HttpGetter interface { + Get(url string) (*http.Response, error) +} + +// Timestamp wraps around time.Time and offers utilities to format and parse +// the time using RFC3339Nano +type Timestamp struct { + time time.Time +} + +// NewTimestamp returns a Timestamp object using the current time. +func NewTimestamp() *Timestamp { + return &Timestamp{time.Now()} +} + +// ConvertToTimestamp takes a string, parses it using the RFC3339Nano layout, +// and converts it to a Timestamp object. 
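+// Note that parse errors are silently discarded; an unparsable string yields
+// a Timestamp holding the zero time.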
+func ConvertToTimestamp(timeString string) *Timestamp { + parsed, _ := time.Parse(time.RFC3339Nano, timeString) + return &Timestamp{parsed} +} + +// Get returns the time as time.Time. +func (t *Timestamp) Get() time.Time { + return t.time +} + +// GetString returns the time in the string format using the RFC3339Nano +// layout. +func (t *Timestamp) GetString() string { + return t.time.Format(time.RFC3339Nano) +} + +// A type to help sort container statuses based on container names. +type SortedContainerStatuses []api.ContainerStatus + +func (s SortedContainerStatuses) Len() int { return len(s) } +func (s SortedContainerStatuses) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +func (s SortedContainerStatuses) Less(i, j int) bool { + return s[i].Name < s[j].Name +} + +// Reservation represents reserved resources for non-pod components. +type Reservation struct { + // System represents resources reserved for non-kubernetes components. + System api.ResourceList + // Kubernetes represents resources reserved for kubernetes system components. + Kubernetes api.ResourceList +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/util.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/util.go new file mode 100644 index 000000000000..ae2d94bfa12d --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/util.go @@ -0,0 +1,115 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubelet + +import ( + "fmt" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/capabilities" + kubetypes "k8s.io/kubernetes/pkg/kubelet/types" + "k8s.io/kubernetes/pkg/securitycontext" +) + +// Check whether we have the capabilities to run the specified pod. 
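+// The checks cover host networking, host PID, host IPC, and privileged
+// containers, each validated against the kubelet's configured capabilities.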
+func canRunPod(pod *api.Pod) error {
+	if pod.Spec.SecurityContext != nil && pod.Spec.SecurityContext.HostNetwork {
+		allowed, err := allowHostNetwork(pod)
+		if err != nil {
+			return err
+		}
+		if !allowed {
+			return fmt.Errorf("pod with UID %q specified host networking, but is disallowed", pod.UID)
+		}
+	}
+
+	if pod.Spec.SecurityContext != nil && pod.Spec.SecurityContext.HostPID {
+		allowed, err := allowHostPID(pod)
+		if err != nil {
+			return err
+		}
+		if !allowed {
+			return fmt.Errorf("pod with UID %q specified host PID, but is disallowed", pod.UID)
+		}
+	}
+
+	if pod.Spec.SecurityContext != nil && pod.Spec.SecurityContext.HostIPC {
+		allowed, err := allowHostIPC(pod)
+		if err != nil {
+			return err
+		}
+		if !allowed {
+			return fmt.Errorf("pod with UID %q specified host ipc, but is disallowed", pod.UID)
+		}
+	}
+
+	if !capabilities.Get().AllowPrivileged {
+		for _, container := range pod.Spec.Containers {
+			if securitycontext.HasPrivilegedRequest(&container) {
+				return fmt.Errorf("pod with UID %q specified privileged container, but is disallowed", pod.UID)
+			}
+		}
+		for _, container := range pod.Spec.InitContainers {
+			if securitycontext.HasPrivilegedRequest(&container) {
+				return fmt.Errorf("pod with UID %q specified privileged container, but is disallowed", pod.UID)
+			}
+		}
+	}
+	return nil
+}
+
+// allowHostNetwork determines whether the specified pod is allowed to use host networking.
+func allowHostNetwork(pod *api.Pod) (bool, error) {
+	podSource, err := kubetypes.GetPodSource(pod)
+	if err != nil {
+		return false, err
+	}
+	for _, source := range capabilities.Get().PrivilegedSources.HostNetworkSources {
+		if source == podSource {
+			return true, nil
+		}
+	}
+	return false, nil
+}
+
+// allowHostPID determines whether the specified pod is allowed to use the host PID namespace.
+func allowHostPID(pod *api.Pod) (bool, error) {
+	podSource, err := kubetypes.GetPodSource(pod)
+	if err != nil {
+		return false, err
+	}
+	for _, source := range capabilities.Get().PrivilegedSources.HostPIDSources {
+		if source == podSource {
+			return true, nil
+		}
+	}
+	return false, nil
+}
+
+// allowHostIPC determines whether the specified pod is allowed to use the host IPC namespace.
+func allowHostIPC(pod *api.Pod) (bool, error) {
+	podSource, err := kubetypes.GetPodSource(pod)
+	if err != nil {
+		return false, err
+	}
+	for _, source := range capabilities.Get().PrivilegedSources.HostIPCSources {
+		if source == podSource {
+			return true, nil
+		}
+	}
+	return false, nil
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/util/cache/object_cache.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/util/cache/object_cache.go
new file mode 100644
index 000000000000..9bb809c0d06d
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/util/cache/object_cache.go
@@ -0,0 +1,84 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cache
+
+import (
+	"time"
+
+	expirationCache "k8s.io/kubernetes/pkg/client/cache"
+)
+
+// ObjectCache is a simple wrapper of an expiration cache that
+// 1. uses a string as the key,
+// 2. has an updater to fetch the value directly if it has expired, and
+// 3. then updates the cache with the fresh value.
+type ObjectCache struct {
+	cache   expirationCache.Store
+	updater func() (interface{}, error)
+}
+
+// objectEntry is an object with a string key.
+type objectEntry struct {
+	key string
+	obj interface{}
+}
+
+// NewObjectCache creates an ObjectCache with the given updater and TTL.
+// The updater returns a fresh object to cache on a miss or after expiry.
+func NewObjectCache(f func() (interface{}, error), ttl time.Duration) *ObjectCache {
+	return &ObjectCache{
+		updater: f,
+		cache:   expirationCache.NewTTLStore(stringKeyFunc, ttl),
+	}
+}
+
+// stringKeyFunc extracts the string key from an objectEntry.
+func stringKeyFunc(obj interface{}) (string, error) {
+	key := obj.(objectEntry).key
+	return key, nil
+}
+
+// Get returns the cached object for the given key. On a miss or after
+// expiry, it calls the updater, caches the result, and returns it.
+func (c *ObjectCache) Get(key string) (interface{}, error) {
+	value, ok, err := c.cache.Get(objectEntry{key: key})
+	if err != nil {
+		return nil, err
+	}
+	if !ok {
+		obj, err := c.updater()
+		if err != nil {
+			return nil, err
+		}
+		err = c.cache.Add(objectEntry{
+			key: key,
+			obj: obj,
+		})
+		if err != nil {
+			return nil, err
+		}
+		return obj, nil
+	}
+	return value.(objectEntry).obj, nil
+}
+
+// Add caches the given object under the given key.
+func (c *ObjectCache) Add(key string, obj interface{}) error {
+	err := c.cache.Add(objectEntry{key: key, obj: obj})
+	if err != nil {
+		return err
+	}
+	return nil
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/util/cache/object_cache_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/util/cache/object_cache_test.go
new file mode 100644
index 000000000000..e53ae0df02bd
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/util/cache/object_cache_test.go
@@ -0,0 +1,96 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cache
+
+import (
+	"fmt"
+	"testing"
+	"time"
+
+	expirationCache "k8s.io/kubernetes/pkg/client/cache"
+	"k8s.io/kubernetes/pkg/util"
+)
+
+type testObject struct {
+	key string
+	val string
+}
+
+// NewFakeObjectCache returns a fake ObjectCache for unit tests.
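+// It replaces the real TTL store with a fake expiration store driven by the
+// supplied clock, so tests can advance time deterministically instead of
+// sleeping.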
+func NewFakeObjectCache(f func() (interface{}, error), ttl time.Duration, clock util.Clock) *ObjectCache { + ttlPolicy := &expirationCache.TTLPolicy{Ttl: ttl, Clock: clock} + deleteChan := make(chan string, 1) + return &ObjectCache{ + updater: f, + cache: expirationCache.NewFakeExpirationStore(stringKeyFunc, deleteChan, ttlPolicy, clock), + } +} + +func TestAddAndGet(t *testing.T) { + testObj := testObject{ + key: "foo", + val: "bar", + } + objectCache := NewFakeObjectCache(func() (interface{}, error) { + return nil, fmt.Errorf("Unexpected Error: updater should never be called in this test!") + }, 1*time.Hour, util.NewFakeClock(time.Now())) + + err := objectCache.Add(testObj.key, testObj.val) + if err != nil { + t.Errorf("Unable to add obj %#v by key: %s", testObj, testObj.key) + } + value, err := objectCache.Get(testObj.key) + if err != nil { + t.Errorf("Unable to get obj %#v by key: %s", testObj, testObj.key) + } + if value.(string) != testObj.val { + t.Errorf("Expected to get cached value: %#v, but got: %s", testObj.val, value.(string)) + } + +} + +func TestExpirationBasic(t *testing.T) { + unexpectedVal := "bar" + expectedVal := "bar2" + + testObj := testObject{ + key: "foo", + val: unexpectedVal, + } + + fakeClock := util.NewFakeClock(time.Now()) + + objectCache := NewFakeObjectCache(func() (interface{}, error) { + return expectedVal, nil + }, 1*time.Second, fakeClock) + + err := objectCache.Add(testObj.key, testObj.val) + if err != nil { + t.Errorf("Unable to add obj %#v by key: %s", testObj, testObj.key) + } + + // sleep 2s so cache should be expired. + fakeClock.Sleep(2 * time.Second) + + value, err := objectCache.Get(testObj.key) + if err != nil { + t.Errorf("Unable to get obj %#v by key: %s", testObj, testObj.key) + } + if value.(string) != expectedVal { + t.Errorf("Expected to get cached value: %#v, but got: %s", expectedVal, value.(string)) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/util/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/util/doc.go new file mode 100644 index 000000000000..b7e74c7f296b --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/util/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Utility functions. +package util diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/util/format/pod.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/util/format/pod.go new file mode 100644 index 000000000000..506f2a785197 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/util/format/pod.go @@ -0,0 +1,48 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package format
+
+import (
+	"fmt"
+	"strings"
+
+	"k8s.io/kubernetes/pkg/api"
+)
+
+type podHandler func(*api.Pod) string
+
+// Pod returns a string representing a pod in a human readable format,
+// with pod UID as part of the string.
+func Pod(pod *api.Pod) string {
+	// Use underscore as the delimiter because it is not allowed in pod name
+	// (DNS subdomain format), while allowed in the container name format.
+	return fmt.Sprintf("%s_%s(%s)", pod.Name, pod.Namespace, pod.UID)
+}
+
+// Pods returns a string representing a list of pods in a human
+// readable format.
+func Pods(pods []*api.Pod) string {
+	return aggregatePods(pods, Pod)
+}
+
+func aggregatePods(pods []*api.Pod, handler podHandler) string {
+	podStrings := make([]string, 0, len(pods))
+	for _, pod := range pods {
+		podStrings = append(podStrings, handler(pod))
+	}
+	// Join directly instead of passing the result through Sprintf, which
+	// would misinterpret any '%' in pod names as format verbs.
+	return strings.Join(podStrings, ", ")
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/util/format/resources.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/util/format/resources.go
new file mode 100644
index 000000000000..4e90c295c86a
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/util/format/resources.go
@@ -0,0 +1,36 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package format
+
+import (
+	"fmt"
+	"sort"
+	"strings"
+
+	"k8s.io/kubernetes/pkg/api"
+)
+
+// ResourceList returns a string representation of a resource list in a human readable format.
+func ResourceList(resources api.ResourceList) string {
+	resourceStrings := make([]string, 0, len(resources))
+	for key, value := range resources {
+		resourceStrings = append(resourceStrings, fmt.Sprintf("%v=%v", key, value.String()))
+	}
+	// sort the results for consistent log output
+	sort.Strings(resourceStrings)
+	return strings.Join(resourceStrings, ",")
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/util/format/resources_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/util/format/resources_test.go
new file mode 100644
index 000000000000..bbb8812066b5
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/util/format/resources_test.go
@@ -0,0 +1,35 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package format + +import ( + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/resource" +) + +func TestResourceList(t *testing.T) { + resourceList := api.ResourceList{} + resourceList[api.ResourceCPU] = resource.MustParse("100m") + resourceList[api.ResourceMemory] = resource.MustParse("5Gi") + actual := ResourceList(resourceList) + expected := "cpu=100m,memory=5Gi" + if actual != expected { + t.Errorf("Unexpected result, actual: %v, expected: %v", actual, expected) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/util/ioutils/ioutils.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/util/ioutils/ioutils.go new file mode 100644 index 000000000000..fa700396ecf0 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/util/ioutils/ioutils.go @@ -0,0 +1,37 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ioutils + +import "io" + +// writeCloserWrapper represents a WriteCloser whose closer operation is noop. +type writeCloserWrapper struct { + Writer io.Writer +} + +func (w *writeCloserWrapper) Write(buf []byte) (int, error) { + return w.Writer.Write(buf) +} + +func (w *writeCloserWrapper) Close() error { + return nil +} + +// WriteCloserWrapper returns a writeCloserWrapper. +func WriteCloserWrapper(w io.Writer) io.WriteCloser { + return &writeCloserWrapper{w} +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/util/queue/work_queue.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/util/queue/work_queue.go new file mode 100644 index 000000000000..48d0919d9b1f --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/util/queue/work_queue.go @@ -0,0 +1,67 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package queue + +import ( + "sync" + "time" + + "k8s.io/kubernetes/pkg/types" + "k8s.io/kubernetes/pkg/util" +) + +// WorkQueue allows queuing items with a timestamp. An item is +// considered ready to process if the timestamp has expired. 
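+//
+// Typical use is retry/backoff bookkeeping. A minimal sketch (podUID and
+// backoffDelay are illustrative):
+//
+//	q := NewBasicWorkQueue(util.RealClock{})
+//	q.Enqueue(podUID, backoffDelay)
+//	for _, uid := range q.GetWork() {
+//		// uid's delay has expired; it is due for another attempt.
+//	}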
+type WorkQueue interface { + // GetWork dequeues and returns all ready items. + GetWork() []types.UID + // Enqueue inserts a new item or overwrites an existing item. + Enqueue(item types.UID, delay time.Duration) +} + +type basicWorkQueue struct { + clock util.Clock + lock sync.Mutex + queue map[types.UID]time.Time +} + +var _ WorkQueue = &basicWorkQueue{} + +func NewBasicWorkQueue(clock util.Clock) WorkQueue { + queue := make(map[types.UID]time.Time) + return &basicWorkQueue{queue: queue, clock: clock} +} + +func (q *basicWorkQueue) GetWork() []types.UID { + q.lock.Lock() + defer q.lock.Unlock() + now := q.clock.Now() + var items []types.UID + for k, v := range q.queue { + if v.Before(now) { + items = append(items, k) + delete(q.queue, k) + } + } + return items +} + +func (q *basicWorkQueue) Enqueue(item types.UID, delay time.Duration) { + q.lock.Lock() + defer q.lock.Unlock() + q.queue[item] = q.clock.Now().Add(delay) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/util/queue/work_queue_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/util/queue/work_queue_test.go new file mode 100644 index 000000000000..40ba6d95d888 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/util/queue/work_queue_test.go @@ -0,0 +1,65 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package queue + +import ( + "testing" + "time" + + "k8s.io/kubernetes/pkg/types" + "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/sets" +) + +func newTestBasicWorkQueue() (*basicWorkQueue, *util.FakeClock) { + fakeClock := util.NewFakeClock(time.Now()) + wq := &basicWorkQueue{ + clock: fakeClock, + queue: make(map[types.UID]time.Time), + } + return wq, fakeClock +} + +func compareResults(t *testing.T, expected, actual []types.UID) { + expectedSet := sets.NewString() + for _, u := range expected { + expectedSet.Insert(string(u)) + } + actualSet := sets.NewString() + for _, u := range actual { + actualSet.Insert(string(u)) + } + if !expectedSet.Equal(actualSet) { + t.Errorf("Expected %#v, got %#v", expectedSet.List(), actualSet.List()) + } +} + +func TestGetWork(t *testing.T) { + q, clock := newTestBasicWorkQueue() + q.Enqueue(types.UID("foo1"), -1*time.Minute) + q.Enqueue(types.UID("foo2"), -1*time.Minute) + q.Enqueue(types.UID("foo3"), 1*time.Minute) + q.Enqueue(types.UID("foo4"), 1*time.Minute) + expected := []types.UID{types.UID("foo1"), types.UID("foo2")} + compareResults(t, expected, q.GetWork()) + compareResults(t, []types.UID{}, q.GetWork()) + // Dial the time to 1 hour ahead. 
+	clock.Step(time.Hour)
+	expected = []types.UID{types.UID("foo3"), types.UID("foo4")}
+	compareResults(t, expected, q.GetWork())
+	compareResults(t, []types.UID{}, q.GetWork())
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/volume_manager.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/volume_manager.go
new file mode 100644
index 000000000000..d72432518899
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/volume_manager.go
@@ -0,0 +1,62 @@
+/*
+Copyright 2015 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kubelet
+
+import (
+	"sync"
+
+	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
+	"k8s.io/kubernetes/pkg/types"
+)
+
+// volumeManager manages the volumes for the pods running on the kubelet.
+// Currently it only does bookkeeping, but it can be expanded to
+// take care of the volumePlugins.
+type volumeManager struct {
+	lock       sync.RWMutex
+	volumeMaps map[types.UID]kubecontainer.VolumeMap
+}
+
+func newVolumeManager() *volumeManager {
+	vm := &volumeManager{}
+	vm.volumeMaps = make(map[types.UID]kubecontainer.VolumeMap)
+	return vm
+}
+
+// SetVolumes sets the volume map for a pod.
+// TODO(yifan): Currently we assume the volume is already mounted, so we only do bookkeeping here.
+func (vm *volumeManager) SetVolumes(podUID types.UID, podVolumes kubecontainer.VolumeMap) {
+	vm.lock.Lock()
+	defer vm.lock.Unlock()
+	vm.volumeMaps[podUID] = podVolumes
+}
+
+// GetVolumes returns the map of volumes that are already mounted on the host
+// machine for a pod.
+func (vm *volumeManager) GetVolumes(podUID types.UID) (kubecontainer.VolumeMap, bool) {
+	vm.lock.RLock()
+	defer vm.lock.RUnlock()
+	vol, ok := vm.volumeMaps[podUID]
+	return vol, ok
+}
+
+// DeleteVolumes removes the reference to a volume map for a pod.
+func (vm *volumeManager) DeleteVolumes(podUID types.UID) {
+	vm.lock.Lock()
+	defer vm.lock.Unlock()
+	delete(vm.volumeMaps, podUID)
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/volumes.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/volumes.go
new file mode 100644
index 000000000000..3651043fee07
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/kubelet/volumes.go
@@ -0,0 +1,433 @@
+/*
+Copyright 2014 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package kubelet + +import ( + "fmt" + "io/ioutil" + "os" + "path" + "strconv" + + "github.com/golang/glog" + "k8s.io/kubernetes/pkg/api" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" + "k8s.io/kubernetes/pkg/cloudprovider" + kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" + "k8s.io/kubernetes/pkg/types" + "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/io" + "k8s.io/kubernetes/pkg/util/mount" + "k8s.io/kubernetes/pkg/util/strings" + "k8s.io/kubernetes/pkg/volume" +) + +const ( + volumeGidAnnotationKey = "pv.beta.kubernetes.io/gid" +) + +// This just exports required functions from kubelet proper, for use by volume +// plugins. +type volumeHost struct { + kubelet *Kubelet +} + +func (vh *volumeHost) GetPluginDir(pluginName string) string { + return vh.kubelet.getPluginDir(pluginName) +} + +func (vh *volumeHost) GetPodVolumeDir(podUID types.UID, pluginName string, volumeName string) string { + return vh.kubelet.getPodVolumeDir(podUID, pluginName, volumeName) +} + +func (vh *volumeHost) GetPodPluginDir(podUID types.UID, pluginName string) string { + return vh.kubelet.getPodPluginDir(podUID, pluginName) +} + +func (vh *volumeHost) GetKubeClient() clientset.Interface { + return vh.kubelet.kubeClient +} + +// NewWrapperMounter attempts to create a volume mounter +// from a volume Spec, pod and volume options. +// Returns a new volume Mounter or an error. +func (vh *volumeHost) NewWrapperMounter(volName string, spec volume.Spec, pod *api.Pod, opts volume.VolumeOptions) (volume.Mounter, error) { + // The name of wrapper volume is set to "wrapped_{wrapped_volume_name}" + wrapperVolumeName := "wrapped_" + volName + if spec.Volume != nil { + spec.Volume.Name = wrapperVolumeName + } + + return vh.kubelet.newVolumeMounterFromPlugins(&spec, pod, opts) +} + +// NewWrapperUnmounter attempts to create a volume unmounter +// from a volume name and pod uid. +// Returns a new volume Unmounter or an error. +func (vh *volumeHost) NewWrapperUnmounter(volName string, spec volume.Spec, podUID types.UID) (volume.Unmounter, error) { + // The name of wrapper volume is set to "wrapped_{wrapped_volume_name}" + wrapperVolumeName := "wrapped_" + volName + if spec.Volume != nil { + spec.Volume.Name = wrapperVolumeName + } + + plugin, err := vh.kubelet.volumePluginMgr.FindPluginBySpec(&spec) + if err != nil { + return nil, err + } + + return plugin.NewUnmounter(spec.Name(), podUID) +} + +func (vh *volumeHost) GetCloudProvider() cloudprovider.Interface { + return vh.kubelet.cloud +} + +func (vh *volumeHost) GetMounter() mount.Interface { + return vh.kubelet.mounter +} + +func (vh *volumeHost) GetWriter() io.Writer { + return vh.kubelet.writer +} + +// Returns the hostname of the host kubelet is running on +func (vh *volumeHost) GetHostName() string { + return vh.kubelet.hostname +} + +// mountExternalVolumes mounts the volumes declared in a pod, attaching them +// to the host if necessary, and returns a map containing information about +// the volumes for the pod or an error. This method is run multiple times, +// and requires that implementations of Attach() and SetUp() be idempotent. +// +// Note, in the future, the attach-detach controller will handle attaching and +// detaching volumes; this call site will be maintained for backward- +// compatibility with current behavior of static pods and pods created via the +// Kubelet's http API. 
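+//
+// Roughly, for each volume in the pod spec this method resolves the volume
+// spec (inlining the bound PersistentVolume when a PersistentVolumeClaim is
+// referenced), builds a Mounter, runs Attach/WaitForAttach/MountDevice when
+// the plugin provides an attacher and the device is not already mounted, and
+// finally calls SetUp.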
+func (kl *Kubelet) mountExternalVolumes(pod *api.Pod) (kubecontainer.VolumeMap, error) { + podVolumes := make(kubecontainer.VolumeMap) + for i := range pod.Spec.Volumes { + var fsGroup *int64 + if pod.Spec.SecurityContext != nil && pod.Spec.SecurityContext.FSGroup != nil { + fsGroup = pod.Spec.SecurityContext.FSGroup + } + + rootContext, err := kl.getRootDirContext() + if err != nil { + return nil, err + } + + var volSpec *volume.Spec + if pod.Spec.Volumes[i].VolumeSource.PersistentVolumeClaim != nil { + claimName := pod.Spec.Volumes[i].PersistentVolumeClaim.ClaimName + pv, err := kl.getPersistentVolumeByClaimName(claimName, pod.Namespace) + if err != nil { + glog.Errorf("Could not find persistentVolume for claim %s err %v", claimName, err) + return nil, err + } + kl.applyPersistentVolumeAnnotations(pv, pod) + volSpec = volume.NewSpecFromPersistentVolume(pv, pod.Spec.Volumes[i].PersistentVolumeClaim.ReadOnly) + } else { + volSpec = volume.NewSpecFromVolume(&pod.Spec.Volumes[i]) + } + // Try to use a plugin for this volume. + mounter, err := kl.newVolumeMounterFromPlugins(volSpec, pod, volume.VolumeOptions{RootContext: rootContext}) + if err != nil { + glog.Errorf("Could not create volume mounter for pod %s: %v", pod.UID, err) + return nil, err + } + + // some volumes require attachment before mounter's setup. + // The plugin can be nil, but non-nil errors are legitimate errors. + // For non-nil plugins, Attachment to a node is required before Mounter's setup. + attacher, err := kl.newVolumeAttacherFromPlugins(volSpec, pod) + if err != nil { + glog.Errorf("Could not create volume attacher for pod %s: %v", pod.UID, err) + return nil, err + } + if attacher != nil { + // If the device path is already mounted, avoid an expensive call to the + // cloud provider. + deviceMountPath := attacher.GetDeviceMountPath(volSpec) + notMountPoint, err := kl.mounter.IsLikelyNotMountPoint(deviceMountPath) + if err != nil && !os.IsNotExist(err) { + return nil, err + } + if notMountPoint { + err = attacher.Attach(volSpec, kl.hostname) + if err != nil { + return nil, err + } + + devicePath, err := attacher.WaitForAttach(volSpec, maxWaitForVolumeOps) + if err != nil { + return nil, err + } + + if err = attacher.MountDevice(volSpec, devicePath, deviceMountPath, kl.mounter); err != nil { + return nil, err + } + } + } + + err = mounter.SetUp(fsGroup) + if err != nil { + return nil, err + } + podVolumes[pod.Spec.Volumes[i].Name] = kubecontainer.VolumeInfo{Mounter: mounter} + } + return podVolumes, nil +} + +type volumeTuple struct { + Kind string + Name string +} + +// ListVolumesForPod returns a map of the volumes associated with the given pod +func (kl *Kubelet) ListVolumesForPod(podUID types.UID) (map[string]volume.Volume, bool) { + result := map[string]volume.Volume{} + vm, ok := kl.volumeManager.GetVolumes(podUID) + if !ok { + return result, false + } + for name, info := range vm { + result[name] = info.Mounter + } + return result, true +} + +// getPodVolumes examines the directory structure for a pod and returns +// information about the name and kind of each presently mounted volume, or an +// error. 
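+//
+// The expected on-disk layout is {podVolumesDir}/{volumeKind}/{volumeName};
+// e.g. (illustrative) .../volumes/kubernetes.io~empty-dir/cache-volume.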
+// getPodVolumes examines the directory structure for a pod and returns
+// information about the name and kind of each presently mounted volume, or an
+// error.
+func (kl *Kubelet) getPodVolumes(podUID types.UID) ([]*volumeTuple, error) {
+	var volumes []*volumeTuple
+	podVolDir := kl.getPodVolumesDir(podUID)
+	volumeKindDirs, err := ioutil.ReadDir(podVolDir)
+	if err != nil {
+		glog.Errorf("Could not read directory %s: %v", podVolDir, err)
+	}
+	for _, volumeKindDir := range volumeKindDirs {
+		volumeKind := volumeKindDir.Name()
+		volumeKindPath := path.Join(podVolDir, volumeKind)
+		// ioutil.ReadDir bails out without returning any entries when it hits
+		// the first lstat error, but skipping unreadable entries would leave
+		// healthy volumes without cleanup. util.ReadDirNoExit keeps going and
+		// reports per-entry errors instead, which solves this problem.
+		volumeNameDirs, volumeNameDirsStat, err := util.ReadDirNoExit(volumeKindPath)
+		if err != nil {
+			return []*volumeTuple{}, fmt.Errorf("could not read directory %s: %v", volumeKindPath, err)
+		}
+		for i, volumeNameDir := range volumeNameDirs {
+			if volumeNameDir != nil {
+				volumes = append(volumes, &volumeTuple{Kind: volumeKind, Name: volumeNameDir.Name()})
+			} else {
+				glog.Errorf("Could not read directory %s: %v", volumeKindPath, volumeNameDirsStat[i])
+			}
+		}
+	}
+	return volumes, nil
+}
+
+// cleaner is a union struct that separates detaching from unmounting: some
+// volumes require detachment, but not all. Unmounter cannot be nil, but
+// Detacher is optional.
+type cleaner struct {
+	Unmounter volume.Unmounter
+	Detacher  *volume.Detacher
+}
+
+// getPodVolumesFromDisk examines directory structure to determine volumes that
+// are presently active and mounted. Returns a union struct containing a volume.Unmounter
+// and potentially a volume.Detacher.
+func (kl *Kubelet) getPodVolumesFromDisk() map[string]cleaner {
+	currentVolumes := make(map[string]cleaner)
+	podUIDs, err := kl.listPodsFromDisk()
+	if err != nil {
+		glog.Errorf("Could not get pods from disk: %v", err)
+		return map[string]cleaner{}
+	}
+	// Find the volumes for each on-disk pod.
+	for _, podUID := range podUIDs {
+		volumes, err := kl.getPodVolumes(podUID)
+		if err != nil {
+			glog.Errorf("%v", err)
+			continue
+		}
+		for _, volume := range volumes {
+			identifier := fmt.Sprintf("%s/%s", podUID, volume.Name)
+			glog.V(5).Infof("Making a volume.Unmounter for volume %s/%s of pod %s", volume.Kind, volume.Name, podUID)
+			// TODO(thockin) This should instead return a reference to an extant
+			// volume object, except that we don't actually hold on to pod specs
+			// or volume objects.
+
+			// Try to use a plugin for this volume.
+			unmounter, err := kl.newVolumeUnmounterFromPlugins(volume.Kind, volume.Name, podUID)
+			if err != nil {
+				glog.Errorf("Could not create volume unmounter for %s: %v", volume.Name, err)
+				continue
+			}
+
+			tuple := cleaner{Unmounter: unmounter}
+			detacher, err := kl.newVolumeDetacherFromPlugins(volume.Kind, volume.Name, podUID)
+			// The plugin can be nil, but a non-nil error is a legitimate error.
+			if err != nil {
+				glog.Errorf("Could not create volume detacher for %s: %v", volume.Name, err)
+				continue
+			}
+			if detacher != nil {
+				tuple.Detacher = &detacher
+			}
+			currentVolumes[identifier] = tuple
+		}
+	}
+	return currentVolumes
+}
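An illustrative layout for the directory walk performed by getPodVolumes and getPodVolumesFromDisk above; the UID, plugin kinds, and names are invented, and the `~` form follows the plugin-name escaping used elsewhere in this file:

```go
// Given an on-disk layout like (values invented):
//
//	<rootDir>/pods/6c7e0a2b/volumes/
//	    kubernetes.io~empty-dir/cache
//	    kubernetes.io~secret/default-token
//
// getPodVolumes would return:
tuples := []*volumeTuple{
	{Kind: "kubernetes.io~empty-dir", Name: "cache"},
	{Kind: "kubernetes.io~secret", Name: "default-token"},
}
_ = tuples
```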
+func (kl *Kubelet) getPersistentVolumeByClaimName(claimName string, namespace string) (*api.PersistentVolume, error) {
+	claim, err := kl.kubeClient.Core().PersistentVolumeClaims(namespace).Get(claimName)
+	if err != nil {
+		glog.Errorf("Error finding claim: %+v\n", claimName)
+		return nil, err
+	}
+	glog.V(5).Infof("Found claim %v ", claim)
+
+	if claim.Spec.VolumeName == "" {
+		return nil, fmt.Errorf("The claim %+v is not yet bound to a volume", claimName)
+	}
+
+	pv, err := kl.kubeClient.Core().PersistentVolumes().Get(claim.Spec.VolumeName)
+	if err != nil {
+		glog.Errorf("Error finding persistent volume for claim: %+v\n", claimName)
+		return nil, err
+	}
+
+	if pv.Spec.ClaimRef == nil {
+		return nil, fmt.Errorf("The volume is not yet bound to the claim. Expected to find the bind on volume.Spec.ClaimRef: %+v", pv)
+	}
+
+	if pv.Spec.ClaimRef.UID != claim.UID {
+		return nil, fmt.Errorf("Expected volume.Spec.ClaimRef.UID %+v but have %+v", pv.Spec.ClaimRef.UID, claim.UID)
+	}
+
+	return pv, nil
+}
+
+func (kl *Kubelet) applyPersistentVolumeAnnotations(pv *api.PersistentVolume, pod *api.Pod) error {
+	// If a GID annotation is provided, set the GID attribute.
+	if volumeGid, ok := pv.Annotations[volumeGidAnnotationKey]; ok {
+		gid, err := strconv.ParseInt(volumeGid, 10, 64)
+		if err != nil {
+			return fmt.Errorf("Invalid value for %s %v", volumeGidAnnotationKey, err)
+		}
+
+		if pod.Spec.SecurityContext == nil {
+			pod.Spec.SecurityContext = &api.PodSecurityContext{}
+		}
+		for _, existingGid := range pod.Spec.SecurityContext.SupplementalGroups {
+			if gid == existingGid {
+				return nil
+			}
+		}
+		pod.Spec.SecurityContext.SupplementalGroups = append(pod.Spec.SecurityContext.SupplementalGroups, gid)
+	}
+
+	return nil
+}
+
+// newVolumeMounterFromPlugins attempts to find a plugin by volume spec, pod
+// and volume options and then creates a Mounter.
+// Returns a valid Mounter or an error.
+func (kl *Kubelet) newVolumeMounterFromPlugins(spec *volume.Spec, pod *api.Pod, opts volume.VolumeOptions) (volume.Mounter, error) {
+	plugin, err := kl.volumePluginMgr.FindPluginBySpec(spec)
+	if err != nil {
+		return nil, fmt.Errorf("can't use volume plugins for %s: %v", spec.Name(), err)
+	}
+	physicalMounter, err := plugin.NewMounter(spec, pod, opts)
+	if err != nil {
+		return nil, fmt.Errorf("failed to instantiate mounter for volume: %s using plugin: %s with a root cause: %v", spec.Name(), plugin.Name(), err)
+	}
+	glog.V(10).Infof("Used volume plugin %q to mount %s", plugin.Name(), spec.Name())
+	return physicalMounter, nil
+}
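A small sketch of applyPersistentVolumeAnnotations above in action; the PV name, annotation value, and empty pod are invented:

```go
// Hypothetical objects: a PV carrying the beta GID annotation causes the
// consuming pod to pick up that GID as a supplemental group.
pv := &api.PersistentVolume{
	ObjectMeta: api.ObjectMeta{
		Name:        "nfs-vol",
		Annotations: map[string]string{volumeGidAnnotationKey: "1234"},
	},
}
pod := &api.Pod{}
if err := kl.applyPersistentVolumeAnnotations(pv, pod); err == nil {
	// pod.Spec.SecurityContext.SupplementalGroups is now []int64{1234};
	// applying the same PV again is a no-op because the GID is deduplicated.
}
```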
+// newVolumeAttacherFromPlugins attempts to find a plugin from a volume spec
+// and then create an Attacher.
+// Returns:
+//  - an attacher if one exists
+//  - an error if no plugin was found for the volume
+//    or the attacher failed to instantiate
+//  - nil if there is no appropriate attacher for this volume
+func (kl *Kubelet) newVolumeAttacherFromPlugins(spec *volume.Spec, pod *api.Pod) (volume.Attacher, error) {
+	plugin, err := kl.volumePluginMgr.FindAttachablePluginBySpec(spec)
+	if err != nil {
+		return nil, fmt.Errorf("can't use volume plugins for %s: %v", spec.Name(), err)
+	}
+	if plugin == nil {
+		// Not found but not an error.
+		return nil, nil
+	}
+
+	attacher, err := plugin.NewAttacher()
+	if err != nil {
+		return nil, fmt.Errorf("failed to instantiate volume attacher for %s: %v", spec.Name(), err)
+	}
+	glog.V(3).Infof("Used volume plugin %q to attach %s", plugin.Name(), spec.Name())
+	return attacher, nil
+}
+
+// newVolumeUnmounterFromPlugins attempts to find a plugin by name and then
+// create an Unmounter.
+// Returns a valid Unmounter or an error.
+func (kl *Kubelet) newVolumeUnmounterFromPlugins(kind string, name string, podUID types.UID) (volume.Unmounter, error) {
+	plugName := strings.UnescapeQualifiedNameForDisk(kind)
+	plugin, err := kl.volumePluginMgr.FindPluginByName(plugName)
+	if err != nil {
+		// TODO: Maybe we should launch a cleanup of this dir?
+		return nil, fmt.Errorf("can't use volume plugins for %s/%s: %v", podUID, kind, err)
+	}
+
+	unmounter, err := plugin.NewUnmounter(name, podUID)
+	if err != nil {
+		return nil, fmt.Errorf("failed to instantiate volume plugin for %s/%s: %v", podUID, kind, err)
+	}
+	glog.V(5).Infof("Used volume plugin %q to unmount %s/%s", plugin.Name(), podUID, kind)
+	return unmounter, nil
+}
+
+// newVolumeDetacherFromPlugins attempts to find a plugin by name and then
+// create a Detacher.
+// Returns:
+//  - a detacher if one exists
+//  - an error if no plugin was found for the volume
+//    or the detacher failed to instantiate
+//  - nil if there is no appropriate detacher for this volume
+func (kl *Kubelet) newVolumeDetacherFromPlugins(kind string, name string, podUID types.UID) (volume.Detacher, error) {
+	plugName := strings.UnescapeQualifiedNameForDisk(kind)
+	plugin, err := kl.volumePluginMgr.FindAttachablePluginByName(plugName)
+	if err != nil {
+		return nil, fmt.Errorf("can't use volume plugins for %s/%s: %v", podUID, kind, err)
+	}
+	if plugin == nil {
+		// Not found but not an error.
+		return nil, nil
+	}
+
+	detacher, err := plugin.NewDetacher()
+	if err != nil {
+		return nil, fmt.Errorf("failed to instantiate volume plugin for %s/%s: %v", podUID, kind, err)
+	}
+	return detacher, nil
+}
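The attach/detach lookups above deliberately return (nil, nil) when a volume simply is not attachable; a caller sketch of the three-way result, with error handling abbreviated:

```go
// Hypothetical caller distinguishing "no attachable plugin" from failure.
attacher, err := kl.newVolumeAttacherFromPlugins(volSpec, pod)
switch {
case err != nil:
	return err // plugin lookup or instantiation genuinely failed
case attacher == nil:
	// Volume type is not attachable; proceed straight to SetUp.
default:
	// Attach (and wait for the device) before mounting.
}
```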
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/labels/deep_copy_generated.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/labels/deep_copy_generated.go
new file mode 100644
index 000000000000..e48099d2df42
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/labels/deep_copy_generated.go
@@ -0,0 +1,45 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This file was autogenerated by deepcopy-gen. Do not edit it manually!
+
+package labels
+
+import (
+	conversion "k8s.io/kubernetes/pkg/conversion"
+	sets "k8s.io/kubernetes/pkg/util/sets"
+)
+
+func DeepCopy_labels_Requirement(in Requirement, out *Requirement, c *conversion.Cloner) error {
+	out.key = in.key
+	out.operator = in.operator
+	if in.strValues != nil {
+		in, out := in.strValues, &out.strValues
+		*out = make(sets.String)
+		for key, val := range in {
+			newVal := new(sets.Empty)
+			if err := sets.DeepCopy_sets_Empty(val, newVal, c); err != nil {
+				return err
+			}
+			(*out)[key] = *newVal
+		}
+	} else {
+		out.strValues = nil
+	}
+	return nil
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/labels/labels_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/labels/labels_test.go
new file mode 100644
index 000000000000..8d3834d51462
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/labels/labels_test.go
@@ -0,0 +1,60 @@
+/*
+Copyright 2014 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package labels
+
+import (
+	"testing"
+)
+
+func matches(t *testing.T, ls Set, want string) {
+	if ls.String() != want {
+		t.Errorf("Expected '%s', but got '%s'", want, ls.String())
+	}
+}
+
+func TestSetString(t *testing.T) {
+	matches(t, Set{"x": "y"}, "x=y")
+	matches(t, Set{"foo": "bar"}, "foo=bar")
+	matches(t, Set{"foo": "bar", "baz": "qup"}, "baz=qup,foo=bar")
+
+	// TODO: Make our label representation robust enough to handle labels
+	// with ",=!" characters in their names.
+}
+
+func TestLabelHas(t *testing.T) {
+	labelHasTests := []struct {
+		Ls  Labels
+		Key string
+		Has bool
+	}{
+		{Set{"x": "y"}, "x", true},
+		{Set{"x": ""}, "x", true},
+		{Set{"x": "y"}, "foo", false},
+	}
+	for _, lh := range labelHasTests {
+		if has := lh.Ls.Has(lh.Key); has != lh.Has {
+			t.Errorf("%#v.Has(%#v) => %v, expected %v", lh.Ls, lh.Key, has, lh.Has)
+		}
+	}
+}
+
+func TestLabelGet(t *testing.T) {
+	ls := Set{"x": "y"}
+	if ls.Get("x") != "y" {
+		t.Errorf("Set.Get is broken")
+	}
+}
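The tests above exercise the package's core Set/Selector API; a self-contained usage sketch, with the import path as vendored here:

```go
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/labels"
)

func main() {
	ls := labels.Set{"env": "prod", "app": "web"}
	sel, err := labels.Parse("env=prod,app in (web,api)")
	if err != nil {
		panic(err)
	}
	fmt.Println(sel.Matches(ls)) // true
	fmt.Println(ls.String())     // app=web,env=prod (keys are sorted)
}
```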
"MyName" or "example.com/MyName"`, validation.QualifiedNameMaxLength, validation.QualifiedNameFmt, validation.DNS1123SubdomainMaxLength, validation.DNS1123SubdomainFmt) -var labelValueErrorMsg string = fmt.Sprintf(`must have at most %d characters, matching regex %s: e.g. "MyValue" or ""`, validation.LabelValueMaxLength, validation.LabelValueFmt) - func validateLabelKey(k string) error { - if !validation.IsQualifiedName(k) { - return fmt.Errorf("invalid label key: %s", qualifiedNameErrorMsg) + if errs := validation.IsQualifiedName(k); len(errs) != 0 { + return fmt.Errorf("invalid label key %q: %s", k, strings.Join(errs, "; ")) } return nil } func validateLabelValue(v string) error { - if !validation.IsValidLabelValue(v) { - return fmt.Errorf("invalid label value: %s", labelValueErrorMsg) + if errs := validation.IsValidLabelValue(v); len(errs) != 0 { + return fmt.Errorf("invalid label value: %q: %s", v, strings.Join(errs, "; ")) } return nil } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/labels/selector_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/labels/selector_test.go new file mode 100644 index 000000000000..5fbb1fc762cc --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/labels/selector_test.go @@ -0,0 +1,574 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package labels + +import ( + "reflect" + "strings" + "testing" + + "k8s.io/kubernetes/pkg/util/sets" +) + +func TestSelectorParse(t *testing.T) { + testGoodStrings := []string{ + "x=a,y=b,z=c", + "", + "x!=a,y=b", + "x=", + "x= ", + "x=,z= ", + "x= ,z= ", + "!x", + "x>1.1", + "x>1.1,z<5.3", + } + testBadStrings := []string{ + "x=a||y=b", + "x==a==b", + "!x=a", + "x1.1", Set{"x": "1.2"}) + expectMatch(t, "x<1.1", Set{"x": "0.8"}) + expectNoMatch(t, "x=z", Set{}) + expectNoMatch(t, "x=y", Set{"x": "z"}) + expectNoMatch(t, "x=y,z=w", Set{"x": "w", "z": "w"}) + expectNoMatch(t, "x!=y,z!=w", Set{"x": "z", "z": "w"}) + expectNoMatch(t, "x", Set{"y": "z"}) + expectNoMatch(t, "!x", Set{"x": "z"}) + expectNoMatch(t, "x>1.1", Set{"x": "0.8"}) + expectNoMatch(t, "x<1.1", Set{"x": "1.1"}) + + labelset := Set{ + "foo": "bar", + "baz": "blah", + } + expectMatch(t, "foo=bar", labelset) + expectMatch(t, "baz=blah", labelset) + expectMatch(t, "foo=bar,baz=blah", labelset) + expectNoMatch(t, "foo=blah", labelset) + expectNoMatch(t, "baz=bar", labelset) + expectNoMatch(t, "foo=bar,foobar=bar,baz=blah", labelset) +} + +func expectMatchDirect(t *testing.T, selector, ls Set) { + if !SelectorFromSet(selector).Matches(ls) { + t.Errorf("Wanted %s to match '%s', but it did not.\n", selector, ls) + } +} + +func expectNoMatchDirect(t *testing.T, selector, ls Set) { + if SelectorFromSet(selector).Matches(ls) { + t.Errorf("Wanted '%s' to not match '%s', but it did.", selector, ls) + } +} + +func TestSetMatches(t *testing.T) { + labelset := Set{ + "foo": "bar", + "baz": "blah", + } + expectMatchDirect(t, Set{}, labelset) + expectMatchDirect(t, Set{"foo": "bar"}, labelset) + expectMatchDirect(t, Set{"baz": "blah"}, labelset) + expectMatchDirect(t, Set{"foo": "bar", "baz": "blah"}, labelset) + + //TODO: bad values not handled for the moment in SelectorFromSet + //expectNoMatchDirect(t, Set{"foo": "=blah"}, labelset) + //expectNoMatchDirect(t, Set{"baz": "=bar"}, labelset) + //expectNoMatchDirect(t, Set{"foo": "=bar", "foobar": "bar", "baz": "blah"}, labelset) +} + +func TestNilMapIsValid(t *testing.T) { + selector := Set(nil).AsSelector() + if selector == nil { + t.Errorf("Selector for nil set should be Everything") + } + if !selector.Empty() { + t.Errorf("Selector for nil set should be Empty") + } +} + +func TestSetIsEmpty(t *testing.T) { + if !(Set{}).AsSelector().Empty() { + t.Errorf("Empty set should be empty") + } + if !(NewSelector()).Empty() { + t.Errorf("Nil Selector should be empty") + } +} + +func TestLexer(t *testing.T) { + testcases := []struct { + s string + t Token + }{ + {"", EndOfStringToken}, + {",", CommaToken}, + {"notin", NotInToken}, + {"in", InToken}, + {"=", EqualsToken}, + {"==", DoubleEqualsToken}, + {">", GreaterThanToken}, + {"<", LessThanToken}, + //Note that Lex returns the longest valid token found + {"!", DoesNotExistToken}, + {"!=", NotEqualsToken}, + {"(", OpenParToken}, + {")", ClosedParToken}, + //Non-"special" characters are considered part of an identifier + {"~", IdentifierToken}, + {"||", IdentifierToken}, + } + for _, v := range testcases { + l := &Lexer{s: v.s, pos: 0} + token, lit := l.Lex() + if token != v.t { + t.Errorf("Got %d it should be %d for '%s'", token, v.t, v.s) + } + if v.t != ErrorToken && lit != v.s { + t.Errorf("Got '%s' it should be '%s'", lit, v.s) + } + } +} + +func min(l, r int) (m int) { + m = r + if l < r { + m = l + } + return m +} + +func TestLexerSequence(t *testing.T) { + testcases := []struct { + s string + t []Token + }{ + {"key in ( value )", 
[]Token{IdentifierToken, InToken, OpenParToken, IdentifierToken, ClosedParToken}}, + {"key notin ( value )", []Token{IdentifierToken, NotInToken, OpenParToken, IdentifierToken, ClosedParToken}}, + {"key in ( value1, value2 )", []Token{IdentifierToken, InToken, OpenParToken, IdentifierToken, CommaToken, IdentifierToken, ClosedParToken}}, + {"key", []Token{IdentifierToken}}, + {"!key", []Token{DoesNotExistToken, IdentifierToken}}, + {"()", []Token{OpenParToken, ClosedParToken}}, + {"x in (),y", []Token{IdentifierToken, InToken, OpenParToken, ClosedParToken, CommaToken, IdentifierToken}}, + {"== != (), = notin", []Token{DoubleEqualsToken, NotEqualsToken, OpenParToken, ClosedParToken, CommaToken, EqualsToken, NotInToken}}, + {"key>1.1", []Token{IdentifierToken, GreaterThanToken, IdentifierToken}}, + {"key<0.8", []Token{IdentifierToken, LessThanToken, IdentifierToken}}, + } + for _, v := range testcases { + var literals []string + var tokens []Token + l := &Lexer{s: v.s, pos: 0} + for { + token, lit := l.Lex() + if token == EndOfStringToken { + break + } + tokens = append(tokens, token) + literals = append(literals, lit) + } + if len(tokens) != len(v.t) { + t.Errorf("Bad number of tokens for '%s %d, %d", v.s, len(tokens), len(v.t)) + } + for i := 0; i < min(len(tokens), len(v.t)); i++ { + if tokens[i] != v.t[i] { + t.Errorf("Test '%s': Mismatching in token type found '%v' it should be '%v'", v.s, tokens[i], v.t[i]) + } + } + } +} +func TestParserLookahead(t *testing.T) { + testcases := []struct { + s string + t []Token + }{ + {"key in ( value )", []Token{IdentifierToken, InToken, OpenParToken, IdentifierToken, ClosedParToken, EndOfStringToken}}, + {"key notin ( value )", []Token{IdentifierToken, NotInToken, OpenParToken, IdentifierToken, ClosedParToken, EndOfStringToken}}, + {"key in ( value1, value2 )", []Token{IdentifierToken, InToken, OpenParToken, IdentifierToken, CommaToken, IdentifierToken, ClosedParToken, EndOfStringToken}}, + {"key", []Token{IdentifierToken, EndOfStringToken}}, + {"!key", []Token{DoesNotExistToken, IdentifierToken, EndOfStringToken}}, + {"()", []Token{OpenParToken, ClosedParToken, EndOfStringToken}}, + {"", []Token{EndOfStringToken}}, + {"x in (),y", []Token{IdentifierToken, InToken, OpenParToken, ClosedParToken, CommaToken, IdentifierToken, EndOfStringToken}}, + {"== != (), = notin", []Token{DoubleEqualsToken, NotEqualsToken, OpenParToken, ClosedParToken, CommaToken, EqualsToken, NotInToken, EndOfStringToken}}, + {"key>1.1", []Token{IdentifierToken, GreaterThanToken, IdentifierToken, EndOfStringToken}}, + {"key<0.8", []Token{IdentifierToken, LessThanToken, IdentifierToken, EndOfStringToken}}, + } + for _, v := range testcases { + p := &Parser{l: &Lexer{s: v.s, pos: 0}, position: 0} + p.scan() + if len(p.scannedItems) != len(v.t) { + t.Errorf("Expected %d items found %d", len(v.t), len(p.scannedItems)) + } + for { + token, lit := p.lookahead(KeyAndOperator) + + token2, lit2 := p.consume(KeyAndOperator) + if token == EndOfStringToken { + break + } + if token != token2 || lit != lit2 { + t.Errorf("Bad values") + } + } + } +} + +func TestRequirementConstructor(t *testing.T) { + requirementConstructorTests := []struct { + Key string + Op Operator + Vals sets.String + Success bool + }{ + {"x", InOperator, nil, false}, + {"x", NotInOperator, sets.NewString(), false}, + {"x", InOperator, sets.NewString("foo"), true}, + {"x", NotInOperator, sets.NewString("foo"), true}, + {"x", ExistsOperator, nil, true}, + {"x", DoesNotExistOperator, nil, true}, + {"1foo", InOperator, 
sets.NewString("bar"), true}, + {"1234", InOperator, sets.NewString("bar"), true}, + {"y", GreaterThanOperator, sets.NewString("1.1"), true}, + {"z", LessThanOperator, sets.NewString("5.3"), true}, + {"foo", GreaterThanOperator, sets.NewString("bar"), false}, + {"barz", LessThanOperator, sets.NewString("blah"), false}, + {strings.Repeat("a", 254), ExistsOperator, nil, false}, //breaks DNS rule that len(key) <= 253 + } + for _, rc := range requirementConstructorTests { + if _, err := NewRequirement(rc.Key, rc.Op, rc.Vals); err == nil && !rc.Success { + t.Errorf("expected error with key:%#v op:%v vals:%v, got no error", rc.Key, rc.Op, rc.Vals) + } else if err != nil && rc.Success { + t.Errorf("expected no error with key:%#v op:%v vals:%v, got:%v", rc.Key, rc.Op, rc.Vals, err) + } + } +} + +func TestToString(t *testing.T) { + var req Requirement + toStringTests := []struct { + In *internalSelector + Out string + Valid bool + }{ + + {&internalSelector{ + getRequirement("x", InOperator, sets.NewString("abc", "def"), t), + getRequirement("y", NotInOperator, sets.NewString("jkl"), t), + getRequirement("z", ExistsOperator, nil, t)}, + "x in (abc,def),y notin (jkl),z", true}, + {&internalSelector{ + getRequirement("x", NotInOperator, sets.NewString("abc", "def"), t), + getRequirement("y", NotEqualsOperator, sets.NewString("jkl"), t), + getRequirement("z", DoesNotExistOperator, nil, t)}, + "x notin (abc,def),y!=jkl,!z", true}, + {&internalSelector{ + getRequirement("x", InOperator, sets.NewString("abc", "def"), t), + req}, // adding empty req for the trailing ',' + "x in (abc,def),", false}, + {&internalSelector{ + getRequirement("x", NotInOperator, sets.NewString("abc"), t), + getRequirement("y", InOperator, sets.NewString("jkl", "mno"), t), + getRequirement("z", NotInOperator, sets.NewString(""), t)}, + "x notin (abc),y in (jkl,mno),z notin ()", true}, + {&internalSelector{ + getRequirement("x", EqualsOperator, sets.NewString("abc"), t), + getRequirement("y", DoubleEqualsOperator, sets.NewString("jkl"), t), + getRequirement("z", NotEqualsOperator, sets.NewString("a"), t), + getRequirement("z", ExistsOperator, nil, t)}, + "x=abc,y==jkl,z!=a,z", true}, + {&internalSelector{ + getRequirement("x", GreaterThanOperator, sets.NewString("2.4"), t), + getRequirement("y", LessThanOperator, sets.NewString("7.1"), t), + getRequirement("z", ExistsOperator, nil, t)}, + "x>2.4,y<7.1,z", true}, + } + for _, ts := range toStringTests { + if out := ts.In.String(); out == "" && ts.Valid { + t.Errorf("%+v.String() => '%v' expected no error", ts.In, out) + } else if out != ts.Out { + t.Errorf("%+v.String() => '%v' want '%v'", ts.In, out, ts.Out) + } + } +} + +func TestRequirementSelectorMatching(t *testing.T) { + var req Requirement + labelSelectorMatchingTests := []struct { + Set Set + Sel Selector + Match bool + }{ + {Set{"x": "foo", "y": "baz"}, &internalSelector{ + req, + }, false}, + {Set{"x": "foo", "y": "baz"}, &internalSelector{ + getRequirement("x", InOperator, sets.NewString("foo"), t), + getRequirement("y", NotInOperator, sets.NewString("alpha"), t), + }, true}, + {Set{"x": "foo", "y": "baz"}, &internalSelector{ + getRequirement("x", InOperator, sets.NewString("foo"), t), + getRequirement("y", InOperator, sets.NewString("alpha"), t), + }, false}, + {Set{"y": ""}, &internalSelector{ + getRequirement("x", NotInOperator, sets.NewString(""), t), + getRequirement("y", ExistsOperator, nil, t), + }, true}, + {Set{"y": ""}, &internalSelector{ + getRequirement("x", DoesNotExistOperator, nil, t), + 
getRequirement("y", ExistsOperator, nil, t), + }, true}, + {Set{"y": ""}, &internalSelector{ + getRequirement("x", NotInOperator, sets.NewString(""), t), + getRequirement("y", DoesNotExistOperator, nil, t), + }, false}, + {Set{"y": "baz"}, &internalSelector{ + getRequirement("x", InOperator, sets.NewString(""), t), + }, false}, + {Set{"z": "1.2"}, &internalSelector{ + getRequirement("z", GreaterThanOperator, sets.NewString("1.0"), t), + }, true}, + {Set{"z": "v1.2"}, &internalSelector{ + getRequirement("z", GreaterThanOperator, sets.NewString("1.0"), t), + }, false}, + } + for _, lsm := range labelSelectorMatchingTests { + if match := lsm.Sel.Matches(lsm.Set); match != lsm.Match { + t.Errorf("%+v.Matches(%#v) => %v, want %v", lsm.Sel, lsm.Set, match, lsm.Match) + } + } +} + +func TestSetSelectorParser(t *testing.T) { + setSelectorParserTests := []struct { + In string + Out Selector + Match bool + Valid bool + }{ + {"", NewSelector(), true, true}, + {"\rx", internalSelector{ + getRequirement("x", ExistsOperator, nil, t), + }, true, true}, + {"this-is-a-dns.domain.com/key-with-dash", internalSelector{ + getRequirement("this-is-a-dns.domain.com/key-with-dash", ExistsOperator, nil, t), + }, true, true}, + {"this-is-another-dns.domain.com/key-with-dash in (so,what)", internalSelector{ + getRequirement("this-is-another-dns.domain.com/key-with-dash", InOperator, sets.NewString("so", "what"), t), + }, true, true}, + {"0.1.2.domain/99 notin (10.10.100.1, tick.tack.clock)", internalSelector{ + getRequirement("0.1.2.domain/99", NotInOperator, sets.NewString("10.10.100.1", "tick.tack.clock"), t), + }, true, true}, + {"foo in (abc)", internalSelector{ + getRequirement("foo", InOperator, sets.NewString("abc"), t), + }, true, true}, + {"x notin\n (abc)", internalSelector{ + getRequirement("x", NotInOperator, sets.NewString("abc"), t), + }, true, true}, + {"x notin \t (abc,def)", internalSelector{ + getRequirement("x", NotInOperator, sets.NewString("abc", "def"), t), + }, true, true}, + {"x in (abc,def)", internalSelector{ + getRequirement("x", InOperator, sets.NewString("abc", "def"), t), + }, true, true}, + {"x in (abc,)", internalSelector{ + getRequirement("x", InOperator, sets.NewString("abc", ""), t), + }, true, true}, + {"x in ()", internalSelector{ + getRequirement("x", InOperator, sets.NewString(""), t), + }, true, true}, + {"x notin (abc,,def),bar,z in (),w", internalSelector{ + getRequirement("bar", ExistsOperator, nil, t), + getRequirement("w", ExistsOperator, nil, t), + getRequirement("x", NotInOperator, sets.NewString("abc", "", "def"), t), + getRequirement("z", InOperator, sets.NewString(""), t), + }, true, true}, + {"x,y in (a)", internalSelector{ + getRequirement("y", InOperator, sets.NewString("a"), t), + getRequirement("x", ExistsOperator, nil, t), + }, false, true}, + {"x=a", internalSelector{ + getRequirement("x", EqualsOperator, sets.NewString("a"), t), + }, true, true}, + {"x>1.1", internalSelector{ + getRequirement("x", GreaterThanOperator, sets.NewString("1.1"), t), + }, true, true}, + {"x<7.1", internalSelector{ + getRequirement("x", LessThanOperator, sets.NewString("7.1"), t), + }, true, true}, + {"x=a,y!=b", internalSelector{ + getRequirement("x", EqualsOperator, sets.NewString("a"), t), + getRequirement("y", NotEqualsOperator, sets.NewString("b"), t), + }, true, true}, + {"x=a,y!=b,z in (h,i,j)", internalSelector{ + getRequirement("x", EqualsOperator, sets.NewString("a"), t), + getRequirement("y", NotEqualsOperator, sets.NewString("b"), t), + getRequirement("z", InOperator, 
sets.NewString("h", "i", "j"), t), + }, true, true}, + {"x=a||y=b", internalSelector{}, false, false}, + {"x,,y", nil, true, false}, + {",x,y", nil, true, false}, + {"x nott in (y)", nil, true, false}, + {"x notin ( )", internalSelector{ + getRequirement("x", NotInOperator, sets.NewString(""), t), + }, true, true}, + {"x notin (, a)", internalSelector{ + getRequirement("x", NotInOperator, sets.NewString("", "a"), t), + }, true, true}, + {"a in (xyz),", nil, true, false}, + {"a in (xyz)b notin ()", nil, true, false}, + {"a ", internalSelector{ + getRequirement("a", ExistsOperator, nil, t), + }, true, true}, + {"a in (x,y,notin, z,in)", internalSelector{ + getRequirement("a", InOperator, sets.NewString("in", "notin", "x", "y", "z"), t), + }, true, true}, // operator 'in' inside list of identifiers + {"a in (xyz abc)", nil, false, false}, // no comma + {"a notin(", nil, true, false}, // bad formed + {"a (", nil, false, false}, // cpar + {"(", nil, false, false}, // opar + } + + for _, ssp := range setSelectorParserTests { + if sel, err := Parse(ssp.In); err != nil && ssp.Valid { + t.Errorf("Parse(%s) => %v expected no error", ssp.In, err) + } else if err == nil && !ssp.Valid { + t.Errorf("Parse(%s) => %+v expected error", ssp.In, sel) + } else if ssp.Match && !reflect.DeepEqual(sel, ssp.Out) { + t.Errorf("Parse(%s) => parse output '%#v' doesn't match '%#v' expected match", ssp.In, sel, ssp.Out) + } + } +} + +func getRequirement(key string, op Operator, vals sets.String, t *testing.T) Requirement { + req, err := NewRequirement(key, op, vals) + if err != nil { + t.Errorf("NewRequirement(%v, %v, %v) resulted in error:%v", key, op, vals, err) + return Requirement{} + } + return *req +} + +func TestAdd(t *testing.T) { + testCases := []struct { + name string + sel Selector + key string + operator Operator + values []string + refSelector Selector + }{ + { + "keyInOperator", + internalSelector{}, + "key", + InOperator, + []string{"value"}, + internalSelector{Requirement{"key", InOperator, sets.NewString("value")}}, + }, + { + "keyEqualsOperator", + internalSelector{Requirement{"key", InOperator, sets.NewString("value")}}, + "key2", + EqualsOperator, + []string{"value2"}, + internalSelector{ + Requirement{"key", InOperator, sets.NewString("value")}, + Requirement{"key2", EqualsOperator, sets.NewString("value2")}, + }, + }, + } + for _, ts := range testCases { + req, err := NewRequirement(ts.key, ts.operator, sets.NewString(ts.values...)) + if err != nil { + t.Errorf("%s - Unable to create labels.Requirement", ts.name) + } + ts.sel = ts.sel.Add(*req) + if !reflect.DeepEqual(ts.sel, ts.refSelector) { + t.Errorf("%s - Expected %v found %v", ts.name, ts.refSelector, ts.sel) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/master/OWNERS b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/master/OWNERS new file mode 100644 index 000000000000..89f31a6bedb2 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/master/OWNERS @@ -0,0 +1,5 @@ +assignees: + - derekwaynecarr + - lavalamp + - mikedanese + - nikhiljindal diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/master/controller.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/master/controller.go new file mode 100644 index 000000000000..95c7b49bdab3 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/master/controller.go @@ -0,0 +1,391 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package master + +import ( + "fmt" + "net" + "time" + + "github.com/golang/glog" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/endpoints" + "k8s.io/kubernetes/pkg/api/errors" + "k8s.io/kubernetes/pkg/api/rest" + "k8s.io/kubernetes/pkg/registry/endpoint" + "k8s.io/kubernetes/pkg/registry/namespace" + "k8s.io/kubernetes/pkg/registry/service" + servicecontroller "k8s.io/kubernetes/pkg/registry/service/ipallocator/controller" + portallocatorcontroller "k8s.io/kubernetes/pkg/registry/service/portallocator/controller" + "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/intstr" + utilnet "k8s.io/kubernetes/pkg/util/net" + "k8s.io/kubernetes/pkg/util/runtime" + "k8s.io/kubernetes/pkg/util/wait" +) + +// Controller is the controller manager for the core bootstrap Kubernetes controller +// loops, which manage creating the "kubernetes" service, the "default" and "kube-system" +// namespace, and provide the IP repair check on service IPs +type Controller struct { + NamespaceRegistry namespace.Registry + ServiceRegistry service.Registry + // TODO: MasterCount is yucky + MasterCount int + + ServiceClusterIPRegistry service.RangeRegistry + ServiceClusterIPInterval time.Duration + ServiceClusterIPRange *net.IPNet + + ServiceNodePortRegistry service.RangeRegistry + ServiceNodePortInterval time.Duration + ServiceNodePortRange utilnet.PortRange + + EndpointRegistry endpoint.Registry + EndpointInterval time.Duration + + SystemNamespaces []string + SystemNamespacesInterval time.Duration + + PublicIP net.IP + + ServiceIP net.IP + ServicePort int + ExtraServicePorts []api.ServicePort + ExtraEndpointPorts []api.EndpointPort + PublicServicePort int + KubernetesServiceNodePort int + + runner *util.Runner +} + +// Start begins the core controller loops that must exist for bootstrapping +// a cluster. +func (c *Controller) Start() { + if c.runner != nil { + return + } + + repairClusterIPs := servicecontroller.NewRepair(c.ServiceClusterIPInterval, c.ServiceRegistry, c.ServiceClusterIPRange, c.ServiceClusterIPRegistry) + repairNodePorts := portallocatorcontroller.NewRepair(c.ServiceNodePortInterval, c.ServiceRegistry, c.ServiceNodePortRange, c.ServiceNodePortRegistry) + + // run all of the controllers once prior to returning from Start. + if err := repairClusterIPs.RunOnce(); err != nil { + // If we fail to repair cluster IPs apiserver is useless. We should restart and retry. + glog.Fatalf("Unable to perform initial IP allocation check: %v", err) + } + if err := repairNodePorts.RunOnce(); err != nil { + // If we fail to repair node ports apiserver is useless. We should restart and retry. + glog.Fatalf("Unable to perform initial service nodePort check: %v", err) + } + // Service definition is reconciled during first run to correct port and type per expectations. 
+ if err := c.UpdateKubernetesService(true); err != nil { + glog.Errorf("Unable to perform initial Kubernetes service initialization: %v", err) + } + + c.runner = util.NewRunner(c.RunKubernetesNamespaces, c.RunKubernetesService, repairClusterIPs.RunUntil, repairNodePorts.RunUntil) + c.runner.Start() +} + +// RunKubernetesNamespaces periodically makes sure that all internal namespaces exist +func (c *Controller) RunKubernetesNamespaces(ch chan struct{}) { + wait.Until(func() { + // Loop the system namespace list, and create them if they do not exist + for _, ns := range c.SystemNamespaces { + if err := c.CreateNamespaceIfNeeded(ns); err != nil { + runtime.HandleError(fmt.Errorf("unable to create required kubernetes system namespace %s: %v", ns, err)) + } + } + }, c.SystemNamespacesInterval, ch) +} + +// RunKubernetesService periodically updates the kubernetes service +func (c *Controller) RunKubernetesService(ch chan struct{}) { + wait.Until(func() { + // Service definition is not reconciled after first + // run, ports and type will be corrected only during + // start. + if err := c.UpdateKubernetesService(false); err != nil { + runtime.HandleError(fmt.Errorf("unable to sync kubernetes service: %v", err)) + } + }, c.EndpointInterval, ch) +} + +// UpdateKubernetesService attempts to update the default Kube service. +func (c *Controller) UpdateKubernetesService(reconcile bool) error { + // Update service & endpoint records. + // TODO: when it becomes possible to change this stuff, + // stop polling and start watching. + // TODO: add endpoints of all replicas, not just the elected master. + if err := c.CreateNamespaceIfNeeded(api.NamespaceDefault); err != nil { + return err + } + if c.ServiceIP != nil { + servicePorts, serviceType := createPortAndServiceSpec(c.ServicePort, c.KubernetesServiceNodePort, "https", c.ExtraServicePorts) + if err := c.CreateOrUpdateMasterServiceIfNeeded("kubernetes", c.ServiceIP, servicePorts, serviceType, reconcile); err != nil { + return err + } + endpointPorts := createEndpointPortSpec(c.PublicServicePort, "https", c.ExtraEndpointPorts) + if err := c.ReconcileEndpoints("kubernetes", c.PublicIP, endpointPorts, reconcile); err != nil { + return err + } + } + return nil +} + +// CreateNamespaceIfNeeded will create a namespace if it doesn't already exist +func (c *Controller) CreateNamespaceIfNeeded(ns string) error { + ctx := api.NewContext() + if _, err := c.NamespaceRegistry.GetNamespace(ctx, ns); err == nil { + // the namespace already exists + return nil + } + newNs := &api.Namespace{ + ObjectMeta: api.ObjectMeta{ + Name: ns, + Namespace: "", + }, + } + err := c.NamespaceRegistry.CreateNamespace(ctx, newNs) + if err != nil && errors.IsAlreadyExists(err) { + err = nil + } + return err +} + +// createPortAndServiceSpec creates an array of service ports. +// If the NodePort value is 0, just the servicePort is used, otherwise, a node port is exposed. +func createPortAndServiceSpec(servicePort int, nodePort int, servicePortName string, extraServicePorts []api.ServicePort) ([]api.ServicePort, api.ServiceType) { + //Use the Cluster IP type for the service port if NodePort isn't provided. + //Otherwise, we will be binding the master service to a NodePort. 
+	servicePorts := []api.ServicePort{{Protocol: api.ProtocolTCP,
+		Port:       int32(servicePort),
+		Name:       servicePortName,
+		TargetPort: intstr.FromInt(servicePort)}}
+	serviceType := api.ServiceTypeClusterIP
+	if nodePort > 0 {
+		servicePorts[0].NodePort = int32(nodePort)
+		serviceType = api.ServiceTypeNodePort
+	}
+	if extraServicePorts != nil {
+		servicePorts = append(servicePorts, extraServicePorts...)
+	}
+	return servicePorts, serviceType
+}
+
+// createEndpointPortSpec creates an array of endpoint ports
+func createEndpointPortSpec(endpointPort int, endpointPortName string, extraEndpointPorts []api.EndpointPort) []api.EndpointPort {
+	endpointPorts := []api.EndpointPort{{Protocol: api.ProtocolTCP,
+		Port: int32(endpointPort),
+		Name: endpointPortName,
+	}}
+	if extraEndpointPorts != nil {
+		endpointPorts = append(endpointPorts, extraEndpointPorts...)
+	}
+	return endpointPorts
+}
+
+// CreateOrUpdateMasterServiceIfNeeded will create the specified service if it
+// doesn't already exist, and update it if reconciliation is requested.
+func (c *Controller) CreateOrUpdateMasterServiceIfNeeded(serviceName string, serviceIP net.IP, servicePorts []api.ServicePort, serviceType api.ServiceType, reconcile bool) error {
+	ctx := api.NewDefaultContext()
+	if s, err := c.ServiceRegistry.GetService(ctx, serviceName); err == nil {
+		// The service already exists.
+		if reconcile {
+			if svc, updated := getMasterServiceUpdateIfNeeded(s, servicePorts, serviceType); updated {
+				glog.Warningf("Resetting master service %q to %#v", serviceName, svc)
+				_, err := c.ServiceRegistry.UpdateService(ctx, svc)
+				return err
+			}
+		}
+		return nil
+	}
+	svc := &api.Service{
+		ObjectMeta: api.ObjectMeta{
+			Name:      serviceName,
+			Namespace: api.NamespaceDefault,
+			Labels:    map[string]string{"provider": "kubernetes", "component": "apiserver"},
+		},
+		Spec: api.ServiceSpec{
+			Ports: servicePorts,
+			// maintained by this code, not by the pod selector
+			Selector:        nil,
+			ClusterIP:       serviceIP.String(),
+			SessionAffinity: api.ServiceAffinityClientIP,
+			Type:            serviceType,
+		},
+	}
+	if err := rest.BeforeCreate(service.Strategy, ctx, svc); err != nil {
+		return err
+	}
+
+	_, err := c.ServiceRegistry.CreateService(ctx, svc)
+	if err != nil && errors.IsAlreadyExists(err) {
+		err = nil
+	}
+	return err
+}
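For reference, what createPortAndServiceSpec above yields in its two modes; the port values are invented:

```go
// NodePort == 0: a plain ClusterIP service on the secure port.
ports, svcType := createPortAndServiceSpec(443, 0, "https", nil)
// svcType == api.ServiceTypeClusterIP, ports[0].NodePort == 0

// NodePort > 0: the same port list, but exposed through a NodePort service.
ports, svcType = createPortAndServiceSpec(443, 30443, "https", nil)
// svcType == api.ServiceTypeNodePort, ports[0].NodePort == 30443
```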
+// ReconcileEndpoints sets the endpoints for the given apiserver service (ro or rw).
+// ReconcileEndpoints expects that the endpoints objects it manages will all be
+// managed only by ReconcileEndpoints; therefore, to understand this, you need only
+// understand the requirements and the body of this function.
+//
+// Requirements:
+//  * All apiservers MUST use the same ports for their {rw, ro} services.
+//  * All apiservers MUST use ReconcileEndpoints and only ReconcileEndpoints to manage the
+//      endpoints for their {rw, ro} services.
+//  * All apiservers MUST know and agree on the number of apiservers expected
+//      to be running (c.MasterCount).
+//  * ReconcileEndpoints is called periodically from all apiservers.
+//
+func (c *Controller) ReconcileEndpoints(serviceName string, ip net.IP, endpointPorts []api.EndpointPort, reconcilePorts bool) error {
+	ctx := api.NewDefaultContext()
+	e, err := c.EndpointRegistry.GetEndpoints(ctx, serviceName)
+	if err != nil {
+		e = &api.Endpoints{
+			ObjectMeta: api.ObjectMeta{
+				Name:      serviceName,
+				Namespace: api.NamespaceDefault,
+			},
+		}
+	}
+
+	// First, determine if the endpoint is in the format we expect (one
+	// subset, ports matching endpointPorts, N IP addresses).
+	formatCorrect, ipCorrect, portsCorrect := checkEndpointSubsetFormat(e, ip.String(), endpointPorts, c.MasterCount, reconcilePorts)
+	if !formatCorrect {
+		// Something is egregiously wrong, just re-make the endpoints record.
+		e.Subsets = []api.EndpointSubset{{
+			Addresses: []api.EndpointAddress{{IP: ip.String()}},
+			Ports:     endpointPorts,
+		}}
+		glog.Warningf("Resetting endpoints for master service %q to %v", serviceName, e)
+		return c.EndpointRegistry.UpdateEndpoints(ctx, e)
+	}
+	if ipCorrect && portsCorrect {
+		return nil
+	}
+	if !ipCorrect {
+		// We *always* add our own IP address.
+		e.Subsets[0].Addresses = append(e.Subsets[0].Addresses, api.EndpointAddress{IP: ip.String()})
+
+		// Lexicographic order is retained by this step.
+		e.Subsets = endpoints.RepackSubsets(e.Subsets)
+
+		// If too many IP addresses, remove the ones lexicographically after our
+		// own IP address. Given the requirements stated at the top of
+		// this function, this should cause the list of IP addresses to
+		// become eventually correct.
+		if addrs := &e.Subsets[0].Addresses; len(*addrs) > c.MasterCount {
+			// addrs is a pointer because we're going to mutate it.
+			for i, addr := range *addrs {
+				if addr.IP == ip.String() {
+					for len(*addrs) > c.MasterCount {
+						// wrap around if necessary.
+						remove := (i + 1) % len(*addrs)
+						*addrs = append((*addrs)[:remove], (*addrs)[remove+1:]...)
+					}
+					break
+				}
+			}
+		}
+	}
+	if !portsCorrect {
+		// Reset ports.
+		e.Subsets[0].Ports = endpointPorts
+	}
+	glog.Warningf("Resetting endpoints for master service %q to %v", serviceName, e)
+	return c.EndpointRegistry.UpdateEndpoints(ctx, e)
+}
+
+// Determine if the endpoint is in the format ReconcileEndpoints expects.
+//
+// Return values:
+//  * formatCorrect is true if exactly one subset is found.
+//  * ipCorrect is true when the current master's IP is found and the number
+//      of addresses is less than or equal to the master count.
+//  * portsCorrect is true when endpoint ports exactly match provided ports.
+//      portsCorrect is only evaluated when reconcilePorts is set to true.
+func checkEndpointSubsetFormat(e *api.Endpoints, ip string, ports []api.EndpointPort, count int, reconcilePorts bool) (formatCorrect bool, ipCorrect bool, portsCorrect bool) {
+	if len(e.Subsets) != 1 {
+		return false, false, false
+	}
+	sub := &e.Subsets[0]
+	portsCorrect = true
+	if reconcilePorts {
+		if len(sub.Ports) != len(ports) {
+			portsCorrect = false
+		}
+		for i, port := range ports {
+			if len(sub.Ports) <= i || port != sub.Ports[i] {
+				portsCorrect = false
+				break
+			}
+		}
+	}
+	for _, addr := range sub.Addresses {
+		if addr.IP == ip {
+			ipCorrect = len(sub.Addresses) <= count
+			break
+		}
+	}
+	return true, ipCorrect, portsCorrect
+}
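The wrap-around trim inside ReconcileEndpoints is the subtlest step; a worked example under the assumption that MasterCount is 3 and this apiserver's IP is 2.2.2.2 (all addresses invented):

```go
// addrs = [1.1.1.1  2.2.2.2  3.3.3.3  4.4.4.4  5.5.5.5]   len 5 > 3
// our index i = 1; remove (i+1)%5 = 2   -> drops 3.3.3.3
// next pass: remove (i+1)%4 = 2         -> drops 4.4.4.4
// addrs = [1.1.1.1  2.2.2.2  5.5.5.5]                      len 3, stop
//
// Each apiserver keeps its own address and trims its successors, so with
// every master running this loop the list converges to one IP per master.
```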
+// * getMasterServiceUpdateIfNeeded sets service attributes for the
+//     given apiserver service.
+// * getMasterServiceUpdateIfNeeded expects that the service object it
+//     manages will be managed only by getMasterServiceUpdateIfNeeded;
+//     therefore, to understand this, you need only understand the
+//     requirements and the body of this function.
+// * getMasterServiceUpdateIfNeeded ensures that the correct ports are set.
+//
+// Requirements:
+// * All apiservers MUST use getMasterServiceUpdateIfNeeded and only
+//     getMasterServiceUpdateIfNeeded to manage service attributes.
+// * getMasterServiceUpdateIfNeeded is called periodically from all apiservers.
+func getMasterServiceUpdateIfNeeded(svc *api.Service, servicePorts []api.ServicePort, serviceType api.ServiceType) (s *api.Service, updated bool) {
+	// Determine if the service is in the format we expect
+	// (servicePorts are present and service type matches).
+	formatCorrect := checkServiceFormat(svc, servicePorts, serviceType)
+	if formatCorrect {
+		return svc, false
+	}
+	svc.Spec.Ports = servicePorts
+	svc.Spec.Type = serviceType
+	return svc, true
+}
+
+// Determine if the service is in the correct format
+// getMasterServiceUpdateIfNeeded expects (servicePorts are correct
+// and service type matches).
+func checkServiceFormat(s *api.Service, ports []api.ServicePort, serviceType api.ServiceType) (formatCorrect bool) {
+	if s.Spec.Type != serviceType {
+		return false
+	}
+	if len(ports) != len(s.Spec.Ports) {
+		return false
+	}
+	for i, port := range ports {
+		if port != s.Spec.Ports[i] {
+			return false
+		}
+	}
+	return true
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/master/controller_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/master/controller_test.go
new file mode 100644
index 000000000000..e79c4af69b00
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/master/controller_test.go
@@ -0,0 +1,869 @@
+/*
+Copyright 2014 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package master + +import ( + "errors" + "net" + "reflect" + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/registry/registrytest" + "k8s.io/kubernetes/pkg/util/intstr" +) + +func TestReconcileEndpoints(t *testing.T) { + ns := api.NamespaceDefault + om := func(name string) api.ObjectMeta { + return api.ObjectMeta{Namespace: ns, Name: name} + } + reconcile_tests := []struct { + testName string + serviceName string + ip string + endpointPorts []api.EndpointPort + additionalMasters int + endpoints *api.EndpointsList + expectUpdate *api.Endpoints // nil means none expected + }{ + { + testName: "no existing endpoints", + serviceName: "foo", + ip: "1.2.3.4", + endpointPorts: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}}, + endpoints: nil, + expectUpdate: &api.Endpoints{ + ObjectMeta: om("foo"), + Subsets: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}}, + Ports: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}}, + }}, + }, + }, + { + testName: "existing endpoints satisfy", + serviceName: "foo", + ip: "1.2.3.4", + endpointPorts: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}}, + endpoints: &api.EndpointsList{ + Items: []api.Endpoints{{ + ObjectMeta: om("foo"), + Subsets: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}}, + Ports: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}}, + }}, + }}, + }, + }, + { + testName: "existing endpoints satisfy but too many", + serviceName: "foo", + ip: "1.2.3.4", + endpointPorts: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}}, + endpoints: &api.EndpointsList{ + Items: []api.Endpoints{{ + ObjectMeta: om("foo"), + Subsets: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}, {IP: "4.3.2.1"}}, + Ports: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}}, + }}, + }}, + }, + expectUpdate: &api.Endpoints{ + ObjectMeta: om("foo"), + Subsets: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}}, + Ports: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}}, + }}, + }, + }, + { + testName: "existing endpoints satisfy but too many + extra masters", + serviceName: "foo", + ip: "1.2.3.4", + endpointPorts: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}}, + additionalMasters: 3, + endpoints: &api.EndpointsList{ + Items: []api.Endpoints{{ + ObjectMeta: om("foo"), + Subsets: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{ + {IP: "1.2.3.4"}, + {IP: "4.3.2.1"}, + {IP: "4.3.2.2"}, + {IP: "4.3.2.3"}, + {IP: "4.3.2.4"}, + }, + Ports: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}}, + }}, + }}, + }, + expectUpdate: &api.Endpoints{ + ObjectMeta: om("foo"), + Subsets: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{ + {IP: "1.2.3.4"}, + {IP: "4.3.2.2"}, + {IP: "4.3.2.3"}, + {IP: "4.3.2.4"}, + }, + Ports: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}}, + }}, + }, + }, + { + testName: "existing endpoints satisfy but too many + extra masters + delete first", + serviceName: "foo", + ip: "4.3.2.4", + endpointPorts: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}}, + additionalMasters: 3, + endpoints: &api.EndpointsList{ + Items: []api.Endpoints{{ + ObjectMeta: om("foo"), + Subsets: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{ + {IP: "1.2.3.4"}, + {IP: "4.3.2.1"}, + {IP: "4.3.2.2"}, + {IP: "4.3.2.3"}, + {IP: "4.3.2.4"}, + }, + Ports: []api.EndpointPort{{Name: "foo", Port: 8080, 
Protocol: "TCP"}}, + }}, + }}, + }, + expectUpdate: &api.Endpoints{ + ObjectMeta: om("foo"), + Subsets: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{ + {IP: "4.3.2.1"}, + {IP: "4.3.2.2"}, + {IP: "4.3.2.3"}, + {IP: "4.3.2.4"}, + }, + Ports: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}}, + }}, + }, + }, + { + testName: "existing endpoints satisfy and endpoint addresses length less than master count", + serviceName: "foo", + ip: "4.3.2.2", + endpointPorts: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}}, + additionalMasters: 3, + endpoints: &api.EndpointsList{ + Items: []api.Endpoints{{ + ObjectMeta: om("foo"), + Subsets: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{ + {IP: "4.3.2.1"}, + {IP: "4.3.2.2"}, + }, + Ports: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}}, + }}, + }}, + }, + expectUpdate: nil, + }, + { + testName: "existing endpoints current IP missing and address length less than master count", + serviceName: "foo", + ip: "4.3.2.2", + endpointPorts: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}}, + additionalMasters: 3, + endpoints: &api.EndpointsList{ + Items: []api.Endpoints{{ + ObjectMeta: om("foo"), + Subsets: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{ + {IP: "4.3.2.1"}, + }, + Ports: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}}, + }}, + }}, + }, + expectUpdate: &api.Endpoints{ + ObjectMeta: om("foo"), + Subsets: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{ + {IP: "4.3.2.1"}, + {IP: "4.3.2.2"}, + }, + Ports: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}}, + }}, + }, + }, + { + testName: "existing endpoints wrong name", + serviceName: "foo", + ip: "1.2.3.4", + endpointPorts: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}}, + endpoints: &api.EndpointsList{ + Items: []api.Endpoints{{ + ObjectMeta: om("bar"), + Subsets: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}}, + Ports: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}}, + }}, + }}, + }, + expectUpdate: &api.Endpoints{ + ObjectMeta: om("foo"), + Subsets: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}}, + Ports: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}}, + }}, + }, + }, + { + testName: "existing endpoints wrong IP", + serviceName: "foo", + ip: "1.2.3.4", + endpointPorts: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}}, + endpoints: &api.EndpointsList{ + Items: []api.Endpoints{{ + ObjectMeta: om("foo"), + Subsets: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{IP: "4.3.2.1"}}, + Ports: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}}, + }}, + }}, + }, + expectUpdate: &api.Endpoints{ + ObjectMeta: om("foo"), + Subsets: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}}, + Ports: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}}, + }}, + }, + }, + { + testName: "existing endpoints wrong port", + serviceName: "foo", + ip: "1.2.3.4", + endpointPorts: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}}, + endpoints: &api.EndpointsList{ + Items: []api.Endpoints{{ + ObjectMeta: om("foo"), + Subsets: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}}, + Ports: []api.EndpointPort{{Name: "foo", Port: 9090, Protocol: "TCP"}}, + }}, + }}, + }, + expectUpdate: &api.Endpoints{ + ObjectMeta: om("foo"), + Subsets: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{IP: 
"1.2.3.4"}}, + Ports: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}}, + }}, + }, + }, + { + testName: "existing endpoints wrong protocol", + serviceName: "foo", + ip: "1.2.3.4", + endpointPorts: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}}, + endpoints: &api.EndpointsList{ + Items: []api.Endpoints{{ + ObjectMeta: om("foo"), + Subsets: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}}, + Ports: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "UDP"}}, + }}, + }}, + }, + expectUpdate: &api.Endpoints{ + ObjectMeta: om("foo"), + Subsets: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}}, + Ports: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}}, + }}, + }, + }, + { + testName: "existing endpoints wrong port name", + serviceName: "foo", + ip: "1.2.3.4", + endpointPorts: []api.EndpointPort{{Name: "baz", Port: 8080, Protocol: "TCP"}}, + endpoints: &api.EndpointsList{ + Items: []api.Endpoints{{ + ObjectMeta: om("foo"), + Subsets: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}}, + Ports: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}}, + }}, + }}, + }, + expectUpdate: &api.Endpoints{ + ObjectMeta: om("foo"), + Subsets: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}}, + Ports: []api.EndpointPort{{Name: "baz", Port: 8080, Protocol: "TCP"}}, + }}, + }, + }, + { + testName: "existing endpoints extra service ports satisfy", + serviceName: "foo", + ip: "1.2.3.4", + endpointPorts: []api.EndpointPort{ + {Name: "foo", Port: 8080, Protocol: "TCP"}, + {Name: "bar", Port: 1000, Protocol: "TCP"}, + {Name: "baz", Port: 1010, Protocol: "TCP"}, + }, + endpoints: &api.EndpointsList{ + Items: []api.Endpoints{{ + ObjectMeta: om("foo"), + Subsets: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}}, + Ports: []api.EndpointPort{ + {Name: "foo", Port: 8080, Protocol: "TCP"}, + {Name: "bar", Port: 1000, Protocol: "TCP"}, + {Name: "baz", Port: 1010, Protocol: "TCP"}, + }, + }}, + }}, + }, + }, + { + testName: "existing endpoints extra service ports missing port", + serviceName: "foo", + ip: "1.2.3.4", + endpointPorts: []api.EndpointPort{ + {Name: "foo", Port: 8080, Protocol: "TCP"}, + {Name: "bar", Port: 1000, Protocol: "TCP"}, + }, + endpoints: &api.EndpointsList{ + Items: []api.Endpoints{{ + ObjectMeta: om("foo"), + Subsets: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}}, + Ports: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}}, + }}, + }}, + }, + expectUpdate: &api.Endpoints{ + ObjectMeta: om("foo"), + Subsets: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}}, + Ports: []api.EndpointPort{ + {Name: "foo", Port: 8080, Protocol: "TCP"}, + {Name: "bar", Port: 1000, Protocol: "TCP"}, + }, + }}, + }, + }, + } + for _, test := range reconcile_tests { + master := Controller{MasterCount: test.additionalMasters + 1} + registry := ®istrytest.EndpointRegistry{ + Endpoints: test.endpoints, + } + master.EndpointRegistry = registry + err := master.ReconcileEndpoints(test.serviceName, net.ParseIP(test.ip), test.endpointPorts, true) + if err != nil { + t.Errorf("case %q: unexpected error: %v", test.testName, err) + } + if test.expectUpdate != nil { + if len(registry.Updates) != 1 { + t.Errorf("case %q: unexpected updates: %v", test.testName, registry.Updates) + } else if e, a := test.expectUpdate, ®istry.Updates[0]; !reflect.DeepEqual(e, a) { + t.Errorf("case %q: expected 
update:\n%#v\ngot:\n%#v\n", test.testName, e, a) + } + } + if test.expectUpdate == nil && len(registry.Updates) > 0 { + t.Errorf("case %q: no update expected, yet saw: %v", test.testName, registry.Updates) + } + } + + non_reconcile_tests := []struct { + testName string + serviceName string + ip string + endpointPorts []api.EndpointPort + additionalMasters int + endpoints *api.EndpointsList + expectUpdate *api.Endpoints // nil means none expected + }{ + { + testName: "existing endpoints extra service ports missing port no update", + serviceName: "foo", + ip: "1.2.3.4", + endpointPorts: []api.EndpointPort{ + {Name: "foo", Port: 8080, Protocol: "TCP"}, + {Name: "bar", Port: 1000, Protocol: "TCP"}, + }, + endpoints: &api.EndpointsList{ + Items: []api.Endpoints{{ + ObjectMeta: om("foo"), + Subsets: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}}, + Ports: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}}, + }}, + }}, + }, + expectUpdate: nil, + }, + { + testName: "existing endpoints extra service ports, wrong ports, wrong IP", + serviceName: "foo", + ip: "1.2.3.4", + endpointPorts: []api.EndpointPort{ + {Name: "foo", Port: 8080, Protocol: "TCP"}, + {Name: "bar", Port: 1000, Protocol: "TCP"}, + }, + endpoints: &api.EndpointsList{ + Items: []api.Endpoints{{ + ObjectMeta: om("foo"), + Subsets: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{IP: "4.3.2.1"}}, + Ports: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}}, + }}, + }}, + }, + expectUpdate: &api.Endpoints{ + ObjectMeta: om("foo"), + Subsets: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}}, + Ports: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}}, + }}, + }, + }, + { + testName: "no existing endpoints", + serviceName: "foo", + ip: "1.2.3.4", + endpointPorts: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}}, + endpoints: nil, + expectUpdate: &api.Endpoints{ + ObjectMeta: om("foo"), + Subsets: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{IP: "1.2.3.4"}}, + Ports: []api.EndpointPort{{Name: "foo", Port: 8080, Protocol: "TCP"}}, + }}, + }, + }, + } + for _, test := range non_reconcile_tests { + master := Controller{MasterCount: test.additionalMasters + 1} + registry := ®istrytest.EndpointRegistry{ + Endpoints: test.endpoints, + } + master.EndpointRegistry = registry + err := master.ReconcileEndpoints(test.serviceName, net.ParseIP(test.ip), test.endpointPorts, false) + if err != nil { + t.Errorf("case %q: unexpected error: %v", test.testName, err) + } + if test.expectUpdate != nil { + if len(registry.Updates) != 1 { + t.Errorf("case %q: unexpected updates: %v", test.testName, registry.Updates) + } else if e, a := test.expectUpdate, ®istry.Updates[0]; !reflect.DeepEqual(e, a) { + t.Errorf("case %q: expected update:\n%#v\ngot:\n%#v\n", test.testName, e, a) + } + } + if test.expectUpdate == nil && len(registry.Updates) > 0 { + t.Errorf("case %q: no update expected, yet saw: %v", test.testName, registry.Updates) + } + } + +} + +func TestCreateOrUpdateMasterService(t *testing.T) { + ns := api.NamespaceDefault + om := func(name string) api.ObjectMeta { + return api.ObjectMeta{Namespace: ns, Name: name} + } + + create_tests := []struct { + testName string + serviceName string + servicePorts []api.ServicePort + serviceType api.ServiceType + expectCreate *api.Service // nil means none expected + }{ + { + testName: "service does not exist", + serviceName: "foo", + servicePorts: []api.ServicePort{ + {Name: "foo", Port: 8080, 
Protocol: "TCP", TargetPort: intstr.FromInt(8080)}, + }, + serviceType: api.ServiceTypeClusterIP, + expectCreate: &api.Service{ + ObjectMeta: om("foo"), + Spec: api.ServiceSpec{ + Ports: []api.ServicePort{ + {Name: "foo", Port: 8080, Protocol: "TCP", TargetPort: intstr.FromInt(8080)}, + }, + Selector: nil, + ClusterIP: "1.2.3.4", + SessionAffinity: api.ServiceAffinityClientIP, + Type: api.ServiceTypeClusterIP, + }, + }, + }, + } + for _, test := range create_tests { + master := Controller{MasterCount: 1} + registry := ®istrytest.ServiceRegistry{ + Err: errors.New("unable to get svc"), + } + master.ServiceRegistry = registry + master.CreateOrUpdateMasterServiceIfNeeded(test.serviceName, net.ParseIP("1.2.3.4"), test.servicePorts, test.serviceType, false) + if test.expectCreate != nil { + if len(registry.List.Items) != 1 { + t.Errorf("case %q: unexpected creations: %v", test.testName, registry.List.Items) + } else if e, a := test.expectCreate.Spec, registry.List.Items[0].Spec; !reflect.DeepEqual(e, a) { + t.Errorf("case %q: expected create:\n%#v\ngot:\n%#v\n", test.testName, e, a) + } + } + if test.expectCreate == nil && len(registry.List.Items) > 1 { + t.Errorf("case %q: no create expected, yet saw: %v", test.testName, registry.List.Items) + } + } + + reconcile_tests := []struct { + testName string + serviceName string + servicePorts []api.ServicePort + serviceType api.ServiceType + service *api.Service + expectUpdate *api.Service // nil means none expected + }{ + { + testName: "service definition wrong port", + serviceName: "foo", + servicePorts: []api.ServicePort{ + {Name: "foo", Port: 8080, Protocol: "TCP", TargetPort: intstr.FromInt(8080)}, + }, + serviceType: api.ServiceTypeClusterIP, + service: &api.Service{ + ObjectMeta: om("foo"), + Spec: api.ServiceSpec{ + Ports: []api.ServicePort{ + {Name: "foo", Port: 8000, Protocol: "TCP", TargetPort: intstr.FromInt(8080)}, + }, + Selector: nil, + ClusterIP: "1.2.3.4", + SessionAffinity: api.ServiceAffinityClientIP, + Type: api.ServiceTypeClusterIP, + }, + }, + expectUpdate: &api.Service{ + ObjectMeta: om("foo"), + Spec: api.ServiceSpec{ + Ports: []api.ServicePort{ + {Name: "foo", Port: 8080, Protocol: "TCP", TargetPort: intstr.FromInt(8080)}, + }, + Selector: nil, + ClusterIP: "1.2.3.4", + SessionAffinity: api.ServiceAffinityClientIP, + Type: api.ServiceTypeClusterIP, + }, + }, + }, + { + testName: "service definition missing port", + serviceName: "foo", + servicePorts: []api.ServicePort{ + {Name: "foo", Port: 8080, Protocol: "TCP", TargetPort: intstr.FromInt(8080)}, + {Name: "baz", Port: 1000, Protocol: "TCP", TargetPort: intstr.FromInt(1000)}, + }, + serviceType: api.ServiceTypeClusterIP, + service: &api.Service{ + ObjectMeta: om("foo"), + Spec: api.ServiceSpec{ + Ports: []api.ServicePort{ + {Name: "foo", Port: 8080, Protocol: "TCP", TargetPort: intstr.FromInt(8080)}, + }, + Selector: nil, + ClusterIP: "1.2.3.4", + SessionAffinity: api.ServiceAffinityClientIP, + Type: api.ServiceTypeClusterIP, + }, + }, + expectUpdate: &api.Service{ + ObjectMeta: om("foo"), + Spec: api.ServiceSpec{ + Ports: []api.ServicePort{ + {Name: "foo", Port: 8080, Protocol: "TCP", TargetPort: intstr.FromInt(8080)}, + {Name: "baz", Port: 1000, Protocol: "TCP", TargetPort: intstr.FromInt(1000)}, + }, + Selector: nil, + ClusterIP: "1.2.3.4", + SessionAffinity: api.ServiceAffinityClientIP, + Type: api.ServiceTypeClusterIP, + }, + }, + }, + { + testName: "service definition incorrect port", + serviceName: "foo", + servicePorts: []api.ServicePort{ + {Name: "foo", Port: 8080, 
Protocol: "TCP", TargetPort: intstr.FromInt(8080)}, + }, + serviceType: api.ServiceTypeClusterIP, + service: &api.Service{ + ObjectMeta: om("foo"), + Spec: api.ServiceSpec{ + Ports: []api.ServicePort{ + {Name: "bar", Port: 1000, Protocol: "UDP", TargetPort: intstr.FromInt(1000)}, + }, + Selector: nil, + ClusterIP: "1.2.3.4", + SessionAffinity: api.ServiceAffinityClientIP, + Type: api.ServiceTypeClusterIP, + }, + }, + expectUpdate: &api.Service{ + ObjectMeta: om("foo"), + Spec: api.ServiceSpec{ + Ports: []api.ServicePort{ + {Name: "foo", Port: 8080, Protocol: "TCP", TargetPort: intstr.FromInt(8080)}, + }, + Selector: nil, + ClusterIP: "1.2.3.4", + SessionAffinity: api.ServiceAffinityClientIP, + Type: api.ServiceTypeClusterIP, + }, + }, + }, + { + testName: "service definition incorrect port name", + serviceName: "foo", + servicePorts: []api.ServicePort{ + {Name: "foo", Port: 8080, Protocol: "TCP", TargetPort: intstr.FromInt(8080)}, + }, + serviceType: api.ServiceTypeClusterIP, + service: &api.Service{ + ObjectMeta: om("foo"), + Spec: api.ServiceSpec{ + Ports: []api.ServicePort{ + {Name: "foo", Port: 1000, Protocol: "UDP", TargetPort: intstr.FromInt(1000)}, + }, + Selector: nil, + ClusterIP: "1.2.3.4", + SessionAffinity: api.ServiceAffinityClientIP, + Type: api.ServiceTypeClusterIP, + }, + }, + expectUpdate: &api.Service{ + ObjectMeta: om("foo"), + Spec: api.ServiceSpec{ + Ports: []api.ServicePort{ + {Name: "foo", Port: 8080, Protocol: "TCP", TargetPort: intstr.FromInt(8080)}, + }, + Selector: nil, + ClusterIP: "1.2.3.4", + SessionAffinity: api.ServiceAffinityClientIP, + Type: api.ServiceTypeClusterIP, + }, + }, + }, + { + testName: "service definition incorrect target port", + serviceName: "foo", + servicePorts: []api.ServicePort{ + {Name: "foo", Port: 8080, Protocol: "TCP", TargetPort: intstr.FromInt(8080)}, + }, + serviceType: api.ServiceTypeClusterIP, + service: &api.Service{ + ObjectMeta: om("foo"), + Spec: api.ServiceSpec{ + Ports: []api.ServicePort{ + {Name: "foo", Port: 8080, Protocol: "TCP", TargetPort: intstr.FromInt(1000)}, + }, + Selector: nil, + ClusterIP: "1.2.3.4", + SessionAffinity: api.ServiceAffinityClientIP, + Type: api.ServiceTypeClusterIP, + }, + }, + expectUpdate: &api.Service{ + ObjectMeta: om("foo"), + Spec: api.ServiceSpec{ + Ports: []api.ServicePort{ + {Name: "foo", Port: 8080, Protocol: "TCP", TargetPort: intstr.FromInt(8080)}, + }, + Selector: nil, + ClusterIP: "1.2.3.4", + SessionAffinity: api.ServiceAffinityClientIP, + Type: api.ServiceTypeClusterIP, + }, + }, + }, + { + testName: "service definition incorrect protocol", + serviceName: "foo", + servicePorts: []api.ServicePort{ + {Name: "foo", Port: 8080, Protocol: "TCP", TargetPort: intstr.FromInt(8080)}, + }, + serviceType: api.ServiceTypeClusterIP, + service: &api.Service{ + ObjectMeta: om("foo"), + Spec: api.ServiceSpec{ + Ports: []api.ServicePort{ + {Name: "foo", Port: 8080, Protocol: "UDP", TargetPort: intstr.FromInt(8080)}, + }, + Selector: nil, + ClusterIP: "1.2.3.4", + SessionAffinity: api.ServiceAffinityClientIP, + Type: api.ServiceTypeClusterIP, + }, + }, + expectUpdate: &api.Service{ + ObjectMeta: om("foo"), + Spec: api.ServiceSpec{ + Ports: []api.ServicePort{ + {Name: "foo", Port: 8080, Protocol: "TCP", TargetPort: intstr.FromInt(8080)}, + }, + Selector: nil, + ClusterIP: "1.2.3.4", + SessionAffinity: api.ServiceAffinityClientIP, + Type: api.ServiceTypeClusterIP, + }, + }, + }, + { + testName: "service definition has incorrect type", + serviceName: "foo", + servicePorts: []api.ServicePort{ + {Name: 
"foo", Port: 8080, Protocol: "TCP", TargetPort: intstr.FromInt(8080)}, + }, + serviceType: api.ServiceTypeClusterIP, + service: &api.Service{ + ObjectMeta: om("foo"), + Spec: api.ServiceSpec{ + Ports: []api.ServicePort{ + {Name: "foo", Port: 8080, Protocol: "TCP", TargetPort: intstr.FromInt(8080)}, + }, + Selector: nil, + ClusterIP: "1.2.3.4", + SessionAffinity: api.ServiceAffinityClientIP, + Type: api.ServiceTypeNodePort, + }, + }, + expectUpdate: &api.Service{ + ObjectMeta: om("foo"), + Spec: api.ServiceSpec{ + Ports: []api.ServicePort{ + {Name: "foo", Port: 8080, Protocol: "TCP", TargetPort: intstr.FromInt(8080)}, + }, + Selector: nil, + ClusterIP: "1.2.3.4", + SessionAffinity: api.ServiceAffinityClientIP, + Type: api.ServiceTypeClusterIP, + }, + }, + }, + { + testName: "service definition satisfies", + serviceName: "foo", + servicePorts: []api.ServicePort{ + {Name: "foo", Port: 8080, Protocol: "TCP", TargetPort: intstr.FromInt(8080)}, + }, + serviceType: api.ServiceTypeClusterIP, + service: &api.Service{ + ObjectMeta: om("foo"), + Spec: api.ServiceSpec{ + Ports: []api.ServicePort{ + {Name: "foo", Port: 8080, Protocol: "TCP", TargetPort: intstr.FromInt(8080)}, + }, + Selector: nil, + ClusterIP: "1.2.3.4", + SessionAffinity: api.ServiceAffinityClientIP, + Type: api.ServiceTypeClusterIP, + }, + }, + expectUpdate: nil, + }, + } + for _, test := range reconcile_tests { + master := Controller{MasterCount: 1} + registry := ®istrytest.ServiceRegistry{ + Service: test.service, + } + master.ServiceRegistry = registry + err := master.CreateOrUpdateMasterServiceIfNeeded(test.serviceName, net.ParseIP("1.2.3.4"), test.servicePorts, test.serviceType, true) + if err != nil { + t.Errorf("case %q: unexpected error: %v", test.testName, err) + } + if test.expectUpdate != nil { + if len(registry.Updates) != 1 { + t.Errorf("case %q: unexpected updates: %v", test.testName, registry.Updates) + } else if e, a := test.expectUpdate, ®istry.Updates[0]; !reflect.DeepEqual(e, a) { + t.Errorf("case %q: expected update:\n%#v\ngot:\n%#v\n", test.testName, e, a) + } + } + if test.expectUpdate == nil && len(registry.Updates) > 0 { + t.Errorf("case %q: no update expected, yet saw: %v", test.testName, registry.Updates) + } + } + + non_reconcile_tests := []struct { + testName string + serviceName string + servicePorts []api.ServicePort + serviceType api.ServiceType + service *api.Service + expectUpdate *api.Service // nil means none expected + }{ + { + testName: "service definition wrong port, no expected update", + serviceName: "foo", + servicePorts: []api.ServicePort{ + {Name: "foo", Port: 8080, Protocol: "TCP", TargetPort: intstr.FromInt(8080)}, + }, + serviceType: api.ServiceTypeClusterIP, + service: &api.Service{ + ObjectMeta: om("foo"), + Spec: api.ServiceSpec{ + Ports: []api.ServicePort{ + {Name: "foo", Port: 1000, Protocol: "TCP", TargetPort: intstr.FromInt(1000)}, + }, + Selector: nil, + ClusterIP: "1.2.3.4", + SessionAffinity: api.ServiceAffinityClientIP, + Type: api.ServiceTypeClusterIP, + }, + }, + expectUpdate: nil, + }, + } + for _, test := range non_reconcile_tests { + master := Controller{MasterCount: 1} + registry := ®istrytest.ServiceRegistry{ + Service: test.service, + } + master.ServiceRegistry = registry + err := master.CreateOrUpdateMasterServiceIfNeeded(test.serviceName, net.ParseIP("1.2.3.4"), test.servicePorts, test.serviceType, false) + if err != nil { + t.Errorf("case %q: unexpected error: %v", test.testName, err) + } + if test.expectUpdate != nil { + if len(registry.Updates) != 1 { + 
t.Errorf("case %q: unexpected updates: %v", test.testName, registry.Updates) + } else if e, a := test.expectUpdate, ®istry.Updates[0]; !reflect.DeepEqual(e, a) { + t.Errorf("case %q: expected update:\n%#v\ngot:\n%#v\n", test.testName, e, a) + } + } + if test.expectUpdate == nil && len(registry.Updates) > 0 { + t.Errorf("case %q: no update expected, yet saw: %v", test.testName, registry.Updates) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/master/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/master/doc.go new file mode 100644 index 000000000000..cc21977b8009 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/master/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package master contains code for setting up and running a Kubernetes +// cluster master. +package master diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/master/import_known_versions.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/master/import_known_versions.go new file mode 100644 index 000000000000..f7ad207fae30 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/master/import_known_versions.go @@ -0,0 +1,39 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package master + +// These imports are the API groups the API server will support. +import ( + "fmt" + + _ "k8s.io/kubernetes/pkg/api/install" + "k8s.io/kubernetes/pkg/apimachinery/registered" + _ "k8s.io/kubernetes/pkg/apis/apps/install" + _ "k8s.io/kubernetes/pkg/apis/authorization/install" + _ "k8s.io/kubernetes/pkg/apis/autoscaling/install" + _ "k8s.io/kubernetes/pkg/apis/batch/install" + _ "k8s.io/kubernetes/pkg/apis/componentconfig/install" + _ "k8s.io/kubernetes/pkg/apis/extensions/install" + _ "k8s.io/kubernetes/pkg/apis/policy/install" + _ "k8s.io/kubernetes/pkg/apis/rbac/install" +) + +func init() { + if missingVersions := registered.ValidateEnvRequestedVersions(); len(missingVersions) != 0 { + panic(fmt.Sprintf("KUBE_API_VERSIONS contains versions that are not installed: %q.", missingVersions)) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/master/master.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/master/master.go new file mode 100644 index 000000000000..736e53603286 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/master/master.go @@ -0,0 +1,1062 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package master + +import ( + "fmt" + "io" + "net" + "net/http" + "net/url" + "strconv" + "strings" + "sync" + "time" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/rest" + "k8s.io/kubernetes/pkg/api/unversioned" + apiv1 "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/apimachinery/registered" + "k8s.io/kubernetes/pkg/apis/apps" + appsapi "k8s.io/kubernetes/pkg/apis/apps/v1alpha1" + "k8s.io/kubernetes/pkg/apis/autoscaling" + autoscalingapiv1 "k8s.io/kubernetes/pkg/apis/autoscaling/v1" + "k8s.io/kubernetes/pkg/apis/batch" + batchapiv1 "k8s.io/kubernetes/pkg/apis/batch/v1" + batchapiv2alpha1 "k8s.io/kubernetes/pkg/apis/batch/v2alpha1" + "k8s.io/kubernetes/pkg/apis/extensions" + extensionsapiv1beta1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" + "k8s.io/kubernetes/pkg/apis/policy" + policyapiv1alpha1 "k8s.io/kubernetes/pkg/apis/policy/v1alpha1" + "k8s.io/kubernetes/pkg/apis/rbac" + rbacapi "k8s.io/kubernetes/pkg/apis/rbac/v1alpha1" + rbacvalidation "k8s.io/kubernetes/pkg/apis/rbac/validation" + "k8s.io/kubernetes/pkg/apiserver" + apiservermetrics "k8s.io/kubernetes/pkg/apiserver/metrics" + "k8s.io/kubernetes/pkg/genericapiserver" + "k8s.io/kubernetes/pkg/healthz" + kubeletclient "k8s.io/kubernetes/pkg/kubelet/client" + "k8s.io/kubernetes/pkg/master/ports" + "k8s.io/kubernetes/pkg/registry/clusterrole" + clusterroleetcd "k8s.io/kubernetes/pkg/registry/clusterrole/etcd" + clusterrolepolicybased "k8s.io/kubernetes/pkg/registry/clusterrole/policybased" + "k8s.io/kubernetes/pkg/registry/clusterrolebinding" + clusterrolebindingetcd "k8s.io/kubernetes/pkg/registry/clusterrolebinding/etcd" + clusterrolebindingpolicybased "k8s.io/kubernetes/pkg/registry/clusterrolebinding/policybased" + "k8s.io/kubernetes/pkg/registry/componentstatus" + configmapetcd "k8s.io/kubernetes/pkg/registry/configmap/etcd" + controlleretcd "k8s.io/kubernetes/pkg/registry/controller/etcd" + deploymentetcd "k8s.io/kubernetes/pkg/registry/deployment/etcd" + "k8s.io/kubernetes/pkg/registry/endpoint" + endpointsetcd "k8s.io/kubernetes/pkg/registry/endpoint/etcd" + eventetcd "k8s.io/kubernetes/pkg/registry/event/etcd" + expcontrolleretcd "k8s.io/kubernetes/pkg/registry/experimental/controller/etcd" + "k8s.io/kubernetes/pkg/registry/generic" + ingressetcd "k8s.io/kubernetes/pkg/registry/ingress/etcd" + jobetcd "k8s.io/kubernetes/pkg/registry/job/etcd" + limitrangeetcd "k8s.io/kubernetes/pkg/registry/limitrange/etcd" + "k8s.io/kubernetes/pkg/registry/namespace" + namespaceetcd "k8s.io/kubernetes/pkg/registry/namespace/etcd" + networkpolicyetcd "k8s.io/kubernetes/pkg/registry/networkpolicy/etcd" + "k8s.io/kubernetes/pkg/registry/node" + nodeetcd "k8s.io/kubernetes/pkg/registry/node/etcd" + pvetcd "k8s.io/kubernetes/pkg/registry/persistentvolume/etcd" + pvcetcd "k8s.io/kubernetes/pkg/registry/persistentvolumeclaim/etcd" + petsetetcd "k8s.io/kubernetes/pkg/registry/petset/etcd" + podetcd "k8s.io/kubernetes/pkg/registry/pod/etcd" + poddisruptionbudgetetcd 
"k8s.io/kubernetes/pkg/registry/poddisruptionbudget/etcd" + pspetcd "k8s.io/kubernetes/pkg/registry/podsecuritypolicy/etcd" + podtemplateetcd "k8s.io/kubernetes/pkg/registry/podtemplate/etcd" + replicasetetcd "k8s.io/kubernetes/pkg/registry/replicaset/etcd" + resourcequotaetcd "k8s.io/kubernetes/pkg/registry/resourcequota/etcd" + "k8s.io/kubernetes/pkg/registry/role" + roleetcd "k8s.io/kubernetes/pkg/registry/role/etcd" + rolepolicybased "k8s.io/kubernetes/pkg/registry/role/policybased" + "k8s.io/kubernetes/pkg/registry/rolebinding" + rolebindingetcd "k8s.io/kubernetes/pkg/registry/rolebinding/etcd" + rolebindingpolicybased "k8s.io/kubernetes/pkg/registry/rolebinding/policybased" + secretetcd "k8s.io/kubernetes/pkg/registry/secret/etcd" + "k8s.io/kubernetes/pkg/registry/service" + etcdallocator "k8s.io/kubernetes/pkg/registry/service/allocator/etcd" + serviceetcd "k8s.io/kubernetes/pkg/registry/service/etcd" + ipallocator "k8s.io/kubernetes/pkg/registry/service/ipallocator" + serviceaccountetcd "k8s.io/kubernetes/pkg/registry/serviceaccount/etcd" + thirdpartyresourceetcd "k8s.io/kubernetes/pkg/registry/thirdpartyresource/etcd" + "k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata" + thirdpartyresourcedataetcd "k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata/etcd" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/storage" + etcdmetrics "k8s.io/kubernetes/pkg/storage/etcd/metrics" + etcdutil "k8s.io/kubernetes/pkg/storage/etcd/util" + "k8s.io/kubernetes/pkg/util/wait" + + daemonetcd "k8s.io/kubernetes/pkg/registry/daemonset/etcd" + horizontalpodautoscaleretcd "k8s.io/kubernetes/pkg/registry/horizontalpodautoscaler/etcd" + + "github.com/golang/glog" + "github.com/prometheus/client_golang/prometheus" + "k8s.io/kubernetes/pkg/registry/service/allocator" + "k8s.io/kubernetes/pkg/registry/service/portallocator" +) + +type Config struct { + *genericapiserver.Config + + EnableCoreControllers bool + DeleteCollectionWorkers int + EventTTL time.Duration + KubeletClient kubeletclient.KubeletClient + // Used to start and monitor tunneling + Tunneler genericapiserver.Tunneler + + disableThirdPartyControllerForTesting bool +} + +// Master contains state for a Kubernetes cluster master/api server. +type Master struct { + *genericapiserver.GenericAPIServer + + // Map of v1 resources to their REST storages. + v1ResourcesStorage map[string]rest.Storage + + enableCoreControllers bool + deleteCollectionWorkers int + // registries are internal client APIs for accessing the storage layer + // TODO: define the internal typed interface in a way that clients can + // also be replaced + nodeRegistry node.Registry + namespaceRegistry namespace.Registry + serviceRegistry service.Registry + endpointRegistry endpoint.Registry + serviceClusterIPAllocator service.RangeRegistry + serviceNodePortAllocator service.RangeRegistry + + // storage for third party objects + thirdPartyStorage storage.Interface + // map from api path to a tuple of (storage for the objects, APIGroup) + thirdPartyResources map[string]thirdPartyEntry + // protects the map + thirdPartyResourcesLock sync.RWMutex + // Useful for reliable testing. Shouldn't be used otherwise. + disableThirdPartyControllerForTesting bool + + // Used to start and monitor tunneling + tunneler genericapiserver.Tunneler +} + +// thirdPartyEntry combines objects storage and API group into one struct +// for easy lookup. 
+type thirdPartyEntry struct { + storage *thirdpartyresourcedataetcd.REST + group unversioned.APIGroup +} + +// New returns a new instance of Master from the given config. +// Certain config fields will be set to a default value if unset. +// Certain config fields must be specified, including: +// KubeletClient +func New(c *Config) (*Master, error) { + if c.KubeletClient == nil { + return nil, fmt.Errorf("Master.New() called with config.KubeletClient == nil") + } + + s, err := genericapiserver.New(c.Config) + if err != nil { + return nil, err + } + + m := &Master{ + GenericAPIServer: s, + enableCoreControllers: c.EnableCoreControllers, + deleteCollectionWorkers: c.DeleteCollectionWorkers, + tunneler: c.Tunneler, + + disableThirdPartyControllerForTesting: c.disableThirdPartyControllerForTesting, + } + m.InstallAPIs(c) + + // TODO: Attempt clean shutdown? + if m.enableCoreControllers { + m.NewBootstrapController().Start() + } + + return m, nil +} + +var defaultMetricsHandler = prometheus.Handler().ServeHTTP + +// MetricsWithReset is a handler that resets metrics when DELETE is passed to the endpoint. +func MetricsWithReset(w http.ResponseWriter, req *http.Request) { + if req.Method == "DELETE" { + apiservermetrics.Reset() + etcdmetrics.Reset() + io.WriteString(w, "metrics reset\n") + return + } + defaultMetricsHandler(w, req) +} + +func (m *Master) InstallAPIs(c *Config) { + apiGroupsInfo := []genericapiserver.APIGroupInfo{} + + // Install v1 unless disabled. + if c.APIResourceConfigSource.AnyResourcesForVersionEnabled(apiv1.SchemeGroupVersion) { + // Install v1 API. + m.initV1ResourcesStorage(c) + apiGroupInfo := genericapiserver.APIGroupInfo{ + GroupMeta: *registered.GroupOrDie(api.GroupName), + VersionedResourcesStorageMap: map[string]map[string]rest.Storage{ + "v1": m.v1ResourcesStorage, + }, + IsLegacyGroup: true, + Scheme: api.Scheme, + ParameterCodec: api.ParameterCodec, + NegotiatedSerializer: api.Codecs, + } + if autoscalingGroupVersion := (unversioned.GroupVersion{Group: "autoscaling", Version: "v1"}); registered.IsEnabledVersion(autoscalingGroupVersion) { + apiGroupInfo.SubresourceGroupVersionKind = map[string]unversioned.GroupVersionKind{ + "replicationcontrollers/scale": autoscalingGroupVersion.WithKind("Scale"), + } + } + apiGroupsInfo = append(apiGroupsInfo, apiGroupInfo) + } + + // Run the tunneler. + healthzChecks := []healthz.HealthzChecker{} + if m.tunneler != nil { + m.tunneler.Run(m.getNodeAddresses) + healthzChecks = append(healthzChecks, healthz.NamedCheck("SSH Tunnel Check", m.IsTunnelSyncHealthy)) + prometheus.NewGaugeFunc(prometheus.GaugeOpts{ + Name: "apiserver_proxy_tunnel_sync_latency_secs", + Help: "The time since the last successful synchronization of the SSH tunnels for proxy requests.", + }, func() float64 { return float64(m.tunneler.SecondsSinceSync()) }) + } + healthz.InstallHandler(m.MuxHelper, healthzChecks...) + + if c.EnableProfiling { + m.MuxHelper.HandleFunc("/metrics", MetricsWithReset) + } else { + m.MuxHelper.HandleFunc("/metrics", defaultMetricsHandler) + } + + // allGroups records all supported groups at /apis + allGroups := []unversioned.APIGroup{} + + // Install extensions unless disabled. 
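(Editorial aside: the extensions install block that the comment above announces continues immediately after this sketch.) Every group installed below (extensions, autoscaling, batch, policy, apps, rbac) follows one shape: build a version-to-(resource path-to-rest.Storage) map, wrap it in an APIGroupInfo, record a discovery entry in allGroups, and append it to apiGroupsInfo for the single InstallAPIGroups call at the end of the function. A minimal, self-contained Go sketch of that shape, using toy types rather than the real genericapiserver/rest ones (every name in it is illustrative):

```go
package main

import "fmt"

// Storage stands in for rest.Storage: anything that can serve a resource.
type Storage interface{ New() interface{} }

type fakeStorage struct{}

func (fakeStorage) New() interface{} { return struct{}{} }

// APIGroupInfo mirrors the shape used above: version -> resource path -> storage.
type APIGroupInfo struct {
	GroupName                    string
	VersionedResourcesStorageMap map[string]map[string]Storage
}

// installAPIGroups flattens every group into concrete URL paths, the way
// InstallAPIGroups ultimately exposes /apis/<group>/<version>/<resource>.
func installAPIGroups(groups []APIGroupInfo) []string {
	routes := []string{}
	for _, g := range groups {
		for version, resources := range g.VersionedResourcesStorageMap {
			for path := range resources {
				routes = append(routes, fmt.Sprintf("/apis/%s/%s/%s", g.GroupName, version, path))
			}
		}
	}
	return routes
}

func main() {
	group := APIGroupInfo{
		GroupName: "extensions",
		VersionedResourcesStorageMap: map[string]map[string]Storage{
			"v1beta1": {"deployments": fakeStorage{}, "deployments/status": fakeStorage{}},
		},
	}
	for _, r := range installAPIGroups([]APIGroupInfo{group}) {
		fmt.Println(r) // e.g. /apis/extensions/v1beta1/deployments (map order varies)
	}
}
```

Collecting every group before a single install call keeps registration all-or-nothing: either all enabled groups come up, or the process dies in the glog.Fatalf at the end of InstallAPIs.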
+ if c.APIResourceConfigSource.AnyResourcesForVersionEnabled(extensionsapiv1beta1.SchemeGroupVersion) { + var err error + m.thirdPartyStorage, err = c.StorageFactory.New(extensions.Resource("thirdpartyresources")) + if err != nil { + glog.Fatalf("Error getting third party storage: %v", err) + } + m.thirdPartyResources = map[string]thirdPartyEntry{} + + extensionResources := m.getExtensionResources(c) + extensionsGroupMeta := registered.GroupOrDie(extensions.GroupName) + + apiGroupInfo := genericapiserver.APIGroupInfo{ + GroupMeta: *extensionsGroupMeta, + VersionedResourcesStorageMap: map[string]map[string]rest.Storage{ + "v1beta1": extensionResources, + }, + OptionsExternalVersion: &registered.GroupOrDie(api.GroupName).GroupVersion, + Scheme: api.Scheme, + ParameterCodec: api.ParameterCodec, + NegotiatedSerializer: api.Codecs, + } + apiGroupsInfo = append(apiGroupsInfo, apiGroupInfo) + + extensionsGVForDiscovery := unversioned.GroupVersionForDiscovery{ + GroupVersion: extensionsGroupMeta.GroupVersion.String(), + Version: extensionsGroupMeta.GroupVersion.Version, + } + group := unversioned.APIGroup{ + Name: extensionsGroupMeta.GroupVersion.Group, + Versions: []unversioned.GroupVersionForDiscovery{extensionsGVForDiscovery}, + PreferredVersion: extensionsGVForDiscovery, + } + allGroups = append(allGroups, group) + } + + // Install autoscaling unless disabled. + if c.APIResourceConfigSource.AnyResourcesForVersionEnabled(autoscalingapiv1.SchemeGroupVersion) { + autoscalingResources := m.getAutoscalingResources(c) + autoscalingGroupMeta := registered.GroupOrDie(autoscaling.GroupName) + + // Hard code preferred group version to autoscaling/v1 + autoscalingGroupMeta.GroupVersion = autoscalingapiv1.SchemeGroupVersion + + apiGroupInfo := genericapiserver.APIGroupInfo{ + GroupMeta: *autoscalingGroupMeta, + VersionedResourcesStorageMap: map[string]map[string]rest.Storage{ + "v1": autoscalingResources, + }, + OptionsExternalVersion: &registered.GroupOrDie(api.GroupName).GroupVersion, + Scheme: api.Scheme, + ParameterCodec: api.ParameterCodec, + NegotiatedSerializer: api.Codecs, + } + apiGroupsInfo = append(apiGroupsInfo, apiGroupInfo) + + autoscalingGVForDiscovery := unversioned.GroupVersionForDiscovery{ + GroupVersion: autoscalingGroupMeta.GroupVersion.String(), + Version: autoscalingGroupMeta.GroupVersion.Version, + } + group := unversioned.APIGroup{ + Name: autoscalingGroupMeta.GroupVersion.Group, + Versions: []unversioned.GroupVersionForDiscovery{autoscalingGVForDiscovery}, + PreferredVersion: autoscalingGVForDiscovery, + } + allGroups = append(allGroups, group) + } + + // Install batch unless disabled.
+ if c.APIResourceConfigSource.AnyResourcesForVersionEnabled(batchapiv1.SchemeGroupVersion) || + c.APIResourceConfigSource.AnyResourcesForVersionEnabled(batchapiv2alpha1.SchemeGroupVersion) { + batchv1Resources := m.getBatchResources(c, batchapiv1.SchemeGroupVersion) + batchGroupMeta := registered.GroupOrDie(batch.GroupName) + + // Hard code preferred group version to batch/v1 + batchGroupMeta.GroupVersion = batchapiv1.SchemeGroupVersion + + apiGroupInfo := genericapiserver.APIGroupInfo{ + GroupMeta: *batchGroupMeta, + VersionedResourcesStorageMap: map[string]map[string]rest.Storage{ + "v1": batchv1Resources, + }, + OptionsExternalVersion: &registered.GroupOrDie(api.GroupName).GroupVersion, + Scheme: api.Scheme, + ParameterCodec: api.ParameterCodec, + NegotiatedSerializer: api.Codecs, + } + if c.APIResourceConfigSource.AnyResourcesForVersionEnabled(batchapiv2alpha1.SchemeGroupVersion) { + batchv2alpha1Resources := m.getBatchResources(c, batchapiv2alpha1.SchemeGroupVersion) + apiGroupInfo.VersionedResourcesStorageMap["v2alpha1"] = batchv2alpha1Resources + } + + apiGroupsInfo = append(apiGroupsInfo, apiGroupInfo) + + batchGVForDiscovery := unversioned.GroupVersionForDiscovery{ + GroupVersion: batchGroupMeta.GroupVersion.String(), + Version: batchGroupMeta.GroupVersion.Version, + } + group := unversioned.APIGroup{ + Name: batchGroupMeta.GroupVersion.Group, + Versions: []unversioned.GroupVersionForDiscovery{batchGVForDiscovery}, + PreferredVersion: batchGVForDiscovery, + } + allGroups = append(allGroups, group) + } + + if c.APIResourceConfigSource.AnyResourcesForVersionEnabled(policyapiv1alpha1.SchemeGroupVersion) { + policyResources := m.getPolicyResources(c) + policyGroupMeta := registered.GroupOrDie(policy.GroupName) + + // Hard code preferred group version to policy/v1alpha1 + policyGroupMeta.GroupVersion = policyapiv1alpha1.SchemeGroupVersion + + apiGroupInfo := genericapiserver.APIGroupInfo{ + GroupMeta: *policyGroupMeta, + VersionedResourcesStorageMap: map[string]map[string]rest.Storage{ + "v1alpha1": policyResources, + }, + OptionsExternalVersion: &registered.GroupOrDie(api.GroupName).GroupVersion, + Scheme: api.Scheme, + ParameterCodec: api.ParameterCodec, + NegotiatedSerializer: api.Codecs, + } + apiGroupsInfo = append(apiGroupsInfo, apiGroupInfo) + + policyGVForDiscovery := unversioned.GroupVersionForDiscovery{ + GroupVersion: policyGroupMeta.GroupVersion.String(), + Version: policyGroupMeta.GroupVersion.Version, + } + group := unversioned.APIGroup{ + Name: policyGroupMeta.GroupVersion.Group, + Versions: []unversioned.GroupVersionForDiscovery{policyGVForDiscovery}, + PreferredVersion: policyGVForDiscovery, + } + allGroups = append(allGroups, group) + + } + + if c.APIResourceConfigSource.AnyResourcesForVersionEnabled(appsapi.SchemeGroupVersion) { + appsResources := m.getAppsResources(c) + appsGroupMeta := registered.GroupOrDie(apps.GroupName) + + // Hard code preferred group version to apps/v1alpha1 + appsGroupMeta.GroupVersion = appsapi.SchemeGroupVersion + + apiGroupInfo := genericapiserver.APIGroupInfo{ + GroupMeta: *appsGroupMeta, + VersionedResourcesStorageMap: map[string]map[string]rest.Storage{ + "v1alpha1": appsResources, + }, + OptionsExternalVersion: &registered.GroupOrDie(api.GroupName).GroupVersion, + Scheme: api.Scheme, + ParameterCodec: api.ParameterCodec, + NegotiatedSerializer: api.Codecs, + } + apiGroupsInfo = append(apiGroupsInfo, apiGroupInfo) + + appsGVForDiscovery := unversioned.GroupVersionForDiscovery{ + GroupVersion: appsGroupMeta.GroupVersion.String(), + Version:
appsGroupMeta.GroupVersion.Version, + } + group := unversioned.APIGroup{ + Name: appsGroupMeta.GroupVersion.Group, + Versions: []unversioned.GroupVersionForDiscovery{appsGVForDiscovery}, + PreferredVersion: appsGVForDiscovery, + } + allGroups = append(allGroups, group) + + } + + if c.APIResourceConfigSource.AnyResourcesForVersionEnabled(rbacapi.SchemeGroupVersion) { + rbacResources := m.getRBACResources(c) + rbacGroupMeta := registered.GroupOrDie(rbac.GroupName) + + // Hard code preferred group version to rbac/v1alpha1 + rbacGroupMeta.GroupVersion = rbacapi.SchemeGroupVersion + + apiGroupInfo := genericapiserver.APIGroupInfo{ + GroupMeta: *rbacGroupMeta, + VersionedResourcesStorageMap: map[string]map[string]rest.Storage{ + "v1alpha1": rbacResources, + }, + OptionsExternalVersion: &registered.GroupOrDie(api.GroupName).GroupVersion, + Scheme: api.Scheme, + ParameterCodec: api.ParameterCodec, + NegotiatedSerializer: api.Codecs, + } + apiGroupsInfo = append(apiGroupsInfo, apiGroupInfo) + + rbacGVForDiscovery := unversioned.GroupVersionForDiscovery{ + GroupVersion: rbacGroupMeta.GroupVersion.String(), + Version: rbacGroupMeta.GroupVersion.Version, + } + group := unversioned.APIGroup{ + Name: rbacGroupMeta.GroupVersion.Group, + Versions: []unversioned.GroupVersionForDiscovery{rbacGVForDiscovery}, + PreferredVersion: rbacGVForDiscovery, + } + allGroups = append(allGroups, group) + + } + + if err := m.InstallAPIGroups(apiGroupsInfo); err != nil { + glog.Fatalf("Error in registering group versions: %v", err) + } +} + +func (m *Master) initV1ResourcesStorage(c *Config) { + restOptions := func(resource string) generic.RESTOptions { + return m.GetRESTOptionsOrDie(c, api.Resource(resource)) + } + + podTemplateStorage := podtemplateetcd.NewREST(restOptions("podTemplates")) + + eventStorage := eventetcd.NewREST(restOptions("events"), uint64(c.EventTTL.Seconds())) + limitRangeStorage := limitrangeetcd.NewREST(restOptions("limitRanges")) + + resourceQuotaStorage, resourceQuotaStatusStorage := resourcequotaetcd.NewREST(restOptions("resourceQuotas")) + secretStorage := secretetcd.NewREST(restOptions("secrets")) + serviceAccountStorage := serviceaccountetcd.NewREST(restOptions("serviceAccounts")) + persistentVolumeStorage, persistentVolumeStatusStorage := pvetcd.NewREST(restOptions("persistentVolumes")) + persistentVolumeClaimStorage, persistentVolumeClaimStatusStorage := pvcetcd.NewREST(restOptions("persistentVolumeClaims")) + configMapStorage := configmapetcd.NewREST(restOptions("configMaps")) + + namespaceStorage, namespaceStatusStorage, namespaceFinalizeStorage := namespaceetcd.NewREST(restOptions("namespaces")) + m.namespaceRegistry = namespace.NewRegistry(namespaceStorage) + + endpointsStorage := endpointsetcd.NewREST(restOptions("endpoints")) + m.endpointRegistry = endpoint.NewRegistry(endpointsStorage) + + nodeStorage := nodeetcd.NewStorage(restOptions("nodes"), c.KubeletClient, m.ProxyTransport) + m.nodeRegistry = node.NewRegistry(nodeStorage.Node) + + podStorage := podetcd.NewStorage( + restOptions("pods"), + kubeletclient.ConnectionInfoGetter(nodeStorage.Node), + m.ProxyTransport, + ) + + serviceRESTStorage, serviceStatusStorage := serviceetcd.NewREST(restOptions("services")) + m.serviceRegistry = service.NewRegistry(serviceRESTStorage) + + var serviceClusterIPRegistry service.RangeRegistry + serviceClusterIPRange := m.ServiceClusterIPRange + if serviceClusterIPRange == nil { + glog.Fatalf("service clusterIPRange is nil") + return + } + + serviceStorage, err :=
c.StorageFactory.New(api.Resource("services")) + if err != nil { + glog.Fatal(err.Error()) + } + + serviceClusterIPAllocator := ipallocator.NewAllocatorCIDRRange(serviceClusterIPRange, func(max int, rangeSpec string) allocator.Interface { + mem := allocator.NewAllocationMap(max, rangeSpec) + // TODO etcdallocator package to return a storage interface via the storageFactory + etcd := etcdallocator.NewEtcd(mem, "/ranges/serviceips", api.Resource("serviceipallocations"), serviceStorage) + serviceClusterIPRegistry = etcd + return etcd + }) + m.serviceClusterIPAllocator = serviceClusterIPRegistry + + var serviceNodePortRegistry service.RangeRegistry + serviceNodePortAllocator := portallocator.NewPortAllocatorCustom(m.ServiceNodePortRange, func(max int, rangeSpec string) allocator.Interface { + mem := allocator.NewAllocationMap(max, rangeSpec) + // TODO etcdallocator package to return a storage interface via the storageFactory + etcd := etcdallocator.NewEtcd(mem, "/ranges/servicenodeports", api.Resource("servicenodeportallocations"), serviceStorage) + serviceNodePortRegistry = etcd + return etcd + }) + m.serviceNodePortAllocator = serviceNodePortRegistry + + controllerStorage := controlleretcd.NewStorage(restOptions("replicationControllers")) + + serviceRest := service.NewStorage(m.serviceRegistry, m.endpointRegistry, serviceClusterIPAllocator, serviceNodePortAllocator, m.ProxyTransport) + + // TODO: Factor out the core API registration + m.v1ResourcesStorage = map[string]rest.Storage{ + "pods": podStorage.Pod, + "pods/attach": podStorage.Attach, + "pods/status": podStorage.Status, + "pods/log": podStorage.Log, + "pods/exec": podStorage.Exec, + "pods/portforward": podStorage.PortForward, + "pods/proxy": podStorage.Proxy, + "pods/binding": podStorage.Binding, + "bindings": podStorage.Binding, + + "podTemplates": podTemplateStorage, + + "replicationControllers": controllerStorage.Controller, + "replicationControllers/status": controllerStorage.Status, + + "services": serviceRest.Service, + "services/proxy": serviceRest.Proxy, + "services/status": serviceStatusStorage, + + "endpoints": endpointsStorage, + + "nodes": nodeStorage.Node, + "nodes/status": nodeStorage.Status, + "nodes/proxy": nodeStorage.Proxy, + + "events": eventStorage, + + "limitRanges": limitRangeStorage, + "resourceQuotas": resourceQuotaStorage, + "resourceQuotas/status": resourceQuotaStatusStorage, + "namespaces": namespaceStorage, + "namespaces/status": namespaceStatusStorage, + "namespaces/finalize": namespaceFinalizeStorage, + "secrets": secretStorage, + "serviceAccounts": serviceAccountStorage, + "persistentVolumes": persistentVolumeStorage, + "persistentVolumes/status": persistentVolumeStatusStorage, + "persistentVolumeClaims": persistentVolumeClaimStorage, + "persistentVolumeClaims/status": persistentVolumeClaimStatusStorage, + "configMaps": configMapStorage, + + "componentStatuses": componentstatus.NewStorage(func() map[string]apiserver.Server { return m.getServersToValidate(c) }), + } + if registered.IsEnabledVersion(unversioned.GroupVersion{Group: "autoscaling", Version: "v1"}) { + m.v1ResourcesStorage["replicationControllers/scale"] = controllerStorage.Scale + } +} + +// NewBootstrapController returns a controller for watching the core capabilities of the master. 
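(Editorial aside: the NewBootstrapController constructor documented by the comment above follows right after this sketch.) Each *Interval field the constructor sets pairs a registry with a repair period. The idiom this implies, and which the util/wait helpers used elsewhere in this file provide, is a ticker-driven loop that re-runs reconciliation forever and tolerates individual failures. A toy, self-contained version of that loop; runRepairLoop and its parameters are hypothetical names, not the controller's real API:

```go
package main

import (
	"fmt"
	"time"
)

// runRepairLoop illustrates the idiom behind fields like EndpointInterval or
// ServiceClusterIPInterval: a reconcile function is retried forever on a
// fixed period, so drift in stored state is healed even if a single pass fails.
// This is a toy stand-in for the util/wait helpers, not the controller's code.
func runRepairLoop(name string, interval time.Duration, reconcile func() error, stop <-chan struct{}) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			if err := reconcile(); err != nil {
				// Log and keep going; the next tick gets another chance.
				fmt.Printf("%s reconcile failed: %v\n", name, err)
			}
		case <-stop:
			return
		}
	}
}

func main() {
	stop := make(chan struct{})
	go runRepairLoop("endpoints", 10*time.Millisecond, func() error {
		fmt.Println("reconciling kubernetes endpoints")
		return nil
	}, stop)
	time.Sleep(35 * time.Millisecond)
	close(stop)
}
```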
+func (m *Master) NewBootstrapController() *Controller { + return &Controller{ + NamespaceRegistry: m.namespaceRegistry, + ServiceRegistry: m.serviceRegistry, + MasterCount: m.MasterCount, + + EndpointRegistry: m.endpointRegistry, + EndpointInterval: 10 * time.Second, + + SystemNamespaces: []string{api.NamespaceSystem}, + SystemNamespacesInterval: 1 * time.Minute, + + ServiceClusterIPRegistry: m.serviceClusterIPAllocator, + ServiceClusterIPRange: m.ServiceClusterIPRange, + ServiceClusterIPInterval: 3 * time.Minute, + + ServiceNodePortRegistry: m.serviceNodePortAllocator, + ServiceNodePortRange: m.ServiceNodePortRange, + ServiceNodePortInterval: 3 * time.Minute, + + PublicIP: m.ClusterIP, + + ServiceIP: m.ServiceReadWriteIP, + ServicePort: m.ServiceReadWritePort, + ExtraServicePorts: m.ExtraServicePorts, + ExtraEndpointPorts: m.ExtraEndpointPorts, + PublicServicePort: m.PublicReadWritePort, + KubernetesServiceNodePort: m.KubernetesServiceNodePort, + } +} + +func (m *Master) getServersToValidate(c *Config) map[string]apiserver.Server { + serversToValidate := map[string]apiserver.Server{ + "controller-manager": {Addr: "127.0.0.1", Port: ports.ControllerManagerPort, Path: "/healthz"}, + "scheduler": {Addr: "127.0.0.1", Port: ports.SchedulerPort, Path: "/healthz"}, + } + + for ix, machine := range c.StorageFactory.Backends() { + etcdUrl, err := url.Parse(machine) + if err != nil { + glog.Errorf("Failed to parse etcd url for validation: %v", err) + continue + } + var port int + var addr string + if strings.Contains(etcdUrl.Host, ":") { + var portString string + addr, portString, err = net.SplitHostPort(etcdUrl.Host) + if err != nil { + glog.Errorf("Failed to split host/port: %s (%v)", etcdUrl.Host, err) + continue + } + port, _ = strconv.Atoi(portString) + } else { + addr = etcdUrl.Host + port = 4001 + } + // TODO: etcd health checking should be abstracted in the storage tier + serversToValidate[fmt.Sprintf("etcd-%d", ix)] = apiserver.Server{ + Addr: addr, + EnableHTTPS: etcdUrl.Scheme == "https", + Port: port, + Path: "/health", + Validate: etcdutil.EtcdHealthCheck, + } + } + return serversToValidate +} + +// HasThirdPartyResource returns true if a particular third party resource is currently installed. +func (m *Master) HasThirdPartyResource(rsrc *extensions.ThirdPartyResource) (bool, error) { + _, group, err := thirdpartyresourcedata.ExtractApiGroupAndKind(rsrc) + if err != nil { + return false, err + } + path := makeThirdPartyPath(group) + services := m.HandlerContainer.RegisteredWebServices() + for ix := range services { + if services[ix].RootPath() == path { + return true, nil + } + } + return false, nil +} + +func (m *Master) removeThirdPartyStorage(path string) error { + m.thirdPartyResourcesLock.Lock() + defer m.thirdPartyResourcesLock.Unlock() + storage, found := m.thirdPartyResources[path] + if found { + if err := m.removeAllThirdPartyResources(storage.storage); err != nil { + return err + } + delete(m.thirdPartyResources, path) + m.RemoveAPIGroupForDiscovery(getThirdPartyGroupName(path)) + } + return nil +} + +// RemoveThirdPartyResource removes all resources matching `path`.
Also deletes any stored data +func (m *Master) RemoveThirdPartyResource(path string) error { + if err := m.removeThirdPartyStorage(path); err != nil { + return err + } + + services := m.HandlerContainer.RegisteredWebServices() + for ix := range services { + root := services[ix].RootPath() + if root == path || strings.HasPrefix(root, path+"/") { + m.HandlerContainer.Remove(services[ix]) + } + } + return nil +} + +func (m *Master) removeAllThirdPartyResources(registry *thirdpartyresourcedataetcd.REST) error { + ctx := api.NewDefaultContext() + existingData, err := registry.List(ctx, nil) + if err != nil { + return err + } + list, ok := existingData.(*extensions.ThirdPartyResourceDataList) + if !ok { + return fmt.Errorf("expected a *ThirdPartyResourceDataList, got %#v", list) + } + for ix := range list.Items { + item := &list.Items[ix] + if _, err := registry.Delete(ctx, item.Name, nil); err != nil { + return err + } + } + return nil +} + +// ListThirdPartyResources lists all currently installed third party resources +func (m *Master) ListThirdPartyResources() []string { + m.thirdPartyResourcesLock.RLock() + defer m.thirdPartyResourcesLock.RUnlock() + result := []string{} + for key := range m.thirdPartyResources { + result = append(result, key) + } + return result +} + +func (m *Master) addThirdPartyResourceStorage(path string, storage *thirdpartyresourcedataetcd.REST, apiGroup unversioned.APIGroup) { + m.thirdPartyResourcesLock.Lock() + defer m.thirdPartyResourcesLock.Unlock() + m.thirdPartyResources[path] = thirdPartyEntry{storage, apiGroup} + m.AddAPIGroupForDiscovery(apiGroup) +} + +// InstallThirdPartyResource installs a third party resource specified by 'rsrc'. When a resource is +// installed a corresponding RESTful resource is added as a valid path in the web service provided by +// the master. +// +// For example, if you install a resource ThirdPartyResource{ Name: "foo.company.com", Versions: {"v1"} } +// then the following RESTful resource is created on the server: +// http://<host>/apis/company.com/v1/foos/...
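(Editorial aside: InstallThirdPartyResource itself begins right below.) The example in the doc comment implies the name-splitting contract of thirdpartyresourcedata.ExtractApiGroupAndKind: "foo.company.com" yields kind "Foo" and group "company.com", served under /apis/company.com/<version>/foos. A hedged reconstruction of that split, illustrative only and not the vendored function:

```go
package main

import (
	"fmt"
	"strings"
)

// splitTPRName mimics what ExtractApiGroupAndKind appears to do, judging from
// the doc comment above: the segment before the first dot becomes the kind
// (capitalized), the remainder becomes the API group. This is an illustrative
// reconstruction, not the vendored implementation.
func splitTPRName(name string) (kind, group string, err error) {
	parts := strings.SplitN(name, ".", 2)
	if len(parts) != 2 || parts[0] == "" || parts[1] == "" {
		return "", "", fmt.Errorf("unexpected TPR name %q, expected <kind>.<group>", name)
	}
	kind = strings.ToUpper(parts[0][:1]) + parts[0][1:]
	return kind, parts[1], nil
}

func main() {
	kind, group, err := splitTPRName("foo.company.com")
	if err != nil {
		panic(err)
	}
	// Prints: kind=Foo group=company.com path=/apis/company.com/v1/foos
	fmt.Printf("kind=%s group=%s path=/apis/%s/v1/%ss\n", kind, group, group, strings.ToLower(kind))
}
```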
+func (m *Master) InstallThirdPartyResource(rsrc *extensions.ThirdPartyResource) error { + kind, group, err := thirdpartyresourcedata.ExtractApiGroupAndKind(rsrc) + if err != nil { + return err + } + thirdparty := m.thirdpartyapi(group, kind, rsrc.Versions[0].Name) + if err := thirdparty.InstallREST(m.HandlerContainer); err != nil { + glog.Fatalf("Unable to setup thirdparty api: %v", err) + } + path := makeThirdPartyPath(group) + groupVersion := unversioned.GroupVersionForDiscovery{ + GroupVersion: group + "/" + rsrc.Versions[0].Name, + Version: rsrc.Versions[0].Name, + } + apiGroup := unversioned.APIGroup{ + Name: group, + Versions: []unversioned.GroupVersionForDiscovery{groupVersion}, + PreferredVersion: groupVersion, + } + apiserver.AddGroupWebService(api.Codecs, m.HandlerContainer, path, apiGroup) + m.addThirdPartyResourceStorage(path, thirdparty.Storage[strings.ToLower(kind)+"s"].(*thirdpartyresourcedataetcd.REST), apiGroup) + apiserver.InstallServiceErrorHandler(api.Codecs, m.HandlerContainer, m.NewRequestInfoResolver(), []string{thirdparty.GroupVersion.String()}) + return nil +} + +func (m *Master) thirdpartyapi(group, kind, version string) *apiserver.APIGroupVersion { + resourceStorage := thirdpartyresourcedataetcd.NewREST( + generic.RESTOptions{ + Storage: m.thirdPartyStorage, + Decorator: generic.UndecoratedStorage, + DeleteCollectionWorkers: m.deleteCollectionWorkers, + }, + group, + kind, + ) + + apiRoot := makeThirdPartyPath("") + + storage := map[string]rest.Storage{ + strings.ToLower(kind) + "s": resourceStorage, + } + + optionsExternalVersion := registered.GroupOrDie(api.GroupName).GroupVersion + internalVersion := unversioned.GroupVersion{Group: group, Version: runtime.APIVersionInternal} + externalVersion := unversioned.GroupVersion{Group: group, Version: version} + + return &apiserver.APIGroupVersion{ + Root: apiRoot, + GroupVersion: externalVersion, + RequestInfoResolver: m.NewRequestInfoResolver(), + + Creater: thirdpartyresourcedata.NewObjectCreator(group, version, api.Scheme), + Convertor: api.Scheme, + Copier: api.Scheme, + Typer: api.Scheme, + + Mapper: thirdpartyresourcedata.NewMapper(registered.GroupOrDie(extensions.GroupName).RESTMapper, kind, version, group), + Linker: registered.GroupOrDie(extensions.GroupName).SelfLinker, + Storage: storage, + OptionsExternalVersion: &optionsExternalVersion, + + Serializer: thirdpartyresourcedata.NewNegotiatedSerializer(api.Codecs, kind, externalVersion, internalVersion), + ParameterCodec: thirdpartyresourcedata.NewThirdPartyParameterCodec(api.ParameterCodec), + + Context: m.RequestContextMapper, + + MinRequestTimeout: m.MinRequestTimeout, + } +} + +func (m *Master) GetRESTOptionsOrDie(c *Config, resource unversioned.GroupResource) generic.RESTOptions { + storage, err := c.StorageFactory.New(resource) + if err != nil { + glog.Fatalf("Unable to find storage destination for %v, due to %v", resource, err.Error()) + } + + return generic.RESTOptions{ + Storage: storage, + Decorator: m.StorageDecorator(), + DeleteCollectionWorkers: m.deleteCollectionWorkers, + } +} + +// getExtensionResources returns the resources for the extensions api +func (m *Master) getExtensionResources(c *Config) map[string]rest.Storage { + restOptions := func(resource string) generic.RESTOptions { + return m.GetRESTOptionsOrDie(c, extensions.Resource(resource)) + } + + // TODO update when we support more than one version of this group + version := extensionsapiv1beta1.SchemeGroupVersion + + storage := map[string]rest.Storage{} + + if
c.APIResourceConfigSource.ResourceEnabled(version.WithResource("horizontalpodautoscalers")) { + hpaStorage, hpaStatusStorage := horizontalpodautoscaleretcd.NewREST(restOptions("horizontalpodautoscalers")) + storage["horizontalpodautoscalers"] = hpaStorage + storage["horizontalpodautoscalers/status"] = hpaStatusStorage + + controllerStorage := expcontrolleretcd.NewStorage(m.GetRESTOptionsOrDie(c, api.Resource("replicationControllers"))) + storage["replicationcontrollers"] = controllerStorage.ReplicationController + storage["replicationcontrollers/scale"] = controllerStorage.Scale + } + thirdPartyResourceStorage := thirdpartyresourceetcd.NewREST(restOptions("thirdpartyresources")) + if c.APIResourceConfigSource.ResourceEnabled(version.WithResource("thirdpartyresources")) { + thirdPartyControl := ThirdPartyController{ + master: m, + thirdPartyResourceRegistry: thirdPartyResourceStorage, + } + if !m.disableThirdPartyControllerForTesting { + go wait.Forever(func() { + if err := thirdPartyControl.SyncResources(); err != nil { + glog.Warningf("third party resource sync failed: %v", err) + } + }, 10*time.Second) + } + storage["thirdpartyresources"] = thirdPartyResourceStorage + } + + daemonSetStorage, daemonSetStatusStorage := daemonetcd.NewREST(restOptions("daemonsets")) + if c.APIResourceConfigSource.ResourceEnabled(version.WithResource("daemonsets")) { + storage["daemonsets"] = daemonSetStorage + storage["daemonsets/status"] = daemonSetStatusStorage + } + deploymentStorage := deploymentetcd.NewStorage(restOptions("deployments")) + if c.APIResourceConfigSource.ResourceEnabled(version.WithResource("deployments")) { + storage["deployments"] = deploymentStorage.Deployment + storage["deployments/status"] = deploymentStorage.Status + storage["deployments/rollback"] = deploymentStorage.Rollback + storage["deployments/scale"] = deploymentStorage.Scale + } + if c.APIResourceConfigSource.ResourceEnabled(version.WithResource("jobs")) { + jobsStorage, jobsStatusStorage := jobetcd.NewREST(restOptions("jobs")) + storage["jobs"] = jobsStorage + storage["jobs/status"] = jobsStatusStorage + } + ingressStorage, ingressStatusStorage := ingressetcd.NewREST(restOptions("ingresses")) + if c.APIResourceConfigSource.ResourceEnabled(version.WithResource("ingresses")) { + storage["ingresses"] = ingressStorage + storage["ingresses/status"] = ingressStatusStorage + } + podSecurityPolicyStorage := pspetcd.NewREST(restOptions("podsecuritypolicy")) + if c.APIResourceConfigSource.ResourceEnabled(version.WithResource("podsecuritypolicy")) { + storage["podSecurityPolicies"] = podSecurityPolicyStorage + } + replicaSetStorage := replicasetetcd.NewStorage(restOptions("replicasets")) + if c.APIResourceConfigSource.ResourceEnabled(version.WithResource("replicasets")) { + storage["replicasets"] = replicaSetStorage.ReplicaSet + storage["replicasets/status"] = replicaSetStorage.Status + storage["replicasets/scale"] = replicaSetStorage.Scale + } + networkPolicyStorage := networkpolicyetcd.NewREST(restOptions("networkpolicies")) + if c.APIResourceConfigSource.ResourceEnabled(version.WithResource("networkpolicies")) { + storage["networkpolicies"] = networkPolicyStorage + } + + return storage +} + +// getAutoscalingResources returns the resources for autoscaling api +func (m *Master) getAutoscalingResources(c *Config) map[string]rest.Storage { + // TODO update when we support more than one version of this group + version := autoscalingapiv1.SchemeGroupVersion + + storage := map[string]rest.Storage{} + if 
c.APIResourceConfigSource.ResourceEnabled(version.WithResource("horizontalpodautoscalers")) { + hpaStorage, hpaStatusStorage := horizontalpodautoscaleretcd.NewREST(m.GetRESTOptionsOrDie(c, autoscaling.Resource("horizontalpodautoscalers"))) + storage["horizontalpodautoscalers"] = hpaStorage + storage["horizontalpodautoscalers/status"] = hpaStatusStorage + } + return storage +} + +// getBatchResources returns the resources for batch api +func (m *Master) getBatchResources(c *Config, version unversioned.GroupVersion) map[string]rest.Storage { + storage := map[string]rest.Storage{} + if c.APIResourceConfigSource.ResourceEnabled(version.WithResource("jobs")) { + jobsStorage, jobsStatusStorage := jobetcd.NewREST(m.GetRESTOptionsOrDie(c, batch.Resource("jobs"))) + storage["jobs"] = jobsStorage + storage["jobs/status"] = jobsStatusStorage + } + return storage +} + +// getPolicyResources returns the resources for policy api +func (m *Master) getPolicyResources(c *Config) map[string]rest.Storage { + // TODO update when we support more than one version of this group + version := policyapiv1alpha1.SchemeGroupVersion + + storage := map[string]rest.Storage{} + if c.APIResourceConfigSource.ResourceEnabled(version.WithResource("poddisruptionbudgets")) { + poddisruptionbudgetStorage, poddisruptionbudgetStatusStorage := poddisruptionbudgetetcd.NewREST(m.GetRESTOptionsOrDie(c, policy.Resource("poddisruptionbudgets"))) + storage["poddisruptionbudgets"] = poddisruptionbudgetStorage + storage["poddisruptionbudgets/status"] = poddisruptionbudgetStatusStorage + } + return storage +} + +// getAppsResources returns the resources for apps api +func (m *Master) getAppsResources(c *Config) map[string]rest.Storage { + // TODO update when we support more than one version of this group + version := appsapi.SchemeGroupVersion + + storage := map[string]rest.Storage{} + if c.APIResourceConfigSource.ResourceEnabled(version.WithResource("petsets")) { + petsetStorage, petsetStatusStorage := petsetetcd.NewREST(m.GetRESTOptionsOrDie(c, apps.Resource("petsets"))) + storage["petsets"] = petsetStorage + storage["petsets/status"] = petsetStatusStorage + } + return storage +} + +func (m *Master) getRBACResources(c *Config) map[string]rest.Storage { + version := rbacapi.SchemeGroupVersion + + once := new(sync.Once) + var authorizationRuleResolver rbacvalidation.AuthorizationRuleResolver + newRuleValidator := func() rbacvalidation.AuthorizationRuleResolver { + once.Do(func() { + authorizationRuleResolver = rbacvalidation.NewDefaultRuleResolver( + role.NewRegistry(roleetcd.NewREST(m.GetRESTOptionsOrDie(c, rbac.Resource("roles")))), + rolebinding.NewRegistry(rolebindingetcd.NewREST(m.GetRESTOptionsOrDie(c, rbac.Resource("rolebindings")))), + clusterrole.NewRegistry(clusterroleetcd.NewREST(m.GetRESTOptionsOrDie(c, rbac.Resource("clusterroles")))), + clusterrolebinding.NewRegistry(clusterrolebindingetcd.NewREST(m.GetRESTOptionsOrDie(c, rbac.Resource("clusterrolebindings")))), + ) + }) + return authorizationRuleResolver + } + + storage := map[string]rest.Storage{} + if c.APIResourceConfigSource.ResourceEnabled(version.WithResource("roles")) { + rolesStorage := roleetcd.NewREST(m.GetRESTOptionsOrDie(c, rbac.Resource("roles"))) + storage["roles"] = rolepolicybased.NewStorage(rolesStorage, newRuleValidator(), c.AuthorizerRBACSuperUser) + } + if c.APIResourceConfigSource.ResourceEnabled(version.WithResource("rolebindings")) { + roleBindingsStorage := rolebindingetcd.NewREST(m.GetRESTOptionsOrDie(c, rbac.Resource("rolebindings"))) + 
storage["rolebindings"] = rolebindingpolicybased.NewStorage(roleBindingsStorage, newRuleValidator(), c.AuthorizerRBACSuperUser) + } + if c.APIResourceConfigSource.ResourceEnabled(version.WithResource("clusterroles")) { + clusterRolesStorage := clusterroleetcd.NewREST(m.GetRESTOptionsOrDie(c, rbac.Resource("clusterroles"))) + storage["clusterroles"] = clusterrolepolicybased.NewStorage(clusterRolesStorage, newRuleValidator(), c.AuthorizerRBACSuperUser) + } + if c.APIResourceConfigSource.ResourceEnabled(version.WithResource("clusterrolebindings")) { + clusterRoleBindingsStorage := clusterrolebindingetcd.NewREST(m.GetRESTOptionsOrDie(c, rbac.Resource("clusterrolebindings"))) + storage["clusterrolebindings"] = clusterrolebindingpolicybased.NewStorage(clusterRoleBindingsStorage, newRuleValidator(), c.AuthorizerRBACSuperUser) + } + return storage +} + +// findExternalAddress returns ExternalIP of provided node with fallback to LegacyHostIP. +func findExternalAddress(node *api.Node) (string, error) { + var fallback string + for ix := range node.Status.Addresses { + addr := &node.Status.Addresses[ix] + if addr.Type == api.NodeExternalIP { + return addr.Address, nil + } + if fallback == "" && addr.Type == api.NodeLegacyHostIP { + fallback = addr.Address + } + } + if fallback != "" { + return fallback, nil + } + return "", fmt.Errorf("Couldn't find external address: %v", node) +} + +func (m *Master) getNodeAddresses() ([]string, error) { + nodes, err := m.nodeRegistry.ListNodes(api.NewDefaultContext(), nil) + if err != nil { + return nil, err + } + addrs := []string{} + for ix := range nodes.Items { + node := &nodes.Items[ix] + addr, err := findExternalAddress(node) + if err != nil { + return nil, err + } + addrs = append(addrs, addr) + } + return addrs, nil +} + +func (m *Master) IsTunnelSyncHealthy(req *http.Request) error { + if m.tunneler == nil { + return nil + } + lag := m.tunneler.SecondsSinceSync() + if lag > 600 { + return fmt.Errorf("Tunnel sync is taking to long: %d", lag) + } + sshKeyLag := m.tunneler.SecondsSinceSSHKeySync() + if sshKeyLag > 600 { + return fmt.Errorf("SSHKey sync is taking to long: %d", sshKeyLag) + } + return nil +} + +func DefaultAPIResourceConfigSource() *genericapiserver.ResourceConfig { + ret := genericapiserver.NewResourceConfig() + ret.EnableVersions(apiv1.SchemeGroupVersion, extensionsapiv1beta1.SchemeGroupVersion, batchapiv1.SchemeGroupVersion, autoscalingapiv1.SchemeGroupVersion, appsapi.SchemeGroupVersion, policyapiv1alpha1.SchemeGroupVersion, rbacapi.SchemeGroupVersion) + + // all extensions resources except these are disabled by default + ret.EnableResources( + extensionsapiv1beta1.SchemeGroupVersion.WithResource("daemonsets"), + extensionsapiv1beta1.SchemeGroupVersion.WithResource("deployments"), + extensionsapiv1beta1.SchemeGroupVersion.WithResource("horizontalpodautoscalers"), + extensionsapiv1beta1.SchemeGroupVersion.WithResource("ingresses"), + extensionsapiv1beta1.SchemeGroupVersion.WithResource("jobs"), + extensionsapiv1beta1.SchemeGroupVersion.WithResource("replicasets"), + extensionsapiv1beta1.SchemeGroupVersion.WithResource("thirdpartyresources"), + ) + + return ret +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/master/master_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/master/master_test.go new file mode 100644 index 000000000000..7ed52a0e4982 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/master/master_test.go @@ -0,0 +1,1088 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. 
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package master
+
+import (
+	"bytes"
+	"crypto/tls"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net"
+	"net/http"
+	"net/http/httptest"
+	"reflect"
+	"strings"
+	"testing"
+
+	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/api/testapi"
+	"k8s.io/kubernetes/pkg/api/unversioned"
+	apiv1 "k8s.io/kubernetes/pkg/api/v1"
+	"k8s.io/kubernetes/pkg/apimachinery/registered"
+	"k8s.io/kubernetes/pkg/apis/apps"
+	appsapi "k8s.io/kubernetes/pkg/apis/apps"
+	"k8s.io/kubernetes/pkg/apis/autoscaling"
+	autoscalingapiv1 "k8s.io/kubernetes/pkg/apis/autoscaling/v1"
+	"k8s.io/kubernetes/pkg/apis/batch"
+	batchapiv1 "k8s.io/kubernetes/pkg/apis/batch/v1"
+	batchapiv2alpha1 "k8s.io/kubernetes/pkg/apis/batch/v2alpha1"
+	"k8s.io/kubernetes/pkg/apis/extensions"
+	extensionsapiv1beta1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
+	"k8s.io/kubernetes/pkg/apis/rbac"
+	"k8s.io/kubernetes/pkg/apiserver"
+	"k8s.io/kubernetes/pkg/genericapiserver"
+	"k8s.io/kubernetes/pkg/kubelet/client"
+	"k8s.io/kubernetes/pkg/registry/endpoint"
+	"k8s.io/kubernetes/pkg/registry/namespace"
+	"k8s.io/kubernetes/pkg/registry/registrytest"
+	"k8s.io/kubernetes/pkg/runtime"
+	"k8s.io/kubernetes/pkg/storage"
+	"k8s.io/kubernetes/pkg/storage/etcd/etcdtest"
+	etcdtesting "k8s.io/kubernetes/pkg/storage/etcd/testing"
+	"k8s.io/kubernetes/pkg/storage/storagebackend"
+	"k8s.io/kubernetes/pkg/util/intstr"
+	utilnet "k8s.io/kubernetes/pkg/util/net"
+	"k8s.io/kubernetes/pkg/util/sets"
+
+	"github.com/stretchr/testify/assert"
+	"golang.org/x/net/context"
+)
+
+// setUp is a convenience function for setting up for (most) tests.
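+// It returns a bare, not-yet-started Master, the etcd test server backing it,
+// the Config used to build them, and an assert helper; tests that need a fully
+// constructed Master should use newMaster below instead.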
+func setUp(t *testing.T) (*Master, *etcdtesting.EtcdTestServer, Config, *assert.Assertions) {
+	server := etcdtesting.NewEtcdTestClientServer(t)
+
+	master := &Master{
+		GenericAPIServer: &genericapiserver.GenericAPIServer{},
+	}
+	config := Config{
+		Config: &genericapiserver.Config{},
+	}
+
+	storageConfig := storagebackend.Config{
+		Prefix:   etcdtest.PathPrefix(),
+		CAFile:   server.CAFile,
+		KeyFile:  server.KeyFile,
+		CertFile: server.CertFile,
+	}
+	for _, url := range server.ClientURLs {
+		storageConfig.ServerList = append(storageConfig.ServerList, url.String())
+	}
+
+	resourceEncoding := genericapiserver.NewDefaultResourceEncodingConfig()
+	resourceEncoding.SetVersionEncoding(api.GroupName, *testapi.Default.GroupVersion(), unversioned.GroupVersion{Group: api.GroupName, Version: runtime.APIVersionInternal})
+	resourceEncoding.SetVersionEncoding(autoscaling.GroupName, *testapi.Autoscaling.GroupVersion(), unversioned.GroupVersion{Group: autoscaling.GroupName, Version: runtime.APIVersionInternal})
+	resourceEncoding.SetVersionEncoding(batch.GroupName, *testapi.Batch.GroupVersion(), unversioned.GroupVersion{Group: batch.GroupName, Version: runtime.APIVersionInternal})
+	resourceEncoding.SetVersionEncoding(apps.GroupName, *testapi.Apps.GroupVersion(), unversioned.GroupVersion{Group: apps.GroupName, Version: runtime.APIVersionInternal})
+	resourceEncoding.SetVersionEncoding(extensions.GroupName, *testapi.Extensions.GroupVersion(), unversioned.GroupVersion{Group: extensions.GroupName, Version: runtime.APIVersionInternal})
+	resourceEncoding.SetVersionEncoding(rbac.GroupName, *testapi.Rbac.GroupVersion(), unversioned.GroupVersion{Group: rbac.GroupName, Version: runtime.APIVersionInternal})
+	storageFactory := genericapiserver.NewDefaultStorageFactory(storageConfig, testapi.StorageMediaType(), api.Codecs, resourceEncoding, DefaultAPIResourceConfigSource())
+
+	config.StorageFactory = storageFactory
+	config.APIResourceConfigSource = DefaultAPIResourceConfigSource()
+	config.PublicAddress = net.ParseIP("192.168.10.4")
+	config.Serializer = api.Codecs
+	config.KubeletClient = client.FakeKubeletClient{}
+	config.APIPrefix = "/api"
+	config.APIGroupPrefix = "/apis"
+	config.APIResourceConfigSource = DefaultAPIResourceConfigSource()
+	config.ProxyDialer = func(network, addr string) (net.Conn, error) { return nil, nil }
+	config.ProxyTLSClientConfig = &tls.Config{}
+
+	// TODO: this is kind of hacky. The trouble is that the sync loop
+	// runs in a go-routine and there is no way to validate in the test
+	// that the sync routine has actually run. The right answer here
+	// is probably to add some sort of callback that we can register
+	// to validate that it's actually been run, but for now we don't
+	// run the sync routine and register types manually.
+	config.disableThirdPartyControllerForTesting = true
+
+	master.nodeRegistry = registrytest.NewNodeRegistry([]string{"node1", "node2"}, api.NodeResources{})
+
+	return master, server, config, assert.New(t)
+}
+
+func newMaster(t *testing.T) (*Master, *etcdtesting.EtcdTestServer, Config, *assert.Assertions) {
+	_, etcdserver, config, assert := setUp(t)
+
+	master, err := New(&config)
+	if err != nil {
+		t.Fatalf("Error in bringing up the master: %v", err)
+	}
+
+	return master, etcdserver, config, assert
+}
+
+// limitedAPIResourceConfigSource only enables the core group, the extensions group, the batch group, the apps group, and the autoscaling group.
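+// Note that the batch group is enabled at both v1 and v2alpha1.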
+func limitedAPIResourceConfigSource() *genericapiserver.ResourceConfig {
+	ret := genericapiserver.NewResourceConfig()
+	ret.EnableVersions(apiv1.SchemeGroupVersion, extensionsapiv1beta1.SchemeGroupVersion,
+		batchapiv1.SchemeGroupVersion, batchapiv2alpha1.SchemeGroupVersion,
+		appsapi.SchemeGroupVersion, autoscalingapiv1.SchemeGroupVersion)
+	return ret
+}
+
+// newLimitedMaster only enables the core group, the extensions group, the batch group, the apps group, and the autoscaling group.
+func newLimitedMaster(t *testing.T) (*Master, *etcdtesting.EtcdTestServer, Config, *assert.Assertions) {
+	_, etcdserver, config, assert := setUp(t)
+	config.APIResourceConfigSource = limitedAPIResourceConfigSource()
+	master, err := New(&config)
+	if err != nil {
+		t.Fatalf("Error in bringing up the master: %v", err)
+	}
+
+	return master, etcdserver, config, assert
+}
+
+// TestNew verifies that the New function returns a Master
+// using the configuration properly.
+func TestNew(t *testing.T) {
+	master, etcdserver, config, assert := newMaster(t)
+	defer etcdserver.Terminate(t)
+
+	// Verify many of the variables match their config counterparts
+	assert.Equal(master.enableCoreControllers, config.EnableCoreControllers)
+	assert.Equal(master.tunneler, config.Tunneler)
+	assert.Equal(master.APIPrefix, config.APIPrefix)
+	assert.Equal(master.APIGroupPrefix, config.APIGroupPrefix)
+	assert.Equal(master.RequestContextMapper, config.RequestContextMapper)
+	assert.Equal(master.MasterCount, config.MasterCount)
+	assert.Equal(master.ClusterIP, config.PublicAddress)
+	assert.Equal(master.PublicReadWritePort, config.ReadWritePort)
+	assert.Equal(master.ServiceReadWriteIP, config.ServiceReadWriteIP)
+
+	// These functions should point to the same memory location
+	masterDialer, _ := utilnet.Dialer(master.ProxyTransport)
+	masterDialerFunc := fmt.Sprintf("%p", masterDialer)
+	configDialerFunc := fmt.Sprintf("%p", config.ProxyDialer)
+	assert.Equal(masterDialerFunc, configDialerFunc)
+
+	assert.Equal(master.ProxyTransport.(*http.Transport).TLSClientConfig, config.ProxyTLSClientConfig)
+}
+
+// TestNamespaceSubresources ensures the namespace subresource parsing in apiserver/handlers.go doesn't drift
+func TestNamespaceSubresources(t *testing.T) {
+	master, etcdserver, _, _ := newMaster(t)
+	defer etcdserver.Terminate(t)
+
+	expectedSubresources := apiserver.NamespaceSubResourcesForTest
+	foundSubresources := sets.NewString()
+
+	for k := range master.v1ResourcesStorage {
+		parts := strings.Split(k, "/")
+		if len(parts) == 2 && parts[0] == "namespaces" {
+			foundSubresources.Insert(parts[1])
+		}
+	}
+
+	if !reflect.DeepEqual(expectedSubresources.List(), foundSubresources.List()) {
+		t.Errorf("Expected namespace subresources %#v, got %#v. Update apiserver/handlers.go#namespaceSubresources", expectedSubresources.List(), foundSubresources.List())
+	}
+}
+
+// TestGetServersToValidate verifies the unexported getServersToValidate function
+func TestGetServersToValidate(t *testing.T) {
+	master, etcdserver, config, assert := setUp(t)
+	defer etcdserver.Terminate(t)
+
+	servers := master.getServersToValidate(&config)
+
+	// Expected servers to validate: scheduler, controller-manager and etcd.
+ assert.Equal(3, len(servers), "unexpected server list: %#v", servers) + + for _, server := range []string{"scheduler", "controller-manager", "etcd-0"} { + if _, ok := servers[server]; !ok { + t.Errorf("server list missing: %s", server) + } + } +} + +// TestFindExternalAddress verifies both pass and fail cases for the unexported +// findExternalAddress function +func TestFindExternalAddress(t *testing.T) { + assert := assert.New(t) + expectedIP := "172.0.0.1" + + nodes := []*api.Node{new(api.Node), new(api.Node), new(api.Node)} + nodes[0].Status.Addresses = []api.NodeAddress{{"ExternalIP", expectedIP}} + nodes[1].Status.Addresses = []api.NodeAddress{{"LegacyHostIP", expectedIP}} + nodes[2].Status.Addresses = []api.NodeAddress{{"ExternalIP", expectedIP}, {"LegacyHostIP", "172.0.0.2"}} + + // Pass Case + for _, node := range nodes { + ip, err := findExternalAddress(node) + assert.NoError(err, "error getting node external address") + assert.Equal(expectedIP, ip, "expected ip to be %s, but was %s", expectedIP, ip) + } + + // Fail case + _, err := findExternalAddress(new(api.Node)) + assert.Error(err, "expected findExternalAddress to fail on a node with missing ip information") +} + +// TestNewBootstrapController verifies master fields are properly copied into controller +func TestNewBootstrapController(t *testing.T) { + // Tests a subset of inputs to ensure they are set properly in the controller + master, etcdserver, _, assert := setUp(t) + defer etcdserver.Terminate(t) + + portRange := utilnet.PortRange{Base: 10, Size: 10} + + master.namespaceRegistry = namespace.NewRegistry(nil) + master.serviceRegistry = registrytest.NewServiceRegistry() + master.endpointRegistry = endpoint.NewRegistry(nil) + + master.ServiceNodePortRange = portRange + master.MasterCount = 1 + master.ServiceReadWritePort = 1000 + master.PublicReadWritePort = 1010 + + controller := master.NewBootstrapController() + + assert.Equal(controller.NamespaceRegistry, master.namespaceRegistry) + assert.Equal(controller.EndpointRegistry, master.endpointRegistry) + assert.Equal(controller.ServiceRegistry, master.serviceRegistry) + assert.Equal(controller.ServiceNodePortRange, portRange) + assert.Equal(controller.MasterCount, master.MasterCount) + assert.Equal(controller.ServicePort, master.ServiceReadWritePort) + assert.Equal(controller.PublicServicePort, master.PublicReadWritePort) +} + +// TestControllerServicePorts verifies master extraServicePorts are +// correctly copied into controller +func TestControllerServicePorts(t *testing.T) { + master, etcdserver, _, assert := setUp(t) + defer etcdserver.Terminate(t) + + master.namespaceRegistry = namespace.NewRegistry(nil) + master.serviceRegistry = registrytest.NewServiceRegistry() + master.endpointRegistry = endpoint.NewRegistry(nil) + + master.ExtraServicePorts = []api.ServicePort{ + { + Name: "additional-port-1", + Port: 1000, + Protocol: api.ProtocolTCP, + TargetPort: intstr.FromInt(1000), + }, + { + Name: "additional-port-2", + Port: 1010, + Protocol: api.ProtocolTCP, + TargetPort: intstr.FromInt(1010), + }, + } + + controller := master.NewBootstrapController() + + assert.Equal(int32(1000), controller.ExtraServicePorts[0].Port) + assert.Equal(int32(1010), controller.ExtraServicePorts[1].Port) +} + +// TestGetNodeAddresses verifies that proper results are returned +// when requesting node addresses. 
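+// It exercises the failure path (nodes without addresses), the ExternalIP
+// case, and the LegacyHostIP fallback case.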
+func TestGetNodeAddresses(t *testing.T) {
+	master, etcdserver, _, assert := setUp(t)
+	defer etcdserver.Terminate(t)
+
+	// Fail case (no addresses associated with nodes)
+	nodes, _ := master.nodeRegistry.ListNodes(api.NewDefaultContext(), nil)
+	addrs, err := master.getNodeAddresses()
+
+	assert.Error(err, "getNodeAddresses should have caused an error as there are no addresses.")
+	assert.Equal([]string(nil), addrs)
+
+	// Pass case with External type IP
+	nodes, _ = master.nodeRegistry.ListNodes(api.NewDefaultContext(), nil)
+	for index := range nodes.Items {
+		nodes.Items[index].Status.Addresses = []api.NodeAddress{{Type: api.NodeExternalIP, Address: "127.0.0.1"}}
+	}
+	addrs, err = master.getNodeAddresses()
+	assert.NoError(err, "getNodeAddresses should not have returned an error.")
+	assert.Equal([]string{"127.0.0.1", "127.0.0.1"}, addrs)
+
+	// Pass case with LegacyHost type IP
+	nodes, _ = master.nodeRegistry.ListNodes(api.NewDefaultContext(), nil)
+	for index := range nodes.Items {
+		nodes.Items[index].Status.Addresses = []api.NodeAddress{{Type: api.NodeLegacyHostIP, Address: "127.0.0.2"}}
+	}
+	addrs, err = master.getNodeAddresses()
+	assert.NoError(err, "getNodeAddresses fallback should not have returned an error.")
+	assert.Equal([]string{"127.0.0.2", "127.0.0.2"}, addrs)
+}
+
+// Because we need to be backwards compatible with release 1.1, at endpoints
+// that exist in release 1.1, the responses should have empty APIVersion.
+func TestAPIVersionOfDiscoveryEndpoints(t *testing.T) {
+	master, etcdserver, _, assert := newMaster(t)
+	defer etcdserver.Terminate(t)
+
+	server := httptest.NewServer(master.HandlerContainer.ServeMux)
+
+	// /api exists in release-1.1
+	resp, err := http.Get(server.URL + "/api")
+	if err != nil {
+		t.Errorf("unexpected error: %v", err)
+	}
+	apiVersions := unversioned.APIVersions{}
+	assert.NoError(decodeResponse(resp, &apiVersions))
+	assert.Equal(apiVersions.APIVersion, "")
+
+	// /api/v1 exists in release-1.1
+	resp, err = http.Get(server.URL + "/api/v1")
+	if err != nil {
+		t.Errorf("unexpected error: %v", err)
+	}
+	resourceList := unversioned.APIResourceList{}
+	assert.NoError(decodeResponse(resp, &resourceList))
+	assert.Equal(resourceList.APIVersion, "")
+
+	// /apis exists in release-1.1
+	resp, err = http.Get(server.URL + "/apis")
+	if err != nil {
+		t.Errorf("unexpected error: %v", err)
+	}
+	groupList := unversioned.APIGroupList{}
+	assert.NoError(decodeResponse(resp, &groupList))
+	assert.Equal(groupList.APIVersion, "")
+
+	// /apis/extensions exists in release-1.1
+	resp, err = http.Get(server.URL + "/apis/extensions")
+	if err != nil {
+		t.Errorf("unexpected error: %v", err)
+	}
+	group := unversioned.APIGroup{}
+	assert.NoError(decodeResponse(resp, &group))
+	assert.Equal(group.APIVersion, "")
+
+	// /apis/extensions/v1beta1 exists in release-1.1
+	resp, err = http.Get(server.URL + "/apis/extensions/v1beta1")
+	if err != nil {
+		t.Errorf("unexpected error: %v", err)
+	}
+	resourceList = unversioned.APIResourceList{}
+	assert.NoError(decodeResponse(resp, &resourceList))
+	assert.Equal(resourceList.APIVersion, "")
+
+	// /apis/autoscaling doesn't exist in release-1.1, so the APIVersion field
+	// should be non-empty in the results returned by the server.
+ resp, err = http.Get(server.URL + "/apis/autoscaling") + if err != nil { + t.Errorf("unexpected error: %v", err) + } + group = unversioned.APIGroup{} + assert.NoError(decodeResponse(resp, &group)) + assert.Equal(group.APIVersion, "v1") + + // apis/autoscaling/v1 doesn't exist in release-1.1, so the APIVersion field + // should be non-empty in the results returned by the server. + + resp, err = http.Get(server.URL + "/apis/autoscaling/v1") + if err != nil { + t.Errorf("unexpected error: %v", err) + } + resourceList = unversioned.APIResourceList{} + assert.NoError(decodeResponse(resp, &resourceList)) + assert.Equal(resourceList.APIVersion, "v1") + +} + +func TestDiscoveryAtAPIS(t *testing.T) { + master, etcdserver, _, assert := newLimitedMaster(t) + defer etcdserver.Terminate(t) + + server := httptest.NewServer(master.HandlerContainer.ServeMux) + resp, err := http.Get(server.URL + "/apis") + if !assert.NoError(err) { + t.Errorf("unexpected error: %v", err) + } + + assert.Equal(http.StatusOK, resp.StatusCode) + + groupList := unversioned.APIGroupList{} + assert.NoError(decodeResponse(resp, &groupList)) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + expectGroupNames := sets.NewString(autoscaling.GroupName, batch.GroupName, apps.GroupName, extensions.GroupName) + expectVersions := map[string][]unversioned.GroupVersionForDiscovery{ + autoscaling.GroupName: { + { + GroupVersion: testapi.Autoscaling.GroupVersion().String(), + Version: testapi.Autoscaling.GroupVersion().Version, + }, + }, + apps.GroupName: { + { + GroupVersion: testapi.Apps.GroupVersion().String(), + Version: testapi.Apps.GroupVersion().Version, + }, + }, + extensions.GroupName: { + { + GroupVersion: testapi.Extensions.GroupVersion().String(), + Version: testapi.Extensions.GroupVersion().Version, + }, + }, + } + var batchVersions []unversioned.GroupVersionForDiscovery + for _, gv := range testapi.Batch.GroupVersions() { + batchVersions = append(batchVersions, unversioned.GroupVersionForDiscovery{ + GroupVersion: gv.String(), + Version: gv.Version, + }) + } + expectVersions[batch.GroupName] = batchVersions + + expectPreferredVersion := map[string]unversioned.GroupVersionForDiscovery{ + autoscaling.GroupName: { + GroupVersion: registered.GroupOrDie(autoscaling.GroupName).GroupVersion.String(), + Version: registered.GroupOrDie(autoscaling.GroupName).GroupVersion.Version, + }, + batch.GroupName: { + GroupVersion: registered.GroupOrDie(batch.GroupName).GroupVersion.String(), + Version: registered.GroupOrDie(batch.GroupName).GroupVersion.Version, + }, + apps.GroupName: { + GroupVersion: registered.GroupOrDie(apps.GroupName).GroupVersion.String(), + Version: registered.GroupOrDie(apps.GroupName).GroupVersion.Version, + }, + extensions.GroupName: { + GroupVersion: registered.GroupOrDie(extensions.GroupName).GroupVersion.String(), + Version: registered.GroupOrDie(extensions.GroupName).GroupVersion.Version, + }, + } + + assert.Equal(3, len(groupList.Groups)) + for _, group := range groupList.Groups { + if !expectGroupNames.Has(group.Name) { + t.Errorf("got unexpected group %s", group.Name) + } + assert.Equal(expectVersions[group.Name], group.Versions) + assert.Equal(expectPreferredVersion[group.Name], group.PreferredVersion) + } + + thirdPartyGV := unversioned.GroupVersionForDiscovery{GroupVersion: "company.com/v1", Version: "v1"} + master.addThirdPartyResourceStorage("/apis/company.com/v1", nil, + unversioned.APIGroup{ + Name: "company.com", + Versions: []unversioned.GroupVersionForDiscovery{thirdPartyGV}, + 
PreferredVersion: thirdPartyGV, + }) + + resp, err = http.Get(server.URL + "/apis") + if !assert.NoError(err) { + t.Errorf("unexpected error: %v", err) + } + assert.Equal(http.StatusOK, resp.StatusCode) + assert.NoError(decodeResponse(resp, &groupList)) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + assert.Equal(4, len(groupList.Groups)) + + expectGroupNames.Insert("company.com") + expectVersions["company.com"] = []unversioned.GroupVersionForDiscovery{thirdPartyGV} + expectPreferredVersion["company.com"] = thirdPartyGV + for _, group := range groupList.Groups { + if !expectGroupNames.Has(group.Name) { + t.Errorf("got unexpected group %s", group.Name) + } + assert.Equal(expectVersions[group.Name], group.Versions) + assert.Equal(expectPreferredVersion[group.Name], group.PreferredVersion) + } +} + +var versionsToTest = []string{"v1", "v3"} + +type Foo struct { + unversioned.TypeMeta `json:",inline"` + api.ObjectMeta `json:"metadata,omitempty" description:"standard object metadata"` + + SomeField string `json:"someField"` + OtherField int `json:"otherField"` +} + +type FooList struct { + unversioned.TypeMeta `json:",inline"` + unversioned.ListMeta `json:"metadata,omitempty" description:"standard list metadata; see http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata"` + + Items []Foo `json:"items"` +} + +func initThirdParty(t *testing.T, version string) (*Master, *etcdtesting.EtcdTestServer, *httptest.Server, *assert.Assertions) { + master, etcdserver, _, assert := newMaster(t) + + api := &extensions.ThirdPartyResource{ + ObjectMeta: api.ObjectMeta{ + Name: "foo.company.com", + }, + Versions: []extensions.APIVersion{ + { + Name: version, + }, + }, + } + _, master.ServiceClusterIPRange, _ = net.ParseCIDR("10.0.0.0/24") + + if !assert.NoError(master.InstallThirdPartyResource(api)) { + t.FailNow() + } + + server := httptest.NewServer(master.HandlerContainer.ServeMux) + return master, etcdserver, server, assert +} + +func TestInstallThirdPartyAPIList(t *testing.T) { + for _, version := range versionsToTest { + testInstallThirdPartyAPIListVersion(t, version) + } +} + +func testInstallThirdPartyAPIListVersion(t *testing.T, version string) { + tests := []struct { + items []Foo + }{ + {}, + { + items: []Foo{}, + }, + { + items: []Foo{ + { + ObjectMeta: api.ObjectMeta{ + Name: "test", + }, + TypeMeta: unversioned.TypeMeta{ + Kind: "Foo", + APIVersion: version, + }, + SomeField: "test field", + OtherField: 10, + }, + { + ObjectMeta: api.ObjectMeta{ + Name: "bar", + }, + TypeMeta: unversioned.TypeMeta{ + Kind: "Foo", + APIVersion: version, + }, + SomeField: "test field another", + OtherField: 20, + }, + }, + }, + } + for _, test := range tests { + func() { + master, etcdserver, server, assert := initThirdParty(t, version) + defer server.Close() + defer etcdserver.Terminate(t) + + if test.items != nil { + err := createThirdPartyList(master.thirdPartyStorage, "/ThirdPartyResourceData/company.com/foos/default", test.items) + if !assert.NoError(err) { + return + } + } + + resp, err := http.Get(server.URL + "/apis/company.com/" + version + "/namespaces/default/foos") + if !assert.NoError(err) { + return + } + defer resp.Body.Close() + + assert.Equal(http.StatusOK, resp.StatusCode) + + data, err := ioutil.ReadAll(resp.Body) + assert.NoError(err) + + list := FooList{} + if err = json.Unmarshal(data, &list); err != nil { + t.Errorf("unexpected error: %v", err) + } + + if test.items == nil { + if len(list.Items) != 0 { + t.Errorf("expected no items, saw: %v", list.Items) + } + 
return
+			}
+
+			if len(list.Items) != len(test.items) {
+				t.Fatalf("unexpected length: %d vs %d", len(list.Items), len(test.items))
+			}
+			// The order of elements in LIST is not guaranteed.
+			mapping := make(map[string]int)
+			for ix := range test.items {
+				mapping[test.items[ix].Name] = ix
+			}
+			for ix := range list.Items {
+				// Copy things that are set dynamically on the server
+				expectedObj := test.items[mapping[list.Items[ix].Name]]
+				expectedObj.SelfLink = list.Items[ix].SelfLink
+				expectedObj.ResourceVersion = list.Items[ix].ResourceVersion
+				expectedObj.Namespace = list.Items[ix].Namespace
+				expectedObj.UID = list.Items[ix].UID
+				expectedObj.CreationTimestamp = list.Items[ix].CreationTimestamp
+
+				// Looking items up via 'mapping' makes this comparison
+				// independent of the order in which items are returned.
+				if !reflect.DeepEqual(list.Items[ix], expectedObj) {
+					t.Errorf("expected:\n%#v\nsaw:\n%#v\n", expectedObj, list.Items[ix])
+				}
+			}
+		}()
+	}
+}
+
+func encodeToThirdParty(name string, obj interface{}) (runtime.Object, error) {
+	serial, err := json.Marshal(obj)
+	if err != nil {
+		return nil, err
+	}
+	thirdPartyData := extensions.ThirdPartyResourceData{
+		ObjectMeta: api.ObjectMeta{Name: name},
+		Data:       serial,
+	}
+	return &thirdPartyData, nil
+}
+
+func createThirdPartyObject(s storage.Interface, path, name string, obj interface{}) error {
+	data, err := encodeToThirdParty(name, obj)
+	if err != nil {
+		return err
+	}
+	return s.Create(context.TODO(), etcdtest.AddPrefix(path), data, nil, 0)
+}
+
+func createThirdPartyList(s storage.Interface, path string, list []Foo) error {
+	for _, obj := range list {
+		if err := createThirdPartyObject(s, path+"/"+obj.Name, obj.Name, obj); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func decodeResponse(resp *http.Response, obj interface{}) error {
+	defer resp.Body.Close()
+
+	data, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return err
+	}
+	if err := json.Unmarshal(data, obj); err != nil {
+		return err
+	}
+	return nil
+}
+
+func TestInstallThirdPartyAPIGet(t *testing.T) {
+	for _, version := range versionsToTest {
+		testInstallThirdPartyAPIGetVersion(t, version)
+	}
+}
+
+func testInstallThirdPartyAPIGetVersion(t *testing.T, version string) {
+	master, etcdserver, server, assert := initThirdParty(t, version)
+	defer server.Close()
+	defer etcdserver.Terminate(t)
+
+	expectedObj := Foo{
+		ObjectMeta: api.ObjectMeta{
+			Name: "test",
+		},
+		TypeMeta: unversioned.TypeMeta{
+			Kind:       "Foo",
+			APIVersion: version,
+		},
+		SomeField:  "test field",
+		OtherField: 10,
+	}
+	if !assert.NoError(createThirdPartyObject(master.thirdPartyStorage, "/ThirdPartyResourceData/company.com/foos/default/test", "test", expectedObj)) {
+		t.FailNow()
+		return
+	}
+
+	resp, err := http.Get(server.URL + "/apis/company.com/" + version + "/namespaces/default/foos/test")
+	if !assert.NoError(err) {
+		return
+	}
+
+	assert.Equal(http.StatusOK, resp.StatusCode)
+
+	item := Foo{}
+	assert.NoError(decodeResponse(resp, &item))
+	if !assert.False(reflect.DeepEqual(item, expectedObj)) {
+		t.Errorf("expected objects to not be equal:\n%v\nsaw:\n%v\n", expectedObj, item)
+	}
+	// Fill in data that the apiserver injects
+	expectedObj.SelfLink = item.SelfLink
+	expectedObj.ResourceVersion = item.ResourceVersion
+	if !assert.True(reflect.DeepEqual(item, expectedObj)) {
+		t.Errorf("expected:\n%#v\nsaw:\n%#v\n", expectedObj, item)
+	}
+}
+
+func TestInstallThirdPartyAPIPost(t *testing.T) {
+	for _, version := range versionsToTest {
+		testInstallThirdPartyAPIPostForVersion(t,
version) + } +} + +func testInstallThirdPartyAPIPostForVersion(t *testing.T, version string) { + master, etcdserver, server, assert := initThirdParty(t, version) + defer server.Close() + defer etcdserver.Terminate(t) + + inputObj := Foo{ + ObjectMeta: api.ObjectMeta{ + Name: "test", + }, + TypeMeta: unversioned.TypeMeta{ + Kind: "Foo", + APIVersion: "company.com/" + version, + }, + SomeField: "test field", + OtherField: 10, + } + data, err := json.Marshal(inputObj) + if !assert.NoError(err) { + return + } + + resp, err := http.Post(server.URL+"/apis/company.com/"+version+"/namespaces/default/foos", "application/json", bytes.NewBuffer(data)) + if !assert.NoError(err) { + t.Fatalf("unexpected error: %v", err) + } + + assert.Equal(http.StatusCreated, resp.StatusCode) + + item := Foo{} + assert.NoError(decodeResponse(resp, &item)) + + // fill in fields set by the apiserver + expectedObj := inputObj + expectedObj.SelfLink = item.SelfLink + expectedObj.ResourceVersion = item.ResourceVersion + expectedObj.Namespace = item.Namespace + expectedObj.UID = item.UID + expectedObj.CreationTimestamp = item.CreationTimestamp + if !assert.True(reflect.DeepEqual(item, expectedObj)) { + t.Errorf("expected:\n%v\nsaw:\n%v\n", expectedObj, item) + } + + thirdPartyObj := extensions.ThirdPartyResourceData{} + err = master.thirdPartyStorage.Get( + context.TODO(), etcdtest.AddPrefix("/ThirdPartyResourceData/company.com/foos/default/test"), + &thirdPartyObj, false) + if !assert.NoError(err) { + t.FailNow() + } + + item = Foo{} + assert.NoError(json.Unmarshal(thirdPartyObj.Data, &item)) + + if !assert.True(reflect.DeepEqual(item, inputObj)) { + t.Errorf("expected:\n%v\nsaw:\n%v\n", inputObj, item) + } +} + +func TestInstallThirdPartyAPIDelete(t *testing.T) { + for _, version := range versionsToTest { + testInstallThirdPartyAPIDeleteVersion(t, version) + } +} + +func testInstallThirdPartyAPIDeleteVersion(t *testing.T, version string) { + master, etcdserver, server, assert := initThirdParty(t, version) + defer server.Close() + defer etcdserver.Terminate(t) + + expectedObj := Foo{ + ObjectMeta: api.ObjectMeta{ + Name: "test", + Namespace: "default", + }, + TypeMeta: unversioned.TypeMeta{ + Kind: "Foo", + }, + SomeField: "test field", + OtherField: 10, + } + if !assert.NoError(createThirdPartyObject(master.thirdPartyStorage, "/ThirdPartyResourceData/company.com/foos/default/test", "test", expectedObj)) { + t.FailNow() + return + } + + resp, err := http.Get(server.URL + "/apis/company.com/" + version + "/namespaces/default/foos/test") + if !assert.NoError(err) { + return + } + + assert.Equal(http.StatusOK, resp.StatusCode) + + item := Foo{} + assert.NoError(decodeResponse(resp, &item)) + + // Fill in fields set by the apiserver + expectedObj.SelfLink = item.SelfLink + expectedObj.ResourceVersion = item.ResourceVersion + expectedObj.Namespace = item.Namespace + if !assert.True(reflect.DeepEqual(item, expectedObj)) { + t.Errorf("expected:\n%v\nsaw:\n%v\n", expectedObj, item) + } + + resp, err = httpDelete(server.URL + "/apis/company.com/" + version + "/namespaces/default/foos/test") + if !assert.NoError(err) { + return + } + + assert.Equal(http.StatusOK, resp.StatusCode) + + resp, err = http.Get(server.URL + "/apis/company.com/" + version + "/namespaces/default/foos/test") + if !assert.NoError(err) { + return + } + + assert.Equal(http.StatusNotFound, resp.StatusCode) + + expectedDeletedKey := etcdtest.AddPrefix("ThirdPartyResourceData/company.com/foos/default/test") + thirdPartyObj := extensions.ThirdPartyResourceData{} + 
err = master.thirdPartyStorage.Get( + context.TODO(), expectedDeletedKey, &thirdPartyObj, false) + if !storage.IsNotFound(err) { + t.Errorf("expected deletion didn't happen: %v", err) + } +} + +func httpDelete(url string) (*http.Response, error) { + req, err := http.NewRequest("DELETE", url, nil) + if err != nil { + return nil, err + } + client := &http.Client{} + return client.Do(req) +} + +func TestInstallThirdPartyAPIListOptions(t *testing.T) { + for _, version := range versionsToTest { + testInstallThirdPartyAPIListOptionsForVersion(t, version) + } +} + +func testInstallThirdPartyAPIListOptionsForVersion(t *testing.T, version string) { + _, etcdserver, server, assert := initThirdParty(t, version) + defer server.Close() + defer etcdserver.Terminate(t) + + // send a GET request with query parameter + resp, err := httpGetWithRV(server.URL + "/apis/company.com/" + version + "/namespaces/default/foos") + if !assert.NoError(err) { + t.Fatalf("unexpected error: %v", err) + } + assert.Equal(http.StatusOK, resp.StatusCode) +} + +func httpGetWithRV(url string) (*http.Response, error) { + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return nil, err + } + q := req.URL.Query() + // resourceversion is part of a ListOptions + q.Add("resourceversion", "0") + req.URL.RawQuery = q.Encode() + client := &http.Client{} + return client.Do(req) +} + +func TestInstallThirdPartyResourceRemove(t *testing.T) { + for _, version := range versionsToTest { + testInstallThirdPartyResourceRemove(t, version) + } +} + +func testInstallThirdPartyResourceRemove(t *testing.T, version string) { + master, etcdserver, server, assert := initThirdParty(t, version) + defer server.Close() + defer etcdserver.Terminate(t) + + expectedObj := Foo{ + ObjectMeta: api.ObjectMeta{ + Name: "test", + }, + TypeMeta: unversioned.TypeMeta{ + Kind: "Foo", + }, + SomeField: "test field", + OtherField: 10, + } + if !assert.NoError(createThirdPartyObject(master.thirdPartyStorage, "/ThirdPartyResourceData/company.com/foos/default/test", "test", expectedObj)) { + t.FailNow() + return + } + secondObj := expectedObj + secondObj.Name = "bar" + if !assert.NoError(createThirdPartyObject(master.thirdPartyStorage, "/ThirdPartyResourceData/company.com/foos/default/bar", "bar", secondObj)) { + t.FailNow() + return + } + + resp, err := http.Get(server.URL + "/apis/company.com/" + version + "/namespaces/default/foos/test") + if !assert.NoError(err) { + t.FailNow() + return + } + + if resp.StatusCode != http.StatusOK { + t.Errorf("unexpected status: %v", resp) + } + + item := Foo{} + if err := decodeResponse(resp, &item); err != nil { + t.Errorf("unexpected error: %v", err) + } + + // TODO: validate etcd set things here + item.ObjectMeta = expectedObj.ObjectMeta + + if !assert.True(reflect.DeepEqual(item, expectedObj)) { + t.Errorf("expected:\n%v\nsaw:\n%v\n", expectedObj, item) + } + + path := makeThirdPartyPath("company.com") + master.RemoveThirdPartyResource(path) + + resp, err = http.Get(server.URL + "/apis/company.com/" + version + "/namespaces/default/foos/test") + if !assert.NoError(err) { + return + } + + if resp.StatusCode != http.StatusNotFound { + t.Errorf("unexpected status: %v", resp) + } + + expectedDeletedKeys := []string{ + etcdtest.AddPrefix("/ThirdPartyResourceData/company.com/foos/default/test"), + etcdtest.AddPrefix("/ThirdPartyResourceData/company.com/foos/default/bar"), + } + for _, key := range expectedDeletedKeys { + thirdPartyObj := extensions.ThirdPartyResourceData{} + err := 
master.thirdPartyStorage.Get(context.TODO(), key, &thirdPartyObj, false) + if !storage.IsNotFound(err) { + t.Errorf("expected deletion didn't happen: %v", err) + } + } + installed := master.ListThirdPartyResources() + if len(installed) != 0 { + t.Errorf("Resource(s) still installed: %v", installed) + } + services := master.HandlerContainer.RegisteredWebServices() + for ix := range services { + if strings.HasPrefix(services[ix].RootPath(), "/apis/company.com") { + t.Errorf("Web service still installed at %s: %#v", services[ix].RootPath(), services[ix]) + } + } +} + +func TestThirdPartyDiscovery(t *testing.T) { + for _, version := range versionsToTest { + testThirdPartyDiscovery(t, version) + } +} + +type FakeTunneler struct { + SecondsSinceSyncValue int64 + SecondsSinceSSHKeySyncValue int64 +} + +func (t *FakeTunneler) Run(genericapiserver.AddressFunc) {} +func (t *FakeTunneler) Stop() {} +func (t *FakeTunneler) Dial(net, addr string) (net.Conn, error) { return nil, nil } +func (t *FakeTunneler) SecondsSinceSync() int64 { return t.SecondsSinceSyncValue } +func (t *FakeTunneler) SecondsSinceSSHKeySync() int64 { return t.SecondsSinceSSHKeySyncValue } + +// TestIsTunnelSyncHealthy verifies that the 600 second lag test +// is honored. +func TestIsTunnelSyncHealthy(t *testing.T) { + assert := assert.New(t) + tunneler := &FakeTunneler{} + master := &Master{ + GenericAPIServer: &genericapiserver.GenericAPIServer{}, + tunneler: tunneler, + } + + // Pass case: 540 second lag + tunneler.SecondsSinceSyncValue = 540 + err := master.IsTunnelSyncHealthy(nil) + assert.NoError(err, "IsTunnelSyncHealthy() should not have returned an error.") + + // Fail case: 720 second lag + tunneler.SecondsSinceSyncValue = 720 + err = master.IsTunnelSyncHealthy(nil) + assert.Error(err, "IsTunnelSyncHealthy() should have returned an error.") +} + +func testThirdPartyDiscovery(t *testing.T, version string) { + _, etcdserver, server, assert := initThirdParty(t, version) + defer server.Close() + defer etcdserver.Terminate(t) + + resp, err := http.Get(server.URL + "/apis/company.com/") + if !assert.NoError(err) { + return + } + assert.Equal(http.StatusOK, resp.StatusCode) + + group := unversioned.APIGroup{} + assert.NoError(decodeResponse(resp, &group)) + assert.Equal(group.APIVersion, "v1") + assert.Equal(group.Kind, "APIGroup") + assert.Equal(group.Name, "company.com") + expectedVersion := unversioned.GroupVersionForDiscovery{ + GroupVersion: "company.com/" + version, + Version: version, + } + + assert.Equal(group.Versions, []unversioned.GroupVersionForDiscovery{expectedVersion}) + assert.Equal(group.PreferredVersion, expectedVersion) + + resp, err = http.Get(server.URL + "/apis/company.com/" + version) + if !assert.NoError(err) { + return + } + assert.Equal(http.StatusOK, resp.StatusCode) + + resourceList := unversioned.APIResourceList{} + assert.NoError(decodeResponse(resp, &resourceList)) + assert.Equal(resourceList.APIVersion, "v1") + assert.Equal(resourceList.Kind, "APIResourceList") + assert.Equal(resourceList.GroupVersion, "company.com/"+version) + assert.Equal(resourceList.APIResources, []unversioned.APIResource{ + { + Name: "foos", + Namespaced: true, + Kind: "Foo", + }, + }) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/master/ports/ports.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/master/ports/ports.go index 3e36603acaec..246a1a562d3a 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/master/ports/ports.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/master/ports/ports.go @@ -18,7 
+18,7 @@ package ports const ( // ProxyPort is the default port for the proxy healthz server. - // May be overriden by a flag at startup. + // May be overridden by a flag at startup. ProxyStatusPort = 10249 // KubeletPort is the default port for the kubelet server on each host machine. // May be overridden by a flag at startup. diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/master/thirdparty_controller_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/master/thirdparty_controller_test.go new file mode 100644 index 000000000000..4b52e8994933 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/master/thirdparty_controller_test.go @@ -0,0 +1,177 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package master + +import ( + "testing" + + "k8s.io/kubernetes/pkg/api" + expapi "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata" + "k8s.io/kubernetes/pkg/util/sets" +) + +type FakeAPIInterface struct { + removed []string + installed []*expapi.ThirdPartyResource + apis []string + t *testing.T +} + +func (f *FakeAPIInterface) RemoveThirdPartyResource(path string) error { + f.removed = append(f.removed, path) + return nil +} + +func (f *FakeAPIInterface) InstallThirdPartyResource(rsrc *expapi.ThirdPartyResource) error { + f.installed = append(f.installed, rsrc) + _, group, _ := thirdpartyresourcedata.ExtractApiGroupAndKind(rsrc) + f.apis = append(f.apis, makeThirdPartyPath(group)) + return nil +} + +func (f *FakeAPIInterface) HasThirdPartyResource(rsrc *expapi.ThirdPartyResource) (bool, error) { + if f.apis == nil { + return false, nil + } + _, group, _ := thirdpartyresourcedata.ExtractApiGroupAndKind(rsrc) + path := makeThirdPartyPath(group) + for _, api := range f.apis { + if api == path { + return true, nil + } + } + return false, nil +} + +func (f *FakeAPIInterface) ListThirdPartyResources() []string { + return f.apis +} + +func TestSyncAPIs(t *testing.T) { + resourcesNamed := func(names ...string) []expapi.ThirdPartyResource { + result := []expapi.ThirdPartyResource{} + for _, name := range names { + result = append(result, expapi.ThirdPartyResource{ObjectMeta: api.ObjectMeta{Name: name}}) + } + return result + } + + tests := []struct { + list *expapi.ThirdPartyResourceList + apis []string + expectedInstalled []string + expectedRemoved []string + name string + }{ + { + list: &expapi.ThirdPartyResourceList{ + Items: resourcesNamed("foo.example.com"), + }, + expectedInstalled: []string{"foo.example.com"}, + name: "simple add", + }, + { + list: &expapi.ThirdPartyResourceList{ + Items: resourcesNamed("foo.example.com"), + }, + apis: []string{ + "/apis/example.com", + "/apis/example.com/v1", + }, + name: "does nothing", + }, + { + list: &expapi.ThirdPartyResourceList{ + Items: resourcesNamed("foo.example.com"), + }, + apis: []string{ + "/apis/example.com", + "/apis/example.com/v1", + "/apis/example.co", + "/apis/example.co/v1", + }, + name: "deletes substring API", + expectedRemoved: 
[]string{
+				"/apis/example.co",
+				"/apis/example.co/v1",
+			},
+		},
+		{
+			list: &expapi.ThirdPartyResourceList{
+				Items: resourcesNamed("foo.example.com", "foo.company.com"),
+			},
+			apis: []string{
+				"/apis/company.com",
+				"/apis/company.com/v1",
+			},
+			expectedInstalled: []string{"foo.example.com"},
+			name:              "adds with existing",
+		},
+		{
+			list: &expapi.ThirdPartyResourceList{
+				Items: resourcesNamed("foo.example.com"),
+			},
+			apis: []string{
+				"/apis/company.com",
+				"/apis/company.com/v1",
+			},
+			expectedInstalled: []string{"foo.example.com"},
+			expectedRemoved:   []string{"/apis/company.com", "/apis/company.com/v1"},
+			name:              "removes with existing",
+		},
+	}
+
+	for _, test := range tests {
+		fake := FakeAPIInterface{
+			apis: test.apis,
+			t:    t,
+		}
+
+		cntrl := ThirdPartyController{master: &fake}
+
+		if err := cntrl.syncResourceList(test.list); err != nil {
+			t.Errorf("[%s] unexpected error: %v", test.name, err)
+		}
+		if len(test.expectedInstalled) != len(fake.installed) {
+			t.Errorf("[%s] unexpected installed APIs: %d, expected %d (%#v)", test.name, len(fake.installed), len(test.expectedInstalled), fake.installed)
+			continue
+		} else {
+			names := sets.String{}
+			for ix := range fake.installed {
+				names.Insert(fake.installed[ix].Name)
+			}
+			for _, name := range test.expectedInstalled {
+				if !names.Has(name) {
+					t.Errorf("[%s] missing installed API: %s", test.name, name)
+				}
+			}
+		}
+		if len(test.expectedRemoved) != len(fake.removed) {
+			t.Errorf("[%s] unexpected removed APIs: %d, expected %d", test.name, len(fake.removed), len(test.expectedRemoved))
+			continue
+		} else {
+			names := sets.String{}
+			names.Insert(fake.removed...)
+			for _, name := range test.expectedRemoved {
+				if !names.Has(name) {
+					t.Errorf("[%s] missing removed API: %s (%s)", test.name, name, names)
+				}
+			}
+		}
+	}
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/registry/generic/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/registry/generic/doc.go
new file mode 100644
index 000000000000..2486e9b74266
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/registry/generic/doc.go
@@ -0,0 +1,19 @@
+/*
+Copyright 2014 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package generic provides a generic object store interface and a
+// generic label/field matching type.
+package generic
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/registry/generic/matcher.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/registry/generic/matcher.go
new file mode 100644
index 000000000000..08e2df7b4563
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/registry/generic/matcher.go
@@ -0,0 +1,142 @@
+/*
+Copyright 2014 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package generic
+
+import (
+	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/fields"
+	"k8s.io/kubernetes/pkg/labels"
+	"k8s.io/kubernetes/pkg/runtime"
+)
+
+// AttrFunc returns label and field sets for List or Watch to compare against, or an error.
+type AttrFunc func(obj runtime.Object) (label labels.Set, field fields.Set, err error)
+
+// ObjectMetaFieldsSet returns a fields set that represents the ObjectMeta.
+func ObjectMetaFieldsSet(objectMeta api.ObjectMeta, hasNamespaceField bool) fields.Set {
+	if !hasNamespaceField {
+		return fields.Set{
+			"metadata.name": objectMeta.Name,
+		}
+	}
+	return fields.Set{
+		"metadata.name":      objectMeta.Name,
+		"metadata.namespace": objectMeta.Namespace,
+	}
+}
+
+// MergeFieldsSets merges the fields set from fragment into source and returns it.
+func MergeFieldsSets(source fields.Set, fragment fields.Set) fields.Set {
+	for k, value := range fragment {
+		source[k] = value
+	}
+	return source
+}
+
+// SelectionPredicate implements a generic predicate that can be passed to
+// GenericRegistry's List or Watch methods. Implements the Matcher interface.
+type SelectionPredicate struct {
+	Label    labels.Selector
+	Field    fields.Selector
+	GetAttrs AttrFunc
+}
+
+// Matches returns true if the given object's labels and fields (as
+// returned by s.GetAttrs) match s.Label and s.Field. An error is
+// returned if s.GetAttrs fails.
+func (s *SelectionPredicate) Matches(obj runtime.Object) (bool, error) {
+	if s.Label.Empty() && s.Field.Empty() {
+		return true, nil
+	}
+	labels, fields, err := s.GetAttrs(obj)
+	if err != nil {
+		return false, err
+	}
+	return s.Label.Matches(labels) && s.Field.Matches(fields), nil
+}
+
+// MatchesSingle will return (name, true) if and only if s.Field matches on the object's
+// name.
+func (s *SelectionPredicate) MatchesSingle() (string, bool) {
+	// TODO: should be namespace.name
+	if name, ok := s.Field.RequiresExactMatch("metadata.name"); ok {
+		return name, true
+	}
+	return "", false
+}
+
+// Matcher can return true if an object matches the Matcher's selection
+// criteria. If it is known that the matcher will match only a single object
+// then MatchesSingle should return the key of that object and true. This is an
+// optimization only--Matches() should continue to work.
+type Matcher interface {
+	// Matches should return true if obj matches this matcher's requirements.
+	Matches(obj runtime.Object) (matchesThisObject bool, err error)
+
+	// If this matcher matches a single object, return the key for that
+	// object and true here. This will greatly increase efficiency. You
+	// must still implement Matches(). Note that key does NOT need to
+	// include the object's namespace.
+	MatchesSingle() (key string, matchesSingleObject bool)
+
+	// TODO: when we start indexing objects, add something like the below:
+	// MatchesIndices() (indexName []string, indexValue []string)
+	// where indexName/indexValue are the same length.
+}
+
+// MatcherFunc makes a matcher from the provided function. For easy definition
+// of matchers for testing. Note: use SelectionPredicate above for real code!
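+//
+// A minimal, test-only sketch (the object passed to Matches is hypothetical):
+//
+//	alwaysMatch := MatcherFunc(func(obj runtime.Object) (bool, error) {
+//		return true, nil
+//	})
+//	matched, err := alwaysMatch.Matches(somePod) // matched == true, err == nil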
+func MatcherFunc(f func(obj runtime.Object) (bool, error)) Matcher { + return matcherFunc(f) +} + +type matcherFunc func(obj runtime.Object) (bool, error) + +// Matches calls the embedded function. +func (m matcherFunc) Matches(obj runtime.Object) (bool, error) { + return m(obj) +} + +// MatchesSingle always returns "", false-- because this is a predicate +// implementation of Matcher. +func (m matcherFunc) MatchesSingle() (string, bool) { + return "", false +} + +// MatchOnKey returns a matcher that will send only the object matching key +// through the matching function f. For testing! +// Note: use SelectionPredicate above for real code! +func MatchOnKey(key string, f func(obj runtime.Object) (bool, error)) Matcher { + return matchKey{key, f} +} + +type matchKey struct { + key string + matcherFunc +} + +// MatchesSingle always returns its key, true. +func (m matchKey) MatchesSingle() (string, bool) { + return m.key, true +} + +var ( + // Assert implementations match the interface. + _ = Matcher(matchKey{}) + _ = Matcher(&SelectionPredicate{}) + _ = Matcher(matcherFunc(nil)) +) diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/registry/generic/matcher_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/registry/generic/matcher_test.go new file mode 100644 index 000000000000..17c7fb3636dc --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/registry/generic/matcher_test.go @@ -0,0 +1,130 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package generic + +import ( + "errors" + "testing" + + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/fields" + "k8s.io/kubernetes/pkg/labels" + "k8s.io/kubernetes/pkg/runtime" +) + +type Ignored struct { + ID string +} + +type IgnoredList struct { + Items []Ignored +} + +func (obj *Ignored) GetObjectKind() unversioned.ObjectKind { return unversioned.EmptyObjectKind } +func (obj *IgnoredList) GetObjectKind() unversioned.ObjectKind { return unversioned.EmptyObjectKind } + +func TestSelectionPredicate(t *testing.T) { + table := map[string]struct { + labelSelector, fieldSelector string + labels labels.Set + fields fields.Set + err error + shouldMatch bool + matchSingleKey string + }{ + "A": { + labelSelector: "name=foo", + fieldSelector: "uid=12345", + labels: labels.Set{"name": "foo"}, + fields: fields.Set{"uid": "12345"}, + shouldMatch: true, + }, + "B": { + labelSelector: "name=foo", + fieldSelector: "uid=12345", + labels: labels.Set{"name": "foo"}, + fields: fields.Set{}, + shouldMatch: false, + }, + "C": { + labelSelector: "name=foo", + fieldSelector: "uid=12345", + labels: labels.Set{}, + fields: fields.Set{"uid": "12345"}, + shouldMatch: false, + }, + "D": { + fieldSelector: "metadata.name=12345", + labels: labels.Set{}, + fields: fields.Set{"metadata.name": "12345"}, + shouldMatch: true, + matchSingleKey: "12345", + }, + "error": { + labelSelector: "name=foo", + fieldSelector: "uid=12345", + err: errors.New("maybe this is a 'wrong object type' error"), + shouldMatch: false, + }, + } + + for name, item := range table { + parsedLabel, err := labels.Parse(item.labelSelector) + if err != nil { + panic(err) + } + parsedField, err := fields.ParseSelector(item.fieldSelector) + if err != nil { + panic(err) + } + sp := &SelectionPredicate{ + Label: parsedLabel, + Field: parsedField, + GetAttrs: func(runtime.Object) (label labels.Set, field fields.Set, err error) { + return item.labels, item.fields, item.err + }, + } + got, err := sp.Matches(&Ignored{}) + if e, a := item.err, err; e != a { + t.Errorf("%v: expected %v, got %v", name, e, a) + continue + } + if e, a := item.shouldMatch, got; e != a { + t.Errorf("%v: expected %v, got %v", name, e, a) + } + if key := item.matchSingleKey; key != "" { + got, ok := sp.MatchesSingle() + if !ok { + t.Errorf("%v: expected single match", name) + } + if e, a := key, got; e != a { + t.Errorf("%v: expected %v, got %v", name, e, a) + } + } + } +} + +func TestSingleMatch(t *testing.T) { + m := MatchOnKey("pod-name-here", func(obj runtime.Object) (bool, error) { return true, nil }) + got, ok := m.MatchesSingle() + if !ok { + t.Errorf("Expected MatchesSingle to return true") + } + if e, a := "pod-name-here", got; e != a { + t.Errorf("Expected %#v, got %#v", e, a) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/registry/generic/options.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/registry/generic/options.go new file mode 100644 index 000000000000..eea52c995b13 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/registry/generic/options.go @@ -0,0 +1,28 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package generic
+
+import (
+	pkgstorage "k8s.io/kubernetes/pkg/storage"
+)
+
+// RESTOptions is a set of configuration options to generic registries.
+type RESTOptions struct {
+	Storage                 pkgstorage.Interface
+	Decorator               StorageDecorator
+	DeleteCollectionWorkers int
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/registry/generic/registry/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/registry/generic/registry/doc.go
new file mode 100644
index 000000000000..ee972408dce2
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/registry/generic/registry/doc.go
@@ -0,0 +1,19 @@
+/*
+Copyright 2014 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package registry has a generic implementation of a registry that
+// stores things in etcd.
+package registry
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/registry/generic/registry/storage_factory.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/registry/generic/registry/storage_factory.go
new file mode 100644
index 000000000000..f1c265113bf9
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/registry/generic/registry/storage_factory.go
@@ -0,0 +1,37 @@
+/*
+Copyright 2015 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package registry
+
+import (
+	"k8s.io/kubernetes/pkg/api/rest"
+	"k8s.io/kubernetes/pkg/runtime"
+	"k8s.io/kubernetes/pkg/storage"
+	etcdstorage "k8s.io/kubernetes/pkg/storage/etcd"
+)
+
+// StorageWithCacher creates a cacher on top of the given 'storageInterface'.
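+// The cacher keeps an in-memory copy of the stored objects, kept up to date
+// via a watch on 'storageInterface', so list and watch traffic can be served
+// from memory; 'capacity' bounds the retained watch history.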
+func StorageWithCacher( + storageInterface storage.Interface, + capacity int, + objectType runtime.Object, + resourcePrefix string, + scopeStrategy rest.NamespaceScopedStrategy, + newListFunc func() runtime.Object) storage.Interface { + return storage.NewCacher( + storageInterface, capacity, etcdstorage.APIObjectVersioner{}, + objectType, resourcePrefix, scopeStrategy, newListFunc) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/registry/generic/registry/store.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/registry/generic/registry/store.go new file mode 100644 index 000000000000..e5f120124d83 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/registry/generic/registry/store.go @@ -0,0 +1,880 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package registry + +import ( + "fmt" + "reflect" + "strings" + "sync" + "time" + + "k8s.io/kubernetes/pkg/api" + kubeerr "k8s.io/kubernetes/pkg/api/errors" + storeerr "k8s.io/kubernetes/pkg/api/errors/storage" + "k8s.io/kubernetes/pkg/api/meta" + "k8s.io/kubernetes/pkg/api/rest" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/api/validation" + "k8s.io/kubernetes/pkg/fields" + "k8s.io/kubernetes/pkg/labels" + "k8s.io/kubernetes/pkg/registry/generic" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/storage" + utilruntime "k8s.io/kubernetes/pkg/util/runtime" + "k8s.io/kubernetes/pkg/util/validation/field" + "k8s.io/kubernetes/pkg/watch" + + "github.com/golang/glog" +) + +// EnableGarbageCollector affects the handling of Update and Delete requests. It +// must be synced with the corresponding flag in kube-controller-manager. +var EnableGarbageCollector bool + +// Store implements generic.Registry. +// It's intended to be embeddable, so that you can implement any +// non-generic functions if needed. +// You must supply a value for every field below before use; these are +// left public as it's meant to be overridable if need be. +// This object is intended to be copyable so that it can be used in +// different ways but share the same underlying behavior. +// +// The intended use of this type is embedding within a Kind specific +// RESTStorage implementation. This type provides CRUD semantics on +// a Kubelike resource, handling details like conflict detection with +// ResourceVersion and semantics. The RESTCreateStrategy and +// RESTUpdateStrategy are generic across all backends, and encapsulate +// logic specific to the API. 
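+//
+// As a rough, illustrative sketch (not taken from this file), an embedding
+// looks like:
+//
+//	type FooREST struct {
+//		*Store
+//	}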
+//
+// TODO: make the default exposed methods exactly match a generic RESTStorage
+type Store struct {
+	// Called to make a new object, should return e.g., &api.Pod{}
+	NewFunc func() runtime.Object
+
+	// Called to make a new listing object, should return e.g., &api.PodList{}
+	NewListFunc func() runtime.Object
+
+	// Used for error reporting
+	QualifiedResource unversioned.GroupResource
+
+	// Used for listing/watching; should not include trailing "/"
+	KeyRootFunc func(ctx api.Context) string
+
+	// Called for Create/Update/Get/Delete. Note that 'namespace' can be
+	// retrieved from ctx.
+	KeyFunc func(ctx api.Context, name string) (string, error)
+
+	// Called to get the name of an object
+	ObjectNameFunc func(obj runtime.Object) (string, error)
+
+	// Return the TTL objects should be persisted with. Update is true if this
+	// is an operation against an existing object. Existing is the current TTL
+	// or the default for this operation.
+	TTLFunc func(obj runtime.Object, existing uint64, update bool) (uint64, error)
+
+	// Returns a matcher corresponding to the provided labels and fields.
+	PredicateFunc func(label labels.Selector, field fields.Selector) generic.Matcher
+
+	// DeleteCollectionWorkers is the maximum number of workers in a single
+	// DeleteCollection call.
+	DeleteCollectionWorkers int
+
+	// Called on all objects returned from the underlying store, after
+	// the exit hooks are invoked. Decorators are intended for integrations
+	// that are above storage and should only be used for specific cases where
+	// storage of the value is not appropriate, since they cannot
+	// be watched.
+	Decorator rest.ObjectFunc
+	// Allows extended behavior during creation, required
+	CreateStrategy rest.RESTCreateStrategy
+	// On create of an object, attempt to run a further operation.
+	AfterCreate rest.ObjectFunc
+	// Allows extended behavior during updates, required
+	UpdateStrategy rest.RESTUpdateStrategy
+	// On update of an object, attempt to run a further operation.
+	AfterUpdate rest.ObjectFunc
+	// Allows extended behavior during deletion, optional
+	DeleteStrategy rest.RESTDeleteStrategy
+	// On deletion of an object, attempt to run a further operation.
+	AfterDelete rest.ObjectFunc
+	// If true, return the object that was deleted. Otherwise, return a generic
+	// success status response.
+	ReturnDeletedObject bool
+	// Allows extended behavior during export, optional
+	ExportStrategy rest.RESTExportStrategy
+
+	// Used for all storage access functions
+	Storage storage.Interface
+}
+
+const OptimisticLockErrorMsg = "the object has been modified; please apply your changes to the latest version and try again"
+
+// NamespaceKeyRootFunc is the default function for constructing storage paths to resource directories enforcing namespace rules.
+func NamespaceKeyRootFunc(ctx api.Context, prefix string) string {
+	key := prefix
+	ns, ok := api.NamespaceFrom(ctx)
+	if ok && len(ns) > 0 {
+		key = key + "/" + ns
+	}
+	return key
+}
+
+// NamespaceKeyFunc is the default function for constructing storage paths to a resource relative to prefix enforcing namespace rules.
+// If no namespace is on context, it errors.
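+//
+// For example (illustrative values), with namespace "test" on the context:
+//
+//	key, err := NamespaceKeyFunc(ctx, "/pods", "foo")
+//	// key == "/pods/test/foo", err == nil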
+func NamespaceKeyFunc(ctx api.Context, prefix string, name string) (string, error) { + key := NamespaceKeyRootFunc(ctx, prefix) + ns, ok := api.NamespaceFrom(ctx) + if !ok || len(ns) == 0 { + return "", kubeerr.NewBadRequest("Namespace parameter required.") + } + if len(name) == 0 { + return "", kubeerr.NewBadRequest("Name parameter required.") + } + if msgs := validation.IsValidPathSegmentName(name); len(msgs) != 0 { + return "", kubeerr.NewBadRequest(fmt.Sprintf("Name parameter invalid: %q: %s", name, strings.Join(msgs, ";"))) + } + key = key + "/" + name + return key, nil +} + +// NoNamespaceKeyFunc is the default function for constructing storage paths to a resource relative to prefix without a namespace +func NoNamespaceKeyFunc(ctx api.Context, prefix string, name string) (string, error) { + if len(name) == 0 { + return "", kubeerr.NewBadRequest("Name parameter required.") + } + if msgs := validation.IsValidPathSegmentName(name); len(msgs) != 0 { + return "", kubeerr.NewBadRequest(fmt.Sprintf("Name parameter invalid: %q: %s", name, strings.Join(msgs, ";"))) + } + key := prefix + "/" + name + return key, nil +} + +// New implements RESTStorage +func (e *Store) New() runtime.Object { + return e.NewFunc() +} + +// NewList implements RESTLister +func (e *Store) NewList() runtime.Object { + return e.NewListFunc() +} + +// List returns a list of items matching labels and field +func (e *Store) List(ctx api.Context, options *api.ListOptions) (runtime.Object, error) { + label := labels.Everything() + if options != nil && options.LabelSelector != nil { + label = options.LabelSelector + } + field := fields.Everything() + if options != nil && options.FieldSelector != nil { + field = options.FieldSelector + } + return e.ListPredicate(ctx, e.PredicateFunc(label, field), options) +} + +// ListPredicate returns a list of all the items matching m. +func (e *Store) ListPredicate(ctx api.Context, m generic.Matcher, options *api.ListOptions) (runtime.Object, error) { + list := e.NewListFunc() + filterFunc := e.filterAndDecorateFunction(m) + if name, ok := m.MatchesSingle(); ok { + if key, err := e.KeyFunc(ctx, name); err == nil { + err := e.Storage.GetToList(ctx, key, filterFunc, list) + return list, storeerr.InterpretListError(err, e.QualifiedResource) + } + // if we cannot extract a key based on the current context, the optimization is skipped + } + + if options == nil { + options = &api.ListOptions{ResourceVersion: "0"} + } + err := e.Storage.List(ctx, e.KeyRootFunc(ctx), options.ResourceVersion, filterFunc, list) + return list, storeerr.InterpretListError(err, e.QualifiedResource) +} + +// Create inserts a new item according to the unique key from the object. 
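+//
+// A hedged usage sketch (the pod literal is illustrative only):
+//
+//	created, err := store.Create(ctx, &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}})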
+func (e *Store) Create(ctx api.Context, obj runtime.Object) (runtime.Object, error) {
+	if err := rest.BeforeCreate(e.CreateStrategy, ctx, obj); err != nil {
+		return nil, err
+	}
+	name, err := e.ObjectNameFunc(obj)
+	if err != nil {
+		return nil, err
+	}
+	key, err := e.KeyFunc(ctx, name)
+	if err != nil {
+		return nil, err
+	}
+	ttl, err := e.calculateTTL(obj, 0, false)
+	if err != nil {
+		return nil, err
+	}
+	out := e.NewFunc()
+	if err := e.Storage.Create(ctx, key, obj, out, ttl); err != nil {
+		err = storeerr.InterpretCreateError(err, e.QualifiedResource, name)
+		err = rest.CheckGeneratedNameError(e.CreateStrategy, err, obj)
+		return nil, err
+	}
+	if e.AfterCreate != nil {
+		if err := e.AfterCreate(out); err != nil {
+			return nil, err
+		}
+	}
+	if e.Decorator != nil {
+		// decorate the object that is actually returned to the caller
+		if err := e.Decorator(out); err != nil {
+			return nil, err
+		}
+	}
+	return out, nil
+}
+
+// shouldDelete checks if an Update is removing all the object's finalizers. If so,
+// it further checks if the object's DeletionGracePeriodSeconds is 0. If so, it
+// returns true.
+func (e *Store) shouldDelete(ctx api.Context, key string, obj, existing runtime.Object) bool {
+	if !EnableGarbageCollector {
+		return false
+	}
+	newMeta, err := api.ObjectMetaFor(obj)
+	if err != nil {
+		utilruntime.HandleError(err)
+		return false
+	}
+	oldMeta, err := api.ObjectMetaFor(existing)
+	if err != nil {
+		utilruntime.HandleError(err)
+		return false
+	}
+	return len(newMeta.Finalizers) == 0 && oldMeta.DeletionGracePeriodSeconds != nil && *oldMeta.DeletionGracePeriodSeconds == 0
+}
+
+func (e *Store) deleteForEmptyFinalizers(ctx api.Context, name, key string, obj runtime.Object, preconditions *storage.Preconditions) (runtime.Object, bool, error) {
+	out := e.NewFunc()
+	glog.V(6).Infof("going to delete %s from registry, triggered by update", name)
+	if err := e.Storage.Delete(ctx, key, out, preconditions); err != nil {
+		// Deletion is racy, i.e., there could be multiple update
+		// requests to remove all finalizers from the object, so we
+		// ignore the NotFound error.
+		if storage.IsNotFound(err) {
+			_, err := e.finalizeDelete(obj, true)
+			// clients are expecting an updated object if a PUT succeeded,
+			// but finalizeDelete returns an unversioned.Status, so return
+			// the object in the request instead.
+			return obj, false, err
+		}
+		return nil, false, storeerr.InterpretDeleteError(err, e.QualifiedResource, name)
+	}
+	_, err := e.finalizeDelete(out, true)
+	// clients are expecting an updated object if a PUT succeeded, but
+	// finalizeDelete returns an unversioned.Status, so return the object in
+	// the request instead.
+	return obj, false, err
+}
+
+// Update performs an atomic update and set of the object. Returns the result of the update
+// or an error. If the registry allows create-on-update, the create flow will be executed.
+// A bool is returned along with the object and any errors, to indicate object creation.
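+//
+// Illustrative call shape, mirroring how the tests in this package drive it:
+//
+//	obj, created, err := store.Update(ctx, pod.Name, rest.DefaultUpdatedObjectInfo(pod, api.Scheme))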
+func (e *Store) Update(ctx api.Context, name string, objInfo rest.UpdatedObjectInfo) (runtime.Object, bool, error) {
+	key, err := e.KeyFunc(ctx, name)
+	if err != nil {
+		return nil, false, err
+	}
+
+	var (
+		creatingObj runtime.Object
+		creating    = false
+	)
+
+	storagePreconditions := &storage.Preconditions{}
+	if preconditions := objInfo.Preconditions(); preconditions != nil {
+		storagePreconditions.UID = preconditions.UID
+	}
+
+	out := e.NewFunc()
+	// deleteObj is only used in case a deletion is carried out
+	var deleteObj runtime.Object
+	err = e.Storage.GuaranteedUpdate(ctx, key, out, true, storagePreconditions, func(existing runtime.Object, res storage.ResponseMeta) (runtime.Object, *uint64, error) {
+		// Given the existing object, get the new object
+		obj, err := objInfo.UpdatedObject(ctx, existing)
+		if err != nil {
+			return nil, nil, err
+		}
+
+		// If AllowUnconditionalUpdate() is true and the object specified by the user does not have a resource version,
+		// then we populate it with the latest version.
+		// Else, we check that the version specified by the user matches the version of the latest storage object.
+		resourceVersion, err := e.Storage.Versioner().ObjectResourceVersion(obj)
+		if err != nil {
+			return nil, nil, err
+		}
+		doUnconditionalUpdate := resourceVersion == 0 && e.UpdateStrategy.AllowUnconditionalUpdate()
+
+		version, err := e.Storage.Versioner().ObjectResourceVersion(existing)
+		if err != nil {
+			return nil, nil, err
+		}
+		if version == 0 {
+			if !e.UpdateStrategy.AllowCreateOnUpdate() {
+				return nil, nil, kubeerr.NewNotFound(e.QualifiedResource, name)
+			}
+			creating = true
+			creatingObj = obj
+			if err := rest.BeforeCreate(e.CreateStrategy, ctx, obj); err != nil {
+				return nil, nil, err
+			}
+			ttl, err := e.calculateTTL(obj, 0, false)
+			if err != nil {
+				return nil, nil, err
+			}
+			return obj, &ttl, nil
+		}
+
+		creating = false
+		creatingObj = nil
+		if doUnconditionalUpdate {
+			// Update the object's resource version to match the latest storage object's resource version.
+			err = e.Storage.Versioner().UpdateObject(obj, res.ResourceVersion)
+			if err != nil {
+				return nil, nil, err
+			}
+		} else {
+			// Check if the object's resource version matches the latest resource version.
+			newVersion, err := e.Storage.Versioner().ObjectResourceVersion(obj)
+			if err != nil {
+				return nil, nil, err
+			}
+			if newVersion == 0 {
+				// TODO: The Invalid error should have a field for Resource.
+				// After that field is added, we should fill the Resource and
+				// leave the Kind field empty. See the discussion in #18526.
+ qualifiedKind := unversioned.GroupKind{Group: e.QualifiedResource.Group, Kind: e.QualifiedResource.Resource} + fieldErrList := field.ErrorList{field.Invalid(field.NewPath("metadata").Child("resourceVersion"), newVersion, "must be specified for an update")} + return nil, nil, kubeerr.NewInvalid(qualifiedKind, name, fieldErrList) + } + if newVersion != version { + return nil, nil, kubeerr.NewConflict(e.QualifiedResource, name, fmt.Errorf(OptimisticLockErrorMsg)) + } + } + if err := rest.BeforeUpdate(e.UpdateStrategy, ctx, obj, existing); err != nil { + return nil, nil, err + } + delete := e.shouldDelete(ctx, key, obj, existing) + if delete { + deleteObj = obj + return nil, nil, errEmptiedFinalizers + } + ttl, err := e.calculateTTL(obj, res.TTL, true) + if err != nil { + return nil, nil, err + } + if int64(ttl) != res.TTL { + return obj, &ttl, nil + } + return obj, nil, nil + }) + + if err != nil { + // delete the object + if err == errEmptiedFinalizers { + return e.deleteForEmptyFinalizers(ctx, name, key, deleteObj, storagePreconditions) + } + if creating { + err = storeerr.InterpretCreateError(err, e.QualifiedResource, name) + err = rest.CheckGeneratedNameError(e.CreateStrategy, err, creatingObj) + } else { + err = storeerr.InterpretUpdateError(err, e.QualifiedResource, name) + } + return nil, false, err + } + if creating { + if e.AfterCreate != nil { + if err := e.AfterCreate(out); err != nil { + return nil, false, err + } + } + } else { + if e.AfterUpdate != nil { + if err := e.AfterUpdate(out); err != nil { + return nil, false, err + } + } + } + if e.Decorator != nil { + if err := e.Decorator(out); err != nil { + return nil, false, err + } + } + return out, creating, nil +} + +// Get retrieves the item from storage. +func (e *Store) Get(ctx api.Context, name string) (runtime.Object, error) { + obj := e.NewFunc() + key, err := e.KeyFunc(ctx, name) + if err != nil { + return nil, err + } + if err := e.Storage.Get(ctx, key, obj, false); err != nil { + return nil, storeerr.InterpretGetError(err, e.QualifiedResource, name) + } + if e.Decorator != nil { + if err := e.Decorator(obj); err != nil { + return nil, err + } + } + return obj, nil +} + +var ( + errAlreadyDeleting = fmt.Errorf("abort delete") + errDeleteNow = fmt.Errorf("delete now") + errEmptiedFinalizers = fmt.Errorf("emptied finalizers") +) + +// return if we need to update the finalizers of the object, and the desired list of finalizers +func shouldUpdateFinalizers(accessor meta.Object, options *api.DeleteOptions) (shouldUpdate bool, newFinalizers []string) { + if options == nil || options.OrphanDependents == nil { + return false, accessor.GetFinalizers() + } + shouldOrphan := *options.OrphanDependents + alreadyOrphan := false + finalizers := accessor.GetFinalizers() + newFinalizers = make([]string, 0, len(finalizers)) + for _, f := range finalizers { + if f == api.FinalizerOrphan { + alreadyOrphan = true + if !shouldOrphan { + continue + } + } + newFinalizers = append(newFinalizers, f) + } + if shouldOrphan && !alreadyOrphan { + newFinalizers = append(newFinalizers, api.FinalizerOrphan) + } + shouldUpdate = shouldOrphan != alreadyOrphan + return shouldUpdate, newFinalizers +} + +// markAsDeleting sets the obj's DeletionGracePeriodSeconds to 0, and sets the +// DeletionTimestamp to "now". Finalizers are watching for such updates and will +// finalize the object if their IDs are present in the object's Finalizers list. 
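+//
+// Sketch of the effect (the pod value is illustrative):
+//
+//	_ = markAsDeleting(pod)
+//	// pod.DeletionTimestamp is now set to the current time, and
+//	// *pod.DeletionGracePeriodSeconds == 0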
+func markAsDeleting(obj runtime.Object) (err error) {
+	objectMeta, kerr := api.ObjectMetaFor(obj)
+	if kerr != nil {
+		return kerr
+	}
+	now := unversioned.NewTime(time.Now())
+	objectMeta.DeletionTimestamp = &now
+	var zero int64 = 0
+	objectMeta.DeletionGracePeriodSeconds = &zero
+	return nil
+}
+
+// this function needs to be kept in sync with updateForGracefulDeletionAndFinalizers.
+func (e *Store) updateForGracefulDeletion(ctx api.Context, name, key string, options *api.DeleteOptions, preconditions storage.Preconditions, in runtime.Object) (err error, ignoreNotFound, deleteImmediately bool, out, lastExisting runtime.Object) {
+	lastGraceful := int64(0)
+	out = e.NewFunc()
+	err = e.Storage.GuaranteedUpdate(
+		ctx, key, out, false, &preconditions,
+		storage.SimpleUpdate(func(existing runtime.Object) (runtime.Object, error) {
+			graceful, pendingGraceful, err := rest.BeforeDelete(e.DeleteStrategy, ctx, existing, options)
+			if err != nil {
+				return nil, err
+			}
+			if pendingGraceful {
+				return nil, errAlreadyDeleting
+			}
+			if !graceful {
+				return nil, errDeleteNow
+			}
+			lastGraceful = *options.GracePeriodSeconds
+			lastExisting = existing
+			return existing, nil
+		}),
+	)
+	switch err {
+	case nil:
+		if lastGraceful > 0 {
+			return nil, false, false, out, lastExisting
+		}
+		// If we are here, the registry supports the grace period mechanism and
+		// we are intentionally deleting gracelessly. In this case, we may
+		// enter a race with other k8s components. If the other component wins
+		// the race, the object will not be found, and we should tolerate
+		// the NotFound error. See
+		// https://github.com/kubernetes/kubernetes/issues/19403 for
+		// details.
+		return nil, true, true, out, lastExisting
+	case errDeleteNow:
+		// we've updated the object to have a zero grace period, or it's already at 0, so
+		// we should fall through and truly delete the object.
+		return nil, false, true, out, lastExisting
+	case errAlreadyDeleting:
+		out, err = e.finalizeDelete(in, true)
+		return err, false, false, out, lastExisting
+	default:
+		return storeerr.InterpretUpdateError(err, e.QualifiedResource, name), false, false, out, lastExisting
+	}
+}
+
+// this function needs to be kept in sync with updateForGracefulDeletion.
+func (e *Store) updateForGracefulDeletionAndFinalizers(ctx api.Context, name, key string, options *api.DeleteOptions, preconditions storage.Preconditions, in runtime.Object) (err error, ignoreNotFound, deleteImmediately bool, out, lastExisting runtime.Object) {
+	lastGraceful := int64(0)
+	var pendingFinalizers bool
+	out = e.NewFunc()
+	err = e.Storage.GuaranteedUpdate(
+		ctx, key, out, false, &preconditions,
+		storage.SimpleUpdate(func(existing runtime.Object) (runtime.Object, error) {
+			graceful, pendingGraceful, err := rest.BeforeDelete(e.DeleteStrategy, ctx, existing, options)
+			if err != nil {
+				return nil, err
+			}
+			if pendingGraceful {
+				return nil, errAlreadyDeleting
+			}
+
+			// Add/remove the orphan finalizer as the options dictate.
+			// Note that this occurs after checking pendingGraceful, so
+			// finalizers cannot be updated via DeleteOptions if deletion has
+			// started.
+			existingAccessor, err := meta.Accessor(existing)
+			if err != nil {
+				return nil, err
+			}
+			shouldUpdate, newFinalizers := shouldUpdateFinalizers(existingAccessor, options)
+			if shouldUpdate {
+				existingAccessor.SetFinalizers(newFinalizers)
+			}
+
+			if !graceful {
+				// set DeletionGracePeriodSeconds to 0 if the object has pending finalizers but does not support graceful deletion
+				pendingFinalizers = len(existingAccessor.GetFinalizers()) != 0
+				if pendingFinalizers {
+					glog.V(6).Infof("update the DeletionTimestamp to \"now\" and GracePeriodSeconds to 0 for object %s, because it has pending finalizers", name)
+					err = markAsDeleting(existing)
+					if err != nil {
+						return nil, err
+					}
+					return existing, nil
+				}
+				return nil, errDeleteNow
+			}
+			lastGraceful = *options.GracePeriodSeconds
+			lastExisting = existing
+			return existing, nil
+		}),
+	)
+	switch err {
+	case nil:
+		// If there are pending finalizers, we never delete the object immediately.
+		if pendingFinalizers {
+			return nil, false, false, out, lastExisting
+		}
+		if lastGraceful > 0 {
+			return nil, false, false, out, lastExisting
+		}
+		// If we are here, the registry supports the grace period mechanism and
+		// we are intentionally deleting gracelessly. In this case, we may
+		// enter a race with other k8s components. If the other component wins
+		// the race, the object will not be found, and we should tolerate
+		// the NotFound error. See
+		// https://github.com/kubernetes/kubernetes/issues/19403 for
+		// details.
+		return nil, true, true, out, lastExisting
+	case errDeleteNow:
+		// we've updated the object to have a zero grace period, or it's already at 0, so
+		// we should fall through and truly delete the object.
+		return nil, false, true, out, lastExisting
+	case errAlreadyDeleting:
+		out, err = e.finalizeDelete(in, true)
+		return err, false, false, out, lastExisting
+	default:
+		return storeerr.InterpretUpdateError(err, e.QualifiedResource, name), false, false, out, lastExisting
+	}
+}
+
+// Delete removes the item from storage.
+func (e *Store) Delete(ctx api.Context, name string, options *api.DeleteOptions) (runtime.Object, error) {
+	key, err := e.KeyFunc(ctx, name)
+	if err != nil {
+		return nil, err
+	}
+
+	obj := e.NewFunc()
+	if err := e.Storage.Get(ctx, key, obj, false); err != nil {
+		return nil, storeerr.InterpretDeleteError(err, e.QualifiedResource, name)
+	}
+	// support older consumers of delete by treating "nil" as delete immediately
+	if options == nil {
+		options = api.NewDeleteOptions(0)
+	}
+	var preconditions storage.Preconditions
+	if options.Preconditions != nil {
+		preconditions.UID = options.Preconditions.UID
+	}
+	graceful, pendingGraceful, err := rest.BeforeDelete(e.DeleteStrategy, ctx, obj, options)
+	if err != nil {
+		return nil, err
+	}
+	// this means finalizers cannot be updated via DeleteOptions if a deletion is already pending
+	if pendingGraceful {
+		return e.finalizeDelete(obj, false)
+	}
+	// check if obj has pending finalizers
+	accessor, err := meta.Accessor(obj)
+	if err != nil {
+		return nil, kubeerr.NewInternalError(err)
+	}
+	pendingFinalizers := len(accessor.GetFinalizers()) != 0
+	var ignoreNotFound bool
+	deleteImmediately := true
+	var lastExisting, out runtime.Object
+	if !EnableGarbageCollector {
+		// TODO: remove the check on graceful, because we support no-op updates now.
+		if graceful {
+			err, ignoreNotFound, deleteImmediately, out, lastExisting = e.updateForGracefulDeletion(ctx, name, key, options, preconditions, obj)
+		}
+	} else {
+		shouldUpdateFinalizers, _ := shouldUpdateFinalizers(accessor, options)
+		// TODO: remove the check, because we support no-op updates now.
+		if graceful || pendingFinalizers || shouldUpdateFinalizers {
+			err, ignoreNotFound, deleteImmediately, out, lastExisting = e.updateForGracefulDeletionAndFinalizers(ctx, name, key, options, preconditions, obj)
+		}
+	}
+	// !deleteImmediately covers all cases where err != nil. We keep both to be future-proof.
+	if !deleteImmediately || err != nil {
+		return out, err
+	}
+
+	// delete immediately, or no graceful deletion supported
+	glog.V(6).Infof("going to delete %s from registry", name)
+	out = e.NewFunc()
+	if err := e.Storage.Delete(ctx, key, out, &preconditions); err != nil {
+		// Please refer to the place where we set ignoreNotFound for the reason
+		// why we ignore the NotFound error.
+		if storage.IsNotFound(err) && ignoreNotFound && lastExisting != nil {
+			// The lastExisting object may not be the last state of the object
+			// before its deletion, but it's the best approximation.
+			return e.finalizeDelete(lastExisting, true)
+		}
+		return nil, storeerr.InterpretDeleteError(err, e.QualifiedResource, name)
+	}
+	return e.finalizeDelete(out, true)
+}
+
+// DeleteCollection removes all items returned by List with a given ListOptions from storage.
+//
+// DeleteCollection is currently NOT atomic. It can happen that only a subset of objects
+// will be deleted from storage, and then an error will be returned.
+// In case of success, the list of deleted objects will be returned.
+//
+// TODO: Currently, there is no easy way to remove a 'directory' entry from storage (if we
+// are removing all objects of a given type) with the current API (it's technically
+// possible with the storage API, but watch is not delivered correctly then).
+// It will be possible to fix it with the v3 etcd API.
+func (e *Store) DeleteCollection(ctx api.Context, options *api.DeleteOptions, listOptions *api.ListOptions) (runtime.Object, error) {
+	listObj, err := e.List(ctx, listOptions)
+	if err != nil {
+		return nil, err
+	}
+	items, err := meta.ExtractList(listObj)
+	if err != nil {
+		return nil, err
+	}
+	// Spawn a number of goroutines, so that we can issue requests to storage
+	// in parallel to speed up deletion.
+	// TODO: Make this proportional to the number of items to delete, up to
+	// DeleteCollectionWorkers (it doesn't make much sense to spawn 16
+	// workers to delete 10 items).
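+	//
+	// One possible (untested) shape for that TODO, capping the worker count
+	// at the number of items:
+	//
+	//	workersNumber := e.DeleteCollectionWorkers
+	//	if len(items) < workersNumber {
+	//		workersNumber = len(items)
+	//	}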
+	workersNumber := e.DeleteCollectionWorkers
+	if workersNumber < 1 {
+		workersNumber = 1
+	}
+	wg := sync.WaitGroup{}
+	toProcess := make(chan int, 2*workersNumber)
+	errs := make(chan error, workersNumber+1)
+
+	go func() {
+		defer utilruntime.HandleCrash(func(panicReason interface{}) {
+			errs <- fmt.Errorf("DeleteCollection distributor panicked: %v", panicReason)
+		})
+		for i := 0; i < len(items); i++ {
+			toProcess <- i
+		}
+		close(toProcess)
+	}()
+
+	wg.Add(workersNumber)
+	for i := 0; i < workersNumber; i++ {
+		go func() {
+			// panics don't cross goroutine boundaries
+			defer utilruntime.HandleCrash(func(panicReason interface{}) {
+				errs <- fmt.Errorf("DeleteCollection goroutine panicked: %v", panicReason)
+			})
+			defer wg.Done()
+
+			for {
+				index, ok := <-toProcess
+				if !ok {
+					return
+				}
+				accessor, err := meta.Accessor(items[index])
+				if err != nil {
+					errs <- err
+					return
+				}
+				if _, err := e.Delete(ctx, accessor.GetName(), options); err != nil && !kubeerr.IsNotFound(err) {
+					glog.V(4).Infof("Delete %s in DeleteCollection failed: %v", accessor.GetName(), err)
+					errs <- err
+					return
+				}
+			}
+		}()
+	}
+	wg.Wait()
+	select {
+	case err := <-errs:
+		return nil, err
+	default:
+		return listObj, nil
+	}
+}
+
+func (e *Store) finalizeDelete(obj runtime.Object, runHooks bool) (runtime.Object, error) {
+	if runHooks && e.AfterDelete != nil {
+		if err := e.AfterDelete(obj); err != nil {
+			return nil, err
+		}
+	}
+	if e.ReturnDeletedObject {
+		if e.Decorator != nil {
+			if err := e.Decorator(obj); err != nil {
+				return nil, err
+			}
+		}
+		return obj, nil
+	}
+	return &unversioned.Status{Status: unversioned.StatusSuccess}, nil
+}
+
+// Watch makes a matcher for the given label and field, and calls
+// WatchPredicate. If possible, you should customize PredicateFunc to produce a
+// matcher that matches by key. generic.SelectionPredicate does this for you
+// automatically.
+func (e *Store) Watch(ctx api.Context, options *api.ListOptions) (watch.Interface, error) {
+	label := labels.Everything()
+	if options != nil && options.LabelSelector != nil {
+		label = options.LabelSelector
+	}
+	field := fields.Everything()
+	if options != nil && options.FieldSelector != nil {
+		field = options.FieldSelector
+	}
+	resourceVersion := ""
+	if options != nil {
+		resourceVersion = options.ResourceVersion
+	}
+	return e.WatchPredicate(ctx, e.PredicateFunc(label, field), resourceVersion)
+}
+
+// WatchPredicate starts a watch for the items that m matches.
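+//
+// Illustrative only: the single-object fast path relies on the matcher's
+// MatchesSingle returning the object's name, as the test matchers below do:
+//
+//	w, err := store.WatchPredicate(ctx, store.PredicateFunc(label, field), "0")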
+func (e *Store) WatchPredicate(ctx api.Context, m generic.Matcher, resourceVersion string) (watch.Interface, error) {
+	filterFunc := e.filterAndDecorateFunction(m)
+
+	if name, ok := m.MatchesSingle(); ok {
+		if key, err := e.KeyFunc(ctx, name); err == nil {
+			return e.Storage.Watch(ctx, key, resourceVersion, filterFunc)
+		}
+		// if we cannot extract a key based on the current context, the optimization is skipped
+	}
+
+	return e.Storage.WatchList(ctx, e.KeyRootFunc(ctx), resourceVersion, filterFunc)
+}
+
+func (e *Store) filterAndDecorateFunction(m generic.Matcher) func(runtime.Object) bool {
+	return func(obj runtime.Object) bool {
+		matches, err := m.Matches(obj)
+		if err != nil {
+			glog.Errorf("unable to match watch: %v", err)
+			return false
+		}
+		if matches && e.Decorator != nil {
+			if err := e.Decorator(obj); err != nil {
+				glog.Errorf("unable to decorate watch: %v", err)
+				return false
+			}
+		}
+		return matches
+	}
+}
+
+// calculateTTL is a helper for retrieving the updated TTL for an object or returning an error
+// if the TTL cannot be calculated. The defaultTTL is changed to 1 if less than zero. Zero means
+// no TTL, not expire immediately.
+func (e *Store) calculateTTL(obj runtime.Object, defaultTTL int64, update bool) (ttl uint64, err error) {
+	// TODO: validate this assertion is still valid.
+	// etcd may return a negative TTL for a node if the expiration has not occurred due
+	// to server lag - we will ensure that the value is at least set.
+	if defaultTTL < 0 {
+		defaultTTL = 1
+	}
+	ttl = uint64(defaultTTL)
+	if e.TTLFunc != nil {
+		ttl, err = e.TTLFunc(obj, ttl, update)
+	}
+	return ttl, err
+}
+
+func exportObjectMeta(accessor meta.Object, exact bool) {
+	accessor.SetUID("")
+	if !exact {
+		accessor.SetNamespace("")
+	}
+	accessor.SetCreationTimestamp(unversioned.Time{})
+	accessor.SetDeletionTimestamp(nil)
+	accessor.SetResourceVersion("")
+	accessor.SetSelfLink("")
+	if len(accessor.GetGenerateName()) > 0 && !exact {
+		accessor.SetName("")
+	}
+}
+
+// Export implements the rest.Exporter interface.
+func (e *Store) Export(ctx api.Context, name string, opts unversioned.ExportOptions) (runtime.Object, error) {
+	obj, err := e.Get(ctx, name)
+	if err != nil {
+		return nil, err
+	}
+	if accessor, err := meta.Accessor(obj); err == nil {
+		exportObjectMeta(accessor, opts.Exact)
+	} else {
+		glog.V(4).Infof("Object of type %v does not have ObjectMeta: %v", reflect.TypeOf(obj), err)
+	}
+
+	if e.ExportStrategy != nil {
+		if err = e.ExportStrategy.Export(obj, opts.Exact); err != nil {
+			return nil, err
+		}
+	} else {
+		e.CreateStrategy.PrepareForCreate(obj)
+	}
+	return obj, nil
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/registry/generic/registry/store_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/registry/generic/registry/store_test.go
new file mode 100644
index 000000000000..e26bec885d6a
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/registry/generic/registry/store_test.go
@@ -0,0 +1,977 @@
+/*
+Copyright 2014 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package registry + +import ( + "fmt" + "path" + "reflect" + "strconv" + "testing" + + "sync" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/errors" + "k8s.io/kubernetes/pkg/api/meta" + "k8s.io/kubernetes/pkg/api/rest" + "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/fields" + "k8s.io/kubernetes/pkg/labels" + "k8s.io/kubernetes/pkg/registry/generic" + "k8s.io/kubernetes/pkg/runtime" + etcdstorage "k8s.io/kubernetes/pkg/storage/etcd" + "k8s.io/kubernetes/pkg/storage/etcd/etcdtest" + etcdtesting "k8s.io/kubernetes/pkg/storage/etcd/testing" + storagetesting "k8s.io/kubernetes/pkg/storage/testing" + "k8s.io/kubernetes/pkg/util/sets" + "k8s.io/kubernetes/pkg/util/validation/field" +) + +type testRESTStrategy struct { + runtime.ObjectTyper + api.NameGenerator + namespaceScoped bool + allowCreateOnUpdate bool + allowUnconditionalUpdate bool +} + +func (t *testRESTStrategy) NamespaceScoped() bool { return t.namespaceScoped } +func (t *testRESTStrategy) AllowCreateOnUpdate() bool { return t.allowCreateOnUpdate } +func (t *testRESTStrategy) AllowUnconditionalUpdate() bool { return t.allowUnconditionalUpdate } + +func (t *testRESTStrategy) PrepareForCreate(obj runtime.Object) { + metaObj, err := meta.Accessor(obj) + if err != nil { + panic(err.Error()) + } + labels := metaObj.GetLabels() + if labels == nil { + labels = map[string]string{} + } + labels["prepare_create"] = "true" + metaObj.SetLabels(labels) +} + +func (t *testRESTStrategy) PrepareForUpdate(obj, old runtime.Object) {} +func (t *testRESTStrategy) Validate(ctx api.Context, obj runtime.Object) field.ErrorList { + return nil +} +func (t *testRESTStrategy) ValidateUpdate(ctx api.Context, obj, old runtime.Object) field.ErrorList { + return nil +} +func (t *testRESTStrategy) Canonicalize(obj runtime.Object) {} + +func hasCreated(t *testing.T, pod *api.Pod) func(runtime.Object) bool { + return func(obj runtime.Object) bool { + actualPod := obj.(*api.Pod) + if !api.Semantic.DeepDerivative(pod.Status, actualPod.Status) { + t.Errorf("not a deep derivative %#v", actualPod) + return false + } + return api.HasObjectMetaSystemFieldValues(&actualPod.ObjectMeta) + } +} + +func NewTestGenericStoreRegistry(t *testing.T) (*etcdtesting.EtcdTestServer, *Store) { + podPrefix := "/pods" + server := etcdtesting.NewEtcdTestClientServer(t) + s := etcdstorage.NewEtcdStorage(server.Client, testapi.Default.StorageCodec(), etcdtest.PathPrefix(), false, etcdtest.DeserializationCacheSize) + strategy := &testRESTStrategy{api.Scheme, api.SimpleNameGenerator, true, false, true} + + return server, &Store{ + NewFunc: func() runtime.Object { return &api.Pod{} }, + NewListFunc: func() runtime.Object { return &api.PodList{} }, + QualifiedResource: api.Resource("pods"), + CreateStrategy: strategy, + UpdateStrategy: strategy, + DeleteStrategy: strategy, + KeyRootFunc: func(ctx api.Context) string { + return podPrefix + }, + KeyFunc: func(ctx api.Context, id string) (string, error) { + if _, ok := api.NamespaceFrom(ctx); !ok { + return "", fmt.Errorf("namespace is required") + } + return path.Join(podPrefix, id), nil + }, + ObjectNameFunc: func(obj runtime.Object) (string, error) { return obj.(*api.Pod).Name, nil }, + PredicateFunc: func(label labels.Selector, field fields.Selector) generic.Matcher { + return &generic.SelectionPredicate{ + Label: label, + Field: field, + GetAttrs: func(obj runtime.Object) 
(labels.Set, fields.Set, error) { + pod, ok := obj.(*api.Pod) + if !ok { + return nil, nil, fmt.Errorf("not a pod") + } + return labels.Set(pod.ObjectMeta.Labels), generic.ObjectMetaFieldsSet(pod.ObjectMeta, true), nil + }, + } + }, + Storage: s, + } +} + +// setMatcher is a matcher that matches any pod with id in the set. +// Makes testing simpler. +type setMatcher struct { + sets.String +} + +func (sm setMatcher) Matches(obj runtime.Object) (bool, error) { + pod, ok := obj.(*api.Pod) + if !ok { + return false, fmt.Errorf("wrong object") + } + return sm.Has(pod.Name), nil +} + +func (sm setMatcher) MatchesSingle() (string, bool) { + if sm.Len() == 1 { + // Since pod name is its key, we can optimize this case. + return sm.List()[0], true + } + return "", false +} + +// everythingMatcher matches everything +type everythingMatcher struct{} + +func (everythingMatcher) Matches(obj runtime.Object) (bool, error) { + return true, nil +} + +func (everythingMatcher) MatchesSingle() (string, bool) { + return "", false +} + +func TestStoreList(t *testing.T) { + podA := &api.Pod{ + ObjectMeta: api.ObjectMeta{Namespace: "test", Name: "bar"}, + Spec: api.PodSpec{NodeName: "machine"}, + } + podB := &api.Pod{ + ObjectMeta: api.ObjectMeta{Namespace: "test", Name: "foo"}, + Spec: api.PodSpec{NodeName: "machine"}, + } + + testContext := api.WithNamespace(api.NewContext(), "test") + noNamespaceContext := api.NewContext() + + table := map[string]struct { + in *api.PodList + m generic.Matcher + out runtime.Object + context api.Context + }{ + "notFound": { + in: nil, + m: everythingMatcher{}, + out: &api.PodList{Items: []api.Pod{}}, + }, + "normal": { + in: &api.PodList{Items: []api.Pod{*podA, *podB}}, + m: everythingMatcher{}, + out: &api.PodList{Items: []api.Pod{*podA, *podB}}, + }, + "normalFiltered": { + in: &api.PodList{Items: []api.Pod{*podA, *podB}}, + m: setMatcher{sets.NewString("foo")}, + out: &api.PodList{Items: []api.Pod{*podB}}, + }, + "normalFilteredNoNamespace": { + in: &api.PodList{Items: []api.Pod{*podA, *podB}}, + m: setMatcher{sets.NewString("foo")}, + out: &api.PodList{Items: []api.Pod{*podB}}, + context: noNamespaceContext, + }, + "normalFilteredMatchMultiple": { + in: &api.PodList{Items: []api.Pod{*podA, *podB}}, + m: setMatcher{sets.NewString("foo", "makeMatchSingleReturnFalse")}, + out: &api.PodList{Items: []api.Pod{*podB}}, + }, + } + + for name, item := range table { + ctx := testContext + if item.context != nil { + ctx = item.context + } + server, registry := NewTestGenericStoreRegistry(t) + + if item.in != nil { + if err := storagetesting.CreateList("/pods", registry.Storage, item.in); err != nil { + t.Errorf("Unexpected error %v", err) + } + } + + list, err := registry.ListPredicate(ctx, item.m, nil) + if err != nil { + t.Errorf("Unexpected error %v", err) + continue + } + + // DeepDerivative e,a is needed here b/c the storage layer sets ResourceVersion + if e, a := item.out, list; !api.Semantic.DeepDerivative(e, a) { + t.Errorf("%v: Expected %#v, got %#v", name, e, a) + } + server.Terminate(t) + } +} + +func TestStoreCreate(t *testing.T) { + podA := &api.Pod{ + ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: "test"}, + Spec: api.PodSpec{NodeName: "machine"}, + } + podB := &api.Pod{ + ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: "test"}, + Spec: api.PodSpec{NodeName: "machine2"}, + } + + testContext := api.WithNamespace(api.NewContext(), "test") + server, registry := NewTestGenericStoreRegistry(t) + defer server.Terminate(t) + + // create the object + objA, err := 
registry.Create(testContext, podA)
+	if err != nil {
+		t.Errorf("Unexpected error: %v", err)
+	}
+
+	// get the object
+	checkobj, err := registry.Get(testContext, podA.Name)
+	if err != nil {
+		t.Errorf("Unexpected error: %v", err)
+	}
+
+	// verify objects are equal
+	if e, a := objA, checkobj; !reflect.DeepEqual(e, a) {
+		t.Errorf("Expected %#v, got %#v", e, a)
+	}
+
+	// now try to create the second pod
+	_, err = registry.Create(testContext, podB)
+	if !errors.IsAlreadyExists(err) {
+		t.Errorf("Unexpected error: %v", err)
+	}
+}
+
+func updateAndVerify(t *testing.T, ctx api.Context, registry *Store, pod *api.Pod) bool {
+	obj, _, err := registry.Update(ctx, pod.Name, rest.DefaultUpdatedObjectInfo(pod, api.Scheme))
+	if err != nil {
+		t.Errorf("Unexpected error: %v", err)
+		return false
+	}
+	checkObj, err := registry.Get(ctx, pod.Name)
+	if err != nil {
+		t.Errorf("Unexpected error: %v", err)
+		return false
+	}
+	if e, a := obj, checkObj; !reflect.DeepEqual(e, a) {
+		t.Errorf("Expected %#v, got %#v", e, a)
+		return false
+	}
+	return true
+}
+
+func TestStoreUpdate(t *testing.T) {
+	podA := &api.Pod{
+		ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: "test"},
+		Spec:       api.PodSpec{NodeName: "machine"},
+	}
+	podB := &api.Pod{
+		ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: "test"},
+		Spec:       api.PodSpec{NodeName: "machine2"},
+	}
+	podAWithResourceVersion := &api.Pod{
+		ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: "test", ResourceVersion: "7"},
+		Spec:       api.PodSpec{NodeName: "machine"},
+	}
+
+	testContext := api.WithNamespace(api.NewContext(), "test")
+	server, registry := NewTestGenericStoreRegistry(t)
+	defer server.Terminate(t)
+
+	// Test1 try to update a non-existing object
+	_, _, err := registry.Update(testContext, podA.Name, rest.DefaultUpdatedObjectInfo(podA, api.Scheme))
+	if !errors.IsNotFound(err) {
+		t.Errorf("Unexpected error: %v", err)
+	}
+
+	// Test2 createIfNotFound and verify
+	registry.UpdateStrategy.(*testRESTStrategy).allowCreateOnUpdate = true
+	if !updateAndVerify(t, testContext, registry, podA) {
+		t.Errorf("Unexpected error updating podA")
+	}
+	registry.UpdateStrategy.(*testRESTStrategy).allowCreateOnUpdate = false
+
+	// Test3 outofDate
+	_, _, err = registry.Update(testContext, podAWithResourceVersion.Name, rest.DefaultUpdatedObjectInfo(podAWithResourceVersion, api.Scheme))
+	if !errors.IsConflict(err) {
+		t.Errorf("Unexpected error updating podAWithResourceVersion: %v", err)
+	}
+
+	// Test4 normal update and verify
+	if !updateAndVerify(t, testContext, registry, podB) {
+		t.Errorf("Unexpected error updating podB")
+	}
+
+	// Test5 unconditional update
+	// NOTE: The logic for unconditional updates doesn't make sense to me, and imho should be removed.
+	// doUnconditionalUpdate := resourceVersion == 0 && e.UpdateStrategy.AllowUnconditionalUpdate()
+	// ^^ That condition can never be true due to the creation of root objects.
+	//
+	// registry.UpdateStrategy.(*testRESTStrategy).allowUnconditionalUpdate = true
+	// updateAndVerify(t, testContext, registry, podAWithResourceVersion)
+
+}
+
+func TestNoOpUpdates(t *testing.T) {
+	server, registry := NewTestGenericStoreRegistry(t)
+	defer server.Terminate(t)
+
+	newPod := func() *api.Pod {
+		return &api.Pod{
+			ObjectMeta: api.ObjectMeta{
+				Namespace: api.NamespaceDefault,
+				Name:      "foo",
+				Labels:    map[string]string{"prepare_create": "true"},
+			},
+			Spec: api.PodSpec{NodeName: "machine"},
+		}
+	}
+
+	var err error
+	var createResult runtime.Object
+	if createResult, err = registry.Create(api.NewDefaultContext(), newPod()); err != nil {
+		t.Fatalf("Unexpected error: %v", err)
+	}
+
+	createdPod, err := registry.Get(api.NewDefaultContext(), "foo")
+	if err != nil {
+		t.Fatalf("Unexpected error: %v", err)
+	}
+
+	var updateResult runtime.Object
+	p := newPod()
+	if updateResult, _, err = registry.Update(api.NewDefaultContext(), p.Name, rest.DefaultUpdatedObjectInfo(p, api.Scheme)); err != nil {
+		t.Fatalf("Unexpected error: %v", err)
+	}
+
+	// Check that we do not return an empty result on a no-op update.
+	if !reflect.DeepEqual(createResult, updateResult) {
+		t.Errorf("no-op update should return a correct value, got: %#v", updateResult)
+	}
+
+	updatedPod, err := registry.Get(api.NewDefaultContext(), "foo")
+	if err != nil {
+		t.Fatalf("Unexpected error: %v", err)
+	}
+
+	createdMeta, err := meta.Accessor(createdPod)
+	if err != nil {
+		t.Fatalf("Unexpected error: %v", err)
+	}
+	updatedMeta, err := meta.Accessor(updatedPod)
+	if err != nil {
+		t.Fatalf("Unexpected error: %v", err)
+	}
+
+	if createdMeta.GetResourceVersion() != updatedMeta.GetResourceVersion() {
+		t.Errorf("no-op update should be ignored and not written to etcd")
+	}
+}
+
+// TODO: Add a test to check a no-op update if we have an object with a
+// ResourceVersion already stored in etcd. Currently there is no easy way to
+// store an object with a ResourceVersion in etcd.
+ +type testPodExport struct{} + +func (t testPodExport) Export(obj runtime.Object, exact bool) error { + pod := obj.(*api.Pod) + if pod.Labels == nil { + pod.Labels = map[string]string{} + } + pod.Labels["exported"] = "true" + pod.Labels["exact"] = strconv.FormatBool(exact) + + return nil +} + +func TestStoreCustomExport(t *testing.T) { + podA := api.Pod{ + ObjectMeta: api.ObjectMeta{ + Namespace: "test", + Name: "foo", + Labels: map[string]string{}, + }, + Spec: api.PodSpec{NodeName: "machine"}, + } + + server, registry := NewTestGenericStoreRegistry(t) + defer server.Terminate(t) + + registry.ExportStrategy = testPodExport{} + + testContext := api.WithNamespace(api.NewContext(), "test") + registry.UpdateStrategy.(*testRESTStrategy).allowCreateOnUpdate = true + if !updateAndVerify(t, testContext, registry, &podA) { + t.Errorf("Unexpected error updating podA") + } + + obj, err := registry.Export(testContext, podA.Name, unversioned.ExportOptions{}) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + exportedPod := obj.(*api.Pod) + if exportedPod.Labels["exported"] != "true" { + t.Errorf("expected: exported->true, found: %s", exportedPod.Labels["exported"]) + } + if exportedPod.Labels["exact"] != "false" { + t.Errorf("expected: exact->false, found: %s", exportedPod.Labels["exact"]) + } + if exportedPod.Labels["prepare_create"] != "true" { + t.Errorf("expected: prepare_create->true, found: %s", exportedPod.Labels["prepare_create"]) + } + delete(exportedPod.Labels, "exported") + delete(exportedPod.Labels, "exact") + delete(exportedPod.Labels, "prepare_create") + exportObjectMeta(&podA.ObjectMeta, false) + podA.Spec = exportedPod.Spec + if !reflect.DeepEqual(&podA, exportedPod) { + t.Errorf("expected:\n%v\nsaw:\n%v\n", &podA, exportedPod) + } +} + +func TestStoreBasicExport(t *testing.T) { + podA := api.Pod{ + ObjectMeta: api.ObjectMeta{ + Namespace: "test", + Name: "foo", + Labels: map[string]string{}, + }, + Spec: api.PodSpec{NodeName: "machine"}, + Status: api.PodStatus{HostIP: "1.2.3.4"}, + } + + server, registry := NewTestGenericStoreRegistry(t) + defer server.Terminate(t) + + testContext := api.WithNamespace(api.NewContext(), "test") + registry.UpdateStrategy.(*testRESTStrategy).allowCreateOnUpdate = true + if !updateAndVerify(t, testContext, registry, &podA) { + t.Errorf("Unexpected error updating podA") + } + + obj, err := registry.Export(testContext, podA.Name, unversioned.ExportOptions{}) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + exportedPod := obj.(*api.Pod) + if exportedPod.Labels["prepare_create"] != "true" { + t.Errorf("expected: prepare_create->true, found: %s", exportedPod.Labels["prepare_create"]) + } + delete(exportedPod.Labels, "prepare_create") + exportObjectMeta(&podA.ObjectMeta, false) + podA.Spec = exportedPod.Spec + if !reflect.DeepEqual(&podA, exportedPod) { + t.Errorf("expected:\n%v\nsaw:\n%v\n", &podA, exportedPod) + } +} + +func TestStoreGet(t *testing.T) { + podA := &api.Pod{ + ObjectMeta: api.ObjectMeta{Namespace: "test", Name: "foo"}, + Spec: api.PodSpec{NodeName: "machine"}, + } + + testContext := api.WithNamespace(api.NewContext(), "test") + server, registry := NewTestGenericStoreRegistry(t) + defer server.Terminate(t) + + _, err := registry.Get(testContext, podA.Name) + if !errors.IsNotFound(err) { + t.Errorf("Unexpected error: %v", err) + } + + registry.UpdateStrategy.(*testRESTStrategy).allowCreateOnUpdate = true + if !updateAndVerify(t, testContext, registry, podA) { + t.Errorf("Unexpected error updating podA") + } +} + 
+func TestStoreDelete(t *testing.T) {
+	podA := &api.Pod{
+		ObjectMeta: api.ObjectMeta{Name: "foo"},
+		Spec:       api.PodSpec{NodeName: "machine"},
+	}
+
+	testContext := api.WithNamespace(api.NewContext(), "test")
+	server, registry := NewTestGenericStoreRegistry(t)
+	defer server.Terminate(t)
+
+	// test failure condition
+	_, err := registry.Delete(testContext, podA.Name, nil)
+	if !errors.IsNotFound(err) {
+		t.Errorf("Unexpected error: %v", err)
+	}
+
+	// create pod
+	_, err = registry.Create(testContext, podA)
+	if err != nil {
+		t.Errorf("Unexpected error: %v", err)
+	}
+
+	// delete object
+	_, err = registry.Delete(testContext, podA.Name, nil)
+	if err != nil {
+		t.Errorf("Unexpected error: %v", err)
+	}
+
+	// try to get an item which should be deleted
+	_, err = registry.Get(testContext, podA.Name)
+	if !errors.IsNotFound(err) {
+		t.Errorf("Unexpected error: %v", err)
+	}
+}
+
+func TestStoreHandleFinalizers(t *testing.T) {
+	EnableGarbageCollector = true
+	defer func() { EnableGarbageCollector = false }()
+	podWithFinalizer := &api.Pod{
+		ObjectMeta: api.ObjectMeta{Name: "foo", Finalizers: []string{"foo.com/x"}},
+		Spec:       api.PodSpec{NodeName: "machine"},
+	}
+
+	testContext := api.WithNamespace(api.NewContext(), "test")
+	server, registry := NewTestGenericStoreRegistry(t)
+	defer server.Terminate(t)
+
+	// create pod
+	_, err := registry.Create(testContext, podWithFinalizer)
+	if err != nil {
+		t.Errorf("Unexpected error: %v", err)
+	}
+
+	// deleting the object with nil delete options doesn't delete the object
+	_, err = registry.Delete(testContext, podWithFinalizer.Name, nil)
+	if err != nil {
+		t.Errorf("Unexpected error: %v", err)
+	}
+
+	// the object should still exist
+	obj, err := registry.Get(testContext, podWithFinalizer.Name)
+	if err != nil {
+		t.Errorf("Unexpected error: %v", err)
+	}
+	podWithFinalizer, ok := obj.(*api.Pod)
+	if !ok {
+		t.Errorf("Unexpected object: %#v", obj)
+	}
+	if podWithFinalizer.ObjectMeta.DeletionTimestamp == nil {
+		t.Errorf("Expect the object to have DeletionTimestamp set, but got %#v", podWithFinalizer.ObjectMeta)
+	}
+	if podWithFinalizer.ObjectMeta.DeletionGracePeriodSeconds == nil || *podWithFinalizer.ObjectMeta.DeletionGracePeriodSeconds != 0 {
+		t.Errorf("Expect the object to have 0 DeletionGracePeriodSecond, but got %#v", podWithFinalizer.ObjectMeta)
+	}
+
+	updatedPodWithFinalizer := &api.Pod{
+		ObjectMeta: api.ObjectMeta{Name: "foo", Finalizers: []string{"foo.com/x"}, ResourceVersion: podWithFinalizer.ObjectMeta.ResourceVersion},
+		Spec:       api.PodSpec{NodeName: "machine"},
+	}
+	_, _, err = registry.Update(testContext, updatedPodWithFinalizer.ObjectMeta.Name, rest.DefaultUpdatedObjectInfo(updatedPodWithFinalizer, api.Scheme))
+	if err != nil {
+		t.Errorf("Unexpected error: %v", err)
+	}
+
+	// the object should still exist, because it still has a finalizer
+	obj, err = registry.Get(testContext, podWithFinalizer.Name)
+	if err != nil {
+		t.Errorf("Unexpected error: %v", err)
+	}
+	podWithFinalizer, ok = obj.(*api.Pod)
+	if !ok {
+		t.Errorf("Unexpected object: %#v", obj)
+	}
+
+	podWithNoFinalizer := &api.Pod{
+		ObjectMeta: api.ObjectMeta{Name: "foo", ResourceVersion: podWithFinalizer.ObjectMeta.ResourceVersion},
+		Spec:       api.PodSpec{NodeName: "anothermachine"},
+	}
+	_, _, err = registry.Update(testContext, podWithFinalizer.ObjectMeta.Name, rest.DefaultUpdatedObjectInfo(podWithNoFinalizer, api.Scheme))
+	if err != nil {
+		t.Errorf("Unexpected error: %v", err)
+	}
+	// the pod should be removed, because its finalizer is removed
+	_, err =
registry.Get(testContext, podWithFinalizer.Name)
+	if !errors.IsNotFound(err) {
+		t.Errorf("Unexpected error: %v", err)
+	}
+}
+
+func TestStoreDeleteWithOrphanDependents(t *testing.T) {
+	EnableGarbageCollector = true
+	defer func() { EnableGarbageCollector = false }()
+	podWithOrphanFinalizer := func(name string) *api.Pod {
+		return &api.Pod{
+			ObjectMeta: api.ObjectMeta{Name: name, Finalizers: []string{"foo.com/x", api.FinalizerOrphan, "bar.com/y"}},
+			Spec:       api.PodSpec{NodeName: "machine"},
+		}
+	}
+	podWithOtherFinalizers := func(name string) *api.Pod {
+		return &api.Pod{
+			ObjectMeta: api.ObjectMeta{Name: name, Finalizers: []string{"foo.com/x", "bar.com/y"}},
+			Spec:       api.PodSpec{NodeName: "machine"},
+		}
+	}
+	podWithNoFinalizer := func(name string) *api.Pod {
+		return &api.Pod{
+			ObjectMeta: api.ObjectMeta{Name: name},
+			Spec:       api.PodSpec{NodeName: "machine"},
+		}
+	}
+	podWithOnlyOrphanFinalizer := func(name string) *api.Pod {
+		return &api.Pod{
+			ObjectMeta: api.ObjectMeta{Name: name, Finalizers: []string{api.FinalizerOrphan}},
+			Spec:       api.PodSpec{NodeName: "machine"},
+		}
+	}
+	trueVar, falseVar := true, false
+	orphanOptions := &api.DeleteOptions{OrphanDependents: &trueVar}
+	nonOrphanOptions := &api.DeleteOptions{OrphanDependents: &falseVar}
+	nilOrphanOptions := &api.DeleteOptions{}
+
+	testcases := []struct {
+		pod               *api.Pod
+		options           *api.DeleteOptions
+		expectNotFound    bool
+		updatedFinalizers []string
+	}{
+		// cases run with DeleteOptions.OrphanDependents=true
+		{
+			podWithOrphanFinalizer("pod1"),
+			orphanOptions,
+			false,
+			[]string{"foo.com/x", api.FinalizerOrphan, "bar.com/y"},
+		},
+		{
+			podWithOtherFinalizers("pod2"),
+			orphanOptions,
+			false,
+			[]string{"foo.com/x", "bar.com/y", api.FinalizerOrphan},
+		},
+		{
+			podWithNoFinalizer("pod3"),
+			orphanOptions,
+			false,
+			[]string{api.FinalizerOrphan},
+		},
+		{
+			podWithOnlyOrphanFinalizer("pod4"),
+			orphanOptions,
+			false,
+			[]string{api.FinalizerOrphan},
+		},
+		// cases run with DeleteOptions.OrphanDependents=false
+		{
+			podWithOrphanFinalizer("pod5"),
+			nonOrphanOptions,
+			false,
+			[]string{"foo.com/x", "bar.com/y"},
+		},
+		{
+			podWithOtherFinalizers("pod6"),
+			nonOrphanOptions,
+			false,
+			[]string{"foo.com/x", "bar.com/y"},
+		},
+		{
+			podWithNoFinalizer("pod7"),
+			nonOrphanOptions,
+			true,
+			[]string{},
+		},
+		{
+			podWithOnlyOrphanFinalizer("pod8"),
+			nonOrphanOptions,
+			true,
+			[]string{},
+		},
+		// cases run with nil DeleteOptions, the finalizers are not updated.
+		{
+			podWithOrphanFinalizer("pod9"),
+			nil,
+			false,
+			[]string{"foo.com/x", api.FinalizerOrphan, "bar.com/y"},
+		},
+		{
+			podWithOtherFinalizers("pod10"),
+			nil,
+			false,
+			[]string{"foo.com/x", "bar.com/y"},
+		},
+		{
+			podWithNoFinalizer("pod11"),
+			nil,
+			true,
+			[]string{},
+		},
+		{
+			podWithOnlyOrphanFinalizer("pod12"),
+			nil,
+			false,
+			[]string{api.FinalizerOrphan},
+		},
+		// cases run with non-nil DeleteOptions but nil OrphanDependents; the
+		// finalizers are not updated in this case either.
+		{
+			podWithOrphanFinalizer("pod13"),
+			nilOrphanOptions,
+			false,
+			[]string{"foo.com/x", api.FinalizerOrphan, "bar.com/y"},
+		},
+		{
+			podWithOtherFinalizers("pod14"),
+			nilOrphanOptions,
+			false,
+			[]string{"foo.com/x", "bar.com/y"},
+		},
+		{
+			podWithNoFinalizer("pod15"),
+			nilOrphanOptions,
+			true,
+			[]string{},
+		},
+		{
+			podWithOnlyOrphanFinalizer("pod16"),
+			nilOrphanOptions,
+			false,
+			[]string{api.FinalizerOrphan},
+		},
+	}
+
+	testContext := api.WithNamespace(api.NewContext(), "test")
+	server, registry := NewTestGenericStoreRegistry(t)
+	defer server.Terminate(t)
+
+	for _, tc := range testcases {
+		// create pod
+		_, err := registry.Create(testContext, tc.pod)
+		if err != nil {
+			t.Fatalf("Unexpected error: %v", err)
+		}
+		_, err = registry.Delete(testContext, tc.pod.Name, tc.options)
+		if err != nil {
+			t.Fatalf("Unexpected error: %v", err)
+		}
+		obj, err := registry.Get(testContext, tc.pod.Name)
+		if tc.expectNotFound && (err == nil || !errors.IsNotFound(err)) {
+			t.Fatalf("Unexpected error: %v", err)
+		}
+		if !tc.expectNotFound && err != nil {
+			t.Fatalf("Unexpected error: %v", err)
+		}
+		if !tc.expectNotFound {
+			pod, ok := obj.(*api.Pod)
+			if !ok {
+				t.Fatalf("Expect the object to be a pod, but got %#v", obj)
+			}
+			if pod.ObjectMeta.DeletionTimestamp == nil {
+				t.Errorf("Expect the object to have DeletionTimestamp set, but got %#v", pod.ObjectMeta)
+			}
+			if pod.ObjectMeta.DeletionGracePeriodSeconds == nil || *pod.ObjectMeta.DeletionGracePeriodSeconds != 0 {
+				t.Errorf("Expect the object to have 0 DeletionGracePeriodSecond, but got %#v", pod.ObjectMeta)
+			}
+			if e, a := tc.updatedFinalizers, pod.ObjectMeta.Finalizers; !reflect.DeepEqual(e, a) {
+				t.Errorf("Expect object %s to have finalizers %v, got %v", pod.ObjectMeta.Name, e, a)
+			}
+		}
+	}
+}
+
+func TestStoreDeleteCollection(t *testing.T) {
+	podA := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}}
+	podB := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "bar"}}
+
+	testContext := api.WithNamespace(api.NewContext(), "test")
+	server, registry := NewTestGenericStoreRegistry(t)
+	defer server.Terminate(t)
+
+	if _, err := registry.Create(testContext, podA); err != nil {
+		t.Errorf("Unexpected error: %v", err)
+	}
+	if _, err := registry.Create(testContext, podB); err != nil {
+		t.Errorf("Unexpected error: %v", err)
+	}
+
+	// Delete all pods.
+	deleted, err := registry.DeleteCollection(testContext, nil, &api.ListOptions{})
+	if err != nil {
+		t.Fatalf("Unexpected error: %v", err)
+	}
+	deletedPods := deleted.(*api.PodList)
+	if len(deletedPods.Items) != 2 {
+		t.Errorf("Unexpected number of pods deleted: %d, expected: 2", len(deletedPods.Items))
+	}
+
+	if _, err := registry.Get(testContext, podA.Name); !errors.IsNotFound(err) {
+		t.Errorf("Unexpected error: %v", err)
+	}
+	if _, err := registry.Get(testContext, podB.Name); !errors.IsNotFound(err) {
+		t.Errorf("Unexpected error: %v", err)
+	}
+}
+
+func TestStoreDeleteCollectionNotFound(t *testing.T) {
+	server, registry := NewTestGenericStoreRegistry(t)
+	defer server.Terminate(t)
+
+	testContext := api.WithNamespace(api.NewContext(), "test")
+
+	podA := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}}
+	podB := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "bar"}}
+
+	for i := 0; i < 10; i++ {
+		// Setup
+		if _, err := registry.Create(testContext, podA); err != nil {
+			t.Errorf("Unexpected error: %v", err)
+		}
+		if _, err := registry.Create(testContext, podB); err != nil {
+			t.Errorf("Unexpected error: %v", err)
+		}
+
+		// Kick off multiple delete collection calls to test notfound behavior
+		wg := &sync.WaitGroup{}
+		for j := 0; j < 2; j++ {
+			wg.Add(1)
+			go func() {
+				defer wg.Done()
+				_, err := registry.DeleteCollection(testContext, nil, &api.ListOptions{})
+				if err != nil {
+					// t.Fatalf must not be called from a non-test goroutine.
+					t.Errorf("Unexpected error: %v", err)
+				}
+			}()
+		}
+		wg.Wait()
+
+		if _, err := registry.Get(testContext, podA.Name); !errors.IsNotFound(err) {
+			t.Errorf("Unexpected error: %v", err)
+		}
+		if _, err := registry.Get(testContext, podB.Name); !errors.IsNotFound(err) {
+			t.Errorf("Unexpected error: %v", err)
+		}
+	}
+}
+
+// Test whether objects deleted with DeleteCollection are correctly delivered
+// to watchers.
+func TestStoreDeleteCollectionWithWatch(t *testing.T) {
+	podA := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}}
+
+	testContext := api.WithNamespace(api.NewContext(), "test")
+	server, registry := NewTestGenericStoreRegistry(t)
+	defer server.Terminate(t)
+
+	objCreated, err := registry.Create(testContext, podA)
+	if err != nil {
+		t.Fatalf("Unexpected error: %v", err)
+	}
+	podCreated := objCreated.(*api.Pod)
+
+	watcher, err := registry.WatchPredicate(testContext, setMatcher{sets.NewString("foo")}, podCreated.ResourceVersion)
+	if err != nil {
+		t.Fatalf("Unexpected error: %v", err)
+	}
+	defer watcher.Stop()
+
+	if _, err := registry.DeleteCollection(testContext, nil, &api.ListOptions{}); err != nil {
+		t.Fatalf("Unexpected error: %v", err)
+	}
+
+	got, open := <-watcher.ResultChan()
+	if !open {
+		t.Errorf("Unexpected channel close")
+	} else {
+		if got.Type != "DELETED" {
+			t.Errorf("Unexpected event type: %s", got.Type)
+		}
+		gotObject := got.Object.(*api.Pod)
+		gotObject.ResourceVersion = podCreated.ResourceVersion
+		if e, a := podCreated, gotObject; !reflect.DeepEqual(e, a) {
+			t.Errorf("Expected: %#v, got: %#v", e, a)
+		}
+	}
+}
+
+func TestStoreWatch(t *testing.T) {
+	testContext := api.WithNamespace(api.NewContext(), "test")
+	noNamespaceContext := api.NewContext()
+
+	table := map[string]struct {
+		generic.Matcher
+		context api.Context
+	}{
+		"single": {
+			Matcher: setMatcher{sets.NewString("foo")},
+		},
+		"multi": {
+			Matcher: setMatcher{sets.NewString("foo", "bar")},
+		},
+		"singleNoNamespace": {
+			Matcher: setMatcher{sets.NewString("foo")},
+			context: noNamespaceContext,
+		},
+	}
+
+	for name, m := range table {
+		ctx := testContext
+		if m.context != nil {
+			ctx = m.context
+		}
+		podA := &api.Pod{
+			ObjectMeta: api.ObjectMeta{
+				Name:      "foo",
+				Namespace: "test",
+			},
+			Spec: api.PodSpec{NodeName: "machine"},
+		}
+
+		server, registry := NewTestGenericStoreRegistry(t)
+		wi, err := registry.WatchPredicate(ctx, m, "0")
+		if err != nil {
+			t.Errorf("%v: unexpected error: %v", name, err)
+		} else {
+			obj, err := registry.Create(testContext, podA)
+			if err != nil {
+				t.Errorf("%v: unexpected error: %v", name, err)
+			} else {
+				got, open := <-wi.ResultChan()
+				if !open {
+					t.Errorf("%v: unexpected channel close", name)
+				} else {
+					if e, a := obj, got.Object; !reflect.DeepEqual(e, a) {
+						t.Errorf("Expected %#v, got %#v", e, a)
+					}
+				}
+			}
+			wi.Stop()
+		}
+
+		server.Terminate(t)
+	}
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/registry/generic/rest/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/registry/generic/rest/doc.go
new file mode 100644
index 000000000000..fef461387795
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/registry/generic/rest/doc.go
@@ -0,0 +1,19 @@
+/*
+Copyright 2014 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+*/
+
+// Package rest has generic implementations of resources used for
+// REST responses
+package rest
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/registry/generic/rest/proxy.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/registry/generic/rest/proxy.go
new file mode 100644
index 000000000000..ca28831c8c0f
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/registry/generic/rest/proxy.go
@@ -0,0 +1,242 @@
+/*
+Copyright 2014 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rest
+
+import (
+	"io"
+	"net/http"
+	"net/http/httputil"
+	"net/url"
+	"strings"
+	"sync"
+	"time"
+
+	"k8s.io/kubernetes/pkg/api/errors"
+	"k8s.io/kubernetes/pkg/util/httpstream"
+	"k8s.io/kubernetes/pkg/util/net"
+	"k8s.io/kubernetes/pkg/util/proxy"
+
+	"github.com/golang/glog"
+	"github.com/mxk/go-flowrate/flowrate"
+)
+
+// UpgradeAwareProxyHandler is a handler for proxy requests that may require an upgrade
+type UpgradeAwareProxyHandler struct {
+	UpgradeRequired bool
+	Location        *url.URL
+	// Transport provides an optional round tripper to use to proxy. If nil, the default proxy transport is used
+	Transport http.RoundTripper
+	// WrapTransport indicates whether the provided Transport should be wrapped with default proxy transport behavior (URL rewriting, X-Forwarded-* header setting)
+	WrapTransport  bool
+	FlushInterval  time.Duration
+	MaxBytesPerSec int64
+	Responder      ErrorResponder
+}
+
+const defaultFlushInterval = 200 * time.Millisecond
+
+// ErrorResponder abstracts error reporting to the proxy handler to remove the need to hardcode a particular
+// error format.
+type ErrorResponder interface {
+	Error(err error)
+}
+
+// NewUpgradeAwareProxyHandler creates a new proxy handler with a default flush interval. Responder is required for returning
+// errors to the caller.
+func NewUpgradeAwareProxyHandler(location *url.URL, transport http.RoundTripper, wrapTransport, upgradeRequired bool, responder ErrorResponder) *UpgradeAwareProxyHandler {
+	return &UpgradeAwareProxyHandler{
+		Location:        location,
+		Transport:       transport,
+		WrapTransport:   wrapTransport,
+		UpgradeRequired: upgradeRequired,
+		FlushInterval:   defaultFlushInterval,
+		Responder:       responder,
+	}
+}
+
+// ServeHTTP handles the proxy request
+func (h *UpgradeAwareProxyHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+	if len(h.Location.Scheme) == 0 {
+		h.Location.Scheme = "http"
+	}
+	if h.tryUpgrade(w, req) {
+		return
+	}
+	if h.UpgradeRequired {
+		h.Responder.Error(errors.NewBadRequest("Upgrade request required"))
+		return
+	}
+
+	loc := *h.Location
+	loc.RawQuery = req.URL.RawQuery
+
+	// If the original request URL ended in '/', append a '/' at the end
+	// of the proxy URL
+	if !strings.HasSuffix(loc.Path, "/") && strings.HasSuffix(req.URL.Path, "/") {
+		loc.Path += "/"
+	}
+
+	// From pkg/apiserver/proxy.go#ServeHTTP:
+	// Redirect requests with an empty path to a location that ends with a '/'
+	// This is essentially a hack for http://issue.k8s.io/4958.
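+	// For example (illustrative values): a request for
+	// "/api/v1/proxy/nodes/node-1?foo=bar" whose proxy location resolves to an
+	// empty path is answered with a 301 to "/api/v1/proxy/nodes/node-1/?foo=bar",
+	// so relative links served by the backend resolve against the
+	// trailing-slash form of the URL.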
+	// Note: Keep this code after tryUpgrade to not break that flow.
+	if len(loc.Path) == 0 {
+		var queryPart string
+		if len(req.URL.RawQuery) > 0 {
+			queryPart = "?" + req.URL.RawQuery
+		}
+		w.Header().Set("Location", req.URL.Path+"/"+queryPart)
+		w.WriteHeader(http.StatusMovedPermanently)
+		return
+	}
+
+	if h.Transport == nil || h.WrapTransport {
+		h.Transport = h.defaultProxyTransport(req.URL, h.Transport)
+	}
+
+	newReq, err := http.NewRequest(req.Method, loc.String(), req.Body)
+	if err != nil {
+		h.Responder.Error(err)
+		return
+	}
+	newReq.Header = req.Header
+	newReq.ContentLength = req.ContentLength
+	// Copying the TransferEncoding is for future-proofing. Currently Go only supports "chunked" and
+	// it can determine the TransferEncoding based on ContentLength and the Body.
+	newReq.TransferEncoding = req.TransferEncoding
+
+	proxy := httputil.NewSingleHostReverseProxy(&url.URL{Scheme: h.Location.Scheme, Host: h.Location.Host})
+	proxy.Transport = h.Transport
+	proxy.FlushInterval = h.FlushInterval
+	proxy.ServeHTTP(w, newReq)
+}
+
+// tryUpgrade returns true if the request was handled.
+func (h *UpgradeAwareProxyHandler) tryUpgrade(w http.ResponseWriter, req *http.Request) bool {
+	if !httpstream.IsUpgradeRequest(req) {
+		return false
+	}
+
+	backendConn, err := proxy.DialURL(h.Location, h.Transport)
+	if err != nil {
+		h.Responder.Error(err)
+		return true
+	}
+	defer backendConn.Close()
+
+	requestHijackedConn, _, err := w.(http.Hijacker).Hijack()
+	if err != nil {
+		h.Responder.Error(err)
+		return true
+	}
+	defer requestHijackedConn.Close()
+
+	newReq, err := http.NewRequest(req.Method, h.Location.String(), req.Body)
+	if err != nil {
+		h.Responder.Error(err)
+		return true
+	}
+	newReq.Header = req.Header
+
+	if err = newReq.Write(backendConn); err != nil {
+		h.Responder.Error(err)
+		return true
+	}
+
+	wg := &sync.WaitGroup{}
+	wg.Add(2)
+
+	go func() {
+		var writer io.WriteCloser
+		if h.MaxBytesPerSec > 0 {
+			writer = flowrate.NewWriter(backendConn, h.MaxBytesPerSec)
+		} else {
+			writer = backendConn
+		}
+		_, err := io.Copy(writer, requestHijackedConn)
+		if err != nil && !strings.Contains(err.Error(), "use of closed network connection") {
+			glog.Errorf("Error proxying data from client to backend: %v", err)
+		}
+		wg.Done()
+	}()
+
+	go func() {
+		var reader io.ReadCloser
+		if h.MaxBytesPerSec > 0 {
+			reader = flowrate.NewReader(backendConn, h.MaxBytesPerSec)
+		} else {
+			reader = backendConn
+		}
+		_, err := io.Copy(requestHijackedConn, reader)
+		if err != nil && !strings.Contains(err.Error(), "use of closed network connection") {
+			glog.Errorf("Error proxying data from backend to client: %v", err)
+		}
+		wg.Done()
+	}()
+
+	wg.Wait()
+	return true
+}
+
+func (h *UpgradeAwareProxyHandler) defaultProxyTransport(url *url.URL, internalTransport http.RoundTripper) http.RoundTripper {
+	scheme := url.Scheme
+	host := url.Host
+	suffix := h.Location.Path
+	if strings.HasSuffix(url.Path, "/") && !strings.HasSuffix(suffix, "/") {
+		suffix += "/"
+	}
+	pathPrepend := strings.TrimSuffix(url.Path, suffix)
+	rewritingTransport := &proxy.Transport{
+		Scheme:       scheme,
+		Host:         host,
+		PathPrepend:  pathPrepend,
+		RoundTripper: internalTransport,
+	}
+	return &corsRemovingTransport{
+		RoundTripper: rewritingTransport,
+	}
+}
+
+// corsRemovingTransport is a wrapper for an internal transport. It removes CORS headers
+// from the internal response.
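+// It is the outermost layer of the transport chain built by defaultProxyTransport
+// above. A minimal end-to-end usage sketch of the proxy handler it serves
+// (editorial illustration; the backend URL, the responder value, and the rate
+// limit are assumptions, not part of the upstream file):
+//
+//	loc, _ := url.Parse("http://backend.cluster.local:8080/")
+//	handler := NewUpgradeAwareProxyHandler(loc, nil, true, false, responder)
+//	handler.MaxBytesPerSec = 1 << 20 // throttle upgraded streams via flowrate
+//	http.Handle("/proxy/", handler)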
+type corsRemovingTransport struct { + http.RoundTripper +} + +func (p *corsRemovingTransport) RoundTrip(req *http.Request) (*http.Response, error) { + resp, err := p.RoundTripper.RoundTrip(req) + if err != nil { + return nil, err + } + removeCORSHeaders(resp) + return resp, nil +} + +var _ = net.RoundTripperWrapper(&corsRemovingTransport{}) + +func (rt *corsRemovingTransport) WrappedRoundTripper() http.RoundTripper { + return rt.RoundTripper +} + +// removeCORSHeaders strip CORS headers sent from the backend +// This should be called on all responses before returning +func removeCORSHeaders(resp *http.Response) { + resp.Header.Del("Access-Control-Allow-Credentials") + resp.Header.Del("Access-Control-Allow-Headers") + resp.Header.Del("Access-Control-Allow-Methods") + resp.Header.Del("Access-Control-Allow-Origin") +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/registry/generic/rest/proxy_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/registry/generic/rest/proxy_test.go new file mode 100644 index 000000000000..8b9268402977 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/registry/generic/rest/proxy_test.go @@ -0,0 +1,725 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rest + +import ( + "bytes" + "compress/gzip" + "crypto/tls" + "crypto/x509" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "net/http/httptest" + "net/http/httputil" + "net/url" + "reflect" + "strconv" + "strings" + "testing" + + "golang.org/x/net/websocket" + + utilnet "k8s.io/kubernetes/pkg/util/net" + "k8s.io/kubernetes/pkg/util/proxy" +) + +type fakeResponder struct { + called bool + err error +} + +func (r *fakeResponder) Error(err error) { + if r.called { + panic("called twice") + } + r.called = true + r.err = err +} + +type SimpleBackendHandler struct { + requestURL url.URL + requestHeader http.Header + requestBody []byte + requestMethod string + responseBody string + responseHeader map[string]string + t *testing.T +} + +func (s *SimpleBackendHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { + s.requestURL = *req.URL + s.requestHeader = req.Header + s.requestMethod = req.Method + var err error + s.requestBody, err = ioutil.ReadAll(req.Body) + if err != nil { + s.t.Errorf("Unexpected error: %v", err) + return + } + + if s.responseHeader != nil { + for k, v := range s.responseHeader { + w.Header().Add(k, v) + } + } + w.Write([]byte(s.responseBody)) +} + +func validateParameters(t *testing.T, name string, actual url.Values, expected map[string]string) { + for k, v := range expected { + actualValue, ok := actual[k] + if !ok { + t.Errorf("%s: Expected parameter %s not received", name, k) + continue + } + if actualValue[0] != v { + t.Errorf("%s: Parameter %s values don't match. 
Actual: %#v, Expected: %s", + name, k, actualValue, v) + } + } +} + +func validateHeaders(t *testing.T, name string, actual http.Header, expected map[string]string, notExpected []string) { + for k, v := range expected { + actualValue, ok := actual[k] + if !ok { + t.Errorf("%s: Expected header %s not received", name, k) + continue + } + if actualValue[0] != v { + t.Errorf("%s: Header %s values don't match. Actual: %s, Expected: %s", + name, k, actualValue, v) + } + } + if notExpected == nil { + return + } + for _, h := range notExpected { + if _, present := actual[h]; present { + t.Errorf("%s: unexpected header: %s", name, h) + } + } +} + +func TestServeHTTP(t *testing.T) { + tests := []struct { + name string + method string + requestPath string + expectedPath string + requestBody string + requestParams map[string]string + requestHeader map[string]string + responseHeader map[string]string + expectedRespHeader map[string]string + notExpectedRespHeader []string + upgradeRequired bool + expectError func(err error) bool + }{ + { + name: "root path, simple get", + method: "GET", + requestPath: "/", + expectedPath: "/", + }, + { + name: "no upgrade header sent", + method: "GET", + requestPath: "/", + upgradeRequired: true, + expectError: func(err error) bool { + return err != nil && strings.Contains(err.Error(), "Upgrade request required") + }, + }, + { + name: "simple path, get", + method: "GET", + requestPath: "/path/to/test", + expectedPath: "/path/to/test", + }, + { + name: "request params", + method: "POST", + requestPath: "/some/path/", + expectedPath: "/some/path/", + requestParams: map[string]string{"param1": "value/1", "param2": "value%2"}, + requestBody: "test request body", + }, + { + name: "request headers", + method: "PUT", + requestPath: "/some/path", + expectedPath: "/some/path", + requestHeader: map[string]string{"Header1": "value1", "Header2": "value2"}, + }, + { + name: "empty path - slash should be added", + method: "GET", + requestPath: "", + expectedPath: "/", + }, + { + name: "remove CORS headers", + method: "GET", + requestPath: "/some/path", + expectedPath: "/some/path", + responseHeader: map[string]string{ + "Header1": "value1", + "Access-Control-Allow-Origin": "some.server", + "Access-Control-Allow-Methods": "GET"}, + expectedRespHeader: map[string]string{ + "Header1": "value1", + }, + notExpectedRespHeader: []string{ + "Access-Control-Allow-Origin", + "Access-Control-Allow-Methods", + }, + }, + } + + for i, test := range tests { + func() { + backendResponse := "Hello" + backendResponseHeader := test.responseHeader + // Test a simple header if not specified in the test + if backendResponseHeader == nil && test.expectedRespHeader == nil { + backendResponseHeader = map[string]string{"Content-Type": "text/html"} + test.expectedRespHeader = map[string]string{"Content-Type": "text/html"} + } + backendHandler := &SimpleBackendHandler{ + responseBody: backendResponse, + responseHeader: backendResponseHeader, + } + backendServer := httptest.NewServer(backendHandler) + defer backendServer.Close() + + responder := &fakeResponder{} + backendURL, _ := url.Parse(backendServer.URL) + backendURL.Path = test.requestPath + proxyHandler := &UpgradeAwareProxyHandler{ + Location: backendURL, + Responder: responder, + UpgradeRequired: test.upgradeRequired, + } + proxyServer := httptest.NewServer(proxyHandler) + defer proxyServer.Close() + proxyURL, _ := url.Parse(proxyServer.URL) + proxyURL.Path = test.requestPath + paramValues := url.Values{} + for k, v := range test.requestParams { + 
paramValues[k] = []string{v} + } + proxyURL.RawQuery = paramValues.Encode() + var requestBody io.Reader + if test.requestBody != "" { + requestBody = bytes.NewBufferString(test.requestBody) + } + req, err := http.NewRequest(test.method, proxyURL.String(), requestBody) + if test.requestHeader != nil { + header := http.Header{} + for k, v := range test.requestHeader { + header.Add(k, v) + } + req.Header = header + } + if err != nil { + t.Errorf("Error creating client request: %v", err) + } + client := &http.Client{} + res, err := client.Do(req) + if err != nil { + t.Errorf("Error from proxy request: %v", err) + } + + if test.expectError != nil { + if !responder.called { + t.Errorf("%d: responder was not invoked", i) + return + } + if !test.expectError(responder.err) { + t.Errorf("%d: unexpected error: %v", i, responder.err) + } + return + } + + // Validate backend request + // Method + if backendHandler.requestMethod != test.method { + t.Errorf("Unexpected request method: %s. Expected: %s", + backendHandler.requestMethod, test.method) + } + + // Body + if string(backendHandler.requestBody) != test.requestBody { + t.Errorf("Unexpected request body: %s. Expected: %s", + string(backendHandler.requestBody), test.requestBody) + } + + // Path + if backendHandler.requestURL.Path != test.expectedPath { + t.Errorf("Unexpected request path: %s", backendHandler.requestURL.Path) + } + // Parameters + validateParameters(t, test.name, backendHandler.requestURL.Query(), test.requestParams) + + // Headers + validateHeaders(t, test.name+" backend request", backendHandler.requestHeader, + test.requestHeader, nil) + + // Validate proxy response + + // Response Headers + validateHeaders(t, test.name+" backend headers", res.Header, test.expectedRespHeader, test.notExpectedRespHeader) + + // Validate Body + responseBody, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Errorf("Unexpected error reading response body: %v", err) + } + if rb := string(responseBody); rb != backendResponse { + t.Errorf("Did not get expected response body: %s. 
Expected: %s", rb, backendResponse) + } + + // Error + if responder.called { + t.Errorf("Unexpected proxy handler error: %v", responder.err) + } + }() + } +} + +func TestProxyUpgrade(t *testing.T) { + + localhostPool := x509.NewCertPool() + if !localhostPool.AppendCertsFromPEM(localhostCert) { + t.Errorf("error setting up localhostCert pool") + } + + testcases := map[string]struct { + ServerFunc func(http.Handler) *httptest.Server + ProxyTransport http.RoundTripper + }{ + "http": { + ServerFunc: httptest.NewServer, + ProxyTransport: nil, + }, + "https (invalid hostname + InsecureSkipVerify)": { + ServerFunc: func(h http.Handler) *httptest.Server { + cert, err := tls.X509KeyPair(exampleCert, exampleKey) + if err != nil { + t.Errorf("https (invalid hostname): proxy_test: %v", err) + } + ts := httptest.NewUnstartedServer(h) + ts.TLS = &tls.Config{ + Certificates: []tls.Certificate{cert}, + } + ts.StartTLS() + return ts + }, + ProxyTransport: utilnet.SetTransportDefaults(&http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}}), + }, + "https (valid hostname + RootCAs)": { + ServerFunc: func(h http.Handler) *httptest.Server { + cert, err := tls.X509KeyPair(localhostCert, localhostKey) + if err != nil { + t.Errorf("https (valid hostname): proxy_test: %v", err) + } + ts := httptest.NewUnstartedServer(h) + ts.TLS = &tls.Config{ + Certificates: []tls.Certificate{cert}, + } + ts.StartTLS() + return ts + }, + ProxyTransport: utilnet.SetTransportDefaults(&http.Transport{TLSClientConfig: &tls.Config{RootCAs: localhostPool}}), + }, + "https (valid hostname + RootCAs + custom dialer)": { + ServerFunc: func(h http.Handler) *httptest.Server { + cert, err := tls.X509KeyPair(localhostCert, localhostKey) + if err != nil { + t.Errorf("https (valid hostname): proxy_test: %v", err) + } + ts := httptest.NewUnstartedServer(h) + ts.TLS = &tls.Config{ + Certificates: []tls.Certificate{cert}, + } + ts.StartTLS() + return ts + }, + ProxyTransport: utilnet.SetTransportDefaults(&http.Transport{Dial: net.Dial, TLSClientConfig: &tls.Config{RootCAs: localhostPool}}), + }, + } + + for k, tc := range testcases { + + backendServer := tc.ServerFunc(websocket.Handler(func(ws *websocket.Conn) { + defer ws.Close() + body := make([]byte, 5) + ws.Read(body) + ws.Write([]byte("hello " + string(body))) + })) + defer backendServer.Close() + + serverURL, _ := url.Parse(backendServer.URL) + proxyHandler := &UpgradeAwareProxyHandler{ + Location: serverURL, + Transport: tc.ProxyTransport, + } + proxy := httptest.NewServer(proxyHandler) + defer proxy.Close() + + ws, err := websocket.Dial("ws://"+proxy.Listener.Addr().String()+"/some/path", "", "http://127.0.0.1/") + if err != nil { + t.Fatalf("%s: websocket dial err: %s", k, err) + } + defer ws.Close() + + if _, err := ws.Write([]byte("world")); err != nil { + t.Fatalf("%s: write err: %s", k, err) + } + + response := make([]byte, 20) + n, err := ws.Read(response) + if err != nil { + t.Fatalf("%s: read err: %s", k, err) + } + if e, a := "hello world", string(response[0:n]); e != a { + t.Fatalf("%s: expected '%#v', got '%#v'", k, e, a) + } + } +} + +func TestDefaultProxyTransport(t *testing.T) { + tests := []struct { + name, + url, + location, + expectedScheme, + expectedHost, + expectedPathPrepend string + }{ + + { + name: "simple path", + url: "http://test.server:8080/a/test/location", + location: "http://localhost/location", + expectedScheme: "http", + expectedHost: "test.server:8080", + expectedPathPrepend: "/a/test", + }, + { + name: "empty path", + url: 
"http://test.server:8080/a/test/", + location: "http://localhost", + expectedScheme: "http", + expectedHost: "test.server:8080", + expectedPathPrepend: "/a/test", + }, + { + name: "location ending in slash", + url: "http://test.server:8080/a/test/", + location: "http://localhost/", + expectedScheme: "http", + expectedHost: "test.server:8080", + expectedPathPrepend: "/a/test", + }, + } + + for _, test := range tests { + locURL, _ := url.Parse(test.location) + URL, _ := url.Parse(test.url) + h := UpgradeAwareProxyHandler{ + Location: locURL, + } + result := h.defaultProxyTransport(URL, nil) + transport := result.(*corsRemovingTransport).RoundTripper.(*proxy.Transport) + if transport.Scheme != test.expectedScheme { + t.Errorf("%s: unexpected scheme. Actual: %s, Expected: %s", test.name, transport.Scheme, test.expectedScheme) + } + if transport.Host != test.expectedHost { + t.Errorf("%s: unexpected host. Actual: %s, Expected: %s", test.name, transport.Host, test.expectedHost) + } + if transport.PathPrepend != test.expectedPathPrepend { + t.Errorf("%s: unexpected path prepend. Actual: %s, Expected: %s", test.name, transport.PathPrepend, test.expectedPathPrepend) + } + } +} + +func TestProxyRequestContentLengthAndTransferEncoding(t *testing.T) { + chunk := func(data []byte) []byte { + out := &bytes.Buffer{} + chunker := httputil.NewChunkedWriter(out) + for _, b := range data { + if _, err := chunker.Write([]byte{b}); err != nil { + panic(err) + } + } + chunker.Close() + out.Write([]byte("\r\n")) + return out.Bytes() + } + + zip := func(data []byte) []byte { + out := &bytes.Buffer{} + zipper := gzip.NewWriter(out) + if _, err := zipper.Write(data); err != nil { + panic(err) + } + zipper.Close() + return out.Bytes() + } + + sampleData := []byte("abcde") + + table := map[string]struct { + reqHeaders http.Header + reqBody []byte + + expectedHeaders http.Header + expectedBody []byte + }{ + "content-length": { + reqHeaders: http.Header{ + "Content-Length": []string{"5"}, + }, + reqBody: sampleData, + + expectedHeaders: http.Header{ + "Content-Length": []string{"5"}, + "Content-Encoding": nil, // none set + "Transfer-Encoding": nil, // none set + }, + expectedBody: sampleData, + }, + + "content-length + identity transfer-encoding": { + reqHeaders: http.Header{ + "Content-Length": []string{"5"}, + "Transfer-Encoding": []string{"identity"}, + }, + reqBody: sampleData, + + expectedHeaders: http.Header{ + "Content-Length": []string{"5"}, + "Content-Encoding": nil, // none set + "Transfer-Encoding": nil, // gets removed + }, + expectedBody: sampleData, + }, + + "content-length + gzip content-encoding": { + reqHeaders: http.Header{ + "Content-Length": []string{strconv.Itoa(len(zip(sampleData)))}, + "Content-Encoding": []string{"gzip"}, + }, + reqBody: zip(sampleData), + + expectedHeaders: http.Header{ + "Content-Length": []string{strconv.Itoa(len(zip(sampleData)))}, + "Content-Encoding": []string{"gzip"}, + "Transfer-Encoding": nil, // none set + }, + expectedBody: zip(sampleData), + }, + + "chunked transfer-encoding": { + reqHeaders: http.Header{ + "Transfer-Encoding": []string{"chunked"}, + }, + reqBody: chunk(sampleData), + + expectedHeaders: http.Header{ + "Content-Length": nil, // none set + "Content-Encoding": nil, // none set + "Transfer-Encoding": nil, // Transfer-Encoding gets removed + }, + expectedBody: sampleData, // sample data is unchunked + }, + + "chunked transfer-encoding + gzip content-encoding": { + reqHeaders: http.Header{ + "Content-Encoding": []string{"gzip"}, + "Transfer-Encoding": 
[]string{"chunked"}, + }, + reqBody: chunk(zip(sampleData)), + + expectedHeaders: http.Header{ + "Content-Length": nil, // none set + "Content-Encoding": []string{"gzip"}, + "Transfer-Encoding": nil, // gets removed + }, + expectedBody: zip(sampleData), // sample data is unchunked, but content-encoding is preserved + }, + + // "Transfer-Encoding: gzip" is not supported by go + // See http/transfer.go#fixTransferEncoding (https://golang.org/src/net/http/transfer.go#L427) + // Once it is supported, this test case should succeed + // + // "gzip+chunked transfer-encoding": { + // reqHeaders: http.Header{ + // "Transfer-Encoding": []string{"chunked,gzip"}, + // }, + // reqBody: chunk(zip(sampleData)), + // + // expectedHeaders: http.Header{ + // "Content-Length": nil, // no content-length headers + // "Transfer-Encoding": nil, // Transfer-Encoding gets removed + // }, + // expectedBody: sampleData, + // }, + } + + successfulResponse := "backend passed tests" + for k, item := range table { + // Start the downstream server + downstreamServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + // Verify headers + for header, v := range item.expectedHeaders { + if !reflect.DeepEqual(v, req.Header[header]) { + t.Errorf("%s: Expected headers for %s to be %v, got %v", k, header, v, req.Header[header]) + } + } + + // Read body + body, err := ioutil.ReadAll(req.Body) + if err != nil { + t.Errorf("%s: unexpected error %v", k, err) + } + req.Body.Close() + + // Verify length + if req.ContentLength > 0 && req.ContentLength != int64(len(body)) { + t.Errorf("%s: ContentLength was %d, len(data) was %d", k, req.ContentLength, len(body)) + } + + // Verify content + if !bytes.Equal(item.expectedBody, body) { + t.Errorf("%s: Expected %q, got %q", k, string(item.expectedBody), string(body)) + } + + // Write successful response + w.Write([]byte(successfulResponse)) + })) + defer downstreamServer.Close() + + responder := &fakeResponder{} + backendURL, _ := url.Parse(downstreamServer.URL) + proxyHandler := &UpgradeAwareProxyHandler{ + Location: backendURL, + Responder: responder, + UpgradeRequired: false, + } + proxyServer := httptest.NewServer(proxyHandler) + defer proxyServer.Close() + + // Dial the proxy server + conn, err := net.Dial(proxyServer.Listener.Addr().Network(), proxyServer.Listener.Addr().String()) + if err != nil { + t.Errorf("unexpected error %v", err) + continue + } + defer conn.Close() + + // Add standard http 1.1 headers + if item.reqHeaders == nil { + item.reqHeaders = http.Header{} + } + item.reqHeaders.Add("Connection", "close") + item.reqHeaders.Add("Host", proxyServer.Listener.Addr().String()) + + // Write the request headers + if _, err := fmt.Fprint(conn, "POST / HTTP/1.1\r\n"); err != nil { + t.Fatalf("%s unexpected error %v", k, err) + } + for header, values := range item.reqHeaders { + for _, value := range values { + if _, err := fmt.Fprintf(conn, "%s: %s\r\n", header, value); err != nil { + t.Fatalf("%s: unexpected error %v", k, err) + } + } + } + // Header separator + if _, err := fmt.Fprint(conn, "\r\n"); err != nil { + t.Fatalf("%s: unexpected error %v", k, err) + } + // Body + if _, err := conn.Write(item.reqBody); err != nil { + t.Fatalf("%s: unexpected error %v", k, err) + } + + // Read response + response, err := ioutil.ReadAll(conn) + if err != nil { + t.Errorf("%s: unexpected error %v", k, err) + continue + } + if !strings.HasSuffix(string(response), successfulResponse) { + t.Errorf("%s: Did not get successful response: %s", k, 
string(response)) + continue + } + } +} + +// exampleCert was generated from crypto/tls/generate_cert.go with the following command: +// go run generate_cert.go --rsa-bits 512 --host example.com --ca --start-date "Jan 1 00:00:00 1970" --duration=1000000h +var exampleCert = []byte(`-----BEGIN CERTIFICATE----- +MIIBcjCCAR6gAwIBAgIQBOUTYowZaENkZi0faI9DgTALBgkqhkiG9w0BAQswEjEQ +MA4GA1UEChMHQWNtZSBDbzAgFw03MDAxMDEwMDAwMDBaGA8yMDg0MDEyOTE2MDAw +MFowEjEQMA4GA1UEChMHQWNtZSBDbzBcMA0GCSqGSIb3DQEBAQUAA0sAMEgCQQCZ +xfR3sgeHBraGFfF/24tTn4PRVAHOf2UOOxSQRs+aYjNqimFqf/SRIblQgeXdBJDR +gVK5F1Js2zwlehw0bHxRAgMBAAGjUDBOMA4GA1UdDwEB/wQEAwIApDATBgNVHSUE +DDAKBggrBgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MBYGA1UdEQQPMA2CC2V4YW1w +bGUuY29tMAsGCSqGSIb3DQEBCwNBAI/mfBB8dm33IpUl+acSyWfL6gX5Wc0FFyVj +dKeesE1XBuPX1My/rzU6Oy/YwX7LOL4FaeNUS6bbL4axSLPKYSs= +-----END CERTIFICATE-----`) + +var exampleKey = []byte(`-----BEGIN RSA PRIVATE KEY----- +MIIBOgIBAAJBAJnF9HeyB4cGtoYV8X/bi1Ofg9FUAc5/ZQ47FJBGz5piM2qKYWp/ +9JEhuVCB5d0EkNGBUrkXUmzbPCV6HDRsfFECAwEAAQJBAJLH9yPuButniACTn5L5 +IJQw1mWQt6zBw9eCo41YWkA0866EgjC53aPZaRjXMp0uNJGdIsys2V5rCOOLWN2C +ODECIQDICHsi8QQQ9wpuJy8X5l8MAfxHL+DIqI84wQTeVM91FQIhAMTME8A18/7h +1Ad6drdnxAkuC0tX6Sx0LDozrmen+HFNAiAlcEDrt0RVkIcpOrg7tuhPLQf0oudl +Zvb3Xlj069awSQIgcT15E/43w2+RASifzVNhQ2MCTr1sSA8lL+xzK+REmnUCIBhQ +j4139pf8Re1J50zBxS/JlQfgDQi9sO9pYeiHIxNs +-----END RSA PRIVATE KEY-----`) + +// localhostCert was generated from crypto/tls/generate_cert.go with the following command: +// go run generate_cert.go --rsa-bits 512 --host 127.0.0.1,::1,example.com --ca --start-date "Jan 1 00:00:00 1970" --duration=1000000h +var localhostCert = []byte(`-----BEGIN CERTIFICATE----- +MIIBdzCCASOgAwIBAgIBADALBgkqhkiG9w0BAQUwEjEQMA4GA1UEChMHQWNtZSBD +bzAeFw03MDAxMDEwMDAwMDBaFw00OTEyMzEyMzU5NTlaMBIxEDAOBgNVBAoTB0Fj +bWUgQ28wWjALBgkqhkiG9w0BAQEDSwAwSAJBAN55NcYKZeInyTuhcCwFMhDHCmwa +IUSdtXdcbItRB/yfXGBhiex00IaLXQnSU+QZPRZWYqeTEbFSgihqi1PUDy8CAwEA +AaNoMGYwDgYDVR0PAQH/BAQDAgCkMBMGA1UdJQQMMAoGCCsGAQUFBwMBMA8GA1Ud +EwEB/wQFMAMBAf8wLgYDVR0RBCcwJYILZXhhbXBsZS5jb22HBH8AAAGHEAAAAAAA +AAAAAAAAAAAAAAEwCwYJKoZIhvcNAQEFA0EAAoQn/ytgqpiLcZu9XKbCJsJcvkgk +Se6AbGXgSlq+ZCEVo0qIwSgeBqmsJxUu7NCSOwVJLYNEBO2DtIxoYVk+MA== +-----END CERTIFICATE-----`) + +// localhostKey is the private key for localhostCert. +var localhostKey = []byte(`-----BEGIN RSA PRIVATE KEY----- +MIIBPAIBAAJBAN55NcYKZeInyTuhcCwFMhDHCmwaIUSdtXdcbItRB/yfXGBhiex0 +0IaLXQnSU+QZPRZWYqeTEbFSgihqi1PUDy8CAwEAAQJBAQdUx66rfh8sYsgfdcvV +NoafYpnEcB5s4m/vSVe6SU7dCK6eYec9f9wpT353ljhDUHq3EbmE4foNzJngh35d +AekCIQDhRQG5Li0Wj8TM4obOnnXUXf1jRv0UkzE9AHWLG5q3AwIhAPzSjpYUDjVW +MCUXgckTpKCuGwbJk7424Nb8bLzf3kllAiA5mUBgjfr/WtFSJdWcPQ4Zt9KTMNKD +EUO0ukpTwEIl6wIhAMbGqZK3zAAFdq8DD2jPx+UJXnh0rnOkZBzDtJ6/iN69AiEA +1Aq8MJgTaYsDQWyU/hDq5YkDJc9e9DSCvUIzqxQWMQE= +-----END RSA PRIVATE KEY-----`) diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/registry/generic/rest/response_checker.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/registry/generic/rest/response_checker.go new file mode 100644 index 000000000000..b0c61075c135 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/registry/generic/rest/response_checker.go @@ -0,0 +1,71 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rest
+
+import (
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+
+	"k8s.io/kubernetes/pkg/api/errors"
+	"k8s.io/kubernetes/pkg/api/unversioned"
+)
+
+// HttpResponseChecker checks the HTTP error status from a location URL,
+// converts the error into a structured API object, and ensures the response
+// body is closed before the error is returned.
+type HttpResponseChecker interface {
+	Check(resp *http.Response) error
+}
+
+// Maximum length read from the response body of a location which returns an error status
+const (
+	maxReadLength = 50000
+)
+
+// GenericHttpResponseChecker is a generic http response checker to transform the error.
+type GenericHttpResponseChecker struct {
+	QualifiedResource unversioned.GroupResource
+	Name              string
+}
+
+func (checker GenericHttpResponseChecker) Check(resp *http.Response) error {
+	if resp.StatusCode < http.StatusOK || resp.StatusCode > http.StatusPartialContent {
+		defer resp.Body.Close()
+		bodyBytes, err := ioutil.ReadAll(io.LimitReader(resp.Body, maxReadLength))
+		if err != nil {
+			return errors.NewInternalError(err)
+		}
+		bodyText := string(bodyBytes)
+
+		switch {
+		case resp.StatusCode == http.StatusInternalServerError:
+			return errors.NewInternalError(fmt.Errorf("%s", bodyText))
+		case resp.StatusCode == http.StatusBadRequest:
+			return errors.NewBadRequest(bodyText)
+		case resp.StatusCode == http.StatusNotFound:
+			return errors.NewGenericServerResponse(resp.StatusCode, "", checker.QualifiedResource, checker.Name, bodyText, 0, false)
+		}
+		return errors.NewGenericServerResponse(resp.StatusCode, "", checker.QualifiedResource, checker.Name, bodyText, 0, false)
+	}
+	return nil
+}
+
+func NewGenericHttpResponseChecker(qualifiedResource unversioned.GroupResource, name string) GenericHttpResponseChecker {
+	return GenericHttpResponseChecker{QualifiedResource: qualifiedResource, Name: name}
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/registry/generic/rest/response_checker_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/registry/generic/rest/response_checker_test.go
new file mode 100644
index 000000000000..f1ad62020a8b
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/registry/generic/rest/response_checker_test.go
@@ -0,0 +1,95 @@
+/*
+Copyright 2014 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rest
+
+import (
+	"bytes"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"reflect"
+	"strings"
+	"testing"
+
+	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/api/errors"
+)
+
+func TestGenericHttpResponseChecker(t *testing.T) {
+	responseChecker := NewGenericHttpResponseChecker(api.Resource("pods"), "foo")
+	tests := []struct {
+		resp        *http.Response
+		expectError bool
+		expected    error
+		name        string
+	}{
+		{
+			resp: &http.Response{
+				Body:       ioutil.NopCloser(bytes.NewBufferString("Success")),
+				StatusCode: http.StatusOK,
+			},
+			expectError: false,
+			name:        "ok",
+		},
+		{
+			resp: &http.Response{
+				Body:       ioutil.NopCloser(bytes.NewBufferString("Invalid request.")),
+				StatusCode: http.StatusBadRequest,
+			},
+			expectError: true,
+			expected:    errors.NewBadRequest("Invalid request."),
+			name:        "bad request",
+		},
+		{
+			resp: &http.Response{
+				Body:       ioutil.NopCloser(bytes.NewBufferString("Pod does not exist.")),
+				StatusCode: http.StatusInternalServerError,
+			},
+			expectError: true,
+			expected:    errors.NewInternalError(fmt.Errorf("%s", "Pod does not exist.")),
+			name:        "internal server error",
+		},
+	}
+	for _, test := range tests {
+		err := responseChecker.Check(test.resp)
+		if test.expectError && err == nil {
+			t.Error("unexpected non-error")
+		}
+		if !test.expectError && err != nil {
+			t.Errorf("unexpected error: %v", err)
+		}
+		if test.expectError && !reflect.DeepEqual(err, test.expected) {
+			t.Errorf("expected: %s, saw: %s", test.expected, err)
+		}
+	}
+}
+
+func TestGenericHttpResponseCheckerLimitReader(t *testing.T) {
+	responseChecker := NewGenericHttpResponseChecker(api.Resource("pods"), "foo")
+	excessedString := strings.Repeat("a", (maxReadLength + 10000))
+	resp := &http.Response{
+		Body:       ioutil.NopCloser(bytes.NewBufferString(excessedString)),
+		StatusCode: http.StatusBadRequest,
+	}
+	err := responseChecker.Check(resp)
+	if err == nil {
+		t.Error("unexpected non-error")
+	}
+	if len(err.Error()) != maxReadLength {
+		t.Errorf("expected length of error message: %d, saw: %d", maxReadLength, len(err.Error()))
+	}
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/registry/generic/rest/streamer.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/registry/generic/rest/streamer.go
new file mode 100644
index 000000000000..afa9eb5b5dc0
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/registry/generic/rest/streamer.go
@@ -0,0 +1,79 @@
+/*
+Copyright 2014 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package rest + +import ( + "io" + "net/http" + "net/url" + "strings" + + "k8s.io/kubernetes/pkg/api/rest" + "k8s.io/kubernetes/pkg/api/unversioned" +) + +// LocationStreamer is a resource that streams the contents of a particular +// location URL +type LocationStreamer struct { + Location *url.URL + Transport http.RoundTripper + ContentType string + Flush bool + ResponseChecker HttpResponseChecker +} + +// a LocationStreamer must implement a rest.ResourceStreamer +var _ rest.ResourceStreamer = &LocationStreamer{} + +func (obj *LocationStreamer) GetObjectKind() unversioned.ObjectKind { + return unversioned.EmptyObjectKind +} + +// InputStream returns a stream with the contents of the URL location. If no location is provided, +// a null stream is returned. +func (s *LocationStreamer) InputStream(apiVersion, acceptHeader string) (stream io.ReadCloser, flush bool, contentType string, err error) { + if s.Location == nil { + // If no location was provided, return a null stream + return nil, false, "", nil + } + transport := s.Transport + if transport == nil { + transport = http.DefaultTransport + } + client := &http.Client{Transport: transport} + resp, err := client.Get(s.Location.String()) + if err != nil { + return nil, false, "", err + } + + if s.ResponseChecker != nil { + if err = s.ResponseChecker.Check(resp); err != nil { + return nil, false, "", err + } + } + + contentType = s.ContentType + if len(contentType) == 0 { + contentType = resp.Header.Get("Content-Type") + if len(contentType) > 0 { + contentType = strings.TrimSpace(strings.SplitN(contentType, ";", 2)[0]) + } + } + flush = s.Flush + stream = resp.Body + return +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/registry/generic/rest/streamer_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/registry/generic/rest/streamer_test.go new file mode 100644 index 000000000000..956222837644 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/registry/generic/rest/streamer_test.go @@ -0,0 +1,148 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rest + +import ( + "bufio" + "bytes" + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/url" + "reflect" + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/errors" +) + +func TestInputStreamReader(t *testing.T) { + resultString := "Test output" + s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + w.Write([]byte(resultString)) + })) + defer s.Close() + u, err := url.Parse(s.URL) + if err != nil { + t.Errorf("Error parsing server URL: %v", err) + return + } + streamer := &LocationStreamer{ + Location: u, + } + readCloser, _, _, err := streamer.InputStream("", "") + if err != nil { + t.Errorf("Unexpected error when getting stream: %v", err) + return + } + defer readCloser.Close() + result, err := ioutil.ReadAll(readCloser) + if string(result) != resultString { + t.Errorf("Stream content does not match. Got: %s. 
Expected: %s.", string(result), resultString) + } +} + +func TestInputStreamNullLocation(t *testing.T) { + streamer := &LocationStreamer{ + Location: nil, + } + readCloser, _, _, err := streamer.InputStream("", "") + if err != nil { + t.Errorf("Unexpected error when getting stream with null location: %v", err) + } + if readCloser != nil { + t.Errorf("Expected stream to be nil. Got: %#v", readCloser) + } +} + +type testTransport struct { + body string + err error +} + +func (tt *testTransport) RoundTrip(req *http.Request) (*http.Response, error) { + r := bufio.NewReader(bytes.NewBufferString(tt.body)) + return http.ReadResponse(r, req) +} + +func fakeTransport(mime, message string) http.RoundTripper { + content := fmt.Sprintf("HTTP/1.1 200 OK\nContent-Type: %s\n\n%s", mime, message) + return &testTransport{body: content} +} + +func TestInputStreamContentType(t *testing.T) { + location, _ := url.Parse("http://www.example.com") + streamer := &LocationStreamer{ + Location: location, + Transport: fakeTransport("application/json", "hello world"), + } + readCloser, _, contentType, err := streamer.InputStream("", "") + if err != nil { + t.Errorf("Unexpected error when getting stream: %v", err) + return + } + defer readCloser.Close() + if contentType != "application/json" { + t.Errorf("Unexpected content type. Got: %s. Expected: application/json", contentType) + } +} + +func TestInputStreamTransport(t *testing.T) { + message := "hello world" + location, _ := url.Parse("http://www.example.com") + streamer := &LocationStreamer{ + Location: location, + Transport: fakeTransport("text/plain", message), + } + readCloser, _, _, err := streamer.InputStream("", "") + if err != nil { + t.Errorf("Unexpected error when getting stream: %v", err) + return + } + defer readCloser.Close() + result, err := ioutil.ReadAll(readCloser) + if string(result) != message { + t.Errorf("Stream content does not match. Got: %s. Expected: %s.", string(result), message) + } +} + +func fakeInternalServerErrorTransport(mime, message string) http.RoundTripper { + content := fmt.Sprintf("HTTP/1.1 500 \"Internal Server Error\"\nContent-Type: %s\n\n%s", mime, message) + return &testTransport{body: content} +} + +func TestInputStreamInternalServerErrorTransport(t *testing.T) { + message := "Pod is in PodPending" + location, _ := url.Parse("http://www.example.com") + streamer := &LocationStreamer{ + Location: location, + Transport: fakeInternalServerErrorTransport("text/plain", message), + ResponseChecker: NewGenericHttpResponseChecker(api.Resource(""), ""), + } + expectedError := errors.NewInternalError(fmt.Errorf("%s", message)) + + _, _, _, err := streamer.InputStream("", "") + if err == nil { + t.Errorf("unexpected non-error") + return + } + + if !reflect.DeepEqual(err, expectedError) { + t.Errorf("StreamInternalServerError does not match. Got: %s. Expected: %s.", err, expectedError) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/registry/generic/storage_decorator.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/registry/generic/storage_decorator.go new file mode 100644 index 000000000000..70109efe3364 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/registry/generic/storage_decorator.go @@ -0,0 +1,44 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generic + +import ( + "k8s.io/kubernetes/pkg/api/rest" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/storage" +) + +// StorageDecorator is a function signature for producing +// a storage.Interface from given parameters. +type StorageDecorator func( + storageInterface storage.Interface, + capacity int, + objectType runtime.Object, + resourcePrefix string, + scopeStrategy rest.NamespaceScopedStrategy, + newListFunc func() runtime.Object) storage.Interface + +// Returns given 'storageInterface' without any decoration. +func UndecoratedStorage( + storageInterface storage.Interface, + capacity int, + objectType runtime.Object, + resourcePrefix string, + scopeStrategy rest.NamespaceScopedStrategy, + newListFunc func() runtime.Object) storage.Interface { + return storageInterface +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata/codec.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata/codec.go new file mode 100644 index 000000000000..377cdf88b026 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata/codec.go @@ -0,0 +1,484 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package thirdpartyresourcedata + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "net/url" + "strings" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/meta" + "k8s.io/kubernetes/pkg/api/unversioned" + apiutil "k8s.io/kubernetes/pkg/api/util" + "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/apimachinery/registered" + "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/util/sets" +) + +type thirdPartyObjectConverter struct { + converter runtime.ObjectConvertor +} + +func (t *thirdPartyObjectConverter) ConvertToVersion(in runtime.Object, outVersion unversioned.GroupVersion) (out runtime.Object, err error) { + switch in.(type) { + // This seems weird, but in this case the ThirdPartyResourceData is really just a wrapper on the raw 3rd party data. + // The actual thing printed/sent to server is the actual raw third party resource data, which only has one version. 
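+	// Illustrative example (assumed shape, not from the upstream file): an
+	// instance such as
+	//
+	//	{"kind": "Foo", "apiVersion": "company.com/v1", "metadata": {...}, "someField": "bar"}
+	//
+	// travels verbatim in ThirdPartyResourceData.Data, so there is no second
+	// version to convert it to.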
+ case *extensions.ThirdPartyResourceData: + return in, nil + default: + return t.converter.ConvertToVersion(in, outVersion) + } +} + +func (t *thirdPartyObjectConverter) Convert(in, out interface{}) error { + return t.converter.Convert(in, out) +} + +func (t *thirdPartyObjectConverter) ConvertFieldLabel(version, kind, label, value string) (string, string, error) { + return t.converter.ConvertFieldLabel(version, kind, label, value) +} + +func NewThirdPartyObjectConverter(converter runtime.ObjectConvertor) runtime.ObjectConvertor { + return &thirdPartyObjectConverter{converter} +} + +type thirdPartyResourceDataMapper struct { + mapper meta.RESTMapper + kind string + version string + group string +} + +var _ meta.RESTMapper = &thirdPartyResourceDataMapper{} + +func (t *thirdPartyResourceDataMapper) getResource() unversioned.GroupVersionResource { + plural, _ := meta.KindToResource(t.getKind()) + + return plural +} + +func (t *thirdPartyResourceDataMapper) getKind() unversioned.GroupVersionKind { + return unversioned.GroupVersionKind{Group: t.group, Version: t.version, Kind: t.kind} +} + +func (t *thirdPartyResourceDataMapper) isThirdPartyResource(partialResource unversioned.GroupVersionResource) bool { + actualResource := t.getResource() + if strings.ToLower(partialResource.Resource) != strings.ToLower(actualResource.Resource) { + return false + } + if len(partialResource.Group) != 0 && partialResource.Group != actualResource.Group { + return false + } + if len(partialResource.Version) != 0 && partialResource.Version != actualResource.Version { + return false + } + + return true +} + +func (t *thirdPartyResourceDataMapper) ResourcesFor(resource unversioned.GroupVersionResource) ([]unversioned.GroupVersionResource, error) { + if t.isThirdPartyResource(resource) { + return []unversioned.GroupVersionResource{t.getResource()}, nil + } + return t.mapper.ResourcesFor(resource) +} + +func (t *thirdPartyResourceDataMapper) KindsFor(resource unversioned.GroupVersionResource) ([]unversioned.GroupVersionKind, error) { + if t.isThirdPartyResource(resource) { + return []unversioned.GroupVersionKind{t.getKind()}, nil + } + return t.mapper.KindsFor(resource) +} + +func (t *thirdPartyResourceDataMapper) ResourceFor(resource unversioned.GroupVersionResource) (unversioned.GroupVersionResource, error) { + if t.isThirdPartyResource(resource) { + return t.getResource(), nil + } + return t.mapper.ResourceFor(resource) +} + +func (t *thirdPartyResourceDataMapper) KindFor(resource unversioned.GroupVersionResource) (unversioned.GroupVersionKind, error) { + if t.isThirdPartyResource(resource) { + return t.getKind(), nil + } + return t.mapper.KindFor(resource) +} + +func (t *thirdPartyResourceDataMapper) RESTMapping(gk unversioned.GroupKind, versions ...string) (*meta.RESTMapping, error) { + if len(versions) != 1 { + return nil, fmt.Errorf("unexpected set of versions: %v", versions) + } + if gk.Group != t.group { + return nil, fmt.Errorf("unknown group %q expected %s", gk.Group, t.group) + } + if gk.Kind != "ThirdPartyResourceData" { + return nil, fmt.Errorf("unknown kind %s expected %s", gk.Kind, t.kind) + } + if versions[0] != t.version { + return nil, fmt.Errorf("unknown version %q expected %q", versions[0], t.version) + } + + // TODO figure out why we're doing this rewriting + extensionGK := unversioned.GroupKind{Group: extensions.GroupName, Kind: "ThirdPartyResourceData"} + + mapping, err := t.mapper.RESTMapping(extensionGK, registered.GroupOrDie(extensions.GroupName).GroupVersion.Version) + if err != nil { + 
return nil, err + } + mapping.ObjectConvertor = &thirdPartyObjectConverter{mapping.ObjectConvertor} + return mapping, nil +} + +func (t *thirdPartyResourceDataMapper) AliasesForResource(resource string) ([]string, bool) { + return t.mapper.AliasesForResource(resource) +} + +func (t *thirdPartyResourceDataMapper) ResourceSingularizer(resource string) (singular string, err error) { + return t.mapper.ResourceSingularizer(resource) +} + +func NewMapper(mapper meta.RESTMapper, kind, version, group string) meta.RESTMapper { + return &thirdPartyResourceDataMapper{ + mapper: mapper, + kind: kind, + version: version, + group: group, + } +} + +type thirdPartyResourceDataCodecFactory struct { + runtime.NegotiatedSerializer + kind string + encodeGV unversioned.GroupVersion + decodeGV unversioned.GroupVersion +} + +func NewNegotiatedSerializer(s runtime.NegotiatedSerializer, kind string, encodeGV, decodeGV unversioned.GroupVersion) runtime.NegotiatedSerializer { + return &thirdPartyResourceDataCodecFactory{ + NegotiatedSerializer: s, + + kind: kind, + encodeGV: encodeGV, + decodeGV: decodeGV, + } +} + +func (t *thirdPartyResourceDataCodecFactory) SupportedMediaTypes() []string { + supported := sets.NewString(t.NegotiatedSerializer.SupportedMediaTypes()...) + return supported.Intersection(sets.NewString("application/json", "application/yaml")).List() +} + +func (t *thirdPartyResourceDataCodecFactory) SupportedStreamingMediaTypes() []string { + supported := sets.NewString(t.NegotiatedSerializer.SupportedStreamingMediaTypes()...) + return supported.Intersection(sets.NewString("application/json", "application/json;stream=watch")).List() +} + +func (t *thirdPartyResourceDataCodecFactory) EncoderForVersion(s runtime.Encoder, gv unversioned.GroupVersion) runtime.Encoder { + return &thirdPartyResourceDataEncoder{delegate: t.NegotiatedSerializer.EncoderForVersion(s, gv), kind: t.kind} +} + +func (t *thirdPartyResourceDataCodecFactory) DecoderToVersion(s runtime.Decoder, gv unversioned.GroupVersion) runtime.Decoder { + return NewDecoder(t.NegotiatedSerializer.DecoderToVersion(s, gv), t.kind) +} + +func NewCodec(delegate runtime.Codec, kind string) runtime.Codec { + return runtime.NewCodec(NewEncoder(delegate, kind), NewDecoder(delegate, kind)) +} + +type thirdPartyResourceDataDecoder struct { + delegate runtime.Decoder + kind string +} + +func NewDecoder(delegate runtime.Decoder, kind string) runtime.Decoder { + return &thirdPartyResourceDataDecoder{delegate: delegate, kind: kind} +} + +var _ runtime.Decoder = &thirdPartyResourceDataDecoder{} + +func parseObject(data []byte) (map[string]interface{}, error) { + var obj interface{} + if err := json.Unmarshal(data, &obj); err != nil { + fmt.Printf("Invalid JSON:\n%s\n", string(data)) + return nil, err + } + mapObj, ok := obj.(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("unexpected object: %#v", obj) + } + return mapObj, nil +} + +func (t *thirdPartyResourceDataDecoder) populate(data []byte) (runtime.Object, error) { + mapObj, err := parseObject(data) + if err != nil { + return nil, err + } + return t.populateFromObject(mapObj, data) +} + +func (t *thirdPartyResourceDataDecoder) populateFromObject(mapObj map[string]interface{}, data []byte) (runtime.Object, error) { + typeMeta := unversioned.TypeMeta{} + if err := json.Unmarshal(data, &typeMeta); err != nil { + return nil, err + } + switch typeMeta.Kind { + case t.kind: + result := &extensions.ThirdPartyResourceData{} + if err := t.populateResource(result, mapObj, data); err != nil { + return nil, 
err + } + return result, nil + case t.kind + "List": + list := &extensions.ThirdPartyResourceDataList{} + if err := t.populateListResource(list, mapObj); err != nil { + return nil, err + } + return list, nil + default: + return nil, fmt.Errorf("unexpected kind: %s, expected %s", typeMeta.Kind, t.kind) + } +} + +func (t *thirdPartyResourceDataDecoder) populateResource(objIn *extensions.ThirdPartyResourceData, mapObj map[string]interface{}, data []byte) error { + metadata, ok := mapObj["metadata"].(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected object for metadata: %#v", mapObj["metadata"]) + } + + metadataData, err := json.Marshal(metadata) + if err != nil { + return err + } + + if err := json.Unmarshal(metadataData, &objIn.ObjectMeta); err != nil { + return err + } + // Override API Version with the ThirdPartyResourceData value + // TODO: fix this hard code + objIn.APIVersion = v1beta1.SchemeGroupVersion.String() + + objIn.Data = data + return nil +} + +func (t *thirdPartyResourceDataDecoder) Decode(data []byte, gvk *unversioned.GroupVersionKind, into runtime.Object) (runtime.Object, *unversioned.GroupVersionKind, error) { + if into == nil { + obj, err := t.populate(data) + if err != nil { + return nil, nil, err + } + return obj, gvk, nil + } + thirdParty, ok := into.(*extensions.ThirdPartyResourceData) + if !ok { + return nil, nil, fmt.Errorf("unexpected object: %#v", into) + } + + var dataObj interface{} + if err := json.Unmarshal(data, &dataObj); err != nil { + return nil, nil, err + } + mapObj, ok := dataObj.(map[string]interface{}) + if !ok { + + return nil, nil, fmt.Errorf("unexpected object: %#v", dataObj) + } + /*if gvk.Kind != "ThirdPartyResourceData" { + return nil, nil, fmt.Errorf("unexpected kind: %s", gvk.Kind) + }*/ + actual := &unversioned.GroupVersionKind{} + if kindObj, found := mapObj["kind"]; !found { + if gvk == nil { + return nil, nil, runtime.NewMissingKindErr(string(data)) + } + mapObj["kind"] = gvk.Kind + actual.Kind = gvk.Kind + } else { + kindStr, ok := kindObj.(string) + if !ok { + return nil, nil, fmt.Errorf("unexpected object for 'kind': %v", kindObj) + } + if kindStr != t.kind { + return nil, nil, fmt.Errorf("kind doesn't match, expecting: %s, got %s", gvk.Kind, kindStr) + } + actual.Kind = t.kind + } + if versionObj, found := mapObj["apiVersion"]; !found { + if gvk == nil { + return nil, nil, runtime.NewMissingVersionErr(string(data)) + } + mapObj["apiVersion"] = gvk.GroupVersion().String() + actual.Group, actual.Version = gvk.Group, gvk.Version + } else { + versionStr, ok := versionObj.(string) + if !ok { + return nil, nil, fmt.Errorf("unexpected object for 'apiVersion': %v", versionObj) + } + if gvk != nil && versionStr != gvk.GroupVersion().String() { + return nil, nil, fmt.Errorf("version doesn't match, expecting: %v, got %s", gvk.GroupVersion(), versionStr) + } + gv, err := unversioned.ParseGroupVersion(versionStr) + if err != nil { + return nil, nil, err + } + actual.Group, actual.Version = gv.Group, gv.Version + } + + mapObj, err := parseObject(data) + if err != nil { + return nil, actual, err + } + if err := t.populateResource(thirdParty, mapObj, data); err != nil { + return nil, actual, err + } + return thirdParty, actual, nil +} + +func (t *thirdPartyResourceDataDecoder) populateListResource(objIn *extensions.ThirdPartyResourceDataList, mapObj map[string]interface{}) error { + items, ok := mapObj["items"].([]interface{}) + if !ok { + return fmt.Errorf("unexpected object for items: %#v", mapObj["items"]) + } + objIn.Items = 
make([]extensions.ThirdPartyResourceData, len(items)) + for ix := range items { + objData, err := json.Marshal(items[ix]) + if err != nil { + return err + } + objMap, err := parseObject(objData) + if err != nil { + return err + } + if err := t.populateResource(&objIn.Items[ix], objMap, objData); err != nil { + return err + } + } + return nil +} + +const template = `{ + "kind": "%s", + "items": [ %s ] +}` + +type thirdPartyResourceDataEncoder struct { + delegate runtime.Encoder + kind string +} + +func NewEncoder(delegate runtime.Encoder, kind string) runtime.Encoder { + return &thirdPartyResourceDataEncoder{delegate: delegate, kind: kind} +} + +var _ runtime.Encoder = &thirdPartyResourceDataEncoder{} + +func encodeToJSON(obj *extensions.ThirdPartyResourceData, stream io.Writer) error { + var objOut interface{} + if err := json.Unmarshal(obj.Data, &objOut); err != nil { + return err + } + objMap, ok := objOut.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected type: %v", objOut) + } + objMap["metadata"] = obj.ObjectMeta + encoder := json.NewEncoder(stream) + return encoder.Encode(objMap) +} + +func (t *thirdPartyResourceDataEncoder) EncodeToStream(obj runtime.Object, stream io.Writer, overrides ...unversioned.GroupVersion) (err error) { + switch obj := obj.(type) { + case *extensions.ThirdPartyResourceData: + return encodeToJSON(obj, stream) + case *extensions.ThirdPartyResourceDataList: + // TODO: There must be a better way to do this... + dataStrings := make([]string, len(obj.Items)) + for ix := range obj.Items { + buff := &bytes.Buffer{} + err := encodeToJSON(&obj.Items[ix], buff) + if err != nil { + return err + } + dataStrings[ix] = buff.String() + } + fmt.Fprintf(stream, template, t.kind+"List", strings.Join(dataStrings, ",")) + return nil + case *unversioned.Status, *unversioned.APIResourceList: + return t.delegate.EncodeToStream(obj, stream, overrides...) + default: + return fmt.Errorf("unexpected object to encode: %#v", obj) + } +} + +func NewObjectCreator(group, version string, delegate runtime.ObjectCreater) runtime.ObjectCreater { + return &thirdPartyResourceDataCreator{group, version, delegate} +} + +type thirdPartyResourceDataCreator struct { + group string + version string + delegate runtime.ObjectCreater +} + +func (t *thirdPartyResourceDataCreator) New(kind unversioned.GroupVersionKind) (out runtime.Object, err error) { + switch kind.Kind { + case "ThirdPartyResourceData": + if apiutil.GetGroupVersion(t.group, t.version) != kind.GroupVersion().String() { + return nil, fmt.Errorf("unknown kind %v", kind) + } + return &extensions.ThirdPartyResourceData{}, nil + case "ThirdPartyResourceDataList": + if apiutil.GetGroupVersion(t.group, t.version) != kind.GroupVersion().String() { + return nil, fmt.Errorf("unknown kind %v", kind) + } + return &extensions.ThirdPartyResourceDataList{}, nil + // TODO: this list needs to be formalized higher in the chain + case "ListOptions", "WatchEvent": + if apiutil.GetGroupVersion(t.group, t.version) == kind.GroupVersion().String() { + // Translate third party group to external group. 
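+			// For example (illustrative): a WatchEvent requested against a third
+			// party group version such as "company.com/v1" is instantiated from
+			// the first enabled version of the core API group (e.g. "v1").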
+ gvk := registered.EnabledVersionsForGroup(api.GroupName)[0].WithKind(kind.Kind) + return t.delegate.New(gvk) + } + return t.delegate.New(kind) + default: + return t.delegate.New(kind) + } +} + +func NewThirdPartyParameterCodec(p runtime.ParameterCodec) runtime.ParameterCodec { + return &thirdPartyParameterCodec{p} +} + +type thirdPartyParameterCodec struct { + delegate runtime.ParameterCodec +} + +func (t *thirdPartyParameterCodec) DecodeParameters(parameters url.Values, from unversioned.GroupVersion, into runtime.Object) error { + return t.delegate.DecodeParameters(parameters, v1.SchemeGroupVersion, into) +} + +func (t *thirdPartyParameterCodec) EncodeParameters(obj runtime.Object, to unversioned.GroupVersion) (url.Values, error) { + return t.delegate.EncodeParameters(obj, v1.SchemeGroupVersion) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata/codec_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata/codec_test.go new file mode 100644 index 000000000000..942de910ccb2 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata/codec_test.go @@ -0,0 +1,181 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package thirdpartyresourcedata + +import ( + "encoding/json" + "reflect" + "testing" + "time" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/pkg/runtime" +) + +type Foo struct { + unversioned.TypeMeta `json:",inline"` + api.ObjectMeta `json:"metadata,omitempty" description:"standard object metadata"` + + SomeField string `json:"someField"` + OtherField int `json:"otherField"` +} + +type FooList struct { + unversioned.TypeMeta `json:",inline"` + unversioned.ListMeta `json:"metadata,omitempty" description:"standard list metadata; see http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata"` + + Items []Foo `json:"items"` +} + +func TestCodec(t *testing.T) { + tests := []struct { + obj *Foo + expectErr bool + name string + }{ + { + obj: &Foo{ObjectMeta: api.ObjectMeta{Name: "bar"}}, + expectErr: true, + name: "missing kind", + }, + { + obj: &Foo{ObjectMeta: api.ObjectMeta{Name: "bar"}, TypeMeta: unversioned.TypeMeta{Kind: "Foo"}}, + name: "basic", + }, + { + obj: &Foo{ObjectMeta: api.ObjectMeta{Name: "bar", ResourceVersion: "baz"}, TypeMeta: unversioned.TypeMeta{Kind: "Foo"}}, + name: "resource version", + }, + { + obj: &Foo{ + ObjectMeta: api.ObjectMeta{ + Name: "bar", + CreationTimestamp: unversioned.Time{Time: time.Unix(100, 0)}, + }, + TypeMeta: unversioned.TypeMeta{Kind: "Foo"}, + }, + name: "creation time", + }, + { + obj: &Foo{ + ObjectMeta: api.ObjectMeta{ + Name: "bar", + ResourceVersion: "baz", + Labels: map[string]string{"foo": "bar", "baz": "blah"}, + }, + TypeMeta: unversioned.TypeMeta{Kind: "Foo"}, + }, + name: "labels", + }, + } + for _, test := range tests { + d := &thirdPartyResourceDataDecoder{kind: "Foo", delegate: testapi.Extensions.Codec()} + e := &thirdPartyResourceDataEncoder{kind: "Foo", delegate: testapi.Extensions.Codec()} + data, err := json.Marshal(test.obj) + if err != nil { + t.Errorf("[%s] unexpected error: %v", test.name, err) + continue + } + obj, err := runtime.Decode(d, data) + if err != nil && !test.expectErr { + t.Errorf("[%s] unexpected error: %v", test.name, err) + continue + } + if test.expectErr { + if err == nil { + t.Errorf("[%s] unexpected non-error", test.name) + } + continue + } + rsrcObj, ok := obj.(*extensions.ThirdPartyResourceData) + if !ok { + t.Errorf("[%s] unexpected object: %v", test.name, obj) + continue + } + if !reflect.DeepEqual(rsrcObj.ObjectMeta, test.obj.ObjectMeta) { + t.Errorf("[%s]\nexpected\n%v\nsaw\n%v\n", test.name, rsrcObj.ObjectMeta, test.obj.ObjectMeta) + } + var output Foo + if err := json.Unmarshal(rsrcObj.Data, &output); err != nil { + t.Errorf("[%s] unexpected error: %v", test.name, err) + continue + } + if !reflect.DeepEqual(&output, test.obj) { + t.Errorf("[%s]\nexpected\n%v\nsaw\n%v\n", test.name, test.obj, &output) + } + + data, err = runtime.Encode(e, rsrcObj) + if err != nil { + t.Errorf("[%s] unexpected error: %v", test.name, err) + } + + var output2 Foo + if err := json.Unmarshal(data, &output2); err != nil { + t.Errorf("[%s] unexpected error: %v", test.name, err) + continue + } + if !reflect.DeepEqual(&output2, test.obj) { + t.Errorf("[%s]\nexpected\n%v\nsaw\n%v\n", test.name, test.obj, &output2) + } + } +} + +func TestCreater(t *testing.T) { + creater := NewObjectCreator("creater group", "creater version", api.Scheme) + tests := []struct { + name string + kind unversioned.GroupVersionKind + expectedObj runtime.Object + expectErr bool 
+ }{ + { + name: "valid ThirdPartyResourceData creation", + kind: unversioned.GroupVersionKind{Group: "creater group", Version: "creater version", Kind: "ThirdPartyResourceData"}, + expectedObj: &extensions.ThirdPartyResourceData{}, + expectErr: false, + }, + { + name: "invalid ThirdPartyResourceData creation", + kind: unversioned.GroupVersionKind{Version: "invalid version", Kind: "ThirdPartyResourceData"}, + expectedObj: nil, + expectErr: true, + }, + { + name: "valid ListOptions creation", + kind: unversioned.GroupVersionKind{Version: "v1", Kind: "ListOptions"}, + expectedObj: &v1.ListOptions{}, + expectErr: false, + }, + } + for _, test := range tests { + out, err := creater.New(test.kind) + if err != nil && !test.expectErr { + t.Errorf("[%s] unexpected error: %v", test.name, err) + } + if err == nil && test.expectErr { + t.Errorf("[%s] unexpected non-error", test.name) + } + if !reflect.DeepEqual(test.expectedObj, out) { + t.Errorf("[%s] unexpected error: expect: %v, got: %v", test.name, test.expectedObj, out) + } + + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata/doc.go new file mode 100644 index 000000000000..62e2dc1e3eb7 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package thirdpartyresourcedata provides Registry interface and its REST +// implementation for storing ThirdPartyResourceData api objects. +package thirdpartyresourcedata diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata/etcd/etcd.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata/etcd/etcd.go new file mode 100644 index 000000000000..55eaf09b17c6 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata/etcd/etcd.go @@ -0,0 +1,78 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package etcd + +import ( + "strings" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/pkg/fields" + "k8s.io/kubernetes/pkg/labels" + "k8s.io/kubernetes/pkg/registry/generic" + "k8s.io/kubernetes/pkg/registry/generic/registry" + "k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata" + "k8s.io/kubernetes/pkg/runtime" +) + +// REST implements a RESTStorage for ThirdPartyResourceDatas against etcd +type REST struct { + *registry.Store + kind string +} + +// NewREST returns a registry which will store ThirdPartyResourceData in the given helper +func NewREST(opts generic.RESTOptions, group, kind string) *REST { + prefix := "/ThirdPartyResourceData/" + group + "/" + strings.ToLower(kind) + "s" + + // We explicitly do NOT do any decoration here yet. + storageInterface := opts.Storage + + store := ®istry.Store{ + NewFunc: func() runtime.Object { return &extensions.ThirdPartyResourceData{} }, + NewListFunc: func() runtime.Object { return &extensions.ThirdPartyResourceDataList{} }, + KeyRootFunc: func(ctx api.Context) string { + return registry.NamespaceKeyRootFunc(ctx, prefix) + }, + KeyFunc: func(ctx api.Context, id string) (string, error) { + return registry.NamespaceKeyFunc(ctx, prefix, id) + }, + ObjectNameFunc: func(obj runtime.Object) (string, error) { + return obj.(*extensions.ThirdPartyResourceData).Name, nil + }, + PredicateFunc: func(label labels.Selector, field fields.Selector) generic.Matcher { + return thirdpartyresourcedata.Matcher(label, field) + }, + QualifiedResource: extensions.Resource("thirdpartyresourcedatas"), + DeleteCollectionWorkers: opts.DeleteCollectionWorkers, + CreateStrategy: thirdpartyresourcedata.Strategy, + UpdateStrategy: thirdpartyresourcedata.Strategy, + DeleteStrategy: thirdpartyresourcedata.Strategy, + + Storage: storageInterface, + } + + return &REST{ + Store: store, + kind: kind, + } +} + +// Implements the rest.KindProvider interface +func (r *REST) Kind() string { + return r.kind +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata/etcd/etcd_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata/etcd/etcd_test.go new file mode 100644 index 000000000000..6b4eabf8acb8 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata/etcd/etcd_test.go @@ -0,0 +1,121 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package etcd + +import ( + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/extensions" + // Ensure that extensions/v1beta1 package is initialized. 
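+	// (the blank import below has the side effect of running that package's
+	// init function, which registers the v1beta1 types with the API scheme)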
+ _ "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" + "k8s.io/kubernetes/pkg/fields" + "k8s.io/kubernetes/pkg/labels" + "k8s.io/kubernetes/pkg/registry/generic" + "k8s.io/kubernetes/pkg/registry/registrytest" + "k8s.io/kubernetes/pkg/runtime" + etcdtesting "k8s.io/kubernetes/pkg/storage/etcd/testing" +) + +func newStorage(t *testing.T) (*REST, *etcdtesting.EtcdTestServer) { + etcdStorage, server := registrytest.NewEtcdStorage(t, extensions.GroupName) + restOptions := generic.RESTOptions{Storage: etcdStorage, Decorator: generic.UndecoratedStorage, DeleteCollectionWorkers: 1} + return NewREST(restOptions, "foo", "bar"), server +} + +func validNewThirdPartyResourceData(name string) *extensions.ThirdPartyResourceData { + return &extensions.ThirdPartyResourceData{ + ObjectMeta: api.ObjectMeta{ + Name: name, + Namespace: api.NamespaceDefault, + }, + Data: []byte("foobarbaz"), + } +} + +func TestCreate(t *testing.T) { + storage, server := newStorage(t) + defer server.Terminate(t) + test := registrytest.New(t, storage.Store) + rsrc := validNewThirdPartyResourceData("foo") + rsrc.ObjectMeta = api.ObjectMeta{} + test.TestCreate( + // valid + rsrc, + // invalid + &extensions.ThirdPartyResourceData{}, + ) +} + +func TestUpdate(t *testing.T) { + storage, server := newStorage(t) + defer server.Terminate(t) + test := registrytest.New(t, storage.Store) + test.TestUpdate( + // valid + validNewThirdPartyResourceData("foo"), + // updateFunc + func(obj runtime.Object) runtime.Object { + object := obj.(*extensions.ThirdPartyResourceData) + object.Data = []byte("new description") + return object + }, + ) +} + +func TestDelete(t *testing.T) { + storage, server := newStorage(t) + defer server.Terminate(t) + test := registrytest.New(t, storage.Store) + test.TestDelete(validNewThirdPartyResourceData("foo")) +} + +func TestGet(t *testing.T) { + storage, server := newStorage(t) + defer server.Terminate(t) + test := registrytest.New(t, storage.Store) + test.TestGet(validNewThirdPartyResourceData("foo")) +} + +func TestList(t *testing.T) { + storage, server := newStorage(t) + defer server.Terminate(t) + test := registrytest.New(t, storage.Store) + test.TestList(validNewThirdPartyResourceData("foo")) +} + +func TestWatch(t *testing.T) { + storage, server := newStorage(t) + defer server.Terminate(t) + test := registrytest.New(t, storage.Store) + test.TestWatch( + validNewThirdPartyResourceData("foo"), + // matching labels + []labels.Set{}, + // not matching labels + []labels.Set{ + {"foo": "bar"}, + }, + // matching fields + []fields.Set{}, + // not matching fields + []fields.Set{ + {"metadata.name": "bar"}, + {"name": "foo"}, + }, + ) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata/registry.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata/registry.go new file mode 100644 index 000000000000..058276d1e2f5 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata/registry.go @@ -0,0 +1,80 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package thirdpartyresourcedata + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/rest" + "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/pkg/watch" +) + +// Registry is an interface implemented by things that know how to store ThirdPartyResourceData objects. +type Registry interface { + ListThirdPartyResourceData(ctx api.Context, options *api.ListOptions) (*extensions.ThirdPartyResourceDataList, error) + WatchThirdPartyResourceData(ctx api.Context, options *api.ListOptions) (watch.Interface, error) + GetThirdPartyResourceData(ctx api.Context, name string) (*extensions.ThirdPartyResourceData, error) + CreateThirdPartyResourceData(ctx api.Context, resource *extensions.ThirdPartyResourceData) (*extensions.ThirdPartyResourceData, error) + UpdateThirdPartyResourceData(ctx api.Context, resource *extensions.ThirdPartyResourceData) (*extensions.ThirdPartyResourceData, error) + DeleteThirdPartyResourceData(ctx api.Context, name string) error +} + +// storage puts strong typing around storage calls +type storage struct { + rest.StandardStorage +} + +// NewRegistry returns a new Registry interface for the given Storage. Any mismatched +// types will panic. +func NewRegistry(s rest.StandardStorage) Registry { + return &storage{s} +} + +func (s *storage) ListThirdPartyResourceData(ctx api.Context, options *api.ListOptions) (*extensions.ThirdPartyResourceDataList, error) { + obj, err := s.List(ctx, options) + if err != nil { + return nil, err + } + return obj.(*extensions.ThirdPartyResourceDataList), nil +} + +func (s *storage) WatchThirdPartyResourceData(ctx api.Context, options *api.ListOptions) (watch.Interface, error) { + return s.Watch(ctx, options) +} + +func (s *storage) GetThirdPartyResourceData(ctx api.Context, name string) (*extensions.ThirdPartyResourceData, error) { + obj, err := s.Get(ctx, name) + if err != nil { + return nil, err + } + return obj.(*extensions.ThirdPartyResourceData), nil +} + +func (s *storage) CreateThirdPartyResourceData(ctx api.Context, ThirdPartyResourceData *extensions.ThirdPartyResourceData) (*extensions.ThirdPartyResourceData, error) { + obj, err := s.Create(ctx, ThirdPartyResourceData) + return obj.(*extensions.ThirdPartyResourceData), err +} + +func (s *storage) UpdateThirdPartyResourceData(ctx api.Context, ThirdPartyResourceData *extensions.ThirdPartyResourceData) (*extensions.ThirdPartyResourceData, error) { + obj, _, err := s.Update(ctx, ThirdPartyResourceData.Name, rest.DefaultUpdatedObjectInfo(ThirdPartyResourceData, api.Scheme)) + return obj.(*extensions.ThirdPartyResourceData), err +} + +func (s *storage) DeleteThirdPartyResourceData(ctx api.Context, name string) error { + _, err := s.Delete(ctx, name, nil) + return err +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata/strategy.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata/strategy.go new file mode 100644 index 000000000000..9f7673d7c06e --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata/strategy.go @@ -0,0 +1,92 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package thirdpartyresourcedata + +import ( + "fmt" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/rest" + "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/pkg/apis/extensions/validation" + "k8s.io/kubernetes/pkg/fields" + "k8s.io/kubernetes/pkg/labels" + "k8s.io/kubernetes/pkg/registry/generic" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/util/validation/field" +) + +// strategy implements behavior for ThirdPartyResource objects +type strategy struct { + runtime.ObjectTyper + api.NameGenerator +} + +// Strategy is the default logic that applies when creating and updating ThirdPartyResource +// objects via the REST API. +var Strategy = strategy{api.Scheme, api.SimpleNameGenerator} + +var _ = rest.RESTCreateStrategy(Strategy) + +var _ = rest.RESTUpdateStrategy(Strategy) + +func (strategy) NamespaceScoped() bool { + return true +} + +func (strategy) PrepareForCreate(obj runtime.Object) { +} + +func (strategy) Validate(ctx api.Context, obj runtime.Object) field.ErrorList { + return validation.ValidateThirdPartyResourceData(obj.(*extensions.ThirdPartyResourceData)) +} + +// Canonicalize normalizes the object after validation. +func (strategy) Canonicalize(obj runtime.Object) { +} + +func (strategy) AllowCreateOnUpdate() bool { + return false +} + +func (strategy) PrepareForUpdate(obj, old runtime.Object) { +} + +func (strategy) ValidateUpdate(ctx api.Context, obj, old runtime.Object) field.ErrorList { + return validation.ValidateThirdPartyResourceDataUpdate(obj.(*extensions.ThirdPartyResourceData), old.(*extensions.ThirdPartyResourceData)) +} + +func (strategy) AllowUnconditionalUpdate() bool { + return true +} + +// Matcher returns a generic matcher for a given label and field selector. +func Matcher(label labels.Selector, field fields.Selector) generic.Matcher { + return generic.MatcherFunc(func(obj runtime.Object) (bool, error) { + sa, ok := obj.(*extensions.ThirdPartyResourceData) + if !ok { + return false, fmt.Errorf("not a ThirdPartyResourceData") + } + fields := SelectableFields(sa) + return label.Matches(labels.Set(sa.Labels)) && field.Matches(fields), nil + }) +} + +// SelectableFields returns a label set that can be used for filter selection +func SelectableFields(obj *extensions.ThirdPartyResourceData) labels.Set { + return labels.Set{} +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata/strategy_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata/strategy_test.go new file mode 100644 index 000000000000..75e821944b19 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata/strategy_test.go @@ -0,0 +1,35 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package thirdpartyresourcedata + +import ( + "testing" + + _ "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/testapi" + apitesting "k8s.io/kubernetes/pkg/api/testing" + "k8s.io/kubernetes/pkg/apis/extensions" +) + +func TestSelectableFieldLabelConversions(t *testing.T) { + apitesting.TestSelectableFieldLabelConversionsOfKind(t, + testapi.Extensions.GroupVersion().String(), + "ThirdPartyResourceData", + SelectableFields(&extensions.ThirdPartyResourceData{}), + nil, + ) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata/util.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata/util.go new file mode 100644 index 000000000000..120981e85ec2 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata/util.go @@ -0,0 +1,68 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package thirdpartyresourcedata
+
+import (
+	"fmt"
+	"strings"
+
+	"k8s.io/kubernetes/pkg/api/unversioned"
+	"k8s.io/kubernetes/pkg/apis/extensions"
+)
+
+func ExtractGroupVersionKind(list *extensions.ThirdPartyResourceList) ([]unversioned.GroupVersion, []unversioned.GroupVersionKind, error) {
+	gvs := []unversioned.GroupVersion{}
+	gvks := []unversioned.GroupVersionKind{}
+	for ix := range list.Items {
+		rsrc := &list.Items[ix]
+		kind, group, err := ExtractApiGroupAndKind(rsrc)
+		if err != nil {
+			return nil, nil, err
+		}
+		for _, version := range rsrc.Versions {
+			gv := unversioned.GroupVersion{Group: group, Version: version.Name}
+			gvs = append(gvs, gv)
+			gvks = append(gvks, unversioned.GroupVersionKind{Group: group, Version: version.Name, Kind: kind})
+		}
+	}
+	return gvs, gvks, nil
+}
+
+func convertToCamelCase(input string) string {
+	result := ""
+	toUpper := true
+	for ix := range input {
+		char := input[ix]
+		if toUpper {
+			result = result + string([]byte{(char - 32)})
+			toUpper = false
+		} else if char == '-' {
+			toUpper = true
+		} else {
+			result = result + string([]byte{char})
+		}
+	}
+	return result
+}
+
+func ExtractApiGroupAndKind(rsrc *extensions.ThirdPartyResource) (kind string, group string, err error) {
+	parts := strings.Split(rsrc.Name, ".")
+	if len(parts) < 3 {
+		return "", "", fmt.Errorf("unexpectedly short resource name: %s, expected at least <kind>.<domain>.<tld>", rsrc.Name)
+	}
+	return convertToCamelCase(parts[0]), strings.Join(parts[1:], "."), nil
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata/util_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata/util_test.go
new file mode 100644
index 000000000000..a18722c17979
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata/util_test.go
@@ -0,0 +1,66 @@
+/*
+Copyright 2015 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package thirdpartyresourcedata + +import ( + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/extensions" +) + +func TestExtractAPIGroupAndKind(t *testing.T) { + tests := []struct { + input string + expectedKind string + expectedGroup string + expectErr bool + }{ + { + input: "foo.company.com", + expectedKind: "Foo", + expectedGroup: "company.com", + }, + { + input: "cron-tab.company.com", + expectedKind: "CronTab", + expectedGroup: "company.com", + }, + { + input: "foo", + expectErr: true, + }, + } + + for _, test := range tests { + kind, group, err := ExtractApiGroupAndKind(&extensions.ThirdPartyResource{ObjectMeta: api.ObjectMeta{Name: test.input}}) + if err != nil && !test.expectErr { + t.Errorf("unexpected error: %v", err) + continue + } + if err == nil && test.expectErr { + t.Errorf("unexpected non-error") + continue + } + if kind != test.expectedKind { + t.Errorf("expected: %s, saw: %s", test.expectedKind, kind) + } + if group != test.expectedGroup { + t.Errorf("expected: %s, saw: %s", test.expectedGroup, group) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/codec.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/codec.go index bde0ae9755d5..e86c0861c73e 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/codec.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/codec.go @@ -18,6 +18,7 @@ package runtime import ( "bytes" + "encoding/base64" "fmt" "io" "net/url" @@ -77,13 +78,13 @@ func EncodeOrDie(e Encoder, obj Object) string { // UseOrCreateObject returns obj if the canonical ObjectKind returned by the provided typer matches gvk, or // invokes the ObjectCreator to instantiate a new gvk. Returns an error if the typer cannot find the object. -func UseOrCreateObject(t Typer, c ObjectCreater, gvk unversioned.GroupVersionKind, obj Object) (Object, error) { +func UseOrCreateObject(t ObjectTyper, c ObjectCreater, gvk unversioned.GroupVersionKind, obj Object) (Object, error) { if obj != nil { - into, _, err := t.ObjectKind(obj) + into, _, err := t.ObjectKinds(obj) if err != nil { return nil, err } - if gvk == *into { + if gvk == into[0] { return obj, nil } } @@ -115,7 +116,7 @@ func (n NoopDecoder) Decode(data []byte, gvk *unversioned.GroupVersionKind, into // NewParameterCodec creates a ParameterCodec capable of transforming url values into versioned objects and back. func NewParameterCodec(scheme *Scheme) ParameterCodec { return ¶meterCodec{ - typer: ObjectTyperToTyper(scheme), + typer: scheme, convertor: scheme, creator: scheme, } @@ -123,7 +124,7 @@ func NewParameterCodec(scheme *Scheme) ParameterCodec { // parameterCodec implements conversion to and from query parameters and objects. type parameterCodec struct { - typer Typer + typer ObjectTyper convertor ObjectConvertor creator ObjectCreater } @@ -136,10 +137,11 @@ func (c *parameterCodec) DecodeParameters(parameters url.Values, from unversione if len(parameters) == 0 { return nil } - targetGVK, _, err := c.typer.ObjectKind(into) + targetGVKs, _, err := c.typer.ObjectKinds(into) if err != nil { return err } + targetGVK := targetGVKs[0] if targetGVK.GroupVersion() == from { return c.convertor.Convert(¶meters, into) } @@ -156,12 +158,13 @@ func (c *parameterCodec) DecodeParameters(parameters url.Values, from unversione // EncodeParameters converts the provided object into the to version, then converts that object to url.Values. // Returns an error if conversion is not possible. 
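+// A usage sketch (the codec and options names here are illustrative only,
+// not part of this change):
+//
+//	values, err := codec.EncodeParameters(listOptions, v1.SchemeGroupVersion)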
 func (c *parameterCodec) EncodeParameters(obj Object, to unversioned.GroupVersion) (url.Values, error) {
-	gvk, _, err := c.typer.ObjectKind(obj)
+	gvks, _, err := c.typer.ObjectKinds(obj)
 	if err != nil {
 		return nil, err
 	}
+	gvk := gvks[0]
 	if to != gvk.GroupVersion() {
-		out, err := c.convertor.ConvertToVersion(obj, to.String())
+		out, err := c.convertor.ConvertToVersion(obj, to)
 		if err != nil {
 			return nil, err
 		}
@@ -169,3 +172,27 @@ func (c *parameterCodec) EncodeParameters(obj Object, to unversioned.GroupVersio
 	}
 	return queryparams.Convert(obj)
 }
+
+type base64Serializer struct {
+	Serializer
+}
+
+func NewBase64Serializer(s Serializer) Serializer {
+	return &base64Serializer{s}
+}
+
+func (s base64Serializer) EncodeToStream(obj Object, stream io.Writer, overrides ...unversioned.GroupVersion) error {
+	e := base64.NewEncoder(base64.StdEncoding, stream)
+	err := s.Serializer.EncodeToStream(obj, e, overrides...)
+	e.Close()
+	return err
+}
+
+func (s base64Serializer) Decode(data []byte, defaults *unversioned.GroupVersionKind, into Object) (Object, *unversioned.GroupVersionKind, error) {
+	out := make([]byte, base64.StdEncoding.DecodedLen(len(data)))
+	n, err := base64.StdEncoding.Decode(out, data)
+	if err != nil {
+		return nil, nil, err
+	}
+	return s.Serializer.Decode(out[:n], defaults, into)
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/conversion.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/conversion.go
index c13d9d042d25..69cf00fea5d6 100644
--- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/conversion.go
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/conversion.go
@@ -40,13 +40,13 @@ func JSONKeyMapper(key string, sourceTag, destTag reflect.StructTag) (string, st
 // DefaultStringConversions are helpers for converting []string and string to real values.
 var DefaultStringConversions = []interface{}{
-	convertStringSliceToString,
-	convertStringSliceToInt,
-	convertStringSliceToBool,
-	convertStringSliceToInt64,
+	Convert_Slice_string_To_string,
+	Convert_Slice_string_To_int,
+	Convert_Slice_string_To_bool,
+	Convert_Slice_string_To_int64,
 }
 
-func convertStringSliceToString(input *[]string, out *string, s conversion.Scope) error {
+func Convert_Slice_string_To_string(input *[]string, out *string, s conversion.Scope) error {
 	if len(*input) == 0 {
 		*out = ""
 	}
@@ -54,7 +54,7 @@ func convertStringSliceToString(input *[]string, out *string, s conversion.Scope
 	return nil
 }
 
-func convertStringSliceToInt(input *[]string, out *int, s conversion.Scope) error {
+func Convert_Slice_string_To_int(input *[]string, out *int, s conversion.Scope) error {
 	if len(*input) == 0 {
 		*out = 0
 	}
@@ -67,10 +67,10 @@ func convertStringSliceToInt(input *[]string, out *int, s conversion.Scope) erro
 	return nil
 }
 
-// converStringSliceToBool will convert a string parameter to boolean.
+// Convert_Slice_string_To_bool will convert a string parameter to boolean.
 // Only the absence of a value, a value of "false", or a value of "0" resolve to false.
 // Any other value (including empty string) resolves to true.
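+// For example, an absent parameter or "?watch=0" resolves to false, while
+// "?watch=" (present but empty) resolves to true.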
-func convertStringSliceToBool(input *[]string, out *bool, s conversion.Scope) error { +func Convert_Slice_string_To_bool(input *[]string, out *bool, s conversion.Scope) error { if len(*input) == 0 { *out = false return nil @@ -84,7 +84,7 @@ func convertStringSliceToBool(input *[]string, out *bool, s conversion.Scope) er return nil } -func convertStringSliceToInt64(input *[]string, out *int64, s conversion.Scope) error { +func Convert_Slice_string_To_int64(input *[]string, out *int64, s conversion.Scope) error { if len(*input) == 0 { *out = 0 } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/conversion_generator.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/conversion_generator.go deleted file mode 100644 index 9971b7b20dfc..000000000000 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/conversion_generator.go +++ /dev/null @@ -1,913 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package runtime - -import ( - "fmt" - "io" - "log" - "path" - "reflect" - goruntime "runtime" - "sort" - "strings" - - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/util/sets" -) - -type ConversionGenerator interface { - GenerateConversionsForType(groupVersion unversioned.GroupVersion, reflection reflect.Type) error - WriteConversionFunctions(w io.Writer) error - RegisterConversionFunctions(w io.Writer, pkg string) error - AddImport(pkg string) string - RepackImports(exclude sets.String) - WriteImports(w io.Writer) error - OverwritePackage(pkg, overwrite string) - AssumePrivateConversions() -} - -func NewConversionGenerator(scheme *Scheme, targetPkg string) ConversionGenerator { - g := &conversionGenerator{ - scheme: scheme, - - nameFormat: "Convert_%s_%s_To_%s_%s", - generatedNamePrefix: "auto", - targetPkg: targetPkg, - - publicFuncs: make(map[typePair]functionName), - convertibles: make(map[reflect.Type]reflect.Type), - overridden: make(map[reflect.Type]bool), - pkgOverwrites: make(map[string]string), - imports: make(map[string]string), - shortImports: make(map[string]string), - } - g.targetPackage(targetPkg) - g.AddImport("reflect") - g.AddImport("k8s.io/kubernetes/pkg/conversion") - return g -} - -var complexTypes []reflect.Kind = []reflect.Kind{reflect.Map, reflect.Ptr, reflect.Slice, reflect.Interface, reflect.Struct} - -type functionName struct { - name string - packageName string -} - -type conversionGenerator struct { - scheme *Scheme - - nameFormat string - generatedNamePrefix string - targetPkg string - - publicFuncs map[typePair]functionName - convertibles map[reflect.Type]reflect.Type - overridden map[reflect.Type]bool - // If pkgOverwrites is set for a given package name, that package name - // will be replaced while writing conversion function. If empty, package - // name will be omitted. 
- pkgOverwrites map[string]string - // map of package names to shortname - imports map[string]string - // map of short names to package names - shortImports map[string]string - - // A buffer that is used for storing lines that needs to be written. - linesToPrint []string - - // if true, we assume conversions on the scheme are not available to us in the current package - assumePrivateConversions bool -} - -func (g *conversionGenerator) AssumePrivateConversions() { - g.assumePrivateConversions = true -} - -func (g *conversionGenerator) AddImport(pkg string) string { - return g.addImportByPath(pkg) -} - -func (g *conversionGenerator) GenerateConversionsForType(gv unversioned.GroupVersion, reflection reflect.Type) error { - kind := reflection.Name() - // TODO this is equivalent to what it did before, but it needs to be fixed for the proper group - internalVersion := gv - internalVersion.Version = APIVersionInternal - - internalObj, err := g.scheme.New(internalVersion.WithKind(kind)) - if err != nil { - return fmt.Errorf("cannot create an object of type %v in internal version", kind) - } - internalObjType := reflect.TypeOf(internalObj) - if internalObjType.Kind() != reflect.Ptr { - return fmt.Errorf("created object should be of type Ptr: %v", internalObjType.Kind()) - } - inErr := g.generateConversionsBetween(reflection, internalObjType.Elem()) - outErr := g.generateConversionsBetween(internalObjType.Elem(), reflection) - if inErr != nil || outErr != nil { - return fmt.Errorf("errors: %v, %v", inErr, outErr) - } - return nil -} - -// primitiveConversion returns true if the two types can be converted via a cast. -func primitiveConversion(inType, outType reflect.Type) (string, bool) { - switch inType.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, - reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, - reflect.Float32, reflect.Float64: - switch outType.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, - reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, - reflect.Float32, reflect.Float64: - return outType.Name(), true - } - } - return "", false -} - -func (g *conversionGenerator) generateConversionsBetween(inType, outType reflect.Type) error { - existingConversion := g.scheme.Converter().HasConversionFunc(inType, outType) && g.scheme.Converter().HasConversionFunc(outType, inType) - - // Avoid processing the same type multiple times. - if value, found := g.convertibles[inType]; found { - if value != outType { - return fmt.Errorf("multiple possible convertibles for %v", inType) - } - return nil - } - if inType == outType { - switch inType.Kind() { - case reflect.Ptr: - return g.generateConversionsBetween(inType.Elem(), inType.Elem()) - case reflect.Struct: - // pointers to structs invoke new(inType) - g.addImportByPath(inType.PkgPath()) - } - g.rememberConversionFunction(inType, inType, false) - // Don't generate conversion methods for the same type. - return nil - } - - if _, ok := primitiveConversion(inType, outType); ok { - return nil - } - - if inType.Kind() != outType.Kind() { - if existingConversion { - g.rememberConversionFunction(inType, outType, false) - g.rememberConversionFunction(outType, inType, false) - return nil - } - return fmt.Errorf("cannot convert types of different kinds: %v %v", inType, outType) - } - - g.addImportByPath(inType.PkgPath()) - g.addImportByPath(outType.PkgPath()) - - // We should be able to generate conversions both sides. 
- switch inType.Kind() { - case reflect.Map: - inErr := g.generateConversionsForMap(inType, outType) - outErr := g.generateConversionsForMap(outType, inType) - if !existingConversion && (inErr != nil || outErr != nil) { - return inErr - } - // We don't add it to g.convertibles - maps should be handled correctly - // inside appropriate conversion functions. - return nil - case reflect.Ptr: - inErr := g.generateConversionsBetween(inType.Elem(), outType.Elem()) - outErr := g.generateConversionsBetween(outType.Elem(), inType.Elem()) - if !existingConversion && (inErr != nil || outErr != nil) { - return inErr - } - // We don't add it to g.convertibles - maps should be handled correctly - // inside appropriate conversion functions. - return nil - case reflect.Slice: - inErr := g.generateConversionsForSlice(inType, outType) - outErr := g.generateConversionsForSlice(outType, inType) - if !existingConversion && (inErr != nil || outErr != nil) { - return inErr - } - // We don't add it to g.convertibles - slices should be handled correctly - // inside appropriate conversion functions. - return nil - case reflect.Interface: - // TODO(wojtek-t): Currently we don't support converting interfaces. - return fmt.Errorf("interfaces are not supported") - case reflect.Struct: - inErr := g.generateConversionsForStruct(inType, outType) - outErr := g.generateConversionsForStruct(outType, inType) - if !existingConversion && (inErr != nil || outErr != nil) { - return inErr - } - g.rememberConversionFunction(inType, outType, true) - if existingConversion { - g.overridden[inType] = true - } - g.convertibles[inType] = outType - return nil - default: - // All simple types should be handled correctly with default conversion. - return nil - } -} - -func isComplexType(reflection reflect.Type) bool { - for _, complexType := range complexTypes { - if complexType == reflection.Kind() { - return true - } - } - return false -} - -func (g *conversionGenerator) rememberConversionFunction(inType, outType reflect.Type, willGenerate bool) { - if _, ok := g.publicFuncs[typePair{inType, outType}]; ok { - return - } - - if v, ok := g.scheme.Converter().ConversionFuncValue(inType, outType); ok { - if fn := goruntime.FuncForPC(v.Pointer()); fn != nil { - name := fn.Name() - var p, n string - if last := strings.LastIndex(name, "."); last != -1 { - p = name[:last] - n = name[last+1:] - } else { - n = name - } - if isPublic(n) { - g.AddImport(p) - g.publicFuncs[typePair{inType, outType}] = functionName{name: n, packageName: p} - } else { - log.Printf("WARNING: Cannot generate conversion %v -> %v, method %q is private", inType, outType, fn.Name()) - } - } else { - log.Printf("WARNING: Cannot generate conversion %v -> %v, method is not accessible", inType, outType) - } - } else if willGenerate { - g.publicFuncs[typePair{inType, outType}] = functionName{name: g.conversionFunctionName(inType, outType)} - } -} - -func isPublic(name string) bool { - return strings.ToUpper(name[:1]) == name[:1] -} - -func (g *conversionGenerator) generateConversionsForMap(inType, outType reflect.Type) error { - inKey := inType.Key() - outKey := outType.Key() - g.addImportByPath(inKey.PkgPath()) - g.addImportByPath(outKey.PkgPath()) - if err := g.generateConversionsBetween(inKey, outKey); err != nil { - return err - } - inValue := inType.Elem() - outValue := outType.Elem() - g.addImportByPath(inValue.PkgPath()) - g.addImportByPath(outValue.PkgPath()) - if err := g.generateConversionsBetween(inValue, outValue); err != nil { - return err - } - return nil -} - -func 
(g *conversionGenerator) generateConversionsForSlice(inType, outType reflect.Type) error { - inElem := inType.Elem() - outElem := outType.Elem() - // slice conversion requires the package for the destination type in order to instantiate the map - g.addImportByPath(outElem.PkgPath()) - if err := g.generateConversionsBetween(inElem, outElem); err != nil { - return err - } - return nil -} - -func (g *conversionGenerator) generateConversionsForStruct(inType, outType reflect.Type) error { - for i := 0; i < inType.NumField(); i++ { - inField := inType.Field(i) - outField, found := outType.FieldByName(inField.Name) - if !found { - return fmt.Errorf("couldn't find a corresponding field %v in %v", inField.Name, outType) - } - if isComplexType(inField.Type) { - if err := g.generateConversionsBetween(inField.Type, outField.Type); err != nil { - return err - } - } - } - return nil -} - -// A buffer of lines that will be written. -type bufferedLine struct { - line string - indentation int -} - -type buffer struct { - lines []bufferedLine -} - -func newBuffer() *buffer { - return &buffer{ - lines: make([]bufferedLine, 0), - } -} - -func (b *buffer) addLine(line string, indent int) { - b.lines = append(b.lines, bufferedLine{line, indent}) -} - -func (b *buffer) flushLines(w io.Writer) error { - for _, line := range b.lines { - indentation := strings.Repeat("\t", line.indentation) - fullLine := fmt.Sprintf("%s%s", indentation, line.line) - if _, err := io.WriteString(w, fullLine); err != nil { - return err - } - } - return nil -} - -type byName []reflect.Type - -func (s byName) Len() int { - return len(s) -} - -func (s byName) Less(i, j int) bool { - fullNameI := s[i].PkgPath() + "/" + s[i].Name() - fullNameJ := s[j].PkgPath() + "/" + s[j].Name() - return fullNameI < fullNameJ -} - -func (s byName) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -func (g *conversionGenerator) targetPackage(pkg string) { - g.imports[pkg] = "" - g.shortImports[""] = pkg -} - -func (g *conversionGenerator) RepackImports(exclude sets.String) { - var packages []string - for key := range g.imports { - packages = append(packages, key) - } - sort.Strings(packages) - g.imports = make(map[string]string) - g.shortImports = make(map[string]string) - g.targetPackage(g.targetPkg) - for _, pkg := range packages { - if !exclude.Has(pkg) { - g.addImportByPath(pkg) - } - } -} - -func (g *conversionGenerator) WriteImports(w io.Writer) error { - var packages []string - for key := range g.imports { - packages = append(packages, key) - } - sort.Strings(packages) - - buffer := newBuffer() - indent := 0 - buffer.addLine("import (\n", indent) - for _, importPkg := range packages { - if len(importPkg) == 0 { - continue - } - if len(g.imports[importPkg]) == 0 { - continue - } - buffer.addLine(fmt.Sprintf("%s \"%s\"\n", g.imports[importPkg], importPkg), indent+1) - } - buffer.addLine(")\n", indent) - buffer.addLine("\n", indent) - if err := buffer.flushLines(w); err != nil { - return err - } - return nil -} - -func (g *conversionGenerator) WriteConversionFunctions(w io.Writer) error { - // It's desired to print conversion functions always in the same order - // (e.g. for better tracking of what has really been added). - var keys []reflect.Type - for key := range g.convertibles { - keys = append(keys, key) - } - sort.Sort(byName(keys)) - - buffer := newBuffer() - indent := 0 - for _, inType := range keys { - outType := g.convertibles[inType] - // All types in g.convertibles are structs. 
- if inType.Kind() != reflect.Struct { - return fmt.Errorf("non-struct conversions are not-supported") - } - if err := g.writeConversionForType(buffer, inType, outType, indent); err != nil { - return err - } - } - if err := buffer.flushLines(w); err != nil { - return err - } - return nil -} - -func (g *conversionGenerator) writeRegisterHeader(b *buffer, pkg string, indent int) { - b.addLine("func init() {\n", indent) - b.addLine(fmt.Sprintf("err := %s.AddGeneratedConversionFuncs(\n", pkg), indent+1) -} - -func (g *conversionGenerator) writeRegisterFooter(b *buffer, indent int) { - b.addLine(")\n", indent+1) - b.addLine("if err != nil {\n", indent+1) - b.addLine("// If one of the conversion functions is malformed, detect it immediately.\n", indent+2) - b.addLine("panic(err)\n", indent+2) - b.addLine("}\n", indent+1) - b.addLine("}\n", indent) - b.addLine("\n", indent) -} - -func (g *conversionGenerator) RegisterConversionFunctions(w io.Writer, pkg string) error { - // Write conversion function names alphabetically ordered. - var names []string - for inType, outType := range g.convertibles { - names = append(names, g.generatedFunctionName(inType, outType)) - } - sort.Strings(names) - - buffer := newBuffer() - indent := 0 - g.writeRegisterHeader(buffer, pkg, indent) - for _, name := range names { - buffer.addLine(fmt.Sprintf("%s,\n", name), indent+2) - } - g.writeRegisterFooter(buffer, indent) - if err := buffer.flushLines(w); err != nil { - return err - } - return nil -} - -func (g *conversionGenerator) addImportByPath(pkg string) string { - if name, ok := g.imports[pkg]; ok { - return name - } - name := path.Base(pkg) - if _, ok := g.shortImports[name]; !ok { - g.imports[pkg] = name - g.shortImports[name] = pkg - return name - } - if dirname := path.Base(path.Dir(pkg)); len(dirname) > 0 { - name = dirname + name - if _, ok := g.shortImports[name]; !ok { - g.imports[pkg] = name - g.shortImports[name] = pkg - return name - } - if subdirname := path.Base(path.Dir(path.Dir(pkg))); len(subdirname) > 0 { - name = subdirname + name - if _, ok := g.shortImports[name]; !ok { - g.imports[pkg] = name - g.shortImports[name] = pkg - return name - } - } - } - for i := 2; i < 100; i++ { - generatedName := fmt.Sprintf("%s%d", name, i) - if _, ok := g.shortImports[generatedName]; !ok { - g.imports[pkg] = generatedName - g.shortImports[generatedName] = pkg - return generatedName - } - } - panic(fmt.Sprintf("unable to find a unique name for the package path %q: %v", pkg, g.shortImports)) -} - -func (g *conversionGenerator) typeName(inType reflect.Type) string { - switch inType.Kind() { - case reflect.Slice: - return fmt.Sprintf("[]%s", g.typeName(inType.Elem())) - case reflect.Ptr: - return fmt.Sprintf("*%s", g.typeName(inType.Elem())) - case reflect.Map: - if len(inType.Name()) == 0 { - return fmt.Sprintf("map[%s]%s", g.typeName(inType.Key()), g.typeName(inType.Elem())) - } - fallthrough - default: - pkg, name := inType.PkgPath(), inType.Name() - if len(name) == 0 && inType.Kind() == reflect.Struct { - return "struct{}" - } - if len(pkg) == 0 { - // Default package. 
- return name - } - if val, found := g.pkgOverwrites[pkg]; found { - pkg = val - } - if len(pkg) == 0 { - return name - } - short := g.addImportByPath(pkg) - if len(short) > 0 { - return fmt.Sprintf("%s.%s", short, name) - } - return name - } -} - -func (g *conversionGenerator) writeDefaultingFunc(b *buffer, inType reflect.Type, indent int) error { - getStmt := "if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {\n" - b.addLine(getStmt, indent) - callFormat := "defaulting.(func(*%s))(in)\n" - callStmt := fmt.Sprintf(callFormat, g.typeName(inType)) - b.addLine(callStmt, indent+1) - b.addLine("}\n", indent) - return nil -} - -func packageForName(inType reflect.Type) string { - if inType.PkgPath() == "" { - return "" - } - slices := strings.Split(inType.PkgPath(), "/") - return slices[len(slices)-1] -} - -func (g *conversionGenerator) conversionFunctionName(inType, outType reflect.Type) string { - funcNameFormat := g.nameFormat - inPkg := packageForName(inType) - outPkg := packageForName(outType) - funcName := fmt.Sprintf(funcNameFormat, inPkg, inType.Name(), outPkg, outType.Name()) - return funcName -} - -func (g *conversionGenerator) conversionFunctionCall(inType, outType reflect.Type, scopeName string, args ...string) string { - if named, ok := g.publicFuncs[typePair{inType, outType}]; ok { - args[len(args)-1] = scopeName - name := named.name - localPackageName, ok := g.imports[named.packageName] - if !ok { - panic(fmt.Sprintf("have not defined an import for %s", named.packageName)) - } - if len(named.packageName) > 0 && len(localPackageName) > 0 { - name = localPackageName + "." + name - } - return fmt.Sprintf("%s(%s)", name, strings.Join(args, ", ")) - } - log.Printf("WARNING: Using reflection to convert %v -> %v (no public conversion)", inType, outType) - return fmt.Sprintf("%s.Convert(%s)", scopeName, strings.Join(args, ", ")) -} - -func (g *conversionGenerator) generatedFunctionName(inType, outType reflect.Type) string { - return g.generatedNamePrefix + g.conversionFunctionName(inType, outType) -} - -func (g *conversionGenerator) writeHeader(b *buffer, name, inType, outType string, indent int) { - format := "func %s(in *%s, out *%s, s conversion.Scope) error {\n" - stmt := fmt.Sprintf(format, name, inType, outType) - b.addLine(stmt, indent) -} - -func (g *conversionGenerator) writeFooter(b *buffer, indent int) { - b.addLine("return nil\n", indent+1) - b.addLine("}\n", indent) -} - -func (g *conversionGenerator) writeConversionForMap(b *buffer, inField, outField reflect.StructField, indent int) error { - ifFormat := "if in.%s != nil {\n" - ifStmt := fmt.Sprintf(ifFormat, inField.Name) - b.addLine(ifStmt, indent) - makeFormat := "out.%s = make(%s)\n" - makeStmt := fmt.Sprintf(makeFormat, outField.Name, g.typeName(outField.Type)) - b.addLine(makeStmt, indent+1) - forFormat := "for key, val := range in.%s {\n" - forStmt := fmt.Sprintf(forFormat, inField.Name) - b.addLine(forStmt, indent+1) - - // Whether we need to explicitly create a new value. 
- newValue := false - if isComplexType(inField.Type.Elem()) || !inField.Type.Elem().ConvertibleTo(outField.Type.Elem()) { - newValue = true - newFormat := "newVal := %s{}\n" - newStmt := fmt.Sprintf(newFormat, g.typeName(outField.Type.Elem())) - b.addLine(newStmt, indent+2) - call := g.conversionFunctionCall(inField.Type.Elem(), outField.Type.Elem(), "s", "&val", "&newVal", "0") - convertStmt := fmt.Sprintf("if err := %s; err != nil {\n", call) - b.addLine(convertStmt, indent+2) - b.addLine("return err\n", indent+3) - b.addLine("}\n", indent+2) - } - if inField.Type.Key().ConvertibleTo(outField.Type.Key()) { - value := "val" - if newValue { - value = "newVal" - } - assignStmt := "" - if inField.Type.Key().AssignableTo(outField.Type.Key()) { - assignStmt = fmt.Sprintf("out.%s[key] = %s\n", outField.Name, value) - } else { - assignStmt = fmt.Sprintf("out.%s[%s(key)] = %s\n", outField.Name, g.typeName(outField.Type.Key()), value) - } - b.addLine(assignStmt, indent+2) - } else { - // TODO(wojtek-t): Support maps with keys that are non-convertible to each other. - return fmt.Errorf("conversions between unconvertible keys in map are not supported.") - } - b.addLine("}\n", indent+1) - b.addLine("} else {\n", indent) - nilFormat := "out.%s = nil\n" - nilStmt := fmt.Sprintf(nilFormat, outField.Name) - b.addLine(nilStmt, indent+1) - b.addLine("}\n", indent) - return nil -} - -func (g *conversionGenerator) writeConversionForSlice(b *buffer, inField, outField reflect.StructField, indent int) error { - ifFormat := "if in.%s != nil {\n" - ifStmt := fmt.Sprintf(ifFormat, inField.Name) - b.addLine(ifStmt, indent) - makeFormat := "out.%s = make(%s, len(in.%s))\n" - makeStmt := fmt.Sprintf(makeFormat, outField.Name, g.typeName(outField.Type), inField.Name) - b.addLine(makeStmt, indent+1) - forFormat := "for i := range in.%s {\n" - forStmt := fmt.Sprintf(forFormat, inField.Name) - b.addLine(forStmt, indent+1) - - assigned := false - switch inField.Type.Elem().Kind() { - case reflect.Map, reflect.Ptr, reflect.Slice, reflect.Interface, reflect.Struct: - // Don't copy these via assignment/conversion! - default: - // This should handle all simple types. - if inField.Type.Elem().AssignableTo(outField.Type.Elem()) { - assignFormat := "out.%s[i] = in.%s[i]\n" - assignStmt := fmt.Sprintf(assignFormat, outField.Name, inField.Name) - b.addLine(assignStmt, indent+2) - assigned = true - } else if inField.Type.Elem().ConvertibleTo(outField.Type.Elem()) { - assignFormat := "out.%s[i] = %s(in.%s[i])\n" - assignStmt := fmt.Sprintf(assignFormat, outField.Name, g.typeName(outField.Type.Elem()), inField.Name) - b.addLine(assignStmt, indent+2) - assigned = true - } - } - if !assigned { - call := g.conversionFunctionCall(inField.Type.Elem(), outField.Type.Elem(), "s", "&in."+inField.Name+"[i]", "&out."+outField.Name+"[i]", "0") - assignStmt := fmt.Sprintf("if err := %s; err != nil {\n", call) - b.addLine(assignStmt, indent+2) - b.addLine("return err\n", indent+3) - b.addLine("}\n", indent+2) - } - b.addLine("}\n", indent+1) - b.addLine("} else {\n", indent) - nilFormat := "out.%s = nil\n" - nilStmt := fmt.Sprintf(nilFormat, outField.Name) - b.addLine(nilStmt, indent+1) - b.addLine("}\n", indent) - return nil -} - -func (g *conversionGenerator) writeConversionForPtr(b *buffer, inField, outField reflect.StructField, indent int) error { - switch inField.Type.Elem().Kind() { - case reflect.Map, reflect.Ptr, reflect.Slice, reflect.Interface, reflect.Struct: - // Don't copy these via assignment/conversion! 
- default: - // This should handle pointers to all simple types. - assignable := inField.Type.Elem().AssignableTo(outField.Type.Elem()) - convertible := inField.Type.Elem().ConvertibleTo(outField.Type.Elem()) - if assignable || convertible { - ifFormat := "if in.%s != nil {\n" - ifStmt := fmt.Sprintf(ifFormat, inField.Name) - b.addLine(ifStmt, indent) - newFormat := "out.%s = new(%s)\n" - newStmt := fmt.Sprintf(newFormat, outField.Name, g.typeName(outField.Type.Elem())) - b.addLine(newStmt, indent+1) - } - if assignable { - assignFormat := "*out.%s = *in.%s\n" - assignStmt := fmt.Sprintf(assignFormat, outField.Name, inField.Name) - b.addLine(assignStmt, indent+1) - } else if convertible { - assignFormat := "*out.%s = %s(*in.%s)\n" - assignStmt := fmt.Sprintf(assignFormat, outField.Name, g.typeName(outField.Type.Elem()), inField.Name) - b.addLine(assignStmt, indent+1) - } - if assignable || convertible { - b.addLine("} else {\n", indent) - nilFormat := "out.%s = nil\n" - nilStmt := fmt.Sprintf(nilFormat, outField.Name) - b.addLine(nilStmt, indent+1) - b.addLine("}\n", indent) - return nil - } - } - - b.addLine(fmt.Sprintf("// unable to generate simple pointer conversion for %v -> %v\n", inField.Type.Elem(), outField.Type.Elem()), indent) - ifFormat := "if in.%s != nil {\n" - ifStmt := fmt.Sprintf(ifFormat, inField.Name) - b.addLine(ifStmt, indent) - assignStmt := "" - if _, ok := g.publicFuncs[typePair{inField.Type.Elem(), outField.Type.Elem()}]; ok { - newFormat := "out.%s = new(%s)\n" - newStmt := fmt.Sprintf(newFormat, outField.Name, g.typeName(outField.Type.Elem())) - b.addLine(newStmt, indent+1) - call := g.conversionFunctionCall(inField.Type.Elem(), outField.Type.Elem(), "s", "in."+inField.Name, "out."+outField.Name, "0") - assignStmt = fmt.Sprintf("if err := %s; err != nil {\n", call) - } else { - call := g.conversionFunctionCall(inField.Type.Elem(), outField.Type.Elem(), "s", "&in."+inField.Name, "&out."+outField.Name, "0") - assignStmt = fmt.Sprintf("if err := %s; err != nil {\n", call) - } - b.addLine(assignStmt, indent+1) - b.addLine("return err\n", indent+2) - b.addLine("}\n", indent+1) - b.addLine("} else {\n", indent) - nilFormat := "out.%s = nil\n" - nilStmt := fmt.Sprintf(nilFormat, outField.Name) - b.addLine(nilStmt, indent+1) - b.addLine("}\n", indent) - return nil -} - -func (g *conversionGenerator) canTryConversion(b *buffer, inType reflect.Type, inField, outField reflect.StructField, indent int) (bool, error) { - if inField.Type.Kind() != outField.Type.Kind() { - if !g.overridden[inType] { - return false, fmt.Errorf("input %s.%s (%s) does not match output (%s) and conversion is not overridden", inType, inField.Name, inField.Type.Kind(), outField.Type.Kind()) - } - b.addLine(fmt.Sprintf("// in.%s has no peer in out\n", inField.Name), indent) - return false, nil - } - return true, nil -} - -func (g *conversionGenerator) writeConversionForStruct(b *buffer, inType, outType reflect.Type, indent int) error { - for i := 0; i < inType.NumField(); i++ { - inField := inType.Field(i) - outField, found := outType.FieldByName(inField.Name) - if !found { - if !g.overridden[inType] { - return fmt.Errorf("input %s.%s has no peer in output %s and conversion is not overridden", inType, inField.Name, outType) - } - b.addLine(fmt.Sprintf("// in.%s has no peer in out\n", inField.Name), indent) - continue - } - - if g.scheme.Converter().IsConversionIgnored(inField.Type, outField.Type) { - continue - } - - existsConversion := g.scheme.Converter().HasConversionFunc(inField.Type, outField.Type) 
- _, hasPublicConversion := g.publicFuncs[typePair{inField.Type, outField.Type}] - // TODO: This allows a private conversion for a slice to take precedence over a public - // conversion for the field, even though that is technically slower. We should report when - // we generate an inefficient conversion. - if existsConversion || hasPublicConversion { - // Use the conversion method that is already defined. - call := g.conversionFunctionCall(inField.Type, outField.Type, "s", "&in."+inField.Name, "&out."+outField.Name, "0") - assignStmt := fmt.Sprintf("if err := %s; err != nil {\n", call) - b.addLine(assignStmt, indent) - b.addLine("return err\n", indent+1) - b.addLine("}\n", indent) - continue - } - - switch inField.Type.Kind() { - case reflect.Map: - if try, err := g.canTryConversion(b, inType, inField, outField, indent); err != nil { - return err - } else if !try { - continue - } - if err := g.writeConversionForMap(b, inField, outField, indent); err != nil { - return err - } - continue - case reflect.Ptr: - if try, err := g.canTryConversion(b, inType, inField, outField, indent); err != nil { - return err - } else if !try { - continue - } - if err := g.writeConversionForPtr(b, inField, outField, indent); err != nil { - return err - } - continue - case reflect.Slice: - if try, err := g.canTryConversion(b, inType, inField, outField, indent); err != nil { - return err - } else if !try { - continue - } - if err := g.writeConversionForSlice(b, inField, outField, indent); err != nil { - return err - } - continue - case reflect.Interface, reflect.Struct: - // Don't copy these via assignment/conversion! - default: - // This should handle all simple types. - if inField.Type.AssignableTo(outField.Type) { - assignFormat := "out.%s = in.%s\n" - assignStmt := fmt.Sprintf(assignFormat, outField.Name, inField.Name) - b.addLine(assignStmt, indent) - continue - } - if inField.Type.ConvertibleTo(outField.Type) { - assignFormat := "out.%s = %s(in.%s)\n" - assignStmt := fmt.Sprintf(assignFormat, outField.Name, g.typeName(outField.Type), inField.Name) - b.addLine(assignStmt, indent) - continue - } - } - - call := g.conversionFunctionCall(inField.Type, outField.Type, "s", "&in."+inField.Name, "&out."+outField.Name, "0") - assignStmt := fmt.Sprintf("if err := %s; err != nil {\n", call) - b.addLine(assignStmt, indent) - b.addLine("return err\n", indent+1) - b.addLine("}\n", indent) - } - return nil -} - -func (g *conversionGenerator) writeConversionForType(b *buffer, inType, outType reflect.Type, indent int) error { - // Always emit the auto-generated name. - autoFuncName := g.generatedFunctionName(inType, outType) - g.writeHeader(b, autoFuncName, g.typeName(inType), g.typeName(outType), indent) - if err := g.writeDefaultingFunc(b, inType, indent+1); err != nil { - return err - } - switch inType.Kind() { - case reflect.Struct: - if err := g.writeConversionForStruct(b, inType, outType, indent+1); err != nil { - return err - } - default: - return fmt.Errorf("type not supported: %v", inType) - } - g.writeFooter(b, indent) - b.addLine("\n", 0) - - if !g.overridden[inType] { - // Also emit the "user-facing" name. 
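- // (The user-facing name follows the Convert_<pkg>_<Type>_To_<pkg>_<Type>
- // convention seen elsewhere in this package, e.g.
- // Convert_runtime_RawExtension_To_runtime_Object; it is a thin wrapper that
- // simply delegates to the auto-generated function emitted above.)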
- userFuncName := g.conversionFunctionName(inType, outType) - g.writeHeader(b, userFuncName, g.typeName(inType), g.typeName(outType), indent) - b.addLine(fmt.Sprintf("return %s(in, out, s)\n", autoFuncName), indent+1) - b.addLine("}\n\n", 0) - } - - return nil -} - -func (g *conversionGenerator) existsConversionFunction(inType, outType reflect.Type) bool { - if val, found := g.convertibles[inType]; found && val == outType { - return true - } - if val, found := g.convertibles[outType]; found && val == inType { - return true - } - return false -} - -// TODO(wojtek-t): We should somehow change the conversion methods registered under: -// pkg/runtime/scheme.go to implement the naming convention for conversion functions -// and get rid of this hack. -type typePair struct { - inType reflect.Type - outType reflect.Type -} - -var defaultConversions []typePair = []typePair{} - -func (g *conversionGenerator) OverwritePackage(pkg, overwrite string) { - g.pkgOverwrites[pkg] = overwrite -} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/conversion_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/conversion_test.go new file mode 100644 index 000000000000..6105e5aad5af --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/conversion_test.go @@ -0,0 +1,135 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package runtime_test + +import ( + "reflect" + "testing" + + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/runtime" +) + +type InternalComplex struct { + runtime.TypeMeta + String string + Integer int + Integer64 int64 + Int64 int64 + Bool bool +} + +type ExternalComplex struct { + runtime.TypeMeta `json:",inline"` + String string `json:"string" description:"testing"` + Integer int `json:"int"` + Integer64 int64 `json:",omitempty"` + Int64 int64 + Bool bool `json:"bool"` +} + +func (obj *InternalComplex) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } +func (obj *ExternalComplex) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } + +func TestStringMapConversion(t *testing.T) { + internalGV := unversioned.GroupVersion{Group: "test.group", Version: runtime.APIVersionInternal} + externalGV := unversioned.GroupVersion{Group: "test.group", Version: "external"} + + scheme := runtime.NewScheme() + scheme.Log(t) + scheme.AddKnownTypeWithName(internalGV.WithKind("Complex"), &InternalComplex{}) + scheme.AddKnownTypeWithName(externalGV.WithKind("Complex"), &ExternalComplex{}) + + testCases := map[string]struct { + input map[string][]string + errFn func(error) bool + expected runtime.Object + }{ + "ignores omitempty": { + input: map[string][]string{ + "String": {"not_used"}, + "string": {"value"}, + "int": {"1"}, + "Integer64": {"2"}, + }, + expected: &ExternalComplex{String: "value", Integer: 1}, + }, + "returns error on bad int": { + input: map[string][]string{ + "int": {"a"}, + }, + errFn: func(err error) bool { return err != nil }, + expected: &ExternalComplex{}, + }, + "parses int64": { + input: map[string][]string{ + "Int64": {"-1"}, + }, + expected: &ExternalComplex{Int64: -1}, + }, + "returns error on bad int64": { + input: map[string][]string{ + "Int64": {"a"}, + }, + errFn: func(err error) bool { return err != nil }, + expected: &ExternalComplex{}, + }, + "parses boolean true": { + input: map[string][]string{ + "bool": {"true"}, + }, + expected: &ExternalComplex{Bool: true}, + }, + "parses boolean any value": { + input: map[string][]string{ + "bool": {"foo"}, + }, + expected: &ExternalComplex{Bool: true}, + }, + "parses boolean false": { + input: map[string][]string{ + "bool": {"false"}, + }, + expected: &ExternalComplex{Bool: false}, + }, + "parses boolean empty value": { + input: map[string][]string{ + "bool": {""}, + }, + expected: &ExternalComplex{Bool: true}, + }, + "parses boolean no value": { + input: map[string][]string{ + "bool": {}, + }, + expected: &ExternalComplex{Bool: false}, + }, + } + + for k, tc := range testCases { + out := &ExternalComplex{} + if err := scheme.Convert(&tc.input, out); (tc.errFn == nil && err != nil) || (tc.errFn != nil && !tc.errFn(err)) { + t.Errorf("%s: unexpected error: %v", k, err) + continue + } else if err != nil { + continue + } + if !reflect.DeepEqual(out, tc.expected) { + t.Errorf("%s: unexpected output: %#v", k, out) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/deep_copy_generated.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/deep_copy_generated.go new file mode 100644 index 000000000000..988b97401770 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/deep_copy_generated.go @@ -0,0 +1,150 @@ +// +build !ignore_autogenerated + +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package runtime + +import ( + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + conversion "k8s.io/kubernetes/pkg/conversion" + reflect "reflect" +) + +func DeepCopy_runtime_RawExtension(in RawExtension, out *RawExtension, c *conversion.Cloner) error { + if in.Raw != nil { + in, out := in.Raw, &out.Raw + *out = make([]byte, len(in)) + copy(*out, in) + } else { + out.Raw = nil + } + if in.Object == nil { + out.Object = nil + } else if newVal, err := c.DeepCopy(in.Object); err != nil { + return err + } else { + out.Object = newVal.(Object) + } + return nil +} + +func DeepCopy_runtime_Scheme(in Scheme, out *Scheme, c *conversion.Cloner) error { + if in.gvkToType != nil { + in, out := in.gvkToType, &out.gvkToType + *out = make(map[unversioned.GroupVersionKind]reflect.Type) + for range in { + // FIXME: Copying unassignable keys unsupported unversioned.GroupVersionKind + } + } else { + out.gvkToType = nil + } + if in.typeToGVK != nil { + in, out := in.typeToGVK, &out.typeToGVK + *out = make(map[reflect.Type][]unversioned.GroupVersionKind) + for range in { + // FIXME: Copying unassignable keys unsupported reflect.Type + } + } else { + out.typeToGVK = nil + } + if in.unversionedTypes != nil { + in, out := in.unversionedTypes, &out.unversionedTypes + *out = make(map[reflect.Type]unversioned.GroupVersionKind) + for range in { + // FIXME: Copying unassignable keys unsupported reflect.Type + } + } else { + out.unversionedTypes = nil + } + if in.unversionedKinds != nil { + in, out := in.unversionedKinds, &out.unversionedKinds + *out = make(map[string]reflect.Type) + for key, val := range in { + if newVal, err := c.DeepCopy(val); err != nil { + return err + } else { + (*out)[key] = newVal.(reflect.Type) + } + } + } else { + out.unversionedKinds = nil + } + if in.fieldLabelConversionFuncs != nil { + in, out := in.fieldLabelConversionFuncs, &out.fieldLabelConversionFuncs + *out = make(map[string]map[string]FieldLabelConversionFunc) + for key, val := range in { + if newVal, err := c.DeepCopy(val); err != nil { + return err + } else { + (*out)[key] = newVal.(map[string]FieldLabelConversionFunc) + } + } + } else { + out.fieldLabelConversionFuncs = nil + } + if in.converter != nil { + in, out := in.converter, &out.converter + *out = new(conversion.Converter) + if err := conversion.DeepCopy_conversion_Converter(*in, *out, c); err != nil { + return err + } + } else { + out.converter = nil + } + if in.cloner != nil { + in, out := in.cloner, &out.cloner + *out = new(conversion.Cloner) + if err := conversion.DeepCopy_conversion_Cloner(*in, *out, c); err != nil { + return err + } + } else { + out.cloner = nil + } + return nil +} + +func DeepCopy_runtime_SerializerInfo(in SerializerInfo, out *SerializerInfo, c *conversion.Cloner) error { + if in.Serializer == nil { + out.Serializer = nil + } else if newVal, err := c.DeepCopy(in.Serializer); err != nil { + return err + } else { + out.Serializer = newVal.(Serializer) + } + out.EncodesAsText = in.EncodesAsText + out.MediaType = in.MediaType + return nil +} + +func 
DeepCopy_runtime_StreamSerializerInfo(in StreamSerializerInfo, out *StreamSerializerInfo, c *conversion.Cloner) error { + if err := DeepCopy_runtime_SerializerInfo(in.SerializerInfo, &out.SerializerInfo, c); err != nil { + return err + } + if in.Framer == nil { + out.Framer = nil + } else if newVal, err := c.DeepCopy(in.Framer); err != nil { + return err + } else { + out.Framer = newVal.(Framer) + } + if err := DeepCopy_runtime_SerializerInfo(in.Embedded, &out.Embedded, c); err != nil { + return err + } + return nil +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/deep_copy_generator.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/deep_copy_generator.go deleted file mode 100644 index 4790969f1299..000000000000 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/deep_copy_generator.go +++ /dev/null @@ -1,609 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package runtime - -import ( - "fmt" - "io" - "path" - "reflect" - "sort" - "strings" - - "k8s.io/kubernetes/pkg/util/sets" -) - -// TODO(wojtek-t): As suggested in #8320, we should consider the strategy -// to first do the shallow copy and then recurse into things that need a -// deep copy (maps, pointers, slices). That sort of copy function would -// need one parameter - a pointer to the thing it's supposed to expand, -// and it would involve a lot less memory copying. -type DeepCopyGenerator interface { - // Adds a type to a generator. - // If the type is non-struct, it will return an error, otherwise deep-copy - // functions for this type and all nested types will be generated. - AddType(inType reflect.Type) error - - // ReplaceType registers a type that should be used instead of the type - // with the provided pkgPath and name. - ReplaceType(pkgPath, name string, in interface{}) - - // AddImport registers a package name with the generator and returns its - // short name. - AddImport(pkgPath string) string - - // RepackImports creates a stable ordering of import short names. - RepackImports() - - // Writes all imports that are necessary for deep-copy functions and - // their registration. - WriteImports(w io.Writer) error - - // Writes deep-copy functions for all types added via the AddType() method - // and their nested types. - WriteDeepCopyFunctions(w io.Writer) error - - // Writes an init() function that registers all the generated deep-copy - // functions. - RegisterDeepCopyFunctions(w io.Writer, pkg string) error - - // When generating code, all references to the "pkg" package name will be - // replaced with "overwrite". It is used mainly to replace references - // to the name of the package in which the code will be created with the empty - // string.
- OverwritePackage(pkg, overwrite string) -} - -func NewDeepCopyGenerator(scheme *Scheme, targetPkg string, include sets.String) DeepCopyGenerator { - g := &deepCopyGenerator{ - scheme: scheme, - targetPkg: targetPkg, - copyables: make(map[reflect.Type]bool), - imports: make(map[string]string), - shortImports: make(map[string]string), - pkgOverwrites: make(map[string]string), - replace: make(map[pkgPathNamePair]reflect.Type), - include: include, - } - g.targetPackage(targetPkg) - g.AddImport("k8s.io/kubernetes/pkg/conversion") - return g -} - -type pkgPathNamePair struct { - PkgPath string - Name string -} - -type deepCopyGenerator struct { - scheme *Scheme - targetPkg string - copyables map[reflect.Type]bool - // map of package names to shortname - imports map[string]string - // map of short names to package names - shortImports map[string]string - pkgOverwrites map[string]string - replace map[pkgPathNamePair]reflect.Type - include sets.String -} - -func (g *deepCopyGenerator) addImportByPath(pkg string) string { - if name, ok := g.imports[pkg]; ok { - return name - } - name := path.Base(pkg) - if _, ok := g.shortImports[name]; !ok { - g.imports[pkg] = name - g.shortImports[name] = pkg - return name - } - if dirname := path.Base(path.Dir(pkg)); len(dirname) > 0 { - name = dirname + name - if _, ok := g.shortImports[name]; !ok { - g.imports[pkg] = name - g.shortImports[name] = pkg - return name - } - if subdirname := path.Base(path.Dir(path.Dir(pkg))); len(subdirname) > 0 { - name = subdirname + name - if _, ok := g.shortImports[name]; !ok { - g.imports[pkg] = name - g.shortImports[name] = pkg - return name - } - } - } - for i := 2; i < 100; i++ { - generatedName := fmt.Sprintf("%s%d", name, i) - if _, ok := g.shortImports[generatedName]; !ok { - g.imports[pkg] = generatedName - g.shortImports[generatedName] = pkg - return generatedName - } - } - panic(fmt.Sprintf("unable to find a unique name for the package path %q: %v", pkg, g.shortImports)) -} - -func (g *deepCopyGenerator) targetPackage(pkg string) { - g.imports[pkg] = "" - g.shortImports[""] = pkg -} - -func (g *deepCopyGenerator) addAllRecursiveTypes(inType reflect.Type) error { - if _, found := g.copyables[inType]; found { - return nil - } - switch inType.Kind() { - case reflect.Map: - if err := g.addAllRecursiveTypes(inType.Key()); err != nil { - return err - } - if err := g.addAllRecursiveTypes(inType.Elem()); err != nil { - return err - } - case reflect.Slice, reflect.Ptr: - if err := g.addAllRecursiveTypes(inType.Elem()); err != nil { - return err - } - case reflect.Interface: - g.addImportByPath(inType.PkgPath()) - return nil - case reflect.Struct: - g.addImportByPath(inType.PkgPath()) - found := false - for s := range g.include { - if strings.HasPrefix(inType.PkgPath(), s) { - found = true - break - } - } - if !found { - return nil - } - for i := 0; i < inType.NumField(); i++ { - inField := inType.Field(i) - if err := g.addAllRecursiveTypes(inField.Type); err != nil { - return err - } - } - g.copyables[inType] = true - default: - // Simple types should be copied automatically. 
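- // As a sketch with a hypothetical type, adding
- //
- //   type Widget struct {
- //       Name   string
- //       Labels map[string]string
- //       Parts  []Part
- //   }
- //
- // recurses into the map key/value types and the slice element type Part,
- // marking every included struct as copyable, while plain kinds such as
- // Name fall through to this default case.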
- } - return nil -} - -func (g *deepCopyGenerator) AddImport(pkg string) string { - return g.addImportByPath(pkg) -} - -// ReplaceType registers a replacement type to be used instead of the named type -func (g *deepCopyGenerator) ReplaceType(pkgPath, name string, t interface{}) { - g.replace[pkgPathNamePair{pkgPath, name}] = reflect.TypeOf(t) -} - -func (g *deepCopyGenerator) AddType(inType reflect.Type) error { - if inType.Kind() != reflect.Struct { - return fmt.Errorf("non-struct copies are not supported") - } - return g.addAllRecursiveTypes(inType) -} - -func (g *deepCopyGenerator) RepackImports() { - var packages []string - for key := range g.imports { - packages = append(packages, key) - } - sort.Strings(packages) - g.imports = make(map[string]string) - g.shortImports = make(map[string]string) - - g.targetPackage(g.targetPkg) - for _, pkg := range packages { - g.addImportByPath(pkg) - } -} - -func (g *deepCopyGenerator) WriteImports(w io.Writer) error { - var packages []string - for key := range g.imports { - packages = append(packages, key) - } - sort.Strings(packages) - - buffer := newBuffer() - indent := 0 - buffer.addLine("import (\n", indent) - for _, importPkg := range packages { - if len(importPkg) == 0 { - continue - } - if len(g.imports[importPkg]) == 0 { - continue - } - buffer.addLine(fmt.Sprintf("%s \"%s\"\n", g.imports[importPkg], importPkg), indent+1) - } - buffer.addLine(")\n", indent) - buffer.addLine("\n", indent) - if err := buffer.flushLines(w); err != nil { - return err - } - return nil -} - -type byPkgAndName []reflect.Type - -func (s byPkgAndName) Len() int { - return len(s) -} - -func (s byPkgAndName) Less(i, j int) bool { - fullNameI := s[i].PkgPath() + "/" + s[i].Name() - fullNameJ := s[j].PkgPath() + "/" + s[j].Name() - return fullNameI < fullNameJ -} - -func (s byPkgAndName) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -func (g *deepCopyGenerator) nameForType(inType reflect.Type) string { - switch inType.Kind() { - case reflect.Slice: - return fmt.Sprintf("[]%s", g.typeName(inType.Elem())) - case reflect.Ptr: - return fmt.Sprintf("*%s", g.typeName(inType.Elem())) - case reflect.Map: - if len(inType.Name()) == 0 { - return fmt.Sprintf("map[%s]%s", g.typeName(inType.Key()), g.typeName(inType.Elem())) - } - fallthrough - default: - pkg, name := inType.PkgPath(), inType.Name() - if len(name) == 0 && inType.Kind() == reflect.Struct { - return "struct{}" - } - if len(pkg) == 0 { - // Default package. 
- return name - } - if val, found := g.pkgOverwrites[pkg]; found { - pkg = val - } - if len(pkg) == 0 { - return name - } - short := g.addImportByPath(pkg) - if len(short) > 0 { - return fmt.Sprintf("%s.%s", short, name) - } - return name - } -} - -func (g *deepCopyGenerator) typeName(inType reflect.Type) string { - if t, ok := g.replace[pkgPathNamePair{inType.PkgPath(), inType.Name()}]; ok { - return g.nameForType(t) - } - return g.nameForType(inType) -} - -func (g *deepCopyGenerator) deepCopyFunctionName(inType reflect.Type) string { - funcNameFormat := "deepCopy_%s_%s" - inPkg := packageForName(inType) - funcName := fmt.Sprintf(funcNameFormat, inPkg, inType.Name()) - return funcName -} - -func (g *deepCopyGenerator) writeHeader(b *buffer, inType reflect.Type, indent int) { - format := "func %s(in %s, out *%s, c *conversion.Cloner) error {\n" - stmt := fmt.Sprintf(format, g.deepCopyFunctionName(inType), g.typeName(inType), g.typeName(inType)) - b.addLine(stmt, indent) -} - -func (g *deepCopyGenerator) writeFooter(b *buffer, indent int) { - b.addLine("return nil\n", indent+1) - b.addLine("}\n", indent) -} - -func (g *deepCopyGenerator) WriteDeepCopyFunctions(w io.Writer) error { - var keys []reflect.Type - for key := range g.copyables { - keys = append(keys, key) - } - sort.Sort(byPkgAndName(keys)) - - buffer := newBuffer() - indent := 0 - for _, inType := range keys { - if err := g.writeDeepCopyForType(buffer, inType, indent); err != nil { - return err - } - buffer.addLine("\n", 0) - } - if err := buffer.flushLines(w); err != nil { - return err - } - return nil -} - -func (g *deepCopyGenerator) writeDeepCopyForMap(b *buffer, inField reflect.StructField, indent int) error { - ifFormat := "if in.%s != nil {\n" - ifStmt := fmt.Sprintf(ifFormat, inField.Name) - b.addLine(ifStmt, indent) - newFormat := "out.%s = make(%s)\n" - newStmt := fmt.Sprintf(newFormat, inField.Name, g.typeName(inField.Type)) - b.addLine(newStmt, indent+1) - forFormat := "for key, val := range in.%s {\n" - forStmt := fmt.Sprintf(forFormat, inField.Name) - b.addLine(forStmt, indent+1) - - switch inField.Type.Key().Kind() { - case reflect.Map, reflect.Ptr, reflect.Slice, reflect.Interface, reflect.Struct: - return fmt.Errorf("not supported") - default: - switch inField.Type.Elem().Kind() { - case reflect.Map, reflect.Ptr, reflect.Slice, reflect.Interface, reflect.Struct: - if _, found := g.copyables[inField.Type.Elem()]; found { - newFormat := "newVal := new(%s)\n" - newStmt := fmt.Sprintf(newFormat, g.typeName(inField.Type.Elem())) - b.addLine(newStmt, indent+2) - assignFormat := "if err := %s(val, newVal, c); err != nil {\n" - funcName := g.deepCopyFunctionName(inField.Type.Elem()) - assignStmt := fmt.Sprintf(assignFormat, funcName) - b.addLine(assignStmt, indent+2) - b.addLine("return err\n", indent+3) - b.addLine("}\n", indent+2) - setFormat := "out.%s[key] = *newVal\n" - setStmt := fmt.Sprintf(setFormat, inField.Name) - b.addLine(setStmt, indent+2) - } else { - ifStmt := "if newVal, err := c.DeepCopy(val); err != nil {\n" - b.addLine(ifStmt, indent+2) - b.addLine("return err\n", indent+3) - b.addLine("} else {\n", indent+2) - assignFormat := "out.%s[key] = newVal.(%s)\n" - assignStmt := fmt.Sprintf(assignFormat, inField.Name, g.typeName(inField.Type.Elem())) - b.addLine(assignStmt, indent+3) - b.addLine("}\n", indent+2) - } - default: - assignFormat := "out.%s[key] = val\n" - assignStmt := fmt.Sprintf(assignFormat, inField.Name) - b.addLine(assignStmt, indent+2) - } - } - b.addLine("}\n", indent+1) - b.addLine("} 
else {\n", indent) - elseFormat := "out.%s = nil\n" - elseStmt := fmt.Sprintf(elseFormat, inField.Name) - b.addLine(elseStmt, indent+1) - b.addLine("}\n", indent) - return nil -} - -func (g *deepCopyGenerator) writeDeepCopyForPtr(b *buffer, inField reflect.StructField, indent int) error { - ifFormat := "if in.%s != nil {\n" - ifStmt := fmt.Sprintf(ifFormat, inField.Name) - b.addLine(ifStmt, indent) - - kind := inField.Type.Elem().Kind() - switch kind { - case reflect.Map, reflect.Ptr, reflect.Slice, reflect.Interface, reflect.Struct: - if _, found := g.copyables[inField.Type.Elem()]; found { - newFormat := "out.%s = new(%s)\n" - newStmt := fmt.Sprintf(newFormat, inField.Name, g.typeName(inField.Type.Elem())) - b.addLine(newStmt, indent+1) - assignFormat := "if err := %s(*in.%s, out.%s, c); err != nil {\n" - funcName := g.deepCopyFunctionName(inField.Type.Elem()) - assignStmt := fmt.Sprintf(assignFormat, funcName, inField.Name, inField.Name) - b.addLine(assignStmt, indent+1) - b.addLine("return err\n", indent+2) - b.addLine("}\n", indent+1) - } else { - ifFormat := "if newVal, err := c.DeepCopy(in.%s); err != nil {\n" - ifStmt := fmt.Sprintf(ifFormat, inField.Name) - b.addLine(ifStmt, indent+1) - b.addLine("return err\n", indent+2) - if kind != reflect.Struct { - b.addLine("} else if newVal == nil {\n", indent+1) - b.addLine(fmt.Sprintf("out.%s = nil\n", inField.Name), indent+2) - } - b.addLine("} else {\n", indent+1) - assignFormat := "out.%s = newVal.(%s)\n" - assignStmt := fmt.Sprintf(assignFormat, inField.Name, g.typeName(inField.Type)) - b.addLine(assignStmt, indent+2) - b.addLine("}\n", indent+1) - } - default: - newFormat := "out.%s = new(%s)\n" - newStmt := fmt.Sprintf(newFormat, inField.Name, g.typeName(inField.Type.Elem())) - b.addLine(newStmt, indent+1) - assignFormat := "*out.%s = *in.%s\n" - assignStmt := fmt.Sprintf(assignFormat, inField.Name, inField.Name) - b.addLine(assignStmt, indent+1) - } - b.addLine("} else {\n", indent) - elseFormat := "out.%s = nil\n" - elseStmt := fmt.Sprintf(elseFormat, inField.Name) - b.addLine(elseStmt, indent+1) - b.addLine("}\n", indent) - return nil -} - -func (g *deepCopyGenerator) writeDeepCopyForSlice(b *buffer, inField reflect.StructField, indent int) error { - ifFormat := "if in.%s != nil {\n" - ifStmt := fmt.Sprintf(ifFormat, inField.Name) - b.addLine(ifStmt, indent) - newFormat := "out.%s = make(%s, len(in.%s))\n" - newStmt := fmt.Sprintf(newFormat, inField.Name, g.typeName(inField.Type), inField.Name) - b.addLine(newStmt, indent+1) - forFormat := "for i := range in.%s {\n" - forStmt := fmt.Sprintf(forFormat, inField.Name) - b.addLine(forStmt, indent+1) - - kind := inField.Type.Elem().Kind() - switch kind { - case reflect.Map, reflect.Ptr, reflect.Slice, reflect.Interface, reflect.Struct: - if _, found := g.copyables[inField.Type.Elem()]; found { - assignFormat := "if err := %s(in.%s[i], &out.%s[i], c); err != nil {\n" - funcName := g.deepCopyFunctionName(inField.Type.Elem()) - assignStmt := fmt.Sprintf(assignFormat, funcName, inField.Name, inField.Name) - b.addLine(assignStmt, indent+2) - b.addLine("return err\n", indent+3) - b.addLine("}\n", indent+2) - } else { - ifFormat := "if newVal, err := c.DeepCopy(in.%s[i]); err != nil {\n" - ifStmt := fmt.Sprintf(ifFormat, inField.Name) - b.addLine(ifStmt, indent+2) - b.addLine("return err\n", indent+3) - if kind != reflect.Struct { - b.addLine("} else if newVal == nil {\n", indent+2) - b.addLine(fmt.Sprintf("out.%s[i] = nil\n", inField.Name), indent+3) - } - b.addLine("} else {\n", indent+2) 
- assignFormat := "out.%s[i] = newVal.(%s)\n" - assignStmt := fmt.Sprintf(assignFormat, inField.Name, g.typeName(inField.Type.Elem())) - b.addLine(assignStmt, indent+3) - b.addLine("}\n", indent+2) - } - default: - assignFormat := "out.%s[i] = in.%s[i]\n" - assignStmt := fmt.Sprintf(assignFormat, inField.Name, inField.Name) - b.addLine(assignStmt, indent+2) - } - b.addLine("}\n", indent+1) - b.addLine("} else {\n", indent) - elseFormat := "out.%s = nil\n" - elseStmt := fmt.Sprintf(elseFormat, inField.Name) - b.addLine(elseStmt, indent+1) - b.addLine("}\n", indent) - return nil -} - -func (g *deepCopyGenerator) writeDeepCopyForStruct(b *buffer, inType reflect.Type, indent int) error { - for i := 0; i < inType.NumField(); i++ { - inField := inType.Field(i) - switch inField.Type.Kind() { - case reflect.Map: - if err := g.writeDeepCopyForMap(b, inField, indent); err != nil { - return err - } - case reflect.Ptr: - if err := g.writeDeepCopyForPtr(b, inField, indent); err != nil { - return err - } - case reflect.Slice: - if err := g.writeDeepCopyForSlice(b, inField, indent); err != nil { - return err - } - case reflect.Interface: - ifFormat := "if newVal, err := c.DeepCopy(in.%s); err != nil {\n" - ifStmt := fmt.Sprintf(ifFormat, inField.Name) - b.addLine(ifStmt, indent) - b.addLine("return err\n", indent+1) - b.addLine("} else if newVal == nil {\n", indent) - b.addLine(fmt.Sprintf("out.%s = nil\n", inField.Name), indent+1) - b.addLine("} else {\n", indent) - copyFormat := "out.%s = newVal.(%s)\n" - copyStmt := fmt.Sprintf(copyFormat, inField.Name, g.typeName(inField.Type)) - b.addLine(copyStmt, indent+1) - b.addLine("}\n", indent) - case reflect.Struct: - if _, found := g.copyables[inField.Type]; found { - ifFormat := "if err := %s(in.%s, &out.%s, c); err != nil {\n" - funcName := g.deepCopyFunctionName(inField.Type) - ifStmt := fmt.Sprintf(ifFormat, funcName, inField.Name, inField.Name) - b.addLine(ifStmt, indent) - b.addLine("return err\n", indent+1) - b.addLine("}\n", indent) - } else { - ifFormat := "if newVal, err := c.DeepCopy(in.%s); err != nil {\n" - ifStmt := fmt.Sprintf(ifFormat, inField.Name) - b.addLine(ifStmt, indent) - b.addLine("return err\n", indent+1) - b.addLine("} else {\n", indent) - assignFormat := "out.%s = newVal.(%s)\n" - assignStmt := fmt.Sprintf(assignFormat, inField.Name, g.typeName(inField.Type)) - b.addLine(assignStmt, indent+1) - b.addLine("}\n", indent) - } - default: - // This should handle all simple types. 
- assignFormat := "out.%s = in.%s\n" - assignStmt := fmt.Sprintf(assignFormat, inField.Name, inField.Name) - b.addLine(assignStmt, indent) - } - } - return nil -} - -func (g *deepCopyGenerator) writeDeepCopyForType(b *buffer, inType reflect.Type, indent int) error { - g.writeHeader(b, inType, indent) - switch inType.Kind() { - case reflect.Struct: - if err := g.writeDeepCopyForStruct(b, inType, indent+1); err != nil { - return err - } - default: - return fmt.Errorf("type not supported: %v", inType) - } - g.writeFooter(b, indent) - return nil -} - -func (g *deepCopyGenerator) writeRegisterHeader(b *buffer, pkg string, indent int) { - b.addLine("func init() {\n", indent) - registerFormat := "err := %s.AddGeneratedDeepCopyFuncs(\n" - b.addLine(fmt.Sprintf(registerFormat, pkg), indent+1) -} - -func (g *deepCopyGenerator) writeRegisterFooter(b *buffer, indent int) { - b.addLine(")\n", indent+1) - b.addLine("if err != nil {\n", indent+1) - b.addLine("// if one of the deep copy functions is malformed, detect it immediately.\n", indent+2) - b.addLine("panic(err)\n", indent+2) - b.addLine("}\n", indent+1) - b.addLine("}\n", indent) - b.addLine("\n", indent) -} - -func (g *deepCopyGenerator) RegisterDeepCopyFunctions(w io.Writer, pkg string) error { - var keys []reflect.Type - for key := range g.copyables { - keys = append(keys, key) - } - sort.Sort(byPkgAndName(keys)) - - buffer := newBuffer() - indent := 0 - g.writeRegisterHeader(buffer, pkg, indent) - for _, inType := range keys { - funcStmt := fmt.Sprintf("%s,\n", g.deepCopyFunctionName(inType)) - buffer.addLine(funcStmt, indent+2) - } - g.writeRegisterFooter(buffer, indent) - if err := buffer.flushLines(w); err != nil { - return err - } - return nil -} - -func (g *deepCopyGenerator) OverwritePackage(pkg, overwrite string) { - g.pkgOverwrites[pkg] = overwrite -} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/embedded.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/embedded.go index 0934d6837c2b..a62080e39aca 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/embedded.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/embedded.go @@ -24,9 +24,9 @@ import ( ) type encodable struct { - e Encoder `json:"-"` + E Encoder `json:"-"` obj Object - versions []unversioned.GroupVersion `json:"-"` + versions []unversioned.GroupVersion } func (e encodable) GetObjectKind() unversioned.ObjectKind { return e.obj.GetObjectKind() } @@ -47,7 +47,7 @@ func (re encodable) UnmarshalJSON(in []byte) error { // Marshal may get called on pointers or values, so implement MarshalJSON on value. // http://stackoverflow.com/questions/21390979/custom-marshaljson-never-gets-called-in-go func (re encodable) MarshalJSON() ([]byte, error) { - return Encode(re.e, re.obj) + return Encode(re.E, re.obj) } // NewEncodableList creates an object that will be encoded with the provided codec on demand. @@ -69,56 +69,68 @@ func (re *Unknown) UnmarshalJSON(in []byte) error { return errors.New("runtime.Unknown: UnmarshalJSON on nil pointer") } re.TypeMeta = TypeMeta{} - re.RawJSON = append(re.RawJSON[0:0], in...) + re.Raw = append(re.Raw[0:0], in...) + re.ContentEncoding = "" + re.ContentType = ContentTypeJSON return nil } // Marshal may get called on pointers or values, so implement MarshalJSON on value. // http://stackoverflow.com/questions/21390979/custom-marshaljson-never-gets-called-in-go func (re Unknown) MarshalJSON() ([]byte, error) { - if re.RawJSON == nil { + // If ContentType is unset, we assume this is JSON. 
+ if re.ContentType != "" && re.ContentType != ContentTypeJSON { + return nil, errors.New("runtime.Unknown: MarshalJSON on non-json data") + } + if re.Raw == nil { return []byte("null"), nil } - return re.RawJSON, nil + return re.Raw, nil +} + +func Convert_runtime_Object_To_runtime_RawExtension(in *Object, out *RawExtension, s conversion.Scope) error { + if in == nil { + out.Raw = []byte("null") + return nil + } + obj := *in + if unk, ok := obj.(*Unknown); ok { + if unk.Raw != nil { + out.Raw = unk.Raw + return nil + } + obj = out.Object + } + if obj == nil { + out.Raw = nil + return nil + } + out.Object = obj + return nil +} + +func Convert_runtime_RawExtension_To_runtime_Object(in *RawExtension, out *Object, s conversion.Scope) error { + if in.Object != nil { + *out = in.Object + return nil + } + data := in.Raw + if len(data) == 0 || (len(data) == 4 && string(data) == "null") { + *out = nil + return nil + } + *out = &Unknown{ + Raw: data, + // TODO: Set ContentEncoding and ContentType appropriately. + // Currently we set ContentTypeJSON to make tests passing. + ContentType: ContentTypeJSON, + } + return nil } func DefaultEmbeddedConversions() []interface{} { return []interface{}{ - func(in *Object, out *RawExtension, s conversion.Scope) error { - if in == nil { - out.RawJSON = []byte("null") - return nil - } - obj := *in - if unk, ok := obj.(*Unknown); ok { - if unk.RawJSON != nil { - out.RawJSON = unk.RawJSON - return nil - } - obj = out.Object - } - if obj == nil { - out.RawJSON = nil - return nil - } - out.Object = obj - return nil - }, - - func(in *RawExtension, out *Object, s conversion.Scope) error { - if in.Object != nil { - *out = in.Object - return nil - } - data := in.RawJSON - if len(data) == 0 || (len(data) == 4 && string(data) == "null") { - *out = nil - return nil - } - *out = &Unknown{ - RawJSON: data, - } - return nil - }, + Convert_runtime_Object_To_runtime_RawExtension, + Convert_runtime_RawExtension_To_runtime_Object, } } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/embedded_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/embedded_test.go new file mode 100644 index 000000000000..6a143fb086d6 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/embedded_test.go @@ -0,0 +1,290 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package runtime_test + +import ( + "encoding/json" + "reflect" + "strings" + "testing" + + "k8s.io/kubernetes/pkg/api/meta" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/runtime/serializer" + "k8s.io/kubernetes/pkg/util/diff" +) + +type EmbeddedTest struct { + runtime.TypeMeta + ID string + Object runtime.Object + EmptyObject runtime.Object +} + +type EmbeddedTestExternal struct { + runtime.TypeMeta `json:",inline"` + ID string `json:"id,omitempty"` + Object runtime.RawExtension `json:"object,omitempty"` + EmptyObject runtime.RawExtension `json:"emptyObject,omitempty"` +} + +type ObjectTest struct { + runtime.TypeMeta + + ID string + Items []runtime.Object +} + +type ObjectTestExternal struct { + runtime.TypeMeta `yaml:",inline" json:",inline"` + + ID string `json:"id,omitempty"` + Items []runtime.RawExtension `json:"items,omitempty"` +} + +func (obj *ObjectTest) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } +func (obj *ObjectTestExternal) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } +func (obj *EmbeddedTest) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } +func (obj *EmbeddedTestExternal) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } + +func TestDecodeEmptyRawExtensionAsObject(t *testing.T) { + internalGV := unversioned.GroupVersion{Group: "test.group", Version: runtime.APIVersionInternal} + externalGV := unversioned.GroupVersion{Group: "test.group", Version: "v1test"} + externalGVK := externalGV.WithKind("ObjectTest") + + s := runtime.NewScheme() + s.AddKnownTypes(internalGV, &ObjectTest{}) + s.AddKnownTypeWithName(externalGVK, &ObjectTestExternal{}) + + codec := serializer.NewCodecFactory(s).LegacyCodec(externalGV) + + obj, gvk, err := codec.Decode([]byte(`{"kind":"`+externalGVK.Kind+`","apiVersion":"`+externalGV.String()+`","items":[{}]}`), nil, nil) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + test := obj.(*ObjectTest) + if unk, ok := test.Items[0].(*runtime.Unknown); !ok || unk.Kind != "" || unk.APIVersion != "" || string(unk.Raw) != "{}" || unk.ContentType != runtime.ContentTypeJSON { + t.Fatalf("unexpected object: %#v", test.Items[0]) + } + if *gvk != externalGVK { + t.Fatalf("unexpected kind: %#v", gvk) + } + + obj, gvk, err = codec.Decode([]byte(`{"kind":"`+externalGVK.Kind+`","apiVersion":"`+externalGV.String()+`","items":[{"kind":"Other","apiVersion":"v1"}]}`), nil, nil) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + test = obj.(*ObjectTest) + if unk, ok := test.Items[0].(*runtime.Unknown); !ok || unk.Kind != "" || unk.APIVersion != "" || string(unk.Raw) != `{"kind":"Other","apiVersion":"v1"}` || unk.ContentType != runtime.ContentTypeJSON { + t.Fatalf("unexpected object: %#v", test.Items[0]) + } + if *gvk != externalGVK { + t.Fatalf("unexpected kind: %#v", gvk) + } +} + +func TestArrayOfRuntimeObject(t *testing.T) { + internalGV := unversioned.GroupVersion{Group: "test.group", Version: runtime.APIVersionInternal} + externalGV := unversioned.GroupVersion{Group: "test.group", Version: "v1test"} + + s := runtime.NewScheme() + s.AddKnownTypes(internalGV, &EmbeddedTest{}) + s.AddKnownTypeWithName(externalGV.WithKind("EmbeddedTest"), &EmbeddedTestExternal{}) + s.AddKnownTypes(internalGV, &ObjectTest{}) + s.AddKnownTypeWithName(externalGV.WithKind("ObjectTest"), &ObjectTestExternal{}) + + codec := serializer.NewCodecFactory(s).LegacyCodec(externalGV) + + innerItems := []runtime.Object{ + &EmbeddedTest{ID: "baz"}, + } + 
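+ // The outer list below deliberately mixes three flavors of runtime.Object:
+ // schema-registered objects (EmbeddedTest), a pre-serialized runtime.Unknown
+ // holding raw JSON for a kind the scheme has never seen, and a nested list
+ // wrapped with NewEncodableList so it is re-encoded with this codec on
+ // demand; round-tripping all of them through a single wire document is the
+ // behavior RawExtension exists to support.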
items := []runtime.Object{ + &EmbeddedTest{ID: "foo"}, + &EmbeddedTest{ID: "bar"}, + // TODO: until YAML is removed, this JSON must be in ascending key order to ensure consistent roundtrip serialization + &runtime.Unknown{ + Raw: []byte(`{"apiVersion":"unknown.group/unknown","foo":"bar","kind":"OtherTest"}`), + ContentType: runtime.ContentTypeJSON, + }, + &ObjectTest{ + Items: runtime.NewEncodableList(codec, innerItems), + }, + } + internal := &ObjectTest{ + Items: runtime.NewEncodableList(codec, items), + } + wire, err := runtime.Encode(codec, internal) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + t.Logf("Wire format is:\n%s\n", string(wire)) + + obj := &ObjectTestExternal{} + if err := json.Unmarshal(wire, obj); err != nil { + t.Fatalf("unexpected error: %v", err) + } + t.Logf("exact wire is: %s", string(obj.Items[0].Raw)) + + items[3] = &ObjectTest{Items: innerItems} + internal.Items = items + + decoded, err := runtime.Decode(codec, wire) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + list, err := meta.ExtractList(decoded) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if errs := runtime.DecodeList(list, codec); len(errs) > 0 { + t.Fatalf("unexpected error: %v", errs) + } + + list2, err := meta.ExtractList(list[3]) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if errs := runtime.DecodeList(list2, codec); len(errs) > 0 { + t.Fatalf("unexpected error: %v", errs) + } + if err := meta.SetList(list[3], list2); err != nil { + t.Fatalf("unexpected error: %v", err) + } + + // we want DecodeList to set type meta if possible, even on runtime.Unknown objects + internal.Items[2].(*runtime.Unknown).TypeMeta = runtime.TypeMeta{Kind: "OtherTest", APIVersion: "unknown.group/unknown"} + if e, a := internal.Items, list; !reflect.DeepEqual(e, a) { + t.Errorf("mismatched decoded: %s", diff.ObjectGoPrintSideBySide(e, a)) + } +} + +func TestNestedObject(t *testing.T) { + internalGV := unversioned.GroupVersion{Group: "test.group", Version: runtime.APIVersionInternal} + externalGV := unversioned.GroupVersion{Group: "test.group", Version: "v1test"} + embeddedTestExternalGVK := externalGV.WithKind("EmbeddedTest") + + s := runtime.NewScheme() + s.AddKnownTypes(internalGV, &EmbeddedTest{}) + s.AddKnownTypeWithName(embeddedTestExternalGVK, &EmbeddedTestExternal{}) + + codec := serializer.NewCodecFactory(s).LegacyCodec(externalGV) + + inner := &EmbeddedTest{ + ID: "inner", + } + outer := &EmbeddedTest{ + ID: "outer", + Object: runtime.NewEncodable(codec, inner), + } + + wire, err := runtime.Encode(codec, outer) + if err != nil { + t.Fatalf("Unexpected encode error '%v'", err) + } + + t.Logf("Wire format is:\n%v\n", string(wire)) + + decoded, err := runtime.Decode(codec, wire) + if err != nil { + t.Fatalf("Unexpected decode error %v", err) + } + + // for later tests + outer.Object = inner + + if e, a := outer, decoded; reflect.DeepEqual(e, a) { + t.Errorf("Expected unequal %#v %#v", e, a) + } + + obj, err := runtime.Decode(codec, decoded.(*EmbeddedTest).Object.(*runtime.Unknown).Raw) + if err != nil { + t.Fatal(err) + } + decoded.(*EmbeddedTest).Object = obj + if e, a := outer, decoded; !reflect.DeepEqual(e, a) { + t.Errorf("Expected equal %#v %#v", e, a) + } + + // test JSON decoding of the external object, which should preserve + // raw bytes + var externalViaJSON EmbeddedTestExternal + err = json.Unmarshal(wire, &externalViaJSON) + if err != nil { + t.Fatalf("Unexpected decode error %v", err) + } + if externalViaJSON.Kind == "" || 
externalViaJSON.APIVersion == "" || externalViaJSON.ID != "outer" { + t.Errorf("Expected objects to have type info set, got %#v", externalViaJSON) + } + if !reflect.DeepEqual(externalViaJSON.EmptyObject.Raw, []byte("null")) || len(externalViaJSON.Object.Raw) == 0 { + t.Errorf("Expected deserialization of nested objects into bytes, got %#v", externalViaJSON) + } + + // test JSON decoding, too, since Decode uses yaml unmarshalling. + // Generic unmarshalling of JSON cannot load the nested objects because there is + // no default schema set. Consumers wishing to get direct JSON decoding must use + // the external representation. + var decodedViaJSON EmbeddedTest + err = json.Unmarshal(wire, &decodedViaJSON) + if err == nil || !strings.Contains(err.Error(), "unmarshal object into Go value of type runtime.Object") { + t.Fatalf("Unexpected decode error %v", err) + } + if a := decodedViaJSON; a.Object != nil || a.EmptyObject != nil { + t.Errorf("Expected embedded objects to be nil: %#v", a) + } +} + +// TestDeepCopyOfRuntimeObject checks to make sure that runtime.Objects can be passed through DeepCopy with fidelity +func TestDeepCopyOfRuntimeObject(t *testing.T) { + internalGV := unversioned.GroupVersion{Group: "test.group", Version: runtime.APIVersionInternal} + externalGV := unversioned.GroupVersion{Group: "test.group", Version: "v1test"} + embeddedTestExternalGVK := externalGV.WithKind("EmbeddedTest") + + s := runtime.NewScheme() + s.AddKnownTypes(internalGV, &EmbeddedTest{}) + s.AddKnownTypeWithName(embeddedTestExternalGVK, &EmbeddedTestExternal{}) + + original := &EmbeddedTest{ + ID: "outer", + Object: &EmbeddedTest{ + ID: "inner", + }, + } + + codec := serializer.NewCodecFactory(s).LegacyCodec(externalGV) + + originalData, err := runtime.Encode(codec, original) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + t.Logf("originalRole = %v\n", string(originalData)) + + copyOfOriginal, err := s.DeepCopy(original) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + copiedData, err := runtime.Encode(codec, copyOfOriginal.(runtime.Object)) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + t.Logf("copyOfRole = %v\n", string(copiedData)) + + if !reflect.DeepEqual(original, copyOfOriginal) { + t.Errorf("expected \n%v\n, got \n%v", string(originalData), string(copiedData)) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/extension.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/extension.go index 629f675b69e7..eca82986eab9 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/extension.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/extension.go @@ -25,14 +25,14 @@ func (re *RawExtension) UnmarshalJSON(in []byte) error { if re == nil { return errors.New("runtime.RawExtension: UnmarshalJSON on nil pointer") } - re.RawJSON = append(re.RawJSON[0:0], in...) + re.Raw = append(re.Raw[0:0], in...) return nil } // Marshal may get called on pointers or values, so implement MarshalJSON on value. // http://stackoverflow.com/questions/21390979/custom-marshaljson-never-gets-called-in-go func (re RawExtension) MarshalJSON() ([]byte, error) { - if re.RawJSON == nil { + if re.Raw == nil { // TODO: this is to support legacy behavior of JSONPrinter and YAMLPrinter, which // expect to call json.Marshal on arbitrary versioned objects (even those not in // the scheme).
pkg/kubectl/resource#AsVersionedObjects and its interaction with @@ -43,5 +43,6 @@ func (re RawExtension) MarshalJSON() ([]byte, error) { } return []byte("null"), nil } - return re.RawJSON, nil + // TODO: Check whether ContentType is actually JSON before returning it. + return re.Raw, nil } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/extension_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/extension_test.go new file mode 100644 index 000000000000..3545284e9998 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/extension_test.go @@ -0,0 +1,39 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package runtime_test + +import ( + "encoding/json" + "testing" + + "k8s.io/kubernetes/pkg/runtime" +) + +func TestEmbeddedRawExtensionMarshal(t *testing.T) { + type test struct { + Ext runtime.RawExtension + } + + extension := test{Ext: runtime.RawExtension{Raw: []byte(`{"foo":"bar"}`)}} + data, err := json.Marshal(extension) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if string(data) != `{"Ext":{"foo":"bar"}}` { + t.Errorf("unexpected data: %s", string(data)) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/generated.pb.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/generated.pb.go new file mode 100644 index 000000000000..289268483632 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/generated.pb.go @@ -0,0 +1,689 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/pkg/runtime/generated.proto +// DO NOT EDIT! + +/* + Package runtime is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/pkg/runtime/generated.proto + + It has these top-level messages: + RawExtension + TypeMeta + Unknown +*/ +package runtime + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. 
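+// (A standard protoc-gen-gogo preamble: the blank-identifier assignments
+// below keep the proto, fmt, and math imports alive even when the generated
+// code paths do not otherwise touch them.)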
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +func (m *RawExtension) Reset() { *m = RawExtension{} } +func (m *RawExtension) String() string { return proto.CompactTextString(m) } +func (*RawExtension) ProtoMessage() {} + +func (m *TypeMeta) Reset() { *m = TypeMeta{} } +func (m *TypeMeta) String() string { return proto.CompactTextString(m) } +func (*TypeMeta) ProtoMessage() {} + +func (m *Unknown) Reset() { *m = Unknown{} } +func (m *Unknown) String() string { return proto.CompactTextString(m) } +func (*Unknown) ProtoMessage() {} + +func init() { + proto.RegisterType((*RawExtension)(nil), "k8s.io.kubernetes.pkg.runtime.RawExtension") + proto.RegisterType((*TypeMeta)(nil), "k8s.io.kubernetes.pkg.runtime.TypeMeta") + proto.RegisterType((*Unknown)(nil), "k8s.io.kubernetes.pkg.runtime.Unknown") +} +func (m *RawExtension) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *RawExtension) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Raw != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Raw))) + i += copy(data[i:], m.Raw) + } + return i, nil +} + +func (m *TypeMeta) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *TypeMeta) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.APIVersion))) + i += copy(data[i:], m.APIVersion) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Kind))) + i += copy(data[i:], m.Kind) + return i, nil +} + +func (m *Unknown) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Unknown) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.TypeMeta.Size())) + n1, err := m.TypeMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n1 + if m.Raw != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Raw))) + i += copy(data[i:], m.Raw) + } + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ContentEncoding))) + i += copy(data[i:], m.ContentEncoding) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ContentType))) + i += copy(data[i:], m.ContentType) + return i, nil +} + +func encodeFixed64Generated(data []byte, offset int, v uint64) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + data[offset+4] = uint8(v >> 32) + data[offset+5] = uint8(v >> 40) + data[offset+6] = uint8(v >> 48) + data[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(data []byte, offset int, v uint32) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(data []byte, offset int, v uint64) int { + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return offset + 1 +} +func (m *RawExtension) Size() (n int) { + var l int + 
_ = l + if m.Raw != nil { + l = len(m.Raw) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *TypeMeta) Size() (n int) { + var l int + _ = l + l = len(m.APIVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Unknown) Size() (n int) { + var l int + _ = l + l = m.TypeMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Raw != nil { + l = len(m.Raw) + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.ContentEncoding) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ContentType) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *RawExtension) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RawExtension: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RawExtension: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Raw", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Raw = append(m.Raw[:0], data[iNdEx:postIndex]...) 
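+ // Appending into m.Raw[:0] copies the length-delimited payload out of the
+ // wire buffer while reusing whatever capacity m.Raw already has, so the
+ // stored bytes do not alias the caller's data slice.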
+ if m.Raw == nil { + m.Raw = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TypeMeta) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TypeMeta: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TypeMeta: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIVersion = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Unknown) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Unknown: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Unknown: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TypeMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.TypeMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Raw", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Raw = append(m.Raw[:0], data[iNdEx:postIndex]...) + if m.Raw == nil { + m.Raw = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContentEncoding", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContentEncoding = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContentType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContentType = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(data []byte) (n int, err error) { + l := len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if data[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + 
length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(data[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/generated.proto b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/generated.proto new file mode 100644 index 000000000000..c878b49968d5 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/generated.proto @@ -0,0 +1,121 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.kubernetes.pkg.runtime; + +import "k8s.io/kubernetes/pkg/api/resource/generated.proto"; +import "k8s.io/kubernetes/pkg/util/intstr/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "runtime"; + +// RawExtension is used to hold extensions in external versions. +// +// To use this, make a field which has RawExtension as its type in your external, versioned +// struct, and Object in your internal struct. You also need to register your +// various plugin types. +// +// // Internal package: +// type MyAPIObject struct { +// runtime.TypeMeta `json:",inline"` +// MyPlugin runtime.Object `json:"myPlugin"` +// } +// type PluginA struct { +// AOption string `json:"aOption"` +// } +// +// // External package: +// type MyAPIObject struct { +// runtime.TypeMeta `json:",inline"` +// MyPlugin runtime.RawExtension `json:"myPlugin"` +// } +// type PluginA struct { +// AOption string `json:"aOption"` +// } +// +// // On the wire, the JSON will look something like this: +// { +// "kind":"MyAPIObject", +// "apiVersion":"v1", +// "myPlugin": { +// "kind":"PluginA", +// "aOption":"foo", +// }, +// } +// +// So what happens? Decode first uses json or yaml to unmarshal the serialized data into +// your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. +// The next step is to copy (using pkg/conversion) into the internal struct. 
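The "store raw, unpack later" behavior this comment is describing (and continues to describe just below, where conversion unpacks the stored JSON) is easy to reproduce in miniature: a custom UnmarshalJSON that keeps the raw bytes is the whole trick. A self-contained sketch, with hypothetical local types standing in for the runtime package's:

    package main

    import (
    	"encoding/json"
    	"fmt"
    )

    // rawExtension mimics the behavior described above: decoding stores
    // the raw JSON of the nested object without unpacking it.
    type rawExtension struct {
    	Raw []byte
    }

    func (re *rawExtension) UnmarshalJSON(b []byte) error {
    	re.Raw = append([]byte(nil), b...) // keep the bytes, defer decoding
    	return nil
    }

    type myAPIObject struct {
    	Kind     string       `json:"kind"`
    	MyPlugin rawExtension `json:"myPlugin"`
    }

    func main() {
    	in := `{"kind":"MyAPIObject","myPlugin":{"kind":"PluginA","aOption":"foo"}}`
    	var obj myAPIObject
    	if err := json.Unmarshal([]byte(in), &obj); err != nil {
    		panic(err)
    	}
    	// The plugin payload is intact but untyped; a later conversion
    	// step would unpack it into the right concrete type.
    	fmt.Println(string(obj.MyPlugin.Raw))
    }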
The runtime +// package's DefaultScheme has conversion functions installed which will unpack the +// JSON stored in RawExtension, turning it into the correct object type, and storing it +// in the Object. (TODO: In the case where the object is of an unknown type, a +// runtime.Unknown object will be created and stored.) +// +// +protobuf=true +message RawExtension { + // Raw is the underlying serialization of this object. + // + // TODO: Determine how to detect ContentType and ContentEncoding of 'Raw' data. + optional bytes raw = 1; +} + +// TypeMeta is shared by all top level objects. The proper way to use it is to inline it in your type, +// like this: +// type MyAwesomeAPIObject struct { +// runtime.TypeMeta `json:",inline"` +// ... // other fields +// } +// func (obj *MyAwesomeAPIObject) SetGroupVersionKind(gvk *unversioned.GroupVersionKind) { unversioned.UpdateTypeMeta(obj,gvk) }; GroupVersionKind() *GroupVersionKind +// +// TypeMeta is provided here for convenience. You may use it directly from this package or define +// your own with the same fields. +// +// +protobuf=true +message TypeMeta { + optional string apiVersion = 1; + + optional string kind = 2; +} + +// Unknown allows api objects with unknown types to be passed through. This can be used +// to deal with the API objects from a plug-in. Unknown objects still have functioning +// TypeMeta features-- kind, version, etc. +// TODO: Make this object have easy access to field-based accessors and setters for +// metadata and field mutation. +// +// +protobuf=true +message Unknown { + optional TypeMeta typeMeta = 1; + + // Raw will hold the complete serialized object which couldn't be matched + // with a registered type. Most likely, nothing should be done with this + // except for passing it through the system. + optional bytes raw = 2; + + // ContentEncoding is the encoding used to encode 'Raw' data. + // Unspecified means no encoding. + optional string contentEncoding = 3; + + // ContentType is the serialization method used to serialize 'Raw'. + // Unspecified means ContentTypeJSON. + optional string contentType = 4; +} + diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/helper.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/helper.go index 4a76e81dc9f9..3181ddf7e7e1 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/helper.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/helper.go @@ -18,6 +18,7 @@ package runtime import ( "fmt" + "io" "reflect" "k8s.io/kubernetes/pkg/api/unversioned" @@ -25,25 +26,65 @@ import ( "k8s.io/kubernetes/pkg/util/errors" ) -type objectTyperToTyper struct { - typer ObjectTyper +// unsafeObjectConvertor implements ObjectConvertor using the unsafe conversion path. +type unsafeObjectConvertor struct { + *Scheme } -func (t objectTyperToTyper) ObjectKind(obj Object) (*unversioned.GroupVersionKind, bool, error) { - gvk, err := t.typer.ObjectKind(obj) - if err != nil { - return nil, false, err +var _ ObjectConvertor = unsafeObjectConvertor{} + +// ConvertToVersion converts in to the provided outVersion without copying the input first, which +// is only safe if the output object is not mutated or reused. +func (c unsafeObjectConvertor) ConvertToVersion(in Object, outVersion unversioned.GroupVersion) (Object, error) { + return c.Scheme.UnsafeConvertToVersion(in, outVersion) +} + +// UnsafeObjectConvertor performs object conversion without copying the object structure, +// for use when the converted object will not be reused or mutated.
Primarily for use within +// versioned codecs, which use the external object for serialization but do not return it. +func UnsafeObjectConvertor(scheme *Scheme) ObjectConvertor { + return unsafeObjectConvertor{scheme} +} + +// SetField puts the value of src, into fieldName, which must be a member of v. +// The value of src must be assignable to the field. +func SetField(src interface{}, v reflect.Value, fieldName string) error { + field := v.FieldByName(fieldName) + if !field.IsValid() { + return fmt.Errorf("couldn't find %v field in %#v", fieldName, v.Interface()) } - unversionedType, ok := t.typer.IsUnversioned(obj) - if !ok { - // ObjectTyper violates its contract - return nil, false, fmt.Errorf("typer returned a kind for %v, but then reported it was not in the scheme with IsUnversioned", reflect.TypeOf(obj)) + srcValue := reflect.ValueOf(src) + if srcValue.Type().AssignableTo(field.Type()) { + field.Set(srcValue) + return nil } - return &gvk, unversionedType, nil + if srcValue.Type().ConvertibleTo(field.Type()) { + field.Set(srcValue.Convert(field.Type())) + return nil + } + return fmt.Errorf("couldn't assign/convert %v to %v", srcValue.Type(), field.Type()) } -func ObjectTyperToTyper(typer ObjectTyper) Typer { - return objectTyperToTyper{typer: typer} +// Field puts the value of fieldName, which must be a member of v, into dest, +// which must be a variable to which this field's value can be assigned. +func Field(v reflect.Value, fieldName string, dest interface{}) error { + field := v.FieldByName(fieldName) + if !field.IsValid() { + return fmt.Errorf("couldn't find %v field in %#v", fieldName, v.Interface()) + } + destValue, err := conversion.EnforcePtr(dest) + if err != nil { + return err + } + if field.Type().AssignableTo(destValue.Type()) { + destValue.Set(field) + return nil + } + if field.Type().ConvertibleTo(destValue.Type()) { + destValue.Set(field.Convert(destValue.Type())) + return nil + } + return fmt.Errorf("couldn't assign/convert %v to %v", field.Type(), destValue.Type()) } // fieldPtr puts the address of fieldName, which must be a member of v, @@ -80,14 +121,16 @@ func EncodeList(e Encoder, objects []Object, overrides ...unversioned.GroupVersi errs = append(errs, err) continue } - objects[i] = &Unknown{RawJSON: data} + // TODO: Set ContentEncoding and ContentType. + objects[i] = &Unknown{Raw: data} } return errors.NewAggregate(errs) } func decodeListItem(obj *Unknown, decoders []Decoder) (Object, error) { for _, decoder := range decoders { - obj, err := Decode(decoder, obj.RawJSON) + // TODO: Decode based on ContentType. + obj, err := Decode(decoder, obj.Raw) if err != nil { if IsNotRegisteredError(err) { continue @@ -99,7 +142,7 @@ func decodeListItem(obj *Unknown, decoders []Decoder) (Object, error) { // could not decode, so leave the object as Unknown, but give the decoders the // chance to set Unknown.TypeMeta if it is available. 
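SetField and Field above are mirror images: locate a struct field by name via reflection, try direct assignment, and fall back to a conversion when the types are merely convertible. A cut-down runnable illustration of the SetField half (local names only, not the runtime package):

    package main

    import (
    	"fmt"
    	"reflect"
    )

    type pod struct{ Name string }

    // setField assigns (or converts) a value into the named struct
    // field, mirroring the two-step check in SetField above.
    func setField(src interface{}, v reflect.Value, fieldName string) error {
    	field := v.FieldByName(fieldName)
    	if !field.IsValid() {
    		return fmt.Errorf("no field %q", fieldName)
    	}
    	sv := reflect.ValueOf(src)
    	if sv.Type().AssignableTo(field.Type()) {
    		field.Set(sv)
    		return nil
    	}
    	if sv.Type().ConvertibleTo(field.Type()) {
    		field.Set(sv.Convert(field.Type()))
    		return nil
    	}
    	return fmt.Errorf("cannot assign %v to %v", sv.Type(), field.Type())
    }

    func main() {
    	p := pod{}
    	// The value must come from an addressable struct for Set to work.
    	if err := setField("web", reflect.ValueOf(&p).Elem(), "Name"); err != nil {
    		panic(err)
    	}
    	fmt.Println(p.Name) // web
    }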
for _, decoder := range decoders { - if err := DecodeInto(decoder, obj.RawJSON, obj); err == nil { + if err := DecodeInto(decoder, obj.Raw, obj); err == nil { return obj, nil } } @@ -130,19 +173,9 @@ type MultiObjectTyper []ObjectTyper var _ ObjectTyper = MultiObjectTyper{} -func (m MultiObjectTyper) ObjectKind(obj Object) (gvk unversioned.GroupVersionKind, err error) { +func (m MultiObjectTyper) ObjectKinds(obj Object) (gvks []unversioned.GroupVersionKind, unversionedType bool, err error) { for _, t := range m { - gvk, err = t.ObjectKind(obj) - if err == nil { - return - } - } - return -} - -func (m MultiObjectTyper) ObjectKinds(obj Object) (gvks []unversioned.GroupVersionKind, err error) { - for _, t := range m { - gvks, err = t.ObjectKinds(obj) + gvks, unversionedType, err = t.ObjectKinds(obj) if err == nil { return } @@ -159,11 +192,21 @@ func (m MultiObjectTyper) Recognizes(gvk unversioned.GroupVersionKind) bool { return false } -func (m MultiObjectTyper) IsUnversioned(obj Object) (bool, bool) { - for _, t := range m { - if unversioned, ok := t.IsUnversioned(obj); ok { - return unversioned, true - } +// SetZeroValue sets the object pointed to by objPtr to the zero value of its type. +func SetZeroValue(objPtr Object) error { + v, err := conversion.EnforcePtr(objPtr) + if err != nil { + return err } - return false, false + v.Set(reflect.Zero(v.Type())) + return nil } + +// DefaultFramer is valid for any stream that can read objects serially without +// any separation in the stream. +var DefaultFramer = defaultFramer{} + +type defaultFramer struct{} + +func (defaultFramer) NewFrameReader(r io.ReadCloser) io.ReadCloser { return r } +func (defaultFramer) NewFrameWriter(w io.Writer) io.Writer { return w } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/helper_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/helper_test.go new file mode 100644 index 000000000000..e15a0e799f4b --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/helper_test.go @@ -0,0 +1,51 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
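SetZeroValue, added above, is a small but useful reflect idiom: follow the pointer (conversion.EnforcePtr in the real code) and overwrite the pointee with the zero value of its type, resetting every field in place without allocating a new object. DefaultFramer next to it is deliberately a no-op, since self-delimiting serializations need no framing. A standalone equivalent of the zeroing step:

    package main

    import (
    	"fmt"
    	"reflect"
    )

    type pod struct {
    	Name  string
    	Count int
    }

    // setZero mirrors SetZeroValue: dereference the pointer, then
    // overwrite the value with the zero value of its type.
    func setZero(objPtr interface{}) {
    	v := reflect.ValueOf(objPtr).Elem()
    	v.Set(reflect.Zero(v.Type()))
    }

    func main() {
    	p := &pod{Name: "web", Count: 3}
    	setZero(p)
    	fmt.Printf("%+v\n", *p) // {Name: Count:0}
    }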
+*/ + +package runtime_test + +import ( + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/runtime" +) + +func TestDecodeList(t *testing.T) { + pl := &api.List{ + Items: []runtime.Object{ + &api.Pod{ObjectMeta: api.ObjectMeta{Name: "1"}}, + &runtime.Unknown{ + TypeMeta: runtime.TypeMeta{Kind: "Pod", APIVersion: testapi.Default.GroupVersion().String()}, + Raw: []byte(`{"kind":"Pod","apiVersion":"` + testapi.Default.GroupVersion().String() + `","metadata":{"name":"test"}}`), + ContentType: runtime.ContentTypeJSON, + }, + &runtime.Unstructured{ + Object: map[string]interface{}{ + "kind": "Foo", + "apiVersion": "Bar", + "test": "value", + }, + }, + }, + } + if errs := runtime.DecodeList(pl.Items, testapi.Default.Codec()); len(errs) != 0 { + t.Fatalf("unexpected error %v", errs) + } + if pod, ok := pl.Items[1].(*api.Pod); !ok || pod.Name != "test" { + t.Errorf("object not converted: %#v", pl.Items[1]) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/interfaces.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/interfaces.go index 67b37b4401b5..0fbc930319cd 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/interfaces.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/interfaces.go @@ -30,15 +30,6 @@ const ( APIVersionInternal = "__internal" ) -// Typer retrieves information about an object's group, version, and kind. -type Typer interface { - // ObjectKind returns the version and kind of the provided object, or an - // error if the object is not recognized (IsNotRegisteredError will return true). - // It returns whether the object is considered unversioned at the same time. - // TODO: align the signature of ObjectTyper with this interface - ObjectKind(Object) (*unversioned.GroupVersionKind, bool, error) -} - type Encoder interface { // EncodeToStream writes an object to a stream. Override versions may be provided for each group // that enforce a certain versioning. Implementations may return errors if the versions are incompatible, @@ -80,20 +71,88 @@ type ParameterCodec interface { EncodeParameters(obj Object, to unversioned.GroupVersion) (url.Values, error) } +// Framer is a factory for creating readers and writers that obey a particular framing pattern. +type Framer interface { + NewFrameReader(r io.ReadCloser) io.ReadCloser + NewFrameWriter(w io.Writer) io.Writer +} + +// SerializerInfo contains information about a specific serialization format +type SerializerInfo struct { + Serializer + // EncodesAsText indicates this serializer can be encoded to UTF-8 safely. + EncodesAsText bool + // MediaType is the value that represents this serializer over the wire. + MediaType string +} + +// StreamSerializerInfo contains information about a specific stream serialization format +type StreamSerializerInfo struct { + SerializerInfo + // Framer is the factory for retrieving streams that separate objects on the wire + Framer + // Embedded is the type of the nested serialization that should be used. + Embedded SerializerInfo +} + // NegotiatedSerializer is an interface used for obtaining encoders, decoders, and serializers -// for multiple supported media types. +// for multiple supported media types. This would commonly be accepted by a server component +// that performs HTTP content negotiation to accept multiple formats. type NegotiatedSerializer interface { + // SupportedMediaTypes is the media types supported for reading and writing single objects. 
SupportedMediaTypes() []string - SerializerForMediaType(mediaType string, options map[string]string) (Serializer, bool) - EncoderForVersion(serializer Serializer, gv unversioned.GroupVersion) Encoder - DecoderToVersion(serializer Serializer, gv unversioned.GroupVersion) Decoder + // SerializerForMediaType returns a serializer for the provided media type. params is the set of + // parameters applied to the media type that may modify the resulting output. ok will be false + // if no serializer matched the media type. + SerializerForMediaType(mediaType string, params map[string]string) (s SerializerInfo, ok bool) + + // SupportedStreamingMediaTypes returns the media types of the supported streaming serializers. + // Streaming serializers control how multiple objects are written to a stream output. + SupportedStreamingMediaTypes() []string + // StreamingSerializerForMediaType returns a serializer for the provided media type that supports + // reading and writing multiple objects to a stream. It returns a framer and serializer, or an + // error if no such serializer can be created. Params is the set of parameters applied to the + // media type that may modify the resulting output. ok will be false if no serializer matched + // the media type. + StreamingSerializerForMediaType(mediaType string, params map[string]string) (s StreamSerializerInfo, ok bool) + + // EncoderForVersion returns an encoder that ensures objects being written to the provided + // serializer are in the provided group version. + // TODO: take multiple group versions + EncoderForVersion(serializer Encoder, gv unversioned.GroupVersion) Encoder + // DecoderToVersion returns a decoder that ensures objects being read by the provided + // serializer are in the provided group version by default. + // TODO: take multiple group versions + DecoderToVersion(serializer Decoder, gv unversioned.GroupVersion) Decoder +} + +// StorageSerializer is an interface used for obtaining encoders, decoders, and serializers +// that can read and write data at rest. This would commonly be used by client tools that must +// read files, or server-side storage interfaces that persist RESTful objects. +type StorageSerializer interface { + // SerializerForMediaType returns a serializer for the provided media type. Options is a set of + // parameters applied to the media type that may modify the resulting output. + SerializerForMediaType(mediaType string, options map[string]string) (SerializerInfo, bool) + + // UniversalDeserializer returns a Serializer that can read objects in multiple supported formats + // by introspecting the data at rest. + UniversalDeserializer() Decoder + + // EncoderForVersion returns an encoder that ensures objects being written to the provided + // serializer are in the provided group version. + // TODO: take multiple group versions + EncoderForVersion(serializer Encoder, gv unversioned.GroupVersion) Encoder + // DecoderToVersion returns a decoder that ensures objects being read by the provided + // serializer are in the provided group version by default. + // TODO: take multiple group versions + DecoderToVersion(serializer Decoder, gv unversioned.GroupVersion) Decoder } /////////////////////////////////////////////////////////////////////////////// // Non-codec interfaces type ObjectVersioner interface { - ConvertToVersion(in Object, outVersion string) (out Object, err error) + ConvertToVersion(in Object, outVersion unversioned.GroupVersion) (out Object, err error) } // ObjectConvertor converts an object to a different version.
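The NegotiatedSerializer contract above is aimed at HTTP content negotiation: look up the serializer for the requested media type, and when ok comes back false, fall back to a supported default. A toy negotiator modeling just that lookup-and-fallback shape (plain strings stand in for SerializerInfo):

    package main

    import "fmt"

    // negotiator models only the SerializerForMediaType lookup; the
    // real interface returns a SerializerInfo rather than a string.
    type negotiator struct {
    	supported []string
    }

    func (n negotiator) SerializerForMediaType(mediaType string, _ map[string]string) (string, bool) {
    	for _, m := range n.supported {
    		if m == mediaType {
    			return m, true
    		}
    	}
    	return "", false
    }

    func main() {
    	n := negotiator{supported: []string{"application/json", "application/yaml"}}
    	chosen, ok := n.SerializerForMediaType("application/protobuf", nil)
    	if !ok {
    		// Fall back to the first supported media type.
    		chosen, _ = n.SerializerForMediaType(n.supported[0], nil)
    	}
    	fmt.Println(chosen) // application/json
    }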
@@ -103,27 +162,21 @@ type ObjectConvertor interface { Convert(in, out interface{}) error // ConvertToVersion takes the provided object and converts it to the provided version. This // method does not guarantee that the in object is not mutated. - ConvertToVersion(in Object, outVersion string) (out Object, err error) + ConvertToVersion(in Object, outVersion unversioned.GroupVersion) (out Object, err error) ConvertFieldLabel(version, kind, label, value string) (string, string, error) } // ObjectTyper contains methods for extracting the APIVersion and Kind // of objects. type ObjectTyper interface { - // ObjectKind returns the default group,version,kind of the provided object, or an - // error if the object is not recognized (IsNotRegisteredError will return true). - ObjectKind(Object) (unversioned.GroupVersionKind, error) - // ObjectKinds returns the all possible group,version,kind of the provided object, or an - // error if the object is not recognized (IsNotRegisteredError will return true). - ObjectKinds(Object) ([]unversioned.GroupVersionKind, error) + // ObjectKinds returns all possible group,version,kind of the provided object, true if + // the object is unversioned, or an error if the object is not recognized + // (IsNotRegisteredError will return true). + ObjectKinds(Object) ([]unversioned.GroupVersionKind, bool, error) // Recognizes returns true if the scheme is able to handle the provided version and kind, // or more precisely that the provided version is a possible conversion or decoding // target. Recognizes(gvk unversioned.GroupVersionKind) bool - // IsUnversioned returns true if the provided object is considered unversioned and thus - // should have Version and Group suppressed in the output. If the object is not recognized - // in the scheme, ok is false. - IsUnversioned(Object) (unversioned bool, ok bool) } // ObjectCreater contains methods for instantiating an object by kind and version. diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/protobuf/protobuf.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/protobuf/protobuf.go deleted file mode 100644 index cc050a50b23f..000000000000 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/protobuf/protobuf.go +++ /dev/null @@ -1,158 +0,0 @@ -// +build proto - -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License.
-*/ - -package protobuf - -import ( - "fmt" - "io" - "net/url" - "reflect" - - "github.com/gogo/protobuf/proto" - - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/runtime" -) - -// NewCodec -func NewCodec(version string, creater runtime.ObjectCreater, typer runtime.ObjectTyper, convertor runtime.ObjectConvertor) runtime.Codec { - return &codec{ - version: version, - creater: creater, - typer: typer, - convertor: convertor, - } -} - -// codec decodes protobuf objects -type codec struct { - version string - outputVersion string - creater runtime.ObjectCreater - typer runtime.ObjectTyper - convertor runtime.ObjectConvertor -} - -var _ runtime.Codec = codec{} - -func (c codec) Decode(data []byte) (runtime.Object, error) { - unknown := &runtime.Unknown{} - if err := proto.Unmarshal(data, unknown); err != nil { - return nil, err - } - obj, err := c.creater.New(unknown.APIVersion, unknown.Kind) - if err != nil { - return nil, err - } - pobj, ok := obj.(proto.Message) - if !ok { - return nil, fmt.Errorf("runtime object is not a proto.Message: %v", reflect.TypeOf(obj)) - } - if err := proto.Unmarshal(unknown.RawJSON, pobj); err != nil { - return nil, err - } - if unknown.APIVersion != c.outputVersion { - out, err := c.convertor.ConvertToVersion(obj, c.outputVersion) - if err != nil { - return nil, err - } - obj = out - } - return obj, nil -} - -func (c codec) DecodeToVersion(data []byte, version unversioned.GroupVersion) (runtime.Object, error) { - return nil, fmt.Errorf("unimplemented") -} - -func (c codec) DecodeInto(data []byte, obj runtime.Object) error { - version, kind, err := c.typer.ObjectVersionAndKind(obj) - if err != nil { - return err - } - unknown := &runtime.Unknown{} - if err := proto.Unmarshal(data, unknown); err != nil { - return err - } - if unknown.APIVersion == version && unknown.Kind == kind { - pobj, ok := obj.(proto.Message) - if !ok { - return fmt.Errorf("runtime object is not a proto.Message: %v", reflect.TypeOf(obj)) - } - - return proto.Unmarshal(unknown.RawJSON, pobj) - } - - versioned, err := c.creater.New(unknown.APIVersion, unknown.Kind) - if err != nil { - return err - } - - pobj, ok := versioned.(proto.Message) - if !ok { - return fmt.Errorf("runtime object is not a proto.Message: %v", reflect.TypeOf(obj)) - } - - if err := proto.Unmarshal(unknown.RawJSON, pobj); err != nil { - return err - } - return c.convertor.Convert(versioned, obj) -} - -func (c codec) DecodeIntoWithSpecifiedVersionKind(data []byte, obj runtime.Object, kind unversioned.GroupVersionKind) error { - return fmt.Errorf("unimplemented") -} - -func (c codec) DecodeParametersInto(parameters url.Values, obj runtime.Object) error { - return fmt.Errorf("unimplemented") -} - -func (c codec) Encode(obj runtime.Object) (data []byte, err error) { - version, kind, err := c.typer.ObjectVersionAndKind(obj) - if err != nil { - return nil, err - } - if len(version) == 0 { - version = c.version - converted, err := c.convertor.ConvertToVersion(obj, version) - if err != nil { - return nil, err - } - obj = converted - } - m, ok := obj.(proto.Marshaler) - if !ok { - return nil, fmt.Errorf("object %v (kind: %s in version: %s) does not implement ProtoBuf marshalling", reflect.TypeOf(obj), kind, c.version) - } - b, err := m.Marshal() - if err != nil { - return nil, err - } - return (&runtime.Unknown{ - TypeMeta: runtime.TypeMeta{ - Kind: kind, - APIVersion: version, - }, - RawJSON: b, - }).Marshal() -} - -func (c codec) EncodeToStream(obj runtime.Object, stream io.Writer) error { - return 
fmt.Errorf("unimplemented") -} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/register.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/register.go index ec58b345de5f..5201a15ffd5f 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/register.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/register.go @@ -21,18 +21,19 @@ import ( ) // SetGroupVersionKind satisfies the ObjectKind interface for all objects that embed TypeMeta -func (obj *TypeMeta) SetGroupVersionKind(gvk *unversioned.GroupVersionKind) { +func (obj *TypeMeta) SetGroupVersionKind(gvk unversioned.GroupVersionKind) { obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind() } // GroupVersionKind satisfies the ObjectKind interface for all objects that embed TypeMeta -func (obj *TypeMeta) GroupVersionKind() *unversioned.GroupVersionKind { +func (obj *TypeMeta) GroupVersionKind() unversioned.GroupVersionKind { return unversioned.FromAPIVersionAndKind(obj.APIVersion, obj.Kind) } -func (obj *Unknown) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } -func (obj *Unstructured) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } -func (obj *UnstructuredList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } +func (obj *Unknown) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } + +func (obj *Unstructured) GetObjectKind() unversioned.ObjectKind { return obj } +func (obj *UnstructuredList) GetObjectKind() unversioned.ObjectKind { return obj } // GetObjectKind implements Object for VersionedObjects, returning an empty ObjectKind // interface if no objects are provided, or the ObjectKind interface of the object in the diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/scheme.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/scheme.go index 9a4a708963fb..c49c43e93ffd 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/scheme.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/scheme.go @@ -124,11 +124,8 @@ func (s *Scheme) nameFunc(t reflect.Type) string { // fromScope gets the input version, desired output version, and desired Scheme // from a conversion.Scope. -func (s *Scheme) fromScope(scope conversion.Scope) (inVersion, outVersion string, scheme *Scheme) { - scheme = s - inVersion = scope.Meta().SrcVersion - outVersion = scope.Meta().DestVersion - return inVersion, outVersion, scheme +func (s *Scheme) fromScope(scope conversion.Scope) *Scheme { + return s } // Converter allows access to the converter for the scheme @@ -214,31 +211,32 @@ func (s *Scheme) KnownTypes(gv unversioned.GroupVersion) map[string]reflect.Type return types } -// ObjectKind returns the group,version,kind of the go object, -// or an error if it's not a pointer or is unregistered. -func (s *Scheme) ObjectKind(obj Object) (unversioned.GroupVersionKind, error) { - gvks, err := s.ObjectKinds(obj) +// ObjectKind returns the group,version,kind of the go object and true if this object +// is considered unversioned, or an error if it's not a pointer or is unregistered. +func (s *Scheme) ObjectKind(obj Object) (unversioned.GroupVersionKind, bool, error) { + gvks, unversionedType, err := s.ObjectKinds(obj) if err != nil { - return unversioned.GroupVersionKind{}, err + return unversioned.GroupVersionKind{}, false, err } - return gvks[0], nil + return gvks[0], unversionedType, nil } -// ObjectKinds returns all possible group,version,kind of the go object, -// or an error if it's not a pointer or is unregistered. 
-func (s *Scheme) ObjectKinds(obj Object) ([]unversioned.GroupVersionKind, error) { +// ObjectKinds returns all possible group,version,kind of the go object, true if the +// object is considered unversioned, or an error if it's not a pointer or is unregistered. +func (s *Scheme) ObjectKinds(obj Object) ([]unversioned.GroupVersionKind, bool, error) { v, err := conversion.EnforcePtr(obj) if err != nil { - return nil, err + return nil, false, err } t := v.Type() gvks, ok := s.typeToGVK[t] if !ok { - return nil, ¬RegisteredErr{t: t} + return nil, false, ¬RegisteredErr{t: t} } + _, unversionedType := s.unversionedTypes[t] - return gvks, nil + return gvks, unversionedType, nil } // Recognizes returns true if the scheme is able to handle the provided group,version,kind @@ -275,6 +273,14 @@ func (s *Scheme) New(kind unversioned.GroupVersionKind) (Object, error) { return nil, ¬RegisteredErr{gvk: kind} } +// AddGenericConversionFunc adds a function that accepts the ConversionFunc call pattern +// (for two conversion types) to the converter. These functions are checked first during +// a normal conversion, but are otherwise not called. Use AddConversionFuncs when registering +// typed conversions. +func (s *Scheme) AddGenericConversionFunc(fn conversion.GenericConversionFunc) { + s.converter.AddGenericConversionFunc(fn) +} + // Log sets a logger on the scheme. For test purposes only func (s *Scheme) Log(l conversion.DebugLogger) { s.converter.Debug = l @@ -434,13 +440,13 @@ func (s *Scheme) Convert(in, out interface{}) error { inVersion := unversioned.GroupVersion{Group: "unknown", Version: "unknown"} outVersion := unversioned.GroupVersion{Group: "unknown", Version: "unknown"} if inObj, ok := in.(Object); ok { - if gvk, err := s.ObjectKind(inObj); err == nil { - inVersion = gvk.GroupVersion() + if gvks, _, err := s.ObjectKinds(inObj); err == nil { + inVersion = gvks[0].GroupVersion() } } if outObj, ok := out.(Object); ok { - if gvk, err := s.ObjectKind(outObj); err == nil { - outVersion = gvk.GroupVersion() + if gvks, _, err := s.ObjectKinds(outObj); err == nil { + outVersion = gvks[0].GroupVersion() } } flags, meta := s.generateConvertMeta(inVersion, outVersion, in) @@ -468,16 +474,12 @@ func (s *Scheme) ConvertFieldLabel(version, kind, label, value string) (string, // contain the inKind (or a mapping by name defined with AddKnownTypeWithName). Will also // return an error if the conversion does not result in a valid Object being // returned. The serializer handles loading/serializing nested objects. 
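Scheme.ObjectKinds above is a reverse lookup: the scheme keeps a typeToGVK map from each registered Go type to every group/version/kind it was registered under, plus a separate unversionedTypes set that feeds the new boolean return. A sketch of that data layout (simplified local types, not the real Scheme):

    package main

    import (
    	"fmt"
    	"reflect"
    )

    type gvk struct{ Group, Version, Kind string }

    type pod struct{}

    func main() {
    	// One Go type may be registered under several group/versions;
    	// the first entry plays the "preferred" role, as in ObjectKind.
    	typeToGVK := map[reflect.Type][]gvk{
    		reflect.TypeOf(pod{}): {{"", "v1", "Pod"}, {"", "__internal", "Pod"}},
    	}
    	unversionedTypes := map[reflect.Type]bool{}

    	t := reflect.TypeOf(pod{})
    	gvks, ok := typeToGVK[t]
    	if !ok {
    		panic("not a registered type") // the notRegisteredErr case
    	}
    	fmt.Println(gvks, unversionedTypes[t])
    }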
-func (s *Scheme) ConvertToVersion(in Object, outVersion string) (Object, error) { - gv, err := unversioned.ParseGroupVersion(outVersion) - if err != nil { - return nil, err - } +func (s *Scheme) ConvertToVersion(in Object, outVersion unversioned.GroupVersion) (Object, error) { switch in.(type) { case *Unknown, *Unstructured, *UnstructuredList: old := in.GetObjectKind().GroupVersionKind() defer in.GetObjectKind().SetGroupVersionKind(old) - setTargetVersion(in, s, gv) + setTargetVersion(in, s, outVersion) return in, nil } t := reflect.TypeOf(in) @@ -501,9 +503,9 @@ func (s *Scheme) ConvertToVersion(in Object, outVersion string) (Object, error) kind = kinds[0] } - outKind := gv.WithKind(kind.Kind) + outKind := outVersion.WithKind(kind.Kind) - inKind, err := s.ObjectKind(in) + inKinds, _, err := s.ObjectKinds(in) if err != nil { return nil, err } @@ -513,29 +515,109 @@ func (s *Scheme) ConvertToVersion(in Object, outVersion string) (Object, error) return nil, err } - flags, meta := s.generateConvertMeta(inKind.GroupVersion(), gv, in) + flags, meta := s.generateConvertMeta(inKinds[0].GroupVersion(), outVersion, in) + if err := s.converter.Convert(in, out, flags, meta); err != nil { + return nil, err + } + + setTargetVersion(out, s, outVersion) + return out, nil +} + +// UnsafeConvertToVersion will convert in to the provided outVersion if such a conversion is possible, +// but does not guarantee the output object does not share fields with the input object. It attempts to be as +// efficient as possible when doing conversion. +func (s *Scheme) UnsafeConvertToVersion(in Object, outVersion unversioned.GroupVersion) (Object, error) { + switch t := in.(type) { + case *Unknown: + t.APIVersion = outVersion.String() + return t, nil + case *Unstructured: + t.SetAPIVersion(outVersion.String()) + return t, nil + case *UnstructuredList: + t.SetAPIVersion(outVersion.String()) + return t, nil + } + + // determine the incoming kinds with as few allocations as possible. 
+ t := reflect.TypeOf(in) + if t.Kind() != reflect.Ptr { + return nil, fmt.Errorf("only pointer types may be converted: %v", t) + } + t = t.Elem() + if t.Kind() != reflect.Struct { + return nil, fmt.Errorf("only pointers to struct types may be converted: %v", t) + } + kinds, ok := s.typeToGVK[t] + if !ok || len(kinds) == 0 { + return nil, fmt.Errorf("%v is not a registered type and cannot be converted into version %q", t, outVersion) + } + + // if the Go type is also registered to the destination kind, no conversion is necessary + for i := range kinds { + if kinds[i].Version == outVersion.Version && kinds[i].Group == outVersion.Group { + setTargetKind(in, kinds[i]) + return in, nil + } + } + + // type is unversioned, no conversion necessary + // it should be possible to avoid this allocation + if unversionedKind, ok := s.unversionedTypes[t]; ok { + kind := unversionedKind + outKind := outVersion.WithKind(kind.Kind) + setTargetKind(in, outKind) + return in, nil + } + + // allocate a new object as the target using the target kind + // TODO: this should look in the target group version and find the first kind that matches, rather than the + // first kind registered in typeToGVK + kind := kinds[0] + kind.Version = outVersion.Version + kind.Group = outVersion.Group + out, err := s.New(kind) + if err != nil { + return nil, err + } + + // TODO: try to avoid the allocations here - in fast paths we are not likely to need these flags or meta + flags, meta := s.converter.DefaultMeta(t) if err := s.converter.Convert(in, out, flags, meta); err != nil { return nil, err } - setTargetVersion(out, s, gv) + setTargetKind(out, kind) return out, nil } // generateConvertMeta constructs the meta value we pass to Convert. func (s *Scheme) generateConvertMeta(srcGroupVersion, destGroupVersion unversioned.GroupVersion, in interface{}) (conversion.FieldMatchingFlags, *conversion.Meta) { - flags, meta := s.converter.DefaultMeta(reflect.TypeOf(in)) - meta.SrcVersion = srcGroupVersion.String() - meta.DestVersion = destGroupVersion.String() - return flags, meta + return s.converter.DefaultMeta(reflect.TypeOf(in)) } +// setTargetVersion is deprecated and should be replaced by use of setTargetKind func setTargetVersion(obj Object, raw *Scheme, gv unversioned.GroupVersion) { if gv.Version == APIVersionInternal { // internal is a special case - obj.GetObjectKind().SetGroupVersionKind(nil) + obj.GetObjectKind().SetGroupVersionKind(unversioned.GroupVersionKind{}) + return + } + if gvks, _, _ := raw.ObjectKinds(obj); len(gvks) > 0 { + obj.GetObjectKind().SetGroupVersionKind(unversioned.GroupVersionKind{Group: gv.Group, Version: gv.Version, Kind: gvks[0].Kind}) } else { - gvk, _ := raw.ObjectKind(obj) - obj.GetObjectKind().SetGroupVersionKind(&unversioned.GroupVersionKind{Group: gv.Group, Version: gv.Version, Kind: gvk.Kind}) + obj.GetObjectKind().SetGroupVersionKind(unversioned.GroupVersionKind{Group: gv.Group, Version: gv.Version}) + } +} + +// setTargetKind sets the kind on an object, taking into account whether the target kind is the internal version. 
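The "unsafe" in UnsafeConvertToVersion above is about aliasing rather than memory safety: when the input's type is already registered under the target version, the input itself is returned with its kind rewritten, so a caller that later mutates the input also mutates what conversion produced. A toy demonstration of that hazard:

    package main

    import "fmt"

    // obj stands in for any API object; the two converters contrast the
    // copy-free fast path with a conversion that allocates a fresh copy.
    type obj struct{ Kind string }

    func unsafeConvert(in *obj) *obj { return in }               // fast path: no copy
    func safeConvert(in *obj) *obj  { out := *in; return &out } // always copies

    func main() {
    	in := &obj{Kind: "Pod"}
    	aliased := unsafeConvert(in)
    	copied := safeConvert(in)
    	in.Kind = "Mutated"
    	// The aliased result observed the mutation; the copy did not.
    	fmt.Println(aliased.Kind, copied.Kind) // Mutated Pod
    }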
+func setTargetKind(obj Object, kind unversioned.GroupVersionKind) { + if kind.Version == APIVersionInternal { + // internal is a special case + // TODO: look at removing the need to special case this + obj.GetObjectKind().SetGroupVersionKind(unversioned.GroupVersionKind{}) + return } + obj.GetObjectKind().SetGroupVersionKind(kind) } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/scheme_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/scheme_test.go new file mode 100644 index 000000000000..61d6bb1315e8 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/scheme_test.go @@ -0,0 +1,646 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package runtime_test + +import ( + "reflect" + "testing" + + "github.com/google/gofuzz" + flag "github.com/spf13/pflag" + + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/conversion" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/runtime/serializer" + "k8s.io/kubernetes/pkg/util/diff" +) + +var fuzzIters = flag.Int("fuzz-iters", 50, "How many fuzzing iterations to do.") + +type InternalSimple struct { + runtime.TypeMeta `json:",inline"` + TestString string `json:"testString"` +} + +type ExternalSimple struct { + runtime.TypeMeta `json:",inline"` + TestString string `json:"testString"` +} + +func (obj *InternalSimple) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } +func (obj *ExternalSimple) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } + +func TestScheme(t *testing.T) { + internalGV := unversioned.GroupVersion{Group: "test.group", Version: runtime.APIVersionInternal} + externalGV := unversioned.GroupVersion{Group: "test.group", Version: "testExternal"} + + scheme := runtime.NewScheme() + scheme.AddKnownTypeWithName(internalGV.WithKind("Simple"), &InternalSimple{}) + scheme.AddKnownTypeWithName(externalGV.WithKind("Simple"), &ExternalSimple{}) + + // If set, would clear TypeMeta during conversion. + //scheme.AddIgnoredConversionType(&TypeMeta{}, &TypeMeta{}) + + // test that scheme is an ObjectTyper + var _ runtime.ObjectTyper = scheme + + internalToExternalCalls := 0 + externalToInternalCalls := 0 + + // Register functions to verify that scope.Meta() gets set correctly. 
+ err := scheme.AddConversionFuncs( + func(in *InternalSimple, out *ExternalSimple, scope conversion.Scope) error { + scope.Convert(&in.TypeMeta, &out.TypeMeta, 0) + scope.Convert(&in.TestString, &out.TestString, 0) + internalToExternalCalls++ + return nil + }, + func(in *ExternalSimple, out *InternalSimple, scope conversion.Scope) error { + scope.Convert(&in.TypeMeta, &out.TypeMeta, 0) + scope.Convert(&in.TestString, &out.TestString, 0) + externalToInternalCalls++ + return nil + }, + ) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + codecs := serializer.NewCodecFactory(scheme) + codec := codecs.LegacyCodec(externalGV) + jsonserializer, _ := codecs.SerializerForFileExtension("json") + + simple := &InternalSimple{ + TestString: "foo", + } + + // Test Encode, Decode, DecodeInto, and DecodeToVersion + obj := runtime.Object(simple) + data, err := runtime.Encode(codec, obj) + if err != nil { + t.Fatal(err) + } + + obj2, err := runtime.Decode(codec, data) + if err != nil { + t.Fatal(err) + } + if _, ok := obj2.(*InternalSimple); !ok { + t.Fatalf("Got wrong type") + } + if e, a := simple, obj2; !reflect.DeepEqual(e, a) { + t.Errorf("Expected:\n %#v,\n Got:\n %#v", e, a) + } + + obj3 := &InternalSimple{} + if err := runtime.DecodeInto(codec, data, obj3); err != nil { + t.Fatal(err) + } + // clearing TypeMeta is a function of the scheme, which we do not test here (ConvertToVersion + // does not automatically clear TypeMeta anymore). + simple.TypeMeta = runtime.TypeMeta{Kind: "Simple", APIVersion: externalGV.String()} + if e, a := simple, obj3; !reflect.DeepEqual(e, a) { + t.Errorf("Expected:\n %#v,\n Got:\n %#v", e, a) + } + + obj4, err := runtime.Decode(jsonserializer, data) + if err != nil { + t.Fatal(err) + } + if _, ok := obj4.(*ExternalSimple); !ok { + t.Fatalf("Got wrong type") + } + + // Test Convert + external := &ExternalSimple{} + err = scheme.Convert(simple, external) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + if e, a := simple.TestString, external.TestString; e != a { + t.Errorf("Expected %v, got %v", e, a) + } + + // Encode and Convert should each have caused an increment. 
+ if e, a := 2, internalToExternalCalls; e != a { + t.Errorf("Expected %v, got %v", e, a) + } + // DecodeInto and Decode should each have caused an increment because of a conversion + if e, a := 2, externalToInternalCalls; e != a { + t.Errorf("Expected %v, got %v", e, a) + } +} + +func TestBadJSONRejection(t *testing.T) { + scheme := runtime.NewScheme() + codecs := serializer.NewCodecFactory(scheme) + jsonserializer, _ := codecs.SerializerForFileExtension("json") + + badJSONMissingKind := []byte(`{ }`) + if _, err := runtime.Decode(jsonserializer, badJSONMissingKind); err == nil { + t.Errorf("Did not reject despite lack of kind field: %s", badJSONMissingKind) + } + badJSONUnknownType := []byte(`{"kind": "bar"}`) + if _, err1 := runtime.Decode(jsonserializer, badJSONUnknownType); err1 == nil { + t.Errorf("Did not reject despite use of unknown type: %s", badJSONUnknownType) + } + /*badJSONKindMismatch := []byte(`{"kind": "Pod"}`) + if err2 := DecodeInto(badJSONKindMismatch, &Minion{}); err2 == nil { + t.Errorf("Kind is set but doesn't match the object type: %s", badJSONKindMismatch) + }*/ +} + +type ExtensionA struct { + runtime.TypeMeta `json:",inline"` + TestString string `json:"testString"` +} + +type ExtensionB struct { + runtime.TypeMeta `json:",inline"` + TestString string `json:"testString"` +} + +type ExternalExtensionType struct { + runtime.TypeMeta `json:",inline"` + Extension runtime.RawExtension `json:"extension"` +} + +type InternalExtensionType struct { + runtime.TypeMeta `json:",inline"` + Extension runtime.Object `json:"extension"` +} + +type ExternalOptionalExtensionType struct { + runtime.TypeMeta `json:",inline"` + Extension runtime.RawExtension `json:"extension,omitempty"` +} + +type InternalOptionalExtensionType struct { + runtime.TypeMeta `json:",inline"` + Extension runtime.Object `json:"extension,omitempty"` +} + +func (obj *ExtensionA) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } +func (obj *ExtensionB) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } +func (obj *ExternalExtensionType) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } +func (obj *InternalExtensionType) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } +func (obj *ExternalOptionalExtensionType) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } +func (obj *InternalOptionalExtensionType) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } + +func TestExternalToInternalMapping(t *testing.T) { + internalGV := unversioned.GroupVersion{Group: "test.group", Version: runtime.APIVersionInternal} + externalGV := unversioned.GroupVersion{Group: "test.group", Version: "testExternal"} + + scheme := runtime.NewScheme() + scheme.AddKnownTypeWithName(internalGV.WithKind("OptionalExtensionType"), &InternalOptionalExtensionType{}) + scheme.AddKnownTypeWithName(externalGV.WithKind("OptionalExtensionType"), &ExternalOptionalExtensionType{}) + + codec := serializer.NewCodecFactory(scheme).LegacyCodec(externalGV) + + table := []struct { + obj runtime.Object + encoded string + }{ + { + &InternalOptionalExtensionType{Extension: nil}, + `{"kind":"OptionalExtensionType","apiVersion":"` + externalGV.String() + `"}`, + }, + } + + for i, item := range table { + gotDecoded, err := runtime.Decode(codec, []byte(item.encoded)) + if err != nil { + t.Errorf("unexpected error '%v' (%v)", err, item.encoded) + } else if e, a := item.obj, gotDecoded; !reflect.DeepEqual(e, a) { + t.Errorf("%d: unexpected objects:\n%s", i, diff.ObjectGoPrintSideBySide(e, 
a)) + } + } +} + +func TestExtensionMapping(t *testing.T) { + internalGV := unversioned.GroupVersion{Group: "test.group", Version: runtime.APIVersionInternal} + externalGV := unversioned.GroupVersion{Group: "test.group", Version: "testExternal"} + + scheme := runtime.NewScheme() + scheme.AddKnownTypeWithName(internalGV.WithKind("ExtensionType"), &InternalExtensionType{}) + scheme.AddKnownTypeWithName(internalGV.WithKind("OptionalExtensionType"), &InternalOptionalExtensionType{}) + scheme.AddKnownTypeWithName(externalGV.WithKind("ExtensionType"), &ExternalExtensionType{}) + scheme.AddKnownTypeWithName(externalGV.WithKind("OptionalExtensionType"), &ExternalOptionalExtensionType{}) + + // register external first when the object is the same in both schemes, so ObjectVersionAndKind reports the + // external version. + scheme.AddKnownTypeWithName(externalGV.WithKind("A"), &ExtensionA{}) + scheme.AddKnownTypeWithName(externalGV.WithKind("B"), &ExtensionB{}) + scheme.AddKnownTypeWithName(internalGV.WithKind("A"), &ExtensionA{}) + scheme.AddKnownTypeWithName(internalGV.WithKind("B"), &ExtensionB{}) + + codec := serializer.NewCodecFactory(scheme).LegacyCodec(externalGV) + + table := []struct { + obj runtime.Object + expected runtime.Object + encoded string + }{ + { + &InternalExtensionType{ + Extension: runtime.NewEncodable(codec, &ExtensionA{TestString: "foo"}), + }, + &InternalExtensionType{ + Extension: &runtime.Unknown{ + Raw: []byte(`{"apiVersion":"test.group/testExternal","kind":"A","testString":"foo"}`), + ContentType: runtime.ContentTypeJSON, + }, + }, + // apiVersion is set in the serialized object for easier consumption by clients + `{"apiVersion":"` + externalGV.String() + `","kind":"ExtensionType","extension":{"apiVersion":"test.group/testExternal","kind":"A","testString":"foo"}} +`, + }, { + &InternalExtensionType{Extension: runtime.NewEncodable(codec, &ExtensionB{TestString: "bar"})}, + &InternalExtensionType{ + Extension: &runtime.Unknown{ + Raw: []byte(`{"apiVersion":"test.group/testExternal","kind":"B","testString":"bar"}`), + ContentType: runtime.ContentTypeJSON, + }, + }, + // apiVersion is set in the serialized object for easier consumption by clients + `{"apiVersion":"` + externalGV.String() + `","kind":"ExtensionType","extension":{"apiVersion":"test.group/testExternal","kind":"B","testString":"bar"}} +`, + }, { + &InternalExtensionType{Extension: nil}, + &InternalExtensionType{ + Extension: nil, + }, + `{"apiVersion":"` + externalGV.String() + `","kind":"ExtensionType","extension":null} +`, + }, + } + + for i, item := range table { + gotEncoded, err := runtime.Encode(codec, item.obj) + if err != nil { + t.Errorf("unexpected error '%v' (%#v)", err, item.obj) + } else if e, a := item.encoded, string(gotEncoded); e != a { + t.Errorf("expected\n%#v\ngot\n%#v\n", e, a) + } + + gotDecoded, err := runtime.Decode(codec, []byte(item.encoded)) + if err != nil { + t.Errorf("unexpected error '%v' (%v)", err, item.encoded) + } else if e, a := item.expected, gotDecoded; !reflect.DeepEqual(e, a) { + t.Errorf("%d: unexpected objects:\n%s", i, diff.ObjectGoPrintSideBySide(e, a)) + } + } +} + +func TestEncode(t *testing.T) { + internalGV := unversioned.GroupVersion{Group: "test.group", Version: runtime.APIVersionInternal} + externalGV := unversioned.GroupVersion{Group: "test.group", Version: "testExternal"} + + scheme := runtime.NewScheme() + scheme.AddKnownTypeWithName(internalGV.WithKind("Simple"), &InternalSimple{}) + scheme.AddKnownTypeWithName(externalGV.WithKind("Simple"), 
&ExternalSimple{}) + + codec := serializer.NewCodecFactory(scheme).LegacyCodec(externalGV) + + test := &InternalSimple{ + TestString: "I'm the same", + } + obj := runtime.Object(test) + data, err := runtime.Encode(codec, obj) + obj2, gvk, err2 := codec.Decode(data, nil, nil) + if err != nil || err2 != nil { + t.Fatalf("Failure: '%v' '%v'", err, err2) + } + if _, ok := obj2.(*InternalSimple); !ok { + t.Fatalf("Got wrong type") + } + if !reflect.DeepEqual(obj2, test) { + t.Errorf("Expected:\n %#v,\n Got:\n %#v", test, obj2) + } + if !reflect.DeepEqual(gvk, &unversioned.GroupVersionKind{Group: "test.group", Version: "testExternal", Kind: "Simple"}) { + t.Errorf("unexpected gvk returned by decode: %#v", gvk) + } +} + +func TestUnversionedTypes(t *testing.T) { + internalGV := unversioned.GroupVersion{Group: "test.group", Version: runtime.APIVersionInternal} + externalGV := unversioned.GroupVersion{Group: "test.group", Version: "testExternal"} + otherGV := unversioned.GroupVersion{Group: "group", Version: "other"} + + scheme := runtime.NewScheme() + scheme.AddUnversionedTypes(externalGV, &InternalSimple{}) + scheme.AddKnownTypeWithName(internalGV.WithKind("Simple"), &InternalSimple{}) + scheme.AddKnownTypeWithName(externalGV.WithKind("Simple"), &ExternalSimple{}) + scheme.AddKnownTypeWithName(otherGV.WithKind("Simple"), &ExternalSimple{}) + + codec := serializer.NewCodecFactory(scheme).LegacyCodec(externalGV) + + if unv, ok := scheme.IsUnversioned(&InternalSimple{}); !unv || !ok { + t.Fatalf("type not unversioned and in scheme: %t %t", unv, ok) + } + + kinds, _, err := scheme.ObjectKinds(&InternalSimple{}) + if err != nil { + t.Fatal(err) + } + kind := kinds[0] + if kind != externalGV.WithKind("InternalSimple") { + t.Fatalf("unexpected: %#v", kind) + } + + test := &InternalSimple{ + TestString: "I'm the same", + } + obj := runtime.Object(test) + data, err := runtime.Encode(codec, obj) + if err != nil { + t.Fatal(err) + } + obj2, gvk, err := codec.Decode(data, nil, nil) + if err != nil { + t.Fatal(err) + } + if _, ok := obj2.(*InternalSimple); !ok { + t.Fatalf("Got wrong type") + } + if !reflect.DeepEqual(obj2, test) { + t.Errorf("Expected:\n %#v,\n Got:\n %#v", test, obj2) + } + // object is serialized as an unversioned object (in the group and version it was defined in) + if !reflect.DeepEqual(gvk, &unversioned.GroupVersionKind{Group: "test.group", Version: "testExternal", Kind: "InternalSimple"}) { + t.Errorf("unexpected gvk returned by decode: %#v", gvk) + } + + // when serialized to a different group, the object is kept in its preferred name + codec = serializer.NewCodecFactory(scheme).LegacyCodec(otherGV) + data, err = runtime.Encode(codec, obj) + if err != nil { + t.Fatal(err) + } + if string(data) != `{"apiVersion":"test.group/testExternal","kind":"InternalSimple","testString":"I'm the same"}`+"\n" { + t.Errorf("unexpected data: %s", data) + } +} + +// Test a weird version/kind embedding format. 
+type MyWeirdCustomEmbeddedVersionKindField struct { + ID string `json:"ID,omitempty"` + APIVersion string `json:"myVersionKey,omitempty"` + ObjectKind string `json:"myKindKey,omitempty"` + Z string `json:"Z,omitempty"` + Y uint64 `json:"Y,omitempty"` +} + +type TestType1 struct { + MyWeirdCustomEmbeddedVersionKindField `json:",inline"` + A string `json:"A,omitempty"` + B int `json:"B,omitempty"` + C int8 `json:"C,omitempty"` + D int16 `json:"D,omitempty"` + E int32 `json:"E,omitempty"` + F int64 `json:"F,omitempty"` + G uint `json:"G,omitempty"` + H uint8 `json:"H,omitempty"` + I uint16 `json:"I,omitempty"` + J uint32 `json:"J,omitempty"` + K uint64 `json:"K,omitempty"` + L bool `json:"L,omitempty"` + M map[string]int `json:"M,omitempty"` + N map[string]TestType2 `json:"N,omitempty"` + O *TestType2 `json:"O,omitempty"` + P []TestType2 `json:"Q,omitempty"` +} + +type TestType2 struct { + A string `json:"A,omitempty"` + B int `json:"B,omitempty"` +} + +type ExternalTestType2 struct { + A string `json:"A,omitempty"` + B int `json:"B,omitempty"` +} +type ExternalTestType1 struct { + MyWeirdCustomEmbeddedVersionKindField `json:",inline"` + A string `json:"A,omitempty"` + B int `json:"B,omitempty"` + C int8 `json:"C,omitempty"` + D int16 `json:"D,omitempty"` + E int32 `json:"E,omitempty"` + F int64 `json:"F,omitempty"` + G uint `json:"G,omitempty"` + H uint8 `json:"H,omitempty"` + I uint16 `json:"I,omitempty"` + J uint32 `json:"J,omitempty"` + K uint64 `json:"K,omitempty"` + L bool `json:"L,omitempty"` + M map[string]int `json:"M,omitempty"` + N map[string]ExternalTestType2 `json:"N,omitempty"` + O *ExternalTestType2 `json:"O,omitempty"` + P []ExternalTestType2 `json:"Q,omitempty"` +} + +type ExternalInternalSame struct { + MyWeirdCustomEmbeddedVersionKindField `json:",inline"` + A TestType2 `json:"A,omitempty"` +} + +func (obj *MyWeirdCustomEmbeddedVersionKindField) GetObjectKind() unversioned.ObjectKind { return obj } +func (obj *MyWeirdCustomEmbeddedVersionKindField) SetGroupVersionKind(gvk unversioned.GroupVersionKind) { + obj.APIVersion, obj.ObjectKind = gvk.ToAPIVersionAndKind() +} +func (obj *MyWeirdCustomEmbeddedVersionKindField) GroupVersionKind() unversioned.GroupVersionKind { + return unversioned.FromAPIVersionAndKind(obj.APIVersion, obj.ObjectKind) +} + +func (obj *ExternalInternalSame) GetObjectKind() unversioned.ObjectKind { + return &obj.MyWeirdCustomEmbeddedVersionKindField +} + +func (obj *TestType1) GetObjectKind() unversioned.ObjectKind { + return &obj.MyWeirdCustomEmbeddedVersionKindField +} + +func (obj *ExternalTestType1) GetObjectKind() unversioned.ObjectKind { + return &obj.MyWeirdCustomEmbeddedVersionKindField +} + +func (obj *TestType2) GetObjectKind() unversioned.ObjectKind { return unversioned.EmptyObjectKind } +func (obj *ExternalTestType2) GetObjectKind() unversioned.ObjectKind { + return unversioned.EmptyObjectKind +} + +// TestObjectFuzzer can randomly populate all the above objects. +var TestObjectFuzzer = fuzz.New().NilChance(.5).NumElements(1, 100).Funcs( + func(j *MyWeirdCustomEmbeddedVersionKindField, c fuzz.Continue) { + // We have to customize the randomization of MyWeirdCustomEmbeddedVersionKindFields because their + // APIVersion and Kind must remain blank in memory. + j.APIVersion = "" + j.ObjectKind = "" + j.ID = c.RandString() + }, +) + +// Returns a new Scheme set up with the test objects. 
+func GetTestScheme() *runtime.Scheme { + internalGV := unversioned.GroupVersion{Version: "__internal"} + externalGV := unversioned.GroupVersion{Version: "v1"} + + s := runtime.NewScheme() + // Ordinarily, we wouldn't add TestType2, but because this is a test and + // both types are from the same package, we need to get it into the system + // so that converter will match it with ExternalType2. + s.AddKnownTypes(internalGV, &TestType1{}, &TestType2{}, &ExternalInternalSame{}) + s.AddKnownTypes(externalGV, &ExternalInternalSame{}) + s.AddKnownTypeWithName(externalGV.WithKind("TestType1"), &ExternalTestType1{}) + s.AddKnownTypeWithName(externalGV.WithKind("TestType2"), &ExternalTestType2{}) + s.AddKnownTypeWithName(internalGV.WithKind("TestType3"), &TestType1{}) + s.AddKnownTypeWithName(externalGV.WithKind("TestType3"), &ExternalTestType1{}) + return s +} + +func TestKnownTypes(t *testing.T) { + s := GetTestScheme() + if len(s.KnownTypes(unversioned.GroupVersion{Group: "group", Version: "v2"})) != 0 { + t.Errorf("should have no known types for v2") + } + + types := s.KnownTypes(unversioned.GroupVersion{Version: "v1"}) + for _, s := range []string{"TestType1", "TestType2", "TestType3", "ExternalInternalSame"} { + if _, ok := types[s]; !ok { + t.Errorf("missing type %q", s) + } + } +} + +func TestConvertToVersion(t *testing.T) { + s := GetTestScheme() + tt := &TestType1{A: "I'm not a pointer object"} + other, err := s.ConvertToVersion(tt, unversioned.GroupVersion{Version: "v1"}) + if err != nil { + t.Fatalf("Failure: %v", err) + } + converted, ok := other.(*ExternalTestType1) + if !ok { + t.Fatalf("Got wrong type") + } + if tt.A != converted.A { + t.Fatalf("Failed to convert object correctly: %#v", converted) + } +} + +func TestMetaValues(t *testing.T) { + internalGV := unversioned.GroupVersion{Group: "test.group", Version: "__internal"} + externalGV := unversioned.GroupVersion{Group: "test.group", Version: "externalVersion"} + + s := runtime.NewScheme() + s.AddKnownTypeWithName(internalGV.WithKind("Simple"), &InternalSimple{}) + s.AddKnownTypeWithName(externalGV.WithKind("Simple"), &ExternalSimple{}) + + internalToExternalCalls := 0 + externalToInternalCalls := 0 + + // Register functions to verify that scope.Meta() gets set correctly. 
+ err := s.AddConversionFuncs( + func(in *InternalSimple, out *ExternalSimple, scope conversion.Scope) error { + t.Logf("internal -> external") + scope.Convert(&in.TestString, &out.TestString, 0) + internalToExternalCalls++ + return nil + }, + func(in *ExternalSimple, out *InternalSimple, scope conversion.Scope) error { + t.Logf("external -> internal") + scope.Convert(&in.TestString, &out.TestString, 0) + externalToInternalCalls++ + return nil + }, + ) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + simple := &InternalSimple{ + TestString: "foo", + } + + s.Log(t) + + out, err := s.ConvertToVersion(simple, externalGV) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + internal, err := s.ConvertToVersion(out, internalGV) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + if e, a := simple, internal; !reflect.DeepEqual(e, a) { + t.Errorf("Expected:\n %#v,\n Got:\n %#v", e, a) + } + + if e, a := 1, internalToExternalCalls; e != a { + t.Errorf("Expected %v, got %v", e, a) + } + if e, a := 1, externalToInternalCalls; e != a { + t.Errorf("Expected %v, got %v", e, a) + } +} + +func TestMetaValuesUnregisteredConvert(t *testing.T) { + type InternalSimple struct { + Version string `json:"apiVersion,omitempty"` + Kind string `json:"kind,omitempty"` + TestString string `json:"testString"` + } + type ExternalSimple struct { + Version string `json:"apiVersion,omitempty"` + Kind string `json:"kind,omitempty"` + TestString string `json:"testString"` + } + s := runtime.NewScheme() + // We deliberately don't register the types. + + internalToExternalCalls := 0 + + // Register functions to verify that scope.Meta() gets set correctly. + err := s.AddConversionFuncs( + func(in *InternalSimple, out *ExternalSimple, scope conversion.Scope) error { + scope.Convert(&in.TestString, &out.TestString, 0) + internalToExternalCalls++ + return nil + }, + ) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + simple := &InternalSimple{TestString: "foo"} + external := &ExternalSimple{} + err = s.Convert(simple, external) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + if e, a := simple.TestString, external.TestString; e != a { + t.Errorf("Expected %v, got %v", e, a) + } + + // Verify that our conversion handler got called. + if e, a := 1, internalToExternalCalls; e != a { + t.Errorf("Expected %v, got %v", e, a) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/serializer/codec_factory.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/serializer/codec_factory.go index 6f6310d9c83a..96af0cfd3544 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/serializer/codec_factory.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/serializer/codec_factory.go @@ -24,47 +24,112 @@ import ( "k8s.io/kubernetes/pkg/runtime/serializer/versioning" ) +// serializerExtensions are for serializers that are conditionally compiled in +var serializerExtensions = []func(*runtime.Scheme) (serializerType, bool){} + type serializerType struct { AcceptContentTypes []string ContentType string FileExtensions []string - Serializer runtime.Serializer - PrettySerializer runtime.Serializer -} + // EncodesAsText should be true if this content type can be represented safely in UTF-8 + EncodesAsText bool -// NewCodecFactory provides methods for retrieving serializers for the supported wire formats -// and conversion wrappers to define preferred internal and external versions. 
In the future, -// as the internal version is used less, callers may instead use a defaulting serializer and -// only convert objects which are shared internally (Status, common API machinery). -// TODO: allow other codecs to be compiled in? -// TODO: accept a scheme interface -func NewCodecFactory(scheme *runtime.Scheme) CodecFactory { - return newCodecFactory(scheme, json.DefaultMetaFactory) + Serializer runtime.Serializer + PrettySerializer runtime.Serializer + // RawSerializer serializes an object without adding a type wrapper. Some serializers, like JSON, + // automatically include identifying type information with the JSON. Others, like Protobuf, need + // a wrapper object that includes type information. This serializer should be set if the serializer + // can serialize / deserialize objects without type info. Note that callers of this serializer will always + // be expected to pass either an into object or a gvk to Decode, since no type information will be available on + // the object itself. + RawSerializer runtime.Serializer + // Specialize gives the type the opportunity to return a different serializer implementation if + // the content type contains alternate operations. Here it is used to implement "pretty" as an + // option to application/json, but could also be used to allow serializers to perform type + // defaulting or alter output. + Specialize func(map[string]string) (runtime.Serializer, bool) + + AcceptStreamContentTypes []string + StreamContentType string + + Framer runtime.Framer + StreamSerializer runtime.Serializer + StreamSpecialize func(map[string]string) (runtime.Serializer, bool) } -// newCodecFactory is a helper for testing that allows a different metafactory to be specified. -func newCodecFactory(scheme *runtime.Scheme, mf json.MetaFactory) CodecFactory { - jsonSerializer := json.NewSerializer(mf, scheme, runtime.ObjectTyperToTyper(scheme), false) - jsonPrettySerializer := json.NewSerializer(mf, scheme, runtime.ObjectTyperToTyper(scheme), true) - yamlSerializer := json.NewYAMLSerializer(mf, scheme, runtime.ObjectTyperToTyper(scheme)) +func newSerializersForScheme(scheme *runtime.Scheme, mf json.MetaFactory) []serializerType { + jsonSerializer := json.NewSerializer(mf, scheme, scheme, false) + jsonPrettySerializer := json.NewSerializer(mf, scheme, scheme, true) + yamlSerializer := json.NewYAMLSerializer(mf, scheme, scheme) + serializers := []serializerType{ { AcceptContentTypes: []string{"application/json"}, ContentType: "application/json", FileExtensions: []string{"json"}, + EncodesAsText: true, Serializer: jsonSerializer, PrettySerializer: jsonPrettySerializer, + + AcceptStreamContentTypes: []string{"application/json", "application/json;stream=watch"}, + StreamContentType: "application/json", + Framer: json.Framer, + StreamSerializer: jsonSerializer, }, { AcceptContentTypes: []string{"application/yaml"}, ContentType: "application/yaml", FileExtensions: []string{"yaml"}, + EncodesAsText: true, Serializer: yamlSerializer, + + // TODO: requires runtime.RawExtension to properly distinguish when the nested content is + // yaml, because the yaml encoder invokes MarshalJSON first + //AcceptStreamContentTypes: []string{"application/yaml", "application/yaml;stream=watch"}, + //StreamContentType: "application/yaml;stream=watch", + //Framer: json.YAMLFramer, + //StreamSerializer: yamlSerializer, }, } + + for _, fn := range serializerExtensions { + if serializer, ok := fn(scheme); ok { + serializers = append(serializers, serializer) + } + } + return serializers +} + +// CodecFactory provides methods
for retrieving codecs and serializers for specific +// versions and content types. +type CodecFactory struct { + scheme *runtime.Scheme + serializers []serializerType + universal runtime.Decoder + accepts []string + streamingAccepts []string + + legacySerializer runtime.Serializer +} + +// NewCodecFactory provides methods for retrieving serializers for the supported wire formats +// and conversion wrappers to define preferred internal and external versions. In the future, +// as the internal version is used less, callers may instead use a defaulting serializer and +// only convert objects which are shared internally (Status, common API machinery). +// TODO: allow other codecs to be compiled in? +// TODO: accept a scheme interface +func NewCodecFactory(scheme *runtime.Scheme) CodecFactory { + serializers := newSerializersForScheme(scheme, json.DefaultMetaFactory) + return newCodecFactory(scheme, serializers) +} + +// newCodecFactory is a helper for testing that allows a different set of serializers to be specified. +func newCodecFactory(scheme *runtime.Scheme, serializers []serializerType) CodecFactory { decoders := make([]runtime.Decoder, 0, len(serializers)) accepts := []string{} alreadyAccepted := make(map[string]struct{}) + + var legacySerializer runtime.Serializer for _, d := range serializers { decoders = append(decoders, d.Serializer) for _, mediaType := range d.AcceptContentTypes { @@ -73,27 +138,40 @@ func newCodecFactory(scheme *runtime.Scheme, mf json.MetaFactory) CodecFactory { } alreadyAccepted[mediaType] = struct{}{} accepts = append(accepts, mediaType) + if mediaType == "application/json" { + legacySerializer = d.Serializer + } } } + if legacySerializer == nil { + legacySerializer = serializers[0].Serializer + } + + streamAccepts := []string{} + alreadyAccepted = make(map[string]struct{}) + for _, d := range serializers { + if len(d.StreamContentType) == 0 { + continue + } + for _, mediaType := range d.AcceptStreamContentTypes { + if _, ok := alreadyAccepted[mediaType]; ok { + continue + } + alreadyAccepted[mediaType] = struct{}{} + streamAccepts = append(streamAccepts, mediaType) + } + } + return CodecFactory{ scheme: scheme, serializers: serializers, universal: recognizer.NewDecoder(decoders...), - accepts: accepts, - legacySerializer: jsonSerializer, - } -} + accepts: accepts, + streamingAccepts: streamAccepts, -// CodecFactory provides methods for retrieving codecs and serializers for specific -// versions and content types. -type CodecFactory struct { - scheme *runtime.Scheme - serializers []serializerType - universal runtime.Decoder - accepts []string - - legacySerializer runtime.Serializer + legacySerializer: legacySerializer, + } } var _ runtime.NegotiatedSerializer = &CodecFactory{} @@ -103,13 +181,18 @@ func (f CodecFactory) SupportedMediaTypes() []string { return f.accepts } +// SupportedStreamingMediaTypes returns the RFC2046 media types that this factory has stream serializers for. +func (f CodecFactory) SupportedStreamingMediaTypes() []string { + return f.streamingAccepts +} + // LegacyCodec encodes output to a given API version, and decodes output into the internal form from // any recognized source. The returned codec will always encode output to JSON. // // This method is deprecated - clients and servers should negotiate a serializer by mime-type and // invoke CodecForVersions. Callers that need only to read data should use UniversalDecoder().
func (f CodecFactory) LegacyCodec(version ...unversioned.GroupVersion) runtime.Codec { - return f.CodecForVersions(runtime.NewCodec(f.legacySerializer, f.universal), version, nil) + return versioning.NewCodecForScheme(f.scheme, f.legacySerializer, f.universal, version, nil) } // UniversalDeserializer can convert any stored data recognized by this factory into a Go object that satisfies @@ -127,40 +210,91 @@ func (f CodecFactory) UniversalDeserializer() runtime.Decoder { // // TODO: the decoder will eventually be removed in favor of dealing with objects in their versioned form func (f CodecFactory) UniversalDecoder(versions ...unversioned.GroupVersion) runtime.Decoder { - return f.CodecForVersions(runtime.NoopEncoder{f.universal}, nil, versions) + return f.CodecForVersions(nil, f.universal, nil, versions) } // CodecFor creates a codec with the provided serializer. If an object is decoded and its group is not in the list, // it will default to runtime.APIVersionInternal. If encode is not specified for an object's group, the object is not // converted. If encode or decode are nil, no conversion is performed. -func (f CodecFactory) CodecForVersions(serializer runtime.Serializer, encode []unversioned.GroupVersion, decode []unversioned.GroupVersion) runtime.Codec { - return versioning.NewCodecForScheme(f.scheme, serializer, encode, decode) +func (f CodecFactory) CodecForVersions(encoder runtime.Encoder, decoder runtime.Decoder, encode []unversioned.GroupVersion, decode []unversioned.GroupVersion) runtime.Codec { + return versioning.NewCodecForScheme(f.scheme, encoder, decoder, encode, decode) } // DecoderToVersion returns a decoder that targets the provided group version. -func (f CodecFactory) DecoderToVersion(serializer runtime.Serializer, gv unversioned.GroupVersion) runtime.Decoder { - return f.CodecForVersions(serializer, nil, []unversioned.GroupVersion{gv}) +func (f CodecFactory) DecoderToVersion(decoder runtime.Decoder, gv unversioned.GroupVersion) runtime.Decoder { + return f.CodecForVersions(nil, decoder, nil, []unversioned.GroupVersion{gv}) } // EncoderForVersion returns an encoder that targets the provided group version. 
-func (f CodecFactory) EncoderForVersion(serializer runtime.Serializer, gv unversioned.GroupVersion) runtime.Encoder { - return f.CodecForVersions(serializer, []unversioned.GroupVersion{gv}, nil) +func (f CodecFactory) EncoderForVersion(encoder runtime.Encoder, gv unversioned.GroupVersion) runtime.Encoder { + return f.CodecForVersions(encoder, nil, []unversioned.GroupVersion{gv}, nil) } // SerializerForMediaType returns a serializer that matches the provided RFC2046 mediaType, or false if no such // serializer exists -func (f CodecFactory) SerializerForMediaType(mediaType string, options map[string]string) (runtime.Serializer, bool) { +func (f CodecFactory) SerializerForMediaType(mediaType string, params map[string]string) (runtime.SerializerInfo, bool) { for _, s := range f.serializers { for _, accepted := range s.AcceptContentTypes { if accepted == mediaType { - if v, ok := options["pretty"]; ok && v == "1" && s.PrettySerializer != nil { - return s.PrettySerializer, true + // specialization abstracts variants to the content type + if s.Specialize != nil && len(params) > 0 { + serializer, ok := s.Specialize(params) + // TODO: return formatted mediaType+params + return runtime.SerializerInfo{Serializer: serializer, MediaType: s.ContentType, EncodesAsText: s.EncodesAsText}, ok } - return s.Serializer, true + + // legacy support for ?pretty=1 continues, but this is more formally defined + if v, ok := params["pretty"]; ok && v == "1" && s.PrettySerializer != nil { + return runtime.SerializerInfo{Serializer: s.PrettySerializer, MediaType: s.ContentType, EncodesAsText: s.EncodesAsText}, true + } + + // return the base variant + return runtime.SerializerInfo{Serializer: s.Serializer, MediaType: s.ContentType, EncodesAsText: s.EncodesAsText}, true } } } - return nil, false + return runtime.SerializerInfo{}, false +} + +// StreamingSerializerForMediaType returns a serializer that matches the provided RFC2046 mediaType, or false if no such +// serializer exists +func (f CodecFactory) StreamingSerializerForMediaType(mediaType string, params map[string]string) (runtime.StreamSerializerInfo, bool) { + for _, s := range f.serializers { + for _, accepted := range s.AcceptStreamContentTypes { + if accepted == mediaType { + // TODO: accept params + nested, ok := f.SerializerForMediaType(s.ContentType, nil) + if !ok { + panic("no serializer defined for internal content type") + } + + if s.StreamSpecialize != nil && len(params) > 0 { + serializer, ok := s.StreamSpecialize(params) + // TODO: return formatted mediaType+params + return runtime.StreamSerializerInfo{ + SerializerInfo: runtime.SerializerInfo{ + Serializer: serializer, + MediaType: s.StreamContentType, + EncodesAsText: s.EncodesAsText, + }, + Framer: s.Framer, + Embedded: nested, + }, ok + } + + return runtime.StreamSerializerInfo{ + SerializerInfo: runtime.SerializerInfo{ + Serializer: s.StreamSerializer, + MediaType: s.StreamContentType, + EncodesAsText: s.EncodesAsText, + }, + Framer: s.Framer, + Embedded: nested, + }, true + } + } + } + return runtime.StreamSerializerInfo{}, false } // SerializerForFileExtension returns a serializer for the provided extension, or false if no serializer matches. 
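For orientation, here is a minimal sketch of how a caller might drive the negotiation surface added above. The scheme setup, the group/version, and obj are illustrative assumptions, not part of this diff:

package example

import (
	"k8s.io/kubernetes/pkg/api/unversioned"
	"k8s.io/kubernetes/pkg/runtime"
	"k8s.io/kubernetes/pkg/runtime/serializer"
)

// roundTrip sketches the negotiation flow; the caller is assumed to have
// registered matching internal and external types on scheme.
func roundTrip(scheme *runtime.Scheme, obj runtime.Object) (runtime.Object, error) {
	factory := serializer.NewCodecFactory(scheme)
	gv := unversioned.GroupVersion{Group: "test.group", Version: "v1"} // illustrative target version

	// Negotiate a serializer for a media type; the params map carries options
	// such as the legacy pretty=1 handled above.
	info, ok := factory.SerializerForMediaType("application/json", map[string]string{"pretty": "1"})
	if !ok {
		// fall back to the first supported media type (assumed non-empty here)
		info, _ = factory.SerializerForMediaType(factory.SupportedMediaTypes()[0], nil)
	}

	// EncoderForVersion wraps the negotiated serializer so output is converted
	// to the external version before being written.
	data, err := runtime.Encode(factory.EncoderForVersion(info.Serializer, gv), obj)
	if err != nil {
		return nil, err
	}

	// LegacyCodec is the deprecated one-stop path: always encodes JSON to gv and
	// decodes any recognized input back into the internal form.
	decoded, _, err := factory.LegacyCodec(gv).Decode(data, nil, nil)
	return decoded, err
}

The same factory also answers SupportedStreamingMediaTypes and StreamingSerializerForMediaType for watch-style streams, pairing each stream serializer with its Framer and embedded per-object serializer.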
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/serializer/codec_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/serializer/codec_test.go new file mode 100644 index 000000000000..e1307d3c8e57 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/serializer/codec_test.go @@ -0,0 +1,400 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package serializer + +import ( + "encoding/json" + "fmt" + "log" + "os" + "reflect" + "strings" + "testing" + + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/conversion" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/util/diff" + + "github.com/ghodss/yaml" + "github.com/google/gofuzz" + flag "github.com/spf13/pflag" +) + +var fuzzIters = flag.Int("fuzz-iters", 50, "How many fuzzing iterations to do.") + +type testMetaFactory struct{} + +func (testMetaFactory) Interpret(data []byte) (*unversioned.GroupVersionKind, error) { + findKind := struct { + APIVersion string `json:"myVersionKey,omitempty"` + ObjectKind string `json:"myKindKey,omitempty"` + }{} + // yaml is a superset of json, so we use it to decode here. That way, + // we understand both. + if err := yaml.Unmarshal(data, &findKind); err != nil { + return nil, fmt.Errorf("couldn't get version/kind: %v", err) + } + gv, err := unversioned.ParseGroupVersion(findKind.APIVersion) + if err != nil { + return nil, err + } + return &unversioned.GroupVersionKind{Group: gv.Group, Version: gv.Version, Kind: findKind.ObjectKind}, nil +}
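The testMetaFactory above is what lets these tests use the nonstandard myVersionKey/myKindKey fields. A minimal standalone sketch of the same peek-at-the-type-keys idea follows; it uses encoding/json instead of the yaml superset purely for illustration, and interpret is a hypothetical helper:

package main

import (
	"encoding/json"
	"fmt"
)

// interpret mirrors testMetaFactory.Interpret: decode only the two type
// metadata keys and ignore the rest of the document.
func interpret(data []byte) (version, kind string, err error) {
	findKind := struct {
		APIVersion string `json:"myVersionKey,omitempty"`
		ObjectKind string `json:"myKindKey,omitempty"`
	}{}
	if err := json.Unmarshal(data, &findKind); err != nil {
		return "", "", fmt.Errorf("couldn't get version/kind: %v", err)
	}
	return findKind.APIVersion, findKind.ObjectKind, nil
}

func main() {
	v, k, err := interpret([]byte(`{"myVersionKey":"v1","myKindKey":"TestType1","A":"ignored"}`))
	fmt.Println(v, k, err) // v1 TestType1 <nil>
}

+ +// Test a weird version/kind embedding format.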
+type MyWeirdCustomEmbeddedVersionKindField struct { + ID string `json:"ID,omitempty"` + APIVersion string `json:"myVersionKey,omitempty"` + ObjectKind string `json:"myKindKey,omitempty"` + Z string `json:"Z,omitempty"` + Y uint64 `json:"Y,omitempty"` +} + +type TestType1 struct { + MyWeirdCustomEmbeddedVersionKindField `json:",inline"` + A string `json:"A,omitempty"` + B int `json:"B,omitempty"` + C int8 `json:"C,omitempty"` + D int16 `json:"D,omitempty"` + E int32 `json:"E,omitempty"` + F int64 `json:"F,omitempty"` + G uint `json:"G,omitempty"` + H uint8 `json:"H,omitempty"` + I uint16 `json:"I,omitempty"` + J uint32 `json:"J,omitempty"` + K uint64 `json:"K,omitempty"` + L bool `json:"L,omitempty"` + M map[string]int `json:"M,omitempty"` + N map[string]TestType2 `json:"N,omitempty"` + O *TestType2 `json:"O,omitempty"` + P []TestType2 `json:"Q,omitempty"` +} + +type TestType2 struct { + A string `json:"A,omitempty"` + B int `json:"B,omitempty"` +} + +type ExternalTestType2 struct { + A string `json:"A,omitempty"` + B int `json:"B,omitempty"` +} +type ExternalTestType1 struct { + MyWeirdCustomEmbeddedVersionKindField `json:",inline"` + A string `json:"A,omitempty"` + B int `json:"B,omitempty"` + C int8 `json:"C,omitempty"` + D int16 `json:"D,omitempty"` + E int32 `json:"E,omitempty"` + F int64 `json:"F,omitempty"` + G uint `json:"G,omitempty"` + H uint8 `json:"H,omitempty"` + I uint16 `json:"I,omitempty"` + J uint32 `json:"J,omitempty"` + K uint64 `json:"K,omitempty"` + L bool `json:"L,omitempty"` + M map[string]int `json:"M,omitempty"` + N map[string]ExternalTestType2 `json:"N,omitempty"` + O *ExternalTestType2 `json:"O,omitempty"` + P []ExternalTestType2 `json:"Q,omitempty"` +} + +type ExternalInternalSame struct { + MyWeirdCustomEmbeddedVersionKindField `json:",inline"` + A TestType2 `json:"A,omitempty"` +} + +// TestObjectFuzzer can randomly populate all the above objects. +var TestObjectFuzzer = fuzz.New().NilChance(.5).NumElements(1, 100).Funcs( + func(j *MyWeirdCustomEmbeddedVersionKindField, c fuzz.Continue) { + c.FuzzNoCustom(j) + j.APIVersion = "" + j.ObjectKind = "" + }, +) + +func (obj *MyWeirdCustomEmbeddedVersionKindField) GetObjectKind() unversioned.ObjectKind { return obj } +func (obj *MyWeirdCustomEmbeddedVersionKindField) SetGroupVersionKind(gvk unversioned.GroupVersionKind) { + obj.APIVersion, obj.ObjectKind = gvk.ToAPIVersionAndKind() +} +func (obj *MyWeirdCustomEmbeddedVersionKindField) GroupVersionKind() unversioned.GroupVersionKind { + return unversioned.FromAPIVersionAndKind(obj.APIVersion, obj.ObjectKind) +} + +func (obj *ExternalInternalSame) GetObjectKind() unversioned.ObjectKind { + return &obj.MyWeirdCustomEmbeddedVersionKindField +} + +func (obj *TestType1) GetObjectKind() unversioned.ObjectKind { + return &obj.MyWeirdCustomEmbeddedVersionKindField +} + +func (obj *ExternalTestType1) GetObjectKind() unversioned.ObjectKind { + return &obj.MyWeirdCustomEmbeddedVersionKindField +} + +func (obj *TestType2) GetObjectKind() unversioned.ObjectKind { return unversioned.EmptyObjectKind } +func (obj *ExternalTestType2) GetObjectKind() unversioned.ObjectKind { + return unversioned.EmptyObjectKind +} + +// Returns a new Scheme set up with the test objects. 
+func GetTestScheme() (*runtime.Scheme, runtime.Codec) { + internalGV := unversioned.GroupVersion{Version: runtime.APIVersionInternal} + externalGV := unversioned.GroupVersion{Version: "v1"} + externalGV2 := unversioned.GroupVersion{Version: "v2"} + + s := runtime.NewScheme() + // Ordinarily, we wouldn't add TestType2, but because this is a test and + // both types are from the same package, we need to get it into the system + // so that converter will match it with ExternalType2. + s.AddKnownTypes(internalGV, &TestType1{}, &TestType2{}, &ExternalInternalSame{}) + s.AddKnownTypes(externalGV, &ExternalInternalSame{}) + s.AddKnownTypeWithName(externalGV.WithKind("TestType1"), &ExternalTestType1{}) + s.AddKnownTypeWithName(externalGV.WithKind("TestType2"), &ExternalTestType2{}) + s.AddKnownTypeWithName(internalGV.WithKind("TestType3"), &TestType1{}) + s.AddKnownTypeWithName(externalGV.WithKind("TestType3"), &ExternalTestType1{}) + s.AddKnownTypeWithName(externalGV2.WithKind("TestType1"), &ExternalTestType1{}) + + s.AddUnversionedTypes(externalGV, &unversioned.Status{}) + + cf := newCodecFactory(s, newSerializersForScheme(s, testMetaFactory{})) + codec := cf.LegacyCodec(unversioned.GroupVersion{Version: "v1"}) + return s, codec +} + +func objDiff(a, b interface{}) string { + ab, err := json.Marshal(a) + if err != nil { + panic("a") + } + bb, err := json.Marshal(b) + if err != nil { + panic("b") + } + return diff.StringDiff(string(ab), string(bb)) + + // An alternate diff attempt, in case json isn't showing you + // the difference. (reflect.DeepEqual makes a distinction between + // nil and empty slices, for example.) + //return diff.StringDiff( + // fmt.Sprintf("%#v", a), + // fmt.Sprintf("%#v", b), + //) +} + +var semantic = conversion.EqualitiesOrDie( + func(a, b MyWeirdCustomEmbeddedVersionKindField) bool { + a.APIVersion, a.ObjectKind = "", "" + b.APIVersion, b.ObjectKind = "", "" + return a == b + }, +) + +func runTest(t *testing.T, source interface{}) { + name := reflect.TypeOf(source).Elem().Name() + TestObjectFuzzer.Fuzz(source) + + _, codec := GetTestScheme() + data, err := runtime.Encode(codec, source.(runtime.Object)) + if err != nil { + t.Errorf("%v: %v (%#v)", name, err, source) + return + } + obj2, err := runtime.Decode(codec, data) + if err != nil { + t.Errorf("%v: %v (%v)", name, err, string(data)) + return + } + if !semantic.DeepEqual(source, obj2) { + t.Errorf("1: %v: diff: %v", name, diff.ObjectGoPrintSideBySide(source, obj2)) + return + } + obj3 := reflect.New(reflect.TypeOf(source).Elem()).Interface() + if err := runtime.DecodeInto(codec, data, obj3.(runtime.Object)); err != nil { + t.Errorf("2: %v: %v", name, err) + return + } + if !semantic.DeepEqual(source, obj3) { + t.Errorf("3: %v: diff: %v", name, objDiff(source, obj3)) + return + } +} + +func TestTypes(t *testing.T) { + table := []interface{}{ + &TestType1{}, + &ExternalInternalSame{}, + } + for _, item := range table { + // Try a few times, since runTest uses random values. 
+ for i := 0; i < *fuzzIters; i++ { + runTest(t, item) + } + } +} + +func TestVersionedEncoding(t *testing.T) { + s, codec := GetTestScheme() + out, err := runtime.Encode(codec, &TestType1{}, unversioned.GroupVersion{Version: "v2"}) + if err != nil { + t.Fatal(err) + } + if string(out) != `{"myVersionKey":"v2","myKindKey":"TestType1"}`+"\n" { + t.Fatal(string(out)) + } + _, err = runtime.Encode(codec, &TestType1{}, unversioned.GroupVersion{Version: "v3"}) + if err == nil { + t.Fatal(err) + } + + cf := newCodecFactory(s, newSerializersForScheme(s, testMetaFactory{})) + encoder, _ := cf.SerializerForFileExtension("json") + + // codec that is unversioned uses the target version + unversionedCodec := cf.CodecForVersions(encoder, nil, nil, nil) + _, err = runtime.Encode(unversionedCodec, &TestType1{}, unversioned.GroupVersion{Version: "v3"}) + if err == nil || !runtime.IsNotRegisteredError(err) { + t.Fatal(err) + } + + // unversioned encode with no versions is written directly to wire + out, err = runtime.Encode(unversionedCodec, &TestType1{}) + if err != nil { + t.Fatal(err) + } + if string(out) != `{"myVersionKey":"__internal","myKindKey":"TestType1"}`+"\n" { + t.Fatal(string(out)) + } +} + +func TestMultipleNames(t *testing.T) { + _, codec := GetTestScheme() + + obj, _, err := codec.Decode([]byte(`{"myKindKey":"TestType3","myVersionKey":"v1","A":"value"}`), nil, nil) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + internal := obj.(*TestType1) + if internal.A != "value" { + t.Fatalf("unexpected decoded object: %#v", internal) + } + + out, err := runtime.Encode(codec, internal) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if !strings.Contains(string(out), `"myKindKey":"TestType1"`) { + t.Errorf("unexpected encoded output: %s", string(out)) + } +} + +func TestConvertTypesWhenDefaultNamesMatch(t *testing.T) { + internalGV := unversioned.GroupVersion{Version: runtime.APIVersionInternal} + externalGV := unversioned.GroupVersion{Version: "v1"} + + s := runtime.NewScheme() + // create two names internally, with TestType1 being preferred + s.AddKnownTypeWithName(internalGV.WithKind("TestType1"), &TestType1{}) + s.AddKnownTypeWithName(internalGV.WithKind("OtherType1"), &TestType1{}) + // create two names externally, with TestType1 being preferred + s.AddKnownTypeWithName(externalGV.WithKind("TestType1"), &ExternalTestType1{}) + s.AddKnownTypeWithName(externalGV.WithKind("OtherType1"), &ExternalTestType1{}) + + ext := &ExternalTestType1{} + ext.APIVersion = "v1" + ext.ObjectKind = "OtherType1" + ext.A = "test" + data, err := json.Marshal(ext) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + expect := &TestType1{A: "test"} + + codec := newCodecFactory(s, newSerializersForScheme(s, testMetaFactory{})).LegacyCodec(unversioned.GroupVersion{Version: "v1"}) + + obj, err := runtime.Decode(codec, data) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if !semantic.DeepEqual(expect, obj) { + t.Errorf("unexpected object: %#v", obj) + } + + into := &TestType1{} + if err := runtime.DecodeInto(codec, data, into); err != nil { + t.Fatalf("unexpected error: %v", err) + } + if !semantic.DeepEqual(expect, into) { + t.Errorf("unexpected object: %#v", obj) + } +} + +func TestEncode_Ptr(t *testing.T) { + _, codec := GetTestScheme() + tt := &TestType1{A: "I am a pointer object"} + data, err := runtime.Encode(codec, tt) + obj2, err2 := runtime.Decode(codec, data) + if err != nil || err2 != nil { + t.Fatalf("Failure: '%v' '%v'\n%s", err, err2, data) + } + if _, 
ok := obj2.(*TestType1); !ok { + t.Fatalf("Got wrong type") + } + if !semantic.DeepEqual(obj2, tt) { + t.Errorf("Expected:\n %#v,\n Got:\n %#v", tt, obj2) + } +} + +func TestBadJSONRejection(t *testing.T) { + log.SetOutput(os.Stderr) + _, codec := GetTestScheme() + badJSONs := [][]byte{ + []byte(`{"myVersionKey":"v1"}`), // Missing kind + []byte(`{"myVersionKey":"v1","myKindKey":"bar"}`), // Unknown kind + []byte(`{"myVersionKey":"bar","myKindKey":"TestType1"}`), // Unknown version + []byte(`{"myKindKey":"TestType1"}`), // Missing version + } + for _, b := range badJSONs { + if _, err := runtime.Decode(codec, b); err == nil { + t.Errorf("Did not reject bad json: %s", string(b)) + } + } + badJSONKindMismatch := []byte(`{"myVersionKey":"v1","myKindKey":"ExternalInternalSame"}`) + if err := runtime.DecodeInto(codec, badJSONKindMismatch, &TestType1{}); err == nil { + t.Errorf("Kind is set but doesn't match the object type: %s", badJSONKindMismatch) + } + if err := runtime.DecodeInto(codec, []byte(``), &TestType1{}); err != nil { + t.Errorf("Should allow empty decode: %v", err) + } + if _, _, err := codec.Decode([]byte(``), &unversioned.GroupVersionKind{Kind: "ExternalInternalSame"}, nil); err == nil { + t.Errorf("Did not give error for empty data with only kind default") + } + if _, _, err := codec.Decode([]byte(`{"myVersionKey":"v1"}`), &unversioned.GroupVersionKind{Kind: "ExternalInternalSame"}, nil); err != nil { + t.Errorf("Gave error for version and kind default") + } + if _, _, err := codec.Decode([]byte(`{"myKindKey":"ExternalInternalSame"}`), &unversioned.GroupVersionKind{Version: "v1"}, nil); err != nil { + t.Errorf("Gave error for version and kind default") + } + if _, _, err := codec.Decode([]byte(``), &unversioned.GroupVersionKind{Kind: "ExternalInternalSame", Version: "v1"}, nil); err != nil { + t.Errorf("Gave error for version and kind defaulted: %v", err) + } + if _, err := runtime.Decode(codec, []byte(``)); err == nil { + t.Errorf("Did not give error for empty data") + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/serializer/deep_copy_generated.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/serializer/deep_copy_generated.go new file mode 100644 index 000000000000..afeecabc2bdf --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/serializer/deep_copy_generated.go @@ -0,0 +1,80 @@ +// +build !ignore_autogenerated + +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! 
+ +package serializer + +import ( + conversion "k8s.io/kubernetes/pkg/conversion" + runtime "k8s.io/kubernetes/pkg/runtime" +) + +func DeepCopy_serializer_CodecFactory(in CodecFactory, out *CodecFactory, c *conversion.Cloner) error { + if in.scheme != nil { + in, out := in.scheme, &out.scheme + *out = new(runtime.Scheme) + if err := runtime.DeepCopy_runtime_Scheme(*in, *out, c); err != nil { + return err + } + } else { + out.scheme = nil + } + if in.serializers != nil { + in, out := in.serializers, &out.serializers + *out = make([]serializerType, len(in)) + for i := range in { + if newVal, err := c.DeepCopy(in[i]); err != nil { + return err + } else { + (*out)[i] = newVal.(serializerType) + } + } + } else { + out.serializers = nil + } + if in.universal == nil { + out.universal = nil + } else if newVal, err := c.DeepCopy(in.universal); err != nil { + return err + } else { + out.universal = newVal.(runtime.Decoder) + } + if in.accepts != nil { + in, out := in.accepts, &out.accepts + *out = make([]string, len(in)) + copy(*out, in) + } else { + out.accepts = nil + } + if in.streamingAccepts != nil { + in, out := in.streamingAccepts, &out.streamingAccepts + *out = make([]string, len(in)) + copy(*out, in) + } else { + out.streamingAccepts = nil + } + if in.legacySerializer == nil { + out.legacySerializer = nil + } else if newVal, err := c.DeepCopy(in.legacySerializer); err != nil { + return err + } else { + out.legacySerializer = newVal.(runtime.Serializer) + } + return nil +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/serializer/json/json.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/serializer/json/json.go index f9fb4bbfb227..c4a2987a9c76 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/serializer/json/json.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/serializer/json/json.go @@ -25,12 +25,13 @@ import ( "k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/util/framer" utilyaml "k8s.io/kubernetes/pkg/util/yaml" ) // NewSerializer creates a JSON serializer that handles encoding versioned objects into the proper JSON form. If typer // is not nil, the object has the group, version, and kind fields set. -func NewSerializer(meta MetaFactory, creater runtime.ObjectCreater, typer runtime.Typer, pretty bool) runtime.Serializer { +func NewSerializer(meta MetaFactory, creater runtime.ObjectCreater, typer runtime.ObjectTyper, pretty bool) *Serializer { return &Serializer{ meta: meta, creater: creater, @@ -43,7 +44,7 @@ func NewSerializer(meta MetaFactory, creater runtime.ObjectCreater, typer runtim // NewYAMLSerializer creates a YAML serializer that handles encoding versioned objects into the proper YAML form. If typer // is not nil, the object has the group, version, and kind fields set. This serializer supports only the subset of YAML that // matches JSON, and will error if constructs are used that do not serialize to JSON. 
-func NewYAMLSerializer(meta MetaFactory, creater runtime.ObjectCreater, typer runtime.Typer) runtime.Serializer { +func NewYAMLSerializer(meta MetaFactory, creater runtime.ObjectCreater, typer runtime.ObjectTyper) *Serializer { return &Serializer{ meta: meta, creater: creater, @@ -55,11 +56,14 @@ type Serializer struct { meta MetaFactory creater runtime.ObjectCreater - typer runtime.Typer + typer runtime.ObjectTyper yaml bool pretty bool } +// Serializer implements runtime.Serializer +var _ runtime.Serializer = &Serializer{} + // Decode attempts to convert the provided data into YAML or JSON, extract the stored schema kind, apply the provided default gvk, and then // load that data into an object matching the desired schema kind or the provided into. If into is *runtime.Unknown, the raw data will be // extracted and no decoding will be performed. If into is not registered with the typer, then the object will be straight decoded using @@ -105,14 +109,14 @@ func (s *Serializer) Decode(originalData []byte, gvk *unversioned.GroupVersionKi } if unk, ok := into.(*runtime.Unknown); ok && unk != nil { - unk.RawJSON = originalData - // TODO: set content type here - unk.GetObjectKind().SetGroupVersionKind(actual) + unk.Raw = originalData + unk.ContentType = runtime.ContentTypeJSON + unk.GetObjectKind().SetGroupVersionKind(*actual) return unk, actual, nil } if into != nil { - typed, _, err := s.typer.ObjectKind(into) + types, _, err := s.typer.ObjectKinds(into) switch { case runtime.IsNotRegisteredError(err): if err := codec.NewDecoderBytes(data, new(codec.JsonHandle)).Decode(into); err != nil { @@ -122,6 +126,7 @@ func (s *Serializer) Decode(originalData []byte, gvk *unversioned.GroupVersionKi case err != nil: return nil, actual, err default: + typed := types[0] if len(actual.Kind) == 0 { actual.Kind = typed.Kind } @@ -182,10 +187,57 @@ func (s *Serializer) EncodeToStream(obj runtime.Object, w io.Writer, overrides . } // RecognizesData implements the RecognizingDecoder interface. -func (s *Serializer) RecognizesData(peek io.Reader) (bool, error) { - _, ok := utilyaml.GuessJSONStream(peek, 2048) +func (s *Serializer) RecognizesData(peek io.Reader) (ok, unknown bool, err error) { if s.yaml { - return !ok, nil + // we could potentially look for '---' + return false, true, nil + } + _, ok = utilyaml.GuessJSONStream(peek, 2048) + return ok, false, nil +} + +// Framer is the default JSON framing behavior, with newlines delimiting individual objects. +var Framer = jsonFramer{} + +type jsonFramer struct{} + +// NewFrameWriter implements stream framing for this serializer +func (jsonFramer) NewFrameWriter(w io.Writer) io.Writer { + // we can write JSON objects directly to the writer, because they are self-framing + return w +} + +// NewFrameReader implements stream framing for this serializer +func (jsonFramer) NewFrameReader(r io.ReadCloser) io.ReadCloser { + // we need to extract the JSON chunks of data to pass to Decode() + return framer.NewJSONFramedReader(r) +}
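JSON needs no frame writer because each encoded object is self-framing, while YAML documents must be separated explicitly. A small standalone sketch of the document-separator behavior implemented by yamlFrameWriter below; the docWriter name is illustrative:

package main

import (
	"bytes"
	"fmt"
	"io"
)

// docWriter prefixes every document with the YAML separator, mirroring yamlFrameWriter.
type docWriter struct{ w io.Writer }

func (d docWriter) Write(p []byte) (int, error) {
	if _, err := d.w.Write([]byte("---\n")); err != nil {
		return 0, err
	}
	return d.w.Write(p)
}

func main() {
	var buf bytes.Buffer
	w := docWriter{&buf}
	// each Write must be one well-formed YAML document ending in a line break
	w.Write([]byte("a: 1\n"))
	w.Write([]byte("b: 2\n"))
	fmt.Print(buf.String())
	// prints:
	// ---
	// a: 1
	// ---
	// b: 2
}

+ +// YAMLFramer is the default YAML framing behavior, with `---` delimiting individual documents.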
+var YAMLFramer = yamlFramer{} + +type yamlFramer struct{} + +// NewFrameWriter implements stream framing for this serializer +func (yamlFramer) NewFrameWriter(w io.Writer) io.Writer { + return yamlFrameWriter{w} +} + +// NewFrameReader implements stream framing for this serializer +func (yamlFramer) NewFrameReader(r io.ReadCloser) io.ReadCloser { + // extract the YAML document chunks directly + return utilyaml.NewDocumentDecoder(r) +} + +type yamlFrameWriter struct { + w io.Writer +} + +// Write separates each document with the YAML document separator (`---` followed by line +// break). Writers must write well formed YAML documents (include a final line break). +func (w yamlFrameWriter) Write(data []byte) (n int, err error) { + if _, err := w.w.Write([]byte("---\n")); err != nil { + return 0, err } - return ok, nil + return w.w.Write(data) } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/serializer/json/json_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/serializer/json/json_test.go new file mode 100644 index 000000000000..8b0fcac67c49 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/serializer/json/json_test.go @@ -0,0 +1,272 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package json_test + +import ( + "fmt" + "reflect" + "strings" + "testing" + + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/runtime/serializer/json" + "k8s.io/kubernetes/pkg/util/diff" +) + +type testDecodable struct { + Other string + Value int `json:"value"` + gvk unversioned.GroupVersionKind +} + +func (d *testDecodable) GetObjectKind() unversioned.ObjectKind { return d } +func (d *testDecodable) SetGroupVersionKind(gvk unversioned.GroupVersionKind) { d.gvk = gvk } +func (d *testDecodable) GroupVersionKind() unversioned.GroupVersionKind { return d.gvk } + +func TestDecode(t *testing.T) { + testCases := []struct { + creater runtime.ObjectCreater + typer runtime.ObjectTyper + yaml bool + pretty bool + + data []byte + defaultGVK *unversioned.GroupVersionKind + into runtime.Object + + errFn func(error) bool + expectedObject runtime.Object + expectedGVK *unversioned.GroupVersionKind + }{ + { + data: []byte("{}"), + + expectedGVK: &unversioned.GroupVersionKind{}, + errFn: func(err error) bool { return strings.Contains(err.Error(), "Object 'Kind' is missing in") }, + }, + { + data: []byte("{}"), + defaultGVK: &unversioned.GroupVersionKind{Kind: "Test", Group: "other", Version: "blah"}, + creater: &mockCreater{err: fmt.Errorf("fake error")}, + + expectedGVK: &unversioned.GroupVersionKind{Kind: "Test", Group: "other", Version: "blah"}, + errFn: func(err error) bool { return err.Error() == "fake error" }, + }, + { + data: []byte("{}"), + defaultGVK: &unversioned.GroupVersionKind{Kind: "Test", Group: "other", Version: "blah"}, + creater: &mockCreater{err: fmt.Errorf("fake error")}, + + expectedGVK: &unversioned.GroupVersionKind{Kind: "Test", Group: "other", Version: "blah"}, + errFn: func(err error) bool { return err.Error() == "fake error" }, + }, + { + data: []byte("{}"), + defaultGVK: &unversioned.GroupVersionKind{Kind: "Test", Group: "other", Version: "blah"}, + creater: &mockCreater{obj: &testDecodable{}}, + expectedObject: &testDecodable{}, + expectedGVK: &unversioned.GroupVersionKind{Kind: "Test", Group: "other", Version: "blah"}, + }, + + // version without group is not defaulted + { + data: []byte(`{"apiVersion":"blah"}`), + defaultGVK: &unversioned.GroupVersionKind{Kind: "Test", Group: "other", Version: "blah"}, + creater: &mockCreater{obj: &testDecodable{}}, + expectedObject: &testDecodable{}, + expectedGVK: &unversioned.GroupVersionKind{Kind: "Test", Group: "", Version: "blah"}, + }, + // group without version is defaulted + { + data: []byte(`{"apiVersion":"other/"}`), + defaultGVK: &unversioned.GroupVersionKind{Kind: "Test", Group: "other", Version: "blah"}, + creater: &mockCreater{obj: &testDecodable{}}, + expectedObject: &testDecodable{}, + expectedGVK: &unversioned.GroupVersionKind{Kind: "Test", Group: "other", Version: "blah"}, + }, + + // accept runtime.Unknown as into and bypass creator + { + data: []byte(`{}`), + into: &runtime.Unknown{}, + + expectedGVK: &unversioned.GroupVersionKind{}, + expectedObject: &runtime.Unknown{ + Raw: []byte(`{}`), + ContentType: runtime.ContentTypeJSON, + }, + }, + { + data: []byte(`{"test":"object"}`), + into: &runtime.Unknown{}, + + expectedGVK: &unversioned.GroupVersionKind{}, + expectedObject: &runtime.Unknown{ + Raw: []byte(`{"test":"object"}`), + ContentType: runtime.ContentTypeJSON, + }, + }, + { + data: []byte(`{"test":"object"}`), + into: &runtime.Unknown{}, + defaultGVK: &unversioned.GroupVersionKind{Kind: "Test", Group: "other", Version: "blah"}, + expectedGVK: 
&unversioned.GroupVersionKind{Kind: "Test", Group: "other", Version: "blah"}, + expectedObject: &runtime.Unknown{ + TypeMeta: runtime.TypeMeta{APIVersion: "other/blah", Kind: "Test"}, + Raw: []byte(`{"test":"object"}`), + ContentType: runtime.ContentTypeJSON, + }, + }, + + // unregistered objects can be decoded into directly + { + data: []byte(`{"kind":"Test","apiVersion":"other/blah","value":1,"Other":"test"}`), + into: &testDecodable{}, + typer: &mockTyper{err: runtime.NewNotRegisteredErr(unversioned.GroupVersionKind{}, nil)}, + expectedGVK: &unversioned.GroupVersionKind{Kind: "Test", Group: "other", Version: "blah"}, + expectedObject: &testDecodable{ + Other: "test", + Value: 1, + }, + }, + // registered types get defaulted by the into object kind + { + data: []byte(`{"value":1,"Other":"test"}`), + into: &testDecodable{}, + typer: &mockTyper{gvk: &unversioned.GroupVersionKind{Kind: "Test", Group: "other", Version: "blah"}}, + expectedGVK: &unversioned.GroupVersionKind{Kind: "Test", Group: "other", Version: "blah"}, + expectedObject: &testDecodable{ + Other: "test", + Value: 1, + }, + }, + // registered types get defaulted by the into object kind even without version, but return an error + { + data: []byte(`{"value":1,"Other":"test"}`), + into: &testDecodable{}, + typer: &mockTyper{gvk: &unversioned.GroupVersionKind{Kind: "Test", Group: "other", Version: ""}}, + expectedGVK: &unversioned.GroupVersionKind{Kind: "Test", Group: "other", Version: ""}, + errFn: func(err error) bool { return strings.Contains(err.Error(), "Object 'apiVersion' is missing in") }, + expectedObject: &testDecodable{ + Other: "test", + Value: 1, + }, + }, + + // runtime.VersionedObjects are decoded + { + data: []byte(`{"value":1,"Other":"test"}`), + into: &runtime.VersionedObjects{Objects: []runtime.Object{}}, + creater: &mockCreater{obj: &testDecodable{}}, + typer: &mockTyper{gvk: &unversioned.GroupVersionKind{Kind: "Test", Group: "other", Version: "blah"}}, + defaultGVK: &unversioned.GroupVersionKind{Kind: "Test", Group: "other", Version: "blah"}, + expectedGVK: &unversioned.GroupVersionKind{Kind: "Test", Group: "other", Version: "blah"}, + expectedObject: &runtime.VersionedObjects{ + Objects: []runtime.Object{ + &testDecodable{ + Other: "test", + Value: 1, + }, + }, + }, + }, + // runtime.VersionedObjects with an object are decoded into + { + data: []byte(`{"Other":"test"}`), + into: &runtime.VersionedObjects{Objects: []runtime.Object{&testDecodable{Value: 2}}}, + typer: &mockTyper{gvk: &unversioned.GroupVersionKind{Kind: "Test", Group: "other", Version: "blah"}}, + expectedGVK: &unversioned.GroupVersionKind{Kind: "Test", Group: "other", Version: "blah"}, + expectedObject: &runtime.VersionedObjects{ + Objects: []runtime.Object{ + &testDecodable{ + Other: "test", + Value: 2, + }, + }, + }, + }, + } + + for i, test := range testCases { + var s runtime.Serializer + if test.yaml { + s = json.NewYAMLSerializer(json.DefaultMetaFactory, test.creater, test.typer) + } else { + s = json.NewSerializer(json.DefaultMetaFactory, test.creater, test.typer, test.pretty) + } + obj, gvk, err := s.Decode([]byte(test.data), test.defaultGVK, test.into) + + if !reflect.DeepEqual(test.expectedGVK, gvk) { + t.Errorf("%d: unexpected GVK: %v", i, gvk) + } + + switch { + case err == nil && test.errFn != nil: + t.Errorf("%d: failed: %v", i, err) + continue + case err != nil && test.errFn == nil: + t.Errorf("%d: failed: %v", i, err) + continue + case err != nil: + if !test.errFn(err) { + t.Errorf("%d: failed: %v", i, err) + } + if obj != nil { 
+ t.Errorf("%d: should have returned nil object", i) + } + continue + } + + if test.into != nil && test.into != obj { + t.Errorf("%d: expected into to be returned: %v", i, obj) + continue + } + + if !reflect.DeepEqual(test.expectedObject, obj) { + t.Errorf("%d: unexpected object:\n%s", i, diff.ObjectGoPrintSideBySide(test.expectedObject, obj)) + } + } +} + +type mockCreater struct { + apiVersion string + kind string + err error + obj runtime.Object +} + +func (c *mockCreater) New(kind unversioned.GroupVersionKind) (runtime.Object, error) { + c.apiVersion, c.kind = kind.GroupVersion().String(), kind.Kind + return c.obj, c.err +} + +type mockTyper struct { + gvk *unversioned.GroupVersionKind + err error +} + +func (t *mockTyper) ObjectKinds(obj runtime.Object) ([]unversioned.GroupVersionKind, bool, error) { + if t.gvk == nil { + return nil, false, t.err + } + return []unversioned.GroupVersionKind{*t.gvk}, false, t.err +} + +func (t *mockTyper) Recognizes(_ unversioned.GroupVersionKind) bool { + return false +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/serializer/json/meta_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/serializer/json/meta_test.go new file mode 100644 index 000000000000..4b6351286f7a --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/serializer/json/meta_test.go @@ -0,0 +1,45 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package json + +import "testing" + +func TestSimpleMetaFactoryInterpret(t *testing.T) { + factory := SimpleMetaFactory{} + gvk, err := factory.Interpret([]byte(`{"apiVersion":"1","kind":"object"}`)) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if gvk.Version != "1" || gvk.Kind != "object" { + t.Errorf("unexpected interpret: %#v", gvk) + } + + // no kind or version + gvk, err = factory.Interpret([]byte(`{}`)) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if gvk.Version != "" || gvk.Kind != "" { + t.Errorf("unexpected interpret: %#v", gvk) + } + + // unparsable + gvk, err = factory.Interpret([]byte(`{`)) + if err == nil { + t.Errorf("unexpected non-error") + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/serializer/negotiated_codec.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/serializer/negotiated_codec.go new file mode 100644 index 000000000000..6f6a56dd3fa5 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/serializer/negotiated_codec.go @@ -0,0 +1,57 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package serializer + +import ( + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/runtime" +) + +// TODO: We should figure out what happens when someone asks +// encoder for version and it conflicts with the raw serializer. +type negotiatedSerializerWrapper struct { + info runtime.SerializerInfo + streamInfo runtime.StreamSerializerInfo +} + +func NegotiatedSerializerWrapper(info runtime.SerializerInfo, streamInfo runtime.StreamSerializerInfo) runtime.NegotiatedSerializer { + return &negotiatedSerializerWrapper{info, streamInfo} +} + +func (n *negotiatedSerializerWrapper) SupportedMediaTypes() []string { + return []string{} +} + +func (n *negotiatedSerializerWrapper) SerializerForMediaType(mediaType string, options map[string]string) (runtime.SerializerInfo, bool) { + return n.info, true +} + +func (n *negotiatedSerializerWrapper) SupportedStreamingMediaTypes() []string { + return []string{} +} + +func (n *negotiatedSerializerWrapper) StreamingSerializerForMediaType(mediaType string, options map[string]string) (runtime.StreamSerializerInfo, bool) { + return n.streamInfo, true +} + +func (n *negotiatedSerializerWrapper) EncoderForVersion(e runtime.Encoder, _ unversioned.GroupVersion) runtime.Encoder { + return e +} + +func (n *negotiatedSerializerWrapper) DecoderToVersion(d runtime.Decoder, _gv unversioned.GroupVersion) runtime.Decoder { + return d +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/protobuf/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/serializer/protobuf/doc.go similarity index 88% rename from Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/protobuf/doc.go rename to Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/serializer/protobuf/doc.go index 33316d0c4d86..91b86af6cdb7 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/protobuf/doc.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/serializer/protobuf/doc.go @@ -14,5 +14,5 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package protobuf implements ProtoBuf serialization and deserialization. +// Package protobuf provides a Kubernetes serializer for the protobuf format. package protobuf diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/serializer/protobuf/protobuf.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/serializer/protobuf/protobuf.go new file mode 100644 index 000000000000..a4e4f2cdd777 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/serializer/protobuf/protobuf.go @@ -0,0 +1,433 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package protobuf + +import ( + "bytes" + "fmt" + "io" + "reflect" + + "github.com/gogo/protobuf/proto" + + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/util/framer" +) + +var ( + // protoEncodingPrefix serves as a magic number for an encoded protobuf message on this serializer. All + // proto messages serialized by this schema will be preceded by the bytes 0x6b 0x38 0x73, with the fourth + // byte being reserved for the encoding style. The only encoding style defined is 0x00, which means that + // the rest of the byte stream is a message of type k8s.io.kubernetes.pkg.runtime.Unknown (proto2). + // + // See k8s.io/kubernetes/pkg/runtime/generated.proto for details of the runtime.Unknown message. + // + // This encoding scheme is experimental, and is subject to change at any time. + protoEncodingPrefix = []byte{0x6b, 0x38, 0x73, 0x00} +) + +type errNotMarshalable struct { + t reflect.Type +} + +func (e errNotMarshalable) Error() string { + return fmt.Sprintf("object %v does not implement the protobuf marshalling interface and cannot be encoded to a protobuf message", e.t) +} + +func IsNotMarshalable(err error) bool { + _, ok := err.(errNotMarshalable) + return err != nil && ok +} + +// NewSerializer creates a Protobuf serializer that handles encoding versioned objects into the proper wire form. If a typer +// is passed, the encoded object will have group, version, and kind fields set. If typer is nil, the objects will be written +// as-is (any type info passed with the object will be used). +// +// This encoding scheme is experimental, and is subject to change at any time. +func NewSerializer(creater runtime.ObjectCreater, typer runtime.ObjectTyper, defaultContentType string) *Serializer { + return &Serializer{ + prefix: protoEncodingPrefix, + creater: creater, + typer: typer, + contentType: defaultContentType, + } +} + +type Serializer struct { + prefix []byte + creater runtime.ObjectCreater + typer runtime.ObjectTyper + contentType string +} + +var _ runtime.Serializer = &Serializer{} + +// Decode attempts to convert the provided data into a protobuf message, extract the stored schema kind, apply the provided default +// gvk, and then load that data into an object matching the desired schema kind or the provided into. If into is *runtime.Unknown, +// the raw data will be extracted and no decoding will be performed. If into is not registered with the typer, then the object will +// be straight decoded using normal protobuf unmarshalling (the MarshalTo interface). If into is provided and the original data is +// not fully qualified with kind/version/group, the type of the into will be used to alter the returned gvk. On success or most +// errors, the method will return the calculated schema kind. +func (s *Serializer) Decode(originalData []byte, gvk *unversioned.GroupVersionKind, into runtime.Object) (runtime.Object, *unversioned.GroupVersionKind, error) { + if versioned, ok := into.(*runtime.VersionedObjects); ok { + into = versioned.Last() + obj, actual, err := s.Decode(originalData, gvk, into) + if err != nil { + return nil, actual, err + } + // the last item in versioned becomes into, so if versioned was not originally empty we reset the object + // array so the first position is the decoded object and the second position is the outermost object. + // if there were no objects in the versioned list passed to us, only add ourselves. 
+ if into != nil && into != obj { + versioned.Objects = []runtime.Object{obj, into} + } else { + versioned.Objects = []runtime.Object{obj} + } + return versioned, actual, err + } + + prefixLen := len(s.prefix) + switch { + case len(originalData) == 0: + // TODO: treat like decoding {} from JSON with defaulting + return nil, nil, fmt.Errorf("empty data") + case len(originalData) < prefixLen || !bytes.Equal(s.prefix, originalData[:prefixLen]): + return nil, nil, fmt.Errorf("provided data does not appear to be a protobuf message, expected prefix %v", s.prefix) + case len(originalData) == prefixLen: + // TODO: treat like decoding {} from JSON with defaulting + return nil, nil, fmt.Errorf("empty body") + } + + data := originalData[prefixLen:] + unk := runtime.Unknown{} + if err := unk.Unmarshal(data); err != nil { + return nil, nil, err + } + + actual := unk.GroupVersionKind() + copyKindDefaults(&actual, gvk) + + if intoUnknown, ok := into.(*runtime.Unknown); ok && intoUnknown != nil { + *intoUnknown = unk + if len(intoUnknown.ContentType) == 0 { + intoUnknown.ContentType = s.contentType + } + return intoUnknown, &actual, nil + } + + if into != nil { + types, _, err := s.typer.ObjectKinds(into) + switch { + case runtime.IsNotRegisteredError(err): + pb, ok := into.(proto.Message) + if !ok { + return nil, &actual, errNotMarshalable{reflect.TypeOf(into)} + } + if err := proto.Unmarshal(unk.Raw, pb); err != nil { + return nil, &actual, err + } + return into, &actual, nil + case err != nil: + return nil, &actual, err + default: + copyKindDefaults(&actual, &types[0]) + // if the result of defaulting did not set a version or group, ensure that at least group is set + // (copyKindDefaults will not assign Group if version is already set). This guarantees that the group + // of into is set if there is no better information from the caller or object. + if len(actual.Version) == 0 && len(actual.Group) == 0 { + actual.Group = types[0].Group + } + } + } + + if len(actual.Kind) == 0 { + return nil, &actual, runtime.NewMissingKindErr(fmt.Sprintf("%#v", unk.TypeMeta)) + } + if len(actual.Version) == 0 { + return nil, &actual, runtime.NewMissingVersionErr(fmt.Sprintf("%#v", unk.TypeMeta)) + } + + return unmarshalToObject(s.typer, s.creater, &actual, into, unk.Raw) +} + +// EncodeToStream serializes the provided object to the given writer. Overrides is ignored. 
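+//
+// A minimal usage sketch (illustrative, not from the original source; assumes obj is a
+// runtime.Object whose type is registered with api.Scheme):
+//
+//	s := protobuf.NewSerializer(api.Scheme, api.Scheme, "application/vnd.kubernetes.protobuf")
+//	var buf bytes.Buffer
+//	if err := s.EncodeToStream(obj, &buf); err != nil {
+//		// handle the encoding error
+//	}
+//	// buf now holds the 4-byte prefix 0x6b 0x38 0x73 0x00 followed by a
+//	// runtime.Unknown message wrapping the object's protobuf bytes.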
+func (s *Serializer) EncodeToStream(obj runtime.Object, w io.Writer, overrides ...unversioned.GroupVersion) error {
+	var unk runtime.Unknown
+	kind := obj.GetObjectKind().GroupVersionKind()
+	unk = runtime.Unknown{
+		TypeMeta: runtime.TypeMeta{
+			Kind:       kind.Kind,
+			APIVersion: kind.GroupVersion().String(),
+		},
+	}
+
+	prefixSize := uint64(len(s.prefix))
+
+	switch t := obj.(type) {
+	case bufferedMarshaller:
+		// this path performs a single allocation during write but requires the caller to implement
+		// the more efficient Size and MarshalTo methods
+		encodedSize := uint64(t.Size())
+		estimatedSize := prefixSize + estimateUnknownSize(&unk, encodedSize)
+		data := make([]byte, estimatedSize)
+
+		i, err := unk.NestedMarshalTo(data[prefixSize:], t, encodedSize)
+		if err != nil {
+			return err
+		}
+
+		copy(data, s.prefix)
+
+		_, err = w.Write(data[:prefixSize+uint64(i)])
+		return err
+
+	case proto.Marshaler:
+		// this path performs extra allocations
+		data, err := t.Marshal()
+		if err != nil {
+			return err
+		}
+		unk.Raw = data
+
+		estimatedSize := prefixSize + uint64(unk.Size())
+		data = make([]byte, estimatedSize)
+
+		i, err := unk.MarshalTo(data[prefixSize:])
+		if err != nil {
+			return err
+		}
+
+		copy(data, s.prefix)
+
+		_, err = w.Write(data[:prefixSize+uint64(i)])
+		return err
+
+	default:
+		// TODO: marshal with a different content type and serializer (JSON for third party objects)
+		return errNotMarshalable{reflect.TypeOf(obj)}
+	}
+}
+
+// RecognizesData implements the RecognizingDecoder interface.
+func (s *Serializer) RecognizesData(peek io.Reader) (bool, error) {
+	prefix := make([]byte, 4)
+	n, err := peek.Read(prefix)
+	if err != nil {
+		if err == io.EOF {
+			return false, nil
+		}
+		return false, err
+	}
+	if n != 4 {
+		return false, nil
+	}
+	return bytes.Equal(s.prefix, prefix), nil
+}
+
+// copyKindDefaults defaults dst to the value in src if dst does not have a value set.
+func copyKindDefaults(dst, src *unversioned.GroupVersionKind) {
+	if src == nil {
+		return
+	}
+	// apply kind and version defaulting from provided default
+	if len(dst.Kind) == 0 {
+		dst.Kind = src.Kind
+	}
+	if len(dst.Version) == 0 && len(src.Version) > 0 {
+		dst.Group = src.Group
+		dst.Version = src.Version
+	}
+}
+
+// bufferedMarshaller describes a more efficient marshalling interface that can avoid allocating multiple
+// byte buffers by pre-calculating the size of the final buffer needed.
+type bufferedMarshaller interface {
+	proto.Sizer
+	runtime.ProtobufMarshaller
+}
+
+// estimateUnknownSize returns the expected number of bytes consumed by a given runtime.Unknown
+// object with an empty Raw field, plus the expected size of the provided buffer. The
+// returned size will not be correct if Raw is already set on unk.
+func estimateUnknownSize(unk *runtime.Unknown, byteSize uint64) uint64 {
+	size := uint64(unk.Size())
+	// protobuf uses 1 byte for the tag, a varint for the length of the array (at most 8 bytes for a uint64 here),
+	// and the size of the array.
+	size += 1 + 8 + byteSize
+	return size
+}
+
+// NewRawSerializer creates a Protobuf serializer that handles encoding versioned objects into the proper wire form. If typer
+// is not nil, the object has the group, version, and kind fields set. This serializer does not provide type information for the
+// encoded object, and thus is not self-describing (callers must know what type is being described in order to decode).
+//
+// This encoding scheme is experimental, and is subject to change at any time.
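+//
+// A sketch of the difference from the wrapped Serializer above (illustrative only;
+// pod stands for any object with generated protobuf marshalling methods):
+//
+//	raw := protobuf.NewRawSerializer(api.Scheme, api.Scheme, "application/vnd.kubernetes.protobuf")
+//	var buf bytes.Buffer
+//	_ = raw.EncodeToStream(pod, &buf)
+//	// buf contains only pod's protobuf bytes: no magic prefix and no runtime.Unknown
+//	// envelope, so a reader must already know the type in order to decode it.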
+func NewRawSerializer(creater runtime.ObjectCreater, typer runtime.ObjectTyper, defaultContentType string) *RawSerializer {
+	return &RawSerializer{
+		creater:     creater,
+		typer:       typer,
+		contentType: defaultContentType,
+	}
+}
+
+// RawSerializer encodes and decodes objects without adding a runtime.Unknown wrapper (objects are encoded without identifying
+// type).
+type RawSerializer struct {
+	creater     runtime.ObjectCreater
+	typer       runtime.ObjectTyper
+	contentType string
+}
+
+var _ runtime.Serializer = &RawSerializer{}
+
+// Decode attempts to convert the provided data into a protobuf message, extract the stored schema kind, apply the provided default
+// gvk, and then load that data into an object matching the desired schema kind or the provided into. If into is *runtime.Unknown,
+// the raw data will be extracted and no decoding will be performed. If into is not registered with the typer, then the object will
+// be straight decoded using normal protobuf unmarshalling (via proto.Unmarshal). If into is provided and the original data is
+// not fully qualified with kind/version/group, the type of into will be used to alter the returned gvk. On success or most
+// errors, the method will return the calculated schema kind.
+func (s *RawSerializer) Decode(originalData []byte, gvk *unversioned.GroupVersionKind, into runtime.Object) (runtime.Object, *unversioned.GroupVersionKind, error) {
+	if into == nil {
+		return nil, nil, fmt.Errorf("this serializer requires an object to decode into: %#v", s)
+	}
+
+	if versioned, ok := into.(*runtime.VersionedObjects); ok {
+		into = versioned.Last()
+		obj, actual, err := s.Decode(originalData, gvk, into)
+		if err != nil {
+			return nil, actual, err
+		}
+		if into != nil && into != obj {
+			versioned.Objects = []runtime.Object{obj, into}
+		} else {
+			versioned.Objects = []runtime.Object{obj}
+		}
+		return versioned, actual, err
+	}
+
+	if len(originalData) == 0 {
+		// TODO: treat like decoding {} from JSON with defaulting
+		return nil, nil, fmt.Errorf("empty data")
+	}
+	data := originalData
+
+	actual := &unversioned.GroupVersionKind{}
+	copyKindDefaults(actual, gvk)
+
+	if intoUnknown, ok := into.(*runtime.Unknown); ok && intoUnknown != nil {
+		intoUnknown.Raw = data
+		intoUnknown.ContentEncoding = ""
+		intoUnknown.ContentType = s.contentType
+		intoUnknown.SetGroupVersionKind(*actual)
+		return intoUnknown, actual, nil
+	}
+
+	types, _, err := s.typer.ObjectKinds(into)
+	switch {
+	case runtime.IsNotRegisteredError(err):
+		pb, ok := into.(proto.Message)
+		if !ok {
+			return nil, actual, errNotMarshalable{reflect.TypeOf(into)}
+		}
+		if err := proto.Unmarshal(data, pb); err != nil {
+			return nil, actual, err
+		}
+		return into, actual, nil
+	case err != nil:
+		return nil, actual, err
+	default:
+		copyKindDefaults(actual, &types[0])
+		// if the result of defaulting did not set a version or group, ensure that at least group is set
+		// (copyKindDefaults will not assign Group if version is already set). This guarantees that the group
+		// of into is set if there is no better information from the caller or object.
+		if len(actual.Version) == 0 && len(actual.Group) == 0 {
+			actual.Group = types[0].Group
+		}
+	}
+
+	if len(actual.Kind) == 0 {
+		return nil, actual, runtime.NewMissingKindErr("")
+	}
+	if len(actual.Version) == 0 {
+		return nil, actual, runtime.NewMissingVersionErr("")
+	}
+
+	return unmarshalToObject(s.typer, s.creater, actual, into, data)
+}
+
+// unmarshalToObject is the common decoding code shared by the raw and normal serializers.
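+// It resolves the target via runtime.UseOrCreateObject (reusing into when its type
+// matches the computed kind, otherwise creating a fresh instance) and then fills it
+// with proto.Unmarshal, so the object must implement proto.Message.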
+func unmarshalToObject(typer runtime.ObjectTyper, creater runtime.ObjectCreater, actual *unversioned.GroupVersionKind, into runtime.Object, data []byte) (runtime.Object, *unversioned.GroupVersionKind, error) { + // use the target if necessary + obj, err := runtime.UseOrCreateObject(typer, creater, *actual, into) + if err != nil { + return nil, actual, err + } + + pb, ok := obj.(proto.Message) + if !ok { + return nil, actual, errNotMarshalable{reflect.TypeOf(obj)} + } + if err := proto.Unmarshal(data, pb); err != nil { + return nil, actual, err + } + return obj, actual, nil +} + +// EncodeToStream serializes the provided object to the given writer. Overrides is ignored. +func (s *RawSerializer) EncodeToStream(obj runtime.Object, w io.Writer, overrides ...unversioned.GroupVersion) error { + switch t := obj.(type) { + case bufferedMarshaller: + // this path performs a single allocation during write but requires the caller to implement + // the more efficient Size and MarshalTo methods + encodedSize := uint64(t.Size()) + data := make([]byte, encodedSize) + + n, err := t.MarshalTo(data) + if err != nil { + return err + } + _, err = w.Write(data[:n]) + return err + + case proto.Marshaler: + // this path performs extra allocations + data, err := t.Marshal() + if err != nil { + return err + } + _, err = w.Write(data) + return err + + default: + return errNotMarshalable{reflect.TypeOf(obj)} + } +} + +var LengthDelimitedFramer = lengthDelimitedFramer{} + +type lengthDelimitedFramer struct{} + +// NewFrameWriter implements stream framing for this serializer +func (lengthDelimitedFramer) NewFrameWriter(w io.Writer) io.Writer { + return framer.NewLengthDelimitedFrameWriter(w) +} + +// NewFrameReader implements stream framing for this serializer +func (lengthDelimitedFramer) NewFrameReader(r io.ReadCloser) io.ReadCloser { + return framer.NewLengthDelimitedFrameReader(r) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/serializer/protobuf/protobuf_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/serializer/protobuf/protobuf_test.go new file mode 100644 index 000000000000..03bf2b254173 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/serializer/protobuf/protobuf_test.go @@ -0,0 +1,351 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package protobuf_test + +import ( + "bytes" + "encoding/hex" + "fmt" + "reflect" + "strings" + "testing" + + "k8s.io/kubernetes/pkg/api" + _ "k8s.io/kubernetes/pkg/api/install" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/runtime/serializer/protobuf" + "k8s.io/kubernetes/pkg/util/diff" +) + +type testObject struct { + gvk unversioned.GroupVersionKind +} + +func (d *testObject) GetObjectKind() unversioned.ObjectKind { return d } +func (d *testObject) SetGroupVersionKind(gvk unversioned.GroupVersionKind) { d.gvk = gvk } +func (d *testObject) GroupVersionKind() unversioned.GroupVersionKind { return d.gvk } + +type testMarshalable struct { + testObject + data []byte + err error +} + +func (d *testMarshalable) Marshal() ([]byte, error) { + return d.data, d.err +} + +type testBufferedMarshalable struct { + testObject + data []byte + err error +} + +func (d *testBufferedMarshalable) Marshal() ([]byte, error) { + return nil, fmt.Errorf("not invokable") +} + +func (d *testBufferedMarshalable) MarshalTo(data []byte) (int, error) { + copy(data, d.data) + return len(d.data), d.err +} + +func (d *testBufferedMarshalable) Size() int { + return len(d.data) +} + +func TestRecognize(t *testing.T) { + s := protobuf.NewSerializer(nil, nil, "application/protobuf") + ignores := [][]byte{ + nil, + {}, + []byte("k8s"), + {0x6b, 0x38, 0x73, 0x01}, + } + for i, data := range ignores { + if ok, err := s.RecognizesData(bytes.NewBuffer(data)); err != nil || ok { + t.Errorf("%d: should not recognize data: %v", i, err) + } + } + recognizes := [][]byte{ + {0x6b, 0x38, 0x73, 0x00}, + {0x6b, 0x38, 0x73, 0x00, 0x01}, + } + for i, data := range recognizes { + if ok, err := s.RecognizesData(bytes.NewBuffer(data)); err != nil || !ok { + t.Errorf("%d: should recognize data: %v", i, err) + } + } +} + +func TestEncode(t *testing.T) { + obj1 := &testMarshalable{testObject: testObject{}, data: []byte{}} + wire1 := []byte{ + 0x6b, 0x38, 0x73, 0x00, // prefix + 0x0a, 0x04, + 0x0a, 0x00, // apiversion + 0x12, 0x00, // kind + 0x12, 0x00, // data + 0x1a, 0x00, // content-type + 0x22, 0x00, // content-encoding + } + obj2 := &testMarshalable{ + testObject: testObject{gvk: unversioned.GroupVersionKind{Kind: "test", Group: "other", Version: "version"}}, + data: []byte{0x01, 0x02, 0x03}, + } + wire2 := []byte{ + 0x6b, 0x38, 0x73, 0x00, // prefix + 0x0a, 0x15, + 0x0a, 0x0d, 0x6f, 0x74, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, // apiversion + 0x12, 0x04, 0x74, 0x65, 0x73, 0x74, // kind + 0x12, 0x03, 0x01, 0x02, 0x03, // data + 0x1a, 0x00, // content-type + 0x22, 0x00, // content-encoding + } + + err1 := fmt.Errorf("a test error") + + testCases := []struct { + obj runtime.Object + data []byte + errFn func(error) bool + }{ + { + obj: &testObject{}, + errFn: protobuf.IsNotMarshalable, + }, + { + obj: obj1, + data: wire1, + }, + { + obj: &testMarshalable{testObject: obj1.testObject, err: err1}, + errFn: func(err error) bool { return err == err1 }, + }, + { + // if this test fails, writing the "fast path" marshal is not the same as the "slow path" + obj: &testBufferedMarshalable{testObject: obj1.testObject, data: obj1.data}, + data: wire1, + }, + { + obj: obj2, + data: wire2, + }, + { + // if this test fails, writing the "fast path" marshal is not the same as the "slow path" + obj: &testBufferedMarshalable{testObject: obj2.testObject, data: obj2.data}, + data: wire2, + }, + { + obj: &testBufferedMarshalable{testObject: 
obj1.testObject, err: err1}, + errFn: func(err error) bool { return err == err1 }, + }, + } + + for i, test := range testCases { + s := protobuf.NewSerializer(nil, nil, "application/protobuf") + data, err := runtime.Encode(s, test.obj) + + switch { + case err == nil && test.errFn != nil: + t.Errorf("%d: failed: %v", i, err) + continue + case err != nil && test.errFn == nil: + t.Errorf("%d: failed: %v", i, err) + continue + case err != nil: + if !test.errFn(err) { + t.Errorf("%d: failed: %v", i, err) + } + if data != nil { + t.Errorf("%d: should not have returned nil data", i) + } + continue + } + + if test.data != nil && !bytes.Equal(test.data, data) { + t.Errorf("%d: unexpected data:\n%s", i, hex.Dump(data)) + continue + } + + if ok, err := s.RecognizesData(bytes.NewBuffer(data)); !ok || err != nil { + t.Errorf("%d: did not recognize data generated by call: %v", i, err) + } + } +} + +func TestDecode(t *testing.T) { + wire1 := []byte{ + 0x6b, 0x38, 0x73, 0x00, // prefix + 0x0a, 0x04, + 0x0a, 0x00, // apiversion + 0x12, 0x00, // kind + 0x12, 0x00, // data + 0x1a, 0x00, // content-type + 0x22, 0x00, // content-encoding + } + wire2 := []byte{ + 0x6b, 0x38, 0x73, 0x00, // prefix + 0x0a, 0x15, + 0x0a, 0x0d, 0x6f, 0x74, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, // apiversion + 0x12, 0x04, 0x74, 0x65, 0x73, 0x74, // kind + 0x12, 0x03, 0x01, 0x02, 0x03, // data + 0x1a, 0x00, // content-type + 0x22, 0x00, // content-encoding + } + + //err1 := fmt.Errorf("a test error") + + testCases := []struct { + obj runtime.Object + data []byte + errFn func(error) bool + }{ + { + obj: &runtime.Unknown{}, + errFn: func(err error) bool { return err.Error() == "empty data" }, + }, + { + data: []byte{0x6b}, + errFn: func(err error) bool { return strings.Contains(err.Error(), "does not appear to be a protobuf message") }, + }, + { + obj: &runtime.Unknown{ + ContentType: "application/protobuf", + Raw: []byte{}, + }, + data: wire1, + }, + { + obj: &runtime.Unknown{ + TypeMeta: runtime.TypeMeta{ + APIVersion: "other/version", + Kind: "test", + }, + ContentType: "application/protobuf", + Raw: []byte{0x01, 0x02, 0x03}, + }, + data: wire2, + }, + } + + for i, test := range testCases { + s := protobuf.NewSerializer(nil, nil, "application/protobuf") + unk := &runtime.Unknown{} + err := runtime.DecodeInto(s, test.data, unk) + + switch { + case err == nil && test.errFn != nil: + t.Errorf("%d: failed: %v", i, err) + continue + case err != nil && test.errFn == nil: + t.Errorf("%d: failed: %v", i, err) + continue + case err != nil: + if !test.errFn(err) { + t.Errorf("%d: failed: %v", i, err) + } + continue + } + + if !reflect.DeepEqual(unk, test.obj) { + t.Errorf("%d: unexpected object:\n%#v", i, unk) + continue + } + } +} + +func TestDecodeObjects(t *testing.T) { + obj1 := &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ + Name: "cool", + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "test", + }, + }, + }, + } + obj1wire, err := obj1.Marshal() + if err != nil { + t.Fatal(err) + } + + wire1, err := (&runtime.Unknown{ + TypeMeta: runtime.TypeMeta{Kind: "Pod", APIVersion: "v1"}, + Raw: obj1wire, + }).Marshal() + if err != nil { + t.Fatal(err) + } + + unk2 := &runtime.Unknown{ + TypeMeta: runtime.TypeMeta{Kind: "Pod", APIVersion: "v1"}, + } + wire2 := make([]byte, len(wire1)*2) + n, err := unk2.NestedMarshalTo(wire2, obj1, uint64(obj1.Size())) + if err != nil { + t.Fatal(err) + } + if n != len(wire1) || !bytes.Equal(wire1, wire2[:n]) { + t.Fatalf("unexpected wire:\n%s", hex.Dump(wire2[:n])) + } + + 
wire1 = append([]byte{0x6b, 0x38, 0x73, 0x00}, wire1...) + + testCases := []struct { + obj runtime.Object + data []byte + errFn func(error) bool + }{ + { + obj: obj1, + data: wire1, + }, + } + + for i, test := range testCases { + s := protobuf.NewSerializer(api.Scheme, api.Scheme, "application/protobuf") + obj, err := runtime.Decode(s, test.data) + + switch { + case err == nil && test.errFn != nil: + t.Errorf("%d: failed: %v", i, err) + continue + case err != nil && test.errFn == nil: + t.Errorf("%d: failed: %v", i, err) + continue + case err != nil: + if !test.errFn(err) { + t.Errorf("%d: failed: %v", i, err) + } + if obj != nil { + t.Errorf("%d: should not have returned an object", i) + } + continue + } + + if !api.Semantic.DeepEqual(obj, test.obj) { + t.Errorf("%d: unexpected object:\n%s", i, diff.ObjectGoPrintDiff(test.obj, obj)) + continue + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/serializer/protobuf_extension.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/serializer/protobuf_extension.go new file mode 100644 index 000000000000..a93708c45d05 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/serializer/protobuf_extension.go @@ -0,0 +1,52 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package serializer + +import ( + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/runtime/serializer/protobuf" +) + +const ( + // contentTypeProtobuf is the protobuf type exposed for Kubernetes. It is private to prevent others from + // depending on it unintentionally. + // TODO: potentially move to pkg/api (since it's part of the Kube public API) and pass it in to the + // CodecFactory on initialization. 
+	contentTypeProtobuf      = "application/vnd.kubernetes.protobuf"
+	contentTypeProtobufWatch = contentTypeProtobuf + ";stream=watch"
+)
+
+func protobufSerializer(scheme *runtime.Scheme) (serializerType, bool) {
+	serializer := protobuf.NewSerializer(scheme, scheme, contentTypeProtobuf)
+	raw := protobuf.NewRawSerializer(scheme, scheme, contentTypeProtobuf)
+	return serializerType{
+		AcceptContentTypes: []string{contentTypeProtobuf},
+		ContentType:        contentTypeProtobuf,
+		FileExtensions:     []string{"pb"},
+		Serializer:         serializer,
+		RawSerializer:      raw,
+
+		AcceptStreamContentTypes: []string{contentTypeProtobuf, contentTypeProtobufWatch},
+		StreamContentType:        contentTypeProtobufWatch,
+		Framer:                   protobuf.LengthDelimitedFramer,
+		StreamSerializer:         raw,
+	}, true
+}
+
+func init() {
+	serializerExtensions = append(serializerExtensions, protobufSerializer)
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/serializer/recognizer/recognizer.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/serializer/recognizer/recognizer.go
index 14a2cb3e841f..4b8b1e204e39 100644
--- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/serializer/recognizer/recognizer.go
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/serializer/recognizer/recognizer.go
@@ -17,6 +17,7 @@ limitations under the License.
 package recognizer
 
 import (
+	"bufio"
 	"bytes"
 	"fmt"
 	"io"
@@ -27,51 +28,98 @@ import (
 
 type RecognizingDecoder interface {
 	runtime.Decoder
-	RecognizesData(peek io.Reader) (bool, error)
+	// RecognizesData should return true if the input provided in the given reader
+	// belongs to this decoder, or an error if the data could not be read or is ambiguous.
+	// Unknown is true if the data could not be determined to match the decoder type.
+	// Decoders should assume that they can read as much of peek as they need (as the caller
+	// provides) and may return unknown if the data provided is not sufficient to make a
+	// determination. When peek returns EOF that may mean the end of the input or the
+	// end of buffered input; recognizers should return their best guess at that time.
+	RecognizesData(peek io.Reader) (ok, unknown bool, err error)
 }
 
+// NewDecoder creates a decoder that will attempt multiple decoders in an order defined
+// by:
+//
+// 1. The decoder implements RecognizingDecoder and identifies the data.
+// 2. All other decoders, and any decoder that returned true for unknown.
+//
+// The order passed to the constructor is preserved within those priorities.
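+//
+// A construction sketch (illustrative only; the serializers are assumed to be built
+// elsewhere from a scheme s):
+//
+//	d := NewDecoder(
+//		json.NewSerializer(json.DefaultMetaFactory, s, s, false),
+//		json.NewYAMLSerializer(json.DefaultMetaFactory, s, s),
+//	)
+//	obj, gvk, err := d.Decode(data, nil, nil)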
func NewDecoder(decoders ...runtime.Decoder) runtime.Decoder { - recognizing, blind := []RecognizingDecoder{}, []runtime.Decoder{} - for _, d := range decoders { - if r, ok := d.(RecognizingDecoder); ok { - recognizing = append(recognizing, r) - } else { - blind = append(blind, d) - } - } return &decoder{ - recognizing: recognizing, - blind: blind, + decoders: decoders, } } type decoder struct { - recognizing []RecognizingDecoder - blind []runtime.Decoder + decoders []runtime.Decoder } -func (d *decoder) Decode(data []byte, gvk *unversioned.GroupVersionKind, into runtime.Object) (runtime.Object, *unversioned.GroupVersionKind, error) { - var lastErr error - for _, r := range d.recognizing { - buf := bytes.NewBuffer(data) - ok, err := r.RecognizesData(buf) - if err != nil { - lastErr = err - continue +var _ RecognizingDecoder = &decoder{} + +func (d *decoder) RecognizesData(peek io.Reader) (bool, bool, error) { + var ( + lastErr error + anyUnknown bool + ) + data, _ := bufio.NewReaderSize(peek, 1024).Peek(1024) + for _, r := range d.decoders { + switch t := r.(type) { + case RecognizingDecoder: + ok, unknown, err := t.RecognizesData(bytes.NewBuffer(data)) + if err != nil { + lastErr = err + continue + } + anyUnknown = anyUnknown || unknown + if !ok { + continue + } + return true, false, nil } - if !ok { - continue + } + return false, anyUnknown, lastErr +} + +func (d *decoder) Decode(data []byte, gvk *unversioned.GroupVersionKind, into runtime.Object) (runtime.Object, *unversioned.GroupVersionKind, error) { + var ( + lastErr error + skipped []runtime.Decoder + ) + + // try recognizers, record any decoders we need to give a chance later + for _, r := range d.decoders { + switch t := r.(type) { + case RecognizingDecoder: + buf := bytes.NewBuffer(data) + ok, unknown, err := t.RecognizesData(buf) + if err != nil { + lastErr = err + continue + } + if unknown { + skipped = append(skipped, t) + continue + } + if !ok { + continue + } + return r.Decode(data, gvk, into) + default: + skipped = append(skipped, t) } - return r.Decode(data, gvk, into) } - for _, d := range d.blind { - out, actual, err := d.Decode(data, gvk, into) + + // try recognizers that returned unknown or didn't recognize their data + for _, r := range skipped { + out, actual, err := r.Decode(data, gvk, into) if err != nil { lastErr = err continue } return out, actual, nil } + if lastErr == nil { lastErr = fmt.Errorf("no serialization format matched the provided data") } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/serializer/recognizer/recognizer_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/serializer/recognizer/recognizer_test.go new file mode 100644 index 000000000000..9998f942ccf6 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/serializer/recognizer/recognizer_test.go @@ -0,0 +1,57 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package recognizer
+
+import (
+	"testing"
+
+	"k8s.io/kubernetes/pkg/api/unversioned"
+	"k8s.io/kubernetes/pkg/runtime"
+	"k8s.io/kubernetes/pkg/runtime/serializer/json"
+)
+
+type A struct{}
+
+func (A) GetObjectKind() unversioned.ObjectKind { return unversioned.EmptyObjectKind }
+
+func TestRecognizer(t *testing.T) {
+	s := runtime.NewScheme()
+	s.AddKnownTypes(unversioned.GroupVersion{Version: "v1"}, &A{})
+	d := NewDecoder(
+		json.NewSerializer(json.DefaultMetaFactory, s, s, false),
+		json.NewYAMLSerializer(json.DefaultMetaFactory, s, s),
+	)
+	out, _, err := d.Decode([]byte(`
+kind: A
+apiVersion: v1
+`), nil, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	t.Logf("%#v", out)
+
+	out, _, err = d.Decode([]byte(`
+{
+  "kind":"A",
+  "apiVersion":"v1"
+}
+`), nil, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	t.Logf("%#v", out)
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/serializer/streaming/streaming.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/serializer/streaming/streaming.go
new file mode 100644
index 000000000000..6c5ff056326e
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/serializer/streaming/streaming.go
@@ -0,0 +1,137 @@
+/*
+Copyright 2015 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package streaming implements encoders and decoders for streams
+// of runtime.Objects over io.Writers/Readers.
+package streaming
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+
+	"k8s.io/kubernetes/pkg/api/unversioned"
+	"k8s.io/kubernetes/pkg/runtime"
+)
+
+// Encoder is a runtime.Encoder on a stream.
+type Encoder interface {
+	// Encode will write the provided object to the stream or return an error. It obeys the same
+	// contract as runtime.Encoder.
+	Encode(obj runtime.Object, overrides ...unversioned.GroupVersion) error
+}
+
+// Decoder is a runtime.Decoder from a stream.
+type Decoder interface {
+	// Decode will return io.EOF when no more objects are available.
+	Decode(defaults *unversioned.GroupVersionKind, into runtime.Object) (runtime.Object, *unversioned.GroupVersionKind, error)
+	// Close closes the underlying stream.
+	Close() error
+}
+
+// Serializer is a factory for creating encoders and decoders that work over streams.
+type Serializer interface {
+	NewEncoder(w io.Writer) Encoder
+	NewDecoder(r io.ReadCloser) Decoder
+}
+
+type decoder struct {
+	reader    io.ReadCloser
+	decoder   runtime.Decoder
+	buf       []byte
+	maxBytes  int
+	resetRead bool
+}
+
+// NewDecoder creates a streaming decoder that reads object chunks from r and decodes them with d.
+// The reader is expected to return io.ErrShortBuffer if the provided buffer is not large enough to
+// read an entire object.
+func NewDecoder(r io.ReadCloser, d runtime.Decoder) Decoder {
+	return &decoder{
+		reader:   r,
+		decoder:  d,
+		buf:      make([]byte, 1024),
+		maxBytes: 1024 * 1024,
+	}
+}
+
+var ErrObjectTooLarge = fmt.Errorf("object to decode was longer than maximum allowed size")
+
+// Decode reads the next object from the stream and decodes it.
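+//
+// A typical read loop (illustrative only; r is a framed io.ReadCloser and d a
+// runtime.Decoder for the frame payloads):
+//
+//	dec := NewDecoder(framer.NewLengthDelimitedFrameReader(r), d)
+//	defer dec.Close()
+//	for {
+//		obj, _, err := dec.Decode(nil, nil)
+//		if err == io.EOF {
+//			break // stream finished cleanly
+//		}
+//		if err != nil {
+//			return err // includes ErrObjectTooLarge for oversized frames
+//		}
+//		_ = obj // process the decoded object
+//	}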
+func (d *decoder) Decode(defaults *unversioned.GroupVersionKind, into runtime.Object) (runtime.Object, *unversioned.GroupVersionKind, error) { + base := 0 + for { + n, err := d.reader.Read(d.buf[base:]) + if err == io.ErrShortBuffer { + if n == 0 { + return nil, nil, fmt.Errorf("got short buffer with n=0, base=%d, cap=%d", base, cap(d.buf)) + } + if d.resetRead { + continue + } + // double the buffer size up to maxBytes + if len(d.buf) < d.maxBytes { + base += n + d.buf = append(d.buf, make([]byte, len(d.buf))...) + continue + } + // must read the rest of the frame (until we stop getting ErrShortBuffer) + d.resetRead = true + base = 0 + return nil, nil, ErrObjectTooLarge + } + if err != nil { + return nil, nil, err + } + if d.resetRead { + // now that we have drained the large read, continue + d.resetRead = false + continue + } + base += n + break + } + return d.decoder.Decode(d.buf[:base], defaults, into) +} + +func (d *decoder) Close() error { + return d.reader.Close() +} + +type encoder struct { + writer io.Writer + encoder runtime.Encoder + buf *bytes.Buffer +} + +// NewEncoder returns a new streaming encoder. +func NewEncoder(w io.Writer, e runtime.Encoder) Encoder { + return &encoder{ + writer: w, + encoder: e, + buf: &bytes.Buffer{}, + } +} + +// Encode writes the provided object to the nested writer. +func (e *encoder) Encode(obj runtime.Object, overrides ...unversioned.GroupVersion) error { + if err := e.encoder.EncodeToStream(obj, e.buf, overrides...); err != nil { + return err + } + _, err := e.writer.Write(e.buf.Bytes()) + e.buf.Reset() + return err +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/serializer/streaming/streaming_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/serializer/streaming/streaming_test.go new file mode 100644 index 000000000000..b3d500c986e7 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/serializer/streaming/streaming_test.go @@ -0,0 +1,84 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package streaming
+
+import (
+	"bytes"
+	"io"
+	"io/ioutil"
+	"testing"
+
+	"k8s.io/kubernetes/pkg/api/unversioned"
+	"k8s.io/kubernetes/pkg/runtime"
+	"k8s.io/kubernetes/pkg/util/framer"
+)
+
+type fakeDecoder struct {
+	got []byte
+	obj runtime.Object
+	err error
+}
+
+func (d *fakeDecoder) Decode(data []byte, gvk *unversioned.GroupVersionKind, into runtime.Object) (runtime.Object, *unversioned.GroupVersionKind, error) {
+	d.got = data
+	return d.obj, nil, d.err
+}
+
+func TestEmptyDecoder(t *testing.T) {
+	buf := bytes.NewBuffer([]byte{})
+	d := &fakeDecoder{}
+	_, _, err := NewDecoder(ioutil.NopCloser(buf), d).Decode(nil, nil)
+	if err != io.EOF {
+		t.Fatal(err)
+	}
+}
+
+func TestDecoder(t *testing.T) {
+	frames := [][]byte{
+		make([]byte, 1025),
+		make([]byte, 1024*5),
+		make([]byte, 1024*1024*5),
+		make([]byte, 1025),
+	}
+	pr, pw := io.Pipe()
+	fw := framer.NewLengthDelimitedFrameWriter(pw)
+	go func() {
+		for i := range frames {
+			fw.Write(frames[i])
+		}
+		pw.Close()
+	}()
+
+	r := framer.NewLengthDelimitedFrameReader(pr)
+	d := &fakeDecoder{}
+	dec := NewDecoder(r, d)
+	if _, _, err := dec.Decode(nil, nil); err != nil || !bytes.Equal(d.got, frames[0]) {
+		t.Fatalf("unexpected %v %v", err, len(d.got))
+	}
+	if _, _, err := dec.Decode(nil, nil); err != nil || !bytes.Equal(d.got, frames[1]) {
+		t.Fatalf("unexpected %v %v", err, len(d.got))
+	}
+	if _, _, err := dec.Decode(nil, nil); err != ErrObjectTooLarge || !bytes.Equal(d.got, frames[1]) {
+		t.Fatalf("unexpected %v %v", err, len(d.got))
+	}
+	if _, _, err := dec.Decode(nil, nil); err != nil || !bytes.Equal(d.got, frames[3]) {
+		t.Fatalf("unexpected %v %v", err, len(d.got))
+	}
+	if _, _, err := dec.Decode(nil, nil); err != io.EOF {
+		t.Fatalf("unexpected %v %v", err, len(d.got))
+	}
}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/serializer/versioning/versioning.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/serializer/versioning/versioning.go
index 0ded5365ddc3..d7c5cbe9db70 100644
--- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/serializer/versioning/versioning.go
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/serializer/versioning/versioning.go
@@ -27,6 +27,7 @@ import (
 // EnableCrossGroupDecoding modifies the given decoder in place, if it is a codec
 // from this package. It allows objects from one group to be auto-decoded into
 // another group. 'destGroup' must already exist in the codec.
+// TODO: this is an encapsulation violation and should be refactored
 func EnableCrossGroupDecoding(d runtime.Decoder, sourceGroup, destGroup string) error {
 	internal, ok := d.(*codec)
 	if !ok {
@@ -45,6 +46,7 @@ func EnableCrossGroupDecoding(d runtime.Decoder, sourceGroup, destGroup string)
 // EnableCrossGroupEncoding modifies the given encoder in place, if it is a codec
 // from this package. It allows objects from one group to be auto-encoded into
 // another group. 'destGroup' must already exist in the codec.
+// TODO: this is an encapsulation violation and should be refactored
 func EnableCrossGroupEncoding(e runtime.Encoder, sourceGroup, destGroup string) error {
 	internal, ok := e.(*codec)
 	if !ok {
@@ -64,31 +66,34 @@ func EnableCrossGroupEncoding(e runtime.Encoder, sourceGroup, destGroup string)
 func NewCodecForScheme(
 	// TODO: I should be a scheme interface?
scheme *runtime.Scheme, - serializer runtime.Serializer, + encoder runtime.Encoder, + decoder runtime.Decoder, encodeVersion []unversioned.GroupVersion, decodeVersion []unversioned.GroupVersion, ) runtime.Codec { - return NewCodec(serializer, scheme, scheme, scheme, runtime.ObjectTyperToTyper(scheme), encodeVersion, decodeVersion) + return NewCodec(encoder, decoder, runtime.UnsafeObjectConvertor(scheme), scheme, scheme, scheme, encodeVersion, decodeVersion) } // NewCodec takes objects in their internal versions and converts them to external versions before // serializing them. It assumes the serializer provided to it only deals with external versions. // This class is also a serializer, but is generally used with a specific version. func NewCodec( - serializer runtime.Serializer, + encoder runtime.Encoder, + decoder runtime.Decoder, convertor runtime.ObjectConvertor, creater runtime.ObjectCreater, copier runtime.ObjectCopier, - typer runtime.Typer, + typer runtime.ObjectTyper, encodeVersion []unversioned.GroupVersion, decodeVersion []unversioned.GroupVersion, ) runtime.Codec { internal := &codec{ - serializer: serializer, - convertor: convertor, - creater: creater, - copier: copier, - typer: typer, + encoder: encoder, + decoder: decoder, + convertor: convertor, + creater: creater, + copier: copier, + typer: typer, } if encodeVersion != nil { internal.encodeVersion = make(map[string]unversioned.GroupVersion) @@ -99,6 +104,11 @@ func NewCodec( } internal.encodeVersion[v.Group] = v } + if len(internal.encodeVersion) == 1 { + for _, v := range internal.encodeVersion { + internal.preferredEncodeVersion = []unversioned.GroupVersion{v} + } + } } if decodeVersion != nil { internal.decodeVersion = make(map[string]unversioned.GroupVersion) @@ -115,14 +125,17 @@ func NewCodec( } type codec struct { - serializer runtime.Serializer - convertor runtime.ObjectConvertor - creater runtime.ObjectCreater - copier runtime.ObjectCopier - typer runtime.Typer + encoder runtime.Encoder + decoder runtime.Decoder + convertor runtime.ObjectConvertor + creater runtime.ObjectCreater + copier runtime.ObjectCopier + typer runtime.ObjectTyper encodeVersion map[string]unversioned.GroupVersion decodeVersion map[string]unversioned.GroupVersion + + preferredEncodeVersion []unversioned.GroupVersion } // Decode attempts a decode of the object, then tries to convert it to the internal version. If into is provided and the decoding is @@ -134,7 +147,7 @@ func (c *codec) Decode(data []byte, defaultGVK *unversioned.GroupVersionKind, in into = versioned.Last() } - obj, gvk, err := c.serializer.Decode(data, defaultGVK, into) + obj, gvk, err := c.decoder.Decode(data, defaultGVK, into) if err != nil { return nil, gvk, err } @@ -198,7 +211,7 @@ func (c *codec) Decode(data []byte, defaultGVK *unversioned.GroupVersionKind, in } // Convert if needed. - out, err := c.convertor.ConvertToVersion(obj, targetGV.String()) + out, err := c.convertor.ConvertToVersion(obj, targetGV) if err != nil { return nil, gvk, err } @@ -213,18 +226,21 @@ func (c *codec) Decode(data []byte, defaultGVK *unversioned.GroupVersionKind, in // encoding the object the first override that matches the object's group is used. Other overrides are ignored. func (c *codec) EncodeToStream(obj runtime.Object, w io.Writer, overrides ...unversioned.GroupVersion) error { if _, ok := obj.(*runtime.Unknown); ok { - return c.serializer.EncodeToStream(obj, w, overrides...) + return c.encoder.EncodeToStream(obj, w, overrides...) 
} - gvk, isUnversioned, err := c.typer.ObjectKind(obj) + gvks, isUnversioned, err := c.typer.ObjectKinds(obj) if err != nil { return err } + gvk := gvks[0] if (c.encodeVersion == nil && len(overrides) == 0) || isUnversioned { - old := obj.GetObjectKind().GroupVersionKind() - obj.GetObjectKind().SetGroupVersionKind(gvk) - defer obj.GetObjectKind().SetGroupVersionKind(old) - return c.serializer.EncodeToStream(obj, w, overrides...) + objectKind := obj.GetObjectKind() + old := objectKind.GroupVersionKind() + objectKind.SetGroupVersionKind(gvk) + err = c.encoder.EncodeToStream(obj, w, overrides...) + objectKind.SetGroupVersionKind(old) + return err } targetGV, ok := c.encodeVersion[gvk.Group] @@ -240,13 +256,16 @@ func (c *codec) EncodeToStream(obj runtime.Object, w io.Writer, overrides ...unv } // attempt a conversion to the sole encode version - if !ok && len(c.encodeVersion) == 1 { + if !ok && c.preferredEncodeVersion != nil { ok = true - for _, v := range c.encodeVersion { - targetGV = v + targetGV = c.preferredEncodeVersion[0] + if len(overrides) > 0 { + // ensure the target override is first + overrides = promoteOrPrependGroupVersion(targetGV, overrides) + } else { + // avoids allocating a new array for each call to EncodeToVersion + overrides = c.preferredEncodeVersion } - // ensure the target override is first - overrides = promoteOrPrependGroupVersion(targetGV, overrides) } // if no fallback is available, error @@ -255,22 +274,21 @@ func (c *codec) EncodeToStream(obj runtime.Object, w io.Writer, overrides ...unv } // Perform a conversion if necessary - if gvk.GroupVersion() != targetGV { - out, err := c.convertor.ConvertToVersion(obj, targetGV.String()) - if err != nil { - if ok { - return err - } - } else { - obj = out + objectKind := obj.GetObjectKind() + old := objectKind.GroupVersionKind() + out, err := c.convertor.ConvertToVersion(obj, targetGV) + if err != nil { + if ok { + return err } } else { - old := obj.GetObjectKind().GroupVersionKind() - defer obj.GetObjectKind().SetGroupVersionKind(old) - obj.GetObjectKind().SetGroupVersionKind(&unversioned.GroupVersionKind{Group: targetGV.Group, Version: targetGV.Version, Kind: gvk.Kind}) + obj = out } - - return c.serializer.EncodeToStream(obj, w, overrides...) + // Conversion is responsible for setting the proper group, version, and kind onto the outgoing object + err = c.encoder.EncodeToStream(obj, w, overrides...) + // restore the old GVK, in case conversion returned the same object + objectKind.SetGroupVersionKind(old) + return err } // promoteOrPrependGroupVersion finds the group version in the provided group versions that has the same group as target. diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/serializer/versioning/versioning_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/serializer/versioning/versioning_test.go new file mode 100644 index 000000000000..5ca6c2a9afcc --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/serializer/versioning/versioning_test.go @@ -0,0 +1,300 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package versioning + +import ( + "fmt" + "io" + "reflect" + "testing" + + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/util/diff" +) + +type testDecodable struct { + Other string + Value int `json:"value"` + gvk unversioned.GroupVersionKind +} + +func (d *testDecodable) GetObjectKind() unversioned.ObjectKind { return d } +func (d *testDecodable) SetGroupVersionKind(gvk unversioned.GroupVersionKind) { d.gvk = gvk } +func (d *testDecodable) GroupVersionKind() unversioned.GroupVersionKind { return d.gvk } + +func TestDecode(t *testing.T) { + gvk1 := &unversioned.GroupVersionKind{Kind: "Test", Group: "other", Version: "blah"} + decodable1 := &testDecodable{} + decodable2 := &testDecodable{} + decodable3 := &testDecodable{} + versionedDecodable1 := &runtime.VersionedObjects{Objects: []runtime.Object{decodable1}} + + testCases := []struct { + serializer runtime.Serializer + convertor runtime.ObjectConvertor + creater runtime.ObjectCreater + copier runtime.ObjectCopier + typer runtime.ObjectTyper + yaml bool + pretty bool + + encodes, decodes []unversioned.GroupVersion + + defaultGVK *unversioned.GroupVersionKind + into runtime.Object + + errFn func(error) bool + expectedObject runtime.Object + sameObject runtime.Object + expectedGVK *unversioned.GroupVersionKind + }{ + { + serializer: &mockSerializer{actual: gvk1}, + convertor: &checkConvertor{groupVersion: unversioned.GroupVersion{Group: "other", Version: "__internal"}}, + expectedGVK: gvk1, + }, + { + serializer: &mockSerializer{actual: gvk1, obj: decodable1}, + convertor: &checkConvertor{in: decodable1, obj: decodable2, groupVersion: unversioned.GroupVersion{Group: "other", Version: "__internal"}}, + expectedGVK: gvk1, + sameObject: decodable2, + }, + // defaultGVK.Group is allowed to force a conversion to the destination group + { + serializer: &mockSerializer{actual: gvk1, obj: decodable1}, + defaultGVK: &unversioned.GroupVersionKind{Group: "force"}, + convertor: &checkConvertor{in: decodable1, obj: decodable2, groupVersion: unversioned.GroupVersion{Group: "force", Version: "__internal"}}, + expectedGVK: gvk1, + sameObject: decodable2, + }, + // uses direct conversion for into when objects differ + { + into: decodable3, + serializer: &mockSerializer{actual: gvk1, obj: decodable1}, + convertor: &checkConvertor{in: decodable1, obj: decodable3, directConvert: true}, + expectedGVK: gvk1, + sameObject: decodable3, + }, + { + into: versionedDecodable1, + serializer: &mockSerializer{actual: gvk1, obj: decodable3}, + convertor: &checkConvertor{in: decodable3, obj: decodable1, directConvert: true}, + expectedGVK: gvk1, + sameObject: versionedDecodable1, + }, + // returns directly when serializer returns into + { + into: decodable3, + serializer: &mockSerializer{actual: gvk1, obj: decodable3}, + expectedGVK: gvk1, + sameObject: decodable3, + }, + // returns directly when serializer returns into + { + into: versionedDecodable1, + serializer: &mockSerializer{actual: gvk1, obj: decodable1}, + expectedGVK: gvk1, + sameObject: versionedDecodable1, + }, + + // runtime.VersionedObjects are decoded + { + into: &runtime.VersionedObjects{Objects: []runtime.Object{}}, + + serializer: &mockSerializer{actual: gvk1, obj: decodable1}, + copier: &checkCopy{in: decodable1, obj: decodable1}, + convertor: &checkConvertor{in: decodable1, obj: decodable2, groupVersion: unversioned.GroupVersion{Group: "other", 
Version: "__internal"}}, + expectedGVK: gvk1, + expectedObject: &runtime.VersionedObjects{Objects: []runtime.Object{decodable1, decodable2}}, + }, + { + into: &runtime.VersionedObjects{Objects: []runtime.Object{}}, + + serializer: &mockSerializer{actual: gvk1, obj: decodable1}, + copier: &checkCopy{in: decodable1, obj: nil, err: fmt.Errorf("error on copy")}, + convertor: &checkConvertor{in: decodable1, obj: decodable2, groupVersion: unversioned.GroupVersion{Group: "other", Version: "__internal"}}, + expectedGVK: gvk1, + expectedObject: &runtime.VersionedObjects{Objects: []runtime.Object{decodable1, decodable2}}, + }, + + // decode into the same version as the serialized object + { + decodes: []unversioned.GroupVersion{gvk1.GroupVersion()}, + + serializer: &mockSerializer{actual: gvk1, obj: decodable1}, + expectedGVK: gvk1, + expectedObject: decodable1, + }, + { + into: &runtime.VersionedObjects{Objects: []runtime.Object{}}, + decodes: []unversioned.GroupVersion{gvk1.GroupVersion()}, + + serializer: &mockSerializer{actual: gvk1, obj: decodable1}, + expectedGVK: gvk1, + expectedObject: &runtime.VersionedObjects{Objects: []runtime.Object{decodable1}}, + }, + + // codec with non matching version skips conversion altogether + { + decodes: []unversioned.GroupVersion{{Group: "something", Version: "else"}}, + + serializer: &mockSerializer{actual: gvk1, obj: decodable1}, + expectedGVK: gvk1, + expectedObject: decodable1, + }, + { + into: &runtime.VersionedObjects{Objects: []runtime.Object{}}, + decodes: []unversioned.GroupVersion{{Group: "something", Version: "else"}}, + + serializer: &mockSerializer{actual: gvk1, obj: decodable1}, + expectedGVK: gvk1, + expectedObject: &runtime.VersionedObjects{Objects: []runtime.Object{decodable1}}, + }, + } + + for i, test := range testCases { + t.Logf("%d", i) + s := NewCodec(test.serializer, test.serializer, test.convertor, test.creater, test.copier, test.typer, test.encodes, test.decodes) + obj, gvk, err := s.Decode([]byte(`{}`), test.defaultGVK, test.into) + + if !reflect.DeepEqual(test.expectedGVK, gvk) { + t.Errorf("%d: unexpected GVK: %v", i, gvk) + } + + switch { + case err == nil && test.errFn != nil: + t.Errorf("%d: failed: %v", i, err) + continue + case err != nil && test.errFn == nil: + t.Errorf("%d: failed: %v", i, err) + continue + case err != nil: + if !test.errFn(err) { + t.Errorf("%d: failed: %v", i, err) + } + if obj != nil { + t.Errorf("%d: should have returned nil object", i) + } + continue + } + + if test.into != nil && test.into != obj { + t.Errorf("%d: expected into to be returned: %v", i, obj) + continue + } + + switch { + case test.expectedObject != nil: + if !reflect.DeepEqual(test.expectedObject, obj) { + t.Errorf("%d: unexpected object:\n%s", i, diff.ObjectGoPrintSideBySide(test.expectedObject, obj)) + } + case test.sameObject != nil: + if test.sameObject != obj { + t.Errorf("%d: unexpected object:\n%s", i, diff.ObjectGoPrintSideBySide(test.sameObject, obj)) + } + case obj != nil: + t.Errorf("%d: unexpected object: %#v", i, obj) + } + } +} + +type checkCopy struct { + in, obj runtime.Object + err error +} + +func (c *checkCopy) Copy(obj runtime.Object) (runtime.Object, error) { + if c.in != nil && c.in != obj { + return nil, fmt.Errorf("unexpected input to copy: %#v", obj) + } + return c.obj, c.err +} + +type checkConvertor struct { + err error + in, obj runtime.Object + groupVersion unversioned.GroupVersion + directConvert bool +} + +func (c *checkConvertor) Convert(in, out interface{}) error { + if !c.directConvert { + return 
fmt.Errorf("unexpected call to Convert") + } + if c.in != nil && c.in != in { + return fmt.Errorf("unexpected in: %s", in) + } + if c.obj != nil && c.obj != out { + return fmt.Errorf("unexpected out: %s", out) + } + return c.err +} +func (c *checkConvertor) ConvertToVersion(in runtime.Object, outVersion unversioned.GroupVersion) (out runtime.Object, err error) { + if c.directConvert { + return nil, fmt.Errorf("unexpected call to ConvertToVersion") + } + if c.in != nil && c.in != in { + return nil, fmt.Errorf("unexpected in: %s", in) + } + if c.groupVersion != outVersion { + return nil, fmt.Errorf("unexpected outversion: %s", outVersion) + } + return c.obj, c.err +} +func (c *checkConvertor) ConvertFieldLabel(version, kind, label, value string) (string, string, error) { + return "", "", fmt.Errorf("unexpected call to ConvertFieldLabel") +} + +type mockSerializer struct { + err error + obj runtime.Object + versions []unversioned.GroupVersion + + defaults, actual *unversioned.GroupVersionKind + into runtime.Object +} + +func (s *mockSerializer) Decode(data []byte, defaults *unversioned.GroupVersionKind, into runtime.Object) (runtime.Object, *unversioned.GroupVersionKind, error) { + s.defaults = defaults + s.into = into + return s.obj, s.actual, s.err +} + +func (s *mockSerializer) EncodeToStream(obj runtime.Object, w io.Writer, versions ...unversioned.GroupVersion) error { + s.obj = obj + s.versions = versions + return s.err +} + +type mockCreater struct { + err error + obj runtime.Object +} + +func (c *mockCreater) New(kind unversioned.GroupVersionKind) (runtime.Object, error) { + return c.obj, c.err +} + +type mockTyper struct { + gvk *unversioned.GroupVersionKind + err error +} + +func (t *mockTyper) ObjectKind(obj runtime.Object) (*unversioned.GroupVersionKind, bool, error) { + return t.gvk, false, t.err +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/swagger_doc_generator.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/swagger_doc_generator.go index 8d922ccd663d..19b8378a4912 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/swagger_doc_generator.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/swagger_doc_generator.go @@ -113,6 +113,37 @@ func fieldName(field *ast.Field) string { return jsonTag } +// A buffer of lines that will be written. 
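+// Lines are accumulated by addLine together with an indentation level and emitted in
+// order by flushLines, which prefixes each line with that many tab characters.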
+type bufferedLine struct { + line string + indentation int +} + +type buffer struct { + lines []bufferedLine +} + +func newBuffer() *buffer { + return &buffer{ + lines: make([]bufferedLine, 0), + } +} + +func (b *buffer) addLine(line string, indent int) { + b.lines = append(b.lines, bufferedLine{line, indent}) +} + +func (b *buffer) flushLines(w io.Writer) error { + for _, line := range b.lines { + indentation := strings.Repeat("\t", line.indentation) + fullLine := fmt.Sprintf("%s%s", indentation, line.line) + if _, err := io.WriteString(w, fullLine); err != nil { + return err + } + } + return nil +} + func writeFuncHeader(b *buffer, structName string, indent int) { s := fmt.Sprintf("var map_%s = map[string]string {\n", structName) b.addLine(s, indent) diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/atomic/value.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/swagger_doc_generator_test.go similarity index 51% rename from Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/atomic/value.go rename to Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/swagger_doc_generator_test.go index a9bc8cd813c7..095dddff58b3 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/atomic/value.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/swagger_doc_generator_test.go @@ -14,29 +14,30 @@ See the License for the specific language governing permissions and limitations under the License. */ -package atomic +package runtime import ( - "sync" + "testing" ) -// TODO(ArtfulCoder) -// sync/atomic/Value was added in golang 1.4 -// Once support is dropped for go 1.3, this type must be deprecated in favor of sync/atomic/Value. -// The functions are named Load/Store to match sync/atomic/Value function names. -type Value struct { - value interface{} - valueMutex sync.RWMutex -} - -func (at *Value) Store(val interface{}) { - at.valueMutex.Lock() - defer at.valueMutex.Unlock() - at.value = val -} - -func (at *Value) Load() interface{} { - at.valueMutex.RLock() - defer at.valueMutex.RUnlock() - return at.value +func TestFmtRawDoc(t *testing.T) { + tests := []struct { + t, expected string + }{ + {"aaa\n --- asd\n TODO: tooooodo\n toooodoooooo\n", "aaa"}, + {"aaa\nasd\n TODO: tooooodo\nbbbb\n --- toooodoooooo\n", "aaa asd bbbb"}, + {" TODO: tooooodo\n", ""}, + {"Par1\n\nPar2\n\n", "Par1\\n\\nPar2"}, + {"", ""}, + {" ", ""}, + {" \n", ""}, + {" \n\n ", ""}, + {"Example:\n\tl1\n\t\tl2\n", "Example:\\n\\tl1\\n\\t\\tl2"}, + } + + for _, test := range tests { + if o := fmtRawDoc(test.t); o != test.expected { + t.Fatalf("Expected: %q, got %q", test.expected, o) + } + } } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/types.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/types.go index c2007cc92ee9..0c6d48afc4a4 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/types.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/types.go @@ -16,6 +16,16 @@ limitations under the License. package runtime +import ( + "fmt" + + "github.com/golang/glog" + + "k8s.io/kubernetes/pkg/api/meta/metatypes" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/types" +) + // Note that the types provided in this file are not versioned and are intended to be // safe to use from within all versions of every API object. 
@@ -32,10 +42,14 @@ package runtime
 //
 // +protobuf=true
 type TypeMeta struct {
-	APIVersion string `json:"apiVersion,omitempty" yaml:"apiVersion,omitempty"`
-	Kind       string `json:"kind,omitempty" yaml:"kind,omitempty"`
+	APIVersion string `json:"apiVersion,omitempty" yaml:"apiVersion,omitempty" protobuf:"bytes,1,opt,name=apiVersion"`
+	Kind       string `json:"kind,omitempty" yaml:"kind,omitempty" protobuf:"bytes,2,opt,name=kind"`
 }
 
+const (
+	ContentTypeJSON string = "application/json"
+)
+
 // RawExtension is used to hold extensions in external versions.
 //
 // To use this, make a field which has RawExtension as its type in your external, versioned
@@ -80,8 +94,10 @@ type TypeMeta struct {
 //
 // +protobuf=true
 type RawExtension struct {
-	// RawJSON is the underlying serialization of this object.
-	RawJSON []byte
+	// Raw is the underlying serialization of this object.
+	//
+	// TODO: Determine how to detect ContentType and ContentEncoding of 'Raw' data.
+	Raw []byte `protobuf:"bytes,1,opt,name=raw"`
 	// Object can hold a representation of this extension - useful for working with versioned
 	// structs.
 	Object Object `json:"-"`
@@ -95,11 +111,17 @@ type RawExtension struct {
 //
 // +protobuf=true
 type Unknown struct {
-	TypeMeta `json:",inline"`
-	// RawJSON will hold the complete JSON of the object which couldn't be matched
+	TypeMeta `json:",inline" protobuf:"bytes,1,opt,name=typeMeta"`
+	// Raw will hold the complete serialized object which couldn't be matched
 	// with a registered type. Most likely, nothing should be done with this
 	// except for passing it through the system.
-	RawJSON []byte
+	Raw []byte `protobuf:"bytes,2,opt,name=raw"`
+	// ContentEncoding is the encoding used to encode 'Raw' data.
+	// Unspecified means no encoding.
+	ContentEncoding string `protobuf:"bytes,3,opt,name=contentEncoding"`
+	// ContentType is the serialization method used to serialize 'Raw'.
+	// Unspecified means ContentTypeJSON.
+	ContentType string `protobuf:"bytes,4,opt,name=contentType"`
 }
 
 // Unstructured allows objects that do not have Golang structs registered to be manipulated
@@ -108,26 +130,360 @@ type Unknown struct {
 // TODO: Make this object have easy access to field based accessors and setters for
 // metadata and field mutation.
 type Unstructured struct {
-	TypeMeta `json:",inline"`
-
-	// Name is populated from metadata (if present) upon deserialization
-	Name string
-
 	// Object is a JSON compatible map with string, float, int, []interface{}, or map[string]interface{}
 	// children.
Object map[string]interface{} } +func getNestedField(obj map[string]interface{}, fields ...string) interface{} { + var val interface{} = obj + for _, field := range fields { + if _, ok := val.(map[string]interface{}); !ok { + return nil + } + val = val.(map[string]interface{})[field] + } + return val +} + +func getNestedString(obj map[string]interface{}, fields ...string) string { + if str, ok := getNestedField(obj, fields...).(string); ok { + return str + } + return "" +} + +func getNestedSlice(obj map[string]interface{}, fields ...string) []string { + if m, ok := getNestedField(obj, fields...).([]interface{}); ok { + strSlice := make([]string, 0, len(m)) + for _, v := range m { + if str, ok := v.(string); ok { + strSlice = append(strSlice, str) + } + } + return strSlice + } + return nil +} + +func getNestedMap(obj map[string]interface{}, fields ...string) map[string]string { + if m, ok := getNestedField(obj, fields...).(map[string]interface{}); ok { + strMap := make(map[string]string, len(m)) + for k, v := range m { + if str, ok := v.(string); ok { + strMap[k] = str + } + } + return strMap + } + return nil +} + +func setNestedField(obj map[string]interface{}, value interface{}, fields ...string) { + m := obj + if len(fields) > 1 { + for _, field := range fields[0 : len(fields)-1] { + if _, ok := m[field].(map[string]interface{}); !ok { + m[field] = make(map[string]interface{}) + } + m = m[field].(map[string]interface{}) + } + } + m[fields[len(fields)-1]] = value +} + +func setNestedSlice(obj map[string]interface{}, value []string, fields ...string) { + m := make([]interface{}, 0, len(value)) + for _, v := range value { + m = append(m, v) + } + setNestedField(obj, m, fields...) +} + +func setNestedMap(obj map[string]interface{}, value map[string]string, fields ...string) { + m := make(map[string]interface{}, len(value)) + for k, v := range value { + m[k] = v + } + setNestedField(obj, m, fields...) +} + +func (u *Unstructured) setNestedField(value interface{}, fields ...string) { + if u.Object == nil { + u.Object = make(map[string]interface{}) + } + setNestedField(u.Object, value, fields...) +} + +func (u *Unstructured) setNestedSlice(value []string, fields ...string) { + if u.Object == nil { + u.Object = make(map[string]interface{}) + } + setNestedSlice(u.Object, value, fields...) +} + +func (u *Unstructured) setNestedMap(value map[string]string, fields ...string) { + if u.Object == nil { + u.Object = make(map[string]interface{}) + } + setNestedMap(u.Object, value, fields...) +} + +func extractOwnerReference(src interface{}) metatypes.OwnerReference { + v := src.(map[string]interface{}) + return metatypes.OwnerReference{ + Kind: getNestedString(v, "kind"), + Name: getNestedString(v, "name"), + APIVersion: getNestedString(v, "apiVersion"), + UID: (types.UID)(getNestedString(v, "uid")), + } +} + +func setOwnerReference(src metatypes.OwnerReference) map[string]interface{} { + ret := make(map[string]interface{}) + setNestedField(ret, src.Kind, "kind") + setNestedField(ret, src.Name, "name") + setNestedField(ret, src.APIVersion, "apiVersion") + setNestedField(ret, string(src.UID), "uid") + return ret +} + +func getOwnerReferences(object map[string]interface{}) ([]map[string]interface{}, error) { + field := getNestedField(object, "metadata", "ownerReferences") + if field == nil { + return nil, fmt.Errorf("cannot find field metadata.ownerReferences in %v", object) + } + ownerReferences, ok := field.([]map[string]interface{}) + if ok { + return ownerReferences, nil + } + // TODO: This is hacky... 
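+	// Slices produced by json.Unmarshal decode as []interface{} rather than
+	// []map[string]interface{}, so the assertion above fails for decoded
+	// objects and each element is asserted individually below.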
+ interfaces, ok := field.([]interface{}) + if !ok { + return nil, fmt.Errorf("expect metadata.ownerReferences to be a slice in %#v", object) + } + ownerReferences = make([]map[string]interface{}, 0, len(interfaces)) + for i := 0; i < len(interfaces); i++ { + r, ok := interfaces[i].(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("expect element metadata.ownerReferences to be a map[string]interface{} in %#v", object) + } + ownerReferences = append(ownerReferences, r) + } + return ownerReferences, nil +} + +func (u *Unstructured) GetOwnerReferences() []metatypes.OwnerReference { + original, err := getOwnerReferences(u.Object) + if err != nil { + glog.V(6).Info(err) + return nil + } + ret := make([]metatypes.OwnerReference, 0, len(original)) + for i := 0; i < len(original); i++ { + ret = append(ret, extractOwnerReference(original[i])) + } + return ret +} + +func (u *Unstructured) SetOwnerReferences(references []metatypes.OwnerReference) { + var newReferences = make([]map[string]interface{}, 0, len(references)) + for i := 0; i < len(references); i++ { + newReferences = append(newReferences, setOwnerReference(references[i])) + } + u.setNestedField(newReferences, "metadata", "ownerReferences") +} + +func (u *Unstructured) GetAPIVersion() string { + return getNestedString(u.Object, "apiVersion") +} + +func (u *Unstructured) SetAPIVersion(version string) { + u.setNestedField(version, "apiVersion") +} + +func (u *Unstructured) GetKind() string { + return getNestedString(u.Object, "kind") +} + +func (u *Unstructured) SetKind(kind string) { + u.setNestedField(kind, "kind") +} + +func (u *Unstructured) GetNamespace() string { + return getNestedString(u.Object, "metadata", "namespace") +} + +func (u *Unstructured) SetNamespace(namespace string) { + u.setNestedField(namespace, "metadata", "namespace") +} + +func (u *Unstructured) GetName() string { + return getNestedString(u.Object, "metadata", "name") +} + +func (u *Unstructured) SetName(name string) { + u.setNestedField(name, "metadata", "name") +} + +func (u *Unstructured) GetGenerateName() string { + return getNestedString(u.Object, "metadata", "generateName") +} + +func (u *Unstructured) SetGenerateName(name string) { + u.setNestedField(name, "metadata", "generateName") +} + +func (u *Unstructured) GetUID() types.UID { + return types.UID(getNestedString(u.Object, "metadata", "uid")) +} + +func (u *Unstructured) SetUID(uid types.UID) { + u.setNestedField(string(uid), "metadata", "uid") +} + +func (u *Unstructured) GetResourceVersion() string { + return getNestedString(u.Object, "metadata", "resourceVersion") +} + +func (u *Unstructured) SetResourceVersion(version string) { + u.setNestedField(version, "metadata", "resourceVersion") +} + +func (u *Unstructured) GetSelfLink() string { + return getNestedString(u.Object, "metadata", "selfLink") +} + +func (u *Unstructured) SetSelfLink(selfLink string) { + u.setNestedField(selfLink, "metadata", "selfLink") +} + +func (u *Unstructured) GetCreationTimestamp() unversioned.Time { + var timestamp unversioned.Time + timestamp.UnmarshalQueryParameter(getNestedString(u.Object, "metadata", "creationTimestamp")) + return timestamp +} + +func (u *Unstructured) SetCreationTimestamp(timestamp unversioned.Time) { + ts, _ := timestamp.MarshalQueryParameter() + u.setNestedField(ts, "metadata", "creationTimestamp") +} + +func (u *Unstructured) GetDeletionTimestamp() *unversioned.Time { + var timestamp unversioned.Time + timestamp.UnmarshalQueryParameter(getNestedString(u.Object, "metadata", 
"deletionTimestamp")) + if timestamp.IsZero() { + return nil + } + return ×tamp +} + +func (u *Unstructured) SetDeletionTimestamp(timestamp *unversioned.Time) { + ts, _ := timestamp.MarshalQueryParameter() + u.setNestedField(ts, "metadata", "deletionTimestamp") +} + +func (u *Unstructured) GetLabels() map[string]string { + return getNestedMap(u.Object, "metadata", "labels") +} + +func (u *Unstructured) SetLabels(labels map[string]string) { + u.setNestedMap(labels, "metadata", "labels") +} + +func (u *Unstructured) GetAnnotations() map[string]string { + return getNestedMap(u.Object, "metadata", "annotations") +} + +func (u *Unstructured) SetAnnotations(annotations map[string]string) { + u.setNestedMap(annotations, "metadata", "annotations") +} + +func (u *Unstructured) SetGroupVersionKind(gvk unversioned.GroupVersionKind) { + u.SetAPIVersion(gvk.GroupVersion().String()) + u.SetKind(gvk.Kind) +} + +func (u *Unstructured) GroupVersionKind() unversioned.GroupVersionKind { + gv, err := unversioned.ParseGroupVersion(u.GetAPIVersion()) + if err != nil { + return unversioned.GroupVersionKind{} + } + gvk := gv.WithKind(u.GetKind()) + return gvk +} + +func (u *Unstructured) GetFinalizers() []string { + return getNestedSlice(u.Object, "metadata", "finalizers") +} + +func (u *Unstructured) SetFinalizers(finalizers []string) { + u.setNestedSlice(finalizers, "metadata", "finalizers") +} + // UnstructuredList allows lists that do not have Golang structs // registered to be manipulated generically. This can be used to deal // with the API lists from a plug-in. type UnstructuredList struct { - TypeMeta `json:",inline"` + Object map[string]interface{} // Items is a list of unstructured objects. Items []*Unstructured `json:"items"` } +func (u *UnstructuredList) setNestedField(value interface{}, fields ...string) { + if u.Object == nil { + u.Object = make(map[string]interface{}) + } + setNestedField(u.Object, value, fields...) +} + +func (u *UnstructuredList) GetAPIVersion() string { + return getNestedString(u.Object, "apiVersion") +} + +func (u *UnstructuredList) SetAPIVersion(version string) { + u.setNestedField(version, "apiVersion") +} + +func (u *UnstructuredList) GetKind() string { + return getNestedString(u.Object, "kind") +} + +func (u *UnstructuredList) SetKind(kind string) { + u.setNestedField(kind, "kind") +} + +func (u *UnstructuredList) GetResourceVersion() string { + return getNestedString(u.Object, "metadata", "resourceVersion") +} + +func (u *UnstructuredList) SetResourceVersion(version string) { + u.setNestedField(version, "metadata", "resourceVersion") +} + +func (u *UnstructuredList) GetSelfLink() string { + return getNestedString(u.Object, "metadata", "selfLink") +} + +func (u *UnstructuredList) SetSelfLink(selfLink string) { + u.setNestedField(selfLink, "metadata", "selfLink") +} + +func (u *UnstructuredList) SetGroupVersionKind(gvk unversioned.GroupVersionKind) { + u.SetAPIVersion(gvk.GroupVersion().String()) + u.SetKind(gvk.Kind) +} + +func (u *UnstructuredList) GroupVersionKind() unversioned.GroupVersionKind { + gv, err := unversioned.ParseGroupVersion(u.GetAPIVersion()) + if err != nil { + return unversioned.GroupVersionKind{} + } + gvk := gv.WithKind(u.GetKind()) + return gvk +} + // VersionedObjects is used by Decoders to give callers a way to access all versions // of an object during the decoding process. 
type VersionedObjects struct { diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/types_proto.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/types_proto.go new file mode 100644 index 000000000000..142dd05daaac --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/types_proto.go @@ -0,0 +1,69 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package runtime + +import ( + "fmt" +) + +type ProtobufMarshaller interface { + MarshalTo(data []byte) (int, error) +} + +// NestedMarshalTo allows a caller to avoid extra allocations during serialization of an Unknown +// that will contain an object that implements ProtobufMarshaller. +func (m *Unknown) NestedMarshalTo(data []byte, b ProtobufMarshaller, size uint64) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.TypeMeta.Size())) + n1, err := m.TypeMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n1 + + if b != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, size) + n2, err := b.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + if uint64(n2) != size { + // programmer error: the Size() method for protobuf does not match the results of MarshalTo, which means the proto + // struct returned would be wrong. + return 0, fmt.Errorf("the Size() value of %T was %d, but NestedMarshalTo wrote %d bytes to data", b, size, n2) + } + i += n2 + } + + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ContentEncoding))) + i += copy(data[i:], m.ContentEncoding) + + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ContentType))) + i += copy(data[i:], m.ContentType) + return i, nil +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/unstructured.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/unstructured.go index e4cdef8c9213..4f87b9aa29a1 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/unstructured.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/unstructured.go @@ -17,10 +17,14 @@ limitations under the License. 
package runtime import ( - "encoding/json" + gojson "encoding/json" + "errors" + "fmt" "io" + "strings" "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/util/json" ) // UnstructuredJSONScheme is capable of converting JSON data into the Unstructured @@ -44,10 +48,10 @@ func (s unstructuredJSONScheme) Decode(data []byte, _ *unversioned.GroupVersionK gvk := obj.GetObjectKind().GroupVersionKind() if len(gvk.Kind) == 0 { - return nil, gvk, NewMissingKindErr(string(data)) + return nil, &gvk, NewMissingKindErr(string(data)) } - return obj, gvk, nil + return obj, &gvk, nil } func (unstructuredJSONScheme) EncodeToStream(obj Object, w io.Writer, overrides ...unversioned.GroupVersion) error { @@ -55,19 +59,16 @@ func (unstructuredJSONScheme) EncodeToStream(obj Object, w io.Writer, overrides case *Unstructured: return json.NewEncoder(w).Encode(t.Object) case *UnstructuredList: - type encodeList struct { - TypeMeta `json:",inline"` - Items []map[string]interface{} `json:"items"` - } - eList := encodeList{ - TypeMeta: t.TypeMeta, - } + items := make([]map[string]interface{}, 0, len(t.Items)) for _, i := range t.Items { - eList.Items = append(eList.Items, i.Object) + items = append(items, i.Object) } - return json.NewEncoder(w).Encode(eList) + t.Object["items"] = items + defer func() { delete(t.Object, "items") }() + return json.NewEncoder(w).Encode(t.Object) case *Unknown: - _, err := w.Write(t.RawJSON) + // TODO: Unstructured needs to deal with ContentType. + _, err := w.Write(t.Raw) return err default: return json.NewEncoder(w).Encode(t) @@ -76,7 +77,7 @@ func (unstructuredJSONScheme) EncodeToStream(obj Object, w io.Writer, overrides func (s unstructuredJSONScheme) decode(data []byte) (Object, error) { type detector struct { - Items json.RawMessage + Items gojson.RawMessage } var det detector if err := json.Unmarshal(data, &det); err != nil { @@ -100,6 +101,13 @@ func (s unstructuredJSONScheme) decodeInto(data []byte, obj Object) error { return s.decodeToUnstructured(data, x) case *UnstructuredList: return s.decodeToList(data, x) + case *VersionedObjects: + u := new(Unstructured) + err := s.decodeToUnstructured(data, u) + if err == nil { + x.Objects = []Object{u} + } + return err default: return json.Unmarshal(data, x) } @@ -111,25 +119,6 @@ func (unstructuredJSONScheme) decodeToUnstructured(data []byte, unstruct *Unstru return err } - if v, ok := m["kind"]; ok { - if s, ok := v.(string); ok { - unstruct.Kind = s - } - } - if v, ok := m["apiVersion"]; ok { - if s, ok := v.(string); ok { - unstruct.APIVersion = s - } - } - if metadata, ok := m["metadata"]; ok { - if metadata, ok := metadata.(map[string]interface{}); ok { - if name, ok := metadata["name"]; ok { - if name, ok := name.(string); ok { - unstruct.Name = name - } - } - } - } unstruct.Object = m return nil @@ -137,8 +126,7 @@ func (unstructuredJSONScheme) decodeToUnstructured(data []byte, unstruct *Unstru func (s unstructuredJSONScheme) decodeToList(data []byte, list *UnstructuredList) error { type decodeList struct { - TypeMeta `json:",inline"` - Items []json.RawMessage + Items []gojson.RawMessage } var dList decodeList @@ -146,14 +134,66 @@ func (s unstructuredJSONScheme) decodeToList(data []byte, list *UnstructuredList return err } - list.TypeMeta = dList.TypeMeta + if err := json.Unmarshal(data, &list.Object); err != nil { + return err + } + + // For typed lists, e.g., a PodList, API server doesn't set each item's + // APIVersion and Kind. We need to set it. 
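+	// For example, items of a list decoded with kind "PodList" and
+	// apiVersion "v1" default to kind "Pod" and apiVersion "v1" when an
+	// item carries neither field itself.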
+	listAPIVersion := list.GetAPIVersion()
+	listKind := list.GetKind()
+	itemKind := strings.TrimSuffix(listKind, "List")
+
+	delete(list.Object, "items")
 	list.Items = nil
 	for _, i := range dList.Items {
 		unstruct := &Unstructured{}
 		if err := s.decodeToUnstructured([]byte(i), unstruct); err != nil {
 			return err
 		}
+		// This is hacky. Set the item's Kind and APIVersion to those inferred
+		// from the List.
+		if len(unstruct.GetKind()) == 0 && len(unstruct.GetAPIVersion()) == 0 {
+			unstruct.SetKind(itemKind)
+			unstruct.SetAPIVersion(listAPIVersion)
+		}
 		list.Items = append(list.Items, unstruct)
 	}
 	return nil
 }
+
+// UnstructuredObjectConverter is an ObjectConverter for use with
+// Unstructured objects. Since it has no schema or type information,
+// it will only succeed for no-op conversions. This is provided as a
+// sane implementation for APIs that require an object converter.
+type UnstructuredObjectConverter struct{}
+
+func (UnstructuredObjectConverter) Convert(in, out interface{}) error {
+	unstructIn, ok := in.(*Unstructured)
+	if !ok {
+		return fmt.Errorf("input type %T is not valid for unstructured conversion", in)
+	}
+
+	unstructOut, ok := out.(*Unstructured)
+	if !ok {
+		return fmt.Errorf("output type %T is not valid for unstructured conversion", out)
+	}
+
+	// maybe deep copy the map? It is documented in the
+	// ObjectConverter interface that this function is not
+	// guaranteed not to mutate the input. Or maybe set the input
+	// object to nil.
+	unstructOut.Object = unstructIn.Object
+	return nil
+}
+
+func (UnstructuredObjectConverter) ConvertToVersion(in Object, outVersion unversioned.GroupVersion) (Object, error) {
+	if gvk := in.GetObjectKind().GroupVersionKind(); gvk.GroupVersion() != outVersion {
+		return nil, errors.New("unstructured converter cannot convert versions")
+	}
+	return in, nil
+}
+
+func (UnstructuredObjectConverter) ConvertFieldLabel(version, kind, label, value string) (string, string, error) {
+	return "", "", errors.New("unstructured cannot convert field labels")
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/unstructured_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/unstructured_test.go
new file mode 100644
index 000000000000..db87f78420b1
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/unstructured_test.go
@@ -0,0 +1,430 @@
+/*
+Copyright 2015 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package runtime_test + +import ( + "fmt" + "reflect" + "strings" + "testing" + "time" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/meta/metatypes" + "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/api/validation" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/types" +) + +func TestDecodeUnstructured(t *testing.T) { + groupVersionString := testapi.Default.GroupVersion().String() + rawJson := fmt.Sprintf(`{"kind":"Pod","apiVersion":"%s","metadata":{"name":"test"}}`, groupVersionString) + pl := &api.List{ + Items: []runtime.Object{ + &api.Pod{ObjectMeta: api.ObjectMeta{Name: "1"}}, + &runtime.Unknown{ + TypeMeta: runtime.TypeMeta{Kind: "Pod", APIVersion: groupVersionString}, + Raw: []byte(rawJson), + ContentType: runtime.ContentTypeJSON, + }, + &runtime.Unknown{ + TypeMeta: runtime.TypeMeta{Kind: "", APIVersion: groupVersionString}, + Raw: []byte(rawJson), + ContentType: runtime.ContentTypeJSON, + }, + &runtime.Unstructured{ + Object: map[string]interface{}{ + "kind": "Foo", + "apiVersion": "Bar", + "test": "value", + }, + }, + }, + } + if errs := runtime.DecodeList(pl.Items, runtime.UnstructuredJSONScheme); len(errs) == 1 { + t.Fatalf("unexpected error %v", errs) + } + if pod, ok := pl.Items[1].(*runtime.Unstructured); !ok || pod.Object["kind"] != "Pod" || pod.Object["metadata"].(map[string]interface{})["name"] != "test" { + t.Errorf("object not converted: %#v", pl.Items[1]) + } + if pod, ok := pl.Items[2].(*runtime.Unstructured); !ok || pod.Object["kind"] != "Pod" || pod.Object["metadata"].(map[string]interface{})["name"] != "test" { + t.Errorf("object not converted: %#v", pl.Items[2]) + } +} + +func TestDecode(t *testing.T) { + tcs := []struct { + json []byte + want runtime.Object + }{ + { + json: []byte(`{"apiVersion": "test", "kind": "test_kind"}`), + want: &runtime.Unstructured{ + Object: map[string]interface{}{"apiVersion": "test", "kind": "test_kind"}, + }, + }, + { + json: []byte(`{"apiVersion": "test", "kind": "test_list", "items": []}`), + want: &runtime.UnstructuredList{ + Object: map[string]interface{}{"apiVersion": "test", "kind": "test_list"}, + }, + }, + { + json: []byte(`{"items": [{"metadata": {"name": "object1"}, "apiVersion": "test", "kind": "test_kind"}, {"metadata": {"name": "object2"}, "apiVersion": "test", "kind": "test_kind"}], "apiVersion": "test", "kind": "test_list"}`), + want: &runtime.UnstructuredList{ + Object: map[string]interface{}{"apiVersion": "test", "kind": "test_list"}, + Items: []*runtime.Unstructured{ + { + Object: map[string]interface{}{ + "metadata": map[string]interface{}{"name": "object1"}, + "apiVersion": "test", + "kind": "test_kind", + }, + }, + { + Object: map[string]interface{}{ + "metadata": map[string]interface{}{"name": "object2"}, + "apiVersion": "test", + "kind": "test_kind", + }, + }, + }, + }, + }, + } + + for _, tc := range tcs { + got, _, err := runtime.UnstructuredJSONScheme.Decode(tc.json, nil, nil) + if err != nil { + t.Errorf("Unexpected error for %q: %v", string(tc.json), err) + continue + } + + if !reflect.DeepEqual(got, tc.want) { + t.Errorf("Decode(%q) want: %v\ngot: %v", string(tc.json), tc.want, got) + } + } +} + +func TestUnstructuredGetters(t *testing.T) { + unstruct := runtime.Unstructured{ + Object: map[string]interface{}{ + "kind": "test_kind", + "apiVersion": "test_version", + "metadata": map[string]interface{}{ + "name": "test_name", + "namespace": "test_namespace", + "generateName": "test_generateName", + "uid": "test_uid", + 
"resourceVersion": "test_resourceVersion", + "selfLink": "test_selfLink", + "creationTimestamp": "2009-11-10T23:00:00Z", + "deletionTimestamp": "2010-11-10T23:00:00Z", + "labels": map[string]interface{}{ + "test_label": "test_value", + }, + "annotations": map[string]interface{}{ + "test_annotation": "test_value", + }, + "ownerReferences": []map[string]interface{}{ + { + "kind": "Pod", + "name": "poda", + "apiVersion": "v1", + "uid": "1", + }, + { + "kind": "Pod", + "name": "podb", + "apiVersion": "v1", + "uid": "2", + }, + }, + "finalizers": []interface{}{ + "finalizer.1", + "finalizer.2", + }, + }, + }, + } + + if got, want := unstruct.GetAPIVersion(), "test_version"; got != want { + t.Errorf("GetAPIVersions() = %s, want %s", got, want) + } + + if got, want := unstruct.GetKind(), "test_kind"; got != want { + t.Errorf("GetKind() = %s, want %s", got, want) + } + + if got, want := unstruct.GetNamespace(), "test_namespace"; got != want { + t.Errorf("GetNamespace() = %s, want %s", got, want) + } + + if got, want := unstruct.GetName(), "test_name"; got != want { + t.Errorf("GetName() = %s, want %s", got, want) + } + + if got, want := unstruct.GetGenerateName(), "test_generateName"; got != want { + t.Errorf("GetGenerateName() = %s, want %s", got, want) + } + + if got, want := unstruct.GetUID(), types.UID("test_uid"); got != want { + t.Errorf("GetUID() = %s, want %s", got, want) + } + + if got, want := unstruct.GetResourceVersion(), "test_resourceVersion"; got != want { + t.Errorf("GetResourceVersion() = %s, want %s", got, want) + } + + if got, want := unstruct.GetSelfLink(), "test_selfLink"; got != want { + t.Errorf("GetSelfLink() = %s, want %s", got, want) + } + + if got, want := unstruct.GetCreationTimestamp(), unversioned.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC); !got.Equal(want) { + t.Errorf("GetCreationTimestamp() = %s, want %s", got, want) + } + + if got, want := unstruct.GetDeletionTimestamp(), unversioned.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC); got == nil || !got.Equal(want) { + t.Errorf("GetDeletionTimestamp() = %s, want %s", got, want) + } + + if got, want := unstruct.GetLabels(), map[string]string{"test_label": "test_value"}; !reflect.DeepEqual(got, want) { + t.Errorf("GetLabels() = %s, want %s", got, want) + } + + if got, want := unstruct.GetAnnotations(), map[string]string{"test_annotation": "test_value"}; !reflect.DeepEqual(got, want) { + t.Errorf("GetAnnotations() = %s, want %s", got, want) + } + refs := unstruct.GetOwnerReferences() + expectedOwnerReferences := []metatypes.OwnerReference{ + { + Kind: "Pod", + Name: "poda", + APIVersion: "v1", + UID: "1", + }, + { + Kind: "Pod", + Name: "podb", + APIVersion: "v1", + UID: "2", + }, + } + if got, want := refs, expectedOwnerReferences; !reflect.DeepEqual(got, want) { + t.Errorf("GetOwnerReferences()=%v, want %v", got, want) + } + if got, want := unstruct.GetFinalizers(), []string{"finalizer.1", "finalizer.2"}; !reflect.DeepEqual(got, want) { + t.Errorf("GetFinalizers()=%v, want %v", got, want) + } +} + +func TestUnstructuredSetters(t *testing.T) { + unstruct := runtime.Unstructured{} + + want := runtime.Unstructured{ + Object: map[string]interface{}{ + "kind": "test_kind", + "apiVersion": "test_version", + "metadata": map[string]interface{}{ + "name": "test_name", + "namespace": "test_namespace", + "generateName": "test_generateName", + "uid": "test_uid", + "resourceVersion": "test_resourceVersion", + "selfLink": "test_selfLink", + "creationTimestamp": "2009-11-10T23:00:00Z", + "deletionTimestamp": 
"2010-11-10T23:00:00Z", + "labels": map[string]interface{}{ + "test_label": "test_value", + }, + "annotations": map[string]interface{}{ + "test_annotation": "test_value", + }, + "ownerReferences": []map[string]interface{}{ + { + "kind": "Pod", + "name": "poda", + "apiVersion": "v1", + "uid": "1", + }, + { + "kind": "Pod", + "name": "podb", + "apiVersion": "v1", + "uid": "2", + }, + }, + "finalizers": []interface{}{ + "finalizer.1", + "finalizer.2", + }, + }, + }, + } + + unstruct.SetAPIVersion("test_version") + unstruct.SetKind("test_kind") + unstruct.SetNamespace("test_namespace") + unstruct.SetName("test_name") + unstruct.SetGenerateName("test_generateName") + unstruct.SetUID(types.UID("test_uid")) + unstruct.SetResourceVersion("test_resourceVersion") + unstruct.SetSelfLink("test_selfLink") + unstruct.SetCreationTimestamp(unversioned.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC)) + date := unversioned.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC) + unstruct.SetDeletionTimestamp(&date) + unstruct.SetLabels(map[string]string{"test_label": "test_value"}) + unstruct.SetAnnotations(map[string]string{"test_annotation": "test_value"}) + newOwnerReferences := []metatypes.OwnerReference{ + { + Kind: "Pod", + Name: "poda", + APIVersion: "v1", + UID: "1", + }, + { + Kind: "Pod", + Name: "podb", + APIVersion: "v1", + UID: "2", + }, + } + unstruct.SetOwnerReferences(newOwnerReferences) + unstruct.SetFinalizers([]string{"finalizer.1", "finalizer.2"}) + + if !reflect.DeepEqual(unstruct, want) { + t.Errorf("Wanted: \n%s\n Got:\n%s", want, unstruct) + } +} + +func TestUnstructuredListGetters(t *testing.T) { + unstruct := runtime.UnstructuredList{ + Object: map[string]interface{}{ + "kind": "test_kind", + "apiVersion": "test_version", + "metadata": map[string]interface{}{ + "resourceVersion": "test_resourceVersion", + "selfLink": "test_selfLink", + }, + }, + } + + if got, want := unstruct.GetAPIVersion(), "test_version"; got != want { + t.Errorf("GetAPIVersions() = %s, want %s", got, want) + } + + if got, want := unstruct.GetKind(), "test_kind"; got != want { + t.Errorf("GetKind() = %s, want %s", got, want) + } + + if got, want := unstruct.GetResourceVersion(), "test_resourceVersion"; got != want { + t.Errorf("GetResourceVersion() = %s, want %s", got, want) + } + + if got, want := unstruct.GetSelfLink(), "test_selfLink"; got != want { + t.Errorf("GetSelfLink() = %s, want %s", got, want) + } +} + +func TestUnstructuredListSetters(t *testing.T) { + unstruct := runtime.UnstructuredList{} + + want := runtime.UnstructuredList{ + Object: map[string]interface{}{ + "kind": "test_kind", + "apiVersion": "test_version", + "metadata": map[string]interface{}{ + "resourceVersion": "test_resourceVersion", + "selfLink": "test_selfLink", + }, + }, + } + + unstruct.SetAPIVersion("test_version") + unstruct.SetKind("test_kind") + unstruct.SetResourceVersion("test_resourceVersion") + unstruct.SetSelfLink("test_selfLink") + + if !reflect.DeepEqual(unstruct, want) { + t.Errorf("Wanted: \n%s\n Got:\n%s", unstruct, want) + } +} + +func TestDecodeNumbers(t *testing.T) { + + // Start with a valid pod + originalJSON := []byte(`{ + "kind":"Pod", + "apiVersion":"v1", + "metadata":{"name":"pod","namespace":"foo"}, + "spec":{ + "containers":[{"name":"container","image":"container"}], + "activeDeadlineSeconds":1000030003 + } + }`) + + pod := &api.Pod{} + + // Decode with structured codec + codec, err := testapi.GetCodecForObject(pod) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + err = 
runtime.DecodeInto(codec, originalJSON, pod) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + // ensure pod is valid + if errs := validation.ValidatePod(pod); len(errs) > 0 { + t.Fatalf("pod should be valid: %v", errs) + } + + // Round-trip with unstructured codec + unstructuredObj, err := runtime.Decode(runtime.UnstructuredJSONScheme, originalJSON) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + roundtripJSON, err := runtime.Encode(runtime.UnstructuredJSONScheme, unstructuredObj) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + // Make sure we serialize back out in int form + if !strings.Contains(string(roundtripJSON), `"activeDeadlineSeconds":1000030003`) { + t.Errorf("Expected %s, got %s", `"activeDeadlineSeconds":1000030003`, string(roundtripJSON)) + } + + // Decode with structured codec again + obj2, err := runtime.Decode(codec, roundtripJSON) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + // ensure pod is still valid + pod2, ok := obj2.(*api.Pod) + if !ok { + t.Fatalf("expected an *api.Pod, got %#v", obj2) + } + if errs := validation.ValidatePod(pod2); len(errs) > 0 { + t.Fatalf("pod should be valid: %v", errs) + } + // ensure round-trip preserved large integers + if !reflect.DeepEqual(pod, pod2) { + t.Fatalf("Expected\n\t%#v, got \n\t%#v", pod, pod2) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/unversioned_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/unversioned_test.go new file mode 100644 index 000000000000..7943581ea513 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/runtime/unversioned_test.go @@ -0,0 +1,92 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package runtime_test + +import ( + "encoding/json" + "reflect" + "testing" + + // TODO: Ideally we should create the necessary package structure in e.g., + // pkg/conversion/test/... instead of importing pkg/api here. + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/pkg/runtime" +) + +var status = &unversioned.Status{ + Status: unversioned.StatusFailure, + Code: 200, + Reason: unversioned.StatusReasonUnknown, + Message: "", +} + +func TestV1EncodeDecodeStatus(t *testing.T) { + + v1Codec := testapi.Default.Codec() + + encoded, err := runtime.Encode(v1Codec, status) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + typeMeta := unversioned.TypeMeta{} + if err := json.Unmarshal(encoded, &typeMeta); err != nil { + t.Errorf("unexpected error: %v", err) + } + if typeMeta.Kind != "Status" { + t.Errorf("Kind is not set to \"Status\". Got %v", string(encoded)) + } + if typeMeta.APIVersion != "v1" { + t.Errorf("APIVersion is not set to \"v1\". 
Got %v", string(encoded)) + } + decoded, err := runtime.Decode(v1Codec, encoded) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + if !reflect.DeepEqual(status, decoded) { + t.Errorf("expected: %v, got: %v", status, decoded) + } +} + +func TestExperimentalEncodeDecodeStatus(t *testing.T) { + // TODO: caesarxuchao: use the testapi.Extensions.Codec() once the PR that + // moves experimental from v1 to v1beta1 got merged. + expCodec := api.Codecs.LegacyCodec(extensions.SchemeGroupVersion) + encoded, err := runtime.Encode(expCodec, status) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + typeMeta := unversioned.TypeMeta{} + if err := json.Unmarshal(encoded, &typeMeta); err != nil { + t.Errorf("unexpected error: %v", err) + } + if typeMeta.Kind != "Status" { + t.Errorf("Kind is not set to \"Status\". Got %s", encoded) + } + if typeMeta.APIVersion != "v1" { + t.Errorf("APIVersion is not set to \"\". Got %s", encoded) + } + decoded, err := runtime.Decode(expCodec, encoded) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + if !reflect.DeepEqual(status, decoded) { + t.Errorf("expected: %v, got: %v", status, decoded) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/security/podsecuritypolicy/capabilities/mustrunas.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/security/podsecuritypolicy/capabilities/mustrunas.go new file mode 100644 index 000000000000..1ffc55fdba7f --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/security/podsecuritypolicy/capabilities/mustrunas.go @@ -0,0 +1,149 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package capabilities + +import ( + "fmt" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/util/sets" + "k8s.io/kubernetes/pkg/util/validation/field" +) + +// defaultCapabilities implements the CapabilitiesStrategy interface +type defaultCapabilities struct { + defaultAddCapabilities []api.Capability + requiredDropCapabilities []api.Capability + allowedCaps []api.Capability +} + +var _ CapabilitiesStrategy = &defaultCapabilities{} + +// NewDefaultCapabilities creates a new defaultCapabilities strategy that will provide defaults and validation +// based on the configured initial caps and allowed caps. +func NewDefaultCapabilities(defaultAddCapabilities, requiredDropCapabilities, allowedCaps []api.Capability) (CapabilitiesStrategy, error) { + return &defaultCapabilities{ + defaultAddCapabilities: defaultAddCapabilities, + requiredDropCapabilities: requiredDropCapabilities, + allowedCaps: allowedCaps, + }, nil +} + +// Generate creates the capabilities based on policy rules. Generate will produce the following: +// 1. a capabilities.Add set containing all the required adds (unless the +// container specifically is dropping the cap) and container requested adds +// 2. 
a capabilities.Drop set containing all the required drops and container requested drops +func (s *defaultCapabilities) Generate(pod *api.Pod, container *api.Container) (*api.Capabilities, error) { + defaultAdd := makeCapSet(s.defaultAddCapabilities) + requiredDrop := makeCapSet(s.requiredDropCapabilities) + containerAdd := sets.NewString() + containerDrop := sets.NewString() + + if container.SecurityContext != nil && container.SecurityContext.Capabilities != nil { + containerAdd = makeCapSet(container.SecurityContext.Capabilities.Add) + containerDrop = makeCapSet(container.SecurityContext.Capabilities.Drop) + } + + // remove any default adds that the container is specifically dropping + defaultAdd = defaultAdd.Difference(containerDrop) + + combinedAdd := defaultAdd.Union(containerAdd).List() + combinedDrop := requiredDrop.Union(containerDrop).List() + + // nothing generated? return nil + if len(combinedAdd) == 0 && len(combinedDrop) == 0 { + return nil, nil + } + + return &api.Capabilities{ + Add: capabilityFromStringSlice(combinedAdd), + Drop: capabilityFromStringSlice(combinedDrop), + }, nil +} + +// Validate ensures that the specified values fall within the range of the strategy. +func (s *defaultCapabilities) Validate(pod *api.Pod, container *api.Container) field.ErrorList { + allErrs := field.ErrorList{} + + // if the security context isn't set then we haven't generated correctly. Shouldn't get here + // if using the provider correctly + if container.SecurityContext == nil { + allErrs = append(allErrs, field.Invalid(field.NewPath("securityContext"), container.SecurityContext, "no security context is set")) + return allErrs + } + + if container.SecurityContext.Capabilities == nil { + // if container.SC.Caps is nil then nothing was defaulted by the strat or requested by the pod author + // if there are no required caps on the strategy and nothing is requested on the pod + // then we can safely return here without further validation. + if len(s.defaultAddCapabilities) == 0 && len(s.requiredDropCapabilities) == 0 { + return allErrs + } + + // container has no requested caps but we have required caps. We should have something in + // at least the drops on the container. + allErrs = append(allErrs, field.Invalid(field.NewPath("capabilities"), container.SecurityContext.Capabilities, + "required capabilities are not set on the securityContext")) + return allErrs + } + + // validate that anything being added is in the default or allowed sets + defaultAdd := makeCapSet(s.defaultAddCapabilities) + allowedAdd := makeCapSet(s.allowedCaps) + + for _, cap := range container.SecurityContext.Capabilities.Add { + sCap := string(cap) + if !defaultAdd.Has(sCap) && !allowedAdd.Has(sCap) { + allErrs = append(allErrs, field.Invalid(field.NewPath("capabilities", "add"), sCap, "capability may not be added")) + } + } + + // validate that anything that is required to be dropped is in the drop set + containerDrops := makeCapSet(container.SecurityContext.Capabilities.Drop) + + for _, requiredDrop := range s.requiredDropCapabilities { + sDrop := string(requiredDrop) + if !containerDrops.Has(sDrop) { + allErrs = append(allErrs, field.Invalid(field.NewPath("capabilities", "drop"), container.SecurityContext.Capabilities.Drop, + fmt.Sprintf("%s is required to be dropped but was not found", sDrop))) + } + } + + return allErrs +} + +// capabilityFromStringSlice creates a capability slice from a string slice. 
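+// An empty or nil input yields nil so that a generated Capabilities value
+// omits the list entirely rather than carrying an empty slice.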
+func capabilityFromStringSlice(slice []string) []api.Capability { + if len(slice) == 0 { + return nil + } + caps := []api.Capability{} + for _, c := range slice { + caps = append(caps, api.Capability(c)) + } + return caps +} + +// makeCapSet makes a string set from capabilities and normalizes them to be all lower case to help +// with comparisons. +func makeCapSet(caps []api.Capability) sets.String { + s := sets.NewString() + for _, c := range caps { + s.Insert(string(c)) + } + return s +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/security/podsecuritypolicy/capabilities/mustrunas_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/security/podsecuritypolicy/capabilities/mustrunas_test.go new file mode 100644 index 000000000000..46cd2a2a4ec4 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/security/podsecuritypolicy/capabilities/mustrunas_test.go @@ -0,0 +1,387 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package capabilities + +import ( + "k8s.io/kubernetes/pkg/api" + "reflect" + "testing" +) + +func TestGenerateAdds(t *testing.T) { + tests := map[string]struct { + defaultAddCaps []api.Capability + requiredDropCaps []api.Capability + containerCaps *api.Capabilities + expectedCaps *api.Capabilities + }{ + "no required, no container requests": { + expectedCaps: nil, + }, + "required, no container requests": { + defaultAddCaps: []api.Capability{"foo"}, + expectedCaps: &api.Capabilities{ + Add: []api.Capability{"foo"}, + }, + }, + "required, container requests add required": { + defaultAddCaps: []api.Capability{"foo"}, + containerCaps: &api.Capabilities{ + Add: []api.Capability{"foo"}, + }, + expectedCaps: &api.Capabilities{ + Add: []api.Capability{"foo"}, + }, + }, + "multiple required, container requests add required": { + defaultAddCaps: []api.Capability{"foo", "bar", "baz"}, + containerCaps: &api.Capabilities{ + Add: []api.Capability{"foo"}, + }, + expectedCaps: &api.Capabilities{ + Add: []api.Capability{"bar", "baz", "foo"}, + }, + }, + "required, container requests add non-required": { + defaultAddCaps: []api.Capability{"foo"}, + containerCaps: &api.Capabilities{ + Add: []api.Capability{"bar"}, + }, + expectedCaps: &api.Capabilities{ + Add: []api.Capability{"bar", "foo"}, + }, + }, + "generation dedupes": { + defaultAddCaps: []api.Capability{"foo", "foo", "foo", "foo"}, + containerCaps: &api.Capabilities{ + Add: []api.Capability{"foo", "foo", "foo"}, + }, + expectedCaps: &api.Capabilities{ + Add: []api.Capability{"foo"}, + }, + }, + "generation is case sensitive - will not dedupe": { + defaultAddCaps: []api.Capability{"foo"}, + containerCaps: &api.Capabilities{ + Add: []api.Capability{"FOO"}, + }, + expectedCaps: &api.Capabilities{ + Add: []api.Capability{"FOO", "foo"}, + }, + }, + } + + for k, v := range tests { + container := &api.Container{ + SecurityContext: &api.SecurityContext{ + Capabilities: v.containerCaps, + }, + } + + strategy, err := NewDefaultCapabilities(v.defaultAddCaps, 
v.requiredDropCaps, nil) + if err != nil { + t.Errorf("%s failed: %v", k, err) + continue + } + generatedCaps, err := strategy.Generate(nil, container) + if err != nil { + t.Errorf("%s failed generating: %v", k, err) + continue + } + if v.expectedCaps == nil && generatedCaps != nil { + t.Errorf("%s expected nil caps to be generated but got %v", k, generatedCaps) + continue + } + if !reflect.DeepEqual(v.expectedCaps, generatedCaps) { + t.Errorf("%s did not generate correctly. Expected: %#v, Actual: %#v", k, v.expectedCaps, generatedCaps) + } + } +} + +func TestGenerateDrops(t *testing.T) { + tests := map[string]struct { + defaultAddCaps []api.Capability + requiredDropCaps []api.Capability + containerCaps *api.Capabilities + expectedCaps *api.Capabilities + }{ + "no required, no container requests": { + expectedCaps: nil, + }, + "required drops are defaulted": { + requiredDropCaps: []api.Capability{"foo"}, + expectedCaps: &api.Capabilities{ + Drop: []api.Capability{"foo"}, + }, + }, + "required drops are defaulted when making container requests": { + requiredDropCaps: []api.Capability{"foo"}, + containerCaps: &api.Capabilities{ + Drop: []api.Capability{"foo", "bar"}, + }, + expectedCaps: &api.Capabilities{ + Drop: []api.Capability{"bar", "foo"}, + }, + }, + "can drop a required add": { + defaultAddCaps: []api.Capability{"foo"}, + containerCaps: &api.Capabilities{ + Drop: []api.Capability{"foo"}, + }, + expectedCaps: &api.Capabilities{ + Drop: []api.Capability{"foo"}, + }, + }, + "can drop non-required add": { + defaultAddCaps: []api.Capability{"foo"}, + containerCaps: &api.Capabilities{ + Drop: []api.Capability{"bar"}, + }, + expectedCaps: &api.Capabilities{ + Add: []api.Capability{"foo"}, + Drop: []api.Capability{"bar"}, + }, + }, + "defaulting adds and drops, dropping a required add": { + defaultAddCaps: []api.Capability{"foo", "bar", "baz"}, + requiredDropCaps: []api.Capability{"abc"}, + containerCaps: &api.Capabilities{ + Drop: []api.Capability{"foo"}, + }, + expectedCaps: &api.Capabilities{ + Add: []api.Capability{"bar", "baz"}, + Drop: []api.Capability{"abc", "foo"}, + }, + }, + "generation dedupes": { + requiredDropCaps: []api.Capability{"bar", "bar", "bar", "bar"}, + containerCaps: &api.Capabilities{ + Drop: []api.Capability{"bar", "bar", "bar"}, + }, + expectedCaps: &api.Capabilities{ + Drop: []api.Capability{"bar"}, + }, + }, + "generation is case sensitive - will not dedupe": { + requiredDropCaps: []api.Capability{"bar"}, + containerCaps: &api.Capabilities{ + Drop: []api.Capability{"BAR"}, + }, + expectedCaps: &api.Capabilities{ + Drop: []api.Capability{"BAR", "bar"}, + }, + }, + } + for k, v := range tests { + container := &api.Container{ + SecurityContext: &api.SecurityContext{ + Capabilities: v.containerCaps, + }, + } + + strategy, err := NewDefaultCapabilities(v.defaultAddCaps, v.requiredDropCaps, nil) + if err != nil { + t.Errorf("%s failed: %v", k, err) + continue + } + generatedCaps, err := strategy.Generate(nil, container) + if err != nil { + t.Errorf("%s failed generating: %v", k, err) + continue + } + if v.expectedCaps == nil && generatedCaps != nil { + t.Errorf("%s expected nil caps to be generated but got %#v", k, generatedCaps) + continue + } + if !reflect.DeepEqual(v.expectedCaps, generatedCaps) { + t.Errorf("%s did not generate correctly. 
Expected: %#v, Actual: %#v", k, v.expectedCaps, generatedCaps)
+		}
+	}
+}
+
+func TestValidateAdds(t *testing.T) {
+	tests := map[string]struct {
+		defaultAddCaps   []api.Capability
+		requiredDropCaps []api.Capability
+		allowedCaps      []api.Capability
+		containerCaps    *api.Capabilities
+		shouldPass       bool
+	}{
+		// no container requests
+		"no required, no allowed, no container requests": {
+			shouldPass: true,
+		},
+		"no required, allowed, no container requests": {
+			allowedCaps: []api.Capability{"foo"},
+			shouldPass:  true,
+		},
+		"required, no allowed, no container requests": {
+			defaultAddCaps: []api.Capability{"foo"},
+			shouldPass:     false,
+		},
+
+		// container requests match required
+		"required, no allowed, container requests valid": {
+			defaultAddCaps: []api.Capability{"foo"},
+			containerCaps: &api.Capabilities{
+				Add: []api.Capability{"foo"},
+			},
+			shouldPass: true,
+		},
+		"required, no allowed, container requests invalid": {
+			defaultAddCaps: []api.Capability{"foo"},
+			containerCaps: &api.Capabilities{
+				Add: []api.Capability{"bar"},
+			},
+			shouldPass: false,
+		},
+
+		// container requests match allowed
+		"no required, allowed, container requests valid": {
+			allowedCaps: []api.Capability{"foo"},
+			containerCaps: &api.Capabilities{
+				Add: []api.Capability{"foo"},
+			},
+			shouldPass: true,
+		},
+		"no required, allowed, container requests invalid": {
+			allowedCaps: []api.Capability{"foo"},
+			containerCaps: &api.Capabilities{
+				Add: []api.Capability{"bar"},
+			},
+			shouldPass: false,
+		},
+
+		// required and allowed
+		"required, allowed, container requests valid required": {
+			defaultAddCaps: []api.Capability{"foo"},
+			allowedCaps:    []api.Capability{"bar"},
+			containerCaps: &api.Capabilities{
+				Add: []api.Capability{"foo"},
+			},
+			shouldPass: true,
+		},
+		"required, allowed, container requests valid allowed": {
+			defaultAddCaps: []api.Capability{"foo"},
+			allowedCaps:    []api.Capability{"bar"},
+			containerCaps: &api.Capabilities{
+				Add: []api.Capability{"bar"},
+			},
+			shouldPass: true,
+		},
+		"required, allowed, container requests invalid": {
+			defaultAddCaps: []api.Capability{"foo"},
+			allowedCaps:    []api.Capability{"bar"},
+			containerCaps: &api.Capabilities{
+				Add: []api.Capability{"baz"},
+			},
+			shouldPass: false,
+		},
+		"validation is case sensitive": {
+			defaultAddCaps: []api.Capability{"foo"},
+			containerCaps: &api.Capabilities{
+				Add: []api.Capability{"FOO"},
+			},
+			shouldPass: false,
+		},
+	}
+
+	for k, v := range tests {
+		container := &api.Container{
+			SecurityContext: &api.SecurityContext{
+				Capabilities: v.containerCaps,
+			},
+		}
+
+		strategy, err := NewDefaultCapabilities(v.defaultAddCaps, v.requiredDropCaps, v.allowedCaps)
+		if err != nil {
+			t.Errorf("%s failed: %v", k, err)
+			continue
+		}
+		errs := strategy.Validate(nil, container)
+		if v.shouldPass && len(errs) > 0 {
+			t.Errorf("%s should have passed but had errors %v", k, errs)
+			continue
+		}
+		if !v.shouldPass && len(errs) == 0 {
+			t.Errorf("%s should have failed but received no errors", k)
+		}
+	}
+}
+
+func TestValidateDrops(t *testing.T) {
+	tests := map[string]struct {
+		defaultAddCaps   []api.Capability
+		requiredDropCaps []api.Capability
+		containerCaps    *api.Capabilities
+		shouldPass       bool
+	}{
+		// no container requests
+		"no required, no container requests": {
+			shouldPass: true,
+		},
+		"required, no container requests": {
+			requiredDropCaps: []api.Capability{"foo"},
+			shouldPass:       false,
+		},
+
+		// container requests match required
+		"required, container requests valid": {
+			requiredDropCaps: []api.Capability{"foo"},
+			containerCaps: &api.Capabilities{
+				Drop: []api.Capability{"foo"},
+			},
+			shouldPass: true,
+		},
+		"required, container requests invalid": {
+			requiredDropCaps: []api.Capability{"foo"},
+			containerCaps: &api.Capabilities{
+				Drop: []api.Capability{"bar"},
+			},
+			shouldPass: false,
+		},
+		"validation is case sensitive": {
+			requiredDropCaps: []api.Capability{"foo"},
+			containerCaps: &api.Capabilities{
+				Drop: []api.Capability{"FOO"},
+			},
+			shouldPass: false,
+		},
+	}
+
+	for k, v := range tests {
+		container := &api.Container{
+			SecurityContext: &api.SecurityContext{
+				Capabilities: v.containerCaps,
+			},
+		}
+
+		strategy, err := NewDefaultCapabilities(v.defaultAddCaps, v.requiredDropCaps, nil)
+		if err != nil {
+			t.Errorf("%s failed: %v", k, err)
+			continue
+		}
+		errs := strategy.Validate(nil, container)
+		if v.shouldPass && len(errs) > 0 {
+			t.Errorf("%s should have passed but had errors %v", k, errs)
+			continue
+		}
+		if !v.shouldPass && len(errs) == 0 {
+			t.Errorf("%s should have failed but received no errors", k)
+		}
+	}
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/security/podsecuritypolicy/capabilities/types.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/security/podsecuritypolicy/capabilities/types.go
new file mode 100644
index 000000000000..428f24558426
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/security/podsecuritypolicy/capabilities/types.go
@@ -0,0 +1,30 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package capabilities
+
+import (
+	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/util/validation/field"
+)
+
+// CapabilitiesStrategy defines the interface for all cap constraint strategies.
+type CapabilitiesStrategy interface {
+	// Generate creates the capabilities based on policy rules.
+	Generate(pod *api.Pod, container *api.Container) (*api.Capabilities, error)
+	// Validate ensures that the specified values fall within the range of the strategy.
+	Validate(pod *api.Pod, container *api.Container) field.ErrorList
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/security/podsecuritypolicy/factory.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/security/podsecuritypolicy/factory.go
new file mode 100644
index 000000000000..477845f14768
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/security/podsecuritypolicy/factory.go
@@ -0,0 +1,135 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package podsecuritypolicy + +import ( + "fmt" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/pkg/security/podsecuritypolicy/capabilities" + "k8s.io/kubernetes/pkg/security/podsecuritypolicy/group" + "k8s.io/kubernetes/pkg/security/podsecuritypolicy/selinux" + "k8s.io/kubernetes/pkg/security/podsecuritypolicy/user" + "k8s.io/kubernetes/pkg/util/errors" +) + +type simpleStrategyFactory struct{} + +var _ StrategyFactory = &simpleStrategyFactory{} + +func NewSimpleStrategyFactory() StrategyFactory { + return &simpleStrategyFactory{} +} + +func (f *simpleStrategyFactory) CreateStrategies(psp *extensions.PodSecurityPolicy, namespace string) (*ProviderStrategies, error) { + errs := []error{} + + userStrat, err := createUserStrategy(&psp.Spec.RunAsUser) + if err != nil { + errs = append(errs, err) + } + + seLinuxStrat, err := createSELinuxStrategy(&psp.Spec.SELinux) + if err != nil { + errs = append(errs, err) + } + + fsGroupStrat, err := createFSGroupStrategy(&psp.Spec.FSGroup) + if err != nil { + errs = append(errs, err) + } + + supGroupStrat, err := createSupplementalGroupStrategy(&psp.Spec.SupplementalGroups) + if err != nil { + errs = append(errs, err) + } + + capStrat, err := createCapabilitiesStrategy(psp.Spec.DefaultAddCapabilities, psp.Spec.RequiredDropCapabilities, psp.Spec.AllowedCapabilities) + if err != nil { + errs = append(errs, err) + } + + if len(errs) > 0 { + return nil, errors.NewAggregate(errs) + } + + strategies := &ProviderStrategies{ + RunAsUserStrategy: userStrat, + SELinuxStrategy: seLinuxStrat, + FSGroupStrategy: fsGroupStrat, + SupplementalGroupStrategy: supGroupStrat, + CapabilitiesStrategy: capStrat, + } + + return strategies, nil +} + +// createUserStrategy creates a new user strategy. +func createUserStrategy(opts *extensions.RunAsUserStrategyOptions) (user.RunAsUserStrategy, error) { + switch opts.Rule { + case extensions.RunAsUserStrategyMustRunAs: + return user.NewMustRunAs(opts) + case extensions.RunAsUserStrategyMustRunAsNonRoot: + return user.NewRunAsNonRoot(opts) + case extensions.RunAsUserStrategyRunAsAny: + return user.NewRunAsAny(opts) + default: + return nil, fmt.Errorf("Unrecognized RunAsUser strategy type %s", opts.Rule) + } +} + +// createSELinuxStrategy creates a new selinux strategy. 
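+// Unrecognized rules are rejected with an error rather than silently
+// defaulted, mirroring createUserStrategy above.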
+func createSELinuxStrategy(opts *extensions.SELinuxStrategyOptions) (selinux.SELinuxStrategy, error) { + switch opts.Rule { + case extensions.SELinuxStrategyMustRunAs: + return selinux.NewMustRunAs(opts) + case extensions.SELinuxStrategyRunAsAny: + return selinux.NewRunAsAny(opts) + default: + return nil, fmt.Errorf("Unrecognized SELinuxContext strategy type %s", opts.Rule) + } +} + +// createFSGroupStrategy creates a new fsgroup strategy +func createFSGroupStrategy(opts *extensions.FSGroupStrategyOptions) (group.GroupStrategy, error) { + switch opts.Rule { + case extensions.FSGroupStrategyRunAsAny: + return group.NewRunAsAny() + case extensions.FSGroupStrategyMustRunAs: + return group.NewMustRunAs(opts.Ranges, fsGroupField) + default: + return nil, fmt.Errorf("Unrecognized FSGroup strategy type %s", opts.Rule) + } +} + +// createSupplementalGroupStrategy creates a new supplemental group strategy +func createSupplementalGroupStrategy(opts *extensions.SupplementalGroupsStrategyOptions) (group.GroupStrategy, error) { + switch opts.Rule { + case extensions.SupplementalGroupsStrategyRunAsAny: + return group.NewRunAsAny() + case extensions.SupplementalGroupsStrategyMustRunAs: + return group.NewMustRunAs(opts.Ranges, supplementalGroupsField) + default: + return nil, fmt.Errorf("Unrecognized SupplementalGroups strategy type %s", opts.Rule) + } +} + +// createCapabilitiesStrategy creates a new capabilities strategy. +func createCapabilitiesStrategy(defaultAddCaps, requiredDropCaps, allowedCaps []api.Capability) (capabilities.CapabilitiesStrategy, error) { + return capabilities.NewDefaultCapabilities(defaultAddCaps, requiredDropCaps, allowedCaps) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/security/podsecuritypolicy/group/mustrunas.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/security/podsecuritypolicy/group/mustrunas.go new file mode 100644 index 000000000000..bcb2edade9f3 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/security/podsecuritypolicy/group/mustrunas.go @@ -0,0 +1,93 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package group + +import ( + "fmt" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/extensions" + psputil "k8s.io/kubernetes/pkg/security/podsecuritypolicy/util" + "k8s.io/kubernetes/pkg/util/validation/field" +) + +// mustRunAs implements the GroupStrategy interface +type mustRunAs struct { + ranges []extensions.IDRange + field string +} + +var _ GroupStrategy = &mustRunAs{} + +// NewMustRunAs provides a new MustRunAs strategy based on ranges. +func NewMustRunAs(ranges []extensions.IDRange, field string) (GroupStrategy, error) { + if len(ranges) == 0 { + return nil, fmt.Errorf("ranges must be supplied for MustRunAs") + } + return &mustRunAs{ + ranges: ranges, + field: field, + }, nil +} + +// Generate creates the group based on policy rules. By default this returns the first group of the +// first range (min val). 
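+// For example, with ranges of [{Min: 1, Max: 5}, {Min: 10, Max: 20}] both
+// Generate and GenerateSingle below yield 1.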
+func (s *mustRunAs) Generate(pod *api.Pod) ([]int64, error) { + return []int64{s.ranges[0].Min}, nil +} + +// Generate a single value to be applied. This is used for FSGroup. This strategy will return +// the first group of the first range (min val). +func (s *mustRunAs) GenerateSingle(pod *api.Pod) (*int64, error) { + single := new(int64) + *single = s.ranges[0].Min + return single, nil +} + +// Validate ensures that the specified values fall within the range of the strategy. +// Groups are passed in here to allow this strategy to support multiple group fields (fsgroup and +// supplemental groups). +func (s *mustRunAs) Validate(pod *api.Pod, groups []int64) field.ErrorList { + allErrs := field.ErrorList{} + + if pod.Spec.SecurityContext == nil { + allErrs = append(allErrs, field.Invalid(field.NewPath("securityContext"), pod.Spec.SecurityContext, "unable to validate nil security context")) + return allErrs + } + + if len(groups) == 0 && len(s.ranges) > 0 { + allErrs = append(allErrs, field.Invalid(field.NewPath(s.field), groups, "unable to validate empty groups against required ranges")) + } + + for _, group := range groups { + if !s.isGroupValid(group) { + detail := fmt.Sprintf("%d is not an allowed group", group) + allErrs = append(allErrs, field.Invalid(field.NewPath(s.field), groups, detail)) + } + } + + return allErrs +} + +func (s *mustRunAs) isGroupValid(group int64) bool { + for _, rng := range s.ranges { + if psputil.FallsInRange(group, rng) { + return true + } + } + return false +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/security/podsecuritypolicy/group/mustrunas_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/security/podsecuritypolicy/group/mustrunas_test.go new file mode 100644 index 000000000000..31c2c21098b8 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/security/podsecuritypolicy/group/mustrunas_test.go @@ -0,0 +1,193 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package group + +import ( + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/extensions" +) + +func TestMustRunAsOptions(t *testing.T) { + tests := map[string]struct { + ranges []extensions.IDRange + pass bool + }{ + "empty": { + ranges: []extensions.IDRange{}, + }, + "ranges": { + ranges: []extensions.IDRange{ + {Min: 1, Max: 1}, + }, + pass: true, + }, + } + + for k, v := range tests { + _, err := NewMustRunAs(v.ranges, "") + if v.pass && err != nil { + t.Errorf("error creating strategy for %s: %v", k, err) + } + if !v.pass && err == nil { + t.Errorf("expected error for %s but got none", k) + } + } +} + +func TestGenerate(t *testing.T) { + tests := map[string]struct { + ranges []extensions.IDRange + expected []int64 + }{ + "multi value": { + ranges: []extensions.IDRange{ + {Min: 1, Max: 2}, + }, + expected: []int64{1}, + }, + "single value": { + ranges: []extensions.IDRange{ + {Min: 1, Max: 1}, + }, + expected: []int64{1}, + }, + "multi range": { + ranges: []extensions.IDRange{ + {Min: 1, Max: 1}, + {Min: 2, Max: 500}, + }, + expected: []int64{1}, + }, + } + + for k, v := range tests { + s, err := NewMustRunAs(v.ranges, "") + if err != nil { + t.Errorf("error creating strategy for %s: %v", k, err) + } + actual, err := s.Generate(nil) + if err != nil { + t.Errorf("unexpected error for %s: %v", k, err) + } + if len(actual) != len(v.expected) { + t.Errorf("unexpected generated values. Expected %v, got %v", v.expected, actual) + continue + } + if len(actual) > 0 && len(v.expected) > 0 { + if actual[0] != v.expected[0] { + t.Errorf("unexpected generated values. Expected %v, got %v", v.expected, actual) + } + } + + single, err := s.GenerateSingle(nil) + if err != nil { + t.Errorf("unexpected error for %s: %v", k, err) + } + if single == nil { + t.Errorf("unexpected nil generated value for %s: %v", k, single) + } + if *single != v.expected[0] { + t.Errorf("unexpected generated single value. 
Expected %v, got %v", v.expected, *single) + } + } +} + +func TestValidate(t *testing.T) { + validPod := func() *api.Pod { + return &api.Pod{ + Spec: api.PodSpec{ + SecurityContext: &api.PodSecurityContext{}, + }, + } + } + + tests := map[string]struct { + ranges []extensions.IDRange + pod *api.Pod + groups []int64 + pass bool + }{ + "nil security context": { + pod: &api.Pod{}, + ranges: []extensions.IDRange{ + {Min: 1, Max: 3}, + }, + }, + "empty groups": { + pod: validPod(), + ranges: []extensions.IDRange{ + {Min: 1, Max: 3}, + }, + }, + "not in range": { + pod: validPod(), + groups: []int64{5}, + ranges: []extensions.IDRange{ + {Min: 1, Max: 3}, + {Min: 4, Max: 4}, + }, + }, + "in range 1": { + pod: validPod(), + groups: []int64{2}, + ranges: []extensions.IDRange{ + {Min: 1, Max: 3}, + }, + pass: true, + }, + "in range boundary min": { + pod: validPod(), + groups: []int64{1}, + ranges: []extensions.IDRange{ + {Min: 1, Max: 3}, + }, + pass: true, + }, + "in range boundary max": { + pod: validPod(), + groups: []int64{3}, + ranges: []extensions.IDRange{ + {Min: 1, Max: 3}, + }, + pass: true, + }, + "singular range": { + pod: validPod(), + groups: []int64{4}, + ranges: []extensions.IDRange{ + {Min: 4, Max: 4}, + }, + pass: true, + }, + } + + for k, v := range tests { + s, err := NewMustRunAs(v.ranges, "") + if err != nil { + t.Errorf("error creating strategy for %s: %v", k, err) + } + errs := s.Validate(v.pod, v.groups) + if v.pass && len(errs) > 0 { + t.Errorf("unexpected errors for %s: %v", k, errs) + } + if !v.pass && len(errs) == 0 { + t.Errorf("expected errors for %s but got none", k) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/security/podsecuritypolicy/group/runasany.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/security/podsecuritypolicy/group/runasany.go new file mode 100644 index 000000000000..2398a2ab5387 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/security/podsecuritypolicy/group/runasany.go @@ -0,0 +1,49 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package group + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/util/validation/field" +) + +// runAsAny implements the GroupStrategy interface +type runAsAny struct { +} + +var _ GroupStrategy = &runAsAny{} + +// NewRunAsAny provides a new RunAsAny strategy. +func NewRunAsAny() (GroupStrategy, error) { + return &runAsAny{}, nil +} + +// Generate creates the group based on policy rules. This strategy returns an empty slice. +func (s *runAsAny) Generate(pod *api.Pod) ([]int64, error) { + return []int64{}, nil +} + +// Generate a single value to be applied. This is used for FSGroup. This strategy returns nil. +func (s *runAsAny) GenerateSingle(pod *api.Pod) (*int64, error) { + return nil, nil +}
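(Aside for reviewers: in contrast to MustRunAs, the runAsAny strategy above defaults nothing and, as the Validate implementation below shows, rejects nothing. A small sketch with illustrative values, not part of the vendored files.)

    package main

    import (
    	"fmt"

    	"k8s.io/kubernetes/pkg/security/podsecuritypolicy/group"
    )

    func main() {
    	s, _ := group.NewRunAsAny()

    	gids, _ := s.Generate(nil)          // []int64{}: nothing is defaulted
    	fsGroup, _ := s.GenerateSingle(nil) // nil: the provider leaves FSGroup unset
    	fmt.Println(gids, fsGroup)

    	// Any group at all is accepted; the returned error list is always empty.
    	fmt.Println(s.Validate(nil, []int64{12345}))
    }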
+ +// Validate ensures that the specified values fall within the range of the strategy. +func (s *runAsAny) Validate(pod *api.Pod, groups []int64) field.ErrorList { + return field.ErrorList{} +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/security/podsecuritypolicy/group/runasany_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/security/podsecuritypolicy/group/runasany_test.go new file mode 100644 index 000000000000..be8b239b5786 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/security/podsecuritypolicy/group/runasany_test.go @@ -0,0 +1,60 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package group + +import ( + "testing" +) + +func TestRunAsAnyGenerate(t *testing.T) { + s, err := NewRunAsAny() + if err != nil { + t.Fatalf("unexpected error initializing NewRunAsAny %v", err) + } + groups, err := s.Generate(nil) + if len(groups) > 0 { + t.Errorf("expected empty but got %v", groups) + } + if err != nil { + t.Errorf("unexpected error generating groups: %v", err) + } +} + +func TestRunAsAnyGenerateSingle(t *testing.T) { + s, err := NewRunAsAny() + if err != nil { + t.Fatalf("unexpected error initializing NewRunAsAny %v", err) + } + group, err := s.GenerateSingle(nil) + if group != nil { + t.Errorf("expected nil but got %v", group) + } + if err != nil { + t.Errorf("unexpected error generating single group: %v", err) + } +} + +func TestRunAsAnyValidate(t *testing.T) { + s, err := NewRunAsAny() + if err != nil { + t.Fatalf("unexpected error initializing NewRunAsAny %v", err) + } + errs := s.Validate(nil, nil) + if len(errs) != 0 { + t.Errorf("unexpected errors: %v", errs) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/security/podsecuritypolicy/group/types.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/security/podsecuritypolicy/group/types.go new file mode 100644 index 000000000000..be19fe9b9dc8 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/security/podsecuritypolicy/group/types.go @@ -0,0 +1,35 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package group + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/util/validation/field" +) + +// GroupStrategy defines the interface for all group constraint strategies. +type GroupStrategy interface { + // Generate creates the group based on policy rules. The underlying implementation can + // decide whether it will return a full range of values or a subset of values from the + // configured ranges. 
+ Generate(pod *api.Pod) ([]int64, error) + // Generate a single value to be applied. The underlying implementation decides which + // value to return if configured with multiple ranges. This is used for FSGroup. + GenerateSingle(pod *api.Pod) (*int64, error) + // Validate ensures that the specified values fall within the range of the strategy. + Validate(pod *api.Pod, groups []int64) field.ErrorList +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/security/podsecuritypolicy/provider.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/security/podsecuritypolicy/provider.go new file mode 100644 index 000000000000..bf606a3384bc --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/security/podsecuritypolicy/provider.go @@ -0,0 +1,297 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package podsecuritypolicy + +import ( + "fmt" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/extensions" + psputil "k8s.io/kubernetes/pkg/security/podsecuritypolicy/util" + "k8s.io/kubernetes/pkg/util/validation/field" +) + +// used to pass in the field being validated for reusable group strategies so they +// can create informative error messages. +const ( + fsGroupField = "fsGroup" + supplementalGroupsField = "supplementalGroups" +) + +// simpleProvider is the default implementation of Provider. +type simpleProvider struct { + psp *extensions.PodSecurityPolicy + strategies *ProviderStrategies +} + +// ensure we implement the interface correctly. +var _ Provider = &simpleProvider{} + +// NewSimpleProvider creates a new Provider instance. +func NewSimpleProvider(psp *extensions.PodSecurityPolicy, namespace string, strategyFactory StrategyFactory) (Provider, error) { + if psp == nil { + return nil, fmt.Errorf("NewSimpleProvider requires a PodSecurityPolicy") + } + if strategyFactory == nil { + return nil, fmt.Errorf("NewSimpleProvider requires a StrategyFactory") + } + + strategies, err := strategyFactory.CreateStrategies(psp, namespace) + if err != nil { + return nil, err + } + + return &simpleProvider{ + psp: psp, + strategies: strategies, + }, nil +} + +// Create a PodSecurityContext based on the given constraints. If a setting is already set +// on the PodSecurityContext it will not be changed. Validate should be used after the context +// is created to ensure it complies with the required restrictions. +// +// NOTE: this method works on a copy of the PodSecurityContext. It is up to the caller to +// apply the PSC if validation passes. 
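(Aside for reviewers: a sketch of the generate-then-validate flow that the NOTE above implies. The field path and the admission-style wrapper are illustrative assumptions, not part of the vendored files.)

    package pspexample

    import (
    	"fmt"

    	"k8s.io/kubernetes/pkg/api"
    	"k8s.io/kubernetes/pkg/apis/extensions"
    	"k8s.io/kubernetes/pkg/security/podsecuritypolicy"
    	"k8s.io/kubernetes/pkg/util/validation/field"
    )

    // admitPod shows the intended call order: build a provider, generate a
    // context, apply it to the pod, then validate the result.
    func admitPod(psp *extensions.PodSecurityPolicy, pod *api.Pod) error {
    	provider, err := podsecuritypolicy.NewSimpleProvider(psp, pod.Namespace, podsecuritypolicy.NewSimpleStrategyFactory())
    	if err != nil {
    		return err
    	}
    	psc, err := provider.CreatePodSecurityContext(pod)
    	if err != nil {
    		return err
    	}
    	// CreatePodSecurityContext returned a copy; nothing is applied yet.
    	pod.Spec.SecurityContext = psc
    	if errs := provider.ValidatePodSecurityContext(pod, field.NewPath("spec", "securityContext")); len(errs) > 0 {
    		return fmt.Errorf("pod rejected by PSP %s: %v", provider.GetPSPName(), errs.ToAggregate())
    	}
    	return nil
    }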
+func (s *simpleProvider) CreatePodSecurityContext(pod *api.Pod) (*api.PodSecurityContext, error) { + var sc *api.PodSecurityContext = nil + if pod.Spec.SecurityContext != nil { + // work with a copy + copy := *pod.Spec.SecurityContext + sc = &copy + } else { + sc = &api.PodSecurityContext{} + } + + if len(sc.SupplementalGroups) == 0 { + supGroups, err := s.strategies.SupplementalGroupStrategy.Generate(pod) + if err != nil { + return nil, err + } + sc.SupplementalGroups = supGroups + } + + if sc.FSGroup == nil { + fsGroup, err := s.strategies.FSGroupStrategy.GenerateSingle(pod) + if err != nil { + return nil, err + } + sc.FSGroup = fsGroup + } + + if sc.SELinuxOptions == nil { + seLinux, err := s.strategies.SELinuxStrategy.Generate(pod, nil) + if err != nil { + return nil, err + } + sc.SELinuxOptions = seLinux + } + + return sc, nil +} + +// Create a SecurityContext based on the given constraints. If a setting is already set on the +// container's security context then it will not be changed. Validation should be used after +// the context is created to ensure it complies with the required restrictions. +// +// NOTE: this method works on a copy of the SC of the container. It is up to the caller to apply +// the SC if validation passes. +func (s *simpleProvider) CreateContainerSecurityContext(pod *api.Pod, container *api.Container) (*api.SecurityContext, error) { + var sc *api.SecurityContext = nil + if container.SecurityContext != nil { + // work with a copy of the original + copy := *container.SecurityContext + sc = &copy + } else { + sc = &api.SecurityContext{} + } + if sc.RunAsUser == nil { + uid, err := s.strategies.RunAsUserStrategy.Generate(pod, container) + if err != nil { + return nil, err + } + sc.RunAsUser = uid + } + + if sc.SELinuxOptions == nil { + seLinux, err := s.strategies.SELinuxStrategy.Generate(pod, container) + if err != nil { + return nil, err + } + sc.SELinuxOptions = seLinux + } + + if sc.Privileged == nil { + priv := false + sc.Privileged = &priv + } + + // if we're using the non-root strategy, set the marker that this container should not be + // run as root. This signals the kubelet to do a final check on either the runAsUser or, + // if runAsUser is not set, the image UID. + if s.psp.Spec.RunAsUser.Rule == extensions.RunAsUserStrategyMustRunAsNonRoot { + nonRoot := true + sc.RunAsNonRoot = &nonRoot + } + + caps, err := s.strategies.CapabilitiesStrategy.Generate(pod, container) + if err != nil { + return nil, err + } + sc.Capabilities = caps + + // if the PSP requires a read only root filesystem and the container has not made a specific + // request then default ReadOnlyRootFilesystem to true. + if s.psp.Spec.ReadOnlyRootFilesystem && sc.ReadOnlyRootFilesystem == nil { + readOnlyRootFS := true + sc.ReadOnlyRootFilesystem = &readOnlyRootFS + } + + return sc, nil +} + +// Ensure a pod's SecurityContext is in compliance with the given constraints. +func (s *simpleProvider) ValidatePodSecurityContext(pod *api.Pod, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if pod.Spec.SecurityContext == nil { + allErrs = append(allErrs, field.Invalid(fldPath.Child("securityContext"), pod.Spec.SecurityContext, "No security context is set")) + return allErrs + } + + fsGroups := []int64{} + if pod.Spec.SecurityContext.FSGroup != nil { + fsGroups = append(fsGroups, *pod.Spec.SecurityContext.FSGroup) + } + allErrs = append(allErrs, s.strategies.FSGroupStrategy.Validate(pod, fsGroups)...) 
+ allErrs = append(allErrs, s.strategies.SupplementalGroupStrategy.Validate(pod, pod.Spec.SecurityContext.SupplementalGroups)...) + + // make a dummy container context to reuse the selinux strategies + container := &api.Container{ + Name: pod.Name, + SecurityContext: &api.SecurityContext{ + SELinuxOptions: pod.Spec.SecurityContext.SELinuxOptions, + }, + } + allErrs = append(allErrs, s.strategies.SELinuxStrategy.Validate(pod, container)...) + + if !s.psp.Spec.HostNetwork && pod.Spec.SecurityContext.HostNetwork { + allErrs = append(allErrs, field.Invalid(fldPath.Child("hostNetwork"), pod.Spec.SecurityContext.HostNetwork, "Host network is not allowed to be used")) + } + + if !s.psp.Spec.HostPID && pod.Spec.SecurityContext.HostPID { + allErrs = append(allErrs, field.Invalid(fldPath.Child("hostPID"), pod.Spec.SecurityContext.HostPID, "Host PID is not allowed to be used")) + } + + if !s.psp.Spec.HostIPC && pod.Spec.SecurityContext.HostIPC { + allErrs = append(allErrs, field.Invalid(fldPath.Child("hostIPC"), pod.Spec.SecurityContext.HostIPC, "Host IPC is not allowed to be used")) + } + + return allErrs +} + +// Ensure a container's SecurityContext is in compliance with the given constraints +func (s *simpleProvider) ValidateContainerSecurityContext(pod *api.Pod, container *api.Container, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if container.SecurityContext == nil { + allErrs = append(allErrs, field.Invalid(fldPath.Child("securityContext"), container.SecurityContext, "No security context is set")) + return allErrs + } + + sc := container.SecurityContext + allErrs = append(allErrs, s.strategies.RunAsUserStrategy.Validate(pod, container)...) + allErrs = append(allErrs, s.strategies.SELinuxStrategy.Validate(pod, container)...) + + if !s.psp.Spec.Privileged && *sc.Privileged { + allErrs = append(allErrs, field.Invalid(fldPath.Child("privileged"), *sc.Privileged, "Privileged containers are not allowed")) + } + + allErrs = append(allErrs, s.strategies.CapabilitiesStrategy.Validate(pod, container)...) + + if len(pod.Spec.Volumes) > 0 && !psputil.PSPAllowsAllVolumes(s.psp) { + allowedVolumes := psputil.FSTypeToStringSet(s.psp.Spec.Volumes) + for i, v := range pod.Spec.Volumes { + fsType, err := psputil.GetVolumeFSType(v) + if err != nil { + allErrs = append(allErrs, field.Invalid(fldPath.Child("volumes").Index(i), string(fsType), err.Error())) + continue + } + + if !allowedVolumes.Has(string(fsType)) { + allErrs = append(allErrs, field.Invalid( + fldPath.Child("volumes").Index(i), string(fsType), + fmt.Sprintf("%s volumes are not allowed to be used", string(fsType)))) + } + } + } + + if !s.psp.Spec.HostNetwork && pod.Spec.SecurityContext.HostNetwork { + allErrs = append(allErrs, field.Invalid(fldPath.Child("hostNetwork"), pod.Spec.SecurityContext.HostNetwork, "Host network is not allowed to be used")) + } + + containersPath := fldPath.Child("containers") + for idx, c := range pod.Spec.Containers { + idxPath := containersPath.Index(idx) + allErrs = append(allErrs, s.hasInvalidHostPort(&c, idxPath)...) 
+ } + + if !s.psp.Spec.HostPID && pod.Spec.SecurityContext.HostPID { + allErrs = append(allErrs, field.Invalid(fldPath.Child("hostPID"), pod.Spec.SecurityContext.HostPID, "Host PID is not allowed to be used")) + } + + if !s.psp.Spec.HostIPC && pod.Spec.SecurityContext.HostIPC { + allErrs = append(allErrs, field.Invalid(fldPath.Child("hostIPC"), pod.Spec.SecurityContext.HostIPC, "Host IPC is not allowed to be used")) + } + + if s.psp.Spec.ReadOnlyRootFilesystem { + if sc.ReadOnlyRootFilesystem == nil { + allErrs = append(allErrs, field.Invalid(fldPath.Child("readOnlyRootFilesystem"), sc.ReadOnlyRootFilesystem, "ReadOnlyRootFilesystem may not be nil and must be set to true")) + } else if !*sc.ReadOnlyRootFilesystem { + allErrs = append(allErrs, field.Invalid(fldPath.Child("readOnlyRootFilesystem"), *sc.ReadOnlyRootFilesystem, "ReadOnlyRootFilesystem must be set to true")) + } + } + + return allErrs +} + +// hasInvalidHostPort checks the container's port definitions for host ports that fall outside +// the ranges allowed by the PSP. +func (s *simpleProvider) hasInvalidHostPort(container *api.Container, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + for _, cp := range container.Ports { + if cp.HostPort > 0 && !s.isValidHostPort(int(cp.HostPort)) { + detail := fmt.Sprintf("Host port %d is not allowed to be used. Allowed ports: %v", cp.HostPort, s.psp.Spec.HostPorts) + allErrs = append(allErrs, field.Invalid(fldPath.Child("hostPort"), cp.HostPort, detail)) + } + } + return allErrs +} + +// isValidHostPort returns true if the port falls in any range allowed by the PSP. +func (s *simpleProvider) isValidHostPort(port int) bool { + for _, hostPortRange := range s.psp.Spec.HostPorts { + if port >= hostPortRange.Min && port <= hostPortRange.Max { + return true + } + } + return false +} + +// Get the name of the PSP that this provider was initialized with. +func (s *simpleProvider) GetPSPName() string { + return s.psp.Name +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/security/podsecuritypolicy/provider_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/security/podsecuritypolicy/provider_test.go new file mode 100644 index 000000000000..70c89b7e54d0 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/security/podsecuritypolicy/provider_test.go @@ -0,0 +1,822 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package podsecuritypolicy + +import ( + "fmt" + "reflect" + "strings" + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/extensions" + psputil "k8s.io/kubernetes/pkg/security/podsecuritypolicy/util" + "k8s.io/kubernetes/pkg/util/diff" + "k8s.io/kubernetes/pkg/util/validation/field" +) + +func TestCreatePodSecurityContextNonmutating(t *testing.T) { + // Create a pod with a security context that needs filling in + createPod := func() *api.Pod { + return &api.Pod{ + Spec: api.PodSpec{ + SecurityContext: &api.PodSecurityContext{}, + }, + } + } + + // Create a PSP with strategies that will populate a blank psc + createPSP := func() *extensions.PodSecurityPolicy { + return &extensions.PodSecurityPolicy{ + ObjectMeta: api.ObjectMeta{ + Name: "psp-sa", + }, + Spec: extensions.PodSecurityPolicySpec{ + DefaultAddCapabilities: []api.Capability{"foo"}, + RequiredDropCapabilities: []api.Capability{"bar"}, + RunAsUser: extensions.RunAsUserStrategyOptions{ + Rule: extensions.RunAsUserStrategyRunAsAny, + }, + SELinux: extensions.SELinuxStrategyOptions{ + Rule: extensions.SELinuxStrategyRunAsAny, + }, + // these are pod mutating strategies that are tested above + FSGroup: extensions.FSGroupStrategyOptions{ + Rule: extensions.FSGroupStrategyMustRunAs, + Ranges: []extensions.IDRange{ + {Min: 1, Max: 1}, + }, + }, + SupplementalGroups: extensions.SupplementalGroupsStrategyOptions{ + Rule: extensions.SupplementalGroupsStrategyMustRunAs, + Ranges: []extensions.IDRange{ + {Min: 1, Max: 1}, + }, + }, + }, + } + } + + pod := createPod() + psp := createPSP() + + provider, err := NewSimpleProvider(psp, "namespace", NewSimpleStrategyFactory()) + if err != nil { + t.Fatalf("unable to create provider %v", err) + } + sc, err := provider.CreatePodSecurityContext(pod) + if err != nil { + t.Fatalf("unable to create psc %v", err) + } + + // The generated security context should have filled in missing options, so they should differ + if reflect.DeepEqual(sc, &pod.Spec.SecurityContext) { + t.Error("expected created security context to be different than container's, but they were identical") + } + + // Creating the provider or the security context should not have mutated the psp or pod + if !reflect.DeepEqual(createPod(), pod) { + diffs := diff.ObjectDiff(createPod(), pod) + t.Errorf("pod was mutated by CreatePodSecurityContext. 
diff:\n%s", diffs) + } + if !reflect.DeepEqual(createPSP(), psp) { + t.Error("psp was mutated by CreatePodSecurityContext") + } +} + +func TestCreateContainerSecurityContextNonmutating(t *testing.T) { + // Create a pod with a security context that needs filling in + createPod := func() *api.Pod { + return &api.Pod{ + Spec: api.PodSpec{ + Containers: []api.Container{{ + SecurityContext: &api.SecurityContext{}, + }}, + }, + } + } + + // Create a PSP with strategies that will populate a blank security context + createPSP := func() *extensions.PodSecurityPolicy { + var uid int64 = 1 + return &extensions.PodSecurityPolicy{ + ObjectMeta: api.ObjectMeta{ + Name: "psp-sa", + }, + Spec: extensions.PodSecurityPolicySpec{ + DefaultAddCapabilities: []api.Capability{"foo"}, + RequiredDropCapabilities: []api.Capability{"bar"}, + RunAsUser: extensions.RunAsUserStrategyOptions{ + Rule: extensions.RunAsUserStrategyMustRunAs, + Ranges: []extensions.IDRange{{Min: uid, Max: uid}}, + }, + SELinux: extensions.SELinuxStrategyOptions{ + Rule: extensions.SELinuxStrategyMustRunAs, + SELinuxOptions: &api.SELinuxOptions{User: "you"}, + }, + // these are pod mutating strategies that are tested above + FSGroup: extensions.FSGroupStrategyOptions{ + Rule: extensions.FSGroupStrategyRunAsAny, + }, + SupplementalGroups: extensions.SupplementalGroupsStrategyOptions{ + Rule: extensions.SupplementalGroupsStrategyRunAsAny, + }, + // mutates the container SC by defaulting to true if container sets nil + ReadOnlyRootFilesystem: true, + }, + } + } + + pod := createPod() + psp := createPSP() + + provider, err := NewSimpleProvider(psp, "namespace", NewSimpleStrategyFactory()) + if err != nil { + t.Fatalf("unable to create provider %v", err) + } + sc, err := provider.CreateContainerSecurityContext(pod, &pod.Spec.Containers[0]) + if err != nil { + t.Fatalf("unable to create container security context %v", err) + } + + // The generated security context should have filled in missing options, so they should differ + if reflect.DeepEqual(sc, &pod.Spec.Containers[0].SecurityContext) { + t.Error("expected created security context to be different than container's, but they were identical") + } + + // Creating the provider or the security context should not have mutated the psp or pod + if !reflect.DeepEqual(createPod(), pod) { + diffs := diff.ObjectDiff(createPod(), pod) + t.Errorf("pod was mutated by CreateContainerSecurityContext. 
diff:\n%s", diffs) + } + if !reflect.DeepEqual(createPSP(), psp) { + t.Error("psp was mutated by CreateContainerSecurityContext") + } +} + +func TestValidatePodSecurityContextFailures(t *testing.T) { + failHostNetworkPod := defaultPod() + failHostNetworkPod.Spec.SecurityContext.HostNetwork = true + + failHostPIDPod := defaultPod() + failHostPIDPod.Spec.SecurityContext.HostPID = true + + failHostIPCPod := defaultPod() + failHostIPCPod.Spec.SecurityContext.HostIPC = true + + failSupplementalGroupPod := defaultPod() + failSupplementalGroupPod.Spec.SecurityContext.SupplementalGroups = []int64{999} + failSupplementalGroupPSP := defaultPSP() + failSupplementalGroupPSP.Spec.SupplementalGroups = extensions.SupplementalGroupsStrategyOptions{ + Rule: extensions.SupplementalGroupsStrategyMustRunAs, + Ranges: []extensions.IDRange{ + {Min: 1, Max: 1}, + }, + } + + failFSGroupPod := defaultPod() + fsGroup := int64(999) + failFSGroupPod.Spec.SecurityContext.FSGroup = &fsGroup + failFSGroupPSP := defaultPSP() + failFSGroupPSP.Spec.FSGroup = extensions.FSGroupStrategyOptions{ + Rule: extensions.FSGroupStrategyMustRunAs, + Ranges: []extensions.IDRange{ + {Min: 1, Max: 1}, + }, + } + + failNilSELinuxPod := defaultPod() + failSELinuxPSP := defaultPSP() + failSELinuxPSP.Spec.SELinux.Rule = extensions.SELinuxStrategyMustRunAs + failSELinuxPSP.Spec.SELinux.SELinuxOptions = &api.SELinuxOptions{ + Level: "foo", + } + + failInvalidSELinuxPod := defaultPod() + failInvalidSELinuxPod.Spec.SecurityContext.SELinuxOptions = &api.SELinuxOptions{ + Level: "bar", + } + + errorCases := map[string]struct { + pod *api.Pod + psp *extensions.PodSecurityPolicy + expectedError string + }{ + "failHostNetwork": { + pod: failHostNetworkPod, + psp: defaultPSP(), + expectedError: "Host network is not allowed to be used", + }, + "failHostPID": { + pod: failHostPIDPod, + psp: defaultPSP(), + expectedError: "Host PID is not allowed to be used", + }, + "failHostIPC": { + pod: failHostIPCPod, + psp: defaultPSP(), + expectedError: "Host IPC is not allowed to be used", + }, + "failSupplementalGroupOutOfRange": { + pod: failSupplementalGroupPod, + psp: failSupplementalGroupPSP, + expectedError: "999 is not an allowed group", + }, + "failSupplementalGroupEmpty": { + pod: defaultPod(), + psp: failSupplementalGroupPSP, + expectedError: "unable to validate empty groups against required ranges", + }, + "failFSGroupOutOfRange": { + pod: failFSGroupPod, + psp: failFSGroupPSP, + expectedError: "999 is not an allowed group", + }, + "failFSGroupEmpty": { + pod: defaultPod(), + psp: failFSGroupPSP, + expectedError: "unable to validate empty groups against required ranges", + }, + "failNilSELinux": { + pod: failNilSELinuxPod, + psp: failSELinuxPSP, + expectedError: "unable to validate nil seLinuxOptions", + }, + "failInvalidSELinux": { + pod: failInvalidSELinuxPod, + psp: failSELinuxPSP, + expectedError: "does not match required level. 
Found bar, wanted foo", + }, + } + for k, v := range errorCases { + provider, err := NewSimpleProvider(v.psp, "namespace", NewSimpleStrategyFactory()) + if err != nil { + t.Fatalf("unable to create provider %v", err) + } + errs := provider.ValidatePodSecurityContext(v.pod, field.NewPath("")) + if len(errs) == 0 { + t.Errorf("%s expected validation failure but did not receive errors", k) + continue + } + if !strings.Contains(errs[0].Error(), v.expectedError) { + t.Errorf("%s received unexpected error %v", k, errs) + } + } +} + +func TestValidateContainerSecurityContextFailures(t *testing.T) { + // fail user strat + failUserPSP := defaultPSP() + var uid int64 = 999 + var badUID int64 = 1 + failUserPSP.Spec.RunAsUser = extensions.RunAsUserStrategyOptions{ + Rule: extensions.RunAsUserStrategyMustRunAs, + Ranges: []extensions.IDRange{{Min: uid, Max: uid}}, + } + failUserPod := defaultPod() + failUserPod.Spec.Containers[0].SecurityContext.RunAsUser = &badUID + + // fail selinux strat + failSELinuxPSP := defaultPSP() + failSELinuxPSP.Spec.SELinux = extensions.SELinuxStrategyOptions{ + Rule: extensions.SELinuxStrategyMustRunAs, + SELinuxOptions: &api.SELinuxOptions{ + Level: "foo", + }, + } + failSELinuxPod := defaultPod() + failSELinuxPod.Spec.Containers[0].SecurityContext.SELinuxOptions = &api.SELinuxOptions{ + Level: "bar", + } + + failPrivPod := defaultPod() + var priv bool = true + failPrivPod.Spec.Containers[0].SecurityContext.Privileged = &priv + + failCapsPod := defaultPod() + failCapsPod.Spec.Containers[0].SecurityContext.Capabilities = &api.Capabilities{ + Add: []api.Capability{"foo"}, + } + + failHostDirPod := defaultPod() + failHostDirPod.Spec.Volumes = []api.Volume{ + { + Name: "bad volume", + VolumeSource: api.VolumeSource{ + HostPath: &api.HostPathVolumeSource{}, + }, + }, + } + + failHostPortPod := defaultPod() + failHostPortPod.Spec.Containers[0].Ports = []api.ContainerPort{{HostPort: 1}} + + readOnlyRootFSPSP := defaultPSP() + readOnlyRootFSPSP.Spec.ReadOnlyRootFilesystem = true + + readOnlyRootFSPodFalse := defaultPod() + readOnlyRootFS := false + readOnlyRootFSPodFalse.Spec.Containers[0].SecurityContext.ReadOnlyRootFilesystem = &readOnlyRootFS + + errorCases := map[string]struct { + pod *api.Pod + psp *extensions.PodSecurityPolicy + expectedError string + }{ + "failUserPSP": { + pod: failUserPod, + psp: failUserPSP, + expectedError: "does not match required range", + }, + "failSELinuxPSP": { + pod: failSELinuxPod, + psp: failSELinuxPSP, + expectedError: "does not match required level", + }, + "failPrivPSP": { + pod: failPrivPod, + psp: defaultPSP(), + expectedError: "Privileged containers are not allowed", + }, + "failCapsPSP": { + pod: failCapsPod, + psp: defaultPSP(), + expectedError: "capability may not be added", + }, + "failHostDirPSP": { + pod: failHostDirPod, + psp: defaultPSP(), + expectedError: "hostPath volumes are not allowed to be used", + }, + "failHostPortPSP": { + pod: failHostPortPod, + psp: defaultPSP(), + expectedError: "Host port 1 is not allowed to be used. 
Allowed ports: []", + }, + "failReadOnlyRootFS - nil": { + pod: defaultPod(), + psp: readOnlyRootFSPSP, + expectedError: "ReadOnlyRootFilesystem may not be nil and must be set to true", + }, + "failReadOnlyRootFS - false": { + pod: readOnlyRootFSPodFalse, + psp: readOnlyRootFSPSP, + expectedError: "ReadOnlyRootFilesystem must be set to true", + }, + } + + for k, v := range errorCases { + provider, err := NewSimpleProvider(v.psp, "namespace", NewSimpleStrategyFactory()) + if err != nil { + t.Fatalf("unable to create provider %v", err) + } + errs := provider.ValidateContainerSecurityContext(v.pod, &v.pod.Spec.Containers[0], field.NewPath("")) + if len(errs) == 0 { + t.Errorf("%s expected validation failure but did not receive errors", k) + continue + } + if !strings.Contains(errs[0].Error(), v.expectedError) { + t.Errorf("%s received unexpected error %v", k, errs) + } + } +} + +func TestValidatePodSecurityContextSuccess(t *testing.T) { + hostNetworkPSP := defaultPSP() + hostNetworkPSP.Spec.HostNetwork = true + hostNetworkPod := defaultPod() + hostNetworkPod.Spec.SecurityContext.HostNetwork = true + + hostPIDPSP := defaultPSP() + hostPIDPSP.Spec.HostPID = true + hostPIDPod := defaultPod() + hostPIDPod.Spec.SecurityContext.HostPID = true + + hostIPCPSP := defaultPSP() + hostIPCPSP.Spec.HostIPC = true + hostIPCPod := defaultPod() + hostIPCPod.Spec.SecurityContext.HostIPC = true + + supGroupPSP := defaultPSP() + supGroupPSP.Spec.SupplementalGroups = extensions.SupplementalGroupsStrategyOptions{ + Rule: extensions.SupplementalGroupsStrategyMustRunAs, + Ranges: []extensions.IDRange{ + {Min: 1, Max: 5}, + }, + } + supGroupPod := defaultPod() + supGroupPod.Spec.SecurityContext.SupplementalGroups = []int64{3} + + fsGroupPSP := defaultPSP() + fsGroupPSP.Spec.FSGroup = extensions.FSGroupStrategyOptions{ + Rule: extensions.FSGroupStrategyMustRunAs, + Ranges: []extensions.IDRange{ + {Min: 1, Max: 5}, + }, + } + fsGroupPod := defaultPod() + fsGroup := int64(3) + fsGroupPod.Spec.SecurityContext.FSGroup = &fsGroup + + seLinuxPod := defaultPod() + seLinuxPod.Spec.SecurityContext.SELinuxOptions = &api.SELinuxOptions{ + User: "user", + Role: "role", + Type: "type", + Level: "level", + } + seLinuxPSP := defaultPSP() + seLinuxPSP.Spec.SELinux.Rule = extensions.SELinuxStrategyMustRunAs + seLinuxPSP.Spec.SELinux.SELinuxOptions = &api.SELinuxOptions{ + User: "user", + Role: "role", + Type: "type", + Level: "level", + } + + errorCases := map[string]struct { + pod *api.Pod + psp *extensions.PodSecurityPolicy + }{ + "pass hostNetwork validating PSP": { + pod: hostNetworkPod, + psp: hostNetworkPSP, + }, + "pass hostPID validating PSP": { + pod: hostPIDPod, + psp: hostPIDPSP, + }, + "pass hostIPC validating PSP": { + pod: hostIPCPod, + psp: hostIPCPSP, + }, + "pass supplemental group validating PSP": { + pod: supGroupPod, + psp: supGroupPSP, + }, + "pass fs group validating PSP": { + pod: fsGroupPod, + psp: fsGroupPSP, + }, + "pass selinux validating PSP": { + pod: seLinuxPod, + psp: seLinuxPSP, + }, + } + + for k, v := range errorCases { + provider, err := NewSimpleProvider(v.psp, "namespace", NewSimpleStrategyFactory()) + if err != nil { + t.Fatalf("unable to create provider %v", err) + } + errs := provider.ValidatePodSecurityContext(v.pod, field.NewPath("")) + if len(errs) != 0 { + t.Errorf("%s expected validation pass but received errors %v", k, errs) + continue + } + } +} + +func TestValidateContainerSecurityContextSuccess(t *testing.T) { + var notPriv bool = false + defaultPod := func() *api.Pod { + return 
&api.Pod{ + Spec: api.PodSpec{ + SecurityContext: &api.PodSecurityContext{}, + Containers: []api.Container{ + { + SecurityContext: &api.SecurityContext{ + // expected to be set by defaulting mechanisms + Privileged: &notPriv, + // fill in the rest for test cases + }, + }, + }, + }, + } + } + + // passing user strat + userPSP := defaultPSP() + var uid int64 = 999 + userPSP.Spec.RunAsUser = extensions.RunAsUserStrategyOptions{ + Rule: extensions.RunAsUserStrategyMustRunAs, + Ranges: []extensions.IDRange{{Min: uid, Max: uid}}, + } + userPod := defaultPod() + userPod.Spec.Containers[0].SecurityContext.RunAsUser = &uid + + // passing selinux strat + seLinuxPSP := defaultPSP() + seLinuxPSP.Spec.SELinux = extensions.SELinuxStrategyOptions{ + Rule: extensions.SELinuxStrategyMustRunAs, + SELinuxOptions: &api.SELinuxOptions{ + Level: "foo", + }, + } + seLinuxPod := defaultPod() + seLinuxPod.Spec.Containers[0].SecurityContext.SELinuxOptions = &api.SELinuxOptions{ + Level: "foo", + } + + privPSP := defaultPSP() + privPSP.Spec.Privileged = true + privPod := defaultPod() + var priv bool = true + privPod.Spec.Containers[0].SecurityContext.Privileged = &priv + + capsPSP := defaultPSP() + capsPSP.Spec.AllowedCapabilities = []api.Capability{"foo"} + capsPod := defaultPod() + capsPod.Spec.Containers[0].SecurityContext.Capabilities = &api.Capabilities{ + Add: []api.Capability{"foo"}, + } + + // pod should be able to request caps that are in the required set even if not specified in the allowed set + requiredCapsPSP := defaultPSP() + requiredCapsPSP.Spec.DefaultAddCapabilities = []api.Capability{"foo"} + requiredCapsPod := defaultPod() + requiredCapsPod.Spec.Containers[0].SecurityContext.Capabilities = &api.Capabilities{ + Add: []api.Capability{"foo"}, + } + + hostDirPSP := defaultPSP() + hostDirPSP.Spec.Volumes = []extensions.FSType{extensions.HostPath} + hostDirPod := defaultPod() + hostDirPod.Spec.Volumes = []api.Volume{ + { + Name: "bad volume", + VolumeSource: api.VolumeSource{ + HostPath: &api.HostPathVolumeSource{}, + }, + }, + } + + hostPortPSP := defaultPSP() + hostPortPSP.Spec.HostPorts = []extensions.HostPortRange{{Min: 1, Max: 1}} + hostPortPod := defaultPod() + hostPortPod.Spec.Containers[0].Ports = []api.ContainerPort{{HostPort: 1}} + + readOnlyRootFSPodFalse := defaultPod() + readOnlyRootFSFalse := false + readOnlyRootFSPodFalse.Spec.Containers[0].SecurityContext.ReadOnlyRootFilesystem = &readOnlyRootFSFalse + + readOnlyRootFSPodTrue := defaultPod() + readOnlyRootFSTrue := true + readOnlyRootFSPodTrue.Spec.Containers[0].SecurityContext.ReadOnlyRootFilesystem = &readOnlyRootFSTrue + + errorCases := map[string]struct { + pod *api.Pod + psp *extensions.PodSecurityPolicy + }{ + "pass user must run as PSP": { + pod: userPod, + psp: userPSP, + }, + "pass seLinux must run as PSP": { + pod: seLinuxPod, + psp: seLinuxPSP, + }, + "pass priv validating PSP": { + pod: privPod, + psp: privPSP, + }, + "pass allowed caps validating PSP": { + pod: capsPod, + psp: capsPSP, + }, + "pass required caps validating PSP": { + pod: requiredCapsPod, + psp: requiredCapsPSP, + }, + "pass hostDir validating PSP": { + pod: hostDirPod, + psp: hostDirPSP, + }, + "pass hostPort validating PSP": { + pod: hostPortPod, + psp: hostPortPSP, + }, + "pass read only root fs - nil": { + pod: defaultPod(), + psp: defaultPSP(), + }, + "pass read only root fs - false": { + pod: readOnlyRootFSPodFalse, + psp: defaultPSP(), + }, + "pass read only root fs - true": { + pod: readOnlyRootFSPodTrue, + psp: defaultPSP(), + }, + } + + for k, v := range 
errorCases { + provider, err := NewSimpleProvider(v.psp, "namespace", NewSimpleStrategyFactory()) + if err != nil { + t.Fatalf("unable to create provider %v", err) + } + errs := provider.ValidateContainerSecurityContext(v.pod, &v.pod.Spec.Containers[0], field.NewPath("")) + if len(errs) != 0 { + t.Errorf("%s expected validation pass but received errors %v", k, errs) + continue + } + } +} + +func TestGenerateContainerSecurityContextReadOnlyRootFS(t *testing.T) { + truePSP := defaultPSP() + truePSP.Spec.ReadOnlyRootFilesystem = true + + trueVal := true + expectTrue := &trueVal + falseVal := false + expectFalse := &falseVal + + falsePod := defaultPod() + falsePod.Spec.Containers[0].SecurityContext.ReadOnlyRootFilesystem = expectFalse + + truePod := defaultPod() + truePod.Spec.Containers[0].SecurityContext.ReadOnlyRootFilesystem = expectTrue + + tests := map[string]struct { + pod *api.Pod + psp *extensions.PodSecurityPolicy + expected *bool + }{ + "false psp, nil sc": { + psp: defaultPSP(), + pod: defaultPod(), + expected: nil, + }, + "false psp, false sc": { + psp: defaultPSP(), + pod: falsePod, + expected: expectFalse, + }, + "false psp, true sc": { + psp: defaultPSP(), + pod: truePod, + expected: expectTrue, + }, + "true psp, nil sc": { + psp: truePSP, + pod: defaultPod(), + expected: expectTrue, + }, + "true psp, false sc": { + psp: truePSP, + pod: falsePod, + // expect false even though it defaults to true to ensure it doesn't change set values + // validation catches the mismatch, not generation + expected: expectFalse, + }, + "true psp, true sc": { + psp: truePSP, + pod: truePod, + expected: expectTrue, + }, + } + + for k, v := range tests { + provider, err := NewSimpleProvider(v.psp, "namespace", NewSimpleStrategyFactory()) + if err != nil { + t.Errorf("%s unable to create provider %v", k, err) + continue + } + sc, err := provider.CreateContainerSecurityContext(v.pod, &v.pod.Spec.Containers[0]) + if err != nil { + t.Errorf("%s unable to create container security context %v", k, err) + continue + } + + if v.expected == nil && sc.ReadOnlyRootFilesystem != nil { + t.Errorf("%s expected a nil ReadOnlyRootFilesystem but got %t", k, *sc.ReadOnlyRootFilesystem) + } + if v.expected != nil && sc.ReadOnlyRootFilesystem == nil { + t.Errorf("%s expected a non nil ReadOnlyRootFilesystem but received nil", k) + } + if v.expected != nil && sc.ReadOnlyRootFilesystem != nil && (*v.expected != *sc.ReadOnlyRootFilesystem) { + t.Errorf("%s expected a non nil ReadOnlyRootFilesystem set to %t but got %t", k, *v.expected, *sc.ReadOnlyRootFilesystem) + } + + } +} + +func defaultPSP() *extensions.PodSecurityPolicy { + return &extensions.PodSecurityPolicy{ + ObjectMeta: api.ObjectMeta{ + Name: "psp-sa", + }, + Spec: extensions.PodSecurityPolicySpec{ + RunAsUser: extensions.RunAsUserStrategyOptions{ + Rule: extensions.RunAsUserStrategyRunAsAny, + }, + SELinux: extensions.SELinuxStrategyOptions{ + Rule: extensions.SELinuxStrategyRunAsAny, + }, + FSGroup: extensions.FSGroupStrategyOptions{ + Rule: extensions.FSGroupStrategyRunAsAny, + }, + SupplementalGroups: extensions.SupplementalGroupsStrategyOptions{ + Rule: extensions.SupplementalGroupsStrategyRunAsAny, + }, + }, + } +} + +func defaultPod() *api.Pod { + var notPriv bool = false + return &api.Pod{ + Spec: api.PodSpec{ + SecurityContext: &api.PodSecurityContext{ + // fill in for test cases + }, + Containers: []api.Container{ + { + SecurityContext: &api.SecurityContext{ + // expected to be set by defaulting mechanisms + Privileged: &notPriv, + // fill in the rest 
for test cases + }, + }, + }, + }, + } +} + +// TestValidateAllowedVolumes will test that for every field of VolumeSource we can create +// a pod with that type of volume and deny it, accept it explicitly, or accept it with +// the FSTypeAll wildcard. +func TestValidateAllowedVolumes(t *testing.T) { + val := reflect.ValueOf(api.VolumeSource{}) + + for i := 0; i < val.NumField(); i++ { + // reflectively create the volume source + fieldVal := val.Type().Field(i) + + volumeSource := api.VolumeSource{} + volumeSourceVolume := reflect.New(fieldVal.Type.Elem()) + + reflect.ValueOf(&volumeSource).Elem().FieldByName(fieldVal.Name).Set(volumeSourceVolume) + volume := api.Volume{VolumeSource: volumeSource} + + // sanity check before moving on + fsType, err := psputil.GetVolumeFSType(volume) + if err != nil { + t.Errorf("error getting FSType for %s: %s", fieldVal.Name, err.Error()) + continue + } + + // add the volume to the pod + pod := defaultPod() + pod.Spec.Volumes = []api.Volume{volume} + + // create a PSP that allows no volumes + psp := defaultPSP() + + provider, err := NewSimpleProvider(psp, "namespace", NewSimpleStrategyFactory()) + if err != nil { + t.Errorf("error creating provider for %s: %s", fieldVal.Name, err.Error()) + continue + } + + // expect a denial for this PSP and test the error message to ensure it's related to the volumesource + errs := provider.ValidateContainerSecurityContext(pod, &pod.Spec.Containers[0], field.NewPath("")) + if len(errs) != 1 { + t.Errorf("expected exactly 1 error for %s but got %v", fieldVal.Name, errs) + } else { + if !strings.Contains(errs.ToAggregate().Error(), fmt.Sprintf("%s volumes are not allowed to be used", fsType)) { + t.Errorf("did not find the expected error, received: %v", errs) + } + } + + // now add the fstype directly to the psp and it should validate + psp.Spec.Volumes = []extensions.FSType{fsType} + errs = provider.ValidateContainerSecurityContext(pod, &pod.Spec.Containers[0], field.NewPath("")) + if len(errs) != 0 { + t.Errorf("directly allowing volume expected no errors for %s but got %v", fieldVal.Name, errs) + } + + // now change the psp to allow any volumes and the pod should still validate + psp.Spec.Volumes = []extensions.FSType{extensions.All} + errs = provider.ValidateContainerSecurityContext(pod, &pod.Spec.Containers[0], field.NewPath("")) + if len(errs) != 0 { + t.Errorf("wildcard volume expected no errors for %s but got %v", fieldVal.Name, errs) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/security/podsecuritypolicy/selinux/mustrunas.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/security/podsecuritypolicy/selinux/mustrunas.go new file mode 100644 index 000000000000..4b59211bb4d2 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/security/podsecuritypolicy/selinux/mustrunas.go @@ -0,0 +1,84 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package selinux + +import ( + "fmt" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/pkg/util/validation/field" +) + +type mustRunAs struct { + opts *extensions.SELinuxStrategyOptions +} + +var _ SELinuxStrategy = &mustRunAs{} + +func NewMustRunAs(options *extensions.SELinuxStrategyOptions) (SELinuxStrategy, error) { + if options == nil { + return nil, fmt.Errorf("MustRunAs requires SELinuxContextStrategyOptions") + } + if options.SELinuxOptions == nil { + return nil, fmt.Errorf("MustRunAs requires SELinuxOptions") + } + return &mustRunAs{ + opts: options, + }, nil +} + +// Generate creates the SELinuxOptions based on constraint rules. +func (s *mustRunAs) Generate(pod *api.Pod, container *api.Container) (*api.SELinuxOptions, error) { + return s.opts.SELinuxOptions, nil +} + +// Validate ensures that the specified values fall within the range of the strategy. +func (s *mustRunAs) Validate(pod *api.Pod, container *api.Container) field.ErrorList { + allErrs := field.ErrorList{} + + if container.SecurityContext == nil { + detail := fmt.Sprintf("unable to validate nil security context for %s", container.Name) + allErrs = append(allErrs, field.Invalid(field.NewPath("securityContext"), container.SecurityContext, detail)) + return allErrs + } + if container.SecurityContext.SELinuxOptions == nil { + detail := fmt.Sprintf("unable to validate nil seLinuxOptions for %s", container.Name) + allErrs = append(allErrs, field.Invalid(field.NewPath("seLinuxOptions"), container.SecurityContext.SELinuxOptions, detail)) + return allErrs + } + seLinuxOptionsPath := field.NewPath("seLinuxOptions") + seLinux := container.SecurityContext.SELinuxOptions + if seLinux.Level != s.opts.SELinuxOptions.Level { + detail := fmt.Sprintf("seLinuxOptions.level on %s does not match required level. Found %s, wanted %s", container.Name, seLinux.Level, s.opts.SELinuxOptions.Level) + allErrs = append(allErrs, field.Invalid(seLinuxOptionsPath.Child("level"), seLinux.Level, detail)) + } + if seLinux.Role != s.opts.SELinuxOptions.Role { + detail := fmt.Sprintf("seLinuxOptions.role on %s does not match required role. Found %s, wanted %s", container.Name, seLinux.Role, s.opts.SELinuxOptions.Role) + allErrs = append(allErrs, field.Invalid(seLinuxOptionsPath.Child("role"), seLinux.Role, detail)) + } + if seLinux.Type != s.opts.SELinuxOptions.Type { + detail := fmt.Sprintf("seLinuxOptions.type on %s does not match required type. Found %s, wanted %s", container.Name, seLinux.Type, s.opts.SELinuxOptions.Type) + allErrs = append(allErrs, field.Invalid(seLinuxOptionsPath.Child("type"), seLinux.Type, detail)) + } + if seLinux.User != s.opts.SELinuxOptions.User { + detail := fmt.Sprintf("seLinuxOptions.user on %s does not match required user. Found %s, wanted %s", container.Name, seLinux.User, s.opts.SELinuxOptions.User) + allErrs = append(allErrs, field.Invalid(seLinuxOptionsPath.Child("user"), seLinux.User, detail)) + } + + return allErrs +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/security/podsecuritypolicy/selinux/mustrunas_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/security/podsecuritypolicy/selinux/mustrunas_test.go new file mode 100644 index 000000000000..153c3e5072c5 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/security/podsecuritypolicy/selinux/mustrunas_test.go @@ -0,0 +1,159 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package selinux + +import ( + "reflect" + "strings" + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/extensions" +) + +func TestMustRunAsOptions(t *testing.T) { + tests := map[string]struct { + opts *extensions.SELinuxStrategyOptions + pass bool + }{ + "invalid opts": { + opts: &extensions.SELinuxStrategyOptions{}, + pass: false, + }, + "valid opts": { + opts: &extensions.SELinuxStrategyOptions{SELinuxOptions: &api.SELinuxOptions{}}, + pass: true, + }, + } + for name, tc := range tests { + _, err := NewMustRunAs(tc.opts) + if err != nil && tc.pass { + t.Errorf("%s expected to pass but received error %#v", name, err) + } + if err == nil && !tc.pass { + t.Errorf("%s expected to fail but did not receive an error", name) + } + } +} + +func TestMustRunAsGenerate(t *testing.T) { + opts := &extensions.SELinuxStrategyOptions{ + SELinuxOptions: &api.SELinuxOptions{ + User: "user", + Role: "role", + Type: "type", + Level: "level", + }, + } + mustRunAs, err := NewMustRunAs(opts) + if err != nil { + t.Fatalf("unexpected error initializing NewMustRunAs %v", err) + } + generated, err := mustRunAs.Generate(nil, nil) + if err != nil { + t.Fatalf("unexpected error generating selinux %v", err) + } + if !reflect.DeepEqual(generated, opts.SELinuxOptions) { + t.Errorf("generated selinux does not equal configured selinux") + } +} + +func TestMustRunAsValidate(t *testing.T) { + newValidOpts := func() *api.SELinuxOptions { + return &api.SELinuxOptions{ + User: "user", + Role: "role", + Level: "level", + Type: "type", + } + } + + role := newValidOpts() + role.Role = "invalid" + + user := newValidOpts() + user.User = "invalid" + + level := newValidOpts() + level.Level = "invalid" + + seType := newValidOpts() + seType.Type = "invalid" + + tests := map[string]struct { + seLinux *api.SELinuxOptions + expectedMsg string + }{ + "invalid role": { + seLinux: role, + expectedMsg: "does not match required role", + }, + "invalid user": { + seLinux: user, + expectedMsg: "does not match required user", + }, + "invalid level": { + seLinux: level, + expectedMsg: "does not match required level", + }, + "invalid type": { + seLinux: seType, + expectedMsg: "does not match required type", + }, + "valid": { + seLinux: newValidOpts(), + expectedMsg: "", + }, + } + + opts := &extensions.SELinuxStrategyOptions{ + SELinuxOptions: newValidOpts(), + } + + for name, tc := range tests { + mustRunAs, err := NewMustRunAs(opts) + if err != nil { + t.Errorf("unexpected error initializing NewMustRunAs for testcase %s: %#v", name, err) + continue + } + container := &api.Container{ + SecurityContext: &api.SecurityContext{ + SELinuxOptions: tc.seLinux, + }, + } + + errs := mustRunAs.Validate(nil, container) + //should've passed but didn't + if len(tc.expectedMsg) == 0 && len(errs) > 0 { + t.Errorf("%s expected no errors but received %v", name, errs) + } + //should've failed but didn't + if len(tc.expectedMsg) != 0 && len(errs) == 0 { + t.Errorf("%s expected error %s but received no errors", 
name, tc.expectedMsg) + } + //failed with additional messages + if len(tc.expectedMsg) != 0 && len(errs) > 1 { + t.Errorf("%s expected error %s but received multiple errors: %v", name, tc.expectedMsg, errs) + } + //check that we got the right message + if len(tc.expectedMsg) != 0 && len(errs) == 1 { + if !strings.Contains(errs[0].Error(), tc.expectedMsg) { + t.Errorf("%s expected error to contain %s but it did not: %v", name, tc.expectedMsg, errs) + } + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/security/podsecuritypolicy/selinux/runasany.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/security/podsecuritypolicy/selinux/runasany.go new file mode 100644 index 000000000000..1418fc331c28 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/security/podsecuritypolicy/selinux/runasany.go @@ -0,0 +1,43 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package selinux + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/pkg/util/validation/field" +) + +// runAsAny implements the SELinuxStrategy interface. +type runAsAny struct{} + +var _ SELinuxStrategy = &runAsAny{} + +// NewRunAsAny provides a strategy that will return the configured se linux context or nil. +func NewRunAsAny(options *extensions.SELinuxStrategyOptions) (SELinuxStrategy, error) { + return &runAsAny{}, nil +} + +// Generate creates the SELinuxOptions based on constraint rules. +func (s *runAsAny) Generate(pod *api.Pod, container *api.Container) (*api.SELinuxOptions, error) { + return nil, nil +} + +// Validate ensures that the specified values fall within the range of the strategy. +func (s *runAsAny) Validate(pod *api.Pod, container *api.Container) field.ErrorList { + return field.ErrorList{} +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/security/podsecuritypolicy/selinux/runasany_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/security/podsecuritypolicy/selinux/runasany_test.go new file mode 100644 index 000000000000..4f5db4e68fd5 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/security/podsecuritypolicy/selinux/runasany_test.go @@ -0,0 +1,73 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package selinux
+
+import (
+	"testing"
+
+	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/apis/extensions"
+)
+
+func TestRunAsAnyOptions(t *testing.T) {
+	_, err := NewRunAsAny(nil)
+	if err != nil {
+		t.Fatalf("unexpected error initializing NewRunAsAny %v", err)
+	}
+	_, err = NewRunAsAny(&extensions.SELinuxStrategyOptions{})
+	if err != nil {
+		t.Errorf("unexpected error initializing NewRunAsAny %v", err)
+	}
+}
+
+func TestRunAsAnyGenerate(t *testing.T) {
+	s, err := NewRunAsAny(&extensions.SELinuxStrategyOptions{})
+	if err != nil {
+		t.Fatalf("unexpected error initializing NewRunAsAny %v", err)
+	}
+	opts, err := s.Generate(nil, nil)
+	if opts != nil {
+		t.Errorf("expected nil SELinuxOptions but got %v", *opts)
+	}
+	if err != nil {
+		t.Errorf("unexpected error generating SELinuxOptions %v", err)
+	}
+}
+
+func TestRunAsAnyValidate(t *testing.T) {
+	s, err := NewRunAsAny(&extensions.SELinuxStrategyOptions{
+		SELinuxOptions: &api.SELinuxOptions{
+			Level: "foo",
+		},
+	},
+	)
+	if err != nil {
+		t.Fatalf("unexpected error initializing NewRunAsAny %v", err)
+	}
+	errs := s.Validate(nil, nil)
+	if len(errs) != 0 {
+		t.Errorf("unexpected errors validating with configured SELinuxOptions: %v", errs)
+	}
+	s, err = NewRunAsAny(&extensions.SELinuxStrategyOptions{})
+	if err != nil {
+		t.Fatalf("unexpected error initializing NewRunAsAny %v", err)
+	}
+	errs = s.Validate(nil, nil)
+	if len(errs) != 0 {
+		t.Errorf("unexpected errors validating %v", errs)
+	}
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/security/podsecuritypolicy/selinux/types.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/security/podsecuritypolicy/selinux/types.go
new file mode 100644
index 000000000000..25613d62a139
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/security/podsecuritypolicy/selinux/types.go
@@ -0,0 +1,30 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package selinux
+
+import (
+	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/util/validation/field"
+)
+
+// SELinuxStrategy defines the interface for all SELinux constraint strategies.
+type SELinuxStrategy interface {
+	// Generate creates the SELinuxOptions based on constraint rules.
+	Generate(pod *api.Pod, container *api.Container) (*api.SELinuxOptions, error)
+	// Validate ensures that the specified values fall within the range of the strategy.
+	Validate(pod *api.Pod, container *api.Container) field.ErrorList
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/security/podsecuritypolicy/types.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/security/podsecuritypolicy/types.go
new file mode 100644
index 000000000000..64535ee4af74
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/security/podsecuritypolicy/types.go
@@ -0,0 +1,62 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package podsecuritypolicy + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/pkg/security/podsecuritypolicy/capabilities" + "k8s.io/kubernetes/pkg/security/podsecuritypolicy/group" + "k8s.io/kubernetes/pkg/security/podsecuritypolicy/selinux" + "k8s.io/kubernetes/pkg/security/podsecuritypolicy/user" + "k8s.io/kubernetes/pkg/util/validation/field" +) + +// Provider provides the implementation to generate a new security +// context based on constraints or validate an existing security context against constraints. +type Provider interface { + // Create a PodSecurityContext based on the given constraints. + CreatePodSecurityContext(pod *api.Pod) (*api.PodSecurityContext, error) + // Create a container SecurityContext based on the given constraints + CreateContainerSecurityContext(pod *api.Pod, container *api.Container) (*api.SecurityContext, error) + // Ensure a pod's SecurityContext is in compliance with the given constraints. + ValidatePodSecurityContext(pod *api.Pod, fldPath *field.Path) field.ErrorList + // Ensure a container's SecurityContext is in compliance with the given constraints + ValidateContainerSecurityContext(pod *api.Pod, container *api.Container, fldPath *field.Path) field.ErrorList + // Get the name of the PSP that this provider was initialized with. + GetPSPName() string +} + +// StrategyFactory abstracts how the strategies are created from the provider so that you may +// implement your own custom strategies that may pull information from other resources as necessary. +// For example, if you would like to populate the strategies with values from namespace annotations +// you may create a factory with a client that can pull the namespace and populate the appropriate +// values. +type StrategyFactory interface { + // CreateStrategies creates the strategies that a provider will use. The namespace argument + // should be the namespace of the object being checked (the pod's namespace). + CreateStrategies(psp *extensions.PodSecurityPolicy, namespace string) (*ProviderStrategies, error) +} + +// ProviderStrategies is a holder for all strategies that the provider requires to be populated. +type ProviderStrategies struct { + RunAsUserStrategy user.RunAsUserStrategy + SELinuxStrategy selinux.SELinuxStrategy + FSGroupStrategy group.GroupStrategy + SupplementalGroupStrategy group.GroupStrategy + CapabilitiesStrategy capabilities.CapabilitiesStrategy +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/security/podsecuritypolicy/user/mustrunas.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/security/podsecuritypolicy/user/mustrunas.go new file mode 100644 index 000000000000..f48c803c99ed --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/security/podsecuritypolicy/user/mustrunas.go @@ -0,0 +1,84 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package user + +import ( + "fmt" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/extensions" + psputil "k8s.io/kubernetes/pkg/security/podsecuritypolicy/util" + "k8s.io/kubernetes/pkg/util/validation/field" +) + +// mustRunAs implements the RunAsUserStrategy interface +type mustRunAs struct { + opts *extensions.RunAsUserStrategyOptions +} + +// NewMustRunAs provides a strategy that requires the container to run as a specific UID in a range. +func NewMustRunAs(options *extensions.RunAsUserStrategyOptions) (RunAsUserStrategy, error) { + if options == nil { + return nil, fmt.Errorf("MustRunAsRange requires run as user options") + } + if len(options.Ranges) == 0 { + return nil, fmt.Errorf("MustRunAsRange requires at least one range") + } + return &mustRunAs{ + opts: options, + }, nil +} + +// Generate creates the uid based on policy rules. MustRunAs returns the first range's Min. +func (s *mustRunAs) Generate(pod *api.Pod, container *api.Container) (*int64, error) { + return &s.opts.Ranges[0].Min, nil +} + +// Validate ensures that the specified values fall within the range of the strategy. +func (s *mustRunAs) Validate(pod *api.Pod, container *api.Container) field.ErrorList { + allErrs := field.ErrorList{} + + securityContextPath := field.NewPath("securityContext") + if container.SecurityContext == nil { + detail := fmt.Sprintf("unable to validate nil security context for container %s", container.Name) + allErrs = append(allErrs, field.Invalid(securityContextPath, container.SecurityContext, detail)) + return allErrs + } + if container.SecurityContext.RunAsUser == nil { + detail := fmt.Sprintf("unable to validate nil RunAsUser for container %s", container.Name) + allErrs = append(allErrs, field.Invalid(securityContextPath.Child("runAsUser"), container.SecurityContext.RunAsUser, detail)) + return allErrs + } + + if !s.isValidUID(*container.SecurityContext.RunAsUser) { + detail := fmt.Sprintf("UID on container %s does not match required range. Found %d, allowed: %v", + container.Name, + *container.SecurityContext.RunAsUser, + s.opts.Ranges) + allErrs = append(allErrs, field.Invalid(securityContextPath.Child("runAsUser"), *container.SecurityContext.RunAsUser, detail)) + } + return allErrs +} + +func (s *mustRunAs) isValidUID(id int64) bool { + for _, rng := range s.opts.Ranges { + if psputil.FallsInRange(id, rng) { + return true + } + } + return false +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/security/podsecuritypolicy/user/mustrunas_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/security/podsecuritypolicy/user/mustrunas_test.go new file mode 100644 index 000000000000..1a8c7eb02627 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/security/podsecuritypolicy/user/mustrunas_test.go @@ -0,0 +1,152 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package user + +import ( + "strings" + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/extensions" +) + +func TestNewMustRunAs(t *testing.T) { + tests := map[string]struct { + opts *extensions.RunAsUserStrategyOptions + pass bool + }{ + "nil opts": { + opts: nil, + pass: false, + }, + "invalid opts": { + opts: &extensions.RunAsUserStrategyOptions{}, + pass: false, + }, + "valid opts": { + opts: &extensions.RunAsUserStrategyOptions{ + Ranges: []extensions.IDRange{ + {Min: 1, Max: 1}, + }, + }, + pass: true, + }, + } + for name, tc := range tests { + _, err := NewMustRunAs(tc.opts) + if err != nil && tc.pass { + t.Errorf("%s expected to pass but received error %#v", name, err) + } + if err == nil && !tc.pass { + t.Errorf("%s expected to fail but did not receive an error", name) + } + } +} + +func TestGenerate(t *testing.T) { + opts := &extensions.RunAsUserStrategyOptions{ + Ranges: []extensions.IDRange{ + {Min: 1, Max: 1}, + }, + } + mustRunAs, err := NewMustRunAs(opts) + if err != nil { + t.Fatalf("unexpected error initializing NewMustRunAs %v", err) + } + generated, err := mustRunAs.Generate(nil, nil) + if err != nil { + t.Fatalf("unexpected error generating runAsUser %v", err) + } + if *generated != opts.Ranges[0].Min { + t.Errorf("generated runAsUser does not equal configured runAsUser") + } +} + +func TestValidate(t *testing.T) { + opts := &extensions.RunAsUserStrategyOptions{ + Ranges: []extensions.IDRange{ + {Min: 1, Max: 1}, + {Min: 10, Max: 20}, + }, + } + + tests := map[string]struct { + container *api.Container + expectedMsg string + }{ + "good container": { + container: &api.Container{ + SecurityContext: &api.SecurityContext{ + RunAsUser: int64Ptr(15), + }, + }, + }, + "nil security context": { + container: &api.Container{ + SecurityContext: nil, + }, + expectedMsg: "unable to validate nil security context for container", + }, + "nil run as user": { + container: &api.Container{ + SecurityContext: &api.SecurityContext{ + RunAsUser: nil, + }, + }, + expectedMsg: "unable to validate nil RunAsUser for container", + }, + "invalid id": { + container: &api.Container{ + SecurityContext: &api.SecurityContext{ + RunAsUser: int64Ptr(21), + }, + }, + expectedMsg: "does not match required range", + }, + } + + for name, tc := range tests { + mustRunAs, err := NewMustRunAs(opts) + if err != nil { + t.Errorf("unexpected error initializing NewMustRunAs for testcase %s: %#v", name, err) + continue + } + errs := mustRunAs.Validate(nil, tc.container) + //should've passed but didn't + if len(tc.expectedMsg) == 0 && len(errs) > 0 { + t.Errorf("%s expected no errors but received %v", name, errs) + } + //should've failed but didn't + if len(tc.expectedMsg) != 0 && len(errs) == 0 { + t.Errorf("%s expected error %s but received no errors", name, tc.expectedMsg) + } + //failed with additional messages + if len(tc.expectedMsg) != 0 && len(errs) > 1 { + t.Errorf("%s expected error %s but received multiple errors: %v", name, tc.expectedMsg, errs) + } + //check that we got the right message + if len(tc.expectedMsg) != 0 && len(errs) == 1 { + if !strings.Contains(errs[0].Error(), 
tc.expectedMsg) {
+				t.Errorf("%s expected error to contain %s but it did not: %v", name, tc.expectedMsg, errs)
+			}
+		}
+	}
+}
+
+func int64Ptr(i int64) *int64 {
+	return &i
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/security/podsecuritypolicy/user/nonroot.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/security/podsecuritypolicy/user/nonroot.go
new file mode 100644
index 000000000000..fc7c356a0b3f
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/security/podsecuritypolicy/user/nonroot.go
@@ -0,0 +1,59 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package user
+
+import (
+	"fmt"
+
+	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/apis/extensions"
+	"k8s.io/kubernetes/pkg/util/validation/field"
+)
+
+type nonRoot struct{}
+
+var _ RunAsUserStrategy = &nonRoot{}
+
+// NewRunAsNonRoot provides a strategy that requires the container to run as any non-root UID.
+func NewRunAsNonRoot(options *extensions.RunAsUserStrategyOptions) (RunAsUserStrategy, error) {
+	return &nonRoot{}, nil
+}
+
+// Generate creates the uid based on policy rules. This strategy does not return a UID; it
+// assumes that the user will specify a UID or that the container image specifies a UID.
+func (s *nonRoot) Generate(pod *api.Pod, container *api.Container) (*int64, error) {
+	return nil, nil
+}
+
+// Validate ensures that the specified values fall within the range of the strategy. Validation
+// will pass if the UID is not set, assuming that the image will provide the UID, or if the UID
+// is set and is not root. In order to work properly this assumes that the kubelet performs a
+// final check on runAsUser or the image UID when runAsUser is nil.
+func (s *nonRoot) Validate(pod *api.Pod, container *api.Container) field.ErrorList {
+	allErrs := field.ErrorList{}
+	securityContextPath := field.NewPath("securityContext")
+	if container.SecurityContext == nil {
+		detail := fmt.Sprintf("unable to validate nil security context for container %s", container.Name)
+		allErrs = append(allErrs, field.Invalid(securityContextPath, container.SecurityContext, detail))
+		return allErrs
+	}
+	if container.SecurityContext.RunAsUser != nil && *container.SecurityContext.RunAsUser == 0 {
+		detail := fmt.Sprintf("running with the root UID is forbidden by the pod security policy for container %s", container.Name)
+		allErrs = append(allErrs, field.Invalid(securityContextPath.Child("runAsUser"), *container.SecurityContext.RunAsUser, detail))
+		return allErrs
+	}
+	return allErrs
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/security/podsecuritypolicy/user/nonroot_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/security/podsecuritypolicy/user/nonroot_test.go
new file mode 100644
index 000000000000..73e2b1abe7c6
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/security/podsecuritypolicy/user/nonroot_test.go
@@ -0,0 +1,80 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package user
+
+import (
+	"testing"
+
+	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/apis/extensions"
+)
+
+func TestNonRootOptions(t *testing.T) {
+	_, err := NewRunAsNonRoot(nil)
+	if err != nil {
+		t.Fatalf("unexpected error initializing NewRunAsNonRoot %v", err)
+	}
+	_, err = NewRunAsNonRoot(&extensions.RunAsUserStrategyOptions{})
+	if err != nil {
+		t.Errorf("unexpected error initializing NewRunAsNonRoot %v", err)
+	}
+}
+
+func TestNonRootGenerate(t *testing.T) {
+	s, err := NewRunAsNonRoot(&extensions.RunAsUserStrategyOptions{})
+	if err != nil {
+		t.Fatalf("unexpected error initializing NewRunAsNonRoot %v", err)
+	}
+	uid, err := s.Generate(nil, nil)
+	if uid != nil {
+		t.Errorf("expected nil uid but got %d", *uid)
+	}
+	if err != nil {
+		t.Errorf("unexpected error generating uid %v", err)
+	}
+}
+
+func TestNonRootValidate(t *testing.T) {
+	var uid int64 = 1
+	var badUID int64 = 0
+	s, err := NewRunAsNonRoot(&extensions.RunAsUserStrategyOptions{})
+	if err != nil {
+		t.Fatalf("unexpected error initializing NewRunAsNonRoot %v", err)
+	}
+	container := &api.Container{
+		SecurityContext: &api.SecurityContext{
+			RunAsUser: &badUID,
+		},
+	}
+
+	errs := s.Validate(nil, container)
+	if len(errs) == 0 {
+		t.Errorf("expected errors from root uid but got none")
+	}
+
+	container.SecurityContext.RunAsUser = &uid
+	errs = s.Validate(nil, container)
+	if len(errs) != 0 {
+		t.Errorf("expected no errors from non-root uid but got %v", errs)
+	}
+
+	container.SecurityContext.RunAsUser = nil
+	errs = s.Validate(nil, container)
+	if len(errs) != 0 {
+		t.Errorf("expected no errors from nil uid but got %v", errs)
+	}
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/security/podsecuritypolicy/user/runasany.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/security/podsecuritypolicy/user/runasany.go
new file mode 100644
index 000000000000..6fbf1e032153
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/security/podsecuritypolicy/user/runasany.go
@@ -0,0 +1,43 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package user
+
+import (
+	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/apis/extensions"
+	"k8s.io/kubernetes/pkg/util/validation/field"
+)
+
+// runAsAny implements the RunAsUserStrategy interface.
+type runAsAny struct{}
+
+var _ RunAsUserStrategy = &runAsAny{}
+
+// NewRunAsAny provides a strategy that permits any UID; it never generates one itself.
+func NewRunAsAny(options *extensions.RunAsUserStrategyOptions) (RunAsUserStrategy, error) {
+	return &runAsAny{}, nil
+}
+
+// Generate creates the uid based on policy rules.
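+// For RunAsAny it always returns nil: no UID is defaulted, so the value must
+// come from the pod spec or the container image. A minimal usage sketch (the
+// pod and container values are assumed to be in scope):
+//
+//	s, _ := NewRunAsAny(nil)
+//	uid, _ := s.Generate(pod, container) // uid == nil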
+func (s *runAsAny) Generate(pod *api.Pod, container *api.Container) (*int64, error) {
+	return nil, nil
+}
+
+// Validate ensures that the specified values fall within the range of the strategy.
+func (s *runAsAny) Validate(pod *api.Pod, container *api.Container) field.ErrorList {
+	return field.ErrorList{}
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/security/podsecuritypolicy/user/runasany_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/security/podsecuritypolicy/user/runasany_test.go
new file mode 100644
index 000000000000..8da79fffc4e3
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/security/podsecuritypolicy/user/runasany_test.go
@@ -0,0 +1,59 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package user
+
+import (
+	"testing"
+
+	"k8s.io/kubernetes/pkg/apis/extensions"
+)
+
+func TestRunAsAnyOptions(t *testing.T) {
+	_, err := NewRunAsAny(nil)
+	if err != nil {
+		t.Fatalf("unexpected error initializing NewRunAsAny %v", err)
+	}
+	_, err = NewRunAsAny(&extensions.RunAsUserStrategyOptions{})
+	if err != nil {
+		t.Errorf("unexpected error initializing NewRunAsAny %v", err)
+	}
+}
+
+func TestRunAsAnyGenerate(t *testing.T) {
+	s, err := NewRunAsAny(&extensions.RunAsUserStrategyOptions{})
+	if err != nil {
+		t.Fatalf("unexpected error initializing NewRunAsAny %v", err)
+	}
+	uid, err := s.Generate(nil, nil)
+	if uid != nil {
+		t.Errorf("expected nil uid but got %d", *uid)
+	}
+	if err != nil {
+		t.Errorf("unexpected error generating uid %v", err)
+	}
+}
+
+func TestRunAsAnyValidate(t *testing.T) {
+	s, err := NewRunAsAny(&extensions.RunAsUserStrategyOptions{})
+	if err != nil {
+		t.Fatalf("unexpected error initializing NewRunAsAny %v", err)
+	}
+	errs := s.Validate(nil, nil)
+	if len(errs) != 0 {
+		t.Errorf("unexpected errors validating %v", errs)
+	}
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/security/podsecuritypolicy/user/types.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/security/podsecuritypolicy/user/types.go
new file mode 100644
index 000000000000..ee691c7becfe
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/security/podsecuritypolicy/user/types.go
@@ -0,0 +1,30 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package user
+
+import (
+	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/util/validation/field"
+)
+
+// RunAsUserStrategy defines the interface for all uid constraint strategies.
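+// A hedged usage sketch against the concrete strategies in this package (the
+// pod and container values are assumed to exist; see mustrunas.go):
+//
+//	strategy, err := NewMustRunAs(&extensions.RunAsUserStrategyOptions{
+//		Ranges: []extensions.IDRange{{Min: 1000, Max: 2000}},
+//	})
+//	if err == nil {
+//		uid, _ := strategy.Generate(pod, container) // *uid == 1000, the first range's Min
+//		errs := strategy.Validate(pod, container)   // non-empty if RunAsUser is out of range
+//	}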
+type RunAsUserStrategy interface { + // Generate creates the uid based on policy rules. + Generate(pod *api.Pod, container *api.Container) (*int64, error) + // Validate ensures that the specified values fall within the range of the strategy. + Validate(pod *api.Pod, container *api.Container) field.ErrorList +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/security/podsecuritypolicy/util/util.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/security/podsecuritypolicy/util/util.go new file mode 100644 index 000000000000..097b1a6c21d9 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/security/podsecuritypolicy/util/util.go @@ -0,0 +1,145 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "fmt" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/pkg/util/sets" +) + +const ( + ValidatedPSPAnnotation = "kubernetes.io/psp" +) + +func GetAllFSTypesExcept(exceptions ...string) sets.String { + fstypes := GetAllFSTypesAsSet() + for _, e := range exceptions { + fstypes.Delete(e) + } + return fstypes +} + +func GetAllFSTypesAsSet() sets.String { + fstypes := sets.NewString() + fstypes.Insert( + string(extensions.HostPath), + string(extensions.AzureFile), + string(extensions.Flocker), + string(extensions.FlexVolume), + string(extensions.EmptyDir), + string(extensions.GCEPersistentDisk), + string(extensions.AWSElasticBlockStore), + string(extensions.GitRepo), + string(extensions.Secret), + string(extensions.NFS), + string(extensions.ISCSI), + string(extensions.Glusterfs), + string(extensions.PersistentVolumeClaim), + string(extensions.RBD), + string(extensions.Cinder), + string(extensions.CephFS), + string(extensions.DownwardAPI), + string(extensions.FC), + string(extensions.ConfigMap), + string(extensions.VsphereVolume)) + return fstypes +} + +// getVolumeFSType gets the FSType for a volume. 
+func GetVolumeFSType(v api.Volume) (extensions.FSType, error) {
+	switch {
+	case v.HostPath != nil:
+		return extensions.HostPath, nil
+	case v.EmptyDir != nil:
+		return extensions.EmptyDir, nil
+	case v.GCEPersistentDisk != nil:
+		return extensions.GCEPersistentDisk, nil
+	case v.AWSElasticBlockStore != nil:
+		return extensions.AWSElasticBlockStore, nil
+	case v.GitRepo != nil:
+		return extensions.GitRepo, nil
+	case v.Secret != nil:
+		return extensions.Secret, nil
+	case v.NFS != nil:
+		return extensions.NFS, nil
+	case v.ISCSI != nil:
+		return extensions.ISCSI, nil
+	case v.Glusterfs != nil:
+		return extensions.Glusterfs, nil
+	case v.PersistentVolumeClaim != nil:
+		return extensions.PersistentVolumeClaim, nil
+	case v.RBD != nil:
+		return extensions.RBD, nil
+	case v.FlexVolume != nil:
+		return extensions.FlexVolume, nil
+	case v.Cinder != nil:
+		return extensions.Cinder, nil
+	case v.CephFS != nil:
+		return extensions.CephFS, nil
+	case v.Flocker != nil:
+		return extensions.Flocker, nil
+	case v.DownwardAPI != nil:
+		return extensions.DownwardAPI, nil
+	case v.FC != nil:
+		return extensions.FC, nil
+	case v.AzureFile != nil:
+		return extensions.AzureFile, nil
+	case v.ConfigMap != nil:
+		return extensions.ConfigMap, nil
+	case v.VsphereVolume != nil:
+		return extensions.VsphereVolume, nil
+	}
+
+	return "", fmt.Errorf("unknown volume type for volume: %#v", v)
+}
+
+// FSTypeToStringSet converts an FSType slice to a string set.
+func FSTypeToStringSet(fsTypes []extensions.FSType) sets.String {
+	set := sets.NewString()
+	for _, v := range fsTypes {
+		set.Insert(string(v))
+	}
+	return set
+}
+
+// PSPAllowsAllVolumes checks for FSTypeAll in the psp's allowed volumes.
+func PSPAllowsAllVolumes(psp *extensions.PodSecurityPolicy) bool {
+	return PSPAllowsFSType(psp, extensions.All)
+}
+
+// PSPAllowsFSType is a utility for checking if a PSP allows a particular FSType.
+// If all volumes are allowed then this will return true for any FSType passed.
+func PSPAllowsFSType(psp *extensions.PodSecurityPolicy, fsType extensions.FSType) bool {
+	if psp == nil {
+		return false
+	}
+
+	for _, v := range psp.Spec.Volumes {
+		if v == fsType || v == extensions.All {
+			return true
+		}
+	}
+	return false
+}
+
+// FallsInRange is a utility to determine if the id falls in the valid range.
+func FallsInRange(id int64, rng extensions.IDRange) bool {
+	return id >= rng.Min && id <= rng.Max
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/security/podsecuritypolicy/util/util_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/security/podsecuritypolicy/util/util_test.go
new file mode 100644
index 000000000000..5c32b7487f63
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/security/podsecuritypolicy/util/util_test.go
@@ -0,0 +1,105 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+	"reflect"
+	"testing"
+
+	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/apis/extensions"
+)
+
+// TestVolumeSourceFSTypeDrift ensures that, for every known type of volume source (identified
+// by the fields on a VolumeSource object), GetVolumeFSType returns a good value. This ensures
+// both that we're returning an FSType for the VolumeSource field (protecting the GetVolumeFSType
+// method) and that we haven't drifted (new fields in VolumeSource are covered).
+func TestVolumeSourceFSTypeDrift(t *testing.T) {
+	allFSTypes := GetAllFSTypesAsSet()
+	val := reflect.ValueOf(api.VolumeSource{})
+
+	for i := 0; i < val.NumField(); i++ {
+		fieldVal := val.Type().Field(i)
+
+		volumeSource := api.VolumeSource{}
+		volumeSourceVolume := reflect.New(fieldVal.Type.Elem())
+
+		reflect.ValueOf(&volumeSource).Elem().FieldByName(fieldVal.Name).Set(volumeSourceVolume)
+
+		fsType, err := GetVolumeFSType(api.Volume{VolumeSource: volumeSource})
+		if err != nil {
+			t.Errorf("error getting fstype for field %s. This likely means that drift has occurred between FSType and VolumeSource. Please update the api and GetVolumeFSType", fieldVal.Name)
+		}
+
+		if !allFSTypes.Has(string(fsType)) {
+			t.Errorf("%s was missing from GetAllFSTypesAsSet", fsType)
+		}
+	}
+}
+
+func TestPSPAllowsFSType(t *testing.T) {
+	tests := map[string]struct {
+		psp    *extensions.PodSecurityPolicy
+		fsType extensions.FSType
+		allows bool
+	}{
+		"nil psp": {
+			psp:    nil,
+			fsType: extensions.HostPath,
+			allows: false,
+		},
+		"empty volumes": {
+			psp:    &extensions.PodSecurityPolicy{},
+			fsType: extensions.HostPath,
+			allows: false,
+		},
+		"non-matching": {
+			psp: &extensions.PodSecurityPolicy{
+				Spec: extensions.PodSecurityPolicySpec{
+					Volumes: []extensions.FSType{extensions.AWSElasticBlockStore},
+				},
+			},
+			fsType: extensions.HostPath,
+			allows: false,
+		},
+		"match on FSTypeAll": {
+			psp: &extensions.PodSecurityPolicy{
+				Spec: extensions.PodSecurityPolicySpec{
+					Volumes: []extensions.FSType{extensions.All},
+				},
+			},
+			fsType: extensions.HostPath,
+			allows: true,
+		},
+		"match on direct match": {
+			psp: &extensions.PodSecurityPolicy{
+				Spec: extensions.PodSecurityPolicySpec{
+					Volumes: []extensions.FSType{extensions.HostPath},
+				},
+			},
+			fsType: extensions.HostPath,
+			allows: true,
+		},
+	}
+
+	for k, v := range tests {
+		allows := PSPAllowsFSType(v.psp, v.fsType)
+		if v.allows != allows {
+			t.Errorf("%s expected PSPAllowsFSType to return %t but got %t", k, v.allows, allows)
+		}
+	}
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/OWNERS b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/OWNERS
new file mode 100644
index 000000000000..a57ded7f6587
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/OWNERS
@@ -0,0 +1,6 @@
+assignees:
+  - lavalamp
+  - liggitt
+  - timothysc
+  - wojtek-t
+  - xiang90
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/cacher.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/cacher.go
new file mode 100644
index 000000000000..e7b4d63e2f05
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/cacher.go
@@ -0,0 +1,629 @@
+/*
+Copyright 2015 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package storage
+
+import (
+	"fmt"
+	"net/http"
+	"reflect"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/api/errors"
+	"k8s.io/kubernetes/pkg/api/meta"
+	"k8s.io/kubernetes/pkg/api/rest"
+	"k8s.io/kubernetes/pkg/api/unversioned"
+	"k8s.io/kubernetes/pkg/client/cache"
+	"k8s.io/kubernetes/pkg/conversion"
+	"k8s.io/kubernetes/pkg/runtime"
+	utilruntime "k8s.io/kubernetes/pkg/util/runtime"
+	"k8s.io/kubernetes/pkg/util/wait"
+	"k8s.io/kubernetes/pkg/watch"
+
+	"github.com/golang/glog"
+	"golang.org/x/net/context"
+)
+
+// CacherConfig contains the configuration for a given Cache.
+type CacherConfig struct {
+	// Maximum size of the history cached in memory.
+	CacheCapacity int
+
+	// An underlying storage.Interface.
+	Storage Interface
+
+	// An underlying storage.Versioner.
+	Versioner Versioner
+
+	// The Cache will be caching objects of a given Type and assumes that they
+	// are all stored under ResourcePrefix directory in the underlying database.
+	Type           interface{}
+	ResourcePrefix string
+
+	// KeyFunc is used to get a key in the underlying storage for a given object.
+	KeyFunc func(runtime.Object) (string, error)
+
+	// NewListFunc is a function that creates a new empty object storing a list
+	// of objects of type Type.
+	NewListFunc func() runtime.Object
+}
+
+// Cacher is responsible for serving WATCH and LIST requests for a given
+// resource from its internal cache and updating its cache in the background
+// based on the underlying storage contents.
+// Cacher implements storage.Interface (although most of the calls are just
+// delegated to the underlying storage).
+type Cacher struct {
+	sync.RWMutex
+
+	// Each user-facing method that is not simply redirected to the underlying
+	// storage has to read-lock on this mutex before starting any processing.
+	// This is necessary to prevent users from accessing structures that are
+	// uninitialized or are being repopulated right now.
+	// NOTE: We cannot easily reuse the main mutex for it due to multi-threaded
+	// interactions of Cacher with the underlying WatchCache. Since Cacher is
+	// calling WatchCache directly and WatchCache is calling Cacher methods
+	// via its OnEvent and OnReplace hooks, we explicitly assume that if mutexes
+	// of both structures are held, the one from WatchCache is acquired first
+	// to avoid deadlocks. Unfortunately, forcing this rule in startCaching
+	// would be very difficult and introducing one more mutex seems to be much
+	// easier.
+	usable sync.RWMutex
+
+	// Underlying storage.Interface.
+	storage Interface
+
+	// "sliding window" of recent changes of objects and the current state.
+	watchCache *watchCache
+	reflector  *cache.Reflector
+
+	// Registered watchers.
+	watcherIdx int
+	watchers   map[int]*cacheWatcher
+
+	// Versioner is used to handle resource versions.
+	versioner Versioner
+
+	// keyFunc is used to get a key in the underlying storage for a given object.
+	keyFunc func(runtime.Object) (string, error)
+
+	// Handling graceful termination.
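+	// stopCh is closed by Stop() to signal both the wait.Until goroutine and
+	// reflector.ListAndWatch; stopWg lets Stop() block until that goroutine
+	// has fully exited (see the comment inside NewCacherFromConfig).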
+	stopLock sync.RWMutex
+	stopped  bool
+	stopCh   chan struct{}
+	stopWg   sync.WaitGroup
+}
+
+// NewCacher creates a new Cacher responsible for serving WATCH and LIST requests from its
+// internal cache and for updating its cache in the background based on the given
+// configuration.
+func NewCacher(
+	storage Interface,
+	capacity int,
+	versioner Versioner,
+	objectType runtime.Object,
+	resourcePrefix string,
+	scopeStrategy rest.NamespaceScopedStrategy,
+	newListFunc func() runtime.Object) Interface {
+	config := CacherConfig{
+		CacheCapacity:  capacity,
+		Storage:        storage,
+		Versioner:      versioner,
+		Type:           objectType,
+		ResourcePrefix: resourcePrefix,
+		NewListFunc:    newListFunc,
+	}
+	if scopeStrategy.NamespaceScoped() {
+		config.KeyFunc = func(obj runtime.Object) (string, error) {
+			return NamespaceKeyFunc(resourcePrefix, obj)
+		}
+	} else {
+		config.KeyFunc = func(obj runtime.Object) (string, error) {
+			return NoNamespaceKeyFunc(resourcePrefix, obj)
+		}
+	}
+	return NewCacherFromConfig(config)
+}
+
+// NewCacherFromConfig creates a new Cacher responsible for serving WATCH and LIST requests
+// from its internal cache and for updating its cache in the background based on the given
+// configuration.
+func NewCacherFromConfig(config CacherConfig) *Cacher {
+	watchCache := newWatchCache(config.CacheCapacity)
+	listerWatcher := newCacherListerWatcher(config.Storage, config.ResourcePrefix, config.NewListFunc)
+
+	// Give this error when it is constructed rather than when you get the
+	// first watch item, because it's much easier to track down that way.
+	if obj, ok := config.Type.(runtime.Object); ok {
+		if err := runtime.CheckCodec(config.Storage.Codec(), obj); err != nil {
+			panic("storage codec doesn't seem to match given type: " + err.Error())
+		}
+	}
+
+	cacher := &Cacher{
+		usable:     sync.RWMutex{},
+		storage:    config.Storage,
+		watchCache: watchCache,
+		reflector:  cache.NewReflector(listerWatcher, config.Type, watchCache, 0),
+		watcherIdx: 0,
+		watchers:   make(map[int]*cacheWatcher),
+		versioner:  config.Versioner,
+		keyFunc:    config.KeyFunc,
+		stopped:    false,
+		// We need to (potentially) stop both:
+		// - wait.Until go-routine
+		// - reflector.ListAndWatch
+		// and there are no guarantees on the order that they will stop.
+		// So we will be simply closing the channel, and synchronizing on the WaitGroup.
+		stopCh: make(chan struct{}),
+		stopWg: sync.WaitGroup{},
+	}
+	// See startCaching method for explanation and where this is unlocked.
+	cacher.usable.Lock()
+	watchCache.SetOnEvent(cacher.processEvent)
+
+	stopCh := cacher.stopCh
+	cacher.stopWg.Add(1)
+	go func() {
+		defer cacher.stopWg.Done()
+		wait.Until(
+			func() {
+				if !cacher.isStopped() {
+					cacher.startCaching(stopCh)
+				}
+			}, time.Second, stopCh,
+		)
+	}()
+	return cacher
+}
+
+func (c *Cacher) startCaching(stopChannel <-chan struct{}) {
+	// The 'usable' lock is always 'RLock'able when it is safe to use the cache.
+	// It is safe to use the cache after a successful list until a disconnection.
+	// We start with usable (write) locked. The below OnReplace function will
+	// unlock it after a successful list. The below defer will then re-lock
+	// it when this function exits (always due to disconnection), only if
+	// we actually got a successful list. This cycle will repeat as needed.
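+	// A sketch of the intended per-connection sequence (informal, not enforced
+	// by the types): usable.Lock() -> ListAndWatch succeeds -> OnReplace fires
+	// -> usable.Unlock() -> disconnection -> the defer below re-locks usable
+	// -> wait.Until calls startCaching again.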
+ successfulList := false + c.watchCache.SetOnReplace(func() { + successfulList = true + c.usable.Unlock() + }) + defer func() { + if successfulList { + c.usable.Lock() + } + }() + + c.terminateAllWatchers() + // Note that since onReplace may be not called due to errors, we explicitly + // need to retry it on errors under lock. + // Also note that startCaching is called in a loop, so there's no need + // to have another loop here. + if err := c.reflector.ListAndWatch(stopChannel); err != nil { + glog.Errorf("unexpected ListAndWatch error: %v", err) + } +} + +// Implements storage.Interface. +func (c *Cacher) Backends(ctx context.Context) []string { + return c.storage.Backends(ctx) +} + +// Implements storage.Interface. +func (c *Cacher) Versioner() Versioner { + return c.storage.Versioner() +} + +// Implements storage.Interface. +func (c *Cacher) Create(ctx context.Context, key string, obj, out runtime.Object, ttl uint64) error { + return c.storage.Create(ctx, key, obj, out, ttl) +} + +// Implements storage.Interface. +func (c *Cacher) Delete(ctx context.Context, key string, out runtime.Object, preconditions *Preconditions) error { + return c.storage.Delete(ctx, key, out, preconditions) +} + +// Implements storage.Interface. +func (c *Cacher) Watch(ctx context.Context, key string, resourceVersion string, filter FilterFunc) (watch.Interface, error) { + watchRV, err := ParseWatchResourceVersion(resourceVersion) + if err != nil { + return nil, err + } + + // Do NOT allow Watch to start when the underlying structures are not propagated. + c.usable.RLock() + defer c.usable.RUnlock() + + // We explicitly use thread unsafe version and do locking ourself to ensure that + // no new events will be processed in the meantime. The watchCache will be unlocked + // on return from this function. + // Note that we cannot do it under Cacher lock, to avoid a deadlock, since the + // underlying watchCache is calling processEvent under its lock. + c.watchCache.RLock() + defer c.watchCache.RUnlock() + initEvents, err := c.watchCache.GetAllEventsSinceThreadUnsafe(watchRV) + if err != nil { + // To match the uncached watch implementation, once we have passed authn/authz/admission, + // and successfully parsed a resource version, other errors must fail with a watch event of type ERROR, + // rather than a directly returned error. + return newErrWatcher(err), nil + } + + c.Lock() + defer c.Unlock() + watcher := newCacheWatcher(watchRV, initEvents, filterFunction(key, c.keyFunc, filter), forgetWatcher(c, c.watcherIdx)) + c.watchers[c.watcherIdx] = watcher + c.watcherIdx++ + return watcher, nil +} + +// Implements storage.Interface. +func (c *Cacher) WatchList(ctx context.Context, key string, resourceVersion string, filter FilterFunc) (watch.Interface, error) { + return c.Watch(ctx, key, resourceVersion, filter) +} + +// Implements storage.Interface. +func (c *Cacher) Get(ctx context.Context, key string, objPtr runtime.Object, ignoreNotFound bool) error { + return c.storage.Get(ctx, key, objPtr, ignoreNotFound) +} + +// Implements storage.Interface. +func (c *Cacher) GetToList(ctx context.Context, key string, filter FilterFunc, listObj runtime.Object) error { + return c.storage.GetToList(ctx, key, filter, listObj) +} + +// Implements storage.Interface. +func (c *Cacher) List(ctx context.Context, key string, resourceVersion string, filter FilterFunc, listObj runtime.Object) error { + if resourceVersion == "" { + // If resourceVersion is not specified, serve it from underlying + // storage (for backward compatibility). 
+		return c.storage.List(ctx, key, resourceVersion, filter, listObj)
+	}
+
+	// If resourceVersion is specified, serve it from cache.
+	// It's guaranteed that the returned value is at least as fresh as the
+	// given resourceVersion.
+
+	listRV, err := ParseListResourceVersion(resourceVersion)
+	if err != nil {
+		return err
+	}
+
+	// To avoid a situation where List is processed before the underlying
+	// watchCache is propagated for the first time, we acquire and immediately
+	// release the 'usable' lock.
+	// We don't need to hold it all the time, because watchCache is thread-safe
+	// and holding it would complicate the already very difficult locking pattern.
+	c.usable.RLock()
+	c.usable.RUnlock()
+
+	// List elements from cache, with at least 'listRV'.
+	listPtr, err := meta.GetItemsPtr(listObj)
+	if err != nil {
+		return err
+	}
+	listVal, err := conversion.EnforcePtr(listPtr)
+	if err != nil || listVal.Kind() != reflect.Slice {
+		return fmt.Errorf("need a pointer to slice, got %v", listVal.Kind())
+	}
+	filterFunc := filterFunction(key, c.keyFunc, filter)
+
+	objs, readResourceVersion, err := c.watchCache.WaitUntilFreshAndList(listRV)
+	if err != nil {
+		return fmt.Errorf("failed to wait for fresh list: %v", err)
+	}
+	for _, obj := range objs {
+		object, ok := obj.(runtime.Object)
+		if !ok {
+			return fmt.Errorf("non runtime.Object returned from storage: %v", obj)
+		}
+		if filterFunc(object) {
+			listVal.Set(reflect.Append(listVal, reflect.ValueOf(object).Elem()))
+		}
+	}
+	if c.versioner != nil {
+		if err := c.versioner.UpdateList(listObj, readResourceVersion); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Implements storage.Interface.
+func (c *Cacher) GuaranteedUpdate(ctx context.Context, key string, ptrToType runtime.Object, ignoreNotFound bool, preconditions *Preconditions, tryUpdate UpdateFunc) error {
+	return c.storage.GuaranteedUpdate(ctx, key, ptrToType, ignoreNotFound, preconditions, tryUpdate)
+}
+
+// Implements storage.Interface.
+func (c *Cacher) Codec() runtime.Codec {
+	return c.storage.Codec()
+}
+
+func (c *Cacher) processEvent(event watchCacheEvent) {
+	c.Lock()
+	defer c.Unlock()
+	for _, watcher := range c.watchers {
+		watcher.add(event)
+	}
+}
+
+func (c *Cacher) terminateAllWatchers() {
+	c.Lock()
+	defer c.Unlock()
+	for key, watcher := range c.watchers {
+		delete(c.watchers, key)
+		watcher.stop()
+	}
+}
+
+func (c *Cacher) isStopped() bool {
+	c.stopLock.RLock()
+	defer c.stopLock.RUnlock()
+	return c.stopped
+}
+
+func (c *Cacher) Stop() {
+	c.stopLock.Lock()
+	c.stopped = true
+	c.stopLock.Unlock()
+	close(c.stopCh)
+	c.stopWg.Wait()
+}
+
+func forgetWatcher(c *Cacher, index int) func(bool) {
+	return func(lock bool) {
+		if lock {
+			c.Lock()
+			defer c.Unlock()
+		}
+		// It's possible that the watcher is already not in the map (e.g. in case of
+		// simultaneous Stop() and terminateAllWatchers()), but that doesn't break anything.
+		delete(c.watchers, index)
+	}
+}
+
+func filterFunction(key string, keyFunc func(runtime.Object) (string, error), filter FilterFunc) FilterFunc {
+	return func(obj runtime.Object) bool {
+		objKey, err := keyFunc(obj)
+		if err != nil {
+			glog.Errorf("invalid object for filter: %v", obj)
+			return false
+		}
+		if !strings.HasPrefix(objKey, key) {
+			return false
+		}
+		return filter(obj)
+	}
+}
+
+// LastSyncResourceVersion returns the resource version to which the underlying cache is synced.
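+// Callers typically use this to start a watch "from now"; the tests in
+// cacher_test.go, for example, format the returned value with strconv.Itoa
+// to build a starting resourceVersion for Watch/WatchList.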
+func (c *Cacher) LastSyncResourceVersion() (uint64, error) {
+	// To avoid a situation where LastSyncResourceVersion is processed before the
+	// underlying watchCache is propagated, we acquire the 'usable' lock.
+	c.usable.RLock()
+	defer c.usable.RUnlock()
+
+	c.RLock()
+	defer c.RUnlock()
+
+	resourceVersion := c.reflector.LastSyncResourceVersion()
+	if resourceVersion == "" {
+		return 0, nil
+	}
+	return strconv.ParseUint(resourceVersion, 10, 64)
+}
+
+// cacherListerWatcher wraps storage.Interface to expose cache.ListerWatcher.
+type cacherListerWatcher struct {
+	storage        Interface
+	resourcePrefix string
+	newListFunc    func() runtime.Object
+}
+
+func newCacherListerWatcher(storage Interface, resourcePrefix string, newListFunc func() runtime.Object) cache.ListerWatcher {
+	return &cacherListerWatcher{
+		storage:        storage,
+		resourcePrefix: resourcePrefix,
+		newListFunc:    newListFunc,
+	}
+}
+
+// Implements cache.ListerWatcher interface.
+func (lw *cacherListerWatcher) List(options api.ListOptions) (runtime.Object, error) {
+	list := lw.newListFunc()
+	if err := lw.storage.List(context.TODO(), lw.resourcePrefix, "", Everything, list); err != nil {
+		return nil, err
+	}
+	return list, nil
+}
+
+// Implements cache.ListerWatcher interface.
+func (lw *cacherListerWatcher) Watch(options api.ListOptions) (watch.Interface, error) {
+	return lw.storage.WatchList(context.TODO(), lw.resourcePrefix, options.ResourceVersion, Everything)
+}
+
+// errWatcher implements watch.Interface to return a single error.
+type errWatcher struct {
+	result chan watch.Event
+}
+
+func newErrWatcher(err error) *errWatcher {
+	// Create an error event
+	errEvent := watch.Event{Type: watch.Error}
+	switch err := err.(type) {
+	case runtime.Object:
+		errEvent.Object = err
+	case *errors.StatusError:
+		errEvent.Object = &err.ErrStatus
+	default:
+		errEvent.Object = &unversioned.Status{
+			Status:  unversioned.StatusFailure,
+			Message: err.Error(),
+			Reason:  unversioned.StatusReasonInternalError,
+			Code:    http.StatusInternalServerError,
+		}
+	}
+
+	// Create a watcher with room for a single event, populate it, and close the channel
+	watcher := &errWatcher{result: make(chan watch.Event, 1)}
+	watcher.result <- errEvent
+	close(watcher.result)
+
+	return watcher
+}
+
+// Implements watch.Interface.
+func (c *errWatcher) ResultChan() <-chan watch.Event {
+	return c.result
+}
+
+// Implements watch.Interface.
+func (c *errWatcher) Stop() {
+	// no-op
+}
+
+// cacheWatcher implements watch.Interface.
+type cacheWatcher struct {
+	sync.Mutex
+	input   chan watchCacheEvent
+	result  chan watch.Event
+	filter  FilterFunc
+	stopped bool
+	forget  func(bool)
+}
+
+func newCacheWatcher(resourceVersion uint64, initEvents []watchCacheEvent, filter FilterFunc, forget func(bool)) *cacheWatcher {
+	watcher := &cacheWatcher{
+		input:   make(chan watchCacheEvent, 10),
+		result:  make(chan watch.Event, 10),
+		filter:  filter,
+		stopped: false,
+		forget:  forget,
+	}
+	go watcher.process(initEvents, resourceVersion)
+	return watcher
+}
+
+// Implements watch.Interface.
+func (c *cacheWatcher) ResultChan() <-chan watch.Event {
+	return c.result
+}
+
+// Implements watch.Interface.
+func (c *cacheWatcher) Stop() {
+	c.forget(true)
+	c.stop()
+}
+
+func (c *cacheWatcher) stop() {
+	c.Lock()
+	defer c.Unlock()
+	if !c.stopped {
+		c.stopped = true
+		close(c.input)
+	}
+}
+
+var timerPool sync.Pool
+
+func (c *cacheWatcher) add(event watchCacheEvent) {
+	// Try to send the event immediately, without blocking.
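+	// (Flow sketch: the select below is the non-blocking fast path; if the
+	// buffered input channel is full, the code that follows retries with a
+	// pooled 5s timer and, on timeout, forgets and stops this watcher rather
+	// than blocking event distribution forever.)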
+ select { + case c.input <- event: + return + default: + } + + // OK, block sending, but only for up to 5 seconds. + // cacheWatcher.add is called very often, so arrange + // to reuse timers instead of constantly allocating. + const timeout = 5 * time.Second + t, ok := timerPool.Get().(*time.Timer) + if ok { + t.Reset(timeout) + } else { + t = time.NewTimer(timeout) + } + defer timerPool.Put(t) + + select { + case c.input <- event: + stopped := t.Stop() + if !stopped { + // Consume triggered (but not yet received) timer event + // so that future reuse does not get a spurious timeout. + <-t.C + } + case <-t.C: + // This means that we couldn't send event to that watcher. + // Since we don't want to block on it infinitely, + // we simply terminate it. + c.forget(false) + c.stop() + } +} + +func (c *cacheWatcher) sendWatchCacheEvent(event watchCacheEvent) { + curObjPasses := event.Type != watch.Deleted && c.filter(event.Object) + oldObjPasses := false + if event.PrevObject != nil { + oldObjPasses = c.filter(event.PrevObject) + } + if !curObjPasses && !oldObjPasses { + // Watcher is not interested in that object. + return + } + + object, err := api.Scheme.Copy(event.Object) + if err != nil { + glog.Errorf("unexpected copy error: %v", err) + return + } + switch { + case curObjPasses && !oldObjPasses: + c.result <- watch.Event{Type: watch.Added, Object: object} + case curObjPasses && oldObjPasses: + c.result <- watch.Event{Type: watch.Modified, Object: object} + case !curObjPasses && oldObjPasses: + c.result <- watch.Event{Type: watch.Deleted, Object: object} + } +} + +func (c *cacheWatcher) process(initEvents []watchCacheEvent, resourceVersion uint64) { + defer utilruntime.HandleCrash() + + for _, event := range initEvents { + c.sendWatchCacheEvent(event) + } + defer close(c.result) + defer c.Stop() + for { + event, ok := <-c.input + if !ok { + return + } + // only send events newer than resourceVersion + if event.ResourceVersion > resourceVersion { + c.sendWatchCacheEvent(event) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/cacher_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/cacher_test.go new file mode 100644 index 000000000000..37408f876c9b --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/cacher_test.go @@ -0,0 +1,401 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package storage_test + +import ( + "fmt" + "reflect" + goruntime "runtime" + "strconv" + "testing" + "time" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/errors" + "k8s.io/kubernetes/pkg/api/meta" + "k8s.io/kubernetes/pkg/api/testapi" + apitesting "k8s.io/kubernetes/pkg/api/testing" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/labels" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/storage" + etcdstorage "k8s.io/kubernetes/pkg/storage/etcd" + "k8s.io/kubernetes/pkg/storage/etcd/etcdtest" + etcdtesting "k8s.io/kubernetes/pkg/storage/etcd/testing" + "k8s.io/kubernetes/pkg/util/sets" + "k8s.io/kubernetes/pkg/util/wait" + "k8s.io/kubernetes/pkg/watch" + + "golang.org/x/net/context" +) + +func newEtcdTestStorage(t *testing.T, codec runtime.Codec, prefix string) (*etcdtesting.EtcdTestServer, storage.Interface) { + server := etcdtesting.NewEtcdTestClientServer(t) + storage := etcdstorage.NewEtcdStorage(server.Client, codec, prefix, false, etcdtest.DeserializationCacheSize) + return server, storage +} + +func newTestCacher(s storage.Interface) *storage.Cacher { + prefix := "pods" + config := storage.CacherConfig{ + CacheCapacity: 10, + Storage: s, + Versioner: etcdstorage.APIObjectVersioner{}, + Type: &api.Pod{}, + ResourcePrefix: prefix, + KeyFunc: func(obj runtime.Object) (string, error) { return storage.NamespaceKeyFunc(prefix, obj) }, + NewListFunc: func() runtime.Object { return &api.PodList{} }, + } + return storage.NewCacherFromConfig(config) +} + +func makeTestPod(name string) *api.Pod { + return &api.Pod{ + ObjectMeta: api.ObjectMeta{Namespace: "ns", Name: name}, + Spec: apitesting.DeepEqualSafePodSpec(), + } +} + +func updatePod(t *testing.T, s storage.Interface, obj, old *api.Pod) *api.Pod { + updateFn := func(input runtime.Object, res storage.ResponseMeta) (runtime.Object, *uint64, error) { + newObj, err := api.Scheme.DeepCopy(obj) + if err != nil { + t.Errorf("unexpected error: %v", err) + return nil, nil, err + } + return newObj.(*api.Pod), nil, nil + } + key := etcdtest.AddPrefix("pods/ns/" + obj.Name) + if err := s.GuaranteedUpdate(context.TODO(), key, &api.Pod{}, old == nil, nil, updateFn); err != nil { + t.Errorf("unexpected error: %v", err) + } + obj.ResourceVersion = "" + result := &api.Pod{} + if err := s.Get(context.TODO(), key, result, false); err != nil { + t.Errorf("unexpected error: %v", err) + } + return result +} + +func TestList(t *testing.T) { + server, etcdStorage := newEtcdTestStorage(t, testapi.Default.Codec(), etcdtest.PathPrefix()) + defer server.Terminate(t) + cacher := newTestCacher(etcdStorage) + defer cacher.Stop() + + podFoo := makeTestPod("foo") + podBar := makeTestPod("bar") + podBaz := makeTestPod("baz") + + podFooPrime := makeTestPod("foo") + podFooPrime.Spec.NodeName = "fakeNode" + + fooCreated := updatePod(t, etcdStorage, podFoo, nil) + _ = updatePod(t, etcdStorage, podBar, nil) + _ = updatePod(t, etcdStorage, podBaz, nil) + + _ = updatePod(t, etcdStorage, podFooPrime, fooCreated) + + deleted := api.Pod{} + if err := etcdStorage.Delete(context.TODO(), etcdtest.AddPrefix("pods/ns/bar"), &deleted, nil); err != nil { + t.Errorf("Unexpected error: %v", err) + } + + // We first List directly from etcd by passing empty resourceVersion, + // to get the current etcd resourceVersion. 
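+	// (Per Cacher.List, an empty resourceVersion bypasses the cache and reads
+	// straight from the underlying storage, so this call also exercises the
+	// backward-compatibility path.)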
+	rvResult := &api.PodList{}
+	if err := cacher.List(context.TODO(), "pods/ns", "", storage.Everything, rvResult); err != nil {
+		t.Errorf("Unexpected error: %v", err)
+	}
+	deletedPodRV := rvResult.ListMeta.ResourceVersion
+
+	result := &api.PodList{}
+	// We pass the current etcd ResourceVersion received from the above List() operation,
+	// since there is no easy way to get the ResourceVersion of the barPod deletion operation.
+	if err := cacher.List(context.TODO(), "pods/ns", deletedPodRV, storage.Everything, result); err != nil {
+		t.Errorf("Unexpected error: %v", err)
+	}
+	if result.ListMeta.ResourceVersion != deletedPodRV {
+		t.Errorf("Incorrect resource version: %v", result.ListMeta.ResourceVersion)
+	}
+	if len(result.Items) != 2 {
+		t.Errorf("Unexpected list result: %d", len(result.Items))
+	}
+	keys := sets.String{}
+	for _, item := range result.Items {
+		keys.Insert(item.Name)
+	}
+	if !keys.HasAll("foo", "baz") {
+		t.Errorf("Unexpected list result: %#v", result)
+	}
+	for _, item := range result.Items {
+		// unset fields that are set by the infrastructure
+		item.ResourceVersion = ""
+		item.CreationTimestamp = unversioned.Time{}
+
+		var expected *api.Pod
+		switch item.Name {
+		case "foo":
+			expected = podFooPrime
+		case "baz":
+			expected = podBaz
+		default:
+			t.Errorf("Unexpected item: %v", item)
+		}
+		if e, a := *expected, item; !reflect.DeepEqual(e, a) {
+			t.Errorf("Expected: %#v, got: %#v", e, a)
+		}
+	}
+}
+
+func verifyWatchEvent(t *testing.T, w watch.Interface, eventType watch.EventType, eventObject runtime.Object) {
+	_, _, line, _ := goruntime.Caller(1)
+	select {
+	case event := <-w.ResultChan():
+		if e, a := eventType, event.Type; e != a {
+			t.Logf("(called from line %d)", line)
+			t.Errorf("Expected: %s, got: %s", eventType, event.Type)
+		}
+		if e, a := eventObject, event.Object; !api.Semantic.DeepDerivative(e, a) {
+			t.Logf("(called from line %d)", line)
+			t.Errorf("Expected (%s): %#v, got: %#v", eventType, e, a)
+		}
+	case <-time.After(wait.ForeverTestTimeout):
+		t.Logf("(called from line %d)", line)
+		t.Errorf("Timed out waiting for an event")
+	}
+}
+
+type injectListError struct {
+	errors int
+	storage.Interface
+}
+
+func (self *injectListError) List(ctx context.Context, key string, resourceVersion string, filter storage.FilterFunc, listObj runtime.Object) error {
+	if self.errors > 0 {
+		self.errors--
+		return fmt.Errorf("injected error")
+	}
+	return self.Interface.List(ctx, key, resourceVersion, filter, listObj)
+}
+
+func TestWatch(t *testing.T) {
+	server, etcdStorage := newEtcdTestStorage(t, testapi.Default.Codec(), etcdtest.PathPrefix())
+	// Inject one list error to make sure we test the relist case.
+	etcdStorage = &injectListError{errors: 1, Interface: etcdStorage}
+	defer server.Terminate(t)
+	cacher := newTestCacher(etcdStorage)
+	defer cacher.Stop()
+
+	podFoo := makeTestPod("foo")
+	podBar := makeTestPod("bar")
+
+	podFooPrime := makeTestPod("foo")
+	podFooPrime.Spec.NodeName = "fakeNode"
+
+	podFooBis := makeTestPod("foo")
+	podFooBis.Spec.NodeName = "anotherFakeNode"
+
+	// initialVersion is used to initiate the watcher at the beginning of the world,
+	// which is not defined precisely in etcd.
+	initialVersion, err := cacher.LastSyncResourceVersion()
+	if err != nil {
+		t.Fatalf("Unexpected error: %v", err)
+	}
+	startVersion := strconv.Itoa(int(initialVersion))
+
+	// Set up Watch for object "podFoo".
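+	// A watch.Interface delivers events on ResultChan() until Stop() is called.
+	// A minimal consumer loop looks roughly like this (sketch only; handleUpdate
+	// and handleDelete are hypothetical):
+	//
+	//	for event := range w.ResultChan() {
+	//		switch event.Type {
+	//		case watch.Added, watch.Modified:
+	//			handleUpdate(event.Object)
+	//		case watch.Deleted:
+	//			handleDelete(event.Object)
+	//		case watch.Error:
+	//			// The object describes the failure (e.g. a "Gone" status
+	//			// for a too-old resourceVersion, as tested below).
+	//			return
+	//		}
+	//	}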
+	watcher, err := cacher.Watch(context.TODO(), "pods/ns/foo", startVersion, storage.Everything)
+	if err != nil {
+		t.Fatalf("Unexpected error: %v", err)
+	}
+	defer watcher.Stop()
+
+	fooCreated := updatePod(t, etcdStorage, podFoo, nil)
+	_ = updatePod(t, etcdStorage, podBar, nil)
+	fooUpdated := updatePod(t, etcdStorage, podFooPrime, fooCreated)
+
+	verifyWatchEvent(t, watcher, watch.Added, podFoo)
+	verifyWatchEvent(t, watcher, watch.Modified, podFooPrime)
+
+	// Check whether we get a too-old error via the watch channel.
+	tooOldWatcher, err := cacher.Watch(context.TODO(), "pods/ns/foo", "1", storage.Everything)
+	if err != nil {
+		t.Fatalf("Expected no direct error, got %v", err)
+	}
+	defer tooOldWatcher.Stop()
+	// Ensure we get a "Gone" error.
+	expectedGoneError := errors.NewGone("").ErrStatus
+	verifyWatchEvent(t, tooOldWatcher, watch.Error, &expectedGoneError)
+
+	initialWatcher, err := cacher.Watch(context.TODO(), "pods/ns/foo", fooCreated.ResourceVersion, storage.Everything)
+	if err != nil {
+		t.Fatalf("Unexpected error: %v", err)
+	}
+	defer initialWatcher.Stop()
+
+	verifyWatchEvent(t, initialWatcher, watch.Modified, podFooPrime)
+
+	// Now test watch from "now".
+	nowWatcher, err := cacher.Watch(context.TODO(), "pods/ns/foo", "0", storage.Everything)
+	if err != nil {
+		t.Fatalf("Unexpected error: %v", err)
+	}
+	defer nowWatcher.Stop()
+
+	verifyWatchEvent(t, nowWatcher, watch.Added, podFooPrime)
+
+	_ = updatePod(t, etcdStorage, podFooBis, fooUpdated)
+
+	verifyWatchEvent(t, nowWatcher, watch.Modified, podFooBis)
+}
+
+func TestWatcherTimeout(t *testing.T) {
+	server, etcdStorage := newEtcdTestStorage(t, testapi.Default.Codec(), etcdtest.PathPrefix())
+	defer server.Terminate(t)
+	cacher := newTestCacher(etcdStorage)
+	defer cacher.Stop()
+
+	// initialVersion is used to initiate the watcher at the beginning of the world,
+	// which is not defined precisely in etcd.
+	initialVersion, err := cacher.LastSyncResourceVersion()
+	if err != nil {
+		t.Fatalf("Unexpected error: %v", err)
+	}
+	startVersion := strconv.Itoa(int(initialVersion))
+
+	// Create a watcher that will not be reading any results.
+	watcher, err := cacher.WatchList(context.TODO(), "pods/ns", startVersion, storage.Everything)
+	if err != nil {
+		t.Fatalf("Unexpected error: %v", err)
+	}
+	defer watcher.Stop()
+
+	// Create a second watcher that will be reading the results.
+	readingWatcher, err := cacher.WatchList(context.TODO(), "pods/ns", startVersion, storage.Everything)
+	if err != nil {
+		t.Fatalf("Unexpected error: %v", err)
+	}
+	defer readingWatcher.Stop()
+
+	for i := 1; i <= 22; i++ {
+		pod := makeTestPod(strconv.Itoa(i))
+		_ = updatePod(t, etcdStorage, pod, nil)
+		verifyWatchEvent(t, readingWatcher, watch.Added, pod)
+	}
+}
+
+func TestFiltering(t *testing.T) {
+	server, etcdStorage := newEtcdTestStorage(t, testapi.Default.Codec(), etcdtest.PathPrefix())
+	defer server.Terminate(t)
+	cacher := newTestCacher(etcdStorage)
+	defer cacher.Stop()
+
+	// Ensure that the cacher is initialized, before creating any pods,
+	// so that we are sure that all events will be present in cacher.
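+	// The idiom relied on here: opening a watch from "now" (resourceVersion "0")
+	// and stopping it immediately forces the cacher to finish its initial sync
+	// before the test proceeds. A sketch of the same idiom:
+	//
+	//	w, err := cacher.Watch(ctx, key, "0", storage.Everything)
+	//	if err != nil {
+	//		return err // cacher not ready
+	//	}
+	//	w.Stop() // only the side effect of a synced cache is needed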
+ syncWatcher, err := cacher.Watch(context.TODO(), "pods/ns/foo", "0", storage.Everything) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + syncWatcher.Stop() + + podFoo := makeTestPod("foo") + podFoo.Labels = map[string]string{"filter": "foo"} + podFooFiltered := makeTestPod("foo") + podFooPrime := makeTestPod("foo") + podFooPrime.Labels = map[string]string{"filter": "foo"} + podFooPrime.Spec.NodeName = "fakeNode" + + fooCreated := updatePod(t, etcdStorage, podFoo, nil) + fooFiltered := updatePod(t, etcdStorage, podFooFiltered, fooCreated) + fooUnfiltered := updatePod(t, etcdStorage, podFoo, fooFiltered) + _ = updatePod(t, etcdStorage, podFooPrime, fooUnfiltered) + + deleted := api.Pod{} + if err := etcdStorage.Delete(context.TODO(), etcdtest.AddPrefix("pods/ns/foo"), &deleted, nil); err != nil { + t.Errorf("Unexpected error: %v", err) + } + + // Set up Watch for object "podFoo" with label filter set. + selector := labels.SelectorFromSet(labels.Set{"filter": "foo"}) + filter := func(obj runtime.Object) bool { + metadata, err := meta.Accessor(obj) + if err != nil { + t.Errorf("Unexpected error: %v", err) + return false + } + return selector.Matches(labels.Set(metadata.GetLabels())) + } + watcher, err := cacher.Watch(context.TODO(), "pods/ns/foo", fooCreated.ResourceVersion, filter) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + defer watcher.Stop() + + verifyWatchEvent(t, watcher, watch.Deleted, podFooFiltered) + verifyWatchEvent(t, watcher, watch.Added, podFoo) + verifyWatchEvent(t, watcher, watch.Modified, podFooPrime) + verifyWatchEvent(t, watcher, watch.Deleted, podFooPrime) +} + +func TestStartingResourceVersion(t *testing.T) { + server, etcdStorage := newEtcdTestStorage(t, testapi.Default.Codec(), etcdtest.PathPrefix()) + defer server.Terminate(t) + cacher := newTestCacher(etcdStorage) + defer cacher.Stop() + + // add 1 object + podFoo := makeTestPod("foo") + fooCreated := updatePod(t, etcdStorage, podFoo, nil) + + // Set up Watch starting at fooCreated.ResourceVersion + 10 + rv, err := storage.ParseWatchResourceVersion(fooCreated.ResourceVersion) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + rv += 10 + startVersion := strconv.Itoa(int(rv)) + + watcher, err := cacher.Watch(context.TODO(), "pods/ns/foo", startVersion, storage.Everything) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + defer watcher.Stop() + + lastFoo := fooCreated + for i := 0; i < 11; i++ { + podFooForUpdate := makeTestPod("foo") + podFooForUpdate.Labels = map[string]string{"foo": strconv.Itoa(i)} + lastFoo = updatePod(t, etcdStorage, podFooForUpdate, lastFoo) + } + + select { + case e := <-watcher.ResultChan(): + pod := e.Object.(*api.Pod) + podRV, err := storage.ParseWatchResourceVersion(pod.ResourceVersion) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + // event should have at least rv + 1, since we're starting the watch at rv + if podRV <= rv { + t.Errorf("expected event with resourceVersion of at least %d, got %d", rv+1, podRV) + } + case <-time.After(wait.ForeverTestTimeout): + t.Errorf("timed out waiting for event") + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/doc.go new file mode 100644 index 000000000000..dca0d5b70965 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. 
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Interfaces for database-related operations.
+package storage
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/errors.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/errors.go
new file mode 100644
index 000000000000..61b3cba52c75
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/errors.go
@@ -0,0 +1,174 @@
+/*
+Copyright 2015 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package storage
+
+import (
+	"fmt"
+
+	"k8s.io/kubernetes/pkg/util/validation/field"
+)
+
+const (
+	ErrCodeKeyNotFound int = iota + 1
+	ErrCodeKeyExists
+	ErrCodeResourceVersionConflicts
+	ErrCodeInvalidObj
+	ErrCodeUnreachable
+)
+
+var errCodeToMessage = map[int]string{
+	ErrCodeKeyNotFound:              "key not found",
+	ErrCodeKeyExists:                "key exists",
+	ErrCodeResourceVersionConflicts: "resource version conflicts",
+	ErrCodeInvalidObj:               "invalid object",
+	ErrCodeUnreachable:              "server unreachable",
+}
+
+func NewKeyNotFoundError(key string, rv int64) *StorageError {
+	return &StorageError{
+		Code:            ErrCodeKeyNotFound,
+		Key:             key,
+		ResourceVersion: rv,
+	}
+}
+
+func NewKeyExistsError(key string, rv int64) *StorageError {
+	return &StorageError{
+		Code:            ErrCodeKeyExists,
+		Key:             key,
+		ResourceVersion: rv,
+	}
+}
+
+func NewResourceVersionConflictsError(key string, rv int64) *StorageError {
+	return &StorageError{
+		Code:            ErrCodeResourceVersionConflicts,
+		Key:             key,
+		ResourceVersion: rv,
+	}
+}
+
+func NewUnreachableError(key string, rv int64) *StorageError {
+	return &StorageError{
+		Code:            ErrCodeUnreachable,
+		Key:             key,
+		ResourceVersion: rv,
+	}
+}
+
+func NewInvalidObjError(key, msg string) *StorageError {
+	return &StorageError{
+		Code:               ErrCodeInvalidObj,
+		Key:                key,
+		AdditionalErrorMsg: msg,
+	}
+}
+
+type StorageError struct {
+	Code               int
+	Key                string
+	ResourceVersion    int64
+	AdditionalErrorMsg string
+}
+
+func (e *StorageError) Error() string {
+	return fmt.Sprintf("StorageError: %s, Code: %d, Key: %s, ResourceVersion: %d, AdditionalErrorMsg: %s",
+		errCodeToMessage[e.Code], e.Code, e.Key, e.ResourceVersion, e.AdditionalErrorMsg)
+}
+
+// IsNotFound returns true if and only if err is a "key not found" error.
+func IsNotFound(err error) bool {
+	return isErrCode(err, ErrCodeKeyNotFound)
+}
+
+// IsNodeExist returns true if and only if err is a "node already exists" error.
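+// Callers are expected to branch on these predicates rather than on concrete
+// error values. For example (sketch; s is any storage.Interface):
+//
+//	if err := s.Create(ctx, key, obj, out, 0); IsNodeExist(err) {
+//		// The key is already present; fall back to an update path instead.
+//	}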
+func IsNodeExist(err error) bool {
+	return isErrCode(err, ErrCodeKeyExists)
+}
+
+// IsUnreachable returns true if and only if err indicates the server could not be reached.
+func IsUnreachable(err error) bool {
+	return isErrCode(err, ErrCodeUnreachable)
+}
+
+// IsTestFailed returns true if and only if err is a write conflict or invalid-object error.
+func IsTestFailed(err error) bool {
+	return isErrCode(err, ErrCodeResourceVersionConflicts, ErrCodeInvalidObj)
+}
+
+// IsInvalidObj returns true if and only if err is an invalid-object error.
+func IsInvalidObj(err error) bool {
+	return isErrCode(err, ErrCodeInvalidObj)
+}
+
+func isErrCode(err error, codes ...int) bool {
+	if err == nil {
+		return false
+	}
+	if e, ok := err.(*StorageError); ok {
+		for _, code := range codes {
+			if e.Code == code {
+				return true
+			}
+		}
+	}
+	return false
+}
+
+// InvalidError is generated when an error caused by an invalid API object occurs
+// in the storage package.
+type InvalidError struct {
+	Errs field.ErrorList
+}
+
+func (e InvalidError) Error() string {
+	return e.Errs.ToAggregate().Error()
+}
+
+// IsInvalidError returns true if and only if err is an InvalidError.
+func IsInvalidError(err error) bool {
+	_, ok := err.(InvalidError)
+	return ok
+}
+
+func NewInvalidError(errors field.ErrorList) InvalidError {
+	return InvalidError{errors}
+}
+
+// InternalError is generated when an error occurs in the storage package, i.e.,
+// not from the underlying storage backend (e.g., etcd).
+type InternalError struct {
+	Reason string
+}
+
+func (e InternalError) Error() string {
+	return e.Reason
+}
+
+// IsInternalError returns true if and only if err is an InternalError.
+func IsInternalError(err error) bool {
+	_, ok := err.(InternalError)
+	return ok
+}
+
+func NewInternalError(reason string) InternalError {
+	return InternalError{reason}
+}
+
+func NewInternalErrorf(format string, a ...interface{}) InternalError {
+	return InternalError{fmt.Sprintf(format, a...)}
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/etcd/api_object_versioner.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/etcd/api_object_versioner.go
new file mode 100644
index 000000000000..639f24a8afde
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/etcd/api_object_versioner.go
@@ -0,0 +1,98 @@
+/*
+Copyright 2014 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package etcd
+
+import (
+	"strconv"
+
+	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/api/meta"
+	"k8s.io/kubernetes/pkg/runtime"
+	"k8s.io/kubernetes/pkg/storage"
+)
+
+// APIObjectVersioner implements versioning and extracting etcd node information
+// for objects that have an embedded ObjectMeta or ListMeta field.
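+//
+// The zero value is ready to use. A typical round trip through the interface
+// (sketch; pod is any *api.Pod):
+//
+//	v := APIObjectVersioner{}
+//	_ = v.UpdateObject(pod, 42)           // stores ResourceVersion "42" on the object
+//	rv, _ := v.ObjectResourceVersion(pod) // rv == 42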
+type APIObjectVersioner struct{} + +// UpdateObject implements Versioner +func (a APIObjectVersioner) UpdateObject(obj runtime.Object, resourceVersion uint64) error { + accessor, err := meta.Accessor(obj) + if err != nil { + return err + } + versionString := "" + if resourceVersion != 0 { + versionString = strconv.FormatUint(resourceVersion, 10) + } + accessor.SetResourceVersion(versionString) + return nil +} + +// UpdateList implements Versioner +func (a APIObjectVersioner) UpdateList(obj runtime.Object, resourceVersion uint64) error { + listMeta, err := api.ListMetaFor(obj) + if err != nil || listMeta == nil { + return err + } + versionString := "" + if resourceVersion != 0 { + versionString = strconv.FormatUint(resourceVersion, 10) + } + listMeta.ResourceVersion = versionString + return nil +} + +// ObjectResourceVersion implements Versioner +func (a APIObjectVersioner) ObjectResourceVersion(obj runtime.Object) (uint64, error) { + accessor, err := meta.Accessor(obj) + if err != nil { + return 0, err + } + version := accessor.GetResourceVersion() + if len(version) == 0 { + return 0, nil + } + return strconv.ParseUint(version, 10, 64) +} + +// APIObjectVersioner implements Versioner +var Versioner storage.Versioner = APIObjectVersioner{} + +// CompareResourceVersion compares etcd resource versions. Outside this API they are all strings, +// but etcd resource versions are special, they're actually ints, so we can easily compare them. +func (a APIObjectVersioner) CompareResourceVersion(lhs, rhs runtime.Object) int { + lhsVersion, err := Versioner.ObjectResourceVersion(lhs) + if err != nil { + // coder error + panic(err) + } + rhsVersion, err := Versioner.ObjectResourceVersion(rhs) + if err != nil { + // coder error + panic(err) + } + + if lhsVersion == rhsVersion { + return 0 + } + if lhsVersion < rhsVersion { + return -1 + } + + return 1 +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/etcd/api_object_versioner_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/etcd/api_object_versioner_test.go new file mode 100644 index 000000000000..86767c8e07a4 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/etcd/api_object_versioner_test.go @@ -0,0 +1,58 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package etcd + +import ( + "testing" + + "k8s.io/kubernetes/pkg/api" + storagetesting "k8s.io/kubernetes/pkg/storage/testing" +) + +func TestObjectVersioner(t *testing.T) { + v := APIObjectVersioner{} + if ver, err := v.ObjectResourceVersion(&storagetesting.TestResource{ObjectMeta: api.ObjectMeta{ResourceVersion: "5"}}); err != nil || ver != 5 { + t.Errorf("unexpected version: %d %v", ver, err) + } + if ver, err := v.ObjectResourceVersion(&storagetesting.TestResource{ObjectMeta: api.ObjectMeta{ResourceVersion: "a"}}); err == nil || ver != 0 { + t.Errorf("unexpected version: %d %v", ver, err) + } + obj := &storagetesting.TestResource{ObjectMeta: api.ObjectMeta{ResourceVersion: "a"}} + if err := v.UpdateObject(obj, 5); err != nil { + t.Fatalf("unexpected error: %v", err) + } + if obj.ResourceVersion != "5" || obj.DeletionTimestamp != nil { + t.Errorf("unexpected resource version: %#v", obj) + } +} + +func TestCompareResourceVersion(t *testing.T) { + five := &storagetesting.TestResource{ObjectMeta: api.ObjectMeta{ResourceVersion: "5"}} + six := &storagetesting.TestResource{ObjectMeta: api.ObjectMeta{ResourceVersion: "6"}} + + versioner := APIObjectVersioner{} + + if e, a := -1, versioner.CompareResourceVersion(five, six); e != a { + t.Errorf("expected %v got %v", e, a) + } + if e, a := 1, versioner.CompareResourceVersion(six, five); e != a { + t.Errorf("expected %v got %v", e, a) + } + if e, a := 0, versioner.CompareResourceVersion(six, six); e != a { + t.Errorf("expected %v got %v", e, a) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/etcd/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/etcd/doc.go new file mode 100644 index 000000000000..44a2b9d44505 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/etcd/doc.go @@ -0,0 +1,17 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package etcd diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/etcd/etcd_helper.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/etcd/etcd_helper.go new file mode 100644 index 000000000000..40c9337a0619 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/etcd/etcd_helper.go @@ -0,0 +1,638 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package etcd + +import ( + "errors" + "fmt" + "path" + "reflect" + "strings" + "time" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/meta" + "k8s.io/kubernetes/pkg/conversion" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/storage" + "k8s.io/kubernetes/pkg/storage/etcd/metrics" + etcdutil "k8s.io/kubernetes/pkg/storage/etcd/util" + "k8s.io/kubernetes/pkg/util" + utilcache "k8s.io/kubernetes/pkg/util/cache" + "k8s.io/kubernetes/pkg/watch" + + etcd "github.com/coreos/etcd/client" + "github.com/golang/glog" + "golang.org/x/net/context" +) + +// Creates a new storage interface from the client +// TODO: deprecate in favor of storage.Config abstraction over time +func NewEtcdStorage(client etcd.Client, codec runtime.Codec, prefix string, quorum bool, cacheSize int) storage.Interface { + return &etcdHelper{ + etcdMembersAPI: etcd.NewMembersAPI(client), + etcdKeysAPI: etcd.NewKeysAPI(client), + codec: codec, + versioner: APIObjectVersioner{}, + copier: api.Scheme, + pathPrefix: path.Join("/", prefix), + quorum: quorum, + cache: utilcache.NewCache(cacheSize), + } +} + +// etcdHelper is the reference implementation of storage.Interface. +type etcdHelper struct { + etcdMembersAPI etcd.MembersAPI + etcdKeysAPI etcd.KeysAPI + codec runtime.Codec + copier runtime.ObjectCopier + // Note that versioner is required for etcdHelper to work correctly. + // The public constructors (NewStorage & NewEtcdStorage) are setting it + // correctly, so be careful when manipulating with it manually. + // optional, has to be set to perform any atomic operations + versioner storage.Versioner + // prefix for all etcd keys + pathPrefix string + // if true, perform quorum read + quorum bool + + // We cache objects stored in etcd. For keys we use Node.ModifiedIndex which is equivalent + // to resourceVersion. + // This depends on etcd's indexes being globally unique across all objects/types. This will + // have to revisited if we decide to do things like multiple etcd clusters, or etcd will + // support multi-object transaction that will result in many objects with the same index. + // Number of entries stored in the cache is controlled by maxEtcdCacheEntries constant. + // TODO: Measure how much this cache helps after the conversion code is optimized. + cache utilcache.Cache +} + +func init() { + metrics.Register() +} + +// Codec provides access to the underlying codec being used by the implementation. +func (h *etcdHelper) Codec() runtime.Codec { + return h.codec +} + +// Implements storage.Interface. +func (h *etcdHelper) Backends(ctx context.Context) []string { + if ctx == nil { + glog.Errorf("Context is nil") + } + members, err := h.etcdMembersAPI.List(ctx) + if err != nil { + glog.Errorf("Error obtaining etcd members list: %q", err) + return nil + } + mlist := []string{} + for _, member := range members { + mlist = append(mlist, member.ClientURLs...) + } + return mlist +} + +// Implements storage.Interface. +func (h *etcdHelper) Versioner() storage.Versioner { + return h.versioner +} + +// Implements storage.Interface. 
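+// Create writes obj at key with the given TTL in seconds (0 means no
+// expiration) and fails if the key already exists. A typical call site
+// (sketch; ctx and pod are assumed to be in scope):
+//
+//	out := &api.Pod{}
+//	if err := h.Create(ctx, "/pods/ns/foo", pod, out, 0); storage.IsNodeExist(err) {
+//		// The key was already present; out is left untouched.
+//	}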
+func (h *etcdHelper) Create(ctx context.Context, key string, obj, out runtime.Object, ttl uint64) error { + trace := util.NewTrace("etcdHelper::Create " + getTypeName(obj)) + defer trace.LogIfLong(250 * time.Millisecond) + if ctx == nil { + glog.Errorf("Context is nil") + } + key = h.prefixEtcdKey(key) + data, err := runtime.Encode(h.codec, obj) + trace.Step("Object encoded") + if err != nil { + return err + } + if version, err := h.versioner.ObjectResourceVersion(obj); err == nil && version != 0 { + return errors.New("resourceVersion may not be set on objects to be created") + } + trace.Step("Version checked") + + startTime := time.Now() + opts := etcd.SetOptions{ + TTL: time.Duration(ttl) * time.Second, + PrevExist: etcd.PrevNoExist, + } + response, err := h.etcdKeysAPI.Set(ctx, key, string(data), &opts) + metrics.RecordEtcdRequestLatency("create", getTypeName(obj), startTime) + trace.Step("Object created") + if err != nil { + return toStorageErr(err, key, 0) + } + if out != nil { + if _, err := conversion.EnforcePtr(out); err != nil { + panic("unable to convert output object to pointer") + } + _, _, err = h.extractObj(response, err, out, false, false) + } + return err +} + +func checkPreconditions(preconditions *storage.Preconditions, out runtime.Object) error { + if preconditions == nil { + return nil + } + objMeta, err := api.ObjectMetaFor(out) + if err != nil { + return storage.NewInternalErrorf("can't enforce preconditions %v on un-introspectable object %v, got error: %v", *preconditions, out, err) + } + if preconditions.UID != nil && *preconditions.UID != objMeta.UID { + return etcd.Error{Code: etcd.ErrorCodeTestFailed, Message: fmt.Sprintf("the UID in the precondition (%s) does not match the UID in record (%s). The object might have been deleted and then recreated", *preconditions.UID, objMeta.UID)} + } + return nil +} + +// Implements storage.Interface. +func (h *etcdHelper) Delete(ctx context.Context, key string, out runtime.Object, preconditions *storage.Preconditions) error { + if ctx == nil { + glog.Errorf("Context is nil") + } + key = h.prefixEtcdKey(key) + v, err := conversion.EnforcePtr(out) + if err != nil { + panic("unable to convert output object to pointer") + } + + if preconditions == nil { + startTime := time.Now() + response, err := h.etcdKeysAPI.Delete(ctx, key, nil) + metrics.RecordEtcdRequestLatency("delete", getTypeName(out), startTime) + if !etcdutil.IsEtcdNotFound(err) { + // if the object that existed prior to the delete is returned by etcd, update the out object. + if err != nil || response.PrevNode != nil { + _, _, err = h.extractObj(response, err, out, false, true) + } + } + return toStorageErr(err, key, 0) + } + + // Check the preconditions match. 
+ obj := reflect.New(v.Type()).Interface().(runtime.Object) + for { + _, node, res, err := h.bodyAndExtractObj(ctx, key, obj, false) + if err != nil { + return toStorageErr(err, key, 0) + } + if err := checkPreconditions(preconditions, obj); err != nil { + return toStorageErr(err, key, 0) + } + index := uint64(0) + if node != nil { + index = node.ModifiedIndex + } else if res != nil { + index = res.Index + } + opt := etcd.DeleteOptions{PrevIndex: index} + startTime := time.Now() + response, err := h.etcdKeysAPI.Delete(ctx, key, &opt) + metrics.RecordEtcdRequestLatency("delete", getTypeName(out), startTime) + if etcdutil.IsEtcdTestFailed(err) { + glog.Infof("deletion of %s failed because of a conflict, going to retry", key) + } else { + if !etcdutil.IsEtcdNotFound(err) { + // if the object that existed prior to the delete is returned by etcd, update the out object. + if err != nil || response.PrevNode != nil { + _, _, err = h.extractObj(response, err, out, false, true) + } + } + return toStorageErr(err, key, 0) + } + } +} + +// Implements storage.Interface. +func (h *etcdHelper) Watch(ctx context.Context, key string, resourceVersion string, filter storage.FilterFunc) (watch.Interface, error) { + if ctx == nil { + glog.Errorf("Context is nil") + } + watchRV, err := storage.ParseWatchResourceVersion(resourceVersion) + if err != nil { + return nil, err + } + key = h.prefixEtcdKey(key) + w := newEtcdWatcher(false, h.quorum, nil, filter, h.codec, h.versioner, nil, h) + go w.etcdWatch(ctx, h.etcdKeysAPI, key, watchRV) + return w, nil +} + +// Implements storage.Interface. +func (h *etcdHelper) WatchList(ctx context.Context, key string, resourceVersion string, filter storage.FilterFunc) (watch.Interface, error) { + if ctx == nil { + glog.Errorf("Context is nil") + } + watchRV, err := storage.ParseWatchResourceVersion(resourceVersion) + if err != nil { + return nil, err + } + key = h.prefixEtcdKey(key) + w := newEtcdWatcher(true, h.quorum, exceptKey(key), filter, h.codec, h.versioner, nil, h) + go w.etcdWatch(ctx, h.etcdKeysAPI, key, watchRV) + return w, nil +} + +// Implements storage.Interface. +func (h *etcdHelper) Get(ctx context.Context, key string, objPtr runtime.Object, ignoreNotFound bool) error { + if ctx == nil { + glog.Errorf("Context is nil") + } + key = h.prefixEtcdKey(key) + _, _, _, err := h.bodyAndExtractObj(ctx, key, objPtr, ignoreNotFound) + return err +} + +// bodyAndExtractObj performs the normal Get path to etcd, returning the parsed node and response for additional information +// about the response, like the current etcd index and the ttl. 
+func (h *etcdHelper) bodyAndExtractObj(ctx context.Context, key string, objPtr runtime.Object, ignoreNotFound bool) (body string, node *etcd.Node, res *etcd.Response, err error) { + if ctx == nil { + glog.Errorf("Context is nil") + } + startTime := time.Now() + + opts := &etcd.GetOptions{ + Quorum: h.quorum, + } + + response, err := h.etcdKeysAPI.Get(ctx, key, opts) + metrics.RecordEtcdRequestLatency("get", getTypeName(objPtr), startTime) + if err != nil && !etcdutil.IsEtcdNotFound(err) { + return "", nil, nil, toStorageErr(err, key, 0) + } + body, node, err = h.extractObj(response, err, objPtr, ignoreNotFound, false) + return body, node, response, toStorageErr(err, key, 0) +} + +func (h *etcdHelper) extractObj(response *etcd.Response, inErr error, objPtr runtime.Object, ignoreNotFound, prevNode bool) (body string, node *etcd.Node, err error) { + if response != nil { + if prevNode { + node = response.PrevNode + } else { + node = response.Node + } + } + if inErr != nil || node == nil || len(node.Value) == 0 { + if ignoreNotFound { + v, err := conversion.EnforcePtr(objPtr) + if err != nil { + return "", nil, err + } + v.Set(reflect.Zero(v.Type())) + return "", nil, nil + } else if inErr != nil { + return "", nil, inErr + } + return "", nil, fmt.Errorf("unable to locate a value on the response: %#v", response) + } + body = node.Value + out, gvk, err := h.codec.Decode([]byte(body), nil, objPtr) + if err != nil { + return body, nil, err + } + if out != objPtr { + return body, nil, fmt.Errorf("unable to decode object %s into %v", gvk.String(), reflect.TypeOf(objPtr)) + } + // being unable to set the version does not prevent the object from being extracted + _ = h.versioner.UpdateObject(objPtr, node.ModifiedIndex) + return body, node, err +} + +// Implements storage.Interface. +func (h *etcdHelper) GetToList(ctx context.Context, key string, filter storage.FilterFunc, listObj runtime.Object) error { + if ctx == nil { + glog.Errorf("Context is nil") + } + trace := util.NewTrace("GetToList " + getTypeName(listObj)) + listPtr, err := meta.GetItemsPtr(listObj) + if err != nil { + return err + } + key = h.prefixEtcdKey(key) + startTime := time.Now() + trace.Step("About to read etcd node") + + opts := &etcd.GetOptions{ + Quorum: h.quorum, + } + response, err := h.etcdKeysAPI.Get(ctx, key, opts) + + metrics.RecordEtcdRequestLatency("get", getTypeName(listPtr), startTime) + trace.Step("Etcd node read") + if err != nil { + if etcdutil.IsEtcdNotFound(err) { + return nil + } + return toStorageErr(err, key, 0) + } + + nodes := make([]*etcd.Node, 0) + nodes = append(nodes, response.Node) + + if err := h.decodeNodeList(nodes, filter, listPtr); err != nil { + return err + } + trace.Step("Object decoded") + if err := h.versioner.UpdateList(listObj, response.Index); err != nil { + return err + } + return nil +} + +// decodeNodeList walks the tree of each node in the list and decodes into the specified object +func (h *etcdHelper) decodeNodeList(nodes []*etcd.Node, filter storage.FilterFunc, slicePtr interface{}) error { + trace := util.NewTrace("decodeNodeList " + getTypeName(slicePtr)) + defer trace.LogIfLong(400 * time.Millisecond) + v, err := conversion.EnforcePtr(slicePtr) + if err != nil || v.Kind() != reflect.Slice { + // This should not happen at runtime. 
+ panic("need ptr to slice") + } + for _, node := range nodes { + if node.Dir { + trace.Step("Decoding dir " + node.Key + " START") + if err := h.decodeNodeList(node.Nodes, filter, slicePtr); err != nil { + return err + } + trace.Step("Decoding dir " + node.Key + " END") + continue + } + if obj, found := h.getFromCache(node.ModifiedIndex, filter); found { + // obj != nil iff it matches the filter function. + if obj != nil { + v.Set(reflect.Append(v, reflect.ValueOf(obj).Elem())) + } + } else { + obj, _, err := h.codec.Decode([]byte(node.Value), nil, reflect.New(v.Type().Elem()).Interface().(runtime.Object)) + if err != nil { + return err + } + // being unable to set the version does not prevent the object from being extracted + _ = h.versioner.UpdateObject(obj, node.ModifiedIndex) + if filter(obj) { + v.Set(reflect.Append(v, reflect.ValueOf(obj).Elem())) + } + if node.ModifiedIndex != 0 { + h.addToCache(node.ModifiedIndex, obj) + } + } + } + trace.Step(fmt.Sprintf("Decoded %v nodes", len(nodes))) + return nil +} + +// Implements storage.Interface. +func (h *etcdHelper) List(ctx context.Context, key string, resourceVersion string, filter storage.FilterFunc, listObj runtime.Object) error { + if ctx == nil { + glog.Errorf("Context is nil") + } + trace := util.NewTrace("List " + getTypeName(listObj)) + defer trace.LogIfLong(400 * time.Millisecond) + listPtr, err := meta.GetItemsPtr(listObj) + if err != nil { + return err + } + key = h.prefixEtcdKey(key) + startTime := time.Now() + trace.Step("About to list etcd node") + nodes, index, err := h.listEtcdNode(ctx, key) + metrics.RecordEtcdRequestLatency("list", getTypeName(listPtr), startTime) + trace.Step("Etcd node listed") + if err != nil { + return err + } + if err := h.decodeNodeList(nodes, filter, listPtr); err != nil { + return err + } + trace.Step("Node list decoded") + if err := h.versioner.UpdateList(listObj, index); err != nil { + return err + } + return nil +} + +func (h *etcdHelper) listEtcdNode(ctx context.Context, key string) ([]*etcd.Node, uint64, error) { + if ctx == nil { + glog.Errorf("Context is nil") + } + opts := etcd.GetOptions{ + Recursive: true, + Sort: true, + Quorum: h.quorum, + } + result, err := h.etcdKeysAPI.Get(ctx, key, &opts) + if err != nil { + var index uint64 + if etcdError, ok := err.(etcd.Error); ok { + index = etcdError.Index + } + nodes := make([]*etcd.Node, 0) + if etcdutil.IsEtcdNotFound(err) { + return nodes, index, nil + } else { + return nodes, index, toStorageErr(err, key, 0) + } + } + return result.Node.Nodes, result.Index, nil +} + +// Implements storage.Interface. +func (h *etcdHelper) GuaranteedUpdate(ctx context.Context, key string, ptrToType runtime.Object, ignoreNotFound bool, preconditions *storage.Preconditions, tryUpdate storage.UpdateFunc) error { + if ctx == nil { + glog.Errorf("Context is nil") + } + v, err := conversion.EnforcePtr(ptrToType) + if err != nil { + // Panic is appropriate, because this is a programming error. + panic("need ptr to type") + } + key = h.prefixEtcdKey(key) + for { + obj := reflect.New(v.Type()).Interface().(runtime.Object) + origBody, node, res, err := h.bodyAndExtractObj(ctx, key, obj, ignoreNotFound) + if err != nil { + return toStorageErr(err, key, 0) + } + if err := checkPreconditions(preconditions, obj); err != nil { + return toStorageErr(err, key, 0) + } + meta := storage.ResponseMeta{} + if node != nil { + meta.TTL = node.TTL + meta.ResourceVersion = node.ModifiedIndex + } + // Get the object to be written by calling tryUpdate. 
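+		// tryUpdate has the storage.UpdateFunc shape: it is handed the current
+		// object plus ResponseMeta (TTL and resourceVersion) and returns the
+		// desired new object and an optional new TTL. A typical caller-side
+		// closure (sketch):
+		//
+		//	tryUpdate := func(input runtime.Object, res storage.ResponseMeta) (runtime.Object, *uint64, error) {
+		//		pod := input.(*api.Pod)
+		//		pod.Labels["touched"] = "true" // mutate the freshly read copy
+		//		return pod, nil, nil           // nil TTL keeps the TTL handling below
+		//	}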
+		ret, newTTL, err := tryUpdate(obj, meta)
+		if err != nil {
+			return toStorageErr(err, key, 0)
+		}
+
+		index := uint64(0)
+		ttl := uint64(0)
+		if node != nil {
+			index = node.ModifiedIndex
+			if node.TTL != 0 {
+				ttl = uint64(node.TTL)
+			}
+			if node.Expiration != nil && ttl == 0 {
+				ttl = 1
+			}
+		} else if res != nil {
+			index = res.Index
+		}
+
+		if newTTL != nil {
+			if ttl != 0 && *newTTL == 0 {
+				// TODO: remove this after we have verified this is no longer an issue
+				glog.V(4).Infof("GuaranteedUpdate is clearing TTL for %q, may not be intentional", key)
+			}
+			ttl = *newTTL
+		}
+
+		// Since the update object may have a resourceVersion set, we need to clear it here.
+		if err := h.versioner.UpdateObject(ret, 0); err != nil {
+			return errors.New("resourceVersion cannot be set on objects stored in etcd")
+		}
+
+		data, err := runtime.Encode(h.codec, ret)
+		if err != nil {
+			return err
+		}
+
+		// First time this key has been used, try creating new value.
+		if index == 0 {
+			startTime := time.Now()
+			opts := etcd.SetOptions{
+				TTL:       time.Duration(ttl) * time.Second,
+				PrevExist: etcd.PrevNoExist,
+			}
+			response, err := h.etcdKeysAPI.Set(ctx, key, string(data), &opts)
+			metrics.RecordEtcdRequestLatency("create", getTypeName(ptrToType), startTime)
+			if etcdutil.IsEtcdNodeExist(err) {
+				continue
+			}
+			_, _, err = h.extractObj(response, err, ptrToType, false, false)
+			return toStorageErr(err, key, 0)
+		}
+
+		if string(data) == origBody {
+			// If we don't send an update, we simply return the currently existing
+			// version of the object.
+			_, _, err := h.extractObj(res, nil, ptrToType, ignoreNotFound, false)
+			return err
+		}
+
+		startTime := time.Now()
+		// Swap origBody with data, if origBody is the latest etcd data.
+		opts := etcd.SetOptions{
+			PrevValue: origBody,
+			PrevIndex: index,
+			TTL:       time.Duration(ttl) * time.Second,
+		}
+		response, err := h.etcdKeysAPI.Set(ctx, key, string(data), &opts)
+		metrics.RecordEtcdRequestLatency("compareAndSwap", getTypeName(ptrToType), startTime)
+		if etcdutil.IsEtcdTestFailed(err) {
+			// Try again.
+			continue
+		}
+		_, _, err = h.extractObj(response, err, ptrToType, false, false)
+		return toStorageErr(err, key, int64(index))
+	}
+}
+
+func (h *etcdHelper) prefixEtcdKey(key string) string {
+	if strings.HasPrefix(key, h.pathPrefix) {
+		return key
+	}
+	return path.Join(h.pathPrefix, key)
+}
+
+// etcdCache defines the interface used for caching objects stored in etcd. Objects are keyed by
+// their Node.ModifiedIndex, which is unique across all types.
+// All implementations must be thread-safe.
+type etcdCache interface {
+	getFromCache(index uint64, filter storage.FilterFunc) (runtime.Object, bool)
+	addToCache(index uint64, obj runtime.Object)
+}
+
+func getTypeName(obj interface{}) string {
+	return reflect.TypeOf(obj).String()
+}
+
+func (h *etcdHelper) getFromCache(index uint64, filter storage.FilterFunc) (runtime.Object, bool) {
+	startTime := time.Now()
+	defer func() {
+		metrics.ObserveGetCache(startTime)
+	}()
+	obj, found := h.cache.Get(index)
+	if found {
+		if !filter(obj.(runtime.Object)) {
+			return nil, true
+		}
+		// We should not return the object itself to avoid polluting the cache if someone
+		// modifies returned values.
+		objCopy, err := h.copier.Copy(obj.(runtime.Object))
+		if err != nil {
+			glog.Errorf("Error during DeepCopy of cached object: %q", err)
+			// We can't return a copy, thus we report the object as not found.
+ return nil, false + } + metrics.ObserveCacheHit() + return objCopy.(runtime.Object), true + } + metrics.ObserveCacheMiss() + return nil, false +} + +func (h *etcdHelper) addToCache(index uint64, obj runtime.Object) { + startTime := time.Now() + defer func() { + metrics.ObserveAddCache(startTime) + }() + objCopy, err := h.copier.Copy(obj) + if err != nil { + glog.Errorf("Error during DeepCopy of cached object: %q", err) + return + } + isOverwrite := h.cache.Add(index, objCopy) + if !isOverwrite { + metrics.ObserveNewEntry() + } +} + +func toStorageErr(err error, key string, rv int64) error { + if err == nil { + return nil + } + switch { + case etcdutil.IsEtcdNotFound(err): + return storage.NewKeyNotFoundError(key, rv) + case etcdutil.IsEtcdNodeExist(err): + return storage.NewKeyExistsError(key, rv) + case etcdutil.IsEtcdTestFailed(err): + return storage.NewResourceVersionConflictsError(key, rv) + case etcdutil.IsEtcdUnreachable(err): + return storage.NewUnreachableError(key, rv) + default: + return err + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/etcd/etcd_helper_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/etcd/etcd_helper_test.go new file mode 100644 index 000000000000..a5e7f575383b --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/etcd/etcd_helper_test.go @@ -0,0 +1,562 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package etcd + +import ( + "path" + "reflect" + "sync" + "testing" + "time" + + etcd "github.com/coreos/etcd/client" + "github.com/stretchr/testify/assert" + "golang.org/x/net/context" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/testapi" + apitesting "k8s.io/kubernetes/pkg/api/testing" + "k8s.io/kubernetes/pkg/conversion" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/runtime/serializer" + "k8s.io/kubernetes/pkg/storage" + "k8s.io/kubernetes/pkg/storage/etcd/etcdtest" + etcdtesting "k8s.io/kubernetes/pkg/storage/etcd/testing" + storagetesting "k8s.io/kubernetes/pkg/storage/testing" +) + +const validEtcdVersion = "etcd 2.0.9" + +func testScheme(t *testing.T) (*runtime.Scheme, runtime.Codec) { + scheme := runtime.NewScheme() + scheme.Log(t) + scheme.AddKnownTypes(*testapi.Default.GroupVersion(), &storagetesting.TestResource{}) + scheme.AddKnownTypes(testapi.Default.InternalGroupVersion(), &storagetesting.TestResource{}) + if err := scheme.AddConversionFuncs( + func(in *storagetesting.TestResource, out *storagetesting.TestResource, s conversion.Scope) error { + *out = *in + return nil + }, + func(in, out *time.Time, s conversion.Scope) error { + *out = *in + return nil + }, + ); err != nil { + panic(err) + } + codec := serializer.NewCodecFactory(scheme).LegacyCodec(*testapi.Default.GroupVersion()) + return scheme, codec +} + +func newEtcdHelper(client etcd.Client, codec runtime.Codec, prefix string) etcdHelper { + return *NewEtcdStorage(client, codec, prefix, false, etcdtest.DeserializationCacheSize).(*etcdHelper) +} + +// Returns an encoded version of api.Pod with the given name. +func getEncodedPod(name string) string { + pod, _ := runtime.Encode(testapi.Default.Codec(), &api.Pod{ + ObjectMeta: api.ObjectMeta{Name: name}, + }) + return string(pod) +} + +func createObj(t *testing.T, helper etcdHelper, name string, obj, out runtime.Object, ttl uint64) error { + err := helper.Create(context.TODO(), name, obj, out, ttl) + if err != nil { + t.Errorf("Unexpected error %v", err) + } + return err +} + +func createPodList(t *testing.T, helper etcdHelper, list *api.PodList) error { + for i := range list.Items { + returnedObj := &api.Pod{} + err := createObj(t, helper, list.Items[i].Name, &list.Items[i], returnedObj, 0) + if err != nil { + return err + } + list.Items[i] = *returnedObj + } + return nil +} + +func TestList(t *testing.T) { + server := etcdtesting.NewEtcdTestClientServer(t) + defer server.Terminate(t) + key := etcdtest.AddPrefix("/some/key") + helper := newEtcdHelper(server.Client, testapi.Default.Codec(), key) + + list := api.PodList{ + Items: []api.Pod{ + { + ObjectMeta: api.ObjectMeta{Name: "bar"}, + Spec: apitesting.DeepEqualSafePodSpec(), + }, + { + ObjectMeta: api.ObjectMeta{Name: "baz"}, + Spec: apitesting.DeepEqualSafePodSpec(), + }, + { + ObjectMeta: api.ObjectMeta{Name: "foo"}, + Spec: apitesting.DeepEqualSafePodSpec(), + }, + }, + } + + createPodList(t, helper, &list) + var got api.PodList + // TODO: a sorted filter function could be applied such implied + // ordering on the returned list doesn't matter. 
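+	// storage.FilterFunc is simply func(runtime.Object) bool, and
+	// storage.Everything accepts every object. A name-based filter, as used by
+	// TestListFiltered below, looks like (sketch):
+	//
+	//	byName := func(obj runtime.Object) bool {
+	//		pod, ok := obj.(*api.Pod)
+	//		return ok && pod.Name == "bar"
+	//	}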
+ err := helper.List(context.TODO(), key, "", storage.Everything, &got) + if err != nil { + t.Errorf("Unexpected error %v", err) + } + + if e, a := list.Items, got.Items; !reflect.DeepEqual(e, a) { + t.Errorf("Expected %#v, got %#v", e, a) + } +} + +func TestListFiltered(t *testing.T) { + server := etcdtesting.NewEtcdTestClientServer(t) + defer server.Terminate(t) + key := etcdtest.AddPrefix("/some/key") + helper := newEtcdHelper(server.Client, testapi.Default.Codec(), key) + + list := api.PodList{ + Items: []api.Pod{ + { + ObjectMeta: api.ObjectMeta{Name: "bar"}, + Spec: apitesting.DeepEqualSafePodSpec(), + }, + { + ObjectMeta: api.ObjectMeta{Name: "baz"}, + Spec: apitesting.DeepEqualSafePodSpec(), + }, + { + ObjectMeta: api.ObjectMeta{Name: "foo"}, + Spec: apitesting.DeepEqualSafePodSpec(), + }, + }, + } + + createPodList(t, helper, &list) + filter := func(obj runtime.Object) bool { + pod := obj.(*api.Pod) + return pod.Name == "bar" + } + + var got api.PodList + err := helper.List(context.TODO(), key, "", filter, &got) + if err != nil { + t.Errorf("Unexpected error %v", err) + } + // Check to make certain that the filter function only returns "bar" + if e, a := list.Items[0], got.Items[0]; !reflect.DeepEqual(e, a) { + t.Errorf("Expected %#v, got %#v", e, a) + } +} + +// TestListAcrossDirectories ensures that the client excludes directories and flattens tree-response - simulates cross-namespace query +func TestListAcrossDirectories(t *testing.T) { + server := etcdtesting.NewEtcdTestClientServer(t) + defer server.Terminate(t) + rootkey := etcdtest.AddPrefix("/some/key") + key1 := etcdtest.AddPrefix("/some/key/directory1") + key2 := etcdtest.AddPrefix("/some/key/directory2") + + roothelper := newEtcdHelper(server.Client, testapi.Default.Codec(), rootkey) + helper1 := newEtcdHelper(server.Client, testapi.Default.Codec(), key1) + helper2 := newEtcdHelper(server.Client, testapi.Default.Codec(), key2) + + list := api.PodList{ + Items: []api.Pod{ + { + ObjectMeta: api.ObjectMeta{Name: "baz"}, + Spec: apitesting.DeepEqualSafePodSpec(), + }, + { + ObjectMeta: api.ObjectMeta{Name: "foo"}, + Spec: apitesting.DeepEqualSafePodSpec(), + }, + { + ObjectMeta: api.ObjectMeta{Name: "bar"}, + Spec: apitesting.DeepEqualSafePodSpec(), + }, + }, + } + + returnedObj := &api.Pod{} + // create the 1st 2 elements in one directory + createObj(t, helper1, list.Items[0].Name, &list.Items[0], returnedObj, 0) + list.Items[0] = *returnedObj + createObj(t, helper1, list.Items[1].Name, &list.Items[1], returnedObj, 0) + list.Items[1] = *returnedObj + // create the last element in the other directory + createObj(t, helper2, list.Items[2].Name, &list.Items[2], returnedObj, 0) + list.Items[2] = *returnedObj + + var got api.PodList + err := roothelper.List(context.TODO(), rootkey, "", storage.Everything, &got) + if err != nil { + t.Errorf("Unexpected error %v", err) + } + if e, a := list.Items, got.Items; !reflect.DeepEqual(e, a) { + t.Errorf("Expected %#v, got %#v", e, a) + } +} + +func TestGet(t *testing.T) { + server := etcdtesting.NewEtcdTestClientServer(t) + defer server.Terminate(t) + key := etcdtest.AddPrefix("/some/key") + helper := newEtcdHelper(server.Client, testapi.Default.Codec(), key) + expect := api.Pod{ + ObjectMeta: api.ObjectMeta{Name: "foo"}, + Spec: apitesting.DeepEqualSafePodSpec(), + } + var got api.Pod + if err := helper.Create(context.TODO(), key, &expect, &got, 0); err != nil { + t.Errorf("Unexpected error %#v", err) + } + expect = got + if err := helper.Get(context.TODO(), key, &got, false); err != 
nil {
+		t.Errorf("Unexpected error %#v", err)
+	}
+	if !reflect.DeepEqual(got, expect) {
+		t.Errorf("Wanted %#v, got %#v", expect, got)
+	}
+}
+
+func TestGetNotFoundErr(t *testing.T) {
+	server := etcdtesting.NewEtcdTestClientServer(t)
+	defer server.Terminate(t)
+	key := etcdtest.AddPrefix("/some/key")
+	boguskey := etcdtest.AddPrefix("/some/boguskey")
+	helper := newEtcdHelper(server.Client, testapi.Default.Codec(), key)
+
+	var got api.Pod
+	err := helper.Get(context.TODO(), boguskey, &got, false)
+	if !storage.IsNotFound(err) {
+		t.Errorf("Unexpected response on key=%v, err=%v", key, err)
+	}
+}
+
+func TestCreate(t *testing.T) {
+	obj := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}}
+	server := etcdtesting.NewEtcdTestClientServer(t)
+	defer server.Terminate(t)
+	helper := newEtcdHelper(server.Client, testapi.Default.Codec(), etcdtest.PathPrefix())
+	returnedObj := &api.Pod{}
+	err := helper.Create(context.TODO(), "/some/key", obj, returnedObj, 5)
+	if err != nil {
+		t.Errorf("Unexpected error %#v", err)
+	}
+	_, err = runtime.Encode(testapi.Default.Codec(), obj)
+	if err != nil {
+		t.Errorf("Unexpected error %#v", err)
+	}
+	err = helper.Get(context.TODO(), "/some/key", returnedObj, false)
+	if err != nil {
+		t.Errorf("Unexpected error %#v", err)
+	}
+	_, err = runtime.Encode(testapi.Default.Codec(), returnedObj)
+	if err != nil {
+		t.Errorf("Unexpected error %#v", err)
+	}
+	if obj.Name != returnedObj.Name {
+		t.Errorf("Wanted %v, got %v", obj.Name, returnedObj.Name)
+	}
+}
+
+func TestCreateNilOutParam(t *testing.T) {
+	obj := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}}
+	server := etcdtesting.NewEtcdTestClientServer(t)
+	defer server.Terminate(t)
+	helper := newEtcdHelper(server.Client, testapi.Default.Codec(), etcdtest.PathPrefix())
+	err := helper.Create(context.TODO(), "/some/key", obj, nil, 5)
+	if err != nil {
+		t.Errorf("Unexpected error %#v", err)
+	}
+}
+
+func TestGuaranteedUpdate(t *testing.T) {
+	_, codec := testScheme(t)
+	server := etcdtesting.NewEtcdTestClientServer(t)
+	defer server.Terminate(t)
+	key := etcdtest.AddPrefix("/some/key")
+	helper := newEtcdHelper(server.Client, codec, key)
+
+	obj := &storagetesting.TestResource{ObjectMeta: api.ObjectMeta{Name: "foo"}, Value: 1}
+	err := helper.GuaranteedUpdate(context.TODO(), key, &storagetesting.TestResource{}, true, nil, storage.SimpleUpdate(func(in runtime.Object) (runtime.Object, error) {
+		return obj, nil
+	}))
+	if err != nil {
+		t.Errorf("Unexpected error %#v", err)
+	}
+
+	// Update an existing node.
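+	// storage.SimpleUpdate adapts a func(runtime.Object) (runtime.Object, error)
+	// into an UpdateFunc that ignores ResponseMeta, so read-modify-write loops
+	// can be written as (sketch):
+	//
+	//	f := storage.SimpleUpdate(func(in runtime.Object) (runtime.Object, error) {
+	//		res := in.(*storagetesting.TestResource)
+	//		res.Value++ // derive the new state from the currently stored object
+	//		return res, nil
+	//	})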
+ callbackCalled := false + objUpdate := &storagetesting.TestResource{ObjectMeta: api.ObjectMeta{Name: "foo"}, Value: 2} + err = helper.GuaranteedUpdate(context.TODO(), key, &storagetesting.TestResource{}, true, nil, storage.SimpleUpdate(func(in runtime.Object) (runtime.Object, error) { + callbackCalled = true + + if in.(*storagetesting.TestResource).Value != 1 { + t.Errorf("Callback input was not current set value") + } + + return objUpdate, nil + })) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + + objCheck := &storagetesting.TestResource{} + err = helper.Get(context.TODO(), key, objCheck, false) + if err != nil { + t.Errorf("Unexpected error %#v", err) + } + if objCheck.Value != 2 { + t.Errorf("Value should have been 2 but got %v", objCheck.Value) + } + + if !callbackCalled { + t.Errorf("tryUpdate callback should have been called.") + } +} + +func TestGuaranteedUpdateNoChange(t *testing.T) { + _, codec := testScheme(t) + server := etcdtesting.NewEtcdTestClientServer(t) + defer server.Terminate(t) + key := etcdtest.AddPrefix("/some/key") + helper := newEtcdHelper(server.Client, codec, key) + + obj := &storagetesting.TestResource{ObjectMeta: api.ObjectMeta{Name: "foo"}, Value: 1} + err := helper.GuaranteedUpdate(context.TODO(), key, &storagetesting.TestResource{}, true, nil, storage.SimpleUpdate(func(in runtime.Object) (runtime.Object, error) { + return obj, nil + })) + if err != nil { + t.Errorf("Unexpected error %#v", err) + } + + // Update an existing node with the same data + callbackCalled := false + objUpdate := &storagetesting.TestResource{ObjectMeta: api.ObjectMeta{Name: "foo"}, Value: 1} + err = helper.GuaranteedUpdate(context.TODO(), key, &storagetesting.TestResource{}, true, nil, storage.SimpleUpdate(func(in runtime.Object) (runtime.Object, error) { + callbackCalled = true + return objUpdate, nil + })) + if err != nil { + t.Fatalf("Unexpected error %#v", err) + } + if !callbackCalled { + t.Errorf("tryUpdate callback should have been called.") + } +} + +func TestGuaranteedUpdateKeyNotFound(t *testing.T) { + _, codec := testScheme(t) + server := etcdtesting.NewEtcdTestClientServer(t) + defer server.Terminate(t) + key := etcdtest.AddPrefix("/some/key") + helper := newEtcdHelper(server.Client, codec, key) + + // Create a new node. 
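+	// GuaranteedUpdate's ignoreNotFound flag controls what happens when the key
+	// is absent: false surfaces a not-found error, while true invokes tryUpdate
+	// with a zero-value object so that the update can create the key. Sketch:
+	//
+	//	err := helper.GuaranteedUpdate(ctx, key, &storagetesting.TestResource{}, true /* ignoreNotFound */, nil, f)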
+	obj := &storagetesting.TestResource{ObjectMeta: api.ObjectMeta{Name: "foo"}, Value: 1}
+
+	f := storage.SimpleUpdate(func(in runtime.Object) (runtime.Object, error) {
+		return obj, nil
+	})
+
+	ignoreNotFound := false
+	err := helper.GuaranteedUpdate(context.TODO(), key, &storagetesting.TestResource{}, ignoreNotFound, nil, f)
+	if err == nil {
+		t.Errorf("Expected error for key not found.")
+	}
+
+	ignoreNotFound = true
+	err = helper.GuaranteedUpdate(context.TODO(), key, &storagetesting.TestResource{}, ignoreNotFound, nil, f)
+	if err != nil {
+		t.Errorf("Unexpected error %v.", err)
+	}
+}
+
+func TestGuaranteedUpdate_CreateCollision(t *testing.T) {
+	_, codec := testScheme(t)
+	server := etcdtesting.NewEtcdTestClientServer(t)
+	defer server.Terminate(t)
+	key := etcdtest.AddPrefix("/some/key")
+	helper := newEtcdHelper(server.Client, codec, etcdtest.PathPrefix())
+
+	const concurrency = 10
+	var wgDone sync.WaitGroup
+	var wgForceCollision sync.WaitGroup
+	wgDone.Add(concurrency)
+	wgForceCollision.Add(concurrency)
+
+	for i := 0; i < concurrency; i++ {
+		// Increment storagetesting.TestResource.Value by 1
+		go func() {
+			defer wgDone.Done()
+
+			firstCall := true
+			err := helper.GuaranteedUpdate(context.TODO(), key, &storagetesting.TestResource{}, true, nil, storage.SimpleUpdate(func(in runtime.Object) (runtime.Object, error) {
+				defer func() { firstCall = false }()
+
+				if firstCall {
+					// Force collision by joining all concurrent GuaranteedUpdate operations here.
+					wgForceCollision.Done()
+					wgForceCollision.Wait()
+				}
+
+				currValue := in.(*storagetesting.TestResource).Value
+				obj := &storagetesting.TestResource{ObjectMeta: api.ObjectMeta{Name: "foo"}, Value: currValue + 1}
+				return obj, nil
+			}))
+			if err != nil {
+				t.Errorf("Unexpected error %#v", err)
+			}
+		}()
+	}
+	wgDone.Wait()
+
+	stored := &storagetesting.TestResource{}
+	err := helper.Get(context.TODO(), key, stored, false)
+	if err != nil {
+		t.Errorf("Unexpected error %#v", err)
+	}
+	if stored.Value != concurrency {
+		t.Errorf("Some of the writes were lost. 
Stored value: %d", stored.Value) + } +} + +func TestGuaranteedUpdateUIDMismatch(t *testing.T) { + server := etcdtesting.NewEtcdTestClientServer(t) + defer server.Terminate(t) + prefix := path.Join("/", etcdtest.PathPrefix()) + helper := newEtcdHelper(server.Client, testapi.Default.Codec(), prefix) + + obj := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo", UID: "A"}} + podPtr := &api.Pod{} + err := helper.Create(context.TODO(), "/some/key", obj, podPtr, 0) + if err != nil { + t.Fatalf("Unexpected error %#v", err) + } + err = helper.GuaranteedUpdate(context.TODO(), "/some/key", podPtr, true, storage.NewUIDPreconditions("B"), storage.SimpleUpdate(func(in runtime.Object) (runtime.Object, error) { + return obj, nil + })) + if !storage.IsTestFailed(err) { + t.Fatalf("Expect a Test Failed (write conflict) error, got: %v", err) + } +} + +func TestPrefixEtcdKey(t *testing.T) { + server := etcdtesting.NewEtcdTestClientServer(t) + defer server.Terminate(t) + prefix := path.Join("/", etcdtest.PathPrefix()) + helper := newEtcdHelper(server.Client, testapi.Default.Codec(), prefix) + + baseKey := "/some/key" + + // Verify prefix is added + keyBefore := baseKey + keyAfter := helper.prefixEtcdKey(keyBefore) + + assert.Equal(t, keyAfter, path.Join(prefix, baseKey), "Prefix incorrectly added by EtcdHelper") + + // Verify prefix is not added + keyBefore = path.Join(prefix, baseKey) + keyAfter = helper.prefixEtcdKey(keyBefore) + + assert.Equal(t, keyBefore, keyAfter, "Prefix incorrectly added by EtcdHelper") +} + +func TestDeleteUIDMismatch(t *testing.T) { + server := etcdtesting.NewEtcdTestClientServer(t) + defer server.Terminate(t) + prefix := path.Join("/", etcdtest.PathPrefix()) + helper := newEtcdHelper(server.Client, testapi.Default.Codec(), prefix) + + obj := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo", UID: "A"}} + podPtr := &api.Pod{} + err := helper.Create(context.TODO(), "/some/key", obj, podPtr, 0) + if err != nil { + t.Fatalf("Unexpected error %#v", err) + } + err = helper.Delete(context.TODO(), "/some/key", obj, storage.NewUIDPreconditions("B")) + if !storage.IsTestFailed(err) { + t.Fatalf("Expect a Test Failed (write conflict) error, got: %v", err) + } +} + +type getFunc func(ctx context.Context, key string, opts *etcd.GetOptions) (*etcd.Response, error) + +type fakeDeleteKeysAPI struct { + etcd.KeysAPI + fakeGetFunc getFunc + getCount int + // The fakeGetFunc will be called fakeGetCap times before the KeysAPI's Get will be called. + fakeGetCap int +} + +func (f *fakeDeleteKeysAPI) Get(ctx context.Context, key string, opts *etcd.GetOptions) (*etcd.Response, error) { + f.getCount++ + if f.getCount < f.fakeGetCap { + return f.fakeGetFunc(ctx, key, opts) + } + return f.KeysAPI.Get(ctx, key, opts) +} + +// This is to emulate the case where another party updates the object when +// etcdHelper.Delete has verified the preconditions, but hasn't carried out the +// deletion yet. Etcd will fail the deletion and report the conflict. etcdHelper +// should retry until there is no conflict. +func TestDeleteWithRetry(t *testing.T) { + server := etcdtesting.NewEtcdTestClientServer(t) + defer server.Terminate(t) + prefix := path.Join("/", etcdtest.PathPrefix()) + + obj := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo", UID: "A"}} + // fakeGet returns a large ModifiedIndex to emulate the case that another + // party has updated the object. 
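+	// Because fakeDeleteKeysAPI increments getCount before comparing it to
+	// fakeGetCap, the stale node is served for the first fakeGetCap-1 Get
+	// calls and the real KeysAPI.Get is used afterwards, so a successful
+	// Delete performs exactly fakeGetCap Get calls in total.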
+ fakeGet := func(ctx context.Context, key string, opts *etcd.GetOptions) (*etcd.Response, error) { + data, _ := runtime.Encode(testapi.Default.Codec(), obj) + return &etcd.Response{Node: &etcd.Node{Value: string(data), ModifiedIndex: 99}}, nil + } + expectedRetries := 3 + helper := newEtcdHelper(server.Client, testapi.Default.Codec(), prefix) + fake := &fakeDeleteKeysAPI{KeysAPI: helper.etcdKeysAPI, fakeGetCap: expectedRetries, fakeGetFunc: fakeGet} + helper.etcdKeysAPI = fake + + returnedObj := &api.Pod{} + err := helper.Create(context.TODO(), "/some/key", obj, returnedObj, 0) + if err != nil { + t.Errorf("Unexpected error %#v", err) + } + + err = helper.Delete(context.TODO(), "/some/key", obj, storage.NewUIDPreconditions("A")) + if err != nil { + t.Errorf("Unexpected error %#v", err) + } + if fake.getCount != expectedRetries { + t.Errorf("Expect %d retries, got %d", expectedRetries, fake.getCount) + } + err = helper.Get(context.TODO(), "/some/key", obj, false) + if !storage.IsNotFound(err) { + t.Errorf("Expect an NotFound error, got %v", err) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/etcd/etcd_watcher.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/etcd/etcd_watcher.go new file mode 100644 index 000000000000..c856b59ccf89 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/etcd/etcd_watcher.go @@ -0,0 +1,497 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package etcd + +import ( + "fmt" + "net/http" + "sync" + "sync/atomic" + "time" + + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/storage" + etcdutil "k8s.io/kubernetes/pkg/storage/etcd/util" + utilruntime "k8s.io/kubernetes/pkg/util/runtime" + "k8s.io/kubernetes/pkg/watch" + + etcd "github.com/coreos/etcd/client" + "github.com/golang/glog" + "golang.org/x/net/context" +) + +// Etcd watch event actions +const ( + EtcdCreate = "create" + EtcdGet = "get" + EtcdSet = "set" + EtcdCAS = "compareAndSwap" + EtcdDelete = "delete" + EtcdCAD = "compareAndDelete" + EtcdExpire = "expire" +) + +// HighWaterMark is a thread-safe object for tracking the maximum value seen +// for some quantity. +type HighWaterMark int64 + +// Update returns true if and only if 'current' is the highest value ever seen. +func (hwm *HighWaterMark) Update(current int64) bool { + for { + old := atomic.LoadInt64((*int64)(hwm)) + if current <= old { + return false + } + if atomic.CompareAndSwapInt64((*int64)(hwm), old, current) { + return true + } + } +} + +// TransformFunc attempts to convert an object to another object for use with a watcher. 
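+// A transform might, for instance, strip server-populated fields before
+// events are delivered. Illustrative sketch only (stripStatus is not part of
+// this package):
+//
+//	func stripStatus(obj runtime.Object) (runtime.Object, error) {
+//		pod := obj.(*api.Pod)
+//		pod.Status = api.PodStatus{}
+//		return pod, nil
+//	}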
+type TransformFunc func(runtime.Object) (runtime.Object, error)
+
+// includeFunc returns true if the given key should be considered part of a watch.
+type includeFunc func(key string) bool
+
+// exceptKey is an includeFunc that returns false when the provided key matches the watched key.
+func exceptKey(except string) includeFunc {
+	return func(key string) bool {
+		return key != except
+	}
+}
+
+// etcdWatcher converts a native etcd watch to a watch.Interface.
+type etcdWatcher struct {
+	encoding runtime.Codec
+	// Note that versioner is required for etcdWatcher to work correctly.
+	// There is no public constructor of it, so be careful when manipulating
+	// it manually.
+	versioner storage.Versioner
+	transform TransformFunc
+
+	list    bool // If we're doing a recursive watch, should be true.
+	quorum  bool // If we enable quorum, should be true.
+	include includeFunc
+	filter  storage.FilterFunc
+
+	etcdIncoming  chan *etcd.Response
+	etcdError     chan error
+	ctx           context.Context
+	cancel        context.CancelFunc
+	etcdCallEnded chan struct{}
+
+	outgoing chan watch.Event
+	userStop chan struct{}
+	stopped  bool
+	stopLock sync.Mutex
+	// wg is used to avoid calls to etcd after Stop(), and to make sure
+	// that the translate goroutine is not leaked.
+	wg sync.WaitGroup
+
+	// Injectable for testing. Send the event down the outgoing channel.
+	emit func(watch.Event)
+
+	cache etcdCache
+}
+
+// watchWaitDuration is the amount of time to wait for an error from watch.
+const watchWaitDuration = 100 * time.Millisecond
+
+// newEtcdWatcher returns a new etcdWatcher; if list is true, watch sub-nodes.
+// The versioner must be able to handle the objects that transform creates.
+func newEtcdWatcher(
+	list bool, quorum bool, include includeFunc, filter storage.FilterFunc,
+	encoding runtime.Codec, versioner storage.Versioner, transform TransformFunc,
+	cache etcdCache) *etcdWatcher {
+	w := &etcdWatcher{
+		encoding:  encoding,
+		versioner: versioner,
+		transform: transform,
+		list:      list,
+		quorum:    quorum,
+		include:   include,
+		filter:    filter,
+		// Buffer this channel, so that the etcd client is not forced
+		// to context switch with every object it gets, and so that a
+		// long time spent decoding an object won't block the *next*
+		// object. Basically, we see a lot of "401 window exceeded"
+		// errors from etcd, and that's due to the client not streaming
+		// results but rather getting them one at a time. So we really
+		// want to never block the etcd client, if possible. The 100 is
+		// mostly arbitrary--we know it goes as high as 50, though.
+		// There's a V(2) log message that prints the length so we can
+		// monitor how much of this buffer is actually used.
+		etcdIncoming: make(chan *etcd.Response, 100),
+		etcdError:    make(chan error, 1),
+		outgoing:     make(chan watch.Event),
+		userStop:     make(chan struct{}),
+		stopped:      false,
+		wg:           sync.WaitGroup{},
+		cache:        cache,
+		ctx:          nil,
+		cancel:       nil,
+	}
+	w.emit = func(e watch.Event) {
+		// Give up on user stop; without this we leak a lot of goroutines in tests.
+		select {
+		case w.outgoing <- e:
+		case <-w.userStop:
+		}
+	}
+	// translate will call done. We need to Add() here because otherwise,
+	// if Stop() gets called before translate gets started, there'd be a
+	// problem.
+	w.wg.Add(1)
+	go w.translate()
+	return w
+}
+
+// etcdWatch calls etcd's Watch function, and handles any errors. Meant to be called
+// as a goroutine.
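+// If resourceVersion is 0, the current state of the key is fetched first via
+// etcdGetInitialWatchState and the watch resumes after the returned index, so
+// callers observe a consistent "get current state, then watch" stream.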
+func (w *etcdWatcher) etcdWatch(ctx context.Context, client etcd.KeysAPI, key string, resourceVersion uint64) {
+	defer utilruntime.HandleCrash()
+	defer close(w.etcdError)
+	defer close(w.etcdIncoming)
+
+	// All calls to etcd are coming from this function - once it is finished
+	// no other call to etcd should be generated by this watcher.
+	done := func() {}
+
+	// We need to be prepared for Stop() being called at any time.
+	// It can potentially even be called before this function is called.
+	// If that is the case, we simply skip all the code here.
+	// See #18928 for more details.
+	var watcher etcd.Watcher
+	returned := func() bool {
+		w.stopLock.Lock()
+		defer w.stopLock.Unlock()
+		if w.stopped {
+			// Watcher has already been stopped - don't even initiate it here.
+			return true
+		}
+		w.wg.Add(1)
+		done = w.wg.Done
+		// Perform initialization of watcher under lock - we want to avoid the
+		// situation where Stop() is called in the meantime (which in tests can
+		// cause etcd termination and strange behavior here).
+		if resourceVersion == 0 {
+			latest, err := etcdGetInitialWatchState(ctx, client, key, w.list, w.quorum, w.etcdIncoming)
+			if err != nil {
+				w.etcdError <- err
+				return true
+			}
+			resourceVersion = latest
+		}
+
+		opts := etcd.WatcherOptions{
+			Recursive:  w.list,
+			AfterIndex: resourceVersion,
+		}
+		watcher = client.Watcher(key, &opts)
+		w.ctx, w.cancel = context.WithCancel(ctx)
+		return false
+	}()
+	defer done()
+	if returned {
+		return
+	}
+
+	for {
+		resp, err := watcher.Next(w.ctx)
+		if err != nil {
+			w.etcdError <- err
+			return
+		}
+		w.etcdIncoming <- resp
+	}
+}
+
+// etcdGetInitialWatchState turns an etcd Get request into a watch equivalent.
+func etcdGetInitialWatchState(ctx context.Context, client etcd.KeysAPI, key string, recursive bool, quorum bool, incoming chan<- *etcd.Response) (resourceVersion uint64, err error) {
+	opts := etcd.GetOptions{
+		Recursive: recursive,
+		Sort:      false,
+		Quorum:    quorum,
+	}
+	resp, err := client.Get(ctx, key, &opts)
+	if err != nil {
+		if !etcdutil.IsEtcdNotFound(err) {
+			utilruntime.HandleError(fmt.Errorf("watch was unable to retrieve the current index for the provided key (%q): %v", key, err))
+			return resourceVersion, toStorageErr(err, key, 0)
+		}
+		if etcdError, ok := err.(etcd.Error); ok {
+			resourceVersion = etcdError.Index
+		}
+		return resourceVersion, nil
+	}
+	resourceVersion = resp.Index
+	convertRecursiveResponse(resp.Node, resp, incoming)
+	return
+}
+
+// convertRecursiveResponse turns a recursive get response from etcd into individual response objects
+// by copying the original response. This emulates the behavior of a recursive watch.
+func convertRecursiveResponse(node *etcd.Node, response *etcd.Response, incoming chan<- *etcd.Response) {
+	if node.Dir {
+		for i := range node.Nodes {
+			convertRecursiveResponse(node.Nodes[i], response, incoming)
+		}
+		return
+	}
+	copied := *response
+	copied.Action = "get"
+	copied.Node = node
+	incoming <- &copied
+}
+
+var (
+	watchChannelHWM HighWaterMark
+)
+
+// translate pulls stuff from etcd, converts, and pushes out the outgoing channel. Meant to be
+// called as a goroutine.
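+// It terminates when the etcdError channel yields an error (or is closed) or
+// when the user stops the watch, and it closes the outgoing channel on exit.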
+func (w *etcdWatcher) translate() { + defer w.wg.Done() + defer close(w.outgoing) + defer utilruntime.HandleCrash() + + for { + select { + case err := <-w.etcdError: + if err != nil { + var status *unversioned.Status + switch { + case etcdutil.IsEtcdWatchExpired(err): + status = &unversioned.Status{ + Status: unversioned.StatusFailure, + Message: err.Error(), + Code: http.StatusGone, // Gone + Reason: unversioned.StatusReasonExpired, + } + // TODO: need to generate errors using api/errors which has a circular dependency on this package + // no other way to inject errors + // case etcdutil.IsEtcdUnreachable(err): + // status = errors.NewServerTimeout(...) + default: + status = &unversioned.Status{ + Status: unversioned.StatusFailure, + Message: err.Error(), + Code: http.StatusInternalServerError, + Reason: unversioned.StatusReasonInternalError, + } + } + w.emit(watch.Event{ + Type: watch.Error, + Object: status, + }) + } + return + case <-w.userStop: + return + case res, ok := <-w.etcdIncoming: + if ok { + if curLen := int64(len(w.etcdIncoming)); watchChannelHWM.Update(curLen) { + // Monitor if this gets backed up, and how much. + glog.V(2).Infof("watch: %v objects queued in channel.", curLen) + } + w.sendResult(res) + } + // If !ok, don't return here-- must wait for etcdError channel + // to give an error or be closed. + } + } +} + +func (w *etcdWatcher) decodeObject(node *etcd.Node) (runtime.Object, error) { + if obj, found := w.cache.getFromCache(node.ModifiedIndex, storage.Everything); found { + return obj, nil + } + + obj, err := runtime.Decode(w.encoding, []byte(node.Value)) + if err != nil { + return nil, err + } + + // ensure resource version is set on the object we load from etcd + if err := w.versioner.UpdateObject(obj, node.ModifiedIndex); err != nil { + utilruntime.HandleError(fmt.Errorf("failure to version api object (%d) %#v: %v", node.ModifiedIndex, obj, err)) + } + + // perform any necessary transformation + if w.transform != nil { + obj, err = w.transform(obj) + if err != nil { + utilruntime.HandleError(fmt.Errorf("failure to transform api object %#v: %v", obj, err)) + return nil, err + } + } + + if node.ModifiedIndex != 0 { + w.cache.addToCache(node.ModifiedIndex, obj) + } + return obj, nil +} + +func (w *etcdWatcher) sendAdd(res *etcd.Response) { + if res.Node == nil { + utilruntime.HandleError(fmt.Errorf("unexpected nil node: %#v", res)) + return + } + if w.include != nil && !w.include(res.Node.Key) { + return + } + obj, err := w.decodeObject(res.Node) + if err != nil { + utilruntime.HandleError(fmt.Errorf("failure to decode api object: %v\n'%v' from %#v %#v", err, string(res.Node.Value), res, res.Node)) + // TODO: expose an error through watch.Interface? + // Ignore this value. If we stop the watch on a bad value, a client that uses + // the resourceVersion to resume will never be able to get past a bad value. 
+ return + } + if !w.filter(obj) { + return + } + action := watch.Added + if res.Node.ModifiedIndex != res.Node.CreatedIndex { + action = watch.Modified + } + w.emit(watch.Event{ + Type: action, + Object: obj, + }) +} + +func (w *etcdWatcher) sendModify(res *etcd.Response) { + if res.Node == nil { + glog.Errorf("unexpected nil node: %#v", res) + return + } + if w.include != nil && !w.include(res.Node.Key) { + return + } + curObj, err := w.decodeObject(res.Node) + if err != nil { + utilruntime.HandleError(fmt.Errorf("failure to decode api object: %v\n'%v' from %#v %#v", err, string(res.Node.Value), res, res.Node)) + // TODO: expose an error through watch.Interface? + // Ignore this value. If we stop the watch on a bad value, a client that uses + // the resourceVersion to resume will never be able to get past a bad value. + return + } + curObjPasses := w.filter(curObj) + oldObjPasses := false + var oldObj runtime.Object + if res.PrevNode != nil && res.PrevNode.Value != "" { + // Ignore problems reading the old object. + if oldObj, err = w.decodeObject(res.PrevNode); err == nil { + if err := w.versioner.UpdateObject(oldObj, res.Node.ModifiedIndex); err != nil { + utilruntime.HandleError(fmt.Errorf("failure to version api object (%d) %#v: %v", res.Node.ModifiedIndex, oldObj, err)) + } + oldObjPasses = w.filter(oldObj) + } + } + // Some changes to an object may cause it to start or stop matching a filter. + // We need to report those as adds/deletes. So we have to check both the previous + // and current value of the object. + switch { + case curObjPasses && oldObjPasses: + w.emit(watch.Event{ + Type: watch.Modified, + Object: curObj, + }) + case curObjPasses && !oldObjPasses: + w.emit(watch.Event{ + Type: watch.Added, + Object: curObj, + }) + case !curObjPasses && oldObjPasses: + w.emit(watch.Event{ + Type: watch.Deleted, + Object: oldObj, + }) + } + // Do nothing if neither new nor old object passed the filter. +} + +func (w *etcdWatcher) sendDelete(res *etcd.Response) { + if res.PrevNode == nil { + utilruntime.HandleError(fmt.Errorf("unexpected nil prev node: %#v", res)) + return + } + if w.include != nil && !w.include(res.PrevNode.Key) { + return + } + node := *res.PrevNode + if res.Node != nil { + // Note that this sends the *old* object with the etcd index for the time at + // which it gets deleted. This will allow users to restart the watch at the right + // index. + node.ModifiedIndex = res.Node.ModifiedIndex + } + obj, err := w.decodeObject(&node) + if err != nil { + utilruntime.HandleError(fmt.Errorf("failure to decode api object: %v\nfrom %#v %#v", err, res, res.Node)) + // TODO: expose an error through watch.Interface? + // Ignore this value. If we stop the watch on a bad value, a client that uses + // the resourceVersion to resume will never be able to get past a bad value. + return + } + if !w.filter(obj) { + return + } + w.emit(watch.Event{ + Type: watch.Deleted, + Object: obj, + }) +} + +func (w *etcdWatcher) sendResult(res *etcd.Response) { + switch res.Action { + case EtcdCreate, EtcdGet: + w.sendAdd(res) + case EtcdSet, EtcdCAS: + w.sendModify(res) + case EtcdDelete, EtcdExpire, EtcdCAD: + w.sendDelete(res) + default: + utilruntime.HandleError(fmt.Errorf("unknown action: %v", res.Action)) + } +} + +// ResultChan implements watch.Interface. +func (w *etcdWatcher) ResultChan() <-chan watch.Event { + return w.outgoing +} + +// Stop implements watch.Interface. 
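+// Stop is safe to call more than once and from multiple goroutines: the work
+// is guarded by stopLock, and the call blocks until all etcd calls issued by
+// this watcher have finished.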
+func (w *etcdWatcher) Stop() { + w.stopLock.Lock() + if w.cancel != nil { + w.cancel() + w.cancel = nil + } + if !w.stopped { + w.stopped = true + close(w.userStop) + } + w.stopLock.Unlock() + + // Wait until all calls to etcd are finished and no other + // will be issued. + w.wg.Wait() +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/etcd/etcd_watcher_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/etcd/etcd_watcher_test.go new file mode 100644 index 000000000000..c1aa0f11cba6 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/etcd/etcd_watcher_test.go @@ -0,0 +1,580 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package etcd + +import ( + "math/rand" + rt "runtime" + "sync" + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/storage" + "k8s.io/kubernetes/pkg/storage/etcd/etcdtest" + etcdtesting "k8s.io/kubernetes/pkg/storage/etcd/testing" + "k8s.io/kubernetes/pkg/watch" + + etcd "github.com/coreos/etcd/client" + "golang.org/x/net/context" +) + +var versioner = APIObjectVersioner{} + +// Implements etcdCache interface as empty methods (i.e. does not cache any objects) +type fakeEtcdCache struct{} + +func (f *fakeEtcdCache) getFromCache(index uint64, filter storage.FilterFunc) (runtime.Object, bool) { + return nil, false +} + +func (f *fakeEtcdCache) addToCache(index uint64, obj runtime.Object) { +} + +var _ etcdCache = &fakeEtcdCache{} + +func TestWatchInterpretations(t *testing.T) { + codec := testapi.Default.Codec() + // Declare some pods to make the test cases compact. + podFoo := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}} + podBar := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "bar"}} + podBaz := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "baz"}} + firstLetterIsB := func(obj runtime.Object) bool { + return obj.(*api.Pod).Name[0] == 'b' + } + + // All of these test cases will be run with the firstLetterIsB FilterFunc. + table := map[string]struct { + actions []string // Run this test item for every action here. 
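+		// prevNodeValue and nodeValue hold runtime.EncodeOrDie output used
+		// below to build the PrevNode and Node of the synthetic response.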
+ prevNodeValue string + nodeValue string + expectEmit bool + expectType watch.EventType + expectObject runtime.Object + }{ + "create": { + actions: []string{"create", "get"}, + nodeValue: runtime.EncodeOrDie(codec, podBar), + expectEmit: true, + expectType: watch.Added, + expectObject: podBar, + }, + "create but filter blocks": { + actions: []string{"create", "get"}, + nodeValue: runtime.EncodeOrDie(codec, podFoo), + expectEmit: false, + }, + "delete": { + actions: []string{"delete"}, + prevNodeValue: runtime.EncodeOrDie(codec, podBar), + expectEmit: true, + expectType: watch.Deleted, + expectObject: podBar, + }, + "delete but filter blocks": { + actions: []string{"delete"}, + nodeValue: runtime.EncodeOrDie(codec, podFoo), + expectEmit: false, + }, + "modify appears to create 1": { + actions: []string{"set", "compareAndSwap"}, + nodeValue: runtime.EncodeOrDie(codec, podBar), + expectEmit: true, + expectType: watch.Added, + expectObject: podBar, + }, + "modify appears to create 2": { + actions: []string{"set", "compareAndSwap"}, + prevNodeValue: runtime.EncodeOrDie(codec, podFoo), + nodeValue: runtime.EncodeOrDie(codec, podBar), + expectEmit: true, + expectType: watch.Added, + expectObject: podBar, + }, + "modify appears to delete": { + actions: []string{"set", "compareAndSwap"}, + prevNodeValue: runtime.EncodeOrDie(codec, podBar), + nodeValue: runtime.EncodeOrDie(codec, podFoo), + expectEmit: true, + expectType: watch.Deleted, + expectObject: podBar, // Should return last state that passed the filter! + }, + "modify modifies": { + actions: []string{"set", "compareAndSwap"}, + prevNodeValue: runtime.EncodeOrDie(codec, podBar), + nodeValue: runtime.EncodeOrDie(codec, podBaz), + expectEmit: true, + expectType: watch.Modified, + expectObject: podBaz, + }, + "modify ignores": { + actions: []string{"set", "compareAndSwap"}, + nodeValue: runtime.EncodeOrDie(codec, podFoo), + expectEmit: false, + }, + } + + for name, item := range table { + for _, action := range item.actions { + w := newEtcdWatcher(true, false, nil, firstLetterIsB, codec, versioner, nil, &fakeEtcdCache{}) + emitCalled := false + w.emit = func(event watch.Event) { + emitCalled = true + if !item.expectEmit { + return + } + if e, a := item.expectType, event.Type; e != a { + t.Errorf("'%v - %v': expected %v, got %v", name, action, e, a) + } + if e, a := item.expectObject, event.Object; !api.Semantic.DeepDerivative(e, a) { + t.Errorf("'%v - %v': expected %v, got %v", name, action, e, a) + } + } + + var n, pn *etcd.Node + if item.nodeValue != "" { + n = &etcd.Node{Value: item.nodeValue} + } + if item.prevNodeValue != "" { + pn = &etcd.Node{Value: item.prevNodeValue} + } + + w.sendResult(&etcd.Response{ + Action: action, + Node: n, + PrevNode: pn, + }) + + if e, a := item.expectEmit, emitCalled; e != a { + t.Errorf("'%v - %v': expected %v, got %v", name, action, e, a) + } + w.Stop() + } + } +} + +func TestWatchInterpretation_ResponseNotSet(t *testing.T) { + _, codec := testScheme(t) + w := newEtcdWatcher(false, false, nil, storage.Everything, codec, versioner, nil, &fakeEtcdCache{}) + w.emit = func(e watch.Event) { + t.Errorf("Unexpected emit: %v", e) + } + + w.sendResult(&etcd.Response{ + Action: "update", + }) + w.Stop() +} + +func TestWatchInterpretation_ResponseNoNode(t *testing.T) { + _, codec := testScheme(t) + actions := []string{"create", "set", "compareAndSwap", "delete"} + for _, action := range actions { + w := newEtcdWatcher(false, false, nil, storage.Everything, codec, versioner, nil, &fakeEtcdCache{}) + w.emit = func(e 
watch.Event) { + t.Errorf("Unexpected emit: %v", e) + } + w.sendResult(&etcd.Response{ + Action: action, + }) + w.Stop() + } +} + +func TestWatchInterpretation_ResponseBadData(t *testing.T) { + _, codec := testScheme(t) + actions := []string{"create", "set", "compareAndSwap", "delete"} + for _, action := range actions { + w := newEtcdWatcher(false, false, nil, storage.Everything, codec, versioner, nil, &fakeEtcdCache{}) + w.emit = func(e watch.Event) { + t.Errorf("Unexpected emit: %v", e) + } + w.sendResult(&etcd.Response{ + Action: action, + Node: &etcd.Node{ + Value: "foobar", + }, + }) + w.sendResult(&etcd.Response{ + Action: action, + PrevNode: &etcd.Node{ + Value: "foobar", + }, + }) + w.Stop() + } +} + +func TestSendResultDeleteEventHaveLatestIndex(t *testing.T) { + codec := testapi.Default.Codec() + filter := func(obj runtime.Object) bool { + return obj.(*api.Pod).Name != "bar" + } + w := newEtcdWatcher(false, false, nil, filter, codec, versioner, nil, &fakeEtcdCache{}) + + eventChan := make(chan watch.Event, 1) + w.emit = func(e watch.Event) { + eventChan <- e + } + + fooPod := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}} + barPod := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "bar"}} + fooBytes, err := runtime.Encode(codec, fooPod) + if err != nil { + t.Fatalf("Encode failed: %v", err) + } + barBytes, err := runtime.Encode(codec, barPod) + if err != nil { + t.Fatalf("Encode failed: %v", err) + } + + tests := []struct { + response *etcd.Response + expRV string + }{{ // Delete event + response: &etcd.Response{ + Action: EtcdDelete, + Node: &etcd.Node{ + ModifiedIndex: 2, + }, + PrevNode: &etcd.Node{ + Value: string(fooBytes), + ModifiedIndex: 1, + }, + }, + expRV: "2", + }, { // Modify event with uninterested data + response: &etcd.Response{ + Action: EtcdSet, + Node: &etcd.Node{ + Value: string(barBytes), + ModifiedIndex: 2, + }, + PrevNode: &etcd.Node{ + Value: string(fooBytes), + ModifiedIndex: 1, + }, + }, + expRV: "2", + }} + + for i, tt := range tests { + w.sendResult(tt.response) + ev := <-eventChan + if ev.Type != watch.Deleted { + t.Errorf("#%d: event type want=Deleted, get=%s", i, ev.Type) + return + } + rv := ev.Object.(*api.Pod).ResourceVersion + if rv != tt.expRV { + t.Errorf("#%d: resource version want=%s, get=%s", i, tt.expRV, rv) + } + } + w.Stop() +} + +func TestWatch(t *testing.T) { + codec := testapi.Default.Codec() + server := etcdtesting.NewEtcdTestClientServer(t) + defer server.Terminate(t) + key := "/some/key" + h := newEtcdHelper(server.Client, codec, etcdtest.PathPrefix()) + + watching, err := h.Watch(context.TODO(), key, "0", storage.Everything) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + // watching is explicitly closed below. 
+ + // Test normal case + pod := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}} + returnObj := &api.Pod{} + err = h.Create(context.TODO(), key, pod, returnObj, 0) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + + event := <-watching.ResultChan() + if e, a := watch.Added, event.Type; e != a { + t.Errorf("Expected %v, got %v", e, a) + } + if e, a := pod, event.Object; !api.Semantic.DeepDerivative(e, a) { + t.Errorf("Expected %v, got %v", e, a) + } + + watching.Stop() + + // There is a race in etcdWatcher so that after calling Stop() one of + // two things can happen: + // - ResultChan() may be closed (triggered by closing userStop channel) + // - an Error "context cancelled" may be emitted (triggered by cancelling request + // to etcd and putting that error to etcdError channel) + // We need to be prepared for both here. + event, open := <-watching.ResultChan() + if open && event.Type != watch.Error { + t.Errorf("Unexpected event from stopped watcher: %#v", event) + } +} + +func emptySubsets() []api.EndpointSubset { + return []api.EndpointSubset{} +} + +func makeSubsets(ip string, port int) []api.EndpointSubset { + return []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{IP: ip}}, + Ports: []api.EndpointPort{{Port: int32(port)}}, + }} +} + +func TestWatchEtcdState(t *testing.T) { + codec := testapi.Default.Codec() + key := etcdtest.AddPrefix("/somekey/foo") + server := etcdtesting.NewEtcdTestClientServer(t) + defer server.Terminate(t) + + h := newEtcdHelper(server.Client, codec, etcdtest.PathPrefix()) + watching, err := h.Watch(context.TODO(), key, "0", storage.Everything) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + defer watching.Stop() + + endpoint := &api.Endpoints{ + ObjectMeta: api.ObjectMeta{Name: "foo"}, + Subsets: emptySubsets(), + } + + err = h.Create(context.TODO(), key, endpoint, endpoint, 0) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + + event := <-watching.ResultChan() + if event.Type != watch.Added { + t.Errorf("Unexpected event %#v", event) + } + + subset := makeSubsets("127.0.0.1", 9000) + endpoint.Subsets = subset + endpoint.ResourceVersion = "" + + // CAS the previous value + updateFn := func(input runtime.Object, res storage.ResponseMeta) (runtime.Object, *uint64, error) { + newObj, err := api.Scheme.DeepCopy(endpoint) + if err != nil { + t.Errorf("unexpected error: %v", err) + return nil, nil, err + } + return newObj.(*api.Endpoints), nil, nil + } + err = h.GuaranteedUpdate(context.TODO(), key, &api.Endpoints{}, false, nil, updateFn) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + + event = <-watching.ResultChan() + if event.Type != watch.Modified { + t.Errorf("Unexpected event %#v", event) + } + + if e, a := endpoint, event.Object; !api.Semantic.DeepDerivative(e, a) { + t.Errorf("Unexpected error: expected %#v, got %#v", e, a) + } +} + +func TestWatchFromZeroIndex(t *testing.T) { + codec := testapi.Default.Codec() + pod := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}} + + key := etcdtest.AddPrefix("/somekey/foo") + server := etcdtesting.NewEtcdTestClientServer(t) + defer server.Terminate(t) + + h := newEtcdHelper(server.Client, codec, etcdtest.PathPrefix()) + + // set before the watch and verify events + err := h.Create(context.TODO(), key, pod, pod, 0) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + pod.ResourceVersion = "" + + // check for concatenation on watch event with CAS + updateFn := func(input runtime.Object, res storage.ResponseMeta) (runtime.Object, *uint64, 
error) { + pod := input.(*api.Pod) + pod.Name = "bar" + return pod, nil, nil + } + err = h.GuaranteedUpdate(context.TODO(), key, &api.Pod{}, false, nil, updateFn) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + + watching, err := h.Watch(context.TODO(), key, "0", storage.Everything) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + defer watching.Stop() + + // marked as modified b/c of concatenation + event := <-watching.ResultChan() + if event.Type != watch.Modified { + t.Errorf("Unexpected event %#v", event) + } + + pod.Name = "baz" + updateFn = func(input runtime.Object, res storage.ResponseMeta) (runtime.Object, *uint64, error) { + pod := input.(*api.Pod) + pod.Name = "baz" + return pod, nil, nil + } + err = h.GuaranteedUpdate(context.TODO(), key, &api.Pod{}, false, nil, updateFn) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + + event = <-watching.ResultChan() + if event.Type != watch.Modified { + t.Errorf("Unexpected event %#v", event) + } + + if e, a := pod, event.Object; !api.Semantic.DeepDerivative(e, a) { + t.Errorf("Unexpected error: expected %#v, got %#v", e, a) + } +} + +func TestWatchListFromZeroIndex(t *testing.T) { + codec := testapi.Default.Codec() + key := etcdtest.AddPrefix("/some/key") + server := etcdtesting.NewEtcdTestClientServer(t) + defer server.Terminate(t) + h := newEtcdHelper(server.Client, codec, key) + + watching, err := h.WatchList(context.TODO(), key, "0", storage.Everything) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + defer watching.Stop() + + // creates key/foo which should trigger the WatchList for "key" + pod := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}} + err = h.Create(context.TODO(), pod.Name, pod, pod, 0) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + + event, _ := <-watching.ResultChan() + if event.Type != watch.Added { + t.Errorf("Unexpected event %#v", event) + } + + if e, a := pod, event.Object; !api.Semantic.DeepDerivative(e, a) { + t.Errorf("Unexpected error: expected %v, got %v", e, a) + } +} + +func TestWatchListIgnoresRootKey(t *testing.T) { + codec := testapi.Default.Codec() + pod := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}} + key := etcdtest.AddPrefix("/some/key") + server := etcdtesting.NewEtcdTestClientServer(t) + defer server.Terminate(t) + h := newEtcdHelper(server.Client, codec, key) + + watching, err := h.WatchList(context.TODO(), key, "0", storage.Everything) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + defer watching.Stop() + + // creates key/foo which should trigger the WatchList for "key" + err = h.Create(context.TODO(), key, pod, pod, 0) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + + // force context switch to ensure watches would catch and notify. 
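+	// Gosched only encourages the scheduler to run the watch goroutines; the
+	// non-blocking select below then verifies that no event was delivered for
+	// the root key itself.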
+ rt.Gosched() + + select { + case event, _ := <-watching.ResultChan(): + t.Fatalf("Unexpected event: %#v", event) + default: + // fall through, expected behavior + } +} + +func TestWatchPurposefulShutdown(t *testing.T) { + _, codec := testScheme(t) + server := etcdtesting.NewEtcdTestClientServer(t) + defer server.Terminate(t) + key := "/some/key" + h := newEtcdHelper(server.Client, codec, etcdtest.PathPrefix()) + + // Test purposeful shutdown + watching, err := h.Watch(context.TODO(), key, "0", storage.Everything) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + + watching.Stop() + rt.Gosched() + + // There is a race in etcdWatcher so that after calling Stop() one of + // two things can happen: + // - ResultChan() may be closed (triggered by closing userStop channel) + // - an Error "context cancelled" may be emitted (triggered by cancelling request + // to etcd and putting that error to etcdError channel) + // We need to be prepared for both here. + event, open := <-watching.ResultChan() + if open && event.Type != watch.Error { + t.Errorf("Unexpected event from stopped watcher: %#v", event) + } +} + +func TestHighWaterMark(t *testing.T) { + var h HighWaterMark + + for i := int64(10); i < 20; i++ { + if !h.Update(i) { + t.Errorf("unexpected false for %v", i) + } + if h.Update(i - 1) { + t.Errorf("unexpected true for %v", i-1) + } + } + + m := int64(0) + wg := sync.WaitGroup{} + for i := 0; i < 300; i++ { + wg.Add(1) + v := rand.Int63() + go func(v int64) { + defer wg.Done() + h.Update(v) + }(v) + if v > m { + m = v + } + } + wg.Wait() + if m != int64(h) { + t.Errorf("unexpected value, wanted %v, got %v", m, int64(h)) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/etcd/etcdtest/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/etcd/etcdtest/doc.go new file mode 100644 index 000000000000..ef9e6ce8607b --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/etcd/etcdtest/doc.go @@ -0,0 +1,17 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package etcdtest diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/etcd/etcdtest/etcdtest.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/etcd/etcdtest/etcdtest.go new file mode 100644 index 000000000000..d248eedb27ff --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/etcd/etcdtest/etcdtest.go @@ -0,0 +1,39 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package etcdtest + +import ( + "os" + "path" +) + +// Cache size to use for tests. +const DeserializationCacheSize = 150 + +// Returns the prefix set via the ETCD_PREFIX environment variable (if any). +func PathPrefix() string { + pref := os.Getenv("ETCD_PREFIX") + if pref == "" { + pref = "registry" + } + return path.Join("/", pref) +} + +// Adds the ETCD_PREFIX to the provided key +func AddPrefix(in string) string { + return path.Join(PathPrefix(), in) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/etcd/metrics/metrics.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/etcd/metrics/metrics.go new file mode 100644 index 000000000000..7e88e43c2e6f --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/etcd/metrics/metrics.go @@ -0,0 +1,113 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "sync" + "time" + + "github.com/prometheus/client_golang/prometheus" +) + +var ( + cacheHitCounter = prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "etcd_helper_cache_hit_count", + Help: "Counter of etcd helper cache hits.", + }, + ) + cacheMissCounter = prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "etcd_helper_cache_miss_count", + Help: "Counter of etcd helper cache miss.", + }, + ) + cacheEntryCounter = prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "etcd_helper_cache_entry_count", + Help: "Counter of etcd helper cache entries. This can be different from etcd_helper_cache_miss_count " + + "because two concurrent threads can miss the cache and generate the same entry twice.", + }, + ) + cacheGetLatency = prometheus.NewSummary( + prometheus.SummaryOpts{ + Name: "etcd_request_cache_get_latencies_summary", + Help: "Latency in microseconds of getting an object from etcd cache", + }, + ) + cacheAddLatency = prometheus.NewSummary( + prometheus.SummaryOpts{ + Name: "etcd_request_cache_add_latencies_summary", + Help: "Latency in microseconds of adding an object to etcd cache", + }, + ) + etcdRequestLatenciesSummary = prometheus.NewSummaryVec( + prometheus.SummaryOpts{ + Name: "etcd_request_latencies_summary", + Help: "Etcd request latency summary in microseconds for each operation and object type.", + }, + []string{"operation", "type"}, + ) +) + +var registerMetrics sync.Once + +// Register all metrics. +func Register() { + // Register the metrics. 
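+	// registerMetrics is a sync.Once, so concurrent or repeated calls to
+	// Register are safe and the collectors are registered at most once.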
+ registerMetrics.Do(func() { + prometheus.MustRegister(cacheHitCounter) + prometheus.MustRegister(cacheMissCounter) + prometheus.MustRegister(cacheEntryCounter) + prometheus.MustRegister(cacheAddLatency) + prometheus.MustRegister(cacheGetLatency) + prometheus.MustRegister(etcdRequestLatenciesSummary) + }) +} + +func RecordEtcdRequestLatency(verb, resource string, startTime time.Time) { + etcdRequestLatenciesSummary.WithLabelValues(verb, resource).Observe(float64(time.Since(startTime) / time.Microsecond)) +} + +func ObserveGetCache(startTime time.Time) { + cacheGetLatency.Observe(float64(time.Since(startTime) / time.Microsecond)) +} + +func ObserveAddCache(startTime time.Time) { + cacheAddLatency.Observe(float64(time.Since(startTime) / time.Microsecond)) +} + +func ObserveCacheHit() { + cacheHitCounter.Inc() +} + +func ObserveCacheMiss() { + cacheMissCounter.Inc() +} + +func ObserveNewEntry() { + cacheEntryCounter.Inc() +} + +func Reset() { + cacheHitCounter.Set(0) + cacheMissCounter.Set(0) + cacheEntryCounter.Set(0) + // TODO: Reset cacheAddLatency. + // TODO: Reset cacheGetLatency. + etcdRequestLatenciesSummary.Reset() +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/etcd/testing/certificates.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/etcd/testing/certificates.go new file mode 100644 index 000000000000..c3fea5ffc130 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/etcd/testing/certificates.go @@ -0,0 +1,113 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testing + +// You can use cfssl tool to generate certificates, please refer +// https://github.com/coreos/etcd/tree/master/hack/tls-setup for more details. 
+// +// ca-config.json: +// expiry was changed from 1 year to 100 years (876000h) +// ca-csr.json: +// ca expiry was set to 100 years (876000h) ("ca":{"expiry":"876000h"}) +// key was changed from ecdsa,384 to rsa,2048 +// req-csr.json: +// key was changed from ecdsa,384 to rsa,2048 +// hosts were changed to "localhost","127.0.0.1" +const CAFileContent = ` +-----BEGIN CERTIFICATE----- +MIIEUDCCAzigAwIBAgIUKfV5+qwlw3JneAPdJS7JCO8xIlYwDQYJKoZIhvcNAQEL +BQAwgawxCzAJBgNVBAYTAlVTMSowKAYDVQQKEyFIb25lc3QgQWNobWVkJ3MgVXNl +ZCBDZXJ0aWZpY2F0ZXMxKTAnBgNVBAsTIEhhc3RpbHktR2VuZXJhdGVkIFZhbHVl +cyBEaXZpc29uMRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRMwEQYDVQQIEwpDYWxp +Zm9ybmlhMRkwFwYDVQQDExBBdXRvZ2VuZXJhdGVkIENBMCAXDTE2MDMxMjIzMTQw +MFoYDzIxMTYwMjE3MjMxNDAwWjCBrDELMAkGA1UEBhMCVVMxKjAoBgNVBAoTIUhv +bmVzdCBBY2htZWQncyBVc2VkIENlcnRpZmljYXRlczEpMCcGA1UECxMgSGFzdGls +eS1HZW5lcmF0ZWQgVmFsdWVzIERpdmlzb24xFjAUBgNVBAcTDVNhbiBGcmFuY2lz +Y28xEzARBgNVBAgTCkNhbGlmb3JuaWExGTAXBgNVBAMTEEF1dG9nZW5lcmF0ZWQg +Q0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDP+acpr1USrObZFu+6 +v+Bk6rYw+sWynP373cNUUiHfnZ3D7f9yJsDscV0Mo4R8DddqkxawrA5fK2Fm2Z9G +vvY5par4/JbwRIEkXmeM4e52Mqv0Yuoz62O+0jQvRawnCCJMcKuo+ijHMjmm0AF1 +JdhTpTgvUwEP9WtY9JVTkfMCnDqZiqOU5D+d4YWUtkKqgQNvbZRs6wGubhMCZe8X +m+3bK8YAsWWtoFgr7plxXk4D8MLh+PqJ3oJjfxfW5A9dHbnSEmdZ3vrYwrKgyfNf +bvHE5qQmiSZUbUaCw3mKfaEMCNesPT46nBHxhAWc5aiL1tOXzvV5Uze7A7huPoI9 +a3etAgMBAAGjZjBkMA4GA1UdDwEB/wQEAwIBBjASBgNVHRMBAf8ECDAGAQH/AgEC +MB0GA1UdDgQWBBQYc0xXQ6VNjFvIOqWfXorxx9rKRzAfBgNVHSMEGDAWgBQYc0xX +Q6VNjFvIOqWfXorxx9rKRzANBgkqhkiG9w0BAQsFAAOCAQEAaKyHDWYVjEyEKTXJ +qS9r46ehL5FZlWD2ZytBP8aHE307l9AfQ+DFWldCNaqMXLZozsresVaSzSOI6UUD +lCIQLDpPyxbpR320u8mC08+lhhwR/YRkrEqKHk56Wl4OaqoyWmguqYU9p0DiQeTU +sZsxOwG7cyEEvvs+XmZ/vBLBOr59xyjwn4seQqzwZj3VYeiKLw40iQt1yT442rcP +CfdlE9wTEONvWT+kBGMt0JlalXH3jFvlfcGQdDfRmDeTJtA+uIbvJhwJuGCNHHAc +xqC+4mAGBPN/dMPXpjayHD5dOXIKLfrNpqse6jImYlY9zduvwIHRDK/zvqTyPlNZ +uR84Nw== +-----END CERTIFICATE----- +` +const CertFileContent = ` +-----BEGIN CERTIFICATE----- +MIIELzCCAxegAwIBAgIUcjkJA3cmHeoBQggaKZmfKebFL9cwDQYJKoZIhvcNAQEL +BQAwgawxCzAJBgNVBAYTAlVTMSowKAYDVQQKEyFIb25lc3QgQWNobWVkJ3MgVXNl +ZCBDZXJ0aWZpY2F0ZXMxKTAnBgNVBAsTIEhhc3RpbHktR2VuZXJhdGVkIFZhbHVl +cyBEaXZpc29uMRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRMwEQYDVQQIEwpDYWxp +Zm9ybmlhMRkwFwYDVQQDExBBdXRvZ2VuZXJhdGVkIENBMCAXDTE2MDMxMjIzMTQw +MFoYDzIxMTYwMjE3MjMxNDAwWjBVMRYwFAYDVQQKEw1hdXRvZ2VuZXJhdGVkMRUw +EwYDVQQLEwxldGNkIGNsdXN0ZXIxFTATBgNVBAcTDHRoZSBpbnRlcm5ldDENMAsG +A1UEAxMEZXRjZDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAOiW5A65 +hWGbnwceoZHM0+OexU4cPF/FpP+7BOK5i7ymSWAqfKfNuio2TB1lAErC1oX7bgTX +ieP10uz3FYWQNrlDn0I4KSA888rFPtx8GwoxH/52fGlE80BUV9PNeOVP+mYza0ih +oFj2+PhXVL/JZbx9P/2RLSNbEnq+OPk8AN82SkNtpFzanwtpb3f+kt73878KNoQu +xYZaCF1sK45Kn7mjKSDu/b3xUbTrNwnyVAGOdLzI7CCWOu+ECoZYAH4ZNHHakbyY +eWQ7U9leocEOPlqxsQAKodaCYjuAaOFIcz8/W81q+3qNw/6GbZ4znjRKQ3OtIPZ4 +JH1iNofCudWDp+0CAwEAAaOBnDCBmTAOBgNVHQ8BAf8EBAMCBaAwHQYDVR0lBBYw +FAYIKwYBBQUHAwEGCCsGAQUFBwMCMAwGA1UdEwEB/wQCMAAwHQYDVR0OBBYEFMJE +43qLCWhyZAE/wxNneSJw7aUVMB8GA1UdIwQYMBaAFBhzTFdDpU2MW8g6pZ9eivHH +2spHMBoGA1UdEQQTMBGCCWxvY2FsaG9zdIcEfwAAATANBgkqhkiG9w0BAQsFAAOC +AQEAuELC8tbmpyKlA4HLSDHOUquypNyiE6ftBIifJtp8bvBd+jiv4Pr8oVGxHoqq +48X7lamvDirLV5gmK0CxO+EXkIUHhULzPyYPynqsR7KZlk1PWghqsF65nwqcjS3b +tykLttD1AUDIozYvujVYBKXGxb6jcGM1rBF1XtslciFZ5qQnj6dTUujo9/xBA2ql +kOKiVXBNU8KFzq4c20RzHFLfWkbc30Q4XG4dTDVBeGupnFQRkZ0y2dSSU82QcLA/ +HgAyQSO7+csN13r84zbmDuRpUgo6eTXzJ+77G19KDkEL7XEtlw2jB2L6/o+3RGtw +JLOpEsgi7hsvOYCuTA3Krw52Mw== +-----END CERTIFICATE----- +` +const KeyFileContent = ` +-----BEGIN 
RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEA6JbkDrmFYZufBx6hkczT457FThw8X8Wk/7sE4rmLvKZJYCp8 +p826KjZMHWUASsLWhftuBNeJ4/XS7PcVhZA2uUOfQjgpIDzzysU+3HwbCjEf/nZ8 +aUTzQFRX08145U/6ZjNrSKGgWPb4+FdUv8llvH0//ZEtI1sSer44+TwA3zZKQ22k +XNqfC2lvd/6S3vfzvwo2hC7FhloIXWwrjkqfuaMpIO79vfFRtOs3CfJUAY50vMjs +IJY674QKhlgAfhk0cdqRvJh5ZDtT2V6hwQ4+WrGxAAqh1oJiO4Bo4UhzPz9bzWr7 +eo3D/oZtnjOeNEpDc60g9ngkfWI2h8K51YOn7QIDAQABAoIBAQCj88Fc08++x0kp +ZqEzunPebsvcTLEOPa8aiUVfYLWszHbKsAhg7Pb+zHmI+upiyMcZeOvLw/eyVlVR +rrZgCRFaNN2texMaY3zigXnXSDBzVb+cyv7V4cGqpgmnBp7i3ia/Jh3I/A2gyK8l +t8HI03nAjXWvE0gDNS5okXBt16sxq6ZWyzHHVbN3UYtCDxnyh2Ibck4b+K8I8Bn1 +mwMsSqPXJS1UQ3U5UqcaMs7WOEGx+xmaPJTWm5Lb//BkakGuBTQj+7wotyXQYG5U +uZdPPcFRk6cqgjzUeKVUtGkdmfgHSTdIwZowkKibB4rdrudsRnSwfeB+83Jp9JwG +JPrGvsbNAoGBAPULIO+vVBZXVpUEAhvNSXtmOi/hAbQhOuix8iwHbJ5EbrWaDn4B +Reb2cw/fZGgGG4jtAOXdiY8R1XGGP8+RPZ5El10ZWnNrKQfpZ27gK/5yeq5dfGBG +4JLUpcrT180FJo00rgiQYJnHCk1fWrnzXNV6K08ZZHGr6yv4S/jbq/7vAoGBAPL9 +NTN/UWXWFlSHVcb2dFHcvIiPwRj9KwhuMu90b/CilBbSJ1av13xtf2ar5zkrEtWH +CB3q3wBaklQP9MfOqEWGZeOUcd9AbYWtxHjHmP5fJA9RjErjlTtqGkusNtZJbchU +UWfT/Tl9pREpCvJ/8iawc1hx7sHHKzYwnDnMaQbjAoGAfJdd9cBltr5NjZLuJ4in +dhCyQSncnePPegUQJwbXWVleGQPtnm+zRQ3Fzyo8eQ+x7Frk+/s6N/5PUlt6EmW8 +uL4TYAjGDq1LvXQVXTCp7cPzULjDxogDI2Tvr0MrFFksEtvYKQ6Pr2CeglybWrS8 +XOazIpK8mXdaKY8jwbKfrw0CgYAFnfrb3OaZzxAnFhXSiqH3vn2RPpl9JWUYRcvh +ozRvQKLhwCvuohP+KV3XlsO6m5dM3lk+r85F6NIXJWNINyvGp6u1ThovygJ+I502 +GY8c2kAwJndyx74MaJCBDVMbMwlZpzFWkBz7dj8ZnXRGVNTZNh0Ef2XAjwUdtJP3 +9hS7dwKBgQDCzq0RIxFyy3F5baGHWLVICxmhNExQ2+Vebh+DvsPKtnz6OrWdRbGX +wgGVLrn53s6eCblnXLtKr/Li+t7fS8IkQkvu5guOvI9VeVUmZhFET3GVmUxu+JTb +iQY4uBgaf8Fgay4dkOfjvlOpFDR4E7UbJpg8/cFKTrpwgOiUVyFVdQ== +-----END RSA PRIVATE KEY----- +` diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/etcd/testing/utils.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/etcd/testing/utils.go new file mode 100644 index 000000000000..aaa7b67a82de --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/etcd/testing/utils.go @@ -0,0 +1,252 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package testing + +import ( + "fmt" + "io/ioutil" + "net" + "net/http" + "net/http/httptest" + "os" + "path" + "testing" + "time" + + "k8s.io/kubernetes/pkg/util/wait" + + etcd "github.com/coreos/etcd/client" + "github.com/coreos/etcd/etcdserver" + "github.com/coreos/etcd/etcdserver/api/v2http" + "github.com/coreos/etcd/pkg/testutil" + "github.com/coreos/etcd/pkg/transport" + "github.com/coreos/etcd/pkg/types" + "github.com/golang/glog" + "golang.org/x/net/context" +) + +// EtcdTestServer encapsulates the datastructures needed to start local instance for testing +type EtcdTestServer struct { + etcdserver.ServerConfig + PeerListeners, ClientListeners []net.Listener + Client etcd.Client + + CertificatesDir string + CertFile string + KeyFile string + CAFile string + + raftHandler http.Handler + s *etcdserver.EtcdServer + hss []*httptest.Server +} + +// newLocalListener opens a port localhost using any port +func newLocalListener(t *testing.T) net.Listener { + l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatal(err) + } + return l +} + +// newSecuredLocalListener opens a port localhost using any port +// with SSL enable +func newSecuredLocalListener(t *testing.T, certFile, keyFile, caFile string) net.Listener { + var l net.Listener + l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatal(err) + } + tlsInfo := transport.TLSInfo{ + CertFile: certFile, + KeyFile: keyFile, + CAFile: caFile, + } + tlscfg, err := tlsInfo.ServerConfig() + if err != nil { + t.Fatalf("unexpected serverConfig error: %v", err) + } + l, err = transport.NewKeepAliveListener(l, "https", tlscfg) + if err != nil { + t.Fatal(err) + } + return l +} + +func newHttpTransport(t *testing.T, certFile, keyFile, caFile string) etcd.CancelableTransport { + tlsInfo := transport.TLSInfo{ + CertFile: certFile, + KeyFile: keyFile, + CAFile: caFile, + } + tr, err := transport.NewTransport(tlsInfo, time.Second) + if err != nil { + t.Fatal(err) + } + return tr +} + +// configureTestCluster will set the params to start an etcd server +func configureTestCluster(t *testing.T, name string) *EtcdTestServer { + var err error + m := &EtcdTestServer{} + + pln := newLocalListener(t) + m.PeerListeners = []net.Listener{pln} + m.PeerURLs, err = types.NewURLs([]string{"http://" + pln.Addr().String()}) + if err != nil { + t.Fatal(err) + } + + m.CertificatesDir, err = ioutil.TempDir(os.TempDir(), "etcd_certificates") + if err != nil { + t.Fatal(err) + } + m.CertFile = path.Join(m.CertificatesDir, "etcdcert.pem") + if err = ioutil.WriteFile(m.CertFile, []byte(CertFileContent), 0644); err != nil { + t.Fatal(err) + } + m.KeyFile = path.Join(m.CertificatesDir, "etcdkey.pem") + if err = ioutil.WriteFile(m.KeyFile, []byte(KeyFileContent), 0644); err != nil { + t.Fatal(err) + } + m.CAFile = path.Join(m.CertificatesDir, "ca.pem") + if err = ioutil.WriteFile(m.CAFile, []byte(CAFileContent), 0644); err != nil { + t.Fatal(err) + } + + cln := newSecuredLocalListener(t, m.CertFile, m.KeyFile, m.CAFile) + m.ClientListeners = []net.Listener{cln} + m.ClientURLs, err = types.NewURLs([]string{"https://" + cln.Addr().String()}) + if err != nil { + t.Fatal(err) + } + + m.Name = name + m.DataDir, err = ioutil.TempDir(os.TempDir(), "etcd") + if err != nil { + t.Fatal(err) + } + + clusterStr := fmt.Sprintf("%s=http://%s", name, pln.Addr().String()) + m.InitialPeerURLsMap, err = types.NewURLsMap(clusterStr) + if err != nil { + t.Fatal(err) + } + m.InitialClusterToken = "TestEtcd" + m.NewCluster = true + m.ForceNewCluster = false + 
m.ElectionTicks = 10 + m.TickMs = uint(10) + + return m +} + +// launch will attempt to start the etcd server +func (m *EtcdTestServer) launch(t *testing.T) error { + var err error + if m.s, err = etcdserver.NewServer(&m.ServerConfig); err != nil { + return fmt.Errorf("failed to initialize the etcd server: %v", err) + } + m.s.SyncTicker = time.Tick(500 * time.Millisecond) + m.s.Start() + m.raftHandler = &testutil.PauseableHandler{Next: v2http.NewPeerHandler(m.s)} + for _, ln := range m.PeerListeners { + hs := &httptest.Server{ + Listener: ln, + Config: &http.Server{Handler: m.raftHandler}, + } + hs.Start() + m.hss = append(m.hss, hs) + } + for _, ln := range m.ClientListeners { + hs := &httptest.Server{ + Listener: ln, + Config: &http.Server{Handler: v2http.NewClientHandler(m.s, m.ServerConfig.ReqTimeout())}, + } + hs.Start() + m.hss = append(m.hss, hs) + } + return nil +} + +// waitForEtcd wait until etcd is propagated correctly +func (m *EtcdTestServer) waitUntilUp() error { + membersAPI := etcd.NewMembersAPI(m.Client) + for start := time.Now(); time.Since(start) < wait.ForeverTestTimeout; time.Sleep(10 * time.Millisecond) { + members, err := membersAPI.List(context.TODO()) + if err != nil { + glog.Errorf("Error when getting etcd cluster members") + continue + } + if len(members) == 1 && len(members[0].ClientURLs) > 0 { + return nil + } + } + return fmt.Errorf("timeout on waiting for etcd cluster") +} + +// Terminate will shutdown the running etcd server +func (m *EtcdTestServer) Terminate(t *testing.T) { + m.Client = nil + m.s.Stop() + // TODO: This is a pretty ugly hack to workaround races during closing + // in-memory etcd server in unit tests - see #18928 for more details. + // We should get rid of it as soon as we have a proper fix - etcd clients + // have overwritten transport counting opened connections (probably by + // overwriting Dial function) and termination function waiting for all + // connections to be closed and stopping accepting new ones. + time.Sleep(250 * time.Millisecond) + for _, hs := range m.hss { + hs.CloseClientConnections() + hs.Close() + } + if err := os.RemoveAll(m.ServerConfig.DataDir); err != nil { + t.Fatal(err) + } + if err := os.RemoveAll(m.CertificatesDir); err != nil { + t.Fatal(err) + } +} + +// NewEtcdTestClientServer creates a new client and server for testing +func NewEtcdTestClientServer(t *testing.T) *EtcdTestServer { + server := configureTestCluster(t, "foo") + err := server.launch(t) + if err != nil { + t.Fatalf("Failed to start etcd server error=%v", err) + return nil + } + + cfg := etcd.Config{ + Endpoints: server.ClientURLs.StringSlice(), + Transport: newHttpTransport(t, server.CertFile, server.KeyFile, server.CAFile), + } + server.Client, err = etcd.New(cfg) + if err != nil { + server.Terminate(t) + t.Fatalf("Unexpected error in NewEtcdTestClientServer (%v)", err) + return nil + } + if err := server.waitUntilUp(); err != nil { + server.Terminate(t) + t.Fatalf("Unexpected error in waitUntilUp (%v)", err) + return nil + } + return server +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/etcd/util/doc.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/etcd/util/doc.go new file mode 100644 index 000000000000..aa1039fafa8c --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/etcd/util/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package util holds generic etcd-related utility functions that any user of etcd might want to
+// use, without pulling in kubernetes-specific code.
+package util
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/etcd/util/etcd_util.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/etcd/util/etcd_util.go
new file mode 100644
index 000000000000..b15ec5bd0199
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/etcd/util/etcd_util.go
@@ -0,0 +1,99 @@
+/*
+Copyright 2015 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+
+	etcd "github.com/coreos/etcd/client"
+)
+
+// IsEtcdNotFound returns true if and only if err is an etcd not found error.
+func IsEtcdNotFound(err error) bool {
+	return isEtcdErrorNum(err, etcd.ErrorCodeKeyNotFound)
+}
+
+// IsEtcdNodeExist returns true if and only if err is an etcd node already exist error.
+func IsEtcdNodeExist(err error) bool {
+	return isEtcdErrorNum(err, etcd.ErrorCodeNodeExist)
+}
+
+// IsEtcdTestFailed returns true if and only if err is an etcd write conflict.
+func IsEtcdTestFailed(err error) bool {
+	return isEtcdErrorNum(err, etcd.ErrorCodeTestFailed)
+}
+
+// IsEtcdWatchExpired returns true if and only if err indicates the watch has expired.
+func IsEtcdWatchExpired(err error) bool {
+	// NOTE: It is unclear why this isn't etcd.ErrorCodeWatcherCleared;
+	// we keep the error code that was matched previously.
+	return isEtcdErrorNum(err, etcd.ErrorCodeEventIndexCleared)
+}
+
+// IsEtcdUnreachable returns true if and only if err indicates the server could not be reached.
+func IsEtcdUnreachable(err error) bool {
+	// NOTE: The logic has changed; the previous error code no longer applies.
+	return err == etcd.ErrClusterUnavailable
+}
+
+// isEtcdErrorNum returns true if and only if err is an etcd error whose code
+// matches errorCode.
+func isEtcdErrorNum(err error, errorCode int) bool {
+	if err != nil {
+		if etcdError, ok := err.(etcd.Error); ok {
+			return etcdError.Code == errorCode
+		}
+		// NOTE: There are other error types returned.
+	}
+	return false
+}
+
+// GetEtcdVersion performs a version check against the provided Etcd server,
+// returning the string response, and an error (if any).
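+// For example (illustrative; the endpoint is whatever your etcd serves on):
+//
+//	version, err := GetEtcdVersion("http://127.0.0.1:2379")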
+func GetEtcdVersion(host string) (string, error) {
+	response, err := http.Get(host + "/version")
+	if err != nil {
+		return "", err
+	}
+	defer response.Body.Close()
+	if response.StatusCode != http.StatusOK {
+		// Note: err is always nil here, so report the HTTP status instead.
+		return "", fmt.Errorf("unsuccessful response from etcd server %q: %s", host, response.Status)
+	}
+	versionBytes, err := ioutil.ReadAll(response.Body)
+	if err != nil {
+		return "", err
+	}
+	return string(versionBytes), nil
+}
+
+type etcdHealth struct {
+	// Note this has to be public so the json library can modify it.
+	Health string `json:"health"`
+}
+
+func EtcdHealthCheck(data []byte) error {
+	obj := etcdHealth{}
+	if err := json.Unmarshal(data, &obj); err != nil {
+		return err
+	}
+	if obj.Health != "true" {
+		return fmt.Errorf("unhealthy status: %s", obj.Health)
+	}
+	return nil
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/etcd/util/etcd_util_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/etcd/util/etcd_util_test.go
new file mode 100644
index 000000000000..cc41958ad042
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/etcd/util/etcd_util_test.go
@@ -0,0 +1,120 @@
+/*
+Copyright 2014 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+	"fmt"
+	"math/rand"
+	"net"
+	"net/http"
+	"net/http/httptest"
+	"strconv"
+	"testing"
+	"time"
+
+	etcd "github.com/coreos/etcd/client"
+	"github.com/stretchr/testify/assert"
+)
+
+const validEtcdVersion = "etcd 2.0.9"
+
+func TestIsEtcdNotFound(t *testing.T) {
+	try := func(err error, isNotFound bool) {
+		if IsEtcdNotFound(err) != isNotFound {
+			t.Errorf("Expected %#v to return %v, but it did not", err, isNotFound)
+		}
+	}
+	try(&etcd.Error{Code: 101}, false)
+	try(nil, false)
+	try(fmt.Errorf("some other kind of error"), false)
+}
+
+func TestGetEtcdVersion_ValidVersion(t *testing.T) {
+	testServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		fmt.Fprint(w, validEtcdVersion)
+	}))
+	defer testServer.Close()
+
+	var version string
+	var err error
+	if version, err = GetEtcdVersion(testServer.URL); err != nil {
+		t.Errorf("Unexpected error: %v", err)
+	}
+	assert.Equal(t, validEtcdVersion, version, "Unexpected version")
+	assert.Nil(t, err)
+}
+
+func TestGetEtcdVersion_ErrorStatus(t *testing.T) {
+	testServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		w.WriteHeader(http.StatusServiceUnavailable)
+	}))
+	defer testServer.Close()
+
+	_, err := GetEtcdVersion(testServer.URL)
+	assert.NotNil(t, err)
+}
+
+func TestGetEtcdVersion_NotListening(t *testing.T) {
+	portIsOpen := func(port int) bool {
+		conn, err := net.DialTimeout("tcp", "127.0.0.1:"+strconv.Itoa(port), 1*time.Second)
+		if err == nil {
+			conn.Close()
+			return true
+		}
+		return false
+	}
+
+	port := rand.Intn((1 << 16) - 1)
+	for tried := 0; portIsOpen(port); tried++ {
+		if tried >= 10 {
+			t.Fatal("Couldn't find a closed TCP port to continue testing")
+		}
+		port++
+	}
+
+	_, err := GetEtcdVersion("http://127.0.0.1:" + strconv.Itoa(port))
+	assert.NotNil(t, err)
+}
+
+func TestEtcdHealthCheck(t *testing.T) {
+	tests := []struct {
+		data      string
+		expectErr bool
+	}{
+		{
+			data:      "{\"health\": \"true\"}",
+			expectErr: false,
+		},
+		{
+			data:      "{\"health\": \"false\"}",
+			expectErr: true,
+		},
+		{
+			data:      "invalid json",
+			expectErr: true,
+		},
+	}
+	for _, test := range tests {
+		err := EtcdHealthCheck([]byte(test.data))
+		if err != nil && !test.expectErr {
+			t.Errorf("unexpected error: %v", err)
+		}
+		if err == nil && test.expectErr {
+			t.Error("unexpected non-error")
+		}
+	}
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/etcd3/compact.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/etcd3/compact.go
new file mode 100644
index 000000000000..ddd8e312c3ec
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/etcd3/compact.go
@@ -0,0 +1,104 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package etcd3
+
+import (
+	"sync"
+	"time"
+
+	"github.com/coreos/etcd/clientv3"
+	"github.com/golang/glog"
+	"golang.org/x/net/context"
+)
+
+const compactInterval = 10 * time.Minute
+
+var (
+	endpointsMapMu sync.Mutex
+	endpointsMap   map[string]struct{}
+)
+
+func init() {
+	endpointsMap = make(map[string]struct{})
+}
+
+// StartCompactor starts a compactor in the background that compacts keys
+// older than a fixed interval.
+// We need to compact keys because we can't let the on-disk data grow forever.
+// We keep the most recent 10 minutes of data, which should be enough for slow
+// watchers and to tolerate bursts.
+// TODO: We might keep a longer history (12h) in the future once the storage API
+// can take advantage of multi-version keys.
+func StartCompactor(ctx context.Context, client *clientv3.Client) {
+	endpointsMapMu.Lock()
+	defer endpointsMapMu.Unlock()
+
+	// We can't have multiple compaction jobs for the same cluster.
+	// Currently we rely on endpoints to differentiate clusters.
+	var emptyStruct struct{}
+	for _, ep := range client.Endpoints() {
+		if _, ok := endpointsMap[ep]; ok {
+			glog.V(4).Infof("compactor already exists for endpoints %v", client.Endpoints())
+			return
+		}
+	}
+	for _, ep := range client.Endpoints() {
+		endpointsMap[ep] = emptyStruct
+	}
+
+	go compactor(ctx, client, compactInterval)
+}
+
+// compactor periodically compacts historical versions of keys in etcd.
+// After compaction, old versions of keys set before the given interval will be gone.
+// Any API call for those old versions of keys will return an error.
+// interval: the interval between each compaction. The first compaction happens after "interval".
+func compactor(ctx context.Context, client *clientv3.Client, interval time.Duration) {
+	var curRev int64
+	var err error
+	for {
+		select {
+		case <-time.After(interval):
+		case <-ctx.Done():
+			return
+		}
+
+		curRev, err = compact(ctx, client, curRev)
+		if err != nil {
+			glog.Error(err)
+			continue
+		}
+	}
+}
+
+// compact compacts the etcd store and returns the current rev.
+// If it couldn't get current revision, the old rev will be returned. +func compact(ctx context.Context, client *clientv3.Client, oldRev int64) (int64, error) { + resp, err := client.Get(ctx, "/") + if err != nil { + return oldRev, err + } + curRev := resp.Header.Revision + if oldRev == 0 { + return curRev, nil + } + err = client.Compact(ctx, oldRev) + if err != nil { + return curRev, err + } + glog.Infof("etcd: Compacted rev %d, endpoints %v", oldRev, client.Endpoints()) + return curRev, nil +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/etcd3/compact_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/etcd3/compact_test.go new file mode 100644 index 000000000000..af5a3ed4d1d7 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/etcd3/compact_test.go @@ -0,0 +1,48 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package etcd3 + +import ( + "testing" + + "github.com/coreos/etcd/clientv3" + etcdrpc "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" + "github.com/coreos/etcd/integration" + "golang.org/x/net/context" +) + +func TestCompact(t *testing.T) { + cluster := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + defer cluster.Terminate(t) + client := cluster.RandClient() + ctx := context.Background() + + putResp, err := client.Put(ctx, "/somekey", "data") + if err != nil { + t.Fatalf("Put failed: %v", err) + } + + _, err = compact(ctx, client, putResp.Header.Revision) + if err != nil { + t.Fatalf("compact failed: %v", err) + } + + _, err = client.Get(ctx, "/somekey", clientv3.WithRev(putResp.Header.Revision)) + if err != etcdrpc.ErrCompacted { + t.Errorf("Expecting ErrCompacted, but get=%v", err) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/etcd3/event.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/etcd3/event.go new file mode 100644 index 000000000000..58072bd7b4f2 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/etcd3/event.go @@ -0,0 +1,50 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package etcd3 + +import ( + "github.com/coreos/etcd/clientv3" + "github.com/coreos/etcd/storage/storagepb" +) + +type event struct { + key string + value []byte + rev int64 + isDeleted bool + isCreated bool +} + +func parseKV(kv *storagepb.KeyValue) *event { + return &event{ + key: string(kv.Key), + value: kv.Value, + rev: kv.ModRevision, + isDeleted: false, + isCreated: kv.ModRevision == kv.CreateRevision, + } +} + +func parseEvent(e *clientv3.Event) *event { + return &event{ + key: string(e.Kv.Key), + value: e.Kv.Value, + rev: e.Kv.ModRevision, + isDeleted: e.Type == clientv3.EventTypeDelete, + isCreated: e.IsCreate(), + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/etcd3/store.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/etcd3/store.go new file mode 100644 index 000000000000..4ac455df5d64 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/etcd3/store.go @@ -0,0 +1,474 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package etcd3 + +import ( + "bytes" + "errors" + "fmt" + "path" + "reflect" + "strings" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/meta" + "k8s.io/kubernetes/pkg/conversion" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/storage" + "k8s.io/kubernetes/pkg/storage/etcd" + "k8s.io/kubernetes/pkg/watch" + + "github.com/coreos/etcd/clientv3" + "github.com/golang/glog" + "golang.org/x/net/context" +) + +type store struct { + client *clientv3.Client + codec runtime.Codec + versioner storage.Versioner + pathPrefix string + watcher *watcher +} + +type elemForDecode struct { + data []byte + rev uint64 +} + +type objState struct { + obj runtime.Object + meta *storage.ResponseMeta + rev int64 + data []byte +} + +// New returns an etcd3 implementation of storage.Interface. +func New(c *clientv3.Client, codec runtime.Codec, prefix string) storage.Interface { + return newStore(c, codec, prefix) +} + +func newStore(c *clientv3.Client, codec runtime.Codec, prefix string) *store { + versioner := etcd.APIObjectVersioner{} + return &store{ + client: c, + versioner: versioner, + codec: codec, + pathPrefix: prefix, + watcher: newWatcher(c, codec, versioner), + } +} + +// Backends implements storage.Interface.Backends. +func (s *store) Backends(ctx context.Context) []string { + resp, err := s.client.MemberList(ctx) + if err != nil { + glog.Errorf("Error obtaining etcd members list: %q", err) + return nil + } + var mlist []string + for _, member := range resp.Members { + mlist = append(mlist, member.ClientURLs...) + } + return mlist +} + +// Codec implements storage.Interface.Codec. +func (s *store) Codec() runtime.Codec { + return s.codec +} + +// Versioner implements storage.Interface.Versioner. +func (s *store) Versioner() storage.Versioner { + return s.versioner +} + +// Get implements storage.Interface.Get. 
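+//
+// Editorial sketch, not part of the original patch: the expected call shape,
+// assuming a store whose codec understands api.Pod and a hypothetical key:
+//
+//	pod := &api.Pod{}
+//	// ignoreNotFound=false: a missing key yields storage.NewKeyNotFoundError.
+//	if err := s.Get(ctx, "/pods/default/foo", pod, false); err != nil {
+//		return err
+//	}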
+func (s *store) Get(ctx context.Context, key string, out runtime.Object, ignoreNotFound bool) error {
+	key = keyWithPrefix(s.pathPrefix, key)
+	getResp, err := s.client.KV.Get(ctx, key)
+	if err != nil {
+		return err
+	}
+
+	if len(getResp.Kvs) == 0 {
+		if ignoreNotFound {
+			return runtime.SetZeroValue(out)
+		}
+		return storage.NewKeyNotFoundError(key, 0)
+	}
+	kv := getResp.Kvs[0]
+	return decode(s.codec, s.versioner, kv.Value, out, kv.ModRevision)
+}
+
+// Create implements storage.Interface.Create.
+func (s *store) Create(ctx context.Context, key string, obj, out runtime.Object, ttl uint64) error {
+	if version, err := s.versioner.ObjectResourceVersion(obj); err == nil && version != 0 {
+		return errors.New("resourceVersion should not be set on objects to be created")
+	}
+	data, err := runtime.Encode(s.codec, obj)
+	if err != nil {
+		return err
+	}
+	key = keyWithPrefix(s.pathPrefix, key)
+
+	opts, err := s.ttlOpts(ctx, int64(ttl))
+	if err != nil {
+		return err
+	}
+
+	txnResp, err := s.client.KV.Txn(ctx).If(
+		notFound(key),
+	).Then(
+		clientv3.OpPut(key, string(data), opts...),
+	).Commit()
+	if err != nil {
+		return err
+	}
+	if !txnResp.Succeeded {
+		return storage.NewKeyExistsError(key, 0)
+	}
+
+	if out != nil {
+		putResp := txnResp.Responses[0].GetResponsePut()
+		return decode(s.codec, s.versioner, data, out, putResp.Header.Revision)
+	}
+	return nil
+}
+
+// Delete implements storage.Interface.Delete.
+func (s *store) Delete(ctx context.Context, key string, out runtime.Object, preconditions *storage.Preconditions) error {
+	v, err := conversion.EnforcePtr(out)
+	if err != nil {
+		panic("unable to convert output object to pointer")
+	}
+	key = keyWithPrefix(s.pathPrefix, key)
+	if preconditions == nil {
+		return s.unconditionalDelete(ctx, key, out)
+	}
+	return s.conditionalDelete(ctx, key, out, v, preconditions)
+}
+
+func (s *store) unconditionalDelete(ctx context.Context, key string, out runtime.Object) error {
+	// We need to do get and delete in a single transaction in order to
+	// know the value and revision before deleting it.
+	txnResp, err := s.client.KV.Txn(ctx).If().Then(
+		clientv3.OpGet(key),
+		clientv3.OpDelete(key),
+	).Commit()
+	if err != nil {
+		return err
+	}
+	getResp := txnResp.Responses[0].GetResponseRange()
+	if len(getResp.Kvs) == 0 {
+		return storage.NewKeyNotFoundError(key, 0)
+	}
+
+	kv := getResp.Kvs[0]
+	return decode(s.codec, s.versioner, kv.Value, out, kv.ModRevision)
+}
+
+func (s *store) conditionalDelete(ctx context.Context, key string, out runtime.Object, v reflect.Value, preconditions *storage.Preconditions) error {
+	getResp, err := s.client.KV.Get(ctx, key)
+	if err != nil {
+		return err
+	}
+	for {
+		origState, err := s.getState(getResp, key, v, false)
+		if err != nil {
+			return err
+		}
+		if err := checkPreconditions(key, preconditions, origState.obj); err != nil {
+			return err
+		}
+		txnResp, err := s.client.KV.Txn(ctx).If(
+			clientv3.Compare(clientv3.ModRevision(key), "=", origState.rev),
+		).Then(
+			clientv3.OpDelete(key),
+		).Else(
+			clientv3.OpGet(key),
+		).Commit()
+		if err != nil {
+			return err
+		}
+		if !txnResp.Succeeded {
+			getResp = (*clientv3.GetResponse)(txnResp.Responses[0].GetResponseRange())
+			glog.V(4).Infof("deletion of %s failed because of a conflict, going to retry", key)
+			continue
+		}
+		return decode(s.codec, s.versioner, origState.data, out, origState.rev)
+	}
+}
+
+// GuaranteedUpdate implements storage.Interface.GuaranteedUpdate.
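+//
+// Editorial sketch, not part of the original patch: tryUpdate may be invoked
+// multiple times, once per optimistic-concurrency retry, so it must be free of
+// side effects. A hypothetical caller that sets a label on an api.Pod:
+//
+//	err := s.GuaranteedUpdate(ctx, key, &api.Pod{}, false, nil,
+//		storage.SimpleUpdate(func(obj runtime.Object) (runtime.Object, error) {
+//			pod := obj.(*api.Pod)
+//			pod.Labels = map[string]string{"touched": "true"}
+//			return pod, nil
+//		}))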
+func (s *store) GuaranteedUpdate(ctx context.Context, key string, out runtime.Object, ignoreNotFound bool, preconditions *storage.Preconditions, tryUpdate storage.UpdateFunc) error {
+	v, err := conversion.EnforcePtr(out)
+	if err != nil {
+		panic("unable to convert output object to pointer")
+	}
+	key = keyWithPrefix(s.pathPrefix, key)
+	getResp, err := s.client.KV.Get(ctx, key)
+	if err != nil {
+		return err
+	}
+	for {
+		origState, err := s.getState(getResp, key, v, ignoreNotFound)
+		if err != nil {
+			return err
+		}
+
+		if err := checkPreconditions(key, preconditions, origState.obj); err != nil {
+			return err
+		}
+
+		ret, ttl, err := s.updateState(origState, tryUpdate)
+		if err != nil {
+			return err
+		}
+
+		data, err := runtime.Encode(s.codec, ret)
+		if err != nil {
+			return err
+		}
+		if bytes.Equal(data, origState.data) {
+			return decode(s.codec, s.versioner, origState.data, out, origState.rev)
+		}
+
+		opts, err := s.ttlOpts(ctx, int64(ttl))
+		if err != nil {
+			return err
+		}
+
+		txnResp, err := s.client.KV.Txn(ctx).If(
+			clientv3.Compare(clientv3.ModRevision(key), "=", origState.rev),
+		).Then(
+			clientv3.OpPut(key, string(data), opts...),
+		).Else(
+			clientv3.OpGet(key),
+		).Commit()
+		if err != nil {
+			return err
+		}
+		if !txnResp.Succeeded {
+			getResp = (*clientv3.GetResponse)(txnResp.Responses[0].GetResponseRange())
+			glog.V(4).Infof("GuaranteedUpdate of %s failed because of a conflict, going to retry", key)
+			continue
+		}
+		putResp := txnResp.Responses[0].GetResponsePut()
+		return decode(s.codec, s.versioner, data, out, putResp.Header.Revision)
+	}
+}
+
+// GetToList implements storage.Interface.GetToList.
+func (s *store) GetToList(ctx context.Context, key string, filter storage.FilterFunc, listObj runtime.Object) error {
+	listPtr, err := meta.GetItemsPtr(listObj)
+	if err != nil {
+		return err
+	}
+	key = keyWithPrefix(s.pathPrefix, key)
+
+	getResp, err := s.client.KV.Get(ctx, key)
+	if err != nil {
+		return err
+	}
+	if len(getResp.Kvs) == 0 {
+		return nil
+	}
+	elems := []*elemForDecode{{
+		data: getResp.Kvs[0].Value,
+		rev:  uint64(getResp.Kvs[0].ModRevision),
+	}}
+	if err := decodeList(elems, filter, listPtr, s.codec, s.versioner); err != nil {
+		return err
+	}
+	// update version with cluster level revision
+	return s.versioner.UpdateList(listObj, uint64(getResp.Header.Revision))
+}
+
+// List implements storage.Interface.List.
+func (s *store) List(ctx context.Context, key, resourceVersion string, filter storage.FilterFunc, listObj runtime.Object) error {
+	listPtr, err := meta.GetItemsPtr(listObj)
+	if err != nil {
+		return err
+	}
+	key = keyWithPrefix(s.pathPrefix, key)
+	// We need to make sure the key ends with "/" so that we only get children "directories".
+	// e.g. if we have keys "/a", "/a/b", "/ab", getting keys with prefix "/a" will return all three,
+	// while prefix "/a/" will return only "/a/b", which is the correct answer.
+	if !strings.HasSuffix(key, "/") {
+		key += "/"
+	}
+	getResp, err := s.client.KV.Get(ctx, key, clientv3.WithPrefix())
+	if err != nil {
+		return err
+	}
+
+	elems := make([]*elemForDecode, len(getResp.Kvs))
+	for i, kv := range getResp.Kvs {
+		elems[i] = &elemForDecode{
+			data: kv.Value,
+			rev:  uint64(kv.ModRevision),
+		}
+	}
+	if err := decodeList(elems, filter, listPtr, s.codec, s.versioner); err != nil {
+		return err
+	}
+	// update version with cluster level revision
+	return s.versioner.UpdateList(listObj, uint64(getResp.Header.Revision))
+}
+
+// Watch implements storage.Interface.Watch.
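+//
+// Editorial sketch, not part of the original patch: consuming the returned
+// watch.Interface, with resourceVersion "0" meaning "deliver current state as
+// Added events, then stream subsequent changes" (key is hypothetical):
+//
+//	w, err := s.Watch(ctx, "/pods/default/foo", "0", storage.Everything)
+//	if err != nil {
+//		return err
+//	}
+//	defer w.Stop()
+//	for ev := range w.ResultChan() {
+//		glog.Infof("event %v: %#v", ev.Type, ev.Object)
+//	}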
+func (s *store) Watch(ctx context.Context, key string, resourceVersion string, filter storage.FilterFunc) (watch.Interface, error) { + return s.watch(ctx, key, resourceVersion, filter, false) +} + +// WatchList implements storage.Interface.WatchList. +func (s *store) WatchList(ctx context.Context, key string, resourceVersion string, filter storage.FilterFunc) (watch.Interface, error) { + return s.watch(ctx, key, resourceVersion, filter, true) +} + +func (s *store) watch(ctx context.Context, key string, rv string, filter storage.FilterFunc, recursive bool) (watch.Interface, error) { + rev, err := storage.ParseWatchResourceVersion(rv) + if err != nil { + return nil, err + } + key = keyWithPrefix(s.pathPrefix, key) + return s.watcher.Watch(ctx, key, int64(rev), recursive, filter) +} + +func (s *store) getState(getResp *clientv3.GetResponse, key string, v reflect.Value, ignoreNotFound bool) (*objState, error) { + state := &objState{ + obj: reflect.New(v.Type()).Interface().(runtime.Object), + meta: &storage.ResponseMeta{}, + } + if len(getResp.Kvs) == 0 { + if !ignoreNotFound { + return nil, storage.NewKeyNotFoundError(key, 0) + } + if err := runtime.SetZeroValue(state.obj); err != nil { + return nil, err + } + } else { + state.rev = getResp.Kvs[0].ModRevision + state.meta.ResourceVersion = uint64(state.rev) + state.data = getResp.Kvs[0].Value + if err := decode(s.codec, s.versioner, state.data, state.obj, state.rev); err != nil { + return nil, err + } + } + return state, nil +} + +func (s *store) updateState(st *objState, userUpdate storage.UpdateFunc) (runtime.Object, uint64, error) { + ret, ttlPtr, err := userUpdate(st.obj, *st.meta) + if err != nil { + return nil, 0, err + } + + version, err := s.versioner.ObjectResourceVersion(ret) + if err != nil { + return nil, 0, err + } + if version != 0 { + // We cannot store object with resourceVersion in etcd. We need to reset it. + if err := s.versioner.UpdateObject(ret, 0); err != nil { + return nil, 0, fmt.Errorf("UpdateObject failed: %v", err) + } + } + var ttl uint64 + if ttlPtr != nil { + ttl = *ttlPtr + } + return ret, ttl, nil +} + +// ttlOpts returns client options based on given ttl. +// ttl: if ttl is non-zero, it will attach the key to a lease with ttl of roughly the same length +func (s *store) ttlOpts(ctx context.Context, ttl int64) ([]clientv3.OpOption, error) { + if ttl == 0 { + return nil, nil + } + // TODO: one lease per ttl key is expensive. Based on current use case, we can have a long window to + // put keys within into same lease. We shall benchmark this and optimize the performance. + lcr, err := s.client.Lease.Grant(ctx, ttl) + if err != nil { + return nil, err + } + return []clientv3.OpOption{clientv3.WithLease(clientv3.LeaseID(lcr.ID))}, nil +} + +func keyWithPrefix(prefix, key string) string { + if strings.HasPrefix(key, prefix) { + return key + } + return path.Join(prefix, key) +} + +// decode decodes value of bytes into object. It will also set the object resource version to rev. +// On success, objPtr would be set to the object. 
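+//
+// Editorial sketch, not part of the original patch, of the expected call
+// shape, mirroring how Get above uses it:
+//
+//	pod := &api.Pod{}
+//	kv := getResp.Kvs[0]
+//	if err := decode(s.codec, s.versioner, kv.Value, pod, kv.ModRevision); err != nil {
+//		return err
+//	}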
+func decode(codec runtime.Codec, versioner storage.Versioner, value []byte, objPtr runtime.Object, rev int64) error { + if _, err := conversion.EnforcePtr(objPtr); err != nil { + panic("unable to convert output object to pointer") + } + _, _, err := codec.Decode(value, nil, objPtr) + if err != nil { + return err + } + // being unable to set the version does not prevent the object from being extracted + versioner.UpdateObject(objPtr, uint64(rev)) + return nil +} + +// decodeList decodes a list of values into a list of objects, with resource version set to corresponding rev. +// On success, ListPtr would be set to the list of objects. +func decodeList(elems []*elemForDecode, filter storage.FilterFunc, ListPtr interface{}, codec runtime.Codec, versioner storage.Versioner) error { + v, err := conversion.EnforcePtr(ListPtr) + if err != nil || v.Kind() != reflect.Slice { + panic("need ptr to slice") + } + for _, elem := range elems { + obj, _, err := codec.Decode(elem.data, nil, reflect.New(v.Type().Elem()).Interface().(runtime.Object)) + if err != nil { + return err + } + // being unable to set the version does not prevent the object from being extracted + versioner.UpdateObject(obj, elem.rev) + if filter(obj) { + v.Set(reflect.Append(v, reflect.ValueOf(obj).Elem())) + } + } + return nil +} + +func checkPreconditions(key string, preconditions *storage.Preconditions, out runtime.Object) error { + if preconditions == nil { + return nil + } + objMeta, err := api.ObjectMetaFor(out) + if err != nil { + return storage.NewInternalErrorf("can't enforce preconditions %v on un-introspectable object %v, got error: %v", *preconditions, out, err) + } + if preconditions.UID != nil && *preconditions.UID != objMeta.UID { + errMsg := fmt.Sprintf("Precondition failed: UID in precondition: %v, UID in object meta: %v", preconditions.UID, objMeta.UID) + return storage.NewInvalidObjError(key, errMsg) + } + return nil +} + +func notFound(key string) clientv3.Cmp { + return clientv3.Compare(clientv3.ModRevision(key), "=", 0) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/etcd3/store_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/etcd3/store_test.go new file mode 100644 index 000000000000..2fd686d2ee67 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/etcd3/store_test.go @@ -0,0 +1,554 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package etcd3 + +import ( + "fmt" + "reflect" + "sync" + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/storage" + + "github.com/coreos/etcd/integration" + "golang.org/x/net/context" + "k8s.io/kubernetes/pkg/watch" +) + +func TestCreate(t *testing.T) { + ctx, store, cluster := testSetup(t) + defer cluster.Terminate(t) + etcdClient := cluster.RandClient() + + key := "/testkey" + out := &api.Pod{} + obj := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}} + + // verify that kv pair is empty before set + getResp, err := etcdClient.KV.Get(ctx, key) + if err != nil { + t.Fatalf("etcdClient.KV.Get failed: %v", err) + } + if len(getResp.Kvs) != 0 { + t.Fatalf("expecting empty result on key: %s", key) + } + + err = store.Create(ctx, key, obj, out, 0) + if err != nil { + t.Fatalf("Set failed: %v", err) + } + // basic tests of the output + if obj.ObjectMeta.Name != out.ObjectMeta.Name { + t.Errorf("pod name want=%s, get=%s", obj.ObjectMeta.Name, out.ObjectMeta.Name) + } + if out.ResourceVersion == "" { + t.Errorf("output should have non-empty resource version") + } + + // verify that kv pair is not empty after set + getResp, err = etcdClient.KV.Get(ctx, key) + if err != nil { + t.Fatalf("etcdClient.KV.Get failed: %v", err) + } + if len(getResp.Kvs) == 0 { + t.Fatalf("expecting non empty result on key: %s", key) + } +} + +func TestCreateWithTTL(t *testing.T) { + ctx, store, cluster := testSetup(t) + defer cluster.Terminate(t) + + input := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}} + key := "/somekey" + + out := &api.Pod{} + if err := store.Create(ctx, key, input, out, 1); err != nil { + t.Fatalf("Create failed: %v", err) + } + + w, err := store.Watch(ctx, key, out.ResourceVersion, storage.Everything) + if err != nil { + t.Fatalf("Watch failed: %v", err) + } + testCheckEventType(t, watch.Deleted, w) +} + +func TestCreateWithKeyExist(t *testing.T) { + ctx, store, cluster := testSetup(t) + defer cluster.Terminate(t) + obj := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}} + key, _ := testPropogateStore(t, store, ctx, obj) + out := &api.Pod{} + err := store.Create(ctx, key, obj, out, 0) + if err == nil || !storage.IsNodeExist(err) { + t.Errorf("expecting key exists error, but get: %s", err) + } +} + +func TestGet(t *testing.T) { + ctx, store, cluster := testSetup(t) + defer cluster.Terminate(t) + key, storedObj := testPropogateStore(t, store, ctx, &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}}) + + tests := []struct { + key string + ignoreNotFound bool + expectNotFoundErr bool + expectedOut *api.Pod + }{{ // test get on existing item + key: key, + ignoreNotFound: false, + expectNotFoundErr: false, + expectedOut: storedObj, + }, { // test get on non-existing item with ignoreNotFound=false + key: "/non-existing", + ignoreNotFound: false, + expectNotFoundErr: true, + }, { // test get on non-existing item with ignoreNotFound=true + key: "/non-existing", + ignoreNotFound: true, + expectNotFoundErr: false, + expectedOut: &api.Pod{}, + }} + + for i, tt := range tests { + out := &api.Pod{} + err := store.Get(ctx, tt.key, out, tt.ignoreNotFound) + if tt.expectNotFoundErr { + if err == nil || !storage.IsNotFound(err) { + t.Errorf("#%d: expecting not found error, but get: %s", i, err) + } + continue + } + if err != nil { + t.Fatalf("Get failed: %v", err) + } + if !reflect.DeepEqual(tt.expectedOut, out) { + t.Errorf("#%d: pod want=%#v, get=%#v", i, tt.expectedOut, out) + } + } +} + +func 
TestUnconditionalDelete(t *testing.T) { + ctx, store, cluster := testSetup(t) + defer cluster.Terminate(t) + key, storedObj := testPropogateStore(t, store, ctx, &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}}) + + tests := []struct { + key string + expectedObj *api.Pod + expectNotFoundErr bool + }{{ // test unconditional delete on existing key + key: key, + expectedObj: storedObj, + expectNotFoundErr: false, + }, { // test unconditional delete on non-existing key + key: "/non-existing", + expectedObj: nil, + expectNotFoundErr: true, + }} + + for i, tt := range tests { + out := &api.Pod{} // reset + err := store.Delete(ctx, tt.key, out, nil) + if tt.expectNotFoundErr { + if err == nil || !storage.IsNotFound(err) { + t.Errorf("#%d: expecting not found error, but get: %s", i, err) + } + continue + } + if err != nil { + t.Fatalf("Delete failed: %v", err) + } + if !reflect.DeepEqual(tt.expectedObj, out) { + t.Errorf("#%d: pod want=%#v, get=%#v", i, tt.expectedObj, out) + } + } +} + +func TestConditionalDelete(t *testing.T) { + ctx, store, cluster := testSetup(t) + defer cluster.Terminate(t) + key, storedObj := testPropogateStore(t, store, ctx, &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo", UID: "A"}}) + + tests := []struct { + precondition *storage.Preconditions + expectInvalidObjErr bool + }{{ // test conditional delete with UID match + precondition: storage.NewUIDPreconditions("A"), + expectInvalidObjErr: false, + }, { // test conditional delete with UID mismatch + precondition: storage.NewUIDPreconditions("B"), + expectInvalidObjErr: true, + }} + + for i, tt := range tests { + out := &api.Pod{} + err := store.Delete(ctx, key, out, tt.precondition) + if tt.expectInvalidObjErr { + if err == nil || !storage.IsInvalidObj(err) { + t.Errorf("#%d: expecting invalid UID error, but get: %s", i, err) + } + continue + } + if err != nil { + t.Fatalf("Delete failed: %v", err) + } + if !reflect.DeepEqual(storedObj, out) { + t.Errorf("#%d: pod want=%#v, get=%#v", i, storedObj, out) + } + key, storedObj = testPropogateStore(t, store, ctx, &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo", UID: "A"}}) + } +} + +func TestGetToList(t *testing.T) { + ctx, store, cluster := testSetup(t) + defer cluster.Terminate(t) + key, storedObj := testPropogateStore(t, store, ctx, &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}}) + + tests := []struct { + key string + filter storage.FilterFunc + expectedOut []*api.Pod + }{{ // test GetToList on existing key + key: key, + filter: storage.Everything, + expectedOut: []*api.Pod{storedObj}, + }, { // test GetToList on non-existing key + key: "/non-existing", + filter: storage.Everything, + expectedOut: nil, + }, { // test GetToList with filter to reject the pod + key: "/non-existing", + filter: func(obj runtime.Object) bool { + pod, ok := obj.(*api.Pod) + if !ok { + t.Fatal("It should be able to convert obj to *api.Pod") + } + return pod.Name != storedObj.Name + }, + expectedOut: nil, + }} + + for i, tt := range tests { + out := &api.PodList{} + err := store.GetToList(ctx, tt.key, tt.filter, out) + if err != nil { + t.Fatalf("GetToList failed: %v", err) + } + if len(out.Items) != len(tt.expectedOut) { + t.Errorf("#%d: length of list want=%d, get=%d", i, len(tt.expectedOut), len(out.Items)) + continue + } + for j, wantPod := range tt.expectedOut { + getPod := &out.Items[j] + if !reflect.DeepEqual(wantPod, getPod) { + t.Errorf("#%d: pod want=%#v, get=%#v", i, wantPod, getPod) + } + } + } +} + +func TestGuaranteedUpdate(t *testing.T) { + ctx, store, cluster := testSetup(t) + 
defer cluster.Terminate(t) + key, storeObj := testPropogateStore(t, store, ctx, &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo", UID: "A"}}) + + tests := []struct { + key string + name string + ignoreNotFound bool + precondition *storage.Preconditions + expectNotFoundErr bool + expectInvalidObjErr bool + expectNoUpdate bool + }{{ // GuaranteedUpdate on non-existing key with ignoreNotFound=false + key: "/non-existing", + ignoreNotFound: false, + precondition: nil, + expectNotFoundErr: true, + expectInvalidObjErr: false, + expectNoUpdate: false, + }, { // GuaranteedUpdate on non-existing key with ignoreNotFound=true + key: "/non-existing", + ignoreNotFound: true, + precondition: nil, + expectNotFoundErr: false, + expectInvalidObjErr: false, + expectNoUpdate: false, + }, { // GuaranteedUpdate on existing key + key: key, + ignoreNotFound: false, + precondition: nil, + expectNotFoundErr: false, + expectInvalidObjErr: false, + expectNoUpdate: false, + }, { // GuaranteedUpdate with same data + key: key, + ignoreNotFound: false, + precondition: nil, + expectNotFoundErr: false, + expectInvalidObjErr: false, + expectNoUpdate: true, + }, { // GuaranteedUpdate with UID match + key: key, + ignoreNotFound: false, + precondition: storage.NewUIDPreconditions("A"), + expectNotFoundErr: false, + expectInvalidObjErr: false, + expectNoUpdate: true, + }, { // GuaranteedUpdate with UID mismatch + key: key, + ignoreNotFound: false, + precondition: storage.NewUIDPreconditions("B"), + expectNotFoundErr: false, + expectInvalidObjErr: true, + expectNoUpdate: true, + }} + + for i, tt := range tests { + out := &api.Pod{} + name := fmt.Sprintf("foo-%d", i) + if tt.expectNoUpdate { + name = storeObj.Name + } + version := storeObj.ResourceVersion + err := store.GuaranteedUpdate(ctx, tt.key, out, tt.ignoreNotFound, tt.precondition, + storage.SimpleUpdate(func(obj runtime.Object) (runtime.Object, error) { + if tt.expectNotFoundErr && tt.ignoreNotFound { + if pod := obj.(*api.Pod); pod.Name != "" { + t.Errorf("#%d: expecting zero value, but get=%#v", i, pod) + } + } + pod := *storeObj + pod.Name = name + return &pod, nil + })) + + if tt.expectNotFoundErr { + if err == nil || !storage.IsNotFound(err) { + t.Errorf("#%d: expecting not found error, but get: %v", i, err) + } + continue + } + if tt.expectInvalidObjErr { + if err == nil || !storage.IsInvalidObj(err) { + t.Errorf("#%d: expecting invalid UID error, but get: %s", i, err) + } + continue + } + if err != nil { + t.Fatalf("GuaranteedUpdate failed: %v", err) + } + if out.ObjectMeta.Name != name { + t.Errorf("#%d: pod name want=%s, get=%s", i, name, out.ObjectMeta.Name) + } + switch tt.expectNoUpdate { + case true: + if version != out.ResourceVersion { + t.Errorf("#%d: expect no version change, before=%s, after=%s", i, version, out.ResourceVersion) + } + case false: + if version == out.ResourceVersion { + t.Errorf("#%d: expect version change, but get the same version=%s", i, version) + } + } + storeObj = out + } +} + +func TestGuaranteedUpdateWithTTL(t *testing.T) { + ctx, store, cluster := testSetup(t) + defer cluster.Terminate(t) + + input := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}} + key := "/somekey" + + out := &api.Pod{} + err := store.GuaranteedUpdate(ctx, key, out, true, nil, + func(_ runtime.Object, _ storage.ResponseMeta) (runtime.Object, *uint64, error) { + ttl := uint64(1) + return input, &ttl, nil + }) + if err != nil { + t.Fatalf("Create failed: %v", err) + } + + w, err := store.Watch(ctx, key, out.ResourceVersion, storage.Everything) + if err != nil 
{ + t.Fatalf("Watch failed: %v", err) + } + testCheckEventType(t, watch.Deleted, w) +} + +func TestGuaranteedUpdateWithConflict(t *testing.T) { + ctx, store, cluster := testSetup(t) + defer cluster.Terminate(t) + key, _ := testPropogateStore(t, store, ctx, &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}}) + + errChan := make(chan error, 1) + var firstToFinish sync.WaitGroup + var secondToEnter sync.WaitGroup + firstToFinish.Add(1) + secondToEnter.Add(1) + + go func() { + err := store.GuaranteedUpdate(ctx, key, &api.Pod{}, false, nil, + storage.SimpleUpdate(func(obj runtime.Object) (runtime.Object, error) { + pod := obj.(*api.Pod) + pod.Name = "foo-1" + secondToEnter.Wait() + return pod, nil + })) + firstToFinish.Done() + errChan <- err + }() + + updateCount := 0 + err := store.GuaranteedUpdate(ctx, key, &api.Pod{}, false, nil, + storage.SimpleUpdate(func(obj runtime.Object) (runtime.Object, error) { + if updateCount == 0 { + secondToEnter.Done() + firstToFinish.Wait() + } + updateCount++ + pod := obj.(*api.Pod) + pod.Name = "foo-2" + return pod, nil + })) + if err != nil { + t.Fatalf("Second GuaranteedUpdate error %#v", err) + } + if err := <-errChan; err != nil { + t.Fatalf("First GuaranteedUpdate error %#v", err) + } + + if updateCount != 2 { + t.Errorf("Should have conflict and called update func twice") + } +} + +func TestList(t *testing.T) { + cluster := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + defer cluster.Terminate(t) + store := newStore(cluster.RandClient(), testapi.Default.Codec(), "") + ctx := context.Background() + + // Setup storage with the following structure: + // / + // - one-level/ + // | - test + // | + // - two-level/ + // - 1/ + // | - test + // | + // - 2/ + // - test + preset := []struct { + key string + obj *api.Pod + storedObj *api.Pod + }{{ + key: "/one-level/test", + obj: &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}}, + }, { + key: "/two-level/1/test", + obj: &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}}, + }, { + key: "/two-level/2/test", + obj: &api.Pod{ObjectMeta: api.ObjectMeta{Name: "bar"}}, + }} + + for i, ps := range preset { + preset[i].storedObj = &api.Pod{} + err := store.Create(ctx, ps.key, ps.obj, preset[i].storedObj, 0) + if err != nil { + t.Fatalf("Set failed: %v", err) + } + } + + tests := []struct { + prefix string + filter storage.FilterFunc + expectedOut []*api.Pod + }{{ // test List on existing key + prefix: "/one-level/", + filter: storage.Everything, + expectedOut: []*api.Pod{preset[0].storedObj}, + }, { // test List on non-existing key + prefix: "/non-existing/", + filter: storage.Everything, + expectedOut: nil, + }, { // test List with filter + prefix: "/one-level/", + filter: func(obj runtime.Object) bool { + pod, ok := obj.(*api.Pod) + if !ok { + t.Fatal("It should be able to convert obj to *api.Pod") + } + return pod.Name != preset[0].storedObj.Name + }, + expectedOut: nil, + }, { // test List with multiple levels of directories and expect flattened result + prefix: "/two-level/", + filter: storage.Everything, + expectedOut: []*api.Pod{preset[1].storedObj, preset[2].storedObj}, + }} + + for i, tt := range tests { + out := &api.PodList{} + err := store.List(ctx, tt.prefix, "0", tt.filter, out) + if err != nil { + t.Fatalf("List failed: %v", err) + } + if len(tt.expectedOut) != len(out.Items) { + t.Errorf("#%d: length of list want=%d, get=%d", i, len(tt.expectedOut), len(out.Items)) + continue + } + for j, wantPod := range tt.expectedOut { + getPod := &out.Items[j] + if !reflect.DeepEqual(wantPod, getPod) 
{
+				t.Errorf("#%d: pod want=%#v, get=%#v", i, wantPod, getPod)
+			}
+		}
+	}
+}
+
+func testSetup(t *testing.T) (context.Context, *store, *integration.ClusterV3) {
+	cluster := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	store := newStore(cluster.RandClient(), testapi.Default.Codec(), "")
+	ctx := context.Background()
+	return ctx, store, cluster
+}
+
+// testPropogateStore populates the store with the given object, automates key
+// generation, and returns the key and the stored object.
+func testPropogateStore(t *testing.T, store *store, ctx context.Context, obj *api.Pod) (string, *api.Pod) {
+	// Setup store with a key and grab the output for returning.
+	key := "/testkey"
+	setOutput := &api.Pod{}
+	err := store.Create(ctx, key, obj, setOutput, 0)
+	if err != nil {
+		t.Fatalf("Set failed: %v", err)
+	}
+	return key, setOutput
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/etcd3/watcher.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/etcd3/watcher.go
new file mode 100644
index 000000000000..1e13d59b5847
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/etcd3/watcher.go
@@ -0,0 +1,347 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package etcd3
+
+import (
+	"fmt"
+	"net/http"
+	"strings"
+	"sync"
+
+	"k8s.io/kubernetes/pkg/api/unversioned"
+	"k8s.io/kubernetes/pkg/runtime"
+	"k8s.io/kubernetes/pkg/storage"
+	"k8s.io/kubernetes/pkg/watch"
+
+	"github.com/coreos/etcd/clientv3"
+	etcdrpc "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
+	"github.com/golang/glog"
+	"golang.org/x/net/context"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+)
+
+const (
+	// We buffer the channels in order to reduce the number of context switches.
+	incomingBufSize = 100
+	outgoingBufSize = 100
+)
+
+type watcher struct {
+	client    *clientv3.Client
+	codec     runtime.Codec
+	versioner storage.Versioner
+}
+
+// watchChan implements watch.Interface.
+type watchChan struct {
+	watcher           *watcher
+	key               string
+	initialRev        int64
+	recursive         bool
+	filter            storage.FilterFunc
+	ctx               context.Context
+	cancel            context.CancelFunc
+	incomingEventChan chan *event
+	resultChan        chan watch.Event
+	errChan           chan error
+}
+
+func newWatcher(client *clientv3.Client, codec runtime.Codec, versioner storage.Versioner) *watcher {
+	return &watcher{
+		client:    client,
+		codec:     codec,
+		versioner: versioner,
+	}
+}
+
+// Watch watches on a key and returns a watch.Interface that transfers relevant notifications.
+// If rev is zero, it will return the existing object(s) and then start watching from
+// the maximum revision+1 of the returned objects.
+// If rev is non-zero, it will watch events that happened after the given revision.
+// If recursive is false, it watches on the given key.
+// If recursive is true, it watches any children and directories under the key, excluding the root key itself.
+// filter must be non-nil. Only if filter returns true will the changes be returned.
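+//
+// Editorial sketch, not part of the original patch: a recursive watch from
+// rev=0 first lists existing children (delivered as Added events), then
+// streams subsequent changes; the prefix here is hypothetical:
+//
+//	w, err := watcher.Watch(ctx, "/registry/pods", 0, true, storage.Everything)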
+func (w *watcher) Watch(ctx context.Context, key string, rev int64, recursive bool, filter storage.FilterFunc) (watch.Interface, error) {
+	if recursive && !strings.HasSuffix(key, "/") {
+		key += "/"
+	}
+	wc := w.createWatchChan(ctx, key, rev, recursive, filter)
+	go wc.run()
+	return wc, nil
+}
+
+func (w *watcher) createWatchChan(ctx context.Context, key string, rev int64, recursive bool, filter storage.FilterFunc) *watchChan {
+	wc := &watchChan{
+		watcher:           w,
+		key:               key,
+		initialRev:        rev,
+		recursive:         recursive,
+		filter:            filter,
+		incomingEventChan: make(chan *event, incomingBufSize),
+		resultChan:        make(chan watch.Event, outgoingBufSize),
+		errChan:           make(chan error, 1),
+	}
+	wc.ctx, wc.cancel = context.WithCancel(ctx)
+	return wc
+}
+
+func (wc *watchChan) run() {
+	go wc.startWatching()
+
+	var resultChanWG sync.WaitGroup
+	resultChanWG.Add(1)
+	go wc.processEvent(&resultChanWG)
+
+	select {
+	case err := <-wc.errChan:
+		errResult := parseError(err)
+		if errResult != nil {
+			// The error result is guaranteed to be received by the user before ResultChan is closed.
+			select {
+			case wc.resultChan <- *errResult:
+			case <-wc.ctx.Done(): // user has given up all results
+			}
+		}
+		wc.cancel()
+	case <-wc.ctx.Done():
+	}
+	// We need to wait until nothing more will be sent to resultChan.
+	resultChanWG.Wait()
+	close(wc.resultChan)
+}
+
+func (wc *watchChan) Stop() {
+	wc.cancel()
+}
+
+func (wc *watchChan) ResultChan() <-chan watch.Event {
+	return wc.resultChan
+}
+
+// sync tries to retrieve existing data and send it to process.
+// The revision to watch will be set to the revision in the response.
+func (wc *watchChan) sync() error {
+	opts := []clientv3.OpOption{}
+	if wc.recursive {
+		opts = append(opts, clientv3.WithPrefix())
+	}
+	getResp, err := wc.watcher.client.Get(wc.ctx, wc.key, opts...)
+	if err != nil {
+		return err
+	}
+	wc.initialRev = getResp.Header.Revision
+
+	for _, kv := range getResp.Kvs {
+		wc.sendEvent(parseKV(kv))
+	}
+	return nil
+}
+
+// startWatching does:
+// - get current objects if initialRev=0; set initialRev to current rev
+// - watch on the given key and send events to process.
+func (wc *watchChan) startWatching() {
+	if wc.initialRev == 0 {
+		if err := wc.sync(); err != nil {
+			wc.sendError(err)
+			return
+		}
+	}
+	opts := []clientv3.OpOption{clientv3.WithRev(wc.initialRev + 1)}
+	if wc.recursive {
+		opts = append(opts, clientv3.WithPrefix())
+	}
+	wch := wc.watcher.client.Watch(wc.ctx, wc.key, opts...)
+	for wres := range wch {
+		if wres.Err() != nil {
+			// If there is an error on the server (e.g. compaction), the channel will return it before being closed.
+			wc.sendError(wres.Err())
+			return
+		}
+		for _, e := range wres.Events {
+			wc.sendEvent(parseEvent(e))
+		}
+	}
+}
+
+// processEvent processes events from the etcd watcher and sends results to resultChan.
+func (wc *watchChan) processEvent(wg *sync.WaitGroup) {
+	defer wg.Done()
+
+	for {
+		select {
+		case e := <-wc.incomingEventChan:
+			res := wc.transform(e)
+			if res == nil {
+				continue
+			}
+			// If the user can't receive results fast enough, we also block incoming
+			// events from the watcher, because buffering events locally would only
+			// increase memory usage. The worst case would be closing the fast watcher.
+			select {
+			case wc.resultChan <- *res:
+			case <-wc.ctx.Done():
+				return
+			}
+		case <-wc.ctx.Done():
+			return
+		}
+	}
+}
+
+// transform transforms an event into a result for the user if it is not filtered.
+// TODO (Optimization):
+// - Save a remote round-trip.
+// Currently, DELETE and PUT events don't contain the previous value.
+// We need to do another Get() in order to get previous object and have logic upon it. +// We could potentially do some optimizations: +// - For PUT, we can save current and previous objects into the value. +// - For DELETE, See https://github.com/coreos/etcd/issues/4620 +func (wc *watchChan) transform(e *event) (res *watch.Event) { + curObj, oldObj, err := prepareObjs(wc.ctx, e, wc.watcher.client, wc.watcher.codec, wc.watcher.versioner) + if err != nil { + wc.sendError(err) + return nil + } + + switch { + case e.isDeleted: + if !wc.filter(oldObj) { + return nil + } + res = &watch.Event{ + Type: watch.Deleted, + Object: oldObj, + } + case e.isCreated: + if !wc.filter(curObj) { + return nil + } + res = &watch.Event{ + Type: watch.Added, + Object: curObj, + } + default: + curObjPasses := wc.filter(curObj) + oldObjPasses := wc.filter(oldObj) + switch { + case curObjPasses && oldObjPasses: + res = &watch.Event{ + Type: watch.Modified, + Object: curObj, + } + case curObjPasses && !oldObjPasses: + res = &watch.Event{ + Type: watch.Added, + Object: curObj, + } + case !curObjPasses && oldObjPasses: + res = &watch.Event{ + Type: watch.Deleted, + Object: oldObj, + } + } + } + return res +} + +func parseError(err error) *watch.Event { + var status *unversioned.Status + switch { + case err == etcdrpc.ErrCompacted: + status = &unversioned.Status{ + Status: unversioned.StatusFailure, + Message: err.Error(), + Code: http.StatusGone, + Reason: unversioned.StatusReasonExpired, + } + default: + status = &unversioned.Status{ + Status: unversioned.StatusFailure, + Message: err.Error(), + Code: http.StatusInternalServerError, + Reason: unversioned.StatusReasonInternalError, + } + } + + return &watch.Event{ + Type: watch.Error, + Object: status, + } +} + +func (wc *watchChan) sendError(err error) { + // Context.canceled is an expected behavior. + // We should just stop all goroutines in watchChan without returning error. + // TODO: etcd client should return context.Canceled instead of grpc specific error. + if grpc.Code(err) == codes.Canceled || err == context.Canceled { + return + } + select { + case wc.errChan <- err: + case <-wc.ctx.Done(): + } +} + +func (wc *watchChan) sendEvent(e *event) { + if len(wc.incomingEventChan) == incomingBufSize { + glog.V(2).Infof("Fast watcher, slow processing. Number of buffered events: %d."+ + "Probably caused by slow decoding, user not receiving fast, or other processing logic", + incomingBufSize) + } + select { + case wc.incomingEventChan <- e: + case <-wc.ctx.Done(): + } +} + +func prepareObjs(ctx context.Context, e *event, client *clientv3.Client, codec runtime.Codec, versioner storage.Versioner) (curObj runtime.Object, oldObj runtime.Object, err error) { + if !e.isDeleted { + curObj, err = decodeObj(codec, versioner, e.value, e.rev) + if err != nil { + return nil, nil, err + } + } + if e.isDeleted || !e.isCreated { + getResp, err := client.Get(ctx, e.key, clientv3.WithRev(e.rev-1)) + if err != nil { + return nil, nil, err + } + // Note that this sends the *old* object with the etcd revision for the time at + // which it gets deleted. + // We assume old object is returned only in Deleted event. Users (e.g. cacher) need + // to have larger than previous rev to tell the ordering. 
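+	// Editorial note, not part of the original patch: reading at
+	// clientv3.WithRev(e.rev-1), as below, is the etcd3 idiom for "the state
+	// of the key just before this event"; it only works while that revision
+	// has not yet been compacted away (see compact.go).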
+ oldObj, err = decodeObj(codec, versioner, getResp.Kvs[0].Value, e.rev) + if err != nil { + return nil, nil, err + } + } + return curObj, oldObj, nil +} + +func decodeObj(codec runtime.Codec, versioner storage.Versioner, data []byte, rev int64) (runtime.Object, error) { + obj, err := runtime.Decode(codec, []byte(data)) + if err != nil { + return nil, err + } + // ensure resource version is set on the object we load from etcd + if err := versioner.UpdateObject(obj, uint64(rev)); err != nil { + return nil, fmt.Errorf("failure to version api object (%d) %#v: %v", rev, obj, err) + } + return obj, nil +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/etcd3/watcher_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/etcd3/watcher_test.go new file mode 100644 index 000000000000..aafef5792e42 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/etcd3/watcher_test.go @@ -0,0 +1,310 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package etcd3 + +import ( + "errors" + "fmt" + "reflect" + "sync" + "testing" + "time" + + "github.com/coreos/etcd/clientv3" + "github.com/coreos/etcd/integration" + "golang.org/x/net/context" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/storage" + "k8s.io/kubernetes/pkg/util/wait" + "k8s.io/kubernetes/pkg/watch" +) + +func TestWatch(t *testing.T) { + testWatch(t, false) +} + +func TestWatchList(t *testing.T) { + testWatch(t, true) +} + +// It tests that +// - first occurrence of objects should notify Add event +// - update should trigger Modified event +// - update that gets filtered should trigger Deleted event +func testWatch(t *testing.T, recursive bool) { + ctx, store, cluster := testSetup(t) + defer cluster.Terminate(t) + + podFoo := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}} + podBar := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "bar"}} + + tests := []struct { + key string + filter storage.FilterFunc + watchTests []*testWatchStruct + }{{ // create a key + key: "/somekey-1", + watchTests: []*testWatchStruct{{podFoo, true, watch.Added}}, + filter: storage.Everything, + }, { // create a key but obj gets filtered + key: "/somekey-2", + watchTests: []*testWatchStruct{{podFoo, false, ""}}, + filter: func(runtime.Object) bool { return false }, + }, { // create a key but obj gets filtered. 
Then update it with an unfiltered obj
+		key:        "/somekey-3",
+		watchTests: []*testWatchStruct{{podFoo, false, ""}, {podBar, true, watch.Added}},
+		filter: func(obj runtime.Object) bool {
+			pod := obj.(*api.Pod)
+			return pod.Name == "bar"
+		},
+	}, { // update
+		key:        "/somekey-4",
+		watchTests: []*testWatchStruct{{podFoo, true, watch.Added}, {podBar, true, watch.Modified}},
+		filter:     storage.Everything,
+	}, { // delete because of being filtered
+		key:        "/somekey-5",
+		watchTests: []*testWatchStruct{{podFoo, true, watch.Added}, {podBar, true, watch.Deleted}},
+		filter: func(obj runtime.Object) bool {
+			pod := obj.(*api.Pod)
+			return pod.Name != "bar"
+		},
+	}}
+	for i, tt := range tests {
+		w, err := store.watch(ctx, tt.key, "0", tt.filter, recursive)
+		if err != nil {
+			t.Fatalf("Watch failed: %v", err)
+		}
+		var prevObj *api.Pod
+		for _, watchTest := range tt.watchTests {
+			out := &api.Pod{}
+			key := tt.key
+			if recursive {
+				key = key + "/item"
+			}
+			err := store.GuaranteedUpdate(ctx, key, out, true, nil, storage.SimpleUpdate(
+				func(runtime.Object) (runtime.Object, error) {
+					return watchTest.obj, nil
+				}))
+			if err != nil {
+				t.Fatalf("GuaranteedUpdate failed: %v", err)
+			}
+			if watchTest.expectEvent {
+				expectObj := out
+				if watchTest.watchType == watch.Deleted {
+					expectObj = prevObj
+					expectObj.ResourceVersion = out.ResourceVersion
+				}
+				testCheckResult(t, i, watchTest.watchType, w, expectObj)
+			}
+			prevObj = out
+		}
+		w.Stop()
+		testCheckStop(t, i, w)
+	}
+}
+
+func TestDeleteTriggerWatch(t *testing.T) {
+	ctx, store, cluster := testSetup(t)
+	defer cluster.Terminate(t)
+	key, storedObj := testPropogateStore(t, store, ctx, &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}})
+	w, err := store.Watch(ctx, key, storedObj.ResourceVersion, storage.Everything)
+	if err != nil {
+		t.Fatalf("Watch failed: %v", err)
+	}
+	if err := store.Delete(ctx, key, &api.Pod{}, nil); err != nil {
+		t.Fatalf("Delete failed: %v", err)
+	}
+	testCheckEventType(t, watch.Deleted, w)
+}
+
+// TestWatchFromZeroAndNoneZero tests that
+// - a watch from resource version 0 syncs up and grabs the object added before
+// - a watch from a non-zero resource version only sees changes after that version
+func TestWatchFromZeroAndNoneZero(t *testing.T) {
+	ctx, store, cluster := testSetup(t)
+	defer cluster.Terminate(t)
+	key, storedObj := testPropogateStore(t, store, ctx, &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}})
+
+	w, err := store.Watch(ctx, key, "0", storage.Everything)
+	if err != nil {
+		t.Fatalf("Watch failed: %v", err)
+	}
+	testCheckResult(t, 0, watch.Added, w, storedObj)
+	w.Stop()
+	testCheckStop(t, 0, w)
+
+	w, err = store.Watch(ctx, key, storedObj.ResourceVersion, storage.Everything)
+	if err != nil {
+		t.Fatalf("Watch failed: %v", err)
+	}
+	out := &api.Pod{}
+	store.GuaranteedUpdate(ctx, key, out, true, nil, storage.SimpleUpdate(
+		func(runtime.Object) (runtime.Object, error) {
+			return &api.Pod{ObjectMeta: api.ObjectMeta{Name: "bar"}}, nil
+		}))
+	testCheckResult(t, 0, watch.Modified, w, out)
+}
+
+func TestWatchError(t *testing.T) {
+	cluster := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	defer cluster.Terminate(t)
+	invalidStore := newStore(cluster.RandClient(), &testCodec{testapi.Default.Codec()}, "")
+	ctx := context.Background()
+	w, err := invalidStore.Watch(ctx, "/abc", "0", storage.Everything)
+	if err != nil {
+		t.Fatalf("Watch failed: %v", err)
+	}
+	validStore := newStore(cluster.RandClient(), testapi.Default.Codec(), "")
+	validStore.GuaranteedUpdate(ctx, "/abc", &api.Pod{}, true, nil, storage.SimpleUpdate(
func(runtime.Object) (runtime.Object, error) { + return &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}}, nil + })) + testCheckEventType(t, watch.Error, w) +} + +func TestWatchContextCancel(t *testing.T) { + ctx, store, cluster := testSetup(t) + defer cluster.Terminate(t) + canceledCtx, cancel := context.WithCancel(ctx) + cancel() + w := store.watcher.createWatchChan(canceledCtx, "/abc", 0, false, storage.Everything) + // When we do a client.Get with a canceled context, it will return error. + // Nonetheless, when we try to send it over internal errChan, we should detect + // it's context canceled and not send it. + err := w.sync() + w.ctx = ctx + w.sendError(err) + select { + case err := <-w.errChan: + t.Errorf("cancelling context shouldn't return any error. Err: %v", err) + default: + } +} + +func TestWatchErrResultNotBlockAfterCancel(t *testing.T) { + origCtx, store, cluster := testSetup(t) + defer cluster.Terminate(t) + ctx, cancel := context.WithCancel(origCtx) + w := store.watcher.createWatchChan(ctx, "/abc", 0, false, storage.Everything) + // make resutlChan and errChan blocking to ensure ordering. + w.resultChan = make(chan watch.Event) + w.errChan = make(chan error) + // The event flow goes like: + // - first we send an error, it should block on resultChan. + // - Then we cancel ctx. The blocking on resultChan should be freed up + // and run() goroutine should return. + var wg sync.WaitGroup + wg.Add(1) + go func() { + w.run() + wg.Done() + }() + w.errChan <- fmt.Errorf("some error") + cancel() + wg.Wait() +} + +func TestWatchDeleteEventObjectHaveLatestRV(t *testing.T) { + ctx, store, cluster := testSetup(t) + defer cluster.Terminate(t) + key, storedObj := testPropogateStore(t, store, ctx, &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}}) + + w, err := store.Watch(ctx, key, storedObj.ResourceVersion, storage.Everything) + if err != nil { + t.Fatalf("Watch failed: %v", err) + } + etcdW := cluster.RandClient().Watch(ctx, "/", clientv3.WithPrefix()) + + if err := store.Delete(ctx, key, &api.Pod{}, &storage.Preconditions{}); err != nil { + t.Fatalf("Delete failed: %v", err) + } + + e := <-w.ResultChan() + watchedDeleteObj := e.Object.(*api.Pod) + var wres clientv3.WatchResponse + wres = <-etcdW + + watchedDeleteRev, err := storage.ParseWatchResourceVersion(watchedDeleteObj.ResourceVersion) + if err != nil { + t.Fatalf("ParseWatchResourceVersion failed: %v", err) + } + if int64(watchedDeleteRev) != wres.Events[0].Kv.ModRevision { + t.Errorf("Object from delete event have version: %v, should be the same as etcd delete's mod rev: %d", + watchedDeleteRev, wres.Events[0].Kv.ModRevision) + } +} + +type testWatchStruct struct { + obj *api.Pod + expectEvent bool + watchType watch.EventType +} + +type testCodec struct { + runtime.Codec +} + +func (c *testCodec) Decode(data []byte, defaults *unversioned.GroupVersionKind, into runtime.Object) (runtime.Object, *unversioned.GroupVersionKind, error) { + return nil, nil, errors.New("Expected decoding failure") +} + +func testCheckEventType(t *testing.T, expectEventType watch.EventType, w watch.Interface) { + select { + case res := <-w.ResultChan(): + if res.Type != expectEventType { + t.Errorf("event type want=%v, get=%v", expectEventType, res.Type) + } + case <-time.After(wait.ForeverTestTimeout): + t.Errorf("time out after waiting %v on ResultChan", wait.ForeverTestTimeout) + } +} + +func testCheckResult(t *testing.T, i int, expectEventType watch.EventType, w watch.Interface, expectObj *api.Pod) { + select { + case res := <-w.ResultChan(): + if 
res.Type != expectEventType { + t.Errorf("#%d: event type want=%v, get=%v", i, expectEventType, res.Type) + return + } + if !reflect.DeepEqual(expectObj, res.Object) { + t.Errorf("#%d: obj want=\n%#v\nget=\n%#v", i, expectObj, res.Object) + } + case <-time.After(wait.ForeverTestTimeout): + t.Errorf("#%d: time out after waiting %v on ResultChan", i, wait.ForeverTestTimeout) + } +} + +func testCheckStop(t *testing.T, i int, w watch.Interface) { + select { + case e, ok := <-w.ResultChan(): + if ok { + var obj string + switch e.Object.(type) { + case *api.Pod: + obj = e.Object.(*api.Pod).Name + case *unversioned.Status: + obj = e.Object.(*unversioned.Status).Message + } + t.Errorf("#%d: ResultChan should have been closed. Event: %s. Object: %s", i, e.Type, obj) + } + case <-time.After(wait.ForeverTestTimeout): + t.Errorf("#%d: time out after waiting 1s on ResultChan", i) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/interfaces.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/interfaces.go new file mode 100644 index 000000000000..89290e29abe5 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/interfaces.go @@ -0,0 +1,171 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package storage + +import ( + "golang.org/x/net/context" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/types" + "k8s.io/kubernetes/pkg/watch" +) + +// Versioner abstracts setting and retrieving metadata fields from database response +// onto the object ot list. +type Versioner interface { + // UpdateObject sets storage metadata into an API object. Returns an error if the object + // cannot be updated correctly. May return nil if the requested object does not need metadata + // from database. + UpdateObject(obj runtime.Object, resourceVersion uint64) error + // UpdateList sets the resource version into an API list object. Returns an error if the object + // cannot be updated correctly. May return nil if the requested object does not need metadata + // from database. + UpdateList(obj runtime.Object, resourceVersion uint64) error + // ObjectResourceVersion returns the resource version (for persistence) of the specified object. + // Should return an error if the specified object does not have a persistable version. + ObjectResourceVersion(obj runtime.Object) (uint64, error) +} + +// ResponseMeta contains information about the database metadata that is associated with +// an object. It abstracts the actual underlying objects to prevent coupling with concrete +// database and to improve testability. +type ResponseMeta struct { + // TTL is the time to live of the node that contained the returned object. It may be + // zero or negative in some cases (objects may be expired after the requested + // expiration time due to server lag). + TTL int64 + // The resource version of the node that contained the returned object. 
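+	// (With the etcd3 backend this is populated from the key's ModRevision;
+	// with etcd2 it typically comes from the node's ModifiedIndex.)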
+	ResourceVersion uint64
+}
+
+// FilterFunc is a predicate which takes an API object and returns true
+// if and only if the object should remain in the set.
+type FilterFunc func(obj runtime.Object) bool
+
+// Everything is a FilterFunc which accepts all objects.
+func Everything(runtime.Object) bool {
+	return true
+}
+
+// Pass an UpdateFunc to Interface.GuaranteedUpdate to make an update
+// that is guaranteed to succeed.
+// See the comment for GuaranteedUpdate for more details.
+type UpdateFunc func(input runtime.Object, res ResponseMeta) (output runtime.Object, ttl *uint64, err error)
+
+// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out.
+type Preconditions struct {
+	// Specifies the target UID.
+	UID *types.UID `json:"uid,omitempty"`
+}
+
+// NewUIDPreconditions returns a Preconditions with UID set.
+func NewUIDPreconditions(uid string) *Preconditions {
+	u := types.UID(uid)
+	return &Preconditions{UID: &u}
+}
+
+// Interface offers a common interface for object marshaling/unmarshaling operations and
+// hides all the storage-related operations behind it.
+type Interface interface {
+	// Backends returns the list of server addresses of the underlying database.
+	// TODO: This method is used only in a single place. Consider refactoring and getting rid
+	// of this method from the interface.
+	Backends(ctx context.Context) []string
+
+	// Versioner returns the Versioner associated with this interface.
+	Versioner() Versioner
+
+	// Create adds a new object at a key unless it already exists. 'ttl' is time-to-live
+	// in seconds (0 means forever). If no error is returned and out is not nil, out will be
+	// set to the value read back from the database.
+	Create(ctx context.Context, key string, obj, out runtime.Object, ttl uint64) error
+
+	// Delete removes the specified key and returns the value that existed at that spot.
+	// If the key didn't exist, it will return a NotFound storage error.
+	Delete(ctx context.Context, key string, out runtime.Object, preconditions *Preconditions) error
+
+	// Watch begins watching the specified key. Events are decoded into API objects,
+	// and any items passing 'filter' are sent down the returned watch.Interface.
+	// resourceVersion may be used to specify what version to begin watching,
+	// which should be the current resourceVersion, and no longer rv+1
+	// (e.g. reconnecting without missing any updates).
+	Watch(ctx context.Context, key string, resourceVersion string, filter FilterFunc) (watch.Interface, error)
+
+	// WatchList begins watching the specified key's items. Items are decoded into API
+	// objects, and any item passing 'filter' is sent down the returned watch.Interface.
+	// resourceVersion may be used to specify what version to begin watching,
+	// which should be the current resourceVersion, and no longer rv+1
+	// (e.g. reconnecting without missing any updates).
+	WatchList(ctx context.Context, key string, resourceVersion string, filter FilterFunc) (watch.Interface, error)
+
+	// Get unmarshals the JSON found at key into objPtr. On a not-found error, it will either
+	// return a zero object of the requested type or an error, depending on ignoreNotFound.
+	// Treats empty responses and nil response nodes exactly like a not-found error.
+	Get(ctx context.Context, key string, objPtr runtime.Object, ignoreNotFound bool) error
+
+	// GetToList unmarshals the JSON found at key and decodes it into a *List API object
+	// (an object that satisfies the runtime.IsList definition).
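+	//
+	// A sketch of a typical call, with an illustrative key and list type:
+	//
+	//   pods := &api.PodList{}
+	//   err := s.GetToList(ctx, "/pods/ns1", Everything, pods)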
+	GetToList(ctx context.Context, key string, filter FilterFunc, listObj runtime.Object) error
+
+	// List unmarshals the JSON objects found under the directory defined by key
+	// into the *List API object listObj (an object that satisfies the
+	// runtime.IsList definition). The returned contents may be delayed, but it
+	// is guaranteed that they will have at least 'resourceVersion'.
+	List(ctx context.Context, key string, resourceVersion string, filter FilterFunc, listObj runtime.Object) error
+
+	// GuaranteedUpdate keeps calling 'tryUpdate()' to update key 'key' (of type 'ptrToType'),
+	// retrying the update until success if there is an index conflict.
+	// Note that the object passed to tryUpdate may change across invocations of tryUpdate() if
+	// other writers are simultaneously updating it, so tryUpdate() needs to take into account
+	// the current contents of the object when deciding how the updated object should look.
+	// If the key doesn't exist, it will return a NotFound storage error if ignoreNotFound=false,
+	// or a zero value in the 'ptrToType' parameter otherwise.
+	// If the object to update has the same value as the previous one, it won't do any update
+	// but will return the object in the 'ptrToType' parameter.
+	//
+	// Example:
+	//
+	// s := /* implementation of Interface */
+	// err := s.GuaranteedUpdate(
+	//     "myKey", &MyType{}, true,
+	//     func(input runtime.Object, res ResponseMeta) (runtime.Object, *uint64, error) {
+	//       // Before each invocation of the user-defined function, "input" is reset to
+	//       // the current contents for "myKey" in the database.
+	//       curr := input.(*MyType) // Guaranteed to succeed.
+	//
+	//       // Make the modification
+	//       curr.Counter++
+	//
+	//       // Return the modified object - return an error to stop iterating. Return
+	//       // a uint64 to alter the TTL on the object, or nil to keep it the same value.
+	//       return curr, nil, nil
+	//     })
+	GuaranteedUpdate(ctx context.Context, key string, ptrToType runtime.Object, ignoreNotFound bool, preconditions *Preconditions, tryUpdate UpdateFunc) error
+
+	// Codec provides access to the underlying codec being used by the implementation.
+	Codec() runtime.Codec
+}
+
+// Config allows storage tiers to generate the proper storage.Interface
+// and reduce the dependencies needed to encapsulate storage.
+type Config interface {
+	// NewStorage creates the Interface based on the config object.
+	NewStorage() (Interface, error)
+
+	// GetType is used to enforce membership and returns the underlying type.
+	GetType() string
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/storagebackend/config.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/storagebackend/config.go
new file mode 100644
index 000000000000..d1e17c87caea
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/storagebackend/config.go
@@ -0,0 +1,43 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package storagebackend
+
+const (
+	StorageTypeUnset = ""
+	StorageTypeETCD2 = "etcd2"
+	StorageTypeETCD3 = "etcd3"
+)
+
+// Config is configuration for creating a storage backend.
+type Config struct {
+	// Type defines the type of storage backend, e.g. "etcd2", "etcd3". The default ("") is "etcd2".
+	Type string
+	// Prefix is the prefix to all keys passed to storage.Interface methods.
+	Prefix string
+	// ServerList is the list of storage servers to connect with.
+	ServerList []string
+	// TLS credentials
+	KeyFile  string
+	CertFile string
+	CAFile   string
+	// Quorum indicates whether read operations should be quorum-level consistent.
+	Quorum bool
+	// DeserializationCacheSize is the size of the cache of deserialized objects.
+	// Currently this is only supported in etcd2.
+	// We will drop the cache once we are using protobuf.
+	DeserializationCacheSize int
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/storagebackend/factory/etcd2.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/storagebackend/factory/etcd2.go
new file mode 100644
index 000000000000..4ac526d99968
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/storagebackend/factory/etcd2.go
@@ -0,0 +1,81 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package factory
+
+import (
+	"net"
+	"net/http"
+	"time"
+
+	etcd2client "github.com/coreos/etcd/client"
+	"github.com/coreos/etcd/pkg/transport"
+
+	"k8s.io/kubernetes/pkg/runtime"
+	"k8s.io/kubernetes/pkg/storage"
+	"k8s.io/kubernetes/pkg/storage/etcd"
+	"k8s.io/kubernetes/pkg/storage/storagebackend"
+	utilnet "k8s.io/kubernetes/pkg/util/net"
+)
+
+func newETCD2Storage(c storagebackend.Config, codec runtime.Codec) (storage.Interface, error) {
+	tr, err := newTransportForETCD2(c.CertFile, c.KeyFile, c.CAFile)
+	if err != nil {
+		return nil, err
+	}
+	client, err := newETCD2Client(tr, c.ServerList)
+	if err != nil {
+		return nil, err
+	}
+	return etcd.NewEtcdStorage(client, codec, c.Prefix, c.Quorum, c.DeserializationCacheSize), nil
+}
+
+func newETCD2Client(tr *http.Transport, serverList []string) (etcd2client.Client, error) {
+	cli, err := etcd2client.New(etcd2client.Config{
+		Endpoints: serverList,
+		Transport: tr,
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	return cli, nil
+}
+
+func newTransportForETCD2(certFile, keyFile, caFile string) (*http.Transport, error) {
+	info := transport.TLSInfo{
+		CertFile: certFile,
+		KeyFile:  keyFile,
+		CAFile:   caFile,
+	}
+	cfg, err := info.ClientConfig()
+	if err != nil {
+		return nil, err
+	}
+	// Copied from etcd.DefaultTransport declaration.
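+	// The notable deviations from a stock transport are the TLS config built
+	// above and the large MaxIdleConnsPerHost, which lets the many concurrent
+	// requests an apiserver issues reuse pooled connections to the same etcd
+	// member instead of redialing.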
+	// TODO: Determine if transport needs optimization
+	tr := utilnet.SetTransportDefaults(&http.Transport{
+		Proxy: http.ProxyFromEnvironment,
+		Dial: (&net.Dialer{
+			Timeout:   30 * time.Second,
+			KeepAlive: 30 * time.Second,
+		}).Dial,
+		TLSHandshakeTimeout: 10 * time.Second,
+		MaxIdleConnsPerHost: 500,
+		TLSClientConfig:     cfg,
+	})
+	return tr, nil
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/storagebackend/factory/etcd3.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/storagebackend/factory/etcd3.go
new file mode 100644
index 000000000000..add091a06904
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/storagebackend/factory/etcd3.go
@@ -0,0 +1,45 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package factory
+
+import (
+	"strings"
+
+	"github.com/coreos/etcd/clientv3"
+	"golang.org/x/net/context"
+
+	"k8s.io/kubernetes/pkg/runtime"
+	"k8s.io/kubernetes/pkg/storage"
+	"k8s.io/kubernetes/pkg/storage/etcd3"
+	"k8s.io/kubernetes/pkg/storage/storagebackend"
+)
+
+func newETCD3Storage(c storagebackend.Config, codec runtime.Codec) (storage.Interface, error) {
+	endpoints := c.ServerList
+	for i, s := range endpoints {
+		// Strip the scheme with TrimPrefix rather than TrimLeft: TrimLeft treats
+		// "http://" as a character set and would also eat a leading 'h', 't',
+		// 'p', ':' or '/' belonging to the host itself.
+		endpoints[i] = strings.TrimPrefix(s, "http://")
+	}
+	cfg := clientv3.Config{
+		Endpoints: endpoints,
+	}
+	client, err := clientv3.New(cfg)
+	if err != nil {
+		return nil, err
+	}
+	etcd3.StartCompactor(context.Background(), client)
+	return etcd3.New(client, codec, c.Prefix), nil
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/storagebackend/factory/factory.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/storagebackend/factory/factory.go
new file mode 100644
index 000000000000..cc7ae052e910
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/storagebackend/factory/factory.go
@@ -0,0 +1,41 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package factory
+
+import (
+	"fmt"
+
+	"k8s.io/kubernetes/pkg/runtime"
+	"k8s.io/kubernetes/pkg/storage"
+	"k8s.io/kubernetes/pkg/storage/storagebackend"
+)
+
+// Create creates a storage backend based on given config.
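+//
+// A minimal call site might look like the following; the endpoint, prefix and
+// 'codec' (any runtime.Codec) are illustrative:
+//
+//	cfg := storagebackend.Config{
+//		Type:       storagebackend.StorageTypeETCD3,
+//		ServerList: []string{"http://127.0.0.1:2379"},
+//		Prefix:     "/registry",
+//	}
+//	store, err := Create(cfg, codec)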
+func Create(c storagebackend.Config, codec runtime.Codec) (storage.Interface, error) { + switch c.Type { + case storagebackend.StorageTypeUnset, storagebackend.StorageTypeETCD2: + return newETCD2Storage(c, codec) + case storagebackend.StorageTypeETCD3: + // TODO: We have the following features to implement: + // - Support secure connection by using key, cert, and CA files. + // - Honor "https" scheme to support secure connection in gRPC. + // - Support non-quorum read. + return newETCD3Storage(c, codec) + default: + return nil, fmt.Errorf("unknown storage type: %s", c.Type) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/testing/types.generated.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/testing/types.generated.go new file mode 100644 index 000000000000..2c64c8f4ed8f --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/testing/types.generated.go @@ -0,0 +1,365 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// ************************************************************ +// DO NOT EDIT. +// THIS FILE IS AUTO-GENERATED BY codecgen. +// ************************************************************ + +package testing + +import ( + "errors" + "fmt" + codec1978 "github.com/ugorji/go/codec" + pkg2_api "k8s.io/kubernetes/pkg/api" + pkg1_unversioned "k8s.io/kubernetes/pkg/api/unversioned" + pkg3_types "k8s.io/kubernetes/pkg/types" + "reflect" + "runtime" + time "time" +) + +const ( + // ----- content types ---- + codecSelferC_UTF81234 = 1 + codecSelferC_RAW1234 = 0 + // ----- value types used ---- + codecSelferValueTypeArray1234 = 10 + codecSelferValueTypeMap1234 = 9 + // ----- containerStateValues ---- + codecSelfer_containerMapKey1234 = 2 + codecSelfer_containerMapValue1234 = 3 + codecSelfer_containerMapEnd1234 = 4 + codecSelfer_containerArrayElem1234 = 6 + codecSelfer_containerArrayEnd1234 = 7 +) + +var ( + codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) + codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) +) + +type codecSelfer1234 struct{} + +func init() { + if codec1978.GenVersion != 5 { + _, file, _, _ := runtime.Caller(0) + err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. 
Re-generate file: %v", + 5, codec1978.GenVersion, file) + panic(err) + } + if false { // reference the types, but skip this branch at build/run time + var v0 pkg2_api.ObjectMeta + var v1 pkg1_unversioned.TypeMeta + var v2 pkg3_types.UID + var v3 time.Time + _, _, _, _ = v0, v1, v2, v3 + } +} + +func (x *TestResource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[2] = x.Kind != "" + yyq2[3] = x.APIVersion != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy4 := &x.ObjectMeta + yy4.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.ObjectMeta + yy6.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym9 := z.EncBinary() + _ = yym9 + if false { + } else { + r.EncodeInt(int64(x.Value)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("value")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(x.Value)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym12 := z.EncBinary() + _ = yym12 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *TestResource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == 
codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *TestResource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_api.ObjectMeta{} + } else { + yyv4 := &x.ObjectMeta + yyv4.CodecDecodeSelf(d) + } + case "value": + if r.TryDecodeAsNil() { + x.Value = 0 + } else { + x.Value = int(r.DecodeInt(codecSelferBitsize1234)) + } + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *TestResource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_api.ObjectMeta{} + } else { + yyv9 := &x.ObjectMeta + yyv9.CodecDecodeSelf(d) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Value = 0 + } else { + x.Value = int(r.DecodeInt(codecSelferBitsize1234)) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/testing/types.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/testing/types.go new file mode 100644 index 000000000000..a1377aa04ec6 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/testing/types.go @@ -0,0 +1,30 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testing + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/unversioned" +) + +type TestResource struct { + unversioned.TypeMeta `json:",inline"` + api.ObjectMeta `json:"metadata"` + Value int `json:"value"` +} + +func (obj *TestResource) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/testing/utils.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/testing/utils.go new file mode 100644 index 000000000000..8858e33007fb --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/testing/utils.go @@ -0,0 +1,61 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package testing + +import ( + "path" + + "golang.org/x/net/context" + "k8s.io/kubernetes/pkg/api/meta" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/storage" +) + +// CreateObj will create a single object using the storage interface +func CreateObj(helper storage.Interface, name string, obj, out runtime.Object, ttl uint64) error { + return helper.Create(context.TODO(), name, obj, out, ttl) +} + +//CreateObjList will create a list from the array of objects +func CreateObjList(prefix string, helper storage.Interface, items []runtime.Object) error { + for i := range items { + obj := items[i] + meta, err := meta.Accessor(obj) + if err != nil { + return err + } + err = CreateObj(helper, path.Join(prefix, meta.GetName()), obj, obj, 0) + if err != nil { + return err + } + items[i] = obj + } + return nil +} + +// CreateList will properly create a list using the storage interface +func CreateList(prefix string, helper storage.Interface, list runtime.Object) error { + items, err := meta.ExtractList(list) + if err != nil { + return err + } + err = CreateObjList(prefix, helper, items) + if err != nil { + return err + } + return meta.SetList(list, items) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/util.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/util.go new file mode 100644 index 000000000000..6595a6763828 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/util.go @@ -0,0 +1,90 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package storage + +import ( + "fmt" + "strconv" + + "k8s.io/kubernetes/pkg/api/meta" + "k8s.io/kubernetes/pkg/api/validation" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/util/validation/field" +) + +type SimpleUpdateFunc func(runtime.Object) (runtime.Object, error) + +// SimpleUpdateFunc converts SimpleUpdateFunc into UpdateFunc +func SimpleUpdate(fn SimpleUpdateFunc) UpdateFunc { + return func(input runtime.Object, _ ResponseMeta) (runtime.Object, *uint64, error) { + out, err := fn(input) + return out, nil, err + } +} + +// ParseWatchResourceVersion takes a resource version argument and converts it to +// the etcd version we should pass to helper.Watch(). Because resourceVersion is +// an opaque value, the default watch behavior for non-zero watch is to watch +// the next value (if you pass "1", you will see updates from "2" onwards). +func ParseWatchResourceVersion(resourceVersion string) (uint64, error) { + if resourceVersion == "" || resourceVersion == "0" { + return 0, nil + } + version, err := strconv.ParseUint(resourceVersion, 10, 64) + if err != nil { + return 0, NewInvalidError(field.ErrorList{ + // Validation errors are supposed to return version-specific field + // paths, but this is probably close enough. + field.Invalid(field.NewPath("resourceVersion"), resourceVersion, err.Error()), + }) + } + return version, nil +} + +// ParseListResourceVersion takes a resource version argument and converts it to +// the etcd version. 
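+// An empty string short-circuits to 0 without error, while anything
+// non-numeric returns the strconv error, e.g.:
+//
+//	v, err := ParseListResourceVersion("15") // v == 15, err == nil
+//	v, err = ParseListResourceVersion("")    // v == 0,  err == nil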
+func ParseListResourceVersion(resourceVersion string) (uint64, error) { + if resourceVersion == "" { + return 0, nil + } + version, err := strconv.ParseUint(resourceVersion, 10, 64) + return version, err +} + +func NamespaceKeyFunc(prefix string, obj runtime.Object) (string, error) { + meta, err := meta.Accessor(obj) + if err != nil { + return "", err + } + name := meta.GetName() + if msgs := validation.IsValidPathSegmentName(name); len(msgs) != 0 { + return "", fmt.Errorf("invalid name: %v", msgs) + } + return prefix + "/" + meta.GetNamespace() + "/" + name, nil +} + +func NoNamespaceKeyFunc(prefix string, obj runtime.Object) (string, error) { + meta, err := meta.Accessor(obj) + if err != nil { + return "", err + } + name := meta.GetName() + if msgs := validation.IsValidPathSegmentName(name); len(msgs) != 0 { + return "", fmt.Errorf("invalid name: %v", msgs) + } + return prefix + "/" + name, nil +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/util_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/util_test.go new file mode 100644 index 000000000000..7d0675cceed9 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/util_test.go @@ -0,0 +1,53 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package storage + +import "testing" + +func TestEtcdParseWatchResourceVersion(t *testing.T) { + testCases := []struct { + Version string + ExpectVersion uint64 + Err bool + }{ + {Version: "", ExpectVersion: 0}, + {Version: "a", Err: true}, + {Version: " ", Err: true}, + {Version: "1", ExpectVersion: 1}, + {Version: "10", ExpectVersion: 10}, + } + for _, testCase := range testCases { + version, err := ParseWatchResourceVersion(testCase.Version) + switch { + case testCase.Err: + if err == nil { + t.Errorf("%s: unexpected non-error", testCase.Version) + continue + } + if !IsInvalidError(err) { + t.Errorf("%s: unexpected error: %v", testCase.Version, err) + continue + } + case !testCase.Err && err != nil: + t.Errorf("%s: unexpected error: %v", testCase.Version, err) + continue + } + if version != testCase.ExpectVersion { + t.Errorf("%s: expected version %d but was %d", testCase.Version, testCase.ExpectVersion, version) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/watch_cache.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/watch_cache.go new file mode 100644 index 000000000000..3e5ce5d73003 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/watch_cache.go @@ -0,0 +1,331 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package storage + +import ( + "fmt" + "sort" + "strconv" + "sync" + "time" + + "k8s.io/kubernetes/pkg/api/errors" + "k8s.io/kubernetes/pkg/api/meta" + "k8s.io/kubernetes/pkg/client/cache" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/watch" +) + +const ( + // MaximumListWait determines how long we're willing to wait for a + // list if a client specified a resource version in the future. + MaximumListWait = 60 * time.Second +) + +// watchCacheEvent is a single "watch event" that is send to users of +// watchCache. Additionally to a typical "watch.Event" it contains +// the previous value of the object to enable proper filtering in the +// upper layers. +type watchCacheEvent struct { + Type watch.EventType + Object runtime.Object + PrevObject runtime.Object + ResourceVersion uint64 +} + +// watchCacheElement is a single "watch event" stored in a cache. +// It contains the resource version of the object and the object +// itself. +type watchCacheElement struct { + resourceVersion uint64 + watchCacheEvent watchCacheEvent +} + +// watchCache implements a Store interface. +// However, it depends on the elements implementing runtime.Object interface. +// +// watchCache is a "sliding window" (with a limited capacity) of objects +// observed from a watch. +type watchCache struct { + sync.RWMutex + + // Condition on which lists are waiting for the fresh enough + // resource version. + cond *sync.Cond + + // Maximum size of history window. + capacity int + + // cache is used a cyclic buffer - its first element (with the smallest + // resourceVersion) is defined by startIndex, its last element is defined + // by endIndex (if cache is full it will be startIndex + capacity). + // Both startIndex and endIndex can be greater than buffer capacity - + // you should always apply modulo capacity to get an index in cache array. + cache []watchCacheElement + startIndex int + endIndex int + + // store will effectively support LIST operation from the "end of cache + // history" i.e. from the moment just after the newest cached watched event. + // It is necessary to effectively allow clients to start watching at now. + store cache.Store + + // ResourceVersion up to which the watchCache is propagated. + resourceVersion uint64 + + // This handler is run at the end of every successful Replace() method. + onReplace func() + + // This handler is run at the end of every Add/Update/Delete method + // and additionally gets the previous value of the object. + onEvent func(watchCacheEvent) + + // for testing timeouts. 
+ clock util.Clock +} + +func newWatchCache(capacity int) *watchCache { + wc := &watchCache{ + capacity: capacity, + cache: make([]watchCacheElement, capacity), + startIndex: 0, + endIndex: 0, + store: cache.NewStore(cache.MetaNamespaceKeyFunc), + resourceVersion: 0, + clock: util.RealClock{}, + } + wc.cond = sync.NewCond(wc.RLocker()) + return wc +} + +func (w *watchCache) Add(obj interface{}) error { + object, resourceVersion, err := objectToVersionedRuntimeObject(obj) + if err != nil { + return err + } + event := watch.Event{Type: watch.Added, Object: object} + + f := func(obj runtime.Object) error { return w.store.Add(obj) } + return w.processEvent(event, resourceVersion, f) +} + +func (w *watchCache) Update(obj interface{}) error { + object, resourceVersion, err := objectToVersionedRuntimeObject(obj) + if err != nil { + return err + } + event := watch.Event{Type: watch.Modified, Object: object} + + f := func(obj runtime.Object) error { return w.store.Update(obj) } + return w.processEvent(event, resourceVersion, f) +} + +func (w *watchCache) Delete(obj interface{}) error { + object, resourceVersion, err := objectToVersionedRuntimeObject(obj) + if err != nil { + return err + } + event := watch.Event{Type: watch.Deleted, Object: object} + + f := func(obj runtime.Object) error { return w.store.Delete(obj) } + return w.processEvent(event, resourceVersion, f) +} + +func objectToVersionedRuntimeObject(obj interface{}) (runtime.Object, uint64, error) { + object, ok := obj.(runtime.Object) + if !ok { + return nil, 0, fmt.Errorf("obj does not implement runtime.Object interface: %v", obj) + } + meta, err := meta.Accessor(object) + if err != nil { + return nil, 0, err + } + resourceVersion, err := parseResourceVersion(meta.GetResourceVersion()) + if err != nil { + return nil, 0, err + } + return object, resourceVersion, nil +} + +func parseResourceVersion(resourceVersion string) (uint64, error) { + if resourceVersion == "" { + return 0, nil + } + return strconv.ParseUint(resourceVersion, 10, 64) +} + +func (w *watchCache) processEvent(event watch.Event, resourceVersion uint64, updateFunc func(runtime.Object) error) error { + w.Lock() + defer w.Unlock() + previous, exists, err := w.store.Get(event.Object) + if err != nil { + return err + } + var prevObject runtime.Object + if exists { + prevObject = previous.(runtime.Object) + } + watchCacheEvent := watchCacheEvent{event.Type, event.Object, prevObject, resourceVersion} + if w.onEvent != nil { + w.onEvent(watchCacheEvent) + } + w.updateCache(resourceVersion, watchCacheEvent) + w.resourceVersion = resourceVersion + w.cond.Broadcast() + return updateFunc(event.Object) +} + +// Assumes that lock is already held for write. +func (w *watchCache) updateCache(resourceVersion uint64, event watchCacheEvent) { + if w.endIndex == w.startIndex+w.capacity { + // Cache is full - remove the oldest element. + w.startIndex++ + } + w.cache[w.endIndex%w.capacity] = watchCacheElement{resourceVersion, event} + w.endIndex++ +} + +func (w *watchCache) List() []interface{} { + w.RLock() + defer w.RUnlock() + return w.store.List() +} + +func (w *watchCache) WaitUntilFreshAndList(resourceVersion uint64) ([]interface{}, uint64, error) { + startTime := w.clock.Now() + go func() { + // Wake us up when the time limit has expired. The docs + // promise that time.After (well, NewTimer, which it calls) + // will wait *at least* the duration given. 
Since this go + // routine starts sometime after we record the start time, and + // it will wake up the loop below sometime after the broadcast, + // we don't need to worry about waking it up before the time + // has expired accidentally. + <-w.clock.After(MaximumListWait) + w.cond.Broadcast() + }() + + w.RLock() + defer w.RUnlock() + for w.resourceVersion < resourceVersion { + if w.clock.Since(startTime) >= MaximumListWait { + return nil, 0, fmt.Errorf("time limit exceeded while waiting for resource version %v (current value: %v)", resourceVersion, w.resourceVersion) + } + w.cond.Wait() + } + return w.store.List(), w.resourceVersion, nil +} + +func (w *watchCache) ListKeys() []string { + w.RLock() + defer w.RUnlock() + return w.store.ListKeys() +} + +func (w *watchCache) Get(obj interface{}) (interface{}, bool, error) { + w.RLock() + defer w.RUnlock() + return w.store.Get(obj) +} + +func (w *watchCache) GetByKey(key string) (interface{}, bool, error) { + w.RLock() + defer w.RUnlock() + return w.store.GetByKey(key) +} + +func (w *watchCache) Replace(objs []interface{}, resourceVersion string) error { + version, err := parseResourceVersion(resourceVersion) + if err != nil { + return err + } + + w.Lock() + defer w.Unlock() + + w.startIndex = 0 + w.endIndex = 0 + if err := w.store.Replace(objs, resourceVersion); err != nil { + return err + } + w.resourceVersion = version + if w.onReplace != nil { + w.onReplace() + } + w.cond.Broadcast() + return nil +} + +func (w *watchCache) SetOnReplace(onReplace func()) { + w.Lock() + defer w.Unlock() + w.onReplace = onReplace +} + +func (w *watchCache) SetOnEvent(onEvent func(watchCacheEvent)) { + w.Lock() + defer w.Unlock() + w.onEvent = onEvent +} + +func (w *watchCache) GetAllEventsSinceThreadUnsafe(resourceVersion uint64) ([]watchCacheEvent, error) { + size := w.endIndex - w.startIndex + oldest := w.resourceVersion + if size > 0 { + oldest = w.cache[w.startIndex%w.capacity].resourceVersion + } + if resourceVersion == 0 { + // resourceVersion = 0 means that we don't require any specific starting point + // and we would like to start watching from ~now. + // However, to keep backward compatibility, we additionally need to return the + // current state and only then start watching from that point. + // + // TODO: In v2 api, we should stop returning the current state - #13969. + allItems := w.store.List() + result := make([]watchCacheEvent, len(allItems)) + for i, item := range allItems { + result[i] = watchCacheEvent{Type: watch.Added, Object: item.(runtime.Object)} + } + return result, nil + } + if resourceVersion < oldest-1 { + return nil, errors.NewGone(fmt.Sprintf("too old resource version: %d (%d)", resourceVersion, oldest-1)) + } + + // Binary search the smallest index at which resourceVersion is greater than the given one. 
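	// For example, with cached revisions [5, 6, 8] and resourceVersion=6 the
	// search returns index 2, so only the event at revision 8 is replayed; a
	// watcher resuming from revision 6 has already seen 5 and 6.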
+ f := func(i int) bool { + return w.cache[(w.startIndex+i)%w.capacity].resourceVersion > resourceVersion + } + first := sort.Search(size, f) + result := make([]watchCacheEvent, size-first) + for i := 0; i < size-first; i++ { + result[i] = w.cache[(w.startIndex+first+i)%w.capacity].watchCacheEvent + } + return result, nil +} + +func (w *watchCache) GetAllEventsSince(resourceVersion uint64) ([]watchCacheEvent, error) { + w.RLock() + defer w.RUnlock() + return w.GetAllEventsSinceThreadUnsafe(resourceVersion) +} + +func (w *watchCache) Resync() error { + // Nothing to do + return nil +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/watch_cache_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/watch_cache_test.go new file mode 100644 index 000000000000..8d9327e3cb76 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/storage/watch_cache_test.go @@ -0,0 +1,334 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package storage + +import ( + "strconv" + "testing" + "time" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/errors" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/client/cache" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/sets" + "k8s.io/kubernetes/pkg/util/wait" + "k8s.io/kubernetes/pkg/watch" +) + +func makeTestPod(name string, resourceVersion uint64) *api.Pod { + return &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Namespace: "ns", + Name: name, + ResourceVersion: strconv.FormatUint(resourceVersion, 10), + }, + } +} + +// newTestWatchCache just adds a fake clock. +func newTestWatchCache(capacity int) *watchCache { + wc := newWatchCache(capacity) + wc.clock = util.NewFakeClock(time.Now()) + return wc +} + +func TestWatchCacheBasic(t *testing.T) { + store := newTestWatchCache(2) + + // Test Add/Update/Delete. + pod1 := makeTestPod("pod", 1) + if err := store.Add(pod1); err != nil { + t.Errorf("unexpected error: %v", err) + } + if item, ok, _ := store.Get(pod1); !ok { + t.Errorf("didn't find pod") + } else { + if !api.Semantic.DeepEqual(pod1, item) { + t.Errorf("expected %v, got %v", pod1, item) + } + } + pod2 := makeTestPod("pod", 2) + if err := store.Update(pod2); err != nil { + t.Errorf("unexpected error: %v", err) + } + if item, ok, _ := store.Get(pod2); !ok { + t.Errorf("didn't find pod") + } else { + if !api.Semantic.DeepEqual(pod2, item) { + t.Errorf("expected %v, got %v", pod1, item) + } + } + pod3 := makeTestPod("pod", 3) + if err := store.Delete(pod3); err != nil { + t.Errorf("unexpected error: %v", err) + } + if _, ok, _ := store.Get(pod3); ok { + t.Errorf("found pod") + } + + // Test List. 
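+	// Note: the capacity of 2 only bounds the event window; the backing store
+	// keeps every live object, so all three pods added below should show up.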
+ store.Add(makeTestPod("pod1", 4)) + store.Add(makeTestPod("pod2", 5)) + store.Add(makeTestPod("pod3", 6)) + { + podNames := sets.String{} + for _, item := range store.List() { + podNames.Insert(item.(*api.Pod).ObjectMeta.Name) + } + if !podNames.HasAll("pod1", "pod2", "pod3") { + t.Errorf("missing pods, found %v", podNames) + } + if len(podNames) != 3 { + t.Errorf("found missing/extra items") + } + } + + // Test Replace. + store.Replace([]interface{}{ + makeTestPod("pod4", 7), + makeTestPod("pod5", 8), + }, "8") + { + podNames := sets.String{} + for _, item := range store.List() { + podNames.Insert(item.(*api.Pod).ObjectMeta.Name) + } + if !podNames.HasAll("pod4", "pod5") { + t.Errorf("missing pods, found %v", podNames) + } + if len(podNames) != 2 { + t.Errorf("found missing/extra items") + } + } +} + +func TestEvents(t *testing.T) { + store := newTestWatchCache(5) + + store.Add(makeTestPod("pod", 3)) + + // Test for Added event. + { + _, err := store.GetAllEventsSince(1) + if err == nil { + t.Errorf("expected error too old") + } + if _, ok := err.(*errors.StatusError); !ok { + t.Errorf("expected error to be of type StatusError") + } + } + { + result, err := store.GetAllEventsSince(2) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + if len(result) != 1 { + t.Fatalf("unexpected events: %v", result) + } + if result[0].Type != watch.Added { + t.Errorf("unexpected event type: %v", result[0].Type) + } + pod := makeTestPod("pod", uint64(3)) + if !api.Semantic.DeepEqual(pod, result[0].Object) { + t.Errorf("unexpected item: %v, expected: %v", result[0].Object, pod) + } + if result[0].PrevObject != nil { + t.Errorf("unexpected item: %v", result[0].PrevObject) + } + } + + store.Update(makeTestPod("pod", 4)) + store.Update(makeTestPod("pod", 5)) + + // Test with not full cache. + { + _, err := store.GetAllEventsSince(1) + if err == nil { + t.Errorf("expected error too old") + } + } + { + result, err := store.GetAllEventsSince(3) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + if len(result) != 2 { + t.Fatalf("unexpected events: %v", result) + } + for i := 0; i < 2; i++ { + if result[i].Type != watch.Modified { + t.Errorf("unexpected event type: %v", result[i].Type) + } + pod := makeTestPod("pod", uint64(i+4)) + if !api.Semantic.DeepEqual(pod, result[i].Object) { + t.Errorf("unexpected item: %v, expected: %v", result[i].Object, pod) + } + prevPod := makeTestPod("pod", uint64(i+3)) + if !api.Semantic.DeepEqual(prevPod, result[i].PrevObject) { + t.Errorf("unexpected item: %v, expected: %v", result[i].PrevObject, prevPod) + } + } + } + + for i := 6; i < 10; i++ { + store.Update(makeTestPod("pod", uint64(i))) + } + + // Test with full cache - there should be elements from 5 to 9. + { + _, err := store.GetAllEventsSince(3) + if err == nil { + t.Errorf("expected error too old") + } + } + { + result, err := store.GetAllEventsSince(4) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + if len(result) != 5 { + t.Fatalf("unexpected events: %v", result) + } + for i := 0; i < 5; i++ { + pod := makeTestPod("pod", uint64(i+5)) + if !api.Semantic.DeepEqual(pod, result[i].Object) { + t.Errorf("unexpected item: %v, expected: %v", result[i].Object, pod) + } + } + } + + // Test for delete event. 
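+	// The deleted pod is stamped with RV 10, so the event's Object should carry
+	// the new revision while PrevObject holds the last stored state at RV 9;
+	// both are asserted below.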
+ store.Delete(makeTestPod("pod", uint64(10))) + + { + result, err := store.GetAllEventsSince(9) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + if len(result) != 1 { + t.Fatalf("unexpected events: %v", result) + } + if result[0].Type != watch.Deleted { + t.Errorf("unexpected event type: %v", result[0].Type) + } + pod := makeTestPod("pod", uint64(10)) + if !api.Semantic.DeepEqual(pod, result[0].Object) { + t.Errorf("unexpected item: %v, expected: %v", result[0].Object, pod) + } + prevPod := makeTestPod("pod", uint64(9)) + if !api.Semantic.DeepEqual(prevPod, result[0].PrevObject) { + t.Errorf("unexpected item: %v, expected: %v", result[0].PrevObject, prevPod) + } + } +} + +func TestWaitUntilFreshAndList(t *testing.T) { + store := newTestWatchCache(3) + + // In background, update the store. + go func() { + store.Add(makeTestPod("foo", 2)) + store.Add(makeTestPod("bar", 5)) + }() + + list, resourceVersion, err := store.WaitUntilFreshAndList(5) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if resourceVersion != 5 { + t.Errorf("unexpected resourceVersion: %v, expected: 5", resourceVersion) + } + if len(list) != 2 { + t.Errorf("unexpected list returned: %#v", list) + } +} + +func TestWaitUntilFreshAndListTimeout(t *testing.T) { + store := newTestWatchCache(3) + fc := store.clock.(*util.FakeClock) + + // In background, step clock after the below call starts the timer. + go func() { + for !fc.HasWaiters() { + time.Sleep(time.Millisecond) + } + fc.Step(MaximumListWait) + + // Add an object to make sure the test would + // eventually fail instead of just waiting + // forever. + time.Sleep(30 * time.Second) + store.Add(makeTestPod("bar", 5)) + }() + + _, _, err := store.WaitUntilFreshAndList(5) + if err == nil { + t.Fatalf("unexpected lack of timeout error") + } +} + +type testLW struct { + ListFunc func(options api.ListOptions) (runtime.Object, error) + WatchFunc func(options api.ListOptions) (watch.Interface, error) +} + +func (t *testLW) List(options api.ListOptions) (runtime.Object, error) { + return t.ListFunc(options) +} +func (t *testLW) Watch(options api.ListOptions) (watch.Interface, error) { + return t.WatchFunc(options) +} + +func TestReflectorForWatchCache(t *testing.T) { + store := newTestWatchCache(5) + + { + _, version, err := store.WaitUntilFreshAndList(0) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if version != 0 { + t.Errorf("unexpected resource version: %d", version) + } + } + + lw := &testLW{ + WatchFunc: func(options api.ListOptions) (watch.Interface, error) { + fw := watch.NewFake() + go fw.Stop() + return fw, nil + }, + ListFunc: func(options api.ListOptions) (runtime.Object, error) { + return &api.PodList{ListMeta: unversioned.ListMeta{ResourceVersion: "10"}}, nil + }, + } + r := cache.NewReflector(lw, &api.Pod{}, store, 0) + r.ListAndWatch(wait.NeverStop) + + { + _, version, err := store.WaitUntilFreshAndList(10) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if version != 10 { + t.Errorf("unexpected resource version: %d", version) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/bandwidth/linux_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/bandwidth/linux_test.go new file mode 100644 index 000000000000..b6d2b559cd19 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/bandwidth/linux_test.go @@ -0,0 +1,634 @@ +// +build linux + +/* +Copyright 2015 The Kubernetes Authors All rights reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package bandwidth + +import ( + "errors" + "reflect" + "strings" + "testing" + + "k8s.io/kubernetes/pkg/api/resource" + "k8s.io/kubernetes/pkg/util/exec" +) + +var tcClassOutput = `class htb 1:1 root prio 0 rate 10000bit ceil 10000bit burst 1600b cburst 1600b +class htb 1:2 root prio 0 rate 10000bit ceil 10000bit burst 1600b cburst 1600b +class htb 1:3 root prio 0 rate 10000bit ceil 10000bit burst 1600b cburst 1600b +class htb 1:4 root prio 0 rate 10000bit ceil 10000bit burst 1600b cburst 1600b +` + +var tcClassOutput2 = `class htb 1:1 root prio 0 rate 10000bit ceil 10000bit burst 1600b cburst 1600b +class htb 1:2 root prio 0 rate 10000bit ceil 10000bit burst 1600b cburst 1600b +class htb 1:3 root prio 0 rate 10000bit ceil 10000bit burst 1600b cburst 1600b +class htb 1:4 root prio 0 rate 10000bit ceil 10000bit burst 1600b cburst 1600b +class htb 1:5 root prio 0 rate 10000bit ceil 10000bit burst 1600b cburst 1600b +` + +func TestNextClassID(t *testing.T) { + tests := []struct { + output string + expectErr bool + expected int + err error + }{ + { + output: tcClassOutput, + expected: 5, + }, + { + output: "\n", + expected: 1, + }, + { + expected: -1, + expectErr: true, + err: errors.New("test error"), + }, + } + for _, test := range tests { + fcmd := exec.FakeCmd{ + CombinedOutputScript: []exec.FakeCombinedOutputAction{ + func() ([]byte, error) { return []byte(test.output), test.err }, + }, + } + fexec := exec.FakeExec{ + CommandScript: []exec.FakeCommandAction{ + func(cmd string, args ...string) exec.Cmd { + return exec.InitFakeCmd(&fcmd, cmd, args...) 
+ }, + }, + } + shaper := &tcShaper{e: &fexec} + class, err := shaper.nextClassID() + if test.expectErr { + if err == nil { + t.Errorf("unexpected non-error") + } + } else { + if err != nil { + t.Errorf("unexpected error: %v", err) + } + if class != test.expected { + t.Errorf("expected: %d, found %d", test.expected, class) + } + } + } +} + +func TestHexCIDR(t *testing.T) { + tests := []struct { + input string + output string + expectErr bool + }{ + { + input: "1.2.0.0/16", + output: "01020000/ffff0000", + }, + { + input: "172.17.0.2/32", + output: "ac110002/ffffffff", + }, + { + input: "foo", + expectErr: true, + }, + } + for _, test := range tests { + output, err := hexCIDR(test.input) + if test.expectErr { + if err == nil { + t.Error("unexpected non-error") + } + } else { + if err != nil { + t.Errorf("unexpected error: %v", err) + } + if output != test.output { + t.Errorf("expected: %s, saw: %s", test.output, output) + } + input, err := asciiCIDR(output) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + if input != test.input { + t.Errorf("expected: %s, saw: %s", test.input, input) + } + } + } +} + +var tcFilterOutput = `filter parent 1: protocol ip pref 1 u32 +filter parent 1: protocol ip pref 1 u32 fh 800: ht divisor 1 +filter parent 1: protocol ip pref 1 u32 fh 800::800 order 2048 key ht 800 bkt 0 flowid 1:1 + match ac110002/ffffffff at 16 +filter parent 1: protocol ip pref 1 u32 fh 800::801 order 2049 key ht 800 bkt 0 flowid 1:2 + match 01020000/ffff0000 at 16 +` + +func TestFindCIDRClass(t *testing.T) { + tests := []struct { + cidr string + output string + expectErr bool + expectNotFound bool + expectedClass string + expectedHandle string + err error + }{ + { + cidr: "172.17.0.2/32", + output: tcFilterOutput, + expectedClass: "1:1", + expectedHandle: "800::800", + }, + { + cidr: "1.2.3.4/16", + output: tcFilterOutput, + expectedClass: "1:2", + expectedHandle: "800::801", + }, + { + cidr: "2.2.3.4/16", + output: tcFilterOutput, + expectNotFound: true, + }, + { + err: errors.New("test error"), + expectErr: true, + }, + } + for _, test := range tests { + fcmd := exec.FakeCmd{ + CombinedOutputScript: []exec.FakeCombinedOutputAction{ + func() ([]byte, error) { return []byte(test.output), test.err }, + }, + } + fexec := exec.FakeExec{ + CommandScript: []exec.FakeCommandAction{ + func(cmd string, args ...string) exec.Cmd { + return exec.InitFakeCmd(&fcmd, cmd, args...) + }, + }, + } + shaper := &tcShaper{e: &fexec} + class, handle, found, err := shaper.findCIDRClass(test.cidr) + if test.expectErr { + if err == nil { + t.Errorf("unexpected non-error") + } + } else { + if err != nil { + t.Errorf("unexpected error: %v", err) + } + if test.expectNotFound { + if found { + t.Errorf("unexpectedly found an interface: %s %s", class, handle) + } + } else { + if class != test.expectedClass { + t.Errorf("expected: %s, found %s", test.expectedClass, class) + } + if handle != test.expectedHandle { + t.Errorf("expected: %s, found %s", test.expectedHandle, handle) + } + } + } + } +} + +func TestGetCIDRs(t *testing.T) { + fcmd := exec.FakeCmd{ + CombinedOutputScript: []exec.FakeCombinedOutputAction{ + func() ([]byte, error) { return []byte(tcFilterOutput), nil }, + }, + } + fexec := exec.FakeExec{ + CommandScript: []exec.FakeCommandAction{ + func(cmd string, args ...string) exec.Cmd { + return exec.InitFakeCmd(&fcmd, cmd, args...) 
+ }, + }, + } + shaper := &tcShaper{e: &fexec} + cidrs, err := shaper.GetCIDRs() + if err != nil { + t.Errorf("unexpected error: %v", err) + } + expectedCidrs := []string{"172.17.0.2/32", "1.2.0.0/16"} + if !reflect.DeepEqual(cidrs, expectedCidrs) { + t.Errorf("expected: %v, saw: %v", expectedCidrs, cidrs) + } +} + +func TestLimit(t *testing.T) { + tests := []struct { + cidr string + ingress *resource.Quantity + egress *resource.Quantity + expectErr bool + expectedCalls int + err error + }{ + { + cidr: "1.2.3.4/32", + ingress: resource.NewQuantity(10, resource.DecimalSI), + egress: resource.NewQuantity(20, resource.DecimalSI), + expectedCalls: 6, + }, + { + cidr: "1.2.3.4/32", + ingress: resource.NewQuantity(10, resource.DecimalSI), + egress: nil, + expectedCalls: 3, + }, + { + cidr: "1.2.3.4/32", + ingress: nil, + egress: resource.NewQuantity(20, resource.DecimalSI), + expectedCalls: 3, + }, + { + cidr: "1.2.3.4/32", + ingress: nil, + egress: nil, + expectedCalls: 0, + }, + { + err: errors.New("test error"), + ingress: resource.NewQuantity(10, resource.DecimalSI), + egress: resource.NewQuantity(20, resource.DecimalSI), + expectErr: true, + }, + } + + for _, test := range tests { + fcmd := exec.FakeCmd{ + CombinedOutputScript: []exec.FakeCombinedOutputAction{ + func() ([]byte, error) { return []byte(tcClassOutput), test.err }, + func() ([]byte, error) { return []byte{}, test.err }, + func() ([]byte, error) { return []byte{}, test.err }, + func() ([]byte, error) { return []byte(tcClassOutput2), test.err }, + func() ([]byte, error) { return []byte{}, test.err }, + func() ([]byte, error) { return []byte{}, test.err }, + }, + } + + fexec := exec.FakeExec{ + CommandScript: []exec.FakeCommandAction{ + func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) }, + func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) }, + func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) }, + func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) }, + func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) }, + func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) 
+			},
+		},
+	}
+	iface := "cbr0"
+	shaper := &tcShaper{e: &fexec, iface: iface}
+	if err := shaper.Limit(test.cidr, test.ingress, test.egress); err != nil && !test.expectErr {
+		t.Errorf("unexpected error: %v", err)
+		return
+	} else if err == nil && test.expectErr {
+		t.Error("unexpected non-error")
+		return
+	}
+	// No more testing in the error case
+	if test.expectErr {
+		if fcmd.CombinedOutputCalls != 1 {
+			t.Errorf("unexpected number of calls: %d, expected: 1", fcmd.CombinedOutputCalls)
+		}
+		return
+	}
+
+	if fcmd.CombinedOutputCalls != test.expectedCalls {
+		t.Errorf("unexpected number of calls: %d, expected: %d", fcmd.CombinedOutputCalls, test.expectedCalls)
+	}
+
+	for ix := range fcmd.CombinedOutputLog {
+		output := fcmd.CombinedOutputLog[ix]
+		if output[0] != "tc" {
+			t.Errorf("unexpected command: %s, expected tc", output[0])
+		}
+		if output[4] != iface {
+			t.Errorf("unexpected interface: %s, expected %s (%v)", output[4], iface, output)
+		}
+		if ix == 1 {
+			var expectedRate string
+			if test.ingress != nil {
+				expectedRate = makeKBitString(test.ingress)
+			} else {
+				expectedRate = makeKBitString(test.egress)
+			}
+			if output[11] != expectedRate {
+				t.Errorf("unexpected ingress: %s, expected: %s", output[11], expectedRate)
+			}
+			if output[8] != "1:5" {
+				t.Errorf("unexpected class: %s, expected: %s", output[8], "1:5")
+			}
+		}
+		if ix == 2 {
+			if output[15] != test.cidr {
+				t.Errorf("unexpected cidr: %s, expected: %s", output[15], test.cidr)
+			}
+			if output[17] != "1:5" {
+				t.Errorf("unexpected class: %s, expected: %s", output[17], "1:5")
+			}
+		}
+		if ix == 4 {
+			if output[11] != makeKBitString(test.egress) {
+				t.Errorf("unexpected egress: %s, expected: %s", output[11], makeKBitString(test.egress))
+			}
+			if output[8] != "1:6" {
+				t.Errorf("unexpected class: %s, expected: %s", output[8], "1:6")
+			}
+		}
+		if ix == 5 {
+			if output[15] != test.cidr {
+				t.Errorf("unexpected cidr: %s, expected: %s", output[15], test.cidr)
+			}
+			if output[17] != "1:6" {
+				t.Errorf("unexpected class: %s, expected: %s", output[17], "1:6")
+			}
+		}
+	}
+	}
+}
+
+func TestReset(t *testing.T) {
+	tests := []struct {
+		cidr           string
+		err            error
+		expectErr      bool
+		expectedHandle string
+		expectedClass  string
+	}{
+		{
+			cidr:           "1.2.3.4/16",
+			expectedHandle: "800::801",
+			expectedClass:  "1:2",
+		},
+		{
+			cidr:           "172.17.0.2/32",
+			expectedHandle: "800::800",
+			expectedClass:  "1:1",
+		},
+		{
+			err:       errors.New("test error"),
+			expectErr: true,
+		},
+	}
+	for _, test := range tests {
+		fcmd := exec.FakeCmd{
+			CombinedOutputScript: []exec.FakeCombinedOutputAction{
+				func() ([]byte, error) { return []byte(tcFilterOutput), test.err },
+				func() ([]byte, error) { return []byte{}, test.err },
+				func() ([]byte, error) { return []byte{}, test.err },
+			},
+		}
+
+		fexec := exec.FakeExec{
+			CommandScript: []exec.FakeCommandAction{
+				func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) },
+				func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) },
+				func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...)
}, + }, + } + iface := "cbr0" + shaper := &tcShaper{e: &fexec, iface: iface} + + if err := shaper.Reset(test.cidr); err != nil && !test.expectErr { + t.Errorf("unexpected error: %v", err) + return + } else if test.expectErr && err == nil { + t.Error("unexpected non-error") + return + } + + // No more testing in the error case + if test.expectErr { + if fcmd.CombinedOutputCalls != 1 { + t.Errorf("unexpected number of calls: %d, expected: 1", fcmd.CombinedOutputCalls) + } + return + } + + if fcmd.CombinedOutputCalls != 3 { + t.Errorf("unexpected number of calls: %d, expected: 3", fcmd.CombinedOutputCalls) + } + + for ix := range fcmd.CombinedOutputLog { + output := fcmd.CombinedOutputLog[ix] + if output[0] != "tc" { + t.Errorf("unexpected command: %s, expected tc", output[0]) + } + if output[4] != iface { + t.Errorf("unexpected interface: %s, expected %s (%v)", output[4], iface, output) + } + if ix == 1 && output[12] != test.expectedHandle { + t.Errorf("unexpected handle: %s, expected: %s", output[12], test.expectedHandle) + } + if ix == 2 && output[8] != test.expectedClass { + t.Errorf("unexpected class: %s, expected: %s", output[8], test.expectedClass) + } + } + } +} + +var tcQdisc = "qdisc htb 1: root refcnt 2 r2q 10 default 30 direct_packets_stat 0\n" + +func TestReconcileInterfaceExists(t *testing.T) { + fcmd := exec.FakeCmd{ + CombinedOutputScript: []exec.FakeCombinedOutputAction{ + func() ([]byte, error) { return []byte(tcQdisc), nil }, + }, + } + + fexec := exec.FakeExec{ + CommandScript: []exec.FakeCommandAction{ + func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) }, + }, + } + iface := "cbr0" + shaper := &tcShaper{e: &fexec, iface: iface} + err := shaper.ReconcileInterface() + if err != nil { + t.Errorf("unexpected error: %v", err) + } + + if fcmd.CombinedOutputCalls != 1 { + t.Errorf("unexpected number of calls: %d", fcmd.CombinedOutputCalls) + } + + output := fcmd.CombinedOutputLog[0] + if len(output) != 5 { + t.Errorf("unexpected command: %v", output) + } + if output[0] != "tc" { + t.Errorf("unexpected command: %s", output[0]) + } + if output[4] != iface { + t.Errorf("unexpected interface: %s, expected %s", output[4], iface) + } + if output[2] != "show" { + t.Errorf("unexpected action: %s", output[2]) + } +} + +func testReconcileInterfaceHasNoData(t *testing.T, output string) { + fcmd := exec.FakeCmd{ + CombinedOutputScript: []exec.FakeCombinedOutputAction{ + func() ([]byte, error) { return []byte(output), nil }, + func() ([]byte, error) { return []byte(output), nil }, + }, + } + + fexec := exec.FakeExec{ + CommandScript: []exec.FakeCommandAction{ + func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) }, + func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) 
}, + }, + } + iface := "cbr0" + shaper := &tcShaper{e: &fexec, iface: iface} + err := shaper.ReconcileInterface() + if err != nil { + t.Errorf("unexpected error: %v", err) + } + + if fcmd.CombinedOutputCalls != 2 { + t.Errorf("unexpected number of calls: %d", fcmd.CombinedOutputCalls) + } + + for ix, output := range fcmd.CombinedOutputLog { + if output[0] != "tc" { + t.Errorf("unexpected command: %s", output[0]) + } + if output[4] != iface { + t.Errorf("unexpected interface: %s, expected %s", output[4], iface) + } + if ix == 0 { + if len(output) != 5 { + t.Errorf("unexpected command: %v", output) + } + if output[2] != "show" { + t.Errorf("unexpected action: %s", output[2]) + } + } + if ix == 1 { + if len(output) != 11 { + t.Errorf("unexpected command: %v", output) + } + if output[2] != "add" { + t.Errorf("unexpected action: %s", output[2]) + } + if output[7] != "1:" { + t.Errorf("unexpected root class: %s", output[7]) + } + if output[8] != "htb" { + t.Errorf("unexpected qdisc algo: %s", output[8]) + } + } + } +} + +func TestReconcileInterfaceDoesntExist(t *testing.T) { + testReconcileInterfaceHasNoData(t, "\n") +} + +var tcQdiscNoqueue = "qdisc noqueue 0: root refcnt 2 \n" + +func TestReconcileInterfaceExistsWithNoqueue(t *testing.T) { + testReconcileInterfaceHasNoData(t, tcQdiscNoqueue) +} + +var tcQdiscWrong = []string{ + "qdisc htb 2: root refcnt 2 r2q 10 default 30 direct_packets_stat 0\n", + "qdisc foo 1: root refcnt 2 r2q 10 default 30 direct_packets_stat 0\n", +} + +func TestReconcileInterfaceIsWrong(t *testing.T) { + for _, test := range tcQdiscWrong { + fcmd := exec.FakeCmd{ + CombinedOutputScript: []exec.FakeCombinedOutputAction{ + func() ([]byte, error) { return []byte(test), nil }, + func() ([]byte, error) { return []byte("\n"), nil }, + func() ([]byte, error) { return []byte("\n"), nil }, + }, + } + + fexec := exec.FakeExec{ + CommandScript: []exec.FakeCommandAction{ + func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) }, + func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) }, + func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) 
}, + }, + } + iface := "cbr0" + shaper := &tcShaper{e: &fexec, iface: iface} + err := shaper.ReconcileInterface() + if err != nil { + t.Errorf("unexpected error: %v", err) + } + + if fcmd.CombinedOutputCalls != 3 { + t.Errorf("unexpected number of calls: %d", fcmd.CombinedOutputCalls) + } + + for ix, output := range fcmd.CombinedOutputLog { + if output[0] != "tc" { + t.Errorf("unexpected command: %s", output[0]) + } + if output[4] != iface { + t.Errorf("unexpected interface: %s, expected %s", output[4], iface) + } + if ix == 0 { + if len(output) != 5 { + t.Errorf("unexpected command: %v", output) + } + if output[2] != "show" { + t.Errorf("unexpected action: %s", output[2]) + } + } + if ix == 1 { + if len(output) != 8 { + t.Errorf("unexpected command: %v", output) + } + if output[2] != "delete" { + t.Errorf("unexpected action: %s", output[2]) + } + if output[7] != strings.Split(test, " ")[2] { + t.Errorf("unexpected class: %s, expected: %s", output[7], strings.Split(test, " ")[2]) + } + } + if ix == 2 { + if len(output) != 11 { + t.Errorf("unexpected command: %v", output) + } + if output[7] != "1:" { + t.Errorf("unexpected root class: %s", output[7]) + } + if output[8] != "htb" { + t.Errorf("unexpected qdisc algo: %s", output[8]) + } + } + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/bandwidth/utils.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/bandwidth/utils.go new file mode 100644 index 000000000000..b501e9b5c803 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/bandwidth/utils.go @@ -0,0 +1,62 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package bandwidth
+
+import (
+	"fmt"
+
+	"k8s.io/kubernetes/pkg/api/resource"
+)
+
+var minRsrc = resource.MustParse("1k")
+var maxRsrc = resource.MustParse("1P")
+
+func validateBandwidthIsReasonable(rsrc *resource.Quantity) error {
+	if rsrc.Value() < minRsrc.Value() {
+		return fmt.Errorf("resource is unreasonably small (< 1kbit)")
+	}
+	if rsrc.Value() > maxRsrc.Value() {
+		return fmt.Errorf("resource is unreasonably large (> 1Pbit)")
+	}
+	return nil
+}
+
+func ExtractPodBandwidthResources(podAnnotations map[string]string) (ingress, egress *resource.Quantity, err error) {
+	str, found := podAnnotations["kubernetes.io/ingress-bandwidth"]
+	if found {
+		ingressValue, err := resource.ParseQuantity(str)
+		if err != nil {
+			return nil, nil, err
+		}
+		ingress = &ingressValue
+		if err := validateBandwidthIsReasonable(ingress); err != nil {
+			return nil, nil, err
+		}
+	}
+	str, found = podAnnotations["kubernetes.io/egress-bandwidth"]
+	if found {
+		egressValue, err := resource.ParseQuantity(str)
+		if err != nil {
+			return nil, nil, err
+		}
+		egress = &egressValue
+		if err := validateBandwidthIsReasonable(egress); err != nil {
+			return nil, nil, err
+		}
+	}
+	return ingress, egress, nil
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/bool_flag.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/bool_flag.go
deleted file mode 100644
index 768fc30350b6..000000000000
--- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/bool_flag.go
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
-Copyright 2014 The Kubernetes Authors All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package util
-
-import (
-	"fmt"
-	"strconv"
-)
-
-// BoolFlag is a boolean flag compatible with flags and pflags that keeps track of whether it had a value supplied or not.
-// Getting this flag to act like a normal bool, where true/false are not required needs a little bit of extra code, example: -// f := cmd.Flags().VarPF(&BoolFlagVar, "flagname", "", "help about the flag") -// f.NoOptDefVal = "true" -type BoolFlag struct { - // If Set has been invoked this value is true - provided bool - // The exact value provided on the flag - value bool -} - -func (f *BoolFlag) Default(value bool) { - f.value = value -} - -func (f BoolFlag) String() string { - return fmt.Sprintf("%t", f.value) -} - -func (f BoolFlag) Value() bool { - return f.value -} - -func (f *BoolFlag) Set(value string) error { - boolVal, err := strconv.ParseBool(value) - if err != nil { - return err - } - - f.value = boolVal - f.provided = true - - return nil -} - -func (f BoolFlag) Provided() bool { - return f.provided -} - -func (f *BoolFlag) Type() string { - return "bool" -} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/cache.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/cache/cache.go similarity index 93% rename from Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/cache.go rename to Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/cache/cache.go index 47e30ca31d09..1f96c9b9aacc 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/cache.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/cache/cache.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package util +package cache import ( "sync" @@ -27,6 +27,9 @@ const ( type Cache []*cacheShard func NewCache(maxSize int) Cache { + if maxSize < shardsCount { + maxSize = shardsCount + } cache := make(Cache, shardsCount) for i := 0; i < shardsCount; i++ { cache[i] = &cacheShard{ @@ -61,14 +64,14 @@ func (s *cacheShard) add(index uint64, obj interface{}) bool { s.Lock() defer s.Unlock() _, isOverwrite := s.items[index] - s.items[index] = obj - if len(s.items) > s.maxSize { + if !isOverwrite && len(s.items) >= s.maxSize { var randomKey uint64 for randomKey = range s.items { break } delete(s.items, randomKey) } + s.items[index] = obj return isOverwrite } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/cache/cache_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/cache/cache_test.go new file mode 100644 index 000000000000..e08c27911d53 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/cache/cache_test.go @@ -0,0 +1,90 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package cache
+
+import (
+	"testing"
+)
+
+const (
+	maxTestCacheSize int = shardsCount * 2
+)
+
+func ExpectEntry(t *testing.T, cache Cache, index uint64, expectedValue interface{}) bool {
+	elem, found := cache.Get(index)
+	if !found {
+		t.Errorf("Expected to find entry with key %d", index)
+		return false
+	} else if elem != expectedValue {
+		t.Errorf("Expected to find %v, got %v", expectedValue, elem)
+		return false
+	}
+	return true
+}
+
+func TestBasic(t *testing.T) {
+	cache := NewCache(maxTestCacheSize)
+	cache.Add(1, "xxx")
+	ExpectEntry(t, cache, 1, "xxx")
+}
+
+func TestOverflow(t *testing.T) {
+	cache := NewCache(maxTestCacheSize)
+	for i := 0; i < maxTestCacheSize+1; i++ {
+		cache.Add(uint64(i), "xxx")
+	}
+	foundIndexes := make([]uint64, 0)
+	for i := 0; i < maxTestCacheSize+1; i++ {
+		_, found := cache.Get(uint64(i))
+		if found {
+			foundIndexes = append(foundIndexes, uint64(i))
+		}
+	}
+	if len(foundIndexes) != maxTestCacheSize {
+		t.Errorf("Expected to find %d elements, got %d %v", maxTestCacheSize, len(foundIndexes), foundIndexes)
+	}
+}
+
+func TestOverwrite(t *testing.T) {
+	cache := NewCache(maxTestCacheSize)
+	cache.Add(1, "xxx")
+	ExpectEntry(t, cache, 1, "xxx")
+	cache.Add(1, "yyy")
+	ExpectEntry(t, cache, 1, "yyy")
+}
+
+// TestEvict would fail sporadically if add() could pick the key we most
+// recently added as the randomKey to evict, so it verifies that eviction
+// never removes the entry that was just inserted. Since the chance of
+// failure on each evict would be 50%, running it 7 times catches the
+// problem, if it exists, >99% of the time.
+func TestEvict(t *testing.T) {
+	cache := NewCache(shardsCount)
+	var found bool
+	for retry := 0; retry < 7; retry++ {
+		cache.Add(uint64(shardsCount), "xxx")
+		found = ExpectEntry(t, cache, uint64(shardsCount), "xxx")
+		if !found {
+			break
+		}
+		cache.Add(0, "xxx")
+		found = ExpectEntry(t, cache, 0, "xxx")
+		if !found {
+			break
+		}
+	}
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/cache/lruexpirecache.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/cache/lruexpirecache.go
new file mode 100644
index 000000000000..22f7f27679c2
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/cache/lruexpirecache.go
@@ -0,0 +1,66 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cache
+
+import (
+	"sync"
+	"time"
+
+	"github.com/golang/groupcache/lru"
+)
+
+type LRUExpireCache struct {
+	cache *lru.Cache
+	lock  sync.RWMutex
+}
+
+func NewLRUExpireCache(maxSize int) *LRUExpireCache {
+	return &LRUExpireCache{cache: lru.New(maxSize)}
+}
+
+type cacheEntry struct {
+	value      interface{}
+	expireTime time.Time
+}
+
+func (c *LRUExpireCache) Add(key lru.Key, value interface{}, ttl time.Duration) {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+	c.cache.Add(key, &cacheEntry{value, time.Now().Add(ttl)})
+	// Remove entry from cache after ttl.
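+	// Note: the timer below runs on the real clock, so an entry is reaped
+	// after ttl even if it is never read again; Get additionally treats an
+	// entry past its expireTime as absent and removes it lazily.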
+ time.AfterFunc(ttl, func() { c.remove(key) }) +} + +func (c *LRUExpireCache) Get(key lru.Key) (interface{}, bool) { + c.lock.RLock() + defer c.lock.RUnlock() + e, ok := c.cache.Get(key) + if !ok { + return nil, false + } + if time.Now().After(e.(*cacheEntry).expireTime) { + go c.remove(key) + return nil, false + } + return e.(*cacheEntry).value, true +} + +func (c *LRUExpireCache) remove(key lru.Key) { + c.lock.Lock() + defer c.lock.Unlock() + c.cache.Remove(key) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/cache/lruexpirecache_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/cache/lruexpirecache_test.go new file mode 100644 index 000000000000..07465ea5643a --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/cache/lruexpirecache_test.go @@ -0,0 +1,63 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "testing" + "time" + + "github.com/golang/groupcache/lru" +) + +func expectEntry(t *testing.T, c *LRUExpireCache, key lru.Key, value interface{}) { + result, ok := c.Get(key) + if !ok || result != value { + t.Errorf("Expected cache[%v]: %v, got %v", key, value, result) + } +} + +func expectNotEntry(t *testing.T, c *LRUExpireCache, key lru.Key) { + if result, ok := c.Get(key); ok { + t.Errorf("Expected cache[%v] to be empty, got %v", key, result) + } +} + +func TestSimpleGet(t *testing.T) { + c := NewLRUExpireCache(10) + c.Add("long-lived", "12345", 10*time.Hour) + expectEntry(t, c, "long-lived", "12345") +} + +func TestExpiredGet(t *testing.T) { + c := NewLRUExpireCache(10) + c.Add("short-lived", "12345", 0*time.Second) + expectNotEntry(t, c, "short-lived") +} + +func TestLRUOverflow(t *testing.T) { + c := NewLRUExpireCache(4) + c.Add("elem1", "1", 10*time.Hour) + c.Add("elem2", "2", 10*time.Hour) + c.Add("elem3", "3", 10*time.Hour) + c.Add("elem4", "4", 10*time.Hour) + c.Add("elem5", "5", 10*time.Hour) + expectNotEntry(t, c, "elem1") + expectEntry(t, c, "elem2", "2") + expectEntry(t, c, "elem3", "3") + expectEntry(t, c, "elem4", "4") + expectEntry(t, c, "elem5", "5") +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/clock.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/clock.go index ac2d738d6281..474cbb68d11f 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/clock.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/clock.go @@ -28,6 +28,7 @@ type Clock interface { Since(time.Time) time.Duration After(d time.Duration) <-chan time.Time Sleep(d time.Duration) + Tick(d time.Duration) <-chan time.Time } var ( @@ -54,6 +55,10 @@ func (RealClock) After(d time.Duration) <-chan time.Time { return time.After(d) } +func (RealClock) Tick(d time.Duration) <-chan time.Time { + return time.Tick(d) +} + func (RealClock) Sleep(d time.Duration) { time.Sleep(d) } @@ -68,8 +73,10 @@ type FakeClock struct { } type fakeClockWaiter struct { - targetTime time.Time - destChan chan<- time.Time + targetTime time.Time + stepInterval time.Duration + 
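+	// skipIfBlocked makes delivery lossy: if the receiver is not ready the
+	// tick is dropped rather than blocking Step, presumably to mirror
+	// time.Tick's behavior for slow receivers.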
skipIfBlocked bool + destChan chan<- time.Time } func NewFakeClock(t time.Time) *FakeClock { @@ -105,7 +112,22 @@ func (f *FakeClock) After(d time.Duration) <-chan time.Time { return ch } -// Move clock by Duration, notify anyone that's called After +func (f *FakeClock) Tick(d time.Duration) <-chan time.Time { + f.lock.Lock() + defer f.lock.Unlock() + tickTime := f.time.Add(d) + ch := make(chan time.Time, 1) // hold one tick + f.waiters = append(f.waiters, fakeClockWaiter{ + targetTime: tickTime, + stepInterval: d, + skipIfBlocked: true, + destChan: ch, + }) + + return ch +} + +// Move clock by Duration, notify anyone that's called After or Tick func (f *FakeClock) Step(d time.Duration) { f.lock.Lock() defer f.lock.Unlock() @@ -126,7 +148,23 @@ func (f *FakeClock) setTimeLocked(t time.Time) { for i := range f.waiters { w := &f.waiters[i] if !w.targetTime.After(t) { - w.destChan <- t + + if w.skipIfBlocked { + select { + case w.destChan <- t: + default: + } + } else { + w.destChan <- t + } + + if w.stepInterval > 0 { + for !w.targetTime.After(t) { + w.targetTime = w.targetTime.Add(w.stepInterval) + } + newWaiters = append(newWaiters, *w) + } + } else { newWaiters = append(newWaiters, f.waiters[i]) } @@ -169,6 +207,12 @@ func (*IntervalClock) After(d time.Duration) <-chan time.Time { panic("IntervalClock doesn't implement After") } +// Unimplemented, will panic. +// TODO: make interval clock use FakeClock so this can be implemented. +func (*IntervalClock) Tick(d time.Duration) <-chan time.Time { + panic("IntervalClock doesn't implement Tick") +} + func (*IntervalClock) Sleep(d time.Duration) { panic("IntervalClock doesn't implement Sleep") } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/clock_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/clock_test.go new file mode 100644 index 000000000000..ee60fcb0d235 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/clock_test.go @@ -0,0 +1,184 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package util + +import ( + "testing" + "time" +) + +func TestFakeClock(t *testing.T) { + startTime := time.Now() + tc := NewFakeClock(startTime) + tc.Step(time.Second) + now := tc.Now() + if now.Sub(startTime) != time.Second { + t.Errorf("input: %s now=%s gap=%s expected=%s", startTime, now, now.Sub(startTime), time.Second) + } + + tt := tc.Now() + tc.SetTime(tt.Add(time.Hour)) + if tc.Now().Sub(tt) != time.Hour { + t.Errorf("input: %s now=%s gap=%s expected=%s", tt, tc.Now(), tc.Now().Sub(tt), time.Hour) + } +} + +func TestFakeClockSleep(t *testing.T) { + startTime := time.Now() + tc := NewFakeClock(startTime) + tc.Sleep(time.Duration(1) * time.Hour) + now := tc.Now() + if now.Sub(startTime) != time.Hour { + t.Errorf("Fake sleep failed, expected time to advance by one hour, instead, its %v", now.Sub(startTime)) + } +} + +func TestFakeAfter(t *testing.T) { + tc := NewFakeClock(time.Now()) + if tc.HasWaiters() { + t.Errorf("unexpected waiter?") + } + oneSec := tc.After(time.Second) + if !tc.HasWaiters() { + t.Errorf("unexpected lack of waiter?") + } + + oneOhOneSec := tc.After(time.Second + time.Millisecond) + twoSec := tc.After(2 * time.Second) + select { + case <-oneSec: + t.Errorf("unexpected channel read") + case <-oneOhOneSec: + t.Errorf("unexpected channel read") + case <-twoSec: + t.Errorf("unexpected channel read") + default: + } + + tc.Step(999 * time.Millisecond) + select { + case <-oneSec: + t.Errorf("unexpected channel read") + case <-oneOhOneSec: + t.Errorf("unexpected channel read") + case <-twoSec: + t.Errorf("unexpected channel read") + default: + } + + tc.Step(time.Millisecond) + select { + case <-oneSec: + // Expected! + case <-oneOhOneSec: + t.Errorf("unexpected channel read") + case <-twoSec: + t.Errorf("unexpected channel read") + default: + t.Errorf("unexpected non-channel read") + } + tc.Step(time.Millisecond) + select { + case <-oneSec: + // should not double-trigger! + t.Errorf("unexpected channel read") + case <-oneOhOneSec: + // Expected! + case <-twoSec: + t.Errorf("unexpected channel read") + default: + t.Errorf("unexpected non-channel read") + } +} + +func TestFakeTick(t *testing.T) { + tc := NewFakeClock(time.Now()) + if tc.HasWaiters() { + t.Errorf("unexpected waiter?") + } + oneSec := tc.Tick(time.Second) + if !tc.HasWaiters() { + t.Errorf("unexpected lack of waiter?") + } + + oneOhOneSec := tc.Tick(time.Second + time.Millisecond) + twoSec := tc.Tick(2 * time.Second) + select { + case <-oneSec: + t.Errorf("unexpected channel read") + case <-oneOhOneSec: + t.Errorf("unexpected channel read") + case <-twoSec: + t.Errorf("unexpected channel read") + default: + } + + tc.Step(999 * time.Millisecond) // t=.999 + select { + case <-oneSec: + t.Errorf("unexpected channel read") + case <-oneOhOneSec: + t.Errorf("unexpected channel read") + case <-twoSec: + t.Errorf("unexpected channel read") + default: + } + + tc.Step(time.Millisecond) // t=1.000 + select { + case <-oneSec: + // Expected! + case <-oneOhOneSec: + t.Errorf("unexpected channel read") + case <-twoSec: + t.Errorf("unexpected channel read") + default: + t.Errorf("unexpected non-channel read") + } + tc.Step(time.Millisecond) // t=1.001 + select { + case <-oneSec: + // should not double-trigger! + t.Errorf("unexpected channel read") + case <-oneOhOneSec: + // Expected! 
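+		// (the 1.001-second ticker fires for the first time at t=1.001)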
+	case <-twoSec:
+		t.Errorf("unexpected channel read")
+	default:
+		t.Errorf("unexpected non-channel read")
+	}
+
+	tc.Step(time.Second) // t=2.001
+	tc.Step(time.Second) // t=3.001
+	tc.Step(time.Second) // t=4.001
+	tc.Step(time.Second) // t=5.001
+
+	// The one-second ticker should not accumulate ticks
+	accumulatedTicks := 0
+	drained := false
+	for !drained {
+		select {
+		case <-oneSec:
+			accumulatedTicks++
+		default:
+			drained = true
+		}
+	}
+	if accumulatedTicks != 1 {
+		t.Errorf("unexpected number of accumulated ticks: %d", accumulatedTicks)
+	}
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/config/config_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/config/config_test.go
new file mode 100644
index 000000000000..4ebab7bf99f6
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/config/config_test.go
@@ -0,0 +1,120 @@
+/*
+Copyright 2014 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package config
+
+import (
+	"reflect"
+	"testing"
+)
+
+func TestConfigurationChannels(t *testing.T) {
+	mux := NewMux(nil)
+	channelOne := mux.Channel("one")
+	if channelOne != mux.Channel("one") {
+		t.Error("Didn't get the same mux channel back with the same name")
+	}
+	channelTwo := mux.Channel("two")
+	if channelOne == channelTwo {
+		t.Error("Got back the same mux channel for different names")
+	}
+}
+
+type MergeMock struct {
+	source string
+	update interface{}
+	t      *testing.T
+}
+
+func (m MergeMock) Merge(source string, update interface{}) error {
+	if m.source != source {
+		m.t.Errorf("Expected %s, Got %s", m.source, source)
+	}
+	if !reflect.DeepEqual(m.update, update) {
+		m.t.Errorf("Expected %s, Got %s", m.update, update)
+	}
+	return nil
+}
+
+func TestMergeInvoked(t *testing.T) {
+	merger := MergeMock{"one", "test", t}
+	mux := NewMux(&merger)
+	mux.Channel("one") <- "test"
+}
+
+func TestMergeFuncInvoked(t *testing.T) {
+	ch := make(chan bool)
+	mux := NewMux(MergeFunc(func(source string, update interface{}) error {
+		if source != "one" {
+			t.Errorf("Expected %s, Got %s", "one", source)
+		}
+		if update.(string) != "test" {
+			t.Errorf("Expected %s, Got %s", "test", update)
+		}
+		ch <- true
+		return nil
+	}))
+	mux.Channel("one") <- "test"
+	<-ch
+}
+
+func TestSimultaneousMerge(t *testing.T) {
+	ch := make(chan bool, 2)
+	mux := NewMux(MergeFunc(func(source string, update interface{}) error {
+		switch source {
+		case "one":
+			if update.(string) != "test" {
+				t.Errorf("Expected %s, Got %s", "test", update)
+			}
+		case "two":
+			if update.(string) != "test2" {
+				t.Errorf("Expected %s, Got %s", "test2", update)
+			}
+		default:
+			t.Errorf("Unexpected source, Got %s", update)
+		}
+		ch <- true
+		return nil
+	}))
+	source := mux.Channel("one")
+	source2 := mux.Channel("two")
+	source <- "test"
+	source2 <- "test2"
+	<-ch
+	<-ch
+}
+
+func TestBroadcaster(t *testing.T) {
+	b := NewBroadcaster()
+	b.Notify(struct{}{})
+
+	ch := make(chan bool, 2)
+	b.Add(ListenerFunc(func(object interface{}) {
+		if object !=
"test" { + t.Errorf("Expected %s, Got %s", "test", object) + } + ch <- true + })) + b.Add(ListenerFunc(func(object interface{}) { + if object != "test" { + t.Errorf("Expected %s, Got %s", "test", object) + } + ch <- true + })) + b.Notify("test") + <-ch + <-ch +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/configuration_map.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/config/configuration_map.go similarity index 98% rename from Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/configuration_map.go rename to Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/config/configuration_map.go index cf2b326a9085..e7ad4fadb3d7 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/configuration_map.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/config/configuration_map.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package util +package config import ( "fmt" diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/configz/configz_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/configz/configz_test.go new file mode 100644 index 000000000000..7e520205019b --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/configz/configz_test.go @@ -0,0 +1,77 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package configz + +import ( + "io/ioutil" + "net/http" + "net/http/httptest" + "testing" +) + +func TestConfigz(t *testing.T) { + v, err := New("testing") + if err != nil { + t.Fatalf("err: %v", err) + } + + v.Set("blah") + + s := httptest.NewServer(http.HandlerFunc(handle)) + defer s.Close() + + resp, err := http.Get(s.URL + "/configz") + if err != nil { + t.Fatalf("err: %v", err) + } + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatalf("err: %v", err) + } + if string(body) != `{"testing":"blah"}` { + t.Fatalf("unexpected output: %v", err) + } + + v.Set("bing") + resp, err = http.Get(s.URL + "/configz") + if err != nil { + t.Fatalf("err: %v", err) + } + + body, err = ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatalf("err: %v", err) + } + if string(body) != `{"testing":"bing"}` { + t.Fatalf("unexpected output: %v", err) + } + + Delete("testing") + resp, err = http.Get(s.URL + "/configz") + if err != nil { + t.Fatalf("err: %v", err) + } + + body, err = ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatalf("err: %v", err) + } + if string(body) != `{}` { + t.Fatalf("unexpected output: %v", err) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/crlf.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/crlf/crlf.go similarity index 99% rename from Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/crlf.go rename to Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/crlf/crlf.go index 935785ce974b..e098e860079b 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/crlf.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/crlf/crlf.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package util +package crlf import ( "bytes" diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/crypto.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/crypto/crypto.go similarity index 99% rename from Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/crypto.go rename to Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/crypto/crypto.go index 42b890d1891f..1085c0b300d9 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/crypto.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/crypto/crypto.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package util +package crypto import ( "bytes" diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/dbus/dbus_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/dbus/dbus_test.go new file mode 100644 index 000000000000..96670373b6aa --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/dbus/dbus_test.go @@ -0,0 +1,249 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package dbus
+
+import (
+	"fmt"
+	"os"
+	"testing"
+
+	godbus "github.com/godbus/dbus"
+)
+
+const (
+	DBusNameFlagAllowReplacement uint32 = 1 << (iota + 1)
+	DBusNameFlagReplaceExisting
+	DBusNameFlagDoNotQueue
+)
+
+const (
+	DBusRequestNameReplyPrimaryOwner uint32 = iota + 1
+	DBusRequestNameReplyInQueue
+	DBusRequestNameReplyExists
+	DBusRequestNameReplyAlreadyOwner
+)
+
+const (
+	DBusReleaseNameReplyReleased uint32 = iota + 1
+	DBusReleaseNameReplyNonExistent
+	DBusReleaseNameReplyNotOwner
+)
+
+func doDBusTest(t *testing.T, dbus Interface, real bool) {
+	bus, err := dbus.SystemBus()
+	if err != nil {
+		if !real {
+			t.Errorf("dbus.SystemBus() failed with fake Interface")
+		}
+		t.Skipf("D-Bus is not running: %v", err)
+	}
+	busObj := bus.BusObject()
+
+	id := ""
+	err = busObj.Call("org.freedesktop.DBus.GetId", 0).Store(&id)
+	if err != nil {
+		t.Errorf("expected success, got %v", err)
+	}
+	if len(id) == 0 {
+		t.Errorf("expected non-empty Id, got \"\"")
+	}
+
+	// Switch to the session bus for the rest, since the system bus is more
+	// locked down (and thus harder to trick into emitting signals).
+
+	bus, err = dbus.SessionBus()
+	if err != nil {
+		if !real {
+			t.Errorf("dbus.SessionBus() failed with fake Interface")
+		}
+		t.Skipf("D-Bus session bus is not available: %v", err)
+	}
+	busObj = bus.BusObject()
+
+	name := fmt.Sprintf("io.kubernetes.dbus_test_%d", os.Getpid())
+	owner := ""
+	err = busObj.Call("org.freedesktop.DBus.GetNameOwner", 0, name).Store(&owner)
+	if err == nil {
+		t.Errorf("expected '%s' to be un-owned, but found owner %s", name, owner)
+	}
+	dbuserr, ok := err.(godbus.Error)
+	if !ok {
+		t.Errorf("expected godbus.Error, but got %#v", err)
+	}
+	if dbuserr.Name != "org.freedesktop.DBus.Error.NameHasNoOwner" {
+		t.Errorf("expected NameHasNoOwner error but got %v", err)
+	}
+
+	sigchan := make(chan *godbus.Signal, 10)
+	bus.Signal(sigchan)
+
+	rule := fmt.Sprintf("type='signal',interface='org.freedesktop.DBus',member='NameOwnerChanged',path='/org/freedesktop/DBus',sender='org.freedesktop.DBus',arg0='%s'", name)
+	err = busObj.Call("org.freedesktop.DBus.AddMatch", 0, rule).Store()
+	if err != nil {
+		t.Errorf("expected success, got %v", err)
+	}
+
+	var ret uint32
+	err = busObj.Call("org.freedesktop.DBus.RequestName", 0, name, DBusNameFlagDoNotQueue).Store(&ret)
+	if err != nil {
+		t.Errorf("expected success, got %v", err)
+	}
+	if ret != DBusRequestNameReplyPrimaryOwner {
+		t.Errorf("expected %v, got %v", DBusRequestNameReplyPrimaryOwner, ret)
+	}
+
+	err = busObj.Call("org.freedesktop.DBus.GetNameOwner", 0, name).Store(&owner)
+	if err != nil {
+		t.Errorf("expected success, got %v", err)
+	}
+
+	var changedSignal, acquiredSignal, lostSignal *godbus.Signal
+
+	sig1 := <-sigchan
+	sig2 := <-sigchan
+	// We get two signals, but the order isn't guaranteed
+	if sig1.Name == "org.freedesktop.DBus.NameOwnerChanged" {
+		changedSignal = sig1
+		acquiredSignal = sig2
+	} else {
+		acquiredSignal = sig1
+		changedSignal = sig2
+	}
+
+	if acquiredSignal.Sender != "org.freedesktop.DBus" || acquiredSignal.Name != "org.freedesktop.DBus.NameAcquired" {
+		t.Errorf("expected NameAcquired signal, got %v", acquiredSignal)
+	}
+	acquiredName := acquiredSignal.Body[0].(string)
+	if acquiredName != name {
+		t.Errorf("unexpected NameAcquired arguments: %v", acquiredSignal)
+	}
+
+	if changedSignal.Sender != "org.freedesktop.DBus" || changedSignal.Name != "org.freedesktop.DBus.NameOwnerChanged" {
+		t.Errorf("expected NameOwnerChanged signal, got %v", changedSignal)
+	}
+
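+	// Per the D-Bus spec, NameOwnerChanged's body carries three strings: the
+	// bus name, its previous owner, and its new owner ("" when unowned).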
changedName := changedSignal.Body[0].(string) + oldOwner := changedSignal.Body[1].(string) + newOwner := changedSignal.Body[2].(string) + if changedName != name || oldOwner != "" || newOwner != owner { + t.Errorf("unexpected NameOwnerChanged arguments: %v", changedSignal) + } + + err = busObj.Call("org.freedesktop.DBus.ReleaseName", 0, name).Store(&ret) + if err != nil { + t.Errorf("expected success, got %v", err) + } + if ret != DBusReleaseNameReplyReleased { + t.Errorf("expected %v, got %v", DBusReleaseNameReplyReleased, ret) + } + + sig1 = <-sigchan + sig2 = <-sigchan + if sig1.Name == "org.freedesktop.DBus.NameOwnerChanged" { + changedSignal = sig1 + lostSignal = sig2 + } else { + lostSignal = sig1 + changedSignal = sig2 + } + + if lostSignal.Sender != "org.freedesktop.DBus" || lostSignal.Name != "org.freedesktop.DBus.NameLost" { + t.Errorf("expected NameLost signal, got %v", lostSignal) + } + lostName := lostSignal.Body[0].(string) + if lostName != name { + t.Errorf("unexpected NameLost arguments: %v", lostSignal) + } + + if changedSignal.Sender != "org.freedesktop.DBus" || changedSignal.Name != "org.freedesktop.DBus.NameOwnerChanged" { + t.Errorf("expected NameOwnerChanged signal, got %v", changedSignal) + } + + changedName = changedSignal.Body[0].(string) + oldOwner = changedSignal.Body[1].(string) + newOwner = changedSignal.Body[2].(string) + if changedName != name || oldOwner != owner || newOwner != "" { + t.Errorf("unexpected NameOwnerChanged arguments: %v", changedSignal) + } + + if len(sigchan) != 0 { + t.Errorf("unexpected extra signals (%d)", len(sigchan)) + } + + // Unregister sigchan + bus.Signal(sigchan) +} + +func TestRealDBus(t *testing.T) { + dbus := New() + doDBusTest(t, dbus, true) +} + +func TestFakeDBus(t *testing.T) { + uniqueName := ":1.1" + ownedName := "" + + fakeSystem := NewFakeConnection() + fakeSystem.SetBusObject( + func(method string, args ...interface{}) ([]interface{}, error) { + if method == "org.freedesktop.DBus.GetId" { + return []interface{}{"foo"}, nil + } + return nil, fmt.Errorf("unexpected method call '%s'", method) + }, + ) + + fakeSession := NewFakeConnection() + fakeSession.SetBusObject( + func(method string, args ...interface{}) ([]interface{}, error) { + if method == "org.freedesktop.DBus.GetNameOwner" { + checkName := args[0].(string) + if checkName != ownedName { + return nil, godbus.Error{Name: "org.freedesktop.DBus.Error.NameHasNoOwner", Body: nil} + } else { + return []interface{}{uniqueName}, nil + } + } else if method == "org.freedesktop.DBus.RequestName" { + reqName := args[0].(string) + _ = args[1].(uint32) + if ownedName != "" { + return []interface{}{DBusRequestNameReplyAlreadyOwner}, nil + } + ownedName = reqName + fakeSession.EmitSignal("org.freedesktop.DBus", "/org/freedesktop/DBus", "org.freedesktop.DBus", "NameAcquired", reqName) + fakeSession.EmitSignal("org.freedesktop.DBus", "/org/freedesktop/DBus", "org.freedesktop.DBus", "NameOwnerChanged", reqName, "", uniqueName) + return []interface{}{DBusRequestNameReplyPrimaryOwner}, nil + } else if method == "org.freedesktop.DBus.ReleaseName" { + reqName := args[0].(string) + if reqName != ownedName { + return []interface{}{DBusReleaseNameReplyNotOwner}, nil + } + ownedName = "" + fakeSession.EmitSignal("org.freedesktop.DBus", "/org/freedesktop/DBus", "org.freedesktop.DBus", "NameOwnerChanged", reqName, uniqueName, "") + fakeSession.EmitSignal("org.freedesktop.DBus", "/org/freedesktop/DBus", "org.freedesktop.DBus", "NameLost", reqName) + return 
[]interface{}{DBusReleaseNameReplyReleased}, nil + } else if method == "org.freedesktop.DBus.AddMatch" { + return nil, nil + } else { + return nil, fmt.Errorf("unexpected method call '%s'", method) + } + }, + ) + + dbus := NewFake(fakeSystem, fakeSession) + doDBusTest(t, dbus, false) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/deployment/deployment.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/deployment/deployment.go index 6160f5d138f9..9df49a2d8bc2 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/deployment/deployment.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/deployment/deployment.go @@ -237,8 +237,8 @@ func SetFromReplicaSetTemplate(deployment *extensions.Deployment, template api.P } // Returns the sum of Replicas of the given replica sets. -func GetReplicaCountForReplicaSets(replicaSets []*extensions.ReplicaSet) int { - totalReplicaCount := 0 +func GetReplicaCountForReplicaSets(replicaSets []*extensions.ReplicaSet) int32 { + totalReplicaCount := int32(0) for _, rs := range replicaSets { if rs != nil { totalReplicaCount += rs.Spec.Replicas @@ -248,8 +248,8 @@ func GetReplicaCountForReplicaSets(replicaSets []*extensions.ReplicaSet) int { } // GetActualReplicaCountForReplicaSets returns the sum of actual replicas of the given replica sets. -func GetActualReplicaCountForReplicaSets(replicaSets []*extensions.ReplicaSet) int { - totalReplicaCount := 0 +func GetActualReplicaCountForReplicaSets(replicaSets []*extensions.ReplicaSet) int32 { + totalReplicaCount := int32(0) for _, rs := range replicaSets { if rs != nil { totalReplicaCount += rs.Status.Replicas @@ -259,7 +259,7 @@ func GetActualReplicaCountForReplicaSets(replicaSets []*extensions.ReplicaSet) i } // Returns the number of available pods corresponding to the given replica sets. -func GetAvailablePodsForReplicaSets(c clientset.Interface, rss []*extensions.ReplicaSet, minReadySeconds int) (int, error) { +func GetAvailablePodsForReplicaSets(c clientset.Interface, rss []*extensions.ReplicaSet, minReadySeconds int32) (int32, error) { allPods, err := GetPodsForReplicaSets(c, rss) if err != nil { return 0, err @@ -267,8 +267,8 @@ func GetAvailablePodsForReplicaSets(c clientset.Interface, rss []*extensions.Rep return getReadyPodsCount(allPods, minReadySeconds), nil } -func getReadyPodsCount(pods []api.Pod, minReadySeconds int) int { - readyPodCount := 0 +func getReadyPodsCount(pods []api.Pod, minReadySeconds int32) int32 { + readyPodCount := int32(0) for _, pod := range pods { if IsPodAvailable(&pod, minReadySeconds) { readyPodCount++ @@ -277,7 +277,7 @@ func getReadyPodsCount(pods []api.Pod, minReadySeconds int) int { return readyPodCount } -func IsPodAvailable(pod *api.Pod, minReadySeconds int) bool { +func IsPodAvailable(pod *api.Pod, minReadySeconds int32) bool { if !controller.IsPodActive(*pod) { return false } @@ -286,11 +286,11 @@ func IsPodAvailable(pod *api.Pod, minReadySeconds int) bool { for _, c := range pod.Status.Conditions { // we only care about pod ready conditions if c.Type == api.PodReady && c.Status == api.ConditionTrue { - // 2 cases that this ready condition is valid (passed minReadySeconds, i.e. the pod is ready): - // 1. minReadySeconds <= 0 + // 2 cases that this ready condition is valid (passed minReadySeconds, i.e. the pod is available): + // 1. minReadySeconds == 0, or // 2. 
LastTransitionTime (is set) + minReadySeconds (>0) < current time minReadySecondsDuration := time.Duration(minReadySeconds) * time.Second - if minReadySeconds <= 0 || !c.LastTransitionTime.IsZero() && c.LastTransitionTime.Add(minReadySecondsDuration).Before(time.Now()) { + if minReadySeconds == 0 || !c.LastTransitionTime.IsZero() && c.LastTransitionTime.Add(minReadySecondsDuration).Before(time.Now()) { return true } } @@ -340,17 +340,17 @@ func IsRollingUpdate(deployment *extensions.Deployment) bool { // When one of the followings is true, we're rolling out the deployment; otherwise, we're scaling it. // 1) The new RS is saturated: newRS's replicas == deployment's replicas // 2) Max number of pods allowed is reached: deployment's replicas + maxSurge == all RSs' replicas -func NewRSNewReplicas(deployment *extensions.Deployment, allRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet) (int, error) { +func NewRSNewReplicas(deployment *extensions.Deployment, allRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet) (int32, error) { switch deployment.Spec.Strategy.Type { case extensions.RollingUpdateDeploymentStrategyType: // Check if we can scale up. - maxSurge, err := intstrutil.GetValueFromIntOrPercent(&deployment.Spec.Strategy.RollingUpdate.MaxSurge, deployment.Spec.Replicas, true) + maxSurge, err := intstrutil.GetValueFromIntOrPercent(&deployment.Spec.Strategy.RollingUpdate.MaxSurge, int(deployment.Spec.Replicas), true) if err != nil { return 0, err } // Find the total number of pods currentPodCount := GetReplicaCountForReplicaSets(allRSs) - maxTotalPods := deployment.Spec.Replicas + maxSurge + maxTotalPods := deployment.Spec.Replicas + int32(maxSurge) if currentPodCount >= maxTotalPods { // Cannot scale up. return newRS.Spec.Replicas, nil @@ -358,7 +358,7 @@ func NewRSNewReplicas(deployment *extensions.Deployment, allRSs []*extensions.Re // Scale up. scaleUpCount := maxTotalPods - currentPodCount // Do not exceed the number of desired replicas. 
- scaleUpCount = integer.IntMin(scaleUpCount, deployment.Spec.Replicas-newRS.Spec.Replicas) + scaleUpCount = int32(integer.IntMin(int(scaleUpCount), int(deployment.Spec.Replicas-newRS.Spec.Replicas))) return newRS.Spec.Replicas + scaleUpCount, nil case extensions.RecreateDeploymentStrategyType: return deployment.Spec.Replicas, nil @@ -389,12 +389,12 @@ func WaitForObservedDeployment(getDeploymentFunc func() (*extensions.Deployment, // 1 desired, max unavailable 25%, surge 1% - should scale new(+1), then old(-1) // 2 desired, max unavailable 0%, surge 1% - should scale new(+1), then old(-1), then new(+1), then old(-1) // 1 desired, max unavailable 0%, surge 1% - should scale new(+1), then old(-1) -func ResolveFenceposts(maxSurge, maxUnavailable *intstrutil.IntOrString, desired int) (int, int, error) { - surge, err := intstrutil.GetValueFromIntOrPercent(maxSurge, desired, true) +func ResolveFenceposts(maxSurge, maxUnavailable *intstrutil.IntOrString, desired int32) (int32, int32, error) { + surge, err := intstrutil.GetValueFromIntOrPercent(maxSurge, int(desired), true) if err != nil { return 0, 0, err } - unavailable, err := intstrutil.GetValueFromIntOrPercent(maxUnavailable, desired, false) + unavailable, err := intstrutil.GetValueFromIntOrPercent(maxUnavailable, int(desired), false) if err != nil { return 0, 0, err } @@ -407,5 +407,5 @@ func ResolveFenceposts(maxSurge, maxUnavailable *intstrutil.IntOrString, desired unavailable = 1 } - return surge, unavailable, nil + return int32(surge), int32(unavailable), nil } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/deployment/deployment_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/deployment/deployment_test.go new file mode 100644 index 000000000000..31d23d2c18bc --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/deployment/deployment_test.go @@ -0,0 +1,409 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package deployment + +import ( + "fmt" + "reflect" + "testing" + "time" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" + "k8s.io/kubernetes/pkg/client/testing/core" + "k8s.io/kubernetes/pkg/client/unversioned/testclient" + "k8s.io/kubernetes/pkg/runtime" +) + +func addListRSReactor(fakeClient *fake.Clientset, obj runtime.Object) *fake.Clientset { + fakeClient.AddReactor("list", "replicasets", func(action core.Action) (handled bool, ret runtime.Object, err error) { + return true, obj, nil + }) + return fakeClient +} + +func addListPodsReactor(fakeClient *fake.Clientset, obj runtime.Object) *fake.Clientset { + fakeClient.AddReactor("list", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) { + return true, obj, nil + }) + return fakeClient +} + +func addGetRSReactor(fakeClient *fake.Clientset, obj runtime.Object) *fake.Clientset { + rsList, ok := obj.(*extensions.ReplicaSetList) + fakeClient.AddReactor("get", "replicasets", func(action core.Action) (handled bool, ret runtime.Object, err error) { + name := action.(testclient.GetAction).GetName() + if ok { + for _, rs := range rsList.Items { + if rs.Name == name { + return true, &rs, nil + } + } + } + return false, nil, fmt.Errorf("could not find the requested replica set: %s", name) + + }) + return fakeClient +} + +func addUpdateRSReactor(fakeClient *fake.Clientset) *fake.Clientset { + fakeClient.AddReactor("update", "replicasets", func(action core.Action) (handled bool, ret runtime.Object, err error) { + obj := action.(testclient.UpdateAction).GetObject().(*extensions.ReplicaSet) + return true, obj, nil + }) + return fakeClient +} + +func addUpdatePodsReactor(fakeClient *fake.Clientset) *fake.Clientset { + fakeClient.AddReactor("update", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) { + obj := action.(testclient.UpdateAction).GetObject().(*api.Pod) + return true, obj, nil + }) + return fakeClient +} + +func newPod(now time.Time, ready bool, beforeSec int) api.Pod { + conditionStatus := api.ConditionFalse + if ready { + conditionStatus = api.ConditionTrue + } + return api.Pod{ + Status: api.PodStatus{ + Conditions: []api.PodCondition{ + { + Type: api.PodReady, + LastTransitionTime: unversioned.NewTime(now.Add(-1 * time.Duration(beforeSec) * time.Second)), + Status: conditionStatus, + }, + }, + }, + } +} + +func TestGetReadyPodsCount(t *testing.T) { + now := time.Now() + tests := []struct { + pods []api.Pod + minReadySeconds int + expected int + }{ + { + []api.Pod{ + newPod(now, true, 0), + newPod(now, true, 2), + newPod(now, false, 1), + }, + 1, + 1, + }, + { + []api.Pod{ + newPod(now, true, 2), + newPod(now, true, 11), + newPod(now, true, 5), + }, + 10, + 1, + }, + } + + for _, test := range tests { + if count := getReadyPodsCount(test.pods, int32(test.minReadySeconds)); int(count) != test.expected { + t.Errorf("Pods = %#v, minReadySeconds = %d, expected %d, got %d", test.pods, test.minReadySeconds, test.expected, count) + } + } +} + +// generatePodFromRS creates a pod, with the input ReplicaSet's selector and its template +func generatePodFromRS(rs extensions.ReplicaSet) api.Pod { + return api.Pod{ + ObjectMeta: api.ObjectMeta{ + Labels: rs.Labels, + }, + Spec: rs.Spec.Template.Spec, + } +} + +func generatePod(labels map[string]string, image string) api.Pod { + return api.Pod{ + ObjectMeta: api.ObjectMeta{ + Labels: labels, + }, + 
Spec: api.PodSpec{ + Containers: []api.Container{ + { + Name: image, + Image: image, + ImagePullPolicy: api.PullAlways, + TerminationMessagePath: api.TerminationMessagePathDefault, + }, + }, + }, + } +} + +func generateRSWithLabel(labels map[string]string, image string) extensions.ReplicaSet { + return extensions.ReplicaSet{ + ObjectMeta: api.ObjectMeta{ + Name: api.SimpleNameGenerator.GenerateName("replicaset"), + Labels: labels, + }, + Spec: extensions.ReplicaSetSpec{ + Replicas: 1, + Selector: &unversioned.LabelSelector{MatchLabels: labels}, + Template: api.PodTemplateSpec{ + Spec: api.PodSpec{ + Containers: []api.Container{ + { + Name: image, + Image: image, + ImagePullPolicy: api.PullAlways, + TerminationMessagePath: api.TerminationMessagePathDefault, + }, + }, + }, + }, + }, + } +} + +// generateRS creates a replica set, with the input deployment's template as its template +func generateRS(deployment extensions.Deployment) extensions.ReplicaSet { + template := GetNewReplicaSetTemplate(&deployment) + return extensions.ReplicaSet{ + ObjectMeta: api.ObjectMeta{ + Name: api.SimpleNameGenerator.GenerateName("replicaset"), + Labels: template.Labels, + }, + Spec: extensions.ReplicaSetSpec{ + Template: template, + Selector: &unversioned.LabelSelector{MatchLabels: template.Labels}, + }, + } +} + +// generateDeployment creates a deployment, with the input image as its template +func generateDeployment(image string) extensions.Deployment { + podLabels := map[string]string{"name": image} + terminationSec := int64(30) + return extensions.Deployment{ + ObjectMeta: api.ObjectMeta{ + Name: image, + }, + Spec: extensions.DeploymentSpec{ + Replicas: 1, + Selector: &unversioned.LabelSelector{MatchLabels: podLabels}, + Template: api.PodTemplateSpec{ + ObjectMeta: api.ObjectMeta{ + Labels: podLabels, + }, + Spec: api.PodSpec{ + Containers: []api.Container{ + { + Name: image, + Image: image, + ImagePullPolicy: api.PullAlways, + TerminationMessagePath: api.TerminationMessagePathDefault, + }, + }, + DNSPolicy: api.DNSClusterFirst, + TerminationGracePeriodSeconds: &terminationSec, + RestartPolicy: api.RestartPolicyAlways, + SecurityContext: &api.PodSecurityContext{}, + }, + }, + }, + } +} + +func TestGetNewRC(t *testing.T) { + newDeployment := generateDeployment("nginx") + newRC := generateRS(newDeployment) + + tests := []struct { + test string + objs []runtime.Object + expected *extensions.ReplicaSet + }{ + { + "No new ReplicaSet", + []runtime.Object{ + &api.PodList{}, + &extensions.ReplicaSetList{ + Items: []extensions.ReplicaSet{ + generateRS(generateDeployment("foo")), + generateRS(generateDeployment("bar")), + }, + }, + }, + nil, + }, + { + "Has new ReplicaSet", + []runtime.Object{ + &api.PodList{}, + &extensions.ReplicaSetList{ + Items: []extensions.ReplicaSet{ + generateRS(generateDeployment("foo")), + generateRS(generateDeployment("bar")), + generateRS(generateDeployment("abc")), + newRC, + generateRS(generateDeployment("xyz")), + }, + }, + }, + &newRC, + }, + } + + for _, test := range tests { + fakeClient := &fake.Clientset{} + fakeClient = addListPodsReactor(fakeClient, test.objs[0]) + fakeClient = addListRSReactor(fakeClient, test.objs[1]) + fakeClient = addUpdatePodsReactor(fakeClient) + fakeClient = addUpdateRSReactor(fakeClient) + rs, err := GetNewReplicaSet(&newDeployment, fakeClient) + if err != nil { + t.Errorf("In test case %s, got unexpected error %v", test.test, err) + } + if !api.Semantic.DeepEqual(rs, test.expected) { + t.Errorf("In test case %s, expected %+v, got %+v", test.test, 
test.expected, rs) + } + } +} + +func TestGetOldRCs(t *testing.T) { + newDeployment := generateDeployment("nginx") + newRS := generateRS(newDeployment) + newRS.Status.FullyLabeledReplicas = newRS.Spec.Replicas + newPod := generatePodFromRS(newRS) + + // create 2 old deployments and related replica sets/pods, with the same labels but different template + oldDeployment := generateDeployment("nginx") + oldDeployment.Spec.Template.Spec.Containers[0].Name = "nginx-old-1" + oldRS := generateRS(oldDeployment) + oldRS.Status.FullyLabeledReplicas = oldRS.Spec.Replicas + oldPod := generatePodFromRS(oldRS) + oldDeployment2 := generateDeployment("nginx") + oldDeployment2.Spec.Template.Spec.Containers[0].Name = "nginx-old-2" + oldRS2 := generateRS(oldDeployment2) + oldRS2.Status.FullyLabeledReplicas = oldRS2.Spec.Replicas + oldPod2 := generatePodFromRS(oldRS2) + + // create 1 ReplicaSet that existed before the deployment, with the same labels as the deployment + existedPod := generatePod(newDeployment.Spec.Template.Labels, "foo") + existedRS := generateRSWithLabel(newDeployment.Spec.Template.Labels, "foo") + existedRS.Status.FullyLabeledReplicas = existedRS.Spec.Replicas + + tests := []struct { + test string + objs []runtime.Object + expected []*extensions.ReplicaSet + }{ + { + "No old ReplicaSets", + []runtime.Object{ + &api.PodList{ + Items: []api.Pod{ + generatePod(newDeployment.Spec.Template.Labels, "foo"), + generatePod(newDeployment.Spec.Template.Labels, "bar"), + newPod, + }, + }, + &extensions.ReplicaSetList{ + Items: []extensions.ReplicaSet{ + generateRS(generateDeployment("foo")), + newRS, + generateRS(generateDeployment("bar")), + }, + }, + }, + []*extensions.ReplicaSet{}, + }, + { + "Has old ReplicaSet", + []runtime.Object{ + &api.PodList{ + Items: []api.Pod{ + oldPod, + oldPod2, + generatePod(map[string]string{"name": "bar"}, "bar"), + generatePod(map[string]string{"name": "xyz"}, "xyz"), + existedPod, + generatePod(newDeployment.Spec.Template.Labels, "abc"), + }, + }, + &extensions.ReplicaSetList{ + Items: []extensions.ReplicaSet{ + oldRS2, + oldRS, + existedRS, + newRS, + generateRSWithLabel(map[string]string{"name": "xyz"}, "xyz"), + generateRSWithLabel(map[string]string{"name": "bar"}, "bar"), + }, + }, + }, + []*extensions.ReplicaSet{&oldRS, &oldRS2, &existedRS}, + }, + } + + for _, test := range tests { + fakeClient := &fake.Clientset{} + fakeClient = addListPodsReactor(fakeClient, test.objs[0]) + fakeClient = addListRSReactor(fakeClient, test.objs[1]) + fakeClient = addGetRSReactor(fakeClient, test.objs[1]) + fakeClient = addUpdatePodsReactor(fakeClient) + fakeClient = addUpdateRSReactor(fakeClient) + rss, _, err := GetOldReplicaSets(&newDeployment, fakeClient) + if err != nil { + t.Errorf("In test case %s, got unexpected error %v", test.test, err) + } + if !equal(rss, test.expected) { + t.Errorf("In test case %q, expected:", test.test) + for _, rs := range test.expected { + t.Errorf("rs = %+v", rs) + } + t.Errorf("In test case %q, got:", test.test) + for _, rs := range rss { + t.Errorf("rs = %+v", rs) + } + } + } +} + +// equal compares the equality of two ReplicaSet slices regardless of their ordering +func equal(rss1, rss2 []*extensions.ReplicaSet) bool { + if reflect.DeepEqual(rss1, rss2) { + return true + } + if rss1 == nil || rss2 == nil || len(rss1) != len(rss2) { + return false + } + count := 0 + for _, rs1 := range rss1 { + for _, rs2 := range rss2 { + if reflect.DeepEqual(rs1, rs2) { + count++ + break + } + } + } + return count == len(rss1) +} diff --git 
a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/diff.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/diff.go deleted file mode 100644 index 4e203cb93723..000000000000 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/diff.go +++ /dev/null @@ -1,121 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package util - -import ( - "bytes" - "encoding/json" - "fmt" - "strings" - "text/tabwriter" - - "github.com/davecgh/go-spew/spew" -) - -// StringDiff diffs a and b and returns a human readable diff. -func StringDiff(a, b string) string { - ba := []byte(a) - bb := []byte(b) - out := []byte{} - i := 0 - for ; i < len(ba) && i < len(bb); i++ { - if ba[i] != bb[i] { - break - } - out = append(out, ba[i]) - } - out = append(out, []byte("\n\nA: ")...) - out = append(out, ba[i:]...) - out = append(out, []byte("\n\nB: ")...) - out = append(out, bb[i:]...) - out = append(out, []byte("\n\n")...) - return string(out) -} - -// ObjectDiff writes the two objects out as JSON and prints out the identical part of -// the objects followed by the remaining part of 'a' and finally the remaining part of 'b'. -// For debugging tests. -func ObjectDiff(a, b interface{}) string { - ab, err := json.Marshal(a) - if err != nil { - panic(fmt.Sprintf("a: %v", err)) - } - bb, err := json.Marshal(b) - if err != nil { - panic(fmt.Sprintf("b: %v", err)) - } - return StringDiff(string(ab), string(bb)) -} - -// ObjectGoPrintDiff is like ObjectDiff, but uses go-spew to print the objects, -// which shows absolutely everything by recursing into every single pointer -// (go's %#v formatters OTOH stop at a certain point). This is needed when you -// can't figure out why reflect.DeepEqual is returning false and nothing is -// showing you differences. This will. -func ObjectGoPrintDiff(a, b interface{}) string { - s := spew.ConfigState{DisableMethods: true} - return StringDiff( - s.Sprintf("%#v", a), - s.Sprintf("%#v", b), - ) -} - -// ObjectGoPrintSideBySide prints a and b as textual dumps side by side, -// enabling easy visual scanning for mismatches. -func ObjectGoPrintSideBySide(a, b interface{}) string { - s := spew.ConfigState{ - Indent: " ", - // Extra deep spew. 
- DisableMethods: true, - } - sA := s.Sdump(a) - sB := s.Sdump(b) - - linesA := strings.Split(sA, "\n") - linesB := strings.Split(sB, "\n") - width := 0 - for _, s := range linesA { - l := len(s) - if l > width { - width = l - } - } - for _, s := range linesB { - l := len(s) - if l > width { - width = l - } - } - buf := &bytes.Buffer{} - w := tabwriter.NewWriter(buf, width, 0, 1, ' ', 0) - max := len(linesA) - if len(linesB) > max { - max = len(linesB) - } - for i := 0; i < max; i++ { - var a, b string - if i < len(linesA) { - a = linesA[i] - } - if i < len(linesB) { - b = linesB[i] - } - fmt.Fprintf(w, "%s\t%s\n", a, b) - } - w.Flush() - return buf.String() -} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/diff/diff.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/diff/diff.go new file mode 100644 index 000000000000..c1fd723752bb --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/diff/diff.go @@ -0,0 +1,267 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package diff + +import ( + "bytes" + "encoding/json" + "fmt" + "reflect" + "sort" + "strings" + "text/tabwriter" + + "github.com/davecgh/go-spew/spew" + + "k8s.io/kubernetes/pkg/util/validation/field" +) + +// StringDiff diffs a and b and returns a human readable diff. +func StringDiff(a, b string) string { + ba := []byte(a) + bb := []byte(b) + out := []byte{} + i := 0 + for ; i < len(ba) && i < len(bb); i++ { + if ba[i] != bb[i] { + break + } + out = append(out, ba[i]) + } + out = append(out, []byte("\n\nA: ")...) + out = append(out, ba[i:]...) + out = append(out, []byte("\n\nB: ")...) + out = append(out, bb[i:]...) + out = append(out, []byte("\n\n")...) + return string(out) +} + +// ObjectDiff writes the two objects out as JSON and prints out the identical part of +// the objects followed by the remaining part of 'a' and finally the remaining part of 'b'. +// For debugging tests. +func ObjectDiff(a, b interface{}) string { + ab, err := json.Marshal(a) + if err != nil { + panic(fmt.Sprintf("a: %v", err)) + } + bb, err := json.Marshal(b) + if err != nil { + panic(fmt.Sprintf("b: %v", err)) + } + return StringDiff(string(ab), string(bb)) +} + +// ObjectGoPrintDiff is like ObjectDiff, but uses go-spew to print the objects, +// which shows absolutely everything by recursing into every single pointer +// (go's %#v formatters OTOH stop at a certain point). This is needed when you +// can't figure out why reflect.DeepEqual is returning false and nothing is +// showing you differences. This will. 
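// For example (illustrative values), the simpler ObjectDiff above renders
//
//	fmt.Println(ObjectDiff(map[string]string{"x": "1"}, map[string]string{"x": "2"}))
//
// as the shared prefix of the two JSON encodings followed by the diverging
// tails under "A:" and "B:" markers; ObjectGoPrintDiff does the same with
// go-spew dumps instead of JSON.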
+func ObjectGoPrintDiff(a, b interface{}) string { + s := spew.ConfigState{DisableMethods: true} + return StringDiff( + s.Sprintf("%#v", a), + s.Sprintf("%#v", b), + ) +} + +func ObjectReflectDiff(a, b interface{}) string { + vA, vB := reflect.ValueOf(a), reflect.ValueOf(b) + if vA.Type() != vB.Type() { + return fmt.Sprintf("type A %T and type B %T do not match", a, b) + } + diffs := objectReflectDiff(field.NewPath("object"), vA, vB) + if len(diffs) == 0 { + return "" + } + out := []string{""} + for _, d := range diffs { + out = append(out, + fmt.Sprintf("%s:", d.path), + limit(fmt.Sprintf(" a: %#v", d.a), 80), + limit(fmt.Sprintf(" b: %#v", d.b), 80), + ) + } + return strings.Join(out, "\n") +} + +func limit(s string, max int) string { + if len(s) > max { + return s[:max] + } + return s +} + +func public(s string) bool { + if len(s) == 0 { + return false + } + return s[:1] == strings.ToUpper(s[:1]) +} + +type diff struct { + path *field.Path + a, b interface{} +} + +type orderedDiffs []diff + +func (d orderedDiffs) Len() int { return len(d) } +func (d orderedDiffs) Swap(i, j int) { d[i], d[j] = d[j], d[i] } +func (d orderedDiffs) Less(i, j int) bool { + a, b := d[i].path.String(), d[j].path.String() + if a < b { + return true + } + return false +} + +func objectReflectDiff(path *field.Path, a, b reflect.Value) []diff { + switch a.Type().Kind() { + case reflect.Struct: + var changes []diff + for i := 0; i < a.Type().NumField(); i++ { + if !public(a.Type().Field(i).Name) { + if reflect.DeepEqual(a.Interface(), b.Interface()) { + return nil + } + return []diff{{path: path, a: fmt.Sprintf("%#v", a), b: fmt.Sprintf("%#v", b)}} + } + if sub := objectReflectDiff(path.Child(a.Type().Field(i).Name), a.Field(i), b.Field(i)); len(sub) > 0 { + changes = append(changes, sub...) 
+ } + } + return changes + case reflect.Ptr: + if a.IsNil() || b.IsNil() { + switch { + case a.IsNil() && b.IsNil(): + return nil + case a.IsNil(): + return []diff{{path: path, a: nil, b: b.Interface()}} + default: + return []diff{{path: path, a: a.Interface(), b: nil}} + } + } + return objectReflectDiff(path, a.Elem(), b.Elem()) + case reflect.Chan: + if !reflect.DeepEqual(a.Interface(), b.Interface()) { + return []diff{{path: path, a: a.Interface(), b: b.Interface()}} + } + return nil + case reflect.Slice: + if reflect.DeepEqual(a, b) { + return nil + } + lA, lB := a.Len(), b.Len() + l := lA + if lB < lA { + l = lB + } + for i := 0; i < l; i++ { + if !reflect.DeepEqual(a.Index(i), b.Index(i)) { + return objectReflectDiff(path.Index(i), a.Index(i), b.Index(i)) + } + } + var diffs []diff + for i := l; i < lA; i++ { + diffs = append(diffs, diff{path: path.Index(i), a: a.Index(i), b: nil}) + } + for i := l; i < lB; i++ { + diffs = append(diffs, diff{path: path.Index(i), a: nil, b: b.Index(i)}) + } + return diffs + case reflect.Map: + if reflect.DeepEqual(a, b) { + return nil + } + aKeys := make(map[interface{}]interface{}) + for _, key := range a.MapKeys() { + aKeys[key.Interface()] = a.MapIndex(key).Interface() + } + var missing []diff + for _, key := range b.MapKeys() { + if _, ok := aKeys[key.Interface()]; ok { + delete(aKeys, key.Interface()) + if reflect.DeepEqual(a.MapIndex(key).Interface(), b.MapIndex(key).Interface()) { + continue + } + missing = append(missing, diff{path: path.Key(fmt.Sprintf("%s", key.Interface())), a: a.MapIndex(key).Interface(), b: b.MapIndex(key).Interface()}) + continue + } + missing = append(missing, diff{path: path.Key(fmt.Sprintf("%s", key.Interface())), a: nil, b: b.MapIndex(key).Interface()}) + } + for key, value := range aKeys { + missing = append(missing, diff{path: path.Key(fmt.Sprintf("%s", key)), a: value, b: nil}) + } + sort.Sort(orderedDiffs(missing)) + return missing + default: + if reflect.DeepEqual(a.Interface(), b.Interface()) { + return nil + } + if !a.CanInterface() { + return []diff{{path: path, a: fmt.Sprintf("%#v", a), b: fmt.Sprintf("%#v", b)}} + } + return []diff{{path: path, a: a.Interface(), b: b.Interface()}} + } +} + +// ObjectGoPrintSideBySide prints a and b as textual dumps side by side, +// enabling easy visual scanning for mismatches. +func ObjectGoPrintSideBySide(a, b interface{}) string { + s := spew.ConfigState{ + Indent: " ", + // Extra deep spew. + DisableMethods: true, + } + sA := s.Sdump(a) + sB := s.Sdump(b) + + linesA := strings.Split(sA, "\n") + linesB := strings.Split(sB, "\n") + width := 0 + for _, s := range linesA { + l := len(s) + if l > width { + width = l + } + } + for _, s := range linesB { + l := len(s) + if l > width { + width = l + } + } + buf := &bytes.Buffer{} + w := tabwriter.NewWriter(buf, width, 0, 1, ' ', 0) + max := len(linesA) + if len(linesB) > max { + max = len(linesB) + } + for i := 0; i < max; i++ { + var a, b string + if i < len(linesA) { + a = linesA[i] + } + if i < len(linesB) { + b = linesB[i] + } + fmt.Fprintf(w, "%s\t%s\n", a, b) + } + w.Flush() + return buf.String() +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/diff/diff_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/diff/diff_test.go new file mode 100644 index 000000000000..925b4a3129cd --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/diff/diff_test.go @@ -0,0 +1,39 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package diff + +import ( + "testing" +) + +func TestObjectReflectDiff(t *testing.T) { + expect := ` +object[other]: + a: 2 + b: <nil> +object[test]: + a: 1 + b: 2 +object[third]: + a: <nil> + b: 3` + a := map[string]int{"test": 1, "other": 2} + b := map[string]int{"test": 2, "third": 3} + if actual := ObjectReflectDiff(a, b); actual != expect { + t.Errorf("unexpected output: %s", actual) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/env.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/env/env.go similarity index 98% rename from Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/env.go rename to Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/env/env.go index 6a479bd94999..ad4310c25cea 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/env.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/env/env.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package util +package env import ( "os" diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/env/env_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/env/env_test.go new file mode 100644 index 000000000000..f7ff0a235136 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/env/env_test.go @@ -0,0 +1,68 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package env + +import ( + "os" + "strconv" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestGetEnvAsStringOrFallback(t *testing.T) { + const expected = "foo" + + assert := assert.New(t) + + key := "FLOCKER_SET_VAR" + os.Setenv(key, expected) + assert.Equal(expected, GetEnvAsStringOrFallback(key, "~"+expected)) + + key = "FLOCKER_UNSET_VAR" + assert.Equal(expected, GetEnvAsStringOrFallback(key, expected)) +} + +func TestGetEnvAsIntOrFallback(t *testing.T) { + const expected = 1 + + assert := assert.New(t) + + key := "FLOCKER_SET_VAR" + os.Setenv(key, strconv.Itoa(expected)) + returnVal, _ := GetEnvAsIntOrFallback(key, 1) + assert.Equal(expected, returnVal) + + key = "FLOCKER_UNSET_VAR" + returnVal, _ = GetEnvAsIntOrFallback(key, expected) + assert.Equal(expected, returnVal) +} + +func TestGetEnvAsFloat64OrFallback(t *testing.T) { + const expected = 1.0 + + assert := assert.New(t) + + key := "FLOCKER_SET_VAR" + os.Setenv(key, "1.0") + returnVal, _ := GetEnvAsFloat64OrFallback(key, 2.0) + assert.Equal(expected, returnVal) + + key = "FLOCKER_UNSET_VAR" + returnVal, _ = GetEnvAsFloat64OrFallback(key, 1.0) + assert.Equal(expected, returnVal) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/errors/errors_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/errors/errors_test.go new file mode 100644 index 000000000000..7ecf919ffb0e --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/errors/errors_test.go @@ -0,0 +1,286 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package errors + +import ( + "fmt" + "reflect" + "testing" +) + +func TestEmptyAggregate(t *testing.T) { + var slice []error + var agg Aggregate + var err error + + agg = NewAggregate(slice) + if agg != nil { + t.Errorf("expected nil, got %#v", agg) + } + err = NewAggregate(slice) + if err != nil { + t.Errorf("expected nil, got %#v", err) + } + + // This is not normally possible, but pedantry demands I test it. 
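	// (NewAggregate returns nil for an empty slice, as checked above, so the
	// only way to obtain an empty non-nil Aggregate is to convert the slice
	// type directly; the assertions below pin down the resulting invariants:
	// Error() == "" and len(Errors()) == 0.)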
+ agg = aggregate(slice) // empty aggregate + if s := agg.Error(); s != "" { + t.Errorf("expected empty string, got %q", s) + } + if s := agg.Errors(); len(s) != 0 { + t.Errorf("expected empty slice, got %#v", s) + } + err = agg.(error) + if s := err.Error(); s != "" { + t.Errorf("expected empty string, got %q", s) + } +} + +func TestSingularAggregate(t *testing.T) { + var slice []error = []error{fmt.Errorf("err")} + var agg Aggregate + var err error + + agg = NewAggregate(slice) + if agg == nil { + t.Errorf("expected non-nil") + } + if s := agg.Error(); s != "err" { + t.Errorf("expected 'err', got %q", s) + } + if s := agg.Errors(); len(s) != 1 { + t.Errorf("expected one-element slice, got %#v", s) + } + if s := agg.Errors()[0].Error(); s != "err" { + t.Errorf("expected 'err', got %q", s) + } + + err = agg.(error) + if err == nil { + t.Errorf("expected non-nil") + } + if s := err.Error(); s != "err" { + t.Errorf("expected 'err', got %q", s) + } +} + +func TestPluralAggregate(t *testing.T) { + var slice []error = []error{fmt.Errorf("abc"), fmt.Errorf("123")} + var agg Aggregate + var err error + + agg = NewAggregate(slice) + if agg == nil { + t.Errorf("expected non-nil") + } + if s := agg.Error(); s != "[abc, 123]" { + t.Errorf("expected '[abc, 123]', got %q", s) + } + if s := agg.Errors(); len(s) != 2 { + t.Errorf("expected two-element slice, got %#v", s) + } + if s := agg.Errors()[0].Error(); s != "abc" { + t.Errorf("expected 'abc', got %q", s) + } + + err = agg.(error) + if err == nil { + t.Errorf("expected non-nil") + } + if s := err.Error(); s != "[abc, 123]" { + t.Errorf("expected '[abc, 123]', got %q", s) + } +} + +func TestFilterOut(t *testing.T) { + testCases := []struct { + err error + filter []Matcher + expected error + }{ + { + nil, + []Matcher{}, + nil, + }, + { + aggregate{}, + []Matcher{}, + nil, + }, + { + aggregate{fmt.Errorf("abc")}, + []Matcher{}, + aggregate{fmt.Errorf("abc")}, + }, + { + aggregate{fmt.Errorf("abc")}, + []Matcher{func(err error) bool { return false }}, + aggregate{fmt.Errorf("abc")}, + }, + { + aggregate{fmt.Errorf("abc")}, + []Matcher{func(err error) bool { return true }}, + nil, + }, + { + aggregate{fmt.Errorf("abc")}, + []Matcher{func(err error) bool { return false }, func(err error) bool { return false }}, + aggregate{fmt.Errorf("abc")}, + }, + { + aggregate{fmt.Errorf("abc")}, + []Matcher{func(err error) bool { return false }, func(err error) bool { return true }}, + nil, + }, + { + aggregate{fmt.Errorf("abc"), fmt.Errorf("def"), fmt.Errorf("ghi")}, + []Matcher{func(err error) bool { return err.Error() == "def" }}, + aggregate{fmt.Errorf("abc"), fmt.Errorf("ghi")}, + }, + { + aggregate{aggregate{fmt.Errorf("abc")}}, + []Matcher{}, + aggregate{aggregate{fmt.Errorf("abc")}}, + }, + { + aggregate{aggregate{fmt.Errorf("abc"), aggregate{fmt.Errorf("def")}}}, + []Matcher{}, + aggregate{aggregate{fmt.Errorf("abc"), aggregate{fmt.Errorf("def")}}}, + }, + { + aggregate{aggregate{fmt.Errorf("abc"), aggregate{fmt.Errorf("def")}}}, + []Matcher{func(err error) bool { return err.Error() == "def" }}, + aggregate{aggregate{fmt.Errorf("abc")}}, + }, + } + for i, testCase := range testCases { + err := FilterOut(testCase.err, testCase.filter...) 
+ if !reflect.DeepEqual(testCase.expected, err) { + t.Errorf("%d: expected %v, got %v", i, testCase.expected, err) + } + } +} + +func TestFlatten(t *testing.T) { + testCases := []struct { + agg Aggregate + expected Aggregate + }{ + { + nil, + nil, + }, + { + aggregate{}, + nil, + }, + { + aggregate{fmt.Errorf("abc")}, + aggregate{fmt.Errorf("abc")}, + }, + { + aggregate{fmt.Errorf("abc"), fmt.Errorf("def"), fmt.Errorf("ghi")}, + aggregate{fmt.Errorf("abc"), fmt.Errorf("def"), fmt.Errorf("ghi")}, + }, + { + aggregate{aggregate{fmt.Errorf("abc")}}, + aggregate{fmt.Errorf("abc")}, + }, + { + aggregate{aggregate{aggregate{fmt.Errorf("abc")}}}, + aggregate{fmt.Errorf("abc")}, + }, + { + aggregate{aggregate{fmt.Errorf("abc"), aggregate{fmt.Errorf("def")}}}, + aggregate{fmt.Errorf("abc"), fmt.Errorf("def")}, + }, + { + aggregate{aggregate{aggregate{fmt.Errorf("abc")}, fmt.Errorf("def"), aggregate{fmt.Errorf("ghi")}}}, + aggregate{fmt.Errorf("abc"), fmt.Errorf("def"), fmt.Errorf("ghi")}, + }, + } + for i, testCase := range testCases { + agg := Flatten(testCase.agg) + if !reflect.DeepEqual(testCase.expected, agg) { + t.Errorf("%d: expected %v, got %v", i, testCase.expected, agg) + } + } +} + +func TestAggregateGoroutines(t *testing.T) { + testCases := []struct { + errs []error + expected map[string]bool // can't compare directly to Aggregate due to non-deterministic ordering + }{ + { + []error{}, + nil, + }, + { + []error{nil}, + nil, + }, + { + []error{nil, nil}, + nil, + }, + { + []error{fmt.Errorf("1")}, + map[string]bool{"1": true}, + }, + { + []error{fmt.Errorf("1"), nil}, + map[string]bool{"1": true}, + }, + { + []error{fmt.Errorf("1"), fmt.Errorf("267")}, + map[string]bool{"1": true, "267": true}, + }, + { + []error{fmt.Errorf("1"), nil, fmt.Errorf("1234")}, + map[string]bool{"1": true, "1234": true}, + }, + { + []error{nil, fmt.Errorf("1"), nil, fmt.Errorf("1234"), fmt.Errorf("22")}, + map[string]bool{"1": true, "1234": true, "22": true}, + }, + } + for i, testCase := range testCases { + funcs := make([]func() error, len(testCase.errs)) + for i := range testCase.errs { + err := testCase.errs[i] + funcs[i] = func() error { return err } + } + agg := AggregateGoroutines(funcs...) + if agg == nil { + if len(testCase.expected) > 0 { + t.Errorf("%d: expected %v, got nil", i, testCase.expected) + } + continue + } + if len(agg.Errors()) != len(testCase.expected) { + t.Errorf("%d: expected %d errors in aggregate, got %v", i, len(testCase.expected), agg) + continue + } + for _, err := range agg.Errors() { + if !testCase.expected[err.Error()] { + t.Errorf("%d: expected %v, got aggregate containing %v", i, testCase.expected, err) + } + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/exec/exec_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/exec/exec_test.go new file mode 100644 index 000000000000..d24f9cc1935a --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/exec/exec_test.go @@ -0,0 +1,103 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package exec + +import ( + osexec "os/exec" + "testing" +) + +func TestExecutorNoArgs(t *testing.T) { + ex := New() + + cmd := ex.Command("true") + out, err := cmd.CombinedOutput() + if err != nil { + t.Errorf("expected success, got %v", err) + } + if len(out) != 0 { + t.Errorf("expected no output, got %q", string(out)) + } + + cmd = ex.Command("false") + out, err = cmd.CombinedOutput() + if err == nil { + t.Errorf("expected failure, got nil error") + } + if len(out) != 0 { + t.Errorf("expected no output, got %q", string(out)) + } + ee, ok := err.(ExitError) + if !ok { + t.Errorf("expected an ExitError, got %+v", err) + } + if ee.Exited() { + if code := ee.ExitStatus(); code != 1 { + t.Errorf("expected exit status 1, got %d", code) + } + } + + cmd = ex.Command("/does/not/exist") + out, err = cmd.CombinedOutput() + if err == nil { + t.Errorf("expected failure, got nil error") + } + if ee, ok := err.(ExitError); ok { + t.Errorf("expected non-ExitError, got %+v", ee) + } +} + +func TestExecutorWithArgs(t *testing.T) { + ex := New() + + cmd := ex.Command("/bin/echo", "stdout") + out, err := cmd.CombinedOutput() + if err != nil { + t.Errorf("expected success, got %+v", err) + } + if string(out) != "stdout\n" { + t.Errorf("unexpected output: %q", string(out)) + } + + cmd = ex.Command("/bin/sh", "-c", "echo stderr > /dev/stderr") + out, err = cmd.CombinedOutput() + if err != nil { + t.Errorf("expected success, got %+v", err) + } + if string(out) != "stderr\n" { + t.Errorf("unexpected output: %q", string(out)) + } +} + +func TestLookPath(t *testing.T) { + ex := New() + + shExpected, _ := osexec.LookPath("sh") + sh, _ := ex.LookPath("sh") + if sh != shExpected { + t.Errorf("unexpected result for LookPath: got %s, expected %s", sh, shExpected) + } +} + +func TestExecutableNotFound(t *testing.T) { + exec := New() + cmd := exec.Command("fake_executable_name") + _, err := cmd.CombinedOutput() + if err != ErrExecutableNotFound { + t.Errorf("Expected error ErrExecutableNotFound but got %v", err) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/flags.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/flag/flags.go similarity index 99% rename from Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/flags.go rename to Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/flag/flags.go index 3d5799243cf1..94b9f733f5af 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/flags.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/flag/flags.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package util +package flag import ( goflag "flag" diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/flag/tristate.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/flag/tristate.go new file mode 100644 index 000000000000..a9359695f5a6 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/flag/tristate.go @@ -0,0 +1,83 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package flag + +import ( + "fmt" + "strconv" +) + +// Tristate is a flag compatible with flags and pflags that +// keeps track of whether it had a value supplied or not. +type Tristate int + +const ( + Unset Tristate = iota // 0 + True + False +) + +func (f *Tristate) Default(value bool) { + *f = triFromBool(value) +} + +func (f Tristate) String() string { + b := boolFromTri(f) + return fmt.Sprintf("%t", b) +} + +func (f Tristate) Value() bool { + b := boolFromTri(f) + return b +} + +func (f *Tristate) Set(value string) error { + boolVal, err := strconv.ParseBool(value) + if err != nil { + return err + } + + *f = triFromBool(boolVal) + return nil +} + +func (f Tristate) Provided() bool { + if f != Unset { + return true + } + return false +} + +func (f *Tristate) Type() string { + return "tristate" +} + +func boolFromTri(t Tristate) bool { + if t == True { + return true + } else { + return false + } +} + +func triFromBool(b bool) Tristate { + if b { + return True + } else { + return False + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/backoff.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/flowcontrol/backoff.go similarity index 95% rename from Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/backoff.go rename to Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/flowcontrol/backoff.go index 275a58a22981..1898c55c99d7 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/backoff.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/flowcontrol/backoff.go @@ -14,12 +14,13 @@ See the License for the specific language governing permissions and limitations under the License. */ -package util +package flowcontrol import ( "sync" "time" + "k8s.io/kubernetes/pkg/util" "k8s.io/kubernetes/pkg/util/integer" ) @@ -30,13 +31,13 @@ type backoffEntry struct { type Backoff struct { sync.Mutex - Clock Clock + Clock util.Clock defaultDuration time.Duration maxDuration time.Duration perItemBackoff map[string]*backoffEntry } -func NewFakeBackOff(initial, max time.Duration, tc *FakeClock) *Backoff { +func NewFakeBackOff(initial, max time.Duration, tc *util.FakeClock) *Backoff { return &Backoff{ perItemBackoff: map[string]*backoffEntry{}, Clock: tc, @@ -48,7 +49,7 @@ func NewFakeBackOff(initial, max time.Duration, tc *FakeClock) *Backoff { func NewBackOff(initial, max time.Duration) *Backoff { return &Backoff{ perItemBackoff: map[string]*backoffEntry{}, - Clock: RealClock{}, + Clock: util.RealClock{}, defaultDuration: initial, maxDuration: max, } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/flowcontrol/backoff_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/flowcontrol/backoff_test.go new file mode 100644 index 000000000000..350c5b790ed7 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/flowcontrol/backoff_test.go @@ -0,0 +1,194 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package flowcontrol + +import ( + "k8s.io/kubernetes/pkg/util" + "testing" + "time" +) + +func TestSlowBackoff(t *testing.T) { + id := "_idSlow" + tc := util.NewFakeClock(time.Now()) + step := time.Second + maxDuration := 50 * step + + b := NewFakeBackOff(step, maxDuration, tc) + cases := []time.Duration{0, 1, 2, 4, 8, 16, 32, 50, 50, 50} + for ix, c := range cases { + tc.Step(step) + w := b.Get(id) + if w != c*step { + t.Errorf("input: '%d': expected %s, got %s", ix, c*step, w) + } + b.Next(id, tc.Now()) + } + + // Now confirm that the Reset cancels backoff. + b.Next(id, tc.Now()) + b.Reset(id) + if b.Get(id) != 0 { + t.Errorf("Reset didn't clear the backoff.") + } + +} + +func TestBackoffReset(t *testing.T) { + id := "_idReset" + tc := util.NewFakeClock(time.Now()) + step := time.Second + maxDuration := step * 5 + b := NewFakeBackOff(step, maxDuration, tc) + startTime := tc.Now() + + // get to backoff = maxDuration + for i := 0; i <= int(maxDuration/step); i++ { + tc.Step(step) + b.Next(id, tc.Now()) + } + + // backoff should be capped at maxDuration + if !b.IsInBackOffSince(id, tc.Now()) { + t.Errorf("expected to be in Backoff got %s", b.Get(id)) + } + + lastUpdate := tc.Now() + tc.Step(2*maxDuration + step) // time += 11s, 11 > 2*maxDuration + if b.IsInBackOffSince(id, lastUpdate) { + t.Errorf("expected to not be in Backoff after reset (start=%s, now=%s, lastUpdate=%s), got %s", startTime, tc.Now(), lastUpdate, b.Get(id)) + } +} + +func TestBackoffHighWaterMark(t *testing.T) { + id := "_idHiWaterMark" + tc := util.NewFakeClock(time.Now()) + step := time.Second + maxDuration := 5 * step + b := NewFakeBackOff(step, maxDuration, tc) + + // get to backoff = maxDuration + for i := 0; i <= int(maxDuration/step); i++ { + tc.Step(step) + b.Next(id, tc.Now()) + } + + // backoff high watermark expires after 2*maxDuration + tc.Step(maxDuration + step) + b.Next(id, tc.Now()) + + if b.Get(id) != maxDuration { + t.Errorf("expected Backoff to stay at high watermark %s got %s", maxDuration, b.Get(id)) + } +} + +func TestBackoffGC(t *testing.T) { + id := "_idGC" + tc := util.NewFakeClock(time.Now()) + step := time.Second + maxDuration := 5 * step + + b := NewFakeBackOff(step, maxDuration, tc) + + for i := 0; i <= int(maxDuration/step); i++ { + tc.Step(step) + b.Next(id, tc.Now()) + } + lastUpdate := tc.Now() + tc.Step(maxDuration + step) + b.GC() + _, found := b.perItemBackoff[id] + if !found { + t.Errorf("expected GC to skip entry, elapsed time=%s maxDuration=%s", tc.Now().Sub(lastUpdate), maxDuration) + } + + tc.Step(maxDuration + step) + b.GC() + r, found := b.perItemBackoff[id] + if found { + t.Errorf("expected GC of entry after %s got entry %v", tc.Now().Sub(lastUpdate), r) + } +} + +func TestIsInBackOffSinceUpdate(t *testing.T) { + id := "_idIsInBackOffSinceUpdate" + tc := util.NewFakeClock(time.Now()) + step := time.Second + maxDuration := 10 * step + b := NewFakeBackOff(step, maxDuration, tc) + startTime := tc.Now() + + cases := []struct { + tick time.Duration + inBackOff bool + value int + }{ + {tick: 0, inBackOff: false, value: 0}, + {tick: 1, inBackOff: false, value: 1}, + {tick: 2, 
inBackOff: true, value: 2}, + {tick: 3, inBackOff: false, value: 2}, + {tick: 4, inBackOff: true, value: 4}, + {tick: 5, inBackOff: true, value: 4}, + {tick: 6, inBackOff: true, value: 4}, + {tick: 7, inBackOff: false, value: 4}, + {tick: 8, inBackOff: true, value: 8}, + {tick: 9, inBackOff: true, value: 8}, + {tick: 10, inBackOff: true, value: 8}, + {tick: 11, inBackOff: true, value: 8}, + {tick: 12, inBackOff: true, value: 8}, + {tick: 13, inBackOff: true, value: 8}, + {tick: 14, inBackOff: true, value: 8}, + {tick: 15, inBackOff: false, value: 8}, + {tick: 16, inBackOff: true, value: 10}, + {tick: 17, inBackOff: true, value: 10}, + {tick: 18, inBackOff: true, value: 10}, + {tick: 19, inBackOff: true, value: 10}, + {tick: 20, inBackOff: true, value: 10}, + {tick: 21, inBackOff: true, value: 10}, + {tick: 22, inBackOff: true, value: 10}, + {tick: 23, inBackOff: true, value: 10}, + {tick: 24, inBackOff: true, value: 10}, + {tick: 25, inBackOff: false, value: 10}, + {tick: 26, inBackOff: true, value: 10}, + {tick: 27, inBackOff: true, value: 10}, + {tick: 28, inBackOff: true, value: 10}, + {tick: 29, inBackOff: true, value: 10}, + {tick: 30, inBackOff: true, value: 10}, + {tick: 31, inBackOff: true, value: 10}, + {tick: 32, inBackOff: true, value: 10}, + {tick: 33, inBackOff: true, value: 10}, + {tick: 34, inBackOff: true, value: 10}, + {tick: 35, inBackOff: false, value: 10}, + {tick: 56, inBackOff: false, value: 0}, + {tick: 57, inBackOff: false, value: 1}, + } + + for _, c := range cases { + tc.SetTime(startTime.Add(c.tick * step)) + if c.inBackOff != b.IsInBackOffSinceUpdate(id, tc.Now()) { + t.Errorf("expected IsInBackOffSinceUpdate %v got %v at tick %s", c.inBackOff, b.IsInBackOffSinceUpdate(id, tc.Now()), c.tick*step) + } + + if c.inBackOff && (time.Duration(c.value)*step != b.Get(id)) { + t.Errorf("expected backoff value=%s got %s at tick %s", time.Duration(c.value)*step, b.Get(id), c.tick*step) + } + + if !c.inBackOff { + b.Next(id, tc.Now()) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/throttle.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/flowcontrol/throttle.go similarity index 99% rename from Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/throttle.go rename to Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/flowcontrol/throttle.go index c1caea099fee..a63817ca8c10 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/throttle.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/flowcontrol/throttle.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package util +package flowcontrol import ( "sync" diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/flowcontrol/throttle_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/flowcontrol/throttle_test.go new file mode 100644 index 000000000000..30b792ec0bb4 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/flowcontrol/throttle_test.go @@ -0,0 +1,127 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package flowcontrol + +import ( + "math" + "sync" + "testing" + "time" +) + +func TestBasicThrottle(t *testing.T) { + r := NewTokenBucketRateLimiter(1, 3) + for i := 0; i < 3; i++ { + if !r.TryAccept() { + t.Error("unexpected false accept") + } + } + if r.TryAccept() { + t.Error("unexpected true accept") + } +} + +func TestIncrementThrottle(t *testing.T) { + r := NewTokenBucketRateLimiter(1, 1) + if !r.TryAccept() { + t.Error("unexpected false accept") + } + if r.TryAccept() { + t.Error("unexpected true accept") + } + + // Allow to refill + time.Sleep(2 * time.Second) + + if !r.TryAccept() { + t.Error("unexpected false accept") + } +} + +func TestThrottle(t *testing.T) { + r := NewTokenBucketRateLimiter(10, 5) + + // Should consume 5 tokens immediately, then + // the remaining 11 should take at least 1 second (0.1s each) + expectedFinish := time.Now().Add(time.Second * 1) + for i := 0; i < 16; i++ { + r.Accept() + } + if time.Now().Before(expectedFinish) { + t.Error("rate limit was not respected, finished too early") + } +} + +func TestRateLimiterSaturation(t *testing.T) { + const e = 0.000001 + tests := []struct { + capacity int + take int + + expectedSaturation float64 + }{ + {1, 1, 1}, + {10, 3, 0.3}, + } + for i, tt := range tests { + rl := NewTokenBucketRateLimiter(1, tt.capacity) + for i := 0; i < tt.take; i++ { + rl.Accept() + } + if math.Abs(rl.Saturation()-tt.expectedSaturation) > e { + t.Fatalf("#%d: Saturation rate difference isn't within tolerable range\n want=%f, get=%f", + i, tt.expectedSaturation, rl.Saturation()) + } + } +} + +func TestAlwaysFake(t *testing.T) { + rl := NewFakeAlwaysRateLimiter() + if !rl.TryAccept() { + t.Error("TryAccept in AlwaysFake should return true.") + } + // If this will block the test will timeout + rl.Accept() +} + +func TestNeverFake(t *testing.T) { + rl := NewFakeNeverRateLimiter() + if rl.TryAccept() { + t.Error("TryAccept in NeverFake should return false.") + } + + finished := false + wg := sync.WaitGroup{} + wg.Add(1) + go func() { + rl.Accept() + finished = true + wg.Done() + }() + + // Wait some time to make sure it never finished. + time.Sleep(time.Second) + if finished { + t.Error("Accept should block forever in NeverFake.") + } + + rl.Stop() + wg.Wait() + if !finished { + t.Error("Stop should make Accept unblock in NeverFake.") + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/flushwriter/writer_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/flushwriter/writer_test.go new file mode 100644 index 000000000000..d40b0bb004ec --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/flushwriter/writer_test.go @@ -0,0 +1,86 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package flushwriter + +import ( + "fmt" + "testing" +) + +type writerWithFlush struct { + writeCount, flushCount int + err error +} + +func (w *writerWithFlush) Flush() { + w.flushCount++ +} + +func (w *writerWithFlush) Write(p []byte) (n int, err error) { + w.writeCount++ + return len(p), w.err +} + +type writerWithNoFlush struct { + writeCount int +} + +func (w *writerWithNoFlush) Write(p []byte) (n int, err error) { + w.writeCount++ + return len(p), nil +} + +func TestWriteWithFlush(t *testing.T) { + w := &writerWithFlush{} + fw := Wrap(w) + for i := 0; i < 10; i++ { + _, err := fw.Write([]byte("Test write")) + if err != nil { + t.Errorf("Unexpected error while writing with flush writer: %v", err) + } + } + if w.flushCount != 10 { + t.Errorf("Flush not called the expected number of times. Actual: %d", w.flushCount) + } + if w.writeCount != 10 { + t.Errorf("Write not called the expected number of times. Actual: %d", w.writeCount) + } +} + +func TestWriteWithoutFlush(t *testing.T) { + w := &writerWithNoFlush{} + fw := Wrap(w) + for i := 0; i < 10; i++ { + _, err := fw.Write([]byte("Test write")) + if err != nil { + t.Errorf("Unexpected error while writing with flush writer: %v", err) + } + } + if w.writeCount != 10 { + t.Errorf("Write not called the expected number of times. Actual: %d", w.writeCount) + } +} + +func TestWriteError(t *testing.T) { + e := fmt.Errorf("Error") + w := &writerWithFlush{err: e} + fw := Wrap(w) + _, err := fw.Write([]byte("Test write")) + if err != e { + t.Errorf("Did not get expected error. Got: %#v", err) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/framer/framer.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/framer/framer.go new file mode 100644 index 000000000000..7ca806fa0547 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/framer/framer.go @@ -0,0 +1,167 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package framer implements simple frame decoding techniques for an io.ReadCloser +package framer + +import ( + "encoding/binary" + "encoding/json" + "io" +) + +type lengthDelimitedFrameWriter struct { + w io.Writer + h [4]byte +} + +func NewLengthDelimitedFrameWriter(w io.Writer) io.Writer { + return &lengthDelimitedFrameWriter{w: w} +} + +// Write writes a single frame to the nested writer, prepending it with the length +// in bytes of data (as a 4 byte, big-endian uint32). +func (w *lengthDelimitedFrameWriter) Write(data []byte) (int, error) { + binary.BigEndian.PutUint32(w.h[:], uint32(len(data))) + n, err := w.w.Write(w.h[:]) + if err != nil { + return 0, err + } + if n != len(w.h) { + return 0, io.ErrShortWrite + } + return w.w.Write(data) +} + +type lengthDelimitedFrameReader struct { + r io.ReadCloser + remaining int +} + +// NewLengthDelimitedFrameReader returns an io.Reader that will decode length-prefixed +// frames off of a stream. +// +// The protocol is: +// +// stream: message ... 
+// message: prefix body +// prefix: 4 byte uint32 in BigEndian order, denotes length of body +// body: bytes (0..prefix) +// +// If the buffer passed to Read is not long enough to contain an entire frame, io.ErrShortBuffer +// will be returned along with the number of bytes read. +func NewLengthDelimitedFrameReader(r io.ReadCloser) io.ReadCloser { + return &lengthDelimitedFrameReader{r: r} +} + +// Read attempts to read an entire frame into data. If that is not possible, io.ErrShortBuffer +// is returned and subsequent calls will attempt to read the last frame. A frame is complete when +// err is nil. +func (r *lengthDelimitedFrameReader) Read(data []byte) (int, error) { + if r.remaining <= 0 { + header := [4]byte{} + n, err := io.ReadAtLeast(r.r, header[:4], 4) + if err != nil { + return 0, err + } + if n != 4 { + return 0, io.ErrUnexpectedEOF + } + frameLength := int(binary.BigEndian.Uint32(header[:])) + r.remaining = frameLength + } + + expect := r.remaining + max := expect + if max > len(data) { + max = len(data) + } + n, err := io.ReadAtLeast(r.r, data[:max], int(max)) + r.remaining -= n + if err == io.ErrShortBuffer || r.remaining > 0 { + return n, io.ErrShortBuffer + } + if err != nil { + return n, err + } + if n != expect { + return n, io.ErrUnexpectedEOF + } + + return n, nil +} + +func (r *lengthDelimitedFrameReader) Close() error { + return r.r.Close() +} + +type jsonFrameReader struct { + r io.ReadCloser + decoder *json.Decoder + remaining []byte +} + +// NewJSONFramedReader returns an io.Reader that will decode individual JSON objects off +// of a wire. +// +// The boundaries between each frame are valid JSON objects. A JSON parsing error will terminate +// the read. +func NewJSONFramedReader(r io.ReadCloser) io.ReadCloser { + return &jsonFrameReader{ + r: r, + decoder: json.NewDecoder(r), + } +} + +// Read decodes the next JSON object in the stream, or returns an error. The returned +// byte slice will be modified the next time Read is invoked and should not be altered. +func (r *jsonFrameReader) Read(data []byte) (int, error) { + // Return whatever remaining data exists from an in-progress frame + if n := len(r.remaining); n > 0 { + if n <= len(data) { + data = append(data[0:0], r.remaining...) + r.remaining = nil + return n, nil + } + + n = len(data) + data = append(data[0:0], r.remaining[:n]...) + r.remaining = r.remaining[n:] + return n, io.ErrShortBuffer + } + + // RawMessage#Unmarshal appends to data - we reset the slice down to 0 and will either see + // data written to data, or be larger than data and a different array. + n := len(data) + m := json.RawMessage(data[:0]) + if err := r.decoder.Decode(&m); err != nil { + return 0, err + } + + // If capacity of data is less than length of the message, decoder will allocate a new slice + // and set m to it, which means we need to copy the partial result back into data and preserve + // the remaining result for subsequent reads. + if len(m) > n { + data = append(data[0:0], m[:n]...) + r.remaining = m[n:] + return n, io.ErrShortBuffer + } + return len(m), nil +} + +func (r *jsonFrameReader) Close() error { + return r.r.Close() +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/framer/framer_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/framer/framer_test.go new file mode 100644 index 000000000000..c5e4ba874ee3 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/framer/framer_test.go @@ -0,0 +1,176 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. 
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package framer
+
+import (
+	"bytes"
+	"io"
+	"io/ioutil"
+	"testing"
+)
+
+func TestRead(t *testing.T) {
+	data := []byte{
+		0x00, 0x00, 0x00, 0x04,
+		0x01, 0x02, 0x03, 0x04,
+		0x00, 0x00, 0x00, 0x03,
+		0x05, 0x06, 0x07,
+		0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x01,
+		0x08,
+	}
+	b := bytes.NewBuffer(data)
+	r := NewLengthDelimitedFrameReader(ioutil.NopCloser(b))
+	buf := make([]byte, 1)
+	if n, err := r.Read(buf); err != io.ErrShortBuffer || n != 1 || !bytes.Equal(buf[:n], []byte{0x01}) {
+		t.Fatalf("unexpected: %v %d %v", err, n, buf)
+	}
+	if n, err := r.Read(buf); err != io.ErrShortBuffer || n != 1 || !bytes.Equal(buf[:n], []byte{0x02}) {
+		t.Fatalf("unexpected: %v %d %v", err, n, buf)
+	}
+	// read the remaining frame
+	buf = make([]byte, 2)
+	if n, err := r.Read(buf); err != nil || n != 2 || !bytes.Equal(buf[:n], []byte{0x03, 0x04}) {
+		t.Fatalf("unexpected: %v %d %v", err, n, buf)
+	}
+	// read with buffer equal to frame
+	buf = make([]byte, 3)
+	if n, err := r.Read(buf); err != nil || n != 3 || !bytes.Equal(buf[:n], []byte{0x05, 0x06, 0x07}) {
+		t.Fatalf("unexpected: %v %d %v", err, n, buf)
+	}
+	// read empty frame
+	buf = make([]byte, 3)
+	if n, err := r.Read(buf); err != nil || n != 0 || !bytes.Equal(buf[:n], []byte{}) {
+		t.Fatalf("unexpected: %v %d %v", err, n, buf)
+	}
+	// read with larger buffer than frame
+	buf = make([]byte, 3)
+	if n, err := r.Read(buf); err != nil || n != 1 || !bytes.Equal(buf[:n], []byte{0x08}) {
+		t.Fatalf("unexpected: %v %d %v", err, n, buf)
+	}
+	// read EOF
+	if n, err := r.Read(buf); err != io.EOF || n != 0 {
+		t.Fatalf("unexpected: %v %d", err, n)
+	}
+}
+
+func TestReadLarge(t *testing.T) {
+	data := []byte{
+		0x00, 0x00, 0x00, 0x04,
+		0x01, 0x02, 0x03, 0x04,
+		0x00, 0x00, 0x00, 0x03,
+		0x05, 0x06, 0x07,
+		0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x01,
+		0x08,
+	}
+	b := bytes.NewBuffer(data)
+	r := NewLengthDelimitedFrameReader(ioutil.NopCloser(b))
+	buf := make([]byte, 40)
+	if n, err := r.Read(buf); err != nil || n != 4 || !bytes.Equal(buf[:n], []byte{0x01, 0x02, 0x03, 0x04}) {
+		t.Fatalf("unexpected: %v %d %v", err, n, buf)
+	}
+	if n, err := r.Read(buf); err != nil || n != 3 || !bytes.Equal(buf[:n], []byte{0x05, 0x06, 0x07}) {
+		t.Fatalf("unexpected: %v %d %v", err, n, buf)
+	}
+	if n, err := r.Read(buf); err != nil || n != 0 || !bytes.Equal(buf[:n], []byte{}) {
+		t.Fatalf("unexpected: %v %d %v", err, n, buf)
+	}
+	if n, err := r.Read(buf); err != nil || n != 1 || !bytes.Equal(buf[:n], []byte{0x08}) {
+		t.Fatalf("unexpected: %v %d %v", err, n, buf)
+	}
+	// read EOF
+	if n, err := r.Read(buf); err != io.EOF || n != 0 {
+		t.Fatalf("unexpected: %v %d", err, n)
+	}
+}
+
+func TestReadInvalidFrame(t *testing.T) {
+	data := []byte{
+		0x00, 0x00, 0x00, 0x04,
+		0x01, 0x02,
+	}
+	b := bytes.NewBuffer(data)
+	r := NewLengthDelimitedFrameReader(ioutil.NopCloser(b))
+	buf := make([]byte, 1)
+	if n, err := r.Read(buf); err != io.ErrShortBuffer || n != 1 || !bytes.Equal(buf[:n], []byte{0x01}) {
+		t.Fatalf("unexpected: %v %d %v", err, n, buf)
+	}
+	// read the remaining frame; only 1 of the 3 outstanding bytes arrives
+	// before the stream ends, so the frame stays incomplete
+	buf = make([]byte, 3)
+	if n, err := r.Read(buf); err != io.ErrShortBuffer || n != 1 || !bytes.Equal(buf[:n], []byte{0x02}) {
+		t.Fatalf("unexpected: %v %d %v", err, n, buf)
+	}
+	// the truncated frame never completes: the reader keeps reporting a short
+	// buffer because the missing bytes never arrive
+	if n, err := r.Read(buf); err != io.ErrShortBuffer || n != 0 {
+		t.Fatalf("unexpected: %v %d", err, n)
+	}
+}
+
+func TestJSONFrameReader(t *testing.T) {
+	b := bytes.NewBufferString("{\"test\":true}\n1\n[\"a\"]")
+	r := NewJSONFramedReader(ioutil.NopCloser(b))
+	buf := make([]byte, 20)
+	if n, err := r.Read(buf); err != nil || n != 13 || string(buf[:n]) != `{"test":true}` {
+		t.Fatalf("unexpected: %v %d %q", err, n, buf)
+	}
+	if n, err := r.Read(buf); err != nil || n != 1 || string(buf[:n]) != `1` {
+		t.Fatalf("unexpected: %v %d %q", err, n, buf)
+	}
+	if n, err := r.Read(buf); err != nil || n != 5 || string(buf[:n]) != `["a"]` {
+		t.Fatalf("unexpected: %v %d %q", err, n, buf)
+	}
+	if n, err := r.Read(buf); err != io.EOF || n != 0 {
+		t.Fatalf("unexpected: %v %d %q", err, n, buf)
+	}
+}
+
+func TestJSONFrameReaderShortBuffer(t *testing.T) {
+	b := bytes.NewBufferString("{\"test\":true}\n1\n[\"a\"]")
+	r := NewJSONFramedReader(ioutil.NopCloser(b))
+	buf := make([]byte, 3)
+
+	if n, err := r.Read(buf); err != io.ErrShortBuffer || n != 3 || string(buf[:n]) != `{"t` {
+		t.Fatalf("unexpected: %v %d %q", err, n, buf)
+	}
+	if n, err := r.Read(buf); err != io.ErrShortBuffer || n != 3 || string(buf[:n]) != `est` {
+		t.Fatalf("unexpected: %v %d %q", err, n, buf)
+	}
+	if n, err := r.Read(buf); err != io.ErrShortBuffer || n != 3 || string(buf[:n]) != `":t` {
+		t.Fatalf("unexpected: %v %d %q", err, n, buf)
+	}
+	if n, err := r.Read(buf); err != io.ErrShortBuffer || n != 3 || string(buf[:n]) != `rue` {
+		t.Fatalf("unexpected: %v %d %q", err, n, buf)
+	}
+	if n, err := r.Read(buf); err != nil || n != 1 || string(buf[:n]) != `}` {
+		t.Fatalf("unexpected: %v %d %q", err, n, buf)
+	}
+
+	if n, err := r.Read(buf); err != nil || n != 1 || string(buf[:n]) != `1` {
+		t.Fatalf("unexpected: %v %d %q", err, n, buf)
+	}
+
+	if n, err := r.Read(buf); err != io.ErrShortBuffer || n != 3 || string(buf[:n]) != `["a` {
+		t.Fatalf("unexpected: %v %d %q", err, n, buf)
+	}
+	if n, err := r.Read(buf); err != nil || n != 2 || string(buf[:n]) != `"]` {
+		t.Fatalf("unexpected: %v %d %q", err, n, buf)
+	}
+
+	if n, err := r.Read(buf); err != io.EOF || n != 0 {
+		t.Fatalf("unexpected: %v %d %q", err, n, buf)
+	}
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/goroutinemap/goroutinemap.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/goroutinemap/goroutinemap.go
new file mode 100644
index 000000000000..cbf8cd3462a0
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/goroutinemap/goroutinemap.go
@@ -0,0 +1,88 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Package goroutinemap implements a data structure for managing go routines
+by name. It prevents the creation of new go routines if an existing go routine
+with the same name exists.
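+
+A minimal usage sketch (an editorial illustration based on the interface
+below, not part of the upstream file; the operation name is made up):
+
+	grm := goroutinemap.NewGoRoutineMap()
+	err := grm.Run("attach-volume-abc123", func() error {
+		// long-running work; a second Run with the same name fails
+		// until this function returns
+		return nil
+	})
+	if err != nil {
+		// an operation named "attach-volume-abc123" is already running
+	}
+	grm.Wait() // block until every started operation has completed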
+*/
+package goroutinemap
+
+import (
+	"fmt"
+	"sync"
+
+	"k8s.io/kubernetes/pkg/util/runtime"
+)
+
+// GoRoutineMap defines the supported set of operations.
+type GoRoutineMap interface {
+	// Run adds operationName to the list of running operations and spawns a new
+	// go routine to execute the operation. If an operation with the same name
+	// already exists, an error is returned. Once the operation is complete, the
+	// go routine is terminated and the operationName is removed from the list
+	// of executing operations allowing a new operation to be started with the
+	// same name without error.
+	Run(operationName string, operation func() error) error
+
+	// Wait blocks until all operations are completed. This is typically
+	// necessary during tests - the test should wait until all operations finish
+	// and evaluate results after that.
+	Wait()
+}
+
+// NewGoRoutineMap returns a new instance of GoRoutineMap.
+func NewGoRoutineMap() GoRoutineMap {
+	return &goRoutineMap{
+		operations: make(map[string]bool),
+	}
+}
+
+type goRoutineMap struct {
+	operations map[string]bool
+	sync.Mutex
+	wg sync.WaitGroup
+}
+
+func (grm *goRoutineMap) Run(operationName string, operation func() error) error {
+	grm.Lock()
+	defer grm.Unlock()
+	if grm.operations[operationName] {
+		// Operation with name exists
+		return fmt.Errorf("Failed to create operation with name %q. An operation with that name already exists.", operationName)
+	}
+
+	grm.operations[operationName] = true
+	grm.wg.Add(1)
+	go func() {
+		defer grm.operationComplete(operationName)
+		defer runtime.HandleCrash()
+		operation()
+	}()
+
+	return nil
+}
+
+func (grm *goRoutineMap) operationComplete(operationName string) {
+	defer grm.wg.Done()
+	grm.Lock()
+	defer grm.Unlock()
+	delete(grm.operations, operationName)
+}
+
+func (grm *goRoutineMap) Wait() {
+	grm.wg.Wait()
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/goroutinemap/goroutinemap_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/goroutinemap/goroutinemap_test.go
new file mode 100644
index 000000000000..04d205c45de4
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/goroutinemap/goroutinemap_test.go
@@ -0,0 +1,263 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package goroutinemap
+
+import (
+	"fmt"
+	"testing"
+	"time"
+
+	"k8s.io/kubernetes/pkg/util/wait"
+)
+
+// testTimeout is a timeout of goroutines to finish. This _should_ be just a
+// "context switch" and it should take several ms, however, Clayton says "We
+// have had flakes due to tests that assumed that 15s is long enough to sleep".
+const testTimeout = 1 * time.Minute
+
+func Test_NewGoRoutineMap_Positive_SingleOp(t *testing.T) {
+	// Arrange
+	grm := NewGoRoutineMap()
+	operationName := "operation-name"
+	operation := func() error { return nil }
+
+	// Act
+	err := grm.Run(operationName, operation)
+
+	// Assert
+	if err != nil {
+		t.Fatalf("NewGoRoutine failed. Expected: <no error> Actual: <%v>", err)
+	}
+}
+
+func Test_NewGoRoutineMap_Positive_SecondOpAfterFirstCompletes(t *testing.T) {
+	// Arrange
+	grm := NewGoRoutineMap()
+	operationName := "operation-name"
+	operation1DoneCh := make(chan interface{}, 0 /* bufferSize */)
+	operation1 := generateCallbackFunc(operation1DoneCh)
+	err1 := grm.Run(operationName, operation1)
+	if err1 != nil {
+		t.Fatalf("NewGoRoutine failed. Expected: <no error> Actual: <%v>", err1)
+	}
+	operation2 := generateNoopFunc()
+	<-operation1DoneCh // Force operation1 to complete
+
+	// Act
+	err2 := retryWithExponentialBackOff(
+		time.Duration(20*time.Millisecond),
+		func() (bool, error) {
+			err := grm.Run(operationName, operation2)
+			if err != nil {
+				t.Logf("Warning: NewGoRoutine failed. Expected: <no error> Actual: <%v>. Will retry.", err)
+				return false, nil
+			}
+			return true, nil
+		},
+	)
+
+	// Assert
+	if err2 != nil {
+		t.Fatalf("NewGoRoutine failed. Expected: <no error> Actual: <%v>", err2)
+	}
+}
+
+func Test_NewGoRoutineMap_Positive_SecondOpAfterFirstPanics(t *testing.T) {
+	// Arrange
+	grm := NewGoRoutineMap()
+	operationName := "operation-name"
+	operation1 := generatePanicFunc()
+	err1 := grm.Run(operationName, operation1)
+	if err1 != nil {
+		t.Fatalf("NewGoRoutine failed. Expected: <no error> Actual: <%v>", err1)
+	}
+	operation2 := generateNoopFunc()
+
+	// Act
+	err2 := retryWithExponentialBackOff(
+		time.Duration(20*time.Millisecond),
+		func() (bool, error) {
+			err := grm.Run(operationName, operation2)
+			if err != nil {
+				t.Logf("Warning: NewGoRoutine failed. Expected: <no error> Actual: <%v>. Will retry.", err)
+				return false, nil
+			}
+			return true, nil
+		},
+	)
+
+	// Assert
+	if err2 != nil {
+		t.Fatalf("NewGoRoutine failed. Expected: <no error> Actual: <%v>", err2)
+	}
+}
+
+func Test_NewGoRoutineMap_Negative_SecondOpBeforeFirstCompletes(t *testing.T) {
+	// Arrange
+	grm := NewGoRoutineMap()
+	operationName := "operation-name"
+	operation1DoneCh := make(chan interface{}, 0 /* bufferSize */)
+	operation1 := generateWaitFunc(operation1DoneCh)
+	err1 := grm.Run(operationName, operation1)
+	if err1 != nil {
+		t.Fatalf("NewGoRoutine failed. Expected: <no error> Actual: <%v>", err1)
+	}
+	operation2 := generateNoopFunc()
+
+	// Act
+	err2 := grm.Run(operationName, operation2)
+
+	// Assert
+	if err2 == nil {
+		t.Fatalf("NewGoRoutine did not fail. Expected: <error: operation %q already exists> Actual: <no error>", operationName)
+	}
+}
+
+func Test_NewGoRoutineMap_Positive_ThirdOpAfterFirstCompletes(t *testing.T) {
+	// Arrange
+	grm := NewGoRoutineMap()
+	operationName := "operation-name"
+	operation1DoneCh := make(chan interface{}, 0 /* bufferSize */)
+	operation1 := generateWaitFunc(operation1DoneCh)
+	err1 := grm.Run(operationName, operation1)
+	if err1 != nil {
+		t.Fatalf("NewGoRoutine failed. Expected: <no error> Actual: <%v>", err1)
+	}
+	operation2 := generateNoopFunc()
+	operation3 := generateNoopFunc()
+
+	// Act
+	err2 := grm.Run(operationName, operation2)
+
+	// Assert
+	if err2 == nil {
+		t.Fatalf("NewGoRoutine did not fail. Expected: <error: operation %q already exists> Actual: <no error>", operationName)
+	}
+
+	// Act
+	operation1DoneCh <- true // Force operation1 to complete
+	err3 := retryWithExponentialBackOff(
+		time.Duration(20*time.Millisecond),
+		func() (bool, error) {
+			err := grm.Run(operationName, operation3)
+			if err != nil {
+				t.Logf("Warning: NewGoRoutine failed. Expected: <no error> Actual: <%v>. Will retry.", err)
+				return false, nil
+			}
+			return true, nil
+		},
+	)
+
+	// Assert
+	if err3 != nil {
+		t.Fatalf("NewGoRoutine failed. Expected: <no error> Actual: <%v>", err3)
+	}
+}
+
+func generateCallbackFunc(done chan<- interface{}) func() error {
+	return func() error {
+		done <- true
+		return nil
+	}
+}
+
+func generateWaitFunc(done <-chan interface{}) func() error {
+	return func() error {
+		<-done
+		return nil
+	}
+}
+
+func generatePanicFunc() func() error {
+	return func() error {
+		panic("testing panic")
+	}
+}
+
+func generateNoopFunc() func() error {
+	return func() error { return nil }
+}
+
+func retryWithExponentialBackOff(initialDuration time.Duration, fn wait.ConditionFunc) error {
+	backoff := wait.Backoff{
+		Duration: initialDuration,
+		Factor:   3,
+		Jitter:   0,
+		Steps:    4,
+	}
+	return wait.ExponentialBackoff(backoff, fn)
+}
+
+func Test_NewGoRoutineMap_Positive_WaitEmpty(t *testing.T) {
+	// Test that Wait() on an empty GoRoutineMap always succeeds without blocking
+	// Arrange
+	grm := NewGoRoutineMap()
+
+	// Act
+	waitDoneCh := make(chan interface{}, 1)
+	go func() {
+		grm.Wait()
+		waitDoneCh <- true
+	}()
+
+	// Assert
+	err := waitChannelWithTimeout(waitDoneCh, testTimeout)
+	if err != nil {
+		t.Errorf("Error waiting for GoRoutineMap.Wait: %v", err)
+	}
+}
+
+func Test_NewGoRoutineMap_Positive_Wait(t *testing.T) {
+	// Test that Wait() really blocks until the last operation succeeds
+	// Arrange
+	grm := NewGoRoutineMap()
+	operationName := "operation-name"
+	operation1DoneCh := make(chan interface{}, 0 /* bufferSize */)
+	operation1 := generateWaitFunc(operation1DoneCh)
+	err := grm.Run(operationName, operation1)
+	if err != nil {
+		t.Fatalf("NewGoRoutine failed. Expected: <no error> Actual: <%v>", err)
+	}
+
+	// Act
+	waitDoneCh := make(chan interface{}, 1)
+	go func() {
+		grm.Wait()
+		waitDoneCh <- true
+	}()
+
+	// Finish the operation
+	operation1DoneCh <- true
+
+	// Assert
+	err = waitChannelWithTimeout(waitDoneCh, testTimeout)
+	if err != nil {
+		t.Fatalf("Error waiting for GoRoutineMap.Wait: %v", err)
+	}
+}
+
+func waitChannelWithTimeout(ch <-chan interface{}, timeout time.Duration) error {
+	timer := time.NewTimer(timeout)
+
+	select {
+	case <-ch:
+		// Success!
+		return nil
+	case <-timer.C:
+		return fmt.Errorf("timeout after %v", timeout)
+	}
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/hash/hash_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/hash/hash_test.go
new file mode 100644
index 000000000000..3bba3b074bab
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/hash/hash_test.go
@@ -0,0 +1,147 @@
+/*
+Copyright 2015 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package hash
+
+import (
+	"fmt"
+	"hash/adler32"
+	"testing"
+
+	"github.com/davecgh/go-spew/spew"
+)
+
+type A struct {
+	x int
+	y string
+}
+
+type B struct {
+	x []int
+	y map[string]bool
+}
+
+type C struct {
+	x int
+	y string
+}
+
+func (c C) String() string {
+	return fmt.Sprintf("%d:%s", c.x, c.y)
+}
+
+func TestDeepHashObject(t *testing.T) {
+	successCases := []func() interface{}{
+		func() interface{} { return 8675309 },
+		func() interface{} { return "Jenny, I got your number" },
+		func() interface{} { return []string{"eight", "six", "seven"} },
+		func() interface{} { return [...]int{5, 3, 0, 9} },
+		func() interface{} { return map[int]string{8: "8", 6: "6", 7: "7"} },
+		func() interface{} { return map[string]int{"5": 5, "3": 3, "0": 0, "9": 9} },
+		func() interface{} { return A{867, "5309"} },
+		func() interface{} { return &A{867, "5309"} },
+		func() interface{} {
+			return B{[]int{8, 6, 7}, map[string]bool{"5": true, "3": true, "0": true, "9": true}}
+		},
+		func() interface{} { return map[A]bool{A{8675309, "Jenny"}: true, A{9765683, "!Jenny"}: false} },
+		func() interface{} { return map[C]bool{C{8675309, "Jenny"}: true, C{9765683, "!Jenny"}: false} },
+		func() interface{} { return map[*A]bool{&A{8675309, "Jenny"}: true, &A{9765683, "!Jenny"}: false} },
+		func() interface{} { return map[*C]bool{&C{8675309, "Jenny"}: true, &C{9765683, "!Jenny"}: false} },
+	}
+
+	for _, tc := range successCases {
+		hasher1 := adler32.New()
+		DeepHashObject(hasher1, tc())
+		hash1 := hasher1.Sum32()
+		DeepHashObject(hasher1, tc())
+		hash2 := hasher1.Sum32()
+		if hash1 != hash2 {
+			t.Fatalf("hash of the same object (%q) produced different results: %d vs %d", toString(tc()), hash1, hash2)
+		}
+		for i := 0; i < 100; i++ {
+			hasher2 := adler32.New()
+
+			DeepHashObject(hasher1, tc())
+			hash1a := hasher1.Sum32()
+			DeepHashObject(hasher2, tc())
+			hash2a := hasher2.Sum32()
+
+			if hash1a != hash1 {
+				t.Errorf("repeated hash of the same object (%q) produced different results: %d vs %d", toString(tc()), hash1, hash1a)
+			}
+			if hash2a != hash2 {
+				t.Errorf("repeated hash of the same object (%q) produced different results: %d vs %d", toString(tc()), hash2, hash2a)
+			}
+			if hash1a != hash2a {
+				t.Errorf("hash of the same object (%q) produced different results: %d vs %d", toString(tc()), hash1a, hash2a)
+			}
+		}
+	}
+}
+
+func toString(obj interface{}) string {
+	return spew.Sprintf("%#v", obj)
+}
+
+type wheel struct {
+	radius uint32
+}
+
+type unicycle struct {
+	primaryWheel   *wheel
+	licencePlateID string
+	tags           map[string]string
+}
+
+func TestDeepObjectPointer(t *testing.T) {
+	// Arrange
+	wheel1 := wheel{radius: 17}
+	wheel2 := wheel{radius: 22}
+	wheel3 := wheel{radius: 17}
+
+	myUni1 := unicycle{licencePlateID: "blah", primaryWheel: &wheel1, tags: map[string]string{"color": "blue", "name": "john"}}
+	myUni2 := unicycle{licencePlateID: "blah", primaryWheel: &wheel2, tags: map[string]string{"color": "blue", "name": "john"}}
+	myUni3 := unicycle{licencePlateID: "blah", primaryWheel: &wheel3, tags: map[string]string{"color": "blue", "name": "john"}}
+
+	// Run it more than once to verify determinism of hasher.
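+	// Editorial aside (not part of the upstream test): a compact restatement
+	// of what the loop below verifies. DeepHashObject writes a deterministic
+	// dump of the value into the hasher, following pointers to their values,
+	// so structurally equal objects must hash identically even when reached
+	// through different pointers:
+	//
+	//	ha, hb := adler32.New(), adler32.New()
+	//	DeepHashObject(ha, myUni1) // wheel1: radius 17
+	//	DeepHashObject(hb, myUni3) // wheel3: different pointer, same radius 17
+	//	// expectation: ha.Sum32() == hb.Sum32()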
+	for i := 0; i < 100; i++ {
+		hasher1 := adler32.New()
+		hasher2 := adler32.New()
+		hasher3 := adler32.New()
+		// Act
+		DeepHashObject(hasher1, myUni1)
+		hash1 := hasher1.Sum32()
+		DeepHashObject(hasher1, myUni1)
+		hash1a := hasher1.Sum32()
+		DeepHashObject(hasher2, myUni2)
+		hash2 := hasher2.Sum32()
+		DeepHashObject(hasher3, myUni3)
+		hash3 := hasher3.Sum32()
+
+		// Assert
+		if hash1 != hash1a {
+			t.Errorf("repeated hash of the same object produced different results: %d vs %d", hash1, hash1a)
+		}
+
+		if hash1 == hash2 {
+			t.Errorf("hash1 (%d) and hash2 (%d) must be different because they have different values for wheel size", hash1, hash2)
+		}
+
+		if hash1 != hash3 {
+			t.Errorf("hash1 (%d) and hash3 (%d) must be the same because although they point to different objects, they have the same values for wheel size", hash1, hash3)
+		}
+	}
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/homedir/homedir.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/homedir/homedir.go
new file mode 100644
index 000000000000..57171e109bbd
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/homedir/homedir.go
@@ -0,0 +1,40 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package homedir
+
+import (
+	"os"
+	"runtime"
+)
+
+// HomeDir returns the home directory for the current user
+func HomeDir() string {
+	if runtime.GOOS == "windows" {
+		if homeDrive, homePath := os.Getenv("HOMEDRIVE"), os.Getenv("HOMEPATH"); len(homeDrive) > 0 && len(homePath) > 0 {
+			homeDir := homeDrive + homePath
+			if _, err := os.Stat(homeDir); err == nil {
+				return homeDir
+			}
+		}
+		if userProfile := os.Getenv("USERPROFILE"); len(userProfile) > 0 {
+			if _, err := os.Stat(userProfile); err == nil {
+				return userProfile
+			}
+		}
+	}
+	return os.Getenv("HOME")
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/httpstream/httpstream.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/httpstream/httpstream.go
index 4f6b608ce7a3..3ce3b02a0198 100644
--- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/httpstream/httpstream.go
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/httpstream/httpstream.go
@@ -114,20 +114,24 @@ func negotiateProtocol(clientProtocols, serverProtocols []string) string {
 	return ""
 }
 
-// Handshake performs a subprotocol negotiation. If the client did not request
-// a specific subprotocol, defaultProtocol is used. If the client did request a
+// Handshake performs a subprotocol negotiation. If the client did request a
 // subprotocol, Handshake will select the first common value found in
 // serverProtocols. If a match is found, Handshake adds a response header
 // indicating the chosen subprotocol. If no match is found, HTTP forbidden is
 // returned, along with a response header containing the list of protocols the
 // server can accept.
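+// (Editorial illustration, not part of the upstream comment: with the
+// X-Stream-Protocol-Version header used by this package, a client sending
+// "v2.channel.k8s.io" and "v1.channel.k8s.io" against serverProtocols of
+// ["v1.channel.k8s.io"] is answered with "v1.channel.k8s.io" echoed in the
+// response header; the channel protocol names here are illustrative.)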
-func Handshake(req *http.Request, w http.ResponseWriter, serverProtocols []string, defaultProtocol string) (string, error) { +func Handshake(req *http.Request, w http.ResponseWriter, serverProtocols []string) (string, error) { clientProtocols := req.Header[http.CanonicalHeaderKey(HeaderProtocolVersion)] if len(clientProtocols) == 0 { - // Kube 1.0 client that didn't support subprotocol negotiation - // TODO remove this defaulting logic once Kube 1.0 is no longer supported - w.Header().Add(HeaderProtocolVersion, defaultProtocol) - return defaultProtocol, nil + // Kube 1.0 clients didn't support subprotocol negotiation. + // TODO require clientProtocols once Kube 1.0 is no longer supported + return "", nil + } + + if len(serverProtocols) == 0 { + // Kube 1.0 servers didn't support subprotocol negotiation. This is mainly for testing. + // TODO require serverProtocols once Kube 1.0 is no longer supported + return "", nil } negotiatedProtocol := negotiateProtocol(clientProtocols, serverProtocols) diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/httpstream/httpstream_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/httpstream/httpstream_test.go new file mode 100644 index 000000000000..d21e60c0ee9b --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/httpstream/httpstream_test.go @@ -0,0 +1,127 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package httpstream
+
+import (
+	"net/http"
+	"reflect"
+	"testing"
+
+	"k8s.io/kubernetes/pkg/api"
+)
+
+type responseWriter struct {
+	header     http.Header
+	statusCode *int
+}
+
+func newResponseWriter() *responseWriter {
+	return &responseWriter{
+		header: make(http.Header),
+	}
+}
+
+func (r *responseWriter) Header() http.Header {
+	return r.header
+}
+
+func (r *responseWriter) WriteHeader(code int) {
+	r.statusCode = &code
+}
+
+func (r *responseWriter) Write([]byte) (int, error) {
+	return 0, nil
+}
+
+func TestHandshake(t *testing.T) {
+	tests := map[string]struct {
+		clientProtocols  []string
+		serverProtocols  []string
+		expectedProtocol string
+		expectError      bool
+	}{
+		"no client protocols": {
+			clientProtocols:  []string{},
+			serverProtocols:  []string{"a", "b"},
+			expectedProtocol: "",
+		},
+		"no common protocol": {
+			clientProtocols:  []string{"c"},
+			serverProtocols:  []string{"a", "b"},
+			expectedProtocol: "",
+			expectError:      true,
+		},
+		"common protocol": {
+			clientProtocols:  []string{"b"},
+			serverProtocols:  []string{"a", "b"},
+			expectedProtocol: "b",
+		},
+	}
+
+	for name, test := range tests {
+		req, err := http.NewRequest("GET", "http://www.example.com/", nil)
+		if err != nil {
+			t.Fatalf("%s: error creating request: %v", name, err)
+		}
+
+		for _, p := range test.clientProtocols {
+			req.Header.Add(HeaderProtocolVersion, p)
+		}
+
+		w := newResponseWriter()
+		negotiated, err := Handshake(req, w, test.serverProtocols)
+
+		// verify negotiated protocol
+		if e, a := test.expectedProtocol, negotiated; e != a {
+			t.Errorf("%s: protocol: expected %q, got %q", name, e, a)
+		}
+
+		if test.expectError {
+			if err == nil {
+				t.Errorf("%s: expected error but did not get one", name)
+			}
+			if w.statusCode == nil {
+				t.Errorf("%s: expected w.statusCode to be set", name)
+			} else if e, a := http.StatusForbidden, *w.statusCode; e != a {
+				t.Errorf("%s: w.statusCode: expected %d, got %d", name, e, a)
+			}
+			if e, a := test.serverProtocols, w.Header()[HeaderAcceptedProtocolVersions]; !reflect.DeepEqual(e, a) {
+				t.Errorf("%s: accepted server protocols: expected %v, got %v", name, e, a)
+			}
+			continue
+		}
+		if err != nil {
+			t.Errorf("%s: unexpected error: %v", name, err)
+			continue
+		}
+		if w.statusCode != nil {
+			t.Errorf("%s: unexpected non-nil w.statusCode: %d", name, *w.statusCode)
+		}
+
+		if len(test.expectedProtocol) == 0 {
+			if len(w.Header()[HeaderProtocolVersion]) > 0 {
+				t.Errorf("%s: unexpected protocol version response header: %s", name, w.Header()[HeaderProtocolVersion])
+			}
+			continue
+		}
+
+		// verify response headers
+		if e, a := []string{test.expectedProtocol}, w.Header()[HeaderProtocolVersion]; !api.Semantic.DeepEqual(e, a) {
+			t.Errorf("%s: protocol response header: expected %v, got %v", name, e, a)
+		}
+	}
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/httpstream/spdy/connection_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/httpstream/spdy/connection_test.go
new file mode 100644
index 000000000000..36b096402bff
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/httpstream/spdy/connection_test.go
@@ -0,0 +1,163 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spdy + +import ( + "io" + "net" + "net/http" + "sync" + "testing" + "time" + + "k8s.io/kubernetes/pkg/util/httpstream" +) + +func runProxy(t *testing.T, backendUrl string, proxyUrl chan<- string, proxyDone chan<- struct{}) { + listener, err := net.Listen("tcp4", "localhost:0") + if err != nil { + t.Fatalf("error listening: %v", err) + } + defer listener.Close() + + proxyUrl <- listener.Addr().String() + + clientConn, err := listener.Accept() + if err != nil { + t.Errorf("proxy: error accepting client connection: %v", err) + return + } + + backendConn, err := net.Dial("tcp4", backendUrl) + if err != nil { + t.Errorf("proxy: error dialing backend: %v", err) + return + } + defer backendConn.Close() + + var wg sync.WaitGroup + wg.Add(2) + + go func() { + defer wg.Done() + io.Copy(backendConn, clientConn) + }() + + go func() { + defer wg.Done() + io.Copy(clientConn, backendConn) + }() + + wg.Wait() + + proxyDone <- struct{}{} +} + +func runServer(t *testing.T, backendUrl chan<- string, serverDone chan<- struct{}) { + listener, err := net.Listen("tcp4", "localhost:0") + if err != nil { + t.Fatalf("server: error listening: %v", err) + } + defer listener.Close() + + backendUrl <- listener.Addr().String() + + conn, err := listener.Accept() + if err != nil { + t.Errorf("server: error accepting connection: %v", err) + return + } + + streamChan := make(chan httpstream.Stream) + replySentChan := make(chan (<-chan struct{})) + spdyConn, err := NewServerConnection(conn, func(stream httpstream.Stream, replySent <-chan struct{}) error { + streamChan <- stream + replySentChan <- replySent + return nil + }) + if err != nil { + t.Errorf("server: error creating spdy connection: %v", err) + return + } + + stream := <-streamChan + replySent := <-replySentChan + <-replySent + + buf := make([]byte, 1) + _, err = stream.Read(buf) + if err != io.EOF { + t.Errorf("server: unexpected read error: %v", err) + return + } + + <-spdyConn.CloseChan() + raw := spdyConn.(*connection).conn + if err := raw.Wait(15 * time.Second); err != nil { + t.Errorf("server: timed out waiting for connection closure: %v", err) + } + + serverDone <- struct{}{} +} + +func TestConnectionCloseIsImmediateThroughAProxy(t *testing.T) { + serverDone := make(chan struct{}) + backendUrlChan := make(chan string) + go runServer(t, backendUrlChan, serverDone) + backendUrl := <-backendUrlChan + + proxyDone := make(chan struct{}) + proxyUrlChan := make(chan string) + go runProxy(t, backendUrl, proxyUrlChan, proxyDone) + proxyUrl := <-proxyUrlChan + + conn, err := net.Dial("tcp4", proxyUrl) + if err != nil { + t.Fatalf("client: error connecting to proxy: %v", err) + } + + spdyConn, err := NewClientConnection(conn) + if err != nil { + t.Fatalf("client: error creating spdy connection: %v", err) + } + + if _, err := spdyConn.CreateStream(http.Header{}); err != nil { + t.Fatalf("client: error creating stream: %v", err) + } + + spdyConn.Close() + raw := spdyConn.(*connection).conn + if err := raw.Wait(15 * time.Second); err != nil { + t.Fatalf("client: timed out waiting for connection closure: %v", err) + } + + expired := time.NewTimer(15 * time.Second) 
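+	// Editorial note: the loop below drains one completion signal from the
+	// server goroutine and one from the proxy goroutine, sharing the single
+	// 15s timer above; it exits once both have reported done (i == 2) and
+	// fails the test if the timer fires first.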
+ i := 0 + for { + select { + case <-expired.C: + t.Fatalf("timed out waiting for proxy and/or server closure") + case <-serverDone: + i++ + case <-proxyDone: + i++ + } + if i == 2 { + break + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/httpstream/spdy/roundtripper.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/httpstream/spdy/roundtripper.go index ca7e9370ecf2..6091c4e4b103 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/httpstream/spdy/roundtripper.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/httpstream/spdy/roundtripper.go @@ -125,6 +125,10 @@ func (s *SpdyRoundTripper) dial(req *http.Request) (net.Conn, error) { return nil, err } + if s.tlsConfig == nil { + s.tlsConfig = &tls.Config{} + } + if len(s.tlsConfig.ServerName) == 0 { s.tlsConfig.ServerName = host } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/httpstream/spdy/roundtripper_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/httpstream/spdy/roundtripper_test.go new file mode 100644 index 000000000000..c80a24a69b61 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/httpstream/spdy/roundtripper_test.go @@ -0,0 +1,423 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package spdy + +import ( + "crypto/tls" + "crypto/x509" + "encoding/base64" + "io" + "net/http" + "net/http/httptest" + "net/url" + "testing" + + "github.com/elazarl/goproxy" + "k8s.io/kubernetes/pkg/util/httpstream" +) + +func TestRoundTripAndNewConnection(t *testing.T) { + localhostPool := x509.NewCertPool() + if !localhostPool.AppendCertsFromPEM(localhostCert) { + t.Errorf("error setting up localhostCert pool") + } + + httpsServerInvalidHostname := func(h http.Handler) *httptest.Server { + cert, err := tls.X509KeyPair(exampleCert, exampleKey) + if err != nil { + t.Errorf("https (invalid hostname): proxy_test: %v", err) + } + ts := httptest.NewUnstartedServer(h) + ts.TLS = &tls.Config{ + Certificates: []tls.Certificate{cert}, + } + ts.StartTLS() + return ts + } + + httpsServerValidHostname := func(h http.Handler) *httptest.Server { + cert, err := tls.X509KeyPair(localhostCert, localhostKey) + if err != nil { + t.Errorf("https (valid hostname): proxy_test: %v", err) + } + ts := httptest.NewUnstartedServer(h) + ts.TLS = &tls.Config{ + Certificates: []tls.Certificate{cert}, + } + ts.StartTLS() + return ts + } + + testCases := map[string]struct { + serverFunc func(http.Handler) *httptest.Server + proxyServerFunc func(http.Handler) *httptest.Server + proxyAuth *url.Userinfo + clientTLS *tls.Config + serverConnectionHeader string + serverUpgradeHeader string + serverStatusCode int + shouldError bool + }{ + "no headers": { + serverFunc: httptest.NewServer, + serverConnectionHeader: "", + serverUpgradeHeader: "", + serverStatusCode: http.StatusSwitchingProtocols, + shouldError: true, + }, + "no upgrade header": { + serverFunc: httptest.NewServer, + serverConnectionHeader: "Upgrade", + serverUpgradeHeader: "", + serverStatusCode: http.StatusSwitchingProtocols, + shouldError: true, + }, + "no connection header": { + serverFunc: httptest.NewServer, + serverConnectionHeader: "", + serverUpgradeHeader: "SPDY/3.1", + serverStatusCode: http.StatusSwitchingProtocols, + shouldError: true, + }, + "no switching protocol status code": { + serverFunc: httptest.NewServer, + serverConnectionHeader: "Upgrade", + serverUpgradeHeader: "SPDY/3.1", + serverStatusCode: http.StatusForbidden, + shouldError: true, + }, + "http": { + serverFunc: httptest.NewServer, + serverConnectionHeader: "Upgrade", + serverUpgradeHeader: "SPDY/3.1", + serverStatusCode: http.StatusSwitchingProtocols, + shouldError: false, + }, + "https (invalid hostname + InsecureSkipVerify)": { + serverFunc: httpsServerInvalidHostname, + clientTLS: &tls.Config{InsecureSkipVerify: true}, + serverConnectionHeader: "Upgrade", + serverUpgradeHeader: "SPDY/3.1", + serverStatusCode: http.StatusSwitchingProtocols, + shouldError: false, + }, + "https (invalid hostname + hostname verification)": { + serverFunc: httpsServerInvalidHostname, + clientTLS: &tls.Config{InsecureSkipVerify: false}, + serverConnectionHeader: "Upgrade", + serverUpgradeHeader: "SPDY/3.1", + serverStatusCode: http.StatusSwitchingProtocols, + shouldError: true, + }, + "https (valid hostname + RootCAs)": { + serverFunc: httpsServerValidHostname, + clientTLS: &tls.Config{RootCAs: localhostPool}, + serverConnectionHeader: "Upgrade", + serverUpgradeHeader: "SPDY/3.1", + serverStatusCode: http.StatusSwitchingProtocols, + shouldError: false, + }, + "proxied http->http": { + serverFunc: httptest.NewServer, + proxyServerFunc: httptest.NewServer, + serverConnectionHeader: "Upgrade", + serverUpgradeHeader: "SPDY/3.1", + serverStatusCode: http.StatusSwitchingProtocols, + shouldError: false, + 
}, + "proxied https (invalid hostname + InsecureSkipVerify) -> http": { + serverFunc: httptest.NewServer, + proxyServerFunc: httpsServerInvalidHostname, + clientTLS: &tls.Config{InsecureSkipVerify: true}, + serverConnectionHeader: "Upgrade", + serverUpgradeHeader: "SPDY/3.1", + serverStatusCode: http.StatusSwitchingProtocols, + shouldError: false, + }, + "proxied https with auth (invalid hostname + InsecureSkipVerify) -> http": { + serverFunc: httptest.NewServer, + proxyServerFunc: httpsServerInvalidHostname, + proxyAuth: url.UserPassword("proxyuser", "proxypasswd"), + clientTLS: &tls.Config{InsecureSkipVerify: true}, + serverConnectionHeader: "Upgrade", + serverUpgradeHeader: "SPDY/3.1", + serverStatusCode: http.StatusSwitchingProtocols, + shouldError: false, + }, + "proxied https (invalid hostname + hostname verification) -> http": { + serverFunc: httptest.NewServer, + proxyServerFunc: httpsServerInvalidHostname, + clientTLS: &tls.Config{InsecureSkipVerify: false}, + serverConnectionHeader: "Upgrade", + serverUpgradeHeader: "SPDY/3.1", + serverStatusCode: http.StatusSwitchingProtocols, + shouldError: true, // fails because the client doesn't trust the proxy + }, + "proxied https (valid hostname + RootCAs) -> http": { + serverFunc: httptest.NewServer, + proxyServerFunc: httpsServerValidHostname, + clientTLS: &tls.Config{RootCAs: localhostPool}, + serverConnectionHeader: "Upgrade", + serverUpgradeHeader: "SPDY/3.1", + serverStatusCode: http.StatusSwitchingProtocols, + shouldError: false, + }, + "proxied https with auth (valid hostname + RootCAs) -> http": { + serverFunc: httptest.NewServer, + proxyServerFunc: httpsServerValidHostname, + proxyAuth: url.UserPassword("proxyuser", "proxypasswd"), + clientTLS: &tls.Config{RootCAs: localhostPool}, + serverConnectionHeader: "Upgrade", + serverUpgradeHeader: "SPDY/3.1", + serverStatusCode: http.StatusSwitchingProtocols, + shouldError: false, + }, + "proxied https (invalid hostname + InsecureSkipVerify) -> https (invalid hostname)": { + serverFunc: httpsServerInvalidHostname, + proxyServerFunc: httpsServerInvalidHostname, + clientTLS: &tls.Config{InsecureSkipVerify: true}, + serverConnectionHeader: "Upgrade", + serverUpgradeHeader: "SPDY/3.1", + serverStatusCode: http.StatusSwitchingProtocols, + shouldError: false, // works because the test proxy ignores TLS errors + }, + "proxied https with auth (invalid hostname + InsecureSkipVerify) -> https (invalid hostname)": { + serverFunc: httpsServerInvalidHostname, + proxyServerFunc: httpsServerInvalidHostname, + proxyAuth: url.UserPassword("proxyuser", "proxypasswd"), + clientTLS: &tls.Config{InsecureSkipVerify: true}, + serverConnectionHeader: "Upgrade", + serverUpgradeHeader: "SPDY/3.1", + serverStatusCode: http.StatusSwitchingProtocols, + shouldError: false, // works because the test proxy ignores TLS errors + }, + "proxied https (invalid hostname + hostname verification) -> https (invalid hostname)": { + serverFunc: httpsServerInvalidHostname, + proxyServerFunc: httpsServerInvalidHostname, + clientTLS: &tls.Config{InsecureSkipVerify: false}, + serverConnectionHeader: "Upgrade", + serverUpgradeHeader: "SPDY/3.1", + serverStatusCode: http.StatusSwitchingProtocols, + shouldError: true, // fails because the client doesn't trust the proxy + }, + "proxied https (valid hostname + RootCAs) -> https (valid hostname + RootCAs)": { + serverFunc: httpsServerValidHostname, + proxyServerFunc: httpsServerValidHostname, + clientTLS: &tls.Config{RootCAs: localhostPool}, + serverConnectionHeader: "Upgrade", + 
			serverUpgradeHeader:    "SPDY/3.1",
			serverStatusCode:       http.StatusSwitchingProtocols,
			shouldError:            false,
		},
		"proxied https with auth (valid hostname + RootCAs) -> https (valid hostname + RootCAs)": {
			serverFunc:             httpsServerValidHostname,
			proxyServerFunc:        httpsServerValidHostname,
			proxyAuth:              url.UserPassword("proxyuser", "proxypasswd"),
			clientTLS:              &tls.Config{RootCAs: localhostPool},
			serverConnectionHeader: "Upgrade",
			serverUpgradeHeader:    "SPDY/3.1",
			serverStatusCode:       http.StatusSwitchingProtocols,
			shouldError:            false,
		},
	}

	for k, testCase := range testCases {
		server := testCase.serverFunc(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
			if testCase.shouldError {
				if e, a := httpstream.HeaderUpgrade, req.Header.Get(httpstream.HeaderConnection); e != a {
					t.Fatalf("%s: Expected connection=upgrade header, got '%s'", k, a)
				}

				w.Header().Set(httpstream.HeaderConnection, testCase.serverConnectionHeader)
				w.Header().Set(httpstream.HeaderUpgrade, testCase.serverUpgradeHeader)
				w.WriteHeader(testCase.serverStatusCode)

				return
			}

			streamCh := make(chan httpstream.Stream)

			responseUpgrader := NewResponseUpgrader()
			spdyConn := responseUpgrader.UpgradeResponse(w, req, func(s httpstream.Stream, replySent <-chan struct{}) error {
				streamCh <- s
				return nil
			})
			if spdyConn == nil {
				t.Fatalf("%s: unexpected nil spdyConn", k)
			}
			defer spdyConn.Close()

			stream := <-streamCh
			io.Copy(stream, stream)
		}))
		defer server.Close()

		serverURL, err := url.Parse(server.URL)
		if err != nil {
			t.Fatalf("%s: Error creating request: %s", k, err)
		}
		req, err := http.NewRequest("GET", server.URL, nil)
		if err != nil {
			t.Fatalf("%s: Error creating request: %s", k, err)
		}

		spdyTransport := NewSpdyRoundTripper(testCase.clientTLS)

		var proxierCalled bool
		var proxyCalledWithHost string
		var proxyCalledWithAuth bool
		var proxyCalledWithAuthHeader string
		if testCase.proxyServerFunc != nil {
			proxyHandler := goproxy.NewProxyHttpServer()

			proxyHandler.OnRequest().HandleConnectFunc(func(host string, ctx *goproxy.ProxyCtx) (*goproxy.ConnectAction, string) {
				proxyCalledWithHost = host

				proxyAuthHeaderName := "Proxy-Authorization"
				_, proxyCalledWithAuth = ctx.Req.Header[proxyAuthHeaderName]
				proxyCalledWithAuthHeader = ctx.Req.Header.Get(proxyAuthHeaderName)
				return goproxy.OkConnect, host
			})

			proxy := testCase.proxyServerFunc(proxyHandler)

			spdyTransport.proxier = func(proxierReq *http.Request) (*url.URL, error) {
				proxierCalled = true
				proxyURL, err := url.Parse(proxy.URL)
				if err != nil {
					return nil, err
				}
				proxyURL.User = testCase.proxyAuth
				return proxyURL, nil
			}
			defer proxy.Close()
		}

		client := &http.Client{Transport: spdyTransport}

		resp, err := client.Do(req)
		var conn httpstream.Connection
		if err == nil {
			conn, err = spdyTransport.NewConnection(resp)
		}
		haveErr := err != nil
		if e, a := testCase.shouldError, haveErr; e != a {
			t.Fatalf("%s: shouldError=%t, got %t: %v", k, e, a, err)
		}
		if testCase.shouldError {
			continue
		}
		defer conn.Close()

		if resp.StatusCode != http.StatusSwitchingProtocols {
			t.Fatalf("%s: expected http 101 switching protocols, got %d", k, resp.StatusCode)
		}

		stream, err := conn.CreateStream(http.Header{})
		if err != nil {
			t.Fatalf("%s: error creating client stream: %s", k, err)
		}

		n, err := stream.Write([]byte("hello"))
		if err != nil {
			t.Fatalf("%s: error writing to stream: %s", k, err)
		}
		if n != 5 {
t.Fatalf("%s: Expected to write 5 bytes, but actually wrote %d", k, n) + } + + b := make([]byte, 5) + n, err = stream.Read(b) + if err != nil { + t.Fatalf("%s: error reading from stream: %s", k, err) + } + if n != 5 { + t.Fatalf("%s: Expected to read 5 bytes, but actually read %d", k, n) + } + if e, a := "hello", string(b[0:n]); e != a { + t.Fatalf("%s: expected '%s', got '%s'", k, e, a) + } + + if testCase.proxyServerFunc != nil { + if !proxierCalled { + t.Fatalf("%s: Expected to use a proxy but proxier in SpdyRoundTripper wasn't called", k) + } + if proxyCalledWithHost != serverURL.Host { + t.Fatalf("%s: Expected to see a call to the proxy for backend %q, got %q", k, serverURL.Host, proxyCalledWithHost) + } + } + + var expectedProxyAuth string + if testCase.proxyAuth != nil { + encodedCredentials := base64.StdEncoding.EncodeToString([]byte(testCase.proxyAuth.String())) + expectedProxyAuth = "Basic " + encodedCredentials + } + if len(expectedProxyAuth) == 0 && proxyCalledWithAuth { + t.Fatalf("%s: Proxy authorization unexpected, got %q", k, proxyCalledWithAuthHeader) + } + if proxyCalledWithAuthHeader != expectedProxyAuth { + t.Fatalf("%s: Expected to see a call to the proxy with credentials %q, got %q", k, testCase.proxyAuth, proxyCalledWithAuthHeader) + } + } +} + +// exampleCert was generated from crypto/tls/generate_cert.go with the following command: +// go run generate_cert.go --rsa-bits 512 --host example.com --ca --start-date "Jan 1 00:00:00 1970" --duration=1000000h +var exampleCert = []byte(`-----BEGIN CERTIFICATE----- +MIIBcjCCAR6gAwIBAgIQBOUTYowZaENkZi0faI9DgTALBgkqhkiG9w0BAQswEjEQ +MA4GA1UEChMHQWNtZSBDbzAgFw03MDAxMDEwMDAwMDBaGA8yMDg0MDEyOTE2MDAw +MFowEjEQMA4GA1UEChMHQWNtZSBDbzBcMA0GCSqGSIb3DQEBAQUAA0sAMEgCQQCZ +xfR3sgeHBraGFfF/24tTn4PRVAHOf2UOOxSQRs+aYjNqimFqf/SRIblQgeXdBJDR +gVK5F1Js2zwlehw0bHxRAgMBAAGjUDBOMA4GA1UdDwEB/wQEAwIApDATBgNVHSUE +DDAKBggrBgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MBYGA1UdEQQPMA2CC2V4YW1w +bGUuY29tMAsGCSqGSIb3DQEBCwNBAI/mfBB8dm33IpUl+acSyWfL6gX5Wc0FFyVj +dKeesE1XBuPX1My/rzU6Oy/YwX7LOL4FaeNUS6bbL4axSLPKYSs= +-----END CERTIFICATE-----`) + +var exampleKey = []byte(`-----BEGIN RSA PRIVATE KEY----- +MIIBOgIBAAJBAJnF9HeyB4cGtoYV8X/bi1Ofg9FUAc5/ZQ47FJBGz5piM2qKYWp/ +9JEhuVCB5d0EkNGBUrkXUmzbPCV6HDRsfFECAwEAAQJBAJLH9yPuButniACTn5L5 +IJQw1mWQt6zBw9eCo41YWkA0866EgjC53aPZaRjXMp0uNJGdIsys2V5rCOOLWN2C +ODECIQDICHsi8QQQ9wpuJy8X5l8MAfxHL+DIqI84wQTeVM91FQIhAMTME8A18/7h +1Ad6drdnxAkuC0tX6Sx0LDozrmen+HFNAiAlcEDrt0RVkIcpOrg7tuhPLQf0oudl +Zvb3Xlj069awSQIgcT15E/43w2+RASifzVNhQ2MCTr1sSA8lL+xzK+REmnUCIBhQ +j4139pf8Re1J50zBxS/JlQfgDQi9sO9pYeiHIxNs +-----END RSA PRIVATE KEY-----`) + +// localhostCert was generated from crypto/tls/generate_cert.go with the following command: +// go run generate_cert.go --rsa-bits 512 --host 127.0.0.1,::1,example.com --ca --start-date "Jan 1 00:00:00 1970" --duration=1000000h +var localhostCert = []byte(`-----BEGIN CERTIFICATE----- +MIIBdzCCASOgAwIBAgIBADALBgkqhkiG9w0BAQUwEjEQMA4GA1UEChMHQWNtZSBD +bzAeFw03MDAxMDEwMDAwMDBaFw00OTEyMzEyMzU5NTlaMBIxEDAOBgNVBAoTB0Fj +bWUgQ28wWjALBgkqhkiG9w0BAQEDSwAwSAJBAN55NcYKZeInyTuhcCwFMhDHCmwa +IUSdtXdcbItRB/yfXGBhiex00IaLXQnSU+QZPRZWYqeTEbFSgihqi1PUDy8CAwEA +AaNoMGYwDgYDVR0PAQH/BAQDAgCkMBMGA1UdJQQMMAoGCCsGAQUFBwMBMA8GA1Ud +EwEB/wQFMAMBAf8wLgYDVR0RBCcwJYILZXhhbXBsZS5jb22HBH8AAAGHEAAAAAAA +AAAAAAAAAAAAAAEwCwYJKoZIhvcNAQEFA0EAAoQn/ytgqpiLcZu9XKbCJsJcvkgk +Se6AbGXgSlq+ZCEVo0qIwSgeBqmsJxUu7NCSOwVJLYNEBO2DtIxoYVk+MA== +-----END CERTIFICATE-----`) + +// localhostKey is the private key for localhostCert. 
+var localhostKey = []byte(`-----BEGIN RSA PRIVATE KEY----- +MIIBPAIBAAJBAN55NcYKZeInyTuhcCwFMhDHCmwaIUSdtXdcbItRB/yfXGBhiex0 +0IaLXQnSU+QZPRZWYqeTEbFSgihqi1PUDy8CAwEAAQJBAQdUx66rfh8sYsgfdcvV +NoafYpnEcB5s4m/vSVe6SU7dCK6eYec9f9wpT353ljhDUHq3EbmE4foNzJngh35d +AekCIQDhRQG5Li0Wj8TM4obOnnXUXf1jRv0UkzE9AHWLG5q3AwIhAPzSjpYUDjVW +MCUXgckTpKCuGwbJk7424Nb8bLzf3kllAiA5mUBgjfr/WtFSJdWcPQ4Zt9KTMNKD +EUO0ukpTwEIl6wIhAMbGqZK3zAAFdq8DD2jPx+UJXnh0rnOkZBzDtJ6/iN69AiEA +1Aq8MJgTaYsDQWyU/hDq5YkDJc9e9DSCvUIzqxQWMQE= +-----END RSA PRIVATE KEY-----`) diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/httpstream/spdy/upgrade_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/httpstream/spdy/upgrade_test.go new file mode 100644 index 000000000000..4e111407e876 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/httpstream/spdy/upgrade_test.go @@ -0,0 +1,93 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spdy + +import ( + "net/http" + "net/http/httptest" + "testing" +) + +func TestUpgradeResponse(t *testing.T) { + testCases := []struct { + connectionHeader string + upgradeHeader string + shouldError bool + }{ + { + connectionHeader: "", + upgradeHeader: "", + shouldError: true, + }, + { + connectionHeader: "Upgrade", + upgradeHeader: "", + shouldError: true, + }, + { + connectionHeader: "", + upgradeHeader: "SPDY/3.1", + shouldError: true, + }, + { + connectionHeader: "Upgrade", + upgradeHeader: "SPDY/3.1", + shouldError: false, + }, + } + + for i, testCase := range testCases { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + upgrader := NewResponseUpgrader() + conn := upgrader.UpgradeResponse(w, req, nil) + haveErr := conn == nil + if e, a := testCase.shouldError, haveErr; e != a { + t.Fatalf("%d: expected shouldErr=%t, got %t", i, testCase.shouldError, haveErr) + } + if haveErr { + return + } + if conn == nil { + t.Fatalf("%d: unexpected nil conn", i) + } + defer conn.Close() + })) + defer server.Close() + + req, err := http.NewRequest("GET", server.URL, nil) + if err != nil { + t.Fatalf("%d: error creating request: %s", i, err) + } + + req.Header.Set("Connection", testCase.connectionHeader) + req.Header.Set("Upgrade", testCase.upgradeHeader) + + client := &http.Client{} + resp, err := client.Do(req) + if err != nil { + t.Fatalf("%d: unexpected non-nil err from client.Do: %s", i, err) + } + + if testCase.shouldError { + continue + } + + if resp.StatusCode != http.StatusSwitchingProtocols { + t.Fatalf("%d: expected status 101 switching protocols, got %d", i, resp.StatusCode) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/integer/integer_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/integer/integer_test.go new file mode 100644 index 000000000000..0f8856738273 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/integer/integer_test.go @@ -0,0 +1,143 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package integer + +import "testing" + +func TestIntMax(t *testing.T) { + tests := []struct { + nums []int + expectedMax int + }{ + { + nums: []int{-1, 0}, + expectedMax: 0, + }, + { + nums: []int{-1, -2}, + expectedMax: -1, + }, + { + nums: []int{0, 1}, + expectedMax: 1, + }, + { + nums: []int{1, 2}, + expectedMax: 2, + }, + } + + for i, test := range tests { + t.Logf("executing scenario %d", i) + if max := IntMax(test.nums[0], test.nums[1]); max != test.expectedMax { + t.Errorf("expected %v, got %v", test.expectedMax, max) + } + } +} + +func TestIntMin(t *testing.T) { + tests := []struct { + nums []int + expectedMin int + }{ + { + nums: []int{-1, 0}, + expectedMin: -1, + }, + { + nums: []int{-1, -2}, + expectedMin: -2, + }, + { + nums: []int{0, 1}, + expectedMin: 0, + }, + { + nums: []int{1, 2}, + expectedMin: 1, + }, + } + + for i, test := range tests { + t.Logf("executing scenario %d", i) + if min := IntMin(test.nums[0], test.nums[1]); min != test.expectedMin { + t.Errorf("expected %v, got %v", test.expectedMin, min) + } + } +} + +func TestInt64Max(t *testing.T) { + tests := []struct { + nums []int64 + expectedMax int64 + }{ + { + nums: []int64{-1, 0}, + expectedMax: 0, + }, + { + nums: []int64{-1, -2}, + expectedMax: -1, + }, + { + nums: []int64{0, 1}, + expectedMax: 1, + }, + { + nums: []int64{1, 2}, + expectedMax: 2, + }, + } + + for i, test := range tests { + t.Logf("executing scenario %d", i) + if max := Int64Max(test.nums[0], test.nums[1]); max != test.expectedMax { + t.Errorf("expected %v, got %v", test.expectedMax, max) + } + } +} + +func TestInt64Min(t *testing.T) { + tests := []struct { + nums []int64 + expectedMin int64 + }{ + { + nums: []int64{-1, 0}, + expectedMin: -1, + }, + { + nums: []int64{-1, -2}, + expectedMin: -2, + }, + { + nums: []int64{0, 1}, + expectedMin: 0, + }, + { + nums: []int64{1, 2}, + expectedMin: 1, + }, + } + + for i, test := range tests { + t.Logf("executing scenario %d", i) + if min := Int64Min(test.nums[0], test.nums[1]); min != test.expectedMin { + t.Errorf("expected %v, got %v", test.expectedMin, min) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/interrupt/interrupt.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/interrupt/interrupt.go new file mode 100644 index 000000000000..e9fbfd56f0f3 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/interrupt/interrupt.go @@ -0,0 +1,104 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package interrupt + +import ( + "os" + "os/signal" + "sync" + "syscall" +) + +// terminationSignals are signals that cause the program to exit in the +// supported platforms (linux, darwin, windows). +var terminationSignals = []os.Signal{syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT} + +// Handler guarantees execution of notifications after a critical section (the function passed +// to a Run method), even in the presence of process termination. It guarantees exactly once +// invocation of the provided notify functions. +type Handler struct { + notify []func() + final func(os.Signal) + once sync.Once +} + +// Chain creates a new handler that invokes all notify functions when the critical section exits +// and then invokes the optional handler's notifications. This allows critical sections to be +// nested without losing exactly once invocations. Notify functions can invoke any cleanup needed +// but should not exit (which is the responsibility of the parent handler). +func Chain(handler *Handler, notify ...func()) *Handler { + if handler == nil { + return New(nil, notify...) + } + return New(handler.Signal, append(notify, handler.Close)...) +} + +// New creates a new handler that guarantees all notify functions are run after the critical +// section exits (or is interrupted by the OS), then invokes the final handler. If no final +// handler is specified, the default final is `os.Exit(1)`. A handler can only be used for +// one critical section. +func New(final func(os.Signal), notify ...func()) *Handler { + return &Handler{ + final: final, + notify: notify, + } +} + +// Close executes all the notification handlers if they have not yet been executed. +func (h *Handler) Close() { + h.once.Do(func() { + for _, fn := range h.notify { + fn() + } + }) +} + +// Signal is called when an os.Signal is received, and guarantees that all notifications +// are executed, then the final handler is executed. This function should only be called once +// per Handler instance. +func (h *Handler) Signal(s os.Signal) { + h.once.Do(func() { + for _, fn := range h.notify { + fn() + } + if h.final == nil { + os.Exit(1) + } + h.final(s) + }) +} + +// Run ensures that any notifications are invoked after the provided fn exits (even if the +// process is interrupted by an OS termination signal). Notifications are only invoked once +// per Handler instance, so calling Run more than once will not behave as the user expects. +func (h *Handler) Run(fn func() error) error { + ch := make(chan os.Signal, 1) + signal.Notify(ch, terminationSignals...) + defer func() { + signal.Stop(ch) + close(ch) + }() + go func() { + sig, ok := <-ch + if !ok { + return + } + h.Signal(sig) + }() + defer h.Close() + return fn() +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/intstr/deep_copy_generated.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/intstr/deep_copy_generated.go new file mode 100644 index 000000000000..29aef022349d --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/intstr/deep_copy_generated.go @@ -0,0 +1,32 @@ +// +build !ignore_autogenerated + +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package intstr + +import ( + conversion "k8s.io/kubernetes/pkg/conversion" +) + +func DeepCopy_intstr_IntOrString(in IntOrString, out *IntOrString, c *conversion.Cloner) error { + out.Type = in.Type + out.IntVal = in.IntVal + out.StrVal = in.StrVal + return nil +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/intstr/generated.pb.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/intstr/generated.pb.go new file mode 100644 index 000000000000..ef39cd58655b --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/intstr/generated.pb.go @@ -0,0 +1,347 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/pkg/util/intstr/generated.proto +// DO NOT EDIT! + +/* + Package intstr is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/pkg/util/intstr/generated.proto + + It has these top-level messages: + IntOrString +*/ +package intstr + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. 
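+//
+// The blank-identifier assignments just below keep those imports referenced.
+// Further down, encodeVarintGenerated and sovGenerated hand-roll the standard
+// protobuf varint wire format: seven value bits per byte, least-significant
+// group first, with the high bit of each byte marking continuation. As a
+// rough worked example (illustrative, not part of the generated output),
+// encoding 300 (binary 1_0010_1100) emits two bytes:
+//
+//	300&0x7f | 0x80 = 0xAC  // low seven bits, continuation bit set
+//	300 >> 7        = 0x02  // remaining bits, high bit clear
+//
+// sovGenerated(x) returns the byte length of that encoding, and sozGenerated
+// first zig-zag encodes signed values, (x<<1)^(x>>63), so that small negative
+// numbers also encode compactly.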
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +func (m *IntOrString) Reset() { *m = IntOrString{} } +func (*IntOrString) ProtoMessage() {} + +func init() { + proto.RegisterType((*IntOrString)(nil), "k8s.io.kubernetes.pkg.util.intstr.IntOrString") +} +func (m *IntOrString) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *IntOrString) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Type)) + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(m.IntVal)) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.StrVal))) + i += copy(data[i:], m.StrVal) + return i, nil +} + +func encodeFixed64Generated(data []byte, offset int, v uint64) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + data[offset+4] = uint8(v >> 32) + data[offset+5] = uint8(v >> 40) + data[offset+6] = uint8(v >> 48) + data[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(data []byte, offset int, v uint32) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(data []byte, offset int, v uint64) int { + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return offset + 1 +} +func (m *IntOrString) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Type)) + n += 1 + sovGenerated(uint64(m.IntVal)) + l = len(m.StrVal) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *IntOrString) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IntOrString: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IntOrString: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Type |= (Type(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IntVal", wireType) + } + m.IntVal = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.IntVal |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: 
wrong wireType = %d for field StrVal", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StrVal = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(data []byte) (n int, err error) { + l := len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if data[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(data[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/intstr/generated.proto b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/intstr/generated.proto new file mode 100644 index 000000000000..834a20db9001 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/intstr/generated.proto @@ -0,0 +1,42 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.kubernetes.pkg.util.intstr; + +// Package-wide variables from generator "generated". +option go_package = "intstr"; + +// IntOrString is a type that can hold an int32 or a string. When used in +// JSON or YAML marshalling and unmarshalling, it produces or consumes the +// inner type. This allows you to have, for example, a JSON field that can +// accept a name or number. +// TODO: Rename to Int32OrString +// +// +protobuf=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +message IntOrString { + optional int64 type = 1; + + optional int32 intVal = 2; + + optional string strVal = 3; +} + diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/intstr/intstr.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/intstr/intstr.go index aa3cde1fa87d..3724a717fb10 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/intstr/intstr.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/intstr/intstr.go @@ -35,9 +35,9 @@ import ( // +protobuf=true // +protobuf.options.(gogoproto.goproto_stringer)=false type IntOrString struct { - Type Type - IntVal int32 - StrVal string + Type Type `protobuf:"varint,1,opt,name=type,casttype=Type"` + IntVal int32 `protobuf:"varint,2,opt,name=intVal"` + StrVal string `protobuf:"bytes,3,opt,name=strVal"` } // Type represents the stored type of IntOrString. diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/intstr/intstr_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/intstr/intstr_test.go new file mode 100644 index 000000000000..d4ccb6d286b7 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/intstr/intstr_test.go @@ -0,0 +1,176 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package intstr + +import ( + "encoding/json" + "reflect" + "testing" + + "github.com/ghodss/yaml" +) + +func TestFromInt(t *testing.T) { + i := FromInt(93) + if i.Type != Int || i.IntVal != 93 { + t.Errorf("Expected IntVal=93, got %+v", i) + } +} + +func TestFromString(t *testing.T) { + i := FromString("76") + if i.Type != String || i.StrVal != "76" { + t.Errorf("Expected StrVal=\"76\", got %+v", i) + } +} + +type IntOrStringHolder struct { + IOrS IntOrString `json:"val"` +} + +func TestIntOrStringUnmarshalJSON(t *testing.T) { + cases := []struct { + input string + result IntOrString + }{ + {"{\"val\": 123}", FromInt(123)}, + {"{\"val\": \"123\"}", FromString("123")}, + } + + for _, c := range cases { + var result IntOrStringHolder + if err := json.Unmarshal([]byte(c.input), &result); err != nil { + t.Errorf("Failed to unmarshal input '%v': %v", c.input, err) + } + if result.IOrS != c.result { + t.Errorf("Failed to unmarshal input '%v': expected %+v, got %+v", c.input, c.result, result) + } + } +} + +func TestIntOrStringMarshalJSON(t *testing.T) { + cases := []struct { + input IntOrString + result string + }{ + {FromInt(123), "{\"val\":123}"}, + {FromString("123"), "{\"val\":\"123\"}"}, + } + + for _, c := range cases { + input := IntOrStringHolder{c.input} + result, err := json.Marshal(&input) + if err != nil { + t.Errorf("Failed to marshal input '%v': %v", input, err) + } + if string(result) != c.result { + t.Errorf("Failed to marshal input '%v': expected: %+v, got %q", input, c.result, string(result)) + } + } +} + +func TestIntOrStringMarshalJSONUnmarshalYAML(t *testing.T) { + cases := []struct { + input IntOrString + }{ + {FromInt(123)}, + {FromString("123")}, + } + + for _, c := range cases { + input := IntOrStringHolder{c.input} + jsonMarshalled, err := json.Marshal(&input) + if err != nil { + t.Errorf("1: Failed to marshal input: '%v': %v", input, err) + } + + var result IntOrStringHolder + err = yaml.Unmarshal(jsonMarshalled, &result) + if err != nil { + t.Errorf("2: Failed to unmarshal '%+v': %v", string(jsonMarshalled), err) + } + + if !reflect.DeepEqual(input, result) { + t.Errorf("3: Failed to marshal input '%+v': got %+v", input, result) + } + } +} + +func TestGetValueFromIntOrPercent(t *testing.T) { + tests := []struct { + input IntOrString + total int + roundUp bool + expectErr bool + expectVal int + }{ + { + input: FromInt(123), + expectErr: false, + expectVal: 123, + }, + { + input: FromString("90%"), + total: 100, + roundUp: true, + expectErr: false, + expectVal: 90, + }, + { + input: FromString("90%"), + total: 95, + roundUp: true, + expectErr: false, + expectVal: 86, + }, + { + input: FromString("90%"), + total: 95, + roundUp: false, + expectErr: false, + expectVal: 85, + }, + { + input: FromString("%"), + expectErr: true, + }, + { + input: FromString("90#"), + expectErr: true, + }, + { + input: FromString("#%"), + expectErr: true, + }, + } + + for i, test := range tests { + t.Logf("test case %d", i) + value, err := GetValueFromIntOrPercent(&test.input, test.total, test.roundUp) + if test.expectErr && err == nil { + t.Errorf("expected error, but got none") + continue + } + if !test.expectErr && err != nil { + t.Errorf("unexpected err: %v", err) + continue + } + if test.expectVal != value { + t.Errorf("expected %v, but got %v", test.expectVal, value) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/io/io_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/io/io_test.go new file mode 100644 index 000000000000..1b9d4c67a345 --- 
/dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/io/io_test.go @@ -0,0 +1,56 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io_test + +import ( + "fmt" + "os" + "testing" + + "github.com/pborman/uuid" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apimachinery/registered" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/util/io" + utiltesting "k8s.io/kubernetes/pkg/util/testing" + "k8s.io/kubernetes/pkg/volume" +) + +func TestSavePodToFile(t *testing.T) { + pod := volume.NewPersistentVolumeRecyclerPodTemplate() + + // sets all default values on a pod for equality comparison after decoding from file + codec := api.Codecs.LegacyCodec(registered.GroupOrDie(api.GroupName).GroupVersion) + encoded, err := runtime.Encode(codec, pod) + runtime.DecodeInto(codec, encoded, pod) + + tmpDir := utiltesting.MkTmpdirOrDie("kube-io-test") + defer os.RemoveAll(tmpDir) + path := fmt.Sprintf("/%s/kube-io-test-%s", tmpDir, uuid.New()) + + if err := io.SavePodToFile(pod, path, 777); err != nil { + t.Fatalf("failed to save pod to file: %v", err) + } + + podFromFile, err := io.LoadPodFromFile(path) + if err != nil { + t.Fatalf("failed to load pod from file: %v", err) + } + if !api.Semantic.DeepEqual(pod, podFromFile) { + t.Errorf("\nexpected %#v\ngot %#v\n", pod, podFromFile) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/iptables/iptables_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/iptables/iptables_test.go new file mode 100644 index 000000000000..d5bb346910a3 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/iptables/iptables_test.go @@ -0,0 +1,768 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package iptables + +import ( + "strings" + "testing" + "time" + + "k8s.io/kubernetes/pkg/util/dbus" + "k8s.io/kubernetes/pkg/util/exec" + "k8s.io/kubernetes/pkg/util/sets" +) + +func getIptablesCommand(protocol Protocol) string { + if protocol == ProtocolIpv4 { + return cmdIptables + } + if protocol == ProtocolIpv6 { + return cmdIp6tables + } + panic("Unknown protocol") +} + +func testEnsureChain(t *testing.T, protocol Protocol) { + fcmd := exec.FakeCmd{ + CombinedOutputScript: []exec.FakeCombinedOutputAction{ + // iptables version check + func() ([]byte, error) { return []byte("iptables v1.9.22"), nil }, + // Success. + func() ([]byte, error) { return []byte{}, nil }, + // Exists. 
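+			// (Note: `iptables -N` exits with status 1 when the chain already
+			// exists, so the fake below returns status 1; EnsureChain is expected
+			// to report that as exists=true rather than as an error.)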
+ func() ([]byte, error) { return nil, &exec.FakeExitError{Status: 1} }, + // Failure. + func() ([]byte, error) { return nil, &exec.FakeExitError{Status: 2} }, + }, + } + fexec := exec.FakeExec{ + CommandScript: []exec.FakeCommandAction{ + func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) }, + func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) }, + func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) }, + func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) }, + }, + } + runner := New(&fexec, dbus.NewFake(nil, nil), protocol) + defer runner.Destroy() + // Success. + exists, err := runner.EnsureChain(TableNAT, Chain("FOOBAR")) + if err != nil { + t.Errorf("expected success, got %v", err) + } + if exists { + t.Errorf("expected exists = false") + } + if fcmd.CombinedOutputCalls != 2 { + t.Errorf("expected 2 CombinedOutput() calls, got %d", fcmd.CombinedOutputCalls) + } + cmd := getIptablesCommand(protocol) + if !sets.NewString(fcmd.CombinedOutputLog[1]...).HasAll(cmd, "-t", "nat", "-N", "FOOBAR") { + t.Errorf("wrong CombinedOutput() log, got %s", fcmd.CombinedOutputLog[1]) + } + // Exists. + exists, err = runner.EnsureChain(TableNAT, Chain("FOOBAR")) + if err != nil { + t.Errorf("expected success, got %v", err) + } + if !exists { + t.Errorf("expected exists = true") + } + // Failure. + _, err = runner.EnsureChain(TableNAT, Chain("FOOBAR")) + if err == nil { + t.Errorf("expected failure") + } +} + +func TestEnsureChainIpv4(t *testing.T) { + testEnsureChain(t, ProtocolIpv4) +} + +func TestEnsureChainIpv6(t *testing.T) { + testEnsureChain(t, ProtocolIpv6) +} + +func TestFlushChain(t *testing.T) { + fcmd := exec.FakeCmd{ + CombinedOutputScript: []exec.FakeCombinedOutputAction{ + // iptables version check + func() ([]byte, error) { return []byte("iptables v1.9.22"), nil }, + // Success. + func() ([]byte, error) { return []byte{}, nil }, + // Failure. + func() ([]byte, error) { return nil, &exec.FakeExitError{Status: 1} }, + }, + } + fexec := exec.FakeExec{ + CommandScript: []exec.FakeCommandAction{ + func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) }, + func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) }, + func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) }, + }, + } + runner := New(&fexec, dbus.NewFake(nil, nil), ProtocolIpv4) + defer runner.Destroy() + // Success. + err := runner.FlushChain(TableNAT, Chain("FOOBAR")) + if err != nil { + t.Errorf("expected success, got %v", err) + } + if fcmd.CombinedOutputCalls != 2 { + t.Errorf("expected 2 CombinedOutput() calls, got %d", fcmd.CombinedOutputCalls) + } + if !sets.NewString(fcmd.CombinedOutputLog[1]...).HasAll("iptables", "-t", "nat", "-F", "FOOBAR") { + t.Errorf("wrong CombinedOutput() log, got %s", fcmd.CombinedOutputLog[1]) + } + // Failure. + err = runner.FlushChain(TableNAT, Chain("FOOBAR")) + if err == nil { + t.Errorf("expected failure") + } +} + +func TestDeleteChain(t *testing.T) { + fcmd := exec.FakeCmd{ + CombinedOutputScript: []exec.FakeCombinedOutputAction{ + // iptables version check + func() ([]byte, error) { return []byte("iptables v1.9.22"), nil }, + // Success. + func() ([]byte, error) { return []byte{}, nil }, + // Failure. 
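+			// (Unlike the "already exists" case for EnsureChain above, DeleteChain
+			// treats any non-zero exit from `iptables -X` as a real failure.)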
+ func() ([]byte, error) { return nil, &exec.FakeExitError{Status: 1} }, + }, + } + fexec := exec.FakeExec{ + CommandScript: []exec.FakeCommandAction{ + func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) }, + func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) }, + func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) }, + }, + } + runner := New(&fexec, dbus.NewFake(nil, nil), ProtocolIpv4) + defer runner.Destroy() + // Success. + err := runner.DeleteChain(TableNAT, Chain("FOOBAR")) + if err != nil { + t.Errorf("expected success, got %v", err) + } + if fcmd.CombinedOutputCalls != 2 { + t.Errorf("expected 2 CombinedOutput() calls, got %d", fcmd.CombinedOutputCalls) + } + if !sets.NewString(fcmd.CombinedOutputLog[1]...).HasAll("iptables", "-t", "nat", "-X", "FOOBAR") { + t.Errorf("wrong CombinedOutput() log, got %s", fcmd.CombinedOutputLog[1]) + } + // Failure. + err = runner.DeleteChain(TableNAT, Chain("FOOBAR")) + if err == nil { + t.Errorf("expected failure") + } +} + +func TestEnsureRuleAlreadyExists(t *testing.T) { + fcmd := exec.FakeCmd{ + CombinedOutputScript: []exec.FakeCombinedOutputAction{ + // iptables version check + func() ([]byte, error) { return []byte("iptables v1.9.22"), nil }, + // Success. + func() ([]byte, error) { return []byte{}, nil }, + }, + } + fexec := exec.FakeExec{ + CommandScript: []exec.FakeCommandAction{ + // iptables version check + func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) }, + // The second Command() call is checking the rule. Success of that exec means "done". + func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) }, + }, + } + runner := New(&fexec, dbus.NewFake(nil, nil), ProtocolIpv4) + defer runner.Destroy() + exists, err := runner.EnsureRule(Append, TableNAT, ChainOutput, "abc", "123") + if err != nil { + t.Errorf("expected success, got %v", err) + } + if !exists { + t.Errorf("expected exists = true") + } + if fcmd.CombinedOutputCalls != 2 { + t.Errorf("expected 2 CombinedOutput() calls, got %d", fcmd.CombinedOutputCalls) + } + if !sets.NewString(fcmd.CombinedOutputLog[1]...).HasAll("iptables", "-t", "nat", "-C", "OUTPUT", "abc", "123") { + t.Errorf("wrong CombinedOutput() log, got %s", fcmd.CombinedOutputLog[1]) + } +} + +func TestEnsureRuleNew(t *testing.T) { + fcmd := exec.FakeCmd{ + CombinedOutputScript: []exec.FakeCombinedOutputAction{ + // iptables version check + func() ([]byte, error) { return []byte("iptables v1.9.22"), nil }, + // Status 1 on the first call. + func() ([]byte, error) { return nil, &exec.FakeExitError{Status: 1} }, + // Success on the second call. + func() ([]byte, error) { return []byte{}, nil }, + }, + } + fexec := exec.FakeExec{ + CommandScript: []exec.FakeCommandAction{ + // iptables version check + func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) }, + // The second Command() call is checking the rule. Failure of that means create it. + func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) }, + func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) 
}, + }, + } + runner := New(&fexec, dbus.NewFake(nil, nil), ProtocolIpv4) + defer runner.Destroy() + exists, err := runner.EnsureRule(Append, TableNAT, ChainOutput, "abc", "123") + if err != nil { + t.Errorf("expected success, got %v", err) + } + if exists { + t.Errorf("expected exists = false") + } + if fcmd.CombinedOutputCalls != 3 { + t.Errorf("expected 3 CombinedOutput() calls, got %d", fcmd.CombinedOutputCalls) + } + if !sets.NewString(fcmd.CombinedOutputLog[2]...).HasAll("iptables", "-t", "nat", "-A", "OUTPUT", "abc", "123") { + t.Errorf("wrong CombinedOutput() log, got %s", fcmd.CombinedOutputLog[2]) + } +} + +func TestEnsureRuleErrorChecking(t *testing.T) { + fcmd := exec.FakeCmd{ + CombinedOutputScript: []exec.FakeCombinedOutputAction{ + // iptables version check + func() ([]byte, error) { return []byte("iptables v1.9.22"), nil }, + // Status 2 on the first call. + func() ([]byte, error) { return nil, &exec.FakeExitError{Status: 2} }, + }, + } + fexec := exec.FakeExec{ + CommandScript: []exec.FakeCommandAction{ + // iptables version check + func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) }, + // The second Command() call is checking the rule. Failure of that means create it. + func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) }, + }, + } + runner := New(&fexec, dbus.NewFake(nil, nil), ProtocolIpv4) + defer runner.Destroy() + _, err := runner.EnsureRule(Append, TableNAT, ChainOutput, "abc", "123") + if err == nil { + t.Errorf("expected failure") + } + if fcmd.CombinedOutputCalls != 2 { + t.Errorf("expected 2 CombinedOutput() calls, got %d", fcmd.CombinedOutputCalls) + } +} + +func TestEnsureRuleErrorCreating(t *testing.T) { + fcmd := exec.FakeCmd{ + CombinedOutputScript: []exec.FakeCombinedOutputAction{ + // iptables version check + func() ([]byte, error) { return []byte("iptables v1.9.22"), nil }, + // Status 1 on the first call. + func() ([]byte, error) { return nil, &exec.FakeExitError{Status: 1} }, + // Status 1 on the second call. + func() ([]byte, error) { return nil, &exec.FakeExitError{Status: 1} }, + }, + } + fexec := exec.FakeExec{ + CommandScript: []exec.FakeCommandAction{ + // iptables version check + func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) }, + // The second Command() call is checking the rule. Failure of that means create it. + func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) }, + func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) }, + }, + } + runner := New(&fexec, dbus.NewFake(nil, nil), ProtocolIpv4) + defer runner.Destroy() + _, err := runner.EnsureRule(Append, TableNAT, ChainOutput, "abc", "123") + if err == nil { + t.Errorf("expected failure") + } + if fcmd.CombinedOutputCalls != 3 { + t.Errorf("expected 3 CombinedOutput() calls, got %d", fcmd.CombinedOutputCalls) + } +} + +func TestDeleteRuleAlreadyExists(t *testing.T) { + fcmd := exec.FakeCmd{ + CombinedOutputScript: []exec.FakeCombinedOutputAction{ + // iptables version check + func() ([]byte, error) { return []byte("iptables v1.9.22"), nil }, + // Status 1 on the first call. + func() ([]byte, error) { return nil, &exec.FakeExitError{Status: 1} }, + }, + } + fexec := exec.FakeExec{ + CommandScript: []exec.FakeCommandAction{ + // iptables version check + func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) }, + // The second Command() call is checking the rule. 
Failure of that exec means "does not exist". + func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) }, + }, + } + runner := New(&fexec, dbus.NewFake(nil, nil), ProtocolIpv4) + defer runner.Destroy() + err := runner.DeleteRule(TableNAT, ChainOutput, "abc", "123") + if err != nil { + t.Errorf("expected success, got %v", err) + } + if fcmd.CombinedOutputCalls != 2 { + t.Errorf("expected 2 CombinedOutput() calls, got %d", fcmd.CombinedOutputCalls) + } + if !sets.NewString(fcmd.CombinedOutputLog[1]...).HasAll("iptables", "-t", "nat", "-C", "OUTPUT", "abc", "123") { + t.Errorf("wrong CombinedOutput() log, got %s", fcmd.CombinedOutputLog[1]) + } +} + +func TestDeleteRuleNew(t *testing.T) { + fcmd := exec.FakeCmd{ + CombinedOutputScript: []exec.FakeCombinedOutputAction{ + // iptables version check + func() ([]byte, error) { return []byte("iptables v1.9.22"), nil }, + // Success on the first call. + func() ([]byte, error) { return []byte{}, nil }, + // Success on the second call. + func() ([]byte, error) { return []byte{}, nil }, + }, + } + fexec := exec.FakeExec{ + CommandScript: []exec.FakeCommandAction{ + // iptables version check + func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) }, + // The second Command() call is checking the rule. Success of that means delete it. + func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) }, + func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) }, + }, + } + runner := New(&fexec, dbus.NewFake(nil, nil), ProtocolIpv4) + defer runner.Destroy() + err := runner.DeleteRule(TableNAT, ChainOutput, "abc", "123") + if err != nil { + t.Errorf("expected success, got %v", err) + } + if fcmd.CombinedOutputCalls != 3 { + t.Errorf("expected 3 CombinedOutput() calls, got %d", fcmd.CombinedOutputCalls) + } + if !sets.NewString(fcmd.CombinedOutputLog[2]...).HasAll("iptables", "-t", "nat", "-D", "OUTPUT", "abc", "123") { + t.Errorf("wrong CombinedOutput() log, got %s", fcmd.CombinedOutputLog[2]) + } +} + +func TestDeleteRuleErrorChecking(t *testing.T) { + fcmd := exec.FakeCmd{ + CombinedOutputScript: []exec.FakeCombinedOutputAction{ + // iptables version check + func() ([]byte, error) { return []byte("iptables v1.9.22"), nil }, + // Status 2 on the first call. + func() ([]byte, error) { return nil, &exec.FakeExitError{Status: 2} }, + }, + } + fexec := exec.FakeExec{ + CommandScript: []exec.FakeCommandAction{ + // iptables version check + func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) }, + // The second Command() call is checking the rule. Failure of that means create it. + func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) }, + }, + } + runner := New(&fexec, dbus.NewFake(nil, nil), ProtocolIpv4) + defer runner.Destroy() + err := runner.DeleteRule(TableNAT, ChainOutput, "abc", "123") + if err == nil { + t.Errorf("expected failure") + } + if fcmd.CombinedOutputCalls != 2 { + t.Errorf("expected 2 CombinedOutput() calls, got %d", fcmd.CombinedOutputCalls) + } +} + +func TestDeleteRuleErrorCreating(t *testing.T) { + fcmd := exec.FakeCmd{ + CombinedOutputScript: []exec.FakeCombinedOutputAction{ + // iptables version check + func() ([]byte, error) { return []byte("iptables v1.9.22"), nil }, + // Success on the first call. + func() ([]byte, error) { return []byte{}, nil }, + // Status 1 on the second call. 
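+			// (The successful -C check above means the rule exists; a non-zero
+			// exit from the subsequent -D must surface as an error from DeleteRule.)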
+ func() ([]byte, error) { return nil, &exec.FakeExitError{Status: 1} }, + }, + } + fexec := exec.FakeExec{ + CommandScript: []exec.FakeCommandAction{ + // iptables version check + func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) }, + // The second Command() call is checking the rule. Success of that means delete it. + func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) }, + func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) }, + }, + } + runner := New(&fexec, dbus.NewFake(nil, nil), ProtocolIpv4) + defer runner.Destroy() + err := runner.DeleteRule(TableNAT, ChainOutput, "abc", "123") + if err == nil { + t.Errorf("expected failure") + } + if fcmd.CombinedOutputCalls != 3 { + t.Errorf("expected 3 CombinedOutput() calls, got %d", fcmd.CombinedOutputCalls) + } +} + +func TestGetIptablesHasCheckCommand(t *testing.T) { + testCases := []struct { + Version string + Err bool + Expected bool + }{ + {"iptables v1.4.7", false, false}, + {"iptables v1.4.11", false, true}, + {"iptables v1.4.19.1", false, true}, + {"iptables v2.0.0", false, true}, + {"total junk", true, false}, + } + + for _, testCase := range testCases { + fcmd := exec.FakeCmd{ + CombinedOutputScript: []exec.FakeCombinedOutputAction{ + func() ([]byte, error) { return []byte(testCase.Version), nil }, + }, + } + fexec := exec.FakeExec{ + CommandScript: []exec.FakeCommandAction{ + func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) }, + }, + } + version, err := getIptablesVersionString(&fexec) + if (err != nil) != testCase.Err { + t.Errorf("Expected error: %v, Got error: %v", testCase.Err, err) + } + if err == nil { + check := getIptablesHasCheckCommand(version) + if testCase.Expected != check { + t.Errorf("Expected result: %v, Got result: %v", testCase.Expected, check) + } + } + } +} + +func TestCheckRuleWithoutCheckPresent(t *testing.T) { + iptables_save_output := `# Generated by iptables-save v1.4.7 on Wed Oct 29 14:56:01 2014 +*nat +:PREROUTING ACCEPT [2136997:197881818] +:POSTROUTING ACCEPT [4284525:258542680] +:OUTPUT ACCEPT [5901660:357267963] +-A PREROUTING -m addrtype --dst-type LOCAL -j DOCKER +COMMIT +# Completed on Wed Oct 29 14:56:01 2014` + + fcmd := exec.FakeCmd{ + CombinedOutputScript: []exec.FakeCombinedOutputAction{ + // Success. + func() ([]byte, error) { return []byte(iptables_save_output), nil }, + }, + } + fexec := exec.FakeExec{ + CommandScript: []exec.FakeCommandAction{ + // The first Command() call is checking the rule. Success of that exec means "done". + func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) 
}, + }, + } + runner := &runner{exec: &fexec} + exists, err := runner.checkRuleWithoutCheck(TableNAT, ChainPrerouting, "-m", "addrtype", "-j", "DOCKER", "--dst-type", "LOCAL") + if err != nil { + t.Errorf("expected success, got %v", err) + } + if !exists { + t.Errorf("expected exists = true") + } + if fcmd.CombinedOutputCalls != 1 { + t.Errorf("expected 1 CombinedOutput() call, got %d", fcmd.CombinedOutputCalls) + } + if !sets.NewString(fcmd.CombinedOutputLog[0]...).HasAll("iptables-save", "-t", "nat") { + t.Errorf("wrong CombinedOutput() log, got %s", fcmd.CombinedOutputLog[0]) + } +} + +func TestCheckRuleWithoutCheckAbsent(t *testing.T) { + iptables_save_output := `# Generated by iptables-save v1.4.7 on Wed Oct 29 14:56:01 2014 +*nat +:PREROUTING ACCEPT [2136997:197881818] +:POSTROUTING ACCEPT [4284525:258542680] +:OUTPUT ACCEPT [5901660:357267963] +-A PREROUTING -m addrtype --dst-type LOCAL -j DOCKER +COMMIT +# Completed on Wed Oct 29 14:56:01 2014` + + fcmd := exec.FakeCmd{ + CombinedOutputScript: []exec.FakeCombinedOutputAction{ + // Success. + func() ([]byte, error) { return []byte(iptables_save_output), nil }, + }, + } + fexec := exec.FakeExec{ + CommandScript: []exec.FakeCommandAction{ + // The first Command() call is checking the rule. Success of that exec means "done". + func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) }, + }, + } + runner := &runner{exec: &fexec} + exists, err := runner.checkRuleWithoutCheck(TableNAT, ChainPrerouting, "-m", "addrtype", "-j", "DOCKER") + if err != nil { + t.Errorf("expected success, got %v", err) + } + if exists { + t.Errorf("expected exists = false") + } + if fcmd.CombinedOutputCalls != 1 { + t.Errorf("expected 1 CombinedOutput() call, got %d", fcmd.CombinedOutputCalls) + } + if !sets.NewString(fcmd.CombinedOutputLog[0]...).HasAll("iptables-save", "-t", "nat") { + t.Errorf("wrong CombinedOutput() log, got %s", fcmd.CombinedOutputLog[0]) + } +} + +func TestIptablesWaitFlag(t *testing.T) { + testCases := []struct { + Version string + Result string + }{ + {"0.55.55", ""}, + {"1.0.55", ""}, + {"1.4.19", ""}, + {"1.4.20", "-w"}, + {"1.4.21", "-w"}, + {"1.4.22", "-w2"}, + {"1.5.0", "-w2"}, + {"2.0.0", "-w2"}, + } + + for _, testCase := range testCases { + result := getIptablesWaitFlag(testCase.Version) + if strings.Join(result, "") != testCase.Result { + t.Errorf("For %s expected %v got %v", testCase.Version, testCase.Result, result) + } + } +} + +func TestWaitFlagUnavailable(t *testing.T) { + fcmd := exec.FakeCmd{ + CombinedOutputScript: []exec.FakeCombinedOutputAction{ + // iptables version check + func() ([]byte, error) { return []byte("iptables v1.4.19"), nil }, + // Success. + func() ([]byte, error) { return []byte{}, nil }, + }, + } + fexec := exec.FakeExec{ + CommandScript: []exec.FakeCommandAction{ + func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) }, + func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) 
}, + }, + } + runner := New(&fexec, dbus.NewFake(nil, nil), ProtocolIpv4) + defer runner.Destroy() + err := runner.DeleteChain(TableNAT, Chain("FOOBAR")) + if err != nil { + t.Errorf("expected success, got %v", err) + } + if fcmd.CombinedOutputCalls != 2 { + t.Errorf("expected 2 CombinedOutput() calls, got %d", fcmd.CombinedOutputCalls) + } + if sets.NewString(fcmd.CombinedOutputLog[1]...).HasAny("-w", "-w2") { + t.Errorf("wrong CombinedOutput() log, got %s", fcmd.CombinedOutputLog[1]) + } +} + +func TestWaitFlagOld(t *testing.T) { + fcmd := exec.FakeCmd{ + CombinedOutputScript: []exec.FakeCombinedOutputAction{ + // iptables version check + func() ([]byte, error) { return []byte("iptables v1.4.20"), nil }, + // Success. + func() ([]byte, error) { return []byte{}, nil }, + }, + } + fexec := exec.FakeExec{ + CommandScript: []exec.FakeCommandAction{ + func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) }, + func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) }, + }, + } + runner := New(&fexec, dbus.NewFake(nil, nil), ProtocolIpv4) + defer runner.Destroy() + err := runner.DeleteChain(TableNAT, Chain("FOOBAR")) + if err != nil { + t.Errorf("expected success, got %v", err) + } + if fcmd.CombinedOutputCalls != 2 { + t.Errorf("expected 2 CombinedOutput() calls, got %d", fcmd.CombinedOutputCalls) + } + if !sets.NewString(fcmd.CombinedOutputLog[1]...).HasAll("iptables", "-w") { + t.Errorf("wrong CombinedOutput() log, got %s", fcmd.CombinedOutputLog[1]) + } + if sets.NewString(fcmd.CombinedOutputLog[1]...).HasAny("-w2") { + t.Errorf("wrong CombinedOutput() log, got %s", fcmd.CombinedOutputLog[1]) + } +} + +func TestWaitFlagNew(t *testing.T) { + fcmd := exec.FakeCmd{ + CombinedOutputScript: []exec.FakeCombinedOutputAction{ + // iptables version check + func() ([]byte, error) { return []byte("iptables v1.4.22"), nil }, + // Success. + func() ([]byte, error) { return []byte{}, nil }, + }, + } + fexec := exec.FakeExec{ + CommandScript: []exec.FakeCommandAction{ + func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) }, + func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) 
}, + }, + } + runner := New(&fexec, dbus.NewFake(nil, nil), ProtocolIpv4) + defer runner.Destroy() + err := runner.DeleteChain(TableNAT, Chain("FOOBAR")) + if err != nil { + t.Errorf("expected success, got %v", err) + } + if fcmd.CombinedOutputCalls != 2 { + t.Errorf("expected 2 CombinedOutput() calls, got %d", fcmd.CombinedOutputCalls) + } + if !sets.NewString(fcmd.CombinedOutputLog[1]...).HasAll("iptables", "-w2") { + t.Errorf("wrong CombinedOutput() log, got %s", fcmd.CombinedOutputLog[1]) + } + if sets.NewString(fcmd.CombinedOutputLog[1]...).HasAny("-w") { + t.Errorf("wrong CombinedOutput() log, got %s", fcmd.CombinedOutputLog[1]) + } +} + +func TestReload(t *testing.T) { + dbusConn := dbus.NewFakeConnection() + dbusConn.SetBusObject(func(method string, args ...interface{}) ([]interface{}, error) { return nil, nil }) + dbusConn.AddObject(firewalldName, firewalldPath, func(method string, args ...interface{}) ([]interface{}, error) { return nil, nil }) + fdbus := dbus.NewFake(dbusConn, nil) + + reloaded := make(chan bool, 2) + + fcmd := exec.FakeCmd{ + CombinedOutputScript: []exec.FakeCombinedOutputAction{ + // iptables version check + func() ([]byte, error) { return []byte("iptables v1.4.22"), nil }, + + // first reload + // EnsureChain + func() ([]byte, error) { return []byte{}, nil }, + // EnsureRule abc check + func() ([]byte, error) { return []byte{}, &exec.FakeExitError{Status: 1} }, + // EnsureRule abc + func() ([]byte, error) { return []byte{}, nil }, + + // second reload + // EnsureChain + func() ([]byte, error) { return []byte{}, nil }, + // EnsureRule abc check + func() ([]byte, error) { return []byte{}, &exec.FakeExitError{Status: 1} }, + // EnsureRule abc + func() ([]byte, error) { return []byte{}, nil }, + }, + } + fexec := exec.FakeExec{ + CommandScript: []exec.FakeCommandAction{ + func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) }, + func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) }, + func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) }, + func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) }, + func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) }, + func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) }, + func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) 
}, + }, + } + + runner := New(&fexec, fdbus, ProtocolIpv4) + defer runner.Destroy() + + runner.AddReloadFunc(func() { + exists, err := runner.EnsureChain(TableNAT, Chain("FOOBAR")) + if err != nil { + t.Errorf("expected success, got %v", err) + } + if exists { + t.Errorf("expected exists = false") + } + reloaded <- true + }) + + runner.AddReloadFunc(func() { + exists, err := runner.EnsureRule(Append, TableNAT, ChainOutput, "abc", "123") + if err != nil { + t.Errorf("expected success, got %v", err) + } + if exists { + t.Errorf("expected exists = false") + } + reloaded <- true + }) + + dbusConn.EmitSignal("org.freedesktop.DBus", "/org/freedesktop/DBus", "org.freedesktop.DBus", "NameOwnerChanged", firewalldName, "", ":1.1") + <-reloaded + <-reloaded + + if fcmd.CombinedOutputCalls != 4 { + t.Errorf("expected 4 CombinedOutput() calls total, got %d", fcmd.CombinedOutputCalls) + } + if !sets.NewString(fcmd.CombinedOutputLog[1]...).HasAll("iptables", "-t", "nat", "-N", "FOOBAR") { + t.Errorf("wrong CombinedOutput() log, got %s", fcmd.CombinedOutputLog[1]) + } + if !sets.NewString(fcmd.CombinedOutputLog[2]...).HasAll("iptables", "-t", "nat", "-C", "OUTPUT", "abc", "123") { + t.Errorf("wrong CombinedOutput() log, got %s", fcmd.CombinedOutputLog[2]) + } + if !sets.NewString(fcmd.CombinedOutputLog[3]...).HasAll("iptables", "-t", "nat", "-A", "OUTPUT", "abc", "123") { + t.Errorf("wrong CombinedOutput() log, got %s", fcmd.CombinedOutputLog[3]) + } + + go func() { time.Sleep(time.Second / 100); reloaded <- true }() + dbusConn.EmitSignal(firewalldName, firewalldPath, firewalldInterface, "DefaultZoneChanged", "public") + dbusConn.EmitSignal("org.freedesktop.DBus", "/org/freedesktop/DBus", "org.freedesktop.DBus", "NameOwnerChanged", "io.k8s.Something", "", ":1.1") + <-reloaded + + if fcmd.CombinedOutputCalls != 4 { + t.Errorf("Incorrect signal caused a reload") + } + + dbusConn.EmitSignal(firewalldName, firewalldPath, firewalldInterface, "Reloaded") + <-reloaded + <-reloaded + + if fcmd.CombinedOutputCalls != 7 { + t.Errorf("expected 7 CombinedOutput() calls total, got %d", fcmd.CombinedOutputCalls) + } + if !sets.NewString(fcmd.CombinedOutputLog[4]...).HasAll("iptables", "-t", "nat", "-N", "FOOBAR") { + t.Errorf("wrong CombinedOutput() log, got %s", fcmd.CombinedOutputLog[4]) + } + if !sets.NewString(fcmd.CombinedOutputLog[5]...).HasAll("iptables", "-t", "nat", "-C", "OUTPUT", "abc", "123") { + t.Errorf("wrong CombinedOutput() log, got %s", fcmd.CombinedOutputLog[5]) + } + if !sets.NewString(fcmd.CombinedOutputLog[6]...).HasAll("iptables", "-t", "nat", "-A", "OUTPUT", "abc", "123") { + t.Errorf("wrong CombinedOutput() log, got %s", fcmd.CombinedOutputLog[6]) + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/iptables/save_restore.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/iptables/save_restore.go new file mode 100644 index 000000000000..d86eb755ce71 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/iptables/save_restore.go @@ -0,0 +1,108 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package iptables
+
+import (
+	"fmt"
+	"strings"
+)
+
+// MakeChainLine returns an iptables-save/restore formatted chain line given a Chain
+func MakeChainLine(chain Chain) string {
+	return fmt.Sprintf(":%s - [0:0]", chain)
+}
+
+// GetChainLines parses a table's iptables-save data to find chains in the table.
+// It returns a map of iptables.Chain to string where the string is the chain line from the save (with counters etc.).
+func GetChainLines(table Table, save []byte) map[Chain]string {
+	chainsMap := make(map[Chain]string)
+	tablePrefix := "*" + string(table)
+	readIndex := 0
+	// find beginning of table
+	for readIndex < len(save) {
+		line, n := ReadLine(readIndex, save)
+		readIndex = n
+		if strings.HasPrefix(line, tablePrefix) {
+			break
+		}
+	}
+	// parse table lines
+	for readIndex < len(save) {
+		line, n := ReadLine(readIndex, save)
+		readIndex = n
+		if len(line) == 0 {
+			continue
+		}
+		if strings.HasPrefix(line, "COMMIT") || strings.HasPrefix(line, "*") {
+			break
+		} else if strings.HasPrefix(line, "#") {
+			continue
+		} else if strings.HasPrefix(line, ":") && len(line) > 1 {
+			chain := Chain(strings.SplitN(line[1:], " ", 2)[0])
+			chainsMap[chain] = line
+		}
+	}
+	return chainsMap
+}
+
+// ReadLine reads a single line from byteArray starting at readIndex, trimming
+// leading and trailing spaces. It returns the trimmed line and the index at
+// which the next read should begin.
+func ReadLine(readIndex int, byteArray []byte) (string, int) {
+	currentReadIndex := readIndex
+
+	// consume left spaces
+	for currentReadIndex < len(byteArray) {
+		if byteArray[currentReadIndex] == ' ' {
+			currentReadIndex++
+		} else {
+			break
+		}
+	}
+
+	// leftTrimIndex stores the left index of the line after the line is left-trimmed
+	leftTrimIndex := currentReadIndex
+
+	// rightTrimIndex stores the right index of the line after the line is right-trimmed
+	// it is set to -1 since the correct value has not yet been determined.
+	rightTrimIndex := -1
+
+	for ; currentReadIndex < len(byteArray); currentReadIndex++ {
+		if byteArray[currentReadIndex] == ' ' {
+			// set rightTrimIndex
+			if rightTrimIndex == -1 {
+				rightTrimIndex = currentReadIndex
+			}
+		} else if (byteArray[currentReadIndex] == '\n') || (currentReadIndex == (len(byteArray) - 1)) {
+			// end of line or byte buffer is reached
+			if currentReadIndex <= leftTrimIndex {
+				return "", currentReadIndex + 1
+			}
+			// set the rightTrimIndex
+			if rightTrimIndex == -1 {
+				rightTrimIndex = currentReadIndex
+				if currentReadIndex == (len(byteArray)-1) && (byteArray[currentReadIndex] != '\n') {
+					// ensure that the last character is part of the returned string,
+					// unless the last character is '\n'
+					rightTrimIndex = currentReadIndex + 1
+				}
+			}
+			return string(byteArray[leftTrimIndex:rightTrimIndex]), currentReadIndex + 1
+		} else {
+			// unset rightTrimIndex
+			rightTrimIndex = -1
+		}
+	}
+	return "", currentReadIndex
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/json/json.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/json/json.go
new file mode 100644
index 000000000000..1ff8cc0d4413
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/json/json.go
@@ -0,0 +1,107 @@
+/*
+Copyright 2015 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package json + +import ( + "bytes" + "encoding/json" + "io" +) + +// NewEncoder delegates to json.NewEncoder +// It is only here so this package can be a drop-in for common encoding/json uses +func NewEncoder(w io.Writer) *json.Encoder { + return json.NewEncoder(w) +} + +// Marshal delegates to json.Marshal +// It is only here so this package can be a drop-in for common encoding/json uses +func Marshal(v interface{}) ([]byte, error) { + return json.Marshal(v) +} + +// Unmarshal unmarshals the given data +// If v is a *map[string]interface{}, numbers are converted to int64 or float64 +func Unmarshal(data []byte, v interface{}) error { + switch v := v.(type) { + case *map[string]interface{}: + // Build a decoder from the given data + decoder := json.NewDecoder(bytes.NewBuffer(data)) + // Preserve numbers, rather than casting to float64 automatically + decoder.UseNumber() + // Run the decode + if err := decoder.Decode(v); err != nil { + return err + } + // If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64 + return convertMapNumbers(*v) + + default: + return json.Unmarshal(data, v) + } +} + +// convertMapNumbers traverses the map, converting any json.Number values to int64 or float64. +// values which are map[string]interface{} or []interface{} are recursively visited +func convertMapNumbers(m map[string]interface{}) error { + var err error + for k, v := range m { + switch v := v.(type) { + case json.Number: + m[k], err = convertNumber(v) + case map[string]interface{}: + err = convertMapNumbers(v) + case []interface{}: + err = convertSliceNumbers(v) + } + if err != nil { + return err + } + } + return nil +} + +// convertSliceNumbers traverses the slice, converting any json.Number values to int64 or float64. +// values which are map[string]interface{} or []interface{} are recursively visited +func convertSliceNumbers(s []interface{}) error { + var err error + for i, v := range s { + switch v := v.(type) { + case json.Number: + s[i], err = convertNumber(v) + case map[string]interface{}: + err = convertMapNumbers(v) + case []interface{}: + err = convertSliceNumbers(v) + } + if err != nil { + return err + } + } + return nil +} + +// convertNumber converts a json.Number to an int64 or float64, or returns an error +func convertNumber(n json.Number) (interface{}, error) { + // Attempt to convert to an int64 first + if i, err := n.Int64(); err == nil { + return i, nil + } + // Return a float64 (default json.Decode() behavior) + // An overflow will return an error + return n.Float64() +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/json/json_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/json/json_test.go new file mode 100644 index 000000000000..e41f6ce0486a --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/json/json_test.go @@ -0,0 +1,317 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package json + +import ( + "fmt" + "math" + "reflect" + "strconv" + "strings" + "testing" +) + +func TestEvaluateTypes(t *testing.T) { + testCases := []struct { + In string + Data interface{} + Out string + Err bool + }{ + // Invalid syntaxes + { + In: `x`, + Err: true, + }, + { + In: ``, + Err: true, + }, + + // Null + { + In: `null`, + Data: nil, + Out: `null`, + }, + // Booleans + { + In: `true`, + Data: true, + Out: `true`, + }, + { + In: `false`, + Data: false, + Out: `false`, + }, + + // Integers + { + In: `0`, + Data: int64(0), + Out: `0`, + }, + { + In: `-0`, + Data: int64(-0), + Out: `0`, + }, + { + In: `1`, + Data: int64(1), + Out: `1`, + }, + { + In: `2147483647`, + Data: int64(math.MaxInt32), + Out: `2147483647`, + }, + { + In: `-2147483648`, + Data: int64(math.MinInt32), + Out: `-2147483648`, + }, + { + In: `9223372036854775807`, + Data: int64(math.MaxInt64), + Out: `9223372036854775807`, + }, + { + In: `-9223372036854775808`, + Data: int64(math.MinInt64), + Out: `-9223372036854775808`, + }, + + // Int overflow + { + In: `9223372036854775808`, // MaxInt64 + 1 + Data: float64(9223372036854775808), + Out: strconv.FormatFloat(9223372036854775808, 'g', -1, 64), + }, + { + In: `-9223372036854775809`, // MinInt64 - 1 + Data: float64(math.MinInt64), + Out: strconv.FormatFloat(-9223372036854775809, 'g', -1, 64), + }, + + // Floats + { + In: `0.0`, + Data: float64(0), + Out: `0`, + }, + { + In: `-0.0`, + Data: float64(-0.0), + Out: `-0`, + }, + { + In: `0.5`, + Data: float64(0.5), + Out: `0.5`, + }, + { + In: `1e3`, + Data: float64(1e3), + Out: `1000`, + }, + { + In: `1.5`, + Data: float64(1.5), + Out: `1.5`, + }, + { + In: `-0.3`, + Data: float64(-.3), + Out: `-0.3`, + }, + { + // Largest representable float32 + In: `3.40282346638528859811704183484516925440e+38`, + Data: float64(math.MaxFloat32), + Out: strconv.FormatFloat(math.MaxFloat32, 'g', -1, 64), + }, + { + // Smallest float32 without losing precision + In: `1.175494351e-38`, + Data: float64(1.175494351e-38), + Out: `1.175494351e-38`, + }, + { + // float32 closest to zero + In: `1.401298464324817070923729583289916131280e-45`, + Data: float64(math.SmallestNonzeroFloat32), + Out: strconv.FormatFloat(math.SmallestNonzeroFloat32, 'g', -1, 64), + }, + { + // Largest representable float64 + In: `1.797693134862315708145274237317043567981e+308`, + Data: float64(math.MaxFloat64), + Out: strconv.FormatFloat(math.MaxFloat64, 'g', -1, 64), + }, + { + // Closest to zero without losing precision + In: `2.2250738585072014e-308`, + Data: float64(2.2250738585072014e-308), + Out: `2.2250738585072014e-308`, + }, + + { + // float64 closest to zero + In: `4.940656458412465441765687928682213723651e-324`, + Data: float64(math.SmallestNonzeroFloat64), + Out: strconv.FormatFloat(math.SmallestNonzeroFloat64, 'g', -1, 64), + }, + + { + // math.MaxFloat64 + 2 overflow + In: `1.7976931348623159e+308`, + Err: true, + }, + + // Strings + { + In: `""`, + Data: string(""), + Out: `""`, + }, + { + In: `"0"`, + Data: string("0"), + Out: `"0"`, + }, + { + In: `"A"`, + Data: string("A"), + Out: `"A"`, + }, + { + In: `"Iñtërnâtiônàlizætiøn"`, + Data: 
string("Iñtërnâtiônàlizætiøn"), + Out: `"Iñtërnâtiônàlizætiøn"`, + }, + + // Arrays + { + In: `[]`, + Data: []interface{}{}, + Out: `[]`, + }, + { + In: `[` + strings.Join([]string{ + `null`, + `true`, + `false`, + `0`, + `9223372036854775807`, + `0.0`, + `0.5`, + `1.0`, + `1.797693134862315708145274237317043567981e+308`, + `"0"`, + `"A"`, + `"Iñtërnâtiônàlizætiøn"`, + `[null,true,1,1.0,1.5]`, + `{"boolkey":true,"floatkey":1.0,"intkey":1,"nullkey":null}`, + }, ",") + `]`, + Data: []interface{}{ + nil, + true, + false, + int64(0), + int64(math.MaxInt64), + float64(0.0), + float64(0.5), + float64(1.0), + float64(math.MaxFloat64), + string("0"), + string("A"), + string("Iñtërnâtiônàlizætiøn"), + []interface{}{nil, true, int64(1), float64(1.0), float64(1.5)}, + map[string]interface{}{"nullkey": nil, "boolkey": true, "intkey": int64(1), "floatkey": float64(1.0)}, + }, + Out: `[` + strings.Join([]string{ + `null`, + `true`, + `false`, + `0`, + `9223372036854775807`, + `0`, + `0.5`, + `1`, + strconv.FormatFloat(math.MaxFloat64, 'g', -1, 64), + `"0"`, + `"A"`, + `"Iñtërnâtiônàlizætiøn"`, + `[null,true,1,1,1.5]`, + `{"boolkey":true,"floatkey":1,"intkey":1,"nullkey":null}`, // gets alphabetized by Marshal + }, ",") + `]`, + }, + + // Maps + { + In: `{}`, + Data: map[string]interface{}{}, + Out: `{}`, + }, + { + In: `{"boolkey":true,"floatkey":1.0,"intkey":1,"nullkey":null}`, + Data: map[string]interface{}{"nullkey": nil, "boolkey": true, "intkey": int64(1), "floatkey": float64(1.0)}, + Out: `{"boolkey":true,"floatkey":1,"intkey":1,"nullkey":null}`, // gets alphabetized by Marshal + }, + } + + for _, tc := range testCases { + inputJSON := fmt.Sprintf(`{"data":%s}`, tc.In) + expectedJSON := fmt.Sprintf(`{"data":%s}`, tc.Out) + m := map[string]interface{}{} + err := Unmarshal([]byte(inputJSON), &m) + if tc.Err && err != nil { + // Expected error + continue + } + if err != nil { + t.Errorf("%s: error decoding: %v", tc.In, err) + continue + } + if tc.Err { + t.Errorf("%s: expected error, got none", tc.In) + continue + } + data, ok := m["data"] + if !ok { + t.Errorf("%s: decoded object missing data key: %#v", tc.In, m) + continue + } + if !reflect.DeepEqual(tc.Data, data) { + t.Errorf("%s: expected\n\t%#v (%v), got\n\t%#v (%v)", tc.In, tc.Data, reflect.TypeOf(tc.Data), data, reflect.TypeOf(data)) + continue + } + + outputJSON, err := Marshal(m) + if err != nil { + t.Errorf("%s: error encoding: %v", tc.In, err) + continue + } + + if expectedJSON != string(outputJSON) { + t.Errorf("%s: expected\n\t%s, got\n\t%s", tc.In, expectedJSON, string(outputJSON)) + continue + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/jsonpath/jsonpath.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/jsonpath/jsonpath.go index 35fcdd92d7af..7a402af49b11 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/jsonpath/jsonpath.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/jsonpath/jsonpath.go @@ -325,7 +325,13 @@ func (j *JSONPath) evalField(input []reflect.Value, node *FieldNode) ([]reflect. 
return nil, err } } else if value.Kind() == reflect.Map { - result = value.MapIndex(reflect.ValueOf(node.Value)) + mapKeyType := value.Type().Key() + nodeValue := reflect.ValueOf(node.Value) + // node value type must be convertible to map key type + if !nodeValue.Type().ConvertibleTo(mapKeyType) { + return results, fmt.Errorf("%s is not convertible to %s", nodeValue, mapKeyType) + } + result = value.MapIndex(nodeValue.Convert(mapKeyType)) } if result.IsValid() { results = append(results, result) diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/jsonpath/jsonpath_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/jsonpath/jsonpath_test.go new file mode 100644 index 000000000000..e01a9a4588ff --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/jsonpath/jsonpath_test.go @@ -0,0 +1,264 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package jsonpath + +import ( + "bytes" + "encoding/json" + "fmt" + "reflect" + "sort" + "strings" + "testing" +) + +type jsonpathTest struct { + name string + template string + input interface{} + expect string +} + +func testJSONPath(tests []jsonpathTest, t *testing.T) { + for _, test := range tests { + j := New(test.name) + err := j.Parse(test.template) + if err != nil { + t.Errorf("in %s, parse %s error %v", test.name, test.template, err) + } + buf := new(bytes.Buffer) + err = j.Execute(buf, test.input) + if err != nil { + t.Errorf("in %s, execute error %v", test.name, err) + } + out := buf.String() + if out != test.expect { + t.Errorf(`in %s, expect to get "%s", got "%s"`, test.name, test.expect, out) + } + } +} + +// testJSONPathSortOutput test cases related to map, the results may print in random order +func testJSONPathSortOutput(tests []jsonpathTest, t *testing.T) { + for _, test := range tests { + j := New(test.name) + err := j.Parse(test.template) + if err != nil { + t.Errorf("in %s, parse %s error %v", test.name, test.template, err) + } + buf := new(bytes.Buffer) + err = j.Execute(buf, test.input) + if err != nil { + t.Errorf("in %s, execute error %v", test.name, err) + } + out := buf.String() + //since map is visited in random order, we need to sort the results. 
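+ // (strings.Fields splits the output on whitespace, so sorting the fields
+ // compares the space-separated values without regard to order.)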
+ sortedOut := strings.Fields(out)
+ sort.Strings(sortedOut)
+ sortedExpect := strings.Fields(test.expect)
+ sort.Strings(sortedExpect)
+ if !reflect.DeepEqual(sortedOut, sortedExpect) {
+ t.Errorf(`in %s, expect to get "%s", got "%s"`, test.name, test.expect, out)
+ }
+ }
+}
+
+func testFailJSONPath(tests []jsonpathTest, t *testing.T) {
+ for _, test := range tests {
+ j := New(test.name)
+ err := j.Parse(test.template)
+ if err != nil {
+ t.Errorf("in %s, parse %s error %v", test.name, test.template, err)
+ }
+ buf := new(bytes.Buffer)
+ err = j.Execute(buf, test.input)
+ var out string
+ if err == nil {
+ out = "nil"
+ } else {
+ out = err.Error()
+ }
+ if out != test.expect {
+ t.Errorf("in %s, expect to get error %q, got %q", test.name, test.expect, out)
+ }
+ }
+}
+
+type book struct {
+ Category string
+ Author string
+ Title string
+ Price float32
+}
+
+func (b book) String() string {
+ return fmt.Sprintf("{Category: %s, Author: %s, Title: %s, Price: %v}", b.Category, b.Author, b.Title, b.Price)
+}
+
+type bicycle struct {
+ Color string
+ Price float32
+}
+
+type empName string
+type job string
+type store struct {
+ Book []book
+ Bicycle bicycle
+ Name string
+ Labels map[string]int
+ Employees map[empName]job
+}
+
+func TestStructInput(t *testing.T) {
+
+ storeData := store{
+ Name: "jsonpath",
+ Book: []book{
+ {"reference", "Nigel Rees", "Sayings of the Century", 8.95},
+ {"fiction", "Evelyn Waugh", "Sword of Honour", 12.99},
+ {"fiction", "Herman Melville", "Moby Dick", 8.99},
+ },
+ Bicycle: bicycle{"red", 19.95},
+ Labels: map[string]int{
+ "engineer": 10,
+ "web/html": 15,
+ "k8s-app": 20,
+ },
+ Employees: map[empName]job{
+ "jason": "manager",
+ "dan": "clerk",
+ },
+ }
+
+ storeTests := []jsonpathTest{
+ {"plain", "hello jsonpath", nil, "hello jsonpath"},
+ {"recursive", "{..}", []int{1, 2, 3}, "[1 2 3]"},
+ {"filter", "{[?(@<5)]}", []int{2, 6, 3, 7}, "2 3"},
+ {"quote", `{"{"}`, nil, "{"},
+ {"union", "{[1,3,4]}", []int{0, 1, 2, 3, 4}, "1 3 4"},
+ {"array", "{[0:2]}", []string{"Monday", "Tuesday"}, "Monday Tuesday"},
+ {"variable", "hello {.Name}", storeData, "hello jsonpath"},
+ {"dict/", "{$.Labels.web/html}", storeData, "15"},
+ {"dict/", "{$.Employees.jason}", storeData, "manager"},
+ {"dict/", "{$.Employees.dan}", storeData, "clerk"},
+ {"dict-", "{.Labels.k8s-app}", storeData, "20"},
+ {"nest", "{.Bicycle.Color}", storeData, "red"},
+ {"allarray", "{.Book[*].Author}", storeData, "Nigel Rees Evelyn Waugh Herman Melville"},
+ {"allfields", "{.Bicycle.*}", storeData, "red 19.95"},
+ {"recurfields", "{..Price}", storeData, "8.95 12.99 8.99 19.95"},
+ {"lastarray", "{.Book[-1:]}", storeData,
+ "{Category: fiction, Author: Herman Melville, Title: Moby Dick, Price: 8.99}"},
+ {"recurarray", "{..Book[2]}", storeData,
+ "{Category: fiction, Author: Herman Melville, Title: Moby Dick, Price: 8.99}"},
+ }
+ testJSONPath(storeTests, t)
+
+ failStoreTests := []jsonpathTest{
+ {"invalid identifier", "{hello}", storeData, "unrecognized identifier hello"},
+ {"nonexistent field", "{.hello}", storeData, "hello is not found"},
+ {"invalid array", "{.Labels[0]}", storeData, "map[string]int is not array or slice"},
+ {"invalid filter operator", "{.Book[?(@.Price<>10)]}", storeData, "unrecognized filter operator <>"},
+ {"redundant end", "{range .Labels.*}{@}{end}{end}", storeData, "not in range, nothing to end"},
+ }
+ testFailJSONPath(failStoreTests, t)
+}
+
+func TestJSONInput(t *testing.T) {
+ var pointsJSON = []byte(`[
+ {"id": "i1", "x":4, "y":-5},
+ {"id": "i2", "x":-2,
"y":-5, "z":1}, + {"id": "i3", "x": 8, "y": 3 }, + {"id": "i4", "x": -6, "y": -1 }, + {"id": "i5", "x": 0, "y": 2, "z": 1 }, + {"id": "i6", "x": 1, "y": 4 } + ]`) + var pointsData interface{} + err := json.Unmarshal(pointsJSON, &pointsData) + if err != nil { + t.Error(err) + } + pointsTests := []jsonpathTest{ + {"exists filter", "{[?(@.z)].id}", pointsData, "i2 i5"}, + {"bracket key", "{[0]['id']}", pointsData, "i1"}, + } + testJSONPath(pointsTests, t) +} + +// TestKubernetes tests some use cases from kubernetes +func TestKubernetes(t *testing.T) { + var input = []byte(`{ + "kind": "List", + "items":[ + { + "kind":"None", + "metadata":{"name":"127.0.0.1"}, + "status":{ + "capacity":{"cpu":"4"}, + "addresses":[{"type": "LegacyHostIP", "address":"127.0.0.1"}] + } + }, + { + "kind":"None", + "metadata":{"name":"127.0.0.2"}, + "status":{ + "capacity":{"cpu":"8"}, + "addresses":[ + {"type": "LegacyHostIP", "address":"127.0.0.2"}, + {"type": "another", "address":"127.0.0.3"} + ] + } + } + ], + "users":[ + { + "name": "myself", + "user": {} + }, + { + "name": "e2e", + "user": {"username": "admin", "password": "secret"} + } + ] + }`) + var nodesData interface{} + err := json.Unmarshal(input, &nodesData) + if err != nil { + t.Error(err) + } + + nodesTests := []jsonpathTest{ + {"range item", `{range .items[*]}{.metadata.name}, {end}{.kind}`, nodesData, "127.0.0.1, 127.0.0.2, List"}, + {"range item with quote", `{range .items[*]}{.metadata.name}{"\t"}{end}`, nodesData, "127.0.0.1\t127.0.0.2\t"}, + {"range addresss", `{.items[*].status.addresses[*].address}`, nodesData, + "127.0.0.1 127.0.0.2 127.0.0.3"}, + {"double range", `{range .items[*]}{range .status.addresses[*]}{.address}, {end}{end}`, nodesData, + "127.0.0.1, 127.0.0.2, 127.0.0.3, "}, + {"item name", `{.items[*].metadata.name}`, nodesData, "127.0.0.1 127.0.0.2"}, + {"union nodes capacity", `{.items[*]['metadata.name', 'status.capacity']}`, nodesData, + "127.0.0.1 127.0.0.2 map[cpu:4] map[cpu:8]"}, + {"range nodes capacity", `{range .items[*]}[{.metadata.name}, {.status.capacity}] {end}`, nodesData, + "[127.0.0.1, map[cpu:4]] [127.0.0.2, map[cpu:8]] "}, + {"user password", `{.users[?(@.name=="e2e")].user.password}`, &nodesData, "secret"}, + } + testJSONPath(nodesTests, t) + + randomPrintOrderTests := []jsonpathTest{ + {"recursive name", "{..name}", nodesData, `127.0.0.1 127.0.0.2 myself e2e`}, + } + testJSONPathSortOutput(randomPrintOrderTests, t) +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/jsonpath/node.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/jsonpath/node.go index 9d5242de7bf4..ddf015c049a5 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/jsonpath/node.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/jsonpath/node.go @@ -95,7 +95,7 @@ func (t *TextNode) String() string { return fmt.Sprintf("%s: %s", t.Type(), t.Text) } -// FieldNode holds filed of struct +// FieldNode holds field of struct type FieldNode struct { NodeType Value string diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/jsonpath/parser_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/jsonpath/parser_test.go new file mode 100644 index 000000000000..a061043b8364 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/jsonpath/parser_test.go @@ -0,0 +1,136 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package jsonpath
+
+import (
+ "testing"
+)
+
+type parserTest struct {
+ name string
+ text string
+ nodes []Node
+ shouldError bool
+}
+
+var parserTests = []parserTest{
+ {"plain", `hello jsonpath`, []Node{newText("hello jsonpath")}, false},
+ {"variable", `hello {.jsonpath}`,
+ []Node{newText("hello "), newList(), newField("jsonpath")}, false},
+ {"arrayfield", `hello {['jsonpath']}`,
+ []Node{newText("hello "), newList(), newField("jsonpath")}, false},
+ {"quote", `{"{"}`, []Node{newList(), newText("{")}, false},
+ {"array", `{[1:3]}`, []Node{newList(),
+ newArray([3]ParamsEntry{{1, true}, {3, true}, {0, false}})}, false},
+ {"allarray", `{.book[*].author}`,
+ []Node{newList(), newField("book"),
+ newArray([3]ParamsEntry{{0, false}, {0, false}, {0, false}}), newField("author")}, false},
+ {"wildcard", `{.bicycle.*}`,
+ []Node{newList(), newField("bicycle"), newWildcard()}, false},
+ {"filter", `{[?(@.price<3)]}`,
+ []Node{newList(), newFilter(newList(), newList(), "<"),
+ newList(), newField("price"), newList(), newInt(3)}, false},
+ {"recursive", `{..}`, []Node{newList(), newRecursive()}, false},
+ {"recurField", `{..price}`,
+ []Node{newList(), newRecursive(), newField("price")}, false},
+ {"arraydict", `{['book.price']}`, []Node{newList(),
+ newField("book"), newField("price"),
+ }, false},
+ {"union", `{['bicycle.price', 3, 'book.price']}`, []Node{newList(), newUnion([]*ListNode{}),
+ newList(), newField("bicycle"), newField("price"),
+ newList(), newArray([3]ParamsEntry{{3, true}, {4, true}, {0, false}}),
+ newList(), newField("book"), newField("price"),
+ }, false},
+ {"range", `{range .items}{.name},{end}`, []Node{
+ newList(), newIdentifier("range"), newField("items"),
+ newList(), newField("name"), newText(","),
+ newList(), newIdentifier("end"),
+ }, false},
+ {"malformed input", `{\\\}`, []Node{}, true},
+}
+
+func collectNode(nodes []Node, cur Node) []Node {
+ nodes = append(nodes, cur)
+ switch cur.Type() {
+ case NodeList:
+ for _, node := range cur.(*ListNode).Nodes {
+ nodes = collectNode(nodes, node)
+ }
+ case NodeFilter:
+ nodes = collectNode(nodes, cur.(*FilterNode).Left)
+ nodes = collectNode(nodes, cur.(*FilterNode).Right)
+ case NodeUnion:
+ for _, node := range cur.(*UnionNode).Nodes {
+ nodes = collectNode(nodes, node)
+ }
+ }
+ return nodes
+}
+
+func TestParser(t *testing.T) {
+ for _, test := range parserTests {
+ parser, err := Parse(test.name, test.text)
+ if test.shouldError {
+ if err == nil {
+ t.Errorf("unexpected non-error when parsing %s", test.name)
+ }
+ continue
+ }
+ if err != nil {
+ t.Errorf("parse %s error %v", test.name, err)
+ }
+ result := collectNode([]Node{}, parser.Root)[1:]
+ if len(result) != len(test.nodes) {
+ t.Errorf("in %s, expect to get %d nodes, got %d nodes", test.name, len(test.nodes), len(result))
+ t.Error(result)
+ }
+ for i, expect := range test.nodes {
+ if result[i].String() != expect.String() {
+ t.Errorf("in %s, %dth node, expect %v, got %v", test.name, i, expect, result[i])
+ }
+ }
+ }
+}
+
+type failParserTest struct {
+ name string
+ text string
+ err string
+}
+
+func TestFailParser(t *testing.T) {
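+ // Each case below feeds deliberately malformed input to Parse and
+ // asserts on the exact error string the parser should produce.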
failParserTests := []failParserTest{ + {"unclosed action", "{.hello", "unclosed action"}, + {"unrecognized character", "{*}", "unrecognized character in action: U+002A '*'"}, + {"invalid number", "{+12.3.0}", "cannot parse number +12.3.0"}, + {"unterminated array", "{[1}", "unterminated array"}, + {"invalid index", "{[::-1]}", "invalid array index ::-1"}, + {"unterminated filter", "{[?(.price]}", "unterminated filter"}, + } + for _, test := range failParserTests { + _, err := Parse(test.name, test.text) + var out string + if err == nil { + out = "nil" + } else { + out = err.Error() + } + if out != test.err { + t.Errorf("in %s, expect to get error %v, got %v", test.name, test.err, out) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/keymutex/keymutex_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/keymutex/keymutex_test.go new file mode 100644 index 000000000000..faa3be16aad8 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/keymutex/keymutex_test.go @@ -0,0 +1,111 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package keymutex + +import ( + "testing" + "time" +) + +const ( + callbackTimeout = 1 * time.Second +) + +func Test_SingleLock_NoUnlock(t *testing.T) { + // Arrange + km := NewKeyMutex() + key := "fakeid" + callbackCh := make(chan interface{}) + + // Act + go lockAndCallback(km, key, callbackCh) + + // Assert + verifyCallbackHappens(t, callbackCh) +} + +func Test_SingleLock_SingleUnlock(t *testing.T) { + // Arrange + km := NewKeyMutex() + key := "fakeid" + callbackCh := make(chan interface{}) + + // Act & Assert + go lockAndCallback(km, key, callbackCh) + verifyCallbackHappens(t, callbackCh) + km.UnlockKey(key) +} + +func Test_DoubleLock_DoubleUnlock(t *testing.T) { + // Arrange + km := NewKeyMutex() + key := "fakeid" + callbackCh1stLock := make(chan interface{}) + callbackCh2ndLock := make(chan interface{}) + + // Act & Assert + go lockAndCallback(km, key, callbackCh1stLock) + verifyCallbackHappens(t, callbackCh1stLock) + go lockAndCallback(km, key, callbackCh2ndLock) + verifyCallbackDoesntHappens(t, callbackCh2ndLock) + km.UnlockKey(key) + verifyCallbackHappens(t, callbackCh2ndLock) + km.UnlockKey(key) +} + +func lockAndCallback(km KeyMutex, id string, callbackCh chan<- interface{}) { + km.LockKey(id) + callbackCh <- true +} + +func verifyCallbackHappens(t *testing.T, callbackCh <-chan interface{}) bool { + select { + case <-callbackCh: + return true + case <-time.After(callbackTimeout): + t.Fatalf("Timed out waiting for callback.") + return false + } +} + +func verifyCallbackDoesntHappens(t *testing.T, callbackCh <-chan interface{}) bool { + select { + case <-callbackCh: + t.Fatalf("Unexpected callback.") + return false + case <-time.After(callbackTimeout): + return true + } +} + +func verifyNoError(t *testing.T, err error, name string) { + if err != nil { + t.Fatalf("Unexpected response on %q. 
Expected: <no error> Actual: <%v>", name, err)
+ }
+}
+
+func verifyError(t *testing.T, err error, name string) {
+ if err == nil {
+ t.Fatalf("Unexpected response on %q. Expected: <error> Actual: <no error>", name)
+ }
+}
+
+func verifyMsg(t *testing.T, expected, actual string) {
+ if actual != expected {
+ t.Fatalf("Unexpected testMsg value. Expected: <%v> Actual: <%v>", expected, actual)
+ }
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/labels/labels_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/labels/labels_test.go
new file mode 100644
index 000000000000..9adbd4554aef
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/labels/labels_test.go
@@ -0,0 +1,60 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package labels
+
+import (
+ "reflect"
+ "testing"
+)
+
+func TestCloneAndAddLabel(t *testing.T) {
+ labels := map[string]string{
+ "foo1": "bar1",
+ "foo2": "bar2",
+ "foo3": "bar3",
+ }
+
+ cases := []struct {
+ labels map[string]string
+ labelKey string
+ labelValue uint32
+ want map[string]string
+ }{
+ {
+ labels: labels,
+ want: labels,
+ },
+ {
+ labels: labels,
+ labelKey: "foo4",
+ labelValue: uint32(42),
+ want: map[string]string{
+ "foo1": "bar1",
+ "foo2": "bar2",
+ "foo3": "bar3",
+ "foo4": "42",
+ },
+ },
+ }
+
+ for _, tc := range cases {
+ got := CloneAndAddLabel(tc.labels, tc.labelKey, tc.labelValue)
+ if !reflect.DeepEqual(got, tc.want) {
+ t.Errorf("got %v, want %v", got, tc.want)
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/line_delimiter_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/line_delimiter_test.go
new file mode 100644
index 000000000000..728d0abeae90
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/line_delimiter_test.go
@@ -0,0 +1,40 @@
+/*
+Copyright 2015 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+ "fmt"
+ "os"
+)
+
+func Example_trailingNewline() {
+ ld := NewLineDelimiter(os.Stdout, "|")
+ defer ld.Flush()
+ fmt.Fprint(ld, " Hello \n World \n")
+ // Output:
+ // | Hello |
+ // | World |
+ // ||
+}
+
+func Example_noTrailingNewline() {
+ ld := NewLineDelimiter(os.Stdout, "|")
+ defer ld.Flush()
+ fmt.Fprint(ld, " Hello \n World ")
+ // Output:
+ // | Hello |
+ // | World |
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/metrics/util.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/metrics/util.go
new file mode 100644
index 000000000000..ab74e35c463b
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/metrics/util.go
@@ -0,0 +1,71 @@
+/*
+Copyright 2015 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import (
+ "fmt"
+ "sync"
+ "time"
+
+ "k8s.io/kubernetes/pkg/util/flowcontrol"
+ "k8s.io/kubernetes/pkg/util/wait"
+
+ "github.com/golang/glog"
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+const (
+ updatePeriod = 5 * time.Second
+)
+
+var (
+ metricsLock sync.Mutex
+ rateLimiterMetrics map[string]prometheus.Gauge = make(map[string]prometheus.Gauge)
+)
+
+func registerRateLimiterMetric(ownerName string) error {
+ metricsLock.Lock()
+ defer metricsLock.Unlock()
+
+ if _, ok := rateLimiterMetrics[ownerName]; ok {
+ glog.Errorf("Metric for %v already registered", ownerName)
+ return fmt.Errorf("Metric for %v already registered", ownerName)
+ }
+ metric := prometheus.NewGauge(prometheus.GaugeOpts{
+ Name: "rate_limiter_use",
+ Subsystem: ownerName,
+ Help: fmt.Sprintf("A metric measuring the saturation of the rate limiter for %v", ownerName),
+ })
+ rateLimiterMetrics[ownerName] = metric
+ prometheus.MustRegister(metric)
+ return nil
+}
+
+// RegisterMetricAndTrackRateLimiterUsage registers a metric ownerName_rate_limiter_use in prometheus to track
+// how saturated the rateLimiter is, and starts a goroutine that updates this metric every updatePeriod
+func RegisterMetricAndTrackRateLimiterUsage(ownerName string, rateLimiter flowcontrol.RateLimiter) error {
+ err := registerRateLimiterMetric(ownerName)
+ if err != nil {
+ return err
+ }
+ go wait.Forever(func() {
+ metricsLock.Lock()
+ defer metricsLock.Unlock()
+ rateLimiterMetrics[ownerName].Set(rateLimiter.Saturation())
+ }, updatePeriod)
+ return nil
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/mount/mount.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/mount/mount.go
index 79f3f6ec2458..9c1d8d4f2658
--- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/mount/mount.go
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/mount/mount.go
@@ -21,6 +21,7 @@ package mount
 import (
 "github.com/golang/glog"
 "k8s.io/kubernetes/pkg/util/exec"
+ "path/filepath"
 )
 type Interface interface {
@@ -34,6 +35,7 @@ type Interface interface {
 // consistent.
 List() ([]MountPoint, error)
 // IsLikelyNotMountPoint determines if a directory is a mountpoint.
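 // Callers should treat the answer as a heuristic: implementations may
 // stat the path or consult the host's mount table.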
+ // It should return ErrNotExist when the directory does not exist.
 IsLikelyNotMountPoint(file string) (bool, error)
}
@@ -85,8 +87,13 @@ func GetMountRefs(mounter Interface, mountPath string) ([]string, error) {
 // Find the device name.
 deviceName := ""
+ // If mountPath is a symlink, resolve it to its target path first.
+ slTarget, err := filepath.EvalSymlinks(mountPath)
+ if err != nil {
+ slTarget = mountPath
+ }
 for i := range mps {
- if mps[i].Path == mountPath {
+ if mps[i].Path == slTarget {
 deviceName = mps[i].Device
 break
 }
@@ -98,7 +105,7 @@ func GetMountRefs(mounter Interface, mountPath string) ([]string, error) {
 glog.Warningf("could not determine device for path: %q", mountPath)
 } else {
 for i := range mps {
- if mps[i].Device == deviceName && mps[i].Path != mountPath {
+ if mps[i].Device == deviceName && mps[i].Path != slTarget {
 refs = append(refs, mps[i].Path)
 }
 }
@@ -117,8 +124,13 @@ func GetDeviceNameFromMount(mounter Interface, mountPath string) (string, int, error) {
 // Find the device name.
 // FIXME if multiple devices mounted on the same mount path, only the first one is returned
 device := ""
+ // If mountPath is a symlink, resolve it to its target path first.
+ slTarget, err := filepath.EvalSymlinks(mountPath)
+ if err != nil {
+ slTarget = mountPath
+ }
 for i := range mps {
- if mps[i].Path == mountPath {
+ if mps[i].Path == slTarget {
 device = mps[i].Device
 break
 }
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/mount/mount_linux_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/mount/mount_linux_test.go
new file mode 100644
index 000000000000..cd802d7744cb
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/mount/mount_linux_test.go
@@ -0,0 +1,182 @@
+// +build linux
+
+/*
+Copyright 2014 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package mount + +import ( + "strings" + "testing" +) + +func TestReadProcMountsFrom(t *testing.T) { + successCase := + `/dev/0 /path/to/0 type0 flags 0 0 + /dev/1 /path/to/1 type1 flags 1 1 + /dev/2 /path/to/2 type2 flags,1,2=3 2 2 + ` + hash, err := readProcMountsFrom(strings.NewReader(successCase), nil) + if err != nil { + t.Errorf("expected success") + } + if hash != 0xa3522051 { + t.Errorf("expected 0xa3522051, got %#x", hash) + } + mounts := []MountPoint{} + hash, err = readProcMountsFrom(strings.NewReader(successCase), &mounts) + if err != nil { + t.Errorf("expected success") + } + if hash != 0xa3522051 { + t.Errorf("expected 0xa3522051, got %#x", hash) + } + if len(mounts) != 3 { + t.Fatalf("expected 3 mounts, got %d", len(mounts)) + } + mp := MountPoint{"/dev/0", "/path/to/0", "type0", []string{"flags"}, 0, 0} + if !mountPointsEqual(&mounts[0], &mp) { + t.Errorf("got unexpected MountPoint[0]: %#v", mounts[0]) + } + mp = MountPoint{"/dev/1", "/path/to/1", "type1", []string{"flags"}, 1, 1} + if !mountPointsEqual(&mounts[1], &mp) { + t.Errorf("got unexpected MountPoint[1]: %#v", mounts[1]) + } + mp = MountPoint{"/dev/2", "/path/to/2", "type2", []string{"flags", "1", "2=3"}, 2, 2} + if !mountPointsEqual(&mounts[2], &mp) { + t.Errorf("got unexpected MountPoint[2]: %#v", mounts[2]) + } + + errorCases := []string{ + "/dev/0 /path/to/mount\n", + "/dev/1 /path/to/mount type flags a 0\n", + "/dev/2 /path/to/mount type flags 0 b\n", + } + for _, ec := range errorCases { + _, err := readProcMountsFrom(strings.NewReader(ec), &mounts) + if err == nil { + t.Errorf("expected error") + } + } +} + +func mountPointsEqual(a, b *MountPoint) bool { + if a.Device != b.Device || a.Path != b.Path || a.Type != b.Type || !slicesEqual(a.Opts, b.Opts) || a.Pass != b.Pass || a.Freq != b.Freq { + return false + } + return true +} + +func slicesEqual(a, b []string) bool { + if len(a) != len(b) { + return false + } + for i := range a { + if a[i] != b[i] { + return false + } + } + return true +} + +func TestGetMountRefs(t *testing.T) { + fm := &FakeMounter{ + MountPoints: []MountPoint{ + {Device: "/dev/sdb", Path: "/var/lib/kubelet/plugins/kubernetes.io/gce-pd/mounts/gce-pd"}, + {Device: "/dev/sdb", Path: "/var/lib/kubelet/pods/some-pod/volumes/kubernetes.io~gce-pd/gce-pd-in-pod"}, + {Device: "/dev/sdc", Path: "/var/lib/kubelet/plugins/kubernetes.io/gce-pd/mounts/gce-pd2"}, + {Device: "/dev/sdc", Path: "/var/lib/kubelet/pods/some-pod/volumes/kubernetes.io~gce-pd/gce-pd2-in-pod"}, + {Device: "/dev/sdc", Path: "/var/lib/kubelet/pods/some-pod/volumes/kubernetes.io~gce-pd/gce-pd2-in-pod2"}, + }, + } + + tests := []struct { + mountPath string + expectedRefs []string + }{ + { + "/var/lib/kubelet/pods/some-pod/volumes/kubernetes.io~gce-pd/gce-pd-in-pod", + []string{ + "/var/lib/kubelet/plugins/kubernetes.io/gce-pd/mounts/gce-pd", + }, + }, + { + "/var/lib/kubelet/pods/some-pod/volumes/kubernetes.io~gce-pd/gce-pd2-in-pod", + []string{ + "/var/lib/kubelet/pods/some-pod/volumes/kubernetes.io~gce-pd/gce-pd2-in-pod2", + "/var/lib/kubelet/plugins/kubernetes.io/gce-pd/mounts/gce-pd2", + }, + }, + } + + for i, test := range tests { + if refs, err := GetMountRefs(fm, test.mountPath); err != nil || !setEquivalent(test.expectedRefs, refs) { + t.Errorf("%d. 
getMountRefs(%q) = %v, %v; expected %v, nil", i, test.mountPath, refs, err, test.expectedRefs) + } + } +} + +func setEquivalent(set1, set2 []string) bool { + map1 := make(map[string]bool) + map2 := make(map[string]bool) + for _, s := range set1 { + map1[s] = true + } + for _, s := range set2 { + map2[s] = true + } + + for s := range map1 { + if !map2[s] { + return false + } + } + for s := range map2 { + if !map1[s] { + return false + } + } + return true +} + +func TestGetDeviceNameFromMount(t *testing.T) { + fm := &FakeMounter{ + MountPoints: []MountPoint{ + {Device: "/dev/disk/by-path/prefix-lun-1", + Path: "/mnt/111"}, + {Device: "/dev/disk/by-path/prefix-lun-1", + Path: "/mnt/222"}, + }, + } + + tests := []struct { + mountPath string + expectedDevice string + expectedRefs int + }{ + { + "/mnt/222", + "/dev/disk/by-path/prefix-lun-1", + 2, + }, + } + + for i, test := range tests { + if device, refs, err := GetDeviceNameFromMount(fm, test.mountPath); err != nil || test.expectedRefs != refs || test.expectedDevice != device { + t.Errorf("%d. GetDeviceNameFromMount(%s) = (%s, %d), %v; expected (%s,%d), nil", i, test.mountPath, device, refs, err, test.expectedDevice, test.expectedRefs) + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/mount/nsenter_mount.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/mount/nsenter_mount.go index 7c4ed261c806..9be0e551d357 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/mount/nsenter_mount.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/mount/nsenter_mount.go @@ -118,7 +118,7 @@ func (n *NsenterMounter) doNsenterMount(source, target, fstype string, options [ exec := exec.New() outputBytes, err := exec.Command(nsenterPath, args...).CombinedOutput() if len(outputBytes) != 0 { - glog.V(5).Infof("Output from mount command: %v", string(outputBytes)) + glog.V(5).Infof("Output of mounting %s to %s: %v", source, target, string(outputBytes)) } return err @@ -151,7 +151,7 @@ func (n *NsenterMounter) Unmount(target string) error { exec := exec.New() outputBytes, err := exec.Command(nsenterPath, args...).CombinedOutput() if len(outputBytes) != 0 { - glog.V(5).Infof("Output from mount command: %v", string(outputBytes)) + glog.V(5).Infof("Output of unmounting %s: %v", target, string(outputBytes)) } return err @@ -170,13 +170,19 @@ func (n *NsenterMounter) IsLikelyNotMountPoint(file string) (bool, error) { return true, err } + // Check the directory exists + if _, err = os.Stat(file); os.IsNotExist(err) { + glog.V(5).Infof("findmnt: directory %s does not exist", file) + return true, err + } + args := []string{"--mount=/rootfs/proc/1/ns/mnt", "--", n.absHostPath("findmnt"), "-o", "target", "--noheadings", "--target", file} glog.V(5).Infof("findmnt command: %v %v", nsenterPath, args) exec := exec.New() out, err := exec.Command(nsenterPath, args...).CombinedOutput() if err != nil { - glog.V(2).Infof("Failed findmnt command: %v", err) + glog.V(2).Infof("Failed findmnt command for path %s: %v", file, err) // Different operating systems behave differently for paths which are not mount points. // On older versions (e.g. 2.20.1) we'd get error, on newer ones (e.g. 2.26.2) we'd get "/". // It's safer to assume that it's not a mount point. 
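The existence check added to IsLikelyNotMountPoint above follows a common os.Stat/os.IsNotExist guard pattern; a simplified standalone sketch of that guard (a stand-in, not the NsenterMounter implementation):

package main

import (
	"fmt"
	"os"
)

// isLikelyNotMountPoint mirrors the guard added above: before shelling out
// to findmnt, report ErrNotExist for paths that are gone.
func isLikelyNotMountPoint(file string) (bool, error) {
	if _, err := os.Stat(file); os.IsNotExist(err) {
		// Return the os error unwrapped so callers can match it with
		// os.IsNotExist.
		return true, err
	}
	// The real code asks findmnt in the host mount namespace here.
	return true, nil
}

func main() {
	notMnt, err := isLikelyNotMountPoint("/no/such/dir")
	fmt.Println(notMnt, os.IsNotExist(err)) // true true
}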
@@ -184,7 +190,7 @@ func (n *NsenterMounter) IsLikelyNotMountPoint(file string) (bool, error) { } strOut := strings.TrimSuffix(string(out), "\n") - glog.V(5).Infof("IsLikelyNotMountPoint findmnt output: %v", strOut) + glog.V(5).Infof("IsLikelyNotMountPoint findmnt output for path %s: %v", file, strOut) if strOut == file { return false, nil } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/mount/safe_format_and_mount_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/mount/safe_format_and_mount_test.go new file mode 100644 index 000000000000..03c0b8442b14 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/mount/safe_format_and_mount_test.go @@ -0,0 +1,224 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package mount + +import ( + "fmt" + "runtime" + "testing" + + "k8s.io/kubernetes/pkg/util/exec" +) + +type ErrorMounter struct { + *FakeMounter + errIndex int + err []error +} + +func (mounter *ErrorMounter) Mount(source string, target string, fstype string, options []string) error { + i := mounter.errIndex + mounter.errIndex++ + if mounter.err != nil && mounter.err[i] != nil { + return mounter.err[i] + } + return mounter.FakeMounter.Mount(source, target, fstype, options) +} + +type ExecArgs struct { + command string + args []string + output string + err error +} + +func TestSafeFormatAndMount(t *testing.T) { + if runtime.GOOS == "darwin" || runtime.GOOS == "windows" { + t.Skipf("not supported on GOOS=%s", runtime.GOOS) + } + tests := []struct { + description string + fstype string + mountOptions []string + execScripts []ExecArgs + mountErrs []error + expectedError error + }{ + { + description: "Test a read only mount", + fstype: "ext4", + mountOptions: []string{"ro"}, + }, + { + description: "Test a normal mount", + fstype: "ext4", + execScripts: []ExecArgs{ + {"fsck", []string{"-a", "/dev/foo"}, "", nil}, + }, + }, + { + description: "Test 'fsck' fails with exit status 4", + fstype: "ext4", + execScripts: []ExecArgs{ + {"fsck", []string{"-a", "/dev/foo"}, "", &exec.FakeExitError{Status: 4}}, + }, + expectedError: fmt.Errorf("'fsck' found errors on device /dev/foo but could not correct them: ."), + }, + { + description: "Test 'fsck' fails with exit status 1 (errors found and corrected)", + fstype: "ext4", + execScripts: []ExecArgs{ + {"fsck", []string{"-a", "/dev/foo"}, "", &exec.FakeExitError{Status: 1}}, + }, + }, + { + description: "Test 'fsck' fails with exit status other than 1 and 4 (likely unformatted device)", + fstype: "ext4", + execScripts: []ExecArgs{ + {"fsck", []string{"-a", "/dev/foo"}, "", &exec.FakeExitError{Status: 8}}, + }, + }, + { + description: "Test that 'lsblk' is called and fails", + fstype: "ext4", + mountErrs: []error{fmt.Errorf("unknown filesystem type '(null)'")}, + execScripts: []ExecArgs{ + {"fsck", []string{"-a", "/dev/foo"}, "", nil}, + {"lsblk", []string{"-nd", "-o", "FSTYPE", "/dev/foo"}, "ext4", nil}, + }, + expectedError: fmt.Errorf("unknown filesystem type 
'(null)'"),
+ },
+ {
+ description: "Test that 'lsblk' is called and confirms unformatted disk, format fails",
+ fstype: "ext4",
+ mountErrs: []error{fmt.Errorf("unknown filesystem type '(null)'")},
+ execScripts: []ExecArgs{
+ {"fsck", []string{"-a", "/dev/foo"}, "", nil},
+ {"lsblk", []string{"-nd", "-o", "FSTYPE", "/dev/foo"}, "", nil},
+ {"mkfs.ext4", []string{"-E", "lazy_itable_init=0,lazy_journal_init=0", "-F", "/dev/foo"}, "", fmt.Errorf("formatting failed")},
+ },
+ expectedError: fmt.Errorf("formatting failed"),
+ },
+ {
+ description: "Test that 'lsblk' is called and confirms unformatted disk, format passes, second mount fails",
+ fstype: "ext4",
+ mountErrs: []error{fmt.Errorf("unknown filesystem type '(null)'"), fmt.Errorf("Still cannot mount")},
+ execScripts: []ExecArgs{
+ {"fsck", []string{"-a", "/dev/foo"}, "", nil},
+ {"lsblk", []string{"-nd", "-o", "FSTYPE", "/dev/foo"}, "", nil},
+ {"mkfs.ext4", []string{"-E", "lazy_itable_init=0,lazy_journal_init=0", "-F", "/dev/foo"}, "", nil},
+ },
+ expectedError: fmt.Errorf("Still cannot mount"),
+ },
+ {
+ description: "Test that 'lsblk' is called and confirms unformatted disk, format passes, second mount passes",
+ fstype: "ext4",
+ mountErrs: []error{fmt.Errorf("unknown filesystem type '(null)'"), nil},
+ execScripts: []ExecArgs{
+ {"fsck", []string{"-a", "/dev/foo"}, "", nil},
+ {"lsblk", []string{"-nd", "-o", "FSTYPE", "/dev/foo"}, "", nil},
+ {"mkfs.ext4", []string{"-E", "lazy_itable_init=0,lazy_journal_init=0", "-F", "/dev/foo"}, "", nil},
+ },
+ expectedError: nil,
+ },
+ {
+ description: "Test that 'lsblk' is called and confirms unformatted disk, format passes, second mount passes with ext3",
+ fstype: "ext3",
+ mountErrs: []error{fmt.Errorf("unknown filesystem type '(null)'"), nil},
+ execScripts: []ExecArgs{
+ {"fsck", []string{"-a", "/dev/foo"}, "", nil},
+ {"lsblk", []string{"-nd", "-o", "FSTYPE", "/dev/foo"}, "", nil},
+ {"mkfs.ext3", []string{"-E", "lazy_itable_init=0,lazy_journal_init=0", "-F", "/dev/foo"}, "", nil},
+ },
+ expectedError: nil,
+ },
+ {
+ description: "Test that a non-ext4 fs does not get called with ext4 options.",
+ fstype: "xfs",
+ mountErrs: []error{fmt.Errorf("unknown filesystem type '(null)'"), nil},
+ execScripts: []ExecArgs{
+ {"fsck", []string{"-a", "/dev/foo"}, "", nil},
+ {"lsblk", []string{"-nd", "-o", "FSTYPE", "/dev/foo"}, "", nil},
+ {"mkfs.xfs", []string{"/dev/foo"}, "", nil},
+ },
+ expectedError: nil,
+ },
+ }
+
+ for _, test := range tests {
+ commandScripts := []exec.FakeCommandAction{}
+ for _, expected := range test.execScripts {
+ ecmd := expected.command
+ eargs := expected.args
+ output := expected.output
+ err := expected.err
+ commandScript := func(cmd string, args ...string) exec.Cmd {
+ if cmd != ecmd {
+ t.Errorf("Unexpected command %s. Expecting %s", cmd, ecmd)
+ }
+
+ for j := range args {
+ if args[j] != eargs[j] {
+ t.Errorf("Unexpected args %v. Expecting %v", args, eargs)
+ }
+ }
+ fake := exec.FakeCmd{
+ CombinedOutputScript: []exec.FakeCombinedOutputAction{
+ func() ([]byte, error) { return []byte(output), err },
+ },
+ }
+ return exec.InitFakeCmd(&fake, cmd, args...)
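+ // Note: ecmd, eargs, output, and err are per-iteration copies taken
+ // above; capturing the range variable `expected` directly would leave
+ // every scripted command closure pointing at the final test entry.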
+ } + commandScripts = append(commandScripts, commandScript) + } + + fake := exec.FakeExec{ + CommandScript: commandScripts, + } + + fakeMounter := ErrorMounter{&FakeMounter{}, 0, test.mountErrs} + mounter := SafeFormatAndMount{ + Interface: &fakeMounter, + Runner: &fake, + } + + device := "/dev/foo" + dest := "/mnt/bar" + err := mounter.FormatAndMount(device, dest, test.fstype, test.mountOptions) + if test.expectedError == nil { + if err != nil { + t.Errorf("test \"%s\" unexpected non-error: %v", test.description, err) + } + + // Check that something was mounted on the directory + isNotMountPoint, err := fakeMounter.IsLikelyNotMountPoint(dest) + if err != nil || isNotMountPoint { + t.Errorf("test \"%s\" the directory was not mounted", test.description) + } + + //check that the correct device was mounted + mountedDevice, _, err := GetDeviceNameFromMount(fakeMounter.FakeMounter, dest) + if err != nil || mountedDevice != device { + t.Errorf("test \"%s\" the correct device was not mounted", test.description) + } + } else { + if err == nil || test.expectedError.Error() != err.Error() { + t.Errorf("test \"%s\" unexpected error: \n [%v]. \nExpecting [%v]", test.description, err, test.expectedError) + } + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/net/http.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/net/http.go index f3d4473f0a87..99d4cd2621e3 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/net/http.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/net/http.go @@ -23,6 +23,7 @@ import ( "net" "net/http" "net/url" + "os" "strconv" "strings" ) @@ -55,8 +56,10 @@ var defaultTransport = http.DefaultTransport.(*http.Transport) // SetTransportDefaults applies the defaults from http.DefaultTransport // for the Proxy, Dial, and TLSHandshakeTimeout fields if unset func SetTransportDefaults(t *http.Transport) *http.Transport { - if t.Proxy == nil { - t.Proxy = defaultTransport.Proxy + if t.Proxy == nil || isDefault(t.Proxy) { + // http.ProxyFromEnvironment doesn't respect CIDRs and that makes it impossible to exclude things like pod and service IPs from proxy settings + // ProxierWithNoProxyCIDR allows CIDR rules in NO_PROXY + t.Proxy = NewProxierWithNoProxyCIDR(http.ProxyFromEnvironment) } if t.Dial == nil { t.Dial = defaultTransport.Dial @@ -150,6 +153,65 @@ func GetClientIP(req *http.Request) net.IP { } // Fallback to Remote Address in request, which will give the correct client IP when there is no proxy. - ip := net.ParseIP(req.RemoteAddr) - return ip + // Remote Address in Go's HTTP server is in the form host:port so we need to split that first. + host, _, err := net.SplitHostPort(req.RemoteAddr) + if err == nil { + return net.ParseIP(host) + } + + // Fallback if Remote Address was just IP. 
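+ // (net.SplitHostPort rejects a bare IP with a "missing port in address"
+ // error, so the unmodified RemoteAddr is parsed directly below.)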
+ return net.ParseIP(req.RemoteAddr) +} + +var defaultProxyFuncPointer = fmt.Sprintf("%p", http.ProxyFromEnvironment) + +// isDefault checks to see if the transportProxierFunc is pointing to the default one +func isDefault(transportProxier func(*http.Request) (*url.URL, error)) bool { + transportProxierPointer := fmt.Sprintf("%p", transportProxier) + return transportProxierPointer == defaultProxyFuncPointer +} + +// NewProxierWithNoProxyCIDR constructs a Proxier function that respects CIDRs in NO_PROXY and delegates if +// no matching CIDRs are found +func NewProxierWithNoProxyCIDR(delegate func(req *http.Request) (*url.URL, error)) func(req *http.Request) (*url.URL, error) { + // we wrap the default method, so we only need to perform our check if the NO_PROXY envvar has a CIDR in it + noProxyEnv := os.Getenv("NO_PROXY") + noProxyRules := strings.Split(noProxyEnv, ",") + + cidrs := []*net.IPNet{} + for _, noProxyRule := range noProxyRules { + _, cidr, _ := net.ParseCIDR(noProxyRule) + if cidr != nil { + cidrs = append(cidrs, cidr) + } + } + + if len(cidrs) == 0 { + return delegate + } + + return func(req *http.Request) (*url.URL, error) { + host := req.URL.Host + // for some urls, the Host is already the host, not the host:port + if net.ParseIP(host) == nil { + var err error + host, _, err = net.SplitHostPort(req.URL.Host) + if err != nil { + return delegate(req) + } + } + + ip := net.ParseIP(host) + if ip == nil { + return delegate(req) + } + + for _, cidr := range cidrs { + if cidr.Contains(ip) { + return nil, nil + } + } + + return delegate(req) + } } diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/net/http_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/net/http_test.go new file mode 100644 index 000000000000..08cc858816a0 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/net/http_test.go @@ -0,0 +1,170 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package net + +import ( + "net" + "net/http" + "net/url" + "os" + "reflect" + "testing" +) + +func TestGetClientIP(t *testing.T) { + ipString := "10.0.0.1" + ip := net.ParseIP(ipString) + invalidIPString := "invalidIPString" + testCases := []struct { + Request http.Request + ExpectedIP net.IP + }{ + { + Request: http.Request{}, + }, + { + Request: http.Request{ + Header: map[string][]string{ + "X-Real-Ip": {ipString}, + }, + }, + ExpectedIP: ip, + }, + { + Request: http.Request{ + Header: map[string][]string{ + "X-Real-Ip": {invalidIPString}, + }, + }, + }, + { + Request: http.Request{ + Header: map[string][]string{ + "X-Forwarded-For": {ipString}, + }, + }, + ExpectedIP: ip, + }, + { + Request: http.Request{ + Header: map[string][]string{ + "X-Forwarded-For": {invalidIPString}, + }, + }, + }, + { + Request: http.Request{ + Header: map[string][]string{ + "X-Forwarded-For": {invalidIPString + "," + ipString}, + }, + }, + ExpectedIP: ip, + }, + { + Request: http.Request{ + // RemoteAddr is in the form host:port + RemoteAddr: ipString + ":1234", + }, + ExpectedIP: ip, + }, + { + Request: http.Request{ + RemoteAddr: invalidIPString, + }, + }, + { + Request: http.Request{ + Header: map[string][]string{ + "X-Forwarded-For": {invalidIPString}, + }, + // RemoteAddr is in the form host:port + RemoteAddr: ipString, + }, + ExpectedIP: ip, + }, + } + + for i, test := range testCases { + if a, e := GetClientIP(&test.Request), test.ExpectedIP; reflect.DeepEqual(e, a) != true { + t.Fatalf("test case %d failed. expected: %v, actual: %v", i, e, a) + } + } +} + +func TestProxierWithNoProxyCIDR(t *testing.T) { + testCases := []struct { + name string + noProxy string + url string + + expectedDelegated bool + }{ + { + name: "no env", + url: "https://192.168.143.1/api", + expectedDelegated: true, + }, + { + name: "no cidr", + noProxy: "192.168.63.1", + url: "https://192.168.143.1/api", + expectedDelegated: true, + }, + { + name: "hostname", + noProxy: "192.168.63.0/24,192.168.143.0/24", + url: "https://my-hostname/api", + expectedDelegated: true, + }, + { + name: "match second cidr", + noProxy: "192.168.63.0/24,192.168.143.0/24", + url: "https://192.168.143.1/api", + expectedDelegated: false, + }, + { + name: "match second cidr with host:port", + noProxy: "192.168.63.0/24,192.168.143.0/24", + url: "https://192.168.143.1:8443/api", + expectedDelegated: false, + }, + } + + for _, test := range testCases { + os.Setenv("NO_PROXY", test.noProxy) + actualDelegated := false + proxyFunc := NewProxierWithNoProxyCIDR(func(req *http.Request) (*url.URL, error) { + actualDelegated = true + return nil, nil + }) + + req, err := http.NewRequest("GET", test.url, nil) + if err != nil { + t.Errorf("%s: unexpected err: %v", test.name, err) + continue + } + if _, err := proxyFunc(req); err != nil { + t.Errorf("%s: unexpected err: %v", test.name, err) + continue + } + + if test.expectedDelegated != actualDelegated { + t.Errorf("%s: expected %v, got %v", test.name, test.expectedDelegated, actualDelegated) + continue + } + } +} diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/net/interface_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/net/interface_test.go new file mode 100644 index 000000000000..9571e5b48bf4 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/net/interface_test.go @@ -0,0 +1,300 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package net + +import ( + "fmt" + "io" + "net" + "strings" + "testing" +) + +const gatewayfirst = `Iface Destination Gateway Flags RefCnt Use Metric Mask MTU Window IRTT +eth3 00000000 0100FE0A 0003 0 0 1024 00000000 0 0 0 +eth3 0000FE0A 00000000 0001 0 0 0 0080FFFF 0 0 0 +docker0 000011AC 00000000 0001 0 0 0 0000FFFF 0 0 0 +virbr0 007AA8C0 00000000 0001 0 0 0 00FFFFFF 0 0 0 +` +const gatewaylast = `Iface Destination Gateway Flags RefCnt Use Metric Mask MTU Window IRTT +docker0 000011AC 00000000 0001 0 0 0 0000FFFF 0 0 0 +virbr0 007AA8C0 00000000 0001 0 0 0 00FFFFFF 0 0 0 +eth3 0000FE0A 00000000 0001 0 0 0 0080FFFF 0 0 0 +eth3 00000000 0100FE0A 0003 0 0 1024 00000000 0 0 0 +` +const gatewaymiddle = `Iface Destination Gateway Flags RefCnt Use Metric Mask MTU Window IRTT +eth3 0000FE0A 00000000 0001 0 0 0 0080FFFF 0 0 0 +docker0 000011AC 00000000 0001 0 0 0 0000FFFF 0 0 0 +eth3 00000000 0100FE0A 0003 0 0 1024 00000000 0 0 0 +virbr0 007AA8C0 00000000 0001 0 0 0 00FFFFFF 0 0 0 +` +const noInternetConnection = `Iface Destination Gateway Flags RefCnt Use Metric Mask MTU Window IRTT +docker0 000011AC 00000000 0001 0 0 0 0000FFFF 0 0 0 +virbr0 007AA8C0 00000000 0001 0 0 0 00FFFFFF 0 0 0 +` +const nothing = `Iface Destination Gateway Flags RefCnt Use Metric Mask MTU Window IRTT +` +const gatewayfirstIpv6_1 = `Iface Destination Gateway Flags RefCnt Use Metric Mask MTU Window IRTT +eth3 00000000 0100FE0A 0003 0 0 1024 00000000 0 0 0 +eth3 0000FE0AA1 00000000 0001 0 0 0 0080FFFF 0 0 0 +docker0 000011AC 00000000 0001 0 0 0 0000FFFF 0 0 0 +virbr0 007AA8C0 00000000 0001 0 0 0 00FFFFFF 0 0 0 +` +const gatewayfirstIpv6_2 = `Iface Destination Gateway Flags RefCnt Use Metric Mask MTU Window IRTT +eth3 00000000 0100FE0AA1 0003 0 0 1024 00000000 0 0 0 +eth3 0000FE0A 00000000 0001 0 0 0 0080FFFF 0 0 0 +docker0 000011AC 00000000 0001 0 0 0 0000FFFF 0 0 0 +virbr0 007AA8C0 00000000 0001 0 0 0 00FFFFFF 0 0 0 +` +const route_Invalidhex = `Iface Destination Gateway Flags RefCnt Use Metric Mask MTU Window IRTT +eth3 00000000 0100FE0AA 0003 0 0 1024 00000000 0 0 0 +eth3 0000FE0A 00000000 0001 0 0 0 0080FFFF 0 0 0 +docker0 000011AC 00000000 0001 0 0 0 0000FFFF 0 0 0 +virbr0 007AA8C0 00000000 0001 0 0 0 00FFFFFF 0 0 0 +` + +// Based on DigitalOcean COREOS +const gatewayfirstLinkLocal = `Iface Destination Gateway Flags RefCnt Use Metric Mask MTU Window IRTT +eth0 00000000 0120372D 0001 0 0 0 00000000 0 0 0 +eth0 00000000 00000000 0001 0 0 2048 00000000 0 0 0 +` + +func TestGetRoutes(t *testing.T) { + testCases := []struct { + tcase string + route string + expected int + }{ + {"gatewayfirst", gatewayfirst, 4}, + {"gatewaymiddle", gatewaymiddle, 4}, + {"gatewaylast", gatewaylast, 4}, + {"nothing", nothing, 0}, + {"gatewayfirstIpv6_1", gatewayfirstIpv6_1, 0}, + {"gatewayfirstIpv6_2", gatewayfirstIpv6_2, 0}, + {"route_Invalidhex", route_Invalidhex, 0}, + } + for _, tc := range testCases { + r := strings.NewReader(tc.route) + routes, err := getRoutes(r) + if len(routes) != tc.expected { + 
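+ // The IPv6-style and invalid-hex fixtures above are expected to parse
+ // to zero usable routes, hence their expected counts of 0.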
t.Errorf("case[%v]: expected %v, got %v .err : %v", tc.tcase, tc.expected, len(routes), err) + } + } +} + +func TestParseIP(t *testing.T) { + testCases := []struct { + tcase string + ip string + success bool + expected net.IP + }{ + {"empty", "", false, nil}, + {"too short", "AA", false, nil}, + {"too long", "0011223344", false, nil}, + {"invalid", "invalid!", false, nil}, + {"zero", "00000000", true, net.IP{0, 0, 0, 0}}, + {"ffff", "FFFFFFFF", true, net.IP{0xff, 0xff, 0xff, 0xff}}, + {"valid", "12345678", true, net.IP{120, 86, 52, 18}}, + } + for _, tc := range testCases { + ip, err := parseIP(tc.ip) + if !ip.Equal(tc.expected) { + t.Errorf("case[%v]: expected %q, got %q . err : %v", tc.tcase, tc.expected, ip, err) + } + } +} + +func TestIsInterfaceUp(t *testing.T) { + testCases := []struct { + tcase string + intf net.Interface + expected bool + }{ + {"up", net.Interface{Index: 0, MTU: 0, Name: "eth3", HardwareAddr: nil, Flags: net.FlagUp}, true}, + {"down", net.Interface{Index: 0, MTU: 0, Name: "eth3", HardwareAddr: nil, Flags: 0}, false}, + {"nothing", net.Interface{}, false}, + } + for _, tc := range testCases { + it := isInterfaceUp(&tc.intf) + if it != tc.expected { + t.Errorf("case[%v]: expected %v, got %v .", tc.tcase, tc.expected, it) + } + } +} + +type addrStruct struct{ val string } + +func (a addrStruct) Network() string { + return a.val +} +func (a addrStruct) String() string { + return a.val +} + +func TestFinalIP(t *testing.T) { + testCases := []struct { + tcase string + addr []net.Addr + expected net.IP + }{ + {"ipv6", []net.Addr{addrStruct{val: "fe80::2f7:6fff:fe6e:2956/64"}}, nil}, + {"invalidCIDR", []net.Addr{addrStruct{val: "fe80::2f7:67fff:fe6e:2956/64"}}, nil}, + {"loopback", []net.Addr{addrStruct{val: "127.0.0.1/24"}}, nil}, + {"ip4", []net.Addr{addrStruct{val: "10.254.12.132/17"}}, net.ParseIP("10.254.12.132")}, + + {"nothing", []net.Addr{}, nil}, + } + for _, tc := range testCases { + ip, err := getFinalIP(tc.addr) + if !ip.Equal(tc.expected) { + t.Errorf("case[%v]: expected %v, got %v .err : %v", tc.tcase, tc.expected, ip, err) + } + } +} + +func TestAddrs(t *testing.T) { + var nw networkInterfacer = validNetworkInterface{} + intf := net.Interface{Index: 0, MTU: 0, Name: "eth3", HardwareAddr: nil, Flags: 0} + addrs, err := nw.Addrs(&intf) + if err != nil { + t.Errorf("expected no error got : %v", err) + } + if len(addrs) != 2 { + t.Errorf("expected addrs: 2 got null") + } +} + +type validNetworkInterface struct { +} + +func (_ validNetworkInterface) InterfaceByName(intfName string) (*net.Interface, error) { + c := net.Interface{Index: 0, MTU: 0, Name: "eth3", HardwareAddr: nil, Flags: net.FlagUp} + return &c, nil +} +func (_ validNetworkInterface) Addrs(intf *net.Interface) ([]net.Addr, error) { + var ifat []net.Addr + ifat = []net.Addr{ + addrStruct{val: "fe80::2f7:6fff:fe6e:2956/64"}, addrStruct{val: "10.254.71.145/17"}} + return ifat, nil +} + +type validNetworkInterfaceWithLinkLocal struct { +} + +func (_ validNetworkInterfaceWithLinkLocal) InterfaceByName(intfName string) (*net.Interface, error) { + c := net.Interface{Index: 0, MTU: 0, Name: "eth0", HardwareAddr: nil, Flags: net.FlagUp} + return &c, nil +} +func (_ validNetworkInterfaceWithLinkLocal) Addrs(intf *net.Interface) ([]net.Addr, error) { + var ifat []net.Addr + ifat = []net.Addr{addrStruct{val: "169.254.162.166/16"}, addrStruct{val: "45.55.47.146/19"}} + return ifat, nil +} + +type validNetworkInterfacewithIpv6Only struct { +} + +func (_ validNetworkInterfacewithIpv6Only) InterfaceByName(intfName 
+	c := net.Interface{Index: 0, MTU: 0, Name: "eth3", HardwareAddr: nil, Flags: net.FlagUp}
+	return &c, nil
+}
+func (_ validNetworkInterfacewithIpv6Only) Addrs(intf *net.Interface) ([]net.Addr, error) {
+	ifat := []net.Addr{addrStruct{val: "fe80::2f7:6fff:fe6e:2956/64"}}
+	return ifat, nil
+}
+
+type noNetworkInterface struct {
+}
+
+func (_ noNetworkInterface) InterfaceByName(intfName string) (*net.Interface, error) {
+	return nil, fmt.Errorf("unable to get Interface")
+}
+func (_ noNetworkInterface) Addrs(intf *net.Interface) ([]net.Addr, error) {
+	return nil, nil
+}
+
+type networkInterfacewithNoAddrs struct {
+}
+
+func (_ networkInterfacewithNoAddrs) InterfaceByName(intfName string) (*net.Interface, error) {
+	c := net.Interface{Index: 0, MTU: 0, Name: "eth3", HardwareAddr: nil, Flags: net.FlagUp}
+	return &c, nil
+}
+func (_ networkInterfacewithNoAddrs) Addrs(intf *net.Interface) ([]net.Addr, error) {
+	return nil, fmt.Errorf("unable to get Addrs")
+}
+
+type networkInterfacewithIpv6addrs struct {
+}
+
+func (_ networkInterfacewithIpv6addrs) InterfaceByName(intfName string) (*net.Interface, error) {
+	c := net.Interface{Index: 0, MTU: 0, Name: "eth3", HardwareAddr: nil, Flags: net.FlagUp}
+	return &c, nil
+}
+func (_ networkInterfacewithIpv6addrs) Addrs(intf *net.Interface) ([]net.Addr, error) {
+	ifat := []net.Addr{addrStruct{val: "fe80::2f7:6ffff:fe6e:2956/64"}}
+	return ifat, nil
+}
+
+func TestGetIPFromInterface(t *testing.T) {
+	testCases := []struct {
+		tcase    string
+		nwname   string
+		nw       networkInterfacer
+		expected net.IP
+	}{
+		{"valid", "eth3", validNetworkInterface{}, net.ParseIP("10.254.71.145")},
+		{"ipv6", "eth3", validNetworkInterfacewithIpv6Only{}, nil},
+		{"nothing", "eth3", noNetworkInterface{}, nil},
+	}
+	for _, tc := range testCases {
+		ip, err := getIPFromInterface(tc.nwname, tc.nw)
+		if !ip.Equal(tc.expected) {
+			t.Errorf("case[%v]: expected %v, got %+v, err: %v", tc.tcase, tc.expected, ip, err)
+		}
+	}
+}
+
+func TestChooseHostInterfaceFromRoute(t *testing.T) {
+	testCases := []struct {
+		tcase    string
+		inFile   io.Reader
+		nw       networkInterfacer
+		expected net.IP
+	}{
+		{"valid_routefirst", strings.NewReader(gatewayfirst), validNetworkInterface{}, net.ParseIP("10.254.71.145")},
+		{"valid_routelast", strings.NewReader(gatewaylast), validNetworkInterface{}, net.ParseIP("10.254.71.145")},
+		{"valid_routemiddle", strings.NewReader(gatewaymiddle), validNetworkInterface{}, net.ParseIP("10.254.71.145")},
+		{"valid_routemiddle_ipv6", strings.NewReader(gatewaymiddle), validNetworkInterfacewithIpv6Only{}, nil},
+		{"no internet connection", strings.NewReader(noInternetConnection), validNetworkInterface{}, nil},
+		{"no non-link-local ip", strings.NewReader(gatewayfirstLinkLocal), validNetworkInterfaceWithLinkLocal{}, net.ParseIP("45.55.47.146")},
+		{"no route", strings.NewReader(nothing), validNetworkInterface{}, nil},
+		{"no route file", nil, validNetworkInterface{}, nil},
+		{"no interfaces", nil, noNetworkInterface{}, nil},
+		{"no interface Addrs", strings.NewReader(gatewaymiddle), networkInterfacewithNoAddrs{}, nil},
+		{"Invalid Addrs", strings.NewReader(gatewaymiddle), networkInterfacewithIpv6addrs{}, nil},
+	}
+	for _, tc := range testCases {
+		ip, err := chooseHostInterfaceFromRoute(tc.inFile, tc.nw)
+		if !ip.Equal(tc.expected) {
+			t.Errorf("case[%v]: expected %v, got %+v, err: %v", tc.tcase, tc.expected, ip, err)
+		}
+	}
+}
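A note for reviewers of the fixtures above: the Destination and Gateway columns of /proc/net/route are IPv4 addresses encoded as little-endian hex, which is why the parseIP case "12345678" expects net.IP{120, 86, 52, 18}. A minimal standalone sketch of that decoding (decodeRouteHex is a hypothetical helper named for illustration; the vendored package does this inside parseIP):

```go
package main

import (
	"encoding/hex"
	"fmt"
	"net"
)

// decodeRouteHex decodes one 8-hex-digit field from /proc/net/route.
// The kernel writes IPv4 addresses in little-endian byte order, so the
// bytes must be reversed to obtain the conventional dotted-quad form.
func decodeRouteHex(s string) (net.IP, error) {
	b, err := hex.DecodeString(s)
	if err != nil {
		return nil, err
	}
	if len(b) != 4 {
		return nil, fmt.Errorf("expected 4 bytes, got %d", len(b))
	}
	// Reverse in place: little-endian -> big-endian.
	b[0], b[1], b[2], b[3] = b[3], b[2], b[1], b[0]
	return net.IP(b), nil
}

func main() {
	ip, _ := decodeRouteHex("0100FE0A") // gateway column from the gatewayfirst fixture
	fmt.Println(ip)                     // 10.254.0.1
}
```

This also explains why gatewayfirstIpv6_1/2 and route_Invalidhex expect zero routes: their fields are not exactly eight hex digits, so decoding fails.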
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/net/port_range_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/net/port_range_test.go
new file mode 100644
index 000000000000..9eb081aa7dda
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/net/port_range_test.go
@@ -0,0 +1,66 @@
+/*
+Copyright 2015 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package net
+
+import (
+	"testing"
+
+	flag "github.com/spf13/pflag"
+)
+
+func TestPortRange(t *testing.T) {
+	testCases := []struct {
+		input    string
+		success  bool
+		expected string
+		included int
+		excluded int
+	}{
+		{"100-200", true, "100-200", 200, 201},
+		{" 100-200 ", true, "100-200", 200, 201},
+		{"0-0", true, "0-0", 0, 1},
+		{"", true, "", -1, 0},
+		{"100", false, "", -1, -1},
+		{"100 - 200", false, "", -1, -1},
+		{"-100", false, "", -1, -1},
+		{"100-", false, "", -1, -1},
+	}
+
+	for i := range testCases {
+		tc := &testCases[i]
+		pr := &PortRange{}
+		var f flag.Value = pr
+		err := f.Set(tc.input)
+		if err != nil && tc.success {
+			t.Errorf("expected success, got %q", err)
+			continue
+		} else if err == nil && !tc.success {
+			t.Errorf("expected failure")
+			continue
+		} else if tc.success {
+			if f.String() != tc.expected {
+				t.Errorf("expected %q, got %q", tc.expected, f.String())
+			}
+			if tc.included >= 0 && !pr.Contains(tc.included) {
+				t.Errorf("expected %q to include %d", f.String(), tc.included)
+			}
+			if tc.excluded >= 0 && pr.Contains(tc.excluded) {
+				t.Errorf("expected %q to exclude %d", f.String(), tc.excluded)
+			}
+		}
+	}
+}
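As the test shows, PortRange satisfies pflag.Value, so it can be bound directly to a command-line flag. A short usage sketch under the vendored import paths (the flag name is purely illustrative):

```go
package main

import (
	"fmt"

	flag "github.com/spf13/pflag"
	utilnet "k8s.io/kubernetes/pkg/util/net"
)

func main() {
	// Register a PortRange directly as a flag; Set and String come from the
	// pflag.Value implementation exercised by the test above.
	pr := utilnet.PortRange{}
	fs := flag.NewFlagSet("example", flag.ContinueOnError)
	fs.Var(&pr, "port-range", "inclusive port range, e.g. 100-200")
	if err := fs.Parse([]string{"--port-range=100-200"}); err != nil {
		panic(err)
	}
	fmt.Println(pr.String(), pr.Contains(150), pr.Contains(201)) // 100-200 true false
}
```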
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/net/port_split_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/net/port_split_test.go
new file mode 100644
index 000000000000..2e9e135c0731
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/net/port_split_test.go
@@ -0,0 +1,111 @@
+/*
+Copyright 2015 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package net
+
+import (
+	"testing"
+)
+
+func TestSplitSchemeNamePort(t *testing.T) {
+	table := []struct {
+		in                 string
+		name, port, scheme string
+		valid              bool
+		normalized         bool
+	}{
+		{
+			in:    "aoeu:asdf",
+			name:  "aoeu",
+			port:  "asdf",
+			valid: true,
+		}, {
+			in:     "http:aoeu:asdf",
+			scheme: "http",
+			name:   "aoeu",
+			port:   "asdf",
+			valid:  true,
+		}, {
+			in:         "https:aoeu:",
+			scheme:     "https",
+			name:       "aoeu",
+			port:       "",
+			valid:      true,
+			normalized: false,
+		}, {
+			in:     "https:aoeu:asdf",
+			scheme: "https",
+			name:   "aoeu",
+			port:   "asdf",
+			valid:  true,
+		}, {
+			in:         "aoeu:",
+			name:       "aoeu",
+			valid:      true,
+			normalized: false,
+		}, {
+			in:    ":asdf",
+			valid: false,
+		}, {
+			in:    "aoeu:asdf:htns",
+			valid: false,
+		}, {
+			in:    "aoeu",
+			name:  "aoeu",
+			valid: true,
+		}, {
+			in:    "",
+			valid: false,
+		},
+	}
+
+	for _, item := range table {
+		scheme, name, port, valid := SplitSchemeNamePort(item.in)
+		if e, a := item.scheme, scheme; e != a {
+			t.Errorf("%q: Wanted %q, got %q", item.in, e, a)
+		}
+		if e, a := item.name, name; e != a {
+			t.Errorf("%q: Wanted %q, got %q", item.in, e, a)
+		}
+		if e, a := item.port, port; e != a {
+			t.Errorf("%q: Wanted %q, got %q", item.in, e, a)
+		}
+		if e, a := item.valid, valid; e != a {
+			t.Errorf("%q: Wanted %t, got %t", item.in, e, a)
+		}
+
+		// Make sure valid items round trip through JoinSchemeNamePort
+		if item.valid {
+			out := JoinSchemeNamePort(scheme, name, port)
+			if item.normalized && out != item.in {
+				t.Errorf("%q: Wanted %q, got %q", item.in, item.in, out)
+			}
+			scheme, name, port, valid := SplitSchemeNamePort(out)
+			if e, a := item.scheme, scheme; e != a {
+				t.Errorf("%q: Wanted %q, got %q", item.in, e, a)
+			}
+			if e, a := item.name, name; e != a {
+				t.Errorf("%q: Wanted %q, got %q", item.in, e, a)
+			}
+			if e, a := item.port, port; e != a {
+				t.Errorf("%q: Wanted %q, got %q", item.in, e, a)
+			}
+			if e, a := item.valid, valid; e != a {
+				t.Errorf("%q: Wanted %t, got %t", item.in, e, a)
+			}
+		}
+	}
+}
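For reviewers unfamiliar with these helpers: SplitSchemeNamePort parses "scheme:name:port" with the scheme and port optional, and JoinSchemeNamePort re-normalizes, dropping empty parts. A small sketch, again assuming the vendored import path:

```go
package main

import (
	"fmt"

	utilnet "k8s.io/kubernetes/pkg/util/net"
)

func main() {
	// Scheme and port are optional; a bare name is still valid.
	for _, s := range []string{"http:aoeu:asdf", "aoeu:asdf", "aoeu"} {
		scheme, name, port, valid := utilnet.SplitSchemeNamePort(s)
		fmt.Printf("%q -> scheme=%q name=%q port=%q valid=%v\n", s, scheme, name, port, valid)
		// JoinSchemeNamePort collapses empty scheme/port back out of the string.
		fmt.Println(utilnet.JoinSchemeNamePort(scheme, name, port))
	}
}
```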
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/net/sets/ipnet_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/net/sets/ipnet_test.go
new file mode 100644
index 000000000000..0223d1651cdc
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/net/sets/ipnet_test.go
@@ -0,0 +1,155 @@
+/*
+Copyright 2014 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package sets
+
+import (
+	"net"
+	"reflect"
+	"sort"
+	"testing"
+)
+
+func parseIPNet(s string) *net.IPNet {
+	_, ipnet, err := net.ParseCIDR(s)
+	if err != nil {
+		panic(err)
+	}
+	return ipnet
+}
+
+func TestIPNets(t *testing.T) {
+	s := IPNet{}
+	s2 := IPNet{}
+	if len(s) != 0 {
+		t.Errorf("Expected len=0: %d", len(s))
+	}
+	a := parseIPNet("1.0.0.0/8")
+	b := parseIPNet("2.0.0.0/8")
+	c := parseIPNet("3.0.0.0/8")
+	d := parseIPNet("4.0.0.0/8")
+
+	s.Insert(a, b)
+	if len(s) != 2 {
+		t.Errorf("Expected len=2: %d", len(s))
+	}
+	s.Insert(c)
+	if s.Has(d) {
+		t.Errorf("Unexpected contents: %#v", s)
+	}
+	if !s.Has(a) {
+		t.Errorf("Missing contents: %#v", s)
+	}
+	s.Delete(a)
+	if s.Has(a) {
+		t.Errorf("Unexpected contents: %#v", s)
+	}
+	s.Insert(a)
+	if s.HasAll(a, b, d) {
+		t.Errorf("Unexpected contents: %#v", s)
+	}
+	if !s.HasAll(a, b) {
+		t.Errorf("Missing contents: %#v", s)
+	}
+	s2.Insert(a, b, d)
+	if s.IsSuperset(s2) {
+		t.Errorf("Unexpected contents: %#v", s)
+	}
+	s2.Delete(d)
+	if !s.IsSuperset(s2) {
+		t.Errorf("Missing contents: %#v", s)
+	}
+}
+
+func TestIPNetSetDeleteMultiples(t *testing.T) {
+	s := IPNet{}
+	a := parseIPNet("1.0.0.0/8")
+	b := parseIPNet("2.0.0.0/8")
+	c := parseIPNet("3.0.0.0/8")
+
+	s.Insert(a, b, c)
+	if len(s) != 3 {
+		t.Errorf("Expected len=3: %d", len(s))
+	}
+
+	s.Delete(a, c)
+	if len(s) != 1 {
+		t.Errorf("Expected len=1: %d", len(s))
+	}
+	if s.Has(a) {
+		t.Errorf("Unexpected contents: %#v", s)
+	}
+	if s.Has(c) {
+		t.Errorf("Unexpected contents: %#v", s)
+	}
+	if !s.Has(b) {
+		t.Errorf("Missing contents: %#v", s)
+	}
+}
+
+func TestNewIPSet(t *testing.T) {
+	s, err := ParseIPNets("1.0.0.0/8", "2.0.0.0/8", "3.0.0.0/8")
+	if err != nil {
+		t.Errorf("error parsing IPNets: %v", err)
+	}
+	if len(s) != 3 {
+		t.Errorf("Expected len=3: %d", len(s))
+	}
+	a := parseIPNet("1.0.0.0/8")
+	b := parseIPNet("2.0.0.0/8")
+	c := parseIPNet("3.0.0.0/8")
+
+	if !s.Has(a) || !s.Has(b) || !s.Has(c) {
+		t.Errorf("Unexpected contents: %#v", s)
+	}
+}
+
+func TestIPNetSetDifference(t *testing.T) {
+	l, err := ParseIPNets("1.0.0.0/8", "2.0.0.0/8", "3.0.0.0/8")
+	if err != nil {
+		t.Errorf("error parsing IPNets: %v", err)
+	}
+	r, err := ParseIPNets("1.0.0.0/8", "2.0.0.0/8", "4.0.0.0/8", "5.0.0.0/8")
+	if err != nil {
+		t.Errorf("error parsing IPNets: %v", err)
+	}
+	c := l.Difference(r)
+	d := r.Difference(l)
+	if len(c) != 1 {
+		t.Errorf("Expected len=1: %d", len(c))
+	}
+	if !c.Has(parseIPNet("3.0.0.0/8")) {
+		t.Errorf("Unexpected contents: %#v", c)
+	}
+	if len(d) != 2 {
+		t.Errorf("Expected len=2: %d", len(d))
+	}
+	if !d.Has(parseIPNet("4.0.0.0/8")) || !d.Has(parseIPNet("5.0.0.0/8")) {
+		t.Errorf("Unexpected contents: %#v", d)
+	}
+}
+
+func TestIPNetSetList(t *testing.T) {
+	s, err := ParseIPNets("3.0.0.0/8", "1.0.0.0/8", "2.0.0.0/8")
+	if err != nil {
+		t.Errorf("error parsing IPNets: %v", err)
+	}
+	l := s.StringSlice()
+	sort.Strings(l)
+	if !reflect.DeepEqual(l, []string{"1.0.0.0/8", "2.0.0.0/8", "3.0.0.0/8"}) {
+		t.Errorf("List gave unexpected result: %#v", l)
+	}
+}
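The IPNet set behaves like the string sets used elsewhere in the tree, keyed by CIDR string. A brief usage sketch (the import alias is illustrative):

```go
package main

import (
	"fmt"

	netsets "k8s.io/kubernetes/pkg/util/net/sets"
)

func main() {
	// Build two CIDR sets and take their difference, as TestIPNetSetDifference does.
	allowed, err := netsets.ParseIPNets("10.0.0.0/8", "192.168.0.0/16")
	if err != nil {
		panic(err)
	}
	extra, _ := netsets.ParseIPNets("192.168.0.0/16", "172.16.0.0/12")
	// Difference keeps the CIDRs present in allowed but absent from extra.
	fmt.Println(allowed.Difference(extra).StringSlice()) // [10.0.0.0/8]
}
```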
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/oom/oom_linux.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/oom/oom_linux.go
index 5503bad8c2ca..c054682bdd7e 100644
--- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/oom/oom_linux.go
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/oom/oom_linux.go
@@ -20,7 +20,6 @@ package oom
 
 import (
 	"fmt"
-	"io/ioutil"
 	"os"
 	"path"
 	"strconv"
@@ -49,16 +48,6 @@ func getPids(cgroupName string) ([]int, error) {
 	return fsManager.GetPids()
 }
 
-func syscallNotExists(err error) bool {
-	if err == nil {
-		return false
-	}
-	if e, ok := err.(*os.SyscallError); ok && os.IsNotExist(e) {
-		return true
-	}
-	return false
-}
-
 // Writes 'value' to /proc/<pid>/oom_score_adj. PID = 0 means self
 // Returns os.ErrNotExist if the `pid` does not exist.
 func applyOOMScoreAdj(pid int, oomScoreAdj int) error {
@@ -78,12 +67,19 @@ func applyOOMScoreAdj(pid int, oomScoreAdj int) error {
 	value := strconv.Itoa(oomScoreAdj)
 	var err error
 	for i := 0; i < maxTries; i++ {
-		if err = ioutil.WriteFile(oomScoreAdjPath, []byte(value), 0700); err != nil {
-			if syscallNotExists(err) {
+		var f *os.File
+		f, err = os.OpenFile(oomScoreAdjPath, os.O_WRONLY, 0)
+		if err != nil {
+			if os.IsNotExist(err) {
 				return os.ErrNotExist
 			}
-			err = fmt.Errorf("failed to apply oom-score-adj to pid %d (%v)", err)
+			err = fmt.Errorf("failed to apply oom-score-adj to pid %d (%v)", pid, err)
+			continue
+		}
+		if _, werr := f.Write([]byte(value)); werr != nil {
+			f.Close()
+			err = fmt.Errorf("failed to apply oom-score-adj to pid %d (%v)", pid, werr)
+			continue
 		}
+		f.Close()
+		return nil
 	}
 	return err
 }
@@ -96,20 +92,26 @@ func (oomAdjuster *OOMAdjuster) applyOOMScoreAdjContainer(cgroupName string, oom
 	continueAdjusting := false
 	pidList, err := oomAdjuster.pidLister(cgroupName)
 	if err != nil {
-		if syscallNotExists(err) {
+		if os.IsNotExist(err) {
 			// Nothing to do since the container doesn't exist anymore.
 			return os.ErrNotExist
 		}
 		continueAdjusting = true
-		glog.Errorf("Error getting process list for cgroup %s: %+v", cgroupName, err)
+		glog.V(10).Infof("Error getting process list for cgroup %s: %+v", cgroupName, err)
 	} else if len(pidList) == 0 {
+		glog.V(10).Infof("Pid list is empty")
 		continueAdjusting = true
 	} else {
 		for _, pid := range pidList {
 			if !adjustedProcessSet[pid] {
-				continueAdjusting = true
+				glog.V(10).Infof("pid %d needs to be set", pid)
 				if err = oomAdjuster.ApplyOOMScoreAdj(pid, oomScoreAdj); err == nil {
 					adjustedProcessSet[pid] = true
+				} else if err == os.ErrNotExist {
+					continue
+				} else {
+					glog.V(10).Infof("cannot adjust oom score for pid %d - %v", pid, err)
+					continueAdjusting = true
 				}
 				// Processes can come and go while we try to apply oom score adjust value. So ignore errors here.
 			}
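One detail worth flagging in the hunk above: /proc/<pid>/oom_score_adj must be opened write-only, which is why the rewrite uses os.OpenFile with O_WRONLY rather than os.Open (a read-only descriptor would make every Write fail), and why the descriptor is closed on every path. A minimal standalone sketch of the same write path; writeOOMScoreAdj is a hypothetical helper for illustration, not part of the vendored package:

```go
package main

import (
	"fmt"
	"os"
	"strconv"
)

// writeOOMScoreAdj writes value to /proc/<pid>/oom_score_adj. The file must
// be opened write-only; os.Open returns a read-only descriptor, so Write
// would fail on it.
func writeOOMScoreAdj(pid int, value int) error {
	path := fmt.Sprintf("/proc/%d/oom_score_adj", pid)
	f, err := os.OpenFile(path, os.O_WRONLY, 0)
	if err != nil {
		return err
	}
	defer f.Close()
	_, err = f.Write([]byte(strconv.Itoa(value)))
	return err
}

func main() {
	// Adjust our own score; raising the value (more likely to be OOM-killed)
	// does not require extra privileges.
	if err := writeOOMScoreAdj(os.Getpid(), 100); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
```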
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/oom/oom_linux_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/oom/oom_linux_test.go
new file mode 100644
index 000000000000..10e9f3d9ec6b
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/oom/oom_linux_test.go
@@ -0,0 +1,104 @@
+// +build cgo,linux
+
+/*
+Copyright 2015 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package oom
+
+import (
+	"os"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+// sequenceToPidLister converts a sequence of PID lists into a PID lister.
+// The PID lister returns pidListSequence[i] on the ith call. If i >= len(pidListSequence),
+// it returns the last element (the sequence is considered to have stabilized).
+func sequenceToPidLister(pidListSequence [][]int) func(string) ([]int, error) {
+	var numCalls int
+	return func(cgroupName string) ([]int, error) {
+		numCalls++
+		if len(pidListSequence) == 0 {
+			return []int{}, nil
+		} else if numCalls > len(pidListSequence) {
+			return pidListSequence[len(pidListSequence)-1], nil
+		}
+		return pidListSequence[numCalls-1], nil
+	}
+}
+
+// Tests that applyOOMScoreAdjContainer correctly applies OOM scores to relevant processes, or
+// returns the right error.
+func applyOOMScoreAdjContainerTester(pidListSequence [][]int, maxTries int, appliedPids []int, expectedError bool, t *testing.T) {
+	pidOOMs := make(map[int]bool)
+
+	// Mock ApplyOOMScoreAdj and pidLister.
+	oomAdjuster := NewOOMAdjuster()
+	oomAdjuster.ApplyOOMScoreAdj = func(pid int, oomScoreAdj int) error {
+		pidOOMs[pid] = true
+		return nil
+	}
+	oomAdjuster.pidLister = sequenceToPidLister(pidListSequence)
+	err := oomAdjuster.ApplyOOMScoreAdjContainer("", 100, maxTries)
+
+	// Check error value.
+	if expectedError && err == nil {
+		t.Errorf("Expected an error from ApplyOOMScoreAdjContainer but got none")
+		return
+	} else if !expectedError && err != nil {
+		t.Errorf("Expected no error but got %+v from ApplyOOMScoreAdjContainer", err)
+		return
+	} else if err != nil {
+		return
+	}
+	// Check that OOM scores were applied to the right processes.
+	if len(appliedPids) != len(pidOOMs) {
+		t.Errorf("Applied OOM scores to an incorrect number of processes - %+v vs %v", appliedPids, pidOOMs)
+		return
+	}
+	for _, pid := range appliedPids {
+		if !pidOOMs[pid] {
+			t.Errorf("Failed to apply OOM scores to process %d", pid)
+		}
+	}
+}
+
+func TestOOMScoreAdjContainer(t *testing.T) {
+	pidListSequenceEmpty := [][]int{}
+	applyOOMScoreAdjContainerTester(pidListSequenceEmpty, 3, nil, true, t)
+
+	pidListSequence1 := [][]int{
+		{1, 2},
+	}
+	applyOOMScoreAdjContainerTester(pidListSequence1, 1, []int{1, 2}, false, t)
+
+	pidListSequenceLag := [][]int{
+		{},
+		{},
+		{},
+		{1, 2, 4},
+	}
+	for i := 1; i < 4; i++ {
+		applyOOMScoreAdjContainerTester(pidListSequenceLag, i, nil, true, t)
+	}
+	applyOOMScoreAdjContainerTester(pidListSequenceLag, 4, []int{1, 2, 4}, false, t)
+}
+
+func TestPidListerFailure(t *testing.T) {
+	_, err := getPids("/does/not/exist")
+	assert.True(t, os.IsNotExist(err), "expected getPids to return not exists error. Got %v", err)
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/parsers/parsers.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/parsers/parsers.go
index f7c352ef5034..a02f18d3eb19 100644
--- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/parsers/parsers.go
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/parsers/parsers.go
@@ -17,20 +17,38 @@ limitations under the License.
 package parsers
 
 import (
-	"github.com/docker/docker/pkg/parsers"
+	"fmt"
+
+	dockerref "github.com/docker/distribution/reference"
 )
 
 const (
-	defaultImageTag = "latest"
+	DefaultImageTag = "latest"
 )
 
-// parseImageName parses a docker image string into two parts: repo and tag.
-// If tag is empty, return the defaultImageTag.
-func ParseImageName(image string) (string, string) {
-	repoToPull, tag := parsers.ParseRepositoryTag(image)
+// ParseImageName parses a docker image string into three parts: repo, tag and digest.
+// If both tag and digest are empty, a default image tag will be returned.
+func ParseImageName(image string) (string, string, string, error) {
+	named, err := dockerref.ParseNamed(image)
+	if err != nil {
+		return "", "", "", fmt.Errorf("couldn't parse image name: %v", err)
+	}
+
+	repoToPull := named.Name()
+	var tag, digest string
+
+	if tagged, ok := named.(dockerref.Tagged); ok {
+		tag = tagged.Tag()
+	}
+
+	if digested, ok := named.(dockerref.Digested); ok {
+		digest = digested.Digest().String()
+	}
+	// If no tag was specified, use the default "latest".
+	if len(tag) == 0 && len(digest) == 0 {
+		tag = DefaultImageTag
+	}
+	return repoToPull, tag, digest, nil
+}
diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/parsers/parsers_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/parsers/parsers_test.go
new file mode 100644
index 000000000000..371eee758a04
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/parsers/parsers_test.go
@@ -0,0 +1,51 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package parsers
+
+import (
+	"testing"
+)
+
+// Based on Docker test case removed in:
+// https://github.com/docker/docker/commit/4352da7803d182a6013a5238ce20a7c749db979a
+func TestParseImageName(t *testing.T) {
+	testCases := []struct {
+		Input  string
+		Repo   string
+		Tag    string
+		Digest string
+	}{
+		{Input: "root", Repo: "root", Tag: "latest"},
+		{Input: "root:tag", Repo: "root", Tag: "tag"},
+		{Input: "root@sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", Repo: "root", Digest: "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},
+		{Input: "user/repo", Repo: "user/repo", Tag: "latest"},
+		{Input: "user/repo:tag", Repo: "user/repo", Tag: "tag"},
+		{Input: "user/repo@sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", Repo: "user/repo", Digest: "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},
+		{Input: "url:5000/repo", Repo: "url:5000/repo", Tag: "latest"},
+		{Input: "url:5000/repo:tag", Repo: "url:5000/repo", Tag: "tag"},
+		{Input: "url:5000/repo@sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", Repo: "url:5000/repo", Digest: "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},
+	}
+	for _, testCase := range testCases {
+		repo, tag, digest, err := ParseImageName(testCase.Input)
+		if err != nil {
+			t.Errorf("ParseImageName(%s) failed: %v", testCase.Input, err)
+		} else if repo != testCase.Repo || tag != testCase.Tag || digest != testCase.Digest {
+			t.Errorf("Expected repo: %q, tag: %q and digest: %q, got %q, %q and %q", testCase.Repo, testCase.Tag, testCase.Digest,
+				repo, tag, digest)
+		}
+	}
+}
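The new contract returns repo, tag and digest separately, defaulting the tag to "latest" only when the input carries neither. A quick usage sketch against the vendored package, with inputs borrowed from the test table above:

```go
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/util/parsers"
)

func main() {
	for _, image := range []string{
		"root",
		"root:tag",
		"user/repo@sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
	} {
		repo, tag, digest, err := parsers.ParseImageName(image)
		if err != nil {
			fmt.Println(err)
			continue
		}
		// With neither a tag nor a digest in the input, tag defaults to "latest".
		fmt.Printf("repo=%q tag=%q digest=%q\n", repo, tag, digest)
	}
}
```

diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/pod/pod.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/pod/pod.go
index 8fb5cadd0b14..b59e15dbc5d5 100644
--- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/pod/pod.go
+++ 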
b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/pod/pod.go @@ -25,7 +25,7 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/errors" - unversionedcore "k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned" + unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned" errorsutil "k8s.io/kubernetes/pkg/util/errors" hashutil "k8s.io/kubernetes/pkg/util/hash" "k8s.io/kubernetes/pkg/util/wait" diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/procfs/procfs.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/procfs/procfs.go index cc432255fb2c..d0c38bf21e75 100644 --- a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/procfs/procfs.go +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/procfs/procfs.go @@ -49,7 +49,7 @@ func (pfs *ProcFS) GetFullContainerName(pid int) (string, error) { filePath := path.Join("/proc", strconv.Itoa(pid), "cgroup") content, err := ioutil.ReadFile(filePath) if err != nil { - if e, ok := err.(*os.SyscallError); ok && os.IsNotExist(e) { + if os.IsNotExist(err) { return "", os.ErrNotExist } return "", err diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/procfs/procfs_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/procfs/procfs_test.go new file mode 100644 index 000000000000..609543dca7d1 --- /dev/null +++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/procfs/procfs_test.go @@ -0,0 +1,58 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package procfs
+
+import (
+	"io/ioutil"
+	"testing"
+)
+
+func verifyContainerName(procCgroupText, expectedName string, expectedErr bool, t *testing.T) {
+	name, err := containerNameFromProcCgroup(procCgroupText)
+	if expectedErr && err == nil {
+		t.Errorf("Expected an error from containerNameFromProcCgroup but got none")
+		return
+	} else if !expectedErr && err != nil {
+		t.Errorf("Expected no error, but got error %+v from containerNameFromProcCgroup", err)
+		return
+	} else if expectedErr {
+		return
+	}
+	if name != expectedName {
+		t.Errorf("Expected container name %s but got name %s", expectedName, name)
+	}
+}
+
+func TestContainerNameFromProcCgroup(t *testing.T) {
+	procCgroupValid := "2:devices:docker/kubelet"
+	verifyContainerName(procCgroupValid, "docker/kubelet", false, t)
+
+	procCgroupEmpty := ""
+	verifyContainerName(procCgroupEmpty, "", true, t)
+
+	content, err := ioutil.ReadFile("example_proc_cgroup")
+	if err != nil {
+		t.Fatalf("Could not read example /proc cgroup file: %v", err)
+	}
+	verifyContainerName(string(content), "/user/1000.user/c1.session", false, t)
+
+	procCgroupNoDevice := "2:freezer:docker/kubelet\n5:cpuacct:pkg/kubectl"
+	verifyContainerName(procCgroupNoDevice, "", true, t)
+
+	procCgroupInvalid := "devices:docker/kubelet\ncpuacct:pkg/kubectl"
+	verifyContainerName(procCgroupInvalid, "", true, t)
+}
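The lookup exercised above scans /proc/<pid>/cgroup lines of the form "<id>:<subsystem>:<path>" for the devices subsystem. A simplified, hypothetical re-implementation of just that scan (containerNameFromDevicesEntry is an illustrative stand-in for the unexported helper and only matches a bare "devices" field):

```go
package main

import (
	"fmt"
	"strings"
)

// containerNameFromDevicesEntry returns the cgroup path of the devices
// subsystem from /proc/<pid>/cgroup content, or an error if no such
// entry exists.
func containerNameFromDevicesEntry(procCgroup string) (string, error) {
	for _, line := range strings.Split(procCgroup, "\n") {
		parts := strings.SplitN(line, ":", 3)
		if len(parts) == 3 && parts[1] == "devices" {
			return parts[2], nil
		}
	}
	return "", fmt.Errorf("no devices cgroup entry found")
}

func main() {
	name, err := containerNameFromDevicesEntry("2:devices:docker/kubelet")
	fmt.Println(name, err) // docker/kubelet <nil>
}
```

diff --git a/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/proxy/transport_test.go b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/proxy/transport_test.go
new file mode 100644
index 000000000000..fca115526b4c
--- /dev/null
+++ b/Godeps/_workspace/src/k8s.io/kubernetes/pkg/util/proxy/transport_test.go
@@ -0,0 +1,261 @@
+/*
+Copyright 2014 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package proxy
+
+import (
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"net/http/httptest"
+	"net/url"
+	"strings"
+	"testing"
+)
+
+func parseURLOrDie(inURL string) *url.URL {
+	parsed, err := url.Parse(inURL)
+	if err != nil {
+		panic(err)
+	}
+	return parsed
+}
+
+func TestProxyTransport(t *testing.T) {
+	testTransport := &Transport{
+		Scheme:      "http",
+		Host:        "foo.com",
+		PathPrepend: "/proxy/minion/minion1:10250",
+	}
+	testTransport2 := &Transport{
+		Scheme:      "https",
+		Host:        "foo.com",
+		PathPrepend: "/proxy/minion/minion1:8080",
+	}
+	emptyHostTransport := &Transport{
+		Scheme:      "https",
+		PathPrepend: "/proxy/minion/minion1:10250",
+	}
+	emptySchemeTransport := &Transport{
+		Host:        "foo.com",
+		PathPrepend: "/proxy/minion/minion1:10250",
+	}
+	type Item struct {
+		input        string
+		sourceURL    string
+		transport    *Transport
+		output       string
+		contentType  string
+		forwardedURI string
+		redirect     string
+		redirectWant string
+	}
+
+	table := map[string]Item{
+		"normal": {
+			input: `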
+kubelet.loggoogle.log`,
+			sourceURL:    "http://myminion.com/logs/log.log",
+			transport:    testTransport,
+			output: `
+kubelet.loggoogle.log`,
+			contentType:  "text/html",
+			forwardedURI: "/proxy/minion/minion1:10250/logs/log.log",
+		},
+		"full document": {
+			input: `
+kubelet.loggoogle.log`,
+			sourceURL:    "http://myminion.com/logs/log.log",
+			transport:    testTransport,
+			output: `
+kubelet.loggoogle.log`,
+			contentType:  "text/html",
+			forwardedURI: "/proxy/minion/minion1:10250/logs/log.log",
+		},
+		"trailing slash": {
+			input: `
+kubelet.loggoogle.log`,
+			sourceURL:    "http://myminion.com/logs/log.log",
+			transport:    testTransport,
+			output: `
+kubelet.loggoogle.log`,
+			contentType:  "text/html",
+			forwardedURI: "/proxy/minion/minion1:10250/logs/log.log",
+		},
+		"content-type charset": {
+			input: `
+kubelet.loggoogle.log`,
+			sourceURL:    "http://myminion.com/logs/log.log",
+			transport:    testTransport,
+			output: `
+kubelet.loggoogle.log`,
+			contentType:  "text/html; charset=utf-8",
+			forwardedURI: "/proxy/minion/minion1:10250/logs/log.log",
+		},
+		"content-type passthrough": {
+			input: `
+kubelet.loggoogle.log`,
+			sourceURL:    "http://myminion.com/logs/log.log",
+			transport:    testTransport,
+			output: `
+kubelet.loggoogle.log`,
+			contentType:  "text/plain",
+			forwardedURI: "/proxy/minion/minion1:10250/logs/log.log",
+		},
+		"subdir": {
+			input:        `kubelet.loggoogle.log`,
+			sourceURL:    "http://myminion.com/whatever/apt/somelog.log",
+			transport:    testTransport2,
+			output:       `kubelet.loggoogle.log`,
+			contentType:  "text/html",
+			forwardedURI: "/proxy/minion/minion1:8080/whatever/apt/somelog.log",
+		},
+		"image": {
+			input:        ``,
+			sourceURL:    "http://myminion.com/",
+			transport:    testTransport,
+			output:       ``,
+			contentType:  "text/html",
+			forwardedURI: "/proxy/minion/minion1:10250/",
+		},
+		"abs": {
+			input: `